OSDN Git Service

Merge "msm: kgsl: Add missing check for snapshot IB dump"
author: Linux Build Service Account <lnxbuild@localhost>
Sat, 29 Jun 2019 22:41:50 +0000 (15:41 -0700)
committer: Gerrit - the friendly Code Review server <code-review@localhost>
Sat, 29 Jun 2019 22:41:50 +0000 (15:41 -0700)
110 files changed:
Makefile
arch/arm/boot/dts/exynos5420-arndale-octa.dts
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/imx6sl.dtsi
arch/arm/boot/dts/imx6sx.dtsi
arch/arm/configs/msmcortex_defconfig
arch/arm/mach-exynos/suspend.c
arch/arm64/configs/msmcortex-perf_defconfig
arch/arm64/configs/msmcortex_defconfig
arch/ia64/mm/numa.c
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/book3s_rtas.c
arch/s390/kvm/kvm-s390.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kvm/pmu_intel.c
arch/x86/pci/irq.c
drivers/ata/libata-core.c
drivers/clk/rockchip/clk-rk3288.c
drivers/crypto/amcc/crypto4xx_alg.c
drivers/crypto/amcc/crypto4xx_core.c
drivers/dma/idma64.c
drivers/dma/idma64.h
drivers/gpio/Kconfig
drivers/gpio/gpio-omap.c
drivers/gpu/drm/i2c/adv7511.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/msm/kgsl_pwrctrl.c
drivers/i2c/busses/i2c-acorn.c
drivers/i2c/i2c-dev.c
drivers/infiniband/hw/mlx4/main.c
drivers/iommu/intel-iommu.c
drivers/isdn/mISDN/socket.c
drivers/md/bcache/bset.c
drivers/md/bcache/bset.h
drivers/mfd/intel-lpss.c
drivers/mfd/twl6040.c
drivers/misc/kgdbts.c
drivers/net/ethernet/dec/tulip/de4x5.c
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/usb/ipheth.c
drivers/nvmem/core.c
drivers/pci/host/pcie-rcar.c
drivers/pci/host/pcie-xilinx.c
drivers/pci/hotplug/rpadlpar_core.c
drivers/platform/chrome/cros_ec_proto.c
drivers/pwm/core.c
drivers/pwm/pwm-tiehrpwm.c
drivers/pwm/sysfs.c
drivers/scsi/bnx2fc/bnx2fc_hwi.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/lpfc/lpfc_els.c
drivers/soc/mediatek/mtk-pmic-wrap.c
drivers/soc/qcom/smem.c
drivers/spi/spi-pxa2xx.c
drivers/thermal/msm_lmh_dcvs.c
drivers/thermal/msm_thermal.c
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/sunhv.c
drivers/usb/core/quirks.c
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/storage/unusual_realtek.h
drivers/video/fbdev/hgafb.c
drivers/video/fbdev/imsttfb.c
fs/configfs/dir.c
fs/fat/file.c
fs/fuse/dev.c
fs/inode.c
fs/nfsd/vfs.h
fs/ocfs2/dcache.c
fs/proc/task_mmu.c
fs/userfaultfd.c
include/linux/cgroup.h
include/linux/mm.h
include/linux/pwm.h
include/linux/sched.h
include/net/bluetooth/hci_core.h
ipc/mqueue.c
ipc/msgutil.c
kernel/cpuset.c
kernel/cred.c
kernel/events/ring_buffer.c
kernel/fork.c
kernel/futex.c
kernel/ptrace.c
kernel/sched/core.c
kernel/sched/hmp.c
kernel/sched/sched.h
kernel/smpboot.c
kernel/sys.c
kernel/sysctl.c
kernel/time/ntp.c
mm/cma.c
mm/cma_debug.c
mm/hugetlb.c
mm/list_lru.c
mm/mmap.c
net/ax25/ax25_route.c
net/bluetooth/hci_conn.c
net/core/neighbour.c
net/ipv6/ip6_flowlabel.c
net/lapb/lapb_iface.c
sound/core/seq/seq_ports.c
sound/pci/hda/hda_intel.c
sound/soc/codecs/cs42xx8.c

index 1aa2bfe..1bef674 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 182
+SUBLEVEL = 183
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
index 4ecef69..b54c0b8 100644 (file)
@@ -97,6 +97,7 @@
                                regulator-name = "PVDD_APIO_1V8";
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <1800000>;
+                               regulator-always-on;
                        };
 
                        ldo3_reg: LDO3 {
                                regulator-name = "PVDD_ABB_1V8";
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <1800000>;
+                               regulator-always-on;
                        };
 
                        ldo9_reg: LDO9 {
index e6af41c..3992b8e 100644 (file)
                                compatible = "fsl,imx6q-sdma", "fsl,imx35-sdma";
                                reg = <0x020ec000 0x4000>;
                                interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clks IMX6QDL_CLK_SDMA>,
+                               clocks = <&clks IMX6QDL_CLK_IPG>,
                                         <&clks IMX6QDL_CLK_SDMA>;
                                clock-names = "ipg", "ahb";
                                #dma-cells = <3>;
index d8ba99f..ac820df 100644 (file)
                                reg = <0x020ec000 0x4000>;
                                interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clks IMX6SL_CLK_SDMA>,
-                                        <&clks IMX6SL_CLK_SDMA>;
+                                        <&clks IMX6SL_CLK_AHB>;
                                clock-names = "ipg", "ahb";
                                #dma-cells = <3>;
                                /* imx6sl reuses imx6q sdma firmware */
index 6963dff..5783eb8 100644 (file)
                                compatible = "fsl,imx6sx-sdma", "fsl,imx6q-sdma";
                                reg = <0x020ec000 0x4000>;
                                interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clks IMX6SX_CLK_SDMA>,
+                               clocks = <&clks IMX6SX_CLK_IPG>,
                                         <&clks IMX6SX_CLK_SDMA>;
                                clock-names = "ipg", "ahb";
                                #dma-cells = <3>;
index 4af5514..8e08663 100644 (file)
@@ -484,9 +484,7 @@ CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
 CONFIG_MSM_TZ_LOG=y
 CONFIG_SENSORS_SSC=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_FUSE_FS=y
 CONFIG_MSDOS_FS=y
index a003833..013f4d5 100644 (file)
@@ -508,8 +508,27 @@ early_wakeup:
 
 static void exynos5420_prepare_pm_resume(void)
 {
+       unsigned int mpidr, cluster;
+
+       mpidr = read_cpuid_mpidr();
+       cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
        if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM))
                WARN_ON(mcpm_cpu_powered_up());
+
+       if (IS_ENABLED(CONFIG_HW_PERF_EVENTS) && cluster != 0) {
+               /*
+                * When system is resumed on the LITTLE/KFC core (cluster 1),
+                * the DSCR is not properly updated until the power is turned
+                * on also for the cluster 0. Enable it for a while to
+                * propagate the SPNIDEN and SPIDEN signals from Secure JTAG
+                * block and avoid undefined instruction issue on CP14 reset.
+                */
+               pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
+                               EXYNOS_COMMON_CONFIGURATION(0));
+               pmu_raw_writel(0,
+                               EXYNOS_COMMON_CONFIGURATION(0));
+       }
 }
 
 static void exynos5420_pm_resume(void)
index 570a21b..8275c55 100644 (file)
@@ -609,9 +609,7 @@ CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
 CONFIG_MSM_TZ_LOG=y
 CONFIG_SENSORS_SSC=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_EXT4_ENCRYPTION=y
 CONFIG_EXT4_FS_ENCRYPTION=y
index 93b9e27..b975671 100644 (file)
@@ -636,9 +636,7 @@ CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
 CONFIG_MSM_TZ_LOG=y
 CONFIG_SENSORS_SSC=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_EXT4_ENCRYPTION=y
 CONFIG_EXT4_FS_ENCRYPTION=y
index aa19b7a..476c7b4 100644 (file)
@@ -49,6 +49,7 @@ paddr_to_nid(unsigned long paddr)
 
        return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0);
 }
+EXPORT_SYMBOL(paddr_to_nid);
 
 #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA)
 /*
index a92d95a..1883627 100644 (file)
@@ -250,6 +250,7 @@ struct kvm_arch {
 #ifdef CONFIG_PPC_BOOK3S_64
        struct list_head spapr_tce_tables;
        struct list_head rtas_tokens;
+       struct mutex rtas_token_lock;
        DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
 #endif
 #ifdef CONFIG_KVM_MPIC
index 099c79d..4aab1c9 100644 (file)
@@ -809,6 +809,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 #ifdef CONFIG_PPC64
        INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
        INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
+       mutex_init(&kvm->arch.rtas_token_lock);
 #endif
 
        return kvm->arch.kvm_ops->init_vm(kvm);
index ef27fbd..b1b2273 100644 (file)
@@ -133,7 +133,7 @@ static int rtas_token_undefine(struct kvm *kvm, char *name)
 {
        struct rtas_token_definition *d, *tmp;
 
-       lockdep_assert_held(&kvm->lock);
+       lockdep_assert_held(&kvm->arch.rtas_token_lock);
 
        list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
                if (rtas_name_matches(d->handler->name, name)) {
@@ -154,7 +154,7 @@ static int rtas_token_define(struct kvm *kvm, char *name, u64 token)
        bool found;
        int i;
 
-       lockdep_assert_held(&kvm->lock);
+       lockdep_assert_held(&kvm->arch.rtas_token_lock);
 
        list_for_each_entry(d, &kvm->arch.rtas_tokens, list) {
                if (d->token == token)
@@ -193,14 +193,14 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
        if (copy_from_user(&args, argp, sizeof(args)))
                return -EFAULT;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.rtas_token_lock);
 
        if (args.token)
                rc = rtas_token_define(kvm, args.name, args.token);
        else
                rc = rtas_token_undefine(kvm, args.name);
 
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.rtas_token_lock);
 
        return rc;
 }
@@ -232,7 +232,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
        orig_rets = args.rets;
        args.rets = &args.args[be32_to_cpu(args.nargs)];
 
-       mutex_lock(&vcpu->kvm->lock);
+       mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
 
        rc = -ENOENT;
        list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
@@ -243,7 +243,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
                }
        }
 
-       mutex_unlock(&vcpu->kvm->lock);
+       mutex_unlock(&vcpu->kvm->arch.rtas_token_lock);
 
        if (rc == 0) {
                args.rets = orig_rets;
@@ -269,8 +269,6 @@ void kvmppc_rtas_tokens_free(struct kvm *kvm)
 {
        struct rtas_token_definition *d, *tmp;
 
-       lockdep_assert_held(&kvm->lock);
-
        list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
                list_del(&d->list);
                kfree(d);
index 5ddb1de..23911ec 100644 (file)
@@ -2721,21 +2721,28 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
                                const struct kvm_memory_slot *new,
                                enum kvm_mr_change change)
 {
-       int rc;
-
-       /* If the basics of the memslot do not change, we do not want
-        * to update the gmap. Every update causes several unnecessary
-        * segment translation exceptions. This is usually handled just
-        * fine by the normal fault handler + gmap, but it will also
-        * cause faults on the prefix page of running guest CPUs.
-        */
-       if (old->userspace_addr == mem->userspace_addr &&
-           old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
-           old->npages * PAGE_SIZE == mem->memory_size)
-               return;
+       int rc = 0;
 
-       rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
-               mem->guest_phys_addr, mem->memory_size);
+       switch (change) {
+       case KVM_MR_DELETE:
+               rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
+                                       old->npages * PAGE_SIZE);
+               break;
+       case KVM_MR_MOVE:
+               rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
+                                       old->npages * PAGE_SIZE);
+               if (rc)
+                       break;
+               /* FALLTHROUGH */
+       case KVM_MR_CREATE:
+               rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
+                                     mem->guest_phys_addr, mem->memory_size);
+               break;
+       case KVM_MR_FLAGS_ONLY:
+               break;
+       default:
+               WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
+       }
        if (rc)
                pr_warn("failed to commit memory region\n");
        return;
index e94e6f1..6f24832 100644 (file)
@@ -717,8 +717,11 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
 {
        set_cpu_cap(c, X86_FEATURE_ZEN);
 
-       /* Fix erratum 1076: CPB feature bit not being set in CPUID. */
-       if (!cpu_has(c, X86_FEATURE_CPB))
+       /*
+        * Fix erratum 1076: CPB feature bit not being set in CPUID.
+        * Always set it, except when running under a hypervisor.
+        */
+       if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
                set_cpu_cap(c, X86_FEATURE_CPB);
 }
 
index 325ed90..3572434 100644 (file)
@@ -2513,7 +2513,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
                return ret;
 
        if (event->attr.precise_ip) {
-               if (!(event->attr.freq || event->attr.wakeup_events)) {
+               if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
                        event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
                        if (!(event->attr.sample_type &
                              ~intel_pmu_free_running_flags(event)))
index 23a7c7b..8fc07ea 100644 (file)
@@ -235,11 +235,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                }
                break;
        default:
-               if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
-                   (pmc = get_fixed_pmc(pmu, msr))) {
-                       if (!msr_info->host_initiated)
-                               data = (s64)(s32)data;
-                       pmc->counter += data - pmc_read_counter(pmc);
+               if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
+                       if (msr_info->host_initiated)
+                               pmc->counter = data;
+                       else
+                               pmc->counter = (s32)data;
+                       return 0;
+               } else if ((pmc = get_fixed_pmc(pmu, msr))) {
+                       pmc->counter = data;
                        return 0;
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        if (data == pmc->eventsel)
index 9bd1154..5f0e596 100644 (file)
@@ -1117,6 +1117,8 @@ static struct dmi_system_id __initdata pciirq_dmi_table[] = {
 
 void __init pcibios_irq_init(void)
 {
+       struct irq_routing_table *rtable = NULL;
+
        DBG(KERN_DEBUG "PCI: IRQ init\n");
 
        if (raw_pci_ops == NULL)
@@ -1127,8 +1129,10 @@ void __init pcibios_irq_init(void)
        pirq_table = pirq_find_routing_table();
 
 #ifdef CONFIG_PCI_BIOS
-       if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
+       if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) {
                pirq_table = pcibios_get_irq_routing_table();
+               rtable = pirq_table;
+       }
 #endif
        if (pirq_table) {
                pirq_peer_trick();
@@ -1143,8 +1147,10 @@ void __init pcibios_irq_init(void)
                 * If we're using the I/O APIC, avoid using the PCI IRQ
                 * routing table
                 */
-               if (io_apic_assign_pci_irqs)
+               if (io_apic_assign_pci_irqs) {
+                       kfree(rtable);
                        pirq_table = NULL;
+               }
        }
 
        x86_init.pci.fixup_irqs();
index d543172..a352f09 100644 (file)
@@ -4176,9 +4176,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "ST3320[68]13AS",     "SD1[5-9]",     ATA_HORKAGE_NONCQ |
                                                ATA_HORKAGE_FIRMWARE_WARN },
 
-       /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
-       { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
-       { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+       /* drives which fail FPDMA_AA activation (some may freeze afterwards)
+          the ST disks also have LPM issues */
+       { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA |
+                                               ATA_HORKAGE_NOLPM, },
+       { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA |
+                                               ATA_HORKAGE_NOLPM, },
        { "VB0250EAVER",        "HPG7",         ATA_HORKAGE_BROKEN_FPDMA_AA },
 
        /* Blacklist entries taken from Silicon Image 3124/3132
index 9040878..a6cda84 100644 (file)
@@ -797,6 +797,9 @@ static const int rk3288_saved_cru_reg_ids[] = {
        RK3288_CLKSEL_CON(10),
        RK3288_CLKSEL_CON(33),
        RK3288_CLKSEL_CON(37),
+
+       /* We turn aclk_dmac1 on for suspend; this will restore it */
+       RK3288_CLKGATE_CON(10),
 };
 
 static u32 rk3288_saved_cru_regs[ARRAY_SIZE(rk3288_saved_cru_reg_ids)];
@@ -813,6 +816,14 @@ static int rk3288_clk_suspend(void)
        }
 
        /*
+        * Going into deep sleep (specifically setting PMU_CLR_DMA in
+        * RK3288_PMU_PWRMODE_CON1) appears to fail unless
+        * "aclk_dmac1" is on.
+        */
+       writel_relaxed(1 << (12 + 16),
+                      rk3288_cru_base + RK3288_CLKGATE_CON(10));
+
+       /*
         * Switch PLLs other than DPLL (for SDRAM) to slow mode to
         * avoid crashes on resume. The Mask ROM on the system will
         * put APLL, CPLL, and GPLL into slow mode at resume time
index e3b8beb..4afca39 100644 (file)
@@ -138,8 +138,7 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
        sa = (struct dynamic_sa_ctl *) ctx->sa_in;
        ctx->hash_final = 0;
 
-       set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ?
-                                SA_SAVE_IV : SA_NOT_SAVE_IV),
+       set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
                                 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
                                 SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
                                 SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
index 1e810f5..78d0722 100644 (file)
@@ -645,15 +645,6 @@ static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
                addr = dma_map_page(dev->core_dev->device, sg_page(dst),
                                    dst->offset, dst->length, DMA_FROM_DEVICE);
        }
-
-       if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
-               struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
-
-               crypto4xx_memcpy_from_le32((u32 *)req->iv,
-                       pd_uinfo->sr_va->save_iv,
-                       crypto_skcipher_ivsize(skcipher));
-       }
-
        crypto4xx_ret_sg_desc(dev, pd_uinfo);
        if (ablk_req->base.complete != NULL)
                ablk_req->base.complete(&ablk_req->base, 0);
index 7d56b47..25e25b6 100644 (file)
@@ -594,7 +594,7 @@ static int idma64_probe(struct idma64_chip *chip)
        idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 
-       idma64->dma.dev = chip->dev;
+       idma64->dma.dev = chip->sysdev;
 
        ret = dma_async_device_register(&idma64->dma);
        if (ret)
@@ -632,6 +632,7 @@ static int idma64_platform_probe(struct platform_device *pdev)
 {
        struct idma64_chip *chip;
        struct device *dev = &pdev->dev;
+       struct device *sysdev = dev->parent;
        struct resource *mem;
        int ret;
 
@@ -648,11 +649,12 @@ static int idma64_platform_probe(struct platform_device *pdev)
        if (IS_ERR(chip->regs))
                return PTR_ERR(chip->regs);
 
-       ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
        if (ret)
                return ret;
 
        chip->dev = dev;
+       chip->sysdev = sysdev;
 
        ret = idma64_probe(chip);
        if (ret)
index f6aeff0..e40c69b 100644 (file)
@@ -215,12 +215,14 @@ static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value)
 /**
  * struct idma64_chip - representation of iDMA 64-bit controller hardware
  * @dev:               struct device of the DMA controller
+ * @sysdev:            struct device of the physical device that does DMA
  * @irq:               irq line
  * @regs:              memory mapped I/O space
  * @idma64:            struct idma64 that is filed by idma64_probe()
  */
 struct idma64_chip {
        struct device   *dev;
+       struct device   *sysdev;
        int             irq;
        void __iomem    *regs;
        struct idma64   *idma64;
index d4729fa..1491dc4 100644 (file)
@@ -598,6 +598,7 @@ config GPIO_ADP5588
 config GPIO_ADP5588_IRQ
        bool "Interrupt controller support for ADP5588"
        depends on GPIO_ADP5588=y
+       select GPIOLIB_IRQCHIP
        help
          Say yes here to enable the adp5588 to be used as an interrupt
          controller. It requires the driver to be built in the kernel.
index 9943273..c8c49b1 100644 (file)
@@ -292,6 +292,22 @@ static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
        }
 }
 
+/*
+ * Off mode wake-up capable GPIOs in bank(s) that are in the wakeup domain.
+ * See TRM section for GPIO for "Wake-Up Generation" for the list of GPIOs
+ * in wakeup domain. If bank->non_wakeup_gpios is not configured, assume none
+ * are capable waking up the system from off mode.
+ */
+static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask)
+{
+       u32 no_wake = bank->non_wakeup_gpios;
+
+       if (no_wake)
+               return !!(~no_wake & gpio_mask);
+
+       return false;
+}
+
 static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
                                                unsigned trigger)
 {
@@ -323,13 +339,7 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
        }
 
        /* This part needs to be executed always for OMAP{34xx, 44xx} */
-       if (!bank->regs->irqctrl) {
-               /* On omap24xx proceed only when valid GPIO bit is set */
-               if (bank->non_wakeup_gpios) {
-                       if (!(bank->non_wakeup_gpios & gpio_bit))
-                               goto exit;
-               }
-
+       if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) {
                /*
                 * Log the edge gpio and manually trigger the IRQ
                 * after resume if the input level changes
@@ -342,7 +352,6 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
                        bank->enabled_non_wakeup_gpios &= ~gpio_bit;
        }
 
-exit:
        bank->level_mask =
                readl_relaxed(bank->base + bank->regs->leveldetect0) |
                readl_relaxed(bank->base + bank->regs->leveldetect1);
index c7c243e..4300e27 100644 (file)
@@ -781,11 +781,11 @@ static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
                        vsync_polarity = 1;
        }
 
-       if (mode->vrefresh <= 24000)
+       if (drm_mode_vrefresh(mode) <= 24)
                low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
-       else if (mode->vrefresh <= 25000)
+       else if (drm_mode_vrefresh(mode) <= 25)
                low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
-       else if (mode->vrefresh <= 30000)
+       else if (drm_mode_vrefresh(mode) <= 30)
                low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
        else
                low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
index ad0dd56..8dba101 100644 (file)
@@ -2442,7 +2442,8 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
 
        cmd = container_of(header, typeof(*cmd), header);
 
-       if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+       if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
+           cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
                DRM_ERROR("Illegal shader type %u.\n",
                          (unsigned) cmd->body.type);
                return -EINVAL;
@@ -2681,6 +2682,10 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
        if (view_type == vmw_view_max)
                return -EINVAL;
        cmd = container_of(header, typeof(*cmd), header);
+       if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
+               DRM_ERROR("Invalid surface id.\n");
+               return -EINVAL;
+       }
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->sid, &srf_node);
index b1b0b69..c916822 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -61,7 +61,7 @@ static const char * const clocks[] = {
        "iref_clk"
 };
 
-static unsigned int ib_votes[KGSL_MAX_BUSLEVELS];
+static unsigned long ib_votes[KGSL_MAX_BUSLEVELS];
 static int last_vote_buslevel;
 static int max_vote_buslevel;
 
@@ -123,7 +123,7 @@ static void _record_pwrevent(struct kgsl_device *device,
 /**
  * kgsl_get_bw() - Return latest msm bus IB vote
  */
-static unsigned int kgsl_get_bw(void)
+static unsigned long kgsl_get_bw(void)
 {
        return ib_votes[last_vote_buslevel];
 }
@@ -137,8 +137,9 @@ static unsigned int kgsl_get_bw(void)
 static void _ab_buslevel_update(struct kgsl_pwrctrl *pwr,
                                unsigned long *ab)
 {
-       unsigned int ib = ib_votes[last_vote_buslevel];
-       unsigned int max_bw = ib_votes[max_vote_buslevel];
+       unsigned long ib = ib_votes[last_vote_buslevel];
+       unsigned long max_bw = ib_votes[max_vote_buslevel];
+
        if (!ab)
                return;
        if (ib == 0)
index 9d7be5a..6618db7 100644 (file)
@@ -83,6 +83,7 @@ static struct i2c_algo_bit_data ioc_data = {
 
 static struct i2c_adapter ioc_ops = {
        .nr                     = 0,
+       .name                   = "ioc",
        .algo_data              = &ioc_data,
 };
 
index 57e3790..e56b774 100644 (file)
@@ -295,6 +295,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
                            rdwr_pa[i].buf[0] < 1 ||
                            rdwr_pa[i].len < rdwr_pa[i].buf[0] +
                                             I2C_SMBUS_BLOCK_MAX) {
+                               i++;
                                res = -EINVAL;
                                break;
                        }
index 67c4c73..6968154 100644 (file)
@@ -1042,6 +1042,8 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
         * mlx4_ib_vma_close().
         */
        down_write(&owning_mm->mmap_sem);
+       if (!mmget_still_valid(owning_mm))
+               goto skip_mm;
        for (i = 0; i < HW_BAR_COUNT; i++) {
                vma = context->hw_bar_info[i].vma;
                if (!vma)
@@ -1061,6 +1063,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
                context->hw_bar_info[i].vma->vm_ops = NULL;
        }
 
+skip_mm:
        up_write(&owning_mm->mmap_sem);
        mmput(owning_mm);
        put_task_struct(owning_process);
index 3e97c4b..b965561 100644 (file)
@@ -3983,9 +3983,7 @@ static void __init init_no_remapping_devices(void)
 
                /* This IOMMU has *only* gfx devices. Either bypass it or
                   set the gfx_mapped flag, as appropriate */
-               if (dmar_map_gfx) {
-                       intel_iommu_gfx_mapped = 1;
-               } else {
+               if (!dmar_map_gfx) {
                        drhd->ignored = 1;
                        for_each_active_dev_scope(drhd->devices,
                                                  drhd->devices_cnt, i, dev)
@@ -4694,6 +4692,9 @@ int __init intel_iommu_init(void)
                goto out_free_reserved_range;
        }
 
+       if (dmar_map_gfx)
+               intel_iommu_gfx_mapped = 1;
+
        init_no_remapping_devices();
 
        ret = init_dmars();
index 0d29b5a..8cbb75d 100644 (file)
@@ -394,7 +394,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                        memcpy(di.channelmap, dev->channelmap,
                               sizeof(di.channelmap));
                        di.nrbchan = dev->nrbchan;
-                       strcpy(di.name, dev_name(&dev->dev));
+                       strscpy(di.name, dev_name(&dev->dev), sizeof(di.name));
                        if (copy_to_user((void __user *)arg, &di, sizeof(di)))
                                err = -EFAULT;
                } else
@@ -678,7 +678,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                        memcpy(di.channelmap, dev->channelmap,
                               sizeof(di.channelmap));
                        di.nrbchan = dev->nrbchan;
-                       strcpy(di.name, dev_name(&dev->dev));
+                       strscpy(di.name, dev_name(&dev->dev), sizeof(di.name));
                        if (copy_to_user((void __user *)arg, &di, sizeof(di)))
                                err = -EFAULT;
                } else
@@ -692,6 +692,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                        err = -EFAULT;
                        break;
                }
+               dn.name[sizeof(dn.name) - 1] = '\0';
                dev = get_mdevice(dn.id);
                if (dev)
                        err = device_rename(&dev->dev, dn.name);
index 646fe85..158eae1 100644 (file)
@@ -823,12 +823,22 @@ unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
        struct bset *i = bset_tree_last(b)->data;
        struct bkey *m, *prev = NULL;
        struct btree_iter iter;
+       struct bkey preceding_key_on_stack = ZERO_KEY;
+       struct bkey *preceding_key_p = &preceding_key_on_stack;
 
        BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
 
-       m = bch_btree_iter_init(b, &iter, b->ops->is_extents
-                               ? PRECEDING_KEY(&START_KEY(k))
-                               : PRECEDING_KEY(k));
+       /*
+        * If k has preceding key, preceding_key_p will be set to address
+        *  of k's preceding key; otherwise preceding_key_p will be set
+        * to NULL inside preceding_key().
+        */
+       if (b->ops->is_extents)
+               preceding_key(&START_KEY(k), &preceding_key_p);
+       else
+               preceding_key(k, &preceding_key_p);
+
+       m = bch_btree_iter_init(b, &iter, preceding_key_p);
 
        if (b->ops->insert_fixup(b, k, &iter, replace_key))
                return status;
index ae96462..b935839 100644 (file)
@@ -417,20 +417,26 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
        return __bch_cut_back(where, k);
 }
 
-#define PRECEDING_KEY(_k)                                      \
-({                                                             \
-       struct bkey *_ret = NULL;                               \
-                                                               \
-       if (KEY_INODE(_k) || KEY_OFFSET(_k)) {                  \
-               _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0);  \
-                                                               \
-               if (!_ret->low)                                 \
-                       _ret->high--;                           \
-               _ret->low--;                                    \
-       }                                                       \
-                                                               \
-       _ret;                                                   \
-})
+/*
+ * Pointer '*preceding_key_p' points to a memory object to store preceding
+ * key of k. If the preceding key does not exist, set '*preceding_key_p' to
+ * NULL. So the caller of preceding_key() needs to take care of memory
+ * which '*preceding_key_p' pointed to before calling preceding_key().
+ * Currently the only caller of preceding_key() is bch_btree_insert_key(),
+ * and it points to an on-stack variable, so the memory release is handled
+ * by stackframe itself.
+ */
+static inline void preceding_key(struct bkey *k, struct bkey **preceding_key_p)
+{
+       if (KEY_INODE(k) || KEY_OFFSET(k)) {
+               (**preceding_key_p) = KEY(KEY_INODE(k), KEY_OFFSET(k), 0);
+               if (!(*preceding_key_p)->low)
+                       (*preceding_key_p)->high--;
+               (*preceding_key_p)->low--;
+       } else {
+               (*preceding_key_p) = NULL;
+       }
+}
 
 static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
 {
index ac86748..4988751 100644 (file)
@@ -267,6 +267,9 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss)
 {
        u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN;
 
+       /* Set the device in reset state */
+       writel(0, lpss->priv + LPSS_PRIV_RESETS);
+
        intel_lpss_deassert_reset(lpss);
 
        intel_lpss_set_remap_addr(lpss);
index 72aab60..db86844 100644 (file)
@@ -316,8 +316,19 @@ int twl6040_power(struct twl6040 *twl6040, int on)
                        }
                }
 
+               /*
+                * Register access can produce errors after power-up unless we
+                * wait at least 8ms based on measurements on duovero.
+                */
+               usleep_range(10000, 12000);
+
                /* Sync with the HW */
-               regcache_sync(twl6040->regmap);
+               ret = regcache_sync(twl6040->regmap);
+               if (ret) {
+                       dev_err(twl6040->dev, "Failed to sync with the HW: %i\n",
+                               ret);
+                       goto out;
+               }
 
                /* Default PLL configuration after power up */
                twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL;
index 99635dd..bb3a76a 100644 (file)
@@ -1132,7 +1132,7 @@ static void kgdbts_put_char(u8 chr)
 
 static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp)
 {
-       int len = strlen(kmessage);
+       size_t len = strlen(kmessage);
 
        if (len >= MAX_CONFIG_LEN) {
                printk(KERN_ERR "kgdbts: config string too long\n");
@@ -1152,7 +1152,7 @@ static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp)
 
        strcpy(config, kmessage);
        /* Chop out \n char as a result of echo */
-       if (config[len - 1] == '\n')
+       if (len && config[len - 1] == '\n')
                config[len - 1] = '\0';
 
        /* Go and configure with the new params. */
index 3acde3b..7799cf3 100644 (file)
@@ -2106,7 +2106,6 @@ static struct eisa_driver de4x5_eisa_driver = {
                .remove  = de4x5_eisa_remove,
         }
 };
-MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
 #endif
 
 #ifdef CONFIG_PCI
index 734f655..51bfe74 100644 (file)
@@ -1050,7 +1050,7 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
                cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
                break;
        case ETHTOOL_GRXRINGS:
-               cmd->data = adapter->num_rx_qs - 1;
+               cmd->data = adapter->num_rx_qs;
                break;
        default:
                return -EINVAL;
index afaf79b..2d9f4ed 100644 (file)
@@ -1408,6 +1408,10 @@ static void sh_eth_dev_exit(struct net_device *ndev)
        sh_eth_get_stats(ndev);
        sh_eth_reset(ndev);
 
+       /* Set the RMII mode again if required */
+       if (mdp->cd->rmiimode)
+               sh_eth_write(ndev, 0x1, RMIIMODE);
+
        /* Set MAC address again */
        update_mac_address(ndev);
 }
index 01f95d1..2b16a5f 100644 (file)
@@ -437,17 +437,18 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
                          dev);
        dev->tx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 
+       netif_stop_queue(net);
        retval = usb_submit_urb(dev->tx_urb, GFP_ATOMIC);
        if (retval) {
                dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n",
                        __func__, retval);
                dev->net->stats.tx_errors++;
                dev_kfree_skb_any(skb);
+               netif_wake_queue(net);
        } else {
                dev->net->stats.tx_packets++;
                dev->net->stats.tx_bytes += skb->len;
                dev_consume_skb_any(skb);
-               netif_stop_queue(net);
        }
 
        return NETDEV_TX_OK;
index 6fd4e5a..931cc33 100644 (file)
@@ -789,7 +789,7 @@ static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell,
                                                    void *buf)
 {
        u8 *p, *b;
-       int i, bit_offset = cell->bit_offset;
+       int i, extra, bit_offset = cell->bit_offset;
 
        p = b = buf;
        if (bit_offset) {
@@ -804,11 +804,16 @@ static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell,
                        p = b;
                        *b++ >>= bit_offset;
                }
-
-               /* result fits in less bytes */
-               if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
-                       *p-- = 0;
+       } else {
+               /* point to the msb */
+               p += cell->bytes - 1;
        }
+
+       /* result fits in less bytes */
+       extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
+       while (--extra >= 0)
+               *p-- = 0;
+
        /* clear msb bits if any leftover in the last byte */
        *p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0);
 }
index 414c336..b18cf12 100644 (file)
@@ -737,6 +737,10 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
 
        /* setup MSI data target */
        msi->pages = __get_free_pages(GFP_KERNEL, 0);
+       if (!msi->pages) {
+               err = -ENOMEM;
+               goto err;
+       }
        base = virt_to_phys((void *)msi->pages);
 
        rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
index 4cfa463..6a2499f 100644 (file)
@@ -349,14 +349,19 @@ static const struct irq_domain_ops msi_domain_ops = {
  * xilinx_pcie_enable_msi - Enable MSI support
  * @port: PCIe port information
  */
-static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
+static int xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
 {
        phys_addr_t msg_addr;
 
        port->msi_pages = __get_free_pages(GFP_KERNEL, 0);
+       if (!port->msi_pages)
+               return -ENOMEM;
+
        msg_addr = virt_to_phys((void *)port->msi_pages);
        pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1);
        pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2);
+
+       return 0;
 }
 
 /* INTx Functions */
@@ -555,6 +560,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
        struct device *dev = port->dev;
        struct device_node *node = dev->of_node;
        struct device_node *pcie_intc_node;
+       int ret;
 
        /* Setup INTx */
        pcie_intc_node = of_get_next_child(node, NULL);
@@ -582,7 +588,9 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
                        return PTR_ERR(port->irq_domain);
                }
 
-               xilinx_pcie_enable_msi(port);
+               ret = xilinx_pcie_enable_msi(port);
+               if (ret)
+                       return ret;
        }
 
        return 0;
index f2fcbe9..aae2957 100644 (file)
@@ -55,6 +55,7 @@ static struct device_node *find_vio_slot_node(char *drc_name)
                if ((rc == 0) && (!strcmp(drc_name, name)))
                        break;
        }
+       of_node_put(parent);
 
        return dn;
 }
@@ -78,6 +79,7 @@ static struct device_node *find_php_slot_pci_node(char *drc_name,
        return np;
 }
 
+/* Returns a device_node with its reference count incremented */
 static struct device_node *find_dlpar_node(char *drc_name, int *node_type)
 {
        struct device_node *dn;
@@ -314,6 +316,7 @@ int dlpar_add_slot(char *drc_name)
                        rc = dlpar_add_phb(drc_name, dn);
                        break;
        }
+       of_node_put(dn);
 
        printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name);
 exit:
@@ -447,6 +450,7 @@ int dlpar_remove_slot(char *drc_name)
                        rc = dlpar_remove_pci_slot(drc_name, dn);
                        break;
        }
+       of_node_put(dn);
        vm_unmap_aliases();
 
        printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name);
index a0b8c8a..5c285f2 100644 (file)
@@ -66,6 +66,17 @@ static int send_command(struct cros_ec_device *ec_dev,
        else
                xfer_fxn = ec_dev->cmd_xfer;
 
+       if (!xfer_fxn) {
+               /*
+                * This error can happen if a communication error happened and
+                * the EC is trying to use protocol v2, on an underlying
+                * communication mechanism that does not support v2.
+                */
+               dev_err_once(ec_dev->dev,
+                            "missing EC transfer API, cannot send command\n");
+               return -EIO;
+       }
+
        ret = (*xfer_fxn)(ec_dev, msg);
        if (msg->result == EC_RES_IN_PROGRESS) {
                int i;
index ec84ff8..6911f96 100644 (file)
@@ -284,10 +284,12 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip,
        if (IS_ENABLED(CONFIG_OF))
                of_pwmchip_add(chip);
 
-       pwmchip_sysfs_export(chip);
-
 out:
        mutex_unlock(&pwm_lock);
+
+       if (!ret)
+               pwmchip_sysfs_export(chip);
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(pwmchip_add_with_polarity);
@@ -321,7 +323,7 @@ int pwmchip_remove(struct pwm_chip *chip)
        unsigned int i;
        int ret = 0;
 
-       pwmchip_sysfs_unexport_children(chip);
+       pwmchip_sysfs_unexport(chip);
 
        mutex_lock(&pwm_lock);
 
@@ -341,8 +343,6 @@ int pwmchip_remove(struct pwm_chip *chip)
 
        free_pwms(chip);
 
-       pwmchip_sysfs_unexport(chip);
-
 out:
        mutex_unlock(&pwm_lock);
        return ret;
index 062dff1..ede17f8 100644 (file)
@@ -385,6 +385,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
        }
 
        /* Update shadow register first before modifying active register */
+       ehrpwm_modify(pc->mmio_base, AQSFRC, AQSFRC_RLDCSF_MASK,
+                     AQSFRC_RLDCSF_ZRO);
        ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
        /*
         * Changes to immediate action on Action Qualifier. This puts
index 375008e..199370e 100644 (file)
@@ -340,19 +340,6 @@ void pwmchip_sysfs_export(struct pwm_chip *chip)
 void pwmchip_sysfs_unexport(struct pwm_chip *chip)
 {
        struct device *parent;
-
-       parent = class_find_device(&pwm_class, NULL, chip,
-                                  pwmchip_sysfs_match);
-       if (parent) {
-               /* for class_find_device() */
-               put_device(parent);
-               device_unregister(parent);
-       }
-}
-
-void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
-{
-       struct device *parent;
        unsigned int i;
 
        parent = class_find_device(&pwm_class, NULL, chip,
@@ -368,6 +355,7 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
        }
 
        put_device(parent);
+       device_unregister(parent);
 }
 
 static int __init pwm_sysfs_init(void)
index 28c671b..0c71b69 100644 (file)
@@ -829,7 +829,7 @@ ret_err_rqe:
                        ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
                        (u64)err_entry->data.err_warn_bitmap_lo;
                for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
-                       if (err_warn_bit_map & (u64) (1 << i)) {
+                       if (err_warn_bit_map & ((u64)1 << i)) {
                                err_warn = i;
                                break;
                        }
index f3bb7af..5eaf14c 100644 (file)
@@ -634,6 +634,10 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
 
        if (ndev->flags & IFF_LOOPBACK) {
                ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
+               if (!ndev) {
+                       err = -ENETUNREACH;
+                       goto rel_neigh;
+               }
                mtu = ndev->mtu;
                pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
                        n->dev->name, ndev->name, mtu);
index ee1f9ee..400eee9 100644 (file)
@@ -978,6 +978,8 @@ static struct domain_device *sas_ex_discover_expander(
                list_del(&child->dev_list_node);
                spin_unlock_irq(&parent->port->dev_list_lock);
                sas_put_device(child);
+               sas_port_delete(phy->port);
+               phy->port = NULL;
                return NULL;
        }
        list_add_tail(&child->siblings, &parent->ex_dev.children);
index 398c9a0..82a6909 100644 (file)
@@ -6498,7 +6498,10 @@ int
 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
 {
        struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
-                                                       rrq->nlp_DID);
+                                                      rrq->nlp_DID);
+       if (!ndlp)
+               return 1;
+
        if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
                return lpfc_issue_els_rrq(rrq->vport, ndlp,
                                         rrq->nlp_DID, rrq);
index 105597a..33b10dd 100644 (file)
@@ -591,7 +591,7 @@ static bool pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp)
 static int pwrap_init_cipher(struct pmic_wrapper *wrp)
 {
        int ret;
-       u32 rdata;
+       u32 rdata = 0;
 
        pwrap_writel(wrp, 0x1, PWRAP_CIPHER_SWRST);
        pwrap_writel(wrp, 0x0, PWRAP_CIPHER_SWRST);
index 19019aa..2c77f90 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2015, Sony Mobile Communications AB.
- * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, 2019 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -228,7 +228,7 @@ struct smem_region {
  * struct qcom_smem - device data for the smem device
  * @dev:       device pointer
  * @hwlock:    reference to a hwspinlock
- * @partitions:        list of pointers to partitions affecting the current
+ * @ptable_entries: list of pointers to partitions table entry of current
  *             processor/host
  * @num_regions: number of @regions
  * @regions:   list of the memory regions defining the shared memory
@@ -238,12 +238,24 @@ struct qcom_smem {
 
        struct hwspinlock *hwlock;
 
-       struct smem_partition_header *partitions[SMEM_HOST_COUNT];
+       struct smem_ptable_entry *ptable_entries[SMEM_HOST_COUNT];
 
        unsigned num_regions;
        struct smem_region regions[0];
 };
 
+/* Pointer to the one and only smem handle */
+static struct qcom_smem *__smem;
+
+/* Timeout (ms) for the trylock of remote spinlocks */
+#define HWSPINLOCK_TIMEOUT     1000
+
+static struct smem_partition_header *
+ptable_entry_to_phdr(struct smem_ptable_entry *entry)
+{
+       return __smem->regions[0].virt_base + le32_to_cpu(entry->offset);
+}
+
 static struct smem_private_entry *
 phdr_to_last_private_entry(struct smem_partition_header *phdr)
 {
@@ -283,32 +295,33 @@ static void *entry_to_item(struct smem_private_entry *e)
        return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
 }
 
-/* Pointer to the one and only smem handle */
-static struct qcom_smem *__smem;
-
-/* Timeout (ms) for the trylock of remote spinlocks */
-#define HWSPINLOCK_TIMEOUT     1000
-
 static int qcom_smem_alloc_private(struct qcom_smem *smem,
-                                  unsigned host,
+                                  struct smem_ptable_entry *entry,
                                   unsigned item,
                                   size_t size)
 {
        struct smem_partition_header *phdr;
        struct smem_private_entry *hdr, *end;
+       struct smem_partition_header *phdr;
        size_t alloc_size;
        void *cached;
+       void *p_end;
+
+       phdr = ptable_entry_to_phdr(entry);
+       p_end = (void *)phdr + le32_to_cpu(entry->size);
 
-       phdr = smem->partitions[host];
        hdr = phdr_to_first_private_entry(phdr);
        end = phdr_to_last_private_entry(phdr);
        cached = phdr_to_first_cached_entry(phdr);
 
+       if (WARN_ON((void *)end > p_end || (void *)cached > p_end))
+               return -EINVAL;
+
        while (hdr < end) {
                if (hdr->canary != SMEM_PRIVATE_CANARY) {
                        dev_err(smem->dev,
-                               "Found invalid canary in host %d partition\n",
-                               host);
+                               "Found invalid canary in host %d:%d partition\n",
+                               phdr->host0, phdr->host1);
                        return -EINVAL;
                }
 
@@ -317,6 +330,8 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
 
                hdr = private_entry_next(hdr);
        }
+       if (WARN_ON((void *)hdr > p_end))
+               return -EINVAL;
 
        /* Check that we don't grow into the cached region */
        alloc_size = sizeof(*hdr) + ALIGN(size, 8);
@@ -389,6 +404,7 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
  */
 int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
 {
+       struct smem_ptable_entry *entry;
        unsigned long flags;
        int ret;
 
@@ -407,10 +423,12 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
        if (ret)
                return ret;
 
-       if (host < SMEM_HOST_COUNT && __smem->partitions[host])
-               ret = qcom_smem_alloc_private(__smem, host, item, size);
-       else
+       if (host < SMEM_HOST_COUNT && __smem->ptable_entries[host]) {
+               entry = __smem->ptable_entries[host];
+               ret = qcom_smem_alloc_private(__smem, entry, item, size);
+       } else {
                ret = qcom_smem_alloc_global(__smem, item, size);
+       }
 
        hwspin_unlock_irqrestore(__smem->hwlock, &flags);
 
@@ -422,9 +440,11 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
                                  unsigned item,
                                  size_t *size)
 {
+       struct smem_global_entry *entry;
        struct smem_header *header;
        struct smem_region *area;
-       struct smem_global_entry *entry;
+       u64 entry_offset;
+       u32 e_size;
        u32 aux_base;
        unsigned i;
 
@@ -442,9 +462,16 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
                area = &smem->regions[i];
 
                if (area->aux_base == aux_base || !aux_base) {
+                       e_size = le32_to_cpu(entry->size);
+                       entry_offset = le32_to_cpu(entry->offset);
+
+                       if (WARN_ON(e_size + entry_offset > area->size))
+                               return ERR_PTR(-EINVAL);
+
                        if (size != NULL)
-                               *size = le32_to_cpu(entry->size);
-                       return area->virt_base + le32_to_cpu(entry->offset);
+                               *size = e_size;
+
+                       return area->virt_base + entry_offset;
                }
        }
 
@@ -452,35 +479,58 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
 }
 
 static void *qcom_smem_get_private(struct qcom_smem *smem,
-                                  unsigned host,
+                                  struct smem_ptable_entry *entry,
                                   unsigned item,
                                   size_t *size)
 {
        struct smem_partition_header *phdr;
        struct smem_private_entry *e, *end;
+       void *item_ptr, *p_end;
+       u32 partition_size;
+       u32 padding_data;
+       u32 e_size;
+
+       phdr = ptable_entry_to_phdr(entry);
+       partition_size = le32_to_cpu(entry->size);
+       p_end = (void *)phdr + partition_size;
 
-       phdr = smem->partitions[host];
        e = phdr_to_first_private_entry(phdr);
        end = phdr_to_last_private_entry(phdr);
 
+       if (WARN_ON((void *)end > p_end))
+               return ERR_PTR(-EINVAL);
+
        while (e < end) {
                if (e->canary != SMEM_PRIVATE_CANARY) {
                        dev_err(smem->dev,
-                               "Found invalid canary in host %d partition\n",
-                               host);
+                               "Found invalid canary in host %d:%d partition\n",
+                               phdr->host0, phdr->host1);
                        return ERR_PTR(-EINVAL);
                }
 
                if (le16_to_cpu(e->item) == item) {
-                       if (size != NULL)
-                               *size = le32_to_cpu(e->size) -
-                                       le16_to_cpu(e->padding_data);
-
-                       return entry_to_item(e);
+                       if (size != NULL) {
+                               e_size = le32_to_cpu(e->size);
+                               padding_data = le16_to_cpu(e->padding_data);
+
+                               if (e_size < partition_size
+                                   && padding_data < e_size)
+                                       *size = e_size - padding_data;
+                               else
+                                       return ERR_PTR(-EINVAL);
+                       }
+
+                       item_ptr = entry_to_item(e);
+                       if (WARN_ON(item_ptr > p_end))
+                               return ERR_PTR(-EINVAL);
+
+                       return item_ptr;
                }
 
                e = private_entry_next(e);
        }
+       if (WARN_ON((void *)e > p_end))
+               return ERR_PTR(-EINVAL);
 
        return ERR_PTR(-ENOENT);
 }
@@ -496,6 +546,7 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
  */
 void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
 {
+       struct smem_ptable_entry *entry;
        unsigned long flags;
        int ret;
        void *ptr = ERR_PTR(-EPROBE_DEFER);
@@ -509,11 +560,12 @@ void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
        if (ret)
                return ERR_PTR(ret);
 
-       if (host < SMEM_HOST_COUNT && __smem->partitions[host])
-               ptr = qcom_smem_get_private(__smem, host, item, size);
-       else
+       if (host < SMEM_HOST_COUNT && __smem->ptable_entries[host]) {
+               entry = __smem->ptable_entries[host];
+               ptr = qcom_smem_get_private(__smem, entry, item, size);
+       } else {
                ptr = qcom_smem_get_global(__smem, item, size);
-
+       }
        hwspin_unlock_irqrestore(__smem->hwlock, &flags);
 
        return ptr;
@@ -531,19 +583,28 @@ EXPORT_SYMBOL(qcom_smem_get);
 int qcom_smem_get_free_space(unsigned host)
 {
        struct smem_partition_header *phdr;
+       struct smem_ptable_entry *entry;
        struct smem_header *header;
        unsigned ret;
 
        if (!__smem)
                return -EPROBE_DEFER;
 
-       if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
-               phdr = __smem->partitions[host];
+       if (host < SMEM_HOST_COUNT && __smem->ptable_entries[host]) {
+               entry = __smem->ptable_entries[host];
+               phdr = ptable_entry_to_phdr(entry);
+
                ret = le32_to_cpu(phdr->offset_free_cached) -
                      le32_to_cpu(phdr->offset_free_uncached);
+
+               if (ret > le32_to_cpu(entry->size))
+                       return -EINVAL;
        } else {
                header = __smem->regions[0].virt_base;
                ret = le32_to_cpu(header->available);
+
+               if (ret > __smem->regions[0].size)
+                       return -EINVAL;
        }
 
        return ret;
@@ -616,7 +677,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
                        return -EINVAL;
                }
 
-               if (smem->partitions[remote_host]) {
+               if (smem->ptable_entries[remote_host]) {
                        dev_err(smem->dev,
                                "Already found a partition for host %d\n",
                                remote_host);
@@ -658,7 +719,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
                        return -EINVAL;
                }
 
-               smem->partitions[remote_host] = header;
+               smem->ptable_entries[remote_host] = entry;
        }
 
        return 0;
index e87b6fc..193aa3d 100644 (file)
@@ -1371,12 +1371,7 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
 
 static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
 {
-       struct device *dev = param;
-
-       if (dev != chan->device->dev->parent)
-               return false;
-
-       return true;
+       return param == chan->device->dev;
 }
 
 static struct pxa2xx_spi_master *
index 215c375..7e141f1 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -54,7 +54,7 @@
 #define MSM_LIMITS_CLUSTER_0           0x6370302D
 #define MSM_LIMITS_CLUSTER_1           0x6370312D
 
-#define MSM_LIMITS_DOMAIN_MAX          0x444D4158
+#define MSM_LIMIT_FREQ_CAP             0x46434150
 
 #define MSM_LIMITS_HIGH_THRESHOLD_VAL  95000
 #define MSM_LIMITS_ARM_THRESHOLD_VAL   65000
@@ -194,34 +194,40 @@ static irqreturn_t lmh_dcvs_handle_isr(int irq, void *data)
 }
 
 static int msm_lmh_dcvs_write(uint32_t node_id, uint32_t fn,
-               uint32_t setting, uint32_t val)
+                             uint32_t setting, uint32_t val, uint32_t val1,
+                             bool enable_val1)
 {
        int ret;
        struct scm_desc desc_arg;
        uint32_t *payload = NULL;
+       uint32_t payload_len;
 
-       payload = kzalloc(sizeof(uint32_t) * 5, GFP_KERNEL);
+       payload_len = ((enable_val1) ? 6 : 5) * sizeof(uint32_t);
+       payload = kcalloc((enable_val1) ? 6 : 5, sizeof(uint32_t), GFP_KERNEL);
        if (!payload)
                return -ENOMEM;
 
        payload[0] = fn; /* algorithm */
        payload[1] = 0; /* unused sub-algorithm */
        payload[2] = setting;
-       payload[3] = 1; /* number of values */
+       payload[3] = enable_val1 ? 2 : 1; /* number of values */
        payload[4] = val;
+       if (enable_val1)
+               payload[5] = val1;
 
        desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
-       desc_arg.args[1] = sizeof(uint32_t) * 5;
+       desc_arg.args[1] = payload_len;
        desc_arg.args[2] = MSM_LIMITS_NODE_DCVS;
        desc_arg.args[3] = node_id;
        desc_arg.args[4] = 0; /* version */
        desc_arg.arginfo = SCM_ARGS(5, SCM_RO, SCM_VAL, SCM_VAL,
                                        SCM_VAL, SCM_VAL);
 
-       dmac_flush_range(payload, (void *)payload + 5 * (sizeof(uint32_t)));
+       dmac_flush_range(payload, (void *)payload + payload_len);
        ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, MSM_LIMITS_DCVSH), &desc_arg);
 
        kfree(payload);
+
        return ret;
 }
 
@@ -265,7 +271,7 @@ static int lmh_activate_trip(struct thermal_zone_device *dev,
        case LIMITS_TRIP_LO:
                ret =  msm_lmh_dcvs_write(hw->affinity,
                                MSM_LIMITS_SUB_FN_THERMAL,
-                               MSM_LIMITS_ARM_THRESHOLD, temp);
+                               MSM_LIMITS_ARM_THRESHOLD, temp, 0, 0);
                break;
        case LIMITS_TRIP_HI:
                /*
@@ -276,13 +282,13 @@ static int lmh_activate_trip(struct thermal_zone_device *dev,
                        return -EINVAL;
                ret =  msm_lmh_dcvs_write(hw->affinity,
                                MSM_LIMITS_SUB_FN_THERMAL,
-                               MSM_LIMITS_HI_THRESHOLD, temp);
+                               MSM_LIMITS_HI_THRESHOLD, temp, 0, 0);
                if (ret)
                        break;
                ret =  msm_lmh_dcvs_write(hw->affinity,
                                MSM_LIMITS_SUB_FN_THERMAL,
                                MSM_LIMITS_LOW_THRESHOLD, temp -
-                               MSM_LIMITS_LOW_THRESHOLD_OFFSET);
+                               MSM_LIMITS_LOW_THRESHOLD_OFFSET, 0, 0);
                break;
        default:
                return -EINVAL;
@@ -347,8 +353,9 @@ static int lmh_set_max_limit(int cpu, u32 freq)
        if (!hw)
                return -EINVAL;
 
-       return msm_lmh_dcvs_write(hw->affinity, MSM_LIMITS_SUB_FN_GENERAL,
-                               MSM_LIMITS_DOMAIN_MAX, freq);
+       return msm_lmh_dcvs_write(hw->affinity, MSM_LIMITS_SUB_FN_THERMAL,
+                               MSM_LIMIT_FREQ_CAP, freq,
+                               freq >= hw->max_freq ? 0 : 1, 1);
 }
 
 static int lmh_get_cur_limit(int cpu, unsigned long *freq)
@@ -457,7 +464,7 @@ static int msm_lmh_dcvs_probe(struct platform_device *pdev)
 
        /* Enable the thermal algorithm early */
        ret = msm_lmh_dcvs_write(hw->affinity, MSM_LIMITS_SUB_FN_THERMAL,
-                MSM_LIMITS_ALGO_MODE_ENABLE, 1);
+                MSM_LIMITS_ALGO_MODE_ENABLE, 1, 0, 0);
        if (ret)
                return ret;
 
index 7beef24..389a756 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
 
 #define MSM_LIMITS_DCVSH               0x10
 #define MSM_LIMITS_NODE_DCVS           0x44435653
+#define MSM_LIMITS_SUB_FN_THERMAL      0x54484D4C
 #define MSM_LIMITS_SUB_FN_GENERAL      0x47454E00
 #define MSM_LIMITS_SUB_FN_CRNT         0x43524E54
 #define MSM_LIMITS_SUB_FN_REL          0x52454C00
-#define MSM_LIMITS_DOMAIN_MAX          0x444D4158
-#define MSM_LIMITS_DOMAIN_MIN          0x444D494E
+#define MSM_LIMITS_FREQ_CAP            0x46434150
 #define MSM_LIMITS_CLUSTER_0           0x6370302D
 #define MSM_LIMITS_CLUSTER_1           0x6370312D
 #define MSM_LIMITS_ALGO_MODE_ENABLE    0x454E424C
@@ -1018,55 +1018,58 @@ static struct notifier_block msm_thermal_cpufreq_notifier = {
        .notifier_call = msm_thermal_cpufreq_callback,
 };
 
-static int msm_lmh_dcvs_write(uint32_t node_id, uint32_t fn, uint32_t setting,
-                               uint32_t val)
+static int msm_lmh_dcvs_write(uint32_t node_id, uint32_t fn,
+                             uint32_t setting, uint32_t val, uint32_t val1,
+                             bool enable_val1)
 {
        int ret;
        struct scm_desc desc_arg;
        uint32_t *payload = NULL;
+       uint32_t payload_len;
 
-       payload = kzalloc(sizeof(uint32_t) * 5, GFP_KERNEL);
+       payload_len = ((enable_val1) ? 6 : 5) * sizeof(uint32_t);
+       payload = kcalloc((enable_val1) ? 6 : 5, sizeof(uint32_t), GFP_KERNEL);
        if (!payload)
                return -ENOMEM;
 
-       payload[0] = fn;
+       payload[0] = fn; /* algorithm */
        payload[1] = 0; /* unused sub-algorithm */
        payload[2] = setting;
-       payload[3] = 1; /* number of values */
+       payload[3] = enable_val1 ? 2 : 1; /* number of values */
        payload[4] = val;
+       if (enable_val1)
+               payload[5] = val1;
 
        desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
-       desc_arg.args[1] = sizeof(uint32_t) * 5;
+       desc_arg.args[1] = payload_len;
        desc_arg.args[2] = MSM_LIMITS_NODE_DCVS;
        desc_arg.args[3] = node_id;
        desc_arg.args[4] = 0; /* version */
        desc_arg.arginfo = SCM_ARGS(5, SCM_RO, SCM_VAL, SCM_VAL,
                                        SCM_VAL, SCM_VAL);
 
-       dmac_flush_range(payload, (void *)payload + 5 * (sizeof(uint32_t)));
+       dmac_flush_range(payload, (void *)payload + payload_len);
        ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, MSM_LIMITS_DCVSH), &desc_arg);
 
        kfree(payload);
+
        return ret;
 }
 
 static int msm_lmh_dcvs_update(int cpu)
 {
        uint32_t id = cpus[cpu].parent_ptr->cluster_id;
-       uint32_t max_freq = cpus[cpu].limited_max_freq;
-       uint32_t min_freq = cpus[cpu].limited_min_freq;
+       uint32_t max_freq = cpus[cpu].limited_max_freq, hw_max_freq = U32_MAX;
        uint32_t affinity;
        int ret;
 
        /*
-        * It is better to use max/min limits of cluster for given
+        * It is better to use max limits of cluster for given
         * cpu if cluster mitigation is supported. It ensures that it
-        * requests aggregated max/min limits of all cpus in that cluster.
+        * requests aggregated max limits of all cpus in that cluster.
         */
-       if (core_ptr) {
+       if (core_ptr)
                max_freq = cpus[cpu].parent_ptr->limited_max_freq;
-               min_freq = cpus[cpu].parent_ptr->limited_min_freq;
-       }
 
        switch (id) {
        case 0:
@@ -1080,13 +1083,14 @@ static int msm_lmh_dcvs_update(int cpu)
                return -EINVAL;
        };
 
-       ret = msm_lmh_dcvs_write(affinity, MSM_LIMITS_SUB_FN_GENERAL,
-                                       MSM_LIMITS_DOMAIN_MAX, max_freq);
-       if (ret)
-               return ret;
+       if (cpus[cpu].parent_ptr->freq_table)
+               hw_max_freq =
+                       cpus[cpu].parent_ptr->freq_table[
+                               cpus[cpu].parent_ptr->freq_idx_high].frequency;
 
-       ret = msm_lmh_dcvs_write(affinity, MSM_LIMITS_SUB_FN_GENERAL,
-                                       MSM_LIMITS_DOMAIN_MIN, min_freq);
+       ret = msm_lmh_dcvs_write(affinity, MSM_LIMITS_SUB_FN_THERMAL,
+                                       MSM_LIMITS_FREQ_CAP, max_freq,
+                                       max_freq >= hw_max_freq ? 0 : 1, 1);
        if (ret)
                return ret;
        /*
@@ -1729,23 +1733,23 @@ static int msm_thermal_lmh_dcvs_init(struct platform_device *pdev)
         */
        ret = msm_lmh_dcvs_write(MSM_LIMITS_CLUSTER_0,
                                MSM_LIMITS_SUB_FN_REL,
-                               MSM_LIMITS_ALGO_MODE_ENABLE, 1);
+                               MSM_LIMITS_ALGO_MODE_ENABLE, 1, 0, 0);
        if (ret)
                pr_err("Unable to enable REL algo for cluster0\n");
        ret = msm_lmh_dcvs_write(MSM_LIMITS_CLUSTER_1,
                                MSM_LIMITS_SUB_FN_REL,
-                               MSM_LIMITS_ALGO_MODE_ENABLE, 1);
+                               MSM_LIMITS_ALGO_MODE_ENABLE, 1, 0, 0);
        if (ret)
                pr_err("Unable to enable REL algo for cluster1\n");
 
        ret = msm_lmh_dcvs_write(MSM_LIMITS_CLUSTER_0,
                                MSM_LIMITS_SUB_FN_CRNT,
-                               MSM_LIMITS_ALGO_MODE_ENABLE, 1);
+                               MSM_LIMITS_ALGO_MODE_ENABLE, 1, 0, 0);
        if (ret)
                pr_err("Unable enable CRNT algo for cluster0\n");
        ret = msm_lmh_dcvs_write(MSM_LIMITS_CLUSTER_1,
                                MSM_LIMITS_SUB_FN_CRNT,
-                               MSM_LIMITS_ALGO_MODE_ENABLE, 1);
+                               MSM_LIMITS_ALGO_MODE_ENABLE, 1, 0, 0);
        if (ret)
                pr_err("Unable enable CRNT algo for cluster1\n");
 
index a30d68c..039837d 100644 (file)
@@ -258,7 +258,7 @@ static bool dw8250_fallback_dma_filter(struct dma_chan *chan, void *param)
 
 static bool dw8250_idma_filter(struct dma_chan *chan, void *param)
 {
-       return param == chan->device->dev->parent;
+       return param == chan->device->dev;
 }
 
 static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
@@ -290,7 +290,7 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
                data->uart_16550_compatible = true;
        }
 
-       /* Platforms with iDMA */
+       /* Platforms with iDMA 64-bit */
        if (platform_get_resource_byname(to_platform_device(p->dev),
                                         IORESOURCE_MEM, "lpss_priv")) {
                p->set_termios = dw8250_set_termios;
index 59828d8..5ad978a 100644 (file)
@@ -392,7 +392,7 @@ static struct uart_ops sunhv_pops = {
 static struct uart_driver sunhv_reg = {
        .owner                  = THIS_MODULE,
        .driver_name            = "sunhv",
-       .dev_name               = "ttyS",
+       .dev_name               = "ttyHV",
        .major                  = TTY_MAJOR,
 };
 
index 38c7676..19e819a 100644 (file)
@@ -70,6 +70,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */
        { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Logitech HD Webcam C270 */
+       { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
        { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
        { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
index 9f96dd2..1effe74 100644 (file)
@@ -1166,6 +1166,10 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+       { USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+       { USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
        { USB_DEVICE(TELIT_VENDOR_ID, 0x1900),                          /* Telit LN940 (QMI) */
          .driver_info = NCTRL(0) | RSVD(1) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff),    /* Telit LN940 (MBIM) */
@@ -1767,6 +1771,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
          .driver_info = RSVD(5) | RSVD(6) },
        { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) },   /* Simcom SIM7500/SIM7600 MBIM mode */
+       { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff),     /* Simcom SIM7500/SIM7600 RNDIS mode */
+         .driver_info = RSVD(7) },
        { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
          .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
        { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
index 9706d21..8fd5e19 100644 (file)
@@ -101,6 +101,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
        { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
        { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
+       { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
        { }                                     /* Terminating entry */
 };
 
index d84c3b3..496cbcc 100644 (file)
 #define SMART_VENDOR_ID        0x0b8c
 #define SMART_PRODUCT_ID       0x2303
 
+/* Allied Telesis VT-Kit3 */
+#define AT_VENDOR_ID           0x0caa
+#define AT_VTKIT3_PRODUCT_ID   0x3001
index f5fc327..e2c5491 100644 (file)
@@ -28,6 +28,11 @@ UNUSUAL_DEV(0x0bda, 0x0138, 0x0000, 0x9999,
                "USB Card Reader",
                USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
 
+UNUSUAL_DEV(0x0bda, 0x0153, 0x0000, 0x9999,
+               "Realtek",
+               "USB Card Reader",
+               USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
+
 UNUSUAL_DEV(0x0bda, 0x0158, 0x0000, 0x9999,
                "Realtek",
                "USB Card Reader",
index 15d3ccf..4a397c7 100644 (file)
@@ -285,6 +285,8 @@ static int hga_card_detect(void)
        hga_vram_len  = 0x08000;
 
        hga_vram = ioremap(0xb0000, hga_vram_len);
+       if (!hga_vram)
+               goto error;
 
        if (request_region(0x3b0, 12, "hgafb"))
                release_io_ports = 1;
index 9b167f7..4994a54 100644 (file)
@@ -1517,6 +1517,11 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        info->fix.smem_start = addr;
        info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
                                            0x400000 : 0x800000);
+       if (!info->screen_base) {
+               release_mem_region(addr, size);
+               framebuffer_release(info);
+               return -ENOMEM;
+       }
        info->fix.mmio_start = addr + 0x800000;
        par->dc_regs = ioremap(addr + 0x800000, 0x1000);
        par->cmap_regs_phys = addr + 0x840000;
index a7a1b21..8e709b6 100644 (file)
@@ -58,15 +58,13 @@ static void configfs_d_iput(struct dentry * dentry,
        if (sd) {
                /* Coordinate with configfs_readdir */
                spin_lock(&configfs_dirent_lock);
-               /* Coordinate with configfs_attach_attr where will increase
-                * sd->s_count and update sd->s_dentry to new allocated one.
-                * Only set sd->dentry to null when this dentry is the only
-                * sd owner.
-                * If not do so, configfs_d_iput may run just after
-                * configfs_attach_attr and set sd->s_dentry to null
-                * even it's still in use.
+               /*
+                * Set sd->s_dentry to null only when this dentry is the one
+                * that is going to be killed.  Otherwise configfs_d_iput may
+                * run just after configfs_attach_attr and set sd->s_dentry to
+                * NULL even it's still in use.
                 */
-               if (atomic_read(&sd->s_count) <= 2)
+               if (sd->s_dentry == dentry)
                        sd->s_dentry = NULL;
 
                spin_unlock(&configfs_dirent_lock);
index a08f103..d3f655a 100644 (file)
@@ -156,12 +156,17 @@ static int fat_file_release(struct inode *inode, struct file *filp)
 int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
 {
        struct inode *inode = filp->f_mapping->host;
-       int res, err;
+       int err;
+
+       err = __generic_file_fsync(filp, start, end, datasync);
+       if (err)
+               return err;
 
-       res = generic_file_fsync(filp, start, end, datasync);
        err = sync_mapping_buffers(MSDOS_SB(inode->i_sb)->fat_inode->i_mapping);
+       if (err)
+               return err;
 
-       return res ? res : err;
+       return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
 }
 
 
index 0fc9e87..817e465 100644 (file)
@@ -1734,7 +1734,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
        offset = outarg->offset & ~PAGE_CACHE_MASK;
        file_size = i_size_read(inode);
 
-       num = outarg->size;
+       num = min(outarg->size, fc->max_write);
        if (outarg->offset > file_size)
                num = 0;
        else if (outarg->offset + num > file_size)
index 9f98d1d..10015f1 100644 (file)
@@ -1745,8 +1745,13 @@ int file_remove_privs(struct file *file)
        int kill;
        int error = 0;
 
-       /* Fast path for nothing security related */
-       if (IS_NOSEC(inode))
+       /*
+        * Fast path for nothing security related.
+        * As well for non-regular files, e.g. blkdev inodes.
+        * For example, blkdev_write_iter() might get here
+        * trying to remove privs which it is not allowed to.
+        */
+       if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
                return 0;
 
        kill = dentry_needs_remove_privs(dentry);
index fcfc48c..128d6e2 100644 (file)
@@ -109,8 +109,11 @@ void               nfsd_put_raparams(struct file *file, struct raparms *ra);
 
 static inline int fh_want_write(struct svc_fh *fh)
 {
-       int ret = mnt_want_write(fh->fh_export->ex_path.mnt);
+       int ret;
 
+       if (fh->fh_want_write)
+               return 0;
+       ret = mnt_want_write(fh->fh_export->ex_path.mnt);
        if (!ret)
                fh->fh_want_write = true;
        return ret;
index 2903730..e8ace3b 100644 (file)
@@ -310,6 +310,18 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry,
 
 out_attach:
        spin_lock(&dentry_attach_lock);
+       if (unlikely(dentry->d_fsdata && !alias)) {
+               /* d_fsdata is set by a racing thread which is doing
+                * the same thing as this thread is doing. Leave the racing
+                * thread going ahead and we return here.
+                */
+               spin_unlock(&dentry_attach_lock);
+               iput(dl->dl_inode);
+               ocfs2_lock_res_free(&dl->dl_lockres);
+               kfree(dl);
+               return 0;
+       }
+
        dentry->d_fsdata = dl;
        dl->dl_count++;
        spin_unlock(&dentry_attach_lock);
index 4e613f0..c3faa39 100644 (file)
@@ -1084,6 +1084,24 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                                        continue;
                                up_read(&mm->mmap_sem);
                                down_write(&mm->mmap_sem);
+                               /*
+                                * Avoid to modify vma->vm_flags
+                                * without locked ops while the
+                                * coredump reads the vm_flags.
+                                */
+                               if (!mmget_still_valid(mm)) {
+                                       /*
+                                        * Silently return "count"
+                                        * like if get_task_mm()
+                                        * failed. FIXME: should this
+                                        * function have returned
+                                        * -ESRCH if get_task_mm()
+                                        * failed like if
+                                        * get_proc_task() fails?
+                                        */
+                                       up_write(&mm->mmap_sem);
+                                       goto out_mm;
+                               }
                                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                        vma->vm_flags &= ~VM_SOFTDIRTY;
                                        vma_set_page_prot(vma);
index 9de2b7a..08cc09b 100644 (file)
@@ -446,6 +446,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
         * taking the mmap_sem for writing.
         */
        down_write(&mm->mmap_sem);
+       if (!mmget_still_valid(mm))
+               goto skip_mm;
        prev = NULL;
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                cond_resched();
@@ -469,6 +471,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
                vma->vm_flags = new_flags;
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
        }
+skip_mm:
        up_write(&mm->mmap_sem);
        mmput(mm);
 wakeup:
@@ -770,6 +773,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
                goto out;
 
        down_write(&mm->mmap_sem);
+       if (!mmget_still_valid(mm))
+               goto out_unlock;
        vma = find_vma_prev(mm, start, &prev);
        if (!vma)
                goto out_unlock;
@@ -929,6 +934,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
                goto out;
 
        down_write(&mm->mmap_sem);
+       if (!mmget_still_valid(mm))
+               goto out_unlock;
        vma = find_vma_prev(mm, start, &prev);
        if (!vma)
                goto out_unlock;
index 210ccc4..8607c93 100644 (file)
@@ -453,7 +453,7 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
  *
  * Find the css for the (@task, @subsys_id) combination, increment a
  * reference on and return it.  This function is guaranteed to return a
- * valid css.
+ * valid css.  The returned css may already have been offlined.
  */
 static inline struct cgroup_subsys_state *
 task_get_css(struct task_struct *task, int subsys_id)
@@ -463,7 +463,13 @@ task_get_css(struct task_struct *task, int subsys_id)
        rcu_read_lock();
        while (true) {
                css = task_css(task, subsys_id);
-               if (likely(css_tryget_online(css)))
+               /*
+                * Can't use css_tryget_online() here.  A task which has
+                * PF_EXITING set may stay associated with an offline css.
+                * If such task calls this function, css_tryget_online()
+                * will keep failing.
+                */
+               if (likely(css_tryget(css)))
                        break;
                cpu_relax();
        }
index 47c7bb4..ecdabf3 100644 (file)
@@ -1115,6 +1115,27 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
                unsigned long start, unsigned long end);
 
+/*
+ * This has to be called after a get_task_mm()/mmget_not_zero()
+ * followed by taking the mmap_sem for writing before modifying the
+ * vmas or anything the coredump pretends not to change from under it.
+ *
+ * NOTE: find_extend_vma() called from GUP context is the only place
+ * that can modify the "mm" (notably the vm_start/end) under mmap_sem
+ * for reading and outside the context of the process, so it is also
+ * the only case that holds the mmap_sem for reading that must call
+ * this function. Generally if the mmap_sem is hold for reading
+ * there's no need of this check after get_task_mm()/mmget_not_zero().
+ *
+ * This function can be obsoleted and the check can be removed, after
+ * the coredump code will hold the mmap_sem for writing before
+ * invoking the ->core_dump methods.
+ */
+static inline bool mmget_still_valid(struct mm_struct *mm)
+{
+       return likely(!mm->core_state);
+}
+
 /**
  * mm_walk - callbacks for walk_page_range
  * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
index aa8736d..cfc3ed4 100644 (file)
@@ -331,7 +331,6 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
 #ifdef CONFIG_PWM_SYSFS
 void pwmchip_sysfs_export(struct pwm_chip *chip);
 void pwmchip_sysfs_unexport(struct pwm_chip *chip);
-void pwmchip_sysfs_unexport_children(struct pwm_chip *chip);
 #else
 static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
 {
@@ -340,10 +339,6 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
 static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
 {
 }
-
-static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
-{
-}
 #endif /* CONFIG_PWM_SYSFS */
 
 #endif /* __LINUX_PWM_H */
index ccd0f37..2378cbf 100644 (file)
@@ -378,7 +378,7 @@ extern int lockdep_tasklist_lock_is_held(void);
 extern void sched_init(void);
 extern void sched_init_smp(void);
 extern asmlinkage void schedule_tail(struct task_struct *prev);
-extern void init_idle(struct task_struct *idle, int cpu, bool hotplug);
+extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern cpumask_var_t cpu_isolated_map;
index 7c0c83d..876688b 100644 (file)
@@ -174,9 +174,6 @@ struct adv_info {
 
 #define HCI_MAX_SHORT_NAME_LENGTH      10
 
-/* Min encryption key size to match with SMP */
-#define HCI_MIN_ENC_KEY_SIZE           7
-
 /* Default LE RPA expiry time, 15 minutes */
 #define HCI_DEFAULT_RPA_TIMEOUT                (15 * 60)
 
index 4643654..598b8bf 100644 (file)
@@ -373,7 +373,8 @@ static void mqueue_evict_inode(struct inode *inode)
        struct user_struct *user;
        unsigned long mq_bytes, mq_treesize;
        struct ipc_namespace *ipc_ns;
-       struct msg_msg *msg;
+       struct msg_msg *msg, *nmsg;
+       LIST_HEAD(tmp_msg);
 
        clear_inode(inode);
 
@@ -384,10 +385,15 @@ static void mqueue_evict_inode(struct inode *inode)
        info = MQUEUE_I(inode);
        spin_lock(&info->lock);
        while ((msg = msg_get(info)) != NULL)
-               free_msg(msg);
+               list_add_tail(&msg->m_list, &tmp_msg);
        kfree(info->node_cache);
        spin_unlock(&info->lock);
 
+       list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
+               list_del(&msg->m_list);
+               free_msg(msg);
+       }
+
        /* Total amount of bytes accounted for the mqueue */
        mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
                min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
index ed81aaf..9467307 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/utsname.h>
 #include <linux/proc_ns.h>
 #include <linux/uaccess.h>
+#include <linux/sched.h>
 
 #include "util.h"
 
@@ -66,6 +67,9 @@ static struct msg_msg *alloc_msg(size_t len)
        pseg = &msg->next;
        while (len > 0) {
                struct msg_msgseg *seg;
+
+               cond_resched();
+
                alen = min(len, DATALEN_SEG);
                seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL);
                if (seg == NULL)
@@ -178,6 +182,8 @@ void free_msg(struct msg_msg *msg)
        kfree(msg);
        while (seg != NULL) {
                struct msg_msgseg *tmp = seg->next;
+
+               cond_resched();
                kfree(seg);
                seg = tmp;
        }
index a599351..2ce1a52 100644 (file)
@@ -419,14 +419,19 @@ static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
 
        if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL))
                goto free_cs;
+       if (!alloc_cpumask_var(&trial->cpus_requested, GFP_KERNEL))
+               goto free_allowed;
        if (!alloc_cpumask_var(&trial->effective_cpus, GFP_KERNEL))
                goto free_cpus;
 
        cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
+       cpumask_copy(trial->cpus_requested, cs->cpus_requested);
        cpumask_copy(trial->effective_cpus, cs->effective_cpus);
        return trial;
 
 free_cpus:
+       free_cpumask_var(trial->cpus_requested);
+free_allowed:
        free_cpumask_var(trial->cpus_allowed);
 free_cs:
        kfree(trial);
@@ -440,6 +445,7 @@ free_cs:
 static void free_trial_cpuset(struct cpuset *trial)
 {
        free_cpumask_var(trial->effective_cpus);
+       free_cpumask_var(trial->cpus_requested);
        free_cpumask_var(trial->cpus_allowed);
        kfree(trial);
 }
@@ -948,23 +954,23 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
                return -EACCES;
 
        /*
-        * An empty cpus_allowed is ok only if the cpuset has no tasks.
+        * An empty cpus_requested is ok only if the cpuset has no tasks.
         * Since cpulist_parse() fails on an empty mask, we special case
         * that parsing.  The validate_change() call ensures that cpusets
         * with tasks have cpus.
         */
        if (!*buf) {
-               cpumask_clear(trialcs->cpus_allowed);
+               cpumask_clear(trialcs->cpus_requested);
        } else {
                retval = cpulist_parse(buf, trialcs->cpus_requested);
                if (retval < 0)
                        return retval;
+       }
 
-               if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask))
-                       return -EINVAL;
+       if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask))
+               return -EINVAL;
 
-               cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask);
-       }
+       cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask);
 
        /* Nothing to do if the cpus didn't change */
        if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested))
index ff8606f..098af0b 100644 (file)
@@ -447,6 +447,15 @@ int commit_creds(struct cred *new)
                if (task->mm)
                        set_dumpable(task->mm, suid_dumpable);
                task->pdeath_signal = 0;
+               /*
+                * If a task drops privileges and becomes nondumpable,
+                * the dumpability change must become visible before
+                * the credential change; otherwise, a __ptrace_may_access()
+                * racing with this change may be able to attach to a task it
+                * shouldn't be able to attach to (as if the task had dropped
+                * privileges without becoming nondumpable).
+                * Pairs with a read barrier in __ptrace_may_access().
+                */
                smp_wmb();
        }
 
index 424f5a5..b128cc8 100644 (file)
@@ -49,14 +49,30 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
        unsigned long head;
 
 again:
+       /*
+        * In order to avoid publishing a head value that goes backwards,
+        * we must ensure the load of @rb->head happens after we've
+        * incremented @rb->nest.
+        *
+        * Otherwise we can observe a @rb->head value before one published
+        * by an IRQ/NMI happening between the load and the increment.
+        */
+       barrier();
        head = local_read(&rb->head);
 
        /*
-        * IRQ/NMI can happen here, which means we can miss a head update.
+        * IRQ/NMI can happen here and advance @rb->head, causing our
+        * load above to be stale.
         */
 
-       if (!local_dec_and_test(&rb->nest))
+       /*
+        * If this isn't the outermost nesting, we don't have to update
+        * @rb->user_page->data_head.
+        */
+       if (local_read(&rb->nest) > 1) {
+               local_dec(&rb->nest);
                goto out;
+       }
 
        /*
         * Since the mmap() consumer (userspace) can run on a different CPU:
@@ -88,9 +104,18 @@ again:
        rb->user_page->data_head = head;
 
        /*
-        * Now check if we missed an update -- rely on previous implied
-        * compiler barriers to force a re-read.
+        * We must publish the head before decrementing the nest count,
+        * otherwise an IRQ/NMI can publish a more recent head value and our
+        * write will (temporarily) publish a stale value.
+        */
+       barrier();
+       local_set(&rb->nest, 0);
+
+       /*
+        * Ensure we decrement @rb->nest before we validate the @rb->head.
+        * Otherwise we cannot be sure we caught the 'last' nested update.
         */
+       barrier();
        if (unlikely(head != local_read(&rb->head))) {
                local_inc(&rb->nest);
                goto again;
index 25d0a60..e8bc7ed 100644 (file)
@@ -1769,7 +1769,7 @@ struct task_struct *fork_idle(int cpu)
                            cpu_to_node(cpu));
        if (!IS_ERR(task)) {
                init_idle_pids(task->pids);
-               init_idle(task, cpu, false);
+               init_idle(task, cpu);
        }
 
        return task;
index 39c2b3e..d24d164 100644 (file)
@@ -593,8 +593,8 @@ again:
                 * applies. If this is really a shmem page then the page lock
                 * will prevent unexpected transitions.
                 */
-               lock_page(page);
-               shmem_swizzled = PageSwapCache(page) || page->mapping;
+               lock_page(page_head);
+               shmem_swizzled = PageSwapCache(page_head) || page_head->mapping;
                unlock_page(page_head);
                put_page(page_head);
 
index 8303874..1aa33fe 100644 (file)
@@ -292,6 +292,16 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
        return -EPERM;
 ok:
        rcu_read_unlock();
+       /*
+        * If a task drops privileges and becomes nondumpable (through a syscall
+        * like setresuid()) while we are trying to access it, we must ensure
+        * that the dumpability is read after the credentials; otherwise,
+        * we may be able to attach to a task that we shouldn't be able to
+        * attach to (as if the task had dropped privileges without becoming
+        * nondumpable).
+        * Pairs with a write barrier in commit_creds().
+        */
+       smp_rmb();
        mm = task->mm;
        if (mm &&
            ((get_dumpable(mm) != SUID_DUMP_USER) &&
@@ -673,6 +683,10 @@ static int ptrace_peek_siginfo(struct task_struct *child,
        if (arg.nr < 0)
                return -EINVAL;
 
+       /* Ensure arg.off fits in an unsigned long */
+       if (arg.off > ULONG_MAX)
+               return 0;
+
        if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
                pending = &child->signal->shared_pending;
        else
@@ -680,18 +694,20 @@ static int ptrace_peek_siginfo(struct task_struct *child,
 
        for (i = 0; i < arg.nr; ) {
                siginfo_t info;
-               s32 off = arg.off + i;
+               unsigned long off = arg.off + i;
+               bool found = false;
 
                spin_lock_irq(&child->sighand->siglock);
                list_for_each_entry(q, &pending->list, list) {
                        if (!off--) {
+                               found = true;
                                copy_siginfo(&info, &q->info);
                                break;
                        }
                }
                spin_unlock_irq(&child->sighand->siglock);
 
-               if (off >= 0) /* beyond the end of the list */
+               if (!found) /* beyond the end of the list */
                        break;
 
 #ifdef CONFIG_COMPAT
index cccb356..543f711 100644 (file)
@@ -2447,7 +2447,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
        unsigned long flags;
        int cpu;
 
-       init_new_task_load(p, false);
+       init_new_task_load(p);
        cpu = get_cpu();
 
        __sched_fork(clone_flags, p);
@@ -5407,19 +5407,15 @@ void init_idle_bootup_task(struct task_struct *idle)
  * init_idle - set up an idle thread for a given CPU
  * @idle: task in question
  * @cpu: cpu the idle task belongs to
- * @cpu_up: differentiate between initial boot vs hotplug
  *
  * NOTE: this function does not set the idle thread's NEED_RESCHED
  * flag, to make booting more robust.
  */
-void init_idle(struct task_struct *idle, int cpu, bool cpu_up)
+void init_idle(struct task_struct *idle, int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;
 
-       if (!cpu_up)
-               init_new_task_load(idle, true);
-
        raw_spin_lock_irqsave(&idle->pi_lock, flags);
        raw_spin_lock(&rq->lock);
 
@@ -8571,7 +8567,8 @@ void __init sched_init(void)
         * but because we are the idle thread, we just pick up running again
         * when this runqueue becomes "idle".
         */
-       init_idle(current, smp_processor_id(), false);
+       init_idle(current, smp_processor_id());
+       init_new_task_load(current);
 
        calc_load_update = jiffies + LOAD_FREQ;
 
index ddcf7cf..5337ac7 100644 (file)
@@ -1544,7 +1544,7 @@ void free_task_load_ptrs(struct task_struct *p)
        p->ravg.prev_window_cpu = NULL;
 }
 
-void init_new_task_load(struct task_struct *p, bool idle_task)
+void init_new_task_load(struct task_struct *p)
 {
        int i;
        u32 init_load_windows = sched_init_task_load_windows;
@@ -1571,9 +1571,6 @@ void init_new_task_load(struct task_struct *p, bool idle_task)
        /* Don't have much choice. CPU frequency would be bogus */
        BUG_ON(!p->ravg.curr_window_cpu || !p->ravg.prev_window_cpu);
 
-       if (idle_task)
-               return;
-
        if (init_load_pct)
                init_load_windows = div64_u64((u64)init_load_pct *
                          (u64)sched_ravg_window, 100);
index 1288370..ffae8d4 100644 (file)
@@ -1154,7 +1154,7 @@ extern unsigned int  __read_mostly sched_downmigrate;
 extern unsigned int  __read_mostly sysctl_sched_spill_nr_run;
 extern unsigned int  __read_mostly sched_load_granule;
 
-extern void init_new_task_load(struct task_struct *p, bool idle_task);
+extern void init_new_task_load(struct task_struct *p);
 extern u64 sched_ktime_clock(void);
 extern int got_boost_kick(void);
 extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
@@ -1644,7 +1644,7 @@ static inline struct sched_cluster *rq_cluster(struct rq *rq)
        return NULL;
 }
 
-static inline void init_new_task_load(struct task_struct *p, bool idle_task)
+static inline void init_new_task_load(struct task_struct *p)
 {
 }
 
index 552e154..979248f 100644 (file)
@@ -32,7 +32,7 @@ struct task_struct *idle_thread_get(unsigned int cpu)
 
        if (!tsk)
                return ERR_PTR(-ENOMEM);
-       init_idle(tsk, cpu, true);
+       init_idle(tsk, cpu);
        return tsk;
 }
 
index ede0c1f..29413e2 100644 (file)
@@ -1764,7 +1764,7 @@ static int validate_prctl_map(struct prctl_mm_map *prctl_map)
        ((unsigned long)prctl_map->__m1 __op                            \
         (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
        error  = __prctl_check_order(start_code, <, end_code);
-       error |= __prctl_check_order(start_data, <, end_data);
+       error |= __prctl_check_order(start_data,<=, end_data);
        error |= __prctl_check_order(start_brk, <=, brk);
        error |= __prctl_check_order(arg_start, <=, arg_end);
        error |= __prctl_check_order(env_start, <=, env_end);
index 43a049f..b25717e 100644 (file)
@@ -2782,8 +2782,10 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
                        if (neg)
                                continue;
                        val = convmul * val / convdiv;
-                       if ((min && val < *min) || (max && val > *max))
-                               continue;
+                       if ((min && val < *min) || (max && val > *max)) {
+                               err = -EINVAL;
+                               break;
+                       }
                        *i = val;
                } else {
                        val = convdiv * (*i) / convmul;
index ab86177..0e0dc5d 100644 (file)
@@ -633,7 +633,7 @@ static inline void process_adjtimex_modes(struct timex *txc,
                time_constant = max(time_constant, 0l);
        }
 
-       if (txc->modes & ADJ_TAI && txc->constant > 0)
+       if (txc->modes & ADJ_TAI && txc->constant >= 0)
                *time_tai = txc->constant;
 
        if (txc->modes & ADJ_OFFSET)
index 809f455..664364b 100644 (file)
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -101,8 +101,10 @@ static int __init cma_activate_area(struct cma *cma)
 
        cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
 
-       if (!cma->bitmap)
+       if (!cma->bitmap) {
+               cma->count = 0;
                return -ENOMEM;
+       }
 
        WARN_ON_ONCE(!pfn_valid(pfn));
        zone = page_zone(pfn_to_page(pfn));
index f8e4b60..da50dab 100644 (file)
@@ -57,7 +57,7 @@ static int cma_maxchunk_get(void *data, u64 *val)
        mutex_lock(&cma->lock);
        for (;;) {
                start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
-               if (start >= cma->count)
+               if (start >= bitmap_maxno)
                        break;
                end = find_next_bit(cma->bitmap, bitmap_maxno, start);
                maxchunk = max(end - start, maxchunk);
index d7f65a8..fd932e7 100644 (file)
@@ -1221,12 +1221,23 @@ void free_huge_page(struct page *page)
        ClearPagePrivate(page);
 
        /*
-        * A return code of zero implies that the subpool will be under its
-        * minimum size if the reservation is not restored after page is free.
-        * Therefore, force restore_reserve operation.
+        * If PagePrivate() was set on page, page allocation consumed a
+        * reservation.  If the page was associated with a subpool, there
+        * would have been a page reserved in the subpool before allocation
+        * via hugepage_subpool_get_pages().  Since we are 'restoring' the
+        * reservtion, do not call hugepage_subpool_put_pages() as this will
+        * remove the reserved page from the subpool.
         */
-       if (hugepage_subpool_put_pages(spool, 1) == 0)
-               restore_reserve = true;
+       if (!restore_reserve) {
+               /*
+                * A return code of zero implies that the subpool will be
+                * under its minimum size if the reservation is not restored
+                * after page is free.  Therefore, force restore_reserve
+                * operation.
+                */
+               if (hugepage_subpool_put_pages(spool, 1) == 0)
+                       restore_reserve = true;
+       }
 
        spin_lock(&hugetlb_lock);
        clear_page_huge_active(page);
index 732a066..4aa714d 100644 (file)
@@ -313,7 +313,7 @@ static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
        }
        return 0;
 fail:
-       __memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1);
+       __memcg_destroy_list_lru_node(memcg_lrus, begin, i);
        return -ENOMEM;
 }
 
index ae4f0fc..3bb666c 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -42,6 +42,7 @@
 #include <linux/memory.h>
 #include <linux/printk.h>
 #include <linux/userfaultfd_k.h>
+#include <linux/mm.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -2435,7 +2436,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
        vma = find_vma_prev(mm, addr, &prev);
        if (vma && (vma->vm_start <= addr))
                return vma;
-       if (!prev || expand_stack(prev, addr))
+       /* don't alter vm_end if the coredump is running */
+       if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr))
                return NULL;
        if (prev->vm_flags & VM_LOCKED)
                populate_vma_page_range(prev, addr, prev->vm_end, NULL);
@@ -2461,6 +2463,9 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
                return vma;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                return NULL;
+       /* don't alter vm_start if the coredump is running */
+       if (!mmget_still_valid(mm))
+               return NULL;
        start = vma->vm_start;
        if (expand_stack(vma, addr))
                return NULL;
index 149f82b..6ba56f2 100644 (file)
@@ -443,9 +443,11 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
        }
 
        if (ax25->sk != NULL) {
+               local_bh_disable();
                bh_lock_sock(ax25->sk);
                sock_reset_flag(ax25->sk, SOCK_ZAPPED);
                bh_unlock_sock(ax25->sk);
+               local_bh_enable();
        }
 
 put:
index ccddf34..1588d91 100644 (file)
@@ -1177,14 +1177,6 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
            !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
                return 0;
 
-       /* The minimum encryption key size needs to be enforced by the
-        * host stack before establishing any L2CAP connections. The
-        * specification in theory allows a minimum of 1, but to align
-        * BR/EDR and LE transports, a minimum of 7 is chosen.
-        */
-       if (conn->enc_key_size < HCI_MIN_ENC_KEY_SIZE)
-               return 0;
-
        return 1;
 }
 
index 7dbd495..2572b51 100644 (file)
@@ -2705,6 +2705,7 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
 }
 
 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
+       __acquires(tbl->lock)
        __acquires(rcu_bh)
 {
        struct neigh_seq_state *state = seq->private;
@@ -2715,6 +2716,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
 
        rcu_read_lock_bh();
        state->nht = rcu_dereference_bh(tbl->nht);
+       read_lock(&tbl->lock);
 
        return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
 }
@@ -2748,8 +2750,13 @@ out:
 EXPORT_SYMBOL(neigh_seq_next);
 
 void neigh_seq_stop(struct seq_file *seq, void *v)
+       __releases(tbl->lock)
        __releases(rcu_bh)
 {
+       struct neigh_seq_state *state = seq->private;
+       struct neigh_table *tbl = state->tbl;
+
+       read_unlock(&tbl->lock);
        rcu_read_unlock_bh();
 }
 EXPORT_SYMBOL(neigh_seq_stop);
index c6061f7..8a6c682 100644 (file)
@@ -254,9 +254,9 @@ struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
        rcu_read_lock_bh();
        for_each_sk_fl_rcu(np, sfl) {
                struct ip6_flowlabel *fl = sfl->fl;
-               if (fl->label == label) {
+
+               if (fl->label == label && atomic_inc_not_zero(&fl->users)) {
                        fl->lastuse = jiffies;
-                       atomic_inc(&fl->users);
                        rcu_read_unlock_bh();
                        return fl;
                }
@@ -622,7 +622,8 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
                                                goto done;
                                        }
                                        fl1 = sfl->fl;
-                                       atomic_inc(&fl1->users);
+                                       if (!atomic_inc_not_zero(&fl1->users))
+                                               fl1 = NULL;
                                        break;
                                }
                        }
index fc60d9d..cdb913e 100644 (file)
@@ -182,6 +182,7 @@ int lapb_unregister(struct net_device *dev)
        lapb = __lapb_devtostruct(dev);
        if (!lapb)
                goto out;
+       lapb_put(lapb);
 
        lapb_stop_t1timer(lapb);
        lapb_stop_t2timer(lapb);
index f04714d..a42e2ce 100644 (file)
@@ -550,10 +550,10 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client,
                list_del_init(list);
        grp->exclusive = 0;
        write_unlock_irq(&grp->list_lock);
-       up_write(&grp->list_mutex);
 
        if (!empty)
                unsubscribe_port(client, port, grp, &subs->info, ack);
+       up_write(&grp->list_mutex);
 }
 
 /* connect two ports */
index 74c9600..ef8955a 100644 (file)
@@ -1707,9 +1707,6 @@ static int azx_first_init(struct azx *chip)
                        chip->msi = 0;
        }
 
-       if (azx_acquire_irq(chip, 0) < 0)
-               return -EBUSY;
-
        pci_set_master(pci);
        synchronize_irq(bus->irq);
 
@@ -1820,6 +1817,9 @@ static int azx_first_init(struct azx *chip)
                return -ENODEV;
        }
 
+       if (azx_acquire_irq(chip, 0) < 0)
+               return -EBUSY;
+
        strcpy(card->driver, "HDA-Intel");
        strlcpy(card->shortname, driver_short_names[chip->driver_type],
                sizeof(card->shortname));
index d562e1b..5b07970 100644 (file)
@@ -561,6 +561,7 @@ static int cs42xx8_runtime_resume(struct device *dev)
        msleep(5);
 
        regcache_cache_only(cs42xx8->regmap, false);
+       regcache_mark_dirty(cs42xx8->regmap);
 
        ret = regcache_sync(cs42xx8->regmap);
        if (ret) {