Controls the dirty page count condition for the in-place-update
policies.
+What: /sys/fs/f2fs/<disk>/min_seq_blocks
+Date: August 2018
+Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+ Controls the dirty page count condition for batched sequential
+ writes in ->writepages.
+
+
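	A minimal sketch of tuning this knob from userspace (the disk
	name "sdb" and the value 64 are illustrative assumptions):

		#include <stdio.h>

		int main(void)
		{
			/* "sdb" is a placeholder for the f2fs block device name */
			FILE *f = fopen("/sys/fs/f2fs/sdb/min_seq_blocks", "w");

			if (!f)
				return 1;
			/* batch sequential writes only once 64 dirty pages accumulate */
			fprintf(f, "%d\n", 64);
			return fclose(f) ? 1 : 0;
		}
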
What: /sys/fs/f2fs/<disk>/min_hot_blocks
Date: March 2017
Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
- qcom,mdss-dsi-border-color: Defines the border color value if border is present.
0 = default value.
- qcom,mdss-dsi-pan-enable-dynamic-fps: Boolean used to enable dynamic frame rate changes.
+- qcom,mdss-dsi-pan-enable-dynamic-bitclk: Boolean used to enable dynamic DSI bit clock changes.
+- qcom,mdss-dsi-dynamic-bitclk_freq: An array of integers that specifies the DSI bit clock
+	frequencies supported as part of the dynamic bit clock feature.
- qcom,mdss-dsi-pan-fps-update: A string that specifies when to change the frame rate.
"dfps_suspend_resume_mode"= FPS change request is
implemented during suspend/resume.
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-pan-enable-dynamic-fps;
qcom,mdss-dsi-pan-fps-update = "dfps_suspend_resume_mode";
+ qcom,mdss-dsi-pan-enable-dynamic-bitclk;
+ qcom,mdss-dsi-dynamic-bitclk_freq = <711037824 724453632 737869440
+ 751285248 764701056 778116864 791532672 804948480>;
qcom,min-refresh-rate = <30>;
qcom,max-refresh-rate = <60>;
qcom,mdss-dsi-bl-pmic-bank-select = <0>;
Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
the Cadence GEM, or the generic form: "cdns,gem".
Use "atmel,sama5d2-gem" for the GEM IP (10/100) available on Atmel sama5d2 SoCs.
+ Use "atmel,sama5d3-macb" for the 10/100Mbit IP available on Atmel sama5d3 SoCs.
Use "atmel,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC.
enabled by default.
data_flush Enable data flushing before checkpoint in order to
                       persist data of regular files and symlinks.
+fault_injection=%d     Enable fault injection in all supported types with the
+                       specified injection rate.
+fault_type=%d          Support configuring the fault injection type; it should
+                       be enabled together with the fault_injection option.
+                       The fault type values are listed below; a single type
+                       or a bitwise-OR combination of types is supported (see
+                       the example after the table).
+ Type_Name Type_Value
+ FAULT_KMALLOC 0x000000001
+ FAULT_KVMALLOC 0x000000002
+ FAULT_PAGE_ALLOC 0x000000004
+ FAULT_PAGE_GET 0x000000008
+ FAULT_ALLOC_BIO 0x000000010
+ FAULT_ALLOC_NID 0x000000020
+ FAULT_ORPHAN 0x000000040
+ FAULT_BLOCK 0x000000080
+ FAULT_DIR_DEPTH 0x000000100
+ FAULT_EVICT_INODE 0x000000200
+ FAULT_TRUNCATE 0x000000400
+ FAULT_IO 0x000000800
+ FAULT_CHECKPOINT 0x000001000
+ FAULT_DISCARD 0x000002000
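
	As an illustration of combining types (the type values come from
	the table above; the device path, mount point, and injection rate
	below are assumptions, not part of the interface):

		#include <sys/mount.h>

		int main(void)
		{
			/*
			 * 0x001 (FAULT_KMALLOC) | 0x004 (FAULT_PAGE_ALLOC)
			 * = 0x005, so fault_type=5 selects both kinds of
			 * failures, at an assumed 1-in-1000 injection rate.
			 */
			return mount("/dev/sdb1", "/mnt/f2fs", "f2fs", 0,
				     "fault_injection=1000,fault_type=5") ? 1 : 0;
		}
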
mode=%s Control block allocation mode which supports "adaptive"
and "lfs". In "lfs" mode, there should be no random
                       writes towards the main area.
Datasheet: Publicly available at the Texas Instruments website
http://www.ti.com/
-Author: Lothar Felten <l-felten@ti.com>
+Author: Lothar Felten <lothar.felten@gmail.com>
Description
-----------
See Documentation/x86/intel_mpx.txt for more
information about the feature.
- eagerfpu= [X86]
- on enable eager fpu restore
- off disable eager fpu restore
- auto selects the default scheme, which automatically
- enables eagerfpu restore for xsaveopt.
module.async_probe [KNL]
Enable asynchronous probe on this module.
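			For example, passing the token below on the kernel
			command line requests asynchronous probing for a
			hypothetical module named "mydriver":

				mydriver.async_probe
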
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 158
+SUBLEVEL = 162
EXTRAVERSION =
NAME = Blurry Fish Butt
cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs
-is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
-
-ifdef CONFIG_ISA_ARCOMPACT
-ifeq ($(is_700), 0)
- $(error Toolchain not configured for ARCompact builds)
-endif
-endif
-
-ifdef CONFIG_ISA_ARCV2
-ifeq ($(is_700), 1)
- $(error Toolchain not configured for ARCv2 builds)
-endif
-endif
-
ifdef CONFIG_ARC_CURR_IN_REG
# For a global register definition, make sure it gets passed to every file
# We had a customer-reported bug where some code built into the kernel was NOT using
task_thread_info(current)->thr_ptr;
}
+
+ /*
+ * setup usermode thread pointer #1:
+ * when child is picked by scheduler, __switch_to() uses @c_callee to
+ * populate usermode callee regs: this works (despite being in a kernel
+ * function) since special return path for child @ret_from_fork()
+ * ensures those regs are not clobbered all the way to RTIE to usermode
+ */
+ c_callee->r25 = task_thread_info(p)->thr_ptr;
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ /*
+ * setup usermode thread pointer #2:
+ * however for this special use of r25 in kernel, __switch_to() sets
+ * r25 for kernel needs and only in the final return path is usermode
+ * r25 setup, from pt_regs->user_r25. So set that up as well
+ */
+ c_regs->user_r25 = c_callee->r25;
+#endif
+
return 0;
}
};
};
- dcan1: can@481cc000 {
+ dcan1: can@4ae3c000 {
compatible = "ti,dra7-d_can";
ti,hwmods = "dcan1";
reg = <0x4ae3c000 0x2000>;
status = "disabled";
};
- dcan2: can@481d0000 {
+ dcan2: can@48480000 {
compatible = "ti,dra7-d_can";
ti,hwmods = "dcan2";
reg = <0x48480000 0x2000>;
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
};
&soc {
+ ssc_sensors: qcom,msm-ssc-sensors {
+ status = "disabled";
+ };
qcom,msm-thermal {
qcom,hotplug-temp = <115>;
qcom,hotplug-temp-hysteresis = <25>;
};
&soc {
+ ssc_sensors: qcom,msm-ssc-sensors {
+ status = "disabled";
+ };
qcom,msm-thermal {
qcom,hotplug-temp = <115>;
qcom,hotplug-temp-hysteresis = <25>;
};
&soc {
+ ssc_sensors: qcom,msm-ssc-sensors {
+ status = "disabled";
+ };
qcom,msm-thermal {
qcom,hotplug-temp = <115>;
qcom,hotplug-temp-hysteresis = <25>;
status = "disabled";
};
- qcom,msm-ssc-sensors {
- compatible = "qcom,msm-ssc-sensors";
- qcom,firmware-name = "slpi";
- status = "ok";
+ ssc_sensors: qcom,msm-ssc-sensors {
+ status = "disabled";
};
sound-adp-agave {
qcom,refs-tdm-rx = <&dai_tert_tdm_rx_5>;
qcom,spkr-tdm-rx = <&dai_quat_tdm_rx_0>;
qcom,mic-tdm-tx = <&dai_quat_tdm_tx_0>;
+ status = "disabled";
};
usb_detect: usb_detect {
qcom,thermal-node;
};
};
+
+&soc {
+ qcom,msm-thermal {
+ qcom,hotplug-temp = <115>;
+ qcom,hotplug-temp-hysteresis = <25>;
+ qcom,therm-reset-temp = <119>;
+ };
+};
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
};
&soc {
+ ssc_sensors: qcom,msm-ssc-sensors {
+ status = "disabled";
+ };
qcom,msm-thermal {
qcom,hotplug-temp = <115>;
qcom,hotplug-temp-hysteresis = <25>;
};
&soc {
+ ssc_sensors: qcom,msm-ssc-sensors {
+ status = "disabled";
+ };
qcom,msm-thermal {
qcom,hotplug-temp = <115>;
qcom,hotplug-temp-hysteresis = <25>;
};
&soc {
+ ssc_sensors: qcom,msm-ssc-sensors {
+ status = "disabled";
+ };
qcom,msm-thermal {
qcom,hotplug-temp = <115>;
qcom,hotplug-temp-hysteresis = <25>;
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
reg = <0x10 8>;
};
- dload_type@18 {
+ dload_type@1c {
compatible = "qcom,msm-imem-dload-type";
- reg = <0x18 4>;
+ reg = <0x1c 4>;
};
restart_reason@65c {
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
#clock-cells = <1>;
reg = <0xc994400 0x588>,
- <0xc8c2300 0x8>;
- reg-names = "pll_base", "gdsc_base";
+ <0xc8c2300 0x8>,
+ <0xc994200 0x98>;
+ reg-names = "pll_base", "gdsc_base", "dynamic_pll_base";
gdsc-supply = <&gdsc_mdss>;
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
<&clock_mmss MMSS_MDSS_ESC0_CLK>,
<&clock_mmss BYTE0_CLK_SRC>,
<&clock_mmss PCLK0_CLK_SRC>,
- <&clock_mmss MMSS_MDSS_BYTE0_INTF_CLK>;
+ <&clock_mmss MMSS_MDSS_BYTE0_INTF_CLK>,
+ <&mdss_dsi0_pll BYTE0_MUX_CLK>,
+ <&mdss_dsi0_pll PIX0_MUX_CLK>,
+ <&mdss_dsi0_pll BYTE0_SRC_CLK>,
+ <&mdss_dsi0_pll PIX0_SRC_CLK>,
+ <&mdss_dsi0_pll SHADOW_BYTE0_SRC_CLK>,
+ <&mdss_dsi0_pll SHADOW_PIX0_SRC_CLK>;
clock-names = "byte_clk", "pixel_clk", "core_clk",
"byte_clk_rcg", "pixel_clk_rcg",
- "byte_intf_clk";
+ "byte_intf_clk", "pll_byte_clk_mux",
+ "pll_pixel_clk_mux", "pll_byte_clk_src",
+ "pll_pixel_clk_src", "pll_shadow_byte_clk_src",
+ "pll_shadow_pixel_clk_src";
qcom,platform-strength-ctrl = [ff 06
ff 06
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
gpio-key,wakeup;
debounce-interval = <15>;
};
-
- home {
- label = "home";
- gpios = <&pm660_gpios 11 0x1>;
- linux,input-type = <1>;
- linux,code = <102>;
- gpio-key,wakeup;
- debounce-interval = <15>;
- };
-
};
hbtp {
};
cont_splash_mem: splash_region@9d400000 {
- reg = <0x0 0x9d400000 0x0 0x02400000>;
+ reg = <0x0 0x9d400000 0x0 0x23ff000>;
label = "cont_splash_mem";
};
+
+ dfps_data_mem: dfps_data_mem@0x9f7ff000 {
+ reg = <0 0x9f7ff000 0 0x00001000>;
+ label = "dfps_data_mem";
+ };
};
bluetooth: bt_wcn3990 {
qcom,mdss-dsi-panel-max-error-count = <3>;
qcom,mdss-dsi-min-refresh-rate = <53>;
qcom,mdss-dsi-max-refresh-rate = <60>;
+ qcom,mdss-dsi-pan-enable-dynamic-bitclk;
+ qcom,mdss-dsi-dynamic-bitclk_freq = <798240576 801594528 804948480
+ 808302432 811656384>;
qcom,mdss-dsi-pan-enable-dynamic-fps;
qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
};
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
#clock-cells = <1>;
reg = <0xc994400 0x588>,
- <0xc8c2300 0x8>;
- reg-names = "pll_base", "gdsc_base";
+ <0xc8c2300 0x8>,
+ <0xc994200 0x98>;
+ reg-names = "pll_base", "gdsc_base", "dynamic_pll_base";
gdsc-supply = <&gdsc_mdss>;
clock-rate = <0>;
qcom,dsi-pll-ssc-en;
qcom,dsi-pll-ssc-mode = "down-spread";
+ memory-region = <&dfps_data_mem>;
qcom,platform-supply-entries {
#address-cells = <1>;
#clock-cells = <1>;
reg = <0xc996400 0x588>,
- <0xc8c2300 0x8>;
- reg-names = "pll_base", "gdsc_base";
+ <0xc8c2300 0x8>,
+ <0xc996200 0x98>;
+ reg-names = "pll_base", "gdsc_base", "dynamic_pll_base";
gdsc-supply = <&gdsc_mdss>;
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
<&clock_mmss MMSS_MDSS_ESC0_CLK>,
<&clock_mmss BYTE0_CLK_SRC>,
<&clock_mmss PCLK0_CLK_SRC>,
- <&clock_mmss MMSS_MDSS_BYTE0_INTF_CLK>;
+ <&clock_mmss MMSS_MDSS_BYTE0_INTF_CLK>,
+ <&mdss_dsi0_pll BYTE0_MUX_CLK>,
+ <&mdss_dsi0_pll PIX0_MUX_CLK>,
+ <&mdss_dsi0_pll BYTE0_SRC_CLK>,
+ <&mdss_dsi0_pll PIX0_SRC_CLK>,
+ <&mdss_dsi0_pll SHADOW_BYTE0_SRC_CLK>,
+ <&mdss_dsi0_pll SHADOW_PIX0_SRC_CLK>;
clock-names = "byte_clk", "pixel_clk", "core_clk",
"byte_clk_rcg", "pixel_clk_rcg",
- "byte_intf_clk";
+ "byte_intf_clk", "pll_byte_clk_mux",
+ "pll_pixel_clk_mux", "pll_byte_clk_src",
+ "pll_pixel_clk_src", "pll_shadow_byte_clk_src",
+ "pll_shadow_pixel_clk_src";
qcom,null-insertion-enabled;
qcom,platform-strength-ctrl = [ff 06
<&clock_mmss MMSS_MDSS_ESC1_CLK>,
<&clock_mmss BYTE1_CLK_SRC>,
<&clock_mmss PCLK1_CLK_SRC>,
- <&clock_mmss MMSS_MDSS_BYTE1_INTF_CLK>;
+ <&clock_mmss MMSS_MDSS_BYTE1_INTF_CLK>,
+ <&mdss_dsi1_pll BYTE1_MUX_CLK>,
+ <&mdss_dsi1_pll PIX1_MUX_CLK>,
+ <&mdss_dsi1_pll BYTE1_SRC_CLK>,
+ <&mdss_dsi1_pll PIX1_SRC_CLK>,
+ <&mdss_dsi1_pll SHADOW_BYTE1_SRC_CLK>,
+ <&mdss_dsi1_pll SHADOW_PIX1_SRC_CLK>;
clock-names = "byte_clk", "pixel_clk", "core_clk",
"byte_clk_rcg", "pixel_clk_rcg",
- "byte_intf_clk";
+ "byte_intf_clk", "pll_byte_clk_mux",
+ "pll_pixel_clk_mux", "pll_byte_clk_src",
+ "pll_pixel_clk_src", "pll_shadow_byte_clk_src",
+ "pll_shadow_pixel_clk_src";
qcom,null-insertion-enabled;
qcom,platform-strength-ctrl = [ff 06
};
cont_splash_mem: splash_region@9d400000 {
- reg = <0x0 0x9d400000 0x0 0x02400000>;
+ reg = <0x0 0x9d400000 0x0 0x23ff000>;
label = "cont_splash_mem";
};
+
+ dfps_data_mem: dfps_data_mem@0x9f7ff000 {
+ reg = <0 0x9f7ff000 0 0x00001000>;
+ label = "dfps_data_mem";
+ };
};
bluetooth: bt_wcn3990 {
model = "Qualcomm Technologies, Inc. MSM 8996";
compatible = "qcom,msm8996";
qcom,msm-id = <246 0x0>;
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x0>;
+ efficiency = <1024>;
+ };
+ CPU1: cpu@1 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x1>;
+ efficiency = <1024>;
+ };
+ CPU2: cpu@2 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x2>;
+ efficiency = <1536>;
+ };
+ CPU3: cpu@3 {
+ device_type = "cpu";
+ compatible = "qcom,kryo";
+ reg = <0x0 0x3>;
+ efficiency = <1536>;
+ };
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&CPU0>;
+ };
+ core1 {
+ cpu = <&CPU1>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&CPU2>;
+ };
+
+ core1 {
+ cpu = <&CPU3>;
+ };
+ };
+ };
+};
firmware: firmware {
android {
pinctrl-names = "bootstrap_active", "bootstrap_sleep";
pinctrl-0 = <&cnss_bootstrap_active>;
pinctrl-1 = <&cnss_bootstrap_sleep>;
+ qcom,wlan-ramdump-dynamic = <0x200000>;
qcom,msm-bus,name = "msm-cnss";
qcom,msm-bus,num-cases = <4>;
};
macb1: ethernet@f802c000 {
- compatible = "cdns,at91sam9260-macb", "cdns,macb";
+ compatible = "atmel,sama5d3-macb", "cdns,at91sam9260-macb", "cdns,macb";
reg = <0xf802c000 0x100>;
interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
}
-extern unsigned char mvebu_boot_wa_start;
-extern unsigned char mvebu_boot_wa_end;
+extern unsigned char mvebu_boot_wa_start[];
+extern unsigned char mvebu_boot_wa_end[];
/*
* This function sets up the boot address workaround needed for SMP
phys_addr_t resume_addr_reg)
{
void __iomem *sram_virt_base;
- u32 code_len = &mvebu_boot_wa_end - &mvebu_boot_wa_start;
+ u32 code_len = mvebu_boot_wa_end - mvebu_boot_wa_start;
mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE);
mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute,
CONFIG_SECCOMP=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
# CONFIG_EFI is not set
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
CONFIG_COMPAT=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_NETFILTER=y
CONFIG_USB_CONFIGFS_F_MTP=y
CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_CDEV=y
CONFIG_USB_CONFIGFS_F_QDSS=y
# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_IPC_LOGGING=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_ARC4=y
CONFIG_SECCOMP=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
CONFIG_COMPAT=y
CONFIG_PM_AUTOSLEEP=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_NETFILTER=y
CONFIG_USB_CONFIGFS_F_MTP=y
CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_CDEV=y
CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_PANIC_ON_DATA_CORRUPTION=y
CONFIG_ARM64_PTDUMP=y
CONFIG_FREE_PAGES_RDONLY=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_ARC4=y
CONFIG_HOTPLUG_SIZE_BITS=28
CONFIG_LOCALVERSION="-perf"
# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_FHANDLE=y
CONFIG_AUDIT=y
# CONFIG_AUDITSYSCALL is not set
CONFIG_NO_HZ=y
CONFIG_SECCOMP=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
# CONFIG_EFI is not set
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_NETFILTER=y
CONFIG_USB_ISP1760=y
CONFIG_USB_EHSET_TEST_FIXTURE=y
CONFIG_USB_QTI_KS_BRIDGE=y
+CONFIG_USB_QCOM_IPC_BRIDGE=y
CONFIG_USB_QCOM_DIAG_BRIDGE=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_USB_MSM_SSPHY_QMP=y
CONFIG_USB_CONFIGFS_F_MTP=y
CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_CDEV=y
CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_CORESIGHT_QPDI=y
CONFIG_CORESIGHT_SOURCE_DUMMY=y
CONFIG_PFK=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_HOTPLUG_SIZE_BITS=28
# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_FHANDLE=y
CONFIG_AUDIT=y
# CONFIG_AUDITSYSCALL is not set
CONFIG_NO_HZ=y
CONFIG_SECCOMP=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
CONFIG_CMDLINE="console=ttyAMA0"
# CONFIG_EFI is not set
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPVTI=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_NETFILTER=y
CONFIG_USB_ISP1760=y
CONFIG_USB_EHSET_TEST_FIXTURE=y
CONFIG_USB_QTI_KS_BRIDGE=y
+CONFIG_USB_QCOM_IPC_BRIDGE=y
CONFIG_USB_QCOM_DIAG_BRIDGE=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_USB_MSM_SSPHY_QMP=y
CONFIG_USB_CONFIGFS_F_MTP=y
CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_CDEV=y
CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_CORESIGHT_QPDI=y
CONFIG_CORESIGHT_SOURCE_DUMMY=y
CONFIG_PFK=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
#define ARM64_HAS_VIRT_HOST_EXTN 12
#define ARM64_HARDEN_BRANCH_PREDICTOR 13
#define ARM64_UNMAP_KERNEL_AT_EL0 14
-#define ARM64_NCAPS 15
+#define ARM64_HAS_32BIT_EL0 15
+#define ARM64_NCAPS 16
#ifndef __ASSEMBLY__
return id_aa64mmfr0_mixed_endian_el0(read_cpuid(SYS_ID_AA64MMFR0_EL1));
}
+static inline bool system_supports_32bit_el0(void)
+{
+ return cpus_have_cap(ARM64_HAS_32BIT_EL0);
+}
+
static inline bool system_supports_mixed_endian_el0(void)
{
return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm goto("1: nop\n\t"
+ asm_volatile_goto("1: nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align 3\n\t"
".quad 1b, %l[l_yes], %c0\n\t"
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
- asm goto("1: b %l[l_yes]\n\t"
+ asm_volatile_goto("1: b %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align 3\n\t"
".quad 1b, %l[l_yes], %c0\n\t"
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+{
+ return !(vcpu->arch.hcr_el2 & HCR_RW);
+}
+
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
#define ID_AA64PFR0_ASIMD_SUPPORTED 0x0
#define ID_AA64PFR0_EL1_64BIT_ONLY 0x1
#define ID_AA64PFR0_EL0_64BIT_ONLY 0x1
+#define ID_AA64PFR0_EL0_32BIT_64BIT 0x2
/* id_aa64mmfr0 */
#define ID_AA64MMFR0_TGRAN4_SHIFT 28
.matches = unmap_kernel_at_el0,
},
#endif
+ {
+ .desc = "32-bit EL0 Support",
+ .capability = ARM64_HAS_32BIT_EL0,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
+ .field_pos = ID_AA64PFR0_EL0_SHIFT,
+ .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
+ },
{},
};
return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
+static int validate_core_offset(const struct kvm_one_reg *reg)
+{
+ u64 off = core_reg_offset_from_id(reg->id);
+ int size;
+
+ switch (off) {
+ case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+ KVM_REG_ARM_CORE_REG(regs.regs[30]):
+ case KVM_REG_ARM_CORE_REG(regs.sp):
+ case KVM_REG_ARM_CORE_REG(regs.pc):
+ case KVM_REG_ARM_CORE_REG(regs.pstate):
+ case KVM_REG_ARM_CORE_REG(sp_el1):
+ case KVM_REG_ARM_CORE_REG(elr_el1):
+ case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+ KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+ size = sizeof(__u64);
+ break;
+
+ case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+ KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+ size = sizeof(__uint128_t);
+ break;
+
+ case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+ case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+ size = sizeof(__u32);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (KVM_REG_SIZE(reg->id) == size &&
+ IS_ALIGNED(off, size / sizeof(__u32)))
+ return 0;
+
+ return -EINVAL;
+}
+
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
/*
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
+ if (validate_core_offset(reg))
+ return -EINVAL;
+
if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
return -EFAULT;
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
+ if (validate_core_offset(reg))
+ return -EINVAL;
+
if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
return -EINVAL;
}
if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
- u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK;
+ u64 mode = (*(u64 *)valp) & COMPAT_PSR_MODE_MASK;
switch (mode) {
case COMPAT_PSR_MODE_USR:
+ if (!system_supports_32bit_el0())
+ return -EINVAL;
+ break;
case COMPAT_PSR_MODE_FIQ:
case COMPAT_PSR_MODE_IRQ:
case COMPAT_PSR_MODE_SVC:
case COMPAT_PSR_MODE_ABT:
case COMPAT_PSR_MODE_UND:
+ if (!vcpu_el1_is_32bit(vcpu))
+ return -EINVAL;
+ break;
case PSR_MODE_EL0t:
case PSR_MODE_EL1t:
case PSR_MODE_EL1h:
+ if (vcpu_el1_is_32bit(vcpu))
+ return -EINVAL;
break;
default:
err = -EINVAL;
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static inline long fls(int x)
+static inline int fls(int x)
{
int r;
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
-static inline long ffs(int x)
+static inline int ffs(int x)
{
int r;
panic("Can't create %s() memory pool!", __func__);
else
gen_pool_add(coherent_pool,
- pfn_to_virt(max_low_pfn),
+ (unsigned long)pfn_to_virt(max_low_pfn),
hexagon_coherent_pool_size, -1);
}
}
early_param("fadump_reserve_mem", early_fadump_reserve_mem);
-static void register_fw_dump(struct fadump_mem_struct *fdm)
+static int register_fw_dump(struct fadump_mem_struct *fdm)
{
- int rc;
+ int rc, err;
unsigned int wait_time;
pr_debug("Registering for firmware-assisted kernel dump...\n");
} while (wait_time);
+ err = -EIO;
switch (rc) {
+ default:
+ pr_err("Failed to register. Unknown Error(%d).\n", rc);
+ break;
case -1:
printk(KERN_ERR "Failed to register firmware-assisted kernel"
" dump. Hardware Error(%d).\n", rc);
case -3:
printk(KERN_ERR "Failed to register firmware-assisted kernel"
" dump. Parameter Error(%d).\n", rc);
+ err = -EINVAL;
break;
case -9:
printk(KERN_ERR "firmware-assisted kernel dump is already "
" registered.");
fw_dump.dump_registered = 1;
+ err = -EEXIST;
break;
case 0:
printk(KERN_INFO "firmware-assisted kernel dump registration"
" is successful\n");
fw_dump.dump_registered = 1;
+ err = 0;
break;
}
+ return err;
}
void crash_fadump(struct pt_regs *regs, const char *str)
return addr;
}
-static void register_fadump(void)
+static int register_fadump(void)
{
unsigned long addr;
void *vaddr;
* assisted dump.
*/
if (!fw_dump.reserve_dump_area_size)
- return;
+ return -ENODEV;
ret = fadump_setup_crash_memory_ranges();
if (ret)
fadump_create_elfcore_headers(vaddr);
/* register the future kernel dump with firmware. */
- register_fw_dump(&fdm);
+ return register_fw_dump(&fdm);
}
static int fadump_unregister_dump(struct fadump_mem_struct *fdm)
switch (buf[0]) {
case '0':
if (fw_dump.dump_registered == 0) {
- ret = -EINVAL;
goto unlock_out;
}
/* Un-register Firmware-assisted dump */
break;
case '1':
if (fw_dump.dump_registered == 1) {
- ret = -EINVAL;
+ ret = -EEXIST;
goto unlock_out;
}
/* Register Firmware-assisted dump */
- register_fadump();
+ ret = register_fadump();
break;
default:
ret = -EINVAL;
(unsigned long)(crashk_res.start >> 20),
(unsigned long)(memblock_phys_mem_size() >> 20));
- memblock_reserve(crashk_res.start, crash_size);
+ if (!memblock_is_region_memory(crashk_res.start, crash_size) ||
+ memblock_reserve(crashk_res.start, crash_size)) {
+ pr_err("Failed to reserve memory for crashkernel!\n");
+ crashk_res.start = crashk_res.end = 0;
+ return;
+ }
}
int overlaps_crashkernel(unsigned long start, unsigned long size)
std r1, PACATMSCRATCH(r13)
ld r1, PACAR1(r13)
- /* Store the PPR in r11 and reset to decent value */
std r11, GPR11(r1) /* Temporary stash */
+ /*
+ * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
+ * clobbered by an exception once we turn on MSR_RI below.
+ */
+ ld r11, PACATMSCRATCH(r13)
+ std r11, GPR1(r1)
+
+ /*
+ * Store r13 away so we can free up the scratch SPR for the SLB fault
+ * handler (needed once we start accessing the thread_struct).
+ */
+ GET_SCRATCH0(r11)
+ std r11, GPR13(r1)
+
/* Reset MSR RI so we can take SLB faults again */
li r11, MSR_RI
mtmsrd r11, 1
+ /* Store the PPR in r11 and reset to decent value */
mfspr r11, SPRN_PPR
HMT_MEDIUM
SAVE_GPR(8, r7) /* user r8 */
SAVE_GPR(9, r7) /* user r9 */
SAVE_GPR(10, r7) /* user r10 */
- ld r3, PACATMSCRATCH(r13) /* user r1 */
+ ld r3, GPR1(r1) /* user r1 */
ld r4, GPR7(r1) /* user r7 */
ld r5, GPR11(r1) /* user r11 */
ld r6, GPR12(r1) /* user r12 */
- GET_SCRATCH0(8) /* user r13 */
+ ld r8, GPR13(r1) /* user r13 */
std r3, GPR1(r7)
std r4, GPR7(r7)
std r5, GPR11(r7)
unsigned long pp, key;
unsigned long v, gr;
__be64 *hptep;
- int index;
+ long int index;
int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
/* Get SLB entry */
level_shift = entries_shift + 3;
level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
- if ((level_shift - 3) * levels + page_shift >= 60)
+ if ((level_shift - 3) * levels + page_shift >= 55)
return -EINVAL;
/* Allocate TCE table */
struct dcss_segment {
struct list_head list;
char dcss_name[8];
- char res_name[15];
+ char res_name[16];
unsigned long start_addr;
unsigned long end;
atomic_t ref_count;
memcpy(&seg->res_name, seg->dcss_name, 8);
EBCASC(seg->res_name, 8);
seg->res_name[8] = '\0';
- strncat(seg->res_name, " (DCSS)", 7);
+ strlcat(seg->res_name, " (DCSS)", sizeof(seg->res_name));
seg->res->name = seg->res_name;
rc = seg->vm_segtype;
if (rc == SEG_TYPE_SC ||
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_VIRTIO_BLK=y
CONFIG_UID_SYS_STATS=y
-CONFIG_MEMORY_STATE_TIME=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
#ifdef CONFIG_X86_64
/*
* use carryless multiply version of crc32c when buffer
- * size is >= 512 (when eager fpu is enabled) or
- * >= 1024 (when eager fpu is disabled) to account
+ * size is >= 512 to account
* for fpu state save/restore overhead.
*/
-#define CRC32C_PCL_BREAKEVEN_EAGERFPU 512
-#define CRC32C_PCL_BREAKEVEN_NOEAGERFPU 1024
+#define CRC32C_PCL_BREAKEVEN 512
asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
unsigned int crc_init);
-static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU;
-#define set_pcl_breakeven_point() \
-do { \
- if (!use_eager_fpu()) \
- crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU; \
-} while (0)
#endif /* CONFIG_X86_64 */
static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
* use faster PCL version if datasize is large enough to
* overcome kernel fpu state save/restore overhead
*/
- if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+ if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
kernel_fpu_begin();
*crcp = crc_pcl(data, len, *crcp);
kernel_fpu_end();
static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
u8 *out)
{
- if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+ if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
kernel_fpu_begin();
*(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
kernel_fpu_end();
alg.update = crc32c_pcl_intel_update;
alg.finup = crc32c_pcl_intel_finup;
alg.digest = crc32c_pcl_intel_digest;
- set_pcl_breakeven_point();
}
#endif
return crypto_register_shash(&alg);
.endm
.macro TRACE_IRQS_IRETQ_DEBUG
- bt $9, EFLAGS(%rsp) /* interrupts off? */
+ btl $9, EFLAGS(%rsp) /* interrupts off? */
jnc 1f
TRACE_IRQS_ON_DEBUG
1:
#ifdef CONFIG_PREEMPT
/* Interrupts are off */
/* Check if we need preemption */
- bt $9, EFLAGS(%rsp) /* were interrupts off? */
+ btl $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
0: cmpl $0, PER_CPU_VAR(__preempt_count)
jnz 1f
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
long ret;
- asm("syscall" : "=a" (ret) :
- "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+ asm ("syscall" : "=a" (ret), "=m" (*ts) :
+ "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
+ "memory", "rcx", "r11");
return ret;
}
{
long ret;
- asm("syscall" : "=a" (ret) :
- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+ asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
+ "memory", "rcx", "r11");
return ret;
}
{
long ret;
- asm(
+ asm (
"mov %%ebx, %%edx \n"
- "mov %2, %%ebx \n"
+ "mov %[clock], %%ebx \n"
"call __kernel_vsyscall \n"
"mov %%edx, %%ebx \n"
- : "=a" (ret)
- : "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
+ : "=a" (ret), "=m" (*ts)
+ : "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
: "memory", "edx");
return ret;
}
{
long ret;
- asm(
+ asm (
"mov %%ebx, %%edx \n"
- "mov %2, %%ebx \n"
+ "mov %[tv], %%ebx \n"
"call __kernel_vsyscall \n"
"mov %%edx, %%ebx \n"
- : "=a" (ret)
- : "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
+ : "=a" (ret), "=m" (*tv), "=m" (*tz)
+ : "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
: "memory", "edx");
return ret;
}
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
-/* free, was #define X86_FEATURE_EAGER_FPU ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
/*
* FPU related CPU feature flag helper routines:
*/
-static __always_inline __pure bool use_eager_fpu(void)
-{
- return true;
-}
-
static __always_inline __pure bool use_xsaveopt(void)
{
return static_cpu_has(X86_FEATURE_XSAVEOPT);
}
-/*
- * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
- * idiom, which is then paired with the sw-flag (fpregs_active) later on:
- */
-
-static inline void __fpregs_activate_hw(void)
-{
- if (!use_eager_fpu())
- clts();
-}
-
-static inline void __fpregs_deactivate_hw(void)
-{
- if (!use_eager_fpu())
- stts();
-}
-
-/* Must be paired with an 'stts' (fpregs_deactivate_hw()) after! */
static inline void __fpregs_deactivate(struct fpu *fpu)
{
WARN_ON_FPU(!fpu->fpregs_active);
this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}
-/* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
static inline void __fpregs_activate(struct fpu *fpu)
{
WARN_ON_FPU(fpu->fpregs_active);
}
/*
- * Encapsulate the CR0.TS handling together with the
- * software flag.
- *
* These generally need preemption protection to work,
* do try to avoid using these on their own.
*/
static inline void fpregs_activate(struct fpu *fpu)
{
- __fpregs_activate_hw();
__fpregs_activate(fpu);
}
static inline void fpregs_deactivate(struct fpu *fpu)
{
__fpregs_deactivate(fpu);
- __fpregs_deactivate_hw();
}
/*
* or if the past 5 consecutive context-switches used math.
*/
fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
- new_fpu->fpstate_active &&
- (use_eager_fpu() || new_fpu->counter > 5);
+ new_fpu->fpstate_active;
if (old_fpu->fpregs_active) {
if (!copy_fpregs_to_fpstate(old_fpu))
/* Don't change CR0.TS if we just switch! */
if (fpu.preload) {
- new_fpu->counter++;
__fpregs_activate(new_fpu);
prefetch(&new_fpu->state);
- } else {
- __fpregs_deactivate_hw();
}
} else {
- old_fpu->counter = 0;
old_fpu->last_cpu = -1;
if (fpu.preload) {
- new_fpu->counter++;
if (fpu_want_lazy_restore(new_fpu, cpu))
fpu.preload = 0;
else
unsigned char fpregs_active;
/*
- * @counter:
- *
- * This counter contains the number of consecutive context switches
- * during which the FPU stays used. If this is over a threshold, the
- * lazy FPU restore logic becomes eager, to save the trap overhead.
- * This is an unsigned char so that after 256 iterations the counter
- * wraps and the context switch behavior turns lazy again; this is to
- * deal with bursty apps that only use the FPU for a short time:
- */
- unsigned char counter;
- /*
* @state:
*
* In-memory copy of all FPU registers that we save/restore
* the registers in the FPU are more recent than this state
* copy. If the task context-switches away then they get
* saved here and represent the FPU state.
- *
- * After context switches there may be a (short) time period
- * during which the in-FPU hardware registers are unchanged
- * and still perfectly match this state, if the tasks
- * scheduled afterwards are not using the FPU.
- *
- * This is the 'lazy restore' window of optimization, which
- * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
- *
- * We detect whether a subsequent task uses the FPU via setting
- * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
- *
- * During this window, if the task gets scheduled again, we
- * might be able to skip having to do a restore from this
- * memory buffer to the hardware registers - at the cost of
- * incurring the overhead of #NM fault traps.
- *
- * Note that on modern CPUs that support the XSAVEOPT (or other
- * optimized XSAVE instructions), we don't use #NM traps anymore,
- * as the hardware can track whether FPU registers need saving
- * or not. On such CPUs we activate the non-lazy ('eagerfpu')
- * logic, which unconditionally saves/restores all FPU state
- * across context switches. (if FPU state exists.)
*/
union fpregs_state state;
/*
struct kvm_mmu_memory_cache mmu_page_header_cache;
struct fpu guest_fpu;
- bool eager_fpu;
u64 xcr0;
u64 guest_supported_xcr0;
u32 guest_xstate_size;
return this_cpu_read(in_kernel_fpu);
}
-/*
- * Were we in an interrupt that interrupted kernel mode?
- *
- * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
- * pair does nothing at all: the thread must not have fpu (so
- * that we don't try to save the FPU state), and TS must
- * be set (so that the clts/stts pair does nothing that is
- * visible in the interrupted kernel thread).
- *
- * Except for the eagerfpu case when we return true; in the likely case
- * the thread has FPU but we are not going to set/clear TS.
- */
static bool interrupted_kernel_fpu_idle(void)
{
- if (kernel_fpu_disabled())
- return false;
-
- if (use_eager_fpu())
- return true;
-
- return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
+ return !kernel_fpu_disabled();
}
/*
copy_fpregs_to_fpstate(fpu);
} else {
this_cpu_write(fpu_fpregs_owner_ctx, NULL);
- __fpregs_activate_hw();
}
}
EXPORT_SYMBOL(__kernel_fpu_begin);
if (fpu->fpregs_active)
copy_kernel_to_fpregs(&fpu->state);
- else
- __fpregs_deactivate_hw();
kernel_fpu_enable();
}
preempt_disable();
if (fpu->fpregs_active) {
if (!copy_fpregs_to_fpstate(fpu)) {
- if (use_eager_fpu())
- copy_kernel_to_fpregs(&fpu->state);
- else
- fpregs_deactivate(fpu);
+ copy_kernel_to_fpregs(&fpu->state);
}
}
preempt_enable();
* Don't let 'init optimized' areas of the XSAVE area
* leak into the child task:
*/
- if (use_eager_fpu())
- memset(&dst_fpu->state.xsave, 0, xstate_size);
+ memset(&dst_fpu->state.xsave, 0, xstate_size);
/*
* Save current FPU registers directly into the child
if (!copy_fpregs_to_fpstate(dst_fpu)) {
memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
- if (use_eager_fpu())
- copy_kernel_to_fpregs(&src_fpu->state);
- else
- fpregs_deactivate(src_fpu);
+ copy_kernel_to_fpregs(&src_fpu->state);
}
preempt_enable();
}
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
- dst_fpu->counter = 0;
dst_fpu->fpregs_active = 0;
dst_fpu->last_cpu = -1;
kernel_fpu_disable();
fpregs_activate(fpu);
copy_kernel_to_fpregs(&fpu->state);
- fpu->counter++;
kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);
void fpu__drop(struct fpu *fpu)
{
preempt_disable();
- fpu->counter = 0;
if (fpu->fpregs_active) {
/* Ignore delayed exceptions from user space */
{
WARN_ON_FPU(fpu != ¤t->thread.fpu); /* Almost certainly an anomaly */
- if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
+ if (!static_cpu_has(X86_FEATURE_FPU)) {
/* FPU state will be reallocated lazily at the first use. */
fpu__drop(fpu);
} else {
}
fpu->fpstate_active = 1;
- if (use_eager_fpu()) {
- preempt_disable();
- fpu__restore(fpu);
- preempt_enable();
- }
+ preempt_disable();
+ fpu__restore(fpu);
+ preempt_enable();
return err;
} else {
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/param.h>
+#include <asm/tsc.h>
/* CPU reference clock frequency: in KHz */
#define FREQ_83 83200
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
-#include <asm/fpu/internal.h> /* For use_eager_fpu. Ugh! */
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
- vcpu->arch.eager_fpu = use_eager_fpu();
- if (vcpu->arch.eager_fpu)
- kvm_x86_ops->fpu_activate(vcpu);
+ kvm_x86_ops->fpu_activate(vcpu);
/*
* The existing code assumes virtual address is 48-bit in the canonical
copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
__kernel_fpu_end();
++vcpu->stat.fpu_reload;
- /*
- * If using eager FPU mode, or if the guest is a frequent user
- * of the FPU, just leave the FPU active for next time.
- * Every 255 times fpu_counter rolls over to 0; a guest that uses
- * the FPU in bursts will revert to loading it on demand.
- */
- if (!vcpu->arch.eager_fpu) {
- if (++vcpu->fpu_counter < 5)
- kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
- }
trace_kvm_fpu(0);
}
eb->nid = nid;
if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
- emu_nid_to_phys[nid] = nid;
+ emu_nid_to_phys[nid] = pb->nid;
pb->start += size;
if (pb->start >= pb->end) {
irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
{
int err, ret = IRQ_NONE;
- struct pt_regs regs;
+ struct pt_regs regs = {0};
const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
uint8_t xenpmu_flags = get_xenpmu_flags();
strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
sizeof(rblkcipher.geniv));
+ rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
sizeof(rblkcipher.geniv));
+ rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
sizeof(rblkcipher.geniv));
+ rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
dpm_wait_for_children(dev, async);
- if (async_error)
+ if (async_error) {
+ dev->power.direct_complete = false;
goto Complete;
+ }
/*
* If a device configured to wake up the system from sleep states
pm_get_active_wakeup_sources(suspend_abort,
MAX_SUSPEND_ABORT_LEN);
log_suspend_abort_reason(suspend_abort);
+ dev->power.direct_complete = false;
async_error = -EBUSY;
goto Complete;
}
(struct floppy_struct **)&outparam);
if (ret)
return ret;
+ memcpy(&inparam.g, outparam,
+ offsetof(struct floppy_struct, name));
+ outparam = &inparam.g;
break;
case FDMSGON:
UDP->flags |= FTD_MSG;
{ USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8723DE Bluetooth devices */
+ { USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8821AE Bluetooth devices */
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
u32 l3_pre_driver_1;
u32 l3_pre_driver_2;
+ u32 l0_res_code_offset;
+ u32 l1_res_code_offset;
+ u32 l2_res_code_offset;
+ u32 l3_res_code_offset;
+
bool debug;
};
if (ver == HDMI_VERSION_8998_3_3) {
if (bclk > high_freq_bit_clk_threshold) {
- cfg->l0_tx_drv_lvl = 0xA;
+ cfg->l0_tx_drv_lvl = 0xf;
cfg->l0_tx_emp_post1_lvl = 0x3;
- cfg->l1_tx_drv_lvl = 0xA;
- cfg->l1_tx_emp_post1_lvl = 0x3;
- cfg->l2_tx_drv_lvl = 0xA;
+ cfg->l1_tx_drv_lvl = 0xf;
+ cfg->l1_tx_emp_post1_lvl = 0x2;
+ cfg->l2_tx_drv_lvl = 0xf;
cfg->l2_tx_emp_post1_lvl = 0x3;
- cfg->l3_tx_drv_lvl = 0x8;
- cfg->l3_tx_emp_post1_lvl = 0x3;
+ cfg->l3_tx_drv_lvl = 0xf;
+ cfg->l3_tx_emp_post1_lvl = 0x0;
cfg->l0_pre_driver_1 = 0x0;
cfg->l0_pre_driver_2 = 0x1C;
cfg->l1_pre_driver_1 = 0x0;
cfg->l2_pre_driver_2 = 0x1C;
cfg->l3_pre_driver_1 = 0x0;
cfg->l3_pre_driver_2 = 0x0;
+ cfg->l0_res_code_offset = 0x3;
+ cfg->l1_res_code_offset = 0x0;
+ cfg->l2_res_code_offset = 0x0;
+ cfg->l3_res_code_offset = 0x3;
} else if (bclk > dig_freq_bit_clk_threshold) {
- cfg->l0_tx_drv_lvl = 0x9;
+ cfg->l0_tx_drv_lvl = 0xf;
cfg->l0_tx_emp_post1_lvl = 0x3;
- cfg->l1_tx_drv_lvl = 0x9;
+ cfg->l1_tx_drv_lvl = 0xf;
cfg->l1_tx_emp_post1_lvl = 0x3;
- cfg->l2_tx_drv_lvl = 0x9;
+ cfg->l2_tx_drv_lvl = 0xf;
cfg->l2_tx_emp_post1_lvl = 0x3;
- cfg->l3_tx_drv_lvl = 0x8;
- cfg->l3_tx_emp_post1_lvl = 0x3;
+ cfg->l3_tx_drv_lvl = 0xf;
+ cfg->l3_tx_emp_post1_lvl = 0x0;
cfg->l0_pre_driver_1 = 0x0;
cfg->l0_pre_driver_2 = 0x16;
cfg->l1_pre_driver_1 = 0x0;
cfg->l2_pre_driver_1 = 0x0;
cfg->l2_pre_driver_2 = 0x16;
cfg->l3_pre_driver_1 = 0x0;
- cfg->l3_pre_driver_2 = 0x0;
+ cfg->l3_pre_driver_2 = 0x18;
+ cfg->l0_res_code_offset = 0x3;
+ cfg->l1_res_code_offset = 0x0;
+ cfg->l2_res_code_offset = 0x0;
+ cfg->l3_res_code_offset = 0x0;
} else if (bclk > mid_freq_bit_clk_threshold) {
- cfg->l0_tx_drv_lvl = 0x9;
- cfg->l0_tx_emp_post1_lvl = 0x3;
- cfg->l1_tx_drv_lvl = 0x9;
- cfg->l1_tx_emp_post1_lvl = 0x3;
- cfg->l2_tx_drv_lvl = 0x9;
- cfg->l2_tx_emp_post1_lvl = 0x3;
- cfg->l3_tx_drv_lvl = 0x8;
- cfg->l3_tx_emp_post1_lvl = 0x3;
+ cfg->l0_tx_drv_lvl = 0xf;
+ cfg->l0_tx_emp_post1_lvl = 0x5;
+ cfg->l1_tx_drv_lvl = 0xf;
+ cfg->l1_tx_emp_post1_lvl = 0x5;
+ cfg->l2_tx_drv_lvl = 0xf;
+ cfg->l2_tx_emp_post1_lvl = 0x5;
+ cfg->l3_tx_drv_lvl = 0xf;
+ cfg->l3_tx_emp_post1_lvl = 0x0;
cfg->l0_pre_driver_1 = 0x0;
cfg->l0_pre_driver_2 = 0x0E;
cfg->l1_pre_driver_1 = 0x0;
cfg->l2_pre_driver_1 = 0x0;
cfg->l2_pre_driver_2 = 0x0E;
cfg->l3_pre_driver_1 = 0x0;
- cfg->l3_pre_driver_2 = 0x0;
+ cfg->l3_pre_driver_2 = 0x0E;
+ cfg->l0_res_code_offset = 0x0;
+ cfg->l1_res_code_offset = 0x0;
+ cfg->l2_res_code_offset = 0x0;
+ cfg->l3_res_code_offset = 0x0;
} else {
- cfg->l0_tx_drv_lvl = 0x0;
+ cfg->l0_tx_drv_lvl = 0x1;
cfg->l0_tx_emp_post1_lvl = 0x0;
- cfg->l1_tx_drv_lvl = 0x0;
+ cfg->l1_tx_drv_lvl = 0x1;
cfg->l1_tx_emp_post1_lvl = 0x0;
- cfg->l2_tx_drv_lvl = 0x0;
+ cfg->l2_tx_drv_lvl = 0x1;
cfg->l2_tx_emp_post1_lvl = 0x0;
cfg->l3_tx_drv_lvl = 0x0;
cfg->l3_tx_emp_post1_lvl = 0x0;
cfg->l0_pre_driver_1 = 0x0;
- cfg->l0_pre_driver_2 = 0x01;
+ cfg->l0_pre_driver_2 = 0x16;
cfg->l1_pre_driver_1 = 0x0;
- cfg->l1_pre_driver_2 = 0x01;
+ cfg->l1_pre_driver_2 = 0x16;
cfg->l2_pre_driver_1 = 0x0;
- cfg->l2_pre_driver_2 = 0x01;
+ cfg->l2_pre_driver_2 = 0x16;
cfg->l3_pre_driver_1 = 0x0;
- cfg->l3_pre_driver_2 = 0x0;
+ cfg->l3_pre_driver_2 = 0x18;
+ cfg->l0_res_code_offset = 0x0;
+ cfg->l1_res_code_offset = 0x0;
+ cfg->l2_res_code_offset = 0x0;
+ cfg->l3_res_code_offset = 0x0;
}
} else {
cfg->l0_tx_drv_lvl = 0xF;
cfg->l2_pre_driver_2 = 0x1E;
cfg->l3_pre_driver_1 = 0x0;
cfg->l3_pre_driver_2 = 0x10;
+ cfg->l0_res_code_offset = 0x3;
+ cfg->l1_res_code_offset = 0x0;
+ cfg->l2_res_code_offset = 0x0;
+ cfg->l3_res_code_offset = 0x3;
}
return rc;
_W(pll, PHY_TX_PRE_DRIVER_2(2), cfg.l2_pre_driver_2);
_W(pll, PHY_TX_PRE_DRIVER_2(3), cfg.l3_pre_driver_2);
- _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(0), 0x3);
- _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(1), 0x0);
- _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(2), 0x0);
- _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(3), 0x3);
+ _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(0), cfg.l0_res_code_offset);
+ _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(1), cfg.l1_res_code_offset);
+ _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(2), cfg.l2_res_code_offset);
+ _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(3), cfg.l3_res_code_offset);
_W(phy, PHY_MODE, cfg.phy_mode);
return;
}
+ if (!of_machine_is_compatible("ti,am43"))
+ ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
+
ti_32k_timer.counter = ti_32k_timer.base;
/*
struct dcp_coherent_block *coh;
struct completion completion[DCP_MAX_CHANS];
- struct mutex mutex[DCP_MAX_CHANS];
+ spinlock_t lock[DCP_MAX_CHANS];
struct task_struct *thread[DCP_MAX_CHANS];
struct crypto_queue queue[DCP_MAX_CHANS];
};
int ret;
- do {
- __set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
- mutex_lock(&sdcp->mutex[chan]);
+ spin_lock(&sdcp->lock[chan]);
backlog = crypto_get_backlog(&sdcp->queue[chan]);
arq = crypto_dequeue_request(&sdcp->queue[chan]);
- mutex_unlock(&sdcp->mutex[chan]);
+ spin_unlock(&sdcp->lock[chan]);
+
+ if (!backlog && !arq) {
+ schedule();
+ continue;
+ }
+
+ set_current_state(TASK_RUNNING);
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
if (arq) {
ret = mxs_dcp_aes_block_crypt(arq);
arq->complete(arq, ret);
- continue;
}
-
- schedule();
- } while (!kthread_should_stop());
+ }
return 0;
}
rctx->ecb = ecb;
actx->chan = DCP_CHAN_CRYPTO;
- mutex_lock(&sdcp->mutex[actx->chan]);
+ spin_lock(&sdcp->lock[actx->chan]);
ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
- mutex_unlock(&sdcp->mutex[actx->chan]);
+ spin_unlock(&sdcp->lock[actx->chan]);
wake_up_process(sdcp->thread[actx->chan]);
struct ahash_request *req;
int ret, fini;
- do {
- __set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
- mutex_lock(&sdcp->mutex[chan]);
+ spin_lock(&sdcp->lock[chan]);
backlog = crypto_get_backlog(&sdcp->queue[chan]);
arq = crypto_dequeue_request(&sdcp->queue[chan]);
- mutex_unlock(&sdcp->mutex[chan]);
+ spin_unlock(&sdcp->lock[chan]);
+
+ if (!backlog && !arq) {
+ schedule();
+ continue;
+ }
+
+ set_current_state(TASK_RUNNING);
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
ret = dcp_sha_req_to_buf(arq);
fini = rctx->fini;
arq->complete(arq, ret);
- if (!fini)
- continue;
}
-
- schedule();
- } while (!kthread_should_stop());
+ }
return 0;
}
rctx->init = 1;
}
- mutex_lock(&sdcp->mutex[actx->chan]);
+ spin_lock(&sdcp->lock[actx->chan]);
ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
- mutex_unlock(&sdcp->mutex[actx->chan]);
+ spin_unlock(&sdcp->lock[actx->chan]);
wake_up_process(sdcp->thread[actx->chan]);
mutex_unlock(&actx->mutex);
platform_set_drvdata(pdev, sdcp);
for (i = 0; i < DCP_MAX_CHANS; i++) {
- mutex_init(&sdcp->mutex[i]);
+ spin_lock_init(&sdcp->lock[i]);
init_completion(&sdcp->completion[i]);
crypto_init_queue(&sdcp->queue[i], 50);
}
rc = device_add(pvt->addrmatch_dev);
if (rc < 0)
- return rc;
+ goto err_put_addrmatch;
if (!pvt->is_registered) {
pvt->chancounts_dev = kzalloc(sizeof(*pvt->chancounts_dev),
GFP_KERNEL);
if (!pvt->chancounts_dev) {
- put_device(pvt->addrmatch_dev);
- device_del(pvt->addrmatch_dev);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_del_addrmatch;
}
pvt->chancounts_dev->type = &all_channel_counts_type;
rc = device_add(pvt->chancounts_dev);
if (rc < 0)
- return rc;
+ goto err_put_chancounts;
}
return 0;
+
+err_put_chancounts:
+ put_device(pvt->chancounts_dev);
+err_del_addrmatch:
+ device_del(pvt->addrmatch_dev);
+err_put_addrmatch:
+ put_device(pvt->addrmatch_dev);
+
+ return rc;
}
static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
edac_dbg(1, "\n");
if (!pvt->is_registered) {
- put_device(pvt->chancounts_dev);
device_del(pvt->chancounts_dev);
+ put_device(pvt->chancounts_dev);
}
- put_device(pvt->addrmatch_dev);
device_del(pvt->addrmatch_dev);
+ put_device(pvt->addrmatch_dev);
}
/****************************************************************************
obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o
obj-$(CONFIG_GPIO_ZX) += gpio-zx.o
obj-$(CONFIG_MSM_SMP2P) += gpio-msm-smp2p.o
-obj-$(CONFIG_MSM_SMP2P_TEST) += gpio-msm-smp2p-test.o
uint8_t int_en[3];
uint8_t irq_mask[3];
uint8_t irq_stat[3];
+ uint8_t int_input_en[3];
+ uint8_t int_lvl_cached[3];
};
static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
int i;
- for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
+ for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
+ if (dev->int_input_en[i]) {
+ mutex_lock(&dev->lock);
+ dev->dir[i] &= ~dev->int_input_en[i];
+ dev->int_input_en[i] = 0;
+ adp5588_gpio_write(dev->client, GPIO_DIR1 + i,
+ dev->dir[i]);
+ mutex_unlock(&dev->lock);
+ }
+
+ if (dev->int_lvl_cached[i] != dev->int_lvl[i]) {
+ dev->int_lvl_cached[i] = dev->int_lvl[i];
+ adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i,
+ dev->int_lvl[i]);
+ }
+
if (dev->int_en[i] ^ dev->irq_mask[i]) {
dev->int_en[i] = dev->irq_mask[i];
adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
dev->int_en[i]);
}
+ }
mutex_unlock(&dev->irq_lock);
}
else
return -EINVAL;
- adp5588_gpio_direction_input(&dev->gpio_chip, gpio);
- adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
- dev->int_lvl[bank]);
+ dev->int_input_en[bank] |= bit;
return 0;
}
+++ /dev/null
-/* drivers/gpio/gpio-msm-smp2p-test.c
- *
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of_gpio.h>
-#include <linux/of_irq.h>
-#include <linux/gpio.h>
-#include <linux/debugfs.h>
-#include <linux/completion.h>
-#include <linux/interrupt.h>
-#include <linux/bitmap.h>
-#include "../soc/qcom/smp2p_private.h"
-#include "../soc/qcom/smp2p_test_common.h"
-
-/* Interrupt callback data */
-struct gpio_info {
- int gpio_base_id;
- int irq_base_id;
-
- bool initialized;
- struct completion cb_completion;
- int cb_count;
- DECLARE_BITMAP(triggered_irqs, SMP2P_BITS_PER_ENTRY);
-};
-
-/* GPIO Inbound/Outbound callback info */
-struct gpio_inout {
- struct gpio_info in;
- struct gpio_info out;
-};
-
-static struct gpio_inout gpio_info[SMP2P_NUM_PROCS];
-
-/**
- * Init/reset the callback data.
- *
- * @info: Pointer to callback data
- */
-static void cb_data_reset(struct gpio_info *info)
-{
- int n;
-
- if (!info)
- return;
-
- if (!info->initialized) {
- init_completion(&info->cb_completion);
- info->initialized = true;
- }
- info->cb_count = 0;
-
- for (n = 0; n < SMP2P_BITS_PER_ENTRY; ++n)
- clear_bit(n, info->triggered_irqs);
-
- reinit_completion(&info->cb_completion);
-}
-
-static int smp2p_gpio_test_probe(struct platform_device *pdev)
-{
- int id;
- int cnt;
- struct device_node *node = pdev->dev.of_node;
- struct gpio_info *gpio_info_ptr = NULL;
-
- /*
- * NOTE: This does a string-lookup of the GPIO pin name and doesn't
- * actually directly link to the SMP2P GPIO driver since all
- * GPIO/Interrupt access must be through standard
- * Linux GPIO / Interrupt APIs.
- */
- if (strcmp("qcom,smp2pgpio_test_smp2p_1_in", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_MODEM_PROC].in;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_1_out", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_MODEM_PROC].out;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_2_in", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_AUDIO_PROC].in;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_2_out", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_AUDIO_PROC].out;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_3_in", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_SENSOR_PROC].in;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_3_out", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_SENSOR_PROC].out;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_4_in", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_WIRELESS_PROC].in;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_4_out", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_WIRELESS_PROC].out;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_5_in", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_CDSP_PROC].in;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_5_out", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_CDSP_PROC].out;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_7_in", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_TZ_PROC].in;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_7_out", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_TZ_PROC].out;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_15_in", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_REMOTE_MOCK_PROC].in;
- } else if (
- strcmp("qcom,smp2pgpio_test_smp2p_15_out", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_REMOTE_MOCK_PROC].out;
- } else {
- pr_err("%s: unable to match device type '%s'\n",
- __func__, node->name);
- return -ENODEV;
- }
-
- /* retrieve the GPIO and interrupt ID's */
- cnt = of_gpio_count(node);
- if (cnt && gpio_info_ptr) {
- /*
- * Instead of looping through all 32-bits, we can just get the
- * first pin to get the base IDs. This saves on the verbosity
- * of the device tree nodes as well.
- */
- id = of_get_gpio(node, 0);
- if (id == -EPROBE_DEFER)
- return id;
- gpio_info_ptr->gpio_base_id = id;
- gpio_info_ptr->irq_base_id = gpio_to_irq(id);
- }
- return 0;
-}
-
-/*
- * NOTE: Instead of match table and device driver, you may be able to just
- * call of_find_compatible_node() in your init function.
- */
-static struct of_device_id msm_smp2p_match_table[] = {
- /* modem */
- {.compatible = "qcom,smp2pgpio_test_smp2p_1_out", },
- {.compatible = "qcom,smp2pgpio_test_smp2p_1_in", },
-
- /* audio (adsp) */
- {.compatible = "qcom,smp2pgpio_test_smp2p_2_out", },
- {.compatible = "qcom,smp2pgpio_test_smp2p_2_in", },
-
- /* sensor */
- {.compatible = "qcom,smp2pgpio_test_smp2p_3_out", },
- {.compatible = "qcom,smp2pgpio_test_smp2p_3_in", },
-
- /* wcnss */
- {.compatible = "qcom,smp2pgpio_test_smp2p_4_out", },
- {.compatible = "qcom,smp2pgpio_test_smp2p_4_in", },
-
- /* CDSP */
- {.compatible = "qcom,smp2pgpio_test_smp2p_5_out", },
- {.compatible = "qcom,smp2pgpio_test_smp2p_5_in", },
-
- /* TZ */
- {.compatible = "qcom,smp2pgpio_test_smp2p_7_out", },
- {.compatible = "qcom,smp2pgpio_test_smp2p_7_in", },
-
- /* mock loopback */
- {.compatible = "qcom,smp2pgpio_test_smp2p_15_out", },
- {.compatible = "qcom,smp2pgpio_test_smp2p_15_in", },
- {},
-};
-
-static struct platform_driver smp2p_gpio_driver = {
- .probe = smp2p_gpio_test_probe,
- .driver = {
- .name = "smp2pgpio_test",
- .owner = THIS_MODULE,
- .of_match_table = msm_smp2p_match_table,
- },
-};
-
-/**
- * smp2p_ut_local_gpio_out - Verify outbound functionality.
- *
- * @s: pointer to output file
- */
-static void smp2p_ut_local_gpio_out(struct seq_file *s)
-{
- int failed = 0;
- struct gpio_info *cb_info = &gpio_info[SMP2P_REMOTE_MOCK_PROC].out;
- int ret;
- int id;
- struct msm_smp2p_remote_mock *mock;
-
- seq_printf(s, "Running %s\n", __func__);
- do {
- /* initialize mock edge */
- ret = smp2p_reset_mock_edge();
- UT_ASSERT_INT(ret, ==, 0);
-
- mock = msm_smp2p_get_remote_mock();
- UT_ASSERT_PTR(mock, !=, NULL);
-
- mock->rx_interrupt_count = 0;
- memset(&mock->remote_item, 0,
- sizeof(struct smp2p_smem_item));
- smp2p_init_header((struct smp2p_smem *)&mock->remote_item,
- SMP2P_REMOTE_MOCK_PROC, SMP2P_APPS_PROC,
- 0, 1);
- strlcpy(mock->remote_item.entries[0].name, "smp2p",
- SMP2P_MAX_ENTRY_NAME);
- SMP2P_SET_ENT_VALID(
- mock->remote_item.header.valid_total_ent, 1);
- msm_smp2p_set_remote_mock_exists(true);
- mock->tx_interrupt();
-
- /* open GPIO entry */
- smp2p_gpio_open_test_entry("smp2p",
- SMP2P_REMOTE_MOCK_PROC, true);
-
- /* verify set/get functions */
- UT_ASSERT_INT(0, <, cb_info->gpio_base_id);
- for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
- int pin = cb_info->gpio_base_id + id;
-
- mock->rx_interrupt_count = 0;
- gpio_set_value(pin, 1);
- UT_ASSERT_INT(1, ==, mock->rx_interrupt_count);
- UT_ASSERT_INT(1, ==, gpio_get_value(pin));
-
- gpio_set_value(pin, 0);
- UT_ASSERT_INT(2, ==, mock->rx_interrupt_count);
- UT_ASSERT_INT(0, ==, gpio_get_value(pin));
- }
- if (failed)
- break;
-
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- pr_err("%s: Failed\n", __func__);
- seq_puts(s, "\tFailed\n");
- }
-
- smp2p_gpio_open_test_entry("smp2p",
- SMP2P_REMOTE_MOCK_PROC, false);
-}
-
-/**
- * smp2p_gpio_irq - Interrupt handler for inbound entries.
- *
- * @irq: Virtual IRQ being triggered
- * @data: Cookie data (struct gpio_info * in this case)
- * @returns: IRQ_HANDLED
- */
-static irqreturn_t smp2p_gpio_irq(int irq, void *data)
-{
- struct gpio_info *gpio_ptr = (struct gpio_info *)data;
- int offset;
-
- if (!gpio_ptr) {
- pr_err("%s: gpio_ptr is NULL for irq %d\n", __func__, irq);
- return IRQ_HANDLED;
- }
-
- offset = irq - gpio_ptr->irq_base_id;
- if (offset >= 0 && offset < SMP2P_BITS_PER_ENTRY)
- set_bit(offset, gpio_ptr->triggered_irqs);
- else
- pr_err("%s: invalid irq offset base %d; irq %d\n",
- __func__, gpio_ptr->irq_base_id, irq);
-
- ++gpio_ptr->cb_count;
- complete(&gpio_ptr->cb_completion);
- return IRQ_HANDLED;
-}
-
-/**
- * smp2p_ut_local_gpio_in - Verify inbound functionality.
- *
- * @s: pointer to output file
- */
-static void smp2p_ut_local_gpio_in(struct seq_file *s)
-{
- int failed = 0;
- struct gpio_info *cb_info = &gpio_info[SMP2P_REMOTE_MOCK_PROC].in;
- int id;
- int ret;
- int virq;
- struct msm_smp2p_remote_mock *mock;
-
- seq_printf(s, "Running %s\n", __func__);
-
- cb_data_reset(cb_info);
- do {
- /* initialize mock edge */
- ret = smp2p_reset_mock_edge();
- UT_ASSERT_INT(ret, ==, 0);
-
- mock = msm_smp2p_get_remote_mock();
- UT_ASSERT_PTR(mock, !=, NULL);
-
- mock->rx_interrupt_count = 0;
- memset(&mock->remote_item, 0,
- sizeof(struct smp2p_smem_item));
- smp2p_init_header((struct smp2p_smem *)&mock->remote_item,
- SMP2P_REMOTE_MOCK_PROC, SMP2P_APPS_PROC,
- 0, 1);
- strlcpy(mock->remote_item.entries[0].name, "smp2p",
- SMP2P_MAX_ENTRY_NAME);
- SMP2P_SET_ENT_VALID(
- mock->remote_item.header.valid_total_ent, 1);
- msm_smp2p_set_remote_mock_exists(true);
- mock->tx_interrupt();
-
- smp2p_gpio_open_test_entry("smp2p",
- SMP2P_REMOTE_MOCK_PROC, true);
-
- /* verify set/get functions locally */
- UT_ASSERT_INT(0, <, cb_info->gpio_base_id);
- for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
- int pin;
- int current_value;
-
- /* verify pin value cannot be set */
- pin = cb_info->gpio_base_id + id;
- current_value = gpio_get_value(pin);
-
- gpio_set_value(pin, 0);
- UT_ASSERT_INT(current_value, ==, gpio_get_value(pin));
- gpio_set_value(pin, 1);
- UT_ASSERT_INT(current_value, ==, gpio_get_value(pin));
-
- /* verify no interrupts */
- UT_ASSERT_INT(0, ==, cb_info->cb_count);
- }
- if (failed)
- break;
-
- /* register for interrupts */
- UT_ASSERT_INT(0, <, cb_info->irq_base_id);
- for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
- virq = cb_info->irq_base_id + id;
- UT_ASSERT_PTR(NULL, !=, irq_to_desc(virq));
- ret = request_irq(virq,
- smp2p_gpio_irq, IRQF_TRIGGER_RISING,
- "smp2p_test", cb_info);
- UT_ASSERT_INT(0, ==, ret);
- }
- if (failed)
- break;
-
- /* verify both rising and falling edge interrupts */
- for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
- virq = cb_info->irq_base_id + id;
- irq_set_irq_type(virq, IRQ_TYPE_EDGE_BOTH);
- cb_data_reset(cb_info);
-
- /* verify rising-edge interrupt */
- mock->remote_item.entries[0].entry = 1 << id;
- mock->tx_interrupt();
- UT_ASSERT_INT(cb_info->cb_count, ==, 1);
- UT_ASSERT_INT(0, <,
- test_bit(id, cb_info->triggered_irqs));
-
- /* verify falling-edge interrupt */
- mock->remote_item.entries[0].entry = 0;
- mock->tx_interrupt();
- UT_ASSERT_INT(cb_info->cb_count, ==, 2);
- UT_ASSERT_INT(0, <,
- test_bit(id, cb_info->triggered_irqs));
- }
- if (failed)
- break;
-
- /* verify rising-edge interrupts */
- for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
- virq = cb_info->irq_base_id + id;
- irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
- cb_data_reset(cb_info);
-
- /* verify only rising-edge interrupt is triggered */
- mock->remote_item.entries[0].entry = 1 << id;
- mock->tx_interrupt();
- UT_ASSERT_INT(cb_info->cb_count, ==, 1);
- UT_ASSERT_INT(0, <,
- test_bit(id, cb_info->triggered_irqs));
-
- mock->remote_item.entries[0].entry = 0;
- mock->tx_interrupt();
- UT_ASSERT_INT(cb_info->cb_count, ==, 1);
- UT_ASSERT_INT(0, <,
- test_bit(id, cb_info->triggered_irqs));
- }
- if (failed)
- break;
-
- /* verify falling-edge interrupts */
- for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
- virq = cb_info->irq_base_id + id;
- irq_set_irq_type(virq, IRQ_TYPE_EDGE_FALLING);
- cb_data_reset(cb_info);
-
-			/* verify only falling-edge interrupt is triggered */
- mock->remote_item.entries[0].entry = 1 << id;
- mock->tx_interrupt();
- UT_ASSERT_INT(cb_info->cb_count, ==, 0);
- UT_ASSERT_INT(0, ==,
- test_bit(id, cb_info->triggered_irqs));
-
- mock->remote_item.entries[0].entry = 0;
- mock->tx_interrupt();
- UT_ASSERT_INT(cb_info->cb_count, ==, 1);
- UT_ASSERT_INT(0, <,
- test_bit(id, cb_info->triggered_irqs));
- }
- if (failed)
- break;
-
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- pr_err("%s: Failed\n", __func__);
- seq_puts(s, "\tFailed\n");
- }
-
- /* unregister for interrupts */
- if (cb_info->irq_base_id) {
- for (id = 0; id < SMP2P_BITS_PER_ENTRY; ++id)
- free_irq(cb_info->irq_base_id + id, cb_info);
- }
-
- smp2p_gpio_open_test_entry("smp2p",
- SMP2P_REMOTE_MOCK_PROC, false);
-}
-
-/**
- * smp2p_ut_local_gpio_in_update_open - Verify combined open/update.
- *
- * @s: pointer to output file
- *
- * If the remote side updates the SMP2P bits and sends before negotiation is
- * complete, then the UPDATE event will have to be delayed until negotiation is
- * complete. This should result in both the OPEN and UPDATE events coming in
- * right after each other and the behavior should be transparent to the clients
- * of SMP2P GPIO.
- */
-static void smp2p_ut_local_gpio_in_update_open(struct seq_file *s)
-{
- int failed = 0;
- struct gpio_info *cb_info = &gpio_info[SMP2P_REMOTE_MOCK_PROC].in;
- int id;
- int ret;
- int virq;
- struct msm_smp2p_remote_mock *mock;
-
- seq_printf(s, "Running %s\n", __func__);
-
- cb_data_reset(cb_info);
- do {
- /* initialize mock edge */
- ret = smp2p_reset_mock_edge();
- UT_ASSERT_INT(ret, ==, 0);
-
- mock = msm_smp2p_get_remote_mock();
- UT_ASSERT_PTR(mock, !=, NULL);
-
- mock->rx_interrupt_count = 0;
- memset(&mock->remote_item, 0,
- sizeof(struct smp2p_smem_item));
- smp2p_init_header((struct smp2p_smem *)&mock->remote_item,
- SMP2P_REMOTE_MOCK_PROC, SMP2P_APPS_PROC,
- 0, 1);
- strlcpy(mock->remote_item.entries[0].name, "smp2p",
- SMP2P_MAX_ENTRY_NAME);
- SMP2P_SET_ENT_VALID(
- mock->remote_item.header.valid_total_ent, 1);
-
- /* register for interrupts */
- smp2p_gpio_open_test_entry("smp2p",
- SMP2P_REMOTE_MOCK_PROC, true);
-
- UT_ASSERT_INT(0, <, cb_info->irq_base_id);
- for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
- virq = cb_info->irq_base_id + id;
- UT_ASSERT_PTR(NULL, !=, irq_to_desc(virq));
- ret = request_irq(virq,
- smp2p_gpio_irq, IRQ_TYPE_EDGE_BOTH,
- "smp2p_test", cb_info);
- UT_ASSERT_INT(0, ==, ret);
- }
- if (failed)
- break;
-
- /* update the state value and complete negotiation */
- mock->remote_item.entries[0].entry = 0xDEADDEAD;
- msm_smp2p_set_remote_mock_exists(true);
- mock->tx_interrupt();
-
- /* verify delayed state updates were processed */
- for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
- virq = cb_info->irq_base_id + id;
-
- UT_ASSERT_INT(cb_info->cb_count, >, 0);
- if (0x1 & (0xDEADDEAD >> id)) {
- /* rising edge should have been triggered */
- if (!test_bit(id, cb_info->triggered_irqs)) {
- seq_printf(s, "%s:%d bit %d clear, ",
- __func__, __LINE__, id);
- seq_puts(s, "expected set\n");
- failed = 1;
- break;
- }
- } else {
- /* edge should not have been triggered */
- if (test_bit(id, cb_info->triggered_irqs)) {
- seq_printf(s, "%s:%d bit %d set, ",
- __func__, __LINE__, id);
- seq_puts(s, "expected clear\n");
- failed = 1;
- break;
- }
- }
- }
- if (failed)
- break;
-
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- pr_err("%s: Failed\n", __func__);
- seq_puts(s, "\tFailed\n");
- }
-
- /* unregister for interrupts */
- if (cb_info->irq_base_id) {
- for (id = 0; id < SMP2P_BITS_PER_ENTRY; ++id)
- free_irq(cb_info->irq_base_id + id, cb_info);
- }
-
- smp2p_gpio_open_test_entry("smp2p",
- SMP2P_REMOTE_MOCK_PROC, false);
-}
-
-/**
- * smp2p_gpio_write_bits - writes value to each GPIO pin specified in mask.
- *
- * @gpio: gpio test structure
- * @mask: 1 = write gpio_value to this GPIO pin
- * @gpio_value: value to write to GPIO pin
- */
-static void smp2p_gpio_write_bits(struct gpio_info *gpio, uint32_t mask,
- int gpio_value)
-{
- int n;
-
- for (n = 0; n < SMP2P_BITS_PER_ENTRY; ++n) {
- if (mask & 0x1)
- gpio_set_value(gpio->gpio_base_id + n, gpio_value);
- mask >>= 1;
- }
-}
-
-static void smp2p_gpio_set_bits(struct gpio_info *gpio, uint32_t mask)
-{
- smp2p_gpio_write_bits(gpio, mask, 1);
-}
-
-static void smp2p_gpio_clr_bits(struct gpio_info *gpio, uint32_t mask)
-{
- smp2p_gpio_write_bits(gpio, mask, 0);
-}
-
-/**
- * smp2p_gpio_get_value - reads the entire 32-bit GPIO entry
- *
- * @gpio: gpio structure
- * @returns: 32 bit value of GPIO pins
- */
-static uint32_t smp2p_gpio_get_value(struct gpio_info *gpio)
-{
- int n;
- uint32_t value = 0;
-
- for (n = 0; n < SMP2P_BITS_PER_ENTRY; ++n) {
- if (gpio_get_value(gpio->gpio_base_id + n))
- value |= 1 << n;
- }
- return value;
-}
-
-/**
- * smp2p_ut_remote_inout_core - Verify inbound/outbound functionality.
- *
- * @s: pointer to output file
- * @remote_pid: Remote processor to test
- * @name: Name of the test for reporting
- *
- * This test verifies inbound/outbound functionality for the remote processor.
- */
-static void smp2p_ut_remote_inout_core(struct seq_file *s, int remote_pid,
- const char *name)
-{
- int failed = 0;
- uint32_t request;
- uint32_t response;
- struct gpio_info *cb_in;
- struct gpio_info *cb_out;
- int id;
- int ret;
-
- seq_printf(s, "Running %s for '%s' remote pid %d\n",
- __func__, smp2p_pid_to_name(remote_pid), remote_pid);
-
- cb_in = &gpio_info[remote_pid].in;
- cb_out = &gpio_info[remote_pid].out;
- cb_data_reset(cb_in);
- cb_data_reset(cb_out);
- do {
- /* open test entries */
- msm_smp2p_deinit_rmt_lpb_proc(remote_pid);
- smp2p_gpio_open_test_entry("smp2p", remote_pid, true);
-
- /* register for interrupts */
- UT_ASSERT_INT(0, <, cb_in->gpio_base_id);
- UT_ASSERT_INT(0, <, cb_in->irq_base_id);
- for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
- int virq = cb_in->irq_base_id + id;
- UT_ASSERT_PTR(NULL, !=, irq_to_desc(virq));
- ret = request_irq(virq,
- smp2p_gpio_irq,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "smp2p_test", cb_in);
- UT_ASSERT_INT(0, ==, ret);
- }
- if (failed)
- break;
-
- /* write echo of data value 0 */
- UT_ASSERT_INT(0, <, cb_out->gpio_base_id);
- request = 0x0;
- SMP2P_SET_RMT_CMD_TYPE(request, 1);
- SMP2P_SET_RMT_CMD(request, SMP2P_LB_CMD_ECHO);
- SMP2P_SET_RMT_DATA(request, 0x0);
-
- smp2p_gpio_set_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
- smp2p_gpio_clr_bits(cb_out, ~SMP2P_RMT_IGNORE_MASK);
- smp2p_gpio_set_bits(cb_out, request);
-
- UT_ASSERT_INT(cb_in->cb_count, ==, 0);
- smp2p_gpio_clr_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
-
- /* verify response */
- do {
- /* wait for up to 32 changes */
- if (wait_for_completion_timeout(
- &cb_in->cb_completion, HZ / 2) == 0)
- break;
- reinit_completion(&cb_in->cb_completion);
- } while (cb_in->cb_count < 32);
- UT_ASSERT_INT(cb_in->cb_count, >, 0);
- response = smp2p_gpio_get_value(cb_in);
- SMP2P_SET_RMT_CMD_TYPE(request, 0);
- UT_ASSERT_HEX(request, ==, response);
-
- /* write echo of data value of all 1's */
- request = 0x0;
- SMP2P_SET_RMT_CMD_TYPE(request, 1);
- SMP2P_SET_RMT_CMD(request, SMP2P_LB_CMD_ECHO);
- SMP2P_SET_RMT_DATA(request, ~0);
-
- smp2p_gpio_set_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
- cb_data_reset(cb_in);
- smp2p_gpio_clr_bits(cb_out, ~SMP2P_RMT_IGNORE_MASK);
- smp2p_gpio_set_bits(cb_out, request);
-
- UT_ASSERT_INT(cb_in->cb_count, ==, 0);
- smp2p_gpio_clr_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
-
- /* verify response including 24 interrupts */
- do {
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &cb_in->cb_completion, HZ / 2),
- >, 0);
- reinit_completion(&cb_in->cb_completion);
- } while (cb_in->cb_count < 24);
- response = smp2p_gpio_get_value(cb_in);
- SMP2P_SET_RMT_CMD_TYPE(request, 0);
- UT_ASSERT_HEX(request, ==, response);
- UT_ASSERT_INT(24, ==, cb_in->cb_count);
-
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- pr_err("%s: Failed\n", name);
- seq_puts(s, "\tFailed\n");
- }
-
- /* unregister for interrupts */
- if (cb_in->irq_base_id) {
- for (id = 0; id < SMP2P_BITS_PER_ENTRY; ++id)
- free_irq(cb_in->irq_base_id + id, cb_in);
- }
-
- smp2p_gpio_open_test_entry("smp2p", remote_pid, false);
- msm_smp2p_init_rmt_lpb_proc(remote_pid);
-}
-
-/**
- * smp2p_ut_remote_inout - Verify inbound/outbound functionality for all.
- *
- * @s: pointer to output file
- *
- * This test verifies inbound and outbound functionality for all
- * configured remote processors.
- */
-static void smp2p_ut_remote_inout(struct seq_file *s)
-{
- struct smp2p_interrupt_config *int_cfg;
- int pid;
-
- int_cfg = smp2p_get_interrupt_config();
- if (!int_cfg) {
- seq_puts(s, "Remote processor config unavailable\n");
- return;
- }
-
- for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid) {
- if (!int_cfg[pid].is_configured)
- continue;
-
- smp2p_ut_remote_inout_core(s, pid, __func__);
- }
-}
-
-static int __init smp2p_debugfs_init(void)
-{
- /* register GPIO pins */
- (void)platform_driver_register(&smp2p_gpio_driver);
-
- /*
- * Add Unit Test entries.
- *
- * The idea with unit tests is that you can run all of them
- * from ADB shell by doing:
- * adb shell
- * cat ut*
- *
- * And if particular tests fail, you can then repeatedly run the
- *	failing tests as you debug and resolve them.
- */
- smp2p_debug_create("ut_local_gpio_out", smp2p_ut_local_gpio_out);
- smp2p_debug_create("ut_local_gpio_in", smp2p_ut_local_gpio_in);
- smp2p_debug_create("ut_local_gpio_in_update_open",
- smp2p_ut_local_gpio_in_update_open);
- smp2p_debug_create("ut_remote_gpio_inout", smp2p_ut_remote_inout);
- return 0;
-}
-late_initcall(smp2p_debugfs_init);
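The tests above are exposed through debugfs entries created by smp2p_debug_create(). That helper lives outside this file, so its exact signature here is an assumption; a minimal sketch of how such a hook can be wired up with debugfs and seq_file:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Each test streams its report through a seq_file 'show' callback. */
static int smp2p_debug_show(struct seq_file *s, void *unused)
{
	void (*show)(struct seq_file *) = s->private;

	show(s);
	return 0;
}

static int smp2p_debug_open(struct inode *inode, struct file *file)
{
	/* inode->i_private carries the test function registered below */
	return single_open(file, smp2p_debug_show, inode->i_private);
}

static const struct file_operations smp2p_debug_fops = {
	.open		= smp2p_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void smp2p_debug_create(const char *name,
			       void (*show)(struct seq_file *))
{
	debugfs_create_file(name, 0444, NULL, show, &smp2p_debug_fops);
}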
while (true) {
temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
- if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
+ if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
if (timeout == 0)
return -ETIME;
queue_delayed_work(hdmi->workq, &hdmi_ctrl->hdcp_cb_work, HZ/4);
}
+static void sde_hdmi_tx_set_avmute(void *ptr)
+{
+ struct sde_hdmi *hdmi_ctrl = (struct sde_hdmi *)ptr;
+ struct hdmi *hdmi;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+	pr_debug("setting avmute to true\n");
+
+ hdmi = hdmi_ctrl->ctrl.ctrl;
+ sde_hdmi_config_avmute(hdmi, true);
+}
+
void sde_hdmi_hdcp_off(struct sde_hdmi *hdmi_ctrl)
{
hdmi_ctrl->auth_state = false;
- if (sde_hdmi_tx_is_encryption_set(hdmi_ctrl) ||
- !sde_hdmi_tx_is_stream_shareable(hdmi_ctrl))
- rc = sde_hdmi_config_avmute(hdmi, true);
-
if (sde_hdmi_tx_is_panel_on(hdmi_ctrl)) {
pr_debug("%s: Reauthenticating\n", __func__);
if (hdmi_ctrl->hdcp_ops && hdmi_ctrl->hdcp_data) {
}
break;
- case HDCP_STATE_AUTH_FAIL_NOREAUTH:
+ case HDCP_STATE_AUTH_FAIL_NOREAUTH:
if (hdmi_ctrl->hdcp1_use_sw_keys && hdmi_ctrl->hdcp14_present) {
if (hdmi_ctrl->auth_state && !hdmi_ctrl->hdcp22_present)
hdcp1_set_enc(false);
hdcp_init_data.mutex = &hdmi_ctrl->hdcp_mutex;
hdcp_init_data.workq = hdmi->workq;
hdcp_init_data.notify_status = sde_hdmi_tx_hdcp_cb;
+ hdcp_init_data.avmute_sink = sde_hdmi_tx_set_avmute;
hdcp_init_data.cb_data = (void *)hdmi_ctrl;
hdcp_init_data.hdmi_tx_ver = hdmi_ctrl->hdmi_tx_major_version;
hdcp_init_data.sec_access = true;
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
SDE_HDCP_DEBUG("enc level changed %d\n", min_enc_lvl);
+ /* notify the client first about the new level */
+ if (enc_notify && ctrl->init_data.notify_status)
+ ctrl->init_data.notify_status(ctrl->init_data.cb_data, enc_lvl);
+
cdata.context = ctrl->lib_ctx;
sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+}
- if (enc_notify && ctrl->init_data.notify_status)
- ctrl->init_data.notify_status(ctrl->init_data.cb_data, enc_lvl);
+static void sde_hdmi_hdcp2p2_mute_sink(void *client_ctx)
+{
+ struct sde_hdmi_hdcp2p2_ctrl *ctrl =
+ (struct sde_hdmi_hdcp2p2_ctrl *)client_ctx;
+
+ if (!ctrl) {
+ SDE_ERROR("invalid input\n");
+ return;
+ }
+
+ /* call into client to send avmute to the sink */
+ if (ctrl->init_data.avmute_sink)
+ ctrl->init_data.avmute_sink(ctrl->init_data.cb_data);
}
static void sde_hdmi_hdcp2p2_auth_failed(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
.wakeup = sde_hdmi_hdcp2p2_wakeup,
.notify_lvl_change = sde_hdmi_hdcp2p2_min_level_change,
.srm_cb = sde_hdmi_hdcp2p2_srm_cb,
+ .mute_sink = sde_hdmi_hdcp2p2_mute_sink,
};
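Taken together, the hunks above add the following AV-mute plumbing (a sketch assembled from this patch; the HDCP 1.x path appears in a later hunk):

/*
 * hdcp library, on authentication failure:
 *   client_ops->mute_sink()            -> sde_hdmi_hdcp2p2_mute_sink()
 *     init_data.avmute_sink(cb_data)   -> sde_hdmi_tx_set_avmute()
 *       sde_hdmi_config_avmute(hdmi, true)  blanks the stream
 *
 * The HDCP 1.x code calls init_data.avmute_sink() directly from
 * sde_hdcp_1x_update_auth_status() when the state is HDCP_STATE_AUTH_FAIL.
 */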
static struct hdcp_txmtr_ops txmtr_ops;
return;
}
+ /* detach/ignore user data if 'disabled' */
+ if (!scale_v2.enable) {
+ SDE_DEBUG_PLANE(psde, "scale data removed\n");
+ return;
+ }
+
/* populate from user space */
pe = &(pp->pixel_ext);
memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
struct workqueue_struct *workq;
void *cb_data;
void (*notify_status)(void *cb_data, enum sde_hdcp_states status);
+ void (*avmute_sink)(void *cb_data);
struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
u8 sink_rx_status;
u16 *version;
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
static void sde_hdcp_1x_update_auth_status(struct sde_hdcp_1x *hdcp)
{
+ if (sde_hdcp_1x_state(HDCP_STATE_AUTH_FAIL))
+ hdcp->init_data.avmute_sink(hdcp->init_data.cb_data);
+
if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATED)) {
sde_hdcp_1x_cache_topology(hdcp);
sde_hdcp_1x_notify_topology();
if (!init_data || !init_data->core_io || !init_data->qfprom_io ||
!init_data->mutex || !init_data->notify_status ||
- !init_data->workq || !init_data->cb_data) {
+ !init_data->workq || !init_data->cb_data ||
+ !init_data->avmute_sink) {
pr_err("invalid input\n");
goto error;
}
nv_connector->edid = NULL;
}
- /* Outputs are only polled while runtime active, so acquiring a
- * runtime PM ref here is unnecessary (and would deadlock upon
- * runtime suspend because it waits for polling to finish).
+ /* Outputs are only polled while runtime active, so resuming the
+ * device here is unnecessary (and would deadlock upon runtime suspend
+	 * because it waits for polling to finish). We do, however, want to
+ * prevent the autosuspend timer from elapsing during this operation
+ * if possible.
*/
- if (!drm_kms_helper_is_poll_worker()) {
- ret = pm_runtime_get_sync(connector->dev->dev);
+ if (drm_kms_helper_is_poll_worker()) {
+ pm_runtime_get_noresume(dev->dev);
+ } else {
+ ret = pm_runtime_get_sync(dev->dev);
if (ret < 0 && ret != -EACCES)
return conn_status;
}
out:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
- }
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
return conn_status;
}
}
/* load and execute some other ucode image (bios therm?) */
- return pmu_load(init, 0x01, post, NULL, NULL);
+ pmu_load(init, 0x01, post, NULL, NULL);
+ return 0;
}
static const struct nvkm_devinit_func
kgsl_context_put(context);
break;
}
+ case KGSL_PROP_SECURE_BUFFER_ALIGNMENT:
+ {
+ unsigned int align;
+
+ if (param->sizebytes != sizeof(unsigned int)) {
+ result = -EINVAL;
+ break;
+ }
+ /*
+	 * XPUv2 imposes a 1MB alignment constraint on secure memory,
+	 * whereas the hypervisor does not. The driver must therefore
+	 * satisfy the stricter requirement when allocating secure
+	 * memory.
+ */
+ align = MMU_FEATURE(&dev_priv->device->mmu,
+ KGSL_MMU_HYP_SECURE_ALLOC) ? PAGE_SIZE : SZ_1M;
+
+ if (copy_to_user(param->value, &align, sizeof(align)))
+ result = -EFAULT;
+
+ break;
+ }
+ case KGSL_PROP_SECURE_CTXT_SUPPORT:
+ {
+ unsigned int secure_ctxt;
+
+ if (param->sizebytes != sizeof(unsigned int)) {
+ result = -EINVAL;
+ break;
+ }
+
+ secure_ctxt = dev_priv->device->mmu.secured ? 1 : 0;
+
+ if (copy_to_user(param->value, &secure_ctxt,
+ sizeof(secure_ctxt)))
+ result = -EFAULT;
+
+ break;
+ }
default:
if (is_compat_task())
result = dev_priv->device->ftbl->getproperty_compat(
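User space reads these limits back through the standard kgsl getproperty ioctl. A hypothetical query, assuming the usual msm_kgsl.h UAPI names (struct kgsl_device_getproperty and IOCTL_KGSL_DEVICE_GETPROPERTY):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/msm_kgsl.h>	/* assumed msm UAPI header */

/* Hypothetical user-space query of the new alignment property. */
static unsigned int query_secure_alignment(int fd)
{
	unsigned int align = 0;
	struct kgsl_device_getproperty prop = {
		.type = KGSL_PROP_SECURE_BUFFER_ALIGNMENT,
		.value = &align,
		.sizebytes = sizeof(align),
	};

	if (ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &prop))
		perror("IOCTL_KGSL_DEVICE_GETPROPERTY");
	return align;	/* PAGE_SIZE, or SZ_1M when XPUv2 enforces 1MB */
}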
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
#define USB_DEVICE_ID_SONY_PS3_BDREMOTE 0x0306
#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0
#define USB_DEVICE_ID_SONY_MOTION_CONTROLLER 0x03d5
#define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
#define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002
ret = sysfs_create_group(&hdev->dev.kobj,
&ntrig_attribute_group);
+ if (ret)
+ hid_err(hdev, "cannot create sysfs group\n");
return 0;
err_free:
.driver_data = DUALSHOCK4_CONTROLLER_USB },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
.driver_data = DUALSHOCK4_CONTROLLER_BT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+ .driver_data = DUALSHOCK4_CONTROLLER_USB },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+ .driver_data = DUALSHOCK4_CONTROLLER_BT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
+ .driver_data = DUALSHOCK4_CONTROLLER_USB },
{ }
};
MODULE_DEVICE_TABLE(hid, sony_devices);
*/
fcopy_transaction.recv_len = recvlen;
- fcopy_transaction.recv_channel = channel;
fcopy_transaction.recv_req_id = requestid;
fcopy_transaction.fcopy_msg = fcopy_msg;
int hv_fcopy_init(struct hv_util_service *srv)
{
recv_buffer = srv->recv_buffer;
+ fcopy_transaction.recv_channel = srv->channel;
init_completion(&release_event);
/*
static void kvp_respond_to_host(struct hv_kvp_msg *msg, int error);
static void kvp_timeout_func(struct work_struct *dummy);
+static void kvp_host_handshake_func(struct work_struct *dummy);
static void kvp_register(int);
static DECLARE_DELAYED_WORK(kvp_timeout_work, kvp_timeout_func);
+static DECLARE_DELAYED_WORK(kvp_host_handshake_work, kvp_host_handshake_func);
static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
static const char kvp_devname[] = "vmbus/hv_kvp";
hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
}
+static void kvp_host_handshake_func(struct work_struct *dummy)
+{
+ hv_poll_channel(kvp_transaction.recv_channel, hv_kvp_onchannelcallback);
+}
+
static int kvp_handle_handshake(struct hv_kvp_msg *msg)
{
switch (msg->kvp_hdr.operation) {
pr_debug("KVP: userspace daemon ver. %d registered\n",
KVP_OP_REGISTER);
kvp_register(dm_reg_value);
- kvp_transaction.state = HVUTIL_READY;
+
+ /*
+	 * If we're still negotiating with the host, cancel the timeout
+	 * work so that the channel is not polled twice.
+ */
+ cancel_delayed_work_sync(&kvp_host_handshake_work);
+ hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
return 0;
}
struct icmsg_negotiate *negop = NULL;
int util_fw_version;
int kvp_srv_version;
+ static enum {NEGO_NOT_STARTED,
+ NEGO_IN_PROGRESS,
+		     NEGO_FINISHED} host_negotiated = NEGO_NOT_STARTED;
+ if (kvp_transaction.state < HVUTIL_READY) {
+ /*
+		 * If the userspace daemon is not connected and the host is
+		 * asking us to negotiate, delay the handshake so that no
+		 * messages are lost. This is important for the Failover IP
+		 * setting.
+		 */
+		if (host_negotiated == NEGO_NOT_STARTED) {
+			host_negotiated = NEGO_IN_PROGRESS;
+ schedule_delayed_work(&kvp_host_handshake_work,
+ HV_UTIL_NEGO_TIMEOUT * HZ);
+ }
+ return;
+ }
if (kvp_transaction.state > HVUTIL_READY)
return;
-
+recheck:
vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
&requestid);
*/
kvp_transaction.recv_len = recvlen;
- kvp_transaction.recv_channel = channel;
kvp_transaction.recv_req_id = requestid;
kvp_transaction.kvp_msg = kvp_msg;
vmbus_sendpacket(channel, recv_buffer,
recvlen, requestid,
VM_PKT_DATA_INBAND, 0);
+
+	host_negotiated = NEGO_FINISHED;
+
+ goto recheck;
}
}
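Putting the pieces together, the NEGO_* states implement roughly this flow (a sketch; the 60-second bound is HV_UTIL_NEGO_TIMEOUT, defined in a later hunk):

/*
 *   host asks to negotiate, daemon not yet registered
 *       -> NEGO_IN_PROGRESS, kvp_host_handshake_work scheduled so the
 *          channel is re-polled within HV_UTIL_NEGO_TIMEOUT seconds
 *   daemon registers (KVP_OP_REGISTER)
 *       -> handshake work cancelled, channel polled immediately
 *   negotiate packet answered via vmbus_sendpacket()
 *       -> NEGO_FINISHED, callback loops ("goto recheck") to drain any
 *          requests that queued up in the meantime
 */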
hv_kvp_init(struct hv_util_service *srv)
{
recv_buffer = srv->recv_buffer;
+ kvp_transaction.recv_channel = srv->channel;
init_completion(&release_event);
/*
void hv_kvp_deinit(void)
{
kvp_transaction.state = HVUTIL_DEVICE_DYING;
+ cancel_delayed_work_sync(&kvp_host_handshake_work);
cancel_delayed_work_sync(&kvp_timeout_work);
cancel_work_sync(&kvp_sendkey_work);
hvutil_transport_destroy(hvt);
default:
return -EINVAL;
}
- vss_transaction.state = HVUTIL_READY;
+ hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
pr_debug("VSS: userspace daemon ver. %d registered\n", dm_reg_value);
return 0;
}
*/
vss_transaction.recv_len = recvlen;
- vss_transaction.recv_channel = channel;
vss_transaction.recv_req_id = requestid;
vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
return -ENOTSUPP;
}
recv_buffer = srv->recv_buffer;
+ vss_transaction.recv_channel = srv->channel;
/*
* When this driver loads, the user level daemon that
srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
if (!srv->recv_buffer)
return -ENOMEM;
+ srv->channel = dev->channel;
if (srv->util_init) {
ret = srv->util_init(srv);
if (ret) {
#define HV_UTIL_TIMEOUT 30
/*
+ * Timeout for guest-host handshake for services.
+ * Timeout, in seconds, for the guest/host handshake of util services.
+#define HV_UTIL_NEGO_TIMEOUT 60
+
+/*
* The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
* is set by CPUID(HVCPUID_VERSION_FEATURES).
*/
return clamp_val(reg, 0, 1023) & (0xff << 2);
}
-static u16 adt7475_read_word(struct i2c_client *client, int reg)
+static int adt7475_read_word(struct i2c_client *client, int reg)
{
- u16 val;
+ int val1, val2;
- val = i2c_smbus_read_byte_data(client, reg);
- val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8);
+ val1 = i2c_smbus_read_byte_data(client, reg);
+ if (val1 < 0)
+ return val1;
+ val2 = i2c_smbus_read_byte_data(client, reg + 1);
+ if (val2 < 0)
+ return val2;
- return val;
+ return val1 | (val2 << 8);
}
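Because the return type is now int, callers must treat negative values as errno rather than as register data. A hypothetical call site showing the pattern (the function name and destination are inventions for illustration):

static int adt7475_update_word(struct i2c_client *client, int reg,
			       u16 *out)
{
	int ret = adt7475_read_word(client, reg);

	if (ret < 0)
		return ret;	/* negative errno from the SMBus layer */

	*out = ret;		/* low byte | (high byte << 8) */
	return 0;
}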
static void adt7475_write_word(struct i2c_client *client, int reg, u16 val)
* Bi-directional Current/Power Monitor with I2C Interface
* Datasheet: http://www.ti.com/product/ina230
*
- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
* Thanks to Jan Volkering
*
* This program is free software; you can redistribute it and/or modify
return 0;
}
+static ssize_t ina2xx_show_shunt(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt);
+}
+
static ssize_t ina2xx_store_shunt(struct device *dev,
struct device_attribute *da,
const char *buf, size_t count)
/* shunt resistance */
static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
- ina2xx_show_value, ina2xx_store_shunt,
+ ina2xx_show_shunt, ina2xx_store_shunt,
INA2XX_CALIBRATION);
/* update interval (ina226 only) */
NULL,
};
+struct etmv4_reg {
+ void __iomem *addr;
+ u32 data;
+};
+
+static void do_smp_cross_read(void *data)
+{
+ struct etmv4_reg *reg = data;
+
+ reg->data = readl_relaxed(reg->addr);
+}
+
+static u32 etmv4_cross_read(const struct device *dev, u32 offset)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
+ struct etmv4_reg reg;
+
+ reg.addr = drvdata->base + offset;
+
+	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
+ return reg.data;
+}
+#define coresight_cross_read(name, offset) \
+static ssize_t name##_show(struct device *_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ u32 val; \
+ pm_runtime_get_sync(_dev->parent); \
+ \
+ val = etmv4_cross_read(_dev->parent, offset); \
+ \
+ pm_runtime_put_sync(_dev->parent); \
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", val); \
+} \
+static DEVICE_ATTR_RO(name)
+
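For reference, coresight_cross_read(trcoslsr, TRCOSLSR) expands to the equivalent of the following; the smp_call_function_single() inside etmv4_cross_read() ensures the register is actually read on drvdata->cpu, the CPU associated with the trace unit:

static ssize_t trcoslsr_show(struct device *_dev,
			     struct device_attribute *attr, char *buf)
{
	u32 val;

	pm_runtime_get_sync(_dev->parent);
	val = etmv4_cross_read(_dev->parent, TRCOSLSR);
	pm_runtime_put_sync(_dev->parent);
	return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
}
static DEVICE_ATTR_RO(trcoslsr);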
#define coresight_simple_func(name, offset) \
static ssize_t name##_show(struct device *_dev, \
struct device_attribute *attr, char *buf) \
} \
DEVICE_ATTR_RO(name)
-coresight_simple_func(trcoslsr, TRCOSLSR);
-coresight_simple_func(trcpdcr, TRCPDCR);
-coresight_simple_func(trcpdsr, TRCPDSR);
-coresight_simple_func(trclsr, TRCLSR);
-coresight_simple_func(trcauthstatus, TRCAUTHSTATUS);
-coresight_simple_func(trcdevid, TRCDEVID);
-coresight_simple_func(trcdevtype, TRCDEVTYPE);
-coresight_simple_func(trcpidr0, TRCPIDR0);
-coresight_simple_func(trcpidr1, TRCPIDR1);
-coresight_simple_func(trcpidr2, TRCPIDR2);
-coresight_simple_func(trcpidr3, TRCPIDR3);
+coresight_cross_read(trcoslsr, TRCOSLSR);
+coresight_cross_read(trcpdcr, TRCPDCR);
+coresight_cross_read(trcpdsr, TRCPDSR);
+coresight_cross_read(trclsr, TRCLSR);
+coresight_cross_read(trcauthstatus, TRCAUTHSTATUS);
+coresight_cross_read(trcdevid, TRCDEVID);
+coresight_cross_read(trcdevtype, TRCDEVTYPE);
+coresight_cross_read(trcpidr0, TRCPIDR0);
+coresight_cross_read(trcpidr1, TRCPIDR1);
+coresight_cross_read(trcpidr2, TRCPIDR2);
+coresight_cross_read(trcpidr3, TRCPIDR3);
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
&dev_attr_trcoslsr.attr,
NULL,
};
-coresight_simple_func(trcidr0, TRCIDR0);
-coresight_simple_func(trcidr1, TRCIDR1);
-coresight_simple_func(trcidr2, TRCIDR2);
-coresight_simple_func(trcidr3, TRCIDR3);
-coresight_simple_func(trcidr4, TRCIDR4);
-coresight_simple_func(trcidr5, TRCIDR5);
+coresight_cross_read(trcidr0, TRCIDR0);
+coresight_cross_read(trcidr1, TRCIDR1);
+coresight_cross_read(trcidr2, TRCIDR2);
+coresight_cross_read(trcidr3, TRCIDR3);
+coresight_cross_read(trcidr4, TRCIDR4);
+coresight_cross_read(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
-coresight_simple_func(trcidr8, TRCIDR8);
-coresight_simple_func(trcidr9, TRCIDR9);
-coresight_simple_func(trcidr10, TRCIDR10);
-coresight_simple_func(trcidr11, TRCIDR11);
-coresight_simple_func(trcidr12, TRCIDR12);
-coresight_simple_func(trcidr13, TRCIDR13);
+coresight_cross_read(trcidr8, TRCIDR8);
+coresight_cross_read(trcidr9, TRCIDR9);
+coresight_cross_read(trcidr10, TRCIDR10);
+coresight_cross_read(trcidr11, TRCIDR11);
+coresight_cross_read(trcidr12, TRCIDR12);
+coresight_cross_read(trcidr13, TRCIDR13);
static struct attribute *coresight_etmv4_trcidr_attrs[] = {
&dev_attr_trcidr0.attr,
}
#ifdef CONFIG_ACPI
+static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv,
+ acpi_physical_address address)
+{
+ return address >= priv->smba &&
+ address <= pci_resource_end(priv->pci_dev, SMBBAR);
+}
+
static acpi_status
i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
u64 *value, void *handler_context, void *region_context)
*/
mutex_lock(&priv->acpi_lock);
- if (!priv->acpi_reserved) {
+ if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
priv->acpi_reserved = true;
dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
struct i2c_msm_ctrl *ctrl = i2c_get_adapdata(adap);
struct i2c_msm_xfer *xfer = &ctrl->xfer;
+ if (num < 1) {
+ dev_err(ctrl->dev,
+			"invalid number of msgs (%d) received\n", num);
+ return -EINVAL;
+ }
+
if (IS_ERR_OR_NULL(msgs)) {
		dev_err(ctrl->dev, "error on msgs: accessing invalid pointer location\n");
return PTR_ERR(msgs);
mt_params[3].type = ACPI_TYPE_INTEGER;
mt_params[3].integer.value = len;
mt_params[4].type = ACPI_TYPE_BUFFER;
+ mt_params[4].buffer.length = len;
mt_params[4].buffer.pointer = data->block + 1;
}
break;
return ret;
for (msg = msgs; msg < emsg; msg++) {
- /* If next message is read, skip the stop condition */
- bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
- /* but, force it if I2C_M_STOP is set */
- if (msg->flags & I2C_M_STOP)
- stop = true;
+ /* Emit STOP if it is the last message or I2C_M_STOP is set. */
+ bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
ret = uniphier_fi2c_master_xfer_one(adap, msg, stop);
if (ret)
return ret;
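To make the behavioural change concrete, a combined write-read-write transfer terminates as follows under the old and new rules (Sr = repeated START, P = STOP):

/*
 * old rule (skip STOP only when the next message is a read):
 *     W [Sr] R [P] W [P]   -- a STOP lands in the middle of the transfer
 * new rule (STOP after the last message, or when I2C_M_STOP is set):
 *     W [Sr] R [Sr] W [P]  -- repeated START keeps the transfer atomic
 */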
for (msg = msgs; msg < emsg; msg++) {
- /* If next message is read, skip the stop condition */
- bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
- /* but, force it if I2C_M_STOP is set */
- if (msg->flags & I2C_M_STOP)
- stop = true;
+ /* Emit STOP if it is the last message or I2C_M_STOP is set. */
+ bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
ret = uniphier_i2c_master_xfer_one(adap, msg, stop);
if (ret)
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);
+static const struct file_operations ucma_fops;
+
static inline struct ucma_context *_ucma_find_context(int id,
struct ucma_file *file)
{
f = fdget(cmd.fd);
if (!f.file)
return -ENOENT;
+ if (f.file->f_op != &ucma_fops) {
+ ret = -EINVAL;
+ goto file_put;
+ }
/* Validate current fd and prevent destruction of id. */
ctx = ucma_get_ctx(f.file->private_data, cmd.id);
mutex_lock(&mut);
if (!ctx->closing) {
mutex_unlock(&mut);
+ ucma_put_ctx(ctx);
+ wait_for_completion(&ctx->comp);
/* rdma_destroy_id ensures that no event handlers are
* inflight for that id before releasing it.
*/
t4_set_wq_in_error(&qhp->wq);
if (qhp->ibqp.uobject) {
+
+ /* for user qps, qhp->wq.flushed is protected by qhp->mutex */
+ if (qhp->wq.flushed)
+ return;
+
+ qhp->wq.flushed = 1;
t4_set_cq_in_error(&rchp->cq);
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
struct srp_rdma_ch *ch;
- int i;
+ int i, j;
u8 status;
shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
- for (i = 0; i < target->req_ring_size; ++i) {
- struct srp_request *req = &ch->req_ring[i];
+ for (j = 0; j < target->req_ring_size; ++j) {
+ struct srp_request *req = &ch->req_ring[j];
srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
}
*/
-static unsigned char atakbd_keycode[0x72] = { /* American layout */
- [0] = KEY_GRAVE,
+static unsigned char atakbd_keycode[0x73] = { /* American layout */
[1] = KEY_ESC,
[2] = KEY_1,
[3] = KEY_2,
[38] = KEY_L,
[39] = KEY_SEMICOLON,
[40] = KEY_APOSTROPHE,
- [41] = KEY_BACKSLASH, /* FIXME, '#' */
+ [41] = KEY_GRAVE,
[42] = KEY_LEFTSHIFT,
- [43] = KEY_GRAVE, /* FIXME: '~' */
+ [43] = KEY_BACKSLASH,
[44] = KEY_Z,
[45] = KEY_X,
[46] = KEY_C,
[66] = KEY_F8,
[67] = KEY_F9,
[68] = KEY_F10,
- [69] = KEY_ESC,
- [70] = KEY_DELETE,
- [71] = KEY_KP7,
- [72] = KEY_KP8,
- [73] = KEY_KP9,
+ [71] = KEY_HOME,
+ [72] = KEY_UP,
[74] = KEY_KPMINUS,
- [75] = KEY_KP4,
- [76] = KEY_KP5,
- [77] = KEY_KP6,
+ [75] = KEY_LEFT,
+ [77] = KEY_RIGHT,
[78] = KEY_KPPLUS,
- [79] = KEY_KP1,
- [80] = KEY_KP2,
- [81] = KEY_KP3,
- [82] = KEY_KP0,
- [83] = KEY_KPDOT,
- [90] = KEY_KPLEFTPAREN,
- [91] = KEY_KPRIGHTPAREN,
- [92] = KEY_KPASTERISK, /* FIXME */
- [93] = KEY_KPASTERISK,
- [94] = KEY_KPPLUS,
- [95] = KEY_HELP,
+ [80] = KEY_DOWN,
+ [82] = KEY_INSERT,
+ [83] = KEY_DELETE,
[96] = KEY_102ND,
- [97] = KEY_KPASTERISK, /* FIXME */
- [98] = KEY_KPSLASH,
+ [97] = KEY_UNDO,
+ [98] = KEY_HELP,
[99] = KEY_KPLEFTPAREN,
[100] = KEY_KPRIGHTPAREN,
[101] = KEY_KPSLASH,
[102] = KEY_KPASTERISK,
- [103] = KEY_UP,
- [104] = KEY_KPASTERISK, /* FIXME */
- [105] = KEY_LEFT,
- [106] = KEY_RIGHT,
- [107] = KEY_KPASTERISK, /* FIXME */
- [108] = KEY_DOWN,
- [109] = KEY_KPASTERISK, /* FIXME */
- [110] = KEY_KPASTERISK, /* FIXME */
- [111] = KEY_KPASTERISK, /* FIXME */
- [112] = KEY_KPASTERISK, /* FIXME */
- [113] = KEY_KPASTERISK /* FIXME */
+ [103] = KEY_KP7,
+ [104] = KEY_KP8,
+ [105] = KEY_KP9,
+ [106] = KEY_KP4,
+ [107] = KEY_KP5,
+ [108] = KEY_KP6,
+ [109] = KEY_KP1,
+ [110] = KEY_KP2,
+ [111] = KEY_KP3,
+ [112] = KEY_KP0,
+ [113] = KEY_KPDOT,
+ [114] = KEY_KPENTER,
};
static struct input_dev *atakbd_dev;
static void atakbd_interrupt(unsigned char scancode, char down)
{
- if (scancode < 0x72) { /* scancodes < 0xf2 are keys */
+	if (scancode < 0x73) {		/* scancodes < 0x73 are keys */
// report raw events here?
scancode = atakbd_keycode[scancode];
- if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */
- input_report_key(atakbd_dev, scancode, 1);
- input_report_key(atakbd_dev, scancode, 0);
- input_sync(atakbd_dev);
- } else {
- input_report_key(atakbd_dev, scancode, down);
- input_sync(atakbd_dev);
- }
- } else /* scancodes >= 0xf2 are mouse data, most likely */
+ input_report_key(atakbd_dev, scancode, down);
+ input_sync(atakbd_dev);
+	} else	/* scancodes >= 0x73 are mouse data, most likely */
printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode);
return;
static const char * const middle_button_pnp_ids[] = {
"LEN2131", /* ThinkPad P52 w/ NFC */
"LEN2132", /* ThinkPad P52 */
+ "LEN2133", /* ThinkPad P72 w/ NFC */
+ "LEN2134", /* ThinkPad P72 */
NULL
};
static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{
- if (from_cblock(new_size) > from_cblock(cache->cache_size))
- return true;
+ if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
+ if (cache->sized) {
+ DMERR("%s: unable to extend cache due to missing cache table reload",
+ cache_device_name(cache));
+ return false;
+ }
+ }
/*
* We can't drop a dirty block when shrinking the cache.
sector_t data_block_size;
/*
+ * We reserve a section of the metadata for commit overhead.
+ * All reported space does *not* include this.
+ */
+ dm_block_t metadata_reserve;
+
+ /*
* Set if a transaction has to be aborted but the attempt to roll back
* to the previous (good) transaction failed. The only pool metadata
* operation possible in this state is the closing of the device.
return dm_tm_commit(pmd->tm, sblock);
}
+static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
+{
+ int r;
+ dm_block_t total;
+ dm_block_t max_blocks = 4096; /* 16M */
+
+ r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
+ if (r) {
+ DMERR("could not get size of metadata device");
+ pmd->metadata_reserve = max_blocks;
+ } else
+ pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
+}
+
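A worked example of the sizing above, assuming 4KiB metadata blocks (the device sizes are hypothetical):

/*
 *   16MiB metadata device:  total = 4096 blocks
 *       reserve = min(4096, 4096 / 10)   = 409 blocks (~1.6MiB)
 *   1GiB metadata device:   total = 262144 blocks
 *       reserve = min(4096, 262144 / 10) = 4096 blocks (16MiB cap)
 */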
struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool format_device)
return ERR_PTR(r);
}
+ __set_metadata_reserve(pmd);
+
return pmd;
}
down_read(&pmd->root_lock);
if (!pmd->fail_io)
r = dm_sm_get_nr_free(pmd->metadata_sm, result);
+
+ if (!r) {
+ if (*result < pmd->metadata_reserve)
+ *result = 0;
+ else
+ *result -= pmd->metadata_reserve;
+ }
up_read(&pmd->root_lock);
return r;
int r = -EINVAL;
down_write(&pmd->root_lock);
- if (!pmd->fail_io)
+ if (!pmd->fail_io) {
r = __resize_space_map(pmd->metadata_sm, new_count);
+ if (!r)
+ __set_metadata_reserve(pmd);
+ }
up_write(&pmd->root_lock);
return r;
enum pool_mode {
PM_WRITE, /* metadata may be changed */
PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */
+
+ /*
+	 * Like READ_ONLY, except that the pool may switch back to WRITE
+	 * on a metadata resize. Reported to userspace as READ_ONLY.
+ */
+ PM_OUT_OF_METADATA_SPACE,
PM_READ_ONLY, /* metadata may not be changed */
+
PM_FAIL, /* all I/O fails */
};
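The position of the new mode in this enum is load-bearing, because several checks below compare modes numerically:

/*
 *   PM_WRITE < PM_OUT_OF_DATA_SPACE < PM_OUT_OF_METADATA_SPACE
 *            < PM_READ_ONLY < PM_FAIL
 *
 * e.g. "get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE" rejects commits
 * and pool target messages in all of the last three modes.
 */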
static void requeue_bios(struct pool *pool);
-static void check_for_space(struct pool *pool)
+static bool is_read_only_pool_mode(enum pool_mode mode)
+{
+ return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
+}
+
+static bool is_read_only(struct pool *pool)
+{
+ return is_read_only_pool_mode(get_pool_mode(pool));
+}
+
+static void check_for_metadata_space(struct pool *pool)
+{
+ int r;
+ const char *ooms_reason = NULL;
+ dm_block_t nr_free;
+
+ r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
+ if (r)
+ ooms_reason = "Could not get free metadata blocks";
+ else if (!nr_free)
+ ooms_reason = "No free metadata blocks";
+
+ if (ooms_reason && !is_read_only(pool)) {
+ DMERR("%s", ooms_reason);
+ set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
+ }
+}
+
+static void check_for_data_space(struct pool *pool)
{
int r;
dm_block_t nr_free;
{
int r;
- if (get_pool_mode(pool) >= PM_READ_ONLY)
+ if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
return -EINVAL;
r = dm_pool_commit_metadata(pool->pmd);
if (r)
metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
- else
- check_for_space(pool);
+ else {
+ check_for_metadata_space(pool);
+ check_for_data_space(pool);
+ }
return r;
}
return r;
}
+ r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
+ if (r) {
+ metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
+ return r;
+ }
+
+ if (!free_blocks) {
+ /* Let's commit before we use up the metadata reserve. */
+ r = commit(pool);
+ if (r)
+ return r;
+ }
+
return 0;
}
case PM_OUT_OF_DATA_SPACE:
return pool->pf.error_if_no_space ? -ENOSPC : 0;
+ case PM_OUT_OF_METADATA_SPACE:
case PM_READ_ONLY:
case PM_FAIL:
return -EIO;
error_retry_list(pool);
break;
+ case PM_OUT_OF_METADATA_SPACE:
case PM_READ_ONLY:
- if (old_mode != new_mode)
+ if (!is_read_only_pool_mode(old_mode))
notify_of_pool_mode_change(pool, "read-only");
dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_read_only;
DMINFO("%s: growing the metadata device from %llu to %llu blocks",
dm_device_name(pool->pool_md),
sb_metadata_dev_size, metadata_dev_size);
+
+ if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
+ set_pool_mode(pool, PM_WRITE);
+
r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
if (r) {
metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
- if (get_pool_mode(pool) >= PM_READ_ONLY) {
+ if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
dm_device_name(pool->pool_md));
return -EOPNOTSUPP;
dm_block_t nr_blocks_data;
dm_block_t nr_blocks_metadata;
dm_block_t held_root;
+ enum pool_mode mode;
char buf[BDEVNAME_SIZE];
char buf2[BDEVNAME_SIZE];
struct pool_c *pt = ti->private;
else
DMEMIT("- ");
- if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
+ mode = get_pool_mode(pool);
+ if (mode == PM_OUT_OF_DATA_SPACE)
DMEMIT("out_of_data_space ");
- else if (pool->pf.mode == PM_READ_ONLY)
+ else if (is_read_only_pool_mode(mode))
DMEMIT("ro ");
else
DMEMIT("rw ");
while (cinfo->recovery_map) {
slot = fls64((u64)cinfo->recovery_map) - 1;
- /* Clear suspend_area associated with the bitmap */
- spin_lock_irq(&cinfo->suspend_lock);
- list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
- if (slot == s->slot) {
- list_del(&s->list);
- kfree(s);
- }
- spin_unlock_irq(&cinfo->suspend_lock);
-
snprintf(str, 64, "bitmap%04d", slot);
bm_lockres = lockres_init(mddev, str, NULL, 1);
if (!bm_lockres) {
pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
goto dlm_unlock;
}
+
+ /* Clear suspend_area associated with the bitmap */
+ spin_lock_irq(&cinfo->suspend_lock);
+ list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
+ if (slot == s->slot) {
+ list_del(&s->list);
+ kfree(s);
+ }
+ spin_unlock_irq(&cinfo->suspend_lock);
+
if (hi > 0) {
/* TODO:Wait for current resync to get over */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
allow_barrier(conf);
}
+ raise_barrier(conf, 0);
read_more:
/* Now schedule reads for blocks from sector_nr to last */
r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
r10_bio->state = 0;
- raise_barrier(conf, sectors_done != 0);
+ raise_barrier(conf, 1);
atomic_set(&r10_bio->remaining, 0);
r10_bio->mddev = mddev;
r10_bio->sector = sector_nr;
if (sector_nr <= last)
goto read_more;
+ lower_barrier(conf);
+
/* Now that we have done the whole section we can
* update reshape_progress
*/
* set COM8
*/
if (priv->band_filter) {
- ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, 1);
+ ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, BNDF_ON_OFF);
if (!ret)
ret = ov772x_mask_set(client, BDBASE,
0xff, 256 - priv->band_filter);
struct v4l2_pix_format_mplane *pixm,
const struct fimc_fmt **fmt)
{
- *fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+ const struct fimc_fmt *__fmt;
+
+ __fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+
+ if (fmt)
+ *fmt = __fmt;
pixm->colorspace = V4L2_COLORSPACE_SRGB;
pixm->field = V4L2_FIELD_NONE;
- pixm->num_planes = (*fmt)->memplanes;
- pixm->pixelformat = (*fmt)->fourcc;
+ pixm->num_planes = __fmt->memplanes;
+ pixm->pixelformat = __fmt->fourcc;
/*
	 * TODO: double-check against the documentation that these width/height
* constraints are correct.
sizeof(struct viu_reg), DRV_NAME)) {
dev_err(&op->dev, "Error while requesting mem region\n");
ret = -EBUSY;
- goto err;
+ goto err_irq;
}
/* remap registers */
if (!viu_regs) {
dev_err(&op->dev, "Can't map register set\n");
ret = -ENOMEM;
- goto err;
+ goto err_irq;
}
/* Prepare our private structure */
if (!viu_dev) {
dev_err(&op->dev, "Can't allocate private structure\n");
ret = -ENOMEM;
- goto err;
+ goto err_irq;
}
viu_dev->vr = viu_regs;
ret = v4l2_device_register(viu_dev->dev, &viu_dev->v4l2_dev);
if (ret < 0) {
dev_err(&op->dev, "v4l2_device_register() failed: %d\n", ret);
- goto err;
+ goto err_irq;
}
ad = i2c_get_adapter(0);
+ if (!ad) {
+ ret = -EFAULT;
+ dev_err(&op->dev, "couldn't get i2c adapter\n");
+ goto err_v4l2;
+ }
v4l2_ctrl_handler_init(&viu_dev->hdl, 5);
if (viu_dev->hdl.error) {
ret = viu_dev->hdl.error;
dev_err(&op->dev, "couldn't register control\n");
- goto err_vdev;
+ goto err_i2c;
}
/* This control handler will inherit the control(s) from the
sub-device(s). */
vdev = video_device_alloc();
if (vdev == NULL) {
ret = -ENOMEM;
- goto err_vdev;
+ goto err_hdl;
}
*vdev = viu_template;
ret = video_register_device(viu_dev->vdev, VFL_TYPE_GRABBER, -1);
if (ret < 0) {
video_device_release(viu_dev->vdev);
- goto err_vdev;
+ goto err_unlock;
}
/* enable VIU clock */
if (IS_ERR(clk)) {
dev_err(&op->dev, "failed to lookup the clock!\n");
ret = PTR_ERR(clk);
- goto err_clk;
+ goto err_vdev;
}
ret = clk_prepare_enable(clk);
if (ret) {
dev_err(&op->dev, "failed to enable the clock!\n");
- goto err_clk;
+ goto err_vdev;
}
viu_dev->clk = clk;
if (request_irq(viu_dev->irq, viu_intr, 0, "viu", (void *)viu_dev)) {
dev_err(&op->dev, "Request VIU IRQ failed.\n");
ret = -ENODEV;
- goto err_irq;
+ goto err_clk;
}
mutex_unlock(&viu_dev->lock);
dev_info(&op->dev, "Freescale VIU Video Capture Board\n");
return ret;
-err_irq:
- clk_disable_unprepare(viu_dev->clk);
err_clk:
- video_unregister_device(viu_dev->vdev);
+ clk_disable_unprepare(viu_dev->clk);
err_vdev:
- v4l2_ctrl_handler_free(&viu_dev->hdl);
+ video_unregister_device(viu_dev->vdev);
+err_unlock:
mutex_unlock(&viu_dev->lock);
+err_hdl:
+ v4l2_ctrl_handler_free(&viu_dev->hdl);
+err_i2c:
i2c_put_adapter(ad);
+err_v4l2:
v4l2_device_unregister(&viu_dev->v4l2_dev);
-err:
+err_irq:
irq_dispose_mapping(viu_irq);
return ret;
}
static int isp_xclk_init(struct isp_device *isp)
{
struct device_node *np = isp->dev->of_node;
- struct clk_init_data init;
+ struct clk_init_data init = { 0 };
unsigned int i;
for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i)
if (camif->sensor.power_count == !on)
err = v4l2_subdev_call(sensor->sd, core, s_power, on);
+ if (err == -ENOIOCTLCMD)
+ err = 0;
if (!err)
sensor->power_count += on ? 1 : -1;
msg[0].addr == (state->af9033_i2c_addr[1] >> 1))
reg |= 0x100000;
- ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
- msg[0].len - 3);
+ ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
+ &msg[0].buf[3],
+ msg[0].len - 3)
+ : -EOPNOTSUPP;
} else {
/* I2C write */
u8 buf[MAX_XFER_SIZE];
ret = dvb_register_adapter(&dvb->adapter, "Trident TVMaster 6000 DVB-T",
THIS_MODULE, &dev->udev->dev, adapter_nr);
+ if (ret < 0) {
+ pr_err("tm6000: couldn't register the adapter!\n");
+ goto err;
+ }
+
dvb->adapter.priv = dev;
if (dvb->frontend) {
}
}
+static size_t uvc_video_ctrl_size(struct uvc_streaming *stream)
+{
+ /*
+ * Return the size of the video probe and commit controls, which depends
+ * on the protocol version.
+ */
+ if (stream->dev->uvc_version < 0x0110)
+ return 26;
+ else if (stream->dev->uvc_version < 0x0150)
+ return 34;
+ else
+ return 48;
+}
+
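In other words, the probe/commit control grows with the negotiated protocol revision (bcdUVC is BCD-coded, so 0x0150 means UVC 1.5):

/*
 *   bcdUVC <  0x0110               26 bytes  (UVC 1.0)
 *   0x0110 <= bcdUVC < 0x0150      34 bytes  (UVC 1.1 clock/framing fields)
 *   bcdUVC >= 0x0150               48 bytes  (UVC 1.5 additions)
 */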
static int uvc_get_video_ctrl(struct uvc_streaming *stream,
struct uvc_streaming_control *ctrl, int probe, __u8 query)
{
+ __u16 size = uvc_video_ctrl_size(stream);
__u8 *data;
- __u16 size;
int ret;
- size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
if ((stream->dev->quirks & UVC_QUIRK_PROBE_DEF) &&
query == UVC_GET_DEF)
return -EIO;
ctrl->dwMaxVideoFrameSize = get_unaligned_le32(&data[18]);
ctrl->dwMaxPayloadTransferSize = get_unaligned_le32(&data[22]);
- if (size == 34) {
+ if (size >= 34) {
ctrl->dwClockFrequency = get_unaligned_le32(&data[26]);
ctrl->bmFramingInfo = data[30];
ctrl->bPreferedVersion = data[31];
static int uvc_set_video_ctrl(struct uvc_streaming *stream,
struct uvc_streaming_control *ctrl, int probe)
{
+ __u16 size = uvc_video_ctrl_size(stream);
__u8 *data;
- __u16 size;
int ret;
- size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
data = kzalloc(size, GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
put_unaligned_le32(ctrl->dwMaxVideoFrameSize, &data[18]);
put_unaligned_le32(ctrl->dwMaxPayloadTransferSize, &data[22]);
- if (size == 34) {
+ if (size >= 34) {
put_unaligned_le32(ctrl->dwClockFrequency, &data[26]);
data[30] = ctrl->bmFramingInfo;
data[31] = ctrl->bPreferedVersion;
if (sev == NULL)
return;
- /*
- * If the event has been added to the fh->subscribed list, but its
- * add op has not completed yet elems will be 0, treat this as
- * not being subscribed.
- */
- if (!sev->elems)
- return;
-
/* Increase event sequence number on fh. */
fh->sequence++;
struct v4l2_subscribed_event *sev, *found_ev;
unsigned long flags;
unsigned i;
+ int ret = 0;
if (sub->type == V4L2_EVENT_ALL)
return -EINVAL;
sev->flags = sub->flags;
sev->fh = fh;
sev->ops = ops;
+ sev->elems = elems;
+
+ mutex_lock(&fh->subscribe_lock);
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
- if (!found_ev)
- list_add(&sev->list, &fh->subscribed);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
if (found_ev) {
+ /* Already listening */
kfree(sev);
- return 0; /* Already listening */
+ goto out_unlock;
}
if (sev->ops && sev->ops->add) {
- int ret = sev->ops->add(sev, elems);
+ ret = sev->ops->add(sev, elems);
if (ret) {
- sev->ops = NULL;
- v4l2_event_unsubscribe(fh, sub);
- return ret;
+ kfree(sev);
+ goto out_unlock;
}
}
- /* Mark as ready for use */
- sev->elems = elems;
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ list_add(&sev->list, &fh->subscribed);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
- return 0;
+out_unlock:
+ mutex_unlock(&fh->subscribe_lock);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
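The reordering matters because of a race that the removed "!sev->elems" check in the event-queueing path only papered over; roughly:

/*
 *   CPU0: v4l2_event_subscribe()      CPU1: event source queues event
 *     list_add(sev) (old code)          finds sev on fh->subscribed
 *     sev->ops->add() still running     sev->elems still 0, event lost
 *
 * With subscribe_lock held and list_add() deferred until ->add()
 * succeeds, an event source can never observe a half-initialised
 * subscription, and unsubscribe cannot race with a subscribe in flight.
 */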
return 0;
}
+ mutex_lock(&fh->subscribe_lock);
+
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
sev = v4l2_event_subscribed(fh, sub->type, sub->id);
sev->ops->del(sev);
kfree(sev);
+ mutex_unlock(&fh->subscribe_lock);
return 0;
}
INIT_LIST_HEAD(&fh->available);
INIT_LIST_HEAD(&fh->subscribed);
fh->sequence = -1;
+ mutex_init(&fh->subscribe_lock);
}
EXPORT_SYMBOL_GPL(v4l2_fh_init);
if (fh->vdev == NULL)
return;
v4l2_event_unsubscribe_all(fh);
+ mutex_destroy(&fh->subscribe_lock);
fh->vdev = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_fh_exit);
}
static const struct of_device_id usbhs_child_match_table[] = {
- { .compatible = "ti,omap-ehci", },
- { .compatible = "ti,omap-ohci", },
+ { .compatible = "ti,ehci-omap", },
+ { .compatible = "ti,ohci-omap3", },
{ }
};
.pm = &usbhsomap_dev_pm_ops,
.of_match_table = usbhs_omap_dt_ids,
},
+ .probe = usbhs_omap_probe,
.remove = usbhs_omap_remove,
};
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI");
-static int __init omap_usbhs_drvinit(void)
+static int omap_usbhs_drvinit(void)
{
- return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe);
+ return platform_driver_register(&usbhs_omap_driver);
}
/*
*/
fs_initcall_sync(omap_usbhs_drvinit);
-static void __exit omap_usbhs_drvexit(void)
+static void omap_usbhs_drvexit(void)
{
platform_driver_unregister(&usbhs_omap_driver);
}
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
#define SKE_SEND_EKS_MESSAGE_SIZE \
(MESSAGE_ID_SIZE+BITS_128_IN_BYTES+BITS_64_IN_BYTES)
+#define HDCP2_0_REPEATER_DOWNSTREAM BIT(1)
+#define HDCP1_DEVICE_DOWNSTREAM BIT(0)
+
/* all message IDs */
#define INVALID_MESSAGE_ID 0
#define AKE_INIT_MESSAGE_ID 2
#define LC_INIT_MESSAGE_ID 9
#define LC_SEND_L_PRIME_MESSAGE_ID 10
#define SKE_SEND_EKS_MESSAGE_ID 11
-#define REPEATER_AUTH_SEND_RECEIVERID_LIST_MESSAGE_ID 12
-#define REPEATER_AUTH_SEND_ACK_MESSAGE_ID 15
-#define REPEATER_AUTH_STREAM_MANAGE_MESSAGE_ID 16
-#define REPEATER_AUTH_STREAM_READY_MESSAGE_ID 17
+#define REP_SEND_RECV_ID_LIST_ID 12
+#define REP_SEND_ACK_ID 15
+#define REP_STREAM_MANAGE_ID 16
+#define REP_STREAM_READY_ID 17
#define SKE_SEND_TYPE_ID 18
#define HDCP2P2_MAX_MESSAGES 19
[SKE_SEND_TYPE_ID] = { 1,
{ {"type", 0x69494, 1} },
0 },
- [REPEATER_AUTH_SEND_RECEIVERID_LIST_MESSAGE_ID] = { 4,
+ [REP_SEND_RECV_ID_LIST_ID] = { 4,
{ {"RxInfo", 0x69330, 2}, {"seq_num_V", 0x69332, 3},
{"V'", 0x69335, 16}, {"ridlist", 0x69345, 155} },
(1 << 0) },
- [REPEATER_AUTH_SEND_ACK_MESSAGE_ID] = { 1,
+ [REP_SEND_ACK_ID] = { 1,
{ {"V", 0x693E0, 16} },
0 },
- [REPEATER_AUTH_STREAM_MANAGE_MESSAGE_ID] = { 3,
+ [REP_STREAM_MANAGE_ID] = { 3,
{ {"seq_num_M", 0x693F0, 3}, {"k", 0x693F3, 2},
{"streamID_Type", 0x693F5, 126} },
0 },
- [REPEATER_AUTH_STREAM_READY_MESSAGE_ID] = { 1,
+ [REP_STREAM_READY_ID] = { 1,
{ {"M'", 0x69473, 32} },
0 }
};
enum hdcp_state hdcp_state;
enum hdcp_lib_wakeup_cmd wakeup_cmd;
bool repeater_flag;
+ bool non_2p2_present;
bool update_stream;
bool tethered;
struct qseecom_handle *qseecom_handle;
handle->device_type == HDCP_TXMTR_DP)
return SKE_SEND_TYPE_ID;
case SKE_SEND_TYPE_ID:
- case REPEATER_AUTH_STREAM_READY_MESSAGE_ID:
- case REPEATER_AUTH_SEND_ACK_MESSAGE_ID:
+ case REP_STREAM_READY_ID:
+ case REP_SEND_ACK_ID:
if (!handle->repeater_flag)
return INVALID_MESSAGE_ID;
if (data->cmd == HDMI_HDCP_WKUP_CMD_SEND_MESSAGE)
- return REPEATER_AUTH_STREAM_MANAGE_MESSAGE_ID;
+ return REP_STREAM_MANAGE_ID;
else
- return REPEATER_AUTH_SEND_RECEIVERID_LIST_MESSAGE_ID;
- case REPEATER_AUTH_SEND_RECEIVERID_LIST_MESSAGE_ID:
- return REPEATER_AUTH_SEND_ACK_MESSAGE_ID;
- case REPEATER_AUTH_STREAM_MANAGE_MESSAGE_ID:
- return REPEATER_AUTH_STREAM_READY_MESSAGE_ID;
+ return REP_SEND_RECV_ID_LIST_ID;
+ case REP_SEND_RECV_ID_LIST_ID:
+ return REP_SEND_ACK_ID;
+ case REP_STREAM_MANAGE_ID:
+ return REP_STREAM_READY_ID;
default:
pr_err("Uknown message ID (%d)", handle->last_msg);
return -EINVAL;
case AKE_SEND_PAIRING_INFO_MESSAGE_ID:
handle->wait_timeout = HZ / 4;
break;
- case REPEATER_AUTH_SEND_RECEIVERID_LIST_MESSAGE_ID:
+ case REP_SEND_RECV_ID_LIST_ID:
if (!handle->authenticated)
handle->wait_timeout = HZ * 3;
else
case HDCP_LIB_WKUP_CMD_START:
handle->no_stored_km_flag = 0;
handle->repeater_flag = false;
+ handle->non_2p2_present = false;
handle->update_stream = false;
handle->last_msg_sent = 0;
handle->last_msg = INVALID_MESSAGE_ID;
cdata.cmd = HDMI_HDCP_WKUP_CMD_LINK_POLL;
}
break;
- case REPEATER_AUTH_SEND_ACK_MESSAGE_ID:
+ case REP_SEND_ACK_ID:
pr_debug("Repeater authentication successful\n");
if (handle->update_stream) {
}
/*
- * if the response contains LC_Init message
- * send the message again to TZ
+ * if the response contains an LC_Init or RepeaterAuth_Stream_Manage
+ * message, send that message to the sink again, as this means that
+ * TZ would like to retry
*/
if ((rsp_buf->commandid == HDCP_TXMTR_PROCESS_RECEIVED_MESSAGE) &&
- ((int)rsp_buf->message[0] == LC_INIT_MESSAGE_ID) &&
- (rsp_buf->msglen == LC_INIT_MESSAGE_SIZE)) {
+ ((int)rsp_buf->message[0] == LC_INIT_MESSAGE_ID ||
+ (int)rsp_buf->message[0] == REP_STREAM_MANAGE_ID)) {
if (!atomic_read(&handle->hdcp_off)) {
/* keep local copy of TZ response */
memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE);
handle->authenticated = false;
+ /* AV mute the sink first to avoid artifacts */
+ handle->client_ops->mute_sink(handle->client_ctx);
+
hdcp_lib_txmtr_deinit(handle);
if (!handle->legacy_app)
hdcp_lib_session_deinit(handle);
QSEECOM_ALIGN(sizeof
(struct hdcp_rcvd_msg_rsp)));
+ if (msg[0] == REP_SEND_RECV_ID_LIST_ID) {
+ if ((msg[2] & HDCP2_0_REPEATER_DOWNSTREAM) ||
+ (msg[2] & HDCP1_DEVICE_DOWNSTREAM))
+ handle->non_2p2_present = true;
+ else
+ handle->non_2p2_present = false;
+ }
+
/* get next message from sink if we receive H PRIME on no store km */
if ((msg[0] == AKE_SEND_H_PRIME_MESSAGE_ID) &&
handle->no_stored_km_flag) {
goto exit;
}
- if ((msg[0] == REPEATER_AUTH_STREAM_READY_MESSAGE_ID) &&
+ if ((msg[0] == REP_STREAM_READY_ID) &&
(rc == 0) && (rsp_buf->status == 0)) {
pr_debug("Got Auth_Stream_Ready, nothing sent to rx\n");
handle = hdcp_drv_mgr->handle;
+ /*
+ * If the stream type from TZ is type 1, ignore subsequent writes
+ * to min_enc_level to avoid state transitions that can cause
+ * visual artifacts: the stream type is already at the highest
+ * level, and for an HDCP 2.2 capable sink there is no need to
+ * reduce it.
+ */
+ if (handle &&
+ !handle->non_2p2_present) {
+ pr_info("stream type is 1 returning\n");
+ return ret;
+ }
+
rc = kstrtoint(buf, 10, &min_enc_lvl);
if (rc) {
pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc);
} else
lux = 0;
else
- return -EAGAIN;
+ return 0;
/* LUX range check */
return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux;
retval = get_user_pages_fast((uintptr_t) produce_uva,
produce_q->kernel_if->num_pages, 1,
produce_q->kernel_if->u.h.header_page);
- if (retval < produce_q->kernel_if->num_pages) {
+ if (retval < (int)produce_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
retval);
qp_release_pages(produce_q->kernel_if->u.h.header_page,
retval = get_user_pages_fast((uintptr_t) consume_uva,
consume_q->kernel_if->num_pages, 1,
consume_q->kernel_if->u.h.header_page);
- if (retval < consume_q->kernel_if->num_pages) {
+ if (retval < (int)consume_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
retval);
qp_release_pages(consume_q->kernel_if->u.h.header_page,
case SIOCFINDIPDDPRT:
spin_lock_bh(&ipddp_route_lock);
rp = __ipddp_find_route(&rcp);
- if (rp)
- memcpy(&rcp2, rp, sizeof(rcp2));
+ if (rp) {
+ memset(&rcp2, 0, sizeof(rcp2));
+ rcp2.ip = rp->ip;
+ rcp2.at = rp->at;
+ rcp2.flags = rp->flags;
+ }
spin_unlock_bh(&ipddp_route_lock);
if (rp) {
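
The ipddp hunk replaces a whole-struct memcpy with a memset plus field-by-field copy so that uninitialized structure padding can never reach user space through the later copy_to_user(). A generic sketch of the pattern, assuming a hypothetical route_copy layout:

        #include <linux/types.h>
        #include <linux/string.h>
        #include <linux/uaccess.h>

        struct route_copy { u32 ip; u16 at; u16 flags; };  /* hypothetical */

        static int report_route(struct route_copy __user *uptr,
                                const struct route_copy *rt)
        {
                struct route_copy rc;

                memset(&rc, 0, sizeof(rc));     /* clears any padding bytes */
                rc.ip = rt->ip;                 /* copy only the real fields */
                rc.at = rt->at;
                rc.flags = rt->flags;
                return copy_to_user(uptr, &rc, sizeof(rc)) ? -EFAULT : 0;
        }
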
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
int mod);
+static void bond_netdev_notify_work(struct work_struct *work);
/*---------------------------- General routines -----------------------------*/
return NULL;
}
}
+ INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
+
return slave;
}
{
struct bonding *bond = bond_get_bond_by_slave(slave);
+ cancel_delayed_work_sync(&slave->notify_work);
if (BOND_MODE(bond) == BOND_MODE_8023AD)
kfree(SLAVE_AD_INFO(slave));
info->link_failure_count = slave->link_failure_count;
}
-static void bond_netdev_notify(struct net_device *dev,
- struct netdev_bonding_info *info)
-{
- rtnl_lock();
- netdev_bonding_info_change(dev, info);
- rtnl_unlock();
-}
-
static void bond_netdev_notify_work(struct work_struct *_work)
{
- struct netdev_notify_work *w =
- container_of(_work, struct netdev_notify_work, work.work);
+ struct slave *slave = container_of(_work, struct slave,
+ notify_work.work);
+
+ if (rtnl_trylock()) {
+ struct netdev_bonding_info binfo;
- bond_netdev_notify(w->dev, &w->bonding_info);
- dev_put(w->dev);
- kfree(w);
+ bond_fill_ifslave(slave, &binfo.slave);
+ bond_fill_ifbond(slave->bond, &binfo.master);
+ netdev_bonding_info_change(slave->dev, &binfo);
+ rtnl_unlock();
+ } else {
+ queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
+ }
}
void bond_queue_slave_event(struct slave *slave)
{
- struct bonding *bond = slave->bond;
- struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
-
- if (!nnw)
- return;
-
- dev_hold(slave->dev);
- nnw->dev = slave->dev;
- bond_fill_ifslave(slave, &nnw->bonding_info.slave);
- bond_fill_ifbond(bond, &nnw->bonding_info.master);
- INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
-
- queue_delayed_work(slave->bond->wq, &nnw->work, 0);
+ queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
}
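
The bonding rework embeds one delayed work item per slave and, instead of sleeping on RTNL from the workqueue, uses rtnl_trylock() with a one-jiffy requeue on contention. A minimal sketch of that trylock-or-requeue pattern, with hypothetical ctx/do_rtnl_update names:

        #include <linux/rtnetlink.h>
        #include <linux/workqueue.h>

        struct ctx {
                struct delayed_work dwork;
                struct workqueue_struct *wq;
        };

        static void do_rtnl_update(struct ctx *c) { }  /* hypothetical payload */

        static void notify_work(struct work_struct *work)
        {
                struct ctx *c = container_of(work, struct ctx, dwork.work);

                if (!rtnl_trylock()) {
                        /* RTNL busy: requeue one jiffy later, never block here */
                        queue_delayed_work(c->wq, &c->dwork, 1);
                        return;
                }
                do_rtnl_update(c);
                rtnl_unlock();
        }
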
/* enslave device <slave> to bond device <master> */
{
u32 reg;
- /* Stop monitoring MPD interrupt */
- intrl2_0_mask_set(priv, INTRL2_0_MPD);
-
/* Clear the MagicPacket detection logic */
reg = umac_readl(priv, UMAC_MPD_CTRL);
reg &= ~MPD_EN;
umac_writel(priv, reg, UMAC_MPD_CTRL);
+ reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
+ if (reg & INTRL2_0_MPD)
+ netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
+
+ if (reg & INTRL2_0_BRCM_MATCH_TAG) {
+ reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
+ RXCHK_BRCM_TAG_MATCH_MASK;
+ netdev_info(priv->netdev,
+ "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
+ }
+
netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}
if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
bcm_sysport_tx_reclaim_all(priv);
- if (priv->irq0_stat & INTRL2_0_MPD) {
- netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
- bcm_sysport_resume_from_wol(priv);
- }
-
return IRQ_HANDLED;
}
/* UniMAC receive needs to be turned on */
umac_enable_set(priv, CMD_RX_EN, 1);
- /* Enable the interrupt wake-up source */
- intrl2_0_mask_clear(priv, INTRL2_0_MPD);
-
netif_dbg(priv, wol, ndev, "entered WOL mode\n");
return 0;
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
tx_pkts++;
/* return full budget so NAPI will complete. */
- if (unlikely(tx_pkts > bp->tx_wake_thresh))
+ if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
rx_pkts = budget;
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ break;
+ }
} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
if (likely(rc >= 0))
}
raw_cons = NEXT_RAW_CMP(raw_cons);
- if (rx_pkts == budget)
+ if (rx_pkts && rx_pkts == budget)
break;
}
while (1) {
work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
- if (work_done >= budget)
+ if (work_done >= budget) {
+ if (!budget)
+ BNXT_CP_DB_REARM(cpr->cp_doorbell,
+ cpr->cp_raw_cons);
break;
+ }
if (!bnxt_has_work(bp, cpr)) {
napi_complete(napi);
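
The bnxt hunks handle a zero NAPI budget (as used by netpoll): the poll loop must stop after the TX completions, re-arm the completion doorbell, and must not call napi_complete(). A hedged sketch of that budget-zero handling, with hypothetical poll_work/rearm_doorbell helpers:

        #include <linux/netdevice.h>

        static int poll_work(struct napi_struct *napi, int budget) { return 0; }
        static void rearm_doorbell(struct napi_struct *napi) { }

        static int my_poll(struct napi_struct *napi, int budget)
        {
                int done = poll_work(napi, budget);

                if (done >= budget) {
                        /* budget 0 (netpoll): re-arm the ring; NAPI must
                         * not be completed in this case */
                        if (!budget)
                                rearm_doorbell(napi);
                        return done;
                }
                napi_complete(napi);
                return done;
        }
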
if (!(status & MACB_BIT(TGO)))
return 0;
- usleep_range(10, 250);
+ udelay(250);
} while (time_before(halt_time, timeout));
return -ETIMEDOUT;
.init = macb_init,
};
+static const struct macb_config sama5d3macb_config = {
+ .caps = MACB_CAPS_SG_DISABLED
+ | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+};
+
static const struct macb_config pc302gem_config = {
.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
.dma_burst_length = 16,
{ .compatible = "cdns,gem", .data = &pc302gem_config },
{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
+ { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
{ .compatible = "cdns,emac", .data = &emac_config },
/* priv data for the desc, e.g. skb when use with ip stack*/
void *priv;
- u16 page_offset;
- u16 reuse_flag;
+ u32 page_offset;
+ u32 length; /* length of the buffer */
- u16 length; /* length of the buffer */
+ u16 reuse_flag;
/* desc type, used by the ring user to mark the type of the priv data */
u16 type;
/* Wait for link to drop */
time = jiffies + (HZ / 10);
do {
- if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
+ if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
break;
if (!in_interrupt())
schedule_timeout_interruptible(1);
adapter->tx_ring = tx_old;
e1000_free_all_rx_resources(adapter);
e1000_free_all_tx_resources(adapter);
- kfree(tx_old);
- kfree(rx_old);
adapter->rx_ring = rxdr;
adapter->tx_ring = txdr;
err = e1000_up(adapter);
if (err)
goto err_setup;
}
+ kfree(tx_old);
+ kfree(rx_old);
clear_bit(__E1000_RESETTING, &adapter->flags);
return 0;
err_alloc_rx:
kfree(txdr);
err_alloc_tx:
- e1000_up(adapter);
+ if (netif_running(adapter->netdev))
+ e1000_up(adapter);
err_setup:
clear_bit(__E1000_RESETTING, &adapter->flags);
return err;
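
The e1000 fix frees the old ring structures only after e1000_up() has succeeded, and re-ups the interface in the allocation error path only if it was actually running. A simplified sketch of the swap-then-commit ordering, with hypothetical adapter/ring/device_up names:

        #include <linux/slab.h>

        struct ring { int dummy; };
        struct adapter { struct ring *tx_ring; };

        static int device_up(struct adapter *ad) { return 0; }  /* hypothetical */

        static int resize_rings(struct adapter *ad, struct ring *new_tx)
        {
                struct ring *old_tx = ad->tx_ring;
                int err;

                ad->tx_ring = new_tx;
                err = device_up(ad);
                if (err) {
                        ad->tx_ring = old_tx;   /* old buffers still intact */
                        return err;
                }
                kfree(old_tx);                  /* commit: free only on success */
                return 0;
        }
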
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
+#include <linux/if_vlan.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
}
/* Set Tx descriptors fields relevant for CSUM calculation */
-static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
+static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
int ip_hdr_len, int l4_proto)
{
u32 command;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
int ip_hdr_len = 0;
u8 l4_proto;
+ __be16 l3_proto = vlan_get_protocol(skb);
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (l3_proto == htons(ETH_P_IP)) {
struct iphdr *ip4h = ip_hdr(skb);
/* Calculate IPv4 checksum and L4 checksum */
ip_hdr_len = ip4h->ihl;
l4_proto = ip4h->protocol;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (l3_proto == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = ipv6_hdr(skb);
/* Read l4_protocol from one of IPv6 extra headers */
}
return mvpp2_txq_desc_csum(skb_network_offset(skb),
- skb->protocol, ip_hdr_len, l4_proto);
+ l3_proto, ip_hdr_len, l4_proto);
}
return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
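
The mvpp2 fix matters because skb->protocol holds the 802.1Q tag type for VLAN frames; vlan_get_protocol() returns the encapsulated EtherType, so the checksum descriptor is built for the real L3 protocol. A small illustration:

        #include <linux/if_vlan.h>

        /* vlan_get_protocol() returns the encapsulated EtherType (e.g.
         * ETH_P_IP) even when skb->protocol is the 802.1Q/802.1ad tag. */
        static bool is_ip_frame(struct sk_buff *skb)
        {
                __be16 proto = vlan_get_protocol(skb);

                return proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6);
        }
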
struct mlx4_dev *dev = &priv->dev;
struct mlx4_eq *eq = &priv->eq_table.eq[vec];
- if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
+ if (!cpumask_available(eq->affinity_mask) ||
+ cpumask_empty(eq->affinity_mask))
return;
hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
int (*config_loopback) (struct qlcnic_adapter *, u8);
int (*clear_loopback) (struct qlcnic_adapter *, u8);
int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
- void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
+ void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr,
+ u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
int (*get_board_info) (struct qlcnic_adapter *);
void (*set_mac_filter_count) (struct qlcnic_adapter *);
void (*free_mac_list) (struct qlcnic_adapter *);
}
static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
- u64 *addr, u16 id)
+ u64 *addr, u16 vlan,
+ struct qlcnic_host_tx_ring *tx_ring)
{
- adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
+ adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
}
static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
}
void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
- u16 vlan_id)
+ u16 vlan_id,
+ struct qlcnic_host_tx_ring *tx_ring)
{
u8 mac[ETH_ALEN];
memcpy(&mac, addr, ETH_ALEN);
int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
-void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+ u16 vlan, struct qlcnic_host_tx_ring *ring);
int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
struct net_device *netdev);
void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
- u64 *uaddr, u16 vlan_id);
+ u64 *uaddr, u16 vlan_id,
+ struct qlcnic_host_tx_ring *tx_ring);
int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
struct ethtool_coalesce *);
int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
}
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
- u16 vlan_id)
+ u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
{
struct cmd_desc_type0 *hwdesc;
struct qlcnic_nic_req *req;
struct qlcnic_mac_req *mac_req;
struct qlcnic_vlan_req *vlan_req;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
u32 producer;
u64 word;
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *first_desc,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct qlcnic_host_tx_ring *tx_ring)
{
struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
tmp_fil->vlan_id == vlan_id) {
if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
qlcnic_change_filter(adapter, &src_addr,
- vlan_id);
+ vlan_id, tx_ring);
tmp_fil->ftime = jiffies;
return;
}
if (!fil)
return;
- qlcnic_change_filter(adapter, &src_addr, vlan_id);
+ qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
fil->ftime = jiffies;
fil->vlan_id = vlan_id;
memcpy(fil->faddr, &src_addr, ETH_ALEN);
}
if (adapter->drv_mac_learn)
- qlcnic_send_filter(adapter, first_desc, skb);
+ qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
tx_ring->tx_stats.tx_bytes += skb->len;
tx_ring->tx_stats.xmit_called++;
};
enum rtl_flag {
- RTL_FLAG_TASK_ENABLED,
+ RTL_FLAG_TASK_ENABLED = 0,
RTL_FLAG_TASK_SLOW_PENDING,
RTL_FLAG_TASK_RESET_PENDING,
RTL_FLAG_TASK_PHY_PENDING,
rtl8169_update_counters(dev);
rtl_lock_work(tp);
- clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+ /* Clear all task flags */
+ bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
rtl8169_down(dev);
rtl_unlock_work(tp);
rtl_lock_work(tp);
napi_disable(&tp->napi);
- clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+ /* Clear all task flags */
+ bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
+
rtl_unlock_work(tp);
rtl_pll_power_down(tp);
* Description:
* This function validates the number of Unicast address entries supported
* by a particular Synopsys 10/100/1000 controller. The Synopsys controller
- * supports 1, 32, 64, or 128 Unicast filter entries for it's Unicast filter
+ * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter
* logic. This function validates a valid, supported configuration is
* selected, and defaults to 1 Unicast address if an unsupported
* configuration is selected.
int x = ucast_entries;
switch (x) {
- case 1:
- case 32:
+ case 1 ... 32:
case 64:
case 128:
break;
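
The `case 1 ... 32:` above uses the GCC case-range extension, which matches any value in the inclusive range (the spaces around `...` are required). A tiny standalone example:

        static int clamp_ucast_entries(int n)
        {
                switch (n) {
                case 1 ... 32:          /* GCC range: any value from 1 to 32 */
                case 64:
                case 128:
                        return n;       /* supported configuration */
                default:
                        return 1;       /* fall back to a single entry */
                }
        }
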
return -EBUSY;
}
+ if (dev == port_dev) {
+ netdev_err(dev, "Cannot enslave team device to itself\n");
+ return -EINVAL;
+ }
+
if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
vlan_uses_dev(dev)) {
netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
{
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
if (pdata) {
+ cancel_work_sync(&pdata->set_multicast);
netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
pdata = NULL;
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
htt->rx_ring.fill_cnt));
- spin_unlock_bh(&htt->rx_ring.lock);
if (ret)
ath10k_htt_rx_ring_free(htt);
+ spin_unlock_bh(&htt->rx_ring.lock);
+
return ret;
}
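
The ath10k hunks widen the rx_ring.lock scope so that the error-path ath10k_htt_rx_ring_free() runs under the same lock as the fill it unwinds. A minimal sketch of keeping an operation and its unwind under one lock, with hypothetical ring helpers:

        #include <linux/spinlock.h>

        struct ring { spinlock_t lock; };

        static int ring_fill(struct ring *r) { return 0; }   /* hypothetical */
        static void ring_free(struct ring *r) { }            /* hypothetical */

        static int refill(struct ring *r)
        {
                int ret;

                spin_lock_bh(&r->lock);
                ret = ring_fill(r);
                if (ret)
                        ring_free(r);   /* unwind under the same lock */
                spin_unlock_bh(&r->lock);
                return ret;
        }
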
skb_queue_purge(&htt->rx_in_ord_compl_q);
skb_queue_purge(&htt->tx_fetch_ind_q);
+ spin_lock_bh(&htt->rx_ring.lock);
ath10k_htt_rx_ring_free(htt);
+ spin_unlock_bh(&htt->rx_ring.lock);
dma_free_coherent(htt->ar->dev,
(htt->rx_ring.size *
);
TRACE_EVENT(ath10k_wmi_cmd,
- TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len,
- int ret),
+ TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
- TP_ARGS(ar, id, buf, buf_len, ret),
+ TP_ARGS(ar, id, buf, buf_len),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__field(unsigned int, id)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
- __field(int, ret)
),
TP_fast_assign(
__assign_str(driver, dev_driver_string(ar->dev));
__entry->id = id;
__entry->buf_len = buf_len;
- __entry->ret = ret;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
- "%s %s id %d len %zu ret %d",
+ "%s %s id %d len %zu",
__get_str(driver),
__get_str(device),
__entry->id,
- __entry->buf_len,
- __entry->ret
+ __entry->buf_len
)
);
bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
ie_len = roundup(arg->ie_len, 4);
len = (sizeof(*tlv) + sizeof(*cmd)) +
- (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
- (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
- (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
- (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
+ sizeof(*tlv) + chan_len +
+ sizeof(*tlv) + ssid_len +
+ sizeof(*tlv) + bssid_len +
+ sizeof(*tlv) + ie_len;
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
cmd_hdr->cmd_id = __cpu_to_le32(cmd);
memset(skb_cb, 0, sizeof(*skb_cb));
+ trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
- trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);
if (ret)
goto err_pull;
cnss2-y += bus.o
cnss2-y += debug.o
cnss2-y += pci.o
+cnss2-y += usb.o
cnss2-y += power.o
cnss2-y += qmi.o
cnss2-y += wlan_firmware_service_v01.o
#include "bus.h"
#include "debug.h"
#include "pci.h"
+#include "usb.h"
enum cnss_dev_bus_type cnss_get_dev_bus_type(struct device *dev)
{
case QCA6290_DEVICE_ID:
case QCN7605_DEVICE_ID:
return CNSS_BUS_PCI;
+ case QCN7605_COMPOSITE_DEVICE_ID:
+ case QCN7605_STANDALONE_DEVICE_ID:
+ return CNSS_BUS_USB;
default:
cnss_pr_err("Unknown device_id: 0x%lx\n", device_id);
return CNSS_BUS_NONE;
}
}
+bool cnss_bus_req_mem_ind_valid(struct cnss_plat_data *plat_priv)
+{
+ return cnss_get_bus_type(plat_priv->device_id) != CNSS_BUS_USB;
+}
+
+bool cnss_bus_dev_cal_rep_valid(struct cnss_plat_data *plat_priv)
+{
+ return cnss_get_bus_type(plat_priv->device_id) == CNSS_BUS_USB;
+}
+
void *cnss_bus_dev_to_bus_priv(struct device *dev)
{
if (!dev)
switch (cnss_get_dev_bus_type(dev)) {
case CNSS_BUS_PCI:
return cnss_pci_priv_to_plat_priv(bus_priv);
+ case CNSS_BUS_USB:
+ return cnss_usb_priv_to_plat_priv(bus_priv);
default:
return NULL;
}
switch (plat_priv->bus_type) {
case CNSS_BUS_PCI:
return cnss_pci_init(plat_priv);
+ case CNSS_BUS_USB:
+ return cnss_usb_init(plat_priv);
default:
cnss_pr_err("Unsupported bus type: %d\n",
plat_priv->bus_type);
switch (plat_priv->bus_type) {
case CNSS_BUS_PCI:
cnss_pci_deinit(plat_priv);
+ break;
+ case CNSS_BUS_USB:
+ cnss_usb_deinit(plat_priv);
+ break;
default:
cnss_pr_err("Unsupported bus type: %d\n",
plat_priv->bus_type);
switch (plat_priv->bus_type) {
case CNSS_BUS_PCI:
return cnss_pci_call_driver_probe(plat_priv->bus_priv);
+ case CNSS_BUS_USB:
+ return cnss_usb_call_driver_probe(plat_priv->bus_priv);
default:
cnss_pr_err("Unsupported bus type: %d\n",
plat_priv->bus_type);
switch (plat_priv->bus_type) {
case CNSS_BUS_PCI:
return cnss_pci_call_driver_remove(plat_priv->bus_priv);
+ case CNSS_BUS_USB:
+ return cnss_usb_call_driver_remove(plat_priv->bus_priv);
default:
cnss_pr_err("Unsupported bus type: %d\n",
plat_priv->bus_type);
switch (plat_priv->bus_type) {
case CNSS_BUS_PCI:
return cnss_pci_dev_powerup(plat_priv->bus_priv);
+ case CNSS_BUS_USB:
+ return 0;
default:
cnss_pr_err("Unsupported bus type: %d\n",
plat_priv->bus_type);
switch (plat_priv->bus_type) {
case CNSS_BUS_PCI:
return cnss_pci_dev_shutdown(plat_priv->bus_priv);
+ case CNSS_BUS_USB:
+ return 0;
default:
cnss_pr_err("Unsupported bus type: %d\n",
plat_priv->bus_type);
switch (plat_priv->bus_type) {
case CNSS_BUS_PCI:
return cnss_pci_register_driver_hdlr(plat_priv->bus_priv, data);
+ case CNSS_BUS_USB:
+ return cnss_usb_register_driver_hdlr(plat_priv->bus_priv, data);
default:
cnss_pr_err("Unsupported bus type: %d\n",
plat_priv->bus_type);
switch (plat_priv->bus_type) {
case CNSS_BUS_PCI:
return cnss_pci_unregister_driver_hdlr(plat_priv->bus_priv);
+ case CNSS_BUS_USB:
+ return cnss_usb_unregister_driver_hdlr(plat_priv->bus_priv);
default:
cnss_pr_err("Unsupported bus type: %d\n",
plat_priv->bus_type);
#define QCN7605_VENDOR_ID 0x17CB
#define QCN7605_DEVICE_ID 0x1102
+#define QCN7605_USB_VENDOR_ID 0x05C6
+#define QCN7605_COMPOSITE_DEVICE_ID QCN7605_COMPOSITE_PRODUCT_ID
+#define QCN7605_STANDALONE_DEVICE_ID QCN7605_STANDALONE_PRODUCT_ID
+
+#define QCN7605_STANDALONE_PRODUCT_ID 0x9900
+#define QCN7605_COMPOSITE_PRODUCT_ID 0x9901
+
enum cnss_dev_bus_type cnss_get_dev_bus_type(struct device *dev);
enum cnss_dev_bus_type cnss_get_bus_type(unsigned long device_id);
void *cnss_bus_dev_to_bus_priv(struct device *dev);
int cnss_bus_call_driver_modem_status(struct cnss_plat_data *plat_priv,
int modem_current_status);
int cnss_bus_recovery_update_status(struct cnss_plat_data *plat_priv);
+bool cnss_bus_req_mem_ind_valid(struct cnss_plat_data *plat_priv);
+bool cnss_bus_dev_cal_rep_valid(struct cnss_plat_data *plat_priv);
#endif /* _CNSS_BUS_H */
#define FW_READY_TIMEOUT 20000
#define FW_ASSERT_TIMEOUT 5000
#define CNSS_EVENT_PENDING 2989
+#define CE_MSI_NAME "CE"
static struct cnss_plat_data *plat_env;
{
struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
struct wlfw_wlan_cfg_req_msg_v01 req;
- u32 i;
+ u32 i, ce_id, num_vectors, user_base_data, base_vector;
int ret = 0;
if (plat_priv->device_id == QCA6174_DEVICE_ID)
req.svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
}
+ if (config->num_shadow_reg_cfg) {
+ req.shadow_reg_valid = 1;
+
+ if (config->num_shadow_reg_cfg >
+ QMI_WLFW_MAX_NUM_SHADOW_REG_V01)
+ req.shadow_reg_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01;
+ else
+ req.shadow_reg_len = config->num_shadow_reg_cfg;
+ memcpy(req.shadow_reg, config->shadow_reg_cfg,
+ sizeof(struct wlfw_shadow_reg_cfg_s_v01)
+ * req.shadow_reg_len);
+ }
+
req.shadow_reg_v2_valid = 1;
if (config->num_shadow_reg_v2_cfg >
QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01)
sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01)
* req.shadow_reg_v2_len);
+ if (config->rri_over_ddr_cfg_valid) {
+ req.rri_over_ddr_cfg_valid = 1;
+ req.rri_over_ddr_cfg.base_addr_low =
+ config->rri_over_ddr_cfg.base_addr_low;
+ req.rri_over_ddr_cfg.base_addr_high =
+ config->rri_over_ddr_cfg.base_addr_high;
+ }
+
+ if (plat_priv->device_id == QCN7605_DEVICE_ID) {
+ ret = cnss_get_user_msi_assignment(dev, CE_MSI_NAME,
+ &num_vectors,
+ &user_base_data,
+ &base_vector);
+ if (!ret) {
+ req.msi_cfg_valid = 1;
+ req.msi_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
+ for (ce_id = 0; ce_id < QMI_WLFW_MAX_NUM_CE_V01;
+ ce_id++) {
+ req.msi_cfg[ce_id].ce_id = ce_id;
+ req.msi_cfg[ce_id].msi_vector =
+ (ce_id % num_vectors) + base_vector;
+ }
+ }
+ }
ret = cnss_wlfw_wlan_cfg_send_sync(plat_priv, &req);
if (ret)
goto out;
return ret;
}
+static int cnss_cal_update_hdlr(struct cnss_plat_data *plat_priv)
+{
+ /* QCN7605 stores the cal data sent by FW in the calDB memory area;
+ * leave this handler once the complete data has been uploaded. FW
+ * is expected to send cal done.
+ */
+ return 0;
+}
+
static char *cnss_driver_event_to_str(enum cnss_driver_event_type type)
{
switch (type) {
return "COLD_BOOT_CAL_START";
case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE:
return "COLD_BOOT_CAL_DONE";
+ case CNSS_DRIVER_EVENT_CAL_UPDATE:
+ return "COLD_BOOT_CAL_DATA_UPDATE";
+ case CNSS_DRIVER_EVENT_CAL_DOWNLOAD:
+ return "COLD_BOOT_CAL_DATA_DOWNLOAD";
case CNSS_DRIVER_EVENT_REGISTER_DRIVER:
return "REGISTER_DRIVER";
case CNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
}
EXPORT_SYMBOL(cnss_force_fw_assert);
+static int cnss_wlfw_server_arrive_hdlr(struct cnss_plat_data *plat_priv)
+{
+ int ret;
+
+ ret = cnss_wlfw_server_arrive(plat_priv);
+ if (ret)
+ goto out;
+
+ if (!cnss_bus_req_mem_ind_valid(plat_priv)) {
+ ret = cnss_wlfw_tgt_cap_send_sync(plat_priv);
+ if (ret)
+ goto out;
+
+ ret = cnss_wlfw_bdf_dnld_send_sync(plat_priv);
+ if (ret)
+ goto out;
+ /* cnss driver sends meta data report and waits for FW_READY */
+ if (cnss_bus_dev_cal_rep_valid(plat_priv))
+ ret = cnss_wlfw_cal_report_send_sync(plat_priv);
+ }
+out:
+ return ret;
+}
+
static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv)
{
int ret = 0;
if (plat_priv->device_id == QCN7605_DEVICE_ID)
goto skip_shutdown;
cnss_bus_dev_shutdown(plat_priv);
+
skip_shutdown:
clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
-
return 0;
}
switch (event->type) {
case CNSS_DRIVER_EVENT_SERVER_ARRIVE:
- ret = cnss_wlfw_server_arrive(plat_priv);
+ ret = cnss_wlfw_server_arrive_hdlr(plat_priv);
break;
case CNSS_DRIVER_EVENT_SERVER_EXIT:
ret = cnss_wlfw_server_exit(plat_priv);
case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START:
ret = cnss_cold_boot_cal_start_hdlr(plat_priv);
break;
+ case CNSS_DRIVER_EVENT_CAL_UPDATE:
+ ret = cnss_cal_update_hdlr(plat_priv);
+ break;
case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE:
ret = cnss_cold_boot_cal_done_hdlr(plat_priv);
break;
subsys_info->subsys_desc.name = "QCA6290";
break;
case QCN7605_DEVICE_ID:
+ case QCN7605_STANDALONE_DEVICE_ID:
+ case QCN7605_COMPOSITE_DEVICE_ID:
subsys_info->subsys_desc.name = "QCN7605";
break;
default:
case QCN7605_DEVICE_ID:
ret = cnss_register_ramdump_v2(plat_priv);
break;
+ case QCN7605_COMPOSITE_DEVICE_ID:
+ case QCN7605_STANDALONE_DEVICE_ID:
+ break;
+
default:
cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
ret = -ENODEV;
case QCA6290_DEVICE_ID:
cnss_unregister_ramdump_v2(plat_priv);
break;
+ case QCN7605_COMPOSITE_DEVICE_ID:
+ case QCN7605_STANDALONE_DEVICE_ID:
+ break;
default:
cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
break;
enum cnss_dev_bus_type {
CNSS_BUS_NONE = -1,
CNSS_BUS_PCI,
+ CNSS_BUS_USB,
};
struct cnss_vreg_info {
CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
CNSS_DRIVER_EVENT_POWER_UP,
CNSS_DRIVER_EVENT_POWER_DOWN,
+ CNSS_DRIVER_EVENT_CAL_UPDATE,
+ CNSS_DRIVER_EVENT_CAL_DOWNLOAD,
CNSS_DRIVER_EVENT_MAX,
};
void cnss_set_pin_connect_status(struct cnss_plat_data *plat_priv);
u32 cnss_get_wake_msi(struct cnss_plat_data *plat_priv);
bool *cnss_get_qmi_bypass(void);
+bool is_qcn7605_device(u16 device_id);
#endif /* _CNSS_MAIN_H */
return pm_request_resume(&pci_dev->dev);
}
+#ifdef CONFIG_CNSS_QCA6390
+int cnss_pci_force_wake_request(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct mhi_controller *mhi_ctrl;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ if (pci_priv->device_id != QCA6390_DEVICE_ID)
+ return 0;
+
+ mhi_ctrl = pci_priv->mhi_ctrl;
+ if (!mhi_ctrl)
+ return -EINVAL;
+
+ read_lock_bh(&mhi_ctrl->pm_lock);
+ mhi_ctrl->wake_get(mhi_ctrl, true);
+ read_unlock_bh(&mhi_ctrl->pm_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_pci_force_wake_request);
+
+int cnss_pci_is_device_awake(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct mhi_controller *mhi_ctrl;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ if (pci_priv->device_id != QCA6390_DEVICE_ID)
+ return true;
+
+ mhi_ctrl = pci_priv->mhi_ctrl;
+ if (!mhi_ctrl)
+ return -EINVAL;
+
+ return mhi_ctrl->dev_state == MHI_STATE_M0;
+}
+EXPORT_SYMBOL(cnss_pci_is_device_awake);
+
+int cnss_pci_force_wake_release(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct mhi_controller *mhi_ctrl;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ if (pci_priv->device_id != QCA6390_DEVICE_ID)
+ return 0;
+
+ mhi_ctrl = pci_priv->mhi_ctrl;
+ if (!mhi_ctrl)
+ return -EINVAL;
+
+ read_lock_bh(&mhi_ctrl->pm_lock);
+ mhi_ctrl->wake_put(mhi_ctrl, false);
+ read_unlock_bh(&mhi_ctrl->pm_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_pci_force_wake_release);
+#else
+int cnss_pci_force_wake_request(struct device *dev)
+{
+ return 0;
+}
+EXPORT_SYMBOL(cnss_pci_force_wake_request);
+
+int cnss_pci_is_device_awake(struct device *dev)
+{
+ return true;
+}
+EXPORT_SYMBOL(cnss_pci_is_device_awake);
+
+int cnss_pci_force_wake_release(struct device *dev)
+{
+ return 0;
+}
+EXPORT_SYMBOL(cnss_pci_force_wake_release);
+#endif
+
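
The block above keeps the exported force-wake API available in both configurations by pairing the real implementation with no-op stubs under #else, so callers never need #ifdefs of their own. A generic sketch of the pattern, with hypothetical CONFIG_MY_FEATURE/my_force_wake names:

        #include <linux/device.h>
        #include <linux/export.h>

        int do_force_wake(struct device *dev);  /* hypothetical real path */

        #ifdef CONFIG_MY_FEATURE
        int my_force_wake(struct device *dev)
        {
                return do_force_wake(dev);      /* feature enabled */
        }
        #else
        int my_force_wake(struct device *dev)
        {
                return 0;                       /* feature compiled out: no-op */
        }
        #endif
        EXPORT_SYMBOL(my_force_wake);
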
int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
{
struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
req.fw_init_done_enable = 1;
req.pin_connect_result_enable_valid = 1;
req.pin_connect_result_enable = 1;
+ req.initiate_cal_download_enable_valid = 1;
+ req.initiate_cal_download_enable = 1;
+ req.initiate_cal_update_enable_valid = 1;
+ req.initiate_cal_update_enable = 1;
req_desc.max_msg_len = WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN;
req_desc.msg_id = QMI_WLFW_IND_REGISTER_REQ_V01;
return ret;
}
+static int cnss_qmi_initiate_cal_update_ind_hdlr(
+ struct cnss_plat_data *plat_priv,
+ void *msg, unsigned int msg_len)
+{
+ return 0;
+}
+
static int cnss_wlfw_request_mem_ind_hdlr(struct cnss_plat_data *plat_priv,
void *msg, unsigned int msg_len)
{
return ret;
}
+int cnss_wlfw_cal_report_send_sync(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+
static void cnss_wlfw_clnt_ind(struct qmi_handle *handle,
unsigned int msg_id, void *msg,
unsigned int msg_len, void *ind_cb_priv)
case QMI_WLFW_PIN_CONNECT_RESULT_IND_V01:
cnss_qmi_pin_result_ind_hdlr(plat_priv, msg, msg_len);
break;
+ case QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01:
+ cnss_qmi_initiate_cal_update_ind_hdlr(plat_priv, msg, msg_len);
+ break;
default:
cnss_pr_err("Invalid QMI WLFW indication, msg_id: 0x%x\n",
msg_id);
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
u32 data_len, u8 *data);
int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv,
u8 fw_log_mode);
-
+int cnss_wlfw_cal_report_send_sync(struct cnss_plat_data *plat_priv);
#endif /* _CNSS_QMI_H */
--- /dev/null
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "main.h"
+#include "bus.h"
+#include "debug.h"
+#include "usb.h"
+
+int cnss_usb_dev_powerup(struct cnss_usb_data *usb_priv)
+{
+ int ret = 0;
+
+ if (!usb_priv) {
+ cnss_pr_err("usb_priv is NULL\n");
+ return -ENODEV;
+ }
+ return ret;
+}
+
+int cnss_usb_wlan_register_driver(struct cnss_usb_wlan_driver *driver_ops)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct cnss_usb_data *usb_priv;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ usb_priv = plat_priv->bus_priv;
+ if (!usb_priv) {
+ cnss_pr_err("usb_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ if (usb_priv->driver_ops) {
+ cnss_pr_err("Driver has already registered\n");
+ return -EEXIST;
+ }
+
+ ret = cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_REGISTER_DRIVER,
+ CNSS_EVENT_SYNC_UNINTERRUPTIBLE,
+ driver_ops);
+ return ret;
+}
+EXPORT_SYMBOL(cnss_usb_wlan_register_driver);
+
+void cnss_usb_wlan_unregister_driver(struct cnss_usb_wlan_driver *driver_ops)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return;
+ }
+
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
+ CNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
+}
+EXPORT_SYMBOL(cnss_usb_wlan_unregister_driver);
+
+int cnss_usb_register_driver_hdlr(struct cnss_usb_data *usb_priv,
+ void *data)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = usb_priv->plat_priv;
+
+ set_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+ usb_priv->driver_ops = data;
+
+ ret = cnss_bus_call_driver_probe(plat_priv);
+
+ return ret;
+}
+
+int cnss_usb_unregister_driver_hdlr(struct cnss_usb_data *usb_priv)
+{
+ struct cnss_plat_data *plat_priv = usb_priv->plat_priv;
+
+ set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
+ cnss_usb_dev_shutdown(usb_priv);
+ usb_priv->driver_ops = NULL;
+
+ return 0;
+}
+
+int cnss_usb_dev_shutdown(struct cnss_usb_data *usb_priv)
+{
+ int ret = 0;
+
+ if (!usb_priv) {
+ cnss_pr_err("usb_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ switch (usb_priv->device_id) {
+ case QCN7605_COMPOSITE_DEVICE_ID:
+ case QCN7605_STANDALONE_DEVICE_ID:
+ break;
+ default:
+ cnss_pr_err("Unknown device_id found: 0x%x\n",
+ usb_priv->device_id);
+ ret = -ENODEV;
+ }
+ return ret;
+}
+
+int cnss_usb_call_driver_probe(struct cnss_usb_data *usb_priv)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = usb_priv->plat_priv;
+
+ if (!usb_priv->driver_ops) {
+ cnss_pr_err("driver_ops is NULL\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
+ test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
+ ret = usb_priv->driver_ops->reinit(usb_priv->usb_intf,
+ usb_priv->usb_device_id);
+ if (ret) {
+ cnss_pr_err("Failed to reinit host driver, err = %d\n",
+ ret);
+ goto out;
+ }
+ clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ } else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
+ ret = usb_priv->driver_ops->probe(usb_priv->usb_intf,
+ usb_priv->usb_device_id);
+ if (ret) {
+ cnss_pr_err("Failed to probe host driver, err = %d\n",
+ ret);
+ goto out;
+ }
+ clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+ set_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
+ }
+
+ return 0;
+
+out:
+ return ret;
+}
+
+int cnss_usb_call_driver_remove(struct cnss_usb_data *usb_priv)
+{
+ struct cnss_plat_data *plat_priv = usb_priv->plat_priv;
+
+ if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state) ||
+ test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
+ cnss_pr_dbg("Skip driver remove\n");
+ return 0;
+ }
+
+ if (!usb_priv->driver_ops) {
+ cnss_pr_err("driver_ops is NULL\n");
+ return -EINVAL;
+ }
+
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
+ test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
+ usb_priv->driver_ops->shutdown(usb_priv->usb_intf);
+ } else if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
+ usb_priv->driver_ops->remove(usb_priv->usb_intf);
+ clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
+ }
+
+ return 0;
+}
+
+static struct usb_driver cnss_usb_driver;
+#define QCN7605_WLAN_STANDALONE_INTERFACE_NUM 0x0000
+#define QCN7605_WLAN_COMPOSITE_INTERFACE_NUM 0x0002
+
+static int cnss_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ int ret = 0;
+ struct cnss_usb_data *usb_priv;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct usb_device *usb_dev;
+ unsigned short bcd_device;
+
+ cnss_pr_dbg("USB probe, vendor ID: 0x%x, product ID: 0x%x\n",
+ id->idVendor, id->idProduct);
+
+ usb_dev = interface_to_usbdev(interface);
+ usb_priv = devm_kzalloc(&usb_dev->dev, sizeof(*usb_priv),
+ GFP_KERNEL);
+ if (!usb_priv) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ bcd_device = le16_to_cpu(usb_dev->descriptor.bcdDevice);
+ usb_priv->plat_priv = plat_priv;
+ usb_priv->usb_intf = interface;
+ usb_priv->usb_device_id = id;
+ usb_priv->device_id = id->idProduct;
+ usb_priv->target_version = bcd_device;
+ cnss_set_usb_priv(interface, usb_priv);
+ plat_priv->device_id = usb_priv->device_id;
+ plat_priv->bus_priv = usb_priv;
+
+ /* increment the ref count of the usb dev structure */
+ usb_get_dev(usb_dev);
+
+ ret = cnss_register_subsys(plat_priv);
+ if (ret)
+ goto reset_ctx;
+
+ ret = cnss_register_ramdump(plat_priv);
+ if (ret)
+ goto unregister_subsys;
+
+ switch (usb_priv->device_id) {
+ case QCN7605_COMPOSITE_DEVICE_ID:
+ case QCN7605_STANDALONE_DEVICE_ID:
+ break;
+ default:
+ cnss_pr_err("Unknown USB device found: 0x%x\n",
+ usb_priv->device_id);
+ ret = -ENODEV;
+ goto unregister_ramdump;
+ }
+
+ return 0;
+
+unregister_ramdump:
+ cnss_unregister_ramdump(plat_priv);
+unregister_subsys:
+ cnss_unregister_subsys(plat_priv);
+reset_ctx:
+ plat_priv->bus_priv = NULL;
+ devm_kfree(&usb_dev->dev, usb_priv);
+out:
+ return ret;
+}
+
+static void cnss_usb_remove(struct usb_interface *interface)
+{
+ struct usb_device *usb_dev;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct cnss_usb_data *usb_priv = plat_priv->bus_priv;
+
+ usb_priv->plat_priv = NULL;
+ plat_priv->bus_priv = NULL;
+ usb_dev = interface_to_usbdev(interface);
+ usb_put_dev(usb_dev);
+ devm_kfree(&usb_dev->dev, usb_priv);
+}
+
+static int cnss_usb_suspend(struct usb_interface *interface, pm_message_t state)
+{
+ int ret = 0;
+ struct cnss_usb_data *usb_priv;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+
+ usb_priv = plat_priv->bus_priv;
+ if (!usb_priv->driver_ops) {
+ cnss_pr_err("driver_ops is NULL\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = usb_priv->driver_ops->suspend(usb_priv->usb_intf,
+ state);
+out:
+ return ret;
+}
+
+static int cnss_usb_resume(struct usb_interface *interface)
+{
+ int ret = 0;
+ struct cnss_usb_data *usb_priv;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+
+ usb_priv = plat_priv->bus_priv;
+ if (!usb_priv->driver_ops) {
+ cnss_pr_err("driver_ops is NULL\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = usb_priv->driver_ops->resume(usb_priv->usb_intf);
+
+out:
+ return ret;
+}
+
+static int cnss_usb_reset_resume(struct usb_interface *interface)
+{
+ return 0;
+}
+
+static struct usb_device_id cnss_usb_id_table[] = {
+ { USB_DEVICE_INTERFACE_NUMBER(QCN7605_USB_VENDOR_ID,
+ QCN7605_COMPOSITE_PRODUCT_ID,
+ QCN7605_WLAN_COMPOSITE_INTERFACE_NUM) },
+ { USB_DEVICE_INTERFACE_NUMBER(QCN7605_USB_VENDOR_ID,
+ QCN7605_STANDALONE_PRODUCT_ID,
+ QCN7605_WLAN_STANDALONE_INTERFACE_NUM) },
+ {} /* Terminating entry */
+};
+
+static struct usb_driver cnss_usb_driver = {
+ .name = "cnss_usb",
+ .id_table = cnss_usb_id_table,
+ .probe = cnss_usb_probe,
+ .disconnect = cnss_usb_remove,
+ .suspend = cnss_usb_suspend,
+ .resume = cnss_usb_resume,
+ .reset_resume = cnss_usb_reset_resume,
+ .supports_autosuspend = true,
+};
+
+int cnss_usb_init(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ ret = usb_register(&cnss_usb_driver);
+ if (ret) {
+ cnss_pr_err("Failed to register to Linux USB framework, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ return 0;
+out:
+ return ret;
+}
+
+void cnss_usb_deinit(struct cnss_plat_data *plat_priv)
+{
+ usb_deregister(&cnss_usb_driver);
+}
--- /dev/null
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CNSS_USB_H
+#define _CNSS_USB_H
+
+#include <linux/usb.h>
+
+#include "main.h"
+
+struct cnss_usb_data {
+ struct usb_interface *usb_intf;
+ struct cnss_plat_data *plat_priv;
+ const struct usb_device_id *usb_device_id;
+ u16 device_id; /* USB PID */
+ u16 target_version; /* [QCN7605] - from bcdDevice */
+ struct cnss_usb_wlan_driver *driver_ops;
+};
+
+static inline void cnss_set_usb_priv(struct usb_interface *usb_intf, void *data)
+{
+ usb_set_intfdata(usb_intf, data);
+}
+
+static inline struct cnss_usb_data *cnss_get_usb_priv(struct usb_interface
+ *usb_intf)
+{
+ return usb_get_intfdata(usb_intf);
+}
+
+static inline struct cnss_plat_data *cnss_usb_priv_to_plat_priv(void *bus_priv)
+{
+ struct cnss_usb_data *usb_priv = bus_priv;
+
+ return usb_priv->plat_priv;
+}
+
+int cnss_usb_init(struct cnss_plat_data *plat_priv);
+void cnss_usb_deinit(struct cnss_plat_data *plat_priv);
+void cnss_usb_collect_dump_info(struct cnss_usb_data *usb_priv, bool in_panic);
+void cnss_usb_clear_dump_info(struct cnss_usb_data *usb_priv);
+int cnss_usb_force_fw_assert_hdlr(struct cnss_usb_data *usb_priv);
+void cnss_usb_fw_boot_timeout_hdlr(struct cnss_usb_data *usb_priv);
+int cnss_usb_call_driver_probe(struct cnss_usb_data *usb_priv);
+int cnss_usb_call_driver_remove(struct cnss_usb_data *usb_priv);
+int cnss_usb_dev_powerup(struct cnss_usb_data *usb_priv);
+int cnss_usb_dev_shutdown(struct cnss_usb_data *usb_priv);
+int cnss_usb_dev_crash_shutdown(struct cnss_usb_data *usb_priv);
+int cnss_usb_dev_ramdump(struct cnss_usb_data *usb_priv);
+
+int cnss_usb_register_driver_hdlr(struct cnss_usb_data *usb_priv, void *data);
+
+int cnss_usb_unregister_driver_hdlr(struct cnss_usb_data *usb_priv);
+int cnss_usb_call_driver_modem_status(struct cnss_usb_data *usb_priv,
+ int modem_current_status);
+
+#endif /* _CNSS_USB_H */
},
};
+static struct elem_info wlfw_rri_over_ddr_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_rri_over_ddr_cfg_s_v01,
+ base_addr_low),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct
+ wlfw_rri_over_ddr_cfg_s_v01,
+ base_addr_high),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_msi_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_msi_cfg_s_v01,
+ ce_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_msi_cfg_s_v01,
+ msi_vector),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
static struct elem_info wlfw_memory_region_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.ei_array = wlfw_shadow_reg_v2_cfg_s_v01_ei,
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ rri_over_ddr_cfg_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_rri_over_ddr_cfg_s_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ rri_over_ddr_cfg),
+ .ei_array = wlfw_rri_over_ddr_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ msi_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ msi_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_CE_V01,
+ .elem_size = sizeof(struct wlfw_msi_cfg_s_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ msi_cfg),
+ .ei_array = wlfw_msi_cfg_s_v01_ei,
+ },
+ {
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
cal_id),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct wlfw_initiate_cal_download_ind_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct wlfw_initiate_cal_download_ind_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct wlfw_initiate_cal_download_ind_msg_v01,
+ cal_data_location_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct wlfw_initiate_cal_download_ind_msg_v01,
+ cal_data_location),
+ },
+ {
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
end),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ cal_data_location_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ cal_data_location),
+ },
+ {
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
total_size),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_initiate_cal_update_ind_msg_v01,
+ cal_data_location_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_initiate_cal_update_ind_msg_v01,
+ cal_data_location),
+ },
+ {
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
end),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ cal_data_location_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ cal_data_location),
+ },
+ {
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
u32 addr;
};
+struct wlfw_rri_over_ddr_cfg_s_v01 {
+ u32 base_addr_low;
+ u32 base_addr_high;
+};
+
+struct wlfw_msi_cfg_s_v01 {
+ u16 ce_id;
+ u16 msi_vector;
+};
+
struct wlfw_memory_region_info_s_v01 {
u64 region_addr;
u32 size;
u32 shadow_reg_v2_len;
struct wlfw_shadow_reg_v2_cfg_s_v01
shadow_reg_v2[QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01];
+ u8 rri_over_ddr_cfg_valid;
+ struct wlfw_rri_over_ddr_cfg_s_v01 rri_over_ddr_cfg;
+ u8 msi_cfg_valid;
+ u32 msi_cfg_len;
+ struct wlfw_msi_cfg_s_v01 msi_cfg[QMI_WLFW_MAX_NUM_CE_V01];
};
#define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 803
struct wlfw_initiate_cal_download_ind_msg_v01 {
enum wlfw_cal_temp_id_enum_v01 cal_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 cal_data_location_valid;
+ u32 cal_data_location;
};
-#define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 7
+#define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 21
extern struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[];
struct wlfw_cal_download_req_msg_v01 {
u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
u8 end_valid;
u8 end;
+ u8 cal_data_location_valid;
+ u32 cal_data_location;
};
-#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178
+#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6185
extern struct elem_info wlfw_cal_download_req_msg_v01_ei[];
struct wlfw_cal_download_resp_msg_v01 {
struct wlfw_initiate_cal_update_ind_msg_v01 {
enum wlfw_cal_temp_id_enum_v01 cal_id;
u32 total_size;
+ u8 cal_data_location_valid;
+ u32 cal_data_location;
};
-#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 14
+#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 21
extern struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[];
struct wlfw_cal_update_req_msg_v01 {
u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
u8 end_valid;
u8 end;
+ u8 cal_data_location_valid;
+ u32 cal_data_location;
};
-#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6181
+#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6188
extern struct elem_info wlfw_cal_update_resp_msg_v01_ei[];
struct wlfw_msa_info_req_msg_v01 {
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_SHORT_GI_160 |
IEEE80211_VHT_CAP_TXSTBC |
- IEEE80211_VHT_CAP_RXSTBC_1 |
- IEEE80211_VHT_CAP_RXSTBC_2 |
- IEEE80211_VHT_CAP_RXSTBC_3 |
IEEE80211_VHT_CAP_RXSTBC_4 |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
sband->vht_cap.vht_mcs.rx_mcs_map =
while (buflen >= sizeof(*auth_req)) {
auth_req = (void *)buf;
+ if (buflen < le32_to_cpu(auth_req->length))
+ return;
type = "unknown";
flags = le32_to_cpu(auth_req->flags);
pairwise_error = false;
#include "wl12xx_80211.h"
#include "cmd.h"
#include "event.h"
+#include "ps.h"
#include "tx.h"
#include "hw_ops.h"
timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ return ret;
+
do {
if (time_after(jiffies, timeout_time)) {
wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
} while (!event);
out:
+ wl1271_ps_elp_sleep(wl);
kfree(events_vector);
return ret;
}
goto out;
}
+ if (nv->size <= 4) {
+ pr_err("wcnss: %s: request_firmware failed for %s (file size = %zu)\n",
+ __func__, NVBIN_FILE, nv->size);
+ goto out;
+ }
+
/* First 4 bytes in nv blob is validity bitmap.
* We cannot validate nv, so skip those 4 bytes.
*/
BUG_ON(pull_to <= skb_headlen(skb));
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
}
- BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
+ if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
+ queue->rx.rsp_cons = ++cons;
+ kfree_skb(nskb);
+ return ~0U;
+ }
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb_frag_page(nfrag),
skb->len += rx->status;
i = xennet_fill_frags(queue, skb, &tmpq);
+ if (unlikely(i == ~0U))
+ goto err;
if (rx->flags & XEN_NETRXF_csum_blank)
skb->ip_summed = CHECKSUM_PARTIAL;
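
xennet_fill_frags() now reports failure with ~0U instead of crashing on BUG_ON, and the caller drops the packet; since the function returns an unsigned ring index, all-ones can never be a legitimate value. A sketch of the sentinel convention, with a hypothetical do_fill helper:

        #include <linux/skbuff.h>

        static unsigned int do_fill(struct sk_buff *skb) { return 0; } /* hypothetical */

        /* ~0U can never be a valid ring index, so it doubles as an
         * in-band error sentinel for an unsigned return value. */
        static unsigned int fill_frags(struct sk_buff *skb)
        {
                if (skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)
                        return ~0U;
                return do_fill(skb);
        }
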
struct of_phandle_args args;
int i, rc;
+ if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+ return;
+
np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
if (!np) {
pr_err("missing testcase data\n");
struct of_phandle_args args;
int i, rc;
+ if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+ return;
+
np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
if (!np) {
pr_err("missing testcase data\n");
pdev = of_find_device_by_node(np);
unittest(pdev, "device 1 creation failed\n");
- irq = platform_get_irq(pdev, 0);
- unittest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq);
-
- /* Test that a parsing failure does not return -EPROBE_DEFER */
- np = of_find_node_by_path("/testcase-data/testcase-device2");
- pdev = of_find_device_by_node(np);
- unittest(pdev, "device 2 creation failed\n");
- irq = platform_get_irq(pdev, 0);
- unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq);
+ if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) {
+ irq = platform_get_irq(pdev, 0);
+ unittest(irq == -EPROBE_DEFER,
+ "device deferred probe failed - %d\n", irq);
+
+ /* Test that a parsing failure does not return -EPROBE_DEFER */
+ np = of_find_node_by_path("/testcase-data/testcase-device2");
+ pdev = of_find_device_by_node(np);
+ unittest(pdev, "device 2 creation failed\n");
+ irq = platform_get_irq(pdev, 0);
+ unittest(irq < 0 && irq != -EPROBE_DEFER,
+ "device parsing error failed - %d\n", irq);
+ }
np = of_find_node_by_path("/testcase-data/platform-tests");
unittest(np, "No testcase data in device tree\n");
EXPORT_SYMBOL(pci_save_state);
static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
- u32 saved_val, int retry)
+ u32 saved_val, int retry, bool force)
{
u32 val;
pci_read_config_dword(pdev, offset, &val);
- if (val == saved_val)
+ if (!force && val == saved_val)
return;
for (;;) {
}
static void pci_restore_config_space_range(struct pci_dev *pdev,
- int start, int end, int retry)
+ int start, int end, int retry,
+ bool force)
{
int index;
for (index = end; index >= start; index--)
pci_restore_config_dword(pdev, 4 * index,
pdev->saved_config_space[index],
- retry);
+ retry, force);
}
static void pci_restore_config_space(struct pci_dev *pdev)
{
if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
- pci_restore_config_space_range(pdev, 10, 15, 0);
+ pci_restore_config_space_range(pdev, 10, 15, 0, false);
/* Restore BARs before the command register. */
- pci_restore_config_space_range(pdev, 4, 9, 10);
- pci_restore_config_space_range(pdev, 0, 3, 0);
+ pci_restore_config_space_range(pdev, 4, 9, 10, false);
+ pci_restore_config_space_range(pdev, 0, 3, 0, false);
+ } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ pci_restore_config_space_range(pdev, 12, 15, 0, false);
+
+ /*
+ * Force rewriting of prefetch registers to avoid S3 resume
+ * issues on Intel PCI bridges that occur when these
+ * registers are not explicitly written.
+ */
+ pci_restore_config_space_range(pdev, 9, 11, 0, true);
+ pci_restore_config_space_range(pdev, 0, 8, 0, false);
} else {
- pci_restore_config_space_range(pdev, 0, 15, 0);
+ pci_restore_config_space_range(pdev, 0, 15, 0, false);
}
}
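
With the new force flag, the bridge path rewrites the prefetch window dwords even when the read-back already matches, which is the whole point of the Intel S3 workaround. A simplified sketch of the read-compare-write-unless-forced helper (the retry loop of the real code is omitted):

        #include <linux/pci.h>

        static void restore_dword(struct pci_dev *pdev, int off, u32 saved,
                                  bool force)
        {
                u32 val;

                pci_read_config_dword(pdev, off, &val);
                if (!force && val == saved)
                        return;         /* value already correct: skip write */
                pci_write_config_dword(pdev, off, saved);
        }
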
#define IPA_SPS_PROD_TIMEOUT_MSEC 100
+#define EP_EMPTY_MAX_RETRY 5
+#define IPA_BAM_REG_MAP_SIZE 4
+#define IPA_BAM_REG_N_OFST 0x1000
+
#ifdef CONFIG_COMPAT
#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_ADD_HDR, \
ipa2_apps_shutdown_apps_ep_reset();
+ iounmap_non_ap_bam_regs();
+
IPA_ACTIVE_CLIENTS_DEC_SPECIAL("APPS_SHUTDOWN");
return 0;
return 0;
}
+static void __iomem *ioremap_sw_desc_ofst_bam_register(int ep_idx)
+{
+ int ep_ofst = IPA_BAM_REG_N_OFST * ep_idx;
+
+ return ioremap(ipa_ctx->ipa_wrapper_base
+ + IPA_BAM_REG_BASE_OFST
+ + IPA_BAM_SW_DESC_OFST
+ + ep_ofst,
+ IPA_BAM_REG_MAP_SIZE);
+}
+
+static void __iomem *ioremap_peer_desc_ofst_bam_register(int ep_idx)
+{
+ int ep_ofst = IPA_BAM_REG_N_OFST * ep_idx;
+
+ return ioremap(ipa_ctx->ipa_wrapper_base
+ + IPA_BAM_REG_BASE_OFST
+ + IPA_BAM_PEER_DESC_OFST
+ + ep_ofst,
+ IPA_BAM_REG_MAP_SIZE);
+}
+
+/**
+ * ioremap_non_ap_bam_regs() - ioremap the BAM sw_desc and peer_desc
+ * registers of the non-apps eps. At present only the Q6 eps are done.
+ *
+ * Return codes:
+ * 0: success
+ * non-zero: memory mapping failure
+ */
+int ioremap_non_ap_bam_regs(void)
+{
+ int client_idx;
+ int ep_idx;
+
+ if (!ipa_ctx) {
+ IPAERR("IPA driver init not done\n");
+ return -ENODEV;
+ }
+
+ for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
+ if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx) ||
+ IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx) ||
+ IPA_CLIENT_IS_Q6_NON_ZIP_PROD(client_idx) ||
+ IPA_CLIENT_IS_Q6_ZIP_PROD(client_idx)) {
+
+ ep_idx = ipa2_get_ep_mapping(client_idx);
+
+ if (ep_idx == -1)
+ continue;
+
+ ipa_ctx->ipa_non_ap_bam_s_desc_iova[ep_idx] =
+ ioremap_sw_desc_ofst_bam_register(ep_idx);
+ ipa_ctx->ipa_non_ap_bam_p_desc_iova[ep_idx] =
+ ioremap_peer_desc_ofst_bam_register(ep_idx);
+
+ if (!ipa_ctx->ipa_non_ap_bam_s_desc_iova[ep_idx] ||
+ !ipa_ctx->ipa_non_ap_bam_p_desc_iova[ep_idx]) {
+				IPAERR("ioremap failure @ ep %d\n", ep_idx);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+/**
+ * iounmap_non_ap_bam_regs() - unmap the ioremapped BAM sw_desc and
+ * peer_desc registers of the non-apps eps.
+ */
+void iounmap_non_ap_bam_regs(void)
+{
+ int client_idx;
+ int ep_idx;
+
+ if (!ipa_ctx) {
+ IPAERR("IPA driver init not done\n");
+ return;
+ }
+
+ for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
+ if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx) ||
+ IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx) ||
+ IPA_CLIENT_IS_Q6_NON_ZIP_PROD(client_idx) ||
+ IPA_CLIENT_IS_Q6_ZIP_PROD(client_idx)) {
+
+ ep_idx = ipa2_get_ep_mapping(client_idx);
+
+ if (ep_idx == -1)
+ continue;
+
+ if (ipa_ctx->ipa_non_ap_bam_s_desc_iova[ep_idx])
+ iounmap
+ (ipa_ctx->ipa_non_ap_bam_s_desc_iova[ep_idx]);
+ if (ipa_ctx->ipa_non_ap_bam_p_desc_iova[ep_idx])
+ iounmap
+ (ipa_ctx->ipa_non_ap_bam_p_desc_iova[ep_idx]);
+ }
+}
+
+/**
+ * wait_for_ep_empty() - wait for an SPS BAM endpoint to become empty
+ *
+ * @client: ipa client to check for empty
+ *
+ * Return codes:
+ * 0: success, the ep is empty
+ * non-zero: the ep is still non-empty after retries
+ */
+int wait_for_ep_empty(enum ipa_client_type client)
+{
+ struct ipa_ep_context *ep = NULL;
+ u32 is_ep_empty = 0;
+ int ret = 0;
+ union ipa_bam_sw_peer_desc read_sw_desc;
+ union ipa_bam_sw_peer_desc read_peer_desc;
+ u32 retry = EP_EMPTY_MAX_RETRY;
+ int ep_idx = ipa2_get_ep_mapping(client);
+
+ if (ep_idx == -1)
+ return -ENODEV;
+
+ ep = &ipa_ctx->ep[ep_idx];
+
+check_ap_ep_empty:
+ if (ep->valid) {
+ ret = sps_is_pipe_empty(ep->ep_hdl, &is_ep_empty);
+		if (!ret && is_ep_empty) {
+			/* the AP ep has drained */
+			ret = 0;
+		} else if (retry--) {
+			usleep_range(IPA_UC_WAIT_MIN_SLEEP,
+				IPA_UC_WAII_MAX_SLEEP);
+			goto check_ap_ep_empty;
+		} else {
+			IPAERR("Ep %d is non-empty even after retries\n",
+				ep_idx);
+			ret = -1;
+		}
+ } else {
+ /* Might be Q6 ep, which is non-AP ep */
+
+check_non_ap_ep_empty:
+ IPADBG("request is for non-Apps ep %d\n", ep_idx);
+
+ if (IPA_CLIENT_IS_CONS(client)) {
+		/*
+		 * Do not wait for empty on client CONS eps.
+		 * Software is responsible for setting suspend/HOLB
+		 * on the client CONS ep, which drains the IPA on
+		 * its own.
+		 */
+ ret = 0;
+ goto success;
+ }
+
+ if (ipa_ctx->ipa_non_ap_bam_s_desc_iova[ep_idx] &&
+ ipa_ctx->ipa_non_ap_bam_p_desc_iova[ep_idx]) {
+
+ read_sw_desc.read_reg =
+ ioread32
+ (ipa_ctx->ipa_non_ap_bam_s_desc_iova[ep_idx]);
+ read_peer_desc.read_reg =
+ ioread32
+ (ipa_ctx->ipa_non_ap_bam_p_desc_iova[ep_idx]);
+
+ IPADBG("sw_desc reg 0x%x\n",
+ read_sw_desc.read_reg);
+ IPADBG("sw_dsc_ofst = 0x%x\n",
+ read_sw_desc.sw_desc.sw_dsc_ofst);
+			IPADBG("peer_desc reg 0x%x\n",
+				read_peer_desc.read_reg);
+ IPADBG("p_dsc_fifo_peer_ofst = 0x%x\n",
+ read_peer_desc.peer_desc.p_dsc_fifo_peer_ofst);
+
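+			/*
+			 * The pipe is drained once the BAM software
+			 * descriptor offset has caught up with the peer
+			 * descriptor fetch offset.
+			 */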
+ if (read_sw_desc.sw_desc.sw_dsc_ofst ==
+ read_peer_desc.peer_desc.p_dsc_fifo_peer_ofst) {
+ IPADBG("EP %d is empty\n", ep_idx);
+ ret = 0;
+ } else if (retry) {
+ retry--;
+ usleep_range(IPA_UC_WAIT_MIN_SLEEP,
+ IPA_UC_WAII_MAX_SLEEP);
+ goto check_non_ap_ep_empty;
+ } else {
+ IPAERR
+ ("Ep %d is non-empty even after retries\n",
+ ep_idx);
+ ret = -1;
+ }
+ }
+ }
+
+success:
+ return ret;
+}
+
/**
* ipa_q6_post_shutdown_cleanup() - A cleanup for the Q6 pipes
* in IPA HW after modem shutdown. This is performed
IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx) ||
IPA_CLIENT_IS_Q6_NON_ZIP_PROD(client_idx) ||
IPA_CLIENT_IS_Q6_ZIP_PROD(client_idx)) {
- res = ipa_uc_reset_pipe(client_idx);
+
+ if (ipa_ctx->is_apps_shutdown_support &&
+ (ipa2_get_ep_mapping(client_idx) != -1)) {
+ /*
+			 * Check that the Q6 ep is empty
+			 * before issuing a reset
+ */
+ res = wait_for_ep_empty(client_idx);
+ if (res)
+ IPAERR("ep %d not empty\n",
+ ipa2_get_ep_mapping(client_idx));
+ else
+ res = ipa_uc_reset_pipe(client_idx);
+ } else {
+ res = ipa_uc_reset_pipe(client_idx);
+ }
if (res)
BUG();
}
ipa_register_panic_hdlr();
+ if (ipa_ctx->is_apps_shutdown_support) {
+ result = ioremap_non_ap_bam_regs();
+		if (result) {
+			IPAERR(":IOREMAP failed (%d)\n", result);
+			goto fail_add_interrupt_handler;
+		}
+		IPADBG(":IOREMAP success\n");
+ }
pr_info("IPA driver initialization was successful.\n");
return 0;
struct cdev cdev;
unsigned long bam_handle;
struct ipa_ep_context ep[IPA_MAX_NUM_PIPES];
+ void __iomem *ipa_non_ap_bam_s_desc_iova[IPA_MAX_NUM_PIPES];
+ void __iomem *ipa_non_ap_bam_p_desc_iova[IPA_MAX_NUM_PIPES];
bool skip_ep_cfg_shadow[IPA_MAX_NUM_PIPES];
bool resume_on_connect[IPA_CLIENT_MAX];
struct ipa_flt_tbl flt_tbl[IPA_MAX_NUM_PIPES][IPA_IP_MAX];
int ipa_apps_shutdown_cleanup(void);
int register_ipa_platform_cb(int (*cb)(void));
int ipa_q6_post_shutdown_cleanup(void);
+int wait_for_ep_empty(enum ipa_client_type client);
+int ioremap_non_ap_bam_regs(void);
+void iounmap_non_ap_bam_regs(void);
int ipa_init_q6_smem(void);
int ipa_q6_monitor_holb_mitigation(bool enable);
#define NAT_TABLE_ENTRY_SIZE_BYTE 32
#define NAT_INTEX_TABLE_ENTRY_SIZE_BYTE 4
+/*
+ * The max number of NAT table entries is limited to 1000.
+ * Cap the memory size requested by user space to prevent kernel
+ * memory starvation.
+ */
+#define IPA_TABLE_MAX_ENTRIES 1000
+#define MAX_ALLOC_NAT_SIZE (IPA_TABLE_MAX_ENTRIES * NAT_TABLE_ENTRY_SIZE_BYTE)
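+/* 1000 entries * 32 bytes/entry = 32000 bytes of table memory at most */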
+
static int ipa_nat_vma_fault_remap(
struct vm_area_struct *vma, struct vm_fault *vmf)
{
goto bail;
}
+ if (mem->size > MAX_ALLOC_NAT_SIZE) {
+		IPAERR("requested size %zu exceeds max allowed %d\n",
+			mem->size, MAX_ALLOC_NAT_SIZE);
+ result = -EPERM;
+ goto bail;
+ }
+
if (mem->size <= 0 ||
nat_ctx->is_dev_init == true) {
IPAERR_RL("Invalid Parameters or device is already init\n");
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016,2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
*/
#define IPA_ENABLED_PIPES_OFST 0x000005DC
#define IPA_YELLOW_MARKER_SYS_CFG_OFST 0x00000728
+#define IPA_BAM_SW_DESC_OFST 0x00013800
+#define IPA_BAM_PEER_DESC_OFST 0x00013818
+
/*
* End of IPA 2.6/2.6L Registers
*/
struct ipa_rt_entry *rule_next;
struct ipa_rt_tbl_set *rset;
u32 apps_start_idx;
+ struct ipa_hdr_entry *hdr_entry;
+ struct ipa_hdr_proc_ctx_entry *hdr_proc_entry;
int id;
bool tbl_user = false;
if (!user_only ||
rule->ipacm_installed) {
list_del(&rule->link);
+ if (rule->hdr) {
+ hdr_entry = ipa_id_find(
+ rule->rule.hdr_hdl);
+ if (!hdr_entry ||
+ hdr_entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL(
+ "Header already deleted\n");
+ return -EINVAL;
+ }
+ } else if (rule->proc_ctx) {
+ hdr_proc_entry =
+ ipa_id_find(
+ rule->rule.hdr_proc_ctx_hdl);
+ if (!hdr_proc_entry ||
+ hdr_proc_entry->cookie !=
+ IPA_PROC_HDR_COOKIE) {
+ IPAERR_RL(
+ "Proc entry already deleted\n");
+ return -EINVAL;
+ }
+ }
tbl->rule_cnt--;
if (rule->hdr)
__ipa_release_hdr(rule->hdr->id);
#define NAT_TABLE_ENTRY_SIZE_BYTE 32
#define NAT_INTEX_TABLE_ENTRY_SIZE_BYTE 4
+/*
+ * The max number of NAT table entries is limited to 1000.
+ * Cap the memory size requested by user space to prevent kernel
+ * memory starvation.
+ */
+#define IPA_TABLE_MAX_ENTRIES 1000
+#define MAX_ALLOC_NAT_SIZE (IPA_TABLE_MAX_ENTRIES * NAT_TABLE_ENTRY_SIZE_BYTE)
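+/* 1000 entries * 32 bytes/entry = 32000 bytes of table memory at most */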
+
static int ipa3_nat_vma_fault_remap(
struct vm_area_struct *vma, struct vm_fault *vmf)
{
goto bail;
}
+ if (mem->size > MAX_ALLOC_NAT_SIZE) {
+		IPAERR("requested size %zu exceeds max allowed %d\n",
+			mem->size, MAX_ALLOC_NAT_SIZE);
+ result = -EPERM;
+ goto bail;
+ }
+
if (mem->size <= 0 ||
nat_ctx->is_dev_init == true) {
IPAERR_RL("Invalid Parameters or device is already init\n");
struct ipa3_rt_entry *rule;
struct ipa3_rt_entry *rule_next;
struct ipa3_rt_tbl_set *rset;
+ struct ipa3_hdr_entry *hdr_entry;
+ struct ipa3_hdr_proc_ctx_entry *hdr_proc_entry;
u32 apps_start_idx;
int id;
bool tbl_user = false;
if (!user_only ||
rule->ipacm_installed) {
list_del(&rule->link);
+ if (rule->hdr) {
+ hdr_entry = ipa3_id_find(
+ rule->rule.hdr_hdl);
+ if (!hdr_entry ||
+ hdr_entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL(
+ "Header already deleted\n");
+ return -EINVAL;
+ }
+ } else if (rule->proc_ctx) {
+ hdr_proc_entry =
+ ipa3_id_find(
+ rule->rule.hdr_proc_ctx_hdl);
+ if (!hdr_proc_entry ||
+ hdr_proc_entry->cookie !=
+ IPA_PROC_HDR_COOKIE) {
+ IPAERR_RL(
+ "Proc entry already deleted\n");
+ return -EINVAL;
+ }
+ }
tbl->rule_cnt--;
if (rule->hdr)
__ipa3_release_hdr(rule->hdr->id);
__ipa3_release_hdr_proc_ctx(
rule->proc_ctx->id);
rule->cookie = 0;
- idr_remove(&tbl->rule_ids, rule->rule_id);
+ if (!rule->rule_id_valid)
+ idr_remove(&tbl->rule_ids,
+ rule->rule_id);
id = rule->id;
kmem_cache_free(ipa3_ctx->rt_rule_cache, rule);
if (obj && obj->type == ACPI_TYPE_INTEGER)
*out_data = (u32) obj->integer.value;
}
+ kfree(output.pointer);
return status;
}
}
static struct device *vexpress_power_off_device;
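+/*
+ * Several vexpress devices may probe; the refcount below ensures the
+ * restart handler is registered with the kernel only once.
+ */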
+static atomic_t vexpress_restart_nb_refcnt = ATOMIC_INIT(0);
static void vexpress_power_off(void)
{
int err;
vexpress_restart_device = dev;
- err = register_restart_handler(&vexpress_restart_nb);
- if (err) {
- dev_err(dev, "cannot register restart handler (err=%d)\n", err);
- return err;
+ if (atomic_inc_return(&vexpress_restart_nb_refcnt) == 1) {
+ err = register_restart_handler(&vexpress_restart_nb);
+ if (err) {
+ dev_err(dev, "cannot register restart handler (err=%d)\n", err);
+ atomic_dec(&vexpress_restart_nb_refcnt);
+ return err;
+ }
}
device_create_file(dev, &dev_attr_active);
default:
dev_kfree_skb_any(skb);
QETH_CARD_TEXT(card, 3, "inbunkno");
- QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+ QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
continue;
}
work_done++;
default:
dev_kfree_skb_any(skb);
QETH_CARD_TEXT(card, 3, "inbunkno");
- QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+ QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
continue;
}
work_done++;
BNX2X_DOORBELL_PCI_BAR);
reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+ if (!ep->qp.ctx_base)
+ return -ENOMEM;
goto arm_cq;
}
static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
static int fast_fail = 1;
static int client_reserve = 1;
-static char partition_name[97] = "UNKNOWN";
+static char partition_name[96] = "UNKNOWN";
static unsigned int partition_number = -1;
static struct scsi_transport_template *ibmvscsi_transport_template;
ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
if (ppartition_name)
- strncpy(partition_name, ppartition_name,
+ strlcpy(partition_name, ppartition_name,
sizeof(partition_name));
p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
if (p_number_ptr)
obj-$(CONFIG_MSM_SMEM_LOGGING) += smem_log.o
obj-$(CONFIG_MSM_SYSMON_GLINK_COMM) += sysmon-glink.o sysmon-qmi.o
obj-$(CONFIG_ARCH_MSM8996) += kryo-l2-accessors.o
-obj-$(CONFIG_MSM_SMP2P) += smp2p.o smp2p_debug.o smp2p_sleepstate.o
-obj-$(CONFIG_MSM_SMP2P_TEST) += smp2p_loopback.o smp2p_test.o smp2p_spinlock_test.o
+obj-$(CONFIG_MSM_SMP2P) += smp2p.o smp2p_loopback.o smp2p_debug.o smp2p_sleepstate.o
obj-$(CONFIG_MSM_QMI_INTERFACE) += qmi_interface.o
obj-$(CONFIG_MSM_RPM_SMD) += rpm-smd.o
obj-$(CONFIG_MSM_HVC) += hvc.o
.openlock = __SPIN_LOCK_UNLOCKED(&hab_devices[__num__].openlock)\
}
-static const char hab_info_str[] = "Change: 16764735 Revision: #76";
+static const char hab_info_str[] = "Change: 17280941 Revision: #81";
/*
* The following has to match habmm definitions, order does not matter if
HAB_DEVICE_CNSTR(DEVICE_DISP5_NAME, MM_DISP_5, 10),
HAB_DEVICE_CNSTR(DEVICE_GFX_NAME, MM_GFX, 11),
HAB_DEVICE_CNSTR(DEVICE_VID_NAME, MM_VID, 12),
- HAB_DEVICE_CNSTR(DEVICE_MISC_NAME, MM_MISC, 13),
- HAB_DEVICE_CNSTR(DEVICE_QCPE1_NAME, MM_QCPE_VM1, 14),
- HAB_DEVICE_CNSTR(DEVICE_QCPE2_NAME, MM_QCPE_VM2, 15),
- HAB_DEVICE_CNSTR(DEVICE_QCPE3_NAME, MM_QCPE_VM3, 16),
- HAB_DEVICE_CNSTR(DEVICE_QCPE4_NAME, MM_QCPE_VM4, 17),
- HAB_DEVICE_CNSTR(DEVICE_CLK1_NAME, MM_CLK_VM1, 18),
- HAB_DEVICE_CNSTR(DEVICE_CLK2_NAME, MM_CLK_VM2, 19),
- HAB_DEVICE_CNSTR(DEVICE_FDE1_NAME, MM_FDE_1, 20),
- HAB_DEVICE_CNSTR(DEVICE_BUFFERQ1_NAME, MM_BUFFERQ_1, 21),
+ HAB_DEVICE_CNSTR(DEVICE_VID2_NAME, MM_VID_2, 13),
+ HAB_DEVICE_CNSTR(DEVICE_MISC_NAME, MM_MISC, 14),
+ HAB_DEVICE_CNSTR(DEVICE_QCPE1_NAME, MM_QCPE_VM1, 15),
+ HAB_DEVICE_CNSTR(DEVICE_CLK1_NAME, MM_CLK_VM1, 16),
+ HAB_DEVICE_CNSTR(DEVICE_CLK2_NAME, MM_CLK_VM2, 17),
+ HAB_DEVICE_CNSTR(DEVICE_FDE1_NAME, MM_FDE_1, 18),
+ HAB_DEVICE_CNSTR(DEVICE_BUFFERQ1_NAME, MM_BUFFERQ_1, 19),
};
struct hab_driver hab_driver = {
read_lock(&ctx->ctx_lock);
list_for_each_entry(vchan, &ctx->vchannels, node) {
if (vcid == vchan->id) {
- kref_get(&vchan->refcount);
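+			/*
+			 * The vchan may be concurrently torn down; only take
+			 * a reference if it is still live and not closing on
+			 * either end.
+			 */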
+ if (vchan->otherend_closed || vchan->closed ||
+ !kref_get_unless_zero(&vchan->refcount)) {
+ pr_debug("failed to inc vcid %x remote %x session %d refcnt %d close_flg remote %d local %d\n",
+ vchan->id, vchan->otherend_id,
+ vchan->session_id,
+ get_refcnt(vchan->refcount),
+ vchan->otherend_closed, vchan->closed);
+ vchan = NULL;
+ }
read_unlock(&ctx->ctx_lock);
return vchan;
}
/* notify remote side on vchan closing */
list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) {
list_del(&vchan->node); /* vchan is not in this ctx anymore */
- hab_vchan_stop_notify(vchan);
+
+ if (!vchan->closed) { /* locally hasn't closed yet */
+ if (!kref_get_unless_zero(&vchan->refcount)) {
+ pr_err("vchan %x %x refcnt %d mismanaged closed %d remote closed %d\n",
+ vchan->id,
+ vchan->otherend_id,
+ get_refcnt(vchan->refcount),
+ vchan->closed, vchan->otherend_closed);
+ continue; /* vchan is already being freed */
+ } else {
+ hab_vchan_stop_notify(vchan);
+ /* put for notify. shouldn't cause free */
+ hab_vchan_put(vchan);
+ }
+ } else
+ continue;
+
write_unlock(&ctx->ctx_lock);
- if (!vchan->closed) {
- pr_warn("potential leak vc %pK %x remote %x session %d refcnt %d\n",
- vchan, vchan->id, vchan->otherend_id,
- vchan->session_id,
- get_refcnt(vchan->refcount));
- hab_vchan_put(vchan); /* there is a lock inside */
- }
+ hab_vchan_put(vchan); /* there is a lock inside */
write_lock(&ctx->ctx_lock);
}
dev_t dev;
place_marker("M - HAB INIT Start");
-
result = alloc_chrdev_region(&hab_driver.major, 0, 1, "hab");
if (result < 0) {
} else
set_dma_ops(hab_driver.dev, &hab_dma_ops);
}
-
hab_stat_init(&hab_driver);
-
place_marker("M - HAB INIT End");
-
return result;
err:
#include <linux/reboot.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
+#include <linux/delay.h>
#include <soc/qcom/boot_stats.h>
enum hab_payload_type {
#define DEVICE_DISP5_NAME "hab_disp5"
#define DEVICE_GFX_NAME "hab_ogles"
#define DEVICE_VID_NAME "hab_vid"
+#define DEVICE_VID2_NAME "hab_vid2"
#define DEVICE_MISC_NAME "hab_misc"
#define DEVICE_QCPE1_NAME "hab_qcpe_vm1"
-#define DEVICE_QCPE2_NAME "hab_qcpe_vm2"
-#define DEVICE_QCPE3_NAME "hab_qcpe_vm3"
-#define DEVICE_QCPE4_NAME "hab_qcpe_vm4"
#define DEVICE_CLK1_NAME "hab_clock_vm1"
#define DEVICE_CLK2_NAME "hab_clock_vm2"
#define DEVICE_FDE1_NAME "hab_fde1"
};
struct virtual_channel {
+ struct list_head node; /* for ctx */
+ struct list_head pnode; /* for pchan */
/*
* refcount is used to track the references from hab core to the virtual
* channel such as references from physical channels,
struct kref refcount;
struct physical_channel *pchan;
struct uhab_context *ctx;
- struct list_head node; /* for ctx */
- struct list_head pnode; /* for pchan */
struct list_head rx_list;
wait_queue_head_t rx_queue;
spinlock_t rx_lock;
#include "hab.h"
#include "hab_ghs.h"
+#define GIPC_VM_SET_CNT 22
static const char * const dt_gipc_path_name[] = {
"testgipc1",
"testgipc2",
"testgipc22",
};
+
+/*
+ * Same mmid assignment for all the VMs; the order must match
+ * dt_gipc_path_name. Zero entries are retired slots kept as placeholders
+ * so the set-based index arithmetic stays aligned.
+ */
+int mmid_order[GIPC_VM_SET_CNT] = {
+ MM_AUD_1,
+ MM_AUD_2,
+ MM_AUD_3,
+ MM_AUD_4,
+ MM_CAM_1,
+ MM_CAM_2,
+ MM_DISP_1,
+ MM_DISP_2,
+ MM_DISP_3,
+ MM_DISP_4,
+ MM_DISP_5,
+ MM_GFX,
+ MM_VID,
+ MM_MISC,
+ MM_QCPE_VM1,
+ MM_VID_2, /* newly recycled */
+ 0,
+ 0,
+ MM_CLK_VM1,
+ MM_CLK_VM2,
+ MM_FDE_1,
+ MM_BUFFERQ_1,
+};
+
static struct ghs_vmm_plugin_info_s {
const char * const *dt_name;
+ int *mmid_dt_mapping;
int curr;
int probe_cnt;
} ghs_vmm_plugin_info = {
dt_gipc_path_name,
+ mmid_order,
0,
ARRAY_SIZE(dt_gipc_path_name),
};
tasklet_schedule(&dev->task);
}
+static int get_dt_name_idx(int vmid_base, int mmid,
+ struct ghs_vmm_plugin_info_s *plugin_info)
+{
+ int idx = -1;
+ int i;
+
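+	/*
+	 * Each VM contributes one contiguous set of GIPC_VM_SET_CNT dt
+	 * entries, so the dt name index is the VM's set base plus the
+	 * position of the mmid within mmid_order.
+	 */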
+	if (vmid_base < 0 || vmid_base >= plugin_info->probe_cnt /
+		GIPC_VM_SET_CNT) {
+		pr_err("vmid %d overflow expected max %d\n", vmid_base,
+			plugin_info->probe_cnt / GIPC_VM_SET_CNT);
+		return idx;
+	}
+
+ for (i = 0; i < GIPC_VM_SET_CNT; i++) {
+ if (mmid == plugin_info->mmid_dt_mapping[i]) {
+ idx = vmid_base * GIPC_VM_SET_CNT + i;
+			if (idx >= plugin_info->probe_cnt) {
+ pr_err("dt name idx %d overflow max %d\n",
+ idx, plugin_info->probe_cnt);
+ idx = -1;
+ }
+ break;
+ }
+ }
+ return idx;
+}
+
/* static struct physical_channel *habhyp_commdev_alloc(int id) */
int habhyp_commdev_alloc(void **commdev, int is_be, char *name, int vmid_remote,
struct hab_device *mmid_device)
struct physical_channel *pchan = NULL;
struct physical_channel **ppchan = (struct physical_channel **)commdev;
int ret = 0;
+ int dt_name_idx = 0;
if (ghs_vmm_plugin_info.curr > ghs_vmm_plugin_info.probe_cnt) {
pr_err("too many commdev alloc %d, supported is %d\n",
gvh_dn = of_find_node_by_path("/aliases");
if (gvh_dn) {
const char *ep_path = NULL;
- struct device_node *ep_dn;
+ struct device_node *ep_dn = NULL;
+
+ dt_name_idx = get_dt_name_idx(vmid_remote,
+ mmid_device->id,
+ &ghs_vmm_plugin_info);
+ if (dt_name_idx < 0) {
+ pr_err("failed to find %s for vmid %d ret %d\n",
+ mmid_device->name,
+ mmid_device->id,
+ dt_name_idx);
+ ret = -ENOENT;
+ goto err;
+ }
ret = of_property_read_string(gvh_dn,
- ghs_vmm_plugin_info.dt_name[ghs_vmm_plugin_info.curr],
- &ep_path);
+ ghs_vmm_plugin_info.dt_name[dt_name_idx],
+ &ep_path);
if (ret)
- pr_err("failed to read endpoint string ret %d\n",
+ pr_err("failed to read endpoint str ret %d\n",
ret);
of_node_put(gvh_dn);
of_node_put(ep_dn);
if (IS_ERR(dev->endpoint)) {
ret = PTR_ERR(dev->endpoint);
- pr_err("KGIPC alloc failed id: %d, ret: %d\n",
- ghs_vmm_plugin_info.curr, ret);
+ pr_err("alloc failed %d %s ret %d\n",
+ dt_name_idx, mmid_device->name,
+ ret);
goto err;
} else {
- pr_debug("gipc ep found for %d\n",
- ghs_vmm_plugin_info.curr);
+ pr_debug("gipc ep found for %d %s\n",
+ dt_name_idx, mmid_device->name);
}
} else {
- pr_err("of_parse_phandle failed id: %d\n",
- ghs_vmm_plugin_info.curr);
+ pr_err("of_parse_phandle failed id %d %s\n",
+ dt_name_idx, mmid_device->name);
ret = -ENOENT;
goto err;
}
} else {
- pr_err("of_find_compatible_node failed id: %d\n",
- ghs_vmm_plugin_info.curr);
+ pr_err("of_find_compatible_node failed id %d %s\n",
+ dt_name_idx, mmid_device->name);
ret = -ENOENT;
goto err;
}
pchan->hyp_data = (void *)dev;
pchan->is_be = is_be;
strlcpy(dev->name, name, sizeof(dev->name));
+ strlcpy(pchan->name, name, sizeof(pchan->name));
*ppchan = pchan;
dev->read_data = kmalloc(GIPC_RECV_BUFF_SIZE_BYTES, GFP_KERNEL);
if (!dev->read_data) {
struct page **pages;
long npages;
uint64_t index; /* for mmap first call */
- int kernel;
void *kva;
- void *uva;
- int refcntk;
- int refcntu;
uint32_t userflags;
struct file *filp_owner;
struct file *filp_mapper;
- struct dma_buf *dmabuf;
int32_t export_id;
int32_t vcid;
struct physical_channel *pchan;
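+	/*
+	 * refcount guards the pages/kva lifetime across the import list,
+	 * dma-buf users and mmap'ed vmas.
+	 */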
+ struct kref refcount;
};
struct importer_context {
rwlock_t implist_lock;
};
+static struct pages_list *pages_list_create(
+ void *imp_ctx,
+ struct export_desc *exp,
+ uint32_t userflags)
+{
+ struct page **pages;
+ struct compressed_pfns *pfn_table =
+ (struct compressed_pfns *)exp->payload;
+ struct pages_list *pglist;
+ unsigned long pfn;
+ int i, j, k = 0, size;
+
+ if (!pfn_table)
+ return ERR_PTR(-EINVAL);
+
+ size = exp->payload_count * sizeof(struct page *);
+ pages = kmalloc(size, GFP_KERNEL);
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+
+ pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
+ if (!pglist) {
+ kfree(pages);
+ return ERR_PTR(-ENOMEM);
+ }
+
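+	/*
+	 * Expand the compressed pfn table: each region is a run of
+	 * contiguous pfns followed by a hole ("space") before the next run.
+	 */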
+ pfn = pfn_table->first_pfn;
+ for (i = 0; i < pfn_table->nregions; i++) {
+ for (j = 0; j < pfn_table->region[i].size; j++) {
+ pages[k] = pfn_to_page(pfn+j);
+ k++;
+ }
+ pfn += pfn_table->region[i].size + pfn_table->region[i].space;
+ }
+
+ pglist->pages = pages;
+ pglist->npages = exp->payload_count;
+ pglist->userflags = userflags;
+ pglist->export_id = exp->export_id;
+ pglist->vcid = exp->vcid_remote;
+ pglist->pchan = exp->pchan;
+
+ kref_init(&pglist->refcount);
+
+ return pglist;
+}
+
+static void pages_list_destroy(struct kref *refcount)
+{
+ struct pages_list *pglist = container_of(refcount,
+ struct pages_list, refcount);
+
+ if (pglist->kva)
+ vunmap(pglist->kva);
+
+ kfree(pglist->pages);
+
+ kfree(pglist);
+}
+
+static void pages_list_get(struct pages_list *pglist)
+{
+ kref_get(&pglist->refcount);
+}
+
+static int pages_list_put(struct pages_list *pglist)
+{
+ return kref_put(&pglist->refcount, pages_list_destroy);
+}
+
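+/* look up a pages_list by (export_id, pchan); returns with a reference held */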
+static struct pages_list *pages_list_lookup(
+ struct importer_context *imp_ctx,
+ uint32_t export_id, struct physical_channel *pchan)
+{
+ struct pages_list *pglist, *tmp;
+
+ read_lock(&imp_ctx->implist_lock);
+ list_for_each_entry_safe(pglist, tmp, &imp_ctx->imp_list, list) {
+ if (pglist->export_id == export_id &&
+ pglist->pchan == pchan) {
+ pages_list_get(pglist);
+ read_unlock(&imp_ctx->implist_lock);
+ return pglist;
+ }
+ }
+ read_unlock(&imp_ctx->implist_lock);
+
+ return NULL;
+}
+
+static void pages_list_add(struct importer_context *imp_ctx,
+ struct pages_list *pglist)
+{
+ pages_list_get(pglist);
+
+ write_lock(&imp_ctx->implist_lock);
+ list_add_tail(&pglist->list, &imp_ctx->imp_list);
+ imp_ctx->cnt++;
+ write_unlock(&imp_ctx->implist_lock);
+}
+
+static void pages_list_remove(struct importer_context *imp_ctx,
+ struct pages_list *pglist)
+{
+ write_lock(&imp_ctx->implist_lock);
+ list_del(&pglist->list);
+ imp_ctx->cnt--;
+ write_unlock(&imp_ctx->implist_lock);
+
+ pages_list_put(pglist);
+}
+
void *habmm_hyp_allocate_grantable(int page_count,
uint32_t *sizebytes)
{
for (j = 0; j < (s->length >> PAGE_SHIFT); j++) {
pages[rc] = nth_page(page, j);
rc++;
- if (WARN_ON(rc >= page_count))
+ if (rc >= page_count)
break;
}
+
+ if (rc >= page_count)
+ break;
}
err:
if (!priv)
return;
- list_for_each_entry_safe(pglist, pglist_tmp, &priv->imp_list, list) {
- if (kernel && pglist->kva)
- vunmap(pglist->kva);
-
- list_del(&pglist->list);
- priv->cnt--;
-
- kfree(pglist->pages);
- kfree(pglist);
- }
+ list_for_each_entry_safe(pglist, pglist_tmp, &priv->imp_list, list)
+ pages_list_remove(priv, pglist);
kfree(priv);
}
static void hab_map_open(struct vm_area_struct *vma)
{
+ struct pages_list *pglist =
+ (struct pages_list *)vma->vm_private_data;
+
+ pages_list_get(pglist);
}
static void hab_map_close(struct vm_area_struct *vma)
{
+ struct pages_list *pglist =
+ (struct pages_list *)vma->vm_private_data;
+
+ pages_list_put(pglist);
+ vma->vm_private_data = NULL;
}
static const struct vm_operations_struct habmem_vm_ops = {
.close = hab_map_close,
};
+static int hab_buffer_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct pages_list *pglist = vma->vm_private_data;
+ pgoff_t page_offset;
+ int ret;
+
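+	/* demand-fault a single page of the imported buffer into the vma */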
+ page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+ PAGE_SHIFT;
+
+	if (page_offset >= pglist->npages)
+ return VM_FAULT_SIGBUS;
+
+ ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
+ pglist->pages[page_offset]);
+
+ switch (ret) {
+ case 0:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ case -EBUSY:
+ return VM_FAULT_RETRY;
+ case -EFAULT:
+ case -EINVAL:
+ return VM_FAULT_SIGBUS;
+ default:
+ WARN_ON(1);
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+static void hab_buffer_open(struct vm_area_struct *vma)
+{
+}
+
+static void hab_buffer_close(struct vm_area_struct *vma)
+{
+}
+
+static const struct vm_operations_struct hab_buffer_vm_ops = {
+ .fault = hab_buffer_fault,
+ .open = hab_buffer_open,
+ .close = hab_buffer_close,
+};
+
static int hab_mem_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
struct pages_list *pglist = dmabuf->priv;
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_ops = &habmem_vm_ops;
+ vma->vm_ops = &hab_buffer_vm_ops;
vma->vm_private_data = pglist;
vma->vm_flags |= VM_MIXEDMAP;
static void hab_mem_dma_buf_release(struct dma_buf *dmabuf)
{
+ struct pages_list *pglist = dmabuf->priv;
+
+ pages_list_put(pglist);
}
static void *hab_mem_dma_buf_kmap(struct dma_buf *dmabuf,
uint32_t userflags,
int32_t *pfd)
{
- struct page **pages;
- struct compressed_pfns *pfn_table =
- (struct compressed_pfns *)exp->payload;
struct pages_list *pglist;
struct importer_context *priv = imp_ctx;
- unsigned long pfn;
- int i, j, k = 0;
- pgprot_t prot = PAGE_KERNEL;
- int32_t fd, size;
+ int32_t fd = -1;
-	int ret;
+	int ret = 0;
+ struct dma_buf *dmabuf;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- if (!pfn_table || !priv)
+ if (!priv)
return -EINVAL;
- size = exp->payload_count * sizeof(struct page *);
- pages = kmalloc(size, GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
- if (!pglist) {
- kfree(pages);
- return -ENOMEM;
- }
- pfn = pfn_table->first_pfn;
- for (i = 0; i < pfn_table->nregions; i++) {
- for (j = 0; j < pfn_table->region[i].size; j++) {
- pages[k] = pfn_to_page(pfn+j);
- k++;
- }
- pfn += pfn_table->region[i].size + pfn_table->region[i].space;
- }
+ pglist = pages_list_lookup(priv, exp->export_id, exp->pchan);
+ if (pglist)
+ goto buffer_ready;
- pglist->pages = pages;
- pglist->npages = exp->payload_count;
- pglist->kernel = 0;
- pglist->index = 0;
- pglist->refcntk = pglist->refcntu = 0;
- pglist->userflags = userflags;
- pglist->export_id = exp->export_id;
- pglist->vcid = exp->vcid_remote;
- pglist->pchan = exp->pchan;
+ pglist = pages_list_create(imp_ctx, exp, userflags);
+ if (IS_ERR(pglist))
+ return PTR_ERR(pglist);
- if (!(userflags & HABMM_IMPORT_FLAGS_CACHED))
- prot = pgprot_writecombine(prot);
+ pages_list_add(priv, pglist);
+buffer_ready:
exp_info.ops = &dma_buf_ops;
- exp_info.size = exp->payload_count << PAGE_SHIFT;
+ exp_info.size = pglist->npages << PAGE_SHIFT;
exp_info.flags = O_RDWR;
exp_info.priv = pglist;
- pglist->dmabuf = dma_buf_export(&exp_info);
- if (IS_ERR(pglist->dmabuf)) {
- ret = PTR_ERR(pglist->dmabuf);
- kfree(pages);
- kfree(pglist);
- return ret;
+ dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dmabuf)) {
+ pr_err("export to dmabuf failed\n");
+ ret = PTR_ERR(dmabuf);
+ goto proc_end;
}
+ pages_list_get(pglist);
- fd = dma_buf_fd(pglist->dmabuf, O_CLOEXEC);
+ fd = dma_buf_fd(dmabuf, O_CLOEXEC);
if (fd < 0) {
- dma_buf_put(pglist->dmabuf);
- kfree(pages);
- kfree(pglist);
- return -EINVAL;
+ pr_err("dma buf to fd failed\n");
+ dma_buf_put(dmabuf);
+ ret = -EINVAL;
+ goto proc_end;
}
- pglist->refcntk++;
-
- write_lock(&priv->implist_lock);
- list_add_tail(&pglist->list, &priv->imp_list);
- priv->cnt++;
- write_unlock(&priv->implist_lock);
-
+proc_end:
*pfd = fd;
-
+ pages_list_put(pglist);
-	return 0;
+	return ret;
}
uint32_t userflags,
void **pkva)
{
- struct page **pages;
- struct compressed_pfns *pfn_table =
- (struct compressed_pfns *)exp->payload;
struct pages_list *pglist;
struct importer_context *priv = imp_ctx;
- unsigned long pfn;
- int i, j, k = 0, size;
pgprot_t prot = PAGE_KERNEL;
- if (!pfn_table || !priv)
+ if (!priv)
return -EINVAL;
- size = exp->payload_count * sizeof(struct page *);
- pages = kmalloc(size, GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
- pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
- if (!pglist) {
- kfree(pages);
- return -ENOMEM;
- }
- pfn = pfn_table->first_pfn;
- for (i = 0; i < pfn_table->nregions; i++) {
- for (j = 0; j < pfn_table->region[i].size; j++) {
- pages[k] = pfn_to_page(pfn+j);
- k++;
- }
- pfn += pfn_table->region[i].size + pfn_table->region[i].space;
- }
+ pglist = pages_list_lookup(priv, exp->export_id, exp->pchan);
+ if (pglist)
+ goto buffer_ready;
- pglist->pages = pages;
- pglist->npages = exp->payload_count;
- pglist->kernel = 1;
- pglist->refcntk = pglist->refcntu = 0;
- pglist->userflags = userflags;
- pglist->export_id = exp->export_id;
- pglist->vcid = exp->vcid_remote;
- pglist->pchan = exp->pchan;
+ pglist = pages_list_create(imp_ctx, exp, userflags);
+ if (IS_ERR(pglist))
+ return PTR_ERR(pglist);
+
+ pages_list_add(priv, pglist);
+
+buffer_ready:
+ if (pglist->kva)
+ goto pro_end;
+
+ if (pglist->userflags != userflags) {
+ pr_info("exp %d: userflags: 0x%x -> 0x%x\n",
+ exp->export_id, pglist->userflags, userflags);
+ pglist->userflags = userflags;
+ }
if (!(userflags & HABMM_IMPORT_FLAGS_CACHED))
prot = pgprot_writecombine(prot);
pglist->kva = vmap(pglist->pages, pglist->npages, VM_MAP, prot);
if (pglist->kva == NULL) {
- kfree(pages);
pr_err("%ld pages vmap failed\n", pglist->npages);
- kfree(pglist);
return -ENOMEM;
}
- pr_debug("%ld pages vmap pass, return %p\n",
- pglist->npages, pglist->kva);
-
- pglist->refcntk++;
-
- write_lock(&priv->implist_lock);
- list_add_tail(&pglist->list, &priv->imp_list);
- priv->cnt++;
- write_unlock(&priv->implist_lock);
-
+pro_end:
*pkva = pglist->kva;
-
+ pages_list_put(pglist);
return 0;
}
uint32_t userflags,
uint64_t *index)
{
- struct page **pages;
- struct compressed_pfns *pfn_table =
- (struct compressed_pfns *)exp->payload;
struct pages_list *pglist;
struct importer_context *priv = imp_ctx;
- unsigned long pfn;
- int i, j, k = 0, size;
- if (!pfn_table || !priv)
+ if (!priv)
return -EINVAL;
- size = exp->payload_count * sizeof(struct page *);
- pages = kmalloc(size, GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
- pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
- if (!pglist) {
- kfree(pages);
- return -ENOMEM;
- }
+ pglist = pages_list_lookup(priv, exp->export_id, exp->pchan);
+ if (pglist)
+ goto buffer_ready;
- pfn = pfn_table->first_pfn;
- for (i = 0; i < pfn_table->nregions; i++) {
- for (j = 0; j < pfn_table->region[i].size; j++) {
- pages[k] = pfn_to_page(pfn+j);
- k++;
- }
- pfn += pfn_table->region[i].size + pfn_table->region[i].space;
- }
+ pglist = pages_list_create(imp_ctx, exp, userflags);
+ if (IS_ERR(pglist))
+ return PTR_ERR(pglist);
- pglist->pages = pages;
- pglist->npages = exp->payload_count;
- pglist->index = page_to_phys(pages[0]) >> PAGE_SHIFT;
- pglist->refcntk = pglist->refcntu = 0;
- pglist->userflags = userflags;
- pglist->export_id = exp->export_id;
- pglist->vcid = exp->vcid_remote;
- pglist->pchan = exp->pchan;
+ pages_list_add(priv, pglist);
- write_lock(&priv->implist_lock);
- list_add_tail(&pglist->list, &priv->imp_list);
- priv->cnt++;
- write_unlock(&priv->implist_lock);
+buffer_ready:
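+	/* the physical index (first page's pfn) is computed only once */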
+ if (pglist->index)
+ goto proc_end;
- *index = pglist->index << PAGE_SHIFT;
+ pglist->index = page_to_phys(pglist->pages[0]) >> PAGE_SHIFT;
+proc_end:
+ *index = pglist->index << PAGE_SHIFT;
+ pages_list_put(pglist);
return 0;
}
int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp, int kernel)
{
struct importer_context *priv = imp_ctx;
- struct pages_list *pglist, *tmp;
- int found = 0;
-
- write_lock(&priv->implist_lock);
- list_for_each_entry_safe(pglist, tmp, &priv->imp_list, list) {
- if (pglist->export_id == exp->export_id &&
- pglist->pchan == exp->pchan) {
- found = 1;
- list_del(&pglist->list);
- priv->cnt--;
- break;
- }
- }
- write_unlock(&priv->implist_lock);
+ struct pages_list *pglist;
- if (!found) {
+ pglist = pages_list_lookup(priv, exp->export_id, exp->pchan);
+ if (!pglist) {
pr_err("failed to find export id %u\n", exp->export_id);
return -EINVAL;
}
- pr_debug("detach pglist %p, kernel %d, list cnt %d\n",
- pglist, pglist->kernel, priv->cnt);
+ pages_list_remove(priv, pglist);
- if (pglist->kva)
- vunmap(pglist->kva);
-
- if (pglist->dmabuf)
- dma_buf_put(pglist->dmabuf);
-
- kfree(pglist->pages);
- kfree(pglist);
+ pages_list_put(pglist);
return 0;
}
long length = vma->vm_end - vma->vm_start;
struct pages_list *pglist;
int bfound = 0;
+ int ret = 0;
read_lock(&imp_ctx->implist_lock);
list_for_each_entry(pglist, &imp_ctx->imp_list, list) {
if ((pglist->index == vma->vm_pgoff) &&
((length <= pglist->npages * PAGE_SIZE))) {
bfound = 1;
+ pages_list_get(pglist);
break;
}
}
if (length > pglist->npages * PAGE_SIZE) {
pr_err("Error vma length %ld not matching page list %ld\n",
length, pglist->npages * PAGE_SIZE);
- return -EINVAL;
+ ret = -EINVAL;
+ goto proc_end;
}
vma->vm_ops = &habmem_vm_ops;
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
return 0;
+
+proc_end:
+ pages_list_put(pglist);
+ return ret;
}
int habmm_imp_hyp_map_check(void *imp_ctx, struct export_desc *exp)
struct pages_list *pglist;
int found = 0;
- read_lock(&priv->implist_lock);
- list_for_each_entry(pglist, &priv->imp_list, list) {
- if (pglist->export_id == exp->export_id &&
- pglist->pchan == exp->pchan) {
- found = 1;
- break;
- }
+ pglist = pages_list_lookup(priv, exp->export_id, exp->pchan);
+ if (pglist) {
+ found = 1;
+ pages_list_put(pglist);
}
- read_unlock(&priv->implist_lock);
return found;
}
struct uhab_context *ctx;
if (!exp || !exp->ctx || !exp->pchan) {
- pr_err("failed to find valid info in exp %pK ctx %pK pchan %pK\n",
+ if (exp)
+ pr_err("invalid info in exp %pK ctx %pK pchan %pK\n",
exp, exp->ctx, exp->pchan);
+ else
+ pr_err("invalid exp\n");
return;
}
}
spin_unlock_bh(&ctx->imp_lock);
+ if ((exp->payload_count << PAGE_SHIFT) != param->sizebytes) {
+		pr_err("input size %d doesn't match buffer size %d\n",
+			param->sizebytes, exp->payload_count << PAGE_SHIFT);
+ ret = -EINVAL;
+ goto err_imp;
+ }
+
if (!found) {
pr_err("Fail to get export descriptor from export id %d\n",
param->exportid);
}
}
- /* return all the received messages before the remote close */
- if ((!ret || (ret == -ERESTARTSYS)) && !hab_rx_queue_empty(vchan)) {
- spin_lock_bh(&vchan->rx_lock);
+	/*
+	 * Return all the messages received before the remote close.
+	 * The empty check must be redone under the lock in case other
+	 * threads have dequeued the list in the meantime.
+	 */
+ spin_lock_bh(&vchan->rx_lock);
+
+ if ((!ret || (ret == -ERESTARTSYS)) && !list_empty(&vchan->rx_list)) {
message = list_first_entry(&vchan->rx_list,
struct hab_message, node);
if (message) {
ret = 0;
*rsize = message->sizebytes;
} else {
- pr_err("rcv buffer too small %d < %zd\n",
- *rsize, message->sizebytes);
+ pr_err("vcid %x rcv buf too small %d < %zd\n",
+ vchan->id, *rsize,
+ message->sizebytes);
*rsize = message->sizebytes;
message = NULL;
ret = -EOVERFLOW; /* come back again */
}
}
- spin_unlock_bh(&vchan->rx_lock);
} else
/* no message received, retain the original status */
*rsize = 0;
+ spin_unlock_bh(&vchan->rx_lock);
+
*msg = message;
return ret;
}
break;
}
- exp_desc->domid_local = pchan->dom_id;
+ if (pchan->vmid_local != exp_desc->domid_remote ||
+ pchan->vmid_remote != exp_desc->domid_local)
+ pr_err("corrupted vmid %d != %d %d != %d\n",
+ pchan->vmid_local, exp_desc->domid_remote,
+ pchan->vmid_remote, exp_desc->domid_local);
+ exp_desc->domid_remote = pchan->vmid_remote;
+ exp_desc->domid_local = pchan->vmid_local;
exp_desc->pchan = pchan;
hab_export_enqueue(vchan, exp_desc);
ret = wait_event_interruptible_timeout(dev->openq,
hab_open_request_find(ctx, dev, listen, recv_request),
ms_timeout);
- if (!ret || (-ERESTARTSYS == ret)) {
+ if (!ret) {
+ pr_debug("%s timeout in open listen\n", dev->name);
+ ret = -EAGAIN; /* condition not met */
+ } else if (-ERESTARTSYS == ret) {
pr_warn("something failed in open listen ret %d\n",
ret);
- ret = -EAGAIN; /* condition not met */
+ ret = -EINTR; /* condition not met */
} else if (ret > 0)
ret = 0; /* condition met */
} else { /* fe case */
/* the release vchan from ctx was done earlier in vchan close() */
hab_ctx_put(ctx); /* now ctx is not needed from this vchan's view */
- vchan->ctx = NULL;
/* release vchan from pchan. no more msg for this vchan */
write_lock_bh(&pchan->vchans_lock);
get_refcnt(vchan->refcount),
payload_type, sizebytes);
vchan = NULL;
+ } else if (vchan->otherend_closed || vchan->closed) {
+ pr_err("closed already remote %d local %d vcid %x remote %x session %d refcnt %d header %x session %d type %d sz %zd\n",
+ vchan->otherend_closed, vchan->closed,
+ vchan->id, vchan->otherend_id,
+ vchan->session_id, get_refcnt(vchan->refcount),
+ vchan_id, session_id, payload_type, sizebytes);
+ vchan = NULL;
} else if (!kref_get_unless_zero(&vchan->refcount)) {
/*
* this happens when refcnt is already zero
vchan->session_id, get_refcnt(vchan->refcount),
vchan_id, session_id, payload_type, sizebytes);
vchan = NULL;
- } else if (vchan->otherend_closed || vchan->closed) {
- pr_err("closed already remote %d local %d vcid %x remote %x session %d refcnt %d header %x session %d type %d sz %zd\n",
- vchan->otherend_closed, vchan->closed,
- vchan->id, vchan->otherend_id,
- vchan->session_id, get_refcnt(vchan->refcount),
- vchan_id, session_id, payload_type, sizebytes);
- vchan = NULL;
}
}
spin_unlock_bh(&pchan->vid_lock);
if (vchan) {
vchan->otherend_closed = 1;
wake_up(&vchan->rx_queue);
- wake_up_interruptible(&vchan->ctx->exp_wq);
+ if (vchan->ctx)
+ wake_up_interruptible(&vchan->ctx->exp_wq);
+ else
+ pr_err("NULL ctx for vchan %x\n", vchan->id);
}
}
read_lock(&pchan->vchans_lock);
empty = list_empty(&pchan->vchannels);
+ if (!empty) {
+ struct virtual_channel *vchan;
+
+ list_for_each_entry(vchan, &pchan->vchannels, pnode) {
+ pr_err("vchan %pK id %x remote id %x session %d ref %d closed %d remote close %d\n",
+ vchan, vchan->id, vchan->otherend_id,
+ vchan->session_id,
+ get_refcnt(vchan->refcount), vchan->closed,
+ vchan->otherend_closed);
+ }
+
+ }
read_unlock(&pchan->vchans_lock);
return empty;
if (!hab_vchans_per_pchan_empty(pchan)) {
empty = 0;
spin_unlock_bh(&hab_dev->pchan_lock);
+ pr_info("vmid %d %s's vchans are not closed\n",
+ vmid, pchan->name);
break;
}
}
pr_info("waiting for GVM%d's sockets closure\n", vmid);
while (!hab_vchans_empty(vmid))
- schedule();
+ usleep_range(10000, 12000);
pr_info("all of GVM%d's sockets are closed\n", vmid);
}
static uint32_t hab_handle_rx;
static char apr_tx_buf[APR_TX_BUF_SIZE];
static char apr_rx_buf[APR_RX_BUF_SIZE];
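+/* serializes all APR-over-hab tx transactions: register, deregister, send */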
+static spinlock_t hab_tx_lock;
/* apr callback thread task */
static struct task_struct *apr_vm_cb_thread_task;
* apr handle and store in svc tbl.
*/
-static struct mutex m_lock_tbl_qdsp6;
-
static struct apr_svc_table svc_tbl_qdsp6[] = {
{
.name = "AFE",
},
};
-static struct mutex m_lock_tbl_voice;
-
static struct apr_svc_table svc_tbl_voice[] = {
{
.name = "VSM",
int i;
int size;
struct apr_svc_table *tbl;
- struct mutex *lock;
struct aprv2_vm_cmd_register_rsp_t apr_rsp;
uint32_t apr_len;
int ret = 0;
+ unsigned long flags;
struct {
uint32_t cmd_id;
struct aprv2_vm_cmd_register_t reg_cmd;
if (domain_id == APR_DOMAIN_ADSP) {
tbl = svc_tbl_qdsp6;
size = ARRAY_SIZE(svc_tbl_qdsp6);
- lock = &m_lock_tbl_qdsp6;
} else {
tbl = svc_tbl_voice;
size = ARRAY_SIZE(svc_tbl_voice);
- lock = &m_lock_tbl_voice;
}
- mutex_lock(lock);
+ spin_lock_irqsave(&hab_tx_lock, flags);
for (i = 0; i < size; i++) {
if (!strcmp(svc_name, tbl[i].name)) {
*client_id = tbl[i].client_id;
if (ret) {
pr_err("%s: habmm_socket_send failed %d\n",
__func__, ret);
- mutex_unlock(lock);
+ spin_unlock_irqrestore(&hab_tx_lock,
+ flags);
return ret;
}
/* wait for response */
if (ret) {
pr_err("%s: apr_vm_nb_receive failed %d\n",
__func__, ret);
- mutex_unlock(lock);
+ spin_unlock_irqrestore(&hab_tx_lock,
+ flags);
return ret;
}
if (apr_rsp.status) {
pr_err("%s: apr_vm_nb_receive status %d\n",
__func__, apr_rsp.status);
ret = apr_rsp.status;
- mutex_unlock(lock);
+ spin_unlock_irqrestore(&hab_tx_lock,
+ flags);
return ret;
}
/* update svc table */
break;
}
}
- mutex_unlock(lock);
+ spin_unlock_irqrestore(&hab_tx_lock, flags);
pr_debug("%s: svc_name = %s client_id = %d domain_id = %d\n",
__func__, svc_name, *client_id, domain_id);
int i;
int size;
struct apr_svc_table *tbl;
- struct mutex *lock;
struct aprv2_vm_cmd_deregister_rsp_t apr_rsp;
uint32_t apr_len;
int ret = 0;
+ unsigned long flags;
struct {
uint32_t cmd_id;
struct aprv2_vm_cmd_deregister_t dereg_cmd;
if (domain_id == APR_DOMAIN_ADSP) {
tbl = svc_tbl_qdsp6;
size = ARRAY_SIZE(svc_tbl_qdsp6);
- lock = &m_lock_tbl_qdsp6;
} else {
tbl = svc_tbl_voice;
size = ARRAY_SIZE(svc_tbl_voice);
- lock = &m_lock_tbl_voice;
}
- mutex_lock(lock);
+ spin_lock_irqsave(&hab_tx_lock, flags);
for (i = 0; i < size; i++) {
if (tbl[i].id == svc_id && tbl[i].handle == handle) {
/* need to deregister a service */
break;
}
}
- mutex_unlock(lock);
+ spin_unlock_irqrestore(&hab_tx_lock, flags);
if (i == size) {
pr_err("%s: APR: Wrong svc id %d handle %d\n",
return -ENETRESET;
}
- spin_lock_irqsave(&svc->w_lock, flags);
+ spin_lock_irqsave(&hab_tx_lock, flags);
if (!svc->id || !svc->vm_handle) {
pr_err("APR: Still service is not yet opened\n");
ret = -EINVAL;
ret = hdr->pkt_size;
done:
- spin_unlock_irqrestore(&svc->w_lock, flags);
+ spin_unlock_irqrestore(&hab_tx_lock, flags);
return ret;
}
pr_err("%s: habmm_socket_open tx failed %d\n", __func__, ret);
return ret;
}
+ spin_lock_init(&hab_tx_lock);
ret = habmm_socket_open(&hab_handle_rx,
MM_AUD_2,
pr_info("%s: apr_vm_cb_thread started pid %d\n",
__func__, pid);
- mutex_init(&m_lock_tbl_qdsp6);
- mutex_init(&m_lock_tbl_voice);
-
for (i = 0; i < APR_DEST_MAX; i++)
for (j = 0; j < APR_CLIENT_MAX; j++) {
mutex_init(&client[i][j].m_lock);
for (k = 0; k < APR_SVC_MAX; k++) {
mutex_init(&client[i][j].svc[k].m_lock);
- spin_lock_init(&client[i][j].svc[k].w_lock);
}
}
struct anc_dev_drv_info {
uint32_t state;
- uint32_t rpm;
- uint32_t bypass_mode;
uint32_t algo_module_id;
};
int msm_anc_dev_set_info(void *info_p, int32_t anc_cmd)
{
- int rc = 0;
+ int rc = -EINVAL;
switch (anc_cmd) {
- case ANC_CMD_RPM: {
- struct audio_anc_rpm_info *rpm_info_p =
- (struct audio_anc_rpm_info *)info_p;
+ case ANC_CMD_ALGO_MODULE: {
+ struct audio_anc_algo_module_info *module_info_p =
+ (struct audio_anc_algo_module_info *)info_p;
+
+ rc = 0;
if (this_anc_dev_info.state)
- rc = anc_if_set_rpm(
+ rc = anc_if_set_algo_module_id(
anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id,
- rpm_info_p->rpm);
+ module_info_p->module_id);
else
- this_anc_dev_info.rpm = 0;
+ this_anc_dev_info.algo_module_id =
+ module_info_p->module_id;
break;
}
- case ANC_CMD_BYPASS_MODE: {
- struct audio_anc_bypass_mode *bypass_mode_p =
- (struct audio_anc_bypass_mode *)info_p;
-
+ case ANC_CMD_ALGO_CALIBRATION: {
+ rc = -EINVAL;
if (this_anc_dev_info.state)
- rc = anc_if_set_bypass_mode(
+ rc = anc_if_set_algo_module_cali_data(
anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id,
- bypass_mode_p->mode);
+ info_p);
else
- this_anc_dev_info.bypass_mode = bypass_mode_p->mode;
+ pr_err("%s: ANC is not running yet\n",
+ __func__);
+ break;
+ }
+ default:
+		pr_err("%s: unsupported ANC cmd %d\n",
+			__func__, anc_cmd);
break;
}
- case ANC_CMD_ALGO_MODULE: {
- struct audio_anc_algo_module_info *module_info_p =
- (struct audio_anc_algo_module_info *)info_p;
+ return rc;
+}
+
+int msm_anc_dev_get_info(void *info_p, int32_t anc_cmd)
+{
+ int rc = -EINVAL;
+
+ switch (anc_cmd) {
+ case ANC_CMD_ALGO_CALIBRATION: {
if (this_anc_dev_info.state)
- rc = anc_if_set_algo_module_id(
+ rc = anc_if_get_algo_module_cali_data(
anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id,
- module_info_p->module_id);
+ info_p);
else
- this_anc_dev_info.algo_module_id =
- module_info_p->module_id;
+ pr_err("%s: ANC is not running yet\n",
+ __func__);
break;
}
+ default:
+		pr_err("%s: unsupported ANC cmd %d\n",
+			__func__, anc_cmd);
+ break;
}
return rc;
}
-
int msm_anc_dev_start(void)
{
int rc = 0;
anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id,
this_anc_dev_info.algo_module_id);
- if (this_anc_dev_info.bypass_mode != 0)
- rc = anc_if_set_bypass_mode(
- anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id,
- this_anc_dev_info.bypass_mode);
-
group_id = get_group_id_from_port_id(
anc_port_cfg[ANC_DEV_PORT_ANC_SPKR].port_id);
this_anc_dev_info.state = 0;
this_anc_dev_info.algo_module_id = 0;
- this_anc_dev_info.rpm = 0;
- this_anc_dev_info.bypass_mode = 0;
pr_debug("%s: ANC devices stop successfully!\n", __func__);
case ANC_CMD_STOP:
size = 0;
break;
- case ANC_CMD_RPM:
- size = sizeof(struct audio_anc_rpm_info);
- break;
- case ANC_CMD_BYPASS_MODE:
- size = sizeof(struct audio_anc_bypass_mode);
- break;
case ANC_CMD_ALGO_MODULE:
size = sizeof(struct audio_anc_algo_module_info);
break;
case ANC_CMD_ALGO_CALIBRATION:
- size = sizeof(struct audio_anc_algo_calibration_info);
+ size = sizeof(struct audio_anc_algo_calibration_header);
break;
default:
pr_err("%s:Invalid anc cmd %d!",
case ANC_CMD_STOP:
ret = msm_anc_dev_stop();
break;
- case ANC_CMD_RPM:
- case ANC_CMD_BYPASS_MODE:
case ANC_CMD_ALGO_MODULE:
case ANC_CMD_ALGO_CALIBRATION:
ret = msm_anc_dev_set_info(data, anc_cmd);
int ret = 0;
switch (anc_cmd) {
- case ANC_CMD_RPM:
+ case ANC_CMD_ALGO_CALIBRATION:
+ ret = msm_anc_dev_get_info(data, anc_cmd);
break;
default:
break;
pr_err("%s: Could not copy size value from user\n", __func__);
ret = -EFAULT;
goto done;
- } else if (size < sizeof(struct audio_anc_packet)) {
+ } else if (size < sizeof(struct audio_anc_header)) {
pr_err("%s: Invalid size sent to driver: %d, min size is %zd\n",
- __func__, size, sizeof(struct audio_anc_packet));
+ __func__, size, sizeof(struct audio_anc_header));
ret = -EINVAL;
goto done;
}
atomic_t status;
wait_queue_head_t wait[AFE_MAX_PORTS];
struct task_struct *task;
- struct anc_get_rpm_resp rpm_calib_data;
+ struct anc_get_algo_module_cali_data_resp cali_data_resp;
uint32_t mmap_handle;
struct mutex afe_cmd_lock;
};
static int32_t anc_get_param_callback(uint32_t *payload,
uint32_t payload_size)
{
- u32 param_id;
- struct anc_get_rpm_resp *resp =
- (struct anc_get_rpm_resp *) payload;
-
- if (!(&(resp->pdata))) {
- pr_err("%s: Error: resp pdata is NULL\n", __func__);
+ if ((payload_size < (sizeof(uint32_t) +
+ sizeof(this_anc_if.cali_data_resp.pdata))) ||
+ (payload_size > sizeof(this_anc_if.cali_data_resp))) {
+ pr_err("%s: Error: received size %d, calib_data size %zu\n",
+ __func__, payload_size,
+ sizeof(this_anc_if.cali_data_resp));
return -EINVAL;
}
- param_id = resp->pdata.param_id;
- if (param_id == AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_RPM) {
- if (payload_size < sizeof(this_anc_if.rpm_calib_data)) {
- pr_err("%s: Error: received size %d, calib_data size %zu\n",
- __func__, payload_size,
- sizeof(this_anc_if.rpm_calib_data));
- return -EINVAL;
- }
-
- memcpy(&this_anc_if.rpm_calib_data, payload,
- sizeof(this_anc_if.rpm_calib_data));
- if (!this_anc_if.rpm_calib_data.status) {
- atomic_set(&this_anc_if.state, 0);
- } else {
- pr_debug("%s: calib resp status: %d", __func__,
- this_anc_if.rpm_calib_data.status);
- atomic_set(&this_anc_if.state, -1);
- }
+ memcpy(&this_anc_if.cali_data_resp, payload,
+ payload_size);
+ if (!this_anc_if.cali_data_resp.status) {
+ atomic_set(&this_anc_if.state, 0);
+ } else {
+ pr_debug("%s: calib resp status: %d", __func__,
+ this_anc_if.cali_data_resp.status);
+ atomic_set(&this_anc_if.state, -1);
}
return 0;
return anc_if_send_cmd_port_stop(port_id);
}
-int anc_if_set_rpm(u16 port_id, u32 rpm)
+int anc_if_set_algo_module_id(u16 port_id, u32 module_id)
{
int ret = 0;
int index;
index = q6audio_get_port_index(port_id);
{
- struct anc_set_rpm_command config;
+ struct anc_set_algo_module_id_command config;
memset(&config, 0, sizeof(config));
config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
config.param.payload_address_lsw = 0x00;
config.param.payload_address_msw = 0x00;
config.param.mem_map_handle = 0x00;
- config.pdata.module_id = AUD_MSVC_MODULE_AUDIO_DEV_ANC_ALGO;
- config.pdata.param_id = AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_RPM;
- config.pdata.param_size = sizeof(config.set_rpm);
- config.set_rpm.minor_version =
- AUD_MSVC_API_VERSION_DEV_ANC_ALGO_RPM;
- config.set_rpm.rpm = rpm;
+ config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
+ config.pdata.param_id =
+ AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_MODULE_ID;
+ config.pdata.param_size = sizeof(config.set_algo_module_id);
+ config.set_algo_module_id.minor_version = 1;
+ config.set_algo_module_id.module_id = module_id;
ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
if (ret) {
- pr_err("%s: share resource for port 0x%x failed ret = %d\n",
+ pr_err("%s: anc algo module ID for port 0x%x failed ret = %d\n",
__func__, port_id, ret);
}
}
return ret;
}
-int anc_if_set_bypass_mode(u16 port_id, u32 bypass_mode)
+int anc_if_set_anc_mic_spkr_layout(u16 port_id,
+struct aud_msvc_param_id_dev_anc_mic_spkr_layout_info *set_mic_spkr_layout_p)
{
int ret = 0;
-
int index;
ret = anc_sdsp_interface_prepare();
index = q6audio_get_port_index(port_id);
{
- struct anc_set_bypass_mode_command config;
+ struct anc_set_mic_spkr_layout_info_command config;
memset(&config, 0, sizeof(config));
config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
config.param.payload_address_lsw = 0x00;
config.param.payload_address_msw = 0x00;
config.param.mem_map_handle = 0x00;
- config.pdata.module_id = AUD_MSVC_MODULE_AUDIO_DEV_ANC_ALGO;
+ config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
config.pdata.param_id =
- AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_BYPASS_MODE;
- config.pdata.param_size = sizeof(config.set_bypass_mode);
- config.set_bypass_mode.minor_version =
- AUD_MSVC_API_VERSION_DEV_ANC_ALGO_BYPASS_MODE;
- config.set_bypass_mode.bypass_mode = bypass_mode;
+ AUD_MSVC_PARAM_ID_PORT_ANC_MIC_SPKR_LAYOUT_INFO;
+ config.pdata.param_size = sizeof(config.set_mic_spkr_layout);
+ memcpy(&config.set_mic_spkr_layout, set_mic_spkr_layout_p,
+ sizeof(config.set_mic_spkr_layout));
ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
if (ret) {
- pr_err("%s: share resource for port 0x%x failed ret = %d\n",
+			pr_err("%s: anc mic spkr layout for port 0x%x failed ret = %d\n",
__func__, port_id, ret);
}
}
return ret;
}
-int anc_if_set_algo_module_id(u16 port_id, u32 module_id)
+
+int anc_if_set_algo_module_cali_data(u16 port_id, void *data_p)
{
int ret = 0;
-
int index;
ret = anc_sdsp_interface_prepare();
index = q6audio_get_port_index(port_id);
{
- struct anc_set_algo_module_id_command config;
+ struct anc_set_algo_module_cali_data_command *cali_data_cfg_p;
+ void *config_p = NULL;
+ int cmd_size = 0;
+ void *out_payload_p = NULL;
+ uint32_t *in_payload_p = (uint32_t *)data_p;
+
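+		/*
+		 * data_p layout: word 0 = module_id, word 1 = param_id,
+		 * word 2 = payload_size in bytes, words 3.. = payload.
+		 */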
+ uint32_t module_id = *in_payload_p;
+ uint32_t param_id = *(in_payload_p + 1);
+ uint32_t payload_size = *(in_payload_p + 2);
+
+ cmd_size = sizeof(struct anc_set_algo_module_cali_data_command)
+ + payload_size;
+ config_p = kzalloc(cmd_size, GFP_KERNEL);
+ if (!config_p) {
+ ret = -ENOMEM;
+ return ret;
+ }
- memset(&config, 0, sizeof(config));
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ memset(config_p, 0, cmd_size);
+ out_payload_p = config_p
+ + sizeof(struct anc_set_algo_module_cali_data_command);
+
+ cali_data_cfg_p =
+ (struct anc_set_algo_module_cali_data_command *)config_p;
+
+ cali_data_cfg_p->hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = index;
- config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- config.param.port_id = q6audio_get_port_id(port_id);
- config.param.payload_size = sizeof(config) -
+ cali_data_cfg_p->hdr.pkt_size = cmd_size;
+ cali_data_cfg_p->hdr.src_port = 0;
+ cali_data_cfg_p->hdr.dest_port = 0;
+ cali_data_cfg_p->hdr.token = index;
+ cali_data_cfg_p->hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
+ cali_data_cfg_p->param.port_id = q6audio_get_port_id(port_id);
+ cali_data_cfg_p->param.payload_size = cmd_size -
sizeof(struct apr_hdr) -
- sizeof(config.param);
- config.param.payload_address_lsw = 0x00;
- config.param.payload_address_msw = 0x00;
- config.param.mem_map_handle = 0x00;
- config.pdata.module_id = AUD_MSVC_MODULE_AUDIO_DEV_ANC_ALGO;
- config.pdata.param_id =
- AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_MODULE_ID;
- config.pdata.param_size = sizeof(config.set_algo_module_id);
- config.set_algo_module_id.minor_version = 1;
- config.set_algo_module_id.module_id = module_id;
+ sizeof(struct aud_msvc_port_cmd_set_param_v2);
+ cali_data_cfg_p->param.payload_address_lsw = 0x00;
+ cali_data_cfg_p->param.payload_address_msw = 0x00;
+ cali_data_cfg_p->param.mem_map_handle = 0x00;
+ cali_data_cfg_p->pdata.module_id = module_id;
+ cali_data_cfg_p->pdata.param_id = param_id;
+ cali_data_cfg_p->pdata.param_size = payload_size;
+
+ memcpy(out_payload_p, (in_payload_p + 3), payload_size);
+
+ ret = anc_if_apr_send_pkt(cali_data_cfg_p,
+ &this_anc_if.wait[index]);
+ if (ret)
+ pr_err("%s: anc algo module calibration data for port 0x%x failed ret = %d\n",
+ __func__, port_id, ret);
- ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
- if (ret) {
- pr_err("%s: anc algo module ID for port 0x%x failed ret = %d\n",
- __func__, port_id, ret);
- }
+ kfree(config_p);
}
return ret;
}
-int anc_if_set_anc_mic_spkr_layout(u16 port_id,
-struct aud_msvc_param_id_dev_anc_mic_spkr_layout_info *set_mic_spkr_layout_p)
+int anc_if_get_algo_module_cali_data(u16 port_id, void *data_p)
{
int ret = 0;
-
int index;
ret = anc_sdsp_interface_prepare();
index = q6audio_get_port_index(port_id);
{
- struct anc_set_mic_spkr_layout_info_command config;
+ struct anc_get_algo_module_cali_data_command *cali_data_cfg_p;
+ void *config_p = NULL;
+ int cmd_size = 0;
+ void *out_payload_p = NULL;
+ uint32_t *in_payload_p = (uint32_t *)data_p;
+
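+		/*
+		 * data_p layout: word 0 = module_id, word 1 = param_id,
+		 * word 2 = payload_size in bytes, words 3.. = payload.
+		 */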
+ uint32_t module_id = *in_payload_p;
+ uint32_t param_id = *(in_payload_p + 1);
+ uint32_t payload_size = *(in_payload_p + 2);
+
+ cmd_size = sizeof(struct anc_get_algo_module_cali_data_command)
+ + payload_size;
+ config_p = kzalloc(cmd_size, GFP_KERNEL);
+ if (!config_p) {
+ ret = -ENOMEM;
+ return ret;
+ }
- memset(&config, 0, sizeof(config));
- config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ memset(config_p, 0, cmd_size);
+		out_payload_p = config_p +
+			sizeof(struct anc_get_algo_module_cali_data_command);
+
+ cali_data_cfg_p =
+ (struct anc_get_algo_module_cali_data_command *)config_p;
+
+ cali_data_cfg_p->hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
- config.hdr.pkt_size = sizeof(config);
- config.hdr.src_port = 0;
- config.hdr.dest_port = 0;
- config.hdr.token = index;
- config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
- config.param.port_id = q6audio_get_port_id(port_id);
- config.param.payload_size = sizeof(config) -
+ cali_data_cfg_p->hdr.pkt_size = cmd_size;
+ cali_data_cfg_p->hdr.src_port = 0;
+ cali_data_cfg_p->hdr.dest_port = 0;
+ cali_data_cfg_p->hdr.token = index;
+ cali_data_cfg_p->hdr.opcode = AFE_PORT_CMD_GET_PARAM_V2;
+ cali_data_cfg_p->param.port_id = q6audio_get_port_id(port_id);
+ cali_data_cfg_p->param.payload_size = cmd_size -
sizeof(struct apr_hdr) -
- sizeof(config.param);
- config.param.payload_address_lsw = 0x00;
- config.param.payload_address_msw = 0x00;
- config.param.mem_map_handle = 0x00;
- config.pdata.module_id = AUD_MSVC_MODULE_AUDIO_DEV_ANC_ALGO;
- config.pdata.param_id =
- AUD_MSVC_PARAM_ID_PORT_ANC_MIC_SPKR_LAYOUT_INFO;
- config.pdata.param_size = sizeof(config.set_mic_spkr_layout);
-
- memcpy(&config.set_mic_spkr_layout, set_mic_spkr_layout_p,
- sizeof(config.set_mic_spkr_layout));
- ret = anc_if_apr_send_pkt(&config, &this_anc_if.wait[index]);
- if (ret) {
- pr_err("%s: anc algo module ID for port 0x%x failed ret = %d\n",
+ sizeof(struct aud_msvc_port_cmd_get_param_v2);
+ cali_data_cfg_p->param.payload_address_lsw = 0x00;
+ cali_data_cfg_p->param.payload_address_msw = 0x00;
+ cali_data_cfg_p->param.mem_map_handle = 0x00;
+ cali_data_cfg_p->param.module_id = module_id;
+ cali_data_cfg_p->param.param_id = param_id;
+ cali_data_cfg_p->pdata.param_size = 0;
+ cali_data_cfg_p->pdata.module_id = 0;
+ cali_data_cfg_p->pdata.param_id = 0;
+
+ ret = anc_if_apr_send_pkt(cali_data_cfg_p,
+ &this_anc_if.wait[index]);
+ if (ret)
+ pr_err("%s: anc algo module calibration data for port 0x%x failed ret = %d\n",
__func__, port_id, ret);
- }
+
+ memcpy((in_payload_p + 3),
+ &this_anc_if.cali_data_resp.payload[0], payload_size);
+
+ *in_payload_p = this_anc_if.cali_data_resp.pdata.module_id;
+ *(in_payload_p + 1) =
+ this_anc_if.cali_data_resp.pdata.param_id;
+ *(in_payload_p + 2) =
+ this_anc_if.cali_data_resp.pdata.param_size;
+
+ kfree(config_p);
}
return ret;
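For reference, a minimal caller-side sketch of the in/out buffer contract
implemented above (illustrative, not part of the patch; MAX_CALI_WORDS, the
helper name, and the argument values are placeholders):

	#include <linux/types.h>

	/* hypothetical payload capacity, in 32-bit words */
	#define MAX_CALI_WORDS 32

	static int example_read_anc_cali(u16 port_id, u32 module_id,
					 u32 param_id)
	{
		u32 buf[3 + MAX_CALI_WORDS];
		int ret;

		buf[0] = module_id;     /* in: ANC algo module to query */
		buf[1] = param_id;      /* in: parameter within that module */
		buf[2] = MAX_CALI_WORDS * sizeof(u32); /* in: payload bytes */

		ret = anc_if_get_algo_module_cali_data(port_id, buf);
		if (!ret) {
			/* out: buf[0] holds the response module_id,
			 * buf[1] the param_id, buf[2] the actual
			 * param_size; buf[3..] holds the calibration data.
			 */
		}
		return ret;
	}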
mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
if (!mmap_region_cmd) {
ret = -ENOMEM;
- pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
return ret;
}
--- a/drivers/soc/qcom/smp2p_spinlock_test.c
+++ /dev/null
-/* drivers/soc/qcom/smp2p_spinlock_test.c
- *
- * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#include <linux/debugfs.h>
-#include <linux/ctype.h>
-#include <linux/jiffies.h>
-#include <linux/delay.h>
-#include <linux/completion.h>
-#include <linux/module.h>
-#include <linux/remote_spinlock.h>
-#include <soc/qcom/smem.h>
-#include "smem_private.h"
-#include "smp2p_private.h"
-#include "smp2p_test_common.h"
-
-#define RS_END_THIEF_PID_BIT 20
-#define RS_END_THIEF_MASK 0x00f00000
-
-/* Spinlock commands used for testing Apps<->RPM spinlocks. */
-enum RPM_SPINLOCK_CMDS {
- RPM_CMD_INVALID,
- RPM_CMD_START,
- RPM_CMD_LOCKED,
- RPM_CMD_UNLOCKED,
- RPM_CMD_END,
-};
-
-/* Shared structure for testing Apps<->RPM spinlocks. */
-struct rpm_spinlock_test {
- uint32_t apps_cmd;
- uint32_t apps_lock_count;
- uint32_t rpm_cmd;
- uint32_t rpm_lock_count;
-};
-
-static uint32_t ut_remote_spinlock_run_time = 1;
-
-/**
- * smp2p_ut_remote_spinlock_core - Verify remote spinlock.
- *
- * @s: Pointer to output file
- * @remote_pid: Remote processor to test
- * @use_trylock: Use trylock to prevent an Apps deadlock if the
- * remote spinlock fails.
- */
-static void smp2p_ut_remote_spinlock_core(struct seq_file *s, int remote_pid,
- bool use_trylock)
-{
- int failed = 0;
- unsigned lock_count = 0;
- struct msm_smp2p_out *handle = NULL;
- int ret;
- uint32_t test_request;
- uint32_t test_response;
- struct mock_cb_data cb_out;
- struct mock_cb_data cb_in;
- unsigned long flags;
- unsigned n;
- bool have_lock;
- bool timeout;
- int failed_tmp;
- int spinlock_owner;
- remote_spinlock_t *smem_spinlock;
- unsigned long end;
-
- seq_printf(s, "Running %s for '%s' remote pid %d\n",
- __func__, smp2p_pid_to_name(remote_pid), remote_pid);
-
- cb_out.initialized = false;
- cb_in.initialized = false;
- mock_cb_data_init(&cb_out);
- mock_cb_data_init(&cb_in);
- do {
- smem_spinlock = smem_get_remote_spinlock();
- UT_ASSERT_PTR(smem_spinlock, !=, NULL);
-
- /* Open output entry */
- ret = msm_smp2p_out_open(remote_pid, SMP2P_RLPB_ENTRY_NAME,
- &cb_out.nb, &handle);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &cb_out.cb_completion, HZ * 2),
- >, 0);
- UT_ASSERT_INT(cb_out.cb_count, ==, 1);
- UT_ASSERT_INT(cb_out.event_open, ==, 1);
-
- /* Open inbound entry */
- ret = msm_smp2p_in_register(remote_pid, SMP2P_RLPB_ENTRY_NAME,
- &cb_in.nb);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &cb_in.cb_completion, HZ * 2),
- >, 0);
- UT_ASSERT_INT(cb_in.cb_count, ==, 1);
- UT_ASSERT_INT(cb_in.event_open, ==, 1);
-
- /* Send start */
- mock_cb_data_reset(&cb_in);
- mock_cb_data_reset(&cb_out);
- test_request = 0x0;
- SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
- SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_START);
- SMP2P_SET_RMT_DATA(test_request, 0x0);
- ret = msm_smp2p_out_write(handle, test_request);
- UT_ASSERT_INT(ret, ==, 0);
-
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &cb_in.cb_completion, HZ * 2),
- >, 0);
- UT_ASSERT_INT(cb_in.cb_count, ==, 1);
- UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
- ret = msm_smp2p_in_read(remote_pid, SMP2P_RLPB_ENTRY_NAME,
- &test_response);
- UT_ASSERT_INT(ret, ==, 0);
-
- test_response = SMP2P_GET_RMT_CMD(test_response);
- if (test_response != SMP2P_LB_CMD_RSPIN_LOCKED &&
- test_response != SMP2P_LB_CMD_RSPIN_UNLOCKED) {
- /* invalid response from remote - abort test */
- test_request = 0x0;
- SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
- SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
- SMP2P_SET_RMT_DATA(test_request, 0x0);
- ret = msm_smp2p_out_write(handle, test_request);
- UT_ASSERT_HEX(SMP2P_LB_CMD_RSPIN_LOCKED, ==,
- test_response);
- }
-
- /* Run spinlock test */
- if (use_trylock)
- seq_puts(s, "\tUsing remote_spin_trylock\n");
- else
- seq_puts(s, "\tUsing remote_spin_lock\n");
-
- flags = 0;
- have_lock = false;
- timeout = false;
- spinlock_owner = 0;
- test_request = 0x0;
- SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
- end = jiffies + (ut_remote_spinlock_run_time * HZ);
- if (ut_remote_spinlock_run_time < 300) {
- seq_printf(s, "\tRunning test for %u seconds; ",
- ut_remote_spinlock_run_time);
- seq_puts(s,
- "on physical hardware please run >= 300 seconds by doing 'echo 300 > ut_remote_spinlock_time'\n");
- }
- while (time_is_after_jiffies(end)) {
- /* try to acquire spinlock */
- if (use_trylock) {
- unsigned long j_start = jiffies;
- while (!remote_spin_trylock_irqsave(
- smem_spinlock, flags)) {
- if (jiffies_to_msecs(jiffies - j_start)
- > 1000) {
- seq_puts(s,
- "\tFail: Timeout trying to get the lock\n");
- timeout = true;
- break;
- }
- }
- if (timeout)
- break;
- } else {
- remote_spin_lock_irqsave(smem_spinlock, flags);
- }
- have_lock = true;
- ++lock_count;
-
- /* tell the remote side that we have the lock */
- SMP2P_SET_RMT_DATA(test_request, lock_count);
- SMP2P_SET_RMT_CMD(test_request,
- SMP2P_LB_CMD_RSPIN_LOCKED);
- ret = msm_smp2p_out_write(handle, test_request);
- UT_ASSERT_INT(ret, ==, 0);
-
- /* verify the other side doesn't say it has the lock */
- for (n = 0; n < 1000; ++n) {
- spinlock_owner =
- remote_spin_owner(smem_spinlock);
- if (spinlock_owner != SMEM_APPS) {
- /* lock stolen by remote side */
- seq_puts(s, "\tFail: Remote side: ");
- seq_printf(s, "%d stole lock pid: %d\n",
- remote_pid, spinlock_owner);
- failed = true;
- break;
- }
- spinlock_owner = 0;
-
- ret = msm_smp2p_in_read(remote_pid,
- SMP2P_RLPB_ENTRY_NAME, &test_response);
- UT_ASSERT_INT(ret, ==, 0);
- test_response =
- SMP2P_GET_RMT_CMD(test_response);
- UT_ASSERT_HEX(SMP2P_LB_CMD_RSPIN_UNLOCKED, ==,
- test_response);
- }
- if (failed)
- break;
-
- /* tell remote side we are unlocked and release lock */
- SMP2P_SET_RMT_CMD(test_request,
- SMP2P_LB_CMD_RSPIN_UNLOCKED);
- (void)msm_smp2p_out_write(handle, test_request);
- have_lock = false;
- remote_spin_unlock_irqrestore(smem_spinlock, flags);
- }
- if (have_lock)
- remote_spin_unlock_irqrestore(smem_spinlock, flags);
-
- /* End test */
- mock_cb_data_reset(&cb_in);
- SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
- SMP2P_SET_RMT_DATA(test_request, lock_count |
- (spinlock_owner << RS_END_THIEF_PID_BIT));
- (void)msm_smp2p_out_write(handle, test_request);
-
- failed_tmp = failed;
- failed = false;
- do {
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &cb_in.cb_completion, HZ * 2),
- >, 0);
- reinit_completion(&cb_in.cb_completion);
- ret = msm_smp2p_in_read(remote_pid,
- SMP2P_RLPB_ENTRY_NAME, &test_response);
- UT_ASSERT_INT(ret, ==, 0);
- } while (!failed &&
- SMP2P_GET_RMT_CMD(test_response) !=
- SMP2P_LB_CMD_RSPIN_END);
- if (failed)
- break;
- failed = failed_tmp;
-
- test_response = SMP2P_GET_RMT_DATA(test_response);
- seq_puts(s, "\tLocked spinlock ");
- seq_printf(s, "local %u times; remote %u times",
- lock_count,
- test_response & ((1 << RS_END_THIEF_PID_BIT) - 1)
- );
- if (test_response & RS_END_THIEF_MASK) {
- seq_puts(s, "Remote side reporting lock stolen by ");
- seq_printf(s, "pid %d.\n",
- SMP2P_GET_BITS(test_response,
- RS_END_THIEF_MASK,
- RS_END_THIEF_PID_BIT));
- failed = 1;
- }
- seq_puts(s, "\n");
-
- /* Cleanup */
- ret = msm_smp2p_out_close(&handle);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_PTR(handle, ==, NULL);
- ret = msm_smp2p_in_unregister(remote_pid,
- SMP2P_RLPB_ENTRY_NAME, &cb_in.nb);
- UT_ASSERT_INT(ret, ==, 0);
-
- if (!failed && !timeout)
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- if (handle) {
- /* send end command */
- test_request = 0;
- SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
- SMP2P_SET_RMT_DATA(test_request, lock_count);
- (void)msm_smp2p_out_write(handle, test_request);
- (void)msm_smp2p_out_close(&handle);
- }
- (void)msm_smp2p_in_unregister(remote_pid,
- SMP2P_RLPB_ENTRY_NAME, &cb_in.nb);
-
- pr_err("%s: Failed\n", __func__);
- seq_puts(s, "\tFailed\n");
- }
-}
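The END message above packs the local lock count together with the PID of any
processor that stole the lock, using the RS_END_THIEF_* definitions from the
top of the file. An illustrative encode/decode sketch (the helper names are
invented here, not taken from the driver):

	/* Pack the local lock count with the PID of a lock thief (0 if none). */
	static u32 rspin_end_pack(u32 lock_count, int thief_pid)
	{
		return (lock_count & ((1 << RS_END_THIEF_PID_BIT) - 1)) |
		       ((thief_pid << RS_END_THIEF_PID_BIT) &
			RS_END_THIEF_MASK);
	}

	static u32 rspin_end_lock_count(u32 data)
	{
		return data & ((1 << RS_END_THIEF_PID_BIT) - 1);
	}

	static int rspin_end_thief_pid(u32 data)
	{
		return (data & RS_END_THIEF_MASK) >> RS_END_THIEF_PID_BIT;
	}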
-
-/**
- * smp2p_ut_remote_spinlock_pid - Verify remote spinlock for a processor.
- *
- * @s: Pointer to output file
- * @pid: Processor to test
- * @use_trylock: Use trylock to prevent an Apps deadlock if the
- * remote spinlock fails.
- */
-static void smp2p_ut_remote_spinlock_pid(struct seq_file *s, int pid,
- bool use_trylock)
-{
- struct smp2p_interrupt_config *int_cfg;
-
- int_cfg = smp2p_get_interrupt_config();
- if (!int_cfg) {
- seq_puts(s, "Remote processor config unavailable\n");
- return;
- }
-
- if (pid >= SMP2P_NUM_PROCS || !int_cfg[pid].is_configured)
- return;
-
- msm_smp2p_deinit_rmt_lpb_proc(pid);
- smp2p_ut_remote_spinlock_core(s, pid, use_trylock);
- msm_smp2p_init_rmt_lpb_proc(pid);
-}
-
-/**
- * smp2p_ut_remote_spinlock - Verify remote spinlock for all processors.
- *
- * @s: pointer to output file
- */
-static void smp2p_ut_remote_spinlock(struct seq_file *s)
-{
- int pid;
-
- for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid)
- smp2p_ut_remote_spinlock_pid(s, pid, false);
-}
-
-/**
- * smp2p_ut_remote_spin_trylock - Verify remote trylock for all processors.
- *
- * @s: Pointer to output file
- */
-static void smp2p_ut_remote_spin_trylock(struct seq_file *s)
-{
- int pid;
-
- for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid)
- smp2p_ut_remote_spinlock_pid(s, pid, true);
-}
-
-/**
- * smp2p_ut_remote_spinlock_modem - Verify remote spinlock for the modem.
- *
- * @s: pointer to output file
- */
-static void smp2p_ut_remote_spinlock_modem(struct seq_file *s)
-{
- smp2p_ut_remote_spinlock_pid(s, SMP2P_MODEM_PROC, false);
-}
-
-static void smp2p_ut_remote_spinlock_adsp(struct seq_file *s)
-{
- smp2p_ut_remote_spinlock_pid(s, SMP2P_AUDIO_PROC, false);
-}
-
-static void smp2p_ut_remote_spinlock_dsps(struct seq_file *s)
-{
- smp2p_ut_remote_spinlock_pid(s, SMP2P_SENSOR_PROC, false);
-}
-
-static void smp2p_ut_remote_spinlock_wcnss(struct seq_file *s)
-{
- smp2p_ut_remote_spinlock_pid(s, SMP2P_WIRELESS_PROC, false);
-}
-
-static void smp2p_ut_remote_spinlock_cdsp(struct seq_file *s)
-{
- smp2p_ut_remote_spinlock_pid(s, SMP2P_CDSP_PROC, false);
-}
-
-static void smp2p_ut_remote_spinlock_tz(struct seq_file *s)
-{
- smp2p_ut_remote_spinlock_pid(s, SMP2P_TZ_PROC, false);
-}
-
-/**
- * smp2p_ut_remote_spinlock_rpm - Verify remote spinlock against the RPM.
- *
- * @s: pointer to output file
- */
-static void smp2p_ut_remote_spinlock_rpm(struct seq_file *s)
-{
- int failed = 0;
- unsigned long flags;
- unsigned n;
- unsigned test_num;
- struct rpm_spinlock_test *data_ptr;
- remote_spinlock_t *smem_spinlock;
- bool have_lock;
-
- seq_printf(s, "Running %s for Apps<->RPM Test\n",
- __func__);
- do {
- smem_spinlock = smem_get_remote_spinlock();
- UT_ASSERT_PTR(smem_spinlock, !=, NULL);
-
- data_ptr = smem_alloc(SMEM_ID_VENDOR0,
- sizeof(struct rpm_spinlock_test), 0,
- SMEM_ANY_HOST_FLAG);
- UT_ASSERT_PTR(0, !=, data_ptr);
-
- /* Send start */
- writel_relaxed(0, &data_ptr->apps_lock_count);
- writel_relaxed(RPM_CMD_START, &data_ptr->apps_cmd);
-
- seq_puts(s, "\tWaiting for RPM to start test\n");
- for (n = 0; n < 1000; ++n) {
- if (readl_relaxed(&data_ptr->rpm_cmd) !=
- RPM_CMD_INVALID)
- break;
- usleep_range(1000, 1200);
- }
- if (readl_relaxed(&data_ptr->rpm_cmd) == RPM_CMD_INVALID) {
-			/* timeout waiting for RPM - force a test failure */
-			writel_relaxed(RPM_CMD_INVALID, &data_ptr->apps_cmd);
-			UT_ASSERT_INT(RPM_CMD_LOCKED, ==, RPM_CMD_INVALID);
- }
-
- /* Run spinlock test */
- flags = 0;
- have_lock = false;
- for (test_num = 0; !failed && test_num < 10000; ++test_num) {
- /* acquire spinlock */
- remote_spin_lock_irqsave(smem_spinlock, flags);
- have_lock = true;
- data_ptr->apps_lock_count++;
- writel_relaxed(data_ptr->apps_lock_count,
- &data_ptr->apps_lock_count);
- writel_relaxed(RPM_CMD_LOCKED, &data_ptr->apps_cmd);
- /*
- * Ensure that the remote side sees our lock has
- * been acquired before we start polling their status.
- */
- wmb();
-
- /* verify the other side doesn't say it has the lock */
- for (n = 0; n < 1000; ++n) {
- UT_ASSERT_HEX(RPM_CMD_UNLOCKED, ==,
- readl_relaxed(&data_ptr->rpm_cmd));
- }
- if (failed)
- break;
-
- /* release spinlock */
- have_lock = false;
- writel_relaxed(RPM_CMD_UNLOCKED, &data_ptr->apps_cmd);
- /*
- * Ensure that our status-update write was committed
- * before we unlock the spinlock.
- */
- wmb();
- remote_spin_unlock_irqrestore(smem_spinlock, flags);
- }
- if (have_lock)
- remote_spin_unlock_irqrestore(smem_spinlock, flags);
-
- /* End test */
- writel_relaxed(RPM_CMD_INVALID, &data_ptr->apps_cmd);
- seq_printf(s, "\tLocked spinlock local %u remote %u\n",
- readl_relaxed(&data_ptr->apps_lock_count),
- readl_relaxed(&data_ptr->rpm_lock_count));
-
- if (!failed)
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- pr_err("%s: Failed\n", __func__);
- seq_puts(s, "\tFailed\n");
- }
-}
-
-struct rmt_spinlock_work_item {
- struct work_struct work;
- struct completion try_lock;
- struct completion locked;
- bool has_locked;
-};
-
-static void ut_remote_spinlock_ssr_worker(struct work_struct *work)
-{
- remote_spinlock_t *smem_spinlock;
- unsigned long flags;
- struct rmt_spinlock_work_item *work_item =
- container_of(work, struct rmt_spinlock_work_item, work);
-
- work_item->has_locked = false;
- complete(&work_item->try_lock);
- smem_spinlock = smem_get_remote_spinlock();
- if (!smem_spinlock) {
- pr_err("%s Failed\n", __func__);
- return;
- }
-
- remote_spin_lock_irqsave(smem_spinlock, flags);
- remote_spin_unlock_irqrestore(smem_spinlock, flags);
- work_item->has_locked = true;
- complete(&work_item->locked);
-}
-
-/**
- * smp2p_ut_remote_spinlock_ssr - Verify remote spinlock.
- *
- * @s: pointer to output file
- */
-static void smp2p_ut_remote_spinlock_ssr(struct seq_file *s)
-{
- int failed = 0;
- unsigned long flags;
- remote_spinlock_t *smem_spinlock;
- int spinlock_owner = 0;
-
- struct workqueue_struct *ws = NULL;
- struct rmt_spinlock_work_item work_item = { .has_locked = false };
-
- seq_printf(s, " Running %s Test\n",
- __func__);
- do {
- smem_spinlock = smem_get_remote_spinlock();
- UT_ASSERT_PTR(smem_spinlock, !=, NULL);
-
- ws = create_singlethread_workqueue("ut_remote_spinlock_ssr");
- UT_ASSERT_PTR(ws, !=, NULL);
- INIT_WORK(&work_item.work, ut_remote_spinlock_ssr_worker);
- init_completion(&work_item.try_lock);
- init_completion(&work_item.locked);
-
- remote_spin_lock_irqsave(smem_spinlock, flags);
- /* Unlock local spin lock and hold HW spinlock */
- spin_unlock_irqrestore(&((smem_spinlock)->local), flags);
-
- queue_work(ws, &work_item.work);
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &work_item.try_lock, HZ * 2), >, 0);
- UT_ASSERT_INT((int)work_item.has_locked, ==, 0);
- spinlock_owner = remote_spin_owner(smem_spinlock);
- UT_ASSERT_INT(spinlock_owner, ==, SMEM_APPS);
- remote_spin_release_all(SMEM_APPS);
-
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &work_item.locked, HZ * 2), >, 0);
-
- if (!failed)
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- pr_err("%s: Failed\n", __func__);
- seq_puts(s, "\tFailed\n");
- }
-}
-
-/**
- * smp2p_ut_remote_spinlock_track_core - Verify remote spinlock PID tracking.
- *
- * @s: Pointer to output file
- * @remote_pid: Remote processor to test
- *
- * This test has the remote subsystem grab the lock, and then has the local
- * subsystem attempt to grab the lock using the trylock() API. It then verifies
- * that the ID in the hw_spinlocks array matches the owner of the lock.
- */
-static void smp2p_ut_remote_spinlock_track_core(struct seq_file *s,
- int remote_pid)
-{
- int failed = 0;
- struct msm_smp2p_out *handle = NULL;
- int ret;
- uint32_t test_request;
- uint32_t test_response;
- struct mock_cb_data cb_out;
- struct mock_cb_data cb_in;
- unsigned long flags;
- int stored_value;
- remote_spinlock_t *smem_spinlock;
-
- seq_printf(s, "Running %s for '%s' remote pid %d\n",
- __func__, smp2p_pid_to_name(remote_pid), remote_pid);
-
- cb_out.initialized = false;
- cb_in.initialized = false;
- mock_cb_data_init(&cb_out);
- mock_cb_data_init(&cb_in);
- do {
- smem_spinlock = smem_get_remote_spinlock();
- UT_ASSERT_PTR(smem_spinlock, !=, NULL);
-
- /* Open output entry */
- ret = msm_smp2p_out_open(remote_pid, SMP2P_RLPB_ENTRY_NAME,
- &cb_out.nb, &handle);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &cb_out.cb_completion, HZ * 2),
- >, 0);
- UT_ASSERT_INT(cb_out.cb_count, ==, 1);
- UT_ASSERT_INT(cb_out.event_open, ==, 1);
-
- /* Open inbound entry */
- ret = msm_smp2p_in_register(remote_pid, SMP2P_RLPB_ENTRY_NAME,
- &cb_in.nb);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &cb_in.cb_completion, HZ * 2),
- >, 0);
- UT_ASSERT_INT(cb_in.cb_count, ==, 1);
- UT_ASSERT_INT(cb_in.event_open, ==, 1);
-
- /* Send start */
- mock_cb_data_reset(&cb_in);
- mock_cb_data_reset(&cb_out);
- test_request = 0x0;
- SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
- SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_START);
- SMP2P_SET_RMT_DATA(test_request, 0x0);
- ret = msm_smp2p_out_write(handle, test_request);
- UT_ASSERT_INT(ret, ==, 0);
-
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &cb_in.cb_completion, HZ * 2),
- >, 0);
- UT_ASSERT_INT(cb_in.cb_count, ==, 1);
- UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
- ret = msm_smp2p_in_read(remote_pid, SMP2P_RLPB_ENTRY_NAME,
- &test_response);
- UT_ASSERT_INT(ret, ==, 0);
-
- test_response = SMP2P_GET_RMT_CMD(test_response);
- if (test_response != SMP2P_LB_CMD_RSPIN_LOCKED &&
- test_response != SMP2P_LB_CMD_RSPIN_UNLOCKED) {
- /* invalid response from remote - abort test */
- test_request = 0x0;
- SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
- SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
- SMP2P_SET_RMT_DATA(test_request, 0x0);
- ret = msm_smp2p_out_write(handle, test_request);
- UT_ASSERT_HEX(SMP2P_LB_CMD_RSPIN_LOCKED, ==,
- test_response);
- }
-
- /* Run spinlock test */
- flags = 0;
- test_request = 0x0;
- SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
-
- /* try to acquire spinlock */
- remote_spin_trylock_irqsave(smem_spinlock, flags);
- /*
- * Need to check against the locking token (PID + 1)
- * because the remote_spin_owner() API only returns the
- * PID.
- */
- stored_value = remote_spin_get_hw_spinlocks_element(
- smem_spinlock);
- UT_ASSERT_INT(stored_value, ==,
- remote_spin_owner(smem_spinlock) + 1);
- UT_ASSERT_INT(stored_value, ==, remote_pid + 1);
-
- /* End test */
- test_request = 0x0;
- SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
- SMP2P_SET_RMT_DATA(test_request, 0x0);
- (void)msm_smp2p_out_write(handle, test_request);
-
- /* Cleanup */
- ret = msm_smp2p_out_close(&handle);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_PTR(handle, ==, NULL);
- ret = msm_smp2p_in_unregister(remote_pid,
- SMP2P_RLPB_ENTRY_NAME, &cb_in.nb);
- UT_ASSERT_INT(ret, ==, 0);
-
- if (!failed)
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- if (handle) {
- /* send end command */
- test_request = 0x0;
- SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
- SMP2P_SET_RMT_DATA(test_request, 0x0);
- (void)msm_smp2p_out_write(handle, test_request);
- (void)msm_smp2p_out_close(&handle);
- }
- (void)msm_smp2p_in_unregister(remote_pid,
- SMP2P_RLPB_ENTRY_NAME, &cb_in.nb);
-
- pr_err("%s: Failed\n", __func__);
- seq_puts(s, "\tFailed\n");
- }
-}
-
-/**
- * smp2p_ut_remote_spinlock_track - Verify PID tracking for a processor.
- *
- * @s: Pointer to output file
- * @pid: The processor to test
- */
-static void smp2p_ut_remote_spinlock_track(struct seq_file *s, int pid)
-{
- struct smp2p_interrupt_config *int_cfg;
-
- int_cfg = smp2p_get_interrupt_config();
- if (!int_cfg) {
- seq_puts(s, "Remote processor config unavailable\n");
- return;
- }
-
- if (pid >= SMP2P_NUM_PROCS || !int_cfg[pid].is_configured)
- return;
-
- msm_smp2p_deinit_rmt_lpb_proc(pid);
- smp2p_ut_remote_spinlock_track_core(s, pid);
- msm_smp2p_init_rmt_lpb_proc(pid);
-}
-
-/**
- * smp2p_ut_remote_spinlock_track_modem - Verify PID tracking for the modem.
- *
- * @s: Pointer to output file
- */
-static void smp2p_ut_remote_spinlock_track_modem(struct seq_file *s)
-{
- smp2p_ut_remote_spinlock_track(s, SMP2P_MODEM_PROC);
-}
-
-static void smp2p_ut_remote_spinlock_track_adsp(struct seq_file *s)
-{
- smp2p_ut_remote_spinlock_track(s, SMP2P_AUDIO_PROC);
-}
-
-static void smp2p_ut_remote_spinlock_track_dsps(struct seq_file *s)
-{
- smp2p_ut_remote_spinlock_track(s, SMP2P_SENSOR_PROC);
-}
-
-static void smp2p_ut_remote_spinlock_track_wcnss(struct seq_file *s)
-{
- smp2p_ut_remote_spinlock_track(s, SMP2P_WIRELESS_PROC);
-}
-
-static void smp2p_ut_remote_spinlock_track_cdsp(struct seq_file *s)
-{
- smp2p_ut_remote_spinlock_track(s, SMP2P_CDSP_PROC);
-}
-
-static void smp2p_ut_remote_spinlock_track_tz(struct seq_file *s)
-{
- smp2p_ut_remote_spinlock_track(s, SMP2P_TZ_PROC);
-}
-
-static int __init smp2p_debugfs_init(void)
-{
- /*
- * Add Unit Test entries.
- *
- * The idea with unit tests is that you can run all of them
- * from ADB shell by doing:
- * adb shell
- * cat ut*
- *
- * And if particular tests fail, you can then repeatedly run the
- * failing tests as you debug and resolve the failing test.
- */
- smp2p_debug_create("ut_remote_spinlock",
- smp2p_ut_remote_spinlock);
- smp2p_debug_create("ut_remote_spin_trylock",
- smp2p_ut_remote_spin_trylock);
- smp2p_debug_create("ut_remote_spinlock_modem",
- smp2p_ut_remote_spinlock_modem);
- smp2p_debug_create("ut_remote_spinlock_adsp",
- smp2p_ut_remote_spinlock_adsp);
- smp2p_debug_create("ut_remote_spinlock_dsps",
- smp2p_ut_remote_spinlock_dsps);
- smp2p_debug_create("ut_remote_spinlock_wcnss",
- smp2p_ut_remote_spinlock_wcnss);
- smp2p_debug_create("ut_remote_spinlock_cdsp",
- smp2p_ut_remote_spinlock_cdsp);
- smp2p_debug_create("ut_remote_spinlock_tz",
- smp2p_ut_remote_spinlock_tz);
- smp2p_debug_create("ut_remote_spinlock_rpm",
- smp2p_ut_remote_spinlock_rpm);
- smp2p_debug_create_u32("ut_remote_spinlock_time",
- &ut_remote_spinlock_run_time);
- smp2p_debug_create("ut_remote_spinlock_ssr",
- &smp2p_ut_remote_spinlock_ssr);
- smp2p_debug_create("ut_remote_spinlock_track_modem",
- &smp2p_ut_remote_spinlock_track_modem);
- smp2p_debug_create("ut_remote_spinlock_track_adsp",
- &smp2p_ut_remote_spinlock_track_adsp);
- smp2p_debug_create("ut_remote_spinlock_track_dsps",
- &smp2p_ut_remote_spinlock_track_dsps);
- smp2p_debug_create("ut_remote_spinlock_track_wcnss",
- &smp2p_ut_remote_spinlock_track_wcnss);
- smp2p_debug_create("ut_remote_spinlock_track_cdsp",
- &smp2p_ut_remote_spinlock_track_cdsp);
- smp2p_debug_create("ut_remote_spinlock_track_tz",
- &smp2p_ut_remote_spinlock_track_tz);
- return 0;
-}
-module_init(smp2p_debugfs_init);
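Both of the deleted test files rely on the UT_ASSERT_* macros from
smp2p_test_common.h, which this series neither shows nor modifies. As a
sketch of the idiom only, assuming the real macro follows the usual pattern
visible at the call sites: on a failed comparison it logs the mismatch to the
seq_file, sets the function-local failed flag, and breaks out of the
enclosing do { } while (0) so control falls through to the common cleanup
path:

	/* Sketch only; the real definition lives in smp2p_test_common.h.
	 * Assumes a struct seq_file *s and an int failed are in scope, and
	 * that the expansion sits inside a breakable block, as in the
	 * tests above. */
	#define UT_ASSERT_INT(a, cmp, b) \
	{ \
		int a_tmp = (a); \
		int b_tmp = (b); \
		if (!((a_tmp) cmp (b_tmp))) { \
			seq_printf(s, "%s:%d Fail: " #a "(%d) " #cmp " " #b "(%d)\n", \
					__func__, __LINE__, a_tmp, b_tmp); \
			failed = 1; \
			break; \
		} \
	}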
--- a/drivers/soc/qcom/smp2p_test.c
+++ /dev/null
-/* drivers/soc/qcom/smp2p_test.c
- *
- * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#include <linux/debugfs.h>
-#include <linux/ctype.h>
-#include <linux/jiffies.h>
-#include <linux/delay.h>
-#include <linux/completion.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <soc/qcom/subsystem_restart.h>
-#include "smp2p_private.h"
-#include "smp2p_test_common.h"
-
-/**
- * smp2p_ut_local_basic - Basic sanity test using local loopback.
- *
- * @s: pointer to output file
- *
- * This test simulates a simple write and read
- * when remote processor does not exist.
- */
-static void smp2p_ut_local_basic(struct seq_file *s)
-{
- int failed = 0;
- struct msm_smp2p_out *smp2p_obj;
- struct msm_smp2p_remote_mock *rmp = NULL;
- int ret;
- uint32_t test_request;
- uint32_t test_response = 0;
- static struct mock_cb_data cb_data;
-
- seq_printf(s, "Running %s\n", __func__);
- mock_cb_data_init(&cb_data);
- do {
- /* initialize mock edge and start opening */
- ret = smp2p_reset_mock_edge();
- UT_ASSERT_INT(ret, ==, 0);
-
- rmp = msm_smp2p_get_remote_mock();
- UT_ASSERT_PTR(rmp, !=, NULL);
-
- rmp->rx_interrupt_count = 0;
- memset(&rmp->remote_item, 0,
- sizeof(struct smp2p_smem_item));
-
- msm_smp2p_set_remote_mock_exists(false);
-
- ret = msm_smp2p_out_open(SMP2P_REMOTE_MOCK_PROC, "smp2p",
- &cb_data.nb, &smp2p_obj);
- UT_ASSERT_INT(ret, ==, 0);
-
- UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
- UT_ASSERT_INT(cb_data.cb_count, ==, 0);
- rmp->rx_interrupt_count = 0;
-
- /* simulate response from remote side */
- rmp->remote_item.header.magic = SMP2P_MAGIC;
- SMP2P_SET_LOCAL_PID(
- rmp->remote_item.header.rem_loc_proc_id,
- SMP2P_REMOTE_MOCK_PROC);
- SMP2P_SET_REMOTE_PID(
- rmp->remote_item.header.rem_loc_proc_id,
- SMP2P_APPS_PROC);
- SMP2P_SET_VERSION(
- rmp->remote_item.header.feature_version, 1);
- SMP2P_SET_FEATURES(
- rmp->remote_item.header.feature_version, 0);
- SMP2P_SET_ENT_TOTAL(
- rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
- SMP2P_SET_ENT_VALID(
- rmp->remote_item.header.valid_total_ent, 0);
- rmp->remote_item.header.flags = 0x0;
- msm_smp2p_set_remote_mock_exists(true);
- rmp->tx_interrupt();
-
- /* verify port was opened */
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &cb_data.cb_completion, HZ / 2), >, 0);
- UT_ASSERT_INT(cb_data.cb_count, ==, 1);
- UT_ASSERT_INT(cb_data.event_open, ==, 1);
- UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 2);
-
- /* do write (test outbound entries) */
- rmp->rx_interrupt_count = 0;
- test_request = 0xC0DE;
- ret = msm_smp2p_out_write(smp2p_obj, test_request);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
- /* do read (test inbound entries) */
- ret = msm_smp2p_out_read(smp2p_obj, &test_response);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_INT(test_request, ==, test_response);
-
- ret = msm_smp2p_out_close(&smp2p_obj);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_PTR(smp2p_obj, ==, 0);
-
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- pr_err("%s: Failed\n", __func__);
- seq_puts(s, "\tFailed\n");
- (void)msm_smp2p_out_close(&smp2p_obj);
- }
-}
-
-/**
- * smp2p_ut_local_late_open - Verify post-negotiation opening.
- *
- * @s: pointer to output file
- *
- * Verify entry creation for opening entries after negotiation is complete.
- */
-static void smp2p_ut_local_late_open(struct seq_file *s)
-{
- int failed = 0;
- struct msm_smp2p_out *smp2p_obj;
- struct msm_smp2p_remote_mock *rmp = NULL;
- int ret;
- uint32_t test_request;
- uint32_t test_response = 0;
- static struct mock_cb_data cb_data;
-
- seq_printf(s, "Running %s\n", __func__);
- mock_cb_data_init(&cb_data);
- do {
- /* initialize mock edge */
- ret = smp2p_reset_mock_edge();
- UT_ASSERT_INT(ret, ==, 0);
-
- rmp = msm_smp2p_get_remote_mock();
- UT_ASSERT_PTR(rmp, !=, NULL);
-
- rmp->rx_interrupt_count = 0;
- memset(&rmp->remote_item, 0,
- sizeof(struct smp2p_smem_item));
- rmp->remote_item.header.magic = SMP2P_MAGIC;
- SMP2P_SET_LOCAL_PID(
- rmp->remote_item.header.rem_loc_proc_id,
- SMP2P_REMOTE_MOCK_PROC);
- SMP2P_SET_REMOTE_PID(
- rmp->remote_item.header.rem_loc_proc_id,
- SMP2P_APPS_PROC);
- SMP2P_SET_VERSION(
- rmp->remote_item.header.feature_version, 1);
- SMP2P_SET_FEATURES(
- rmp->remote_item.header.feature_version, 0);
- SMP2P_SET_ENT_TOTAL(
- rmp->remote_item.header.valid_total_ent,
- SMP2P_MAX_ENTRY);
- SMP2P_SET_ENT_VALID(
- rmp->remote_item.header.valid_total_ent, 0);
- rmp->remote_item.header.flags = 0x0;
-
- msm_smp2p_set_remote_mock_exists(true);
-
- ret = msm_smp2p_out_open(SMP2P_REMOTE_MOCK_PROC, "smp2p",
- &cb_data.nb, &smp2p_obj);
- UT_ASSERT_INT(ret, ==, 0);
-
- /* verify port was opened */
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &cb_data.cb_completion, HZ / 2),
- >, 0);
- UT_ASSERT_INT(cb_data.cb_count, ==, 1);
- UT_ASSERT_INT(cb_data.event_open, ==, 1);
- UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 2);
-
- /* do write (test outbound entries) */
- rmp->rx_interrupt_count = 0;
- test_request = 0xC0DE;
- ret = msm_smp2p_out_write(smp2p_obj, test_request);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
- /* do read (test inbound entries) */
- ret = msm_smp2p_out_read(smp2p_obj, &test_response);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_INT(test_request, ==, test_response);
-
- ret = msm_smp2p_out_close(&smp2p_obj);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_PTR(smp2p_obj, ==, 0);
-
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- pr_err("%s: Failed\n", __func__);
- seq_puts(s, "\tFailed\n");
- (void)msm_smp2p_out_close(&smp2p_obj);
- }
-}
-
-/**
- * smp2p_ut_local_early_open - Verify pre-negotiation opening.
- *
- * @s: pointer to output file
- *
- * Verify entry creation for opening entries before negotiation is complete.
- */
-static void smp2p_ut_local_early_open(struct seq_file *s)
-{
- int failed = 0;
- struct msm_smp2p_out *smp2p_obj;
- struct msm_smp2p_remote_mock *rmp = NULL;
- struct smp2p_smem *outbound_item;
- int negotiation_state;
- int ret;
- uint32_t test_request;
- uint32_t test_response = 0;
- static struct mock_cb_data cb_data;
-
- seq_printf(s, "Running %s\n", __func__);
- mock_cb_data_init(&cb_data);
- do {
- /* initialize mock edge, but don't enable, yet */
- ret = smp2p_reset_mock_edge();
- UT_ASSERT_INT(ret, ==, 0);
-
- rmp = msm_smp2p_get_remote_mock();
- UT_ASSERT_PTR(rmp, !=, NULL);
-
- rmp->rx_interrupt_count = 0;
- memset(&rmp->remote_item, 0,
- sizeof(struct smp2p_smem_item));
- rmp->remote_item.header.magic = SMP2P_MAGIC;
- SMP2P_SET_LOCAL_PID(
- rmp->remote_item.header.rem_loc_proc_id,
- SMP2P_REMOTE_MOCK_PROC);
- SMP2P_SET_REMOTE_PID(
- rmp->remote_item.header.rem_loc_proc_id,
- SMP2P_APPS_PROC);
- SMP2P_SET_VERSION(
- rmp->remote_item.header.feature_version, 1);
- SMP2P_SET_FEATURES(
- rmp->remote_item.header.feature_version, 0);
- SMP2P_SET_ENT_TOTAL(
- rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
- SMP2P_SET_ENT_VALID(
- rmp->remote_item.header.valid_total_ent, 0);
- rmp->remote_item.header.flags = 0x0;
-
- msm_smp2p_set_remote_mock_exists(false);
- UT_ASSERT_PTR(NULL, ==,
- smp2p_get_in_item(SMP2P_REMOTE_MOCK_PROC));
-
- /* initiate open, but verify it doesn't complete */
- ret = msm_smp2p_out_open(SMP2P_REMOTE_MOCK_PROC, "smp2p",
- &cb_data.nb, &smp2p_obj);
- UT_ASSERT_INT(ret, ==, 0);
-
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &cb_data.cb_completion, HZ / 8),
- ==, 0);
- UT_ASSERT_INT(cb_data.cb_count, ==, 0);
- UT_ASSERT_INT(cb_data.event_open, ==, 0);
- UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
- outbound_item = smp2p_get_out_item(SMP2P_REMOTE_MOCK_PROC,
- &negotiation_state);
- UT_ASSERT_PTR(outbound_item, !=, NULL);
- UT_ASSERT_INT(negotiation_state, ==, SMP2P_EDGE_STATE_OPENING);
- UT_ASSERT_INT(0, ==,
- SMP2P_GET_ENT_VALID(outbound_item->valid_total_ent));
-
- /* verify that read/write don't work yet */
- rmp->rx_interrupt_count = 0;
- test_request = 0x0;
- ret = msm_smp2p_out_write(smp2p_obj, test_request);
- UT_ASSERT_INT(ret, ==, -ENODEV);
- UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 0);
-
- ret = msm_smp2p_out_read(smp2p_obj, &test_response);
- UT_ASSERT_INT(ret, ==, -ENODEV);
-
- /* allocate remote entry and verify open */
- msm_smp2p_set_remote_mock_exists(true);
- rmp->tx_interrupt();
-
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &cb_data.cb_completion, HZ / 2),
- >, 0);
- UT_ASSERT_INT(cb_data.cb_count, ==, 1);
- UT_ASSERT_INT(cb_data.event_open, ==, 1);
- UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 2);
-
- /* do write (test outbound entries) */
- rmp->rx_interrupt_count = 0;
- test_request = 0xC0DE;
- ret = msm_smp2p_out_write(smp2p_obj, test_request);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
- /* do read (test inbound entries) */
- ret = msm_smp2p_out_read(smp2p_obj, &test_response);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_INT(test_request, ==, test_response);
-
- ret = msm_smp2p_out_close(&smp2p_obj);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_PTR(smp2p_obj, ==, 0);
-
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- pr_err("%s: Failed\n", __func__);
- seq_puts(s, "\tFailed\n");
- (void)msm_smp2p_out_close(&smp2p_obj);
- }
-}
-
-/**
- * smp2p_ut_mock_loopback - Exercise the remote loopback using remote mock.
- *
- * @s: pointer to output file
- *
- * This test exercises the remote loopback code using the remote mock
- * object, which simulates the remote processor sending remote loopback
- * commands to the local processor.
- */
-static void smp2p_ut_mock_loopback(struct seq_file *s)
-{
- int failed = 0;
- struct msm_smp2p_remote_mock *rmp = NULL;
- int ret;
- uint32_t test_request = 0;
- uint32_t test_response = 0;
- struct msm_smp2p_out *local;
-
- seq_printf(s, "Running %s\n", __func__);
- do {
- /* Initialize the mock edge */
- ret = smp2p_reset_mock_edge();
- UT_ASSERT_INT(ret, ==, 0);
-
- rmp = msm_smp2p_get_remote_mock();
- UT_ASSERT_PTR(rmp, !=, NULL);
-
- memset(&rmp->remote_item, 0,
- sizeof(struct smp2p_smem_item));
- rmp->remote_item.header.magic = SMP2P_MAGIC;
- SMP2P_SET_LOCAL_PID(
- rmp->remote_item.header.rem_loc_proc_id,
- SMP2P_REMOTE_MOCK_PROC);
- SMP2P_SET_REMOTE_PID(
- rmp->remote_item.header.rem_loc_proc_id,
- SMP2P_APPS_PROC);
- SMP2P_SET_VERSION(
- rmp->remote_item.header.feature_version, 1);
- SMP2P_SET_FEATURES(
- rmp->remote_item.header.feature_version, 0);
- SMP2P_SET_ENT_TOTAL(
- rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
- SMP2P_SET_ENT_VALID(
- rmp->remote_item.header.valid_total_ent, 1);
- rmp->remote_item.header.flags = 0x0;
- msm_smp2p_set_remote_mock_exists(true);
-
- /* Create test entry and attach loopback server */
- rmp->rx_interrupt_count = 0;
- reinit_completion(&rmp->cb_completion);
- strlcpy(rmp->remote_item.entries[0].name, "smp2p",
- SMP2P_MAX_ENTRY_NAME);
- rmp->remote_item.entries[0].entry = 0;
- rmp->tx_interrupt();
-
- local = msm_smp2p_init_rmt_lpb_proc(SMP2P_REMOTE_MOCK_PROC);
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &rmp->cb_completion, HZ / 2),
- >, 0);
- UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 2);
-
- /* Send Echo Command */
- rmp->rx_interrupt_count = 0;
- reinit_completion(&rmp->cb_completion);
- SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
- SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_ECHO);
- SMP2P_SET_RMT_DATA(test_request, 10);
- rmp->remote_item.entries[0].entry = test_request;
- rmp->tx_interrupt();
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &rmp->cb_completion, HZ / 2),
- >, 0);
-
- /* Verify Echo Response */
- UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
- ret = msm_smp2p_out_read(local,
- &test_response);
- UT_ASSERT_INT(ret, ==, 0);
- test_response = SMP2P_GET_RMT_DATA(test_response);
- UT_ASSERT_INT(test_response, ==, 10);
-
- /* Send PINGPONG command */
- test_request = 0;
- test_response = 0;
- rmp->rx_interrupt_count = 0;
- reinit_completion(&rmp->cb_completion);
- SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
- SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_PINGPONG);
- SMP2P_SET_RMT_DATA(test_request, 10);
- rmp->remote_item.entries[0].entry = test_request;
- rmp->tx_interrupt();
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &rmp->cb_completion, HZ / 2),
- >, 0);
-
- /* Verify PINGPONG Response */
- UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
- ret = msm_smp2p_out_read(local, &test_response);
- UT_ASSERT_INT(ret, ==, 0);
- test_response = SMP2P_GET_RMT_DATA(test_response);
- UT_ASSERT_INT(test_response, ==, 9);
-
- /* Send CLEARALL command */
- test_request = 0;
- test_response = 0;
- rmp->rx_interrupt_count = 0;
- reinit_completion(&rmp->cb_completion);
- SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
- SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_CLEARALL);
- SMP2P_SET_RMT_DATA(test_request, 10);
- rmp->remote_item.entries[0].entry = test_request;
- rmp->tx_interrupt();
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &rmp->cb_completion, HZ / 2),
- >, 0);
-
- /* Verify CLEARALL response */
- UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
- ret = msm_smp2p_out_read(local, &test_response);
- UT_ASSERT_INT(ret, ==, 0);
- test_response = SMP2P_GET_RMT_DATA(test_response);
- UT_ASSERT_INT(test_response, ==, 0);
-
- ret = msm_smp2p_deinit_rmt_lpb_proc(SMP2P_REMOTE_MOCK_PROC);
- UT_ASSERT_INT(ret, ==, 0);
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- pr_err("%s: Failed\n", __func__);
- seq_puts(s, "\tFailed\n");
- msm_smp2p_deinit_rmt_lpb_proc(SMP2P_REMOTE_MOCK_PROC);
- }
-}
-
-/**
- * smp2p_ut_remote_inout_core - Verify inbound/outbound functionality.
- *
- * @s: pointer to output file
- * @remote_pid: Remote processor to test
- *
- * This test verifies inbound/outbound functionality for the remote processor.
- */
-static void smp2p_ut_remote_inout_core(struct seq_file *s, int remote_pid)
-{
- int failed = 0;
-	struct msm_smp2p_out *handle = NULL;
- int ret;
- uint32_t test_request;
- uint32_t test_response = 0;
- static struct mock_cb_data cb_out;
- static struct mock_cb_data cb_in;
-
- seq_printf(s, "Running %s for '%s' remote pid %d\n",
- __func__, smp2p_pid_to_name(remote_pid), remote_pid);
- mock_cb_data_init(&cb_out);
- mock_cb_data_init(&cb_in);
- do {
- /* Open output entry */
- ret = msm_smp2p_out_open(remote_pid, "smp2p",
- &cb_out.nb, &handle);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &cb_out.cb_completion, HZ / 2),
- >, 0);
- UT_ASSERT_INT(cb_out.cb_count, ==, 1);
- UT_ASSERT_INT(cb_out.event_open, ==, 1);
-
- /* Open inbound entry */
- ret = msm_smp2p_in_register(remote_pid, "smp2p",
- &cb_in.nb);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &cb_in.cb_completion, HZ / 2),
- >, 0);
- UT_ASSERT_INT(cb_in.cb_count, ==, 1);
- UT_ASSERT_INT(cb_in.event_open, ==, 1);
-
- /* Write an echo request */
- mock_cb_data_reset(&cb_out);
- mock_cb_data_reset(&cb_in);
- test_request = 0x0;
- SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
- SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_ECHO);
- SMP2P_SET_RMT_DATA(test_request, 0xAA55);
- ret = msm_smp2p_out_write(handle, test_request);
- UT_ASSERT_INT(ret, ==, 0);
-
- /* Verify inbound reply */
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &cb_in.cb_completion, HZ / 2),
- >, 0);
- UT_ASSERT_INT(cb_in.cb_count, ==, 1);
- UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
- UT_ASSERT_INT(SMP2P_GET_RMT_DATA(
- cb_in.entry_data.current_value), ==, 0xAA55);
-
- ret = msm_smp2p_in_read(remote_pid, "smp2p", &test_response);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_INT(0, ==, SMP2P_GET_RMT_CMD_TYPE(test_response));
- UT_ASSERT_INT(SMP2P_LB_CMD_ECHO, ==,
- SMP2P_GET_RMT_CMD(test_response));
- UT_ASSERT_INT(0xAA55, ==, SMP2P_GET_RMT_DATA(test_response));
-
- /* Write a clear all request */
- mock_cb_data_reset(&cb_in);
- test_request = 0x0;
- SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
- SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_CLEARALL);
- SMP2P_SET_RMT_DATA(test_request, 0xAA55);
- ret = msm_smp2p_out_write(handle, test_request);
- UT_ASSERT_INT(ret, ==, 0);
-
- /* Verify inbound reply */
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &cb_in.cb_completion, HZ / 2),
- >, 0);
- UT_ASSERT_INT(cb_in.cb_count, ==, 1);
- UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
- UT_ASSERT_INT(SMP2P_GET_RMT_DATA(
- cb_in.entry_data.current_value), ==, 0x0000);
-
- ret = msm_smp2p_in_read(remote_pid, "smp2p", &test_response);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_INT(0, ==, SMP2P_GET_RMT_CMD_TYPE(test_response));
- UT_ASSERT_INT(0x0000, ==, SMP2P_GET_RMT_DATA(test_response));
-
- /* Write a decrement request */
- mock_cb_data_reset(&cb_in);
- test_request = 0x0;
- SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
- SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_PINGPONG);
- SMP2P_SET_RMT_DATA(test_request, 0xAA55);
- ret = msm_smp2p_out_write(handle, test_request);
- UT_ASSERT_INT(ret, ==, 0);
-
- /* Verify inbound reply */
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &cb_in.cb_completion, HZ / 2),
- >, 0);
- UT_ASSERT_INT(cb_in.cb_count, ==, 1);
- UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
- UT_ASSERT_INT(SMP2P_GET_RMT_DATA(
- cb_in.entry_data.current_value), ==, 0xAA54);
-
- ret = msm_smp2p_in_read(remote_pid, "smp2p", &test_response);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_INT(0, ==, SMP2P_GET_RMT_CMD_TYPE(test_response));
- UT_ASSERT_INT(SMP2P_LB_CMD_PINGPONG, ==,
- SMP2P_GET_RMT_CMD(test_response));
- UT_ASSERT_INT(0xAA54, ==, SMP2P_GET_RMT_DATA(test_response));
-
- /* Test the ignore flag */
- mock_cb_data_reset(&cb_in);
- test_request = 0x0;
- SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
- SMP2P_SET_RMT_CMD(test_request, SMP2P_RLPB_IGNORE);
- SMP2P_SET_RMT_DATA(test_request, 0xAA55);
- ret = msm_smp2p_out_write(handle, test_request);
- UT_ASSERT_INT(ret, ==, 0);
-
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &cb_in.cb_completion, HZ / 2),
- ==, 0);
- UT_ASSERT_INT(cb_in.cb_count, ==, 0);
- UT_ASSERT_INT(cb_in.event_entry_update, ==, 0);
- ret = msm_smp2p_in_read(remote_pid, "smp2p", &test_response);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_INT(0xAA54, ==, SMP2P_GET_RMT_DATA(test_response));
-
- /* Cleanup */
- ret = msm_smp2p_out_close(&handle);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_PTR(handle, ==, 0);
- ret = msm_smp2p_in_unregister(remote_pid, "smp2p", &cb_in.nb);
- UT_ASSERT_INT(ret, ==, 0);
-
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- if (handle)
- (void)msm_smp2p_out_close(&handle);
- (void)msm_smp2p_in_unregister(remote_pid, "smp2p", &cb_in.nb);
-
- pr_err("%s: Failed\n", __func__);
- seq_puts(s, "\tFailed\n");
- }
-}
-
-/**
- * smp2p_ut_remote_inout - Verify inbound/outbound for all processors.
- *
- * @s: pointer to output file
- *
- * This test verifies inbound and outbound functionality for all
- * configured remote processors.
- */
-static void smp2p_ut_remote_inout(struct seq_file *s)
-{
- struct smp2p_interrupt_config *int_cfg;
- int pid;
-
- int_cfg = smp2p_get_interrupt_config();
- if (!int_cfg) {
- seq_puts(s, "Remote processor config unavailable\n");
- return;
- }
-
- for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid) {
- if (!int_cfg[pid].is_configured)
- continue;
-
- msm_smp2p_deinit_rmt_lpb_proc(pid);
- smp2p_ut_remote_inout_core(s, pid);
- msm_smp2p_init_rmt_lpb_proc(pid);
- }
-}
-
-/**
- * smp2p_ut_remote_out_max_entries_core - Verify open functionality.
- *
- * @s: pointer to output file
- * @remote_pid: Remote processor for which the test is executed.
- *
- * This test verifies open functionality by creating the maximum number of
- * outbound entries.
- */
-static void smp2p_ut_remote_out_max_entries_core(struct seq_file *s,
- int remote_pid)
-{
- int j = 0;
- int failed = 0;
- struct msm_smp2p_out *handle[SMP2P_MAX_ENTRY];
- int ret;
- static struct mock_cb_data cb_out[SMP2P_MAX_ENTRY];
- char entry_name[SMP2P_MAX_ENTRY_NAME];
- int num_created;
-
- seq_printf(s, "Running %s for '%s' remote pid %d\n",
- __func__, smp2p_pid_to_name(remote_pid), remote_pid);
-
- for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
- handle[j] = NULL;
- mock_cb_data_init(&cb_out[j]);
- }
-
- do {
- num_created = 0;
- for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
- /* Open as many output entries as possible */
- scnprintf((char *)entry_name, SMP2P_MAX_ENTRY_NAME,
- "smp2p%d", j);
- ret = msm_smp2p_out_open(remote_pid, entry_name,
- &cb_out[j].nb, &handle[j]);
- if (ret == -ENOMEM)
- /* hit max number */
- break;
- UT_ASSERT_INT(ret, ==, 0);
- ++num_created;
- }
- if (failed)
- break;
-
-		/* verify we created at least one entry */
- UT_ASSERT_INT(num_created, <=, SMP2P_MAX_ENTRY);
- UT_ASSERT_INT(num_created, >, 0);
-
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- pr_err("%s: Failed\n", __func__);
- seq_puts(s, "\tFailed\n");
- }
-
- /* cleanup */
- for (j = 0; j < SMP2P_MAX_ENTRY; j++)
- ret = msm_smp2p_out_close(&handle[j]);
-}
-
-/**
- * smp2p_ut_remote_out_max_entries - Verify open for all configured processors.
- *
- * @s: pointer to output file
- *
- * This test verifies creating the maximum number of entries for
- * all configured remote processors.
- */
-static void smp2p_ut_remote_out_max_entries(struct seq_file *s)
-{
- struct smp2p_interrupt_config *int_cfg;
- int pid;
-
- int_cfg = smp2p_get_interrupt_config();
- if (!int_cfg) {
- seq_puts(s, "Remote processor config unavailable\n");
- return;
- }
-
- for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid) {
- if (!int_cfg[pid].is_configured)
- continue;
-
- smp2p_ut_remote_out_max_entries_core(s, pid);
- }
-}
-
-/**
- * smp2p_ut_local_in_max_entries - Verify registering and unregistering.
- *
- * @s: pointer to output file
- *
- * This test verifies registering and unregistering for inbound entries using
- * the remote mock processor.
- */
-static void smp2p_ut_local_in_max_entries(struct seq_file *s)
-{
- int j = 0;
- int failed = 0;
- struct msm_smp2p_remote_mock *rmp = NULL;
- int ret;
- static struct mock_cb_data cb_in[SMP2P_MAX_ENTRY];
- static struct mock_cb_data cb_out;
-
- seq_printf(s, "Running %s\n", __func__);
-
- for (j = 0; j < SMP2P_MAX_ENTRY; j++)
- mock_cb_data_init(&cb_in[j]);
-
- mock_cb_data_init(&cb_out);
-
- do {
- /* Initialize mock edge */
- ret = smp2p_reset_mock_edge();
- UT_ASSERT_INT(ret, ==, 0);
-
- rmp = msm_smp2p_get_remote_mock();
- UT_ASSERT_PTR(rmp, !=, NULL);
-
- rmp->rx_interrupt_count = 0;
- memset(&rmp->remote_item, 0,
- sizeof(struct smp2p_smem_item));
- rmp->remote_item.header.magic = SMP2P_MAGIC;
- SMP2P_SET_LOCAL_PID(
- rmp->remote_item.header.rem_loc_proc_id,
- SMP2P_REMOTE_MOCK_PROC);
- SMP2P_SET_REMOTE_PID(
- rmp->remote_item.header.rem_loc_proc_id,
- SMP2P_APPS_PROC);
- SMP2P_SET_VERSION(
- rmp->remote_item.header.feature_version, 1);
- SMP2P_SET_FEATURES(
- rmp->remote_item.header.feature_version, 0);
- SMP2P_SET_ENT_TOTAL(
- rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
- SMP2P_SET_ENT_VALID(
- rmp->remote_item.header.valid_total_ent, 0);
- rmp->remote_item.header.flags = 0x0;
- msm_smp2p_set_remote_mock_exists(true);
-
- /* Create Max Entries in the remote mock object */
- for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
- scnprintf(rmp->remote_item.entries[j].name,
- SMP2P_MAX_ENTRY_NAME, "smp2p%d", j);
- rmp->remote_item.entries[j].entry = 0;
- rmp->tx_interrupt();
- }
-
- /* Register for in entries */
- for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
- ret = msm_smp2p_in_register(SMP2P_REMOTE_MOCK_PROC,
- rmp->remote_item.entries[j].name,
- &(cb_in[j].nb));
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &(cb_in[j].cb_completion), HZ / 2),
- >, 0);
- UT_ASSERT_INT(cb_in[j].cb_count, ==, 1);
- UT_ASSERT_INT(cb_in[j].event_entry_update, ==, 0);
- }
- UT_ASSERT_INT(j, ==, SMP2P_MAX_ENTRY);
-
- /* Unregister */
- for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
- ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
- rmp->remote_item.entries[j].name,
- &(cb_in[j].nb));
- UT_ASSERT_INT(ret, ==, 0);
- }
- UT_ASSERT_INT(j, ==, SMP2P_MAX_ENTRY);
-
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- pr_err("%s: Failed\n", __func__);
- seq_puts(s, "\tFailed\n");
-
- for (j = 0; j < SMP2P_MAX_ENTRY; j++)
- ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
- rmp->remote_item.entries[j].name,
- &(cb_in[j].nb));
- }
-}
-
-/**
- * smp2p_ut_local_in_multiple - Verify Multiple Inbound Registration.
- *
- * @s: pointer to output file
- *
- * This test verifies multiple clients registering for the same inbound
- * entry using the remote mock processor.
- */
-static void smp2p_ut_local_in_multiple(struct seq_file *s)
-{
- int failed = 0;
- struct msm_smp2p_remote_mock *rmp = NULL;
- int ret;
- static struct mock_cb_data cb_in_1;
- static struct mock_cb_data cb_in_2;
- static struct mock_cb_data cb_out;
-
- seq_printf(s, "Running %s\n", __func__);
-
- mock_cb_data_init(&cb_in_1);
- mock_cb_data_init(&cb_in_2);
- mock_cb_data_init(&cb_out);
-
- do {
- /* Initialize mock edge */
- ret = smp2p_reset_mock_edge();
- UT_ASSERT_INT(ret, ==, 0);
-
- rmp = msm_smp2p_get_remote_mock();
- UT_ASSERT_PTR(rmp, !=, NULL);
-
- rmp->rx_interrupt_count = 0;
- memset(&rmp->remote_item, 0,
- sizeof(struct smp2p_smem_item));
- rmp->remote_item.header.magic = SMP2P_MAGIC;
- SMP2P_SET_LOCAL_PID(
- rmp->remote_item.header.rem_loc_proc_id,
- SMP2P_REMOTE_MOCK_PROC);
- SMP2P_SET_REMOTE_PID(
- rmp->remote_item.header.rem_loc_proc_id,
- SMP2P_APPS_PROC);
- SMP2P_SET_VERSION(
- rmp->remote_item.header.feature_version, 1);
- SMP2P_SET_FEATURES(
- rmp->remote_item.header.feature_version, 0);
- SMP2P_SET_ENT_TOTAL(
- rmp->remote_item.header.valid_total_ent, 1);
- SMP2P_SET_ENT_VALID(
- rmp->remote_item.header.valid_total_ent, 0);
- rmp->remote_item.header.flags = 0x0;
- msm_smp2p_set_remote_mock_exists(true);
-
- /* Create an Entry in the remote mock object */
- scnprintf(rmp->remote_item.entries[0].name,
- SMP2P_MAX_ENTRY_NAME, "smp2p%d", 1);
- rmp->remote_item.entries[0].entry = 0;
- rmp->tx_interrupt();
-
- /* Register multiple clients for the inbound entry */
- ret = msm_smp2p_in_register(SMP2P_REMOTE_MOCK_PROC,
- rmp->remote_item.entries[0].name,
- &cb_in_1.nb);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &(cb_in_1.cb_completion), HZ / 2),
- >, 0);
- UT_ASSERT_INT(cb_in_1.cb_count, ==, 1);
- UT_ASSERT_INT(cb_in_1.event_entry_update, ==, 0);
-
- ret = msm_smp2p_in_register(SMP2P_REMOTE_MOCK_PROC,
- rmp->remote_item.entries[0].name,
- &cb_in_2.nb);
- UT_ASSERT_INT(ret, ==, 0);
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &(cb_in_2.cb_completion), HZ / 2),
- >, 0);
- UT_ASSERT_INT(cb_in_2.cb_count, ==, 1);
- UT_ASSERT_INT(cb_in_2.event_entry_update, ==, 0);
-
-
- /* Unregister the clients */
- ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
- rmp->remote_item.entries[0].name,
- &(cb_in_1.nb));
- UT_ASSERT_INT(ret, ==, 0);
-
- ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
- rmp->remote_item.entries[0].name,
- &(cb_in_2.nb));
- UT_ASSERT_INT(ret, ==, 0);
-
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- pr_err("%s: Failed\n", __func__);
- seq_puts(s, "\tFailed\n");
-
- ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
- rmp->remote_item.entries[0].name,
- &(cb_in_1.nb));
-
- ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
- rmp->remote_item.entries[0].name,
- &(cb_in_2.nb));
- }
-}
-
-/**
- * smp2p_ut_local_ssr_ack - Verify SSR Done/ACK Feature
- *
- * @s: pointer to output file
- */
-static void smp2p_ut_local_ssr_ack(struct seq_file *s)
-{
- int failed = 0;
- struct msm_smp2p_remote_mock *rmp = NULL;
- int ret;
-
- seq_printf(s, "Running %s\n", __func__);
- do {
- struct smp2p_smem *rhdr;
- struct smp2p_smem *lhdr;
- int negotiation_state;
-
- /* initialize v1 without SMP2P_FEATURE_SSR_ACK enabled */
- ret = smp2p_reset_mock_edge();
- UT_ASSERT_INT(ret, ==, 0);
- rmp = msm_smp2p_get_remote_mock();
- UT_ASSERT_PTR(rmp, !=, NULL);
- rhdr = &rmp->remote_item.header;
-
- rmp->rx_interrupt_count = 0;
- memset(&rmp->remote_item, 0, sizeof(struct smp2p_smem_item));
- rhdr->magic = SMP2P_MAGIC;
- SMP2P_SET_LOCAL_PID(rhdr->rem_loc_proc_id,
- SMP2P_REMOTE_MOCK_PROC);
- SMP2P_SET_REMOTE_PID(rhdr->rem_loc_proc_id, SMP2P_APPS_PROC);
- SMP2P_SET_VERSION(rhdr->feature_version, 1);
- SMP2P_SET_FEATURES(rhdr->feature_version, 0);
- SMP2P_SET_ENT_TOTAL(rhdr->valid_total_ent, SMP2P_MAX_ENTRY);
- SMP2P_SET_ENT_VALID(rhdr->valid_total_ent, 0);
- rhdr->flags = 0x0;
- msm_smp2p_set_remote_mock_exists(true);
- rmp->tx_interrupt();
-
- /* verify edge is open */
- lhdr = smp2p_get_out_item(SMP2P_REMOTE_MOCK_PROC,
- &negotiation_state);
- UT_ASSERT_PTR(NULL, !=, lhdr);
- UT_ASSERT_INT(negotiation_state, ==, SMP2P_EDGE_STATE_OPENED);
- UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
- /* verify no response to ack feature */
- rmp->rx_interrupt_count = 0;
- SMP2P_SET_RESTART_DONE(rhdr->flags, 1);
- rmp->tx_interrupt();
- UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_DONE(lhdr->flags));
- UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_ACK(lhdr->flags));
- UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 0);
-
- /* initialize v1 with SMP2P_FEATURE_SSR_ACK enabled */
- ret = smp2p_reset_mock_edge();
- UT_ASSERT_INT(ret, ==, 0);
- rmp = msm_smp2p_get_remote_mock();
- UT_ASSERT_PTR(rmp, !=, NULL);
- rhdr = &rmp->remote_item.header;
-
- rmp->rx_interrupt_count = 0;
- memset(&rmp->remote_item, 0, sizeof(struct smp2p_smem_item));
- rhdr->magic = SMP2P_MAGIC;
- SMP2P_SET_LOCAL_PID(rhdr->rem_loc_proc_id,
- SMP2P_REMOTE_MOCK_PROC);
- SMP2P_SET_REMOTE_PID(rhdr->rem_loc_proc_id, SMP2P_APPS_PROC);
- SMP2P_SET_VERSION(rhdr->feature_version, 1);
- SMP2P_SET_FEATURES(rhdr->feature_version,
- SMP2P_FEATURE_SSR_ACK);
- SMP2P_SET_ENT_TOTAL(rhdr->valid_total_ent, SMP2P_MAX_ENTRY);
- SMP2P_SET_ENT_VALID(rhdr->valid_total_ent, 0);
- rmp->rx_interrupt_count = 0;
- rhdr->flags = 0x0;
- msm_smp2p_set_remote_mock_exists(true);
- rmp->tx_interrupt();
-
- /* verify edge is open */
- lhdr = smp2p_get_out_item(SMP2P_REMOTE_MOCK_PROC,
- &negotiation_state);
- UT_ASSERT_PTR(NULL, !=, lhdr);
- UT_ASSERT_INT(negotiation_state, ==, SMP2P_EDGE_STATE_OPENED);
- UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
- /* verify response to ack feature */
- rmp->rx_interrupt_count = 0;
- SMP2P_SET_RESTART_DONE(rhdr->flags, 1);
- rmp->tx_interrupt();
- UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_DONE(lhdr->flags));
- UT_ASSERT_INT(1, ==, SMP2P_GET_RESTART_ACK(lhdr->flags));
- UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
- rmp->rx_interrupt_count = 0;
- SMP2P_SET_RESTART_DONE(rhdr->flags, 0);
- rmp->tx_interrupt();
- UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_DONE(lhdr->flags));
- UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_ACK(lhdr->flags));
- UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- pr_err("%s: Failed\n", __func__);
- seq_puts(s, "\tFailed\n");
- }
-}
-
-/**
- * get_ssr_name_for_proc - Retrieve an SSR name from the provided list
- *
- * @names: List of possible processor names
- * @name_len: The length of @names
- * @index: Index into @names
- *
- * Return: Pointer to the next processor name, NULL in error conditions
- */
-static char *get_ssr_name_for_proc(char *names[], size_t name_len, int index)
-{
- if (index >= name_len) {
- pr_err("%s: SSR failed; check subsys name table\n",
- __func__);
- return NULL;
- }
-
- return names[index];
-}
-
-/**
- * smp2p_ut_remotesubsys_ssr_ack - Verify SSR Done/ACK feature for a subsystem
- *
- * @s: pointer to output file
- * @rpid: Remote processor ID
- * @int_cfg: Interrupt config
- */
-static void smp2p_ut_remotesubsys_ssr_ack(struct seq_file *s, uint32_t rpid,
- struct smp2p_interrupt_config *int_cfg)
-{
- int failed = 0;
-
- seq_printf(s, "Running %s\n", __func__);
- do {
- struct smp2p_smem *rhdr;
- struct smp2p_smem *lhdr;
- int negotiation_state;
- int name_index;
- int ret;
- uint32_t ssr_done_start;
- bool ssr_ack_enabled = false;
- bool ssr_success = false;
- char *name = NULL;
-
- static char *mpss_names[] = {"modem", "mpss"};
- static char *lpass_names[] = {"adsp", "lpass"};
- static char *sensor_names[] = {"slpi", "dsps"};
- static char *wcnss_names[] = {"wcnss"};
-
- lhdr = smp2p_get_out_item(rpid, &negotiation_state);
- UT_ASSERT_PTR(NULL, !=, lhdr);
- UT_ASSERT_INT(SMP2P_EDGE_STATE_OPENED, ==, negotiation_state);
-
- rhdr = smp2p_get_in_item(rpid);
- UT_ASSERT_PTR(NULL, !=, rhdr);
-
- /* get initial state of SSR flags */
- if (SMP2P_GET_FEATURES(rhdr->feature_version)
- & SMP2P_FEATURE_SSR_ACK)
- ssr_ack_enabled = true;
- else
- ssr_ack_enabled = false;
-
- ssr_done_start = SMP2P_GET_RESTART_DONE(rhdr->flags);
- UT_ASSERT_INT(ssr_done_start, ==,
- SMP2P_GET_RESTART_ACK(lhdr->flags));
-
- /* trigger restart */
- name_index = 0;
- while (!ssr_success) {
-
- switch (rpid) {
- case SMP2P_MODEM_PROC:
- name = get_ssr_name_for_proc(mpss_names,
- ARRAY_SIZE(mpss_names),
- name_index);
- break;
- case SMP2P_AUDIO_PROC:
- name = get_ssr_name_for_proc(lpass_names,
- ARRAY_SIZE(lpass_names),
- name_index);
- break;
- case SMP2P_SENSOR_PROC:
- name = get_ssr_name_for_proc(sensor_names,
- ARRAY_SIZE(sensor_names),
- name_index);
- break;
- case SMP2P_WIRELESS_PROC:
- name = get_ssr_name_for_proc(wcnss_names,
- ARRAY_SIZE(wcnss_names),
- name_index);
- break;
- default:
- pr_err("%s: Invalid proc ID %d given for ssr\n",
- __func__, rpid);
- }
-
- if (!name) {
- seq_puts(s, "\tSSR failed; check subsys name table\n");
- failed = true;
- break;
- }
-
- seq_printf(s, "Restarting '%s'\n", name);
- ret = subsystem_restart(name);
- if (ret == -ENODEV) {
- seq_puts(s, "\tSSR call failed\n");
- ++name_index;
- continue;
- }
- ssr_success = true;
- }
- if (failed)
- break;
-
- msleep(10*1000);
-
- /* verify ack signaling */
- if (ssr_ack_enabled) {
- ssr_done_start ^= 1;
- UT_ASSERT_INT(ssr_done_start, ==,
- SMP2P_GET_RESTART_ACK(lhdr->flags));
- UT_ASSERT_INT(ssr_done_start, ==,
- SMP2P_GET_RESTART_DONE(rhdr->flags));
- UT_ASSERT_INT(0, ==,
- SMP2P_GET_RESTART_DONE(lhdr->flags));
- seq_puts(s, "\tSSR ACK Enabled and Toggled\n");
- } else {
- UT_ASSERT_INT(0, ==,
- SMP2P_GET_RESTART_DONE(lhdr->flags));
- UT_ASSERT_INT(0, ==,
- SMP2P_GET_RESTART_ACK(lhdr->flags));
-
- UT_ASSERT_INT(0, ==,
- SMP2P_GET_RESTART_DONE(rhdr->flags));
- UT_ASSERT_INT(0, ==,
- SMP2P_GET_RESTART_ACK(rhdr->flags));
- seq_puts(s, "\tSSR ACK Disabled\n");
- }
-
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- pr_err("%s: Failed\n", __func__);
- seq_puts(s, "\tFailed\n");
- }
-}
-
-/**
- * smp2p_ut_remote_ssr_ack - Verify SSR Done/ACK Feature
- *
- * @s: pointer to output file
- *
- * Triggers SSR for each subsystem.
- */
-static void smp2p_ut_remote_ssr_ack(struct seq_file *s)
-{
- struct smp2p_interrupt_config *int_cfg;
- int pid;
-
- int_cfg = smp2p_get_interrupt_config();
- if (!int_cfg) {
- seq_puts(s,
- "Remote processor config unavailable\n");
- return;
- }
-
- for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid) {
- if (!int_cfg[pid].is_configured)
- continue;
-
- msm_smp2p_deinit_rmt_lpb_proc(pid);
- smp2p_ut_remotesubsys_ssr_ack(s, pid, &int_cfg[pid]);
- msm_smp2p_init_rmt_lpb_proc(pid);
- }
-}
-
-static struct dentry *dent;
-static DEFINE_MUTEX(show_lock);
-
-static int debugfs_show(struct seq_file *s, void *data)
-{
- void (*show)(struct seq_file *) = s->private;
-
- mutex_lock(&show_lock);
- show(s);
- mutex_unlock(&show_lock);
-
- return 0;
-}
-
-static int debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, debugfs_show, inode->i_private);
-}
-
-static const struct file_operations debug_ops = {
- .open = debug_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek,
-};
-
-void smp2p_debug_create(const char *name,
- void (*show)(struct seq_file *))
-{
- struct dentry *file;
-
- file = debugfs_create_file(name, 0444, dent, show, &debug_ops);
- if (!file)
- pr_err("%s: unable to create file '%s'\n", __func__, name);
-}
-
-void smp2p_debug_create_u32(const char *name, uint32_t *value)
-{
- struct dentry *file;
-
- file = debugfs_create_u32(name, S_IRUGO | S_IWUSR, dent, value);
- if (!file)
- pr_err("%s: unable to create file '%s'\n", __func__, name);
-}
-
-static int __init smp2p_debugfs_init(void)
-{
- dent = debugfs_create_dir("smp2p_test", 0);
- if (IS_ERR(dent))
- return PTR_ERR(dent);
-
- /*
- * Add Unit Test entries.
- *
- * The idea with unit tests is that you can run all of them
- * from ADB shell by doing:
- * adb shell
- * cat ut*
- *
-	 * If particular tests fail, you can then repeatedly run the
-	 * failing tests as you debug and resolve them.
- */
- smp2p_debug_create("ut_local_basic",
- smp2p_ut_local_basic);
- smp2p_debug_create("ut_local_late_open",
- smp2p_ut_local_late_open);
- smp2p_debug_create("ut_local_early_open",
- smp2p_ut_local_early_open);
- smp2p_debug_create("ut_mock_loopback",
- smp2p_ut_mock_loopback);
- smp2p_debug_create("ut_remote_inout",
- smp2p_ut_remote_inout);
- smp2p_debug_create("ut_local_in_max_entries",
- smp2p_ut_local_in_max_entries);
- smp2p_debug_create("ut_remote_out_max_entries",
- smp2p_ut_remote_out_max_entries);
- smp2p_debug_create("ut_local_in_multiple",
- smp2p_ut_local_in_multiple);
- smp2p_debug_create("ut_local_ssr_ack",
- smp2p_ut_local_ssr_ack);
- smp2p_debug_create("ut_remote_ssr_ack",
- smp2p_ut_remote_ssr_ack);
-
- return 0;
-}
-module_init(smp2p_debugfs_init);
-/* drivers/soc/qcom/smp2p_test_common.h
- *
- * Copyright (c) 2013-2014,2016 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef _SMP2P_TEST_COMMON_H_
-#define _SMP2P_TEST_COMMON_H_
-
-#include <linux/debugfs.h>
-
-/**
- * Unit test assertion for logging test cases.
- *
- * @a lval
- * @b rval
- * @cmp comparison operator
- *
- * The assertion fails if (@a cmp @b) is not true, in which case
- * the macro logs the function and line number where the error
- * occurred along with the values of @a and @b.
- *
- * Assumes that the following local variables exist:
- * @s - sequential output file pointer
- * @failed - set to true if test fails
- */
-#define UT_ASSERT_INT(a, cmp, b) \
- { \
- int a_tmp = (a); \
- int b_tmp = (b); \
- if (!((a_tmp)cmp(b_tmp))) { \
- seq_printf(s, "%s:%d Fail: " #a "(%d) " #cmp " " #b "(%d)\n", \
- __func__, __LINE__, \
- a_tmp, b_tmp); \
- failed = 1; \
- break; \
- } \
- }
-
-#define UT_ASSERT_PTR(a, cmp, b) \
- { \
- void *a_tmp = (a); \
- void *b_tmp = (b); \
- if (!((a_tmp)cmp(b_tmp))) { \
- seq_printf(s, "%s:%d Fail: " #a "(%pK) " #cmp \
- " " #b "(%pK)\n", \
- __func__, __LINE__, \
- a_tmp, b_tmp); \
- failed = 1; \
- break; \
- } \
- }
-
-#define UT_ASSERT_UINT(a, cmp, b) \
- { \
- unsigned a_tmp = (a); \
- unsigned b_tmp = (b); \
- if (!((a_tmp)cmp(b_tmp))) { \
- seq_printf(s, "%s:%d Fail: " #a "(%u) " #cmp " " #b "(%u)\n", \
- __func__, __LINE__, \
- a_tmp, b_tmp); \
- failed = 1; \
- break; \
- } \
- }
-
-#define UT_ASSERT_HEX(a, cmp, b) \
- { \
- unsigned a_tmp = (a); \
- unsigned b_tmp = (b); \
- if (!((a_tmp)cmp(b_tmp))) { \
- seq_printf(s, "%s:%d Fail: " #a "(%x) " #cmp " " #b "(%x)\n", \
- __func__, __LINE__, \
- a_tmp, b_tmp); \
- failed = 1; \
- break; \
- } \
- }
-
-/**
- * In-range unit test assertion for test cases.
- *
- * @a lval
- * @minv Minimum value
- * @maxv Maximum value
- *
- * The assertion fails if @a is outside the inclusive range
- * [@minv, @maxv], i.e. if (@a < @minv) or (@a > @maxv). In the
- * failure case, the macro logs the function and line number where
- * the error occurred along with the values of @a, @minv and @maxv.
- *
- * Assumes that the following local variables exist:
- * @s - sequential output file pointer
- * @failed - set to true if test fails
- */
-#define UT_ASSERT_INT_IN_RANGE(a, minv, maxv) \
- { \
- int a_tmp = (a); \
- int minv_tmp = (minv); \
- int maxv_tmp = (maxv); \
- if (((a_tmp) < (minv_tmp)) || ((a_tmp) > (maxv_tmp))) { \
- seq_printf(s, "%s:%d Fail: " #a "(%d) < " #minv "(%d) or " \
- #a "(%d) > " #maxv "(%d)\n", \
- __func__, __LINE__, \
- a_tmp, minv_tmp, a_tmp, maxv_tmp); \
- failed = 1; \
- break; \
- } \
- }
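These assertion macros only work inside the pattern the removed tests follow: a local int named `failed`, a seq_file pointer named `s`, and an enclosing do/while (0) loop that a failing assertion breaks out of. A minimal sketch of that convention (the test body here is hypothetical, not from the patch):

	static void smp2p_ut_example(struct seq_file *s)
	{
		int failed = 0;

		seq_printf(s, "Running %s\n", __func__);
		do {
			int value = 42;		/* stand-in for real test state */

			/* a failing assertion logs and breaks out of the loop */
			UT_ASSERT_INT(value, ==, 42);
			UT_ASSERT_INT_IN_RANGE(value, 0, 100);
			seq_puts(s, "\tOK\n");
		} while (0);

		if (failed) {
			pr_err("%s: Failed\n", __func__);
			seq_puts(s, "\tFailed\n");
		}
	}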
-
-/* Structure to track state changes for the notifier callback. */
-struct mock_cb_data {
- bool initialized;
- spinlock_t lock;
- struct notifier_block nb;
-
- /* events */
- struct completion cb_completion;
- int cb_count;
- int event_open;
- int event_entry_update;
- struct msm_smp2p_update_notif entry_data;
-};
-
-void smp2p_debug_create(const char *name, void (*show)(struct seq_file *));
-void smp2p_debug_create_u32(const char *name, uint32_t *value);
-static inline int smp2p_test_notify(struct notifier_block *self,
- unsigned long event, void *data);
-
-/**
- * Reset mock callback data to default values.
- *
- * @cb: Mock callback data
- */
-static inline void mock_cb_data_reset(struct mock_cb_data *cb)
-{
- reinit_completion(&cb->cb_completion);
- cb->cb_count = 0;
- cb->event_open = 0;
- cb->event_entry_update = 0;
- memset(&cb->entry_data, 0,
- sizeof(struct msm_smp2p_update_notif));
-}
-
-
-/**
- * Initialize mock callback data.
- *
- * @cb: Mock callback data
- */
-static inline void mock_cb_data_init(struct mock_cb_data *cb)
-{
- if (!cb->initialized) {
- init_completion(&cb->cb_completion);
- spin_lock_init(&cb->lock);
- cb->initialized = true;
- cb->nb.notifier_call = smp2p_test_notify;
- memset(&cb->entry_data, 0,
- sizeof(struct msm_smp2p_update_notif));
- }
- mock_cb_data_reset(cb);
-}
-
-/**
- * Notifier function passed into SMP2P for testing.
- *
- * @self: Pointer to calling notifier block
- * @event: Event
- * @data: Event-specific data
- * @returns: 0
- */
-static inline int smp2p_test_notify(struct notifier_block *self,
- unsigned long event, void *data)
-{
- struct mock_cb_data *cb_data_ptr;
- unsigned long flags;
-
- cb_data_ptr = container_of(self, struct mock_cb_data, nb);
-
- spin_lock_irqsave(&cb_data_ptr->lock, flags);
-
- switch (event) {
- case SMP2P_OPEN:
- ++cb_data_ptr->event_open;
- if (data) {
- cb_data_ptr->entry_data =
- *(struct msm_smp2p_update_notif *)(data);
- }
- break;
- case SMP2P_ENTRY_UPDATE:
- ++cb_data_ptr->event_entry_update;
- if (data) {
- cb_data_ptr->entry_data =
- *(struct msm_smp2p_update_notif *)(data);
- }
- break;
- default:
- pr_err("%s Unknown event\n", __func__);
- break;
- }
-
- ++cb_data_ptr->cb_count;
- complete(&cb_data_ptr->cb_completion);
- spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
- return 0;
-}
-#endif /* _SMP2P_TEST_COMMON_H_ */
enum subsystem_type type;
struct notifier_block nb;
void *handle;
- unsigned int ssr_irq;
+ int ssr_irq;
struct list_head subsystem_list;
struct work_struct work;
};
struct device_node *child = NULL;
const char *ss_type;
struct resource *res;
- struct subsystem_descriptor *subsystem;
+ struct subsystem_descriptor *subsystem = NULL;
int ret = 0;
if (!pdev) {
}
}
- INIT_WORK(&subsystem->work, subsystem_notif_wq_func);
+ if (subsystem)
+ INIT_WORK(&subsystem->work, subsystem_notif_wq_func);
return 0;
err:
destroy_workqueue(ssr_wq);
ret = wait_event_interruptible_timeout(rspi->wait,
rspi->dma_callbacked, HZ);
- if (ret > 0 && rspi->dma_callbacked)
+ if (ret > 0 && rspi->dma_callbacked) {
ret = 0;
- else if (!ret) {
- dev_err(&rspi->master->dev, "DMA timeout\n");
- ret = -ETIMEDOUT;
+ } else {
+ if (!ret) {
+ dev_err(&rspi->master->dev, "DMA timeout\n");
+ ret = -ETIMEDOUT;
+ }
if (tx)
dmaengine_terminate_all(rspi->master->dma_tx);
if (rx)
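For reference, the corrected branch structure distinguishes the three return cases of wait_event_interruptible_timeout(). A self-contained sketch of that mapping (the helper name is hypothetical):

	#include <linux/wait.h>

	static int example_wait_for_dma(wait_queue_head_t *wq, bool *done)
	{
		long rc = wait_event_interruptible_timeout(*wq, *done, HZ);

		if (rc > 0 && *done)
			return 0;		/* woken: completion callback ran */
		if (!rc)
			return -ETIMEDOUT;	/* timed out, condition still false */
		return rc;			/* < 0: interrupted (-ERESTARTSYS) */
	}

In both the timeout and the signal case the caller still has to terminate the in-flight DMA, which is why the fix moves the dmaengine_terminate_all() calls into the shared else branch.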
MODULE_DEVICE_TABLE(platform, spi_driver_ids);
+#ifdef CONFIG_PM_SLEEP
+static int rspi_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+ return spi_master_suspend(rspi->master);
+}
+
+static int rspi_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+ return spi_master_resume(rspi->master);
+}
+
+static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
+#define DEV_PM_OPS &rspi_pm_ops
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
static struct platform_driver rspi_driver = {
.probe = rspi_probe,
.remove = rspi_remove,
.id_table = spi_driver_ids,
.driver = {
.name = "renesas_spi",
+ .pm = DEV_PM_OPS,
.of_match_table = of_match_ptr(rspi_of_match),
},
};
static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
{
- sh_msiof_write(p, STR, sh_msiof_read(p, STR));
+ sh_msiof_write(p, STR,
+ sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
}
static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
};
MODULE_DEVICE_TABLE(platform, spi_driver_ids);
+#ifdef CONFIG_PM_SLEEP
+static int sh_msiof_spi_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+ return spi_master_suspend(p->master);
+}
+
+static int sh_msiof_spi_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+ return spi_master_resume(p->master);
+}
+
+static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
+ sh_msiof_spi_resume);
+#define DEV_PM_OPS &sh_msiof_spi_pm_ops
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
static struct platform_driver sh_msiof_spi_drv = {
.probe = sh_msiof_spi_probe,
.remove = sh_msiof_spi_remove,
.id_table = spi_driver_ids,
.driver = {
.name = "spi_sh_msiof",
+ .pm = DEV_PM_OPS,
.of_match_table = of_match_ptr(sh_msiof_match),
},
};
goto exit_free_master;
}
+ /* disabled clock may cause interrupt storm upon request */
+ tspi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tspi->clk)) {
+ ret = PTR_ERR(tspi->clk);
+ dev_err(&pdev->dev, "Can not get clock %d\n", ret);
+ goto exit_free_master;
+ }
+ ret = clk_prepare(tspi->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Clock prepare failed %d\n", ret);
+ goto exit_free_master;
+ }
+ ret = clk_enable(tspi->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
+ goto exit_free_master;
+ }
+
spi_irq = platform_get_irq(pdev, 0);
tspi->irq = spi_irq;
ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
tspi->irq);
- goto exit_free_master;
- }
-
- tspi->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(tspi->clk)) {
- dev_err(&pdev->dev, "can not get clock\n");
- ret = PTR_ERR(tspi->clk);
- goto exit_free_irq;
+ goto exit_clk_disable;
}
tspi->rst = devm_reset_control_get(&pdev->dev, "spi");
tegra_slink_deinit_dma_param(tspi, true);
exit_free_irq:
free_irq(spi_irq, tspi);
+exit_clk_disable:
+ clk_disable(tspi->clk);
exit_free_master:
spi_master_put(master);
return ret;
free_irq(tspi->irq, tspi);
+ clk_disable(tspi->clk);
+
if (tspi->tx_dma_chan)
tegra_slink_deinit_dma_param(tspi, false);
goto out;
}
+ /* requested mapping size larger than object size */
+ if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
/* requested protection bits must match our allowed protection mask */
if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
calc_vm_prot_bits(PROT_MASK))) {
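A hypothetical user-space demonstration of the new check (the /dev/ashmem path and ASHMEM_SET_SIZE ioctl are the commonly documented ashmem interface, not part of this patch):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/ashmem.h>

	int main(void)
	{
		int fd = open("/dev/ashmem", O_RDWR);
		void *p;

		if (fd < 0)
			return 1;
		ioctl(fd, ASHMEM_SET_SIZE, 4096);
		/* Mapping more than the object size now fails with EINVAL. */
		p = mmap(NULL, 8192, PROT_READ, MAP_SHARED, fd, 0);
		if (p == MAP_FAILED)
			perror("mmap");
		return 0;
	}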
* drivers/staging/android/ion/ion.c
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
return ERR_PTR(-EINVAL);
}
-static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
- int id)
+struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+ int id)
{
struct ion_handle *handle;
return ERR_PTR(-EINVAL);
}
-struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
- int id)
-{
- struct ion_handle *handle;
-
- mutex_lock(&client->lock);
- handle = ion_handle_get_by_id_nolock(client, id);
- mutex_unlock(&client->lock);
-
- return handle;
-}
-
-static bool ion_handle_validate(struct ion_client *client,
- struct ion_handle *handle)
+bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
WARN_ON(!mutex_is_locked(&client->lock));
return idr_find(&client->idr, handle->id) == handle;
}
EXPORT_SYMBOL(ion_alloc);
-static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
+void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
{
bool valid_handle;
}
EXPORT_SYMBOL(ion_free);
-int ion_phys(struct ion_client *client, struct ion_handle *handle,
- ion_phys_addr_t *addr, size_t *len)
+static int __ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len, bool lock_client)
{
struct ion_buffer *buffer;
int ret;
- mutex_lock(&client->lock);
+ if (lock_client)
+ mutex_lock(&client->lock);
if (!ion_handle_validate(client, handle)) {
- mutex_unlock(&client->lock);
+ if (lock_client)
+ mutex_unlock(&client->lock);
return -EINVAL;
}
if (!buffer->heap->ops->phys) {
pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
__func__, buffer->heap->name, buffer->heap->type);
- mutex_unlock(&client->lock);
+ if (lock_client)
+ mutex_unlock(&client->lock);
return -ENODEV;
}
- mutex_unlock(&client->lock);
+ if (lock_client)
+ mutex_unlock(&client->lock);
ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
return ret;
}
+
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ return __ion_phys(client, handle, addr, len, true);
+}
EXPORT_SYMBOL(ion_phys);
+int ion_phys_nolock(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ return __ion_phys(client, handle, addr, len, false);
+}
+
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
void *vaddr;
return __ion_share_dma_buf_fd(client, handle, false);
}
-struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+static struct ion_handle *__ion_import_dma_buf(struct ion_client *client,
+ int fd, bool lock_client)
{
struct dma_buf *dmabuf;
struct ion_buffer *buffer;
}
buffer = dmabuf->priv;
- mutex_lock(&client->lock);
+ if (lock_client)
+ mutex_lock(&client->lock);
/* if a handle exists for this buffer just take a reference to it */
handle = ion_handle_lookup(client, buffer);
if (!IS_ERR(handle)) {
handle = ion_handle_get_check_overflow(handle);
- mutex_unlock(&client->lock);
+ if (lock_client)
+ mutex_unlock(&client->lock);
goto end;
}
handle = ion_handle_create(client, buffer);
if (IS_ERR(handle)) {
- mutex_unlock(&client->lock);
+ if (lock_client)
+ mutex_unlock(&client->lock);
goto end;
}
ret = ion_handle_add(client, handle);
- mutex_unlock(&client->lock);
+ if (lock_client)
+ mutex_unlock(&client->lock);
if (ret) {
- ion_handle_put(handle);
+ if (lock_client)
+ ion_handle_put(handle);
+ else
+ ion_handle_put_nolock(handle);
handle = ERR_PTR(ret);
}
dma_buf_put(dmabuf);
return handle;
}
+
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+{
+ return __ion_import_dma_buf(client, fd, true);
+}
EXPORT_SYMBOL(ion_import_dma_buf);
+struct ion_handle *ion_import_dma_buf_nolock(struct ion_client *client, int fd)
+{
+ return __ion_import_dma_buf(client, fd, false);
+}
+
static int ion_sync_for_device(struct ion_client *client, int fd)
{
struct dma_buf *dmabuf;
data->heaps[i].size);
}
}
+
+void lock_client(struct ion_client *client)
+{
+ mutex_lock(&client->lock);
+}
+
+void unlock_client(struct ion_client *client)
+{
+ mutex_unlock(&client->lock);
+}
+
+struct ion_buffer *get_buffer(struct ion_handle *handle)
+{
+ return handle->buffer;
+}
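A sketch of the calling convention these helpers establish: the ioctl path takes client->lock once via lock_client() and then uses only the _nolock variants until unlock_client(). The wrapper below is illustrative, not part of the patch (the real ioctl code additionally drops the handle reference it acquired):

	static int example_query_phys(struct ion_client *client, int id,
				      ion_phys_addr_t *addr, size_t *len)
	{
		struct ion_handle *handle;
		int ret;

		lock_client(client);
		handle = ion_handle_get_by_id_nolock(client, id);
		if (IS_ERR(handle)) {
			unlock_client(client);
			return PTR_ERR(handle);
		}
		ret = ion_phys_nolock(client, handle, addr, len);
		unlock_client(client);
		return ret;
	}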
* drivers/staging/android/ion/ion_priv.h
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
enum ion_heap_type type, void *data,
int (*f)(struct ion_heap *heap, void *data));
-struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
- int id);
+struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+ int id);
int ion_handle_put(struct ion_handle *handle);
+bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle);
+
+void lock_client(struct ion_client *client);
+
+void unlock_client(struct ion_client *client);
+
+struct ion_buffer *get_buffer(struct ion_handle *handle);
+
+/**
+ * This function is the same as ion_free() except that it does not take
+ * client->lock.
+ */
+void ion_free_nolock(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * This function is the same as ion_phys() except that it does not take
+ * client->lock.
+ */
+int ion_phys_nolock(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len);
+
+/**
+ * This function is the same as ion_import_dma_buf() except that it does
+ * not take client->lock.
+ */
+struct ion_handle *ion_import_dma_buf_nolock(struct ion_client *client, int fd);
+
#endif /* _ION_PRIV_H */
err_free_sg2:
/* We failed to zero buffers. Bypass pool */
- buffer->flags |= ION_PRIV_FLAG_SHRINKER_FREE;
+ buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
if (vmid > 0)
ion_system_secure_heap_unassign_sg(table, vmid);
int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
void *vaddr, unsigned long len, unsigned int cmd)
{
- return ion_do_cache_op(client, handle, vaddr, 0, len, cmd);
+ int ret;
+
+ lock_client(client);
+ ret = ion_do_cache_op(client, handle, vaddr, 0, len, cmd);
+ unlock_client(client);
+
+ return ret;
}
EXPORT_SYMBOL(msm_ion_do_cache_op);
void *vaddr, unsigned int offset, unsigned long len,
unsigned int cmd)
{
- return ion_do_cache_op(client, handle, vaddr, offset, len, cmd);
+ int ret;
+
+ lock_client(client);
+ ret = ion_do_cache_op(client, handle, vaddr, offset, len, cmd);
+ unlock_client(client);
+
+ return ret;
}
EXPORT_SYMBOL(msm_ion_do_cache_offset_op);
ion_phys_addr_t buff_phys_start = 0;
size_t buf_length = 0;
- ret = ion_phys(client, handle, &buff_phys_start, &buf_length);
+ ret = ion_phys_nolock(client, handle, &buff_phys_start, &buf_length);
if (ret)
return -EINVAL;
int i;
unsigned int len = 0;
void (*op)(const void *, const void *);
+ struct ion_buffer *buffer;
-
- table = ion_sg_table(client, handle);
+ buffer = get_buffer(handle);
+ table = buffer->sg_table;
if (IS_ERR_OR_NULL(table))
return PTR_ERR(table);
unsigned long flags;
struct sg_table *table;
struct page *page;
+ struct ion_buffer *buffer;
- ret = ion_handle_get_flags(client, handle, &flags);
- if (ret)
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to %s.\n",
+ __func__, __func__);
return -EINVAL;
+ }
+
+ buffer = get_buffer(handle);
+ mutex_lock(&buffer->lock);
+ flags = buffer->flags;
+ mutex_unlock(&buffer->lock);
if (!ION_IS_CACHED(flags))
return 0;
if (get_secure_vmid(flags) > 0)
return 0;
- table = ion_sg_table(client, handle);
+ table = buffer->sg_table;
if (IS_ERR_OR_NULL(table))
return PTR_ERR(table);
int ret;
struct mm_struct *mm = current->active_mm;
+ lock_client(client);
if (data.flush_data.handle > 0) {
- handle = ion_handle_get_by_id(client,
- (int)data.flush_data.handle);
+ handle = ion_handle_get_by_id_nolock(
+ client, (int)data.flush_data.handle);
if (IS_ERR(handle)) {
pr_info("%s: Could not find handle: %d\n",
__func__, (int)data.flush_data.handle);
+ unlock_client(client);
return PTR_ERR(handle);
}
} else {
- handle = ion_import_dma_buf(client, data.flush_data.fd);
+ handle = ion_import_dma_buf_nolock(client,
+ data.flush_data.fd);
if (IS_ERR(handle)) {
pr_info("%s: Could not import handle: %pK\n",
__func__, handle);
+ unlock_client(client);
return -EINVAL;
}
}
}
up_read(&mm->mmap_sem);
- ion_free(client, handle);
+ ion_free_nolock(client, handle);
+ unlock_client(client);
if (ret < 0)
return ret;
break;
goto SD_Execute_Write_Cmd_Failed;
}
- rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
+ retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
goto SD_Execute_Write_Cmd_Failed;
#include "iscsi_target_nego.h"
#include "iscsi_target_auth.h"
-static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
-{
- int j = DIV_ROUND_UP(len, 2), rc;
-
- rc = hex2bin(dst, src, j);
- if (rc < 0)
- pr_debug("CHAP string contains non hex digit symbols\n");
-
- dst[j] = '\0';
- return j;
-}
-
-static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
-{
- int i;
-
- for (i = 0; i < src_len; i++) {
- sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
- }
-}
-
static void chap_gen_challenge(
struct iscsi_conn *conn,
int caller,
memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
get_random_bytes(chap->challenge, CHAP_CHALLENGE_LENGTH);
- chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
+ bin2hex(challenge_asciihex, chap->challenge,
CHAP_CHALLENGE_LENGTH);
/*
* Set CHAP_C, and copy the generated challenge into c_str.
pr_err("Could not find CHAP_R.\n");
goto out;
}
+ if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) {
+ pr_err("Malformed CHAP_R\n");
+ goto out;
+ }
+ if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) {
+ pr_err("Malformed CHAP_R\n");
+ goto out;
+ }
pr_debug("[server] Got CHAP_R=%s\n", chap_r);
- chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
}
crypto_free_hash(tfm);
- chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
+ bin2hex(response, server_digest, MD5_SIGNATURE_SIZE);
pr_debug("[server] MD5 Server Digest: %s\n", response);
if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
pr_err("Could not find CHAP_C.\n");
goto out;
}
- pr_debug("[server] Got CHAP_C=%s\n", challenge);
- challenge_len = chap_string_to_hex(challenge_binhex, challenge,
- strlen(challenge));
+ challenge_len = DIV_ROUND_UP(strlen(challenge), 2);
if (!challenge_len) {
pr_err("Unable to convert incoming challenge\n");
goto out;
pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
goto out;
}
+ if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) {
+ pr_err("Malformed CHAP_C\n");
+ goto out;
+ }
+ pr_debug("[server] Got CHAP_C=%s\n", challenge);
/*
* During mutual authentication, the CHAP_C generated by the
* initiator must not match the original CHAP_C generated by
/*
 * Convert response from binary to ASCII hex.
*/
- chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
+ bin2hex(response, digest, MD5_SIGNATURE_SIZE);
*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
response);
*nr_out_len += 1;
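The fix above follows a validate-then-convert pattern: check the ASCII length first, then let hex2bin() reject non-hex characters. A minimal sketch of that pattern (the function name is illustrative):

	static int example_parse_digest(u8 *digest, const char *hex_str)
	{
		if (strlen(hex_str) != MD5_SIGNATURE_SIZE * 2)
			return -EINVAL;		/* wrong length, reject early */
		if (hex2bin(digest, hex_str, MD5_SIGNATURE_SIZE) < 0)
			return -EINVAL;		/* contains non-hex characters */
		return 0;
	}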
none = strstr(buf1, NONE);
if (none)
goto out;
- strncat(buf1, ",", strlen(","));
- strncat(buf1, NONE, strlen(NONE));
+ strlcat(buf1, "," NONE, sizeof(buf1));
if (iscsi_update_param_value(param, buf1) < 0)
return -EINVAL;
}
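The old code bounded strncat() by the length of the source string, which never limits writes into the destination; strlcat() is bounded by the destination size instead. Illustrative only:

	static void example_append(void)
	{
		char buf[8] = "abc";

		/* Truncates at sizeof(buf) - 1 characters plus the NUL. */
		strlcat(buf, ",None", sizeof(buf));	/* buf becomes "abc,Non" */
	}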
mutex_lock(&tz->lock);
- if (mode == THERMAL_DEVICE_ENABLED)
+ if (mode == THERMAL_DEVICE_ENABLED) {
tz->polling_delay = data->polling_delay;
- else
+ tz->passive_delay = data->passive_delay;
+ } else {
tz->polling_delay = 0;
+ tz->passive_delay = 0;
+ }
mutex_unlock(&tz->lock);
(link->has_func_id) &&
(link->socket->pcmcia_pfc == 0) &&
((link->func_id == CISTPL_FUNCID_MULTI) ||
- (link->func_id == CISTPL_FUNCID_SERIAL)))
- pcmcia_loop_config(link, serial_check_for_multi, info);
+ (link->func_id == CISTPL_FUNCID_SERIAL))) {
+ if (pcmcia_loop_config(link, serial_check_for_multi, info))
+ goto failed;
+ }
/*
* Apply any multi-port quirk.
/* Get the address of the host memory buffer.
*/
bdp = pinfo->rx_cur;
- while (bdp->cbd_sc & BD_SC_EMPTY)
- ;
+ if (bdp->cbd_sc & BD_SC_EMPTY)
+ return NO_POLL_CHAR;
/* If the buffer address is in the CPM DPRAM, don't
* convert it.
poll_chars = 0;
}
if (poll_chars <= 0) {
- poll_chars = poll_wait_key(poll_buf, pinfo);
+ int ret = poll_wait_key(poll_buf, pinfo);
+
+ if (ret == NO_POLL_CHAR)
+ return ret;
+ poll_chars = ret;
pollp = poll_buf;
}
poll_chars--;
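The change adopts the polling contract used by the console poll hooks: a poll_get_char() implementation must return NO_POLL_CHAR immediately when no byte is pending rather than busy-waiting. A sketch with hypothetical helpers:

	static int example_poll_get_char(struct uart_port *port)
	{
		if (!example_rx_ready(port))	/* hypothetical status check */
			return NO_POLL_CHAR;	/* nothing pending, do not spin */
		return example_read_byte(port);	/* hypothetical FIFO read */
	}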
dev_name(&pdev->dev), sport);
if (ret)
return ret;
+
+ ret = devm_request_irq(&pdev->dev, rtsirq, imx_rtsint, 0,
+ dev_name(&pdev->dev), sport);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request rts irq: %d\n",
+ ret);
+ return ret;
+ }
} else {
ret = devm_request_irq(&pdev->dev, rxirq, imx_int, 0,
dev_name(&pdev->dev), sport);
#include <asm/io.h>
#include <asm/uaccess.h>
+#include <linux/nospec.h>
+
#include <linux/kbd_kern.h>
#include <linux/vt_kern.h>
#include <linux/kbd_diacr.h>
if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
ret = -ENXIO;
else {
+ vsa.console = array_index_nospec(vsa.console,
+ MAX_NR_CONSOLES + 1);
vsa.console--;
console_lock();
ret = vc_allocate(vsa.console);
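array_index_nospec() clamps an already-validated index so it cannot be speculated out of range (Spectre variant 1). The general pattern, as a hedged sketch:

	#include <linux/nospec.h>

	static int example_lookup(const int *table, int idx, int size)
	{
		if (idx < 0 || idx >= size)
			return -EINVAL;
		/* After the bounds check, clamp before the dependent load. */
		idx = array_index_nospec(idx, size);
		return table[idx];
	}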
set_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
- rv = usb_submit_urb(desc->response, GFP_ATOMIC);
+ rv = usb_submit_urb(desc->response, GFP_KERNEL);
spin_lock_irq(&desc->iuspin);
if (rv) {
dev_err(&desc->intf->dev,
struct async *as = NULL;
struct usb_ctrlrequest *dr = NULL;
unsigned int u, totlen, isofrmlen;
- int i, ret, is_in, num_sgs = 0, ifnum = -1;
+ int i, ret, num_sgs = 0, ifnum = -1;
int number_of_packets = 0;
unsigned int stream_id = 0;
void *buf;
+ bool is_in;
+ bool allow_short = false;
+ bool allow_zero = false;
unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK |
USBDEVFS_URB_BULK_CONTINUATION |
USBDEVFS_URB_NO_FSBR |
u = 0;
switch (uurb->type) {
case USBDEVFS_URB_TYPE_CONTROL:
+ if (is_in)
+ allow_short = true;
if (!usb_endpoint_xfer_control(&ep->desc))
return -EINVAL;
/* min 8 byte setup packet */
break;
case USBDEVFS_URB_TYPE_BULK:
+ if (!is_in)
+ allow_zero = true;
+ else
+ allow_short = true;
switch (usb_endpoint_type(&ep->desc)) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_ISOC:
if (!usb_endpoint_xfer_int(&ep->desc))
return -EINVAL;
interrupt_urb:
+ if (!is_in)
+ allow_zero = true;
+ else
+ allow_short = true;
break;
case USBDEVFS_URB_TYPE_ISO:
u = (is_in ? URB_DIR_IN : URB_DIR_OUT);
if (uurb->flags & USBDEVFS_URB_ISO_ASAP)
u |= URB_ISO_ASAP;
- if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in)
+ if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
u |= URB_SHORT_NOT_OK;
if (uurb->flags & USBDEVFS_URB_NO_FSBR)
u |= URB_NO_FSBR;
- if (uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+ if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
u |= URB_ZERO_PACKET;
if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT)
u |= URB_NO_INTERRUPT;
as->urb->transfer_flags = u;
+ if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
+ dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n");
+ if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+ dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n");
+
as->urb->transfer_buffer_length = uurb->buffer_length;
as->urb->setup_packet = (unsigned char *)dr;
dr = NULL;
struct device *dev;
struct usb_device *udev;
int retval = 0;
- int lpm_disable_error = -ENODEV;
if (!iface)
return -ENODEV;
iface->condition = USB_INTERFACE_BOUND;
- /* See the comment about disabling LPM in usb_probe_interface(). */
- if (driver->disable_hub_initiated_lpm) {
- lpm_disable_error = usb_unlocked_disable_lpm(udev);
- if (lpm_disable_error) {
- dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
- __func__, driver->name);
- return -ENOMEM;
- }
- }
-
/* Claimed interfaces are initially inactive (suspended) and
* runtime-PM-enabled, but only if the driver has autosuspend
* support. Otherwise they are marked active, to prevent the
if (device_is_registered(dev))
retval = device_bind_driver(dev);
- /* Attempt to re-enable USB3 LPM, if the disable was successful. */
- if (!lpm_disable_error)
- usb_unlocked_enable_lpm(udev);
+ if (retval) {
+ dev->driver = NULL;
+ usb_set_intfdata(iface, NULL);
+ iface->needs_remote_wakeup = 0;
+ iface->condition = USB_INTERFACE_UNBOUND;
+
+ /*
+ * Unbound interfaces are always runtime-PM-disabled
+ * and runtime-PM-suspended
+ */
+ if (driver->supports_autosuspend)
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ }
return retval;
}
struct usb_interface_cache *intf_cache = NULL;
int i;
+ if (!config)
+ return NULL;
for (i = 0; i < config->desc.bNumInterfaces; i++) {
if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber
== iface_num) {
}
ret = dwc3_msm_suspend(mdwc, false);
- if (!ret)
- atomic_set(&mdwc->pm_suspended, 1);
+ if (ret)
+ return ret;
- return ret;
+ flush_work(&mdwc->bus_vote_w);
+ atomic_set(&mdwc->pm_suspended, 1);
+
+ return 0;
}
static int dwc3_msm_pm_freeze(struct device *dev)
mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
ret = dwc3_msm_suspend(mdwc, true);
- if (!ret)
- atomic_set(&mdwc->pm_suspended, 1);
+ if (ret)
+ return ret;
- return ret;
+ flush_work(&mdwc->bus_vote_w);
+ atomic_set(&mdwc->pm_suspended, 1);
+
+ return 0;
}
static int dwc3_msm_pm_resume(struct device *dev)
offset = dev->xfer_file_offset;
count = dev->xfer_file_length;
+ if (count < 0) {
+ dev->xfer_result = -EINVAL;
+ return;
+ }
+
DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
if (dev->xfer_send_header) {
offset = dev->xfer_file_offset;
count = dev->xfer_file_length;
+ if (count < 0) {
+ dev->xfer_result = -EINVAL;
+ return;
+ }
+
DBG(cdev, "receive_file_work(%lld)\n", count);
if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
DBG(cdev, "%s- count(%lld) not multiple of mtu(%d)\n", __func__,
}
/* push data to (open) tty */
- if (req->actual) {
+ if (req->actual && tty) {
char *packet = req->buf;
unsigned size = req->actual;
unsigned n;
static int fotg210_udc_remove(struct platform_device *pdev)
{
struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
+ int i;
usb_del_gadget_udc(&fotg210->gadget);
iounmap(fotg210->reg);
free_irq(platform_get_irq(pdev, 0), fotg210);
fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
+ for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+ kfree(fotg210->ep[i]);
kfree(fotg210);
return 0;
/* initialize udc */
fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
if (fotg210 == NULL)
- goto err_alloc;
+ goto err;
for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
_ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
fotg210->reg = ioremap(res->start, resource_size(res));
if (fotg210->reg == NULL) {
pr_err("ioremap error.\n");
- goto err_map;
+ goto err_alloc;
}
spin_lock_init(&fotg210->lock);
fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
GFP_KERNEL);
if (fotg210->ep0_req == NULL)
- goto err_req;
+ goto err_map;
fotg210_init(fotg210);
fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
err_map:
- if (fotg210->reg)
- iounmap(fotg210->reg);
+ iounmap(fotg210->reg);
err_alloc:
+ for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+ kfree(fotg210->ep[i]);
kfree(fotg210);
+err:
return ret;
}
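The relabelled error path restores the usual kernel unwind idiom: each label undoes exactly the steps that succeeded before the failure, in reverse order. A generic sketch with hypothetical helpers:

	static int example_probe(void)
	{
		int ret;

		ret = step_a();			/* hypothetical setup step */
		if (ret)
			goto err;
		ret = step_b();
		if (ret)
			goto err_undo_a;	/* only step_a needs undoing */
		return 0;

	err_undo_a:
		undo_a();
	err:
		return ret;
	}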
temp = readl(port_array[wIndex]);
break;
}
-
- /* Software should not attempt to set
- * port link state above '3' (U3) and the port
- * must be enabled.
- */
- if ((temp & PORT_PE) == 0 ||
- (link_state > USB_SS_PORT_LS_U3)) {
- xhci_warn(xhci, "Cannot set link state.\n");
+ /* Port must be enabled */
+ if (!(temp & PORT_PE)) {
+ retval = -ENODEV;
+ break;
+ }
+ /* Can't set port link state above '3' (U3) */
+ if (link_state > USB_SS_PORT_LS_U3) {
+ xhci_warn(xhci, "Cannot set port %d link state %d\n",
+ wIndex, link_state);
goto error;
}
-
if (link_state == USB_SS_PORT_LS_U3) {
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
wIndex + 1);
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
xhci->quirks |= XHCI_MISSING_CAS;
To compile this driver as a module, choose M here: the module
will be called ks_bridge. If unsure, choose N.
+config USB_QCOM_IPC_BRIDGE
+ tristate "USB QTI IPC bridge driver"
+ depends on USB
+ depends on USB_QCOM_DIAG_BRIDGE
+ help
+ Say Y here if you have a QTI modem device connected via USB that
+ will be bridged in kernel space. This driver works as a transport
+	  layer for the IPC router module, enabling communication between
+	  the APPS processor and the MODEM processor. This option depends on
+	  USB_QCOM_DIAG_BRIDGE because the core USB support for both the
+	  diag and IPC transports lives in the same driver. Select this
+	  option if you want to build the HSIC transport for the IPC router.
+
config USB_QCOM_DIAG_BRIDGE
tristate "USB QTI diagnostic bridge driver"
depends on USB
help
Say Y here if you have a QTI modem device connected via USB that
will be bridged in kernel space. This driver communicates with the
- diagnostic and QMI interfaces and allows for bridging with the diag
- forwarding driver for diag interface and IPC router for QMI interface.
+ diagnostic interface and allows for bridging with the diag forwarding
+ driver.
To compile this driver as a module, choose M here: the
module will be called diag_bridge. If unsure, choose N.
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->io_mutex);
+ if (WARN_ON_ONCE(len >= sizeof(in_buffer)))
+ return -EIO;
+
return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
}
transfer_buffer_length,
KOBIL_TIMEOUT);
- dev_dbg(&port->dev, "%s - Send get_status_line_state URB returns: %i. Statusline: %02x\n",
- __func__, result, transfer_buffer[0]);
+ dev_dbg(&port->dev, "Send get_status_line_state URB returns: %i\n",
+ result);
+ if (result < 1) {
+ if (result >= 0)
+ result = -EIO;
+ goto out_free;
+ }
+
+ dev_dbg(&port->dev, "Statusline: %02x\n", transfer_buffer[0]);
result = 0;
if ((transfer_buffer[0] & SUSBCR_GSL_DSR) != 0)
result = TIOCM_DSR;
+out_free:
kfree(transfer_buffer);
return result;
}
/* Motorola Tetra driver */
#define MOTOROLA_TETRA_IDS() \
- { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */
+ { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
+ { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */
DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
/* Novatel Wireless GPS driver */
result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
0, secd, sizeof(*secd));
- if (result < sizeof(*secd)) {
+ if (result < (int)sizeof(*secd)) {
dev_err(dev, "Can't read security descriptor or "
"not enough data: %d\n", result);
goto out;
error_rc_add:
usb_put_intf(iface);
usb_put_dev(hwarc->usb_dev);
+ kfree(hwarc);
error_alloc:
uwb_rc_put(uwb_rc);
error_rc_alloc:
extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll);
extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
+extern const u8 aty_postdividers[8];
+
/*
* Hardware cursor support
extern void aty_reset_engine(const struct atyfb_par *par);
extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info);
-extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
/*
* PLL Reference Divider M:
*/
- M = pll_regs[2];
+ M = pll_regs[PLL_REF_DIV];
/*
* PLL Feedback Divider N (Dependent on CLOCK_CNTL):
*/
- N = pll_regs[7 + (clock_cntl & 3)];
+ N = pll_regs[VCLK0_FB_DIV + (clock_cntl & 3)];
/*
* PLL Post Divider P (Dependent on CLOCK_CNTL):
*/
- P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1));
+ P = aty_postdividers[((pll_regs[VCLK_POST_DIV] >> ((clock_cntl & 3) << 1)) & 3) |
+ ((pll_regs[PLL_EXT_CNTL] >> (2 + (clock_cntl & 3))) & 4)];
/*
* PLL Divider Q:
*/
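The corrected post-divider lookup assembles a 3-bit table index from two PLL registers. Written out step by step, as a sketch using the register names above:

	static u32 example_post_div(const u8 *pll_regs, u32 clock_cntl)
	{
		u8 idx;

		/* bits 1:0 of the index, selected per clock by CLOCK_CNTL */
		idx = (pll_regs[VCLK_POST_DIV] >> ((clock_cntl & 3) << 1)) & 3;
		/* bit 2 of the index comes from PLL_EXT_CNTL */
		idx |= (pll_regs[PLL_EXT_CNTL] >> (2 + (clock_cntl & 3))) & 4;
		return aty_postdividers[idx];
	}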
#define Maximum_DSP_PRECISION 7
-static u8 postdividers[] = {1,2,4,8,3};
+const u8 aty_postdividers[8] = {1,2,4,8,3,5,6,12};
static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll)
{
pll->vclk_post_div += (q < 64*8);
pll->vclk_post_div += (q < 32*8);
}
- pll->vclk_post_div_real = postdividers[pll->vclk_post_div];
+ pll->vclk_post_div_real = aty_postdividers[pll->vclk_post_div];
// pll->vclk_post_div <<= 6;
pll->vclk_fb_div = q * pll->vclk_post_div_real / 8;
pllvclk = (1000000 * 2 * pll->vclk_fb_div) /
u8 mclk_fb_div, pll_ext_cntl;
pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par);
pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par);
- pll->ct.xclk_post_div_real = postdividers[pll_ext_cntl & 0x07];
+ pll->ct.xclk_post_div_real = aty_postdividers[pll_ext_cntl & 0x07];
mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par);
if (pll_ext_cntl & PLL_MFB_TIMES_4_2B)
mclk_fb_div <<= 1;
xpost_div += (q < 64*8);
xpost_div += (q < 32*8);
}
- pll->ct.xclk_post_div_real = postdividers[xpost_div];
+ pll->ct.xclk_post_div_real = aty_postdividers[xpost_div];
pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8;
#ifdef CONFIG_PPC
mpost_div += (q < 64*8);
mpost_div += (q < 32*8);
}
- sclk_post_div_real = postdividers[mpost_div];
+ sclk_post_div_real = aty_postdividers[mpost_div];
pll->ct.sclk_fb_div = q * sclk_post_div_real / 8;
pll->ct.spll_cntl2 = mpost_div << 4;
#ifdef DEBUG
}
static int __mdss_dsi_dfps_calc_clks(struct mdss_panel_data *pdata,
- int new_fps)
+ u64 new_clk_rate)
{
int rc = 0;
- u64 clk_rate;
struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
struct mdss_panel_info *pinfo;
u32 phy_rev;
pinfo = &pdata->panel_info;
phy_rev = ctrl_pdata->shared_data->phy_rev;
- rc = mdss_dsi_clk_div_config
- (&ctrl_pdata->panel_data.panel_info, new_fps);
- if (rc) {
- pr_err("%s: unable to initialize the clk dividers\n",
- __func__);
- return rc;
- }
-
+ pinfo->clk_rate = new_clk_rate;
+ pinfo->mipi.dsi_pclk_rate = mdss_dsi_get_pclk_rate(pinfo,
+ new_clk_rate);
__mdss_dsi_dyn_refresh_config(ctrl_pdata);
if (phy_rev == DSI_PHY_REV_20)
ctrl_pdata->byte_clk_rate_bkp = ctrl_pdata->byte_clk_rate;
ctrl_pdata->pclk_rate = pinfo->mipi.dsi_pclk_rate;
- clk_rate = pinfo->clk_rate;
- do_div(clk_rate, 8U);
- ctrl_pdata->byte_clk_rate = (u32) clk_rate;
+ do_div(new_clk_rate, 8U);
+ ctrl_pdata->byte_clk_rate = (u32) new_clk_rate;
pr_debug("byte_rate=%i\n", ctrl_pdata->byte_clk_rate);
pr_debug("pclk_rate=%i\n", ctrl_pdata->pclk_rate);
return rc;
}
-static int __mdss_dsi_dfps_update_clks(struct mdss_panel_data *pdata,
- int new_fps)
+static int __mdss_dsi_dfps_update_clks(struct mdss_panel_data *pdata)
{
struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
struct mdss_dsi_ctrl_pdata *sctrl_pdata = NULL;
clk_disable_unprepare(ctrl_pdata->pll_byte_clk);
clk_disable_unprepare(ctrl_pdata->pll_pixel_clk);
- /* update new fps that at this point is already updated in hw */
- pinfo->current_fps = new_fps;
- if (sctrl_pdata) {
- spinfo->current_fps = new_fps;
- }
-
return rc;
dfps_timeout:
MDSS_XLOG(ctrl_pdata->ndx, enabled, data);
}
-static int mdss_dsi_dfps_config(struct mdss_panel_data *pdata, int new_fps)
+static int __mdss_dsi_dynamic_clock_switch(struct mdss_panel_data *pdata,
+ u64 new_clk_rate)
{
int rc = 0;
struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
struct mdss_panel_info *pinfo;
u32 phy_rev;
- u32 frame_rate_bkp;
+ u64 clk_rate_bkp;
+
+ pr_debug("%s+:\n", __func__);
+
+ ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+ panel_data);
+
+ phy_rev = ctrl_pdata->shared_data->phy_rev;
+ pinfo = &pdata->panel_info;
+
+ /* get the fps configured in HW */
+ clk_rate_bkp = pinfo->clk_rate;
+
+ __mdss_dsi_mask_dfps_errors(ctrl_pdata, true);
+
+ if (phy_rev == DSI_PHY_REV_20) {
+ rc = mdss_dsi_phy_calc_timing_param(pinfo, phy_rev,
+ new_clk_rate);
+ if (rc) {
+ pr_err("PHY calculations failed-%lld\n", new_clk_rate);
+ goto end_update;
+ }
+ }
+
+ rc = __mdss_dsi_dfps_calc_clks(pdata, new_clk_rate);
+ if (rc) {
+ pr_err("error calculating clocks for %lld\n", new_clk_rate);
+ goto error_clks;
+ }
+
+ rc = __mdss_dsi_dfps_update_clks(pdata);
+ if (rc) {
+ pr_err("Dynamic refresh failed-%lld\n", new_clk_rate);
+ goto error_dfps;
+ }
+ return rc;
+error_dfps:
+ if (__mdss_dsi_dfps_calc_clks(pdata, clk_rate_bkp))
+ pr_err("error reverting clock calculations for %lld\n",
+ clk_rate_bkp);
+error_clks:
+ if (mdss_dsi_phy_calc_timing_param(pinfo, phy_rev, clk_rate_bkp))
+ pr_err("Unable to revert phy timing-%lld\n", clk_rate_bkp);
+end_update:
+ return rc;
+}
+
+static int mdss_dsi_dfps_config(struct mdss_panel_data *pdata, int new_fps)
+{
+ int rc = 0;
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+ struct mdss_panel_info *pinfo;
pr_debug("%s+:\n", __func__);
return -EINVAL;
}
- phy_rev = ctrl_pdata->shared_data->phy_rev;
pinfo = &pdata->panel_info;
- /* get the fps configured in HW */
- frame_rate_bkp = pinfo->current_fps;
-
if (new_fps == pinfo->current_fps) {
/*
* This is unlikely as mdss driver checks for previously
__mdss_dsi_update_video_mode_total(pdata, new_fps);
} else if (pinfo->dfps_update == DFPS_IMMEDIATE_CLK_UPDATE_MODE) {
/* Clock update method */
+ u64 new_clk_rate = mdss_dsi_calc_bitclk
+ (&ctrl_pdata->panel_data.panel_info, new_fps);
+ if (!new_clk_rate) {
+ pr_err("%s: unable to get the new bit clock rate\n",
+ __func__);
+ rc = -EINVAL;
+ goto end_update;
+ }
- __mdss_dsi_mask_dfps_errors(ctrl_pdata, true);
+ rc = __mdss_dsi_dynamic_clock_switch(pdata, new_clk_rate);
+ if (!rc) {
+ struct mdss_dsi_ctrl_pdata *mctrl_pdata = NULL;
+ struct mdss_panel_info *mpinfo = NULL;
- if (phy_rev == DSI_PHY_REV_20) {
- rc = mdss_dsi_phy_calc_timing_param(pinfo, phy_rev,
- new_fps);
- if (rc) {
- pr_err("PHY calculations failed-%d\n", new_fps);
+ if (mdss_dsi_is_hw_config_split
+ (ctrl_pdata->shared_data) &&
+ mdss_dsi_is_ctrl_clk_master(ctrl_pdata))
goto end_update;
- }
- }
- rc = __mdss_dsi_dfps_calc_clks(pdata, new_fps);
- if (rc) {
- pr_err("error calculating clocks for %d\n", new_fps);
- goto error_clks;
- }
+ if (mdss_dsi_is_hw_config_split
+ (ctrl_pdata->shared_data) &&
+ mdss_dsi_is_ctrl_clk_slave(ctrl_pdata)) {
+ mctrl_pdata = mdss_dsi_get_ctrl_clk_master();
+ if (IS_ERR_OR_NULL(mctrl_pdata)) {
+ pr_err("Invalid mctrl_pdata\n");
+ goto end_update;
+ }
- rc = __mdss_dsi_dfps_update_clks(pdata, new_fps);
- if (rc) {
- pr_err("Dynamic refresh failed-%d\n", new_fps);
- goto error_dfps;
+ mpinfo = &mctrl_pdata->panel_data.panel_info;
+ }
+ /*
+ * update new fps that at this point is already
+ * updated in hw
+ */
+ pinfo->current_fps = new_fps;
+ if (mctrl_pdata && mpinfo)
+ mpinfo->current_fps = new_fps;
}
}
-
- return rc;
-error_dfps:
- if (__mdss_dsi_dfps_calc_clks(pdata, frame_rate_bkp))
- pr_err("error reverting clock calculations for %d\n",
- frame_rate_bkp);
-error_clks:
- if (mdss_dsi_phy_calc_timing_param(pinfo, phy_rev, frame_rate_bkp))
- pr_err("Unable to revert phy timing-%d\n", frame_rate_bkp);
end_update:
return rc;
}
MDSS_DSI_CORE_CLK, MDSS_DSI_CLK_OFF);
}
+static struct mdss_dsi_ctrl_pdata *mdss_dsi_get_drvdata(struct device *dev)
+{
+ struct msm_fb_data_type *mfd;
+ struct mdss_panel_data *pdata;
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+ struct fb_info *fbi = dev_get_drvdata(dev);
+
+ if (fbi) {
+ mfd = (struct msm_fb_data_type *)fbi->par;
+ pdata = dev_get_platdata(&mfd->pdev->dev);
+
+ ctrl_pdata = container_of(pdata,
+ struct mdss_dsi_ctrl_pdata, panel_data);
+ }
+
+ return ctrl_pdata;
+}
+
+static ssize_t supp_bitclk_list_sysfs_rda(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret = 0;
+ int i = 0;
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = mdss_dsi_get_drvdata(dev);
+ struct mdss_panel_info *pinfo = NULL;
+
+ if (!ctrl_pdata) {
+ pr_err("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ pinfo = &ctrl_pdata->panel_data.panel_info;
+ if (!pinfo) {
+ pr_err("no panel connected\n");
+ return -ENODEV;
+ }
+
+ if (!pinfo->dynamic_bitclk) {
+ pr_err_once("%s: Dynamic bitclk not enabled for this panel\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ buf[0] = 0;
+ for (i = 0; i < pinfo->supp_bitclk_len; i++) {
+ if (ret > 0)
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+ ",%d", pinfo->supp_bitclks[i]);
+ else
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+ "%d", pinfo->supp_bitclks[i]);
+ }
+
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+
+ return ret;
+}
+
+static ssize_t dynamic_bitclk_sysfs_wta(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int rc = 0, i = 0;
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = mdss_dsi_get_drvdata(dev);
+ struct mdss_panel_info *pinfo = NULL;
+ int clk_rate = 0;
+
+ if (!ctrl_pdata) {
+ pr_err("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ pinfo = &ctrl_pdata->panel_data.panel_info;
+ if (!pinfo) {
+ pr_err("no panel connected\n");
+ return -ENODEV;
+ }
+
+ if (!pinfo->dynamic_bitclk) {
+ pr_err_once("%s: Dynamic bitclk not enabled for this panel\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (mdss_panel_is_power_off(pinfo->panel_power_state)) {
+ pr_err_once("%s: Panel powered off!\n", __func__);
+ return -EINVAL;
+ }
+
+ rc = kstrtoint(buf, 10, &clk_rate);
+ if (rc) {
+ pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+ return rc;
+ }
+
+ for (i = 0; i < pinfo->supp_bitclk_len; i++) {
+ if (pinfo->supp_bitclks[i] == clk_rate)
+ break;
+ }
+ if (i == pinfo->supp_bitclk_len) {
+ pr_err("Requested bitclk: %d not supported\n", clk_rate);
+ return -EINVAL;
+ }
+
+ rc = __mdss_dsi_dynamic_clock_switch(&ctrl_pdata->panel_data,
+ clk_rate);
+ if (!rc && mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data)) {
+ struct mdss_dsi_ctrl_pdata *octrl =
+ mdss_dsi_get_other_ctrl(ctrl_pdata);
+ rc = __mdss_dsi_dynamic_clock_switch(&octrl->panel_data,
+ clk_rate);
+ if (rc)
+ pr_err("failed to switch DSI bitclk for sctrl\n");
+ } else if (rc) {
+ pr_err("failed to switch DSI bitclk\n");
+ }
+
+ return count;
+} /* dynamic_bitclk_sysfs_wta */
+
+static ssize_t dynamic_bitclk_sysfs_rda(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = mdss_dsi_get_drvdata(dev);
+ struct mdss_panel_info *pinfo = NULL;
+
+ if (!ctrl_pdata) {
+ pr_err("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ pinfo = &ctrl_pdata->panel_data.panel_info;
+ if (!pinfo) {
+ pr_err("no panel connected\n");
+ return -ENODEV;
+ }
+
+ ret = snprintf(buf, PAGE_SIZE, "%llu\n", pinfo->clk_rate);
+ pr_debug("%s: '%llu'\n", __func__, pinfo->clk_rate);
+
+ return ret;
+} /* dynamic_bitclk_sysfs_rda */
+
+static DEVICE_ATTR(dynamic_bitclk, S_IRUGO | S_IWUSR | S_IWGRP,
+ dynamic_bitclk_sysfs_rda, dynamic_bitclk_sysfs_wta);
+static DEVICE_ATTR(supported_bitclk, S_IRUGO, supp_bitclk_list_sysfs_rda, NULL);
+
+static struct attribute *dynamic_bitclk_fs_attrs[] = {
+ &dev_attr_dynamic_bitclk.attr,
+ &dev_attr_supported_bitclk.attr,
+ NULL,
+};
+
+static struct attribute_group mdss_dsi_fs_attrs_group = {
+ .attrs = dynamic_bitclk_fs_attrs,
+};
+
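With the attribute group registered on the framebuffer device, user space can list the supported rates and request a switch. The exact sysfs path depends on which fb node the DSI controller registered, so the fb0 path below is an assumption:

	cat /sys/class/graphics/fb0/supported_bitclk
	echo 751285248 > /sys/class/graphics/fb0/dynamic_bitclk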
static int mdss_dsi_event_handler(struct mdss_panel_data *pdata,
int event, void *arg)
{
ctrl_pdata->kobj = &fbi->dev->kobj;
ctrl_pdata->fb_node = fbi->node;
+ if (!mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) ||
+ (mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) &&
+ mdss_dsi_is_ctrl_clk_master(ctrl_pdata))) {
+ if (sysfs_create_group(&fbi->dev->kobj,
+ &mdss_dsi_fs_attrs_group))
+ pr_err("failed to create DSI sysfs group\n");
+ }
+
if (IS_ENABLED(CONFIG_MSM_DBA) &&
pdata->panel_info.is_dba_panel) {
queue_delayed_work(ctrl_pdata->workq,
pinfo = &(ctrl_pdata->panel_data.panel_info);
if (!(mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) &&
mdss_dsi_is_ctrl_clk_slave(ctrl_pdata)) &&
- pinfo->dynamic_fps) {
+ (pinfo->dynamic_fps || pinfo->dynamic_bitclk)) {
rc = mdss_dsi_shadow_clk_init(pdev, ctrl_pdata);
if (rc) {
((mipi->mode == DSI_VIDEO_MODE)
? MIPI_VIDEO_PANEL : MIPI_CMD_PANEL);
- rc = mdss_dsi_clk_div_config(pinfo, mipi->frame_rate);
- if (rc) {
- pr_err("%s: unable to initialize the clk dividers\n", __func__);
- return rc;
+ pinfo->clk_rate = mdss_dsi_calc_bitclk(pinfo, mipi->frame_rate);
+ if (!pinfo->clk_rate) {
+ pr_err("%s: unable to calculate the DSI bit clock\n", __func__);
+ return -EINVAL;
}
+
+ pinfo->mipi.dsi_pclk_rate = mdss_dsi_get_pclk_rate(pinfo,
+ pinfo->clk_rate);
+ if (!pinfo->mipi.dsi_pclk_rate) {
+ pr_err("%s: unable to calculate the DSI pclk\n", __func__);
+ return -EINVAL;
+ }
+
ctrl_pdata->pclk_rate = mipi->dsi_pclk_rate;
clk_rate = pinfo->clk_rate;
do_div(clk_rate, 8U);
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
void mdss_dsi_irq_handler_config(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
void mdss_dsi_set_tx_power_mode(int mode, struct mdss_panel_data *pdata);
-int mdss_dsi_clk_div_config(struct mdss_panel_info *panel_info,
- int frame_rate);
+u64 mdss_dsi_calc_bitclk(struct mdss_panel_info *panel_info, int frame_rate);
+u32 mdss_dsi_get_pclk_rate(struct mdss_panel_info *panel_info, u64 clk_rate);
int mdss_dsi_clk_refresh(struct mdss_panel_data *pdata, bool update_phy);
int mdss_dsi_link_clk_init(struct platform_device *pdev,
struct mdss_dsi_ctrl_pdata *ctrl_pdata);
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
for (i = 0; i < ctrl->status_cmds.cmd_cnt; i++)
len += lenp[i];
- for (i = 0; i < len; i++) {
- pr_debug("[%i] return:0x%x status:0x%x\n",
- i, (unsigned int)ctrl->return_buf[i],
- (unsigned int)ctrl->status_value[j + i]);
- MDSS_XLOG(ctrl->ndx, ctrl->return_buf[i],
- ctrl->status_value[j + i]);
- j += len;
- }
-
for (j = 0; j < ctrl->groups; ++j) {
for (i = 0; i < len; ++i) {
+ pr_debug("[%i] return:0x%x status:0x%x\n",
+ i, ctrl->return_buf[i],
+ (unsigned int)ctrl->status_value[group + i]);
+ MDSS_XLOG(ctrl->ndx, ctrl->return_buf[i],
+ ctrl->status_value[group + i]);
if (ctrl->return_buf[i] !=
ctrl->status_value[group + i])
break;
struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
const char *data;
- bool dynamic_fps;
+ bool dynamic_fps, dynamic_bitclk;
struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
+ int rc = 0;
dynamic_fps = of_property_read_bool(pan_node,
"qcom,mdss-dsi-pan-enable-dynamic-fps");
if (!dynamic_fps)
- return;
+ goto dynamic_bitclk;
pinfo->dynamic_fps = true;
data = of_get_property(pan_node, "qcom,mdss-dsi-pan-fps-update", NULL);
pinfo->new_fps = pinfo->mipi.frame_rate;
pinfo->current_fps = pinfo->mipi.frame_rate;
+dynamic_bitclk:
+ dynamic_bitclk = of_property_read_bool(pan_node,
+ "qcom,mdss-dsi-pan-enable-dynamic-bitclk");
+ if (!dynamic_bitclk)
+ return;
+
+ of_find_property(pan_node, "qcom,mdss-dsi-dynamic-bitclk_freq",
+ &pinfo->supp_bitclk_len);
+ pinfo->supp_bitclk_len = pinfo->supp_bitclk_len/sizeof(u32);
+ if (pinfo->supp_bitclk_len < 1)
+ return;
+
+ pinfo->supp_bitclks = kzalloc((sizeof(u32) * pinfo->supp_bitclk_len),
+ GFP_KERNEL);
+ if (!pinfo->supp_bitclks)
+ return;
+
+ rc = of_property_read_u32_array(pan_node,
+ "qcom,mdss-dsi-dynamic-bitclk_freq", pinfo->supp_bitclks,
+ pinfo->supp_bitclk_len);
+ if (rc) {
+ pr_err("Error from dynamic bitclk freq u64 array read\n");
+ return;
+ }
+ pinfo->dynamic_bitclk = true;
return;
}
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
}
int mdss_dsi_phy_calc_timing_param(struct mdss_panel_info *pinfo, u32 phy_rev,
- u32 frate_hz)
+ u64 clk_rate)
{
struct dsi_phy_t_clk_param t_clk;
struct dsi_phy_timing t_param;
- int hsync_period;
- int vsync_period;
- unsigned long inter_num;
- uint32_t lane_config = 0;
- unsigned long x, y;
int rc = 0;
if (!pinfo) {
return -EINVAL;
}
- hsync_period = mdss_panel_get_htotal(pinfo, true);
- vsync_period = mdss_panel_get_vtotal(pinfo);
-
- inter_num = pinfo->bpp * frate_hz;
-
- if (pinfo->mipi.data_lane0)
- lane_config++;
- if (pinfo->mipi.data_lane1)
- lane_config++;
- if (pinfo->mipi.data_lane2)
- lane_config++;
- if (pinfo->mipi.data_lane3)
- lane_config++;
-
- x = mult_frac(vsync_period * hsync_period, inter_num, lane_config);
- y = rounddown(x, 1);
- t_clk.bitclk_mbps = rounddown(mult_frac(y, 1, 1000000), 1);
+ t_clk.bitclk_mbps = rounddown((uint32_t) div_u64(clk_rate, 1000000), 1);
t_clk.escclk_numer = ESC_CLK_MHZ;
t_clk.escclk_denom = ESCCLK_MMSS_CC_PREDIV;
t_clk.tlpx_numer_ns = TLPX_NUMER;
t_clk.treot_ns = TR_EOT;
- pr_debug("hperiod=%d, vperiod=%d, inter_num=%lu, lane_cfg=%d\n",
- hsync_period, vsync_period, inter_num, lane_config);
- pr_debug("x=%lu, y=%lu, bitrate=%d\n", x, y, t_clk.bitclk_mbps);
+ pr_debug("bitrate=%d\n", t_clk.bitclk_mbps);
rc = mdss_dsi_phy_initialize_defaults(&t_clk, &t_param, phy_rev);
if (rc) {
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* @frate_hz - Frame rate for which phy timing parameters are to be calculated.
*/
int mdss_dsi_phy_calc_timing_param(struct mdss_panel_info *pinfo, u32 phy_rev,
- u32 frate_hz);
+ u64 clk_rate);
/*
* mdss_dsi_phy_v3_init() - initialization sequence for DSI PHY rev v3
/*
* Core MDSS framebuffer driver.
*
- * Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2007 Google Incorporated
*
* This software is licensed under the terms of the GNU General Public
"red_chromaticity_x=%d\nred_chromaticity_y=%d\n"
"green_chromaticity_x=%d\ngreen_chromaticity_y=%d\n"
"blue_chromaticity_x=%d\nblue_chromaticity_y=%d\n"
- "panel_orientation=%d\n",
+ "panel_orientation=%d\ndyn_bitclk_en=%d\n",
pinfo->partial_update_enabled,
pinfo->roi_alignment.xstart_pix_align,
pinfo->roi_alignment.width_pix_align,
pinfo->hdr_properties.display_primaries[5],
pinfo->hdr_properties.display_primaries[6],
pinfo->hdr_properties.display_primaries[7],
- pinfo->panel_orientation);
+ pinfo->panel_orientation, pinfo->dynamic_bitclk);
return ret;
}
int pwm_lpg_chan;
int pwm_period;
bool dynamic_fps;
+ bool dynamic_bitclk;
+ u32 *supp_bitclks;
+ u32 supp_bitclk_len;
bool ulps_feature_enabled;
bool ulps_suspend_enabled;
bool panel_ack_disabled;
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
__func__, pinfo->mipi.frame_rate);
}
- rc = mdss_dsi_clk_div_config(&pdata->panel_info,
- pdata->panel_info.mipi.frame_rate);
- if (rc) {
- pr_err("%s: unable to initialize the clk dividers\n",
- __func__);
- return rc;
+ pinfo->clk_rate = mdss_dsi_calc_bitclk(pinfo, pinfo->mipi.frame_rate);
+ if (!pinfo->clk_rate) {
+ pr_err("%s: unable to calculate the DSI bit clock\n", __func__);
+ return -EINVAL;
}
+
+ pinfo->mipi.dsi_pclk_rate = mdss_dsi_get_pclk_rate(pinfo,
+ pinfo->clk_rate);
+ if (!pinfo->mipi.dsi_pclk_rate) {
+ pr_err("%s: unable to calculate the DSI pclk\n", __func__);
+ return -EINVAL;
+ }
+
ctrl_pdata->refresh_clk_rate = false;
ctrl_pdata->pclk_rate = pdata->panel_info.mipi.dsi_pclk_rate;
ctrl_pdata->byte_clk_rate = pdata->panel_info.clk_rate / 8;
/* phy panel timing calculation */
rc = mdss_dsi_phy_calc_timing_param(pinfo,
ctrl_pdata->shared_data->phy_rev,
- pinfo->mipi.frame_rate);
+ pdata->panel_info.clk_rate);
if (rc) {
pr_err("Error in calculating phy timings\n");
return rc;
return (frame_rate != panel_info->mipi.frame_rate);
}
-int mdss_dsi_clk_div_config(struct mdss_panel_info *panel_info,
- int frame_rate)
+static u8 mdss_dsi_get_lane_cnt(struct mdss_panel_info *panel_info)
{
- struct mdss_panel_data *pdata = container_of(panel_info,
- struct mdss_panel_data, panel_info);
- struct mdss_dsi_ctrl_pdata *ctrl_pdata = container_of(pdata,
- struct mdss_dsi_ctrl_pdata, panel_data);
- u64 h_period, v_period, clk_rate;
- u32 dsi_pclk_rate;
- u8 lanes = 0, bpp;
+ u8 lanes = 0;
if (!panel_info)
return -EINVAL;
if (panel_info->mipi.data_lane0)
lanes += 1;
- switch (panel_info->mipi.dst_format) {
+ if (!lanes)
+ lanes = 1;
+
+ return lanes;
+}
+
+static u8 mdss_dsi_get_bpp(char dst_format)
+{
+ u8 bpp = 0;
+
+ switch (dst_format) {
case DSI_CMD_DST_FORMAT_RGB888:
case DSI_VIDEO_DST_FORMAT_RGB888:
case DSI_VIDEO_DST_FORMAT_RGB666_LOOSE:
bpp = 3; /* Default format set to RGB888 */
break;
}
+ return bpp;
+}
+
+u64 mdss_dsi_calc_bitclk(struct mdss_panel_info *panel_info, int frame_rate)
+{
+ struct mdss_panel_data *pdata = container_of(panel_info,
+ struct mdss_panel_data, panel_info);
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata = container_of(pdata,
+ struct mdss_dsi_ctrl_pdata, panel_data);
+ u64 h_period, v_period, clk_rate = 0;
+ u8 lanes = 0, bpp;
+
+ lanes = mdss_dsi_get_lane_cnt(panel_info);
+
+ bpp = mdss_dsi_get_bpp(panel_info->mipi.dst_format);
h_period = mdss_panel_get_htotal(panel_info, true);
if (panel_info->split_link_enabled)
v_period = mdss_panel_get_vtotal(panel_info);
if (ctrl_pdata->refresh_clk_rate || is_diff_frame_rate(panel_info,
- frame_rate) || (!panel_info->clk_rate)) {
- if (lanes > 0) {
- panel_info->clk_rate = h_period * v_period * frame_rate
- * bpp * 8;
- do_div(panel_info->clk_rate, lanes);
- } else {
- pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
- panel_info->clk_rate =
- h_period * v_period * frame_rate * bpp * 8;
- }
+ frame_rate) || (!panel_info->clk_rate)) {
+ clk_rate = h_period * v_period * frame_rate * bpp * 8;
+ do_div(clk_rate, lanes);
+ } else if (panel_info->clk_rate) {
+ clk_rate = panel_info->clk_rate;
}
- if (panel_info->clk_rate == 0)
- panel_info->clk_rate = 454000000;
+ if (clk_rate == 0)
+ clk_rate = 454000000;
+
+ return clk_rate;
+}
+
+u32 mdss_dsi_get_pclk_rate(struct mdss_panel_info *panel_info, u64 clk_rate)
+{
+ u8 lanes = 0, bpp;
+ u32 pclk_rate = 0;
+
+ lanes = mdss_dsi_get_lane_cnt(panel_info);
+
+ bpp = mdss_dsi_get_bpp(panel_info->mipi.dst_format);
- clk_rate = panel_info->clk_rate;
do_div(clk_rate, 8 * bpp);
if (panel_info->split_link_enabled)
- dsi_pclk_rate = (u32) clk_rate *
+ pclk_rate = (u32) clk_rate *
panel_info->mipi.lanes_per_sublink;
else
- dsi_pclk_rate = (u32) clk_rate * lanes;
+ pclk_rate = (u32) clk_rate * lanes;
- if ((dsi_pclk_rate < 3300000) || (dsi_pclk_rate > 250000000))
- dsi_pclk_rate = 35000000;
- panel_info->mipi.dsi_pclk_rate = dsi_pclk_rate;
+ if ((pclk_rate < 3300000) || (pclk_rate > 250000000))
+ pclk_rate = 35000000;
- return 0;
+ return pclk_rate;
}
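The refactor above splits the old clk_div_config() into a bit-clock calculation and a pclk derivation. A minimal userspace sketch of that arithmetic, with made-up panel timings (the real values come from mdss_panel_get_htotal()/mdss_panel_get_vtotal() and the parsed lane/format fields):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical totals: active + blanking; real code reads panel info */
	uint64_t h_total = 2120, v_total = 1120;
	uint64_t fps = 60, bpp = 3, lanes = 4;

	/* mdss_dsi_calc_bitclk(): pixel rate * bits per pixel, per lane */
	uint64_t bitclk = h_total * v_total * fps * bpp * 8 / lanes;

	/* mdss_dsi_get_pclk_rate(): undo the per-lane bit serialization */
	uint32_t pclk = (uint32_t)(bitclk / (8 * bpp)) * lanes;

	printf("bitclk=%llu Hz, pclk=%u Hz\n",
	       (unsigned long long)bitclk, pclk);
	return 0;
}

Note that pclk comes back out as h_total * v_total * fps, i.e. the raw pixel rate, which is why the split into two helpers loses nothing.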
static bool mdss_dsi_is_ulps_req_valid(struct mdss_dsi_ctrl_pdata *ctrl,
if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
return -EFAULT;
+ if (mr->w > 4096 || mr->h > 4096)
+ return -EINVAL;
+
if (mr->w * mr->h * 3 > mr->buffer_size)
return -EINVAL;
mr->x, mr->y, mr->w, mr->h);
if (r > 0) {
- if (copy_to_user(mr->buffer, buf, mr->buffer_size))
+ if (copy_to_user(mr->buffer, buf, r))
r = -EFAULT;
}
static void disable_hotplug_cpu(int cpu)
{
- if (cpu_online(cpu)) {
- lock_device_hotplug();
+ if (!cpu_is_hotpluggable(cpu))
+ return;
+ lock_device_hotplug();
+ if (cpu_online(cpu))
device_offline(get_cpu_device(cpu));
- unlock_device_hotplug();
- }
- if (cpu_present(cpu))
+ if (!cpu_online(cpu) && cpu_present(cpu)) {
xen_arch_unregister_cpu(cpu);
-
- set_cpu_present(cpu, false);
+ set_cpu_present(cpu, false);
+ }
+ unlock_device_hotplug();
}
static int vcpu_online(unsigned int cpu)
clear_evtchn_to_irq_row(row);
}
- evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
+ evtchn_to_irq[row][col] = irq;
return 0;
}
/*
* The Xenstore watch fires directly after registering it and
* after a suspend/resume cycle. So ENOENT is no error but
- * might happen in those cases.
+ * might happen in those cases. ERANGE is observed when we get
+ * an empty value (''), this happens when we acknowledge the
+ * request by writing '\0' below.
*/
- if (err != -ENOENT)
+ if (err != -ENOENT && err != -ERANGE)
pr_err("Error %d reading sysrq code in control/sysrq\n",
err);
xenbus_transaction_end(xbt, 1);
case SFM_LESSTHAN:
*target = '<';
break;
- case SFM_SLASH:
- *target = '\\';
- break;
case SFM_SPACE:
*target = ' ';
break;
}
count = 0;
+ /*
+ * We know that all the name entries in the protocols array
+ * are short (< 16 bytes anyway) and are NUL terminated.
+ */
for (i = 0; i < CIFS_NUM_PROT; i++) {
- strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
- count += strlen(protocols[i].name) + 1;
- /* null at end of source and target buffers anyway */
+ size_t len = strlen(protocols[i].name) + 1;
+
+ memcpy(pSMB->DialectsArray+count, protocols[i].name, len);
+ count += len;
}
inc_rfc1001_len(pSMB, count);
pSMB->ByteCount = cpu_to_le16(count);
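The loop above replaces a fixed-width strncpy() with an exact-length memcpy(). A standalone sketch of why, using a stand-in protocols table (not the real cifs one): strncpy(dst, src, 16) always writes 16 bytes, NUL-padding short names and potentially running past the end of a tightly packed dialect buffer, whereas copying strlen()+1 bytes writes exactly the name and its terminator.

#include <stdio.h>
#include <string.h>

static const char *protocols[] = { "\2NT LM 0.12", "\2POSIX 2" };

int main(void)
{
	char buf[64];
	size_t count = 0, i;

	for (i = 0; i < sizeof(protocols) / sizeof(protocols[0]); i++) {
		size_t len = strlen(protocols[i]) + 1;	/* name + NUL */

		/* exact-length copy; strncpy(.., .., 16) would have
		 * written 16 bytes here regardless of the name length */
		memcpy(buf + count, protocols[i], len);
		count += len;
	}
	printf("packed %zu bytes\n", count);
	return 0;
}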
(struct smb_com_transaction_change_notify_rsp *)buf;
struct file_notify_information *pnotify;
__u32 data_offset = 0;
+ size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);
+
if (get_bcc(buf) > sizeof(struct file_notify_information)) {
data_offset = le32_to_cpu(pSMBr->DataOffset);
+ if (data_offset >
+ len - sizeof(struct file_notify_information)) {
+ cifs_dbg(FYI, "invalid data_offset %u\n",
+ data_offset);
+ return true;
+ }
pnotify = (struct file_notify_information *)
((char *)&pSMBr->hdr.Protocol + data_offset);
cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
}
srch_inf->entries_in_buffer = 0;
- srch_inf->index_of_last_entry = 0;
+ srch_inf->index_of_last_entry = 2;
rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
fid->volatile_fid, 0, srch_inf);
else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
error_msg = "rec_len is too small for name_len";
else if (unlikely(((char *) de - buf) + rlen > size))
- error_msg = "directory entry across range";
+ error_msg = "directory entry overrun";
else if (unlikely(le32_to_cpu(de->inode) >
le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
error_msg = "inode out of bounds";
if (filp)
ext4_error_file(filp, function, line, bh->b_blocknr,
- "bad entry in directory: %s - offset=%u(%u), "
- "inode=%u, rec_len=%d, name_len=%d",
- error_msg, (unsigned) (offset % size),
- offset, le32_to_cpu(de->inode),
- rlen, de->name_len);
+ "bad entry in directory: %s - offset=%u, "
+ "inode=%u, rec_len=%d, name_len=%d, size=%d",
+ error_msg, offset, le32_to_cpu(de->inode),
+ rlen, de->name_len, size);
else
ext4_error_inode(dir, function, line, bh->b_blocknr,
- "bad entry in directory: %s - offset=%u(%u), "
- "inode=%u, rec_len=%d, name_len=%d",
- error_msg, (unsigned) (offset % size),
- offset, le32_to_cpu(de->inode),
- rlen, de->name_len);
+ "bad entry in directory: %s - offset=%u, "
+ "inode=%u, rec_len=%d, name_len=%d, size=%d",
+ error_msg, offset, le32_to_cpu(de->inode),
+ rlen, de->name_len, size);
return 1;
}
{
int err, inline_size;
struct ext4_iloc iloc;
+ size_t inline_len;
void *inline_pos;
unsigned int offset;
struct ext4_dir_entry_2 *de;
goto out;
}
+ inline_len = ext4_get_inline_size(dir);
offset = EXT4_INLINE_DOTDOT_SIZE;
- while (offset < dir->i_size) {
+ while (offset < inline_len) {
de = ext4_get_inline_entry(dir, &iloc, offset,
&inline_pos, &inline_size);
if (ext4_check_dir_entry(dir, NULL, de,
*/
sb_start_write(sb);
ext4_mmp_csum_set(sb, mmp);
- mark_buffer_dirty(bh);
lock_buffer(bh);
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
int ext4_resize_begin(struct super_block *sb)
{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
int ret = 0;
if (!capable(CAP_SYS_RESOURCE))
* because the user tools have no way of handling this. Probably a
* bad time to do it anyways.
*/
- if (EXT4_SB(sb)->s_sbh->b_blocknr !=
+ if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
ext4_warning(sb, "won't resize using backup superblock at %llu",
(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
}
}
+ /*
+ * Make sure the last group has enough space so that it's
+ * guaranteed to have enough space for all metadata blocks
+ * that it might need to hold. (We might not need to store
+ * the inode table blocks in the last block group, but there
+ * will be cases where this might be needed.)
+ */
+ if ((ext4_group_first_block_no(sb, n_group) +
+ ext4_group_overhead_blocks(sb, n_group) + 2 +
+ sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
+ n_blocks_count = ext4_group_first_block_no(sb, n_group);
+ n_group--;
+ n_blocks_count_retry = 0;
+ if (resize_inode) {
+ iput(resize_inode);
+ resize_inode = NULL;
+ }
+ goto retry;
+ }
+
/* extend the last group */
if (n_group == o_group)
add = n_blocks_count - o_blocks_count;
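A rough userspace sketch of the headroom test added above, with invented geometry; the real check uses ext4_group_first_block_no(), ext4_group_overhead_blocks() and the sbi fields shown:

#include <stdio.h>

int main(void)
{
	unsigned long long first_block = 163840;	/* start of last group */
	unsigned long long overhead = 258;		/* bitmaps + gdt (fake) */
	unsigned long long itb_per_group = 512, cluster_ratio = 1;
	unsigned long long n_blocks_count = 164000;	/* requested new size */

	if (first_block + overhead + 2 + itb_per_group + cluster_ratio
	    >= n_blocks_count)
		printf("last group too small: retry resize without it\n");
	else
		printf("last group has room for its metadata\n");
	return 0;
}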
block = ext4_count_free_clusters(sb);
ext4_free_blocks_count_set(sbi->s_es,
EXT4_C2B(sbi, block));
+ ext4_superblock_csum_set(sb);
err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
GFP_KERNEL);
if (!err) {
unsigned long freei = ext4_count_free_inodes(sb);
sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
+ ext4_superblock_csum_set(sb);
err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
GFP_KERNEL);
}
{
int error;
- if (buffer_verified(bh))
- return 0;
-
if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
BHDR(bh)->h_blocks != cpu_to_le32(1))
return -EFSCORRUPTED;
+ if (buffer_verified(bh))
+ return 0;
+
if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
return -EFSBADCRC;
error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
}
static int
-ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
+ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s,
+ struct inode *inode)
{
- struct ext4_xattr_entry *last;
+ struct ext4_xattr_entry *last, *next;
size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
/* Compute min_offs and last. */
last = s->first;
- for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+ for (; !IS_LAST_ENTRY(last); last = next) {
+ next = EXT4_XATTR_NEXT(last);
+ if ((void *)next >= s->end) {
+ EXT4_ERROR_INODE(inode, "corrupted xattr entries");
+ return -EFSCORRUPTED;
+ }
if (!last->e_value_block && last->e_value_size) {
size_t offs = le16_to_cpu(last->e_value_offs);
if (offs < min_offs)
mb2_cache_entry_delete_block(ext4_mb_cache, hash,
bs->bh->b_blocknr);
ea_bdebug(bs->bh, "modifying in-place");
- error = ext4_xattr_set_entry(i, s);
+ error = ext4_xattr_set_entry(i, s, inode);
if (!error) {
if (!IS_LAST_ENTRY(s->first))
ext4_xattr_rehash(header(s->base),
s->end = s->base + sb->s_blocksize;
}
- error = ext4_xattr_set_entry(i, s);
+ error = ext4_xattr_set_entry(i, s, inode);
if (error == -EFSCORRUPTED)
goto bad_block;
if (error)
if (EXT4_I(inode)->i_extra_isize == 0)
return -ENOSPC;
- error = ext4_xattr_set_entry(i, s);
+ error = ext4_xattr_set_entry(i, s, inode);
if (error) {
if (error == -ENOSPC &&
ext4_has_inline_data(inode)) {
error = ext4_xattr_ibody_find(inode, i, is);
if (error)
return error;
- error = ext4_xattr_set_entry(i, s);
+ error = ext4_xattr_set_entry(i, s, inode);
}
if (error)
return error;
if (EXT4_I(inode)->i_extra_isize == 0)
return -ENOSPC;
- error = ext4_xattr_set_entry(i, s);
+ error = ext4_xattr_set_entry(i, s, inode);
if (error)
return error;
header = IHDR(inode, ext4_raw_inode(&is->iloc));
/* Find the entry best suited to be pushed into EA block */
entry = NULL;
for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+ /* never move system.data out of the inode */
+ if ((last->e_name_len == 4) &&
+ (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) &&
+ !memcmp(last->e_name, "data", 4))
+ continue;
total_size =
EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
EXT4_XATTR_LEN(last->e_name_len);
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
{
+ f2fs_build_fault_attr(sbi, 0, 0);
set_ckpt_flags(sbi, CP_ERROR_FLAG);
if (!end_io)
f2fs_flush_merged_writes(sbi);
.encrypted_page = NULL,
.is_meta = is_meta,
};
+ int err;
if (unlikely(!is_meta))
fio.op_flags &= ~REQ_META;
fio.page = page;
- if (f2fs_submit_page_bio(&fio)) {
+ err = f2fs_submit_page_bio(&fio);
+ if (err) {
f2fs_put_page(page, 1);
- goto repeat;
+ return ERR_PTR(err);
}
lock_page(page);
goto repeat;
}
- /*
- * if there is any IO error when accessing device, make our filesystem
- * readonly and make sure do not write checkpoint with non-uptodate
- * meta page.
- */
if (unlikely(!PageUptodate(page))) {
- memset(page_address(page), 0, PAGE_SIZE);
- f2fs_stop_checkpoint(sbi, false);
+ f2fs_put_page(page, 1);
+ return ERR_PTR(-EIO);
}
out:
return page;
return __get_meta_page(sbi, index, true);
}
+struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index)
+{
+ struct page *page;
+ int count = 0;
+
+retry:
+ page = __get_meta_page(sbi, index, true);
+ if (IS_ERR(page)) {
+ if (PTR_ERR(page) == -EIO &&
+ ++count <= DEFAULT_RETRY_IO_COUNT)
+ goto retry;
+
+ f2fs_stop_checkpoint(sbi, false);
+ f2fs_bug_on(sbi, 1);
+ }
+
+ return page;
+}
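A standalone model of the bounded-retry pattern introduced by f2fs_get_meta_page_nofail(): only -EIO is retried, at most DEFAULT_RETRY_IO_COUNT times, before escalating (a printf stands in for f2fs_stop_checkpoint()); flaky_read() is invented for the demo.

#include <stdio.h>
#include <errno.h>

#define DEFAULT_RETRY_IO_COUNT 8

static int attempts;

static int flaky_read(void)
{
	return ++attempts < 3 ? -EIO : 0;	/* fail twice, then succeed */
}

int main(void)
{
	int count = 0, err;

retry:
	err = flaky_read();
	if (err == -EIO && ++count <= DEFAULT_RETRY_IO_COUNT)
		goto retry;

	if (err)
		printf("giving up: would stop checkpoint\n");
	else
		printf("err=%d after %d attempts\n", err, attempts);
	return 0;
}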
+
/* for POR only */
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
return __get_meta_page(sbi, index, false);
}
-bool f2fs_is_valid_meta_blkaddr(struct f2fs_sb_info *sbi,
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
block_t blkaddr, int type)
{
switch (type) {
return false;
break;
case META_POR:
+ case DATA_GENERIC:
if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
- blkaddr < MAIN_BLKADDR(sbi)))
+ blkaddr < MAIN_BLKADDR(sbi))) {
+ if (type == DATA_GENERIC) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "access invalid blkaddr:%u", blkaddr);
+ WARN_ON(1);
+ }
+ return false;
+ }
+ break;
+ case META_GENERIC:
+ if (unlikely(blkaddr < SEG0_BLKADDR(sbi) ||
+ blkaddr >= MAIN_BLKADDR(sbi)))
return false;
break;
default:
blk_start_plug(&plug);
for (; nrpages-- > 0; blkno++) {
- if (!f2fs_is_valid_meta_blkaddr(sbi, blkno, type))
+ if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
goto out;
switch (type) {
trace_f2fs_writepage(page, META);
- if (unlikely(f2fs_cp_error(sbi))) {
- dec_page_count(sbi, F2FS_DIRTY_META);
- unlock_page(page);
- return 0;
- }
+ if (unlikely(f2fs_cp_error(sbi)))
+ goto redirty_out;
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
spin_lock(&im->ino_lock);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_ORPHAN)) {
spin_unlock(&im->ino_lock);
f2fs_show_injection_info(FAULT_ORPHAN);
return -ENOSPC;
}
-#endif
+
if (unlikely(im->ino_num >= sbi->max_orphans))
err = -ENOSPC;
else
{
struct inode *inode;
struct node_info ni;
- int err = f2fs_acquire_orphan_inode(sbi);
-
- if (err)
- goto err_out;
-
- __add_ino_entry(sbi, ino, 0, ORPHAN_INO);
+ int err;
inode = f2fs_iget_retry(sbi->sb, ino);
if (IS_ERR(inode)) {
/* truncate all the data during iput */
iput(inode);
- f2fs_get_node_info(sbi, ino, &ni);
+ err = f2fs_get_node_info(sbi, ino, &ni);
+ if (err)
+ goto err_out;
/* ENOMEM was fully retried in f2fs_evict_inode. */
if (ni.blk_addr != NULL_ADDR) {
err = -EIO;
goto err_out;
}
- __remove_ino_entry(sbi, ino, ORPHAN_INO);
return 0;
err_out:
/* Needed for iput() to work correctly and not trash data */
sbi->sb->s_flags |= MS_ACTIVE;
- /* Turn on quotas so that they are updated correctly */
+ /*
+ * Turn on quotas which were not enabled for read-only mounts if
+ * filesystem has quota feature, so that they are updated correctly.
+ */
quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY);
#endif
f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);
for (i = 0; i < orphan_blocks; i++) {
- struct page *page = f2fs_get_meta_page(sbi, start_blk + i);
+ struct page *page;
struct f2fs_orphan_block *orphan_blk;
+ page = f2fs_get_meta_page(sbi, start_blk + i);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto out;
+ }
+
orphan_blk = (struct f2fs_orphan_block *)page_address(page);
for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
__u32 crc = 0;
*cp_page = f2fs_get_meta_page(sbi, cp_addr);
+ if (IS_ERR(*cp_page))
+ return PTR_ERR(*cp_page);
+
*cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
if (crc_offset > (blk_size - sizeof(__le32))) {
+ f2fs_put_page(*cp_page, 1);
f2fs_msg(sbi->sb, KERN_WARNING,
"invalid crc_offset: %zu", crc_offset);
return -EINVAL;
crc = cur_cp_crc(*cp_block);
if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
+ f2fs_put_page(*cp_page, 1);
f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
return -EINVAL;
}
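The two early puts above plug page leaks on the error paths. A minimal sketch of the crc_offset bound being enforced (a stored CRC must leave room for a __le32 inside the checkpoint block):

#include <stdio.h>
#include <stdint.h>

static int crc_offset_ok(size_t crc_offset, size_t blk_size)
{
	/* mirrors: crc_offset > (blk_size - sizeof(__le32)) => invalid */
	return crc_offset <= blk_size - sizeof(uint32_t);
}

int main(void)
{
	printf("%d %d\n",
	       crc_offset_ok(4092, 4096),	/* 1: CRC fits */
	       crc_offset_ok(4093, 4096));	/* 0: would run off the block */
	return 0;
}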
err = get_checkpoint_version(sbi, cp_addr, &cp_block,
&cp_page_1, version);
if (err)
- goto invalid_cp1;
+ return NULL;
+
+ if (le32_to_cpu(cp_block->cp_pack_total_block_count) >
+ sbi->blocks_per_seg) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "invalid cp_pack_total_block_count:%u",
+ le32_to_cpu(cp_block->cp_pack_total_block_count));
+ goto invalid_cp;
+ }
pre_version = *version;
cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
err = get_checkpoint_version(sbi, cp_addr, &cp_block,
&cp_page_2, version);
if (err)
- goto invalid_cp2;
+ goto invalid_cp;
cur_version = *version;
if (cur_version == pre_version) {
f2fs_put_page(cp_page_2, 1);
return cp_page_1;
}
-invalid_cp2:
f2fs_put_page(cp_page_2, 1);
-invalid_cp1:
+invalid_cp:
f2fs_put_page(cp_page_1, 1);
return NULL;
}
cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
memcpy(sbi->ckpt, cp_block, blk_size);
- /* Sanity checking of checkpoint */
- if (f2fs_sanity_check_ckpt(sbi))
- goto free_fail_no_cp;
-
if (cur_page == cp1)
sbi->cur_cp_pack = 1;
else
sbi->cur_cp_pack = 2;
+ /* Sanity checking of checkpoint */
+ if (f2fs_sanity_check_ckpt(sbi))
+ goto free_fail_no_cp;
+
if (cp_blks <= 1)
goto done;
unsigned char *ckpt = (unsigned char *)sbi->ckpt;
cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
+ if (IS_ERR(cur_page))
+ goto free_fail_no_cp;
sit_bitmap_ptr = page_address(cur_page);
memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
f2fs_put_page(cur_page, 1);
iput(inode);
/* We need to give the cpu to other writers. */
- if (ino == cur_ino) {
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ if (ino == cur_ino)
cond_resched();
- } else {
+ else
ino = cur_ino;
- }
} else {
/*
* We should submit bio, since it exists several
f2fs_unlock_all(sbi);
}
-static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
+void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
{
DEFINE_WAIT(wait);
if (!get_pages(sbi, F2FS_WB_CP_DATA))
break;
+ if (unlikely(f2fs_cp_error(sbi)))
+ break;
+
io_schedule_timeout(5*HZ);
}
finish_wait(&sbi->cp_wait, &wait);
/* writeout cp pack 2 page */
err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO);
- f2fs_bug_on(sbi, err);
+ if (unlikely(err && f2fs_cp_error(sbi))) {
+ f2fs_put_page(page, 1);
+ return;
+ }
+ f2fs_bug_on(sbi, err);
f2fs_put_page(page, 0);
/* submit checkpoint (with barrier if NOBARRIER is not set) */
while (get_pages(sbi, F2FS_DIRTY_META)) {
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
if (unlikely(f2fs_cp_error(sbi)))
- return -EIO;
+ break;
}
/*
f2fs_sync_meta_pages(sbi, META, LONG_MAX,
FS_CP_META_IO);
if (unlikely(f2fs_cp_error(sbi)))
- return -EIO;
+ break;
}
}
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
/* wait for previous submitted meta pages writeback */
- wait_on_all_pages_writeback(sbi);
-
- if (unlikely(f2fs_cp_error(sbi)))
- return -EIO;
+ f2fs_wait_on_all_pages_writeback(sbi);
/* flush all device cache */
err = f2fs_flush_device_cache(sbi);
/* barrier and flush checkpoint cp pack 2 page if it can */
commit_checkpoint(sbi, ckpt, start_blk);
- wait_on_all_pages_writeback(sbi);
+ f2fs_wait_on_all_pages_writeback(sbi);
+
+ /*
+ * invalidate intermediate page cache borrowed from meta inode
+ * which are used for migration of encrypted inode's blocks.
+ */
+ if (f2fs_sb_has_encrypt(sbi->sb))
+ invalidate_mapping_pages(META_MAPPING(sbi),
+ MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);
f2fs_release_ino_entry(sbi, false);
- if (unlikely(f2fs_cp_error(sbi)))
- return -EIO;
+ f2fs_reset_fsync_node_info(sbi);
clear_sbi_flag(sbi, SBI_IS_DIRTY);
clear_sbi_flag(sbi, SBI_NEED_CP);
f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));
- return 0;
+ return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0;
}
/*
static void f2fs_read_end_io(struct bio *bio)
{
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
f2fs_show_injection_info(FAULT_IO);
bio->bi_error = -EIO;
}
-#endif
if (f2fs_bio_post_read_required(bio)) {
struct bio_post_read_ctx *ctx = bio->bi_private;
page->index != nid_of_node(page));
dec_page_count(sbi, type);
+ if (f2fs_in_warm_node_list(sbi, page))
+ f2fs_del_fsync_node_entry(sbi, page);
clear_cold_data(page);
end_page_writeback(page);
}
if (type != DATA && type != NODE)
goto submit_io;
- if (f2fs_sb_has_blkzoned(sbi->sb) && current->plug)
+ if (test_opt(sbi, LFS) && current->plug)
blk_finish_plug(current->plug);
start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
struct page *page = fio->encrypted_page ?
fio->encrypted_page : fio->page;
- verify_block_addr(fio, fio->new_blkaddr);
+ if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
+ __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
+ return -EFAULT;
+
trace_f2fs_submit_page_bio(page, fio);
f2fs_trace_ios(fio, 0);
spin_unlock(&io->io_lock);
}
- if (is_valid_blkaddr(fio->old_blkaddr))
+ if (__is_valid_data_blkaddr(fio->old_blkaddr))
verify_block_addr(fio, fio->old_blkaddr);
verify_block_addr(fio, fio->new_blkaddr);
}
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
- unsigned nr_pages)
+ unsigned nr_pages, unsigned op_flag)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct bio *bio;
struct bio_post_read_ctx *ctx;
unsigned int post_read_steps = 0;
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
+ return ERR_PTR(-EFAULT);
+
bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
if (!bio)
return ERR_PTR(-ENOMEM);
f2fs_target_device(sbi, blkaddr, bio);
bio->bi_end_io = f2fs_read_end_io;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
if (f2fs_encrypted_file(inode))
post_read_steps |= 1 << STEP_DECRYPT;
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
block_t blkaddr)
{
- struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1);
+ struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0);
if (IS_ERR(bio))
return PTR_ERR(bio);
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct f2fs_summary sum;
struct node_info ni;
+ block_t old_blkaddr;
pgoff_t fofs;
blkcnt_t count = 1;
int err;
if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
+ err = f2fs_get_node_info(sbi, dn->nid, &ni);
+ if (err)
+ return err;
+
dn->data_blkaddr = datablock_addr(dn->inode,
dn->node_page, dn->ofs_in_node);
if (dn->data_blkaddr == NEW_ADDR)
return err;
alloc:
- f2fs_get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
-
- f2fs_allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
+ old_blkaddr = dn->data_blkaddr;
+ f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
&sum, seg_type, NULL, false);
+ if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+ invalidate_mapping_pages(META_MAPPING(sbi),
+ old_blkaddr, old_blkaddr);
f2fs_set_data_blkaddr(dn);
/* update i_size */
next_block:
blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
- if (!is_valid_blkaddr(blkaddr)) {
+ if (__is_valid_data_blkaddr(blkaddr) &&
+ !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
+ err = -EFAULT;
+ goto sync_out;
+ }
+
+ if (!is_valid_data_blkaddr(sbi, blkaddr)) {
if (create) {
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
if (!page)
return -ENOMEM;
- f2fs_get_node_info(sbi, inode->i_ino, &ni);
+ err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
+ if (err) {
+ f2fs_put_page(page, 1);
+ return err;
+ }
phys = (__u64)blk_to_logical(inode, ni.blk_addr);
offset = offsetof(struct f2fs_inode, i_addr) +
if (!page)
return -ENOMEM;
- f2fs_get_node_info(sbi, xnid, &ni);
+ err = f2fs_get_node_info(sbi, xnid, &ni);
+ if (err) {
+ f2fs_put_page(page, 1);
+ return err;
+ }
phys = (__u64)blk_to_logical(inode, ni.blk_addr);
len = inode->i_sb->s_blocksize;
/*
* This function was originally taken from fs/mpage.c, and customized for f2fs.
* Major change was from block_size == page_size in f2fs by default.
+ *
+ * Note that the aops->readpages() function is ONLY used for read-ahead. If
+ * this function ever deviates from doing just read-ahead, it should either
+ * use ->readpage() or do the necessary surgery to decouple ->readpages()
+ * from read-ahead.
*/
static int f2fs_mpage_readpages(struct address_space *mapping,
struct list_head *pages, struct page *page,
- unsigned nr_pages)
+ unsigned nr_pages, bool is_readahead)
{
struct bio *bio = NULL;
sector_t last_block_in_bio = 0;
SetPageUptodate(page);
goto confused;
}
+
+ if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
+ DATA_GENERIC))
+ goto set_error_page;
} else {
zero_user_segment(page, 0, PAGE_SIZE);
if (!PageUptodate(page))
bio = NULL;
}
if (bio == NULL) {
- bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
+ bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
+ is_readahead ? REQ_RAHEAD : 0);
if (IS_ERR(bio)) {
bio = NULL;
goto set_error_page;
if (f2fs_has_inline_data(inode))
ret = f2fs_read_inline_data(inode, page);
if (ret == -EAGAIN)
- ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
+ ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1, false);
return ret;
}
if (f2fs_has_inline_data(inode))
return 0;
- return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
+ return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true);
}
static int encrypt_one_page(struct f2fs_io_info *fio)
{
struct inode *inode = fio->page->mapping->host;
+ struct page *mpage;
gfp_t gfp_flags = GFP_NOFS;
if (!f2fs_encrypted_file(inode))
retry_encrypt:
fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
PAGE_SIZE, 0, fio->page->index, gfp_flags);
- if (!IS_ERR(fio->encrypted_page))
- return 0;
+ if (IS_ERR(fio->encrypted_page)) {
+ /* flush pending IOs and wait for a while in the ENOMEM case */
+ if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
+ f2fs_flush_merged_writes(fio->sbi);
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ gfp_flags |= __GFP_NOFAIL;
+ goto retry_encrypt;
+ }
+ return PTR_ERR(fio->encrypted_page);
+ }
- /* flush pending IOs and wait for a while in the ENOMEM case */
- if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
- f2fs_flush_merged_writes(fio->sbi);
- congestion_wait(BLK_RW_ASYNC, HZ/50);
- gfp_flags |= __GFP_NOFAIL;
- goto retry_encrypt;
+ mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
+ if (mpage) {
+ if (PageUptodate(mpage))
+ memcpy(page_address(mpage),
+ page_address(fio->encrypted_page), PAGE_SIZE);
+ f2fs_put_page(mpage, 1);
}
- return PTR_ERR(fio->encrypted_page);
+ return 0;
}
static inline bool check_inplace_update_policy(struct inode *inode,
struct inode *inode = page->mapping->host;
struct dnode_of_data dn;
struct extent_info ei = {0,0,0};
+ struct node_info ni;
bool ipu_force = false;
int err = 0;
f2fs_lookup_extent_cache(inode, page->index, &ei)) {
fio->old_blkaddr = ei.blk + page->index - ei.fofs;
- if (is_valid_blkaddr(fio->old_blkaddr)) {
- ipu_force = true;
- fio->need_lock = LOCK_DONE;
- goto got_it;
- }
+ if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
+ DATA_GENERIC))
+ return -EFAULT;
+
+ ipu_force = true;
+ fio->need_lock = LOCK_DONE;
+ goto got_it;
}
/* Deadlock due to between page->lock and f2fs_lock_op */
goto out_writepage;
}
got_it:
+ if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
+ !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
+ DATA_GENERIC)) {
+ err = -EFAULT;
+ goto out_writepage;
+ }
/*
* If current allocation needs SSR,
* it had better in-place writes for updated data.
*/
- if (ipu_force || (is_valid_blkaddr(fio->old_blkaddr) &&
+ if (ipu_force || (is_valid_data_blkaddr(fio->sbi, fio->old_blkaddr) &&
need_inplace_update(fio))) {
err = encrypt_one_page(fio);
if (err)
fio->need_lock = LOCK_REQ;
}
+ err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
+ if (err)
+ goto out_writepage;
+
+ fio->version = ni.version;
+
err = encrypt_one_page(fio);
if (err)
goto out_writepage;
return ret;
}
+static inline bool __should_serialize_io(struct inode *inode,
+ struct writeback_control *wbc)
+{
+ if (!S_ISREG(inode->i_mode))
+ return false;
+ if (wbc->sync_mode != WB_SYNC_ALL)
+ return true;
+ if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
+ return true;
+ return false;
+}
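A small userspace model of how this predicate pairs with the sbi->writepages mutex taken by the caller below; the names and inputs are illustrative, not the real wbc structure.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t writepages = PTHREAD_MUTEX_INITIALIZER;

/* mirrors __should_serialize_io(): always serialize async writeback of
 * regular files; serialize sync writeback only for large dirty sets */
static bool should_serialize(bool is_reg, bool sync_all,
			     int dirty, int min_seq)
{
	if (!is_reg)
		return false;
	if (!sync_all)
		return true;
	return dirty >= min_seq;
}

static void write_pages(bool is_reg, bool sync_all, int dirty, int min_seq)
{
	bool locked = false;

	if (should_serialize(is_reg, sync_all, dirty, min_seq)) {
		pthread_mutex_lock(&writepages);
		locked = true;
	}
	printf("writeback (%sserialized)\n", locked ? "" : "not ");
	if (locked)
		pthread_mutex_unlock(&writepages);
}

int main(void)
{
	write_pages(true, false, 0, 16);	/* async: serialized */
	write_pages(true, true, 4, 16);		/* small sync set: not */
	return 0;
}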
+
static int __f2fs_write_data_pages(struct address_space *mapping,
struct writeback_control *wbc,
enum iostat_type io_type)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct blk_plug plug;
int ret;
+ bool locked = false;
/* deal with chardevs and other special file */
if (!mapping->a_ops->writepage)
else if (atomic_read(&sbi->wb_sync_req[DATA]))
goto skip_write;
+ if (__should_serialize_io(inode, wbc)) {
+ mutex_lock(&sbi->writepages);
+ locked = true;
+ }
+
blk_start_plug(&plug);
ret = f2fs_write_cache_pages(mapping, wbc, io_type);
blk_finish_plug(&plug);
+ if (locked)
+ mutex_unlock(&sbi->writepages);
+
if (wbc->sync_mode == WB_SYNC_ALL)
atomic_dec(&sbi->wb_sync_req[DATA]);
/*
loff_t i_size = i_size_read(inode);
if (to > i_size) {
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
down_write(&F2FS_I(inode)->i_mmap_sem);
+
truncate_pagecache(inode, i_size);
f2fs_truncate_blocks(inode, i_size, true);
+
up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
}
}
}
trace_f2fs_write_begin(inode, pos, len, flags);
- if (f2fs_is_atomic_file(inode) &&
- !f2fs_available_free_memory(sbi, INMEM_PAGES)) {
+ if ((f2fs_is_atomic_file(inode) &&
+ !f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
+ is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
err = -ENOMEM;
drop_atomic = true;
goto fail;
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
loff_t offset)
{
- unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
-
- if (offset & blocksize_mask)
- return -EINVAL;
-
- if (iov_iter_alignment(iter) & blocksize_mask)
- return -EINVAL;
-
+ unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
+ unsigned blkbits = i_blkbits;
+ unsigned blocksize_mask = (1 << blkbits) - 1;
+ unsigned long align = offset | iov_iter_alignment(iter);
+ struct block_device *bdev = inode->i_sb->s_bdev;
+
+ if (align & blocksize_mask) {
+ if (bdev)
+ blkbits = blksize_bits(bdev_logical_block_size(bdev));
+ blocksize_mask = (1 << blkbits) - 1;
+ if (align & blocksize_mask)
+ return -EINVAL;
+ return 1;
+ }
return 0;
}
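check_direct_IO() is now tri-state: 0 means fully aligned, a positive return means aligned only to the device's logical block size (the caller below maps that to a buffered-I/O fallback via err < 0 ? err : 0), and -EINVAL means not DIO-capable at all. A runnable sketch of that decision with hypothetical sizes:

#include <errno.h>
#include <stdio.h>

static int check_dio_align(unsigned long addr, unsigned fs_bs, unsigned dev_bs)
{
	if (!(addr & (fs_bs - 1)))
		return 0;	/* fully aligned: do direct I/O */
	if (!(addr & (dev_bs - 1)))
		return 1;	/* logical-block aligned: fall back to buffered */
	return -EINVAL;		/* misaligned even for the device */
}

int main(void)
{
	printf("%d %d %d\n",
	       check_dio_align(8192, 4096, 512),	/* 0 */
	       check_dio_align(1024, 4096, 512),	/* 1 */
	       check_dio_align(100, 4096, 512));	/* -EINVAL */
	return 0;
}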
err = check_direct_IO(inode, iter, offset);
if (err)
- return err;
+ return err < 0 ? err : 0;
if (f2fs_force_buffered_io(inode, rw))
return 0;
if (!PageUptodate(page))
SetPageUptodate(page);
+ /* don't remain PG_checked flag which was set during GC */
+ if (is_cold_data(page))
+ clear_cold_data(page);
+
if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
f2fs_register_inmem_page(inode, page);
si->base_mem += sizeof(struct f2fs_nm_info);
si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS);
- si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE;
+ si->base_mem += NM_I(sbi)->nat_blocks *
+ f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK);
si->base_mem += NM_I(sbi)->nat_blocks / 8;
si->base_mem += NM_I(sbi)->nat_blocks * sizeof(unsigned short);
}
start:
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH)) {
f2fs_show_injection_info(FAULT_DIR_DEPTH);
return -ENOSPC;
}
-#endif
+
if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
return -ENOSPC;
} while (0)
#endif
-#ifdef CONFIG_F2FS_FAULT_INJECTION
enum {
FAULT_KMALLOC,
FAULT_KVMALLOC,
FAULT_TRUNCATE,
FAULT_IO,
FAULT_CHECKPOINT,
+ FAULT_DISCARD,
FAULT_MAX,
};
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+#define F2FS_ALL_FAULT_TYPE ((1 << FAULT_MAX) - 1)
+
struct f2fs_fault_info {
atomic_t inject_ops;
unsigned int inject_rate;
unsigned int inject_type;
};
-extern char *fault_name[FAULT_MAX];
+extern char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
#endif
#define MAX_DISCARD_BLOCKS(sbi) BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */
-#define DEF_MAX_DISCARD_LEN 512 /* Max. 2MB per discard */
#define DEF_MIN_DISCARD_ISSUE_TIME 50 /* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME 500 /* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME 60000 /* 60 s, if no candidates */
};
/*
- * For CP/NAT/SIT/SSA readahead
+ * indicate meta/data type
*/
enum {
META_CP,
META_SIT,
META_SSA,
META_POR,
+ DATA_GENERIC,
+ META_GENERIC,
};
/* for the list of ino */
struct inode *inode; /* vfs inode pointer */
};
+struct fsync_node_entry {
+ struct list_head list; /* list head */
+ struct page *page; /* warm node page pointer */
+ unsigned int seq_id; /* sequence id */
+};
+
/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
struct list_head list; /* list head */
(MAX_PLIST_NUM - 1) : (blk_num - 1))
enum {
- D_PREP,
- D_SUBMIT,
- D_DONE,
+ D_PREP, /* initial */
+ D_PARTIAL, /* partially submitted */
+ D_SUBMIT, /* all submitted */
+ D_DONE, /* finished */
};
struct discard_info {
struct block_device *bdev; /* bdev */
unsigned short ref; /* reference count */
unsigned char state; /* state */
+ unsigned char issuing; /* issuing discard */
int error; /* bio error */
+ spinlock_t lock; /* for state/bio_ref updating */
+ unsigned short bio_ref; /* bio reference count */
};
enum {
unsigned int io_aware_gran; /* minimum discard granularity exempt from I/O awareness */
bool io_aware; /* issue discard in idle time */
bool sync; /* submit discard with REQ_SYNC flag */
+ bool ordered; /* issue discard by lba order */
unsigned int granularity; /* discard granularity */
};
unsigned int max_discards; /* max. discards to be issued */
unsigned int discard_granularity; /* discard granularity */
unsigned int undiscard_blks; /* # of undiscard blocks */
+ unsigned int next_pos; /* next discard position */
atomic_t issued_discard; /* # of issued discard */
atomic_t issing_discard; /* # of discards being issued */
atomic_t discard_cmd_cnt; /* # of cached cmd count */
struct rb_root root; /* root of discard rb-tree */
+ bool rbtree_check; /* config for consistence check */
};
/* for the list of fsync inodes, used only during recovery */
*/
};
+#define DEFAULT_RETRY_IO_COUNT 8 /* maximum retry read IO count */
+
#define F2FS_LINK_MAX 0xffffffff /* maximum link count per file */
#define MAX_DIR_RA_PAGES 4 /* maximum ra pages of dir */
-/* vector size for gang look-up from extent cache that consists of radix tree */
-#define EXT_TREE_VEC_SIZE 64
-
/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN 64 /* minimum extent length */
#define FADVISE_HOT_BIT 0x20
#define FADVISE_VERITY_BIT 0x40 /* reserved */
+#define FADVISE_MODIFIABLE_BITS (FADVISE_COLD_BIT | FADVISE_HOT_BIT)
+
#define file_is_cold(inode) is_file(inode, FADVISE_COLD_BIT)
#define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_set_cold(inode) set_file(inode, FADVISE_COLD_BIT)
}
static inline bool __is_discard_mergeable(struct discard_info *back,
- struct discard_info *front)
+ struct discard_info *front, unsigned int max_len)
{
return (back->lstart + back->len == front->lstart) &&
- (back->len + front->len < DEF_MAX_DISCARD_LEN);
+ (back->len + front->len <= max_len);
}
static inline bool __is_discard_back_mergeable(struct discard_info *cur,
- struct discard_info *back)
+ struct discard_info *back, unsigned int max_len)
{
- return __is_discard_mergeable(back, cur);
+ return __is_discard_mergeable(back, cur, max_len);
}
static inline bool __is_discard_front_mergeable(struct discard_info *cur,
- struct discard_info *front)
+ struct discard_info *front, unsigned int max_len)
{
- return __is_discard_mergeable(cur, front);
+ return __is_discard_mergeable(cur, front, max_len);
}
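With DEF_MAX_DISCARD_LEN gone, the merge limit is now a parameter supplied by the caller. A standalone model of the predicate:

#include <stdbool.h>
#include <stdio.h>

struct discard_info { unsigned lstart, len; };

/* two ranges merge if they are contiguous and the result stays <= max_len */
static bool mergeable(struct discard_info *back, struct discard_info *front,
		      unsigned max_len)
{
	return back->lstart + back->len == front->lstart &&
	       back->len + front->len <= max_len;
}

int main(void)
{
	struct discard_info a = { .lstart = 0, .len = 256 };
	struct discard_info b = { .lstart = 256, .len = 256 };

	printf("%d %d\n", mergeable(&a, &b, 512), mergeable(&a, &b, 511));
	return 0;
}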
static inline bool __is_extent_mergeable(struct extent_info *back,
struct radix_tree_root nat_set_root;/* root of the nat set cache */
struct rw_semaphore nat_tree_lock; /* protect the nat entry tree */
struct list_head nat_entries; /* cached nat entry list (clean) */
+ spinlock_t nat_list_lock; /* protect clean nat entry list */
unsigned int nat_cnt; /* the # of cached nat entries */
unsigned int dirty_nat_cnt; /* total num of nat entries in set */
unsigned int nat_blocks; /* # of nat blocks */
unsigned int ipu_policy; /* in-place-update policy */
unsigned int min_ipu_util; /* in-place-update threshold */
unsigned int min_fsync_blocks; /* threshold for fsync */
+ unsigned int min_seq_blocks; /* threshold for sequential blocks */
unsigned int min_hot_blocks; /* threshold for hot block allocation */
unsigned int min_ssr_sections; /* threshold to trigger SSR allocation */
bool retry; /* need to reallocate block address */
enum iostat_type io_type; /* io type */
struct writeback_control *io_wbc; /* writeback control */
+ unsigned char version; /* version of the node */
};
#define is_read_io(rw) ((rw) == READ)
SBI_POR_DOING, /* recovery is doing or not */
SBI_NEED_SB_WRITE, /* need to recover superblock */
SBI_NEED_CP, /* need to checkpoint */
+ SBI_IS_SHUTDOWN, /* shutdown by ioctl */
};
enum {
struct rw_semaphore sb_lock; /* lock for raw super block */
int valid_super_block; /* valid super block no */
unsigned long s_flag; /* flags for sbi */
+ struct mutex writepages; /* mutex for writepages() */
#ifdef CONFIG_BLK_DEV_ZONED
unsigned int blocks_per_blkz; /* F2FS blocks per zone */
struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */
+ spinlock_t fsync_node_lock; /* for node entry lock */
+ struct list_head fsync_node_list; /* node list head */
+ unsigned int fsync_seg_id; /* sequence id */
+ unsigned int fsync_node_num; /* number of node entries */
+
/* for orphan inode, use 0'th array */
unsigned int max_orphans; /* max orphan inodes */
unsigned int gc_mode; /* current GC state */
/* for skip statistic */
unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */
+ unsigned long long skipped_gc_rwsem; /* FG_GC only */
/* threshold for gc trials on pinned files */
u64 gc_pin_file_threshold;
#ifdef CONFIG_F2FS_FAULT_INJECTION
#define f2fs_show_injection_info(type) \
printk("%sF2FS-fs : inject %s in %s of %pF\n", \
- KERN_INFO, fault_name[type], \
+ KERN_INFO, f2fs_fault_name[type], \
__func__, __builtin_return_address(0))
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
}
return false;
}
+#else
+#define f2fs_show_injection_info(type) do { } while (0)
+static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
+{
+ return false;
+}
#endif
/* For write statistics. Suppose sector size is 512 bytes,
struct request_list *rl = &q->root_rl;
if (rl->count[BLK_RW_SYNC] || rl->count[BLK_RW_ASYNC])
- return 0;
+ return false;
return f2fs_time_over(sbi, REQ_TIME);
}
if (ret)
return ret;
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_BLOCK)) {
f2fs_show_injection_info(FAULT_BLOCK);
release = *count;
goto enospc;
}
-#endif
+
/*
* let's increase this in prior to actual block count change in order
* for f2fs_sync_file to avoid data races when deciding checkpoint.
sbi->total_valid_block_count -= diff;
if (!*count) {
spin_unlock(&sbi->stat_lock);
- percpu_counter_sub(&sbi->alloc_valid_block_count, diff);
goto enospc;
}
}
spin_unlock(&sbi->stat_lock);
- if (unlikely(release))
+ if (unlikely(release)) {
+ percpu_counter_sub(&sbi->alloc_valid_block_count, release);
dquot_release_reservation_block(inode, release);
+ }
f2fs_i_blocks_write(inode, *count, true, true);
return 0;
enospc:
+ percpu_counter_sub(&sbi->alloc_valid_block_count, release);
dquot_release_reservation_block(inode, release);
return -ENOSPC;
}
return ret;
}
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_BLOCK)) {
f2fs_show_injection_info(FAULT_BLOCK);
goto enospc;
}
-#endif
spin_lock(&sbi->stat_lock);
static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
pgoff_t index, bool for_write)
{
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- struct page *page = find_lock_page(mapping, index);
+ struct page *page;
- if (page)
- return page;
+ if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
+ if (!for_write)
+ page = find_get_page_flags(mapping, index,
+ FGP_LOCK | FGP_ACCESSED);
+ else
+ page = find_lock_page(mapping, index);
+ if (page)
+ return page;
- if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
- f2fs_show_injection_info(FAULT_PAGE_ALLOC);
- return NULL;
+ if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
+ f2fs_show_injection_info(FAULT_PAGE_ALLOC);
+ return NULL;
+ }
}
-#endif
+
if (!for_write)
return grab_cache_page(mapping, index);
return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
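The conversion above trades #ifdef for IS_ENABLED(), so both branches stay visible to the compiler and the fault-injection path is eliminated as dead code when the option is off. A simplified stand-in (the kernel's IS_ENABLED() is macro trickery over config symbols; here it is just a constant):

#include <stdio.h>

#define CONFIG_FAULT_INJECTION 0	/* flip to 1: both paths still compile */
#define IS_ENABLED(option) (option)

int main(void)
{
	if (IS_ENABLED(CONFIG_FAULT_INJECTION))
		printf("fault injection path\n");	/* eliminated when 0 */
	else
		printf("normal path\n");
	return 0;
}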
struct address_space *mapping, pgoff_t index,
int fgp_flags, gfp_t gfp_mask)
{
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
f2fs_show_injection_info(FAULT_PAGE_GET);
return NULL;
}
-#endif
+
return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
}
bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
return bio;
}
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
f2fs_show_injection_info(FAULT_ALLOC_BIO);
return NULL;
}
-#endif
+
return bio_alloc(GFP_KERNEL, npages);
}
static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
size_t size, gfp_t flags)
{
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_KMALLOC)) {
f2fs_show_injection_info(FAULT_KMALLOC);
return NULL;
}
-#endif
+
return kmalloc(size, flags);
}
static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
size_t size, gfp_t flags)
{
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_KVMALLOC)) {
f2fs_show_injection_info(FAULT_KVMALLOC);
return NULL;
}
-#endif
+
return kvmalloc(size, flags);
}
spin_unlock(&sbi->iostat_lock);
}
-static inline bool is_valid_blkaddr(block_t blkaddr)
+#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO(fio->type) == META && \
+ (!is_read_io(fio->op) || fio->is_meta))
+
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+ block_t blkaddr, int type);
+void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
+static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
+ block_t blkaddr, int type)
+{
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "invalid blkaddr: %u, type: %d, run fsck to fix.",
+ blkaddr, type);
+ f2fs_bug_on(sbi, 1);
+ }
+}
+
+static inline bool __is_valid_data_blkaddr(block_t blkaddr)
{
if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
return false;
return true;
}
+static inline bool is_valid_data_blkaddr(struct f2fs_sb_info *sbi,
+ block_t blkaddr)
+{
+ if (!__is_valid_data_blkaddr(blkaddr))
+ return false;
+ verify_blkaddr(sbi, blkaddr, DATA_GENERIC);
+ return true;
+}
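A userspace model of the helper layering introduced here: the double-underscore check only filters the NEW_ADDR/NULL_ADDR sentinels, while the sbi-aware wrapper also range-checks against the main area (modeled with plain bounds below; the verify_blkaddr() bug_on path is not modeled).

#include <stdbool.h>
#include <stdio.h>

#define NULL_ADDR 0u
#define NEW_ADDR  0xffffffffu

static bool __is_valid_data_blkaddr(unsigned blkaddr)
{
	return blkaddr != NULL_ADDR && blkaddr != NEW_ADDR;
}

/* stand-in for the sbi-aware range check; the bounds are invented */
static bool is_valid_data_blkaddr(unsigned blkaddr,
				  unsigned main_start, unsigned max_blkaddr)
{
	return __is_valid_data_blkaddr(blkaddr) &&
	       blkaddr >= main_start && blkaddr < max_blkaddr;
}

int main(void)
{
	printf("%d %d\n",
	       __is_valid_data_blkaddr(5),		/* 1: not a sentinel */
	       is_valid_data_blkaddr(5, 512, 1024));	/* 0: below main area */
	return 0;
}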
+
/*
* file.c
*/
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
+void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
+void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
+void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
-void f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
+int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
struct node_info *ni);
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
int f2fs_truncate_xattr_node(struct inode *inode);
-int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino);
+int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
+ unsigned int seq_id);
int f2fs_remove_inode_page(struct inode *inode);
struct page *f2fs_new_inode_page(struct inode *inode);
struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
struct page *f2fs_get_node_page_ra(struct page *parent, int start);
void f2fs_move_node_page(struct page *node_page, int gc_type);
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
- struct writeback_control *wbc, bool atomic);
+ struct writeback_control *wbc, bool atomic,
+ unsigned int *seq_id);
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
struct writeback_control *wbc,
bool do_balance, enum iostat_type io_type);
-void f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
+int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
void f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
-void f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
+int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
unsigned int segno, struct f2fs_summary_block *sum);
void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
+struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
-bool f2fs_is_valid_meta_blkaddr(struct f2fs_sb_info *sbi,
- block_t blkaddr, int type);
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+ block_t blkaddr, int type);
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
int type, bool sync);
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
void f2fs_update_dirty_page(struct inode *inode, struct page *page);
void f2fs_remove_dirty_inode(struct inode *inode);
int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
+void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi);
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_checkpoint_caches(void);
return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
#else
- return 0;
+ return false;
#endif
}
F2FS_I_SB(inode)->s_ndevs);
}
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+ unsigned int type);
+#else
+#define f2fs_build_fault_attr(sbi, rate, type) do { } while (0)
+#endif
+
#endif
.nr_to_write = LONG_MAX,
.for_reclaim = 0,
};
+ unsigned int seq_id = 0;
if (unlikely(f2fs_readonly(inode->i_sb)))
return 0;
}
sync_nodes:
atomic_inc(&sbi->wb_sync_req[NODE]);
- ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic);
+ ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
atomic_dec(&sbi->wb_sync_req[NODE]);
if (ret)
goto out;
* given fsync mark.
*/
if (!atomic) {
- ret = f2fs_wait_on_node_pages_writeback(sbi, ino);
+ ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
if (ret)
goto out;
}
return pgofs;
}
-static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
- int whence)
+static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
+ pgoff_t dirty, pgoff_t pgofs, int whence)
{
switch (whence) {
case SEEK_DATA:
if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
- is_valid_blkaddr(blkaddr))
+ is_valid_data_blkaddr(sbi, blkaddr))
return true;
break;
case SEEK_HOLE:
blkaddr = datablock_addr(dn.inode,
dn.node_page, dn.ofs_in_node);
- if (__found_offset(blkaddr, dirty, pgofs, whence)) {
+ if (__is_valid_data_blkaddr(blkaddr) &&
+ !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
+ blkaddr, DATA_GENERIC)) {
+ f2fs_put_dnode(&dn);
+ goto fail;
+ }
+
+ if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
+ pgofs, whence)) {
f2fs_put_dnode(&dn);
goto found;
}
dn->data_blkaddr = NULL_ADDR;
f2fs_set_data_blkaddr(dn);
+
+ if (__is_valid_data_blkaddr(blkaddr) &&
+ !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
+ continue;
+
f2fs_invalidate_blocks(sbi, blkaddr);
if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
trace_f2fs_truncate(inode);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
f2fs_show_injection_info(FAULT_TRUNCATE);
return -EIO;
}
-#endif
+
/* we should check inline_data size */
if (!f2fs_may_inline_data(inode)) {
err = f2fs_convert_inline_inode(inode);
}
if (attr->ia_valid & ATTR_SIZE) {
- if (attr->ia_size <= i_size_read(inode)) {
- down_write(&F2FS_I(inode)->i_mmap_sem);
- truncate_setsize(inode, attr->ia_size);
+ bool to_smaller = (attr->ia_size <= i_size_read(inode));
+
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ down_write(&F2FS_I(inode)->i_mmap_sem);
+
+ truncate_setsize(inode, attr->ia_size);
+
+ if (to_smaller)
err = f2fs_truncate(inode);
- up_write(&F2FS_I(inode)->i_mmap_sem);
- if (err)
- return err;
- } else {
- /*
- * do not trim all blocks after i_size if target size is
- * larger than i_size.
- */
- down_write(&F2FS_I(inode)->i_mmap_sem);
- truncate_setsize(inode, attr->ia_size);
- up_write(&F2FS_I(inode)->i_mmap_sem);
+ /*
+ * do not trim all blocks after i_size if target size is
+ * larger than i_size.
+ */
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+
+ if (err)
+ return err;
+ if (!to_smaller) {
/* should convert inline inode here */
if (!f2fs_may_inline_data(inode)) {
err = f2fs_convert_inline_inode(inode);
blk_start = (loff_t)pg_start << PAGE_SHIFT;
blk_end = (loff_t)pg_end << PAGE_SHIFT;
+
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
down_write(&F2FS_I(inode)->i_mmap_sem);
+
truncate_inode_pages_range(mapping, blk_start,
blk_end - 1);
f2fs_lock_op(sbi);
ret = f2fs_truncate_hole(inode, pg_start, pg_end);
f2fs_unlock_op(sbi);
+
up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
}
}
if (ret)
return ret;
- f2fs_get_node_info(sbi, dn.nid, &ni);
+ ret = f2fs_get_node_info(sbi, dn.nid, &ni);
+ if (ret) {
+ f2fs_put_dnode(&dn);
+ return ret;
+ }
+
ilen = min((pgoff_t)
ADDRS_PER_PAGE(dn.node_page, dst_inode) -
dn.ofs_in_node, len - i);
return ret;
}
-static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
+static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+ pgoff_t start = offset >> PAGE_SHIFT;
+ pgoff_t end = (offset + len) >> PAGE_SHIFT;
int ret;
f2fs_balance_fs(sbi, true);
- f2fs_lock_op(sbi);
- f2fs_drop_extent_tree(inode);
+ /* avoid gc operation during block exchange */
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ down_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_lock_op(sbi);
+ f2fs_drop_extent_tree(inode);
+ truncate_pagecache(inode, offset);
ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
f2fs_unlock_op(sbi);
+
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
return ret;
}
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
- pgoff_t pg_start, pg_end;
loff_t new_size;
int ret;
if (ret)
return ret;
- pg_start = offset >> PAGE_SHIFT;
- pg_end = (offset + len) >> PAGE_SHIFT;
-
- /* avoid gc operation during block exchange */
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-
- down_write(&F2FS_I(inode)->i_mmap_sem);
/* write out all dirty pages from offset */
ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
if (ret)
- goto out_unlock;
-
- truncate_pagecache(inode, offset);
+ return ret;
- ret = f2fs_do_collapse(inode, pg_start, pg_end);
+ ret = f2fs_do_collapse(inode, offset, len);
if (ret)
- goto out_unlock;
+ return ret;
/* write out all moved pages, if possible */
+ down_write(&F2FS_I(inode)->i_mmap_sem);
filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
truncate_pagecache(inode, offset);
truncate_pagecache(inode, new_size);
ret = f2fs_truncate_blocks(inode, new_size, true);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
if (!ret)
f2fs_i_size_write(inode, new_size);
-out_unlock:
- up_write(&F2FS_I(inode)->i_mmap_sem);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
return ret;
}
if (ret)
return ret;
- down_write(&F2FS_I(inode)->i_mmap_sem);
ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
if (ret)
- goto out_sem;
-
- truncate_pagecache_range(inode, offset, offset + len - 1);
+ return ret;
pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
ret = fill_zero(inode, pg_start, off_start,
off_end - off_start);
if (ret)
- goto out_sem;
+ return ret;
new_size = max_t(loff_t, new_size, offset + len);
} else {
ret = fill_zero(inode, pg_start++, off_start,
PAGE_SIZE - off_start);
if (ret)
- goto out_sem;
+ return ret;
new_size = max_t(loff_t, new_size,
(loff_t)pg_start << PAGE_SHIFT);
unsigned int end_offset;
pgoff_t end;
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ down_write(&F2FS_I(inode)->i_mmap_sem);
+
+ truncate_pagecache_range(inode,
+ (loff_t)index << PAGE_SHIFT,
+ ((loff_t)pg_end << PAGE_SHIFT) - 1);
+
f2fs_lock_op(sbi);
set_new_dnode(&dn, inode, NULL, NULL, 0);
ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
if (ret) {
f2fs_unlock_op(sbi);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
goto out;
}
ret = f2fs_do_zero_range(&dn, index, end);
f2fs_put_dnode(&dn);
+
f2fs_unlock_op(sbi);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_balance_fs(sbi, dn.node_changed);
else
f2fs_i_size_write(inode, new_size);
}
-out_sem:
- up_write(&F2FS_I(inode)->i_mmap_sem);
-
return ret;
}
f2fs_balance_fs(sbi, true);
- /* avoid gc operation during block exchange */
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-
down_write(&F2FS_I(inode)->i_mmap_sem);
ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
if (ret)
- goto out;
+ return ret;
/* write out all dirty pages from offset */
ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
if (ret)
- goto out;
-
- truncate_pagecache(inode, offset);
+ return ret;
pg_start = offset >> PAGE_SHIFT;
pg_end = (offset + len) >> PAGE_SHIFT;
delta = pg_end - pg_start;
idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+ /* avoid gc operation during block exchange */
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ down_write(&F2FS_I(inode)->i_mmap_sem);
+ truncate_pagecache(inode, offset);
+
while (!ret && idx > pg_start) {
nr = idx - pg_start;
if (nr > delta)
idx + delta, nr, false);
f2fs_unlock_op(sbi);
}
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
/* write out all moved pages, if possible */
+ down_write(&F2FS_I(inode)->i_mmap_sem);
filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
truncate_pagecache(inode, offset);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
if (!ret)
f2fs_i_size_write(inode, new_size);
-out:
- up_write(&F2FS_I(inode)->i_mmap_sem);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
return ret;
}
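To make the backward walk above concrete, here is the same chunking arithmetic rehearsed in user space (hypothetical values: a 10-page file, 2 pages inserted at page 3). Iterating from the old end downward guarantees a destination range is never overwritten before it has been copied:

	#include <stdio.h>

	int main(void)
	{
		unsigned long nrpages = 10, pg_start = 3, delta = 2;
		unsigned long idx = nrpages;

		/* shift pages [pg_start, nrpages) up by delta, highest chunk first */
		while (idx > pg_start) {
			unsigned long nr = idx - pg_start;

			if (nr > delta)
				nr = delta;	/* chunk size is capped by the gap */
			idx -= nr;
			printf("move [%lu, %lu) -> [%lu, %lu)\n",
			       idx, idx + nr, idx + delta, idx + delta + nr);
		}
		return 0;
	}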
struct f2fs_inode_info *fi = F2FS_I(inode);
unsigned int flags = fi->i_flags;
- if (file_is_encrypt(inode))
+ if (f2fs_encrypted_inode(inode))
flags |= F2FS_ENCRYPT_FL;
if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
flags |= F2FS_INLINE_DATA_FL;
inode_lock(inode);
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-
- if (f2fs_is_atomic_file(inode))
+ if (f2fs_is_atomic_file(inode)) {
+ if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
+ ret = -EINVAL;
goto out;
+ }
ret = f2fs_convert_inline_inode(inode);
if (ret)
goto out;
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+
if (!get_dirty_pages(inode))
goto skip_flush;
"Unexpected flush for atomic writes: ino=%lu, npages=%u",
inode->i_ino, get_dirty_pages(inode));
ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
- if (ret)
+ if (ret) {
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
goto out;
+ }
skip_flush:
set_inode_flag(inode, FI_ATOMIC_FILE);
clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
- f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
F2FS_I(inode)->inmem_task = current;
stat_inc_atomic_write(inode);
stat_update_max_atomic_write(inode);
out:
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
if (ret)
return ret;
- inode_lock(inode);
+ f2fs_balance_fs(F2FS_I_SB(inode), true);
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ inode_lock(inode);
if (f2fs_is_volatile_file(inode)) {
ret = -EINVAL;
clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
ret = -EINVAL;
}
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
}
+ clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
+
inode_unlock(inode);
mnt_drop_write_file(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct super_block *sb = sbi->sb;
__u32 in;
- int ret;
+ int ret = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
}
if (sb) {
f2fs_stop_checkpoint(sbi, false);
+ set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
thaw_bdev(sb->s_bdev, sb);
}
break;
if (ret)
goto out;
f2fs_stop_checkpoint(sbi, false);
+ set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
case F2FS_GOING_DOWN_NOSYNC:
f2fs_stop_checkpoint(sbi, false);
+ set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
case F2FS_GOING_DOWN_METAFLUSH:
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
f2fs_stop_checkpoint(sbi, false);
+ set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
default:
ret = -EINVAL;
return ret;
}
-static int f2fs_ioc_f2fs_write_checkpoint(struct file *filp, unsigned long arg)
+static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
}
inode_lock(src);
- down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
if (src != dst) {
ret = -EBUSY;
if (!inode_trylock(dst))
goto out;
- if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE])) {
- inode_unlock(dst);
- goto out;
- }
}
ret = -EINVAL;
goto out_unlock;
f2fs_balance_fs(sbi, true);
+
+ down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+ if (src != dst) {
+ ret = -EBUSY;
+ if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
+ goto out_src;
+ }
+
f2fs_lock_op(sbi);
ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
pos_out >> F2FS_BLKSIZE_BITS,
f2fs_i_size_write(dst, dst_osize);
}
f2fs_unlock_op(sbi);
-out_unlock:
- if (src != dst) {
+
+ if (src != dst)
up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
+out_src:
+ up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+out_unlock:
+ if (src != dst)
inode_unlock(dst);
- }
out:
- up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
inode_unlock(src);
return ret;
}
if (!pin) {
clear_inode_flag(inode, FI_PIN_FILE);
- F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = 1;
+ f2fs_i_gc_failures_write(inode, 0);
goto done;
}
case F2FS_IOC_GARBAGE_COLLECT_RANGE:
return f2fs_ioc_gc_range(filp, arg);
case F2FS_IOC_WRITE_CHECKPOINT:
- return f2fs_ioc_f2fs_write_checkpoint(filp, arg);
+ return f2fs_ioc_write_checkpoint(filp, arg);
case F2FS_IOC_DEFRAGMENT:
return f2fs_ioc_defragment(filp, arg);
case F2FS_IOC_MOVE_RANGE:
continue;
}
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
f2fs_show_injection_info(FAULT_CHECKPOINT);
f2fs_stop_checkpoint(sbi, false);
}
-#endif
if (!sb_start_write_trylock(sbi->sb))
continue;
continue;
}
- f2fs_get_node_info(sbi, nid, &ni);
+ if (f2fs_get_node_info(sbi, nid, &ni)) {
+ f2fs_put_page(node_page, 1);
+ continue;
+ }
+
if (ni.blk_addr != start_addr + off) {
f2fs_put_page(node_page, 1);
continue;
if (IS_ERR(node_page))
return false;
- f2fs_get_node_info(sbi, nid, dni);
+ if (f2fs_get_node_info(sbi, nid, dni)) {
+ f2fs_put_page(node_page, 1);
+ return false;
+ }
if (sum->version != dni->version) {
f2fs_msg(sbi->sb, KERN_WARNING,
return true;
}
+static int ra_data_block(struct inode *inode, pgoff_t index)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct address_space *mapping = inode->i_mapping;
+ struct dnode_of_data dn;
+ struct page *page;
+ struct extent_info ei = {0, 0, 0};
+ struct f2fs_io_info fio = {
+ .sbi = sbi,
+ .ino = inode->i_ino,
+ .type = DATA,
+ .temp = COLD,
+ .op = REQ_OP_READ,
+ .op_flags = 0,
+ .encrypted_page = NULL,
+ .in_list = false,
+ .retry = false,
+ };
+ int err;
+
+ page = f2fs_grab_cache_page(mapping, index, true);
+ if (!page)
+ return -ENOMEM;
+
+ if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+ dn.data_blkaddr = ei.blk + index - ei.fofs;
+ goto got_it;
+ }
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
+ if (err)
+ goto put_page;
+ f2fs_put_dnode(&dn);
+
+ if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
+ DATA_GENERIC))) {
+ err = -EFAULT;
+ goto put_page;
+ }
+got_it:
+ /* read page */
+ fio.page = page;
+ fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
+
+ fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
+ dn.data_blkaddr,
+ FGP_LOCK | FGP_CREAT, GFP_NOFS);
+ if (!fio.encrypted_page) {
+ err = -ENOMEM;
+ goto put_page;
+ }
+
+ err = f2fs_submit_page_bio(&fio);
+ if (err)
+ goto put_encrypted_page;
+ f2fs_put_page(fio.encrypted_page, 0);
+ f2fs_put_page(page, 1);
+ return 0;
+put_encrypted_page:
+ f2fs_put_page(fio.encrypted_page, 1);
+put_page:
+ f2fs_put_page(page, 1);
+ return err;
+}
+
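The extent-cache fast path in ra_data_block() derives the physical address by offsetting into the cached extent: any index inside [fofs, fofs + len) maps to blk + index - fofs. A standalone check of that arithmetic with made-up values:

	#include <stdio.h>

	struct extent_info { unsigned int fofs, blk, len; };

	int main(void)
	{
		struct extent_info ei = { .fofs = 100, .blk = 5000, .len = 8 };
		unsigned int index = 103;

		if (index >= ei.fofs && index < ei.fofs + ei.len)
			printf("data_blkaddr = %u\n",
			       ei.blk + index - ei.fofs);	/* prints 5003 */
		return 0;
	}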
/*
* Move data block via META_MAPPING while keeping locked data page.
* This can be used to move blocks, aka LBAs, directly on disk.
struct dnode_of_data dn;
struct f2fs_summary sum;
struct node_info ni;
- struct page *page;
+ struct page *page, *mpage;
block_t newaddr;
int err;
bool lfs_mode = test_opt(fio.sbi, LFS);
*/
f2fs_wait_on_page_writeback(page, DATA, true);
- f2fs_get_node_info(fio.sbi, dn.nid, &ni);
+ err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
+ if (err)
+ goto put_out;
+
set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
/* read page */
goto recover_block;
}
+ mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
+ fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
+ if (mpage) {
+ bool updated = false;
+
+ if (PageUptodate(mpage)) {
+ memcpy(page_address(fio.encrypted_page),
+ page_address(mpage), PAGE_SIZE);
+ updated = true;
+ }
+ f2fs_put_page(mpage, 1);
+ invalidate_mapping_pages(META_MAPPING(fio.sbi),
+ fio.old_blkaddr, fio.old_blkaddr);
+ if (updated)
+ goto write_page;
+ }
+
err = f2fs_submit_page_bio(&fio);
if (err)
goto put_page_out;
goto put_page_out;
}
+write_page:
set_page_dirty(fio.encrypted_page);
f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
if (clear_page_dirty_for_io(fio.encrypted_page))
if (IS_ERR(inode) || is_bad_inode(inode))
continue;
- /* if inode uses special I/O path, let's go phase 3 */
- if (f2fs_post_read_required(inode)) {
- add_gc_inode(gc_list, inode);
- continue;
- }
-
if (!down_write_trylock(
&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
iput(inode);
+ sbi->skipped_gc_rwsem++;
+ continue;
+ }
+
+ start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
+ ofs_in_node;
+
+ if (f2fs_post_read_required(inode)) {
+ int err = ra_data_block(inode, start_bidx);
+
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ if (err) {
+ iput(inode);
+ continue;
+ }
+ add_gc_inode(gc_list, inode);
continue;
}
- start_bidx = f2fs_start_bidx_of_node(nofs, inode);
data_page = f2fs_get_read_data_page(inode,
- start_bidx + ofs_in_node, REQ_RAHEAD,
- true);
+ start_bidx, REQ_RAHEAD, true);
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
if (IS_ERR(data_page)) {
iput(inode);
continue;
if (!down_write_trylock(
&fi->i_gc_rwsem[WRITE])) {
+ sbi->skipped_gc_rwsem++;
up_write(&fi->i_gc_rwsem[READ]);
continue;
}
goto next;
sum = page_address(sum_page);
- f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
+ if (type != GET_SUM_TYPE((&sum->footer))) {
+ f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
+ "type [%d, %d] in SSA and SIT",
+ segno, type, GET_SUM_TYPE((&sum->footer)));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ goto next;
+ }
/*
* this is to avoid deadlock:
.iroot = RADIX_TREE_INIT(GFP_NOFS),
};
unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
+ unsigned long long first_skipped;
unsigned int skipped_round = 0, round = 0;
trace_f2fs_gc_begin(sbi->sb, sync, background,
prefree_segments(sbi));
cpc.reason = __get_cp_reason(sbi);
+ sbi->skipped_gc_rwsem = 0;
+ first_skipped = last_skipped;
gc_more:
if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) {
ret = -EINVAL;
total_freed += seg_freed;
if (gc_type == FG_GC) {
- if (sbi->skipped_atomic_files[FG_GC] > last_skipped)
+ if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
+ sbi->skipped_gc_rwsem)
skipped_round++;
last_skipped = sbi->skipped_atomic_files[FG_GC];
round++;
if (gc_type == FG_GC)
sbi->cur_victim_sec = NULL_SEGNO;
- if (!sync) {
- if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
- if (skipped_round > MAX_SKIP_ATOMIC_COUNT &&
- skipped_round * 2 >= round)
- f2fs_drop_inmem_pages_all(sbi, true);
+ if (sync)
+ goto stop;
+
+ if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
+ if (skipped_round <= MAX_SKIP_GC_COUNT ||
+ skipped_round * 2 < round) {
segno = NULL_SEGNO;
goto gc_more;
}
+ if (first_skipped < last_skipped &&
+ (last_skipped - first_skipped) >
+ sbi->skipped_gc_rwsem) {
+ f2fs_drop_inmem_pages_all(sbi, true);
+ segno = NULL_SEGNO;
+ goto gc_more;
+ }
if (gc_type == FG_GC)
ret = f2fs_write_checkpoint(sbi, &cpc);
}
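The pieces of the FG_GC retry policy are spread over several hunks above; condensed into one place (illustrative only, using the diff's own names), the decision is:

	if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
		if (skipped_round <= MAX_SKIP_GC_COUNT ||
		    skipped_round * 2 < round)
			goto gc_more;		/* skips are still rare: retry */

		if (first_skipped < last_skipped &&
		    last_skipped - first_skipped > sbi->skipped_gc_rwsem) {
			/* atomic-file writes dominate the skips: revoke them */
			f2fs_drop_inmem_pages_all(sbi, true);
			goto gc_more;
		}
		/* otherwise fall back to a checkpoint to reclaim space */
	}

Counting skipped_gc_rwsem separately keeps harmless i_gc_rwsem contention from triggering the fairly heavy revocation of all in-memory atomic pages.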
.encrypted_page = NULL,
.io_type = FS_DATA_IO,
};
+ struct node_info ni;
int dirty, err;
if (!f2fs_exist_data(dn->inode))
if (err)
return err;
+ err = f2fs_get_node_info(fio.sbi, dn->nid, &ni);
+ if (err) {
+ f2fs_put_dnode(dn);
+ return err;
+ }
+
+ fio.version = ni.version;
+
+ if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
+ f2fs_put_dnode(dn);
+ set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
+ f2fs_msg(fio.sbi->sb, KERN_WARNING,
+ "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
+ "run fsck to fix.",
+ __func__, dn->inode->i_ino, dn->data_blkaddr);
+ return -EINVAL;
+ }
+
f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
f2fs_do_read_inline_data(page, dn->inode_page);
if (err)
goto out;
+ if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
+ f2fs_put_dnode(&dn);
+ set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
+ f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING,
+ "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
+ "run fsck to fix.",
+ __func__, dir->i_ino, dn.data_blkaddr);
+ err = -EINVAL;
+ goto out;
+ }
+
f2fs_wait_on_page_writeback(page, DATA, true);
dentry_blk = page_address(page);
return 0;
recover:
lock_page(ipage);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
f2fs_i_depth_write(dir, 0);
f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
ilen = start + len;
ilen -= start;
- f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
+ err = f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
+ if (err)
+ goto out;
+
byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
byteaddr += (char *)inline_data_addr(inode, ipage) -
(char *)F2FS_INODE(ipage);
}
}
-static bool __written_first_block(struct f2fs_inode *ri)
+static int __written_first_block(struct f2fs_sb_info *sbi,
+ struct f2fs_inode *ri)
{
block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);
- if (is_valid_blkaddr(addr))
- return true;
- return false;
+ if (!__is_valid_data_blkaddr(addr))
+ return 1;
+ if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC))
+ return -EFAULT;
+ return 0;
}
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
if (!f2fs_sb_has_inode_chksum(sbi->sb))
return false;
- if (!RAW_IS_INODE(F2FS_NODE(page)) || !(ri->i_inline & F2FS_EXTRA_ATTR))
+ if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
return false;
if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
struct f2fs_inode *ri;
__u32 provided, calculated;
+ if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
+ return true;
+
+#ifdef CONFIG_F2FS_CHECK_FS
+ if (!f2fs_enable_inode_chksum(sbi, page))
+#else
if (!f2fs_enable_inode_chksum(sbi, page) ||
PageDirty(page) || PageWriteback(page))
+#endif
return true;
ri = &F2FS_NODE(page)->i;
ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}
-static bool sanity_check_inode(struct inode *inode)
+static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ unsigned long long iblocks;
+
+ iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
+ if (!iblocks) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, "
+ "run fsck to fix.",
+ __func__, inode->i_ino, iblocks);
+ return false;
+ }
+
+ if (ino_of_node(node_page) != nid_of_node(node_page)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: corrupted inode footer i_ino=%lx, ino,nid: "
+ "[%u, %u] run fsck to fix.",
+ __func__, inode->i_ino,
+ ino_of_node(node_page), nid_of_node(node_page));
+ return false;
+ }
if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)
&& !f2fs_has_extra_attr(inode)) {
__func__, inode->i_ino);
return false;
}
+
+ if (f2fs_has_extra_attr(inode) &&
+ !f2fs_sb_has_extra_attr(sbi->sb)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: inode (ino=%lx) is with extra_attr, "
+ "but extra_attr feature is off",
+ __func__, inode->i_ino);
+ return false;
+ }
+
+ if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
+ fi->i_extra_isize % sizeof(__le32)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, "
+ "max: %zu",
+ __func__, inode->i_ino, fi->i_extra_isize,
+ F2FS_TOTAL_EXTRA_ATTR_SIZE);
+ return false;
+ }
+
+ if (F2FS_I(inode)->extent_tree) {
+ struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;
+
+ if (ei->len &&
+ (!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC) ||
+ !f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
+ DATA_GENERIC))) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: inode (ino=%lx) extent info [%u, %u, %u] "
+ "is incorrect, run fsck to fix",
+ __func__, inode->i_ino,
+ ei->blk, ei->fofs, ei->len);
+ return false;
+ }
+ }
+
+ if (f2fs_has_inline_data(inode) &&
+ (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: inode (ino=%lx, mode=%u) should not have "
+ "inline_data, run fsck to fix",
+ __func__, inode->i_ino, inode->i_mode);
+ return false;
+ }
+
+ if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: inode (ino=%lx, mode=%u) should not have "
+ "inline_dentry, run fsck to fix",
+ __func__, inode->i_ino, inode->i_mode);
+ return false;
+ }
+
return true;
}
struct page *node_page;
struct f2fs_inode *ri;
projid_t i_projid;
+ int err;
/* Check if ino is within scope */
if (f2fs_check_nid_range(sbi, inode->i_ino))
fi->i_inline_xattr_size = 0;
}
+ if (!sanity_check_inode(inode, node_page)) {
+ f2fs_put_page(node_page, 1);
+ return -EINVAL;
+ }
+
/* check data exist */
if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
__recover_inline_status(inode, node_page);
/* get rdev by using inline_info */
__get_inode_rdev(inode, ri);
- if (__written_first_block(ri))
- set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+ if (S_ISREG(inode->i_mode)) {
+ err = __written_first_block(sbi, ri);
+ if (err < 0) {
+ f2fs_put_page(node_page, 1);
+ return err;
+ }
+ if (!err)
+ set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+ }
if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
fi->last_disk_size = inode->i_size;
ret = do_read_inode(inode);
if (ret)
goto bad_inode;
- if (!sanity_check_inode(inode)) {
- ret = -EINVAL;
- goto bad_inode;
- }
make_now:
if (ino == F2FS_NODE_INO(sbi)) {
inode->i_mapping->a_ops = &f2fs_node_aops;
F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
+
+#ifdef CONFIG_F2FS_CHECK_FS
+ f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
+#endif
}
void f2fs_update_inode_page(struct inode *inode)
if (F2FS_HAS_BLOCKS(inode))
err = f2fs_truncate(inode);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
f2fs_show_injection_info(FAULT_EVICT_INODE);
err = -EIO;
}
-#endif
+
if (!err) {
f2fs_lock_op(sbi);
err = f2fs_remove_inode_page(inode);
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct node_info ni;
+ int err;
/*
* clear nlink of inode in order to release resource of inode
* so we can prevent losing this orphan when encountering checkpoint
* and a following sudden power-off.
*/
- f2fs_get_node_info(sbi, inode->i_ino, &ni);
+ err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
+ if (err) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "May loss orphan inode, run fsck to fix.");
+ goto out;
+ }
if (ni.blk_addr != NULL_ADDR) {
- int err = f2fs_acquire_orphan_inode(sbi);
+ err = f2fs_acquire_orphan_inode(sbi);
if (err) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_msg(sbi->sb, KERN_WARNING,
set_inode_flag(inode, FI_FREE_NID);
}
+out:
f2fs_unlock_op(sbi);
/* iput will drop the inode object */
return -EINVAL;
if (hot) {
- strncpy(extlist[count], name, strlen(name));
+ memcpy(extlist[count], name, strlen(name));
sbi->raw_super->hot_ext_count = hot_count + 1;
} else {
char buf[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];
memcpy(buf, &extlist[cold_count],
F2FS_EXTENSION_LEN * hot_count);
memset(extlist[cold_count], 0, F2FS_EXTENSION_LEN);
- strncpy(extlist[cold_count], name, strlen(name));
+ memcpy(extlist[cold_count], name, strlen(name));
memcpy(&extlist[cold_count + 1], buf,
F2FS_EXTENSION_LEN * hot_count);
sbi->raw_super->extension_count = cpu_to_le32(cold_count + 1);
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
+static struct kmem_cache *fsync_node_entry_slab;
/*
* Check whether the given nid is within node id range.
static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
- pgoff_t index = current_nat_addr(sbi, nid);
- return f2fs_get_meta_page(sbi, index);
+ return f2fs_get_meta_page_nofail(sbi, current_nat_addr(sbi, nid));
}
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
struct page *src_page;
struct page *dst_page;
- pgoff_t src_off;
pgoff_t dst_off;
void *src_addr;
void *dst_addr;
struct f2fs_nm_info *nm_i = NM_I(sbi);
- src_off = current_nat_addr(sbi, nid);
- dst_off = next_nat_addr(sbi, src_off);
+ dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));
/* get current nat block page with lock */
- src_page = f2fs_get_meta_page(sbi, src_off);
+ src_page = get_current_nat_page(sbi, nid);
dst_page = f2fs_grab_meta_page(sbi, dst_off);
f2fs_bug_on(sbi, PageDirty(src_page));
if (raw_ne)
node_info_from_raw_nat(&ne->ni, raw_ne);
+
+ spin_lock(&nm_i->nat_list_lock);
list_add_tail(&ne->list, &nm_i->nat_entries);
+ spin_unlock(&nm_i->nat_list_lock);
+
nm_i->nat_cnt++;
return ne;
}
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
- return radix_tree_lookup(&nm_i->nat_root, n);
+ struct nat_entry *ne;
+
+ ne = radix_tree_lookup(&nm_i->nat_root, n);
+
+ /* for a recently accessed nat entry, move it to the tail of the lru list */
+ if (ne && !get_nat_flag(ne, IS_DIRTY)) {
+ spin_lock(&nm_i->nat_list_lock);
+ if (!list_empty(&ne->list))
+ list_move_tail(&ne->list, &nm_i->nat_entries);
+ spin_unlock(&nm_i->nat_list_lock);
+ }
+
+ return ne;
}
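The hit path above turns nat_entries into a proper LRU: clean entries move to the tail on every lookup, and the shrinker further down evicts from the head under the same nat_list_lock. The discipline in miniature, as a runnable toy (array-based, purely illustrative):

	#include <stdio.h>

	#define N 4

	static int lru[N];	/* lru[0] is the head, i.e. the eviction victim */
	static int count;

	static void touch(int key)
	{
		int i, j;

		for (i = 0; i < count; i++)
			if (lru[i] == key)
				break;

		if (i < count) {		/* hit: unlink, re-append below */
			count--;
		} else if (count == N) {	/* miss on a full cache */
			printf("evict %d\n", lru[0]);
			i = 0;
			count--;
		} else {			/* miss with room: append */
			i = count;
		}

		for (j = i; j < count; j++)
			lru[j] = lru[j + 1];
		lru[count++] = key;		/* most recent lives at the tail */
	}

	int main(void)
	{
		int keys[] = { 1, 2, 3, 4, 1, 5 }, i;

		for (i = 0; i < 6; i++)
			touch(keys[i]);		/* prints "evict 2" */
		for (i = 0; i < count; i++)
			printf("%d ", lru[i]);	/* prints: 3 4 1 5 */
		printf("\n");
		return 0;
	}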
static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
- list_del(&e->list);
radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
nm_i->nat_cnt--;
__free_nat_entry(e);
nm_i->dirty_nat_cnt++;
set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
+ spin_lock(&nm_i->nat_list_lock);
if (new_ne)
list_del_init(&ne->list);
else
list_move_tail(&ne->list, &head->entry_list);
+ spin_unlock(&nm_i->nat_list_lock);
}
static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
struct nat_entry_set *set, struct nat_entry *ne)
{
+ spin_lock(&nm_i->nat_list_lock);
list_move_tail(&ne->list, &nm_i->nat_entries);
+ spin_unlock(&nm_i->nat_list_lock);
+
set_nat_flag(ne, IS_DIRTY, false);
set->entry_cnt--;
nm_i->dirty_nat_cnt--;
start, nr);
}
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
+{
+ return NODE_MAPPING(sbi) == page->mapping &&
+ IS_DNODE(page) && is_cold_node(page);
+}
+
+void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
+{
+ spin_lock_init(&sbi->fsync_node_lock);
+ INIT_LIST_HEAD(&sbi->fsync_node_list);
+ sbi->fsync_seg_id = 0;
+ sbi->fsync_node_num = 0;
+}
+
+static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
+ struct page *page)
+{
+ struct fsync_node_entry *fn;
+ unsigned long flags;
+ unsigned int seq_id;
+
+ fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS);
+
+ get_page(page);
+ fn->page = page;
+ INIT_LIST_HEAD(&fn->list);
+
+ spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+ list_add_tail(&fn->list, &sbi->fsync_node_list);
+ fn->seq_id = sbi->fsync_seg_id++;
+ seq_id = fn->seq_id;
+ sbi->fsync_node_num++;
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+
+ return seq_id;
+}
+
+void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
+{
+ struct fsync_node_entry *fn;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+ list_for_each_entry(fn, &sbi->fsync_node_list, list) {
+ if (fn->page == page) {
+ list_del(&fn->list);
+ sbi->fsync_node_num--;
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+ kmem_cache_free(fsync_node_entry_slab, fn);
+ put_page(page);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+ f2fs_bug_on(sbi, 1);
+}
+
+void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+ sbi->fsync_seg_id = 0;
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+}
+
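These helpers replace the old fsync-side pagevec scan: each warm dnode page gets a monotonically increasing seq_id on a global FIFO when it is submitted, and the fsync path later waits only for entries up to the seq_id it was handed. A condensed sketch of the pairing (both halves appear in full later in this diff):

	/* writeback side, in __write_node_page() */
	if (f2fs_in_warm_node_list(sbi, page))
		seq = f2fs_add_fsync_node_entry(sbi, page);

	/* fsync side: block until every page tagged <= seq has completed */
	ret = f2fs_wait_on_node_pages_writeback(sbi, seq);

Since entries are appended in seq_id order, the waiter can stop at the first list head whose seq_id exceeds its own, instead of scanning the whole node mapping for writeback-tagged pages.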
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
new_blkaddr == NULL_ADDR);
f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
new_blkaddr == NEW_ADDR);
- f2fs_bug_on(sbi, is_valid_blkaddr(nat_get_blkaddr(e)) &&
+ f2fs_bug_on(sbi, is_valid_data_blkaddr(sbi, nat_get_blkaddr(e)) &&
new_blkaddr == NEW_ADDR);
/* increment version no as node is removed */
/* change address */
nat_set_blkaddr(e, new_blkaddr);
- if (!is_valid_blkaddr(new_blkaddr))
+ if (!is_valid_data_blkaddr(sbi, new_blkaddr))
set_nat_flag(e, IS_CHECKPOINTED, false);
__set_nat_cache_dirty(nm_i, e);
if (!down_write_trylock(&nm_i->nat_tree_lock))
return 0;
- while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
+ spin_lock(&nm_i->nat_list_lock);
+ while (nr_shrink) {
struct nat_entry *ne;
+
+ if (list_empty(&nm_i->nat_entries))
+ break;
+
ne = list_first_entry(&nm_i->nat_entries,
struct nat_entry, list);
+ list_del(&ne->list);
+ spin_unlock(&nm_i->nat_list_lock);
+
__del_from_nat_cache(nm_i, ne);
nr_shrink--;
+
+ spin_lock(&nm_i->nat_list_lock);
}
+ spin_unlock(&nm_i->nat_list_lock);
+
up_write(&nm_i->nat_tree_lock);
return nr - nr_shrink;
}
/*
* Return 0 on success, or a negative errno if the NAT page cannot be read
*/
-void f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
+int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
struct node_info *ni)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
ni->blk_addr = nat_get_blkaddr(e);
ni->version = nat_get_version(e);
up_read(&nm_i->nat_tree_lock);
- return;
+ return 0;
}
memset(&ne, 0, sizeof(struct f2fs_nat_entry));
up_read(&nm_i->nat_tree_lock);
page = f2fs_get_meta_page(sbi, index);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
nat_blk = (struct f2fs_nat_block *)page_address(page);
ne = nat_blk->entries[nid - start_nid];
node_info_from_raw_nat(ni, &ne);
cache:
/* cache nat entry */
cache_nat_entry(sbi, nid, &ne);
+ return 0;
}
/*
return err;
}
-static void truncate_node(struct dnode_of_data *dn)
+static int truncate_node(struct dnode_of_data *dn)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct node_info ni;
+ int err;
- f2fs_get_node_info(sbi, dn->nid, &ni);
+ err = f2fs_get_node_info(sbi, dn->nid, &ni);
+ if (err)
+ return err;
/* Deallocate node address */
f2fs_invalidate_blocks(sbi, ni.blk_addr);
dn->node_page = NULL;
trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
+
+ return 0;
}
static int truncate_dnode(struct dnode_of_data *dn)
{
struct page *page;
+ int err;
if (dn->nid == 0)
return 1;
dn->node_page = page;
dn->ofs_in_node = 0;
f2fs_truncate_data_blocks(dn);
- truncate_node(dn);
+ err = truncate_node(dn);
+ if (err)
+ return err;
+
return 1;
}
if (!ofs) {
/* remove current indirect node */
dn->node_page = page;
- truncate_node(dn);
+ ret = truncate_node(dn);
+ if (ret)
+ goto out_err;
freed++;
} else {
f2fs_put_page(page, 1);
if (offset[idx + 1] == 0) {
dn->node_page = pages[idx];
dn->nid = nid[idx];
- truncate_node(dn);
+ err = truncate_node(dn);
+ if (err)
+ goto fail;
} else {
f2fs_put_page(pages[idx], 1);
}
nid_t nid = F2FS_I(inode)->i_xattr_nid;
struct dnode_of_data dn;
struct page *npage;
+ int err;
if (!nid)
return 0;
if (IS_ERR(npage))
return PTR_ERR(npage);
+ set_new_dnode(&dn, inode, NULL, npage, nid);
+ err = truncate_node(&dn);
+ if (err) {
+ f2fs_put_page(npage, 1);
+ return err;
+ }
+
f2fs_i_xnid_write(inode, 0);
- set_new_dnode(&dn, inode, NULL, npage, nid);
- truncate_node(&dn);
return 0;
}
f2fs_truncate_data_blocks_range(&dn, 1);
/* 0 is possible, after f2fs_new_inode() has failed */
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
+ f2fs_put_dnode(&dn);
+ return -EIO;
+ }
f2fs_bug_on(F2FS_I_SB(inode),
inode->i_blocks != 0 && inode->i_blocks != 8);
/* will put inode & node pages */
- truncate_node(&dn);
+ err = truncate_node(&dn);
+ if (err) {
+ f2fs_put_dnode(&dn);
+ return err;
+ }
return 0;
}
goto fail;
#ifdef CONFIG_F2FS_CHECK_FS
- f2fs_get_node_info(sbi, dn->nid, &new_ni);
+ err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
+ if (err) {
+ dec_valid_node_count(sbi, dn->inode, !ofs);
+ goto fail;
+ }
f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
#endif
new_ni.nid = dn->nid;
.page = page,
.encrypted_page = NULL,
};
+ int err;
- if (PageUptodate(page))
+ if (PageUptodate(page)) {
+#ifdef CONFIG_F2FS_CHECK_FS
+ f2fs_bug_on(sbi, !f2fs_inode_chksum_verify(sbi, page));
+#endif
return LOCKED_PAGE;
+ }
- f2fs_get_node_info(sbi, page->index, &ni);
+ err = f2fs_get_node_info(sbi, page->index, &ni);
+ if (err)
+ return err;
- if (unlikely(ni.blk_addr == NULL_ADDR)) {
+ if (unlikely(ni.blk_addr == NULL_ADDR) ||
+ is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
ClearPageUptodate(page);
return -ENOENT;
}
static int __write_node_page(struct page *page, bool atomic, bool *submitted,
struct writeback_control *wbc, bool do_balance,
- enum iostat_type io_type)
+ enum iostat_type io_type, unsigned int *seq_id)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
nid_t nid;
.io_type = io_type,
.io_wbc = wbc,
};
+ unsigned int seq;
trace_f2fs_writepage(page, NODE);
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
+ if (wbc->sync_mode == WB_SYNC_NONE &&
+ IS_DNODE(page) && is_cold_node(page))
+ goto redirty_out;
+
/* get old block addr of this node page */
nid = nid_of_node(page);
f2fs_bug_on(sbi, page->index != nid);
+ if (f2fs_get_node_info(sbi, nid, &ni))
+ goto redirty_out;
+
if (wbc->for_reclaim) {
if (!down_read_trylock(&sbi->node_write))
goto redirty_out;
down_read(&sbi->node_write);
}
- f2fs_get_node_info(sbi, nid, &ni);
-
/* This page is already truncated */
if (unlikely(ni.blk_addr == NULL_ADDR)) {
ClearPageUptodate(page);
return 0;
}
+ if (__is_valid_data_blkaddr(ni.blk_addr) &&
+ !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC))
+ goto redirty_out;
+
if (atomic && !test_opt(sbi, NOBARRIER))
fio.op_flags |= WRITE_FLUSH_FUA;
set_page_writeback(page);
ClearPageError(page);
+
+ if (f2fs_in_warm_node_list(sbi, page)) {
+ seq = f2fs_add_fsync_node_entry(sbi, page);
+ if (seq_id)
+ *seq_id = seq;
+ }
+
fio.old_blkaddr = ni.blk_addr;
f2fs_do_write_node_page(nid, &fio);
set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
goto out_page;
if (__write_node_page(node_page, false, NULL,
- &wbc, false, FS_GC_NODE_IO))
+ &wbc, false, FS_GC_NODE_IO, NULL))
unlock_page(node_page);
goto release_page;
} else {
static int f2fs_write_node_page(struct page *page,
struct writeback_control *wbc)
{
- return __write_node_page(page, false, NULL, wbc, false, FS_NODE_IO);
+ return __write_node_page(page, false, NULL, wbc, false,
+ FS_NODE_IO, NULL);
}
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
- struct writeback_control *wbc, bool atomic)
+ struct writeback_control *wbc, bool atomic,
+ unsigned int *seq_id)
{
pgoff_t index;
pgoff_t last_idx = ULONG_MAX;
ret = __write_node_page(page, atomic &&
page == last_page,
&submitted, wbc, true,
- FS_NODE_IO);
+ FS_NODE_IO, seq_id);
if (ret) {
unlock_page(page);
f2fs_put_page(last_page, 0);
!is_cold_node(page)))
continue;
lock_node:
- if (!trylock_page(page))
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ lock_page(page);
+ else if (!trylock_page(page))
continue;
if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
set_dentry_mark(page, 0);
ret = __write_node_page(page, false, &submitted,
- wbc, do_balance, io_type);
+ wbc, do_balance, io_type, NULL);
if (ret)
unlock_page(page);
else if (submitted)
}
if (step < 2) {
+ if (wbc->sync_mode == WB_SYNC_NONE && step == 1)
+ goto out;
step++;
goto next_step;
}
-
+out:
if (nwritten)
f2fs_submit_merged_write(sbi, NODE);
return ret;
}
-int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
+int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
+ unsigned int seq_id)
{
- pgoff_t index = 0;
- struct pagevec pvec;
+ struct fsync_node_entry *fn;
+ struct page *page;
+ struct list_head *head = &sbi->fsync_node_list;
+ unsigned long flags;
+ unsigned int cur_seq_id = 0;
int ret2 = 0, ret = 0;
- int nr_pages;
- pagevec_init(&pvec, 0);
+ while (seq_id && cur_seq_id < seq_id) {
+ spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+ break;
+ }
+ fn = list_first_entry(head, struct fsync_node_entry, list);
+ if (fn->seq_id > seq_id) {
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+ break;
+ }
+ cur_seq_id = fn->seq_id;
+ page = fn->page;
+ get_page(page);
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
- while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
- PAGECACHE_TAG_WRITEBACK))) {
- int i;
+ f2fs_wait_on_page_writeback(page, NODE, true);
+ if (TestClearPageError(page))
+ ret = -EIO;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
+ put_page(page);
- if (ino && ino_of_node(page) == ino) {
- f2fs_wait_on_page_writeback(page, NODE, true);
- if (TestClearPageError(page))
- ret = -EIO;
- }
- }
- pagevec_release(&pvec);
- cond_resched();
+ if (ret)
+ break;
}
if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
ret2 = -EIO;
if (!ret)
ret = ret2;
+
return ret;
}
if (!PageUptodate(page))
SetPageUptodate(page);
+#ifdef CONFIG_F2FS_CHECK_FS
+ if (IS_INODE(page))
+ f2fs_inode_chksum_set(F2FS_P_SB(page), page);
+#endif
if (!PageDirty(page)) {
__set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
kmem_cache_free(free_nid_slab, i);
}
-static void scan_nat_page(struct f2fs_sb_info *sbi,
+static int scan_nat_page(struct f2fs_sb_info *sbi,
struct page *nat_page, nid_t start_nid)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
break;
blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
- f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
+
+ if (blk_addr == NEW_ADDR)
+ return -EINVAL;
+
if (blk_addr == NULL_ADDR) {
add_free_nid(sbi, start_nid, true, true);
} else {
spin_unlock(&NM_I(sbi)->nid_list_lock);
}
}
+
+ return 0;
}
static void scan_curseg_cache(struct f2fs_sb_info *sbi)
up_read(&nm_i->nat_tree_lock);
}
-static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
+static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
bool sync, bool mount)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- int i = 0;
+ int i = 0, ret;
nid_t nid = nm_i->next_scan_nid;
if (unlikely(nid >= nm_i->max_nid))
/* Enough entries */
if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
- return;
+ return 0;
if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
- return;
+ return 0;
if (!mount) {
/* try to find free nids in free_nid_bitmap */
scan_free_nid_bits(sbi);
if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
- return;
+ return 0;
}
/* readahead nat pages to be scanned */
nm_i->nat_block_bitmap)) {
struct page *page = get_current_nat_page(sbi, nid);
- scan_nat_page(sbi, page, nid);
+ ret = scan_nat_page(sbi, page, nid);
f2fs_put_page(page, 1);
+
+ if (ret) {
+ up_read(&nm_i->nat_tree_lock);
+ f2fs_bug_on(sbi, !mount);
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "NAT is corrupt, run fsck to fix it");
+ return -EINVAL;
+ }
}
nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
nm_i->ra_nid_pages, META_NAT, false);
+
+ return 0;
}
-void f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
+int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
{
+ int ret;
+
mutex_lock(&NM_I(sbi)->build_lock);
- __f2fs_build_free_nids(sbi, sync, mount);
+ ret = __f2fs_build_free_nids(sbi, sync, mount);
mutex_unlock(&NM_I(sbi)->build_lock);
+
+ return ret;
}
/*
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i = NULL;
retry:
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
f2fs_show_injection_info(FAULT_ALLOC_NID);
return false;
}
-#endif
+
spin_lock(&nm_i->nid_list_lock);
if (unlikely(nm_i->available_nids == 0)) {
struct dnode_of_data dn;
struct node_info ni;
struct page *xpage;
+ int err;
if (!prev_xnid)
goto recover_xnid;
/* 1: invalidate the previous xattr nid */
- f2fs_get_node_info(sbi, prev_xnid, &ni);
+ err = f2fs_get_node_info(sbi, prev_xnid, &ni);
+ if (err)
+ return err;
+
f2fs_invalidate_blocks(sbi, ni.blk_addr);
dec_valid_node_count(sbi, inode, false);
set_node_addr(sbi, &ni, NULL_ADDR, false);
nid_t ino = ino_of_node(page);
struct node_info old_ni, new_ni;
struct page *ipage;
+ int err;
- f2fs_get_node_info(sbi, ino, &old_ni);
+ err = f2fs_get_node_info(sbi, ino, &old_ni);
+ if (err)
+ return err;
if (unlikely(old_ni.blk_addr != NULL_ADDR))
return -EINVAL;
return 0;
}
-void f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
+int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
unsigned int segno, struct f2fs_summary_block *sum)
{
struct f2fs_node *rn;
for (idx = addr; idx < addr + nrpages; idx++) {
struct page *page = f2fs_get_tmp_page(sbi, idx);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
rn = F2FS_NODE(page);
sum_entry->nid = rn->footer.nid;
sum_entry->version = 0;
invalidate_mapping_pages(META_MAPPING(sbi), addr,
addr + nrpages);
}
+ return 0;
}
static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
nid_t set_idx = 0;
LIST_HEAD(sets);
+ /* during unmount, let's flush nat_bits before checking dirty_nat_cnt */
+ if (enabled_nat_bits(sbi, cpc)) {
+ down_write(&nm_i->nat_tree_lock);
+ remove_nats_in_journal(sbi);
+ up_write(&nm_i->nat_tree_lock);
+ }
+
if (!nm_i->dirty_nat_cnt)
return;
nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
nm_i->nat_bits_blocks;
for (i = 0; i < nm_i->nat_bits_blocks; i++) {
- struct page *page = f2fs_get_meta_page(sbi, nat_bits_addr++);
+ struct page *page;
+
+ page = f2fs_get_meta_page(sbi, nat_bits_addr++);
+ if (IS_ERR(page)) {
+ disable_nat_bits(sbi, true);
+ return PTR_ERR(page);
+ }
memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
page_address(page), F2FS_BLKSIZE);
INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
INIT_LIST_HEAD(&nm_i->nat_entries);
+ spin_lock_init(&nm_i->nat_list_lock);
mutex_init(&nm_i->build_lock);
spin_lock_init(&nm_i->nid_list_lock);
for (i = 0; i < nm_i->nat_blocks; i++) {
nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
- NAT_ENTRY_BITMAP_SIZE_ALIGNED, GFP_KERNEL);
- if (!nm_i->free_nid_bitmap)
+ f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
+ if (!nm_i->free_nid_bitmap[i])
return -ENOMEM;
}
/* load free nid status from nat_bits table */
load_free_nid_bitmap(sbi);
- f2fs_build_free_nids(sbi, true, true);
- return 0;
+ return f2fs_build_free_nids(sbi, true, true);
}
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
unsigned idx;
nid = nat_get_nid(natvec[found - 1]) + 1;
- for (idx = 0; idx < found; idx++)
+ for (idx = 0; idx < found; idx++) {
+ spin_lock(&nm_i->nat_list_lock);
+ list_del(&natvec[idx]->list);
+ spin_unlock(&nm_i->nat_list_lock);
+
__del_from_nat_cache(nm_i, natvec[idx]);
+ }
}
f2fs_bug_on(sbi, nm_i->nat_cnt);
sizeof(struct nat_entry_set));
if (!nat_entry_set_slab)
goto destroy_free_nid;
+
+ fsync_node_entry_slab = f2fs_kmem_cache_create("fsync_node_entry",
+ sizeof(struct fsync_node_entry));
+ if (!fsync_node_entry_slab)
+ goto destroy_nat_entry_set;
return 0;
+destroy_nat_entry_set:
+ kmem_cache_destroy(nat_entry_set_slab);
destroy_free_nid:
kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
void f2fs_destroy_node_manager_caches(void)
{
+ kmem_cache_destroy(fsync_node_entry_slab);
kmem_cache_destroy(nat_entry_set_slab);
kmem_cache_destroy(free_nid_slab);
kmem_cache_destroy(nat_entry_slab);
return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
}
+static inline bool excess_dirty_nodes(struct f2fs_sb_info *sbi)
+{
+ return get_pages(sbi, F2FS_DIRTY_NODES) >= sbi->blocks_per_seg * 8;
+}
+
enum mem_type {
FREE_NIDS, /* indicates the free nid list */
NAT_ENTRIES, /* indicates the cached nat entry */
else
flag &= ~(0x1 << type);
rn->footer.flag = cpu_to_le32(flag);
+
+#ifdef CONFIG_F2FS_CHECK_FS
+ f2fs_inode_chksum_set(F2FS_P_SB(page), page);
+#endif
}
#define set_dentry_mark(page, mark) set_mark(page, mark, DENT_BIT_SHIFT)
#define set_fsync_mark(page, mark) set_mark(page, mark, FSYNC_BIT_SHIFT)
struct page *page = NULL;
block_t blkaddr;
unsigned int loop_cnt = 0;
- unsigned int free_blocks = sbi->user_block_count -
- valid_user_blocks(sbi);
+ unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
+ valid_user_blocks(sbi);
int err = 0;
/* get node pages in the current segment */
while (1) {
struct fsync_inode_entry *entry;
- if (!f2fs_is_valid_meta_blkaddr(sbi, blkaddr, META_POR))
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
return 0;
page = f2fs_get_tmp_page(sbi, blkaddr);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ break;
+ }
if (!is_recoverable_dnode(page))
break;
f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
- f2fs_get_node_info(sbi, dn.nid, &ni);
+ err = f2fs_get_node_info(sbi, dn.nid, &ni);
+ if (err)
+ goto err;
+
f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));
}
/* dest is valid block, try to recover from src to dest */
- if (f2fs_is_valid_meta_blkaddr(sbi, dest, META_POR)) {
+ if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
if (src == NULL_ADDR) {
err = f2fs_reserve_new_block(&dn);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- while (err)
+ while (err &&
+ IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
err = f2fs_reserve_new_block(&dn);
-#endif
/* We should not get -ENOSPC */
f2fs_bug_on(sbi, err);
if (err)
while (1) {
struct fsync_inode_entry *entry;
- if (!f2fs_is_valid_meta_blkaddr(sbi, blkaddr, META_POR))
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
break;
f2fs_ra_meta_pages_cond(sbi, blkaddr);
page = f2fs_get_tmp_page(sbi, blkaddr);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ break;
+ }
if (!is_recoverable_dnode(page)) {
f2fs_put_page(page, 1);
#endif
if (s_flags & MS_RDONLY) {
- f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "recover fsync data on readonly fs");
sbi->sb->s_flags &= ~MS_RDONLY;
}
err = -EAGAIN;
goto next;
}
- f2fs_get_node_info(sbi, dn.nid, &ni);
+
+ err = f2fs_get_node_info(sbi, dn.nid, &ni);
+ if (err) {
+ f2fs_put_dnode(&dn);
+ return err;
+ }
+
if (cur->old_addr == NEW_ADDR) {
f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
f2fs_update_data_blkaddr(&dn, NEW_ADDR);
int err;
f2fs_balance_fs(sbi, true);
- f2fs_lock_op(sbi);
+ down_write(&fi->i_gc_rwsem[WRITE]);
+
+ f2fs_lock_op(sbi);
set_inode_flag(inode, FI_ATOMIC_COMMIT);
mutex_lock(&fi->inmem_lock);
clear_inode_flag(inode, FI_ATOMIC_COMMIT);
f2fs_unlock_op(sbi);
+ up_write(&fi->i_gc_rwsem[WRITE]);
+
return err;
}
*/
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
f2fs_show_injection_info(FAULT_CHECKPOINT);
f2fs_stop_checkpoint(sbi, false);
}
-#endif
/* balance_fs_bg may still be pending */
if (need && excess_cached_nats(sbi))
else
f2fs_build_free_nids(sbi, false, false);
- if (!is_idle(sbi) && !excess_dirty_nats(sbi))
+ if (!is_idle(sbi) &&
+ (!excess_dirty_nats(sbi) && !excess_dirty_nodes(sbi)))
return;
/* checkpoint is the only way to shrink partial cached entries */
!f2fs_available_free_memory(sbi, INO_ENTRIES) ||
excess_prefree_segs(sbi) ||
excess_dirty_nats(sbi) ||
+ excess_dirty_nodes(sbi) ||
f2fs_time_over(sbi, CP_TIME)) {
if (test_opt(sbi, DATA_FLUSH)) {
struct blk_plug plug;
dc->len = len;
dc->ref = 0;
dc->state = D_PREP;
+ dc->issuing = 0;
dc->error = 0;
init_completion(&dc->wait);
list_add_tail(&dc->list, pend_list);
+ spin_lock_init(&dc->lock);
+ dc->bio_ref = 0;
atomic_inc(&dcc->discard_cmd_cnt);
dcc->undiscard_blks += len;
struct discard_cmd *dc)
{
if (dc->state == D_DONE)
- atomic_dec(&dcc->issing_discard);
+ atomic_sub(dc->issuing, &dcc->issing_discard);
list_del(&dc->list);
rb_erase(&dc->rb_node, &dcc->root);
struct discard_cmd *dc)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ unsigned long flags;
trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);
+ spin_lock_irqsave(&dc->lock, flags);
+ if (dc->bio_ref) {
+ spin_unlock_irqrestore(&dc->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&dc->lock, flags);
+
f2fs_bug_on(sbi, dc->ref);
if (dc->error == -EOPNOTSUPP)
static void f2fs_submit_discard_endio(struct bio *bio)
{
struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
+ unsigned long flags;
dc->error = bio->bi_error;
- dc->state = D_DONE;
- complete_all(&dc->wait);
+
+ spin_lock_irqsave(&dc->lock, flags);
+ dc->bio_ref--;
+ if (!dc->bio_ref && dc->state == D_SUBMIT) {
+ dc->state = D_DONE;
+ complete_all(&dc->wait);
+ }
+ spin_unlock_irqrestore(&dc->lock, flags);
bio_put(bio);
}
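With multi-bio discards, completion becomes reference-counted: each chunk submission bumps dc->bio_ref, intermediate chunks leave the command in D_PARTIAL, and only the endio that drops bio_ref to zero on a D_SUBMIT command may declare D_DONE. The invariant, condensed (a sketch of the two halves above, not new code):

	/* submit side, once per chunk */
	spin_lock_irqsave(&dc->lock, flags);
	dc->state = last_chunk ? D_SUBMIT : D_PARTIAL;
	dc->bio_ref++;
	spin_unlock_irqrestore(&dc->lock, flags);

	/* endio side, once per completed bio */
	spin_lock_irqsave(&dc->lock, flags);
	if (--dc->bio_ref == 0 && dc->state == D_SUBMIT) {
		dc->state = D_DONE;
		complete_all(&dc->wait);	/* every chunk has finished */
	}
	spin_unlock_irqrestore(&dc->lock, flags);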
/* common policy */
dpolicy->type = discard_type;
dpolicy->sync = true;
+ dpolicy->ordered = false;
dpolicy->granularity = granularity;
dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
dpolicy->io_aware = true;
dpolicy->sync = false;
+ dpolicy->ordered = true;
if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
dpolicy->granularity = 1;
dpolicy->max_interval = DEF_MIN_DISCARD_ISSUE_TIME;
}
}
-
+static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t lstart,
+ block_t start, block_t len);
/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
-static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
+static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy,
- struct discard_cmd *dc)
+ struct discard_cmd *dc,
+ unsigned int *issued)
{
+ struct block_device *bdev = dc->bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
+ unsigned int max_discard_blocks =
+ SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
&(dcc->fstrim_list) : &(dcc->wait_list);
- struct bio *bio = NULL;
int flag = dpolicy->sync ? REQ_SYNC : 0;
+ block_t lstart, start, len, total_len;
+ int err = 0;
if (dc->state != D_PREP)
- return;
+ return 0;
if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
- return;
+ return 0;
- trace_f2fs_issue_discard(dc->bdev, dc->start, dc->len);
+ trace_f2fs_issue_discard(bdev, dc->start, dc->len);
- dc->error = __blkdev_issue_discard(dc->bdev,
- SECTOR_FROM_BLOCK(dc->start),
- SECTOR_FROM_BLOCK(dc->len),
- GFP_NOFS, 0, &bio);
- if (!dc->error) {
- /* should keep before submission to avoid D_DONE right away */
- dc->state = D_SUBMIT;
- atomic_inc(&dcc->issued_discard);
- atomic_inc(&dcc->issing_discard);
- if (bio) {
- bio->bi_private = dc;
- bio->bi_end_io = f2fs_submit_discard_endio;
- submit_bio(flag, bio);
- list_move_tail(&dc->list, wait_list);
- __check_sit_bitmap(sbi, dc->start, dc->start + dc->len);
+ lstart = dc->lstart;
+ start = dc->start;
+ len = dc->len;
+ total_len = len;
+
+ dc->len = 0;
+
+ while (total_len && *issued < dpolicy->max_requests && !err) {
+ struct bio *bio = NULL;
+ unsigned long flags;
+ bool last = true;
- f2fs_update_iostat(sbi, FS_DISCARD, 1);
+ if (len > max_discard_blocks) {
+ len = max_discard_blocks;
+ last = false;
}
- } else {
- __remove_discard_cmd(sbi, dc);
+
+ (*issued)++;
+ if (*issued == dpolicy->max_requests)
+ last = true;
+
+ dc->len += len;
+
+ if (time_to_inject(sbi, FAULT_DISCARD)) {
+ f2fs_show_injection_info(FAULT_DISCARD);
+ err = -EIO;
+ goto submit;
+ }
+ err = __blkdev_issue_discard(bdev,
+ SECTOR_FROM_BLOCK(start),
+ SECTOR_FROM_BLOCK(len),
+ GFP_NOFS, 0, &bio);
+submit:
+ if (err) {
+ spin_lock_irqsave(&dc->lock, flags);
+ if (dc->state == D_PARTIAL)
+ dc->state = D_SUBMIT;
+ spin_unlock_irqrestore(&dc->lock, flags);
+
+ break;
+ }
+
+ f2fs_bug_on(sbi, !bio);
+
+ /*
+ * the state must be set before submission, otherwise the endio
+ * could mark this command D_DONE right away
+ */
+ spin_lock_irqsave(&dc->lock, flags);
+ if (last)
+ dc->state = D_SUBMIT;
+ else
+ dc->state = D_PARTIAL;
+ dc->bio_ref++;
+ spin_unlock_irqrestore(&dc->lock, flags);
+
+ atomic_inc(&dcc->issing_discard);
+ dc->issuing++;
+ list_move_tail(&dc->list, wait_list);
+
+ /* sanity check on discard range */
+ __check_sit_bitmap(sbi, start, start + len);
+
+ bio->bi_private = dc;
+ bio->bi_end_io = f2fs_submit_discard_endio;
+ submit_bio(flag, bio);
+
+ atomic_inc(&dcc->issued_discard);
+
+ f2fs_update_iostat(sbi, FS_DISCARD, 1);
+
+ lstart += len;
+ start += len;
+ total_len -= len;
+ len = total_len;
}
+
+ if (!err && len)
+ __update_discard_tree_range(sbi, bdev, lstart, start, len);
+ return err;
}
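To see the splitting loop above in numbers, assume (hypothetically) the queue caps discards at 128 blocks and a 300-block command is issued with a budget of 3 requests left: it goes out as 128 + 128 + 44 blocks, and only the final chunk is marked D_SUBMIT. A standalone rehearsal of that arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned int total_len = 300, len, start = 1000;
		unsigned int max_discard_blocks = 128;
		unsigned int issued = 0, max_requests = 3;

		len = total_len;
		while (total_len && issued < max_requests) {
			int last = 1;

			if (len > max_discard_blocks) {
				len = max_discard_blocks;
				last = 0;	/* more chunks will follow */
			}
			if (++issued == max_requests)
				last = 1;	/* budget exhausted: finalize */
			printf("discard [%u, %u) state=%s\n", start, start + len,
			       last ? "D_SUBMIT" : "D_PARTIAL");
			start += len;
			total_len -= len;
			len = total_len;
		}
		return 0;
	}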
static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
struct discard_cmd *dc;
struct discard_info di = {0};
struct rb_node **insert_p = NULL, *insert_parent = NULL;
+ struct request_queue *q = bdev_get_queue(bdev);
+ unsigned int max_discard_blocks =
+ SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
block_t end = lstart + len;
- mutex_lock(&dcc->cmd_lock);
-
dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
NULL, lstart,
(struct rb_entry **)&prev_dc,
if (prev_dc && prev_dc->state == D_PREP &&
prev_dc->bdev == bdev &&
- __is_discard_back_mergeable(&di, &prev_dc->di)) {
+ __is_discard_back_mergeable(&di, &prev_dc->di,
+ max_discard_blocks)) {
prev_dc->di.len += di.len;
dcc->undiscard_blks += di.len;
__relocate_discard_cmd(dcc, prev_dc);
if (next_dc && next_dc->state == D_PREP &&
next_dc->bdev == bdev &&
- __is_discard_front_mergeable(&di, &next_dc->di)) {
+ __is_discard_front_mergeable(&di, &next_dc->di,
+ max_discard_blocks)) {
next_dc->di.lstart = di.lstart;
next_dc->di.len += di.len;
next_dc->di.start = di.start;
node = rb_next(&prev_dc->rb_node);
next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
}
-
- mutex_unlock(&dcc->cmd_lock);
}
static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
blkstart -= FDEV(devi).start_blk;
}
+ mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
+ mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
return 0;
}
+static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
+ struct discard_policy *dpolicy)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
+ struct rb_node **insert_p = NULL, *insert_parent = NULL;
+ struct discard_cmd *dc;
+ struct blk_plug plug;
+ unsigned int pos = dcc->next_pos;
+ unsigned int issued = 0;
+ bool io_interrupted = false;
+
+ mutex_lock(&dcc->cmd_lock);
+ dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
+ NULL, pos,
+ (struct rb_entry **)&prev_dc,
+ (struct rb_entry **)&next_dc,
+ &insert_p, &insert_parent, true);
+ if (!dc)
+ dc = next_dc;
+
+ blk_start_plug(&plug);
+
+ while (dc) {
+ struct rb_node *node;
+ int err = 0;
+
+ if (dc->state != D_PREP)
+ goto next;
+
+ if (dpolicy->io_aware && !is_idle(sbi)) {
+ io_interrupted = true;
+ break;
+ }
+
+ dcc->next_pos = dc->lstart + dc->len;
+ err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
+
+ if (issued >= dpolicy->max_requests)
+ break;
+next:
+ node = rb_next(&dc->rb_node);
+ if (err)
+ __remove_discard_cmd(sbi, dc);
+ dc = rb_entry_safe(node, struct discard_cmd, rb_node);
+ }
+
+ blk_finish_plug(&plug);
+
+ if (!dc)
+ dcc->next_pos = 0;
+
+ mutex_unlock(&dcc->cmd_lock);
+
+ if (!issued && io_interrupted)
+ issued = -1;
+
+ return issued;
+}
+
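The ordered path issues small background discards in LBA order by walking the rb-tree from dcc->next_pos, recording where it stopped so the next pass resumes there instead of rescanning from zero; once the walk runs off the end of the tree, next_pos wraps back to 0. The cursor discipline, compressed (lookup_at_or_after and rb_next_entry are hypothetical shorthand for the f2fs_lookup_rb_tree_ret/rb_next calls above; state and io_aware checks elided):

	dc = lookup_at_or_after(&dcc->root, dcc->next_pos);
	while (dc && issued < dpolicy->max_requests) {
		dcc->next_pos = dc->lstart + dc->len;	/* resume point */
		__submit_discard_cmd(sbi, dpolicy, dc, &issued);
		dc = rb_next_entry(dc);
	}
	if (!dc)
		dcc->next_pos = 0;			/* wrapped around */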
static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy)
{
struct list_head *pend_list;
struct discard_cmd *dc, *tmp;
struct blk_plug plug;
- int i, iter = 0, issued = 0;
+ int i, issued = 0;
bool io_interrupted = false;
for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
if (i + 1 < dpolicy->granularity)
break;
+
+ if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
+ return __issue_discard_cmd_orderly(sbi, dpolicy);
+
pend_list = &dcc->pend_list[i];
mutex_lock(&dcc->cmd_lock);
if (list_empty(pend_list))
goto next;
- f2fs_bug_on(sbi,
- !f2fs_check_rb_tree_consistence(sbi, &dcc->root));
+ if (unlikely(dcc->rbtree_check))
+ f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
+ &dcc->root));
blk_start_plug(&plug);
list_for_each_entry_safe(dc, tmp, pend_list, list) {
f2fs_bug_on(sbi, dc->state != D_PREP);
if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
!is_idle(sbi)) {
io_interrupted = true;
- goto skip;
+ break;
}
- __submit_discard_cmd(sbi, dpolicy, dc);
- issued++;
-skip:
- if (++iter >= dpolicy->max_requests)
+ __submit_discard_cmd(sbi, dpolicy, dc, &issued);
+
+ if (issued >= dpolicy->max_requests)
break;
}
blk_finish_plug(&plug);
next:
mutex_unlock(&dcc->cmd_lock);
- if (iter >= dpolicy->max_requests)
+ if (issued >= dpolicy->max_requests || io_interrupted)
break;
}
return trimmed;
}
-static void __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
+static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy)
{
struct discard_policy dp;
+ unsigned int discard_blks;
- if (dpolicy) {
- __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
- return;
- }
+ if (dpolicy)
+ return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
/* wait all */
__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
- __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
+ discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
- __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
+ discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
+
+ return discard_blks;
}
/* This should be covered by global mutex, &sit_i->sentry_lock */
/* just to make sure there are no pending discard commands */
__wait_all_discard_cmd(sbi, NULL);
+
+ f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
return dropped;
}
unsigned int start = 0, end = -1;
unsigned int secno, start_segno;
bool force = (cpc->reason & CP_DISCARD);
+ bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
mutex_lock(&dirty_i->seglist_lock);
while (1) {
int i;
+
+ if (need_align && end != -1)
+ end--;
start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
if (start >= MAIN_SEGS(sbi))
break;
end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
start + 1);
- for (i = start; i < end; i++)
- clear_bit(i, prefree_map);
+ if (need_align) {
+ start = rounddown(start, sbi->segs_per_sec);
+ end = roundup(end, sbi->segs_per_sec);
+ }
- dirty_i->nr_dirty[PRE] -= end - start;
+ for (i = start; i < end; i++) {
+ if (test_and_clear_bit(i, prefree_map))
+ dirty_i->nr_dirty[PRE]--;
+ }
if (!test_opt(sbi, DISCARD))
continue;
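A quick numeric check of the section alignment above (hypothetical segs_per_sec = 4): a prefree run of segments [5, 10) widens to the enclosing sections [4, 12), and test_and_clear_bit keeps nr_dirty[PRE] honest for segments that were not actually set. The rounddown/roundup arithmetic on its own:

	#include <stdio.h>

	int main(void)
	{
		unsigned int segs_per_sec = 4, start = 5, end = 10;

		start = start / segs_per_sec * segs_per_sec;	/* rounddown -> 4 */
		end = (end + segs_per_sec - 1) / segs_per_sec
						* segs_per_sec;	/* roundup -> 12 */
		printf("discard-aligned segment range: [%u, %u)\n", start, end);
		return 0;
	}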
dcc->nr_discards = 0;
dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
dcc->undiscard_blks = 0;
+ dcc->next_pos = 0;
dcc->root = RB_ROOT;
+ dcc->rbtree_check = false;
init_waitqueue_head(&dcc->discard_wait_queue);
SM_I(sbi)->dcc_info = dcc;
if (addr == NEW_ADDR)
return;
+ invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
+
/* add it into sit main buffer */
down_write(&sit_i->sentry_lock);
struct seg_entry *se;
bool is_cp = false;
- if (!is_valid_blkaddr(blkaddr))
+ if (!is_valid_data_blkaddr(sbi, blkaddr))
return true;
down_read(&sit_i->sentry_lock);
*/
struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
- return f2fs_get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
+ return f2fs_get_meta_page_nofail(sbi, GET_SUM_BLOCK(sbi, segno));
}
void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
return has_candidate;
}
-static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
+static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy,
unsigned int start, unsigned int end)
{
struct discard_cmd *dc;
struct blk_plug plug;
int issued;
+ unsigned int trimmed = 0;
next:
issued = 0;
mutex_lock(&dcc->cmd_lock);
- f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi, &dcc->root));
+ if (unlikely(dcc->rbtree_check))
+ f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
+ &dcc->root));
dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
NULL, start,
while (dc && dc->lstart <= end) {
struct rb_node *node;
+ int err = 0;
if (dc->len < dpolicy->granularity)
goto skip;
goto skip;
}
- __submit_discard_cmd(sbi, dpolicy, dc);
+ err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
- if (++issued >= dpolicy->max_requests) {
+ if (issued >= dpolicy->max_requests) {
start = dc->lstart + dc->len;
+ if (err)
+ __remove_discard_cmd(sbi, dc);
+
blk_finish_plug(&plug);
mutex_unlock(&dcc->cmd_lock);
- __wait_all_discard_cmd(sbi, NULL);
+ trimmed += __wait_all_discard_cmd(sbi, NULL);
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto next;
}
skip:
node = rb_next(&dc->rb_node);
+ if (err)
+ __remove_discard_cmd(sbi, dc);
dc = rb_entry_safe(node, struct discard_cmd, rb_node);
if (fatal_signal_pending(current))
blk_finish_plug(&plug);
mutex_unlock(&dcc->cmd_lock);
+
+ return trimmed;
}
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
struct discard_policy dpolicy;
unsigned long long trimmed = 0;
int err = 0;
+ bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
return -EINVAL;
- if (end <= MAIN_BLKADDR(sbi))
- return -EINVAL;
+ if (end < MAIN_BLKADDR(sbi))
+ goto out;
if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
f2fs_msg(sbi->sb, KERN_WARNING,
start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
GET_SEGNO(sbi, end);
+ if (need_align) {
+ start_segno = rounddown(start_segno, sbi->segs_per_sec);
+ end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
+ }
cpc.reason = CP_DISCARD;
cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
if (err)
goto out;
- start_block = START_BLOCK(sbi, start_segno);
- end_block = START_BLOCK(sbi, end_segno + 1);
-
- __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
- __issue_discard_cmd_range(sbi, &dpolicy, start_block, end_block);
-
/*
* We filed discard candidates, but actually we don't need to wait for
* all of them, since they'll be issued in idle time along with runtime
* discard option. User configuration looks like using runtime discard
* or periodic fstrim instead of it.
*/
- if (!test_opt(sbi, DISCARD)) {
- trimmed = __wait_discard_cmd_range(sbi, &dpolicy,
+ if (test_opt(sbi, DISCARD))
+ goto out;
+
+ start_block = START_BLOCK(sbi, start_segno);
+ end_block = START_BLOCK(sbi, end_segno + 1);
+
+ __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
+ trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
+ start_block, end_block);
+
+ trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
start_block, end_block);
- range->len = F2FS_BLK_TO_BYTES(trimmed);
- }
out:
+ if (!err)
+ range->len = F2FS_BLK_TO_BYTES(trimmed);
return err;
}
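
A quick sanity check of the reporting path (assuming the common 4 KiB block size, which this hunk does not state): if issuing plus waiting accounts for 256 blocks, F2FS_BLK_TO_BYTES(256) puts 256 * 4096 = 1 MiB into range->len; with the runtime discard option enabled the function bails out before issuing and reports 0 bytes, since the candidates are left for the discard thread to issue in idle time.
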
return CURSEG_COLD_DATA;
if (file_is_hot(inode) ||
is_inode_flag_set(inode, FI_HOT_DATA) ||
- is_inode_flag_set(inode, FI_ATOMIC_FILE) ||
- is_inode_flag_set(inode, FI_VOLATILE_FILE))
+ f2fs_is_atomic_file(inode) ||
+ f2fs_is_volatile_file(inode))
return CURSEG_HOT_DATA;
/* f2fs_rw_hint_to_seg_type(inode->i_write_hint); */
return CURSEG_WARM_DATA;
reallocate:
f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
&fio->new_blkaddr, sum, type, fio, true);
+ if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
+ invalidate_mapping_pages(META_MAPPING(fio->sbi),
+ fio->old_blkaddr, fio->old_blkaddr);
/* writeout dirty page into bdev */
f2fs_submit_page_write(fio);
{
struct f2fs_sb_info *sbi = fio->sbi;
struct f2fs_summary sum;
- struct node_info ni;
f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
- f2fs_get_node_info(sbi, dn->nid, &ni);
- set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
+ set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
do_write_page(&sum, fio);
f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
if (!recover_curseg || recover_newaddr)
update_sit_entry(sbi, new_blkaddr, 1);
- if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+ if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
+ invalidate_mapping_pages(META_MAPPING(sbi),
+ old_blkaddr, old_blkaddr);
update_sit_entry(sbi, old_blkaddr, -1);
+ }
locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
{
struct page *cpage;
- if (!is_valid_blkaddr(blkaddr))
+ if (!is_valid_data_blkaddr(sbi, blkaddr))
return;
cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
}
}
-static void read_compacted_summaries(struct f2fs_sb_info *sbi)
+static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
struct curseg_info *seg_i;
start = start_sum_block(sbi);
page = f2fs_get_meta_page(sbi, start++);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
kaddr = (unsigned char *)page_address(page);
/* Step 1: restore nat cache */
page = NULL;
page = f2fs_get_meta_page(sbi, start++);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
kaddr = (unsigned char *)page_address(page);
offset = 0;
}
}
f2fs_put_page(page, 1);
+ return 0;
}
static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
unsigned short blk_off;
unsigned int segno = 0;
block_t blk_addr = 0;
+ int err = 0;
/* get segment number and block addr */
if (IS_DATASEG(type)) {
}
new = f2fs_get_meta_page(sbi, blk_addr);
+ if (IS_ERR(new))
+ return PTR_ERR(new);
sum = (struct f2fs_summary_block *)page_address(new);
if (IS_NODESEG(type)) {
ns->ofs_in_node = 0;
}
} else {
- f2fs_restore_node_summary(sbi, segno, sum);
+ err = f2fs_restore_node_summary(sbi, segno, sum);
+ if (err)
+ goto out;
}
}
curseg->alloc_type = ckpt->alloc_type[type];
curseg->next_blkoff = blk_off;
mutex_unlock(&curseg->curseg_mutex);
+out:
f2fs_put_page(new, 1);
- return 0;
+ return err;
}
static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
META_CP, true);
/* restore for compacted data summary */
- read_compacted_summaries(sbi);
+ err = read_compacted_summaries(sbi);
+ if (err)
+ return err;
type = CURSEG_HOT_NODE;
}
static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
unsigned int segno)
{
- return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
+ return f2fs_get_meta_page_nofail(sbi, current_sit_addr(sbi, segno));
}
static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
+ sm_info->min_seq_blocks = sbi->blocks_per_seg * sbi->segs_per_sec;
sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
sm_info->min_ssr_sections = reserved_sections(sbi);
(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))
#define GET_SEGNO(sbi, blk_addr) \
- ((!is_valid_blkaddr(blk_addr)) ? \
+ ((!is_valid_data_blkaddr(sbi, blk_addr)) ? \
NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \
GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define BLKS_PER_SEC(sbi) \
#define IS_DUMMY_WRITTEN_PAGE(page) \
(page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE)
-#define MAX_SKIP_ATOMIC_COUNT 16
+#define MAX_SKIP_GC_COUNT 16
struct inmem_pages {
struct list_head list;
{
struct f2fs_sb_info *sbi = fio->sbi;
- if (PAGE_TYPE_OF_BIO(fio->type) == META &&
- (!is_read_io(fio->op) || fio->is_meta))
- BUG_ON(blk_addr < SEG0_BLKADDR(sbi) ||
- blk_addr >= MAIN_BLKADDR(sbi));
+ if (__is_meta_io(fio))
+ verify_blkaddr(sbi, blk_addr, META_GENERIC);
else
- BUG_ON(blk_addr < MAIN_BLKADDR(sbi) ||
- blk_addr >= MAX_BLKADDR(sbi));
+ verify_blkaddr(sbi, blk_addr, DATA_GENERIC);
}
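
The deleted BUG_ON()s document the two legal address ranges; verify_blkaddr() is assumed to enforce the same bounds while flagging the image for fsck rather than crashing. A sketch of the check as a hypothetical helper, reusing the macros from the removed lines:

	static inline bool blkaddr_in(struct f2fs_sb_info *sbi, block_t a,
				      block_t lo, block_t hi)
	{
		return a >= lo && a < hi;
	}

	/* META_GENERIC: blkaddr_in(sbi, a, SEG0_BLKADDR(sbi), MAIN_BLKADDR(sbi)) */
	/* DATA_GENERIC: blkaddr_in(sbi, a, MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi)) */
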
/*
#ifdef CONFIG_F2FS_FAULT_INJECTION
-char *fault_name[FAULT_MAX] = {
+char *f2fs_fault_name[FAULT_MAX] = {
[FAULT_KMALLOC] = "kmalloc",
[FAULT_KVMALLOC] = "kvmalloc",
[FAULT_PAGE_ALLOC] = "page alloc",
[FAULT_TRUNCATE] = "truncate fail",
[FAULT_IO] = "IO error",
[FAULT_CHECKPOINT] = "checkpoint error",
+ [FAULT_DISCARD] = "discard error",
};
-static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
- unsigned int rate)
+void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+ unsigned int type)
{
struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
if (rate) {
atomic_set(&ffi->inject_ops, 0);
ffi->inject_rate = rate;
- ffi->inject_type = (1 << FAULT_MAX) - 1;
- } else {
- memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
+
+ if (type)
+ ffi->inject_type = type;
+
+ if (!rate && !type)
+ memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
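
With the split above, fault_injection=%d sets the rate for every supported type, fault_type=%d narrows the type mask without touching the rate, and clearing both resets the whole struct. A minimal sketch of how one injection decision could consult both fields (a hypothetical helper, not the in-tree one):

	static bool should_inject(struct f2fs_fault_info *ffi, int type)
	{
		if (!ffi->inject_rate || !(ffi->inject_type & (1 << type)))
			return false;
		/* fire once every inject_rate eligible operations */
		if (atomic_inc_return(&ffi->inject_ops) >= ffi->inject_rate) {
			atomic_set(&ffi->inject_ops, 0);
			return true;
		}
		return false;
	}
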
#endif
Opt_mode,
Opt_io_size_bits,
Opt_fault_injection,
+ Opt_fault_type,
Opt_lazytime,
Opt_nolazytime,
Opt_quota,
{Opt_mode, "mode=%s"},
{Opt_io_size_bits, "io_bits=%u"},
{Opt_fault_injection, "fault_injection=%u"},
+ {Opt_fault_type, "fault_type=%u"},
{Opt_lazytime, "lazytime"},
{Opt_nolazytime, "nolazytime"},
{Opt_quota, "quota"},
"QUOTA feature is enabled, so ignore jquota_fmt");
F2FS_OPTION(sbi).s_jquota_fmt = 0;
}
- if (f2fs_sb_has_quota_ino(sbi->sb) && f2fs_readonly(sbi->sb)) {
- f2fs_msg(sbi->sb, KERN_INFO,
- "Filesystem with quota feature cannot be mounted RDWR "
- "without CONFIG_QUOTA");
- return -1;
- }
return 0;
}
#endif
if (args->from && match_int(args, &arg))
return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
- f2fs_build_fault_attr(sbi, arg);
+ f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
+ set_opt(sbi, FAULT_INJECTION);
+#else
+ f2fs_msg(sb, KERN_INFO,
+ "FAULT_INJECTION was not selected");
+#endif
+ break;
+ case Opt_fault_type:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ f2fs_build_fault_attr(sbi, 0, arg);
set_opt(sbi, FAULT_INJECTION);
#else
f2fs_msg(sb, KERN_INFO,
#ifdef CONFIG_QUOTA
if (f2fs_check_quota_options(sbi))
return -EINVAL;
+#else
+ if (f2fs_sb_has_quota_ino(sbi->sb) && !f2fs_readonly(sbi->sb)) {
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Filesystem with quota feature cannot be mounted RDWR "
+ "without CONFIG_QUOTA");
+ return -EINVAL;
+ }
+ if (f2fs_sb_has_project_quota(sbi->sb) && !f2fs_readonly(sbi->sb)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Filesystem with project quota feature cannot be "
+ "mounted RDWR without CONFIG_QUOTA");
+ return -EINVAL;
+ }
#endif
if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
/* our cp_error case, we can wait for any writeback page */
f2fs_flush_merged_writes(sbi);
+ f2fs_wait_on_all_pages_writeback(sbi);
+
+ f2fs_bug_on(sbi, sbi->fsync_node_num);
+
iput(sbi->node_inode);
iput(sbi->meta_inode);
if (F2FS_IO_SIZE_BITS(sbi))
seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
#ifdef CONFIG_F2FS_FAULT_INJECTION
- if (test_opt(sbi, FAULT_INJECTION))
+ if (test_opt(sbi, FAULT_INJECTION)) {
seq_printf(seq, ",fault_injection=%u",
F2FS_OPTION(sbi).fault_info.inject_rate);
+ seq_printf(seq, ",fault_type=%u",
+ F2FS_OPTION(sbi).fault_info.inject_type);
+ }
#endif
#ifdef CONFIG_QUOTA
if (test_opt(sbi, QUOTA))
seq_printf(seq, ",fsync_mode=%s", "posix");
else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
seq_printf(seq, ",fsync_mode=%s", "strict");
+ else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
+ seq_printf(seq, ",fsync_mode=%s", "nobarrier");
return 0;
}
F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
F2FS_OPTION(sbi).test_dummy_encryption = false;
- sbi->readdir_ra = 1;
+ F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
+ F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
set_opt(sbi, BG_GC);
set_opt(sbi, INLINE_XATTR);
set_opt(sbi, NOHEAP);
sbi->sb->s_flags |= MS_LAZYTIME;
set_opt(sbi, FLUSH_MERGE);
- if (f2fs_sb_has_blkzoned(sbi->sb)) {
- set_opt_mode(sbi, F2FS_MOUNT_LFS);
+ if (blk_queue_discard(bdev_get_queue(sbi->sb->s_bdev)))
set_opt(sbi, DISCARD);
- } else {
+ if (f2fs_sb_has_blkzoned(sbi->sb))
+ set_opt_mode(sbi, F2FS_MOUNT_LFS);
+ else
set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
- }
#ifdef CONFIG_F2FS_FS_XATTR
set_opt(sbi, XATTR_USER);
set_opt(sbi, POSIX_ACL);
#endif
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- f2fs_build_fault_attr(sbi, 0);
-#endif
+ f2fs_build_fault_attr(sbi, 0, 0);
}
#ifdef CONFIG_QUOTA
return 1;
}
- if (secs_per_zone > total_sections) {
+ if (secs_per_zone > total_sections || !secs_per_zone) {
f2fs_msg(sb, KERN_INFO,
- "Wrong secs_per_zone (%u > %u)",
+ "Wrong secs_per_zone / total_sections (%u, %u)",
secs_per_zone, total_sections);
return 1;
}
unsigned int sit_segs, nat_segs;
unsigned int sit_bitmap_size, nat_bitmap_size;
unsigned int log_blocks_per_seg;
+ unsigned int segment_count_main;
+ unsigned int cp_pack_start_sum, cp_payload;
+ block_t user_block_count;
int i;
total = le32_to_cpu(raw_super->segment_count);
return 1;
}
+ user_block_count = le64_to_cpu(ckpt->user_block_count);
+ segment_count_main = le32_to_cpu(raw_super->segment_count_main);
+ log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+ if (!user_block_count || user_block_count >=
+ segment_count_main << log_blocks_per_seg) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Wrong user_block_count: %u", user_block_count);
+ return 1;
+ }
+
main_segs = le32_to_cpu(raw_super->segment_count_main);
blocks_per_seg = sbi->blocks_per_seg;
sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
- log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
return 1;
}
+ cp_pack_start_sum = __start_sum_addr(sbi);
+ cp_payload = __cp_payload(sbi);
+ if (cp_pack_start_sum < cp_payload + 1 ||
+ cp_pack_start_sum > blocks_per_seg - 1 -
+ NR_CURSEG_TYPE) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Wrong cp_pack_start_sum: %u",
+ cp_pack_start_sum);
+ return 1;
+ }
+
if (unlikely(f2fs_cp_error(sbi))) {
f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
return 1;
sm_i->dcc_info->discard_granularity = 1;
sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
}
+
+ sbi->readdir_ra = 1;
}
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_fs_info = sbi;
sbi->raw_super = raw_super;
- F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
- F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
-
/* precompute checksum seed for metadata */
if (f2fs_sb_has_inode_chksum(sb))
sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
/* init f2fs-specific super block info */
sbi->valid_super_block = valid_super_block;
mutex_init(&sbi->gc_mutex);
+ mutex_init(&sbi->writepages);
mutex_init(&sbi->cp_mutex);
init_rwsem(&sbi->node_write);
init_rwsem(&sbi->node_change);
f2fs_init_ino_entry_info(sbi);
+ f2fs_init_fsync_node_info(sbi);
+
/* setup f2fs internal modules */
err = f2fs_build_segment_manager(sbi);
if (err) {
err = PTR_ERR(root);
goto free_stats;
}
- if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
+ if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
+ !root->i_size || !root->i_nlink) {
iput(root);
err = -EINVAL;
- goto free_node_inode;
+ goto free_stats;
}
sb->s_root = d_make_root(root); /* allocate root dentry */
goto free_root_inode;
#ifdef CONFIG_QUOTA
- /*
- * Turn on quotas which were not enabled for read-only mounts if
- * filesystem has quota feature, so that they are updated correctly.
- */
+ /* Enable quota usage during mount */
if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb)) {
err = f2fs_enable_quotas(sb);
if (err) {
static void kill_f2fs_super(struct super_block *sb)
{
if (sb->s_root) {
- set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
- f2fs_stop_gc_thread(F2FS_SB(sb));
- f2fs_stop_discard_thread(F2FS_SB(sb));
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+
+ set_sbi_flag(sbi, SBI_IS_CLOSE);
+ f2fs_stop_gc_thread(sbi);
+ f2fs_stop_discard_thread(sbi);
+
+ if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
+ !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
+ struct cp_control cpc = {
+ .reason = CP_UMOUNT,
+ };
+ f2fs_write_checkpoint(sbi, &cpc);
+ }
}
kill_block_super(sb);
}
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/compiler.h>
#include <linux/proc_fs.h>
#include <linux/f2fs_fs.h>
#include <linux/seq_file.h>
if (t >= 1) {
sbi->gc_mode = GC_URGENT;
if (sbi->gc_thread) {
+ sbi->gc_thread->gc_wake = 1;
wake_up_interruptible_all(
&sbi->gc_thread->gc_wait_queue_head);
wake_up_discard_thread(sbi, true);
bool gc_entry = (!strcmp(a->attr.name, "gc_urgent") ||
a->struct_type == GC_THREAD);
- if (gc_entry)
- down_read(&sbi->sb->s_umount);
+ if (gc_entry) {
+ if (!down_read_trylock(&sbi->sb->s_umount))
+ return -EAGAIN;
+ }
ret = __sbi_store(a, sbi, buf, count);
if (gc_entry)
up_read(&sbi->sb->s_umount);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_seq_blocks, min_seq_blocks);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_hot_blocks, min_hot_blocks);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ssr_sections, min_ssr_sections);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
ATTR_LIST(ipu_policy),
ATTR_LIST(min_ipu_util),
ATTR_LIST(min_fsync_blocks),
+ ATTR_LIST(min_seq_blocks),
ATTR_LIST(min_hot_blocks),
ATTR_LIST(min_ssr_sections),
ATTR_LIST(max_victim_search),
.kset = &f2fs_kset,
};
-static int segment_info_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
+ void *offset)
{
struct super_block *sb = seq->private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
return 0;
}
-static int segment_bits_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
+ void *offset)
{
struct super_block *sb = seq->private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
return 0;
}
-static int iostat_info_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused iostat_info_seq_show(struct seq_file *seq,
+ void *offset)
{
struct super_block *sb = seq->private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
return 0;
}
+static int __maybe_unused victim_bits_seq_show(struct seq_file *seq,
+ void *offset)
+{
+ struct super_block *sb = seq->private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ int i;
+
+ seq_puts(seq, "format: victim_secmap bitmaps\n");
+
+ for (i = 0; i < MAIN_SECS(sbi); i++) {
+ if ((i % 10) == 0)
+ seq_printf(seq, "%-10d", i);
+ seq_printf(seq, "%d", test_bit(i, dirty_i->victim_secmap) ? 1 : 0);
+ if ((i % 10) == 9 || i == (MAIN_SECS(sbi) - 1))
+ seq_putc(seq, '\n');
+ else
+ seq_putc(seq, ' ');
+ }
+ return 0;
+}
+
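
Tracing the seq_printf() calls above, the new proc file prints ten bits per row behind a left-justified, width-10 row index; illustrative output (bit values invented):

	format: victim_secmap bitmaps
	0         0 0 0 0 1 0 0 0 0 0
	10        0 1 0 0 0 0 0 0 0 0
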
#define F2FS_PROC_FILE_DEF(_name) \
static int _name##_open_fs(struct inode *inode, struct file *file) \
{ \
F2FS_PROC_FILE_DEF(segment_info);
F2FS_PROC_FILE_DEF(segment_bits);
F2FS_PROC_FILE_DEF(iostat_info);
+F2FS_PROC_FILE_DEF(victim_bits);
int __init f2fs_init_sysfs(void)
{
&f2fs_seq_segment_bits_fops, sb);
proc_create_data("iostat_info", S_IRUGO, sbi->s_proc,
&f2fs_seq_iostat_info_fops, sb);
+ proc_create_data("victim_bits", S_IRUGO, sbi->s_proc,
+ &f2fs_seq_victim_bits_fops, sb);
}
return 0;
}
remove_proc_entry("iostat_info", sbi->s_proc);
remove_proc_entry("segment_info", sbi->s_proc);
remove_proc_entry("segment_bits", sbi->s_proc);
+ remove_proc_entry("victim_bits", sbi->s_proc);
remove_proc_entry(sbi->sb->s_id, f2fs_proc_root);
}
kobject_del(&sbi->s_kobj);
return -EOPNOTSUPP;
break;
case F2FS_XATTR_INDEX_TRUSTED:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- break;
case F2FS_XATTR_INDEX_SECURITY:
break;
default:
return -EOPNOTSUPP;
break;
case F2FS_XATTR_INDEX_TRUSTED:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- break;
case F2FS_XATTR_INDEX_SECURITY:
break;
default:
size_t size, int flags)
{
struct inode *inode = d_inode(dentry);
+ unsigned char old_advise = F2FS_I(inode)->i_advise;
+ unsigned char new_advise;
if (strcmp(name, "") != 0)
return -EINVAL;
if (value == NULL)
return -EINVAL;
- F2FS_I(inode)->i_advise |= *(char *)value;
+ new_advise = *(char *)value;
+ if (new_advise & ~FADVISE_MODIFIABLE_BITS)
+ return -EINVAL;
+
+ new_advise = new_advise & FADVISE_MODIFIABLE_BITS;
+ new_advise |= old_advise & ~FADVISE_MODIFIABLE_BITS;
+
+ F2FS_I(inode)->i_advise = new_advise;
f2fs_mark_inode_dirty_sync(inode, true);
return 0;
}
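
The effect of the masking: userspace may flip only the bits in FADVISE_MODIFIABLE_BITS, the protected bits ride along from the stored value, and a request that touches a protected bit is rejected up front. Schematically, this is the identity the code implements:

	new = (value & MODIFIABLE) | (old & ~MODIFIABLE)

so, assuming the modifiable set covers just the hot/cold hints (not shown in this hunk), an attempt to set an encryption-related advise bit from userspace now fails with -EINVAL instead of being silently ORed in, as the old |= did.
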
rc = xhandle->list(xhandle, dentry, buffer + len,
size - len, xd->xname,
xd->name_len);
+ if (rc > size - len) {
+ rc = -ERANGE;
+ goto out;
+ }
} else {
rc = xhandle->list(xhandle, dentry, NULL, 0,
xd->xname, xd->name_len);
}
- if (rc < 0)
- goto out;
len += rc;
}
rc = len;
if (status) {
op = &args->ops[0];
op->status = status;
+ resp->opcnt = 1;
goto encode_op;
}
* for this bh as it's not marked locally
* uptodate. */
status = -EIO;
+ clear_buffer_needs_validate(bh);
put_bh(bh);
bhs[i] = NULL;
continue;
res->last_used = 0;
- spin_lock(&dlm->spinlock);
+ spin_lock(&dlm->track_lock);
list_add_tail(&res->tracking, &dlm->tracking_list);
- spin_unlock(&dlm->spinlock);
+ spin_unlock(&dlm->track_lock);
memset(res->lvb, 0, DLM_LVB_LEN);
memset(res->refmap, 0, sizeof(res->refmap));
int err;
int i;
+ /*
+ * The ability to racily run the kernel stack unwinder on a running task
+ * and then observe the unwinder output is scary; while it is useful for
+ * debugging kernel issues, it can also allow an attacker to leak kernel
+ * stack contents.
+ * Doing this in a manner that is at least safe from races would require
+ * some work to ensure that the remote task can not be scheduled; and
+ * even then, this would still expose the unwinder as local attack
+ * surface.
+ * Therefore, this interface is restricted to root.
+ */
+ if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
+ return -EACCES;
+
entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
if (!entries)
return -ENOMEM;
goto out;
/* save current_cred and override it */
- OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(file_inode(file)));
+ saved_cred = override_fsids(sbi, SDCARDFS_I(file_inode(file))->data);
+ if (!saved_cred) {
+ err = -ENOMEM;
+ goto out;
+ }
if (lower_file->f_op->unlocked_ioctl)
err = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
if (!err)
sdcardfs_copy_and_fix_attrs(file_inode(file),
file_inode(lower_file));
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out:
return err;
}
goto out;
/* save current_cred and override it */
- OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(file_inode(file)));
+ saved_cred = override_fsids(sbi, SDCARDFS_I(file_inode(file))->data);
+ if (!saved_cred) {
+ err = -ENOMEM;
+ goto out;
+ }
if (lower_file->f_op->compat_ioctl)
err = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out:
return err;
}
}
/* save current_cred and override it */
- OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(inode));
+ saved_cred = override_fsids(sbi, SDCARDFS_I(inode)->data);
+ if (!saved_cred) {
+ err = -ENOMEM;
+ goto out_err;
+ }
file->private_data =
kzalloc(sizeof(struct sdcardfs_file_info), GFP_KERNEL);
sdcardfs_copy_and_fix_attrs(inode, sdcardfs_lower_inode(inode));
out_revert_cred:
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_err:
dput(parent);
return err;
#include <linux/fs_struct.h>
#include <linux/ratelimit.h>
-/* Do not directly use this function. Use OVERRIDE_CRED() instead. */
const struct cred *override_fsids(struct sdcardfs_sb_info *sbi,
struct sdcardfs_inode_data *data)
{
return old_cred;
}
-/* Do not directly use this function, use REVERT_CRED() instead. */
void revert_fsids(const struct cred *old_cred)
{
const struct cred *cur_cred;
}
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+ saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+ SDCARDFS_I(dir)->data);
+ if (!saved_cred)
+ return -ENOMEM;
sdcardfs_get_lower_path(dentry, &lower_path);
lower_dentry = lower_path.dentry;
err = -ENOMEM;
goto out_unlock;
}
+ copied_fs->umask = 0;
+ task_lock(current);
current->fs = copied_fs;
- current->fs->umask = 0;
+ task_unlock(current);
+
err = vfs_create2(lower_dentry_mnt, d_inode(lower_parent_dentry), lower_dentry, mode, want_excl);
if (err)
goto out;
fixup_lower_ownership(dentry, dentry->d_name.name);
out:
+ task_lock(current);
current->fs = saved_fs;
+ task_unlock(current);
free_fs_struct(copied_fs);
out_unlock:
unlock_dir(lower_parent_dentry);
sdcardfs_put_lower_path(dentry, &lower_path);
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_eacces:
return err;
}
-#if 0
-static int sdcardfs_link(struct dentry *old_dentry, struct inode *dir,
- struct dentry *new_dentry)
-{
- struct dentry *lower_old_dentry;
- struct dentry *lower_new_dentry;
- struct dentry *lower_dir_dentry;
- u64 file_size_save;
- int err;
- struct path lower_old_path, lower_new_path;
-
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
-
- file_size_save = i_size_read(d_inode(old_dentry));
- sdcardfs_get_lower_path(old_dentry, &lower_old_path);
- sdcardfs_get_lower_path(new_dentry, &lower_new_path);
- lower_old_dentry = lower_old_path.dentry;
- lower_new_dentry = lower_new_path.dentry;
- lower_dir_dentry = lock_parent(lower_new_dentry);
-
- err = vfs_link(lower_old_dentry, d_inode(lower_dir_dentry),
- lower_new_dentry, NULL);
- if (err || !d_inode(lower_new_dentry))
- goto out;
-
- err = sdcardfs_interpose(new_dentry, dir->i_sb, &lower_new_path);
- if (err)
- goto out;
- fsstack_copy_attr_times(dir, d_inode(lower_new_dentry));
- fsstack_copy_inode_size(dir, d_inode(lower_new_dentry));
- set_nlink(d_inode(old_dentry),
- sdcardfs_lower_inode(d_inode(old_dentry))->i_nlink);
- i_size_write(d_inode(new_dentry), file_size_save);
-out:
- unlock_dir(lower_dir_dentry);
- sdcardfs_put_lower_path(old_dentry, &lower_old_path);
- sdcardfs_put_lower_path(new_dentry, &lower_new_path);
- REVERT_CRED();
- return err;
-}
-#endif
-
static int sdcardfs_unlink(struct inode *dir, struct dentry *dentry)
{
int err;
}
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+ saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+ SDCARDFS_I(dir)->data);
+ if (!saved_cred)
+ return -ENOMEM;
sdcardfs_get_lower_path(dentry, &lower_path);
lower_dentry = lower_path.dentry;
unlock_dir(lower_dir_dentry);
dput(lower_dentry);
sdcardfs_put_lower_path(dentry, &lower_path);
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_eacces:
return err;
}
-#if 0
-static int sdcardfs_symlink(struct inode *dir, struct dentry *dentry,
- const char *symname)
-{
- int err;
- struct dentry *lower_dentry;
- struct dentry *lower_parent_dentry = NULL;
- struct path lower_path;
-
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
-
- sdcardfs_get_lower_path(dentry, &lower_path);
- lower_dentry = lower_path.dentry;
- lower_parent_dentry = lock_parent(lower_dentry);
-
- err = vfs_symlink(d_inode(lower_parent_dentry), lower_dentry, symname);
- if (err)
- goto out;
- err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path);
- if (err)
- goto out;
- fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
- fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
-
-out:
- unlock_dir(lower_parent_dentry);
- sdcardfs_put_lower_path(dentry, &lower_path);
- REVERT_CRED();
- return err;
-}
-#endif
-
static int touch(char *abs_path, mode_t mode)
{
struct file *filp = filp_open(abs_path, O_RDWR|O_CREAT|O_EXCL|O_NOFOLLOW, mode);
}
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+ saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+ SDCARDFS_I(dir)->data);
+ if (!saved_cred)
+ return -ENOMEM;
/* check disk space */
parent_dentry = dget_parent(dentry);
unlock_dir(lower_parent_dentry);
goto out_unlock;
}
+ copied_fs->umask = 0;
+ task_lock(current);
current->fs = copied_fs;
- current->fs->umask = 0;
+ task_unlock(current);
+
err = vfs_mkdir2(lower_mnt, d_inode(lower_parent_dentry), lower_dentry, mode);
if (err) {
if (make_nomedia_in_obb ||
((pd->perm == PERM_ANDROID)
&& (qstr_case_eq(&dentry->d_name, &q_data)))) {
- REVERT_CRED(saved_cred);
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(d_inode(dentry)));
+ revert_fsids(saved_cred);
+ saved_cred = override_fsids(sbi,
+ SDCARDFS_I(d_inode(dentry))->data);
+ if (!saved_cred) {
+ pr_err("sdcardfs: failed to set up .nomedia in %s: %d\n",
+ lower_path.dentry->d_name.name,
+ -ENOMEM);
+ goto out;
+ }
set_fs_pwd(current->fs, &lower_path);
touch_err = touch(".nomedia", 0664);
if (touch_err) {
pr_err("sdcardfs: failed to create .nomedia in %s: %d\n",
- lower_path.dentry->d_name.name, touch_err);
+ lower_path.dentry->d_name.name,
+ touch_err);
goto out;
}
}
out:
+ task_lock(current);
current->fs = saved_fs;
+ task_unlock(current);
+
free_fs_struct(copied_fs);
out_unlock:
sdcardfs_put_lower_path(dentry, &lower_path);
out_revert:
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_eacces:
return err;
}
}
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+ saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+ SDCARDFS_I(dir)->data);
+ if (!saved_cred)
+ return -ENOMEM;
/* sdcardfs_get_real_lower(): in case of remove an user's obb dentry
* the dentry on the original path should be deleted.
out:
unlock_dir(lower_dir_dentry);
sdcardfs_put_real_lower(dentry, &lower_path);
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_eacces:
return err;
}
-#if 0
-static int sdcardfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
- dev_t dev)
-{
- int err;
- struct dentry *lower_dentry;
- struct dentry *lower_parent_dentry = NULL;
- struct path lower_path;
-
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
-
- sdcardfs_get_lower_path(dentry, &lower_path);
- lower_dentry = lower_path.dentry;
- lower_parent_dentry = lock_parent(lower_dentry);
-
- err = vfs_mknod(d_inode(lower_parent_dentry), lower_dentry, mode, dev);
- if (err)
- goto out;
-
- err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path);
- if (err)
- goto out;
- fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
- fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
-
-out:
- unlock_dir(lower_parent_dentry);
- sdcardfs_put_lower_path(dentry, &lower_path);
- REVERT_CRED();
- return err;
-}
-#endif
-
/*
* The locking rules in sdcardfs_rename are complex. We could use a simpler
* superblock-level name-space lock for renames and copy-ups.
}
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(old_dir->i_sb), saved_cred, SDCARDFS_I(new_dir));
+ saved_cred = override_fsids(SDCARDFS_SB(old_dir->i_sb),
+ SDCARDFS_I(new_dir)->data);
+ if (!saved_cred)
+ return -ENOMEM;
sdcardfs_get_real_lower(old_dentry, &lower_old_path);
sdcardfs_get_lower_path(new_dentry, &lower_new_path);
dput(lower_new_dir_dentry);
sdcardfs_put_real_lower(old_dentry, &lower_old_path);
sdcardfs_put_lower_path(new_dentry, &lower_new_path);
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_eacces:
return err;
}
if (IS_POSIXACL(inode))
pr_warn("%s: This may be undefined behavior...\n", __func__);
err = generic_permission(&tmp, mask);
- /* XXX
- * Original sdcardfs code calls inode_permission(lower_inode,.. )
- * for checking inode permission. But doing such things here seems
- * duplicated work, because the functions called after this func,
- * such as vfs_create, vfs_unlink, vfs_rename, and etc,
- * does exactly same thing, i.e., they calls inode_permission().
- * So we just let they do the things.
- * If there are any security hole, just uncomment following if block.
- */
-#if 0
- if (!err) {
- /*
- * Permission check on lower_inode(=EXT4).
- * we check it with AID_MEDIA_RW permission
- */
- struct inode *lower_inode;
-
- OVERRIDE_CRED(SDCARDFS_SB(inode->sb));
-
- lower_inode = sdcardfs_lower_inode(inode);
- err = inode_permission(lower_inode, mask);
-
- REVERT_CRED();
- }
-#endif
return err;
-
}
static int sdcardfs_setattr_wrn(struct dentry *dentry, struct iattr *ia)
goto out_err;
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(dentry->d_sb), saved_cred, SDCARDFS_I(inode));
+ saved_cred = override_fsids(SDCARDFS_SB(dentry->d_sb),
+ SDCARDFS_I(inode)->data);
+ if (!saved_cred)
+ return -ENOMEM;
sdcardfs_get_lower_path(dentry, &lower_path);
lower_dentry = lower_path.dentry;
out:
sdcardfs_put_lower_path(dentry, &lower_path);
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_err:
return err;
}
.setattr = sdcardfs_setattr_wrn,
.setattr2 = sdcardfs_setattr,
.getattr = sdcardfs_getattr,
- /* XXX Following operations are implemented,
- * but FUSE(sdcard) or FAT does not support them
- * These methods are *NOT* perfectly tested.
- .symlink = sdcardfs_symlink,
- .link = sdcardfs_link,
- .mknod = sdcardfs_mknod,
- */
};
const struct inode_operations sdcardfs_main_iops = {
}
/* save current_cred and override it */
- OVERRIDE_CRED_PTR(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+ saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+ SDCARDFS_I(dir)->data);
+ if (!saved_cred) {
+ ret = ERR_PTR(-ENOMEM);
+ goto out_err;
+ }
sdcardfs_get_lower_path(parent, &lower_parent_path);
out:
sdcardfs_put_lower_path(parent, &lower_parent_path);
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_err:
dput(parent);
return ret;
pr_info("sdcardfs: dev_name -> %s\n", dev_name);
pr_info("sdcardfs: options -> %s\n", (char *)raw_data);
- pr_info("sdcardfs: mnt -> %p\n", mnt);
+ pr_info("sdcardfs: mnt -> %pK\n", mnt);
/* parse lower path */
err = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
(x)->i_mode = ((x)->i_mode & S_IFMT) | 0775;\
} while (0)
-/* OVERRIDE_CRED() and REVERT_CRED()
- * OVERRIDE_CRED()
- * backup original task->cred
- * and modifies task->cred->fsuid/fsgid to specified value.
- * REVERT_CRED()
- * restore original task->cred->fsuid/fsgid.
- * These two macro should be used in pair, and OVERRIDE_CRED() should be
- * placed at the beginning of a function, right after variable declaration.
- */
-#define OVERRIDE_CRED(sdcardfs_sbi, saved_cred, info) \
- do { \
- saved_cred = override_fsids(sdcardfs_sbi, info->data); \
- if (!saved_cred) \
- return -ENOMEM; \
- } while (0)
-
-#define OVERRIDE_CRED_PTR(sdcardfs_sbi, saved_cred, info) \
- do { \
- saved_cred = override_fsids(sdcardfs_sbi, info->data); \
- if (!saved_cred) \
- return ERR_PTR(-ENOMEM); \
- } while (0)
-
-#define REVERT_CRED(saved_cred) revert_fsids(saved_cred)
-
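
Open-coding the former OVERRIDE_CRED()/REVERT_CRED() macros removes a 'return' statement hidden inside a macro body, so every caller now handles the allocation failure explicitly. The resulting pattern, repeated throughout this series:

	saved_cred = override_fsids(sbi, info->data);
	if (!saved_cred)
		return -ENOMEM;	/* or ERR_PTR(-ENOMEM) in lookup paths */
	/* ... lower-fs work under the overridden fsuid/fsgid ... */
	revert_fsids(saved_cred);
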
/* Android 5.0 support */
/* Permission mode for a specific node. Controls how file permissions
pr_err("sdcardfs: remount flags 0x%x unsupported\n", *flags);
err = -EINVAL;
}
- pr_info("Remount options were %s for vfsmnt %p.\n", options, mnt);
+ pr_info("Remount options were %s for vfsmnt %pK.\n", options, mnt);
err = parse_options_remount(sb, options, *flags & ~MS_SILENT, mnt->data);
int dev, vol;
char *endptr;
+ if (!name || !*name)
+ return ERR_PTR(-EINVAL);
+
/* First, try to open using the device node path method */
ubi = ubi_open_volume_path(name, mode);
if (!IS_ERR(ubi))
* For NAT entries
*/
#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
-#define NAT_ENTRY_BITMAP_SIZE ((NAT_ENTRY_PER_BLOCK + 7) / 8)
-#define NAT_ENTRY_BITMAP_SIZE_ALIGNED \
- ((NAT_ENTRY_BITMAP_SIZE + BITS_PER_LONG - 1) / \
- BITS_PER_LONG * BITS_PER_LONG)
-
struct f2fs_nat_entry {
__u8 version; /* latest version of cached nat entry */
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
int (*wakeup)(struct hdmi_hdcp_wakeup_data *data);
void (*notify_lvl_change)(void *client_ctx, int min_lvl);
void (*srm_cb)(void *client_ctx);
+ void (*mute_sink)(void *client_ctx);
};
enum hdcp_device_type {
struct hv_util_service {
u8 *recv_buffer;
+ void *channel;
void (*util_cb)(void *);
int (*util_init)(struct hv_util_service *);
void (*util_deinit)(void);
int ee;
};
+/**
+ * union ipa_bam_sw_peer_desc - IPA sps sw peer desc
+ *
+ * @sw_dsc_ofst: software desc offset
+ * @sw_ofst_in_desc: offset in desc
+ * @p_dsc_fifo_peer_ofst: peer desc offset
+ * @p_bytes_consumed: bytes consumed
+ */
+union ipa_bam_sw_peer_desc {
+ struct sw_ofsts_reg {
+ u32 sw_dsc_ofst:16;
+ u32 sw_ofst_in_desc:15;
+ } sw_desc;
+
+ struct evnt_reg {
+ u32 p_dsc_fifo_peer_ofst:16;
+ u32 p_bytes_consumed:15;
+ } peer_desc;
+
+ u32 read_reg;
+};
+
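
The union lets a single 32-bit register read be decoded as either the software-side or the peer-side layout. A usage sketch (reg_base and PEER_DESC_OFST are hypothetical names, not from this header):

	union ipa_bam_sw_peer_desc d;

	d.read_reg = readl_relaxed(reg_base + PEER_DESC_OFST);
	pr_debug("peer ofst=%u, consumed=%u\n",
		 d.peer_desc.p_dsc_fifo_peer_ofst,
		 d.peer_desc.p_bytes_consumed);
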
#if defined CONFIG_IPA || defined CONFIG_IPA3
/*
struct net_device *dev;
};
+struct netdev_notifier_info_ext {
+ struct netdev_notifier_info info; /* must be first */
+ union {
+ u32 mtu;
+ } ext;
+};
+
struct netdev_notifier_change_info {
struct netdev_notifier_info info; /* must be first */
unsigned int flags_changed;
/* True if the target is not a standard target */
#define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0)
+static inline bool ebt_invalid_target(int target)
+{
+ return (target < -NUM_STANDARD_TARGETS || target >= 0);
+}
+
#endif
/*
* Driver for Texas Instruments INA219, INA226 power monitor chips
*
- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
int msm_anc_dev_set_info(void *info_p, int32_t anc_cmd);
+int msm_anc_dev_get_info(void *info_p, int32_t anc_cmd);
+
int msm_anc_dev_create(struct platform_device *pdev);
int msm_anc_dev_destroy(struct platform_device *pdev);
#include <sound/q6afe-v2.h>
#include <sound/apr_audio-v2.h>
-
#define AUD_MSVC_MODULE_AUDIO_DEV_RESOURCE_SHARE 0x0001028A
#define AUD_MSVC_PARAM_ID_PORT_SHARE_RESOURCE_CONFIG 0x00010297
#define AUD_MSVC_API_VERSION_SHARE_RESOURCE_CONFIG 0x1
#define AUD_MSVC_PARAM_ID_DEV_ANC_REFS_CONFIG 0x00010286
#define AUD_MSVC_API_VERSION_DEV_ANC_REFS_CONFIG 0x1
#define AUD_MSVC_MODULE_AUDIO_DEV_ANC_ALGO 0x00010234
-#define AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_RPM 0x00010235
-#define AUD_MSVC_API_VERSION_DEV_ANC_ALGO_RPM 0x1
struct aud_msvc_port_param_data_v2 {
/* ID of the module to be configured.
} __packed;
struct aud_audioif_config_command {
- struct apr_hdr hdr;
+ struct apr_hdr hdr;
struct aud_msvc_port_cmd_set_param_v2 param;
struct aud_msvc_port_param_data_v2 pdata;
union afe_port_config port;
u32 lpm_length;
} __packed;
-
-struct aud_msvc_param_id_dev_anc_algo_rpm {
- u32 minor_version;
- u32 rpm;
-} __packed;
-
-
struct aud_msvc_param_id_dev_anc_refs_cfg {
u32 minor_version;
u16 port_id;
u32 bit_width;
} __packed;
-
struct anc_share_resource_command {
- struct apr_hdr hdr;
+ struct apr_hdr hdr;
struct aud_msvc_port_cmd_set_param_v2 param;
struct aud_msvc_port_param_data_v2 pdata;
struct aud_msvc_param_id_dev_share_resource_cfg resource;
} __packed;
-
struct anc_config_ref_command {
- struct apr_hdr hdr;
+ struct apr_hdr hdr;
struct aud_msvc_port_cmd_set_param_v2 param;
struct aud_msvc_port_param_data_v2 pdata;
struct aud_msvc_param_id_dev_anc_refs_cfg refs;
} __packed;
-
-
-struct anc_set_rpm_command {
- struct apr_hdr hdr;
- struct aud_msvc_port_cmd_set_param_v2 param;
- struct aud_msvc_port_param_data_v2 pdata;
- struct aud_msvc_param_id_dev_anc_algo_rpm set_rpm;
-} __packed;
-
-struct anc_get_rpm_command {
- struct apr_hdr hdr;
- struct aud_msvc_port_cmd_get_param_v2 param;
- struct aud_msvc_port_param_data_v2 pdata;
- struct aud_msvc_param_id_dev_anc_algo_rpm get_rpm;
-} __packed;
-
-struct anc_get_rpm_resp {
- uint32_t status;
- struct aud_msvc_port_param_data_v2 pdata;
- struct aud_msvc_param_id_dev_anc_algo_rpm res_rpm;
-} __packed;
-
-#define AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_BYPASS_MODE 0x0001029B
-
-#define AUD_MSVC_API_VERSION_DEV_ANC_ALGO_BYPASS_MODE 0x1
-
-#define AUD_MSVC_ANC_ALGO_BYPASS_MODE_NO 0x0
-#define AUD_MSVC_ANC_ALGO_BYPASS_MODE_REFS_TO_ANC_SPKR 0x1
-#define AUD_MSVC_ANC_ALGO_BYPASS_MODE_ANC_MIC_TO_ANC_SPKR 0x2
-#define AUD_MSVC_ANC_ALGO_BYPASS_MODE_REFS_MIXED_ANC_MIC_TO_ANC_SPKR 0x3
-
-struct aud_msvc_param_id_dev_anc_algo_bypass_mode {
- uint32_t minor_version;
- uint32_t bypass_mode;
-} __packed;
-
-struct anc_set_bypass_mode_command {
- struct apr_hdr hdr;
- struct aud_msvc_port_cmd_set_param_v2 param;
- struct aud_msvc_port_param_data_v2 pdata;
- struct aud_msvc_param_id_dev_anc_algo_bypass_mode set_bypass_mode;
-} __packed;
-
#define AUD_MSVC_PARAM_ID_PORT_ANC_ALGO_MODULE_ID 0x0001023A
struct aud_msvc_param_id_dev_anc_algo_module_id {
} __packed;
struct anc_set_algo_module_id_command {
- struct apr_hdr hdr;
+ struct apr_hdr hdr;
struct aud_msvc_port_cmd_set_param_v2 param;
struct aud_msvc_port_param_data_v2 pdata;
struct aud_msvc_param_id_dev_anc_algo_module_id set_algo_module_id;
} __packed;
struct anc_set_mic_spkr_layout_info_command {
- struct apr_hdr hdr;
+ struct apr_hdr hdr;
struct aud_msvc_port_cmd_set_param_v2 param;
struct aud_msvc_port_param_data_v2 pdata;
struct aud_msvc_param_id_dev_anc_mic_spkr_layout_info
set_mic_spkr_layout;
} __packed;
+struct anc_set_algo_module_cali_data_command {
+ struct apr_hdr hdr;
+ struct aud_msvc_port_cmd_set_param_v2 param;
+ struct aud_msvc_port_param_data_v2 pdata;
+ /*
+ * calibration data payload follows
+ */
+} __packed;
+
+struct anc_get_algo_module_cali_data_command {
+ struct apr_hdr hdr;
+ struct aud_msvc_port_cmd_get_param_v2 param;
+ struct aud_msvc_port_param_data_v2 pdata;
+ /*
+ * calibration data payload follows
+ */
+} __packed;
+
+struct anc_get_algo_module_cali_data_resp {
+ uint32_t status;
+ struct aud_msvc_port_param_data_v2 pdata;
+ uint32_t payload[128];
+} __packed;
+
int anc_if_tdm_port_start(u16 port_id, struct afe_tdm_port_config *tdm_port);
int anc_if_tdm_port_stop(u16 port_id);
int anc_if_config_ref(u16 port_id, u32 sample_rate, u32 bit_width,
u16 num_channel);
-int anc_if_set_rpm(u16 port_id, u32 rpm);
-
-int anc_if_set_bypass_mode(u16 port_id, u32 bypass_mode);
-
int anc_if_set_algo_module_id(u16 port_id, u32 module_id);
int anc_if_set_anc_mic_spkr_layout(u16 port_id,
struct aud_msvc_param_id_dev_anc_mic_spkr_layout_info *set_mic_spkr_layout_p);
+int anc_if_set_algo_module_cali_data(u16 port_id, void *data_p);
+
+int anc_if_get_algo_module_cali_data(u16 port_id, void *data_p);
+
int anc_if_shared_mem_map(void);
int anc_if_shared_mem_unmap(void);
kfree_skb(skb);
}
+void skb_rbtree_purge(struct rb_root *root);
+
void *netdev_alloc_frag(unsigned int fragsz);
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
return __pskb_trim(skb, len);
}
+#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
+#define skb_rb_first(root) rb_to_skb(rb_first(root))
+#define skb_rb_last(root) rb_to_skb(rb_last(root))
+#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
+#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
+
#define skb_queue_walk(queue, skb) \
for (skb = (queue)->next; \
skb != (struct sk_buff *)(queue); \
int size; /* The size of an object including meta data */
int object_size; /* The size of an object without meta data */
int offset; /* Free pointer offset. */
- int cpu_partial; /* Number of per cpu partial objects to keep around */
+ /* Number of per cpu partial objects to keep around */
+ unsigned int cpu_partial;
struct kmem_cache_order_objects oo;
/* Allocation and freeing of slabs */
struct sk_buff* lost_skb_hint;
struct sk_buff *retransmit_skb_hint;
- /* OOO segments go in this list. Note that socket lock must be held,
- * as we do not use sk_buff_head lock.
- */
- struct sk_buff_head out_of_order_queue;
+ /* OOO segments go in this rbtree. Socket lock must be held. */
+ struct rb_root out_of_order_queue;
+ struct sk_buff *ooo_last_skb; /* cache rb_last(out_of_order_queue) */
/* SACKs data, these 2 need to be together (see tcp_options_write) */
struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
wait_queue_head_t wait;
struct list_head subscribed; /* Subscribed events */
struct list_head available; /* Dequeueable event */
+ struct mutex subscribe_lock;
unsigned int navailable;
u32 sequence;
int mode;
};
-struct netdev_notify_work {
- struct delayed_work work;
- struct net_device *dev;
- struct netdev_bonding_info bonding_info;
-};
-
struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *np;
#endif
+ struct delayed_work notify_work;
struct kobject kobj;
struct rtnl_link_stats64 slave_stats;
};
#define _NET_CNSS2_H
#include <linux/pci.h>
+#include <linux/usb.h>
#define CNSS_MAX_FILE_NAME 20
#define CNSS_MAX_TIMESTAMP_LEN 32
const struct pci_device_id *id_table;
};
+struct cnss_usb_wlan_driver {
+ char *name;
+ int (*probe)(struct usb_interface *pintf,
+ const struct usb_device_id *id);
+ void (*remove)(struct usb_interface *pintf);
+ int (*reinit)(struct usb_interface *pintf,
+ const struct usb_device_id *id);
+ void (*shutdown)(struct usb_interface *pintf);
+ void (*crash_shutdown)(struct usb_interface *pintf);
+ int (*suspend)(struct usb_interface *pintf, pm_message_t state);
+ int (*resume)(struct usb_interface *pintf);
+ int (*reset_resume)(struct usb_interface *pintf);
+ const struct usb_device_id *id_table;
+};
+
enum cnss_driver_status {
CNSS_UNINITIALIZED,
CNSS_INITIALIZED,
extern void cnss_release_pm_sem(struct device *dev);
extern int cnss_auto_suspend(struct device *dev);
extern int cnss_auto_resume(struct device *dev);
+extern int cnss_pci_force_wake_request(struct device *dev);
+extern int cnss_pci_is_device_awake(struct device *dev);
+extern int cnss_pci_force_wake_release(struct device *dev);
extern int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
int *num_vectors,
uint32_t *user_base_data,
uint32_t mem_type, uint32_t data_len,
uint8_t *input);
extern int cnss_set_fw_log_mode(struct device *dev, uint8_t fw_log_mode);
-
+extern int cnss_usb_wlan_register_driver(struct cnss_usb_wlan_driver *driver);
+extern void cnss_usb_wlan_unregister_driver(
+ struct cnss_usb_wlan_driver *driver);
#endif /* _NET_CNSS2_H */
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
int fib_sync_down_addr(struct net *net, __be32 local);
int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
extern u32 fib_multipath_secret __read_mostly;
* According to specification 102 622 chapter 4.4 Pipes,
* the pipe identifier is 7 bits long.
*/
-#define NFC_HCI_MAX_PIPES 127
+#define NFC_HCI_MAX_PIPES 128
struct nfc_hci_init_data {
u8 gate_count;
struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops);
}
+static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
+{
+ int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
+
+ atomic_add(segs, &sk->sk_drops);
+}
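
Counting per GSO segment keeps the drop statistics honest for aggregated skbs. For instance (illustrative values):

	/* one dropped GSO skb carrying 10 segments counts as 10 drops */
	skb_shinfo(skb)->gso_segs = 10;
	sk_drops_add(sk, skb);		/* sk->sk_drops += 10 */
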
+
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
{
struct tcp_sock *tp = tcp_sk(sk);
- if (skb_queue_empty(&tp->out_of_order_queue) &&
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
tp->rcv_wnd &&
atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
!tp->urg_data)
#define MM_VID_START 500
#define MM_VID 501
-#define MM_VID_END 502
+#define MM_VID_2 502
+#define MM_VID_END 503
#define MM_MISC_START 600
#define MM_MISC 601
#define MM_QCPE_START 700
#define MM_QCPE_VM1 701
-#define MM_QCPE_VM2 702
-#define MM_QCPE_VM3 703
-#define MM_QCPE_VM4 704
-#define MM_QCPE_END 705
+#define MM_QCPE_END 702
#define MM_CLK_START 800
#define MM_CLK_VM1 801
/* room for ANC_CMD define extend */
#define ANC_CMD_MAX 0xFF
+#define ANC_CALIBRATION_PAYLOAD_SIZE_MAX 100
+
struct audio_anc_header {
int32_t data_size;
int32_t version;
struct audio_anc_bypass_mode {
int32_t mode;
};
-
struct audio_anc_algo_module_info {
int32_t module_id;
};
+struct audio_anc_algo_calibration_header {
+ uint32_t module_id;
+ uint32_t param_id;
+ uint32_t payload_size;
+};
+
+struct audio_anc_algo_calibration_body {
+ int32_t payload[ANC_CALIBRATION_PAYLOAD_SIZE_MAX];
+};
+
struct audio_anc_algo_calibration_info {
- int32_t payload_size;
- /* num bytes of payload specificed in payload_size followed */
+ struct audio_anc_algo_calibration_header cali_header;
+ struct audio_anc_algo_calibration_body cali_body;
};
union audio_anc_data {
#define KGSL_PROP_DEVICE_QDSS_STM 0x19
#define KGSL_PROP_DEVICE_QTIMER 0x20
#define KGSL_PROP_IB_TIMEOUT 0x21
+#define KGSL_PROP_SECURE_BUFFER_ALIGNMENT 0x23
+#define KGSL_PROP_SECURE_CTXT_SUPPORT 0x24
struct kgsl_shadowprop {
unsigned long gpuaddr;
for (i = 0; i < kallsyms->num_symtab; i++)
if (strcmp(name, symname(kallsyms, i)) == 0 &&
- kallsyms->symtab[i].st_info != 'U')
+ kallsyms->symtab[i].st_shndx != SHN_UNDEF)
return kallsyms->symtab[i].st_value;
return 0;
}
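
The old comparison against 'U' confused nm(1) output notation with ELF: st_info packs a symbol's binding and type, and an undefined symbol is instead identified by its section index, which is what both hunks now test:

	bool defined = sym->st_shndx != SHN_UNDEF;	/* not: st_info != 'U' */
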
if (mod->state == MODULE_STATE_UNFORMED)
continue;
for (i = 0; i < kallsyms->num_symtab; i++) {
+
+ if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
+ continue;
+
ret = fn(data, symname(kallsyms, i),
mod, kallsyms->symtab[i].st_value);
if (ret != 0)
u64 prefer_idle)
{
struct schedtune *st = css_st(css);
- st->prefer_idle = prefer_idle;
+ st->prefer_idle = !!prefer_idle;
return 0;
}
/* Convert (if necessary) to absolute time */
if (flags != TIMER_ABSTIME) {
ktime_t now = alarm_bases[type].gettime();
- exp = ktime_add(now, exp);
+
+ exp = ktime_add_safe(now, exp);
}
if (alarmtimer_do_nsleep(&alarm, exp))
tmp_iter_page = first_page;
do {
+ cond_resched();
+
to_remove_page = tmp_iter_page;
rb_inc_page(cpu_buffer, &tmp_iter_page);
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *prev;
+ unsigned long flags;
- spin_lock(&i->i_klist->k_lock);
+ spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
prev = to_klist_node(last->n_node.prev);
prev = to_klist_node(prev->n_node.prev);
}
- spin_unlock(&i->i_klist->k_lock);
+ spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *next;
+ unsigned long flags;
- spin_lock(&i->i_klist->k_lock);
+ spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
next = to_klist_node(last->n_node.next);
next = to_klist_node(next->n_node.next);
}
- spin_unlock(&i->i_klist->k_lock);
+ spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
new_flags |= VM_DONTDUMP;
break;
case MADV_DODUMP:
- if (new_flags & VM_SPECIAL) {
+ if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
error = -EINVAL;
goto out;
}
mpol_shared_policy_init(&info->policy, NULL);
break;
}
+
+ lockdep_annotate_inode_mutex_key(inode);
} else
shmem_free_inode(sb);
return inode;
{
struct page *page, *page2;
void *object = NULL;
- int available = 0;
+ unsigned int available = 0;
int objects;
/*
static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
size_t length)
{
- unsigned long objects;
+ unsigned int objects;
int err;
- err = kstrtoul(buf, 10, &objects);
+ err = kstrtouint(buf, 10, &objects);
if (err)
return err;
if (objects && !kmem_cache_has_cpu_partial(s))
#ifdef CONFIG_SMP
"nr_tlb_remote_flush",
"nr_tlb_remote_flush_received",
+#else
+ "", /* nr_tlb_remote_flush */
+ "", /* nr_tlb_remote_flush_received */
#endif /* CONFIG_SMP */
"nr_tlb_local_flush_all",
"nr_tlb_local_flush_one",
#ifdef CONFIG_DEBUG_VM_VMACACHE
"vmacache_find_calls",
"vmacache_find_hits",
- "vmacache_full_flushes",
#endif
#endif /* CONFIG_VM_EVENTS_COUNTERS */
};
hdr.hop_limit, &hdr.daddr);
skb_push(skb, sizeof(hdr));
+ skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb_copy_to_linear_data(skb, &hdr, sizeof(hdr));
if (e->ethproto != htons(ETH_P_ARP) ||
e->invflags & EBT_IPROTO)
return -EINVAL;
+ if (ebt_invalid_target(info->target))
+ return -EINVAL;
+
return 0;
}
}
EXPORT_SYMBOL(call_netdevice_notifiers);
+/**
+ * call_netdevice_notifiers_mtu - call all network notifier blocks
+ * @val: value passed unmodified to notifier function
+ * @dev: net_device pointer passed unmodified to notifier function
+ * @arg: additional u32 argument passed to the notifier function
+ *
+ * Call all network notifier blocks. Parameters and return value
+ * are as for raw_notifier_call_chain().
+ */
+static int call_netdevice_notifiers_mtu(unsigned long val,
+ struct net_device *dev, u32 arg)
+{
+ struct netdev_notifier_info_ext info = {
+ .info.dev = dev,
+ .ext.mtu = arg,
+ };
+
+ BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
+
+ return call_netdevice_notifiers_info(val, dev, &info.info);
+}
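
Because 'info' is the first member (enforced by the BUILD_BUG_ON above), a notifier that knows about the extension can recover the MTU straight from the info pointer it is handed. A sketch of such a handler (hypothetical, mirroring the fib change later in this series):

	static int my_mtu_notifier(struct notifier_block *nb,
				   unsigned long event, void *ptr)
	{
		if (event == NETDEV_CHANGEMTU) {
			struct netdev_notifier_info_ext *ext = ptr;

			pr_debug("previous mtu was %u\n", ext->ext.mtu);
		}
		return NOTIFY_DONE;
	}
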
+
#ifdef CONFIG_NET_INGRESS
static struct static_key ingress_needed __read_mostly;
err = __dev_set_mtu(dev, new_mtu);
if (!err) {
- err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+ err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
+ orig_mtu);
err = notifier_to_errno(err);
if (err) {
/* setting mtu back and notifying everyone again,
* so that they have a chance to revert changes.
*/
__dev_set_mtu(dev, orig_mtu);
- call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+ call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
+ new_mtu);
}
}
return err;
lladdr = neigh->ha;
}
+ /* Update the confirmed timestamp for the neighbour entry after we
+ * receive an ARP packet, even if it doesn't change the IP-to-MAC
+ * binding.
+ */
+ if (new & NUD_CONNECTED)
+ neigh->confirmed = jiffies;
+
/* If entry was valid and address is not changed,
do not change entry state, if new one is STALE.
*/
}
}
- /* Update timestamps only once we know we will make a change to the
+ /* Update timestamp only once we know we will make a change to the
* neighbour entry. Otherwise we risk to move the locktime window with
* noop updates and ignore relevant ARP updates.
*/
- if (new != old || lladdr != neigh->ha) {
- if (new & NUD_CONNECTED)
- neigh->confirmed = jiffies;
+ if (new != old || lladdr != neigh->ha)
neigh->updated = jiffies;
- }
if (new != old) {
neigh_del_timer(neigh);
else if (ops->get_num_rx_queues)
num_rx_queues = ops->get_num_rx_queues();
+ if (num_tx_queues < 1 || num_tx_queues > 4096)
+ return ERR_PTR(-EINVAL);
+
+ if (num_rx_queues < 1 || num_rx_queues > 4096)
+ return ERR_PTR(-EINVAL);
+
err = -ENOMEM;
dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
ops->setup, num_tx_queues, num_rx_queues);
EXPORT_SYMBOL(skb_queue_purge);
/**
+ * skb_rbtree_purge - empty a skb rbtree
+ * @root: root of the rbtree to empty
+ *
+ * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
+ * the rbtree and one reference dropped. This function does not take
+ * any lock. Synchronization should be handled by the caller (e.g., TCP
+ * out-of-order queue is protected by the socket lock).
+ */
+void skb_rbtree_purge(struct rb_root *root)
+{
+ struct sk_buff *skb, *next;
+
+ rbtree_postorder_for_each_entry_safe(skb, next, root, rbnode)
+ kfree_skb(skb);
+
+ *root = RB_ROOT;
+}
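
The post-order walk is what makes the lockless free safe here: children are visited before their parent, so each skb can be freed as it is visited without the traversal re-touching freed nodes, and resetting the root wholesale spares a per-node rb_erase().
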
+
+/**
* skb_queue_head - queue a buffer at the list head
* @list: list to use
* @newsk: buffer to queue
if (encap)
skb_reset_inner_headers(skb);
skb->network_header = (u8 *)iph - skb->head;
+ skb_reset_mac_len(skb);
} while ((skb = skb->next));
out:
static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *upper_info = ptr;
+ struct netdev_notifier_info_ext *info_ext = ptr;
struct in_device *in_dev;
struct net *net = dev_net(dev);
unsigned int flags;
fib_sync_up(dev, RTNH_F_LINKDOWN);
else
fib_sync_down_dev(dev, event, false);
- /* fall through */
+ rt_cache_flush(net);
+ break;
case NETDEV_CHANGEMTU:
+ fib_sync_mtu(dev, info_ext->ext.mtu);
rt_cache_flush(net);
break;
case NETDEV_CHANGEUPPER:
- info = ptr;
+ upper_info = ptr;
/* flush all routes if dev is linked to or unlinked from
* an L3 master device (e.g., VRF)
*/
- if (info->upper_dev && netif_is_l3_master(info->upper_dev))
+ if (upper_info->upper_dev &&
+ netif_is_l3_master(upper_info->upper_dev))
fib_disable_ip(dev, NETDEV_DOWN, true);
break;
}
return ret;
}
+/* Update the PMTU of exceptions when:
+ * - the new MTU of the first hop becomes smaller than the PMTU
+ * - the old MTU was the same as the PMTU, and it limited discovery of
+ * larger MTUs on the path. With that limit raised, we can now
+ * discover larger MTUs
+ * A special case is locked exceptions, for which the PMTU is smaller
+ * than the minimal accepted PMTU:
+ * - if the new MTU is greater than the PMTU, don't make any change
+ * - otherwise, unlock and set PMTU
+ */
+static void nh_update_mtu(struct fib_nh *nh, u32 new, u32 orig)
+{
+ struct fnhe_hash_bucket *bucket;
+ int i;
+
+ bucket = rcu_dereference_protected(nh->nh_exceptions, 1);
+ if (!bucket)
+ return;
+
+ for (i = 0; i < FNHE_HASH_SIZE; i++) {
+ struct fib_nh_exception *fnhe;
+
+ for (fnhe = rcu_dereference_protected(bucket[i].chain, 1);
+ fnhe;
+ fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) {
+ if (fnhe->fnhe_mtu_locked) {
+ if (new <= fnhe->fnhe_pmtu) {
+ fnhe->fnhe_pmtu = new;
+ fnhe->fnhe_mtu_locked = false;
+ }
+ } else if (new < fnhe->fnhe_pmtu ||
+ orig == fnhe->fnhe_pmtu) {
+ fnhe->fnhe_pmtu = new;
+ }
+ }
+ }
+}
+
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu)
+{
+ unsigned int hash = fib_devindex_hashfn(dev->ifindex);
+ struct hlist_head *head = &fib_info_devhash[hash];
+ struct fib_nh *nh;
+
+ hlist_for_each_entry(nh, head, nh_hash) {
+ if (nh->nh_dev == dev)
+ nh_update_mtu(nh, dev->mtu, orig_mtu);
+ }
+}
+
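
Concretely, the rules in nh_update_mtu() play out like this (a worked example, values not from the patch):

	/* unlocked, pmtu 1400, dev mtu 1500 -> 1280: new < pmtu, pmtu = 1280
	 * unlocked, pmtu 1500, dev mtu 1500 -> 9000: orig == pmtu, pmtu = 9000
	 * locked,   pmtu 552,  dev mtu -> 1280:      new > pmtu, unchanged
	 * locked,   pmtu 552,  dev mtu -> 300:       new <= pmtu, pmtu = 300, unlocked
	 */
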
/* Event force Flags Description
* NETDEV_CHANGE 0 LINKDOWN Carrier OFF, not for scope host
* NETDEV_DOWN 0 LINKDOWN|DEAD Link down, not for scope host
static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
struct sockaddr_in sin;
- const struct iphdr *iph = ip_hdr(skb);
__be16 *ports;
int end;
ports = (__be16 *)skb_transport_header(skb);
sin.sin_family = AF_INET;
- sin.sin_addr.s_addr = iph->daddr;
+ sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
sin.sin_port = ports[1];
memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
const struct iphdr *tnl_params, u8 protocol)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
+ unsigned int inner_nhdr_len = 0;
const struct iphdr *inner_iph;
struct flowi4 fl4;
u8 tos, ttl;
int err;
bool connected;
+ /* ensure we can access the inner net header, for several users below */
+ if (skb->protocol == htons(ETH_P_IP))
+ inner_nhdr_len = sizeof(struct iphdr);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ inner_nhdr_len = sizeof(struct ipv6hdr);
+ if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
+ goto tx_error;
+
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
connected = (tunnel->parms.iph.daddr != 0);
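
The pskb_may_pull() calls added here follow a general rule: verify that a header's bytes are actually present in the buffer before dereferencing it. A simplified userspace analogue of that guard (struct iphdr_min and parse_inner are hypothetical stand-ins):

#include <stdint.h>
#include <string.h>

struct iphdr_min {			/* stand-in, not the real iphdr */
	uint8_t	 ver_ihl;
	uint8_t	 tos;
	uint16_t tot_len;
};

/* Fill *out only if the buffer really holds a full header. */
static int parse_inner(const uint8_t *pkt, size_t len,
		       struct iphdr_min *out)
{
	if (len < sizeof(*out))		/* the pskb_may_pull() analogue */
		return -1;
	memcpy(out, pkt, sizeof(*out));
	return 0;
}

int main(void)
{
	uint8_t buf[2] = { 0x45, 0x00 };	/* deliberately too short */
	struct iphdr_min h;

	return parse_inner(buf, sizeof(buf), &h);	/* returns -1 */
}
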
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- __skb_queue_head_init(&tp->out_of_order_queue);
+ tp->out_of_order_queue = RB_ROOT;
tcp_init_xmit_timers(sk);
tcp_prequeue_init(tp);
INIT_LIST_HEAD(&tp->tsq_node);
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
tcp_write_queue_purge(sk);
- __skb_queue_purge(&tp->out_of_order_queue);
+ skb_rbtree_purge(&tp->out_of_order_queue);
inet->inet_dport = 0;
/* It _is_ possible, that we have something out-of-order _after_ FIN.
* Probably, we should reset in this case. For now drop them.
*/
- __skb_queue_purge(&tp->out_of_order_queue);
+ skb_rbtree_purge(&tp->out_of_order_queue);
if (tcp_is_sack(tp))
tcp_sack_reset(&tp->rx_opt);
sk_mem_reclaim(sk);
int this_sack;
/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
- if (skb_queue_empty(&tp->out_of_order_queue)) {
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
tp->rx_opt.num_sacks = 0;
return;
}
return true;
}
+static bool tcp_ooo_try_coalesce(struct sock *sk,
+ struct sk_buff *to,
+ struct sk_buff *from,
+ bool *fragstolen)
+{
+ bool res = tcp_try_coalesce(sk, to, from, fragstolen);
+
+ /* In case tcp_drop() is called later, update to->gso_segs */
+ if (res) {
+ u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
+ max_t(u16, 1, skb_shinfo(from)->gso_segs);
+
+ skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+ }
+ return res;
+}
+
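
The gso_segs arithmetic above in miniature: each count is treated as at least 1 and the sum saturates at the 16-bit limit, so later drop accounting stays sane. A standalone sketch (gso_segs_sum is a hypothetical helper):

#include <stdint.h>
#include <stdio.h>

static uint16_t gso_segs_sum(uint16_t a, uint16_t b)
{
	uint32_t sum = (uint32_t)(a ? a : 1) + (b ? b : 1);

	return sum > 0xFFFF ? 0xFFFF : (uint16_t)sum;
}

int main(void)
{
	printf("%u\n", gso_segs_sum(0xFFFF, 0xFFFF));	/* 65535, clamped */
	return 0;
}
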
+static void tcp_drop(struct sock *sk, struct sk_buff *skb)
+{
+ sk_drops_add(sk, skb);
+ __kfree_skb(skb);
+}
+
/* This one checks to see if we can put data from the
* out_of_order queue into the receive_queue.
*/
{
struct tcp_sock *tp = tcp_sk(sk);
__u32 dsack_high = tp->rcv_nxt;
+ bool fin, fragstolen, eaten;
struct sk_buff *skb, *tail;
- bool fragstolen, eaten;
+ struct rb_node *p;
- while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) {
+ p = rb_first(&tp->out_of_order_queue);
+ while (p) {
+ skb = rb_entry(p, struct sk_buff, rbnode);
if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
break;
dsack_high = TCP_SKB_CB(skb)->end_seq;
tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
}
+ p = rb_next(p);
+ rb_erase(&skb->rbnode, &tp->out_of_order_queue);
- __skb_unlink(skb, &tp->out_of_order_queue);
- if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
+ if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) {
SOCK_DEBUG(sk, "ofo packet was already received\n");
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
continue;
}
SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n",
tail = skb_peek_tail(&sk->sk_receive_queue);
eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen);
tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
+ fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
if (!eaten)
__skb_queue_tail(&sk->sk_receive_queue, skb);
- if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
- tcp_fin(sk);
- if (eaten)
+ else
kfree_skb_partial(skb, fragstolen);
+
+ if (unlikely(fin)) {
+ tcp_fin(sk);
+ /* tcp_fin() purges tp->out_of_order_queue,
+ * so we must end this loop right now.
+ */
+ break;
+ }
}
}
static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
+ struct rb_node **p, *q, *parent;
struct sk_buff *skb1;
u32 seq, end_seq;
+ bool fragstolen;
tcp_ecn_check_ce(sk, skb);
if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
return;
}
inet_csk_schedule_ack(sk);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
+ seq = TCP_SKB_CB(skb)->seq;
+ end_seq = TCP_SKB_CB(skb)->end_seq;
SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
- tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
+ tp->rcv_nxt, seq, end_seq);
- skb1 = skb_peek_tail(&tp->out_of_order_queue);
- if (!skb1) {
+ p = &tp->out_of_order_queue.rb_node;
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
/* Initial out of order segment, build 1 SACK. */
if (tcp_is_sack(tp)) {
tp->rx_opt.num_sacks = 1;
- tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
- tp->selective_acks[0].end_seq =
- TCP_SKB_CB(skb)->end_seq;
- }
- __skb_queue_head(&tp->out_of_order_queue, skb);
- goto end;
- }
-
- seq = TCP_SKB_CB(skb)->seq;
- end_seq = TCP_SKB_CB(skb)->end_seq;
-
- if (seq == TCP_SKB_CB(skb1)->end_seq) {
- bool fragstolen;
-
- if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
- __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
- } else {
- tcp_grow_window(sk, skb);
- kfree_skb_partial(skb, fragstolen);
- skb = NULL;
+ tp->selective_acks[0].start_seq = seq;
+ tp->selective_acks[0].end_seq = end_seq;
}
-
- if (!tp->rx_opt.num_sacks ||
- tp->selective_acks[0].end_seq != seq)
- goto add_sack;
-
- /* Common case: data arrive in order after hole. */
- tp->selective_acks[0].end_seq = end_seq;
+ rb_link_node(&skb->rbnode, NULL, p);
+ rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
+ tp->ooo_last_skb = skb;
goto end;
}
- /* Find place to insert this segment. */
- while (1) {
- if (!after(TCP_SKB_CB(skb1)->seq, seq))
- break;
- if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
- skb1 = NULL;
- break;
+ /* In the typical case, we are adding an skb to the end of the list.
+	 * Use of ooo_last_skb avoids the O(log N) rbtree lookup.
+ */
+ if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
+ skb, &fragstolen)) {
+coalesce_done:
+ tcp_grow_window(sk, skb);
+ kfree_skb_partial(skb, fragstolen);
+ skb = NULL;
+ goto add_sack;
+ }
+
+ /* Find place to insert this segment. Handle overlaps on the way. */
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+ skb1 = rb_entry(parent, struct sk_buff, rbnode);
+ if (before(seq, TCP_SKB_CB(skb1)->seq)) {
+ p = &parent->rb_left;
+ continue;
}
- skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
- }
- /* Do skb overlap to previous one? */
- if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
- if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
- /* All the bits are present. Drop. */
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
- __kfree_skb(skb);
- skb = NULL;
- tcp_dsack_set(sk, seq, end_seq);
- goto add_sack;
- }
- if (after(seq, TCP_SKB_CB(skb1)->seq)) {
- /* Partial overlap. */
- tcp_dsack_set(sk, seq,
- TCP_SKB_CB(skb1)->end_seq);
- } else {
- if (skb_queue_is_first(&tp->out_of_order_queue,
- skb1))
- skb1 = NULL;
- else
- skb1 = skb_queue_prev(
- &tp->out_of_order_queue,
- skb1);
+ if (before(seq, TCP_SKB_CB(skb1)->end_seq)) {
+ if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
+ /* All the bits are present. Drop. */
+ NET_INC_STATS(sock_net(sk),
+ LINUX_MIB_TCPOFOMERGE);
+ tcp_drop(sk, skb);
+ skb = NULL;
+ tcp_dsack_set(sk, seq, end_seq);
+ goto add_sack;
+ }
+ if (after(seq, TCP_SKB_CB(skb1)->seq)) {
+ /* Partial overlap. */
+ tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq);
+ } else {
+ /* skb's seq == skb1's seq and skb covers skb1.
+ * Replace skb1 with skb.
+ */
+ rb_replace_node(&skb1->rbnode, &skb->rbnode,
+ &tp->out_of_order_queue);
+ tcp_dsack_extend(sk,
+ TCP_SKB_CB(skb1)->seq,
+ TCP_SKB_CB(skb1)->end_seq);
+ NET_INC_STATS(sock_net(sk),
+ LINUX_MIB_TCPOFOMERGE);
+ tcp_drop(sk, skb1);
+ goto merge_right;
+ }
+ } else if (tcp_ooo_try_coalesce(sk, skb1,
+ skb, &fragstolen)) {
+ goto coalesce_done;
}
+ p = &parent->rb_right;
}
- if (!skb1)
- __skb_queue_head(&tp->out_of_order_queue, skb);
- else
- __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
- /* And clean segments covered by new one as whole. */
- while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) {
- skb1 = skb_queue_next(&tp->out_of_order_queue, skb);
+ /* Insert segment into RB tree. */
+ rb_link_node(&skb->rbnode, parent, p);
+ rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
+merge_right:
+ /* Remove other segments covered by skb. */
+ while ((q = rb_next(&skb->rbnode)) != NULL) {
+ skb1 = rb_entry(q, struct sk_buff, rbnode);
if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
break;
if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
end_seq);
break;
}
- __skb_unlink(skb1, &tp->out_of_order_queue);
+ rb_erase(&skb1->rbnode, &tp->out_of_order_queue);
tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
TCP_SKB_CB(skb1)->end_seq);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
- __kfree_skb(skb1);
+ tcp_drop(sk, skb1);
}
+	/* If there is no skb after us, we are the new ooo_last_skb. */
+ if (!q)
+ tp->ooo_last_skb = skb;
add_sack:
if (tcp_is_sack(tp))
static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
- int eaten = -1;
bool fragstolen = false;
+ int eaten = -1;
- if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
- goto drop;
-
+ if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
+ __kfree_skb(skb);
+ return;
+ }
skb_dst_drop(skb);
__skb_pull(skb, tcp_hdr(skb)->doff * 4);
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
tcp_fin(sk);
- if (!skb_queue_empty(&tp->out_of_order_queue)) {
+ if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
tcp_ofo_queue(sk);
/* RFC2581. 4.2. SHOULD send immediate ACK, when
* gap in queue is filled.
*/
- if (skb_queue_empty(&tp->out_of_order_queue))
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
inet_csk(sk)->icsk_ack.pingpong = 0;
}
tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
inet_csk_schedule_ack(sk);
drop:
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
return;
}
tcp_data_queue_ofo(sk, skb);
}
+static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *list)
+{
+ if (list)
+ return !skb_queue_is_last(list, skb) ? skb->next : NULL;
+
+ return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode);
+}
+
static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
- struct sk_buff_head *list)
+ struct sk_buff_head *list,
+ struct rb_root *root)
{
- struct sk_buff *next = NULL;
+ struct sk_buff *next = tcp_skb_next(skb, list);
- if (!skb_queue_is_last(list, skb))
- next = skb_queue_next(list, skb);
+ if (list)
+ __skb_unlink(skb, list);
+ else
+ rb_erase(&skb->rbnode, root);
- __skb_unlink(skb, list);
__kfree_skb(skb);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
return next;
}
+/* Insert skb into rb tree, ordered by TCP_SKB_CB(skb)->seq */
+static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct sk_buff *skb1;
+
+ while (*p) {
+ parent = *p;
+ skb1 = rb_entry(parent, struct sk_buff, rbnode);
+ if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+ rb_link_node(&skb->rbnode, parent, p);
+ rb_insert_color(&skb->rbnode, root);
+}
+
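
The ordering used by tcp_rbtree_insert() relies on before(), the standard TCP sequence comparator from include/net/tcp.h, which compares 32-bit sequence numbers modulo 2^32 via a signed difference. A self-contained rendition, valid while the two values are less than 2^31 apart:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if seq1 precedes seq2 in 32-bit wraparound arithmetic. */
static bool before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
	/* 0x1 follows 0xffffffff despite being numerically smaller. */
	printf("%d\n", before(0xffffffffu, 0x1u));	/* prints 1 */
	printf("%d\n", before(0x1u, 0xffffffffu));	/* prints 0 */
	return 0;
}
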
/* Collapse contiguous sequence of skbs head..tail with
* sequence numbers start..end.
*
- * If tail is NULL, this means until the end of the list.
+ * If tail is NULL, this means until the end of the queue.
*
* Segments with FIN/SYN are not collapsed (only because this
* simplifies code)
*/
static void
-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
- struct sk_buff *head, struct sk_buff *tail,
- u32 start, u32 end)
+tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
+ struct sk_buff *head, struct sk_buff *tail, u32 start, u32 end)
{
- struct sk_buff *skb, *n;
+ struct sk_buff *skb = head, *n;
+ struct sk_buff_head tmp;
bool end_of_skbs;
/* First, check that queue is collapsible and find
- * the point where collapsing can be useful. */
- skb = head;
+ * the point where collapsing can be useful.
+ */
restart:
- end_of_skbs = true;
- skb_queue_walk_from_safe(list, skb, n) {
- if (skb == tail)
- break;
+ for (end_of_skbs = true; skb != NULL && skb != tail; skb = n) {
+ n = tcp_skb_next(skb, list);
+
/* No new bits? It is possible on ofo queue. */
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
- skb = tcp_collapse_one(sk, skb, list);
+ skb = tcp_collapse_one(sk, skb, list, root);
if (!skb)
break;
goto restart;
break;
}
- if (!skb_queue_is_last(list, skb)) {
- struct sk_buff *next = skb_queue_next(list, skb);
- if (next != tail &&
- TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) {
- end_of_skbs = false;
- break;
- }
+ if (n && n != tail &&
+ TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
+ end_of_skbs = false;
+ break;
}
/* Decided to skip this, advance start seq. */
(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
return;
+ __skb_queue_head_init(&tmp);
+
while (before(start, end)) {
int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start);
struct sk_buff *nskb;
nskb = alloc_skb(copy, GFP_ATOMIC);
if (!nskb)
- return;
+ break;
memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
- __skb_queue_before(list, skb, nskb);
+ if (list)
+ __skb_queue_before(list, skb, nskb);
+ else
+ __skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */
skb_set_owner_r(nskb, sk);
/* Copy data, releasing collapsed skbs. */
start += size;
}
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
- skb = tcp_collapse_one(sk, skb, list);
+ skb = tcp_collapse_one(sk, skb, list, root);
if (!skb ||
skb == tail ||
(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
- return;
+ goto end;
}
}
}
+end:
+ skb_queue_walk_safe(&tmp, skb, n)
+ tcp_rbtree_insert(root, skb);
}
/* Collapse ofo queue. Algorithm: select contiguous sequence of skbs
{
struct tcp_sock *tp = tcp_sk(sk);
u32 range_truesize, sum_tiny = 0;
- struct sk_buff *skb = skb_peek(&tp->out_of_order_queue);
- struct sk_buff *head;
+ struct sk_buff *skb, *head;
+ struct rb_node *p;
u32 start, end;
- if (!skb)
+ p = rb_first(&tp->out_of_order_queue);
+ skb = rb_entry_safe(p, struct sk_buff, rbnode);
+new_range:
+ if (!skb) {
+ p = rb_last(&tp->out_of_order_queue);
+		/* Note: it is possible that p is NULL here. We do not
+		 * use rb_entry_safe(), because ooo_last_skb is valid
+		 * only if the rbtree is not empty.
+ */
+ tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
return;
-
+ }
start = TCP_SKB_CB(skb)->seq;
end = TCP_SKB_CB(skb)->end_seq;
range_truesize = skb->truesize;
- head = skb;
- for (;;) {
- struct sk_buff *next = NULL;
+ for (head = skb;;) {
+ skb = tcp_skb_next(skb, NULL);
- if (!skb_queue_is_last(&tp->out_of_order_queue, skb))
- next = skb_queue_next(&tp->out_of_order_queue, skb);
- skb = next;
-
- /* Segment is terminated when we see gap or when
- * we are at the end of all the queue. */
+ /* Range is terminated when we see a gap or when
+ * we are at the queue end.
+ */
if (!skb ||
after(TCP_SKB_CB(skb)->seq, end) ||
before(TCP_SKB_CB(skb)->end_seq, start)) {
/* Do not attempt collapsing tiny skbs */
if (range_truesize != head->truesize ||
end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
- tcp_collapse(sk, &tp->out_of_order_queue,
+ tcp_collapse(sk, NULL, &tp->out_of_order_queue,
head, skb, start, end);
} else {
sum_tiny += range_truesize;
return;
}
- head = skb;
- if (!skb)
- break;
- /* Start new segment */
+ goto new_range;
+ }
+
+ range_truesize += skb->truesize;
+ if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
start = TCP_SKB_CB(skb)->seq;
+ if (after(TCP_SKB_CB(skb)->end_seq, end))
end = TCP_SKB_CB(skb)->end_seq;
- range_truesize = skb->truesize;
- } else {
- range_truesize += skb->truesize;
- if (before(TCP_SKB_CB(skb)->seq, start))
- start = TCP_SKB_CB(skb)->seq;
- if (after(TCP_SKB_CB(skb)->end_seq, end))
- end = TCP_SKB_CB(skb)->end_seq;
- }
}
}
/*
* Purge the out-of-order queue.
+ * Drop at least 12.5% of sk_rcvbuf to defend against malicious traffic patterns.
* Return true if queue was pruned.
*/
static bool tcp_prune_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- bool res = false;
+ struct rb_node *node, *prev;
+ int goal;
- if (!skb_queue_empty(&tp->out_of_order_queue)) {
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
- __skb_queue_purge(&tp->out_of_order_queue);
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
+ return false;
- /* Reset SACK state. A conforming SACK implementation will
- * do the same at a timeout based retransmit. When a connection
- * is in a sad state like this, we care only about integrity
- * of the connection not performance.
- */
- if (tp->rx_opt.sack_ok)
- tcp_sack_reset(&tp->rx_opt);
- sk_mem_reclaim(sk);
- res = true;
- }
- return res;
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
+ goal = sk->sk_rcvbuf >> 3;
+ node = &tp->ooo_last_skb->rbnode;
+ do {
+ prev = rb_prev(node);
+ rb_erase(node, &tp->out_of_order_queue);
+ goal -= rb_to_skb(node)->truesize;
+ __kfree_skb(rb_to_skb(node));
+ if (!prev || goal <= 0) {
+ sk_mem_reclaim(sk);
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+ !tcp_under_memory_pressure(sk))
+ break;
+ goal = sk->sk_rcvbuf >> 3;
+ }
+
+ node = prev;
+ } while (node);
+ tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
+
+ /* Reset SACK state. A conforming SACK implementation will
+ * do the same at a timeout based retransmit. When a connection
+ * is in a sad state like this, we care only about integrity
+ * of the connection not performance.
+ */
+ if (tp->rx_opt.sack_ok)
+ tcp_sack_reset(&tp->rx_opt);
+
+ return true;
}
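
In simplified form, the loop above walks from the highest-sequence skb toward the head, releasing buffers until at least sk_rcvbuf/8 bytes of truesize have been reclaimed. A userspace sketch with stand-in values (the array and names are hypothetical):

#include <stdio.h>

int main(void)
{
	int truesize[] = { 4096, 2048, 8192, 1024 };	/* head .. tail */
	int n = sizeof(truesize) / sizeof(truesize[0]);
	int rcvbuf = 16384;
	int goal = rcvbuf >> 3;		/* reclaim at least 12.5% */
	int i;

	for (i = n - 1; i >= 0 && goal > 0; i--) {
		goal -= truesize[i];	/* drop the tail-most skb */
		printf("dropped skb %d (truesize %d)\n", i, truesize[i]);
	}
	return 0;
}
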
/* Reduce allocated memory if we can, trying to get
tcp_collapse_ofo_queue(sk);
if (!skb_queue_empty(&sk->sk_receive_queue))
- tcp_collapse(sk, &sk->sk_receive_queue,
+ tcp_collapse(sk, &sk->sk_receive_queue, NULL,
skb_peek(&sk->sk_receive_queue),
NULL,
tp->copied_seq, tp->rcv_nxt);
/* We ACK each frame or... */
tcp_in_quickack_mode(sk) ||
/* We have out of order data. */
- (ofo_possible && skb_peek(&tp->out_of_order_queue))) {
+ (ofo_possible && !RB_EMPTY_ROOT(&tp->out_of_order_queue))) {
/* Then ack it now */
tcp_send_ack(sk);
} else {
return true;
discard:
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
return false;
}
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
discard:
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
}
EXPORT_SYMBOL(tcp_rcv_established);
TCP_DELACK_MAX, TCP_RTO_MAX);
discard:
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
return 0;
} else {
tcp_send_ack(sk);
if (!queued) {
discard:
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
}
return 0;
}
return 0;
discard_and_relse:
+ sk_drops_add(sk, skb);
sock_put(sk);
goto discard_it;
tcp_write_queue_purge(sk);
/* Cleans up our, hopefully empty, out_of_order_queue. */
- __skb_queue_purge(&tp->out_of_order_queue);
+ skb_rbtree_purge(&tp->out_of_order_queue);
#ifdef CONFIG_TCP_MD5SIG
/* Clean up the MD5 key list, if any */
newtp->snd_cwnd_cnt = 0;
tcp_init_xmit_timers(newsk);
- __skb_queue_head_init(&newtp->out_of_order_queue);
newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
newtp->rx_opt.saw_tstamp = 0;
p++;
continue;
}
- state->offset++;
return ifa;
}
return ifa;
}
+ state->offset = 0;
while (++state->bucket < IN6_ADDR_HSIZE) {
- state->offset = 0;
hlist_for_each_entry_rcu_bh(ifa,
&inet6_addr_lst[state->bucket], addr_lst) {
if (!net_eq(dev_net(ifa->idev->dev), net))
continue;
- state->offset++;
return ifa;
}
}
ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h));
skb->network_header = (u8 *)ipv6h - skb->head;
+ skb_reset_mac_len(skb);
if (udpfrag) {
int err = ip6_find_1stfragopt(skb, &prevhdr);
kfree_skb(skb);
return -ENOBUFS;
}
+ if (skb->sk)
+ skb_set_owner_w(skb2, skb->sk);
consume_skb(skb);
skb = skb2;
- /* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
- * it is safe to call in our context (socket lock not held)
- */
- skb_set_owner_w(skb, (struct sock *)sk);
}
if (opt->opt_flen)
ipv6_push_frag_opts(skb, opt, &proto);
ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- const struct iphdr *iph = ip_hdr(skb);
+ const struct iphdr *iph;
int encap_limit = -1;
struct flowi6 fl6;
__u8 dsfield;
u8 tproto;
int err;
+ /* ensure we can access the full inner ip header */
+ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ return -1;
+
+ iph = ip_hdr(skb);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
tproto = ACCESS_ONCE(t->parms.proto);
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct ipv6hdr *ipv6h;
int encap_limit = -1;
__u16 offset;
struct flowi6 fl6;
u8 tproto;
int err;
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
+ return -1;
+
+ ipv6h = ipv6_hdr(skb);
tproto = ACCESS_ONCE(t->parms.proto);
if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
ip6_tnl_addr_conflict(t, ipv6h))
return 0;
discard_and_relse:
+ sk_drops_add(sk, skb);
sock_put(sk);
goto discard_it;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
/* Keys without a station are used for TX only */
- if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP))
+ if (sta && test_sta_flag(sta, WLAN_STA_MFP))
key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
break;
case NL80211_IFTYPE_ADHOC:
if (len < IEEE80211_DEAUTH_FRAME_LEN)
return;
- ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n",
- mgmt->sa, mgmt->da, mgmt->bssid, reason);
+ ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+ ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason);
sta_info_destroy_addr(sdata, mgmt->sa);
}
auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
- ibss_dbg(sdata,
- "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
- mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
+ ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+ ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n",
+ mgmt->bssid, auth_transaction);
if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
return;
rx_timestamp = drv_get_tsf(local, sdata);
}
- ibss_dbg(sdata,
- "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
+ ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n",
mgmt->sa, mgmt->bssid,
- (unsigned long long)rx_timestamp,
+ (unsigned long long)rx_timestamp);
+ ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n",
(unsigned long long)beacon_timestamp,
(unsigned long long)(rx_timestamp - beacon_timestamp),
jiffies);
tx_last_beacon = drv_tx_last_beacon(local);
- ibss_dbg(sdata,
- "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n",
- mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon);
+ ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+ ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n",
+ mgmt->bssid, tx_last_beacon);
if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
return;
"%s called with hardware scan in progress\n", __func__);
rtnl_lock();
- list_for_each_entry(sdata, &local->interfaces, list)
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ /*
+		 * XXX: more work may be needed for other vif types, and
+		 * even for station mode: ideally we would run most of the
+		 * iface-type-dependent _stop handlers (ieee80211_mgd_stop,
+		 * ieee80211_ibss_stop, etc.).
+		 * For now, fix only the specific bug that was seen: the
+		 * race between csa_connection_drop_work and this function.
+ */
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ /*
+			 * csa_connection_drop_work is scheduled from the
+			 * iface worker, which runs on mac80211's workqueue,
+			 * so it cannot be rescheduled after we cancel it
+			 * here. The exception is ieee80211_chswitch_done(),
+			 * which can still race with this cancellation.
+ */
+ cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work);
+ }
flush_delayed_work(&sdata->dec_tailroom_needed_wk);
+ }
ieee80211_scan_cancel(local);
ieee80211_reconfig(local);
rtnl_unlock();
cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_SHORT_GI_160 |
- IEEE80211_VHT_CAP_RXSTBC_1 |
- IEEE80211_VHT_CAP_RXSTBC_2 |
- IEEE80211_VHT_CAP_RXSTBC_3 |
- IEEE80211_VHT_CAP_RXSTBC_4 |
+ IEEE80211_VHT_CAP_RXSTBC_MASK |
IEEE80211_VHT_CAP_TXSTBC |
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
forward = false;
reply = true;
target_metric = 0;
+
+ if (SN_GT(target_sn, ifmsh->sn))
+ ifmsh->sn = target_sn;
+
if (time_after(jiffies, ifmsh->last_sn_update +
net_traversal_jiffies(sdata)) ||
time_before(jiffies, ifmsh->last_sn_update)) {
*/
if (sdata->reserved_chanctx) {
+ struct ieee80211_supported_band *sband = NULL;
+ struct sta_info *mgd_sta = NULL;
+ enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20;
+
/*
* with multi-vif csa driver may call ieee80211_csa_finish()
* many times while waiting for other interfaces to use their
if (sdata->reserved_ready)
goto out;
+ if (sdata->vif.bss_conf.chandef.width !=
+ sdata->csa_chandef.width) {
+ /*
+ * For managed interface, we need to also update the AP
+ * station bandwidth and align the rate scale algorithm
+ * on the bandwidth change. Here we only consider the
+ * bandwidth of the new channel definition (as channel
+ * switch flow does not have the full HT/VHT/HE
+ * information), assuming that if additional changes are
+ * required they would be done as part of the processing
+ * of the next beacon from the AP.
+ */
+ switch (sdata->csa_chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ default:
+ bw = IEEE80211_STA_RX_BW_20;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ bw = IEEE80211_STA_RX_BW_40;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ bw = IEEE80211_STA_RX_BW_80;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ bw = IEEE80211_STA_RX_BW_160;
+ break;
+ }
+
+ mgd_sta = sta_info_get(sdata, ifmgd->bssid);
+ sband =
+ local->hw.wiphy->bands[sdata->csa_chandef.chan->band];
+ }
+
+ if (sdata->vif.bss_conf.chandef.width >
+ sdata->csa_chandef.width) {
+ mgd_sta->sta.bandwidth = bw;
+ rate_control_rate_update(local, sband, mgd_sta,
+ IEEE80211_RC_BW_CHANGED);
+ }
+
ret = ieee80211_vif_use_reserved_context(sdata);
if (ret) {
sdata_info(sdata,
goto out;
}
+ if (sdata->vif.bss_conf.chandef.width <
+ sdata->csa_chandef.width) {
+ mgd_sta->sta.bandwidth = bw;
+ rate_control_rate_update(local, sband, mgd_sta,
+ IEEE80211_RC_BW_CHANGED);
+ }
+
goto out;
}
{
u32 addr_len;
- if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR]) {
+ if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR] &&
+ info->attrs[NLBL_UNLABEL_A_IPV4MASK]) {
addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]);
if (addr_len != sizeof(struct in_addr) &&
addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK]))
if (settings->n_ciphers_pairwise > cipher_limit)
return -EINVAL;
+ if (len > sizeof(u32) * NL80211_MAX_NR_CIPHER_SUITES)
+ return -EINVAL;
+
memcpy(settings->ciphers_pairwise, data, len);
for (i = 0; i < settings->n_ciphers_pairwise; i++)
if (!buf)
return -ENOMEM;
- if (ies_len) {
+ if (ies_len && ies) {
static const u8 before_extcapa[] = {
/* not listing IEs expected to be created by driver */
WLAN_EID_RSN,
hdr.frame_control = fc;
hdr.duration_id = 0;
hdr.seq_ctrl = 0;
+ eth_zero_addr(hdr.addr4);
skip_header_bytes = ETH_HLEN;
if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
u8 *op_class)
{
u8 vht_opclass;
- u16 freq = chandef->center_freq1;
+ u32 freq = chandef->center_freq1;
if (freq >= 2412 && freq <= 2472) {
if (chandef->width > NL80211_CHAN_WIDTH_40)
(ut[i].family != prev_family))
return -EINVAL;
+ if (ut[i].mode >= XFRM_MODE_MAX)
+ return -EINVAL;
+
prev_family = ut[i].family;
switch (ut[i].family) {
}
reg = of_get_property(np, "reg", NULL);
- if (!reg)
+ if (!reg) {
+ of_node_put(np);
return NULL;
+ }
*gpioptr = *reg;
struct fw_device *device = fw_parent_device(unit);
int err, rcode;
u64 date;
- __le32 cues[3] = {
- cpu_to_le32(MAUDIO_BOOTLOADER_CUE1),
- cpu_to_le32(MAUDIO_BOOTLOADER_CUE2),
- cpu_to_le32(MAUDIO_BOOTLOADER_CUE3)
- };
+ __le32 *cues;
/* check date of software used to build */
err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE,
&date, sizeof(u64));
if (err < 0)
- goto end;
+ return err;
/*
* firmware version 5058 or later has date later than "20070401", but
* 'date' is not null-terminated.
if (date < 0x3230303730343031LL) {
dev_err(&unit->device,
"Use firmware version 5058 or later\n");
- err = -ENOSYS;
- goto end;
+ return -ENXIO;
}
+ cues = kmalloc_array(3, sizeof(*cues), GFP_KERNEL);
+ if (!cues)
+ return -ENOMEM;
+
+ cues[0] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE1);
+ cues[1] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE2);
+ cues[2] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE3);
+
rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST,
device->node_id, device->generation,
device->max_speed, BEBOB_ADDR_REG_REQ,
- cues, sizeof(cues));
+ cues, 3 * sizeof(*cues));
+ kfree(cues);
if (rcode != RCODE_COMPLETE) {
dev_err(&unit->device,
"Failed to send a cue to load firmware\n");
err = -EIO;
}
-end:
+
return err;
}
*/
void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus)
{
+ WARN_ON_ONCE(!bus->rb.area);
+
spin_lock_irq(&bus->reg_lock);
/* CORB set up */
bus->corb.addr = bus->rb.addr;
/* reset controller */
azx_reset(bus, full_reset);
- /* initialize interrupts */
+ /* clear interrupts */
azx_int_clear(bus);
- azx_int_enable(bus);
/* initialize the codec command I/O */
snd_hdac_bus_init_cmd_io(bus);
+ /* enable interrupts after CORB/RIRB buffers are initialized above */
+ azx_int_enable(bus);
+
/* program the position buffer */
if (bus->use_posbuf && bus->posbuf.addr) {
snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr);
emu->support_tlv = 1;
return put_user(SNDRV_EMU10K1_VERSION, (int __user *)argp);
case SNDRV_EMU10K1_IOCTL_INFO:
- info = kmalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
snd_emu10k1_fx8010_info(emu, info);
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
/* AMD Raven */
{ PCI_DEVICE(0x1022, 0x15e3),
- .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
+ AZX_DCAPS_PM_RUNTIME },
/* ATI HDMI */
{ PCI_DEVICE(0x1002, 0x0002),
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2,
3, 1, 0),
SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum),
- SOC_SINGLE("MMTLR Data Switch", 0,
- 1, 1, 0),
+ SOC_SINGLE("MMTLR Data Switch", CS4265_SPDIF_CTL2,
+ 0, 1, 0),
SOC_ENUM("Mono Channel Select", spdif_mono_select_enum),
SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24),
};
struct sigmadsp_control *ctrl, void *data)
{
	/* safeload loads up to 20 bytes in an atomic operation */
- if (ctrl->num_bytes > 4 && ctrl->num_bytes <= 20 && sigmadsp->ops &&
- sigmadsp->ops->safeload)
+ if (ctrl->num_bytes <= 20 && sigmadsp->ops && sigmadsp->ops->safeload)
return sigmadsp->ops->safeload(sigmadsp, ctrl->addr, data,
ctrl->num_bytes);
else
#include <linux/init.h>
#include <linux/module.h>
#include <linux/i2c.h>
+#include <linux/acpi.h>
#include "wm8804.h"
};
MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id);
+#if defined(CONFIG_OF)
static const struct of_device_id wm8804_of_match[] = {
{ .compatible = "wlf,wm8804", },
{ }
};
MODULE_DEVICE_TABLE(of, wm8804_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id wm8804_acpi_match[] = {
+ { "1AEC8804", 0 }, /* Wolfson PCI ID + part ID */
+ { "10138804", 0 }, /* Cirrus Logic PCI ID + part ID */
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, wm8804_acpi_match);
+#endif
static struct i2c_driver wm8804_i2c_driver = {
.driver = {
.name = "wm8804",
.pm = &wm8804_pm,
- .of_match_table = wm8804_of_match,
+ .of_match_table = of_match_ptr(wm8804_of_match),
+ .acpi_match_table = ACPI_PTR(wm8804_acpi_match),
},
.probe = wm8804_i2c_probe,
.remove = wm8804_i2c_remove,
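
The of_match_ptr()/ACPI_PTR() wrappers used above are what let the match tables be compiled out together with their #ifdef blocks: each evaluates to its argument when the corresponding subsystem is enabled and to NULL otherwise. Roughly, the kernel defines them as follows (simplified from include/linux/of.h and include/linux/acpi.h):

#ifdef CONFIG_OF
#define of_match_ptr(ptr)	(ptr)
#else
#define of_match_ptr(ptr)	NULL
#endif

#ifdef CONFIG_ACPI
#define ACPI_PTR(ptr)		(ptr)
#else
#define ACPI_PTR(ptr)		NULL
#endif
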
static int msm_compr_map_ion_fd(struct msm_compr_audio *prtd, int fd)
{
- ion_phys_addr_t paddr;
+ ion_phys_addr_t paddr = 0;
size_t pa_len = 0;
int ret = 0;
static int msm_compr_unmap_ion_fd(struct msm_compr_audio *prtd)
{
- ion_phys_addr_t paddr;
+ ion_phys_addr_t paddr = 0;
size_t pa_len = 0;
int ret = 0;
int dir = IN, ret = 0, stream_id;
unsigned long flags;
uint32_t stream_index;
- ion_phys_addr_t paddr;
+ ion_phys_addr_t paddr = 0;
size_t pa_len = 0;
pr_debug("%s\n", __func__);
static int msm_transcode_map_ion_fd(struct msm_transcode_loopback *trans,
int fd)
{
- ion_phys_addr_t paddr;
+ ion_phys_addr_t paddr = 0;
size_t pa_len = 0;
int ret = 0;
static int msm_transcode_unmap_ion_fd(struct msm_transcode_loopback *trans)
{
- ion_phys_addr_t paddr;
+ ion_phys_addr_t paddr = 0;
size_t pa_len = 0;
int ret = 0;
struct trans_loopback_pdata *pdata = snd_soc_platform_get_drvdata(
rtd->platform);
int ret = 0;
- ion_phys_addr_t paddr;
+ ion_phys_addr_t paddr = 0;
size_t pa_len = 0;
mutex_lock(&trans->lock);
int q6asm_audio_map_shm_fd(struct audio_client *ac, struct ion_client **client,
struct ion_handle **handle, int fd)
{
- ion_phys_addr_t paddr;
+ ion_phys_addr_t paddr = 0;
size_t pa_len = 0;
int ret;
int sz = 0;
continue;
}
+ /* let users know there is no DAI to link */
+ if (!dai_w->priv) {
+ dev_dbg(card->dev, "dai widget %s has no DAI\n",
+ dai_w->name);
+ continue;
+ }
+
dai = dai_w->priv;
/* ...find all widgets with the same stream and link them */
#endif
#endif
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
int arch__choose_best_symbol(struct symbol *syma,
struct symbol *symb __maybe_unused)
{
char *sym = syma->name;
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
/* Skip over any initial dot */
if (*sym == '.')
sym++;
+#endif
/* Avoid "SyS" kernel syscall aliases */
if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3))
return SYMBOL_A;
}
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
/* Allow matching against dot variants */
int arch__compare_symbol_names(const char *namea, const char *nameb)
{
libpq = CDLL("libpq.so.5")
PQconnectdb = libpq.PQconnectdb
PQconnectdb.restype = c_void_p
+PQconnectdb.argtypes = [ c_char_p ]
PQfinish = libpq.PQfinish
+PQfinish.argtypes = [ c_void_p ]
PQstatus = libpq.PQstatus
+PQstatus.restype = c_int
+PQstatus.argtypes = [ c_void_p ]
PQexec = libpq.PQexec
PQexec.restype = c_void_p
+PQexec.argtypes = [ c_void_p, c_char_p ]
PQresultStatus = libpq.PQresultStatus
+PQresultStatus.restype = c_int
+PQresultStatus.argtypes = [ c_void_p ]
PQputCopyData = libpq.PQputCopyData
+PQputCopyData.restype = c_int
PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
PQputCopyEnd = libpq.PQputCopyEnd
+PQputCopyEnd.restype = c_int
PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
--- /dev/null
+CONFIG_EFIVAR_FS=y
};
-static const char * const debugfs_known_mountpoints[] = {
- "/sys/kernel/debug",
- "/debug",
- 0,
-};
-
/*
* data structures
*/
int alias;
int refs;
int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu;
- int hwcache_align, object_size, objs_per_slab;
- int sanity_checks, slab_size, store_user, trace;
+ unsigned int hwcache_align, object_size, objs_per_slab;
+ unsigned int sanity_checks, slab_size, store_user, trace;
int order, poison, reclaim_account, red_zone;
unsigned long partial, objects, slabs, objects_partial, objects_total;
unsigned long alloc_fastpath, alloc_slowpath;