*.lzo
*.patch
*.gcno
+*.ll
modules.builtin
Module.symvers
*.dwo
# Kdevelop4
*.kdev4
+
+# fetched Android config fragments
+android/configs/android-*.cfg
Description:
Controls the issue rate of small discard commands.
+What: /sys/fs/f2fs/<disk>/discard_granularity
+Date: July 2017
+Contact: "Chao Yu" <yuchao0@huawei.com>
+Description:
+		Controls the discard granularity of the inner discard
+		thread; the thread will not issue discards smaller than
+		the granularity. The unit is one block, and only values
+		in the range [1, 512] are currently supported.
+
What: /sys/fs/f2fs/<disk>/max_victim_search
Date: January 2014
Contact: "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
Contact: "Chao Yu" <chao2.yu@samsung.com>
Description:
Controls the count of nid pages to be read ahead.
+
+What: /sys/fs/f2fs/<disk>/dirty_nats_ratio
+Date: January 2016
+Contact: "Chao Yu" <chao2.yu@samsung.com>
+Description:
+ Controls dirty nat entries ratio threshold, if current
+ ratio exceeds configured threshold, checkpoint will
+ be triggered for flushing dirty nat entries.
+
+What: /sys/fs/f2fs/<disk>/lifetime_write_kbytes
+Date: January 2016
+Contact: "Shuoran Liu" <liushuoran@huawei.com>
+Description:
+ Shows total written kbytes issued to disk.
+
+What: /sys/fs/f2fs/<disk>/inject_rate
+Date: May 2016
+Contact: "Sheng Yong" <shengyong1@huawei.com>
+Description:
+ Controls the injection rate.
+
+What: /sys/fs/f2fs/<disk>/inject_type
+Date: May 2016
+Contact: "Sheng Yong" <shengyong1@huawei.com>
+Description:
+ Controls the injection type.
+
+What: /sys/fs/f2fs/<disk>/reserved_blocks
+Date: June 2017
+Contact: "Chao Yu" <yuchao0@huawei.com>
+Description:
+ Controls current reserved blocks in system.
+
+What: /sys/fs/f2fs/<disk>/gc_urgent
+Date: August 2017
+Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+		Do background GC aggressively.
+
+What: /sys/fs/f2fs/<disk>/gc_urgent_sleep_time
+Date: August 2017
+Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+		Controls the sleep time of GC urgent mode. See the
+		example below.
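+
+		As a rough illustration (not part of the ABI proper),
+		these tunables can be exercised from a shell. The disk
+		name "sdb1" below is a hypothetical example:
+
+		# skip discards smaller than 16 blocks
+		echo 16 > /sys/fs/f2fs/sdb1/discard_granularity
+		# run urgent background GC every 100 ms
+		echo 100 > /sys/fs/f2fs/sdb1/gc_urgent_sleep_time
+		echo 1 > /sys/fs/f2fs/sdb1/gc_urgent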
--- /dev/null
+THS8135 Video DAC
+-----------------
+
+This is the binding for Texas Instruments THS8135 Video DAC bridge.
+
+Required properties:
+
+- compatible: Must be "ti,ths8135"
+
+Required nodes:
+
+This device has two video ports. Their connections are modelled using the OF
+graph bindings specified in Documentation/devicetree/bindings/graph.txt.
+
+- Video port 0 for RGB input
+- Video port 1 for VGA output
+
+Example
+-------
+
+vga-bridge {
+ compatible = "ti,ths8135";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ vga_bridge_in: endpoint {
+ remote-endpoint = <&lcdc_out_vga>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ vga_bridge_out: endpoint {
+ remote-endpoint = <&vga_con_in>;
+ };
+ };
+ };
+};
--- /dev/null
+* AVIA HX711 ADC chip for weight cells
+ Bit-banging driver
+
+Required properties:
+ - compatible: Should be "avia,hx711"
+ - sck-gpios: Definition of the GPIO for the clock
+ - dout-gpios: Definition of the GPIO for data-out
+ See Documentation/devicetree/bindings/gpio/gpio.txt
+ - avdd-supply: Definition of the regulator used as analog supply
+
+Example:
+weight@0 {
+ compatible = "avia,hx711";
+ sck-gpios = <&gpio3 10 GPIO_ACTIVE_HIGH>;
+ dout-gpios = <&gpio0 7 GPIO_ACTIVE_HIGH>;
+	avdd-supply = <&avdd>;
+};
+
--- /dev/null
+Qualcomm Technologies PNP Flash LED
+
+QPNP (Qualcomm Technologies Plug N Play) Flash LED (Light
+Emitting Diode) driver is used to illuminate the camera sensor
+when the ambient light is too dim to capture a good picture.
+It can also be used for flashlight/torch applications. It is
+part of the PMIC on Qualcomm Technologies reference platforms.
+The PMIC is connected to the host processor via the SPMI bus.
+
+Required properties:
+- compatible : should be "qcom,qpnp-flash-led"
+- reg : base address and size for flash LED modules
+
+Optional properties:
+- qcom,headroom : headroom to use. Values should be 250, 300,
+ 400 and 500 in mV.
+- qcom,startup-dly : delay between flash execution and the flash
+			actually firing. Values should be 10, 32, 64 or 128 in us.
+- qcom,clamp-curr : current to clamp at when voltage droop happens.
+ Values are in integer from 0 to 1000 inclusive,
+ indicating 0 to 1000 mA.
+- qcom,self-check-enabled : boolean type. self fault check enablement
+- qcom,thermal-derate-enabled : boolean type. derate enablement when module
+ temperature reaches threshold
+- qcom,thermal-derate-threshold : thermal threshold for derate. Values
+ should be 95, 105, 115, 125 in C.
+- qcom,thermal-derate-rate : derate rate when module temperature
+ reaches threshold. Values should be
+ "1_PERCENT", "1P25_PERCENT", "2_PERCENT",
+ "2P5_PERCENT", "5_PERCENT" in string.
+- qcom,current-ramp-enabled : boolean type. stepped current ramp enablement
+- qcom,ramp-up-step : current ramp up rate. Values should be
+ "0P2US", "0P4US", "0P8US", "1P6US", "3P3US",
+ "6P7US", "13P5US", "27US".
+- qcom,ramp-dn-step : current ramp down rate. Values should be
+ "0P2US", "0P4US", "0P8US", "1P6US", "3P3US",
+ "6P7US", "13P5US", "27US".
+- qcom,vph-pwr-droop-enabled : boolean type. VPH power droop enablement. Enablement
+ allows current clamp when phone power drops below
+ pre-determined threshold
+- qcom,vph-pwr-droop-threshold : VPH power threshold for module to clamp current.
+ Values are 2500 - 3200 in mV with 100 mV steps.
+- qcom,vph-pwr-droop-debounce-time : debounce time for module to confirm a voltage
+ droop is happening. Values are 0, 10, 32, 64
+ in us.
+- qcom,pmic-charger-support : Boolean type. This tells if flash utilizes charger boost
+ support
+- qcom,headroom-sense-ch0-enabled: Boolean type. This configures headroom sensing enablement
+ for LED channel 0
+- qcom,headroom-sense-ch1-enabled: Boolean type. This configures headroom sensing enablement
+ for LED channel 1
+- qcom,power-detect-enabled : Boolean type. This enables driver to get maximum flash LED
+ current at current battery level to avoid intensity clamp
+ when battery voltage is low
+- qcom,otst2-moduled-enabled : Boolean type. This enables driver to enable MASK to support
+ OTST2 connection.
+- qcom,follow-otst2-rb-disabled : Boolean type. This allows driver to reset/deset module.
+ By default, driver resets module. This entry allows driver to
+ bypass reset module sequence.
+- qcom,die-current-derate-enabled: Boolean type. This enables driver to get maximum flash LED
+ current, based on PMIC die temperature threshold to
+ avoid significant current derate from hardware. This property
+ is not needed if PMIC is older than PMI8994v2.0.
+- qcom,die-temp-vadc : VADC channel source for flash LED. This property is not
+ needed if PMIC is older than PMI8994v2.0.
+- qcom,die-temp-threshold : Integer type array for PMIC die temperature threshold.
+ Array should have at least one value. Values should be in
+			Celsius. This property is not needed if PMIC is older than
+ PMI8994v2.0.
+- qcom,die-temp-derate-current : Integer type array for PMIC die temperature derate
+ current. Array should have at least one value. Values
+ should be in mA. This property is not needed if PMIC is older
+ than PMI8994v2.0.
+
+Required properties inside child node. The child node contains settings for each individual LED.
+Each LED hardware needs a node for itself and a switch node to control brightness.
+For the purpose of turning on/off LED and better regulator control, "led:switch" node
+is introduced. "led:switch" acquires several existing properties from other nodes for
+operational simplification. For backward compatibility purposes, the switch node can be optional:
+- label : type of led that will be used, either "flash" or "torch".
+- qcom,led-name : name of the LED. Accepted values are "led:flash_0",
+ "led:flash_1", "led:torch_0", "led:torch_1"
+- qcom,default-led-trigger : trigger for the camera flash and torch. Accepted values are
+ "flash0_trigger", "flash1_trigger", "torch0_trigger", torch1_trigger"
+- qcom,id : enumerated ID for each physical LED. Accepted values are "0",
+ "1", etc..
+- qcom,max-current : maximum current allowed on this LED. Valid values should be
+ integer from 0 to 1000 inclusive, indicating 0 to 1000 mA.
+- qcom,pmic-revid : PMIC revision id source. This property is needed for PMI8996
+ revision check.
+
+Optional properties inside child node:
+- qcom,current : default current intensity for LED. Accepted values should be
+			integer from 0 to 1000 inclusive, indicating 0 to 1000 mA.
+- qcom,duration : Duration for flash LED. When duration time expires, hardware will turn off
+ flash LED. Values should be from 10 ms to 1280 ms with 10 ms incremental
+ step. Not applicable to torch. It is required for LED:SWITCH node to handle
+ LED used as flash.
+- reg<n> : reg<n> (where <n> is a number, e.g. 0,1,2,...) property is to add support
+	for multiple power sources. It includes two properties: regulator-name and
+	max-voltage.
+ Required property inside regulator node:
+ - regulator-name : This denotes this node is a regulator node and which
+ regulator to use.
+ Optional property inside regulator node:
+ - max-voltage : This specifies max voltage of regulator. Some switch
+ or boost regulator does not need this property.
+
+Example:
+ qcom,leds@d300 {
+ compatible = "qcom,qpnp-flash-led";
+ status = "okay";
+ reg = <0xd300 0x100>;
+ label = "flash";
+ qcom,headroom = <500>;
+ qcom,startup-dly = <128>;
+ qcom,clamp-curr = <200>;
+ qcom,pmic-charger-support;
+ qcom,self-check-enabled;
+ qcom,thermal-derate-enabled;
+ qcom,thermal-derate-threshold = <80>;
+ qcom,thermal-derate-rate = "4_PERCENT";
+ qcom,current-ramp-enabled;
+			qcom,ramp-up-step = "27US";
+			qcom,ramp-dn-step = "27US";
+ qcom,vph-pwr-droop-enabled;
+ qcom,vph-pwr-droop-threshold = <3200>;
+ qcom,vph-pwr-droop-debounce-time = <10>;
+ qcom,headroom-sense-ch0-enabled;
+ qcom,headroom-sense-ch1-enabled;
+ qcom,die-current-derate-enabled;
+ qcom,die-temp-vadc = <&pmi8994_vadc>;
+ qcom,die-temp-threshold = <85 80 75 70 65>;
+ qcom,die-temp-derate-current = <400 800 1200 1600 2000>;
+ qcom,pmic-revid = <&pmi8994_revid>;
+
+ pm8226_flash0: qcom,flash_0 {
+ label = "flash";
+ qcom,led-name = "led:flash_0";
+ qcom,default-led-trigger =
+ "flash0_trigger";
+ qcom,max-current = <1000>;
+ qcom,id = <0>;
+ qcom,duration = <1280>;
+ qcom,current = <625>;
+ };
+
+ pm8226_torch: qcom,torch_0 {
+ label = "torch";
+ qcom,led-name = "led:torch_0";
+ qcom,default-led-trigger =
+ "torch0_trigger";
+ boost-supply = <&pm8226_chg_boost>;
+ qcom,max-current = <200>;
+ qcom,id = <0>;
+ qcom,current = <120>;
+ reg0 {
+ regulator-name =
+ "pm8226_chg_boost";
+ max-voltage = <3600000>;
+ };
+ };
+
+ pm8226_switch: qcom,switch {
+			label = "switch";
+ qcom,led-name = "led:switch";
+ qcom,default-led-trigger =
+ "switch_trigger";
+ qcom,id = <2>;
+ qcom,current = <625>;
+ qcom,duration = <1280>;
+ qcom,max-current = <1000>;
+ reg0 {
+ regulator-name =
+ "pm8226_chg_boost";
+ max-voltage = <3600000>;
+ };
+ };
+ };
+
--- /dev/null
+QTI's QPNP PMIC Fuel Gauge Device
+
+QPNP PMIC FG provides interface to clients to read properties related
+to the battery. Its main function is to retrieve the State of Charge (SOC),
+a 0-100 percentage representing the amount of charge left in the battery.
+
+There are two required peripherals in the FG driver, both implemented as
+subnodes in the example. These peripherals must not be disabled if the FG
+device is to be enabled:
+
+- qcom,fg-soc : The main FG device. Supports battery fuel gauge controls and
+ sensors.
+- qcom,fg-batt : The FG battery device supports interrupts and controls with
+		respect to the state of the connected battery. For example: the
+ peripheral informs the driver if the battery has been identified
+ by the fuel gauge based on a given battery resistance range.
+
+Optionally, ADC nodes can be added:
+- qcom,revid-tp-rev: A subnode with a register address for the TP_REV register
+ in the REVID peripheral. This is used to apply workarounds that
+ may depend on the trim program.
+- qcom,fg-adc-vbat : A subnode with a register address for the FG_ADC_USR
+ peripheral which is used mainly for battery current limiting (BCL).
+	This node maps out the VBAT reading register, which allows a
+	+/- 32 mV accurate reading of VBAT.
+- qcom,fg-adc-ibat : A subnode with a register address for the FG_ADC_USR
+ peripheral which is used mainly for battery current limiting (BCL).
+	This node maps out the IBAT current reading register, which
+	allows a +/- 32 mA accurate reading of IBAT.
+
+Parent node required properties:
+- compatible : should be "qcom,qpnp-fg" for the FG driver.
+- qcom,pmic-revid : Should specify the phandle of PMIC
+ revid module. This is used to identify
+ the PMIC subtype.
+
+Parent node optional properties:
+- qcom,warm-bat-decidegc: Warm battery temperature in decidegC.
+- qcom,cool-bat-decidegc: Cool battery temperature in decidegC.
+- qcom,hot-bat-decidegc: Hot battery temperature in decidegC.
+- qcom,cold-bat-decidegc: Cold battery temperature in decidegC.
+- qcom,cold-hot-jeita-hysteresis: A tuple of 2. Index[0] is cold
+ hysteresis and index[1] is hot
+				hysteresis (in decidegC).
+- qcom,ext-sense-type: Current sense channel used by the FG.
+ Set this to use external rsense.
+- qcom,thermal-coefficients: Byte array of thermal coefficients for
+ reading battery thermistor. This should
+ be exactly 6 bytes in length.
+ Example: [01 02 03 04 05 06]
+- qcom,resume-soc: soc to resume charging in percentage.
+- qcom,resume-soc-raw: soc to resume charging in the scale of
+ [0-255]. This overrides qcom,resume-soc
+ if defined.
+- qcom,hold-soc-while-full: A boolean property that when defined
+ holds SOC at 100% when the battery is
+ full.
+- qcom,bcl-lm-threshold-ma: BCL LPM to MPM mode transition threshold
+ in milliAmpere.
+- qcom,bcl-mh-threshold-ma: BCL MPM to HPM mode transition threshold
+ in milliAmpere.
+- qcom,use-otp-profile: Specify this flag to avoid RAM loading
+ any battery profile.
+- qcom,sw-rbias-control: Boolean property which defines whether
+ the Rbias needs to be controlled by
+ software. If this is not set, it will
+ be controlled by hardware (default).
+- qcom,fg-iterm-ma:			Battery current at which the fuel gauge
+					will try to scale SOC towards 100%.
+					When the charge current goes above this,
+					the SOC should be at 100%.
+- qcom,fg-chg-iterm-ma: Battery current at which the fuel gauge
+ will issue end of charge if the charger
+ is configured to use the fuel gauge
+ ADCs for end of charge detection. This
+ property is in milliamps and should be
+ positive (e.g. 100mA to terminate at
+ -100mA).
+- qcom,irq-volt-empty-mv:		The voltage threshold at which the empty
+					soc interrupt will be triggered. When
+ the empty soc interrupt fires, battery
+ soc will be pulled to 0 and the
+ userspace will be notified via the
+ power supply framework. The userspace
+ will read 0% soc and immediately
+ shutdown.
+- qcom,fg-cutoff-voltage-mv: The voltage where the fuel gauge will
+ steer the SOC to be zero. For example,
+ if the cutoff voltage is set to 3400mv,
+ the fuel gauge will try to count SoC so
+ that the battery SoC will be 0 when it
+ is 3400mV.
+- qcom,fg-vbat-estimate-diff-mv: If the estimated voltage based on SoC
+ and battery current/resistance differs
+ from the actual voltage by more than
+ this amount, the fuel gauge will
+ redo the first SoC estimate when the
+ driver probes.
+- qcom,fg-delta-soc: How many percent the monotonic SoC must
+ change before a new delta_soc interrupt
+ is asserted. If this value is raised
+					above 3-4, some periodic workarounds may
+ not function well, so it's best to
+ leave this at 1 or 2%.
+- qcom,fg-vbatt-low-threshold: Voltage (in mV) which upon set will be
+ used for configuring the low battery
+ voltage threshold. Interrupt will be
+ asserted and handled based upon
+ this. If this property is not specified,
+ low battery voltage threshold will be
+ configured to 4200 mV.
+- qcom,cycle-counter-en: Boolean property which enables the cycle
+ counter feature. If this property is
+ present, then the following properties
+ to specify low and high soc thresholds
+ should be defined.
+- qcom,capacity-learning-on: A boolean property to have the fuel
+ gauge driver attempt to learn the
+ battery capacity when charging. Takes
+ precedence over capacity-estimation-on.
+- qcom,capacity-learning-feedback: A boolean property to have the fuel
+ gauge driver to feedback the learned
+ capacity into the capacity learning
+ algorithm. This has to be used only if
+ the property "qcom,capacity-learning-on"
+ is specified.
+- qcom,cl-max-increment-deciperc: The maximum percent that the capacity
+ can rise as the result of a single
+ charge cycle. This property corresponds
+ to .1% increments.
+- qcom,cl-max-decrement-deciperc: The maximum percent that the capacity
+ can fall as the result of a single
+ charge cycle. This property corresponds
+ to .1% decrements.
+- qcom,cl-max-temp-decidegc: Above this temperature, capacity
+ learning will be canceled.
+- qcom,cl-min-temp-decidegc:		Below this temperature, capacity
+ learning will be canceled.
+- qcom,cl-max-start-soc: The battery soc has to be below this
+ value at the start of a charge cycle
+ for capacity learning to be run.
+- qcom,cl-vbat-est-thr-uv: The maximum difference between the
+ battery voltage shadow and the current
+ predicted voltage in uV to initiate
+ capacity learning.
+- qcom,capacity-estimation-on: A boolean property to have the fuel
+ gauge driver attempt to estimate the
+ battery capacity using battery
+ resistance.
+- qcom,aging-eval-current-ma: Current used to evaluate battery aging.
+ This value should be around the steady
+ state current drawn from the battery
+ when the phone is low on battery.
+- qcom,fg-cc-cv-threshold-mv: Voltage threshold in mV for configuring
+ constant charge (CC) to constant
+ voltage (CV) setpoint in FG upon
+ which the battery EOC status will
+ be determined. This value should be
+ 10 mV less than the float voltage
+ configured in the charger.
+ This property should only be specified
+ if "qcom,autoadjust-vfloat" property is
+ specified in the charger driver to
+ ensure a proper operation.
+- qcom,bad-battery-detection-enable: A boolean property to enable the fuel
+ gauge driver to detect the damaged battery
+ when the safety-timer expires by using the
+ coulomb count.
+- qcom,fg-therm-delay-us: The time in microseconds to delay battery
+ thermistor biasing.
+- qcom,esr-pulse-tuning-en: A boolean property to enable ESR pulse
+ tuning feature. If this is enabled,
+ ESR pulse extraction will be disabled
+ when state of charge (SOC) is less than
+ 2%. It will be enabled back when SOC
+ gets above 2%. In addition, for SOC
+ between 2% and 5%, ESR pulse timing
+ settings will be different from default.
+ Once SOC crosses 5%, ESR pulse timings
+ will be restored back to default.
+
+qcom,fg-soc node required properties:
+- reg : offset and length of the PMIC peripheral register map.
+- interrupts : the interrupt mappings.
+ The format should be
+ <slave-id peripheral-id interrupt-number>.
+- interrupt-names : names for the mapped fg soc interrupts
+ The following interrupts are required:
+ 0: high-soc
+ 1: low-soc
+ 2: full-soc
+ 3: empty-soc
+ 4: delta-soc
+ 5: first-est-done
+ 6: sw-fallbk-ocv
+ 7: sw-fallbk-new-batt
+
+qcom,fg-memif node required properties:
+- reg : offset and length of the PMIC peripheral register map.
+- interrupts : the interrupt mappings.
+ The format should be
+ <slave-id peripheral-id interrupt-number>.
+- interrupt-names : names for the mapped fg adc interrupts
+ The following interrupts are required:
+ 0: mem-avail
+
+Example:
+pmi8994_fg: qcom,fg {
+ compatible = "qcom,qpnp-fg";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+ qcom,pmic-revid = <&pmi8994_revid>;
+
+ qcom,fg-soc@4000 {
+ reg = <0x4000 0x100>;
+ interrupts = <0x2 0x40 0x0>,
+ <0x2 0x40 0x1>,
+ <0x2 0x40 0x2>,
+ <0x2 0x40 0x3>,
+ <0x2 0x40 0x4>,
+ <0x2 0x40 0x5>,
+ <0x2 0x40 0x6>,
+ <0x2 0x40 0x7>;
+
+ interrupt-names = "high-soc",
+ "low-soc",
+ "full-soc",
+ "empty-soc",
+ "delta-soc",
+ "first-est-done",
+ "sw-fallbk-ocv",
+ "sw-fallbk-new-batt";
+ };
+
+ qcom,fg-batt@4100 {
+ reg = <0x4100 0x100>;
+ interrupts = <0x2 0x41 0x0>,
+ <0x2 0x41 0x1>,
+ <0x2 0x41 0x2>,
+ <0x2 0x41 0x3>,
+ <0x2 0x41 0x4>,
+ <0x2 0x41 0x5>,
+ <0x2 0x41 0x6>,
+ <0x2 0x41 0x7>;
+
+ interrupt-names = "soft-cold",
+ "soft-hot",
+ "vbatt-low",
+ "batt-ided",
+ "batt-id-req",
+ "batt-unknown",
+ "batt-missing",
+ "batt-match";
+ };
+
+ qcom,fg-adc-vbat@4254 {
+ reg = <0x4254 0x1>;
+ };
+
+ qcom,fg-adc-ibat@4255 {
+ reg = <0x4255 0x1>;
+ };
+
+ qcom,fg-memif@4400 {
+ reg = <0x4400 0x100>;
+ interrupts = <0x2 0x44 0x0>,
+ <0x2 0x44 0x1>;
+
+ interrupt-names = "mem-avail",
+ "data-rcvry-sug";
+
+ qcom,cold-hot-jeita-hysteresis = <30 50>;
+ };
+};
--- /dev/null
+QPNP SMB Battery Charger
+
+QPNP SMB Charger is a single-cell switching mode battery charger. It can charge
+the battery and power the system via the USB and AC adapter input.
+
+The QPNP SMB Charger interfaces via the SPMI bus.
+
+There are six different peripherals adding the following functionality.
+Each of these peripherals are implemented as subnodes in the example at the
+end of this file.
+
+- qcom,chgr: Supports charging control and status
+ reporting.
+- qcom,bat-if: Battery status reporting such as presence,
+ temperature reporting and voltage collapse
+ protection.
+- qcom,usb-chgpth: USB charge path detection and input current
+ limiting configuration.
+- qcom,dc-chgpth: DC charge path detection and input current
+ limiting configuration.
+- qcom,chg-misc: Miscellaneous features such as watchdog timers
+ and SYSOK pin control
+- qcom,chg-otg: OTG configuration control.
+
+Parent node required properties:
+- compatible: Must be "qcom,qpnp-smbcharger"
+- #address-cells: Must be <1>
+- #size-cells: Must be <1>
+- qcom,pmic-revid: Should specify the phandle of PMIC
+ revid module. This is used to identify
+ the PMIC subtype.
+
+Sub node required properties:
+- reg: The SPMI address for this peripheral
+- interrupts: Specifies the interrupt associated with the peripheral.
+- interrupt-names: Specifies the interrupt names for the peripheral. Every
+ available interrupt needs to have an associated name
+			with it to identify its purpose.
+
+ The following lists each subnode and their corresponding
+ required interrupt names:
+
+ qcom,chgr:
+ - chg-tcc-thr: Triggers on charge completion.
+ - chg-taper-thr: Triggers on the taper charge
+					transition.
+ - chg-inhibit: Notifies on battery voltage
+ being too high to resume
+ charging.
+ - chg-p2f-thr: Triggers on transitioning from
+ precharge to fastcharge.
+ - chg-rechg-thr: Triggers on battery voltage
+ falling below the resume
+ threshold.
+
+ qcom,bat-if:
+ - batt-hot: Triggers on battery temperature
+ hitting the hot threshold.
+ Charging stops.
+ - batt-warm: Triggers on battery temperature
+ hitting the warm threshold.
+ Charging current is reduced.
+ - batt-cool: Triggers on battery temperature
+ hitting the cool threshold.
+					Charging current is reduced.
+ - batt-cold: Triggers on battery temperature
+ hitting the cold threshold.
+ Charging stops.
+ - batt-missing: Battery missing status
+ interrupt.
+ - batt-low: Triggers on battery voltage
+ falling across a low threshold.
+
+ qcom,usb-chgpth:
+ - usbin-uv: USB input voltage falls below a
+ valid threshold.
+ - usbin-src-det: USB automatic source detection
+ finishes.
+
+ qcom,dc-chgpth:
+ - dcin-uv: DC input voltage falls below a
+ valid threshold.
+
+ qcom,chgr-misc:
+ - wdog-timeout-mins: Charger watchdog timer
+ interrupt.
+ - temp-shutdown: Triggers when charger goes
+ overtemp and causes a shutdown.
+ - power-ok: Triggers when the charger
+ switcher turns on or off.
+
+Regulator Subnodes:
+- qcom,smbcharger-boost-otg A subnode for a regulator device that turns on
+ the charger boost for OTG operation.
+- qcom,smbcharger-external-otg A subnode for a regulator device that switches
+ off charging and the USB input charge path
+ in order to allow an external regulator to
+ operate. This can be used in place of the
+ qcom,smbcharger-boost-otg if an external boost
+ is available.
+
+Regulator Sub node required properties:
+- regulator-name A name string for the regulator in question
+
+Optional Properties:
+- qcom,battery-psy-name The name of the main battery power supply that
+ the charger will register. Failing to define
+ this property will default the name to
+ "battery".
+- qcom,bms-psy-name The psy name to use for reporting battery
+ capacity. If left unspecified the capacity uses
+ a preprogrammed default value of 50.
+- qcom,float-voltage-mv Float Voltage in mV - the maximum voltage up
+ to which the battery is charged. Supported
+				range is 3600mV to 4500mV.
+- qcom,float-voltage-comp Specifies the JEITA float voltage compensation.
+ Value ranges from 0 to 63.
+- qcom,fastchg-current-ma Specifies the fast charge current in mA. Supported
+ range is from 300mA to 3000mA.
+- qcom,fastchg-current-comp Specifies the fast charge current compensation in
+ mA. Supported values are 250, 700, 900 and 1200mA.
+- qcom,charging-timeout-mins Maximum duration in minutes that a single
+ charge cycle may last. Supported values are:
+ 0, 192, 384, 768, and 1536. A value of 0
+ means that no charge cycle timeout is used and
+ charging can continue indefinitely.
+- qcom,precharging-timeout-mins Maximum duration in minutes that a single
+ precharge cycle may last. Supported values
+ are: 0, 24, 48, 96, 192. A value of 0 means
+ that no precharge cycle timeout is used and
+ charging can continue indefinitely. Note that
+ the qcom,charging-timeout-mins property must
+ be specified in order for this to take effect.
+- qcom,dc-psy-type The type of charger connected to the DC path.
+ Can be "Mains", "Wireless" or "Wipower"
+- qcom,dc-psy-ma The current in mA dc path can support. Must be
+ specified if dc-psy-type is specified. Valid
+ range 300mA to 2000mA.
+- qcom,dcin-vadc The phandle to pmi8994 voltage adc. The ADC is
+ used to get notifications when the DCIN voltage
+ crosses a programmed min/max threshold. This is
+ used to make configurations for optimized power
+ draw for Wipower.
+- qcom,wipower-div2-ilim-map
+- qcom,wipower-pt-ilim-map
+- qcom,wipower-default-ilim-map
+ Array of 5 elements to indicate the voltage ranges and their corresponding
+ current limits. The 5 elements with index [0..4] are:
+ [0] => voltage_low in uV
+ [1] => voltage_high in uV
+ [2] => current limit for pass through in mA
+ [3] => current limit for div2 mode dcin low voltage in mA
+ [4] => current limit for div2 mode dcin high voltage in mA
+ The div2 and pt tables indicate the current limits
+ to use when Wipower is operating in divide_by_2 mode
+ and pass through mode respectively.
+ The default table is used when the voltage ranges
+ are beyond the ones specified in the mapping table.
+ Note that if dcin-vadc or any of these mapping
+ tables are not specified, dynamic dcin input
+ is disabled.
+- qcom,charging-disabled Set this if charging should be disabled in the
+ build by default.
+- qcom,resume-delta-mv Specifies the minimum voltage drop in
+ millivolts below the float voltage that is
+ required in order to initiate a new charging
+ cycle. Supported values are: 50, 100, 200 and
+ 300mV.
+- qcom,chg-inhibit-en Boolean that indicates whether the charge inhibit
+ feature needs to be enabled. If this is not set,
+ charge inhibit feature is disabled by default.
+- qcom,chg-inhibit-fg Indicates if the recharge threshold source has
+ to be Fuel gauge ADC. If this is not set, it
+ will be analog sensor by default.
+- qcom,bmd-algo-disabled Indicates if the battery missing detection
+ algorithm is disabled. If this node is present
+ SMB uses the THERM pin for battery missing
+ detection.
+- qcom,charge-unknown-battery Boolean that indicates whether an unknown
+ battery without a matching profile will be
+ charged. If this is not set, if the fuel gauge
+ does not recognize the battery based on its
+ battery ID, the charger will not start
+ charging.
+- qcom,bmd-pin-src A string that indicates the source pin for the
+				battery missing detection. This can be either:
+ - "bpd_none"
+ battery is considered always present
+ - "bpd_id"
+ battery id pin is used
+ - "bpd_thm"
+ battery therm pin is used
+ - "bpd_thm_id"
+ both pins are used (battery is
+ considered missing if either pin is
+ floating).
+- qcom,iterm-ma Specifies the termination current to indicate
+ end-of-charge. Possible values in mA:
+ 50, 100, 150, 200, 250, 300, 500, 600.
+- qcom,iterm-disabled Disables the termination current feature. This
+ is a boolean property.
+- otg-parent-supply A phandle to an external boost regulator for
+ OTG if it exists.
+- qcom,thermal-mitigation: Array of input current limit values for
+ different system thermal mitigation levels.
+				This should be a flat array that denotes the
+ maximum charge current in mA for each thermal
+ level.
+- qcom,rparasitics-uohm: The parasitic resistance of the board following
+ the line from the battery connectors through
+ vph_power. This is used to calculate maximum
+ available current of the battery.
+- qcom,vled-max-uv: The maximum input voltage of the flash leds.
+ This is used to calculate maximum available
+ current of the battery.
+- qcom,autoadjust-vfloat A boolean property that when set, makes the
+ driver automatically readjust vfloat using the
+ fuel gauge ADC readings to make charging more
+ accurate.
+- qcom,jeita-temp-hard-limit	When present, this property enables or
+				disables the jeita temperature hard limit
+				based on the value 1 or 0. Specify 0 if the
+				jeita temp hard limit needs to be disabled.
+				If it is not present, the jeita temperature
+				hard limit will be based on what the
+				bootloader had set earlier.
+- qcom,low-volt-dcin: A boolean property which upon set will enable the
+ AICL deglitch configuration dynamically. This needs
+ to be set if the DCIN supply is going to be less
+ than or equal to 5V.
+- qcom,force-aicl-rerun: A boolean property which upon set will enable the
+ AICL rerun by default along with the deglitch time
+ configured to long interval (20 ms). Also, specifying
+ this property will not adjust the AICL deglitch time
+ dynamically for handling the battery over-voltage
+ oscillations when the charger is headroom limited.
+- qcom,aicl-rerun-period-s	If force-aicl-rerun is on, this property
+				dictates how often AICL is rerun, in seconds.
+				Possible values are 45, 90, 180, and 360.
+- qcom,ibat-ocp-threshold-ua Maximum current before the battery will trigger
+ overcurrent protection. Use the recommended
+ battery pack value minus some margin.
+- qcom,soft-vfloat-comp-disabled Set this property when the battery is
+ powered via external source and could
+ go above the float voltage.
+- qcom,parallel-usb-min-current-ma Minimum current drawn by the primary
+ charger before enabling the parallel
+ charger if one exists. Do not define
+ this property if no parallel chargers
+ exist.
+- qcom,parallel-usb-9v-min-current-ma Minimum current drawn by the primary
+ charger before enabling the parallel
+ charger if one exists. This property
+ applies only for 9V chargers.
+- qcom,parallel-allowed-lowering-ma Acceptable current drop from the initial limit
+ to keep parallel charger activated. If the
+ charger current reduces beyond this threshold
+ parallel charger is disabled. Must be specified
+ if parallel charger is used.
+- qcom,parallel-main-chg-fcc-percent Percentage of the fast charge current allotted to the
+ main charger when parallel charging is enabled and
+ operational. If this property is not defined, the
+ driver defaults to a 50%/50% split between the main
+ and parallel charger.
+- qcom,parallel-main-chg-icl-percent Percentage of the input current allotted to the
+ main charger when parallel charging is enabled and
+ operational. If this property is not defined, the
+ driver defaults to a 60%/40% split between the main
+ and parallel charger.
+- qcom,battery-data Points to the phandle of node which
+ contains the battery-profiles supported
+ by the charger/FG.
+- qcom,chg-led-support A bool property to support the charger led feature.
+- qcom,chg-led-sw-controls A bool property to allow the software to control
+ the charger led without a valid charger.
+- qcom,skip-usb-notification	A boolean property to be used when USB
+				presence and type are detected by other means.
+				Especially true on liquid hardware, where USB
+				presence is detected based on a GPIO.
+- qcom,skip-usb-suspend-for-fake-battery A boolean property to skip
+ suspending USB path for fake
+ battery.
+- qcom,vchg_sns-vadc Phandle of the VADC node.
+- qcom,vchg-adc-channel-id The ADC channel to which the VCHG is routed.
+
+Example:
+ qcom,qpnp-smbcharger {
+ compatible = "qcom,qpnp-smbcharger";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ qcom,iterm-ma = <100>;
+ qcom,float-voltage-mv = <4200>;
+ qcom,resume-delta-mv = <100>;
+ qcom,bmd-pin-src = "bpd_thm_id";
+ qcom,dc-psy-type = "Mains";
+ qcom,dc-psy-ma = <1500>;
+ qcom,bms-psy-name = "bms";
+ qcom,battery-psy-name = "battery";
+ qcom,thermal-mitigation = <1500 700 600 325>;
+ qcom,vchg_sns-vadc = <&pmi8950_vadc>;
+ qcom,vchg-adc-channel-id = <3>;
+
+ qcom,chgr@1000 {
+ reg = <0x1000 0x100>;
+ interrupts = <0x2 0x10 0x0>,
+ <0x2 0x10 0x1>,
+ <0x2 0x10 0x2>,
+ <0x2 0x10 0x3>,
+ <0x2 0x10 0x4>,
+ <0x2 0x10 0x5>,
+ <0x2 0x10 0x6>,
+ <0x2 0x10 0x7>;
+
+ interrupt-names = "chg-error",
+ "chg-inhibit",
+ "chg-prechg-sft",
+ "chg-complete-chg-sft",
+ "chg-p2f-thr",
+ "chg-rechg-thr",
+ "chg-taper-thr",
+ "chg-tcc-thr";
+ };
+
+ qcom,otg@1100 {
+ reg = <0x1100 0x100>;
+ };
+
+ qcom,bat-if@1200 {
+ reg = <0x1200 0x100>;
+ interrupts = <0x2 0x12 0x0>,
+ <0x2 0x12 0x1>,
+ <0x2 0x12 0x2>,
+ <0x2 0x12 0x3>,
+ <0x2 0x12 0x4>,
+ <0x2 0x12 0x5>,
+ <0x2 0x12 0x6>,
+ <0x2 0x12 0x7>;
+
+ interrupt-names = "batt-hot",
+ "batt-warm",
+ "batt-cold",
+ "batt-cool",
+ "batt-ov",
+ "batt-low",
+ "batt-missing",
+ "batt-term-missing";
+ };
+
+ qcom,usb-chgpth@1300 {
+ reg = <0x1300 0x100>;
+ interrupts = <0x2 0x13 0x0>,
+ <0x2 0x13 0x1>,
+ <0x2 0x13 0x2>,
+ <0x2 0x13 0x3>,
+ <0x2 0x13 0x4>,
+ <0x2 0x13 0x5>,
+ <0x2 0x13 0x6>;
+
+ interrupt-names = "usbin-uv",
+ "usbin-ov",
+ "usbin-src-det",
+ "otg-fail",
+ "otg-oc",
+ "aicl-done",
+ "usbid-change";
+ };
+
+ qcom,dc-chgpth@1400 {
+ reg = <0x1400 0x100>;
+ interrupts = <0x2 0x14 0x0>,
+ <0x2 0x14 0x1>;
+
+ interrupt-names = "dcin-uv",
+ "dcin-ov";
+ };
+
+ qcom,chgr-misc@1600 {
+ reg = <0x1600 0x100>;
+ interrupts = <0x2 0x16 0x0>,
+ <0x2 0x16 0x1>,
+ <0x2 0x16 0x2>,
+ <0x2 0x16 0x3>,
+ <0x2 0x16 0x4>,
+ <0x2 0x16 0x5>;
+
+ interrupt-names = "power-ok",
+ "temp-shutdown",
+ "wdog-timeout",
+ "flash-fail",
+ "otst2",
+ "otst3";
+ };
+ };
Optional properties:
- assert-falling-edge: when present, assert is indicated by a falling edge
(instead of by a rising edge)
+- use-system-time-ts: use the system time via pps_get_ts as the timestamp.
+                      If this is not defined, the timestamp will come
+                      from the monotonic boot time.
Example:
pps {
- qcom,qpnp-lab-init-lcd-voltage: The default output voltage when LAB regulator
is configured in lcd mode.
- qcom,qpnp-lab-ps-threshold: The threshold in mA of Pulse Skip Mode for
- LAB regulator. Supported values are 20, 30,
- 40 and 50.
+ LAB regulator. Supported values for
+ PMI8994/6 are 20, 30, 40 and 50.
+ Supported values for PMI8998/PM660A are
+ 50, 60, 70 and 80.
- interrupts: Specify the interrupts as per the interrupt
encoding.
Currently "lab-vreg-ok" is required and "lab-sc_err"
qcom,smmu-enabled;
};
+* msm-audio-ion-vm
+
+Required properties:
+ - compatible : "qcom,msm-audio-ion-vm"
+
+Optional properties:
+ - qcom,smmu-enabled:
+	Some MSM targets have an SMMU in the ADSP, while others do not.
+	The audio library introduces a wrapper for the ION APIs; the
+	wrapper needs to know whether the ADSP has an SMMU in order to
+	handle the ION APIs differently. The presence of this property
+	means the ADSP has an SMMU.
+
+Example:
+
+qcom,msm-audio-ion-vm {
+ compatible = "qcom,msm-audio-ion-vm;
+ qcom,smmu-enabled;
+};
+
* MSM8994 ASoC Machine driver
Required properties:
atmel Atmel Corporation
auo AU Optronics Corporation
avago Avago Technologies
+avia	AVIA Semiconductor
avic Shanghai AVIC Optoelectronics Co., Ltd.
axis Axis Communications AB
bosch Bosch Sensortec GmbH
collection, triggered in background when I/O subsystem is
idle. If background_gc=on, it will turn on the garbage
collection and if background_gc=off, garbage collection
- will be truned off. If background_gc=sync, it will turn
+ will be turned off. If background_gc=sync, it will turn
on synchronous garbage collection running in background.
Default value for this option is on. So garbage
collection is on by default.
disable_roll_forward Disable the roll-forward recovery routine
norecovery Disable the roll-forward recovery routine, mounted read-
only (i.e., -o ro,disable_roll_forward)
-discard Issue discard/TRIM commands when a segment is cleaned.
+discard/nodiscard Enable/disable real-time discard in f2fs, if discard is
+ enabled, f2fs will issue discard/TRIM commands when a
+ segment is cleaned.
no_heap Disable heap-style segment allocation which finds free
segments for data from the beginning of main area, while
for node from the end of main area.
disable_ext_identify Disable the extension list configured by mkfs, so f2fs
			is not aware of cold files such as media files.
inline_xattr Enable the inline xattrs feature.
+noinline_xattr Disable the inline xattrs feature.
inline_data Enable the inline data feature: New created small(<~3.4k)
files can be written into inode block.
inline_dentry Enable the inline dir feature: data in new created
directory entries can be written into inode block. The
space of inode block which is used to store inline
dentries is limited to ~3.4k.
+noinline_dentry        Disable the inline dentry feature.
flush_merge Merge concurrent cache_flush commands as much as possible
to eliminate redundant command issues. If the underlying
device handles the cache_flush command relatively slowly,
as many as extent which map between contiguous logical
address and physical address per inode, resulting in
increasing the cache hit ratio. Set by default.
-noextent_cache Diable an extent cache based on rb-tree explicitly, see
+noextent_cache Disable an extent cache based on rb-tree explicitly, see
the above extent_cache mount option.
noinline_data Disable the inline data feature, inline data feature is
enabled by default.
+data_flush             Enable data flushing before checkpoint in order to
+                       persist data of regular files and symlinks.
+mode=%s Control block allocation mode which supports "adaptive"
+ and "lfs". In "lfs" mode, there should be no random
+ writes towards main area.
+io_bits=%u Set the bit size of write IO requests. It should be set
+ with "mode=lfs".
+usrquota Enable plain user disk quota accounting.
+grpquota Enable plain group disk quota accounting.
+prjquota Enable plain project quota accounting.
+usrjquota=<file> Appoint specified file and type during mount, so that quota
+grpjquota=<file> information can be properly updated during recovery flow,
+prjjquota=<file> <quota file>: must be in root directory;
+jqfmt=<quota type> <quota type>: [vfsold,vfsv0,vfsv1].
+offusrjquota           Turn off user journalled quota.
+offgrpjquota           Turn off group journalled quota.
+offprjjquota           Turn off project journalled quota.
+quota Enable plain user disk quota accounting.
+noquota                Disable all plain disk quota options.
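+
+For example, a mount enabling LFS-style block allocation together with
+plain user/group quota accounting might look like the following (the
+device and mount point are hypothetical):
+
+# mount -t f2fs -o mode=lfs,usrquota,grpquota /dev/sdb1 /mnt/f2fs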
================================================================================
DEBUGFS ENTRIES
policy for garbage collection. Setting gc_idle = 0
(default) will disable this option. Setting
gc_idle = 1 will select the Cost Benefit approach
- & setting gc_idle = 2 will select the greedy aproach.
+ & setting gc_idle = 2 will select the greedy approach.
+
+ gc_urgent              This parameter controls triggering background GCs
+                        urgently or not. Setting gc_urgent = 0 [default]
+                        restores the default behavior, while setting it to
+                        1 makes the background thread start doing GC at the
+                        given gc_urgent_sleep_time interval.
+
+ gc_urgent_sleep_time This parameter controls sleep time for gc_urgent.
+ 500 ms is set by default. See above gc_urgent.
reclaim_segments This parameter controls the number of prefree
segments to be reclaimed. If the number of prefree
file. Each file is dump_ssa and dump_sit.
The dump.f2fs is used to debug on-disk data structures of the f2fs filesystem.
-It shows on-disk inode information reconized by a given inode number, and is
+It shows on-disk inode information recognized by a given inode number, and is
able to dump all the SSA and SIT entries into predefined files, ./dump_ssa and
./dump_sit respectively.
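
For example, with an f2fs image on a hypothetical /dev/sdx:

# dump.f2fs -i [ino] /dev/sdx		(dump the inode [ino])
# dump.f2fs -s 0~-1 /dev/sdx		(dump the SIT entries)
# dump.f2fs -a 0~-1 /dev/sdx		(dump the SSA entries)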
# 3) Generate asm-offsets.h (may need bounds.h and timeconst.h)
# 4) Check for missing system calls
-# Default sed regexp - multiline due to syntax constraints
-define sed-y
- "/^->/{s:->#\(.*\):/* \1 */:; \
- s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
- s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
- s:->::; p;}"
-endef
-
-# Use filechk to avoid rebuilds when a header changes, but the resulting file
-# does not
-define filechk_offsets
- (set -e; \
- echo "#ifndef $2"; \
- echo "#define $2"; \
- echo "/*"; \
- echo " * DO NOT MODIFY."; \
- echo " *"; \
- echo " * This file was generated by Kbuild"; \
- echo " */"; \
- echo ""; \
- sed -ne $(sed-y); \
- echo ""; \
- echo "#endif" )
-endef
-
#####
# 1) Generate bounds.h
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 85
+SUBLEVEL = 94
EXTRAVERSION =
NAME = Blurry Fish Butt
HOSTCC = gcc
HOSTCXX = g++
-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
+HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
HOSTCXXFLAGS = -O2
ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS += -Os
+KBUILD_CFLAGS += $(call cc-option,-Oz,-Os)
else
ifdef CONFIG_PROFILE_ALL_BRANCHES
KBUILD_CFLAGS += -O2
KBUILD_CFLAGS += $(stackp-flag)
ifeq ($(cc-name),clang)
+ifneq ($(CROSS_COMPILE),)
+CLANG_TRIPLE ?= $(CROSS_COMPILE)
+CLANG_TARGET := -target $(notdir $(CLANG_TRIPLE:%-=%))
+GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
+endif
+ifneq ($(GCC_TOOLCHAIN),)
+CLANG_GCC_TC := -gcc-toolchain $(GCC_TOOLCHAIN)
+endif
+KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
-KBUILD_CPPFLAGS += $(call cc-option,-Wno-unknown-warning-option,)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
+KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
+KBUILD_CFLAGS += $(call cc-disable-warning, duplicate-decl-specifier)
# Quiet clang warning: comparison of unsigned expression < 0 is always false
KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
# See modpost pattern 2
KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
+KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
+KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
else
# These warnings generated too much noise in a regular build.
@echo ' (default: $$(INSTALL_MOD_PATH)/lib/firmware)'
@echo ' dir/ - Build all files in dir and below'
@echo ' dir/file.[ois] - Build specified target only'
+ @echo ' dir/file.ll - Build the LLVM assembly file'
+ @echo ' (requires compiler support for LLVM assembly generation)'
@echo ' dir/file.lst - Build specified mixed source/assembly target only'
@echo ' (requires a recent binutils and recent build (System.map))'
@echo ' dir/file.ko - Build module including final link'
-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
-o -name '*.symtypes' -o -name 'modules.order' \
-o -name modules.builtin -o -name '.tmp_*.o.*' \
+ -o -name '*.ll' \
-o -name '*.gcno' \) -type f -print | xargs rm -f
# Generate tags for editors
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
%.symtypes: %.c prepare scripts FORCE
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+%.ll: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
# Modules
/: prepare scripts FORCE
+++ /dev/null
-The files in this directory are meant to be used as a base for an Android
-kernel config. All devices should have the options in android-base.cfg enabled.
-While not mandatory, the options in android-recommended.cfg enable advanced
-Android features.
-
-Assuming you already have a minimalist defconfig for your device, a possible
-way to enable these options would be:
-
- ARCH=<arch> scripts/kconfig/merge_config.sh <path_to>/<device>_defconfig android/configs/android-base.cfg android/configs/android-recommended.cfg
-
-This will generate a .config that can then be used to save a new defconfig or
-compile a new kernel with Android features enabled.
-
-Because there is no tool to consistently generate these config fragments,
-lets keep them alphabetically sorted instead of random.
+++ /dev/null
-# KEEP ALPHABETICALLY SORTED
-CONFIG_ARMV8_DEPRECATED=y
-CONFIG_CP15_BARRIER_EMULATION=y
-CONFIG_SETEND_EMULATION=y
-CONFIG_SWP_EMULATION=y
+++ /dev/null
-# KEEP ALPHABETICALLY SORTED
-# CONFIG_DEVKMEM is not set
-# CONFIG_DEVMEM is not set
-# CONFIG_FHANDLE is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_NFSD is not set
-# CONFIG_NFS_FS is not set
-# CONFIG_OABI_COMPAT is not set
-# CONFIG_SYSVIPC is not set
-# CONFIG_USELIB is not set
-CONFIG_ANDROID=y
-CONFIG_ANDROID_BINDER_IPC=y
-CONFIG_ANDROID_BINDER_DEVICES=binder,hwbinder,vndbinder
-CONFIG_ANDROID_LOW_MEMORY_KILLER=y
-CONFIG_ASHMEM=y
-CONFIG_AUDIT=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_CGROUPS=y
-CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_SCHED=y
-CONFIG_DEFAULT_SECURITY_SELINUX=y
-CONFIG_EMBEDDED=y
-CONFIG_FB=y
-CONFIG_HARDENED_USERCOPY=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_INET6_AH=y
-CONFIG_INET6_ESP=y
-CONFIG_INET6_IPCOMP=y
-CONFIG_INET=y
-CONFIG_INET_DIAG_DESTROY=y
-CONFIG_INET_ESP=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_IP6_NF_FILTER=y
-CONFIG_IP6_NF_IPTABLES=y
-CONFIG_IP6_NF_MANGLE=y
-CONFIG_IP6_NF_RAW=y
-CONFIG_IP6_NF_TARGET_REJECT=y
-CONFIG_IPV6=y
-CONFIG_IPV6_MIP6=y
-CONFIG_IPV6_MULTIPLE_TABLES=y
-CONFIG_IPV6_OPTIMISTIC_DAD=y
-CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_NF_ARPFILTER=y
-CONFIG_IP_NF_ARPTABLES=y
-CONFIG_IP_NF_ARP_MANGLE=y
-CONFIG_IP_NF_FILTER=y
-CONFIG_IP_NF_IPTABLES=y
-CONFIG_IP_NF_MANGLE=y
-CONFIG_IP_NF_MATCH_AH=y
-CONFIG_IP_NF_MATCH_ECN=y
-CONFIG_IP_NF_MATCH_TTL=y
-CONFIG_IP_NF_NAT=y
-CONFIG_IP_NF_RAW=y
-CONFIG_IP_NF_SECURITY=y
-CONFIG_IP_NF_TARGET_MASQUERADE=y
-CONFIG_IP_NF_TARGET_NETMAP=y
-CONFIG_IP_NF_TARGET_REDIRECT=y
-CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_NET=y
-CONFIG_NETDEVICES=y
-CONFIG_NETFILTER=y
-CONFIG_NETFILTER_XT_MATCH_COMMENT=y
-CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
-CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
-CONFIG_NETFILTER_XT_MATCH_HELPER=y
-CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
-CONFIG_NETFILTER_XT_MATCH_LENGTH=y
-CONFIG_NETFILTER_XT_MATCH_LIMIT=y
-CONFIG_NETFILTER_XT_MATCH_MAC=y
-CONFIG_NETFILTER_XT_MATCH_MARK=y
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
-CONFIG_NETFILTER_XT_MATCH_POLICY=y
-CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
-CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
-CONFIG_NETFILTER_XT_MATCH_QUOTA=y
-CONFIG_NETFILTER_XT_MATCH_SOCKET=y
-CONFIG_NETFILTER_XT_MATCH_STATE=y
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
-CONFIG_NETFILTER_XT_MATCH_STRING=y
-CONFIG_NETFILTER_XT_MATCH_TIME=y
-CONFIG_NETFILTER_XT_MATCH_U32=y
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
-CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
-CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
-CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
-CONFIG_NETFILTER_XT_TARGET_MARK=y
-CONFIG_NETFILTER_XT_TARGET_NFLOG=y
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
-CONFIG_NETFILTER_XT_TARGET_SECMARK=y
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
-CONFIG_NETFILTER_XT_TARGET_TPROXY=y
-CONFIG_NETFILTER_XT_TARGET_TRACE=y
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_CLS_U32=y
-CONFIG_NET_EMATCH=y
-CONFIG_NET_EMATCH_U32=y
-CONFIG_NET_KEY=y
-CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_HTB=y
-CONFIG_NF_CONNTRACK=y
-CONFIG_NF_CONNTRACK_AMANDA=y
-CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CONNTRACK_FTP=y
-CONFIG_NF_CONNTRACK_H323=y
-CONFIG_NF_CONNTRACK_IPV4=y
-CONFIG_NF_CONNTRACK_IPV6=y
-CONFIG_NF_CONNTRACK_IRC=y
-CONFIG_NF_CONNTRACK_NETBIOS_NS=y
-CONFIG_NF_CONNTRACK_PPTP=y
-CONFIG_NF_CONNTRACK_SANE=y
-CONFIG_NF_CONNTRACK_SECMARK=y
-CONFIG_NF_CONNTRACK_TFTP=y
-CONFIG_NF_CT_NETLINK=y
-CONFIG_NF_CT_PROTO_DCCP=y
-CONFIG_NF_CT_PROTO_SCTP=y
-CONFIG_NF_CT_PROTO_UDPLITE=y
-CONFIG_NF_NAT=y
-CONFIG_NO_HZ=y
-CONFIG_PACKET=y
-CONFIG_PM_AUTOSLEEP=y
-CONFIG_PM_WAKELOCKS=y
-CONFIG_PPP=y
-CONFIG_PPPOLAC=y
-CONFIG_PPPOPNS=y
-CONFIG_PPP_BSDCOMP=y
-CONFIG_PPP_DEFLATE=y
-CONFIG_PPP_MPPE=y
-CONFIG_PREEMPT=y
-CONFIG_PROFILING=y
-CONFIG_RANDOMIZE_BASE=y
-CONFIG_RTC_CLASS=y
-CONFIG_RT_GROUP_SCHED=y
-CONFIG_SECCOMP=y
-CONFIG_SECURITY=y
-CONFIG_SECURITY_NETWORK=y
-CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
-CONFIG_SECURITY_SELINUX=y
-CONFIG_STAGING=y
-CONFIG_SYNC=y
-CONFIG_TUN=y
-CONFIG_UID_SYS_STATS=y
-CONFIG_UNIX=y
-CONFIG_USB_CONFIGFS=y
-CONFIG_USB_CONFIGFS_F_ACC=y
-CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
-CONFIG_USB_CONFIGFS_F_FS=y
-CONFIG_USB_CONFIGFS_F_MIDI=y
-CONFIG_USB_CONFIGFS_F_MTP=y
-CONFIG_USB_CONFIGFS_F_PTP=y
-CONFIG_USB_CONFIGFS_UEVENT=y
-CONFIG_USB_GADGET=y
-CONFIG_XFRM_USER=y
--- /dev/null
+#!/bin/sh
+
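+# Fetch the android-4.4 kernel config fragments from the AOSP
+# kernel/configs repository and unpack them into the current directory.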
+curl https://android.googlesource.com/kernel/configs/+archive/master/android-4.4.tar.gz | tar xzv
+
+++ /dev/null
-# KEEP ALPHABETICALLY SORTED
-# CONFIG_AIO is not set
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_NF_CONNTRACK_SIP is not set
-# CONFIG_PM_WAKELOCKS_GC is not set
-# CONFIG_VT is not set
-CONFIG_ANDROID_TIMED_GPIO=y
-CONFIG_ARM64_SW_TTBR0_PAN=y
-CONFIG_ARM_KERNMEM_PERMS=y
-CONFIG_ARM64_SW_TTBR0_PAN=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_BLK_DEV_DM=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_CC_STACKPROTECTOR_STRONG=y
-CONFIG_COMPACTION=y
-CONFIG_CPU_SW_DOMAIN_PAN=y
-CONFIG_DEBUG_RODATA=y
-CONFIG_DM_CRYPT=y
-CONFIG_DM_UEVENT=y
-CONFIG_DM_VERITY=y
-CONFIG_DM_VERITY_FEC=y
-CONFIG_DRAGONRISE_FF=y
-CONFIG_ENABLE_DEFAULT_TRACERS=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_SECURITY=y
-CONFIG_FUSE_FS=y
-CONFIG_GREENASIA_FF=y
-CONFIG_HIDRAW=y
-CONFIG_HID_A4TECH=y
-CONFIG_HID_ACRUX=y
-CONFIG_HID_ACRUX_FF=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
-CONFIG_HID_DRAGONRISE=y
-CONFIG_HID_ELECOM=y
-CONFIG_HID_EMS_FF=y
-CONFIG_HID_EZKEY=y
-CONFIG_HID_GREENASIA=y
-CONFIG_HID_GYRATION=y
-CONFIG_HID_HOLTEK=y
-CONFIG_HID_KENSINGTON=y
-CONFIG_HID_KEYTOUCH=y
-CONFIG_HID_KYE=y
-CONFIG_HID_LCPOWER=y
-CONFIG_HID_LOGITECH=y
-CONFIG_HID_LOGITECH_DJ=y
-CONFIG_HID_MAGICMOUSE=y
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
-CONFIG_HID_MULTITOUCH=y
-CONFIG_HID_NTRIG=y
-CONFIG_HID_ORTEK=y
-CONFIG_HID_PANTHERLORD=y
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_PICOLCD=y
-CONFIG_HID_PRIMAX=y
-CONFIG_HID_PRODIKEYS=y
-CONFIG_HID_ROCCAT=y
-CONFIG_HID_SAITEK=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SMARTJOYPLUS=y
-CONFIG_HID_SONY=y
-CONFIG_HID_SPEEDLINK=y
-CONFIG_HID_SUNPLUS=y
-CONFIG_HID_THRUSTMASTER=y
-CONFIG_HID_TIVO=y
-CONFIG_HID_TOPSEED=y
-CONFIG_HID_TWINHAN=y
-CONFIG_HID_UCLOGIC=y
-CONFIG_HID_WACOM=y
-CONFIG_HID_WALTOP=y
-CONFIG_HID_WIIMOTE=y
-CONFIG_HID_ZEROPLUS=y
-CONFIG_HID_ZYDACRON=y
-CONFIG_INPUT_EVDEV=y
-CONFIG_INPUT_GPIO=y
-CONFIG_INPUT_JOYSTICK=y
-CONFIG_INPUT_KEYCHORD=y
-CONFIG_INPUT_KEYRESET=y
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_TABLET=y
-CONFIG_INPUT_UINPUT=y
-CONFIG_ION=y
-CONFIG_JOYSTICK_XPAD=y
-CONFIG_JOYSTICK_XPAD_FF=y
-CONFIG_JOYSTICK_XPAD_LEDS=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_KSM=y
-CONFIG_LOGIG940_FF=y
-CONFIG_LOGIRUMBLEPAD2_FF=y
-CONFIG_LOGITECH_FF=y
-CONFIG_MD=y
-CONFIG_MEDIA_SUPPORT=y
-CONFIG_MEMORY_STATE_TIME=y
-CONFIG_MSDOS_FS=y
-CONFIG_PANIC_TIMEOUT=5
-CONFIG_PANTHERLORD_FF=y
-CONFIG_PERF_EVENTS=y
-CONFIG_PM_DEBUG=y
-CONFIG_PM_RUNTIME=y
-CONFIG_PM_WAKELOCKS_LIMIT=0
-CONFIG_POWER_SUPPLY=y
-CONFIG_PSTORE=y
-CONFIG_PSTORE_CONSOLE=y
-CONFIG_PSTORE_RAM=y
-CONFIG_QFMT_V2=y
-CONFIG_QUOTA=y
-CONFIG_QUOTACTL=y
-CONFIG_QUOTA_NETLINK_INTERFACE=y
-CONFIG_QUOTA_TREE=y
-CONFIG_SCHEDSTATS=y
-CONFIG_SMARTJOYPLUS_FF=y
-CONFIG_SND=y
-CONFIG_SOUND=y
-CONFIG_SUSPEND_TIME=y
-CONFIG_TABLET_USB_ACECAD=y
-CONFIG_TABLET_USB_AIPTEK=y
-CONFIG_TABLET_USB_GTCO=y
-CONFIG_TABLET_USB_HANWANG=y
-CONFIG_TABLET_USB_KBTAB=y
-CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
-CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_TASK_XACCT=y
-CONFIG_TIMER_STATS=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_UHID=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_HIDDEV=y
-CONFIG_USB_USBNET=y
-CONFIG_VFAT_FS=y
#ifndef _ALPHA_TYPES_H
#define _ALPHA_TYPES_H
-#include <asm-generic/int-ll64.h>
+#include <uapi/asm/types.h>
#endif /* _ALPHA_TYPES_H */
* need to be careful to avoid a name clashes.
*/
-#ifndef __KERNEL__
+/*
+ * This is here because we used to use l64 for alpha
+ * and we don't want to impact user mode with our change to ll64
+ * in the kernel.
+ *
+ * However, some user programs are fine with this. They can
+ * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here.
+ */
+#if !defined(__SANE_USERSPACE_TYPES__) && !defined(__KERNEL__)
#include <asm-generic/int-l64.h>
+#else
+#include <asm-generic/int-ll64.h>
#endif
#endif /* _UAPI_ALPHA_TYPES_H */
lr r0, [efa]
mov r1, sp
+ ; hardware auto-disables MMU, re-enable it to allow kernel vaddr
+ ; access for say stack unwinding of modules for crash dumps
+ lr r3, [ARC_REG_PID]
+ or r3, r3, MMU_ENABLE
+ sr r3, [ARC_REG_PID]
+
lsr r3, r2, 8
bmsk r3, r3, 7
brne r3, ECR_C_MCHK_DUP_TLB, 1f
local_irq_save(flags);
- /* re-enable the MMU */
- write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));
-
/* loop thru all sets of TLB */
for (set = 0; set < mmu->sets; set++) {
used instead of the auto-probing which utilizes the register.
config REMAP_VECTORS_TO_RAM
- bool 'Install vectors to the beginning of RAM' if DRAM_BASE
- depends on DRAM_BASE
+ bool 'Install vectors to the beginning of RAM'
help
The kernel needs to change the hardware exception vectors.
In nommu mode, the hardware exception vectors are normally
KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
-CHECKFLAGS += -D__arm__
+CHECKFLAGS += -D__arm__ -m32
#Default value
head-y := arch/arm/kernel/head$(MMUEXT).o
interrupts = <25>;
#dma-channels = <32>;
#dma-cells = <2>;
+ #dma-requests = <75>;
status = "okay";
};
interrupts = <25>;
#dma-channels = <32>;
#dma-cells = <2>;
+ #dma-requests = <100>;
status = "okay";
};
apq8096-v3-pmi8996-mdm9x55-slimbus-mtp.dtb \
apq8096-v3-pmi8996-dragonboard.dtb
-dtb-$(CONFIG_MSM_GVM_QUIN) += vplatform-lfv-msm8996.dtb
+dtb-$(CONFIG_MSM_GVM_QUIN) += vplatform-lfv-msm8996-telematics.dtb \
+ vplatform-lfv-msm8996-ivi.dtb
ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
dtbo-$(CONFIG_ARCH_MSM8998) += \
<&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>,
<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>,
<&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>,
+ <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_rx_1>,
+ <&dai_sec_tdm_rx_2>, <&dai_sec_tdm_rx_3>,
<&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
<&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
"msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871",
"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866",
"msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870",
+ "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36882",
+ "msm-dai-q6-tdm.36884", "msm-dai-q6-tdm.36886",
"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
"msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
i2c@75b6000 { /* BLSP8 */
/* ADV7533 HDMI Bridge Chip removed on ADP Lite */
- adv7533@3d {
- status = "disabled";
- };
-
adv7533@39 {
status = "disabled";
};
"qpnp-wled-sink-base",
"qpnp-wled-ibb-base",
"qpnp-wled-lab-base";
- interrupts = <0x3 0xd8 0x2 IRQ_TYPE_NONE>;
+ interrupts = <0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "sc-irq";
status = "okay";
linux,name = "wled";
<1600>;
qcom,qpnp-lab-limit-maximum-current = <1600>;
qcom,qpnp-lab-limit-max-current-enable;
- qcom,qpnp-lab-ps-threshold = <20>;
+ qcom,qpnp-lab-ps-threshold = <70>;
qcom,qpnp-lab-ps-enable;
qcom,qpnp-lab-nfet-size = <100>;
qcom,qpnp-lab-pfet-size = <100>;
<&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>,
<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>,
<&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>,
+ <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_rx_1>,
+ <&dai_sec_tdm_rx_2>, <&dai_sec_tdm_rx_3>,
<&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
<&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
"msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871",
"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866",
"msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870",
+ "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36882",
+ "msm-dai-q6-tdm.36884", "msm-dai-q6-tdm.36886",
"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
"msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
<&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>,
<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>,
<&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>,
+ <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_rx_1>,
+ <&dai_sec_tdm_rx_2>, <&dai_sec_tdm_rx_3>,
<&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
<&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
"msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871",
"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866",
"msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870",
+ "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36882",
+ "msm-dai-q6-tdm.36884", "msm-dai-q6-tdm.36886",
"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
"msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
pinctrl-0 = <&pmx_pps>;
gpios = <&tlmm 22 0>;
status = "okay";
+ use-system-time-ts;
};
gpio_fan {
&pcie2 {
/* Enumerate MDM on wake interrupt */
qcom,boot-option = <0x0>;
+ /delete-property/ qcom,l1-supported;
+ /delete-property/ qcom,l1ss-supported;
};
&mdm3 {
qcom,invert = <1>; /* Output high */
status = "okay";
};
+
+ mpp@a500 { /* MPP 6 */
+ qcom,mode = <1>; /* Digital output */
+ qcom,output-type = <0>; /* CMOS logic */
+ qcom,vin-sel = <2>; /* S4 1.8V */
+ qcom,src-sel = <0>; /* Constant */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "okay";
+ };
};
&pm8994_vadc {
<&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>,
<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>,
<&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>,
+ <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_rx_1>,
+ <&dai_sec_tdm_rx_2>, <&dai_sec_tdm_rx_3>,
<&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
<&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
"msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871",
"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866",
"msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870",
+ "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36882",
+ "msm-dai-q6-tdm.36884", "msm-dai-q6-tdm.36886",
"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
"msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
};
};
+ qcom,msm-dai-tdm-sec-rx {
+ compatible = "qcom,msm-dai-tdm";
+ qcom,msm-cpudai-tdm-group-id = <37136>;
+ qcom,msm-cpudai-tdm-group-num-ports = <4>;
+ qcom,msm-cpudai-tdm-group-port-id = <36880 36882 36884 36886>;
+ qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+ qcom,msm-cpudai-tdm-clk-internal = <0>;
+ qcom,msm-cpudai-tdm-sync-mode = <1>;
+ qcom,msm-cpudai-tdm-sync-src = <0>;
+ qcom,msm-cpudai-tdm-data-out = <0>;
+ qcom,msm-cpudai-tdm-invert-sync = <0>;
+ qcom,msm-cpudai-tdm-data-delay = <0>;
+ dai_sec_tdm_rx_0: qcom,msm-dai-q6-tdm-sec-rx-0 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36880>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_sec_tdm_rx_1: qcom,msm-dai-q6-tdm-sec-rx-1 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36882>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_sec_tdm_rx_2: qcom,msm-dai-q6-tdm-sec-rx-2 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36884>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_sec_tdm_rx_3: qcom,msm-dai-q6-tdm-sec-rx-3 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36886>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+ };
+
qcom,msm-dai-tdm-sec-tx {
compatible = "qcom,msm-dai-tdm";
qcom,msm-cpudai-tdm-group-id = <37137>;
qcom,master-en = <1>;
status = "okay";
};
+
+ /* GPIO 11 for Home Key */
+ gpio@ca00 {
+ status = "okay";
+ qcom,mode = <0>;
+ qcom,pull = <0>;
+ qcom,vin-sel = <0>;
+ qcom,src-sel = <0>;
+ qcom,out-strength = <1>;
+ };
};
&i2c_6 { /* BLSP1 QUP6 (NFC) */
qcom,sensors = <12>;
};
+ qcom,qbt1000 {
+ compatible = "qcom,qbt1000";
+ clock-names = "core", "iface";
+ clocks = <&clock_gcc GCC_BLSP1_QUP3_SPI_APPS_CLK>,
+ <&clock_gcc GCC_BLSP1_AHB_CLK>;
+ clock-frequency = <15000000>;
+ qcom,ipc-gpio = <&tlmm 72 0>;
+ qcom,finger-detect-gpio = <&pm660_gpios 11 0>;
+ };
+
qcom,sensor-information {
compatible = "qcom,sensor-information";
sensor_information0: qcom,sensor-information-0 {
*/
#include "sdm660-cdp.dtsi"
+#include "sdm636-camera-sensor-mtp.dtsi"
/ {
};
+++ /dev/null
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-&soc {
- qcom,msm-dai-mi2s {
- dai_mi2s_sec: qcom,msm-dai-q6-mi2s-sec {
- qcom,msm-mi2s-rx-lines = <2>;
- qcom,msm-mi2s-tx-lines = <1>;
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&sec_mi2s_active &sec_mi2s_sd0_active
- &sec_mi2s_sd1_active>;
- pinctrl-1 = <&sec_mi2s_sleep &sec_mi2s_sd0_sleep
- &sec_mi2s_sd1_sleep>;
- };
-
- dai_mi2s: qcom,msm-dai-q6-mi2s-tert {
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&tert_mi2s_active &tert_mi2s_sd0_active>;
- pinctrl-1 = <&tert_mi2s_sleep &tert_mi2s_sd0_sleep>;
- };
-
- dai_mi2s_quat: qcom,msm-dai-q6-mi2s-quat {
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&quat_mi2s_active &quat_mi2s_sd0_active>;
- pinctrl-1 = <&quat_mi2s_sleep &quat_mi2s_sd0_sleep>;
- };
- };
-
- qcom,msm-dai-tdm-tert-rx {
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&tert_tdm_dout_active>;
- pinctrl-1 = <&tert_tdm_dout_sleep>;
- };
-
- qcom,msm-dai-tdm-quat-rx {
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&quat_tdm_dout_active>;
- pinctrl-1 = <&quat_tdm_dout_sleep>;
- };
-};
-
--- /dev/null
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ qcom,msm-dai-mi2s {
+ dai_mi2s_sec: qcom,msm-dai-q6-mi2s-sec {
+ qcom,msm-mi2s-rx-lines = <2>;
+ qcom,msm-mi2s-tx-lines = <1>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sec_mi2s_active &sec_mi2s_sd0_active
+ &sec_mi2s_sd1_active>;
+ pinctrl-1 = <&sec_mi2s_sleep &sec_mi2s_sd0_sleep
+ &sec_mi2s_sd1_sleep>;
+ };
+
+ dai_mi2s: qcom,msm-dai-q6-mi2s-tert {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&tert_mi2s_active &tert_mi2s_sd0_active>;
+ pinctrl-1 = <&tert_mi2s_sleep &tert_mi2s_sd0_sleep>;
+ };
+
+ dai_mi2s_quat: qcom,msm-dai-q6-mi2s-quat {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&quat_mi2s_active &quat_mi2s_sd0_active>;
+ pinctrl-1 = <&quat_mi2s_sleep &quat_mi2s_sd0_sleep>;
+ };
+ };
+
+ qcom,msm-dai-tdm-tert-rx {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&tert_tdm_dout_active>;
+ pinctrl-1 = <&tert_tdm_dout_sleep>;
+ };
+
+ qcom,msm-dai-tdm-quat-rx {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&quat_tdm_dout_active>;
+ pinctrl-1 = <&quat_tdm_dout_sleep>;
+ };
+
+ qcom,msm-audio-ion {
+ compatible = "qcom,msm-audio-ion";
+ qcom,smmu-version = <2>;
+ qcom,smmu-enabled;
+ iommus = <&lpass_q6_smmu 1>;
+ };
+
+ qcom,msm-adsp-loader {
+ status = "ok";
+ compatible = "qcom,adsp-loader";
+ qcom,adsp-state = <0>;
+ };
+
+ qcom,msm-adsprpc-mem {
+ compatible = "qcom,msm-adsprpc-mem-region";
+ memory-region = <&adsp_mem>;
+ };
+
+ qcom,msm_fastrpc {
+ compatible = "qcom,msm-fastrpc-adsp";
+
+ qcom,msm_fastrpc_compute_cb1 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 8>;
+ };
+ qcom,msm_fastrpc_compute_cb2 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 9>;
+ };
+ qcom,msm_fastrpc_compute_cb3 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 10>;
+ };
+ qcom,msm_fastrpc_compute_cb4 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 11>;
+ };
+ qcom,msm_fastrpc_compute_cb5 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 12>;
+ };
+ qcom,msm_fastrpc_compute_cb6 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 5>;
+ };
+ qcom,msm_fastrpc_compute_cb7 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 6>;
+ };
+ qcom,msm_fastrpc_compute_cb8 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 7>;
+ };
+ };
+
+ qcom,glink-smem-native-xprt-adsp@86000000 {
+ compatible = "qcom,glink-smem-native-xprt";
+ reg = <0x86000000 0x200000>,
+ <0x9820010 0x4>;
+ reg-names = "smem", "irq-reg-base";
+ qcom,irq-mask = <0x200>;
+ interrupts = <0 157 1>;
+ label = "lpass";
+ qcom,qos-config = <0x1b8>;
+ qcom,ramp-time = <0xaf>;
+ };
+
+ qcom,glink-qos-config-adsp {
+ compatible = "qcom,glink-qos-config";
+ qcom,flow-info = <0x3c 0x0 0x3c 0x0 0x3c 0x0 0x3c 0x0>;
+ qcom,mtu-size = <0x800>;
+ qcom,tput-stats-cycle = <0xa>;
+ linux,phandle = <0x1b8>;
+ phandle = <0x1b8>;
+ };
+
+ qcom,lpass@9300000 {
+ compatible = "qcom,pil-tz-generic";
+ reg = <0x9300000 0x00100>;
+ interrupts = <0 162 1>;
+
+ vdd_cx-supply = <&pm8994_s1_corner>;
+ qcom,proxy-reg-names = "vdd_cx";
+ qcom,vdd_cx-uV-uA = <7 100000>;
+
+ clocks = <&clock_gcc clk_cxo_pil_lpass_clk>;
+ clock-names = "xo";
+ qcom,proxy-clock-names = "xo";
+
+ qcom,pas-id = <1>;
+ qcom,proxy-timeout-ms = <10000>;
+ qcom,smem-id = <423>;
+ qcom,sysmon-id = <1>;
+ qcom,ssctl-instance-id = <0x14>;
+ qcom,firmware-name = "adsp";
+ qcom,edge = "lpass";
+ memory-region = <&peripheral_mem>;
+
+ /* GPIO inputs from lpass */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
+ qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_2_in 1 0>;
+ qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_2_in 3 0>;
+
+ /* GPIO output to lpass */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
+ };
+};
+
--- /dev/null
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/ {
+ aliases {
+ spi9 = &spi_9;
+ i2c6 = &i2c_6;
+ i2c8 = &i2c_8;
+ };
+};
+
+&soc {
+ spi_9: spi@75B7000 { /* BLSP2 QUP3 */
+ compatible = "qcom,spi-qup-v2";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "spi_physical";
+ reg = <0x075B7000 0x600>;
+ interrupt-names = "spi_irq";
+ interrupts = <0 103 0>;
+ spi-max-frequency = <19200000>;
+ qcom,infinite-mode = <0>;
+ qcom,ver-reg-exists;
+ qcom,master-id = <84>;
+ qcom,use-pinctrl;
+ pinctrl-names = "spi_default", "spi_sleep";
+ pinctrl-0 = <&spi_9_active>;
+ pinctrl-1 = <&spi_9_sleep>;
+ clock-names = "iface_clk", "core_clk";
+ clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+ <&clock_gcc clk_gcc_blsp2_qup3_spi_apps_clk>;
+ status = "disabled";
+ };
+
+ i2c_6: i2c@757a000 { /* BLSP1 QUP6 */
+ compatible = "qcom,i2c-msm-v2";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x757a000 0x1000>;
+ reg-names = "qup_phys_addr";
+ interrupt-names = "qup_irq";
+ interrupts = <0 100 0>;
+ qcom,disable-dma;
+ qcom,master-id = <86>;
+ qcom,clk-freq-out = <400000>;
+ qcom,clk-freq-in = <19200000>;
+ clock-names = "iface_clk", "core_clk";
+ clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+ <&clock_gcc clk_gcc_blsp1_qup6_i2c_apps_clk>;
+ pinctrl-names = "i2c_active", "i2c_sleep";
+ pinctrl-0 = <&i2c_6_active>;
+ pinctrl-1 = <&i2c_6_sleep>;
+ status = "disabled";
+ };
+
+ i2c_8: i2c@75b6000 { /* BLSP2 QUP2 */
+ compatible = "qcom,i2c-msm-v2";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "qup_phys_addr";
+ reg = <0x75b6000 0x1000>;
+ interrupt-names = "qup_irq";
+ interrupts = <0 102 0>;
+ qcom,disable-dma;
+ qcom,master-id = <84>;
+ qcom,clk-freq-out = <400000>;
+ qcom,clk-freq-in = <19200000>;
+ clock-names = "iface_clk", "core_clk";
+ clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
+ <&clock_gcc clk_gcc_blsp2_qup2_i2c_apps_clk>;
+ pinctrl-names = "i2c_active", "i2c_sleep";
+ pinctrl-0 = <&i2c_8_active>;
+ pinctrl-1 = <&i2c_8_sleep>;
+ status = "disabled";
+ };
+
+ blsp1_uart2: uart@07570000 { /* BLSP1 UART2 */
+ compatible = "qcom,msm-hsuart-v14";
+ reg = <0x07570000 0x1000>,
+ <0x7544000 0x2b000>;
+ reg-names = "core_mem", "bam_mem";
+ interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
+ interrupts = <0 108 0>, <0 238 0>, <0 810 0>;
+ #address-cells = <0>;
+
+ qcom,inject-rx-on-wakeup;
+ qcom,rx-char-to-inject = <0xFD>;
+
+ qcom,bam-tx-ep-pipe-index = <2>;
+ qcom,bam-rx-ep-pipe-index = <3>;
+ qcom,master-id = <86>;
+ clock-names = "core_clk", "iface_clk";
+ clocks = <&clock_gcc clk_gcc_blsp1_uart2_apps_clk>,
+ <&clock_gcc clk_gcc_blsp1_ahb_clk>;
+ pinctrl-names = "sleep", "default";
+ pinctrl-0 = <&blsp1_uart2_sleep>;
+ pinctrl-1 = <&blsp1_uart2_active>;
+
+ qcom,msm-bus,name = "buart2";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <86 512 0 0>,
+ <86 512 500 800>;
+ status = "disabled";
+ };
+};
--- /dev/null
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "skeleton64.dtsi"
+#include "vplatform-lfv-msm8996.dtsi"
+#include <dt-bindings/clock/msm-clocks-8996.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM 8996";
+ compatible = "qcom,msm8996";
+ qcom,msm-id = <246 0x0>;
+};
+
+&soc {
+ qcom,msm-audio-ion-vm {
+ compatible = "qcom,msm-audio-ion-vm";
+ qcom,smmu-enabled;
+ };
+};
+
+&spi_9 {
+ status = "okay";
+};
+
+&i2c_8 {
+ status = "okay";
+};
+
+&blsp1_uart2 {
+ status = "okay";
+};
--- /dev/null
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ qcom,ipc-spinlock@740000 {
+ compatible = "qcom,ipc-spinlock-sfpb";
+ reg = <0x740000 0x8000>;
+ qcom,num-locks = <8>;
+ };
+
+ qcom,rmtfs_sharedmem@85e00000 {
+ compatible = "qcom,sharedmem-uio";
+ reg = <0x85e00000 0x00200000>;
+ reg-names = "rmtfs";
+ qcom,client-id = <0x00000001>;
+ };
+
+ qcom,glink-smem-native-xprt-modem@86000000 {
+ compatible = "qcom,glink-smem-native-xprt";
+ reg = <0x86000000 0x200000>,
+ <0x9820010 0x4>;
+ reg-names = "smem", "irq-reg-base";
+ qcom,irq-mask = <0x8000>;
+ interrupts = <0 452 1>;
+ label = "mpss";
+ };
+
+ qcom,ipc_router {
+ compatible = "qcom,ipc_router";
+ qcom,node-id = <1>;
+ };
+
+ qcom,ipc_router_modem_xprt {
+ compatible = "qcom,ipc_router_glink_xprt";
+ qcom,ch-name = "IPCRTR";
+ qcom,xprt-remote = "mpss";
+ qcom,glink-xprt = "smd_trans";
+ qcom,xprt-linkid = <1>;
+ qcom,xprt-version = <1>;
+ qcom,fragmented-data;
+ };
+
+ qcom,ipc_router_q6_xprt {
+ compatible = "qcom,ipc_router_glink_xprt";
+ qcom,ch-name = "IPCRTR";
+ qcom,xprt-remote = "lpass";
+ qcom,glink-xprt = "smd_trans";
+ qcom,xprt-linkid = <1>;
+ qcom,xprt-version = <1>;
+ qcom,fragmented-data;
+ };
+
+ ipa_hw: qcom,ipa@680000 {
+ compatible = "qcom,ipa";
+ reg = <0x680000 0x4effc>,
+ <0x684000 0x26934>;
+ reg-names = "ipa-base", "bam-base";
+ interrupts = <0 333 0>,
+ <0 432 0>;
+ interrupt-names = "ipa-irq", "bam-irq";
+ qcom,ipa-hw-ver = <5>; /* IPA core version = IPAv2.5 */
+ qcom,ipa-hw-mode = <0>;
+ qcom,ee = <0>;
+ qcom,use-ipa-tethering-bridge;
+ qcom,ipa-bam-remote-mode;
+ qcom,modem-cfg-emb-pipe-flt;
+ clocks = <&clock_gcc clk_ipa_clk>;
+ clock-names = "core_clk";
+ qcom,use-dma-zone;
+ qcom,msm-bus,name = "ipa";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <2>;
+ qcom,msm-bus,vectors-KBps =
+ <90 512 0 0>, <90 585 0 0>, /* No vote */
+ <90 512 80000 640000>, <90 585 80000 640000>, /* SVS */
+ <90 512 206000 960000>, <90 585 206000 960000>; /* PERF */
+ qcom,bus-vector-names = "MIN", "SVS", "PERF";
+ };
+
+ qcom,rmnet-ipa {
+ compatible = "qcom,rmnet-ipa";
+ qcom,rmnet-ipa-ssr;
+ qcom,ipa-loaduC;
+ qcom,ipa-advertise-sg-support;
+ };
+
+ pil_modem: qcom,mss@2080000 {
+ compatible = "qcom,pil-q6v55-mss";
+ reg = <0x2080000 0x100>,
+ <0x0763000 0x008>,
+ <0x0765000 0x008>,
+ <0x0764000 0x008>,
+ <0x2180000 0x020>,
+ <0x038f008 0x004>;
+ reg-names = "qdsp6_base", "halt_q6", "halt_modem",
+ "halt_nc", "rmb_base", "restart_reg";
+
+ clocks = <&clock_gcc clk_cxo_clk_src>,
+ <&clock_gcc clk_gcc_mss_cfg_ahb_clk>,
+ <&clock_gcc clk_pnoc_clk>,
+ <&clock_gcc clk_gcc_mss_q6_bimc_axi_clk>,
+ <&clock_gcc clk_gcc_boot_rom_ahb_clk>,
+ <&clock_gcc clk_gpll0_out_msscc>,
+ <&clock_gcc clk_gcc_mss_snoc_axi_clk>,
+ <&clock_gcc clk_gcc_mss_mnoc_bimc_axi_clk>,
+ <&clock_gcc clk_qdss_clk>;
+ clock-names = "xo", "iface_clk", "pnoc_clk", "bus_clk",
+ "mem_clk", "gpll0_mss_clk", "snoc_axi_clk",
+ "mnoc_axi_clk", "qdss_clk";
+ qcom,proxy-clock-names = "xo", "pnoc_clk", "qdss_clk";
+ qcom,active-clock-names = "iface_clk", "bus_clk", "mem_clk",
+ "gpll0_mss_clk", "snoc_axi_clk",
+ "mnoc_axi_clk";
+
+ interrupts = <0 448 1>;
+ vdd_cx-supply = <&pm8994_s1_corner>;
+ vdd_cx-voltage = <7>;
+ vdd_mx-supply = <&pm8994_s2_corner>;
+ vdd_mx-uV = <6>;
+ vdd_pll-supply = <&pm8994_l12>;
+ qcom,vdd_pll = <1800000>;
+ qcom,firmware-name = "modem";
+ qcom,pil-self-auth;
+ qcom,sysmon-id = <0>;
+ qcom,ssctl-instance-id = <0x12>;
+ qcom,override-acc;
+ qcom,ahb-clk-vote;
+ qcom,pnoc-clk-vote;
+ qcom,qdsp6v56-1-5;
+ qcom,mx-spike-wa;
+ memory-region = <&modem_mem>;
+ qcom,mem-protect-id = <0xF>;
+
+ /* GPIO inputs from mss */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
+ qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_1_in 1 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_1_in 2 0>;
+ qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_1_in 3 0>;
+ qcom,gpio-shutdown-ack = <&smp2pgpio_ssr_smp2p_1_in 7 0>;
+
+ /* GPIO output to mss */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
+ status = "ok";
+ };
+};
--- /dev/null
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "skeleton64.dtsi"
+#include "vplatform-lfv-msm8996.dtsi"
+#include "vplatform-lfv-smmu.dtsi"
+#include "msm8996-smp2p.dtsi"
+#include "vplatform-lfv-msm8996-audio.dtsi"
+#include "vplatform-lfv-msm8996-modem.dtsi"
+#include <dt-bindings/clock/msm-clocks-8996.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM 8996";
+ compatible = "qcom,msm8996";
+ qcom,msm-id = <246 0x0>;
+};
+
+&reserved_memory {
+ modem_mem: modem_region@88800000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x88800000 0 0x6200000>;
+ };
+ peripheral_mem: peripheral_region@8ea00000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0 0x8ea00000 0 0x2b00000>;
+ };
+ adsp_mem: adsp_region {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0 0x00000000 0 0xffffffff>;
+ reusable;
+ alignment = <0 0x100000>;
+ size = <0 0x400000>;
+ };
+};
+
+&soc {
+ qcom,smem@86000000 {
+ compatible = "qcom,smem";
+ reg = <0x86000000 0x200000>,
+ <0x9820010 0x4>,
+ <0x7b4000 0x8>;
+ reg-names = "smem", "irq-reg-base",
+ "smem_targ_info_reg";
+ qcom,mpu-enabled;
+
+ qcom,smd-modem {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <0>;
+ qcom,smd-irq-offset = <0x0>;
+ qcom,smd-irq-bitmask = <0x1000>;
+ interrupts = <0 449 1>;
+ label = "modem";
+ qcom,not-loadable;
+ };
+
+ qcom,smd-adsp {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <0x1>;
+ qcom,smd-irq-offset = <0x0>;
+ qcom,smd-irq-bitmask = <0x100>;
+ interrupts = <0x0 0x9c 0x1>;
+ label = "adsp";
+ };
+ };
+
+ qcom,cnss {
+ compatible = "qcom,cnss";
+ wlan-bootstrap-gpio = <&tlmm 46 0>;
+ vdd-wlan-en-supply = <&wlan_en_vreg>;
+ vdd-wlan-supply = <&rome_vreg>;
+ vdd-wlan-io-supply = <&pm8994_s4>;
+ vdd-wlan-xtal-supply = <&pm8994_l30>;
+ vdd-wlan-core-supply = <&pm8994_s3>;
+ wlan-ant-switch-supply = <&pm8994_l18_pin_ctrl>;
+ qcom,wlan-en-vreg-support;
+ qcom,notify-modem-status;
+ pinctrl-names = "bootstrap_active", "bootstrap_sleep";
+ pinctrl-0 = <&cnss_bootstrap_active>;
+ pinctrl-1 = <&cnss_bootstrap_sleep>;
+
+ qcom,msm-bus,name = "msm-cnss";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ /* No vote */
+ <45 512 0 0>,
+ /* Up to 200 Mbps */
+ <45 512 41421 1520000>,
+ /* Up to 400 Mbps */
+ <45 512 96650 1520000>,
+ /* Up to 800 Mbps */
+ <45 512 207108 14432000>;
+ };
+};
+
+&spi_9 {
+ status = "okay";
+};
+
+&i2c_6 {
+ status = "okay";
+};
+
+&i2c_8 {
+ status = "okay";
+};
+
+&blsp1_uart2 {
+ status = "okay";
+};
* GNU General Public License for more details.
*/
-/dts-v1/;
-
#include "skeleton64.dtsi"
#include <dt-bindings/clock/msm-clocks-8996.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
compatible = "qcom,msm8996";
qcom,msm-id = <246 0x0>;
- aliases {
- spi9 = &spi_9;
- i2c6 = &i2c_6;
- i2c8 = &i2c_8;
- };
-
- soc: soc { };
-
psci {
compatible = "arm,psci";
method = "smc";
cpu_on = <0xc4000003>;
};
- reserved-memory {
+ soc: soc { };
+
+ reserved_memory: reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
ranges;
reg = <0 0xc8000000 0 0x00400000>;
label = "ion_audio_mem";
};
- modem_mem: modem_region@88800000 {
- compatible = "removed-dma-pool";
- no-map;
- reg = <0 0x88800000 0 0x6200000>;
- };
- peripheral_mem: peripheral_region@8ea00000 {
- compatible = "removed-dma-pool";
- no-map;
- reg = <0 0x8ea00000 0 0x2b00000>;
- };
- adsp_mem: adsp_region {
- compatible = "shared-dma-pool";
- alloc-ranges = <0 0x00000000 0 0xffffffff>;
- reusable;
- alignment = <0 0x100000>;
- size = <0 0x400000>;
- };
};
};
#include "vplatform-lfv-ion.dtsi"
-#include "vplatform-lfv-smmu.dtsi"
+#include "vplatform-lfv-msm8996-pinctrl.dtsi"
+#include "vplatform-lfv-msm8996-blsp.dtsi"
&soc {
#address-cells = <1>;
<&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>,
<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>,
<&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>,
+ <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_rx_1>,
+ <&dai_sec_tdm_rx_2>, <&dai_sec_tdm_rx_3>,
<&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
<&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
"msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871",
"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866",
"msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870",
+ "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36882",
+ "msm-dai-q6-tdm.36884", "msm-dai-q6-tdm.36886",
"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
"msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
asoc-codec = <&stub_codec>;
asoc-codec-names = "msm-stub-codec.1";
};
- qcom,msm-adsp-loader {
- status = "ok";
- compatible = "qcom,adsp-loader";
- qcom,adsp-state = <0>;
- };
-
- qcom,msm-audio-ion {
- compatible = "qcom,msm-audio-ion";
- qcom,smmu-version = <2>;
- qcom,smmu-enabled;
- iommus = <&lpass_q6_smmu 1>;
- };
pcm0: qcom,msm-pcm {
compatible = "qcom,msm-pcm-dsp";
afe_pcm_tx: qcom,msm-dai-q6-be-afe-pcm-tx {
compatible = "qcom,msm-dai-q6-dev";
qcom,msm-dai-q6-dev-id = <225>;
- };
+ };
afe_proxy_rx: qcom,msm-dai-q6-afe-proxy-rx {
compatible = "qcom,msm-dai-q6-dev";
};
};
+ qcom,msm-dai-tdm-sec-rx {
+ compatible = "qcom,msm-dai-tdm";
+ qcom,msm-cpudai-tdm-group-id = <37136>;
+ qcom,msm-cpudai-tdm-group-num-ports = <4>;
+ qcom,msm-cpudai-tdm-group-port-id = <36880 36882 36884 36886>;
+ qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+ qcom,msm-cpudai-tdm-clk-internal = <0>;
+ qcom,msm-cpudai-tdm-sync-mode = <1>;
+ qcom,msm-cpudai-tdm-sync-src = <0>;
+ qcom,msm-cpudai-tdm-data-out = <0>;
+ qcom,msm-cpudai-tdm-invert-sync = <0>;
+ qcom,msm-cpudai-tdm-data-delay = <0>;
+ dai_sec_tdm_rx_0: qcom,msm-dai-q6-tdm-sec-rx-0 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36880>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_sec_tdm_rx_1: qcom,msm-dai-q6-tdm-sec-rx-1 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36882>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_sec_tdm_rx_2: qcom,msm-dai-q6-tdm-sec-rx-2 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36884>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_sec_tdm_rx_3: qcom,msm-dai-q6-tdm-sec-rx-3 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36886>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+ };
+
qcom,msm-dai-tdm-sec-tx {
compatible = "qcom,msm-dai-tdm";
qcom,msm-cpudai-tdm-group-id = <37137>;
compatible = "qcom,msm-pcm-hostless";
};
- qcom,msm-adsprpc-mem {
- compatible = "qcom,msm-adsprpc-mem-region";
- memory-region = <&adsp_mem>;
- };
-
- qcom,msm_fastrpc {
- compatible = "qcom,msm-fastrpc-adsp";
-
- qcom,msm_fastrpc_compute_cb1 {
- compatible = "qcom,msm-fastrpc-compute-cb";
- label = "adsprpc-smd";
- iommus = <&lpass_q6_smmu 8>;
- };
- qcom,msm_fastrpc_compute_cb2 {
- compatible = "qcom,msm-fastrpc-compute-cb";
- label = "adsprpc-smd";
- iommus = <&lpass_q6_smmu 9>;
- };
- qcom,msm_fastrpc_compute_cb3 {
- compatible = "qcom,msm-fastrpc-compute-cb";
- label = "adsprpc-smd";
- iommus = <&lpass_q6_smmu 10>;
- };
- qcom,msm_fastrpc_compute_cb4 {
- compatible = "qcom,msm-fastrpc-compute-cb";
- label = "adsprpc-smd";
- iommus = <&lpass_q6_smmu 11>;
- };
- qcom,msm_fastrpc_compute_cb5 {
- compatible = "qcom,msm-fastrpc-compute-cb";
- label = "adsprpc-smd";
- iommus = <&lpass_q6_smmu 12>;
- };
- qcom,msm_fastrpc_compute_cb6 {
- compatible = "qcom,msm-fastrpc-compute-cb";
- label = "adsprpc-smd";
- iommus = <&lpass_q6_smmu 5>;
- };
- qcom,msm_fastrpc_compute_cb7 {
- compatible = "qcom,msm-fastrpc-compute-cb";
- label = "adsprpc-smd";
- iommus = <&lpass_q6_smmu 6>;
- };
- qcom,msm_fastrpc_compute_cb8 {
- compatible = "qcom,msm-fastrpc-compute-cb";
- label = "adsprpc-smd";
- iommus = <&lpass_q6_smmu 7>;
- };
- };
-};
-
-#include "vplatform-lfv-msm8996-pinctrl.dtsi"
-#include "msm8996-smp2p.dtsi"
-
-&soc {
- qcom,ipc-spinlock@740000 {
- compatible = "qcom,ipc-spinlock-sfpb";
- reg = <0x740000 0x8000>;
- qcom,num-locks = <8>;
- };
-
- qcom,smem@86000000 {
- compatible = "qcom,smem";
- reg = <0x86000000 0x200000>,
- <0x9820010 0x4>,
- <0x7b4000 0x8>;
- reg-names = "smem", "irq-reg-base",
- "smem_targ_info_reg";
- qcom,mpu-enabled;
-
- qcom,smd-modem {
- compatible = "qcom,smd";
- qcom,smd-edge = <0>;
- qcom,smd-irq-offset = <0x0>;
- qcom,smd-irq-bitmask = <0x1000>;
- interrupts = <0 449 1>;
- label = "modem";
- qcom,not-loadable;
- };
-
- qcom,smd-adsp {
- compatible = "qcom,smd";
- qcom,smd-edge = <0x1>;
- qcom,smd-irq-offset = <0x0>;
- qcom,smd-irq-bitmask = <0x100>;
- interrupts = <0x0 0x9c 0x1>;
- label = "adsp";
- };
- };
-
- qcom,rmtfs_sharedmem@0 {
- compatible = "qcom,sharedmem-uio";
- reg = <0x85e00000 0x00200000>;
- reg-names = "rmtfs";
- qcom,client-id = <0x00000001>;
- };
-
- qcom,glink-smem-native-xprt-modem@86000000 {
- compatible = "qcom,glink-smem-native-xprt";
- reg = <0x86000000 0x200000>,
- <0x9820010 0x4>;
- reg-names = "smem", "irq-reg-base";
- qcom,irq-mask = <0x8000>;
- interrupts = <0 452 1>;
- label = "mpss";
- };
-
- qcom,glink-smem-native-xprt-adsp@86000000 {
- compatible = "qcom,glink-smem-native-xprt";
- reg = <0x86000000 0x200000>,
- <0x9820010 0x4>;
- reg-names = "smem", "irq-reg-base";
- qcom,irq-mask = <0x200>;
- interrupts = <0 157 1>;
- label = "lpass";
- qcom,qos-config = <0x1b8>;
- qcom,ramp-time = <0xaf>;
- };
-
- qcom,glink-qos-config-adsp {
- compatible = "qcom,glink-qos-config";
- qcom,flow-info = <0x3c 0x0 0x3c 0x0 0x3c 0x0 0x3c 0x0>;
- qcom,mtu-size = <0x800>;
- qcom,tput-stats-cycle = <0xa>;
- linux,phandle = <0x1b8>;
- phandle = <0x1b8>;
- };
-
- /* IPC router */
- qcom,ipc_router {
- compatible = "qcom,ipc_router";
- qcom,node-id = <1>;
- };
-
- qcom,ipc_router_modem_xprt {
- compatible = "qcom,ipc_router_glink_xprt";
- qcom,ch-name = "IPCRTR";
- qcom,xprt-remote = "mpss";
- qcom,glink-xprt = "smd_trans";
- qcom,xprt-linkid = <1>;
- qcom,xprt-version = <1>;
- qcom,fragmented-data;
- };
-
- qcom,ipc_router_q6_xprt {
- compatible = "qcom,ipc_router_glink_xprt";
- qcom,ch-name = "IPCRTR";
- qcom,xprt-remote = "lpass";
- qcom,glink-xprt = "smd_trans";
- qcom,xprt-linkid = <1>;
- qcom,xprt-version = <1>;
- qcom,fragmented-data;
- };
-
- /* IPA including NDP-BAM */
- ipa_hw: qcom,ipa@680000 {
- compatible = "qcom,ipa";
- reg = <0x680000 0x4effc>,
- <0x684000 0x26934>;
- reg-names = "ipa-base", "bam-base";
- interrupts = <0 333 0>,
- <0 432 0>;
- interrupt-names = "ipa-irq", "bam-irq";
- qcom,ipa-hw-ver = <5>; /* IPA core version = IPAv2.5 */
- qcom,ipa-hw-mode = <0>;
- qcom,ee = <0>;
- qcom,use-ipa-tethering-bridge;
- qcom,ipa-bam-remote-mode;
- qcom,modem-cfg-emb-pipe-flt;
- clocks = <&clock_gcc clk_ipa_clk>;
- clock-names = "core_clk";
- qcom,use-dma-zone;
- qcom,msm-bus,name = "ipa";
- qcom,msm-bus,num-cases = <3>;
- qcom,msm-bus,num-paths = <2>;
- qcom,msm-bus,vectors-KBps =
- <90 512 0 0>, <90 585 0 0>, /* No vote */
- <90 512 80000 640000>, <90 585 80000 640000>, /* SVS */
- <90 512 206000 960000>, <90 585 206000 960000>; /* PERF */
- qcom,bus-vector-names = "MIN", "SVS", "PERF";
- };
-
- /* rmnet over IPA */
- qcom,rmnet-ipa {
- compatible = "qcom,rmnet-ipa";
- qcom,rmnet-ipa-ssr;
- qcom,ipa-loaduC;
- qcom,ipa-advertise-sg-support;
- };
-
- /* SPS */
qcom,sps {
compatible = "qcom,msm_sps_4k";
qcom,device-type = <3>;
#reset-cells = <1>;
};
- pil_modem: qcom,mss@2080000 {
- compatible = "qcom,pil-q6v55-mss";
- reg = <0x2080000 0x100>,
- <0x0763000 0x008>,
- <0x0765000 0x008>,
- <0x0764000 0x008>,
- <0x2180000 0x020>,
- <0x038f008 0x004>;
- reg-names = "qdsp6_base", "halt_q6", "halt_modem",
- "halt_nc", "rmb_base", "restart_reg";
-
- clocks = <&clock_gcc clk_cxo_clk_src>,
- <&clock_gcc clk_gcc_mss_cfg_ahb_clk>,
- <&clock_gcc clk_pnoc_clk>,
- <&clock_gcc clk_gcc_mss_q6_bimc_axi_clk>,
- <&clock_gcc clk_gcc_boot_rom_ahb_clk>,
- <&clock_gcc clk_gpll0_out_msscc>,
- <&clock_gcc clk_gcc_mss_snoc_axi_clk>,
- <&clock_gcc clk_gcc_mss_mnoc_bimc_axi_clk>,
- <&clock_gcc clk_qdss_clk>;
- clock-names = "xo", "iface_clk", "pnoc_clk", "bus_clk",
- "mem_clk", "gpll0_mss_clk", "snoc_axi_clk",
- "mnoc_axi_clk", "qdss_clk";
- qcom,proxy-clock-names = "xo", "pnoc_clk", "qdss_clk";
- qcom,active-clock-names = "iface_clk", "bus_clk", "mem_clk",
- "gpll0_mss_clk", "snoc_axi_clk",
- "mnoc_axi_clk";
-
- interrupts = <0 448 1>;
- vdd_cx-supply = <&pm8994_s1_corner>;
- vdd_cx-voltage = <7>;
- vdd_mx-supply = <&pm8994_s2_corner>;
- vdd_mx-uV = <6>;
- vdd_pll-supply = <&pm8994_l12>;
- qcom,vdd_pll = <1800000>;
- qcom,firmware-name = "modem";
- qcom,pil-self-auth;
- qcom,sysmon-id = <0>;
- qcom,ssctl-instance-id = <0x12>;
- qcom,override-acc;
- qcom,ahb-clk-vote;
- qcom,pnoc-clk-vote;
- qcom,qdsp6v56-1-5;
- qcom,mx-spike-wa;
- memory-region = <&modem_mem>;
- qcom,mem-protect-id = <0xF>;
-
- /* GPIO inputs from mss */
- qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
- qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_1_in 1 0>;
- qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_1_in 2 0>;
- qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_1_in 3 0>;
- qcom,gpio-shutdown-ack = <&smp2pgpio_ssr_smp2p_1_in 7 0>;
-
- /* GPIO output to mss */
- qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
- status = "ok";
- };
-
pm8994_s1_corner: regulator-s1-corner {
compatible = "qcom,stub-regulator";
regulator-name = "pm8994_s1_corner";
regulator-min-microvolt = <1>;
regulator-max-microvolt = <7>;
};
-
- spi_9: spi@75B7000 { /* BLSP2 QUP3 */
- compatible = "qcom,spi-qup-v2";
- #address-cells = <1>;
- #size-cells = <0>;
- reg-names = "spi_physical";
- reg = <0x075B7000 0x600>;
- interrupt-names = "spi_irq";
- interrupts = <0 103 0>;
- spi-max-frequency = <19200000>;
- qcom,infinite-mode = <0>;
- qcom,ver-reg-exists;
- qcom,master-id = <84>;
- qcom,use-pinctrl;
- pinctrl-names = "spi_default", "spi_sleep";
- pinctrl-0 = <&spi_9_active>;
- pinctrl-1 = <&spi_9_sleep>;
- clock-names = "iface_clk", "core_clk";
- clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
- <&clock_gcc clk_gcc_blsp2_qup3_spi_apps_clk>;
- };
-
- i2c_6: i2c@757a000 { /* BLSP1 QUP6 */
- compatible = "qcom,i2c-msm-v2";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x757a000 0x1000>;
- reg-names = "qup_phys_addr";
- interrupt-names = "qup_irq";
- interrupts = <0 100 0>;
- qcom,disable-dma;
- qcom,master-id = <86>;
- qcom,clk-freq-out = <400000>;
- qcom,clk-freq-in = <19200000>;
- clock-names = "iface_clk", "core_clk";
- clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
- <&clock_gcc clk_gcc_blsp1_qup6_i2c_apps_clk>;
- pinctrl-names = "i2c_active", "i2c_sleep";
- pinctrl-0 = <&i2c_6_active>;
- pinctrl-1 = <&i2c_6_sleep>;
- };
-
- i2c_8: i2c@75b6000 { /* BLSP2 QUP2 */
- compatible = "qcom,i2c-msm-v2";
- #address-cells = <1>;
- #size-cells = <0>;
- reg-names = "qup_phys_addr";
- reg = <0x75b6000 0x1000>;
- interrupt-names = "qup_irq";
- interrupts = <0 102 0>;
- qcom,disable-dma;
- qcom,master-id = <84>;
- qcom,clk-freq-out = <400000>;
- qcom,clk-freq-in = <19200000>;
- clock-names = "iface_clk", "core_clk";
- clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>,
- <&clock_gcc clk_gcc_blsp2_qup2_i2c_apps_clk>;
- pinctrl-names = "i2c_active", "i2c_sleep";
- pinctrl-0 = <&i2c_8_active>;
- pinctrl-1 = <&i2c_8_sleep>;
- };
-
- blsp1_uart2: uart@07570000 { /* BLSP1 UART2 */
- compatible = "qcom,msm-hsuart-v14";
- reg = <0x07570000 0x1000>,
- <0x7544000 0x2b000>;
- reg-names = "core_mem", "bam_mem";
- interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
- interrupts = <0 108 0>, <0 238 0>, <0 810 0>;
- #address-cells = <0>;
-
- qcom,inject-rx-on-wakeup;
- qcom,rx-char-to-inject = <0xFD>;
-
- qcom,bam-tx-ep-pipe-index = <2>;
- qcom,bam-rx-ep-pipe-index = <3>;
- qcom,master-id = <86>;
- clock-names = "core_clk", "iface_clk";
- clocks = <&clock_gcc clk_gcc_blsp1_uart2_apps_clk>,
- <&clock_gcc clk_gcc_blsp1_ahb_clk>;
- pinctrl-names = "sleep", "default";
- pinctrl-0 = <&blsp1_uart2_sleep>;
- pinctrl-1 = <&blsp1_uart2_active>;
-
- qcom,msm-bus,name = "buart2";
- qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,num-paths = <1>;
- qcom,msm-bus,vectors-KBps =
- <86 512 0 0>,
- <86 512 500 800>;
- };
-
- qcom,lpass@9300000 {
- compatible = "qcom,pil-tz-generic";
- reg = <0x9300000 0x00100>;
- interrupts = <0 162 1>;
-
- vdd_cx-supply = <&pm8994_s1_corner>;
- qcom,proxy-reg-names = "vdd_cx";
- qcom,vdd_cx-uV-uA = <7 100000>;
-
- clocks = <&clock_gcc clk_cxo_pil_lpass_clk>;
- clock-names = "xo";
- qcom,proxy-clock-names = "xo";
-
- qcom,pas-id = <1>;
- qcom,proxy-timeout-ms = <10000>;
- qcom,smem-id = <423>;
- qcom,sysmon-id = <1>;
- qcom,ssctl-instance-id = <0x14>;
- qcom,firmware-name = "adsp";
- qcom,edge = "lpass";
- memory-region = <&peripheral_mem>;
-
- /* GPIO inputs from lpass */
- qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
- qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
- qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_2_in 1 0>;
- qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_2_in 3 0>;
-
- /* GPIO output to lpass */
- qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
- };
-
- qcom,cnss {
- compatible = "qcom,cnss";
- wlan-bootstrap-gpio = <&tlmm 46 0>;
- vdd-wlan-en-supply = <&wlan_en_vreg>;
- vdd-wlan-supply = <&rome_vreg>;
- vdd-wlan-io-supply = <&pm8994_s4>;
- vdd-wlan-xtal-supply = <&pm8994_l30>;
- vdd-wlan-core-supply = <&pm8994_s3>;
- wlan-ant-switch-supply = <&pm8994_l18_pin_ctrl>;
- qcom,wlan-en-vreg-support;
- qcom,notify-modem-status;
- pinctrl-names = "bootstrap_active", "bootstrap_sleep";
- pinctrl-0 = <&cnss_bootstrap_active>;
- pinctrl-1 = <&cnss_bootstrap_sleep>;
-
- qcom,msm-bus,name = "msm-cnss";
- qcom,msm-bus,num-cases = <4>;
- qcom,msm-bus,num-paths = <1>;
- qcom,msm-bus,vectors-KBps =
- /* No vote */
- <45 512 0 0>,
- /* Up to 200 Mbps */
- <45 512 41421 1520000>,
- /* Up to 400 Mbps */
- <45 512 96650 1520000>,
- /* Up to 800 Mbps */
- <45 512 207108 14432000>;
- };
};
-#include "vplatform-lfv-agave.dtsi"
};
msiof0: spi@e6e20000 {
- compatible = "renesas,msiof-r8a7790";
+ compatible = "renesas,msiof-r8a7790",
+ "renesas,rcar-gen2-msiof";
reg = <0 0xe6e20000 0 0x0064>;
interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp0_clks R8A7790_CLK_MSIOF0>;
};
msiof1: spi@e6e10000 {
- compatible = "renesas,msiof-r8a7790";
+ compatible = "renesas,msiof-r8a7790",
+ "renesas,rcar-gen2-msiof";
reg = <0 0xe6e10000 0 0x0064>;
interrupts = <0 157 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7790_CLK_MSIOF1>;
};
msiof2: spi@e6e00000 {
- compatible = "renesas,msiof-r8a7790";
+ compatible = "renesas,msiof-r8a7790",
+ "renesas,rcar-gen2-msiof";
reg = <0 0xe6e00000 0 0x0064>;
interrupts = <0 158 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7790_CLK_MSIOF2>;
};
msiof3: spi@e6c90000 {
- compatible = "renesas,msiof-r8a7790";
+ compatible = "renesas,msiof-r8a7790",
+ "renesas,rcar-gen2-msiof";
reg = <0 0xe6c90000 0 0x0064>;
interrupts = <0 159 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7790_CLK_MSIOF3>;
# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_USELIB is not set
CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
* Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
* underlying level-2 and level-3 tables before freeing the actual level-1 table
* and setting the struct pointer to NULL.
- *
- * Note we don't need locking here as this is only called when the VM is
- * destroyed, which can only be done once.
*/
void kvm_free_stage2_pgd(struct kvm *kvm)
{
- if (kvm->arch.pgd == NULL)
- return;
+ void *pgd = NULL;
+ void *hwpgd = NULL;
spin_lock(&kvm->mmu_lock);
- unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+ if (kvm->arch.pgd) {
+ unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+ pgd = READ_ONCE(kvm->arch.pgd);
+ hwpgd = kvm_get_hwpgd(kvm);
+ kvm->arch.pgd = NULL;
+ }
spin_unlock(&kvm->mmu_lock);
- kvm_free_hwpgd(kvm_get_hwpgd(kvm));
- if (KVM_PREALLOC_LEVEL > 0)
- kfree(kvm->arch.pgd);
-
- kvm->arch.pgd = NULL;
+ if (hwpgd)
+ kvm_free_hwpgd(hwpgd);
+ if (KVM_PREALLOC_LEVEL > 0 && pgd)
+ kfree(pgd);
}
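
The rework above follows a common teardown pattern: snapshot and clear the
shared pointer while holding mmu_lock, then free outside the lock, so any
concurrent path that also takes the lock sees either a valid table or NULL,
never a freed pointer. A self-contained sketch of that pattern (ours, with a
pthread mutex standing in for the kvm spinlock; not kernel code):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_table;	/* stands in for kvm->arch.pgd */

static void destroy_table(void)
{
	void *snap;

	pthread_mutex_lock(&lock);
	snap = shared_table;	/* snapshot under the lock */
	shared_table = NULL;	/* later callers observe NULL and do nothing */
	pthread_mutex_unlock(&lock);

	if (snap)
		free(snap);	/* free outside the lock; only one caller wins */
}

int main(void)
{
	shared_table = malloc(64);
	destroy_table();
	destroy_table();	/* second call is now a safe no-op */
	return 0;
}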
static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
}
-static const struct of_device_id const ramc_ids[] __initconst = {
+static const struct of_device_id ramc_ids[] __initconst = {
{ .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
{ .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
{ .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
unsigned result;
};
-static const struct of_device_id const bcm_kona_smc_ids[] __initconst = {
+static const struct of_device_id bcm_kona_smc_ids[] __initconst = {
{.compatible = "brcm,kona-smc"},
{.compatible = "bcm,kona-smc"}, /* deprecated name */
{},
.power_off = csn3xxx_usb_power_off,
};
-static const struct of_dev_auxdata const cns3xxx_auxdata[] __initconst = {
+static const struct of_dev_auxdata cns3xxx_auxdata[] __initconst = {
{ "intel,usb-ehci", CNS3XXX_USB_BASE, "ehci-platform", &cns3xxx_usb_ehci_pdata },
{ "intel,usb-ohci", CNS3XXX_USB_OHCI_BASE, "ohci-platform", &cns3xxx_usb_ohci_pdata },
{ "cavium,cns3420-ahci", CNS3XXX_SATA2_BASE, "ahci", NULL },
};
#endif
-static const struct of_device_id const omap_prcm_dt_match_table[] __initconst = {
+static const struct of_device_id omap_prcm_dt_match_table[] __initconst = {
#ifdef CONFIG_SOC_AM33XX
{ .compatible = "ti,am3-prcm", .data = &am3_prm_data },
#endif
u8 hsscll_12;
};
-static const struct i2c_init_data const omap4_i2c_timing_data[] __initconst = {
+static const struct i2c_init_data omap4_i2c_timing_data[] __initconst = {
{
.load = 50,
.loadbits = 0x3,
static struct mmp_dma_platdata pxa_dma_pdata = {
.dma_channels = 0,
+ .nb_requestors = 0,
};
static struct resource pxa_dma_resource[] = {
.resource = pxa_dma_resource,
};
-void __init pxa2xx_set_dmac_info(int nb_channels)
+void __init pxa2xx_set_dmac_info(int nb_channels, int nb_requestors)
{
pxa_dma_pdata.dma_channels = nb_channels;
+ pxa_dma_pdata.nb_requestors = nb_requestors;
pxa_register_device(&pxa2xx_pxa_dma, &pxa_dma_pdata);
}
register_syscore_ops(&pxa_irq_syscore_ops);
register_syscore_ops(&pxa2xx_mfp_syscore_ops);
- pxa2xx_set_dmac_info(16);
+ pxa2xx_set_dmac_info(16, 40);
pxa_register_device(&pxa25x_device_gpio, &pxa25x_gpio_info);
ret = platform_add_devices(pxa25x_devices,
ARRAY_SIZE(pxa25x_devices));
if (!of_have_populated_dt()) {
pxa_register_device(&pxa27x_device_gpio,
&pxa27x_gpio_info);
- pxa2xx_set_dmac_info(32);
+ pxa2xx_set_dmac_info(32, 75);
ret = platform_add_devices(devices,
ARRAY_SIZE(devices));
}
if (of_have_populated_dt())
return 0;
- pxa2xx_set_dmac_info(32);
+ pxa2xx_set_dmac_info(32, 100);
ret = platform_add_devices(devices, ARRAY_SIZE(devices));
if (ret)
return ret;
setup_irq(irq, &spear_timer_irq);
}
-static const struct of_device_id const timer_of_match[] __initconst = {
+static const struct of_device_id timer_of_match[] __initconst = {
{ .compatible = "st,spear-timer", },
{ },
};
* signal first. We do not need to release the mmap_sem because
* it would already be released in __lock_page_or_retry in
* mm/filemap.c. */
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+ if (!user_mode(regs))
+ goto no_context;
return 0;
+ }
/*
* Major/minor page fault accounting is only done on the
}
#endif
-extern void __init pxa2xx_set_dmac_info(int nb_channels);
+extern void __init pxa2xx_set_dmac_info(int nb_channels, int nb_requestors);
#endif /* __PLAT_DMA_H */
.unmap_page = xen_swiotlb_unmap_page,
.dma_supported = xen_swiotlb_dma_supported,
.set_dma_mask = xen_swiotlb_set_dma_mask,
+ .mmap = xen_swiotlb_dma_mmap,
};
int __init xen_mm_init(void)
endif
endif
-KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr)
+ifeq ($(cc-name),clang)
+# This is a workaround for https://bugs.llvm.org/show_bug.cgi?id=30792.
+# TODO: revert when this is fixed in LLVM.
+KBUILD_CFLAGS += -mno-implicit-float
+else
+KBUILD_CFLAGS += -mgeneral-regs-only
+endif
+KBUILD_CFLAGS += $(lseinstr)
KBUILD_CFLAGS += -fno-pic
KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
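
For context on the clang branch above: -mgeneral-regs-only (and, as the
workaround, -mno-implicit-float) exists because kernel code must never touch
FP/SIMD registers, which are not saved and restored around kernel entry, yet
a compiler may introduce them on its own. A hedged C illustration of the
class of code that can trigger such implicit lowering (our example, not from
the tree):

/* Even integer-only code can be lowered to SIMD loads/stores for the
 * block copy below; in the kernel that would clobber user FPSIMD state,
 * hence the flags restricting codegen to general-purpose registers. */
struct big { long v[8]; };

void copy_big(struct big *dst, const struct big *src)
{
	*dst = *src;
}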
TEXT_OFFSET := 0x00080000
endif
+ifeq ($(cc-name),clang)
+KBUILD_CFLAGS += $(call cc-disable-warning, asm-operand-widths)
+endif
+
# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - 3)) - (1 << 61)
# in 32-bit arithmetic
KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \
--- /dev/null
+CONFIG_LOCALVERSION="-perf"
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_USELIB is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SCHED_HMP=y
+CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_AIO is not set
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MSM8998=y
+CONFIG_ARCH_MSMHAMSTER=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_QCOM_TLB_EL2_HANDLER=y
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_ARM64_REG_REBALANCE_ON_CTX_SW=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+CONFIG_RANDOMIZE_BASE=y
+# CONFIG_RANDOMIZE_MODULE_REGION_FULL is not set
+# CONFIG_EFI is not set
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_IPTABLES_128=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM=y
+CONFIG_BTFM_SLIM_WCN3990=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+# CONFIG_PNP_DEBUG_MESSAGES is not set
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_HDCP_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_QPNP_MISC=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_ANDROID_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_SKY2=y
+CONFIG_MSM_RMNET_MHI=y
+CONFIG_RNDIS_IPA=y
+CONFIG_SMSC911X=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_USBNET=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_ATH_CARDS=y
+CONFIG_WIL6210=m
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
+CONFIG_SECURE_TOUCH=y
+CONFIG_TOUCHSCREEN_ST=y
+CONFIG_TOUCHSCREEN_ST_I2C=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_HBTP_INPUT=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_STMVL53L0=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_MSM_HS=y
+CONFIG_SERIAL_MSM_SMD=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MSM_V2=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SOUNDWIRE=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_MSM8998=y
+CONFIG_PINCTRL_SDM660=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_FG_GEN3=y
+CONFIG_MSM_BCL_CTL=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_BATTERY_BCL=y
+CONFIG_QPNP_SMB2=y
+CONFIG_SMB138X_CHARGER=y
+CONFIG_QPNP_QNOVO=y
+CONFIG_MSM_PM=y
+CONFIG_APSS_CORE_EA=y
+CONFIG_MSM_APM=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_LIMITS_MONITOR=y
+CONFIG_LIMITS_LITE_HW=y
+CONFIG_THERMAL_MONITOR=y
+CONFIG_THERMAL_TSENS8974=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_QCOM_THERMAL_LIMITS_DCVS=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_WCD9335_CODEC=y
+CONFIG_WCD934X_CODEC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_SPM=y
+CONFIG_REGULATOR_CPR3_HMSS=y
+CONFIG_REGULATOR_CPR3_MMSS=y
+CONFIG_REGULATOR_CPRH_KBSS=y
+CONFIG_REGULATOR_MEM_ACC=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_MSM_CAMERA=y
+CONFIG_MSM_CAMERA_DEBUG=y
+CONFIG_MSMB_CAMERA=y
+CONFIG_MSMB_CAMERA_DEBUG=y
+CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_CPP=y
+CONFIG_MSM_CCI=y
+CONFIG_MSM_CSI20_HEADER=y
+CONFIG_MSM_CSI22_HEADER=y
+CONFIG_MSM_CSI30_HEADER=y
+CONFIG_MSM_CSI31_HEADER=y
+CONFIG_MSM_CSIPHY=y
+CONFIG_MSM_CSID=y
+CONFIG_MSM_EEPROM=y
+CONFIG_MSM_ISPIF=y
+CONFIG_IMX134=y
+CONFIG_IMX132=y
+CONFIG_OV9724=y
+CONFIG_OV5648=y
+CONFIG_GC0339=y
+CONFIG_OV8825=y
+CONFIG_OV8865=y
+CONFIG_s5k4e1=y
+CONFIG_OV12830=y
+CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
+CONFIG_MSMB_JPEG=y
+CONFIG_MSM_FD=y
+CONFIG_MSM_JPEGDMA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_VMEM=y
+CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_TSPP=m
+CONFIG_QCOM_KGSL=y
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_FB_MSM=y
+CONFIG_FB_MSM_MDSS=y
+CONFIG_FB_MSM_MDSS_WRITEBACK=y
+CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
+CONFIG_FB_MSM_MDSS_DP_PANEL=y
+CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_MSM8998=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_ESOC=y
+CONFIG_ESOC_DEV=y
+CONFIG_ESOC_CLIENT=y
+CONFIG_ESOC_MDM_4x=y
+CONFIG_ESOC_MDM_DRV=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_SPS_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_QPNP_REVID=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_IPA=y
+CONFIG_RMNET_IPA=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_GPIO_USB_DETECT=y
+CONFIG_MSM_MHI=y
+CONFIG_MSM_MHI_UCI=y
+CONFIG_SEEMP_CORE=y
+CONFIG_USB_BAM=y
+CONFIG_MSM_MDSS_PLL=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_TIMER_LEAP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_MSM_SMEM=y
+CONFIG_QPNP_HAPTIC=y
+CONFIG_MSM_SMD=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMD_XPRT=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_MSM_SMEM_LOGGING=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SPM=y
+CONFIG_QCOM_SCM=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_IRQ_HELPER=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_ICNSS=y
+CONFIG_MSM_RUN_QUEUE_STATS=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_ADSP_LOADER=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_TRACER_PKT=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_MSM_MPM_OF=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_AVTIMER=y
+CONFIG_QCOM_REMOTEQDSS=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_QBT1000=y
+CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
+CONFIG_MSM_RPM_LOG=y
+CONFIG_MSM_RPM_STATS_LOG=y
+CONFIG_QSEE_IPC_IRQ_BRIDGE=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_SPDM_SCM=y
+CONFIG_DEVFREQ_SPDM=y
+CONFIG_EXTCON=y
+CONFIG_IIO=y
+CONFIG_QCOM_RRADC=y
+CONFIG_QCOM_TADC=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_SENSORS_SSC=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_IPC_LOGGING=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
+CONFIG_DEBUG_ALIGN_RODATA=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_QPDI=y
+CONFIG_CORESIGHT_SOURCE_DUMMY=y
+CONFIG_PFK=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_QMI_ENCDEC=y
--- /dev/null
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SCHED_HMP=y
+CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_AIO is not set
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MSM8998=y
+CONFIG_ARCH_MSMHAMSTER=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_QCOM_TLB_EL2_HANDLER=y
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+CONFIG_RANDOMIZE_BASE=y
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_CPU_FREQ=y
+# CONFIG_CPU_FREQ_STAT is not set
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_IPTABLES_128=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM=y
+CONFIG_BTFM_SLIM_WCN3990=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_HDCP_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_QPNP_MISC=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_ANDROID_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_MSM_RMNET_MHI=y
+CONFIG_RNDIS_IPA=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_USBNET=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_ATH_CARDS=y
+CONFIG_WIL6210=m
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
+CONFIG_SECURE_TOUCH=y
+CONFIG_TOUCHSCREEN_ST=y
+CONFIG_TOUCHSCREEN_ST_I2C=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_HBTP_INPUT=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+CONFIG_INPUT_STMVL53L0=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_MSM_HS=y
+CONFIG_SERIAL_MSM_SMD=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MSM_V2=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SOUNDWIRE=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_MSM8998=y
+CONFIG_PINCTRL_SDM660=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_FG_GEN3=y
+CONFIG_MSM_BCL_CTL=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_BATTERY_BCL=y
+CONFIG_QPNP_SMB2=y
+CONFIG_SMB138X_CHARGER=y
+CONFIG_QPNP_QNOVO=y
+CONFIG_MSM_PM=y
+CONFIG_APSS_CORE_EA=y
+CONFIG_MSM_APM=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_LIMITS_MONITOR=y
+CONFIG_LIMITS_LITE_HW=y
+CONFIG_THERMAL_MONITOR=y
+CONFIG_THERMAL_TSENS8974=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_QCOM_THERMAL_LIMITS_DCVS=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_WCD9335_CODEC=y
+CONFIG_WCD934X_CODEC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_SPM=y
+CONFIG_REGULATOR_CPR3_HMSS=y
+CONFIG_REGULATOR_CPR3_MMSS=y
+CONFIG_REGULATOR_CPRH_KBSS=y
+CONFIG_REGULATOR_MEM_ACC=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_MSM_CAMERA=y
+CONFIG_MSM_CAMERA_DEBUG=y
+CONFIG_MSMB_CAMERA=y
+CONFIG_MSMB_CAMERA_DEBUG=y
+CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_CPP=y
+CONFIG_MSM_CCI=y
+CONFIG_MSM_CSI20_HEADER=y
+CONFIG_MSM_CSI22_HEADER=y
+CONFIG_MSM_CSI30_HEADER=y
+CONFIG_MSM_CSI31_HEADER=y
+CONFIG_MSM_CSIPHY=y
+CONFIG_MSM_CSID=y
+CONFIG_MSM_EEPROM=y
+CONFIG_MSM_ISPIF=y
+CONFIG_IMX134=y
+CONFIG_IMX132=y
+CONFIG_OV9724=y
+CONFIG_OV5648=y
+CONFIG_GC0339=y
+CONFIG_OV8825=y
+CONFIG_OV8865=y
+CONFIG_s5k4e1=y
+CONFIG_OV12830=y
+CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
+CONFIG_MSMB_JPEG=y
+CONFIG_MSM_FD=y
+CONFIG_MSM_JPEGDMA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_VMEM=y
+CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_TSPP=m
+CONFIG_QCOM_KGSL=y
+CONFIG_FB=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_FB_MSM=y
+CONFIG_FB_MSM_MDSS=y
+CONFIG_FB_MSM_MDSS_WRITEBACK=y
+CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
+CONFIG_FB_MSM_MDSS_DP_PANEL=y
+CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_MSM8998=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_RING_BUFFER=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_SWITCH=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_CORTEX_ARM64=y
+CONFIG_EDAC_CORTEX_ARM64_PANIC_ON_UE=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_ESOC=y
+CONFIG_ESOC_DEV=y
+CONFIG_ESOC_CLIENT=y
+CONFIG_ESOC_DEBUG=y
+CONFIG_ESOC_MDM_4x=y
+CONFIG_ESOC_MDM_DRV=y
+CONFIG_ESOC_MDM_DBG_ENG=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_SPS_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_QPNP_REVID=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_IPA=y
+CONFIG_RMNET_IPA=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_GPIO_USB_DETECT=y
+CONFIG_MSM_MHI=y
+CONFIG_MSM_MHI_UCI=y
+CONFIG_MSM_MHI_DEBUG=y
+CONFIG_SEEMP_CORE=y
+CONFIG_USB_BAM=y
+CONFIG_MSM_MDSS_PLL=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_TIMER_LEAP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST=y
+CONFIG_ARM_SMMU=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_COMMON_LOG=y
+CONFIG_MSM_SMEM=y
+CONFIG_QPNP_HAPTIC=y
+CONFIG_MSM_SMD=y
+CONFIG_MSM_SMD_DEBUG=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMD_XPRT=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_MSM_SMEM_LOGGING=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_QCOM_DCC=y
+CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SPM=y
+CONFIG_QCOM_SCM=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_IRQ_HELPER=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_ICNSS=y
+CONFIG_MSM_GLADIATOR_ERP_V2=y
+CONFIG_PANIC_ON_GLADIATOR_ERROR_V2=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_RUN_QUEUE_STATS=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_MSM_ADSP_LOADER=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_TRACER_PKT=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_MSM_MPM_OF=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_AVTIMER=y
+CONFIG_QCOM_REMOTEQDSS=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_QBT1000=y
+CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
+CONFIG_MSM_RPM_LOG=y
+CONFIG_MSM_RPM_STATS_LOG=y
+CONFIG_QSEE_IPC_IRQ_BRIDGE=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_SPDM_SCM=y
+CONFIG_DEVFREQ_SPDM=y
+CONFIG_EXTCON=y
+CONFIG_IIO=y
+CONFIG_QCOM_RRADC=y
+CONFIG_QCOM_TADC=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_PHY_XGENE=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_SENSORS_SSC=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EFIVAR_FS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_SLUB_DEBUG_PANIC_ON=y
+CONFIG_PAGE_POISONING=y
+CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_ON_SCHED_BUG=y
+CONFIG_PANIC_ON_RT_THROTTLING=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_UFS_FAULT_INJECTION=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
+CONFIG_QCOM_RTB=y
+CONFIG_QCOM_RTB_SEPARATE_CPUS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_LKDTM=y
+CONFIG_MEMTEST=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
+CONFIG_ARM64_PTDUMP=y
+CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
+CONFIG_FREE_PAGES_RDONLY=y
+CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_QPDI=y
+CONFIG_CORESIGHT_SOURCE_DUMMY=y
+CONFIG_PFK=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_XZ_DEC=y
+CONFIG_QMI_ENCDEC=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_SOC_CAMERA=y
CONFIG_SOC_CAMERA_PLATFORM=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_SOC_CAMERA=y
CONFIG_SOC_CAMERA_PLATFORM=y
CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_SMBCHARGER=y
+CONFIG_QPNP_FG=y
CONFIG_SMB135X_CHARGER=y
CONFIG_SMB1351_USB_CHARGER=y
CONFIG_MSM_BCL_CTL=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_LEDS_QPNP=y
-CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_FLASH=y
CONFIG_LEDS_QPNP_WLED=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_SWITCH=y
CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_SMBCHARGER=y
+CONFIG_QPNP_FG=y
CONFIG_SMB135X_CHARGER=y
CONFIG_SMB1351_USB_CHARGER=y
CONFIG_MSM_BCL_CTL=y
CONFIG_MMC_DW=y
CONFIG_MMC_DW_EXYNOS=y
CONFIG_LEDS_QPNP=y
-CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_FLASH=y
CONFIG_LEDS_QPNP_WLED=y
CONFIG_LEDS_SYSCON=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_BTFM_SLIM=y
CONFIG_BTFM_SLIM_WCN3990=y
CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
CONFIG_CFG80211_INTERNAL_REGDB=y
CONFIG_CFG80211_WEXT=y
CONFIG_MAC80211=m
CONFIG_ATH10K=m
CONFIG_ATH10K_TARGET_SNOC=m
CONFIG_ATH10K_SNOC=y
+CONFIG_ATH10K_DFS_CERTIFIED=y
CONFIG_CLD_LL_CORE=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_KEYRESET=y
CONFIG_BTFM_SLIM=y
CONFIG_BTFM_SLIM_WCN3990=y
CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
CONFIG_CFG80211_INTERNAL_REGDB=y
# CONFIG_CFG80211_CRDA_SUPPORT is not set
CONFIG_CFG80211_WEXT=y
CONFIG_ATH10K_TARGET_SNOC=m
CONFIG_ATH10K_SNOC=y
CONFIG_ATH10K_DEBUG=y
+CONFIG_ATH10K_DFS_CERTIFIED=y
CONFIG_CLD_LL_CORE=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_KEYRESET=y
CONFIG_LOCALVERSION="-perf"
# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_USELIB is not set
CONFIG_AUDIT=y
# CONFIG_AUDITSYSCALL is not set
CONFIG_NO_HZ=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
+# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG_DESTROY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
ldr dgb, [x0, #16]
/* load sha1_ce_state::finalize */
- ldr w4, [x0, #:lo12:sha1_ce_offsetof_finalize]
+ ldr_l w4, sha1_ce_offsetof_finalize, x4
+ ldr w4, [x0, x4]
/* load input */
0: ld1 {v8.4s-v11.4s}, [x1], #64
* the padding is handled by the C code in that case.
*/
cbz x4, 3f
- ldr x4, [x0, #:lo12:sha1_ce_offsetof_count]
+ ldr_l w4, sha1_ce_offsetof_count, x4
+ ldr x4, [x0, x4]
movi v9.2d, #0
mov x8, #0x80000000
movi v10.2d, #0
#include <linux/crypto.h>
#include <linux/module.h>
-#define ASM_EXPORT(sym, val) \
- asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
-
MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
int blocks);
+const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
+const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);
+
static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
- ASM_EXPORT(sha1_ce_offsetof_count,
- offsetof(struct sha1_ce_state, sst.count));
- ASM_EXPORT(sha1_ce_offsetof_finalize,
- offsetof(struct sha1_ce_state, finalize));
-
/*
* Allow the asm code to perform the finalization if there is no
* partial data and the input is a round multiple of the block size.
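
The pattern this hunk (and the matching sha2 hunks below) switches to, sketched in isolation: the structure offset becomes an ordinary const object that the assembly reads at run time with ldr_l, instead of a symbol stamped out from C via an inline-asm directive. The struct and names below are placeholders, not taken from the patch:

#include <linux/stddef.h>	/* offsetof */
#include <linux/types.h>	/* u32 */

/* Sketch only: publish a struct offset for the asm side, which would
 * consume it as, e.g.:
 *	ldr_l	w4, foo_offsetof_bar, x4	// w4 = foo_offsetof_bar
 *	ldr	w4, [x0, x4]			// load foo::bar from *x0
 */
struct foo {
	unsigned long count;
	unsigned int bar;
};

const u32 foo_offsetof_bar = offsetof(struct foo, bar);
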
ld1 {dgav.4s, dgbv.4s}, [x0]
/* load sha256_ce_state::finalize */
- ldr w4, [x0, #:lo12:sha256_ce_offsetof_finalize]
+ ldr_l w4, sha256_ce_offsetof_finalize, x4
+ ldr w4, [x0, x4]
/* load input */
0: ld1 {v16.4s-v19.4s}, [x1], #64
* the padding is handled by the C code in that case.
*/
cbz x4, 3f
- ldr x4, [x0, #:lo12:sha256_ce_offsetof_count]
+ ldr_l w4, sha256_ce_offsetof_count, x4
+ ldr x4, [x0, x4]
movi v17.2d, #0
mov x8, #0x80000000
movi v18.2d, #0
#include <linux/crypto.h>
#include <linux/module.h>
-#define ASM_EXPORT(sym, val) \
- asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
-
MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
int blocks);
+const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
+ sst.count);
+const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
+ finalize);
+
static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
- ASM_EXPORT(sha256_ce_offsetof_count,
- offsetof(struct sha256_ce_state, sst.count));
- ASM_EXPORT(sha256_ce_offsetof_finalize,
- offsetof(struct sha256_ce_state, finalize));
-
/*
* Allow the asm code to perform the finalization if there is no
* partial data and the input is a round multiple of the block size.
void fpsimd_flush_thread(void)
{
+ preempt_disable();
memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
fpsimd_flush_task_state(current);
set_thread_flag(TIF_FOREIGN_FPSTATE);
+ preempt_enable();
}
/*
* booted in EL1 or EL2 respectively.
*/
ENTRY(el2_setup)
+ msr SPsel, #1 // We want to use SP_EL{1,2}
mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2
b.ne 1f
* signal first. We do not need to release the mmap_sem because it
* would already be released in __lock_page_or_retry in mm/filemap.c.
*/
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+ if (!user_mode(regs))
+ goto no_context;
return 0;
+ }
/*
* Major/minor page fault accounting is only done on the initial
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
- { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
+ { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
{ do_bad, SIGBUS, 0, "unknown 8" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
# The gate DSO image is built using a special linker script.
include $(src)/Makefile.gate
-# Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config
-define sed-y
- "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"
-endef
-quiet_cmd_nr_irqs = GEN $@
-define cmd_nr_irqs
- (set -e; \
- echo "#ifndef __ASM_NR_IRQS_H__"; \
- echo "#define __ASM_NR_IRQS_H__"; \
- echo "/*"; \
- echo " * DO NOT MODIFY."; \
- echo " *"; \
- echo " * This file was generated by Kbuild"; \
- echo " *"; \
- echo " */"; \
- echo ""; \
- sed -ne $(sed-y) $<; \
- echo ""; \
- echo "#endif" ) > $@
-endef
-
# We use internal kbuild rules to avoid the "is up to date" message from make
arch/$(SRCARCH)/kernel/nr-irqs.s: arch/$(SRCARCH)/kernel/nr-irqs.c
$(Q)mkdir -p $(dir $@)
$(call if_changed_dep,cc_s_c)
-include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
- $(Q)mkdir -p $(dir $@)
- $(call cmd,nr_irqs)
+include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s FORCE
+ $(call filechk,offsets,__ASM_NR_IRQS_H__)
#include <irq.h>
#define IRQ_STACK_SIZE THREAD_SIZE
+#define IRQ_STACK_START (IRQ_STACK_SIZE - 16)
extern void *irq_stack[NR_CPUS];
+/*
+ * The highest address on the IRQ stack contains a dummy frame put down in
+ * genex.S (handle_int & except_vec_vi_handler) which is structured as follows:
+ *
+ * top ------------
+ * | task sp | <- irq_stack[cpu] + IRQ_STACK_START
+ * ------------
+ * | | <- First frame of IRQ context
+ * ------------
+ *
+ * task sp holds a copy of the task stack pointer where the struct pt_regs
+ * from exception entry can be found.
+ */
+
static inline bool on_irq_stack(int cpu, unsigned long sp)
{
unsigned long low = (unsigned long)irq_stack[cpu];
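
For illustration, a minimal C sketch (not part of the patch) of how an unwinder can recover the interrupted task's stack pointer from the dummy frame described in the comment above; irq_stack and IRQ_STACK_START come from this patch, while the helper name is hypothetical:

/* Sketch only: read the "task sp" slot that genex.S stores at the top
 * of the IRQ stack (LONG_S s1, 0(sp) after PTR_ADD sp, t0, t1). */
static inline unsigned long irq_stack_saved_task_sp(int cpu)
{
	unsigned long head = (unsigned long)irq_stack[cpu] + IRQ_STACK_START;

	return *(unsigned long *)head;	/* sp where pt_regs can be found */
}
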
DEFINE(_THREAD_SIZE, THREAD_SIZE);
DEFINE(_THREAD_MASK, THREAD_MASK);
DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
+ DEFINE(_IRQ_STACK_START, IRQ_STACK_START);
BLANK();
}
beq t0, t1, 2f
/* Switch to IRQ stack */
- li t1, _IRQ_STACK_SIZE
+ li t1, _IRQ_STACK_START
PTR_ADD sp, t0, t1
+ /* Save task's sp on IRQ stack so that unwinding can follow it */
+ LONG_S s1, 0(sp)
2:
jal plat_irq_dispatch
beq t0, t1, 2f
/* Switch to IRQ stack */
- li t1, _IRQ_STACK_SIZE
+ li t1, _IRQ_STACK_START
PTR_ADD sp, t0, t1
+ /* Save task's sp on IRQ stack so that unwinding can follow it */
+ LONG_S s1, 0(sp)
2:
jalr v0
unsigned long pc,
unsigned long *ra)
{
+ unsigned long low, high, irq_stack_high;
struct mips_frame_info info;
unsigned long size, ofs;
+ struct pt_regs *regs;
int leaf;
- extern void ret_from_irq(void);
- extern void ret_from_exception(void);
if (!stack_page)
return 0;
/*
- * If we reached the bottom of interrupt context,
- * return saved pc in pt_regs.
+ * IRQ stacks start at IRQ_STACK_START
+ * task stacks at THREAD_SIZE - 32
*/
- if (pc == (unsigned long)ret_from_irq ||
- pc == (unsigned long)ret_from_exception) {
- struct pt_regs *regs;
- if (*sp >= stack_page &&
- *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
- regs = (struct pt_regs *)*sp;
- pc = regs->cp0_epc;
- if (!user_mode(regs) && __kernel_text_address(pc)) {
- *sp = regs->regs[29];
- *ra = regs->regs[31];
- return pc;
- }
+ low = stack_page;
+ if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
+ high = stack_page + IRQ_STACK_START;
+ irq_stack_high = high;
+ } else {
+ high = stack_page + THREAD_SIZE - 32;
+ irq_stack_high = 0;
+ }
+
+ /*
+ * If we reached the top of the interrupt stack, start unwinding
+ * the interrupted task stack.
+ */
+ if (unlikely(*sp == irq_stack_high)) {
+ unsigned long task_sp = *(unsigned long *)*sp;
+
+ /*
+ * Check that the pointer saved in the IRQ stack head points to
+ * something within the stack of the current task
+ */
+ if (!object_is_on_stack((void *)task_sp))
+ return 0;
+
+ /*
+ * Follow pointer to the task's kernel stack frame where interrupted

+ * state was saved.
+ */
+ regs = (struct pt_regs *)task_sp;
+ pc = regs->cp0_epc;
+ if (!user_mode(regs) && __kernel_text_address(pc)) {
+ *sp = regs->regs[29];
+ *ra = regs->regs[31];
+ return pc;
}
return 0;
}
if (leaf < 0)
return 0;
- if (*sp < stack_page ||
- *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
+ if (*sp < low || *sp + info.frame_size > high)
return 0;
if (leaf)
* Force .bss to 64K alignment so that .bss..swapper_pg_dir
* gets that alignment. .sbss should be empty, so there will be
* no holes after __init_end. */
- BSS_SECTION(0, 0x10000, 0)
+ BSS_SECTION(0, 0x10000, 8)
_end = . ;
panic("Failed to load xbar nodes from devicetree");
if (of_address_to_resource(np_xbar, 0, &res_xbar))
panic("Failed to get xbar resources");
- if (request_mem_region(res_xbar.start, resource_size(&res_xbar),
- res_xbar.name) < 0)
+ if (!request_mem_region(res_xbar.start, resource_size(&res_xbar),
+ res_xbar.name))
panic("Failed to get xbar resources");
ltq_xbar_membase = ioremap_nocache(res_xbar.start,
break;
default:
/* Reserved R6 ops */
- pr_err("Reserved MIPS R6 CMP.condn.S operation\n");
return SIGILL;
}
}
break;
default:
/* Reserved R6 ops */
- pr_err("Reserved MIPS R6 CMP.condn.D operation\n");
return SIGILL;
}
}
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
return ys ? x : y;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754dp_zero(1);
+ return ieee754dp_zero(xs & ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
else if (xs < ys)
return x;
- /* Compare exponent */
- if (xe > ye)
- return x;
- else if (xe < ye)
- return y;
+ /* Signs of inputs are equal, let's compare exponents */
+ if (xs == 0) {
+ /* Inputs are both positive */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+ } else {
+ /* Inputs are both negative */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+ }
- /* Compare mantissa */
+ /* Signs and exponents of inputs are equal, let's compare mantissas */
+ if (xs == 0) {
+ /* Inputs are both positive, with equal signs and exponents */
+ if (xm <= ym)
+ return y;
+ return x;
+ }
+ /* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
- return y;
- return x;
+ return x;
+ return y;
}
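
As a sanity check on the sign-aware comparison above, a sketch of the decision it implements (illustrative only, not from the patch; fmin and the single-precision variants below are the mirror image). For equal signs, a larger exponent means larger magnitude, which is the maximum only when both inputs are positive; for negatives the order inverts:

/* Sketch only: returns nonzero when x is the maximum of two finite
 * values, given sign bits (1 = negative), exponents and mantissas. */
static int fmax_picks_x(int xs, int xe, unsigned long xm,
			int ys, int ye, unsigned long ym)
{
	if (xs != ys)
		return ys;	/* signs differ: the positive input wins */
	if (xe != ye)		/* same sign: order flips for negatives */
		return xs == 0 ? xe > ye : xe < ye;
	if (xm != ym)
		return xs == 0 ? xm > ym : xm < ym;
	return 1;		/* bit-identical values: either will do */
}
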
union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
/*
* Infinity and zero handling
*/
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754dp_inf(xs & ys);
+
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
return x;
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
return y;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754dp_zero(1);
+ return ieee754dp_zero(xs & ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
return y;
/* Compare mantissa */
- if (xm <= ym)
+ if (xm < ym)
return y;
- return x;
+ else if (xm > ym)
+ return x;
+ else if (xs == 0)
+ return x;
+ return y;
}
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
return ys ? y : x;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754dp_zero(1);
+ return ieee754dp_zero(xs | ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
else if (xs < ys)
return y;
- /* Compare exponent */
- if (xe > ye)
- return y;
- else if (xe < ye)
- return x;
+ /* Signs of inputs are the same, let's compare exponents */
+ if (xs == 0) {
+ /* Inputs are both positive */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+ } else {
+ /* Inputs are both negative */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+ }
- /* Compare mantissa */
+ /* Signs and exponents of inputs are equal, let's compare mantissas */
+ if (xs == 0) {
+ /* Inputs are both positive, with equal signs and exponents */
+ if (xm <= ym)
+ return x;
+ return y;
+ }
+ /* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
- return x;
- return y;
+ return y;
+ return x;
}
union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
/*
* Infinity and zero handling
*/
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754dp_inf(xs | ys);
+
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
- return x;
+ return y;
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
- return y;
+ return x;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754dp_zero(1);
+ return ieee754dp_zero(xs | ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
return x;
/* Compare mantissa */
- if (xm <= ym)
+ if (xm < ym)
+ return x;
+ else if (xm > ym)
+ return y;
+ else if (xs == 1)
return x;
return y;
}
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
return ys ? x : y;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754sp_zero(1);
+ return ieee754sp_zero(xs & ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
else if (xs < ys)
return x;
- /* Compare exponent */
- if (xe > ye)
- return x;
- else if (xe < ye)
- return y;
+ /* Signs of inputs are equal, let's compare exponents */
+ if (xs == 0) {
+ /* Inputs are both positive */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+ } else {
+ /* Inputs are both negative */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+ }
- /* Compare mantissa */
+ /* Signs and exponents of inputs are equal, let's compare mantissas */
+ if (xs == 0) {
+ /* Inputs are both positive, with equal signs and exponents */
+ if (xm <= ym)
+ return y;
+ return x;
+ }
+ /* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
- return y;
- return x;
+ return x;
+ return y;
}
union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
/*
* Infinity and zero handling
*/
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754sp_inf(xs & ys);
+
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
return x;
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
return y;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754sp_zero(1);
+ return ieee754sp_zero(xs & ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
return y;
/* Compare mantissa */
- if (xm <= ym)
+ if (xm < ym)
return y;
- return x;
+ else if (xm > ym)
+ return x;
+ else if (xs == 0)
+ return x;
+ return y;
}
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
return ys ? y : x;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754sp_zero(1);
+ return ieee754sp_zero(xs | ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
else if (xs < ys)
return y;
- /* Compare exponent */
- if (xe > ye)
- return y;
- else if (xe < ye)
- return x;
+ /* Signs of inputs are the same, let's compare exponents */
+ if (xs == 0) {
+ /* Inputs are both positive */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+ } else {
+ /* Inputs are both negative */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+ }
- /* Compare mantissa */
+ /* Signs and exponents of inputs are equal, let's compare mantissas */
+ if (xs == 0) {
+ /* Inputs are both positive, with equal signs and exponents */
+ if (xm <= ym)
+ return x;
+ return y;
+ }
+ /* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
- return x;
- return y;
+ return y;
+ return x;
}
union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
/*
* Infinity and zero handling
*/
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754sp_inf(xs | ys);
+
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
- return x;
+ return y;
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
- return y;
+ return x;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754sp_zero(1);
+ return ieee754sp_zero(xs | ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
return x;
/* Compare mantissa */
- if (xm <= ym)
+ if (xm < ym)
+ return x;
+ else if (xm > ym)
+ return y;
+ else if (xs == 1)
return x;
return y;
}
rt2880_pinmux_data = rt3883_pinmux_data;
- ralink_soc == RT3883_SOC;
+ ralink_soc = RT3883_SOC;
}
* the PDC INTRIGUE calls. This is done to eliminate bugs introduced
* in various PDC revisions. The code is much more maintainable
* and reliable this way vs having to debug on every version of PDC
- * on every box.
+ * on every box.
*/
#include <linux/capability.h>
static int perf_release(struct inode *inode, struct file *file);
static int perf_open(struct inode *inode, struct file *file);
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
- loff_t *ppos);
+static ssize_t perf_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos);
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static void perf_start_counters(void);
static int perf_stop_counters(uint32_t *raddr);
/*
* configure:
*
- * Configure the cpu with a given data image. First turn off the counters,
+ * Configure the cpu with a given data image. First turn off the counters,
* then download the image, then turn the counters back on.
*/
static int perf_config(uint32_t *image_ptr)
error = perf_stop_counters(raddr);
if (error != 0) {
printk("perf_config: perf_stop_counters = %ld\n", error);
- return -EINVAL;
+ return -EINVAL;
}
printk("Preparing to write image\n");
error = perf_write_image((uint64_t *)image_ptr);
if (error != 0) {
printk("perf_config: DOWNLOAD = %ld\n", error);
- return -EINVAL;
+ return -EINVAL;
}
printk("Preparing to start counters\n");
}
/*
- * Open the device and initialize all of its memory. The device is only
+ * Open the device and initialize all of its memory. The device is only
* opened once, but can be "queried" by multiple processes that know its
* file descriptor.
*/
* called on the processor that the download should happen
* on.
*/
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
- loff_t *ppos)
+static ssize_t perf_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
{
int err;
size_t image_size;
uint32_t interface_type;
uint32_t test;
- if (perf_processor_interface == ONYX_INTF)
+ if (perf_processor_interface == ONYX_INTF)
image_size = PCXU_IMAGE_SIZE;
- else if (perf_processor_interface == CUDA_INTF)
+ else if (perf_processor_interface == CUDA_INTF)
image_size = PCXW_IMAGE_SIZE;
- else
+ else
return -EFAULT;
if (!capable(CAP_SYS_ADMIN))
/* First check the machine type is correct for
the requested image */
- if (((perf_processor_interface == CUDA_INTF) &&
- (interface_type != CUDA_INTF)) ||
- ((perf_processor_interface == ONYX_INTF) &&
- (interface_type != ONYX_INTF)))
+ if (((perf_processor_interface == CUDA_INTF) &&
+ (interface_type != CUDA_INTF)) ||
+ ((perf_processor_interface == ONYX_INTF) &&
+ (interface_type != ONYX_INTF)))
return -EINVAL;
/* Next check to make sure the requested image
is valid */
- if (((interface_type == CUDA_INTF) &&
+ if (((interface_type == CUDA_INTF) &&
(test >= MAX_CUDA_IMAGES)) ||
- ((interface_type == ONYX_INTF) &&
- (test >= MAX_ONYX_IMAGES)))
+ ((interface_type == ONYX_INTF) &&
+ (test >= MAX_ONYX_IMAGES)))
return -EINVAL;
/* Copy the image into the processor */
- if (interface_type == CUDA_INTF)
+ if (interface_type == CUDA_INTF)
return perf_config(cuda_images[test]);
else
return perf_config(onyx_images[test]);
static void perf_patch_images(void)
{
#if 0 /* FIXME!! */
-/*
+/*
* NOTE: this routine is VERY specific to the current TLB image.
* If the image is changed, this routine might also need to be changed.
*/
extern void $i_dtlb_miss_2_0();
extern void PA2_0_iva();
- /*
+ /*
* We can only use the lower 32-bits, the upper 32-bits should be 0
- * anyway given this is in the kernel
+ * anyway given this is in the kernel
*/
uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0);
uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0);
if (perf_processor_interface == ONYX_INTF) {
/* clear last 2 bytes */
- onyx_images[TLBMISS][15] &= 0xffffff00;
+ onyx_images[TLBMISS][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
onyx_images[TLBMISS][17] = itlb_addr;
/* clear last 2 bytes */
- onyx_images[TLBHANDMISS][15] &= 0xffffff00;
+ onyx_images[TLBHANDMISS][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
onyx_images[TLBHANDMISS][17] = itlb_addr;
/* clear last 2 bytes */
- onyx_images[BIG_CPI][15] &= 0xffffff00;
+ onyx_images[BIG_CPI][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
} else if (perf_processor_interface == CUDA_INTF) {
/* Cuda interface */
- cuda_images[TLBMISS][16] =
+ cuda_images[TLBMISS][16] =
(cuda_images[TLBMISS][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
- cuda_images[TLBMISS][17] =
+ cuda_images[TLBMISS][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;
- cuda_images[TLBHANDMISS][16] =
+ cuda_images[TLBHANDMISS][16] =
(cuda_images[TLBHANDMISS][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
- cuda_images[TLBHANDMISS][17] =
+ cuda_images[TLBHANDMISS][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;
- cuda_images[BIG_CPI][16] =
+ cuda_images[BIG_CPI][16] =
(cuda_images[BIG_CPI][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
- cuda_images[BIG_CPI][17] =
+ cuda_images[BIG_CPI][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
} else {
/*
* ioctl routine
- * All routines affect the processor that they are executed on. Thus you
+ * All routines affect the processor that they are executed on. Thus you
* must be running on the processor that you wish to change.
*/
}
/* copy out the Counters */
- if (copy_to_user((void __user *)arg, raddr,
+ if (copy_to_user((void __user *)arg, raddr,
sizeof (raddr)) != 0) {
error = -EFAULT;
break;
.open = perf_open,
.release = perf_release
};
-
+
static struct miscdevice perf_dev = {
MISC_DYNAMIC_MINOR,
PA_PERF_DEV,
/* OR sticky2 (bit 1496) to counter2 bit 32 */
tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
raddr[2] = (uint32_t)tmp64;
-
+
/* Counter3 is bits 1497 to 1528 */
tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff;
/* OR sticky3 (bit 1529) to counter3 bit 32 */
userbuf[22] = 0;
userbuf[23] = 0;
- /*
+ /*
* Write back the zeroed bytes + the image given
* the read was destructive.
*/
} else {
/*
- * Read RDR-15 which contains the counters and sticky bits
+ * Read RDR-15 which contains the counters and sticky bits
*/
if (!perf_rdr_read_ubuf(15, userbuf)) {
return -13;
}
- /*
+ /*
* Clear out the counters
*/
perf_rdr_clear(15);
raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
}
-
+
return 0;
}
i = tentry->num_words;
while (i--) {
buffer[i] = 0;
- }
+ }
 /* Check for bits that aren't an even multiple of 64 */
if ((xbits = width & 0x03f) != 0) {
}
runway = ioremap_nocache(cpu_device->hpa.start, 4096);
+ if (!runway) {
+ pr_err("perf_write_image: ioremap failed!\n");
+ return -ENOMEM;
+ }
/* Merge intrigue bits into Runway STATUS 0 */
tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
- __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
+ __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
runway + RUNWAY_STATUS);
-
+
/* Write RUNWAY DEBUG registers */
for (i = 0; i < 8; i++) {
__raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
}
- return 0;
+ return 0;
}
/*
perf_rdr_shift_out_U(rdr_num, buffer[i]);
} else {
perf_rdr_shift_out_W(rdr_num, buffer[i]);
- }
+ }
}
printk("perf_rdr_write done\n");
}
#define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz))
+#define __get_user_or_set_dar(_regs, _dest, _addr) \
+ ({ \
+ int rc = 0; \
+ typeof(_addr) __addr = (_addr); \
+ if (__get_user_inatomic(_dest, __addr)) { \
+ _regs->dar = (unsigned long)__addr; \
+ rc = -EFAULT; \
+ } \
+ rc; \
+ })
+
+#define __put_user_or_set_dar(_regs, _src, _addr) \
+ ({ \
+ int rc = 0; \
+ typeof(_addr) __addr = (_addr); \
+ if (__put_user_inatomic(_src, __addr)) { \
+ _regs->dar = (unsigned long)__addr; \
+ rc = -EFAULT; \
+ } \
+ rc; \
+ })
+
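
These helpers wrap the inatomic user accessors so the faulting address is recorded in regs->dar before -EFAULT is returned; the exception path can then report the correct address to user space. A minimal usage sketch (hypothetical function, assuming a pt_regs pointer and a user pointer are in scope):

static int read_word_or_fault(struct pt_regs *regs,
			      unsigned int __user *uaddr,
			      unsigned int *out)
{
	/* On a fault, regs->dar is set to (unsigned long)uaddr. */
	if (__get_user_or_set_dar(regs, *out, uaddr))
		return -EFAULT;

	return 0;
}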
static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
unsigned int reg, unsigned int nb,
unsigned int flags, unsigned int instr,
} else {
unsigned long pc = regs->nip ^ (swiz & 4);
- if (__get_user_inatomic(instr,
- (unsigned int __user *)pc))
+ if (__get_user_or_set_dar(regs, instr,
+ (unsigned int __user *)pc))
return -EFAULT;
+
if (swiz == 0 && (flags & SW))
instr = cpu_to_le32(instr);
nb = (instr >> 11) & 0x1f;
((nb0 + 3) / 4) * sizeof(unsigned long));
for (i = 0; i < nb; ++i, ++p)
- if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
- SWIZ_PTR(p)))
+ if (__get_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
if (nb0 > 0) {
 rptr = &regs->gpr[0];
addr += nb;
for (i = 0; i < nb0; ++i, ++p)
- if (__get_user_inatomic(REG_BYTE(rptr,
- i ^ bswiz),
- SWIZ_PTR(p)))
+ if (__get_user_or_set_dar(regs,
+ REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
}
} else {
for (i = 0; i < nb; ++i, ++p)
- if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
- SWIZ_PTR(p)))
+ if (__put_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
if (nb0 > 0) {
 rptr = &regs->gpr[0];
addr += nb;
for (i = 0; i < nb0; ++i, ++p)
- if (__put_user_inatomic(REG_BYTE(rptr,
- i ^ bswiz),
- SWIZ_PTR(p)))
+ if (__put_user_or_set_dar(regs,
+ REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
}
}
* Only POWER6 has these instructions, and it does true little-endian,
* so we don't need the address swizzling.
*/
-static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
- unsigned int flags)
+static int emulate_fp_pair(struct pt_regs *regs, unsigned char __user *addr,
+ unsigned int reg, unsigned int flags)
{
 char *ptr0 = (char *) &current->thread.TS_FPR(reg);
 char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
- int i, ret, sw = 0;
+ int i, sw = 0;
if (reg & 1)
return 0; /* invalid form: FRS/FRT must be even */
if (flags & SW)
sw = 7;
- ret = 0;
+
for (i = 0; i < 8; ++i) {
if (!(flags & ST)) {
- ret |= __get_user(ptr0[i^sw], addr + i);
- ret |= __get_user(ptr1[i^sw], addr + i + 8);
+ if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
+ return -EFAULT;
+ if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
+ return -EFAULT;
} else {
- ret |= __put_user(ptr0[i^sw], addr + i);
- ret |= __put_user(ptr1[i^sw], addr + i + 8);
+ if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
+ return -EFAULT;
+ if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
+ return -EFAULT;
}
}
- if (ret)
- return -EFAULT;
+
return 1; /* exception handled and fixed up */
}
{
 char *ptr0 = (char *)&regs->gpr[reg];
 char *ptr1 = (char *)&regs->gpr[reg+1];
- int i, ret, sw = 0;
+ int i, sw = 0;
if (reg & 1)
return 0; /* invalid form: GPR must be even */
if (flags & SW)
sw = 7;
- ret = 0;
+
for (i = 0; i < 8; ++i) {
if (!(flags & ST)) {
- ret |= __get_user(ptr0[i^sw], addr + i);
- ret |= __get_user(ptr1[i^sw], addr + i + 8);
+ if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
+ return -EFAULT;
+ if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
+ return -EFAULT;
} else {
- ret |= __put_user(ptr0[i^sw], addr + i);
- ret |= __put_user(ptr1[i^sw], addr + i + 8);
+ if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
+ return -EFAULT;
+ if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
+ return -EFAULT;
}
}
- if (ret)
- return -EFAULT;
+
return 1; /* exception handled and fixed up */
}
#endif /* CONFIG_PPC64 */
for (j = 0; j < length; j += elsize) {
for (i = 0; i < elsize; ++i) {
if (flags & ST)
- ret |= __put_user(ptr[i^sw], addr + i);
+ ret = __put_user_or_set_dar(regs, ptr[i^sw],
+ addr + i);
else
- ret |= __get_user(ptr[i^sw], addr + i);
+ ret = __get_user_or_set_dar(regs, ptr[i^sw],
+ addr + i);
+
+ if (ret)
+ return ret;
}
ptr += elsize;
#ifdef __LITTLE_ENDIAN__
unsigned int dsisr;
unsigned char __user *addr;
unsigned long p, swiz;
- int ret, i;
+ int i;
union data {
u64 ll;
double dd;
if (flags & F) {
/* Special case for 16-byte FP loads and stores */
PPC_WARN_ALIGNMENT(fp_pair, regs);
- return emulate_fp_pair(addr, reg, flags);
+ return emulate_fp_pair(regs, addr, reg, flags);
} else {
#ifdef CONFIG_PPC64
/* Special case for 16-byte loads and stores */
}
data.ll = 0;
- ret = 0;
p = (unsigned long)addr;
for (i = 0; i < nb; i++)
- ret |= __get_user_inatomic(data.v[start + i],
- SWIZ_PTR(p++));
-
- if (unlikely(ret))
- return -EFAULT;
+ if (__get_user_or_set_dar(regs, data.v[start + i],
+ SWIZ_PTR(p++)))
+ return -EFAULT;
} else if (flags & F) {
data.ll = current->thread.TS_FPR(reg);
break;
}
- ret = 0;
p = (unsigned long)addr;
for (i = 0; i < nb; i++)
- ret |= __put_user_inatomic(data.v[start + i],
- SWIZ_PTR(p++));
+ if (__put_user_or_set_dar(regs, data.v[start + i],
+ SWIZ_PTR(p++)))
+ return -EFAULT;
- if (unlikely(ret))
- return -EFAULT;
} else if (flags & F)
current->thread.TS_FPR(reg) = data.ll;
else
struct kvm_create_spapr_tce *args)
{
struct kvmppc_spapr_tce_table *stt = NULL;
+ struct kvmppc_spapr_tce_table *siter;
long npages;
int ret = -ENOMEM;
int i;
- /* Check this LIOBN hasn't been previously allocated */
- list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
- if (stt->liobn == args->liobn)
- return -EBUSY;
- }
-
npages = kvmppc_stt_npages(args->window_size);
stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
GFP_KERNEL);
if (!stt)
- goto fail;
+ return ret;
stt->liobn = args->liobn;
stt->window_size = args->window_size;
goto fail;
}
- kvm_get_kvm(kvm);
-
mutex_lock(&kvm->lock);
- list_add(&stt->list, &kvm->arch.spapr_tce_tables);
+
+ /* Check this LIOBN hasn't been previously allocated */
+ ret = 0;
+ list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
+ if (siter->liobn == args->liobn) {
+ ret = -EBUSY;
+ break;
+ }
+ }
+
+ if (!ret)
+ ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
+ stt, O_RDWR | O_CLOEXEC);
+
+ if (ret >= 0) {
+ list_add(&stt->list, &kvm->arch.spapr_tce_tables);
+ kvm_get_kvm(kvm);
+ }
mutex_unlock(&kvm->lock);
- return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
- stt, O_RDWR | O_CLOEXEC);
+ if (ret >= 0)
+ return ret;
-fail:
- if (stt) {
- for (i = 0; i < npages; i++)
- if (stt->pages[i])
- __free_page(stt->pages[i]);
+ fail:
+ for (i = 0; i < npages; i++)
+ if (stt->pages[i])
+ __free_page(stt->pages[i]);
- kfree(stt);
- }
+ kfree(stt);
return ret;
}
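
The reordering above closes a race: the duplicate-LIOBN check, the fd creation, and the list insertion now all happen under kvm->lock, and the table is only published once the fd exists. A condensed sketch of the pattern (liobn_in_use() is a hypothetical helper standing in for the list walk):

	mutex_lock(&kvm->lock);

	ret = liobn_in_use(kvm, args->liobn) ? -EBUSY : 0;
	if (!ret)
		ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				       stt, O_RDWR | O_CLOEXEC);
	if (ret >= 0) {
		/* Publish only after every fallible step has succeeded. */
		list_add(&stt->list, &kvm->arch.spapr_tce_tables);
		kvm_get_kvm(kvm);
	}

	mutex_unlock(&kvm->lock);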
return -ENOENT;
dn = dlpar_configure_connector(drc_index, parent_dn);
- if (!dn)
+ if (!dn) {
+ of_node_put(parent_dn);
return -ENOENT;
+ }
rc = dlpar_attach_node(dn);
if (rc)
extern atomic_t dcpage_flushes_xcall;
extern int sysctl_tsb_ratio;
-#endif
+#ifdef CONFIG_SERIAL_SUNHV
+void sunhv_migrate_hvcons_irq(int cpu);
+#endif
+#endif
void sun_do_break(void);
extern int stop_a_enabled;
extern int scons_pwroff;
int cpu;
if (tlb_type == hypervisor) {
+ int this_cpu = smp_processor_id();
+#ifdef CONFIG_SERIAL_SUNHV
+ sunhv_migrate_hvcons_irq(this_cpu);
+#endif
for_each_online_cpu(cpu) {
- if (cpu == smp_processor_id())
+ if (cpu == this_cpu)
continue;
#ifdef CONFIG_SUN_LDOMS
if (ldom_domaining_enabled) {
def_bool y
depends on X86_32 && !CC_STACKPROTECTOR
-config ARCH_HWEIGHT_CFLAGS
- string
- default "-fcall-saved-ecx -fcall-saved-edx" if X86_32
- default "-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" if X86_64
-
config ARCH_SUPPORTS_UPROBES
def_bool y
KBUILD_DEFCONFIG := $(ARCH)_defconfig
endif
+# For gcc, stack alignment is specified with -mpreferred-stack-boundary, which
+# takes the log2 of the alignment in bytes; clang has -mstack-alignment, which
+# takes the byte count itself.
+ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
+ cc_stack_align4 := -mpreferred-stack-boundary=2
+ cc_stack_align8 := -mpreferred-stack-boundary=3
+else ifneq ($(call cc-option, -mstack-alignment=16),)
+ cc_stack_align4 := -mstack-alignment=4
+ cc_stack_align8 := -mstack-alignment=8
+endif
+
# How to compile the 16-bit code. Note we always compile for -march=i386;
# that way we can complain to the user if the CPU is insufficient.
#
-DDISABLE_BRANCH_PROFILING \
-Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \
- -mno-mmx -mno-sse \
- $(call cc-option, -ffreestanding) \
- $(call cc-option, -fno-stack-protector) \
- $(call cc-option, -mpreferred-stack-boundary=2)
+ -mno-mmx -mno-sse
+
+REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
+REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
+REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
export REALMODE_CFLAGS
# BITS is used as extension for files which are available in a 32 bit
# with nonstandard options
KBUILD_CFLAGS += -fno-pic
- # prevent gcc from keeping the stack 16 byte aligned
- KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
+ # Align the stack to the register width instead of using the default
+ # alignment of 16 bytes. This reduces stack usage and the number of
+ # alignment instructions.
+ KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align4))
# Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
# a lot more stack due to the lack of sharing of stacklots:
KBUILD_CFLAGS += -m64
# Align jump targets to 1 byte, not the default 16 bytes:
- KBUILD_CFLAGS += -falign-jumps=1
+ KBUILD_CFLAGS += $(call cc-option,-falign-jumps=1)
# Pack loops tightly as well:
- KBUILD_CFLAGS += -falign-loops=1
+ KBUILD_CFLAGS += $(call cc-option,-falign-loops=1)
# Don't autogenerate traditional x87 instructions
KBUILD_CFLAGS += $(call cc-option,-mno-80387)
KBUILD_CFLAGS += -fno-pic
- # Use -mpreferred-stack-boundary=3 if supported.
- KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
+ # By default gcc and clang use a stack alignment of 16 bytes for x86.
+ # However the standard kernel entry on x86-64 leaves the stack on an
+ # 8-byte boundary. If the compiler isn't informed about the actual
+ # alignment it will generate extra alignment instructions for the
+ # default alignment which keep the stack *mis*aligned.
+ # Furthermore an alignment to the register width reduces stack usage
+ # and the number of alignment instructions.
+ KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align8))
# Use -mskip-rax-setup if supported.
KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
}
/* Circular multiply for better bit diffusion */
- asm("mul %3"
+ asm(_ASM_MUL "%3"
: "=a" (random), "=d" (raw)
: "a" (random), "rm" (mix_const));
random += raw;
#include "ctype.h"
#include "string.h"
+/*
+ * Undef these macros so that the functions that we provide
+ * here will have the correct names regardless of how string.h
+ * may have chosen to #define them.
+ */
+#undef memcpy
+#undef memset
+#undef memcmp
+
int memcmp(const void *s1, const void *s2, size_t len)
{
u8 diff;
#include <linux/linkage.h>
#include <asm/inst.h>
-#define CONCAT(a,b) a##b
#define VMOVDQ vmovdqu
#define xdata0 %xmm0
#define num_bytes %r8
#define tmp %r10
-#define DDQ(i) CONCAT(ddq_add_,i)
-#define XMM(i) CONCAT(%xmm, i)
#define DDQ_DATA 0
#define XDATA 1
#define KEY_128 1
/* generate a unique variable for ddq_add_x */
.macro setddq n
- var_ddq_add = DDQ(\n)
+ var_ddq_add = ddq_add_\n
.endm
/* generate a unique variable for xmm register */
.macro setxdata n
- var_xdata = XMM(\n)
+ var_xdata = %xmm\n
.endm
/* club the numeric 'id' to the symbol 'name' */
#define new_len2 145f-144f
/*
- * max without conditionals. Idea adapted from:
+ * gas compatible max based on the idea from:
* http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
+ *
+ * The additional "-" is needed because gas uses a "true" value of -1.
*/
#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
alt_end_marker ":\n"
/*
- * max without conditionals. Idea adapted from:
+ * gas compatible max based on the idea from:
* http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
*
- * The additional "-" is needed because gas works with s32s.
+ * The additional "-" is needed because gas uses a "true" value of -1.
*/
-#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") - (" b ")))))"
+#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))"
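
In C, where (a < b) evaluates to 0 or 1, a single negation turns the comparison into an all-zeroes or all-ones mask; gas yields -1 for true, which is why the macro above carries the extra "-". A plain C sketch of the same branchless-max identity:

/* Branchless max: mask is 0 when a >= b, all-ones when a < b. */
static inline unsigned int alt_max_demo(unsigned int a, unsigned int b)
{
	unsigned int mask = -(unsigned int)(a < b);

	return a ^ ((a ^ b) & mask);	/* == max(a, b) */
}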
/*
* Pad the second replacement alternative with additional NOPs if it is
#define _ASM_X86_HWEIGHT_H
#ifdef CONFIG_64BIT
-/* popcnt %edi, %eax -- redundant REX prefix for alignment */
-#define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7"
+/* popcnt %edi, %eax */
+#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc7"
/* popcnt %rdi, %rax */
#define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7"
#define REG_IN "D"
#define REG_OUT "a"
#endif
-/*
- * __sw_hweightXX are called from within the alternatives below
- * and callee-clobbered registers need to be taken care of. See
- * ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective
- * compiler switches.
- */
+#define __HAVE_ARCH_SW_HWEIGHT
+
static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
- unsigned int res = 0;
+ unsigned int res;
asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT)
- : "="REG_OUT (res)
- : REG_IN (w));
+ : "="REG_OUT (res)
+ : REG_IN (w));
return res;
}
#else
static __always_inline unsigned long __arch_hweight64(__u64 w)
{
- unsigned long res = 0;
+ unsigned long res;
asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
- : "="REG_OUT (res)
- : REG_IN (w));
+ : "="REG_OUT (res)
+ : REG_IN (w));
return res;
}
#define _ASM_ADD __ASM_SIZE(add)
#define _ASM_SUB __ASM_SIZE(sub)
#define _ASM_XADD __ASM_SIZE(xadd)
+#define _ASM_MUL __ASM_SIZE(mul)
#define _ASM_AX __ASM_REG(ax)
#define _ASM_BX __ASM_REG(bx)
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
do { \
+ unsigned long base; \
unsigned v; \
(pr_reg)[0] = (regs)->r15; \
(pr_reg)[1] = (regs)->r14; \
(pr_reg)[18] = (regs)->flags; \
(pr_reg)[19] = (regs)->sp; \
(pr_reg)[20] = (regs)->ss; \
- (pr_reg)[21] = current->thread.fs; \
- (pr_reg)[22] = current->thread.gs; \
+ rdmsrl(MSR_FS_BASE, base); (pr_reg)[21] = base; \
+ rdmsrl(MSR_KERNEL_GS_BASE, base); (pr_reg)[22] = base; \
asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \
static inline void outs##bwl(int port, const void *addr, unsigned long count) \
{ \
asm volatile("rep; outs" #bwl \
- : "+S"(addr), "+c"(count) : "d"(port)); \
+ : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \
} \
\
static inline void ins##bwl(int port, void *addr, unsigned long count) \
{ \
asm volatile("rep; ins" #bwl \
- : "+D"(addr), "+c"(count) : "d"(port)); \
+ : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \
}
BUILDIO(b, b, char)
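
The new "memory" clobber tells the compiler that the string I/O instructions read or write the buffer behind addr, which the operand list alone does not express; without it, the stores that fill a transmit buffer could legally be sunk past the asm. An illustrative caller (0x3f8 is assumed here as the legacy COM1 port):

static void serial_tx_demo(void)
{
	char tx[4] = { 0x12, 0x34, 0x56, 0x78 };

	/*
	 * The "memory" clobber orders these initializing stores
	 * before the rep outsb that consumes them.
	 */
	outsb(0x3f8, tx, sizeof(tx));
}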
xsave = &fpu->state.xsave;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
+
+ /* xcomp_bv must be 0 when using uncompacted format */
+ if (!ret && xsave->header.xcomp_bv)
+ ret = -EINVAL;
+
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
*/
memset(&xsave->header.reserved, 0, 48);
+ /*
+ * In case of failure, mark all states as init:
+ */
+ if (ret)
+ fpstate_init(&fpu->state);
+
return ret;
}
fpu__drop(fpu);
if (__copy_from_user(&fpu->state.xsave, buf_fx, state_size) ||
- __copy_from_user(&env, buf, sizeof(env))) {
+ __copy_from_user(&env, buf, sizeof(env)) ||
+ (state_size > offsetof(struct xregs_state, header) &&
+ fpu->state.xsave.header.xcomp_bv)) {
fpstate_init(&fpu->state);
err = -1;
} else {
EXPORT_SYMBOL(___preempt_schedule);
EXPORT_SYMBOL(___preempt_schedule_notrace);
#endif
+
+EXPORT_SYMBOL(__sw_hweight32);
EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(__sw_hweight32);
+EXPORT_SYMBOL(__sw_hweight64);
+
/*
* Export string functions. We normally rely on gcc builtin for most of these,
* but gcc sometimes decides not to inline them.
/* Allow posting non-urgent interrupts */
new.sn = 0;
- } while (cmpxchg(&pi_desc->control, old.control,
- new.control) != old.control);
+ } while (cmpxchg64(&pi_desc->control, old.control,
+ new.control) != old.control);
}
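
pi_desc->control is a 64-bit field, and on 32-bit hosts a plain cmpxchg() would only compare and swap 4 bytes; cmpxchg64() performs the full 8-byte operation on both word sizes. The retry-loop shape, sketched with a hypothetical flag constant:

#define PI_DEMO_FLAG	BIT_ULL(1)	/* hypothetical bit to clear */

static void pi_clear_flag_demo(struct pi_desc *pi_desc)
{
	u64 old, new;

	do {
		old = READ_ONCE(pi_desc->control);
		new = old & ~PI_DEMO_FLAG;
	} while (cmpxchg64(&pi_desc->control, old, new) != old);
}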
/*
* Switches to specified vcpu, until a matching vcpu_put(), but assumes
{
#ifdef CONFIG_SMP
if (vcpu->mode == IN_GUEST_MODE) {
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
/*
- * Currently, we don't support urgent interrupt,
- * all interrupts are recognized as non-urgent
- * interrupt, so we cannot post interrupts when
- * 'SN' is set.
+ * The vector of the interrupt to be delivered to the vcpu
+ * has already been set in PIR before this function is
+ * called.
+ *
+ * The following cases can be reached in this block, and
+ * we always send a notification event in all of them, as
+ * explained below.
*
- * If the vcpu is in guest mode, it means it is
- * running instead of being scheduled out and
- * waiting in the run queue, and that's the only
- * case when 'SN' is set currently, warning if
- * 'SN' is set.
+ * Case 1: the vcpu stays in non-root mode. Sending a
+ * notification event posts the interrupt to the vcpu.
+ *
+ * Case 2: the vcpu exits to root mode and is still
+ * runnable. PIR will be synced to vIRR before the
+ * next vcpu entry. Sending a notification event in
+ * this case has no effect, as the vcpu is no longer
+ * in non-root (guest) mode.
+ *
+ * Case 3: the vcpu exits to root mode and is blocked.
+ * vcpu_block() has already synced PIR to vIRR and
+ * never blocks the vcpu if vIRR is not cleared.
+ * Therefore, a blocked vcpu here does not wait for any
+ * requested interrupts in PIR, and sending a
+ * notification event, which has no effect, is safe.
*/
- WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc));
apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
POSTED_INTR_VECTOR);
vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
page_to_phys(vmx->nested.virtual_apic_page));
vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
+ } else {
+#ifdef CONFIG_X86_64
+ exec_control |= CPU_BASED_CR8_LOAD_EXITING |
+ CPU_BASED_CR8_STORE_EXITING;
+#endif
}
if (cpu_has_vmx_msr_bitmap() &&
* (KVM doesn't change it)- no reason to call set_cr4_guest_host_mask();
*/
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
- kvm_set_cr4(vcpu, vmcs12->host_cr4);
+ vmx_set_cr4(vcpu, vmcs12->host_cr4);
nested_ept_uninit_mmu_context(vcpu);
/* set 'NV' to 'wakeup vector' */
new.nv = POSTED_INTR_WAKEUP_VECTOR;
- } while (cmpxchg(&pi_desc->control, old.control,
- new.control) != old.control);
+ } while (cmpxchg64(&pi_desc->control, old.control,
+ new.control) != old.control);
return 0;
}
/* set 'NV' to 'notification vector' */
new.nv = POSTED_INTR_VECTOR;
- } while (cmpxchg(&pi_desc->control, old.control,
- new.control) != old.control);
+ } while (cmpxchg64(&pi_desc->control, old.control,
+ new.control) != old.control);
 if (vcpu->pre_pcpu != -1) {
spin_lock_irqsave(
struct kvm_lapic_irq irq;
struct kvm_vcpu *vcpu;
struct vcpu_data vcpu_info;
- int idx, ret = -EINVAL;
+ int idx, ret = 0;
if (!kvm_arch_has_assigned_device(kvm) ||
!irq_remapping_cap(IRQ_POSTING_CAP))
idx = srcu_read_lock(&kvm->irq_srcu);
irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
- BUG_ON(guest_irq >= irq_rt->nr_rt_entries);
+ if (guest_irq >= irq_rt->nr_rt_entries ||
+ hlist_empty(&irq_rt->map[guest_irq])) {
+ pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
+ guest_irq, irq_rt->nr_rt_entries);
+ goto out;
+ }
hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
if (e->type != KVM_IRQ_ROUTING_MSI)
if (set)
ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
- else {
- /* suppress notification event before unposting */
- pi_set_sn(vcpu_to_pi_desc(vcpu));
+ else
ret = irq_set_vcpu_affinity(host_irq, NULL);
- pi_clear_sn(vcpu_to_pi_desc(vcpu));
- }
if (ret < 0) {
printk(KERN_INFO "%s: failed to update PI IRTE\n",
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
-obj-y += msr.o msr-reg.o msr-reg-export.o hash.o
+obj-y += msr.o msr-reg.o msr-reg-export.o hash.o hweight.o
ifeq ($(CONFIG_X86_32),y)
obj-y += atomic64_32.o
--- /dev/null
+#include <linux/linkage.h>
+
+#include <asm/asm.h>
+
+/*
+ * unsigned int __sw_hweight32(unsigned int w)
+ * %rdi: w
+ */
+ENTRY(__sw_hweight32)
+
+#ifdef CONFIG_X86_64
+ movl %edi, %eax # w
+#endif
+ __ASM_SIZE(push,) %__ASM_REG(dx)
+ movl %eax, %edx # w -> t
+ shrl %edx # t >>= 1
+ andl $0x55555555, %edx # t &= 0x55555555
+ subl %edx, %eax # w -= t
+
+ movl %eax, %edx # w -> t
+ shrl $2, %eax # w_tmp >>= 2
+ andl $0x33333333, %edx # t &= 0x33333333
+ andl $0x33333333, %eax # w_tmp &= 0x33333333
+ addl %edx, %eax # w = w_tmp + t
+
+ movl %eax, %edx # w -> t
+ shrl $4, %edx # t >>= 4
+ addl %edx, %eax # w_tmp += t
+ andl $0x0f0f0f0f, %eax # w_tmp &= 0x0f0f0f0f
+ imull $0x01010101, %eax, %eax # w_tmp *= 0x01010101
+ shrl $24, %eax # w = w_tmp >> 24
+ __ASM_SIZE(pop,) %__ASM_REG(dx)
+ ret
+ENDPROC(__sw_hweight32)
+
+ENTRY(__sw_hweight64)
+#ifdef CONFIG_X86_64
+ pushq %rdi
+ pushq %rdx
+
+ movq %rdi, %rdx # w -> t
+ movabsq $0x5555555555555555, %rax
+ shrq %rdx # t >>= 1
+ andq %rdx, %rax # t &= 0x5555555555555555
+ movabsq $0x3333333333333333, %rdx
+ subq %rax, %rdi # w -= t
+
+ movq %rdi, %rax # w -> t
+ shrq $2, %rdi # w_tmp >>= 2
+ andq %rdx, %rax # t &= 0x3333333333333333
+ andq %rdi, %rdx # w_tmp &= 0x3333333333333333
+ addq %rdx, %rax # w = w_tmp + t
+
+ movq %rax, %rdx # w -> t
+ shrq $4, %rdx # t >>= 4
+ addq %rdx, %rax # w_tmp += t
+ movabsq $0x0f0f0f0f0f0f0f0f, %rdx
+ andq %rdx, %rax # w_tmp &= 0x0f0f0f0f0f0f0f0f
+ movabsq $0x0101010101010101, %rdx
+ imulq %rdx, %rax # w_tmp *= 0x0101010101010101
+ shrq $56, %rax # w = w_tmp >> 56
+
+ popq %rdx
+ popq %rdi
+ ret
+#else /* CONFIG_X86_32 */
+ /* We're getting a u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */
+ pushl %ecx
+
+ call __sw_hweight32
+ movl %eax, %ecx # stash away result
+ movl %edx, %eax # second part of input
+ call __sw_hweight32
+ addl %ecx, %eax # result
+
+ popl %ecx
+ ret
+#endif
+ENDPROC(__sw_hweight64)
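
For reference, the 32-bit routine above is the standard parallel bit count (pair sums, then nibble sums, then byte sums, then a multiply to add the bytes). A C rendering of the same steps (a sketch only; the assembly remains the authoritative implementation):

unsigned int sw_hweight32_ref(unsigned int w)
{
	w -= (w >> 1) & 0x55555555;				/* 2-bit sums   */
	w  = (w & 0x33333333) + ((w >> 2) & 0x33333333);	/* 4-bit sums   */
	w  = (w + (w >> 4)) & 0x0f0f0f0f;			/* 8-bit sums   */
	return (w * 0x01010101) >> 24;				/* add up bytes */
}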
offset = uaddr & ~PAGE_MASK;
for (j = cur_page; j < page_limit; j++) {
unsigned int bytes = PAGE_SIZE - offset;
+ unsigned short prev_bi_vcnt = bio->bi_vcnt;
if (len <= 0)
break;
bytes)
break;
+ /*
+ * check if the vector was merged with the previous
+ * one; if so, drop the extra page reference
+ */
+ if (bio->bi_vcnt == prev_bi_vcnt)
+ put_page(pages[j]);
+
len -= bytes;
offset = 0;
}
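
The extra reference exists because each page was pinned once up front; when the add-page helper (elided in the context above) merges a physically contiguous page into the previous bio_vec, bi_vcnt stays the same and that pin must be dropped. The idea in isolation, with generic names:

	unsigned short before = bio->bi_vcnt;

	if (bio_add_page(bio, page, len, offset) < len)
		return -EINVAL;
	if (bio->bi_vcnt == before)
		put_page(page);	/* merged into the previous vector: drop the pin */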
goto err_free_blkg;
}
- wb_congested = wb_congested_get_create(&q->backing_dev_info,
+ wb_congested = wb_congested_get_create(q->backing_dev_info,
blkcg->css.id, GFP_NOWAIT);
if (!wb_congested) {
ret = -ENOMEM;
const char *blkg_dev_name(struct blkcg_gq *blkg)
{
/* some drivers (floppy) instantiate a queue w/o disk registered */
- if (blkg->q->backing_dev_info.dev)
- return dev_name(blkg->q->backing_dev_info.dev);
+ if (blkg->q->backing_dev_info->dev)
+ return dev_name(blkg->q->backing_dev_info->dev);
return NULL;
}
EXPORT_SYMBOL_GPL(blkg_dev_name);
* flip its congestion state for events on other blkcgs.
*/
if (rl == &rl->q->root_rl)
- clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+ clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}
#else
/* see blk_clear_congested() */
if (rl == &rl->q->root_rl)
- set_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+ set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}
* @bdev: device
*
* Locates the passed device's request queue and returns the address of its
- * backing_dev_info. This function can only be called if @bdev is opened
- * and the return value is never NULL.
+ * backing_dev_info. The return value is never NULL; however, we may return
+ * &noop_backing_dev_info if the bdev is not currently open.
*/
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- return &q->backing_dev_info;
+ return bdev->bd_bdi;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);
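
A caller-side sketch of the new contract (hypothetical caller): the helper may now hand back &noop_backing_dev_info for a device that is not open, so callers should not assume the bdi is backed by a live request queue.

static unsigned long demo_ra_pages(struct block_device *bdev)
{
	struct backing_dev_info *bdi = blk_get_backing_dev_info(bdev);

	if (bdi == &noop_backing_dev_info)
		return 0;	/* device not open: no live queue behind it */

	return bdi->ra_pages;	/* e.g. read back the readahead window */
}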
**/
void blk_start_queue(struct request_queue *q)
{
- WARN_ON(!irqs_disabled());
+ WARN_ON(!in_interrupt() && !irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
__blk_run_queue(q);
blk_flush_integrity();
/* @q won't process any more request, flush async actions */
- del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+ del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
blk_sync_queue(q);
if (q->mq_ops)
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
struct request_queue *q;
- int err;
q = kmem_cache_alloc_node(blk_requestq_cachep,
gfp_mask | __GFP_ZERO, node_id);
if (!q->bio_split)
goto fail_id;
- q->backing_dev_info.ra_pages =
+ q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
+ if (!q->backing_dev_info)
+ goto fail_split;
+
+ q->backing_dev_info->ra_pages =
(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
- q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
- q->backing_dev_info.name = "block";
+ q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
+ q->backing_dev_info->name = "block";
q->node = node_id;
- err = bdi_init(&q->backing_dev_info);
- if (err)
- goto fail_split;
-
- setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
+ setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
laptop_mode_timer_fn, (unsigned long) q);
setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
INIT_LIST_HEAD(&q->queue_head);
fail_ref:
percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
- bdi_destroy(&q->backing_dev_info);
+ bdi_put(q->backing_dev_info);
fail_split:
bioset_free(q->bio_split);
fail_id:
 * disturb iosched and blkcg but weird is better than dead.
*/
printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
- __func__, dev_name(q->backing_dev_info.dev));
+ __func__, dev_name(q->backing_dev_info->dev));
rq->cmd_flags &= ~REQ_ELVPRIV;
rq->elv.icq = NULL;
BUG_ON(blk_queued_rq(req));
if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
- laptop_io_completion(&req->q->backing_dev_info);
+ laptop_io_completion(req->q->backing_dev_info);
blk_delete_timer(req);
bi->tuple_size = template->tuple_size;
bi->tag_size = template->tag_size;
- disk->queue->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+ disk->queue->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
}
EXPORT_SYMBOL(blk_integrity_register);
*/
void blk_integrity_unregister(struct gendisk *disk)
{
- disk->queue->backing_dev_info.capabilities &= ~BDI_CAP_STABLE_WRITES;
+ disk->queue->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
}
EXPORT_SYMBOL(blk_integrity_unregister);
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
- unsigned long ra_kb = q->backing_dev_info.ra_pages <<
+ unsigned long ra_kb = q->backing_dev_info->ra_pages <<
(PAGE_CACHE_SHIFT - 10);
return queue_var_show(ra_kb, (page));
if (ret < 0)
return ret;
- q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+ q->backing_dev_info->ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
return ret;
}
struct request_queue *q =
container_of(kobj, struct request_queue, kobj);
- bdi_exit(&q->backing_dev_info);
+ bdi_put(q->backing_dev_info);
blkcg_exit_queue(q);
if (q->elevator) {
disk_alloc_events(disk);
/* Register BDI before referencing it from bdev */
- bdi = &disk->queue->backing_dev_info;
+ bdi = disk->queue->backing_dev_info;
bdi_register_owner(bdi, disk_to_dev(disk));
blk_register_region(disk_devt(disk), disk->minors, NULL,
disk_part_iter_init(&piter, disk,
DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
while ((part = disk_part_iter_next(&piter))) {
+ bdev_unhash_inode(MKDEV(disk->major,
+ disk->first_minor + part->partno));
invalidate_partition(disk, part->partno);
delete_partition(disk, part->partno);
}
* Unregister bdi before releasing device numbers (as they can
* get reused and we'd get clashes in sysfs).
*/
- bdi_unregister(&disk->queue->backing_dev_info);
+ bdi_unregister(disk->queue->backing_dev_info);
blk_unregister_queue(disk);
} else {
WARN_ON(1);
if (!gpt)
return NULL;
- count = le32_to_cpu(gpt->num_partition_entries) *
+ count = (size_t)le32_to_cpu(gpt->num_partition_entries) *
le32_to_cpu(gpt->sizeof_partition_entry);
if (!count)
return NULL;
gpt_header **gpt, gpt_entry **ptes)
{
u32 crc, origcrc;
- u64 lastlba;
+ u64 lastlba, pt_size;
if (!ptes)
return 0;
goto fail;
}
+ /* Sanity check partition table size */
+ pt_size = (u64)le32_to_cpu((*gpt)->num_partition_entries) *
+ le32_to_cpu((*gpt)->sizeof_partition_entry);
+ if (pt_size > KMALLOC_MAX_SIZE) {
+ pr_debug("GUID Partition Table is too large: %llu > %lu bytes\n",
+ (unsigned long long)pt_size, KMALLOC_MAX_SIZE);
+ goto fail;
+ }
+
if (!(*ptes = alloc_read_gpt_entries(state, *gpt)))
goto fail;
/* Check the GUID Partition Entry Array CRC */
- crc = efi_crc32((const unsigned char *) (*ptes),
- le32_to_cpu((*gpt)->num_partition_entries) *
- le32_to_cpu((*gpt)->sizeof_partition_entry));
+ crc = efi_crc32((const unsigned char *) (*ptes), pt_size);
if (crc != le32_to_cpu((*gpt)->partition_entry_array_crc32)) {
pr_debug("GUID Partitition Entry Array CRC check failed.\n");
select CRYPTO_BLKCIPHER
select CRYPTO_MANAGER
select CRYPTO_GF128MUL
+ select CRYPTO_ECB
help
XTS: IEEE1619/D16 narrow block cipher use with aes-xts-plain,
key size 256, 384 or 512 bits. This implementation currently
}
sgl = sreq->tsg;
n = sg_nents(sgl);
- for_each_sg(sgl, sg, n, i)
- put_page(sg_page(sg));
+ for_each_sg(sgl, sg, n, i) {
+ struct page *page = sg_page(sg);
+
+ /* some SGs may not have a page mapped */
+ if (page && atomic_read(&page->_count))
+ put_page(page);
+ }
kfree(sreq->tsg);
}
sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
sgl->cur = 0;
- if (sg)
+ if (sg) {
sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
+ sg_unmark_end(sg + (MAX_SGL_ENTS - 1));
+ }
list_add_tail(&sgl->list, &ctx->tsgl);
}
int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
{
- struct scatterlist *sg = req->src;
- unsigned int offset = sg->offset;
unsigned int nbytes = req->nbytes;
+ struct scatterlist *sg;
+ unsigned int offset;
int err;
- if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
+ if (nbytes &&
+ (sg = req->src, offset = sg->offset,
+ nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
void *data;
data = kmap_atomic(sg_page(sg));
Note that enabling this will break newer Android user-space.
+config ANDROID_BINDER_IPC_SELFTEST
+ bool "Android Binder IPC Driver Selftest"
+ depends on ANDROID_BINDER_IPC
+ ---help---
+ This feature allows the binder selftest to run.
+
+ The binder selftest exhaustively checks the allocation and freeing
+ of binder buffers with combinations of various buffer sizes and
+ alignments.
+
endif # if ANDROID
endmenu
ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
+obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
task->pid, desired.prio,
to_kernel_prio(policy, priority));
+ trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
+ to_kernel_prio(policy, priority),
+ desired.prio);
+
/* Set the actual priority */
if (task->policy != policy || is_rt_policy(policy)) {
struct sched_param params;
struct binder_priority node_prio,
bool inherit_rt)
{
- struct binder_priority desired_prio;
+ struct binder_priority desired_prio = t->priority;
if (t->set_priority_called)
return;
if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
desired_prio.prio = NICE_TO_PRIO(0);
desired_prio.sched_policy = SCHED_NORMAL;
- } else {
- desired_prio.prio = t->priority.prio;
- desired_prio.sched_policy = t->priority.sched_policy;
}
if (node_prio.prio < t->priority.prio ||
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE;
priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
- node->sched_policy = (flags & FLAT_BINDER_FLAG_PRIORITY_MASK) >>
+ node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
node->min_priority = to_kernel_prio(node->sched_policy, priority);
node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
}
/**
+ * binder_cleanup_transaction() - cleans up undelivered transaction
+ * @t: transaction that needs to be cleaned up
+ * @reason: reason the transaction wasn't delivered
+ * @error_code: error to return to caller (if synchronous call)
+ */
+static void binder_cleanup_transaction(struct binder_transaction *t,
+ const char *reason,
+ uint32_t error_code)
+{
+ if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
+ binder_send_failed_reply(t, error_code);
+ } else {
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "undelivered transaction %d, %s\n",
+ t->debug_id, reason);
+ binder_free_transaction(t);
+ }
+}
+
+/**
* binder_validate_object() - checks for a valid metadata object in a buffer.
* @buffer: binder_buffer that we're parsing.
* @offset: offset in the buffer at which to validate an object.
(u64)node->ptr);
binder_node_unlock(node);
} else {
- int ret;
struct binder_ref_data dest_rdata;
binder_node_unlock(node);
return true;
}
+/**
+ * binder_get_node_refs_for_txn() - Get required refs on node for txn
+ * @node: struct binder_node for which to get refs
+ * @procp: returns @node->proc if valid
+ * @error: set to BR_DEAD_REPLY if @node->proc is NULL
+ *
+ * User-space normally keeps the node alive when creating a transaction
+ * since it has a reference to the target. The local strong ref keeps it
+ * alive if the sending process dies before the target process processes
+ * the transaction. If the source process is malicious or has a reference
+ * counting bug, relying on the local strong ref can fail.
+ *
+ * Since user-space can cause the local strong ref to go away, we also take
+ * a tmpref on the node to ensure it survives while we are constructing
+ * the transaction. We also need a tmpref on the proc while we are
+ * constructing the transaction, so we take that here as well.
+ *
+ * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
+ * Also sets @procp if valid. If @node->proc is NULL, indicating that the
+ * target proc has died, @error is set to BR_DEAD_REPLY.
+ */
+static struct binder_node *binder_get_node_refs_for_txn(
+ struct binder_node *node,
+ struct binder_proc **procp,
+ uint32_t *error)
+{
+ struct binder_node *target_node = NULL;
+
+ binder_node_inner_lock(node);
+ if (node->proc) {
+ target_node = node;
+ binder_inc_node_nilocked(node, 1, 0, NULL);
+ binder_inc_node_tmpref_ilocked(node);
+ node->proc->tmp_ref++;
+ *procp = node->proc;
+ } else
+ *error = BR_DEAD_REPLY;
+ binder_node_inner_unlock(node);
+
+ return target_node;
+}
+
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
ref = binder_get_ref_olocked(proc, tr->target.handle,
true);
if (ref) {
- binder_inc_node(ref->node, 1, 0, NULL);
- target_node = ref->node;
- }
- binder_proc_unlock(proc);
- if (target_node == NULL) {
+ target_node = binder_get_node_refs_for_txn(
+ ref->node, &target_proc,
+ &return_error);
+ } else {
binder_user_error("%d:%d got transaction to invalid handle\n",
- proc->pid, thread->pid);
+ proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
- return_error_param = -EINVAL;
- return_error_line = __LINE__;
- goto err_invalid_target_handle;
}
+ binder_proc_unlock(proc);
} else {
mutex_lock(&context->context_mgr_node_lock);
target_node = context->binder_context_mgr_node;
- if (target_node == NULL) {
+ if (target_node)
+ target_node = binder_get_node_refs_for_txn(
+ target_node, &target_proc,
+ &return_error);
+ else
return_error = BR_DEAD_REPLY;
- mutex_unlock(&context->context_mgr_node_lock);
- return_error_line = __LINE__;
- goto err_no_context_mgr_node;
- }
- binder_inc_node(target_node, 1, 0, NULL);
mutex_unlock(&context->context_mgr_node_lock);
}
- e->to_node = target_node->debug_id;
- binder_node_lock(target_node);
- target_proc = target_node->proc;
- if (target_proc == NULL) {
- binder_node_unlock(target_node);
- return_error = BR_DEAD_REPLY;
+ if (!target_node) {
+ /*
+ * return_error is set above
+ */
+ return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_dead_binder;
}
- binder_inner_proc_lock(target_proc);
- target_proc->tmp_ref++;
- binder_inner_proc_unlock(target_proc);
- binder_node_unlock(target_node);
+ e->to_node = target_node->debug_id;
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
if (target_thread)
binder_thread_dec_tmpref(target_thread);
binder_proc_dec_tmpref(target_proc);
+ if (target_node)
+ binder_dec_node_tmpref(target_node);
/*
* write barrier to synchronize with initialization
* of log entry
err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp);
+ if (target_node)
+ binder_dec_node_tmpref(target_node);
target_node = NULL;
t->buffer->transaction = NULL;
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
-err_no_context_mgr_node:
if (target_thread)
binder_thread_dec_tmpref(target_thread);
if (target_proc)
binder_proc_dec_tmpref(target_proc);
- if (target_node)
+ if (target_node) {
binder_dec_node(target_node, 1, 0);
+ binder_dec_node_tmpref(target_node);
+ }
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
if (put_user(cmd, (uint32_t __user *)ptr)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
+
+ binder_cleanup_transaction(t, "put_user failed",
+ BR_FAILED_REPLY);
+
return -EFAULT;
}
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr))) {
if (t_from)
binder_thread_dec_tmpref(t_from);
+
+ binder_cleanup_transaction(t, "copy_to_user failed",
+ BR_FAILED_REPLY);
+
return -EFAULT;
}
ptr += sizeof(tr);
struct binder_transaction *t;
t = container_of(w, struct binder_transaction, work);
- if (t->buffer->target_node &&
- !(t->flags & TF_ONE_WAY)) {
- binder_send_failed_reply(t, BR_DEAD_REPLY);
- } else {
- binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
- "undelivered transaction %d\n",
- t->debug_id);
- binder_free_transaction(t);
- }
+
+ binder_cleanup_transaction(t, "process died.",
+ BR_DEAD_REPLY);
} break;
case BINDER_WORK_RETURN_ERROR: {
struct binder_error *e = container_of(
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/
+ binder_selftest_alloc(&proc->alloc);
+
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
count = binder_alloc_get_allocated_count(&proc->alloc);
seq_printf(m, " buffers: %d\n", count);
+ binder_alloc_print_pages(m, &proc->alloc);
+
count = 0;
binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry) {
struct binder_device *device;
struct hlist_node *tmp;
+ binder_alloc_shrinker_init();
+
atomic_set(&binder_transaction_log.cur, ~0U);
atomic_set(&binder_transaction_log_failed.cur, ~0U);
binder_deferred_workqueue = create_singlethread_workqueue("binder");
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/list_lru.h>
#include "binder_alloc.h"
#include "binder_trace.h"
+struct list_lru binder_alloc_lru;
+
static DEFINE_MUTEX(binder_alloc_mmap_lock);
enum {
pr_info(x); \
} while (0)
+static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
+{
+ return list_entry(buffer->entry.next, struct binder_buffer, entry);
+}
+
+static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
+{
+ return list_entry(buffer->entry.prev, struct binder_buffer, entry);
+}
+
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &alloc->buffers))
- return alloc->buffer +
- alloc->buffer_size - (void *)buffer->data;
- return (size_t)list_entry(buffer->entry.next,
- struct binder_buffer, entry) - (size_t)buffer->data;
+ return (u8 *)alloc->buffer +
+ alloc->buffer_size - (u8 *)buffer->data;
+ return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}
static void binder_insert_free_buffer(struct binder_alloc *alloc,
buffer = rb_entry(parent, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
- if (new_buffer < buffer)
+ if (new_buffer->data < buffer->data)
p = &parent->rb_left;
- else if (new_buffer > buffer)
+ else if (new_buffer->data > buffer->data)
p = &parent->rb_right;
else
BUG();
{
struct rb_node *n = alloc->allocated_buffers.rb_node;
struct binder_buffer *buffer;
- struct binder_buffer *kern_ptr;
+ void *kern_ptr;
- kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
- - offsetof(struct binder_buffer, data));
+ kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
- if (kern_ptr < buffer)
+ if (kern_ptr < buffer->data)
n = n->rb_left;
- else if (kern_ptr > buffer)
+ else if (kern_ptr > buffer->data)
n = n->rb_right;
else {
/*
}
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
- void *start, void *end,
- struct vm_area_struct *vma)
+ void *start, void *end)
{
void *page_addr;
unsigned long user_page_addr;
- struct page **page;
- struct mm_struct *mm;
+ struct binder_lru_page *page;
+ struct vm_area_struct *vma = NULL;
+ struct mm_struct *mm = NULL;
+ bool need_mm = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: %s pages %pK-%pK\n", alloc->pid,
trace_binder_update_page_range(alloc, allocate, start, end);
- if (vma)
- mm = NULL;
- else
- mm = get_task_mm(alloc->tsk);
+ if (allocate == 0)
+ goto free_range;
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+ if (!page->page_ptr) {
+ need_mm = true;
+ break;
+ }
+ }
+
+ /* Same as mmget_not_zero() in later kernel versions */
+ if (need_mm && atomic_inc_not_zero(&alloc->vma_vm_mm->mm_users))
+ mm = alloc->vma_vm_mm;
if (mm) {
down_write(&mm->mmap_sem);
vma = alloc->vma;
- if (vma && mm != alloc->vma_vm_mm) {
- pr_err("%d: vma mm and task mm mismatch\n",
- alloc->pid);
- vma = NULL;
- }
}
- if (allocate == 0)
- goto free_range;
-
- if (vma == NULL) {
+ if (!vma && need_mm) {
pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
alloc->pid);
goto err_no_vma;
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
int ret;
+ bool on_lru;
+ size_t index;
- page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+ index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ page = &alloc->pages[index];
- BUG_ON(*page);
- *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
- if (*page == NULL) {
+ if (page->page_ptr) {
+ trace_binder_alloc_lru_start(alloc, index);
+
+ on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
+ WARN_ON(!on_lru);
+
+ trace_binder_alloc_lru_end(alloc, index);
+ continue;
+ }
+
+ if (WARN_ON(!vma))
+ goto err_page_ptr_cleared;
+
+ trace_binder_alloc_page_start(alloc, index);
+ page->page_ptr = alloc_page(GFP_KERNEL |
+ __GFP_HIGHMEM |
+ __GFP_ZERO);
+ if (!page->page_ptr) {
pr_err("%d: binder_alloc_buf failed for page at %pK\n",
alloc->pid, page_addr);
goto err_alloc_page_failed;
}
+ page->alloc = alloc;
+ INIT_LIST_HEAD(&page->lru);
+
ret = map_kernel_range_noflush((unsigned long)page_addr,
- PAGE_SIZE, PAGE_KERNEL, page);
+ PAGE_SIZE, PAGE_KERNEL,
+ &page->page_ptr);
flush_cache_vmap((unsigned long)page_addr,
(unsigned long)page_addr + PAGE_SIZE);
if (ret != 1) {
}
user_page_addr =
(uintptr_t)page_addr + alloc->user_buffer_offset;
- ret = vm_insert_page(vma, user_page_addr, page[0]);
+ ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
if (ret) {
pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
alloc->pid, user_page_addr);
goto err_vm_insert_page_failed;
}
+
+ trace_binder_alloc_page_end(alloc, index);
/* vm_insert_page does not seem to increment the refcount */
}
if (mm) {
free_range:
for (page_addr = end - PAGE_SIZE; page_addr >= start;
page_addr -= PAGE_SIZE) {
- page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
- if (vma)
- zap_page_range(vma, (uintptr_t)page_addr +
- alloc->user_buffer_offset, PAGE_SIZE, NULL);
+ bool ret;
+ size_t index;
+
+ index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ page = &alloc->pages[index];
+
+ trace_binder_free_lru_start(alloc, index);
+
+ ret = list_lru_add(&binder_alloc_lru, &page->lru);
+ WARN_ON(!ret);
+
+ trace_binder_free_lru_end(alloc, index);
+ continue;
+
err_vm_insert_page_failed:
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
- __free_page(*page);
- *page = NULL;
+ __free_page(page->page_ptr);
+ page->page_ptr = NULL;
err_alloc_page_failed:
+err_page_ptr_cleared:
;
}
err_no_vma:
return ERR_PTR(-ENOSPC);
}
+ /* Pad 0-size buffers so they get assigned unique addresses */
+ size = max(size, sizeof(void *));
+
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(!buffer->free);
has_page_addr =
(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
- if (n == NULL) {
- if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
- buffer_size = size; /* no room for other buffers */
- else
- buffer_size = size + sizeof(struct binder_buffer);
- }
+ WARN_ON(n && buffer_size != size);
end_page_addr =
- (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
if (end_page_addr > has_page_addr)
end_page_addr = has_page_addr;
ret = binder_update_page_range(alloc, 1,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
if (ret)
return ERR_PTR(ret);
- rb_erase(best_fit, &alloc->free_buffers);
- buffer->free = 0;
- buffer->free_in_progress = 0;
- binder_insert_allocated_buffer_locked(alloc, buffer);
if (buffer_size != size) {
- struct binder_buffer *new_buffer = (void *)buffer->data + size;
+ struct binder_buffer *new_buffer;
+ new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!new_buffer) {
+ pr_err("%s: %d failed to alloc new buffer struct\n",
+ __func__, alloc->pid);
+ goto err_alloc_buf_struct_failed;
+ }
+ new_buffer->data = (u8 *)buffer->data + size;
list_add(&new_buffer->entry, &buffer->entry);
new_buffer->free = 1;
binder_insert_free_buffer(alloc, new_buffer);
}
+
+ rb_erase(best_fit, &alloc->free_buffers);
+ buffer->free = 0;
+ buffer->free_in_progress = 0;
+ binder_insert_allocated_buffer_locked(alloc, buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd got %pK\n",
alloc->pid, size, buffer);
alloc->pid, size, alloc->free_async_space);
}
return buffer;
+
+err_alloc_buf_struct_failed:
+ binder_update_page_range(alloc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ end_page_addr);
+ return ERR_PTR(-ENOMEM);
}
/**
static void *buffer_start_page(struct binder_buffer *buffer)
{
- return (void *)((uintptr_t)buffer & PAGE_MASK);
+ return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}
-static void *buffer_end_page(struct binder_buffer *buffer)
+static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
- return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+ return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}
static void binder_delete_free_buffer(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
struct binder_buffer *prev, *next = NULL;
- int free_page_end = 1;
- int free_page_start = 1;
-
+ bool to_free = true;
BUG_ON(alloc->buffers.next == &buffer->entry);
- prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+ prev = binder_buffer_prev(buffer);
BUG_ON(!prev->free);
- if (buffer_end_page(prev) == buffer_start_page(buffer)) {
- free_page_start = 0;
- if (buffer_end_page(prev) == buffer_end_page(buffer))
- free_page_end = 0;
+ if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
+ to_free = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK share page with %pK\n",
- alloc->pid, buffer, prev);
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid, buffer->data, prev->data);
}
if (!list_is_last(&buffer->entry, &alloc->buffers)) {
- next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
- if (buffer_start_page(next) == buffer_end_page(buffer)) {
- free_page_end = 0;
- if (buffer_start_page(next) ==
- buffer_start_page(buffer))
- free_page_start = 0;
+ next = binder_buffer_next(buffer);
+ if (buffer_start_page(next) == buffer_start_page(buffer)) {
+ to_free = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK share page with %pK\n",
- alloc->pid, buffer, prev);
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid,
+ buffer->data,
+ next->data);
}
}
- list_del(&buffer->entry);
- if (free_page_start || free_page_end) {
+
+ if (PAGE_ALIGNED(buffer->data)) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
- alloc->pid, buffer, free_page_start ? "" : " end",
- free_page_end ? "" : " start", prev, next);
- binder_update_page_range(alloc, 0, free_page_start ?
- buffer_start_page(buffer) : buffer_end_page(buffer),
- (free_page_end ? buffer_end_page(buffer) :
- buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+ "%d: merge free, buffer start %pK is page aligned\n",
+ alloc->pid, buffer->data);
+ to_free = false;
}
+
+ if (to_free) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
+ alloc->pid, buffer->data,
+ prev->data, next->data);
+ binder_update_page_range(alloc, 0, buffer_start_page(buffer),
+ buffer_start_page(buffer) + PAGE_SIZE);
+ }
+ list_del(&buffer->entry);
+ kfree(buffer);
}
static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
- BUG_ON((void *)buffer < alloc->buffer);
- BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
+ BUG_ON(buffer->data < alloc->buffer);
+ BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += size + sizeof(struct binder_buffer);
binder_update_page_range(alloc, 0,
(void *)PAGE_ALIGN((uintptr_t)buffer->data),
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
- NULL);
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));
rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
buffer->free = 1;
if (!list_is_last(&buffer->entry, &alloc->buffers)) {
- struct binder_buffer *next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
+ struct binder_buffer *next = binder_buffer_next(buffer);
if (next->free) {
rb_erase(&next->rb_node, &alloc->free_buffers);
}
}
if (alloc->buffers.next != &buffer->entry) {
- struct binder_buffer *prev = list_entry(buffer->entry.prev,
- struct binder_buffer, entry);
+ struct binder_buffer *prev = binder_buffer_prev(buffer);
if (prev->free) {
binder_delete_free_buffer(alloc, buffer);
}
alloc->buffer_size = vma->vm_end - vma->vm_start;
- if (binder_update_page_range(alloc, 1, alloc->buffer,
- alloc->buffer + PAGE_SIZE, vma)) {
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
ret = -ENOMEM;
- failure_string = "alloc small buf";
- goto err_alloc_small_buf_failed;
+ failure_string = "alloc buffer struct";
+ goto err_alloc_buf_struct_failed;
}
- buffer = alloc->buffer;
- INIT_LIST_HEAD(&alloc->buffers);
+
+ buffer->data = alloc->buffer;
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
barrier();
alloc->vma = vma;
alloc->vma_vm_mm = vma->vm_mm;
+ /* Same as mmgrab() in later kernel versions */
+ atomic_inc(&alloc->vma_vm_mm->mm_count);
return 0;
-err_alloc_small_buf_failed:
+err_alloc_buf_struct_failed:
kfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
{
struct rb_node *n;
int buffers, page_count;
+ struct binder_buffer *buffer;
BUG_ON(alloc->vma);
buffers = 0;
mutex_lock(&alloc->mutex);
while ((n = rb_first(&alloc->allocated_buffers))) {
- struct binder_buffer *buffer;
-
buffer = rb_entry(n, struct binder_buffer, rb_node);
/* Transaction should already have been freed */
buffers++;
}
+ while (!list_empty(&alloc->buffers)) {
+ buffer = list_first_entry(&alloc->buffers,
+ struct binder_buffer, entry);
+ WARN_ON(!buffer->free);
+
+ list_del(&buffer->entry);
+ WARN_ON_ONCE(!list_empty(&alloc->buffers));
+ kfree(buffer);
+ }
+
page_count = 0;
if (alloc->pages) {
int i;
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
void *page_addr;
+ bool on_lru;
- if (!alloc->pages[i])
+ if (!alloc->pages[i].page_ptr)
continue;
+ on_lru = list_lru_del(&binder_alloc_lru,
+ &alloc->pages[i].lru);
page_addr = alloc->buffer + i * PAGE_SIZE;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%s: %d: page %d at %pK not freed\n",
- __func__, alloc->pid, i, page_addr);
+ "%s: %d: page %d at %pK %s\n",
+ __func__, alloc->pid, i, page_addr,
+ on_lru ? "on lru" : "active");
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
- __free_page(alloc->pages[i]);
+ __free_page(alloc->pages[i].page_ptr);
page_count++;
}
kfree(alloc->pages);
vfree(alloc->buffer);
}
mutex_unlock(&alloc->mutex);
+ if (alloc->vma_vm_mm)
+ mmdrop(alloc->vma_vm_mm);
binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d buffers %d, pages %d\n",
}
/**
+ * binder_alloc_print_pages() - print page usage
+ * @m: seq_file for output via seq_printf()
+ * @alloc: binder_alloc for this proc
+ */
+void binder_alloc_print_pages(struct seq_file *m,
+ struct binder_alloc *alloc)
+{
+ struct binder_lru_page *page;
+ int i;
+ int active = 0;
+ int lru = 0;
+ int free = 0;
+
+ mutex_lock(&alloc->mutex);
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ page = &alloc->pages[i];
+ if (!page->page_ptr)
+ free++;
+ else if (list_empty(&page->lru))
+ active++;
+ else
+ lru++;
+ }
+ mutex_unlock(&alloc->mutex);
+ seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
+}
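The three counters partition the mmap'd region: a page counts as "active" while it backs a live buffer, as "lru" once it is still mapped but parked on binder_alloc_lru awaiting the shrinker, and as "free" when it has no backing page at all. A made-up sample line (the numbers are illustrative, not from a real trace):

 pages: 12:3:241

would read as 12 pages in active use, 3 reclaimable on the LRU, and 241 with no backing page.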
+
+/**
* binder_alloc_get_allocated_count() - return count of buffers
* @alloc: binder_alloc for this proc
*
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
WRITE_ONCE(alloc->vma, NULL);
- WRITE_ONCE(alloc->vma_vm_mm, NULL);
}
/**
+ * binder_alloc_free_page() - shrinker callback to free pages
+ * @item: item to free
+ * @lock: lock protecting the item
+ * @cb_arg: callback argument
+ *
+ * Called from list_lru_walk() in binder_shrink_scan() to free
+ * up pages when the system is under memory pressure.
+ */
+enum lru_status binder_alloc_free_page(struct list_head *item,
+ struct list_lru_one *lru,
+ spinlock_t *lock,
+ void *cb_arg)
+{
+ struct mm_struct *mm = NULL;
+ struct binder_lru_page *page = container_of(item,
+ struct binder_lru_page,
+ lru);
+ struct binder_alloc *alloc;
+ uintptr_t page_addr;
+ size_t index;
+ struct vm_area_struct *vma;
+
+ alloc = page->alloc;
+ if (!mutex_trylock(&alloc->mutex))
+ goto err_get_alloc_mutex_failed;
+
+ if (!page->page_ptr)
+ goto err_page_already_freed;
+
+ index = page - alloc->pages;
+ page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+ vma = alloc->vma;
+ if (vma) {
+ /* Same as mmget_not_zero() in later kernel versions */
+ if (!atomic_inc_not_zero(&alloc->vma_vm_mm->mm_users))
+ goto err_mmget;
+ mm = alloc->vma_vm_mm;
+ if (!down_write_trylock(&mm->mmap_sem))
+ goto err_down_write_mmap_sem_failed;
+ }
+
+ list_lru_isolate(lru, item);
+ spin_unlock(lock);
+
+ if (vma) {
+ trace_binder_unmap_user_start(alloc, index);
+
+ zap_page_range(vma,
+ page_addr +
+ alloc->user_buffer_offset,
+ PAGE_SIZE, NULL);
+
+ trace_binder_unmap_user_end(alloc, index);
+
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+
+ trace_binder_unmap_kernel_start(alloc, index);
+
+ unmap_kernel_range(page_addr, PAGE_SIZE);
+ __free_page(page->page_ptr);
+ page->page_ptr = NULL;
+
+ trace_binder_unmap_kernel_end(alloc, index);
+
+ spin_lock(lock);
+ mutex_unlock(&alloc->mutex);
+ return LRU_REMOVED_RETRY;
+
+err_down_write_mmap_sem_failed:
+ mmput_async(mm);
+err_mmget:
+err_page_already_freed:
+ mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+ return LRU_SKIP;
+}
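The callback above follows the list_lru walk contract: isolate the item and drop the per-list lock before doing the expensive unmap work, then re-take the lock and return LRU_REMOVED_RETRY so the walk restarts safely, or bail out with LRU_SKIP when a lock cannot be taken. A minimal generic sketch of that contract, assuming a hypothetical my_item type and try_lock_item()/reclaim_item() helpers:

static enum lru_status my_isolate(struct list_head *item,
                                  struct list_lru_one *lru,
                                  spinlock_t *lock, void *cb_arg)
{
        struct my_item *p = container_of(item, struct my_item, lru);

        if (!try_lock_item(p))
                return LRU_SKIP;        /* contended; the walk moves on */

        list_lru_isolate(lru, item);    /* unlink from this lru list */
        spin_unlock(lock);              /* heavy work without the lru lock */

        reclaim_item(p);
        unlock_item(p);

        spin_lock(lock);                /* walk expects the lock re-taken */
        return LRU_REMOVED_RETRY;       /* item gone; restart at list head */
}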
+
+static unsigned long
+binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned long ret = list_lru_count(&binder_alloc_lru);
+ return ret;
+}
+
+static unsigned long
+binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned long ret;
+
+ ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ NULL, sc->nr_to_scan);
+ return ret;
+}
+
+struct shrinker binder_shrinker = {
+ .count_objects = binder_shrink_count,
+ .scan_objects = binder_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
+/**
* binder_alloc_init() - called by binder_open() for per-proc initialization
* @alloc: binder_alloc for this proc
*
*/
void binder_alloc_init(struct binder_alloc *alloc)
{
- alloc->tsk = current->group_leader;
alloc->pid = current->group_leader->pid;
mutex_init(&alloc->mutex);
+ INIT_LIST_HEAD(&alloc->buffers);
}
+void binder_alloc_shrinker_init(void)
+{
+ list_lru_init(&binder_alloc_lru);
+ register_shrinker(&binder_shrinker);
+}
#include <linux/rtmutex.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/list_lru.h>
+extern struct list_lru binder_alloc_lru;
struct binder_transaction;
/**
size_t data_size;
size_t offsets_size;
size_t extra_buffers_size;
- uint8_t data[0];
+ void *data;
+};
+
+/**
+ * struct binder_lru_page - page object used for binder shrinker
+ * @page_ptr: pointer to physical page in mmap'd space
+ * @lru: entry in binder_alloc_lru
+ * @alloc: binder_alloc for a proc
+ */
+struct binder_lru_page {
+ struct list_head lru;
+ struct page *page_ptr;
+ struct binder_alloc *alloc;
};
/**
* @allocated_buffers: rb tree of allocated buffers sorted by address
* @free_async_space: VA space available for async buffers. This is
* initialized at mmap time to 1/2 the full VA space
- * @pages: array of physical page addresses for each
- * page of mmap'd space
+ * @pages: array of binder_lru_page
* @buffer_size: size of address space specified via mmap
* @pid: pid for associated binder_proc (invariant after init)
*
*/
struct binder_alloc {
struct mutex mutex;
- struct task_struct *tsk;
struct vm_area_struct *vma;
struct mm_struct *vma_vm_mm;
void *buffer;
struct rb_root free_buffers;
struct rb_root allocated_buffers;
size_t free_async_space;
- struct page **pages;
+ struct binder_lru_page *pages;
size_t buffer_size;
uint32_t buffer_free;
int pid;
};
+#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
+void binder_selftest_alloc(struct binder_alloc *alloc);
+#else
+static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
+#endif
+enum lru_status binder_alloc_free_page(struct list_head *item,
+ struct list_lru_one *lru,
+ spinlock_t *lock, void *cb_arg);
extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
int is_async);
extern void binder_alloc_init(struct binder_alloc *alloc);
+void binder_alloc_shrinker_init(void);
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
extern struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
extern void binder_alloc_print_allocated(struct seq_file *m,
struct binder_alloc *alloc);
+void binder_alloc_print_pages(struct seq_file *m,
+ struct binder_alloc *alloc);
/**
* binder_alloc_get_free_async_space() - get free space available for async
--- /dev/null
+/* binder_alloc_selftest.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm_types.h>
+#include <linux/err.h>
+#include "binder_alloc.h"
+
+#define BUFFER_NUM 5
+#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
+
+static bool binder_selftest_run = true;
+static int binder_selftest_failures;
+static DEFINE_MUTEX(binder_selftest_lock);
+
+/**
+ * enum buf_end_align_type - Page alignment of a buffer
+ * end with regard to the end of the previous buffer.
+ *
+ * In the pictures below, buf2 refers to the buffer we
+ * are aligning. buf1 refers to the previous buffer by address.
+ * Symbol [ means the start of a buffer, ] means the end
+ * of a buffer, and | means page boundaries.
+ */
+enum buf_end_align_type {
+ /**
+ * @SAME_PAGE_UNALIGNED: The end of this buffer is on
+ * the same page as the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 ][ ...
+ * buf1 ]|[ buf2 ][ ...
+ */
+ SAME_PAGE_UNALIGNED = 0,
+ /**
+ * @SAME_PAGE_ALIGNED: When the end of the previous buffer
+ * is not page aligned, the end of this buffer is on the
+ * same page as the end of the previous buffer and is page
+ * aligned. When the previous buffer is page aligned, the
+ * end of this buffer is aligned to the next page boundary.
+ * Examples:
+ * buf1 ][ buf2 ]| ...
+ * buf1 ]|[ buf2 ]| ...
+ */
+ SAME_PAGE_ALIGNED,
+ /**
+ * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 ][ ...
+ */
+ NEXT_PAGE_UNALIGNED,
+ /**
+ * @NEXT_PAGE_ALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ]| ...
+ * buf1 ]|[ buf2 | buf2 ]| ...
+ */
+ NEXT_PAGE_ALIGNED,
+ /**
+ * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
+ * the page that follows the page after the end of the
+ * previous buffer and is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
+ */
+ NEXT_NEXT_UNALIGNED,
+ LOOP_END,
+};
+
+static void pr_err_size_seq(size_t *sizes, int *seq)
+{
+ int i;
+
+ pr_err("alloc sizes: ");
+ for (i = 0; i < BUFFER_NUM; i++)
+ pr_cont("[%zu]", sizes[i]);
+ pr_cont("\n");
+ pr_err("free seq: ");
+ for (i = 0; i < BUFFER_NUM; i++)
+ pr_cont("[%d]", seq[i]);
+ pr_cont("\n");
+}
+
+static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ size_t size)
+{
+ void *page_addr, *end;
+ int page_index;
+
+ end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+ page_addr = buffer->data;
+ for (; page_addr < end; page_addr += PAGE_SIZE) {
+ page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ if (!alloc->pages[page_index].page_ptr ||
+ !list_empty(&alloc->pages[page_index].lru)) {
+ pr_err("expect alloc but is %s at page index %d\n",
+ alloc->pages[page_index].page_ptr ?
+ "lru" : "free", page_index);
+ return false;
+ }
+ }
+ return true;
+}
+
+static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
+ if (IS_ERR(buffers[i]) ||
+ !check_buffer_pages_allocated(alloc, buffers[i],
+ sizes[i])) {
+ pr_err_size_seq(sizes, seq);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq, size_t end)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++)
+ binder_alloc_free_buf(alloc, buffers[seq[i]]);
+
+ for (i = 0; i < end / PAGE_SIZE; i++) {
+ /*
+ * An error message on a free page can be a false positive
+ * if the binder shrinker ran during the binder_alloc_free_buf
+ * calls above.
+ */
+ if (list_empty(&alloc->pages[i].lru)) {
+ pr_err_size_seq(sizes, seq);
+ pr_err("expect lru but is %s at page index %d\n",
+ alloc->pages[i].page_ptr ? "alloc" : "free", i);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_free_page(struct binder_alloc *alloc)
+{
+ int i;
+ unsigned long count;
+
+ while ((count = list_lru_count(&binder_alloc_lru))) {
+ list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ NULL, count);
+ }
+
+ for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
+ if (alloc->pages[i].page_ptr) {
+ pr_err("expect free but is %s at page index %d\n",
+ list_empty(&alloc->pages[i].lru) ?
+ "alloc" : "lru", i);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_alloc_free(struct binder_alloc *alloc,
+ size_t *sizes, int *seq, size_t end)
+{
+ struct binder_buffer *buffers[BUFFER_NUM];
+
+ binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+ binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+
+ /* Allocate from lru. */
+ binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+ if (list_lru_count(&binder_alloc_lru))
+ pr_err("lru list should be empty but is not\n");
+
+ binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+ binder_selftest_free_page(alloc);
+}
+
+static bool is_dup(int *seq, int index, int val)
+{
+ int i;
+
+ for (i = 0; i < index; i++) {
+ if (seq[i] == val)
+ return true;
+ }
+ return false;
+}
+
+/* Generate BUFFER_NUM factorial free orders. */
+static void binder_selftest_free_seq(struct binder_alloc *alloc,
+ size_t *sizes, int *seq,
+ int index, size_t end)
+{
+ int i;
+
+ if (index == BUFFER_NUM) {
+ binder_selftest_alloc_free(alloc, sizes, seq, end);
+ return;
+ }
+ for (i = 0; i < BUFFER_NUM; i++) {
+ if (is_dup(seq, index, i))
+ continue;
+ seq[index] = i;
+ binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
+ }
+}
+
+static void binder_selftest_alloc_size(struct binder_alloc *alloc,
+ size_t *end_offset)
+{
+ int i;
+ int seq[BUFFER_NUM] = {0};
+ size_t front_sizes[BUFFER_NUM];
+ size_t back_sizes[BUFFER_NUM];
+ size_t last_offset, offset = 0;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ last_offset = offset;
+ offset = end_offset[i];
+ front_sizes[i] = offset - last_offset;
+ back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
+ }
+ /*
+ * Buffers share the first or last few pages.
+ * Only BUFFER_NUM - 1 buffer sizes are adjustable since
+ * we need one giant buffer before getting to the last page.
+ */
+ back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
+ binder_selftest_free_seq(alloc, front_sizes, seq, 0,
+ end_offset[BUFFER_NUM - 1]);
+ binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
+}
+
+static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
+ size_t *end_offset, int index)
+{
+ int align;
+ size_t end, prev;
+
+ if (index == BUFFER_NUM) {
+ binder_selftest_alloc_size(alloc, end_offset);
+ return;
+ }
+ prev = index == 0 ? 0 : end_offset[index - 1];
+ end = prev;
+
+ BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
+
+ for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
+ if (align % 2)
+ end = ALIGN(end, PAGE_SIZE);
+ else
+ end += BUFFER_MIN_SIZE;
+ end_offset[index] = end;
+ binder_selftest_alloc_offset(alloc, end_offset, index + 1);
+ }
+}
+
+/**
+ * binder_selftest_alloc() - Test alloc and free of buffer pages.
+ * @alloc: Pointer to alloc struct.
+ *
+ * Allocate BUFFER_NUM buffers to cover all page alignment cases,
+ * then free them in all orders possible. Check that pages are
+ * correctly allocated, put onto lru when buffers are freed, and
+ * are freed when binder_alloc_free_page is called.
+ */
+void binder_selftest_alloc(struct binder_alloc *alloc)
+{
+ size_t end_offset[BUFFER_NUM];
+
+ if (!binder_selftest_run)
+ return;
+ mutex_lock(&binder_selftest_lock);
+ if (!binder_selftest_run || !alloc->vma)
+ goto done;
+ pr_info("STARTED\n");
+ binder_selftest_alloc_offset(alloc, end_offset, 0);
+ binder_selftest_run = false;
+ if (binder_selftest_failures > 0)
+ pr_info("%d tests FAILED\n", binder_selftest_failures);
+ else
+ pr_info("PASSED\n");
+
+done:
+ mutex_unlock(&binder_selftest_lock);
+}
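For the default constants this walks a sizable space: each of the BUFFER_NUM = 5 buffer ends independently takes one of the 5 alignment cases, giving 5^5 = 3125 end-offset patterns; binder_selftest_alloc_size tries each pattern with a front-loaded and a back-loaded size array, and binder_selftest_free_seq frees every array in all 5! = 120 orders, so one run performs 3125 * 2 * 120 = 750,000 alloc/free rounds (each with two allocation passes, the second deliberately served from the lru).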
DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done);
DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done);
+TRACE_EVENT(binder_set_priority,
+ TP_PROTO(int proc, int thread, unsigned int old_prio,
+ unsigned int desired_prio, unsigned int new_prio),
+ TP_ARGS(proc, thread, old_prio, desired_prio, new_prio),
+
+ TP_STRUCT__entry(
+ __field(int, proc)
+ __field(int, thread)
+ __field(unsigned int, old_prio)
+ __field(unsigned int, new_prio)
+ __field(unsigned int, desired_prio)
+ ),
+ TP_fast_assign(
+ __entry->proc = proc;
+ __entry->thread = thread;
+ __entry->old_prio = old_prio;
+ __entry->new_prio = new_prio;
+ __entry->desired_prio = desired_prio;
+ ),
+ TP_printk("proc=%d thread=%d old=%d => new=%d desired=%d",
+ __entry->proc, __entry->thread, __entry->old_prio,
+ __entry->new_prio, __entry->desired_prio)
+);
+
TRACE_EVENT(binder_wait_for_work,
TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo),
TP_ARGS(proc_work, transaction_stack, thread_todo),
__entry->offset, __entry->size)
);
+DECLARE_EVENT_CLASS(binder_lru_page_class,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index),
+ TP_STRUCT__entry(
+ __field(int, proc)
+ __field(size_t, page_index)
+ ),
+ TP_fast_assign(
+ __entry->proc = alloc->pid;
+ __entry->page_index = page_index;
+ ),
+ TP_printk("proc=%d page_index=%zu",
+ __entry->proc, __entry->page_index)
+);
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
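DECLARE_EVENT_CLASS defines the record layout and format string once; each DEFINE_EVENT then stamps out a tracepoint that reuses them, which is why the ten page-lifecycle events above need only two lines apiece. The generated wrappers are called like ordinary functions, as in binder_alloc_free_page() earlier in this series:

        trace_binder_unmap_kernel_start(alloc, index);
        unmap_kernel_range(page_addr, PAGE_SIZE);
        __free_page(page->page_ptr);
        trace_binder_unmap_kernel_end(alloc, index);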
TRACE_EVENT(binder_command,
TP_PROTO(uint32_t cmd),
TP_ARGS(cmd),
static void ata_tport_release(struct device *dev)
{
- put_device(dev->parent);
}
/**
device_initialize(dev);
dev->type = &ata_port_type;
- dev->parent = get_device(parent);
+ dev->parent = parent;
dev->release = ata_tport_release;
dev_set_name(dev, "ata%d", ap->print_id);
transport_setup_device(dev);
static void ata_tlink_release(struct device *dev)
{
- put_device(dev->parent);
}
/**
int error;
device_initialize(dev);
- dev->parent = get_device(&ap->tdev);
+ dev->parent = &ap->tdev;
dev->release = ata_tlink_release;
if (ata_is_host_link(link))
dev_set_name(dev, "link%d", ap->print_id);
static void ata_tdev_release(struct device *dev)
{
- put_device(dev->parent);
}
/**
int error;
device_initialize(dev);
- dev->parent = get_device(&link->tdev);
+ dev->parent = &link->tdev;
dev->release = ata_tdev_release;
if (ata_is_host_link(link))
dev_set_name(dev, "dev%d.%d", ap->print_id, ata_dev->devno);
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), 9 },
{ },
};
static const struct pci_device_id cs5536[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), },
{ },
};
out_unregister:
kobject_put(&priv->kobj);
- kfree(drv->p);
+ /* drv->p is freed in driver_release() */
drv->p = NULL;
out_put_bus:
bus_put(bus);
struct platform_device *pdev = to_platform_device(dev);
char *driver_override, *old, *cp;
- if (count > PATH_MAX)
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
return -EINVAL;
driver_override = kstrndup(buf, count, GFP_KERNEL);
WARN_ON(d->gd);
WARN_ON(d->flags & DEVFL_UP);
blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
- q->backing_dev_info.name = "aoe";
- q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
+ q->backing_dev_info->name = "aoe";
+ q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
d->bufpool = mp;
d->blkq = gd->queue = q;
q->queuedata = d;
if (get_ldev(device)) {
q = bdev_get_queue(device->ldev->backing_bdev);
- r = bdi_congested(&q->backing_dev_info, bdi_bits);
+ r = bdi_congested(q->backing_dev_info, bdi_bits);
put_ldev(device);
if (r)
reason = 'b';
/* we have no partitions. we contain only ourselves. */
device->this_bdev->bd_contains = device->this_bdev;
- q->backing_dev_info.congested_fn = drbd_congested;
- q->backing_dev_info.congested_data = device;
+ q->backing_dev_info->congested_fn = drbd_congested;
+ q->backing_dev_info->congested_data = device;
blk_queue_make_request(q, drbd_make_request);
blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
blk_queue_stack_limits(q, b);
- if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
+ if (q->backing_dev_info->ra_pages != b->backing_dev_info->ra_pages) {
drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
- q->backing_dev_info.ra_pages,
- b->backing_dev_info.ra_pages);
- q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
+ q->backing_dev_info->ra_pages,
+ b->backing_dev_info->ra_pages);
+ q->backing_dev_info->ra_pages = b->backing_dev_info->ra_pages;
}
}
}
seq_printf(seq, "%2d: cs:Unconfigured\n", i);
} else {
/* reset device->congestion_reason */
- bdi_rw_congested(&device->rq_queue->backing_dev_info);
+ bdi_rw_congested(device->rq_queue->backing_dev_info);
nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
switch (rbm) {
case RB_CONGESTED_REMOTE:
- bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
+ bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
return bdi_read_congested(bdi);
case RB_LEAST_PENDING:
return atomic_read(&device->local_cnt) >
&& pd->bio_queue_size <= pd->write_congestion_off);
spin_unlock(&pd->lock);
if (wakeup) {
- clear_bdi_congested(&pd->disk->queue->backing_dev_info,
+ clear_bdi_congested(pd->disk->queue->backing_dev_info,
BLK_RW_ASYNC);
}
spin_lock(&pd->lock);
if (pd->write_congestion_on > 0
&& pd->bio_queue_size >= pd->write_congestion_on) {
- set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
+ set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
do {
spin_unlock(&pd->lock);
congestion_wait(BLK_RW_ASYNC, HZ);
q->limits.discard_zeroes_data = 1;
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
- q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+ q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
disk->queue = q;
*/
qcmd |= FIT_QCMD_MSGSIZE_64;
+ /* Make sure skd_msg_buf is written before the doorbell is triggered. */
+ smp_wmb();
+
SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
qcmd = skspcl->mb_dma_address;
qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
+ /* Make sure skd_msg_buf is written before the doorbell is triggered. */
+ smp_wmb();
+
SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
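The barrier guarantees the device cannot observe the doorbell before the command body it points at: normal stores may be reordered, so the message buffer must be globally visible first. A generic sketch of the store-then-doorbell pattern, with illustrative names (my_dev, MY_DOORBELL) rather than the skd driver's:

static void ring_doorbell(struct my_dev *dev, const void *cmd, size_t len,
                          u64 qcmd)
{
        memcpy(dev->msg_buf, cmd, len);         /* command body for the device */
        smp_wmb();                              /* body visible before the bell */
        writeq(qcmd, dev->mmio + MY_DOORBELL);  /* device may start fetching */
}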
{
struct gendisk *disk = skdev->disk;
- if (disk != NULL) {
- struct request_queue *q = disk->queue;
+ if (disk && (disk->flags & GENHD_FL_UP))
+ del_gendisk(disk);
- if (disk->flags & GENHD_FL_UP)
- del_gendisk(disk);
- if (q)
- blk_cleanup_queue(q);
- put_disk(disk);
+ if (skdev->queue) {
+ blk_cleanup_queue(skdev->queue);
+ skdev->queue = NULL;
+ disk->queue = NULL;
}
+
+ put_disk(disk);
skdev->disk = NULL;
}
{ USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8821AE Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
static int diag_dbgfs_finished;
static int diag_dbgfs_dci_data_index;
static int diag_dbgfs_dci_finished;
-
+static struct mutex diag_dci_dbgfs_mutex;
static ssize_t diag_dbgfs_read_status(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
buf_size = ksize(buf);
bytes_remaining = buf_size;
+ mutex_lock(&diag_dci_dbgfs_mutex);
if (diag_dbgfs_dci_data_index == 0) {
bytes_written =
scnprintf(buf, buf_size,
}
temp_data++;
}
-
diag_dbgfs_dci_data_index = (i >= DIAG_DCI_DEBUG_CNT) ? 0 : i + 1;
+ mutex_unlock(&diag_dci_dbgfs_mutex);
bytes_written = simple_read_from_buffer(ubuf, count, ppos, buf,
bytes_in_buf);
kfree(buf);
pr_warn("diag: could not allocate memory for dci debug info\n");
mutex_init(&dci_stat_mutex);
+ mutex_init(&diag_dci_dbgfs_mutex);
return 0;
err:
kfree(dci_traffic);
kfree(dci_traffic);
mutex_destroy(&dci_stat_mutex);
+ mutex_destroy(&diag_dci_dbgfs_mutex);
}
#else
int diag_debugfs_init(void) { return 0; }
found = 1;
driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
pr_debug("diag: wake up logging process\n");
wake_up_interruptible(&driver->wait_q);
}
#include <asm/atomic.h>
#include "diagfwd_bridge.h"
+#define THRESHOLD_CLIENT_LIMIT 50
+
/* Size of the USB buffers used for read and write */
#define USB_MAX_OUT_BUF 4096
#define APPS_BUF_SIZE 4096
#define DIAG_MAX_REQ_SIZE (16 * 1024)
#define DIAG_MAX_RSP_SIZE (16 * 1024)
-#define APF_DIAG_PADDING 256
+#define APF_DIAG_PADDING 0
/*
* In the worst case, the HDLC buffer can be at most twice the size of the
* original packet. Add 3 bytes for the 16-bit CRC (2 bytes) and a delimiter
wait_queue_head_t wait_q;
struct diag_client_map *client_map;
int *data_ready;
+ atomic_t data_ready_notif[THRESHOLD_CLIENT_LIMIT];
int num_clients;
int polling_reg_flag;
int use_device_tree;
/* This is the max number of user-space clients supported at initialization*/
static unsigned int max_clients = 15;
-static unsigned int threshold_client_limit = 50;
module_param(max_clients, uint, 0);
/* Timer variables */
if (i < driver->num_clients) {
diag_add_client(i, file);
} else {
- if (i < threshold_client_limit) {
+ if (i < THRESHOLD_CLIENT_LIMIT) {
driver->num_clients++;
temp = krealloc(driver->client_map
, (driver->num_clients) * sizeof(struct
}
}
driver->data_ready[i] = 0x0;
+ atomic_set(&driver->data_ready_notif[i], 0);
driver->data_ready[i] |= MSG_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
driver->data_ready[i] |= EVENT_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
driver->data_ready[i] |= LOG_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
driver->data_ready[i] |= DCI_LOG_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
driver->data_ready[i] |= DCI_EVENT_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
if (driver->ref_count == 0)
diag_mempool_init();
}
driver->data_ready[i] |= DEINIT_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
mutex_unlock(&driver->diagchar_mutex);
wake_up_interruptible(&driver->wait_q);
return 0;
}
-static int check_data_ready(int index)
-{
- int data_type = 0;
-
- mutex_lock(&driver->diagchar_mutex);
- data_type = driver->data_ready[index];
- mutex_unlock(&driver->diagchar_mutex);
- return data_type;
-}
-
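The helper removed above took diagchar_mutex just to sample data_ready, and that read happened inside a wait_event_interruptible() condition, which is re-evaluated with the task already queued on the waitqueue; taking a mutex there is both costly and a lock-ordering hazard. The replacement mirrors the bitmask with a per-client atomic counter the condition can read locklessly, as the hunk below shows. A stripped-down sketch of the pattern, with hypothetical names (struct state, ready_mask):

static void producer_signal(struct state *s, int type)
{
        mutex_lock(&s->lock);
        s->ready_mask |= type;          /* detailed state, under the lock */
        atomic_inc(&s->pending);        /* lock-free summary for waiters */
        mutex_unlock(&s->lock);
        wake_up_interruptible(&s->waitq);
}

static int consumer_wait(struct state *s)
{
        /* the condition must not sleep or take mutexes */
        return wait_event_interruptible(s->waitq,
                                        atomic_read(&s->pending) > 0);
}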
static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
pr_err("diag: bad address from user side\n");
return -EFAULT;
}
- wait_event_interruptible(driver->wait_q, (check_data_ready(index)) > 0);
+ wait_event_interruptible(driver->wait_q,
+ atomic_read(&driver->data_ready_notif[index]) > 0);
mutex_lock(&driver->diagchar_mutex);
/*Copy the type of data being passed*/
data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(int));
/* place holder for number of data field */
ret += sizeof(int);
/* In case the thread wakes up and the logging mode is
not memory device any more, the condition needs to be cleared */
driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
}
if (driver->data_ready[index] & HDLC_SUPPORT_TYPE) {
data_type = driver->data_ready[index] & HDLC_SUPPORT_TYPE;
driver->data_ready[index] ^= HDLC_SUPPORT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(int));
mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_pid(current->tgid);
data_type = driver->data_ready[index] & DEINIT_TYPE;
COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
driver->data_ready[index] ^= DEINIT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
mutex_unlock(&driver->diagchar_mutex);
diag_remove_client_entry(file);
return ret;
if (write_len > 0)
ret += write_len;
driver->data_ready[index] ^= MSG_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
event_mask.mask_len);
}
driver->data_ready[index] ^= EVENT_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
if (write_len > 0)
ret += write_len;
driver->data_ready[index] ^= LOG_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
*(driver->apps_req_buf),
driver->apps_req_buf_len);
driver->data_ready[index] ^= PKT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
driver->in_busy_pktdata = 0;
goto exit;
}
COPY_USER_SPACE_OR_EXIT(buf+4, *(driver->dci_pkt_buf),
driver->dci_pkt_length);
driver->data_ready[index] ^= DCI_PKT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
driver->in_busy_dcipktdata = 0;
goto exit;
}
COPY_USER_SPACE_OR_EXIT(buf + 8, (dci_ops_tbl[DCI_LOCAL_PROC].
event_mask_composite), DCI_EVENT_MASK_SIZE);
driver->data_ready[index] ^= DCI_EVENT_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
COPY_USER_SPACE_OR_EXIT(buf+8, (dci_ops_tbl[DCI_LOCAL_PROC].
log_mask_composite), DCI_LOG_MASK_SIZE);
driver->data_ready[index] ^= DCI_LOG_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
exit_stat = diag_copy_dci(buf, count, entry, &ret);
mutex_lock(&driver->diagchar_mutex);
driver->data_ready[index] ^= DCI_DATA_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
mutex_unlock(&driver->diagchar_mutex);
if (exit_stat == 1) {
mutex_unlock(&driver->dci_mutex);
* situation.
*/
driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
pr_debug("diag: Force wakeup of logging process\n");
wake_up_interruptible(&driver->wait_q);
break;
mutex_lock(&driver->diagchar_mutex);
for (i = 0; i < driver->num_clients; i++)
- if (driver->client_map[i].pid != 0)
+ if (driver->client_map[i].pid != 0) {
driver->data_ready[i] |= type;
+ atomic_inc(&driver->data_ready_notif[i]);
+ }
wake_up_interruptible(&driver->wait_q);
mutex_unlock(&driver->diagchar_mutex);
}
driver->client_map[j].pid ==
driver->md_session_map[i]->pid) {
driver->data_ready[j] |= type;
+ atomic_inc(
+ &driver->data_ready_notif[j]);
break;
}
}
for (i = 0; i < driver->num_clients; i++)
if (driver->client_map[i].pid == process_id) {
driver->data_ready[i] |= data_type;
+ atomic_inc(&driver->data_ready_notif[i]);
break;
}
wake_up_interruptible(&driver->wait_q);
, GFP_KERNEL)) == NULL)
goto err;
kmemleak_not_leak(driver->data_ready);
+ for (i = 0; i < THRESHOLD_CLIENT_LIMIT; i++)
+ atomic_set(&driver->data_ready_notif[i], 0);
if (driver->apps_req_buf == NULL) {
driver->apps_req_buf = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
if (!driver->apps_req_buf)
F_GFX(160000000, 0, 2, 0, 0, 640000000),
F_GFX(266000000, 0, 2, 0, 0, 532000000),
F_GFX(370000000, 0, 2, 0, 0, 740000000),
+ F_GFX(430000000, 0, 2, 0, 0, 860000000),
F_GFX(465000000, 0, 2, 0, 0, 930000000),
F_GFX(588000000, 0, 2, 0, 0, 1176000000),
F_GFX(647000000, 0, 2, 0, 0, 1294000000),
config ACPI_CPPC_CPUFREQ
tristate "CPUFreq driver based on the ACPI CPPC spec"
- depends on ACPI
+ depends on ACPI_PROCESSOR
select ACPI_CPPC_LIB
default n
help
policy->cpuinfo.transition_latency = transition_latency;
+ /*
+ * Android: set default parameters for parity between schedutil and
+ * schedfreq
+ */
+ policy->up_transition_delay_us = transition_latency / NSEC_PER_USEC;
+ policy->down_transition_delay_us = 50000; /* 50ms */
+
return 0;
out_free_cpufreq_table:
static int __init cpufreq_interactive_init(void)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+ int ret = 0;
spin_lock_init(&speedchange_cpumask_lock);
mutex_init(&gov_lock);
/* NB: wake up so the thread does not look hung to the freezer */
wake_up_process_no_notif(speedchange_task);
- return cpufreq_register_governor(&cpufreq_gov_interactive);
+ ret = cpufreq_register_governor(&cpufreq_gov_interactive);
+ if (ret) {
+ kthread_stop(speedchange_task);
+ put_task_struct(speedchange_task);
+ }
+ return ret;
}
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
req_ctx->swinit = 0;
} else {
desc->ptr[1] = zero_entry;
- /* Indicate next op is not the first. */
- req_ctx->first = 0;
}
+ /* Indicate next op is not the first. */
+ req_ctx->first = 0;
/* HMAC key */
if (ctx->keylen)
t_alg->algt.alg.hash.final = ahash_final;
t_alg->algt.alg.hash.finup = ahash_finup;
t_alg->algt.alg.hash.digest = ahash_digest;
- t_alg->algt.alg.hash.setkey = ahash_setkey;
+ if (!strncmp(alg->cra_name, "hmac", 4))
+ t_alg->algt.alg.hash.setkey = ahash_setkey;
t_alg->algt.alg.hash.import = ahash_import;
t_alg->algt.alg.hash.export = ahash_export;
struct edma_desc *edesc;
struct device *dev = chan->device->dev;
struct edma_chan *echan = to_edma_chan(chan);
- unsigned int width, pset_len;
+ unsigned int width, pset_len, array_size;
if (unlikely(!echan || !len))
return NULL;
+ /* Align the array size (acnt block) with the transfer properties */
+ switch (__ffs((src | dest | len))) {
+ case 0:
+ array_size = SZ_32K - 1;
+ break;
+ case 1:
+ array_size = SZ_32K - 2;
+ break;
+ default:
+ array_size = SZ_32K - 4;
+ break;
+ }
+
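__ffs(src | dest | len) yields the lowest set bit across all three values, i.e. their common power-of-two alignment. If source, destination and length are all multiples of 4, the result is at least 2 and the default arm picks SZ_32K - 4 = 32764, itself a multiple of 4; any odd address or length hits case 0 and falls back to SZ_32K - 1. Keeping the acnt block a multiple of the common alignment lets the EDMA engine issue wider bus accesses instead of byte transfers.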
if (len < SZ_64K) {
/*
* Transfer size less than 64K can be handled with one paRAM
* When the full length is a multiple of 32767, one slot can be
* used to complete the transfer.
*/
- width = SZ_32K - 1;
+ width = array_size;
pset_len = rounddown(len, width);
/* One slot is enough for lengths that are a multiple of (SZ_32K - 1) */
if (unlikely(pset_len == len))
}
dest += pset_len;
src += pset_len;
- pset_len = width = len % (SZ_32K - 1);
+ pset_len = width = len % array_size;
ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
width, pset_len, DMA_MEM_TO_MEM);
return ret;
}
- vbus_attach = (pwr_stat & PS_STAT_VBUS_PRESENT);
+ vbus_attach = (pwr_stat & PS_STAT_VBUS_VALID);
if (!vbus_attach)
goto notify_otg;
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse
-cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS))
+cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie
cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
-fno-builtin -fpic -mno-single-pic-base
* published by the Free Software Foundation.
*
*/
+
+/*
+ * To prevent the compiler from emitting GOT-indirected (and thus absolute)
+ * references to the section markers, override their visibility as 'hidden'
+ */
+#pragma GCC visibility push(hidden)
+#include <asm/sections.h>
+#pragma GCC visibility pop
+
#include <linux/efi.h>
#include <asm/efi.h>
-#include <asm/sections.h>
#include "efistub.h"
return err;
}
-static const struct of_device_id const psci_of_match[] __initconst = {
+static const struct of_device_id psci_of_match[] __initconst = {
{ .compatible = "arm,psci", .data = psci_0_1_init},
{ .compatible = "arm,psci-0.2", .data = psci_0_2_init},
{ .compatible = "arm,psci-1.0", .data = psci_0_2_init},
struct kfd_event_data event_data;
if (copy_from_user(&event_data, &events[i],
- sizeof(struct kfd_event_data)))
+ sizeof(struct kfd_event_data))) {
+ ret = -EFAULT;
goto fail;
+ }
ret = init_event_waiter(p, &event_waiters[i],
event_data.event_id, i);
return -EINVAL;
}
req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
+ req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
} else {
port = NULL;
req_payload.num_slots = 0;
if (req_payload.num_slots) {
drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
mgr->payloads[i].num_slots = req_payload.num_slots;
+ mgr->payloads[i].vcpi = req_payload.vcpi;
} else if (mgr->payloads[i].num_slots) {
mgr->payloads[i].num_slots = 0;
drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
bool edid_read;
wait_queue_head_t wq;
+ struct work_struct hpd_work;
+
struct drm_encoder *encoder;
+ struct drm_connector connector;
bool embedded_sync;
enum adv7511_sync_polarity vsync_polarity;
struct gpio_desc *gpio_pd;
};
+static const int edid_i2c_addr = 0x7e;
+static const int packet_i2c_addr = 0x70;
+static const int cec_i2c_addr = 0x78;
+
static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
{
return to_encoder_slave(encoder)->slave_priv;
{
adv7511->current_edid_segment = -1;
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY);
- regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
- ADV7511_INT1_DDC_ERROR);
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN, 0);
+ if (adv7511->i2c_main->irq) {
+ /*
+ * Documentation says the INT_ENABLE registers are reset in
+ * POWER_DOWN mode. My 7511w preserved the bits, however.
+ * Still, let's be safe and stick to the documentation.
+ */
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR);
+ }
/*
* Per spec it is allowed to pulse the HPD signal to indicate that the
return false;
}
-static int adv7511_irq_process(struct adv7511 *adv7511)
+static void adv7511_hpd_work(struct work_struct *work)
+{
+ struct adv7511 *adv7511 = container_of(work, struct adv7511, hpd_work);
+ enum drm_connector_status status;
+ unsigned int val;
+ int ret;
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
+ if (ret < 0)
+ status = connector_status_disconnected;
+ else if (val & ADV7511_STATUS_HPD)
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ if (adv7511->connector.status != status) {
+ adv7511->connector.status = status;
+ drm_kms_helper_hotplug_event(adv7511->connector.dev);
+ }
+}
+
+static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
{
unsigned int irq0, irq1;
int ret;
regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
- if (irq0 & ADV7511_INT0_HDP && adv7511->encoder)
- drm_helper_hpd_irq_event(adv7511->encoder->dev);
+ if (process_hpd && irq0 & ADV7511_INT0_HDP && adv7511->encoder)
+ schedule_work(&adv7511->hpd_work);
if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
adv7511->edid_read = true;
struct adv7511 *adv7511 = devid;
int ret;
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, true);
return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
}
adv7511->edid_read, msecs_to_jiffies(timeout));
} else {
for (; timeout > 0; timeout -= 25) {
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, false);
if (ret < 0)
break;
/* Reading the EDID only works if the device is powered */
if (!adv7511->powered) {
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY);
- regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
- ADV7511_INT1_DDC_ERROR);
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN, 0);
+ if (adv7511->i2c_main->irq) {
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR);
+ }
adv7511->current_edid_segment = -1;
+ /* Reset the EDID_I2C_ADDR register as it might be cleared */
+ regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR,
+ edid_i2c_addr);
}
edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
return 0;
}
-static const int edid_i2c_addr = 0x7e;
-static const int packet_i2c_addr = 0x70;
-static const int cec_i2c_addr = 0x78;
-
static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
struct adv7511_link_config link_config;
if (!adv7511->i2c_edid)
return -ENOMEM;
+ INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
+
if (i2c->irq) {
init_waitqueue_head(&adv7511->wq);
is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
+ if (port == PORT_A && is_dvi) {
+ DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
+ is_hdmi ? "/HDMI" : "");
+ is_dvi = false;
+ is_hdmi = false;
+ }
+
info->supports_dvi = is_dvi;
info->supports_hdmi = is_hdmi;
info->supports_dp = is_dp;
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
- i915.mmio_debug = mmio_debug_once--;
+ i915.mmio_debug = mmio_debug_once;
+ mmio_debug_once = false;
}
}
}
break;
+ case HDCP_STATE_AUTH_FAIL_NOREAUTH:
+ if (hdmi_ctrl->hdcp1_use_sw_keys && hdmi_ctrl->hdcp14_present) {
+ if (hdmi_ctrl->auth_state && !hdmi_ctrl->hdcp22_present)
+ hdcp1_set_enc(false);
+ }
+
+ hdmi_ctrl->auth_state = false;
+
+ break;
case HDCP_STATE_AUTH_ENC_NONE:
hdmi_ctrl->enc_lvl = HDCP_STATE_AUTH_ENC_NONE;
if (sde_hdmi_tx_is_panel_on(hdmi_ctrl))
{
}
+static void sde_hdmi_clear_hdr_info(struct drm_bridge *bridge)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ struct drm_connector *connector = hdmi->connector;
+
+ connector->hdr_eotf = SDE_HDMI_HDR_EOTF_NONE;
+ connector->hdr_metadata_type_one = false;
+ connector->hdr_max_luminance = SDE_HDMI_HDR_LUMINANCE_NONE;
+ connector->hdr_avg_luminance = SDE_HDMI_HDR_LUMINANCE_NONE;
+ connector->hdr_min_luminance = SDE_HDMI_HDR_LUMINANCE_NONE;
+ connector->hdr_supported = false;
+}
+
static void _sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
{
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
vbi_pkt_reg = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
vbi_pkt_reg |= BIT(5) | BIT(4);
hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_reg);
+ } else {
+ hdmi_ctrl_reg = hdmi_read(hdmi, REG_HDMI_CTRL);
+
+ /* disable GC CD override */
+ hdmi_ctrl_reg &= ~BIT(27);
+ /* disable deep color for RGB888/YUV444/YUV420 30 bits */
+ hdmi_ctrl_reg &= ~BIT(24);
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl_reg);
+
+ /* disable the GC packet sending */
+ vbi_pkt_reg = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
+ vbi_pkt_reg &= ~(BIT(5) | BIT(4));
+ hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_reg);
}
}
display->sink_hdcp_ver = SDE_HDMI_HDCP_NONE;
display->sink_hdcp22_support = false;
+ sde_hdmi_clear_hdr_info(bridge);
mutex_unlock(&display->display_lock);
}
HDCP_STATE_AUTH_FAIL);
}
+static void sde_hdmi_hdcp2p2_fail_noreauth(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+ if (!ctrl) {
+ SDE_ERROR("invalid input\n");
+ return;
+ }
+
+ atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL);
+
+ sde_hdmi_hdcp2p2_ddc_disable(ctrl->init_data.cb_data);
+
+ /* notify hdmi tx about HDCP failure */
+ ctrl->init_data.notify_status(ctrl->init_data.cb_data,
+ HDCP_STATE_AUTH_FAIL_NOREAUTH);
+}
+
+static void sde_hdmi_hdcp2p2_srm_cb(void *client_ctx)
+{
+ struct sde_hdmi_hdcp2p2_ctrl *ctrl =
+ (struct sde_hdmi_hdcp2p2_ctrl *)client_ctx;
+ struct hdcp_lib_wakeup_data cdata = {
+ HDCP_LIB_WKUP_CMD_INVALID};
+
+ if (!ctrl) {
+ SDE_ERROR("invalid input\n");
+ return;
+ }
+
+ cdata.context = ctrl->lib_ctx;
+ cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+ sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+
+ sde_hdmi_hdcp2p2_fail_noreauth(ctrl);
+}
+
static int sde_hdmi_hdcp2p2_ddc_rd_message(struct sde_hdmi_hdcp2p2_ctrl *ctrl,
u8 *buf, int size, u32 timeout)
{
static struct hdcp_client_ops client_ops = {
.wakeup = sde_hdmi_hdcp2p2_wakeup,
.notify_lvl_change = sde_hdmi_hdcp2p2_min_level_change,
+ .srm_cb = sde_hdmi_hdcp2p2_srm_cb,
};
static struct hdcp_txmtr_ops txmtr_ops;
#define SDE_HDMI_HDCP_14 0x14
#define SDE_HDMI_HDCP_NONE 0x0
+#define SDE_HDMI_HDR_LUMINANCE_NONE 0x0
+#define SDE_HDMI_HDR_EOTF_NONE 0x0
+
/*
* Bits 1:0 in HDMI_HW_DDC_CTRL that dictate how the HDCP 2.2 RxStatus will be
* read by the hardware
struct msm_kms *kms = priv->kms;
struct vblank_event *vbl_ev, *tmp;
unsigned long flags;
+ LIST_HEAD(tmp_head);
spin_lock_irqsave(&vbl_ctrl->lock, flags);
list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
list_del(&vbl_ev->node);
- spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+ list_add_tail(&vbl_ev->node, &tmp_head);
+ }
+ spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+ list_for_each_entry_safe(vbl_ev, tmp, &tmp_head, node) {
if (vbl_ev->enable)
kms->funcs->enable_vblank(kms,
priv->crtcs[vbl_ev->crtc_id]);
priv->crtcs[vbl_ev->crtc_id]);
kfree(vbl_ev);
-
- spin_lock_irqsave(&vbl_ctrl->lock, flags);
}
-
- spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
}
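Draining the list into a private head under one lock acquisition replaces the old unlock/relock dance per node, which both churned the lock and made the traversal fragile against concurrent list mutation. A generic sketch of the drain-then-process idiom; list_splice_init() is a common O(1) variant of the per-node moves used in this hunk (types here are illustrative):

static void drain_and_process(struct my_ctrl *c)
{
        struct my_ev *ev, *tmp;
        unsigned long flags;
        LIST_HEAD(local);

        spin_lock_irqsave(&c->lock, flags);
        list_splice_init(&c->events, &local);   /* steal the whole list */
        spin_unlock_irqrestore(&c->lock, flags);

        list_for_each_entry_safe(ev, tmp, &local, node) {
                handle(ev);             /* lock not held; may sleep */
                kfree(ev);
        }
}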
static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
return false;
}
+static void _sde_core_perf_calc_crtc(struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct sde_core_perf_params *perf)
+{
+ struct sde_crtc_state *sde_cstate;
+
+ if (!crtc || !state || !perf) {
+ SDE_ERROR("invalid parameters\n");
+ return;
+ }
+
+ sde_cstate = to_sde_crtc_state(state);
+ memset(perf, 0, sizeof(struct sde_core_perf_params));
+
+ perf->bw_ctl = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
+ perf->max_per_pipe_ib =
+ sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);
+ perf->core_clk_rate =
+ sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_CLK);
+
+ SDE_DEBUG("crtc=%d clk_rate=%u ib=%llu ab=%llu\n",
+ crtc->base.id, perf->core_clk_rate,
+ perf->max_per_pipe_ib, perf->bw_ctl);
+}
+
int sde_core_perf_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
sde_cstate = to_sde_crtc_state(state);
- bw_sum_of_intfs = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
+ _sde_core_perf_calc_crtc(crtc, state, &sde_cstate->new_perf);
+
+ bw_sum_of_intfs = sde_cstate->new_perf.bw_ctl;
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
struct sde_crtc_state *tmp_cstate =
to_sde_crtc_state(tmp_crtc->state);
- bw_sum_of_intfs += tmp_cstate->cur_perf.bw_ctl;
+ bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
}
}
SDE_DEBUG("final threshold bw limit = %d\n", threshold);
if (!threshold) {
- sde_cstate->cur_perf.bw_ctl = 0;
SDE_ERROR("no bandwidth limits specified\n");
return -E2BIG;
} else if (bw > threshold) {
- sde_cstate->cur_perf.bw_ctl = 0;
SDE_DEBUG("exceeds bandwidth: %ukb > %ukb\n", bw, threshold);
return -E2BIG;
}
return 0;
}
-static void _sde_core_perf_calc_crtc(struct sde_kms *kms,
- struct drm_crtc *crtc,
- struct sde_core_perf_params *perf)
-{
- struct sde_crtc_state *sde_cstate;
-
- sde_cstate = to_sde_crtc_state(crtc->state);
- memset(perf, 0, sizeof(struct sde_core_perf_params));
-
- perf->bw_ctl = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
- perf->max_per_pipe_ib =
- sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);
- perf->core_clk_rate =
- sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_CLK);
-
- SDE_DEBUG("crtc=%d clk_rate=%u ib=%llu ab=%llu\n",
- crtc->base.id, perf->core_clk_rate,
- perf->max_per_pipe_ib, perf->bw_ctl);
-}
-
static u64 _sde_core_perf_crtc_calc_client_vote(struct sde_kms *kms,
struct drm_crtc *crtc, struct sde_core_perf_params *perf,
bool nrt_client, u32 core_clk)
to_sde_crtc_state(tmp_crtc->state);
perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
- sde_cstate->cur_perf.max_per_pipe_ib);
+ sde_cstate->new_perf.max_per_pipe_ib);
- bw_sum_of_intfs += sde_cstate->cur_perf.bw_ctl;
+ bw_sum_of_intfs += sde_cstate->new_perf.bw_ctl;
SDE_DEBUG("crtc=%d bw=%llu\n",
tmp_crtc->base.id,
- sde_cstate->cur_perf.bw_ctl);
+ sde_cstate->new_perf.bw_ctl);
}
}
void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc)
{
struct drm_crtc *tmp_crtc;
+ struct sde_crtc *sde_crtc;
struct sde_crtc_state *sde_cstate;
struct sde_kms *kms;
return;
}
+ sde_crtc = to_sde_crtc(crtc);
sde_cstate = to_sde_crtc_state(crtc->state);
/* only do this for command panel or writeback */
/* Release the bandwidth */
if (kms->perf.enable_bw_release) {
trace_sde_cmd_release_bw(crtc->base.id);
- sde_cstate->cur_perf.bw_ctl = 0;
- sde_cstate->new_perf.bw_ctl = 0;
+ sde_crtc->cur_perf.bw_ctl = 0;
SDE_DEBUG("Release BW crtc=%d\n", crtc->base.id);
_sde_core_perf_crtc_update_bus(kms, crtc, 0);
}
return clk_round_rate(kms->perf.core_clk, clk_rate);
}
-static u32 _sde_core_perf_get_core_clk_rate(struct sde_kms *kms)
+static u32 _sde_core_perf_get_core_clk_rate(struct sde_kms *kms,
+ struct sde_core_perf_params *crtc_perf, struct drm_crtc *crtc)
{
u32 clk_rate = 0;
- struct drm_crtc *crtc;
+ struct drm_crtc *tmp_crtc;
struct sde_crtc_state *sde_cstate;
int ncrtc = 0;
+ u32 tmp_rate;
+
+ drm_for_each_crtc(tmp_crtc, kms->dev) {
+ if (_sde_core_perf_crtc_is_power_on(tmp_crtc)) {
- drm_for_each_crtc(crtc, kms->dev) {
- if (_sde_core_perf_crtc_is_power_on(crtc)) {
- sde_cstate = to_sde_crtc_state(crtc->state);
- clk_rate = max(sde_cstate->cur_perf.core_clk_rate,
- clk_rate);
+ if (crtc->base.id == tmp_crtc->base.id) {
+ /* for current CRTC, use the cached value */
+ tmp_rate = crtc_perf->core_clk_rate;
+ } else {
+ sde_cstate = to_sde_crtc_state(tmp_crtc->state);
+ tmp_rate = sde_cstate->new_perf.core_clk_rate;
+ }
+
+ clk_rate = max(tmp_rate, clk_rate);
clk_rate = clk_round_rate(kms->perf.core_clk, clk_rate);
}
ncrtc++;
SDE_ATRACE_BEGIN(__func__);
- old = &sde_cstate->cur_perf;
- new = &sde_cstate->new_perf;
+ /*
+ * cache the performance numbers in the crtc prior to the
+ * crtc kickoff, so the same numbers are used during the
+ * perf update that happens post kickoff.
+ */
+
+ if (params_changed)
+ memcpy(&sde_crtc->new_perf, &sde_cstate->new_perf,
+ sizeof(struct sde_core_perf_params));
- if (_sde_core_perf_crtc_is_power_on(crtc) && !stop_req) {
- if (params_changed)
- _sde_core_perf_calc_crtc(kms, crtc, new);
+ old = &sde_crtc->cur_perf;
+ new = &sde_crtc->new_perf;
+ if (_sde_core_perf_crtc_is_power_on(crtc) && !stop_req) {
/*
* cases for bus bandwidth update.
* 1. new bandwidth vote or writeback output vote
* use the new clock for the rotator bw calculation.
*/
if (update_clk)
- clk_rate = _sde_core_perf_get_core_clk_rate(kms);
+ clk_rate = _sde_core_perf_get_core_clk_rate(kms, old, crtc);
if (update_bus)
_sde_core_perf_crtc_update_bus(kms, crtc, clk_rate);
*/
if (update_clk) {
SDE_ATRACE_INT(kms->perf.clk_name, clk_rate);
- SDE_EVT32(kms->dev, stop_req, clk_rate);
+ SDE_EVT32(kms->dev, stop_req, clk_rate, params_changed,
+ old->core_clk_rate, new->core_clk_rate);
+
ret = sde_power_clk_set_rate(&priv->phandle,
kms->perf.clk_name, clk_rate);
if (ret) {
struct sde_crtc_frame_event *fevent;
struct drm_crtc *crtc;
struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *cstate;
struct sde_kms *sde_kms;
unsigned long flags;
}
fevent = container_of(work, struct sde_crtc_frame_event, work);
- if (!fevent->crtc) {
+ if (!fevent->crtc || !fevent->crtc->state) {
SDE_ERROR("invalid crtc\n");
return;
}
crtc = fevent->crtc;
sde_crtc = to_sde_crtc(crtc);
+ cstate = to_sde_crtc_state(crtc->state);
sde_kms = _sde_crtc_get_kms(crtc);
if (!sde_kms) {
} else {
SDE_EVT32(DRMID(crtc), fevent->event, 2);
}
+
+ if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE)
+ sde_core_perf_crtc_update(crtc, 0, false);
} else {
SDE_ERROR("crtc%d ts:%lld unknown event %u\n", crtc->base.id,
ktime_to_ns(fevent->ts),
static int sde_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
struct drm_crtc *crtc = (struct drm_crtc *) s->private;
+ struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);
seq_printf(s, "num_connectors: %d\n", cstate->num_connectors);
seq_printf(s, "is_rt: %d\n", cstate->is_rt);
seq_printf(s, "intf_mode: %d\n", sde_crtc_get_intf_mode(crtc));
- seq_printf(s, "bw_ctl: %llu\n", cstate->cur_perf.bw_ctl);
- seq_printf(s, "core_clk_rate: %u\n", cstate->cur_perf.core_clk_rate);
+
+ seq_printf(s, "bw_ctl: %llu\n", sde_crtc->cur_perf.bw_ctl);
+ seq_printf(s, "core_clk_rate: %u\n",
+ sde_crtc->cur_perf.core_clk_rate);
seq_printf(s, "max_per_pipe_ib: %llu\n",
- cstate->cur_perf.max_per_pipe_ib);
+ sde_crtc->cur_perf.max_per_pipe_ib);
return 0;
}
* @frame_event_list : available frame event list
* @pending : Whether any page-flip events are pending signal
* @spin_lock : spin lock for frame event, transaction status, etc...
+ * @cur_perf : current performance committed to clock/bandwidth driver
+ * @new_perf : new performance committed to clock/bandwidth driver
*/
struct sde_crtc {
struct drm_crtc base;
struct sde_crtc_frame_event frame_events[SDE_CRTC_FRAME_EVENT_SIZE];
struct list_head frame_event_list;
spinlock_t spin_lock;
+
+ struct sde_core_perf_params cur_perf;
+ struct sde_core_perf_params new_perf;
};
#define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
* @property_values: Current crtc property values
* @input_fence_timeout_ns : Cached input fence timeout, in ns
* @property_blobs: Reference pointers for blob properties
+ * @new_perf: new performance state being requested
*/
struct sde_crtc_state {
struct drm_crtc_state base;
uint64_t input_fence_timeout_ns;
struct drm_property_blob *property_blobs[CRTC_PROP_COUNT];
- struct sde_core_perf_params cur_perf;
struct sde_core_perf_params new_perf;
};
iter->type = type;
}
-bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
+static bool _sde_rm_get_hw_locked(struct sde_rm *rm, struct sde_rm_hw_iter *i)
{
struct list_head *blk_list;
return false;
}
-void *sde_rm_get_hw_by_id(struct sde_rm *rm, enum sde_hw_blk_type type, int id)
+bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
+{
+ bool ret;
+
+ mutex_lock(&rm->rm_lock);
+ ret = _sde_rm_get_hw_locked(rm, i);
+ mutex_unlock(&rm->rm_lock);
+
+ return ret;
+}
+
+static void *_sde_rm_get_hw_by_id_locked(
+ struct sde_rm *rm,
+ enum sde_hw_blk_type type,
+ int id)
{
struct list_head *blk_list;
struct sde_rm_hw_blk *blk;
return hw;
}
+void *sde_rm_get_hw_by_id(struct sde_rm *rm, enum sde_hw_blk_type type, int id)
+{
+ void *ret = NULL;
+
+ mutex_lock(&rm->rm_lock);
+ ret = _sde_rm_get_hw_by_id_locked(rm, type, id);
+ mutex_unlock(&rm->rm_lock);
+
+ return ret;
+}
+
static void _sde_rm_hw_destroy(enum sde_hw_blk_type type, void *hw)
{
switch (type) {
sde_hw_mdp_destroy(rm->hw_mdp);
rm->hw_mdp = NULL;
+ mutex_destroy(&rm->rm_lock);
+
return 0;
}
/* Clear, setup lists */
memset(rm, 0, sizeof(*rm));
+
+ mutex_init(&rm->rm_lock);
+
INIT_LIST_HEAD(&rm->rsvps);
for (type = 0; type < SDE_HW_BLK_MAX; type++)
INIT_LIST_HEAD(&rm->hw_blks[type]);
if (lm_cfg->dspp != DSPP_MAX) {
sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSPP);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
if (iter.blk->id == lm_cfg->dspp) {
*dspp = iter.blk;
break;
}
sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_PINGPONG);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
if (iter.blk->id == lm_cfg->pingpong) {
*pp = iter.blk;
break;
/* Find a primary mixer */
sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_LM);
- while (lm_count != reqs->num_lm && sde_rm_get_hw(rm, &iter_i)) {
+ while (lm_count != reqs->num_lm &&
+ _sde_rm_get_hw_locked(rm, &iter_i)) {
memset(&lm, 0, sizeof(lm));
memset(&dspp, 0, sizeof(dspp));
memset(&pp, 0, sizeof(pp));
/* Valid primary mixer found, find matching peers */
sde_rm_init_hw_iter(&iter_j, 0, SDE_HW_BLK_LM);
- while (lm_count != reqs->num_lm && sde_rm_get_hw(rm, &iter_j)) {
+ while (lm_count != reqs->num_lm &&
+ _sde_rm_get_hw_locked(rm, &iter_j)) {
if (iter_i.blk == iter_j.blk)
continue;
/* reserve a free PINGPONG_SLAVE block */
rc = -ENAVAIL;
sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_PINGPONG);
- while (sde_rm_get_hw(rm, &iter_i)) {
+ while (_sde_rm_get_hw_locked(rm, &iter_i)) {
struct sde_pingpong_cfg *pp_cfg =
(struct sde_pingpong_cfg *)
(iter_i.blk->catalog);
memset(&ctls, 0, sizeof(ctls));
sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CTL);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
unsigned long caps;
bool has_split_display, has_ppsplit;
struct sde_cdm_cfg *cdm;
sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CDM);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
bool match = false;
if (RESERVED_BY_OTHER(iter.blk, rsvp))
/* Find the block entry in the rm, and note the reservation */
sde_rm_init_hw_iter(&iter, 0, type);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
if (iter.blk->id != id)
continue;
* @rm: KMS handle
* @rsvp: RSVP pointer to release and release resources for
*/
-void _sde_rm_release_rsvp(
+static void _sde_rm_release_rsvp(
struct sde_rm *rm,
struct sde_rm_rsvp *rsvp,
struct drm_connector *conn)
return;
}
+ mutex_lock(&rm->rm_lock);
+
rsvp = _sde_rm_get_rsvp(rm, enc);
if (!rsvp) {
SDE_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
- return;
+ goto end;
}
conn = _sde_rm_get_connector(enc);
if (!conn) {
SDE_ERROR("failed to get connector for enc %d\n", enc->base.id);
- return;
+ goto end;
}
top_ctrl = sde_connector_get_property(conn->state,
CONNECTOR_PROP_TOPOLOGY_NAME,
SDE_RM_TOPOLOGY_UNKNOWN);
}
+
+end:
+ mutex_unlock(&rm->rm_lock);
}
static int _sde_rm_commit_rsvp(
crtc_state->crtc->base.id, test_only);
SDE_EVT32(enc->base.id, conn_state->connector->base.id);
+ mutex_lock(&rm->rm_lock);
+
_sde_rm_print_rsvps(rm, SDE_RM_STAGE_BEGIN);
ret = _sde_rm_populate_requirements(rm, enc, crtc_state,
conn_state, &reqs);
if (ret) {
SDE_ERROR("failed to populate hw requirements\n");
- return ret;
+ goto end;
}
/*
* replace the current with the next.
*/
rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
- if (!rsvp_nxt)
- return -ENOMEM;
+ if (!rsvp_nxt) {
+ ret = -ENOMEM;
+ goto end;
+ }
rsvp_cur = _sde_rm_get_rsvp(rm, enc);
_sde_rm_print_rsvps(rm, SDE_RM_STAGE_FINAL);
+end:
+ mutex_unlock(&rm->rm_lock);
+
return ret;
}
* @hw_mdp: hardware object for mdp_top
* @lm_max_width: cached layer mixer maximum width
* @rsvp_next_seq: sequence number for next reservation for debugging purposes
+ * @rm_lock: resource manager mutex
*/
struct sde_rm {
struct drm_device *dev;
struct sde_hw_mdp *hw_mdp;
uint32_t lm_max_width;
uint32_t rsvp_next_seq;
+ struct mutex rm_lock;
};
/**
HDCP_STATE_AUTHENTICATING,
HDCP_STATE_AUTHENTICATED,
HDCP_STATE_AUTH_FAIL,
+ HDCP_STATE_AUTH_FAIL_NOREAUTH,
HDCP_STATE_AUTH_ENC_NONE,
HDCP_STATE_AUTH_ENC_1X,
HDCP_STATE_AUTH_ENC_2P2
}
}
+#ifdef __BIG_ENDIAN
+ pci->msi = false;
+#endif
+
pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
if (pci->msi && func->msi_rearm) {
pci->msi = pci_enable_msi(pci->pdev) == 0;
} else {
pr_err("Failed to fill pool (%p)\n", pool);
/* If we have any pages left put them to the pool. */
- list_for_each_entry(p, &pool->list, lru) {
+ list_for_each_entry(p, &new_pages, lru) {
++cpages;
}
list_splice(&new_pages, &pool->list);
{
/* the worst case is computed from the set_report command with a
* reportID > 15 and the maximum report length */
- int args_len = sizeof(__u8) + /* optional ReportID byte */
+ int args_len = sizeof(__u8) + /* ReportID */
+ sizeof(__u8) + /* optional ReportID byte */
sizeof(__u16) + /* data register */
sizeof(__u16) + /* size of the report */
report_size; /* report */
unsigned int rsize = 0;
char *rdesc;
int ret, n;
+ int num_descriptors;
+ size_t offset = offsetof(struct hid_descriptor, desc);
quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
le16_to_cpu(dev->descriptor.idProduct));
return -ENODEV;
}
+ if (hdesc->bLength < sizeof(struct hid_descriptor)) {
+ dbg_hid("hid descriptor is too short\n");
+ return -EINVAL;
+ }
+
hid->version = le16_to_cpu(hdesc->bcdHID);
hid->country = hdesc->bCountryCode;
- for (n = 0; n < hdesc->bNumDescriptors; n++)
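+ /* Only trust as many class descriptors as actually fit within bLength */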
+ num_descriptors = min_t(int, hdesc->bNumDescriptors,
+ (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor));
+
+ for (n = 0; n < num_descriptors; n++)
if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
out_src = smsg_out;
break;
+ case WRITE_TO_FILE:
+ out_src = fcopy_transaction.fcopy_msg;
+ out_len = sizeof(struct hv_do_fcopy);
+ break;
default:
out_src = fcopy_transaction.fcopy_msg;
out_len = fcopy_transaction.recv_len;
}
static DEVICE_ATTR(cpu0_vid, S_IRUGO, get_cpu_vid, NULL);
-#define VDD_FROM_REG(val) (((val) * 95 + 2) / 4)
-#define VDD_TO_REG(val) clamp_val((((val) * 4 + 47) / 95), 0, 255)
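+/* One register LSB is 95/4 mV; VDD_CLAMP() keeps the encoded value in 0..255 */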
+#define VDD_FROM_REG(val) DIV_ROUND_CLOSEST((val) * 95, 4)
+#define VDD_CLAMP(val) clamp_val(val, 0, 255 * 95 / 4)
+#define VDD_TO_REG(val) DIV_ROUND_CLOSEST(VDD_CLAMP(val) * 4, 95)
-#define IN_FROM_REG(val) ((val) * 19)
-#define IN_TO_REG(val) clamp_val((((val) + 9) / 19), 0, 255)
+#define IN_FROM_REG(val) ((val) * 19)
+#define IN_CLAMP(val) clamp_val(val, 0, 255 * 19)
+#define IN_TO_REG(val) DIV_ROUND_CLOSEST(IN_CLAMP(val), 19)
static ssize_t get_in_input(struct device *dev, struct device_attribute *attr,
char *buf)
#define DIV_FROM_REG(val) (1 << (val))
#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (480000 / ((val) << (div))))
-#define FAN_TO_REG(val, div) ((val) <= 0 ? 0 : \
- clamp_val((480000 + ((val) << ((div)-1))) / ((val) << (div)), 1, 255))
+
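+/* The register encodes 480000 / (rpm << div); FAN_CLAMP() keeps it in 1..255 */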
+#define FAN_BASE(div) (480000 >> (div))
+#define FAN_CLAMP(val, div) clamp_val(val, FAN_BASE(div) / 255, \
+ FAN_BASE(div))
+#define FAN_TO_REG(val, div) ((val) == 0 ? 0 : \
+ DIV_ROUND_CLOSEST(480000, \
+ FAN_CLAMP(val, div) << (div)))
static ssize_t get_fan_input(struct device *dev, struct device_attribute *attr,
char *buf)
static DEVICE_ATTR(fan1_off, S_IRUGO | S_IWUSR,
get_fan_off, set_fan_off);
-#define TEMP_FROM_REG(val) (((val) - 130) * 1000)
-#define TEMP_TO_REG(val) clamp_val(((((val) < 0 ? \
- (val) - 500 : (val) + 500) / 1000) + 130), 0, 255)
+#define TEMP_FROM_REG(val) (((val) - 130) * 1000)
+#define TEMP_CLAMP(val) clamp_val(val, -130000, 125000)
+#define TEMP_TO_REG(val) (DIV_ROUND_CLOSEST(TEMP_CLAMP(val), 1000) + 130)
static ssize_t get_temp_input(struct device *dev, struct device_attribute *attr,
char *buf)
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
.driver_data = (kernel_ulong_t)0,
},
+ {
+ /* Cannon Lake H */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa326),
+ .driver_data = (kernel_ulong_t)0,
+ },
+ {
+ /* Cannon Lake LP */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
+ .driver_data = (kernel_ulong_t)0,
+ },
{ 0 },
};
stm_source_link_drop(src);
- device_destroy(&stm_source_class, src->dev.devt);
+ device_unregister(&src->dev);
}
EXPORT_SYMBOL_GPL(stm_source_unregister_device);
static int at91_twi_resume_noirq(struct device *dev)
{
+ struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
int ret;
if (!pm_runtime_status_suspended(dev)) {
pm_runtime_mark_last_busy(dev);
pm_request_autosuspend(dev);
+ at91_init_twi_bus(twi_dev);
+
return 0;
}
break;
case I2C_SMBUS_BLOCK_DATA:
case I2C_SMBUS_I2C_BLOCK_DATA:
- memcpy(&data->block[1], dma_buffer, desc->rxbytes);
- data->block[0] = desc->rxbytes;
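+ /* dma_buffer[0] carries the block length; reject a mismatched read */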
+ if (desc->rxbytes != dma_buffer[0] + 1)
+ return -EMSGSIZE;
+
+ memcpy(data->block, dma_buffer, desc->rxbytes);
break;
}
return 0;
jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
- i2c->cmd = 0;
- memset(i2c->cmd_buf, 0, BUFSIZE);
- memset(i2c->data_buf, 0, BUFSIZE);
-
i2c->irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
dev_name(&pdev->dev), i2c);
wdata1 |= *buf++ << ((i - 4) * 8);
writel(wdata0, i2c->regs + REG_TOK_WDATA0);
- writel(wdata0, i2c->regs + REG_TOK_WDATA1);
+ writel(wdata1, i2c->regs + REG_TOK_WDATA1);
dev_dbg(i2c->dev, "%s: data %08x %08x len %d\n", __func__,
wdata0, wdata1, len);
unsigned int vref_mv)
{
struct ad7793_state *st = iio_priv(indio_dev);
- int i, ret = -1;
+ int i, ret;
unsigned long long scale_uv;
u32 id;
return ret;
/* reset the serial interface */
- ret = spi_write(st->sd.spi, (u8 *)&ret, sizeof(ret));
+ ret = ad_sd_reset(&st->sd, 32);
if (ret < 0)
goto out;
usleep_range(500, 2000); /* Wait for at least 500us */
}
EXPORT_SYMBOL_GPL(ad_sd_read_reg);
+/**
+ * ad_sd_reset() - Reset the serial interface
+ *
+ * @sigma_delta: The sigma delta device
+ * @reset_length: Number of SCLKs with DIN = 1
+ *
+ * Returns 0 on success, an error code otherwise.
+ **/
+int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
+ unsigned int reset_length)
+{
+ uint8_t *buf;
+ unsigned int size;
+ int ret;
+
+ size = DIV_ROUND_UP(reset_length, 8);
+ buf = kcalloc(size, sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
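+ /* Keep DIN high (all ones) for at least reset_length clock cycles */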
+ memset(buf, 0xff, size);
+ ret = spi_write(sigma_delta->spi, buf, size);
+ kfree(buf);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ad_sd_reset);
+
static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
unsigned int mode, unsigned int channel)
{
#include <linux/iio/driver.h>
#define AXP288_ADC_EN_MASK 0xF1
-#define AXP288_ADC_TS_PIN_GPADC 0xF2
-#define AXP288_ADC_TS_PIN_ON 0xF3
enum axp288_adc_id {
AXP288_ADC_TS,
return IIO_VAL_INT;
}
-static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
- unsigned long address)
-{
- /* channels other than GPADC do not need to switch TS pin */
- if (address != AXP288_GP_ADC_H)
- return 0;
-
- return regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
-}
-
static int axp288_adc_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
mutex_lock(&indio_dev->mlock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
- chan->address)) {
- dev_err(&indio_dev->dev, "GPADC mode\n");
- ret = -EINVAL;
- break;
- }
ret = axp288_adc_read_channel(val, chan->address, info->regmap);
- if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
- chan->address))
- dev_err(&indio_dev->dev, "TS pin restore\n");
break;
default:
ret = -EINVAL;
return ret;
}
-static int axp288_adc_set_state(struct regmap *regmap)
-{
- /* ADC should be always enabled for internal FG to function */
- if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
- return -EIO;
-
- return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
-}
-
static const struct iio_info axp288_adc_iio_info = {
.read_raw = &axp288_adc_read_raw,
.driver_module = THIS_MODULE,
* Set ADC to enabled state at all time, including system suspend.
* otherwise internal fuel gauge functionality may be affected.
*/
- ret = axp288_adc_set_state(axp20x->regmap);
+ ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
if (ret) {
dev_err(&pdev->dev, "unable to enable ADC device\n");
return ret;
* MCP3204
* MCP3208
* ------------
+ * 13 bit converter
+ * MCP3301
*
* Datasheet can be found here:
* http://ww1.microchip.com/downloads/en/DeviceDoc/21293C.pdf mcp3001
}
static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
- bool differential, int device_index)
+ bool differential, int device_index, int *val)
{
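+ /* Return status only; *val carries the result so negative mcp3301 codes aren't mistaken for errors */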
int ret;
switch (device_index) {
case mcp3001:
- return (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
+ *val = (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
+ return 0;
case mcp3002:
case mcp3004:
case mcp3008:
- return (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
+ *val = (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
+ return 0;
case mcp3201:
- return (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
+ *val = (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
+ return 0;
case mcp3202:
case mcp3204:
case mcp3208:
- return (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
+ *val = (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
+ return 0;
case mcp3301:
- return sign_extend32((adc->rx_buf[0] & 0x1f) << 8 | adc->rx_buf[1], 12);
+ *val = sign_extend32((adc->rx_buf[0] & 0x1f) << 8
+ | adc->rx_buf[1], 12);
+ return 0;
default:
return -EINVAL;
}
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = mcp320x_adc_conversion(adc, channel->address,
- channel->differential, device_index);
-
+ channel->differential, device_index, val);
if (ret < 0)
goto out;
- *val = ret;
ret = IIO_VAL_INT;
break;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &mcp320x_info;
+ spi_set_drvdata(spi, indio_dev);
chip_info = &mcp320x_chip_infos[spi_get_device_id(spi)->driver_data];
indio_dev->channels = chip_info->channels;
/* Enable 3v1 bias regulator for MADC[3:6] */
madc->usb3v1 = devm_regulator_get(madc->dev, "vusb3v1");
- if (IS_ERR(madc->usb3v1))
- return -ENODEV;
+ if (IS_ERR(madc->usb3v1)) {
+ ret = -ENODEV;
+ goto err_i2c;
+ }
ret = regulator_enable(madc->usb3v1);
if (ret)
ret = iio_device_register(iio_dev);
if (ret) {
dev_err(&pdev->dev, "could not register iio device\n");
- goto err_i2c;
+ goto err_usb3v1;
}
return 0;
+err_usb3v1:
+ regulator_disable(madc->usb3v1);
err_i2c:
twl4030_madc_set_current_generator(madc, 0, 0);
err_current_generator:
ret = xadc->ops->setup(pdev, indio_dev, irq);
if (ret)
- goto err_free_samplerate_trigger;
+ goto err_clk_disable_unprepare;
ret = request_irq(irq, xadc->ops->interrupt_handler, 0,
dev_name(&pdev->dev), indio_dev);
err_free_irq:
free_irq(irq, indio_dev);
+err_clk_disable_unprepare:
+ clk_disable_unprepare(xadc->clk);
err_free_samplerate_trigger:
if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
iio_trigger_free(xadc->samplerate_trigger);
err_triggered_buffer_cleanup:
if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
iio_triggered_buffer_cleanup(indio_dev);
-err_clk_disable_unprepare:
- clk_disable_unprepare(xadc->clk);
err_device_free:
kfree(indio_dev->channels);
ret = indio_dev->info->debugfs_reg_access(indio_dev,
indio_dev->cached_reg_addr,
0, &val);
- if (ret)
+ if (ret) {
dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
+ return ret;
+ }
len = snprintf(buf, sizeof(buf), "0x%X\n", val);
unsigned long flags;
while (wait) {
- unsigned long shadow;
+ unsigned long shadow = 0;
int cstart, previ = -1;
/*
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from path/mc list */
- list_del(&neigh->list);
+ list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
} else {
np = &neigh->hnext;
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from parent list */
- list_del(&neigh->list);
+ list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
return;
} else {
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from parent list */
- list_del(&neigh->list);
+ list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
} else {
np = &neigh->hnext;
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from path/mc list */
- list_del(&neigh->list);
+ list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
}
}
out:
up_write(&ppriv->vlan_rwsem);
+ rtnl_unlock();
+
if (result)
free_netdev(priv->dev);
- rtnl_unlock();
-
return result;
}
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey &&
priv->child_type == IPOIB_LEGACY_CHILD) {
- unregister_netdevice(priv->dev);
list_del(&priv->list);
dev = priv->dev;
break;
}
up_write(&ppriv->vlan_rwsem);
+ if (dev) {
+ ipoib_dbg(ppriv, "delete child vlan %s\n", dev->name);
+ unregister_netdevice(dev);
+ }
+
rtnl_unlock();
if (dev) {
return 0;
if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
- psmouse_warn(psmouse, "failed to get extended button data\n");
- button_info = 0;
+ psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
+ button_info = 0x33;
}
psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
},
},
{
+ /* Gigabyte P57 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
+ },
+ },
+ {
/* Schenker XMG C504 - Elantech touchpad */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
mutex_unlock(&domain->api_lock);
domain_flush_tlb_pde(domain);
+ domain_flush_complete(domain);
return unmap_size;
}
if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
pte |= ARM_LPAE_PTE_NSTABLE;
__arm_lpae_set_pte(ptep, pte, cfg);
- } else {
+ } else if (!iopte_leaf(pte, lvl)) {
cptep = iopte_deref(pte, data);
+ } else {
+ /* We require an unmap first */
+ WARN_ON(!selftest_running);
+ return -EEXIST;
}
/* Rinse, repeat */
static int __init crossbar_of_init(struct device_node *node)
{
- int i, size, max = 0, reserved = 0, entry;
+ int i, size, reserved = 0;
+ u32 max = 0, entry;
const __be32 *irqsr;
int ret = -ENOMEM;
gic_len = resource_size(&res);
}
- if (mips_cm_present())
+ if (mips_cm_present()) {
write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
+ /* Ensure GIC region is enabled before trying to access it */
+ __sync();
+ }
gic_present = true;
__gic_init(gic_base, gic_len, cpu_vec, 0, node);
isdn_net_local *lp;
struct ippp_struct *is;
int proto;
- unsigned char protobuf[4];
is = file->private_data;
if (!lp)
printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n");
else {
- /*
- * Don't reset huptimer for
- * LCP packets. (Echo requests).
- */
- if (copy_from_user(protobuf, buf, 4))
- return -EFAULT;
- proto = PPP_PROTOCOL(protobuf);
- if (proto != PPP_LCP)
- lp->huptimer = 0;
+ if (lp->isdn_device < 0 || lp->isdn_channel < 0) {
+ unsigned char protobuf[4];
+ /*
+ * Don't reset huptimer for
+ * LCP packets. (Echo requests).
+ */
+ if (copy_from_user(protobuf, buf, 4))
+ return -EFAULT;
+
+ proto = PPP_PROTOCOL(protobuf);
+ if (proto != PPP_LCP)
+ lp->huptimer = 0;
- if (lp->isdn_device < 0 || lp->isdn_channel < 0)
return 0;
+ }
if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) &&
lp->dialstate == 0 &&
(lp->flags & ISDN_NET_CONNECTED)) {
unsigned short hl;
struct sk_buff *skb;
+ unsigned char *cpy_buf;
/*
* we need to reserve enough space in front of
* sk_buff. old call to dev_alloc_skb only reserved
return count;
}
skb_reserve(skb, hl);
- if (copy_from_user(skb_put(skb, count), buf, count))
+ cpy_buf = skb_put(skb, count);
+ if (copy_from_user(cpy_buf, buf, count))
{
kfree_skb(skb);
return -EFAULT;
}
+
+ /*
+ * Don't reset huptimer for
+ * LCP packets. (Echo requests).
+ */
+ proto = PPP_PROTOCOL(cpy_buf);
+ if (proto != PPP_LCP)
+ lp->huptimer = 0;
+
if (is->debug & 0x40) {
printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len);
isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
LEDs in both PWM and light pattern generator (LPG) modes. For older
PMICs, it also supports WLEDs and flash LEDs.
+config LEDS_QPNP_FLASH
+ tristate "Support for QPNP Flash LEDs"
+ depends on LEDS_CLASS && SPMI
+ help
+ This driver supports the flash LED functionality of Qualcomm
+ Technologies, Inc. QPNP PMICs. This driver supports PMICs up through
+ PM8994. It can configure the flash LED target current for several
+ independent channels.
+
config LEDS_QPNP_FLASH_V2
tristate "Support for QPNP V2 Flash LEDs"
- depends on LEDS_CLASS && MFD_SPMI_PMIC
+ depends on LEDS_CLASS && MFD_SPMI_PMIC && !LEDS_QPNP_FLASH
help
This driver supports the flash V2 LED functionality of Qualcomm
Technologies, Inc. QPNP PMICs. This driver supports PMICs starting
obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o
obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o
obj-$(CONFIG_LEDS_QPNP) += leds-qpnp.o
+obj-$(CONFIG_LEDS_QPNP_FLASH) += leds-qpnp-flash.o
obj-$(CONFIG_LEDS_QPNP_FLASH_V2) += leds-qpnp-flash-v2.o
obj-$(CONFIG_LEDS_QPNP_WLED) += leds-qpnp-wled.o
obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o
--- /dev/null
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/errno.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
+#include <linux/power_supply.h>
+#include <linux/leds-qpnp-flash.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include "leds.h"
+
+#define FLASH_LED_PERIPHERAL_SUBTYPE(base) (base + 0x05)
+#define FLASH_SAFETY_TIMER(base) (base + 0x40)
+#define FLASH_MAX_CURRENT(base) (base + 0x41)
+#define FLASH_LED0_CURRENT(base) (base + 0x42)
+#define FLASH_LED1_CURRENT(base) (base + 0x43)
+#define FLASH_CLAMP_CURRENT(base) (base + 0x44)
+#define FLASH_MODULE_ENABLE_CTRL(base) (base + 0x46)
+#define FLASH_LED_STROBE_CTRL(base) (base + 0x47)
+#define FLASH_LED_TMR_CTRL(base) (base + 0x48)
+#define FLASH_HEADROOM(base) (base + 0x4A)
+#define FLASH_STARTUP_DELAY(base) (base + 0x4B)
+#define FLASH_MASK_ENABLE(base) (base + 0x4C)
+#define FLASH_VREG_OK_FORCE(base) (base + 0x4F)
+#define FLASH_FAULT_DETECT(base) (base + 0x51)
+#define FLASH_THERMAL_DRATE(base) (base + 0x52)
+#define FLASH_CURRENT_RAMP(base) (base + 0x54)
+#define FLASH_VPH_PWR_DROOP(base) (base + 0x5A)
+#define FLASH_HDRM_SNS_ENABLE_CTRL0(base) (base + 0x5C)
+#define FLASH_HDRM_SNS_ENABLE_CTRL1(base) (base + 0x5D)
+#define FLASH_LED_UNLOCK_SECURE(base) (base + 0xD0)
+#define FLASH_PERPH_RESET_CTRL(base) (base + 0xDA)
+#define FLASH_TORCH(base) (base + 0xE4)
+
+#define FLASH_STATUS_REG_MASK 0xFF
+#define FLASH_LED_FAULT_STATUS(base) (base + 0x08)
+#define INT_LATCHED_STS(base) (base + 0x18)
+#define IN_POLARITY_HIGH(base) (base + 0x12)
+#define INT_SET_TYPE(base) (base + 0x11)
+#define INT_EN_SET(base) (base + 0x15)
+#define INT_LATCHED_CLR(base) (base + 0x14)
+
+#define FLASH_HEADROOM_MASK 0x03
+#define FLASH_STARTUP_DLY_MASK 0x03
+#define FLASH_VREG_OK_FORCE_MASK 0xC0
+#define FLASH_FAULT_DETECT_MASK 0x80
+#define FLASH_THERMAL_DERATE_MASK 0xBF
+#define FLASH_SECURE_MASK 0xFF
+#define FLASH_TORCH_MASK 0x03
+#define FLASH_CURRENT_MASK 0x7F
+#define FLASH_TMR_MASK 0x03
+#define FLASH_TMR_SAFETY 0x00
+#define FLASH_SAFETY_TIMER_MASK 0x7F
+#define FLASH_MODULE_ENABLE_MASK 0xE0
+#define FLASH_STROBE_MASK 0xC0
+#define FLASH_CURRENT_RAMP_MASK 0xBF
+#define FLASH_VPH_PWR_DROOP_MASK 0xF3
+#define FLASH_LED_HDRM_SNS_ENABLE_MASK 0x81
+#define FLASH_MASK_MODULE_CONTRL_MASK 0xE0
+#define FLASH_FOLLOW_OTST2_RB_MASK 0x08
+
+#define FLASH_LED_TRIGGER_DEFAULT "none"
+#define FLASH_LED_HEADROOM_DEFAULT_MV 500
+#define FLASH_LED_STARTUP_DELAY_DEFAULT_US 128
+#define FLASH_LED_CLAMP_CURRENT_DEFAULT_MA 200
+#define FLASH_LED_THERMAL_DERATE_THRESHOLD_DEFAULT_C 80
+#define FLASH_LED_RAMP_UP_STEP_DEFAULT_US 3
+#define FLASH_LED_RAMP_DN_STEP_DEFAULT_US 3
+#define FLASH_LED_VPH_PWR_DROOP_THRESHOLD_DEFAULT_MV 3200
+#define FLASH_LED_VPH_PWR_DROOP_DEBOUNCE_TIME_DEFAULT_US 10
+#define FLASH_LED_THERMAL_DERATE_RATE_DEFAULT_PERCENT 2
+#define FLASH_RAMP_UP_DELAY_US_MIN 1000
+#define FLASH_RAMP_UP_DELAY_US_MAX 1001
+#define FLASH_RAMP_DN_DELAY_US_MIN 2160
+#define FLASH_RAMP_DN_DELAY_US_MAX 2161
+#define FLASH_BOOST_REGULATOR_PROBE_DELAY_MS 2000
+#define FLASH_TORCH_MAX_LEVEL 0x0F
+#define FLASH_MAX_LEVEL 0x4F
+#define FLASH_LED_FLASH_HW_VREG_OK 0x40
+#define FLASH_LED_FLASH_SW_VREG_OK 0x80
+#define FLASH_LED_STROBE_TYPE_HW 0x04
+#define FLASH_DURATION_DIVIDER 10
+#define FLASH_LED_HEADROOM_DIVIDER 100
+#define FLASH_LED_HEADROOM_OFFSET 2
+#define FLASH_LED_MAX_CURRENT_MA 1000
+#define FLASH_LED_THERMAL_THRESHOLD_MIN 95
+#define FLASH_LED_THERMAL_DEVIDER 10
+#define FLASH_LED_VPH_DROOP_THRESHOLD_MIN_MV 2500
+#define FLASH_LED_VPH_DROOP_THRESHOLD_DIVIDER 100
+#define FLASH_LED_HDRM_SNS_ENABLE 0x81
+#define FLASH_LED_HDRM_SNS_DISABLE 0x01
+#define FLASH_LED_UA_PER_MA 1000
+#define FLASH_LED_MASK_MODULE_MASK2_ENABLE 0x20
+#define FLASH_LED_MASK3_ENABLE_SHIFT 7
+#define FLASH_LED_MODULE_CTRL_DEFAULT 0x60
+#define FLASH_LED_CURRENT_READING_DELAY_MIN 5000
+#define FLASH_LED_CURRENT_READING_DELAY_MAX 5001
+#define FLASH_LED_OPEN_FAULT_DETECTED 0xC
+
+#define FLASH_UNLOCK_SECURE 0xA5
+#define FLASH_LED_TORCH_ENABLE 0x00
+#define FLASH_LED_TORCH_DISABLE 0x03
+#define FLASH_MODULE_ENABLE 0x80
+#define FLASH_LED0_TRIGGER 0x80
+#define FLASH_LED1_TRIGGER 0x40
+#define FLASH_LED0_ENABLEMENT 0x40
+#define FLASH_LED1_ENABLEMENT 0x20
+#define FLASH_LED_DISABLE 0x00
+#define FLASH_LED_MIN_CURRENT_MA 13
+#define FLASH_SUBTYPE_DUAL 0x01
+#define FLASH_SUBTYPE_SINGLE 0x02
+
+/*
+ * Each ID represents a physical LED, for individual control purposes.
+ */
+enum flash_led_id {
+ FLASH_LED_0 = 0,
+ FLASH_LED_1,
+ FLASH_LED_SWITCH,
+};
+
+enum flash_led_type {
+ FLASH = 0,
+ TORCH,
+ SWITCH,
+};
+
+enum thermal_derate_rate {
+ RATE_1_PERCENT = 0,
+ RATE_1P25_PERCENT,
+ RATE_2_PERCENT,
+ RATE_2P5_PERCENT,
+ RATE_5_PERCENT,
+};
+
+enum current_ramp_steps {
+ RAMP_STEP_0P2_US = 0,
+ RAMP_STEP_0P4_US,
+ RAMP_STEP_0P8_US,
+ RAMP_STEP_1P6_US,
+ RAMP_STEP_3P3_US,
+ RAMP_STEP_6P7_US,
+ RAMP_STEP_13P5_US,
+ RAMP_STEP_27US,
+};
+
+struct flash_regulator_data {
+ struct regulator *regs;
+ const char *reg_name;
+ u32 max_volt_uv;
+};
+
+/*
+ * Configurations for each individual LED
+ */
+struct flash_node_data {
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ struct led_classdev cdev;
+ struct work_struct work;
+ struct flash_regulator_data *reg_data;
+ u16 max_current;
+ u16 prgm_current;
+ u16 prgm_current2;
+ u16 duration;
+ u8 id;
+ u8 type;
+ u8 trigger;
+ u8 enable;
+ u8 num_regulators;
+ bool flash_on;
+};
+
+/*
+ * Flash LED configuration read from device tree
+ */
+struct flash_led_platform_data {
+ unsigned int temp_threshold_num;
+ unsigned int temp_derate_curr_num;
+ unsigned int *die_temp_derate_curr_ma;
+ unsigned int *die_temp_threshold_degc;
+ u16 ramp_up_step;
+ u16 ramp_dn_step;
+ u16 vph_pwr_droop_threshold;
+ u16 headroom;
+ u16 clamp_current;
+ u8 thermal_derate_threshold;
+ u8 vph_pwr_droop_debounce_time;
+ u8 startup_dly;
+ u8 thermal_derate_rate;
+ bool pmic_charger_support;
+ bool self_check_en;
+ bool thermal_derate_en;
+ bool current_ramp_en;
+ bool vph_pwr_droop_en;
+ bool hdrm_sns_ch0_en;
+ bool hdrm_sns_ch1_en;
+ bool power_detect_en;
+ bool mask3_en;
+ bool follow_rb_disable;
+ bool die_current_derate_en;
+};
+
+struct qpnp_flash_led_buffer {
+ struct mutex debugfs_lock; /* Prevent thread concurrency */
+ size_t rpos;
+ size_t wpos;
+ size_t len;
+ struct qpnp_flash_led *led;
+ u32 buffer_cnt;
+ char data[0];
+};
+
+/*
+ * Flash LED data structure containing flash LED attributes
+ */
+struct qpnp_flash_led {
+ struct pmic_revid_data *revid_data;
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ struct flash_led_platform_data *pdata;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *gpio_state_active;
+ struct pinctrl_state *gpio_state_suspend;
+ struct flash_node_data *flash_node;
+ struct power_supply *battery_psy;
+ struct workqueue_struct *ordered_workq;
+ struct qpnp_vadc_chip *vadc_dev;
+ struct mutex flash_led_lock;
+ struct dentry *dbgfs_root;
+ int num_leds;
+ u16 base;
+ u16 current_addr;
+ u16 current2_addr;
+ u8 peripheral_type;
+ u8 fault_reg;
+ bool gpio_enabled;
+ bool charging_enabled;
+ bool strobe_debug;
+ bool dbg_feature_en;
+ bool open_fault;
+};
+
+static u8 qpnp_flash_led_ctrl_dbg_regs[] = {
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
+ 0x4A, 0x4B, 0x4C, 0x4F, 0x51, 0x52, 0x54, 0x55, 0x5A, 0x5C, 0x5D,
+};
+
+static int flash_led_dbgfs_file_open(struct qpnp_flash_led *led,
+ struct file *file)
+{
+ struct qpnp_flash_led_buffer *log;
+ size_t logbufsize = SZ_4K;
+
+ log = kzalloc(logbufsize, GFP_KERNEL);
+ if (!log)
+ return -ENOMEM;
+
+ log->rpos = 0;
+ log->wpos = 0;
+ log->len = logbufsize - sizeof(*log);
+ mutex_init(&log->debugfs_lock);
+ log->led = led;
+
+ log->buffer_cnt = 1;
+ file->private_data = log;
+
+ return 0;
+}
+
+static int flash_led_dfs_open(struct inode *inode, struct file *file)
+{
+ struct qpnp_flash_led *led = inode->i_private;
+
+ return flash_led_dbgfs_file_open(led, file);
+}
+
+static int flash_led_dfs_close(struct inode *inode, struct file *file)
+{
+ struct qpnp_flash_led_buffer *log = file->private_data;
+
+ if (log) {
+ file->private_data = NULL;
+ mutex_destroy(&log->debugfs_lock);
+ kfree(log);
+ }
+
+ return 0;
+}
+
+#define MIN_BUFFER_WRITE_LEN 20
+static int print_to_log(struct qpnp_flash_led_buffer *log,
+ const char *fmt, ...)
+{
+ va_list args;
+ int cnt;
+ char *log_buf;
+ size_t size = log->len - log->wpos;
+
+ if (size < MIN_BUFFER_WRITE_LEN)
+ return 0; /* not enough buffer left */
+
+ log_buf = &log->data[log->wpos];
+ va_start(args, fmt);
+ cnt = vscnprintf(log_buf, size, fmt, args);
+ va_end(args);
+
+ log->wpos += cnt;
+ return cnt;
+}
+
+static ssize_t flash_led_dfs_latched_reg_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct qpnp_flash_led_buffer *log = fp->private_data;
+ struct qpnp_flash_led *led;
+ uint val;
+ int rc = 0;
+ size_t len;
+ size_t ret;
+
+ if (!log) {
+ pr_err("error: file private data is NULL\n");
+ return -EFAULT;
+ }
+ led = log->led;
+
+ mutex_lock(&log->debugfs_lock);
+ if ((log->rpos >= log->wpos && log->buffer_cnt == 0) ||
+ ((log->len - log->wpos) < MIN_BUFFER_WRITE_LEN))
+ goto unlock_mutex;
+
+ rc = regmap_read(led->regmap, INT_LATCHED_STS(led->base), &val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Unable to read from address %x, rc(%d)\n",
+ INT_LATCHED_STS(led->base), rc);
+ goto unlock_mutex;
+ }
+ log->buffer_cnt--;
+
+ rc = print_to_log(log, "0x%05X ", INT_LATCHED_STS(led->base));
+ if (rc == 0)
+ goto unlock_mutex;
+
+ rc = print_to_log(log, "0x%02X ", val);
+ if (rc == 0)
+ goto unlock_mutex;
+
+ if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+ log->data[log->wpos - 1] = '\n';
+
+ len = min(count, log->wpos - log->rpos);
+
+ ret = copy_to_user(buf, &log->data[log->rpos], len);
+ if (ret) {
+ pr_err("error copy register value to user\n");
+ rc = -EFAULT;
+ goto unlock_mutex;
+ }
+
+ len -= ret;
+ *ppos += len;
+ log->rpos += len;
+
+ rc = len;
+
+unlock_mutex:
+ mutex_unlock(&log->debugfs_lock);
+ return rc;
+}
+
+static ssize_t flash_led_dfs_fault_reg_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct qpnp_flash_led_buffer *log = fp->private_data;
+ struct qpnp_flash_led *led;
+ int rc = 0;
+ size_t len;
+ size_t ret;
+
+ if (!log) {
+ pr_err("error: file private data is NULL\n");
+ return -EFAULT;
+ }
+ led = log->led;
+
+ mutex_lock(&log->debugfs_lock);
+ if ((log->rpos >= log->wpos && log->buffer_cnt == 0) ||
+ ((log->len - log->wpos) < MIN_BUFFER_WRITE_LEN))
+ goto unlock_mutex;
+
+ log->buffer_cnt--;
+
+ rc = print_to_log(log, "0x%05X ", FLASH_LED_FAULT_STATUS(led->base));
+ if (rc == 0)
+ goto unlock_mutex;
+
+ rc = print_to_log(log, "0x%02X ", led->fault_reg);
+ if (rc == 0)
+ goto unlock_mutex;
+
+ if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+ log->data[log->wpos - 1] = '\n';
+
+ len = min(count, log->wpos - log->rpos);
+
+ ret = copy_to_user(buf, &log->data[log->rpos], len);
+ if (ret) {
+ pr_err("error copy register value to user\n");
+ rc = -EFAULT;
+ goto unlock_mutex;
+ }
+
+ len -= ret;
+ *ppos += len;
+ log->rpos += len;
+
+ rc = len;
+
+unlock_mutex:
+ mutex_unlock(&log->debugfs_lock);
+ return rc;
+}
+
+static ssize_t flash_led_dfs_fault_reg_enable(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ u8 *val;
+ int pos = 0;
+ int cnt = 0;
+ int data;
+ int consumed;
+ size_t ret = 0;
+
+ struct qpnp_flash_led_buffer *log = file->private_data;
+ struct qpnp_flash_led *led;
+ char *kbuf;
+
+ if (!log) {
+ pr_err("error: file private data is NULL\n");
+ return -EFAULT;
+ }
+ led = log->led;
+
+ mutex_lock(&log->debugfs_lock);
+ kbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kbuf) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
+
+ ret = copy_from_user(kbuf, buf, count);
+ if (ret == count) {
+ pr_err("failed to copy data from user\n");
+ ret = -EFAULT;
+ goto free_buf;
+ }
+
+ count -= ret;
+ *ppos += count;
+ kbuf[count] = '\0';
+ val = kbuf;
+ while (sscanf(kbuf + pos, "%i%n", &data, &consumed) == 1) {
+ pos += consumed; /* skip past the token just parsed */
+ val[cnt++] = data & 0xff;
+ }
+
+ if (!cnt)
+ goto free_buf;
+
+ ret = count;
+ if (*val == 1)
+ led->strobe_debug = true;
+ else
+ led->strobe_debug = false;
+
+free_buf:
+ kfree(kbuf);
+unlock_mutex:
+ mutex_unlock(&log->debugfs_lock);
+ return ret;
+}
+
+static ssize_t flash_led_dfs_dbg_enable(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ u8 *val;
+ int pos = 0;
+ int cnt = 0;
+ int data;
+ int consumed;
+ size_t ret = 0;
+ struct qpnp_flash_led_buffer *log = file->private_data;
+ struct qpnp_flash_led *led;
+ char *kbuf;
+
+ if (!log) {
+ pr_err("error: file private data is NULL\n");
+ return -EFAULT;
+ }
+ led = log->led;
+
+ mutex_lock(&log->debugfs_lock);
+ kbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kbuf) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
+
+ ret = copy_from_user(kbuf, buf, count);
+ if (ret == count) {
+ pr_err("failed to copy data from user\n");
+ ret = -EFAULT;
+ goto free_buf;
+ }
+ count -= ret;
+ *ppos += count;
+ kbuf[count] = '\0';
+ val = kbuf;
+ while (sscanf(kbuf + pos, "%i%n", &data, &consumed) == 1) {
+ pos += consumed; /* skip past the token just parsed */
+ val[cnt++] = data & 0xff;
+ }
+
+ if (!cnt)
+ goto free_buf;
+
+ ret = count;
+ if (*val == 1)
+ led->dbg_feature_en = true;
+ else
+ led->dbg_feature_en = false;
+
+free_buf:
+ kfree(kbuf);
+unlock_mutex:
+ mutex_unlock(&log->debugfs_lock);
+ return ret;
+}
+
+static const struct file_operations flash_led_dfs_latched_reg_fops = {
+ .open = flash_led_dfs_open,
+ .release = flash_led_dfs_close,
+ .read = flash_led_dfs_latched_reg_read,
+};
+
+static const struct file_operations flash_led_dfs_strobe_reg_fops = {
+ .open = flash_led_dfs_open,
+ .release = flash_led_dfs_close,
+ .read = flash_led_dfs_fault_reg_read,
+ .write = flash_led_dfs_fault_reg_enable,
+};
+
+static const struct file_operations flash_led_dfs_dbg_feature_fops = {
+ .open = flash_led_dfs_open,
+ .release = flash_led_dfs_close,
+ .write = flash_led_dfs_dbg_enable,
+};
+
+static int
+qpnp_led_masked_write(struct qpnp_flash_led *led, u16 addr, u8 mask, u8 val)
+{
+ int rc;
+
+ rc = regmap_update_bits(led->regmap, addr, mask, val);
+ if (rc)
+ dev_err(&led->pdev->dev,
+ "Unable to update_bits to addr=%x, rc(%d)\n", addr, rc);
+
+ dev_dbg(&led->pdev->dev, "Write 0x%02X to addr 0x%02X\n", val, addr);
+
+ return rc;
+}
+
+static int qpnp_flash_led_get_allowed_die_temp_curr(struct qpnp_flash_led *led,
+ int64_t die_temp_degc)
+{
+ int die_temp_curr_ma;
+
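+ /* die_temp_threshold_degc[] is ordered from hottest to coolest */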
+ if (die_temp_degc >= led->pdata->die_temp_threshold_degc[0])
+ die_temp_curr_ma = 0;
+ else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[1])
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[0];
+ else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[2])
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[1];
+ else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[3])
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[2];
+ else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[4])
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[3];
+ else
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[4];
+
+ return die_temp_curr_ma;
+}
+
+static int64_t qpnp_flash_led_get_die_temp(struct qpnp_flash_led *led)
+{
+ struct qpnp_vadc_result die_temp_result;
+ int rc;
+
+ rc = qpnp_vadc_read(led->vadc_dev, SPARE2, &die_temp_result);
+ if (rc) {
+ pr_err("failed to read the die temp\n");
+ return -EINVAL;
+ }
+
+ return die_temp_result.physical;
+}
+
+static int qpnp_get_pmic_revid(struct qpnp_flash_led *led)
+{
+ struct device_node *revid_dev_node;
+
+ revid_dev_node = of_parse_phandle(led->pdev->dev.of_node,
+ "qcom,pmic-revid", 0);
+ if (!revid_dev_node) {
+ dev_err(&led->pdev->dev,
+ "qcom,pmic-revid property missing\n");
+ return -EINVAL;
+ }
+
+ led->revid_data = get_revid_data(revid_dev_node);
+ if (IS_ERR(led->revid_data)) {
+ pr_err("Couldn't get revid data rc = %ld\n",
+ PTR_ERR(led->revid_data));
+ return PTR_ERR(led->revid_data);
+ }
+
+ return 0;
+}
+
+static int
+qpnp_flash_led_get_max_avail_current(struct flash_node_data *flash_node,
+ struct qpnp_flash_led *led)
+{
+ union power_supply_propval prop;
+ int64_t chg_temp_milidegc, die_temp_degc;
+ int max_curr_avail_ma = 2000;
+ int allowed_die_temp_curr_ma = 2000;
+ int rc;
+
+ if (led->pdata->power_detect_en) {
+ if (!led->battery_psy) {
+ dev_err(&led->pdev->dev,
+ "Failed to query power supply\n");
+ return -EINVAL;
+ }
+
+ /*
+ * When charging is enabled, enforce this new enablement
+ * sequence to reduce fuel gauge reading resolution.
+ */
+ if (led->charging_enabled) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE, FLASH_MODULE_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Module enable reg write failed\n");
+ return -EINVAL;
+ }
+
+ usleep_range(FLASH_LED_CURRENT_READING_DELAY_MIN,
+ FLASH_LED_CURRENT_READING_DELAY_MAX);
+ }
+
+ power_supply_get_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_CURRENT_MAX, &prop);
+ if (!prop.intval) {
+ dev_err(&led->pdev->dev,
+ "battery too low for flash\n");
+ return -EINVAL;
+ }
+
+ max_curr_avail_ma = (prop.intval / FLASH_LED_UA_PER_MA);
+ }
+
+ /*
+ * When thermal mitigation is available, this logic will execute to
+ * derate current based upon the PMIC die temperature.
+ */
+ if (led->pdata->die_current_derate_en) {
+ chg_temp_milidegc = qpnp_flash_led_get_die_temp(led);
+ if (chg_temp_milidegc < 0)
+ return -EINVAL;
+
+ die_temp_degc = div_s64(chg_temp_milidegc, 1000);
+ allowed_die_temp_curr_ma =
+ qpnp_flash_led_get_allowed_die_temp_curr(led,
+ die_temp_degc);
+ if (allowed_die_temp_curr_ma < 0)
+ return -EINVAL;
+ }
+
+ max_curr_avail_ma = (max_curr_avail_ma >= allowed_die_temp_curr_ma)
+ ? allowed_die_temp_curr_ma : max_curr_avail_ma;
+
+ return max_curr_avail_ma;
+}
+
+static ssize_t qpnp_flash_led_die_temp_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qpnp_flash_led *led;
+ struct flash_node_data *flash_node;
+ unsigned long val;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+
+ /* '0' disables the die_temp feature; non-zero enables it */
+ if (val == 0)
+ led->pdata->die_current_derate_en = false;
+ else
+ led->pdata->die_current_derate_en = true;
+
+ return count;
+}
+
+static ssize_t qpnp_led_strobe_type_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct flash_node_data *flash_node;
+ unsigned long state;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret = -EINVAL;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+
+ /* '0' for sw strobe; '1' for hw strobe */
+ if (state == 1)
+ flash_node->trigger |= FLASH_LED_STROBE_TYPE_HW;
+ else
+ flash_node->trigger &= ~FLASH_LED_STROBE_TYPE_HW;
+
+ return count;
+}
+
+static ssize_t qpnp_flash_led_dump_regs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qpnp_flash_led *led;
+ struct flash_node_data *flash_node;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ int rc, i, count = 0;
+ u16 addr;
+ uint val;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+ for (i = 0; i < ARRAY_SIZE(qpnp_flash_led_ctrl_dbg_regs); i++) {
+ addr = led->base + qpnp_flash_led_ctrl_dbg_regs[i];
+ rc = regmap_read(led->regmap, addr, &val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Unable to read from addr=%x, rc(%d)\n",
+ addr, rc);
+ return -EINVAL;
+ }
+
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "REG_0x%x = 0x%02x\n", addr, val);
+
+ if (count >= PAGE_SIZE)
+ return PAGE_SIZE - 1;
+ }
+
+ return count;
+}
+
+static ssize_t qpnp_flash_led_current_derate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qpnp_flash_led *led;
+ struct flash_node_data *flash_node;
+ unsigned long val;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+
+ /* '0' disables the derate feature; non-zero enables it */
+ if (val == 0)
+ led->pdata->power_detect_en = false;
+ else
+ led->pdata->power_detect_en = true;
+
+ return count;
+}
+
+static ssize_t qpnp_flash_led_max_current_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qpnp_flash_led *led;
+ struct flash_node_data *flash_node;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ int max_curr_avail_ma = 0;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+
+ if (led->flash_node[0].flash_on)
+ max_curr_avail_ma += led->flash_node[0].max_current;
+ if (led->flash_node[1].flash_on)
+ max_curr_avail_ma += led->flash_node[1].max_current;
+
+ if (led->pdata->power_detect_en ||
+ led->pdata->die_current_derate_en) {
+ max_curr_avail_ma =
+ qpnp_flash_led_get_max_avail_current(flash_node, led);
+
+ if (max_curr_avail_ma < 0)
+ return -EINVAL;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", max_curr_avail_ma);
+}
+
+static struct device_attribute qpnp_flash_led_attrs[] = {
+ __ATTR(strobe, 0664, NULL, qpnp_led_strobe_type_store),
+ __ATTR(reg_dump, 0664, qpnp_flash_led_dump_regs_show, NULL),
+ __ATTR(enable_current_derate, 0664, NULL,
+ qpnp_flash_led_current_derate_store),
+ __ATTR(max_allowed_current, 0664, qpnp_flash_led_max_current_show,
+ NULL),
+ __ATTR(enable_die_temp_current_derate, 0664, NULL,
+ qpnp_flash_led_die_temp_store),
+};
+
+static int qpnp_flash_led_get_thermal_derate_rate(const char *rate)
+{
+ /*
+ * Return 5% derate as the default if the user specifies
+ * an unsupported value.
+ */
+ if (strcmp(rate, "1_PERCENT") == 0)
+ return RATE_1_PERCENT;
+ else if (strcmp(rate, "1P25_PERCENT") == 0)
+ return RATE_1P25_PERCENT;
+ else if (strcmp(rate, "2_PERCENT") == 0)
+ return RATE_2_PERCENT;
+ else if (strcmp(rate, "2P5_PERCENT") == 0)
+ return RATE_2P5_PERCENT;
+ else if (strcmp(rate, "5_PERCENT") == 0)
+ return RATE_5_PERCENT;
+ else
+ return RATE_5_PERCENT;
+}
+
+static int qpnp_flash_led_get_ramp_step(const char *step)
+{
+ /*
+ * Return a 27 us step as the default if the user specifies
+ * an unsupported value.
+ */
+ if (strcmp(step, "0P2_US") == 0)
+ return RAMP_STEP_0P2_US;
+ else if (strcmp(step, "0P4_US") == 0)
+ return RAMP_STEP_0P4_US;
+ else if (strcmp(step, "0P8_US") == 0)
+ return RAMP_STEP_0P8_US;
+ else if (strcmp(step, "1P6_US") == 0)
+ return RAMP_STEP_1P6_US;
+ else if (strcmp(step, "3P3_US") == 0)
+ return RAMP_STEP_3P3_US;
+ else if (strcmp(step, "6P7_US") == 0)
+ return RAMP_STEP_6P7_US;
+ else if (strcmp(step, "13P5_US") == 0)
+ return RAMP_STEP_13P5_US;
+ else
+ return RAMP_STEP_27US;
+}
+
+static u8 qpnp_flash_led_get_droop_debounce_time(u8 val)
+{
+ /*
+ * Return 10 us as the default if the user specifies
+ * an unsupported value.
+ */
+ switch (val) {
+ case 0:
+ return 0;
+ case 10:
+ return 1;
+ case 32:
+ return 2;
+ case 64:
+ return 3;
+ default:
+ return 1;
+ }
+}
+
+static u8 qpnp_flash_led_get_startup_dly(u8 val)
+{
+ /*
+ * Return 128 us as the default if the user specifies
+ * an unsupported value.
+ */
+ switch (val) {
+ case 10:
+ return 0;
+ case 32:
+ return 1;
+ case 64:
+ return 2;
+ case 128:
+ return 3;
+ default:
+ return 3;
+ }
+}
+
+static int
+qpnp_flash_led_get_peripheral_type(struct qpnp_flash_led *led)
+{
+ int rc;
+ uint val;
+
+ rc = regmap_read(led->regmap,
+ FLASH_LED_PERIPHERAL_SUBTYPE(led->base), &val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Unable to read peripheral subtype\n");
+ return -EINVAL;
+ }
+
+ return val;
+}
+
+static int qpnp_flash_led_module_disable(struct qpnp_flash_led *led,
+ struct flash_node_data *flash_node)
+{
+ union power_supply_propval psy_prop;
+ int rc;
+ uint val, tmp;
+
+ rc = regmap_read(led->regmap, FLASH_LED_STROBE_CTRL(led->base), &val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Unable to read strobe reg\n");
+ return -EINVAL;
+ }
+
+ tmp = (~flash_node->trigger) & val;
+ if (!tmp) {
+ if (flash_node->type == TORCH) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_UNLOCK_SECURE(led->base),
+ FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Secure reg write failed\n");
+ return -EINVAL;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_TORCH(led->base),
+ FLASH_TORCH_MASK, FLASH_LED_TORCH_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Torch reg write failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (led->battery_psy &&
+ led->revid_data->pmic_subtype == PMI8996_SUBTYPE &&
+ !led->revid_data->rev3) {
+ psy_prop.intval = false;
+ rc = power_supply_set_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_TRIGGER,
+ &psy_prop);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Failed to enble charger i/p current limit\n");
+ return -EINVAL;
+ }
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE_MASK,
+ FLASH_LED_MODULE_CTRL_DEFAULT);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Module disable failed\n");
+ return -EINVAL;
+ }
+
+ if (led->pinctrl) {
+ rc = pinctrl_select_state(led->pinctrl,
+ led->gpio_state_suspend);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "failed to disable GPIO\n");
+ return -EINVAL;
+ }
+ led->gpio_enabled = false;
+ }
+
+ if (led->battery_psy) {
+ psy_prop.intval = false;
+ rc = power_supply_set_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_ACTIVE,
+ &psy_prop);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Failed to setup OTG pulse skip enable\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (flash_node->trigger & FLASH_LED0_TRIGGER) {
+ rc = qpnp_led_masked_write(led,
+ led->current_addr,
+ FLASH_CURRENT_MASK, 0x00);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "current register write failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (flash_node->trigger & FLASH_LED1_TRIGGER) {
+ rc = qpnp_led_masked_write(led,
+ led->current2_addr,
+ FLASH_CURRENT_MASK, 0x00);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "current register write failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (flash_node->id == FLASH_LED_SWITCH)
+ flash_node->trigger &= FLASH_LED_STROBE_TYPE_HW;
+
+ return 0;
+}
+
+static enum
+led_brightness qpnp_flash_led_brightness_get(struct led_classdev *led_cdev)
+{
+ return led_cdev->brightness;
+}
+
+static int flash_regulator_parse_dt(struct qpnp_flash_led *led,
+ struct flash_node_data *flash_node)
+{
+ int i = 0, rc;
+ struct device_node *node = flash_node->cdev.dev->of_node;
+ struct device_node *temp = NULL;
+ const char *temp_string;
+ u32 val;
+
+ flash_node->reg_data = devm_kzalloc(&led->pdev->dev,
+ sizeof(struct flash_regulator_data *) *
+ flash_node->num_regulators,
+ GFP_KERNEL);
+ if (!flash_node->reg_data) {
+ dev_err(&led->pdev->dev,
+ "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ for_each_child_of_node(node, temp) {
+ rc = of_property_read_string(temp, "regulator-name",
+ &temp_string);
+ if (!rc)
+ flash_node->reg_data[i].reg_name = temp_string;
+ else {
+ dev_err(&led->pdev->dev,
+ "Unable to read regulator name\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(temp, "max-voltage", &val);
+ if (!rc) {
+ flash_node->reg_data[i].max_volt_uv = val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read max voltage\n");
+ return rc;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
+static int flash_regulator_setup(struct qpnp_flash_led *led,
+ struct flash_node_data *flash_node, bool on)
+{
+ int i, rc = 0;
+
+ if (on == false) {
+ i = flash_node->num_regulators;
+ goto error_regulator_setup;
+ }
+
+ for (i = 0; i < flash_node->num_regulators; i++) {
+ flash_node->reg_data[i].regs =
+ regulator_get(flash_node->cdev.dev,
+ flash_node->reg_data[i].reg_name);
+ if (IS_ERR(flash_node->reg_data[i].regs)) {
+ rc = PTR_ERR(flash_node->reg_data[i].regs);
+ dev_err(&led->pdev->dev,
+ "Failed to get regulator\n");
+ goto error_regulator_setup;
+ }
+
+ if (regulator_count_voltages(flash_node->reg_data[i].regs)
+ > 0) {
+ rc = regulator_set_voltage(flash_node->reg_data[i].regs,
+ flash_node->reg_data[i].max_volt_uv,
+ flash_node->reg_data[i].max_volt_uv);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "regulator set voltage failed\n");
+ regulator_put(flash_node->reg_data[i].regs);
+ goto error_regulator_setup;
+ }
+ }
+ }
+
+ return rc;
+
+error_regulator_setup:
+ while (i--) {
+ if (regulator_count_voltages(flash_node->reg_data[i].regs)
+ > 0) {
+ regulator_set_voltage(flash_node->reg_data[i].regs,
+ 0, flash_node->reg_data[i].max_volt_uv);
+ }
+
+ regulator_put(flash_node->reg_data[i].regs);
+ }
+
+ return rc;
+}
+
+static int flash_regulator_enable(struct qpnp_flash_led *led,
+ struct flash_node_data *flash_node, bool on)
+{
+ int i, rc = 0;
+
+ if (on == false) {
+ i = flash_node->num_regulators;
+ goto error_regulator_enable;
+ }
+
+ for (i = 0; i < flash_node->num_regulators; i++) {
+ rc = regulator_enable(flash_node->reg_data[i].regs);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "regulator enable failed\n");
+ goto error_regulator_enable;
+ }
+ }
+
+ return rc;
+
+error_regulator_enable:
+ while (i--)
+ regulator_disable(flash_node->reg_data[i].regs);
+
+ return rc;
+}
+
+int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
+ int *max_current)
+{
+ struct led_classdev *led_cdev = trigger_to_lcdev(trig);
+ struct flash_node_data *flash_node;
+ struct qpnp_flash_led *led;
+ int rc;
+
+ if (!led_cdev) {
+ pr_err("Invalid led_trigger provided\n");
+ return -EINVAL;
+ }
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+
+ if (!(options & FLASH_LED_PREPARE_OPTIONS_MASK)) {
+ dev_err(&led->pdev->dev, "Invalid options %d\n", options);
+ return -EINVAL;
+ }
+
+ if (options & ENABLE_REGULATOR) {
+ rc = flash_regulator_enable(led, flash_node, true);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "enable regulator failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ if (options & DISABLE_REGULATOR) {
+ rc = flash_regulator_enable(led, flash_node, false);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "disable regulator failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ if (options & QUERY_MAX_CURRENT) {
+ rc = qpnp_flash_led_get_max_avail_current(flash_node, led);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "query max current failed, rc=%d\n", rc);
+ return rc;
+ }
+ *max_current = rc;
+ }
+
+ return 0;
+}
+
+static void qpnp_flash_led_work(struct work_struct *work)
+{
+ struct flash_node_data *flash_node = container_of(work,
+ struct flash_node_data, work);
+ struct qpnp_flash_led *led = dev_get_drvdata(&flash_node->pdev->dev);
+ union power_supply_propval psy_prop;
+ int rc, brightness = flash_node->cdev.brightness;
+ int max_curr_avail_ma = 0;
+ int total_curr_ma = 0;
+ int i;
+ u8 val;
+ uint temp;
+
+ mutex_lock(&led->flash_led_lock);
+
+ if (!brightness)
+ goto turn_off;
+
+ if (led->open_fault) {
+ dev_err(&led->pdev->dev, "Open fault detected\n");
+ mutex_unlock(&led->flash_led_lock);
+ return;
+ }
+
+ if (!flash_node->flash_on && flash_node->num_regulators > 0) {
+ rc = flash_regulator_enable(led, flash_node, true);
+ if (rc) {
+ mutex_unlock(&led->flash_led_lock);
+ return;
+ }
+ }
+
+ if (!led->gpio_enabled && led->pinctrl) {
+ rc = pinctrl_select_state(led->pinctrl,
+ led->gpio_state_active);
+ if (rc) {
+ dev_err(&led->pdev->dev, "failed to enable GPIO\n");
+ goto error_enable_gpio;
+ }
+ led->gpio_enabled = true;
+ }
+
+ if (led->dbg_feature_en) {
+ rc = qpnp_led_masked_write(led,
+ INT_SET_TYPE(led->base),
+ FLASH_STATUS_REG_MASK, 0x1F);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "INT_SET_TYPE write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ IN_POLARITY_HIGH(led->base),
+ FLASH_STATUS_REG_MASK, 0x1F);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "IN_POLARITY_HIGH write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ INT_EN_SET(led->base),
+ FLASH_STATUS_REG_MASK, 0x1F);
+ if (rc) {
+ dev_err(&led->pdev->dev, "INT_EN_SET write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ INT_LATCHED_CLR(led->base),
+ FLASH_STATUS_REG_MASK, 0x1F);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "INT_LATCHED_CLR write failed\n");
+ goto exit_flash_led_work;
+ }
+ }
+
+ if (led->flash_node[led->num_leds - 1].id == FLASH_LED_SWITCH &&
+ flash_node->id != FLASH_LED_SWITCH) {
+ led->flash_node[led->num_leds - 1].trigger |=
+ (0x80 >> flash_node->id);
+ if (flash_node->id == FLASH_LED_0)
+ led->flash_node[led->num_leds - 1].prgm_current =
+ flash_node->prgm_current;
+ else if (flash_node->id == FLASH_LED_1)
+ led->flash_node[led->num_leds - 1].prgm_current2 =
+ flash_node->prgm_current;
+ }
+
+ if (flash_node->type == TORCH) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_UNLOCK_SECURE(led->base),
+ FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Secure reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_TORCH(led->base),
+ FLASH_TORCH_MASK, FLASH_LED_TORCH_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Torch reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ if (flash_node->id == FLASH_LED_SWITCH) {
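+ /* Scale the programmed current (mA) to the 4-bit torch level */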
+ val = (u8)(flash_node->prgm_current *
+ FLASH_TORCH_MAX_LEVEL
+ / flash_node->max_current);
+ rc = qpnp_led_masked_write(led,
+ led->current_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Torch reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ val = (u8)(flash_node->prgm_current2 *
+ FLASH_TORCH_MAX_LEVEL
+ / flash_node->max_current);
+ rc = qpnp_led_masked_write(led,
+ led->current2_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Torch reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ } else {
+ val = (u8)(flash_node->prgm_current *
+ FLASH_TORCH_MAX_LEVEL /
+ flash_node->max_current);
+ if (flash_node->id == FLASH_LED_0) {
+ rc = qpnp_led_masked_write(led,
+ led->current_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ } else {
+ rc = qpnp_led_masked_write(led,
+ led->current2_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ }
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_MAX_CURRENT(led->base),
+ FLASH_CURRENT_MASK, FLASH_TORCH_MAX_LEVEL);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Max current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE_MASK, FLASH_MODULE_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Module enable reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ if (led->pdata->hdrm_sns_ch0_en ||
+ led->pdata->hdrm_sns_ch1_en) {
+ if (flash_node->id == FLASH_LED_SWITCH) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ flash_node->trigger &
+ FLASH_LED0_TRIGGER ?
+ FLASH_LED_HDRM_SNS_ENABLE :
+ FLASH_LED_HDRM_SNS_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense enable failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ flash_node->trigger &
+ FLASH_LED1_TRIGGER ?
+ FLASH_LED_HDRM_SNS_ENABLE :
+ FLASH_LED_HDRM_SNS_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense enable failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->id == FLASH_LED_0) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->id == FLASH_LED_1) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_led_work;
+ }
+ }
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_STROBE_CTRL(led->base),
+ (flash_node->id == FLASH_LED_SWITCH ? FLASH_STROBE_MASK
+ | FLASH_LED_STROBE_TYPE_HW
+ : flash_node->trigger |
+ FLASH_LED_STROBE_TYPE_HW),
+ flash_node->trigger);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Strobe reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->type == FLASH) {
+ if (flash_node->trigger & FLASH_LED0_TRIGGER)
+ max_curr_avail_ma += flash_node->max_current;
+ if (flash_node->trigger & FLASH_LED1_TRIGGER)
+ max_curr_avail_ma += flash_node->max_current;
+
+ psy_prop.intval = true;
+ rc = power_supply_set_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_ACTIVE,
+ &psy_prop);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Failed to setup OTG pulse skip enable\n");
+ goto exit_flash_led_work;
+ }
+
+ if (led->pdata->power_detect_en ||
+ led->pdata->die_current_derate_en) {
+ if (led->battery_psy) {
+ power_supply_get_property(led->battery_psy,
+ POWER_SUPPLY_PROP_STATUS,
+ &psy_prop);
+ if (psy_prop.intval < 0) {
+ dev_err(&led->pdev->dev,
+ "Invalid battery status\n");
+ goto exit_flash_led_work;
+ }
+
+ if (psy_prop.intval ==
+ POWER_SUPPLY_STATUS_CHARGING)
+ led->charging_enabled = true;
+ else if (psy_prop.intval ==
+ POWER_SUPPLY_STATUS_DISCHARGING
+ || psy_prop.intval ==
+ POWER_SUPPLY_STATUS_NOT_CHARGING)
+ led->charging_enabled = false;
+ }
+ max_curr_avail_ma =
+ qpnp_flash_led_get_max_avail_current
+ (flash_node, led);
+ if (max_curr_avail_ma < 0) {
+ dev_err(&led->pdev->dev,
+ "Failed to get max avail curr\n");
+ goto exit_flash_led_work;
+ }
+ }
+
+ if (flash_node->id == FLASH_LED_SWITCH) {
+ if (flash_node->trigger & FLASH_LED0_TRIGGER)
+ total_curr_ma += flash_node->prgm_current;
+ if (flash_node->trigger & FLASH_LED1_TRIGGER)
+ total_curr_ma += flash_node->prgm_current2;
+
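+ /*
+  * If the battery cannot supply the combined request, derate both
+  * channel currents proportionally so their sum fits the budget.
+  */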
+ if (max_curr_avail_ma < total_curr_ma) {
+ flash_node->prgm_current =
+ (flash_node->prgm_current *
+ max_curr_avail_ma) / total_curr_ma;
+ flash_node->prgm_current2 =
+ (flash_node->prgm_current2 *
+ max_curr_avail_ma) / total_curr_ma;
+ }
+
+ val = (u8)(flash_node->prgm_current *
+ FLASH_MAX_LEVEL / flash_node->max_current);
+ rc = qpnp_led_masked_write(led,
+ led->current_addr, FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Current register write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ val = (u8)(flash_node->prgm_current2 *
+ FLASH_MAX_LEVEL / flash_node->max_current);
+ rc = qpnp_led_masked_write(led,
+ led->current2_addr, FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Current register write failed\n");
+ goto exit_flash_led_work;
+ }
+ } else {
+ if (max_curr_avail_ma < flash_node->prgm_current) {
+ dev_err(&led->pdev->dev,
+ "battery only supprots %d mA\n",
+ max_curr_avail_ma);
+ flash_node->prgm_current =
+ (u16)max_curr_avail_ma;
+ }
+
+ val = (u8)(flash_node->prgm_current *
+ FLASH_MAX_LEVEL
+ / flash_node->max_current);
+ if (flash_node->id == FLASH_LED_0) {
+ rc = qpnp_led_masked_write(
+ led,
+ led->current_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->id == FLASH_LED_1) {
+ rc = qpnp_led_masked_write(
+ led,
+ led->current2_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ }
+ }
+
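+ /*
+  * Encode the flash duration as a safety-timer code: one step per
+  * FLASH_DURATION_DIVIDER, offset by one step.
+  */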
+ val = (u8)((flash_node->duration - FLASH_DURATION_DIVIDER)
+ / FLASH_DURATION_DIVIDER);
+ rc = qpnp_led_masked_write(led,
+ FLASH_SAFETY_TIMER(led->base),
+ FLASH_SAFETY_TIMER_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Safety timer reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_MAX_CURRENT(led->base),
+ FLASH_CURRENT_MASK, FLASH_MAX_LEVEL);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Max current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ if (!led->charging_enabled) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE, FLASH_MODULE_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Module enable reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ usleep_range(FLASH_RAMP_UP_DELAY_US_MIN,
+ FLASH_RAMP_UP_DELAY_US_MAX);
+ }
+
+ if (led->revid_data->pmic_subtype == PMI8996_SUBTYPE &&
+ !led->revid_data->rev3) {
+ rc = power_supply_set_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_TRIGGER,
+ &psy_prop);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Failed to disable charger i/p curr limit\n");
+ goto exit_flash_led_work;
+ }
+ }
+
+ if (led->pdata->hdrm_sns_ch0_en ||
+ led->pdata->hdrm_sns_ch1_en) {
+ if (flash_node->id == FLASH_LED_SWITCH) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ (flash_node->trigger &
+ FLASH_LED0_TRIGGER ?
+ FLASH_LED_HDRM_SNS_ENABLE :
+ FLASH_LED_HDRM_SNS_DISABLE));
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense enable failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ (flash_node->trigger &
+ FLASH_LED1_TRIGGER ?
+ FLASH_LED_HDRM_SNS_ENABLE :
+ FLASH_LED_HDRM_SNS_DISABLE));
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense enable failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->id == FLASH_LED_0) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->id == FLASH_LED_1) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_led_work;
+ }
+ }
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_STROBE_CTRL(led->base),
+ (flash_node->id == FLASH_LED_SWITCH ? FLASH_STROBE_MASK
+ | FLASH_LED_STROBE_TYPE_HW
+ : flash_node->trigger |
+ FLASH_LED_STROBE_TYPE_HW),
+ flash_node->trigger);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Strobe reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ if (led->strobe_debug && led->dbg_feature_en) {
+ udelay(2000);
+ rc = regmap_read(led->regmap,
+ FLASH_LED_FAULT_STATUS(led->base),
+ &temp);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Unable to read from addr= %x, rc(%d)\n",
+ FLASH_LED_FAULT_STATUS(led->base), rc);
+ goto exit_flash_led_work;
+ }
+ led->fault_reg = temp;
+ }
+ } else {
+ pr_err("Both Torch and Flash cannot be select at same time\n");
+ for (i = 0; i < led->num_leds; i++)
+ led->flash_node[i].flash_on = false;
+ goto turn_off;
+ }
+
+ flash_node->flash_on = true;
+ mutex_unlock(&led->flash_led_lock);
+
+ return;
+
+turn_off:
+ if (led->flash_node[led->num_leds - 1].id == FLASH_LED_SWITCH &&
+ flash_node->id != FLASH_LED_SWITCH)
+ led->flash_node[led->num_leds - 1].trigger &=
+ ~(0x80 >> flash_node->id);
+ if (flash_node->type == TORCH) {
+ /*
+ * Checking LED fault status detects hardware open fault.
+ * If fault occurs, all subsequent LED enablement requests
+ * will be rejected to protect hardware.
+ */
+ rc = regmap_read(led->regmap,
+ FLASH_LED_FAULT_STATUS(led->base), &temp);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Failed to read out fault status register\n");
+ goto exit_flash_led_work;
+ }
+
+ led->open_fault |= (temp & FLASH_LED_OPEN_FAULT_DETECTED);
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_STROBE_CTRL(led->base),
+ (flash_node->id == FLASH_LED_SWITCH ? FLASH_STROBE_MASK
+ | FLASH_LED_STROBE_TYPE_HW
+ : flash_node->trigger
+ | FLASH_LED_STROBE_TYPE_HW),
+ FLASH_LED_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Strobe disable failed\n");
+ goto exit_flash_led_work;
+ }
+
+ usleep_range(FLASH_RAMP_DN_DELAY_US_MIN, FLASH_RAMP_DN_DELAY_US_MAX);
+exit_flash_hdrm_sns:
+ if (led->pdata->hdrm_sns_ch0_en) {
+ if (flash_node->id == FLASH_LED_0 ||
+ flash_node->id == FLASH_LED_SWITCH) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_hdrm_sns;
+ }
+ }
+ }
+
+ if (led->pdata->hdrm_sns_ch1_en) {
+ if (flash_node->id == FLASH_LED_1 ||
+ flash_node->id == FLASH_LED_SWITCH) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_hdrm_sns;
+ }
+ }
+ }
+exit_flash_led_work:
+ rc = qpnp_flash_led_module_disable(led, flash_node);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Module disable failed\n");
+ goto exit_flash_led_work;
+ }
+error_enable_gpio:
+ if (flash_node->flash_on && flash_node->num_regulators > 0)
+ flash_regulator_enable(led, flash_node, false);
+
+ flash_node->flash_on = false;
+ mutex_unlock(&led->flash_led_lock);
+}
+
+static void qpnp_flash_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct flash_node_data *flash_node;
+ struct qpnp_flash_led *led;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+
+ if (value < LED_OFF) {
+ pr_err("Invalid brightness value\n");
+ return;
+ }
+
+ if (value > flash_node->cdev.max_brightness)
+ value = flash_node->cdev.max_brightness;
+
+ flash_node->cdev.brightness = value;
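+ /*
+  * When the last node is a switch, mirror the requested type and
+  * max current onto it and only stage the per-LED currents here;
+  * the register writes happen in the queued work.
+  */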
+ if (led->flash_node[led->num_leds - 1].id ==
+ FLASH_LED_SWITCH) {
+ if (flash_node->type == TORCH)
+ led->flash_node[led->num_leds - 1].type = TORCH;
+ else if (flash_node->type == FLASH)
+ led->flash_node[led->num_leds - 1].type = FLASH;
+
+ led->flash_node[led->num_leds - 1].max_current
+ = flash_node->max_current;
+
+ if (flash_node->id == FLASH_LED_0 ||
+ flash_node->id == FLASH_LED_1) {
+ if (value < FLASH_LED_MIN_CURRENT_MA && value != 0)
+ value = FLASH_LED_MIN_CURRENT_MA;
+
+ flash_node->prgm_current = value;
+ flash_node->flash_on = value ? true : false;
+ } else if (flash_node->id == FLASH_LED_SWITCH) {
+ if (!value) {
+ flash_node->prgm_current = 0;
+ flash_node->prgm_current2 = 0;
+ }
+ }
+ } else {
+ if (value < FLASH_LED_MIN_CURRENT_MA && value != 0)
+ value = FLASH_LED_MIN_CURRENT_MA;
+ flash_node->prgm_current = value;
+ }
+
+ queue_work(led->ordered_workq, &flash_node->work);
+}
+
+static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led)
+{
+ int rc;
+ u8 val, temp_val;
+ uint val_int;
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE_MASK,
+ FLASH_LED_MODULE_CTRL_DEFAULT);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Module disable failed\n");
+ return rc;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_STROBE_CTRL(led->base),
+ FLASH_STROBE_MASK, FLASH_LED_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Strobe disable failed\n");
+ return rc;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_TMR_CTRL(led->base),
+ FLASH_TMR_MASK, FLASH_TMR_SAFETY);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "LED timer ctrl reg write failed(%d)\n", rc);
+ return rc;
+ }
+
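+ /* Convert the headroom voltage (mV) to a register code. */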
+ val = (u8)(led->pdata->headroom / FLASH_LED_HEADROOM_DIVIDER -
+ FLASH_LED_HEADROOM_OFFSET);
+ rc = qpnp_led_masked_write(led,
+ FLASH_HEADROOM(led->base),
+ FLASH_HEADROOM_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Headroom reg write failed\n");
+ return rc;
+ }
+
+ val = qpnp_flash_led_get_startup_dly(led->pdata->startup_dly);
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_STARTUP_DELAY(led->base),
+ FLASH_STARTUP_DLY_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Startup delay reg write failed\n");
+ return rc;
+ }
+
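+ /* Scale the clamp current (mA) to the register's full-scale level. */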
+ val = (u8)(led->pdata->clamp_current * FLASH_MAX_LEVEL /
+ FLASH_LED_MAX_CURRENT_MA);
+ rc = qpnp_led_masked_write(led,
+ FLASH_CLAMP_CURRENT(led->base),
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Clamp current reg write failed\n");
+ return rc;
+ }
+
+ if (led->pdata->pmic_charger_support)
+ val = FLASH_LED_FLASH_HW_VREG_OK;
+ else
+ val = FLASH_LED_FLASH_SW_VREG_OK;
+ rc = qpnp_led_masked_write(led,
+ FLASH_VREG_OK_FORCE(led->base),
+ FLASH_VREG_OK_FORCE_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "VREG OK force reg write failed\n");
+ return rc;
+ }
+
+ if (led->pdata->self_check_en)
+ val = FLASH_MODULE_ENABLE;
+ else
+ val = FLASH_LED_DISABLE;
+ rc = qpnp_led_masked_write(led,
+ FLASH_FAULT_DETECT(led->base),
+ FLASH_FAULT_DETECT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Fault detect reg write failed\n");
+ return rc;
+ }
+
+ val = 0x0;
+ val |= led->pdata->mask3_en << FLASH_LED_MASK3_ENABLE_SHIFT;
+ val |= FLASH_LED_MASK_MODULE_MASK2_ENABLE;
+ rc = qpnp_led_masked_write(led, FLASH_MASK_ENABLE(led->base),
+ FLASH_MASK_MODULE_CONTRL_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Mask module enable failed\n");
+ return rc;
+ }
+
+ rc = regmap_read(led->regmap, FLASH_PERPH_RESET_CTRL(led->base),
+ &val_int);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Unable to read from address %x, rc(%d)\n",
+ FLASH_PERPH_RESET_CTRL(led->base), rc);
+ return -EINVAL;
+ }
+ val = (u8)val_int;
+
+ if (led->pdata->follow_rb_disable) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_UNLOCK_SECURE(led->base),
+ FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Secure reg write failed\n");
+ return -EINVAL;
+ }
+
+ val |= FLASH_FOLLOW_OTST2_RB_MASK;
+ rc = qpnp_led_masked_write(led,
+ FLASH_PERPH_RESET_CTRL(led->base),
+ FLASH_FOLLOW_OTST2_RB_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "failed to reset OTST2_RB bit\n");
+ return rc;
+ }
+ } else {
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_UNLOCK_SECURE(led->base),
+ FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Secure reg write failed\n");
+ return -EINVAL;
+ }
+
+ val &= ~FLASH_FOLLOW_OTST2_RB_MASK;
+ rc = qpnp_led_masked_write(led,
+ FLASH_PERPH_RESET_CTRL(led->base),
+ FLASH_FOLLOW_OTST2_RB_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "failed to reset OTST2_RB bit\n");
+ return rc;
+ }
+ }
+
+ if (!led->pdata->thermal_derate_en)
+ val = 0x0;
+ else {
+ val = led->pdata->thermal_derate_en << 7;
+ val |= led->pdata->thermal_derate_rate << 3;
+ val |= (led->pdata->thermal_derate_threshold -
+ FLASH_LED_THERMAL_THRESHOLD_MIN) /
+ FLASH_LED_THERMAL_DEVIDER;
+ }
+ rc = qpnp_led_masked_write(led,
+ FLASH_THERMAL_DRATE(led->base),
+ FLASH_THERMAL_DERATE_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Thermal derate reg write failed\n");
+ return rc;
+ }
+
+ if (!led->pdata->current_ramp_en)
+ val = 0x0;
+ else {
+ val = led->pdata->current_ramp_en << 7;
+ val |= led->pdata->ramp_up_step << 3;
+ val |= led->pdata->ramp_dn_step;
+ }
+ rc = qpnp_led_masked_write(led,
+ FLASH_CURRENT_RAMP(led->base),
+ FLASH_CURRENT_RAMP_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Current ramp reg write failed\n");
+ return rc;
+ }
+
+ if (!led->pdata->vph_pwr_droop_en)
+ val = 0x0;
+ else {
+ val = led->pdata->vph_pwr_droop_en << 7;
+ val |= ((led->pdata->vph_pwr_droop_threshold -
+ FLASH_LED_VPH_DROOP_THRESHOLD_MIN_MV) /
+ FLASH_LED_VPH_DROOP_THRESHOLD_DIVIDER) << 4;
+ temp_val =
+ qpnp_flash_led_get_droop_debounce_time(
+ led->pdata->vph_pwr_droop_debounce_time);
+ if (temp_val == 0xFF) {
+ dev_err(&led->pdev->dev, "Invalid debounce time\n");
+ return -EINVAL;
+ }
+
+ val |= temp_val;
+ }
+ rc = qpnp_led_masked_write(led,
+ FLASH_VPH_PWR_DROOP(led->base),
+ FLASH_VPH_PWR_DROOP_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "VPH PWR droop reg write failed\n");
+ return rc;
+ }
+
+ led->battery_psy = power_supply_get_by_name("battery");
+ if (!led->battery_psy) {
+ dev_err(&led->pdev->dev,
+ "Failed to get battery power supply\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
+ struct flash_node_data *flash_node)
+{
+ const char *temp_string;
+ struct device_node *node = flash_node->cdev.dev->of_node;
+ struct device_node *temp = NULL;
+ int rc = 0, num_regs = 0;
+ u32 val;
+
+ rc = of_property_read_string(node, "label", &temp_string);
+ if (!rc) {
+ if (strcmp(temp_string, "flash") == 0)
+ flash_node->type = FLASH;
+ else if (strcmp(temp_string, "torch") == 0)
+ flash_node->type = TORCH;
+ else if (strcmp(temp_string, "switch") == 0)
+ flash_node->type = SWITCH;
+ else {
+ dev_err(&led->pdev->dev, "Wrong flash LED type\n");
+ return -EINVAL;
+ }
+ } else if (rc < 0) {
+ dev_err(&led->pdev->dev, "Unable to read flash type\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(node, "qcom,current", &val);
+ if (!rc) {
+ if (val < FLASH_LED_MIN_CURRENT_MA)
+ val = FLASH_LED_MIN_CURRENT_MA;
+ flash_node->prgm_current = val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read current\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(node, "qcom,id", &val);
+ if (!rc)
+ flash_node->id = (u8)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read led ID\n");
+ return rc;
+ }
+
+ if (flash_node->type == SWITCH || flash_node->type == FLASH) {
+ rc = of_property_read_u32(node, "qcom,duration", &val);
+ if (!rc)
+ flash_node->duration = (u16)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read duration\n");
+ return rc;
+ }
+ }
+
+ switch (led->peripheral_type) {
+ case FLASH_SUBTYPE_SINGLE:
+ flash_node->trigger = FLASH_LED0_TRIGGER;
+ break;
+ case FLASH_SUBTYPE_DUAL:
+ if (flash_node->id == FLASH_LED_0)
+ flash_node->trigger = FLASH_LED0_TRIGGER;
+ else if (flash_node->id == FLASH_LED_1)
+ flash_node->trigger = FLASH_LED1_TRIGGER;
+ break;
+ default:
+ dev_err(&led->pdev->dev, "Invalid peripheral type\n");
+ }
+
+ while ((temp = of_get_next_child(node, temp))) {
+ if (of_find_property(temp, "regulator-name", NULL))
+ num_regs++;
+ }
+
+ if (num_regs)
+ flash_node->num_regulators = num_regs;
+
+ return rc;
+}
+
+static int qpnp_flash_led_parse_common_dt(
+ struct qpnp_flash_led *led,
+ struct device_node *node)
+{
+ int rc;
+ u32 val;
+ int temp_val;
+ const char *temp;
+
+ led->pdata->headroom = FLASH_LED_HEADROOM_DEFAULT_MV;
+ rc = of_property_read_u32(node, "qcom,headroom", &val);
+ if (!rc)
+ led->pdata->headroom = (u16)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read headroom\n");
+ return rc;
+ }
+
+ led->pdata->startup_dly = FLASH_LED_STARTUP_DELAY_DEFAULT_US;
+ rc = of_property_read_u32(node, "qcom,startup-dly", &val);
+ if (!rc)
+ led->pdata->startup_dly = (u8)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read startup delay\n");
+ return rc;
+ }
+
+ led->pdata->clamp_current = FLASH_LED_CLAMP_CURRENT_DEFAULT_MA;
+ rc = of_property_read_u32(node, "qcom,clamp-current", &val);
+ if (!rc) {
+ if (val < FLASH_LED_MIN_CURRENT_MA)
+ val = FLASH_LED_MIN_CURRENT_MA;
+ led->pdata->clamp_current = (u16)val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read clamp current\n");
+ return rc;
+ }
+
+ led->pdata->pmic_charger_support =
+ of_property_read_bool(node,
+ "qcom,pmic-charger-support");
+
+ led->pdata->self_check_en =
+ of_property_read_bool(node, "qcom,self-check-enabled");
+
+ led->pdata->thermal_derate_en =
+ of_property_read_bool(node,
+ "qcom,thermal-derate-enabled");
+
+ if (led->pdata->thermal_derate_en) {
+ led->pdata->thermal_derate_rate =
+ FLASH_LED_THERMAL_DERATE_RATE_DEFAULT_PERCENT;
+ rc = of_property_read_string(node, "qcom,thermal-derate-rate",
+ &temp);
+ if (!rc) {
+ temp_val =
+ qpnp_flash_led_get_thermal_derate_rate(temp);
+ if (temp_val < 0) {
+ dev_err(&led->pdev->dev,
+ "Invalid thermal derate rate\n");
+ return -EINVAL;
+ }
+
+ led->pdata->thermal_derate_rate = (u8)temp_val;
+ } else {
+ dev_err(&led->pdev->dev,
+ "Unable to read thermal derate rate\n");
+ return -EINVAL;
+ }
+
+ led->pdata->thermal_derate_threshold =
+ FLASH_LED_THERMAL_DERATE_THRESHOLD_DEFAULT_C;
+ rc = of_property_read_u32(node, "qcom,thermal-derate-threshold",
+ &val);
+ if (!rc)
+ led->pdata->thermal_derate_threshold = (u8)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read thermal derate threshold\n");
+ return rc;
+ }
+ }
+
+ led->pdata->current_ramp_en =
+ of_property_read_bool(node,
+ "qcom,current-ramp-enabled");
+ if (led->pdata->current_ramp_en) {
+ led->pdata->ramp_up_step = FLASH_LED_RAMP_UP_STEP_DEFAULT_US;
+ rc = of_property_read_string(node, "qcom,ramp_up_step", &temp);
+ if (!rc) {
+ temp_val = qpnp_flash_led_get_ramp_step(temp);
+ if (temp_val < 0) {
+ dev_err(&led->pdev->dev,
+ "Invalid ramp up step values\n");
+ return -EINVAL;
+ }
+ led->pdata->ramp_up_step = (u8)temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read ramp up steps\n");
+ return rc;
+ }
+
+ led->pdata->ramp_dn_step = FLASH_LED_RAMP_DN_STEP_DEFAULT_US;
+ rc = of_property_read_string(node, "qcom,ramp_dn_step", &temp);
+ if (!rc) {
+ temp_val = qpnp_flash_led_get_ramp_step(temp);
+ if (temp_val < 0) {
+ dev_err(&led->pdev->dev,
+ "Invalid ramp down step values\n");
+ return -EINVAL;
+ }
+ led->pdata->ramp_dn_step = (u8)temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read ramp down steps\n");
+ return rc;
+ }
+ }
+
+ led->pdata->vph_pwr_droop_en = of_property_read_bool(node,
+ "qcom,vph-pwr-droop-enabled");
+ if (led->pdata->vph_pwr_droop_en) {
+ led->pdata->vph_pwr_droop_threshold =
+ FLASH_LED_VPH_PWR_DROOP_THRESHOLD_DEFAULT_MV;
+ rc = of_property_read_u32(node,
+ "qcom,vph-pwr-droop-threshold", &val);
+ if (!rc) {
+ led->pdata->vph_pwr_droop_threshold = (u16)val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read VPH PWR droop threshold\n");
+ return rc;
+ }
+
+ led->pdata->vph_pwr_droop_debounce_time =
+ FLASH_LED_VPH_PWR_DROOP_DEBOUNCE_TIME_DEFAULT_US;
+ rc = of_property_read_u32(node,
+ "qcom,vph-pwr-droop-debounce-time", &val);
+ if (!rc)
+ led->pdata->vph_pwr_droop_debounce_time = (u8)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read VPH PWR droop debounce time\n");
+ return rc;
+ }
+ }
+
+ led->pdata->hdrm_sns_ch0_en = of_property_read_bool(node,
+ "qcom,headroom-sense-ch0-enabled");
+
+ led->pdata->hdrm_sns_ch1_en = of_property_read_bool(node,
+ "qcom,headroom-sense-ch1-enabled");
+
+ led->pdata->power_detect_en = of_property_read_bool(node,
+ "qcom,power-detect-enabled");
+
+ led->pdata->mask3_en = of_property_read_bool(node,
+ "qcom,otst2-module-enabled");
+
+ led->pdata->follow_rb_disable = of_property_read_bool(node,
+ "qcom,follow-otst2-rb-disabled");
+
+ led->pdata->die_current_derate_en = of_property_read_bool(node,
+ "qcom,die-current-derate-enabled");
+
+ if (led->pdata->die_current_derate_en) {
+ led->vadc_dev = qpnp_get_vadc(&led->pdev->dev, "die-temp");
+ if (IS_ERR(led->vadc_dev)) {
+ pr_err("VADC channel property Missing\n");
+ return -EINVAL;
+ }
+
+ if (of_find_property(node, "qcom,die-temp-threshold",
+ &led->pdata->temp_threshold_num)) {
+ if (led->pdata->temp_threshold_num > 0) {
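+ /*
+  * of_find_property() returned the property length in
+  * bytes; allocate that size, then convert the count to
+  * array elements before reading the values.
+  */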
+ led->pdata->die_temp_threshold_degc =
+ devm_kzalloc(&led->pdev->dev,
+ led->pdata->temp_threshold_num,
+ GFP_KERNEL);
+
+ if (led->pdata->die_temp_threshold_degc
+ == NULL) {
+ dev_err(&led->pdev->dev,
+ "failed to allocate die temp array\n");
+ return -ENOMEM;
+ }
+ led->pdata->temp_threshold_num /=
+ sizeof(unsigned int);
+
+ rc = of_property_read_u32_array(node,
+ "qcom,die-temp-threshold",
+ led->pdata->die_temp_threshold_degc,
+ led->pdata->temp_threshold_num);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "couldn't read temp threshold rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+ }
+
+ if (of_find_property(node, "qcom,die-temp-derate-current",
+ &led->pdata->temp_derate_curr_num)) {
+ if (led->pdata->temp_derate_curr_num > 0) {
+ led->pdata->die_temp_derate_curr_ma =
+ devm_kzalloc(&led->pdev->dev,
+ led->pdata->temp_derate_curr_num,
+ GFP_KERNEL);
+ if (led->pdata->die_temp_derate_curr_ma
+ == NULL) {
+ dev_err(&led->pdev->dev,
+ "failed to allocate die derate current array\n");
+ return -ENOMEM;
+ }
+ led->pdata->temp_derate_curr_num /=
+ sizeof(unsigned int);
+
+ rc = of_property_read_u32_array(node,
+ "qcom,die-temp-derate-current",
+ led->pdata->die_temp_derate_curr_ma,
+ led->pdata->temp_derate_curr_num);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "couldn't read temp limits rc =%d\n",
+ rc);
+ return rc;
+ }
+ }
+ }
+ if (led->pdata->temp_threshold_num !=
+ led->pdata->temp_derate_curr_num) {
+ pr_err("Both array size are not same\n");
+ return -EINVAL;
+ }
+ }
+
+ led->pinctrl = devm_pinctrl_get(&led->pdev->dev);
+ if (IS_ERR_OR_NULL(led->pinctrl)) {
+ dev_err(&led->pdev->dev, "Unable to acquire pinctrl\n");
+ led->pinctrl = NULL;
+ return 0;
+ }
+
+ led->gpio_state_active = pinctrl_lookup_state(led->pinctrl,
+ "flash_led_enable");
+ if (IS_ERR_OR_NULL(led->gpio_state_active)) {
+ dev_err(&led->pdev->dev, "Cannot lookup LED active state\n");
+ devm_pinctrl_put(led->pinctrl);
+ led->pinctrl = NULL;
+ return PTR_ERR(led->gpio_state_active);
+ }
+
+ led->gpio_state_suspend = pinctrl_lookup_state(led->pinctrl,
+ "flash_led_disable");
+ if (IS_ERR_OR_NULL(led->gpio_state_suspend)) {
+ dev_err(&led->pdev->dev, "Cannot lookup LED disable state\n");
+ devm_pinctrl_put(led->pinctrl);
+ led->pinctrl = NULL;
+ return PTR_ERR(led->gpio_state_suspend);
+ }
+
+ return 0;
+}
+
+static int qpnp_flash_led_probe(struct platform_device *pdev)
+{
+ struct qpnp_flash_led *led;
+ unsigned int base;
+ struct device_node *node, *temp;
+ struct dentry *root, *file;
+ int rc, i = 0, j, num_leds = 0;
+ u32 val;
+
+ root = NULL;
+ node = pdev->dev.of_node;
+ if (node == NULL) {
+ dev_info(&pdev->dev, "No flash device defined\n");
+ return -ENODEV;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node, "reg", &base);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Couldn't find reg in node = %s rc = %d\n",
+ pdev->dev.of_node->full_name, rc);
+ return rc;
+ }
+
+ led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ led->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!led->regmap) {
+ dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+ return -EINVAL;
+ }
+
+ led->base = base;
+ led->pdev = pdev;
+ led->current_addr = FLASH_LED0_CURRENT(led->base);
+ led->current2_addr = FLASH_LED1_CURRENT(led->base);
+
+ led->pdata = devm_kzalloc(&pdev->dev, sizeof(*led->pdata), GFP_KERNEL);
+ if (!led->pdata)
+ return -ENOMEM;
+
+ rc = qpnp_flash_led_get_peripheral_type(led);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Failed to get peripheral type\n");
+ return rc;
+ }
+ led->peripheral_type = (u8)rc;
+
+ rc = qpnp_flash_led_parse_common_dt(led, node);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Failed to get common config for flash LEDs\n");
+ return rc;
+ }
+
+ rc = qpnp_flash_led_init_settings(led);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to initialize flash LED\n");
+ return rc;
+ }
+
+ rc = qpnp_get_pmic_revid(led);
+ if (rc)
+ return rc;
+
+ temp = NULL;
+ while ((temp = of_get_next_child(node, temp)))
+ num_leds++;
+
+ if (!num_leds)
+ return -ECHILD;
+
+ led->flash_node = devm_kzalloc(&pdev->dev,
+ (sizeof(struct flash_node_data) * num_leds),
+ GFP_KERNEL);
+ if (!led->flash_node) {
+ dev_err(&pdev->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&led->flash_led_lock);
+
+ led->ordered_workq = alloc_ordered_workqueue("flash_led_workqueue", 0);
+ if (!led->ordered_workq) {
+ dev_err(&pdev->dev, "Failed to allocate ordered workqueue\n");
+ return -ENOMEM;
+ }
+
+ for_each_child_of_node(node, temp) {
+ led->flash_node[i].cdev.brightness_set =
+ qpnp_flash_led_brightness_set;
+ led->flash_node[i].cdev.brightness_get =
+ qpnp_flash_led_brightness_get;
+ led->flash_node[i].pdev = pdev;
+
+ INIT_WORK(&led->flash_node[i].work, qpnp_flash_led_work);
+ rc = of_property_read_string(temp, "qcom,led-name",
+ &led->flash_node[i].cdev.name);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "Unable to read flash name\n");
+ return rc;
+ }
+
+ rc = of_property_read_string(temp, "qcom,default-led-trigger",
+ &led->flash_node[i].cdev.default_trigger);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "Unable to read trigger name\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(temp, "qcom,max-current", &val);
+ if (!rc) {
+ if (val < FLASH_LED_MIN_CURRENT_MA)
+ val = FLASH_LED_MIN_CURRENT_MA;
+ led->flash_node[i].max_current = (u16)val;
+ led->flash_node[i].cdev.max_brightness = val;
+ } else {
+ dev_err(&led->pdev->dev,
+ "Unable to read max current\n");
+ return rc;
+ }
+ rc = led_classdev_register(&pdev->dev,
+ &led->flash_node[i].cdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to register led\n");
+ goto error_led_register;
+ }
+
+ led->flash_node[i].cdev.dev->of_node = temp;
+
+ rc = qpnp_flash_led_parse_each_led_dt(led, &led->flash_node[i]);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Failed to parse config for each LED\n");
+ goto error_led_register;
+ }
+
+ if (led->flash_node[i].num_regulators) {
+ rc = flash_regulator_parse_dt(led, &led->flash_node[i]);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Unable to parse regulator data\n");
+ goto error_led_register;
+ }
+
+ rc = flash_regulator_setup(led, &led->flash_node[i],
+ true);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Unable to set up regulator\n");
+ goto error_led_register;
+ }
+ }
+
+ for (j = 0; j < ARRAY_SIZE(qpnp_flash_led_attrs); j++) {
+ rc =
+ sysfs_create_file(&led->flash_node[i].cdev.dev->kobj,
+ &qpnp_flash_led_attrs[j].attr);
+ if (rc)
+ goto error_led_register;
+ }
+
+ i++;
+ }
+
+ led->num_leds = i;
+
+ root = debugfs_create_dir("flashLED", NULL);
+ if (IS_ERR_OR_NULL(root)) {
+ pr_err("Error creating top level directory err%ld",
+ (long)root);
+ if (PTR_ERR(root) == -ENODEV)
+ pr_err("debugfs is not enabled in kernel");
+ goto error_led_debugfs;
+ }
+
+ led->dbgfs_root = root;
+ file = debugfs_create_file("enable_debug", 0600, root, led,
+ &flash_led_dfs_dbg_feature_fops);
+ if (!file) {
+ pr_err("error creating 'enable_debug' entry\n");
+ goto error_led_debugfs;
+ }
+
+ file = debugfs_create_file("latched", 0600, root, led,
+ &flash_led_dfs_latched_reg_fops);
+ if (!file) {
+ pr_err("error creating 'latched' entry\n");
+ goto error_led_debugfs;
+ }
+
+ file = debugfs_create_file("strobe", 0600, root, led,
+ &flash_led_dfs_strobe_reg_fops);
+ if (!file) {
+ pr_err("error creating 'strobe' entry\n");
+ goto error_led_debugfs;
+ }
+
+ dev_set_drvdata(&pdev->dev, led);
+
+ return 0;
+
+error_led_debugfs:
+ i = led->num_leds - 1;
+ j = ARRAY_SIZE(qpnp_flash_led_attrs) - 1;
+error_led_register:
+ for (; i >= 0; i--) {
+ for (; j >= 0; j--)
+ sysfs_remove_file(&led->flash_node[i].cdev.dev->kobj,
+ &qpnp_flash_led_attrs[j].attr);
+ j = ARRAY_SIZE(qpnp_flash_led_attrs) - 1;
+ led_classdev_unregister(&led->flash_node[i].cdev);
+ }
+ debugfs_remove_recursive(root);
+ mutex_destroy(&led->flash_led_lock);
+ destroy_workqueue(led->ordered_workq);
+
+ return rc;
+}
+
+static int qpnp_flash_led_remove(struct platform_device *pdev)
+{
+ struct qpnp_flash_led *led = dev_get_drvdata(&pdev->dev);
+ int i, j;
+
+ for (i = led->num_leds - 1; i >= 0; i--) {
+ if (led->flash_node[i].reg_data) {
+ if (led->flash_node[i].flash_on)
+ flash_regulator_enable(led,
+ &led->flash_node[i], false);
+ flash_regulator_setup(led, &led->flash_node[i],
+ false);
+ }
+ for (j = 0; j < ARRAY_SIZE(qpnp_flash_led_attrs); j++)
+ sysfs_remove_file(&led->flash_node[i].cdev.dev->kobj,
+ &qpnp_flash_led_attrs[j].attr);
+ led_classdev_unregister(&led->flash_node[i].cdev);
+ }
+ debugfs_remove_recursive(led->dbgfs_root);
+ mutex_destroy(&led->flash_led_lock);
+ destroy_workqueue(led->ordered_workq);
+
+ return 0;
+}
+
+static const struct of_device_id spmi_match_table[] = {
+ { .compatible = "qcom,qpnp-flash-led",},
+ { },
+};
+
+static struct platform_driver qpnp_flash_led_driver = {
+ .driver = {
+ .name = "qcom,qpnp-flash-led",
+ .of_match_table = spmi_match_table,
+ },
+ .probe = qpnp_flash_led_probe,
+ .remove = qpnp_flash_led_remove,
+};
+
+static int __init qpnp_flash_led_init(void)
+{
+ return platform_driver_register(&qpnp_flash_led_driver);
+}
+late_initcall(qpnp_flash_led_init);
+
+static void __exit qpnp_flash_led_exit(void)
+{
+ platform_driver_unregister(&qpnp_flash_led_driver);
+}
+module_exit(qpnp_flash_led_exit);
+
+MODULE_DESCRIPTION("QPNP Flash LED driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("leds:leds-qpnp-flash");
#define QPNP_WLED_VLOOP_COMP_RES_MASK 0xF0
#define QPNP_WLED_VLOOP_COMP_RES_OVERWRITE 0x80
-#define QPNP_WLED_LOOP_COMP_RES_DFLT_AMOLED_KOHM 320
#define QPNP_WLED_LOOP_COMP_RES_STEP_KOHM 20
#define QPNP_WLED_LOOP_COMP_RES_MIN_KOHM 20
#define QPNP_WLED_LOOP_COMP_RES_MAX_KOHM 320
#define QPNP_WLED_BOOST_DUTY_MIN_NS 26
#define QPNP_WLED_BOOST_DUTY_MAX_NS 156
#define QPNP_WLED_DEF_BOOST_DUTY_NS 104
-#define QPNP_WLED_SWITCH_FREQ_MASK 0x70
-#define QPNP_WLED_SWITCH_FREQ_800_KHZ 800
-#define QPNP_WLED_SWITCH_FREQ_1600_KHZ 1600
-#define QPNP_WLED_SWITCH_FREQ_OVERWRITE 0x80
+#define QPNP_WLED_SWITCH_FREQ_MASK GENMASK(3, 0)
+#define QPNP_WLED_SWITCH_FREQ_OVERWRITE BIT(7)
#define QPNP_WLED_OVP_MASK GENMASK(1, 0)
#define QPNP_WLED_TEST4_EN_DEB_BYPASS_ILIM_BIT BIT(6)
#define QPNP_WLED_TEST4_EN_SH_FOR_SS_BIT BIT(5)
#define QPNP_WLED_ILIM_FAULT_BIT BIT(0)
#define QPNP_WLED_OVP_FAULT_BIT BIT(1)
#define QPNP_WLED_SC_FAULT_BIT BIT(2)
+#define QPNP_WLED_OVP_FLT_RT_STS_BIT BIT(1)
+
+/* QPNP_WLED_SOFTSTART_RAMP_DLY */
+#define SOFTSTART_OVERWRITE_BIT BIT(7)
+#define SOFTSTART_RAMP_DELAY_MASK GENMASK(2, 0)
/* sink registers */
#define QPNP_WLED_CURR_SINK_REG(b) (b + 0x46)
bool ovp_irq_disabled;
bool auto_calib_enabled;
bool auto_calib_done;
+ bool module_dis_perm;
ktime_t start_ovp_fault_time;
};
{
int rc;
+ if (wled->module_dis_perm)
+ return 0;
+
rc = qpnp_wled_masked_write_reg(wled,
QPNP_WLED_MODULE_EN_REG(base_addr),
QPNP_WLED_MODULE_EN_MASK,
return 0;
}
-#define AUTO_CALIB_BRIGHTNESS 16
+#define AUTO_CALIB_BRIGHTNESS 200
static int wled_auto_calibrate(struct qpnp_wled *wled)
{
int rc = 0, i;
u8 reg = 0, sink_config = 0, sink_test = 0, sink_valid = 0, int_sts;
- mutex_lock(&wled->lock);
-
- /* disable OVP IRQ */
- if (wled->ovp_irq > 0 && !wled->ovp_irq_disabled) {
- disable_irq_nosync(wled->ovp_irq);
- wled->ovp_irq_disabled = true;
- }
-
/* read configured sink configuration */
rc = qpnp_wled_read_reg(wled,
QPNP_WLED_CURR_SINK_REG(wled->sink_base), &sink_config);
goto failed_calib;
}
+ if (wled->en_cabc) {
+ for (i = 0; i < wled->max_strings; i++) {
+ reg = 0;
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_CABC_REG(wled->sink_base, i),
+ QPNP_WLED_CABC_MASK, reg);
+ if (rc < 0)
+ goto failed_calib;
+ }
+ }
+
/* disable all sinks */
rc = qpnp_wled_write_reg(wled,
QPNP_WLED_CURR_SINK_REG(wled->sink_base), 0);
goto failed_calib;
}
- rc = qpnp_wled_masked_write_reg(wled,
- QPNP_WLED_MODULE_EN_REG(wled->ctrl_base),
- QPNP_WLED_MODULE_EN_MASK,
- QPNP_WLED_MODULE_EN_MASK);
- if (rc < 0) {
- pr_err("Failed to enable WLED module rc=%d\n", rc);
- goto failed_calib;
- }
- /*
- * Delay for the WLED soft-start, check the OVP status
- * only after soft-start is complete
- */
- usleep_range(QPNP_WLED_SOFT_START_DLY_US,
- QPNP_WLED_SOFT_START_DLY_US + 1000);
-
/* iterate through the strings one by one */
for (i = 0; i < wled->max_strings; i++) {
sink_test = 1 << (QPNP_WLED_CURR_SINK_SHIFT + i);
goto failed_calib;
}
+ /* Enable the module */
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_MODULE_EN_REG(wled->ctrl_base),
+ QPNP_WLED_MODULE_EN_MASK, QPNP_WLED_MODULE_EN_MASK);
+ if (rc < 0) {
+ pr_err("Failed to enable WLED module rc=%d\n", rc);
+ goto failed_calib;
+ }
+
/* delay for WLED soft-start */
usleep_range(QPNP_WLED_SOFT_START_DLY_US,
QPNP_WLED_SOFT_START_DLY_US + 1000);
i + 1);
else
sink_valid |= sink_test;
+
+ /* Disable the module */
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_MODULE_EN_REG(wled->ctrl_base),
+ QPNP_WLED_MODULE_EN_MASK, 0);
+ if (rc < 0) {
+ pr_err("Failed to disable WLED module rc=%d\n", rc);
+ goto failed_calib;
+ }
}
if (sink_valid == sink_config) {
if (!sink_config) {
pr_warn("No valid WLED sinks found\n");
- goto failed_calib;
- }
-
- rc = qpnp_wled_masked_write_reg(wled,
- QPNP_WLED_MODULE_EN_REG(wled->ctrl_base),
- QPNP_WLED_MODULE_EN_MASK, 0);
- if (rc < 0) {
- pr_err("Failed to disable WLED module rc=%d\n", rc);
+ wled->module_dis_perm = true;
goto failed_calib;
}
/* MODULATOR_EN setting for valid sinks */
for (i = 0; i < wled->max_strings; i++) {
+ if (wled->en_cabc) {
+ reg = 1 << QPNP_WLED_CABC_SHIFT;
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_CABC_REG(wled->sink_base, i),
+ QPNP_WLED_CABC_MASK, reg);
+ if (rc < 0)
+ goto failed_calib;
+ }
+
if (sink_config & (1 << (QPNP_WLED_CURR_SINK_SHIFT + i)))
reg = (QPNP_WLED_MOD_EN << QPNP_WLED_MOD_EN_SHFT);
else
}
/* restore brightness */
- rc = qpnp_wled_set_level(wled, wled->cdev.brightness);
+ rc = qpnp_wled_set_level(wled, !wled->cdev.brightness ?
+ AUTO_CALIB_BRIGHTNESS : wled->cdev.brightness);
if (rc < 0) {
pr_err("Failed to set brightness after calibration rc=%d\n",
rc);
QPNP_WLED_SOFT_START_DLY_US + 1000);
failed_calib:
- if (wled->ovp_irq > 0 && wled->ovp_irq_disabled) {
- enable_irq(wled->ovp_irq);
- wled->ovp_irq_disabled = false;
- }
- mutex_unlock(&wled->lock);
return rc;
}
return false;
}
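+/*
+ * Run auto-calibration once at init if an OVP fault is already latched
+ * in the fault register or asserted in the real-time status.
+ */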
+static int qpnp_wled_auto_calibrate_at_init(struct qpnp_wled *wled)
+{
+ int rc;
+ u8 fault_status = 0, rt_status = 0;
+
+ if (!wled->auto_calib_enabled)
+ return 0;
+
+ rc = qpnp_wled_read_reg(wled,
+ QPNP_WLED_INT_RT_STS(wled->ctrl_base), &rt_status);
+ if (rc < 0)
+ pr_err("Failed to read RT status rc=%d\n", rc);
+
+ rc = qpnp_wled_read_reg(wled,
+ QPNP_WLED_FAULT_STATUS(wled->ctrl_base), &fault_status);
+ if (rc < 0)
+ pr_err("Failed to read fault status rc=%d\n", rc);
+
+ if ((rt_status & QPNP_WLED_OVP_FLT_RT_STS_BIT) ||
+ (fault_status & QPNP_WLED_OVP_FAULT_BIT)) {
+ mutex_lock(&wled->lock);
+ rc = wled_auto_calibrate(wled);
+ if (rc < 0)
+ pr_err("Failed auto-calibration rc=%d\n", rc);
+ else
+ wled->auto_calib_done = true;
+ mutex_unlock(&wled->lock);
+ }
+
+ return rc;
+}
+
/* ovp irq handler */
static irqreturn_t qpnp_wled_ovp_irq_handler(int irq, void *_wled)
{
if (fault_sts & QPNP_WLED_OVP_FAULT_BIT) {
if (wled->auto_calib_enabled && !wled->auto_calib_done) {
if (qpnp_wled_auto_cal_required(wled)) {
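+ /*
+  * Calibrate under wled->lock with the OVP IRQ masked;
+  * wled_auto_calibrate() no longer handles this itself.
+  */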
+ mutex_lock(&wled->lock);
+ if (wled->ovp_irq > 0 &&
+ !wled->ovp_irq_disabled) {
+ disable_irq_nosync(wled->ovp_irq);
+ wled->ovp_irq_disabled = true;
+ }
+
rc = wled_auto_calibrate(wled);
- if (rc < 0) {
+ if (rc < 0)
pr_err("Failed auto-calibration rc=%d\n",
- rc);
- return IRQ_HANDLED;
+ rc);
+ else
+ wled->auto_calib_done = true;
+
+ if (wled->ovp_irq > 0 &&
+ wled->ovp_irq_disabled) {
+ enable_irq(wled->ovp_irq);
+ wled->ovp_irq_disabled = false;
}
- wled->auto_calib_done = true;
+ mutex_unlock(&wled->lock);
}
}
}
u8 mask = 0, reg = 0;
/* Configure the LOOP COMP GM register */
- if (wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
- wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE) {
- if (wled->loop_auto_gm_en)
- reg |= QPNP_WLED_VLOOP_COMP_AUTO_GM_EN;
-
- if (wled->loop_auto_gm_thresh >
- QPNP_WLED_LOOP_AUTO_GM_THRESH_MAX)
- wled->loop_auto_gm_thresh =
- QPNP_WLED_LOOP_AUTO_GM_THRESH_MAX;
-
- reg |= wled->loop_auto_gm_thresh <<
- QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_SHIFT;
- mask |= QPNP_WLED_VLOOP_COMP_AUTO_GM_EN |
- QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_MASK;
+ if ((wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+ wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)) {
+ if (wled->disp_type_amoled) {
+ reg = 0;
+ mask |= QPNP_WLED_VLOOP_COMP_AUTO_GM_EN |
+ QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_MASK;
+ } else {
+ if (wled->loop_auto_gm_en)
+ reg |= QPNP_WLED_VLOOP_COMP_AUTO_GM_EN;
+
+ if (wled->loop_auto_gm_thresh >
+ QPNP_WLED_LOOP_AUTO_GM_THRESH_MAX)
+ wled->loop_auto_gm_thresh =
+ QPNP_WLED_LOOP_AUTO_GM_THRESH_MAX;
+
+ reg |= wled->loop_auto_gm_thresh <<
+ QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_SHIFT;
+ mask |= QPNP_WLED_VLOOP_COMP_AUTO_GM_EN |
+ QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_MASK;
+ }
}
if (wled->loop_ea_gm < QPNP_WLED_LOOP_EA_GM_MIN)
/* Configure the Soft start Ramp delay: for AMOLED - 0,for LCD - 2 */
reg = (wled->disp_type_amoled) ? 0 : 2;
- rc = qpnp_wled_write_reg(wled,
- QPNP_WLED_SOFTSTART_RAMP_DLY(wled->ctrl_base), reg);
+ mask = SOFTSTART_RAMP_DELAY_MASK;
+ if ((wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+ wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+ && wled->disp_type_amoled) {
+ reg |= SOFTSTART_OVERWRITE_BIT;
+ mask |= SOFTSTART_OVERWRITE_BIT;
+ }
+
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_SOFTSTART_RAMP_DLY(wled->ctrl_base),
+ mask, reg);
if (rc)
return rc;
return rc;
/* Configure the SWITCHING FREQ register */
- if (wled->switch_freq_khz == QPNP_WLED_SWITCH_FREQ_1600_KHZ)
- temp = QPNP_WLED_SWITCH_FREQ_1600_KHZ_CODE;
+ if (wled->switch_freq_khz == 1600)
+ reg = QPNP_WLED_SWITCH_FREQ_1600_KHZ_CODE;
else
- temp = QPNP_WLED_SWITCH_FREQ_800_KHZ_CODE;
+ reg = QPNP_WLED_SWITCH_FREQ_800_KHZ_CODE;
- rc = qpnp_wled_read_reg(wled,
- QPNP_WLED_SWITCH_FREQ_REG(wled->ctrl_base), ®);
+ /*
+ * Do not set the overwrite bit when switching frequency is selected
+ * for AMOLED. This register is in logic reset block which can cause
+ * the value to be overwritten during module enable/disable.
+ */
+ mask = QPNP_WLED_SWITCH_FREQ_MASK | QPNP_WLED_SWITCH_FREQ_OVERWRITE;
+ if (!wled->disp_type_amoled)
+ reg |= QPNP_WLED_SWITCH_FREQ_OVERWRITE;
+
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_SWITCH_FREQ_REG(wled->ctrl_base), mask, reg);
if (rc < 0)
return rc;
- reg &= QPNP_WLED_SWITCH_FREQ_MASK;
- reg |= (temp | QPNP_WLED_SWITCH_FREQ_OVERWRITE);
- rc = qpnp_wled_write_reg(wled,
- QPNP_WLED_SWITCH_FREQ_REG(wled->ctrl_base), reg);
- if (rc)
- return rc;
rc = qpnp_wled_ovp_config(wled);
if (rc < 0) {
return rc;
}
+ rc = qpnp_wled_auto_calibrate_at_init(wled);
+ if (rc < 0)
+ pr_err("Failed to auto-calibrate at init rc=%d\n", rc);
+
/* setup ovp and sc irqs */
if (wled->ovp_irq >= 0) {
rc = devm_request_threaded_irq(&wled->pdev->dev, wled->ovp_irq,
return rc;
}
- wled->loop_comp_res_kohm =
- QPNP_WLED_LOOP_COMP_RES_DFLT_AMOLED_KOHM;
+ wled->loop_comp_res_kohm = 320;
+ if (wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+ wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+ wled->loop_comp_res_kohm = 300;
+
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,loop-comp-res-kohm", &temp_val);
if (!rc) {
return rc;
}
- wled->switch_freq_khz = QPNP_WLED_SWITCH_FREQ_800_KHZ;
+ wled->switch_freq_khz = wled->disp_type_amoled ? 1600 : 800;
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,switch-freq-khz", &temp_val);
if (!rc) {
wled->pmic_rev_id->pmic_subtype, wled->pmic_rev_id->rev4);
prop = of_get_address_by_name(pdev->dev.of_node, QPNP_WLED_SINK_BASE,
- 0, 0);
+ NULL, NULL);
if (!prop) {
dev_err(&pdev->dev, "Couldnt find sink's addr rc %d\n", rc);
return rc;
wled->sink_base = be32_to_cpu(*prop);
prop = of_get_address_by_name(pdev->dev.of_node, QPNP_WLED_CTRL_BASE,
- 0, 0);
+ NULL, NULL);
if (!prop) {
dev_err(&pdev->dev, "Couldnt find ctrl's addr rc = %d\n", rc);
return rc;
/* Limit number of writeback bios in flight */
struct semaphore in_flight;
struct task_struct *writeback_thread;
+ struct workqueue_struct *writeback_write_wq;
struct keybuf writeback_keys;
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio, *n;
- if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
- wake_up_gc(op->c);
-
if (op->bypass)
return bch_data_invalidate(cl);
+ if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
+ wake_up_gc(op->c);
+
/*
* Journal writes are marked REQ_FLUSH; if the original write was a
* flush, it'll wait on the journal write.
struct request_queue *q = bdev_get_queue(dc->bdev);
int ret = 0;
- if (bdi_congested(&q->backing_dev_info, bits))
+ if (bdi_congested(q->backing_dev_info, bits))
return 1;
if (cached_dev_get(dc)) {
for_each_cache(ca, d->c, i) {
q = bdev_get_queue(ca->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
cached_dev_put(dc);
struct gendisk *g = dc->disk.disk;
g->queue->make_request_fn = cached_dev_make_request;
- g->queue->backing_dev_info.congested_fn = cached_dev_congested;
+ g->queue->backing_dev_info->congested_fn = cached_dev_congested;
dc->disk.cache_miss = cached_dev_cache_miss;
dc->disk.ioctl = cached_dev_ioctl;
}
for_each_cache(ca, d->c, i) {
q = bdev_get_queue(ca->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
return ret;
struct gendisk *g = d->disk;
g->queue->make_request_fn = flash_dev_make_request;
- g->queue->backing_dev_info.congested_fn = flash_dev_congested;
+ g->queue->backing_dev_info->congested_fn = flash_dev_congested;
d->cache_miss = flash_dev_cache_miss;
d->ioctl = flash_dev_ioctl;
}
blk_queue_make_request(q, NULL);
d->disk->queue = q;
q->queuedata = d;
- q->backing_dev_info.congested_data = d;
+ q->backing_dev_info->congested_data = d;
q->limits.max_hw_sectors = UINT_MAX;
q->limits.max_sectors = UINT_MAX;
q->limits.max_segment_size = UINT_MAX;
}
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
- bch_sectors_dirty_init(dc);
+ bch_sectors_dirty_init(&dc->disk);
atomic_set(&dc->has_dirty, 1);
atomic_inc(&dc->count);
bch_writeback_queue(dc);
cancel_delayed_work_sync(&dc->writeback_rate_update);
if (!IS_ERR_OR_NULL(dc->writeback_thread))
kthread_stop(dc->writeback_thread);
+ if (dc->writeback_write_wq)
+ destroy_workqueue(dc->writeback_write_wq);
mutex_lock(&bch_register_lock);
set_capacity(dc->disk.disk,
dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
- dc->disk.disk->queue->backing_dev_info.ra_pages =
- max(dc->disk.disk->queue->backing_dev_info.ra_pages,
- q->backing_dev_info.ra_pages);
+ dc->disk.disk->queue->backing_dev_info->ra_pages =
+ max(dc->disk.disk->queue->backing_dev_info->ra_pages,
+ q->backing_dev_info->ra_pages);
bch_cached_dev_request_init(dc);
bch_cached_dev_writeback_init(dc);
goto err;
bcache_device_attach(d, c, u - c->uuids);
+ bch_sectors_dirty_init(d);
bch_flash_dev_request_init(d);
add_disk(d->disk);
else
err = "device busy";
mutex_unlock(&bch_register_lock);
+ if (!IS_ERR(bdev))
+ bdput(bdev);
if (attr == &ksysfs_register_quiet)
goto out;
}
{
struct cached_dev *dc = container_of(kobj, struct cached_dev,
disk.kobj);
- unsigned v = size;
+ ssize_t v = size;
struct cache_set *c;
struct kobj_uevent_env *env;
bch_cached_dev_run(dc);
if (attr == &sysfs_cache_mode) {
- ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);
+ v = bch_read_string_list(buf, bch_cache_modes + 1);
if (v < 0)
return v;
STRTO_H(strtoll, long long)
STRTO_H(strtoull, unsigned long long)
+/**
+ * bch_hprint() - formats @v to human readable string for sysfs.
+ *
+ * @v: signed 64 bit integer
+ * @buf: the (at least 8 byte) buffer to format the result into.
+ *
+ * Returns the number of bytes used to format the value.
+ */
ssize_t bch_hprint(char *buf, int64_t v)
{
static const char units[] = "?kMGTPEZY";
- char dec[4] = "";
- int u, t = 0;
-
- for (u = 0; v >= 1024 || v <= -1024; u++) {
- t = v & ~(~0 << 10);
- v >>= 10;
- }
-
- if (!u)
- return sprintf(buf, "%llu", v);
-
- if (v < 100 && v > -100)
- snprintf(dec, sizeof(dec), ".%i", t / 100);
-
- return sprintf(buf, "%lli%s%c", v, dec, units[u]);
+ int u = 0, t;
+
+ uint64_t q;
+
+ if (v < 0)
+ q = -v;
+ else
+ q = v;
+
+ /* For as long as the number is more than 3 digits, but at least
+ * once, shift right / divide by 1024. Keep the remainder for
+ * a digit after the decimal point.
+ */
+ do {
+ u++;
+
+ t = q & ~(~0 << 10);
+ q >>= 10;
+ } while (q >= 1000);
+
+ if (v < 0)
+ /* '-', up to 3 digits, '.', 1 digit, 1 character, null;
+ * yields 8 bytes.
+ */
+ return sprintf(buf, "-%llu.%i%c", q, t * 10 / 1024, units[u]);
+ else
+ return sprintf(buf, "%llu.%i%c", q, t * 10 / 1024, units[u]);
}
ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
static void __update_writeback_rate(struct cached_dev *dc)
{
struct cache_set *c = dc->disk.c;
- uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
+ uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
+ bcache_flash_devs_sectors_dirty(c);
uint64_t cache_dirty_target =
div_u64(cache_sectors * dc->writeback_percent, 100);
closure_bio_submit(&io->bio, cl);
- continue_at(cl, write_dirty_finish, system_wq);
+ continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}
static void read_dirty_endio(struct bio *bio)
closure_bio_submit(&io->bio, cl);
- continue_at(cl, write_dirty, system_wq);
+ continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}
static void read_dirty(struct cached_dev *dc)
return MAP_CONTINUE;
}
-void bch_sectors_dirty_init(struct cached_dev *dc)
+void bch_sectors_dirty_init(struct bcache_device *d)
{
struct sectors_dirty_init op;
bch_btree_op_init(&op.op, -1);
- op.inode = dc->disk.id;
+ op.inode = d->id;
- bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
+ bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
sectors_dirty_init_fn, 0);
- dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
+ d->sectors_dirty_last = bcache_dev_sectors_dirty(d);
}
void bch_cached_dev_writeback_init(struct cached_dev *dc)
int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
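+ /*
+  * Writeback frees up dirty data, so its workqueue needs a
+  * rescuer (WQ_MEM_RECLAIM) to guarantee forward progress
+  * under memory pressure.
+  */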
+ dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
+ WQ_MEM_RECLAIM, 0);
+ if (!dc->writeback_write_wq)
+ return -ENOMEM;
+
dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
"bcache_writeback");
if (IS_ERR(dc->writeback_thread))
return ret;
}
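+/*
+ * Dirty sectors on flash-only volumes are never written back, so the
+ * writeback-rate calculation subtracts them from the usable cache size.
+ */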
+static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
+{
+ uint64_t i, ret = 0;
+
+ mutex_lock(&bch_register_lock);
+
+ for (i = 0; i < c->nr_uuids; i++) {
+ struct bcache_device *d = c->devices[i];
+
+ if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
+ continue;
+ ret += bcache_dev_sectors_dirty(d);
+ }
+
+ mutex_unlock(&bch_register_lock);
+
+ return ret;
+}
+
static inline unsigned offset_to_stripe(struct bcache_device *d,
uint64_t offset)
{
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
-void bch_sectors_dirty_init(struct cached_dev *dc);
+void bch_sectors_dirty_init(struct bcache_device *);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);
long pages;
struct bitmap_page *new_bp;
+ if (bitmap->storage.file && !init) {
+ pr_info("md: cannot resize file-based bitmap\n");
+ return -EINVAL;
+ }
+
if (chunksize == 0) {
/* If there is enough space, leave the chunk size unchanged,
* else increase by factor of two until there is enough space.
static int is_congested(struct dm_dev *dev, int bdi_bits)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return bdi_congested(&q->backing_dev_info, bdi_bits);
+ return bdi_congested(q->backing_dev_info, bdi_bits);
}
static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return bdi_congested(&q->backing_dev_info, bdi_bits);
+ return bdi_congested(q->backing_dev_info, bdi_bits);
}
static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
char b[BDEVNAME_SIZE];
if (likely(q))
- r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+ r |= bdi_congested(q->backing_dev_info, bdi_bits);
else
DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
dm_device_name(t->md),
return 1;
q = bdev_get_queue(pt->data_dev->bdev);
- return bdi_congested(&q->backing_dev_info, bdi_bits);
+ return bdi_congested(q->backing_dev_info, bdi_bits);
}
static void requeue_bios(struct pool *pool)
* the query about congestion status of request_queue
*/
if (dm_request_based(md))
- r = md->queue->backing_dev_info.wb.state &
+ r = md->queue->backing_dev_info->wb.state &
bdi_bits;
else
r = dm_table_any_congested(map, bdi_bits);
* - must do so here (in alloc_dev callchain) before queue is used
*/
md->queue->queuedata = md;
- md->queue->backing_dev_info.congested_data = md;
+ md->queue->backing_dev_info->congested_data = md;
}
static void dm_init_old_md_queue(struct mapped_device *md)
/*
* Initialize aspects of queue that aren't relevant for blk-mq
*/
- md->queue->backing_dev_info.congested_fn = dm_any_congested;
+ md->queue->backing_dev_info->congested_fn = dm_any_congested;
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
}
for (i = 0; i < conf->raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
rcu_read_unlock();
* oldconf until no one uses it anymore.
*/
mddev_suspend(mddev);
- oldconf = rcu_dereference(mddev->private);
+ oldconf = rcu_dereference_protected(mddev->private,
+ lockdep_is_held(&mddev->reconfig_mutex));
mddev->raid_disks++;
WARN_ONCE(mddev->raid_disks != newconf->raid_disks,
"copied raid_disks doesn't match mddev->raid_disks");
return err;
}
if (mddev->queue) {
- mddev->queue->backing_dev_info.congested_data = mddev;
- mddev->queue->backing_dev_info.congested_fn = md_congested;
+ mddev->queue->backing_dev_info->congested_data = mddev;
+ mddev->queue->backing_dev_info->congested_fn = md_congested;
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
__md_stop_writes(mddev);
__md_stop(mddev);
- mddev->queue->backing_dev_info.congested_fn = NULL;
+ mddev->queue->backing_dev_info->congested_fn = NULL;
/* tell userspace to handle 'inactive' */
sysfs_notify_dirent_safe(mddev->sysfs_state);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
/* Just like multipath_map, we just check the
* first available device
*/
for (i = 0; i < raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
return ret;
}
*/
int stripe = mddev->raid_disks *
(mddev->chunk_sectors << 9) / PAGE_SIZE;
- if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
- mddev->queue->backing_dev_info.ra_pages = 2* stripe;
+ if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
+ mddev->queue->backing_dev_info->ra_pages = 2* stripe;
}
dump_zones(mddev);
* non-congested targets, it can be removed
*/
if ((bits & (1 << WB_async_congested)) || 1)
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
else
- ret &= bdi_congested(&q->backing_dev_info, bits);
+ ret &= bdi_congested(q->backing_dev_info, bits);
}
}
rcu_read_unlock();
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
}
rcu_read_unlock();
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
+
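+ /*
+  * Batch writes on the per-task plug when one is active;
+  * only unplugged submissions wake the md thread directly.
+  */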
+ cb = blk_check_plugged(raid10_unplug, mddev,
+ sizeof(*plug));
+ if (cb)
+ plug = container_of(cb, struct raid10_plug_cb,
+ cb);
+ else
+ plug = NULL;
spin_lock_irqsave(&conf->device_lock, flags);
- bio_list_add(&conf->pending_bio_list, mbio);
- conf->pending_count++;
+ if (plug) {
+ bio_list_add(&plug->pending, mbio);
+ plug->pending_cnt++;
+ } else {
+ bio_list_add(&conf->pending_bio_list, mbio);
+ conf->pending_count++;
+ }
spin_unlock_irqrestore(&conf->device_lock, flags);
- if (!mddev_check_plugged(mddev))
+ if (!plug)
md_wakeup_thread(mddev->thread);
}
}
* maybe...
*/
stripe /= conf->geo.near_copies;
- if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+ mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
}
if (md_integrity_register(mddev))
int stripe = conf->geo.raid_disks *
((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
stripe /= conf->geo.near_copies;
- if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+ conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
}
conf->fullsync = 0;
}
spin_unlock(&head->batch_head->batch_lock);
goto unlock_out;
}
+ /*
+ * We must assign batch_head of this stripe within the
+ * batch_lock, otherwise clear_batch_ready of batch head
+ * stripe could clear BATCH_READY bit of this stripe and
+ * this stripe->batch_head doesn't get assigned, which
+ * could confuse clear_batch_ready for this stripe
+ */
+ sh->batch_head = head->batch_head;
/*
* at this point, head's BATCH_READY could be cleared, but we
*/
list_add(&sh->batch_list, &head->batch_list);
spin_unlock(&head->batch_head->batch_lock);
-
- sh->batch_head = head->batch_head;
} else {
head->batch_head = head;
sh->batch_head = head->batch_head;
set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
(1 << STRIPE_PREREAD_ACTIVE) |
- (1 << STRIPE_DEGRADED)),
+ (1 << STRIPE_DEGRADED) |
+ (1 << STRIPE_ON_UNPLUG_LIST)),
head_sh->state & (1 << STRIPE_INSYNC));
sh->check_state = head_sh->check_state;
spin_unlock_irq(&conf->device_lock);
+ r5l_flush_stripe_to_raid(conf->log);
+
async_tx_issue_pending_all();
blk_finish_plug(&plug);
mddev_suspend(mddev);
conf->skip_copy = new;
if (new)
- mddev->queue->backing_dev_info.capabilities |=
+ mddev->queue->backing_dev_info->capabilities |=
BDI_CAP_STABLE_WRITES;
else
- mddev->queue->backing_dev_info.capabilities &=
+ mddev->queue->backing_dev_info->capabilities &=
~BDI_CAP_STABLE_WRITES;
mddev_resume(mddev);
}
int data_disks = conf->previous_raid_disks - conf->max_degraded;
int stripe = data_disks *
((mddev->chunk_sectors << 9) / PAGE_SIZE);
- if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+ mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
int data_disks = conf->raid_disks - conf->max_degraded;
int stripe = data_disks * ((conf->chunk_sectors << 9)
/ PAGE_SIZE);
- if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+ conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
}
}
}
by Nathan Laredo <laredo@gnu.org> */
int av7110_debiwrite(struct av7110 *av7110, u32 config,
- int addr, u32 val, int count)
+ int addr, u32 val, unsigned int count)
{
struct saa7146_dev *dev = av7110->dev;
- if (count <= 0 || count > 32764) {
+ if (count > 32764) {
printk("%s: invalid count %d\n", __func__, count);
return -1;
}
return 0;
}
-u32 av7110_debiread(struct av7110 *av7110, u32 config, int addr, int count)
+u32 av7110_debiread(struct av7110 *av7110, u32 config, int addr, unsigned int count)
{
struct saa7146_dev *dev = av7110->dev;
u32 result = 0;
- if (count > 32764 || count <= 0) {
+ if (count > 32764) {
printk("%s: invalid count %d\n", __func__, count);
return 0;
}
/* DEBI (saa7146 data extension bus interface) access */
extern int av7110_debiwrite(struct av7110 *av7110, u32 config,
- int addr, u32 val, int count);
+ int addr, u32 val, unsigned int count);
extern u32 av7110_debiread(struct av7110 *av7110, u32 config,
- int addr, int count);
+ int addr, unsigned int count);
/* DEBI during interrupt */
/* single word writes */
-static inline void iwdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
+static inline void iwdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
{
av7110_debiwrite(av7110, config, addr, val, count);
}
av7110_debiwrite(av7110, config, addr, 0, count);
}
-static inline u32 irdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
+static inline u32 irdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
{
u32 res;
}
/* DEBI outside interrupts, only for count <= 4! */
-static inline void wdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
+static inline void wdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
{
unsigned long flags;
spin_unlock_irqrestore(&av7110->debilock, flags);
}
-static inline u32 rdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
+static inline u32 rdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
{
unsigned long flags;
u32 res;
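The av7110 hunks above change count from int to unsigned int, which collapses the two-sided range check into a single upper-bound test: a negative caller value wraps to a huge unsigned number and is rejected by the same comparison (note that count == 0 is now accepted and simply does no work). A sketch of the idea with a hypothetical validate helper:

#include <errno.h>

/* Hypothetical helper; 32764 is the DEBI limit from the code above */
static int demo_validate(unsigned int count)
{
	/* also rejects former negatives: (unsigned int)-1 > 32764 */
	if (count > 32764)
		return -EINVAL;
	return 0;
}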
if ((frame->fmt->pixelformat == V4L2_PIX_FMT_VYUY) ||
(frame->fmt->pixelformat == V4L2_PIX_FMT_YVYU) ||
- (frame->fmt->pixelformat == V4L2_PIX_FMT_NV61) ||
(frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) ||
- (frame->fmt->pixelformat == V4L2_PIX_FMT_NV21) ||
(frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M))
swap(addr->cb, addr->cr);
#define VFE40_STATS_BURST_LEN_8916_VERSION 2
#define VFE40_FETCH_BURST_LEN 3
#define VFE40_UB_SIZE 1536 /* 1536 * 128 bits = 24KB */
+#define VFE40_STATS_SIZE 392
#define VFE40_UB_SIZE_8952 2048 /* 2048 * 128 bits = 32KB */
#define VFE40_UB_SIZE_8916 3072 /* 3072 * 128 bits = 48KB */
#define VFE40_EQUAL_SLICE_UB 190 /* (UB_SIZE - STATS SIZE)/6 */
#define VFE40_EQUAL_SLICE_UB_8916 236
#define VFE40_TOTAL_WM_UB 1144 /* UB_SIZE - STATS SIZE */
-#define VFE40_TOTAL_WM_UB_8916 1656
+#define VFE40_TOTAL_WM_UB_8916 2680
#define VFE40_WM_BASE(idx) (0x6C + 0x24 * idx)
#define VFE40_RDI_BASE(idx) (0x2E8 + 0x4 * idx)
#define VFE40_XBAR_BASE(idx) (0x58 + 0x4 * (idx / 2))
static uint32_t msm_vfe40_get_ub_size(struct vfe_device *vfe_dev)
{
- if (vfe_dev->vfe_hw_version == VFE40_8916_VERSION) {
+ if (vfe_dev->vfe_hw_version == VFE40_8916_VERSION ||
+ vfe_dev->vfe_hw_version == VFE40_8939_VERSION ||
+ vfe_dev->vfe_hw_version == VFE40_8937_VERSION ||
+ vfe_dev->vfe_hw_version == VFE40_8953_VERSION ||
+ vfe_dev->vfe_hw_version == VFE40_8917_VERSION) {
vfe_dev->ub_info->wm_ub = VFE40_TOTAL_WM_UB_8916;
return VFE40_TOTAL_WM_UB_8916;
}
*irq_status0 &= vfe_dev->irq0_mask;
*irq_status1 &= vfe_dev->irq1_mask;
+ if (*irq_status0 &&
+ (*irq_status0 == msm_camera_io_r(vfe_dev->vfe_base + 0x38))) {
+ msm_camera_io_w(*irq_status0, vfe_dev->vfe_base + 0x30);
+ msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x24);
+ }
if (*irq_status1 & (1 << 0)) {
vfe_dev->error_info.camif_status =
.num_comp_mask = 3,
.num_rdi = 3,
.num_rdi_master = 3,
- .min_wm_ub = 64,
+ .min_wm_ub = 96,
.scratch_buf_range = SZ_32M + SZ_4M,
};
if (rc)
return rc;
+ vfe_dev->num_norm_clk = vfe_dev->num_clk;
for (i = 0; i < vfe_dev->num_clk; i++) {
if (strcmp(vfe_dev->vfe_clk_info[i].clk_name,
"camss_vfe_stream_clk") == 0) {
* those state transitions instead of directly forcing stream to
* be INACTIVE
*/
+ memset(&stream_info->sw_skip, 0,
+ sizeof(struct msm_isp_sw_framskip));
intf = SRC_TO_INTF(stream_info->stream_src);
if (stream_info->lpm_mode == 0 &&
stream_info->state != PAUSED) {
}
if (capability) {
- fsize->type = capability->width.step_size == 1 &&
- capability->height.step_size == 1 ?
- V4L2_FRMSIZE_TYPE_CONTINUOUS :
- V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
fsize->stepwise.min_width = capability->width.min;
fsize->stepwise.max_width = capability->width.max;
fsize->stepwise.step_width = capability->width.step_size;
goto done;
}
+ /* Validate the user-provided bit-size and offset */
+ if (mapping->size > 32 ||
+ mapping->offset + mapping->size > ctrl->info.size * 8) {
+ ret = -EINVAL;
+ goto done;
+ }
+
list_for_each_entry(map, &ctrl->info.mappings, list) {
if (mapping->id == map->id) {
uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', "
copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
put_user(kp->pending, &up->pending) ||
put_user(kp->sequence, &up->sequence) ||
- compat_put_timespec(&kp->timestamp, &up->timestamp) ||
+ put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
+ put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) ||
put_user(kp->id, &up->id) ||
copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
return -EFAULT;
("wcd9xxx_core", 0);
if (!IS_ERR(debugfs_wcd9xxx_dent)) {
debugfs_peek = debugfs_create_file("slimslave_peek",
- S_IFREG | S_IRUGO, debugfs_wcd9xxx_dent,
+ S_IFREG | S_IRUSR, debugfs_wcd9xxx_dent,
(void *) "slimslave_peek", &codec_debug_ops);
debugfs_poke = debugfs_create_file("slimslave_poke",
- S_IFREG | S_IRUGO, debugfs_wcd9xxx_dent,
+ S_IFREG | S_IRUSR, debugfs_wcd9xxx_dent,
(void *) "slimslave_poke", &codec_debug_ops);
debugfs_power_state = debugfs_create_file("power_state",
- S_IFREG | S_IRUGO, debugfs_wcd9xxx_dent,
+ S_IFREG | S_IRUSR, debugfs_wcd9xxx_dent,
(void *) "power_state", &codec_debug_ops);
debugfs_reg_dump = debugfs_create_file("slimslave_reg_dump",
- S_IFREG | S_IRUGO, debugfs_wcd9xxx_dent,
+ S_IFREG | S_IRUSR, debugfs_wcd9xxx_dent,
(void *) "slimslave_reg_dump", &codec_debug_ops);
}
#endif
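The permission change above narrows the debug nodes from world-readable S_IRUGO (0444) to owner-only S_IRUSR (0400), since raw peek/poke of codec registers is not something unprivileged users should see. A minimal sketch of creating such a node, assuming a caller-supplied file_operations:

#include <linux/debugfs.h>
#include <linux/err.h>

/* Sketch: a root-only debugfs node; 0400 corresponds to S_IRUSR */
static struct dentry *demo_dir;

static int demo_debugfs_init(const struct file_operations *fops, void *priv)
{
	demo_dir = debugfs_create_dir("demo_codec", NULL);
	if (IS_ERR_OR_NULL(demo_dir))
		return -ENOMEM;
	if (!debugfs_create_file("regs", 0400, demo_dir, priv, fops))
		return -ENOMEM;
	return 0;
}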
kernel = false;
}
+ /*
+ * Increment driver use count. Enables global TLBIs for hash
+ * and callbacks to handle the segment table
+ */
cxl_ctx_get();
if ((rc = cxl_attach_process(ctx, kernel, wed , 0))) {
pr_devel("afu_open pe: %i\n", ctx->pe);
file->private_data = ctx;
- cxl_ctx_get();
/* indicate success */
rc = 0;
ctx->pid = get_task_pid(current, PIDTYPE_PID);
ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ /*
+ * Increment driver use count. Enables global TLBIs for hash
+ * and callbacks to handle the segment table
+ */
+ cxl_ctx_get();
+
trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
if ((rc = cxl_attach_process(ctx, false, work.work_element_descriptor,
amr))) {
afu_release_irqs(ctx, ctx);
+ cxl_ctx_put();
goto out;
}
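The cxl hunks above move cxl_ctx_get() so the reference is taken before cxl_attach_process() depends on it, and add the missing cxl_ctx_put() on the attach failure path. The general pattern, sketched with hypothetical demo_* names:

struct demo_ctx;
void demo_ctx_get(struct demo_ctx *ctx);
void demo_ctx_put(struct demo_ctx *ctx);
int demo_do_attach(struct demo_ctx *ctx);

/* Sketch: take the reference before the fallible operation that needs
 * it, and drop it on every error exit (hypothetical API) */
static int demo_attach(struct demo_ctx *ctx)
{
	int rc;

	demo_ctx_get(ctx);	/* count must cover the attach itself */
	rc = demo_do_attach(ctx);
	if (rc) {
		demo_ctx_put(ctx);	/* balance on failure */
		return rc;
	}
	return 0;	/* released later by the detach path */
}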
#include "qseecom_kernel.h"
+#define SRMAPP_NAME "hdcpsrm"
#define TZAPP_NAME "hdcp2p2"
#define HDCP1_APP_NAME "hdcp1"
#define QSEECOM_SBUFF_SIZE 0x1000
#define HDCP_SESSION_INIT SERVICE_CREATE_CMD(16)
#define HDCP_SESSION_DEINIT SERVICE_CREATE_CMD(17)
#define HDCP_TXMTR_START_AUTHENTICATE SERVICE_CREATE_CMD(18)
+#define HDCP_TXMTR_VALIDATE_RECEIVER_ID_LIST SERVICE_CREATE_CMD(19)
#define HCDP_TXMTR_GET_MAJOR_VERSION(v) (((v) >> 16) & 0xFF)
#define HCDP_TXMTR_GET_MINOR_VERSION(v) (((v) >> 8) & 0xFF)
uint8_t message[MAX_TX_MESSAGE_SIZE];
};
+struct __attribute__ ((__packed__)) hdcp_rcv_id_list_req {
+ uint32_t commandid;
+ uint32_t ctxHandle;
+};
+struct __attribute__ ((__packed__)) hdcp_rcv_id_list_rsp {
+ uint32_t status;
+ uint32_t commandid;
+};
+
/*
* struct hdcp_lib_handle - handle for hdcp client
* @qseecom_handle - for sending commands to qseecom
static int hdcp_lib_txmtr_init_legacy(struct hdcp_lib_handle *handle);
static struct qseecom_handle *hdcp1_handle;
+static struct qseecom_handle *hdcpsrm_handle;
+
static bool hdcp1_supported = true;
static bool hdcp1_enc_enabled;
static struct mutex hdcp1_ta_cmd_lock;
goto exit;
}
+ if (!hdcpsrm_handle) {
+ rc = qseecom_start_app(&hdcpsrm_handle,
+ SRMAPP_NAME, QSEECOM_SBUFF_SIZE);
+ if (rc) {
+ pr_err("qseecom_start_app failed for SRM TA %d\n", rc);
+ goto exit;
+ }
+ }
+
handle->hdcp_state |= HDCP_STATE_APP_LOADED;
pr_debug("qseecom_start_app success\n");
goto exit;
}
- /* deallocate the resources for qseecom handle */
+ /* deallocate the resources for qseecom hdcp2p2 handle */
rc = qseecom_shutdown_app(&handle->qseecom_handle);
if (rc) {
- pr_err("qseecom_shutdown_app failed err: %d\n", rc);
+ pr_err("hdcp2p2 qseecom_shutdown_app failed err: %d\n", rc);
+ goto exit;
+ }
+
+ /* deallocate the resources for qseecom hdcpsrm handle */
+ rc = qseecom_shutdown_app(&hdcpsrm_handle);
+ if (rc) {
+ pr_err("hdcpsrm qseecom_shutdown_app failed err: %d\n", rc);
goto exit;
}
return 0;
}
+static int hdcp_validate_recv_id(struct hdcp_lib_handle *handle)
+{
+ int rc = 0;
+ struct hdcp_rcv_id_list_req *recv_id_req;
+ struct hdcp_rcv_id_list_rsp *recv_id_rsp;
+
+ if (!handle || !handle->qseecom_handle ||
+ !handle->qseecom_handle->sbuf) {
+ pr_err("invalid handle\n");
+ return -EINVAL;
+ }
+
+ /* validate the receiver ID list against the new SRM blob */
+ recv_id_req = (struct hdcp_rcv_id_list_req *)
+ handle->qseecom_handle->sbuf;
+ recv_id_req->commandid = HDCP_TXMTR_VALIDATE_RECEIVER_ID_LIST;
+ recv_id_req->ctxHandle = handle->tz_ctxhandle;
+
+ recv_id_rsp = (struct hdcp_rcv_id_list_rsp *)
+ (handle->qseecom_handle->sbuf +
+ QSEECOM_ALIGN(sizeof(struct hdcp_rcv_id_list_req)));
+
+ rc = qseecom_send_command(handle->qseecom_handle,
+ recv_id_req,
+ QSEECOM_ALIGN(sizeof(struct hdcp_rcv_id_list_req)),
+ recv_id_rsp,
+ QSEECOM_ALIGN(sizeof(struct hdcp_rcv_id_list_rsp)));
+
+
+ if ((rc < 0) || (recv_id_rsp->status != HDCP_SUCCESS) ||
+ (recv_id_rsp->commandid !=
+ HDCP_TXMTR_VALIDATE_RECEIVER_ID_LIST)) {
+ pr_err("qseecom cmd failed with err = %d status = %d\n",
+ rc, recv_id_rsp->status);
+ rc = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ return rc;
+}
+
int hdcp1_set_enc(bool enable)
{
int rc = 0;
return ret;
}
+static ssize_t hdmi_hdcp_srm_updated(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int rc;
+ int srm_updated;
+ struct hdcp_lib_handle *handle;
+ ssize_t ret = count;
+
+ handle = hdcp_drv_mgr->handle;
+
+ rc = kstrtoint(buf, 10, &srm_updated);
+ if (rc) {
+ pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+ return -EINVAL;
+ }
+
+ if (srm_updated) {
+ if (hdcp_validate_recv_id(handle)) {
+ pr_debug("SRM check FAILED\n");
+ if (handle && handle->client_ops->srm_cb)
+ handle->client_ops->srm_cb(handle->client_ctx);
+ } else {
+ pr_debug("SRM check PASSED\n");
+ }
+ }
+
+ return ret;
+}
+
static DEVICE_ATTR(tp, S_IRUGO | S_IWUSR, msm_hdcp_1x_sysfs_rda_tp,
msm_hdcp_1x_sysfs_wta_tp);
static DEVICE_ATTR(min_level_change, S_IWUSR, NULL,
hdmi_hdcp2p2_sysfs_wta_min_level_change);
+static DEVICE_ATTR(srm_updated, S_IWUSR, NULL,
+hdmi_hdcp_srm_updated);
+
void hdcp1_cache_repeater_topology(void *hdcp1_cached_tp)
{
memcpy((void *)&hdcp_drv_mgr->cached_tp,
static struct attribute *msm_hdcp_fs_attrs[] = {
&dev_attr_tp.attr,
&dev_attr_min_level_change.attr,
+ &dev_attr_srm_updated.attr,
NULL
};
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
audio->ac->session);
if (audio->feedback == NON_TUNNEL_MODE) {
/* Configure PCM output block */
- rc = q6asm_enc_cfg_blk_pcm(audio->ac, 0, 0);
+ rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+ audio->pcm_cfg.sample_rate,
+ audio->pcm_cfg.channel_count);
if (rc < 0) {
pr_err("pcm output block config failed\n");
break;
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
audio->ac->session);
if (audio->feedback == NON_TUNNEL_MODE) {
/* Configure PCM output block */
- rc = q6asm_enc_cfg_blk_pcm_native(audio->ac,
- aac_cfg.sample_rate,
- aac_cfg.ch_cfg);
+ rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+ audio->pcm_cfg.sample_rate,
+ audio->pcm_cfg.channel_count);
if (rc < 0) {
pr_err("pcm output block config failed\n");
break;
{
struct mmc_card *card = filp->private_data;
struct mmc_wr_pack_stats *pack_stats;
- int i;
+ int i, ret = 0;
int max_num_of_packed_reqs = 0;
- char *temp_buf;
+ char *temp_buf, *temp_ubuf;
+ size_t tubuf_cnt = 0;
if (!card)
return cnt;
max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
- temp_buf = kmalloc(TEMP_BUF_SIZE, GFP_KERNEL);
+ if (cnt <= (strlen_user(ubuf) + 1))
+ goto exit;
+
+ temp_buf = kzalloc(TEMP_BUF_SIZE, GFP_KERNEL);
if (!temp_buf)
goto exit;
+ tubuf_cnt = cnt - strlen_user(ubuf) - 1;
+
+ temp_ubuf = kzalloc(tubuf_cnt, GFP_KERNEL);
+ if (!temp_ubuf)
+ goto cleanup;
+
spin_lock(&pack_stats->lock);
snprintf(temp_buf, TEMP_BUF_SIZE, "%s: write packing statistics:\n",
mmc_hostname(card->host));
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) {
if (pack_stats->packing_events[i]) {
"%s: Packed %d reqs - %d times\n",
mmc_hostname(card->host), i,
pack_stats->packing_events[i]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
}
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: stopped packing due to the following reasons:\n",
mmc_hostname(card->host));
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
if (pack_stats->pack_stop_reason[EXCEEDS_SEGMENTS]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: exceed max num of segments\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[EXCEEDS_SEGMENTS]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[EXCEEDS_SECTORS]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: exceed max num of sectors\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[EXCEEDS_SECTORS]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[WRONG_DATA_DIR]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: wrong data direction\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[WRONG_DATA_DIR]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[FLUSH_OR_DISCARD]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: flush or discard\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[FLUSH_OR_DISCARD]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[EMPTY_QUEUE]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: empty queue\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[EMPTY_QUEUE]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[REL_WRITE]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: rel write\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[REL_WRITE]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[THRESHOLD]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: Threshold\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[THRESHOLD]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[LARGE_SEC_ALIGN]) {
"%s: %d times: Large sector alignment\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[LARGE_SEC_ALIGN]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[RANDOM]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: random request\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[RANDOM]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[FUA]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: fua request\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[FUA]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
+ if (strlen_user(ubuf) < cnt - strlen(temp_ubuf))
+ ret = copy_to_user((ubuf + strlen_user(ubuf)),
+ temp_ubuf, tubuf_cnt);
+ else
+ ret = -EFAULT;
+ if (ret)
+ pr_err("%s: %s: Copy to userspace failed: %s\n",
+ mmc_hostname(card->host), __func__, ubuf);
spin_unlock(&pack_stats->lock);
+ kfree(temp_ubuf);
+
+cleanup:
kfree(temp_buf);
pr_info("%s", ubuf);
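The mmc hunks above stop strlcat()ing directly into ubuf, a __user pointer that kernel string helpers must not touch, and instead build the report in a kernel buffer that is pushed out with one bounds-checked copy_to_user(). The staging pattern in miniature:

#include <linux/slab.h>
#include <linux/uaccess.h>

/* Sketch: stage the output in kernel memory, then copy out once */
static ssize_t demo_read_stats(char __user *ubuf, size_t cnt)
{
	char *kbuf;
	size_t len;
	ssize_t ret;

	kbuf = kzalloc(cnt, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	len = scnprintf(kbuf, cnt, "write packing statistics:\n");
	ret = copy_to_user(ubuf, kbuf, len) ? -EFAULT : (ssize_t)len;

	kfree(kbuf);
	return ret;
}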
sdio_free_func_cis(func);
kfree(func->info);
-
+ kfree(func->tmpbuf);
kfree(func);
}
if (!func)
return ERR_PTR(-ENOMEM);
+ /*
+ * allocate buffer separately to make sure it's properly aligned for
+ * DMA usage (incl. 64 bit DMA)
+ */
+ func->tmpbuf = kmalloc(4, GFP_KERNEL);
+ if (!func->tmpbuf) {
+ kfree(func);
+ return ERR_PTR(-ENOMEM);
+ }
+
func->card = card;
device_initialize(&func->dev);
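The sdio hunk above pulls the 4-byte bounce buffer out of the function structure and kmalloc()s it separately: memory embedded in a larger struct carries no DMA alignment guarantee, while a standalone kmalloc() allocation is aligned to at least ARCH_KMALLOC_MINALIGN and is safe for DMA, including 64-bit masks. In miniature, with a hypothetical demo_func:

#include <linux/slab.h>
#include <linux/types.h>

struct demo_func {
	u8 *tmpbuf;	/* was an embedded u8 tmpbuf[4]: unsafe for DMA */
};

static struct demo_func *demo_func_alloc(void)
{
	struct demo_func *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	/* standalone kmalloc() memory is suitably aligned for DMA */
	f->tmpbuf = kmalloc(4, GFP_KERNEL);
	if (!f->tmpbuf) {
		kfree(f);
		return NULL;
	}
	return f;
}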
u32 tempval1 = gfar_read(&regs->maccfg1);
u32 tempval = gfar_read(&regs->maccfg2);
u32 ecntrl = gfar_read(&regs->ecntrl);
- u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW);
+ u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
if (phydev->duplex != priv->oldduplex) {
if (!(phydev->duplex))
unsigned long flags;
MAL_DBG2(mal, "poll(%d)" NL, budget);
- again:
+
/* Process TX skbs */
list_for_each(l, &mal->poll_list) {
struct mal_commac *mc =
spin_lock_irqsave(&mal->lock, flags);
mal_disable_eob_irq(mal);
spin_unlock_irqrestore(&mal->lock, flags);
- goto again;
}
mc->ops->poll_tx(mc->dev);
}
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
+ /* In case of a PCI error, the adapter loses its HW address,
+ * so we should re-assign it here.
+ */
+ hw->hw_addr = adapter->io_addr;
+
igb_reset(adapter);
wr32(E1000_WUS, ~0);
result = PCI_ERS_RESULT_RECOVERED;
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_get_index;
- mvpp2_txq_inc_get(txq_pcpu);
-
dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
tx_buf->size, DMA_TO_DEVICE);
- if (!tx_buf->skb)
- continue;
- dev_kfree_skb_any(tx_buf->skb);
+ if (tx_buf->skb)
+ dev_kfree_skb_any(tx_buf->skb);
+
+ mvpp2_txq_inc_get(txq_pcpu);
}
}
}
}
+#define MLX4_EN_WRAP_AROUND_SEC 10UL
+/* By scheduling the overflow check every 5 seconds, we have a reasonably
+ * good chance we won't miss a wraparound.
+ * TODO: Use a timer instead of a work queue to increase the guarantee.
+ */
+#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2)
+
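With a 10-second counter wraparound, polling every WRAP/2 = 5 seconds guarantees at least one check per wrap even if a run is slightly delayed. A sketch of the jiffies-based check, assuming the work item is rescheduled elsewhere:

#include <linux/jiffies.h>

#define DEMO_WRAP_AROUND_SEC	10UL
#define DEMO_OVERFLOW_PERIOD	(DEMO_WRAP_AROUND_SEC * HZ / 2)	/* 5 s */

/* Sketch: called periodically from a work queue */
static void demo_overflow_check(unsigned long *last_check)
{
	if (time_is_before_jiffies(*last_check + DEMO_OVERFLOW_PERIOD)) {
		/* read the cycle counter here so a wrap is never missed */
		*last_check = jiffies;
	}
}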
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
{
bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
- mdev->overflow_period);
+ MLX4_EN_OVERFLOW_PERIOD);
unsigned long flags;
if (timeout) {
.enable = mlx4_en_phc_enable,
};
-#define MLX4_EN_WRAP_AROUND_SEC 10ULL
/* This function calculates the max shift that enables the user range
* of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
{
struct mlx4_dev *dev = mdev->dev;
unsigned long flags;
- u64 ns, zero = 0;
/* mlx4_en_init_timestamp is called for each netdev.
* mdev->ptp_clock is common for all ports, skip initialization if
ktime_to_ns(ktime_get_real()));
write_unlock_irqrestore(&mdev->clock_lock, flags);
- /* Calculate period in seconds to call the overflow watchdog - to make
- * sure counter is checked at least once every wrap around.
- */
- ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero);
- do_div(ns, NSEC_PER_SEC / 2 / HZ);
- mdev->overflow_period = ns;
-
/* Configure the PHC */
mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");
return -ENOSYS;
}
- mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
-
dev->caps.hca_core_clock = hca_param.hca_core_clock;
memset(&dev_cap, 0, sizeof(dev_cap));
struct cyclecounter cycles;
struct timecounter clock;
unsigned long last_overflow_check;
- unsigned long overflow_period;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
struct notifier_block nb;
seg_hdr->cookie = MPI_COREDUMP_COOKIE;
seg_hdr->segNum = seg_number;
seg_hdr->segSize = seg_size;
- memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+ strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
}
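The qlge hunk above replaces memcpy() with strncpy(): memcpy() with sizeof(description) - 1 reads past the end of any shorter source string, while strncpy() stops at the source NUL and zero-fills the remainder. Since strncpy() does not terminate when the source fills the buffer, the -1 leaves the final byte as the terminator, assuming the header was zeroed first. A sketch:

#include <string.h>

struct demo_seg_hdr {
	char description[16];
};

static void demo_fill_desc(struct demo_seg_hdr *hdr, const char *desc)
{
	/* pre-zero so the untouched last byte guarantees termination */
	memset(hdr, 0, sizeof(*hdr));
	strncpy(hdr->description, desc, sizeof(hdr->description) - 1);
}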
/*
ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000,
ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000,
ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000,
- ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
+ ECMR_MPDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
ECMR_RTM = 0x00000010, ECMR_ILB = 0x00000008, ECMR_ELB = 0x00000004,
ECMR_DM = 0x00000002, ECMR_PRM = 0x00000001,
};
if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
-
- /* Now we can run the state machine synchronously */
- phy_state_machine(&phydev->state_queue.work);
}
/**
hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
TEAM_CMD_OPTIONS_GET);
- if (!hdr)
+ if (!hdr) {
+ nlmsg_free(skb);
return -EMSGSIZE;
+ }
if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
goto nla_put_failure;
hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
TEAM_CMD_PORT_LIST_GET);
- if (!hdr)
+ if (!hdr) {
+ nlmsg_free(skb);
return -EMSGSIZE;
+ }
if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
goto nla_put_failure;
switch (tun->flags & TUN_TYPE_MASK) {
case IFF_TUN:
if (tun->flags & IFF_NO_PI) {
- switch (skb->data[0] & 0xf0) {
- case 0x40:
+ u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
+
+ switch (ip_version) {
+ case 4:
pi.proto = htons(ETH_P_IP);
break;
- case 0x60:
+ case 6:
pi.proto = htons(ETH_P_IPV6);
break;
default:
optionally with LEDs that indicate traffic
config USB_NET_PLUSB
- tristate "Prolific PL-2301/2302/25A1 based cables"
+ tristate "Prolific PL-2301/2302/25A1/27A1 based cables"
# if the handshake/init/reset problems, from original 'plusb',
# are ever resolved ... then remove "experimental"
depends on USB_USBNET
}
static const struct driver_info prolific_info = {
- .description = "Prolific PL-2301/PL-2302/PL-25A1",
+ .description = "Prolific PL-2301/PL-2302/PL-25A1/PL-27A1",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT,
/* some PL-2302 versions seem to fail usb_set_interface() */
.reset = pl_reset,
* Host-to-Host Cable
*/
.driver_info = (unsigned long) &prolific_info,
+
+},
+
+/* super speed cables */
+{
+ USB_DEVICE(0x067b, 0x27a1), /* PL-27A1, no eeprom
+ * also: goobay Active USB 3.0
+ * Data Link,
+ * Unitek Y-3501
+ */
+ .driver_info = (unsigned long) &prolific_info,
},
{ }, // END
module_usb_driver(plusb_driver);
MODULE_AUTHOR("David Brownell");
-MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1 USB Host to Host Link Driver");
+MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1/27A1 USB Host to Host Link Driver");
MODULE_LICENSE("GPL");
elength = 1;
goto next_desc;
}
+ if ((buflen < elength) || (elength < 3)) {
+ dev_err(&intf->dev, "invalid descriptor buffer length\n");
+ break;
+ }
if (buffer[1] != USB_DT_CS_INTERFACE) {
dev_err(&intf->dev, "skipping garbage\n");
goto next_desc;
ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS;
- ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
+ if (QCA_REV_WCN3990(ar))
+ ar->htt.max_num_pending_tx =
+ TARGET_HL_1_0_NUM_MSDU_DESC;
+ else
+ ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
WMI_STAT_PEER;
#define TARGET_HL_10_TLV_NUM_PEERS 14
#define TARGET_HL_10_TLV_AST_SKID_LIMIT 6
#define TARGET_HL_10_TLV_NUM_WDS_ENTRIES 2
+#define TARGET_HL_1_0_NUM_MSDU_DESC (3600)
/* Diagnostic Window */
#define CE_DIAG_PIPE 7
.num_different_channels = 1,
.max_interfaces = 4,
.n_limits = ARRAY_SIZE(ath10k_wcn3990_if_limit),
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+#endif
},
{
.limits = ath10k_wcn3990_qcs_if_limit,
.num_different_channels = 1,
.max_interfaces = 2,
.n_limits = ARRAY_SIZE(ath10k_wcn3990_if_limit_ibss),
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+#endif
},
};
struct regulatory_request *request))
{
const struct ieee80211_regdomain *regd;
+ u32 chan_num;
+ struct ieee80211_channel *chan;
wiphy->reg_notifier = reg_notifier;
wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
}
wiphy_apply_custom_regulatory(wiphy, regd);
+
+ /* For regulatory rules similar to the following:
+ * REG_RULE(2412-10, 2462+10, 40, 0, 20, 0), channels 12/13 are enabled
+ * due to support of 5/10 MHz.
+ * Therefore, disable 2.4 GHz channels that don't have 20 MHz bandwidth.
+ */
+ for (chan_num = 0;
+ chan_num < wiphy->bands[IEEE80211_BAND_2GHZ]->n_channels;
+ chan_num++) {
+ chan = &wiphy->bands[IEEE80211_BAND_2GHZ]->channels[chan_num];
+ if (chan->flags & IEEE80211_CHAN_NO_20MHZ)
+ chan->flags |= IEEE80211_CHAN_DISABLED;
+ }
+
ath_reg_apply_radar_flags(wiphy);
ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
return 0;
eth_broadcast_addr(params_le->bssid);
params_le->bss_type = DOT11_BSSTYPE_ANY;
- params_le->scan_type = 0;
+ params_le->scan_type = BRCMF_SCANTYPE_ACTIVE;
params_le->channel_num = 0;
params_le->nprobes = cpu_to_le32(-1);
params_le->active_time = cpu_to_le32(-1);
params_le->home_time = cpu_to_le32(-1);
memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le));
- /* if request is null exit so it will be all channel broadcast scan */
- if (!request)
- return;
-
n_ssids = request->n_ssids;
n_channels = request->n_channels;
+
/* Copy channel array if applicable */
brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
n_channels);
ptr += sizeof(ssid_le);
}
} else {
- brcmf_dbg(SCAN, "Broadcast scan %p\n", request->ssids);
- if ((request->ssids) && request->ssids->ssid_len) {
- brcmf_dbg(SCAN, "SSID %s len=%d\n",
- params_le->ssid_le.SSID,
- request->ssids->ssid_len);
- params_le->ssid_le.SSID_len =
- cpu_to_le32(request->ssids->ssid_len);
- memcpy(¶ms_le->ssid_le.SSID, request->ssids->ssid,
- request->ssids->ssid_len);
- }
+ brcmf_dbg(SCAN, "Performing passive scan\n");
+ params_le->scan_type = BRCMF_SCANTYPE_PASSIVE;
}
/* Adding mask to channel numbers */
params_le->channel_num =
struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
s32 status;
struct brcmf_escan_result_le *escan_result_le;
+ u32 escan_buflen;
struct brcmf_bss_info_le *bss_info_le;
struct brcmf_bss_info_le *bss = NULL;
u32 bi_length;
if (status == BRCMF_E_STATUS_PARTIAL) {
brcmf_dbg(SCAN, "ESCAN Partial result\n");
+ if (e->datalen < sizeof(*escan_result_le)) {
+ brcmf_err("invalid event data length\n");
+ goto exit;
+ }
escan_result_le = (struct brcmf_escan_result_le *) data;
if (!escan_result_le) {
brcmf_err("Invalid escan result (NULL pointer)\n");
goto exit;
}
+ escan_buflen = le32_to_cpu(escan_result_le->buflen);
+ if (escan_buflen > WL_ESCAN_BUF_SIZE ||
+ escan_buflen > e->datalen ||
+ escan_buflen < sizeof(*escan_result_le)) {
+ brcmf_err("Invalid escan buffer length: %d\n",
+ escan_buflen);
+ goto exit;
+ }
if (le16_to_cpu(escan_result_le->bss_count) != 1) {
brcmf_err("Invalid bss_count %d: ignoring\n",
escan_result_le->bss_count);
}
bi_length = le32_to_cpu(bss_info_le->length);
- if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
- WL_ESCAN_RESULTS_FIXED_SIZE)) {
- brcmf_err("Invalid bss_info length %d: ignoring\n",
+ if (bi_length != escan_buflen - WL_ESCAN_RESULTS_FIXED_SIZE) {
+ brcmf_err("Ignoring invalid bss_info length: %d\n",
bi_length);
goto exit;
}
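The brcmfmac hunks above check every device-supplied length against the transport's datalen and the fixed escan buffer size before anything is dereferenced. Condensed into one hypothetical validator:

#include <linux/errno.h>
#include <linux/types.h>

/* Sketch: reject an event whose claimed length doesn't fit anywhere */
static int demo_check_event(const void *data, u32 datalen,
			    u32 hdr_size, u32 max_buf)
{
	u32 buflen;

	if (datalen < hdr_size)		/* whole header must have arrived */
		return -EINVAL;
	buflen = le32_to_cpup((const __le32 *)data);	/* claimed length */
	if (buflen > max_buf ||		/* larger than our buffer */
	    buflen > datalen ||		/* larger than what was received */
	    buflen < hdr_size)		/* smaller than its own header */
		return -EINVAL;
	return 0;
}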
#define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff
#define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16
+/* scan type definitions */
+#define BRCMF_SCANTYPE_DEFAULT 0xFF
+#define BRCMF_SCANTYPE_ACTIVE 0
+#define BRCMF_SCANTYPE_PASSIVE 1
+
/* primary (ie tx) key */
#define BRCMF_PRIMARY_KEY (1 << 1)
#define DOT11_BSSTYPE_ANY 2
enum cnss_debug_quirks {
LINK_DOWN_SELF_RECOVERY,
SKIP_DEVICE_BOOT,
+ USE_CORE_ONLY_FW,
};
unsigned long quirks;
if (!pci_priv)
return -ENODEV;
+ cnss_pm_request_resume(pci_priv);
+
cnss_driver_call_remove(plat_priv);
cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE);
return 0;
}
+ if (test_bit(USE_CORE_ONLY_FW, &quirks)) {
+ clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
+ clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ return 0;
+ }
+
cnss_set_pin_connect_status(plat_priv);
if (qmi_bypass) {
if (!pci_priv)
return -ENODEV;
+ cnss_pm_request_resume(pci_priv);
+
cnss_driver_call_remove(plat_priv);
cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE);
if (!pci_priv)
return -ENODEV;
+ cnss_pr_dbg("Suspending PCI link\n");
if (!pci_priv->pci_link_state) {
cnss_pr_info("PCI link is already suspended!\n");
goto out;
if (!pci_priv)
return -ENODEV;
+ cnss_pr_dbg("Resuming PCI link\n");
if (pci_priv->pci_link_state) {
cnss_pr_info("PCI link is already resumed!\n");
goto out;
driver_ops = plat_priv->driver_ops;
if (driver_ops && driver_ops->suspend) {
ret = driver_ops->suspend(pci_dev, state);
- if (pci_priv->pci_link_state) {
- if (cnss_pci_set_mhi_state(pci_priv,
- CNSS_MHI_SUSPEND)) {
+ if (ret) {
+ cnss_pr_err("Failed to suspend host driver, err = %d\n",
+ ret);
+ ret = -EAGAIN;
+ goto out;
+ }
+ }
+
+ if (pci_priv->pci_link_state) {
+ ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND);
+ if (ret) {
+ if (driver_ops && driver_ops->resume)
driver_ops->resume(pci_dev);
- ret = -EAGAIN;
- goto out;
- }
-
- cnss_set_pci_config_space(pci_priv,
- SAVE_PCI_CONFIG_SPACE);
- pci_disable_device(pci_dev);
-
- ret = pci_set_power_state(pci_dev, PCI_D3hot);
- if (ret)
- cnss_pr_err("Failed to set D3Hot, err = %d\n",
- ret);
+ ret = -EAGAIN;
+ goto out;
}
+
+ cnss_set_pci_config_space(pci_priv,
+ SAVE_PCI_CONFIG_SPACE);
+ pci_disable_device(pci_dev);
+
+ ret = pci_set_power_state(pci_dev, PCI_D3hot);
+ if (ret)
+ cnss_pr_err("Failed to set D3Hot, err = %d\n",
+ ret);
}
cnss_pci_set_monitor_wake_intr(pci_priv, false);
+ return 0;
+
out:
return ret;
}
if (!plat_priv)
goto out;
- driver_ops = plat_priv->driver_ops;
- if (driver_ops && driver_ops->resume && !pci_priv->pci_link_down_ind) {
- ret = pci_enable_device(pci_dev);
- if (ret)
- cnss_pr_err("Failed to enable PCI device, err = %d\n",
- ret);
+ if (pci_priv->pci_link_down_ind)
+ goto out;
- if (pci_priv->saved_state)
- cnss_set_pci_config_space(pci_priv,
- RESTORE_PCI_CONFIG_SPACE);
+ ret = pci_enable_device(pci_dev);
+ if (ret)
+ cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
- pci_set_master(pci_dev);
- cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
+ if (pci_priv->saved_state)
+ cnss_set_pci_config_space(pci_priv,
+ RESTORE_PCI_CONFIG_SPACE);
+
+ pci_set_master(pci_dev);
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
+ driver_ops = plat_priv->driver_ops;
+ if (driver_ops && driver_ops->resume) {
ret = driver_ops->resume(pci_dev);
+ if (ret)
+ cnss_pr_err("Failed to resume host driver, err = %d\n",
+ ret);
}
+ return 0;
+
out:
return ret;
}
ret = pci_set_power_state(pci_dev, PCI_D3hot);
if (ret)
cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);
+
+ cnss_pr_dbg("Suspending PCI link\n");
if (cnss_set_pci_link(pci_priv, PCI_LINK_DOWN)) {
- cnss_pr_err("Failed to shutdown PCI link!\n");
+ cnss_pr_err("Failed to suspend PCI link!\n");
ret = -EAGAIN;
goto resume_mhi;
}
+
+ pci_priv->pci_link_state = PCI_LINK_DOWN;
}
- pci_priv->pci_link_state = PCI_LINK_DOWN;
cnss_pci_set_auto_suspended(pci_priv, 1);
cnss_pci_set_monitor_wake_intr(pci_priv, true);
pci_dev = pci_priv->pci_dev;
if (!pci_priv->pci_link_state) {
+ cnss_pr_dbg("Resuming PCI link\n");
if (cnss_set_pci_link(pci_priv, PCI_LINK_UP)) {
cnss_pr_err("Failed to resume PCI link!\n");
ret = -EAGAIN;
goto out;
}
pci_priv->pci_link_state = PCI_LINK_UP;
+
ret = pci_enable_device(pci_dev);
if (ret)
cnss_pr_err("Failed to enable PCI device, err = %d\n",
ret);
+
+ cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
+ pci_set_master(pci_dev);
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
}
- cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
- pci_set_master(pci_dev);
- cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
cnss_pci_set_auto_suspended(pci_priv, 0);
bus_bw_info = &plat_priv->bus_bw_info;
}
EXPORT_SYMBOL(cnss_auto_resume);
+int cnss_pm_request_resume(struct cnss_pci_data *pci_priv)
+{
+ struct pci_dev *pci_dev;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ pci_dev = pci_priv->pci_dev;
+ if (!pci_dev)
+ return -ENODEV;
+
+ return pm_request_resume(&pci_dev->dev);
+}
+
int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
{
struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
void cnss_pci_stop_mhi(struct cnss_pci_data *pci_priv);
void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv);
void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv);
+int cnss_pm_request_resume(struct cnss_pci_data *pci_priv);
#endif /* _CNSS_PCI_H */
/* NVM offsets (in words) definitions */
enum wkp_nvm_offsets {
/* NVM HW-Section offset (in words) definitions */
+ SUBSYSTEM_ID = 0x0A,
HW_ADDR = 0x15,
/* NVM SW-Section offset (in words) definitions */
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 * const nvm_ch_flags,
- bool lar_supported)
+ bool lar_supported, bool no_wide_in_5ghz)
{
int ch_idx;
int n_channels = 0;
struct ieee80211_channel *channel;
u16 ch_flags;
- bool is_5ghz;
int num_of_ch, num_2ghz_channels;
const u8 *nvm_chan;
}
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
+ bool is_5ghz = (ch_idx >= num_2ghz_channels);
+
ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
- if (ch_idx >= num_2ghz_channels &&
- !data->sku_cap_band_52GHz_enable)
+ if (is_5ghz && !data->sku_cap_band_52GHz_enable)
continue;
+ /* workaround to disable wide channels in 5GHz */
+ if (no_wide_in_5ghz && is_5ghz) {
+ ch_flags &= ~(NVM_CHANNEL_40MHZ |
+ NVM_CHANNEL_80MHZ |
+ NVM_CHANNEL_160MHZ);
+ }
+
if (!lar_supported && !(ch_flags & NVM_CHANNEL_VALID)) {
/*
* Channels might become valid later if lar is
n_channels++;
channel->hw_value = nvm_chan[ch_idx];
- channel->band = (ch_idx < num_2ghz_channels) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ channel->band = is_5ghz ?
+ IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
channel->center_freq =
ieee80211_channel_to_frequency(
channel->hw_value, channel->band);
* is not used in mvm, and is used for backwards compatibility
*/
channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
- is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
/* don't put limitations in case we're using LAR */
if (!lar_supported)
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 *ch_section,
- u8 tx_chains, u8 rx_chains, bool lar_supported)
+ u8 tx_chains, u8 rx_chains, bool lar_supported,
+ bool no_wide_in_5ghz)
{
int n_channels;
int n_used = 0;
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
n_channels = iwl_init_channel_map(
dev, cfg, data,
- &ch_section[NVM_CHANNELS], lar_supported);
+ &ch_section[NVM_CHANNELS], lar_supported,
+ no_wide_in_5ghz);
else
n_channels = iwl_init_channel_map(
dev, cfg, data,
&ch_section[NVM_CHANNELS_FAMILY_8000],
- lar_supported);
+ lar_supported,
+ no_wide_in_5ghz);
sband = &data->bands[IEEE80211_BAND_2GHZ];
sband->band = IEEE80211_BAND_2GHZ;
#define IWL_4165_DEVICE_ID 0x5501
+static bool
+iwl_nvm_no_wide_in_5ghz(struct device *dev, const struct iwl_cfg *cfg,
+ const __le16 *nvm_hw)
+{
+ /*
+ * Work around a bug in Indonesia SKUs where the regulatory data
+ * in some 7000-family OTPs erroneously allows wide channels in
+ * 5GHz. To check for Indonesia, we take the SKU value from
+ * bits 1-4 in the subsystem ID and check if it is either 5 or
+ * 9. In those cases, we need to force-disable wide channels
+ * in 5GHz; otherwise the FW will throw a sysassert when we try
+ * to use them.
+ */
+ if (cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ /*
+ * Unlike the other sections in the NVM, the hw
+ * section uses big-endian.
+ */
+ u16 subsystem_id = be16_to_cpup((const __be16 *)nvm_hw
+ + SUBSYSTEM_ID);
+ u8 sku = (subsystem_id & 0x1e) >> 1;
+
+ if (sku == 5 || sku == 9) {
+ IWL_DEBUG_EEPROM(dev,
+ "disabling wide channels in 5GHz (0x%0x %d)\n",
+ subsystem_id, sku);
+ return true;
+ }
+ }
+
+ return false;
+}
+
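A worked example of the SKU decode above: bits 1-4 are isolated with & 0x1e and shifted down once, so a subsystem ID of 0x0012 yields (0x12 & 0x1e) >> 1 = 9, an Indonesia SKU, and the 40/80/160 MHz flags get cleared for every 5GHz channel. The value below is illustrative only:

#include <stdio.h>

int main(void)
{
	unsigned short subsystem_id = 0x0012;	/* illustrative value */
	unsigned char sku = (subsystem_id & 0x1e) >> 1;	/* bits 1..4 */

	/* prints "sku = 9": one of the two Indonesia SKUs (5 or 9) */
	printf("sku = %u\n", sku);
	return 0;
}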
struct iwl_nvm_data *
iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
const __le16 *nvm_hw, const __le16 *nvm_sw,
u32 mac_addr0, u32 mac_addr1, u32 hw_id)
{
struct iwl_nvm_data *data;
+ bool no_wide_in_5ghz = iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw);
u32 sku;
u32 radio_cfg;
u16 lar_config;
iwl_set_hw_address(cfg, data, nvm_hw);
iwl_init_sbands(dev, cfg, data, nvm_sw,
- tx_chains, rx_chains, lar_fw_supported);
+ tx_chains, rx_chains, lar_fw_supported,
+ no_wide_in_5ghz);
} else {
u16 lar_offset = data->nvm_version < 0xE39 ?
NVM_LAR_OFFSET_FAMILY_8000_OLD :
iwl_init_sbands(dev, cfg, data, regulatory,
tx_chains, rx_chains,
- lar_fw_supported && data->lar_enabled);
+ lar_fw_supported && data->lar_enabled,
+ no_wide_in_5ghz);
}
data->calib_version = 255;
struct iwl_mvm_mc_iter_data *data = _data;
struct iwl_mvm *mvm = data->mvm;
struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = MCAST_FILTER_CMD,
+ .flags = CMD_ASYNC,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
int ret, len;
/* if we don't have free ports, mcast frames will be dropped */
memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
- ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
+ hcmd.len[0] = len;
+ hcmd.data[0] = cmd;
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (ret)
IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
}
static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
{
struct hwsim_new_radio_params param = { 0 };
+ const char *hwname = NULL;
param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
if (info->attrs[HWSIM_ATTR_NO_VIF])
param.no_vif = true;
- if (info->attrs[HWSIM_ATTR_RADIO_NAME])
- param.hwname = nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
+ if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
+ hwname = kasprintf(GFP_KERNEL, "%.*s",
+ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+ (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
+ if (!hwname)
+ return -ENOMEM;
+ param.hwname = hwname;
+ }
if (info->attrs[HWSIM_ATTR_USE_CHANCTX])
param.use_chanctx = true;
s64 idx = -1;
const char *hwname = NULL;
- if (info->attrs[HWSIM_ATTR_RADIO_ID])
+ if (info->attrs[HWSIM_ATTR_RADIO_ID]) {
idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
- else if (info->attrs[HWSIM_ATTR_RADIO_NAME])
- hwname = (void *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
- else
+ } else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
+ hwname = kasprintf(GFP_KERNEL, "%.*s",
+ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+ (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
+ if (!hwname)
+ return -ENOMEM;
+ } else
return -EINVAL;
spin_lock_bh(&hwsim_radio_lock);
if (data->idx != idx)
continue;
} else {
- if (strcmp(hwname, wiphy_name(data->hw->wiphy)))
+ if (!hwname ||
+ strcmp(hwname, wiphy_name(data->hw->wiphy)))
continue;
}
spin_unlock_bh(&hwsim_radio_lock);
mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
info);
+ kfree(hwname);
return 0;
}
spin_unlock_bh(&hwsim_radio_lock);
+ kfree(hwname);
return -ENODEV;
}
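Netlink string attributes are not guaranteed to be NUL-terminated, so the hwsim hunks above stop using nla_data() directly and instead take a bounded private copy with kasprintf("%.*s", ...), which must then be freed on every exit path. The copy in isolation:

#include <linux/slab.h>
#include <net/netlink.h>

/* Sketch: bounded, NUL-terminated copy of a netlink string attribute */
static char *demo_copy_name(const struct nlattr *attr)
{
	/* "%.*s" reads at most nla_len() bytes even without a NUL */
	return kasprintf(GFP_KERNEL, "%.*s",
			 nla_len(attr), (const char *)nla_data(attr));
}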
if (adapter->config_bands & BAND_A)
n_channels_a = mwifiex_band_5ghz.n_channels;
- adapter->num_in_chan_stats = max_t(u32, n_channels_bg, n_channels_a);
+ adapter->num_in_chan_stats = n_channels_bg + n_channels_a;
adapter->chan_stats = vmalloc(sizeof(*adapter->chan_stats) *
adapter->num_in_chan_stats);
sizeof(struct mwifiex_chan_stats);
for (i = 0 ; i < num_chan; i++) {
+ if (adapter->survey_idx >= adapter->num_in_chan_stats) {
+ mwifiex_dbg(adapter, WARN,
+ "FW reported too many channel results (max %d)\n",
+ adapter->num_in_chan_stats);
+ return;
+ }
chan_stats.chan_num = fw_chan_stats->chan_num;
chan_stats.bandcfg = fw_chan_stats->bandcfg;
chan_stats.flags = fw_chan_stats->flags;
entry += sizeof(__le16);
chan->pa_points_per_curve = 8;
- memset(chan->curve_data, 0, sizeof(*chan->curve_data));
+ memset(chan->curve_data, 0, sizeof(chan->curve_data));
memcpy(chan->curve_data, entry,
sizeof(struct p54_pa_curve_data_sample) *
min((u8)8, curve_data->points_per_channel));
/* find adapter */
if (!_rtl_pci_find_adapter(pdev, hw)) {
err = -ENODEV;
- goto fail3;
+ goto fail2;
}
/* Init IO handler */
pci_set_drvdata(pdev, NULL);
rtl_deinit_core(hw);
+fail2:
if (rtlpriv->io.pci_mem_start != 0)
pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
-fail2:
pci_release_regions(pdev);
complete(&rtlpriv->firmware_loading_complete);
wl->state = WL1251_STATE_OFF;
mutex_init(&wl->mutex);
+ spin_lock_init(&wl->wl_lock);
wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE;
wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE;
#ifndef _NVME_H
#define _NVME_H
+#include <linux/mutex.h>
#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
struct work_struct reset_work;
struct work_struct probe_work;
struct work_struct scan_work;
+ struct mutex shutdown_lock;
char name[12];
char serial[20];
char model[40];
nvme_dev_list_remove(dev);
+ mutex_lock(&dev->shutdown_lock);
if (pci_is_enabled(to_pci_dev(dev->dev))) {
nvme_freeze_queues(dev);
csts = readl(&dev->bar->csts);
for (i = dev->queue_count - 1; i >= 0; i--)
nvme_clear_queue(dev->queues[i]);
+ mutex_unlock(&dev->shutdown_lock);
}
static void nvme_dev_remove(struct nvme_dev *dev)
INIT_LIST_HEAD(&dev->namespaces);
INIT_WORK(&dev->reset_work, nvme_reset_work);
+ mutex_init(&dev->shutdown_lock);
dev->dev = get_device(&pdev->dev);
pci_set_drvdata(pdev, dev);
if (rc) {
ctrl_info(ctrl, "Can't get msi for the hotplug controller\n");
ctrl_info(ctrl, "Use INTx for the hotplug controller\n");
+ } else {
+ pci_set_master(pdev);
}
rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
- char *driver_override, *old = pdev->driver_override, *cp;
+ char *driver_override, *old, *cp;
/* We need to keep extra room for a newline */
if (count >= (PAGE_SIZE - 1))
if (cp)
*cp = '\0';
+ device_lock(dev);
+ old = pdev->driver_override;
if (strlen(driver_override)) {
pdev->driver_override = driver_override;
} else {
kfree(driver_override);
pdev->driver_override = NULL;
}
+ device_unlock(dev);
kfree(old);
struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ ssize_t len;
- return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
+ device_lock(dev);
+ len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
+ device_unlock(dev);
+ return len;
}
static DEVICE_ATTR_RW(driver_override);
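The pci hunks above take device_lock() in both store and show so a reader can never observe a half-swapped pointer and a concurrent writer can never free a string still being printed. The store side, sketched against a hypothetical demo_dev container with the same field:

#include <linux/device.h>
#include <linux/slab.h>

/* Hypothetical container mirroring pci_dev's driver_override field */
struct demo_dev {
	struct device dev;
	char *driver_override;
};

static ssize_t demo_override_store(struct demo_dev *ddev,
				   const char *buf, size_t count)
{
	char *new, *old;

	new = kstrndup(buf, count, GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	device_lock(&ddev->dev);	/* serialize with show() and probe */
	old = ddev->driver_override;
	ddev->driver_override = new;
	device_unlock(&ddev->dev);

	kfree(old);	/* safe: no reader can still hold this pointer */
	return count;
}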
int retval;
struct ipa_wan_msg *wan_msg;
struct ipa_msg_meta msg_meta;
+ struct ipa_wan_msg cache_wan_msg;
wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
if (!wan_msg) {
return -EFAULT;
}
+ memcpy(&cache_wan_msg, wan_msg, sizeof(cache_wan_msg));
+
memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
msg_meta.msg_type = msg_type;
msg_meta.msg_len = sizeof(struct ipa_wan_msg);
/* cache the cne event */
memcpy(&ipa_ctx->ipa_cne_evt_req_cache[
ipa_ctx->num_ipa_cne_evt_req].wan_msg,
- wan_msg,
- sizeof(struct ipa_wan_msg));
+ &cache_wan_msg,
+ sizeof(cache_wan_msg));
memcpy(&ipa_ctx->ipa_cne_evt_req_cache[
ipa_ctx->num_ipa_cne_evt_req].msg_meta,
mutex_unlock(&add_mux_channel_lock);
return -EFAULT;
}
+ extend_ioctl_data.u.rmnet_mux_val.vchannel_name
+ [IFNAMSIZ-1] = '\0';
IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
extend_ioctl_data.u.rmnet_mux_val.mux_id,
extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
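The rmnet hunks above force a NUL into the last byte of the channel name copied in from the ioctl: the data is user-controlled, and without the terminator a name that exactly fills the array would be printed and parsed past its end. In isolation:

#include <linux/if.h>

/* Sketch: terminate a fixed-size string received from userspace */
static void demo_terminate_name(char name[IFNAMSIZ])
{
	name[IFNAMSIZ - 1] = '\0';
}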
int retval;
struct ipa_wan_msg *wan_msg;
struct ipa_msg_meta msg_meta;
+ struct ipa_wan_msg cache_wan_msg;
wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
if (!wan_msg) {
return -EFAULT;
}
+ memcpy(&cache_wan_msg, wan_msg, sizeof(cache_wan_msg));
+
memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
msg_meta.msg_type = msg_type;
msg_meta.msg_len = sizeof(struct ipa_wan_msg);
/* cache the cne event */
memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
ipa3_ctx->num_ipa_cne_evt_req].wan_msg,
- wan_msg,
- sizeof(struct ipa_wan_msg));
+ &cache_wan_msg,
+ sizeof(cache_wan_msg));
memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
ipa3_ctx->num_ipa_cne_evt_req].msg_meta,
return 0;
}
+/* This function is called as part of USB pipe resume */
int ipa3_xdci_connect(u32 clnt_hdl)
{
int result;
return result;
}
+
+/* This function is called as part of USB pipe connect */
int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
{
struct ipa3_ep_context *ep;
int result = -EFAULT;
enum gsi_status gsi_res;
+ struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
IPADBG("entry\n");
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
goto write_chan_scratch_fail;
}
}
+
+ if (IPA_CLIENT_IS_PROD(ep->client) && ep->skip_ep_cfg) {
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_delay = true;
+ ep->ep_delay_set = true;
+
+ result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ if (result)
+ IPAERR("client (ep: %d) failed result=%d\n",
+ clnt_hdl, result);
+ else
+ IPADBG("client (ep: %d) success\n", clnt_hdl);
+ } else {
+ ep->ep_delay_set = false;
+ }
+
gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
if (gsi_res != GSI_STATUS_SUCCESS) {
IPAERR("Error starting channel: %d\n", gsi_res);
/* Clocks should be voted for before invoking this function */
static int ipa3_stop_ul_chan_with_data_drain(u32 qmi_req_id,
- u32 source_pipe_bitmask, bool should_force_clear, u32 clnt_hdl)
+ u32 source_pipe_bitmask, bool should_force_clear, u32 clnt_hdl,
+ bool remove_delay)
{
int result;
bool is_empty = false;
int i;
bool stop_in_proc;
struct ipa3_ep_context *ep;
+ struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
if (!stop_in_proc)
goto exit;
+ if (remove_delay && ep->ep_delay_set == true) {
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_delay = false;
+ result = ipa3_cfg_ep_ctrl(clnt_hdl,
+ &ep_cfg_ctrl);
+ if (result) {
+ IPAERR
+ ("client (ep: %d) failed to remove delay result=%d\n",
+ clnt_hdl, result);
+ } else {
+ IPADBG("client (ep: %d) delay removed\n",
+ clnt_hdl);
+ ep->ep_delay_set = false;
+ }
+ }
+
+ /* if stop_in_proc, let's wait for emptiness */
for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
result = ipa3_is_xdci_channel_empty(ep, &is_empty);
if (should_force_clear)
ipa3_disable_force_clear(qmi_req_id);
exit:
+ if (remove_delay && ep->ep_delay_set == true) {
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_delay = false;
+ result = ipa3_cfg_ep_ctrl(clnt_hdl,
+ &ep_cfg_ctrl);
+ if (result) {
+ IPAERR
+ ("client (ep: %d) failed to remove delay result=%d\n",
+ clnt_hdl, result);
+ } else {
+ IPADBG("client (ep: %d) delay removed\n",
+ clnt_hdl);
+ ep->ep_delay_set = false;
+ }
+ }
return result;
}
source_pipe_bitmask = 1 <<
ipa3_get_ep_mapping(ep->client);
result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id,
- source_pipe_bitmask, should_force_clear, clnt_hdl);
+ source_pipe_bitmask, should_force_clear, clnt_hdl,
+ true);
if (result) {
IPAERR("Fail to stop UL channel with data drain\n");
WARN_ON(1);
if (!is_dpl) {
source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ul_ep->client);
result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id,
- source_pipe_bitmask, should_force_clear, ul_clnt_hdl);
+ source_pipe_bitmask, should_force_clear, ul_clnt_hdl,
+ false);
if (result) {
IPAERR("Error stopping UL channel: result = %d\n",
result);
bool switch_to_intr;
int inactive_cycles;
u32 eot_in_poll_err;
+ bool ep_delay_set;
/* sys MUST be the last element of this struct */
struct ipa3_sys_context *sys;
add_mux_channel_lock);
return -EFAULT;
}
+ extend_ioctl_data.u.rmnet_mux_val.vchannel_name
+ [IFNAMSIZ-1] = '\0';
IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
extend_ioctl_data.u.rmnet_mux_val.mux_id,
extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
menu "Qualcomm Technologies Inc Charger and Fuel Gauge support"
+config QPNP_SMBCHARGER
+ tristate "QPNP SMB Charger driver"
+ depends on MFD_SPMI_PMIC
+ help
+ Say Y here to enable the dual path switch mode battery charger which
+ supports USB detection and battery charging up to 3A.
+ The driver also offers relevant information to userspace via the
+ power supply framework.
+
+config QPNP_FG
+ tristate "QPNP fuel gauge driver"
+ depends on MFD_SPMI_PMIC
+ help
+ Say Y here to enable the Fuel Gauge driver. This adds support for
+ battery fuel gauging and state of charge of the battery connected to
+ the fuel gauge. The state of charge is reported through a BMS power
+ supply property, and uevents are sent when the capacity is updated.
+
config QPNP_FG_GEN3
tristate "QPNP GEN3 fuel gauge driver"
depends on MFD_SPMI_PMIC
+obj-$(CONFIG_QPNP_SMBCHARGER) += qpnp-smbcharger.o batterydata-lib.o pmic-voter.o
+obj-$(CONFIG_QPNP_FG) += qpnp-fg.o
obj-$(CONFIG_QPNP_FG_GEN3) += qpnp-fg-gen3.o fg-memif.o fg-util.o
obj-$(CONFIG_SMB135X_CHARGER) += smb135x-charger.o pmic-voter.o
obj-$(CONFIG_SMB1351_USB_CHARGER) += battery.o smb1351-charger.o pmic-voter.o
struct mutex sram_rw_lock;
struct mutex charge_full_lock;
struct mutex qnovo_esr_ctrl_lock;
+ spinlock_t suspend_lock;
u32 batt_soc_base;
u32 batt_info_base;
u32 mem_if_base;
bool slope_limit_en;
bool use_ima_single_mode;
bool qnovo_enable;
+ bool suspended;
struct completion soc_update;
struct completion soc_ready;
struct delayed_work profile_load_work;
if (rc < 0)
return rc;
- *msoc = DIV_ROUND_CLOSEST(*msoc * FULL_CAPACITY, FULL_SOC_RAW);
+ /*
+ * To have better endpoints for 0 and 100, it is good to tune the
+ * calculation by discarding the values 0 and 255 while rounding off.
+ * The rest of the values (1-254) will be scaled to 1-99. DIV_ROUND_UP
+ * is not suitable here, as it rounds any value higher than 252 up to 100.
+ */
+ if (*msoc == FULL_SOC_RAW)
+ *msoc = 100;
+ else if (*msoc == 0)
+ *msoc = 0;
+ else
+ *msoc = DIV_ROUND_CLOSEST((*msoc - 1) * (FULL_CAPACITY - 2),
+ FULL_SOC_RAW - 2) + 1;
return 0;
}
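A worked example of the rescaling above, assuming FULL_SOC_RAW = 255 and FULL_CAPACITY = 100 (consistent with the surrounding code): raw 0 and 255 pin the endpoints, and raw 1-254 maps onto 1-99:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static int scale_msoc(int msoc)
{
	if (msoc == 255)
		return 100;
	if (msoc == 0)
		return 0;
	return DIV_ROUND_CLOSEST((msoc - 1) * 98, 253) + 1;
}

int main(void)
{
	/* raw 1 -> 1, raw 128 -> 50, raw 254 -> 99 */
	printf("%d %d %d\n", scale_msoc(1), scale_msoc(128), scale_msoc(254));
	return 0;
}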
struct power_supply *psy = data;
struct fg_chip *chip = container_of(nb, struct fg_chip, nb);
+ spin_lock(&chip->suspend_lock);
+ if (chip->suspended) {
+ /* Return if we are still suspended */
+ spin_unlock(&chip->suspend_lock);
+ return NOTIFY_OK;
+ }
+ spin_unlock(&chip->suspend_lock);
+
if (event != PSY_EVENT_PROP_CHANGED)
return NOTIFY_OK;
mutex_init(&chip->ttf.lock);
mutex_init(&chip->charge_full_lock);
mutex_init(&chip->qnovo_esr_ctrl_lock);
+ spin_lock_init(&chip->suspend_lock);
init_completion(&chip->soc_update);
init_completion(&chip->soc_ready);
INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work);
struct fg_chip *chip = dev_get_drvdata(dev);
int rc;
+ spin_lock(&chip->suspend_lock);
+ chip->suspended = true;
+ spin_unlock(&chip->suspend_lock);
+
rc = fg_esr_timer_config(chip, true);
if (rc < 0)
pr_err("Error in configuring ESR timer, rc=%d\n", rc);
if (fg_sram_dump)
schedule_delayed_work(&chip->sram_dump_work,
msecs_to_jiffies(fg_sram_dump_period_ms));
+
+ if (!work_pending(&chip->status_change_work)) {
+ pm_stay_awake(chip->dev);
+ schedule_work(&chip->status_change_work);
+ }
+
+ spin_lock(&chip->suspend_lock);
+ chip->suspended = false;
+ spin_unlock(&chip->suspend_lock);
+
return 0;
}
--- /dev/null
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "FG: %s: " fmt, __func__
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/rtc.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/ktime.h>
+#include <linux/power_supply.h>
+#include <linux/of_batterydata.h>
+#include <linux/string_helpers.h>
+#include <linux/alarmtimer.h>
+#include <linux/qpnp/qpnp-revid.h>
+
+/* Register offsets */
+
+/* Interrupt offsets */
+#define INT_RT_STS(base) (base + 0x10)
+#define INT_EN_CLR(base) (base + 0x16)
+
+/* SPMI Register offsets */
+#define SOC_MONOTONIC_SOC 0x09
+#define SOC_BOOT_MOD 0x50
+#define SOC_RESTART 0x51
+
+#define REG_OFFSET_PERP_SUBTYPE 0x05
+
+/* RAM register offsets */
+#define RAM_OFFSET 0x400
+
+/* Bit/Mask definitions */
+#define FULL_PERCENT 0xFF
+#define MAX_TRIES_SOC 5
+#define MA_MV_BIT_RES 39
+#define MSB_SIGN BIT(7)
+#define IBAT_VBAT_MASK 0x7F
+#define NO_OTP_PROF_RELOAD BIT(6)
+#define REDO_FIRST_ESTIMATE BIT(3)
+#define RESTART_GO BIT(0)
+#define THERM_DELAY_MASK 0xE0
+
+/* SUBTYPE definitions */
+#define FG_SOC 0x9
+#define FG_BATT 0xA
+#define FG_ADC 0xB
+#define FG_MEMIF 0xC
+
+#define QPNP_FG_DEV_NAME "qcom,qpnp-fg"
+#define MEM_IF_TIMEOUT_MS 5000
+#define BUCKET_COUNT 8
+#define BUCKET_SOC_PCT (256 / BUCKET_COUNT)
+
+#define BCL_MA_TO_ADC(_current, _adc_val) { \
+ _adc_val = (u8)((_current) * 100 / 976); \
+}
+
+/* Debug Flag Definitions */
+enum {
+ FG_SPMI_DEBUG_WRITES = BIT(0), /* Show SPMI writes */
+ FG_SPMI_DEBUG_READS = BIT(1), /* Show SPMI reads */
+ FG_IRQS = BIT(2), /* Show interrupts */
+ FG_MEM_DEBUG_WRITES = BIT(3), /* Show SRAM writes */
+ FG_MEM_DEBUG_READS = BIT(4), /* Show SRAM reads */
+ FG_POWER_SUPPLY = BIT(5), /* Show POWER_SUPPLY */
+ FG_STATUS = BIT(6), /* Show FG status changes */
+ FG_AGING = BIT(7), /* Show FG aging algorithm */
+};
+
+/* PMIC REVISIONS */
+#define REVID_RESERVED 0
+#define REVID_VARIANT 1
+#define REVID_ANA_MAJOR 2
+#define REVID_DIG_MAJOR 3
+
+enum dig_major {
+ DIG_REV_1 = 0x1,
+ DIG_REV_2 = 0x2,
+ DIG_REV_3 = 0x3,
+};
+
+enum pmic_subtype {
+ PMI8994 = 10,
+ PMI8950 = 17,
+ PMI8996 = 19,
+ PMI8937 = 55,
+};
+
+enum wa_flags {
+ IADC_GAIN_COMP_WA = BIT(0),
+ USE_CC_SOC_REG = BIT(1),
+ PULSE_REQUEST_WA = BIT(2),
+ BCL_HI_POWER_FOR_CHGLED_WA = BIT(3)
+};
+
+enum current_sense_type {
+ INTERNAL_CURRENT_SENSE,
+ EXTERNAL_CURRENT_SENSE,
+};
+
+struct fg_mem_setting {
+ u16 address;
+ u8 offset;
+ int value;
+};
+
+struct fg_mem_data {
+ u16 address;
+ u8 offset;
+ unsigned int len;
+ int value;
+};
+
+struct fg_learning_data {
+ int64_t cc_uah;
+ int64_t learned_cc_uah;
+ int init_cc_pc_val;
+ bool active;
+ bool feedback_on;
+ struct mutex learning_lock;
+ ktime_t time_stamp;
+ /* configuration properties */
+ int max_start_soc;
+ int max_increment;
+ int max_decrement;
+ int min_temp;
+ int max_temp;
+ int vbat_est_thr_uv;
+};
+
+struct fg_rslow_data {
+ u8 rslow_cfg;
+ u8 rslow_thr;
+ u8 rs_to_rslow[2];
+ u8 rslow_comp[4];
+ uint32_t chg_rs_to_rslow;
+ uint32_t chg_rslow_comp_c1;
+ uint32_t chg_rslow_comp_c2;
+ uint32_t chg_rslow_comp_thr;
+ bool active;
+ struct mutex lock;
+};
+
+struct fg_cyc_ctr_data {
+ bool en;
+ bool started[BUCKET_COUNT];
+ u16 count[BUCKET_COUNT];
+ u8 last_soc[BUCKET_COUNT];
+ int id;
+ struct mutex lock;
+};
+
+struct fg_iadc_comp_data {
+ u8 dfl_gain_reg[2];
+ bool gain_active;
+ int64_t dfl_gain;
+};
+
+struct fg_cc_soc_data {
+ int init_sys_soc;
+ int init_cc_soc;
+ int full_capacity;
+ int delta_soc;
+};
+
+/* FG_MEMIF setting index */
+enum fg_mem_setting_index {
+ FG_MEM_SOFT_COLD = 0,
+ FG_MEM_SOFT_HOT,
+ FG_MEM_HARD_COLD,
+ FG_MEM_HARD_HOT,
+ FG_MEM_RESUME_SOC,
+ FG_MEM_BCL_LM_THRESHOLD,
+ FG_MEM_BCL_MH_THRESHOLD,
+ FG_MEM_TERM_CURRENT,
+ FG_MEM_CHG_TERM_CURRENT,
+ FG_MEM_IRQ_VOLT_EMPTY,
+ FG_MEM_CUTOFF_VOLTAGE,
+ FG_MEM_VBAT_EST_DIFF,
+ FG_MEM_DELTA_SOC,
+ FG_MEM_BATT_LOW,
+ FG_MEM_THERM_DELAY,
+ FG_MEM_SETTING_MAX,
+};
+
+/* FG_MEMIF data index */
+enum fg_mem_data_index {
+ FG_DATA_BATT_TEMP = 0,
+ FG_DATA_OCV,
+ FG_DATA_VOLTAGE,
+ FG_DATA_CURRENT,
+ FG_DATA_BATT_ESR,
+ FG_DATA_BATT_ESR_COUNT,
+ FG_DATA_BATT_SOC,
+ FG_DATA_CC_CHARGE,
+ FG_DATA_VINT_ERR,
+ FG_DATA_CPRED_VOLTAGE,
+	/* values below this are only read once per profile reload */
+ FG_DATA_BATT_ID,
+ FG_DATA_BATT_ID_INFO,
+ FG_DATA_MAX,
+};
+
+#define SETTING(_idx, _address, _offset, _value) \
+ [FG_MEM_##_idx] = { \
+ .address = _address, \
+ .offset = _offset, \
+ .value = _value, \
+ } \
+
+static struct fg_mem_setting settings[FG_MEM_SETTING_MAX] = {
+	/* ID, Address, Offset, Value */
+ SETTING(SOFT_COLD, 0x454, 0, 100),
+ SETTING(SOFT_HOT, 0x454, 1, 400),
+ SETTING(HARD_COLD, 0x454, 2, 50),
+ SETTING(HARD_HOT, 0x454, 3, 450),
+ SETTING(RESUME_SOC, 0x45C, 1, 0),
+ SETTING(BCL_LM_THRESHOLD, 0x47C, 2, 50),
+ SETTING(BCL_MH_THRESHOLD, 0x47C, 3, 752),
+ SETTING(TERM_CURRENT, 0x40C, 2, 250),
+ SETTING(CHG_TERM_CURRENT, 0x4F8, 2, 250),
+ SETTING(IRQ_VOLT_EMPTY, 0x458, 3, 3100),
+ SETTING(CUTOFF_VOLTAGE, 0x40C, 0, 3200),
+ SETTING(VBAT_EST_DIFF, 0x000, 0, 30),
+ SETTING(DELTA_SOC, 0x450, 3, 1),
+ SETTING(BATT_LOW, 0x458, 0, 4200),
+ SETTING(THERM_DELAY, 0x4AC, 3, 0),
+};
+
+#define DATA(_idx, _address, _offset, _length, _value) \
+ [FG_DATA_##_idx] = { \
+ .address = _address, \
+ .offset = _offset, \
+ .len = _length, \
+ .value = _value, \
+ } \
+
+static struct fg_mem_data fg_data[FG_DATA_MAX] = {
+	/* ID, Address, Offset, Length, Value */
+ DATA(BATT_TEMP, 0x550, 2, 2, -EINVAL),
+ DATA(OCV, 0x588, 3, 2, -EINVAL),
+ DATA(VOLTAGE, 0x5CC, 1, 2, -EINVAL),
+ DATA(CURRENT, 0x5CC, 3, 2, -EINVAL),
+ DATA(BATT_ESR, 0x554, 2, 2, -EINVAL),
+ DATA(BATT_ESR_COUNT, 0x558, 2, 2, -EINVAL),
+ DATA(BATT_SOC, 0x56C, 1, 3, -EINVAL),
+ DATA(CC_CHARGE, 0x570, 0, 4, -EINVAL),
+ DATA(VINT_ERR, 0x560, 0, 4, -EINVAL),
+ DATA(CPRED_VOLTAGE, 0x540, 0, 2, -EINVAL),
+ DATA(BATT_ID, 0x594, 1, 1, -EINVAL),
+ DATA(BATT_ID_INFO, 0x594, 3, 1, -EINVAL),
+};
+
+static int fg_debug_mask;
+module_param_named(
+ debug_mask, fg_debug_mask, int, S_IRUSR | S_IWUSR
+);
+
+static int fg_sense_type = -EINVAL;
+static int fg_restart;
+
+static int fg_est_dump;
+module_param_named(
+ first_est_dump, fg_est_dump, int, S_IRUSR | S_IWUSR
+);
+
+static char *fg_batt_type;
+module_param_named(
+ battery_type, fg_batt_type, charp, S_IRUSR | S_IWUSR
+);
+
+static int fg_sram_update_period_ms = 30000;
+module_param_named(
+ sram_update_period_ms, fg_sram_update_period_ms, int, S_IRUSR | S_IWUSR
+);
+
+struct fg_irq {
+ int irq;
+ unsigned long disabled;
+};
+
+enum fg_soc_irq {
+ HIGH_SOC,
+ LOW_SOC,
+ FULL_SOC,
+ EMPTY_SOC,
+ DELTA_SOC,
+ FIRST_EST_DONE,
+ SW_FALLBK_OCV,
+ SW_FALLBK_NEW_BATT,
+ FG_SOC_IRQ_COUNT,
+};
+
+enum fg_batt_irq {
+ JEITA_SOFT_COLD,
+ JEITA_SOFT_HOT,
+ VBATT_LOW,
+ BATT_IDENTIFIED,
+ BATT_ID_REQ,
+ BATTERY_UNKNOWN,
+ BATT_MISSING,
+ BATT_MATCH,
+ FG_BATT_IRQ_COUNT,
+};
+
+enum fg_mem_if_irq {
+ FG_MEM_AVAIL,
+ TA_RCVRY_SUG,
+ FG_MEM_IF_IRQ_COUNT,
+};
+
+enum fg_batt_aging_mode {
+ FG_AGING_NONE,
+ FG_AGING_ESR,
+ FG_AGING_CC,
+};
+
+enum register_type {
+ MEM_INTF_CFG,
+ MEM_INTF_CTL,
+ MEM_INTF_ADDR_LSB,
+ MEM_INTF_RD_DATA0,
+ MEM_INTF_WR_DATA0,
+ MAX_ADDRESS,
+};
+
+struct register_offset {
+ u16 address[MAX_ADDRESS];
+};
+
+static struct register_offset offset[] = {
+ [0] = {
+ /* CFG CTL LSB RD0 WD0 */
+ .address = {0x40, 0x41, 0x42, 0x4C, 0x48},
+ },
+ [1] = {
+ /* CFG CTL LSB RD0 WD0 */
+ .address = {0x50, 0x51, 0x61, 0x67, 0x63},
+ },
+};
+
+#define MEM_INTF_CFG(chip) \
+ ((chip)->mem_base + (chip)->offset[MEM_INTF_CFG])
+#define MEM_INTF_CTL(chip) \
+ ((chip)->mem_base + (chip)->offset[MEM_INTF_CTL])
+#define MEM_INTF_ADDR_LSB(chip) \
+ ((chip)->mem_base + (chip)->offset[MEM_INTF_ADDR_LSB])
+#define MEM_INTF_RD_DATA0(chip) \
+ ((chip)->mem_base + (chip)->offset[MEM_INTF_RD_DATA0])
+#define MEM_INTF_WR_DATA0(chip) \
+ ((chip)->mem_base + (chip)->offset[MEM_INTF_WR_DATA0])
+
+struct fg_wakeup_source {
+ struct wakeup_source source;
+ unsigned long enabled;
+};
+
+static void fg_stay_awake(struct fg_wakeup_source *source)
+{
+ if (!__test_and_set_bit(0, &source->enabled)) {
+ __pm_stay_awake(&source->source);
+ pr_debug("enabled source %s\n", source->source.name);
+ }
+}
+
+static void fg_relax(struct fg_wakeup_source *source)
+{
+ if (__test_and_clear_bit(0, &source->enabled)) {
+ __pm_relax(&source->source);
+ pr_debug("disabled source %s\n", source->source.name);
+ }
+}
+
+#define THERMAL_COEFF_N_BYTES 6
+struct fg_chip {
+ struct device *dev;
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ u8 pmic_subtype;
+ u8 pmic_revision[4];
+ u8 revision[4];
+ u16 soc_base;
+ u16 batt_base;
+ u16 mem_base;
+ u16 vbat_adc_addr;
+ u16 ibat_adc_addr;
+ u16 tp_rev_addr;
+ u32 wa_flag;
+ atomic_t memif_user_cnt;
+ struct fg_irq soc_irq[FG_SOC_IRQ_COUNT];
+ struct fg_irq batt_irq[FG_BATT_IRQ_COUNT];
+ struct fg_irq mem_irq[FG_MEM_IF_IRQ_COUNT];
+ struct completion sram_access_granted;
+ struct completion sram_access_revoked;
+ struct completion batt_id_avail;
+ struct completion first_soc_done;
+ struct power_supply *bms_psy;
+ struct power_supply_desc bms_psy_d;
+ struct mutex rw_lock;
+ struct mutex sysfs_restart_lock;
+ struct delayed_work batt_profile_init;
+ struct work_struct dump_sram;
+ struct work_struct status_change_work;
+ struct work_struct cycle_count_work;
+ struct work_struct battery_age_work;
+ struct work_struct update_esr_work;
+ struct work_struct set_resume_soc_work;
+ struct work_struct rslow_comp_work;
+ struct work_struct sysfs_restart_work;
+ struct work_struct init_work;
+ struct work_struct charge_full_work;
+ struct work_struct gain_comp_work;
+ struct work_struct bcl_hi_power_work;
+ struct power_supply *batt_psy;
+ struct power_supply *usb_psy;
+ struct power_supply *dc_psy;
+ struct fg_wakeup_source memif_wakeup_source;
+ struct fg_wakeup_source profile_wakeup_source;
+ struct fg_wakeup_source empty_check_wakeup_source;
+ struct fg_wakeup_source resume_soc_wakeup_source;
+ struct fg_wakeup_source gain_comp_wakeup_source;
+ struct fg_wakeup_source capacity_learning_wakeup_source;
+ bool first_profile_loaded;
+ struct fg_wakeup_source update_temp_wakeup_source;
+ struct fg_wakeup_source update_sram_wakeup_source;
+ bool fg_restarting;
+ bool profile_loaded;
+ bool use_otp_profile;
+ bool battery_missing;
+ bool power_supply_registered;
+ bool sw_rbias_ctrl;
+ bool use_thermal_coefficients;
+ bool esr_strict_filter;
+ bool soc_empty;
+ bool charge_done;
+ bool resume_soc_lowered;
+ bool vbat_low_irq_enabled;
+ bool charge_full;
+ bool hold_soc_while_full;
+ bool input_present;
+ bool otg_present;
+ bool safety_timer_expired;
+ bool bad_batt_detection_en;
+ bool bcl_lpm_disabled;
+ bool charging_disabled;
+ struct delayed_work update_jeita_setting;
+ struct delayed_work update_sram_data;
+ struct delayed_work update_temp_work;
+ struct delayed_work check_empty_work;
+ char *batt_profile;
+ u8 thermal_coefficients[THERMAL_COEFF_N_BYTES];
+ u32 cc_cv_threshold_mv;
+ unsigned int batt_profile_len;
+ unsigned int batt_max_voltage_uv;
+ const char *batt_type;
+ const char *batt_psy_name;
+ unsigned long last_sram_update_time;
+ unsigned long last_temp_update_time;
+ int64_t ocv_coeffs[12];
+ int64_t cutoff_voltage;
+ int evaluation_current;
+ int ocv_junction_p1p2;
+ int ocv_junction_p2p3;
+ int nom_cap_uah;
+ int actual_cap_uah;
+ int status;
+ int prev_status;
+ int health;
+ enum fg_batt_aging_mode batt_aging_mode;
+ /* capacity learning */
+ struct fg_learning_data learning_data;
+ struct alarm fg_cap_learning_alarm;
+ struct work_struct fg_cap_learning_work;
+ struct fg_cc_soc_data sw_cc_soc_data;
+ /* rslow compensation */
+ struct fg_rslow_data rslow_comp;
+ /* cycle counter */
+ struct fg_cyc_ctr_data cyc_ctr;
+ /* iadc compensation */
+ struct fg_iadc_comp_data iadc_comp_data;
+ /* interleaved memory access */
+ u16 *offset;
+ bool ima_supported;
+ bool init_done;
+ /* jeita hysteresis */
+ bool jeita_hysteresis_support;
+ bool batt_hot;
+ bool batt_cold;
+ int cold_hysteresis;
+ int hot_hysteresis;
+ /* ESR pulse tuning */
+ struct fg_wakeup_source esr_extract_wakeup_source;
+ struct work_struct esr_extract_config_work;
+ bool esr_extract_disabled;
+ bool imptr_pulse_slow_en;
+ bool esr_pulse_tune_en;
+};
+
+/* FG_MEMIF DEBUGFS structures */
+#define ADDR_LEN	4	/* 3-digit address + 1 space character */
+#define CHARS_PER_ITEM 3 /* Format is 'XX ' */
+#define ITEMS_PER_LINE 4 /* 4 data items per line */
+#define MAX_LINE_LENGTH (ADDR_LEN + (ITEMS_PER_LINE * CHARS_PER_ITEM) + 1)
+#define MAX_REG_PER_TRANSACTION (8)
+
+static const char *DFS_ROOT_NAME = "fg_memif";
+static const mode_t DFS_MODE = S_IRUSR | S_IWUSR;
+static const char *default_batt_type = "Unknown Battery";
+static const char *loading_batt_type = "Loading Battery Data";
+static const char *missing_batt_type = "Disconnected Battery";
+
+/* Log buffer */
+struct fg_log_buffer {
+ size_t rpos; /* Current 'read' position in buffer */
+ size_t wpos; /* Current 'write' position in buffer */
+ size_t len; /* Length of the buffer */
+ char data[0]; /* Log buffer */
+};
+
+/* transaction parameters */
+struct fg_trans {
+ u32 cnt; /* Number of bytes to read */
+ u16 addr; /* 12-bit address in SRAM */
+ u32 offset; /* Offset of last read data + byte offset */
+ struct fg_chip *chip;
+ struct fg_log_buffer *log; /* log buffer */
+ u8 *data; /* fg data that is read */
+ struct mutex memif_dfs_lock; /* Prevent thread concurrency */
+};
+
+struct fg_dbgfs {
+ u32 cnt;
+ u32 addr;
+ struct fg_chip *chip;
+ struct dentry *root;
+ struct mutex lock;
+ struct debugfs_blob_wrapper help_msg;
+};
+
+static struct fg_dbgfs dbgfs_data = {
+ .lock = __MUTEX_INITIALIZER(dbgfs_data.lock),
+ .help_msg = {
+ .data =
+"FG Debug-FS support\n"
+"\n"
+"Hierarchy schema:\n"
+"/sys/kernel/debug/fg_memif\n"
+" /help -- Static help text\n"
+" /address -- Starting register address for reads or writes\n"
+" /count -- Number of registers to read (only used for reads)\n"
+" /data -- Initiates the SRAM read (formatted output)\n"
+"\n",
+ },
+};
+
+static const struct of_device_id fg_match_table[] = {
+ { .compatible = QPNP_FG_DEV_NAME, },
+ {}
+};
+
+static char *fg_supplicants[] = {
+ "battery",
+ "bcl",
+ "fg_adc"
+};
+
+#define DEBUG_PRINT_BUFFER_SIZE 64
+static void fill_string(char *str, size_t str_len, u8 *buf, int buf_len)
+{
+ int pos = 0;
+ int i;
+
+ for (i = 0; i < buf_len; i++) {
+ pos += scnprintf(str + pos, str_len - pos, "%02X", buf[i]);
+ if (i < buf_len - 1)
+ pos += scnprintf(str + pos, str_len - pos, " ");
+ }
+}
+
+static int fg_write(struct fg_chip *chip, u8 *val, u16 addr, int len)
+{
+ int rc = 0;
+ struct platform_device *pdev = chip->pdev;
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ if ((addr & 0xff00) == 0) {
+		pr_err("base cannot be zero base=0x%02x sid=0x%02x rc=%d\n",
+ addr, to_spmi_device(pdev->dev.parent)->usid, rc);
+ return -EINVAL;
+ }
+
+ rc = regmap_bulk_write(chip->regmap, addr, val, len);
+ if (rc) {
+ pr_err("write failed addr=0x%02x sid=0x%02x rc=%d\n",
+ addr, to_spmi_device(pdev->dev.parent)->usid, rc);
+ return rc;
+ }
+
+ if (!rc && (fg_debug_mask & FG_SPMI_DEBUG_WRITES)) {
+ str[0] = '\0';
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, len);
+ pr_info("write(0x%04X), sid=%d, len=%d; %s\n",
+ addr, to_spmi_device(pdev->dev.parent)->usid, len,
+ str);
+ }
+
+ return rc;
+}
+
+static int fg_read(struct fg_chip *chip, u8 *val, u16 addr, int len)
+{
+ int rc = 0;
+ struct platform_device *pdev = chip->pdev;
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ if ((addr & 0xff00) == 0) {
+ pr_err("base cannot be zero base=0x%02x sid=0x%02x rc=%d\n",
+ addr, to_spmi_device(pdev->dev.parent)->usid, rc);
+ return -EINVAL;
+ }
+
+ rc = regmap_bulk_read(chip->regmap, addr, val, len);
+ if (rc) {
+ pr_err("SPMI read failed base=0x%02x sid=0x%02x rc=%d\n", addr,
+ to_spmi_device(pdev->dev.parent)->usid, rc);
+ return rc;
+ }
+
+ if (!rc && (fg_debug_mask & FG_SPMI_DEBUG_READS)) {
+ str[0] = '\0';
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, len);
+ pr_info("read(0x%04x), sid=%d, len=%d; %s\n",
+ addr, to_spmi_device(pdev->dev.parent)->usid, len,
+ str);
+ }
+
+ return rc;
+}
+
+static int fg_masked_write(struct fg_chip *chip, u16 addr,
+ u8 mask, u8 val, int len)
+{
+ int rc;
+
+ rc = regmap_update_bits(chip->regmap, addr, mask, val);
+ if (rc) {
+ pr_err("spmi write failed: addr=%03X, rc=%d\n", addr, rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+#define RIF_MEM_ACCESS_REQ BIT(7)
+static int fg_check_rif_mem_access(struct fg_chip *chip, bool *status)
+{
+ int rc;
+ u8 mem_if_sts;
+
+ rc = fg_read(chip, &mem_if_sts, MEM_INTF_CFG(chip), 1);
+ if (rc) {
+ pr_err("failed to read rif_mem status rc=%d\n", rc);
+ return rc;
+ }
+
+ *status = mem_if_sts & RIF_MEM_ACCESS_REQ;
+ return 0;
+}
+
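+/*
+ * SRAM access is usable only when the MEM_AVAIL interrupt status is set
+ * and the RIF_MEM_ACCESS_REQ bit is still asserted; returns false on any
+ * read failure.
+ */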
+static bool fg_check_sram_access(struct fg_chip *chip)
+{
+ int rc;
+ u8 mem_if_sts;
+ bool rif_mem_sts = false;
+
+ rc = fg_read(chip, &mem_if_sts, INT_RT_STS(chip->mem_base), 1);
+ if (rc) {
+ pr_err("failed to read mem status rc=%d\n", rc);
+ return false;
+ }
+
+ if ((mem_if_sts & BIT(FG_MEM_AVAIL)) == 0)
+ return false;
+
+ rc = fg_check_rif_mem_access(chip, &rif_mem_sts);
+ if (rc)
+ return false;
+
+ return rif_mem_sts;
+}
+
+static inline int fg_assert_sram_access(struct fg_chip *chip)
+{
+ int rc;
+ u8 mem_if_sts;
+
+ rc = fg_read(chip, &mem_if_sts, INT_RT_STS(chip->mem_base), 1);
+ if (rc) {
+ pr_err("failed to read mem status rc=%d\n", rc);
+ return rc;
+ }
+
+ if ((mem_if_sts & BIT(FG_MEM_AVAIL)) == 0) {
+ pr_err("mem_avail not high: %02x\n", mem_if_sts);
+ return -EINVAL;
+ }
+
+ rc = fg_read(chip, &mem_if_sts, MEM_INTF_CFG(chip), 1);
+ if (rc) {
+ pr_err("failed to read mem status rc=%d\n", rc);
+ return rc;
+ }
+
+ if ((mem_if_sts & RIF_MEM_ACCESS_REQ) == 0) {
+		pr_err("rif_mem_access_req not set: %02x\n", mem_if_sts);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
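+/*
+ * Program the memory interface control register: WR_EN selects a write
+ * (vs. read) transaction and BURST selects multi-word access with an
+ * auto-incrementing address.
+ */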
+#define INTF_CTL_BURST BIT(7)
+#define INTF_CTL_WR_EN BIT(6)
+static int fg_config_access(struct fg_chip *chip, bool write,
+ bool burst)
+{
+ int rc;
+ u8 intf_ctl = 0;
+
+ intf_ctl = (write ? INTF_CTL_WR_EN : 0) | (burst ? INTF_CTL_BURST : 0);
+
+ rc = fg_write(chip, &intf_ctl, MEM_INTF_CTL(chip), 1);
+ if (rc) {
+ pr_err("failed to set mem access bit\n");
+ return -EIO;
+ }
+
+ return rc;
+}
+
+static int fg_req_and_wait_access(struct fg_chip *chip, int timeout)
+{
+ int rc = 0, ret = 0;
+ bool tried_again = false;
+
+ if (!fg_check_sram_access(chip)) {
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip),
+ RIF_MEM_ACCESS_REQ, RIF_MEM_ACCESS_REQ, 1);
+ if (rc) {
+ pr_err("failed to set mem access bit\n");
+ return -EIO;
+ }
+ fg_stay_awake(&chip->memif_wakeup_source);
+ }
+
+wait:
+ /* Wait for MEM_AVAIL IRQ. */
+ ret = wait_for_completion_interruptible_timeout(
+ &chip->sram_access_granted,
+ msecs_to_jiffies(timeout));
+ /* If we were interrupted wait again one more time. */
+ if (ret == -ERESTARTSYS && !tried_again) {
+ tried_again = true;
+ goto wait;
+ } else if (ret <= 0) {
+ rc = -ETIMEDOUT;
+ pr_err("transaction timed out rc=%d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int fg_release_access(struct fg_chip *chip)
+{
+ int rc;
+
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip),
+ RIF_MEM_ACCESS_REQ, 0, 1);
+ fg_relax(&chip->memif_wakeup_source);
+ reinit_completion(&chip->sram_access_granted);
+
+ return rc;
+}
+
+static void fg_release_access_if_necessary(struct fg_chip *chip)
+{
+ mutex_lock(&chip->rw_lock);
+	if (atomic_sub_return(1, &chip->memif_user_cnt) <= 0)
+		fg_release_access(chip);
+ mutex_unlock(&chip->rw_lock);
+}
+
+/*
+ * fg_mem_lock prevents the fuel gauge driver from releasing SRAM access
+ * while the lock is held.
+ *
+ * An equal number of calls must be made to fg_mem_release before the
+ * driver will release SRAM access.
+ */
+static void fg_mem_lock(struct fg_chip *chip)
+{
+ mutex_lock(&chip->rw_lock);
+ atomic_add_return(1, &chip->memif_user_cnt);
+ mutex_unlock(&chip->rw_lock);
+}
+
+static void fg_mem_release(struct fg_chip *chip)
+{
+ fg_release_access_if_necessary(chip);
+}
+
+static int fg_set_ram_addr(struct fg_chip *chip, u16 *address)
+{
+ int rc;
+
+ rc = fg_write(chip, (u8 *) address,
+ chip->mem_base + chip->offset[MEM_INTF_ADDR_LSB], 2);
+ if (rc) {
+ pr_err("spmi write failed: addr=%03X, rc=%d\n",
+ chip->mem_base + chip->offset[MEM_INTF_ADDR_LSB], rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+#define BUF_LEN 4
+static int fg_sub_mem_read(struct fg_chip *chip, u8 *val, u16 address, int len,
+ int offset)
+{
+ int rc, total_len;
+ u8 *rd_data = val;
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ rc = fg_config_access(chip, 0, (len > 4));
+ if (rc)
+ return rc;
+
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc)
+ return rc;
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("length %d addr=%02X\n", len, address);
+
+ total_len = len;
+ while (len > 0) {
+ if (!offset) {
+ rc = fg_read(chip, rd_data, MEM_INTF_RD_DATA0(chip),
+ min(len, BUF_LEN));
+ } else {
+ rc = fg_read(chip, rd_data,
+ MEM_INTF_RD_DATA0(chip) + offset,
+ min(len, BUF_LEN - offset));
+
+			/* manually set address to allow continuous reads */
+ address += BUF_LEN;
+
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc)
+ return rc;
+ }
+ if (rc) {
+ pr_err("spmi read failed: addr=%03x, rc=%d\n",
+ MEM_INTF_RD_DATA0(chip) + offset, rc);
+ return rc;
+ }
+ rd_data += (BUF_LEN - offset);
+ len -= (BUF_LEN - offset);
+ offset = 0;
+ }
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS) {
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, total_len);
+ pr_info("data: %s\n", str);
+ }
+ return rc;
+}
+
+static int fg_conventional_mem_read(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset, bool keep_access)
+{
+ int rc = 0, user_cnt = 0, orig_address = address;
+
+ if (offset > 3) {
+ pr_err("offset too large %d\n", offset);
+ return -EINVAL;
+ }
+
+ address = ((orig_address + offset) / 4) * 4;
+ offset = (orig_address + offset) % 4;
+
+ user_cnt = atomic_add_return(1, &chip->memif_user_cnt);
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("user_cnt %d\n", user_cnt);
+ mutex_lock(&chip->rw_lock);
+ if (!fg_check_sram_access(chip)) {
+ rc = fg_req_and_wait_access(chip, MEM_IF_TIMEOUT_MS);
+ if (rc)
+ goto out;
+ }
+
+ rc = fg_sub_mem_read(chip, val, address, len, offset);
+
+out:
+ user_cnt = atomic_sub_return(1, &chip->memif_user_cnt);
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("user_cnt %d\n", user_cnt);
+
+ fg_assert_sram_access(chip);
+
+ if (!keep_access && (user_cnt == 0) && !rc) {
+ rc = fg_release_access(chip);
+ if (rc) {
+ pr_err("failed to set mem access bit\n");
+ rc = -EIO;
+ }
+ }
+
+ mutex_unlock(&chip->rw_lock);
+ return rc;
+}
+
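+/*
+ * SRAM writes always move whole 4-byte words; partial leading or trailing
+ * bytes are handled by reading the word back first and merging the new
+ * bytes in before writing it out.
+ */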
+static int fg_conventional_mem_write(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset, bool keep_access)
+{
+ int rc = 0, user_cnt = 0, sublen;
+ bool access_configured = false;
+ u8 *wr_data = val, word[4];
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ if (address < RAM_OFFSET)
+ return -EINVAL;
+
+ if (offset > 3)
+ return -EINVAL;
+
+	address += offset;
+	offset = address % 4;
+	address = (address / 4) * 4;
+
+ user_cnt = atomic_add_return(1, &chip->memif_user_cnt);
+ if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
+ pr_info("user_cnt %d\n", user_cnt);
+ mutex_lock(&chip->rw_lock);
+ if (!fg_check_sram_access(chip)) {
+ rc = fg_req_and_wait_access(chip, MEM_IF_TIMEOUT_MS);
+ if (rc)
+ goto out;
+ }
+
+ if (fg_debug_mask & FG_MEM_DEBUG_WRITES) {
+ pr_info("length %d addr=%02X offset=%d\n",
+ len, address, offset);
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, wr_data, len);
+ pr_info("writing: %s\n", str);
+ }
+
+ while (len > 0) {
+ if (offset != 0) {
+ sublen = min(4 - offset, len);
+ rc = fg_sub_mem_read(chip, word, address, 4, 0);
+ if (rc)
+ goto out;
+ memcpy(word + offset, wr_data, sublen);
+ /* configure access as burst if more to write */
+ rc = fg_config_access(chip, 1, (len - sublen) > 0);
+ if (rc)
+ goto out;
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc)
+ goto out;
+ offset = 0;
+ access_configured = true;
+ } else if (len >= 4) {
+ if (!access_configured) {
+ rc = fg_config_access(chip, 1, len > 4);
+ if (rc)
+ goto out;
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc)
+ goto out;
+ access_configured = true;
+ }
+ sublen = 4;
+ memcpy(word, wr_data, 4);
+ } else if (len > 0 && len < 4) {
+ sublen = len;
+ rc = fg_sub_mem_read(chip, word, address, 4, 0);
+ if (rc)
+ goto out;
+ memcpy(word, wr_data, sublen);
+ rc = fg_config_access(chip, 1, 0);
+ if (rc)
+ goto out;
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc)
+ goto out;
+ access_configured = true;
+ } else {
+ pr_err("Invalid length: %d\n", len);
+ break;
+ }
+ rc = fg_write(chip, word, MEM_INTF_WR_DATA0(chip), 4);
+ if (rc) {
+ pr_err("spmi write failed: addr=%03x, rc=%d\n",
+ MEM_INTF_WR_DATA0(chip), rc);
+ goto out;
+ }
+ len -= sublen;
+ wr_data += sublen;
+ address += 4;
+ }
+
+out:
+ user_cnt = atomic_sub_return(1, &chip->memif_user_cnt);
+ if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
+ pr_info("user_cnt %d\n", user_cnt);
+
+ fg_assert_sram_access(chip);
+
+ if (!keep_access && (user_cnt == 0) && !rc) {
+ rc = fg_release_access(chip);
+ if (rc) {
+ pr_err("failed to set mem access bit\n");
+ rc = -EIO;
+ }
+ }
+
+ mutex_unlock(&chip->rw_lock);
+ return rc;
+}
+
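+/* Interleaved memory access (IMA) register offsets and status bits */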
+#define MEM_INTF_IMA_CFG 0x52
+#define MEM_INTF_IMA_OPR_STS 0x54
+#define MEM_INTF_IMA_ERR_STS 0x5F
+#define MEM_INTF_IMA_EXP_STS 0x55
+#define MEM_INTF_IMA_HW_STS 0x56
+#define MEM_INTF_IMA_BYTE_EN 0x60
+#define IMA_ADDR_STBL_ERR BIT(7)
+#define IMA_WR_ACS_ERR BIT(6)
+#define IMA_RD_ACS_ERR BIT(5)
+#define IMA_IACS_CLR BIT(2)
+#define IMA_IACS_RDY BIT(1)
+static int fg_check_ima_exception(struct fg_chip *chip)
+{
+ int rc = 0, ret = 0;
+ u8 err_sts, exp_sts = 0, hw_sts = 0;
+
+ rc = fg_read(chip, &err_sts,
+ chip->mem_base + MEM_INTF_IMA_ERR_STS, 1);
+ if (rc) {
+		pr_err("failed to read IMA error status rc=%d\n", rc);
+ return rc;
+ }
+
+ if (err_sts & (IMA_ADDR_STBL_ERR | IMA_WR_ACS_ERR | IMA_RD_ACS_ERR)) {
+ u8 temp;
+
+ fg_read(chip, &exp_sts,
+ chip->mem_base + MEM_INTF_IMA_EXP_STS, 1);
+ fg_read(chip, &hw_sts,
+ chip->mem_base + MEM_INTF_IMA_HW_STS, 1);
+ pr_err("IMA access failed ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
+ err_sts, exp_sts, hw_sts);
+ rc = err_sts;
+
+ /* clear the error */
+ ret |= fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+ IMA_IACS_CLR, IMA_IACS_CLR, 1);
+ temp = 0x4;
+ ret |= fg_write(chip, &temp, MEM_INTF_ADDR_LSB(chip) + 1, 1);
+ temp = 0x0;
+ ret |= fg_write(chip, &temp, MEM_INTF_WR_DATA0(chip) + 3, 1);
+ ret |= fg_read(chip, &temp, MEM_INTF_RD_DATA0(chip) + 3, 1);
+ ret |= fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+ IMA_IACS_CLR, 0, 1);
+ if (!ret)
+ return -EAGAIN;
+ else
+ pr_err("Error clearing IMA exception ret=%d\n", ret);
+ }
+
+ return rc;
+}
+
+static int fg_check_iacs_ready(struct fg_chip *chip)
+{
+ int rc = 0, timeout = 250;
+ u8 ima_opr_sts = 0;
+
+ /*
+ * Additional delay to make sure IACS ready bit is set after
+ * Read/Write operation.
+ */
+
+ usleep_range(30, 35);
+ while (1) {
+ rc = fg_read(chip, &ima_opr_sts,
+ chip->mem_base + MEM_INTF_IMA_OPR_STS, 1);
+ if (!rc && (ima_opr_sts & IMA_IACS_RDY)) {
+ break;
+ } else {
+ if (!(--timeout) || rc)
+ break;
+ /* delay for iacs_ready to be asserted */
+ usleep_range(5000, 7000);
+ }
+ }
+
+ if (!timeout || rc) {
+ pr_err("IACS_RDY not set\n");
+ /* perform IACS_CLR sequence */
+ fg_check_ima_exception(chip);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
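+/*
+ * IMA write: for each 4-byte window, set the byte-enable mask, load the
+ * data bytes, and touch WR_DATA3 (with dummy data when it is not enabled)
+ * since writing the last byte is what starts the transaction.
+ */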
+#define IACS_SLCT BIT(5)
+static int __fg_interleaved_mem_write(struct fg_chip *chip, u8 *val,
+ u16 address, int offset, int len)
+{
+ int rc = 0, i;
+ u8 *word = val, byte_enable = 0, num_bytes = 0;
+
+ if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
+ pr_info("length %d addr=%02X offset=%d\n",
+ len, address, offset);
+
+ while (len > 0) {
+ num_bytes = (offset + len) > BUF_LEN ?
+ (BUF_LEN - offset) : len;
+ /* write to byte_enable */
+ for (i = offset; i < (offset + num_bytes); i++)
+ byte_enable |= BIT(i);
+
+ rc = fg_write(chip, &byte_enable,
+ chip->mem_base + MEM_INTF_IMA_BYTE_EN, 1);
+ if (rc) {
+ pr_err("Unable to write to byte_en_reg rc=%d\n",
+ rc);
+ return rc;
+ }
+ /* write data */
+ rc = fg_write(chip, word, MEM_INTF_WR_DATA0(chip) + offset,
+ num_bytes);
+ if (rc) {
+ pr_err("spmi write failed: addr=%03x, rc=%d\n",
+ MEM_INTF_WR_DATA0(chip) + offset, rc);
+ return rc;
+ }
+ /*
+ * The last-byte WR_DATA3 starts the write transaction.
+ * Write a dummy value to WR_DATA3 if it does not have
+ * valid data. This dummy data is not written to the
+ * SRAM as byte_en for WR_DATA3 is not set.
+ */
+ if (!(byte_enable & BIT(3))) {
+ u8 dummy_byte = 0x0;
+ rc = fg_write(chip, &dummy_byte,
+ MEM_INTF_WR_DATA0(chip) + 3, 1);
+ if (rc) {
+ pr_err("Unable to write dummy-data to WR_DATA3 rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = fg_check_iacs_ready(chip);
+ if (rc) {
+ pr_debug("IACS_RDY failed rc=%d\n", rc);
+ return rc;
+ }
+
+ /* check for error condition */
+ rc = fg_check_ima_exception(chip);
+ if (rc) {
+			pr_err("IMA transaction failed rc=%d\n", rc);
+ return rc;
+ }
+
+ word += num_bytes;
+ len -= num_bytes;
+ offset = byte_enable = 0;
+ }
+
+ return rc;
+}
+
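+/*
+ * IMA read: data is consumed through the 4-byte RD_DATA window; once fewer
+ * than a full word remains, the interface is switched back to single
+ * (non-burst) mode before the final access.
+ */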
+static int __fg_interleaved_mem_read(struct fg_chip *chip, u8 *val, u16 address,
+ int offset, int len)
+{
+ int rc = 0, total_len;
+ u8 *rd_data = val, num_bytes;
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("length %d addr=%02X\n", len, address);
+
+ total_len = len;
+ while (len > 0) {
+ num_bytes = (offset + len) > BUF_LEN ? (BUF_LEN - offset) : len;
+ rc = fg_read(chip, rd_data, MEM_INTF_RD_DATA0(chip) + offset,
+ num_bytes);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03x, rc=%d\n",
+ MEM_INTF_RD_DATA0(chip) + offset, rc);
+ return rc;
+ }
+
+ rd_data += num_bytes;
+ len -= num_bytes;
+ offset = 0;
+
+ rc = fg_check_iacs_ready(chip);
+ if (rc) {
+ pr_debug("IACS_RDY failed rc=%d\n", rc);
+ return rc;
+ }
+
+ /* check for error condition */
+ rc = fg_check_ima_exception(chip);
+ if (rc) {
+			pr_err("IMA transaction failed rc=%d\n", rc);
+ return rc;
+ }
+
+ if (len && (len + offset) < BUF_LEN) {
+ /* move to single mode */
+ u8 intr_ctl = 0;
+
+ rc = fg_write(chip, &intr_ctl, MEM_INTF_CTL(chip), 1);
+ if (rc) {
+ pr_err("failed to move to single mode rc=%d\n",
+ rc);
+ return -EIO;
+ }
+ }
+ }
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS) {
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, total_len);
+ pr_info("data: %s\n", str);
+ }
+
+ return rc;
+}
+
+#define IMA_REQ_ACCESS (IACS_SLCT | RIF_MEM_ACCESS_REQ)
+static int fg_interleaved_mem_config(struct fg_chip *chip, u8 *val,
+		u16 address, int offset, int len, int op)
+{
+ int rc = 0;
+ bool rif_mem_sts = true;
+ int time_count = 0;
+
+ while (1) {
+ rc = fg_check_rif_mem_access(chip, &rif_mem_sts);
+ if (rc)
+ return rc;
+
+ if (!rif_mem_sts)
+ break;
+
+ if (fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
+ pr_info("RIF_MEM_ACCESS_REQ is not clear yet for IMA_%s\n",
+ op ? "write" : "read");
+
+ /*
+ * Try this no more than 4 times. If RIF_MEM_ACCESS_REQ is not
+ * clear, then return an error instead of waiting for it again.
+ */
+ if (time_count > 4) {
+			pr_err("polling RIF_MEM_ACCESS_REQ timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Wait for 4ms before reading RIF_MEM_ACCESS_REQ again */
+ usleep_range(4000, 4100);
+ time_count++;
+ }
+
+ /* configure for IMA access */
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip),
+ IMA_REQ_ACCESS, IMA_REQ_ACCESS, 1);
+ if (rc) {
+ pr_err("failed to set mem access bit rc = %d\n", rc);
+ return rc;
+ }
+
+ /* configure for the read/write single/burst mode */
+ rc = fg_config_access(chip, op, (offset + len) > 4);
+ if (rc) {
+ pr_err("failed to set configure memory access rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = fg_check_iacs_ready(chip);
+ if (rc) {
+ pr_debug("IACS_RDY failed rc=%d\n", rc);
+ return rc;
+ }
+
+ /* write addresses to the register */
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc) {
+ pr_err("failed to set SRAM address rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = fg_check_iacs_ready(chip);
+ if (rc)
+ pr_debug("IACS_RDY failed rc=%d\n", rc);
+
+ return rc;
+}
+
+#define MEM_INTF_FG_BEAT_COUNT 0x57
+#define BEAT_COUNT_MASK 0x0F
+#define RETRY_COUNT 3
+static int fg_interleaved_mem_read(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset)
+{
+ int rc = 0, orig_address = address;
+ u8 start_beat_count, end_beat_count, count = 0;
+ bool retry = false;
+
+ if (offset > 3) {
+ pr_err("offset too large %d\n", offset);
+ return -EINVAL;
+ }
+
+ fg_stay_awake(&chip->memif_wakeup_source);
+ address = ((orig_address + offset) / 4) * 4;
+ offset = (orig_address + offset) % 4;
+
+ if (address < RAM_OFFSET) {
+ /*
+ * OTP memory reads need a conventional memory access, do a
+ * conventional read when SRAM offset < RAM_OFFSET.
+ */
+ rc = fg_conventional_mem_read(chip, val, address, len, offset,
+ 0);
+ if (rc)
+ pr_err("Failed to read OTP memory %d\n", rc);
+ goto exit;
+ }
+
+ mutex_lock(&chip->rw_lock);
+
+retry:
+ rc = fg_interleaved_mem_config(chip, val, address, offset, len, 0);
+ if (rc) {
+ pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+ goto out;
+ }
+
+ /* read the start beat count */
+ rc = fg_read(chip, &start_beat_count,
+ chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
+ if (rc) {
+ pr_err("failed to read beat count rc=%d\n", rc);
+ goto out;
+ }
+
+ /* read data */
+ rc = __fg_interleaved_mem_read(chip, val, address, offset, len);
+ if (rc) {
+ if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
+ count++;
+ pr_err("IMA access failed retry_count = %d\n", count);
+ goto retry;
+ } else {
+ pr_err("failed to read SRAM address rc = %d\n", rc);
+ goto out;
+ }
+ }
+
+ /* read the end beat count */
+ rc = fg_read(chip, &end_beat_count,
+ chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
+ if (rc) {
+ pr_err("failed to read beat count rc=%d\n", rc);
+ goto out;
+ }
+
+ start_beat_count &= BEAT_COUNT_MASK;
+ end_beat_count &= BEAT_COUNT_MASK;
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("Start beat_count = %x End beat_count = %x\n",
+ start_beat_count, end_beat_count);
+ if (start_beat_count != end_beat_count) {
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+			pr_info("Beat counts do not match - retrying transaction\n");
+ retry = true;
+ }
+out:
+ /* Release IMA access */
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
+ if (rc)
+ pr_err("failed to reset IMA access bit rc = %d\n", rc);
+
+ if (retry) {
+ retry = false;
+ goto retry;
+ }
+ mutex_unlock(&chip->rw_lock);
+
+exit:
+ fg_relax(&chip->memif_wakeup_source);
+ return rc;
+}
+
+static int fg_interleaved_mem_write(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset)
+{
+ int rc = 0, orig_address = address;
+ u8 count = 0;
+
+ if (address < RAM_OFFSET)
+ return -EINVAL;
+
+ if (offset > 3) {
+ pr_err("offset too large %d\n", offset);
+ return -EINVAL;
+ }
+
+ fg_stay_awake(&chip->memif_wakeup_source);
+ address = ((orig_address + offset) / 4) * 4;
+ offset = (orig_address + offset) % 4;
+
+ mutex_lock(&chip->rw_lock);
+
+retry:
+ rc = fg_interleaved_mem_config(chip, val, address, offset, len, 1);
+ if (rc) {
+		pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+ goto out;
+ }
+
+ /* write data */
+ rc = __fg_interleaved_mem_write(chip, val, address, offset, len);
+ if (rc) {
+ if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
+ count++;
+ pr_err("IMA access failed retry_count = %d\n", count);
+ goto retry;
+ } else {
+ pr_err("failed to write SRAM address rc = %d\n", rc);
+ goto out;
+ }
+ }
+
+out:
+ /* Release IMA access */
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
+ if (rc)
+ pr_err("failed to reset IMA access bit rc = %d\n", rc);
+
+ mutex_unlock(&chip->rw_lock);
+ fg_relax(&chip->memif_wakeup_source);
+ return rc;
+}
+
+static int fg_mem_read(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset, bool keep_access)
+{
+ if (chip->ima_supported)
+ return fg_interleaved_mem_read(chip, val, address,
+ len, offset);
+ else
+ return fg_conventional_mem_read(chip, val, address,
+ len, offset, keep_access);
+}
+
+static int fg_mem_write(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset, bool keep_access)
+{
+ if (chip->ima_supported)
+ return fg_interleaved_mem_write(chip, val, address,
+ len, offset);
+ else
+ return fg_conventional_mem_write(chip, val, address,
+ len, offset, keep_access);
+}
+
+static int fg_mem_masked_write(struct fg_chip *chip, u16 addr,
+ u8 mask, u8 val, u8 offset)
+{
+ int rc = 0;
+ u8 reg[4];
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ rc = fg_mem_read(chip, reg, addr, 4, 0, 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n", addr, rc);
+ return rc;
+ }
+
+ reg[offset] &= ~mask;
+ reg[offset] |= val & mask;
+
+ str[0] = '\0';
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, reg, 4);
+ pr_debug("Writing %s address %03x, offset %d\n", str, addr, offset);
+
+ rc = fg_mem_write(chip, reg, addr, 4, 0, 0);
+ if (rc) {
+ pr_err("spmi write failed: addr=%03X, rc=%d\n", addr, rc);
+ return rc;
+ }
+
+ return rc;
+}
+
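+/* Scale a percentage to the FG's 8-bit setpoint, where 255 == 100% */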
+static int soc_to_setpoint(int soc)
+{
+ return DIV_ROUND_CLOSEST(soc * 255, 100);
+}
+
+static void batt_to_setpoint_adc(int vbatt_mv, u8 *data)
+{
+ int val;
+	/* Battery voltage is an offset from 0 V and the LSB is 5/2^15 V. */
+ val = DIV_ROUND_CLOSEST(vbatt_mv * 32768, 5000);
+ data[0] = val & 0xFF;
+ data[1] = val >> 8;
+}
+
+static u8 batt_to_setpoint_8b(int vbatt_mv)
+{
+ int val;
+	/* Battery voltage is an offset from 2.5 V and the LSB is 5/2^9 V. */
+ val = (vbatt_mv - 2500) * 512 / 1000;
+ return DIV_ROUND_CLOSEST(val, 5);
+}
+
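+/*
+ * Encode the thermal settle delay (in microseconds) as the power-of-two
+ * code stored in bits [7:5] of the setting (see THERM_DELAY_MASK).
+ */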
+static u8 therm_delay_to_setpoint(u32 delay_us)
+{
+ u8 val;
+
+ if (delay_us < 2560)
+ val = 0;
+ else if (delay_us > 163840)
+ val = 7;
+ else
+ val = ilog2(delay_us / 10) - 7;
+ return val << 5;
+}
+
+static int get_current_time(unsigned long *now_tm_sec)
+{
+ struct rtc_time tm;
+ struct rtc_device *rtc;
+ int rc;
+
+ rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
+ if (rtc == NULL) {
+ pr_err("%s: unable to open rtc device (%s)\n",
+ __FILE__, CONFIG_RTC_HCTOSYS_DEVICE);
+ return -EINVAL;
+ }
+
+ rc = rtc_read_time(rtc, &tm);
+ if (rc) {
+ pr_err("Error reading rtc device (%s) : %d\n",
+ CONFIG_RTC_HCTOSYS_DEVICE, rc);
+ goto close_time;
+ }
+
+ rc = rtc_valid_tm(&tm);
+ if (rc) {
+ pr_err("Invalid RTC time (%s): %d\n",
+ CONFIG_RTC_HCTOSYS_DEVICE, rc);
+ goto close_time;
+ }
+ rtc_tm_to_time(&tm, now_tm_sec);
+
+close_time:
+ rtc_class_close(rtc);
+ return rc;
+}
+
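+/* Raw 24-bit battery SOC, where FULL_PERCENT_3B (0xFFFFFF) is 100% */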
+#define BATTERY_SOC_REG 0x56C
+#define BATTERY_SOC_OFFSET 1
+#define FULL_PERCENT_3B 0xFFFFFF
+static int get_battery_soc_raw(struct fg_chip *chip)
+{
+ int rc;
+ u8 buffer[3];
+
+ rc = fg_mem_read(chip, buffer, BATTERY_SOC_REG, 3, 1, 0);
+ if (rc) {
+ pr_err("Unable to read battery soc: %d\n", rc);
+ return 0;
+ }
+ return (int)(buffer[2] << 16 | buffer[1] << 8 | buffer[0]);
+}
+
+#define COUNTER_IMPTR_REG 0X558
+#define COUNTER_PULSE_REG 0X55C
+#define SOC_FULL_REG 0x564
+#define COUNTER_IMPTR_OFFSET 2
+#define COUNTER_PULSE_OFFSET 0
+#define SOC_FULL_OFFSET 3
+#define ESR_PULSE_RECONFIG_SOC 0xFFF971
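+/*
+ * Clamp the raw battery SOC at ESR_PULSE_RECONFIG_SOC (~99.99%) and clear
+ * the impedance/pulse counters so that ESR pulse extraction can be
+ * reconfigured while the battery is essentially full.
+ */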
+static int fg_configure_soc(struct fg_chip *chip)
+{
+ u32 batt_soc;
+ u8 cntr[2] = {0, 0};
+ int rc = 0;
+
+ mutex_lock(&chip->rw_lock);
+ atomic_add_return(1, &chip->memif_user_cnt);
+ mutex_unlock(&chip->rw_lock);
+
+ /* Read Battery SOC */
+ batt_soc = get_battery_soc_raw(chip);
+
+ if (batt_soc > ESR_PULSE_RECONFIG_SOC) {
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info("Configuring soc registers batt_soc: %x\n",
+ batt_soc);
+ batt_soc = ESR_PULSE_RECONFIG_SOC;
+ rc = fg_mem_write(chip, (u8 *)&batt_soc, BATTERY_SOC_REG, 3,
+ BATTERY_SOC_OFFSET, 1);
+ if (rc) {
+ pr_err("failed to write BATT_SOC rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = fg_mem_write(chip, (u8 *)&batt_soc, SOC_FULL_REG, 3,
+ SOC_FULL_OFFSET, 1);
+ if (rc) {
+ pr_err("failed to write SOC_FULL rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = fg_mem_write(chip, cntr, COUNTER_IMPTR_REG, 2,
+ COUNTER_IMPTR_OFFSET, 1);
+ if (rc) {
+ pr_err("failed to write COUNTER_IMPTR rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = fg_mem_write(chip, cntr, COUNTER_PULSE_REG, 2,
+ COUNTER_PULSE_OFFSET, 0);
+ if (rc)
+			pr_err("failed to write COUNTER_PULSE rc=%d\n", rc);
+ }
+out:
+ fg_release_access_if_necessary(chip);
+ return rc;
+}
+
+#define SOC_EMPTY BIT(3)
+static bool fg_is_batt_empty(struct fg_chip *chip)
+{
+ u8 fg_soc_sts;
+ int rc;
+
+ rc = fg_read(chip, &fg_soc_sts,
+ INT_RT_STS(chip->soc_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ return false;
+ }
+
+ return (fg_soc_sts & SOC_EMPTY) != 0;
+}
+
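+/*
+ * The monotonic SOC is reported through a pair of shadow registers; a
+ * read is only trusted when both bytes match, retrying up to
+ * MAX_TRIES_SOC times.
+ */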
+static int get_monotonic_soc_raw(struct fg_chip *chip)
+{
+ u8 cap[2];
+ int rc, tries = 0;
+
+ while (tries < MAX_TRIES_SOC) {
+ rc = fg_read(chip, cap,
+ chip->soc_base + SOC_MONOTONIC_SOC, 2);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03x, rc=%d\n",
+ chip->soc_base + SOC_MONOTONIC_SOC, rc);
+ return rc;
+ }
+
+ if (cap[0] == cap[1])
+ break;
+
+ tries++;
+ }
+
+ if (tries == MAX_TRIES_SOC) {
+ pr_err("shadow registers do not match\n");
+ return -EINVAL;
+ }
+
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info_ratelimited("raw: 0x%02x\n", cap[0]);
+ return cap[0];
+}
+
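+/*
+ * Map the raw monotonic SOC (0x00..0xFF) onto 0..100: 0 and 0xFF report
+ * empty and full directly, and 1..254 scale linearly onto 1..99 so the
+ * reported capacity only reads 0 or 100 at true empty/full.
+ */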
+#define EMPTY_CAPACITY 0
+#define DEFAULT_CAPACITY 50
+#define MISSING_CAPACITY 100
+#define FULL_CAPACITY 100
+#define FULL_SOC_RAW 0xFF
+static int get_prop_capacity(struct fg_chip *chip)
+{
+ int msoc;
+
+ if (chip->battery_missing)
+ return MISSING_CAPACITY;
+ if (!chip->profile_loaded && !chip->use_otp_profile)
+ return DEFAULT_CAPACITY;
+ if (chip->charge_full)
+ return FULL_CAPACITY;
+ if (chip->soc_empty) {
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info_ratelimited("capacity: %d, EMPTY\n",
+ EMPTY_CAPACITY);
+ return EMPTY_CAPACITY;
+ }
+ msoc = get_monotonic_soc_raw(chip);
+ if (msoc == 0)
+ return EMPTY_CAPACITY;
+ else if (msoc == FULL_SOC_RAW)
+ return FULL_CAPACITY;
+ return DIV_ROUND_CLOSEST((msoc - 1) * (FULL_CAPACITY - 2),
+ FULL_SOC_RAW - 2) + 1;
+}
+
+#define HIGH_BIAS 3
+#define MED_BIAS BIT(1)
+#define LOW_BIAS BIT(0)
+static u8 bias_ua[] = {
+ [HIGH_BIAS] = 150,
+ [MED_BIAS] = 15,
+ [LOW_BIAS] = 5,
+};
+
+static int64_t get_batt_id(unsigned int battery_id_uv, u8 bid_info)
+{
+ u64 battery_id_ohm;
+
+ if ((bid_info & 0x3) == 0) {
+ pr_err("can't determine battery id 0x%02x\n", bid_info);
+ return -EINVAL;
+ }
+
+ battery_id_ohm = div_u64(battery_id_uv, bias_ua[bid_info & 0x3]);
+
+ return battery_id_ohm;
+}
+
+#define DEFAULT_TEMP_DEGC 250
+static int get_sram_prop_now(struct fg_chip *chip, unsigned int type)
+{
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info("addr 0x%02X, offset %d value %d\n",
+ fg_data[type].address, fg_data[type].offset,
+ fg_data[type].value);
+
+ if (type == FG_DATA_BATT_ID)
+ return get_batt_id(fg_data[type].value,
+ fg_data[FG_DATA_BATT_ID_INFO].value);
+
+ return fg_data[type].value;
+}
+
+#define MIN_TEMP_DEGC -300
+#define MAX_TEMP_DEGC 970
+static int get_prop_jeita_temp(struct fg_chip *chip, unsigned int type)
+{
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info("addr 0x%02X, offset %d\n", settings[type].address,
+ settings[type].offset);
+
+ return settings[type].value;
+}
+
+static int set_prop_jeita_temp(struct fg_chip *chip,
+ unsigned int type, int decidegc)
+{
+ int rc = 0;
+
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info("addr 0x%02X, offset %d temp%d\n",
+ settings[type].address,
+ settings[type].offset, decidegc);
+
+ settings[type].value = decidegc;
+
+ cancel_delayed_work_sync(
+ &chip->update_jeita_setting);
+ schedule_delayed_work(
+ &chip->update_jeita_setting, 0);
+
+ return rc;
+}
+
+#define EXTERNAL_SENSE_SELECT 0x4AC
+#define EXTERNAL_SENSE_OFFSET 0x2
+#define EXTERNAL_SENSE_BIT BIT(2)
+static int set_prop_sense_type(struct fg_chip *chip, int ext_sense_type)
+{
+ int rc;
+
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ EXTERNAL_SENSE_BIT,
+ ext_sense_type ? EXTERNAL_SENSE_BIT : 0,
+ EXTERNAL_SENSE_OFFSET);
+ if (rc) {
+ pr_err("failed to write profile rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
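+/*
+ * The FG stores several parameters as a 16-bit half-float: a 10-bit
+ * mantissa with an implicit leading one, a sign bit at bit 10, and a
+ * 5-bit exponent (bias 15) in bits [15:11]. Decoded values are scaled
+ * to micro-units (MICRO_UNIT).
+ */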
+#define EXPONENT_MASK 0xF800
+#define MANTISSA_MASK 0x3FF
+#define SIGN BIT(10)
+#define EXPONENT_SHIFT 11
+#define MICRO_UNIT 1000000ULL
+static int64_t float_decode(u16 reg)
+{
+ int64_t final_val, exponent_val, mantissa_val;
+ int exponent, mantissa, n;
+ bool sign;
+
+ exponent = (reg & EXPONENT_MASK) >> EXPONENT_SHIFT;
+ mantissa = (reg & MANTISSA_MASK);
+ sign = !!(reg & SIGN);
+
+ pr_debug("exponent=%d mantissa=%d sign=%d\n", exponent, mantissa, sign);
+
+ mantissa_val = mantissa * MICRO_UNIT;
+
+ n = exponent - 15;
+ if (n < 0)
+ exponent_val = MICRO_UNIT >> -n;
+ else
+ exponent_val = MICRO_UNIT << n;
+
+ n = n - 10;
+ if (n < 0)
+ mantissa_val >>= -n;
+ else
+ mantissa_val <<= n;
+
+ final_val = exponent_val + mantissa_val;
+
+ if (sign)
+ final_val *= -1;
+
+ return final_val;
+}
+
+#define MIN_HALFFLOAT_EXP_N -15
+#define MAX_HALFFLOAT_EXP_N 16
+static int log2_floor(int64_t uval)
+{
+ int n = 0;
+ int64_t i = MICRO_UNIT;
+
+ if (uval > i) {
+ while (uval > i && n > MIN_HALFFLOAT_EXP_N) {
+ i <<= 1;
+ n += 1;
+ }
+ if (uval < i)
+ n -= 1;
+ } else if (uval < i) {
+ while (uval < i && n < MAX_HALFFLOAT_EXP_N) {
+ i >>= 1;
+ n -= 1;
+ }
+ }
+
+ return n;
+}
+
+static int64_t exp2_int(int64_t n)
+{
+ int p = n - 1;
+
+ if (p > 0)
+ return (2 * MICRO_UNIT) << p;
+ else
+ return (2 * MICRO_UNIT) >> abs(p);
+}
+
+static u16 float_encode(int64_t uval)
+{
+ int sign = 0, n, exp, mantissa;
+ u16 half = 0;
+
+ if (uval < 0) {
+ sign = 1;
+ uval = abs(uval);
+ }
+ n = log2_floor(uval);
+ exp = n + 15;
+ mantissa = div_s64(div_s64((uval - exp2_int(n)) * exp2_int(10 - n),
+ MICRO_UNIT) + MICRO_UNIT / 2, MICRO_UNIT);
+
+ half = (mantissa & MANTISSA_MASK) | ((sign << 10) & SIGN)
+ | ((exp << 11) & EXPONENT_MASK);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("uval = %lld, m = 0x%02x, sign = 0x%02x, exp = 0x%02x, half = 0x%04x\n",
+ uval, mantissa, sign, exp, half);
+ return half;
+}
+
+#define BATT_IDED BIT(3)
+static int fg_is_batt_id_valid(struct fg_chip *chip)
+{
+ u8 fg_batt_sts;
+ int rc;
+
+ rc = fg_read(chip, &fg_batt_sts,
+ INT_RT_STS(chip->batt_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->batt_base), rc);
+ return rc;
+ }
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("fg batt sts 0x%x\n", fg_batt_sts);
+
+ return (fg_batt_sts & BATT_IDED) ? 1 : 0;
+}
+
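+/* Sign-extend an nbytes-wide two's complement value to 64 bits */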
+static int64_t twos_compliment_extend(int64_t val, int nbytes)
+{
+ int i;
+ int64_t mask;
+
+ mask = 0x80LL << ((nbytes - 1) * 8);
+ if (val & mask) {
+ for (i = 8; i > nbytes; i--) {
+ mask = 0xFFLL << ((i - 1) * 8);
+ val |= mask;
+ }
+ }
+
+ return val;
+}
+
+#define LSB_24B_NUMRTR 596046
+#define LSB_24B_DENMTR 1000000
+#define LSB_16B_NUMRTR 152587
+#define LSB_16B_DENMTR 1000
+#define LSB_8B 9800
+#define TEMP_LSB_16B 625
+#define DECIKELVIN 2730
+#define SRAM_PERIOD_NO_ID_UPDATE_MS 100
+#define FULL_PERCENT_28BIT 0xFFFFFFF
+static void update_sram_data(struct fg_chip *chip, int *resched_ms)
+{
+ int i, j, rc = 0;
+ u8 reg[4];
+ int64_t temp;
+ int battid_valid = fg_is_batt_id_valid(chip);
+
+ fg_stay_awake(&chip->update_sram_wakeup_source);
+ if (chip->fg_restarting)
+ goto resched;
+
+ fg_mem_lock(chip);
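+	/* index 0 (FG_DATA_BATT_TEMP) is refreshed separately in update_temp_data() */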
+ for (i = 1; i < FG_DATA_MAX; i++) {
+ if (chip->profile_loaded && i >= FG_DATA_BATT_ID)
+ continue;
+ rc = fg_mem_read(chip, reg, fg_data[i].address,
+ fg_data[i].len, fg_data[i].offset, 0);
+ if (rc) {
+ pr_err("Failed to update sram data\n");
+ break;
+ }
+
+ temp = 0;
+ for (j = 0; j < fg_data[i].len; j++)
+ temp |= reg[j] << (8 * j);
+
+ switch (i) {
+ case FG_DATA_OCV:
+ case FG_DATA_VOLTAGE:
+ case FG_DATA_CPRED_VOLTAGE:
+ fg_data[i].value = div_u64(
+ (u64)(u16)temp * LSB_16B_NUMRTR,
+ LSB_16B_DENMTR);
+ break;
+ case FG_DATA_CURRENT:
+ temp = twos_compliment_extend(temp, fg_data[i].len);
+ fg_data[i].value = div_s64(
+ (s64)temp * LSB_16B_NUMRTR,
+ LSB_16B_DENMTR);
+ break;
+ case FG_DATA_BATT_ESR:
+ fg_data[i].value = float_decode((u16) temp);
+ break;
+ case FG_DATA_BATT_ESR_COUNT:
+ fg_data[i].value = (u16)temp;
+ break;
+ case FG_DATA_BATT_ID:
+ if (battid_valid)
+ fg_data[i].value = reg[0] * LSB_8B;
+ break;
+ case FG_DATA_BATT_ID_INFO:
+ if (battid_valid)
+ fg_data[i].value = reg[0];
+ break;
+ case FG_DATA_BATT_SOC:
+ fg_data[i].value = div64_s64((temp * 10000),
+ FULL_PERCENT_3B);
+ break;
+ case FG_DATA_CC_CHARGE:
+ temp = twos_compliment_extend(temp, fg_data[i].len);
+ fg_data[i].value = div64_s64(
+ temp * (int64_t)chip->nom_cap_uah,
+ FULL_PERCENT_28BIT);
+ break;
+ case FG_DATA_VINT_ERR:
+ temp = twos_compliment_extend(temp, fg_data[i].len);
+ fg_data[i].value = div64_s64(temp * chip->nom_cap_uah,
+ FULL_PERCENT_3B);
+ break;
+		}
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("%d %lld %d\n", i, temp, fg_data[i].value);
+ }
+ fg_mem_release(chip);
+
+ if (!rc)
+ get_current_time(&chip->last_sram_update_time);
+
+resched:
+ if (battid_valid) {
+ complete_all(&chip->batt_id_avail);
+ *resched_ms = fg_sram_update_period_ms;
+ } else {
+ *resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS;
+ }
+ fg_relax(&chip->update_sram_wakeup_source);
+}
+
+#define SRAM_TIMEOUT_MS 3000
+static void update_sram_data_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ update_sram_data.work);
+ int resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS, ret;
+ bool tried_again = false;
+
+wait:
+ /* Wait for MEMIF access revoked */
+ ret = wait_for_completion_interruptible_timeout(
+ &chip->sram_access_revoked,
+ msecs_to_jiffies(SRAM_TIMEOUT_MS));
+
+ /* If we were interrupted wait again one more time. */
+ if (ret == -ERESTARTSYS && !tried_again) {
+ tried_again = true;
+ goto wait;
+ } else if (ret <= 0) {
+ pr_err("transaction timed out ret=%d\n", ret);
+ goto out;
+ }
+ update_sram_data(chip, &resched_ms);
+
+out:
+ schedule_delayed_work(
+ &chip->update_sram_data,
+ msecs_to_jiffies(resched_ms));
+}
+
+#define BATT_TEMP_OFFSET 3
+#define BATT_TEMP_CNTRL_MASK 0x17
+#define DISABLE_THERM_BIT BIT(0)
+#define TEMP_SENSE_ALWAYS_BIT BIT(1)
+#define TEMP_SENSE_CHARGE_BIT BIT(2)
+#define FORCE_RBIAS_ON_BIT BIT(4)
+#define BATT_TEMP_OFF DISABLE_THERM_BIT
+#define BATT_TEMP_ON (FORCE_RBIAS_ON_BIT | TEMP_SENSE_ALWAYS_BIT | \
+ TEMP_SENSE_CHARGE_BIT)
+#define TEMP_PERIOD_UPDATE_MS 10000
+#define TEMP_PERIOD_TIMEOUT_MS 3000
+static void update_temp_data(struct work_struct *work)
+{
+ s16 temp;
+ u8 reg[2];
+ bool tried_again = false;
+ int rc, ret, timeout = TEMP_PERIOD_TIMEOUT_MS;
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ update_temp_work.work);
+
+ if (chip->fg_restarting)
+ goto resched;
+
+ fg_stay_awake(&chip->update_temp_wakeup_source);
+ if (chip->sw_rbias_ctrl) {
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ BATT_TEMP_CNTRL_MASK,
+ BATT_TEMP_ON,
+ BATT_TEMP_OFFSET);
+ if (rc) {
+ pr_err("failed to write BATT_TEMP_ON rc=%d\n", rc);
+ goto out;
+ }
+
+wait:
+ /* Wait for MEMIF access revoked */
+ ret = wait_for_completion_interruptible_timeout(
+ &chip->sram_access_revoked,
+ msecs_to_jiffies(timeout));
+
+ /* If we were interrupted wait again one more time. */
+ if (ret == -ERESTARTSYS && !tried_again) {
+ tried_again = true;
+ goto wait;
+ } else if (ret <= 0) {
+ rc = -ETIMEDOUT;
+ pr_err("transaction timed out ret=%d\n", ret);
+ goto out;
+ }
+ }
+
+ /* Read FG_DATA_BATT_TEMP now */
+ rc = fg_mem_read(chip, reg, fg_data[0].address,
+ fg_data[0].len, fg_data[0].offset,
+ chip->sw_rbias_ctrl ? 1 : 0);
+ if (rc) {
+ pr_err("Failed to update temp data\n");
+ goto out;
+ }
+
+ temp = reg[0] | (reg[1] << 8);
+ fg_data[0].value = (temp * TEMP_LSB_16B / 1000)
+ - DECIKELVIN;
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("BATT_TEMP %d %d\n", temp, fg_data[0].value);
+
+ get_current_time(&chip->last_temp_update_time);
+
+out:
+ if (chip->sw_rbias_ctrl) {
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ BATT_TEMP_CNTRL_MASK,
+ BATT_TEMP_OFF,
+ BATT_TEMP_OFFSET);
+ if (rc)
+ pr_err("failed to write BATT_TEMP_OFF rc=%d\n", rc);
+ }
+ fg_relax(&chip->update_temp_wakeup_source);
+
+resched:
+ schedule_delayed_work(
+ &chip->update_temp_work,
+ msecs_to_jiffies(TEMP_PERIOD_UPDATE_MS));
+}
+
+static void update_jeita_setting(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ update_jeita_setting.work);
+ u8 reg[4];
+ int i, rc;
+
+ for (i = 0; i < 4; i++)
+ reg[i] = (settings[FG_MEM_SOFT_COLD + i].value / 10) + 30;
+
+ rc = fg_mem_write(chip, reg, settings[FG_MEM_SOFT_COLD].address,
+ 4, settings[FG_MEM_SOFT_COLD].offset, 0);
+ if (rc)
+ pr_err("failed to update JEITA setting rc=%d\n", rc);
+}
+
+static int fg_set_resume_soc(struct fg_chip *chip, u8 threshold)
+{
+ u16 address;
+ int offset, rc;
+
+ address = settings[FG_MEM_RESUME_SOC].address;
+ offset = settings[FG_MEM_RESUME_SOC].offset;
+
+ rc = fg_mem_masked_write(chip, address, 0xFF, threshold, offset);
+
+ if (rc)
+ pr_err("write failed rc=%d\n", rc);
+ else
+ pr_debug("setting resume-soc to %x\n", threshold);
+
+ return rc;
+}
+
+#define VBATT_LOW_STS_BIT BIT(2)
+static int fg_get_vbatt_status(struct fg_chip *chip, bool *vbatt_low_sts)
+{
+ int rc = 0;
+ u8 fg_batt_sts;
+
+ rc = fg_read(chip, &fg_batt_sts, INT_RT_STS(chip->batt_base), 1);
+ if (!rc)
+ *vbatt_low_sts = !!(fg_batt_sts & VBATT_LOW_STS_BIT);
+ return rc;
+}
+
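+/*
+ * Cycle counters are kept in FG SRAM as BUCKET_COUNT little-endian 16-bit
+ * values starting at BATT_CYCLE_NUMBER_REG; restore_cycle_counter() reads
+ * them back into the in-memory counters.
+ */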
+#define BATT_CYCLE_NUMBER_REG 0x5E8
+#define BATT_CYCLE_OFFSET 0
+static void restore_cycle_counter(struct fg_chip *chip)
+{
+ int rc = 0, i, address;
+ u8 data[2];
+
+ fg_mem_lock(chip);
+ for (i = 0; i < BUCKET_COUNT; i++) {
+ address = BATT_CYCLE_NUMBER_REG + i * 2;
+ rc = fg_mem_read(chip, (u8 *)&data, address, 2,
+ BATT_CYCLE_OFFSET, 0);
+ if (rc)
+ pr_err("Failed to read BATT_CYCLE_NUMBER[%d] rc: %d\n",
+ i, rc);
+ else
+ chip->cyc_ctr.count[i] = data[0] | data[1] << 8;
+ }
+ fg_mem_release(chip);
+}
+
+static void clear_cycle_counter(struct fg_chip *chip)
+{
+ int rc = 0, len, i;
+
+ if (!chip->cyc_ctr.en)
+ return;
+
+ len = sizeof(chip->cyc_ctr.count);
+ memset(chip->cyc_ctr.count, 0, len);
+ for (i = 0; i < BUCKET_COUNT; i++) {
+ chip->cyc_ctr.started[i] = false;
+ chip->cyc_ctr.last_soc[i] = 0;
+ }
+ rc = fg_mem_write(chip, (u8 *)&chip->cyc_ctr.count,
+ BATT_CYCLE_NUMBER_REG, len,
+ BATT_CYCLE_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write BATT_CYCLE_NUMBER rc=%d\n", rc);
+}
+
+static int fg_inc_store_cycle_ctr(struct fg_chip *chip, int bucket)
+{
+ int rc = 0, address;
+ u16 cyc_count;
+ u8 data[2];
+
+ if (bucket < 0 || (bucket > BUCKET_COUNT - 1))
+ return 0;
+
+ cyc_count = chip->cyc_ctr.count[bucket];
+ cyc_count++;
+ data[0] = cyc_count & 0xFF;
+ data[1] = cyc_count >> 8;
+
+ address = BATT_CYCLE_NUMBER_REG + bucket * 2;
+
+ rc = fg_mem_write(chip, data, address, 2, BATT_CYCLE_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write BATT_CYCLE_NUMBER[%d] rc=%d\n",
+ bucket, rc);
+ else
+ chip->cyc_ctr.count[bucket] = cyc_count;
+ return rc;
+}
+
+static void update_cycle_count(struct work_struct *work)
+{
+ int rc = 0, bucket, i;
+ u8 reg[3], batt_soc;
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ cycle_count_work);
+
+ mutex_lock(&chip->cyc_ctr.lock);
+ rc = fg_mem_read(chip, reg, BATTERY_SOC_REG, 3,
+ BATTERY_SOC_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read battery soc rc: %d\n", rc);
+ goto out;
+ }
+ batt_soc = reg[2];
+
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ /* Find out which bucket the SOC falls in */
+ bucket = batt_soc / BUCKET_SOC_PCT;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("batt_soc: %x bucket: %d\n", reg[2], bucket);
+
+		/*
+		 * If counting has started for the previous bucket and the
+		 * counter for the current bucket is just getting started,
+		 * store the previous bucket's counter now.
+		 */
+ if (bucket > 0 && chip->cyc_ctr.started[bucket - 1] &&
+ !chip->cyc_ctr.started[bucket]) {
+ rc = fg_inc_store_cycle_ctr(chip, bucket - 1);
+ if (rc) {
+ pr_err("Error in storing cycle_ctr rc: %d\n",
+ rc);
+ goto out;
+ } else {
+ chip->cyc_ctr.started[bucket - 1] = false;
+ chip->cyc_ctr.last_soc[bucket - 1] = 0;
+ }
+ }
+ if (!chip->cyc_ctr.started[bucket]) {
+ chip->cyc_ctr.started[bucket] = true;
+ chip->cyc_ctr.last_soc[bucket] = batt_soc;
+ }
+ } else {
+ for (i = 0; i < BUCKET_COUNT; i++) {
+ if (chip->cyc_ctr.started[i] &&
+ batt_soc > chip->cyc_ctr.last_soc[i]) {
+ rc = fg_inc_store_cycle_ctr(chip, i);
+ if (rc)
+ pr_err("Error in storing cycle_ctr rc: %d\n",
+ rc);
+ chip->cyc_ctr.last_soc[i] = 0;
+ }
+ chip->cyc_ctr.started[i] = false;
+ }
+ }
+out:
+ mutex_unlock(&chip->cyc_ctr.lock);
+}
+
+static int fg_get_cycle_count(struct fg_chip *chip)
+{
+ int count;
+
+ if (!chip->cyc_ctr.en)
+ return 0;
+
+ if ((chip->cyc_ctr.id <= 0) || (chip->cyc_ctr.id > BUCKET_COUNT))
+ return -EINVAL;
+
+ mutex_lock(&chip->cyc_ctr.lock);
+ count = chip->cyc_ctr.count[chip->cyc_ctr.id - 1];
+ mutex_unlock(&chip->cyc_ctr.lock);
+ return count;
+}
+
+static void half_float_to_buffer(int64_t uval, u8 *buffer)
+{
+ u16 raw;
+
+ raw = float_encode(uval);
+ buffer[0] = (u8)(raw & 0xFF);
+ buffer[1] = (u8)((raw >> 8) & 0xFF);
+}
+
+static int64_t half_float(u8 *buffer)
+{
+ u16 val;
+
+ val = buffer[1] << 8 | buffer[0];
+ return float_decode(val);
+}
+
+static int voltage_2b(u8 *buffer)
+{
+ u16 val;
+
+ val = buffer[1] << 8 | buffer[0];
+ /* the range of voltage 2b is [-5V, 5V], so it will fit in an int */
+ return (int)div_u64(((u64)val) * LSB_16B_NUMRTR, LSB_16B_DENMTR);
+}
+
+static int bcap_uah_2b(u8 *buffer)
+{
+ u16 val;
+
+ val = buffer[1] << 8 | buffer[0];
+ return ((int)val) * 1000;
+}
+
+static int lookup_ocv_for_soc(struct fg_chip *chip, int soc)
+{
+ int64_t *coeffs;
+
+ if (soc > chip->ocv_junction_p1p2 * 10)
+ coeffs = chip->ocv_coeffs;
+ else if (soc > chip->ocv_junction_p2p3 * 10)
+ coeffs = chip->ocv_coeffs + 4;
+ else
+ coeffs = chip->ocv_coeffs + 8;
+ /* the range of ocv will fit in a 32 bit int */
+ return (int)(coeffs[0]
+ + div_s64(coeffs[1] * soc, 1000LL)
+ + div_s64(coeffs[2] * soc * soc, 1000000LL)
+ + div_s64(coeffs[3] * soc * soc * soc, 1000000000LL));
+}
+
+static int lookup_soc_for_ocv(struct fg_chip *chip, int ocv)
+{
+ int64_t val;
+ int soc = -EINVAL;
+ /*
+ * binary search variables representing the valid start and end
+ * percentages to search
+ */
+ int start = 0, end = 1000, mid;
+
+ if (fg_debug_mask & FG_AGING)
+ pr_info("target_ocv = %d\n", ocv);
+ /* do a binary search for the closest soc to match the ocv */
+ while (end - start > 1) {
+ mid = (start + end) / 2;
+ val = lookup_ocv_for_soc(chip, mid);
+ if (fg_debug_mask & FG_AGING)
+ pr_info("start = %d, mid = %d, end = %d, ocv = %lld\n",
+ start, mid, end, val);
+ if (ocv < val) {
+ end = mid;
+ } else if (ocv > val) {
+ start = mid;
+ } else {
+ soc = mid;
+ break;
+ }
+ }
+	/*
+	 * if the exact soc was not found and there are two or fewer values
+	 * remaining, just compare them and see which one is closest to the ocv
+	 */
+ if (soc == -EINVAL) {
+ if (abs(ocv - lookup_ocv_for_soc(chip, start))
+ > abs(ocv - lookup_ocv_for_soc(chip, end)))
+ soc = end;
+ else
+ soc = start;
+ }
+ if (fg_debug_mask & FG_AGING)
+ pr_info("closest = %d, target_ocv = %d, ocv_found = %d\n",
+ soc, ocv, lookup_ocv_for_soc(chip, soc));
+ return soc;
+}
+
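+/*
+ * Estimate aged capacity from ESR growth: compute the cutoff OCV with the
+ * fresh vs. measured ESR, convert both to SOC, and treat the difference
+ * as the unusable fraction of the nominal capacity.
+ */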
+#define ESR_ACTUAL_REG 0x554
+#define BATTERY_ESR_REG 0x4F4
+#define TEMP_RS_TO_RSLOW_REG 0x514
+static int estimate_battery_age(struct fg_chip *chip, int *actual_capacity)
+{
+ int64_t ocv_cutoff_new, ocv_cutoff_aged, temp_rs_to_rslow;
+ int64_t esr_actual, battery_esr, val;
+ int soc_cutoff_aged, soc_cutoff_new, rc;
+ int battery_soc, unusable_soc, batt_temp;
+ u8 buffer[3];
+
+ if (chip->batt_aging_mode != FG_AGING_ESR)
+ return 0;
+
+ if (chip->nom_cap_uah == 0) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("ocv coefficients not loaded, aborting\n");
+ return 0;
+ }
+ fg_mem_lock(chip);
+
+ batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+ if (batt_temp < 150 || batt_temp > 400) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("Battery temp (%d) out of range, aborting\n",
+ (int)batt_temp);
+ rc = 0;
+ goto done;
+ }
+
+ battery_soc = get_battery_soc_raw(chip) * 100 / FULL_PERCENT_3B;
+ if (battery_soc < 25 || battery_soc > 75) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("Battery SoC (%d) out of range, aborting\n",
+ (int)battery_soc);
+ rc = 0;
+ goto done;
+ }
+
+ rc = fg_mem_read(chip, buffer, ESR_ACTUAL_REG, 2, 2, 0);
+ esr_actual = half_float(buffer);
+ rc |= fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, 2, 0);
+ battery_esr = half_float(buffer);
+
+ if (rc) {
+ goto error_done;
+ } else if (esr_actual < battery_esr) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("Batt ESR lower than ESR actual, aborting\n");
+ rc = 0;
+ goto done;
+ }
+ rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2, 0, 0);
+ temp_rs_to_rslow = half_float(buffer);
+
+ if (rc)
+ goto error_done;
+
+ fg_mem_release(chip);
+
+ if (fg_debug_mask & FG_AGING) {
+ pr_info("batt_soc = %d, cutoff_voltage = %lld, eval current = %d\n",
+ battery_soc, chip->cutoff_voltage,
+ chip->evaluation_current);
+ pr_info("temp_rs_to_rslow = %lld, batt_esr = %lld, esr_actual = %lld\n",
+ temp_rs_to_rslow, battery_esr, esr_actual);
+ }
+
+ /* calculate soc_cutoff_new */
+ val = (1000000LL + temp_rs_to_rslow) * battery_esr;
+ do_div(val, 1000000);
+ ocv_cutoff_new = div64_s64(chip->evaluation_current * val, 1000)
+ + chip->cutoff_voltage;
+
+ /* calculate soc_cutoff_aged */
+ val = (1000000LL + temp_rs_to_rslow) * esr_actual;
+ do_div(val, 1000000);
+ ocv_cutoff_aged = div64_s64(chip->evaluation_current * val, 1000)
+ + chip->cutoff_voltage;
+
+ if (fg_debug_mask & FG_AGING)
+ pr_info("ocv_cutoff_new = %lld, ocv_cutoff_aged = %lld\n",
+ ocv_cutoff_new, ocv_cutoff_aged);
+
+ soc_cutoff_new = lookup_soc_for_ocv(chip, ocv_cutoff_new);
+ soc_cutoff_aged = lookup_soc_for_ocv(chip, ocv_cutoff_aged);
+
+ if (fg_debug_mask & FG_AGING)
+ pr_info("aged soc = %d, new soc = %d\n",
+ soc_cutoff_aged, soc_cutoff_new);
+ unusable_soc = soc_cutoff_aged - soc_cutoff_new;
+
+ *actual_capacity = div64_s64(((int64_t)chip->nom_cap_uah)
+ * (1000 - unusable_soc), 1000);
+ if (fg_debug_mask & FG_AGING)
+ pr_info("nom cap = %d, actual cap = %d\n",
+ chip->nom_cap_uah, *actual_capacity);
+
+ return rc;
+
+error_done:
+ pr_err("some register reads failed: %d\n", rc);
+done:
+ fg_mem_release(chip);
+ return rc;
+}
+
+static void battery_age_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ battery_age_work);
+
+ estimate_battery_age(chip, &chip->actual_cap_uah);
+}
+
+static enum power_supply_property fg_power_props[] = {
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_RAW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_OCV,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+ POWER_SUPPLY_PROP_CHARGE_NOW_ERROR,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_COOL_TEMP,
+ POWER_SUPPLY_PROP_WARM_TEMP,
+ POWER_SUPPLY_PROP_RESISTANCE,
+ POWER_SUPPLY_PROP_RESISTANCE_ID,
+ POWER_SUPPLY_PROP_BATTERY_TYPE,
+ POWER_SUPPLY_PROP_UPDATE_NOW,
+ POWER_SUPPLY_PROP_ESR_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+ POWER_SUPPLY_PROP_HI_POWER,
+};
+
+static int fg_power_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct fg_chip *chip = power_supply_get_drvdata(psy);
+ bool vbatt_low_sts;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_BATTERY_TYPE:
+ if (chip->battery_missing)
+ val->strval = missing_batt_type;
+ else if (chip->fg_restarting)
+ val->strval = loading_batt_type;
+ else
+ val->strval = chip->batt_type;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = get_prop_capacity(chip);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY_RAW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_SOC);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW_ERROR:
+ val->intval = get_sram_prop_now(chip, FG_DATA_VINT_ERR);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_CURRENT);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_VOLTAGE);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+ val->intval = get_sram_prop_now(chip, FG_DATA_OCV);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = chip->batt_max_voltage_uv;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+ break;
+ case POWER_SUPPLY_PROP_COOL_TEMP:
+ val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_COLD);
+ break;
+ case POWER_SUPPLY_PROP_WARM_TEMP:
+ val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_HOT);
+ break;
+ case POWER_SUPPLY_PROP_RESISTANCE:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR);
+ break;
+ case POWER_SUPPLY_PROP_ESR_COUNT:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR_COUNT);
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ val->intval = fg_get_cycle_count(chip);
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+ val->intval = chip->cyc_ctr.id;
+ break;
+ case POWER_SUPPLY_PROP_RESISTANCE_ID:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ID);
+ break;
+ case POWER_SUPPLY_PROP_UPDATE_NOW:
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ if (!fg_get_vbatt_status(chip, &vbatt_low_sts))
+ val->intval = (int)vbatt_low_sts;
+ else
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = chip->nom_cap_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = chip->learning_data.learned_cc_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ val->intval = chip->learning_data.cc_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_CC_CHARGE);
+ break;
+ case POWER_SUPPLY_PROP_HI_POWER:
+ val->intval = !!chip->bcl_lpm_disabled;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int correction_times[] = {
+ 1470,
+ 2940,
+ 4410,
+ 5880,
+ 7350,
+ 8820,
+ 10290,
+ 11760,
+ 13230,
+ 14700,
+ 16170,
+ 17640,
+ 19110,
+ 20580,
+ 22050,
+ 23520,
+ 24990,
+ 26460,
+ 27930,
+ 29400,
+ 30870,
+ 32340,
+ 33810,
+ 35280,
+ 36750,
+ 38220,
+ 39690,
+ 41160,
+ 42630,
+ 44100,
+ 45570,
+ 47040,
+};
+
+static int correction_factors[] = {
+ 1000000,
+ 1007874,
+ 1015789,
+ 1023745,
+ 1031742,
+ 1039780,
+ 1047859,
+ 1055979,
+ 1064140,
+ 1072342,
+ 1080584,
+ 1088868,
+ 1097193,
+ 1105558,
+ 1113964,
+ 1122411,
+ 1130899,
+ 1139427,
+ 1147996,
+ 1156606,
+ 1165256,
+ 1173947,
+ 1182678,
+ 1191450,
+ 1200263,
+ 1209115,
+ 1218008,
+ 1226942,
+ 1235915,
+ 1244929,
+ 1253983,
+ 1263076,
+};
+
+#define FG_CONVERSION_FACTOR (64198531LL)
+static int iavg_3b_to_uah(u8 *buffer, int delta_ms)
+{
+ int64_t val, i_filtered;
+ int i, correction_factor;
+
+ for (i = 0; i < ARRAY_SIZE(correction_times); i++) {
+ if (correction_times[i] > delta_ms)
+ break;
+ }
+ if (i >= ARRAY_SIZE(correction_times)) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("fuel gauge took more than 32 cycles\n");
+ i = ARRAY_SIZE(correction_times) - 1;
+ }
+ correction_factor = correction_factors[i];
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("delta_ms = %d, cycles = %d, correction = %d\n",
+ delta_ms, i, correction_factor);
+ val = buffer[2] << 16 | buffer[1] << 8 | buffer[0];
+ /* convert val from signed 24b to signed 64b */
+ i_filtered = (val << 40) >> 40;
+ val = i_filtered * correction_factor;
+ val = div64_s64(val + FG_CONVERSION_FACTOR / 2, FG_CONVERSION_FACTOR);
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("i_filtered = 0x%llx/%lld, cc_uah = %lld\n",
+ i_filtered, i_filtered, val);
+
+ return val;
+}
+
+static bool fg_is_temperature_ok_for_learning(struct fg_chip *chip)
+{
+ int batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+
+ if (batt_temp > chip->learning_data.max_temp
+ || batt_temp < chip->learning_data.min_temp) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("temp (%d) out of range [%d, %d], aborting\n",
+ batt_temp,
+ chip->learning_data.min_temp,
+ chip->learning_data.max_temp);
+ return false;
+ }
+ return true;
+}
+
+static void fg_cap_learning_stop(struct fg_chip *chip)
+{
+ chip->learning_data.cc_uah = 0;
+ chip->learning_data.active = false;
+}
+
+#define I_FILTERED_REG 0x584
+static void fg_cap_learning_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ fg_cap_learning_work);
+ u8 i_filtered[3], data[3];
+ int rc, cc_uah, delta_ms;
+ ktime_t now_kt, delta_kt;
+
+ mutex_lock(&chip->learning_data.learning_lock);
+ if (!chip->learning_data.active)
+ goto fail;
+ if (!fg_is_temperature_ok_for_learning(chip)) {
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ if (chip->wa_flag & USE_CC_SOC_REG) {
+ mutex_unlock(&chip->learning_data.learning_lock);
+ fg_relax(&chip->capacity_learning_wakeup_source);
+ return;
+ }
+
+ fg_mem_lock(chip);
+
+ rc = fg_mem_read(chip, i_filtered, I_FILTERED_REG, 3, 0, 0);
+ if (rc) {
+ pr_err("Failed to read i_filtered: %d\n", rc);
+ fg_mem_release(chip);
+ goto fail;
+ }
+ memset(data, 0, 3);
+ rc = fg_mem_write(chip, data, I_FILTERED_REG, 3, 0, 0);
+ if (rc) {
+ pr_err("Failed to clear i_filtered: %d\n", rc);
+ fg_mem_release(chip);
+ goto fail;
+ }
+ fg_mem_release(chip);
+
+ now_kt = ktime_get_boottime();
+ delta_kt = ktime_sub(now_kt, chip->learning_data.time_stamp);
+ chip->learning_data.time_stamp = now_kt;
+
+ delta_ms = (int)div64_s64(ktime_to_ns(delta_kt), 1000000);
+
+ cc_uah = iavg_3b_to_uah(i_filtered, delta_ms);
+ chip->learning_data.cc_uah -= cc_uah;
+ if (fg_debug_mask & FG_AGING)
+ pr_info("total_cc_uah = %lld\n", chip->learning_data.cc_uah);
+
+fail:
+	mutex_unlock(&chip->learning_data.learning_lock);
+}
+
+#define CC_SOC_BASE_REG 0x5BC
+#define CC_SOC_OFFSET 3
+#define CC_SOC_MAGNITUDE_MASK 0x1FFFFFFF
+#define CC_SOC_NEGATIVE_BIT BIT(29)
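+/*
+ * CC_SOC is stored in SRAM as a 30-bit quantity: bit 29 flags a negative
+ * sample and the lower 29 bits carry the magnitude, which the helper below
+ * folds back into a plain signed int.
+ */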
+static int fg_get_cc_soc(struct fg_chip *chip, int *cc_soc)
+{
+ int rc;
+ u8 reg[4];
+ unsigned int temp, magnitude;
+
+ rc = fg_mem_read(chip, reg, CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read CC_SOC_REG rc=%d\n", rc);
+ return rc;
+ }
+
+ temp = reg[3] << 24 | reg[2] << 16 | reg[1] << 8 | reg[0];
+ magnitude = temp & CC_SOC_MAGNITUDE_MASK;
+ if (temp & CC_SOC_NEGATIVE_BIT)
+ *cc_soc = -1 * (~magnitude + 1);
+ else
+ *cc_soc = magnitude;
+
+ return 0;
+}
+
+static int fg_cap_learning_process_full_data(struct fg_chip *chip)
+{
+ int cc_pc_val, rc = -EINVAL;
+ unsigned int cc_soc_delta_pc;
+ int64_t delta_cc_uah;
+
+ if (!chip->learning_data.active)
+ goto fail;
+
+ if (!fg_is_temperature_ok_for_learning(chip)) {
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ rc = fg_get_cc_soc(chip, &cc_pc_val);
+ if (rc) {
+ pr_err("failed to get CC_SOC, stopping capacity learning\n");
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ cc_soc_delta_pc = DIV_ROUND_CLOSEST(
+ abs(cc_pc_val - chip->learning_data.init_cc_pc_val)
+ * 100, FULL_PERCENT_28BIT);
+
+ delta_cc_uah = div64_s64(
+ chip->learning_data.learned_cc_uah * cc_soc_delta_pc,
+ 100);
+ chip->learning_data.cc_uah = delta_cc_uah + chip->learning_data.cc_uah;
+
+ if (fg_debug_mask & FG_AGING)
+ pr_info("current cc_soc=%d cc_soc_pc=%d total_cc_uah = %lld\n",
+ cc_pc_val, cc_soc_delta_pc,
+ chip->learning_data.cc_uah);
+
+ return 0;
+
+fail:
+ return rc;
+}
+
+#define FG_CAP_LEARNING_INTERVAL_NS 30000000000
+static enum alarmtimer_restart fg_cap_learning_alarm_cb(struct alarm *alarm,
+ ktime_t now)
+{
+ struct fg_chip *chip = container_of(alarm, struct fg_chip,
+ fg_cap_learning_alarm);
+
+ if (chip->learning_data.active) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("alarm fired\n");
+ schedule_work(&chip->fg_cap_learning_work);
+ alarm_forward_now(alarm,
+ ns_to_ktime(FG_CAP_LEARNING_INTERVAL_NS));
+ return ALARMTIMER_RESTART;
+ }
+ if (fg_debug_mask & FG_AGING)
+ pr_info("alarm misfired\n");
+ return ALARMTIMER_NORESTART;
+}
+
+#define FG_AGING_STORAGE_REG 0x5E4
+#define ACTUAL_CAPACITY_REG 0x578
+#define MAH_TO_SOC_CONV_REG 0x4A0
+#define CC_SOC_COEFF_OFFSET 0
+#define ACTUAL_CAPACITY_OFFSET 2
+#define MAH_TO_SOC_CONV_CS_OFFSET 0
+static int fg_calc_and_store_cc_soc_coeff(struct fg_chip *chip, int16_t cc_mah)
+{
+ int rc;
+ int64_t cc_to_soc_coeff, mah_to_soc;
+ u8 data[2];
+
+ rc = fg_mem_write(chip, (u8 *)&cc_mah, ACTUAL_CAPACITY_REG, 2,
+ ACTUAL_CAPACITY_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to store actual capacity: %d\n", rc);
+ return rc;
+ }
+
+ rc = fg_mem_read(chip, (u8 *)&data, MAH_TO_SOC_CONV_REG, 2,
+ MAH_TO_SOC_CONV_CS_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read mah_to_soc_conv_cs: %d\n", rc);
+ } else {
+ mah_to_soc = data[1] << 8 | data[0];
+ mah_to_soc *= MICRO_UNIT;
+ cc_to_soc_coeff = div64_s64(mah_to_soc, cc_mah);
+ half_float_to_buffer(cc_to_soc_coeff, data);
+ rc = fg_mem_write(chip, (u8 *)data,
+ ACTUAL_CAPACITY_REG, 2,
+ CC_SOC_COEFF_OFFSET, 0);
+ if (rc)
+ pr_err("Failed to write cc_soc_coeff_offset: %d\n",
+ rc);
+ else if (fg_debug_mask & FG_AGING)
+ pr_info("new cc_soc_coeff %lld [%x %x] saved to sram\n",
+ cc_to_soc_coeff, data[0], data[1]);
+ }
+ return rc;
+}
+
+static void fg_cap_learning_load_data(struct fg_chip *chip)
+{
+ int16_t cc_mah;
+ int64_t old_cap = chip->learning_data.learned_cc_uah;
+ int rc;
+
+ rc = fg_mem_read(chip, (u8 *)&cc_mah, FG_AGING_STORAGE_REG, 2, 0, 0);
+ if (rc) {
+ pr_err("Failed to load aged capacity: %d\n", rc);
+ } else {
+ chip->learning_data.learned_cc_uah = cc_mah * 1000;
+ if (fg_debug_mask & FG_AGING)
+ pr_info("learned capacity %lld-> %lld/%x uah\n",
+ old_cap,
+ chip->learning_data.learned_cc_uah,
+ cc_mah);
+ }
+}
+
+static void fg_cap_learning_save_data(struct fg_chip *chip)
+{
+ int16_t cc_mah;
+ int rc;
+
+ cc_mah = div64_s64(chip->learning_data.learned_cc_uah, 1000);
+
+ rc = fg_mem_write(chip, (u8 *)&cc_mah, FG_AGING_STORAGE_REG, 2, 0, 0);
+ if (rc)
+ pr_err("Failed to store aged capacity: %d\n", rc);
+ else if (fg_debug_mask & FG_AGING)
+ pr_info("learned capacity %lld uah (%d/0x%x uah) saved to sram\n",
+ chip->learning_data.learned_cc_uah,
+ cc_mah, cc_mah);
+
+ if (chip->learning_data.feedback_on) {
+ rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah);
+ if (rc)
+ pr_err("Error in storing cc_soc_coeff, rc:%d\n", rc);
+ }
+}
+
+static void fg_cap_learning_post_process(struct fg_chip *chip)
+{
+ int64_t max_inc_val, min_dec_val, old_cap;
+
+ max_inc_val = chip->learning_data.learned_cc_uah
+ * (1000 + chip->learning_data.max_increment);
+ do_div(max_inc_val, 1000);
+
+ min_dec_val = chip->learning_data.learned_cc_uah
+ * (1000 - chip->learning_data.max_decrement);
+ do_div(min_dec_val, 1000);
+
+ old_cap = chip->learning_data.learned_cc_uah;
+ if (chip->learning_data.cc_uah > max_inc_val)
+ chip->learning_data.learned_cc_uah = max_inc_val;
+ else if (chip->learning_data.cc_uah < min_dec_val)
+ chip->learning_data.learned_cc_uah = min_dec_val;
+ else
+ chip->learning_data.learned_cc_uah =
+ chip->learning_data.cc_uah;
+
+ fg_cap_learning_save_data(chip);
+ if (fg_debug_mask & FG_AGING)
+ pr_info("final cc_uah = %lld, learned capacity %lld -> %lld uah\n",
+ chip->learning_data.cc_uah,
+ old_cap, chip->learning_data.learned_cc_uah);
+}
+
+static int get_vbat_est_diff(struct fg_chip *chip)
+{
+ return abs(fg_data[FG_DATA_VOLTAGE].value
+ - fg_data[FG_DATA_CPRED_VOLTAGE].value);
+}
+
+#define CBITS_INPUT_FILTER_REG 0x4B4
+#define IBATTF_TAU_MASK 0x38
+#define IBATTF_TAU_99_S 0x30
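+/*
+ * Summary of the capacity learning policy below: learning starts only while
+ * charging in FG_AGING_CC mode, with a non-zero learned capacity, battery
+ * temperature in range, vbat close enough to the predicted voltage, and SoC
+ * at or below max_start_soc. The coulomb count is seeded with the
+ * SoC-proportional share of the learned capacity and then tracked either
+ * through the CC_SOC register (USE_CC_SOC_REG) or by draining i_filtered
+ * from a periodic alarm. When charging stops, learning ends; if it ended at
+ * FULL, the result is post-processed and saved.
+ */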
+static int fg_cap_learning_check(struct fg_chip *chip)
+{
+ u8 data[4];
+ int rc = 0, battery_soc, cc_pc_val;
+ int vbat_est_diff, vbat_est_thr_uv;
+ unsigned int cc_pc_100 = FULL_PERCENT_28BIT;
+
+ mutex_lock(&chip->learning_data.learning_lock);
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING
+ && !chip->learning_data.active
+ && chip->batt_aging_mode == FG_AGING_CC) {
+ if (chip->learning_data.learned_cc_uah == 0) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("no capacity, aborting\n");
+ goto fail;
+ }
+
+ if (!fg_is_temperature_ok_for_learning(chip))
+ goto fail;
+
+ fg_mem_lock(chip);
+ if (!chip->learning_data.feedback_on) {
+ vbat_est_diff = get_vbat_est_diff(chip);
+ vbat_est_thr_uv = chip->learning_data.vbat_est_thr_uv;
+ if (vbat_est_diff >= vbat_est_thr_uv &&
+ vbat_est_thr_uv > 0) {
+ if (fg_debug_mask & FG_AGING)
+					pr_info("vbat_est_diff (%d) >= threshold (%d)\n",
+ vbat_est_diff, vbat_est_thr_uv);
+ fg_mem_release(chip);
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+ }
+ battery_soc = get_battery_soc_raw(chip);
+ if (fg_debug_mask & FG_AGING)
+ pr_info("checking battery soc (%d vs %d)\n",
+ battery_soc * 100 / FULL_PERCENT_3B,
+ chip->learning_data.max_start_soc);
+ /* check if the battery is low enough to start soc learning */
+ if (battery_soc * 100 / FULL_PERCENT_3B
+ > chip->learning_data.max_start_soc) {
+ if (fg_debug_mask & FG_AGING)
+				pr_info("battery soc too high (%d > %d), aborting\n",
+ battery_soc * 100 / FULL_PERCENT_3B,
+ chip->learning_data.max_start_soc);
+ fg_mem_release(chip);
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ /* set the coulomb counter to a percentage of the capacity */
+ chip->learning_data.cc_uah = div64_s64(
+ (chip->learning_data.learned_cc_uah * battery_soc),
+ FULL_PERCENT_3B);
+
+ /* Use CC_SOC_REG based capacity learning */
+ if (chip->wa_flag & USE_CC_SOC_REG) {
+ fg_mem_release(chip);
+ /* SW_CC_SOC based capacity learning */
+ if (fg_get_cc_soc(chip, &cc_pc_val)) {
+ pr_err("failed to get CC_SOC, stop capacity learning\n");
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ chip->learning_data.init_cc_pc_val = cc_pc_val;
+ chip->learning_data.active = true;
+ if (fg_debug_mask & FG_AGING)
+ pr_info("SW_CC_SOC based learning init_CC_SOC=%d\n",
+ chip->learning_data.init_cc_pc_val);
+ } else {
+ rc = fg_mem_masked_write(chip, CBITS_INPUT_FILTER_REG,
+ IBATTF_TAU_MASK, IBATTF_TAU_99_S, 0);
+ if (rc) {
+ pr_err("Failed to write IF IBAT Tau: %d\n",
+ rc);
+ fg_mem_release(chip);
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ /* clear the i_filtered register */
+ memset(data, 0, 4);
+ rc = fg_mem_write(chip, data, I_FILTERED_REG, 3, 0, 0);
+ if (rc) {
+ pr_err("Failed to clear i_filtered: %d\n", rc);
+ fg_mem_release(chip);
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+ fg_mem_release(chip);
+ chip->learning_data.time_stamp = ktime_get_boottime();
+ chip->learning_data.active = true;
+
+ if (fg_debug_mask & FG_AGING)
+ pr_info("cap learning started, soc = %d cc_uah = %lld\n",
+ battery_soc * 100 / FULL_PERCENT_3B,
+ chip->learning_data.cc_uah);
+ alarm_start_relative(&chip->fg_cap_learning_alarm,
+ ns_to_ktime(FG_CAP_LEARNING_INTERVAL_NS));
+ }
+ } else if ((chip->status != POWER_SUPPLY_STATUS_CHARGING)
+ && chip->learning_data.active) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("capacity learning stopped\n");
+ if (!(chip->wa_flag & USE_CC_SOC_REG))
+ alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
+
+ if (chip->status == POWER_SUPPLY_STATUS_FULL) {
+ if (chip->wa_flag & USE_CC_SOC_REG) {
+ rc = fg_cap_learning_process_full_data(chip);
+ if (rc) {
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+ /* reset SW_CC_SOC register to 100% */
+ rc = fg_mem_write(chip, (u8 *)&cc_pc_100,
+ CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
+ if (rc)
+ pr_err("Failed to reset CC_SOC_REG rc=%d\n",
+ rc);
+ }
+ fg_cap_learning_post_process(chip);
+ }
+
+ fg_cap_learning_stop(chip);
+ }
+
+fail:
+ mutex_unlock(&chip->learning_data.learning_lock);
+ return rc;
+}
+
+static bool is_usb_present(struct fg_chip *chip)
+{
+	union power_supply_propval prop = {0,};
+
+	if (!chip->usb_psy)
+ chip->usb_psy = power_supply_get_by_name("usb");
+
+ if (chip->usb_psy)
+ power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_PRESENT, &prop);
+ return prop.intval != 0;
+}
+
+static bool is_dc_present(struct fg_chip *chip)
+{
+	union power_supply_propval prop = {0,};
+
+	if (!chip->dc_psy)
+ chip->dc_psy = power_supply_get_by_name("dc");
+
+ if (chip->dc_psy)
+ power_supply_get_property(chip->dc_psy,
+ POWER_SUPPLY_PROP_PRESENT, &prop);
+ return prop.intval != 0;
+}
+
+static bool is_input_present(struct fg_chip *chip)
+{
+ return is_usb_present(chip) || is_dc_present(chip);
+}
+
+static bool is_otg_present(struct fg_chip *chip)
+{
+ union power_supply_propval prop = {0,};
+
+ if (!chip->usb_psy)
+ chip->usb_psy = power_supply_get_by_name("usb");
+
+ if (chip->usb_psy)
+ power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_USB_OTG, &prop);
+ return prop.intval != 0;
+}
+
+static bool is_charger_available(struct fg_chip *chip)
+{
+ if (!chip->batt_psy_name)
+ return false;
+
+ if (!chip->batt_psy)
+ chip->batt_psy = power_supply_get_by_name(chip->batt_psy_name);
+
+ if (!chip->batt_psy)
+ return false;
+
+ return true;
+}
+
+static int set_prop_enable_charging(struct fg_chip *chip, bool enable)
+{
+ int rc = 0;
+ union power_supply_propval ret = {enable, };
+
+ if (!is_charger_available(chip)) {
+ pr_err("Charger not available yet!\n");
+ return -EINVAL;
+ }
+
+ rc = power_supply_set_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED,
+ &ret);
+ if (rc) {
+ pr_err("couldn't configure batt chg %d\n", rc);
+ return rc;
+ }
+
+ chip->charging_disabled = !enable;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("%sabling charging\n", enable ? "en" : "dis");
+
+ return rc;
+}
+
+#define MAX_BATTERY_CC_SOC_CAPACITY 150
+static void status_change_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ status_change_work);
+ unsigned long current_time = 0;
+ int cc_soc, rc, capacity = get_prop_capacity(chip);
+
+ if (chip->esr_pulse_tune_en) {
+ fg_stay_awake(&chip->esr_extract_wakeup_source);
+ schedule_work(&chip->esr_extract_config_work);
+ }
+
+ if (chip->status == POWER_SUPPLY_STATUS_FULL) {
+ if (capacity >= 99 && chip->hold_soc_while_full
+ && chip->health == POWER_SUPPLY_HEALTH_GOOD) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("holding soc at 100\n");
+ chip->charge_full = true;
+ } else if (fg_debug_mask & FG_STATUS) {
+ pr_info("terminated charging at %d/0x%02x\n",
+ capacity, get_monotonic_soc_raw(chip));
+ }
+ }
+ if (chip->status == POWER_SUPPLY_STATUS_FULL ||
+ chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ if (!chip->vbat_low_irq_enabled) {
+ enable_irq(chip->batt_irq[VBATT_LOW].irq);
+ enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = true;
+ }
+ if (!!(chip->wa_flag & PULSE_REQUEST_WA) && capacity == 100)
+ fg_configure_soc(chip);
+ } else if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
+ if (chip->vbat_low_irq_enabled) {
+ disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = false;
+ }
+ }
+ fg_cap_learning_check(chip);
+ schedule_work(&chip->update_esr_work);
+
+ if (chip->wa_flag & USE_CC_SOC_REG) {
+ if (fg_get_cc_soc(chip, &cc_soc)) {
+ pr_err("failed to get CC_SOC\n");
+ return;
+ }
+ }
+
+ if (chip->prev_status != chip->status && chip->last_sram_update_time) {
+		get_current_time(&current_time);
+ /*
+		 * When the charging status changes, update the SRAM parameters
+		 * unless they were already updated within the last 5 seconds.
+ */
+ if (chip->last_sram_update_time + 5 < current_time) {
+ cancel_delayed_work(&chip->update_sram_data);
+ schedule_delayed_work(&chip->update_sram_data,
+ msecs_to_jiffies(0));
+ }
+ if (chip->cyc_ctr.en)
+ schedule_work(&chip->cycle_count_work);
+ if ((chip->wa_flag & USE_CC_SOC_REG) &&
+ chip->bad_batt_detection_en &&
+ chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ chip->sw_cc_soc_data.init_sys_soc = capacity;
+ chip->sw_cc_soc_data.init_cc_soc = cc_soc;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info(" Init_sys_soc %d init_cc_soc %d\n",
+ chip->sw_cc_soc_data.init_sys_soc,
+ chip->sw_cc_soc_data.init_cc_soc);
+ }
+ }
+ if ((chip->wa_flag & USE_CC_SOC_REG) && chip->bad_batt_detection_en
+ && chip->safety_timer_expired) {
+ chip->sw_cc_soc_data.delta_soc =
+ DIV_ROUND_CLOSEST(abs(cc_soc -
+ chip->sw_cc_soc_data.init_cc_soc)
+ * 100, FULL_PERCENT_28BIT);
+ chip->sw_cc_soc_data.full_capacity =
+ chip->sw_cc_soc_data.delta_soc +
+ chip->sw_cc_soc_data.init_sys_soc;
+ pr_info("Init_sys_soc %d init_cc_soc %d cc_soc %d delta_soc %d full_capacity %d\n",
+ chip->sw_cc_soc_data.init_sys_soc,
+ chip->sw_cc_soc_data.init_cc_soc, cc_soc,
+ chip->sw_cc_soc_data.delta_soc,
+ chip->sw_cc_soc_data.full_capacity);
+ /*
+		 * If the sw_cc_soc full capacity exceeds 150, the battery is
+		 * likely damaged; otherwise, reset the safety timer and
+		 * restart charging.
+ */
+ if (chip->sw_cc_soc_data.full_capacity >
+ MAX_BATTERY_CC_SOC_CAPACITY) {
+ pr_info("Battery possibly damaged, do not restart charging\n");
+ } else {
+ pr_info("Reset safety-timer and restart charging\n");
+ rc = set_prop_enable_charging(chip, false);
+ if (rc) {
+ pr_err("failed to disable charging %d\n", rc);
+ return;
+ }
+
+ chip->safety_timer_expired = false;
+ msleep(200);
+
+ rc = set_prop_enable_charging(chip, true);
+ if (rc) {
+ pr_err("failed to enable charging %d\n", rc);
+ return;
+ }
+ }
+ }
+}
+
+/*
+ * Check for change in the status of input or OTG and schedule
+ * IADC gain compensation work.
+ */
+static void check_gain_compensation(struct fg_chip *chip)
+{
+ bool input_present = is_input_present(chip);
+ bool otg_present = is_otg_present(chip);
+
+ if ((chip->wa_flag & IADC_GAIN_COMP_WA)
+ && ((chip->input_present ^ input_present)
+ || (chip->otg_present ^ otg_present))) {
+ fg_stay_awake(&chip->gain_comp_wakeup_source);
+ chip->input_present = input_present;
+ chip->otg_present = otg_present;
+ cancel_work_sync(&chip->gain_comp_work);
+ schedule_work(&chip->gain_comp_work);
+ }
+}
+
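+/*
+ * JEITA hard-limit hysteresis: when health trips to OVERHEAT/COLD, the
+ * corresponding hard threshold is pushed further out (hot lowered by
+ * hot_hysteresis, cold raised by cold_hysteresis) so the battery must
+ * recover past the original limit before the state clears; the thresholds
+ * are restored once health returns to normal.
+ */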
+static void fg_hysteresis_config(struct fg_chip *chip)
+{
+ int hard_hot = 0, hard_cold = 0;
+
+ hard_hot = get_prop_jeita_temp(chip, FG_MEM_HARD_HOT);
+ hard_cold = get_prop_jeita_temp(chip, FG_MEM_HARD_COLD);
+ if (chip->health == POWER_SUPPLY_HEALTH_OVERHEAT && !chip->batt_hot) {
+ /* turn down the hard hot threshold */
+ chip->batt_hot = true;
+ set_prop_jeita_temp(chip, FG_MEM_HARD_HOT,
+ hard_hot - chip->hot_hysteresis);
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("hard hot hysteresis: old hot=%d, new hot=%d\n",
+ hard_hot, hard_hot - chip->hot_hysteresis);
+ } else if (chip->health == POWER_SUPPLY_HEALTH_COLD &&
+ !chip->batt_cold) {
+ /* turn up the hard cold threshold */
+ chip->batt_cold = true;
+ set_prop_jeita_temp(chip, FG_MEM_HARD_COLD,
+ hard_cold + chip->cold_hysteresis);
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("hard cold hysteresis: old cold=%d, new cold=%d\n",
+				hard_cold, hard_cold + chip->cold_hysteresis);
+ } else if (chip->health != POWER_SUPPLY_HEALTH_OVERHEAT &&
+ chip->batt_hot) {
+ /* restore the hard hot threshold */
+ set_prop_jeita_temp(chip, FG_MEM_HARD_HOT,
+ hard_hot + chip->hot_hysteresis);
+ chip->batt_hot = !chip->batt_hot;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("restore hard hot threshold: old hot=%d, new hot=%d\n",
+ hard_hot,
+ hard_hot + chip->hot_hysteresis);
+ } else if (chip->health != POWER_SUPPLY_HEALTH_COLD &&
+ chip->batt_cold) {
+ /* restore the hard cold threshold */
+ set_prop_jeita_temp(chip, FG_MEM_HARD_COLD,
+ hard_cold - chip->cold_hysteresis);
+ chip->batt_cold = !chip->batt_cold;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("restore hard cold threshold: old cold=%d, new cold=%d\n",
+ hard_cold,
+ hard_cold - chip->cold_hysteresis);
+ }
+}
+
+#define BATT_INFO_STS(base) (base + 0x09)
+#define JEITA_HARD_HOT_RT_STS BIT(6)
+#define JEITA_HARD_COLD_RT_STS BIT(5)
+static int fg_init_batt_temp_state(struct fg_chip *chip)
+{
+ int rc = 0;
+ u8 batt_info_sts;
+ int hard_hot = 0, hard_cold = 0;
+
+ /*
+ * read the batt_info_sts register to parse battery's
+ * initial status and do hysteresis config accordingly.
+ */
+ rc = fg_read(chip, &batt_info_sts,
+ BATT_INFO_STS(chip->batt_base), 1);
+ if (rc) {
+ pr_err("failed to read batt info sts, rc=%d\n", rc);
+ return rc;
+ }
+
+ hard_hot = get_prop_jeita_temp(chip, FG_MEM_HARD_HOT);
+ hard_cold = get_prop_jeita_temp(chip, FG_MEM_HARD_COLD);
+ chip->batt_hot =
+ (batt_info_sts & JEITA_HARD_HOT_RT_STS) ? true : false;
+ chip->batt_cold =
+ (batt_info_sts & JEITA_HARD_COLD_RT_STS) ? true : false;
+ if (chip->batt_hot || chip->batt_cold) {
+ if (chip->batt_hot) {
+ chip->health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ set_prop_jeita_temp(chip, FG_MEM_HARD_HOT,
+ hard_hot - chip->hot_hysteresis);
+ } else {
+ chip->health = POWER_SUPPLY_HEALTH_COLD;
+ set_prop_jeita_temp(chip, FG_MEM_HARD_COLD,
+ hard_cold + chip->cold_hysteresis);
+ }
+ }
+
+ return rc;
+}
+
+static int fg_power_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct fg_chip *chip = power_supply_get_drvdata(psy);
+ int rc = 0, unused;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_COOL_TEMP:
+ rc = set_prop_jeita_temp(chip, FG_MEM_SOFT_COLD, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_WARM_TEMP:
+ rc = set_prop_jeita_temp(chip, FG_MEM_SOFT_HOT, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_UPDATE_NOW:
+ if (val->intval)
+ update_sram_data(chip, &unused);
+ break;
+ case POWER_SUPPLY_PROP_STATUS:
+ chip->prev_status = chip->status;
+ chip->status = val->intval;
+ schedule_work(&chip->status_change_work);
+ check_gain_compensation(chip);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ chip->health = val->intval;
+ if (chip->health == POWER_SUPPLY_HEALTH_GOOD) {
+ fg_stay_awake(&chip->resume_soc_wakeup_source);
+ schedule_work(&chip->set_resume_soc_work);
+ }
+
+ if (chip->jeita_hysteresis_support)
+ fg_hysteresis_config(chip);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_DONE:
+ chip->charge_done = val->intval;
+ if (!chip->resume_soc_lowered) {
+ fg_stay_awake(&chip->resume_soc_wakeup_source);
+ schedule_work(&chip->set_resume_soc_work);
+ }
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+ if ((val->intval > 0) && (val->intval <= BUCKET_COUNT)) {
+ chip->cyc_ctr.id = val->intval;
+ } else {
+ pr_err("rejecting invalid cycle_count_id = %d\n",
+ val->intval);
+ rc = -EINVAL;
+ }
+ break;
+ case POWER_SUPPLY_PROP_SAFETY_TIMER_EXPIRED:
+ chip->safety_timer_expired = val->intval;
+ schedule_work(&chip->status_change_work);
+ break;
+ case POWER_SUPPLY_PROP_HI_POWER:
+ if (chip->wa_flag & BCL_HI_POWER_FOR_CHGLED_WA) {
+ chip->bcl_lpm_disabled = !!val->intval;
+ schedule_work(&chip->bcl_hi_power_work);
+ }
+ break;
+ default:
+ return -EINVAL;
+	}
+
+ return rc;
+}
+
+static int fg_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_COOL_TEMP:
+ case POWER_SUPPLY_PROP_WARM_TEMP:
+ case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+ return 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define SRAM_DUMP_START 0x400
+#define SRAM_DUMP_LEN 0x200
+static void dump_sram(struct work_struct *work)
+{
+ int i, rc;
+ u8 *buffer, rt_sts;
+ char str[16];
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ dump_sram);
+
+ buffer = devm_kzalloc(chip->dev, SRAM_DUMP_LEN, GFP_KERNEL);
+ if (buffer == NULL) {
+ pr_err("Can't allocate buffer\n");
+ return;
+ }
+
+ rc = fg_read(chip, &rt_sts, INT_RT_STS(chip->soc_base), 1);
+ if (rc)
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ else
+ pr_info("soc rt_sts: 0x%x\n", rt_sts);
+
+ rc = fg_read(chip, &rt_sts, INT_RT_STS(chip->batt_base), 1);
+ if (rc)
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->batt_base), rc);
+ else
+ pr_info("batt rt_sts: 0x%x\n", rt_sts);
+
+ rc = fg_read(chip, &rt_sts, INT_RT_STS(chip->mem_base), 1);
+ if (rc)
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->mem_base), rc);
+ else
+ pr_info("memif rt_sts: 0x%x\n", rt_sts);
+
+ rc = fg_mem_read(chip, buffer, SRAM_DUMP_START, SRAM_DUMP_LEN, 0, 0);
+ if (rc) {
+ pr_err("dump failed: rc = %d\n", rc);
+ return;
+ }
+
+ for (i = 0; i < SRAM_DUMP_LEN; i += 4) {
+ str[0] = '\0';
+		fill_string(str, sizeof(str), buffer + i, 4);
+ pr_info("%03X %s\n", SRAM_DUMP_START + i, str);
+ }
+ devm_kfree(chip->dev, buffer);
+}
+
+#define MAXRSCHANGE_REG 0x434
+#define ESR_VALUE_OFFSET 1
+#define ESR_STRICT_VALUE 0x4120391F391F3019
+#define ESR_DEFAULT_VALUE 0x58CD4A6761C34A67
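+/*
+ * ESR filter policy implemented below: once taper charging or FULL is
+ * reached, the strict filter constants are loaded so the ESR estimate
+ * moves slowly; leaving taper while charging, or discharging, restores
+ * the looser default filter.
+ */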
+static void update_esr_value(struct work_struct *work)
+{
+ union power_supply_propval prop = {0, };
+ u64 esr_value;
+ int rc = 0;
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ update_esr_work);
+
+ if (!is_charger_available(chip))
+ return;
+
+ power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_TYPE, &prop);
+
+ if (!chip->esr_strict_filter) {
+ if ((prop.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER &&
+ chip->status == POWER_SUPPLY_STATUS_CHARGING) ||
+ (chip->status == POWER_SUPPLY_STATUS_FULL)) {
+ esr_value = ESR_STRICT_VALUE;
+ rc = fg_mem_write(chip, (u8 *)&esr_value,
+ MAXRSCHANGE_REG, 8,
+ ESR_VALUE_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write strict ESR value rc=%d\n",
+ rc);
+ else
+ chip->esr_strict_filter = true;
+ }
+ } else if ((prop.intval != POWER_SUPPLY_CHARGE_TYPE_TAPER &&
+ chip->status == POWER_SUPPLY_STATUS_CHARGING) ||
+ (chip->status == POWER_SUPPLY_STATUS_DISCHARGING)) {
+ esr_value = ESR_DEFAULT_VALUE;
+ rc = fg_mem_write(chip, (u8 *)&esr_value, MAXRSCHANGE_REG, 8,
+ ESR_VALUE_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write default ESR value rc=%d\n", rc);
+ else
+ chip->esr_strict_filter = false;
+ }
+}
+
+#define TEMP_COUNTER_REG 0x580
+#define VBAT_FILTERED_OFFSET 1
+#define GAIN_REG 0x424
+#define GAIN_OFFSET 1
+#define K_VCOR_REG 0x484
+#define DEF_GAIN_OFFSET 2
+#define PICO_UNIT 0xE8D4A51000LL
+#define ATTO_UNIT 0xDE0B6B3A7640000LL
+#define VBAT_REF 3800000
+
+/*
+ * IADC Gain compensation steps:
+ * If Input/OTG absent:
+ * - read VBAT_FILTERED, KVCOR, GAIN
+ *	- calculate the gain compensation using the following formula:
+ *	  gain = (1 + dfl_gain) * (1 + kvcor * (vbat_filtered - 3800000)) - 1;
+ * else
+ * - reset to the default gain compensation
+ */
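+/*
+ * Note on the fixed-point scaling below (inferred from the unit macros):
+ * the default gain is kept in micro units (MICRO_UNIT == 1.0) and kvcor is
+ * scaled so that its product with the uV delta is in pico units
+ * (PICO_UNIT == 1.0), making the cross product atto-scaled; subtracting
+ * ATTO_UNIT and dividing by PICO_UNIT returns the result to micro units
+ * for half_float_to_buffer().
+ */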
+static void iadc_gain_comp_work(struct work_struct *work)
+{
+ u8 reg[4];
+ int rc;
+ uint64_t vbat_filtered;
+ int64_t gain, kvcor, temp, numerator;
+ struct fg_chip *chip = container_of(work, struct fg_chip,
+ gain_comp_work);
+ bool input_present = is_input_present(chip);
+ bool otg_present = is_otg_present(chip);
+
+ if (!chip->init_done)
+ goto done;
+
+ if (!input_present && !otg_present) {
+ /* read VBAT_FILTERED */
+ rc = fg_mem_read(chip, reg, TEMP_COUNTER_REG, 3,
+ VBAT_FILTERED_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read VBAT: rc=%d\n", rc);
+ goto done;
+ }
+ temp = (reg[2] << 16) | (reg[1] << 8) | reg[0];
+ vbat_filtered = div_u64((u64)temp * LSB_24B_NUMRTR,
+ LSB_24B_DENMTR);
+
+ /* read K_VCOR */
+ rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, 0, 0);
+ if (rc) {
+			pr_err("Failed to read KVCOR rc=%d\n", rc);
+ goto done;
+ }
+ kvcor = half_float(reg);
+
+ /* calculate gain */
+ numerator = (MICRO_UNIT + chip->iadc_comp_data.dfl_gain)
+ * (PICO_UNIT + kvcor * (vbat_filtered - VBAT_REF))
+ - ATTO_UNIT;
+ gain = div64_s64(numerator, PICO_UNIT);
+
+ /* write back gain */
+ half_float_to_buffer(gain, reg);
+ rc = fg_mem_write(chip, reg, GAIN_REG, 2, GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to write gain reg rc=%d\n", rc);
+ goto done;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("IADC gain update [%x %x]\n", reg[1], reg[0]);
+ chip->iadc_comp_data.gain_active = true;
+ } else {
+ /* reset gain register */
+ rc = fg_mem_write(chip, chip->iadc_comp_data.dfl_gain_reg,
+ GAIN_REG, 2, GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write gain comp: %d\n", rc);
+ goto done;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("IADC gain reset [%x %x]\n",
+ chip->iadc_comp_data.dfl_gain_reg[1],
+ chip->iadc_comp_data.dfl_gain_reg[0]);
+ chip->iadc_comp_data.gain_active = false;
+ }
+
+done:
+ fg_relax(&chip->gain_comp_wakeup_source);
+}
+
+#define BATT_MISSING_STS BIT(6)
+static bool is_battery_missing(struct fg_chip *chip)
+{
+ int rc;
+ u8 fg_batt_sts;
+
+ rc = fg_read(chip, &fg_batt_sts,
+ INT_RT_STS(chip->batt_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->batt_base), rc);
+ return false;
+ }
+
+ return (fg_batt_sts & BATT_MISSING_STS) ? true : false;
+}
+
+#define SOC_FIRST_EST_DONE BIT(5)
+static bool is_first_est_done(struct fg_chip *chip)
+{
+ int rc;
+ u8 fg_soc_sts;
+
+ rc = fg_read(chip, &fg_soc_sts,
+ INT_RT_STS(chip->soc_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ return false;
+ }
+
+ return (fg_soc_sts & SOC_FIRST_EST_DONE) ? true : false;
+}
+
+static irqreturn_t fg_vbatt_low_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+ int rc;
+ bool vbatt_low_sts;
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("vbatt-low triggered\n");
+
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ rc = fg_get_vbatt_status(chip, &vbatt_low_sts);
+ if (rc) {
+ pr_err("error in reading vbatt_status, rc:%d\n", rc);
+ goto out;
+ }
+ if (!vbatt_low_sts && chip->vbat_low_irq_enabled) {
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("disabling vbatt_low irq\n");
+ disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = false;
+ }
+ }
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+out:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_batt_missing_irq_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+ bool batt_missing = is_battery_missing(chip);
+
+ if (batt_missing) {
+ chip->battery_missing = true;
+ chip->profile_loaded = false;
+ chip->batt_type = default_batt_type;
+ mutex_lock(&chip->cyc_ctr.lock);
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("battery missing, clearing cycle counters\n");
+ clear_cycle_counter(chip);
+ mutex_unlock(&chip->cyc_ctr.lock);
+ } else {
+ if (!chip->use_otp_profile) {
+ reinit_completion(&chip->batt_id_avail);
+ reinit_completion(&chip->first_soc_done);
+ schedule_delayed_work(&chip->batt_profile_init, 0);
+ cancel_delayed_work(&chip->update_sram_data);
+ schedule_delayed_work(
+ &chip->update_sram_data,
+ msecs_to_jiffies(0));
+ } else {
+ chip->battery_missing = false;
+ }
+ }
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("batt-missing triggered: %s\n",
+ batt_missing ? "missing" : "present");
+
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_mem_avail_irq_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+ u8 mem_if_sts;
+ int rc;
+
+ rc = fg_read(chip, &mem_if_sts, INT_RT_STS(chip->mem_base), 1);
+ if (rc) {
+ pr_err("failed to read mem status rc=%d\n", rc);
+ return IRQ_HANDLED;
+ }
+
+ if (fg_check_sram_access(chip)) {
+		if ((fg_debug_mask & FG_IRQS) &&
+			(fg_debug_mask &
+				(FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES)))
+ pr_info("sram access granted\n");
+ reinit_completion(&chip->sram_access_revoked);
+ complete_all(&chip->sram_access_granted);
+ } else {
+		if ((fg_debug_mask & FG_IRQS) &&
+			(fg_debug_mask &
+				(FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES)))
+ pr_info("sram access revoked\n");
+ complete_all(&chip->sram_access_revoked);
+ }
+
+	if (!rc && (fg_debug_mask & FG_IRQS) &&
+		(fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES)))
+ pr_info("mem_if sts 0x%02x\n", mem_if_sts);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_soc_irq_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+ u8 soc_rt_sts;
+ int rc;
+
+ rc = fg_read(chip, &soc_rt_sts, INT_RT_STS(chip->soc_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ }
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("triggered 0x%x\n", soc_rt_sts);
+
+ schedule_work(&chip->battery_age_work);
+
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+
+ if (chip->rslow_comp.chg_rs_to_rslow > 0 &&
+ chip->rslow_comp.chg_rslow_comp_c1 > 0 &&
+ chip->rslow_comp.chg_rslow_comp_c2 > 0)
+ schedule_work(&chip->rslow_comp_work);
+ if (chip->cyc_ctr.en)
+ schedule_work(&chip->cycle_count_work);
+ schedule_work(&chip->update_esr_work);
+ if (chip->charge_full)
+ schedule_work(&chip->charge_full_work);
+ if (chip->wa_flag & IADC_GAIN_COMP_WA
+ && chip->iadc_comp_data.gain_active) {
+ fg_stay_awake(&chip->gain_comp_wakeup_source);
+ schedule_work(&chip->gain_comp_work);
+ }
+
+ if (chip->wa_flag & USE_CC_SOC_REG
+ && chip->learning_data.active) {
+ fg_stay_awake(&chip->capacity_learning_wakeup_source);
+ schedule_work(&chip->fg_cap_learning_work);
+ }
+
+ if (chip->esr_pulse_tune_en) {
+ fg_stay_awake(&chip->esr_extract_wakeup_source);
+ schedule_work(&chip->esr_extract_config_work);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#define FG_EMPTY_DEBOUNCE_MS 1500
+static irqreturn_t fg_empty_soc_irq_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+ u8 soc_rt_sts;
+ int rc;
+
+ rc = fg_read(chip, &soc_rt_sts, INT_RT_STS(chip->soc_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ goto done;
+ }
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("triggered 0x%x\n", soc_rt_sts);
+ if (fg_is_batt_empty(chip)) {
+ fg_stay_awake(&chip->empty_check_wakeup_source);
+ schedule_delayed_work(&chip->check_empty_work,
+ msecs_to_jiffies(FG_EMPTY_DEBOUNCE_MS));
+ } else {
+ chip->soc_empty = false;
+ }
+
+done:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_first_soc_irq_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("triggered\n");
+
+ if (fg_est_dump)
+ schedule_work(&chip->dump_sram);
+
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+
+ complete_all(&chip->first_soc_done);
+
+ return IRQ_HANDLED;
+}
+
+static void fg_external_power_changed(struct power_supply *psy)
+{
+ struct fg_chip *chip = power_supply_get_drvdata(psy);
+
+ if (is_input_present(chip) && chip->rslow_comp.active &&
+ chip->rslow_comp.chg_rs_to_rslow > 0 &&
+ chip->rslow_comp.chg_rslow_comp_c1 > 0 &&
+ chip->rslow_comp.chg_rslow_comp_c2 > 0)
+ schedule_work(&chip->rslow_comp_work);
+ if (!is_input_present(chip) && chip->resume_soc_lowered) {
+ fg_stay_awake(&chip->resume_soc_wakeup_source);
+ schedule_work(&chip->set_resume_soc_work);
+ }
+ if (!is_input_present(chip) && chip->charge_full)
+ schedule_work(&chip->charge_full_work);
+}
+
+static void set_resume_soc_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ set_resume_soc_work);
+ int rc, resume_soc_raw;
+
+ if (is_input_present(chip) && !chip->resume_soc_lowered) {
+ if (!chip->charge_done)
+ goto done;
+ resume_soc_raw = get_monotonic_soc_raw(chip)
+ - (0xFF - settings[FG_MEM_RESUME_SOC].value);
+ if (resume_soc_raw > 0 && resume_soc_raw < FULL_SOC_RAW) {
+ rc = fg_set_resume_soc(chip, resume_soc_raw);
+ if (rc) {
+ pr_err("Couldn't set resume SOC for FG\n");
+ goto done;
+ }
+ if (fg_debug_mask & FG_STATUS) {
+ pr_info("resume soc lowered to 0x%02x\n",
+ resume_soc_raw);
+ }
+ } else if (settings[FG_MEM_RESUME_SOC].value > 0) {
+ pr_err("bad resume soc 0x%02x\n", resume_soc_raw);
+ }
+ chip->charge_done = false;
+ chip->resume_soc_lowered = true;
+ } else if (chip->resume_soc_lowered && (!is_input_present(chip)
+ || chip->health == POWER_SUPPLY_HEALTH_GOOD)) {
+ resume_soc_raw = settings[FG_MEM_RESUME_SOC].value;
+ if (resume_soc_raw > 0 && resume_soc_raw < FULL_SOC_RAW) {
+ rc = fg_set_resume_soc(chip, resume_soc_raw);
+ if (rc) {
+ pr_err("Couldn't set resume SOC for FG\n");
+ goto done;
+ }
+ if (fg_debug_mask & FG_STATUS) {
+ pr_info("resume soc set to 0x%02x\n",
+ resume_soc_raw);
+ }
+ } else if (settings[FG_MEM_RESUME_SOC].value > 0) {
+ pr_err("bad resume soc 0x%02x\n", resume_soc_raw);
+ }
+ chip->resume_soc_lowered = false;
+ }
+done:
+ fg_relax(&chip->resume_soc_wakeup_source);
+}
+
+#define OCV_COEFFS_START_REG 0x4C0
+#define OCV_JUNCTION_REG 0x4D8
+#define NOM_CAP_REG 0x4F4
+#define CUTOFF_VOLTAGE_REG 0x40C
+#define RSLOW_CFG_REG 0x538
+#define RSLOW_CFG_OFFSET 2
+#define RSLOW_THRESH_REG 0x52C
+#define RSLOW_THRESH_OFFSET 0
+#define TEMP_RS_TO_RSLOW_OFFSET 2
+#define RSLOW_COMP_REG 0x528
+#define RSLOW_COMP_C1_OFFSET 0
+#define RSLOW_COMP_C2_OFFSET 2
+static int populate_system_data(struct fg_chip *chip)
+{
+ u8 buffer[24];
+ int rc, i;
+ int16_t cc_mah;
+
+ fg_mem_lock(chip);
+ rc = fg_mem_read(chip, buffer, OCV_COEFFS_START_REG, 24, 0, 0);
+ if (rc) {
+ pr_err("Failed to read ocv coefficients: %d\n", rc);
+ goto done;
+ }
+ for (i = 0; i < 12; i += 1)
+ chip->ocv_coeffs[i] = half_float(buffer + (i * 2));
+ if (fg_debug_mask & FG_AGING) {
+ pr_info("coeffs1 = %lld %lld %lld %lld\n",
+ chip->ocv_coeffs[0], chip->ocv_coeffs[1],
+ chip->ocv_coeffs[2], chip->ocv_coeffs[3]);
+ pr_info("coeffs2 = %lld %lld %lld %lld\n",
+ chip->ocv_coeffs[4], chip->ocv_coeffs[5],
+ chip->ocv_coeffs[6], chip->ocv_coeffs[7]);
+ pr_info("coeffs3 = %lld %lld %lld %lld\n",
+ chip->ocv_coeffs[8], chip->ocv_coeffs[9],
+ chip->ocv_coeffs[10], chip->ocv_coeffs[11]);
+ }
+ rc = fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 1, 0, 0);
+ chip->ocv_junction_p1p2 = buffer[0] * 100 / 255;
+ rc |= fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 1, 1, 0);
+ chip->ocv_junction_p2p3 = buffer[0] * 100 / 255;
+ if (rc) {
+ pr_err("Failed to read ocv junctions: %d\n", rc);
+ goto done;
+ }
+ rc = fg_mem_read(chip, buffer, NOM_CAP_REG, 2, 0, 0);
+ if (rc) {
+		pr_err("Failed to read nominal capacity: %d\n", rc);
+ goto done;
+ }
+ chip->nom_cap_uah = bcap_uah_2b(buffer);
+ chip->actual_cap_uah = chip->nom_cap_uah;
+ if (chip->learning_data.learned_cc_uah == 0) {
+ chip->learning_data.learned_cc_uah = chip->nom_cap_uah;
+ fg_cap_learning_save_data(chip);
+ } else if (chip->learning_data.feedback_on) {
+ cc_mah = div64_s64(chip->learning_data.learned_cc_uah, 1000);
+ rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah);
+ if (rc)
+ pr_err("Error in restoring cc_soc_coeff, rc:%d\n", rc);
+ }
+ rc = fg_mem_read(chip, buffer, CUTOFF_VOLTAGE_REG, 2, 0, 0);
+ if (rc) {
+ pr_err("Failed to read cutoff voltage: %d\n", rc);
+ goto done;
+ }
+ chip->cutoff_voltage = voltage_2b(buffer);
+ if (fg_debug_mask & FG_AGING)
+ pr_info("cutoff_voltage = %lld, nom_cap_uah = %d p1p2 = %d, p2p3 = %d\n",
+ chip->cutoff_voltage, chip->nom_cap_uah,
+ chip->ocv_junction_p1p2,
+ chip->ocv_junction_p2p3);
+
+ rc = fg_mem_read(chip, buffer, RSLOW_CFG_REG, 1, RSLOW_CFG_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read rslow cfg: %d\n", rc);
+ goto done;
+ }
+ chip->rslow_comp.rslow_cfg = buffer[0];
+ rc = fg_mem_read(chip, buffer, RSLOW_THRESH_REG, 1,
+ RSLOW_THRESH_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read rslow thresh: %d\n", rc);
+ goto done;
+ }
+ chip->rslow_comp.rslow_thr = buffer[0];
+ rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
+ RSLOW_THRESH_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read rs to rslow: %d\n", rc);
+ goto done;
+ }
+ memcpy(chip->rslow_comp.rs_to_rslow, buffer, 2);
+ rc = fg_mem_read(chip, buffer, RSLOW_COMP_REG, 4,
+ RSLOW_COMP_C1_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read rslow comp: %d\n", rc);
+ goto done;
+ }
+ memcpy(chip->rslow_comp.rslow_comp, buffer, 4);
+
+done:
+ fg_mem_release(chip);
+ return rc;
+}
+
+#define RSLOW_CFG_MASK (BIT(2) | BIT(3) | BIT(4) | BIT(5))
+#define RSLOW_CFG_ON_VAL (BIT(2) | BIT(3))
+#define RSLOW_THRESH_FULL_VAL 0xFF
+static int fg_rslow_charge_comp_set(struct fg_chip *chip)
+{
+ int rc;
+ u8 buffer[2];
+
+ mutex_lock(&chip->rslow_comp.lock);
+ fg_mem_lock(chip);
+
+ rc = fg_mem_masked_write(chip, RSLOW_CFG_REG,
+ RSLOW_CFG_MASK, RSLOW_CFG_ON_VAL, RSLOW_CFG_OFFSET);
+ if (rc) {
+ pr_err("unable to write rslow cfg: %d\n", rc);
+ goto done;
+ }
+ rc = fg_mem_masked_write(chip, RSLOW_THRESH_REG,
+ 0xFF, RSLOW_THRESH_FULL_VAL, RSLOW_THRESH_OFFSET);
+ if (rc) {
+ pr_err("unable to write rslow thresh: %d\n", rc);
+ goto done;
+ }
+
+ half_float_to_buffer(chip->rslow_comp.chg_rs_to_rslow, buffer);
+ rc = fg_mem_write(chip, buffer,
+ TEMP_RS_TO_RSLOW_REG, 2, TEMP_RS_TO_RSLOW_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rs to rslow: %d\n", rc);
+ goto done;
+ }
+ half_float_to_buffer(chip->rslow_comp.chg_rslow_comp_c1, buffer);
+ rc = fg_mem_write(chip, buffer,
+ RSLOW_COMP_REG, 2, RSLOW_COMP_C1_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rslow comp: %d\n", rc);
+ goto done;
+ }
+ half_float_to_buffer(chip->rslow_comp.chg_rslow_comp_c2, buffer);
+ rc = fg_mem_write(chip, buffer,
+ RSLOW_COMP_REG, 2, RSLOW_COMP_C2_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rslow comp: %d\n", rc);
+ goto done;
+ }
+ chip->rslow_comp.active = true;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Activated rslow charge comp values\n");
+
+done:
+ fg_mem_release(chip);
+ mutex_unlock(&chip->rslow_comp.lock);
+ return rc;
+}
+
+#define RSLOW_CFG_ORIG_MASK (BIT(4) | BIT(5))
+static int fg_rslow_charge_comp_clear(struct fg_chip *chip)
+{
+ u8 reg;
+ int rc;
+
+ mutex_lock(&chip->rslow_comp.lock);
+ fg_mem_lock(chip);
+
+ reg = chip->rslow_comp.rslow_cfg & RSLOW_CFG_ORIG_MASK;
+ rc = fg_mem_masked_write(chip, RSLOW_CFG_REG,
+ RSLOW_CFG_MASK, reg, RSLOW_CFG_OFFSET);
+ if (rc) {
+ pr_err("unable to write rslow cfg: %d\n", rc);
+ goto done;
+ }
+ rc = fg_mem_masked_write(chip, RSLOW_THRESH_REG,
+ 0xFF, chip->rslow_comp.rslow_thr, RSLOW_THRESH_OFFSET);
+ if (rc) {
+ pr_err("unable to write rslow thresh: %d\n", rc);
+ goto done;
+ }
+
+ rc = fg_mem_write(chip, chip->rslow_comp.rs_to_rslow,
+ TEMP_RS_TO_RSLOW_REG, 2, TEMP_RS_TO_RSLOW_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rs to rslow: %d\n", rc);
+ goto done;
+ }
+ rc = fg_mem_write(chip, chip->rslow_comp.rslow_comp,
+ RSLOW_COMP_REG, 4, RSLOW_COMP_C1_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rslow comp: %d\n", rc);
+ goto done;
+ }
+ chip->rslow_comp.active = false;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Cleared rslow charge comp values\n");
+
+done:
+ fg_mem_release(chip);
+ mutex_unlock(&chip->rslow_comp.lock);
+ return rc;
+}
+
+static void rslow_comp_work(struct work_struct *work)
+{
+ int battery_soc_1b;
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ rslow_comp_work);
+
+ battery_soc_1b = get_battery_soc_raw(chip) >> 16;
+ if (battery_soc_1b > chip->rslow_comp.chg_rslow_comp_thr
+ && chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ if (!chip->rslow_comp.active)
+ fg_rslow_charge_comp_set(chip);
+ } else {
+ if (chip->rslow_comp.active)
+ fg_rslow_charge_comp_clear(chip);
+ }
+}
+
+#define MICROUNITS_TO_ADC_RAW(units)	\
+			div64_s64((units) * LSB_16B_DENMTR, LSB_16B_NUMRTR)
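+/*
+ * Program the charge termination current threshold: the configured mA value
+ * is negated (termination current flows into the battery), scaled to uA,
+ * converted to the 16-bit ADC LSB domain by the macro above, and stored
+ * little-endian.
+ */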
+static int update_chg_iterm(struct fg_chip *chip)
+{
+ u8 data[2];
+ u16 converted_current_raw;
+ s64 current_ma = -settings[FG_MEM_CHG_TERM_CURRENT].value;
+
+ converted_current_raw = (s16)MICROUNITS_TO_ADC_RAW(current_ma * 1000);
+ data[0] = cpu_to_le16(converted_current_raw) & 0xFF;
+ data[1] = cpu_to_le16(converted_current_raw) >> 8;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("current = %lld, converted_raw = %04x, data = %02x %02x\n",
+ current_ma, converted_current_raw, data[0], data[1]);
+ return fg_mem_write(chip, data,
+ settings[FG_MEM_CHG_TERM_CURRENT].address,
+ 2, settings[FG_MEM_CHG_TERM_CURRENT].offset, 0);
+}
+
+#define CC_CV_SETPOINT_REG 0x4F8
+#define CC_CV_SETPOINT_OFFSET 0
+static void update_cc_cv_setpoint(struct fg_chip *chip)
+{
+ int rc;
+ u8 tmp[2];
+
+ if (!chip->cc_cv_threshold_mv)
+ return;
+ batt_to_setpoint_adc(chip->cc_cv_threshold_mv, tmp);
+ rc = fg_mem_write(chip, tmp, CC_CV_SETPOINT_REG, 2,
+ CC_CV_SETPOINT_OFFSET, 0);
+ if (rc) {
+ pr_err("failed to write CC_CV_VOLT rc=%d\n", rc);
+ return;
+ }
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Wrote %x %x to address %x for CC_CV setpoint\n",
+ tmp[0], tmp[1], CC_CV_SETPOINT_REG);
+}
+
+#define CBITS_INPUT_FILTER_REG 0x4B4
+#define CBITS_RMEAS1_OFFSET 1
+#define CBITS_RMEAS2_OFFSET 2
+#define CBITS_RMEAS1_DEFAULT_VAL 0x65
+#define CBITS_RMEAS2_DEFAULT_VAL 0x65
+#define IMPTR_FAST_TIME_SHIFT 1
+#define IMPTR_LONG_TIME_SHIFT (1 << 4)
+#define IMPTR_PULSE_CTR_CHG 1
+#define IMPTR_PULSE_CTR_DISCHG (1 << 4)
+static int fg_config_imptr_pulse(struct fg_chip *chip, bool slow)
+{
+ int rc;
+	u8 cntr[4] = {0, 0, 0, 0};
+ u8 val;
+
+ if (slow == chip->imptr_pulse_slow_en) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("imptr_pulse_slow is %sabled already\n",
+ slow ? "en" : "dis");
+ return 0;
+ }
+
+ fg_mem_lock(chip);
+
+ val = slow ? (IMPTR_FAST_TIME_SHIFT | IMPTR_LONG_TIME_SHIFT) :
+ CBITS_RMEAS1_DEFAULT_VAL;
+ rc = fg_mem_write(chip, &val, CBITS_INPUT_FILTER_REG, 1,
+ CBITS_RMEAS1_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write cbits_rmeas1_offset rc=%d\n", rc);
+ goto done;
+ }
+
+ val = slow ? (IMPTR_PULSE_CTR_CHG | IMPTR_PULSE_CTR_DISCHG) :
+ CBITS_RMEAS2_DEFAULT_VAL;
+ rc = fg_mem_write(chip, &val, CBITS_INPUT_FILTER_REG, 1,
+ CBITS_RMEAS2_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write cbits_rmeas2_offset rc=%d\n", rc);
+ goto done;
+ }
+
+ if (slow) {
+ rc = fg_mem_write(chip, cntr, COUNTER_IMPTR_REG, 4,
+ COUNTER_IMPTR_OFFSET, 0);
+ if (rc) {
+ pr_err("failed to write COUNTER_IMPTR rc=%d\n", rc);
+ goto done;
+ }
+
+ rc = fg_mem_write(chip, cntr, COUNTER_PULSE_REG, 2,
+ COUNTER_PULSE_OFFSET, 0);
+ if (rc) {
+			pr_err("failed to write COUNTER_PULSE rc=%d\n", rc);
+ goto done;
+ }
+ }
+
+ chip->imptr_pulse_slow_en = slow;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("imptr_pulse_slow is %sabled\n", slow ? "en" : "dis");
+done:
+ fg_mem_release(chip);
+ return rc;
+}
+
+#define CURRENT_DELTA_MIN_REG 0x42C
+#define CURRENT_DELTA_MIN_OFFSET 1
+#define SYS_CFG_1_REG 0x4AC
+#define SYS_CFG_1_OFFSET 0
+#define CURRENT_DELTA_MIN_DEFAULT 0x16
+#define CURRENT_DELTA_MIN_500MA 0xCD
+#define RSLOW_CFG_USE_FIX_RSER_VAL BIT(7)
+#define ENABLE_ESR_PULSE_VAL BIT(3)
+static int fg_config_esr_extract(struct fg_chip *chip, bool disable)
+{
+ int rc;
+ u8 val;
+
+ if (disable == chip->esr_extract_disabled) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("ESR extract already %sabled\n",
+ disable ? "dis" : "en");
+ return 0;
+ }
+
+ fg_mem_lock(chip);
+
+ val = disable ? CURRENT_DELTA_MIN_500MA :
+ CURRENT_DELTA_MIN_DEFAULT;
+ rc = fg_mem_write(chip, &val, CURRENT_DELTA_MIN_REG, 1,
+ CURRENT_DELTA_MIN_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write curr_delta_min rc=%d\n", rc);
+ goto done;
+ }
+
+ val = disable ? RSLOW_CFG_USE_FIX_RSER_VAL : 0;
+ rc = fg_mem_masked_write(chip, RSLOW_CFG_REG,
+ RSLOW_CFG_USE_FIX_RSER_VAL, val, RSLOW_CFG_OFFSET);
+ if (rc) {
+ pr_err("unable to write rslow cfg rc= %d\n", rc);
+ goto done;
+ }
+
+ val = disable ? 0 : ENABLE_ESR_PULSE_VAL;
+ rc = fg_mem_masked_write(chip, SYS_CFG_1_REG,
+ ENABLE_ESR_PULSE_VAL, val, SYS_CFG_1_OFFSET);
+ if (rc) {
+ pr_err("unable to write sys_cfg_1 rc= %d\n", rc);
+ goto done;
+ }
+
+ chip->esr_extract_disabled = disable;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("ESR extract is %sabled\n", disable ? "dis" : "en");
+done:
+ fg_mem_release(chip);
+ return rc;
+}
+
+#define ESR_EXTRACT_STOP_SOC 2
+#define IMPTR_PULSE_CONFIG_SOC 5
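+/*
+ * Low-SoC ESR policy: with input present at or below 2% SoC, ESR extraction
+ * is disabled entirely; above 2% it is re-enabled, and at or below 5% the
+ * impedance pulse timing is additionally slowed down.
+ */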
+static void esr_extract_config_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work, struct fg_chip,
+ esr_extract_config_work);
+ bool input_present = is_input_present(chip);
+ int capacity = get_prop_capacity(chip);
+
+ if (input_present && capacity <= ESR_EXTRACT_STOP_SOC) {
+ fg_config_esr_extract(chip, true);
+ } else if (capacity > ESR_EXTRACT_STOP_SOC) {
+ fg_config_esr_extract(chip, false);
+
+ if (capacity <= IMPTR_PULSE_CONFIG_SOC)
+ fg_config_imptr_pulse(chip, true);
+ else
+ fg_config_imptr_pulse(chip, false);
+ }
+
+ fg_relax(&chip->esr_extract_wakeup_source);
+}
+
+#define LOW_LATENCY BIT(6)
+#define BATT_PROFILE_OFFSET 0x4C0
+#define PROFILE_INTEGRITY_REG 0x53C
+#define PROFILE_INTEGRITY_BIT BIT(0)
+#define FIRST_EST_DONE_BIT BIT(5)
+#define MAX_TRIES_FIRST_EST 3
+#define FIRST_EST_WAIT_MS 2000
+#define PROFILE_LOAD_TIMEOUT_MS 5000
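+/*
+ * Restart sequence implemented below: clear the no-OTP-reload and restart
+ * bits, take low-latency SRAM access, optionally rewrite the battery
+ * profile and its integrity bit, then set NO_OTP_PROF_RELOAD together with
+ * REDO_FIRST_ESTIMATE | RESTART_GO and block on first_soc_done so callers
+ * see a valid first SoC estimate before the restart bits are cleared again.
+ */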
+static int fg_do_restart(struct fg_chip *chip, bool write_profile)
+{
+ int rc, ibat_ua;
+ u8 reg = 0;
+ u8 buf[2];
+ bool tried_once = false;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("restarting fuel gauge...\n");
+
+try_again:
+ if (write_profile) {
+ if (!chip->charging_disabled) {
+ pr_err("Charging not yet disabled!\n");
+ return -EINVAL;
+ }
+
+ ibat_ua = get_sram_prop_now(chip, FG_DATA_CURRENT);
+ if (ibat_ua == -EINVAL) {
+ pr_err("SRAM not updated yet!\n");
+ return ibat_ua;
+ }
+
+ if (ibat_ua < 0) {
+			pr_warn("Charging enabled? ibat_ua: %d\n", ibat_ua);
+
+ if (!tried_once) {
+ cancel_delayed_work(&chip->update_sram_data);
+ schedule_delayed_work(&chip->update_sram_data,
+ msecs_to_jiffies(0));
+ msleep(1000);
+ tried_once = true;
+ goto try_again;
+ }
+ }
+ }
+
+ chip->fg_restarting = true;
+ /*
+	 * if sw rbias control is active, save the battery temperature so that
+	 * there is no window after the restart without a valid temperature
+	 * reading
+ */
+ if (chip->sw_rbias_ctrl) {
+ rc = fg_mem_read(chip, buf,
+ fg_data[FG_DATA_BATT_TEMP].address,
+ fg_data[FG_DATA_BATT_TEMP].len,
+ fg_data[FG_DATA_BATT_TEMP].offset, 0);
+ if (rc) {
+ pr_err("failed to read batt temp rc=%d\n", rc);
+ goto sub_and_fail;
+ }
+ }
+ /*
+ * release the sram access and configure the correct settings
+ * before re-requesting access.
+ */
+ mutex_lock(&chip->rw_lock);
+ fg_release_access(chip);
+
+ rc = fg_masked_write(chip, chip->soc_base + SOC_BOOT_MOD,
+ NO_OTP_PROF_RELOAD, 0, 1);
+ if (rc) {
+ pr_err("failed to set no otp reload bit\n");
+ goto unlock_and_fail;
+ }
+
+ /* unset the restart bits so the fg doesn't continuously restart */
+ reg = REDO_FIRST_ESTIMATE | RESTART_GO;
+ rc = fg_masked_write(chip, chip->soc_base + SOC_RESTART,
+ reg, 0, 1);
+ if (rc) {
+ pr_err("failed to unset fg restart: %d\n", rc);
+ goto unlock_and_fail;
+ }
+
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip),
+ LOW_LATENCY, LOW_LATENCY, 1);
+ if (rc) {
+ pr_err("failed to set low latency access bit\n");
+ goto unlock_and_fail;
+ }
+ mutex_unlock(&chip->rw_lock);
+
+ /* read once to get a fg cycle in */
+ rc = fg_mem_read(chip, &reg, PROFILE_INTEGRITY_REG, 1, 0, 0);
+ if (rc) {
+ pr_err("failed to read profile integrity rc=%d\n", rc);
+ goto fail;
+ }
+
+ /*
+ * If this is not the first time a profile has been loaded, sleep for
+ * 3 seconds to make sure the NO_OTP_RELOAD is cleared in memory
+ */
+ if (chip->first_profile_loaded)
+ msleep(3000);
+
+ mutex_lock(&chip->rw_lock);
+ fg_release_access(chip);
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip), LOW_LATENCY, 0, 1);
+ if (rc) {
+ pr_err("failed to set low latency access bit\n");
+ goto unlock_and_fail;
+ }
+
+ atomic_add_return(1, &chip->memif_user_cnt);
+ mutex_unlock(&chip->rw_lock);
+
+ if (write_profile) {
+ /* write the battery profile */
+ rc = fg_mem_write(chip, chip->batt_profile, BATT_PROFILE_OFFSET,
+ chip->batt_profile_len, 0, 1);
+ if (rc) {
+ pr_err("failed to write profile rc=%d\n", rc);
+ goto sub_and_fail;
+ }
+ /* write the integrity bits and release access */
+ rc = fg_mem_masked_write(chip, PROFILE_INTEGRITY_REG,
+ PROFILE_INTEGRITY_BIT,
+ PROFILE_INTEGRITY_BIT, 0);
+ if (rc) {
+ pr_err("failed to write profile rc=%d\n", rc);
+ goto sub_and_fail;
+ }
+ }
+
+ /* decrement the user count so that memory access can be released */
+ fg_release_access_if_necessary(chip);
+
+ /*
+ * make sure that the first estimate has completed
+ * in case of a hotswap
+ */
+ rc = wait_for_completion_interruptible_timeout(&chip->first_soc_done,
+ msecs_to_jiffies(PROFILE_LOAD_TIMEOUT_MS));
+ if (rc <= 0) {
+ pr_err("transaction timed out rc=%d\n", rc);
+ rc = -ETIMEDOUT;
+ goto fail;
+ }
+
+ /*
+ * reinitialize the completion so that the driver knows when the restart
+ * finishes
+ */
+ reinit_completion(&chip->first_soc_done);
+
+ if (chip->esr_pulse_tune_en) {
+ fg_stay_awake(&chip->esr_extract_wakeup_source);
+ schedule_work(&chip->esr_extract_config_work);
+ }
+
+ /*
+ * set the restart bits so that the next fg cycle will not reload
+ * the profile
+ */
+ rc = fg_masked_write(chip, chip->soc_base + SOC_BOOT_MOD,
+ NO_OTP_PROF_RELOAD, NO_OTP_PROF_RELOAD, 1);
+ if (rc) {
+ pr_err("failed to set no otp reload bit\n");
+ goto fail;
+ }
+
+ reg = REDO_FIRST_ESTIMATE | RESTART_GO;
+ rc = fg_masked_write(chip, chip->soc_base + SOC_RESTART,
+ reg, reg, 1);
+ if (rc) {
+ pr_err("failed to set fg restart: %d\n", rc);
+ goto fail;
+ }
+
+ /* wait for the first estimate to complete */
+ rc = wait_for_completion_interruptible_timeout(&chip->first_soc_done,
+ msecs_to_jiffies(PROFILE_LOAD_TIMEOUT_MS));
+ if (rc <= 0) {
+ pr_err("transaction timed out rc=%d\n", rc);
+ rc = -ETIMEDOUT;
+ goto fail;
+ }
+ rc = fg_read(chip, &reg, INT_RT_STS(chip->soc_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ goto fail;
+ }
+ if ((reg & FIRST_EST_DONE_BIT) == 0)
+ pr_err("Battery profile reloading failed, no first estimate\n");
+
+ rc = fg_masked_write(chip, chip->soc_base + SOC_BOOT_MOD,
+ NO_OTP_PROF_RELOAD, 0, 1);
+ if (rc) {
+ pr_err("failed to set no otp reload bit\n");
+ goto fail;
+ }
+ /* unset the restart bits so the fg doesn't continuously restart */
+ reg = REDO_FIRST_ESTIMATE | RESTART_GO;
+ rc = fg_masked_write(chip, chip->soc_base + SOC_RESTART,
+ reg, 0, 1);
+ if (rc) {
+ pr_err("failed to unset fg restart: %d\n", rc);
+ goto fail;
+ }
+
+ /* restore the battery temperature reading here */
+ if (chip->sw_rbias_ctrl) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("reloaded 0x%02x%02x into batt temp",
+ buf[0], buf[1]);
+ rc = fg_mem_write(chip, buf,
+ fg_data[FG_DATA_BATT_TEMP].address,
+ fg_data[FG_DATA_BATT_TEMP].len,
+ fg_data[FG_DATA_BATT_TEMP].offset, 0);
+ if (rc) {
+ pr_err("failed to write batt temp rc=%d\n", rc);
+ goto fail;
+ }
+ }
+
+ /* Enable charging now as the first estimate is done now */
+ if (chip->charging_disabled) {
+ rc = set_prop_enable_charging(chip, true);
+ if (rc)
+ pr_err("Failed to enable charging, rc=%d\n", rc);
+ else
+ chip->charging_disabled = false;
+ }
+
+ chip->fg_restarting = false;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("done!\n");
+ return 0;
+
+unlock_and_fail:
+ mutex_unlock(&chip->rw_lock);
+ goto fail;
+sub_and_fail:
+ fg_release_access_if_necessary(chip);
+ goto fail;
+fail:
+ chip->fg_restarting = false;
+ return -EINVAL;
+}
+
+#define FG_PROFILE_LEN 128
+#define PROFILE_COMPARE_LEN 32
+#define THERMAL_COEFF_ADDR 0x444
+#define THERMAL_COEFF_OFFSET 0x2
+#define BATTERY_PSY_WAIT_MS 2000
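+/*
+ * Profile selection overview: wait for the battery ID, pick the best
+ * matching profile from the qcom,battery-data subtree, and reload it
+ * into SRAM only when the stored profile differs, the integrity bit is
+ * clear, Vbat is out of range, or the battery reads as empty.
+ */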
+static int fg_batt_profile_init(struct fg_chip *chip)
+{
+ int rc = 0, ret, len, batt_id;
+ struct device_node *node = chip->pdev->dev.of_node;
+ struct device_node *batt_node, *profile_node;
+ const char *data, *batt_type_str;
+ bool tried_again = false, vbat_in_range, profiles_same;
+ u8 reg = 0;
+
+wait:
+ fg_stay_awake(&chip->profile_wakeup_source);
+ ret = wait_for_completion_interruptible_timeout(&chip->batt_id_avail,
+ msecs_to_jiffies(PROFILE_LOAD_TIMEOUT_MS));
+ /* If we were interrupted, wait one more time. */
+ if (ret == -ERESTARTSYS && !tried_again) {
+ tried_again = true;
+ pr_debug("interrupted, waiting again\n");
+ goto wait;
+ } else if (ret <= 0) {
+ rc = -ETIMEDOUT;
+ pr_err("profile loading timed out rc=%d\n", rc);
+ goto no_profile;
+ }
+
+ batt_node = of_find_node_by_name(node, "qcom,battery-data");
+ if (!batt_node) {
+ pr_warn("No available batterydata, using OTP defaults\n");
+ rc = 0;
+ goto no_profile;
+ }
+
+ batt_id = get_sram_prop_now(chip, FG_DATA_BATT_ID);
+ batt_id /= 1000;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("battery id = %dKOhms\n", batt_id);
+
+ profile_node = of_batterydata_get_best_profile(batt_node, batt_id,
+ fg_batt_type);
+ if (IS_ERR_OR_NULL(profile_node)) {
+ rc = PTR_ERR(profile_node);
+ pr_err("couldn't find profile handle %d\n", rc);
+ goto no_profile;
+ }
+
+ /* read rslow compensation values if they're available */
+ rc = of_property_read_u32(profile_node, "qcom,chg-rs-to-rslow",
+ &chip->rslow_comp.chg_rs_to_rslow);
+ if (rc) {
+ chip->rslow_comp.chg_rs_to_rslow = -EINVAL;
+ if (rc != -EINVAL)
+ pr_err("Could not read rs to rslow: %d\n", rc);
+ }
+ rc = of_property_read_u32(profile_node, "qcom,chg-rslow-comp-c1",
+ &chip->rslow_comp.chg_rslow_comp_c1);
+ if (rc) {
+ chip->rslow_comp.chg_rslow_comp_c1 = -EINVAL;
+ if (rc != -EINVAL)
+ pr_err("Could not read rslow comp c1: %d\n", rc);
+ }
+ rc = of_property_read_u32(profile_node, "qcom,chg-rslow-comp-c2",
+ &chip->rslow_comp.chg_rslow_comp_c2);
+ if (rc) {
+ chip->rslow_comp.chg_rslow_comp_c2 = -EINVAL;
+ if (rc != -EINVAL)
+ pr_err("Could not read rslow comp c2: %d\n", rc);
+ }
+ rc = of_property_read_u32(profile_node, "qcom,chg-rslow-comp-thr",
+ &chip->rslow_comp.chg_rslow_comp_thr);
+ if (rc) {
+ chip->rslow_comp.chg_rslow_comp_thr = -EINVAL;
+ if (rc != -EINVAL)
+ pr_err("Could not read rslow comp thr: %d\n", rc);
+ }
+
+ rc = of_property_read_u32(profile_node, "qcom,max-voltage-uv",
+ &chip->batt_max_voltage_uv);
+
+ if (rc)
+ pr_warn("couldn't find battery max voltage\n");
+
+ /*
+ * Only configure from profile if fg-cc-cv-threshold-mv is not
+ * defined in the charger device node.
+ */
+ if (!of_find_property(chip->pdev->dev.of_node,
+ "qcom,fg-cc-cv-threshold-mv", NULL)) {
+ of_property_read_u32(profile_node,
+ "qcom,fg-cc-cv-threshold-mv",
+ &chip->cc_cv_threshold_mv);
+ }
+
+ data = of_get_property(profile_node, "qcom,fg-profile-data", &len);
+ if (!data) {
+ pr_err("no battery profile loaded\n");
+ rc = 0;
+ goto no_profile;
+ }
+
+ if (len != FG_PROFILE_LEN) {
+ pr_err("battery profile incorrect size: %d\n", len);
+ rc = -EINVAL;
+ goto no_profile;
+ }
+
+ rc = of_property_read_string(profile_node, "qcom,battery-type",
+ &batt_type_str);
+ if (rc) {
+ pr_err("Could not find battery data type: %d\n", rc);
+ rc = 0;
+ goto no_profile;
+ }
+
+ if (!chip->batt_profile)
+ chip->batt_profile = devm_kzalloc(chip->dev,
+ sizeof(char) * len, GFP_KERNEL);
+
+ if (!chip->batt_profile) {
+ pr_err("out of memory\n");
+ rc = -ENOMEM;
+ goto no_profile;
+ }
+
+ rc = fg_mem_read(chip, &reg, PROFILE_INTEGRITY_REG, 1, 0, 1);
+ if (rc) {
+ pr_err("failed to read profile integrity rc=%d\n", rc);
+ goto no_profile;
+ }
+
+ rc = fg_mem_read(chip, chip->batt_profile, BATT_PROFILE_OFFSET,
+ len, 0, 1);
+ if (rc) {
+ pr_err("failed to read profile rc=%d\n", rc);
+ goto no_profile;
+ }
+
+ /* Check whether the charger is ready */
+ if (!is_charger_available(chip))
+ goto reschedule;
+
+ /* Disable charging for a FG cycle before calculating vbat_in_range */
+ if (!chip->charging_disabled) {
+ rc = set_prop_enable_charging(chip, false);
+ if (rc)
+ pr_err("Failed to disable charging, rc=%d\n", rc);
+
+ goto reschedule;
+ }
+
+ vbat_in_range = get_vbat_est_diff(chip)
+ < settings[FG_MEM_VBAT_EST_DIFF].value * 1000;
+ profiles_same = memcmp(chip->batt_profile, data,
+ PROFILE_COMPARE_LEN) == 0;
+ if (reg & PROFILE_INTEGRITY_BIT) {
+ fg_cap_learning_load_data(chip);
+ if (vbat_in_range && !fg_is_batt_empty(chip) && profiles_same) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Battery profiles same, using default\n");
+ if (fg_est_dump)
+ schedule_work(&chip->dump_sram);
+ goto done;
+ }
+ } else {
+ pr_info("Battery profile not same, clearing data\n");
+ clear_cycle_counter(chip);
+ chip->learning_data.learned_cc_uah = 0;
+ }
+
+ if (fg_est_dump)
+ dump_sram(&chip->dump_sram);
+
+ if ((fg_debug_mask & FG_STATUS) && !vbat_in_range)
+ pr_info("Vbat out of range: v_current_pred: %d, v:%d\n",
+ fg_data[FG_DATA_CPRED_VOLTAGE].value,
+ fg_data[FG_DATA_VOLTAGE].value);
+
+ if ((fg_debug_mask & FG_STATUS) && fg_is_batt_empty(chip))
+ pr_info("battery empty\n");
+
+ if ((fg_debug_mask & FG_STATUS) && !profiles_same)
+ pr_info("profiles differ\n");
+
+ if (fg_debug_mask & FG_STATUS) {
+ pr_info("Using new profile\n");
+ print_hex_dump(KERN_INFO, "FG: loaded profile: ",
+ DUMP_PREFIX_NONE, 16, 1,
+ chip->batt_profile, len, false);
+ }
+
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+
+ memcpy(chip->batt_profile, data, len);
+
+ chip->batt_profile_len = len;
+
+ if (fg_debug_mask & FG_STATUS)
+ print_hex_dump(KERN_INFO, "FG: new profile: ",
+ DUMP_PREFIX_NONE, 16, 1, chip->batt_profile,
+ chip->batt_profile_len, false);
+
+ rc = fg_do_restart(chip, true);
+ if (rc) {
+ pr_err("restart failed: %d\n", rc);
+ goto no_profile;
+ }
+
+ /*
+ * Only configure from profile if thermal-coefficients is not
+ * defined in the FG device node.
+ */
+ if (!of_find_property(chip->pdev->dev.of_node,
+ "qcom,thermal-coefficients", NULL)) {
+ data = of_get_property(profile_node,
+ "qcom,thermal-coefficients", &len);
+ if (data && len == THERMAL_COEFF_N_BYTES) {
+ memcpy(chip->thermal_coefficients, data, len);
+ rc = fg_mem_write(chip, chip->thermal_coefficients,
+ THERMAL_COEFF_ADDR, THERMAL_COEFF_N_BYTES,
+ THERMAL_COEFF_OFFSET, 0);
+ if (rc)
+ pr_err("spmi write failed addr:%03x, ret:%d\n",
+ THERMAL_COEFF_ADDR, rc);
+ else if (fg_debug_mask & FG_STATUS)
+ pr_info("Battery thermal coefficients changed\n");
+ }
+ }
+
+done:
+ if (chip->charging_disabled) {
+ rc = set_prop_enable_charging(chip, true);
+ if (rc)
+ pr_err("Failed to enable charging, rc=%d\n", rc);
+ else
+ chip->charging_disabled = false;
+ }
+
+ if (fg_batt_type)
+ chip->batt_type = fg_batt_type;
+ else
+ chip->batt_type = batt_type_str;
+ chip->first_profile_loaded = true;
+ chip->profile_loaded = true;
+ chip->battery_missing = is_battery_missing(chip);
+ update_chg_iterm(chip);
+ update_cc_cv_setpoint(chip);
+ rc = populate_system_data(chip);
+ if (rc) {
+ pr_err("failed to read ocv properties=%d\n", rc);
+ return rc;
+ }
+ estimate_battery_age(chip, &chip->actual_cap_uah);
+ schedule_work(&chip->status_change_work);
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+ fg_relax(&chip->profile_wakeup_source);
+ pr_info("Battery SOC: %d, V: %duV\n", get_prop_capacity(chip),
+ fg_data[FG_DATA_VOLTAGE].value);
+ return rc;
+no_profile:
+ if (chip->charging_disabled) {
+ rc = set_prop_enable_charging(chip, true);
+ if (rc)
+ pr_err("Failed to enable charging, rc=%d\n", rc);
+ else
+ chip->charging_disabled = false;
+ }
+
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+ fg_relax(&chip->profile_wakeup_source);
+ return rc;
+reschedule:
+ schedule_delayed_work(
+ &chip->batt_profile_init,
+ msecs_to_jiffies(BATTERY_PSY_WAIT_MS));
+ cancel_delayed_work(&chip->update_sram_data);
+ schedule_delayed_work(
+ &chip->update_sram_data,
+ msecs_to_jiffies(0));
+ fg_relax(&chip->profile_wakeup_source);
+ return 0;
+}
+
+static void check_empty_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ check_empty_work.work);
+
+ if (fg_is_batt_empty(chip)) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("EMPTY SOC high\n");
+ chip->soc_empty = true;
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+ }
+ fg_relax(&chip->empty_check_wakeup_source);
+}
+
+static void batt_profile_init(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ batt_profile_init.work);
+
+ if (fg_batt_profile_init(chip))
+ pr_err("failed to initialize profile\n");
+}
+
+static void sysfs_restart_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ sysfs_restart_work);
+ int rc;
+
+ rc = fg_do_restart(chip, false);
+ if (rc)
+ pr_err("fg restart failed: %d\n", rc);
+ mutex_lock(&chip->sysfs_restart_lock);
+ fg_restart = 0;
+ mutex_unlock(&chip->sysfs_restart_lock);
+}
+
+#define SRAM_MONOTONIC_SOC_REG 0x574
+#define SRAM_MONOTONIC_SOC_OFFSET 2
+#define SRAM_RELEASE_TIMEOUT_MS 500
+static void charge_full_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ charge_full_work);
+ int rc;
+ u8 buffer[3];
+ int bsoc;
+ int resume_soc_raw = FULL_SOC_RAW - settings[FG_MEM_RESUME_SOC].value;
+ bool disable = false;
+ u8 reg;
+
+ if (chip->status != POWER_SUPPLY_STATUS_FULL) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("battery not full: %d\n", chip->status);
+ disable = true;
+ }
+
+ fg_mem_lock(chip);
+ rc = fg_mem_read(chip, buffer, BATTERY_SOC_REG, 3, 1, 0);
+ if (rc) {
+ pr_err("Unable to read battery soc: %d\n", rc);
+ goto out;
+ }
+ if (buffer[2] <= resume_soc_raw) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("bsoc = 0x%02x <= resume = 0x%02x\n",
+ buffer[2], resume_soc_raw);
+ disable = true;
+ }
+ if (!disable)
+ goto out;
+
+ rc = fg_mem_write(chip, buffer, SOC_FULL_REG, 3,
+ SOC_FULL_OFFSET, 0);
+ if (rc) {
+ pr_err("failed to write SOC_FULL rc=%d\n", rc);
+ goto out;
+ }
+ /* force a full soc value into the monotonic in order to display 100 */
+ buffer[0] = 0xFF;
+ buffer[1] = 0xFF;
+ rc = fg_mem_write(chip, buffer, SRAM_MONOTONIC_SOC_REG, 2,
+ SRAM_MONOTONIC_SOC_OFFSET, 0);
+ if (rc) {
+ pr_err("failed to write SOC_FULL rc=%d\n", rc);
+ goto out;
+ }
+ if (fg_debug_mask & FG_STATUS) {
+ bsoc = buffer[0] | buffer[1] << 8 | buffer[2] << 16;
+ pr_info("wrote %06x into soc full\n", bsoc);
+ }
+ fg_mem_release(chip);
+ /*
+ * wait one cycle to make sure the soc is updated before clearing
+ * the soc mask bit
+ */
+ fg_mem_lock(chip);
+ fg_mem_read(chip, &reg, PROFILE_INTEGRITY_REG, 1, 0, 0);
+out:
+ fg_mem_release(chip);
+ if (disable)
+ chip->charge_full = false;
+}
+
+static void update_bcl_thresholds(struct fg_chip *chip)
+{
+ u8 data[4];
+ u8 mh_offset = 0, lm_offset = 0;
+ u16 address = 0;
+ int ret = 0;
+
+ address = settings[FG_MEM_BCL_MH_THRESHOLD].address;
+ mh_offset = settings[FG_MEM_BCL_MH_THRESHOLD].offset;
+ lm_offset = settings[FG_MEM_BCL_LM_THRESHOLD].offset;
+ ret = fg_mem_read(chip, data, address, 4, 0, 1);
+ if (ret)
+ pr_err("Error reading BCL LM & MH threshold rc:%d\n", ret);
+ else
+ pr_debug("Old BCL LM threshold:%x MH threshold:%x\n",
+ data[lm_offset], data[mh_offset]);
+ BCL_MA_TO_ADC(settings[FG_MEM_BCL_MH_THRESHOLD].value, data[mh_offset]);
+ BCL_MA_TO_ADC(settings[FG_MEM_BCL_LM_THRESHOLD].value, data[lm_offset]);
+
+ ret = fg_mem_write(chip, data, address, 4, 0, 0);
+ if (ret)
+ pr_err("spmi write failed. addr:%03x, ret:%d\n",
+ address, ret);
+ else
+ pr_debug("New BCL LM threshold:%x MH threshold:%x\n",
+ data[lm_offset], data[mh_offset]);
+}
+
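+/*
+ * Writing a 0 mA LM threshold pins the BCL comparator above its
+ * low-power-mode trip point, effectively keeping BCL out of LPM.
+ */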
+static int disable_bcl_lpm(struct fg_chip *chip)
+{
+ u8 data[4];
+ u8 lm_offset = 0;
+ u16 address = 0;
+ int rc = 0;
+
+ address = settings[FG_MEM_BCL_LM_THRESHOLD].address;
+ lm_offset = settings[FG_MEM_BCL_LM_THRESHOLD].offset;
+ rc = fg_mem_read(chip, data, address, 4, 0, 1);
+ if (rc) {
+ pr_err("Error reading BCL LM & MH threshold rc:%d\n", rc);
+ return rc;
+ }
+ pr_debug("Old BCL LM threshold:%x\n", data[lm_offset]);
+
+ /* Put BCL always above LPM */
+ BCL_MA_TO_ADC(0, data[lm_offset]);
+
+ rc = fg_mem_write(chip, data, address, 4, 0, 0);
+ if (rc)
+ pr_err("spmi write failed. addr:%03x, rc:%d\n",
+ address, rc);
+ else
+ pr_debug("New BCL LM threshold:%x\n", data[lm_offset]);
+
+ return rc;
+}
+
+static void bcl_hi_power_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ bcl_hi_power_work);
+ int rc;
+
+ if (chip->bcl_lpm_disabled) {
+ rc = disable_bcl_lpm(chip);
+ if (rc)
+ pr_err("failed to disable bcl low mode %d\n",
+ rc);
+ } else {
+ update_bcl_thresholds(chip);
+ }
+}
+
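+/*
+ * The empty-voltage comparator code spans 2.5 V with an LSB of
+ * ~9.766 mV (2.5 V / 256 codes), hence the 2500000 uV offset and
+ * 9766 divisor in the macro below.
+ */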
+#define VOLT_UV_TO_VOLTCMP8(volt_uv) \
+ ((volt_uv - 2500000) / 9766)
+static int update_irq_volt_empty(struct fg_chip *chip)
+{
+ u8 data;
+ int volt_mv = settings[FG_MEM_IRQ_VOLT_EMPTY].value;
+
+ data = (u8)VOLT_UV_TO_VOLTCMP8(volt_mv * 1000);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("voltage = %d, converted_raw = %04x\n", volt_mv, data);
+ return fg_mem_write(chip, &data,
+ settings[FG_MEM_IRQ_VOLT_EMPTY].address, 1,
+ settings[FG_MEM_IRQ_VOLT_EMPTY].offset, 0);
+}
+
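+/*
+ * Note: the converted setpoints below are split into LSB/MSB bytes
+ * because the FG SRAM words are apparently written little-endian; the
+ * same pattern is used in update_iterm().
+ */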
+static int update_cutoff_voltage(struct fg_chip *chip)
+{
+ u8 data[2];
+ u16 converted_voltage_raw;
+ s64 voltage_mv = settings[FG_MEM_CUTOFF_VOLTAGE].value;
+
+ converted_voltage_raw = (s16)MICROUNITS_TO_ADC_RAW(voltage_mv * 1000);
+ data[0] = cpu_to_le16(converted_voltage_raw) & 0xFF;
+ data[1] = cpu_to_le16(converted_voltage_raw) >> 8;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("voltage = %lld, converted_raw = %04x, data = %02x %02x\n",
+ voltage_mv, converted_voltage_raw, data[0], data[1]);
+ return fg_mem_write(chip, data, settings[FG_MEM_CUTOFF_VOLTAGE].address,
+ 2, settings[FG_MEM_CUTOFF_VOLTAGE].offset, 0);
+}
+
+static int update_iterm(struct fg_chip *chip)
+{
+ u8 data[2];
+ u16 converted_current_raw;
+ s64 current_ma = -settings[FG_MEM_TERM_CURRENT].value;
+
+ converted_current_raw = (s16)MICROUNITS_TO_ADC_RAW(current_ma * 1000);
+ data[0] = cpu_to_le16(converted_current_raw) & 0xFF;
+ data[1] = cpu_to_le16(converted_current_raw) >> 8;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("current = %lld, converted_raw = %04x, data = %02x %02x\n",
+ current_ma, converted_current_raw, data[0], data[1]);
+ return fg_mem_write(chip, data, settings[FG_MEM_TERM_CURRENT].address,
+ 2, settings[FG_MEM_TERM_CURRENT].offset, 0);
+}
+
+#define OF_READ_SETTING(type, qpnp_dt_property, retval, optional) \
+do { \
+ if (retval) \
+ break; \
+ \
+ retval = of_property_read_u32(chip->pdev->dev.of_node, \
+ "qcom," qpnp_dt_property, \
+ &settings[type].value); \
+ \
+ if ((retval == -EINVAL) && optional) \
+ retval = 0; \
+ else if (retval) \
+ pr_err("Error reading " #qpnp_dt_property \
+ " property rc = %d\n", rc); \
+} while (0)
+
+#define OF_READ_PROPERTY(store, qpnp_dt_property, retval, default_val) \
+do { \
+ if (retval) \
+ break; \
+ \
+ retval = of_property_read_u32(chip->pdev->dev.of_node, \
+ "qcom," qpnp_dt_property, \
+ &store); \
+ \
+ if (retval == -EINVAL) { \
+ retval = 0; \
+ store = default_val; \
+ } else if (retval) { \
+ pr_err("Error reading " #qpnp_dt_property \
+ " property rc = %d\n", rc); \
+ } \
+} while (0)
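+
+/*
+ * Both helpers are no-ops once retval is already non-zero, so a chain
+ * of reads can share one error variable and stop at the first hard
+ * failure; a missing optional property (-EINVAL) is not an error.
+ */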
+
+#define DEFAULT_EVALUATION_CURRENT_MA 1000
+static int fg_of_init(struct fg_chip *chip)
+{
+ int rc = 0, sense_type, len = 0;
+ const char *data;
+ struct device_node *node = chip->pdev->dev.of_node;
+ u32 temp[2] = {0};
+
+ OF_READ_SETTING(FG_MEM_SOFT_HOT, "warm-bat-decidegc", rc, 1);
+ OF_READ_SETTING(FG_MEM_SOFT_COLD, "cool-bat-decidegc", rc, 1);
+ OF_READ_SETTING(FG_MEM_HARD_HOT, "hot-bat-decidegc", rc, 1);
+ OF_READ_SETTING(FG_MEM_HARD_COLD, "cold-bat-decidegc", rc, 1);
+
+ if (of_find_property(node, "qcom,cold-hot-jeita-hysteresis", NULL)) {
+ int hard_hot = 0, soft_hot = 0, hard_cold = 0, soft_cold = 0;
+
+ rc = of_property_read_u32_array(node,
+ "qcom,cold-hot-jeita-hysteresis", temp, 2);
+ if (rc) {
+ pr_err("Error reading cold-hot-jeita-hysteresis rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ chip->jeita_hysteresis_support = true;
+ chip->cold_hysteresis = temp[0];
+ chip->hot_hysteresis = temp[1];
+ hard_hot = settings[FG_MEM_HARD_HOT].value;
+ soft_hot = settings[FG_MEM_SOFT_HOT].value;
+ hard_cold = settings[FG_MEM_HARD_COLD].value;
+ soft_cold = settings[FG_MEM_SOFT_COLD].value;
+ if (((hard_hot - chip->hot_hysteresis) < soft_hot) ||
+ ((hard_cold + chip->cold_hysteresis) > soft_cold)) {
+ chip->jeita_hysteresis_support = false;
+ pr_err("invalid hysteresis: hot_hysterresis = %d cold_hysteresis = %d\n",
+ chip->hot_hysteresis, chip->cold_hysteresis);
+ } else {
+ pr_debug("cold_hysteresis = %d, hot_hysteresis = %d\n",
+ chip->cold_hysteresis, chip->hot_hysteresis);
+ }
+ }
+
+ OF_READ_SETTING(FG_MEM_BCL_LM_THRESHOLD, "bcl-lm-threshold-ma",
+ rc, 1);
+ OF_READ_SETTING(FG_MEM_BCL_MH_THRESHOLD, "bcl-mh-threshold-ma",
+ rc, 1);
+ OF_READ_SETTING(FG_MEM_TERM_CURRENT, "fg-iterm-ma", rc, 1);
+ OF_READ_SETTING(FG_MEM_CHG_TERM_CURRENT, "fg-chg-iterm-ma", rc, 1);
+ OF_READ_SETTING(FG_MEM_CUTOFF_VOLTAGE, "fg-cutoff-voltage-mv", rc, 1);
+ data = of_get_property(chip->pdev->dev.of_node,
+ "qcom,thermal-coefficients", &len);
+ if (data && len == THERMAL_COEFF_N_BYTES) {
+ memcpy(chip->thermal_coefficients, data, len);
+ chip->use_thermal_coefficients = true;
+ }
+ OF_READ_SETTING(FG_MEM_RESUME_SOC, "resume-soc", rc, 1);
+ settings[FG_MEM_RESUME_SOC].value =
+ DIV_ROUND_CLOSEST(settings[FG_MEM_RESUME_SOC].value
+ * FULL_SOC_RAW, FULL_CAPACITY);
+ OF_READ_SETTING(FG_MEM_RESUME_SOC, "resume-soc-raw", rc, 1);
+ OF_READ_SETTING(FG_MEM_IRQ_VOLT_EMPTY, "irq-volt-empty-mv", rc, 1);
+ OF_READ_SETTING(FG_MEM_VBAT_EST_DIFF, "vbat-estimate-diff-mv", rc, 1);
+ OF_READ_SETTING(FG_MEM_DELTA_SOC, "fg-delta-soc", rc, 1);
+ OF_READ_SETTING(FG_MEM_BATT_LOW, "fg-vbatt-low-threshold", rc, 1);
+ OF_READ_SETTING(FG_MEM_THERM_DELAY, "fg-therm-delay-us", rc, 1);
+ OF_READ_PROPERTY(chip->learning_data.max_increment,
+ "cl-max-increment-deciperc", rc, 5);
+ OF_READ_PROPERTY(chip->learning_data.max_decrement,
+ "cl-max-decrement-deciperc", rc, 100);
+ OF_READ_PROPERTY(chip->learning_data.max_temp,
+ "cl-max-temp-decidegc", rc, 450);
+ OF_READ_PROPERTY(chip->learning_data.min_temp,
+ "cl-min-temp-decidegc", rc, 150);
+ OF_READ_PROPERTY(chip->learning_data.max_start_soc,
+ "cl-max-start-capacity", rc, 15);
+ OF_READ_PROPERTY(chip->learning_data.vbat_est_thr_uv,
+ "cl-vbat-est-thr-uv", rc, 40000);
+ OF_READ_PROPERTY(chip->evaluation_current,
+ "aging-eval-current-ma", rc,
+ DEFAULT_EVALUATION_CURRENT_MA);
+ OF_READ_PROPERTY(chip->cc_cv_threshold_mv,
+ "fg-cc-cv-threshold-mv", rc, 0);
+ if (of_property_read_bool(chip->pdev->dev.of_node,
+ "qcom,capacity-learning-on"))
+ chip->batt_aging_mode = FG_AGING_CC;
+ else if (of_property_read_bool(chip->pdev->dev.of_node,
+ "qcom,capacity-estimation-on"))
+ chip->batt_aging_mode = FG_AGING_ESR;
+ else
+ chip->batt_aging_mode = FG_AGING_NONE;
+ if (chip->batt_aging_mode == FG_AGING_CC) {
+ chip->learning_data.feedback_on
+ = of_property_read_bool(chip->pdev->dev.of_node,
+ "qcom,capacity-learning-feedback");
+ }
+ if (fg_debug_mask & FG_AGING)
+ pr_info("battery aging mode: %d\n", chip->batt_aging_mode);
+
+ /* Get the use-otp-profile property */
+ chip->use_otp_profile = of_property_read_bool(chip->pdev->dev.of_node,
+ "qcom,use-otp-profile");
+ chip->hold_soc_while_full
+ = of_property_read_bool(chip->pdev->dev.of_node,
+ "qcom,hold-soc-while-full");
+
+ sense_type = of_property_read_bool(chip->pdev->dev.of_node,
+ "qcom,ext-sense-type");
+ if (rc == 0) {
+ if (fg_sense_type < 0)
+ fg_sense_type = sense_type;
+
+ if (fg_debug_mask & FG_STATUS) {
+ if (fg_sense_type == INTERNAL_CURRENT_SENSE)
+ pr_info("Using internal sense\n");
+ else if (fg_sense_type == EXTERNAL_CURRENT_SENSE)
+ pr_info("Using external sense\n");
+ else
+ pr_info("Using default sense\n");
+ }
+ } else {
+ rc = 0;
+ }
+
+ chip->bad_batt_detection_en = of_property_read_bool(node,
+ "qcom,bad-battery-detection-enable");
+
+ chip->sw_rbias_ctrl = of_property_read_bool(node,
+ "qcom,sw-rbias-control");
+
+ chip->cyc_ctr.en = of_property_read_bool(node,
+ "qcom,cycle-counter-en");
+ if (chip->cyc_ctr.en)
+ chip->cyc_ctr.id = 1;
+
+ chip->esr_pulse_tune_en = of_property_read_bool(node,
+ "qcom,esr-pulse-tuning-en");
+
+ return rc;
+}
+
+static int fg_init_irqs(struct fg_chip *chip)
+{
+ int rc = 0;
+ unsigned int base;
+ struct device_node *child;
+ u8 subtype;
+ struct platform_device *pdev = chip->pdev;
+
+ if (of_get_available_child_count(pdev->dev.of_node) == 0) {
+ pr_err("no child nodes\n");
+ return -ENXIO;
+ }
+
+ for_each_available_child_of_node(pdev->dev.of_node, child) {
+ rc = of_property_read_u32(child, "reg", &base);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Couldn't find reg in node = %s rc = %d\n",
+ child->full_name, rc);
+ return rc;
+ }
+
+ if ((base == chip->vbat_adc_addr) ||
+ (base == chip->ibat_adc_addr) ||
+ (base == chip->tp_rev_addr))
+ continue;
+
+ rc = fg_read(chip, &subtype,
+ base + REG_OFFSET_PERP_SUBTYPE, 1);
+ if (rc) {
+ pr_err("Peripheral subtype read failed rc=%d\n", rc);
+ return rc;
+ }
+
+ switch (subtype) {
+ case FG_SOC:
+ chip->soc_irq[FULL_SOC].irq = of_irq_get_byname(child,
+ "full-soc");
+ if (chip->soc_irq[FULL_SOC].irq < 0) {
+ pr_err("Unable to get full-soc irq\n");
+ return chip->soc_irq[FULL_SOC].irq;
+ }
+ chip->soc_irq[EMPTY_SOC].irq = of_irq_get_byname(child,
+ "empty-soc");
+ if (chip->soc_irq[EMPTY_SOC].irq < 0) {
+ pr_err("Unable to get empty-soc irq\n");
+ return chip->soc_irq[EMPTY_SOC].irq;
+ }
+ chip->soc_irq[DELTA_SOC].irq = of_irq_get_byname(child,
+ "delta-soc");
+ if (chip->soc_irq[DELTA_SOC].irq < 0) {
+ pr_err("Unable to get delta-soc irq\n");
+ return chip->soc_irq[DELTA_SOC].irq;
+ }
+ chip->soc_irq[FIRST_EST_DONE].irq
+ = of_irq_get_byname(child, "first-est-done");
+ if (chip->soc_irq[FIRST_EST_DONE].irq < 0) {
+ pr_err("Unable to get first-est-done irq\n");
+ return chip->soc_irq[FIRST_EST_DONE].irq;
+ }
+
+ rc = devm_request_irq(chip->dev,
+ chip->soc_irq[FULL_SOC].irq,
+ fg_soc_irq_handler, IRQF_TRIGGER_RISING,
+ "full-soc", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d full-soc: %d\n",
+ chip->soc_irq[FULL_SOC].irq, rc);
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev,
+ chip->soc_irq[EMPTY_SOC].irq,
+ fg_empty_soc_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "empty-soc", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d empty-soc: %d\n",
+ chip->soc_irq[EMPTY_SOC].irq, rc);
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev,
+ chip->soc_irq[DELTA_SOC].irq,
+ fg_soc_irq_handler, IRQF_TRIGGER_RISING,
+ "delta-soc", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d delta-soc: %d\n",
+ chip->soc_irq[DELTA_SOC].irq, rc);
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev,
+ chip->soc_irq[FIRST_EST_DONE].irq,
+ fg_first_soc_irq_handler, IRQF_TRIGGER_RISING,
+ "first-est-done", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d delta-soc: %d\n",
+ chip->soc_irq[FIRST_EST_DONE].irq, rc);
+ return rc;
+ }
+
+ enable_irq_wake(chip->soc_irq[DELTA_SOC].irq);
+ enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+ enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
+ break;
+ case FG_MEMIF:
+ chip->mem_irq[FG_MEM_AVAIL].irq
+ = of_irq_get_byname(child, "mem-avail");
+ if (chip->mem_irq[FG_MEM_AVAIL].irq < 0) {
+ pr_err("Unable to get mem-avail irq\n");
+ return chip->mem_irq[FG_MEM_AVAIL].irq;
+ }
+ rc = devm_request_irq(chip->dev,
+ chip->mem_irq[FG_MEM_AVAIL].irq,
+ fg_mem_avail_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "mem-avail", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d mem-avail: %d\n",
+ chip->mem_irq[FG_MEM_AVAIL].irq, rc);
+ return rc;
+ }
+ break;
+ case FG_BATT:
+ chip->batt_irq[BATT_MISSING].irq
+ = of_irq_get_byname(child, "batt-missing");
+ if (chip->batt_irq[BATT_MISSING].irq < 0) {
+ pr_err("Unable to get batt-missing irq\n");
+ rc = -EINVAL;
+ return rc;
+ }
+ rc = devm_request_threaded_irq(chip->dev,
+ chip->batt_irq[BATT_MISSING].irq,
+ NULL,
+ fg_batt_missing_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "batt-missing", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d batt-missing: %d\n",
+ chip->batt_irq[BATT_MISSING].irq, rc);
+ return rc;
+ }
+ chip->batt_irq[VBATT_LOW].irq
+ = of_irq_get_byname(child, "vbatt-low");
+ if (chip->batt_irq[VBATT_LOW].irq < 0) {
+ pr_err("Unable to get vbatt-low irq\n");
+ rc = -EINVAL;
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev,
+ chip->batt_irq[VBATT_LOW].irq,
+ fg_vbatt_low_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "vbatt-low", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d vbatt-low: %d\n",
+ chip->batt_irq[VBATT_LOW].irq, rc);
+ return rc;
+ }
+ disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = false;
+ break;
+ case FG_ADC:
+ break;
+ default:
+ pr_err("subtype %d\n", subtype);
+ return -EINVAL;
+ }
+ }
+
+ return rc;
+}
+
+static void fg_cleanup(struct fg_chip *chip)
+{
+ cancel_delayed_work_sync(&chip->update_sram_data);
+ cancel_delayed_work_sync(&chip->update_temp_work);
+ cancel_delayed_work_sync(&chip->update_jeita_setting);
+ cancel_delayed_work_sync(&chip->check_empty_work);
+ cancel_delayed_work_sync(&chip->batt_profile_init);
+ alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
+ cancel_work_sync(&chip->rslow_comp_work);
+ cancel_work_sync(&chip->set_resume_soc_work);
+ cancel_work_sync(&chip->fg_cap_learning_work);
+ cancel_work_sync(&chip->dump_sram);
+ cancel_work_sync(&chip->status_change_work);
+ cancel_work_sync(&chip->cycle_count_work);
+ cancel_work_sync(&chip->update_esr_work);
+ cancel_work_sync(&chip->sysfs_restart_work);
+ cancel_work_sync(&chip->gain_comp_work);
+ cancel_work_sync(&chip->init_work);
+ cancel_work_sync(&chip->charge_full_work);
+ cancel_work_sync(&chip->esr_extract_config_work);
+ mutex_destroy(&chip->rslow_comp.lock);
+ mutex_destroy(&chip->rw_lock);
+ mutex_destroy(&chip->cyc_ctr.lock);
+ mutex_destroy(&chip->learning_data.learning_lock);
+ mutex_destroy(&chip->sysfs_restart_lock);
+ wakeup_source_trash(&chip->resume_soc_wakeup_source.source);
+ wakeup_source_trash(&chip->empty_check_wakeup_source.source);
+ wakeup_source_trash(&chip->memif_wakeup_source.source);
+ wakeup_source_trash(&chip->profile_wakeup_source.source);
+ wakeup_source_trash(&chip->update_temp_wakeup_source.source);
+ wakeup_source_trash(&chip->update_sram_wakeup_source.source);
+ wakeup_source_trash(&chip->gain_comp_wakeup_source.source);
+ wakeup_source_trash(&chip->capacity_learning_wakeup_source.source);
+ wakeup_source_trash(&chip->esr_extract_wakeup_source.source);
+}
+
+static int fg_remove(struct platform_device *pdev)
+{
+ struct fg_chip *chip = dev_get_drvdata(&pdev->dev);
+
+ fg_cleanup(chip);
+ dev_set_drvdata(&pdev->dev, NULL);
+ return 0;
+}
+
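+/*
+ * debugfs model: the "address" and "count" files configure an SRAM
+ * window; opening "data" snapshots that configuration into a per-file
+ * transaction (struct fg_trans), and subsequent reads/writes stream
+ * hex bytes through that transaction.
+ */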
+static int fg_memif_data_open(struct inode *inode, struct file *file)
+{
+ struct fg_log_buffer *log;
+ struct fg_trans *trans;
+ u8 *data_buf;
+
+ size_t logbufsize = SZ_4K;
+ size_t databufsize = SZ_4K;
+
+ if (!dbgfs_data.chip) {
+ pr_err("Not initialized data\n");
+ return -EINVAL;
+ }
+
+ /* Per file "transaction" data */
+ trans = kzalloc(sizeof(*trans), GFP_KERNEL);
+ if (!trans) {
+ pr_err("Unable to allocate memory for transaction data\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate log buffer */
+ log = kzalloc(logbufsize, GFP_KERNEL);
+
+ if (!log) {
+ kfree(trans);
+ pr_err("Unable to allocate memory for log buffer\n");
+ return -ENOMEM;
+ }
+
+ log->rpos = 0;
+ log->wpos = 0;
+ log->len = logbufsize - sizeof(*log);
+
+ /* Allocate data buffer */
+ data_buf = kzalloc(databufsize, GFP_KERNEL);
+
+ if (!data_buf) {
+ kfree(trans);
+ kfree(log);
+ pr_err("Unable to allocate memory for data buffer\n");
+ return -ENOMEM;
+ }
+
+ trans->log = log;
+ trans->data = data_buf;
+ trans->cnt = dbgfs_data.cnt;
+ trans->addr = dbgfs_data.addr;
+ trans->chip = dbgfs_data.chip;
+ trans->offset = trans->addr;
+ mutex_init(&trans->memif_dfs_lock);
+
+ file->private_data = trans;
+ return 0;
+}
+
+static int fg_memif_dfs_close(struct inode *inode, struct file *file)
+{
+ struct fg_trans *trans = file->private_data;
+
+ if (trans && trans->log && trans->data) {
+ file->private_data = NULL;
+ mutex_destroy(&trans->memif_dfs_lock);
+ kfree(trans->log);
+ kfree(trans->data);
+ kfree(trans);
+ }
+
+ return 0;
+}
+
+/**
+ * print_to_log: format a string and place into the log buffer
+ * @log: The log buffer to place the result into.
+ * @fmt: The format string to use.
+ * @...: The arguments for the format string.
+ *
+ * The return value is the number of characters written to @log buffer
+ * not including the trailing '\0'.
+ */
+static int print_to_log(struct fg_log_buffer *log, const char *fmt, ...)
+{
+ va_list args;
+ int cnt;
+ char *buf = &log->data[log->wpos];
+ size_t size = log->len - log->wpos;
+
+ va_start(args, fmt);
+ cnt = vscnprintf(buf, size, fmt, args);
+ va_end(args);
+
+ log->wpos += cnt;
+ return cnt;
+}
+
+/**
+ * write_next_line_to_log: Writes a single "line" of data into the log buffer
+ * @trans: Pointer to SRAM transaction data.
+ * @offset: SRAM address offset to start reading from.
+ * @pcnt: Pointer to 'cnt' variable. Indicates the number of bytes to read.
+ *
+ * The 'offset' is a 12-bit SRAM address.
+ *
+ * On a successful read, the pcnt is decremented by the number of data
+ * bytes read from the SRAM. When the cnt reaches 0, all requested bytes have
+ * been read.
+ */
+static int
+write_next_line_to_log(struct fg_trans *trans, int offset, size_t *pcnt)
+{
+ int i, j;
+ u8 data[ITEMS_PER_LINE];
+ struct fg_log_buffer *log = trans->log;
+
+ int cnt = 0;
+ int padding = offset % ITEMS_PER_LINE;
+ int items_to_read = min(ARRAY_SIZE(data) - padding, *pcnt);
+ int items_to_log = min(ITEMS_PER_LINE, padding + items_to_read);
+
+ /* Buffer needs enough space for an entire line */
+ if ((log->len - log->wpos) < MAX_LINE_LENGTH)
+ goto done;
+
+ memcpy(data, trans->data + (offset - trans->addr), items_to_read);
+
+ *pcnt -= items_to_read;
+
+ /* Each line starts with the aligned offset (12-bit address) */
+ cnt = print_to_log(log, "%3.3X ", offset & 0xfff);
+ if (cnt == 0)
+ goto done;
+
+ /* If the offset is unaligned, add padding to right justify items */
+ for (i = 0; i < padding; ++i) {
+ cnt = print_to_log(log, "-- ");
+ if (cnt == 0)
+ goto done;
+ }
+
+ /* Log the data items */
+ for (j = 0; i < items_to_log; ++i, ++j) {
+ cnt = print_to_log(log, "%2.2X ", data[j]);
+ if (cnt == 0)
+ goto done;
+ }
+
+ /* If the last character was a space, then replace it with a newline */
+ if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+ log->data[log->wpos - 1] = '\n';
+
+done:
+ return cnt;
+}
+
+/**
+ * get_log_data - reads data from SRAM and saves to the log buffer
+ * @trans: Pointer to SRAM transaction data.
+ *
+ * Returns the number of "items" read or SPMI error code for read failures.
+ */
+static int get_log_data(struct fg_trans *trans)
+{
+ int cnt, rc;
+ int last_cnt;
+ int items_read;
+ int total_items_read = 0;
+ u32 offset = trans->offset;
+ size_t item_cnt = trans->cnt;
+ struct fg_log_buffer *log = trans->log;
+
+ if (item_cnt == 0)
+ return 0;
+
+ if (item_cnt > SZ_4K) {
+ pr_err("Reading too many bytes\n");
+ return -EINVAL;
+ }
+
+ rc = fg_mem_read(trans->chip, trans->data,
+ trans->addr, trans->cnt, 0, 0);
+ if (rc) {
+ pr_err("dump failed: rc = %d\n", rc);
+ return rc;
+ }
+ /* Reset the log buffer 'pointers' */
+ log->wpos = log->rpos = 0;
+
+ /* Keep reading data until the log is full */
+ do {
+ last_cnt = item_cnt;
+ cnt = write_next_line_to_log(trans, offset, &item_cnt);
+ items_read = last_cnt - item_cnt;
+ offset += items_read;
+ total_items_read += items_read;
+ } while (cnt && item_cnt > 0);
+
+ /* Adjust the transaction offset and count */
+ trans->cnt = item_cnt;
+ trans->offset += total_items_read;
+
+ return total_items_read;
+}
+
+/**
+ * fg_memif_dfs_reg_read: reads value(s) from SRAM and fills the user's
+ * buffer with a byte array (coded as string)
+ * @file: file pointer
+ * @buf: where to put the result
+ * @count: maximum space available in @buf
+ * @ppos: starting position
+ * @return number of user bytes read, or negative error value
+ */
+static ssize_t fg_memif_dfs_reg_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct fg_trans *trans = file->private_data;
+ struct fg_log_buffer *log = trans->log;
+ size_t ret;
+ size_t len;
+
+ mutex_lock(&trans->memif_dfs_lock);
+ /* Is the log buffer empty? */
+ if (log->rpos >= log->wpos) {
+ if (get_log_data(trans) <= 0) {
+ len = 0;
+ goto unlock_mutex;
+ }
+ }
+
+ len = min(count, log->wpos - log->rpos);
+
+ ret = copy_to_user(buf, &log->data[log->rpos], len);
+ if (ret == len) {
+ pr_err("error copy sram register values to user\n");
+ len = -EFAULT;
+ goto unlock_mutex;
+ }
+
+ /* 'ret' is the number of bytes not copied */
+ len -= ret;
+
+ *ppos += len;
+ log->rpos += len;
+
+unlock_mutex:
+ mutex_unlock(&trans->memif_dfs_lock);
+ return len;
+}
+
+/**
+ * fg_memif_dfs_reg_write: write user's byte array (coded as string) to SRAM.
+ * @file: file pointer
+ * @buf: user data to be written.
+ * @count: maximum space available in @buf
+ * @ppos: starting position
+ * @return number of user bytes written, or negative error value
+ */
+static ssize_t fg_memif_dfs_reg_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int bytes_read;
+ int data;
+ int pos = 0;
+ int cnt = 0;
+ u8 *values;
+ size_t ret = 0;
+ char *kbuf;
+ u32 offset;
+
+ struct fg_trans *trans = file->private_data;
+
+ mutex_lock(&trans->memif_dfs_lock);
+ offset = trans->offset;
+
+ /* Make a copy of the user data */
+ kbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kbuf) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
+
+ ret = copy_from_user(kbuf, buf, count);
+ if (ret == count) {
+ pr_err("failed to copy data from user\n");
+ ret = -EFAULT;
+ goto free_buf;
+ }
+
+ count -= ret;
+ *ppos += count;
+ kbuf[count] = '\0';
+
+ /* Override the text buffer with the raw data */
+ values = kbuf;
+
+ /* Parse the data in the buffer. It should be a string of numbers */
+ while ((pos < count) &&
+ sscanf(kbuf + pos, "%i%n", &data, &bytes_read) == 1) {
+ /*
+ * We shouldn't be receiving a string of characters that
+ * exceeds a size of 5 to keep this functionally correct.
+ * Also, we should make sure that pos never gets overflowed
+ * beyond the limit.
+ */
+ if (bytes_read > 5 || bytes_read > INT_MAX - pos) {
+ cnt = 0;
+ ret = -EINVAL;
+ break;
+ }
+ pos += bytes_read;
+ values[cnt++] = data & 0xff;
+ }
+
+ if (!cnt)
+ goto free_buf;
+
+ pr_info("address %x, count %d\n", offset, cnt);
+ /* Perform the write(s) */
+
+ ret = fg_mem_write(trans->chip, values, offset,
+ cnt, 0, 0);
+ if (ret) {
+ pr_err("SPMI write failed, err = %zu\n", ret);
+ } else {
+ ret = count;
+ trans->offset += cnt > 4 ? 4 : cnt;
+ }
+
+free_buf:
+ kfree(kbuf);
+unlock_mutex:
+ mutex_unlock(&trans->memif_dfs_lock);
+ return ret;
+}
+
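+/*
+ * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
+ * write the SRAM address to "address" and a byte count to "count",
+ * then read "data" for a hex dump, or write space-separated byte
+ * values to "data" to patch SRAM at that address.
+ */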
+static const struct file_operations fg_memif_dfs_reg_fops = {
+ .open = fg_memif_data_open,
+ .release = fg_memif_dfs_close,
+ .read = fg_memif_dfs_reg_read,
+ .write = fg_memif_dfs_reg_write,
+};
+
+/**
+ * fg_dfs_create_fs: create debugfs file system.
+ * @return pointer to root directory or NULL if failed to create fs
+ */
+static struct dentry *fg_dfs_create_fs(void)
+{
+ struct dentry *root, *file;
+
+ pr_debug("Creating FG_MEM debugfs file-system\n");
+ root = debugfs_create_dir(DFS_ROOT_NAME, NULL);
+ if (IS_ERR_OR_NULL(root)) {
+ pr_err("Error creating top level directory err:%ld",
+ (long)root);
+ if (PTR_ERR(root) == -ENODEV)
+ pr_err("debugfs is not enabled in the kernel");
+ return NULL;
+ }
+
+ dbgfs_data.help_msg.size = strlen(dbgfs_data.help_msg.data);
+
+ file = debugfs_create_blob("help", S_IRUGO, root, &dbgfs_data.help_msg);
+ if (!file) {
+ pr_err("error creating help entry\n");
+ goto err_remove_fs;
+ }
+ return root;
+
+err_remove_fs:
+ debugfs_remove_recursive(root);
+ return NULL;
+}
+
+/**
+ * fg_dfs_get_root: return a pointer to the FG debugfs root directory.
+ * If no root directory exists yet, one is created, populated with the
+ * files that configure an SRAM transaction, namely: address and count.
+ * @return a valid pointer on success or NULL on failure
+ */
+struct dentry *fg_dfs_get_root(void)
+{
+ if (dbgfs_data.root)
+ return dbgfs_data.root;
+
+ if (mutex_lock_interruptible(&dbgfs_data.lock) < 0)
+ return NULL;
+ /* critical section */
+ if (!dbgfs_data.root) { /* double checking idiom */
+ dbgfs_data.root = fg_dfs_create_fs();
+ }
+ mutex_unlock(&dbgfs_data.lock);
+ return dbgfs_data.root;
+}
+
+/*
+ * fg_dfs_create: adds new fg_mem_if debugfs entries
+ * @return zero on success
+ */
+int fg_dfs_create(struct fg_chip *chip)
+{
+ struct dentry *root;
+ struct dentry *file;
+
+ root = fg_dfs_get_root();
+ if (!root)
+ return -ENOENT;
+
+ dbgfs_data.chip = chip;
+
+ file = debugfs_create_u32("count", DFS_MODE, root, &(dbgfs_data.cnt));
+ if (!file) {
+ pr_err("error creating 'count' entry\n");
+ goto err_remove_fs;
+ }
+
+ file = debugfs_create_x32("address", DFS_MODE,
+ root, &(dbgfs_data.addr));
+ if (!file) {
+ pr_err("error creating 'address' entry\n");
+ goto err_remove_fs;
+ }
+
+ file = debugfs_create_file("data", DFS_MODE, root, &dbgfs_data,
+ &fg_memif_dfs_reg_fops);
+ if (!file) {
+ pr_err("error creating 'data' entry\n");
+ goto err_remove_fs;
+ }
+
+ return 0;
+
+err_remove_fs:
+ debugfs_remove_recursive(root);
+ return -ENOMEM;
+}
+
+#define EXTERNAL_SENSE_OFFSET_REG 0x41C
+#define EXT_OFFSET_TRIM_REG 0xF8
+#define SEC_ACCESS_REG 0xD0
+#define SEC_ACCESS_UNLOCK 0xA5
+#define BCL_TRIM_REV_FIXED 12
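+/*
+ * On parts with a trim-point revision older than BCL_TRIM_REV_FIXED,
+ * copy the external-sense offset trim out of SRAM into the
+ * secure-access protected offset trim register as a workaround.
+ */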
+static int bcl_trim_workaround(struct fg_chip *chip)
+{
+ u8 reg;
+ int rc;
+
+ if (chip->tp_rev_addr == 0)
+ return 0;
+
+ rc = fg_read(chip, &reg, chip->tp_rev_addr, 1);
+ if (rc) {
+ pr_err("Failed to read tp reg, rc = %d\n", rc);
+ return rc;
+ }
+ if (reg >= BCL_TRIM_REV_FIXED) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("workaround not applied, tp_rev = %d\n", reg);
+ return 0;
+ }
+
+ rc = fg_mem_read(chip, &reg, EXTERNAL_SENSE_OFFSET_REG, 1, 2, 0);
+ if (rc) {
+ pr_err("Failed to read ext sense offset trim, rc = %d\n", rc);
+ return rc;
+ }
+ rc = fg_masked_write(chip, chip->soc_base + SEC_ACCESS_REG,
+ SEC_ACCESS_UNLOCK, SEC_ACCESS_UNLOCK, 1);
+
+ rc |= fg_masked_write(chip, chip->soc_base + EXT_OFFSET_TRIM_REG,
+ 0xFF, reg, 1);
+ if (rc) {
+ pr_err("Failed to write ext sense offset trim, rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+#define FG_ALG_SYSCTL_1 0x4B0
+#define SOC_CNFG 0x450
+#define SOC_DELTA_OFFSET 3
+#define DELTA_SOC_PERCENT 1
+#define I_TERM_QUAL_BIT BIT(1)
+#define PATCH_NEG_CURRENT_BIT BIT(3)
+#define KI_COEFF_PRED_FULL_ADDR 0x408
+#define KI_COEFF_PRED_FULL_4_0_MSB 0x88
+#define KI_COEFF_PRED_FULL_4_0_LSB 0x00
+#define TEMP_FRAC_SHIFT_REG 0x4A4
+#define FG_ADC_CONFIG_REG 0x4B8
+#define FG_BCL_CONFIG_OFFSET 0x3
+#define BCL_FORCED_HPM_IN_CHARGE BIT(2)
+static int fg_common_hw_init(struct fg_chip *chip)
+{
+ int rc;
+ int resume_soc_raw;
+ u8 val;
+
+ update_iterm(chip);
+ update_cutoff_voltage(chip);
+ update_irq_volt_empty(chip);
+ update_bcl_thresholds(chip);
+
+ resume_soc_raw = settings[FG_MEM_RESUME_SOC].value;
+ if (resume_soc_raw > 0) {
+ rc = fg_set_resume_soc(chip, resume_soc_raw);
+ if (rc) {
+ pr_err("Couldn't set resume SOC for FG\n");
+ return rc;
+ }
+ } else {
+ pr_info("FG auto recharge threshold not specified in DT\n");
+ }
+
+ if (fg_sense_type >= 0) {
+ rc = set_prop_sense_type(chip, fg_sense_type);
+ if (rc) {
+ pr_err("failed to config sense type %d rc=%d\n",
+ fg_sense_type, rc);
+ return rc;
+ }
+ }
+
+ rc = fg_mem_masked_write(chip, settings[FG_MEM_DELTA_SOC].address, 0xFF,
+ soc_to_setpoint(settings[FG_MEM_DELTA_SOC].value),
+ settings[FG_MEM_DELTA_SOC].offset);
+ if (rc) {
+ pr_err("failed to write delta soc rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_mem_masked_write(chip, settings[FG_MEM_BATT_LOW].address, 0xFF,
+ batt_to_setpoint_8b(settings[FG_MEM_BATT_LOW].value),
+ settings[FG_MEM_BATT_LOW].offset);
+ if (rc) {
+ pr_err("failed to write Vbatt_low rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_mem_masked_write(chip, settings[FG_MEM_THERM_DELAY].address,
+ THERM_DELAY_MASK,
+ therm_delay_to_setpoint(settings[FG_MEM_THERM_DELAY].value),
+ settings[FG_MEM_THERM_DELAY].offset);
+ if (rc) {
+ pr_err("failed to write therm_delay rc=%d\n", rc);
+ return rc;
+ }
+
+ if (chip->use_thermal_coefficients) {
+ fg_mem_write(chip, chip->thermal_coefficients,
+ THERMAL_COEFF_ADDR, THERMAL_COEFF_N_BYTES,
+ THERMAL_COEFF_OFFSET, 0);
+ }
+
+ if (!chip->sw_rbias_ctrl) {
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ BATT_TEMP_CNTRL_MASK,
+ TEMP_SENSE_ALWAYS_BIT,
+ BATT_TEMP_OFFSET);
+ if (rc) {
+ pr_err("failed to write BATT_TEMP_OFFSET rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ /* Read the cycle counter back from FG SRAM */
+ if (chip->cyc_ctr.en)
+ restore_cycle_counter(chip);
+
+ if (chip->esr_pulse_tune_en) {
+ rc = fg_mem_read(chip, &val, SYS_CFG_1_REG, 1, SYS_CFG_1_OFFSET,
+ 0);
+ if (rc) {
+ pr_err("unable to read sys_cfg_1: %d\n", rc);
+ return rc;
+ }
+
+ if (!(val & ENABLE_ESR_PULSE_VAL))
+ chip->esr_extract_disabled = true;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("ESR extract is %sabled\n",
+ chip->esr_extract_disabled ? "dis" : "en");
+
+ rc = fg_mem_read(chip, &val, CBITS_INPUT_FILTER_REG, 1,
+ CBITS_RMEAS1_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read cbits_input_filter_reg: %d\n",
+ rc);
+ return rc;
+ }
+
+ if (val & (IMPTR_FAST_TIME_SHIFT | IMPTR_LONG_TIME_SHIFT))
+ chip->imptr_pulse_slow_en = true;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("imptr_pulse_slow is %sabled\n",
+ chip->imptr_pulse_slow_en ? "en" : "dis");
+
+ rc = fg_mem_read(chip, &val, RSLOW_CFG_REG, 1, RSLOW_CFG_OFFSET,
+ 0);
+ if (rc) {
+ pr_err("unable to read rslow cfg: %d\n", rc);
+ return rc;
+ }
+
+ if (val & RSLOW_CFG_ON_VAL)
+ chip->rslow_comp.active = true;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("rslow_comp active is %sabled\n",
+ chip->rslow_comp.active ? "en" : "dis");
+ }
+
+ return 0;
+}
+
+static int fg_8994_hw_init(struct fg_chip *chip)
+{
+ int rc = 0;
+ u8 data[4];
+ u64 esr_value;
+
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ PATCH_NEG_CURRENT_BIT,
+ PATCH_NEG_CURRENT_BIT,
+ EXTERNAL_SENSE_OFFSET);
+ if (rc) {
+ pr_err("failed to write patch current bit rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = bcl_trim_workaround(chip);
+ if (rc) {
+ pr_err("failed to redo bcl trim rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG,
+ BCL_FORCED_HPM_IN_CHARGE,
+ BCL_FORCED_HPM_IN_CHARGE,
+ FG_BCL_CONFIG_OFFSET);
+ if (rc) {
+ pr_err("failed to force hpm in charge rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_mem_masked_write(chip, FG_ALG_SYSCTL_1, I_TERM_QUAL_BIT, 0, 0);
+
+ data[0] = 0xA2;
+ data[1] = 0x12;
+
+ rc = fg_mem_write(chip, data, TEMP_FRAC_SHIFT_REG, 2, 2, 0);
+ if (rc) {
+ pr_err("failed to write temp ocv constants rc=%d\n", rc);
+ return rc;
+ }
+
+ data[0] = KI_COEFF_PRED_FULL_4_0_LSB;
+ data[1] = KI_COEFF_PRED_FULL_4_0_MSB;
+ fg_mem_write(chip, data, KI_COEFF_PRED_FULL_ADDR, 2, 2, 0);
+
+ esr_value = ESR_DEFAULT_VALUE;
+ rc = fg_mem_write(chip, (u8 *)&esr_value, MAXRSCHANGE_REG, 8,
+ ESR_VALUE_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write default ESR value rc=%d\n", rc);
+ else
+ pr_debug("set default value to esr filter\n");
+
+ return 0;
+}
+
+#define FG_USBID_CONFIG_OFFSET 0x2
+#define DISABLE_USBID_DETECT_BIT BIT(0)
+static int fg_8996_hw_init(struct fg_chip *chip)
+{
+ int rc;
+
+ rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG,
+ BCL_FORCED_HPM_IN_CHARGE,
+ BCL_FORCED_HPM_IN_CHARGE,
+ FG_BCL_CONFIG_OFFSET);
+ if (rc) {
+ pr_err("failed to force hpm in charge rc=%d\n", rc);
+ return rc;
+ }
+
+ /* enable usbid conversions for PMi8996 V1.0 */
+ if (chip->pmic_revision[REVID_DIG_MAJOR] == 1
+ && chip->pmic_revision[REVID_ANA_MAJOR] == 0) {
+ rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG,
+ DISABLE_USBID_DETECT_BIT,
+ 0, FG_USBID_CONFIG_OFFSET);
+ if (rc) {
+ pr_err("failed to enable usbid conversions: %d\n", rc);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static int fg_8950_hw_init(struct fg_chip *chip)
+{
+ int rc;
+
+ rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG,
+ BCL_FORCED_HPM_IN_CHARGE,
+ BCL_FORCED_HPM_IN_CHARGE,
+ FG_BCL_CONFIG_OFFSET);
+ if (rc)
+ pr_err("failed to force hpm in charge rc=%d\n", rc);
+
+ return rc;
+}
+
+static int fg_hw_init(struct fg_chip *chip)
+{
+ int rc = 0;
+
+ rc = fg_common_hw_init(chip);
+ if (rc) {
+ pr_err("Unable to initialize FG HW rc=%d\n", rc);
+ return rc;
+ }
+
+ /* add PMIC specific hw init */
+ switch (chip->pmic_subtype) {
+ case PMI8994:
+ rc = fg_8994_hw_init(chip);
+ chip->wa_flag |= PULSE_REQUEST_WA;
+ break;
+ case PMI8996:
+ rc = fg_8996_hw_init(chip);
+ /* Setup workaround flag based on PMIC type */
+ if (fg_sense_type == INTERNAL_CURRENT_SENSE)
+ chip->wa_flag |= IADC_GAIN_COMP_WA;
+ if (chip->pmic_revision[REVID_DIG_MAJOR] > 1)
+ chip->wa_flag |= USE_CC_SOC_REG;
+
+ break;
+ case PMI8950:
+ case PMI8937:
+ rc = fg_8950_hw_init(chip);
+ /* Setup workaround flag based on PMIC type */
+ chip->wa_flag |= BCL_HI_POWER_FOR_CHGLED_WA;
+ if (fg_sense_type == INTERNAL_CURRENT_SENSE)
+ chip->wa_flag |= IADC_GAIN_COMP_WA;
+ if (chip->pmic_revision[REVID_DIG_MAJOR] > 1)
+ chip->wa_flag |= USE_CC_SOC_REG;
+
+ break;
+ }
+ if (rc)
+ pr_err("Unable to initialize PMIC specific FG HW rc=%d\n", rc);
+
+ pr_debug("wa_flag=0x%x\n", chip->wa_flag);
+
+ return rc;
+}
+
+#define DIG_MINOR 0x0
+#define DIG_MAJOR 0x1
+#define ANA_MINOR 0x2
+#define ANA_MAJOR 0x3
+#define IACS_INTR_SRC_SLCT BIT(3)
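+/*
+ * The four revision bytes (DIG_MINOR..ANA_MAJOR) are contiguous, so a
+ * single 4-byte read starting at DIG_MINOR fills chip->revision[].
+ */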
+static int fg_setup_memif_offset(struct fg_chip *chip)
+{
+ int rc;
+
+ rc = fg_read(chip, chip->revision, chip->mem_base + DIG_MINOR, 4);
+ if (rc) {
+ pr_err("Unable to read FG revision rc=%d\n", rc);
+ return rc;
+ }
+
+ switch (chip->revision[DIG_MAJOR]) {
+ case DIG_REV_1:
+ case DIG_REV_2:
+ chip->offset = offset[0].address;
+ break;
+ case DIG_REV_3:
+ chip->offset = offset[1].address;
+ chip->ima_supported = true;
+ break;
+ default:
+ pr_err("Digital Major rev=%d not supported\n",
+ chip->revision[DIG_MAJOR]);
+ return -EINVAL;
+ }
+
+ if (chip->ima_supported) {
+ /*
+ * Change the FG_MEM_INT interrupt to track IACS_READY
+ * condition instead of end-of-transaction. This makes sure
+ * that the next transaction starts only after the hw is ready.
+ */
+ rc = fg_masked_write(chip,
+ chip->mem_base + MEM_INTF_IMA_CFG, IACS_INTR_SRC_SLCT,
+ IACS_INTR_SRC_SLCT, 1);
+ if (rc) {
+ pr_err("failed to configure interrupt source %d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int fg_detect_pmic_type(struct fg_chip *chip)
+{
+ struct pmic_revid_data *pmic_rev_id;
+ struct device_node *revid_dev_node;
+
+ revid_dev_node = of_parse_phandle(chip->pdev->dev.of_node,
+ "qcom,pmic-revid", 0);
+ if (!revid_dev_node) {
+ pr_err("Missing qcom,pmic-revid property - driver failed\n");
+ return -EINVAL;
+ }
+
+ pmic_rev_id = get_revid_data(revid_dev_node);
+ if (IS_ERR_OR_NULL(pmic_rev_id)) {
+ pr_err("Unable to get pmic_revid rc=%ld\n",
+ PTR_ERR(pmic_rev_id));
+ /*
+ * the revid peripheral must be registered, any failure
+ * here only indicates that the rev-id module has not
+ * probed yet.
+ */
+ return -EPROBE_DEFER;
+ }
+
+ switch (pmic_rev_id->pmic_subtype) {
+ case PMI8994:
+ case PMI8950:
+ case PMI8937:
+ case PMI8996:
+ chip->pmic_subtype = pmic_rev_id->pmic_subtype;
+ chip->pmic_revision[REVID_RESERVED] = pmic_rev_id->rev1;
+ chip->pmic_revision[REVID_VARIANT] = pmic_rev_id->rev2;
+ chip->pmic_revision[REVID_ANA_MAJOR] = pmic_rev_id->rev3;
+ chip->pmic_revision[REVID_DIG_MAJOR] = pmic_rev_id->rev4;
+ break;
+ default:
+ pr_err("PMIC subtype %d not supported\n",
+ pmic_rev_id->pmic_subtype);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define INIT_JEITA_DELAY_MS 1000
+
+static void delayed_init_work(struct work_struct *work)
+{
+ u8 reg[2];
+ int rc;
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ init_work);
+
+ /* hold memory access until initialization finishes */
+ fg_mem_lock(chip);
+
+ rc = fg_hw_init(chip);
+ if (rc) {
+ pr_err("failed to hw init rc = %d\n", rc);
+ fg_mem_release(chip);
+ fg_cleanup(chip);
+ return;
+ }
+ /* release memory access before update_sram_data is called */
+ fg_mem_release(chip);
+
+ schedule_delayed_work(
+ &chip->update_jeita_setting,
+ msecs_to_jiffies(INIT_JEITA_DELAY_MS));
+
+ if (chip->last_sram_update_time == 0)
+ update_sram_data_work(&chip->update_sram_data.work);
+
+ if (chip->last_temp_update_time == 0)
+ update_temp_data(&chip->update_temp_work.work);
+
+ if (!chip->use_otp_profile)
+ schedule_delayed_work(&chip->batt_profile_init, 0);
+
+ if (chip->wa_flag & IADC_GAIN_COMP_WA) {
+ /* read default gain config */
+ rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, DEF_GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read default gain rc=%d\n", rc);
+ goto done;
+ }
+
+ if (reg[1] || reg[0]) {
+ /*
+ * Default gain register has valid value:
+ * - write to gain register.
+ */
+ rc = fg_mem_write(chip, reg, GAIN_REG, 2,
+ GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to write gain rc=%d\n", rc);
+ goto done;
+ }
+ } else {
+ /*
+ * Default gain register is invalid:
+ * - read gain register for default gain value
+ * - write to default gain register.
+ */
+ rc = fg_mem_read(chip, reg, GAIN_REG, 2,
+ GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read gain rc=%d\n", rc);
+ goto done;
+ }
+ rc = fg_mem_write(chip, reg, K_VCOR_REG, 2,
+ DEF_GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to write default gain rc=%d\n",
+ rc);
+ goto done;
+ }
+ }
+
+ chip->iadc_comp_data.dfl_gain_reg[0] = reg[0];
+ chip->iadc_comp_data.dfl_gain_reg[1] = reg[1];
+ chip->iadc_comp_data.dfl_gain = half_float(reg);
+ chip->input_present = is_input_present(chip);
+ chip->otg_present = is_otg_present(chip);
+ chip->init_done = true;
+
+ pr_debug("IADC gain initial config reg_val 0x%x%x gain %lld\n",
+ reg[1], reg[0], chip->iadc_comp_data.dfl_gain);
+ }
+
+ pr_debug("FG: HW_init success\n");
+
+ return;
+done:
+ fg_cleanup(chip);
+}
+
+static int fg_probe(struct platform_device *pdev)
+{
+ struct device *dev = &(pdev->dev);
+ struct fg_chip *chip;
+ struct device_node *child;
+ unsigned int base;
+ u8 subtype, reg;
+ int rc = 0;
+ struct power_supply_config bms_psy_cfg = {};
+
+ if (!pdev) {
+ pr_err("no valid spmi pointer\n");
+ return -ENODEV;
+ }
+
+ if (!pdev->dev.of_node) {
+ pr_err("device node missing\n");
+ return -ENODEV;
+ }
+
+ chip = devm_kzalloc(dev, sizeof(struct fg_chip), GFP_KERNEL);
+ if (chip == NULL) {
+ pr_err("Can't allocate fg_chip\n");
+ return -ENOMEM;
+ }
+ chip->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!chip->regmap) {
+ dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+ return -EINVAL;
+ }
+
+ chip->pdev = pdev;
+ chip->dev = &(pdev->dev);
+
+ wakeup_source_init(&chip->empty_check_wakeup_source.source,
+ "qpnp_fg_empty_check");
+ wakeup_source_init(&chip->memif_wakeup_source.source,
+ "qpnp_fg_memaccess");
+ wakeup_source_init(&chip->profile_wakeup_source.source,
+ "qpnp_fg_profile");
+ wakeup_source_init(&chip->update_temp_wakeup_source.source,
+ "qpnp_fg_update_temp");
+ wakeup_source_init(&chip->update_sram_wakeup_source.source,
+ "qpnp_fg_update_sram");
+ wakeup_source_init(&chip->resume_soc_wakeup_source.source,
+ "qpnp_fg_set_resume_soc");
+ wakeup_source_init(&chip->gain_comp_wakeup_source.source,
+ "qpnp_fg_gain_comp");
+ wakeup_source_init(&chip->capacity_learning_wakeup_source.source,
+ "qpnp_fg_cap_learning");
+ wakeup_source_init(&chip->esr_extract_wakeup_source.source,
+ "qpnp_fg_esr_extract");
+ mutex_init(&chip->rw_lock);
+ mutex_init(&chip->cyc_ctr.lock);
+ mutex_init(&chip->learning_data.learning_lock);
+ mutex_init(&chip->rslow_comp.lock);
+ mutex_init(&chip->sysfs_restart_lock);
+ INIT_DELAYED_WORK(&chip->update_jeita_setting, update_jeita_setting);
+ INIT_DELAYED_WORK(&chip->update_sram_data, update_sram_data_work);
+ INIT_DELAYED_WORK(&chip->update_temp_work, update_temp_data);
+ INIT_DELAYED_WORK(&chip->check_empty_work, check_empty_work);
+ INIT_DELAYED_WORK(&chip->batt_profile_init, batt_profile_init);
+ INIT_WORK(&chip->rslow_comp_work, rslow_comp_work);
+ INIT_WORK(&chip->fg_cap_learning_work, fg_cap_learning_work);
+ INIT_WORK(&chip->dump_sram, dump_sram);
+ INIT_WORK(&chip->status_change_work, status_change_work);
+ INIT_WORK(&chip->cycle_count_work, update_cycle_count);
+ INIT_WORK(&chip->battery_age_work, battery_age_work);
+ INIT_WORK(&chip->update_esr_work, update_esr_value);
+ INIT_WORK(&chip->set_resume_soc_work, set_resume_soc_work);
+ INIT_WORK(&chip->sysfs_restart_work, sysfs_restart_work);
+ INIT_WORK(&chip->init_work, delayed_init_work);
+ INIT_WORK(&chip->charge_full_work, charge_full_work);
+ INIT_WORK(&chip->gain_comp_work, iadc_gain_comp_work);
+ INIT_WORK(&chip->bcl_hi_power_work, bcl_hi_power_work);
+ INIT_WORK(&chip->esr_extract_config_work, esr_extract_config_work);
+ alarm_init(&chip->fg_cap_learning_alarm, ALARM_BOOTTIME,
+ fg_cap_learning_alarm_cb);
+ init_completion(&chip->sram_access_granted);
+ init_completion(&chip->sram_access_revoked);
+ complete_all(&chip->sram_access_revoked);
+ init_completion(&chip->batt_id_avail);
+ init_completion(&chip->first_soc_done);
+ dev_set_drvdata(&pdev->dev, chip);
+
+ if (of_get_available_child_count(pdev->dev.of_node) == 0) {
+ pr_err("no child nodes\n");
+ rc = -ENXIO;
+ goto of_init_fail;
+ }
+
+ for_each_available_child_of_node(pdev->dev.of_node, child) {
+ rc = of_property_read_u32(child, "reg", &base);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Couldn't find reg in node = %s rc = %d\n",
+ child->full_name, rc);
+ goto of_init_fail;
+ }
+
+ if (strcmp("qcom,fg-adc-vbat", child->name) == 0) {
+ chip->vbat_adc_addr = base;
+ continue;
+ } else if (strcmp("qcom,fg-adc-ibat", child->name) == 0) {
+ chip->ibat_adc_addr = base;
+ continue;
+ } else if (strcmp("qcom,revid-tp-rev", child->name) == 0) {
+ chip->tp_rev_addr = base;
+ continue;
+ }
+
+ rc = fg_read(chip, &subtype,
+ base + REG_OFFSET_PERP_SUBTYPE, 1);
+ if (rc) {
+ pr_err("Peripheral subtype read failed rc=%d\n", rc);
+ goto of_init_fail;
+ }
+
+ switch (subtype) {
+ case FG_SOC:
+ chip->soc_base = base;
+ break;
+ case FG_MEMIF:
+ chip->mem_base = base;
+ break;
+ case FG_BATT:
+ chip->batt_base = base;
+ break;
+ default:
+ pr_err("Invalid peripheral subtype=0x%x\n", subtype);
+ rc = -EINVAL;
+ }
+ }
+
+ rc = fg_detect_pmic_type(chip);
+ if (rc) {
+ pr_err("Unable to detect PMIC type rc=%d\n", rc);
+		goto of_init_fail;
+ }
+
+ rc = fg_setup_memif_offset(chip);
+ if (rc) {
+ pr_err("Unable to setup mem_if offsets rc=%d\n", rc);
+ goto of_init_fail;
+ }
+
+ rc = fg_of_init(chip);
+ if (rc) {
+		pr_err("failed to parse devicetree rc=%d\n", rc);
+ goto of_init_fail;
+ }
+
+ if (chip->jeita_hysteresis_support) {
+ rc = fg_init_batt_temp_state(chip);
+ if (rc) {
+			pr_err("failed to get battery status rc=%d\n", rc);
+ goto of_init_fail;
+ }
+ }
+
+ /* check if the first estimate is already finished at this time */
+ if (is_first_est_done(chip))
+ complete_all(&chip->first_soc_done);
+
+ reg = 0xFF;
+	rc = fg_write(chip, &reg, INT_EN_CLR(chip->mem_base), 1);
+ if (rc) {
+ pr_err("failed to clear interrupts %d\n", rc);
+ goto of_init_fail;
+ }
+
+ rc = fg_init_irqs(chip);
+ if (rc) {
+ pr_err("failed to request interrupts %d\n", rc);
+ goto cancel_work;
+ }
+
+ chip->batt_type = default_batt_type;
+
+ chip->bms_psy_d.name = "bms";
+ chip->bms_psy_d.type = POWER_SUPPLY_TYPE_BMS;
+ chip->bms_psy_d.properties = fg_power_props;
+ chip->bms_psy_d.num_properties = ARRAY_SIZE(fg_power_props);
+ chip->bms_psy_d.get_property = fg_power_get_property;
+ chip->bms_psy_d.set_property = fg_power_set_property;
+ chip->bms_psy_d.external_power_changed = fg_external_power_changed;
+ chip->bms_psy_d.property_is_writeable = fg_property_is_writeable;
+
+ bms_psy_cfg.drv_data = chip;
+ bms_psy_cfg.supplied_to = fg_supplicants;
+ bms_psy_cfg.num_supplicants = ARRAY_SIZE(fg_supplicants);
+ bms_psy_cfg.of_node = NULL;
+ chip->bms_psy = devm_power_supply_register(chip->dev,
+ &chip->bms_psy_d,
+ &bms_psy_cfg);
+	if (IS_ERR(chip->bms_psy)) {
+		rc = PTR_ERR(chip->bms_psy);
+		pr_err("batt failed to register rc = %d\n", rc);
+		goto of_init_fail;
+	}
+ chip->power_supply_registered = true;
+ /*
+ * Just initialize the batt_psy_name here. Power supply
+ * will be obtained later.
+ */
+ chip->batt_psy_name = "battery";
+
+ if (chip->mem_base) {
+ rc = fg_dfs_create(chip);
+ if (rc < 0) {
+ pr_err("failed to create debugfs rc = %d\n", rc);
+ goto cancel_work;
+ }
+ }
+
+ schedule_work(&chip->init_work);
+
+ pr_info("FG Probe success - FG Revision DIG:%d.%d ANA:%d.%d PMIC subtype=%d\n",
+ chip->revision[DIG_MAJOR], chip->revision[DIG_MINOR],
+ chip->revision[ANA_MAJOR], chip->revision[ANA_MINOR],
+ chip->pmic_subtype);
+
+ return rc;
+
+cancel_work:
+ cancel_delayed_work_sync(&chip->update_jeita_setting);
+ cancel_delayed_work_sync(&chip->update_sram_data);
+ cancel_delayed_work_sync(&chip->update_temp_work);
+ cancel_delayed_work_sync(&chip->check_empty_work);
+ cancel_delayed_work_sync(&chip->batt_profile_init);
+ alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
+ cancel_work_sync(&chip->set_resume_soc_work);
+ cancel_work_sync(&chip->fg_cap_learning_work);
+ cancel_work_sync(&chip->dump_sram);
+ cancel_work_sync(&chip->status_change_work);
+ cancel_work_sync(&chip->cycle_count_work);
+ cancel_work_sync(&chip->update_esr_work);
+ cancel_work_sync(&chip->rslow_comp_work);
+ cancel_work_sync(&chip->sysfs_restart_work);
+ cancel_work_sync(&chip->gain_comp_work);
+ cancel_work_sync(&chip->init_work);
+ cancel_work_sync(&chip->charge_full_work);
+ cancel_work_sync(&chip->bcl_hi_power_work);
+ cancel_work_sync(&chip->esr_extract_config_work);
+of_init_fail:
+ mutex_destroy(&chip->rslow_comp.lock);
+ mutex_destroy(&chip->rw_lock);
+ mutex_destroy(&chip->cyc_ctr.lock);
+ mutex_destroy(&chip->learning_data.learning_lock);
+ mutex_destroy(&chip->sysfs_restart_lock);
+ wakeup_source_trash(&chip->resume_soc_wakeup_source.source);
+ wakeup_source_trash(&chip->empty_check_wakeup_source.source);
+ wakeup_source_trash(&chip->memif_wakeup_source.source);
+ wakeup_source_trash(&chip->profile_wakeup_source.source);
+ wakeup_source_trash(&chip->update_temp_wakeup_source.source);
+ wakeup_source_trash(&chip->update_sram_wakeup_source.source);
+ wakeup_source_trash(&chip->gain_comp_wakeup_source.source);
+ wakeup_source_trash(&chip->capacity_learning_wakeup_source.source);
+ wakeup_source_trash(&chip->esr_extract_wakeup_source.source);
+ return rc;
+}
+
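+/*
+ * Reschedule the temperature and SRAM update work so that each runs when its
+ * period, measured from the last completed update, expires. Times here are
+ * kept in seconds, hence the /1000 on the millisecond periods and the *1000
+ * when converting the remaining time back for msecs_to_jiffies().
+ */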
+static void check_and_update_sram_data(struct fg_chip *chip)
+{
+ unsigned long current_time = 0, next_update_time, time_left;
+
+	get_current_time(&current_time);
+
+ next_update_time = chip->last_temp_update_time
+ + (TEMP_PERIOD_UPDATE_MS / 1000);
+
+ if (next_update_time > current_time)
+ time_left = next_update_time - current_time;
+ else
+ time_left = 0;
+
+ schedule_delayed_work(
+ &chip->update_temp_work, msecs_to_jiffies(time_left * 1000));
+
+ next_update_time = chip->last_sram_update_time
+ + (fg_sram_update_period_ms / 1000);
+
+ if (next_update_time > current_time)
+ time_left = next_update_time - current_time;
+ else
+ time_left = 0;
+
+ schedule_delayed_work(
+ &chip->update_sram_data, msecs_to_jiffies(time_left * 1000));
+}
+
+static int fg_suspend(struct device *dev)
+{
+ struct fg_chip *chip = dev_get_drvdata(dev);
+
+ if (!chip->sw_rbias_ctrl)
+ return 0;
+
+ cancel_delayed_work(&chip->update_temp_work);
+ cancel_delayed_work(&chip->update_sram_data);
+
+ return 0;
+}
+
+static int fg_resume(struct device *dev)
+{
+ struct fg_chip *chip = dev_get_drvdata(dev);
+
+ if (!chip->sw_rbias_ctrl)
+ return 0;
+
+ check_and_update_sram_data(chip);
+ return 0;
+}
+
+static const struct dev_pm_ops qpnp_fg_pm_ops = {
+ .suspend = fg_suspend,
+ .resume = fg_resume,
+};
+
+static int fg_sense_type_set(const char *val, const struct kernel_param *kp)
+{
+ int rc;
+ struct power_supply *bms_psy;
+ struct fg_chip *chip;
+ int old_fg_sense_type = fg_sense_type;
+
+ rc = param_set_int(val, kp);
+ if (rc) {
+ pr_err("Unable to set fg_sense_type: %d\n", rc);
+ return rc;
+ }
+
+ if (fg_sense_type != 0 && fg_sense_type != 1) {
+ pr_err("Bad value %d\n", fg_sense_type);
+ fg_sense_type = old_fg_sense_type;
+ return -EINVAL;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("fg_sense_type set to %d\n", fg_sense_type);
+
+ bms_psy = power_supply_get_by_name("bms");
+ if (!bms_psy) {
+ pr_err("bms psy not found\n");
+ return 0;
+ }
+
+ chip = power_supply_get_drvdata(bms_psy);
+ rc = set_prop_sense_type(chip, fg_sense_type);
+ return rc;
+}
+
+static struct kernel_param_ops fg_sense_type_ops = {
+ .set = fg_sense_type_set,
+ .get = param_get_int,
+};
+
+module_param_cb(sense_type, &fg_sense_type_ops, &fg_sense_type, 0644);
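+
+/*
+ * Illustrative usage (the exact module name depends on the build):
+ *   echo 1 > /sys/module/<module>/parameters/sense_type
+ * fg_sense_type_set() validates the value (0 or 1) and forwards it to the
+ * fuel gauge via set_prop_sense_type().
+ */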
+
+static int fg_restart_set(const char *val, const struct kernel_param *kp)
+{
+ struct power_supply *bms_psy;
+ struct fg_chip *chip;
+
+ bms_psy = power_supply_get_by_name("bms");
+ if (!bms_psy) {
+ pr_err("bms psy not found\n");
+ return 0;
+ }
+ chip = power_supply_get_drvdata(bms_psy);
+
+ mutex_lock(&chip->sysfs_restart_lock);
+ if (fg_restart != 0) {
+ mutex_unlock(&chip->sysfs_restart_lock);
+ return 0;
+ }
+ fg_restart = 1;
+ mutex_unlock(&chip->sysfs_restart_lock);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("fuel gauge restart initiated from sysfs...\n");
+
+ schedule_work(&chip->sysfs_restart_work);
+ return 0;
+}
+
+static struct kernel_param_ops fg_restart_ops = {
+ .set = fg_restart_set,
+ .get = param_get_int,
+};
+
+module_param_cb(restart, &fg_restart_ops, &fg_restart, 0644);
+
+static struct platform_driver fg_driver = {
+ .driver = {
+ .name = QPNP_FG_DEV_NAME,
+ .of_match_table = fg_match_table,
+ .pm = &qpnp_fg_pm_ops,
+ },
+ .probe = fg_probe,
+ .remove = fg_remove,
+};
+
+static int __init fg_init(void)
+{
+ return platform_driver_register(&fg_driver);
+}
+
+static void __exit fg_exit(void)
+{
+ return platform_driver_unregister(&fg_driver);
+}
+
+module_init(fg_init);
+module_exit(fg_exit);
+
+MODULE_DESCRIPTION("QPNP Fuel Gauge Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" QPNP_FG_DEV_NAME);
module_param_named(
weak_chg_icl_ua, __weak_chg_icl_ua, int, S_IRUSR | S_IWUSR);
+static int __try_sink_enabled = 1;
+module_param_named(
+ try_sink_enabled, __try_sink_enabled, int, 0600
+);
+
#define MICRO_1P5A 1500000
#define MICRO_P1A 100000
#define OTG_DEFAULT_DEGLITCH_TIME_MS 50
return rc;
}
+	/*
+	 * Allow the DRP.DFP time to exceed tPDdebounce.
+	 */
+ rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG,
+ TYPEC_DRP_DFP_TIME_CFG_BIT,
+ TYPEC_DRP_DFP_TIME_CFG_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure DRP.DFP time rc=%d\n",
+ rc);
+ return rc;
+ }
+
/* configure float charger options */
switch (chip->dt.float_option) {
case 1:
chg->dev = &pdev->dev;
chg->param = v1_params;
chg->debug_mask = &__debug_mask;
+ chg->try_sink_enabled = &__try_sink_enabled;
chg->weak_chg_icl_ua = &__weak_chg_icl_ua;
chg->mode = PARALLEL_MASTER;
chg->irq_info = smb2_irqs;
--- /dev/null
+/* Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "SMBCHG: %s: " fmt, __func__
+
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+#include <linux/gpio.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/power_supply.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/bitops.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/debugfs.h>
+#include <linux/leds.h>
+#include <linux/rtc.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/batterydata-lib.h>
+#include <linux/of_batterydata.h>
+#include <linux/msm_bcl.h>
+#include <linux/ktime.h>
+#include <linux/extcon.h>
+#include <linux/pmic-voter.h>
+
+/* Mask/Bit helpers */
+#define _SMB_MASK(BITS, POS) \
+ ((unsigned char)(((1 << (BITS)) - 1) << (POS)))
+#define SMB_MASK(LEFT_BIT_POS, RIGHT_BIT_POS) \
+ _SMB_MASK((LEFT_BIT_POS) - (RIGHT_BIT_POS) + 1, \
+ (RIGHT_BIT_POS))
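+/*
+ * For example, SMB_MASK(4, 0) expands to _SMB_MASK(5, 0)
+ * = ((1 << 5) - 1) << 0 = 0x1F, i.e. a mask covering bits 4 through 0.
+ */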
+/* Config registers */
+struct smbchg_regulator {
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+};
+
+struct parallel_usb_cfg {
+ struct power_supply *psy;
+ int min_current_thr_ma;
+ int min_9v_current_thr_ma;
+ int allowed_lowering_ma;
+ int current_max_ma;
+ bool avail;
+ struct mutex lock;
+ int initial_aicl_ma;
+ ktime_t last_disabled;
+ bool enabled_once;
+};
+
+struct ilim_entry {
+ int vmin_uv;
+ int vmax_uv;
+ int icl_pt_ma;
+ int icl_lv_ma;
+ int icl_hv_ma;
+};
+
+struct ilim_map {
+ int num;
+ struct ilim_entry *entries;
+};
+
+struct smbchg_version_tables {
+ const int *dc_ilim_ma_table;
+ int dc_ilim_ma_len;
+ const int *usb_ilim_ma_table;
+ int usb_ilim_ma_len;
+ const int *iterm_ma_table;
+ int iterm_ma_len;
+ const int *fcc_comp_table;
+ int fcc_comp_len;
+ const int *aicl_rerun_period_table;
+ int aicl_rerun_period_len;
+ int rchg_thr_mv;
+};
+
+struct smbchg_chip {
+ struct device *dev;
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ int schg_version;
+
+ /* peripheral register address bases */
+ u16 chgr_base;
+ u16 bat_if_base;
+ u16 usb_chgpth_base;
+ u16 dc_chgpth_base;
+ u16 otg_base;
+ u16 misc_base;
+
+ int fake_battery_soc;
+ u8 revision[4];
+
+ /* configuration parameters */
+ int iterm_ma;
+ int usb_max_current_ma;
+ int typec_current_ma;
+ int dc_max_current_ma;
+ int dc_target_current_ma;
+ int cfg_fastchg_current_ma;
+ int fastchg_current_ma;
+ int vfloat_mv;
+ int fastchg_current_comp;
+ int float_voltage_comp;
+ int resume_delta_mv;
+ int safety_time;
+ int prechg_safety_time;
+ int bmd_pin_src;
+ int jeita_temp_hard_limit;
+ int aicl_rerun_period_s;
+ bool use_vfloat_adjustments;
+ bool iterm_disabled;
+ bool bmd_algo_disabled;
+ bool soft_vfloat_comp_disabled;
+ bool chg_enabled;
+ bool charge_unknown_battery;
+ bool chg_inhibit_en;
+ bool chg_inhibit_source_fg;
+ bool low_volt_dcin;
+ bool cfg_chg_led_support;
+ bool cfg_chg_led_sw_ctrl;
+ bool vbat_above_headroom;
+ bool force_aicl_rerun;
+ bool hvdcp3_supported;
+ bool restricted_charging;
+ bool skip_usb_suspend_for_fake_battery;
+ bool hvdcp_not_supported;
+ bool otg_pinctrl;
+ u8 original_usbin_allowance;
+ struct parallel_usb_cfg parallel;
+ struct delayed_work parallel_en_work;
+ struct dentry *debug_root;
+ struct smbchg_version_tables tables;
+
+ /* wipower params */
+ struct ilim_map wipower_default;
+ struct ilim_map wipower_pt;
+ struct ilim_map wipower_div2;
+ struct qpnp_vadc_chip *vadc_dev;
+ bool wipower_dyn_icl_avail;
+ struct ilim_entry current_ilim;
+ struct mutex wipower_config;
+ bool wipower_configured;
+ struct qpnp_adc_tm_btm_param param;
+
+ /* flash current prediction */
+ int rpara_uohm;
+ int rslow_uohm;
+ int vled_max_uv;
+
+ /* vfloat adjustment */
+ int max_vbat_sample;
+ int n_vbat_samples;
+
+ /* status variables */
+ int wake_reasons;
+ int previous_soc;
+ int usb_online;
+ bool dc_present;
+ bool usb_present;
+ bool batt_present;
+ int otg_retries;
+ ktime_t otg_enable_time;
+ bool aicl_deglitch_short;
+ bool safety_timer_en;
+ bool aicl_complete;
+ bool usb_ov_det;
+ bool otg_pulse_skip_dis;
+ const char *battery_type;
+ enum power_supply_type usb_supply_type;
+ bool very_weak_charger;
+ bool parallel_charger_detected;
+ bool chg_otg_enabled;
+ bool flash_triggered;
+ bool flash_active;
+ bool icl_disabled;
+ u32 wa_flags;
+ int usb_icl_delta;
+ bool typec_dfp;
+ unsigned int usb_current_max;
+ unsigned int usb_health;
+
+ /* jeita and temperature */
+ bool batt_hot;
+ bool batt_cold;
+ bool batt_warm;
+ bool batt_cool;
+ unsigned int thermal_levels;
+ unsigned int therm_lvl_sel;
+ unsigned int *thermal_mitigation;
+
+ /* irqs */
+ int batt_hot_irq;
+ int batt_warm_irq;
+ int batt_cool_irq;
+ int batt_cold_irq;
+ int batt_missing_irq;
+ int vbat_low_irq;
+ int chg_hot_irq;
+ int chg_term_irq;
+ int taper_irq;
+ bool taper_irq_enabled;
+ struct mutex taper_irq_lock;
+ int recharge_irq;
+ int fastchg_irq;
+ int wdog_timeout_irq;
+ int power_ok_irq;
+ int dcin_uv_irq;
+ int usbin_uv_irq;
+ int usbin_ov_irq;
+ int src_detect_irq;
+ int otg_fail_irq;
+ int otg_oc_irq;
+ int aicl_done_irq;
+ int usbid_change_irq;
+ int chg_error_irq;
+ bool enable_aicl_wake;
+
+ /* psy */
+ struct power_supply_desc usb_psy_d;
+ struct power_supply *usb_psy;
+ struct power_supply_desc batt_psy_d;
+ struct power_supply *batt_psy;
+ struct power_supply_desc dc_psy_d;
+ struct power_supply *dc_psy;
+ struct power_supply *bms_psy;
+ struct power_supply *typec_psy;
+ int dc_psy_type;
+ const char *bms_psy_name;
+ const char *battery_psy_name;
+
+ struct regulator *dpdm_reg;
+ struct smbchg_regulator otg_vreg;
+ struct smbchg_regulator ext_otg_vreg;
+ struct work_struct usb_set_online_work;
+ struct delayed_work vfloat_adjust_work;
+ struct delayed_work hvdcp_det_work;
+ spinlock_t sec_access_lock;
+ struct mutex therm_lvl_lock;
+ struct mutex usb_set_online_lock;
+ struct mutex pm_lock;
+ /* aicl deglitch workaround */
+ unsigned long first_aicl_seconds;
+ int aicl_irq_count;
+ struct mutex usb_status_lock;
+ bool hvdcp_3_det_ignore_uv;
+ struct completion src_det_lowered;
+ struct completion src_det_raised;
+ struct completion usbin_uv_lowered;
+ struct completion usbin_uv_raised;
+ int pulse_cnt;
+ struct led_classdev led_cdev;
+ bool skip_usb_notification;
+ u32 vchg_adc_channel;
+ struct qpnp_vadc_chip *vchg_vadc_dev;
+
+ /* voters */
+ struct votable *fcc_votable;
+ struct votable *usb_icl_votable;
+ struct votable *dc_icl_votable;
+ struct votable *usb_suspend_votable;
+ struct votable *dc_suspend_votable;
+ struct votable *battchg_suspend_votable;
+ struct votable *hw_aicl_rerun_disable_votable;
+ struct votable *hw_aicl_rerun_enable_indirect_votable;
+ struct votable *aicl_deglitch_short_votable;
+
+ /* extcon for VBUS / ID notification to USB */
+ struct extcon_dev *extcon;
+};
+
+enum qpnp_schg {
+ QPNP_SCHG,
+ QPNP_SCHG_LITE,
+};
+
+static char *version_str[] = {
+ [QPNP_SCHG] = "SCHG",
+ [QPNP_SCHG_LITE] = "SCHG_LITE",
+};
+
+enum pmic_subtype {
+ PMI8994 = 10,
+ PMI8950 = 17,
+ PMI8996 = 19,
+ PMI8937 = 55,
+};
+
+enum smbchg_wa {
+ SMBCHG_AICL_DEGLITCH_WA = BIT(0),
+ SMBCHG_HVDCP_9V_EN_WA = BIT(1),
+ SMBCHG_USB100_WA = BIT(2),
+ SMBCHG_BATT_OV_WA = BIT(3),
+ SMBCHG_CC_ESR_WA = BIT(4),
+ SMBCHG_FLASH_ICL_DISABLE_WA = BIT(5),
+ SMBCHG_RESTART_WA = BIT(6),
+ SMBCHG_FLASH_BUCK_SWITCH_FREQ_WA = BIT(7),
+};
+
+enum print_reason {
+ PR_REGISTER = BIT(0),
+ PR_INTERRUPT = BIT(1),
+ PR_STATUS = BIT(2),
+ PR_DUMP = BIT(3),
+ PR_PM = BIT(4),
+ PR_MISC = BIT(5),
+ PR_WIPOWER = BIT(6),
+ PR_TYPEC = BIT(7),
+};
+
+enum wake_reason {
+ PM_PARALLEL_CHECK = BIT(0),
+ PM_REASON_VFLOAT_ADJUST = BIT(1),
+ PM_ESR_PULSE = BIT(2),
+ PM_PARALLEL_TAPER = BIT(3),
+ PM_DETECT_HVDCP = BIT(4),
+};
+
+/* fcc_voters */
+#define ESR_PULSE_FCC_VOTER "ESR_PULSE_FCC_VOTER"
+#define BATT_TYPE_FCC_VOTER "BATT_TYPE_FCC_VOTER"
+#define RESTRICTED_CHG_FCC_VOTER "RESTRICTED_CHG_FCC_VOTER"
+
+/* ICL VOTERS */
+#define PSY_ICL_VOTER "PSY_ICL_VOTER"
+#define THERMAL_ICL_VOTER "THERMAL_ICL_VOTER"
+#define HVDCP_ICL_VOTER "HVDCP_ICL_VOTER"
+#define USER_ICL_VOTER "USER_ICL_VOTER"
+#define WEAK_CHARGER_ICL_VOTER "WEAK_CHARGER_ICL_VOTER"
+#define SW_AICL_ICL_VOTER "SW_AICL_ICL_VOTER"
+#define CHG_SUSPEND_WORKAROUND_ICL_VOTER "CHG_SUSPEND_WORKAROUND_ICL_VOTER"
+
+/* USB SUSPEND VOTERS */
+/* userspace has suspended charging altogether */
+#define USER_EN_VOTER "USER_EN_VOTER"
+/*
+ * this specific path has been suspended through the power supply
+ * framework
+ */
+#define POWER_SUPPLY_EN_VOTER "POWER_SUPPLY_EN_VOTER"
+/*
+ * the usb driver has suspended this path by setting a current limit
+ * of < 2 mA
+ */
+#define USB_EN_VOTER "USB_EN_VOTER"
+/*
+ * the thermal daemon can suspend a charge path when the system
+ * temperature levels rise
+ */
+#define THERMAL_EN_VOTER "THERMAL_EN_VOTER"
+/*
+ * an external OTG supply is being used, suspend charge path so the
+ * charger does not accidentally try to charge from the external supply.
+ */
+#define OTG_EN_VOTER "OTG_EN_VOTER"
+/*
+ * the charger is very weak, do not draw any current from it
+ */
+#define WEAK_CHARGER_EN_VOTER "WEAK_CHARGER_EN_VOTER"
+/*
+ * fake battery voter, used when the battery ID resistance is around 7.5 kohm
+ */
+#define FAKE_BATTERY_EN_VOTER "FAKE_BATTERY_EN_VOTER"
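+
+/*
+ * Sketch of how the suspend voters are used with the pmic-voter API: a client
+ * voting "true" requests suspending the USB path, e.g.
+ *   vote(chip->usb_suspend_votable, USB_EN_VOTER, true, 0);
+ * when the requested current drops to <= 2 mA (see
+ * smbchg_set_usb_current_max() below).
+ */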
+
+/* battchg_enable_voters */
+ /* userspace has disabled battery charging */
+#define BATTCHG_USER_EN_VOTER "BATTCHG_USER_EN_VOTER"
+ /* battery charging disabled while loading battery profiles */
+#define BATTCHG_UNKNOWN_BATTERY_EN_VOTER "BATTCHG_UNKNOWN_BATTERY_EN_VOTER"
+
+/* hw_aicl_rerun_enable_indirect_voters */
+/* enabled via device tree */
+#define DEFAULT_CONFIG_HW_AICL_VOTER "DEFAULT_CONFIG_HW_AICL_VOTER"
+/* Varb workaround voter */
+#define VARB_WORKAROUND_VOTER "VARB_WORKAROUND_VOTER"
+/* SHUTDOWN workaround voter */
+#define SHUTDOWN_WORKAROUND_VOTER "SHUTDOWN_WORKAROUND_VOTER"
+
+/* hw_aicl_rerun_disable_voters */
+/* the aggregated result of the enable-indirect votable's clients */
+#define HW_AICL_RERUN_ENABLE_INDIRECT_VOTER \
+ "HW_AICL_RERUN_ENABLE_INDIRECT_VOTER"
+/* Weak charger voter */
+#define WEAK_CHARGER_HW_AICL_VOTER "WEAK_CHARGER_HW_AICL_VOTER"
+
+/* aicl_short_deglitch_voters */
+/* Varb workaround voter */
+#define VARB_WORKAROUND_SHORT_DEGLITCH_VOTER \
+ "VARB_WRKARND_SHORT_DEGLITCH_VOTER"
+/* QC 2.0 */
+#define HVDCP_SHORT_DEGLITCH_VOTER "HVDCP_SHORT_DEGLITCH_VOTER"
+
+static const unsigned int smbchg_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_NONE,
+};
+
+static int smbchg_debug_mask;
+module_param_named(
+ debug_mask, smbchg_debug_mask, int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_parallel_en = 1;
+module_param_named(
+ parallel_en, smbchg_parallel_en, int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_main_chg_fcc_percent = 50;
+module_param_named(
+ main_chg_fcc_percent, smbchg_main_chg_fcc_percent,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_main_chg_icl_percent = 60;
+module_param_named(
+ main_chg_icl_percent, smbchg_main_chg_icl_percent,
+ int, S_IRUSR | S_IWUSR
+);
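+
+/*
+ * Illustrative split (assumed semantics of the two percentages above): with a
+ * total FCC of 3000 mA and main_chg_fcc_percent = 50, the main charger would
+ * be allotted 1500 mA and the parallel charger the remainder; the input
+ * current limit is divided the same way using main_chg_icl_percent.
+ */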
+
+static int smbchg_default_hvdcp_icl_ma = 1800;
+module_param_named(
+ default_hvdcp_icl_ma, smbchg_default_hvdcp_icl_ma,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_default_hvdcp3_icl_ma = 3000;
+module_param_named(
+ default_hvdcp3_icl_ma, smbchg_default_hvdcp3_icl_ma,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_default_dcp_icl_ma = 1800;
+module_param_named(
+ default_dcp_icl_ma, smbchg_default_dcp_icl_ma,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int wipower_dyn_icl_en;
+module_param_named(
+ dynamic_icl_wipower_en, wipower_dyn_icl_en,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int wipower_dcin_interval = ADC_MEAS1_INTERVAL_2P0MS;
+module_param_named(
+ wipower_dcin_interval, wipower_dcin_interval,
+ int, S_IRUSR | S_IWUSR
+);
+
+#define WIPOWER_DEFAULT_HYSTERESIS_UV	250000
+static int wipower_dcin_hyst_uv = WIPOWER_DEFAULT_HYSTERESIS_UV;
+module_param_named(
+ wipower_dcin_hyst_uv, wipower_dcin_hyst_uv,
+ int, S_IRUSR | S_IWUSR
+);
+
+#define pr_smb(reason, fmt, ...) \
+ do { \
+ if (smbchg_debug_mask & (reason)) \
+ pr_info(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define pr_smb_rt(reason, fmt, ...) \
+ do { \
+ if (smbchg_debug_mask & (reason)) \
+ pr_info_ratelimited(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
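+
+/*
+ * Example: pr_smb(PR_STATUS, "usb present = %d\n", usb_present) logs at info
+ * level when PR_STATUS is set in the debug_mask module parameter and falls
+ * back to pr_debug() otherwise; pr_smb_rt() is the ratelimited variant.
+ */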
+
+static int smbchg_read(struct smbchg_chip *chip, u8 *val,
+ u16 addr, int count)
+{
+ int rc = 0;
+ struct platform_device *pdev = chip->pdev;
+
+ if (addr == 0) {
+ dev_err(chip->dev, "addr cannot be zero addr=0x%02x sid=0x%02x rc=%d\n",
+ addr, to_spmi_device(pdev->dev.parent)->usid, rc);
+ return -EINVAL;
+ }
+
+ rc = regmap_bulk_read(chip->regmap, addr, val, count);
+ if (rc) {
+ dev_err(chip->dev, "spmi read failed addr=0x%02x sid=0x%02x rc=%d\n",
+ addr, to_spmi_device(pdev->dev.parent)->usid,
+ rc);
+ return rc;
+ }
+ return 0;
+}
+
+/*
+ * Writes to the register specified by base, changing only the bits set in
+ * mask.
+ *
+ * Do not use this function for register writes if possible. Instead use the
+ * smbchg_masked_write function.
+ *
+ * The sec_access_lock must be held for all register writes, and this function
+ * does not take it. If this function is used, hold the spinlock yourself or
+ * secure access writes may intermittently fail.
+ */
+static int smbchg_masked_write_raw(struct smbchg_chip *chip, u16 base, u8 mask,
+ u8 val)
+{
+ int rc;
+
+ rc = regmap_update_bits(chip->regmap, base, mask, val);
+ if (rc) {
+ dev_err(chip->dev, "spmi write failed: addr=%03X, rc=%d\n",
+ base, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/*
+ * Writes to the register specified by base, changing only the bits set in
+ * mask.
+ *
+ * This function holds a spin lock to ensure secure access register writes go
+ * through. If the secure access unlock register is armed, any unrelated
+ * register write can unarm it, causing the next write to fail.
+ *
+ * Note: do not use this for sec_access registers. Instead use the function
+ * below: smbchg_sec_masked_write
+ */
+static int smbchg_masked_write(struct smbchg_chip *chip, u16 base, u8 mask,
+ u8 val)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&chip->sec_access_lock, flags);
+ rc = smbchg_masked_write_raw(chip, base, mask, val);
+ spin_unlock_irqrestore(&chip->sec_access_lock, flags);
+
+ return rc;
+}
+
+/*
+ * Unlocks sec access and writes to the register specified.
+ *
+ * This function holds a spin lock to exclude other register writes while
+ * the two writes are taking place.
+ */
+#define SEC_ACCESS_OFFSET 0xD0
+#define SEC_ACCESS_VALUE 0xA5
+#define PERIPHERAL_MASK 0xFF
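+/*
+ * Worked example (illustrative addresses): for base 0x13F2,
+ * peripheral_base = 0x13F2 & ~0xFF = 0x1300, so the unlock value 0xA5 is
+ * written to 0x13D0 before the actual write to 0x13F2.
+ */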
+static int smbchg_sec_masked_write(struct smbchg_chip *chip, u16 base, u8 mask,
+ u8 val)
+{
+ unsigned long flags;
+ int rc;
+ u16 peripheral_base = base & (~PERIPHERAL_MASK);
+
+ spin_lock_irqsave(&chip->sec_access_lock, flags);
+
+ rc = smbchg_masked_write_raw(chip, peripheral_base + SEC_ACCESS_OFFSET,
+ SEC_ACCESS_VALUE, SEC_ACCESS_VALUE);
+ if (rc) {
+ dev_err(chip->dev, "Unable to unlock sec_access: %d", rc);
+ goto out;
+ }
+
+ rc = smbchg_masked_write_raw(chip, base, mask, val);
+
+out:
+ spin_unlock_irqrestore(&chip->sec_access_lock, flags);
+ return rc;
+}
+
+static void smbchg_stay_awake(struct smbchg_chip *chip, int reason)
+{
+ int reasons;
+
+ mutex_lock(&chip->pm_lock);
+ reasons = chip->wake_reasons | reason;
+ if (reasons != 0 && chip->wake_reasons == 0) {
+ pr_smb(PR_PM, "staying awake: 0x%02x (bit %d)\n",
+ reasons, reason);
+ pm_stay_awake(chip->dev);
+ }
+ chip->wake_reasons = reasons;
+ mutex_unlock(&chip->pm_lock);
+}
+
+static void smbchg_relax(struct smbchg_chip *chip, int reason)
+{
+ int reasons;
+
+ mutex_lock(&chip->pm_lock);
+ reasons = chip->wake_reasons & (~reason);
+ if (reasons == 0 && chip->wake_reasons != 0) {
+ pr_smb(PR_PM, "relaxing: 0x%02x (bit %d)\n",
+ reasons, reason);
+ pm_relax(chip->dev);
+ }
+ chip->wake_reasons = reasons;
+ mutex_unlock(&chip->pm_lock);
+}
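+
+/*
+ * Example of the reason accounting: smbchg_stay_awake(chip, PM_ESR_PULSE)
+ * sets bit 2 in wake_reasons and calls pm_stay_awake() only on the
+ * zero -> nonzero transition; smbchg_relax() clears the bit and calls
+ * pm_relax() only once the last reason is gone.
+ */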
+
+enum pwr_path_type {
+ UNKNOWN = 0,
+ PWR_PATH_BATTERY = 1,
+ PWR_PATH_USB = 2,
+ PWR_PATH_DC = 3,
+};
+
+#define PWR_PATH 0x08
+#define PWR_PATH_MASK 0x03
+static enum pwr_path_type smbchg_get_pwr_path(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+	rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + PWR_PATH, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read PWR_PATH rc = %d\n", rc);
+ return PWR_PATH_BATTERY;
+ }
+
+ return reg & PWR_PATH_MASK;
+}
+
+#define RID_STS 0xB
+#define RID_MASK 0xF
+#define IDEV_STS 0x8
+#define RT_STS 0x10
+#define USBID_MSB 0xE
+#define USBIN_UV_BIT BIT(0)
+#define USBIN_OV_BIT BIT(1)
+#define USBIN_SRC_DET_BIT BIT(2)
+#define FMB_STS_MASK SMB_MASK(3, 0)
+#define USBID_GND_THRESHOLD 0x495
+static bool is_otg_present_schg(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+ u8 usbid_reg[2];
+ u16 usbid_val;
+ /*
+ * After the falling edge of the usbid change interrupt occurs,
+ * there may still be some time before the ADC conversion for USB RID
+ * finishes in the fuel gauge. In the worst case, this could be up to
+ * 15 ms.
+ *
+ * Sleep for 20 ms (minimum msleep time) to wait for the conversion to
+ * finish and the USB RID status register to be updated before trying
+ * to detect OTG insertions.
+ */
+
+ msleep(20);
+
+ /*
+ * There is a problem with USBID conversions on PMI8994 revisions
+ * 2.0.0. As a workaround, check that the cable is not
+ * detected as factory test before enabling OTG.
+ */
+	rc = smbchg_read(chip, &reg, chip->misc_base + IDEV_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read IDEV_STS rc = %d\n", rc);
+ return false;
+ }
+
+ if ((reg & FMB_STS_MASK) != 0) {
+ pr_smb(PR_STATUS, "IDEV_STS = %02x, not ground\n", reg);
+ return false;
+ }
+
+ rc = smbchg_read(chip, usbid_reg, chip->usb_chgpth_base + USBID_MSB, 2);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read USBID rc = %d\n", rc);
+ return false;
+ }
+ usbid_val = (usbid_reg[0] << 8) | usbid_reg[1];
+
+ if (usbid_val > USBID_GND_THRESHOLD) {
+ pr_smb(PR_STATUS, "USBID = 0x%04x, too high to be ground\n",
+ usbid_val);
+ return false;
+ }
+
+	rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RID_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read usb rid status rc = %d\n", rc);
+ return false;
+ }
+
+ pr_smb(PR_STATUS, "RID_STS = %02x\n", reg);
+
+ return (reg & RID_MASK) == 0;
+}
+
+#define RID_GND_DET_STS BIT(2)
+static bool is_otg_present_schg_lite(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+	rc = smbchg_read(chip, &reg, chip->otg_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read otg RT status rc = %d\n", rc);
+ return false;
+ }
+
+ return !!(reg & RID_GND_DET_STS);
+}
+
+static bool is_otg_present(struct smbchg_chip *chip)
+{
+ if (chip->schg_version == QPNP_SCHG_LITE)
+ return is_otg_present_schg_lite(chip);
+
+ return is_otg_present_schg(chip);
+}
+
+#define USBIN_9V BIT(5)
+#define USBIN_UNREG BIT(4)
+#define USBIN_LV BIT(3)
+#define DCIN_9V BIT(2)
+#define DCIN_UNREG BIT(1)
+#define DCIN_LV BIT(0)
+#define INPUT_STS 0x0D
+#define DCIN_UV_BIT BIT(0)
+#define DCIN_OV_BIT BIT(1)
+static bool is_dc_present(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+	rc = smbchg_read(chip, &reg, chip->dc_chgpth_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read dc status rc = %d\n", rc);
+ return false;
+ }
+
+ if ((reg & DCIN_UV_BIT) || (reg & DCIN_OV_BIT))
+ return false;
+
+ return true;
+}
+
+static bool is_usb_present(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+	rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb rt status rc = %d\n", rc);
+ return false;
+ }
+ if (!(reg & USBIN_SRC_DET_BIT) || (reg & USBIN_OV_BIT))
+ return false;
+
+	rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + INPUT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb status rc = %d\n", rc);
+ return false;
+ }
+
+ return !!(reg & (USBIN_9V | USBIN_UNREG | USBIN_LV));
+}
+
+static char *usb_type_str[] = {
+ "SDP", /* bit 0 */
+ "OTHER", /* bit 1 */
+ "DCP", /* bit 2 */
+ "CDP", /* bit 3 */
+ "NONE", /* bit 4 error case */
+};
+
+#define N_TYPE_BITS 4
+#define TYPE_BITS_OFFSET 4
+
+static int get_type(u8 type_reg)
+{
+ unsigned long type = type_reg;
+ type >>= TYPE_BITS_OFFSET;
+ return find_first_bit(&type, N_TYPE_BITS);
+}
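+
+/*
+ * Worked example: type_reg = 0x40 shifted right by TYPE_BITS_OFFSET gives
+ * 0x4, whose first set bit is 2, so get_type() returns 2 ("DCP" /
+ * POWER_SUPPLY_TYPE_USB_DCP in the tables above and below).
+ */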
+
+/* helper to return the string of USB type */
+static inline char *get_usb_type_name(int type)
+{
+ return usb_type_str[type];
+}
+
+static enum power_supply_type usb_type_enum[] = {
+ POWER_SUPPLY_TYPE_USB, /* bit 0 */
+ POWER_SUPPLY_TYPE_USB_DCP, /* bit 1 */
+ POWER_SUPPLY_TYPE_USB_DCP, /* bit 2 */
+ POWER_SUPPLY_TYPE_USB_CDP, /* bit 3 */
+ POWER_SUPPLY_TYPE_USB_DCP, /* bit 4 error case, report DCP */
+};
+
+/* helper to return enum power_supply_type of USB type */
+static inline enum power_supply_type get_usb_supply_type(int type)
+{
+ return usb_type_enum[type];
+}
+
+static bool is_src_detect_high(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+	rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb rt status rc = %d\n", rc);
+ return false;
+ }
+	return !!(reg & USBIN_SRC_DET_BIT);
+}
+
+static void read_usb_type(struct smbchg_chip *chip, char **usb_type_name,
+ enum power_supply_type *usb_supply_type)
+{
+ int rc, type;
+ u8 reg;
+
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "src det low\n");
+ *usb_type_name = "Absent";
+ *usb_supply_type = POWER_SUPPLY_TYPE_UNKNOWN;
+ return;
+ }
+
+	rc = smbchg_read(chip, &reg, chip->misc_base + IDEV_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read status 5 rc = %d\n", rc);
+ *usb_type_name = "Other";
+ *usb_supply_type = POWER_SUPPLY_TYPE_UNKNOWN;
+ return;
+ }
+ type = get_type(reg);
+ *usb_type_name = get_usb_type_name(type);
+ *usb_supply_type = get_usb_supply_type(type);
+}
+
+#define CHGR_STS 0x0E
+#define BATT_LESS_THAN_2V BIT(4)
+#define CHG_HOLD_OFF_BIT BIT(3)
+#define CHG_TYPE_MASK SMB_MASK(2, 1)
+#define CHG_TYPE_SHIFT 1
+#define BATT_NOT_CHG_VAL 0x0
+#define BATT_PRE_CHG_VAL 0x1
+#define BATT_FAST_CHG_VAL 0x2
+#define BATT_TAPER_CHG_VAL 0x3
+#define CHG_INHIBIT_BIT BIT(1)
+#define BAT_TCC_REACHED_BIT BIT(7)
+static int get_prop_batt_status(struct smbchg_chip *chip)
+{
+ int rc, status = POWER_SUPPLY_STATUS_DISCHARGING;
+ u8 reg = 0, chg_type;
+ bool charger_present, chg_inhibit;
+
+ charger_present = is_usb_present(chip) | is_dc_present(chip) |
+ chip->hvdcp_3_det_ignore_uv;
+ if (!charger_present)
+ return POWER_SUPPLY_STATUS_DISCHARGING;
+
+	rc = smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to read RT_STS rc = %d\n", rc);
+ return POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ if (reg & BAT_TCC_REACHED_BIT)
+ return POWER_SUPPLY_STATUS_FULL;
+
+ chg_inhibit = reg & CHG_INHIBIT_BIT;
+ if (chg_inhibit)
+ return POWER_SUPPLY_STATUS_FULL;
+
+	rc = smbchg_read(chip, &reg, chip->chgr_base + CHGR_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to read CHGR_STS rc = %d\n", rc);
+ return POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ if (reg & CHG_HOLD_OFF_BIT) {
+ /*
+ * when chg hold off happens the battery is
+ * not charging
+ */
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ goto out;
+ }
+
+ chg_type = (reg & CHG_TYPE_MASK) >> CHG_TYPE_SHIFT;
+
+ if (chg_type == BATT_NOT_CHG_VAL && !chip->hvdcp_3_det_ignore_uv)
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ status = POWER_SUPPLY_STATUS_CHARGING;
+out:
+ pr_smb_rt(PR_MISC, "CHGR_STS = 0x%02x\n", reg);
+ return status;
+}
+
+#define BAT_PRES_STATUS 0x08
+#define BAT_PRES_BIT BIT(7)
+static int get_prop_batt_present(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+	rc = smbchg_read(chip, &reg, chip->bat_if_base + BAT_PRES_STATUS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to read CHGR_STS rc = %d\n", rc);
+ return 0;
+ }
+
+ return !!(reg & BAT_PRES_BIT);
+}
+
+static int get_prop_charge_type(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg, chg_type;
+
+	rc = smbchg_read(chip, &reg, chip->chgr_base + CHGR_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to read CHGR_STS rc = %d\n", rc);
+ return 0;
+ }
+
+ chg_type = (reg & CHG_TYPE_MASK) >> CHG_TYPE_SHIFT;
+ if (chg_type == BATT_NOT_CHG_VAL)
+ return POWER_SUPPLY_CHARGE_TYPE_NONE;
+ else if (chg_type == BATT_TAPER_CHG_VAL)
+ return POWER_SUPPLY_CHARGE_TYPE_TAPER;
+ else if (chg_type == BATT_FAST_CHG_VAL)
+ return POWER_SUPPLY_CHARGE_TYPE_FAST;
+ else if (chg_type == BATT_PRE_CHG_VAL)
+ return POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+
+ return POWER_SUPPLY_CHARGE_TYPE_NONE;
+}
+
+static int set_property_on_fg(struct smbchg_chip *chip,
+ enum power_supply_property prop, int val)
+{
+ int rc;
+ union power_supply_propval ret = {0, };
+
+ if (!chip->bms_psy && chip->bms_psy_name)
+ chip->bms_psy =
+ power_supply_get_by_name((char *)chip->bms_psy_name);
+ if (!chip->bms_psy) {
+ pr_smb(PR_STATUS, "no bms psy found\n");
+ return -EINVAL;
+ }
+
+ ret.intval = val;
+ rc = power_supply_set_property(chip->bms_psy, prop, &ret);
+ if (rc)
+ pr_smb(PR_STATUS,
+ "bms psy does not allow updating prop %d rc = %d\n",
+ prop, rc);
+
+ return rc;
+}
+
+static int get_property_from_fg(struct smbchg_chip *chip,
+ enum power_supply_property prop, int *val)
+{
+ int rc;
+ union power_supply_propval ret = {0, };
+
+ if (!chip->bms_psy && chip->bms_psy_name)
+ chip->bms_psy =
+ power_supply_get_by_name((char *)chip->bms_psy_name);
+ if (!chip->bms_psy) {
+ pr_smb(PR_STATUS, "no bms psy found\n");
+ return -EINVAL;
+ }
+
+ rc = power_supply_get_property(chip->bms_psy, prop, &ret);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "bms psy doesn't support reading prop %d rc = %d\n",
+ prop, rc);
+ return rc;
+ }
+
+ *val = ret.intval;
+ return rc;
+}
+
+#define DEFAULT_BATT_CAPACITY 50
+static int get_prop_batt_capacity(struct smbchg_chip *chip)
+{
+ int capacity, rc;
+
+ if (chip->fake_battery_soc >= 0)
+ return chip->fake_battery_soc;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_CAPACITY, &capacity);
+ if (rc) {
+ pr_smb(PR_STATUS, "Couldn't get capacity rc = %d\n", rc);
+ capacity = DEFAULT_BATT_CAPACITY;
+ }
+ return capacity;
+}
+
+#define DEFAULT_BATT_TEMP 200
+static int get_prop_batt_temp(struct smbchg_chip *chip)
+{
+ int temp, rc;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_TEMP, &temp);
+ if (rc) {
+ pr_smb(PR_STATUS, "Couldn't get temperature rc = %d\n", rc);
+ temp = DEFAULT_BATT_TEMP;
+ }
+ return temp;
+}
+
+#define DEFAULT_BATT_CURRENT_NOW 0
+static int get_prop_batt_current_now(struct smbchg_chip *chip)
+{
+ int ua, rc;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_CURRENT_NOW, &ua);
+ if (rc) {
+ pr_smb(PR_STATUS, "Couldn't get current rc = %d\n", rc);
+ ua = DEFAULT_BATT_CURRENT_NOW;
+ }
+ return ua;
+}
+
+#define DEFAULT_BATT_VOLTAGE_NOW 0
+static int get_prop_batt_voltage_now(struct smbchg_chip *chip)
+{
+ int uv, rc;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_VOLTAGE_NOW, &uv);
+ if (rc) {
+ pr_smb(PR_STATUS, "Couldn't get voltage rc = %d\n", rc);
+ uv = DEFAULT_BATT_VOLTAGE_NOW;
+ }
+ return uv;
+}
+
+#define DEFAULT_BATT_VOLTAGE_MAX_DESIGN 4200000
+static int get_prop_batt_voltage_max_design(struct smbchg_chip *chip)
+{
+ int uv, rc;
+
+ rc = get_property_from_fg(chip,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, &uv);
+ if (rc) {
+ pr_smb(PR_STATUS, "Couldn't get voltage rc = %d\n", rc);
+ uv = DEFAULT_BATT_VOLTAGE_MAX_DESIGN;
+ }
+ return uv;
+}
+
+static int get_prop_batt_health(struct smbchg_chip *chip)
+{
+ if (chip->batt_hot)
+ return POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (chip->batt_cold)
+ return POWER_SUPPLY_HEALTH_COLD;
+ else if (chip->batt_warm)
+ return POWER_SUPPLY_HEALTH_WARM;
+ else if (chip->batt_cool)
+ return POWER_SUPPLY_HEALTH_COOL;
+ else
+ return POWER_SUPPLY_HEALTH_GOOD;
+}
+
+static void get_property_from_typec(struct smbchg_chip *chip,
+ enum power_supply_property property,
+ union power_supply_propval *prop)
+{
+ int rc;
+
+ rc = power_supply_get_property(chip->typec_psy,
+ property, prop);
+ if (rc)
+ pr_smb(PR_TYPEC,
+ "typec psy doesn't support reading prop %d rc = %d\n",
+ property, rc);
+}
+
+static void update_typec_status(struct smbchg_chip *chip)
+{
+ union power_supply_propval type = {0, };
+ union power_supply_propval capability = {0, };
+
+ get_property_from_typec(chip, POWER_SUPPLY_PROP_TYPE, &type);
+ if (type.intval != POWER_SUPPLY_TYPE_UNKNOWN) {
+ get_property_from_typec(chip,
+ POWER_SUPPLY_PROP_CURRENT_CAPABILITY,
+ &capability);
+ chip->typec_current_ma = capability.intval;
+ pr_smb(PR_TYPEC, "SMB Type-C mode = %d, current=%d\n",
+ type.intval, capability.intval);
+ } else {
+ pr_smb(PR_TYPEC,
+			"typec detection not completed, continuing with USB update\n");
+ }
+}
+
+/*
+ * finds the index of the closest value in the array. If there are two that
+ * are equally close, the lower index will be returned
+ */
+static int find_closest_in_array(const int *arr, int len, int val)
+{
+ int i, closest = 0;
+
+ if (len == 0)
+ return closest;
+ for (i = 0; i < len; i++)
+ if (abs(val - arr[i]) < abs(val - arr[closest]))
+ closest = i;
+
+ return closest;
+}
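+
+/*
+ * Example: for arr = {300, 400, 450} and val = 430 the distances are
+ * 130/30/20, so index 2 is returned; for the tie at val = 425 the strict
+ * '<' comparison keeps the lower index 1.
+ */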
+
+/* finds the index of the closest smaller value in the array. */
+static int find_smaller_in_array(const int *table, int val, int len)
+{
+ int i;
+
+ for (i = len - 1; i >= 0; i--) {
+ if (val >= table[i])
+ break;
+ }
+
+ return i;
+}
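+
+/*
+ * Note: this returns -1 when val is smaller than every entry (e.g. val = 250
+ * against a table starting at 300), which is why callers check for i < 0.
+ */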
+
+static const int iterm_ma_table_8994[] = {
+ 300,
+ 50,
+ 100,
+ 150,
+ 200,
+ 250,
+ 500,
+ 600
+};
+
+static const int iterm_ma_table_8996[] = {
+ 300,
+ 50,
+ 100,
+ 150,
+ 200,
+ 250,
+ 400,
+ 500
+};
+
+static const int usb_ilim_ma_table_8994[] = {
+ 300,
+ 400,
+ 450,
+ 475,
+ 500,
+ 550,
+ 600,
+ 650,
+ 700,
+ 900,
+ 950,
+ 1000,
+ 1100,
+ 1200,
+ 1400,
+ 1450,
+ 1500,
+ 1600,
+ 1800,
+ 1850,
+ 1880,
+ 1910,
+ 1930,
+ 1950,
+ 1970,
+ 2000,
+ 2050,
+ 2100,
+ 2300,
+ 2400,
+ 2500,
+ 3000
+};
+
+static const int usb_ilim_ma_table_8996[] = {
+ 300,
+ 400,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 1000,
+ 1100,
+ 1200,
+ 1300,
+ 1400,
+ 1450,
+ 1500,
+ 1550,
+ 1600,
+ 1700,
+ 1800,
+ 1900,
+ 1950,
+ 2000,
+ 2050,
+ 2100,
+ 2200,
+ 2300,
+ 2400,
+ 2500,
+ 2600,
+ 2700,
+ 2800,
+ 2900,
+ 3000
+};
+
+static int dc_ilim_ma_table_8994[] = {
+ 300,
+ 400,
+ 450,
+ 475,
+ 500,
+ 550,
+ 600,
+ 650,
+ 700,
+ 900,
+ 950,
+ 1000,
+ 1100,
+ 1200,
+ 1400,
+ 1450,
+ 1500,
+ 1600,
+ 1800,
+ 1850,
+ 1880,
+ 1910,
+ 1930,
+ 1950,
+ 1970,
+ 2000,
+};
+
+static int dc_ilim_ma_table_8996[] = {
+ 300,
+ 400,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 1000,
+ 1100,
+ 1200,
+ 1300,
+ 1400,
+ 1450,
+ 1500,
+ 1550,
+ 1600,
+ 1700,
+ 1800,
+ 1900,
+ 1950,
+ 2000,
+ 2050,
+ 2100,
+ 2200,
+ 2300,
+ 2400,
+};
+
+static const int fcc_comp_table_8994[] = {
+ 250,
+ 700,
+ 900,
+ 1200,
+};
+
+static const int fcc_comp_table_8996[] = {
+ 250,
+ 1100,
+ 1200,
+ 1500,
+};
+
+static const int aicl_rerun_period[] = {
+ 45,
+ 90,
+ 180,
+ 360,
+};
+
+static const int aicl_rerun_period_schg_lite[] = {
+ 3, /* 2.8s */
+ 6, /* 5.6s */
+ 11, /* 11.3s */
+ 23, /* 22.5s */
+ 45,
+ 90,
+ 180,
+ 360,
+};
+
+static void use_pmi8994_tables(struct smbchg_chip *chip)
+{
+ chip->tables.usb_ilim_ma_table = usb_ilim_ma_table_8994;
+ chip->tables.usb_ilim_ma_len = ARRAY_SIZE(usb_ilim_ma_table_8994);
+ chip->tables.dc_ilim_ma_table = dc_ilim_ma_table_8994;
+ chip->tables.dc_ilim_ma_len = ARRAY_SIZE(dc_ilim_ma_table_8994);
+ chip->tables.iterm_ma_table = iterm_ma_table_8994;
+ chip->tables.iterm_ma_len = ARRAY_SIZE(iterm_ma_table_8994);
+ chip->tables.fcc_comp_table = fcc_comp_table_8994;
+ chip->tables.fcc_comp_len = ARRAY_SIZE(fcc_comp_table_8994);
+ chip->tables.rchg_thr_mv = 200;
+ chip->tables.aicl_rerun_period_table = aicl_rerun_period;
+ chip->tables.aicl_rerun_period_len = ARRAY_SIZE(aicl_rerun_period);
+}
+
+static void use_pmi8996_tables(struct smbchg_chip *chip)
+{
+ chip->tables.usb_ilim_ma_table = usb_ilim_ma_table_8996;
+ chip->tables.usb_ilim_ma_len = ARRAY_SIZE(usb_ilim_ma_table_8996);
+ chip->tables.dc_ilim_ma_table = dc_ilim_ma_table_8996;
+ chip->tables.dc_ilim_ma_len = ARRAY_SIZE(dc_ilim_ma_table_8996);
+ chip->tables.iterm_ma_table = iterm_ma_table_8996;
+ chip->tables.iterm_ma_len = ARRAY_SIZE(iterm_ma_table_8996);
+ chip->tables.fcc_comp_table = fcc_comp_table_8996;
+ chip->tables.fcc_comp_len = ARRAY_SIZE(fcc_comp_table_8996);
+ chip->tables.rchg_thr_mv = 150;
+ chip->tables.aicl_rerun_period_table = aicl_rerun_period;
+ chip->tables.aicl_rerun_period_len = ARRAY_SIZE(aicl_rerun_period);
+}
+
+#define CMD_CHG_REG 0x42
+#define EN_BAT_CHG_BIT BIT(1)
+static int smbchg_charging_en(struct smbchg_chip *chip, bool en)
+{
+ /* The en bit is configured active low */
+ return smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
+ EN_BAT_CHG_BIT, en ? 0 : EN_BAT_CHG_BIT);
+}
+
+#define CMD_IL 0x40
+#define USBIN_SUSPEND_BIT BIT(4)
+#define CURRENT_100_MA 100
+#define CURRENT_150_MA 150
+#define CURRENT_500_MA 500
+#define CURRENT_900_MA 900
+#define CURRENT_1500_MA 1500
+#define SUSPEND_CURRENT_MA 2
+#define ICL_OVERRIDE_BIT BIT(2)
+static int smbchg_usb_suspend(struct smbchg_chip *chip, bool suspend)
+{
+ int rc;
+
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ USBIN_SUSPEND_BIT, suspend ? USBIN_SUSPEND_BIT : 0);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set usb suspend rc = %d\n", rc);
+ return rc;
+}
+
+#define DCIN_SUSPEND_BIT BIT(3)
+static int smbchg_dc_suspend(struct smbchg_chip *chip, bool suspend)
+{
+ int rc = 0;
+
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ DCIN_SUSPEND_BIT, suspend ? DCIN_SUSPEND_BIT : 0);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set dc suspend rc = %d\n", rc);
+ return rc;
+}
+
+#define IL_CFG 0xF2
+#define DCIN_INPUT_MASK SMB_MASK(4, 0)
+static int smbchg_set_dc_current_max(struct smbchg_chip *chip, int current_ma)
+{
+ int i;
+ u8 dc_cur_val;
+
+ i = find_smaller_in_array(chip->tables.dc_ilim_ma_table,
+ current_ma, chip->tables.dc_ilim_ma_len);
+
+ if (i < 0) {
+ dev_err(chip->dev, "Cannot find %dma current_table\n",
+ current_ma);
+ return -EINVAL;
+ }
+
+ chip->dc_max_current_ma = chip->tables.dc_ilim_ma_table[i];
+ dc_cur_val = i & DCIN_INPUT_MASK;
+
+ pr_smb(PR_STATUS, "dc current set to %d mA\n",
+ chip->dc_max_current_ma);
+ return smbchg_sec_masked_write(chip, chip->dc_chgpth_base + IL_CFG,
+ DCIN_INPUT_MASK, dc_cur_val);
+}
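+
+/*
+ * Worked example with the PMI8994 table: a request of 1250 mA selects the
+ * largest entry not above it, 1200 mA at index 13, and that index is what
+ * gets programmed into the DCIN IL_CFG register.
+ */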
+
+#define AICL_WL_SEL_CFG 0xF5
+#define AICL_WL_SEL_MASK SMB_MASK(1, 0)
+#define AICL_WL_SEL_SCHG_LITE_MASK SMB_MASK(2, 0)
+static int smbchg_set_aicl_rerun_period_s(struct smbchg_chip *chip,
+ int period_s)
+{
+ int i;
+ u8 reg, mask;
+
+ i = find_smaller_in_array(chip->tables.aicl_rerun_period_table,
+ period_s, chip->tables.aicl_rerun_period_len);
+
+ if (i < 0) {
+ dev_err(chip->dev, "Cannot find %ds in aicl rerun period\n",
+ period_s);
+ return -EINVAL;
+ }
+
+ if (chip->schg_version == QPNP_SCHG_LITE)
+ mask = AICL_WL_SEL_SCHG_LITE_MASK;
+ else
+ mask = AICL_WL_SEL_MASK;
+
+ reg = i & mask;
+
+ pr_smb(PR_STATUS, "aicl rerun period set to %ds\n",
+ chip->tables.aicl_rerun_period_table[i]);
+ return smbchg_sec_masked_write(chip,
+ chip->dc_chgpth_base + AICL_WL_SEL_CFG,
+ mask, reg);
+}
+
+static struct power_supply *get_parallel_psy(struct smbchg_chip *chip)
+{
+ if (!chip->parallel.avail)
+ return NULL;
+ if (chip->parallel.psy)
+ return chip->parallel.psy;
+ chip->parallel.psy = power_supply_get_by_name("usb-parallel");
+ if (!chip->parallel.psy)
+ pr_smb(PR_STATUS, "parallel charger not found\n");
+ return chip->parallel.psy;
+}
+
+static void smbchg_usb_update_online_work(struct work_struct *work)
+{
+ struct smbchg_chip *chip = container_of(work,
+ struct smbchg_chip,
+ usb_set_online_work);
+ bool user_enabled = !get_client_vote(chip->usb_suspend_votable,
+ USER_EN_VOTER);
+ int online;
+
+ online = user_enabled && chip->usb_present && !chip->very_weak_charger;
+
+ mutex_lock(&chip->usb_set_online_lock);
+ if (chip->usb_online != online) {
+ pr_smb(PR_MISC, "setting usb psy online = %d\n", online);
+ chip->usb_online = online;
+ power_supply_changed(chip->usb_psy);
+ }
+ mutex_unlock(&chip->usb_set_online_lock);
+}
+
+#define CHGPTH_CFG 0xF4
+#define CFG_USB_2_3_SEL_BIT BIT(7)
+#define CFG_USB_2 0
+#define CFG_USB_3 BIT(7)
+#define USBIN_INPUT_MASK SMB_MASK(4, 0)
+#define USBIN_MODE_CHG_BIT BIT(0)
+#define USBIN_LIMITED_MODE 0
+#define USBIN_HC_MODE BIT(0)
+#define USB51_MODE_BIT BIT(1)
+#define USB51_100MA 0
+#define USB51_500MA BIT(1)
+static int smbchg_set_high_usb_chg_current(struct smbchg_chip *chip,
+ int current_ma)
+{
+ int i, rc;
+ u8 usb_cur_val;
+
+ if (current_ma == CURRENT_100_MA) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_2);
+ if (rc < 0) {
+ pr_err("Couldn't set CFG_USB_2 rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT | ICL_OVERRIDE_BIT,
+ USBIN_LIMITED_MODE | USB51_100MA | ICL_OVERRIDE_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't set ICL_OVERRIDE rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_STATUS,
+ "Forcing 100mA current limit\n");
+ chip->usb_max_current_ma = CURRENT_100_MA;
+ return rc;
+ }
+
+ i = find_smaller_in_array(chip->tables.usb_ilim_ma_table,
+ current_ma, chip->tables.usb_ilim_ma_len);
+ if (i < 0) {
+ dev_err(chip->dev,
+ "Cannot find %dma current_table using %d\n",
+ current_ma, CURRENT_150_MA);
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_3);
+ rc |= smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT,
+ USBIN_LIMITED_MODE | USB51_100MA);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set %dmA rc=%d\n",
+ CURRENT_150_MA, rc);
+ else
+ chip->usb_max_current_ma = 150;
+ return rc;
+ }
+
+ usb_cur_val = i & USBIN_INPUT_MASK;
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + IL_CFG,
+ USBIN_INPUT_MASK, usb_cur_val);
+ if (rc < 0) {
+ dev_err(chip->dev, "cannot write to config c rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT, USBIN_HC_MODE);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't write cfg 5 rc = %d\n", rc);
+ chip->usb_max_current_ma = chip->tables.usb_ilim_ma_table[i];
+ return rc;
+}
+
+/*
+ * If APSD results are used:
+ *	if SDP is detected, the 500mA setting is consulted:
+ *		if set, 500mA is drawn; if unset, 100mA is drawn.
+ *	if CDP/DCP is detected, the 0x0C setting is consulted,
+ *	i.e. the values in 0x41[1:0] do not matter.
+ */
+static int smbchg_set_usb_current_max(struct smbchg_chip *chip,
+ int current_ma)
+{
+ int rc = 0;
+
+ /*
+ * if the battery is not present, do not allow the usb ICL to lower in
+ * order to avoid browning out the device during a hotswap.
+ */
+ if (!chip->batt_present && current_ma < chip->usb_max_current_ma) {
+ pr_info_ratelimited("Ignoring usb current->%d, battery is absent\n",
+ current_ma);
+ return 0;
+ }
+ pr_smb(PR_STATUS, "USB current_ma = %d\n", current_ma);
+
+ if (current_ma <= SUSPEND_CURRENT_MA) {
+ /* suspend the usb if current <= 2mA */
+ rc = vote(chip->usb_suspend_votable, USB_EN_VOTER, true, 0);
+ chip->usb_max_current_ma = 0;
+ goto out;
+ } else {
+ rc = vote(chip->usb_suspend_votable, USB_EN_VOTER, false, 0);
+ }
+
+ switch (chip->usb_supply_type) {
+ case POWER_SUPPLY_TYPE_USB:
+ if ((current_ma < CURRENT_150_MA) &&
+ (chip->wa_flags & SMBCHG_USB100_WA))
+ current_ma = CURRENT_150_MA;
+
+ if (current_ma < CURRENT_150_MA) {
+ /* force 100mA */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_2);
+ if (rc < 0) {
+ pr_err("Couldn't set CHGPTH_CFG rc = %d\n", rc);
+ goto out;
+ }
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT,
+ USBIN_LIMITED_MODE | USB51_100MA);
+ if (rc < 0) {
+ pr_err("Couldn't set CMD_IL rc = %d\n", rc);
+ goto out;
+ }
+ chip->usb_max_current_ma = 100;
+ }
+ /* specific current values */
+ if (current_ma == CURRENT_150_MA) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_3);
+ if (rc < 0) {
+ pr_err("Couldn't set CHGPTH_CFG rc = %d\n", rc);
+ goto out;
+ }
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT,
+ USBIN_LIMITED_MODE | USB51_100MA);
+ if (rc < 0) {
+ pr_err("Couldn't set CMD_IL rc = %d\n", rc);
+ goto out;
+ }
+ chip->usb_max_current_ma = 150;
+ }
+ if (current_ma == CURRENT_500_MA) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_2);
+ if (rc < 0) {
+ pr_err("Couldn't set CHGPTH_CFG rc = %d\n", rc);
+ goto out;
+ }
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT,
+ USBIN_LIMITED_MODE | USB51_500MA);
+ if (rc < 0) {
+ pr_err("Couldn't set CMD_IL rc = %d\n", rc);
+ goto out;
+ }
+ chip->usb_max_current_ma = 500;
+ }
+ if (current_ma == CURRENT_900_MA) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_3);
+ if (rc < 0) {
+ pr_err("Couldn't set CHGPTH_CFG rc = %d\n", rc);
+ goto out;
+ }
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT,
+ USBIN_LIMITED_MODE | USB51_500MA);
+ if (rc < 0) {
+ pr_err("Couldn't set CMD_IL rc = %d\n", rc);
+ goto out;
+ }
+ chip->usb_max_current_ma = 900;
+ }
+ break;
+ case POWER_SUPPLY_TYPE_USB_CDP:
+ if (current_ma < CURRENT_1500_MA) {
+ /* use override for CDP */
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + CMD_IL,
+ ICL_OVERRIDE_BIT, ICL_OVERRIDE_BIT);
+ if (rc < 0)
+ pr_err("Couldn't set override rc = %d\n", rc);
+ }
+ /* fall through */
+ default:
+ rc = smbchg_set_high_usb_chg_current(chip, current_ma);
+ if (rc < 0)
+ pr_err("Couldn't set %dmA rc = %d\n", current_ma, rc);
+ break;
+ }
+
+out:
+ pr_smb(PR_STATUS, "usb type = %d current set to %d mA\n",
+ chip->usb_supply_type, chip->usb_max_current_ma);
+ return rc;
+}
+
+#define USBIN_HVDCP_STS 0x0C
+#define USBIN_HVDCP_SEL_BIT BIT(4)
+#define USBIN_HVDCP_SEL_9V_BIT BIT(1)
+#define SCHG_LITE_USBIN_HVDCP_SEL_9V_BIT BIT(2)
+#define SCHG_LITE_USBIN_HVDCP_SEL_BIT BIT(0)
+static int smbchg_get_min_parallel_current_ma(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg, hvdcp_sel, hvdcp_sel_9v;
+
+	rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + USBIN_HVDCP_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb status rc = %d\n", rc);
+ return 0;
+ }
+ if (chip->schg_version == QPNP_SCHG_LITE) {
+ hvdcp_sel = SCHG_LITE_USBIN_HVDCP_SEL_BIT;
+ hvdcp_sel_9v = SCHG_LITE_USBIN_HVDCP_SEL_9V_BIT;
+ } else {
+ hvdcp_sel = USBIN_HVDCP_SEL_BIT;
+ hvdcp_sel_9v = USBIN_HVDCP_SEL_9V_BIT;
+ }
+
+ if ((reg & hvdcp_sel) && (reg & hvdcp_sel_9v))
+ return chip->parallel.min_9v_current_thr_ma;
+ return chip->parallel.min_current_thr_ma;
+}
+
+static bool is_hvdcp_present(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg, hvdcp_sel;
+
+	rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + USBIN_HVDCP_STS, 1);
+ if (rc < 0) {
+ pr_err("Couldn't read hvdcp status rc = %d\n", rc);
+ return false;
+ }
+
+ pr_smb(PR_STATUS, "HVDCP_STS = 0x%02x\n", reg);
+ /*
+ * If a valid HVDCP is detected, notify it to the usb_psy only
+ * if USB is still present.
+ */
+ if (chip->schg_version == QPNP_SCHG_LITE)
+ hvdcp_sel = SCHG_LITE_USBIN_HVDCP_SEL_BIT;
+ else
+ hvdcp_sel = USBIN_HVDCP_SEL_BIT;
+
+ if ((reg & hvdcp_sel) && is_usb_present(chip))
+ return true;
+
+ return false;
+}
+
+#define FCC_CFG 0xF2
+#define FCC_500MA_VAL 0x4
+#define FCC_MASK SMB_MASK(4, 0)
+static int smbchg_set_fastchg_current_raw(struct smbchg_chip *chip,
+ int current_ma)
+{
+ int i, rc;
+ u8 cur_val;
+
+ /* the fcc enumerations are the same as the usb currents */
+ i = find_smaller_in_array(chip->tables.usb_ilim_ma_table,
+ current_ma, chip->tables.usb_ilim_ma_len);
+ if (i < 0) {
+ dev_err(chip->dev,
+ "Cannot find %dma current_table using %d\n",
+ current_ma, CURRENT_500_MA);
+
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + FCC_CFG,
+ FCC_MASK,
+ FCC_500MA_VAL);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set %dmA rc=%d\n",
+ CURRENT_500_MA, rc);
+ else
+ chip->fastchg_current_ma = 500;
+ return rc;
+ }
+
+ if (chip->tables.usb_ilim_ma_table[i] == chip->fastchg_current_ma) {
+ pr_smb(PR_STATUS, "skipping fastchg current request: %d\n",
+ chip->fastchg_current_ma);
+ return 0;
+ }
+
+ cur_val = i & FCC_MASK;
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + FCC_CFG,
+ FCC_MASK, cur_val);
+ if (rc < 0) {
+ dev_err(chip->dev, "cannot write to fcc cfg rc = %d\n", rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "fastcharge current requested %d, set to %d\n",
+ current_ma, chip->tables.usb_ilim_ma_table[cur_val]);
+
+ chip->fastchg_current_ma = chip->tables.usb_ilim_ma_table[cur_val];
+ return rc;
+}
+
+#define ICL_STS_1_REG 0x7
+#define ICL_STS_2_REG 0x9
+#define ICL_STS_MASK 0x1F
+#define AICL_SUSP_BIT BIT(6)
+#define AICL_STS_BIT BIT(5)
+#define USBIN_SUSPEND_STS_BIT BIT(3)
+#define USBIN_ACTIVE_PWR_SRC_BIT BIT(1)
+#define DCIN_ACTIVE_PWR_SRC_BIT BIT(0)
+#define PARALLEL_REENABLE_TIMER_MS 1000
+#define PARALLEL_CHG_THRESHOLD_CURRENT 1800
+static bool smbchg_is_usbin_active_pwr_src(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+	rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + ICL_STS_2_REG, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Could not read usb icl sts 2: %d\n", rc);
+ return false;
+ }
+
+ return !(reg & USBIN_SUSPEND_STS_BIT)
+ && (reg & USBIN_ACTIVE_PWR_SRC_BIT);
+}
+
+static int smbchg_parallel_usb_charging_en(struct smbchg_chip *chip, bool en)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+
+ if (!parallel_psy || !chip->parallel_charger_detected)
+ return 0;
+
+ pval.intval = en;
+ return power_supply_set_property(parallel_psy,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED, &pval);
+}
+
+#define ESR_PULSE_CURRENT_DELTA_MA 200
+static int smbchg_sw_esr_pulse_en(struct smbchg_chip *chip, bool en)
+{
+ int rc, fg_current_now, icl_ma;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_CURRENT_NOW,
+ &fg_current_now);
+ if (rc) {
+ pr_smb(PR_STATUS, "bms psy does not support OCV\n");
+ return 0;
+ }
+
+ icl_ma = max(chip->iterm_ma + ESR_PULSE_CURRENT_DELTA_MA,
+ fg_current_now - ESR_PULSE_CURRENT_DELTA_MA);
+ rc = vote(chip->fcc_votable, ESR_PULSE_FCC_VOTER, en, icl_ma);
+ if (rc < 0) {
+ pr_err("Couldn't Vote FCC en = %d rc = %d\n", en, rc);
+ return rc;
+ }
+ rc = smbchg_parallel_usb_charging_en(chip, !en);
+ return rc;
+}
+
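For a concrete feel of the vote above, assume the fuel gauge reports fg_current_now = 1500 mA (taking the value as already scaled to mA here) and iterm_ma = 100: the FCC request becomes max(100 + 200, 1500 - 200) = 1300 mA. The limit therefore sits a full ESR_PULSE_CURRENT_DELTA_MA below the present battery current, forcing a measurable current step for the ESR measurement, while staying at least 200 mA above the termination current.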
+#define USB_AICL_CFG 0xF3
+#define AICL_EN_BIT BIT(2)
+static void smbchg_rerun_aicl(struct smbchg_chip *chip)
+{
+ pr_smb(PR_STATUS, "Rerunning AICL...\n");
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, 0);
+ /* Add a delay so that AICL successfully clears */
+ msleep(50);
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, AICL_EN_BIT);
+}
+
+static void taper_irq_en(struct smbchg_chip *chip, bool en)
+{
+ mutex_lock(&chip->taper_irq_lock);
+ if (en != chip->taper_irq_enabled) {
+ if (en) {
+ enable_irq(chip->taper_irq);
+ enable_irq_wake(chip->taper_irq);
+ } else {
+ disable_irq_wake(chip->taper_irq);
+ disable_irq_nosync(chip->taper_irq);
+ }
+ chip->taper_irq_enabled = en;
+ }
+ mutex_unlock(&chip->taper_irq_lock);
+}
+
+static int smbchg_get_aicl_level_ma(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + ICL_STS_1_REG, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Could not read usb icl sts 1: %d\n", rc);
+ return 0;
+ }
+ if (reg & AICL_SUSP_BIT) {
+ pr_warn("AICL suspended: %02x\n", reg);
+ return 0;
+ }
+ reg &= ICL_STS_MASK;
+ if (reg >= chip->tables.usb_ilim_ma_len) {
+ pr_warn("invalid AICL value: %02x\n", reg);
+ return 0;
+ }
+ return chip->tables.usb_ilim_ma_table[reg];
+}
+
+static void smbchg_parallel_usb_disable(struct smbchg_chip *chip)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+ int fcc_ma, usb_icl_ma;
+
+ if (!parallel_psy || !chip->parallel_charger_detected)
+ return;
+ pr_smb(PR_STATUS, "disabling parallel charger\n");
+ chip->parallel.last_disabled = ktime_get_boottime();
+ taper_irq_en(chip, false);
+ chip->parallel.initial_aicl_ma = 0;
+ chip->parallel.current_max_ma = 0;
+ pval.intval = SUSPEND_CURRENT_MA * 1000;
+ power_supply_set_property(parallel_psy, POWER_SUPPLY_PROP_CURRENT_MAX,
+ &pval);
+
+ pval.intval = false;
+ power_supply_set_property(parallel_psy, POWER_SUPPLY_PROP_PRESENT,
+ &pval);
+
+ fcc_ma = get_effective_result_locked(chip->fcc_votable);
+ usb_icl_ma = get_effective_result_locked(chip->usb_icl_votable);
+ if (fcc_ma < 0)
+ pr_err("no voters for fcc, skip it\n");
+ else
+ smbchg_set_fastchg_current_raw(chip, fcc_ma);
+
+ if (usb_icl_ma < 0)
+ pr_err("no voters for usb_icl, skip it\n");
+ else
+ smbchg_set_usb_current_max(chip, usb_icl_ma);
+
+ smbchg_rerun_aicl(chip);
+}
+
+#define PARALLEL_TAPER_MAX_TRIES 3
+#define PARALLEL_FCC_PERCENT_REDUCTION 75
+#define MINIMUM_PARALLEL_FCC_MA 500
+#define CHG_ERROR_BIT BIT(0)
+#define BAT_TAPER_MODE_BIT BIT(6)
+static void smbchg_parallel_usb_taper(struct smbchg_chip *chip)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+ int parallel_fcc_ma, tries = 0;
+ u8 reg = 0;
+
+ if (!parallel_psy || !chip->parallel_charger_detected)
+ return;
+
+ smbchg_stay_awake(chip, PM_PARALLEL_TAPER);
+try_again:
+ mutex_lock(&chip->parallel.lock);
+ if (chip->parallel.current_max_ma == 0) {
+ pr_smb(PR_STATUS, "Not parallel charging, skipping\n");
+ goto done;
+ }
+ power_supply_get_property(parallel_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ tries += 1;
+ parallel_fcc_ma = pval.intval / 1000;
+ pr_smb(PR_STATUS, "try #%d parallel charger fcc = %d\n",
+ tries, parallel_fcc_ma);
+ if (parallel_fcc_ma < MINIMUM_PARALLEL_FCC_MA
+ || tries > PARALLEL_TAPER_MAX_TRIES) {
+ smbchg_parallel_usb_disable(chip);
+ goto done;
+ }
+ pval.intval = ((parallel_fcc_ma
+ * PARALLEL_FCC_PERCENT_REDUCTION) / 100);
+ pr_smb(PR_STATUS, "reducing FCC of parallel charger to %d\n",
+ pval.intval);
+ /* Change it to uA */
+ pval.intval *= 1000;
+ power_supply_set_property(parallel_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ /*
+ * sleep here for 100 ms in order to make sure the charger has a chance
+ * to go back into constant current charging
+ */
+ mutex_unlock(&chip->parallel.lock);
+ msleep(100);
+
+ mutex_lock(&chip->parallel.lock);
+ if (chip->parallel.current_max_ma == 0) {
+ pr_smb(PR_STATUS, "Not parallel charging, skipping\n");
+ goto done;
+ }
+ smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1);
+ if (reg & BAT_TAPER_MODE_BIT) {
+ mutex_unlock(&chip->parallel.lock);
+ goto try_again;
+ }
+ taper_irq_en(chip, true);
+done:
+ mutex_unlock(&chip->parallel.lock);
+ smbchg_relax(chip, PM_PARALLEL_TAPER);
+}
+
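Worked through with an illustrative starting point: if the parallel charger reports an FCC of 1000 mA, each try_again pass reprograms it to 75% of the last reading, stepping through 750, 562 and 421 mA (integer division). On the fourth pass the reading is below MINIMUM_PARALLEL_FCC_MA and the try counter has passed PARALLEL_TAPER_MAX_TRIES, so smbchg_parallel_usb_disable() runs and the main charger finishes the taper alone.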
+static void smbchg_parallel_usb_enable(struct smbchg_chip *chip,
+ int total_current_ma)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+ int new_parallel_cl_ma, set_parallel_cl_ma, new_pmi_cl_ma, rc;
+ int current_table_index, target_icl_ma;
+ int fcc_ma, main_fastchg_current_ma;
+ int target_parallel_fcc_ma, supplied_parallel_fcc_ma;
+ int parallel_chg_fcc_percent;
+
+ if (!parallel_psy || !chip->parallel_charger_detected)
+ return;
+
+ pr_smb(PR_STATUS, "Attempting to enable parallel charger\n");
+ pval.intval = chip->vfloat_mv + 50;
+ rc = power_supply_set_property(parallel_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set Vflt on parallel psy rc: %d\n", rc);
+ return;
+ }
+ /* Set USB ICL */
+ target_icl_ma = get_effective_result_locked(chip->usb_icl_votable);
+ if (target_icl_ma < 0) {
+ pr_err("no voters for usb_icl, skip it\n");
+ return;
+ }
+ new_parallel_cl_ma = total_current_ma
+ * (100 - smbchg_main_chg_icl_percent) / 100;
+ taper_irq_en(chip, true);
+
+ pval.intval = true;
+ power_supply_set_property(parallel_psy, POWER_SUPPLY_PROP_PRESENT,
+ &pval);
+
+ pval.intval = new_parallel_cl_ma * 1000;
+ power_supply_set_property(parallel_psy, POWER_SUPPLY_PROP_CURRENT_MAX,
+ &pval);
+
+ /* read back the real amount of current we are getting */
+ power_supply_get_property(parallel_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+ set_parallel_cl_ma = pval.intval / 1000;
+ chip->parallel.current_max_ma = new_parallel_cl_ma;
+ pr_smb(PR_MISC, "Requested ICL = %d from parallel, got %d\n",
+ new_parallel_cl_ma, set_parallel_cl_ma);
+ new_pmi_cl_ma = max(0, target_icl_ma - set_parallel_cl_ma);
+ pr_smb(PR_STATUS, "New Total USB current = %d[%d, %d]\n",
+ total_current_ma, new_pmi_cl_ma,
+ set_parallel_cl_ma);
+ smbchg_set_usb_current_max(chip, new_pmi_cl_ma);
+
+ /* begin splitting the fast charge current */
+ fcc_ma = get_effective_result_locked(chip->fcc_votable);
+ if (fcc_ma < 0) {
+ pr_err("no voters for fcc, skip it\n");
+ return;
+ }
+ parallel_chg_fcc_percent = 100 - smbchg_main_chg_fcc_percent;
+ target_parallel_fcc_ma = (fcc_ma * parallel_chg_fcc_percent) / 100;
+ pval.intval = target_parallel_fcc_ma * 1000;
+ power_supply_set_property(parallel_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ /* check how much actual current is supplied by the parallel charger */
+ power_supply_get_property(parallel_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ supplied_parallel_fcc_ma = pval.intval / 1000;
+ pr_smb(PR_MISC, "Requested FCC = %d from parallel, got %d\n",
+ target_parallel_fcc_ma, supplied_parallel_fcc_ma);
+
+ /* then for the main charger, use the left over FCC */
+ current_table_index = find_smaller_in_array(
+ chip->tables.usb_ilim_ma_table,
+ fcc_ma - supplied_parallel_fcc_ma,
+ chip->tables.usb_ilim_ma_len);
+ main_fastchg_current_ma =
+ chip->tables.usb_ilim_ma_table[current_table_index];
+ smbchg_set_fastchg_current_raw(chip, main_fastchg_current_ma);
+ pr_smb(PR_STATUS, "FCC = %d[%d, %d]\n", fcc_ma, main_fastchg_current_ma,
+ supplied_parallel_fcc_ma);
+
+ chip->parallel.enabled_once = true;
+
+ return;
+}
+
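A concrete split, using illustrative parameter values of smbchg_main_chg_icl_percent = 60 and smbchg_main_chg_fcc_percent = 50 (both module parameters defined earlier in the file): with total_current_ma = 2000 and a 3000 mA FCC vote, the parallel charger is asked for 2000 * 40 / 100 = 800 mA of input current; whatever it actually grants is subtracted from target_icl_ma to size the main charger's ICL. The FCC then splits 1500/1500, with the main charger's share rounded down to the nearest usb_ilim_ma_table entry.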
+static bool smbchg_is_parallel_usb_ok(struct smbchg_chip *chip,
+ int *ret_total_current_ma)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+ int min_current_thr_ma, rc, type;
+ int total_current_ma, current_limit_ma, parallel_cl_ma;
+ ktime_t kt_since_last_disable;
+ u8 reg;
+ int fcc_ma = get_effective_result_locked(chip->fcc_votable);
+ const char *fcc_voter
+ = get_effective_client_locked(chip->fcc_votable);
+ int usb_icl_ma = get_effective_result_locked(chip->usb_icl_votable);
+
+ if (!parallel_psy || !smbchg_parallel_en
+ || !chip->parallel_charger_detected) {
+ pr_smb(PR_STATUS, "Parallel charging not enabled\n");
+ return false;
+ }
+
+ if (fcc_ma < 0) {
+ pr_err("no voters for fcc! Can't enable parallel\n");
+ return false;
+ }
+ if (usb_icl_ma < 0) {
+ pr_err("no voters for usb_icl, Can't enable parallel\n");
+ return false;
+ }
+
+ kt_since_last_disable = ktime_sub(ktime_get_boottime(),
+ chip->parallel.last_disabled);
+ if (chip->parallel.current_max_ma == 0
+ && chip->parallel.enabled_once
+ && ktime_to_ms(kt_since_last_disable)
+ < PARALLEL_REENABLE_TIMER_MS) {
+ pr_smb(PR_STATUS, "Only been %lld since disable, skipping\n",
+ ktime_to_ms(kt_since_last_disable));
+ return false;
+ }
+
+ /*
+ * If the battery is not present, try not to change parallel charging
+ * from OFF to ON or from ON to OFF, as it could cause the device to
+ * brown out in the instant that the USB settings are changed.
+ *
+ * Only allow parallel charging check to report false (thereby turning
+ * off parallel charging) if the battery is still there, or if parallel
+ * charging is disabled in the first place.
+ */
+ if (get_prop_charge_type(chip) != POWER_SUPPLY_CHARGE_TYPE_FAST
+ && (get_prop_batt_present(chip)
+ || chip->parallel.current_max_ma == 0)) {
+ pr_smb(PR_STATUS, "Not in fast charge, skipping\n");
+ return false;
+ }
+
+ if (get_prop_batt_health(chip) != POWER_SUPPLY_HEALTH_GOOD) {
+ pr_smb(PR_STATUS, "JEITA active, skipping\n");
+ return false;
+ }
+
+ rc = smbchg_read(chip, &reg, chip->misc_base + IDEV_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read status 5 rc = %d\n", rc);
+ return false;
+ }
+
+ type = get_type(reg);
+ if (get_usb_supply_type(type) == POWER_SUPPLY_TYPE_USB_CDP) {
+ pr_smb(PR_STATUS, "CDP adapter, skipping\n");
+ return false;
+ }
+
+ if (get_usb_supply_type(type) == POWER_SUPPLY_TYPE_USB) {
+ pr_smb(PR_STATUS, "SDP adapter, skipping\n");
+ return false;
+ }
+
+ /*
+ * If USBIN is suspended or not the active power source, do not enable
+ * parallel charging. The device may be charging off of DCIN.
+ */
+ if (!smbchg_is_usbin_active_pwr_src(chip)) {
+ pr_smb(PR_STATUS, "USB not active power source: %02x\n", reg);
+ return false;
+ }
+
+ min_current_thr_ma = smbchg_get_min_parallel_current_ma(chip);
+ if (min_current_thr_ma <= 0) {
+ pr_smb(PR_STATUS, "parallel charger unavailable for thr: %d\n",
+ min_current_thr_ma);
+ return false;
+ }
+
+ if (usb_icl_ma < min_current_thr_ma) {
+ pr_smb(PR_STATUS, "Weak USB chg skip enable: %d < %d\n",
+ usb_icl_ma, min_current_thr_ma);
+ return false;
+ }
+
+ if (!fcc_voter)
+ return false;
+ /*
+ * Suspend the parallel charger if the effective FCC vote comes
+ * from an ESR pulse and the charging current is < 1800 mA.
+ */
+ if ((strcmp(fcc_voter, ESR_PULSE_FCC_VOTER) == 0)
+ && fcc_ma < PARALLEL_CHG_THRESHOLD_CURRENT) {
+ pr_smb(PR_STATUS, "FCC %d lower than %d\n",
+ fcc_ma,
+ PARALLEL_CHG_THRESHOLD_CURRENT);
+ return false;
+ }
+
+ current_limit_ma = smbchg_get_aicl_level_ma(chip);
+ if (current_limit_ma <= 0)
+ return false;
+
+ if (chip->parallel.initial_aicl_ma == 0) {
+ if (current_limit_ma < min_current_thr_ma) {
+ pr_smb(PR_STATUS, "Initial AICL very low: %d < %d\n",
+ current_limit_ma, min_current_thr_ma);
+ return false;
+ }
+ chip->parallel.initial_aicl_ma = current_limit_ma;
+ }
+
+ power_supply_get_property(parallel_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+ parallel_cl_ma = pval.intval / 1000;
+ /*
+ * Read back the real amount of current we are getting
+ * Treat 2mA as 0 because that is the suspend current setting
+ */
+ if (parallel_cl_ma <= SUSPEND_CURRENT_MA)
+ parallel_cl_ma = 0;
+
+ /*
+ * Compute the total available input current: the AICL result plus
+ * whatever the parallel path already draws, capped at the ICL vote.
+ */
+ total_current_ma = min(current_limit_ma + parallel_cl_ma, usb_icl_ma);
+
+ if (total_current_ma < chip->parallel.initial_aicl_ma
+ - chip->parallel.allowed_lowering_ma) {
+ pr_smb(PR_STATUS,
+ "Total current reduced a lot: %d (%d + %d) < %d - %d\n",
+ total_current_ma,
+ current_limit_ma, parallel_cl_ma,
+ chip->parallel.initial_aicl_ma,
+ chip->parallel.allowed_lowering_ma);
+ return false;
+ }
+
+ *ret_total_current_ma = total_current_ma;
+ return true;
+}
+
+#define PARALLEL_CHARGER_EN_DELAY_MS 500
+static void smbchg_parallel_usb_en_work(struct work_struct *work)
+{
+ struct smbchg_chip *chip = container_of(work,
+ struct smbchg_chip,
+ parallel_en_work.work);
+ int previous_aicl_ma, total_current_ma, aicl_ma;
+ bool in_progress;
+
+ /* do a check to see if the aicl is stable */
+ previous_aicl_ma = smbchg_get_aicl_level_ma(chip);
+ msleep(PARALLEL_CHARGER_EN_DELAY_MS);
+ aicl_ma = smbchg_get_aicl_level_ma(chip);
+ if (previous_aicl_ma == aicl_ma) {
+ pr_smb(PR_STATUS, "AICL at %d\n", aicl_ma);
+ } else {
+ pr_smb(PR_STATUS,
+ "AICL changed [%d -> %d], recheck %d ms\n",
+ previous_aicl_ma, aicl_ma,
+ PARALLEL_CHARGER_EN_DELAY_MS);
+ goto recheck;
+ }
+
+ mutex_lock(&chip->parallel.lock);
+ in_progress = (chip->parallel.current_max_ma != 0);
+ if (smbchg_is_parallel_usb_ok(chip, &total_current_ma)) {
+ smbchg_parallel_usb_enable(chip, total_current_ma);
+ } else {
+ if (in_progress) {
+ pr_smb(PR_STATUS, "parallel charging unavailable\n");
+ smbchg_parallel_usb_disable(chip);
+ }
+ }
+ mutex_unlock(&chip->parallel.lock);
+ smbchg_relax(chip, PM_PARALLEL_CHECK);
+ return;
+
+recheck:
+ schedule_delayed_work(&chip->parallel_en_work, 0);
+}
+
+static void smbchg_parallel_usb_check_ok(struct smbchg_chip *chip)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+
+ if (!parallel_psy || !chip->parallel_charger_detected)
+ return;
+
+ smbchg_stay_awake(chip, PM_PARALLEL_CHECK);
+ schedule_delayed_work(&chip->parallel_en_work, 0);
+}
+
+static int charging_suspend_vote_cb(struct votable *votable, void *data,
+ int suspend,
+ const char *client)
+{
+ int rc;
+ struct smbchg_chip *chip = data;
+
+ if (suspend < 0) {
+ pr_err("No voters\n");
+ suspend = false;
+ }
+
+ rc = smbchg_charging_en(chip, !suspend);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't configure batt chg: 0x%x rc = %d\n",
+ !suspend, rc);
+ }
+
+ return rc;
+}
+
+static int usb_suspend_vote_cb(struct votable *votable,
+ void *data,
+ int suspend,
+ const char *client)
+{
+ int rc;
+ struct smbchg_chip *chip = data;
+
+ if (suspend < 0) {
+ pr_err("No voters\n");
+ suspend = false;
+ }
+
+ rc = smbchg_usb_suspend(chip, suspend);
+ if (rc < 0)
+ return rc;
+
+ if ((strcmp(client, THERMAL_EN_VOTER) == 0)
+ || (strcmp(client, POWER_SUPPLY_EN_VOTER) == 0)
+ || (strcmp(client, USER_EN_VOTER) == 0)
+ || (strcmp(client, FAKE_BATTERY_EN_VOTER) == 0))
+ smbchg_parallel_usb_check_ok(chip);
+
+ return rc;
+}
+
+static int dc_suspend_vote_cb(struct votable *votable,
+ void *data,
+ int suspend,
+ const char *client)
+{
+ int rc;
+ struct smbchg_chip *chip = data;
+
+ if (suspend < 0) {
+ pr_err("No voters\n");
+ suspend = false;
+ }
+
+ rc = smbchg_dc_suspend(chip, suspend);
+ if (rc < 0)
+ return rc;
+
+ if (chip->dc_psy_type != -EINVAL && chip->dc_psy)
+ power_supply_changed(chip->dc_psy);
+
+ return rc;
+}
+
+static int set_fastchg_current_vote_cb(struct votable *votable,
+ void *data,
+ int fcc_ma,
+ const char *client)
+{
+ struct smbchg_chip *chip = data;
+ int rc;
+
+ if (fcc_ma < 0) {
+ pr_err("No voters\n");
+ return 0;
+ }
+
+ if (chip->parallel.current_max_ma == 0) {
+ rc = smbchg_set_fastchg_current_raw(chip, fcc_ma);
+ if (rc < 0) {
+ pr_err("Can't set FCC fcc_ma=%d rc=%d\n", fcc_ma, rc);
+ return rc;
+ }
+ }
+ /*
+ * check if parallel charging can be enabled, and if enabled,
+ * distribute the fcc
+ */
+ smbchg_parallel_usb_check_ok(chip);
+ return 0;
+}
+
+static int smbchg_set_fastchg_current_user(struct smbchg_chip *chip,
+ int current_ma)
+{
+ int rc = 0;
+
+ pr_smb(PR_STATUS, "User setting FCC to %d\n", current_ma);
+
+ rc = vote(chip->fcc_votable, BATT_TYPE_FCC_VOTER, true, current_ma);
+ if (rc < 0)
+ pr_err("Couldn't vote en rc %d\n", rc);
+ return rc;
+}
+
+static struct ilim_entry *smbchg_wipower_find_entry(struct smbchg_chip *chip,
+ struct ilim_map *map, int uv)
+{
+ int i;
+ struct ilim_entry *ret = &(chip->wipower_default.entries[0]);
+
+ for (i = 0; i < map->num; i++) {
+ if (is_between(map->entries[i].vmin_uv, map->entries[i].vmax_uv,
+ uv))
+ ret = &map->entries[i];
+ }
+ return ret;
+}
+
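smbchg_wipower_find_entry() relies on the is_between() helper defined earlier in the driver. A sketch of its assumed behavior, tolerant of either ordering of the two bounds:

static bool is_between(int left, int right, int value)
{
	if (left >= right && left >= value && value >= right)
		return true;
	if (left <= right && left <= value && value <= right)
		return true;

	return false;
}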
+#define ZIN_ICL_PT 0xFC
+#define ZIN_ICL_LV 0xFD
+#define ZIN_ICL_HV 0xFE
+#define ZIN_ICL_MASK SMB_MASK(4, 0)
+static int smbchg_dcin_ilim_config(struct smbchg_chip *chip, int offset, int ma)
+{
+ int i, rc;
+
+ i = find_smaller_in_array(chip->tables.dc_ilim_ma_table,
+ ma, chip->tables.dc_ilim_ma_len);
+
+ if (i < 0)
+ i = 0;
+
+ rc = smbchg_sec_masked_write(chip, chip->bat_if_base + offset,
+ ZIN_ICL_MASK, i);
+ if (rc)
+ dev_err(chip->dev, "Couldn't write bat if offset %d value = %d rc = %d\n",
+ offset, i, rc);
+ return rc;
+}
+
+static int smbchg_wipower_ilim_config(struct smbchg_chip *chip,
+ struct ilim_entry *ilim)
+{
+ int rc = 0;
+
+ if (chip->current_ilim.icl_pt_ma != ilim->icl_pt_ma) {
+ rc = smbchg_dcin_ilim_config(chip, ZIN_ICL_PT, ilim->icl_pt_ma);
+ if (rc)
+ dev_err(chip->dev, "failed to write batif offset %d %dma rc = %d\n",
+ ZIN_ICL_PT, ilim->icl_pt_ma, rc);
+ else
+ chip->current_ilim.icl_pt_ma = ilim->icl_pt_ma;
+ }
+
+ if (chip->current_ilim.icl_lv_ma != ilim->icl_lv_ma) {
+ rc = smbchg_dcin_ilim_config(chip, ZIN_ICL_LV, ilim->icl_lv_ma);
+ if (rc)
+ dev_err(chip->dev, "failed to write batif offset %d %dma rc = %d\n",
+ ZIN_ICL_LV, ilim->icl_lv_ma, rc);
+ else
+ chip->current_ilim.icl_lv_ma = ilim->icl_lv_ma;
+ }
+
+ if (chip->current_ilim.icl_hv_ma != ilim->icl_hv_ma) {
+ rc = smbchg_dcin_ilim_config(chip, ZIN_ICL_HV, ilim->icl_hv_ma);
+ if (rc)
+ dev_err(chip->dev, "failed to write batif offset %d %dma rc = %d\n",
+ ZIN_ICL_HV, ilim->icl_hv_ma, rc);
+ else
+ chip->current_ilim.icl_hv_ma = ilim->icl_hv_ma;
+ }
+ return rc;
+}
+
+static void btm_notify_dcin(enum qpnp_tm_state state, void *ctx);
+static int smbchg_wipower_dcin_btm_configure(struct smbchg_chip *chip,
+ struct ilim_entry *ilim)
+{
+ int rc;
+
+ if (ilim->vmin_uv == chip->current_ilim.vmin_uv
+ && ilim->vmax_uv == chip->current_ilim.vmax_uv)
+ return 0;
+
+ chip->param.channel = DCIN;
+ chip->param.btm_ctx = chip;
+ if (wipower_dcin_interval < ADC_MEAS1_INTERVAL_0MS)
+ wipower_dcin_interval = ADC_MEAS1_INTERVAL_0MS;
+
+ if (wipower_dcin_interval > ADC_MEAS1_INTERVAL_16S)
+ wipower_dcin_interval = ADC_MEAS1_INTERVAL_16S;
+
+ chip->param.timer_interval = wipower_dcin_interval;
+ chip->param.threshold_notification = &btm_notify_dcin;
+ chip->param.high_thr = ilim->vmax_uv + wipower_dcin_hyst_uv;
+ chip->param.low_thr = ilim->vmin_uv - wipower_dcin_hyst_uv;
+ chip->param.state_request = ADC_TM_HIGH_LOW_THR_ENABLE;
+ rc = qpnp_vadc_channel_monitor(chip->vadc_dev, &chip->param);
+ if (rc) {
+ dev_err(chip->dev, "Couldn't configure btm for dcin rc = %d\n",
+ rc);
+ } else {
+ chip->current_ilim.vmin_uv = ilim->vmin_uv;
+ chip->current_ilim.vmax_uv = ilim->vmax_uv;
+ pr_smb(PR_STATUS, "btm ilim = (%duV %duV %dmA %dmA %dmA)\n",
+ ilim->vmin_uv, ilim->vmax_uv,
+ ilim->icl_pt_ma, ilim->icl_lv_ma, ilim->icl_hv_ma);
+ }
+ return rc;
+}
+
+static int smbchg_wipower_icl_configure(struct smbchg_chip *chip,
+ int dcin_uv, bool div2)
+{
+ int rc = 0;
+ struct ilim_map *map = div2 ? &chip->wipower_div2 : &chip->wipower_pt;
+ struct ilim_entry *ilim = smbchg_wipower_find_entry(chip, map, dcin_uv);
+
+ rc = smbchg_wipower_ilim_config(chip, ilim);
+ if (rc) {
+ dev_err(chip->dev, "failed to config ilim rc = %d, dcin_uv = %d , div2 = %d, ilim = (%duV %duV %dmA %dmA %dmA)\n",
+ rc, dcin_uv, div2,
+ ilim->vmin_uv, ilim->vmax_uv,
+ ilim->icl_pt_ma, ilim->icl_lv_ma, ilim->icl_hv_ma);
+ return rc;
+ }
+
+ rc = smbchg_wipower_dcin_btm_configure(chip, ilim);
+ if (rc) {
+ dev_err(chip->dev, "failed to config btm rc = %d, dcin_uv = %d , div2 = %d, ilim = (%duV %duV %dmA %dmA %dmA)\n",
+ rc, dcin_uv, div2,
+ ilim->vmin_uv, ilim->vmax_uv,
+ ilim->icl_pt_ma, ilim->icl_lv_ma, ilim->icl_hv_ma);
+ return rc;
+ }
+ chip->wipower_configured = true;
+ return 0;
+}
+
+static void smbchg_wipower_icl_deconfigure(struct smbchg_chip *chip)
+{
+ int rc;
+ struct ilim_entry *ilim = &(chip->wipower_default.entries[0]);
+
+ if (!chip->wipower_configured)
+ return;
+
+ rc = smbchg_wipower_ilim_config(chip, ilim);
+ if (rc)
+ dev_err(chip->dev, "Couldn't config default ilim rc = %d\n",
+ rc);
+
+ rc = qpnp_vadc_end_channel_monitor(chip->vadc_dev);
+ if (rc)
+ dev_err(chip->dev, "Couldn't de configure btm for dcin rc = %d\n",
+ rc);
+
+ chip->wipower_configured = false;
+ chip->current_ilim.vmin_uv = 0;
+ chip->current_ilim.vmax_uv = 0;
+ chip->current_ilim.icl_pt_ma = ilim->icl_pt_ma;
+ chip->current_ilim.icl_lv_ma = ilim->icl_lv_ma;
+ chip->current_ilim.icl_hv_ma = ilim->icl_hv_ma;
+ pr_smb(PR_WIPOWER, "De config btm\n");
+}
+
+#define FV_STS 0x0C
+#define DIV2_ACTIVE BIT(7)
+static void __smbchg_wipower_check(struct smbchg_chip *chip)
+{
+ int chg_type;
+ bool usb_present, dc_present;
+ int rc;
+ int dcin_uv;
+ bool div2;
+ struct qpnp_vadc_result adc_result;
+ u8 reg;
+
+ if (!wipower_dyn_icl_en) {
+ smbchg_wipower_icl_deconfigure(chip);
+ return;
+ }
+
+ chg_type = get_prop_charge_type(chip);
+ usb_present = is_usb_present(chip);
+ dc_present = is_dc_present(chip);
+ if (chg_type != POWER_SUPPLY_CHARGE_TYPE_NONE
+ && !usb_present
+ && dc_present
+ && chip->dc_psy_type == POWER_SUPPLY_TYPE_WIPOWER) {
+ rc = qpnp_vadc_read(chip->vadc_dev, DCIN, &adc_result);
+ if (rc) {
+ pr_smb(PR_STATUS, "error DCIN read rc = %d\n", rc);
+ return;
+ }
+ dcin_uv = adc_result.physical;
+
+ /* check div_by_2 */
+ rc = smbchg_read(chip, &reg, chip->chgr_base + FV_STS, 1);
+ if (rc) {
+ pr_smb(PR_STATUS, "error DCIN read rc = %d\n", rc);
+ return;
+ }
+ div2 = !!(reg & DIV2_ACTIVE);
+
+ pr_smb(PR_WIPOWER,
+ "config ICL chg_type = %d usb = %d dc = %d dcin_uv(adc_code) = %d (0x%x) div2 = %d\n",
+ chg_type, usb_present, dc_present, dcin_uv,
+ adc_result.adc_code, div2);
+ smbchg_wipower_icl_configure(chip, dcin_uv, div2);
+ } else {
+ pr_smb(PR_WIPOWER,
+ "deconfig ICL chg_type = %d usb = %d dc = %d\n",
+ chg_type, usb_present, dc_present);
+ smbchg_wipower_icl_deconfigure(chip);
+ }
+}
+
+static void smbchg_wipower_check(struct smbchg_chip *chip)
+{
+ if (!chip->wipower_dyn_icl_avail)
+ return;
+
+ mutex_lock(&chip->wipower_config);
+ __smbchg_wipower_check(chip);
+ mutex_unlock(&chip->wipower_config);
+}
+
+static void btm_notify_dcin(enum qpnp_tm_state state, void *ctx)
+{
+ struct smbchg_chip *chip = ctx;
+
+ mutex_lock(&chip->wipower_config);
+ pr_smb(PR_WIPOWER, "%s state\n",
+ state == ADC_TM_LOW_STATE ? "low" : "high");
+ chip->current_ilim.vmin_uv = 0;
+ chip->current_ilim.vmax_uv = 0;
+ __smbchg_wipower_check(chip);
+ mutex_unlock(&chip->wipower_config);
+}
+
+static int force_dcin_icl_write(void *data, u64 val)
+{
+ struct smbchg_chip *chip = data;
+
+ smbchg_wipower_check(chip);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_dcin_icl_ops, NULL,
+ force_dcin_icl_write, "0x%02llx\n");
+
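force_dcin_icl_ops only does something once it is attached to a debugfs file. A minimal sketch of the expected wiring, assuming the chip->debug_root dentry is created elsewhere during probe (the helper name here is illustrative):

static void smbchg_add_force_dcin_debugfs(struct smbchg_chip *chip)
{
	/* write-only: echo 1 > .../force_dcin_icl_check reruns the check */
	debugfs_create_file("force_dcin_icl_check", 0200, chip->debug_root,
			    chip, &force_dcin_icl_ops);
}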
+/*
+ * set the dc charge path's maximum allowed current draw
+ * that may be limited by the system's thermal level
+ */
+static int set_dc_current_limit_vote_cb(struct votable *votable,
+ void *data,
+ int icl_ma,
+ const char *client)
+{
+ struct smbchg_chip *chip = data;
+
+ if (icl_ma < 0) {
+ pr_err("No voters\n");
+ return 0;
+ }
+
+ return smbchg_set_dc_current_max(chip, icl_ma);
+}
+
+/*
+ * set the usb charge path's maximum allowed current draw
+ * that may be limited by the system's thermal level
+ */
+static int set_usb_current_limit_vote_cb(struct votable *votable,
+ void *data,
+ int icl_ma,
+ const char *client)
+{
+ struct smbchg_chip *chip = data;
+ int rc, aicl_ma;
+ const char *effective_id;
+
+ if (icl_ma < 0) {
+ pr_err("No voters\n");
+ return 0;
+ }
+ effective_id = get_effective_client_locked(chip->usb_icl_votable);
+
+ if (!effective_id)
+ return 0;
+
+ /* disable parallel charging if HVDCP is voting for 300mA */
+ if (strcmp(effective_id, HVDCP_ICL_VOTER) == 0)
+ smbchg_parallel_usb_disable(chip);
+
+ if (chip->parallel.current_max_ma == 0) {
+ rc = smbchg_set_usb_current_max(chip, icl_ma);
+ if (rc) {
+ pr_err("Failed to set usb current max: %d\n", rc);
+ return rc;
+ }
+ }
+
+ /* skip the aicl rerun if hvdcp icl voter is active */
+ if (strcmp(effective_id, HVDCP_ICL_VOTER) == 0)
+ return 0;
+
+ aicl_ma = smbchg_get_aicl_level_ma(chip);
+ if (icl_ma > aicl_ma)
+ smbchg_rerun_aicl(chip);
+ smbchg_parallel_usb_check_ok(chip);
+ return 0;
+}
+
+static int smbchg_system_temp_level_set(struct smbchg_chip *chip,
+ int lvl_sel)
+{
+ int rc = 0;
+ int prev_therm_lvl;
+ int thermal_icl_ma;
+
+ if (!chip->thermal_mitigation) {
+ dev_err(chip->dev, "Thermal mitigation not supported\n");
+ return -EINVAL;
+ }
+
+ if (lvl_sel < 0) {
+ dev_err(chip->dev, "Unsupported level selected %d\n", lvl_sel);
+ return -EINVAL;
+ }
+
+ if (lvl_sel >= chip->thermal_levels) {
+ dev_err(chip->dev, "Unsupported level selected %d forcing %d\n",
+ lvl_sel, chip->thermal_levels - 1);
+ lvl_sel = chip->thermal_levels - 1;
+ }
+
+ if (lvl_sel == chip->therm_lvl_sel)
+ return 0;
+
+ mutex_lock(&chip->therm_lvl_lock);
+ prev_therm_lvl = chip->therm_lvl_sel;
+ chip->therm_lvl_sel = lvl_sel;
+ if (chip->therm_lvl_sel == (chip->thermal_levels - 1)) {
+ /*
+ * Disable charging if highest value selected by
+ * setting the DC and USB path in suspend
+ */
+ rc = vote(chip->dc_suspend_votable, THERMAL_EN_VOTER, true, 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set dc suspend rc %d\n", rc);
+ goto out;
+ }
+ rc = vote(chip->usb_suspend_votable, THERMAL_EN_VOTER, true, 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set usb suspend rc %d\n", rc);
+ goto out;
+ }
+ goto out;
+ }
+
+ if (chip->therm_lvl_sel == 0) {
+ rc = vote(chip->usb_icl_votable, THERMAL_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't disable USB thermal ICL vote rc=%d\n",
+ rc);
+
+ rc = vote(chip->dc_icl_votable, THERMAL_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't disable DC thermal ICL vote rc=%d\n",
+ rc);
+ } else {
+ thermal_icl_ma =
+ (int)chip->thermal_mitigation[chip->therm_lvl_sel];
+ rc = vote(chip->usb_icl_votable, THERMAL_ICL_VOTER, true,
+ thermal_icl_ma);
+ if (rc < 0)
+ pr_err("Couldn't vote for USB thermal ICL rc=%d\n", rc);
+
+ rc = vote(chip->dc_icl_votable, THERMAL_ICL_VOTER, true,
+ thermal_icl_ma);
+ if (rc < 0)
+ pr_err("Couldn't vote for DC thermal ICL rc=%d\n", rc);
+ }
+
+ if (prev_therm_lvl == chip->thermal_levels - 1) {
+ /*
+ * If the highest value was selected previously, charging must have
+ * been disabled. Enable charging by taking the DC and USB path
+ * out of suspend.
+ */
+ rc = vote(chip->dc_suspend_votable, THERMAL_EN_VOTER, false, 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set dc suspend rc %d\n", rc);
+ goto out;
+ }
+ rc = vote(chip->usb_suspend_votable, THERMAL_EN_VOTER,
+ false, 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set usb suspend rc %d\n", rc);
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&chip->therm_lvl_lock);
+ return rc;
+}
+
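As an illustration, take a hypothetical four-entry thermal mitigation table of <2100 1800 1000 500> (mA). Selecting level 2 votes 1000 mA on both the USB and DC ICL votables; selecting level 3, the highest, suspends both input paths outright rather than voting an ICL; and dropping back to level 0 removes the THERMAL_ICL_VOTER votes so the remaining voters set the limit again.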
+static int smbchg_ibat_ocp_threshold_ua = 4500000;
+module_param(smbchg_ibat_ocp_threshold_ua, int, 0644);
+
+#define UCONV 1000000LL
+#define MCONV 1000LL
+#define FLASH_V_THRESHOLD 3000000
+#define FLASH_VDIP_MARGIN 100000
+#define VPH_FLASH_VDIP (FLASH_V_THRESHOLD + FLASH_VDIP_MARGIN)
+#define BUCK_EFFICIENCY 800LL
+static int smbchg_calc_max_flash_current(struct smbchg_chip *chip)
+{
+ int ocv_uv, esr_uohm, rbatt_uohm, ibat_now, rc;
+ int64_t ibat_flash_ua, avail_flash_ua, avail_flash_power_fw;
+ int64_t ibat_safe_ua, vin_flash_uv, vph_flash_uv;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_VOLTAGE_OCV, &ocv_uv);
+ if (rc) {
+ pr_smb(PR_STATUS, "bms psy does not support OCV\n");
+ return 0;
+ }
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_RESISTANCE,
+ &esr_uohm);
+ if (rc) {
+ pr_smb(PR_STATUS, "bms psy does not support resistance\n");
+ return 0;
+ }
+
+ rc = msm_bcl_read(BCL_PARAM_CURRENT, &ibat_now);
+ if (rc) {
+ pr_smb(PR_STATUS, "BCL current read failed: %d\n", rc);
+ return 0;
+ }
+
+ rbatt_uohm = esr_uohm + chip->rpara_uohm + chip->rslow_uohm;
+ /*
+ * Calculate the maximum current that can be pulled out of the battery
+ * before the battery voltage dips below a safe threshold.
+ */
+ ibat_safe_ua = div_s64((ocv_uv - VPH_FLASH_VDIP) * UCONV,
+ rbatt_uohm);
+
+ if (ibat_safe_ua <= smbchg_ibat_ocp_threshold_ua) {
+ /*
+ * If the calculated current is below the OCP threshold, then
+ * use it as the possible flash current.
+ */
+ ibat_flash_ua = ibat_safe_ua - ibat_now;
+ vph_flash_uv = VPH_FLASH_VDIP;
+ } else {
+ /*
+ * If the calculated current is above the OCP threshold, then
+ * use the ocp threshold instead.
+ *
+ * Any higher current will be tripping the battery OCP.
+ */
+ ibat_flash_ua = smbchg_ibat_ocp_threshold_ua - ibat_now;
+ vph_flash_uv = ocv_uv - div64_s64((int64_t)rbatt_uohm
+ * smbchg_ibat_ocp_threshold_ua, UCONV);
+ }
+ /* Calculate the input voltage of the flash module. */
+ vin_flash_uv = max((chip->vled_max_uv + 500000LL),
+ div64_s64((vph_flash_uv * 1200), 1000));
+ /* Calculate the available power for the flash module. */
+ avail_flash_power_fw = BUCK_EFFICIENCY * vph_flash_uv * ibat_flash_ua;
+ /*
+ * Calculate the available amount of current the flash module can draw
+ * before collapsing the battery (available power / flash input voltage).
+ */
+ avail_flash_ua = div64_s64(avail_flash_power_fw, vin_flash_uv * MCONV);
+ pr_smb(PR_MISC,
+ "avail_iflash=%lld, ocv=%d, ibat=%d, rbatt=%d\n",
+ avail_flash_ua, ocv_uv, ibat_now, rbatt_uohm);
+ return (int)avail_flash_ua;
+}
+
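Plugging in illustrative numbers: with ocv = 3.8 V, rbatt = 150 mohm, ibat_now = 1 A and vled_max_uv = 3.3 V, the safe battery current is (3,800,000 - 3,100,000) * 10^6 / 150,000 ~= 4.67 A, above the 4.5 A OCP threshold, so the OCP branch is taken: ibat_flash = 4.5 - 1.0 = 3.5 A and vph_flash = 3.8 V - 0.15 ohm * 4.5 A = 3.125 V. Then vin_flash = max(3.3 + 0.5, 1.2 * 3.125) = 3.8 V, and the available flash current works out to 0.8 * 3.125 * 3.5 / 3.8 ~= 2.3 A.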
+#define FCC_CMP_CFG 0xF3
+#define FCC_COMP_MASK SMB_MASK(1, 0)
+static int smbchg_fastchg_current_comp_set(struct smbchg_chip *chip,
+ int comp_current)
+{
+ int rc;
+ u8 i;
+
+ for (i = 0; i < chip->tables.fcc_comp_len; i++)
+ if (comp_current == chip->tables.fcc_comp_table[i])
+ break;
+
+ if (i >= chip->tables.fcc_comp_len)
+ return -EINVAL;
+
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + FCC_CMP_CFG,
+ FCC_COMP_MASK, i);
+
+ if (rc)
+ dev_err(chip->dev, "Couldn't set fastchg current comp rc = %d\n",
+ rc);
+
+ return rc;
+}
+
+#define CFG_TCC_REG 0xF9
+#define CHG_ITERM_MASK SMB_MASK(2, 0)
+static int smbchg_iterm_set(struct smbchg_chip *chip, int iterm_ma)
+{
+ int rc;
+ u8 reg;
+
+ reg = find_closest_in_array(
+ chip->tables.iterm_ma_table,
+ chip->tables.iterm_ma_len,
+ iterm_ma);
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->chgr_base + CFG_TCC_REG,
+ CHG_ITERM_MASK, reg);
+ if (rc) {
+ dev_err(chip->dev,
+ "Couldn't set iterm rc = %d\n", rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "set tcc (%d) to 0x%02x\n",
+ iterm_ma, reg);
+ chip->iterm_ma = iterm_ma;
+
+ return 0;
+}
+
+#define FV_CMP_CFG 0xF5
+#define FV_COMP_MASK SMB_MASK(5, 0)
+static int smbchg_float_voltage_comp_set(struct smbchg_chip *chip, int code)
+{
+ int rc;
+ u8 val;
+
+ val = code & FV_COMP_MASK;
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + FV_CMP_CFG,
+ FV_COMP_MASK, val);
+
+ if (rc)
+ dev_err(chip->dev, "Couldn't set float voltage comp rc = %d\n",
+ rc);
+
+ return rc;
+}
+
+#define VFLOAT_CFG_REG 0xF4
+#define MIN_FLOAT_MV 3600
+#define MAX_FLOAT_MV 4500
+#define VFLOAT_MASK SMB_MASK(5, 0)
+
+#define MID_RANGE_FLOAT_MV_MIN 3600
+#define MID_RANGE_FLOAT_MIN_VAL 0x05
+#define MID_RANGE_FLOAT_STEP_MV 20
+
+#define HIGH_RANGE_FLOAT_MIN_MV 4340
+#define HIGH_RANGE_FLOAT_MIN_VAL 0x2A
+#define HIGH_RANGE_FLOAT_STEP_MV 10
+
+#define VHIGH_RANGE_FLOAT_MIN_MV 4360
+#define VHIGH_RANGE_FLOAT_MIN_VAL 0x2C
+#define VHIGH_RANGE_FLOAT_STEP_MV 20
+static int smbchg_float_voltage_set(struct smbchg_chip *chip, int vfloat_mv)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval prop;
+ int rc, delta;
+ u8 temp;
+
+ if ((vfloat_mv < MIN_FLOAT_MV) || (vfloat_mv > MAX_FLOAT_MV)) {
+ dev_err(chip->dev, "bad float voltage mv =%d asked to set\n",
+ vfloat_mv);
+ return -EINVAL;
+ }
+
+ if (vfloat_mv <= HIGH_RANGE_FLOAT_MIN_MV) {
+ /* mid range */
+ delta = vfloat_mv - MID_RANGE_FLOAT_MV_MIN;
+ temp = MID_RANGE_FLOAT_MIN_VAL + delta
+ / MID_RANGE_FLOAT_STEP_MV;
+ vfloat_mv -= delta % MID_RANGE_FLOAT_STEP_MV;
+ } else if (vfloat_mv <= VHIGH_RANGE_FLOAT_MIN_MV) {
+ /* high range */
+ delta = vfloat_mv - HIGH_RANGE_FLOAT_MIN_MV;
+ temp = HIGH_RANGE_FLOAT_MIN_VAL + delta
+ / HIGH_RANGE_FLOAT_STEP_MV;
+ vfloat_mv -= delta % HIGH_RANGE_FLOAT_STEP_MV;
+ } else {
+ /* very high range */
+ delta = vfloat_mv - VHIGH_RANGE_FLOAT_MIN_MV;
+ temp = VHIGH_RANGE_FLOAT_MIN_VAL + delta
+ / VHIGH_RANGE_FLOAT_STEP_MV;
+ vfloat_mv -= delta % VHIGH_RANGE_FLOAT_STEP_MV;
+ }
+
+ if (parallel_psy) {
+ prop.intval = vfloat_mv + 50;
+ rc = power_supply_set_property(parallel_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX, &prop);
+ if (rc)
+ dev_err(chip->dev, "Couldn't set float voltage on parallel psy rc: %d\n",
+ rc);
+ }
+
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + VFLOAT_CFG_REG,
+ VFLOAT_MASK, temp);
+
+ if (rc)
+ dev_err(chip->dev, "Couldn't set float voltage rc = %d\n", rc);
+ else
+ chip->vfloat_mv = vfloat_mv;
+
+ return rc;
+}
+
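Two illustrative encodings: a request of 4400 mV lands in the very-high range, so temp = 0x2C + (4400 - 4360) / 20 = 0x2E with no remainder and vfloat_mv is stored unchanged. A request of 4210 mV lands in the mid range: temp = 0x05 + (4210 - 3600) / 20 = 0x23, and the 10 mV division remainder is subtracted, so chip->vfloat_mv is cached as 4200 mV; the register's 20 mV step quietly rounds requests down.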
+static int smbchg_float_voltage_get(struct smbchg_chip *chip)
+{
+ return chip->vfloat_mv;
+}
+
+#define SFT_CFG 0xFD
+#define SFT_EN_MASK SMB_MASK(5, 4)
+#define SFT_TO_MASK SMB_MASK(3, 2)
+#define PRECHG_SFT_TO_MASK SMB_MASK(1, 0)
+#define SFT_TIMER_DISABLE_BIT BIT(5)
+#define PRECHG_SFT_TIMER_DISABLE_BIT BIT(4)
+#define SAFETY_TIME_MINUTES_SHIFT 2
+static int smbchg_safety_timer_enable(struct smbchg_chip *chip, bool enable)
+{
+ int rc;
+ u8 reg;
+
+ if (enable == chip->safety_timer_en)
+ return 0;
+
+ if (enable)
+ reg = 0;
+ else
+ reg = SFT_TIMER_DISABLE_BIT | PRECHG_SFT_TIMER_DISABLE_BIT;
+
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + SFT_CFG,
+ SFT_EN_MASK, reg);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't %s safety timer rc = %d\n",
+ enable ? "enable" : "disable", rc);
+ return rc;
+ }
+ chip->safety_timer_en = enable;
+ return 0;
+}
+
+enum skip_reason {
+ REASON_OTG_ENABLED = BIT(0),
+ REASON_FLASH_ENABLED = BIT(1)
+};
+
+#define BAT_IF_TRIM7_REG 0xF7
+#define CFG_750KHZ_BIT BIT(1)
+#define MISC_CFG_NTC_VOUT_REG 0xF3
+#define CFG_NTC_VOUT_FSW_BIT BIT(0)
+static int smbchg_switch_buck_frequency(struct smbchg_chip *chip,
+ bool flash_active)
+{
+ int rc;
+
+ if (!(chip->wa_flags & SMBCHG_FLASH_BUCK_SWITCH_FREQ_WA))
+ return 0;
+
+ if (chip->flash_active == flash_active) {
+ pr_smb(PR_STATUS, "Fsw not changed, flash_active: %d\n",
+ flash_active);
+ return 0;
+ }
+
+ /*
+ * As per the systems team recommendation, before the flash fires,
+ * buck switching frequency (Fsw) needs to be increased to 1 MHz. Once
+ * the flash is disabled, Fsw needs to be set back to 750 kHz.
+ */
+ rc = smbchg_sec_masked_write(chip, chip->misc_base +
+ MISC_CFG_NTC_VOUT_REG, CFG_NTC_VOUT_FSW_BIT,
+ flash_active ? CFG_NTC_VOUT_FSW_BIT : 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set switching frequency multiplier rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smbchg_sec_masked_write(chip, chip->bat_if_base + BAT_IF_TRIM7_REG,
+ CFG_750KHZ_BIT, flash_active ? 0 : CFG_750KHZ_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Cannot set switching freq: %d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_STATUS, "Fsw @ %sHz\n", flash_active ? "1M" : "750K");
+ chip->flash_active = flash_active;
+ return 0;
+}
+
+#define OTG_TRIM6 0xF6
+#define TR_ENB_SKIP_BIT BIT(2)
+#define OTG_EN_BIT BIT(0)
+static int smbchg_otg_pulse_skip_disable(struct smbchg_chip *chip,
+ enum skip_reason reason, bool disable)
+{
+ int rc;
+ bool disabled;
+
+ disabled = !!chip->otg_pulse_skip_dis;
+ pr_smb(PR_STATUS, "%s pulse skip, reason %d\n",
+ disable ? "disabling" : "enabling", reason);
+ if (disable)
+ chip->otg_pulse_skip_dis |= reason;
+ else
+ chip->otg_pulse_skip_dis &= ~reason;
+ if (disabled == !!chip->otg_pulse_skip_dis)
+ return 0;
+ disabled = !!chip->otg_pulse_skip_dis;
+
+ rc = smbchg_sec_masked_write(chip, chip->otg_base + OTG_TRIM6,
+ TR_ENB_SKIP_BIT, disabled ? TR_ENB_SKIP_BIT : 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't %s otg pulse skip rc = %d\n",
+ disabled ? "disable" : "enable", rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "%s pulse skip\n", disabled ? "disabled" : "enabled");
+ return 0;
+}
+
+#define LOW_PWR_OPTIONS_REG 0xFF
+#define FORCE_TLIM_BIT BIT(4)
+static int smbchg_force_tlim_en(struct smbchg_chip *chip, bool enable)
+{
+ int rc;
+
+ rc = smbchg_sec_masked_write(chip, chip->otg_base + LOW_PWR_OPTIONS_REG,
+ FORCE_TLIM_BIT, enable ? FORCE_TLIM_BIT : 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't %s otg force tlim rc = %d\n",
+ enable ? "enable" : "disable", rc);
+ return rc;
+ }
+ return rc;
+}
+
+static void smbchg_vfloat_adjust_check(struct smbchg_chip *chip)
+{
+ if (!chip->use_vfloat_adjustments)
+ return;
+
+ smbchg_stay_awake(chip, PM_REASON_VFLOAT_ADJUST);
+ pr_smb(PR_STATUS, "Starting vfloat adjustments\n");
+ schedule_delayed_work(&chip->vfloat_adjust_work, 0);
+}
+
+#define FV_STS_REG 0xC
+#define AICL_INPUT_STS_BIT BIT(6)
+static bool smbchg_is_input_current_limited(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->chgr_base + FV_STS_REG, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read FV_STS rc=%d\n", rc);
+ return false;
+ }
+
+ return !!(reg & AICL_INPUT_STS_BIT);
+}
+
+#define SW_ESR_PULSE_MS 1500
+static void smbchg_cc_esr_wa_check(struct smbchg_chip *chip)
+{
+ int rc, esr_count;
+
+ if (!(chip->wa_flags & SMBCHG_CC_ESR_WA))
+ return;
+
+ if (!is_usb_present(chip) && !is_dc_present(chip)) {
+ pr_smb(PR_STATUS, "No inputs present, skipping\n");
+ return;
+ }
+
+ if (get_prop_charge_type(chip) != POWER_SUPPLY_CHARGE_TYPE_FAST) {
+ pr_smb(PR_STATUS, "Not in fast charge, skipping\n");
+ return;
+ }
+
+ if (!smbchg_is_input_current_limited(chip)) {
+ pr_smb(PR_STATUS, "Not input current limited, skipping\n");
+ return;
+ }
+
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_UPDATE_NOW, 1);
+ rc = get_property_from_fg(chip,
+ POWER_SUPPLY_PROP_ESR_COUNT, &esr_count);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "could not read ESR counter rc = %d\n", rc);
+ return;
+ }
+
+ /*
+ * The esr_count is counting down the number of fuel gauge cycles
+ * before an ESR pulse is needed.
+ *
+ * After a successful ESR pulse, this count is reset to some
+ * high number like 28. If this reaches 0, then the fuel gauge
+ * hardware should force an ESR pulse.
+ *
+ * However, if the device is in constant current charge mode while
+ * being input current limited, the ESR pulse will not affect the
+ * battery current, so the measurement will fail.
+ *
+ * As a failsafe, force a manual ESR pulse if this value is read as
+ * 0.
+ */
+ if (esr_count != 0) {
+ pr_smb(PR_STATUS, "ESR count is not zero, skipping\n");
+ return;
+ }
+
+ pr_smb(PR_STATUS, "Lowering charge current for ESR pulse\n");
+ smbchg_stay_awake(chip, PM_ESR_PULSE);
+ smbchg_sw_esr_pulse_en(chip, true);
+ msleep(SW_ESR_PULSE_MS);
+ pr_smb(PR_STATUS, "Raising charge current for ESR pulse\n");
+ smbchg_relax(chip, PM_ESR_PULSE);
+ smbchg_sw_esr_pulse_en(chip, false);
+}
+
+static void smbchg_soc_changed(struct smbchg_chip *chip)
+{
+ smbchg_cc_esr_wa_check(chip);
+}
+
+#define DC_AICL_CFG 0xF3
+#define MISC_TRIM_OPT_15_8 0xF5
+#define USB_AICL_DEGLITCH_MASK (BIT(5) | BIT(4) | BIT(3))
+#define USB_AICL_DEGLITCH_SHORT (BIT(5) | BIT(4) | BIT(3))
+#define USB_AICL_DEGLITCH_LONG 0
+#define DC_AICL_DEGLITCH_MASK (BIT(5) | BIT(4) | BIT(3))
+#define DC_AICL_DEGLITCH_SHORT (BIT(5) | BIT(4) | BIT(3))
+#define DC_AICL_DEGLITCH_LONG 0
+#define AICL_RERUN_MASK (BIT(5) | BIT(4))
+#define AICL_RERUN_ON (BIT(5) | BIT(4))
+#define AICL_RERUN_OFF 0
+
+static int smbchg_hw_aicl_rerun_enable_indirect_cb(struct votable *votable,
+ void *data,
+ int enable,
+ const char *client)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = data;
+
+ if (enable < 0) {
+ pr_err("No voters\n");
+ enable = 0;
+ }
+ /*
+ * If the indirect voting result of all the clients is to enable hw aicl
+ * rerun, then remove our vote to disable hw aicl rerun
+ */
+ rc = vote(chip->hw_aicl_rerun_disable_votable,
+ HW_AICL_RERUN_ENABLE_INDIRECT_VOTER, !enable, 0);
+ if (rc < 0) {
+ pr_err("Couldn't vote for hw rerun rc= %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int smbchg_hw_aicl_rerun_disable_cb(struct votable *votable, void *data,
+ int disable,
+ const char *client)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = data;
+
+ if (disable < 0) {
+ pr_err("No voters\n");
+ disable = 0;
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->misc_base + MISC_TRIM_OPT_15_8,
+ AICL_RERUN_MASK, disable ? AICL_RERUN_OFF : AICL_RERUN_ON);
+ if (rc < 0)
+ pr_err("Couldn't write to MISC_TRIM_OPTIONS_15_8 rc=%d\n", rc);
+
+ return rc;
+}
+
+static int smbchg_aicl_deglitch_config_cb(struct votable *votable, void *data,
+ int shorter,
+ const char *client)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = data;
+
+ if (shorter < 0) {
+ pr_err("No voters\n");
+ shorter = 0;
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + USB_AICL_CFG,
+ USB_AICL_DEGLITCH_MASK,
+ shorter ? USB_AICL_DEGLITCH_SHORT : USB_AICL_DEGLITCH_LONG);
+ if (rc < 0) {
+ pr_err("Couldn't write to USB_AICL_CFG rc=%d\n", rc);
+ return rc;
+ }
+ rc = smbchg_sec_masked_write(chip,
+ chip->dc_chgpth_base + DC_AICL_CFG,
+ DC_AICL_DEGLITCH_MASK,
+ shorter ? DC_AICL_DEGLITCH_SHORT : DC_AICL_DEGLITCH_LONG);
+ if (rc < 0) {
+ pr_err("Couldn't write to DC_AICL_CFG rc=%d\n", rc);
+ return rc;
+ }
+ return rc;
+}
+
+static void smbchg_aicl_deglitch_wa_en(struct smbchg_chip *chip, bool en)
+{
+ int rc;
+
+ rc = vote(chip->aicl_deglitch_short_votable,
+ VARB_WORKAROUND_VOTER, en, 0);
+ if (rc < 0) {
+ pr_err("Couldn't vote %s deglitch rc=%d\n",
+ en ? "short" : "long", rc);
+ return;
+ }
+ pr_smb(PR_STATUS, "AICL deglitch set to %s\n", en ? "short" : "long");
+
+ rc = vote(chip->hw_aicl_rerun_enable_indirect_votable,
+ VARB_WORKAROUND_VOTER, en, 0);
+ if (rc < 0) {
+ pr_err("Couldn't vote hw aicl rerun rc= %d\n", rc);
+ return;
+ }
+ chip->aicl_deglitch_short = en;
+}
+
+static void smbchg_aicl_deglitch_wa_check(struct smbchg_chip *chip)
+{
+ union power_supply_propval prop = {0,};
+ int rc;
+ bool low_volt_chgr = true;
+
+ if (!(chip->wa_flags & SMBCHG_AICL_DEGLITCH_WA))
+ return;
+
+ if (!is_usb_present(chip) && !is_dc_present(chip)) {
+ pr_smb(PR_STATUS, "Charger removed\n");
+ smbchg_aicl_deglitch_wa_en(chip, false);
+ return;
+ }
+
+ if (!chip->bms_psy)
+ return;
+
+ if (is_usb_present(chip)) {
+ if (is_hvdcp_present(chip))
+ low_volt_chgr = false;
+ } else if (is_dc_present(chip)) {
+ if (chip->dc_psy_type == POWER_SUPPLY_TYPE_WIPOWER)
+ low_volt_chgr = false;
+ else
+ low_volt_chgr = chip->low_volt_dcin;
+ }
+
+ if (!low_volt_chgr) {
+ pr_smb(PR_STATUS, "High volt charger! Don't set deglitch\n");
+ smbchg_aicl_deglitch_wa_en(chip, false);
+ return;
+ }
+
+ /*
+ * It is possible that battery voltage went high above threshold
+ * when the charger is inserted and can go low because of system
+ * load. We shouldn't be reconfiguring AICL deglitch when this
+ * happens as it will lead to oscillation again which is being
+ * fixed here. Do it once when the battery voltage crosses the
+ * threshold (e.g. 4.2 V) and clear it only when the charger
+ * is removed.
+ */
+ if (!chip->vbat_above_headroom) {
+ rc = power_supply_get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN, &prop);
+ if (rc < 0) {
+ pr_err("could not read voltage_min, rc=%d\n", rc);
+ return;
+ }
+ chip->vbat_above_headroom = !prop.intval;
+ }
+ smbchg_aicl_deglitch_wa_en(chip, chip->vbat_above_headroom);
+}
+
+#define MISC_TEST_REG 0xE2
+#define BB_LOOP_DISABLE_ICL BIT(2)
+static int smbchg_icl_loop_disable_check(struct smbchg_chip *chip)
+{
+ bool icl_disabled = !chip->chg_otg_enabled && chip->flash_triggered;
+ int rc = 0;
+
+ if ((chip->wa_flags & SMBCHG_FLASH_ICL_DISABLE_WA)
+ && icl_disabled != chip->icl_disabled) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->misc_base + MISC_TEST_REG,
+ BB_LOOP_DISABLE_ICL,
+ icl_disabled ? BB_LOOP_DISABLE_ICL : 0);
+ chip->icl_disabled = icl_disabled;
+ }
+
+ return rc;
+}
+
+#define UNKNOWN_BATT_TYPE "Unknown Battery"
+#define LOADING_BATT_TYPE "Loading Battery Data"
+static int smbchg_config_chg_battery_type(struct smbchg_chip *chip)
+{
+ int rc = 0, max_voltage_uv = 0, fastchg_ma = 0, ret = 0, iterm_ua = 0;
+ struct device_node *batt_node, *profile_node;
+ struct device_node *node = chip->pdev->dev.of_node;
+ union power_supply_propval prop = {0,};
+
+ rc = power_supply_get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_BATTERY_TYPE, &prop);
+ if (rc) {
+ pr_smb(PR_STATUS, "Unable to read battery-type rc=%d\n", rc);
+ return 0;
+ }
+ if (!strcmp(prop.strval, UNKNOWN_BATT_TYPE) ||
+ !strcmp(prop.strval, LOADING_BATT_TYPE)) {
+ pr_smb(PR_MISC, "Battery-type not identified\n");
+ return 0;
+ }
+ /* quit if there is no change in the battery-type from previous */
+ if (chip->battery_type && !strcmp(prop.strval, chip->battery_type))
+ return 0;
+
+ chip->battery_type = prop.strval;
+ batt_node = of_parse_phandle(node, "qcom,battery-data", 0);
+ if (!batt_node) {
+ pr_smb(PR_MISC, "No batterydata available\n");
+ return 0;
+ }
+
+ rc = power_supply_get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_RESISTANCE_ID, &prop);
+ if (rc < 0) {
+ pr_smb(PR_STATUS, "Unable to read battery-id rc=%d\n", rc);
+ return 0;
+ }
+
+ profile_node = of_batterydata_get_best_profile(batt_node,
+ prop.intval / 1000, NULL);
+ if (IS_ERR_OR_NULL(profile_node)) {
+ rc = PTR_ERR(profile_node);
+ pr_err("couldn't find profile handle %d\n", rc);
+ return rc;
+ }
+
+ /* change vfloat */
+ rc = of_property_read_u32(profile_node, "qcom,max-voltage-uv",
+ &max_voltage_uv);
+ if (rc) {
+ pr_warn("couldn't find battery max voltage rc=%d\n", rc);
+ ret = rc;
+ } else {
+ if (chip->vfloat_mv != (max_voltage_uv / 1000)) {
+ pr_info("Vfloat changed from %dmV to %dmV for battery-type %s\n",
+ chip->vfloat_mv, (max_voltage_uv / 1000),
+ chip->battery_type);
+ rc = smbchg_float_voltage_set(chip,
+ (max_voltage_uv / 1000));
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set float voltage rc = %d\n", rc);
+ return rc;
+ }
+ }
+ }
+
+ /* change chg term */
+ rc = of_property_read_u32(profile_node, "qcom,chg-term-ua",
+ &iterm_ua);
+ if (rc && rc != -EINVAL) {
+ pr_warn("couldn't read battery term current=%d\n", rc);
+ ret = rc;
+ } else if (!rc) {
+ if (chip->iterm_ma != (iterm_ua / 1000)
+ && !chip->iterm_disabled) {
+ pr_info("Term current changed from %dmA to %dmA for battery-type %s\n",
+ chip->iterm_ma, (iterm_ua / 1000),
+ chip->battery_type);
+ rc = smbchg_iterm_set(chip,
+ (iterm_ua / 1000));
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set iterm rc = %d\n", rc);
+ return rc;
+ }
+ }
+ chip->iterm_ma = iterm_ua / 1000;
+ }
+
+ /*
+ * Only configure from profile if fastchg-ma is not defined in the
+ * charger device node.
+ */
+ if (!of_find_property(chip->pdev->dev.of_node,
+ "qcom,fastchg-current-ma", NULL)) {
+ rc = of_property_read_u32(profile_node,
+ "qcom,fastchg-current-ma", &fastchg_ma);
+ if (rc) {
+ ret = rc;
+ } else {
+ pr_smb(PR_MISC,
+ "fastchg-ma changed from to %dma for battery-type %s\n",
+ fastchg_ma, chip->battery_type);
+ rc = vote(chip->fcc_votable, BATT_TYPE_FCC_VOTER, true,
+ fastchg_ma);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't vote for fastchg current rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+ }
+
+ return ret;
+}
+
+#define MAX_INV_BATT_ID 7700
+#define MIN_INV_BATT_ID 7300
+static void check_battery_type(struct smbchg_chip *chip)
+{
+ union power_supply_propval prop = {0,};
+ bool en;
+
+ if (!chip->bms_psy && chip->bms_psy_name)
+ chip->bms_psy =
+ power_supply_get_by_name((char *)chip->bms_psy_name);
+ if (chip->bms_psy) {
+ power_supply_get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_BATTERY_TYPE, &prop);
+ en = (strcmp(prop.strval, UNKNOWN_BATT_TYPE) != 0
+ || chip->charge_unknown_battery)
+ && (strcmp(prop.strval, LOADING_BATT_TYPE) != 0);
+ vote(chip->battchg_suspend_votable,
+ BATTCHG_UNKNOWN_BATTERY_EN_VOTER, !en, 0);
+
+ if (!chip->skip_usb_suspend_for_fake_battery) {
+ power_supply_get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_RESISTANCE_ID, &prop);
+ /* suspend USB path for invalid battery-id */
+ en = (prop.intval <= MAX_INV_BATT_ID &&
+ prop.intval >= MIN_INV_BATT_ID) ? 1 : 0;
+ vote(chip->usb_suspend_votable, FAKE_BATTERY_EN_VOTER,
+ en, 0);
+ }
+ }
+}
+
+static void smbchg_external_power_changed(struct power_supply *psy)
+{
+ struct smbchg_chip *chip = power_supply_get_drvdata(psy);
+ union power_supply_propval prop = {0,};
+ int rc, current_limit = 0, soc;
+ enum power_supply_type usb_supply_type;
+ char *usb_type_name = "null";
+
+ if (chip->bms_psy_name)
+ chip->bms_psy =
+ power_supply_get_by_name((char *)chip->bms_psy_name);
+
+ smbchg_aicl_deglitch_wa_check(chip);
+ if (chip->bms_psy) {
+ check_battery_type(chip);
+ soc = get_prop_batt_capacity(chip);
+ if (chip->previous_soc != soc) {
+ chip->previous_soc = soc;
+ smbchg_soc_changed(chip);
+ }
+
+ rc = smbchg_config_chg_battery_type(chip);
+ if (rc)
+ pr_smb(PR_MISC,
+ "Couldn't update charger configuration rc=%d\n",
+ rc);
+ }
+
+ rc = power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED, &prop);
+ if (rc == 0)
+ vote(chip->usb_suspend_votable, POWER_SUPPLY_EN_VOTER,
+ !prop.intval, 0);
+
+ current_limit = chip->usb_current_max / 1000;
+
+ /* Override if type-c charger used */
+ if (chip->typec_current_ma > 500 &&
+ current_limit < chip->typec_current_ma)
+ current_limit = chip->typec_current_ma;
+
+ read_usb_type(chip, &usb_type_name, &usb_supply_type);
+
+ if (usb_supply_type != POWER_SUPPLY_TYPE_USB)
+ goto skip_current_for_non_sdp;
+
+ pr_smb(PR_MISC, "usb type = %s current_limit = %d\n",
+ usb_type_name, current_limit);
+
+ rc = vote(chip->usb_icl_votable, PSY_ICL_VOTER, true,
+ current_limit);
+ if (rc < 0)
+ pr_err("Couldn't update USB PSY ICL vote rc=%d\n", rc);
+
+skip_current_for_non_sdp:
+ smbchg_vfloat_adjust_check(chip);
+
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+}
+
+static int smbchg_otg_regulator_enable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ chip->otg_retries = 0;
+ chip->chg_otg_enabled = true;
+ smbchg_icl_loop_disable_check(chip);
+ smbchg_otg_pulse_skip_disable(chip, REASON_OTG_ENABLED, true);
+
+ /* If pin control mode then return from here */
+ if (chip->otg_pinctrl)
+ return rc;
+
+ /* sleep to make sure the pulse skip is actually disabled */
+ msleep(20);
+ rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
+ OTG_EN_BIT, OTG_EN_BIT);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't enable OTG mode rc=%d\n", rc);
+ else
+ chip->otg_enable_time = ktime_get();
+ pr_smb(PR_STATUS, "Enabling OTG Boost\n");
+ return rc;
+}
+
+static int smbchg_otg_regulator_disable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ if (!chip->otg_pinctrl) {
+ rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
+ OTG_EN_BIT, 0);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't disable OTG mode rc=%d\n",
+ rc);
+ }
+
+ chip->chg_otg_enabled = false;
+ smbchg_otg_pulse_skip_disable(chip, REASON_OTG_ENABLED, false);
+ smbchg_icl_loop_disable_check(chip);
+ pr_smb(PR_STATUS, "Disabling OTG Boost\n");
+ return rc;
+}
+
+static int smbchg_otg_regulator_is_enable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ u8 reg = 0;
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ rc = smbchg_read(chip, &reg, chip->bat_if_base + CMD_CHG_REG, 1);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read OTG enable bit rc=%d\n", rc);
+ return rc;
+ }
+
+ return (reg & OTG_EN_BIT) ? 1 : 0;
+}
+
+struct regulator_ops smbchg_otg_reg_ops = {
+ .enable = smbchg_otg_regulator_enable,
+ .disable = smbchg_otg_regulator_disable,
+ .is_enabled = smbchg_otg_regulator_is_enable,
+};
+
+#define USBIN_CHGR_CFG 0xF1
+#define ADAPTER_ALLOWANCE_MASK 0x7
+#define USBIN_ADAPTER_9V 0x3
+#define USBIN_ADAPTER_5V_9V_CONT 0x2
+#define USBIN_ADAPTER_5V_UNREGULATED_9V 0x5
+#define HVDCP_EN_BIT BIT(3)
+static int smbchg_external_otg_regulator_enable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ rc = vote(chip->usb_suspend_votable, OTG_EN_VOTER, true, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't suspend charger rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_read(chip, &chip->original_usbin_allowance,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb allowance rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * To disallow source detect and usbin_uv interrupts, set the adapter
+ * allowance to 9V, so that the audio boost operating in reverse never
+ * gets detected as a valid input
+ */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't disable HVDCP rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG,
+ 0xFF, USBIN_ADAPTER_9V);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't write usb allowance rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_STATUS, "Enabling OTG Boost\n");
+ return rc;
+}
+
+static int smbchg_external_otg_regulator_disable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ rc = vote(chip->usb_suspend_votable, OTG_EN_VOTER, false, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't unsuspend charger rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Reenable HVDCP and set the adapter allowance back to the original
+ * value in order to allow normal USBs to be recognized as a valid
+ * input.
+ */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, HVDCP_EN_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't enable HVDCP rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG,
+ 0xFF, chip->original_usbin_allowance);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't write usb allowance rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_STATUS, "Disabling OTG Boost\n");
+ return rc;
+}
+
+static int smbchg_external_otg_regulator_is_enable(struct regulator_dev *rdev)
+{
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ return get_client_vote(chip->usb_suspend_votable, OTG_EN_VOTER);
+}
+
+struct regulator_ops smbchg_external_otg_reg_ops = {
+ .enable = smbchg_external_otg_regulator_enable,
+ .disable = smbchg_external_otg_regulator_disable,
+ .is_enabled = smbchg_external_otg_regulator_is_enable,
+};
+
+static int smbchg_regulator_init(struct smbchg_chip *chip)
+{
+ int rc = 0;
+ struct regulator_config cfg = {};
+ struct device_node *regulator_node;
+
+ cfg.dev = chip->dev;
+ cfg.driver_data = chip;
+
+ chip->otg_vreg.rdesc.owner = THIS_MODULE;
+ chip->otg_vreg.rdesc.type = REGULATOR_VOLTAGE;
+ chip->otg_vreg.rdesc.ops = &smbchg_otg_reg_ops;
+ chip->otg_vreg.rdesc.of_match = "qcom,smbcharger-boost-otg";
+ chip->otg_vreg.rdesc.name = "qcom,smbcharger-boost-otg";
+
+ chip->otg_vreg.rdev = devm_regulator_register(chip->dev,
+ &chip->otg_vreg.rdesc, &cfg);
+ if (IS_ERR(chip->otg_vreg.rdev)) {
+ rc = PTR_ERR(chip->otg_vreg.rdev);
+ chip->otg_vreg.rdev = NULL;
+ if (rc != -EPROBE_DEFER)
+ dev_err(chip->dev,
+ "OTG reg failed, rc=%d\n", rc);
+ }
+ if (rc)
+ return rc;
+
+ regulator_node = of_get_child_by_name(chip->dev->of_node,
+ "qcom,smbcharger-external-otg");
+ if (!regulator_node) {
+ dev_dbg(chip->dev, "external-otg node absent\n");
+ return 0;
+ }
+
+ chip->ext_otg_vreg.rdesc.owner = THIS_MODULE;
+ chip->ext_otg_vreg.rdesc.type = REGULATOR_VOLTAGE;
+ chip->ext_otg_vreg.rdesc.ops = &smbchg_external_otg_reg_ops;
+ chip->ext_otg_vreg.rdesc.of_match = "qcom,smbcharger-external-otg";
+ chip->ext_otg_vreg.rdesc.name = "qcom,smbcharger-external-otg";
+ if (of_get_property(chip->dev->of_node, "otg-parent-supply", NULL))
+ chip->ext_otg_vreg.rdesc.supply_name = "otg-parent";
+ cfg.dev = chip->dev;
+ cfg.driver_data = chip;
+
+ chip->ext_otg_vreg.rdev = devm_regulator_register(chip->dev,
+ &chip->ext_otg_vreg.rdesc,
+ &cfg);
+ if (IS_ERR(chip->ext_otg_vreg.rdev)) {
+ rc = PTR_ERR(chip->ext_otg_vreg.rdev);
+ chip->ext_otg_vreg.rdev = NULL;
+ if (rc != -EPROBE_DEFER)
+ dev_err(chip->dev,
+ "external OTG reg failed, rc=%d\n", rc);
+ }
+
+ return rc;
+}
+
+#define CMD_CHG_LED_REG 0x43
+#define CHG_LED_CTRL_BIT BIT(0)
+#define LED_SW_CTRL_BIT 0x1
+#define LED_CHG_CTRL_BIT 0x0
+#define CHG_LED_ON 0x03
+#define CHG_LED_OFF 0x00
+#define LED_BLINKING_PATTERN1 0x01
+#define LED_BLINKING_PATTERN2 0x02
+#define LED_BLINKING_CFG_MASK SMB_MASK(2, 1)
+#define CHG_LED_SHIFT 1
+static int smbchg_chg_led_controls(struct smbchg_chip *chip)
+{
+ u8 reg, mask;
+ int rc;
+
+ if (chip->cfg_chg_led_sw_ctrl) {
+ /* turn-off LED by default for software control */
+ mask = CHG_LED_CTRL_BIT | LED_BLINKING_CFG_MASK;
+ reg = LED_SW_CTRL_BIT;
+ } else {
+ mask = CHG_LED_CTRL_BIT;
+ reg = LED_CHG_CTRL_BIT;
+ }
+
+ rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_LED_REG,
+ mask, reg);
+ if (rc < 0)
+ dev_err(chip->dev,
+ "Couldn't write LED_CTRL_BIT rc=%d\n", rc);
+ return rc;
+}
+
+static void smbchg_chg_led_brightness_set(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ struct smbchg_chip *chip = container_of(cdev,
+ struct smbchg_chip, led_cdev);
+ union power_supply_propval pval = {0, };
+ u8 reg;
+ int rc;
+
+ reg = (value > LED_OFF) ? CHG_LED_ON << CHG_LED_SHIFT :
+ CHG_LED_OFF << CHG_LED_SHIFT;
+ pval.intval = value > LED_OFF ? 1 : 0;
+ power_supply_set_property(chip->bms_psy, POWER_SUPPLY_PROP_HI_POWER,
+ &pval);
+ pr_smb(PR_STATUS,
+ "set the charger led brightness to value=%d\n",
+ value);
+ rc = smbchg_sec_masked_write(chip,
+ chip->bat_if_base + CMD_CHG_LED_REG,
+ LED_BLINKING_CFG_MASK, reg);
+ if (rc)
+ dev_err(chip->dev, "Couldn't write CHG_LED rc=%d\n",
+ rc);
+}
+
+static enum
+led_brightness smbchg_chg_led_brightness_get(struct led_classdev *cdev)
+{
+ struct smbchg_chip *chip = container_of(cdev,
+ struct smbchg_chip, led_cdev);
+ u8 reg_val, chg_led_sts;
+ int rc;
+
+ rc = smbchg_read(chip, &reg_val, chip->bat_if_base + CMD_CHG_LED_REG,
+ 1);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read CHG_LED_REG sts rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ chg_led_sts = (reg_val & LED_BLINKING_CFG_MASK) >> CHG_LED_SHIFT;
+
+ pr_smb(PR_STATUS, "chg_led_sts = %02x\n", chg_led_sts);
+
+ return (chg_led_sts == CHG_LED_OFF) ? LED_OFF : LED_FULL;
+}
+
+static void smbchg_chg_led_blink_set(struct smbchg_chip *chip,
+ unsigned long blinking)
+{
+ union power_supply_propval pval = {0, };
+ u8 reg;
+ int rc;
+
+ pval.intval = (blinking == 0) ? 0 : 1;
+ power_supply_set_property(chip->bms_psy, POWER_SUPPLY_PROP_HI_POWER,
+ &pval);
+
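+ /* 0 = off, 1 = pattern 1, 2 = pattern 2; others fall back to pattern 1 */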
+ if (blinking == 0) {
+ reg = CHG_LED_OFF << CHG_LED_SHIFT;
+ } else {
+ if (blinking == 1)
+ reg = LED_BLINKING_PATTERN1 << CHG_LED_SHIFT;
+ else if (blinking == 2)
+ reg = LED_BLINKING_PATTERN2 << CHG_LED_SHIFT;
+ else
+ reg = LED_BLINKING_PATTERN1 << CHG_LED_SHIFT;
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->bat_if_base + CMD_CHG_LED_REG,
+ LED_BLINKING_CFG_MASK, reg);
+ if (rc)
+ dev_err(chip->dev, "Couldn't write CHG_LED rc=%d\n",
+ rc);
+}
+
+static ssize_t smbchg_chg_led_blink_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct led_classdev *cdev = dev_get_drvdata(dev);
+ struct smbchg_chip *chip = container_of(cdev, struct smbchg_chip,
+ led_cdev);
+ unsigned long blinking;
+ ssize_t rc = -EINVAL;
+
+ rc = kstrtoul(buf, 10, &blinking);
+ if (rc)
+ return rc;
+
+ smbchg_chg_led_blink_set(chip, blinking);
+
+ return len;
+}
+
+static DEVICE_ATTR(blink, 0664, NULL, smbchg_chg_led_blink_store);
+
+static struct attribute *led_blink_attributes[] = {
+ &dev_attr_blink.attr,
+ NULL,
+};
+
+static struct attribute_group smbchg_led_attr_group = {
+ .attrs = led_blink_attributes
+};
+
+static int smbchg_register_chg_led(struct smbchg_chip *chip)
+{
+ int rc;
+
+ chip->led_cdev.name = "red";
+ chip->led_cdev.brightness_set = smbchg_chg_led_brightness_set;
+ chip->led_cdev.brightness_get = smbchg_chg_led_brightness_get;
+
+ rc = led_classdev_register(chip->dev, &chip->led_cdev);
+ if (rc) {
+ dev_err(chip->dev, "unable to register charger led, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = sysfs_create_group(&chip->led_cdev.dev->kobj,
+ &smbchg_led_attr_group);
+ if (rc) {
+ dev_err(chip->dev, "led sysfs rc: %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int vf_adjust_low_threshold = 5;
+module_param(vf_adjust_low_threshold, int, 0644);
+
+static int vf_adjust_high_threshold = 7;
+module_param(vf_adjust_high_threshold, int, 0644);
+
+static int vf_adjust_n_samples = 10;
+module_param(vf_adjust_n_samples, int, 0644);
+
+static int vf_adjust_max_delta_mv = 40;
+module_param(vf_adjust_max_delta_mv, int, 0644);
+
+static int vf_adjust_trim_steps_per_adjust = 1;
+module_param(vf_adjust_trim_steps_per_adjust, int, 0644);
+
+#define CENTER_TRIM_CODE 7
+#define MAX_LIN_CODE 14
+#define MAX_TRIM_CODE 15
+#define SCALE_SHIFT 4
+#define VF_TRIM_OFFSET_MASK SMB_MASK(3, 0)
+#define VF_STEP_SIZE_MV 10
+#define SCALE_LSB_MV 17
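+/*
+ * Move the float-voltage trim by one step: prefer adjusting the 10mV
+ * offset field; once that saturates, bump the 17mV scale field and step
+ * the offset back the other way so the net change stays small.
+ */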
+static int smbchg_trim_add_steps(int prev_trim, int delta_steps)
+{
+ int scale_steps;
+ int linear_offset, linear_scale;
+ int offset_code = prev_trim & VF_TRIM_OFFSET_MASK;
+ int scale_code = (prev_trim & ~VF_TRIM_OFFSET_MASK) >> SCALE_SHIFT;
+
+ if (abs(delta_steps) > 1) {
+ pr_smb(PR_STATUS,
+ "Can't trim multiple steps delta_steps = %d\n",
+ delta_steps);
+ return prev_trim;
+ }
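+ /*
+ * The 4-bit trim codes are folded around CENTER_TRIM_CODE; map the
+ * offset and scale codes onto a linear 0..MAX_LIN_CODE scale so the
+ * step arithmetic below is a plain add/subtract.
+ */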
+ if (offset_code <= CENTER_TRIM_CODE)
+ linear_offset = offset_code + CENTER_TRIM_CODE;
+ else if (offset_code > CENTER_TRIM_CODE)
+ linear_offset = MAX_TRIM_CODE - offset_code;
+
+ if (scale_code <= CENTER_TRIM_CODE)
+ linear_scale = scale_code + CENTER_TRIM_CODE;
+ else if (scale_code > CENTER_TRIM_CODE)
+ linear_scale = scale_code - (CENTER_TRIM_CODE + 1);
+
+ /* check if we can accommodate delta steps with just the offset */
+ if (linear_offset + delta_steps >= 0
+ && linear_offset + delta_steps <= MAX_LIN_CODE) {
+ linear_offset += delta_steps;
+
+ if (linear_offset > CENTER_TRIM_CODE)
+ offset_code = linear_offset - CENTER_TRIM_CODE;
+ else
+ offset_code = MAX_TRIM_CODE - linear_offset;
+
+ return (prev_trim & ~VF_TRIM_OFFSET_MASK) | offset_code;
+ }
+
+ /* changing offset cannot satisfy delta steps, change the scale bits */
+ scale_steps = delta_steps > 0 ? 1 : -1;
+
+ if (linear_scale + scale_steps < 0
+ || linear_scale + scale_steps > MAX_LIN_CODE) {
+ pr_smb(PR_STATUS,
+ "Can't trim scale_steps = %d delta_steps = %d\n",
+ scale_steps, delta_steps);
+ return prev_trim;
+ }
+
+ linear_scale += scale_steps;
+
+ if (linear_scale > CENTER_TRIM_CODE)
+ scale_code = linear_scale - CENTER_TRIM_CODE;
+ else
+ scale_code = linear_scale + (CENTER_TRIM_CODE + 1);
+ prev_trim = (prev_trim & VF_TRIM_OFFSET_MASK)
+ | scale_code << SCALE_SHIFT;
+
+ /*
+ * now that we have changed scale which is a 17mV jump, change the
+ * offset bits (10mV) too so the effective change is just 7mV
+ */
+ delta_steps = -1 * delta_steps;
+
+ linear_offset = clamp(linear_offset + delta_steps, 0, MAX_LIN_CODE);
+ if (linear_offset > CENTER_TRIM_CODE)
+ offset_code = linear_offset - CENTER_TRIM_CODE;
+ else
+ offset_code = MAX_TRIM_CODE - linear_offset;
+
+ return (prev_trim & ~VF_TRIM_OFFSET_MASK) | offset_code;
+}
+
+#define TRIM_14 0xFE
+#define VF_TRIM_MASK 0xFF
+static int smbchg_adjust_vfloat_mv_trim(struct smbchg_chip *chip,
+ int delta_mv)
+{
+ int sign, delta_steps, rc = 0;
+ u8 prev_trim, new_trim;
+ int i;
+
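+ /*
+ * Convert the requested delta (mV) into 10mV trim steps, rounding
+ * half a step away from zero.
+ */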
+ sign = delta_mv > 0 ? 1 : -1;
+ delta_steps = (delta_mv + sign * VF_STEP_SIZE_MV / 2)
+ / VF_STEP_SIZE_MV;
+
+ rc = smbchg_read(chip, &prev_trim, chip->misc_base + TRIM_14, 1);
+ if (rc) {
+ dev_err(chip->dev, "Unable to read trim 14: %d\n", rc);
+ return rc;
+ }
+
+ for (i = 1; i <= abs(delta_steps)
+ && i <= vf_adjust_trim_steps_per_adjust; i++) {
+ new_trim = (u8)smbchg_trim_add_steps(prev_trim,
+ delta_steps > 0 ? 1 : -1);
+ if (new_trim == prev_trim) {
+ pr_smb(PR_STATUS,
+ "VFloat trim unchanged from %02x\n", prev_trim);
+ /* treat no trim change as an error */
+ return -EINVAL;
+ }
+
+ rc = smbchg_sec_masked_write(chip, chip->misc_base + TRIM_14,
+ VF_TRIM_MASK, new_trim);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't change vfloat trim rc=%d\n", rc);
+ }
+ pr_smb(PR_STATUS,
+ "VFlt trim %02x to %02x, delta steps: %d\n",
+ prev_trim, new_trim, delta_steps);
+ prev_trim = new_trim;
+ }
+
+ return rc;
+}
+
+#define VFLOAT_RESAMPLE_DELAY_MS 10000
+static void smbchg_vfloat_adjust_work(struct work_struct *work)
+{
+ struct smbchg_chip *chip = container_of(work,
+ struct smbchg_chip,
+ vfloat_adjust_work.work);
+ int vbat_uv, vbat_mv, ibat_ua, rc, delta_vfloat_mv;
+ bool taper, enable;
+
+ smbchg_stay_awake(chip, PM_REASON_VFLOAT_ADJUST);
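+ /* only adjust vfloat during taper charge with parallel charging idle */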
+ taper = (get_prop_charge_type(chip)
+ == POWER_SUPPLY_CHARGE_TYPE_TAPER);
+ enable = taper && (chip->parallel.current_max_ma == 0);
+
+ if (!enable) {
+ pr_smb(PR_MISC,
+ "Stopping vfloat adj taper=%d parallel_ma = %d\n",
+ taper, chip->parallel.current_max_ma);
+ goto stop;
+ }
+
+ if (get_prop_batt_health(chip) != POWER_SUPPLY_HEALTH_GOOD) {
+ pr_smb(PR_STATUS, "JEITA active, skipping\n");
+ goto stop;
+ }
+
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_UPDATE_NOW, 1);
+ rc = get_property_from_fg(chip,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW, &vbat_uv);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "bms psy does not support voltage rc = %d\n", rc);
+ goto stop;
+ }
+ vbat_mv = vbat_uv / 1000;
+
+ if ((vbat_mv - chip->vfloat_mv) < -1 * vf_adjust_max_delta_mv) {
+ pr_smb(PR_STATUS, "Skip vbat out of range: %d\n", vbat_mv);
+ goto reschedule;
+ }
+
+ rc = get_property_from_fg(chip,
+ POWER_SUPPLY_PROP_CURRENT_NOW, &ibat_ua);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "bms psy does not support current_now rc = %d\n", rc);
+ goto stop;
+ }
+
+ if (ibat_ua / 1000 > -chip->iterm_ma) {
+ pr_smb(PR_STATUS, "Skip ibat too high: %d\n", ibat_ua);
+ goto reschedule;
+ }
+
+ pr_smb(PR_STATUS, "sample number = %d vbat_mv = %d ibat_ua = %d\n",
+ chip->n_vbat_samples,
+ vbat_mv,
+ ibat_ua);
+
+ chip->max_vbat_sample = max(chip->max_vbat_sample, vbat_mv);
+ chip->n_vbat_samples += 1;
+ if (chip->n_vbat_samples < vf_adjust_n_samples) {
+ pr_smb(PR_STATUS, "Skip %d samples; max = %d\n",
+ chip->n_vbat_samples, chip->max_vbat_sample);
+ goto reschedule;
+ }
+ /* if max vbat > target vfloat, delta_vfloat_mv could be negative */
+ delta_vfloat_mv = chip->vfloat_mv - chip->max_vbat_sample;
+ pr_smb(PR_STATUS, "delta_vfloat_mv = %d, samples = %d, mvbat = %d\n",
+ delta_vfloat_mv, chip->n_vbat_samples, chip->max_vbat_sample);
+ /*
+ * enough valid samples have been collected; adjust the trim codes
+ * based on the maximum of the collected vbat samples if necessary
+ */
+ if (delta_vfloat_mv > vf_adjust_high_threshold
+ || delta_vfloat_mv < -1 * vf_adjust_low_threshold) {
+ rc = smbchg_adjust_vfloat_mv_trim(chip, delta_vfloat_mv);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "Stopping vfloat adj after trim adj rc = %d\n",
+ rc);
+ goto stop;
+ }
+ chip->max_vbat_sample = 0;
+ chip->n_vbat_samples = 0;
+ goto reschedule;
+ }
+
+stop:
+ chip->max_vbat_sample = 0;
+ chip->n_vbat_samples = 0;
+ smbchg_relax(chip, PM_REASON_VFLOAT_ADJUST);
+ return;
+
+reschedule:
+ schedule_delayed_work(&chip->vfloat_adjust_work,
+ msecs_to_jiffies(VFLOAT_RESAMPLE_DELAY_MS));
+ return;
+}
+
+static int smbchg_charging_status_change(struct smbchg_chip *chip)
+{
+ smbchg_vfloat_adjust_check(chip);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS,
+ get_prop_batt_status(chip));
+ return 0;
+}
+
+#define BB_CLMP_SEL 0xF8
+#define BB_CLMP_MASK SMB_MASK(1, 0)
+#define BB_CLMP_VFIX_3338MV 0x1
+#define BB_CLMP_VFIX_3512MV 0x2
+static int smbchg_set_optimal_charging_mode(struct smbchg_chip *chip, int type)
+{
+ int rc;
+ bool hvdcp2 = (type == POWER_SUPPLY_TYPE_USB_HVDCP
+ && smbchg_is_usbin_active_pwr_src(chip));
+
+ /*
+ * Set the charger switching frequency to 1 MHz if HVDCP 2.0,
+ * or 750 kHz otherwise
+ */
+ rc = smbchg_sec_masked_write(chip,
+ chip->bat_if_base + BAT_IF_TRIM7_REG,
+ CFG_750KHZ_BIT, hvdcp2 ? 0 : CFG_750KHZ_BIT);
+ if (rc) {
+ dev_err(chip->dev, "Cannot set switching freq: %d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Set the charger switch frequency clamp voltage threshold to 3.338V
+ * if HVDCP 2.0, or 3.512V otherwise.
+ */
+ rc = smbchg_sec_masked_write(chip, chip->bat_if_base + BB_CLMP_SEL,
+ BB_CLMP_MASK,
+ hvdcp2 ? BB_CLMP_VFIX_3338MV : BB_CLMP_VFIX_3512MV);
+ if (rc) {
+ dev_err(chip->dev, "Cannot set switching freq: %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+#define DEFAULT_SDP_MA 100
+#define DEFAULT_CDP_MA 1500
+static int smbchg_change_usb_supply_type(struct smbchg_chip *chip,
+ enum power_supply_type type)
+{
+ int rc, current_limit_ma;
+
+ /*
+ * if the type is not unknown, set the type before changing ICL vote
+ * in order to ensure that the correct current limit registers are
+ * used
+ */
+ if (type != POWER_SUPPLY_TYPE_UNKNOWN)
+ chip->usb_supply_type = type;
+
+ /*
+ * Type-C only supports STD(900), MEDIUM(1500) and HIGH(3000) current
+ * modes; skip all BC 1.2 current limits if an external Type-C
+ * controller is present. Note: SDP current still follows USB
+ * enumeration notifications.
+ */
+ if (chip->typec_psy && (type != POWER_SUPPLY_TYPE_USB))
+ current_limit_ma = chip->typec_current_ma;
+ else if (type == POWER_SUPPLY_TYPE_USB)
+ current_limit_ma = DEFAULT_SDP_MA;
+ else if (type == POWER_SUPPLY_TYPE_USB_CDP)
+ current_limit_ma = DEFAULT_CDP_MA;
+ else if (type == POWER_SUPPLY_TYPE_USB_HVDCP)
+ current_limit_ma = smbchg_default_hvdcp_icl_ma;
+ else if (type == POWER_SUPPLY_TYPE_USB_HVDCP_3)
+ current_limit_ma = smbchg_default_hvdcp3_icl_ma;
+ else
+ current_limit_ma = smbchg_default_dcp_icl_ma;
+
+ pr_smb(PR_STATUS, "Type %d: setting mA = %d\n",
+ type, current_limit_ma);
+ rc = vote(chip->usb_icl_votable, PSY_ICL_VOTER, true,
+ current_limit_ma);
+ if (rc < 0) {
+ pr_err("Couldn't vote for new USB ICL rc=%d\n", rc);
+ goto out;
+ }
+
+ /* otherwise if it is unknown, set type after the vote */
+ if (type == POWER_SUPPLY_TYPE_UNKNOWN)
+ chip->usb_supply_type = type;
+
+ if (!chip->skip_usb_notification)
+ power_supply_changed(chip->usb_psy);
+
+ /* set the correct buck switching frequency */
+ rc = smbchg_set_optimal_charging_mode(chip, type);
+ if (rc < 0)
+ pr_err("Couldn't set charger optimal mode rc=%d\n", rc);
+
+out:
+ return rc;
+}
+
+#define HVDCP_ADAPTER_SEL_MASK SMB_MASK(5, 4)
+#define HVDCP_5V 0x00
+#define HVDCP_9V 0x10
+#define USB_CMD_HVDCP_1 0x42
+#define FORCE_HVDCP_2p0 BIT(3)
+
+static int force_9v_hvdcp(struct smbchg_chip *chip)
+{
+ int rc;
+
+ /* Force 5V HVDCP */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_5V);
+ if (rc) {
+ pr_err("Couldn't set hvdcp config in chgpath_chg rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Force QC2.0: pulse FORCE_HVDCP_2p0 (set, then clear) */
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + USB_CMD_HVDCP_1,
+ FORCE_HVDCP_2p0, FORCE_HVDCP_2p0);
+ rc |= smbchg_masked_write(chip,
+ chip->usb_chgpth_base + USB_CMD_HVDCP_1,
+ FORCE_HVDCP_2p0, 0);
+ if (rc < 0) {
+ pr_err("Couldn't force QC2.0 rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Delay to switch into HVDCP 2.0 and avoid UV */
+ msleep(500);
+
+ /* Force 9V HVDCP */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_9V);
+ if (rc)
+ pr_err("Couldn't set hvdcp config in chgpath_chg rc=%d\n", rc);
+
+ return rc;
+}
+
+static void smbchg_hvdcp_det_work(struct work_struct *work)
+{
+ struct smbchg_chip *chip = container_of(work,
+ struct smbchg_chip,
+ hvdcp_det_work.work);
+ int rc;
+
+ if (is_hvdcp_present(chip)) {
+ if (!chip->hvdcp3_supported &&
+ (chip->wa_flags & SMBCHG_HVDCP_9V_EN_WA)) {
+ /* force HVDCP 2.0 */
+ rc = force_9v_hvdcp(chip);
+ if (rc)
+ pr_err("could not force 9V HVDCP, continuing rc=%d\n",
+ rc);
+ }
+ smbchg_change_usb_supply_type(chip,
+ POWER_SUPPLY_TYPE_USB_HVDCP);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_aicl_deglitch_wa_check(chip);
+ }
+ smbchg_relax(chip, PM_DETECT_HVDCP);
+}
+
+static int set_usb_psy_dp_dm(struct smbchg_chip *chip, int state)
+{
+ int rc;
+ u8 reg;
+ union power_supply_propval pval = {0, };
+
+ /*
+ * ensure that we are not in the middle of an insertion where usbin_uv
+ * is low and src_detect hasn't gone high. If so, force dp=F dm=F,
+ * which guarantees proper type detection
+ */
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (!rc && !(reg & USBIN_UV_BIT) && !(reg & USBIN_SRC_DET_BIT)) {
+ pr_smb(PR_MISC, "overwriting state = %d with %d\n",
+ state, POWER_SUPPLY_DP_DM_DPF_DMF);
+ if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg))
+ return regulator_enable(chip->dpdm_reg);
+ }
+ pr_smb(PR_MISC, "setting usb psy dp dm = %d\n", state);
+ pval.intval = state;
+ return power_supply_set_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_DP_DM, &pval);
+}
+
+#define APSD_CFG 0xF5
+#define AUTO_SRC_DETECT_EN_BIT BIT(0)
+#define APSD_TIMEOUT_MS 1500
+static void restore_from_hvdcp_detection(struct smbchg_chip *chip)
+{
+ int rc;
+
+ pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc);
+
+ /* switch to 9V HVDCP */
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_9V);
+ if (rc < 0)
+ pr_err("Couldn't configure HVDCP 9V rc=%d\n", rc);
+
+ /* enable HVDCP */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, HVDCP_EN_BIT);
+ if (rc < 0)
+ pr_err("Couldn't enable HVDCP rc=%d\n", rc);
+
+ /* enable APSD */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + APSD_CFG,
+ AUTO_SRC_DETECT_EN_BIT, AUTO_SRC_DETECT_EN_BIT);
+ if (rc < 0)
+ pr_err("Couldn't enable APSD rc=%d\n", rc);
+
+ /* Reset back to 5V unregulated */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG,
+ ADAPTER_ALLOWANCE_MASK, USBIN_ADAPTER_5V_UNREGULATED_9V);
+ if (rc < 0)
+ pr_err("Couldn't write usb allowance rc=%d\n", rc);
+
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, AICL_EN_BIT);
+ if (rc < 0)
+ pr_err("Couldn't enable AICL rc=%d\n", rc);
+
+ chip->hvdcp_3_det_ignore_uv = false;
+ chip->pulse_cnt = 0;
+}
+
+#define RESTRICTED_CHG_FCC_PERCENT 50
+static int smbchg_restricted_charging(struct smbchg_chip *chip, bool enable)
+{
+ int current_table_index, fastchg_current;
+ int rc = 0;
+
+ /*
+ * If enabled, set the FCC to the set point closest to 50% of the
+ * configured FCC while remaining below it.
+ */
+ current_table_index = find_smaller_in_array(
+ chip->tables.usb_ilim_ma_table,
+ chip->cfg_fastchg_current_ma
+ * RESTRICTED_CHG_FCC_PERCENT / 100,
+ chip->tables.usb_ilim_ma_len);
+ fastchg_current =
+ chip->tables.usb_ilim_ma_table[current_table_index];
+ rc = vote(chip->fcc_votable, RESTRICTED_CHG_FCC_VOTER, enable,
+ fastchg_current);
+
+ pr_smb(PR_STATUS, "restricted_charging set to %d\n", enable);
+ chip->restricted_charging = enable;
+
+ return rc;
+}
+
+static void handle_usb_removal(struct smbchg_chip *chip)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+ int rc;
+
+ pr_smb(PR_STATUS, "triggered\n");
+ smbchg_aicl_deglitch_wa_check(chip);
+ /* Clear the OV detected status set before */
+ if (chip->usb_ov_det)
+ chip->usb_ov_det = false;
+ /* Clear typec current status */
+ if (chip->typec_psy)
+ chip->typec_current_ma = 0;
+ smbchg_change_usb_supply_type(chip, POWER_SUPPLY_TYPE_UNKNOWN);
+ extcon_set_cable_state_(chip->extcon, EXTCON_USB, chip->usb_present);
+ if (chip->dpdm_reg)
+ regulator_disable(chip->dpdm_reg);
+ schedule_work(&chip->usb_set_online_work);
+
+ pr_smb(PR_MISC, "setting usb psy health UNKNOWN\n");
+ chip->usb_health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ power_supply_changed(chip->usb_psy);
+
+ if (parallel_psy && chip->parallel_charger_detected) {
+ pval.intval = false;
+ power_supply_set_property(parallel_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ }
+ if (chip->parallel.avail && chip->aicl_done_irq
+ && chip->enable_aicl_wake) {
+ disable_irq_wake(chip->aicl_done_irq);
+ chip->enable_aicl_wake = false;
+ }
+ chip->parallel.enabled_once = false;
+ chip->vbat_above_headroom = false;
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ ICL_OVERRIDE_BIT, 0);
+ if (rc < 0)
+ pr_err("Couldn't set override rc = %d\n", rc);
+
+ vote(chip->usb_icl_votable, WEAK_CHARGER_ICL_VOTER, false, 0);
+ chip->usb_icl_delta = 0;
+ vote(chip->usb_icl_votable, SW_AICL_ICL_VOTER, false, 0);
+ vote(chip->aicl_deglitch_short_votable,
+ HVDCP_SHORT_DEGLITCH_VOTER, false, 0);
+ if (!chip->hvdcp_not_supported)
+ restore_from_hvdcp_detection(chip);
+}
+
+static bool is_usbin_uv_high(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb rt status rc = %d\n", rc);
+ return false;
+ }
+ return reg & USBIN_UV_BIT;
+}
+
+#define HVDCP_NOTIFY_MS 2500
+static void handle_usb_insertion(struct smbchg_chip *chip)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+ enum power_supply_type usb_supply_type;
+ int rc;
+ char *usb_type_name = "null";
+
+ pr_smb(PR_STATUS, "triggered\n");
+ /* usb inserted */
+ read_usb_type(chip, &usb_type_name, &usb_supply_type);
+ pr_smb(PR_STATUS,
+ "inserted type = %d (%s)", usb_supply_type, usb_type_name);
+
+ smbchg_aicl_deglitch_wa_check(chip);
+ if (chip->typec_psy)
+ update_typec_status(chip);
+ smbchg_change_usb_supply_type(chip, usb_supply_type);
+
+ /* Only notify USB if it's not a charger */
+ if (usb_supply_type == POWER_SUPPLY_TYPE_USB ||
+ usb_supply_type == POWER_SUPPLY_TYPE_USB_CDP)
+ extcon_set_cable_state_(chip->extcon, EXTCON_USB,
+ chip->usb_present);
+
+ /* Notify the USB psy if OV condition is not present */
+ if (!chip->usb_ov_det) {
+ /*
+ * Note that this could still be a very weak charger
+ * if the handle_usb_insertion was triggered from
+ * the falling edge of a USBIN_OV interrupt
+ */
+ pr_smb(PR_MISC, "setting usb psy health %s\n",
+ chip->very_weak_charger
+ ? "UNSPEC_FAILURE" : "GOOD");
+ chip->usb_health = chip->very_weak_charger
+ ? POWER_SUPPLY_HEALTH_UNSPEC_FAILURE
+ : POWER_SUPPLY_HEALTH_GOOD;
+ power_supply_changed(chip->usb_psy);
+ }
+ schedule_work(&chip->usb_set_online_work);
+
+ if (!chip->hvdcp_not_supported &&
+ (usb_supply_type == POWER_SUPPLY_TYPE_USB_DCP)) {
+ cancel_delayed_work_sync(&chip->hvdcp_det_work);
+ smbchg_stay_awake(chip, PM_DETECT_HVDCP);
+ schedule_delayed_work(&chip->hvdcp_det_work,
+ msecs_to_jiffies(HVDCP_NOTIFY_MS));
+ }
+
+ if (parallel_psy) {
+ pval.intval = true;
+ rc = power_supply_set_property(parallel_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ chip->parallel_charger_detected = rc ? false : true;
+ if (rc)
+ pr_debug("parallel-charger absent rc=%d\n", rc);
+ }
+
+ if (chip->parallel.avail && chip->aicl_done_irq
+ && !chip->enable_aicl_wake) {
+ rc = enable_irq_wake(chip->aicl_done_irq);
+ chip->enable_aicl_wake = true;
+ }
+}
+
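+/*
+ * Update the cached USB present state and run the insertion/removal
+ * handlers; "force" re-runs the handler even if the state is unchanged.
+ */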
+void update_usb_status(struct smbchg_chip *chip, bool usb_present, bool force)
+{
+ mutex_lock(&chip->usb_status_lock);
+ if (force) {
+ chip->usb_present = usb_present;
+ chip->usb_present ? handle_usb_insertion(chip)
+ : handle_usb_removal(chip);
+ goto unlock;
+ }
+ if (!chip->usb_present && usb_present) {
+ chip->usb_present = usb_present;
+ handle_usb_insertion(chip);
+ } else if (chip->usb_present && !usb_present) {
+ chip->usb_present = usb_present;
+ handle_usb_removal(chip);
+ }
+
+ /* update FG */
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS,
+ get_prop_batt_status(chip));
+unlock:
+ mutex_unlock(&chip->usb_status_lock);
+}
+
+static int otg_oc_reset(struct smbchg_chip *chip)
+{
+ int rc;
+
+ rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
+ OTG_EN_BIT, 0);
+ if (rc)
+ pr_err("Failed to disable OTG rc=%d\n", rc);
+
+ msleep(20);
+
+ /*
+ * There is a possibility that a USBID interrupt might have
+ * occurred, notifying the USB power supply to disable OTG. We
+ * should not enable OTG in such cases.
+ */
+ if (!is_otg_present(chip)) {
+ pr_smb(PR_STATUS,
+ "OTG is not present, not enabling OTG_EN_BIT\n");
+ goto out;
+ }
+
+ rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
+ OTG_EN_BIT, OTG_EN_BIT);
+ if (rc)
+ pr_err("Failed to re-enable OTG rc=%d\n", rc);
+
+out:
+ return rc;
+}
+
+static int get_current_time(unsigned long *now_tm_sec)
+{
+ struct rtc_time tm;
+ struct rtc_device *rtc;
+ int rc;
+
+ rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
+ if (rtc == NULL) {
+ pr_err("%s: unable to open rtc device (%s)\n",
+ __FILE__, CONFIG_RTC_HCTOSYS_DEVICE);
+ return -EINVAL;
+ }
+
+ rc = rtc_read_time(rtc, &tm);
+ if (rc) {
+ pr_err("Error reading rtc device (%s) : %d\n",
+ CONFIG_RTC_HCTOSYS_DEVICE, rc);
+ goto close_time;
+ }
+
+ rc = rtc_valid_tm(&tm);
+ if (rc) {
+ pr_err("Invalid RTC time (%s): %d\n",
+ CONFIG_RTC_HCTOSYS_DEVICE, rc);
+ goto close_time;
+ }
+ rtc_tm_to_time(&tm, now_tm_sec);
+
+close_time:
+ rtc_class_close(rtc);
+ return rc;
+}
+
+#define AICL_IRQ_LIMIT_SECONDS 60
+#define AICL_IRQ_LIMIT_COUNT 25
+static void increment_aicl_count(struct smbchg_chip *chip)
+{
+ bool bad_charger = false;
+ int max_aicl_count, rc;
+ u8 reg;
+ long elapsed_seconds;
+ unsigned long now_seconds;
+
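+ /*
+ * Rate-limit AICL interrupts: more than AICL_IRQ_LIMIT_COUNT of them
+ * within AICL_IRQ_LIMIT_SECONDS indicates a weak or collapsing input,
+ * so the code below votes for a 100mA limit and disables AICL reruns.
+ */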
+ pr_smb(PR_INTERRUPT, "aicl count c:%d dgltch:%d first:%ld\n",
+ chip->aicl_irq_count, chip->aicl_deglitch_short,
+ chip->first_aicl_seconds);
+
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + ICL_STS_1_REG, 1);
+ if (!rc)
+ chip->aicl_complete = reg & AICL_STS_BIT;
+ else
+ chip->aicl_complete = false;
+
+ if (chip->aicl_deglitch_short || chip->force_aicl_rerun) {
+ if (!chip->aicl_irq_count)
+ get_current_time(&chip->first_aicl_seconds);
+ get_current_time(&now_seconds);
+ elapsed_seconds = now_seconds
+ - chip->first_aicl_seconds;
+
+ if (elapsed_seconds > AICL_IRQ_LIMIT_SECONDS) {
+ pr_smb(PR_INTERRUPT,
+ "resetting: elp:%ld first:%ld now:%ld c=%d\n",
+ elapsed_seconds, chip->first_aicl_seconds,
+ now_seconds, chip->aicl_irq_count);
+ chip->aicl_irq_count = 1;
+ get_current_time(&chip->first_aicl_seconds);
+ return;
+ }
+ /*
+ * Double the amount of AICLs allowed if parallel charging is
+ * enabled.
+ */
+ max_aicl_count = AICL_IRQ_LIMIT_COUNT
+ * (chip->parallel.avail ? 2 : 1);
+ chip->aicl_irq_count++;
+
+ if (chip->aicl_irq_count > max_aicl_count) {
+ pr_smb(PR_INTERRUPT, "elp:%ld first:%ld now:%ld c=%d\n",
+ elapsed_seconds, chip->first_aicl_seconds,
+ now_seconds, chip->aicl_irq_count);
+ pr_smb(PR_INTERRUPT, "Disable AICL rerun\n");
+ chip->very_weak_charger = true;
+ bad_charger = true;
+
+ /*
+ * Disable AICL rerun since many interrupts were
+ * triggered in a short time
+ */
+ /* disable hw aicl */
+ rc = vote(chip->hw_aicl_rerun_disable_votable,
+ WEAK_CHARGER_HW_AICL_VOTER, true, 0);
+ if (rc < 0) {
+ pr_err("Couldn't disable hw aicl rerun rc=%d\n",
+ rc);
+ return;
+ }
+
+ /* Vote 100mA current limit */
+ rc = vote(chip->usb_icl_votable, WEAK_CHARGER_ICL_VOTER,
+ true, CURRENT_100_MA);
+ if (rc < 0) {
+ pr_err("Can't vote %d current limit rc=%d\n",
+ CURRENT_100_MA, rc);
+ }
+
+ chip->aicl_irq_count = 0;
+ } else if ((get_prop_charge_type(chip) ==
+ POWER_SUPPLY_CHARGE_TYPE_FAST) &&
+ (reg & AICL_SUSP_BIT)) {
+ /*
+ * If the AICL_SUSP_BIT is on, then AICL reruns have
+ * already been disabled. Set the very weak charger
+ * flag so that the driver reports a bad charger
+ * and does not reenable AICL reruns.
+ */
+ chip->very_weak_charger = true;
+ bad_charger = true;
+ }
+ if (bad_charger) {
+ pr_smb(PR_MISC,
+ "setting usb psy health UNSPEC_FAILURE\n");
+ chip->usb_health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ power_supply_changed(chip->usb_psy);
+ schedule_work(&chip->usb_set_online_work);
+ }
+ }
+}
+
+static int wait_for_usbin_uv(struct smbchg_chip *chip, bool high)
+{
+ int rc;
+ int tries = 3;
+ struct completion *completion = &chip->usbin_uv_lowered;
+ bool usbin_uv;
+
+ if (high)
+ completion = &chip->usbin_uv_raised;
+
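+ /* the wait is interruptible; retry if a signal cuts it short */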
+ while (tries--) {
+ rc = wait_for_completion_interruptible_timeout(
+ completion,
+ msecs_to_jiffies(APSD_TIMEOUT_MS));
+ if (rc >= 0)
+ break;
+ }
+
+ usbin_uv = is_usbin_uv_high(chip);
+
+ if (high == usbin_uv)
+ return 0;
+
+ pr_err("usbin uv didn't go to a %s state, still at %s, tries = %d, rc = %d\n",
+ high ? "risen" : "lowered",
+ usbin_uv ? "high" : "low",
+ tries, rc);
+ return -EINVAL;
+}
+
+static int wait_for_src_detect(struct smbchg_chip *chip, bool high)
+{
+ int rc;
+ int tries = 3;
+ struct completion *completion = &chip->src_det_lowered;
+ bool src_detect;
+
+ if (high)
+ completion = &chip->src_det_raised;
+
+ while (tries--) {
+ rc = wait_for_completion_interruptible_timeout(
+ completion,
+ msecs_to_jiffies(APSD_TIMEOUT_MS));
+ if (rc >= 0)
+ break;
+ }
+
+ src_detect = is_src_detect_high(chip);
+
+ if (high == src_detect)
+ return 0;
+
+ pr_err("src detect didn't go to a %s state, still at %s, tries = %d, rc = %d\n",
+ high ? "risen" : "lowered",
+ src_detect ? "high" : "low",
+ tries, rc);
+ return -EINVAL;
+}
+
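+/*
+ * Emulate a cable removal or insertion by narrowing the adapter
+ * allowance (9V-only makes a 5V source look removed; 5V-9V continuous
+ * re-admits it), then wait for usbin_uv and src_detect to reach the
+ * expected states.
+ */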
+static int fake_insertion_removal(struct smbchg_chip *chip, bool insertion)
+{
+ int rc;
+ bool src_detect;
+ bool usbin_uv;
+
+ if (insertion) {
+ reinit_completion(&chip->src_det_raised);
+ reinit_completion(&chip->usbin_uv_lowered);
+ } else {
+ reinit_completion(&chip->src_det_lowered);
+ reinit_completion(&chip->usbin_uv_raised);
+ }
+
+ /* ensure that usbin uv real time status is in the right state */
+ usbin_uv = is_usbin_uv_high(chip);
+ if (usbin_uv != insertion) {
+ pr_err("Skip faking, usbin uv is already %d\n", usbin_uv);
+ return -EINVAL;
+ }
+
+ /* ensure that src_detect real time status is in the right state */
+ src_detect = is_src_detect_high(chip);
+ if (src_detect == insertion) {
+ pr_err("Skip faking, src detect is already %d\n", src_detect);
+ return -EINVAL;
+ }
+
+ pr_smb(PR_MISC, "Allow only %s charger\n",
+ insertion ? "5-9V" : "9V only");
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG,
+ ADAPTER_ALLOWANCE_MASK,
+ insertion ?
+ USBIN_ADAPTER_5V_9V_CONT : USBIN_ADAPTER_9V);
+ if (rc < 0) {
+ pr_err("Couldn't write usb allowance rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on %s usbin uv\n",
+ insertion ? "falling" : "rising");
+ rc = wait_for_usbin_uv(chip, !insertion);
+ if (rc < 0) {
+ pr_err("wait for usbin uv failed rc = %d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on %s src det\n",
+ insertion ? "rising" : "falling");
+ rc = wait_for_src_detect(chip, insertion);
+ if (rc < 0) {
+ pr_err("wait for src detect failed rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int smbchg_prepare_for_pulsing(struct smbchg_chip *chip)
+{
+ int rc = 0;
+ u8 reg;
+
+ /* switch to 5V HVDCP */
+ pr_smb(PR_MISC, "Switch to 5V HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_5V);
+ if (rc < 0) {
+ pr_err("Couldn't configure HVDCP 5V rc=%d\n", rc);
+ goto out;
+ }
+
+ /* wait for HVDCP to lower to 5V */
+ msleep(500);
+ /*
+ * Check if the same hvdcp session is in progress. src_det should be
+ * high and we should still be in 5V hvdcp
+ */
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "src det low after 500mS sleep\n");
+ goto out;
+ }
+
+ /* disable HVDCP */
+ pr_smb(PR_MISC, "Disable HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, 0);
+ if (rc < 0) {
+ pr_err("Couldn't disable HVDCP rc=%d\n", rc);
+ goto out;
+ }
+
+ pr_smb(PR_MISC, "HVDCP voting for 300mA ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, true, 300);
+ if (rc < 0) {
+ pr_err("Couldn't vote for 300mA HVDCP ICL rc=%d\n", rc);
+ goto out;
+ }
+
+ pr_smb(PR_MISC, "Disable AICL\n");
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, 0);
+
+ chip->hvdcp_3_det_ignore_uv = true;
+ /* fake a removal */
+ pr_smb(PR_MISC, "Faking Removal\n");
+ rc = fake_insertion_removal(chip, false);
+ if (rc < 0) {
+ pr_err("Couldn't fake removal HVDCP Removed rc=%d\n", rc);
+ goto handle_removal;
+ }
+
+ /* disable APSD */
+ pr_smb(PR_MISC, "Disabling APSD\n");
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + APSD_CFG,
+ AUTO_SRC_DETECT_EN_BIT, 0);
+ if (rc < 0) {
+ pr_err("Couldn't disable APSD rc=%d\n", rc);
+ goto out;
+ }
+
+ /* fake an insertion */
+ pr_smb(PR_MISC, "Faking Insertion\n");
+ rc = fake_insertion_removal(chip, true);
+ if (rc < 0) {
+ pr_err("Couldn't fake insertion rc=%d\n", rc);
+ goto handle_removal;
+ }
+ chip->hvdcp_3_det_ignore_uv = false;
+
+ pr_smb(PR_MISC, "Enable AICL\n");
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, AICL_EN_BIT);
+
+ set_usb_psy_dp_dm(chip, POWER_SUPPLY_DP_DM_DP0P6_DMF);
+ /*
+ * DCP will switch to HVDCP in this time by removing the short
+ * between DP and DM
+ */
+ msleep(HVDCP_NOTIFY_MS);
+ /*
+ * Check if the same hvdcp session is in progress. src_det should be
+ * high and the usb type should be none since APSD was disabled
+ */
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "src det low after 2s sleep\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ smbchg_read(chip, &reg, chip->misc_base + IDEV_STS, 1);
+ if ((reg >> TYPE_BITS_OFFSET) != 0) {
+ pr_smb(PR_MISC, "type bits set after 2s sleep - abort\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ set_usb_psy_dp_dm(chip, POWER_SUPPLY_DP_DM_DP0P6_DM3P3);
+ /* Wait 60mS after entering continuous mode */
+ msleep(60);
+
+ return 0;
+out:
+ chip->hvdcp_3_det_ignore_uv = false;
+ restore_from_hvdcp_detection(chip);
+ return rc;
+handle_removal:
+ chip->hvdcp_3_det_ignore_uv = false;
+ update_usb_status(chip, 0, 0);
+ return rc;
+}
+
+static int smbchg_unprepare_for_pulsing(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg))
+ rc = regulator_enable(chip->dpdm_reg);
+ if (rc < 0) {
+ pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc);
+ return rc;
+ }
+
+ /* switch to 9V HVDCP */
+ pr_smb(PR_MISC, "Switch to 9V HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_9V);
+ if (rc < 0) {
+ pr_err("Couldn't configure HVDCP 9V rc=%d\n", rc);
+ return rc;
+ }
+
+ /* enable HVDCP */
+ pr_smb(PR_MISC, "Enable HVDCP\n");
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, HVDCP_EN_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't enable HVDCP rc=%d\n", rc);
+ return rc;
+ }
+
+ /* enable APSD */
+ pr_smb(PR_MISC, "Enabling APSD\n");
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + APSD_CFG,
+ AUTO_SRC_DETECT_EN_BIT, AUTO_SRC_DETECT_EN_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't enable APSD rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Disable AICL */
+ pr_smb(PR_MISC, "Disable AICL\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, 0);
+ if (rc < 0) {
+ pr_err("Couldn't disable AICL rc=%d\n", rc);
+ return rc;
+ }
+
+ /* fake a removal */
+ chip->hvdcp_3_det_ignore_uv = true;
+ pr_smb(PR_MISC, "Faking Removal\n");
+ rc = fake_insertion_removal(chip, false);
+ if (rc < 0) {
+ pr_err("Couldn't fake removal rc=%d\n", rc);
+ goto out;
+ }
+
+ /*
+ * reset the enabled once flag for parallel charging so
+ * parallel charging can immediately restart after the HVDCP pulsing
+ * is complete
+ */
+ chip->parallel.enabled_once = false;
+
+ /* fake an insertion */
+ pr_smb(PR_MISC, "Faking Insertion\n");
+ rc = fake_insertion_removal(chip, true);
+ if (rc < 0) {
+ pr_err("Couldn't fake insertion rc=%d\n", rc);
+ goto out;
+ }
+ chip->hvdcp_3_det_ignore_uv = false;
+
+ /* Enable AICL */
+ pr_smb(PR_MISC, "Enable AICL\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, AICL_EN_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't enable AICL rc=%d\n", rc);
+ return rc;
+ }
+
+out:
+ /*
+ * There are many QC 2.0 chargers that collapse before the aicl deglitch
+ * timer can mitigate. Hence set the aicl deglitch time to a shorter
+ * period.
+ */
+
+ rc = vote(chip->aicl_deglitch_short_votable,
+ HVDCP_SHORT_DEGLITCH_VOTER, true, 0);
+ if (rc < 0)
+ pr_err("Couldn't reduce aicl deglitch rc=%d\n", rc);
+
+ pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc);
+
+ chip->hvdcp_3_det_ignore_uv = false;
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "HVDCP removed\n");
+ update_usb_status(chip, 0, 0);
+ }
+ return rc;
+}
+
+#define USB_CMD_APSD 0x41
+#define APSD_RERUN BIT(0)
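+/*
+ * An APSD rerun looks like a removal followed by an insertion: usbin_uv
+ * rises and src_det falls, then usbin_uv falls and src_det rises once
+ * the new detection completes.
+ */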
+static int rerun_apsd(struct smbchg_chip *chip)
+{
+ int rc;
+
+ reinit_completion(&chip->src_det_raised);
+ reinit_completion(&chip->usbin_uv_lowered);
+ reinit_completion(&chip->src_det_lowered);
+ reinit_completion(&chip->usbin_uv_raised);
+
+ /* re-run APSD */
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + USB_CMD_APSD,
+ APSD_RERUN, APSD_RERUN);
+ if (rc) {
+ pr_err("Couldn't re-run APSD rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on rising usbin uv\n");
+ rc = wait_for_usbin_uv(chip, true);
+ if (rc < 0) {
+ pr_err("wait for usbin uv failed rc = %d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on falling src det\n");
+ rc = wait_for_src_detect(chip, false);
+ if (rc < 0) {
+ pr_err("wait for src detect failed rc = %d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on falling usbin uv\n");
+ rc = wait_for_usbin_uv(chip, false);
+ if (rc < 0) {
+ pr_err("wait for usbin uv failed rc = %d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on rising src det\n");
+ rc = wait_for_src_detect(chip, true);
+ if (rc < 0) {
+ pr_err("wait for src detect failed rc = %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+#define SCHG_LITE_USBIN_HVDCP_5_9V 0x8
+#define SCHG_LITE_USBIN_HVDCP_5_9V_SEL_MASK 0x38
+#define SCHG_LITE_USBIN_HVDCP_SEL_IDLE BIT(3)
+static bool is_hvdcp_5v_cont_mode(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg = 0;
+
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + USBIN_HVDCP_STS, 1);
+ if (rc) {
+ pr_err("Unable to read HVDCP status rc=%d\n", rc);
+ return false;
+ }
+
+ pr_smb(PR_STATUS, "HVDCP status = %x\n", reg);
+
+ if (reg & SCHG_LITE_USBIN_HVDCP_SEL_IDLE) {
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + INPUT_STS, 1);
+ if (rc) {
+ pr_err("Unable to read INPUT status rc=%d\n", rc);
+ return false;
+ }
+ pr_smb(PR_STATUS, "INPUT status = %x\n", reg);
+ if ((reg & SCHG_LITE_USBIN_HVDCP_5_9V_SEL_MASK) ==
+ SCHG_LITE_USBIN_HVDCP_5_9V)
+ return true;
+ }
+ return false;
+}
+
+static int smbchg_prepare_for_pulsing_lite(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ /* check if HVDCP is already in 5V continuous mode */
+ if (is_hvdcp_5v_cont_mode(chip)) {
+ pr_smb(PR_MISC, "HVDCP by default is in 5V continuous mode\n");
+ return 0;
+ }
+
+ /* switch to 5V HVDCP */
+ pr_smb(PR_MISC, "Switch to 5V HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_5V);
+ if (rc < 0) {
+ pr_err("Couldn't configure HVDCP 5V rc=%d\n", rc);
+ goto out;
+ }
+
+ /* wait for HVDCP to lower to 5V */
+ msleep(500);
+ /*
+ * Check if the same hvdcp session is in progress. src_det should be
+ * high and we should still be in 5V hvdcp
+ */
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "src det low after 500mS sleep\n");
+ goto out;
+ }
+
+ pr_smb(PR_MISC, "HVDCP voting for 300mA ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, true, 300);
+ if (rc < 0) {
+ pr_err("Couldn't vote for 300mA HVDCP ICL rc=%d\n", rc);
+ goto out;
+ }
+
+ pr_smb(PR_MISC, "Disable AICL\n");
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, 0);
+
+ chip->hvdcp_3_det_ignore_uv = true;
+
+ /* re-run APSD */
+ rc = rerun_apsd(chip);
+ if (rc) {
+ pr_err("APSD rerun failed\n");
+ goto out;
+ }
+
+ chip->hvdcp_3_det_ignore_uv = false;
+
+ pr_smb(PR_MISC, "Enable AICL\n");
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, AICL_EN_BIT);
+ /*
+ * DCP will switch to HVDCP in this time by removing the short
+ * between DP and DM
+ */
+ msleep(HVDCP_NOTIFY_MS);
+ /*
+ * Check if the same hvdcp session is in progress. src_det should be
+ * high and the usb type should be none since APSD was disabled
+ */
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "src det low after 2s sleep\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* We are set if HVDCP is in 5V continuous mode */
+ if (!is_hvdcp_5v_cont_mode(chip)) {
+ pr_err("HVDCP could not be set in 5V continuous mode\n");
+ goto out;
+ }
+
+ return 0;
+out:
+ chip->hvdcp_3_det_ignore_uv = false;
+ restore_from_hvdcp_detection(chip);
+ return rc;
+}
+
+static int smbchg_unprepare_for_pulsing_lite(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ pr_smb(PR_MISC, "Forcing 9V HVDCP 2.0\n");
+ rc = force_9v_hvdcp(chip);
+ if (rc) {
+ pr_err("Failed to force 9V HVDCP=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc);
+
+ return rc;
+}
+
+#define CMD_HVDCP_2 0x43
+#define SINGLE_INCREMENT BIT(0)
+#define SINGLE_DECREMENT BIT(1)
+static int smbchg_dp_pulse_lite(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ pr_smb(PR_MISC, "Increment DP\n");
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_HVDCP_2,
+ SINGLE_INCREMENT, SINGLE_INCREMENT);
+ if (rc)
+ pr_err("Single-increment failed rc=%d\n", rc);
+
+ return rc;
+}
+
+static int smbchg_dm_pulse_lite(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ pr_smb(PR_MISC, "Decrement DM\n");
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_HVDCP_2,
+ SINGLE_DECREMENT, SINGLE_DECREMENT);
+ if (rc)
+ pr_err("Single-decrement failed rc=%d\n", rc);
+
+ return rc;
+}
+
+static int smbchg_hvdcp3_confirmed(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ /*
+ * reset the enabled once flag for parallel charging because this is
+ * effectively a new insertion.
+ */
+ chip->parallel.enabled_once = false;
+
+ pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc);
+
+ smbchg_change_usb_supply_type(chip, POWER_SUPPLY_TYPE_USB_HVDCP_3);
+
+ return rc;
+}
+
+static int smbchg_dp_dm(struct smbchg_chip *chip, int val)
+{
+ int rc = 0;
+ int target_icl_vote_ma;
+
+ switch (val) {
+ case POWER_SUPPLY_DP_DM_PREPARE:
+ if (!is_hvdcp_present(chip)) {
+ pr_err("No pulsing unless HVDCP\n");
+ return -ENODEV;
+ }
+ if (chip->schg_version == QPNP_SCHG_LITE)
+ rc = smbchg_prepare_for_pulsing_lite(chip);
+ else
+ rc = smbchg_prepare_for_pulsing(chip);
+ break;
+ case POWER_SUPPLY_DP_DM_UNPREPARE:
+ if (chip->schg_version == QPNP_SCHG_LITE)
+ rc = smbchg_unprepare_for_pulsing_lite(chip);
+ else
+ rc = smbchg_unprepare_for_pulsing(chip);
+ break;
+ case POWER_SUPPLY_DP_DM_CONFIRMED_HVDCP3:
+ rc = smbchg_hvdcp3_confirmed(chip);
+ break;
+ case POWER_SUPPLY_DP_DM_DP_PULSE:
+ if (chip->schg_version == QPNP_SCHG)
+ rc = set_usb_psy_dp_dm(chip,
+ POWER_SUPPLY_DP_DM_DP_PULSE);
+ else
+ rc = smbchg_dp_pulse_lite(chip);
+ if (!rc)
+ chip->pulse_cnt++;
+ pr_smb(PR_MISC, "pulse_cnt = %d\n", chip->pulse_cnt);
+ break;
+ case POWER_SUPPLY_DP_DM_DM_PULSE:
+ if (chip->schg_version == QPNP_SCHG)
+ rc = set_usb_psy_dp_dm(chip,
+ POWER_SUPPLY_DP_DM_DM_PULSE);
+ else
+ rc = smbchg_dm_pulse_lite(chip);
+ if (!rc && chip->pulse_cnt)
+ chip->pulse_cnt--;
+ pr_smb(PR_MISC, "pulse_cnt = %d\n", chip->pulse_cnt);
+ break;
+ case POWER_SUPPLY_DP_DM_HVDCP3_SUPPORTED:
+ chip->hvdcp3_supported = true;
+ pr_smb(PR_MISC, "HVDCP3 supported\n");
+ break;
+ case POWER_SUPPLY_DP_DM_ICL_DOWN:
+ chip->usb_icl_delta -= 100;
+ target_icl_vote_ma = get_client_vote(chip->usb_icl_votable,
+ PSY_ICL_VOTER);
+ vote(chip->usb_icl_votable, SW_AICL_ICL_VOTER, true,
+ target_icl_vote_ma + chip->usb_icl_delta);
+ break;
+ case POWER_SUPPLY_DP_DM_ICL_UP:
+ chip->usb_icl_delta += 100;
+ target_icl_vote_ma = get_client_vote(chip->usb_icl_votable,
+ PSY_ICL_VOTER);
+ vote(chip->usb_icl_votable, SW_AICL_ICL_VOTER, true,
+ target_icl_vote_ma + chip->usb_icl_delta);
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static void update_typec_capability_status(struct smbchg_chip *chip,
+ const union power_supply_propval *val)
+{
+ pr_smb(PR_TYPEC, "typec capability = %dma\n", val->intval);
+
+ pr_debug("changing ICL from %dma to %dma\n", chip->typec_current_ma,
+ val->intval);
+ chip->typec_current_ma = val->intval;
+ smbchg_change_usb_supply_type(chip, chip->usb_supply_type);
+}
+
+static void update_typec_otg_status(struct smbchg_chip *chip, int mode,
+ bool force)
+{
+ union power_supply_propval pval = {0, };
+
+ pr_smb(PR_TYPEC, "typec mode = %d\n", mode);
+
+ if (mode == POWER_SUPPLY_TYPE_DFP) {
+ chip->typec_dfp = true;
+ pval.intval = 1;
+ extcon_set_cable_state_(chip->extcon, EXTCON_USB_HOST,
+ chip->typec_dfp);
+ /* update FG */
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS,
+ get_prop_batt_status(chip));
+ } else if (force || chip->typec_dfp) {
+ chip->typec_dfp = false;
+ pval.intval = 0;
+ extcon_set_cable_state_(chip->extcon, EXTCON_USB_HOST,
+ chip->typec_dfp);
+ /* update FG */
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS,
+ get_prop_batt_status(chip));
+ }
+}
+
+static int smbchg_usb_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct smbchg_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ val->intval = chip->usb_current_max;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = chip->usb_present;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = chip->usb_online;
+ break;
+ case POWER_SUPPLY_PROP_TYPE:
+ val->intval = chip->usb_supply_type;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = chip->usb_health;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int smbchg_usb_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct smbchg_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ chip->usb_current_max = val->intval;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ chip->usb_online = val->intval;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ power_supply_changed(psy);
+ return 0;
+}
+
+static int
+smbchg_usb_is_writeable(struct power_supply *psy, enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ return 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static char *smbchg_usb_supplicants[] = {
+ "battery",
+ "bms",
+};
+
+static enum power_supply_property smbchg_usb_properties[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_TYPE,
+ POWER_SUPPLY_PROP_HEALTH,
+};
+
+#define CHARGE_OUTPUT_VTG_RATIO 840
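+/* VCHG tracks input current; scale the ADC reading by the 840 ratio */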
+static int smbchg_get_iusb(struct smbchg_chip *chip)
+{
+ int rc, iusb_ua = -EINVAL;
+ struct qpnp_vadc_result adc_result;
+
+ if (!is_usb_present(chip) && !is_dc_present(chip))
+ return 0;
+
+ if (chip->vchg_vadc_dev && chip->vchg_adc_channel != -EINVAL) {
+ rc = qpnp_vadc_read(chip->vchg_vadc_dev,
+ chip->vchg_adc_channel, &adc_result);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "error in VCHG (channel-%d) read rc = %d\n",
+ chip->vchg_adc_channel, rc);
+ return 0;
+ }
+ iusb_ua = div_s64(adc_result.physical * 1000,
+ CHARGE_OUTPUT_VTG_RATIO);
+ }
+
+ return iusb_ua;
+}
+
+static enum power_supply_property smbchg_battery_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
+ POWER_SUPPLY_PROP_FLASH_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+ POWER_SUPPLY_PROP_FLASH_ACTIVE,
+ POWER_SUPPLY_PROP_FLASH_TRIGGER,
+ POWER_SUPPLY_PROP_DP_DM,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+ POWER_SUPPLY_PROP_RERUN_AICL,
+ POWER_SUPPLY_PROP_RESTRICTED_CHARGING,
+};
+
+static int smbchg_battery_set_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ const union power_supply_propval *val)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED:
+ vote(chip->battchg_suspend_votable, BATTCHG_USER_EN_VOTER,
+ !val->intval, 0);
+ break;
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ rc = vote(chip->usb_suspend_votable, USER_EN_VOTER,
+ !val->intval, 0);
+ rc = vote(chip->dc_suspend_votable, USER_EN_VOTER,
+ !val->intval, 0);
+ chip->chg_enabled = val->intval;
+ schedule_work(&chip->usb_set_online_work);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ chip->fake_battery_soc = val->intval;
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ break;
+ case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+ smbchg_system_temp_level_set(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ rc = smbchg_set_fastchg_current_user(chip, val->intval / 1000);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ rc = smbchg_float_voltage_set(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE:
+ rc = smbchg_safety_timer_enable(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_FLASH_ACTIVE:
+ rc = smbchg_switch_buck_frequency(chip, val->intval);
+ if (rc) {
+ pr_err("Couldn't switch buck frequency, rc=%d\n", rc);
+ /*
+ * Trigger a panic if there is an error while switching
+ * buck frequency. This will prevent LS FET damage.
+ */
+ BUG_ON(1);
+ }
+
+ rc = smbchg_otg_pulse_skip_disable(chip,
+ REASON_FLASH_ENABLED, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_FLASH_TRIGGER:
+ chip->flash_triggered = !!val->intval;
+ smbchg_icl_loop_disable_check(chip);
+ break;
+ case POWER_SUPPLY_PROP_FORCE_TLIM:
+ rc = smbchg_force_tlim_en(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_DP_DM:
+ rc = smbchg_dp_dm(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_RERUN_AICL:
+ smbchg_rerun_aicl(chip);
+ break;
+ case POWER_SUPPLY_PROP_RESTRICTED_CHARGING:
+ rc = smbchg_restricted_charging(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_CAPABILITY:
+ if (chip->typec_psy)
+ update_typec_capability_status(chip, val);
+ break;
+ case POWER_SUPPLY_PROP_TYPEC_MODE:
+ if (chip->typec_psy)
+ update_typec_otg_status(chip, val->intval, false);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int smbchg_battery_is_writeable(struct power_supply *psy,
+ enum power_supply_property prop)
+{
+ int rc;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED:
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ case POWER_SUPPLY_PROP_CAPACITY:
+ case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ case POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE:
+ case POWER_SUPPLY_PROP_DP_DM:
+ case POWER_SUPPLY_PROP_RERUN_AICL:
+ case POWER_SUPPLY_PROP_RESTRICTED_CHARGING:
+ rc = 1;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static int smbchg_battery_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct smbchg_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = get_prop_batt_status(chip);
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = get_prop_batt_present(chip);
+ break;
+ case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED:
+ val->intval
+ = get_effective_result(chip->battchg_suspend_votable);
+ if (val->intval < 0) /* no votes */
+ val->intval = 1;
+ else
+ val->intval = !val->intval;
+ break;
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ val->intval = chip->chg_enabled;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ val->intval = get_prop_charge_type(chip);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ val->intval = smbchg_float_voltage_get(chip);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = get_prop_batt_health(chip);
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_FLASH_CURRENT_MAX:
+ val->intval = smbchg_calc_max_flash_current(chip);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ val->intval = chip->fastchg_current_ma * 1000;
+ break;
+ case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+ val->intval = chip->therm_lvl_sel;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
+ val->intval = smbchg_get_aicl_level_ma(chip) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED:
+ val->intval = (int)chip->aicl_complete;
+ break;
+ case POWER_SUPPLY_PROP_RESTRICTED_CHARGING:
+ val->intval = (int)chip->restricted_charging;
+ break;
+ /* properties from fg */
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = get_prop_batt_capacity(chip);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = get_prop_batt_current_now(chip);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = get_prop_batt_voltage_now(chip);
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = get_prop_batt_temp(chip);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = get_prop_batt_voltage_max_design(chip);
+ break;
+ case POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE:
+ val->intval = chip->safety_timer_en;
+ break;
+ case POWER_SUPPLY_PROP_FLASH_ACTIVE:
+ val->intval = chip->otg_pulse_skip_dis;
+ break;
+ case POWER_SUPPLY_PROP_FLASH_TRIGGER:
+ val->intval = chip->flash_triggered;
+ break;
+ case POWER_SUPPLY_PROP_DP_DM:
+ val->intval = chip->pulse_cnt;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+ val->intval = smbchg_is_input_current_limited(chip);
+ break;
+ case POWER_SUPPLY_PROP_RERUN_AICL:
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_NOW:
+ val->intval = smbchg_get_iusb(chip);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static char *smbchg_dc_supplicants[] = {
+ "bms",
+};
+
+static enum power_supply_property smbchg_dc_properties[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+};
+
+static int smbchg_dc_set_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ const union power_supply_propval *val)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ rc = vote(chip->dc_suspend_votable, POWER_SUPPLY_EN_VOTER,
+ !val->intval, 0);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = vote(chip->dc_icl_votable, USER_ICL_VOTER, true,
+ val->intval / 1000);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int smbchg_dc_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct smbchg_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = is_dc_present(chip);
+ break;
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ val->intval = get_effective_result(chip->dc_suspend_votable);
+ if (val->intval < 0) /* no votes */
+ val->intval = 1;
+ else
+ val->intval = !val->intval;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ /* return if dc is charging the battery */
+ val->intval = (smbchg_get_pwr_path(chip) == PWR_PATH_DC)
+ && (get_prop_batt_status(chip)
+ == POWER_SUPPLY_STATUS_CHARGING);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ val->intval = chip->dc_max_current_ma * 1000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int smbchg_dc_is_writeable(struct power_supply *psy,
+ enum power_supply_property prop)
+{
+ int rc;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = 1;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
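+/*
+ * Illustrative sketch: callbacks like the ones above are typically wired
+ * up through a struct power_supply_desc at registration time, roughly as
+ * below. The real registration in this driver happens later in probe;
+ * the "dc" name and the desc variable here are assumptions for the example.
+ */
+static const struct power_supply_desc example_dc_psy_desc __maybe_unused = {
+ .name = "dc",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .properties = smbchg_dc_properties,
+ .num_properties = ARRAY_SIZE(smbchg_dc_properties),
+ .get_property = smbchg_dc_get_property,
+ .set_property = smbchg_dc_set_property,
+ .property_is_writeable = smbchg_dc_is_writeable,
+};
+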
+#define HOT_BAT_HARD_BIT BIT(0)
+#define HOT_BAT_SOFT_BIT BIT(1)
+#define COLD_BAT_HARD_BIT BIT(2)
+#define COLD_BAT_SOFT_BIT BIT(3)
+#define BAT_OV_BIT BIT(4)
+#define BAT_LOW_BIT BIT(5)
+#define BAT_MISSING_BIT BIT(6)
+#define BAT_TERM_MISSING_BIT BIT(7)
+static irqreturn_t batt_hot_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ chip->batt_hot = !!(reg & HOT_BAT_HARD_BIT);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_wipower_check(chip);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH,
+ get_prop_batt_health(chip));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t batt_cold_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ chip->batt_cold = !!(reg & COLD_BAT_HARD_BIT);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_wipower_check(chip);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH,
+ get_prop_batt_health(chip));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t batt_warm_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ chip->batt_warm = !!(reg & HOT_BAT_SOFT_BIT);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH,
+ get_prop_batt_health(chip));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t batt_cool_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ chip->batt_cool = !!(reg & COLD_BAT_SOFT_BIT);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH,
+ get_prop_batt_health(chip));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t batt_pres_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ chip->batt_present = !(reg & BAT_MISSING_BIT);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH,
+ get_prop_batt_health(chip));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vbat_low_handler(int irq, void *_chip)
+{
+ pr_warn_ratelimited("vbat low\n");
+ return IRQ_HANDLED;
+}
+
+#define CHG_COMP_SFT_BIT BIT(3)
+static irqreturn_t chg_error_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ int rc = 0;
+ u8 reg;
+
+ pr_smb(PR_INTERRUPT, "chg-error triggered\n");
+
+ rc = smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to read RT_STS rc = %d\n", rc);
+ } else {
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ if (reg & CHG_COMP_SFT_BIT)
+ set_property_on_fg(chip,
+ POWER_SUPPLY_PROP_SAFETY_TIMER_EXPIRED,
+ 1);
+ }
+
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_wipower_check(chip);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fastchg_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+
+ pr_smb(PR_INTERRUPT, "p2f triggered\n");
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_wipower_check(chip);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t chg_hot_handler(int irq, void *_chip)
+{
+ pr_warn_ratelimited("chg hot\n");
+ smbchg_wipower_check(_chip);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t chg_term_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+
+ pr_smb(PR_INTERRUPT, "tcc triggered\n");
+ /*
+ * Charge termination is a pulse and not level triggered. That means
+ * the TCC bit in RT_STS can get cleared by the time this interrupt is
+ * handled. Instead of relying on that to determine whether charge
+ * termination happened, simply notify the FG whenever this interrupt
+ * is handled.
+ */
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_CHARGE_DONE, 1);
+
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t taper_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ taper_irq_en(chip, false);
+ smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_taper(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_wipower_check(chip);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t recharge_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wdog_timeout_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->misc_base + RT_STS, 1);
+ pr_warn_ratelimited("wdog timeout rt_stat = 0x%02x\n", reg);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ return IRQ_HANDLED;
+}
+
+/**
+ * power_ok_handler() - called when the switcher turns on or turns off
+ * @irq: interrupt number
+ * @_chip: pointer to smbchg_chip
+ */
+static irqreturn_t power_ok_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->misc_base + RT_STS, 1);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ return IRQ_HANDLED;
+}
+
+/**
+ * dcin_uv_handler() - called when the dc voltage crosses the uv threshold
+ * @irq: interrupt number
+ * @_chip: pointer to smbchg_chip
+ */
+#define DCIN_UNSUSPEND_DELAY_MS 1000
+static irqreturn_t dcin_uv_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ bool dc_present = is_dc_present(chip);
+
+ pr_smb(PR_STATUS, "chip->dc_present = %d dc_present = %d\n",
+ chip->dc_present, dc_present);
+
+ if (chip->dc_present != dc_present) {
+ /* dc changed */
+ chip->dc_present = dc_present;
+ if (chip->dc_psy_type != -EINVAL && chip->batt_psy)
+ power_supply_changed(chip->dc_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_aicl_deglitch_wa_check(chip);
+ chip->vbat_above_headroom = false;
+ }
+
+ smbchg_wipower_check(chip);
+ return IRQ_HANDLED;
+}
+
+/**
+ * usbin_ov_handler() - called when an overvoltage condition occurs
+ * @irq: interrupt number
+ * @_chip: pointer to smbchg_chip
+ */
+static irqreturn_t usbin_ov_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ int rc;
+ u8 reg;
+ bool usb_present;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb rt status rc = %d\n", rc);
+ goto out;
+ }
+
+ /* OV condition is detected. Notify it to USB psy */
+ if (reg & USBIN_OV_BIT) {
+ chip->usb_ov_det = true;
+ pr_smb(PR_MISC, "setting usb psy health OV\n");
+ chip->usb_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ power_supply_changed(chip->usb_psy);
+ } else {
+ chip->usb_ov_det = false;
+ /* If USB is present, then handle the USB insertion */
+ usb_present = is_usb_present(chip);
+ if (usb_present)
+ update_usb_status(chip, usb_present, false);
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+/**
+ * usbin_uv_handler() - called when the USB charger is removed
+ * @irq: interrupt number
+ * @_chip: pointer to smbchg_chip
+ */
+#define ICL_MODE_MASK SMB_MASK(5, 4)
+#define ICL_MODE_HIGH_CURRENT 0
+static irqreturn_t usbin_uv_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ int aicl_level = smbchg_get_aicl_level_ma(chip);
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (rc) {
+ pr_err("could not read rt sts: %d", rc);
+ goto out;
+ }
+
+ pr_smb(PR_STATUS,
+ "%s chip->usb_present = %d rt_sts = 0x%02x hvdcp_3_det_ignore_uv = %d aicl = %d\n",
+ chip->hvdcp_3_det_ignore_uv ? "Ignoring":"",
+ chip->usb_present, reg, chip->hvdcp_3_det_ignore_uv,
+ aicl_level);
+
+ /*
+ * set usb_psy's dp=f dm=f if this is a new insertion, i.e. it is
+ * not already src_detected and usbin_uv is seen falling
+ */
+ if (!(reg & USBIN_UV_BIT) && !(reg & USBIN_SRC_DET_BIT)) {
+ pr_smb(PR_MISC, "setting usb dp=f dm=f\n");
+ if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg))
+ rc = regulator_enable(chip->dpdm_reg);
+ if (rc < 0) {
+ pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc);
+ goto out;
+ }
+ }
+
+ if (reg & USBIN_UV_BIT)
+ complete_all(&chip->usbin_uv_raised);
+ else
+ complete_all(&chip->usbin_uv_lowered);
+
+ if (chip->hvdcp_3_det_ignore_uv)
+ goto out;
+
+ if ((reg & USBIN_UV_BIT) && (reg & USBIN_SRC_DET_BIT)) {
+ pr_smb(PR_STATUS, "Very weak charger detected\n");
+ chip->very_weak_charger = true;
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + ICL_STS_2_REG, 1);
+ if (rc) {
+ dev_err(chip->dev, "Could not read usb icl sts 2: %d\n",
+ rc);
+ goto out;
+ }
+ if ((reg & ICL_MODE_MASK) != ICL_MODE_HIGH_CURRENT) {
+ /*
+ * If AICL is not even enabled, this is either an
+ * SDP or a grossly out of spec charger. Do not
+ * draw any current from it.
+ */
+ rc = vote(chip->usb_suspend_votable,
+ WEAK_CHARGER_EN_VOTER, true, 0);
+ if (rc < 0)
+ pr_err("could not disable charger: %d", rc);
+ } else if (aicl_level == chip->tables.usb_ilim_ma_table[0]) {
+ /*
+ * We are in a situation where the adapter is not able
+ * to supply even 300mA. Disable HW AICL reruns, else it
+ * is only a matter of time before we get back here again.
+ */
+ rc = vote(chip->hw_aicl_rerun_disable_votable,
+ WEAK_CHARGER_HW_AICL_VOTER, true, 0);
+ if (rc < 0)
+ pr_err("Couldn't disable hw aicl rerun rc=%d\n",
+ rc);
+ }
+ pr_smb(PR_MISC, "setting usb psy health UNSPEC_FAILURE\n");
+ chip->usb_health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ power_supply_changed(chip->usb_psy);
+ schedule_work(&chip->usb_set_online_work);
+ }
+
+ smbchg_wipower_check(chip);
+out:
+ return IRQ_HANDLED;
+}
+
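+/*
+ * Illustrative sketch of the pmic-voter pattern used in the handler above
+ * (not driver code): each named voter casts an (enabled, value) vote and
+ * the votable resolves them, e.g. a VOTE_SET_ANY votable suspends the path
+ * if any voter is enabled. The helper name is made up for the example.
+ */
+static int __maybe_unused example_suspend_usb(struct smbchg_chip *chip,
+ bool suspend)
+{
+ /* one voter among several on this votable; 0 is the don't-care value */
+ return vote(chip->usb_suspend_votable, WEAK_CHARGER_EN_VOTER,
+ suspend, 0);
+}
+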
+/**
+ * src_detect_handler() - called on the rising edge when the USB charger type
+ * is detected and on the falling edge when the USB voltage falls
+ * below the coarse detect voltage (1V); used for
+ * handling USB charger insertion and removal.
+ * @irq: interrupt number
+ * @_chip: pointer to smbchg_chip
+ */
+static irqreturn_t src_detect_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ bool usb_present = is_usb_present(chip);
+ bool src_detect = is_src_detect_high(chip);
+ int rc;
+
+ pr_smb(PR_STATUS,
+ "%s chip->usb_present = %d usb_present = %d src_detect = %d hvdcp_3_det_ignore_uv=%d\n",
+ chip->hvdcp_3_det_ignore_uv ? "Ignoring":"",
+ chip->usb_present, usb_present, src_detect,
+ chip->hvdcp_3_det_ignore_uv);
+
+ if (src_detect)
+ complete_all(&chip->src_det_raised);
+ else
+ complete_all(&chip->src_det_lowered);
+
+ if (chip->hvdcp_3_det_ignore_uv)
+ goto out;
+
+ /*
+ * When VBAT is above the AICL threshold (4.25V) - 180mV (4.07V),
+ * an input collapse due to AICL will actually cause a USBIN_UV
+ * interrupt to fire as well.
+ *
+ * Handle USB insertions and removals in the source detect handler
+ * instead of the USBIN_UV handler since the latter is untrustworthy
+ * when the battery voltage is high.
+ */
+ chip->very_weak_charger = false;
+ /*
+ * A src detect marks a new insertion or a real removal;
+ * vote to enable AICL HW reruns.
+ */
+ rc = vote(chip->hw_aicl_rerun_disable_votable,
+ WEAK_CHARGER_HW_AICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't enable hw aicl rerun rc=%d\n", rc);
+
+ rc = vote(chip->usb_suspend_votable, WEAK_CHARGER_EN_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("could not enable charger: %d\n", rc);
+
+ if (src_detect) {
+ update_usb_status(chip, usb_present, 0);
+ } else {
+ update_usb_status(chip, 0, false);
+ chip->aicl_irq_count = 0;
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+/**
+ * otg_oc_handler() - called when the usb otg goes over current
+ */
+#define NUM_OTG_RETRIES 5
+#define OTG_OC_RETRY_DELAY_US 50000
+static irqreturn_t otg_oc_handler(int irq, void *_chip)
+{
+ int rc;
+ struct smbchg_chip *chip = _chip;
+ s64 elapsed_us = ktime_us_delta(ktime_get(), chip->otg_enable_time);
+
+ pr_smb(PR_INTERRUPT, "triggered\n");
+
+ if (chip->schg_version == QPNP_SCHG_LITE) {
+ pr_warn("OTG OC triggered - OTG disabled\n");
+ return IRQ_HANDLED;
+ }
+
+ if (elapsed_us > OTG_OC_RETRY_DELAY_US)
+ chip->otg_retries = 0;
+
+ /*
+ * Due to a HW bug in the PMI8994 charger, the current inrush that
+ * occurs when connecting certain OTG devices can cause the OTG
+ * overcurrent protection to trip.
+ *
+ * The workaround is to retry enabling the OTG after an overcurrent
+ * interrupt, up to NUM_OTG_RETRIES times within the retry window.
+ */
+ if (chip->otg_retries < NUM_OTG_RETRIES) {
+ chip->otg_retries += 1;
+ pr_smb(PR_STATUS,
+ "Retrying OTG enable. Try #%d, elapsed_us %lld\n",
+ chip->otg_retries, elapsed_us);
+ rc = otg_oc_reset(chip);
+ if (rc)
+ pr_err("Failed to reset OTG OC state rc=%d\n", rc);
+ chip->otg_enable_time = ktime_get();
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * otg_fail_handler() - called when the usb otg fails
+ * (when vbat < OTG UVLO threshold)
+ */
+static irqreturn_t otg_fail_handler(int irq, void *_chip)
+{
+ pr_smb(PR_INTERRUPT, "triggered\n");
+ return IRQ_HANDLED;
+}
+
+/**
+ * aicl_done_handler() - called when the usb AICL algorithm is finished
+ * and a current is set.
+ */
+static irqreturn_t aicl_done_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ bool usb_present = is_usb_present(chip);
+ int aicl_level = smbchg_get_aicl_level_ma(chip);
+
+ pr_smb(PR_INTERRUPT, "triggered, aicl: %d\n", aicl_level);
+
+ increment_aicl_count(chip);
+
+ if (usb_present)
+ smbchg_parallel_usb_check_ok(chip);
+
+ if (chip->aicl_complete && chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * usbid_change_handler() - called when the usb RID changes.
+ * This is used mostly for detecting OTG
+ */
+static irqreturn_t usbid_change_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ bool otg_present;
+
+ pr_smb(PR_INTERRUPT, "triggered\n");
+
+ otg_present = is_otg_present(chip);
+ pr_smb(PR_MISC, "setting usb psy OTG = %d\n",
+ otg_present ? 1 : 0);
+
+ extcon_set_cable_state_(chip->extcon, EXTCON_USB_HOST, otg_present);
+
+ if (otg_present)
+ pr_smb(PR_STATUS, "OTG detected\n");
+
+ /* update FG */
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS,
+ get_prop_batt_status(chip));
+
+ return IRQ_HANDLED;
+}
+
+static int determine_initial_status(struct smbchg_chip *chip)
+{
+ union power_supply_propval type = {0, };
+
+ /*
+ * It is okay to read the interrupt status here since
+ * interrupts aren't requested yet. Reading the interrupt status
+ * clears the interrupt, so be careful to read interrupt
+ * status only in interrupt handling code otherwise.
+ */
+
+ batt_pres_handler(0, chip);
+ batt_hot_handler(0, chip);
+ batt_warm_handler(0, chip);
+ batt_cool_handler(0, chip);
+ batt_cold_handler(0, chip);
+ if (chip->typec_psy) {
+ get_property_from_typec(chip, POWER_SUPPLY_PROP_TYPE, &type);
+ update_typec_otg_status(chip, type.intval, true);
+ } else {
+ usbid_change_handler(0, chip);
+ }
+ src_detect_handler(0, chip);
+
+ chip->usb_present = is_usb_present(chip);
+ chip->dc_present = is_dc_present(chip);
+
+ if (chip->usb_present) {
+ int rc = 0;
+ pr_smb(PR_MISC, "setting usb dp=f dm=f\n");
+ if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg))
+ rc = regulator_enable(chip->dpdm_reg);
+ if (rc < 0) {
+ pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc);
+ return rc;
+ }
+ handle_usb_insertion(chip);
+ } else {
+ handle_usb_removal(chip);
+ }
+
+ return 0;
+}
+
+static int prechg_time[] = {
+ 24,
+ 48,
+ 96,
+ 192,
+};
+static int chg_time[] = {
+ 192,
+ 384,
+ 768,
+ 1536,
+};
+
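+/*
+ * The tables above are indexed by register code in smbchg_hw_init() below:
+ * the first entry that is >= the requested timeout is selected. For
+ * example (illustrative), qcom,charging-timeout-mins = <400> picks
+ * chg_time[2] = 768 minutes, i.e. register code 2.
+ */
+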
+enum bpd_type {
+ BPD_TYPE_BAT_NONE,
+ BPD_TYPE_BAT_ID,
+ BPD_TYPE_BAT_THM,
+ BPD_TYPE_BAT_THM_BAT_ID,
+ BPD_TYPE_DEFAULT,
+};
+
+static const char * const bpd_label[] = {
+ [BPD_TYPE_BAT_NONE] = "bpd_none",
+ [BPD_TYPE_BAT_ID] = "bpd_id",
+ [BPD_TYPE_BAT_THM] = "bpd_thm",
+ [BPD_TYPE_BAT_THM_BAT_ID] = "bpd_thm_id",
+};
+
+static inline int get_bpd(const char *name)
+{
+ int i = 0;
+ for (i = 0; i < ARRAY_SIZE(bpd_label); i++) {
+ if (strcmp(bpd_label[i], name) == 0)
+ return i;
+ }
+ return -EINVAL;
+}
+
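+/*
+ * Example (illustrative): get_bpd("bpd_thm") returns BPD_TYPE_BAT_THM,
+ * while an unknown string such as "bpd_foo" returns -EINVAL; see the
+ * qcom,bmd-pin-src parsing in smb_parse_dt() below.
+ */
+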
+#define REVISION1_REG 0x0
+#define DIG_MINOR 0
+#define DIG_MAJOR 1
+#define ANA_MINOR 2
+#define ANA_MAJOR 3
+#define CHGR_CFG1 0xFB
+#define RECHG_THRESHOLD_SRC_BIT BIT(1)
+#define TERM_I_SRC_BIT BIT(2)
+#define TERM_SRC_FG BIT(2)
+#define CHG_INHIB_CFG_REG 0xF7
+#define CHG_INHIBIT_50MV_VAL 0x00
+#define CHG_INHIBIT_100MV_VAL 0x01
+#define CHG_INHIBIT_200MV_VAL 0x02
+#define CHG_INHIBIT_300MV_VAL 0x03
+#define CHG_INHIBIT_MASK 0x03
+#define USE_REGISTER_FOR_CURRENT BIT(2)
+#define CHGR_CFG2 0xFC
+#define CHG_EN_SRC_BIT BIT(7)
+#define CHG_EN_POLARITY_BIT BIT(6)
+#define P2F_CHG_TRAN BIT(5)
+#define CHG_BAT_OV_ECC BIT(4)
+#define I_TERM_BIT BIT(3)
+#define AUTO_RECHG_BIT BIT(2)
+#define CHARGER_INHIBIT_BIT BIT(0)
+#define USB51_COMMAND_POL BIT(2)
+#define USB51AC_CTRL BIT(1)
+#define TR_8OR32B 0xFE
+#define BUCK_8_16_FREQ_BIT BIT(0)
+#define BM_CFG 0xF3
+#define BATT_MISSING_ALGO_BIT BIT(2)
+#define BMD_PIN_SRC_MASK SMB_MASK(1, 0)
+#define PIN_SRC_SHIFT 0
+#define CHGR_CFG 0xFF
+#define RCHG_LVL_BIT BIT(0)
+#define VCHG_EN_BIT BIT(1)
+#define VCHG_INPUT_CURRENT_BIT BIT(3)
+#define CFG_AFVC 0xF6
+#define VFLOAT_COMP_ENABLE_MASK SMB_MASK(2, 0)
+#define TR_RID_REG 0xFA
+#define FG_INPUT_FET_DELAY_BIT BIT(3)
+#define TRIM_OPTIONS_7_0 0xF6
+#define INPUT_MISSING_POLLER_EN_BIT BIT(3)
+#define CHGR_CCMP_CFG 0xFA
+#define JEITA_TEMP_HARD_LIMIT_BIT BIT(5)
+#define HVDCP_ADAPTER_SEL_MASK SMB_MASK(5, 4)
+#define HVDCP_ADAPTER_SEL_9V_BIT BIT(4)
+#define HVDCP_AUTH_ALG_EN_BIT BIT(6)
+#define CMD_APSD 0x41
+#define APSD_RERUN_BIT BIT(0)
+#define OTG_CFG 0xF1
+#define HICCUP_ENABLED_BIT BIT(6)
+#define OTG_PIN_POLARITY_BIT BIT(4)
+#define OTG_PIN_ACTIVE_LOW BIT(4)
+#define OTG_EN_CTRL_MASK SMB_MASK(3, 2)
+#define OTG_PIN_CTRL_RID_DIS 0x04
+#define OTG_CMD_CTRL_RID_EN 0x08
+#define AICL_ADC_BIT BIT(6)
+static void batt_ov_wa_check(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ /* disable the 'battery OV disables charging' feature */
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG2,
+ CHG_BAT_OV_ECC, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set chgr_cfg2 rc=%d\n", rc);
+ return;
+ }
+
+ /*
+ * If battery OV is set, restart charging by disabling and
+ * re-enabling it.
+ */
+ rc = smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read Battery RT status rc = %d\n", rc);
+ return;
+ }
+
+ if (reg & BAT_OV_BIT) {
+ rc = smbchg_charging_en(chip, false);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't disable charging: rc = %d\n", rc);
+ return;
+ }
+
+ /* delay for charging-disable to take effect */
+ msleep(200);
+
+ rc = smbchg_charging_en(chip, true);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't enable charging: rc = %d\n", rc);
+ return;
+ }
+ }
+}
+
+static int smbchg_hw_init(struct smbchg_chip *chip)
+{
+ int rc, i;
+ u8 reg, mask;
+
+ rc = smbchg_read(chip, chip->revision,
+ chip->misc_base + REVISION1_REG, 4);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read revision rc=%d\n",
+ rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "Charger Revision DIG: %d.%d; ANA: %d.%d\n",
+ chip->revision[DIG_MAJOR], chip->revision[DIG_MINOR],
+ chip->revision[ANA_MAJOR], chip->revision[ANA_MINOR]);
+
+ /* Setup 9V HVDCP */
+ if (!chip->hvdcp_not_supported) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_9V);
+ if (rc < 0) {
+ pr_err("Couldn't set hvdcp config in chgpath_chg rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if (chip->aicl_rerun_period_s > 0) {
+ rc = smbchg_set_aicl_rerun_period_s(chip,
+ chip->aicl_rerun_period_s);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set AICL rerun timer rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + TR_RID_REG,
+ FG_INPUT_FET_DELAY_BIT, FG_INPUT_FET_DELAY_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't disable fg input fet delay rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smbchg_sec_masked_write(chip, chip->misc_base + TRIM_OPTIONS_7_0,
+ INPUT_MISSING_POLLER_EN_BIT,
+ INPUT_MISSING_POLLER_EN_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't enable input missing poller rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /*
+ * Do not force using current from the register, i.e. use the auto
+ * power source detect (APSD) mA ratings for the initial current values.
+ *
+ * If this bit were set, AICL would not rerun at 9V for HVDCPs.
+ */
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ USE_REGISTER_FOR_CURRENT, 0);
+
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set input limit cmd rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Set chg en via the command register (enabled by writing bit 1),
+ * enable auto pre to fast and auto recharge by default, and
+ * enable current termination and charge inhibition based on
+ * the device tree configuration.
+ */
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG2,
+ CHG_EN_SRC_BIT | CHG_EN_POLARITY_BIT | P2F_CHG_TRAN
+ | I_TERM_BIT | AUTO_RECHG_BIT | CHARGER_INHIBIT_BIT,
+ CHG_EN_POLARITY_BIT
+ | (chip->chg_inhibit_en ? CHARGER_INHIBIT_BIT : 0)
+ | (chip->iterm_disabled ? I_TERM_BIT : 0));
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set chgr_cfg2 rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * enable battery charging to make sure it hasn't been changed earlier
+ * by the bootloader.
+ */
+ rc = smbchg_charging_en(chip, true);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't enable battery charging=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Based on the configuration, use the analog sensors or the fuel gauge
+ * ADC as the recharge threshold source.
+ */
+
+ if (chip->chg_inhibit_source_fg)
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG1,
+ TERM_I_SRC_BIT | RECHG_THRESHOLD_SRC_BIT,
+ TERM_SRC_FG | RECHG_THRESHOLD_SRC_BIT);
+ else
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG1,
+ TERM_I_SRC_BIT | RECHG_THRESHOLD_SRC_BIT, 0);
+
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set chgr_cfg2 rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * control USB suspend via command bits and set correct 100/500mA
+ * polarity on the usb current
+ */
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ USB51_COMMAND_POL | USB51AC_CTRL, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set usb_chgpth cfg rc=%d\n", rc);
+ return rc;
+ }
+
+ check_battery_type(chip);
+
+ /* set the float voltage */
+ if (chip->vfloat_mv != -EINVAL) {
+ rc = smbchg_float_voltage_set(chip, chip->vfloat_mv);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set float voltage rc = %d\n", rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "set vfloat to %d\n", chip->vfloat_mv);
+ }
+
+ /* set the fast charge current compensation */
+ if (chip->fastchg_current_comp != -EINVAL) {
+ rc = smbchg_fastchg_current_comp_set(chip,
+ chip->fastchg_current_comp);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set fastchg current comp rc = %d\n",
+ rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "set fastchg current comp to %d\n",
+ chip->fastchg_current_comp);
+ }
+
+ /* set the float voltage compensation */
+ if (chip->float_voltage_comp != -EINVAL) {
+ rc = smbchg_float_voltage_comp_set(chip,
+ chip->float_voltage_comp);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set float voltage comp rc = %d\n",
+ rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "set float voltage comp to %d\n",
+ chip->float_voltage_comp);
+ }
+
+ /* set iterm */
+ if (chip->iterm_ma != -EINVAL) {
+ if (chip->iterm_disabled) {
+ dev_err(chip->dev, "Error: Both iterm_disabled and iterm_ma set\n");
+ return -EINVAL;
+ } else {
+ smbchg_iterm_set(chip, chip->iterm_ma);
+ }
+ }
+
+ /* set the safety timers */
+ if (chip->safety_time != -EINVAL) {
+ reg = (chip->safety_time > 0 ? 0 : SFT_TIMER_DISABLE_BIT) |
+ (chip->prechg_safety_time > 0
+ ? 0 : PRECHG_SFT_TIMER_DISABLE_BIT);
+
+ for (i = 0; i < ARRAY_SIZE(chg_time); i++) {
+ if (chip->safety_time <= chg_time[i]) {
+ reg |= i << SAFETY_TIME_MINUTES_SHIFT;
+ break;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(prechg_time); i++) {
+ if (chip->prechg_safety_time <= prechg_time[i]) {
+ reg |= i;
+ break;
+ }
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->chgr_base + SFT_CFG,
+ SFT_EN_MASK | SFT_TO_MASK |
+ (chip->prechg_safety_time > 0
+ ? PRECHG_SFT_TO_MASK : 0), reg);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set safety timer rc = %d\n",
+ rc);
+ return rc;
+ }
+ chip->safety_timer_en = true;
+ } else {
+ rc = smbchg_read(chip, &reg, chip->chgr_base + SFT_CFG, 1);
+ if (rc < 0)
+ dev_err(chip->dev, "Unable to read SFT_CFG rc = %d\n",
+ rc);
+ else if (!(reg & SFT_EN_MASK))
+ chip->safety_timer_en = true;
+ }
+
+ /* configure jeita temperature hard limit */
+ if (chip->jeita_temp_hard_limit >= 0) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->chgr_base + CHGR_CCMP_CFG,
+ JEITA_TEMP_HARD_LIMIT_BIT,
+ chip->jeita_temp_hard_limit
+ ? 0 : JEITA_TEMP_HARD_LIMIT_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set jeita temp hard limit rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* make the buck switch faster to prevent some vbus oscillation */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + TR_8OR32B,
+ BUCK_8_16_FREQ_BIT, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set buck frequency rc = %d\n", rc);
+ return rc;
+ }
+
+ /* battery missing detection */
+ mask = BATT_MISSING_ALGO_BIT;
+ reg = chip->bmd_algo_disabled ? 0 : BATT_MISSING_ALGO_BIT;
+ if (chip->bmd_pin_src < BPD_TYPE_DEFAULT) {
+ mask |= BMD_PIN_SRC_MASK;
+ reg |= chip->bmd_pin_src << PIN_SRC_SHIFT;
+ }
+ rc = smbchg_sec_masked_write(chip,
+ chip->bat_if_base + BM_CFG, mask, reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set batt_missing config = %d\n",
+ rc);
+ return rc;
+ }
+
+ if (chip->vchg_adc_channel != -EINVAL) {
+ /* configure and enable VCHG */
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG,
+ VCHG_INPUT_CURRENT_BIT | VCHG_EN_BIT,
+ VCHG_INPUT_CURRENT_BIT | VCHG_EN_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set recharge rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ smbchg_charging_status_change(chip);
+
+ vote(chip->usb_suspend_votable, USER_EN_VOTER, !chip->chg_enabled, 0);
+ vote(chip->dc_suspend_votable, USER_EN_VOTER, !chip->chg_enabled, 0);
+ /* resume threshold */
+ if (chip->resume_delta_mv != -EINVAL) {
+
+ /*
+ * Configure only if the recharge threshold source is not
+ * fuel gauge ADC.
+ */
+ if (!chip->chg_inhibit_source_fg) {
+ if (chip->resume_delta_mv < 100)
+ reg = CHG_INHIBIT_50MV_VAL;
+ else if (chip->resume_delta_mv < 200)
+ reg = CHG_INHIBIT_100MV_VAL;
+ else if (chip->resume_delta_mv < 300)
+ reg = CHG_INHIBIT_200MV_VAL;
+ else
+ reg = CHG_INHIBIT_300MV_VAL;
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->chgr_base + CHG_INHIB_CFG_REG,
+ CHG_INHIBIT_MASK, reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set inhibit val rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->chgr_base + CHGR_CFG,
+ RCHG_LVL_BIT,
+ (chip->resume_delta_mv
+ < chip->tables.rchg_thr_mv)
+ ? 0 : RCHG_LVL_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set recharge rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* DC path current settings */
+ if (chip->dc_psy_type != -EINVAL) {
+ rc = vote(chip->dc_icl_votable, PSY_ICL_VOTER, true,
+ chip->dc_target_current_ma);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't vote for initial DC ICL rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+
+ /*
+ * On some devices the battery is powered via external sources which
+ * could raise its voltage above the float voltage. Smbchargers go
+ * into reverse boost in such a situation and the workaround is to
+ * disable float voltage compensation (note that the battery will appear
+ * hot/cold when powered via an external source).
+ */
+ if (chip->soft_vfloat_comp_disabled) {
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CFG_AFVC,
+ VFLOAT_COMP_ENABLE_MASK, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't disable soft vfloat rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = vote(chip->fcc_votable, BATT_TYPE_FCC_VOTER, true,
+ chip->cfg_fastchg_current_ma);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't vote fastchg ma rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_read(chip, &chip->original_usbin_allowance,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG, 1);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't read usb allowance rc=%d\n", rc);
+
+ if (chip->wipower_dyn_icl_avail) {
+ rc = smbchg_wipower_ilim_config(chip,
+ &(chip->wipower_default.entries[0]));
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set default wipower ilim = %d\n",
+ rc);
+ return rc;
+ }
+ }
+ /* unsuspend dc path, it could be suspended by the bootloader */
+ rc = smbchg_dc_suspend(chip, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't unsuspend dc path= %d\n", rc);
+ return rc;
+ }
+
+ if (chip->force_aicl_rerun) {
+ /* vote to enable hw aicl */
+ rc = vote(chip->hw_aicl_rerun_enable_indirect_votable,
+ DEFAULT_CONFIG_HW_AICL_VOTER, true, 0);
+ if (rc < 0) {
+ pr_err("Couldn't vote enable hw aicl rerun rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if (chip->schg_version == QPNP_SCHG_LITE) {
+ /* enable OTG hiccup mode */
+ rc = smbchg_sec_masked_write(chip, chip->otg_base + OTG_CFG,
+ HICCUP_ENABLED_BIT, HICCUP_ENABLED_BIT);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set OTG OC config rc = %d\n",
+ rc);
+ }
+
+ if (chip->otg_pinctrl) {
+ /* configure OTG enable to pin control active low */
+ rc = smbchg_sec_masked_write(chip, chip->otg_base + OTG_CFG,
+ OTG_PIN_POLARITY_BIT | OTG_EN_CTRL_MASK,
+ OTG_PIN_ACTIVE_LOW | OTG_PIN_CTRL_RID_DIS);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set OTG EN config rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if (chip->wa_flags & SMBCHG_BATT_OV_WA)
+ batt_ov_wa_check(chip);
+
+ /* turn off AICL adc for improved accuracy */
+ rc = smbchg_sec_masked_write(chip,
+ chip->misc_base + MISC_TRIM_OPT_15_8, AICL_ADC_BIT, 0);
+ if (rc)
+ pr_err("Couldn't write to MISC_TRIM_OPTIONS_15_8 rc=%d\n",
+ rc);
+
+ return rc;
+}
+
+static struct of_device_id smbchg_match_table[] = {
+ {
+ .compatible = "qcom,qpnp-smbcharger",
+ },
+ { },
+};
+
+#define DC_MA_MIN 300
+#define DC_MA_MAX 2000
+#define OF_PROP_READ(chip, prop, dt_property, retval, optional) \
+do { \
+ if (retval) \
+ break; \
+ if (optional) \
+ prop = -EINVAL; \
+ \
+ retval = of_property_read_u32(chip->pdev->dev.of_node, \
+ "qcom," dt_property , \
+ &prop); \
+ \
+ if ((retval == -EINVAL) && optional) \
+ retval = 0; \
+ else if (retval) \
+ dev_err(chip->dev, "Error reading " #dt_property \
+ " property rc = %d\n", rc); \
+} while (0)
+
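+/*
+ * Example expansion (illustrative): OF_PROP_READ(chip, chip->iterm_ma,
+ * "iterm-ma", rc, 1) reads "qcom,iterm-ma" into chip->iterm_ma; with
+ * optional set, a missing property leaves the value at -EINVAL and keeps
+ * rc at 0 so later reads still run.
+ */
+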
+#define ILIM_ENTRIES 3
+#define VOLTAGE_RANGE_ENTRIES 2
+#define RANGE_ENTRY (ILIM_ENTRIES + VOLTAGE_RANGE_ENTRIES)
+static int smb_parse_wipower_map_dt(struct smbchg_chip *chip,
+ struct ilim_map *map, char *property)
+{
+ struct device_node *node = chip->dev->of_node;
+ int total_elements, size;
+ struct property *prop;
+ const __be32 *data;
+ int num, i;
+
+ prop = of_find_property(node, property, &size);
+ if (!prop) {
+ dev_err(chip->dev, "%s missing\n", property);
+ return -EINVAL;
+ }
+
+ total_elements = size / sizeof(int);
+ if (total_elements % RANGE_ENTRY) {
+ dev_err(chip->dev, "%s table not in multiple of %d, total elements = %d\n",
+ property, RANGE_ENTRY, total_elements);
+ return -EINVAL;
+ }
+
+ data = prop->value;
+ num = total_elements / RANGE_ENTRY;
+ map->entries = devm_kzalloc(chip->dev,
+ num * sizeof(struct ilim_entry), GFP_KERNEL);
+ if (!map->entries) {
+ dev_err(chip->dev, "kzalloc failed for default ilim\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < num; i++) {
+ map->entries[i].vmin_uv = be32_to_cpup(data++);
+ map->entries[i].vmax_uv = be32_to_cpup(data++);
+ map->entries[i].icl_pt_ma = be32_to_cpup(data++);
+ map->entries[i].icl_lv_ma = be32_to_cpup(data++);
+ map->entries[i].icl_hv_ma = be32_to_cpup(data++);
+ }
+ map->num = num;
+ return 0;
+}
+
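+/*
+ * Illustrative DT layout for the 5-tuple map parsed above (values are
+ * made up, one RANGE_ENTRY per line):
+ *
+ * qcom,wipower-default-ilim-map =
+ *   <4000000 5500000 500 500 500>,
+ *   <5500000 7000000 600 600 600>;
+ *
+ * i.e. <vmin_uv vmax_uv icl_pt_ma icl_lv_ma icl_hv_ma> per entry.
+ */
+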
+static int smb_parse_wipower_dt(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ chip->wipower_dyn_icl_avail = false;
+
+ if (!chip->vadc_dev)
+ goto err;
+
+ rc = smb_parse_wipower_map_dt(chip, &chip->wipower_default,
+ "qcom,wipower-default-ilim-map");
+ if (rc) {
+ dev_err(chip->dev, "failed to parse wipower-pt-ilim-map rc = %d\n",
+ rc);
+ goto err;
+ }
+
+ rc = smb_parse_wipower_map_dt(chip, &chip->wipower_pt,
+ "qcom,wipower-pt-ilim-map");
+ if (rc) {
+ dev_err(chip->dev, "failed to parse wipower-pt-ilim-map rc = %d\n",
+ rc);
+ goto err;
+ }
+
+ rc = smb_parse_wipower_map_dt(chip, &chip->wipower_div2,
+ "qcom,wipower-div2-ilim-map");
+ if (rc) {
+ dev_err(chip->dev, "failed to parse wipower-div2-ilim-map rc = %d\n",
+ rc);
+ goto err;
+ }
+ chip->wipower_dyn_icl_avail = true;
+ return 0;
+err:
+ chip->wipower_default.num = 0;
+ chip->wipower_pt.num = 0;
+ chip->wipower_div2.num = 0;
+ if (chip->wipower_default.entries)
+ devm_kfree(chip->dev, chip->wipower_default.entries);
+ if (chip->wipower_pt.entries)
+ devm_kfree(chip->dev, chip->wipower_pt.entries);
+ if (chip->wipower_div2.entries)
+ devm_kfree(chip->dev, chip->wipower_div2.entries);
+ chip->wipower_default.entries = NULL;
+ chip->wipower_pt.entries = NULL;
+ chip->wipower_div2.entries = NULL;
+ chip->vadc_dev = NULL;
+ return rc;
+}
+
+#define DEFAULT_VLED_MAX_UV 3500000
+#define DEFAULT_FCC_MA 2000
+static int smb_parse_dt(struct smbchg_chip *chip)
+{
+ int rc = 0, ocp_thresh = -EINVAL;
+ struct device_node *node = chip->dev->of_node;
+ const char *dc_psy_type, *bpd;
+
+ if (!node) {
+ dev_err(chip->dev, "device tree info. missing\n");
+ return -EINVAL;
+ }
+
+ /* read optional u32 properties */
+ OF_PROP_READ(chip, ocp_thresh,
+ "ibat-ocp-threshold-ua", rc, 1);
+ if (ocp_thresh >= 0)
+ smbchg_ibat_ocp_threshold_ua = ocp_thresh;
+ OF_PROP_READ(chip, chip->iterm_ma, "iterm-ma", rc, 1);
+ OF_PROP_READ(chip, chip->cfg_fastchg_current_ma,
+ "fastchg-current-ma", rc, 1);
+ if (chip->cfg_fastchg_current_ma == -EINVAL)
+ chip->cfg_fastchg_current_ma = DEFAULT_FCC_MA;
+ OF_PROP_READ(chip, chip->vfloat_mv, "float-voltage-mv", rc, 1);
+ OF_PROP_READ(chip, chip->safety_time, "charging-timeout-mins", rc, 1);
+ OF_PROP_READ(chip, chip->vled_max_uv, "vled-max-uv", rc, 1);
+ if (chip->vled_max_uv < 0)
+ chip->vled_max_uv = DEFAULT_VLED_MAX_UV;
+ OF_PROP_READ(chip, chip->rpara_uohm, "rparasitic-uohm", rc, 1);
+ if (chip->rpara_uohm < 0)
+ chip->rpara_uohm = 0;
+ OF_PROP_READ(chip, chip->prechg_safety_time, "precharging-timeout-mins",
+ rc, 1);
+ OF_PROP_READ(chip, chip->fastchg_current_comp, "fastchg-current-comp",
+ rc, 1);
+ OF_PROP_READ(chip, chip->float_voltage_comp, "float-voltage-comp",
+ rc, 1);
+ if (chip->safety_time != -EINVAL &&
+ (chip->safety_time > chg_time[ARRAY_SIZE(chg_time) - 1])) {
+ dev_err(chip->dev, "Bad charging-timeout-mins %d\n",
+ chip->safety_time);
+ return -EINVAL;
+ }
+ if (chip->prechg_safety_time != -EINVAL &&
+ (chip->prechg_safety_time >
+ prechg_time[ARRAY_SIZE(prechg_time) - 1])) {
+ dev_err(chip->dev, "Bad precharging-timeout-mins %d\n",
+ chip->prechg_safety_time);
+ return -EINVAL;
+ }
+ OF_PROP_READ(chip, chip->resume_delta_mv, "resume-delta-mv", rc, 1);
+ OF_PROP_READ(chip, chip->parallel.min_current_thr_ma,
+ "parallel-usb-min-current-ma", rc, 1);
+ OF_PROP_READ(chip, chip->parallel.min_9v_current_thr_ma,
+ "parallel-usb-9v-min-current-ma", rc, 1);
+ OF_PROP_READ(chip, chip->parallel.allowed_lowering_ma,
+ "parallel-allowed-lowering-ma", rc, 1);
+ if (chip->parallel.min_current_thr_ma != -EINVAL
+ && chip->parallel.min_9v_current_thr_ma != -EINVAL)
+ chip->parallel.avail = true;
+ /*
+ * use the dt values if they exist, otherwise do not touch the params
+ */
+ of_property_read_u32(node, "qcom,parallel-main-chg-fcc-percent",
+ &smbchg_main_chg_fcc_percent);
+ of_property_read_u32(node, "qcom,parallel-main-chg-icl-percent",
+ &smbchg_main_chg_icl_percent);
+ pr_smb(PR_STATUS, "parallel usb thr: %d, 9v thr: %d\n",
+ chip->parallel.min_current_thr_ma,
+ chip->parallel.min_9v_current_thr_ma);
+ OF_PROP_READ(chip, chip->jeita_temp_hard_limit,
+ "jeita-temp-hard-limit", rc, 1);
+ OF_PROP_READ(chip, chip->aicl_rerun_period_s,
+ "aicl-rerun-period-s", rc, 1);
+ OF_PROP_READ(chip, chip->vchg_adc_channel,
+ "vchg-adc-channel-id", rc, 1);
+
+ /* read boolean configuration properties */
+ chip->use_vfloat_adjustments = of_property_read_bool(node,
+ "qcom,autoadjust-vfloat");
+ chip->bmd_algo_disabled = of_property_read_bool(node,
+ "qcom,bmd-algo-disabled");
+ chip->iterm_disabled = of_property_read_bool(node,
+ "qcom,iterm-disabled");
+ chip->soft_vfloat_comp_disabled = of_property_read_bool(node,
+ "qcom,soft-vfloat-comp-disabled");
+ chip->chg_enabled = !(of_property_read_bool(node,
+ "qcom,charging-disabled"));
+ chip->charge_unknown_battery = of_property_read_bool(node,
+ "qcom,charge-unknown-battery");
+ chip->chg_inhibit_en = of_property_read_bool(node,
+ "qcom,chg-inhibit-en");
+ chip->chg_inhibit_source_fg = of_property_read_bool(node,
+ "qcom,chg-inhibit-fg");
+ chip->low_volt_dcin = of_property_read_bool(node,
+ "qcom,low-volt-dcin");
+ chip->force_aicl_rerun = of_property_read_bool(node,
+ "qcom,force-aicl-rerun");
+ chip->skip_usb_suspend_for_fake_battery = of_property_read_bool(node,
+ "qcom,skip-usb-suspend-for-fake-battery");
+
+ /* parse the battery missing detection pin source */
+ rc = of_property_read_string(chip->pdev->dev.of_node,
+ "qcom,bmd-pin-src", &bpd);
+ if (rc) {
+ /* Select BAT_THM as default BPD scheme */
+ chip->bmd_pin_src = BPD_TYPE_DEFAULT;
+ rc = 0;
+ } else {
+ chip->bmd_pin_src = get_bpd(bpd);
+ if (chip->bmd_pin_src < 0) {
+ dev_err(chip->dev,
+ "failed to determine bpd schema %d\n", rc);
+ return rc;
+ }
+ }
+
+ /* parse the dc power supply configuration */
+ rc = of_property_read_string(node, "qcom,dc-psy-type", &dc_psy_type);
+ if (rc) {
+ chip->dc_psy_type = -EINVAL;
+ rc = 0;
+ } else {
+ if (strcmp(dc_psy_type, "Mains") == 0)
+ chip->dc_psy_type = POWER_SUPPLY_TYPE_MAINS;
+ else if (strcmp(dc_psy_type, "Wireless") == 0)
+ chip->dc_psy_type = POWER_SUPPLY_TYPE_WIRELESS;
+ else if (strcmp(dc_psy_type, "Wipower") == 0)
+ chip->dc_psy_type = POWER_SUPPLY_TYPE_WIPOWER;
+ }
+ if (chip->dc_psy_type != -EINVAL) {
+ OF_PROP_READ(chip, chip->dc_target_current_ma,
+ "dc-psy-ma", rc, 0);
+ if (rc)
+ return rc;
+ if (chip->dc_target_current_ma < DC_MA_MIN
+ || chip->dc_target_current_ma > DC_MA_MAX) {
+ dev_err(chip->dev, "Bad dc mA %d\n",
+ chip->dc_target_current_ma);
+ return -EINVAL;
+ }
+ }
+
+ if (chip->dc_psy_type == POWER_SUPPLY_TYPE_WIPOWER)
+ smb_parse_wipower_dt(chip);
+
+ /* read the bms power supply name */
+ rc = of_property_read_string(node, "qcom,bms-psy-name",
+ &chip->bms_psy_name);
+ if (rc)
+ chip->bms_psy_name = NULL;
+
+ /* read the battery power supply name */
+ rc = of_property_read_string(node, "qcom,battery-psy-name",
+ &chip->battery_psy_name);
+ if (rc)
+ chip->battery_psy_name = "battery";
+
+ /* Get the charger led support property */
+ chip->cfg_chg_led_sw_ctrl =
+ of_property_read_bool(node, "qcom,chg-led-sw-controls");
+ chip->cfg_chg_led_support =
+ of_property_read_bool(node, "qcom,chg-led-support");
+
+ if (of_find_property(node, "qcom,thermal-mitigation",
+ &chip->thermal_levels)) {
+ chip->thermal_mitigation = devm_kzalloc(chip->dev,
+ chip->thermal_levels,
+ GFP_KERNEL);
+
+ if (chip->thermal_mitigation == NULL) {
+ dev_err(chip->dev, "thermal mitigation kzalloc() failed.\n");
+ return -ENOMEM;
+ }
+
+ chip->thermal_levels /= sizeof(int);
+ rc = of_property_read_u32_array(node,
+ "qcom,thermal-mitigation",
+ chip->thermal_mitigation, chip->thermal_levels);
+ if (rc) {
+ dev_err(chip->dev,
+ "Couldn't read threm limits rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ chip->skip_usb_notification
+ = of_property_read_bool(node,
+ "qcom,skip-usb-notification");
+
+ chip->otg_pinctrl = of_property_read_bool(node, "qcom,otg-pinctrl");
+
+ return 0;
+}
+
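+/*
+ * Illustrative DT fragment (values are examples only, not a recommended
+ * configuration) covering some of the properties parsed above:
+ *
+ * qcom,iterm-ma = <100>;
+ * qcom,float-voltage-mv = <4350>;
+ * qcom,fastchg-current-ma = <2500>;
+ * qcom,resume-delta-mv = <200>;
+ * qcom,dc-psy-type = "Mains";
+ * qcom,dc-psy-ma = <1500>;
+ * qcom,thermal-mitigation = <2500 1500 900 600>;
+ */
+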
+#define SUBTYPE_REG 0x5
+#define SMBCHG_CHGR_SUBTYPE 0x1
+#define SMBCHG_OTG_SUBTYPE 0x8
+#define SMBCHG_BAT_IF_SUBTYPE 0x3
+#define SMBCHG_USB_CHGPTH_SUBTYPE 0x4
+#define SMBCHG_DC_CHGPTH_SUBTYPE 0x5
+#define SMBCHG_MISC_SUBTYPE 0x7
+#define SMBCHG_LITE_CHGR_SUBTYPE 0x51
+#define SMBCHG_LITE_OTG_SUBTYPE 0x58
+#define SMBCHG_LITE_BAT_IF_SUBTYPE 0x53
+#define SMBCHG_LITE_USB_CHGPTH_SUBTYPE 0x54
+#define SMBCHG_LITE_DC_CHGPTH_SUBTYPE 0x55
+#define SMBCHG_LITE_MISC_SUBTYPE 0x57
+static int smbchg_request_irq(struct smbchg_chip *chip,
+ struct device_node *child,
+ int irq_num, char *irq_name,
+ irqreturn_t (irq_handler)(int irq, void *_chip),
+ int flags)
+{
+ int rc;
+
+ irq_num = of_irq_get_byname(child, irq_name);
+ if (irq_num < 0) {
+ dev_err(chip->dev, "Unable to get %s irq\n", irq_name);
+ return -ENXIO;
+ }
+ rc = devm_request_threaded_irq(chip->dev,
+ irq_num, NULL, irq_handler, flags, irq_name,
+ chip);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to request %s irq: %d\n",
+ irq_name, rc);
+ return -ENXIO;
+ }
+ return 0;
+}
+
+static int smbchg_request_irqs(struct smbchg_chip *chip)
+{
+ int rc = 0;
+ unsigned int base;
+ struct device_node *child;
+ u8 subtype;
+ unsigned long flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
+ | IRQF_ONESHOT;
+
+ if (of_get_available_child_count(chip->pdev->dev.of_node) == 0) {
+ pr_err("no child nodes\n");
+ return -ENXIO;
+ }
+
+ for_each_available_child_of_node(chip->pdev->dev.of_node, child) {
+ rc = of_property_read_u32(child, "reg", &base);
+ if (rc < 0) {
+ rc = 0;
+ continue;
+ }
+
+ rc = smbchg_read(chip, &subtype, base + SUBTYPE_REG, 1);
+ if (rc) {
+ dev_err(chip->dev, "Peripheral subtype read failed rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ switch (subtype) {
+ case SMBCHG_CHGR_SUBTYPE:
+ case SMBCHG_LITE_CHGR_SUBTYPE:
+ rc = smbchg_request_irq(chip, child,
+ chip->chg_error_irq, "chg-error",
+ chg_error_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child, chip->taper_irq,
+ "chg-taper-thr", taper_handler,
+ (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+ if (rc < 0)
+ return rc;
+ disable_irq_nosync(chip->taper_irq);
+ rc = smbchg_request_irq(chip, child, chip->chg_term_irq,
+ "chg-tcc-thr", chg_term_handler,
+ (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child, chip->recharge_irq,
+ "chg-rechg-thr", recharge_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child, chip->fastchg_irq,
+ "chg-p2f-thr", fastchg_handler, flags);
+ if (rc < 0)
+ return rc;
+ enable_irq_wake(chip->chg_term_irq);
+ enable_irq_wake(chip->chg_error_irq);
+ enable_irq_wake(chip->fastchg_irq);
+ break;
+ case SMBCHG_BAT_IF_SUBTYPE:
+ case SMBCHG_LITE_BAT_IF_SUBTYPE:
+ rc = smbchg_request_irq(chip, child, chip->batt_hot_irq,
+ "batt-hot", batt_hot_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->batt_warm_irq,
+ "batt-warm", batt_warm_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->batt_cool_irq,
+ "batt-cool", batt_cool_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->batt_cold_irq,
+ "batt-cold", batt_cold_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->batt_missing_irq,
+ "batt-missing", batt_pres_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->vbat_low_irq,
+ "batt-low", vbat_low_handler, flags);
+ if (rc < 0)
+ return rc;
+
+ enable_irq_wake(chip->batt_hot_irq);
+ enable_irq_wake(chip->batt_warm_irq);
+ enable_irq_wake(chip->batt_cool_irq);
+ enable_irq_wake(chip->batt_cold_irq);
+ enable_irq_wake(chip->batt_missing_irq);
+ enable_irq_wake(chip->vbat_low_irq);
+ break;
+ case SMBCHG_USB_CHGPTH_SUBTYPE:
+ case SMBCHG_LITE_USB_CHGPTH_SUBTYPE:
+ rc = smbchg_request_irq(chip, child,
+ chip->usbin_uv_irq,
+ "usbin-uv", usbin_uv_handler,
+ flags | IRQF_EARLY_RESUME);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->usbin_ov_irq,
+ "usbin-ov", usbin_ov_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->src_detect_irq,
+ "usbin-src-det",
+ src_detect_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->aicl_done_irq,
+ "aicl-done",
+ aicl_done_handler,
+ (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+ if (rc < 0)
+ return rc;
+
+ if (chip->schg_version != QPNP_SCHG_LITE) {
+ rc = smbchg_request_irq(chip, child,
+ chip->otg_fail_irq, "otg-fail",
+ otg_fail_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->otg_oc_irq, "otg-oc",
+ otg_oc_handler,
+ (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->usbid_change_irq, "usbid-change",
+ usbid_change_handler,
+ (IRQF_TRIGGER_FALLING | IRQF_ONESHOT));
+ if (rc < 0)
+ return rc;
+ enable_irq_wake(chip->otg_oc_irq);
+ enable_irq_wake(chip->usbid_change_irq);
+ enable_irq_wake(chip->otg_fail_irq);
+ }
+ enable_irq_wake(chip->usbin_uv_irq);
+ enable_irq_wake(chip->usbin_ov_irq);
+ enable_irq_wake(chip->src_detect_irq);
+ if (chip->parallel.avail && chip->usb_present) {
+ rc = enable_irq_wake(chip->aicl_done_irq);
+ chip->enable_aicl_wake = true;
+ }
+ break;
+ case SMBCHG_DC_CHGPTH_SUBTYPE:
+ case SMBCHG_LITE_DC_CHGPTH_SUBTYPE:
+ rc = smbchg_request_irq(chip, child, chip->dcin_uv_irq,
+ "dcin-uv", dcin_uv_handler, flags);
+ if (rc < 0)
+ return rc;
+ enable_irq_wake(chip->dcin_uv_irq);
+ break;
+ case SMBCHG_MISC_SUBTYPE:
+ case SMBCHG_LITE_MISC_SUBTYPE:
+ rc = smbchg_request_irq(chip, child, chip->power_ok_irq,
+ "power-ok", power_ok_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child, chip->chg_hot_irq,
+ "temp-shutdown", chg_hot_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child, chip->wdog_timeout_irq,
+ "wdog-timeout",
+ wdog_timeout_handler, flags);
+ if (rc < 0)
+ return rc;
+ enable_irq_wake(chip->chg_hot_irq);
+ enable_irq_wake(chip->wdog_timeout_irq);
+ break;
+ case SMBCHG_OTG_SUBTYPE:
+ break;
+ case SMBCHG_LITE_OTG_SUBTYPE:
+ rc = smbchg_request_irq(chip, child,
+ chip->usbid_change_irq, "usbid-change",
+ usbid_change_handler,
+ (IRQF_TRIGGER_FALLING | IRQF_ONESHOT));
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->otg_oc_irq, "otg-oc",
+ otg_oc_handler,
+ (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->otg_fail_irq, "otg-fail",
+ otg_fail_handler, flags);
+ if (rc < 0)
+ return rc;
+ enable_irq_wake(chip->usbid_change_irq);
+ enable_irq_wake(chip->otg_oc_irq);
+ enable_irq_wake(chip->otg_fail_irq);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+#define REQUIRE_BASE(chip, base, rc) \
+do { \
+ if (!rc && !chip->base) { \
+ dev_err(chip->dev, "Missing " #base "\n"); \
+ rc = -EINVAL; \
+ } \
+} while (0)
+
+static int smbchg_parse_peripherals(struct smbchg_chip *chip)
+{
+ int rc = 0;
+ unsigned int base;
+ struct device_node *child;
+ u8 subtype;
+
+ if (of_get_available_child_count(chip->pdev->dev.of_node) == 0) {
+ pr_err("no child nodes\n");
+ return -ENXIO;
+ }
+
+ for_each_available_child_of_node(chip->pdev->dev.of_node, child) {
+ rc = of_property_read_u32(child, "reg", &base);
+ if (rc < 0) {
+ rc = 0;
+ continue;
+ }
+
+ rc = smbchg_read(chip, &subtype, base + SUBTYPE_REG, 1);
+ if (rc) {
+ dev_err(chip->dev, "Peripheral subtype read failed rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ switch (subtype) {
+ case SMBCHG_CHGR_SUBTYPE:
+ case SMBCHG_LITE_CHGR_SUBTYPE:
+ chip->chgr_base = base;
+ break;
+ case SMBCHG_BAT_IF_SUBTYPE:
+ case SMBCHG_LITE_BAT_IF_SUBTYPE:
+ chip->bat_if_base = base;
+ break;
+ case SMBCHG_USB_CHGPTH_SUBTYPE:
+ case SMBCHG_LITE_USB_CHGPTH_SUBTYPE:
+ chip->usb_chgpth_base = base;
+ break;
+ case SMBCHG_DC_CHGPTH_SUBTYPE:
+ case SMBCHG_LITE_DC_CHGPTH_SUBTYPE:
+ chip->dc_chgpth_base = base;
+ break;
+ case SMBCHG_MISC_SUBTYPE:
+ case SMBCHG_LITE_MISC_SUBTYPE:
+ chip->misc_base = base;
+ break;
+ case SMBCHG_OTG_SUBTYPE:
+ case SMBCHG_LITE_OTG_SUBTYPE:
+ chip->otg_base = base;
+ break;
+ }
+ }
+
+ REQUIRE_BASE(chip, chgr_base, rc);
+ REQUIRE_BASE(chip, bat_if_base, rc);
+ REQUIRE_BASE(chip, usb_chgpth_base, rc);
+ REQUIRE_BASE(chip, dc_chgpth_base, rc);
+ REQUIRE_BASE(chip, misc_base, rc);
+
+ return rc;
+}
+
+static inline void dump_reg(struct smbchg_chip *chip, u16 addr,
+ const char *name)
+{
+ u8 reg;
+
+ smbchg_read(chip, &reg, addr, 1);
+ pr_smb(PR_DUMP, "%s - %04X = %02X\n", name, addr, reg);
+}
+
+/* dumps useful registers for debug */
+static void dump_regs(struct smbchg_chip *chip)
+{
+ u16 addr;
+
+ /* charger peripheral */
+ for (addr = 0xB; addr <= 0x10; addr++)
+ dump_reg(chip, chip->chgr_base + addr, "CHGR Status");
+ for (addr = 0xF0; addr <= 0xFF; addr++)
+ dump_reg(chip, chip->chgr_base + addr, "CHGR Config");
+ /* battery interface peripheral */
+ dump_reg(chip, chip->bat_if_base + RT_STS, "BAT_IF Status");
+ dump_reg(chip, chip->bat_if_base + CMD_CHG_REG, "BAT_IF Command");
+ for (addr = 0xF0; addr <= 0xFB; addr++)
+ dump_reg(chip, chip->bat_if_base + addr, "BAT_IF Config");
+ /* usb charge path peripheral */
+ for (addr = 0x7; addr <= 0x10; addr++)
+ dump_reg(chip, chip->usb_chgpth_base + addr, "USB Status");
+ dump_reg(chip, chip->usb_chgpth_base + CMD_IL, "USB Command");
+ for (addr = 0xF0; addr <= 0xF5; addr++)
+ dump_reg(chip, chip->usb_chgpth_base + addr, "USB Config");
+ /* dc charge path peripheral */
+ dump_reg(chip, chip->dc_chgpth_base + RT_STS, "DC Status");
+ for (addr = 0xF0; addr <= 0xF6; addr++)
+ dump_reg(chip, chip->dc_chgpth_base + addr, "DC Config");
+ /* misc peripheral */
+ dump_reg(chip, chip->misc_base + IDEV_STS, "MISC Status");
+ dump_reg(chip, chip->misc_base + RT_STS, "MISC Status");
+ for (addr = 0xF0; addr <= 0xF3; addr++)
+ dump_reg(chip, chip->misc_base + addr, "MISC CFG");
+}
+
+static int create_debugfs_entries(struct smbchg_chip *chip)
+{
+ struct dentry *ent;
+
+ chip->debug_root = debugfs_create_dir("qpnp-smbcharger", NULL);
+ if (!chip->debug_root) {
+ dev_err(chip->dev, "Couldn't create debug dir\n");
+ return -EINVAL;
+ }
+
+ ent = debugfs_create_file("force_dcin_icl_check",
+ S_IFREG | S_IWUSR | S_IRUGO,
+ chip->debug_root, chip,
+ &force_dcin_icl_ops);
+ if (!ent) {
+ dev_err(chip->dev,
+ "Couldn't create force dcin icl check file\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
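+/*
+ * Usage note (illustrative): with debugfs mounted at /sys/kernel/debug,
+ * the file created above appears as
+ * /sys/kernel/debug/qpnp-smbcharger/force_dcin_icl_check.
+ */
+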
+static int smbchg_check_chg_version(struct smbchg_chip *chip)
+{
+ struct pmic_revid_data *pmic_rev_id;
+ struct device_node *revid_dev_node;
+ int rc;
+
+ revid_dev_node = of_parse_phandle(chip->pdev->dev.of_node,
+ "qcom,pmic-revid", 0);
+ if (!revid_dev_node) {
+ pr_err("Missing qcom,pmic-revid property - driver failed\n");
+ return -EINVAL;
+ }
+
+ pmic_rev_id = get_revid_data(revid_dev_node);
+ if (IS_ERR(pmic_rev_id)) {
+ rc = PTR_ERR(pmic_rev_id);
+ if (rc != -EPROBE_DEFER)
+ pr_err("Unable to get pmic_revid rc=%d\n", rc);
+ return rc;
+ }
+
+ switch (pmic_rev_id->pmic_subtype) {
+ case PMI8994:
+ chip->wa_flags |= SMBCHG_AICL_DEGLITCH_WA
+ | SMBCHG_BATT_OV_WA
+ | SMBCHG_CC_ESR_WA
+ | SMBCHG_RESTART_WA;
+ use_pmi8994_tables(chip);
+ chip->schg_version = QPNP_SCHG;
+ break;
+ case PMI8950:
+ case PMI8937:
+ chip->wa_flags |= SMBCHG_BATT_OV_WA;
+ if (pmic_rev_id->rev4 < 2) /* PMI8950 1.0 */ {
+ chip->wa_flags |= SMBCHG_AICL_DEGLITCH_WA;
+ } else { /* rev > PMI8950 v1.0 */
+ chip->wa_flags |= SMBCHG_HVDCP_9V_EN_WA
+ | SMBCHG_USB100_WA;
+ }
+ use_pmi8994_tables(chip);
+ chip->tables.aicl_rerun_period_table =
+ aicl_rerun_period_schg_lite;
+ chip->tables.aicl_rerun_period_len =
+ ARRAY_SIZE(aicl_rerun_period_schg_lite);
+
+ chip->schg_version = QPNP_SCHG_LITE;
+ if (pmic_rev_id->pmic_subtype == PMI8937)
+ chip->hvdcp_not_supported = true;
+ break;
+ case PMI8996:
+ chip->wa_flags |= SMBCHG_CC_ESR_WA
+ | SMBCHG_FLASH_ICL_DISABLE_WA
+ | SMBCHG_RESTART_WA
+ | SMBCHG_FLASH_BUCK_SWITCH_FREQ_WA;
+ use_pmi8996_tables(chip);
+ chip->schg_version = QPNP_SCHG;
+ break;
+ default:
+ pr_err("PMIC subtype %d not supported, WA flags not set\n",
+ pmic_rev_id->pmic_subtype);
+ }
+
+ pr_smb(PR_STATUS, "pmic=%s, wa_flags=0x%x, hvdcp_supported=%s\n",
+ pmic_rev_id->pmic_name, chip->wa_flags,
+ chip->hvdcp_not_supported ? "false" : "true");
+
+ return 0;
+}
+
+static void rerun_hvdcp_det_if_necessary(struct smbchg_chip *chip)
+{
+ enum power_supply_type usb_supply_type;
+ char *usb_type_name;
+ int rc;
+
+ if (!(chip->wa_flags & SMBCHG_RESTART_WA))
+ return;
+
+ read_usb_type(chip, &usb_type_name, &usb_supply_type);
+ if (usb_supply_type == POWER_SUPPLY_TYPE_USB_DCP
+ && !is_hvdcp_present(chip)) {
+ pr_smb(PR_STATUS, "DCP found rerunning APSD\n");
+ rc = vote(chip->usb_icl_votable,
+ CHG_SUSPEND_WORKAROUND_ICL_VOTER, true, 300);
+ if (rc < 0)
+ pr_err("Couldn't vote for 300mA for suspend wa, going ahead rc=%d\n",
+ rc);
+
+ pr_smb(PR_STATUS, "Faking Removal\n");
+ fake_insertion_removal(chip, false);
+ msleep(500);
+ pr_smb(PR_STATUS, "Faking Insertion\n");
+ fake_insertion_removal(chip, true);
+
+ read_usb_type(chip, &usb_type_name, &usb_supply_type);
+ if (usb_supply_type != POWER_SUPPLY_TYPE_USB_DCP) {
+ msleep(500);
+ pr_smb(PR_STATUS, "Fake Removal again as type!=DCP\n");
+ fake_insertion_removal(chip, false);
+ msleep(500);
+ pr_smb(PR_STATUS, "Fake Insert again as type!=DCP\n");
+ fake_insertion_removal(chip, true);
+ }
+
+ rc = vote(chip->usb_icl_votable,
+ CHG_SUSPEND_WORKAROUND_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't vote for 0 for suspend wa, going ahead rc=%d\n",
+ rc);
+ }
+}
+
+static int smbchg_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct smbchg_chip *chip;
+ struct power_supply *typec_psy = NULL;
+ struct qpnp_vadc_chip *vadc_dev, *vchg_vadc_dev;
+ const char *typec_psy_name;
+ struct power_supply_config usb_psy_cfg = {};
+ struct power_supply_config batt_psy_cfg = {};
+ struct power_supply_config dc_psy_cfg = {};
+
+ if (of_property_read_bool(pdev->dev.of_node, "qcom,external-typec")) {
+ /* read the type power supply name */
+ rc = of_property_read_string(pdev->dev.of_node,
+ "qcom,typec-psy-name", &typec_psy_name);
+ if (rc) {
+ pr_err("failed to get prop typec-psy-name rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ typec_psy = power_supply_get_by_name(typec_psy_name);
+ if (!typec_psy) {
+ pr_smb(PR_STATUS,
+ "Type-C supply not found, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+ }
+
+ vadc_dev = NULL;
+ if (of_find_property(pdev->dev.of_node, "qcom,dcin-vadc", NULL)) {
+ vadc_dev = qpnp_get_vadc(&pdev->dev, "dcin");
+ if (IS_ERR(vadc_dev)) {
+ rc = PTR_ERR(vadc_dev);
+ if (rc != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Couldn't get vadc rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ vchg_vadc_dev = NULL;
+ if (of_find_property(pdev->dev.of_node, "qcom,vchg_sns-vadc", NULL)) {
+ vchg_vadc_dev = qpnp_get_vadc(&pdev->dev, "vchg_sns");
+ if (IS_ERR(vchg_vadc_dev)) {
+ rc = PTR_ERR(vchg_vadc_dev);
+ if (rc != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Couldn't get vadc 'vchg' rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!chip->regmap) {
+ dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+ return -EINVAL;
+ }
+
+ chip->fcc_votable = create_votable("BATT_FCC",
+ VOTE_MIN,
+ set_fastchg_current_vote_cb, chip);
+ if (IS_ERR(chip->fcc_votable)) {
+ rc = PTR_ERR(chip->fcc_votable);
+ goto votables_cleanup;
+ }
+
+ chip->usb_icl_votable = create_votable("USB_ICL",
+ VOTE_MIN,
+ set_usb_current_limit_vote_cb, chip);
+ if (IS_ERR(chip->usb_icl_votable)) {
+ rc = PTR_ERR(chip->usb_icl_votable);
+ goto votables_cleanup;
+ }
+
+ chip->dc_icl_votable = create_votable("DCIN_ICL",
+ VOTE_MIN,
+ set_dc_current_limit_vote_cb, chip);
+ if (IS_ERR(chip->dc_icl_votable)) {
+ rc = PTR_ERR(chip->dc_icl_votable);
+ goto votables_cleanup;
+ }
+
+ chip->usb_suspend_votable = create_votable("USB_SUSPEND",
+ VOTE_SET_ANY,
+ usb_suspend_vote_cb, chip);
+ if (IS_ERR(chip->usb_suspend_votable)) {
+ rc = PTR_ERR(chip->usb_suspend_votable);
+ goto votables_cleanup;
+ }
+
+ chip->dc_suspend_votable = create_votable("DC_SUSPEND",
+ VOTE_SET_ANY,
+ dc_suspend_vote_cb, chip);
+ if (IS_ERR(chip->dc_suspend_votable)) {
+ rc = PTR_ERR(chip->dc_suspend_votable);
+ goto votables_cleanup;
+ }
+
+ chip->battchg_suspend_votable = create_votable("BATTCHG_SUSPEND",
+ VOTE_SET_ANY,
+ charging_suspend_vote_cb, chip);
+ if (IS_ERR(chip->battchg_suspend_votable)) {
+ rc = PTR_ERR(chip->battchg_suspend_votable);
+ goto votables_cleanup;
+ }
+
+ chip->hw_aicl_rerun_disable_votable = create_votable("HWAICL_DISABLE",
+ VOTE_SET_ANY,
+ smbchg_hw_aicl_rerun_disable_cb, chip);
+ if (IS_ERR(chip->hw_aicl_rerun_disable_votable)) {
+ rc = PTR_ERR(chip->hw_aicl_rerun_disable_votable);
+ goto votables_cleanup;
+ }
+
+ chip->hw_aicl_rerun_enable_indirect_votable = create_votable(
+ "HWAICL_ENABLE_INDIRECT",
+ VOTE_SET_ANY,
+ smbchg_hw_aicl_rerun_enable_indirect_cb, chip);
+ if (IS_ERR(chip->hw_aicl_rerun_enable_indirect_votable)) {
+ rc = PTR_ERR(chip->hw_aicl_rerun_enable_indirect_votable);
+ goto votables_cleanup;
+ }
+
+ chip->aicl_deglitch_short_votable = create_votable(
+ "HWAICL_SHORT_DEGLITCH",
+ VOTE_SET_ANY,
+ smbchg_aicl_deglitch_config_cb, chip);
+ if (IS_ERR(chip->aicl_deglitch_short_votable)) {
+ rc = PTR_ERR(chip->aicl_deglitch_short_votable);
+ goto votables_cleanup;
+ }
+
+ INIT_WORK(&chip->usb_set_online_work, smbchg_usb_update_online_work);
+ INIT_DELAYED_WORK(&chip->parallel_en_work,
+ smbchg_parallel_usb_en_work);
+ INIT_DELAYED_WORK(&chip->vfloat_adjust_work, smbchg_vfloat_adjust_work);
+ INIT_DELAYED_WORK(&chip->hvdcp_det_work, smbchg_hvdcp_det_work);
+ init_completion(&chip->src_det_lowered);
+ init_completion(&chip->src_det_raised);
+ init_completion(&chip->usbin_uv_lowered);
+ init_completion(&chip->usbin_uv_raised);
+ chip->vadc_dev = vadc_dev;
+ chip->vchg_vadc_dev = vchg_vadc_dev;
+ chip->pdev = pdev;
+ chip->dev = &pdev->dev;
+
+ chip->typec_psy = typec_psy;
+ chip->fake_battery_soc = -EINVAL;
+ chip->usb_online = -EINVAL;
+ dev_set_drvdata(&pdev->dev, chip);
+
+ spin_lock_init(&chip->sec_access_lock);
+ mutex_init(&chip->therm_lvl_lock);
+ mutex_init(&chip->usb_set_online_lock);
+ mutex_init(&chip->parallel.lock);
+ mutex_init(&chip->taper_irq_lock);
+ mutex_init(&chip->pm_lock);
+ mutex_init(&chip->wipower_config);
+ mutex_init(&chip->usb_status_lock);
+ device_init_wakeup(chip->dev, true);
+
+ rc = smbchg_parse_peripherals(chip);
+ if (rc) {
+ dev_err(chip->dev, "Error parsing DT peripherals: %d\n", rc);
+ goto votables_cleanup;
+ }
+
+ rc = smbchg_check_chg_version(chip);
+ if (rc) {
+ pr_err("Unable to check schg version rc=%d\n", rc);
+ goto votables_cleanup;
+ }
+
+ rc = smb_parse_dt(chip);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Unable to parse DT nodes: %d\n", rc);
+ goto votables_cleanup;
+ }
+
+ rc = smbchg_regulator_init(chip);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Couldn't initialize regulator rc=%d\n", rc);
+ goto votables_cleanup;
+ }
+
+ chip->extcon = devm_extcon_dev_allocate(chip->dev, smbchg_extcon_cable);
+ if (IS_ERR(chip->extcon)) {
+ dev_err(chip->dev, "failed to allocate extcon device\n");
+ rc = PTR_ERR(chip->extcon);
+ goto votables_cleanup;
+ }
+
+ rc = devm_extcon_dev_register(chip->dev, chip->extcon);
+ if (rc) {
+ dev_err(chip->dev, "failed to register extcon device\n");
+ goto votables_cleanup;
+ }
+
+ chip->usb_psy_d.name = "usb";
+ chip->usb_psy_d.type = POWER_SUPPLY_TYPE_USB;
+ chip->usb_psy_d.get_property = smbchg_usb_get_property;
+ chip->usb_psy_d.set_property = smbchg_usb_set_property;
+ chip->usb_psy_d.properties = smbchg_usb_properties;
+ chip->usb_psy_d.num_properties = ARRAY_SIZE(smbchg_usb_properties);
+ chip->usb_psy_d.property_is_writeable = smbchg_usb_is_writeable;
+
+ usb_psy_cfg.drv_data = chip;
+ usb_psy_cfg.supplied_to = smbchg_usb_supplicants;
+ usb_psy_cfg.num_supplicants = ARRAY_SIZE(smbchg_usb_supplicants);
+
+ chip->usb_psy = devm_power_supply_register(chip->dev,
+ &chip->usb_psy_d, &usb_psy_cfg);
+ if (IS_ERR(chip->usb_psy)) {
+ dev_err(&pdev->dev, "Unable to register usb_psy rc = %ld\n",
+ PTR_ERR(chip->usb_psy));
+ rc = PTR_ERR(chip->usb_psy);
+ goto votables_cleanup;
+ }
+
+ if (of_find_property(chip->dev->of_node, "dpdm-supply", NULL)) {
+ chip->dpdm_reg = devm_regulator_get(chip->dev, "dpdm");
+ if (IS_ERR(chip->dpdm_reg)) {
+ rc = PTR_ERR(chip->dpdm_reg);
+ goto votables_cleanup;
+ }
+ }
+
+ rc = smbchg_hw_init(chip);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Unable to intialize hardware rc = %d\n", rc);
+ goto out;
+ }
+
+ rc = determine_initial_status(chip);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Unable to determine init status rc = %d\n", rc);
+ goto out;
+ }
+
+ chip->previous_soc = -EINVAL;
+ chip->batt_psy_d.name = chip->battery_psy_name;
+ chip->batt_psy_d.type = POWER_SUPPLY_TYPE_BATTERY;
+ chip->batt_psy_d.get_property = smbchg_battery_get_property;
+ chip->batt_psy_d.set_property = smbchg_battery_set_property;
+ chip->batt_psy_d.properties = smbchg_battery_properties;
+ chip->batt_psy_d.num_properties = ARRAY_SIZE(smbchg_battery_properties);
+ chip->batt_psy_d.external_power_changed = smbchg_external_power_changed;
+ chip->batt_psy_d.property_is_writeable = smbchg_battery_is_writeable;
+
+ batt_psy_cfg.drv_data = chip;
+ batt_psy_cfg.num_supplicants = 0;
+ chip->batt_psy = devm_power_supply_register(chip->dev,
+ &chip->batt_psy_d,
+ &batt_psy_cfg);
+ if (IS_ERR(chip->batt_psy)) {
+ dev_err(&pdev->dev,
+ "Unable to register batt_psy rc = %ld\n",
+ PTR_ERR(chip->batt_psy));
+ goto out;
+ }
+
+ if (chip->dc_psy_type != -EINVAL) {
+ chip->dc_psy_d.name = "dc";
+ chip->dc_psy_d.type = chip->dc_psy_type;
+ chip->dc_psy_d.get_property = smbchg_dc_get_property;
+ chip->dc_psy_d.set_property = smbchg_dc_set_property;
+ chip->dc_psy_d.property_is_writeable = smbchg_dc_is_writeable;
+ chip->dc_psy_d.properties = smbchg_dc_properties;
+ chip->dc_psy_d.num_properties
+ = ARRAY_SIZE(smbchg_dc_properties);
+
+ dc_psy_cfg.drv_data = chip;
+ dc_psy_cfg.num_supplicants
+ = ARRAY_SIZE(smbchg_dc_supplicants);
+ dc_psy_cfg.supplied_to = smbchg_dc_supplicants;
+
+ chip->dc_psy = devm_power_supply_register(chip->dev,
+ &chip->dc_psy_d,
+ &dc_psy_cfg);
+ if (IS_ERR(chip->dc_psy)) {
+ dev_err(&pdev->dev,
+ "Unable to register dc_psy rc = %ld\n",
+ PTR_ERR(chip->dc_psy));
+ goto out;
+ }
+ }
+
+ if (chip->cfg_chg_led_support &&
+ chip->schg_version == QPNP_SCHG_LITE) {
+ rc = smbchg_register_chg_led(chip);
+ if (rc) {
+ dev_err(chip->dev,
+ "Unable to register charger led: %d\n",
+ rc);
+ goto out;
+ }
+
+ rc = smbchg_chg_led_controls(chip);
+ if (rc) {
+ dev_err(chip->dev,
+ "Failed to set charger led controld bit: %d\n",
+ rc);
+ goto unregister_led_class;
+ }
+ }
+
+ rc = smbchg_request_irqs(chip);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Unable to request irqs rc = %d\n", rc);
+ goto unregister_led_class;
+ }
+
+ rerun_hvdcp_det_if_necessary(chip);
+
+ dump_regs(chip);
+ create_debugfs_entries(chip);
+ dev_info(chip->dev,
+ "SMBCHG successfully probe Charger version=%s Revision DIG:%d.%d ANA:%d.%d batt=%d dc=%d usb=%d\n",
+ version_str[chip->schg_version],
+ chip->revision[DIG_MAJOR], chip->revision[DIG_MINOR],
+ chip->revision[ANA_MAJOR], chip->revision[ANA_MINOR],
+ get_prop_batt_present(chip),
+ chip->dc_present, chip->usb_present);
+ return 0;
+
+unregister_led_class:
+ if (chip->cfg_chg_led_support && chip->schg_version == QPNP_SCHG_LITE)
+ led_classdev_unregister(&chip->led_cdev);
+out:
+ handle_usb_removal(chip);
+votables_cleanup:
+ if (chip->aicl_deglitch_short_votable)
+ destroy_votable(chip->aicl_deglitch_short_votable);
+ if (chip->hw_aicl_rerun_enable_indirect_votable)
+ destroy_votable(chip->hw_aicl_rerun_enable_indirect_votable);
+ if (chip->hw_aicl_rerun_disable_votable)
+ destroy_votable(chip->hw_aicl_rerun_disable_votable);
+ if (chip->battchg_suspend_votable)
+ destroy_votable(chip->battchg_suspend_votable);
+ if (chip->dc_suspend_votable)
+ destroy_votable(chip->dc_suspend_votable);
+ if (chip->usb_suspend_votable)
+ destroy_votable(chip->usb_suspend_votable);
+ if (chip->dc_icl_votable)
+ destroy_votable(chip->dc_icl_votable);
+ if (chip->usb_icl_votable)
+ destroy_votable(chip->usb_icl_votable);
+ if (chip->fcc_votable)
+ destroy_votable(chip->fcc_votable);
+ return rc;
+}
+
+static int smbchg_remove(struct platform_device *pdev)
+{
+ struct smbchg_chip *chip = dev_get_drvdata(&pdev->dev);
+
+ debugfs_remove_recursive(chip->debug_root);
+
+ destroy_votable(chip->aicl_deglitch_short_votable);
+ destroy_votable(chip->hw_aicl_rerun_enable_indirect_votable);
+ destroy_votable(chip->hw_aicl_rerun_disable_votable);
+ destroy_votable(chip->battchg_suspend_votable);
+ destroy_votable(chip->dc_suspend_votable);
+ destroy_votable(chip->usb_suspend_votable);
+ destroy_votable(chip->dc_icl_votable);
+ destroy_votable(chip->usb_icl_votable);
+ destroy_votable(chip->fcc_votable);
+
+ return 0;
+}
+
+static void smbchg_shutdown(struct platform_device *pdev)
+{
+ struct smbchg_chip *chip = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ if (!(chip->wa_flags & SMBCHG_RESTART_WA))
+ return;
+
+ if (!is_hvdcp_present(chip))
+ return;
+
+ pr_smb(PR_MISC, "Disable Parallel\n");
+ mutex_lock(&chip->parallel.lock);
+ smbchg_parallel_en = 0;
+ smbchg_parallel_usb_disable(chip);
+ mutex_unlock(&chip->parallel.lock);
+
+ pr_smb(PR_MISC, "Disable all interrupts\n");
+ disable_irq(chip->aicl_done_irq);
+ disable_irq(chip->batt_cold_irq);
+ disable_irq(chip->batt_cool_irq);
+ disable_irq(chip->batt_hot_irq);
+ disable_irq(chip->batt_missing_irq);
+ disable_irq(chip->batt_warm_irq);
+ disable_irq(chip->chg_error_irq);
+ disable_irq(chip->chg_hot_irq);
+ disable_irq(chip->chg_term_irq);
+ disable_irq(chip->dcin_uv_irq);
+ disable_irq(chip->fastchg_irq);
+ disable_irq(chip->otg_fail_irq);
+ disable_irq(chip->otg_oc_irq);
+ disable_irq(chip->power_ok_irq);
+ disable_irq(chip->recharge_irq);
+ disable_irq(chip->src_detect_irq);
+ disable_irq(chip->taper_irq);
+ disable_irq(chip->usbid_change_irq);
+ disable_irq(chip->usbin_ov_irq);
+ disable_irq(chip->usbin_uv_irq);
+ disable_irq(chip->vbat_low_irq);
+ disable_irq(chip->wdog_timeout_irq);
+
+ /* remove all votes for short deglitch */
+ vote(chip->aicl_deglitch_short_votable,
+ VARB_WORKAROUND_SHORT_DEGLITCH_VOTER, false, 0);
+ vote(chip->aicl_deglitch_short_votable,
+ HVDCP_SHORT_DEGLITCH_VOTER, false, 0);
+
+ /* vote to ensure AICL rerun is enabled */
+ rc = vote(chip->hw_aicl_rerun_enable_indirect_votable,
+ SHUTDOWN_WORKAROUND_VOTER, true, 0);
+ if (rc < 0)
+ pr_err("Couldn't vote to enable indirect AICL rerun\n");
+ rc = vote(chip->hw_aicl_rerun_disable_votable,
+ WEAK_CHARGER_HW_AICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't vote to enable AICL rerun\n");
+
+ /* switch to 5V HVDCP */
+ pr_smb(PR_MISC, "Switch to 5V HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_5V);
+ if (rc < 0) {
+ pr_err("Couldn't configure HVDCP 5V rc=%d\n", rc);
+ return;
+ }
+
+ pr_smb(PR_MISC, "Wait 500mS to lower to 5V\n");
+ /* wait for HVDCP to lower to 5V */
+ msleep(500);
+ /*
+ * Check if the same hvdcp session is in progress. src_det should be
+ * high and that we are still in 5V hvdcp
+ */
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "src det low after 500mS sleep\n");
+ return;
+ }
+
+ /* disable HVDCP */
+ pr_smb(PR_MISC, "Disable HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, 0);
+ if (rc < 0)
+ pr_err("Couldn't disable HVDCP rc=%d\n", rc);
+
+ chip->hvdcp_3_det_ignore_uv = true;
+ /* fake a removal */
+ pr_smb(PR_MISC, "Faking Removal\n");
+ rc = fake_insertion_removal(chip, false);
+ if (rc < 0)
+ pr_err("Couldn't fake removal HVDCP Removed rc=%d\n", rc);
+
+ /* fake an insertion */
+ pr_smb(PR_MISC, "Faking Insertion\n");
+ rc = fake_insertion_removal(chip, true);
+ if (rc < 0)
+ pr_err("Couldn't fake insertion rc=%d\n", rc);
+
+ pr_smb(PR_MISC, "Wait 1S to settle\n");
+ msleep(1000);
+ chip->hvdcp_3_det_ignore_uv = false;
+
+ pr_smb(PR_STATUS, "wrote power off configurations\n");
+}
+
+static const struct dev_pm_ops smbchg_pm_ops = {
+};
+
+MODULE_DEVICE_TABLE(spmi, smbchg_id);
+
+static struct platform_driver smbchg_driver = {
+ .driver = {
+ .name = "qpnp-smbcharger",
+ .owner = THIS_MODULE,
+ .of_match_table = smbchg_match_table,
+ .pm = &smbchg_pm_ops,
+ },
+ .probe = smbchg_probe,
+ .remove = smbchg_remove,
+ .shutdown = smbchg_shutdown,
+};
+
+static int __init smbchg_init(void)
+{
+ return platform_driver_register(&smbchg_driver);
+}
+
+static void __exit smbchg_exit(void)
+{
+ return platform_driver_unregister(&smbchg_driver);
+}
+
+module_init(smbchg_init);
+module_exit(smbchg_exit);
+
+MODULE_DESCRIPTION("QPNP SMB Charger");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qpnp-smbcharger");
return IRQ_HANDLED;
}
+static int typec_try_sink(struct smb_charger *chg)
+{
+ union power_supply_propval val;
+ bool debounce_done, vbus_detected, sink;
+ u8 stat;
+ int exit_mode = ATTACHED_SRC, rc;
+
+ /* ignore typec interrupt while try.snk WIP */
+ chg->try_sink_active = true;
+
+ /* force SNK mode */
+ val.intval = POWER_SUPPLY_TYPEC_PR_SINK;
+ rc = smblib_set_prop_typec_power_role(chg, &val);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set UFP mode rc=%d\n", rc);
+ goto try_sink_exit;
+ }
+
+ /* reduce Tccdebounce time to ~20ms */
+ rc = smblib_masked_write(chg, MISC_CFG_REG,
+ TCC_DEBOUNCE_20MS_BIT, TCC_DEBOUNCE_20MS_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set MISC_CFG_REG rc=%d\n", rc);
+ goto try_sink_exit;
+ }
+
+ /*
+ * give opportunity to the other side to be a SRC,
+ * for tDRPTRY + Tccdebounce time
+ */
+ msleep(120);
+
+ rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
+ rc);
+ goto try_sink_exit;
+ }
+
+ debounce_done = stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT;
+
+ if (!debounce_done)
+ /*
+ * The other side didn't switch to source: either it is an
+ * adamant sink or it has been removed. Go back to showing Rp.
+ */
+ goto try_wait_src;
+
+ /*
+ * We are in force sink mode and the other side has switched to
+ * showing Rp. Config DRP in case the other side removes Rp so we
+ * can quickly (20ms) switch back to showing our Rp. Note that the
+ * spec requires us to show Rp for 80ms while the DRP DFP residency
+ * is just 54ms; that still leaves plenty of time for us to react
+ * and force Rp for the remaining 26ms.
+ */
+ val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+ rc = smblib_set_prop_typec_power_role(chg, &val);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set DFP mode rc=%d\n",
+ rc);
+ goto try_sink_exit;
+ }
+
+ /*
+ * while other side is Rp, wait for VBUS from it; exit if other side
+ * removes Rp
+ */
+ do {
+ rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
+ rc);
+ goto try_sink_exit;
+ }
+
+ debounce_done = stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT;
+ vbus_detected = stat & TYPEC_VBUS_STATUS_BIT;
+
+ /* Successfully transitioned to ATTACHED.SNK */
+ if (vbus_detected && debounce_done) {
+ exit_mode = ATTACHED_SINK;
+ goto try_sink_exit;
+ }
+
+ /*
+ * Ensure sink since drp may put us in source if other
+ * side switches back to Rd
+ */
+ sink = !(stat & UFP_DFP_MODE_STATUS_BIT);
+
+ usleep_range(1000, 2000);
+ } while (debounce_done && sink);
+
+try_wait_src:
+ /*
+ * Transition to TryWait.SRC state and check whether the other side
+ * still wants to be a SNK or has been removed.
+ */
+ val.intval = POWER_SUPPLY_TYPEC_PR_SOURCE;
+ rc = smblib_set_prop_typec_power_role(chg, &val);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set UFP mode rc=%d\n", rc);
+ goto try_sink_exit;
+ }
+
+ /* Need to be in this state for tDRPTRY time, 75ms~150ms */
+ msleep(80);
+
+ rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+ goto try_sink_exit;
+ }
+
+ debounce_done = stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT;
+
+ if (debounce_done)
+ /* the other side wants to be a sink */
+ exit_mode = ATTACHED_SRC;
+ else
+ /* the other side is detached */
+ exit_mode = UNATTACHED_SINK;
+
+try_sink_exit:
+ /* release forcing of SRC/SNK mode */
+ val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+ rc = smblib_set_prop_typec_power_role(chg, &val);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set DFP mode rc=%d\n", rc);
+
+ /* revert Tccdebounce time back to ~120ms */
+ rc = smblib_masked_write(chg, MISC_CFG_REG, TCC_DEBOUNCE_20MS_BIT, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set MISC_CFG_REG rc=%d\n", rc);
+
+ chg->try_sink_active = false;
+
+ return exit_mode;
+}
+
static void typec_sink_insertion(struct smb_charger *chg)
{
+ int exit_mode;
+
+ /*
+ * Try.SNK entry condition: ATTACHWAIT.SRC state with Rd-open or
+ * Rd-Ra detected for TccDebounce time.
+ */
+
+ if (*chg->try_sink_enabled) {
+ exit_mode = typec_try_sink(chg);
+
+ if (exit_mode != ATTACHED_SRC) {
+ smblib_usb_typec_change(chg);
+ return;
+ }
+ }
+
/* when a sink is inserted we should not wait on hvdcp timeout to
* enable pd
*/
smblib_typec_mode_name[chg->typec_mode]);
}
-static void smblib_usb_typec_change(struct smb_charger *chg)
+void smblib_usb_typec_change(struct smb_charger *chg)
{
int rc;
return IRQ_HANDLED;
}
- if (chg->cc2_detach_wa_active || chg->typec_en_dis_active) {
+ if (chg->cc2_detach_wa_active || chg->typec_en_dis_active ||
+ chg->try_sink_active) {
smblib_dbg(chg, PR_INTERRUPT, "Ignoring since %s active\n",
chg->cc2_detach_wa_active ?
"cc2_detach_wa" : "typec_en_dis");
SMB_IRQ_MAX,
};
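+/* Result of the Try.SNK sequence implemented by typec_try_sink() */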
+enum try_sink_exit_mode {
+ ATTACHED_SRC = 0,
+ ATTACHED_SINK,
+ UNATTACHED_SINK,
+};
+
struct smb_irq_info {
const char *name;
const irq_handler_t handler;
struct smb_params param;
struct smb_iio iio;
int *debug_mask;
+ int *try_sink_enabled;
enum smb_mode mode;
struct smb_chg_freq chg_freq;
int smb_version;
u32 wa_flags;
bool cc2_detach_wa_active;
bool typec_en_dis_active;
+ bool try_sink_active;
int boost_current_ua;
int temp_speed_reading_count;
union power_supply_propval *val);
int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
const union power_supply_propval *val);
+void smblib_usb_typec_change(struct smb_charger *chg);
int smblib_init(struct smb_charger *chg);
int smblib_deinit(struct smb_charger *chg);
#define TAPER_TIMER_SEL_CFG_REG (USBIN_BASE + 0x64)
#define TYPEC_SPARE_CFG_BIT BIT(7)
+#define TYPEC_DRP_DFP_TIME_CFG_BIT BIT(5)
#define TAPER_TIMER_SEL_MASK GENMASK(1, 0)
#define USBIN_LOAD_CFG_REG (USBIN_BASE + 0x65)
bool assert_falling_edge;
bool capture_clear;
unsigned int gpio_pin;
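+ /* when set, timestamp events via pps_get_ts() (system time)
+ * instead of get_monotonic_boottime()
+ */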
+ bool use_system_time_ts;
};
/*
struct pps_event_time ts;
int rising_edge;
- /* Get the time stamp first */
- get_monotonic_boottime(&ts.ts_real);
-
info = data;
+ /* Get the time stamp first */
+ if (!info->use_system_time_ts)
+ get_monotonic_boottime(&ts.ts_real);
+ else
+ pps_get_ts(&ts);
+
rising_edge = gpio_get_value(info->gpio_pin);
if ((rising_edge && !info->assert_falling_edge) ||
(!rising_edge && info->assert_falling_edge))
if (of_get_property(np, "assert-falling-edge", NULL))
data->assert_falling_edge = true;
+
+ if (of_get_property(np, "use-system-time-ts", NULL))
+ data->use_system_time_ts = true;
}
/* GPIO setup */
return rc;
}
+/* For PMI8998 and earlier PMICs */
static const struct ibb_ver_ops ibb_ops_v1 = {
.set_default_voltage = qpnp_ibb_set_default_voltage_v1,
.set_voltage = qpnp_ibb_set_voltage_v1,
.voltage_at_one_pulse = qpnp_ibb_output_voltage_at_one_pulse_v1,
};
+/* For PM660A and later PMICs */
static const struct ibb_ver_ops ibb_ops_v2 = {
.set_default_voltage = qpnp_ibb_set_default_voltage_v2,
.set_voltage = qpnp_ibb_set_voltage_v2,
u32 thresh, bool enable)
{
int rc = 0;
- u8 val;
+ u8 val, mask;
+ mask = LAB_PS_CTL_EN;
if (enable) {
for (val = 0; val < ARRAY_SIZE(lab_ps_thresh_table_v2); val++)
if (lab_ps_thresh_table_v2[val] == thresh)
}
val |= LAB_PS_CTL_EN;
+ mask |= LAB_PS_THRESH_MASK;
} else {
val = 0;
}
- rc = qpnp_labibb_write(labibb, labibb->lab_base +
- REG_LAB_PS_CTL, &val, 1);
-
+ rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+ REG_LAB_PS_CTL, mask, val);
if (rc < 0)
pr_err("write register %x failed rc = %d\n",
REG_LAB_PS_CTL, rc);
return rc;
}
+/* For PMI8996 and earlier PMICs */
static const struct lab_ver_ops lab_ops_v1 = {
.set_default_voltage = qpnp_lab_set_default_voltage_v1,
.ps_ctl = qpnp_lab_ps_ctl_v1,
};
-static const struct lab_ver_ops lab_ops_v2 = {
+static const struct lab_ver_ops pmi8998_lab_ops = {
+ .set_default_voltage = qpnp_lab_set_default_voltage_v1,
+ .ps_ctl = qpnp_lab_ps_ctl_v2,
+};
+
+static const struct lab_ver_ops pm660_lab_ops = {
.set_default_voltage = qpnp_lab_set_default_voltage_v2,
.ps_ctl = qpnp_lab_ps_ctl_v2,
};
struct device_node *of_node)
{
int rc = 0;
- u32 i, tmp = 0;
+ u32 i = 0, tmp = 0;
u8 val, mask;
/*
rc = of_property_read_u32(of_node,
"qcom,qpnp-ibb-lab-pwrdn-delay", &tmp);
if (!rc) {
- for (val = 0; val < ARRAY_SIZE(ibb_pwrdn_dly_table); val++)
- if (ibb_pwrdn_dly_table[val] == tmp)
- break;
+ if (tmp > 0) {
+ for (i = 0; i < ARRAY_SIZE(ibb_pwrdn_dly_table); i++) {
+ if (ibb_pwrdn_dly_table[i] == tmp)
+ break;
+ }
- if (val == ARRAY_SIZE(ibb_pwrdn_dly_table)) {
- pr_err("Invalid value in qcom,qpnp-ibb-lab-pwrdn-delay\n");
- return -EINVAL;
+ if (i == ARRAY_SIZE(ibb_pwrdn_dly_table)) {
+ pr_err("Invalid value in qcom,qpnp-ibb-lab-pwrdn-delay\n");
+ return -EINVAL;
+ }
}
labibb->ibb_vreg.pwrdn_dly = tmp;
- val |= IBB_PWRUP_PWRDN_CTL_1_EN_DLY2;
+
+ if (tmp > 0)
+ val = i | IBB_PWRUP_PWRDN_CTL_1_EN_DLY2;
+
mask |= IBB_PWRUP_PWRDN_CTL_1_EN_DLY2;
}
rc = of_property_read_u32(of_node,
"qcom,qpnp-ibb-lab-pwrup-delay", &tmp);
if (!rc) {
- for (i = 0; i < ARRAY_SIZE(ibb_pwrup_dly_table); i++)
- if (ibb_pwrup_dly_table[i] == tmp)
- break;
+ if (tmp > 0) {
+ for (i = 0; i < ARRAY_SIZE(ibb_pwrup_dly_table); i++) {
+ if (ibb_pwrup_dly_table[i] == tmp)
+ break;
+ }
- if (i == ARRAY_SIZE(ibb_pwrup_dly_table)) {
- pr_err("Invalid value in qcom,qpnp-ibb-lab-pwrup-delay\n");
- return -EINVAL;
+ if (i == ARRAY_SIZE(ibb_pwrup_dly_table)) {
+ pr_err("Invalid value in qcom,qpnp-ibb-lab-pwrup-delay\n");
+ return -EINVAL;
+ }
}
labibb->ibb_vreg.pwrup_dly = tmp;
+ if (tmp > 0)
+ val |= IBB_PWRUP_PWRDN_CTL_1_EN_DLY1;
+
val |= (i << IBB_PWRUP_PWRDN_CTL_1_DLY1_SHIFT);
- val |= (IBB_PWRUP_PWRDN_CTL_1_EN_DLY1 |
- IBB_PWRUP_PWRDN_CTL_1_LAB_VREG_OK);
+ val |= IBB_PWRUP_PWRDN_CTL_1_LAB_VREG_OK;
mask |= (IBB_PWRUP_PWRDN_CTL_1_EN_DLY1 |
IBB_PWRUP_PWRDN_CTL_1_DLY1_MASK |
IBB_PWRUP_PWRDN_CTL_1_LAB_VREG_OK);
if (labibb->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE) {
labibb->ibb_ver_ops = &ibb_ops_v2;
- labibb->lab_ver_ops = &lab_ops_v2;
+ labibb->lab_ver_ops = &pm660_lab_ops;
+ } else if (labibb->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE) {
+ labibb->ibb_ver_ops = &ibb_ops_v1;
+ labibb->lab_ver_ops = &pmi8998_lab_ops;
} else {
labibb->ibb_ver_ops = &ibb_ops_v1;
labibb->lab_ver_ops = &lab_ops_v1;
}
EXPORT_SYMBOL_GPL(rtc_set_alarm);
+static void rtc_alarm_disable(struct rtc_device *rtc)
+{
+ if (!rtc->ops || !rtc->ops->alarm_irq_enable)
+ return;
+
+ rtc->ops->alarm_irq_enable(rtc->dev.parent, false);
+}
+
/* Called once per device from rtc_device_register */
int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
rtc->aie_timer.enabled = 1;
timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
+ } else if (alarm->enabled && (rtc_tm_to_ktime(now).tv64 >=
+ rtc->aie_timer.node.expires.tv64)) {
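+ /* the alarm is enabled but already expired: disable the
+ * stale hardware alarm so it cannot fire after boot
+ */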
+ rtc_alarm_disable(rtc);
}
+
mutex_unlock(&rtc->ops_lock);
return err;
}
return 0;
}
-static void rtc_alarm_disable(struct rtc_device *rtc)
-{
- if (!rtc->ops || !rtc->ops->alarm_irq_enable)
- return;
-
- rtc->ops->alarm_irq_enable(rtc->dev.parent, false);
-}
-
/**
* rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
* @rtc rtc device
*
* Debug traces for zfcp.
*
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2017
*/
#define KMSG_COMPONENT "zfcp"
struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
struct scatterlist *resp_entry = ct_els->resp;
+ struct fc_ct_hdr *resph;
struct fc_gpn_ft_resp *acc;
int max_entries, x, last = 0;
return len; /* not GPN_FT response so do not cap */
acc = sg_virt(resp_entry);
+
+ /* cap all but accept CT responses to at least the CT header */
+ resph = (struct fc_ct_hdr *)acc;
+ if ((ct_els->status) ||
+ (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
+ return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);
+
max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
* to account for header as 1st pseudo "entry" */;
rec->scsi_retries = sc->retries;
rec->scsi_allowed = sc->allowed;
rec->scsi_id = sc->device->id;
- /* struct zfcp_dbf_scsi needs to be updated to handle 64bit LUNs */
rec->scsi_lun = (u32)sc->device->lun;
+ rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
rec->host_scribble = (unsigned long)sc->host_scribble;
memcpy(rec->scsi_opcode, sc->cmnd,
if (fsf) {
rec->fsf_req_id = fsf->req_id;
+ rec->pl_len = FCP_RESP_WITH_EXT;
fcp_rsp = (struct fcp_resp_with_ext *)
&(fsf->qtcb->bottom.io.fcp_rsp);
+ /* mandatory parts of FCP_RSP IU in this SCSI record */
memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
+ rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
}
if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
- rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
- (u16)ZFCP_DBF_PAY_MAX_REC);
- zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
- "fcp_sns", fsf->req_id);
+ rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
}
+ /* complete FCP_RSP IU in associated PAYload record
+ * but only if there are optional parts
+ */
+ if (fcp_rsp->resp.fr_flags != 0)
+ zfcp_dbf_pl_write(
+ dbf, fcp_rsp,
+ /* at least one full PAY record
+ * but not beyond hardware response field
+ */
+ min_t(u16, max_t(u16, rec->pl_len,
+ ZFCP_DBF_PAY_MAX_REC),
+ FSF_FCP_RSP_SIZE),
+ "fcp_riu", fsf->req_id);
}
debug_event(dbf->scsi, level, rec, sizeof(*rec));
* zfcp device driver
* debug feature declarations
*
- * Copyright IBM Corp. 2008, 2016
+ * Copyright IBM Corp. 2008, 2017
*/
#ifndef ZFCP_DBF_H
* @id: unique number of recovery record type
* @tag: identifier string specifying the location of initiation
* @scsi_id: scsi device id
- * @scsi_lun: scsi device logical unit number
+ * @scsi_lun: scsi device logical unit number, low 32 bits of the 64 bit LUN
* @scsi_result: scsi result
* @scsi_retries: current retry number of scsi request
* @scsi_allowed: allowed retries
* @host_scribble: LLD specific data attached to SCSI request
* @pl_len: length of payload stored as zfcp_dbf_pay
* @fsf_rsp: response for fsf request
+ * @scsi_lun_64_hi: scsi device logical unit number, high 32 bits of the 64 bit LUN
*/
struct zfcp_dbf_scsi {
u8 id;
u64 host_scribble;
u16 pl_len;
struct fcp_resp_with_ext fcp_rsp;
+ u32 scsi_lun_64_hi;
} __packed;
/**
{
struct fsf_qtcb *qtcb = req->qtcb;
- if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
+ if (unlikely(req->status & (ZFCP_STATUS_FSFREQ_DISMISSED |
+ ZFCP_STATUS_FSFREQ_ERROR))) {
+ zfcp_dbf_hba_fsf_resp("fs_rerr", 3, req);
+
+ } else if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
(qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
* @flag: indicates type of reset (Target Reset, Logical Unit Reset)
*/
static inline
-void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
+void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag,
+ struct zfcp_fsf_req *fsf_req)
{
char tmp_tag[ZFCP_DBF_TAG_LEN];
memcpy(tmp_tag, "lr_", 3);
memcpy(&tmp_tag[3], tag, 4);
- _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
+ _zfcp_dbf_scsi(tmp_tag, 1, scmnd, fsf_req);
}
/**
* Fibre Channel related definitions and inline functions for the zfcp
* device driver
*
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corp. 2009, 2017
*/
#ifndef ZFCP_FC_H
!(rsp_flags & FCP_SNS_LEN_VAL) &&
fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
set_host_byte(scsi, DID_ERROR);
+ } else if (unlikely(rsp_flags & FCP_RESID_OVER)) {
+ /* FCP_DL was not sufficient for SCSI data length */
+ if (fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
+ set_host_byte(scsi, DID_ERROR);
}
}
switch (header->fsf_status) {
case FSF_GOOD:
- zfcp_dbf_san_res("fsscth2", req);
ct->status = 0;
+ zfcp_dbf_san_res("fsscth2", req);
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);
switch (header->fsf_status) {
case FSF_GOOD:
- zfcp_dbf_san_res("fsselh1", req);
send_els->status = 0;
+ zfcp_dbf_san_res("fsselh1", req);
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
- if (scsi_prot_sg_count(scsi_cmnd)) {
+ if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
+ scsi_prot_sg_count(scsi_cmnd)) {
zfcp_qdio_set_data_div(qdio, &req->qdio_req,
scsi_prot_sg_count(scsi_cmnd));
retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2017
*/
#define KMSG_COMPONENT "zfcp"
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
- if (ret)
+ if (ret) {
+ zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags, NULL);
return ret;
+ }
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
- zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
+ zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags, NULL);
return SUCCESS;
}
}
- if (!fsf_req)
+ if (!fsf_req) {
+ zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags, NULL);
return FAILED;
+ }
wait_for_completion(&fsf_req->completion);
if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
- zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
+ zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags, fsf_req);
retval = FAILED;
} else {
- zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
+ zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags, fsf_req);
zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
}
static int clariion_std_inquiry(struct scsi_device *sdev,
struct clariion_dh_data *csdev)
{
- int err;
+ int err = SCSI_DH_OK;
char *sp_model;
err = send_inquiry_cmd(sdev, 0, csdev);
{
static const char * const strings[] = RNC_STATES;
+ if (state >= ARRAY_SIZE(strings))
+ return "UNKNOWN";
+
return strings[state];
}
#undef C
lpfc_sli4_unreg_all_rpis(vport);
}
}
- lpfc_issue_reg_vfi(vport);
+
+ /* Do not register VFI if the driver aborted FLOGI */
+ if (!lpfc_error_lost_link(irsp))
+ lpfc_issue_reg_vfi(vport);
lpfc_nlp_put(ndlp);
goto out;
}
if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
if (cmd_mfi->sync_cmd &&
- cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
+ (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
+ cmd_mfi->frame->hdr.cmd_status =
+ MFI_STAT_WRONG_STATE;
megasas_complete_cmd(instance,
cmd_mfi, DID_OK);
+ }
}
}
} else {
prev_aen.word =
le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
+ if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
+ (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
+ dev_info(&instance->pdev->dev,
+ "%s %d out of range class %d send by application\n",
+ __func__, __LINE__, curr_aen.members.class);
+ return 0;
+ }
+
/*
* A class whose enum value is smaller is inclusive of all
* higher values. If a PROGRESS (= -1) was previously
return -EINVAL;
if (start > ha->optrom_size)
return -EINVAL;
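+ /* clamp the request so start + size stays within the flash size */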
+ if (size > ha->optrom_size - start)
+ size = ha->optrom_size - start;
mutex_lock(&ha->optrom_mutex);
switch (val) {
}
ha->optrom_region_start = start;
- ha->optrom_region_size = start + size > ha->optrom_size ?
- ha->optrom_size - start : size;
+ ha->optrom_region_size = start + size;
ha->optrom_state = QLA_SREADING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
}
ha->optrom_region_start = start;
- ha->optrom_region_size = start + size > ha->optrom_size ?
- ha->optrom_size - start : size;
+ ha->optrom_region_size = start + size;
ha->optrom_state = QLA_SWRITING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
uint32_t group;
nlh = nlmsg_hdr(skb);
- if (nlh->nlmsg_len < sizeof(*nlh) ||
+ if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
skb->len < nlh->nlmsg_len) {
break;
}
sd_read_write_same(sdkp, buffer);
}
- sdkp->first_scan = 0;
-
/*
* We now have all cache related info, determine how we deal
* with flush requests.
q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
/*
- * Use the device's preferred I/O size for reads and writes
+ * Determine the device's preferred I/O size for reads and writes
* unless the reported value is unreasonably small, large, or
* garbage.
*/
rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
(sector_t)BLK_DEF_MAX_SECTORS);
- /* Combine with controller limits */
- q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
+ /* Do not exceed controller limit */
+ rw_max = min(rw_max, queue_max_hw_sectors(q));
+
+ /*
+ * Only update max_sectors if previously unset or if the current value
+ * exceeds the capabilities of the hardware.
+ */
+ if (sdkp->first_scan ||
+ q->limits.max_sectors > q->limits.max_dev_sectors ||
+ q->limits.max_sectors > q->limits.max_hw_sectors)
+ q->limits.max_sectors = rw_max;
+
+ sdkp->first_scan = 0;
set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
sd_config_write_same(sdkp);
struct sg_fd;
typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
- struct sg_request *nextrp; /* NULL -> tail request (slist) */
+ struct list_head entry; /* list entry */
struct sg_fd *parentfp; /* NULL -> not in use */
Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
struct sg_device *parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */
rwlock_t rq_list_lock; /* protect access to list in req_arr */
+ struct mutex f_mutex; /* protect against changes in this fd */
int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
Sg_scatter_hold reserve; /* buffer held for this file descriptor */
- unsigned save_scat_len; /* original length of trunc. scat. element */
- Sg_request *headrp; /* head of request slist, NULL->empty */
+ struct list_head rq_list; /* head of request list */
struct fasync_struct *async_qp; /* used by asynchronous notification */
Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
char low_dma; /* as in parent but possibly overridden to 1 */
unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
char mmap_called; /* 0 -> mmap() never called on this fd */
+ char res_in_use; /* 1 -> 'reserve' array in use */
struct kref f_ref;
struct execute_work ew;
} Sg_fd;
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
-static int sg_res_in_use(Sg_fd * sfp);
static Sg_device *sg_get_dev(int dev);
static void sg_device_destroy(struct kref *kref);
}
buf += SZ_SG_HEADER;
__get_user(opcode, buf);
+ mutex_lock(&sfp->f_mutex);
if (sfp->next_cmd_len > 0) {
cmd_size = sfp->next_cmd_len;
sfp->next_cmd_len = 0; /* reset so only this write() effected */
if ((opcode >= 0xc0) && old_hdr.twelve_byte)
cmd_size = 12;
}
+ mutex_unlock(&sfp->f_mutex);
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
/* Determine buffer size. */
sg_remove_request(sfp, srp);
return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
}
- if (sg_res_in_use(sfp)) {
+ if (sfp->res_in_use) {
sg_remove_request(sfp, srp);
return -EBUSY; /* reserve buffer already being used */
}
return max_sectors << 9;
}
+static void
+sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
+{
+ Sg_request *srp;
+ int val;
+ unsigned int ms;
+
+ val = 0;
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
+ if (val >= SG_MAX_QUEUE)
+ break;
+ rinfo[val].req_state = srp->done + 1;
+ rinfo[val].problem =
+ srp->header.masked_status &
+ srp->header.host_status &
+ srp->header.driver_status;
+ if (srp->done)
+ rinfo[val].duration =
+ srp->header.duration;
+ else {
+ ms = jiffies_to_msecs(jiffies);
+ rinfo[val].duration =
+ (ms > srp->header.duration) ?
+ (ms - srp->header.duration) : 0;
+ }
+ rinfo[val].orphan = srp->orphan;
+ rinfo[val].sg_io_owned = srp->sg_io_owned;
+ rinfo[val].pack_id = srp->header.pack_id;
+ rinfo[val].usr_ptr = srp->header.usr_ptr;
+ val++;
+ }
+}
+
static long
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
return result;
if (val) {
sfp->low_dma = 1;
- if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
+ if ((0 == sfp->low_dma) && !sfp->res_in_use) {
val = (int) sfp->reserve.bufflen;
sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
return -EFAULT;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp; srp; srp = srp->nextrp) {
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned)) {
read_unlock_irqrestore(&sfp->rq_list_lock,
iflags);
return 0;
case SG_GET_NUM_WAITING:
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
+ val = 0;
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned))
++val;
}
return -EINVAL;
val = min_t(int, val,
max_sectors_bytes(sdp->device->request_queue));
+ mutex_lock(&sfp->f_mutex);
if (val != sfp->reserve.bufflen) {
- if (sg_res_in_use(sfp) || sfp->mmap_called)
+ if (sfp->mmap_called ||
+ sfp->res_in_use) {
+ mutex_unlock(&sfp->f_mutex);
return -EBUSY;
+ }
+
sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
}
+ mutex_unlock(&sfp->f_mutex);
return 0;
case SG_GET_RESERVED_SIZE:
val = min_t(int, sfp->reserve.bufflen,
return -EFAULT;
else {
sg_req_info_t *rinfo;
- unsigned int ms;
- rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
- GFP_KERNEL);
+ rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
+ GFP_KERNEL);
if (!rinfo)
return -ENOMEM;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
- ++val, srp = srp ? srp->nextrp : srp) {
- memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
- if (srp) {
- rinfo[val].req_state = srp->done + 1;
- rinfo[val].problem =
- srp->header.masked_status &
- srp->header.host_status &
- srp->header.driver_status;
- if (srp->done)
- rinfo[val].duration =
- srp->header.duration;
- else {
- ms = jiffies_to_msecs(jiffies);
- rinfo[val].duration =
- (ms > srp->header.duration) ?
- (ms - srp->header.duration) : 0;
- }
- rinfo[val].orphan = srp->orphan;
- rinfo[val].sg_io_owned =
- srp->sg_io_owned;
- rinfo[val].pack_id =
- srp->header.pack_id;
- rinfo[val].usr_ptr =
- srp->header.usr_ptr;
- }
- }
+ sg_fill_request_table(sfp, rinfo);
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- result = __copy_to_user(p, rinfo,
+ result = __copy_to_user(p, rinfo,
SZ_SG_REQ_INFO * SG_MAX_QUEUE);
result = result ? -EFAULT : 0;
kfree(rinfo);
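For reference, a minimal userspace sketch (not part of this patch) of how the SG_GET_REQUEST_TABLE ioctl served by the code above is typically consumed; "/dev/sg0" would be an assumed device node:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

static void dump_sg_requests(const char *dev)
{
	sg_req_info_t rinfo[SG_MAX_QUEUE];	/* zero-filled by the driver (kzalloc) */
	int i, fd = open(dev, O_RDONLY);

	if (fd < 0)
		return;
	if (ioctl(fd, SG_GET_REQUEST_TABLE, rinfo) == 0) {
		for (i = 0; i < SG_MAX_QUEUE; i++)
			if (rinfo[i].req_state)
				printf("pack_id=%d duration=%u done=%d\n",
				       rinfo[i].pack_id, rinfo[i].duration,
				       rinfo[i].req_state - 1);
	}
	close(fd);
}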
return POLLERR;
poll_wait(filp, &sfp->read_wait, wait);
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp; srp; srp = srp->nextrp) {
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
/* if any read waiting, flag it */
if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
res = POLLIN | POLLRDNORM;
unsigned long req_sz, len, sa;
Sg_scatter_hold *rsv_schp;
int k, length;
+ int ret = 0;
if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
return -ENXIO;
if (vma->vm_pgoff)
return -EINVAL; /* want no offset */
rsv_schp = &sfp->reserve;
- if (req_sz > rsv_schp->bufflen)
- return -ENOMEM; /* cannot map more than reserved buffer */
+ mutex_lock(&sfp->f_mutex);
+ if (req_sz > rsv_schp->bufflen) {
+ ret = -ENOMEM; /* cannot map more than reserved buffer */
+ goto out;
+ }
sa = vma->vm_start;
length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = sfp;
vma->vm_ops = &sg_mmap_vm_ops;
- return 0;
+out:
+ mutex_unlock(&sfp->f_mutex);
+ return ret;
}
static void
md = &map_data;
if (md) {
- if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
+ mutex_lock(&sfp->f_mutex);
+ if (dxfer_len <= rsv_schp->bufflen &&
+ !sfp->res_in_use) {
+ sfp->res_in_use = 1;
sg_link_reserve(sfp, srp, dxfer_len);
- else {
+ } else if (hp->flags & SG_FLAG_MMAP_IO) {
+ res = -EBUSY; /* sfp->res_in_use == 1 */
+ if (dxfer_len > rsv_schp->bufflen)
+ res = -ENOMEM;
+ mutex_unlock(&sfp->f_mutex);
+ return res;
+ } else {
res = sg_build_indirect(req_schp, sfp, dxfer_len);
- if (res)
+ if (res) {
+ mutex_unlock(&sfp->f_mutex);
return res;
+ }
}
+ mutex_unlock(&sfp->f_mutex);
md->pages = req_schp->pages;
md->page_order = req_schp->page_order;
req_schp->pages = NULL;
req_schp->page_order = 0;
req_schp->sglist_len = 0;
- sfp->save_scat_len = 0;
srp->res_used = 0;
+ /* Called without mutex lock to avoid deadlock */
+ sfp->res_in_use = 0;
}
static Sg_request *
unsigned long iflags;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (resp = sfp->headrp; resp; resp = resp->nextrp) {
+ list_for_each_entry(resp, &sfp->rq_list, entry) {
/* look for requests that are ready + not SG_IO owned */
if ((1 == resp->done) && (!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
{
int k;
unsigned long iflags;
- Sg_request *resp;
Sg_request *rp = sfp->req_arr;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
- resp = sfp->headrp;
- if (!resp) {
- memset(rp, 0, sizeof (Sg_request));
- rp->parentfp = sfp;
- resp = rp;
- sfp->headrp = resp;
- } else {
- if (0 == sfp->cmd_q)
- resp = NULL; /* command queuing disallowed */
- else {
- for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
- if (!rp->parentfp)
- break;
- }
- if (k < SG_MAX_QUEUE) {
- memset(rp, 0, sizeof (Sg_request));
- rp->parentfp = sfp;
- while (resp->nextrp)
- resp = resp->nextrp;
- resp->nextrp = rp;
- resp = rp;
- } else
- resp = NULL;
+ if (!list_empty(&sfp->rq_list)) {
+ if (!sfp->cmd_q)
+ goto out_unlock;
+
+ for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
+ if (!rp->parentfp)
+ break;
}
+ if (k >= SG_MAX_QUEUE)
+ goto out_unlock;
}
- if (resp) {
- resp->nextrp = NULL;
- resp->header.duration = jiffies_to_msecs(jiffies);
- }
+ memset(rp, 0, sizeof (Sg_request));
+ rp->parentfp = sfp;
+ rp->header.duration = jiffies_to_msecs(jiffies);
+ list_add_tail(&rp->entry, &sfp->rq_list);
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return resp;
+ return rp;
+out_unlock:
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return NULL;
}
/* Return of 1 for found; 0 for not found */
static int
sg_remove_request(Sg_fd * sfp, Sg_request * srp)
{
- Sg_request *prev_rp;
- Sg_request *rp;
unsigned long iflags;
int res = 0;
- if ((!sfp) || (!srp) || (!sfp->headrp))
+ if (!sfp || !srp || list_empty(&sfp->rq_list))
return res;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
- prev_rp = sfp->headrp;
- if (srp == prev_rp) {
- sfp->headrp = prev_rp->nextrp;
- prev_rp->parentfp = NULL;
+ if (!list_empty(&srp->entry)) {
+ list_del(&srp->entry);
+ srp->parentfp = NULL;
res = 1;
- } else {
- while ((rp = prev_rp->nextrp)) {
- if (srp == rp) {
- prev_rp->nextrp = rp->nextrp;
- rp->parentfp = NULL;
- res = 1;
- break;
- }
- prev_rp = rp;
- }
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return res;
init_waitqueue_head(&sfp->read_wait);
rwlock_init(&sfp->rq_list_lock);
-
+ INIT_LIST_HEAD(&sfp->rq_list);
kref_init(&sfp->f_ref);
+ mutex_init(&sfp->f_mutex);
sfp->timeout = SG_DEFAULT_TIMEOUT;
sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
sfp->force_packid = SG_DEF_FORCE_PACK_ID;
{
struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
struct sg_device *sdp = sfp->parentdp;
+ Sg_request *srp;
/* Cleanup any responses which were never read(). */
- while (sfp->headrp)
- sg_finish_rem_req(sfp->headrp);
+ while (!list_empty(&sfp->rq_list)) {
+ srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
+ sg_finish_rem_req(srp);
+ }
if (sfp->reserve.bufflen > 0) {
SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
schedule_work(&sfp->ew.work);
}
-static int
-sg_res_in_use(Sg_fd * sfp)
-{
- const Sg_request *srp;
- unsigned long iflags;
-
- read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp; srp; srp = srp->nextrp)
- if (srp->res_used)
- break;
- read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return srp ? 1 : 0;
-}
-
#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)
/* must be called while holding sg_index_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
- int k, m, new_interface, blen, usg;
+ int k, new_interface, blen, usg;
Sg_request *srp;
Sg_fd *fp;
const sg_io_hdr_t *hp;
seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
(int) fp->cmd_q, (int) fp->force_packid,
(int) fp->keep_orphan);
- for (m = 0, srp = fp->headrp;
- srp != NULL;
- ++m, srp = srp->nextrp) {
+ list_for_each_entry(srp, &fp->rq_list, entry) {
hp = &srp->header;
new_interface = (hp->interface_id == '\0') ? 0 : 1;
if (srp->res_used) {
- if (new_interface &&
+ if (new_interface &&
(SG_FLAG_MMAP_IO & hp->flags))
cp = " mmap>> ";
else
seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
(int) srp->data.cmd_opcode);
}
- if (0 == m)
+ if (list_empty(&fp->rq_list))
seq_puts(s, " No requests active\n");
read_unlock(&fp->rq_list_lock);
}
ret = storvsc_do_io(dev, cmd_request);
if (ret == -EAGAIN) {
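+ /* the payload may have been allocated separately for a
+ * large scatterlist; free it here since the command will
+ * be requeued and resubmitted later
+ */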
+ if (payload_sz > sizeof(cmd_request->mpb))
+ kfree(payload);
/* no more space */
return SCSI_MLQUEUE_DEVICE_BUSY;
}
/*
- * Copyright (c) 2015, Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
loff_t *ppos)
{
struct ufs_qcom_host *host = file->f_mapping->host->i_private;
- char configuration[TESTBUS_CFG_BUFF_LINE_SIZE] = {0};
+ char configuration[TESTBUS_CFG_BUFF_LINE_SIZE] = {'\0'};
loff_t buff_pos = 0;
char *comma;
int ret = 0;
int major;
int minor;
+ unsigned long flags;
+ struct ufs_hba *hba = host->hba;
+
ret = simple_write_to_buffer(configuration, TESTBUS_CFG_BUFF_LINE_SIZE - 1,
&buff_pos, ubuf, cnt);
__func__);
goto out;
}
+ configuration[ret] = '\0';
comma = strnchr(configuration, TESTBUS_CFG_BUFF_LINE_SIZE, ',');
if (!comma || comma == configuration) {
goto out;
}
+ if (!ufs_qcom_testbus_cfg_is_ok(host, major, minor)) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
host->testbus.select_major = (u8)major;
host->testbus.select_minor = (u8)minor;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
/*
* Sanity check of the {major, minor} tuple is done in the
host->testbus.select_minor = 37;
}
-static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
+bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host,
+ u8 select_major, u8 select_minor)
{
- if (host->testbus.select_major >= TSTBUS_MAX) {
+ if (select_major >= TSTBUS_MAX) {
dev_err(host->hba->dev,
"%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n",
- __func__, host->testbus.select_major);
+ __func__, select_major);
return false;
}
* mappings of select_minor, since there is no harm in
* configuring a non-existent select_minor
*/
- if (host->testbus.select_minor > 0xFF) {
+ if (select_minor > 0xFF) {
dev_err(host->hba->dev,
"%s: 0x%05X is not a legal testbus option\n",
- __func__, host->testbus.select_minor);
+ __func__, select_minor);
return false;
}
*/
int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
- int reg;
- int offset;
+ int reg = 0;
+ int offset, ret = 0, testbus_sel_offset = 19;
u32 mask = TEST_BUS_SUB_SEL_MASK;
+ unsigned long flags;
+ struct ufs_hba *hba;
if (!host)
return -EINVAL;
-
- if (!ufs_qcom_testbus_cfg_is_ok(host))
- return -EPERM;
-
+ hba = host->hba;
+ spin_lock_irqsave(hba->host->host_lock, flags);
switch (host->testbus.select_major) {
case TSTBUS_UAWM:
reg = UFS_TEST_BUS_CTRL_0;
*/
}
mask <<= offset;
-
- ufshcd_rmwl(host->hba, TEST_BUS_SEL,
- (u32)host->testbus.select_major << 19,
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (reg) {
+ ufshcd_rmwl(host->hba, TEST_BUS_SEL,
+ (u32)host->testbus.select_major << testbus_sel_offset,
REG_UFS_CFG1);
- ufshcd_rmwl(host->hba, mask,
+ ufshcd_rmwl(host->hba, mask,
(u32)host->testbus.select_minor << offset,
reg);
+ } else {
+ dev_err(hba->dev, "%s: Problem setting minor\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
ufs_qcom_enable_test_bus(host);
/*
* Make sure the test bus configuration is
* committed before returning.
*/
mb();
-
- return 0;
+out:
+ return ret;
}
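As a sketch (not part of this patch) of how a caller would drive the reworked test-bus API: validate the selection first, then update the cached values under the host lock; TSTBUS_UNIPRO and minor value 37 are example selections.

static void example_select_unipro_testbus(struct ufs_qcom_host *host)
{
	unsigned long flags;

	if (!ufs_qcom_testbus_cfg_is_ok(host, TSTBUS_UNIPRO, 37))
		return;

	spin_lock_irqsave(host->hba->host->host_lock, flags);
	host->testbus.select_major = TSTBUS_UNIPRO;
	host->testbus.select_minor = 37;
	spin_unlock_irqrestore(host->hba->host->host_lock, flags);

	/* reads the selection back under the same lock internally */
	ufs_qcom_testbus_config(host);
}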
static void ufs_qcom_testbus_read(struct ufs_hba *hba)
/* bit definitions for REG_UFS_CFG1 register */
#define QUNIPRO_SEL UFS_BIT(0)
#define TEST_BUS_EN BIT(18)
-#define TEST_BUS_SEL GENMASK(22, 19)
+#define TEST_BUS_SEL 0x780000
#define UFS_REG_TEST_BUS_EN BIT(30)
/* bit definitions for REG_UFS_CFG2 register */
#define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
#define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
+bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host, u8 select_major,
+ u8 select_minor);
int ufs_qcom_testbus_config(struct ufs_qcom_host *host);
void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv,
void (*print_fn)(struct ufs_hba *hba, int offset, int num_regs,
used by audio driver to configure QDSP6's
ASM, ADM and AFE.
+config MSM_QDSP6_APRV2_VM
+ bool "Audio QDSP6 APRv2 virtualization support"
+ depends on MSM_HAB
+ help
+ Enable APRv2 IPC protocol support over
+ HAB between application processor and
+ QDSP6. APR is used by audio driver to
+ configure QDSP6's ASM, ADM and AFE.
+
config MSM_QDSP6_APRV3
bool "Audio QDSP6 APRv3 support"
depends on MSM_SMD
config MSM_AVTIMER
tristate "Avtimer Driver"
- depends on MSM_QDSP6_APRV2 || MSM_QDSP6_APRV3 || MSM_QDSP6_APRV2_GLINK
+ depends on MSM_QDSP6_APRV2 || MSM_QDSP6_APRV3 || MSM_QDSP6_APRV2_GLINK || \
+ MSM_QDSP6_APRV2_VM
help
This driver gets the Q6 out of power collapsed state and
exposes ioctl control to read avtimer tick.
rwref_read_get(&xprt_ptr->xprt_state_lhb0);
ctx = get_first_ch_ctx(xprt_ptr);
while (ctx) {
+ spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb3, flags);
+ spin_lock(&ctx->tx_lists_lock_lhc3);
+ if (!list_empty(&ctx->tx_active))
+ glink_qos_done_ch_tx(ctx);
+ spin_unlock(&ctx->tx_lists_lock_lhc3);
+ spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
rwref_write_get_atomic(&ctx->ch_state_lhb2, true);
if (ctx->local_open_state == GLINK_CHANNEL_OPENED ||
ctx->local_open_state == GLINK_CHANNEL_OPENING) {
if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state))
goto out;
- if (test_bit(ICNSS_PD_RESTART, &priv->state)) {
+ if (test_bit(ICNSS_PD_RESTART, &priv->state) && event_data->crashed) {
icnss_pr_err("PD Down while recovery inprogress, crashed: %d, state: 0x%lx\n",
event_data->crashed, priv->state);
ICNSS_ASSERT(0);
clear_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
fw_down_data.crashed = event_data->crashed;
- icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN, &fw_down_data);
+ if (test_bit(ICNSS_FW_READY, &priv->state))
+ icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN,
+ &fw_down_data);
icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
ICNSS_EVENT_SYNC, event_data);
done:
module_param_named(debug_mask, ipc_router_glink_xprt_debug_mask,
int, S_IRUGO | S_IWUSR | S_IWGRP);
+#define IPCRTR_INTENT_REQ_TIMEOUT_MS 5000
#if defined(DEBUG)
#define D(x...) do { \
if (ipc_router_glink_xprt_debug_mask) \
#define MIN_FRAG_SZ (IPC_ROUTER_HDR_SIZE + sizeof(union rr_control_msg))
#define IPC_RTR_XPRT_NAME_LEN (2 * GLINK_NAME_SIZE)
#define PIL_SUBSYSTEM_NAME_LEN 32
+#define IPC_RTR_WS_NAME_LEN ((2 * GLINK_NAME_SIZE) + 4)
#define MAX_NUM_LO_INTENTS 5
#define MAX_NUM_MD_INTENTS 3
* @transport: Physical Transport Name as identified by Glink.
* @pil_edge: Edge name understood by PIL.
* @ipc_rtr_xprt_name: XPRT Name to be registered with IPC Router.
+ * @notify_rx_ws_name: Name of wakesource used in notify rx path.
* @xprt: IPC Router XPRT structure to contain XPRT specific info.
* @ch_hndl: Opaque Channel handle returned by GLink.
* @xprt_wq: Workqueue to queue read & other XPRT related works.
char transport[GLINK_NAME_SIZE];
char pil_edge[PIL_SUBSYSTEM_NAME_LEN];
char ipc_rtr_xprt_name[IPC_RTR_XPRT_NAME_LEN];
+ char notify_rx_ws_name[IPC_RTR_WS_NAME_LEN];
struct msm_ipc_router_xprt xprt;
void *ch_hndl;
struct workqueue_struct *xprt_wq;
open_cfg.notify_state = glink_xprt_notify_state;
open_cfg.notify_rx_intent_req = glink_xprt_notify_rx_intent_req;
open_cfg.priv = glink_xprtp;
+ open_cfg.rx_intent_req_timeout_ms = IPCRTR_INTENT_REQ_TIMEOUT_MS;
glink_xprtp->pil = msm_ipc_load_subsystem(glink_xprtp);
glink_xprtp->ch_hndl = glink_open(&open_cfg);
kfree(glink_xprtp);
return -EFAULT;
}
-
- wakeup_source_init(&glink_xprtp->notify_rxv_ws, xprt_wq_name);
+ scnprintf(glink_xprtp->notify_rx_ws_name, IPC_RTR_WS_NAME_LEN,
+ "%s_%s_rx", glink_xprtp->ch_name, glink_xprtp->edge);
+ wakeup_source_init(&glink_xprtp->notify_rxv_ws,
+ glink_xprtp->notify_rx_ws_name);
mutex_lock(&glink_xprt_list_lock_lha1);
list_add(&glink_xprtp->list, &glink_xprt_list);
mutex_unlock(&glink_xprt_list_lock_lha1);
obj-$(CONFIG_MSM_QDSP6_APRV3) += apr.o apr_v3.o apr_tal.o voice_svc.o
obj-$(CONFIG_MSM_QDSP6_APRV2_GLINK) += apr.o apr_v2.o apr_tal_glink.o voice_svc.o
obj-$(CONFIG_MSM_QDSP6_APRV3_GLINK) += apr.o apr_v3.o apr_tal_glink.o voice_svc.o
+obj-$(CONFIG_MSM_QDSP6_APRV2_VM) += apr_vm.o apr_v2.o voice_svc.o
obj-$(CONFIG_SND_SOC_MSM_QDSP6V2_INTF) += msm_audio_ion.o
+obj-$(CONFIG_SND_SOC_QDSP6V2_VM) += msm_audio_ion_vm.o
obj-$(CONFIG_MSM_ADSP_LOADER) += adsp-loader.o
obj-$(CONFIG_MSM_QDSP6_SSR) += audio_ssr.o
obj-$(CONFIG_MSM_QDSP6_PDR) += audio_pdr.o
--- /dev/null
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/scm.h>
+#include <sound/apr_audio-v2.h>
+#include <linux/qdsp6v2/apr.h>
+#include <linux/qdsp6v2/apr_tal.h>
+#include <linux/qdsp6v2/aprv2_vm.h>
+#include <linux/qdsp6v2/dsp_debug.h>
+#include <linux/qdsp6v2/audio_notifier.h>
+#include <linux/ipc_logging.h>
+#include <linux/habmm.h>
+
+#define APR_PKT_IPC_LOG_PAGE_CNT 2
+#define APR_VM_CB_THREAD_NAME "apr_vm_cb_thread"
+#define APR_TX_BUF_SIZE 4096
+#define APR_RX_BUF_SIZE 4096
+
+static struct apr_q6 q6;
+static struct apr_client client[APR_DEST_MAX][APR_CLIENT_MAX];
+static void *apr_pkt_ctx;
+static wait_queue_head_t dsp_wait;
+static wait_queue_head_t modem_wait;
+static bool is_modem_up;
+static bool is_initial_boot;
+/* Subsystem restart: QDSP6 data, functions */
+static struct workqueue_struct *apr_reset_workqueue;
+static void apr_reset_deregister(struct work_struct *work);
+static void dispatch_event(unsigned long code, uint16_t proc);
+struct apr_reset_work {
+ void *handle;
+ struct work_struct work;
+};
+
+static bool apr_cf_debug;
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *debugfs_apr_debug;
+static ssize_t apr_debug_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char cmd;
+
+ if (copy_from_user(&cmd, ubuf, 1))
+ return -EFAULT;
+
+ apr_cf_debug = (cmd == '1');
+
+ return cnt;
+}
+
+static const struct file_operations apr_debug_ops = {
+ .write = apr_debug_write,
+};
+#endif
+
+#define APR_PKT_INFO(x...) \
+do { \
+ if (apr_pkt_ctx) \
+ ipc_log_string(apr_pkt_ctx, "<APR>: "x); \
+} while (0)
+
+/* hab handle */
+static uint32_t hab_handle_tx;
+static uint32_t hab_handle_rx;
+static char apr_tx_buf[APR_TX_BUF_SIZE];
+static char apr_rx_buf[APR_RX_BUF_SIZE];
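+
+/*
+ * Two HAB channels are used: hab_handle_tx carries synchronous
+ * command/response traffic (register, deregister, async send), while
+ * hab_handle_rx delivers unsolicited RX packets, each of which the
+ * callback thread acknowledges with an
+ * aprv2_vm_ack_rx_pkt_available_t.
+ */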
+
+/* apr callback thread task */
+static struct task_struct *apr_vm_cb_thread_task;
+static int pid;
+
+
+struct apr_svc_table {
+ char name[64];
+ int idx;
+ int id;
+ int dest_svc;
+ int client_id;
+ int handle;
+};
+
+/*
+ * src svc should be assigned dynamically through apr registration:
+ * 1. replace with a proper string name for registration.
+ * e.g. "qcom.apps.lnx." + name
+ * 2. register apr BE, retrieve dynamic src svc address,
+ * apr handle and store in svc tbl.
+ */
+
+static struct mutex m_lock_tbl_qdsp6;
+
+static struct apr_svc_table svc_tbl_qdsp6[] = {
+ {
+ .name = "AFE",
+ .idx = 0,
+ .id = 0,
+ .dest_svc = APR_SVC_AFE,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "ASM",
+ .idx = 1,
+ .id = 0,
+ .dest_svc = APR_SVC_ASM,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "ADM",
+ .idx = 2,
+ .id = 0,
+ .dest_svc = APR_SVC_ADM,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "CORE",
+ .idx = 3,
+ .id = 0,
+ .dest_svc = APR_SVC_ADSP_CORE,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "TEST",
+ .idx = 4,
+ .id = 0,
+ .dest_svc = APR_SVC_TEST_CLIENT,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "MVM",
+ .idx = 5,
+ .id = 0,
+ .dest_svc = APR_SVC_ADSP_MVM,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "CVS",
+ .idx = 6,
+ .id = 0,
+ .dest_svc = APR_SVC_ADSP_CVS,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "CVP",
+ .idx = 7,
+ .id = 0,
+ .dest_svc = APR_SVC_ADSP_CVP,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "USM",
+ .idx = 8,
+ .id = 0,
+ .dest_svc = APR_SVC_USM,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+	{
+		.name = "VIDC",
+		.idx = 9,
+		.id = 0,
+		.dest_svc = APR_SVC_VIDC,
+		.client_id = APR_CLIENT_AUDIO,
+		.handle = 0,
+	},
+ {
+ .name = "LSM",
+ .idx = 10,
+ .id = 0,
+ .dest_svc = APR_SVC_LSM,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+};
+
+static struct mutex m_lock_tbl_voice;
+
+static struct apr_svc_table svc_tbl_voice[] = {
+ {
+ .name = "VSM",
+ .idx = 0,
+ .id = 0,
+ .dest_svc = APR_SVC_VSM,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+ {
+ .name = "VPM",
+ .idx = 1,
+ .id = 0,
+ .dest_svc = APR_SVC_VPM,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+ {
+ .name = "MVS",
+ .idx = 2,
+ .id = 0,
+ .dest_svc = APR_SVC_MVS,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+ {
+ .name = "MVM",
+ .idx = 3,
+ .id = 0,
+ .dest_svc = APR_SVC_MVM,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+ {
+ .name = "CVS",
+ .idx = 4,
+ .id = 0,
+ .dest_svc = APR_SVC_CVS,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+ {
+ .name = "CVP",
+ .idx = 5,
+ .id = 0,
+ .dest_svc = APR_SVC_CVP,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+ {
+ .name = "SRD",
+ .idx = 6,
+ .id = 0,
+ .dest_svc = APR_SVC_SRD,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+ {
+ .name = "TEST",
+ .idx = 7,
+ .id = 0,
+ .dest_svc = APR_SVC_TEST_CLIENT,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+};
+
+enum apr_subsys_state apr_get_modem_state(void)
+{
+ return atomic_read(&q6.modem_state);
+}
+
+void apr_set_modem_state(enum apr_subsys_state state)
+{
+ atomic_set(&q6.modem_state, state);
+}
+
+enum apr_subsys_state apr_cmpxchg_modem_state(enum apr_subsys_state prev,
+ enum apr_subsys_state new)
+{
+ return atomic_cmpxchg(&q6.modem_state, prev, new);
+}
+
+static void apr_modem_down(unsigned long opcode)
+{
+ apr_set_modem_state(APR_SUBSYS_DOWN);
+ dispatch_event(opcode, APR_DEST_MODEM);
+}
+
+static void apr_modem_up(void)
+{
+ if (apr_cmpxchg_modem_state(APR_SUBSYS_DOWN, APR_SUBSYS_UP) ==
+ APR_SUBSYS_DOWN)
+ wake_up(&modem_wait);
+ is_modem_up = 1;
+}
+
+enum apr_subsys_state apr_get_q6_state(void)
+{
+ return atomic_read(&q6.q6_state);
+}
+EXPORT_SYMBOL(apr_get_q6_state);
+
+int apr_set_q6_state(enum apr_subsys_state state)
+{
+ pr_debug("%s: setting adsp state %d\n", __func__, state);
+ if (state < APR_SUBSYS_DOWN || state > APR_SUBSYS_LOADED)
+ return -EINVAL;
+ atomic_set(&q6.q6_state, state);
+ return 0;
+}
+EXPORT_SYMBOL(apr_set_q6_state);
+
+enum apr_subsys_state apr_cmpxchg_q6_state(enum apr_subsys_state prev,
+ enum apr_subsys_state new)
+{
+ return atomic_cmpxchg(&q6.q6_state, prev, new);
+}
+
+static void apr_adsp_down(unsigned long opcode)
+{
+ apr_set_q6_state(APR_SUBSYS_DOWN);
+ dispatch_event(opcode, APR_DEST_QDSP6);
+}
+
+static void apr_adsp_up(void)
+{
+ if (apr_cmpxchg_q6_state(APR_SUBSYS_DOWN, APR_SUBSYS_LOADED) ==
+ APR_SUBSYS_DOWN)
+ wake_up(&dsp_wait);
+}
+
+int apr_wait_for_device_up(int dest_id)
+{
+ int rc = -1;
+
+ if (dest_id == APR_DEST_MODEM)
+ rc = wait_event_interruptible_timeout(modem_wait,
+ (apr_get_modem_state() == APR_SUBSYS_UP),
+ (1 * HZ));
+ else if (dest_id == APR_DEST_QDSP6)
+ rc = wait_event_interruptible_timeout(dsp_wait,
+ (apr_get_q6_state() == APR_SUBSYS_UP),
+ (1 * HZ));
+ else
+ pr_err("%s: unknown dest_id %d\n", __func__, dest_id);
+	/* returns the remaining wait time */
+ return rc;
+}
+
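+/*
+ * Poll the HAB socket without blocking, retrying for up to half a
+ * second while the transport keeps returning -EAGAIN with no data.
+ * *size_bytes is restored before every attempt because
+ * habmm_socket_recv() updates it in place.
+ */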
+static int apr_vm_nb_receive(int32_t handle, void *dest_buff,
+ uint32_t *size_bytes, uint32_t timeout)
+{
+ int rc;
+ uint32_t dest_buff_bytes = *size_bytes;
+ unsigned long delay = jiffies + (HZ / 2);
+
+ do {
+ *size_bytes = dest_buff_bytes;
+ rc = habmm_socket_recv(handle,
+ dest_buff,
+ size_bytes,
+ timeout,
+ HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING);
+ } while (time_before(jiffies, delay) && (rc == -EAGAIN) &&
+ (*size_bytes == 0));
+
+ return rc;
+}
+
+static int apr_vm_cb_process_evt(char *buf, int len)
+{
+ struct apr_client_data data;
+ struct apr_client *apr_client;
+ struct apr_svc *c_svc;
+ struct apr_hdr *hdr;
+ uint16_t hdr_size;
+ uint16_t msg_type;
+ uint16_t ver;
+ uint16_t src;
+ uint16_t svc;
+ uint16_t clnt;
+ int i;
+ int temp_port = 0;
+ uint32_t *ptr;
+ uint32_t evt_id;
+
+	if (!buf || len <= APR_HDR_SIZE + sizeof(uint32_t)) {
+		pr_err("APR: Improper apr pkt received: %p %d\n", buf, len);
+		return -EINVAL;
+	}
+
+	pr_debug("APR: len = %d\n", len);
+	ptr = (uint32_t *)buf;
+	pr_debug("\n*****************\n");
+	for (i = 0; i < len/4; i++)
+		pr_debug("%x ", ptr[i]);
+	pr_debug("\n");
+	pr_debug("\n*****************\n");
+
+ evt_id = *((int32_t *)buf);
+ if (evt_id != APRV2_VM_EVT_RX_PKT_AVAILABLE) {
+ pr_err("APR: Wrong evt id: %d\n", evt_id);
+ return -EINVAL;
+ }
+ hdr = (struct apr_hdr *)(buf + sizeof(uint32_t));
+
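+	/*
+	 * hdr_field packs three values: the packet version in bits
+	 * [3:0], the header size in 32-bit words in bits [7:4], and
+	 * the message type in bits [9:8]. A value of 0x0234, for
+	 * example, decodes to version 4, a 12-byte header, and
+	 * message type 2.
+	 */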
+ ver = hdr->hdr_field;
+ ver = (ver & 0x000F);
+ if (ver > APR_PKT_VER + 1) {
+ pr_err("APR: Wrong version: %d\n", ver);
+ return -EINVAL;
+ }
+
+ hdr_size = hdr->hdr_field;
+ hdr_size = ((hdr_size & 0x00F0) >> 0x4) * 4;
+ if (hdr_size < APR_HDR_SIZE) {
+ pr_err("APR: Wrong hdr size:%d\n", hdr_size);
+ return -EINVAL;
+ }
+
+ if (hdr->pkt_size < APR_HDR_SIZE) {
+		pr_err("APR: Wrong packet size\n");
+ return -EINVAL;
+ }
+
+ msg_type = hdr->hdr_field;
+ msg_type = (msg_type >> 0x08) & 0x0003;
+ if (msg_type >= APR_MSG_TYPE_MAX && msg_type != APR_BASIC_RSP_RESULT) {
+ pr_err("APR: Wrong message type: %d\n", msg_type);
+ return -EINVAL;
+ }
+
+	/*
+	 * dest_svc is dynamically assigned by the apr service,
+	 * so there is no need to range-check it here.
+	 */
+ if (hdr->src_domain >= APR_DOMAIN_MAX ||
+ hdr->dest_domain >= APR_DOMAIN_MAX ||
+ hdr->src_svc >= APR_SVC_MAX) {
+ pr_err("APR: Wrong APR header\n");
+ return -EINVAL;
+ }
+
+ svc = hdr->dest_svc;
+ if (hdr->src_domain == APR_DOMAIN_MODEM)
+ clnt = APR_CLIENT_VOICE;
+ else if (hdr->src_domain == APR_DOMAIN_ADSP)
+ clnt = APR_CLIENT_AUDIO;
+ else {
+ pr_err("APR: Pkt from wrong source: %d\n", hdr->src_domain);
+ return -EINVAL;
+ }
+
+ src = apr_get_data_src(hdr);
+ if (src == APR_DEST_MAX)
+ return -EINVAL;
+
+ pr_debug("src =%d clnt = %d\n", src, clnt);
+ apr_client = &client[src][clnt];
+ for (i = 0; i < APR_SVC_MAX; i++)
+ if (apr_client->svc[i].id == svc) {
+ pr_debug("svc_id = %d\n", apr_client->svc[i].id);
+ c_svc = &apr_client->svc[i];
+ break;
+ }
+
+ if (i == APR_SVC_MAX) {
+ pr_err("APR: service is not registered\n");
+ return -ENXIO;
+ }
+
+ pr_debug("svc_idx = %d\n", i);
+ pr_debug("%x %x %x %p %p\n", c_svc->id, c_svc->dest_id,
+ c_svc->client_id, c_svc->fn, c_svc->priv);
+
+ data.payload_size = hdr->pkt_size - hdr_size;
+ data.opcode = hdr->opcode;
+ data.src = src;
+ data.src_port = hdr->src_port;
+ data.dest_port = hdr->dest_port;
+ data.token = hdr->token;
+ data.msg_type = msg_type;
+	if (data.payload_size > 0)
+		data.payload = (char *)hdr + hdr_size;
+	else
+		data.payload = NULL;
+
+ if (unlikely(apr_cf_debug)) {
+ if (hdr->opcode == APR_BASIC_RSP_RESULT && data.payload) {
+ uint32_t *ptr = data.payload;
+
+ APR_PKT_INFO(
+ "Rx: src_addr[0x%X] dest_addr[0x%X] opcode[0x%X] token[0x%X] rc[0x%X]",
+ (hdr->src_domain << 8) | hdr->src_svc,
+ (hdr->dest_domain << 8) | hdr->dest_svc,
+ hdr->opcode, hdr->token, ptr[1]);
+ } else {
+ APR_PKT_INFO(
+ "Rx: src_addr[0x%X] dest_addr[0x%X] opcode[0x%X] token[0x%X]",
+ (hdr->src_domain << 8) | hdr->src_svc,
+ (hdr->dest_domain << 8) | hdr->dest_svc, hdr->opcode,
+ hdr->token);
+ }
+ }
+
+	/* Fold the two port bytes into an index into port_fn[],
+	 * mirroring the mapping used at registration time.
+	 */
+	temp_port = ((data.dest_port >> 8) * 8) + (data.dest_port & 0xFF);
+	pr_debug("port = %d t_port = %d\n", data.src_port, temp_port);
+	if (temp_port >= APR_MAX_PORTS) {
+		pr_err("APR: temp_port %d out of bounds\n", temp_port);
+		return -EINVAL;
+	}
+	if (c_svc->port_cnt && c_svc->port_fn[temp_port])
+		c_svc->port_fn[temp_port](&data, c_svc->port_priv[temp_port]);
+ else if (c_svc->fn)
+ c_svc->fn(&data, c_svc->priv);
+ else
+ pr_err("APR: Rxed a packet for NULL callback\n");
+
+ return 0;
+}
+
+static int apr_vm_cb_thread(void *data)
+{
+ uint32_t apr_rx_buf_len;
+ struct aprv2_vm_ack_rx_pkt_available_t apr_ack;
+ int status = 0;
+ int ret = 0;
+
+ while (1) {
+ apr_rx_buf_len = sizeof(apr_rx_buf);
+ ret = habmm_socket_recv(hab_handle_rx,
+ (void *)&apr_rx_buf,
+ &apr_rx_buf_len,
+ 0xFFFFFFFF,
+ 0);
+ if (ret) {
+ pr_err("%s: habmm_socket_recv failed %d\n",
+ __func__, ret);
+			/*
+			 * TODO: depending on the HAB error code, a
+			 * retry mechanism may be needed here instead
+			 * of breaking out on any recv failure.
+			 */
+ break;
+ }
+
+ status = apr_vm_cb_process_evt(apr_rx_buf, apr_rx_buf_len);
+
+ apr_ack.status = status;
+ ret = habmm_socket_send(hab_handle_rx,
+ (void *)&apr_ack,
+ sizeof(apr_ack),
+ 0);
+ if (ret) {
+ pr_err("%s: habmm_socket_send failed %d\n",
+ __func__, ret);
+ /* TODO: break if send failed ? */
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int apr_vm_get_svc(const char *svc_name, int domain_id, int *client_id,
+ int *svc_idx, int *svc_id, int *dest_svc, int *handle)
+{
+ int i;
+ int size;
+ struct apr_svc_table *tbl;
+ struct mutex *lock;
+ struct aprv2_vm_cmd_register_rsp_t apr_rsp;
+ uint32_t apr_len;
+ int ret = 0;
+ struct {
+ uint32_t cmd_id;
+ struct aprv2_vm_cmd_register_t reg_cmd;
+ } tx_data;
+
+ if (domain_id == APR_DOMAIN_ADSP) {
+ tbl = svc_tbl_qdsp6;
+ size = ARRAY_SIZE(svc_tbl_qdsp6);
+ lock = &m_lock_tbl_qdsp6;
+ } else {
+ tbl = svc_tbl_voice;
+ size = ARRAY_SIZE(svc_tbl_voice);
+ lock = &m_lock_tbl_voice;
+ }
+
+ mutex_lock(lock);
+ for (i = 0; i < size; i++) {
+ if (!strcmp(svc_name, tbl[i].name)) {
+ *client_id = tbl[i].client_id;
+ *svc_idx = tbl[i].idx;
+ if (!tbl[i].id && !tbl[i].handle) {
+ /* need to register a new service */
+ memset((void *) &tx_data, 0, sizeof(tx_data));
+
+ apr_len = sizeof(tx_data);
+ tx_data.cmd_id = APRV2_VM_CMDID_REGISTER;
+ tx_data.reg_cmd.name_size = snprintf(
+ tx_data.reg_cmd.name,
+ APRV2_VM_MAX_DNS_SIZE,
+ "qcom.apps.lnx.%s",
+ svc_name);
+ tx_data.reg_cmd.addr = 0;
+ ret = habmm_socket_send(hab_handle_tx,
+ (void *) &tx_data,
+ apr_len,
+ 0);
+ if (ret) {
+ pr_err("%s: habmm_socket_send failed %d\n",
+ __func__, ret);
+ mutex_unlock(lock);
+ return ret;
+ }
+ /* wait for response */
+ apr_len = sizeof(apr_rsp);
+ ret = apr_vm_nb_receive(hab_handle_tx,
+ (void *)&apr_rsp,
+ &apr_len,
+ 0xFFFFFFFF);
+ if (ret) {
+ pr_err("%s: apr_vm_nb_receive failed %d\n",
+ __func__, ret);
+ mutex_unlock(lock);
+ return ret;
+ }
+ if (apr_rsp.status) {
+ pr_err("%s: apr_vm_nb_receive status %d\n",
+ __func__, apr_rsp.status);
+ ret = apr_rsp.status;
+ mutex_unlock(lock);
+ return ret;
+ }
+ /* update svc table */
+ tbl[i].handle = apr_rsp.handle;
+ tbl[i].id = apr_rsp.addr &
+ APRV2_VM_PKT_SERVICE_ID_MASK;
+ }
+ *svc_id = tbl[i].id;
+ *dest_svc = tbl[i].dest_svc;
+ *handle = tbl[i].handle;
+ break;
+ }
+ }
+ mutex_unlock(lock);
+
+ pr_debug("%s: svc_name = %s client_id = %d domain_id = %d\n",
+ __func__, svc_name, *client_id, domain_id);
+ pr_debug("%s: src_svc = %d dest_svc = %d handle = %d\n",
+ __func__, *svc_id, *dest_svc, *handle);
+
+ if (i == size) {
+ pr_err("%s: APR: Wrong svc name %s\n", __func__, svc_name);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int apr_vm_rel_svc(int domain_id, int svc_id, int handle)
+{
+ int i;
+ int size;
+ struct apr_svc_table *tbl;
+ struct mutex *lock;
+ struct aprv2_vm_cmd_deregister_rsp_t apr_rsp;
+ uint32_t apr_len;
+ int ret = 0;
+ struct {
+ uint32_t cmd_id;
+ struct aprv2_vm_cmd_deregister_t dereg_cmd;
+ } tx_data;
+
+ if (domain_id == APR_DOMAIN_ADSP) {
+ tbl = svc_tbl_qdsp6;
+ size = ARRAY_SIZE(svc_tbl_qdsp6);
+ lock = &m_lock_tbl_qdsp6;
+ } else {
+ tbl = svc_tbl_voice;
+ size = ARRAY_SIZE(svc_tbl_voice);
+ lock = &m_lock_tbl_voice;
+ }
+
+ mutex_lock(lock);
+ for (i = 0; i < size; i++) {
+ if (tbl[i].id == svc_id && tbl[i].handle == handle) {
+ /* need to deregister a service */
+ memset((void *) &tx_data, 0, sizeof(tx_data));
+
+ apr_len = sizeof(tx_data);
+ tx_data.cmd_id = APRV2_VM_CMDID_DEREGISTER;
+ tx_data.dereg_cmd.handle = handle;
+ ret = habmm_socket_send(hab_handle_tx,
+ (void *) &tx_data,
+ apr_len,
+ 0);
+ if (ret)
+ pr_err("%s: habmm_socket_send failed %d\n",
+ __func__, ret);
+			/*
+			 * TODO: if the send failed, the response wait
+			 * below should be skipped and the table entry
+			 * cleared regardless.
+			 */
+ /* wait for response */
+ apr_len = sizeof(apr_rsp);
+ ret = apr_vm_nb_receive(hab_handle_tx,
+ (void *)&apr_rsp,
+ &apr_len,
+ 0xFFFFFFFF);
+ if (ret)
+ pr_err("%s: apr_vm_nb_receive failed %d\n",
+ __func__, ret);
+ if (apr_rsp.status) {
+ pr_err("%s: apr_vm_nb_receive status %d\n",
+ __func__, apr_rsp.status);
+ ret = apr_rsp.status;
+ }
+ /* clear svc table */
+ tbl[i].handle = 0;
+ tbl[i].id = 0;
+ break;
+ }
+ }
+ mutex_unlock(lock);
+
+ if (i == size) {
+ pr_err("%s: APR: Wrong svc id %d handle %d\n",
+ __func__, svc_id, handle);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
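+/*
+ * The TX message is assembled in apr_tx_buf as a uint32_t command id
+ * (APRV2_VM_CMDID_ASYNC_SEND) followed by an
+ * aprv2_vm_cmd_async_send_t whose trailing pkt_header is overwritten
+ * with the caller's APR packet, so the send length is pkt_size plus
+ * two uint32_t words (command id and handle).
+ */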
+int apr_send_pkt(void *handle, uint32_t *buf)
+{
+ struct apr_svc *svc = handle;
+ struct apr_hdr *hdr;
+ unsigned long flags;
+ uint32_t *cmd_id = (uint32_t *)apr_tx_buf;
+ struct aprv2_vm_cmd_async_send_t *apr_send =
+ (struct aprv2_vm_cmd_async_send_t *)(apr_tx_buf +
+ sizeof(uint32_t));
+ uint32_t apr_send_len;
+ struct aprv2_vm_cmd_async_send_rsp_t apr_rsp;
+ uint32_t apr_rsp_len;
+ int ret = 0;
+
+ if (!handle || !buf) {
+ pr_err("APR: Wrong parameters\n");
+ return -EINVAL;
+ }
+ if (svc->need_reset) {
+ pr_err("APR: send_pkt service need reset\n");
+ return -ENETRESET;
+ }
+
+ if ((svc->dest_id == APR_DEST_QDSP6) &&
+ (apr_get_q6_state() != APR_SUBSYS_LOADED)) {
+		pr_err("%s: DSP is not up yet\n", __func__);
+ return -ENETRESET;
+ } else if ((svc->dest_id == APR_DEST_MODEM) &&
+ (apr_get_modem_state() == APR_SUBSYS_DOWN)) {
+		pr_err("%s: Modem is not up yet\n", __func__);
+ return -ENETRESET;
+ }
+
+ spin_lock_irqsave(&svc->w_lock, flags);
+ if (!svc->id || !svc->vm_handle) {
+		pr_err("APR: Service is not opened yet\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ hdr = (struct apr_hdr *)buf;
+
+ hdr->src_domain = APR_DOMAIN_APPS;
+ hdr->src_svc = svc->id;
+ hdr->dest_domain = svc->dest_domain;
+ hdr->dest_svc = svc->vm_dest_svc;
+
+ if (unlikely(apr_cf_debug)) {
+ APR_PKT_INFO(
+ "Tx: src_addr[0x%X] dest_addr[0x%X] opcode[0x%X] token[0x%X]",
+ (hdr->src_domain << 8) | hdr->src_svc,
+ (hdr->dest_domain << 8) | hdr->dest_svc, hdr->opcode,
+ hdr->token);
+ }
+
+ memset((void *)&apr_tx_buf, 0, sizeof(apr_tx_buf));
+ /* pkt_size + cmd_id + handle */
+ apr_send_len = hdr->pkt_size + sizeof(uint32_t) * 2;
+ *cmd_id = APRV2_VM_CMDID_ASYNC_SEND;
+ apr_send->handle = svc->vm_handle;
+
+	/* bounds check before copying into apr_tx_buf */
+ if (hdr->pkt_size > APR_TX_BUF_SIZE - (sizeof(uint32_t) * 2)) {
+ pr_err("APR: Wrong pkt size %d\n", hdr->pkt_size);
+ ret = -ENOMEM;
+ goto done;
+ }
+ memcpy(&apr_send->pkt_header, buf, hdr->pkt_size);
+
+ ret = habmm_socket_send(hab_handle_tx,
+ (void *)&apr_tx_buf,
+ apr_send_len,
+ 0);
+ if (ret) {
+ pr_err("%s: habmm_socket_send failed %d\n",
+ __func__, ret);
+ goto done;
+ }
+ /* wait for response */
+ apr_rsp_len = sizeof(apr_rsp);
+ ret = apr_vm_nb_receive(hab_handle_tx,
+ (void *)&apr_rsp,
+ &apr_rsp_len,
+ 0xFFFFFFFF);
+ if (ret) {
+ pr_err("%s: apr_vm_nb_receive failed %d\n",
+ __func__, ret);
+ goto done;
+ }
+ if (apr_rsp.status) {
+ pr_err("%s: apr_vm_nb_receive status %d\n",
+ __func__, apr_rsp.status);
+ /* should translate status properly */
+ ret = -ECOMM;
+ goto done;
+ }
+
+ /* upon successful send, return packet size */
+ ret = hdr->pkt_size;
+
+done:
+ spin_unlock_irqrestore(&svc->w_lock, flags);
+ return ret;
+}
+
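+/*
+ * apr_register() resolves the service by name via apr_vm_get_svc()
+ * (which registers it with the backend on first use), then installs
+ * the callback either for one port or, when src_port is 0xFFFFFFFF,
+ * for the whole service. A minimal caller sketch, where afe_callback
+ * and pkt are hypothetical client-side names:
+ *
+ *	handle = apr_register("ADSP", "AFE", afe_callback,
+ *			      0xFFFFFFFF, NULL);
+ *	if (handle)
+ *		rc = apr_send_pkt(handle, (uint32_t *)&pkt);
+ */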
+struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn,
+ uint32_t src_port, void *priv)
+{
+ struct apr_client *clnt;
+ int client_id = 0;
+ int svc_idx = 0;
+ int svc_id = 0;
+ int dest_id = 0;
+ int domain_id = 0;
+ int temp_port = 0;
+ struct apr_svc *svc = NULL;
+ int rc = 0;
+ int dest_svc = 0;
+ int handle = 0;
+
+ if (!dest || !svc_name || !svc_fn)
+ return NULL;
+
+ if (!strcmp(dest, "ADSP"))
+ domain_id = APR_DOMAIN_ADSP;
+	else if (!strcmp(dest, "MODEM")) {
+		/* SMD channels are not requested when the destination
+		 * is MODEM: they are no longer used, and these clients
+		 * only listen for MODEM SSR events.
+		 */
+		domain_id = APR_DOMAIN_MODEM;
+ } else {
+ pr_err("APR: wrong destination\n");
+ goto done;
+ }
+
+ dest_id = apr_get_dest_id(dest);
+
+ if (dest_id == APR_DEST_QDSP6) {
+ if (apr_get_q6_state() != APR_SUBSYS_LOADED) {
+ pr_err("%s: adsp not up\n", __func__);
+ return NULL;
+ }
+ pr_debug("%s: adsp Up\n", __func__);
+ } else if (dest_id == APR_DEST_MODEM) {
+ if (apr_get_modem_state() == APR_SUBSYS_DOWN) {
+ if (is_modem_up) {
+				pr_err("%s: modem shutdown due to SSR\n",
+					__func__);
+ return NULL;
+ }
+ pr_debug("%s: Wait for modem to bootup\n", __func__);
+ rc = apr_wait_for_device_up(APR_DEST_MODEM);
+ if (rc == 0) {
+ pr_err("%s: Modem is not Up\n", __func__);
+ return NULL;
+ }
+ }
+ pr_debug("%s: modem Up\n", __func__);
+ }
+
+ if (apr_vm_get_svc(svc_name, domain_id, &client_id, &svc_idx, &svc_id,
+ &dest_svc, &handle)) {
+ pr_err("%s: apr_vm_get_svc failed\n", __func__);
+ goto done;
+ }
+
+ clnt = &client[dest_id][client_id];
+ svc = &clnt->svc[svc_idx];
+ mutex_lock(&svc->m_lock);
+ clnt->id = client_id;
+ if (svc->need_reset) {
+ mutex_unlock(&svc->m_lock);
+ pr_err("APR: Service needs reset\n");
+ goto done;
+ }
+ svc->id = svc_id;
+ svc->vm_dest_svc = dest_svc;
+ svc->dest_id = dest_id;
+ svc->client_id = client_id;
+ svc->dest_domain = domain_id;
+ svc->pkt_owner = APR_PKT_OWNER_DRIVER;
+ svc->vm_handle = handle;
+
+ if (src_port != 0xFFFFFFFF) {
+ temp_port = ((src_port >> 8) * 8) + (src_port & 0xFF);
+ pr_debug("port = %d t_port = %d\n", src_port, temp_port);
+ if (temp_port >= APR_MAX_PORTS || temp_port < 0) {
+ pr_err("APR: temp_port out of bounds\n");
+ mutex_unlock(&svc->m_lock);
+ return NULL;
+ }
+ if (!svc->port_cnt && !svc->svc_cnt)
+ clnt->svc_cnt++;
+ svc->port_cnt++;
+ svc->port_fn[temp_port] = svc_fn;
+ svc->port_priv[temp_port] = priv;
+ } else {
+ if (!svc->fn) {
+ if (!svc->port_cnt && !svc->svc_cnt)
+ clnt->svc_cnt++;
+ svc->fn = svc_fn;
+ if (svc->port_cnt)
+ svc->svc_cnt++;
+ svc->priv = priv;
+ }
+ }
+
+ mutex_unlock(&svc->m_lock);
+done:
+ return svc;
+}
+
+static void apr_reset_deregister(struct work_struct *work)
+{
+ struct apr_svc *handle = NULL;
+ struct apr_reset_work *apr_reset =
+ container_of(work, struct apr_reset_work, work);
+
+ handle = apr_reset->handle;
+ pr_debug("%s:handle[%pK]\n", __func__, handle);
+ apr_deregister(handle);
+ kfree(apr_reset);
+}
+
+int apr_deregister(void *handle)
+{
+ struct apr_svc *svc = handle;
+ struct apr_client *clnt;
+ uint16_t dest_id;
+ uint16_t client_id;
+
+ if (!handle)
+ return -EINVAL;
+
+ mutex_lock(&svc->m_lock);
+ dest_id = svc->dest_id;
+ client_id = svc->client_id;
+ clnt = &client[dest_id][client_id];
+
+ if (svc->port_cnt > 0 || svc->svc_cnt > 0) {
+ if (svc->port_cnt)
+ svc->port_cnt--;
+ else if (svc->svc_cnt)
+ svc->svc_cnt--;
+ if (!svc->port_cnt && !svc->svc_cnt) {
+ client[dest_id][client_id].svc_cnt--;
+ svc->need_reset = 0x0;
+ }
+ } else if (client[dest_id][client_id].svc_cnt > 0) {
+ client[dest_id][client_id].svc_cnt--;
+ if (!client[dest_id][client_id].svc_cnt) {
+ svc->need_reset = 0x0;
+ pr_debug("%s: service is reset %p\n", __func__, svc);
+ }
+ }
+
+ if (!svc->port_cnt && !svc->svc_cnt) {
+ if (apr_vm_rel_svc(svc->dest_domain, svc->id, svc->vm_handle))
+ pr_err("%s: apr_vm_rel_svc failed\n", __func__);
+ svc->priv = NULL;
+ svc->id = 0;
+ svc->vm_dest_svc = 0;
+ svc->fn = NULL;
+ svc->dest_id = 0;
+ svc->client_id = 0;
+ svc->need_reset = 0x0;
+ svc->vm_handle = 0;
+ }
+ mutex_unlock(&svc->m_lock);
+
+ return 0;
+}
+
+void apr_reset(void *handle)
+{
+ struct apr_reset_work *apr_reset_worker = NULL;
+
+ if (!handle)
+ return;
+ pr_debug("%s: handle[%pK]\n", __func__, handle);
+
+ if (apr_reset_workqueue == NULL) {
+ pr_err("%s: apr_reset_workqueue is NULL\n", __func__);
+ return;
+ }
+
+ apr_reset_worker = kzalloc(sizeof(struct apr_reset_work),
+ GFP_ATOMIC);
+
+ if (apr_reset_worker == NULL) {
+ pr_err("%s: mem failure\n", __func__);
+ return;
+ }
+
+ apr_reset_worker->handle = handle;
+ INIT_WORK(&apr_reset_worker->work, apr_reset_deregister);
+ queue_work(apr_reset_workqueue, &apr_reset_worker->work);
+}
+
+/* Dispatch the Reset events to Modem and audio clients */
+static void dispatch_event(unsigned long code, uint16_t proc)
+{
+ struct apr_client *apr_client;
+ struct apr_client_data data;
+ struct apr_svc *svc;
+ uint16_t clnt;
+ int i, j;
+
+ memset(&data, 0, sizeof(data));
+ data.opcode = RESET_EVENTS;
+ data.reset_event = code;
+
+ /* Service domain can be different from the processor */
+ data.reset_proc = apr_get_reset_domain(proc);
+
+ clnt = APR_CLIENT_AUDIO;
+ apr_client = &client[proc][clnt];
+ for (i = 0; i < APR_SVC_MAX; i++) {
+ mutex_lock(&apr_client->svc[i].m_lock);
+ if (apr_client->svc[i].fn) {
+ apr_client->svc[i].need_reset = 0x1;
+ apr_client->svc[i].fn(&data, apr_client->svc[i].priv);
+ }
+ if (apr_client->svc[i].port_cnt) {
+ svc = &(apr_client->svc[i]);
+ svc->need_reset = 0x1;
+ for (j = 0; j < APR_MAX_PORTS; j++)
+ if (svc->port_fn[j])
+ svc->port_fn[j](&data,
+ svc->port_priv[j]);
+ }
+ mutex_unlock(&apr_client->svc[i].m_lock);
+ }
+
+ clnt = APR_CLIENT_VOICE;
+ apr_client = &client[proc][clnt];
+ for (i = 0; i < APR_SVC_MAX; i++) {
+ mutex_lock(&apr_client->svc[i].m_lock);
+ if (apr_client->svc[i].fn) {
+ apr_client->svc[i].need_reset = 0x1;
+ apr_client->svc[i].fn(&data, apr_client->svc[i].priv);
+ }
+ if (apr_client->svc[i].port_cnt) {
+ svc = &(apr_client->svc[i]);
+ svc->need_reset = 0x1;
+ for (j = 0; j < APR_MAX_PORTS; j++)
+ if (svc->port_fn[j])
+ svc->port_fn[j](&data,
+ svc->port_priv[j]);
+ }
+ mutex_unlock(&apr_client->svc[i].m_lock);
+ }
+}
+
+static int apr_notifier_service_cb(struct notifier_block *this,
+ unsigned long opcode, void *data)
+{
+ struct audio_notifier_cb_data *cb_data = data;
+
+ if (cb_data == NULL) {
+ pr_err("%s: Callback data is NULL!\n", __func__);
+ goto done;
+ }
+
+ pr_debug("%s: Service opcode 0x%lx, domain %d\n",
+ __func__, opcode, cb_data->domain);
+
+ switch (opcode) {
+ case AUDIO_NOTIFIER_SERVICE_DOWN:
+ /*
+ * Use flag to ignore down notifications during
+ * initial boot. There is no benefit from error
+ * recovery notifications during initial boot
+ * up since everything is expected to be down.
+ */
+ if (is_initial_boot) {
+ is_initial_boot = false;
+ break;
+ }
+ if (cb_data->domain == AUDIO_NOTIFIER_MODEM_DOMAIN)
+ apr_modem_down(opcode);
+ else
+ apr_adsp_down(opcode);
+ break;
+ case AUDIO_NOTIFIER_SERVICE_UP:
+ is_initial_boot = false;
+ if (cb_data->domain == AUDIO_NOTIFIER_MODEM_DOMAIN)
+ apr_modem_up();
+ else
+ apr_adsp_up();
+ break;
+ default:
+ break;
+ }
+done:
+ return NOTIFY_OK;
+}
+
+static struct notifier_block adsp_service_nb = {
+ .notifier_call = apr_notifier_service_cb,
+ .priority = 0,
+};
+
+static struct notifier_block modem_service_nb = {
+ .notifier_call = apr_notifier_service_cb,
+ .priority = 0,
+};
+
+static void apr_vm_set_subsys_state(void)
+{
+	/* Set the default subsystem states for the VM environment.
+	 * Both q6 and modem should be in the LOADED state, since the
+	 * VM boots up at a late stage after pm.
+	 */
+ apr_set_q6_state(APR_SUBSYS_LOADED);
+ apr_set_modem_state(APR_SUBSYS_LOADED);
+}
+
+static int __init apr_init(void)
+{
+ int i, j, k;
+ int ret;
+
+ /* open apr channel tx and rx, store as global */
+ ret = habmm_socket_open(&hab_handle_tx,
+ MM_AUD_1,
+ 0xFFFFFFFF,
+ HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_FE);
+ if (ret) {
+ pr_err("%s: habmm_socket_open tx failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = habmm_socket_open(&hab_handle_rx,
+ MM_AUD_2,
+ 0xFFFFFFFF,
+ HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_FE);
+ if (ret) {
+ pr_err("%s: habmm_socket_open rx failed %d\n", __func__, ret);
+ habmm_socket_close(hab_handle_tx);
+ return ret;
+ }
+ pr_info("%s: hab_handle_tx %x hab_handle_rx %x\n",
+ __func__, hab_handle_tx, hab_handle_rx);
+
+ /* create apr ch rx cb thread */
+ apr_vm_cb_thread_task = kthread_run(apr_vm_cb_thread,
+ NULL,
+ APR_VM_CB_THREAD_NAME);
+ if (IS_ERR(apr_vm_cb_thread_task)) {
+ ret = PTR_ERR(apr_vm_cb_thread_task);
+ pr_err("%s: kthread_run failed %d\n", __func__, ret);
+ habmm_socket_close(hab_handle_tx);
+ habmm_socket_close(hab_handle_rx);
+ return ret;
+ }
+ pid = apr_vm_cb_thread_task->pid;
+ pr_info("%s: apr_vm_cb_thread started pid %d\n",
+ __func__, pid);
+
+ mutex_init(&m_lock_tbl_qdsp6);
+ mutex_init(&m_lock_tbl_voice);
+
+ for (i = 0; i < APR_DEST_MAX; i++)
+ for (j = 0; j < APR_CLIENT_MAX; j++) {
+ mutex_init(&client[i][j].m_lock);
+ for (k = 0; k < APR_SVC_MAX; k++) {
+ mutex_init(&client[i][j].svc[k].m_lock);
+ spin_lock_init(&client[i][j].svc[k].w_lock);
+ }
+ }
+
+ apr_vm_set_subsys_state();
+ mutex_init(&q6.lock);
+ apr_reset_workqueue = create_singlethread_workqueue("apr_driver");
+ if (!apr_reset_workqueue) {
+ habmm_socket_close(hab_handle_tx);
+ habmm_socket_close(hab_handle_rx);
+ kthread_stop(apr_vm_cb_thread_task);
+ return -ENOMEM;
+ }
+
+ apr_pkt_ctx = ipc_log_context_create(APR_PKT_IPC_LOG_PAGE_CNT,
+ "apr", 0);
+ if (!apr_pkt_ctx)
+ pr_err("%s: Unable to create ipc log context\n", __func__);
+
+ is_initial_boot = true;
+ subsys_notif_register("apr_adsp", AUDIO_NOTIFIER_ADSP_DOMAIN,
+ &adsp_service_nb);
+ subsys_notif_register("apr_modem", AUDIO_NOTIFIER_MODEM_DOMAIN,
+ &modem_service_nb);
+
+ return 0;
+}
+device_initcall(apr_init);
+
+static int __init apr_late_init(void)
+{
+ int ret = 0;
+
+ init_waitqueue_head(&dsp_wait);
+ init_waitqueue_head(&modem_wait);
+
+ return ret;
+}
+late_initcall(apr_late_init);
+
+static void __exit apr_exit(void)
+{
+ habmm_socket_close(hab_handle_tx);
+ habmm_socket_close(hab_handle_rx);
+ kthread_stop(apr_vm_cb_thread_task);
+}
+__exitcall(apr_exit);
+
+#ifdef CONFIG_DEBUG_FS
+static int __init apr_debug_init(void)
+{
+ debugfs_apr_debug = debugfs_create_file("msm_apr_debug",
+ S_IFREG | S_IRUGO, NULL, NULL,
+ &apr_debug_ops);
+ return 0;
+}
+device_initcall(apr_debug_init);
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/msm_audio_ion.h>
+#include <linux/habmm.h>
+#include "../../../staging/android/ion/ion_priv.h"
+#include "../../../staging/android/ion/ion_hvenv_driver.h"
+
+#define MSM_AUDIO_ION_PROBED (1 << 0)
+
+#define MSM_AUDIO_SMMU_VM_CMD_MAP 0x00000001
+#define MSM_AUDIO_SMMU_VM_CMD_UNMAP 0x00000002
+#define MSM_AUDIO_SMMU_VM_HAB_MINOR_ID 1
+
+struct msm_audio_ion_private {
+ bool smmu_enabled;
+ bool audioheap_enabled;
+ u8 device_status;
+ struct list_head smmu_map_list;
+ struct mutex smmu_map_mutex;
+};
+
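+/*
+ * Per-buffer bookkeeping: each successful SMMU map adds one of these
+ * entries to smmu_map_list so the matching habmm export can be found
+ * and released again at unmap time.
+ */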
+struct msm_audio_smmu_map_data {
+ struct ion_client *client;
+ struct ion_handle *handle;
+ u32 export_id;
+ struct list_head list;
+};
+
+struct msm_audio_smmu_vm_map_cmd {
+ int cmd_id;
+ u32 export_id;
+ u32 buf_size;
+};
+
+struct msm_audio_smmu_vm_map_cmd_rsp {
+ int status;
+ u64 addr;
+};
+
+struct msm_audio_smmu_vm_unmap_cmd {
+ int cmd_id;
+ u32 export_id;
+};
+
+struct msm_audio_smmu_vm_unmap_cmd_rsp {
+ int status;
+};
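+
+/*
+ * SMMU map/unmap requests travel over a dedicated HAB socket: the
+ * driver exports the buffer with habmm_export(), sends a map command
+ * carrying the export id, and the backend replies with the mapped
+ * device address in msm_audio_smmu_vm_map_cmd_rsp.addr.
+ */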
+
+static struct msm_audio_ion_private msm_audio_ion_data = {0,};
+static u32 msm_audio_ion_hab_handle;
+
+static int msm_audio_ion_get_phys(struct ion_client *client,
+ struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len,
+ void *vaddr);
+
+static int msm_audio_ion_smmu_map(struct ion_client *client,
+ struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len, void *vaddr)
+{
+ int rc;
+ u32 export_id;
+ u32 cmd_rsp_size;
+ bool exported = false;
+ struct msm_audio_smmu_vm_map_cmd_rsp cmd_rsp;
+ struct msm_audio_smmu_map_data *map_data = NULL;
+ struct msm_audio_smmu_vm_map_cmd smmu_map_cmd;
+
+ rc = ion_handle_get_size(client, handle, len);
+ if (rc) {
+ pr_err("%s: ion_handle_get_size failed, client = %pK, handle = %pK, rc = %d\n",
+ __func__, client, handle, rc);
+ goto err;
+ }
+
+ /* Data required to track per buffer mapping */
+ map_data = kzalloc(sizeof(*map_data), GFP_KERNEL);
+ if (!map_data) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ /* Export the buffer to physical VM */
+ rc = habmm_export(msm_audio_ion_hab_handle, vaddr, *len,
+ &export_id, 0);
+ if (rc) {
+ pr_err("%s: habmm_export failed vaddr = %pK, len = %zd, rc = %d\n",
+ __func__, vaddr, *len, rc);
+ goto err;
+ }
+
+ exported = true;
+ smmu_map_cmd.cmd_id = MSM_AUDIO_SMMU_VM_CMD_MAP;
+ smmu_map_cmd.export_id = export_id;
+ smmu_map_cmd.buf_size = *len;
+
+ mutex_lock(&(msm_audio_ion_data.smmu_map_mutex));
+ rc = habmm_socket_send(msm_audio_ion_hab_handle,
+ (void *)&smmu_map_cmd, sizeof(smmu_map_cmd), 0);
+ if (rc) {
+ pr_err("%s: habmm_socket_send failed %d\n",
+ __func__, rc);
+ mutex_unlock(&(msm_audio_ion_data.smmu_map_mutex));
+ goto err;
+ }
+
+ cmd_rsp_size = sizeof(cmd_rsp);
+ rc = habmm_socket_recv(msm_audio_ion_hab_handle,
+ (void *)&cmd_rsp,
+ &cmd_rsp_size,
+ 0xFFFFFFFF,
+ 0);
+ if (rc) {
+ pr_err("%s: habmm_socket_recv failed %d\n",
+ __func__, rc);
+ mutex_unlock(&(msm_audio_ion_data.smmu_map_mutex));
+ goto err;
+ }
+ mutex_unlock(&(msm_audio_ion_data.smmu_map_mutex));
+
+ if (cmd_rsp_size != sizeof(cmd_rsp)) {
+		pr_err("%s: invalid size for cmd rsp %u, expected %zu\n",
+			__func__, cmd_rsp_size, sizeof(cmd_rsp));
+ rc = -EIO;
+ goto err;
+ }
+
+ if (cmd_rsp.status) {
+ pr_err("%s: SMMU map command failed %d\n",
+ __func__, cmd_rsp.status);
+ rc = cmd_rsp.status;
+ goto err;
+ }
+
+ *addr = (ion_phys_addr_t)cmd_rsp.addr;
+
+ map_data->client = client;
+ map_data->handle = handle;
+ map_data->export_id = export_id;
+
+ mutex_lock(&(msm_audio_ion_data.smmu_map_mutex));
+ list_add_tail(&(map_data->list),
+ &(msm_audio_ion_data.smmu_map_list));
+ mutex_unlock(&(msm_audio_ion_data.smmu_map_mutex));
+
+ return 0;
+
+err:
+ if (exported)
+ (void)habmm_unexport(msm_audio_ion_hab_handle, export_id, 0);
+
+ kfree(map_data);
+
+ return rc;
+}
+
+static int msm_audio_ion_smmu_unmap(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ int rc;
+ bool found = false;
+ u32 cmd_rsp_size;
+ struct msm_audio_smmu_vm_unmap_cmd_rsp cmd_rsp;
+ struct msm_audio_smmu_map_data *map_data, *next;
+ struct msm_audio_smmu_vm_unmap_cmd smmu_unmap_cmd;
+
+ /*
+ * Though list_for_each_entry_safe is delete safe, lock
+ * should be explicitly acquired to avoid race condition
+ * on adding elements to the list.
+ */
+ mutex_lock(&(msm_audio_ion_data.smmu_map_mutex));
+ list_for_each_entry_safe(map_data, next,
+ &(msm_audio_ion_data.smmu_map_list), list) {
+
+ if (map_data->handle == handle && map_data->client == client) {
+ found = true;
+ smmu_unmap_cmd.cmd_id = MSM_AUDIO_SMMU_VM_CMD_UNMAP;
+ smmu_unmap_cmd.export_id = map_data->export_id;
+
+ rc = habmm_socket_send(msm_audio_ion_hab_handle,
+ (void *)&smmu_unmap_cmd,
+ sizeof(smmu_unmap_cmd), 0);
+ if (rc) {
+ pr_err("%s: habmm_socket_send failed %d\n",
+ __func__, rc);
+ goto err;
+ }
+
+ cmd_rsp_size = sizeof(cmd_rsp);
+ rc = habmm_socket_recv(msm_audio_ion_hab_handle,
+ (void *)&cmd_rsp,
+ &cmd_rsp_size,
+ 0xFFFFFFFF,
+ 0);
+ if (rc) {
+ pr_err("%s: habmm_socket_recv failed %d\n",
+ __func__, rc);
+ goto err;
+ }
+
+ if (cmd_rsp_size != sizeof(cmd_rsp)) {
+				pr_err("%s: invalid size for cmd rsp %u\n",
+					__func__, cmd_rsp_size);
+ rc = -EIO;
+ goto err;
+ }
+
+ if (cmd_rsp.status) {
+ pr_err("%s: SMMU unmap command failed %d\n",
+ __func__, cmd_rsp.status);
+ rc = cmd_rsp.status;
+ goto err;
+ }
+
+ rc = habmm_unexport(msm_audio_ion_hab_handle,
+ map_data->export_id, 0xFFFFFFFF);
+ if (rc) {
+ pr_err("%s: habmm_unexport failed export_id = %d, rc = %d\n",
+ __func__, map_data->export_id, rc);
+ }
+
+ list_del(&(map_data->list));
+ kfree(map_data);
+ break;
+ }
+ }
+ mutex_unlock(&(msm_audio_ion_data.smmu_map_mutex));
+
+ if (!found) {
+ pr_err("%s: cannot find map_data ion_handle %pK, ion_client %pK\n",
+ __func__, handle, client);
+ rc = -EINVAL;
+ }
+
+ return rc;
+
+err:
+ if (found) {
+ (void)habmm_unexport(msm_audio_ion_hab_handle,
+ map_data->export_id, 0xFFFFFFFF);
+ list_del(&(map_data->list));
+ kfree(map_data);
+ }
+
+ mutex_unlock(&(msm_audio_ion_data.smmu_map_mutex));
+ return rc;
+}
+
+int msm_audio_ion_alloc(const char *name, struct ion_client **client,
+ struct ion_handle **handle, size_t bufsz,
+ ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr)
+{
+ int rc = -EINVAL;
+ unsigned long err_ion_ptr = 0;
+
+ if ((msm_audio_ion_data.smmu_enabled == true) &&
+ !(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
+ pr_debug("%s:probe is not done, deferred\n", __func__);
+ return -EPROBE_DEFER;
+ }
+ if (!name || !client || !handle || !paddr || !vaddr
+ || !bufsz || !pa_len) {
+ pr_err("%s: Invalid params\n", __func__);
+ return -EINVAL;
+ }
+ *client = msm_audio_ion_client_create(name);
+ if (IS_ERR_OR_NULL((void *)(*client))) {
+ pr_err("%s: ION create client for AUDIO failed\n", __func__);
+ goto err;
+ }
+
+ *handle = ion_alloc(*client, bufsz, SZ_4K,
+ ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ if (IS_ERR_OR_NULL((void *) (*handle))) {
+ if (msm_audio_ion_data.smmu_enabled == true) {
+ pr_debug("system heap is used");
+ msm_audio_ion_data.audioheap_enabled = 0;
+ *handle = ion_alloc(*client, bufsz, SZ_4K,
+ ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
+ }
+ if (IS_ERR_OR_NULL((void *) (*handle))) {
+ if (IS_ERR((void *)(*handle)))
+ err_ion_ptr = PTR_ERR((int *)(*handle));
+ pr_err("%s:ION alloc fail err ptr=%ld, smmu_enabled=%d\n",
+ __func__, err_ion_ptr, msm_audio_ion_data.smmu_enabled);
+ rc = -ENOMEM;
+ goto err_ion_client;
+ }
+ } else {
+ pr_debug("audio heap is used");
+ msm_audio_ion_data.audioheap_enabled = 1;
+ }
+
+ *vaddr = ion_map_kernel(*client, *handle);
+ if (IS_ERR_OR_NULL((void *)*vaddr)) {
+ pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
+ goto err_ion_handle;
+ }
+ pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
+ *vaddr, bufsz);
+
+ if (bufsz != 0) {
+ pr_debug("%s: memset to 0 %pK %zd\n", __func__, *vaddr, bufsz);
+ memset((void *)*vaddr, 0, bufsz);
+ }
+
+ rc = msm_audio_ion_get_phys(*client, *handle, paddr, pa_len, *vaddr);
+ if (rc) {
+ pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
+ __func__, rc);
+ goto err_get_phys;
+ }
+
+ return rc;
+
+err_get_phys:
+ ion_unmap_kernel(*client, *handle);
+err_ion_handle:
+ ion_free(*client, *handle);
+err_ion_client:
+ msm_audio_ion_client_destroy(*client);
+ *handle = NULL;
+ *client = NULL;
+err:
+ return rc;
+}
+EXPORT_SYMBOL(msm_audio_ion_alloc);
+
+int msm_audio_ion_phys_free(struct ion_client *client,
+ struct ion_handle *handle,
+ ion_phys_addr_t *paddr,
+ size_t *pa_len, u8 assign_type)
+{
+ if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
+ pr_debug("%s:probe is not done, deferred\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ if (!client || !handle || !paddr || !pa_len) {
+ pr_err("%s: Invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ /* hyp assign is not supported in VM */
+
+ ion_free(client, handle);
+ ion_client_destroy(client);
+
+ return 0;
+}
+
+int msm_audio_ion_phys_assign(const char *name, struct ion_client **client,
+ struct ion_handle **handle, int fd,
+ ion_phys_addr_t *paddr,
+ size_t *pa_len, u8 assign_type)
+{
+ int ret;
+
+ if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
+ pr_debug("%s:probe is not done, deferred\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ if (!name || !client || !handle || !paddr || !pa_len) {
+ pr_err("%s: Invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ *client = msm_audio_ion_client_create(name);
+ if (IS_ERR_OR_NULL((void *)(*client))) {
+ pr_err("%s: ION create client failed\n", __func__);
+ return -EINVAL;
+ }
+
+ *handle = ion_import_dma_buf(*client, fd);
+ if (IS_ERR_OR_NULL((void *) (*handle))) {
+ pr_err("%s: ion import dma buffer failed\n",
+ __func__);
+ ret = -EINVAL;
+ goto err_destroy_client;
+ }
+
+ ret = ion_phys(*client, *handle, paddr, pa_len);
+ if (ret) {
+ pr_err("%s: could not get physical address for handle, ret = %d\n",
+ __func__, ret);
+ goto err_ion_handle;
+ }
+
+ /* hyp assign is not supported in VM */
+
+ return ret;
+
+err_ion_handle:
+ ion_free(*client, *handle);
+
+err_destroy_client:
+ ion_client_destroy(*client);
+ *client = NULL;
+ *handle = NULL;
+
+ return ret;
+}
+
+int msm_audio_ion_import(const char *name, struct ion_client **client,
+ struct ion_handle **handle, int fd,
+ unsigned long *ionflag, size_t bufsz,
+ ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr)
+{
+ int rc = 0;
+
+ if ((msm_audio_ion_data.smmu_enabled == true) &&
+ !(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
+ pr_debug("%s:probe is not done, deferred\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ if (!name || !client || !handle || !paddr || !vaddr || !pa_len) {
+ pr_err("%s: Invalid params\n", __func__);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ *client = msm_audio_ion_client_create(name);
+ if (IS_ERR_OR_NULL((void *)(*client))) {
+ pr_err("%s: ION create client for AUDIO failed\n", __func__);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ /* name should be audio_acdb_client or Audio_Dec_Client,
+ * bufsz should be 0 and fd shouldn't be 0 as of now
+ */
+ *handle = ion_import_dma_buf(*client, fd);
+ pr_debug("%s: DMA Buf name=%s, fd=%d handle=%pK\n", __func__,
+ name, fd, *handle);
+ if (IS_ERR_OR_NULL((void *) (*handle))) {
+ pr_err("%s: ion import dma buffer failed\n",
+ __func__);
+ rc = -EINVAL;
+ goto err_destroy_client;
+ }
+
+ if (ionflag != NULL) {
+ rc = ion_handle_get_flags(*client, *handle, ionflag);
+ if (rc) {
+ pr_err("%s: could not get flags for the handle\n",
+ __func__);
+ goto err_ion_handle;
+ }
+ }
+
+ *vaddr = ion_map_kernel(*client, *handle);
+ if (IS_ERR_OR_NULL((void *)*vaddr)) {
+ pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
+ rc = -ENOMEM;
+ goto err_ion_handle;
+ }
+ pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
+ *vaddr, bufsz);
+
+ rc = msm_audio_ion_get_phys(*client, *handle, paddr, pa_len, *vaddr);
+ if (rc) {
+ pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
+ __func__, rc);
+ goto err_get_phys;
+ }
+
+ return 0;
+
+err_get_phys:
+ ion_unmap_kernel(*client, *handle);
+err_ion_handle:
+ ion_free(*client, *handle);
+err_destroy_client:
+ msm_audio_ion_client_destroy(*client);
+ *client = NULL;
+ *handle = NULL;
+err:
+ return rc;
+}
+
+int msm_audio_ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+ int ret = 0;
+
+ if (!client || !handle) {
+ pr_err("%s Invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ if (msm_audio_ion_data.smmu_enabled) {
+ ret = msm_audio_ion_smmu_unmap(client, handle);
+ if (ret)
+ pr_err("%s: smmu unmap failed with ret %d\n",
+ __func__, ret);
+ }
+
+ ion_unmap_kernel(client, handle);
+
+ ion_free(client, handle);
+ msm_audio_ion_client_destroy(client);
+ return ret;
+}
+EXPORT_SYMBOL(msm_audio_ion_free);
+
+int msm_audio_ion_mmap(struct audio_buffer *ab,
+ struct vm_area_struct *vma)
+{
+ struct sg_table *table;
+ unsigned long addr = vma->vm_start;
+ unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+ struct scatterlist *sg;
+ unsigned int i;
+ struct page *page;
+ int ret;
+
+ pr_debug("%s\n", __func__);
+
+ table = ion_sg_table(ab->client, ab->handle);
+
+ if (IS_ERR(table)) {
+ pr_err("%s: Unable to get sg_table from ion: %ld\n",
+ __func__, PTR_ERR(table));
+ return PTR_ERR(table);
+ } else if (!table) {
+ pr_err("%s: sg_list is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ /* uncached */
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ /* We need to check if a page is associated with this sg list because:
+ * If the allocation came from a carveout we currently don't have
+ * pages associated with carved out memory. This might change in the
+ * future and we can remove this check and the else statement.
+ */
+ page = sg_page(table->sgl);
+ if (page) {
+ pr_debug("%s: page is NOT null\n", __func__);
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ unsigned long remainder = vma->vm_end - addr;
+ unsigned long len = sg->length;
+
+ page = sg_page(sg);
+
+ if (offset >= len) {
+ offset -= len;
+ continue;
+ } else if (offset) {
+ page += offset / PAGE_SIZE;
+ len -= offset;
+ offset = 0;
+ }
+ len = min(len, remainder);
+ pr_debug("vma=%pK, addr=%x len=%ld vm_start=%x vm_end=%x vm_page_prot=%ld\n",
+ vma, (unsigned int)addr, len,
+ (unsigned int)vma->vm_start,
+ (unsigned int)vma->vm_end,
+ (unsigned long int)vma->vm_page_prot);
+ remap_pfn_range(vma, addr, page_to_pfn(page), len,
+ vma->vm_page_prot);
+ addr += len;
+ if (addr >= vma->vm_end)
+ return 0;
+ }
+ } else {
+ ion_phys_addr_t phys_addr;
+ size_t phys_len;
+ size_t va_len = 0;
+
+ pr_debug("%s: page is NULL\n", __func__);
+
+ ret = ion_phys(ab->client, ab->handle, &phys_addr, &phys_len);
+ if (ret) {
+ pr_err("%s: Unable to get phys address from ION buffer: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ pr_debug("phys=%pK len=%zd\n", &phys_addr, phys_len);
+ pr_debug("vma=%pK, vm_start=%x vm_end=%x vm_pgoff=%ld vm_page_prot=%ld\n",
+ vma, (unsigned int)vma->vm_start,
+ (unsigned int)vma->vm_end, vma->vm_pgoff,
+ (unsigned long int)vma->vm_page_prot);
+ va_len = vma->vm_end - vma->vm_start;
+		if ((offset > phys_len) || (va_len > phys_len - offset)) {
+			pr_err("wrong offset size %lu, lens=%zd, va_len=%zd\n",
+				offset, phys_len, va_len);
+ return -EINVAL;
+ }
+ ret = remap_pfn_range(vma, vma->vm_start,
+ __phys_to_pfn(phys_addr) + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ }
+ return 0;
+}
+
+
+bool msm_audio_ion_is_smmu_available(void)
+{
+ return msm_audio_ion_data.smmu_enabled;
+}
+
+/* TODO: move back to static scope again */
+struct ion_client *msm_audio_ion_client_create(const char *name)
+{
+ struct ion_client *pclient = NULL;
+
+ pclient = hvenv_ion_client_create(name);
+ return pclient;
+}
+
+
+void msm_audio_ion_client_destroy(struct ion_client *client)
+{
+ pr_debug("%s: client = %pK smmu_enabled = %d\n", __func__,
+ client, msm_audio_ion_data.smmu_enabled);
+
+ ion_client_destroy(client);
+}
+
+int msm_audio_ion_import_legacy(const char *name, struct ion_client *client,
+ struct ion_handle **handle, int fd,
+ unsigned long *ionflag, size_t bufsz,
+ ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr)
+{
+ int rc = 0;
+
+ if (!name || !client || !handle || !paddr || !vaddr || !pa_len) {
+ pr_err("%s: Invalid params\n", __func__);
+ rc = -EINVAL;
+ goto err;
+ }
+	/* The client is already created and supplied by the legacy caller */
+ /* name should be audio_acdb_client or Audio_Dec_Client,
+ * bufsz should be 0 and fd shouldn't be 0 as of now
+ */
+ *handle = ion_import_dma_buf(client, fd);
+ pr_debug("%s: DMA Buf name=%s, fd=%d handle=%pK\n", __func__,
+ name, fd, *handle);
+ if (IS_ERR_OR_NULL((void *)(*handle))) {
+ pr_err("%s: ion import dma buffer failed\n",
+ __func__);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ if (ionflag != NULL) {
+ rc = ion_handle_get_flags(client, *handle, ionflag);
+ if (rc) {
+ pr_err("%s: could not get flags for the handle\n",
+ __func__);
+ rc = -EINVAL;
+ goto err_ion_handle;
+ }
+ }
+
+	/* TODO: add a condition for whether SMMU is enabled or not */
+ *vaddr = ion_map_kernel(client, *handle);
+ if (IS_ERR_OR_NULL((void *)*vaddr)) {
+ pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
+ rc = -EINVAL;
+ goto err_ion_handle;
+ }
+
+ if (bufsz != 0)
+ memset((void *)*vaddr, 0, bufsz);
+
+ rc = msm_audio_ion_get_phys(client, *handle, paddr, pa_len, *vaddr);
+ if (rc) {
+ pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
+ __func__, rc);
+ goto err_get_phys;
+ }
+
+ return 0;
+
+err_get_phys:
+ ion_unmap_kernel(client, *handle);
+err_ion_handle:
+ ion_free(client, *handle);
+err:
+ return rc;
+}
+
+int msm_audio_ion_free_legacy(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ ion_unmap_kernel(client, handle);
+
+ ion_free(client, handle);
+	/* no client_destroy in legacy */
+ return 0;
+}
+
+static int msm_audio_ion_get_phys(struct ion_client *client,
+ struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len, void *vaddr)
+{
+ int rc = 0;
+
+ pr_debug("%s: smmu_enabled = %d\n", __func__,
+ msm_audio_ion_data.smmu_enabled);
+
+ if (msm_audio_ion_data.smmu_enabled) {
+ rc = msm_audio_ion_smmu_map(client, handle, addr, len, vaddr);
+ if (rc) {
+ pr_err("%s: failed to do smmu map, err = %d\n",
+ __func__, rc);
+ goto err;
+ }
+ } else {
+ rc = ion_phys(client, handle, addr, len);
+ }
+
+ pr_debug("%s: phys=%pK, len=%zd, rc=%d\n",
+ __func__, &(*addr), *len, rc);
+err:
+ return rc;
+}
+
+static const struct of_device_id msm_audio_ion_dt_match[] = {
+ { .compatible = "qcom,msm-audio-ion-vm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msm_audio_ion_dt_match);
+
+u32 msm_audio_populate_upper_32_bits(ion_phys_addr_t pa)
+{
+ return upper_32_bits(pa);
+}
+
+static int msm_audio_ion_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ const char *msm_audio_ion_dt = "qcom,smmu-enabled";
+ bool smmu_enabled;
+ struct device *dev = &pdev->dev;
+
+ if (dev->of_node == NULL) {
+ pr_err("%s: device tree is not found\n",
+ __func__);
+ msm_audio_ion_data.smmu_enabled = 0;
+ return 0;
+ }
+
+ smmu_enabled = of_property_read_bool(dev->of_node,
+ msm_audio_ion_dt);
+ msm_audio_ion_data.smmu_enabled = smmu_enabled;
+
+ pr_info("%s: SMMU is %s\n", __func__,
+ (smmu_enabled) ? "Enabled" : "Disabled");
+
+ if (smmu_enabled) {
+ rc = habmm_socket_open(&msm_audio_ion_hab_handle,
+ HAB_MMID_CREATE(MM_AUD_3,
+ MSM_AUDIO_SMMU_VM_HAB_MINOR_ID),
+ 0xFFFFFFFF,
+ HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_FE);
+ if (rc) {
+ pr_err("%s: habmm_socket_open failed %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ pr_info("%s: msm_audio_ion_hab_handle %x\n",
+ __func__, msm_audio_ion_hab_handle);
+
+ INIT_LIST_HEAD(&msm_audio_ion_data.smmu_map_list);
+ mutex_init(&(msm_audio_ion_data.smmu_map_mutex));
+ }
+
+ if (!rc)
+ msm_audio_ion_data.device_status |= MSM_AUDIO_ION_PROBED;
+
+ return rc;
+}
+
+static int msm_audio_ion_remove(struct platform_device *pdev)
+{
+ if (msm_audio_ion_data.smmu_enabled) {
+ if (msm_audio_ion_hab_handle)
+ habmm_socket_close(msm_audio_ion_hab_handle);
+
+ mutex_destroy(&(msm_audio_ion_data.smmu_map_mutex));
+ }
+ msm_audio_ion_data.smmu_enabled = 0;
+ msm_audio_ion_data.device_status = 0;
+
+ return 0;
+}
+
+static struct platform_driver msm_audio_ion_driver = {
+ .driver = {
+ .name = "msm-audio-ion-vm",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_audio_ion_dt_match,
+ },
+ .probe = msm_audio_ion_probe,
+ .remove = msm_audio_ion_remove,
+};
+
+static int __init msm_audio_ion_init(void)
+{
+ return platform_driver_register(&msm_audio_ion_driver);
+}
+module_init(msm_audio_ion_init);
+
+static void __exit msm_audio_ion_exit(void)
+{
+ platform_driver_unregister(&msm_audio_ion_driver);
+}
+module_exit(msm_audio_ion_exit);
+
+MODULE_DESCRIPTION("MSM Audio ION VM module");
+MODULE_LICENSE("GPL v2");
cmd += 6;
while (*cmd == ' ')
cmd++;
- if ((cmd != '\0') && sysrq_on())
+ if ((*cmd != '\0') && sysrq_on())
kernel_restart(cmd);
else
kernel_restart(NULL);
struct iio_dev *indio_dev = spi_get_drvdata(st->sd.spi);
unsigned long long scale_uv;
int i, ret, id;
- u8 ones[6];
/* reset the serial interface */
- memset(&ones, 0xFF, 6);
- ret = spi_write(st->sd.spi, &ones, 6);
+ ret = ad_sd_reset(&st->sd, 48);
if (ret < 0)
goto out;
usleep_range(500, 1000); /* Wait for at least 500us */
sense->ascq = ascq;
if (sns_key_info0 != 0) {
sense->sns_key_info[0] = SKSV | sns_key_info0;
- sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 8;
+ sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 4;
sense->sns_key_info[2] = sns_key_info1 & 0x0f;
}
}
*/
if (cmd->unsolicited_data) {
cmd->seq_start_offset = cmd->write_data_done;
- cmd->seq_end_offset = (cmd->write_data_done +
- ((cmd->se_cmd.data_length >
- conn->sess->sess_ops->FirstBurstLength) ?
- conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length));
+ cmd->seq_end_offset = min(cmd->se_cmd.data_length,
+ conn->sess->sess_ops->FirstBurstLength);
return;
}
};
struct msm_hs_port {
- bool startup_locked;
+ atomic_t startup_locked;
struct uart_port uport;
unsigned long imr_reg; /* shadow value of UARTDM_IMR */
struct clk *clk;
unsigned long flags;
int ret = 0;
- msm_uport->startup_locked = true;
msm_hs_resource_vote(msm_uport);
if (val) {
}
/* Calling CLOCK API. Hence mb() requires here. */
mb();
- msm_uport->startup_locked = false;
msm_hs_resource_unvote(msm_uport);
return 0;
}
unsigned long flags;
int ret = 0;
- msm_uport->startup_locked = true;
msm_hs_resource_vote(msm_uport);
spin_lock_irqsave(&uport->lock, flags);
ret = msm_hs_read(&msm_uport->uport, UART_DM_MR2);
spin_unlock_irqrestore(&uport->lock, flags);
- msm_uport->startup_locked = false;
msm_hs_resource_unvote(msm_uport);
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
- MSM_HS_WARN("%s(): Clocks are off\n", __func__);
- /* Make sure resource_on doesn't get called */
- if (msm_hs_clk_bus_vote(msm_uport))
- MSM_HS_ERR("%s:Failed clock vote\n", __func__);
- msm_hs_disable_rx(uport);
- msm_hs_clk_bus_unvote(msm_uport);
+ MSM_HS_WARN("%s(): Clocks are off, Rx still active\n",
+ __func__);
+ return;
} else
msm_hs_disable_rx(uport);
if (UARTDM_ISR_CURRENT_CTS_BMSK & isr)
MSM_HS_WARN("%s(): CTS Disabled, ISR 0x%x", __func__, isr);
dump_uart_hs_registers(msm_uport);
- /* Stop further loging */
+ /* Stop further logging */
MSM_HS_ERR("%s(): Stop IPC logging\n", __func__);
}
struct msm_hs_tx *tx = &msm_uport->tx;
unsigned int isr;
- if (msm_uport->startup_locked) {
- MSM_HS_DBG("%s(): No Tx Request, startup_locked=%d\n",
- __func__, msm_uport->startup_locked);
- return;
- }
-
/* Bail if transfer in progress */
if (tx->flush < FLUSH_STOP || tx->dma_in_flight) {
MSM_HS_INFO("%s(): retry, flush %d, dma_in_flight %d\n",
if (msm_uport->pm_state == MSM_HS_PM_ACTIVE) {
isr = msm_hs_read(uport, UART_DM_ISR);
- if (UARTDM_ISR_CURRENT_CTS_BMSK & isr)
- MSM_HS_DBG("%s():CTS 1: Peer is Busy, ISR 0x%x",
- __func__, isr);
+ if (UARTDM_ISR_CURRENT_CTS_BMSK & isr) {
+ MSM_HS_DBG("%s():CTS 1: Peer is Busy\n",
+ __func__);
+ MSM_HS_DBG("%s():ISR 0x%x\n",
+ __func__, isr);
+ }
} else
MSM_HS_WARN("%s(): Clocks are off\n", __func__);
unsigned int data;
unsigned long flags;
- if (msm_uport->startup_locked) {
- MSM_HS_WARN("%s(): startup_locked=%d\n",
- __func__, msm_uport->startup_locked);
+ if (atomic_read(&msm_uport->startup_locked)) {
+ MSM_HS_DBG("%s(): Port open in progress\n", __func__);
return;
}
+ msm_hs_disable_flow_control(uport, false);
if (msm_uport->rx.flush == FLUSH_SHUTDOWN ||
msm_uport->rx.flush == FLUSH_STOP) {
spin_unlock_irqrestore(&uport->lock, flags);
}
msm_hs_spsconnect_tx(msm_uport);
+
+ msm_hs_enable_flow_control(uport, false);
}
/* Request to turn off uart clock once pending TX is flushed */
struct sps_pipe *sps_pipe_handle_tx = tx->cons.pipe_handle;
struct sps_pipe *sps_pipe_handle_rx = rx->prod.pipe_handle;
- msm_uport->startup_locked = true;
+ atomic_set(&msm_uport->startup_locked, 1);
rfr_level = uport->fifosize;
if (rfr_level > 16)
rfr_level -= 16;
atomic_set(&msm_uport->client_req_state, 0);
LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
"%s: Client_Count 0\n", __func__);
- msm_uport->startup_locked = false;
+ atomic_set(&msm_uport->startup_locked, 0);
msm_hs_start_rx_locked(uport);
spin_unlock_irqrestore(&uport->lock, flags);
free_uart_irq:
free_irq(uport->irq, msm_uport);
unvote_exit:
+ atomic_set(&msm_uport->startup_locked, 0);
msm_hs_resource_unvote(msm_uport);
MSM_HS_ERR("%s(): Error return\n", __func__);
return ret;
msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
msm_hs_resource_off(msm_uport);
obs_manage_irq(msm_uport, false);
- if (!atomic_read(&msm_uport->client_req_state))
- enable_wakeup_interrupt(msm_uport);
msm_hs_clk_bus_unvote(msm_uport);
/* For OBS, don't use wakeup interrupt, set gpio to suspended state */
__func__);
}
+ if (!atomic_read(&msm_uport->client_req_state))
+ enable_wakeup_interrupt(msm_uport);
LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
"%s: PM State Suspended client_count %d\n", __func__,
client_count);
static struct uart_port *sunhv_port;
+void sunhv_migrate_hvcons_irq(int cpu)
+{
+ /* Migrate hvcons irq to param cpu */
+ irq_force_affinity(sunhv_port->irq, cpumask_of(cpu));
+}
+
/* Copy 's' into the con_write_page, decoding "\n" into
* "\r\n" along the way. We have to return two lengths
* because the caller needs to know how much to advance
EXPORT_SYMBOL(tty_insert_flip_string_flags);
/**
+ * __tty_insert_flip_char - Add one character to the tty buffer
+ * @port: tty port
+ * @ch: character
+ * @flag: flag byte
+ *
+ * Queue a single byte to the tty buffering, with an optional flag.
+ * This is the slow path of tty_insert_flip_char.
+ */
+int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag)
+{
+ struct tty_buffer *tb;
+ int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;
+
+ if (!__tty_buffer_request_room(port, 1, flags))
+ return 0;
+
+ tb = port->buf.tail;
+ if (~tb->flags & TTYB_NORMAL)
+ *flag_buf_ptr(tb, tb->used) = flag;
+ *char_buf_ptr(tb, tb->used++) = ch;
+
+ return 1;
+}
+EXPORT_SYMBOL(__tty_insert_flip_char);
+
+/**
* tty_schedule_flip - push characters to ldisc
* @port: tty port to push from
*
if (!ci->is_otg)
return;
- if (hw_read_otgsc(ci, OTGSC_BSV))
+ if (hw_read_otgsc(ci, OTGSC_BSV) && !ci->vbus_active)
usb_gadget_vbus_connect(&ci->gadget);
- else
+ else if (!hw_read_otgsc(ci, OTGSC_BSV) && ci->vbus_active)
usb_gadget_vbus_disconnect(&ci->gadget);
}
ci_role_stop(ci);
- if (role == CI_ROLE_GADGET)
+ if (role == CI_ROLE_GADGET &&
+ IS_ERR(ci->platdata->vbus_extcon.edev))
/*
- * wait vbus lower than OTGSC_BSV before connecting
- * to host
+ * Wait vbus lower than OTGSC_BSV before connecting
+ * to host. If connecting status is from an external
+ * connector instead of register, we don't need to
+ * care vbus on the board, since it will not affect
+ * external connector status.
*/
hw_wait_vbus_lower_bsv(ci);
ci_role_start(ci, role);
+ /* vbus change may have already occurred */
+ if (role == CI_ROLE_GADGET)
+ ci_handle_vbus_change(ci);
}
}
/**
} else if (header->bDescriptorType ==
USB_DT_INTERFACE_ASSOCIATION) {
+ struct usb_interface_assoc_descriptor *d;
+
+ d = (struct usb_interface_assoc_descriptor *)header;
+ if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) {
+ dev_warn(ddev,
+ "config %d has an invalid interface association descriptor of length %d, skipping\n",
+ cfgno, d->bLength);
+ continue;
+ }
+
if (iad_num == USB_MAXIADS) {
dev_warn(ddev, "found more Interface "
"Association Descriptors "
"than allocated for in "
"configuration %d\n", cfgno);
} else {
- config->intf_assoc[iad_num] =
- (struct usb_interface_assoc_descriptor
- *)header;
+ config->intf_assoc[iad_num] = d;
iad_num++;
}
}
if (dev->quirks & USB_QUIRK_DELAY_INIT)
- msleep(100);
+ msleep(200);
result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
bigbuffer, length);
if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
as->status != -ENOENT)
cancel_bulk_urbs(ps, as->bulk_addr);
+
+ wake_up(&ps->wait);
spin_unlock(&ps->lock);
if (signr) {
put_pid(pid);
put_cred(cred);
}
-
- wake_up(&ps->wait);
}
static void destroy_async(struct usb_dev_state *ps, struct list_head *list)
totlen += isopkt[u].length;
}
u *= sizeof(struct usb_iso_packet_descriptor);
- uurb->buffer_length = totlen;
+ if (totlen <= uurb->buffer_length)
+ uurb->buffer_length = totlen;
+ else
+ WARN_ONCE(1, "uurb->buffer_length is too short %d vs %d",
+ totlen, uurb->buffer_length);
break;
default:
goto loop;
if (udev->quirks & USB_QUIRK_DELAY_INIT)
- msleep(1000);
+ msleep(2000);
/* consecutive bus-powered hubs aren't reliable; they can
* violate the voltage drop budget. if the new child has
/* Microsoft LifeCam-VX700 v2.0 */
{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
- /* Logitech HD Pro Webcams C920 and C930e */
+ /* Logitech HD Pro Webcams C920, C920-C and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
/* Logitech ConferenceCam CC3000e */
{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ /* Corsair Strafe RGB */
+ { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Acer C120 LED Projector */
{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
+ struct usb_gadget_strings *gstr = cdev->driver->strings[0];
+ struct usb_string *dev_str = gstr->strings;
/* composite_disconnect() must already have been called
* by the underlying peripheral controller driver!
composite_dev_cleanup(cdev);
+ if (dev_str[USB_GADGET_MANUFACTURER_IDX].s == cdev->def_manufacturer)
+ dev_str[USB_GADGET_MANUFACTURER_IDX].s = "";
+
kfree(cdev->def_manufacturer);
kfree(cdev);
set_gadget_data(gadget, NULL);
struct completion thread_notifier;
struct task_struct *thread_task;
- /* Callback functions. */
- const struct fsg_operations *ops;
/* Gadget's private data. */
void *private_data;
static int fsg_main_thread(void *common_)
{
struct fsg_common *common = common_;
+ int i;
/*
* Allow the thread to be killed by a signal, but set the signal mask
common->thread_task = NULL;
spin_unlock_irq(&common->lock);
- if (!common->ops || !common->ops->thread_exits
- || common->ops->thread_exits(common) < 0) {
- int i;
+ /* Eject media from all LUNs */
- down_write(&common->filesem);
- for (i = 0; i < ARRAY_SIZE(common->luns); --i) {
- struct fsg_lun *curlun = common->luns[i];
- if (!curlun || !fsg_lun_is_open(curlun))
- continue;
+ down_write(&common->filesem);
+ for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
+ struct fsg_lun *curlun = common->luns[i];
+ if (curlun && fsg_lun_is_open(curlun))
fsg_lun_close(curlun);
- curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
- }
- up_write(&common->filesem);
}
+ up_write(&common->filesem);
/* Let fsg_unbind() know the thread has exited */
complete_and_exit(&common->thread_notifier, 0);
}
EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
-void fsg_common_set_ops(struct fsg_common *common,
- const struct fsg_operations *ops)
-{
- common->ops = ops;
-}
-EXPORT_SYMBOL_GPL(fsg_common_set_ops);
-
void fsg_common_free_buffers(struct fsg_common *common)
{
_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
struct fsg_common;
/* FSF callback functions */
-struct fsg_operations {
- /*
- * Callback function to call when thread exits. If no
- * callback is set or it returns value lower then zero MSF
- * will force eject all LUNs it operates on (including those
- * marked as non-removable or with prevent_medium_removal flag
- * set).
- */
- int (*thread_exits)(struct fsg_common *common);
-};
-
struct fsg_lun_opts {
struct config_group group;
struct fsg_lun *lun;
void fsg_common_remove_luns(struct fsg_common *common);
-void fsg_common_set_ops(struct fsg_common *common,
- const struct fsg_operations *ops);
-
int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
unsigned int id, const char *name,
const char **name_pfx);
DBG(c->cdev, "ncm unbind\n");
+ opts->bound = false;
+
hrtimer_cancel(&ncm->task_timer);
tasklet_kill(&ncm->tx_tasklet);
usb_ep_free_request(ncm->notify, ncm->notify_req);
gether_cleanup(netdev_priv(opts->net));
- opts->bound = false;
}
static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int result; \
\
+ if (!opts->bound) { \
+ pr_err("Gadget function is not bound yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
result = gether_get_dev_addr(opts->net, page, PAGE_SIZE); \
mutex_unlock(&opts->lock); \
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int ret; \
\
+ if (!opts->bound) { \
+ pr_err("Gadget function is not bound yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
mutex_unlock(&opts->lock); \
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int result; \
\
+ if (!opts->bound) { \
+ pr_err("Gadget function is not bound yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
result = gether_get_host_addr(opts->net, page, PAGE_SIZE); \
mutex_unlock(&opts->lock); \
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int ret; \
\
+ if (!opts->bound) { \
+ pr_err("Gadget function is not bound yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
mutex_unlock(&opts->lock); \
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
unsigned qmult; \
\
+ if (!opts->bound) { \
+ pr_err("Gadget function is not bound yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
qmult = gether_get_qmult(opts->net); \
mutex_unlock(&opts->lock); \
u8 val; \
int ret; \
\
+ if (!opts->bound) { \
+ pr_err("Gadget function is not bound yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
ret = -EBUSY; \
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int ret; \
\
+ if (!opts->bound) { \
+ pr_err("Gadget function is not bound yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
ret = gether_get_ifname(opts->net, page, PAGE_SIZE); \
mutex_unlock(&opts->lock); \
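The same guard is repeated verbatim in each generated show/store macro above. Factored out, it would look like this sketch (the helper name and the opts type are illustrative; the real macros generate per-function opts structs):

/* Hypothetical helper equivalent to the repeated guard above. */
static int ether_opts_check_bound(struct f_gether_opts *opts)
{
	if (!opts->bound) {
		pr_err("Gadget function is not bound yet.\n");
		return -ENODEV;
	}
	return 0;
}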
#include <linux/mmu_context.h>
#include <linux/aio.h>
#include <linux/uio.h>
-
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/moduleparam.h>
struct dev_data {
spinlock_t lock;
atomic_t count;
+ int udc_usage;
enum ep0_state state; /* P: lock */
struct usb_gadgetfs_event event [N_EVENT];
unsigned ev_next;
INIT_WORK(&priv->work, ep_user_copy_worker);
schedule_work(&priv->work);
}
- spin_unlock(&epdata->dev->lock);
usb_ep_free_request(ep, req);
+ spin_unlock(&epdata->dev->lock);
put_ep(epdata);
}
struct usb_request *req = dev->req;
if ((retval = setup_req (ep, req, 0)) == 0) {
+ ++dev->udc_usage;
spin_unlock_irq (&dev->lock);
retval = usb_ep_queue (ep, req, GFP_KERNEL);
spin_lock_irq (&dev->lock);
+ --dev->udc_usage;
}
dev->state = STATE_DEV_CONNECTED;
retval = -EIO;
else {
len = min (len, (size_t)dev->req->actual);
-// FIXME don't call this with the spinlock held ...
+ ++dev->udc_usage;
+ spin_unlock_irq(&dev->lock);
if (copy_to_user (buf, dev->req->buf, len))
retval = -EFAULT;
else
retval = len;
+ spin_lock_irq(&dev->lock);
+ --dev->udc_usage;
clean_req (dev->gadget->ep0, dev->req);
/* NOTE userspace can't yet choose to stall */
}
retval = setup_req (dev->gadget->ep0, dev->req, len);
if (retval == 0) {
dev->state = STATE_DEV_CONNECTED;
+ ++dev->udc_usage;
spin_unlock_irq (&dev->lock);
if (copy_from_user (dev->req->buf, buf, len))
retval = -EFAULT;
dev->gadget->ep0, dev->req,
GFP_KERNEL);
}
+ spin_lock_irq(&dev->lock);
+ --dev->udc_usage;
if (retval < 0) {
- spin_lock_irq (&dev->lock);
clean_req (dev->gadget->ep0, dev->req);
- spin_unlock_irq (&dev->lock);
} else
retval = len;
struct usb_gadget *gadget = dev->gadget;
long ret = -ENOTTY;
- if (gadget->ops->ioctl)
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_DEV_OPENED ||
+ dev->state == STATE_DEV_UNBOUND) {
+ /* Not bound to a UDC */
+ } else if (gadget->ops->ioctl) {
+ ++dev->udc_usage;
+ spin_unlock_irq(&dev->lock);
+
ret = gadget->ops->ioctl (gadget, code, value);
+ spin_lock_irq(&dev->lock);
+ --dev->udc_usage;
+ }
+ spin_unlock_irq(&dev->lock);
+
return ret;
}
if (value < 0)
break;
+ ++dev->udc_usage;
spin_unlock (&dev->lock);
value = usb_ep_queue (gadget->ep0, dev->req,
GFP_KERNEL);
spin_lock (&dev->lock);
+ --dev->udc_usage;
if (value < 0) {
clean_req (gadget->ep0, dev->req);
break;
req->length = value;
req->zero = value < w_length;
+ ++dev->udc_usage;
spin_unlock (&dev->lock);
value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
+ spin_lock(&dev->lock);
+ --dev->udc_usage;
+ spin_unlock(&dev->lock);
if (value < 0) {
DBG (dev, "ep_queue --> %d\n", value);
req->status = 0;
/* break link to FS */
ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
list_del_init (&ep->epfiles);
+ spin_unlock_irq (&dev->lock);
+
dentry = ep->dentry;
ep->dentry = NULL;
parent = d_inode(dentry->d_parent);
/* break link to controller */
+ mutex_lock(&ep->lock);
if (ep->state == STATE_EP_ENABLED)
(void) usb_ep_disable (ep->ep);
ep->state = STATE_EP_UNBOUND;
usb_ep_free_request (ep->ep, ep->req);
ep->ep = NULL;
+ mutex_unlock(&ep->lock);
+
wake_up (&ep->wait);
put_ep (ep);
- spin_unlock_irq (&dev->lock);
-
/* break link to dcache */
mutex_lock (&parent->i_mutex);
d_delete (dentry);
spin_lock_irq (&dev->lock);
dev->state = STATE_DEV_UNBOUND;
+ while (dev->udc_usage > 0) {
+ spin_unlock_irq(&dev->lock);
+ usleep_range(1000, 2000);
+ spin_lock_irq(&dev->lock);
+ }
spin_unlock_irq (&dev->lock);
destroy_ep_files (dev);
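The udc_usage counter added throughout this file implements a simple callback-drain idiom: bump the counter before dropping the lock around a UDC call, decrement after retaking it, and at teardown sleep-poll until it reaches zero. A minimal sketch of the pattern (dev, ep and req assumed from context):

/* Pattern sketch: call out of the lock while keeping teardown safe. */
spin_lock_irq(&dev->lock);
++dev->udc_usage;			/* announce an in-flight UDC call */
spin_unlock_irq(&dev->lock);
usb_ep_queue(ep, req, GFP_KERNEL);	/* may sleep; must not hold lock */
spin_lock_irq(&dev->lock);
--dev->udc_usage;			/* teardown may now proceed */
spin_unlock_irq(&dev->lock);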
FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
-static unsigned long msg_registered;
-static void msg_cleanup(void);
-
-static int msg_thread_exits(struct fsg_common *common)
-{
- msg_cleanup();
- return 0;
-}
-
static int msg_do_config(struct usb_configuration *c)
{
struct fsg_opts *opts;
static int msg_bind(struct usb_composite_dev *cdev)
{
- static const struct fsg_operations ops = {
- .thread_exits = msg_thread_exits,
- };
struct fsg_opts *opts;
struct fsg_config config;
int status;
if (status)
goto fail;
- fsg_common_set_ops(opts->common, &ops);
-
status = fsg_common_set_cdev(opts->common, cdev, config.can_stall);
if (status)
goto fail_set_cdev;
usb_composite_overwrite_options(cdev, &coverwrite);
dev_info(&cdev->gadget->dev,
DRIVER_DESC ", version: " DRIVER_VERSION "\n");
- set_bit(0, &msg_registered);
return 0;
fail_otg_desc:
}
module_init(msg_init);
-static void msg_cleanup(void)
+static void __exit msg_cleanup(void)
{
- if (test_and_clear_bit(0, &msg_registered))
- usb_composite_unregister(&msg_driver);
+ usb_composite_unregister(&msg_driver);
}
module_exit(msg_cleanup);
#include <asm/gpio.h>
#include "atmel_usba_udc.h"
+#define USBA_VBUS_IRQFLAGS (IRQF_ONESHOT \
+ | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING)
#ifdef CONFIG_USB_GADGET_DEBUG_FS
#include <linux/debugfs.h>
IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(&pdev->dev,
gpio_to_irq(udc->vbus_pin), NULL,
- usba_vbus_irq_thread, IRQF_ONESHOT,
+ usba_vbus_irq_thread, USBA_VBUS_IRQFLAGS,
"atmel_usba_udc", udc);
if (ret) {
udc->vbus_pin = -ENODEV;
struct usb_device *udev;
struct list_head urbp_list;
+ struct urbp *next_frame_urbp;
+
u32 stream_en_ep;
u8 num_stream[30 / 2];
*/
struct dummy_ep ep[DUMMY_ENDPOINTS];
int address;
+ int callback_usage;
struct usb_gadget gadget;
struct usb_gadget_driver *driver;
struct dummy_request fifo_req;
u8 fifo_buf[FIFO_SIZE];
u16 devstatus;
+ unsigned ints_enabled:1;
unsigned udc_suspended:1;
unsigned pullup:1;
static void set_link_state(struct dummy_hcd *dum_hcd)
{
struct dummy *dum = dum_hcd->dum;
+ unsigned int power_bit;
dum_hcd->active = 0;
if (dum->pullup)
return;
set_link_state_by_speed(dum_hcd);
+ power_bit = (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 ?
+ USB_SS_PORT_STAT_POWER : USB_PORT_STAT_POWER);
if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 ||
dum_hcd->active)
dum_hcd->resuming = 0;
/* Currently !connected or in reset */
- if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0 ||
+ if ((dum_hcd->port_status & power_bit) == 0 ||
(dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) {
- unsigned disconnect = USB_PORT_STAT_CONNECTION &
+ unsigned int disconnect = power_bit &
dum_hcd->old_status & (~dum_hcd->port_status);
- unsigned reset = USB_PORT_STAT_RESET &
+ unsigned int reset = USB_PORT_STAT_RESET &
(~dum_hcd->old_status) & dum_hcd->port_status;
/* Report reset and disconnect events to the driver */
- if (dum->driver && (disconnect || reset)) {
+ if (dum->ints_enabled && (disconnect || reset)) {
stop_activity(dum);
+ ++dum->callback_usage;
+ spin_unlock(&dum->lock);
if (reset)
usb_gadget_udc_reset(&dum->gadget, dum->driver);
else
dum->driver->disconnect(&dum->gadget);
+ spin_lock(&dum->lock);
+ --dum->callback_usage;
}
- } else if (dum_hcd->active != dum_hcd->old_active) {
+ } else if (dum_hcd->active != dum_hcd->old_active &&
+ dum->ints_enabled) {
+ ++dum->callback_usage;
+ spin_unlock(&dum->lock);
if (dum_hcd->old_active && dum->driver->suspend)
dum->driver->suspend(&dum->gadget);
else if (!dum_hcd->old_active && dum->driver->resume)
dum->driver->resume(&dum->gadget);
+ spin_lock(&dum->lock);
+ --dum->callback_usage;
}
dum_hcd->old_status = dum_hcd->port_status;
* can't enumerate without help from the driver we're binding.
*/
+ spin_lock_irq(&dum->lock);
dum->devstatus = 0;
dum->driver = driver;
+ dum->ints_enabled = 1;
+ spin_unlock_irq(&dum->lock);
return 0;
}
struct dummy *dum = dum_hcd->dum;
spin_lock_irq(&dum->lock);
+ dum->ints_enabled = 0;
+ stop_activity(dum);
+
+ /* emulate synchronize_irq(): wait for callbacks to finish */
+ while (dum->callback_usage > 0) {
+ spin_unlock_irq(&dum->lock);
+ usleep_range(1000, 2000);
+ spin_lock_irq(&dum->lock);
+ }
+
dum->driver = NULL;
spin_unlock_irq(&dum->lock);
memzero_explicit(&dum->gadget, sizeof(struct usb_gadget));
dum->gadget.name = gadget_name;
dum->gadget.ops = &dummy_ops;
- dum->gadget.max_speed = USB_SPEED_SUPER;
+ if (mod_data.is_super_speed)
+ dum->gadget.max_speed = USB_SPEED_SUPER;
+ else if (mod_data.is_high_speed)
+ dum->gadget.max_speed = USB_SPEED_HIGH;
+ else
+ dum->gadget.max_speed = USB_SPEED_FULL;
dum->gadget.dev.parent = &pdev->dev;
init_dummy_udc_hw(dum);
list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list);
urb->hcpriv = urbp;
+ if (!dum_hcd->next_frame_urbp)
+ dum_hcd->next_frame_urbp = urbp;
if (usb_pipetype(urb->pipe) == PIPE_CONTROL)
urb->error_count = 1; /* mark as a new urb */
if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ?
dum->ss_hcd : dum->hs_hcd)))
return NULL;
+ if (!dum->ints_enabled)
+ return NULL;
if ((address & ~USB_DIR_IN) == 0)
return &dum->ep[0];
for (i = 1; i < DUMMY_ENDPOINTS; i++) {
spin_unlock_irqrestore(&dum->lock, flags);
return;
}
+ dum_hcd->next_frame_urbp = NULL;
for (i = 0; i < DUMMY_ENDPOINTS; i++) {
if (!ep_info[i].name)
int type;
int status = -EINPROGRESS;
+ /* stop when we reach URBs queued after the timer interrupt */
+ if (urbp == dum_hcd->next_frame_urbp)
+ break;
+
urb = urbp->urb;
if (urb->unlinked)
goto return_urb;
* until setup() returns; no reentrancy issues etc.
*/
if (value > 0) {
+ ++dum->callback_usage;
spin_unlock(&dum->lock);
value = dum->driver->setup(&dum->gadget,
&setup);
spin_lock(&dum->lock);
+ --dum->callback_usage;
if (value >= 0) {
/* no delays (max 64KB data stage) */
.product_desc = "Dummy host controller",
.hcd_priv_size = sizeof(struct dummy_hcd),
- .flags = HCD_USB3 | HCD_SHARED,
-
.reset = dummy_setup,
.start = dummy_start,
.stop = dummy_stop,
dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc);
dum = *((void **)dev_get_platdata(&pdev->dev));
- if (!mod_data.is_super_speed)
+ if (mod_data.is_super_speed)
+ dummy_hcd.flags = HCD_USB3 | HCD_SHARED;
+ else if (mod_data.is_high_speed)
dummy_hcd.flags = HCD_USB2;
+ else
+ dummy_hcd.flags = HCD_USB11;
hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev));
if (!hs_hcd)
return -ENOMEM;
pinfo->sb_type.gen = AMD_CHIPSET_SB700;
else if (rev >= 0x40 && rev <= 0x4f)
pinfo->sb_type.gen = AMD_CHIPSET_SB800;
- }
- pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
- 0x145c, NULL);
- if (pinfo->smbus_dev) {
- pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
} else {
pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
- if (!pinfo->smbus_dev) {
- pinfo->sb_type.gen = NOT_AMD_CHIPSET;
- return 0;
+ if (pinfo->smbus_dev) {
+ rev = pinfo->smbus_dev->revision;
+ if (rev >= 0x11 && rev <= 0x14)
+ pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
+ else if (rev >= 0x15 && rev <= 0x18)
+ pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
+ else if (rev >= 0x39 && rev <= 0x3a)
+ pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
+ } else {
+ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ 0x145c, NULL);
+ if (pinfo->smbus_dev) {
+ rev = pinfo->smbus_dev->revision;
+ pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
+ } else {
+ pinfo->sb_type.gen = NOT_AMD_CHIPSET;
+ return 0;
+ }
}
-
- rev = pinfo->smbus_dev->revision;
- if (rev >= 0x11 && rev <= 0x14)
- pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
- else if (rev >= 0x15 && rev <= 0x18)
- pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
- else if (rev >= 0x39 && rev <= 0x3a)
- pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
}
-
pinfo->sb_type.rev = rev;
return 1;
}
*
* Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
* It signals to the BIOS that the OS wants control of the host controller,
- * and then waits 5 seconds for the BIOS to hand over control.
+ * and then waits 1 second for the BIOS to hand over control.
* If we timeout, assume the BIOS is broken and take control anyway.
*/
static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
if (val & XHCI_HC_BIOS_OWNED) {
writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
- /* Wait for 5 seconds with 10 microsecond polling interval */
+ /* Wait for 1 second with 10 microsecond polling interval */
timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
- 0, 5000, 10);
+ 0, 1000000, 10);
/* Assume a buggy BIOS and take HC ownership anyway */
if (timeout) {
* operational or runtime registers. Wait 5 seconds and no more.
*/
timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
- 5000, 10);
+ 5000000, 10);
/* Assume a buggy HC and start HC initialization anyway */
if (timeout) {
val = readl(op_reg_base + XHCI_STS_OFFSET);
static inline unsigned int hcd_index(struct usb_hcd *hcd)
{
- if (hcd->speed == HCD_USB3)
+ if (hcd->speed >= HCD_USB3)
return 0;
else
return 1;
struct usbhs_fifo *fifo)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ int ret = 0;
- if (!usbhs_pipe_is_dcp(pipe))
- usbhsf_fifo_barrier(priv, fifo);
+ if (!usbhs_pipe_is_dcp(pipe)) {
+ /*
+ * Check the pipe condition first to avoid a roughly 10 msec
+ * -EBUSY delay from usbhsf_fifo_barrier() in the interrupt
+ * handler when the pipe is in the RX direction and empty.
+ */
+ if (usbhs_pipe_is_dir_in(pipe))
+ ret = usbhs_pipe_is_accessible(pipe);
+ if (!ret)
+ ret = usbhsf_fifo_barrier(priv, fifo);
+ }
- usbhs_write(priv, fifo->ctr, BCLR);
+ /*
+ * For a non-DCP pipe, only set BCLR when usbhsf_fifo_barrier()
+ * returned 0.
+ */
+ if (!ret)
+ usbhs_write(priv, fifo->ctr, BCLR);
}
static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
usbhs_pipe_running(pipe, 1);
- usbhsf_dma_start(pipe, fifo);
usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
dma_async_issue_pending(chan);
+ usbhsf_dma_start(pipe, fifo);
usbhs_pipe_enable(pipe);
xfer_work_end:
tty_kref_put(tty);
reset_open_count:
port->port.count = 0;
+ info->port = NULL;
usb_autopm_put_interface(serial->interface);
error_get_interface:
usb_serial_put(serial);
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+ { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
{ USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
{ USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
{ USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
+ { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
{ } /* Terminating entry */
};
#define ADI_GNICEPLUS_PID 0xF001
/*
+ * Cypress WICED USB UART
+ */
+#define CYPRESS_VID 0x04B4
+#define CYPRESS_WICED_BT_USB_PID 0x009B
+#define CYPRESS_WICED_WL_USB_PID 0xF900
+
+/*
* Microchip Technology, Inc.
*
* MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are
status = usb_control_msg(usbdev, pipe, request, requesttype, value,
index, buf, 1, MOS_WDR_TIMEOUT);
- if (status == 1)
+ if (status == 1) {
*data = *buf;
- else if (status < 0)
+ } else {
dev_err(&usbdev->dev,
"mos7720: usb_control_msg() failed: %d\n", status);
+ if (status >= 0)
+ status = -EIO;
+ *data = 0;
+ }
+
kfree(buf);
return status;
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
MCS_RD_RTYPE, 0, reg, buf, VENDOR_READ_LENGTH,
MOS_WDR_TIMEOUT);
+ if (ret < VENDOR_READ_LENGTH) {
+ if (ret >= 0)
+ ret = -EIO;
+ goto out;
+ }
+
*val = buf[0];
dev_dbg(&port->dev, "%s offset is %x, return val %x\n", __func__, reg, *val);
-
+out:
kfree(buf);
return ret;
}
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
MCS_RD_RTYPE, Wval, reg, buf, VENDOR_READ_LENGTH,
MOS_WDR_TIMEOUT);
+ if (ret < VENDOR_READ_LENGTH) {
+ if (ret >= 0)
+ ret = -EIO;
+ goto out;
+ }
*val = buf[0];
-
+out:
kfree(buf);
return ret;
}
return -ENODEV;
status = mos7840_get_uart_reg(port, MODEM_STATUS_REGISTER, &msr);
- if (status != 1)
+ if (status < 0)
return -EIO;
status = mos7840_get_uart_reg(port, MODEM_CONTROL_REGISTER, &mcr);
- if (status != 1)
+ if (status < 0)
return -EIO;
result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0)
| ((mcr & MCR_RTS) ? TIOCM_RTS : 0)
/* TP-LINK Incorporated products */
#define TPLINK_VENDOR_ID 0x2357
+#define TPLINK_PRODUCT_LTE 0x000D
#define TPLINK_PRODUCT_MA180 0x0201
/* Changhong products */
{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */
{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) }, /* D-Link DWM-157 C1 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
{DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
{DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
+ {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */
+ {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */
+ {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */
+ {DEVICE_SWI(0x413c, 0x81d2)}, /* Dell Wireless 5818 */
/* Huawei devices */
{DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
intf->desc.bInterfaceProtocol == USB_PR_UAS);
}
-static int uas_find_uas_alt_setting(struct usb_interface *intf)
+static struct usb_host_interface *uas_find_uas_alt_setting(
+ struct usb_interface *intf)
{
int i;
struct usb_host_interface *alt = &intf->altsetting[i];
if (uas_is_interface(alt))
- return alt->desc.bAlternateSetting;
+ return alt;
}
- return -ENODEV;
+ return NULL;
}
static int uas_find_endpoints(struct usb_host_interface *alt,
struct usb_device *udev = interface_to_usbdev(intf);
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
unsigned long flags = id->driver_info;
- int r, alt;
-
+ struct usb_host_interface *alt;
+ int r;
alt = uas_find_uas_alt_setting(intf);
- if (alt < 0)
+ if (!alt)
return 0;
- r = uas_find_endpoints(&intf->altsetting[alt], eps);
+ r = uas_find_endpoints(alt, eps);
if (r < 0)
return 0;
static int uas_switch_interface(struct usb_device *udev,
struct usb_interface *intf)
{
- int alt;
+ struct usb_host_interface *alt;
alt = uas_find_uas_alt_setting(intf);
- if (alt < 0)
- return alt;
+ if (!alt)
+ return -ENODEV;
- return usb_set_interface(udev,
- intf->altsetting[0].desc.bInterfaceNumber, alt);
+ return usb_set_interface(udev, alt->desc.bInterfaceNumber,
+ alt->desc.bAlternateSetting);
}
static int uas_configure_endpoints(struct uas_dev_info *devinfo)
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_SANE_SENSE ),
+/* Reported by Kris Lindgren <kris.lindgren@gmail.com> */
+UNUSUAL_DEV( 0x0bc2, 0x3332, 0x0000, 0x9999,
+ "Seagate",
+ "External",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_WP_DETECT ),
+
UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999,
"Maxtor",
"USB to SATA",
if (iface->cur_altsetting->desc.bNumEndpoints < 1)
return -ENODEV;
+ if (!usb_endpoint_xfer_int(&iface->cur_altsetting->endpoint[0].desc))
+ return -ENODEV;
result = -ENOMEM;
uwb_rc = uwb_rc_alloc();
/** Start the UWB daemon */
void uwbd_start(struct uwb_rc *rc)
{
- rc->uwbd.task = kthread_run(uwbd, rc, "uwbd");
- if (rc->uwbd.task == NULL)
+ struct task_struct *task = kthread_run(uwbd, rc, "uwbd");
+ if (IS_ERR(task)) {
+ rc->uwbd.task = NULL;
printk(KERN_ERR "UWB: Cannot start management daemon; "
"UWB won't work\n");
- else
+ } else {
+ rc->uwbd.task = task;
rc->uwbd.pid = rc->uwbd.task->pid;
+ }
}
/* Stop the UWB daemon and free any unprocessed events */
void uwbd_stop(struct uwb_rc *rc)
{
- kthread_stop(rc->uwbd.task);
+ if (rc->uwbd.task)
+ kthread_stop(rc->uwbd.task);
uwbd_flush(rc);
}
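The fix above relies on the fact that kthread_run() never returns NULL; failure is signalled with an ERR_PTR() value. The general pattern:

/* kthread_run() never returns NULL; failure is an ERR_PTR() value. */
struct task_struct *task = kthread_run(worker_fn, data, "worker");

if (IS_ERR(task))
	return PTR_ERR(task);	/* e.g. -ENOMEM or -EINTR */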
#if defined(DEBUG) && defined(CONFIG_FB_ATY_CT)
case ATYIO_CLKR:
if (M64_HAS(INTEGRATED)) {
- struct atyclk clk;
+ struct atyclk clk = { 0 };
union aty_pll *pll = &par->pll;
u32 dsp_config = pll->ct.dsp_config;
u32 dsp_on_off = pll->ct.dsp_on_off;
{
int ret = 0;
char ln_map[4];
+ bool connected;
+
+ mutex_lock(&dp_drv->attention_lock);
+ connected = dp_drv->cable_connected;
+ mutex_unlock(&dp_drv->attention_lock);
+
+ /*
+ * If the DP cable is disconnected, avoid link training or turning on the DP path
+ */
+ if (!connected) {
+ pr_err("DP sink not connected\n");
+ return -EINVAL;
+ }
/* wait until link training is completed */
pr_debug("enter, lt_needed=%s\n", lt_needed ? "true" : "false");
dp_drv->power_on = true;
ret = mdss_dp_setup_main_link(dp_drv, lt_needed);
+ if (ret) {
+ if (ret == -ENODEV || ret == -EINVAL) {
+ pr_err("main link setup failed\n");
+ mutex_unlock(&dp_drv->train_mutex);
+ return ret;
+ }
+ }
exit_loop:
mutex_unlock(&dp_drv->train_mutex);
ret = mdss_dp_dpcd_cap_read(dp);
if (ret || !mdss_dp_aux_is_link_rate_valid(dp->dpcd.max_link_rate) ||
!mdss_dp_aux_is_lane_count_valid(dp->dpcd.max_lane_count)) {
- if (ret == EDP_AUX_ERR_TOUT) {
+ if ((ret == -ENODEV) || (ret == EDP_AUX_ERR_TOUT)) {
pr_err("DPCD read timedout, skip connect notification\n");
goto end;
}
read_edid:
ret = mdss_dp_edid_read(dp);
if (ret) {
+ if (ret == -ENODEV)
+ goto end;
+
pr_err("edid read error, setting default resolution\n");
goto notify;
}
static void mdss_dp_reset_sw_state(struct mdss_dp_drv_pdata *dp)
{
+ int ret = 0;
+
pr_debug("enter\n");
mdss_dp_reset_event_list(dp);
+
+ /*
+ * IRQ_HPD attention event handler first turns on DP path and then
+ * notifies CONNECT_IRQ_HPD and waits for userspace to trigger UNBLANK.
+ * In such cases, before UNBLANK call, if cable is disconnected, if
+ * DISCONNECT is notified immediately, userspace might not sense any
+ * change in connection status, leaving DP controller ON.
+ *
+ * To avoid such cases, wait for the connection event to complete before
+ * sending disconnection event
+ */
+ if (atomic_read(&dp->notification_pending)) {
+ pr_debug("waiting for the pending notitfication\n");
+ ret = wait_for_completion_timeout(&dp->notification_comp, HZ);
+ if (ret <= 0) {
+ pr_err("%s timed out\n",
+ mdss_dp_notification_status_to_string(
+ dp->hpd_notification_status));
+ }
+ }
+
atomic_set(&dp->notification_pending, 0);
+ /* complete any waiting completions */
complete_all(&dp->notification_comp);
}
if (!connected) {
pr_err("dp cable disconnected\n");
- break;
+ ret = -ENODEV;
+ goto end;
}
dp->aux_error_num = EDP_AUX_ERR_NONE;
static int dp_aux_chan_ready(struct mdss_dp_drv_pdata *ep)
{
- int cnt, ret;
+ int cnt, ret = 0;
char data = 0;
for (cnt = 5; cnt; cnt--) {
ret, mdss_dp_get_aux_error(ep->aux_error_num));
if (ret >= 0)
break;
+
+ if (ret == -ENODEV)
+ return ret;
+
msleep(100);
}
u32 checksum = 0;
bool phy_aux_update_requested = false;
bool ext_block_parsing_done = false;
+ bool connected = false;
ret = dp_aux_chan_ready(dp);
if (ret) {
u8 segment;
u8 edid_buf[EDID_BLOCK_SIZE] = {0};
+ mutex_lock(&dp->attention_lock);
+ connected = dp->cable_connected;
+ mutex_unlock(&dp->attention_lock);
+
+ if (!connected) {
+ pr_err("DP sink not connected\n");
+ return -ENODEV;
+ }
+
/*
* Write the segment first.
* Segment = 0, for blocks 0 and 1
rlen = dp_aux_read_buf(ep, 0x202, len, 0);
if (rlen < len) {
pr_err("edp aux read failed\n");
- return 0;
+ return rlen;
}
rp = &ep->rxp;
bp = rp->data;
usleep_time = ep->dpcd.training_read_interval;
usleep_range(usleep_time, usleep_time);
- mdss_dp_aux_link_status_read(ep, 6);
+ ret = mdss_dp_aux_link_status_read(ep, 6);
+ if (ret == -ENODEV)
+ break;
+
if (mdss_dp_aux_clock_recovery_done(ep)) {
ret = 0;
break;
}
if (ep->v_level == DPCD_LINK_VOLTAGE_MAX) {
- ret = -1;
+ ret = -EAGAIN;
break; /* quit */
}
if (old_v_level == ep->v_level) {
tries++;
if (tries >= maximum_retries) {
- ret = -1;
+ ret = -EAGAIN;
break; /* quit */
}
} else {
usleep_time = ep->dpcd.training_read_interval;
usleep_range(usleep_time, usleep_time);
- mdss_dp_aux_link_status_read(ep, 6);
+ ret = mdss_dp_aux_link_status_read(ep, 6);
+ if (ret == -ENODEV)
+ break;
if (mdss_dp_aux_channel_eq_done(ep)) {
ret = 0;
}
if (tries > maximum_retries) {
- ret = -1;
+ ret = -EAGAIN;
break;
}
tries++;
ret = dp_start_link_train_1(dp);
if (ret < 0) {
- if (!dp_link_rate_down_shift(dp)) {
+ if ((ret == -EAGAIN) && !dp_link_rate_down_shift(dp)) {
pr_debug("retry with lower rate\n");
dp_clear_training_pattern(dp);
return -EAGAIN;
ret = dp_start_link_train_2(dp);
if (ret < 0) {
- if (!dp_link_rate_down_shift(dp)) {
+ if ((ret == -EAGAIN) && !dp_link_rate_down_shift(dp)) {
pr_debug("retry with lower rate\n");
dp_clear_training_pattern(dp);
return -EAGAIN;
ret = mdss_dp_aux_link_status_read(ep, 6);
- if (ret) {
+ if (ret > 0) {
sp = &ep->link_status;
ret = sp->port_0_in_sync; /* 1 == sync */
}
{
int ret = 0;
u32 v_total = 0, v_blank = 0, sleep_ms = 0, fps = 0;
- struct mdss_panel_info *pinfo = &ctrl->panel_data.panel_info;
+ struct mdss_panel_info *pinfo;
- if (ctrl->panel_mode == DSI_CMD_MODE)
+ /* for dsi 2.1 and above dma scheduling is used */
+ if ((!ctrl) || (ctrl->panel_mode == DSI_CMD_MODE) ||
+ (ctrl->shared_data->hw_rev > MDSS_DSI_HW_REV_200))
return ret;
+ pinfo = &ctrl->panel_data.panel_info;
+
if (ctrl->ctrl_state & CTRL_STATE_MDP_ACTIVE) {
mdss_dsi_wait4video_done(ctrl);
v_total = mdss_panel_get_vtotal(pinfo);
return ret;
}
+static void mdss_dsi_schedule_dma_cmd(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ u32 v_blank, val = 0x0;
+ struct mdss_panel_info *pinfo;
+
+ /* for dsi 2.0 and below dma scheduling is not supported */
+ if ((!ctrl) || (ctrl->panel_mode == DSI_CMD_MODE) ||
+ (ctrl->shared_data->hw_rev < MDSS_DSI_HW_REV_201))
+ return;
+
+ pinfo = &ctrl->panel_data.panel_info;
+ v_blank = pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width;
+
+ /* DMA_SCHEDULE_CTRL */
+ val = MIPI_INP(ctrl->ctrl_io.base + 0x100);
+ val = val | (1 << 28); /* DMA_SCHEDULE_EN */
+ MIPI_OUTP(ctrl->ctrl_io.base + 0x100, val);
+ val |= (pinfo->yres + v_blank);
+ MIPI_OUTP(ctrl->ctrl_io.base + 0x100, val); /* DMA_SCHEDULE_LINE */
+ wmb();
+
+ pr_debug("%s schedule at line %x", __func__, val);
+ MDSS_XLOG(ctrl->ndx, val);
+}
+
static void mdss_dsi_wait4active_region(struct mdss_dsi_ctrl_pdata *ctrl)
{
int in_blanking = 0;
int retry_count = 0;
- if (ctrl->panel_mode != DSI_VIDEO_MODE)
+ /* for dsi 2.1 and above dma scheduling is used */
+ if ((!ctrl) || (ctrl->panel_mode != DSI_VIDEO_MODE) ||
+ (ctrl->shared_data->hw_rev > MDSS_DSI_HW_REV_200))
return;
while (retry_count != MAX_BTA_WAIT_RETRY) {
MIPI_OUTP((ctrl->ctrl_base) + 0x04c, len);
wmb();
+ /* schedule dma cmds at start of blanking region */
+ mdss_dsi_schedule_dma_cmd(ctrl);
+
+ /* DSI_CMD_MODE_DMA_SW_TRIGGER */
MIPI_OUTP((ctrl->ctrl_base) + 0x090, 0x01);
wmb();
MDSS_XLOG(ctrl->dma_addr, len);
unsigned int timeout)
{
struct kempld_device_data *pld = wdt_data->pld;
- u32 prescaler = kempld_prescaler[PRESCALER_21];
+ u32 prescaler;
u64 stage_timeout64;
u32 stage_timeout;
u32 remainder;
u8 stage_cfg;
+#if GCC_VERSION < 40400
+ /* work around a bug compiling do_div() */
+ prescaler = READ_ONCE(kempld_prescaler[PRESCALER_21]);
+#else
+ prescaler = kempld_prescaler[PRESCALER_21];
+#endif
+
if (!stage)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
+
+/*
+ * Create userspace mapping for the DMA-coherent memory.
+ * This function should be called with the pages from the current domain only,
+ * passing pages mapped from other domains would lead to memory corruption.
+ */
+int
+xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ if (__generic_dma_ops(dev)->mmap)
+ return __generic_dma_ops(dev)->mmap(dev, vma, cpu_addr,
+ dma_addr, size, attrs);
+#endif
+ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);
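A sketch of how a driver might use the new export (the driver type, its fields, and the file-ops hook are hypothetical); it maps coherent DMA memory previously allocated for drv->dev into a userspace vma:

/* Sketch (hypothetical driver): expose coherent DMA memory to userspace. */
static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_drv *drv = file->private_data;

	return xen_swiotlb_dma_mmap(drv->dev, vma, drv->cpu_addr,
				    drv->dma_handle,
				    vma->vm_end - vma->vm_start,
				    NULL);	/* no special dma_attrs */
}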
for filesystems like NFS and for the flock() system
call. Disabling this option saves about 11k.
+source "fs/crypto/Kconfig"
+
source "fs/notify/Kconfig"
source "fs/quota/Kconfig"
obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
obj-$(CONFIG_AIO) += aio.o
obj-$(CONFIG_FS_DAX) += dax.o
+obj-$(CONFIG_FS_ENCRYPTION) += crypto/
obj-$(CONFIG_FILE_LOCKING) += locks.o
obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o
obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o
#ifdef CONFIG_SYSFS
INIT_LIST_HEAD(&bdev->bd_holder_disks);
#endif
+ bdev->bd_bdi = &noop_backing_dev_info;
inode_init_once(&ei->vfs_inode);
/* Initialize mutex for freeze. */
mutex_init(&bdev->bd_fsfreeze_mutex);
}
list_del_init(&bdev->bd_list);
spin_unlock(&bdev_lock);
+ /* Detach inode from wb early as bdi_put() may free bdi->wb */
+ inode_detach_wb(inode);
+ if (bdev->bd_bdi != &noop_backing_dev_info) {
+ bdi_put(bdev->bd_bdi);
+ bdev->bd_bdi = &noop_backing_dev_info;
+ }
}
static const struct super_operations bdev_sops = {
static LIST_HEAD(all_bdevs);
+/*
+ * If there is a bdev inode for this device, unhash it so that it gets evicted
+ * as soon as the last inode reference is dropped.
+ */
+void bdev_unhash_inode(dev_t dev)
+{
+ struct inode *inode;
+
+ inode = ilookup5(blockdev_superblock, hash(dev), bdev_test, &dev);
+ if (inode) {
+ remove_inode_hash(inode);
+ iput(inode);
+ }
+}
+
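A plausible caller, sketched as an assumption (this mirrors how gendisk teardown would use it): unhash the bdev inode for every minor of a disappearing disk so a future device reusing the same dev_t starts from a fresh inode.

/* Sketch: invalidate stale bdev inodes for each minor of a dying disk. */
static void unhash_disk_inodes(struct gendisk *disk)
{
	int i;

	for (i = 0; i < disk->minors; i++)
		bdev_unhash_inode(MKDEV(disk->major, disk->first_minor + i));
}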
struct block_device *bdget(dev_t dev)
{
struct block_device *bdev;
bdev->bd_disk = disk;
bdev->bd_queue = disk->queue;
bdev->bd_contains = bdev;
+
bdev->bd_inode->i_flags = disk->fops->direct_access ? S_DAX : 0;
if (!partno) {
ret = -ENXIO;
(bdev->bd_part->nr_sects % (PAGE_SIZE / 512)))
bdev->bd_inode->i_flags &= ~S_DAX;
}
+
+ if (bdev->bd_bdi == &noop_backing_dev_info)
+ bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
} else {
if (bdev->bd_contains == bdev) {
ret = 0;
kill_bdev(bdev);
bdev_write_inode(bdev);
- /*
- * Detaching bdev inode from its wb in __destroy_inode()
- * is too late: the queue which embeds its bdi (along with
- * root wb) can be gone as soon as we put_disk() below.
- */
- inode_detach_wb(bdev->bd_inode);
}
if (bdev->bd_contains == bdev) {
if (disk->fops->release)
{
SHASH_DESC_ON_STACK(shash, tfm);
u32 *ctx = (u32 *)shash_desc_ctx(shash);
+ u32 retval;
int err;
shash->tfm = tfm;
err = crypto_shash_update(shash, address, length);
BUG_ON(err);
- return *ctx;
+ retval = *ctx;
+ barrier_data(ctx);
+ return retval;
}
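barrier_data() forces the compiler to assume the pointed-to buffer is still live, so neither the final read of *ctx nor any preceding stores into the on-stack shash descriptor can be optimized away. It is the same trick memzero_explicit() uses; a minimal illustration:

/* Without barrier_data() the memset below is a removable dead store. */
static void clear_key(u8 *key, size_t len)
{
	memset(key, 0, len);
	barrier_data(key);	/* compiler must keep the stores */
}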
out:
if (ret)
btrfs_cmp_data_free(cmp);
- return 0;
+ return ret;
}
static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
ret = PTR_ERR(new_root);
goto out;
}
+ if (!is_fstree(new_root->objectid)) {
+ ret = -ENOENT;
+ goto out;
+ }
path = btrfs_alloc_path();
if (!path) {
while (!list_empty(list)) {
reloc_root = list_entry(list->next, struct btrfs_root,
root_list);
+ __del_reloc_root(reloc_root);
free_extent_buffer(reloc_root->node);
free_extent_buffer(reloc_root->commit_root);
reloc_root->node = NULL;
reloc_root->commit_root = NULL;
- __del_reloc_root(reloc_root);
}
}
{
int ret;
+ if (ino == BTRFS_FIRST_FREE_OBJECTID)
+ return 1;
+
ret = get_cur_inode_state(sctx, ino, gen);
if (ret < 0)
goto out;
* not deleted and then re-created; if it was, then we have no overwrite
* and we can just unlink this entry.
*/
- if (sctx->parent_root) {
+ if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
NULL, NULL, NULL);
if (ret < 0 && ret != -ENOENT)
goto restore;
}
+ btrfs_qgroup_rescan_resume(fs_info);
+
if (!fs_info->uuid_root) {
btrfs_info(fs_info, "creating UUID tree");
ret = btrfs_create_uuid_tree(fs_info);
},
};
-const u64 const btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
+const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
[BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10,
[BTRFS_RAID_RAID1] = BTRFS_BLOCK_GROUP_RAID1,
[BTRFS_RAID_DUP] = BTRFS_BLOCK_GROUP_DUP,
/*
* read a single page, without unlocking it.
*/
-static int readpage_nounlock(struct file *filp, struct page *page)
+static int ceph_do_readpage(struct file *filp, struct page *page)
{
struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);
err = ceph_readpage_from_fscache(inode, page);
if (err == 0)
- goto out;
+ return -EINPROGRESS;
dout("readpage inode %p file %p page %p index %lu\n",
inode, filp, page, page->index);
static int ceph_readpage(struct file *filp, struct page *page)
{
- int r = readpage_nounlock(filp, page);
- unlock_page(page);
+ int r = ceph_do_readpage(filp, page);
+ if (r != -EINPROGRESS)
+ unlock_page(page);
+ else
+ r = 0;
return r;
}
goto retry_locked;
r = writepage_nounlock(page, NULL);
if (r < 0)
- goto fail_nosnap;
+ goto fail_unlock;
goto retry_locked;
}
}
/* we need to read it. */
- r = readpage_nounlock(file, page);
- if (r < 0)
- goto fail_nosnap;
+ r = ceph_do_readpage(file, page);
+ if (r < 0) {
+ if (r == -EINPROGRESS)
+ return -EAGAIN;
+ goto fail_unlock;
+ }
goto retry_locked;
-fail_nosnap:
+fail_unlock:
unlock_page(page);
return r;
}
fscache_relinquish_cookie(cookie, 0);
}
-static void ceph_vfs_readpage_complete(struct page *page, void *data, int error)
-{
- if (!error)
- SetPageUptodate(page);
-}
-
-static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int error)
+static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error)
{
if (!error)
SetPageUptodate(page);
return -ENOBUFS;
ret = fscache_read_or_alloc_page(ci->fscache, page,
- ceph_vfs_readpage_complete, NULL,
+ ceph_readpage_from_fscache_complete, NULL,
GFP_KERNEL);
switch (ret) {
return -ENOBUFS;
ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
- ceph_vfs_readpage_complete_unlock,
+ ceph_readpage_from_fscache_complete,
NULL, mapping_gfp_mask(mapping));
switch (ret) {
int *pfreepath)
{
char *path;
+ struct inode *dir;
- if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) {
- *pino = ceph_ino(d_inode(dentry->d_parent));
+ rcu_read_lock();
+ dir = d_inode_rcu(dentry->d_parent);
+ if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
+ *pino = ceph_ino(dir);
+ rcu_read_unlock();
*ppath = dentry->d_name.name;
*ppathlen = dentry->d_name.len;
return 0;
}
+ rcu_read_unlock();
path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
if (IS_ERR(path))
return PTR_ERR(path);
unsigned int (*calc_smb_size)(void *);
/* check for STATUS_PENDING and process it in a positive case */
bool (*is_status_pending)(char *, struct TCP_Server_Info *, int);
+ /* check for STATUS_NETWORK_SESSION_EXPIRED */
+ bool (*is_session_expired)(char *);
/* send oplock break response */
int (*oplock_response)(struct cifs_tcon *, struct cifs_fid *,
struct cifsInodeInfo *);
return length;
server->total_read += length;
+ if (server->ops->is_session_expired &&
+ server->ops->is_session_expired(buf)) {
+ cifs_reconnect(server);
+ wake_up(&server->response_q);
+ return -1;
+ }
+
if (server->ops->is_status_pending &&
server->ops->is_status_pending(buf, server, 0)) {
discard_remaining_data(server);
cifs_dump_mem("Bad SMB: ", buf,
min_t(unsigned int, server->total_read, 48));
+ if (server->ops->is_session_expired &&
+ server->ops->is_session_expired(buf)) {
+ cifs_reconnect(server);
+ wake_up(&server->response_q);
+ return -1;
+ }
+
if (server->ops->is_status_pending &&
server->ops->is_status_pending(buf, server, length))
return -1;
cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
server->sec_mode, server->capabilities, server->timeAdj);
+ if (ses->auth_key.response) {
+ cifs_dbg(VFS, "Free previous auth_key.response = %p\n",
+ ses->auth_key.response);
+ kfree(ses->auth_key.response);
+ ses->auth_key.response = NULL;
+ ses->auth_key.len = 0;
+ }
+
if (server->ops->sess_setup)
rc = server->ops->sess_setup(xid, ses, nls_info);
int i;
if (unlikely(direntry->d_name.len >
- tcon->fsAttrInfo.MaxPathNameComponentLength))
+ le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
return -ENAMETOOLONG;
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
if (backup_cred(cifs_sb))
create_options |= CREATE_OPEN_BACKUP_INTENT;
+ /* O_SYNC also has bit for O_DSYNC so following check picks up either */
+ if (f_flags & O_SYNC)
+ create_options |= CREATE_WRITE_THROUGH;
+
+ if (f_flags & O_DIRECT)
+ create_options |= CREATE_NO_BUFFER;
+
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = desired_access;
return true;
}
+static bool
+smb2_is_session_expired(char *buf)
+{
+ struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
+
+ if (hdr->Status != STATUS_NETWORK_SESSION_EXPIRED)
+ return false;
+
+ cifs_dbg(FYI, "Session expired\n");
+ return true;
+}
+
static int
smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
struct cifsInodeInfo *cinode)
.close_dir = smb2_close_dir,
.calc_smb_size = smb2_calc_size,
.is_status_pending = smb2_is_status_pending,
+ .is_session_expired = smb2_is_session_expired,
.oplock_response = smb2_oplock_response,
.queryfs = smb2_queryfs,
.mand_lock = smb2_mand_lock,
.close_dir = smb2_close_dir,
.calc_smb_size = smb2_calc_size,
.is_status_pending = smb2_is_status_pending,
+ .is_session_expired = smb2_is_session_expired,
.oplock_response = smb2_oplock_response,
.queryfs = smb2_queryfs,
.mand_lock = smb2_mand_lock,
.close_dir = smb2_close_dir,
.calc_smb_size = smb2_calc_size,
.is_status_pending = smb2_is_status_pending,
+ .is_session_expired = smb2_is_session_expired,
.oplock_response = smb2_oplock_response,
.queryfs = smb2_queryfs,
.mand_lock = smb2_mand_lock,
.close_dir = smb2_close_dir,
.calc_smb_size = smb2_calc_size,
.is_status_pending = smb2_is_status_pending,
+ .is_session_expired = smb2_is_session_expired,
.oplock_response = smb2_oplock_response,
.queryfs = smb2_queryfs,
.mand_lock = smb2_mand_lock,
build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT);
req->NegotiateContextCount = cpu_to_le16(2);
- inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context) + 2
+ inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context)
+ sizeof(struct smb2_encryption_neg_context)); /* calculate hash */
}
#else
/*
* validation ioctl must be signed, so no point sending this if we
- * can not sign it. We could eventually change this to selectively
+ * cannot sign it (i.e. we are not a known user). Even if signing is not
+ * required (enabled but not negotiated), in those cases we selectively
* sign just this, the first and only signed request on a connection.
- * This is good enough for now since a user who wants better security
- * would also enable signing on the mount. Having validation of
- * negotiate info for signed connections helps reduce attack vectors
+ * Having validation of negotiate info helps reduce attack vectors.
*/
- if (tcon->ses->server->sign == false)
+ if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
return 0; /* validation requires signing */
+ if (tcon->ses->user_name == NULL) {
+ cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
+ return 0; /* validation requires signing */
+ }
+
+ if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
+ cifs_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
+
vneg_inbuf.Capabilities =
cpu_to_le32(tcon->ses->server->vals->req_capabilities);
memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid,
#define NUMBER_OF_SMB2_COMMANDS 0x0013
-/* BB FIXME - analyze following length BB */
-#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
+/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
+#define MAX_SMB2_HDR_SIZE 0x00b0
#define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
--- /dev/null
+config FS_ENCRYPTION
+ tristate "FS Encryption (Per-file encryption)"
+ depends on BLOCK
+ select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_ECB
+ select CRYPTO_XTS
+ select CRYPTO_CTS
+ select CRYPTO_CTR
+ select CRYPTO_SHA256
+ select KEYS
+ select ENCRYPTED_KEYS
+ help
+ Enable encryption of files and directories. This
+ feature is similar to ecryptfs, but it is more memory
+ efficient since it avoids caching the encrypted and
+ decrypted pages in the page cache.
--- /dev/null
+obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o
+
+fscrypto-y := crypto.o fname.o policy.o keyinfo.o
+fscrypto-$(CONFIG_BLOCK) += bio.o
--- /dev/null
+/*
+ * This contains encryption functions for per-file encryption.
+ *
+ * Copyright (C) 2015, Google, Inc.
+ * Copyright (C) 2015, Motorola Mobility
+ *
+ * Written by Michael Halcrow, 2014.
+ *
+ * Filename encryption additions
+ * Uday Savagaonkar, 2014
+ * Encryption policy handling additions
+ * Ildar Muslukhov, 2014
+ * Add fscrypt_pullback_bio_page()
+ * Jaegeuk Kim, 2015.
+ *
+ * This has not yet undergone a rigorous security audit.
+ *
+ * The usage of AES-XTS should conform to recommendations in NIST
+ * Special Publication 800-38E and IEEE P1619/D16.
+ */
+
+#include <linux/pagemap.h>
+#include <linux/module.h>
+#include <linux/bio.h>
+#include <linux/namei.h>
+#include "fscrypt_private.h"
+
+/*
+ * Call fscrypt_decrypt_page on every single page, reusing the encryption
+ * context.
+ */
+static void completion_pages(struct work_struct *work)
+{
+ struct fscrypt_ctx *ctx =
+ container_of(work, struct fscrypt_ctx, r.work);
+ struct bio *bio = ctx->r.bio;
+ struct bio_vec *bv;
+ int i;
+
+ bio_for_each_segment_all(bv, bio, i) {
+ struct page *page = bv->bv_page;
+ int ret = fscrypt_decrypt_page(page->mapping->host, page,
+ PAGE_SIZE, 0, page->index);
+
+ if (ret) {
+ WARN_ON_ONCE(1);
+ SetPageError(page);
+ } else {
+ SetPageUptodate(page);
+ }
+ unlock_page(page);
+ }
+ fscrypt_release_ctx(ctx);
+ bio_put(bio);
+}
+
+void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
+{
+ INIT_WORK(&ctx->r.work, completion_pages);
+ ctx->r.bio = bio;
+ queue_work(fscrypt_read_workqueue, &ctx->r.work);
+}
+EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);
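A sketch of a caller on the read-completion side (the end_io handler is hypothetical; cf. the ext4/f2fs mpage read paths): encrypted bios are handed to the fscrypt workqueue, which decrypts each segment, sets PageUptodate and unlocks the pages via completion_pages() above.

/* Sketch (hypothetical read-completion handler). */
static void my_read_end_io(struct bio *bio)
{
	struct fscrypt_ctx *ctx = bio->bi_private;

	if (ctx)
		fscrypt_decrypt_bio_pages(ctx, bio);	/* async decrypt+unlock */
	/* else: plaintext path - mark pages uptodate, unlock, bio_put() */
}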
+
+void fscrypt_pullback_bio_page(struct page **page, bool restore)
+{
+ struct fscrypt_ctx *ctx;
+ struct page *bounce_page;
+
+ /* The bounce data pages are unmapped. */
+ if ((*page)->mapping)
+ return;
+
+ /* The bounce data page is unmapped. */
+ bounce_page = *page;
+ ctx = (struct fscrypt_ctx *)page_private(bounce_page);
+
+ /* restore control page */
+ *page = ctx->w.control_page;
+
+ if (restore)
+ fscrypt_restore_control_page(bounce_page);
+}
+EXPORT_SYMBOL(fscrypt_pullback_bio_page);
+
+int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
+ sector_t pblk, unsigned int len)
+{
+ struct fscrypt_ctx *ctx;
+ struct page *ciphertext_page = NULL;
+ struct bio *bio;
+ int ret, err = 0;
+
+ BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
+
+ ctx = fscrypt_get_ctx(inode, GFP_NOFS);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ciphertext_page = fscrypt_alloc_bounce_page(ctx, GFP_NOWAIT);
+ if (IS_ERR(ciphertext_page)) {
+ err = PTR_ERR(ciphertext_page);
+ goto errout;
+ }
+
+ while (len--) {
+ err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk,
+ ZERO_PAGE(0), ciphertext_page,
+ PAGE_SIZE, 0, GFP_NOFS);
+ if (err)
+ goto errout;
+
+ bio = bio_alloc(GFP_NOWAIT, 1);
+ if (!bio) {
+ err = -ENOMEM;
+ goto errout;
+ }
+ bio->bi_bdev = inode->i_sb->s_bdev;
+ bio->bi_iter.bi_sector =
+ pblk << (inode->i_sb->s_blocksize_bits - 9);
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ ret = bio_add_page(bio, ciphertext_page,
+ inode->i_sb->s_blocksize, 0);
+ if (ret != inode->i_sb->s_blocksize) {
+ /* should never happen! */
+ WARN_ON(1);
+ bio_put(bio);
+ err = -EIO;
+ goto errout;
+ }
+ err = submit_bio_wait(0, bio);
+ bio_put(bio);
+ if (err)
+ goto errout;
+ lblk++;
+ pblk++;
+ }
+ err = 0;
+errout:
+ fscrypt_release_ctx(ctx);
+ return err;
+}
+EXPORT_SYMBOL(fscrypt_zeroout_range);
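A usage sketch (helper name illustrative): when a filesystem zeroes part of an encrypted regular file, it must write *encrypted* zeroes so the blocks decrypt to zero on a later read, instead of issuing a plain zeroout.

/* Sketch: zeroing blocks of a file that may be encrypted. */
if (my_sb_has_encryption(inode))	/* hypothetical predicate */
	err = fscrypt_zeroout_range(inode, lblk, pblk, nr_blocks);
else
	err = sb_issue_zeroout(inode->i_sb, pblk, nr_blocks, GFP_NOFS);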
--- /dev/null
+/*
+ * This contains encryption functions for per-file encryption.
+ *
+ * Copyright (C) 2015, Google, Inc.
+ * Copyright (C) 2015, Motorola Mobility
+ *
+ * Written by Michael Halcrow, 2014.
+ *
+ * Filename encryption additions
+ * Uday Savagaonkar, 2014
+ * Encryption policy handling additions
+ * Ildar Muslukhov, 2014
+ * Add fscrypt_pullback_bio_page()
+ * Jaegeuk Kim, 2015.
+ *
+ * This has not yet undergone a rigorous security audit.
+ *
+ * The usage of AES-XTS should conform to recommendations in NIST
+ * Special Publication 800-38E and IEEE P1619/D16.
+ */
+
+#include <linux/pagemap.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/ratelimit.h>
+#include <linux/dcache.h>
+#include <linux/namei.h>
+#include <crypto/aes.h>
+#include "fscrypt_private.h"
+
+static unsigned int num_prealloc_crypto_pages = 32;
+static unsigned int num_prealloc_crypto_ctxs = 128;
+
+module_param(num_prealloc_crypto_pages, uint, 0444);
+MODULE_PARM_DESC(num_prealloc_crypto_pages,
+ "Number of crypto pages to preallocate");
+module_param(num_prealloc_crypto_ctxs, uint, 0444);
+MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
+ "Number of crypto contexts to preallocate");
+
+static mempool_t *fscrypt_bounce_page_pool = NULL;
+
+static LIST_HEAD(fscrypt_free_ctxs);
+static DEFINE_SPINLOCK(fscrypt_ctx_lock);
+
+struct workqueue_struct *fscrypt_read_workqueue;
+static DEFINE_MUTEX(fscrypt_init_mutex);
+
+static struct kmem_cache *fscrypt_ctx_cachep;
+struct kmem_cache *fscrypt_info_cachep;
+
+/**
+ * fscrypt_release_ctx() - Releases an encryption context
+ * @ctx: The encryption context to release.
+ *
+ * If the encryption context was allocated from the pre-allocated pool, returns
+ * it to that pool. Else, frees it.
+ *
+ * If there's a bounce page in the context, this frees that.
+ */
+void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
+{
+ unsigned long flags;
+
+ if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
+ mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
+ ctx->w.bounce_page = NULL;
+ }
+ ctx->w.control_page = NULL;
+ if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
+ kmem_cache_free(fscrypt_ctx_cachep, ctx);
+ } else {
+ spin_lock_irqsave(&fscrypt_ctx_lock, flags);
+ list_add(&ctx->free_list, &fscrypt_free_ctxs);
+ spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
+ }
+}
+EXPORT_SYMBOL(fscrypt_release_ctx);
+
+/**
+ * fscrypt_get_ctx() - Gets an encryption context
+ * @inode: The inode for which we are doing the crypto
+ * @gfp_flags: The gfp flag for memory allocation
+ *
+ * Allocates and initializes an encryption context.
+ *
+ * Return: An allocated and initialized encryption context on success; error
+ * value or NULL otherwise.
+ */
+struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags)
+{
+ struct fscrypt_ctx *ctx = NULL;
+ struct fscrypt_info *ci = inode->i_crypt_info;
+ unsigned long flags;
+
+ if (ci == NULL)
+ return ERR_PTR(-ENOKEY);
+
+ /*
+ * We first try getting the ctx from a free list because in
+ * the common case the ctx will have an allocated and
+ * initialized crypto tfm, so it's probably a worthwhile
+ * optimization. For the bounce page, we first try getting it
+ * from the kernel allocator because that's just about as fast
+ * as getting it from a list and because a cache of free pages
+ * should generally be a "last resort" option for a filesystem
+ * to be able to do its job.
+ */
+ spin_lock_irqsave(&fscrypt_ctx_lock, flags);
+ ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
+ struct fscrypt_ctx, free_list);
+ if (ctx)
+ list_del(&ctx->free_list);
+ spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
+ if (!ctx) {
+ ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+ ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
+ } else {
+ ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
+ }
+ ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
+ return ctx;
+}
+EXPORT_SYMBOL(fscrypt_get_ctx);
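A minimal sketch of the get/release pairing a filesystem would use around an I/O that needs a context (the intermediate step is elided):

struct fscrypt_ctx *ctx = fscrypt_get_ctx(inode, GFP_NOFS);

if (IS_ERR(ctx))
	return PTR_ERR(ctx);
/* ... stash ctx in bio->bi_private, or use its bounce page ... */
fscrypt_release_ctx(ctx);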
+
+/**
+ * page_crypt_complete() - completion callback for page crypto
+ * @req: The asynchronous cipher request context
+ * @res: The result of the cipher operation
+ */
+static void page_crypt_complete(struct crypto_async_request *req, int res)
+{
+ struct fscrypt_completion_result *ecr = req->data;
+
+ if (res == -EINPROGRESS)
+ return;
+ ecr->res = res;
+ complete(&ecr->completion);
+}
+
+int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
+ u64 lblk_num, struct page *src_page,
+ struct page *dest_page, unsigned int len,
+ unsigned int offs, gfp_t gfp_flags)
+{
+ struct {
+ __le64 index;
+ u8 padding[FS_IV_SIZE - sizeof(__le64)];
+ } iv;
+ struct skcipher_request *req = NULL;
+ DECLARE_FS_COMPLETION_RESULT(ecr);
+ struct scatterlist dst, src;
+ struct fscrypt_info *ci = inode->i_crypt_info;
+ struct crypto_skcipher *tfm = ci->ci_ctfm;
+ int res = 0;
+
+ BUG_ON(len == 0);
+
+ BUILD_BUG_ON(sizeof(iv) != FS_IV_SIZE);
+ BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE);
+ iv.index = cpu_to_le64(lblk_num);
+ memset(iv.padding, 0, sizeof(iv.padding));
+
+ if (ci->ci_essiv_tfm != NULL) {
+ crypto_cipher_encrypt_one(ci->ci_essiv_tfm, (u8 *)&iv,
+ (u8 *)&iv);
+ }
+
+ req = skcipher_request_alloc(tfm, gfp_flags);
+ if (!req) {
+ printk_ratelimited(KERN_ERR
+ "%s: crypto_request_alloc() failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ skcipher_request_set_callback(
+ req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ page_crypt_complete, &ecr);
+
+ sg_init_table(&dst, 1);
+ sg_set_page(&dst, dest_page, len, offs);
+ sg_init_table(&src, 1);
+ sg_set_page(&src, src_page, len, offs);
+ skcipher_request_set_crypt(req, &src, &dst, len, &iv);
+ if (rw == FS_DECRYPT)
+ res = crypto_skcipher_decrypt(req);
+ else
+ res = crypto_skcipher_encrypt(req);
+ if (res == -EINPROGRESS || res == -EBUSY) {
+ BUG_ON(req->base.data != &ecr);
+ wait_for_completion(&ecr.completion);
+ res = ecr.res;
+ }
+ skcipher_request_free(req);
+ if (res) {
+ printk_ratelimited(KERN_ERR
+ "%s: crypto_skcipher_encrypt() returned %d\n",
+ __func__, res);
+ return res;
+ }
+ return 0;
+}
+
+struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
+ gfp_t gfp_flags)
+{
+ ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
+ if (ctx->w.bounce_page == NULL)
+ return ERR_PTR(-ENOMEM);
+ ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
+ return ctx->w.bounce_page;
+}
+
+/**
+ * fscrypt_encrypt_page() - Encrypts a page
+ * @inode: The inode for which the encryption should take place
+ * @page: The page to encrypt. Must be locked for bounce-page
+ * encryption.
+ * @len: Length of data to encrypt in @page and encrypted
+ * data in returned page.
+ * @offs: Offset of data within @page and returned
+ * page holding encrypted data.
+ * @lblk_num: Logical block number. This must be unique for multiple
+ * calls with same inode, except when overwriting
+ * previously written data.
+ * @gfp_flags: The gfp flag for memory allocation
+ *
+ * Encrypts @page using the ctx encryption context. Performs encryption
+ * either in-place or into a newly allocated bounce page.
+ * Called on the page write path.
+ *
+ * Bounce page allocation is the default.
+ * In this case, the contents of @page are encrypted and stored in an
+ * allocated bounce page. @page has to be locked and the caller must call
+ * fscrypt_restore_control_page() on the returned ciphertext page to
+ * release the bounce buffer and the encryption context.
+ *
+ * In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in
+ * fscrypt_operations. Here, the input page is returned with its content
+ * encrypted.
+ *
+ * Return: A page containing the encrypted content on success.
+ * Otherwise, an error pointer.
+ */
+struct page *fscrypt_encrypt_page(const struct inode *inode,
+ struct page *page,
+ unsigned int len,
+ unsigned int offs,
+ u64 lblk_num, gfp_t gfp_flags)
+
+{
+ struct fscrypt_ctx *ctx;
+ struct page *ciphertext_page = page;
+ int err;
+
+ BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);
+
+ if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
+ /* with inplace-encryption we just encrypt the page */
+ err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
+ ciphertext_page, len, offs,
+ gfp_flags);
+ if (err)
+ return ERR_PTR(err);
+
+ return ciphertext_page;
+ }
+
+ BUG_ON(!PageLocked(page));
+
+ ctx = fscrypt_get_ctx(inode, gfp_flags);
+ if (IS_ERR(ctx))
+ return (struct page *)ctx;
+
+ /* The encryption operation will require a bounce page. */
+ ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
+ if (IS_ERR(ciphertext_page))
+ goto errout;
+
+ ctx->w.control_page = page;
+ err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
+ page, ciphertext_page, len, offs,
+ gfp_flags);
+ if (err) {
+ ciphertext_page = ERR_PTR(err);
+ goto errout;
+ }
+ SetPagePrivate(ciphertext_page);
+ set_page_private(ciphertext_page, (unsigned long)ctx);
+ lock_page(ciphertext_page);
+ return ciphertext_page;
+
+errout:
+ fscrypt_release_ctx(ctx);
+ return ciphertext_page;
+}
+EXPORT_SYMBOL(fscrypt_encrypt_page);
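A simplified sketch of the bounce-page write path described in the comment above (hypothetical filesystem code; bio construction and error paths are elided, and in real code fscrypt_restore_control_page() runs from the write-end callback):

        static int example_write_data_page(struct inode *inode, struct page *page,
                                           unsigned int len, u64 lblk_num)
        {
                struct page *ciphertext_page;

                /* @page must be locked; fscrypt returns a bounce page */
                ciphertext_page = fscrypt_encrypt_page(inode, page, len, 0,
                                                       lblk_num, GFP_NOFS);
                if (IS_ERR(ciphertext_page))
                        return PTR_ERR(ciphertext_page);

                /* ... point a bio at ciphertext_page and submit it ... */

                /* after writeback completes: */
                fscrypt_restore_control_page(ciphertext_page);
                return 0;
        }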
+
+/**
+ * fscrypt_decrypt_page() - Decrypts a page in-place
+ * @inode: The corresponding inode for the page to decrypt.
+ * @page: The page to decrypt. Must be locked in case
+ * it is a writeback page (FS_CFLG_OWN_PAGES unset).
+ * @len: Number of bytes in @page to be decrypted.
+ * @offs: Start of data in @page.
+ * @lblk_num: Logical block number.
+ *
+ * Decrypts page in-place using the ctx encryption context.
+ *
+ * Called from the read completion callback.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
+ unsigned int len, unsigned int offs, u64 lblk_num)
+{
+ if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
+ BUG_ON(!PageLocked(page));
+
+ return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
+ len, offs, GFP_NOFS);
+}
+EXPORT_SYMBOL(fscrypt_decrypt_page);
+
+/*
+ * Validate dentries for encrypted directories to make sure we aren't
+ * potentially caching stale data after a key has been added or
+ * removed.
+ */
+static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ struct dentry *dir;
+ int dir_has_key, cached_with_key;
+
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ dir = dget_parent(dentry);
+ if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
+ dput(dir);
+ return 0;
+ }
+
+ /* this should eventually be a flag in d_flags */
+ spin_lock(&dentry->d_lock);
+ cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
+ spin_unlock(&dentry->d_lock);
+ dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
+ dput(dir);
+
+ /*
+ * If the dentry was cached without the key, and it is a
+ * negative dentry, it might be a valid name. We can't check
+ * whether the key has since been made available due to locking
+ * reasons, so we fail the validation here and let the
+ * filesystem's ->lookup() perform this check.
+ *
+ * We also fail the validation if the dentry was created with
+ * the key present, but we no longer have the key, or vice versa.
+ */
+ if ((!cached_with_key && d_is_negative(dentry)) ||
+ (!cached_with_key && dir_has_key) ||
+ (cached_with_key && !dir_has_key))
+ return 0;
+ return 1;
+}
+
+const struct dentry_operations fscrypt_d_ops = {
+ .d_revalidate = fscrypt_d_revalidate,
+};
+EXPORT_SYMBOL(fscrypt_d_ops);
+
+void fscrypt_restore_control_page(struct page *page)
+{
+ struct fscrypt_ctx *ctx;
+
+ ctx = (struct fscrypt_ctx *)page_private(page);
+ set_page_private(page, (unsigned long)NULL);
+ ClearPagePrivate(page);
+ unlock_page(page);
+ fscrypt_release_ctx(ctx);
+}
+EXPORT_SYMBOL(fscrypt_restore_control_page);
+
+static void fscrypt_destroy(void)
+{
+ struct fscrypt_ctx *pos, *n;
+
+ list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
+ kmem_cache_free(fscrypt_ctx_cachep, pos);
+ INIT_LIST_HEAD(&fscrypt_free_ctxs);
+ mempool_destroy(fscrypt_bounce_page_pool);
+ fscrypt_bounce_page_pool = NULL;
+}
+
+/**
+ * fscrypt_initialize() - allocate major buffers for fs encryption.
+ * @cop_flags: fscrypt operations flags
+ *
+ * We only call this when we start accessing encrypted files, since it
+ * results in memory getting allocated that wouldn't otherwise be used.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int fscrypt_initialize(unsigned int cop_flags)
+{
+ int i, res = -ENOMEM;
+
+ /*
+ * No need to allocate a bounce page pool if there already is one or
+ * this FS won't use it.
+ */
+ if (cop_flags & FS_CFLG_OWN_PAGES || fscrypt_bounce_page_pool)
+ return 0;
+
+ mutex_lock(&fscrypt_init_mutex);
+ if (fscrypt_bounce_page_pool)
+ goto already_initialized;
+
+ for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
+ struct fscrypt_ctx *ctx;
+
+ ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
+ if (!ctx)
+ goto fail;
+ list_add(&ctx->free_list, &fscrypt_free_ctxs);
+ }
+
+ fscrypt_bounce_page_pool =
+ mempool_create_page_pool(num_prealloc_crypto_pages, 0);
+ if (!fscrypt_bounce_page_pool)
+ goto fail;
+
+already_initialized:
+ mutex_unlock(&fscrypt_init_mutex);
+ return 0;
+fail:
+ fscrypt_destroy();
+ mutex_unlock(&fscrypt_init_mutex);
+ return res;
+}
+
+/**
+ * fscrypt_init() - Set up for fs encryption.
+ */
+static int __init fscrypt_init(void)
+{
+ fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
+ WQ_HIGHPRI, 0);
+ if (!fscrypt_read_workqueue)
+ goto fail;
+
+ fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
+ if (!fscrypt_ctx_cachep)
+ goto fail_free_queue;
+
+ fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
+ if (!fscrypt_info_cachep)
+ goto fail_free_ctx;
+
+ return 0;
+
+fail_free_ctx:
+ kmem_cache_destroy(fscrypt_ctx_cachep);
+fail_free_queue:
+ destroy_workqueue(fscrypt_read_workqueue);
+fail:
+ return -ENOMEM;
+}
+module_init(fscrypt_init)
+
+/**
+ * fscrypt_exit() - Shutdown the fs encryption system
+ */
+static void __exit fscrypt_exit(void)
+{
+ fscrypt_destroy();
+
+ if (fscrypt_read_workqueue)
+ destroy_workqueue(fscrypt_read_workqueue);
+ kmem_cache_destroy(fscrypt_ctx_cachep);
+ kmem_cache_destroy(fscrypt_info_cachep);
+
+ fscrypt_essiv_cleanup();
+}
+module_exit(fscrypt_exit);
+
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * This contains functions for filename crypto management
+ *
+ * Copyright (C) 2015, Google, Inc.
+ * Copyright (C) 2015, Motorola Mobility
+ *
+ * Written by Uday Savagaonkar, 2014.
+ * Modified by Jaegeuk Kim, 2015.
+ *
+ * This has not yet undergone a rigorous security audit.
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/ratelimit.h>
+#include "fscrypt_private.h"
+
+/**
+ * fname_crypt_complete() - completion callback for filename crypto
+ * @req: The asynchronous cipher request context
+ * @res: The result of the cipher operation
+ */
+static void fname_crypt_complete(struct crypto_async_request *req, int res)
+{
+ struct fscrypt_completion_result *ecr = req->data;
+
+ if (res == -EINPROGRESS)
+ return;
+ ecr->res = res;
+ complete(&ecr->completion);
+}
+
+/**
+ * fname_encrypt() - encrypt a filename
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int fname_encrypt(struct inode *inode,
+ const struct qstr *iname, struct fscrypt_str *oname)
+{
+ struct skcipher_request *req = NULL;
+ DECLARE_FS_COMPLETION_RESULT(ecr);
+ struct fscrypt_info *ci = inode->i_crypt_info;
+ struct crypto_skcipher *tfm = ci->ci_ctfm;
+ int res = 0;
+ char iv[FS_CRYPTO_BLOCK_SIZE];
+ struct scatterlist sg;
+ int padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
+ unsigned int lim;
+ unsigned int cryptlen;
+
+ lim = inode->i_sb->s_cop->max_namelen(inode);
+ if (iname->len <= 0 || iname->len > lim)
+ return -EIO;
+
+ /*
+ * Copy the filename to the output buffer for encrypting in-place and
+ * pad it with the needed number of NUL bytes.
+ */
+ cryptlen = max_t(unsigned int, iname->len, FS_CRYPTO_BLOCK_SIZE);
+ cryptlen = round_up(cryptlen, padding);
+ cryptlen = min(cryptlen, lim);
+ memcpy(oname->name, iname->name, iname->len);
+ memset(oname->name + iname->len, 0, cryptlen - iname->len);
+
+ /* Initialize the IV */
+ memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
+
+ /* Set up the encryption request */
+ req = skcipher_request_alloc(tfm, GFP_NOFS);
+ if (!req) {
+ printk_ratelimited(KERN_ERR
+ "%s: skcipher_request_alloc() failed\n", __func__);
+ return -ENOMEM;
+ }
+ skcipher_request_set_callback(req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ fname_crypt_complete, &ecr);
+ sg_init_one(&sg, oname->name, cryptlen);
+ skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv);
+
+ /* Do the encryption */
+ res = crypto_skcipher_encrypt(req);
+ if (res == -EINPROGRESS || res == -EBUSY) {
+ /* Request is being completed asynchronously; wait for it */
+ wait_for_completion(&ecr.completion);
+ res = ecr.res;
+ }
+ skcipher_request_free(req);
+ if (res < 0) {
+ printk_ratelimited(KERN_ERR
+ "%s: Error (error code %d)\n", __func__, res);
+ return res;
+ }
+
+ oname->len = cryptlen;
+ return 0;
+}
+
+/**
+ * fname_decrypt() - decrypt a filename
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int fname_decrypt(struct inode *inode,
+ const struct fscrypt_str *iname,
+ struct fscrypt_str *oname)
+{
+ struct skcipher_request *req = NULL;
+ DECLARE_FS_COMPLETION_RESULT(ecr);
+ struct scatterlist src_sg, dst_sg;
+ struct fscrypt_info *ci = inode->i_crypt_info;
+ struct crypto_skcipher *tfm = ci->ci_ctfm;
+ int res = 0;
+ char iv[FS_CRYPTO_BLOCK_SIZE];
+ unsigned lim;
+
+ lim = inode->i_sb->s_cop->max_namelen(inode);
+ if (iname->len <= 0 || iname->len > lim)
+ return -EIO;
+
+ /* Allocate request */
+ req = skcipher_request_alloc(tfm, GFP_NOFS);
+ if (!req) {
+ printk_ratelimited(KERN_ERR
+ "%s: crypto_request_alloc() failed\n", __func__);
+ return -ENOMEM;
+ }
+ skcipher_request_set_callback(req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ fname_crypt_complete, &ecr);
+
+ /* Initialize IV */
+ memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
+
+ /* Create decryption request */
+ sg_init_one(&src_sg, iname->name, iname->len);
+ sg_init_one(&dst_sg, oname->name, oname->len);
+ skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
+ res = crypto_skcipher_decrypt(req);
+ if (res == -EINPROGRESS || res == -EBUSY) {
+ wait_for_completion(&ecr.completion);
+ res = ecr.res;
+ }
+ skcipher_request_free(req);
+ if (res < 0) {
+ printk_ratelimited(KERN_ERR
+ "%s: Error (error code %d)\n", __func__, res);
+ return res;
+ }
+
+ oname->len = strnlen(oname->name, iname->len);
+ return 0;
+}
+
+static const char *lookup_table =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";
+
+#define BASE64_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3)
+
+/**
+ * digest_encode() - encode a digest with a base64 variant
+ *
+ * Encodes the input digest using characters from the set [A-Za-z0-9+,].
+ * The encoded string is roughly 4/3 times the size of the input string.
+ */
+static int digest_encode(const char *src, int len, char *dst)
+{
+ int i = 0, bits = 0, ac = 0;
+ char *cp = dst;
+
+ while (i < len) {
+ ac += (((unsigned char) src[i]) << bits);
+ bits += 8;
+ do {
+ *cp++ = lookup_table[ac & 0x3f];
+ ac >>= 6;
+ bits -= 6;
+ } while (bits >= 6);
+ i++;
+ }
+ if (bits)
+ *cp++ = lookup_table[ac & 0x3f];
+ return cp - dst;
+}
+
+static int digest_decode(const char *src, int len, char *dst)
+{
+ int i = 0, bits = 0, ac = 0;
+ const char *p;
+ char *cp = dst;
+
+ while (i < len) {
+ p = strchr(lookup_table, src[i]);
+ if (p == NULL || src[i] == 0)
+ return -2;
+ ac += (p - lookup_table) << bits;
+ bits += 6;
+ if (bits >= 8) {
+ *cp++ = ac & 0xff;
+ ac >>= 8;
+ bits -= 8;
+ }
+ i++;
+ }
+ if (ac)
+ return -1;
+ return cp - dst;
+}
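A worked example of the encoding (illustrative values): the single byte 0xff emits lookup_table[63] = ',' for its low six bits, then lookup_table[3] = 'D' for the remaining two, so the round trip is:

        u8 in = 0xff, out;
        char enc[BASE64_CHARS(1)];      /* 2 characters, not NUL-terminated */

        digest_encode((const char *)&in, 1, enc);       /* enc = ",D" */
        digest_decode(enc, 2, (char *)&out);            /* out = 0xff again */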
+
+u32 fscrypt_fname_encrypted_size(const struct inode *inode, u32 ilen)
+{
+ int padding = 32;
+ struct fscrypt_info *ci = inode->i_crypt_info;
+
+ if (ci)
+ padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
+ ilen = max(ilen, (u32)FS_CRYPTO_BLOCK_SIZE);
+ return round_up(ilen, padding);
+}
+EXPORT_SYMBOL(fscrypt_fname_encrypted_size);
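To make the padding arithmetic concrete (an illustrative computation, not new code): pad flag 3 selects 4 << 3 = 32-byte padding, so an 11-byte name is first raised to FS_CRYPTO_BLOCK_SIZE and then rounded up:

        u32 ilen = 11;                                  /* plaintext name length */
        u32 padding = 4 << 3;                           /* pad flag 3 -> 32 */

        ilen = max(ilen, (u32)FS_CRYPTO_BLOCK_SIZE);    /* 11 -> 16 */
        /* round_up(16, 32) == 32: the name occupies 32 bytes on disk */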
+
+/**
+ * fscrypt_fname_alloc_buffer() - allocate a presented-filename buffer
+ *
+ * Allocates an output buffer that is sufficient for the crypto operation
+ * specified by the context and the direction.
+ */
+int fscrypt_fname_alloc_buffer(const struct inode *inode,
+ u32 ilen, struct fscrypt_str *crypto_str)
+{
+ u32 olen = fscrypt_fname_encrypted_size(inode, ilen);
+ const u32 max_encoded_len =
+ max_t(u32, BASE64_CHARS(FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE),
+ 1 + BASE64_CHARS(sizeof(struct fscrypt_digested_name)));
+
+ crypto_str->len = olen;
+ olen = max(olen, max_encoded_len);
+
+ /*
+ * The allocated buffer holds one extra byte so that the string can
+ * be NUL-terminated.
+ */
+ crypto_str->name = kmalloc(olen + 1, GFP_NOFS);
+ if (!(crypto_str->name))
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL(fscrypt_fname_alloc_buffer);
+
+/**
+ * fscrypt_fname_free_buffer() -
+ *
+ * Frees a buffer allocated by fscrypt_fname_alloc_buffer().
+ */
+void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
+{
+ if (!crypto_str)
+ return;
+ kfree(crypto_str->name);
+ crypto_str->name = NULL;
+}
+EXPORT_SYMBOL(fscrypt_fname_free_buffer);
+
+/**
+ * fscrypt_fname_disk_to_usr() - converts a filename from disk space to user
+ * space
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * If the key is available, we'll decrypt the disk name; otherwise, we'll encode
+ * it for presentation. Short names are directly base64-encoded, while long
+ * names are encoded in fscrypt_digested_name format.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fscrypt_fname_disk_to_usr(struct inode *inode,
+ u32 hash, u32 minor_hash,
+ const struct fscrypt_str *iname,
+ struct fscrypt_str *oname)
+{
+ const struct qstr qname = FSTR_TO_QSTR(iname);
+ struct fscrypt_digested_name digested_name;
+
+ if (fscrypt_is_dot_dotdot(&qname)) {
+ oname->name[0] = '.';
+ oname->name[iname->len - 1] = '.';
+ oname->len = iname->len;
+ return 0;
+ }
+
+ if (iname->len < FS_CRYPTO_BLOCK_SIZE)
+ return -EUCLEAN;
+
+ if (inode->i_crypt_info)
+ return fname_decrypt(inode, iname, oname);
+
+ if (iname->len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE) {
+ oname->len = digest_encode(iname->name, iname->len,
+ oname->name);
+ return 0;
+ }
+ if (hash) {
+ digested_name.hash = hash;
+ digested_name.minor_hash = minor_hash;
+ } else {
+ digested_name.hash = 0;
+ digested_name.minor_hash = 0;
+ }
+ memcpy(digested_name.digest,
+ FSCRYPT_FNAME_DIGEST(iname->name, iname->len),
+ FSCRYPT_FNAME_DIGEST_SIZE);
+ oname->name[0] = '_';
+ oname->len = 1 + digest_encode((const char *)&digested_name,
+ sizeof(digested_name), oname->name + 1);
+ return 0;
+}
+EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
+
+/**
+ * fscrypt_fname_usr_to_disk() - converts a filename from user space to disk
+ * space
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fscrypt_fname_usr_to_disk(struct inode *inode,
+ const struct qstr *iname,
+ struct fscrypt_str *oname)
+{
+ if (fscrypt_is_dot_dotdot(iname)) {
+ oname->name[0] = '.';
+ oname->name[iname->len - 1] = '.';
+ oname->len = iname->len;
+ return 0;
+ }
+ if (inode->i_crypt_info)
+ return fname_encrypt(inode, iname, oname);
+ /*
+ * Without a proper key, a user is not allowed to modify the filenames
+ * in a directory. Consequently, a user-space name cannot be mapped
+ * to a disk-space name.
+ */
+ return -ENOKEY;
+}
+EXPORT_SYMBOL(fscrypt_fname_usr_to_disk);
+
+/**
+ * fscrypt_setup_filename() - prepare to search a possibly encrypted directory
+ * @dir: the directory that will be searched
+ * @iname: the user-provided filename being searched for
+ * @lookup: 1 if we're allowed to proceed without the key because it's
+ * ->lookup() or we're finding the dir_entry for deletion; 0 if we cannot
+ * proceed without the key because we're going to create the dir_entry.
+ * @fname: the filename information to be filled in
+ *
+ * Given a user-provided filename @iname, this function sets @fname->disk_name
+ * to the name that would be stored in the on-disk directory entry, if possible.
+ * If the directory is unencrypted this is simply @iname. Else, if we have the
+ * directory's encryption key, then @iname is the plaintext, so we encrypt it to
+ * get the disk_name.
+ *
+ * Else, for keyless @lookup operations, @iname is the presented ciphertext, so
+ * we decode it to get either the ciphertext disk_name (for short names) or the
+ * fscrypt_digested_name (for long names). Non-@lookup operations will be
+ * impossible in this case, so we fail them with ENOKEY.
+ *
+ * If successful, fscrypt_free_filename() must be called later to clean up.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
+ int lookup, struct fscrypt_name *fname)
+{
+ int ret;
+ int digested;
+
+ memset(fname, 0, sizeof(struct fscrypt_name));
+ fname->usr_fname = iname;
+
+ if (!dir->i_sb->s_cop->is_encrypted(dir) ||
+ fscrypt_is_dot_dotdot(iname)) {
+ fname->disk_name.name = (unsigned char *)iname->name;
+ fname->disk_name.len = iname->len;
+ return 0;
+ }
+ ret = fscrypt_get_encryption_info(dir);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
+ if (dir->i_crypt_info) {
+ ret = fscrypt_fname_alloc_buffer(dir, iname->len,
+ &fname->crypto_buf);
+ if (ret)
+ return ret;
+ ret = fname_encrypt(dir, iname, &fname->crypto_buf);
+ if (ret)
+ goto errout;
+ fname->disk_name.name = fname->crypto_buf.name;
+ fname->disk_name.len = fname->crypto_buf.len;
+ return 0;
+ }
+ if (!lookup)
+ return -ENOKEY;
+
+ /*
+ * We don't have the key and we are doing a lookup; decode the
+ * user-supplied name
+ */
+ if (iname->name[0] == '_') {
+ if (iname->len !=
+ 1 + BASE64_CHARS(sizeof(struct fscrypt_digested_name)))
+ return -ENOENT;
+ digested = 1;
+ } else {
+ if (iname->len >
+ BASE64_CHARS(FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE))
+ return -ENOENT;
+ digested = 0;
+ }
+
+ fname->crypto_buf.name =
+ kmalloc(max_t(size_t, FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE,
+ sizeof(struct fscrypt_digested_name)),
+ GFP_KERNEL);
+ if (fname->crypto_buf.name == NULL)
+ return -ENOMEM;
+
+ ret = digest_decode(iname->name + digested, iname->len - digested,
+ fname->crypto_buf.name);
+ if (ret < 0) {
+ ret = -ENOENT;
+ goto errout;
+ }
+ fname->crypto_buf.len = ret;
+ if (digested) {
+ const struct fscrypt_digested_name *n =
+ (const void *)fname->crypto_buf.name;
+ fname->hash = n->hash;
+ fname->minor_hash = n->minor_hash;
+ } else {
+ fname->disk_name.name = fname->crypto_buf.name;
+ fname->disk_name.len = fname->crypto_buf.len;
+ }
+ return 0;
+
+errout:
+ fscrypt_fname_free_buffer(&fname->crypto_buf);
+ return ret;
+}
+EXPORT_SYMBOL(fscrypt_setup_filename);
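A hypothetical ->lookup() sketch showing the intended calling convention (fscrypt_free_filename() is the cleanup helper from the public fscrypt header; the directory search itself is elided):

        static struct dentry *example_lookup(struct inode *dir,
                                             struct dentry *dentry,
                                             unsigned int flags)
        {
                struct fscrypt_name fname;
                int err;

                err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
                if (err)
                        return ERR_PTR(err);

                /*
                 * Search the directory: match fname.disk_name when it is
                 * set, otherwise (the keyless long-name case) match
                 * fname.hash, fname.minor_hash and the ciphertext digest.
                 */

                fscrypt_free_filename(&fname);
                return NULL;
        }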
--- /dev/null
+/*
+ * fscrypt_private.h
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains encryption key functions.
+ *
+ * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
+ */
+
+#ifndef _FSCRYPT_PRIVATE_H
+#define _FSCRYPT_PRIVATE_H
+
+#include <linux/fscrypt_supp.h>
+#include <crypto/hash.h>
+
+/* Encryption parameters */
+#define FS_IV_SIZE 16
+#define FS_AES_128_ECB_KEY_SIZE 16
+#define FS_AES_128_CBC_KEY_SIZE 16
+#define FS_AES_128_CTS_KEY_SIZE 16
+#define FS_AES_256_GCM_KEY_SIZE 32
+#define FS_AES_256_CBC_KEY_SIZE 32
+#define FS_AES_256_CTS_KEY_SIZE 32
+#define FS_AES_256_XTS_KEY_SIZE 64
+
+#define FS_KEY_DERIVATION_NONCE_SIZE 16
+
+/**
+ * Encryption context for inode
+ *
+ * Protector format:
+ * 1 byte: Protector format (1 = this version)
+ * 1 byte: File contents encryption mode
+ * 1 byte: File names encryption mode
+ * 1 byte: Flags
+ * 8 bytes: Master Key descriptor
+ * 16 bytes: Encryption Key derivation nonce
+ */
+struct fscrypt_context {
+ u8 format;
+ u8 contents_encryption_mode;
+ u8 filenames_encryption_mode;
+ u8 flags;
+ u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+ u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
+} __packed;
+
+#define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1
+
+/*
+ * A pointer to this structure is stored in the file system's in-core
+ * representation of an inode.
+ */
+struct fscrypt_info {
+ u8 ci_data_mode;
+ u8 ci_filename_mode;
+ u8 ci_flags;
+ struct crypto_skcipher *ci_ctfm;
+ struct crypto_cipher *ci_essiv_tfm;
+ u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
+};
+
+typedef enum {
+ FS_DECRYPT = 0,
+ FS_ENCRYPT,
+} fscrypt_direction_t;
+
+#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
+#define FS_CTX_HAS_BOUNCE_BUFFER_FL 0x00000002
+
+struct fscrypt_completion_result {
+ struct completion completion;
+ int res;
+};
+
+#define DECLARE_FS_COMPLETION_RESULT(ecr) \
+ struct fscrypt_completion_result ecr = { \
+ COMPLETION_INITIALIZER_ONSTACK((ecr).completion), 0 }
+
+/* bio helpers (compatibility with the older bi_rw interface) */
+#define REQ_OP_READ READ
+#define REQ_OP_WRITE WRITE
+#define bio_op(bio) ((bio)->bi_rw & 1)
+
+static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
+ unsigned op_flags)
+{
+ bio->bi_rw = op | op_flags;
+}
+
+/* crypto.c */
+extern int fscrypt_initialize(unsigned int cop_flags);
+extern struct workqueue_struct *fscrypt_read_workqueue;
+extern int fscrypt_do_page_crypto(const struct inode *inode,
+ fscrypt_direction_t rw, u64 lblk_num,
+ struct page *src_page,
+ struct page *dest_page,
+ unsigned int len, unsigned int offs,
+ gfp_t gfp_flags);
+extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
+ gfp_t gfp_flags);
+
+/* keyinfo.c */
+extern void __exit fscrypt_essiv_cleanup(void);
+
+#endif /* _FSCRYPT_PRIVATE_H */
--- /dev/null
+/*
+ * key management facility for FS encryption support.
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains encryption key functions.
+ *
+ * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
+ */
+
+#include <keys/user-type.h>
+#include <linux/scatterlist.h>
+#include <linux/ratelimit.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include "fscrypt_private.h"
+
+static struct crypto_shash *essiv_hash_tfm;
+
+static void derive_crypt_complete(struct crypto_async_request *req, int rc)
+{
+ struct fscrypt_completion_result *ecr = req->data;
+
+ if (rc == -EINPROGRESS)
+ return;
+
+ ecr->res = rc;
+ complete(&ecr->completion);
+}
+
+/**
+ * derive_key_aes() - Derive a key using AES-128-ECB
+ * @deriving_key: Encryption key used for derivation.
+ * @source_key: Source key to which to apply derivation.
+ * @derived_raw_key: Derived raw key.
+ *
+ * Return: Zero on success; non-zero otherwise.
+ */
+static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
+ const struct fscrypt_key *source_key,
+ u8 derived_raw_key[FS_MAX_KEY_SIZE])
+{
+ int res = 0;
+ struct skcipher_request *req = NULL;
+ DECLARE_FS_COMPLETION_RESULT(ecr);
+ struct scatterlist src_sg, dst_sg;
+ struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
+
+ if (IS_ERR(tfm)) {
+ res = PTR_ERR(tfm);
+ tfm = NULL;
+ goto out;
+ }
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+ req = skcipher_request_alloc(tfm, GFP_NOFS);
+ if (!req) {
+ res = -ENOMEM;
+ goto out;
+ }
+ skcipher_request_set_callback(req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ derive_crypt_complete, &ecr);
+ res = crypto_skcipher_setkey(tfm, deriving_key,
+ FS_AES_128_ECB_KEY_SIZE);
+ if (res < 0)
+ goto out;
+
+ sg_init_one(&src_sg, source_key->raw, source_key->size);
+ sg_init_one(&dst_sg, derived_raw_key, source_key->size);
+ skcipher_request_set_crypt(req, &src_sg, &dst_sg, source_key->size,
+ NULL);
+ res = crypto_skcipher_encrypt(req);
+ if (res == -EINPROGRESS || res == -EBUSY) {
+ wait_for_completion(&ecr.completion);
+ res = ecr.res;
+ }
+out:
+ skcipher_request_free(req);
+ crypto_free_skcipher(tfm);
+ return res;
+}
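To spell out the easily confused direction of this KDF: the per-inode nonce acts as the AES-128-ECB key, and the master key bytes are the data being encrypted, as validate_user_key() below does:

        u8 derived_key[FS_MAX_KEY_SIZE];

        /* derived_key = AES-128-ECB(key = ctx->nonce, data = master_key->raw) */
        res = derive_key_aes(ctx->nonce, master_key, derived_key);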
+
+static int validate_user_key(struct fscrypt_info *crypt_info,
+ struct fscrypt_context *ctx, u8 *raw_key,
+ const char *prefix, int min_keysize)
+{
+ char *description;
+ struct key *keyring_key;
+ struct fscrypt_key *master_key;
+ const struct user_key_payload *ukp;
+ int res;
+
+ description = kasprintf(GFP_NOFS, "%s%*phN", prefix,
+ FS_KEY_DESCRIPTOR_SIZE,
+ ctx->master_key_descriptor);
+ if (!description)
+ return -ENOMEM;
+
+ keyring_key = request_key(&key_type_logon, description, NULL);
+ kfree(description);
+ if (IS_ERR(keyring_key))
+ return PTR_ERR(keyring_key);
+ down_read(&keyring_key->sem);
+
+ if (keyring_key->type != &key_type_logon) {
+ printk_once(KERN_WARNING
+ "%s: key type must be logon\n", __func__);
+ res = -ENOKEY;
+ goto out;
+ }
+ ukp = user_key_payload(keyring_key);
+ if (ukp->datalen != sizeof(struct fscrypt_key)) {
+ res = -EINVAL;
+ goto out;
+ }
+ master_key = (struct fscrypt_key *)ukp->data;
+ BUILD_BUG_ON(FS_AES_128_ECB_KEY_SIZE != FS_KEY_DERIVATION_NONCE_SIZE);
+
+ if (master_key->size < min_keysize || master_key->size > FS_MAX_KEY_SIZE
+ || master_key->size % AES_BLOCK_SIZE != 0) {
+ printk_once(KERN_WARNING
+ "%s: key size incorrect: %d\n",
+ __func__, master_key->size);
+ res = -ENOKEY;
+ goto out;
+ }
+ res = derive_key_aes(ctx->nonce, master_key, raw_key);
+out:
+ up_read(&keyring_key->sem);
+ key_put(keyring_key);
+ return res;
+}
+
+static const struct {
+ const char *cipher_str;
+ int keysize;
+} available_modes[] = {
+ [FS_ENCRYPTION_MODE_AES_256_XTS] = { "xts(aes)",
+ FS_AES_256_XTS_KEY_SIZE },
+ [FS_ENCRYPTION_MODE_AES_256_CTS] = { "cts(cbc(aes))",
+ FS_AES_256_CTS_KEY_SIZE },
+ [FS_ENCRYPTION_MODE_AES_128_CBC] = { "cbc(aes)",
+ FS_AES_128_CBC_KEY_SIZE },
+ [FS_ENCRYPTION_MODE_AES_128_CTS] = { "cts(cbc(aes))",
+ FS_AES_128_CTS_KEY_SIZE },
+};
+
+static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
+ const char **cipher_str_ret, int *keysize_ret)
+{
+ u32 mode;
+
+ if (!fscrypt_valid_enc_modes(ci->ci_data_mode, ci->ci_filename_mode)) {
+ pr_warn_ratelimited("fscrypt: inode %lu uses unsupported encryption modes (contents mode %d, filenames mode %d)\n",
+ inode->i_ino,
+ ci->ci_data_mode, ci->ci_filename_mode);
+ return -EINVAL;
+ }
+
+ if (S_ISREG(inode->i_mode)) {
+ mode = ci->ci_data_mode;
+ } else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) {
+ mode = ci->ci_filename_mode;
+ } else {
+ WARN_ONCE(1, "fscrypt: filesystem tried to load encryption info for inode %lu, which is not encryptable (file type %d)\n",
+ inode->i_ino, (inode->i_mode & S_IFMT));
+ return -EINVAL;
+ }
+
+ *cipher_str_ret = available_modes[mode].cipher_str;
+ *keysize_ret = available_modes[mode].keysize;
+ return 0;
+}
+
+static void put_crypt_info(struct fscrypt_info *ci)
+{
+ if (!ci)
+ return;
+
+ crypto_free_skcipher(ci->ci_ctfm);
+ crypto_free_cipher(ci->ci_essiv_tfm);
+ kmem_cache_free(fscrypt_info_cachep, ci);
+}
+
+static int derive_essiv_salt(const u8 *key, int keysize, u8 *salt)
+{
+ struct crypto_shash *tfm = READ_ONCE(essiv_hash_tfm);
+
+ /* init hash transform on demand */
+ if (unlikely(!tfm)) {
+ struct crypto_shash *prev_tfm;
+
+ tfm = crypto_alloc_shash("sha256", 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_warn_ratelimited("fscrypt: error allocating SHA-256 transform: %ld\n",
+ PTR_ERR(tfm));
+ return PTR_ERR(tfm);
+ }
+ prev_tfm = cmpxchg(&essiv_hash_tfm, NULL, tfm);
+ if (prev_tfm) {
+ crypto_free_shash(tfm);
+ tfm = prev_tfm;
+ }
+ }
+
+ {
+ SHASH_DESC_ON_STACK(desc, tfm);
+ desc->tfm = tfm;
+ desc->flags = 0;
+
+ return crypto_shash_digest(desc, key, keysize, salt);
+ }
+}
+
+static int init_essiv_generator(struct fscrypt_info *ci, const u8 *raw_key,
+ int keysize)
+{
+ int err;
+ struct crypto_cipher *essiv_tfm;
+ u8 salt[SHA256_DIGEST_SIZE];
+
+ essiv_tfm = crypto_alloc_cipher("aes", 0, 0);
+ if (IS_ERR(essiv_tfm))
+ return PTR_ERR(essiv_tfm);
+
+ ci->ci_essiv_tfm = essiv_tfm;
+
+ err = derive_essiv_salt(raw_key, keysize, salt);
+ if (err)
+ goto out;
+
+ /*
+ * Using SHA256 to derive the salt/key results in AES-256 being used
+ * for IV generation. File contents encryption still uses the
+ * configured keysize (AES-128).
+ */
+ err = crypto_cipher_setkey(essiv_tfm, salt, sizeof(salt));
+ if (err)
+ goto out;
+
+out:
+ memzero_explicit(salt, sizeof(salt));
+ return err;
+}
+
+void __exit fscrypt_essiv_cleanup(void)
+{
+ crypto_free_shash(essiv_hash_tfm);
+}
+
+int fscrypt_get_encryption_info(struct inode *inode)
+{
+ struct fscrypt_info *crypt_info;
+ struct fscrypt_context ctx;
+ struct crypto_skcipher *ctfm;
+ const char *cipher_str;
+ int keysize;
+ u8 *raw_key = NULL;
+ int res;
+
+ if (inode->i_crypt_info)
+ return 0;
+
+ res = fscrypt_initialize(inode->i_sb->s_cop->flags);
+ if (res)
+ return res;
+
+ res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
+ if (res < 0) {
+ if (!fscrypt_dummy_context_enabled(inode) ||
+ inode->i_sb->s_cop->is_encrypted(inode))
+ return res;
+ /* Fake up a context for an unencrypted directory */
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
+ ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
+ ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
+ memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE);
+ } else if (res != sizeof(ctx)) {
+ return -EINVAL;
+ }
+
+ if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
+ return -EINVAL;
+
+ if (ctx.flags & ~FS_POLICY_FLAGS_VALID)
+ return -EINVAL;
+
+ crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS);
+ if (!crypt_info)
+ return -ENOMEM;
+
+ crypt_info->ci_flags = ctx.flags;
+ crypt_info->ci_data_mode = ctx.contents_encryption_mode;
+ crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
+ crypt_info->ci_ctfm = NULL;
+ crypt_info->ci_essiv_tfm = NULL;
+ memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
+ sizeof(crypt_info->ci_master_key));
+
+ res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize);
+ if (res)
+ goto out;
+
+ /*
+ * This cannot be a stack buffer because it is passed to the scatterlist
+ * crypto API as part of key derivation.
+ */
+ res = -ENOMEM;
+ raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS);
+ if (!raw_key)
+ goto out;
+
+ res = validate_user_key(crypt_info, &ctx, raw_key, FS_KEY_DESC_PREFIX,
+ keysize);
+ if (res && inode->i_sb->s_cop->key_prefix) {
+ int res2 = validate_user_key(crypt_info, &ctx, raw_key,
+ inode->i_sb->s_cop->key_prefix,
+ keysize);
+ if (res2) {
+ if (res2 == -ENOKEY)
+ res = -ENOKEY;
+ goto out;
+ }
+ } else if (res) {
+ goto out;
+ }
+ ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
+ if (!ctfm || IS_ERR(ctfm)) {
+ res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
+ pr_debug("%s: error %d (inode %lu) allocating crypto tfm\n",
+ __func__, res, inode->i_ino);
+ goto out;
+ }
+ crypt_info->ci_ctfm = ctfm;
+ crypto_skcipher_clear_flags(ctfm, ~0);
+ crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
+ /*
+ * If the provided key is longer than keysize, we use only the first
+ * keysize bytes of the derived key.
+ */
+ res = crypto_skcipher_setkey(ctfm, raw_key, keysize);
+ if (res)
+ goto out;
+
+ if (S_ISREG(inode->i_mode) &&
+ crypt_info->ci_data_mode == FS_ENCRYPTION_MODE_AES_128_CBC) {
+ res = init_essiv_generator(crypt_info, raw_key, keysize);
+ if (res) {
+ pr_debug("%s: error %d (inode %lu) allocating essiv tfm\n",
+ __func__, res, inode->i_ino);
+ goto out;
+ }
+ }
+ if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
+ crypt_info = NULL;
+out:
+ if (res == -ENOKEY)
+ res = 0;
+ put_crypt_info(crypt_info);
+ kzfree(raw_key);
+ return res;
+}
+EXPORT_SYMBOL(fscrypt_get_encryption_info);
+
+void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
+{
+ struct fscrypt_info *prev;
+
+ if (ci == NULL)
+ ci = ACCESS_ONCE(inode->i_crypt_info);
+ if (ci == NULL)
+ return;
+
+ prev = cmpxchg(&inode->i_crypt_info, ci, NULL);
+ if (prev != ci)
+ return;
+
+ put_crypt_info(ci);
+}
+EXPORT_SYMBOL(fscrypt_put_encryption_info);
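Typical use is from a filesystem's inode-eviction path (hypothetical sketch); passing a NULL fscrypt_info makes the function look up and drop inode->i_crypt_info itself:

        static void example_evict_inode(struct inode *inode)
        {
                /* ... write back and truncate the inode's pages ... */

                fscrypt_put_encryption_info(inode, NULL);
        }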
--- /dev/null
+/*
+ * Encryption policy functions for per-file encryption support.
+ *
+ * Copyright (C) 2015, Google, Inc.
+ * Copyright (C) 2015, Motorola Mobility.
+ *
+ * Written by Michael Halcrow, 2015.
+ * Modified by Jaegeuk Kim, 2015.
+ */
+
+#include <linux/random.h>
+#include <linux/string.h>
+#include <linux/mount.h>
+#include "fscrypt_private.h"
+
+/*
+ * check whether an encryption policy is consistent with an encryption context
+ */
+static bool is_encryption_context_consistent_with_policy(
+ const struct fscrypt_context *ctx,
+ const struct fscrypt_policy *policy)
+{
+ return memcmp(ctx->master_key_descriptor, policy->master_key_descriptor,
+ FS_KEY_DESCRIPTOR_SIZE) == 0 &&
+ (ctx->flags == policy->flags) &&
+ (ctx->contents_encryption_mode ==
+ policy->contents_encryption_mode) &&
+ (ctx->filenames_encryption_mode ==
+ policy->filenames_encryption_mode);
+}
+
+static int create_encryption_context_from_policy(struct inode *inode,
+ const struct fscrypt_policy *policy)
+{
+ struct fscrypt_context ctx;
+
+ ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
+ memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
+ FS_KEY_DESCRIPTOR_SIZE);
+
+ if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode,
+ policy->filenames_encryption_mode))
+ return -EINVAL;
+
+ if (policy->flags & ~FS_POLICY_FLAGS_VALID)
+ return -EINVAL;
+
+ ctx.contents_encryption_mode = policy->contents_encryption_mode;
+ ctx.filenames_encryption_mode = policy->filenames_encryption_mode;
+ ctx.flags = policy->flags;
+ BUILD_BUG_ON(sizeof(ctx.nonce) != FS_KEY_DERIVATION_NONCE_SIZE);
+ get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE);
+
+ return inode->i_sb->s_cop->set_context(inode, &ctx, sizeof(ctx), NULL);
+}
+
+int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg)
+{
+ struct fscrypt_policy policy;
+ struct inode *inode = file_inode(filp);
+ int ret;
+ struct fscrypt_context ctx;
+
+ if (copy_from_user(&policy, arg, sizeof(policy)))
+ return -EFAULT;
+
+ if (!inode_owner_or_capable(inode))
+ return -EACCES;
+
+ if (policy.version != 0)
+ return -EINVAL;
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ inode_lock(inode);
+
+ ret = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
+ if (ret == -ENODATA) {
+ if (!S_ISDIR(inode->i_mode))
+ ret = -ENOTDIR;
+ else if (!inode->i_sb->s_cop->empty_dir(inode))
+ ret = -ENOTEMPTY;
+ else
+ ret = create_encryption_context_from_policy(inode,
+ &policy);
+ } else if (ret == sizeof(ctx) &&
+ is_encryption_context_consistent_with_policy(&ctx,
+ &policy)) {
+ /* The file already uses the same encryption policy. */
+ ret = 0;
+ } else if (ret >= 0 || ret == -ERANGE) {
+ /* The file already uses a different encryption policy. */
+ ret = -EEXIST;
+ }
+
+ inode_unlock(inode);
+
+ mnt_drop_write_file(filp);
+ return ret;
+}
+EXPORT_SYMBOL(fscrypt_ioctl_set_policy);
+
+int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct fscrypt_context ctx;
+ struct fscrypt_policy policy;
+ int res;
+
+ if (!inode->i_sb->s_cop->is_encrypted(inode))
+ return -ENODATA;
+
+ res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
+ if (res < 0 && res != -ERANGE)
+ return res;
+ if (res != sizeof(ctx))
+ return -EINVAL;
+ if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
+ return -EINVAL;
+
+ policy.version = 0;
+ policy.contents_encryption_mode = ctx.contents_encryption_mode;
+ policy.filenames_encryption_mode = ctx.filenames_encryption_mode;
+ policy.flags = ctx.flags;
+ memcpy(policy.master_key_descriptor, ctx.master_key_descriptor,
+ FS_KEY_DESCRIPTOR_SIZE);
+
+ if (copy_to_user(arg, &policy, sizeof(policy)))
+ return -EFAULT;
+ return 0;
+}
+EXPORT_SYMBOL(fscrypt_ioctl_get_policy);
+
+/**
+ * fscrypt_has_permitted_context() - is a file's encryption policy permitted
+ * within its directory?
+ *
+ * @parent: inode for parent directory
+ * @child: inode for file being looked up, opened, or linked into @parent
+ *
+ * Filesystems must call this before permitting access to an inode in a
+ * situation where the parent directory is encrypted (either before allowing
+ * ->lookup() to succeed, or for a regular file before allowing it to be opened)
+ * and before any operation that involves linking an inode into an encrypted
+ * directory, including link, rename, and cross rename. It enforces the
+ * constraint that within a given encrypted directory tree, all files use the
+ * same encryption policy. The pre-access check is needed to detect potentially
+ * malicious offline violations of this constraint, while the link and rename
+ * checks are needed to prevent online violations of this constraint.
+ *
+ * Return: 1 if permitted, 0 if forbidden. If forbidden, the caller must fail
+ * the filesystem operation with EPERM.
+ */
+int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
+{
+ const struct fscrypt_operations *cops = parent->i_sb->s_cop;
+ const struct fscrypt_info *parent_ci, *child_ci;
+ struct fscrypt_context parent_ctx, child_ctx;
+ int res;
+
+ /* No restrictions on file types which are never encrypted */
+ if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) &&
+ !S_ISLNK(child->i_mode))
+ return 1;
+
+ /* No restrictions if the parent directory is unencrypted */
+ if (!cops->is_encrypted(parent))
+ return 1;
+
+ /* Encrypted directories must not contain unencrypted files */
+ if (!cops->is_encrypted(child))
+ return 0;
+
+ /*
+ * Both parent and child are encrypted, so verify they use the same
+ * encryption policy. Compare the fscrypt_info structs if the keys are
+ * available, otherwise retrieve and compare the fscrypt_contexts.
+ *
+ * Note that the fscrypt_context retrieval will be required frequently
+ * when accessing an encrypted directory tree without the key.
+ * Performance-wise this is not a big deal because we already don't
+ * really optimize for file access without the key (to the extent that
+ * such access is even possible), given that any attempted access
+ * already causes a fscrypt_context retrieval and keyring search.
+ *
+ * In any case, if an unexpected error occurs, fall back to "forbidden".
+ */
+
+ res = fscrypt_get_encryption_info(parent);
+ if (res)
+ return 0;
+ res = fscrypt_get_encryption_info(child);
+ if (res)
+ return 0;
+ parent_ci = parent->i_crypt_info;
+ child_ci = child->i_crypt_info;
+
+ if (parent_ci && child_ci) {
+ return memcmp(parent_ci->ci_master_key, child_ci->ci_master_key,
+ FS_KEY_DESCRIPTOR_SIZE) == 0 &&
+ (parent_ci->ci_data_mode == child_ci->ci_data_mode) &&
+ (parent_ci->ci_filename_mode ==
+ child_ci->ci_filename_mode) &&
+ (parent_ci->ci_flags == child_ci->ci_flags);
+ }
+
+ res = cops->get_context(parent, &parent_ctx, sizeof(parent_ctx));
+ if (res != sizeof(parent_ctx))
+ return 0;
+
+ res = cops->get_context(child, &child_ctx, sizeof(child_ctx));
+ if (res != sizeof(child_ctx))
+ return 0;
+
+ return memcmp(parent_ctx.master_key_descriptor,
+ child_ctx.master_key_descriptor,
+ FS_KEY_DESCRIPTOR_SIZE) == 0 &&
+ (parent_ctx.contents_encryption_mode ==
+ child_ctx.contents_encryption_mode) &&
+ (parent_ctx.filenames_encryption_mode ==
+ child_ctx.filenames_encryption_mode) &&
+ (parent_ctx.flags == child_ctx.flags);
+}
+EXPORT_SYMBOL(fscrypt_has_permitted_context);
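For instance, a filesystem's link path might enforce this as follows (hypothetical sketch, mirroring the EPERM convention documented above):

        static int example_link(struct dentry *old_dentry, struct inode *dir,
                                struct dentry *dentry)
        {
                if (dir->i_sb->s_cop->is_encrypted(dir) &&
                    !fscrypt_has_permitted_context(dir, d_inode(old_dentry)))
                        return -EPERM;

                /* ... continue with the normal link path ... */
                return 0;
        }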
+
+/**
+ * fscrypt_inherit_context() - Sets a child context from its parent
+ * @parent: Parent inode from which the context is inherited.
+ * @child: Child inode that inherits the context from @parent.
+ * @fs_data: private data given by FS.
+ * @preload: preload child i_crypt_info if true
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fscrypt_inherit_context(struct inode *parent, struct inode *child,
+ void *fs_data, bool preload)
+{
+ struct fscrypt_context ctx;
+ struct fscrypt_info *ci;
+ int res;
+
+ res = fscrypt_get_encryption_info(parent);
+ if (res < 0)
+ return res;
+
+ ci = parent->i_crypt_info;
+ if (ci == NULL)
+ return -ENOKEY;
+
+ ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
+ ctx.contents_encryption_mode = ci->ci_data_mode;
+ ctx.filenames_encryption_mode = ci->ci_filename_mode;
+ ctx.flags = ci->ci_flags;
+ memcpy(ctx.master_key_descriptor, ci->ci_master_key,
+ FS_KEY_DESCRIPTOR_SIZE);
+ get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE);
+ res = parent->i_sb->s_cop->set_context(child, &ctx,
+ sizeof(ctx), fs_data);
+ if (res)
+ return res;
+ return preload ? fscrypt_get_encryption_info(child): 0;
+}
+EXPORT_SYMBOL(fscrypt_inherit_context);
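A hypothetical mkdir or create path would call this once the child inode exists but before it becomes visible, e.g.:

        /* fs_data is filesystem-private (a held xattr page, a journal handle,
           or NULL, depending on the filesystem's ->set_context) */
        err = fscrypt_inherit_context(dir, inode, fs_data, true /* preload */);
        if (err)
                goto out_fail;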
*/
if (sdio->boundary) {
ret = dio_send_cur_page(dio, sdio, map_bh);
- dio_bio_submit(dio, sdio);
+ if (sdio->bio)
+ dio_bio_submit(dio, sdio);
page_cache_release(sdio->cur_page);
sdio->cur_page = NULL;
}
error = misc_register(&ls->ls_device);
if (error) {
kfree(ls->ls_device.name);
+ /*
+ * This has to be set to NULL to avoid a double-free in
+ * dlm_device_deregister().
+ */
+ ls->ls_device.name = NULL;
}
fail:
return error;
wait_queue_head_t *whead;
rcu_read_lock();
- /* If it is cleared by POLLFREE, it should be rcu-safe */
- whead = rcu_dereference(pwq->whead);
+ /*
+ * If it is cleared by POLLFREE, it should be rcu-safe.
+ * If we read NULL we need a barrier paired with
+ * smp_store_release() in ep_poll_callback(), otherwise
+ * we rely on whead->lock.
+ */
+ whead = smp_load_acquire(&pwq->whead);
if (whead)
remove_wait_queue(whead, &pwq->wait);
rcu_read_unlock();
struct epitem *epi = ep_item_from_wait(wait);
struct eventpoll *ep = epi->ep;
- if ((unsigned long)key & POLLFREE) {
- ep_pwq_from_wait(wait)->whead = NULL;
- /*
- * whead = NULL above can race with ep_remove_wait_queue()
- * which can do another remove_wait_queue() after us, so we
- * can't use __remove_wait_queue(). whead->lock is held by
- * the caller.
- */
- list_del_init(&wait->task_list);
- }
-
spin_lock_irqsave(&ep->lock, flags);
/*
if (pwake)
ep_poll_safewake(&ep->poll_wait);
+
+ if ((unsigned long)key & POLLFREE) {
+ /*
+ * If we race with ep_remove_wait_queue() it can miss
+ * ->whead = NULL and do another remove_wait_queue() after
+ * us, so we can't use __remove_wait_queue().
+ */
+ list_del_init(&wait->task_list);
+ /*
+ * ->whead != NULL protects us from the race with ep_free()
+ * or ep_remove(), ep_remove_wait_queue() takes whead->lock
+ * held by the caller. Once we nullify it, nothing protects
+ * ep/epi or even wait.
+ */
+ smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
+ }
+
return 1;
}
switch (type) {
case ACL_TYPE_ACCESS:
name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
- if (acl) {
- error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
- if (error)
- return error;
- inode->i_ctime = ext4_current_time(inode);
- ext4_mark_inode_dirty(handle, inode);
- }
break;
case ACL_TYPE_DEFAULT:
{
handle_t *handle;
int error, retries = 0;
+ umode_t mode = inode->i_mode;
+ int update_mode = 0;
retry:
handle = ext4_journal_start(inode, EXT4_HT_XATTR,
if (IS_ERR(handle))
return PTR_ERR(handle);
+ if ((type == ACL_TYPE_ACCESS) && acl) {
+ error = posix_acl_update_mode(inode, &mode, &acl);
+ if (error)
+ goto out_stop;
+ update_mode = 1;
+ }
+
error = __ext4_set_acl(handle, inode, type, acl);
+ if (!error && update_mode) {
+ inode->i_mode = mode;
+ inode->i_ctime = ext4_current_time(inode);
+ ext4_mark_inode_dirty(handle, inode);
+ }
+out_stop:
ext4_journal_stop(handle);
if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
page->index, page, page, GFP_NOFS);
}
-int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
+int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
+ ext4_fsblk_t pblk, ext4_lblk_t len)
{
struct ext4_crypto_ctx *ctx;
struct page *ciphertext_page = NULL;
struct bio *bio;
- ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
- ext4_fsblk_t pblk = ext4_ext_pblock(ex);
- unsigned int len = ext4_ext_get_actual_len(ex);
int ret, err = 0;
#if 0
struct page *plaintext_page,
gfp_t gfp_flags);
int ext4_decrypt(struct page *page);
-int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex);
+int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
+ ext4_fsblk_t pblk, ext4_lblk_t len);
extern const struct dentry_operations ext4_encrypted_d_ops;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
extern qsize_t *ext4_get_reserved_space(struct inode *inode);
extern void ext4_da_update_reserve_space(struct inode *inode,
int used, int quota_claim);
+extern int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk,
+ ext4_fsblk_t pblk, ext4_lblk_t len);
/* indirect.c */
extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
{
ext4_fsblk_t ee_pblock;
unsigned int ee_len;
- int ret;
ee_len = ext4_ext_get_actual_len(ex);
ee_pblock = ext4_ext_pblock(ex);
-
- if (ext4_encrypted_inode(inode))
- return ext4_encrypted_zeroout(inode, ex);
-
- ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
- if (ret > 0)
- ret = 0;
-
- return ret;
+ return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock,
+ ee_len);
}
/*
mutex_lock(&inode->i_mutex);
isize = i_size_read(inode);
- if (offset >= isize) {
+ if (offset < 0 || offset >= isize) {
mutex_unlock(&inode->i_mutex);
return -ENXIO;
}
mutex_lock(&inode->i_mutex);
isize = i_size_read(inode);
- if (offset >= isize) {
+ if (offset < 0 || offset >= isize) {
mutex_unlock(&inode->i_mutex);
return -ENXIO;
}
return 0;
}
+int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
+ ext4_lblk_t len)
+{
+ int ret;
+
+ if (ext4_encrypted_inode(inode))
+ return ext4_encrypted_zeroout(inode, lblk, pblk, len);
+
+ ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
+ if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
+
#define check_block_validity(inode, map) \
__check_block_validity((inode), __func__, __LINE__, (map))
static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
{
int len;
- loff_t size = i_size_read(mpd->inode);
+ loff_t size;
int err;
BUG_ON(page->index != mpd->first_page);
- if (page->index == size >> PAGE_CACHE_SHIFT)
- len = size & ~PAGE_CACHE_MASK;
- else
- len = PAGE_CACHE_SIZE;
clear_page_dirty_for_io(page);
+ /*
+ * We have to be very careful here! Nothing protects writeback path
+ * against i_size changes and the page can be writeably mapped into
+ * page tables. So an application can be growing i_size and writing
+ * data through mmap while writeback runs. clear_page_dirty_for_io()
+ * write-protects our page in page tables and the page cannot get
+ * written to again until we release page lock. So only after
+ * clear_page_dirty_for_io() we are safe to sample i_size for
+ * ext4_bio_write_page() to zero-out tail of the written page. We rely
+ * on the barrier provided by TestClearPageDirty in
+ * clear_page_dirty_for_io() to make sure i_size is really sampled only
+ * after page tables are updated.
+ */
+ size = i_size_read(mpd->inode);
+ if (page->index == size >> PAGE_SHIFT)
+ len = size & ~PAGE_MASK;
+ else
+ len = PAGE_SIZE;
err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
if (!err)
mpd->wbc->nr_to_write--;
int credits;
u8 old_file_type;
+ if ((ext4_encrypted_inode(old_dir) &&
+ !ext4_has_encryption_key(old_dir)) ||
+ (ext4_encrypted_inode(new_dir) &&
+ !ext4_has_encryption_key(new_dir)))
+ return -ENOKEY;
+
retval = dquot_initialize(old.dir);
if (retval)
return retval;
u8 new_file_type;
int retval;
+ if ((ext4_encrypted_inode(old_dir) &&
+ !ext4_has_encryption_key(old_dir)) ||
+ (ext4_encrypted_inode(new_dir) &&
+ !ext4_has_encryption_key(new_dir)))
+ return -ENOKEY;
+
if ((ext4_encrypted_inode(old_dir) ||
ext4_encrypted_inode(new_dir)) &&
(old_dir != new_dir) &&
unsigned int s_flags = sb->s_flags;
int nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
+ int quota_update = 0;
int i;
#endif
if (!es->s_last_orphan) {
#ifdef CONFIG_QUOTA
/* Needed for iput() to work correctly and not trash data */
sb->s_flags |= MS_ACTIVE;
- /* Turn on quotas so that they are updated correctly */
+
+ /*
+ * Turn on quotas that were not enabled for read-only mounts if the
+ * filesystem has the quota feature, so that they are updated correctly.
+ */
+ if (ext4_has_feature_quota(sb) && (s_flags & MS_RDONLY)) {
+ int ret = ext4_enable_quotas(sb);
+
+ if (!ret)
+ quota_update = 1;
+ else
+ ext4_msg(sb, KERN_ERR,
+ "Cannot turn on quotas: error %d", ret);
+ }
+
+ /* Turn on journaled quotas used by the old quota style */
for (i = 0; i < EXT4_MAXQUOTAS; i++) {
if (EXT4_SB(sb)->s_qf_names[i]) {
int ret = ext4_quota_on_mount(sb, i);
- if (ret < 0)
+
+ if (!ret)
+ quota_update = 1;
+ else
ext4_msg(sb, KERN_ERR,
"Cannot turn on journaled "
- "quota: error %d", ret);
+ "quota: type %d: error %d", i, ret);
}
}
#endif
ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
- /* Turn quotas off */
- for (i = 0; i < EXT4_MAXQUOTAS; i++) {
- if (sb_dqopt(sb)->files[i])
- dquot_quota_off(sb, i);
+ /* Turn off quotas if they were enabled for orphan cleanup */
+ if (quota_update) {
+ for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+ if (sb_dqopt(sb)->files[i])
+ dquot_quota_off(sb, i);
+ }
}
#endif
sb->s_flags = s_flags; /* Restore MS_RDONLY status */
err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
DQUOT_USAGE_ENABLED);
if (err) {
+ for (type--; type >= 0; type--)
+ dquot_quota_off(sb, type);
+
ext4_warning(sb,
"Failed to enable quota tracking "
"(type=%d, err=%d). Please run "
config F2FS_FS
tristate "F2FS filesystem support"
depends on BLOCK
+ select CRYPTO
+ select CRYPTO_CRC32
help
F2FS is based on Log-structured File System (LFS), which supports
versatile "flash-friendly" features. The design has been focused on
bool "F2FS Encryption"
depends on F2FS_FS
depends on F2FS_FS_XATTR
- select CRYPTO_AES
- select CRYPTO_CBC
- select CRYPTO_ECB
- select CRYPTO_XTS
- select CRYPTO_CTS
- select CRYPTO_CTR
- select CRYPTO_SHA256
- select KEYS
- select ENCRYPTED_KEYS
+ select FS_ENCRYPTION
help
Enable encryption of f2fs files and directories. This
feature is similar to ecryptfs, but it is more memory
information and block IO patterns in the filesystem level.
If unsure, say N.
+
+config F2FS_FAULT_INJECTION
+ bool "F2FS fault injection facility"
+ depends on F2FS_FS
+ help
+ Test F2FS by injecting faults such as ENOMEM, ENOSPC, and so on.
+
+ If unsure, say N.
f2fs-y := dir.o file.o inode.o namei.o hash.o super.o inline.o
f2fs-y += checkpoint.o gc.o data.o node.o segment.o recovery.o
-f2fs-y += shrinker.o extent_cache.o
+f2fs-y += shrinker.o extent_cache.o sysfs.o
f2fs-$(CONFIG_F2FS_STAT_FS) += debug.o
f2fs-$(CONFIG_F2FS_FS_XATTR) += xattr.o
f2fs-$(CONFIG_F2FS_FS_POSIX_ACL) += acl.o
f2fs-$(CONFIG_F2FS_IO_TRACE) += trace.o
-f2fs-$(CONFIG_F2FS_FS_ENCRYPTION) += crypto_policy.o crypto.o \
- crypto_key.o crypto_fname.o
return ERR_PTR(-EINVAL);
}
-static void *f2fs_acl_to_disk(const struct posix_acl *acl, size_t *size)
+static void *f2fs_acl_to_disk(struct f2fs_sb_info *sbi,
+ const struct posix_acl *acl, size_t *size)
{
struct f2fs_acl_header *f2fs_acl;
struct f2fs_acl_entry *entry;
int i;
- f2fs_acl = kmalloc(sizeof(struct f2fs_acl_header) + acl->a_count *
- sizeof(struct f2fs_acl_entry), GFP_NOFS);
+ f2fs_acl = f2fs_kmalloc(sbi, sizeof(struct f2fs_acl_header) +
+ acl->a_count * sizeof(struct f2fs_acl_entry),
+ GFP_NOFS);
if (!f2fs_acl)
return ERR_PTR(-ENOMEM);
retval = f2fs_getxattr(inode, name_index, "", NULL, 0, dpage);
if (retval > 0) {
- value = kmalloc(retval, GFP_F2FS_ZERO);
+ value = f2fs_kmalloc(F2FS_I_SB(inode), retval, GFP_F2FS_ZERO);
if (!value)
return ERR_PTR(-ENOMEM);
retval = f2fs_getxattr(inode, name_index, "", value,
static int __f2fs_set_acl(struct inode *inode, int type,
struct posix_acl *acl, struct page *ipage)
{
- struct f2fs_inode_info *fi = F2FS_I(inode);
int name_index;
void *value = NULL;
size_t size = 0;
int error;
+ umode_t mode = inode->i_mode;
switch (type) {
case ACL_TYPE_ACCESS:
name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
if (acl && !ipage) {
- error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+ error = posix_acl_update_mode(inode, &mode, &acl);
if (error)
return error;
- set_acl_inode(fi, inode->i_mode);
+ set_acl_inode(inode, mode);
}
break;
}
if (acl) {
- value = f2fs_acl_to_disk(acl, &size);
+ value = f2fs_acl_to_disk(F2FS_I_SB(inode), acl, &size);
if (IS_ERR(value)) {
- clear_inode_flag(fi, FI_ACL_MODE);
- return (int)PTR_ERR(value);
+ clear_inode_flag(inode, FI_ACL_MODE);
+ return PTR_ERR(value);
}
}
if (!error)
set_cached_acl(inode, type, acl);
- clear_inode_flag(fi, FI_ACL_MODE);
+ clear_inode_flag(inode, FI_ACL_MODE);
return error;
}
if (error)
return error;
+ f2fs_mark_inode_dirty_sync(inode, true);
+
if (default_acl) {
error = __f2fs_set_acl(inode, ACL_TYPE_DEFAULT, default_acl,
ipage);
#ifdef CONFIG_F2FS_FS_POSIX_ACL
extern struct posix_acl *f2fs_get_acl(struct inode *, int);
-extern int f2fs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+extern int f2fs_set_acl(struct inode *, struct posix_acl *, int);
extern int f2fs_init_acl(struct inode *, struct inode *, struct page *,
struct page *);
#else
-#define f2fs_check_acl NULL
#define f2fs_get_acl NULL
#define f2fs_set_acl NULL
static struct kmem_cache *ino_entry_slab;
struct kmem_cache *inode_entry_slab;
+void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
+{
+ set_ckpt_flags(sbi, CP_ERROR_FLAG);
+ sbi->sb->s_flags |= MS_RDONLY;
+ if (!end_io)
+ f2fs_flush_merged_writes(sbi);
+}
+
/*
* We guarantee no failure on the returned page.
*/
struct address_space *mapping = META_MAPPING(sbi);
struct page *page = NULL;
repeat:
- page = grab_cache_page(mapping, index);
+ page = f2fs_grab_cache_page(mapping, index, false);
if (!page) {
cond_resched();
goto repeat;
}
- f2fs_wait_on_page_writeback(page, META);
- SetPageUptodate(page);
+ f2fs_wait_on_page_writeback(page, META, true);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
return page;
}
struct f2fs_io_info fio = {
.sbi = sbi,
.type = META,
- .rw = READ_SYNC | REQ_META | REQ_PRIO,
- .blk_addr = index,
+ .op = REQ_OP_READ,
+ .op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
+ .old_blkaddr = index,
+ .new_blkaddr = index,
.encrypted_page = NULL,
};
if (unlikely(!is_meta))
- fio.rw &= ~REQ_META;
+ fio.op_flags &= ~REQ_META;
repeat:
- page = grab_cache_page(mapping, index);
+ page = f2fs_grab_cache_page(mapping, index, false);
if (!page) {
cond_resched();
goto repeat;
* meta page.
*/
if (unlikely(!PageUptodate(page)))
- f2fs_stop_checkpoint(sbi);
+ f2fs_stop_checkpoint(sbi, false);
out:
return page;
}
int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
int type, bool sync)
{
- block_t prev_blk_addr = 0;
struct page *page;
block_t blkno = start;
struct f2fs_io_info fio = {
.sbi = sbi,
.type = META,
- .rw = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA,
+ .op = REQ_OP_READ,
+ .op_flags = sync ? (REQ_SYNC | REQ_META | REQ_PRIO) :
+ REQ_RAHEAD,
.encrypted_page = NULL,
+ .in_list = false,
};
+ struct blk_plug plug;
if (unlikely(type == META_POR))
- fio.rw &= ~REQ_META;
+ fio.op_flags &= ~REQ_META;
+ blk_start_plug(&plug);
for (; nrpages-- > 0; blkno++) {
if (!is_valid_blkaddr(sbi, blkno, type))
NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid)))
blkno = 0;
/* get nat block addr */
- fio.blk_addr = current_nat_addr(sbi,
+ fio.new_blkaddr = current_nat_addr(sbi,
blkno * NAT_ENTRY_PER_BLOCK);
break;
case META_SIT:
/* get sit block addr */
- fio.blk_addr = current_sit_addr(sbi,
+ fio.new_blkaddr = current_sit_addr(sbi,
blkno * SIT_ENTRY_PER_BLOCK);
- if (blkno != start && prev_blk_addr + 1 != fio.blk_addr)
- goto out;
- prev_blk_addr = fio.blk_addr;
break;
case META_SSA:
case META_CP:
case META_POR:
- fio.blk_addr = blkno;
+ fio.new_blkaddr = blkno;
break;
default:
BUG();
}
- page = grab_cache_page(META_MAPPING(sbi), fio.blk_addr);
+ page = f2fs_grab_cache_page(META_MAPPING(sbi),
+ fio.new_blkaddr, false);
if (!page)
continue;
if (PageUptodate(page)) {
}
fio.page = page;
- f2fs_submit_page_mbio(&fio);
+ f2fs_submit_page_bio(&fio);
f2fs_put_page(page, 0);
}
out:
- f2fs_submit_merged_bio(sbi, META, READ);
+ blk_finish_plug(&plug);
return blkno - start;
}
bool readahead = false;
page = find_get_page(META_MAPPING(sbi), index);
- if (!page || (page && !PageUptodate(page)))
+ if (!page || !PageUptodate(page))
readahead = true;
f2fs_put_page(page, 0);
if (readahead)
- ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR, true);
+ ra_meta_pages(sbi, index, BIO_MAX_PAGES, META_POR, true);
}
-static int f2fs_write_meta_page(struct page *page,
- struct writeback_control *wbc)
+static int __f2fs_write_meta_page(struct page *page,
+ struct writeback_control *wbc,
+ enum iostat_type io_type)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
if (unlikely(f2fs_cp_error(sbi)))
goto redirty_out;
- f2fs_wait_on_page_writeback(page, META);
- write_meta_page(sbi, page);
+ write_meta_page(sbi, page, io_type);
dec_page_count(sbi, F2FS_DIRTY_META);
- unlock_page(page);
if (wbc->for_reclaim)
- f2fs_submit_merged_bio(sbi, META, WRITE);
+ f2fs_submit_merged_write_cond(sbi, page->mapping->host,
+ 0, page->index, META);
+
+ unlock_page(page);
+
+ if (unlikely(f2fs_cp_error(sbi)))
+ f2fs_submit_merged_write(sbi, META);
+
return 0;
redirty_out:
return AOP_WRITEPAGE_ACTIVATE;
}
+static int f2fs_write_meta_page(struct page *page,
+ struct writeback_control *wbc)
+{
+ return __f2fs_write_meta_page(page, wbc, FS_META_IO);
+}
+
static int f2fs_write_meta_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
long diff, written;
- trace_f2fs_writepages(mapping->host, wbc, META);
+ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+ goto skip_write;
/* collect a number of dirty meta pages and write together */
if (wbc->for_kupdate ||
get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
goto skip_write;
- /* if mounting is failed, skip writing node pages */
- mutex_lock(&sbi->cp_mutex);
+ /* if the trylock fails, the checkpoint will flush dirty pages instead */
+ if (!mutex_trylock(&sbi->cp_mutex))
+ goto skip_write;
+
+ trace_f2fs_writepages(mapping->host, wbc, META);
diff = nr_pages_to_write(sbi, META, wbc);
- written = sync_meta_pages(sbi, META, wbc->nr_to_write);
+ written = sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO);
mutex_unlock(&sbi->cp_mutex);
wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
return 0;
skip_write:
wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
+ trace_f2fs_writepages(mapping->host, wbc, META);
return 0;
}
long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
- long nr_to_write)
+ long nr_to_write, enum iostat_type io_type)
{
struct address_space *mapping = META_MAPPING(sbi);
- pgoff_t index = 0, end = LONG_MAX, prev = LONG_MAX;
+ pgoff_t index = 0, end = ULONG_MAX, prev = ULONG_MAX;
struct pagevec pvec;
long nwritten = 0;
struct writeback_control wbc = {
.for_reclaim = 0,
};
+ struct blk_plug plug;
pagevec_init(&pvec, 0);
+ blk_start_plug(&plug);
+
while (index <= end) {
int i, nr_pages;
nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
- if (prev == LONG_MAX)
+ if (prev == ULONG_MAX)
prev = page->index - 1;
if (nr_to_write != LONG_MAX && page->index != prev + 1) {
pagevec_release(&pvec);
goto continue_unlock;
}
+ f2fs_wait_on_page_writeback(page, META, true);
+
+ BUG_ON(PageWriteback(page));
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
- if (mapping->a_ops->writepage(page, &wbc)) {
+ if (__f2fs_write_meta_page(page, &wbc, io_type)) {
unlock_page(page);
break;
}
}
stop:
if (nwritten)
- f2fs_submit_merged_bio(sbi, type, WRITE);
+ f2fs_submit_merged_write(sbi, type);
+
+ blk_finish_plug(&plug);
return nwritten;
}
{
trace_f2fs_set_page_dirty(page, META);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
if (!PageDirty(page)) {
- __set_page_dirty_nobuffers(page);
+ f2fs_set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
SetPagePrivate(page);
f2fs_trace_pid(page);
.set_page_dirty = f2fs_set_meta_page_dirty,
.invalidatepage = f2fs_invalidate_page,
.releasepage = f2fs_release_page,
+#ifdef CONFIG_MIGRATION
+ .migratepage = f2fs_migrate_page,
+#endif
};
static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
spin_unlock(&im->ino_lock);
}
-void add_dirty_inode(struct f2fs_sb_info *sbi, nid_t ino, int type)
+void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
/* add new dirty ino entry into list */
__add_ino_entry(sbi, ino, type);
}
-void remove_dirty_inode(struct f2fs_sb_info *sbi, nid_t ino, int type)
+void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
/* remove dirty ino entry from list */
__remove_ino_entry(sbi, ino, type);
return e ? true : false;
}
-void release_dirty_inode(struct f2fs_sb_info *sbi)
+void release_ino_entry(struct f2fs_sb_info *sbi, bool all)
{
struct ino_entry *e, *tmp;
int i;
- for (i = APPEND_INO; i <= UPDATE_INO; i++) {
+ for (i = all ? ORPHAN_INO : APPEND_INO; i <= UPDATE_INO; i++) {
struct inode_management *im = &sbi->im[i];
spin_lock(&im->ino_lock);
int err = 0;
spin_lock(&im->ino_lock);
+
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_ORPHAN)) {
+ spin_unlock(&im->ino_lock);
+ f2fs_show_injection_info(FAULT_ORPHAN);
+ return -ENOSPC;
+ }
+#endif
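For reference, the time_to_inject() gate used above usually follows the pattern sketched below; the exact struct f2fs_fault_info fields are assumptions here, seeded from the inject_rate/inject_type knobs:

	/* sketch: fire once every inject_rate qualifying calls, but only for
	 * fault types enabled in the inject_type bitmask (assumed fields) */
	static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
	{
		struct f2fs_fault_info *ffi = &sbi->fault_info;

		if (!ffi->inject_rate || !(ffi->inject_type & (1 << type)))
			return false;
		atomic_inc(&ffi->inject_ops);
		if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
			atomic_set(&ffi->inject_ops, 0);
			return true;
		}
		return false;
	}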
if (unlikely(im->ino_num >= sbi->max_orphans))
err = -ENOSPC;
else
spin_unlock(&im->ino_lock);
}
-void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
+void add_orphan_inode(struct inode *inode)
{
/* add new orphan ino entry into list */
- __add_ino_entry(sbi, ino, ORPHAN_INO);
+ __add_ino_entry(F2FS_I_SB(inode), inode->i_ino, ORPHAN_INO);
+ update_inode_page(inode);
}
void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
struct inode *inode;
+ struct node_info ni;
+ int err = acquire_orphan_inode(sbi);
+
+ if (err) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: orphan failed (ino=%x), run fsck to fix.",
+ __func__, ino);
+ return err;
+ }
- inode = f2fs_iget(sbi->sb, ino);
+ __add_ino_entry(sbi, ino, ORPHAN_INO);
+
+ inode = f2fs_iget_retry(sbi->sb, ino);
if (IS_ERR(inode)) {
/*
* there should be a bug that we can't find the entry
/* truncate all the data during iput */
iput(inode);
+
+ get_node_info(sbi, ino, &ni);
+
+ /* ENOMEM was fully retried in f2fs_evict_inode. */
+ if (ni.blk_addr != NULL_ADDR) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: orphan failed (ino=%x) by kernel, retry mount.",
+ __func__, ino);
+ return -EIO;
+ }
+ __remove_ino_entry(sbi, ino, ORPHAN_INO);
return 0;
}
int recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
block_t start_blk, orphan_blocks, i, j;
- int err;
+ unsigned int s_flags = sbi->sb->s_flags;
+ int err = 0;
- if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
+ if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
return 0;
+ if (s_flags & MS_RDONLY) {
+ f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
+ sbi->sb->s_flags &= ~MS_RDONLY;
+ }
+
+#ifdef CONFIG_QUOTA
+ /* Needed for iput() to work correctly and not trash data */
+ sbi->sb->s_flags |= MS_ACTIVE;
+ /* Turn on quotas so that they are updated correctly */
+ f2fs_enable_quota_files(sbi);
+#endif
+
start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);
err = recover_orphan_inode(sbi, ino);
if (err) {
f2fs_put_page(page, 1);
- return err;
+ goto out;
}
}
f2fs_put_page(page, 1);
}
/* clear Orphan Flag */
- clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
- return 0;
+ clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
+out:
+#ifdef CONFIG_QUOTA
+ /* Turn quotas off */
+ f2fs_quota_off_umount(sbi->sb);
+#endif
+ sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+
+ return err;
}
static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
}
}
-static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
- block_t cp_addr, unsigned long long *version)
+static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
+ struct f2fs_checkpoint **cp_block, struct page **cp_page,
+ unsigned long long *version)
{
- struct page *cp_page_1, *cp_page_2 = NULL;
unsigned long blk_size = sbi->blocksize;
- struct f2fs_checkpoint *cp_block;
- unsigned long long cur_version = 0, pre_version = 0;
- size_t crc_offset;
+ size_t crc_offset = 0;
__u32 crc = 0;
- /* Read the 1st cp block in this CP pack */
- cp_page_1 = get_meta_page(sbi, cp_addr);
+ *cp_page = get_meta_page(sbi, cp_addr);
+ *cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
- /* get the version number */
- cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
- crc_offset = le32_to_cpu(cp_block->checksum_offset);
- if (crc_offset >= blk_size)
- goto invalid_cp1;
+ crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
+ if (crc_offset > (blk_size - sizeof(__le32))) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "invalid crc_offset: %zu", crc_offset);
+ return -EINVAL;
+ }
- crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
- if (!f2fs_crc_valid(crc, cp_block, crc_offset))
- goto invalid_cp1;
+ crc = cur_cp_crc(*cp_block);
+ if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
+ f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
+ return -EINVAL;
+ }
- pre_version = cur_cp_version(cp_block);
+ *version = cur_cp_version(*cp_block);
+ return 0;
+}
- /* Read the 2nd cp block in this CP pack */
- cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
- cp_page_2 = get_meta_page(sbi, cp_addr);
+static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
+ block_t cp_addr, unsigned long long *version)
+{
+ struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
+ struct f2fs_checkpoint *cp_block = NULL;
+ unsigned long long cur_version = 0, pre_version = 0;
+ int err;
- cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
- crc_offset = le32_to_cpu(cp_block->checksum_offset);
- if (crc_offset >= blk_size)
- goto invalid_cp2;
+ err = get_checkpoint_version(sbi, cp_addr, &cp_block,
+ &cp_page_1, version);
+ if (err)
+ goto invalid_cp1;
+ pre_version = *version;
- crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
- if (!f2fs_crc_valid(crc, cp_block, crc_offset))
+ cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
+ err = get_checkpoint_version(sbi, cp_addr, &cp_block,
+ &cp_page_2, version);
+ if (err)
goto invalid_cp2;
-
- cur_version = cur_cp_version(cp_block);
+ cur_version = *version;
if (cur_version == pre_version) {
*version = cur_version;
cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
memcpy(sbi->ckpt, cp_block, blk_size);
+ /* Sanity checking of checkpoint */
+ if (sanity_check_ckpt(sbi))
+ goto free_fail_no_cp;
+
+ if (cur_page == cp1)
+ sbi->cur_cp_pack = 1;
+ else
+ sbi->cur_cp_pack = 2;
+
if (cp_blks <= 1)
goto done;
f2fs_put_page(cp2, 1);
return 0;
+free_fail_no_cp:
+ f2fs_put_page(cp1, 1);
+ f2fs_put_page(cp2, 1);
fail_no_cp:
kfree(sbi->ckpt);
return -EINVAL;
}
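The refactor above folds the duplicated head/tail checks into get_checkpoint_version(). The rule it serves: each checkpoint pack stores the version in both its first and last block, a pack is valid only when both copies pass the CRC check and carry the same version, and mount proceeds from the valid pack with the newer version. Simplified decision (sketch; cpN_ver is zero when pack N failed validation):

	if (cp1 && cp2)
		cur_page = (cp2_ver > cp1_ver) ? cp2 : cp1;
	else if (cp1)
		cur_page = cp1;
	else if (cp2)
		cur_page = cp2;
	else
		goto fail_no_cp;	/* no usable checkpoint at all */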
-static int __add_dirty_inode(struct inode *inode, struct inode_entry *new)
+static void __add_dirty_inode(struct inode *inode, enum inode_type type)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;
- if (is_inode_flag_set(F2FS_I(inode), FI_DIRTY_DIR))
- return -EEXIST;
+ if (is_inode_flag_set(inode, flag))
+ return;
- set_inode_flag(F2FS_I(inode), FI_DIRTY_DIR);
- F2FS_I(inode)->dirty_dir = new;
- list_add_tail(&new->list, &sbi->dir_inode_list);
- stat_inc_dirty_dir(sbi);
- return 0;
+ set_inode_flag(inode, flag);
+ if (!f2fs_is_volatile_file(inode))
+ list_add_tail(&F2FS_I(inode)->dirty_list,
+ &sbi->inode_list[type]);
+ stat_inc_dirty_inode(sbi, type);
+}
+
+static void __remove_dirty_inode(struct inode *inode, enum inode_type type)
+{
+ int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;
+
+ if (get_dirty_pages(inode) || !is_inode_flag_set(inode, flag))
+ return;
+
+ list_del_init(&F2FS_I(inode)->dirty_list);
+ clear_inode_flag(inode, flag);
+ stat_dec_dirty_inode(F2FS_I_SB(inode), type);
}
void update_dirty_page(struct inode *inode, struct page *page)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct inode_entry *new;
- int ret = 0;
+ enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;
if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
!S_ISLNK(inode->i_mode))
return;
- if (!S_ISDIR(inode->i_mode)) {
- inode_inc_dirty_pages(inode);
- goto out;
- }
-
- new = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
- new->inode = inode;
- INIT_LIST_HEAD(&new->list);
-
- spin_lock(&sbi->dir_inode_lock);
- ret = __add_dirty_inode(inode, new);
+ spin_lock(&sbi->inode_lock[type]);
+ if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH))
+ __add_dirty_inode(inode, type);
inode_inc_dirty_pages(inode);
- spin_unlock(&sbi->dir_inode_lock);
+ spin_unlock(&sbi->inode_lock[type]);
- if (ret)
- kmem_cache_free(inode_entry_slab, new);
-out:
SetPagePrivate(page);
f2fs_trace_pid(page);
}
-void add_dirty_dir_inode(struct inode *inode)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct inode_entry *new =
- f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
- int ret = 0;
-
- new->inode = inode;
- INIT_LIST_HEAD(&new->list);
-
- spin_lock(&sbi->dir_inode_lock);
- ret = __add_dirty_inode(inode, new);
- spin_unlock(&sbi->dir_inode_lock);
-
- if (ret)
- kmem_cache_free(inode_entry_slab, new);
-}
-
-void remove_dirty_dir_inode(struct inode *inode)
+void remove_dirty_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct inode_entry *entry;
+ enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;
- if (!S_ISDIR(inode->i_mode))
+ if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
+ !S_ISLNK(inode->i_mode))
return;
- spin_lock(&sbi->dir_inode_lock);
- if (get_dirty_pages(inode) ||
- !is_inode_flag_set(F2FS_I(inode), FI_DIRTY_DIR)) {
- spin_unlock(&sbi->dir_inode_lock);
+ if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH))
return;
- }
- entry = F2FS_I(inode)->dirty_dir;
- list_del(&entry->list);
- F2FS_I(inode)->dirty_dir = NULL;
- clear_inode_flag(F2FS_I(inode), FI_DIRTY_DIR);
- stat_dec_dirty_dir(sbi);
- spin_unlock(&sbi->dir_inode_lock);
- kmem_cache_free(inode_entry_slab, entry);
-
- /* Only from the recovery routine */
- if (is_inode_flag_set(F2FS_I(inode), FI_DELAY_IPUT)) {
- clear_inode_flag(F2FS_I(inode), FI_DELAY_IPUT);
- iput(inode);
- }
+ spin_lock(&sbi->inode_lock[type]);
+ __remove_dirty_inode(inode, type);
+ spin_unlock(&sbi->inode_lock[type]);
}
-void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
+int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
{
struct list_head *head;
- struct inode_entry *entry;
struct inode *inode;
+ struct f2fs_inode_info *fi;
+ bool is_dir = (type == DIR_INODE);
+ unsigned long ino = 0;
+
+ trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir,
+ get_pages(sbi, is_dir ?
+ F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
retry:
if (unlikely(f2fs_cp_error(sbi)))
- return;
+ return -EIO;
- spin_lock(&sbi->dir_inode_lock);
+ spin_lock(&sbi->inode_lock[type]);
- head = &sbi->dir_inode_list;
+ head = &sbi->inode_list[type];
if (list_empty(head)) {
- spin_unlock(&sbi->dir_inode_lock);
- return;
+ spin_unlock(&sbi->inode_lock[type]);
+ trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
+ get_pages(sbi, is_dir ?
+ F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
+ return 0;
}
- entry = list_entry(head->next, struct inode_entry, list);
- inode = igrab(entry->inode);
- spin_unlock(&sbi->dir_inode_lock);
+ fi = list_first_entry(head, struct f2fs_inode_info, dirty_list);
+ inode = igrab(&fi->vfs_inode);
+ spin_unlock(&sbi->inode_lock[type]);
if (inode) {
+ unsigned long cur_ino = inode->i_ino;
+
+ if (is_dir)
+ F2FS_I(inode)->cp_task = current;
+
filemap_fdatawrite(inode->i_mapping);
+
+ if (is_dir)
+ F2FS_I(inode)->cp_task = NULL;
+
iput(inode);
+ /* We need to yield the CPU to other writers. */
+ if (ino == cur_ino) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ cond_resched();
+ } else {
+ ino = cur_ino;
+ }
} else {
/*
* We should submit the bio, since several dentry pages in
* the freeing inode are still under writeback.
*/
- f2fs_submit_merged_bio(sbi, DATA, WRITE);
+ f2fs_submit_merged_write(sbi, DATA);
cond_resched();
}
goto retry;
}
+int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
+{
+ struct list_head *head = &sbi->inode_list[DIRTY_META];
+ struct inode *inode;
+ struct f2fs_inode_info *fi;
+ s64 total = get_pages(sbi, F2FS_DIRTY_IMETA);
+
+ while (total--) {
+ if (unlikely(f2fs_cp_error(sbi)))
+ return -EIO;
+
+ spin_lock(&sbi->inode_lock[DIRTY_META]);
+ if (list_empty(head)) {
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+ return 0;
+ }
+ fi = list_first_entry(head, struct f2fs_inode_info,
+ gdirty_list);
+ inode = igrab(&fi->vfs_inode);
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+ if (inode) {
+ sync_inode_metadata(inode, 0);
+
+ /* it's on eviction */
+ if (is_inode_flag_set(inode, FI_DIRTY_INODE))
+ update_inode_page(inode);
+ iput(inode);
+ }
+ }
+ return 0;
+}
+
+static void __prepare_cp_block(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ nid_t last_nid = nm_i->next_scan_nid;
+
+ next_free_nid(sbi, &last_nid);
+ ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
+ ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
+ ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
+ ckpt->next_free_nid = cpu_to_le32(last_nid);
+}
+
/*
* Freeze all the FS-operations for checkpoint.
*/
/* write all the dirty dentry pages */
if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
f2fs_unlock_all(sbi);
- sync_dirty_dir_inodes(sbi);
- if (unlikely(f2fs_cp_error(sbi))) {
- err = -EIO;
+ err = sync_dirty_inodes(sbi, DIR_INODE);
+ if (err)
goto out;
- }
+ cond_resched();
goto retry_flush_dents;
}
/*
* POR: we should ensure that there are no dirty node pages
- * until finishing nat/sit flush.
+ * until finishing nat/sit flush. inode->i_blocks can be updated.
*/
+ down_write(&sbi->node_change);
+
+ if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
+ up_write(&sbi->node_change);
+ f2fs_unlock_all(sbi);
+ err = f2fs_sync_inode_meta(sbi);
+ if (err)
+ goto out;
+ cond_resched();
+ goto retry_flush_dents;
+ }
+
retry_flush_nodes:
down_write(&sbi->node_write);
if (get_pages(sbi, F2FS_DIRTY_NODES)) {
up_write(&sbi->node_write);
- sync_node_pages(sbi, 0, &wbc);
- if (unlikely(f2fs_cp_error(sbi))) {
+ err = sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
+ if (err) {
+ up_write(&sbi->node_change);
f2fs_unlock_all(sbi);
- err = -EIO;
goto out;
}
+ cond_resched();
goto retry_flush_nodes;
}
+
+ /*
+ * sbi->node_change is used only for the AIO write_begin path, which
+ * produces dirty node blocks and updates some checkpoint values via
+ * block allocation.
+ */
+ __prepare_cp_block(sbi);
+ up_write(&sbi->node_change);
out:
blk_finish_plug(&plug);
return err;
for (;;) {
prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);
- if (!get_pages(sbi, F2FS_WRITEBACK))
+ if (!get_pages(sbi, F2FS_WB_CP_DATA))
break;
- io_schedule();
+ io_schedule_timeout(5 * HZ);
}
finish_wait(&sbi->cp_wait, &wait);
}
-static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+{
+ unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sbi->cp_lock, flags);
+
+ if ((cpc->reason & CP_UMOUNT) &&
+ le32_to_cpu(ckpt->cp_pack_total_block_count) >
+ sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks)
+ disable_nat_bits(sbi, false);
+
+ if (cpc->reason & CP_TRIMMED)
+ __set_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
+
+ if (cpc->reason & CP_UMOUNT)
+ __set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
+ else
+ __clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
+
+ if (cpc->reason & CP_FASTBOOT)
+ __set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
+ else
+ __clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
+
+ if (orphan_num)
+ __set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
+ else
+ __clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
+
+ if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
+ __set_ckpt_flags(ckpt, CP_FSCK_FLAG);
+
+ /* set this flag to activate crc|cp_ver for recovery */
+ __set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
+
+ spin_unlock_irqrestore(&sbi->cp_lock, flags);
+}
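update_ckpt_flags() touches ckpt->ckpt_flags only under sbi->cp_lock with interrupts disabled, because f2fs_stop_checkpoint() can set CP_ERROR_FLAG from bio end_io context. The locked set/clear wrappers are assumed to look roughly like:

	/* sketch of the assumed locked wrapper around __set_ckpt_flags() */
	static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
	{
		unsigned long flags;

		spin_lock_irqsave(&sbi->cp_lock, flags);
		__set_ckpt_flags(F2FS_CKPT(sbi), f);
		spin_unlock_irqrestore(&sbi->cp_lock, flags);
	}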
+
+static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
- struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
struct f2fs_nm_info *nm_i = NM_I(sbi);
- unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
- nid_t last_nid = nm_i->next_scan_nid;
+ unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num, flags;
block_t start_blk;
unsigned int data_sum_blocks, orphan_blocks;
__u32 crc32 = 0;
int i;
int cp_payload_blks = __cp_payload(sbi);
- block_t discard_blk = NEXT_FREE_BLKADDR(sbi, curseg);
- bool invalidate = false;
-
- /*
- * This avoids to conduct wrong roll-forward operations and uses
- * metapages, so should be called prior to sync_meta_pages below.
- */
- if (discard_next_dnode(sbi, discard_blk))
- invalidate = true;
+ struct super_block *sb = sbi->sb;
+ struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
+ u64 kbytes_written;
/* Flush all the NAT/SIT pages */
while (get_pages(sbi, F2FS_DIRTY_META)) {
- sync_meta_pages(sbi, META, LONG_MAX);
+ sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
if (unlikely(f2fs_cp_error(sbi)))
- return;
+ return -EIO;
}
- next_free_nid(sbi, &last_nid);
-
/*
* modify checkpoint
* version number is already updated
*/
ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
- ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
ckpt->cur_node_segno[i] =
curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
}
- ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
- ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
- ckpt->next_free_nid = cpu_to_le32(last_nid);
-
/* 2 cp + n data seg summary + orphan inode blocks */
data_sum_blocks = npages_for_summary_flush(sbi, false);
+ spin_lock_irqsave(&sbi->cp_lock, flags);
if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
- set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
+ __set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
else
- clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
+ __clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
+ spin_unlock_irqrestore(&sbi->cp_lock, flags);
orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num);
ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
cp_payload_blks + data_sum_blocks +
orphan_blocks);
- if (cpc->reason == CP_UMOUNT)
- set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
- else
- clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
-
- if (cpc->reason == CP_FASTBOOT)
- set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
- else
- clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
-
- if (orphan_num)
- set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
- else
- clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
-
- if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
- set_ckpt_flags(ckpt, CP_FSCK_FLAG);
+ /* update the checkpoint flags for this CP pack */
+ update_ckpt_flags(sbi, cpc);
/* update SIT/NAT bitmap */
get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));
- crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset));
+ crc32 = f2fs_crc32(sbi, ckpt, le32_to_cpu(ckpt->checksum_offset));
*((__le32 *)((unsigned char *)ckpt +
le32_to_cpu(ckpt->checksum_offset)))
= cpu_to_le32(crc32);
- start_blk = __start_cp_addr(sbi);
+ start_blk = __start_cp_next_addr(sbi);
+
+ /* write nat bits */
+ if (enabled_nat_bits(sbi, cpc)) {
+ __u64 cp_ver = cur_cp_version(ckpt);
+ block_t blk;
+
+ cp_ver |= ((__u64)crc32 << 32);
+ *(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver);
+
+ blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks;
+ for (i = 0; i < nm_i->nat_bits_blocks; i++)
+ update_meta_page(sbi, nm_i->nat_bits +
+ (i << F2FS_BLKSIZE_BITS), blk + i);
+
+ /* Flush all the NAT BITS pages */
+ while (get_pages(sbi, F2FS_DIRTY_META)) {
+ sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
+ if (unlikely(f2fs_cp_error(sbi)))
+ return -EIO;
+ }
+ }
/* need to wait for end_io results */
wait_on_all_pages_writeback(sbi);
if (unlikely(f2fs_cp_error(sbi)))
- return;
+ return -EIO;
/* write out checkpoint buffer at block 0 */
update_meta_page(sbi, ckpt, start_blk++);
write_data_summaries(sbi, start_blk);
start_blk += data_sum_blocks;
+
+ /* Record write statistics in the hot node summary */
+ kbytes_written = sbi->kbytes_written;
+ if (sb->s_bdev->bd_part)
+ kbytes_written += BD_PART_WRITTEN(sbi);
+
+ seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written);
+
if (__remain_node_summaries(cpc->reason)) {
write_node_summaries(sbi, start_blk);
start_blk += NR_CURSEG_NODE_TYPE;
wait_on_all_pages_writeback(sbi);
if (unlikely(f2fs_cp_error(sbi)))
- return;
+ return -EIO;
- filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LONG_MAX);
- filemap_fdatawait_range(META_MAPPING(sbi), 0, LONG_MAX);
+ filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LLONG_MAX);
+ filemap_fdatawait_range(META_MAPPING(sbi), 0, LLONG_MAX);
/* update user_block_counts */
sbi->last_valid_block_count = sbi->total_valid_block_count;
- sbi->alloc_valid_block_count = 0;
+ percpu_counter_set(&sbi->alloc_valid_block_count, 0);
/* Here, we only have one bio having CP pack */
- sync_meta_pages(sbi, META_FLUSH, LONG_MAX);
+ sync_meta_pages(sbi, META_FLUSH, LONG_MAX, FS_CP_META_IO);
/* wait for previous submitted meta pages writeback */
wait_on_all_pages_writeback(sbi);
- /*
- * invalidate meta page which is used temporarily for zeroing out
- * block at the end of warm node chain.
- */
- if (invalidate)
- invalidate_mapping_pages(META_MAPPING(sbi), discard_blk,
- discard_blk);
-
- release_dirty_inode(sbi);
+ release_ino_entry(sbi, false);
if (unlikely(f2fs_cp_error(sbi)))
- return;
+ return -EIO;
- clear_prefree_segments(sbi, cpc);
clear_sbi_flag(sbi, SBI_IS_DIRTY);
+ clear_sbi_flag(sbi, SBI_NEED_CP);
+ __set_cp_next_pack(sbi);
+
+ /*
+ * redirty the superblock if metadata such as node pages or the inode
+ * cache was updated while the checkpoint was being written.
+ */
+ if (get_pages(sbi, F2FS_DIRTY_NODES) ||
+ get_pages(sbi, F2FS_DIRTY_IMETA))
+ set_sbi_flag(sbi, SBI_IS_DIRTY);
+
+ f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));
+
+ return 0;
}
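The nat bits payload written above is prefixed with the checkpoint version fused with the CRC (cp_ver | (__u64)crc32 << 32), so mount can reject stale nat bits with a single 64-bit compare. Sketch of the matching check at load time (simplified):

	__u64 cp_ver = cur_cp_version(ckpt) | ((__u64)cur_cp_crc(ckpt) << 32);

	/* stale bits: fall back to a full NAT scan */
	if (cp_ver != le64_to_cpu(*(__le64 *)nm_i->nat_bits))
		disable_nat_bits(sbi, true);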
/*
* We guarantee that this checkpoint procedure will not fail.
*/
-void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
unsigned long long ckpt_ver;
+ int err = 0;
mutex_lock(&sbi->cp_mutex);
if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
- (cpc->reason == CP_FASTBOOT || cpc->reason == CP_SYNC ||
- (cpc->reason == CP_DISCARD && !sbi->discard_blks)))
+ ((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
+ ((cpc->reason & CP_DISCARD) && !sbi->discard_blks)))
goto out;
- if (unlikely(f2fs_cp_error(sbi)))
+ if (unlikely(f2fs_cp_error(sbi))) {
+ err = -EIO;
goto out;
- if (f2fs_readonly(sbi->sb))
+ }
+ if (f2fs_readonly(sbi->sb)) {
+ err = -EROFS;
goto out;
+ }
trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");
- if (block_operations(sbi))
+ err = block_operations(sbi);
+ if (err)
goto out;
trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");
- f2fs_submit_merged_bio(sbi, DATA, WRITE);
- f2fs_submit_merged_bio(sbi, NODE, WRITE);
- f2fs_submit_merged_bio(sbi, META, WRITE);
+ f2fs_flush_merged_writes(sbi);
+
+ /* this is the case of multiple fstrims without any changes */
+ if (cpc->reason & CP_DISCARD) {
+ if (!exist_trim_candidates(sbi, cpc)) {
+ unblock_operations(sbi);
+ goto out;
+ }
+
+ if (NM_I(sbi)->dirty_nat_cnt == 0 &&
+ SIT_I(sbi)->dirty_sentries == 0 &&
+ prefree_segments(sbi) == 0) {
+ flush_sit_entries(sbi, cpc);
+ clear_prefree_segments(sbi, cpc);
+ unblock_operations(sbi);
+ goto out;
+ }
+ }
/*
* update checkpoint pack index
ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);
/* write cached NAT/SIT entries to NAT/SIT area */
- flush_nat_entries(sbi);
+ flush_nat_entries(sbi, cpc);
flush_sit_entries(sbi, cpc);
/* unlock all the fs_lock[] in do_checkpoint() */
- do_checkpoint(sbi, cpc);
+ err = do_checkpoint(sbi, cpc);
+ if (err)
+ release_discard_addrs(sbi);
+ else
+ clear_prefree_segments(sbi, cpc);
unblock_operations(sbi);
stat_inc_cp_count(sbi->stat_info);
- if (cpc->reason == CP_RECOVERY)
+ if (cpc->reason & CP_RECOVERY)
f2fs_msg(sbi->sb, KERN_NOTICE,
"checkpoint: version = %llx", ckpt_ver);
/* do checkpoint periodically */
- sbi->cp_expires = round_jiffies_up(jiffies + HZ * sbi->cp_interval);
+ f2fs_update_time(sbi, CP_TIME);
+ trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
out:
mutex_unlock(&sbi->cp_mutex);
- trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
+ return err;
}
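With write_checkpoint() returning an error code, callers can finally propagate checkpoint failure. A typical synchronous caller (sketch, mirroring the cpc->reason bits tested above):

	struct cp_control cpc = {
		.reason = CP_SYNC,	/* or CP_UMOUNT | CP_TRIMMED, etc. */
	};
	int err = write_checkpoint(sbi, &cpc);	/* e.g. from sync_fs */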
void init_ino_entry_info(struct f2fs_sb_info *sbi)
+++ /dev/null
-/*
- * linux/fs/f2fs/crypto.c
- *
- * Copied from linux/fs/ext4/crypto.c
- *
- * Copyright (C) 2015, Google, Inc.
- * Copyright (C) 2015, Motorola Mobility
- *
- * This contains encryption functions for f2fs
- *
- * Written by Michael Halcrow, 2014.
- *
- * Filename encryption additions
- * Uday Savagaonkar, 2014
- * Encryption policy handling additions
- * Ildar Muslukhov, 2014
- * Remove ext4_encrypted_zeroout(),
- * add f2fs_restore_and_release_control_page()
- * Jaegeuk Kim, 2015.
- *
- * This has not yet undergone a rigorous security audit.
- *
- * The usage of AES-XTS should conform to recommendations in NIST
- * Special Publication 800-38E and IEEE P1619/D16.
- */
-#include <crypto/hash.h>
-#include <crypto/sha.h>
-#include <keys/user-type.h>
-#include <keys/encrypted-type.h>
-#include <linux/crypto.h>
-#include <linux/ecryptfs.h>
-#include <linux/gfp.h>
-#include <linux/kernel.h>
-#include <linux/key.h>
-#include <linux/list.h>
-#include <linux/mempool.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/random.h>
-#include <linux/scatterlist.h>
-#include <linux/spinlock_types.h>
-#include <linux/f2fs_fs.h>
-#include <linux/ratelimit.h>
-#include <linux/bio.h>
-
-#include "f2fs.h"
-#include "xattr.h"
-
-/* Encryption added and removed here! (L: */
-
-static unsigned int num_prealloc_crypto_pages = 32;
-static unsigned int num_prealloc_crypto_ctxs = 128;
-
-module_param(num_prealloc_crypto_pages, uint, 0444);
-MODULE_PARM_DESC(num_prealloc_crypto_pages,
- "Number of crypto pages to preallocate");
-module_param(num_prealloc_crypto_ctxs, uint, 0444);
-MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
- "Number of crypto contexts to preallocate");
-
-static mempool_t *f2fs_bounce_page_pool;
-
-static LIST_HEAD(f2fs_free_crypto_ctxs);
-static DEFINE_SPINLOCK(f2fs_crypto_ctx_lock);
-
-static struct workqueue_struct *f2fs_read_workqueue;
-static DEFINE_MUTEX(crypto_init);
-
-static struct kmem_cache *f2fs_crypto_ctx_cachep;
-struct kmem_cache *f2fs_crypt_info_cachep;
-
-/**
- * f2fs_release_crypto_ctx() - Releases an encryption context
- * @ctx: The encryption context to release.
- *
- * If the encryption context was allocated from the pre-allocated pool, returns
- * it to that pool. Else, frees it.
- *
- * If there's a bounce page in the context, this frees that.
- */
-void f2fs_release_crypto_ctx(struct f2fs_crypto_ctx *ctx)
-{
- unsigned long flags;
-
- if (ctx->flags & F2FS_WRITE_PATH_FL && ctx->w.bounce_page) {
- mempool_free(ctx->w.bounce_page, f2fs_bounce_page_pool);
- ctx->w.bounce_page = NULL;
- }
- ctx->w.control_page = NULL;
- if (ctx->flags & F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
- kmem_cache_free(f2fs_crypto_ctx_cachep, ctx);
- } else {
- spin_lock_irqsave(&f2fs_crypto_ctx_lock, flags);
- list_add(&ctx->free_list, &f2fs_free_crypto_ctxs);
- spin_unlock_irqrestore(&f2fs_crypto_ctx_lock, flags);
- }
-}
-
-/**
- * f2fs_get_crypto_ctx() - Gets an encryption context
- * @inode: The inode for which we are doing the crypto
- *
- * Allocates and initializes an encryption context.
- *
- * Return: An allocated and initialized encryption context on success; error
- * value or NULL otherwise.
- */
-struct f2fs_crypto_ctx *f2fs_get_crypto_ctx(struct inode *inode)
-{
- struct f2fs_crypto_ctx *ctx = NULL;
- unsigned long flags;
- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
-
- if (ci == NULL)
- return ERR_PTR(-ENOKEY);
-
- /*
- * We first try getting the ctx from a free list because in
- * the common case the ctx will have an allocated and
- * initialized crypto tfm, so it's probably a worthwhile
- * optimization. For the bounce page, we first try getting it
- * from the kernel allocator because that's just about as fast
- * as getting it from a list and because a cache of free pages
- * should generally be a "last resort" option for a filesystem
- * to be able to do its job.
- */
- spin_lock_irqsave(&f2fs_crypto_ctx_lock, flags);
- ctx = list_first_entry_or_null(&f2fs_free_crypto_ctxs,
- struct f2fs_crypto_ctx, free_list);
- if (ctx)
- list_del(&ctx->free_list);
- spin_unlock_irqrestore(&f2fs_crypto_ctx_lock, flags);
- if (!ctx) {
- ctx = kmem_cache_zalloc(f2fs_crypto_ctx_cachep, GFP_NOFS);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
- ctx->flags |= F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
- } else {
- ctx->flags &= ~F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
- }
- ctx->flags &= ~F2FS_WRITE_PATH_FL;
- return ctx;
-}
-
-/*
- * Call f2fs_decrypt on every single page, reusing the encryption
- * context.
- */
-static void completion_pages(struct work_struct *work)
-{
- struct f2fs_crypto_ctx *ctx =
- container_of(work, struct f2fs_crypto_ctx, r.work);
- struct bio *bio = ctx->r.bio;
- struct bio_vec *bv;
- int i;
-
- bio_for_each_segment_all(bv, bio, i) {
- struct page *page = bv->bv_page;
- int ret = f2fs_decrypt(ctx, page);
-
- if (ret) {
- WARN_ON_ONCE(1);
- SetPageError(page);
- } else
- SetPageUptodate(page);
- unlock_page(page);
- }
- f2fs_release_crypto_ctx(ctx);
- bio_put(bio);
-}
-
-void f2fs_end_io_crypto_work(struct f2fs_crypto_ctx *ctx, struct bio *bio)
-{
- INIT_WORK(&ctx->r.work, completion_pages);
- ctx->r.bio = bio;
- queue_work(f2fs_read_workqueue, &ctx->r.work);
-}
-
-static void f2fs_crypto_destroy(void)
-{
- struct f2fs_crypto_ctx *pos, *n;
-
- list_for_each_entry_safe(pos, n, &f2fs_free_crypto_ctxs, free_list)
- kmem_cache_free(f2fs_crypto_ctx_cachep, pos);
- INIT_LIST_HEAD(&f2fs_free_crypto_ctxs);
- if (f2fs_bounce_page_pool)
- mempool_destroy(f2fs_bounce_page_pool);
- f2fs_bounce_page_pool = NULL;
-}
-
-/**
- * f2fs_crypto_initialize() - Set up for f2fs encryption.
- *
- * We only call this when we start accessing encrypted files, since it
- * results in memory getting allocated that wouldn't otherwise be used.
- *
- * Return: Zero on success, non-zero otherwise.
- */
-int f2fs_crypto_initialize(void)
-{
- int i, res = -ENOMEM;
-
- if (f2fs_bounce_page_pool)
- return 0;
-
- mutex_lock(&crypto_init);
- if (f2fs_bounce_page_pool)
- goto already_initialized;
-
- for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
- struct f2fs_crypto_ctx *ctx;
-
- ctx = kmem_cache_zalloc(f2fs_crypto_ctx_cachep, GFP_KERNEL);
- if (!ctx)
- goto fail;
- list_add(&ctx->free_list, &f2fs_free_crypto_ctxs);
- }
-
- /* must be allocated at the last step to avoid race condition above */
- f2fs_bounce_page_pool =
- mempool_create_page_pool(num_prealloc_crypto_pages, 0);
- if (!f2fs_bounce_page_pool)
- goto fail;
-
-already_initialized:
- mutex_unlock(&crypto_init);
- return 0;
-fail:
- f2fs_crypto_destroy();
- mutex_unlock(&crypto_init);
- return res;
-}
-
-/**
- * f2fs_exit_crypto() - Shutdown the f2fs encryption system
- */
-void f2fs_exit_crypto(void)
-{
- f2fs_crypto_destroy();
-
- if (f2fs_read_workqueue)
- destroy_workqueue(f2fs_read_workqueue);
- if (f2fs_crypto_ctx_cachep)
- kmem_cache_destroy(f2fs_crypto_ctx_cachep);
- if (f2fs_crypt_info_cachep)
- kmem_cache_destroy(f2fs_crypt_info_cachep);
-}
-
-int __init f2fs_init_crypto(void)
-{
- int res = -ENOMEM;
-
- f2fs_read_workqueue = alloc_workqueue("f2fs_crypto", WQ_HIGHPRI, 0);
- if (!f2fs_read_workqueue)
- goto fail;
-
- f2fs_crypto_ctx_cachep = KMEM_CACHE(f2fs_crypto_ctx,
- SLAB_RECLAIM_ACCOUNT);
- if (!f2fs_crypto_ctx_cachep)
- goto fail;
-
- f2fs_crypt_info_cachep = KMEM_CACHE(f2fs_crypt_info,
- SLAB_RECLAIM_ACCOUNT);
- if (!f2fs_crypt_info_cachep)
- goto fail;
-
- return 0;
-fail:
- f2fs_exit_crypto();
- return res;
-}
-
-void f2fs_restore_and_release_control_page(struct page **page)
-{
- struct f2fs_crypto_ctx *ctx;
- struct page *bounce_page;
-
- /* The bounce data pages are unmapped. */
- if ((*page)->mapping)
- return;
-
- /* The bounce data page is unmapped. */
- bounce_page = *page;
- ctx = (struct f2fs_crypto_ctx *)page_private(bounce_page);
-
- /* restore control page */
- *page = ctx->w.control_page;
-
- f2fs_restore_control_page(bounce_page);
-}
-
-void f2fs_restore_control_page(struct page *data_page)
-{
- struct f2fs_crypto_ctx *ctx =
- (struct f2fs_crypto_ctx *)page_private(data_page);
-
- set_page_private(data_page, (unsigned long)NULL);
- ClearPagePrivate(data_page);
- unlock_page(data_page);
- f2fs_release_crypto_ctx(ctx);
-}
-
-/**
- * f2fs_crypt_complete() - The completion callback for page encryption
- * @req: The asynchronous encryption request context
- * @res: The result of the encryption operation
- */
-static void f2fs_crypt_complete(struct crypto_async_request *req, int res)
-{
- struct f2fs_completion_result *ecr = req->data;
-
- if (res == -EINPROGRESS)
- return;
- ecr->res = res;
- complete(&ecr->completion);
-}
-
-typedef enum {
- F2FS_DECRYPT = 0,
- F2FS_ENCRYPT,
-} f2fs_direction_t;
-
-static int f2fs_page_crypto(struct f2fs_crypto_ctx *ctx,
- struct inode *inode,
- f2fs_direction_t rw,
- pgoff_t index,
- struct page *src_page,
- struct page *dest_page)
-{
- u8 xts_tweak[F2FS_XTS_TWEAK_SIZE];
- struct ablkcipher_request *req = NULL;
- DECLARE_F2FS_COMPLETION_RESULT(ecr);
- struct scatterlist dst, src;
- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
- struct crypto_ablkcipher *tfm = ci->ci_ctfm;
- int res = 0;
-
- req = ablkcipher_request_alloc(tfm, GFP_NOFS);
- if (!req) {
- printk_ratelimited(KERN_ERR
- "%s: crypto_request_alloc() failed\n",
- __func__);
- return -ENOMEM;
- }
- ablkcipher_request_set_callback(
- req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- f2fs_crypt_complete, &ecr);
-
- BUILD_BUG_ON(F2FS_XTS_TWEAK_SIZE < sizeof(index));
- memcpy(xts_tweak, &index, sizeof(index));
- memset(&xts_tweak[sizeof(index)], 0,
- F2FS_XTS_TWEAK_SIZE - sizeof(index));
-
- sg_init_table(&dst, 1);
- sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
- sg_init_table(&src, 1);
- sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
- ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
- xts_tweak);
- if (rw == F2FS_DECRYPT)
- res = crypto_ablkcipher_decrypt(req);
- else
- res = crypto_ablkcipher_encrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- BUG_ON(req->base.data != &ecr);
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
- ablkcipher_request_free(req);
- if (res) {
- printk_ratelimited(KERN_ERR
- "%s: crypto_ablkcipher_encrypt() returned %d\n",
- __func__, res);
- return res;
- }
- return 0;
-}
-
-static struct page *alloc_bounce_page(struct f2fs_crypto_ctx *ctx)
-{
- ctx->w.bounce_page = mempool_alloc(f2fs_bounce_page_pool, GFP_NOWAIT);
- if (ctx->w.bounce_page == NULL)
- return ERR_PTR(-ENOMEM);
- ctx->flags |= F2FS_WRITE_PATH_FL;
- return ctx->w.bounce_page;
-}
-
-/**
- * f2fs_encrypt() - Encrypts a page
- * @inode: The inode for which the encryption should take place
- * @plaintext_page: The page to encrypt. Must be locked.
- *
- * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
- * encryption context.
- *
- * Called on the page write path. The caller must call
- * f2fs_restore_control_page() on the returned ciphertext page to
- * release the bounce buffer and the encryption context.
- *
- * Return: An allocated page with the encrypted content on success. Else, an
- * error value or NULL.
- */
-struct page *f2fs_encrypt(struct inode *inode,
- struct page *plaintext_page)
-{
- struct f2fs_crypto_ctx *ctx;
- struct page *ciphertext_page = NULL;
- int err;
-
- BUG_ON(!PageLocked(plaintext_page));
-
- ctx = f2fs_get_crypto_ctx(inode);
- if (IS_ERR(ctx))
- return (struct page *)ctx;
-
- /* The encryption operation will require a bounce page. */
- ciphertext_page = alloc_bounce_page(ctx);
- if (IS_ERR(ciphertext_page))
- goto err_out;
-
- ctx->w.control_page = plaintext_page;
- err = f2fs_page_crypto(ctx, inode, F2FS_ENCRYPT, plaintext_page->index,
- plaintext_page, ciphertext_page);
- if (err) {
- ciphertext_page = ERR_PTR(err);
- goto err_out;
- }
-
- SetPagePrivate(ciphertext_page);
- set_page_private(ciphertext_page, (unsigned long)ctx);
- lock_page(ciphertext_page);
- return ciphertext_page;
-
-err_out:
- f2fs_release_crypto_ctx(ctx);
- return ciphertext_page;
-}
-
-/**
- * f2fs_decrypt() - Decrypts a page in-place
- * @ctx: The encryption context.
- * @page: The page to decrypt. Must be locked.
- *
- * Decrypts page in-place using the ctx encryption context.
- *
- * Called from the read completion callback.
- *
- * Return: Zero on success, non-zero otherwise.
- */
-int f2fs_decrypt(struct f2fs_crypto_ctx *ctx, struct page *page)
-{
- BUG_ON(!PageLocked(page));
-
- return f2fs_page_crypto(ctx, page->mapping->host,
- F2FS_DECRYPT, page->index, page, page);
-}
-
-/*
- * Convenience function which takes care of allocating and
- * deallocating the encryption context
- */
-int f2fs_decrypt_one(struct inode *inode, struct page *page)
-{
- struct f2fs_crypto_ctx *ctx = f2fs_get_crypto_ctx(inode);
- int ret;
-
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
- ret = f2fs_decrypt(ctx, page);
- f2fs_release_crypto_ctx(ctx);
- return ret;
-}
-
-bool f2fs_valid_contents_enc_mode(uint32_t mode)
-{
- return (mode == F2FS_ENCRYPTION_MODE_AES_256_XTS);
-}
-
-/**
- * f2fs_validate_encryption_key_size() - Validate the encryption key size
- * @mode: The key mode.
- * @size: The key size to validate.
- *
- * Return: The validated key size for @mode. Zero if invalid.
- */
-uint32_t f2fs_validate_encryption_key_size(uint32_t mode, uint32_t size)
-{
- if (size == f2fs_encryption_key_size(mode))
- return size;
- return 0;
-}
+++ /dev/null
-/*
- * linux/fs/f2fs/crypto_fname.c
- *
- * Copied from linux/fs/ext4/crypto.c
- *
- * Copyright (C) 2015, Google, Inc.
- * Copyright (C) 2015, Motorola Mobility
- *
- * This contains functions for filename crypto management in f2fs
- *
- * Written by Uday Savagaonkar, 2014.
- *
- * Adjust f2fs dentry structure
- * Jaegeuk Kim, 2015.
- *
- * This has not yet undergone a rigorous security audit.
- */
-#include <crypto/hash.h>
-#include <crypto/sha.h>
-#include <keys/encrypted-type.h>
-#include <keys/user-type.h>
-#include <linux/crypto.h>
-#include <linux/gfp.h>
-#include <linux/kernel.h>
-#include <linux/key.h>
-#include <linux/list.h>
-#include <linux/mempool.h>
-#include <linux/random.h>
-#include <linux/scatterlist.h>
-#include <linux/spinlock_types.h>
-#include <linux/f2fs_fs.h>
-#include <linux/ratelimit.h>
-
-#include "f2fs.h"
-#include "f2fs_crypto.h"
-#include "xattr.h"
-
-/**
- * f2fs_dir_crypt_complete() -
- */
-static void f2fs_dir_crypt_complete(struct crypto_async_request *req, int res)
-{
- struct f2fs_completion_result *ecr = req->data;
-
- if (res == -EINPROGRESS)
- return;
- ecr->res = res;
- complete(&ecr->completion);
-}
-
-bool f2fs_valid_filenames_enc_mode(uint32_t mode)
-{
- return (mode == F2FS_ENCRYPTION_MODE_AES_256_CTS);
-}
-
-static unsigned max_name_len(struct inode *inode)
-{
- return S_ISLNK(inode->i_mode) ? inode->i_sb->s_blocksize :
- F2FS_NAME_LEN;
-}
-
-/**
- * f2fs_fname_encrypt() -
- *
- * This function encrypts the input filename, and returns the length of the
- * ciphertext. Errors are returned as negative numbers. We trust the caller to
- * allocate sufficient memory to oname string.
- */
-static int f2fs_fname_encrypt(struct inode *inode,
- const struct qstr *iname, struct f2fs_str *oname)
-{
- u32 ciphertext_len;
- struct ablkcipher_request *req = NULL;
- DECLARE_F2FS_COMPLETION_RESULT(ecr);
- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
- struct crypto_ablkcipher *tfm = ci->ci_ctfm;
- int res = 0;
- char iv[F2FS_CRYPTO_BLOCK_SIZE];
- struct scatterlist src_sg, dst_sg;
- int padding = 4 << (ci->ci_flags & F2FS_POLICY_FLAGS_PAD_MASK);
- char *workbuf, buf[32], *alloc_buf = NULL;
- unsigned lim = max_name_len(inode);
-
- if (iname->len <= 0 || iname->len > lim)
- return -EIO;
-
- ciphertext_len = (iname->len < F2FS_CRYPTO_BLOCK_SIZE) ?
- F2FS_CRYPTO_BLOCK_SIZE : iname->len;
- ciphertext_len = f2fs_fname_crypto_round_up(ciphertext_len, padding);
- ciphertext_len = (ciphertext_len > lim) ? lim : ciphertext_len;
-
- if (ciphertext_len <= sizeof(buf)) {
- workbuf = buf;
- } else {
- alloc_buf = kmalloc(ciphertext_len, GFP_NOFS);
- if (!alloc_buf)
- return -ENOMEM;
- workbuf = alloc_buf;
- }
-
- /* Allocate request */
- req = ablkcipher_request_alloc(tfm, GFP_NOFS);
- if (!req) {
- printk_ratelimited(KERN_ERR
- "%s: crypto_request_alloc() failed\n", __func__);
- kfree(alloc_buf);
- return -ENOMEM;
- }
- ablkcipher_request_set_callback(req,
- CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- f2fs_dir_crypt_complete, &ecr);
-
- /* Copy the input */
- memcpy(workbuf, iname->name, iname->len);
- if (iname->len < ciphertext_len)
- memset(workbuf + iname->len, 0, ciphertext_len - iname->len);
-
- /* Initialize IV */
- memset(iv, 0, F2FS_CRYPTO_BLOCK_SIZE);
-
- /* Create encryption request */
- sg_init_one(&src_sg, workbuf, ciphertext_len);
- sg_init_one(&dst_sg, oname->name, ciphertext_len);
- ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv);
- res = crypto_ablkcipher_encrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- BUG_ON(req->base.data != &ecr);
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
- kfree(alloc_buf);
- ablkcipher_request_free(req);
- if (res < 0) {
- printk_ratelimited(KERN_ERR
- "%s: Error (error code %d)\n", __func__, res);
- }
- oname->len = ciphertext_len;
- return res;
-}
-
-/*
- * f2fs_fname_decrypt()
- * This function decrypts the input filename, and returns
- * the length of the plaintext.
- * Errors are returned as negative numbers.
- * We trust the caller to allocate sufficient memory to oname string.
- */
-static int f2fs_fname_decrypt(struct inode *inode,
- const struct f2fs_str *iname, struct f2fs_str *oname)
-{
- struct ablkcipher_request *req = NULL;
- DECLARE_F2FS_COMPLETION_RESULT(ecr);
- struct scatterlist src_sg, dst_sg;
- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
- struct crypto_ablkcipher *tfm = ci->ci_ctfm;
- int res = 0;
- char iv[F2FS_CRYPTO_BLOCK_SIZE];
- unsigned lim = max_name_len(inode);
-
- if (iname->len <= 0 || iname->len > lim)
- return -EIO;
-
- /* Allocate request */
- req = ablkcipher_request_alloc(tfm, GFP_NOFS);
- if (!req) {
- printk_ratelimited(KERN_ERR
- "%s: crypto_request_alloc() failed\n", __func__);
- return -ENOMEM;
- }
- ablkcipher_request_set_callback(req,
- CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- f2fs_dir_crypt_complete, &ecr);
-
- /* Initialize IV */
- memset(iv, 0, F2FS_CRYPTO_BLOCK_SIZE);
-
- /* Create decryption request */
- sg_init_one(&src_sg, iname->name, iname->len);
- sg_init_one(&dst_sg, oname->name, oname->len);
- ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
- res = crypto_ablkcipher_decrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- BUG_ON(req->base.data != &ecr);
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
- ablkcipher_request_free(req);
- if (res < 0) {
- printk_ratelimited(KERN_ERR
- "%s: Error in f2fs_fname_decrypt (error code %d)\n",
- __func__, res);
- return res;
- }
-
- oname->len = strnlen(oname->name, iname->len);
- return oname->len;
-}
-
-static const char *lookup_table =
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";
-
-/**
- * f2fs_fname_encode_digest() -
- *
- * Encodes the input digest using characters from the set [a-zA-Z0-9_+].
- * The encoded string is roughly 4/3 times the size of the input string.
- */
-static int digest_encode(const char *src, int len, char *dst)
-{
- int i = 0, bits = 0, ac = 0;
- char *cp = dst;
-
- while (i < len) {
- ac += (((unsigned char) src[i]) << bits);
- bits += 8;
- do {
- *cp++ = lookup_table[ac & 0x3f];
- ac >>= 6;
- bits -= 6;
- } while (bits >= 6);
- i++;
- }
- if (bits)
- *cp++ = lookup_table[ac & 0x3f];
- return cp - dst;
-}
-
-static int digest_decode(const char *src, int len, char *dst)
-{
- int i = 0, bits = 0, ac = 0;
- const char *p;
- char *cp = dst;
-
- while (i < len) {
- p = strchr(lookup_table, src[i]);
- if (p == NULL || src[i] == 0)
- return -2;
- ac += (p - lookup_table) << bits;
- bits += 6;
- if (bits >= 8) {
- *cp++ = ac & 0xff;
- ac >>= 8;
- bits -= 8;
- }
- i++;
- }
- if (ac)
- return -1;
- return cp - dst;
-}
-
-/**
- * f2fs_fname_crypto_round_up() -
- *
- * Return: The next multiple of block size
- */
-u32 f2fs_fname_crypto_round_up(u32 size, u32 blksize)
-{
- return ((size + blksize - 1) / blksize) * blksize;
-}
-
-/**
- * f2fs_fname_crypto_alloc_obuff() -
- *
- * Allocates an output buffer that is sufficient for the crypto operation
- * specified by the context and the direction.
- */
-int f2fs_fname_crypto_alloc_buffer(struct inode *inode,
- u32 ilen, struct f2fs_str *crypto_str)
-{
- unsigned int olen;
- int padding = 16;
- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
-
- if (ci)
- padding = 4 << (ci->ci_flags & F2FS_POLICY_FLAGS_PAD_MASK);
- if (padding < F2FS_CRYPTO_BLOCK_SIZE)
- padding = F2FS_CRYPTO_BLOCK_SIZE;
- olen = f2fs_fname_crypto_round_up(ilen, padding);
- crypto_str->len = olen;
- if (olen < F2FS_FNAME_CRYPTO_DIGEST_SIZE * 2)
- olen = F2FS_FNAME_CRYPTO_DIGEST_SIZE * 2;
- /* Allocated buffer can hold one more character to null-terminate the
- * string */
- crypto_str->name = kmalloc(olen + 1, GFP_NOFS);
- if (!(crypto_str->name))
- return -ENOMEM;
- return 0;
-}
-
-/**
- * f2fs_fname_crypto_free_buffer() -
- *
- * Frees the buffer allocated for crypto operation.
- */
-void f2fs_fname_crypto_free_buffer(struct f2fs_str *crypto_str)
-{
- if (!crypto_str)
- return;
- kfree(crypto_str->name);
- crypto_str->name = NULL;
-}
-
-/**
- * f2fs_fname_disk_to_usr() - converts a filename from disk space to user space
- */
-int f2fs_fname_disk_to_usr(struct inode *inode,
- f2fs_hash_t *hash,
- const struct f2fs_str *iname,
- struct f2fs_str *oname)
-{
- const struct qstr qname = FSTR_TO_QSTR(iname);
- char buf[24];
- int ret;
-
- if (is_dot_dotdot(&qname)) {
- oname->name[0] = '.';
- oname->name[iname->len - 1] = '.';
- oname->len = iname->len;
- return oname->len;
- }
-
- if (F2FS_I(inode)->i_crypt_info)
- return f2fs_fname_decrypt(inode, iname, oname);
-
- if (iname->len <= F2FS_FNAME_CRYPTO_DIGEST_SIZE) {
- ret = digest_encode(iname->name, iname->len, oname->name);
- oname->len = ret;
- return ret;
- }
- if (hash) {
- memcpy(buf, hash, 4);
- memset(buf + 4, 0, 4);
- } else
- memset(buf, 0, 8);
- memcpy(buf + 8, iname->name + ((iname->len - 17) & ~15), 16);
- oname->name[0] = '_';
- ret = digest_encode(buf, 24, oname->name + 1);
- oname->len = ret + 1;
- return ret + 1;
-}
-
-/**
- * f2fs_fname_usr_to_disk() - converts a filename from user space to disk space
- */
-int f2fs_fname_usr_to_disk(struct inode *inode,
- const struct qstr *iname,
- struct f2fs_str *oname)
-{
- int res;
- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
-
- if (is_dot_dotdot(iname)) {
- oname->name[0] = '.';
- oname->name[iname->len - 1] = '.';
- oname->len = iname->len;
- return oname->len;
- }
-
- if (ci) {
- res = f2fs_fname_encrypt(inode, iname, oname);
- return res;
- }
- /* Without a proper key, a user is not allowed to modify the filenames
- * in a directory. Consequently, a user space name cannot be mapped to
- * a disk-space name */
- return -EACCES;
-}
-
-int f2fs_fname_setup_filename(struct inode *dir, const struct qstr *iname,
- int lookup, struct f2fs_filename *fname)
-{
- struct f2fs_crypt_info *ci;
- int ret = 0, bigname = 0;
-
- memset(fname, 0, sizeof(struct f2fs_filename));
- fname->usr_fname = iname;
-
- if (!f2fs_encrypted_inode(dir) || is_dot_dotdot(iname)) {
- fname->disk_name.name = (unsigned char *)iname->name;
- fname->disk_name.len = iname->len;
- return 0;
- }
- ret = f2fs_get_encryption_info(dir);
- if (ret)
- return ret;
- ci = F2FS_I(dir)->i_crypt_info;
- if (ci) {
- ret = f2fs_fname_crypto_alloc_buffer(dir, iname->len,
- &fname->crypto_buf);
- if (ret < 0)
- return ret;
- ret = f2fs_fname_encrypt(dir, iname, &fname->crypto_buf);
- if (ret < 0)
- goto errout;
- fname->disk_name.name = fname->crypto_buf.name;
- fname->disk_name.len = fname->crypto_buf.len;
- return 0;
- }
- if (!lookup)
- return -EACCES;
-
- /* We don't have the key and we are doing a lookup; decode the
- * user-supplied name
- */
- if (iname->name[0] == '_')
- bigname = 1;
- if ((bigname && (iname->len != 33)) ||
- (!bigname && (iname->len > 43)))
- return -ENOENT;
-
- fname->crypto_buf.name = kmalloc(32, GFP_KERNEL);
- if (fname->crypto_buf.name == NULL)
- return -ENOMEM;
- ret = digest_decode(iname->name + bigname, iname->len - bigname,
- fname->crypto_buf.name);
- if (ret < 0) {
- ret = -ENOENT;
- goto errout;
- }
- fname->crypto_buf.len = ret;
- if (bigname) {
- memcpy(&fname->hash, fname->crypto_buf.name, 4);
- } else {
- fname->disk_name.name = fname->crypto_buf.name;
- fname->disk_name.len = fname->crypto_buf.len;
- }
- return 0;
-errout:
- f2fs_fname_crypto_free_buffer(&fname->crypto_buf);
- return ret;
-}
-
-void f2fs_fname_free_filename(struct f2fs_filename *fname)
-{
- kfree(fname->crypto_buf.name);
- fname->crypto_buf.name = NULL;
- fname->usr_fname = NULL;
- fname->disk_name.name = NULL;
-}
+++ /dev/null
-/*
- * linux/fs/f2fs/crypto_key.c
- *
- * Copied from linux/fs/f2fs/crypto_key.c
- *
- * Copyright (C) 2015, Google, Inc.
- *
- * This contains encryption key functions for f2fs
- *
- * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
- */
-#include <keys/encrypted-type.h>
-#include <keys/user-type.h>
-#include <linux/random.h>
-#include <linux/scatterlist.h>
-#include <uapi/linux/keyctl.h>
-#include <crypto/hash.h>
-#include <linux/f2fs_fs.h>
-
-#include "f2fs.h"
-#include "xattr.h"
-
-static void derive_crypt_complete(struct crypto_async_request *req, int rc)
-{
- struct f2fs_completion_result *ecr = req->data;
-
- if (rc == -EINPROGRESS)
- return;
-
- ecr->res = rc;
- complete(&ecr->completion);
-}
-
-/**
- * f2fs_derive_key_aes() - Derive a key using AES-128-ECB
- * @deriving_key: Encryption key used for derivatio.
- * @source_key: Source key to which to apply derivation.
- * @derived_key: Derived key.
- *
- * Return: Zero on success; non-zero otherwise.
- */
-static int f2fs_derive_key_aes(char deriving_key[F2FS_AES_128_ECB_KEY_SIZE],
- char source_key[F2FS_AES_256_XTS_KEY_SIZE],
- char derived_key[F2FS_AES_256_XTS_KEY_SIZE])
-{
- int res = 0;
- struct ablkcipher_request *req = NULL;
- DECLARE_F2FS_COMPLETION_RESULT(ecr);
- struct scatterlist src_sg, dst_sg;
- struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("ecb(aes)", 0,
- 0);
-
- if (IS_ERR(tfm)) {
- res = PTR_ERR(tfm);
- tfm = NULL;
- goto out;
- }
- crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
- req = ablkcipher_request_alloc(tfm, GFP_NOFS);
- if (!req) {
- res = -ENOMEM;
- goto out;
- }
- ablkcipher_request_set_callback(req,
- CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- derive_crypt_complete, &ecr);
- res = crypto_ablkcipher_setkey(tfm, deriving_key,
- F2FS_AES_128_ECB_KEY_SIZE);
- if (res < 0)
- goto out;
-
- sg_init_one(&src_sg, source_key, F2FS_AES_256_XTS_KEY_SIZE);
- sg_init_one(&dst_sg, derived_key, F2FS_AES_256_XTS_KEY_SIZE);
- ablkcipher_request_set_crypt(req, &src_sg, &dst_sg,
- F2FS_AES_256_XTS_KEY_SIZE, NULL);
- res = crypto_ablkcipher_encrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- BUG_ON(req->base.data != &ecr);
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
-out:
- if (req)
- ablkcipher_request_free(req);
- if (tfm)
- crypto_free_ablkcipher(tfm);
- return res;
-}
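
The helper removed above implemented f2fs's old per-inode KDF: the inode's 16-byte nonce is used as an AES-128-ECB key to encrypt the 64-byte master key, and the ciphertext becomes the derived XTS key. The data flow is small enough to model in isolation; the standalone userspace sketch below is illustrative only, and aes128_ecb_encrypt() is a hypothetical stand-in (a XOR placeholder here) for a real AES primitive, not a kernel API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NONCE_SIZE   16  /* F2FS_AES_128_ECB_KEY_SIZE */
#define XTS_KEY_SIZE 64  /* F2FS_AES_256_XTS_KEY_SIZE */

/* Hypothetical stand-in for AES-128-ECB; a real build would call a
 * crypto library here.  XOR keeps the sketch self-contained. */
static void aes128_ecb_encrypt(const uint8_t key[NONCE_SIZE],
			       const uint8_t *in, uint8_t *out, size_t len)
{
	for (size_t i = 0; i < len; i++)
		out[i] = in[i] ^ key[i % NONCE_SIZE];
}

/* Mirrors the deleted f2fs_derive_key_aes(): nonce as cipher key,
 * master key as plaintext, ciphertext as the derived key. */
static void derive_key(const uint8_t nonce[NONCE_SIZE],
		       const uint8_t master[XTS_KEY_SIZE],
		       uint8_t derived[XTS_KEY_SIZE])
{
	aes128_ecb_encrypt(nonce, master, derived, XTS_KEY_SIZE);
}

int main(void)
{
	uint8_t nonce[NONCE_SIZE] = { 1 };
	uint8_t master[XTS_KEY_SIZE] = { 2 };
	uint8_t derived[XTS_KEY_SIZE];

	derive_key(nonce, master, derived);
	printf("derived[0] = 0x%02x\n", derived[0]);
	return 0;
}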
-
-static void f2fs_free_crypt_info(struct f2fs_crypt_info *ci)
-{
- if (!ci)
- return;
-
- key_put(ci->ci_keyring_key);
- crypto_free_ablkcipher(ci->ci_ctfm);
- kmem_cache_free(f2fs_crypt_info_cachep, ci);
-}
-
-void f2fs_free_encryption_info(struct inode *inode, struct f2fs_crypt_info *ci)
-{
- struct f2fs_inode_info *fi = F2FS_I(inode);
- struct f2fs_crypt_info *prev;
-
- if (ci == NULL)
- ci = ACCESS_ONCE(fi->i_crypt_info);
- if (ci == NULL)
- return;
- prev = cmpxchg(&fi->i_crypt_info, ci, NULL);
- if (prev != ci)
- return;
-
- f2fs_free_crypt_info(ci);
-}
-
-int _f2fs_get_encryption_info(struct inode *inode)
-{
- struct f2fs_inode_info *fi = F2FS_I(inode);
- struct f2fs_crypt_info *crypt_info;
- char full_key_descriptor[F2FS_KEY_DESC_PREFIX_SIZE +
- (F2FS_KEY_DESCRIPTOR_SIZE * 2) + 1];
- struct key *keyring_key = NULL;
- struct f2fs_encryption_key *master_key;
- struct f2fs_encryption_context ctx;
- const struct user_key_payload *ukp;
- struct crypto_ablkcipher *ctfm;
- const char *cipher_str;
- char raw_key[F2FS_MAX_KEY_SIZE];
- char mode;
- int res;
-
- res = f2fs_crypto_initialize();
- if (res)
- return res;
-retry:
- crypt_info = ACCESS_ONCE(fi->i_crypt_info);
- if (crypt_info) {
- if (!crypt_info->ci_keyring_key ||
- key_validate(crypt_info->ci_keyring_key) == 0)
- return 0;
- f2fs_free_encryption_info(inode, crypt_info);
- goto retry;
- }
-
- res = f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
- F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
- &ctx, sizeof(ctx), NULL);
- if (res < 0)
- return res;
- else if (res != sizeof(ctx))
- return -EINVAL;
- res = 0;
-
- crypt_info = kmem_cache_alloc(f2fs_crypt_info_cachep, GFP_NOFS);
- if (!crypt_info)
- return -ENOMEM;
-
- crypt_info->ci_flags = ctx.flags;
- crypt_info->ci_data_mode = ctx.contents_encryption_mode;
- crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
- crypt_info->ci_ctfm = NULL;
- crypt_info->ci_keyring_key = NULL;
- memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
- sizeof(crypt_info->ci_master_key));
- if (S_ISREG(inode->i_mode))
- mode = crypt_info->ci_data_mode;
- else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
- mode = crypt_info->ci_filename_mode;
- else
- BUG();
-
- switch (mode) {
- case F2FS_ENCRYPTION_MODE_AES_256_XTS:
- cipher_str = "xts(aes)";
- break;
- case F2FS_ENCRYPTION_MODE_AES_256_CTS:
- cipher_str = "cts(cbc(aes))";
- break;
- default:
- printk_once(KERN_WARNING
- "f2fs: unsupported key mode %d (ino %u)\n",
- mode, (unsigned) inode->i_ino);
- res = -ENOKEY;
- goto out;
- }
-
- memcpy(full_key_descriptor, F2FS_KEY_DESC_PREFIX,
- F2FS_KEY_DESC_PREFIX_SIZE);
- sprintf(full_key_descriptor + F2FS_KEY_DESC_PREFIX_SIZE,
- "%*phN", F2FS_KEY_DESCRIPTOR_SIZE,
- ctx.master_key_descriptor);
- full_key_descriptor[F2FS_KEY_DESC_PREFIX_SIZE +
- (2 * F2FS_KEY_DESCRIPTOR_SIZE)] = '\0';
- keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL);
- if (IS_ERR(keyring_key)) {
- res = PTR_ERR(keyring_key);
- keyring_key = NULL;
- goto out;
- }
- crypt_info->ci_keyring_key = keyring_key;
- BUG_ON(keyring_key->type != &key_type_logon);
- ukp = user_key_payload(keyring_key);
- if (ukp->datalen != sizeof(struct f2fs_encryption_key)) {
- res = -EINVAL;
- goto out;
- }
- master_key = (struct f2fs_encryption_key *)ukp->data;
- BUILD_BUG_ON(F2FS_AES_128_ECB_KEY_SIZE !=
- F2FS_KEY_DERIVATION_NONCE_SIZE);
- BUG_ON(master_key->size != F2FS_AES_256_XTS_KEY_SIZE);
- res = f2fs_derive_key_aes(ctx.nonce, master_key->raw,
- raw_key);
- if (res)
- goto out;
-
- ctfm = crypto_alloc_ablkcipher(cipher_str, 0, 0);
- if (!ctfm || IS_ERR(ctfm)) {
- res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
- printk(KERN_DEBUG
- "%s: error %d (inode %u) allocating crypto tfm\n",
- __func__, res, (unsigned) inode->i_ino);
- goto out;
- }
- crypt_info->ci_ctfm = ctfm;
- crypto_ablkcipher_clear_flags(ctfm, ~0);
- crypto_tfm_set_flags(crypto_ablkcipher_tfm(ctfm),
- CRYPTO_TFM_REQ_WEAK_KEY);
- res = crypto_ablkcipher_setkey(ctfm, raw_key,
- f2fs_encryption_key_size(mode));
- if (res)
- goto out;
-
- memzero_explicit(raw_key, sizeof(raw_key));
- if (cmpxchg(&fi->i_crypt_info, NULL, crypt_info) != NULL) {
- f2fs_free_crypt_info(crypt_info);
- goto retry;
- }
- return 0;
-
-out:
- if (res == -ENOKEY && !S_ISREG(inode->i_mode))
- res = 0;
-
- f2fs_free_crypt_info(crypt_info);
- memzero_explicit(raw_key, sizeof(raw_key));
- return res;
-}
-
-int f2fs_has_encryption_key(struct inode *inode)
-{
- struct f2fs_inode_info *fi = F2FS_I(inode);
-
- return (fi->i_crypt_info != NULL);
-}
+++ /dev/null
-/*
- * copied from linux/fs/ext4/crypto_policy.c
- *
- * Copyright (C) 2015, Google, Inc.
- * Copyright (C) 2015, Motorola Mobility.
- *
- * This contains encryption policy functions for f2fs with some modifications
- * to support f2fs-specific xattr APIs.
- *
- * Written by Michael Halcrow, 2015.
- * Modified by Jaegeuk Kim, 2015.
- */
-#include <linux/random.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/f2fs_fs.h>
-
-#include "f2fs.h"
-#include "xattr.h"
-
-static int f2fs_inode_has_encryption_context(struct inode *inode)
-{
- int res = f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
- F2FS_XATTR_NAME_ENCRYPTION_CONTEXT, NULL, 0, NULL);
- return (res > 0);
-}
-
-/*
- * check whether the policy is consistent with the encryption context
- * for the inode
- */
-static int f2fs_is_encryption_context_consistent_with_policy(
- struct inode *inode, const struct f2fs_encryption_policy *policy)
-{
- struct f2fs_encryption_context ctx;
- int res = f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
- F2FS_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
- sizeof(ctx), NULL);
-
- if (res != sizeof(ctx))
- return 0;
-
- return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor,
- F2FS_KEY_DESCRIPTOR_SIZE) == 0 &&
- (ctx.flags == policy->flags) &&
- (ctx.contents_encryption_mode ==
- policy->contents_encryption_mode) &&
- (ctx.filenames_encryption_mode ==
- policy->filenames_encryption_mode));
-}
-
-static int f2fs_create_encryption_context_from_policy(
- struct inode *inode, const struct f2fs_encryption_policy *policy)
-{
- struct f2fs_encryption_context ctx;
-
- ctx.format = F2FS_ENCRYPTION_CONTEXT_FORMAT_V1;
- memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
- F2FS_KEY_DESCRIPTOR_SIZE);
-
- if (!f2fs_valid_contents_enc_mode(policy->contents_encryption_mode)) {
- printk(KERN_WARNING
- "%s: Invalid contents encryption mode %d\n", __func__,
- policy->contents_encryption_mode);
- return -EINVAL;
- }
-
- if (!f2fs_valid_filenames_enc_mode(policy->filenames_encryption_mode)) {
- printk(KERN_WARNING
- "%s: Invalid filenames encryption mode %d\n", __func__,
- policy->filenames_encryption_mode);
- return -EINVAL;
- }
-
- if (policy->flags & ~F2FS_POLICY_FLAGS_VALID)
- return -EINVAL;
-
- ctx.contents_encryption_mode = policy->contents_encryption_mode;
- ctx.filenames_encryption_mode = policy->filenames_encryption_mode;
- ctx.flags = policy->flags;
- BUILD_BUG_ON(sizeof(ctx.nonce) != F2FS_KEY_DERIVATION_NONCE_SIZE);
- get_random_bytes(ctx.nonce, F2FS_KEY_DERIVATION_NONCE_SIZE);
-
- return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
- F2FS_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
- sizeof(ctx), NULL, XATTR_CREATE);
-}
-
-int f2fs_process_policy(const struct f2fs_encryption_policy *policy,
- struct inode *inode)
-{
- if (!inode_owner_or_capable(inode))
- return -EACCES;
-
- if (policy->version != 0)
- return -EINVAL;
-
- if (!S_ISDIR(inode->i_mode))
- return -EINVAL;
-
- if (!f2fs_inode_has_encryption_context(inode)) {
- if (!f2fs_empty_dir(inode))
- return -ENOTEMPTY;
- return f2fs_create_encryption_context_from_policy(inode,
- policy);
- }
-
- if (f2fs_is_encryption_context_consistent_with_policy(inode, policy))
- return 0;
-
- printk(KERN_WARNING "%s: Policy inconsistent with encryption context\n",
- __func__);
- return -EINVAL;
-}
-
-int f2fs_get_policy(struct inode *inode, struct f2fs_encryption_policy *policy)
-{
- struct f2fs_encryption_context ctx;
- int res;
-
- if (!f2fs_encrypted_inode(inode))
- return -ENODATA;
-
- res = f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
- F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
- &ctx, sizeof(ctx), NULL);
- if (res != sizeof(ctx))
- return -ENODATA;
- if (ctx.format != F2FS_ENCRYPTION_CONTEXT_FORMAT_V1)
- return -EINVAL;
-
- policy->version = 0;
- policy->contents_encryption_mode = ctx.contents_encryption_mode;
- policy->filenames_encryption_mode = ctx.filenames_encryption_mode;
- policy->flags = ctx.flags;
- memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor,
- F2FS_KEY_DESCRIPTOR_SIZE);
- return 0;
-}
-
-int f2fs_is_child_context_consistent_with_parent(struct inode *parent,
- struct inode *child)
-{
- const struct f2fs_crypt_info *parent_ci, *child_ci;
- struct f2fs_encryption_context parent_ctx, child_ctx;
- int res;
-
- /* No restrictions on file types which are never encrypted */
- if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) &&
- !S_ISLNK(child->i_mode))
- return 1;
-
- /* No restrictions if the parent directory is unencrypted */
- if (!f2fs_encrypted_inode(parent))
- return 1;
-
- /* Encrypted directories must not contain unencrypted files */
- if (!f2fs_encrypted_inode(child))
- return 0;
-
- /*
- * Both parent and child are encrypted, so verify they use the same
- * encryption policy. Compare the fscrypt_info structs if the keys are
- * available, otherwise retrieve and compare the fscrypt_contexts.
- *
- * Note that the fscrypt_context retrieval will be required frequently
- * when accessing an encrypted directory tree without the key.
- * Performance-wise this is not a big deal because we already don't
- * really optimize for file access without the key (to the extent that
- * such access is even possible), given that any attempted access
- * already causes a fscrypt_context retrieval and keyring search.
- *
- * In any case, if an unexpected error occurs, fall back to "forbidden".
- */
-
- res = f2fs_get_encryption_info(parent);
- if (res)
- return 0;
- res = f2fs_get_encryption_info(child);
- if (res)
- return 0;
- parent_ci = F2FS_I(parent)->i_crypt_info;
- child_ci = F2FS_I(child)->i_crypt_info;
- if (parent_ci && child_ci) {
- return memcmp(parent_ci->ci_master_key, child_ci->ci_master_key,
- F2FS_KEY_DESCRIPTOR_SIZE) == 0 &&
- (parent_ci->ci_data_mode == child_ci->ci_data_mode) &&
- (parent_ci->ci_filename_mode ==
- child_ci->ci_filename_mode) &&
- (parent_ci->ci_flags == child_ci->ci_flags);
- }
-
- res = f2fs_getxattr(parent, F2FS_XATTR_INDEX_ENCRYPTION,
- F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
- &parent_ctx, sizeof(parent_ctx), NULL);
- if (res != sizeof(parent_ctx))
- return 0;
-
- res = f2fs_getxattr(child, F2FS_XATTR_INDEX_ENCRYPTION,
- F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
- &child_ctx, sizeof(child_ctx), NULL);
- if (res != sizeof(child_ctx))
- return 0;
-
- return memcmp(parent_ctx.master_key_descriptor,
- child_ctx.master_key_descriptor,
- F2FS_KEY_DESCRIPTOR_SIZE) == 0 &&
- (parent_ctx.contents_encryption_mode ==
- child_ctx.contents_encryption_mode) &&
- (parent_ctx.filenames_encryption_mode ==
- child_ctx.filenames_encryption_mode) &&
- (parent_ctx.flags == child_ctx.flags);
-}
-
-/**
- * f2fs_inherit_context() - Sets a child context from its parent
- * @parent: Parent inode from which the context is inherited.
- * @child: Child inode that inherits the context from @parent.
- *
- * Return: Zero on success, non-zero otherwise
- */
-int f2fs_inherit_context(struct inode *parent, struct inode *child,
- struct page *ipage)
-{
- struct f2fs_encryption_context ctx;
- struct f2fs_crypt_info *ci;
- int res;
-
- res = f2fs_get_encryption_info(parent);
- if (res < 0)
- return res;
-
- ci = F2FS_I(parent)->i_crypt_info;
- BUG_ON(ci == NULL);
-
- ctx.format = F2FS_ENCRYPTION_CONTEXT_FORMAT_V1;
-
- ctx.contents_encryption_mode = ci->ci_data_mode;
- ctx.filenames_encryption_mode = ci->ci_filename_mode;
- ctx.flags = ci->ci_flags;
- memcpy(ctx.master_key_descriptor, ci->ci_master_key,
- F2FS_KEY_DESCRIPTOR_SIZE);
-
- get_random_bytes(ctx.nonce, F2FS_KEY_DERIVATION_NONCE_SIZE);
- return f2fs_setxattr(child, F2FS_XATTR_INDEX_ENCRYPTION,
- F2FS_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
- sizeof(ctx), ipage, XATTR_CREATE);
-}
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
+#include <linux/mm.h>
+#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include "f2fs.h"
#include <trace/events/f2fs.h>
#include <trace/events/android_fs.h>
+static bool __is_cp_guaranteed(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+ struct inode *inode;
+ struct f2fs_sb_info *sbi;
+
+ if (!mapping)
+ return false;
+
+ inode = mapping->host;
+ sbi = F2FS_I_SB(inode);
+
+ if (inode->i_ino == F2FS_META_INO(sbi) ||
+ inode->i_ino == F2FS_NODE_INO(sbi) ||
+ S_ISDIR(inode->i_mode) ||
+ is_cold_data(page))
+ return true;
+ return false;
+}
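
__is_cp_guaranteed() classifies pages whose writeback is covered by checkpoint (meta and node inodes, directory data, cold data) so they can be accounted apart from ordinary data writeback. A standalone model of the predicate, using a simplified page_info struct invented purely for illustration:

#include <stdbool.h>
#include <stdio.h>

struct page_info {
	bool is_meta_ino;
	bool is_node_ino;
	bool is_dir;
	bool is_cold;
};

/* Same shape as the kernel predicate: any of these page classes has
 * its writeback guaranteed by checkpoint. */
static bool is_cp_guaranteed(const struct page_info *p)
{
	return p->is_meta_ino || p->is_node_ino || p->is_dir || p->is_cold;
}

int main(void)
{
	struct page_info dirent = { .is_dir = true };
	struct page_info user_data = { 0 };

	printf("%d %d\n", is_cp_guaranteed(&dirent),
	       is_cp_guaranteed(&user_data)); /* 1 0 */
	return 0;
}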
+
static void f2fs_read_end_io(struct bio *bio)
{
struct bio_vec *bvec;
int i;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
+ f2fs_show_injection_info(FAULT_IO);
+ bio->bi_error = -EIO;
+ }
+#endif
+
if (f2fs_bio_encrypted(bio)) {
if (bio->bi_error) {
- f2fs_release_crypto_ctx(bio->bi_private);
+ fscrypt_release_ctx(bio->bi_private);
} else {
- f2fs_end_io_crypto_work(bio->bi_private, bio);
+ fscrypt_decrypt_bio_pages(bio->bi_private, bio);
return;
}
}
struct page *page = bvec->bv_page;
if (!bio->bi_error) {
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
} else {
ClearPageUptodate(page);
SetPageError(page);
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
+ enum count_type type = WB_DATA_TYPE(page);
- f2fs_restore_and_release_control_page(&page);
+ if (IS_DUMMY_WRITTEN_PAGE(page)) {
+ set_page_private(page, (unsigned long)NULL);
+ ClearPagePrivate(page);
+ unlock_page(page);
+ mempool_free(page, sbi->write_io_dummy);
+
+ if (unlikely(bio->bi_error))
+ f2fs_stop_checkpoint(sbi, true);
+ continue;
+ }
+
+ fscrypt_pullback_bio_page(&page, true);
if (unlikely(bio->bi_error)) {
- set_page_dirty(page);
set_bit(AS_EIO, &page->mapping->flags);
- f2fs_stop_checkpoint(sbi);
+ f2fs_stop_checkpoint(sbi, true);
}
+ dec_page_count(sbi, type);
+ clear_cold_data(page);
end_page_writeback(page);
- dec_page_count(sbi, F2FS_WRITEBACK);
}
-
- if (!get_pages(sbi, F2FS_WRITEBACK) &&
- !list_empty(&sbi->cp_wait.task_list))
+ if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
+ wq_has_sleeper(&sbi->cp_wait))
wake_up(&sbi->cp_wait);
bio_put(bio);
}
/*
+ * Map a block address to its target block device (multi-device support).
+ */
+struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
+ block_t blk_addr, struct bio *bio)
+{
+ struct block_device *bdev = sbi->sb->s_bdev;
+ int i;
+
+ for (i = 0; i < sbi->s_ndevs; i++) {
+ if (FDEV(i).start_blk <= blk_addr &&
+ FDEV(i).end_blk >= blk_addr) {
+ blk_addr -= FDEV(i).start_blk;
+ bdev = FDEV(i).bdev;
+ break;
+ }
+ }
+ if (bio) {
+ bio->bi_bdev = bdev;
+ bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
+ }
+ return bdev;
+}
+
+int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
+{
+ int i;
+
+ for (i = 0; i < sbi->s_ndevs; i++)
+ if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
+ return i;
+ return 0;
+}
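
The two helpers above resolve a global block address onto one of several backing devices by scanning inclusive [start_blk, end_blk] ranges, falling back to device 0. A standalone userspace model of the same lookup, assuming a plain array of ranges in place of FDEV():

#include <stdio.h>

/* Each device covers an inclusive [start_blk, end_blk] slice of the
 * global block address space; first match wins, device 0 by default. */
struct dev_range {
	unsigned int start_blk;
	unsigned int end_blk;
};

static int target_device_index(const struct dev_range *devs, int ndevs,
			       unsigned int blkaddr)
{
	for (int i = 0; i < ndevs; i++)
		if (devs[i].start_blk <= blkaddr &&
		    blkaddr <= devs[i].end_blk)
			return i;
	return 0;
}

int main(void)
{
	const struct dev_range devs[] = { { 0, 999 }, { 1000, 1999 } };

	printf("%d %d\n", target_device_index(devs, 2, 500),
	       target_device_index(devs, 2, 1500)); /* 0 1 */
	return 0;
}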
+
+static bool __same_bdev(struct f2fs_sb_info *sbi,
+ block_t blk_addr, struct bio *bio)
+{
+ return f2fs_target_device(sbi, blk_addr, NULL) == bio->bi_bdev;
+}
+
+/*
* Low-level block read/write IO operations.
*/
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
bio = f2fs_bio_alloc(npages);
- bio->bi_bdev = sbi->sb->s_bdev;
- bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
+ f2fs_target_device(sbi, blk_addr, bio);
bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
bio->bi_private = is_read ? NULL : sbi;
return bio;
}
+static inline void __submit_bio(struct f2fs_sb_info *sbi,
+ struct bio *bio, enum page_type type)
+{
+ if (!is_read_io(bio_op(bio))) {
+ unsigned int start;
+
+ if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
+ current->plug && (type == DATA || type == NODE))
+ blk_finish_plug(current->plug);
+
+ if (type != DATA && type != NODE)
+ goto submit_io;
+
+ start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
+ start %= F2FS_IO_SIZE(sbi);
+
+ if (start == 0)
+ goto submit_io;
+
+ /* fill dummy pages */
+ for (; start < F2FS_IO_SIZE(sbi); start++) {
+ struct page *page =
+ mempool_alloc(sbi->write_io_dummy,
+ GFP_NOIO | __GFP_ZERO | __GFP_NOFAIL);
+ f2fs_bug_on(sbi, !page);
+
+ SetPagePrivate(page);
+ set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
+ lock_page(page);
+ if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
+ f2fs_bug_on(sbi, 1);
+ }
+ /*
+ * In the NODE case, we lose the next block address chain. So, we
+ * need to do a checkpoint in f2fs_sync_file.
+ */
+ if (type == NODE)
+ set_sbi_flag(sbi, SBI_NEED_CP);
+ }
+submit_io:
+ if (is_read_io(bio_op(bio)))
+ trace_f2fs_submit_read_bio(sbi->sb, type, bio);
+ else
+ trace_f2fs_submit_write_bio(sbi->sb, type, bio);
+ submit_bio(bio_op(bio), bio);
+}
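
The dummy-page logic in __submit_bio() pads DATA/NODE write bios out to the filesystem's IO unit so that devices seeing only aligned writes (e.g. zoned storage) stay happy. The arithmetic is easy to model on its own; the sketch below is a standalone program, not kernel code:

#include <stdio.h>

/* With an IO unit of io_size blocks and a bio currently holding
 * nblocks, __submit_bio() appends (io_size - nblocks % io_size)
 * dummy pages, or none when the bio is already aligned. */
static unsigned int dummy_pages_needed(unsigned int nblocks,
				       unsigned int io_size)
{
	unsigned int start = nblocks % io_size;

	return start ? io_size - start : 0;
}

int main(void)
{
	/* e.g. IO unit of 8 blocks, bio holding 5 -> 3 dummy pages */
	printf("%u\n", dummy_pages_needed(5, 8));  /* 3 */
	printf("%u\n", dummy_pages_needed(16, 8)); /* 0 */
	return 0;
}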
+
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
struct f2fs_io_info *fio = &io->fio;
if (!io->bio)
return;
- if (is_read_io(fio->rw))
- trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
+ bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
+
+ if (is_read_io(fio->op))
+ trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
else
- trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);
+ trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
- submit_bio(fio->rw, io->bio);
+ __submit_bio(io->sbi, io->bio, fio->type);
io->bio = NULL;
}
-void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
- enum page_type type, int rw)
+static bool __has_merged_page(struct f2fs_bio_info *io,
+ struct inode *inode, nid_t ino, pgoff_t idx)
+{
+ struct bio_vec *bvec;
+ struct page *target;
+ int i;
+
+ if (!io->bio)
+ return false;
+
+ if (!inode && !ino)
+ return true;
+
+ bio_for_each_segment_all(bvec, io->bio, i) {
+
+ if (bvec->bv_page->mapping)
+ target = bvec->bv_page;
+ else
+ target = fscrypt_control_page(bvec->bv_page);
+
+ if (idx != target->index)
+ continue;
+
+ if (inode && inode == target->mapping->host)
+ return true;
+ if (ino && ino == ino_of_node(target))
+ return true;
+ }
+
+ return false;
+}
+
+static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
+ nid_t ino, pgoff_t idx, enum page_type type)
{
enum page_type btype = PAGE_TYPE_OF_BIO(type);
+ enum temp_type temp;
struct f2fs_bio_info *io;
+ bool ret = false;
+
+ for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
+ io = sbi->write_io[btype] + temp;
+
+ down_read(&io->io_rwsem);
+ ret = __has_merged_page(io, inode, ino, idx);
+ up_read(&io->io_rwsem);
+
+		/* TODO: for now, only the HOT temp is used for meta pages. */
+ if (ret || btype == META)
+ break;
+ }
+ return ret;
+}
- io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];
+static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
+ enum page_type type, enum temp_type temp)
+{
+ enum page_type btype = PAGE_TYPE_OF_BIO(type);
+ struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
down_write(&io->io_rwsem);
/* change META to META_FLUSH in the checkpoint procedure */
if (type >= META_FLUSH) {
io->fio.type = META_FLUSH;
- if (test_opt(sbi, NOBARRIER))
- io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
- else
- io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
+ io->fio.op = REQ_OP_WRITE;
+ io->fio.op_flags = REQ_META | REQ_PRIO;
+ if (!test_opt(sbi, NOBARRIER))
+ io->fio.op_flags |= WRITE_FLUSH | REQ_FUA;
}
__submit_merged_bio(io);
up_write(&io->io_rwsem);
}
+static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
+ struct inode *inode, nid_t ino, pgoff_t idx,
+ enum page_type type, bool force)
+{
+ enum temp_type temp;
+
+ if (!force && !has_merged_page(sbi, inode, ino, idx, type))
+ return;
+
+ for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
+
+ __f2fs_submit_merged_write(sbi, type, temp);
+
+		/* TODO: for now, only the HOT temp is used for meta pages. */
+ if (type >= META)
+ break;
+ }
+}
+
+void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
+{
+ __submit_merged_write_cond(sbi, NULL, 0, 0, type, true);
+}
+
+void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
+ struct inode *inode, nid_t ino, pgoff_t idx,
+ enum page_type type)
+{
+ __submit_merged_write_cond(sbi, inode, ino, idx, type, false);
+}
+
+void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
+{
+ f2fs_submit_merged_write(sbi, DATA);
+ f2fs_submit_merged_write(sbi, NODE);
+ f2fs_submit_merged_write(sbi, META);
+}
+
/*
* Fill the locked page with data located in the block address.
- * Return unlocked page.
+ * A caller needs to unlock the page on failure.
*/
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
struct bio *bio;
- struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+ struct page *page = fio->encrypted_page ?
+ fio->encrypted_page : fio->page;
trace_f2fs_submit_page_bio(page, fio);
f2fs_trace_ios(fio, 0);
/* Allocate a new bio */
- bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw));
+ bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op));
- if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
+ if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
return -EFAULT;
}
+ bio_set_op_attrs(bio, fio->op, fio->op_flags);
- submit_bio(fio->rw, bio);
+ __submit_bio(fio->sbi, bio, fio->type);
+
+ if (!is_read_io(fio->op))
+ inc_page_count(fio->sbi, WB_DATA_TYPE(fio->page));
return 0;
}
-void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
+int f2fs_submit_page_write(struct f2fs_io_info *fio)
{
struct f2fs_sb_info *sbi = fio->sbi;
enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
- struct f2fs_bio_info *io;
- bool is_read = is_read_io(fio->rw);
+ struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
struct page *bio_page;
+ int err = 0;
- io = is_read ? &sbi->read_io : &sbi->write_io[btype];
-
- verify_block_addr(sbi, fio->blk_addr);
+ f2fs_bug_on(sbi, is_read_io(fio->op));
down_write(&io->io_rwsem);
+next:
+ if (fio->in_list) {
+ spin_lock(&io->io_lock);
+ if (list_empty(&io->io_list)) {
+ spin_unlock(&io->io_lock);
+ goto out_fail;
+ }
+ fio = list_first_entry(&io->io_list,
+ struct f2fs_io_info, list);
+ list_del(&fio->list);
+ spin_unlock(&io->io_lock);
+ }
+
+ if (fio->old_blkaddr != NEW_ADDR)
+ verify_block_addr(sbi, fio->old_blkaddr);
+ verify_block_addr(sbi, fio->new_blkaddr);
+
+ bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
- if (!is_read)
- inc_page_count(sbi, F2FS_WRITEBACK);
+ /* set submitted = 1 as a return value */
+ fio->submitted = 1;
- if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
- io->fio.rw != fio->rw))
+ inc_page_count(sbi, WB_DATA_TYPE(bio_page));
+
+ if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
+ (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
+ !__same_bdev(sbi, fio->new_blkaddr, io->bio)))
__submit_merged_bio(io);
alloc_new:
if (io->bio == NULL) {
- int bio_blocks = MAX_BIO_BLOCKS(sbi);
-
- io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
+ if ((fio->type == DATA || fio->type == NODE) &&
+ fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
+ err = -EAGAIN;
+ dec_page_count(sbi, WB_DATA_TYPE(bio_page));
+ goto out_fail;
+ }
+ io->bio = __bio_alloc(sbi, fio->new_blkaddr,
+ BIO_MAX_PAGES, false);
io->fio = *fio;
}
- bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
-
- if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
- PAGE_CACHE_SIZE) {
+ if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
__submit_merged_bio(io);
goto alloc_new;
}
- io->last_block_in_bio = fio->blk_addr;
+ io->last_block_in_bio = fio->new_blkaddr;
f2fs_trace_ios(fio, 0);
+ trace_f2fs_submit_page_write(fio->page, fio);
+
+ if (fio->in_list)
+ goto next;
+out_fail:
up_write(&io->io_rwsem);
- trace_f2fs_submit_page_mbio(fio->page, fio);
+ return err;
+}
+
+static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
+ unsigned nr_pages)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct fscrypt_ctx *ctx = NULL;
+ struct bio *bio;
+
+ if (f2fs_encrypted_file(inode)) {
+ ctx = fscrypt_get_ctx(inode, GFP_NOFS);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+		/* wait for the page to be moved by GC cleaning */
+ f2fs_wait_on_block_writeback(sbi, blkaddr);
+ }
+
+ bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
+ if (!bio) {
+ if (ctx)
+ fscrypt_release_ctx(ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+ f2fs_target_device(sbi, blkaddr, bio);
+ bio->bi_end_io = f2fs_read_end_io;
+ bio->bi_private = ctx;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
+
+ return bio;
+}
+
+/* This can handle encrypted pages as well */
+static int f2fs_submit_page_read(struct inode *inode, struct page *page,
+ block_t blkaddr)
+{
+ struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1);
+
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
+ bio_put(bio);
+ return -EFAULT;
+ }
+ __submit_bio(F2FS_I_SB(inode), bio, DATA);
+ return 0;
+}
+
+static void __set_data_blkaddr(struct dnode_of_data *dn)
+{
+ struct f2fs_node *rn = F2FS_NODE(dn->node_page);
+ __le32 *addr_array;
+ int base = 0;
+
+ if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
+ base = get_extra_isize(dn->inode);
+
+ /* Get physical address of data block */
+ addr_array = blkaddr_in_node(rn);
+ addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}
/*
*/
void set_data_blkaddr(struct dnode_of_data *dn)
{
- struct f2fs_node *rn;
- __le32 *addr_array;
- struct page *node_page = dn->node_page;
- unsigned int ofs_in_node = dn->ofs_in_node;
-
- f2fs_wait_on_page_writeback(node_page, NODE);
-
- rn = F2FS_NODE(node_page);
+ f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
+ __set_data_blkaddr(dn);
+ if (set_page_dirty(dn->node_page))
+ dn->node_changed = true;
+}
- /* Get physical address of data block */
- addr_array = blkaddr_in_node(rn);
- addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
- set_page_dirty(node_page);
+void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
+{
+ dn->data_blkaddr = blkaddr;
+ set_data_blkaddr(dn);
+ f2fs_update_extent_cache(dn);
}
-int reserve_new_block(struct dnode_of_data *dn)
+/* dn->ofs_in_node will be returned with up-to-date last block pointer */
+int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+ int err;
+
+ if (!count)
+ return 0;
- if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
+ if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
- if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
- return -ENOSPC;
+ if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
+ return err;
- trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);
+ trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
+ dn->ofs_in_node, count);
- dn->data_blkaddr = NEW_ADDR;
- set_data_blkaddr(dn);
- mark_inode_dirty(dn->inode);
- sync_inode_page(dn);
+ f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
+
+ for (; count > 0; dn->ofs_in_node++) {
+ block_t blkaddr = datablock_addr(dn->inode,
+ dn->node_page, dn->ofs_in_node);
+ if (blkaddr == NULL_ADDR) {
+ dn->data_blkaddr = NEW_ADDR;
+ __set_data_blkaddr(dn);
+ count--;
+ }
+ }
+
+ if (set_page_dirty(dn->node_page))
+ dn->node_changed = true;
return 0;
}
+/* Should keep dn->ofs_in_node unchanged */
+int reserve_new_block(struct dnode_of_data *dn)
+{
+ unsigned int ofs_in_node = dn->ofs_in_node;
+ int ret;
+
+ ret = reserve_new_blocks(dn, 1);
+ dn->ofs_in_node = ofs_in_node;
+ return ret;
+}
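
reserve_new_blocks() walks the dnode's block-address slots from ofs_in_node, claiming up to count unallocated (NULL_ADDR) entries by marking them NEW_ADDR. A standalone model of that walk is sketched below; it assumes, as the kernel guarantees via inc_valid_block_count() and the dnode bounds, that at least count free slots exist past the starting offset:

#include <stdio.h>

#define NULL_ADDR 0u
#define NEW_ADDR  0xFFFFFFFFu

/* Claim `count` free slots starting at *ofs; like the kernel loop,
 * *ofs ends up one past the last slot examined. */
static void reserve_blocks(unsigned int *addrs, unsigned int *ofs,
			   unsigned int count)
{
	for (; count > 0; (*ofs)++) {
		if (addrs[*ofs] == NULL_ADDR) {
			addrs[*ofs] = NEW_ADDR;
			count--;
		}
	}
}

int main(void)
{
	unsigned int addrs[6] = { 7, NULL_ADDR, 9, NULL_ADDR, NULL_ADDR, 11 };
	unsigned int ofs = 0;

	reserve_blocks(addrs, &ofs, 2);
	printf("ofs=%u addrs[1]=%#x addrs[3]=%#x\n",
	       ofs, addrs[1], addrs[3]); /* ofs=4, both NEW_ADDR */
	return 0;
}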
+
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
bool need_put = dn->inode_page ? false : true;
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
- struct extent_info ei;
+ struct extent_info ei = {0,0,0};
struct inode *inode = dn->inode;
if (f2fs_lookup_extent_cache(inode, index, &ei)) {
}
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
- int rw, bool for_write)
+ int op_flags, bool for_write)
{
struct address_space *mapping = inode->i_mapping;
struct dnode_of_data dn;
struct page *page;
- struct extent_info ei;
+ struct extent_info ei = {0,0,0};
int err;
- struct f2fs_io_info fio = {
- .sbi = F2FS_I_SB(inode),
- .type = DATA,
- .rw = rw,
- .encrypted_page = NULL,
- };
-
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
- return read_mapping_page(mapping, index, NULL);
page = f2fs_grab_cache_page(mapping, index, for_write);
if (!page)
* see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
*/
if (dn.data_blkaddr == NEW_ADDR) {
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
- SetPageUptodate(page);
+ zero_user_segment(page, 0, PAGE_SIZE);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
unlock_page(page);
return page;
}
- fio.blk_addr = dn.data_blkaddr;
- fio.page = page;
- err = f2fs_submit_page_bio(&fio);
+ err = f2fs_submit_page_read(inode, page, dn.data_blkaddr);
if (err)
goto put_err;
return page;
return page;
f2fs_put_page(page, 0);
- page = get_read_data_page(inode, index, READ_SYNC, false);
+ page = get_read_data_page(inode, index, REQ_SYNC, false);
if (IS_ERR(page))
return page;
struct address_space *mapping = inode->i_mapping;
struct page *page;
repeat:
- page = get_read_data_page(inode, index, READ_SYNC, for_write);
+ page = get_read_data_page(inode, index, REQ_SYNC, for_write);
if (IS_ERR(page))
return page;
/* wait for read completion */
lock_page(page);
- if (unlikely(!PageUptodate(page))) {
- f2fs_put_page(page, 1);
- return ERR_PTR(-EIO);
- }
if (unlikely(page->mapping != mapping)) {
f2fs_put_page(page, 1);
goto repeat;
}
+ if (unlikely(!PageUptodate(page))) {
+ f2fs_put_page(page, 1);
+ return ERR_PTR(-EIO);
+ }
return page;
}
struct page *page;
struct dnode_of_data dn;
int err;
-repeat:
+
page = f2fs_grab_cache_page(mapping, index, true);
if (!page) {
/*
goto got_it;
if (dn.data_blkaddr == NEW_ADDR) {
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
- SetPageUptodate(page);
+ zero_user_segment(page, 0, PAGE_SIZE);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
} else {
f2fs_put_page(page, 1);
- page = get_read_data_page(inode, index, READ_SYNC, true);
+ /* if ipage exists, blkaddr should be NEW_ADDR */
+ f2fs_bug_on(F2FS_I_SB(inode), ipage);
+ page = get_lock_data_page(inode, index, true);
if (IS_ERR(page))
- goto repeat;
-
- /* wait for read completion */
- lock_page(page);
+ return page;
}
got_it:
if (new_i_size && i_size_read(inode) <
- ((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
- i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
- /* Only the directory inode sets new_i_size */
- set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
- }
+ ((loff_t)(index + 1) << PAGE_SHIFT))
+ f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
return page;
}
static int __allocate_data_block(struct dnode_of_data *dn)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
- struct f2fs_inode_info *fi = F2FS_I(dn->inode);
struct f2fs_summary sum;
struct node_info ni;
- int seg = CURSEG_WARM_DATA;
pgoff_t fofs;
+ blkcnt_t count = 1;
+ int err;
- if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
+ if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
- dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
+ dn->data_blkaddr = datablock_addr(dn->inode,
+ dn->node_page, dn->ofs_in_node);
if (dn->data_blkaddr == NEW_ADDR)
goto alloc;
- if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
- return -ENOSPC;
+ if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
+ return err;
alloc:
get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
- if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
- seg = CURSEG_DIRECT_IO;
-
allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
- &sum, seg);
+ &sum, CURSEG_WARM_DATA, NULL, false);
set_data_blkaddr(dn);
/* update i_size */
- fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
+ fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
dn->ofs_in_node;
- if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
- i_size_write(dn->inode,
- ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));
-
- /* direct IO doesn't use extent cache to maximize the performance */
- f2fs_drop_largest_extent(dn->inode, fofs);
-
+ if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
+ f2fs_i_size_write(dn->inode,
+ ((loff_t)(fofs + 1) << PAGE_SHIFT));
return 0;
}
-static void __allocate_data_blocks(struct inode *inode, loff_t offset,
- size_t count)
+static inline bool __force_buffered_io(struct inode *inode, int rw)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct dnode_of_data dn;
- u64 start = F2FS_BYTES_TO_BLK(offset);
- u64 len = F2FS_BYTES_TO_BLK(count);
- bool allocated;
- u64 end_offset;
-
- while (len) {
- f2fs_balance_fs(sbi);
- f2fs_lock_op(sbi);
-
- /* When reading holes, we need its node page */
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- if (get_dnode_of_data(&dn, start, ALLOC_NODE))
- goto out;
-
- allocated = false;
- end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+ return (f2fs_encrypted_file(inode) ||
+ (rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
+ F2FS_I_SB(inode)->s_ndevs);
+}
- while (dn.ofs_in_node < end_offset && len) {
- block_t blkaddr;
+int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct f2fs_map_blocks map;
+ int err = 0;
- if (unlikely(f2fs_cp_error(sbi)))
- goto sync_out;
+ if (is_inode_flag_set(inode, FI_NO_PREALLOC))
+ return 0;
- blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
- if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
- if (__allocate_data_block(&dn))
- goto sync_out;
- allocated = true;
- }
- len--;
- start++;
- dn.ofs_in_node++;
- }
+ map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
+ map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
+ if (map.m_len > map.m_lblk)
+ map.m_len -= map.m_lblk;
+ else
+ map.m_len = 0;
- if (allocated)
- sync_inode_page(&dn);
+ map.m_next_pgofs = NULL;
- f2fs_put_dnode(&dn);
- f2fs_unlock_op(sbi);
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
+ return f2fs_map_blocks(inode, &map, 1,
+ __force_buffered_io(inode, WRITE) ?
+ F2FS_GET_BLOCK_PRE_AIO :
+ F2FS_GET_BLOCK_PRE_DIO);
}
- return;
+ if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
+ }
+ if (!f2fs_has_inline_data(inode))
+ return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+ return err;
+}
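
The map setup in f2fs_preallocate_blocks() converts the byte range of a pending write into whole filesystem blocks: the start is rounded up (F2FS_BLK_ALIGN) and the end rounded down (F2FS_BYTES_TO_BLK), so only fully covered blocks get preallocated. A standalone sketch of that conversion, assuming the usual 4KB block size:

#include <stdio.h>

#define BLKSIZE_BITS 12
#define BLKSIZE (1ULL << BLKSIZE_BITS)

static void write_range_to_blocks(unsigned long long pos,
				  unsigned long long count,
				  unsigned long long *lblk,
				  unsigned long long *len)
{
	*lblk = (pos + BLKSIZE - 1) >> BLKSIZE_BITS;	/* F2FS_BLK_ALIGN */
	*len = (pos + count) >> BLKSIZE_BITS;		/* F2FS_BYTES_TO_BLK */
	*len = (*len > *lblk) ? *len - *lblk : 0;
}

int main(void)
{
	unsigned long long lblk, len;

	/* write of 3 blocks + 100 bytes starting at block 1 */
	write_range_to_blocks(4096, 3 * 4096 + 100, &lblk, &len);
	printf("lblk=%llu len=%llu\n", lblk, len); /* lblk=1 len=3 */
	return 0;
}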
-sync_out:
- if (allocated)
- sync_inode_page(&dn);
- f2fs_put_dnode(&dn);
-out:
- f2fs_unlock_op(sbi);
- return;
+static inline void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
+{
+ if (flag == F2FS_GET_BLOCK_PRE_AIO) {
+ if (lock)
+ down_read(&sbi->node_change);
+ else
+ up_read(&sbi->node_change);
+ } else {
+ if (lock)
+ f2fs_lock_op(sbi);
+ else
+ f2fs_unlock_op(sbi);
+ }
}
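
__do_map_lock() picks a lock by mapping flag: a plausible reading is that PRE_AIO preallocation only needs to exclude concurrent node-layout changes, so it takes the lighter node_change read lock, while every other mapping path takes the global f2fs_lock_op(). A toy model of the selection, with strings standing in for the actual lock calls:

#include <stdio.h>

enum map_flag { GET_BLOCK_DEFAULT, GET_BLOCK_PRE_AIO, GET_BLOCK_PRE_DIO };

static const char *lock_for_flag(enum map_flag flag)
{
	return flag == GET_BLOCK_PRE_AIO ?
		"down_read(&sbi->node_change)" : "f2fs_lock_op(sbi)";
}

int main(void)
{
	printf("%s\n", lock_for_flag(GET_BLOCK_PRE_AIO));
	printf("%s\n", lock_for_flag(GET_BLOCK_DEFAULT));
	return 0;
}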
/*
* b. do not use extent cache for better performance
* c. give the block addresses to blockdev
*/
-static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
+int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
int create, int flag)
{
unsigned int maxblocks = map->m_len;
struct dnode_of_data dn;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
- pgoff_t pgofs, end_offset;
+ int mode = create ? ALLOC_NODE : LOOKUP_NODE;
+ pgoff_t pgofs, end_offset, end;
int err = 0, ofs = 1;
- struct extent_info ei;
- bool allocated = false;
+ unsigned int ofs_in_node, last_ofs_in_node;
+ blkcnt_t prealloc;
+ struct extent_info ei = {0,0,0};
+ block_t blkaddr;
+
+ if (!maxblocks)
+ return 0;
map->m_len = 0;
map->m_flags = 0;
/* it only supports block size == page size */
pgofs = (pgoff_t)map->m_lblk;
+ end = pgofs + maxblocks;
- if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
+ if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
map->m_pblk = ei.blk + pgofs - ei.fofs;
map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
map->m_flags = F2FS_MAP_MAPPED;
goto out;
}
+next_dnode:
if (create)
- f2fs_lock_op(F2FS_I_SB(inode));
+ __do_map_lock(sbi, flag, true);
/* When reading holes, we need its node page */
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, pgofs, mode);
if (err) {
- if (err == -ENOENT)
+ if (flag == F2FS_GET_BLOCK_BMAP)
+ map->m_pblk = 0;
+ if (err == -ENOENT) {
err = 0;
+ if (map->m_next_pgofs)
+ *map->m_next_pgofs =
+ get_next_page_offset(&dn, pgofs);
+ }
goto unlock_out;
}
- if (dn.data_blkaddr == NEW_ADDR || dn.data_blkaddr == NULL_ADDR) {
+ prealloc = 0;
+ last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
+ end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+
+next_block:
+ blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
+
+ if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
if (create) {
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
- goto put_out;
+ goto sync_out;
+ }
+ if (flag == F2FS_GET_BLOCK_PRE_AIO) {
+ if (blkaddr == NULL_ADDR) {
+ prealloc++;
+ last_ofs_in_node = dn.ofs_in_node;
+ }
+ } else {
+ err = __allocate_data_block(&dn);
+ if (!err)
+ set_inode_flag(inode, FI_APPEND_WRITE);
}
- err = __allocate_data_block(&dn);
if (err)
- goto put_out;
- allocated = true;
- map->m_flags = F2FS_MAP_NEW;
+ goto sync_out;
+ map->m_flags |= F2FS_MAP_NEW;
+ blkaddr = dn.data_blkaddr;
} else {
- if (flag != F2FS_GET_BLOCK_FIEMAP ||
- dn.data_blkaddr != NEW_ADDR) {
- if (flag == F2FS_GET_BLOCK_BMAP)
- err = -ENOENT;
- goto put_out;
+ if (flag == F2FS_GET_BLOCK_BMAP) {
+ map->m_pblk = 0;
+ goto sync_out;
}
-
- /*
- * preallocated unwritten block should be mapped
- * for fiemap.
- */
- if (dn.data_blkaddr == NEW_ADDR)
- map->m_flags = F2FS_MAP_UNWRITTEN;
+ if (flag == F2FS_GET_BLOCK_FIEMAP &&
+ blkaddr == NULL_ADDR) {
+ if (map->m_next_pgofs)
+ *map->m_next_pgofs = pgofs + 1;
+ }
+ if (flag != F2FS_GET_BLOCK_FIEMAP ||
+ blkaddr != NEW_ADDR)
+ goto sync_out;
}
}
- map->m_flags |= F2FS_MAP_MAPPED;
- map->m_pblk = dn.data_blkaddr;
- map->m_len = 1;
+ if (flag == F2FS_GET_BLOCK_PRE_AIO)
+ goto skip;
+
+ if (map->m_len == 0) {
+ /* preallocated unwritten block should be mapped for fiemap. */
+ if (blkaddr == NEW_ADDR)
+ map->m_flags |= F2FS_MAP_UNWRITTEN;
+ map->m_flags |= F2FS_MAP_MAPPED;
+
+ map->m_pblk = blkaddr;
+ map->m_len = 1;
+ } else if ((map->m_pblk != NEW_ADDR &&
+ blkaddr == (map->m_pblk + ofs)) ||
+ (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
+ flag == F2FS_GET_BLOCK_PRE_DIO) {
+ ofs++;
+ map->m_len++;
+ } else {
+ goto sync_out;
+ }
- end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+skip:
dn.ofs_in_node++;
pgofs++;
-get_next:
- if (dn.ofs_in_node >= end_offset) {
- if (allocated)
- sync_inode_page(&dn);
- allocated = false;
- f2fs_put_dnode(&dn);
+ /* preallocate blocks in batch for one dnode page */
+ if (flag == F2FS_GET_BLOCK_PRE_AIO &&
+ (pgofs == end || dn.ofs_in_node == end_offset)) {
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- err = get_dnode_of_data(&dn, pgofs, mode);
- if (err) {
- if (err == -ENOENT)
- err = 0;
- goto unlock_out;
- }
+ dn.ofs_in_node = ofs_in_node;
+ err = reserve_new_blocks(&dn, prealloc);
+ if (err)
+ goto sync_out;
- end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+ map->m_len += dn.ofs_in_node - ofs_in_node;
+ if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
+ err = -ENOSPC;
+ goto sync_out;
+ }
+ dn.ofs_in_node = end_offset;
}
- if (maxblocks > map->m_len) {
- block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+ if (pgofs >= end)
+ goto sync_out;
+ else if (dn.ofs_in_node < end_offset)
+ goto next_block;
- if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
- if (create) {
- if (unlikely(f2fs_cp_error(sbi))) {
- err = -EIO;
- goto sync_out;
- }
- err = __allocate_data_block(&dn);
- if (err)
- goto sync_out;
- allocated = true;
- map->m_flags |= F2FS_MAP_NEW;
- blkaddr = dn.data_blkaddr;
- } else {
- /*
- * we only merge preallocated unwritten blocks
- * for fiemap.
- */
- if (flag != F2FS_GET_BLOCK_FIEMAP ||
- blkaddr != NEW_ADDR)
- goto sync_out;
- }
- }
+ f2fs_put_dnode(&dn);
- /* Give more consecutive addresses for the readahead */
- if ((map->m_pblk != NEW_ADDR &&
- blkaddr == (map->m_pblk + ofs)) ||
- (map->m_pblk == NEW_ADDR &&
- blkaddr == NEW_ADDR)) {
- ofs++;
- dn.ofs_in_node++;
- pgofs++;
- map->m_len++;
- goto get_next;
- }
+ if (create) {
+ __do_map_lock(sbi, flag, false);
+ f2fs_balance_fs(sbi, dn.node_changed);
}
+ goto next_dnode;
+
sync_out:
- if (allocated)
- sync_inode_page(&dn);
-put_out:
f2fs_put_dnode(&dn);
unlock_out:
- if (create)
- f2fs_unlock_op(F2FS_I_SB(inode));
+ if (create) {
+ __do_map_lock(sbi, flag, false);
+ f2fs_balance_fs(sbi, dn.node_changed);
+ }
out:
trace_f2fs_map_blocks(inode, map, err);
return err;
}
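
The extent-growing test in f2fs_map_blocks() extends the current mapping when the new block is physically contiguous with it, or when both sides are preallocated (NEW_ADDR) blocks; the additional PRE_DIO case is omitted from the sketch below, which is a standalone model rather than the kernel condition verbatim:

#include <stdbool.h>
#include <stdio.h>

#define NEW_ADDR 0xFFFFFFFFu

static bool can_merge(unsigned int m_pblk, unsigned int ofs,
		      unsigned int blkaddr)
{
	/* contiguous on disk: next physical block follows the extent */
	if (m_pblk != NEW_ADDR && blkaddr == m_pblk + ofs)
		return true;
	/* both preallocated-but-unwritten blocks merge as well */
	if (m_pblk == NEW_ADDR && blkaddr == NEW_ADDR)
		return true;
	return false;
}

int main(void)
{
	printf("%d\n", can_merge(100, 1, 101));		  /* 1 */
	printf("%d\n", can_merge(100, 1, 105));		  /* 0 */
	printf("%d\n", can_merge(NEW_ADDR, 3, NEW_ADDR)); /* 1 */
	return 0;
}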
static int __get_data_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh, int create, int flag)
+ struct buffer_head *bh, int create, int flag,
+ pgoff_t *next_pgofs)
{
struct f2fs_map_blocks map;
- int ret;
+ int err;
map.m_lblk = iblock;
map.m_len = bh->b_size >> inode->i_blkbits;
+ map.m_next_pgofs = next_pgofs;
- ret = f2fs_map_blocks(inode, &map, create, flag);
- if (!ret) {
+ err = f2fs_map_blocks(inode, &map, create, flag);
+ if (!err) {
map_bh(bh, inode->i_sb, map.m_pblk);
bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
- bh->b_size = map.m_len << inode->i_blkbits;
+ bh->b_size = (u64)map.m_len << inode->i_blkbits;
}
- return ret;
+ return err;
}
static int get_data_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create, int flag)
+ struct buffer_head *bh_result, int create, int flag,
+ pgoff_t *next_pgofs)
{
- return __get_data_block(inode, iblock, bh_result, create, flag);
+ return __get_data_block(inode, iblock, bh_result, create,
+ flag, next_pgofs);
}
static int get_data_block_dio(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
return __get_data_block(inode, iblock, bh_result, create,
- F2FS_GET_BLOCK_DIO);
+ F2FS_GET_BLOCK_DEFAULT, NULL);
}
static int get_data_block_bmap(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
/* Block number less than F2FS MAX BLOCKS */
- if (unlikely(iblock >= max_file_size(0)))
+ if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
return -EFBIG;
return __get_data_block(inode, iblock, bh_result, create,
- F2FS_GET_BLOCK_BMAP);
+ F2FS_GET_BLOCK_BMAP, NULL);
}
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
struct buffer_head map_bh;
sector_t start_blk, last_blk;
- loff_t isize = i_size_read(inode);
+ pgoff_t next_pgofs;
u64 logical = 0, phys = 0, size = 0;
u32 flags = 0;
- bool past_eof = false, whole_file = false;
int ret = 0;
ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
return ret;
}
- mutex_lock(&inode->i_mutex);
-
- if (len >= isize) {
- whole_file = true;
- len = isize;
- }
+ inode_lock(inode);
if (logical_to_blk(inode, len) == 0)
len = blk_to_logical(inode, 1);
start_blk = logical_to_blk(inode, start);
last_blk = logical_to_blk(inode, start + len - 1);
+
next:
memset(&map_bh, 0, sizeof(struct buffer_head));
map_bh.b_size = len;
ret = get_data_block(inode, start_blk, &map_bh, 0,
- F2FS_GET_BLOCK_FIEMAP);
+ F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
if (ret)
goto out;
/* HOLE */
if (!buffer_mapped(&map_bh)) {
- start_blk++;
-
- if (!past_eof && blk_to_logical(inode, start_blk) >= isize)
- past_eof = 1;
-
- if (past_eof && size) {
- flags |= FIEMAP_EXTENT_LAST;
- ret = fiemap_fill_next_extent(fieinfo, logical,
- phys, size, flags);
- } else if (size) {
- ret = fiemap_fill_next_extent(fieinfo, logical,
- phys, size, flags);
- size = 0;
- }
-
- /* if we have holes up to/past EOF then we're done */
- if (start_blk > last_blk || past_eof || ret)
- goto out;
- } else {
- if (start_blk > last_blk && !whole_file) {
- ret = fiemap_fill_next_extent(fieinfo, logical,
- phys, size, flags);
- goto out;
- }
+ start_blk = next_pgofs;
- /*
- * if size != 0 then we know we already have an extent
- * to add, so add it.
- */
- if (size) {
- ret = fiemap_fill_next_extent(fieinfo, logical,
- phys, size, flags);
- if (ret)
- goto out;
- }
+ if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
+ F2FS_I_SB(inode)->max_file_blocks))
+ goto prep_next;
- logical = blk_to_logical(inode, start_blk);
- phys = blk_to_logical(inode, map_bh.b_blocknr);
- size = map_bh.b_size;
- flags = 0;
- if (buffer_unwritten(&map_bh))
- flags = FIEMAP_EXTENT_UNWRITTEN;
+ flags |= FIEMAP_EXTENT_LAST;
+ }
- start_blk += logical_to_blk(inode, size);
+ if (size) {
+ if (f2fs_encrypted_inode(inode))
+ flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
- /*
- * If we are past the EOF, then we need to make sure as
- * soon as we find a hole that the last extent we found
- * is marked with FIEMAP_EXTENT_LAST
- */
- if (!past_eof && logical + size >= isize)
- past_eof = true;
+ ret = fiemap_fill_next_extent(fieinfo, logical,
+ phys, size, flags);
}
+
+ if (start_blk > last_blk || ret)
+ goto out;
+
+ logical = blk_to_logical(inode, start_blk);
+ phys = blk_to_logical(inode, map_bh.b_blocknr);
+ size = map_bh.b_size;
+ flags = 0;
+ if (buffer_unwritten(&map_bh))
+ flags = FIEMAP_EXTENT_UNWRITTEN;
+
+ start_blk += logical_to_blk(inode, size);
+
+prep_next:
cond_resched();
if (fatal_signal_pending(current))
ret = -EINTR;
if (ret == 1)
ret = 0;
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
return ret;
}
sector_t last_block;
sector_t last_block_in_file;
sector_t block_nr;
- struct block_device *bdev = inode->i_sb->s_bdev;
struct f2fs_map_blocks map;
map.m_pblk = 0;
map.m_lblk = 0;
map.m_len = 0;
map.m_flags = 0;
+ map.m_next_pgofs = NULL;
for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
- prefetchw(&page->flags);
if (pages) {
- page = list_entry(pages->prev, struct page, lru);
+ page = list_last_entry(pages, struct page, lru);
+
+ prefetchw(&page->flags);
list_del(&page->lru);
if (add_to_page_cache_lru(page, mapping,
page->index, GFP_KERNEL))
map.m_len = last_block - block_in_file;
if (f2fs_map_blocks(inode, &map, 0,
- F2FS_GET_BLOCK_READ))
+ F2FS_GET_BLOCK_DEFAULT))
goto set_error_page;
}
got_it:
goto confused;
}
} else {
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
- SetPageUptodate(page);
+ zero_user_segment(page, 0, PAGE_SIZE);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
unlock_page(page);
goto next_page;
}
* This page will go to BIO. Do we need to send this
* BIO off first?
*/
- if (bio && (last_block_in_bio != block_nr - 1)) {
+ if (bio && (last_block_in_bio != block_nr - 1 ||
+ !__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
- submit_bio(READ, bio);
+ __submit_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
if (bio == NULL) {
- struct f2fs_crypto_ctx *ctx = NULL;
-
- if (f2fs_encrypted_inode(inode) &&
- S_ISREG(inode->i_mode)) {
-
- ctx = f2fs_get_crypto_ctx(inode);
- if (IS_ERR(ctx))
- goto set_error_page;
-
- /* wait the page to be moved by cleaning */
- f2fs_wait_on_encrypted_page_writeback(
- F2FS_I_SB(inode), block_nr);
- }
-
- bio = bio_alloc(GFP_KERNEL,
- min_t(int, nr_pages, BIO_MAX_PAGES));
- if (!bio) {
- if (ctx)
- f2fs_release_crypto_ctx(ctx);
+ bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
+ if (IS_ERR(bio)) {
+ bio = NULL;
goto set_error_page;
}
- bio->bi_bdev = bdev;
- bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
- bio->bi_end_io = f2fs_read_end_io;
- bio->bi_private = ctx;
}
if (bio_add_page(bio, page, blocksize, 0) < blocksize)
goto next_page;
set_error_page:
SetPageError(page);
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ zero_user_segment(page, 0, PAGE_SIZE);
unlock_page(page);
goto next_page;
confused:
if (bio) {
- submit_bio(READ, bio);
+ __submit_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
unlock_page(page);
next_page:
if (pages)
- page_cache_release(page);
+ put_page(page);
}
BUG_ON(pages && !list_empty(pages));
if (bio)
- submit_bio(READ, bio);
+ __submit_bio(F2FS_I_SB(inode), bio, DATA);
return 0;
}
struct list_head *pages, unsigned nr_pages)
{
struct inode *inode = file->f_mapping->host;
- struct page *page = list_entry(pages->prev, struct page, lru);
+ struct page *page = list_last_entry(pages, struct page, lru);
trace_f2fs_readpages(inode, page, nr_pages);
return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}
+static int encrypt_one_page(struct f2fs_io_info *fio)
+{
+ struct inode *inode = fio->page->mapping->host;
+ gfp_t gfp_flags = GFP_NOFS;
+
+ if (!f2fs_encrypted_file(inode))
+ return 0;
+
+ /* wait for GCed encrypted page writeback */
+ f2fs_wait_on_block_writeback(fio->sbi, fio->old_blkaddr);
+
+retry_encrypt:
+ fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
+ PAGE_SIZE, 0, fio->page->index, gfp_flags);
+ if (!IS_ERR(fio->encrypted_page))
+ return 0;
+
+ /* flush pending IOs and wait for a while in the ENOMEM case */
+ if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
+ f2fs_flush_merged_writes(fio->sbi);
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ gfp_flags |= __GFP_NOFAIL;
+ goto retry_encrypt;
+ }
+ return PTR_ERR(fio->encrypted_page);
+}
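
encrypt_one_page() retries a failed bounce-page allocation: on ENOMEM it flushes pending writes, waits for congestion to ease, and retries with __GFP_NOFAIL. The control flow generalizes to any allocate-flush-retry pattern; the sketch below models it in userspace with a simulated first-attempt failure, so none of the kernel calls appear:

#include <errno.h>
#include <stdio.h>

/* Simulated allocator: fails with ENOMEM on the first attempt only. */
static int try_encrypt(int attempt, int *out)
{
	if (attempt == 0)
		return -ENOMEM;
	*out = 42;
	return 0;
}

static int encrypt_with_retry(int *out)
{
	int err = try_encrypt(0, out);

	if (err == -ENOMEM) {
		/* kernel: f2fs_flush_merged_writes() + congestion_wait(),
		 * then retry with __GFP_NOFAIL */
		err = try_encrypt(1, out);
	}
	return err;
}

int main(void)
{
	int v = 0, err = encrypt_with_retry(&v);

	printf("err=%d v=%d\n", err, v); /* err=0 v=42 */
	return 0;
}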
+
+static inline bool need_inplace_update(struct f2fs_io_info *fio)
+{
+ struct inode *inode = fio->page->mapping->host;
+
+ if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
+ return false;
+ if (is_cold_data(fio->page))
+ return false;
+ if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
+ return false;
+
+ return need_inplace_update_policy(inode, fio);
+}
+
+static inline bool valid_ipu_blkaddr(struct f2fs_io_info *fio)
+{
+ if (fio->old_blkaddr == NEW_ADDR)
+ return false;
+ if (fio->old_blkaddr == NULL_ADDR)
+ return false;
+ return true;
+}
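
The in-place-update gate above requires an existing on-disk address: NULL_ADDR (never allocated) and NEW_ADDR (reserved but unwritten) both force the page down the LFS out-of-place path. A standalone restatement of the check:

#include <stdbool.h>
#include <stdio.h>

#define NULL_ADDR 0u
#define NEW_ADDR  0xFFFFFFFFu

/* IPU is only possible when the old block address points at real,
 * previously written storage. */
static bool ipu_possible(unsigned int old_blkaddr)
{
	return old_blkaddr != NULL_ADDR && old_blkaddr != NEW_ADDR;
}

int main(void)
{
	printf("%d %d %d\n", ipu_possible(1234),
	       ipu_possible(NULL_ADDR), ipu_possible(NEW_ADDR)); /* 1 0 0 */
	return 0;
}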
+
int do_write_data_page(struct f2fs_io_info *fio)
{
struct page *page = fio->page;
struct inode *inode = page->mapping->host;
struct dnode_of_data dn;
+ struct extent_info ei = {0,0,0};
+ bool ipu_force = false;
int err = 0;
set_new_dnode(&dn, inode, NULL, NULL, 0);
+ if (need_inplace_update(fio) &&
+ f2fs_lookup_extent_cache(inode, page->index, &ei)) {
+ fio->old_blkaddr = ei.blk + page->index - ei.fofs;
+
+ if (valid_ipu_blkaddr(fio)) {
+ ipu_force = true;
+ fio->need_lock = LOCK_DONE;
+ goto got_it;
+ }
+ }
+
+	/* avoid deadlock between page->lock and f2fs_lock_op */
+ if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
+ return -EAGAIN;
+
err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
if (err)
- return err;
+ goto out;
- fio->blk_addr = dn.data_blkaddr;
+ fio->old_blkaddr = dn.data_blkaddr;
/* This page is already truncated */
- if (fio->blk_addr == NULL_ADDR) {
+ if (fio->old_blkaddr == NULL_ADDR) {
ClearPageUptodate(page);
goto out_writepage;
}
+got_it:
+ /*
+	 * If the current allocation needs SSR, it is better to do in-place
+	 * writes for the updated data.
+ */
+ if (ipu_force || (valid_ipu_blkaddr(fio) && need_inplace_update(fio))) {
+ err = encrypt_one_page(fio);
+ if (err)
+ goto out_writepage;
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
-
- /* wait for GCed encrypted page writeback */
- f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
- fio->blk_addr);
+ set_page_writeback(page);
+ f2fs_put_dnode(&dn);
+ if (fio->need_lock == LOCK_REQ)
+ f2fs_unlock_op(fio->sbi);
+ err = rewrite_data_page(fio);
+ trace_f2fs_do_write_data_page(fio->page, IPU);
+ set_inode_flag(inode, FI_UPDATE_WRITE);
+ return err;
+ }
- fio->encrypted_page = f2fs_encrypt(inode, fio->page);
- if (IS_ERR(fio->encrypted_page)) {
- err = PTR_ERR(fio->encrypted_page);
+ if (fio->need_lock == LOCK_RETRY) {
+ if (!f2fs_trylock_op(fio->sbi)) {
+ err = -EAGAIN;
goto out_writepage;
}
+ fio->need_lock = LOCK_REQ;
}
+ err = encrypt_one_page(fio);
+ if (err)
+ goto out_writepage;
+
set_page_writeback(page);
- /*
- * If current allocation needs SSR,
- * it had better in-place writes for updated data.
- */
- if (unlikely(fio->blk_addr != NEW_ADDR &&
- !is_cold_data(page) &&
- need_inplace_update(inode))) {
- rewrite_data_page(fio);
- set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
- trace_f2fs_do_write_data_page(page, IPU);
- } else {
- write_data_page(&dn, fio);
- set_data_blkaddr(&dn);
- f2fs_update_extent_cache(&dn);
- trace_f2fs_do_write_data_page(page, OPU);
- set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
- if (page->index == 0)
- set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
- }
+ /* LFS mode write path */
+ write_data_page(&dn, fio);
+ trace_f2fs_do_write_data_page(page, OPU);
+ set_inode_flag(inode, FI_APPEND_WRITE);
+ if (page->index == 0)
+ set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
out_writepage:
f2fs_put_dnode(&dn);
+out:
+ if (fio->need_lock == LOCK_REQ)
+ f2fs_unlock_op(fio->sbi);
return err;
}
-static int f2fs_write_data_page(struct page *page,
- struct writeback_control *wbc)
+static int __write_data_page(struct page *page, bool *submitted,
+ struct writeback_control *wbc,
+ enum iostat_type io_type)
{
struct inode *inode = page->mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = ((unsigned long long) i_size)
- >> PAGE_CACHE_SHIFT;
+ >> PAGE_SHIFT;
+ loff_t psize = (page->index + 1) << PAGE_SHIFT;
unsigned offset = 0;
bool need_balance_fs = false;
int err = 0;
struct f2fs_io_info fio = {
.sbi = sbi,
.type = DATA,
- .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
+ .op = REQ_OP_WRITE,
+ .op_flags = wbc_to_write_flags(wbc),
+ .old_blkaddr = NULL_ADDR,
.page = page,
.encrypted_page = NULL,
+ .submitted = false,
+ .need_lock = LOCK_RETRY,
+ .io_type = io_type,
};
trace_f2fs_writepage(page, DATA);
+ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+ goto redirty_out;
+
if (page->index < end_index)
goto write;
* If the offset is out-of-range of file size,
* this page does not have to be written to disk.
*/
- offset = i_size & (PAGE_CACHE_SIZE - 1);
+ offset = i_size & (PAGE_SIZE - 1);
if ((page->index >= end_index + 1) || !offset)
goto out;
- zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+ zero_user_segment(page, offset, PAGE_SIZE);
write:
- if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
- goto redirty_out;
if (f2fs_is_drop_cache(inode))
goto out;
- if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
- available_free_memory(sbi, BASE_CHECK))
+ /* we should not write 0'th page having journal header */
+ if (f2fs_is_volatile_file(inode) && (!page->index ||
+ (!wbc->for_reclaim &&
+ available_free_memory(sbi, BASE_CHECK))))
goto redirty_out;
+	/* we should bypass data pages to let the kworker jobs proceed */
+ if (unlikely(f2fs_cp_error(sbi))) {
+ mapping_set_error(page->mapping, -EIO);
+ goto out;
+ }
+
/* Dentry blocks are controlled by checkpoint */
if (S_ISDIR(inode->i_mode)) {
- if (unlikely(f2fs_cp_error(sbi)))
- goto redirty_out;
+ fio.need_lock = LOCK_DONE;
err = do_write_data_page(&fio);
goto done;
}
- /* we should bypass data pages to proceed the kworkder jobs */
- if (unlikely(f2fs_cp_error(sbi))) {
- SetPageError(page);
- goto out;
- }
-
if (!wbc->for_reclaim)
need_balance_fs = true;
- else if (has_not_enough_free_secs(sbi, 0))
+ else if (has_not_enough_free_secs(sbi, 0, 0))
goto redirty_out;
+ else
+ set_inode_flag(inode, FI_HOT_DATA);
err = -EAGAIN;
- f2fs_lock_op(sbi);
- if (f2fs_has_inline_data(inode))
+ if (f2fs_has_inline_data(inode)) {
err = f2fs_write_inline_data(inode, page);
- if (err == -EAGAIN)
+ if (!err)
+ goto out;
+ }
+
+ if (err == -EAGAIN) {
err = do_write_data_page(&fio);
- f2fs_unlock_op(sbi);
+ if (err == -EAGAIN) {
+ fio.need_lock = LOCK_REQ;
+ err = do_write_data_page(&fio);
+ }
+ }
+ if (F2FS_I(inode)->last_disk_size < psize)
+ F2FS_I(inode)->last_disk_size = psize;
+
done:
if (err && err != -ENOENT)
goto redirty_out;
- clear_cold_data(page);
out:
inode_dec_dirty_pages(inode);
if (err)
ClearPageUptodate(page);
+
+ if (wbc->for_reclaim) {
+ f2fs_submit_merged_write_cond(sbi, inode, 0, page->index, DATA);
+ clear_inode_flag(inode, FI_HOT_DATA);
+ remove_dirty_inode(inode);
+ submitted = NULL;
+ }
+
unlock_page(page);
- if (need_balance_fs)
- f2fs_balance_fs(sbi);
- if (wbc->for_reclaim)
- f2fs_submit_merged_bio(sbi, DATA, WRITE);
+ if (!S_ISDIR(inode->i_mode))
+ f2fs_balance_fs(sbi, need_balance_fs);
+
+ if (unlikely(f2fs_cp_error(sbi))) {
+ f2fs_submit_merged_write(sbi, DATA);
+ submitted = NULL;
+ }
+
+ if (submitted)
+ *submitted = fio.submitted;
+
return 0;
redirty_out:
redirty_page_for_writepage(wbc, page);
- return AOP_WRITEPAGE_ACTIVATE;
+ if (!err)
+ return AOP_WRITEPAGE_ACTIVATE;
+ unlock_page(page);
+ return err;
}
-static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
- void *data)
+static int f2fs_write_data_page(struct page *page,
+ struct writeback_control *wbc)
{
- struct address_space *mapping = data;
- int ret = mapping->a_ops->writepage(page, wbc);
- mapping_set_error(mapping, ret);
- return ret;
+ return __write_data_page(page, NULL, wbc, FS_DATA_IO);
}
/*
* warm/hot data page.
*/
static int f2fs_write_cache_pages(struct address_space *mapping,
- struct writeback_control *wbc, writepage_t writepage,
- void *data)
+ struct writeback_control *wbc,
+ enum iostat_type io_type)
{
int ret = 0;
int done = 0;
pgoff_t index;
pgoff_t end; /* Inclusive */
pgoff_t done_index;
+ pgoff_t last_idx = ULONG_MAX;
int cycled;
int range_whole = 0;
int tag;
- int step = 0;
pagevec_init(&pvec, 0);
-next:
+
+ if (get_dirty_pages(mapping->host) <=
+ SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
+ set_inode_flag(mapping->host, FI_HOT_DATA);
+ else
+ clear_inode_flag(mapping->host, FI_HOT_DATA);
+
if (wbc->range_cyclic) {
writeback_index = mapping->writeback_index; /* prev offset */
index = writeback_index;
cycled = 0;
end = -1;
} else {
- index = wbc->range_start >> PAGE_CACHE_SHIFT;
- end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ index = wbc->range_start >> PAGE_SHIFT;
+ end = wbc->range_end >> PAGE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
cycled = 1; /* ignore range_cyclic tests */
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
+ bool submitted = false;
if (page->index > end) {
done = 1;
}
done_index = page->index;
-
+retry_write:
lock_page(page);
if (unlikely(page->mapping != mapping)) {
goto continue_unlock;
}
- if (step == is_cold_data(page))
- goto continue_unlock;
-
if (PageWriteback(page)) {
if (wbc->sync_mode != WB_SYNC_NONE)
- f2fs_wait_on_page_writeback(page, DATA);
+ f2fs_wait_on_page_writeback(page,
+ DATA, true);
else
goto continue_unlock;
}
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
- ret = (*writepage)(page, wbc, data);
+ ret = __write_data_page(page, &submitted, wbc, io_type);
if (unlikely(ret)) {
+ /*
+ * keep nr_to_write, since vfs uses this to
+ * get # of written pages.
+ */
if (ret == AOP_WRITEPAGE_ACTIVATE) {
unlock_page(page);
ret = 0;
- } else {
- done_index = page->index + 1;
- done = 1;
- break;
+ continue;
+ } else if (ret == -EAGAIN) {
+ ret = 0;
+ if (wbc->sync_mode == WB_SYNC_ALL) {
+ cond_resched();
+ congestion_wait(BLK_RW_ASYNC,
+ HZ/50);
+ goto retry_write;
+ }
+ continue;
}
+ done_index = page->index + 1;
+ done = 1;
+ break;
+ } else if (submitted) {
+ last_idx = page->index;
}
- if (--wbc->nr_to_write <= 0 &&
- wbc->sync_mode == WB_SYNC_NONE) {
+ /* give priority to WB_SYNC threads */
+ if ((atomic_read(&F2FS_M_SB(mapping)->wb_sync_req) ||
+ --wbc->nr_to_write <= 0) &&
+ wbc->sync_mode == WB_SYNC_NONE) {
done = 1;
break;
}
cond_resched();
}
- if (step < 1) {
- step++;
- goto next;
- }
-
if (!cycled && !done) {
cycled = 1;
index = 0;
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = done_index;
+ if (last_idx != ULONG_MAX)
+ f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
+ 0, last_idx, DATA);
+
return ret;
}
-static int f2fs_write_data_pages(struct address_space *mapping,
- struct writeback_control *wbc)
+int __f2fs_write_data_pages(struct address_space *mapping,
+ struct writeback_control *wbc,
+ enum iostat_type io_type)
{
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- bool locked = false;
+ struct blk_plug plug;
int ret;
- long diff;
-
- trace_f2fs_writepages(mapping->host, wbc, DATA);
/* deal with chardevs and other special file */
if (!mapping->a_ops->writepage)
if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
return 0;
+ /* during POR, we don't need to trigger writepage at all. */
+ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+ goto skip_write;
+
if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
available_free_memory(sbi, DIRTY_DENTS))
goto skip_write;
- /* during POR, we don't need to trigger writepage at all. */
- if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+ /* skip writing during file defragment */
+ if (is_inode_flag_set(inode, FI_DO_DEFRAG))
goto skip_write;
- diff = nr_pages_to_write(sbi, DATA, wbc);
+ trace_f2fs_writepages(mapping->host, wbc, DATA);
- if (!S_ISDIR(inode->i_mode)) {
- mutex_lock(&sbi->writepages);
- locked = true;
- }
- ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
- f2fs_submit_merged_bio(sbi, DATA, WRITE);
- if (locked)
- mutex_unlock(&sbi->writepages);
+ /* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ atomic_inc(&sbi->wb_sync_req);
+ else if (atomic_read(&sbi->wb_sync_req))
+ goto skip_write;
- remove_dirty_dir_inode(inode);
+ blk_start_plug(&plug);
+ ret = f2fs_write_cache_pages(mapping, wbc, io_type);
+ blk_finish_plug(&plug);
+
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ atomic_dec(&sbi->wb_sync_req);
+ /*
+ * If some pages were truncated, we cannot rely on the inode's
+ * mapping->host to detect pending bios.
+ */
- wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
+ remove_dirty_inode(inode);
return ret;
skip_write:
wbc->pages_skipped += get_dirty_pages(inode);
+ trace_f2fs_writepages(mapping->host, wbc, DATA);
return 0;
}
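
The wb_sync_req counter introduced above lets WB_SYNC_ALL writeback preempt background passes: a sync caller holds the counter raised for its whole pass, while WB_SYNC_NONE callers bail out when it is non-zero, both at entry and inside the page loop of f2fs_write_cache_pages(). A compressed sketch of the gate using C11 atomics (illustrative only; the kernel uses atomic_t):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int wb_sync_req;

/* An async (WB_SYNC_NONE) pass yields whenever a sync pass is pending. */
static bool async_pass_may_run(void)
{
	return atomic_load(&wb_sync_req) == 0;
}

static void sync_pass(void)
{
	atomic_fetch_add(&wb_sync_req, 1);  /* announce: sync writeback running */
	/* ... write cache pages with WB_SYNC_ALL ... */
	atomic_fetch_sub(&wb_sync_req, 1);
}

int main(void)
{
	printf("async may run: %d\n", async_pass_may_run()); /* 1 */
	sync_pass();
	printf("async may run: %d\n", async_pass_may_run()); /* 1: sync pass finished */
	return 0;
}
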
+static int f2fs_write_data_pages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct inode *inode = mapping->host;
+
+ return __f2fs_write_data_pages(mapping, wbc,
+ F2FS_I(inode)->cp_task == current ?
+ FS_CP_DATA_IO : FS_DATA_IO);
+}
+
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
struct inode *inode = mapping->host;
+ loff_t i_size = i_size_read(inode);
+
+ if (to > i_size) {
+ down_write(&F2FS_I(inode)->i_mmap_sem);
+ truncate_pagecache(inode, i_size);
+ truncate_blocks(inode, i_size, true);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+ }
+}
+
+static int prepare_write_begin(struct f2fs_sb_info *sbi,
+ struct page *page, loff_t pos, unsigned len,
+ block_t *blk_addr, bool *node_changed)
+{
+ struct inode *inode = page->mapping->host;
+ pgoff_t index = page->index;
+ struct dnode_of_data dn;
+ struct page *ipage;
+ bool locked = false;
+ struct extent_info ei = {0,0,0};
+ int err = 0;
+
+ /*
+ * we already allocated all the blocks, so we don't need to get
+ * the block addresses when there is no need to fill the page.
+ */
+ if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
+ !is_inode_flag_set(inode, FI_NO_PREALLOC))
+ return 0;
- if (to > inode->i_size) {
- truncate_pagecache(inode, inode->i_size);
- truncate_blocks(inode, inode->i_size, true);
+ if (f2fs_has_inline_data(inode) ||
+ (pos & PAGE_MASK) >= i_size_read(inode)) {
+ __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
+ locked = true;
+ }
+restart:
+ /* check inline_data */
+ ipage = get_node_page(sbi, inode->i_ino);
+ if (IS_ERR(ipage)) {
+ err = PTR_ERR(ipage);
+ goto unlock_out;
+ }
+
+ set_new_dnode(&dn, inode, ipage, ipage, 0);
+
+ if (f2fs_has_inline_data(inode)) {
+ if (pos + len <= MAX_INLINE_DATA(inode)) {
+ read_inline_data(page, ipage);
+ set_inode_flag(inode, FI_DATA_EXIST);
+ if (inode->i_nlink)
+ set_inline_node(ipage);
+ } else {
+ err = f2fs_convert_inline_page(&dn, page);
+ if (err)
+ goto out;
+ if (dn.data_blkaddr == NULL_ADDR)
+ err = f2fs_get_block(&dn, index);
+ }
+ } else if (locked) {
+ err = f2fs_get_block(&dn, index);
+ } else {
+ if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+ dn.data_blkaddr = ei.blk + index - ei.fofs;
+ } else {
+ /* hole case */
+ err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
+ if (err || dn.data_blkaddr == NULL_ADDR) {
+ f2fs_put_dnode(&dn);
+ __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
+ true);
+ locked = true;
+ goto restart;
+ }
+ }
}
+
+ /* convert_inline_page can make node_changed */
+ *blk_addr = dn.data_blkaddr;
+ *node_changed = dn.node_changed;
+out:
+ f2fs_put_dnode(&dn);
+unlock_out:
+ if (locked)
+ __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+ return err;
}
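
In the extent-cache hit path above, an extent_info maps the contiguous file range [fofs, fofs + len) onto on-disk blocks starting at blk, so the block address for a page index inside the range is blk + index - fofs, with no node page access needed. A standalone sketch of that containment test and arithmetic:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int block_t;

/* Mirror of the three fields used from struct extent_info. */
struct extent_info {
	unsigned int fofs;	/* first file offset covered	*/
	block_t blk;		/* first on-disk block address	*/
	unsigned int len;	/* number of contiguous blocks	*/
};

static bool extent_lookup(const struct extent_info *ei,
			  unsigned int index, block_t *blkaddr)
{
	if (index < ei->fofs || index >= ei->fofs + ei->len)
		return false;			/* cache miss */
	*blkaddr = ei->blk + index - ei->fofs;	/* same offset into the run */
	return true;
}

int main(void)
{
	struct extent_info ei = { .fofs = 100, .blk = 5000, .len = 8 };
	block_t blkaddr = 0;

	assert(extent_lookup(&ei, 103, &blkaddr) && blkaddr == 5003);
	assert(!extent_lookup(&ei, 108, &blkaddr));	/* one past the end */
	printf("index 103 -> block %u\n", blkaddr);
	return 0;
}
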
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct page *page = NULL;
- struct page *ipage;
- pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
- struct dnode_of_data dn;
+ pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
+ bool need_balance = false;
+ block_t blkaddr = NULL_ADDR;
int err = 0;
if (trace_android_fs_datawrite_start_enabled()) {
}
trace_f2fs_write_begin(inode, pos, len, flags);
- f2fs_balance_fs(sbi);
-
/*
* We should check this at this moment to avoid deadlock on inode page
* and #0 page. The locking rule for inline_data conversion should be:
goto fail;
}
repeat:
- page = grab_cache_page_write_begin(mapping, index, flags);
+ /*
+ * Do not use grab_cache_page_write_begin() to avoid deadlock due to
+ * wait_for_stable_page. We will wait for that below with our own IO
+ * control.
+ */
+ page = grab_cache_page(mapping, index);
if (!page) {
err = -ENOMEM;
goto fail;
*pagep = page;
- f2fs_lock_op(sbi);
-
- /* check inline_data */
- ipage = get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage)) {
- err = PTR_ERR(ipage);
- goto unlock_fail;
- }
-
- set_new_dnode(&dn, inode, ipage, ipage, 0);
+ err = prepare_write_begin(sbi, page, pos, len,
+ &blkaddr, &need_balance);
+ if (err)
+ goto fail;
- if (f2fs_has_inline_data(inode)) {
- if (pos + len <= MAX_INLINE_DATA) {
- read_inline_data(page, ipage);
- set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
- sync_inode_page(&dn);
- goto put_next;
+ if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
+ unlock_page(page);
+ f2fs_balance_fs(sbi, true);
+ lock_page(page);
+ if (page->mapping != mapping) {
+ /* The page got truncated from under us */
+ f2fs_put_page(page, 1);
+ goto repeat;
}
- err = f2fs_convert_inline_page(&dn, page);
- if (err)
- goto put_fail;
}
- err = f2fs_get_block(&dn, index);
- if (err)
- goto put_fail;
-put_next:
- f2fs_put_dnode(&dn);
- f2fs_unlock_op(sbi);
-
- f2fs_wait_on_page_writeback(page, DATA);
+ f2fs_wait_on_page_writeback(page, DATA, false);
/* wait for GCed encrypted page writeback */
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
- f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);
+ if (f2fs_encrypted_file(inode))
+ f2fs_wait_on_block_writeback(sbi, blkaddr);
- if (len == PAGE_CACHE_SIZE)
- goto out_update;
- if (PageUptodate(page))
- goto out_clear;
-
- if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
- unsigned start = pos & (PAGE_CACHE_SIZE - 1);
- unsigned end = start + len;
+ if (len == PAGE_SIZE || PageUptodate(page))
+ return 0;
- /* Reading beyond i_size is simple: memset to zero */
- zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
- goto out_update;
+ if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
+ zero_user_segment(page, len, PAGE_SIZE);
+ return 0;
}
- if (dn.data_blkaddr == NEW_ADDR) {
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ if (blkaddr == NEW_ADDR) {
+ zero_user_segment(page, 0, PAGE_SIZE);
+ SetPageUptodate(page);
} else {
- struct f2fs_io_info fio = {
- .sbi = sbi,
- .type = DATA,
- .rw = READ_SYNC,
- .blk_addr = dn.data_blkaddr,
- .page = page,
- .encrypted_page = NULL,
- };
- err = f2fs_submit_page_bio(&fio);
+ err = f2fs_submit_page_read(inode, page, blkaddr);
if (err)
goto fail;
lock_page(page);
- if (unlikely(!PageUptodate(page))) {
- err = -EIO;
- goto fail;
- }
if (unlikely(page->mapping != mapping)) {
f2fs_put_page(page, 1);
goto repeat;
}
-
- /* avoid symlink page */
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
- err = f2fs_decrypt_one(inode, page);
- if (err)
- goto fail;
+ if (unlikely(!PageUptodate(page))) {
+ err = -EIO;
+ goto fail;
}
}
-out_update:
- SetPageUptodate(page);
-out_clear:
- clear_cold_data(page);
return 0;
-put_fail:
- f2fs_put_dnode(&dn);
-unlock_fail:
- f2fs_unlock_op(sbi);
fail:
f2fs_put_page(page, 1);
f2fs_write_failed(mapping, pos + len);
trace_android_fs_datawrite_end(inode, pos, len);
trace_f2fs_write_end(inode, pos, len, copied);
- set_page_dirty(page);
-
- if (pos + copied > i_size_read(inode)) {
- i_size_write(inode, pos + copied);
- mark_inode_dirty(inode);
- update_inode_page(inode);
+ /*
+ * This should come from len == PAGE_SIZE, so we expect copied to be
+ * PAGE_SIZE as well. Otherwise, we treat it as a zero-length copy and
+ * let generic_perform_write() retry the copy via copied = 0.
+ */
+ if (!PageUptodate(page)) {
+ if (unlikely(copied != len))
+ copied = 0;
+ else
+ SetPageUptodate(page);
}
+ if (!copied)
+ goto unlock_out;
+
+ set_page_dirty(page);
+ if (pos + copied > i_size_read(inode))
+ f2fs_i_size_write(inode, pos + copied);
+unlock_out:
f2fs_put_page(page, 1);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return copied;
}
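
The copied fix-up above reduces to a small pure decision: a page that is not yet uptodate only reaches write_end with len == PAGE_SIZE, so any short copy is discarded (copied = 0) to make generic_perform_write() retry, while a full copy lets the page be marked uptodate. A sketch of that rule:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define PAGE_SIZE 4096

/*
 * If the page is not uptodate, we only got here with len == PAGE_SIZE,
 * so anything short of a full copy is thrown away (copied = 0) and the
 * caller retries; a full copy is kept and the page becomes uptodate.
 */
static size_t fixup_copied(bool page_uptodate, size_t len, size_t copied)
{
	if (!page_uptodate && copied != len)
		return 0;
	return copied;
}

int main(void)
{
	assert(fixup_copied(true, PAGE_SIZE, 1000) == 1000);	/* partial OK */
	assert(fixup_copied(false, PAGE_SIZE, 1000) == 0);	/* retry */
	assert(fixup_copied(false, PAGE_SIZE, PAGE_SIZE) == PAGE_SIZE);
	return 0;
}
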
}
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
- loff_t offset)
+ loff_t offset)
{
- struct file *file = iocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
struct inode *inode = mapping->host;
size_t count = iov_iter_count(iter);
+ int rw = iov_iter_rw(iter);
int err;
- /* we don't need to use inline_data strictly */
- if (f2fs_has_inline_data(inode)) {
- err = f2fs_convert_inline_inode(inode);
- if (err)
- return err;
- }
-
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
- return 0;
-
err = check_direct_IO(inode, iter, offset);
if (err)
return err;
- trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
+ if (__force_buffered_io(inode, rw))
+ return 0;
if (trace_android_fs_dataread_start_enabled() &&
(iov_iter_rw(iter) == READ)) {
current->pid, path,
current->comm);
}
- if (iov_iter_rw(iter) == WRITE) {
- __allocate_data_blocks(inode, offset, count);
- if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
- err = -EIO;
- goto out;
- }
- }
+ trace_f2fs_direct_IO_enter(inode, offset, count, rw);
+ down_read(&F2FS_I(inode)->dio_rwsem[rw]);
err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
-out:
- if (err < 0 && iov_iter_rw(iter) == WRITE)
- f2fs_write_failed(mapping, offset + count);
+ up_read(&F2FS_I(inode)->dio_rwsem[rw]);
+
+ if (rw == WRITE) {
+ if (err > 0) {
+ f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
+ err);
+ set_inode_flag(inode, FI_UPDATE_WRITE);
+ } else if (err < 0) {
+ f2fs_write_failed(mapping, offset + count);
+ }
+ }
if (trace_android_fs_dataread_start_enabled() &&
(iov_iter_rw(iter) == READ))
(iov_iter_rw(iter) == WRITE))
trace_android_fs_datawrite_end(inode, offset, count);
- trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);
+ trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
return err;
}
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
- (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
+ (offset % PAGE_SIZE || length != PAGE_SIZE))
return;
if (PageDirty(page)) {
- if (inode->i_ino == F2FS_META_INO(sbi))
+ if (inode->i_ino == F2FS_META_INO(sbi)) {
dec_page_count(sbi, F2FS_DIRTY_META);
- else if (inode->i_ino == F2FS_NODE_INO(sbi))
+ } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
dec_page_count(sbi, F2FS_DIRTY_NODES);
- else
+ } else {
inode_dec_dirty_pages(inode);
+ remove_dirty_inode(inode);
+ }
}
/* This is atomic written page, keep Private */
if (IS_ATOMIC_WRITTEN_PAGE(page))
- return;
+ return drop_inmem_page(inode, page);
+ set_page_private(page, 0);
ClearPagePrivate(page);
}
if (IS_ATOMIC_WRITTEN_PAGE(page))
return 0;
+ set_page_private(page, 0);
ClearPagePrivate(page);
return 1;
}
+/*
+ * This was copied from __set_page_dirty_buffers, which gives higher
+ * performance on very high speed storage (e.g., pmem).
+ */
+void f2fs_set_page_dirty_nobuffers(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+ struct mem_cgroup *memcg;
+ unsigned long flags;
+
+ if (unlikely(!mapping))
+ return;
+
+ spin_lock(&mapping->private_lock);
+ memcg = mem_cgroup_begin_page_stat(page);
+ SetPageDirty(page);
+ spin_unlock(&mapping->private_lock);
+
+ spin_lock_irqsave(&mapping->tree_lock, flags);
+ WARN_ON_ONCE(!PageUptodate(page));
+ account_page_dirtied(page, mapping, memcg);
+ radix_tree_tag_set(&mapping->page_tree,
+ page_index(page), PAGECACHE_TAG_DIRTY);
+ spin_unlock_irqrestore(&mapping->tree_lock, flags);
+
+ mem_cgroup_end_page_stat(memcg);
+
+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+ return;
+}
+
static int f2fs_set_data_page_dirty(struct page *page)
{
struct address_space *mapping = page->mapping;
trace_f2fs_set_page_dirty(page, DATA);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
- if (f2fs_is_atomic_file(inode)) {
+ if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
register_inmem_page(inode, page);
return 1;
}
if (!PageDirty(page)) {
- __set_page_dirty_nobuffers(page);
+ f2fs_set_page_dirty_nobuffers(page);
update_dirty_page(inode, page);
return 1;
}
return generic_block_bmap(mapping, block, get_data_block_bmap);
}
+#ifdef CONFIG_MIGRATION
+#include <linux/migrate.h>
+
+int f2fs_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+ int rc, extra_count;
+ struct f2fs_inode_info *fi = F2FS_I(mapping->host);
+ bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
+
+ BUG_ON(PageWriteback(page));
+
+ /* migrating an atomic written page is safe with the inmem_lock held */
+ if (atomic_written) {
+ if (mode != MIGRATE_SYNC)
+ return -EBUSY;
+ if (!mutex_trylock(&fi->inmem_lock))
+ return -EAGAIN;
+ }
+
+ /*
+ * A reference is expected if PagePrivate is set when the mapping is
+ * moved; however, F2FS breaks this rule to maintain dirty page counts
+ * when truncating pages. Adjusting 'extra_count' here makes it work.
+ */
+ extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
+ rc = migrate_page_move_mapping(mapping, newpage,
+ page, NULL, mode, extra_count);
+ if (rc != MIGRATEPAGE_SUCCESS) {
+ if (atomic_written)
+ mutex_unlock(&fi->inmem_lock);
+ return rc;
+ }
+
+ if (atomic_written) {
+ struct inmem_pages *cur;
+ list_for_each_entry(cur, &fi->inmem_pages, list)
+ if (cur->page == page) {
+ cur->page = newpage;
+ break;
+ }
+ mutex_unlock(&fi->inmem_lock);
+ put_page(page);
+ get_page(newpage);
+ }
+
+ if (PagePrivate(page))
+ SetPagePrivate(newpage);
+ set_page_private(newpage, page_private(page));
+
+ migrate_page_copy(newpage, page);
+
+ return MIGRATEPAGE_SUCCESS;
+}
+#endif
+
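
migrate_page_move_mapping() validates the page's reference count against what its flags imply; because f2fs keeps PagePrivate set without holding the usual extra reference, and an atomic-written page pins one additional reference, f2fs_migrate_page() corrects the expectation through extra_count. A sketch of that bookkeeping:

#include <assert.h>
#include <stdbool.h>

/*
 * The migration core expects a baseline refcount plus one per
 * PagePrivate page; f2fs sets PagePrivate without that reference, and
 * an atomic-written page pins one extra reference of its own, so the
 * correction is:
 */
static int f2fs_extra_count(bool atomic_written, bool has_private)
{
	return (atomic_written ? 1 : 0) - (has_private ? 1 : 0);
}

int main(void)
{
	assert(f2fs_extra_count(false, true) == -1);	/* plain dirty page */
	assert(f2fs_extra_count(true, true) == 0);	/* atomic + private */
	assert(f2fs_extra_count(false, false) == 0);	/* no private data  */
	return 0;
}
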
const struct address_space_operations f2fs_dblock_aops = {
.readpage = f2fs_read_data_page,
.readpages = f2fs_read_data_pages,
.releasepage = f2fs_release_page,
.direct_IO = f2fs_direct_IO,
.bmap = f2fs_bmap,
+#ifdef CONFIG_MIGRATION
+ .migratepage = f2fs_migrate_page,
+#endif
};
si->hit_rbtree = atomic64_read(&sbi->read_hit_rbtree);
si->hit_total = si->hit_largest + si->hit_cached + si->hit_rbtree;
si->total_ext = atomic64_read(&sbi->total_hit_ext);
- si->ext_tree = sbi->total_ext_tree;
+ si->ext_tree = atomic_read(&sbi->total_ext_tree);
+ si->zombie_tree = atomic_read(&sbi->total_zombie_tree);
si->ext_node = atomic_read(&sbi->total_ext_node);
si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS);
- si->ndirty_dirs = sbi->n_dirty_dirs;
si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META);
+ si->ndirty_data = get_pages(sbi, F2FS_DIRTY_DATA);
+ si->ndirty_imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
+ si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE];
+ si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
+ si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
- si->wb_pages = get_pages(sbi, F2FS_WRITEBACK);
+ si->aw_cnt = atomic_read(&sbi->aw_cnt);
+ si->vw_cnt = atomic_read(&sbi->vw_cnt);
+ si->max_aw_cnt = atomic_read(&sbi->max_aw_cnt);
+ si->max_vw_cnt = atomic_read(&sbi->max_vw_cnt);
+ si->nr_wb_cp_data = get_pages(sbi, F2FS_WB_CP_DATA);
+ si->nr_wb_data = get_pages(sbi, F2FS_WB_DATA);
+ if (SM_I(sbi) && SM_I(sbi)->fcc_info) {
+ si->nr_flushed =
+ atomic_read(&SM_I(sbi)->fcc_info->issued_flush);
+ si->nr_flushing =
+ atomic_read(&SM_I(sbi)->fcc_info->issing_flush);
+ }
+ if (SM_I(sbi) && SM_I(sbi)->dcc_info) {
+ si->nr_discarded =
+ atomic_read(&SM_I(sbi)->dcc_info->issued_discard);
+ si->nr_discarding =
+ atomic_read(&SM_I(sbi)->dcc_info->issing_discard);
+ si->nr_discard_cmd =
+ atomic_read(&SM_I(sbi)->dcc_info->discard_cmd_cnt);
+ si->undiscard_blks = SM_I(sbi)->dcc_info->undiscard_blks;
+ }
si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
si->rsvd_segs = reserved_segments(sbi);
si->overp_segs = overprovision_segments(sbi);
si->valid_count = valid_user_blocks(sbi);
+ si->discard_blks = discard_blocks(sbi);
si->valid_node_count = valid_node_count(sbi);
si->valid_inode_count = valid_inode_count(sbi);
si->inline_xattr = atomic_read(&sbi->inline_xattr);
si->inline_inode = atomic_read(&sbi->inline_inode);
si->inline_dir = atomic_read(&sbi->inline_dir);
+ si->append = sbi->im[APPEND_INO].ino_num;
+ si->update = sbi->im[UPDATE_INO].ino_num;
+ si->orphans = sbi->im[ORPHAN_INO].ino_num;
si->utilization = utilization(sbi);
si->free_segs = free_segments(sbi);
si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
si->sits = MAIN_SEGS(sbi);
si->dirty_sits = SIT_I(sbi)->dirty_sentries;
- si->fnids = NM_I(sbi)->fcnt;
+ si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID_LIST];
+ si->avail_nids = NM_I(sbi)->available_nids;
+ si->alloc_nids = NM_I(sbi)->nid_cnt[ALLOC_NID_LIST];
si->bg_gc = sbi->bg_gc;
si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_NODE; i++) {
struct curseg_info *curseg = CURSEG_I(sbi, i);
si->curseg[i] = curseg->segno;
- si->cursec[i] = curseg->segno / sbi->segs_per_sec;
- si->curzone[i] = si->cursec[i] / sbi->secs_per_zone;
+ si->cursec[i] = GET_SEC_FROM_SEG(sbi, curseg->segno);
+ si->curzone[i] = GET_ZONE_FROM_SEC(sbi, si->cursec[i]);
}
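
GET_SEC_FROM_SEG() and GET_ZONE_FROM_SEC() above replace the open-coded divisions: a section is segs_per_sec consecutive segments and a zone is secs_per_zone consecutive sections. A sketch with hypothetical geometry constants:

#include <stdio.h>

/* Hypothetical geometry: 4 segments per section, 2 sections per zone. */
#define SEGS_PER_SEC	4
#define SECS_PER_ZONE	2

static unsigned int sec_from_seg(unsigned int segno)
{
	return segno / SEGS_PER_SEC;
}

static unsigned int zone_from_sec(unsigned int secno)
{
	return secno / SECS_PER_ZONE;
}

int main(void)
{
	unsigned int segno = 11;
	unsigned int secno = sec_from_seg(segno);	/* 2 */

	printf("seg %u -> sec %u -> zone %u\n",
	       segno, secno, zone_from_sec(secno));	/* zone 1 */
	return 0;
}
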
for (i = 0; i < 2; i++) {
bimodal = 0;
total_vblocks = 0;
- blks_per_sec = sbi->segs_per_sec * (1 << sbi->log_blocks_per_seg);
+ blks_per_sec = BLKS_PER_SEC(sbi);
hblks_per_sec = blks_per_sec / 2;
for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
- vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);
+ vblocks = get_valid_blocks(sbi, segno, true);
dist = abs(vblocks - hblks_per_sec);
bimodal += dist * dist;
if (si->base_mem)
goto get_cache;
- si->base_mem = sizeof(struct f2fs_sb_info) + sbi->sb->s_blocksize;
+ /* build stat */
+ si->base_mem = sizeof(struct f2fs_stat_info);
+
+ /* build superblock */
+ si->base_mem += sizeof(struct f2fs_sb_info) + sbi->sb->s_blocksize;
si->base_mem += 2 * sizeof(struct f2fs_inode_info);
si->base_mem += sizeof(*sbi->ckpt);
+ si->base_mem += sizeof(struct percpu_counter) * NR_COUNT_TYPE;
/* build sm */
si->base_mem += sizeof(struct f2fs_sm_info);
si->base_mem += sizeof(struct sit_info);
si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry);
si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
- si->base_mem += 3 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
+ si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
+ if (f2fs_discard_en(sbi))
+ si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
si->base_mem += SIT_VBLOCK_MAP_SIZE;
if (sbi->segs_per_sec > 1)
si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
/* build curseg */
si->base_mem += sizeof(struct curseg_info) * NR_CURSEG_TYPE;
- si->base_mem += PAGE_CACHE_SIZE * NR_CURSEG_TYPE;
+ si->base_mem += PAGE_SIZE * NR_CURSEG_TYPE;
/* build dirty segmap */
si->base_mem += sizeof(struct dirty_seglist_info);
/* build nm */
si->base_mem += sizeof(struct f2fs_nm_info);
si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
+ si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS);
+ si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE;
+ si->base_mem += NM_I(sbi)->nat_blocks / 8;
+ si->base_mem += NM_I(sbi)->nat_blocks * sizeof(unsigned short);
get_cache:
si->cache_mem = 0;
si->cache_mem += sizeof(struct f2fs_gc_kthread);
/* build merge flush thread */
- if (SM_I(sbi)->cmd_control_info)
+ if (SM_I(sbi)->fcc_info)
si->cache_mem += sizeof(struct flush_cmd_control);
+ if (SM_I(sbi)->dcc_info) {
+ si->cache_mem += sizeof(struct discard_cmd_control);
+ si->cache_mem += sizeof(struct discard_cmd) *
+ atomic_read(&SM_I(sbi)->dcc_info->discard_cmd_cnt);
+ }
/* free nids */
- si->cache_mem += NM_I(sbi)->fcnt * sizeof(struct free_nid);
+ si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID_LIST] +
+ NM_I(sbi)->nid_cnt[ALLOC_NID_LIST]) *
+ sizeof(struct free_nid);
si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry);
si->cache_mem += NM_I(sbi)->dirty_nat_cnt *
sizeof(struct nat_entry_set);
si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
- si->cache_mem += sbi->n_dirty_dirs * sizeof(struct inode_entry);
- for (i = 0; i <= UPDATE_INO; i++)
+ for (i = 0; i <= ORPHAN_INO; i++)
si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
- si->cache_mem += sbi->total_ext_tree * sizeof(struct extent_tree);
+ si->cache_mem += atomic_read(&sbi->total_ext_tree) *
+ sizeof(struct extent_tree);
si->cache_mem += atomic_read(&sbi->total_ext_node) *
sizeof(struct extent_node);
si->page_mem = 0;
npages = NODE_MAPPING(sbi)->nrpages;
- si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
+ si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
npages = META_MAPPING(sbi)->nrpages;
- si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
+ si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
}
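
Most base_mem terms above are plain sizeof-times-count products; the bitmap terms round up to whole unsigned longs. A sketch of that sizing, assuming f2fs_bitmap_size() follows the common BITS_TO_LONGS rounding (the macro itself is not part of this hunk):

#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Bytes needed for a bitmap with one bit per segment. */
static unsigned long f2fs_bitmap_size(unsigned long nr_bits)
{
	return BITS_TO_LONGS(nr_bits) * sizeof(unsigned long);
}

int main(void)
{
	/* e.g. 1000 main segments -> 128 bytes on a 64-bit build */
	printf("bitmap for 1000 segments: %lu bytes\n", f2fs_bitmap_size(1000));
	assert(f2fs_bitmap_size(1) == sizeof(unsigned long));
	return 0;
}
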
static int stat_show(struct seq_file *s, void *v)
mutex_lock(&f2fs_stat_mutex);
list_for_each_entry(si, &f2fs_stat_list, stat_list) {
- char devname[BDEVNAME_SIZE];
-
update_general_status(si->sbi);
- seq_printf(s, "\n=====[ partition info(%s). #%d ]=====\n",
- bdevname(si->sbi->sb->s_bdev, devname), i++);
+ seq_printf(s, "\n=====[ partition info(%pg). #%d, %s]=====\n",
+ si->sbi->sb->s_bdev, i++,
+ f2fs_readonly(si->sbi->sb) ? "RO": "RW");
seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ",
si->sit_area_segs, si->nat_area_segs);
seq_printf(s, "[SSA: %d] [MAIN: %d",
si->ssa_area_segs, si->main_area_segs);
seq_printf(s, "(OverProv:%d Resv:%d)]\n\n",
si->overp_segs, si->rsvd_segs);
- seq_printf(s, "Utilization: %d%% (%d valid blocks)\n",
- si->utilization, si->valid_count);
+ if (test_opt(si->sbi, DISCARD))
+ seq_printf(s, "Utilization: %u%% (%u valid blocks, %u discard blocks)\n",
+ si->utilization, si->valid_count, si->discard_blks);
+ else
+ seq_printf(s, "Utilization: %u%% (%u valid blocks)\n",
+ si->utilization, si->valid_count);
+
seq_printf(s, " - Node: %u (Inode: %u, ",
si->valid_node_count, si->valid_inode_count);
seq_printf(s, "Other: %u)\n - Data: %u\n",
si->inline_inode);
seq_printf(s, " - Inline_dentry Inode: %u\n",
si->inline_dir);
+ seq_printf(s, " - Orphan/Append/Update Inode: %u, %u, %u\n",
+ si->orphans, si->append, si->update);
seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n",
si->main_area_segs, si->main_area_sections,
si->main_area_zones);
si->dirty_count);
seq_printf(s, " - Prefree: %d\n - Free: %d (%d)\n\n",
si->prefree_count, si->free_segs, si->free_secs);
- seq_printf(s, "CP calls: %d\n", si->cp_count);
+ seq_printf(s, "CP calls: %d (BG: %d)\n",
+ si->cp_count, si->bg_cp_count);
seq_printf(s, "GC calls: %d (BG: %d)\n",
si->call_count, si->bg_gc);
seq_printf(s, " - data segments : %d (%d)\n",
!si->total_ext ? 0 :
div64_u64(si->hit_total * 100, si->total_ext),
si->hit_total, si->total_ext);
- seq_printf(s, " - Inner Struct Count: tree: %d, node: %d\n",
- si->ext_tree, si->ext_node);
+ seq_printf(s, " - Inner Struct Count: tree: %d(%d), node: %d\n",
+ si->ext_tree, si->zombie_tree, si->ext_node);
seq_puts(s, "\nBalancing F2FS Async:\n");
- seq_printf(s, " - inmem: %4d, wb: %4d\n",
- si->inmem_pages, si->wb_pages);
+ seq_printf(s, " - IO (CP: %4d, Data: %4d, Flush: (%4d %4d), "
+ "Discard: (%4d %4d)) cmd: %4d undiscard:%4u\n",
+ si->nr_wb_cp_data, si->nr_wb_data,
+ si->nr_flushing, si->nr_flushed,
+ si->nr_discarding, si->nr_discarded,
+ si->nr_discard_cmd, si->undiscard_blks);
+ seq_printf(s, " - inmem: %4d, atomic IO: %4d (Max. %4d), "
+ "volatile IO: %4d (Max. %4d)\n",
+ si->inmem_pages, si->aw_cnt, si->max_aw_cnt,
+ si->vw_cnt, si->max_vw_cnt);
seq_printf(s, " - nodes: %4d in %4d\n",
si->ndirty_node, si->node_pages);
- seq_printf(s, " - dents: %4d in dirs:%4d\n",
- si->ndirty_dent, si->ndirty_dirs);
+ seq_printf(s, " - dents: %4d in dirs:%4d (%4d)\n",
+ si->ndirty_dent, si->ndirty_dirs, si->ndirty_all);
+ seq_printf(s, " - datas: %4d in files:%4d\n",
+ si->ndirty_data, si->ndirty_files);
seq_printf(s, " - meta: %4d in %4d\n",
si->ndirty_meta, si->meta_pages);
+ seq_printf(s, " - imeta: %4d\n",
+ si->ndirty_imeta);
seq_printf(s, " - NATs: %9d/%9d\n - SITs: %9d/%9d\n",
si->dirty_nats, si->nats, si->dirty_sits, si->sits);
- seq_printf(s, " - free_nids: %9d\n",
- si->fnids);
+ seq_printf(s, " - free_nids: %9d/%9d\n - alloc_nids: %9d\n",
+ si->free_nids, si->avail_nids, si->alloc_nids);
seq_puts(s, "\nDistribution of User Blocks:");
seq_puts(s, " [ valid | invalid | free ]\n");
seq_puts(s, " [");
atomic_set(&sbi->inline_dir, 0);
atomic_set(&sbi->inplace_count, 0);
+ atomic_set(&sbi->aw_cnt, 0);
+ atomic_set(&sbi->vw_cnt, 0);
+ atomic_set(&sbi->max_aw_cnt, 0);
+ atomic_set(&sbi->max_vw_cnt, 0);
+
mutex_lock(&f2fs_stat_mutex);
list_add_tail(&si->stat_list, &f2fs_stat_list);
mutex_unlock(&f2fs_stat_mutex);
kfree(si);
}
-void __init f2fs_create_root_stats(void)
+int __init f2fs_create_root_stats(void)
{
struct dentry *file;
f2fs_debugfs_root = debugfs_create_dir("f2fs", NULL);
if (!f2fs_debugfs_root)
- return;
+ return -ENOMEM;
file = debugfs_create_file("status", S_IRUGO, f2fs_debugfs_root,
NULL, &stat_fops);
if (!file) {
debugfs_remove(f2fs_debugfs_root);
f2fs_debugfs_root = NULL;
+ return -ENOMEM;
}
+
+ return 0;
}
void f2fs_destroy_root_stats(void)
static unsigned long dir_blocks(struct inode *inode)
{
- return ((unsigned long long) (i_size_read(inode) + PAGE_CACHE_SIZE - 1))
- >> PAGE_CACHE_SHIFT;
+ return ((unsigned long long) (i_size_read(inode) + PAGE_SIZE - 1))
+ >> PAGE_SHIFT;
}
static unsigned int dir_buckets(unsigned int level, int dir_level)
return 4;
}
-unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
+static unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
[F2FS_FT_UNKNOWN] = DT_UNKNOWN,
[F2FS_FT_REG_FILE] = DT_REG,
[F2FS_FT_DIR] = DT_DIR,
[F2FS_FT_SYMLINK] = DT_LNK,
};
-#define S_SHIFT 12
static unsigned char f2fs_type_by_mode[S_IFMT >> S_SHIFT] = {
[S_IFREG >> S_SHIFT] = F2FS_FT_REG_FILE,
[S_IFDIR >> S_SHIFT] = F2FS_FT_DIR,
de->file_type = f2fs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
}
+unsigned char get_de_type(struct f2fs_dir_entry *de)
+{
+ if (de->file_type < F2FS_FT_MAX)
+ return f2fs_filetype_table[de->file_type];
+ return DT_UNKNOWN;
+}
+
static unsigned long dir_block_index(unsigned int level,
int dir_level, unsigned int idx)
{
}
static struct f2fs_dir_entry *find_in_block(struct page *dentry_page,
- struct f2fs_filename *fname,
+ struct fscrypt_name *fname,
f2fs_hash_t namehash,
int *max_slots,
struct page **res_page)
dentry_blk = (struct f2fs_dentry_block *)kmap(dentry_page);
- make_dentry_ptr(NULL, &d, (void *)dentry_blk, 1);
+ make_dentry_ptr_block(NULL, &d, dentry_blk);
de = find_target_dentry(fname, namehash, max_slots, &d);
if (de)
*res_page = dentry_page;
else
kunmap(dentry_page);
- /*
- * For the most part, it should be a bug when name_len is zero.
- * We stop here for figuring out where the bugs has occurred.
- */
- f2fs_bug_on(F2FS_P_SB(dentry_page), d.max < 0);
return de;
}
-struct f2fs_dir_entry *find_target_dentry(struct f2fs_filename *fname,
+struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname,
f2fs_hash_t namehash, int *max_slots,
struct f2fs_dentry_ptr *d)
{
struct f2fs_dir_entry *de;
unsigned long bit_pos = 0;
int max_len = 0;
- struct f2fs_str de_name = FSTR_INIT(NULL, 0);
- struct f2fs_str *name = &fname->disk_name;
if (max_slots)
*max_slots = 0;
de = &d->dentry[bit_pos];
- if (de->hash_code != namehash)
- goto not_match;
-
- de_name.name = d->filename[bit_pos];
- de_name.len = le16_to_cpu(de->name_len);
-
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- if (unlikely(!name->name)) {
- if (fname->usr_fname->name[0] == '_') {
- if (de_name.len > 32 &&
- !memcmp(de_name.name + ((de_name.len - 17) & ~15),
- fname->crypto_buf.name + 8, 16))
- goto found;
- goto not_match;
- }
- name->name = fname->crypto_buf.name;
- name->len = fname->crypto_buf.len;
+ if (unlikely(!de->name_len)) {
+ bit_pos++;
+ continue;
}
-#endif
- if (de_name.len == name->len &&
- !memcmp(de_name.name, name->name, name->len))
+
+ if (de->hash_code == namehash &&
+ fscrypt_match_name(fname, d->filename[bit_pos],
+ le16_to_cpu(de->name_len)))
goto found;
-not_match:
+
if (max_slots && max_len > *max_slots)
*max_slots = max_len;
max_len = 0;
- /* remain bug on condition */
- if (unlikely(!de->name_len))
- d->max = -1;
-
bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
}
static struct f2fs_dir_entry *find_in_level(struct inode *dir,
unsigned int level,
- struct f2fs_filename *fname,
+ struct fscrypt_name *fname,
struct page **res_page)
{
struct qstr name = FSTR_TO_QSTR(&fname->disk_name);
struct f2fs_dir_entry *de = NULL;
bool room = false;
int max_slots;
- f2fs_hash_t namehash;
-
- namehash = f2fs_dentry_hash(&name, fname);
-
- f2fs_bug_on(F2FS_I_SB(dir), level > MAX_DIR_HASH_DEPTH);
+ f2fs_hash_t namehash = f2fs_dentry_hash(&name, fname);
nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
nblock = bucket_blocks(level);
/* no need to allocate new dentry pages to all the indices */
dentry_page = find_data_page(dir, bidx);
if (IS_ERR(dentry_page)) {
- room = true;
- continue;
+ if (PTR_ERR(dentry_page) == -ENOENT) {
+ room = true;
+ continue;
+ } else {
+ *res_page = dentry_page;
+ break;
+ }
}
de = find_in_block(dentry_page, fname, namehash, &max_slots,
return de;
}
-/*
- * Find an entry in the specified directory with the wanted name.
- * It returns the page where the entry was found (as a parameter - res_page),
- * and the entry itself. Page is returned mapped and unlocked.
- * Entry is guaranteed to be valid.
- */
-struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
- struct qstr *child, struct page **res_page)
+struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
+ struct fscrypt_name *fname, struct page **res_page)
{
unsigned long npages = dir_blocks(dir);
struct f2fs_dir_entry *de = NULL;
unsigned int max_depth;
unsigned int level;
- struct f2fs_filename fname;
- int err;
-
- *res_page = NULL;
-
- err = f2fs_fname_setup_filename(dir, child, 1, &fname);
- if (err)
- return NULL;
if (f2fs_has_inline_dentry(dir)) {
- de = find_in_inline_dir(dir, &fname, res_page);
+ *res_page = NULL;
+ de = find_in_inline_dir(dir, fname, res_page);
goto out;
}
- if (npages == 0)
+ if (npages == 0) {
+ *res_page = NULL;
goto out;
+ }
max_depth = F2FS_I(dir)->i_current_depth;
+ if (unlikely(max_depth > MAX_DIR_HASH_DEPTH)) {
+ f2fs_msg(F2FS_I_SB(dir)->sb, KERN_WARNING,
+ "Corrupted max_depth of %lu: %u",
+ dir->i_ino, max_depth);
+ max_depth = MAX_DIR_HASH_DEPTH;
+ f2fs_i_depth_write(dir, max_depth);
+ }
for (level = 0; level < max_depth; level++) {
- de = find_in_level(dir, level, &fname, res_page);
- if (de)
+ *res_page = NULL;
+ de = find_in_level(dir, level, fname, res_page);
+ if (de || IS_ERR(*res_page))
break;
}
out:
- f2fs_fname_free_filename(&fname);
+ /* This is to increase the speed of f2fs_create */
+ if (!de)
+ F2FS_I(dir)->task = current;
return de;
}
-struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
+/*
+ * Find an entry in the specified directory with the wanted name.
+ * It returns the page where the entry was found (as a parameter - res_page),
+ * and the entry itself. Page is returned mapped and unlocked.
+ * Entry is guaranteed to be valid.
+ */
+struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
+ const struct qstr *child, struct page **res_page)
{
- struct page *page;
- struct f2fs_dir_entry *de;
- struct f2fs_dentry_block *dentry_blk;
-
- if (f2fs_has_inline_dentry(dir))
- return f2fs_parent_inline_dir(dir, p);
+ struct f2fs_dir_entry *de = NULL;
+ struct fscrypt_name fname;
+ int err;
- page = get_lock_data_page(dir, 0, false);
- if (IS_ERR(page))
+ err = fscrypt_setup_filename(dir, child, 1, &fname);
+ if (err) {
+ if (err == -ENOENT)
+ *res_page = NULL;
+ else
+ *res_page = ERR_PTR(err);
return NULL;
+ }
+
+ de = __f2fs_find_entry(dir, &fname, res_page);
- dentry_blk = kmap(page);
- de = &dentry_blk->dentry[1];
- *p = page;
- unlock_page(page);
+ fscrypt_free_filename(&fname);
return de;
}
-ino_t f2fs_inode_by_name(struct inode *dir, struct qstr *qstr)
+struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
+{
+ struct qstr dotdot = QSTR_INIT("..", 2);
+
+ return f2fs_find_entry(dir, &dotdot, p);
+}
+
+ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
+ struct page **page)
{
ino_t res = 0;
struct f2fs_dir_entry *de;
- struct page *page;
- de = f2fs_find_entry(dir, qstr, &page);
+ de = f2fs_find_entry(dir, qstr, page);
if (de) {
res = le32_to_cpu(de->ino);
- f2fs_dentry_kunmap(dir, page);
- f2fs_put_page(page, 0);
+ f2fs_dentry_kunmap(dir, *page);
+ f2fs_put_page(*page, 0);
}
return res;
{
enum page_type type = f2fs_has_inline_dentry(dir) ? NODE : DATA;
lock_page(page);
- f2fs_wait_on_page_writeback(page, type);
+ f2fs_wait_on_page_writeback(page, type, true);
de->ino = cpu_to_le32(inode->i_ino);
set_de_type(de, inode->i_mode);
f2fs_dentry_kunmap(dir, page);
set_page_dirty(page);
- dir->i_mtime = dir->i_ctime = CURRENT_TIME;
- mark_inode_dirty(dir);
+ dir->i_mtime = dir->i_ctime = current_time(dir);
+ f2fs_mark_inode_dirty_sync(dir, false);
f2fs_put_page(page, 1);
}
{
struct f2fs_inode *ri;
- f2fs_wait_on_page_writeback(ipage, NODE);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
/* copy name info. to this inode page */
ri = F2FS_INODE(ipage);
set_page_dirty(ipage);
}
-int update_dent_inode(struct inode *inode, struct inode *to,
- const struct qstr *name)
-{
- struct page *page;
-
- if (file_enc_name(to))
- return 0;
-
- page = get_node_page(F2FS_I_SB(inode), inode->i_ino);
- if (IS_ERR(page))
- return PTR_ERR(page);
-
- init_dent_inode(name, page);
- f2fs_put_page(page, 1);
-
- return 0;
-}
-
void do_make_empty_dir(struct inode *inode, struct inode *parent,
struct f2fs_dentry_ptr *d)
{
- struct f2fs_dir_entry *de;
-
- de = &d->dentry[0];
- de->name_len = cpu_to_le16(1);
- de->hash_code = 0;
- de->ino = cpu_to_le32(inode->i_ino);
- memcpy(d->filename[0], ".", 1);
- set_de_type(de, inode->i_mode);
+ struct qstr dot = QSTR_INIT(".", 1);
+ struct qstr dotdot = QSTR_INIT("..", 2);
- de = &d->dentry[1];
- de->hash_code = 0;
- de->name_len = cpu_to_le16(2);
- de->ino = cpu_to_le32(parent->i_ino);
- memcpy(d->filename[1], "..", 2);
- set_de_type(de, parent->i_mode);
+ /* update dirent of "." */
+ f2fs_update_dentry(inode->i_ino, inode->i_mode, d, &dot, 0, 0);
- test_and_set_bit_le(0, (void *)d->bitmap);
- test_and_set_bit_le(1, (void *)d->bitmap);
+ /* update dirent of ".." */
+ f2fs_update_dentry(parent->i_ino, parent->i_mode, d, &dotdot, 0, 1);
}
static int make_empty_dir(struct inode *inode,
dentry_blk = kmap_atomic(dentry_page);
- make_dentry_ptr(NULL, &d, (void *)dentry_blk, 1);
+ make_dentry_ptr_block(NULL, &d, dentry_blk);
do_make_empty_dir(inode, parent, &d);
kunmap_atomic(dentry_blk);
}
struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
- const struct qstr *name, struct page *dpage)
+ const struct qstr *new_name, const struct qstr *orig_name,
+ struct page *dpage)
{
struct page *page;
int err;
- if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
+ if (is_inode_flag_set(inode, FI_NEW_INODE)) {
page = new_inode_page(inode);
if (IS_ERR(page))
return page;
if (S_ISDIR(inode->i_mode)) {
+ /* in order to handle error case */
+ get_page(page);
err = make_empty_dir(inode, dir, page);
- if (err)
- goto error;
+ if (err) {
+ lock_page(page);
+ goto put_error;
+ }
+ put_page(page);
}
err = f2fs_init_acl(inode, dir, page, dpage);
if (err)
goto put_error;
- err = f2fs_init_security(inode, dir, name, page);
+ err = f2fs_init_security(inode, dir, orig_name, page);
if (err)
goto put_error;
if (f2fs_encrypted_inode(dir) && f2fs_may_encrypt(inode)) {
- err = f2fs_inherit_context(dir, inode, page);
+ err = fscrypt_inherit_context(dir, inode, page, false);
if (err)
goto put_error;
}
set_cold_node(inode, page);
}
- if (name)
- init_dent_inode(name, page);
+ if (new_name) {
+ init_dent_inode(new_name, page);
+ if (f2fs_encrypted_inode(dir))
+ file_set_enc_name(inode);
+ }
/*
* This file should be checkpointed during fsync.
* We lost i_pino from now on.
*/
- if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) {
- file_lost_pino(inode);
+ if (is_inode_flag_set(inode, FI_INC_LINK)) {
+ if (!S_ISDIR(inode->i_mode))
+ file_lost_pino(inode);
/*
* If link the tmpfile to alias through linkat path,
* we should remove this inode from orphan list.
*/
if (inode->i_nlink == 0)
remove_orphan_inode(F2FS_I_SB(dir), inode->i_ino);
- inc_nlink(inode);
+ f2fs_i_links_write(inode, true);
}
return page;
put_error:
+ clear_nlink(inode);
+ update_inode(inode, page);
f2fs_put_page(page, 1);
-error:
- /* once the failed inode becomes a bad inode, i_mode is S_IFREG */
- truncate_inode_pages(&inode->i_data, 0);
- truncate_blocks(inode, 0, false);
- remove_dirty_dir_inode(inode);
- remove_inode_page(inode);
return ERR_PTR(err);
}
void update_parent_metadata(struct inode *dir, struct inode *inode,
unsigned int current_depth)
{
- if (inode && is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
- if (S_ISDIR(inode->i_mode)) {
- inc_nlink(dir);
- set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
- }
- clear_inode_flag(F2FS_I(inode), FI_NEW_INODE);
+ if (inode && is_inode_flag_set(inode, FI_NEW_INODE)) {
+ if (S_ISDIR(inode->i_mode))
+ f2fs_i_links_write(dir, true);
+ clear_inode_flag(inode, FI_NEW_INODE);
}
- dir->i_mtime = dir->i_ctime = CURRENT_TIME;
- mark_inode_dirty(dir);
+ dir->i_mtime = dir->i_ctime = current_time(dir);
+ f2fs_mark_inode_dirty_sync(dir, false);
- if (F2FS_I(dir)->i_current_depth != current_depth) {
- F2FS_I(dir)->i_current_depth = current_depth;
- set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
- }
+ if (F2FS_I(dir)->i_current_depth != current_depth)
+ f2fs_i_depth_write(dir, current_depth);
- if (inode && is_inode_flag_set(F2FS_I(inode), FI_INC_LINK))
- clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ if (inode && is_inode_flag_set(inode, FI_INC_LINK))
+ clear_inode_flag(inode, FI_INC_LINK);
}
int room_for_filename(const void *bitmap, int slots, int max_slots)
memcpy(d->filename[bit_pos], name->name, name->len);
de->ino = cpu_to_le32(ino);
set_de_type(de, mode);
- for (i = 0; i < slots; i++)
- test_and_set_bit_le(bit_pos + i, (void *)d->bitmap);
+ for (i = 0; i < slots; i++) {
+ __set_bit_le(bit_pos + i, (void *)d->bitmap);
+ /* avoid exposing garbage data to readdir */
+ if (i)
+ (de + i)->name_len = 0;
+ }
}
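
A directory entry's name occupies one or more 8-byte filename slots, and the loop above now also zeroes name_len in each trailing slot so readdir cannot mistake leftover bytes for a live entry. The slot count comes from GET_DENTRY_SLOTS(); a sketch of the rounding, assuming the usual F2FS_SLOT_LEN of 8 from f2fs.h:

#include <assert.h>
#include <stdio.h>

#define F2FS_SLOT_LEN		8
#define F2FS_SLOT_LEN_BITS	3

/* Round the name length up to whole 8-byte filename slots. */
#define GET_DENTRY_SLOTS(x) (((x) + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)

int main(void)
{
	assert(GET_DENTRY_SLOTS(1) == 1);	/* "." fits in one slot  */
	assert(GET_DENTRY_SLOTS(8) == 1);	/* exactly one slot      */
	assert(GET_DENTRY_SLOTS(9) == 2);	/* spills into a second  */
	printf("an 8-char name needs %d slot(s)\n", GET_DENTRY_SLOTS(8));
	return 0;
}
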
-/*
- * Caller should grab and release a rwsem by calling f2fs_lock_op() and
- * f2fs_unlock_op().
- */
-int __f2fs_add_link(struct inode *dir, const struct qstr *name,
+int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
+ const struct qstr *orig_name,
struct inode *inode, nid_t ino, umode_t mode)
{
unsigned int bit_pos;
struct f2fs_dentry_block *dentry_blk = NULL;
struct f2fs_dentry_ptr d;
struct page *page = NULL;
- struct f2fs_filename fname;
- struct qstr new_name;
- int slots, err;
-
- err = f2fs_fname_setup_filename(dir, name, 0, &fname);
- if (err)
- return err;
-
- new_name.name = fname_name(&fname);
- new_name.len = fname_len(&fname);
-
- if (f2fs_has_inline_dentry(dir)) {
- err = f2fs_add_inline_entry(dir, &new_name, inode, ino, mode);
- if (!err || err != -EAGAIN)
- goto out;
- else
- err = 0;
- }
+ int slots, err = 0;
level = 0;
- slots = GET_DENTRY_SLOTS(new_name.len);
- dentry_hash = f2fs_dentry_hash(&new_name, NULL);
+ slots = GET_DENTRY_SLOTS(new_name->len);
+ dentry_hash = f2fs_dentry_hash(new_name, NULL);
current_depth = F2FS_I(dir)->i_current_depth;
if (F2FS_I(dir)->chash == dentry_hash) {
}
start:
- if (unlikely(current_depth == MAX_DIR_HASH_DEPTH)) {
- err = -ENOSPC;
- goto out;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH)) {
+ f2fs_show_injection_info(FAULT_DIR_DEPTH);
+ return -ENOSPC;
}
+#endif
+ if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
+ return -ENOSPC;
/* Increase the depth, if required */
if (level == current_depth)
for (block = bidx; block <= (bidx + nblock - 1); block++) {
dentry_page = get_new_data_page(dir, NULL, block, true);
- if (IS_ERR(dentry_page)) {
- err = PTR_ERR(dentry_page);
- goto out;
- }
+ if (IS_ERR(dentry_page))
+ return PTR_ERR(dentry_page);
dentry_blk = kmap(dentry_page);
bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
++level;
goto start;
add_dentry:
- f2fs_wait_on_page_writeback(dentry_page, DATA);
+ f2fs_wait_on_page_writeback(dentry_page, DATA, true);
if (inode) {
down_write(&F2FS_I(inode)->i_sem);
- page = init_inode_metadata(inode, dir, &new_name, NULL);
+ page = init_inode_metadata(inode, dir, new_name,
+ orig_name, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
}
- if (f2fs_encrypted_inode(dir))
- file_set_enc_name(inode);
}
- make_dentry_ptr(NULL, &d, (void *)dentry_blk, 1);
- f2fs_update_dentry(ino, mode, &d, &new_name, dentry_hash, bit_pos);
+ make_dentry_ptr_block(NULL, &d, dentry_blk);
+ f2fs_update_dentry(ino, mode, &d, new_name, dentry_hash, bit_pos);
set_page_dirty(dentry_page);
if (inode) {
- /* we don't need to mark_inode_dirty now */
- F2FS_I(inode)->i_pino = dir->i_ino;
- update_inode(inode, page);
+ f2fs_i_pino_write(inode, dir->i_ino);
f2fs_put_page(page, 1);
}
if (inode)
up_write(&F2FS_I(inode)->i_sem);
- if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
- update_inode_page(dir);
- clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
- }
kunmap(dentry_page);
f2fs_put_page(dentry_page, 1);
-out:
- f2fs_fname_free_filename(&fname);
+
+ return err;
+}
+
+int __f2fs_do_add_link(struct inode *dir, struct fscrypt_name *fname,
+ struct inode *inode, nid_t ino, umode_t mode)
+{
+ struct qstr new_name;
+ int err = -EAGAIN;
+
+ new_name.name = fname_name(fname);
+ new_name.len = fname_len(fname);
+
+ if (f2fs_has_inline_dentry(dir))
+ err = f2fs_add_inline_entry(dir, &new_name, fname->usr_fname,
+ inode, ino, mode);
+ if (err == -EAGAIN)
+ err = f2fs_add_regular_entry(dir, &new_name, fname->usr_fname,
+ inode, ino, mode);
+
+ f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
+ return err;
+}
+
+/*
+ * Caller should grab and release a rwsem by calling f2fs_lock_op() and
+ * f2fs_unlock_op().
+ */
+int __f2fs_add_link(struct inode *dir, const struct qstr *name,
+ struct inode *inode, nid_t ino, umode_t mode)
+{
+ struct fscrypt_name fname;
+ struct page *page = NULL;
+ struct f2fs_dir_entry *de = NULL;
+ int err;
+
+ err = fscrypt_setup_filename(dir, name, 0, &fname);
+ if (err)
+ return err;
+
+ /*
+ * An immature stackable filesystem may show a race condition between
+ * lookup and create. If the same task performs both the lookup and the
+ * create, it is fine, as the VFS normally expects. Otherwise, verify
+ * the on-disk dentry one more time, which better guarantees filesystem
+ * consistency.
+ */
+ if (current != F2FS_I(dir)->task) {
+ de = __f2fs_find_entry(dir, &fname, &page);
+ F2FS_I(dir)->task = NULL;
+ }
+ if (de) {
+ f2fs_dentry_kunmap(dir, page);
+ f2fs_put_page(page, 0);
+ err = -EEXIST;
+ } else if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ } else {
+ err = __f2fs_do_add_link(dir, &fname, inode, ino, mode);
+ }
+ fscrypt_free_filename(&fname);
return err;
}
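
The re-verification above is skipped on the common path: __f2fs_find_entry() records the task that just missed a lookup in F2FS_I(dir)->task, and a create issued by that same task trusts the miss instead of re-reading the on-disk dentry. A sketch of that one-shot hint, with a hypothetical task_t standing in for the kernel's current:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for 'current'; in the kernel this is the running task. */
typedef int task_t;

static task_t dir_last_lookup_task;	/* F2FS_I(dir)->task analogue */

static void lookup_miss(task_t current_task)
{
	dir_last_lookup_task = current_task;	/* speed up a following create */
}

/* Returns true when an on-disk re-check is needed before linking. */
static bool must_reverify(task_t current_task)
{
	if (current_task != dir_last_lookup_task) {
		dir_last_lookup_task = 0;	/* consume the hint */
		return true;
	}
	return false;
}

int main(void)
{
	lookup_miss(42);
	printf("same task reverify: %d\n", must_reverify(42));	/* 0 */
	lookup_miss(42);
	printf("other task reverify: %d\n", must_reverify(7));	/* 1 */
	return 0;
}
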
int err = 0;
down_write(&F2FS_I(inode)->i_sem);
- page = init_inode_metadata(inode, dir, NULL, NULL);
+ page = init_inode_metadata(inode, dir, NULL, NULL, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
}
- /* we don't need to mark_inode_dirty now */
- update_inode(inode, page);
f2fs_put_page(page, 1);
- clear_inode_flag(F2FS_I(inode), FI_NEW_INODE);
+ clear_inode_flag(inode, FI_NEW_INODE);
fail:
up_write(&F2FS_I(inode)->i_sem);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return err;
}
-void f2fs_drop_nlink(struct inode *dir, struct inode *inode, struct page *page)
+void f2fs_drop_nlink(struct inode *dir, struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
down_write(&F2FS_I(inode)->i_sem);
- if (S_ISDIR(inode->i_mode)) {
- drop_nlink(dir);
- if (page)
- update_inode(dir, page);
- else
- update_inode_page(dir);
- }
- inode->i_ctime = CURRENT_TIME;
+ if (S_ISDIR(inode->i_mode))
+ f2fs_i_links_write(dir, false);
+ inode->i_ctime = current_time(inode);
- drop_nlink(inode);
+ f2fs_i_links_write(inode, false);
if (S_ISDIR(inode->i_mode)) {
- drop_nlink(inode);
- i_size_write(inode, 0);
+ f2fs_i_links_write(inode, false);
+ f2fs_i_size_write(inode, 0);
}
up_write(&F2FS_I(inode)->i_sem);
- update_inode_page(inode);
if (inode->i_nlink == 0)
- add_orphan_inode(sbi, inode->i_ino);
+ add_orphan_inode(inode);
else
release_orphan_inode(sbi);
}
struct f2fs_dentry_block *dentry_blk;
unsigned int bit_pos;
int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
+ struct address_space *mapping = page_mapping(page);
+ unsigned long flags;
int i;
+ f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
+
if (f2fs_has_inline_dentry(dir))
return f2fs_delete_inline_entry(dentry, page, dir, inode);
lock_page(page);
- f2fs_wait_on_page_writeback(page, DATA);
+ f2fs_wait_on_page_writeback(page, DATA, true);
dentry_blk = page_address(page);
bit_pos = dentry - dentry_blk->dentry;
for (i = 0; i < slots; i++)
- clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
+ __clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
/* Let's check and deallocate this dentry page */
bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
kunmap(page); /* kunmap - pair of f2fs_find_entry */
set_page_dirty(page);
- dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+ dir->i_ctime = dir->i_mtime = current_time(dir);
+ f2fs_mark_inode_dirty_sync(dir, false);
if (inode)
- f2fs_drop_nlink(dir, inode, NULL);
+ f2fs_drop_nlink(dir, inode);
if (bit_pos == NR_DENTRY_IN_BLOCK &&
!truncate_hole(dir, page->index, page->index + 1)) {
+ spin_lock_irqsave(&mapping->tree_lock, flags);
+ radix_tree_tag_clear(&mapping->page_tree, page_index(page),
+ PAGECACHE_TAG_DIRTY);
+ spin_unlock_irqrestore(&mapping->tree_lock, flags);
+
clear_page_dirty_for_io(page);
ClearPagePrivate(page);
ClearPageUptodate(page);
inode_dec_dirty_pages(dir);
+ remove_dirty_inode(dir);
}
f2fs_put_page(page, 1);
}
return true;
}
-bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
- unsigned int start_pos, struct f2fs_str *fstr)
+int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
+ unsigned int start_pos, struct fscrypt_str *fstr)
{
unsigned char d_type = DT_UNKNOWN;
unsigned int bit_pos;
struct f2fs_dir_entry *de = NULL;
- struct f2fs_str de_name = FSTR_INIT(NULL, 0);
+ struct fscrypt_str de_name = FSTR_INIT(NULL, 0);
bit_pos = ((unsigned long)ctx->pos % d->max);
break;
de = &d->dentry[bit_pos];
- if (de->file_type < F2FS_FT_MAX)
- d_type = f2fs_filetype_table[de->file_type];
- else
- d_type = DT_UNKNOWN;
+ if (de->name_len == 0) {
+ bit_pos++;
+ ctx->pos = start_pos + bit_pos;
+ continue;
+ }
+
+ d_type = get_de_type(de);
de_name.name = d->filename[bit_pos];
de_name.len = le16_to_cpu(de->name_len);
if (f2fs_encrypted_inode(d->inode)) {
int save_len = fstr->len;
- int ret;
+ int err;
- de_name.name = kmalloc(de_name.len, GFP_NOFS);
- if (!de_name.name)
- return false;
-
- memcpy(de_name.name, d->filename[bit_pos], de_name.len);
-
- ret = f2fs_fname_disk_to_usr(d->inode, &de->hash_code,
- &de_name, fstr);
- kfree(de_name.name);
- if (ret < 0)
- return true;
+ err = fscrypt_fname_disk_to_usr(d->inode,
+ (u32)de->hash_code, 0,
+ &de_name, fstr);
+ if (err)
+ return err;
de_name = *fstr;
fstr->len = save_len;
if (!dir_emit(ctx, de_name.name, de_name.len,
le32_to_cpu(de->ino), d_type))
- return true;
+ return 1;
bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
ctx->pos = start_pos + bit_pos;
}
- return false;
+ return 0;
}
static int f2fs_readdir(struct file *file, struct dir_context *ctx)
struct file_ra_state *ra = &file->f_ra;
unsigned int n = ((unsigned long)ctx->pos / NR_DENTRY_IN_BLOCK);
struct f2fs_dentry_ptr d;
- struct f2fs_str fstr = FSTR_INIT(NULL, 0);
+ struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
int err = 0;
if (f2fs_encrypted_inode(inode)) {
- err = f2fs_get_encryption_info(inode);
- if (err)
+ err = fscrypt_get_encryption_info(inode);
+ if (err && err != -ENOKEY)
return err;
- err = f2fs_fname_crypto_alloc_buffer(inode, F2FS_NAME_LEN,
- &fstr);
+ err = fscrypt_fname_alloc_buffer(inode, F2FS_NAME_LEN, &fstr);
if (err < 0)
return err;
}
for (; n < npages; n++) {
dentry_page = get_lock_data_page(inode, n, false);
- if (IS_ERR(dentry_page))
- continue;
+ if (IS_ERR(dentry_page)) {
+ err = PTR_ERR(dentry_page);
+ if (err == -ENOENT) {
+ err = 0;
+ continue;
+ } else {
+ goto out;
+ }
+ }
dentry_blk = kmap(dentry_page);
- make_dentry_ptr(inode, &d, (void *)dentry_blk, 1);
+ make_dentry_ptr_block(inode, &d, dentry_blk);
- if (f2fs_fill_dentries(ctx, &d, n * NR_DENTRY_IN_BLOCK, &fstr))
- goto stop;
+ err = f2fs_fill_dentries(ctx, &d,
+ n * NR_DENTRY_IN_BLOCK, &fstr);
+ if (err) {
+ kunmap(dentry_page);
+ f2fs_put_page(dentry_page, 1);
+ break;
+ }
ctx->pos = (n + 1) * NR_DENTRY_IN_BLOCK;
kunmap(dentry_page);
f2fs_put_page(dentry_page, 1);
- dentry_page = NULL;
- }
-stop:
- if (dentry_page && !IS_ERR(dentry_page)) {
- kunmap(dentry_page);
- f2fs_put_page(dentry_page, 1);
}
out:
- f2fs_fname_crypto_free_buffer(&fstr);
- return err;
+ fscrypt_fname_free_buffer(&fstr);
+ return err < 0 ? err : 0;
+}
+
+static int f2fs_dir_open(struct inode *inode, struct file *filp)
+{
+ if (f2fs_encrypted_inode(inode))
+ return fscrypt_get_encryption_info(inode) ? -EACCES : 0;
+ return 0;
}
const struct file_operations f2fs_dir_operations = {
.read = generic_read_dir,
.iterate = f2fs_readdir,
.fsync = f2fs_sync_file,
+ .open = f2fs_dir_open,
.unlocked_ioctl = f2fs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = f2fs_compat_ioctl,
#include "node.h"
#include <trace/events/f2fs.h>
+static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
+ unsigned int ofs)
+{
+ if (cached_re) {
+ if (cached_re->ofs <= ofs &&
+ cached_re->ofs + cached_re->len > ofs) {
+ return cached_re;
+ }
+ }
+ return NULL;
+}
+
+static struct rb_entry *__lookup_rb_tree_slow(struct rb_root *root,
+ unsigned int ofs)
+{
+ struct rb_node *node = root->rb_node;
+ struct rb_entry *re;
+
+ while (node) {
+ re = rb_entry(node, struct rb_entry, rb_node);
+
+ if (ofs < re->ofs)
+ node = node->rb_left;
+ else if (ofs >= re->ofs + re->len)
+ node = node->rb_right;
+ else
+ return re;
+ }
+ return NULL;
+}
+
+struct rb_entry *__lookup_rb_tree(struct rb_root *root,
+ struct rb_entry *cached_re, unsigned int ofs)
+{
+ struct rb_entry *re;
+
+ re = __lookup_rb_tree_fast(cached_re, ofs);
+ if (!re)
+ return __lookup_rb_tree_slow(root, ofs);
+
+ return re;
+}
+
+struct rb_node **__lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
+ struct rb_root *root, struct rb_node **parent,
+ unsigned int ofs)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_entry *re;
+
+ while (*p) {
+ *parent = *p;
+ re = rb_entry(*parent, struct rb_entry, rb_node);
+
+ if (ofs < re->ofs)
+ p = &(*p)->rb_left;
+ else if (ofs >= re->ofs + re->len)
+ p = &(*p)->rb_right;
+ else
+ f2fs_bug_on(sbi, 1);
+ }
+
+ return p;
+}
+
+/*
+ * Look up the rb entry covering @ofs in the rb-tree: on a hit, return
+ * the entry; otherwise, return NULL.
+ * @prev_entry: entry before ofs
+ * @next_entry: entry after ofs
+ * @insert_p: insertion point for a new entry at ofs, recorded
+ * to simplify the insertion afterwards.
+ * The tree must stay unchanged between lookup and insertion.
+ */
+struct rb_entry *__lookup_rb_tree_ret(struct rb_root *root,
+ struct rb_entry *cached_re,
+ unsigned int ofs,
+ struct rb_entry **prev_entry,
+ struct rb_entry **next_entry,
+ struct rb_node ***insert_p,
+ struct rb_node **insert_parent,
+ bool force)
+{
+ struct rb_node **pnode = &root->rb_node;
+ struct rb_node *parent = NULL, *tmp_node;
+ struct rb_entry *re = cached_re;
+
+ *insert_p = NULL;
+ *insert_parent = NULL;
+ *prev_entry = NULL;
+ *next_entry = NULL;
+
+ if (RB_EMPTY_ROOT(root))
+ return NULL;
+
+ if (re) {
+ if (re->ofs <= ofs && re->ofs + re->len > ofs)
+ goto lookup_neighbors;
+ }
+
+ while (*pnode) {
+ parent = *pnode;
+ re = rb_entry(*pnode, struct rb_entry, rb_node);
+
+ if (ofs < re->ofs)
+ pnode = &(*pnode)->rb_left;
+ else if (ofs >= re->ofs + re->len)
+ pnode = &(*pnode)->rb_right;
+ else
+ goto lookup_neighbors;
+ }
+
+ *insert_p = pnode;
+ *insert_parent = parent;
+
+ re = rb_entry(parent, struct rb_entry, rb_node);
+ tmp_node = parent;
+ if (parent && ofs > re->ofs)
+ tmp_node = rb_next(parent);
+ *next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
+
+ tmp_node = parent;
+ if (parent && ofs < re->ofs)
+ tmp_node = rb_prev(parent);
+ *prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
+ return NULL;
+
+lookup_neighbors:
+ if (ofs == re->ofs || force) {
+ /* lookup prev node for merging backward later */
+ tmp_node = rb_prev(&re->rb_node);
+ *prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
+ }
+ if (ofs == re->ofs + re->len - 1 || force) {
+ /* lookup next node for merging forward later */
+ tmp_node = rb_next(&re->rb_node);
+ *next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
+ }
+ return re;
+}
+
+bool __check_rb_tree_consistence(struct f2fs_sb_info *sbi,
+ struct rb_root *root)
+{
+#ifdef CONFIG_F2FS_CHECK_FS
+ struct rb_node *cur = rb_first(root), *next;
+ struct rb_entry *cur_re, *next_re;
+
+ if (!cur)
+ return true;
+
+ while (cur) {
+ next = rb_next(cur);
+ if (!next)
+ return true;
+
+ cur_re = rb_entry(cur, struct rb_entry, rb_node);
+ next_re = rb_entry(next, struct rb_entry, rb_node);
+
+ if (cur_re->ofs + cur_re->len > next_re->ofs) {
+ f2fs_msg(sbi->sb, KERN_INFO, "inconsistent rbtree, "
+ "cur(%u, %u) next(%u, %u)",
+ cur_re->ofs, cur_re->len,
+ next_re->ofs, next_re->len);
+ return false;
+ }
+
+ cur = next;
+ }
+#endif
+ return true;
+}
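The invariant being checked is that cached ranges never overlap: for every adjacent pair, cur->ofs + cur->len must not exceed next->ofs. A worked instance:

	/* cur = (ofs 0, len 10), next = (ofs 5, len 3):
	 * 0 + 10 > 5, so this logs
	 *	"inconsistent rbtree, cur(0, 10) next(5, 3)"
	 * and the walk reports failure. */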
+
static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;
en->ei = *ei;
INIT_LIST_HEAD(&en->list);
+ en->et = et;
rb_link_node(&en->rb_node, parent, p);
rb_insert_color(&en->rb_node, &et->root);
- et->count++;
+ atomic_inc(&et->node_cnt);
atomic_inc(&sbi->total_ext_node);
return en;
}
struct extent_tree *et, struct extent_node *en)
{
rb_erase(&en->rb_node, &et->root);
- et->count--;
+ atomic_dec(&et->node_cnt);
atomic_dec(&sbi->total_ext_node);
if (et->cached_en == en)
et->cached_en = NULL;
+ kmem_cache_free(extent_node_slab, en);
+}
+
+/*
+ * Flow to release an extent_node:
+ * 1. list_del_init
+ * 2. __detach_extent_node
+ * 3. kmem_cache_free.
+ */
+static void __release_extent_node(struct f2fs_sb_info *sbi,
+ struct extent_tree *et, struct extent_node *en)
+{
+ spin_lock(&sbi->extent_lock);
+ f2fs_bug_on(sbi, list_empty(&en->list));
+ list_del_init(&en->list);
+ spin_unlock(&sbi->extent_lock);
+
+ __detach_extent_node(sbi, et, en);
}
static struct extent_tree *__grab_extent_tree(struct inode *inode)
struct extent_tree *et;
nid_t ino = inode->i_ino;
- down_write(&sbi->extent_tree_lock);
+ mutex_lock(&sbi->extent_tree_lock);
et = radix_tree_lookup(&sbi->extent_tree_root, ino);
if (!et) {
et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
et->root = RB_ROOT;
et->cached_en = NULL;
rwlock_init(&et->lock);
- atomic_set(&et->refcount, 0);
- et->count = 0;
- sbi->total_ext_tree++;
+ INIT_LIST_HEAD(&et->list);
+ atomic_set(&et->node_cnt, 0);
+ atomic_inc(&sbi->total_ext_tree);
+ } else {
+ atomic_dec(&sbi->total_zombie_tree);
+ list_del_init(&et->list);
}
- atomic_inc(&et->refcount);
- up_write(&sbi->extent_tree_lock);
+ mutex_unlock(&sbi->extent_tree_lock);
/* never died until evict_inode */
F2FS_I(inode)->extent_tree = et;
return et;
}
-static struct extent_node *__lookup_extent_tree(struct f2fs_sb_info *sbi,
- struct extent_tree *et, unsigned int fofs)
-{
- struct rb_node *node = et->root.rb_node;
- struct extent_node *en = et->cached_en;
-
- if (en) {
- struct extent_info *cei = &en->ei;
-
- if (cei->fofs <= fofs && cei->fofs + cei->len > fofs) {
- stat_inc_cached_node_hit(sbi);
- return en;
- }
- }
-
- while (node) {
- en = rb_entry(node, struct extent_node, rb_node);
-
- if (fofs < en->ei.fofs) {
- node = node->rb_left;
- } else if (fofs >= en->ei.fofs + en->ei.len) {
- node = node->rb_right;
- } else {
- stat_inc_rbtree_node_hit(sbi);
- return en;
- }
- }
- return NULL;
-}
-
static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
struct extent_tree *et, struct extent_info *ei)
{
}
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
- struct extent_tree *et, bool free_all)
+ struct extent_tree *et)
{
struct rb_node *node, *next;
struct extent_node *en;
- unsigned int count = et->count;
+ unsigned int count = atomic_read(&et->node_cnt);
node = rb_first(&et->root);
while (node) {
next = rb_next(node);
en = rb_entry(node, struct extent_node, rb_node);
-
- if (free_all) {
- spin_lock(&sbi->extent_lock);
- if (!list_empty(&en->list))
- list_del_init(&en->list);
- spin_unlock(&sbi->extent_lock);
- }
-
- if (free_all || list_empty(&en->list)) {
- __detach_extent_node(sbi, et, en);
- kmem_cache_free(extent_node_slab, en);
- }
+ __release_extent_node(sbi, et, en);
node = next;
}
- return count - et->count;
+ return count - atomic_read(&et->node_cnt);
}
static void __drop_largest_extent(struct inode *inode,
{
struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;
- if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs)
+ if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs) {
largest->len = 0;
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
}
-void f2fs_drop_largest_extent(struct inode *inode, pgoff_t fofs)
-{
- if (!f2fs_may_extent_tree(inode))
- return;
-
- __drop_largest_extent(inode, fofs, 1);
-}
-
-void f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
+/* return true, if inode page is changed */
+static bool __f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct extent_tree *et;
struct extent_node *en;
struct extent_info ei;
- if (!f2fs_may_extent_tree(inode))
- return;
+ if (!f2fs_may_extent_tree(inode)) {
+ /* drop largest extent */
+ if (i_ext && i_ext->len) {
+ i_ext->len = 0;
+ return true;
+ }
+ return false;
+ }
et = __grab_extent_tree(inode);
- if (!i_ext || le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)
- return;
+ if (!i_ext || !i_ext->len)
+ return false;
- set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
- le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));
+ get_extent_info(&ei, i_ext);
write_lock(&et->lock);
- if (et->count)
+ if (atomic_read(&et->node_cnt))
goto out;
en = __init_extent_tree(sbi, et, &ei);
}
out:
write_unlock(&et->lock);
+ return false;
+}
+
+bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
+{
+ bool ret = __f2fs_init_extent_tree(inode, i_ext);
+
+ if (!F2FS_I(inode)->extent_tree)
+ set_inode_flag(inode, FI_NO_EXTENT);
+
+ return ret;
}
static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
goto out;
}
- en = __lookup_extent_tree(sbi, et, pgofs);
- if (en) {
- *ei = en->ei;
- spin_lock(&sbi->extent_lock);
- if (!list_empty(&en->list))
- list_move_tail(&en->list, &sbi->extent_list);
+ en = (struct extent_node *)__lookup_rb_tree(&et->root,
+ (struct rb_entry *)et->cached_en, pgofs);
+ if (!en)
+ goto out;
+
+ if (en == et->cached_en)
+ stat_inc_cached_node_hit(sbi);
+ else
+ stat_inc_rbtree_node_hit(sbi);
+
+ *ei = en->ei;
+ spin_lock(&sbi->extent_lock);
+ if (!list_empty(&en->list)) {
+ list_move_tail(&en->list, &sbi->extent_list);
et->cached_en = en;
- spin_unlock(&sbi->extent_lock);
- ret = true;
}
+ spin_unlock(&sbi->extent_lock);
+ ret = true;
out:
stat_inc_total_hit(sbi);
read_unlock(&et->lock);
return ret;
}
-
-/*
- * lookup extent at @fofs, if hit, return the extent
- * if not, return NULL and
- * @prev_ex: extent before fofs
- * @next_ex: extent after fofs
- * @insert_p: insert point for new extent at fofs
- * in order to simpfy the insertion after.
- * tree must stay unchanged between lookup and insertion.
- */
-static struct extent_node *__lookup_extent_tree_ret(struct extent_tree *et,
- unsigned int fofs,
- struct extent_node **prev_ex,
- struct extent_node **next_ex,
- struct rb_node ***insert_p,
- struct rb_node **insert_parent)
-{
- struct rb_node **pnode = &et->root.rb_node;
- struct rb_node *parent = NULL, *tmp_node;
- struct extent_node *en = et->cached_en;
-
- *insert_p = NULL;
- *insert_parent = NULL;
- *prev_ex = NULL;
- *next_ex = NULL;
-
- if (RB_EMPTY_ROOT(&et->root))
- return NULL;
-
- if (en) {
- struct extent_info *cei = &en->ei;
-
- if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
- goto lookup_neighbors;
- }
-
- while (*pnode) {
- parent = *pnode;
- en = rb_entry(*pnode, struct extent_node, rb_node);
-
- if (fofs < en->ei.fofs)
- pnode = &(*pnode)->rb_left;
- else if (fofs >= en->ei.fofs + en->ei.len)
- pnode = &(*pnode)->rb_right;
- else
- goto lookup_neighbors;
- }
-
- *insert_p = pnode;
- *insert_parent = parent;
-
- en = rb_entry(parent, struct extent_node, rb_node);
- tmp_node = parent;
- if (parent && fofs > en->ei.fofs)
- tmp_node = rb_next(parent);
- *next_ex = tmp_node ?
- rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
-
- tmp_node = parent;
- if (parent && fofs < en->ei.fofs)
- tmp_node = rb_prev(parent);
- *prev_ex = tmp_node ?
- rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
- return NULL;
-
-lookup_neighbors:
- if (fofs == en->ei.fofs) {
- /* lookup prev node for merging backward later */
- tmp_node = rb_prev(&en->rb_node);
- *prev_ex = tmp_node ?
- rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
- }
- if (fofs == en->ei.fofs + en->ei.len - 1) {
- /* lookup next node for merging frontward later */
- tmp_node = rb_next(&en->rb_node);
- *next_ex = tmp_node ?
- rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
- }
- return en;
-}
-
-static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
+static struct extent_node *__try_merge_extent_node(struct inode *inode,
struct extent_tree *et, struct extent_info *ei,
- struct extent_node **den,
struct extent_node *prev_ex,
struct extent_node *next_ex)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct extent_node *en = NULL;
if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
}
if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
- if (en) {
- __detach_extent_node(sbi, et, prev_ex);
- *den = prev_ex;
- }
next_ex->ei.fofs = ei->fofs;
next_ex->ei.blk = ei->blk;
next_ex->ei.len += ei->len;
+ if (en)
+ __release_extent_node(sbi, et, prev_ex);
+
en = next_ex;
}
- if (en) {
- __try_update_largest_extent(et, en);
+ if (!en)
+ return NULL;
+
+ __try_update_largest_extent(inode, et, en);
+
+ spin_lock(&sbi->extent_lock);
+ if (!list_empty(&en->list)) {
+ list_move_tail(&en->list, &sbi->extent_list);
et->cached_en = en;
}
+ spin_unlock(&sbi->extent_lock);
return en;
}
-static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
+static struct extent_node *__insert_extent_tree(struct inode *inode,
struct extent_tree *et, struct extent_info *ei,
struct rb_node **insert_p,
struct rb_node *insert_parent)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct rb_node **p = &et->root.rb_node;
struct rb_node *parent = NULL;
struct extent_node *en = NULL;
goto do_insert;
}
- while (*p) {
- parent = *p;
- en = rb_entry(parent, struct extent_node, rb_node);
-
- if (ei->fofs < en->ei.fofs)
- p = &(*p)->rb_left;
- else if (ei->fofs >= en->ei.fofs + en->ei.len)
- p = &(*p)->rb_right;
- else
- f2fs_bug_on(sbi, 1);
- }
+ p = __lookup_rb_tree_for_insert(sbi, &et->root, &parent, ei->fofs);
do_insert:
en = __attach_extent_node(sbi, et, ei, parent, p);
if (!en)
return NULL;
- __try_update_largest_extent(et, en);
+ __try_update_largest_extent(inode, et, en);
+
+ /* update in global extent list */
+ spin_lock(&sbi->extent_lock);
+ list_add_tail(&en->list, &sbi->extent_list);
et->cached_en = en;
+ spin_unlock(&sbi->extent_lock);
return en;
}
-static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
+static void f2fs_update_extent_tree_range(struct inode *inode,
pgoff_t fofs, block_t blkaddr, unsigned int len)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
unsigned int pos = (unsigned int)fofs;
if (!et)
- return false;
+ return;
trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);
write_lock(&et->lock);
- if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT)) {
+ if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
write_unlock(&et->lock);
- return false;
+ return;
}
prev = et->largest;
__drop_largest_extent(inode, fofs, len);
/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
- en = __lookup_extent_tree_ret(et, fofs, &prev_en, &next_en,
- &insert_p, &insert_parent);
+ en = (struct extent_node *)__lookup_rb_tree_ret(&et->root,
+ (struct rb_entry *)et->cached_en, fofs,
+ (struct rb_entry **)&prev_en,
+ (struct rb_entry **)&next_en,
+ &insert_p, &insert_parent, false);
if (!en)
en = next_en;
set_extent_info(&ei, end,
end - dei.fofs + dei.blk,
org_end - end);
- en1 = __insert_extent_tree(sbi, et, &ei,
+ en1 = __insert_extent_tree(inode, et, &ei,
NULL, NULL);
next_en = en1;
} else {
if (!next_en) {
struct rb_node *node = rb_next(&en->rb_node);
- next_en = node ?
- rb_entry(node, struct extent_node, rb_node)
- : NULL;
+ next_en = rb_entry_safe(node, struct extent_node,
+ rb_node);
}
if (parts)
- __try_update_largest_extent(et, en);
+ __try_update_largest_extent(inode, et, en);
else
- __detach_extent_node(sbi, et, en);
+ __release_extent_node(sbi, et, en);
/*
* if original extent is split into zero or two parts, extent
insert_p = NULL;
insert_parent = NULL;
}
-
- /* update in global extent list */
- spin_lock(&sbi->extent_lock);
- if (!parts && !list_empty(&en->list))
- list_del(&en->list);
- if (en1)
- list_add_tail(&en1->list, &sbi->extent_list);
- spin_unlock(&sbi->extent_lock);
-
- /* release extent node */
- if (!parts)
- kmem_cache_free(extent_node_slab, en);
-
en = next_en;
}
/* 3. update extent in extent cache */
if (blkaddr) {
- struct extent_node *den = NULL;
set_extent_info(&ei, fofs, blkaddr, len);
- en1 = __try_merge_extent_node(sbi, et, &ei, &den,
- prev_en, next_en);
- if (!en1)
- en1 = __insert_extent_tree(sbi, et, &ei,
+ if (!__try_merge_extent_node(inode, et, &ei, prev_en, next_en))
+ __insert_extent_tree(inode, et, &ei,
insert_p, insert_parent);
/* give up extent_cache, if split and small updates happen */
if (dei.len >= 1 &&
prev.len < F2FS_MIN_EXTENT_LEN &&
et->largest.len < F2FS_MIN_EXTENT_LEN) {
- et->largest.len = 0;
- set_inode_flag(F2FS_I(inode), FI_NO_EXTENT);
- }
-
- spin_lock(&sbi->extent_lock);
- if (en1) {
- if (list_empty(&en1->list))
- list_add_tail(&en1->list, &sbi->extent_list);
- else
- list_move_tail(&en1->list, &sbi->extent_list);
+ __drop_largest_extent(inode, 0, UINT_MAX);
+ set_inode_flag(inode, FI_NO_EXTENT);
}
- if (den && !list_empty(&den->list))
- list_del(&den->list);
- spin_unlock(&sbi->extent_lock);
-
- if (den)
- kmem_cache_free(extent_node_slab, den);
}
- if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
- __free_extent_tree(sbi, et, true);
+ if (is_inode_flag_set(inode, FI_NO_EXTENT))
+ __free_extent_tree(sbi, et);
write_unlock(&et->lock);
-
- return !__is_extent_same(&prev, &et->largest);
}
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
- struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
- struct extent_node *en, *tmp;
- unsigned long ino = F2FS_ROOT_INO(sbi);
- struct radix_tree_root *root = &sbi->extent_tree_root;
- unsigned int found;
+ struct extent_tree *et, *next;
+ struct extent_node *en;
unsigned int node_cnt = 0, tree_cnt = 0;
int remained;
if (!test_opt(sbi, EXTENT_CACHE))
return 0;
- if (!down_write_trylock(&sbi->extent_tree_lock))
+ if (!atomic_read(&sbi->total_zombie_tree))
+ goto free_node;
+
+ if (!mutex_trylock(&sbi->extent_tree_lock))
goto out;
/* 1. remove unreferenced extent tree */
- while ((found = radix_tree_gang_lookup(root,
- (void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
- unsigned i;
-
- ino = treevec[found - 1]->ino + 1;
- for (i = 0; i < found; i++) {
- struct extent_tree *et = treevec[i];
-
- if (!atomic_read(&et->refcount)) {
- write_lock(&et->lock);
- node_cnt += __free_extent_tree(sbi, et, true);
- write_unlock(&et->lock);
-
- radix_tree_delete(root, et->ino);
- kmem_cache_free(extent_tree_slab, et);
- sbi->total_ext_tree--;
- tree_cnt++;
-
- if (node_cnt + tree_cnt >= nr_shrink)
- goto unlock_out;
- }
+ list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
+ if (atomic_read(&et->node_cnt)) {
+ write_lock(&et->lock);
+ node_cnt += __free_extent_tree(sbi, et);
+ write_unlock(&et->lock);
}
+ f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
+ list_del_init(&et->list);
+ radix_tree_delete(&sbi->extent_tree_root, et->ino);
+ kmem_cache_free(extent_tree_slab, et);
+ atomic_dec(&sbi->total_ext_tree);
+ atomic_dec(&sbi->total_zombie_tree);
+ tree_cnt++;
+
+ if (node_cnt + tree_cnt >= nr_shrink)
+ goto unlock_out;
+ cond_resched();
}
- up_write(&sbi->extent_tree_lock);
+ mutex_unlock(&sbi->extent_tree_lock);
+free_node:
/* 2. remove LRU extent entries */
- if (!down_write_trylock(&sbi->extent_tree_lock))
+ if (!mutex_trylock(&sbi->extent_tree_lock))
goto out;
remained = nr_shrink - (node_cnt + tree_cnt);
spin_lock(&sbi->extent_lock);
- list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
- if (!remained--)
+ for (; remained > 0; remained--) {
+ if (list_empty(&sbi->extent_list))
break;
- list_del_init(&en->list);
- }
- spin_unlock(&sbi->extent_lock);
-
- /*
- * reset ino for searching victims from beginning of global extent tree.
- */
- ino = F2FS_ROOT_INO(sbi);
-
- while ((found = radix_tree_gang_lookup(root,
- (void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
- unsigned i;
+ en = list_first_entry(&sbi->extent_list,
+ struct extent_node, list);
+ et = en->et;
+ if (!write_trylock(&et->lock)) {
+ /* refresh this extent node's position in extent list */
+ list_move_tail(&en->list, &sbi->extent_list);
+ continue;
+ }
- ino = treevec[found - 1]->ino + 1;
- for (i = 0; i < found; i++) {
- struct extent_tree *et = treevec[i];
+ list_del_init(&en->list);
+ spin_unlock(&sbi->extent_lock);
- write_lock(&et->lock);
- node_cnt += __free_extent_tree(sbi, et, false);
- write_unlock(&et->lock);
+ __detach_extent_node(sbi, et, en);
- if (node_cnt + tree_cnt >= nr_shrink)
- goto unlock_out;
- }
+ write_unlock(&et->lock);
+ node_cnt++;
+ spin_lock(&sbi->extent_lock);
}
+ spin_unlock(&sbi->extent_lock);
+
unlock_out:
- up_write(&sbi->extent_tree_lock);
+ mutex_unlock(&sbi->extent_tree_lock);
out:
trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
struct extent_tree *et = F2FS_I(inode)->extent_tree;
unsigned int node_cnt = 0;
- if (!et)
+ if (!et || !atomic_read(&et->node_cnt))
return 0;
write_lock(&et->lock);
- node_cnt = __free_extent_tree(sbi, et, true);
+ node_cnt = __free_extent_tree(sbi, et);
write_unlock(&et->lock);
return node_cnt;
}
+void f2fs_drop_extent_tree(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct extent_tree *et = F2FS_I(inode)->extent_tree;
+
+ set_inode_flag(inode, FI_NO_EXTENT);
+
+ write_lock(&et->lock);
+ __free_extent_tree(sbi, et);
+ __drop_largest_extent(inode, 0, UINT_MAX);
+ write_unlock(&et->lock);
+}
+
void f2fs_destroy_extent_tree(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
if (!et)
return;
- if (inode->i_nlink && !is_bad_inode(inode) && et->count) {
- atomic_dec(&et->refcount);
+ if (inode->i_nlink && !is_bad_inode(inode) &&
+ atomic_read(&et->node_cnt)) {
+ mutex_lock(&sbi->extent_tree_lock);
+ list_add_tail(&et->list, &sbi->zombie_list);
+ atomic_inc(&sbi->total_zombie_tree);
+ mutex_unlock(&sbi->extent_tree_lock);
return;
}
node_cnt = f2fs_destroy_extent_node(inode);
/* delete extent tree entry in radix tree */
- down_write(&sbi->extent_tree_lock);
- atomic_dec(&et->refcount);
- f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
+ mutex_lock(&sbi->extent_tree_lock);
+ f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
kmem_cache_free(extent_tree_slab, et);
- sbi->total_ext_tree--;
- up_write(&sbi->extent_tree_lock);
+ atomic_dec(&sbi->total_ext_tree);
+ mutex_unlock(&sbi->extent_tree_lock);
F2FS_I(inode)->extent_tree = NULL;
void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
- struct f2fs_inode_info *fi = F2FS_I(dn->inode);
pgoff_t fofs;
+ block_t blkaddr;
if (!f2fs_may_extent_tree(dn->inode))
return;
- f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);
-
-
- fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
- dn->ofs_in_node;
+ if (dn->data_blkaddr == NEW_ADDR)
+ blkaddr = NULL_ADDR;
+ else
+ blkaddr = dn->data_blkaddr;
- if (f2fs_update_extent_tree_range(dn->inode, fofs, dn->data_blkaddr, 1))
- sync_inode_page(dn);
+ fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
+ dn->ofs_in_node;
+ f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
}
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
if (!f2fs_may_extent_tree(dn->inode))
return;
- if (f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len))
- sync_inode_page(dn);
+ f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
}
void init_extent_cache_info(struct f2fs_sb_info *sbi)
{
INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
- init_rwsem(&sbi->extent_tree_lock);
+ mutex_init(&sbi->extent_tree_lock);
INIT_LIST_HEAD(&sbi->extent_list);
spin_lock_init(&sbi->extent_lock);
- sbi->total_ext_tree = 0;
+ atomic_set(&sbi->total_ext_tree, 0);
+ INIT_LIST_HEAD(&sbi->zombie_list);
+ atomic_set(&sbi->total_zombie_tree, 0);
atomic_set(&sbi->total_ext_node, 0);
}
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/quotaops.h>
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#include <linux/fscrypt_supp.h>
+#else
+#include <linux/fscrypt_notsupp.h>
+#endif
+#include <crypto/hash.h>
+#include <linux/writeback.h>
#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition) BUG_ON(condition)
-#define f2fs_down_write(x, y) down_write_nest_lock(x, y)
#else
#define f2fs_bug_on(sbi, condition) \
do { \
set_sbi_flag(sbi, SBI_NEED_FSCK); \
} \
} while (0)
-#define f2fs_down_write(x, y) down_write(x)
+#endif
+
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+enum {
+ FAULT_KMALLOC,
+ FAULT_PAGE_ALLOC,
+ FAULT_ALLOC_NID,
+ FAULT_ORPHAN,
+ FAULT_BLOCK,
+ FAULT_DIR_DEPTH,
+ FAULT_EVICT_INODE,
+ FAULT_TRUNCATE,
+ FAULT_IO,
+ FAULT_CHECKPOINT,
+ FAULT_MAX,
+};
+
+struct f2fs_fault_info {
+ atomic_t inject_ops;
+ unsigned int inject_rate;
+ unsigned int inject_type;
+};
+
+extern char *fault_name[FAULT_MAX];
+#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
#endif
/*
#define F2FS_MOUNT_FASTBOOT 0x00001000
#define F2FS_MOUNT_EXTENT_CACHE 0x00002000
#define F2FS_MOUNT_FORCE_FG_GC 0x00004000
-
-#define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
-#define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option)
-#define test_opt(sbi, option) (sbi->mount_opt.opt & F2FS_MOUNT_##option)
+#define F2FS_MOUNT_DATA_FLUSH 0x00008000
+#define F2FS_MOUNT_FAULT_INJECTION 0x00010000
+#define F2FS_MOUNT_ADAPTIVE 0x00020000
+#define F2FS_MOUNT_LFS 0x00040000
+#define F2FS_MOUNT_USRQUOTA 0x00080000
+#define F2FS_MOUNT_GRPQUOTA 0x00100000
+#define F2FS_MOUNT_PRJQUOTA 0x00200000
+#define F2FS_MOUNT_QUOTA 0x00400000
+
+#define clear_opt(sbi, option) ((sbi)->mount_opt.opt &= ~F2FS_MOUNT_##option)
+#define set_opt(sbi, option) ((sbi)->mount_opt.opt |= F2FS_MOUNT_##option)
+#define test_opt(sbi, option) ((sbi)->mount_opt.opt & F2FS_MOUNT_##option)
#define ver_after(a, b) (typecheck(unsigned long long, a) && \
typecheck(unsigned long long, b) && \
unsigned int opt;
};
-#define F2FS_FEATURE_ENCRYPT 0x0001
+#define F2FS_FEATURE_ENCRYPT 0x0001
+#define F2FS_FEATURE_BLKZONED 0x0002
+#define F2FS_FEATURE_ATOMIC_WRITE 0x0004
+#define F2FS_FEATURE_EXTRA_ATTR 0x0008
+#define F2FS_FEATURE_PRJQUOTA 0x0010
+#define F2FS_FEATURE_INODE_CHKSUM 0x0020
#define F2FS_HAS_FEATURE(sb, mask) \
((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_SET_FEATURE(sb, mask) \
- F2FS_SB(sb)->raw_super->feature |= cpu_to_le32(mask)
+ (F2FS_SB(sb)->raw_super->feature |= cpu_to_le32(mask))
#define F2FS_CLEAR_FEATURE(sb, mask) \
- F2FS_SB(sb)->raw_super->feature &= ~cpu_to_le32(mask)
+ (F2FS_SB(sb)->raw_super->feature &= ~cpu_to_le32(mask))
-#define CRCPOLY_LE 0xedb88320
+/* bio stuffs */
+#define REQ_OP_READ READ
+#define REQ_OP_WRITE WRITE
+#define bio_op(bio) ((bio)->bi_rw & 1)
-static inline __u32 f2fs_crc32(void *buf, size_t len)
+static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
+ unsigned op_flags)
{
- unsigned char *p = (unsigned char *)buf;
- __u32 crc = F2FS_SUPER_MAGIC;
- int i;
+ bio->bi_rw = op | op_flags;
+}
- while (len--) {
- crc ^= *p++;
- for (i = 0; i < 8; i++)
- crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
- }
- return crc;
+static inline int wbc_to_write_flags(struct writeback_control *wbc)
+{
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ return REQ_SYNC | REQ_NOIDLE;
+ return 0;
+}
+
+/**
+ * wq_has_sleeper - check if there are any waiting processes
+ * @wq: wait queue head
+ *
+ * Returns true if wq has waiting processes
+ *
+ * Please refer to the comment for waitqueue_active.
+ */
+static inline bool wq_has_sleeper(wait_queue_head_t *wq)
+{
+ /*
+ * We need to be sure we are in sync with the
+ * add_wait_queue modifications to the wait queue.
+ *
+ * This memory barrier should be paired with one on the
+ * waiting side.
+ */
+ smp_mb();
+ return waitqueue_active(wq);
+}
+
+static inline void inode_nohighmem(struct inode *inode)
+{
+ mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
}
-static inline bool f2fs_crc_valid(__u32 blk_crc, void *buf, size_t buf_size)
+/**
+ * current_time - Return FS time
+ * @inode: inode.
+ *
+ * Return the current time truncated to the time granularity supported by
+ * the fs.
+ *
+ * Note that inode and inode->sb should not be NULL.
+ * If they are, the function warns and returns the time without truncation.
+ */
+static inline struct timespec current_time(struct inode *inode)
{
- return f2fs_crc32(buf, buf_size) == blk_crc;
+ struct timespec now = current_kernel_time();
+
+ if (unlikely(!inode->i_sb)) {
+ WARN(1, "current_time() called with uninitialized super_block in the inode");
+ return now;
+ }
+
+ return timespec_trunc(now, inode->i_sb->s_time_gran);
}
/*
SIT_BITMAP
};
-enum {
- CP_UMOUNT,
- CP_FASTBOOT,
- CP_SYNC,
- CP_RECOVERY,
- CP_DISCARD,
-};
+#define CP_UMOUNT 0x00000001
+#define CP_FASTBOOT 0x00000002
+#define CP_SYNC 0x00000004
+#define CP_RECOVERY 0x00000008
+#define CP_DISCARD 0x00000010
+#define CP_TRIMMED 0x00000020
-#define DEF_BATCHED_TRIM_SECTIONS 32
+#define DEF_BATCHED_TRIM_SECTIONS 2048
#define BATCHED_TRIM_SEGMENTS(sbi) \
- (SM_I(sbi)->trim_sections * (sbi)->segs_per_sec)
+ (GET_SEG_FROM_SEC(sbi, SM_I(sbi)->trim_sections))
#define BATCHED_TRIM_BLOCKS(sbi) \
(BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg)
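As a worked example of the new default (assuming a common geometry, not mandated here: 512 blocks per segment, i.e. log_blocks_per_seg == 9, one segment per section, 4 KiB blocks):

	BATCHED_TRIM_SEGMENTS = 2048 sections * 1 = 2048 segments
	BATCHED_TRIM_BLOCKS   = 2048 << 9         = 1048576 blocks ~= 4 GiB

so each FITRIM round now covers about 4 GiB, up from the previous 32-section (64 MiB) batches.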
+#define MAX_DISCARD_BLOCKS(sbi) BLKS_PER_SEC(sbi)
+#define DISCARD_ISSUE_RATE 8
+#define DEF_MIN_DISCARD_ISSUE_TIME 50 /* 50 ms, if candidates exist */
+#define DEF_MAX_DISCARD_ISSUE_TIME 60000 /* 60 s, if no candidates */
#define DEF_CP_INTERVAL 60 /* 60 secs */
+#define DEF_IDLE_INTERVAL 5 /* 5 secs */
struct cp_control {
int reason;
nid_t ino; /* inode number */
};
-/*
- * for the list of directory inodes or gc inodes.
- * NOTE: there are two slab users for this structure, if we add/modify/delete
- * fields in structure for one of slab users, it may affect fields or size of
- * other one, in this condition, it's better to split both of slab and related
- * data structure.
- */
+/* for the list of inodes to be GCed */
struct inode_entry {
struct list_head list; /* list head */
struct inode *inode; /* vfs inode pointer */
};
-/* for the list of blockaddresses to be discarded */
+/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
struct list_head list; /* list head */
- block_t blkaddr; /* block address to be discarded */
- int len; /* # of consecutive blocks of the discard */
+ block_t start_blkaddr; /* start blockaddr of current segment */
+ unsigned char discard_map[SIT_VBLOCK_MAP_SIZE]; /* segment discard bitmap */
+};
+
+/* default discard granularity of inner discard thread, unit: block count */
+#define DEFAULT_DISCARD_GRANULARITY 16
+
+/* max discard pend list number */
+#define MAX_PLIST_NUM 512
+#define plist_idx(blk_num) ((blk_num) >= MAX_PLIST_NUM ? \
+ (MAX_PLIST_NUM - 1) : (blk_num - 1))
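plist_idx() buckets a pending discard by its length in blocks; list 0 holds single-block discards and the last list collects everything of MAX_PLIST_NUM blocks or more:

	plist_idx(1)    == 0	/* smallest discards */
	plist_idx(16)   == 15	/* == DEFAULT_DISCARD_GRANULARITY */
	plist_idx(511)  == 510
	plist_idx(512)  == 511	/* >= MAX_PLIST_NUM share the last list */
	plist_idx(4096) == 511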
+
+#define P_ACTIVE 0x01
+#define P_TRIM 0x02
+#define plist_issue(tag) (((tag) & P_ACTIVE) || ((tag) & P_TRIM))
+
+enum {
+ D_PREP,
+ D_SUBMIT,
+ D_DONE,
+};
+
+struct discard_info {
+ block_t lstart; /* logical start address */
+ block_t len; /* length */
+ block_t start; /* actual start address in dev */
+};
+
+struct discard_cmd {
+ struct rb_node rb_node; /* rb node located in rb-tree */
+ union {
+ struct {
+ block_t lstart; /* logical start address */
+ block_t len; /* length */
+ block_t start; /* actual start address in dev */
+ };
+ struct discard_info di; /* discard info */
+
+ };
+ struct list_head list; /* command list */
+ struct completion wait; /* completion */
+ struct block_device *bdev; /* bdev */
+ unsigned short ref; /* reference count */
+ unsigned char state; /* state */
+ int error; /* bio error */
+};
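The anonymous struct deliberately mirrors struct discard_info, so lstart/len/start can be reached either directly or through di, and the shared rb-tree helpers can treat a discard_cmd as a plain range. A hypothetical compile-time check of that aliasing (not in the patch):

	BUILD_BUG_ON(offsetof(struct discard_cmd, lstart) !=
		     offsetof(struct discard_cmd, di.lstart));
	BUILD_BUG_ON(offsetof(struct discard_cmd, len) !=
		     offsetof(struct discard_cmd, di.len));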
+
+struct discard_cmd_control {
+ struct task_struct *f2fs_issue_discard; /* discard thread */
+ struct list_head entry_list; /* 4KB discard entry list */
+ struct list_head pend_list[MAX_PLIST_NUM]; /* store pending entries */
+ unsigned char pend_list_tag[MAX_PLIST_NUM]; /* tag for pending entries */
+ struct list_head wait_list; /* store on-flushing entries */
+ wait_queue_head_t discard_wait_queue; /* waiting queue for wake-up */
+ unsigned int discard_wake; /* to wake up discard thread */
+ struct mutex cmd_lock;
+ unsigned int nr_discards; /* # of discards in the list */
+ unsigned int max_discards; /* max. discards to be issued */
+ unsigned int discard_granularity; /* discard granularity */
+ unsigned int undiscard_blks; /* # of undiscard blocks */
+ atomic_t issued_discard; /* # of issued discards */
+ atomic_t issing_discard; /* # of issuing discards */
+ atomic_t discard_cmd_cnt; /* # of cached discard commands */
+ struct rb_root root; /* root of discard rb-tree */
};
/* for the list of fsync inodes, used only during recovery */
struct inode *inode; /* vfs inode pointer */
block_t blkaddr; /* block address locating the last fsync */
block_t last_dentry; /* block address locating the last dentry */
- block_t last_inode; /* block address locating the last inode */
};
-#define nats_in_cursum(sum) (le16_to_cpu(sum->n_nats))
-#define sits_in_cursum(sum) (le16_to_cpu(sum->n_sits))
+#define nats_in_cursum(jnl) (le16_to_cpu((jnl)->n_nats))
+#define sits_in_cursum(jnl) (le16_to_cpu((jnl)->n_sits))
-#define nat_in_journal(sum, i) (sum->nat_j.entries[i].ne)
-#define nid_in_journal(sum, i) (sum->nat_j.entries[i].nid)
-#define sit_in_journal(sum, i) (sum->sit_j.entries[i].se)
-#define segno_in_journal(sum, i) (sum->sit_j.entries[i].segno)
+#define nat_in_journal(jnl, i) ((jnl)->nat_j.entries[i].ne)
+#define nid_in_journal(jnl, i) ((jnl)->nat_j.entries[i].nid)
+#define sit_in_journal(jnl, i) ((jnl)->sit_j.entries[i].se)
+#define segno_in_journal(jnl, i) ((jnl)->sit_j.entries[i].segno)
-#define MAX_NAT_JENTRIES(sum) (NAT_JOURNAL_ENTRIES - nats_in_cursum(sum))
-#define MAX_SIT_JENTRIES(sum) (SIT_JOURNAL_ENTRIES - sits_in_cursum(sum))
+#define MAX_NAT_JENTRIES(jnl) (NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
+#define MAX_SIT_JENTRIES(jnl) (SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))
-static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i)
+static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
- int before = nats_in_cursum(rs);
- rs->n_nats = cpu_to_le16(before + i);
+ int before = nats_in_cursum(journal);
+
+ journal->n_nats = cpu_to_le16(before + i);
return before;
}
-static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
+static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
- int before = sits_in_cursum(rs);
- rs->n_sits = cpu_to_le16(before + i);
+ int before = sits_in_cursum(journal);
+
+ journal->n_sits = cpu_to_le16(before + i);
return before;
}
-static inline bool __has_cursum_space(struct f2fs_summary_block *sum, int size,
- int type)
+static inline bool __has_cursum_space(struct f2fs_journal *journal,
+ int size, int type)
{
if (type == NAT_JOURNAL)
- return size <= MAX_NAT_JENTRIES(sum);
- return size <= MAX_SIT_JENTRIES(sum);
+ return size <= MAX_NAT_JENTRIES(journal);
+ return size <= MAX_SIT_JENTRIES(journal);
}
/*
#define F2FS_IOC_START_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 3)
#define F2FS_IOC_RELEASE_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 4)
#define F2FS_IOC_ABORT_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 5)
-#define F2FS_IOC_GARBAGE_COLLECT _IO(F2FS_IOCTL_MAGIC, 6)
+#define F2FS_IOC_GARBAGE_COLLECT _IOW(F2FS_IOCTL_MAGIC, 6, __u32)
#define F2FS_IOC_WRITE_CHECKPOINT _IO(F2FS_IOCTL_MAGIC, 7)
-
-#define F2FS_IOC_SET_ENCRYPTION_POLICY \
- _IOR('f', 19, struct f2fs_encryption_policy)
-#define F2FS_IOC_GET_ENCRYPTION_PWSALT \
- _IOW('f', 20, __u8[16])
-#define F2FS_IOC_GET_ENCRYPTION_POLICY \
- _IOW('f', 21, struct f2fs_encryption_policy)
+#define F2FS_IOC_DEFRAGMENT _IOWR(F2FS_IOCTL_MAGIC, 8, \
+ struct f2fs_defragment)
+#define F2FS_IOC_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
+ struct f2fs_move_range)
+#define F2FS_IOC_FLUSH_DEVICE _IOW(F2FS_IOCTL_MAGIC, 10, \
+ struct f2fs_flush_device)
+#define F2FS_IOC_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11, \
+ struct f2fs_gc_range)
+#define F2FS_IOC_GET_FEATURES _IOR(F2FS_IOCTL_MAGIC, 12, __u32)
+
+#define F2FS_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY
+#define F2FS_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY
+#define F2FS_IOC_GET_ENCRYPTION_PWSALT FS_IOC_GET_ENCRYPTION_PWSALT
/*
* should be same as XFS_IOC_GOINGDOWN.
/*
* ioctl commands in 32 bit emulation
*/
-#define F2FS_IOC32_GETFLAGS FS_IOC32_GETFLAGS
-#define F2FS_IOC32_SETFLAGS FS_IOC32_SETFLAGS
+#define F2FS_IOC32_GETFLAGS FS_IOC32_GETFLAGS
+#define F2FS_IOC32_SETFLAGS FS_IOC32_SETFLAGS
+#define F2FS_IOC32_GETVERSION FS_IOC32_GETVERSION
#endif
-/*
- * For INODE and NODE manager
- */
-/* for directory operations */
-struct f2fs_str {
- unsigned char *name;
- u32 len;
+struct f2fs_gc_range {
+ u32 sync;
+ u64 start;
+ u64 len;
};
-struct f2fs_filename {
- const struct qstr *usr_fname;
- struct f2fs_str disk_name;
- f2fs_hash_t hash;
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- struct f2fs_str crypto_buf;
-#endif
+struct f2fs_defragment {
+ u64 start;
+ u64 len;
+};
+
+struct f2fs_move_range {
+ u32 dst_fd; /* destination fd */
+ u64 pos_in; /* start position in src_fd */
+ u64 pos_out; /* start position in dst_fd */
+ u64 len; /* size to move */
};
-#define FSTR_INIT(n, l) { .name = n, .len = l }
-#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len)
-#define fname_name(p) ((p)->disk_name.name)
-#define fname_len(p) ((p)->disk_name.len)
+struct f2fs_flush_device {
+ u32 dev_num; /* device number to flush */
+ u32 segments; /* # of segments to flush */
+};
+
+/* for inline stuff */
+#define DEF_INLINE_RESERVED_SIZE 1
+static inline int get_extra_isize(struct inode *inode);
+#define MAX_INLINE_DATA(inode) (sizeof(__le32) * \
+ (CUR_ADDRS_PER_INODE(inode) - \
+ DEF_INLINE_RESERVED_SIZE - \
+ F2FS_INLINE_XATTR_ADDRS))
+
+/* for inline dir */
+#define NR_INLINE_DENTRY(inode) (MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
+ ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
+ BITS_PER_BYTE + 1))
+#define INLINE_DENTRY_BITMAP_SIZE(inode) ((NR_INLINE_DENTRY(inode) + \
+ BITS_PER_BYTE - 1) / BITS_PER_BYTE)
+#define INLINE_RESERVED_SIZE(inode) (MAX_INLINE_DATA(inode) - \
+ ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
+ NR_INLINE_DENTRY(inode) + \
+ INLINE_DENTRY_BITMAP_SIZE(inode)))
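Worked through for the default on-disk layout (assuming the pre-existing format constants CUR_ADDRS_PER_INODE(inode) == 923, F2FS_INLINE_XATTR_ADDRS == 50, SIZE_OF_DIR_ENTRY == 11 and F2FS_SLOT_LEN == 8):

	MAX_INLINE_DATA           = 4 * (923 - 1 - 50)      = 3488 bytes
	NR_INLINE_DENTRY          = 3488 * 8 / (19 * 8 + 1) = 182 entries
	INLINE_DENTRY_BITMAP_SIZE = (182 + 7) / 8           = 23 bytes
	INLINE_RESERVED_SIZE      = 3488 - (19 * 182 + 23)  = 7 bytes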
+/*
+ * For INODE and NODE manager
+ */
+/* for directory operations */
struct f2fs_dentry_ptr {
struct inode *inode;
- const void *bitmap;
+ void *bitmap;
struct f2fs_dir_entry *dentry;
__u8 (*filename)[F2FS_SLOT_LEN];
int max;
+ int nr_bitmap;
};
-static inline void make_dentry_ptr(struct inode *inode,
- struct f2fs_dentry_ptr *d, void *src, int type)
+static inline void make_dentry_ptr_block(struct inode *inode,
+ struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
d->inode = inode;
+ d->max = NR_DENTRY_IN_BLOCK;
+ d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
+ d->bitmap = &t->dentry_bitmap;
+ d->dentry = t->dentry;
+ d->filename = t->filename;
+}
- if (type == 1) {
- struct f2fs_dentry_block *t = (struct f2fs_dentry_block *)src;
- d->max = NR_DENTRY_IN_BLOCK;
- d->bitmap = &t->dentry_bitmap;
- d->dentry = t->dentry;
- d->filename = t->filename;
- } else {
- struct f2fs_inline_dentry *t = (struct f2fs_inline_dentry *)src;
- d->max = NR_INLINE_DENTRY;
- d->bitmap = &t->dentry_bitmap;
- d->dentry = t->dentry;
- d->filename = t->filename;
- }
+static inline void make_dentry_ptr_inline(struct inode *inode,
+ struct f2fs_dentry_ptr *d, void *t)
+{
+ int entry_cnt = NR_INLINE_DENTRY(inode);
+ int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
+ int reserved_size = INLINE_RESERVED_SIZE(inode);
+
+ d->inode = inode;
+ d->max = entry_cnt;
+ d->nr_bitmap = bitmap_size;
+ d->bitmap = t;
+ d->dentry = t + bitmap_size + reserved_size;
+ d->filename = t + bitmap_size + reserved_size +
+ SIZE_OF_DIR_ENTRY * entry_cnt;
}
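make_dentry_ptr_inline() therefore derives the in-inode layout from the inode itself instead of a fixed struct f2fs_inline_dentry. For the default sizes worked out above, the inline area is laid out as:

	/* +--------+----------+------------------+-------------------+
	 * | bitmap | reserved | dentry entries   | filename slots    |
	 * |  23 B  |   7 B    | 11 B * 182       | 8 B * 182         |
	 * +--------+----------+------------------+-------------------+
	 */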
/*
/* number of extent info in extent cache we try to shrink */
#define EXTENT_CACHE_SHRINK_NUMBER 128
+struct rb_entry {
+ struct rb_node rb_node; /* rb node located in rb-tree */
+ unsigned int ofs; /* start offset of the entry */
+ unsigned int len; /* length of the entry */
+};
+
struct extent_info {
unsigned int fofs; /* start offset in a file */
- u32 blk; /* start block address of the extent */
unsigned int len; /* length of the extent */
+ u32 blk; /* start block address of the extent */
};
struct extent_node {
- struct rb_node rb_node; /* rb node located in rb-tree */
+ struct rb_node rb_node;
+ union {
+ struct {
+ unsigned int fofs;
+ unsigned int len;
+ u32 blk;
+ };
+ struct extent_info ei; /* extent info */
+
+ };
struct list_head list; /* node in global extent list of sbi */
- struct extent_info ei; /* extent info */
+ struct extent_tree *et; /* extent tree pointer */
};
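Note how the reordered extent_info (len now ahead of blk) and this anonymous union make the first words of extent_node line up with struct rb_entry (rb_node, ofs, len); that layout compatibility is what legitimizes casts like (struct rb_entry *)et->cached_en in the shared lookup helpers. A compile-time sketch of the invariant (not in the patch):

	BUILD_BUG_ON(offsetof(struct extent_node, fofs) !=
		     offsetof(struct rb_entry, ofs));
	BUILD_BUG_ON(offsetof(struct extent_node, len) !=
		     offsetof(struct rb_entry, len));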
struct extent_tree {
struct rb_root root; /* root of extent info rb-tree */
struct extent_node *cached_en; /* recently accessed extent node */
struct extent_info largest; /* largest extent info */
+ struct list_head list; /* to be used by sbi->zombie_list */
rwlock_t lock; /* protect extent info rb-tree */
- atomic_t refcount; /* reference count of rb-tree */
- unsigned int count; /* # of extent node in rb-tree*/
+ atomic_t node_cnt; /* # of extent nodes in rb-tree */
};
/*
block_t m_lblk;
unsigned int m_len;
unsigned int m_flags;
+ pgoff_t *m_next_pgofs; /* point next possible non-hole pgofs */
};
/* for flag in get_data_block */
-#define F2FS_GET_BLOCK_READ 0
-#define F2FS_GET_BLOCK_DIO 1
-#define F2FS_GET_BLOCK_FIEMAP 2
-#define F2FS_GET_BLOCK_BMAP 3
+enum {
+ F2FS_GET_BLOCK_DEFAULT,
+ F2FS_GET_BLOCK_FIEMAP,
+ F2FS_GET_BLOCK_BMAP,
+ F2FS_GET_BLOCK_PRE_DIO,
+ F2FS_GET_BLOCK_PRE_AIO,
+};
/*
* i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
#define FADVISE_LOST_PINO_BIT 0x02
#define FADVISE_ENCRYPT_BIT 0x04
#define FADVISE_ENC_NAME_BIT 0x08
+#define FADVISE_KEEP_SIZE_BIT 0x10
#define file_is_cold(inode) is_file(inode, FADVISE_COLD_BIT)
#define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_clear_encrypt(inode) clear_file(inode, FADVISE_ENCRYPT_BIT)
#define file_enc_name(inode) is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
-
-/* Encryption algorithms */
-#define F2FS_ENCRYPTION_MODE_INVALID 0
-#define F2FS_ENCRYPTION_MODE_AES_256_XTS 1
-#define F2FS_ENCRYPTION_MODE_AES_256_GCM 2
-#define F2FS_ENCRYPTION_MODE_AES_256_CBC 3
-#define F2FS_ENCRYPTION_MODE_AES_256_CTS 4
-
-#include "f2fs_crypto.h"
+#define file_keep_isize(inode) is_file(inode, FADVISE_KEEP_SIZE_BIT)
+#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
#define DEF_DIR_LEVEL 0
atomic_t dirty_pages; /* # of dirty pages */
f2fs_hash_t chash; /* hash value of given file name */
unsigned int clevel; /* maximum level of given file name */
+ struct task_struct *task; /* lookup and create consistency */
+ struct task_struct *cp_task; /* separate cp/wb IO stats */
nid_t i_xattr_nid; /* node id that contains xattrs */
- unsigned long long xattr_ver; /* cp version of xattr modification */
- struct inode_entry *dirty_dir; /* the pointer of dirty dir */
+ loff_t last_disk_size; /* lastly written file size */
+#ifdef CONFIG_QUOTA
+ struct dquot *i_dquot[MAXQUOTAS];
+
+ /* quota space reservation, managed internally by quota code */
+ qsize_t i_reserved_quota;
+#endif
+ struct list_head dirty_list; /* dirty list for dirs and files */
+ struct list_head gdirty_list; /* linked in global dirty list */
struct list_head inmem_pages; /* inmemory pages managed by f2fs */
+ struct task_struct *inmem_task; /* store inmemory task */
struct mutex inmem_lock; /* lock for inmemory pages */
-
struct extent_tree *extent_tree; /* cached extent_tree entry */
+ struct rw_semaphore dio_rwsem[2];/* avoid racing between dio and gc */
+ struct rw_semaphore i_mmap_sem;
+ struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- /* Encryption params */
- struct f2fs_crypt_info *i_crypt_info;
-#endif
+ int i_extra_isize; /* size of extra space located in i_addr */
+ kprojid_t i_projid; /* id for project quota */
};
static inline void get_extent_info(struct extent_info *ext,
- struct f2fs_extent i_ext)
+ struct f2fs_extent *i_ext)
{
- ext->fofs = le32_to_cpu(i_ext.fofs);
- ext->blk = le32_to_cpu(i_ext.blk);
- ext->len = le32_to_cpu(i_ext.len);
+ ext->fofs = le32_to_cpu(i_ext->fofs);
+ ext->blk = le32_to_cpu(i_ext->blk);
+ ext->len = le32_to_cpu(i_ext->len);
}
static inline void set_raw_extent(struct extent_info *ext,
ei->len = len;
}
-static inline bool __is_extent_same(struct extent_info *ei1,
- struct extent_info *ei2)
+static inline bool __is_discard_mergeable(struct discard_info *back,
+ struct discard_info *front)
{
- return (ei1->fofs == ei2->fofs && ei1->blk == ei2->blk &&
- ei1->len == ei2->len);
+ return back->lstart + back->len == front->lstart;
+}
+
+static inline bool __is_discard_back_mergeable(struct discard_info *cur,
+ struct discard_info *back)
+{
+ return __is_discard_mergeable(back, cur);
+}
+
+static inline bool __is_discard_front_mergeable(struct discard_info *cur,
+ struct discard_info *front)
+{
+ return __is_discard_mergeable(cur, front);
}
static inline bool __is_extent_mergeable(struct extent_info *back,
return __is_extent_mergeable(cur, front);
}
-static inline void __try_update_largest_extent(struct extent_tree *et,
- struct extent_node *en)
+extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
+static inline void __try_update_largest_extent(struct inode *inode,
+ struct extent_tree *et, struct extent_node *en)
{
- if (en->ei.len > et->largest.len)
+ if (en->ei.len > et->largest.len) {
et->largest = en->ei;
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
}
+enum nid_list {
+ FREE_NID_LIST,
+ ALLOC_NID_LIST,
+ MAX_NID_LIST,
+};
+
struct f2fs_nm_info {
block_t nat_blkaddr; /* base disk address of NAT */
nid_t max_nid; /* maximum possible node ids */
- nid_t available_nids; /* maximum available node ids */
+ nid_t available_nids; /* # of available node ids */
nid_t next_scan_nid; /* the next nid to be scanned */
unsigned int ram_thresh; /* control the memory footprint */
unsigned int ra_nid_pages; /* # of nid pages to be readaheaded */
+ unsigned int dirty_nats_ratio; /* control dirty nats ratio threshold */
/* NAT cache management */
struct radix_tree_root nat_root;/* root of the nat entry cache */
struct list_head nat_entries; /* cached nat entry list (clean) */
unsigned int nat_cnt; /* the # of cached nat entries */
unsigned int dirty_nat_cnt; /* total num of nat entries in set */
+ unsigned int nat_blocks; /* # of nat blocks */
/* free node ids management */
struct radix_tree_root free_nid_root;/* root of the free_nid cache */
- struct list_head free_nid_list; /* a list for free nids */
- spinlock_t free_nid_list_lock; /* protect free nid list */
- unsigned int fcnt; /* the number of free node id */
+ struct list_head nid_list[MAX_NID_LIST];/* lists for free nids */
+ unsigned int nid_cnt[MAX_NID_LIST]; /* the number of free node id */
+ spinlock_t nid_list_lock; /* protect nid lists ops */
struct mutex build_lock; /* lock for build free nids */
+ unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE];
+ unsigned char *nat_block_bitmap;
+ unsigned short *free_nid_count; /* free nid count of NAT block */
/* for checkpoint */
char *nat_bitmap; /* NAT bitmap pointer */
+
+ unsigned int nat_bits_blocks; /* # of nat bits blocks */
+ unsigned char *nat_bits; /* NAT bits blocks */
+ unsigned char *full_nat_bits; /* full NAT pages */
+ unsigned char *empty_nat_bits; /* empty NAT pages */
+#ifdef CONFIG_F2FS_CHECK_FS
+ char *nat_bitmap_mir; /* NAT bitmap mirror */
+#endif
int bitmap_size; /* bitmap size */
};
nid_t nid; /* node id of the direct node block */
unsigned int ofs_in_node; /* data offset in the node page */
bool inode_page_locked; /* inode page is locked or not */
+ bool node_changed; /* is node block changed */
+ char cur_level; /* level of hole node page */
+ char max_level; /* level of current page located */
block_t data_blkaddr; /* block address of the node block */
};
CURSEG_WARM_NODE, /* direct node blocks of normal files */
CURSEG_COLD_NODE, /* indirect node blocks */
NO_CHECK_TYPE,
- CURSEG_DIRECT_IO, /* to use for the direct IO path */
};
struct flush_cmd {
struct flush_cmd_control {
struct task_struct *f2fs_issue_flush; /* flush thread */
wait_queue_head_t flush_wait_queue; /* waiting queue for wake-up */
+ atomic_t issued_flush; /* # of issued flushes */
+ atomic_t issing_flush; /* # of issuing flushes */
struct llist_head issue_list; /* list for command issue */
struct llist_node *dispatch_list; /* list for command dispatch */
};
/* a threshold to reclaim prefree segments */
unsigned int rec_prefree_segments;
- /* for small discard management */
- struct list_head discard_list; /* 4KB discard list */
- int nr_discards; /* # of discards in the list */
- int max_discards; /* max. discards to be issued */
-
/* for batched trimming */
unsigned int trim_sections; /* # of sections to trim */
unsigned int ipu_policy; /* in-place-update policy */
unsigned int min_ipu_util; /* in-place-update threshold */
unsigned int min_fsync_blocks; /* threshold for fsync */
+ unsigned int min_hot_blocks; /* threshold for hot block allocation */
/* for flush command control */
- struct flush_cmd_control *cmd_control_info;
+ struct flush_cmd_control *fcc_info;
+ /* for discard command control */
+ struct discard_cmd_control *dcc_info;
};
/*
* f2fs monitors the number of several block types such as on-writeback,
* dirty dentry blocks, dirty node blocks, and dirty meta blocks.
*/
+#define WB_DATA_TYPE(p) (__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
- F2FS_WRITEBACK,
F2FS_DIRTY_DENTS,
+ F2FS_DIRTY_DATA,
F2FS_DIRTY_NODES,
F2FS_DIRTY_META,
F2FS_INMEM_PAGES,
+ F2FS_DIRTY_IMETA,
+ F2FS_WB_CP_DATA,
+ F2FS_WB_DATA,
NR_COUNT_TYPE,
};
META_FLUSH,
INMEM, /* the below types are used by tracepoints only. */
INMEM_DROP,
+ INMEM_INVALIDATE,
+ INMEM_REVOKE,
IPU,
OPU,
};
+enum temp_type {
+ HOT = 0, /* must be zero for meta bio */
+ WARM,
+ COLD,
+ NR_TEMP_TYPE,
+};
+
+enum need_lock_type {
+ LOCK_REQ = 0,
+ LOCK_DONE,
+ LOCK_RETRY,
+};
+
+enum iostat_type {
+ APP_DIRECT_IO, /* app direct IOs */
+ APP_BUFFERED_IO, /* app buffered IOs */
+ APP_WRITE_IO, /* app write IOs */
+ APP_MAPPED_IO, /* app mapped IOs */
+ FS_DATA_IO, /* data IOs from kworker/fsync/reclaimer */
+ FS_NODE_IO, /* node IOs from kworker/fsync/reclaimer */
+ FS_META_IO, /* meta IOs from kworker/reclaimer */
+ FS_GC_DATA_IO, /* data IOs from foreground gc */
+ FS_GC_NODE_IO, /* node IOs from foreground gc */
+ FS_CP_DATA_IO, /* data IOs from checkpoint */
+ FS_CP_NODE_IO, /* node IOs from checkpoint */
+ FS_CP_META_IO, /* meta IOs from checkpoint */
+ FS_DISCARD, /* discard */
+ NR_IO_TYPE,
+};
+
struct f2fs_io_info {
struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */
enum page_type type; /* contains DATA/NODE/META/META_FLUSH */
- int rw; /* contains R/RS/W/WS with REQ_META/REQ_PRIO */
- block_t blk_addr; /* block address to be written */
+ enum temp_type temp; /* contains HOT/WARM/COLD */
+ int op; /* contains REQ_OP_ */
+ int op_flags; /* req_flag_bits */
+ block_t new_blkaddr; /* new block address to be written */
+ block_t old_blkaddr; /* old block address before COW */
struct page *page; /* page to be written */
struct page *encrypted_page; /* encrypted page */
+ struct list_head list; /* serialize IOs */
+ bool submitted; /* indicate IO submission */
+ int need_lock; /* indicate we need to lock cp_rwsem */
+ bool in_list; /* indicate fio is in io_list */
+ enum iostat_type io_type; /* io type */
};
-#define is_read_io(rw) (((rw) & 1) == READ)
+#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
struct f2fs_sb_info *sbi; /* f2fs superblock */
struct bio *bio; /* bios to merge */
sector_t last_block_in_bio; /* last block number */
struct f2fs_io_info fio; /* store buffered io info. */
struct rw_semaphore io_rwsem; /* blocking op for bio */
+ spinlock_t io_lock; /* serialize DATA/NODE IOs */
+ struct list_head io_list; /* track fios */
+};
+
+#define FDEV(i) (sbi->devs[i])
+#define RDEV(i) (raw_super->devs[i])
+struct f2fs_dev_info {
+ struct block_device *bdev;
+ char path[MAX_PATH_LEN];
+ unsigned int total_segments;
+ block_t start_blk;
+ block_t end_blk;
+#ifdef CONFIG_BLK_DEV_ZONED
+ unsigned int nr_blkz; /* Total number of zones */
+ u8 *blkz_type; /* Array of zones type */
+#endif
+};
+
+enum inode_type {
+ DIR_INODE, /* for dirty dir inode */
+ FILE_INODE, /* for dirty regular/symlink inode */
+ DIRTY_META, /* for all dirtied inode metadata */
+ NR_INODE_TYPE,
};
/* for inner inode cache management */
SBI_IS_CLOSE, /* specify unmounting */
SBI_NEED_FSCK, /* need fsck.f2fs to fix */
SBI_POR_DOING, /* recovery is doing or not */
+ SBI_NEED_SB_WRITE, /* need to recover superblock */
+ SBI_NEED_CP, /* need to checkpoint */
+};
+
+enum {
+ CP_TIME,
+ REQ_TIME,
+ MAX_TIME,
};
struct f2fs_sb_info {
struct super_block *sb; /* pointer to VFS super block */
struct proc_dir_entry *s_proc; /* proc entry */
- struct buffer_head *raw_super_buf; /* buffer head of raw sb */
struct f2fs_super_block *raw_super; /* raw super block pointer */
- int s_flag; /* flags for sbi */
+ int valid_super_block; /* valid super block no */
+ unsigned long s_flag; /* flags for sbi */
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ unsigned int blocks_per_blkz; /* F2FS blocks per zone */
+ unsigned int log_blocks_per_blkz; /* log2 F2FS blocks per zone */
+#endif
/* for node-related operations */
struct f2fs_nm_info *nm_info; /* node manager */
struct f2fs_sm_info *sm_info; /* segment manager */
/* for bio operations */
- struct f2fs_bio_info read_io; /* for read bios */
- struct f2fs_bio_info write_io[NR_PAGE_TYPE]; /* for write bios */
+ struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios */
+ struct mutex wio_mutex[NR_PAGE_TYPE - 1][NR_TEMP_TYPE];
+ /* bio ordering for NODE/DATA */
+ int write_io_size_bits; /* Write IO size bits */
+ mempool_t *write_io_dummy; /* Dummy pages */
/* for checkpoint */
struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
+ int cur_cp_pack; /* remain current cp pack */
+ spinlock_t cp_lock; /* for flag in ckpt */
struct inode *meta_inode; /* cache meta blocks */
struct mutex cp_mutex; /* checkpoint procedure lock */
struct rw_semaphore cp_rwsem; /* blocking FS operations */
struct rw_semaphore node_write; /* locking node writes */
- struct mutex writepages; /* mutex for writepages() */
+ struct rw_semaphore node_change; /* locking node change */
wait_queue_head_t cp_wait;
- long cp_expires, cp_interval; /* next expected periodic cp */
+ unsigned long last_time[MAX_TIME]; /* to store time in jiffies */
+ long interval_time[MAX_TIME]; /* to store thresholds */
struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */
/* for orphan inode, use 0'th array */
unsigned int max_orphans; /* max orphan inodes */
- /* for directory inode management */
- struct list_head dir_inode_list; /* dir inode list */
- spinlock_t dir_inode_lock; /* for dir inode list lock */
+ /* for inode management */
+ struct list_head inode_list[NR_INODE_TYPE]; /* dirty inode list */
+ spinlock_t inode_lock[NR_INODE_TYPE]; /* for dirty inode list lock */
/* for extent tree cache */
struct radix_tree_root extent_tree_root;/* cache extent cache entries */
- struct rw_semaphore extent_tree_lock; /* locking extent radix tree */
+ struct mutex extent_tree_lock; /* locking extent radix tree */
struct list_head extent_list; /* lru list for shrinker */
spinlock_t extent_lock; /* locking extent lru list */
- int total_ext_tree; /* extent tree count */
+ atomic_t total_ext_tree; /* extent tree count */
+ struct list_head zombie_list; /* extent zombie tree list */
+ atomic_t total_zombie_tree; /* extent zombie tree count */
atomic_t total_ext_node; /* extent info count */
/* basic filesystem units */
unsigned int total_sections; /* total section count */
unsigned int total_node_count; /* total node block count */
unsigned int total_valid_node_count; /* valid node block count */
- unsigned int total_valid_inode_count; /* valid inode count */
+ loff_t max_file_blocks; /* max block index of file */
int active_logs; /* # of active logs */
int dir_level; /* directory level */
block_t user_block_count; /* # of user blocks */
block_t total_valid_block_count; /* # of valid blocks */
- block_t alloc_valid_block_count; /* # of allocated blocks */
block_t discard_blks; /* discard command candidats */
block_t last_valid_block_count; /* for recovery */
+ block_t reserved_blocks; /* configurable reserved blocks */
+
u32 s_next_generation; /* for NFS support */
- atomic_t nr_pages[NR_COUNT_TYPE]; /* # of pages, see count_type */
+
+ /* # of pages, see count_type */
+ atomic_t nr_pages[NR_COUNT_TYPE];
+ /* # of allocated blocks */
+ struct percpu_counter alloc_valid_block_count;
+
+ /* writeback control */
+ atomic_t wb_sync_req; /* count # of WB_SYNC threads */
+
+ /* valid inode count */
+ struct percpu_counter total_valid_inode_count;
struct f2fs_mount_info mount_opt; /* mount options */
struct f2fs_gc_kthread *gc_thread; /* GC thread */
unsigned int cur_victim_sec; /* current victim section num */
+ /* threshold for converting bg victims for fg */
+ u64 fggc_threshold;
+
/* maximum # of trials to find a victim segment for SSR and GC */
unsigned int max_victim_search;
atomic_t inline_xattr; /* # of inline_xattr inodes */
atomic_t inline_inode; /* # of inline_data inodes */
atomic_t inline_dir; /* # of inline_dentry inodes */
+ atomic_t aw_cnt; /* # of atomic writes */
+ atomic_t vw_cnt; /* # of volatile writes */
+ atomic_t max_aw_cnt; /* max # of atomic writes */
+ atomic_t max_vw_cnt; /* max # of volatile writes */
int bg_gc; /* background gc calls */
- unsigned int n_dirty_dirs; /* # of dir inodes */
+ unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */
#endif
- unsigned int last_victim[2]; /* last victim segment # */
spinlock_t stat_lock; /* lock for stat operations */
+ /* For app/fs IO statistics */
+ spinlock_t iostat_lock;
+ unsigned long long write_iostat[NR_IO_TYPE];
+ bool iostat_enable;
+
/* For sysfs suppport */
struct kobject s_kobj;
struct completion s_kobj_unregister;
/* For shrinker support */
struct list_head s_list;
+ int s_ndevs; /* number of devices */
+ struct f2fs_dev_info *devs; /* for device list */
struct mutex umount_mutex;
unsigned int shrinker_run_no;
+
+ /* For write statistics */
+ u64 sectors_written_start;
+ u64 kbytes_written;
+
+ /* Reference to checksum algorithm driver via cryptoapi */
+ struct crypto_shash *s_chksum_driver;
+
+ /* Precomputed FS UUID checksum for seeding other checksums */
+ __u32 s_chksum_seed;
+
+ /* For fault injection */
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ struct f2fs_fault_info fault_info;
+#endif
+
+#ifdef CONFIG_QUOTA
+ /* Names of quota files with journalled quota */
+ char *s_qf_names[MAXQUOTAS];
+ int s_jquota_fmt; /* Format of quota to use */
+#endif
};
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+#define f2fs_show_injection_info(type) \
+ printk("%sF2FS-fs : inject %s in %s of %pF\n", \
+ KERN_INFO, fault_name[type], \
+ __func__, __builtin_return_address(0))
+static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
+{
+ struct f2fs_fault_info *ffi = &sbi->fault_info;
+
+ if (!ffi->inject_rate)
+ return false;
+
+ if (!IS_FAULT_SET(ffi, type))
+ return false;
+
+ atomic_inc(&ffi->inject_ops);
+ if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
+ atomic_set(&ffi->inject_ops, 0);
+ return true;
+ }
+ return false;
+}
+#endif
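+
+/*
+ * Typical call pattern at an injection point (see f2fs_kmalloc() and
+ * f2fs_grab_cache_page() below for concrete instances):
+ *
+ *	if (time_to_inject(sbi, FAULT_KMALLOC)) {
+ *		f2fs_show_injection_info(FAULT_KMALLOC);
+ *		return NULL;
+ *	}
+ *
+ * Every inject_rate-th call that passes the type filter reports a fault
+ * and resets the counter.
+ */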
+
+/*
+ * For write statistics: assuming a 512-byte sector size, the return
+ * value is in kbytes. s is a pointer to struct f2fs_sb_info.
+ */
+#define BD_PART_WRITTEN(s) \
+(((u64)part_stat_read((s)->sb->s_bdev->bd_part, sectors[1]) - \
+ (s)->sectors_written_start) >> 1)
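+
+/*
+ * Example: if part_stat_read() reports 2048 sectors written and
+ * sectors_written_start was 0 at mount time, BD_PART_WRITTEN() yields
+ * (2048 - 0) >> 1 = 1024 kbytes.
+ */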
+
+static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
+{
+ sbi->last_time[type] = jiffies;
+}
+
+static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
+{
+ struct timespec ts = {sbi->interval_time[type], 0};
+ unsigned long interval = timespec_to_jiffies(&ts);
+
+ return time_after(jiffies, sbi->last_time[type] + interval);
+}
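+
+/*
+ * Typical pairing: f2fs_update_time(sbi, REQ_TIME) stamps the moment a
+ * request is issued, and f2fs_time_over(sbi, REQ_TIME) later reports
+ * whether interval_time[REQ_TIME] seconds have elapsed since that stamp,
+ * as is_idle() below does to judge device idleness.
+ */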
+
+static inline bool is_idle(struct f2fs_sb_info *sbi)
+{
+ struct block_device *bdev = sbi->sb->s_bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
+ struct request_list *rl = &q->root_rl;
+
+ if (rl->count[BLK_RW_SYNC] || rl->count[BLK_RW_ASYNC])
+ return false;
+
+ return f2fs_time_over(sbi, REQ_TIME);
+}
+
/*
* Inline functions
*/
+static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
+ unsigned int length)
+{
+ SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver);
+ u32 *ctx = (u32 *)shash_desc_ctx(shash);
+ u32 retval;
+ int err;
+
+ shash->tfm = sbi->s_chksum_driver;
+ shash->flags = 0;
+ *ctx = F2FS_SUPER_MAGIC;
+
+ err = crypto_shash_update(shash, address, length);
+ BUG_ON(err);
+
+ retval = *ctx;
+ barrier_data(ctx);
+ return retval;
+}
+
+static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
+ void *buf, size_t buf_size)
+{
+ return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
+}
+
+static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
+ const void *address, unsigned int length)
+{
+ struct {
+ struct shash_desc shash;
+ char ctx[4];
+ } desc;
+ int err;
+
+ BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));
+
+ desc.shash.tfm = sbi->s_chksum_driver;
+ desc.shash.flags = 0;
+ *(u32 *)desc.ctx = crc;
+
+ err = crypto_shash_update(&desc.shash, address, length);
+ BUG_ON(err);
+
+ return *(u32 *)desc.ctx;
+}
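+
+/*
+ * Unlike f2fs_crc32(), which always seeds with F2FS_SUPER_MAGIC,
+ * f2fs_chksum() takes the previous crc as its seed, so checksums can be
+ * chained across discontiguous buffers, e.g. (illustrative):
+ *
+ *	crc = f2fs_chksum(sbi, sbi->s_chksum_seed, &ino, sizeof(ino));
+ *	crc = f2fs_chksum(sbi, crc, data, len);
+ */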
+
static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
return container_of(inode, struct f2fs_inode_info, vfs_inode);
static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
{
- return sbi->s_flag & (0x01 << type);
+ return test_bit(type, &sbi->s_flag);
}
static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
- sbi->s_flag |= (0x01 << type);
+ set_bit(type, &sbi->s_flag);
}
static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
- sbi->s_flag &= ~(0x01 << type);
+ clear_bit(type, &sbi->s_flag);
}
static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
return le64_to_cpu(cp->checkpoint_ver);
}
-static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
+{
+ size_t crc_offset = le32_to_cpu(cp->checksum_offset);
+ return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
+}
+
+static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
+
return ckpt_flags & f;
}
-static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
- unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
+ return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
+}
+
+static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+{
+ unsigned int ckpt_flags;
+
+ ckpt_flags = le32_to_cpu(cp->ckpt_flags);
ckpt_flags |= f;
cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}
-static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
- unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sbi->cp_lock, flags);
+ __set_ckpt_flags(F2FS_CKPT(sbi), f);
+ spin_unlock_irqrestore(&sbi->cp_lock, flags);
+}
+
+static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+{
+ unsigned int ckpt_flags;
+
+ ckpt_flags = le32_to_cpu(cp->ckpt_flags);
ckpt_flags &= (~f);
cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}
+static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sbi->cp_lock, flags);
+ __clear_ckpt_flags(F2FS_CKPT(sbi), f);
+ spin_unlock_irqrestore(&sbi->cp_lock, flags);
+}
+
+static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
+{
+ unsigned long flags;
+
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+
+ if (lock)
+ spin_lock_irqsave(&sbi->cp_lock, flags);
+ __clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
+ kfree(NM_I(sbi)->nat_bits);
+ NM_I(sbi)->nat_bits = NULL;
+ if (lock)
+ spin_unlock_irqrestore(&sbi->cp_lock, flags);
+}
+
+static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
+ struct cp_control *cpc)
+{
+ bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
+
+ return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
+}
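+
+/*
+ * Example: enabled_nat_bits(sbi, NULL) merely reports whether
+ * CP_NAT_BITS_FLAG is set; with a cp_control it additionally requires an
+ * umount checkpoint (cpc->reason & CP_UMOUNT), since full nat bits are
+ * only written back at umount.
+ */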
+
static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
down_read(&sbi->cp_rwsem);
}
+static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
+{
+ return down_read_trylock(&sbi->cp_rwsem);
+}
+
static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
{
up_read(&sbi->cp_rwsem);
static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
{
- f2fs_down_write(&sbi->cp_rwsem, &sbi->cp_mutex);
+ down_write(&sbi->cp_rwsem);
}
static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
static inline bool __remain_node_summaries(int reason)
{
- return (reason == CP_UMOUNT || reason == CP_FASTBOOT);
+ return (reason & (CP_UMOUNT | CP_FASTBOOT));
}
static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
{
- return (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG) ||
- is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FASTBOOT_FLAG));
+ return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
+ is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
}
/*
return 0;
}
-#define F2FS_DEFAULT_ALLOCATED_BLOCKS 1
-
/*
* Check whether the inode has blocks or not
*/
static inline int F2FS_HAS_BLOCKS(struct inode *inode)
{
- if (F2FS_I(inode)->i_xattr_nid)
- return inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1;
- else
- return inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS;
+ block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;
+
+ return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
}
static inline bool f2fs_has_xattr_block(unsigned int ofs)
return ofs == XATTR_NODE_OFFSET;
}
-static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
- struct inode *inode, blkcnt_t count)
+static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
+static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
+ struct inode *inode, blkcnt_t *count)
{
- block_t valid_block_count;
+ blkcnt_t diff = 0, release = 0;
+ block_t avail_user_block_count;
+ int ret;
+
+ ret = dquot_reserve_block(inode, *count);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_BLOCK)) {
+ f2fs_show_injection_info(FAULT_BLOCK);
+ release = *count;
+ goto enospc;
+ }
+#endif
+ /*
+ * Increase this prior to the actual block count change so that
+ * f2fs_sync_file() can avoid data races when deciding on a checkpoint.
+ */
+ percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
spin_lock(&sbi->stat_lock);
- valid_block_count =
- sbi->total_valid_block_count + (block_t)count;
- if (unlikely(valid_block_count > sbi->user_block_count)) {
- spin_unlock(&sbi->stat_lock);
- return false;
+ sbi->total_valid_block_count += (block_t)(*count);
+ avail_user_block_count = sbi->user_block_count - sbi->reserved_blocks;
+ if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
+ diff = sbi->total_valid_block_count - avail_user_block_count;
+ *count -= diff;
+ release = diff;
+ sbi->total_valid_block_count = avail_user_block_count;
+ if (!*count) {
+ spin_unlock(&sbi->stat_lock);
+ percpu_counter_sub(&sbi->alloc_valid_block_count, diff);
+ goto enospc;
+ }
}
- inode->i_blocks += count;
- sbi->total_valid_block_count = valid_block_count;
- sbi->alloc_valid_block_count += (block_t)count;
spin_unlock(&sbi->stat_lock);
- return true;
+
+ if (release)
+ dquot_release_reservation_block(inode, release);
+ f2fs_i_blocks_write(inode, *count, true, true);
+ return 0;
+
+enospc:
+ dquot_release_reservation_block(inode, release);
+ return -ENOSPC;
}
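+
+/*
+ * Note that *count is in/out: when the volume is nearly full the request
+ * is trimmed against user_block_count - reserved_blocks, e.g.
+ * (illustrative):
+ *
+ *	blkcnt_t count = blocks_wanted;
+ *	int err = inc_valid_block_count(sbi, inode, &count);
+ *
+ * On success, count may have shrunk to the number of blocks actually
+ * granted; on -ENOSPC nothing remains reserved.
+ */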
static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
struct inode *inode,
- blkcnt_t count)
+ block_t count)
{
+ blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;
+
spin_lock(&sbi->stat_lock);
f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
- f2fs_bug_on(sbi, inode->i_blocks < count);
- inode->i_blocks -= count;
+ f2fs_bug_on(sbi, inode->i_blocks < sectors);
sbi->total_valid_block_count -= (block_t)count;
spin_unlock(&sbi->stat_lock);
+ f2fs_i_blocks_write(inode, count, false, true);
}
static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
atomic_inc(&sbi->nr_pages[count_type]);
+
+ if (count_type == F2FS_DIRTY_DATA || count_type == F2FS_INMEM_PAGES ||
+ count_type == F2FS_WB_CP_DATA || count_type == F2FS_WB_DATA)
+ return;
+
set_sbi_flag(sbi, SBI_IS_DIRTY);
}
static inline void inode_inc_dirty_pages(struct inode *inode)
{
atomic_inc(&F2FS_I(inode)->dirty_pages);
- if (S_ISDIR(inode->i_mode))
- inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_DENTS);
+ inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
+ F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
}
static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
return;
atomic_dec(&F2FS_I(inode)->dirty_pages);
-
- if (S_ISDIR(inode->i_mode))
- dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_DENTS);
+ dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
+ F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
}
-static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
+static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
{
return atomic_read(&sbi->nr_pages[count_type]);
}
static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{
- unsigned int pages_per_sec = sbi->segs_per_sec *
- (1 << sbi->log_blocks_per_seg);
- return ((get_pages(sbi, block_type) + pages_per_sec - 1)
- >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
+ unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
+ unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
+ sbi->log_blocks_per_seg;
+
+ return segs / sbi->segs_per_sec;
}
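+
+/*
+ * Example: with 512 blocks per segment and one segment per section,
+ * 1000 dirty pages round up to (1000 + 511) >> 9 = 2 segments, i.e.
+ * 2 sections.
+ */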
static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
return sbi->total_valid_block_count;
}
+static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
+{
+ return sbi->discard_blks;
+}
+
static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
- block_t start_addr;
- struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
- unsigned long long ckpt_version = cur_cp_version(ckpt);
-
- start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
+ block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
- /*
- * odd numbered checkpoint should at cp segment 0
- * and even segment must be at cp segment 1
- */
- if (!(ckpt_version & 1))
+
+ if (sbi->cur_cp_pack == 2)
start_addr += sbi->blocks_per_seg;
+ return start_addr;
+}
+
+static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
+{
+ block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
+
+ if (sbi->cur_cp_pack == 1)
+ start_addr += sbi->blocks_per_seg;
return start_addr;
}
+static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
+{
+ sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
+}
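+
+/*
+ * The two checkpoint packs ping-pong between the first two segments of
+ * the CP area: while cur_cp_pack == 1, __start_cp_addr() returns
+ * cp_blkaddr and __start_cp_next_addr() returns
+ * cp_blkaddr + blocks_per_seg; __set_cp_next_pack() swaps the two.
+ */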
+
static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}
-static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
- struct inode *inode)
+static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
+ struct inode *inode, bool is_inode)
{
block_t valid_block_count;
unsigned int valid_node_count;
+ bool quota = inode && !is_inode;
+
+ if (quota) {
+ int ret = dquot_reserve_block(inode, 1);
+ if (ret)
+ return ret;
+ }
spin_lock(&sbi->stat_lock);
valid_block_count = sbi->total_valid_block_count + 1;
- if (unlikely(valid_block_count > sbi->user_block_count)) {
+ if (unlikely(valid_block_count + sbi->reserved_blocks >
+ sbi->user_block_count)) {
spin_unlock(&sbi->stat_lock);
- return false;
+ goto enospc;
}
valid_node_count = sbi->total_valid_node_count + 1;
if (unlikely(valid_node_count > sbi->total_node_count)) {
spin_unlock(&sbi->stat_lock);
- return false;
+ goto enospc;
}
- if (inode)
- inode->i_blocks++;
-
- sbi->alloc_valid_block_count++;
sbi->total_valid_node_count++;
sbi->total_valid_block_count++;
spin_unlock(&sbi->stat_lock);
- return true;
+ if (inode) {
+ if (is_inode)
+ f2fs_mark_inode_dirty_sync(inode, true);
+ else
+ f2fs_i_blocks_write(inode, 1, true, true);
+ }
+
+ percpu_counter_inc(&sbi->alloc_valid_block_count);
+ return 0;
+
+enospc:
+ if (quota)
+ dquot_release_reservation_block(inode, 1);
+ return -ENOSPC;
}
static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
- struct inode *inode)
+ struct inode *inode, bool is_inode)
{
spin_lock(&sbi->stat_lock);
f2fs_bug_on(sbi, !sbi->total_valid_block_count);
f2fs_bug_on(sbi, !sbi->total_valid_node_count);
- f2fs_bug_on(sbi, !inode->i_blocks);
+ f2fs_bug_on(sbi, !is_inode && !inode->i_blocks);
- inode->i_blocks--;
sbi->total_valid_node_count--;
sbi->total_valid_block_count--;
spin_unlock(&sbi->stat_lock);
+
+ if (!is_inode)
+ f2fs_i_blocks_write(inode, 1, false, true);
}
static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
- spin_lock(&sbi->stat_lock);
- f2fs_bug_on(sbi, sbi->total_valid_inode_count == sbi->total_node_count);
- sbi->total_valid_inode_count++;
- spin_unlock(&sbi->stat_lock);
+ percpu_counter_inc(&sbi->total_valid_inode_count);
}
static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
- spin_lock(&sbi->stat_lock);
- f2fs_bug_on(sbi, !sbi->total_valid_inode_count);
- sbi->total_valid_inode_count--;
- spin_unlock(&sbi->stat_lock);
+ percpu_counter_dec(&sbi->total_valid_inode_count);
}
-static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi)
+static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
{
- return sbi->total_valid_inode_count;
+ return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
}
static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
pgoff_t index, bool for_write)
{
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ struct page *page = find_lock_page(mapping, index);
+
+ if (page)
+ return page;
+
+ if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
+ f2fs_show_injection_info(FAULT_PAGE_ALLOC);
+ return NULL;
+ }
+#endif
if (!for_write)
return grab_cache_page(mapping, index);
return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
unlock_page(page);
}
- page_cache_release(page);
+ put_page(page);
}
static inline void f2fs_put_dnode(struct dnode_of_data *dn)
static inline bool IS_INODE(struct page *page)
{
struct f2fs_node *p = F2FS_NODE(page);
+
return RAW_IS_INODE(p);
}
+static inline int offset_in_addr(struct f2fs_inode *i)
+{
+ return (i->i_inline & F2FS_EXTRA_ATTR) ?
+ (le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0;
+}
+
static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
{
return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
}
-static inline block_t datablock_addr(struct page *node_page,
- unsigned int offset)
+static inline int f2fs_has_extra_attr(struct inode *inode);
+static inline int get_extra_isize(struct inode *inode);
+static inline block_t datablock_addr(struct inode *inode,
+ struct page *node_page, unsigned int offset)
{
struct f2fs_node *raw_node;
__le32 *addr_array;
+ int base = 0;
+ bool is_inode = IS_INODE(node_page);
+
raw_node = F2FS_NODE(node_page);
+
+ /* from GC path only */
+ if (!inode) {
+ if (is_inode)
+ base = offset_in_addr(&raw_node->i);
+ } else if (f2fs_has_extra_attr(inode) && is_inode) {
+ base = get_extra_isize(inode);
+ }
+
addr_array = blkaddr_in_node(raw_node);
- return le32_to_cpu(addr_array[offset]);
+ return le32_to_cpu(addr_array[base + offset]);
}
static inline int f2fs_test_bit(unsigned int nr, char *addr)
*addr ^= mask;
}
+#define F2FS_REG_FLMASK (~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
+#define F2FS_OTHER_FLMASK (FS_NODUMP_FL | FS_NOATIME_FL)
+#define F2FS_FL_INHERITED (FS_PROJINHERIT_FL)
+
+static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
+{
+ if (S_ISDIR(mode))
+ return flags;
+ else if (S_ISREG(mode))
+ return flags & F2FS_REG_FLMASK;
+ else
+ return flags & F2FS_OTHER_FLMASK;
+}
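+
+/*
+ * Example: FS_DIRSYNC_FL and FS_TOPDIR_FL only make sense on directories,
+ * so f2fs_mask_flags(S_IFREG, flags) strips them from regular files;
+ * special files keep only FS_NODUMP_FL and FS_NOATIME_FL.
+ */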
+
/* used for f2fs_inode_info->flags */
enum {
FI_NEW_INODE, /* indicate newly allocated inode */
FI_DIRTY_INODE, /* indicate inode is dirty or not */
+ FI_AUTO_RECOVER, /* indicate inode is recoverable */
FI_DIRTY_DIR, /* indicate directory has dirty pages */
FI_INC_LINK, /* need to increment i_nlink */
FI_ACL_MODE, /* indicate acl mode */
FI_NO_ALLOC, /* should not allocate any blocks */
FI_FREE_NID, /* free allocated nid */
- FI_UPDATE_DIR, /* should update inode block for consistency */
- FI_DELAY_IPUT, /* used for the recovery */
FI_NO_EXTENT, /* not to use the extent cache */
FI_INLINE_XATTR, /* used for inline xattr */
FI_INLINE_DATA, /* used for inline data*/
FI_UPDATE_WRITE, /* inode has in-place-update data */
FI_NEED_IPU, /* used for ipu per file */
FI_ATOMIC_FILE, /* indicate atomic file */
+ FI_ATOMIC_COMMIT, /* indicate an atomic commit is in progress */
FI_VOLATILE_FILE, /* indicate volatile file */
FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */
FI_DROP_CACHE, /* drop dirty page cache */
FI_DATA_EXIST, /* indicate data exists */
FI_INLINE_DOTS, /* indicate inline dot dentries */
+ FI_DO_DEFRAG, /* indicate defragment is running */
+ FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */
+ FI_NO_PREALLOC, /* indicate skipped preallocated blocks */
+ FI_HOT_DATA, /* indicate file is hot */
+ FI_EXTRA_ATTR, /* indicate file has extra attribute */
+ FI_PROJ_INHERIT, /* indicate file inherits projectid */
};
-static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
+static inline void __mark_inode_dirty_flag(struct inode *inode,
+ int flag, bool set)
+{
+ switch (flag) {
+ case FI_INLINE_XATTR:
+ case FI_INLINE_DATA:
+ case FI_INLINE_DENTRY:
+ if (set)
+ return;
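+ /* fall through */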
+ case FI_DATA_EXIST:
+ case FI_INLINE_DOTS:
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
+}
+
+static inline void set_inode_flag(struct inode *inode, int flag)
{
- if (!test_bit(flag, &fi->flags))
- set_bit(flag, &fi->flags);
+ if (!test_bit(flag, &F2FS_I(inode)->flags))
+ set_bit(flag, &F2FS_I(inode)->flags);
+ __mark_inode_dirty_flag(inode, flag, true);
}
-static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag)
+static inline int is_inode_flag_set(struct inode *inode, int flag)
{
- return test_bit(flag, &fi->flags);
+ return test_bit(flag, &F2FS_I(inode)->flags);
}
-static inline void clear_inode_flag(struct f2fs_inode_info *fi, int flag)
+static inline void clear_inode_flag(struct inode *inode, int flag)
{
- if (test_bit(flag, &fi->flags))
- clear_bit(flag, &fi->flags);
+ if (test_bit(flag, &F2FS_I(inode)->flags))
+ clear_bit(flag, &F2FS_I(inode)->flags);
+ __mark_inode_dirty_flag(inode, flag, false);
}
-static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode)
+static inline void set_acl_inode(struct inode *inode, umode_t mode)
{
- fi->i_acl_mode = mode;
- set_inode_flag(fi, FI_ACL_MODE);
+ F2FS_I(inode)->i_acl_mode = mode;
+ set_inode_flag(inode, FI_ACL_MODE);
+ f2fs_mark_inode_dirty_sync(inode, false);
}
-static inline void get_inline_info(struct f2fs_inode_info *fi,
- struct f2fs_inode *ri)
+static inline void f2fs_i_links_write(struct inode *inode, bool inc)
{
+ if (inc)
+ inc_nlink(inode);
+ else
+ drop_nlink(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
+}
+
+static inline void f2fs_i_blocks_write(struct inode *inode,
+ block_t diff, bool add, bool claim)
+{
+ bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
+ bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
+
+ /* add = 1, claim = 1: must be paired with a prior dquot_reserve_block() */
+ if (add) {
+ if (claim)
+ dquot_claim_block(inode, diff);
+ else
+ dquot_alloc_block_nofail(inode, diff);
+ } else {
+ dquot_free_block(inode, diff);
+ }
+
+ f2fs_mark_inode_dirty_sync(inode, true);
+ if (clean || recover)
+ set_inode_flag(inode, FI_AUTO_RECOVER);
+}
+
+static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
+{
+ bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
+ bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
+
+ if (i_size_read(inode) == i_size)
+ return;
+
+ i_size_write(inode, i_size);
+ f2fs_mark_inode_dirty_sync(inode, true);
+ if (clean || recover)
+ set_inode_flag(inode, FI_AUTO_RECOVER);
+}
+
+static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
+{
+ F2FS_I(inode)->i_current_depth = depth;
+ f2fs_mark_inode_dirty_sync(inode, true);
+}
+
+static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
+{
+ F2FS_I(inode)->i_xattr_nid = xnid;
+ f2fs_mark_inode_dirty_sync(inode, true);
+}
+
+static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
+{
+ F2FS_I(inode)->i_pino = pino;
+ f2fs_mark_inode_dirty_sync(inode, true);
+}
+
+static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
+{
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+
if (ri->i_inline & F2FS_INLINE_XATTR)
- set_inode_flag(fi, FI_INLINE_XATTR);
+ set_bit(FI_INLINE_XATTR, &fi->flags);
if (ri->i_inline & F2FS_INLINE_DATA)
- set_inode_flag(fi, FI_INLINE_DATA);
+ set_bit(FI_INLINE_DATA, &fi->flags);
if (ri->i_inline & F2FS_INLINE_DENTRY)
- set_inode_flag(fi, FI_INLINE_DENTRY);
+ set_bit(FI_INLINE_DENTRY, &fi->flags);
if (ri->i_inline & F2FS_DATA_EXIST)
- set_inode_flag(fi, FI_DATA_EXIST);
+ set_bit(FI_DATA_EXIST, &fi->flags);
if (ri->i_inline & F2FS_INLINE_DOTS)
- set_inode_flag(fi, FI_INLINE_DOTS);
+ set_bit(FI_INLINE_DOTS, &fi->flags);
+ if (ri->i_inline & F2FS_EXTRA_ATTR)
+ set_bit(FI_EXTRA_ATTR, &fi->flags);
}
-static inline void set_raw_inline(struct f2fs_inode_info *fi,
- struct f2fs_inode *ri)
+static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
{
ri->i_inline = 0;
- if (is_inode_flag_set(fi, FI_INLINE_XATTR))
+ if (is_inode_flag_set(inode, FI_INLINE_XATTR))
ri->i_inline |= F2FS_INLINE_XATTR;
- if (is_inode_flag_set(fi, FI_INLINE_DATA))
+ if (is_inode_flag_set(inode, FI_INLINE_DATA))
ri->i_inline |= F2FS_INLINE_DATA;
- if (is_inode_flag_set(fi, FI_INLINE_DENTRY))
+ if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
ri->i_inline |= F2FS_INLINE_DENTRY;
- if (is_inode_flag_set(fi, FI_DATA_EXIST))
+ if (is_inode_flag_set(inode, FI_DATA_EXIST))
ri->i_inline |= F2FS_DATA_EXIST;
- if (is_inode_flag_set(fi, FI_INLINE_DOTS))
+ if (is_inode_flag_set(inode, FI_INLINE_DOTS))
ri->i_inline |= F2FS_INLINE_DOTS;
+ if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
+ ri->i_inline |= F2FS_EXTRA_ATTR;
+}
+
+static inline int f2fs_has_extra_attr(struct inode *inode)
+{
+ return is_inode_flag_set(inode, FI_EXTRA_ATTR);
}
static inline int f2fs_has_inline_xattr(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_INLINE_XATTR);
+ return is_inode_flag_set(inode, FI_INLINE_XATTR);
}
-static inline unsigned int addrs_per_inode(struct f2fs_inode_info *fi)
+static inline unsigned int addrs_per_inode(struct inode *inode)
{
- if (f2fs_has_inline_xattr(&fi->vfs_inode))
- return DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS;
- return DEF_ADDRS_PER_INODE;
+ if (f2fs_has_inline_xattr(inode))
+ return CUR_ADDRS_PER_INODE(inode) - F2FS_INLINE_XATTR_ADDRS;
+ return CUR_ADDRS_PER_INODE(inode);
}
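+
+/*
+ * Example, assuming the on-disk defaults DEF_ADDRS_PER_INODE == 923 and
+ * F2FS_INLINE_XATTR_ADDRS == 50: an inode without extra attributes but
+ * with inline xattrs addresses 923 - 50 = 873 blocks directly.
+ */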
static inline void *inline_xattr_addr(struct page *page)
{
struct f2fs_inode *ri = F2FS_INODE(page);
+
return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
F2FS_INLINE_XATTR_ADDRS]);
}
static inline int f2fs_has_inline_data(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DATA);
-}
-
-static inline void f2fs_clear_inline_inode(struct inode *inode)
-{
- clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
- clear_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
+ return is_inode_flag_set(inode, FI_INLINE_DATA);
}
static inline int f2fs_exist_data(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_DATA_EXIST);
+ return is_inode_flag_set(inode, FI_DATA_EXIST);
}
static inline int f2fs_has_inline_dots(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DOTS);
+ return is_inode_flag_set(inode, FI_INLINE_DOTS);
}
static inline bool f2fs_is_atomic_file(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_ATOMIC_FILE);
+ return is_inode_flag_set(inode, FI_ATOMIC_FILE);
+}
+
+static inline bool f2fs_is_commit_atomic_write(struct inode *inode)
+{
+ return is_inode_flag_set(inode, FI_ATOMIC_COMMIT);
}
static inline bool f2fs_is_volatile_file(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_VOLATILE_FILE);
+ return is_inode_flag_set(inode, FI_VOLATILE_FILE);
}
static inline bool f2fs_is_first_block_written(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
+ return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
}
static inline bool f2fs_is_drop_cache(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_DROP_CACHE);
+ return is_inode_flag_set(inode, FI_DROP_CACHE);
}
-static inline void *inline_data_addr(struct page *page)
+static inline void *inline_data_addr(struct inode *inode, struct page *page)
{
struct f2fs_inode *ri = F2FS_INODE(page);
- return (void *)&(ri->i_addr[1]);
+ int extra_size = get_extra_isize(inode);
+
+ return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
}
static inline int f2fs_has_inline_dentry(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DENTRY);
+ return is_inode_flag_set(inode, FI_INLINE_DENTRY);
}
static inline void f2fs_dentry_kunmap(struct inode *dir, struct page *page)
static inline void set_file(struct inode *inode, int type)
{
F2FS_I(inode)->i_advise |= type;
+ f2fs_mark_inode_dirty_sync(inode, true);
}
static inline void clear_file(struct inode *inode, int type)
{
F2FS_I(inode)->i_advise &= ~type;
+ f2fs_mark_inode_dirty_sync(inode, true);
}
-static inline int f2fs_readonly(struct super_block *sb)
+static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
{
- return sb->s_flags & MS_RDONLY;
+ if (dsync) {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ bool ret;
+
+ spin_lock(&sbi->inode_lock[DIRTY_META]);
+ ret = list_empty(&F2FS_I(inode)->gdirty_list);
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+ return ret;
+ }
+ if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
+ file_keep_isize(inode) ||
+ i_size_read(inode) & ~PAGE_MASK)
+ return false;
+ return F2FS_I(inode)->last_disk_size == i_size_read(inode);
}
-static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
+static inline int f2fs_readonly(struct super_block *sb)
{
- return is_set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
+ return sb->s_flags & MS_RDONLY;
}
-static inline void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi)
+static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
{
- set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
- sbi->sb->s_flags |= MS_RDONLY;
+ return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
}
static inline bool is_dot_dotdot(const struct qstr *str)
static inline bool f2fs_may_extent_tree(struct inode *inode)
{
- mode_t mode = inode->i_mode;
-
if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE) ||
- is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
+ is_inode_flag_set(inode, FI_NO_EXTENT))
return false;
- return S_ISREG(mode);
+ return S_ISREG(inode->i_mode);
+}
+
+static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
+ size_t size, gfp_t flags)
+{
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_KMALLOC)) {
+ f2fs_show_injection_info(FAULT_KMALLOC);
+ return NULL;
+ }
+#endif
+ return kmalloc(size, flags);
}
-static inline void *f2fs_kvmalloc(size_t size, gfp_t flags)
+static inline void *kvmalloc(size_t size, gfp_t flags)
{
void *ret;
return ret;
}
-static inline void *f2fs_kvzalloc(size_t size, gfp_t flags)
+static inline void *kvzalloc(size_t size, gfp_t flags)
{
void *ret;
return ret;
}
+static inline int get_extra_isize(struct inode *inode)
+{
+ return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
+}
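+
+/*
+ * Example: i_extra_isize == 36 bytes yields 36 / sizeof(__le32) == 9,
+ * i.e. the first nine i_addr[] slots are repurposed for extra
+ * attributes, which is the base offset datablock_addr() and
+ * inline_data_addr() skip over.
+ */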
+
#define get_inode_mode(i) \
- ((is_inode_flag_set(F2FS_I(i), FI_ACL_MODE)) ? \
+ ((is_inode_flag_set(i, FI_ACL_MODE)) ? \
(F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
-/* get offset of first page in next direct node */
-#define PGOFS_OF_NEXT_DNODE(pgofs, fi) \
- ((pgofs < ADDRS_PER_INODE(fi)) ? ADDRS_PER_INODE(fi) : \
- (pgofs - ADDRS_PER_INODE(fi) + ADDRS_PER_BLOCK) / \
- ADDRS_PER_BLOCK * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi))
+#define F2FS_TOTAL_EXTRA_ATTR_SIZE \
+ (offsetof(struct f2fs_inode, i_extra_end) - \
+ offsetof(struct f2fs_inode, i_extra_isize))
+
+#define F2FS_OLD_ATTRIBUTE_SIZE (offsetof(struct f2fs_inode, i_addr))
+#define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field) \
+ ((offsetof(typeof(*f2fs_inode), field) + \
+ sizeof((f2fs_inode)->field)) \
+ <= (F2FS_OLD_ATTRIBUTE_SIZE + extra_isize))
+
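+/*
+ * Example (illustrative): given a raw inode ri,
+ *
+ *	F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize), i_projid)
+ *
+ * tells whether the extra attribute area extends far enough to contain
+ * the i_projid field before it is accessed.
+ */
+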
+static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
+{
+ int i;
+
+ spin_lock(&sbi->iostat_lock);
+ for (i = 0; i < NR_IO_TYPE; i++)
+ sbi->write_iostat[i] = 0;
+ spin_unlock(&sbi->iostat_lock);
+}
+
+static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
+ enum iostat_type type, unsigned long long io_bytes)
+{
+ if (!sbi->iostat_enable)
+ return;
+ spin_lock(&sbi->iostat_lock);
+ sbi->write_iostat[type] += io_bytes;
+
+ if (type == APP_WRITE_IO || type == APP_DIRECT_IO)
+ sbi->write_iostat[APP_BUFFERED_IO] =
+ sbi->write_iostat[APP_WRITE_IO] -
+ sbi->write_iostat[APP_DIRECT_IO];
+ spin_unlock(&sbi->iostat_lock);
+}
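+
+/*
+ * APP_BUFFERED_IO is derived rather than counted: e.g. after 100MB of
+ * application writes of which 30MB were direct I/O,
+ * write_iostat[APP_BUFFERED_IO] reads 100MB - 30MB = 70MB.
+ */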
/*
* file.c
*/
-int f2fs_sync_file(struct file *, loff_t, loff_t, int);
-void truncate_data_blocks(struct dnode_of_data *);
-int truncate_blocks(struct inode *, u64, bool);
-int f2fs_truncate(struct inode *, bool);
-int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
-int f2fs_setattr(struct dentry *, struct iattr *);
-int truncate_hole(struct inode *, pgoff_t, pgoff_t);
-int truncate_data_blocks_range(struct dnode_of_data *, int);
-long f2fs_ioctl(struct file *, unsigned int, unsigned long);
-long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);
+int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
+void truncate_data_blocks(struct dnode_of_data *dn);
+int truncate_blocks(struct inode *inode, u64 from, bool lock);
+int f2fs_truncate(struct inode *inode);
+int f2fs_getattr(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat);
+int f2fs_setattr(struct dentry *dentry, struct iattr *attr);
+int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
+int truncate_data_blocks_range(struct dnode_of_data *dn, int count);
+long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
/*
* inode.c
*/
-void f2fs_set_inode_flags(struct inode *);
-struct inode *f2fs_iget(struct super_block *, unsigned long);
-int try_to_free_nats(struct f2fs_sb_info *, int);
-void update_inode(struct inode *, struct page *);
-void update_inode_page(struct inode *);
-int f2fs_write_inode(struct inode *, struct writeback_control *);
-void f2fs_evict_inode(struct inode *);
-void handle_failed_inode(struct inode *);
+void f2fs_set_inode_flags(struct inode *inode);
+bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
+void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
+struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
+struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
+int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
+int update_inode(struct inode *inode, struct page *node_page);
+int update_inode_page(struct inode *inode);
+int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
+void f2fs_evict_inode(struct inode *inode);
+void handle_failed_inode(struct inode *inode);
/*
* namei.c
/*
* dir.c
*/
-extern unsigned char f2fs_filetype_table[F2FS_FT_MAX];
-void set_de_type(struct f2fs_dir_entry *, umode_t);
-
-struct f2fs_dir_entry *find_target_dentry(struct f2fs_filename *,
- f2fs_hash_t, int *, struct f2fs_dentry_ptr *);
-bool f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *,
- unsigned int, struct f2fs_str *);
-void do_make_empty_dir(struct inode *, struct inode *,
- struct f2fs_dentry_ptr *);
-struct page *init_inode_metadata(struct inode *, struct inode *,
- const struct qstr *, struct page *);
-void update_parent_metadata(struct inode *, struct inode *, unsigned int);
-int room_for_filename(const void *, int, int);
-void f2fs_drop_nlink(struct inode *, struct inode *, struct page *);
-struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *,
- struct page **);
-struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
-ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
-void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
- struct page *, struct inode *);
-int update_dent_inode(struct inode *, struct inode *, const struct qstr *);
-void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *,
- const struct qstr *, f2fs_hash_t , unsigned int);
-int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *, nid_t,
- umode_t);
-void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *,
- struct inode *);
-int f2fs_do_tmpfile(struct inode *, struct inode *);
-bool f2fs_empty_dir(struct inode *);
+void set_de_type(struct f2fs_dir_entry *de, umode_t mode);
+unsigned char get_de_type(struct f2fs_dir_entry *de);
+struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname,
+ f2fs_hash_t namehash, int *max_slots,
+ struct f2fs_dentry_ptr *d);
+int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
+ unsigned int start_pos, struct fscrypt_str *fstr);
+void do_make_empty_dir(struct inode *inode, struct inode *parent,
+ struct f2fs_dentry_ptr *d);
+struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
+ const struct qstr *new_name,
+ const struct qstr *orig_name, struct page *dpage);
+void update_parent_metadata(struct inode *dir, struct inode *inode,
+ unsigned int current_depth);
+int room_for_filename(const void *bitmap, int slots, int max_slots);
+void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
+struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
+ struct fscrypt_name *fname, struct page **res_page);
+struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
+ const struct qstr *child, struct page **res_page);
+struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
+ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
+ struct page **page);
+void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
+ struct page *page, struct inode *inode);
+void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
+ const struct qstr *name, f2fs_hash_t name_hash,
+ unsigned int bit_pos);
+int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
+ const struct qstr *orig_name,
+ struct inode *inode, nid_t ino, umode_t mode);
+int __f2fs_do_add_link(struct inode *dir, struct fscrypt_name *fname,
+ struct inode *inode, nid_t ino, umode_t mode);
+int __f2fs_add_link(struct inode *dir, const struct qstr *name,
+ struct inode *inode, nid_t ino, umode_t mode);
+void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
+ struct inode *dir, struct inode *inode);
+int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
+bool f2fs_empty_dir(struct inode *dir);
static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
/*
* super.c
*/
-int f2fs_commit_super(struct f2fs_sb_info *, bool);
-loff_t max_file_size(unsigned bits);
-int f2fs_sync_fs(struct super_block *, int);
+int f2fs_inode_dirtied(struct inode *inode, bool sync);
+void f2fs_inode_synced(struct inode *inode);
+void f2fs_enable_quota_files(struct f2fs_sb_info *sbi);
+void f2fs_quota_off_umount(struct super_block *sb);
+int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
+int f2fs_sync_fs(struct super_block *sb, int sync);
extern __printf(3, 4)
-void f2fs_msg(struct super_block *, const char *, const char *, ...);
+void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
+int sanity_check_ckpt(struct f2fs_sb_info *sbi);
/*
* hash.c
*/
f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
- struct f2fs_filename *fname);
+ struct fscrypt_name *fname);
/*
* node.c
struct dnode_of_data;
struct node_info;
-bool available_free_memory(struct f2fs_sb_info *, int);
-int need_dentry_mark(struct f2fs_sb_info *, nid_t);
-bool is_checkpointed_node(struct f2fs_sb_info *, nid_t);
-bool need_inode_block_update(struct f2fs_sb_info *, nid_t);
-void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
-int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
-int truncate_inode_blocks(struct inode *, pgoff_t);
-int truncate_xattr_node(struct inode *, struct page *);
-int wait_on_node_pages_writeback(struct f2fs_sb_info *, nid_t);
-int remove_inode_page(struct inode *);
-struct page *new_inode_page(struct inode *);
-struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
-void ra_node_page(struct f2fs_sb_info *, nid_t);
-struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
-struct page *get_node_page_ra(struct page *, int);
-void sync_inode_page(struct dnode_of_data *);
-int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
-bool alloc_nid(struct f2fs_sb_info *, nid_t *);
-void alloc_nid_done(struct f2fs_sb_info *, nid_t);
-void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
-int try_to_free_nids(struct f2fs_sb_info *, int);
-void recover_inline_xattr(struct inode *, struct page *);
-void recover_xattr_data(struct inode *, struct page *, block_t);
-int recover_inode_page(struct f2fs_sb_info *, struct page *);
-int restore_node_summary(struct f2fs_sb_info *, unsigned int,
- struct f2fs_summary_block *);
-void flush_nat_entries(struct f2fs_sb_info *);
-int build_node_manager(struct f2fs_sb_info *);
-void destroy_node_manager(struct f2fs_sb_info *);
+bool available_free_memory(struct f2fs_sb_info *sbi, int type);
+int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
+bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
+bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
+void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni);
+pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
+int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
+int truncate_inode_blocks(struct inode *inode, pgoff_t from);
+int truncate_xattr_node(struct inode *inode, struct page *page);
+int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino);
+int remove_inode_page(struct inode *inode);
+struct page *new_inode_page(struct inode *inode);
+struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs);
+void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
+struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
+struct page *get_node_page_ra(struct page *parent, int start);
+void move_node_page(struct page *node_page, int gc_type);
+int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
+ struct writeback_control *wbc, bool atomic);
+int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc,
+ bool do_balance, enum iostat_type io_type);
+void build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
+bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
+void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
+void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
+int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
+void recover_inline_xattr(struct inode *inode, struct page *page);
+int recover_xattr_data(struct inode *inode, struct page *page,
+ block_t blkaddr);
+int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
+int restore_node_summary(struct f2fs_sb_info *sbi,
+ unsigned int segno, struct f2fs_summary_block *sum);
+void flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
+int build_node_manager(struct f2fs_sb_info *sbi);
+void destroy_node_manager(struct f2fs_sb_info *sbi);
int __init create_node_manager_caches(void);
void destroy_node_manager_caches(void);
/*
* segment.c
*/
-void register_inmem_page(struct inode *, struct page *);
-int commit_inmem_pages(struct inode *, bool);
-void f2fs_balance_fs(struct f2fs_sb_info *);
-void f2fs_balance_fs_bg(struct f2fs_sb_info *);
-int f2fs_issue_flush(struct f2fs_sb_info *);
-int create_flush_cmd_control(struct f2fs_sb_info *);
-void destroy_flush_cmd_control(struct f2fs_sb_info *);
-void invalidate_blocks(struct f2fs_sb_info *, block_t);
-bool is_checkpointed_data(struct f2fs_sb_info *, block_t);
-void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
-void clear_prefree_segments(struct f2fs_sb_info *, struct cp_control *);
-void release_discard_addrs(struct f2fs_sb_info *);
-bool discard_next_dnode(struct f2fs_sb_info *, block_t);
-int npages_for_summary_flush(struct f2fs_sb_info *, bool);
-void allocate_new_segments(struct f2fs_sb_info *);
-int f2fs_trim_fs(struct f2fs_sb_info *, struct fstrim_range *);
-struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
-void update_meta_page(struct f2fs_sb_info *, void *, block_t);
-void write_meta_page(struct f2fs_sb_info *, struct page *);
-void write_node_page(unsigned int, struct f2fs_io_info *);
-void write_data_page(struct dnode_of_data *, struct f2fs_io_info *);
-void rewrite_data_page(struct f2fs_io_info *);
-void f2fs_replace_block(struct f2fs_sb_info *, struct dnode_of_data *,
- block_t, block_t, unsigned char, bool);
-void allocate_data_block(struct f2fs_sb_info *, struct page *,
- block_t, block_t *, struct f2fs_summary *, int);
-void f2fs_wait_on_page_writeback(struct page *, enum page_type);
-void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *, block_t);
-void write_data_summaries(struct f2fs_sb_info *, block_t);
-void write_node_summaries(struct f2fs_sb_info *, block_t);
-int lookup_journal_in_cursum(struct f2fs_summary_block *,
- int, unsigned int, int);
-void flush_sit_entries(struct f2fs_sb_info *, struct cp_control *);
-int build_segment_manager(struct f2fs_sb_info *);
-void destroy_segment_manager(struct f2fs_sb_info *);
+bool need_SSR(struct f2fs_sb_info *sbi);
+void register_inmem_page(struct inode *inode, struct page *page);
+void drop_inmem_pages(struct inode *inode);
+void drop_inmem_page(struct inode *inode, struct page *page);
+int commit_inmem_pages(struct inode *inode);
+void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
+void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi);
+int f2fs_issue_flush(struct f2fs_sb_info *sbi);
+int create_flush_cmd_control(struct f2fs_sb_info *sbi);
+void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
+void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
+bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
+void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new);
+void stop_discard_thread(struct f2fs_sb_info *sbi);
+void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount);
+void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc);
+void release_discard_addrs(struct f2fs_sb_info *sbi);
+int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
+void allocate_new_segments(struct f2fs_sb_info *sbi);
+int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
+bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc);
+struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
+void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr);
+void write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
+ enum iostat_type io_type);
+void write_node_page(unsigned int nid, struct f2fs_io_info *fio);
+void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio);
+int rewrite_data_page(struct f2fs_io_info *fio);
+void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+ block_t old_blkaddr, block_t new_blkaddr,
+ bool recover_curseg, bool recover_newaddr);
+void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
+ block_t old_addr, block_t new_addr,
+ unsigned char version, bool recover_curseg,
+ bool recover_newaddr);
+void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+ block_t old_blkaddr, block_t *new_blkaddr,
+ struct f2fs_summary *sum, int type,
+ struct f2fs_io_info *fio, bool add_list);
+void f2fs_wait_on_page_writeback(struct page *page,
+ enum page_type type, bool ordered);
+void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr);
+void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
+void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
+int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
+ unsigned int val, int alloc);
+void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
+int build_segment_manager(struct f2fs_sb_info *sbi);
+void destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init create_segment_manager_caches(void);
void destroy_segment_manager_caches(void);
/*
* checkpoint.c
*/
-struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
-struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
-struct page *get_tmp_page(struct f2fs_sb_info *, pgoff_t);
-bool is_valid_blkaddr(struct f2fs_sb_info *, block_t, int);
-int ra_meta_pages(struct f2fs_sb_info *, block_t, int, int, bool);
-void ra_meta_pages_cond(struct f2fs_sb_info *, pgoff_t);
-long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
-void add_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
-void remove_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
-void release_dirty_inode(struct f2fs_sb_info *);
-bool exist_written_data(struct f2fs_sb_info *, nid_t, int);
-int acquire_orphan_inode(struct f2fs_sb_info *);
-void release_orphan_inode(struct f2fs_sb_info *);
-void add_orphan_inode(struct f2fs_sb_info *, nid_t);
-void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
-int recover_orphan_inodes(struct f2fs_sb_info *);
-int get_valid_checkpoint(struct f2fs_sb_info *);
-void update_dirty_page(struct inode *, struct page *);
-void add_dirty_dir_inode(struct inode *);
-void remove_dirty_dir_inode(struct inode *);
-void sync_dirty_dir_inodes(struct f2fs_sb_info *);
-void write_checkpoint(struct f2fs_sb_info *, struct cp_control *);
-void init_ino_entry_info(struct f2fs_sb_info *);
+void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
+struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
+struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
+struct page *get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
+bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type);
+int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
+ int type, bool sync);
+void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
+long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
+ long nr_to_write, enum iostat_type io_type);
+void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
+void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
+void release_ino_entry(struct f2fs_sb_info *sbi, bool all);
+bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
+int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
+int acquire_orphan_inode(struct f2fs_sb_info *sbi);
+void release_orphan_inode(struct f2fs_sb_info *sbi);
+void add_orphan_inode(struct inode *inode);
+void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
+int recover_orphan_inodes(struct f2fs_sb_info *sbi);
+int get_valid_checkpoint(struct f2fs_sb_info *sbi);
+void update_dirty_page(struct inode *inode, struct page *page);
+void remove_dirty_inode(struct inode *inode);
+int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
+int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
+void init_ino_entry_info(struct f2fs_sb_info *sbi);
int __init create_checkpoint_caches(void);
void destroy_checkpoint_caches(void);
/*
* data.c
*/
-void f2fs_submit_merged_bio(struct f2fs_sb_info *, enum page_type, int);
-int f2fs_submit_page_bio(struct f2fs_io_info *);
-void f2fs_submit_page_mbio(struct f2fs_io_info *);
-void set_data_blkaddr(struct dnode_of_data *);
-int reserve_new_block(struct dnode_of_data *);
-int f2fs_get_block(struct dnode_of_data *, pgoff_t);
-int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
-struct page *get_read_data_page(struct inode *, pgoff_t, int, bool);
-struct page *find_data_page(struct inode *, pgoff_t);
-struct page *get_lock_data_page(struct inode *, pgoff_t, bool);
-struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
-int do_write_data_page(struct f2fs_io_info *);
-int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *, u64, u64);
-void f2fs_invalidate_page(struct page *, unsigned int, unsigned int);
-int f2fs_release_page(struct page *, gfp_t);
+void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
+void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
+ struct inode *inode, nid_t ino, pgoff_t idx,
+ enum page_type type);
+void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
+int f2fs_submit_page_bio(struct f2fs_io_info *fio);
+int f2fs_submit_page_write(struct f2fs_io_info *fio);
+struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
+ block_t blk_addr, struct bio *bio);
+int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
+void set_data_blkaddr(struct dnode_of_data *dn);
+void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
+int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
+int reserve_new_block(struct dnode_of_data *dn);
+int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
+int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
+int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
+struct page *get_read_data_page(struct inode *inode, pgoff_t index,
+ int op_flags, bool for_write);
+struct page *find_data_page(struct inode *inode, pgoff_t index);
+struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
+ bool for_write);
+struct page *get_new_data_page(struct inode *inode,
+ struct page *ipage, pgoff_t index, bool new_i_size);
+int do_write_data_page(struct f2fs_io_info *fio);
+int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
+ int create, int flag);
+int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 len);
+void f2fs_set_page_dirty_nobuffers(struct page *page);
+int __f2fs_write_data_pages(struct address_space *mapping,
+ struct writeback_control *wbc,
+ enum iostat_type io_type);
+void f2fs_invalidate_page(struct page *page, unsigned int offset,
+ unsigned int length);
+int f2fs_release_page(struct page *page, gfp_t wait);
+#ifdef CONFIG_MIGRATION
+int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
+ struct page *page, enum migrate_mode mode);
+#endif
/*
* gc.c
*/
-int start_gc_thread(struct f2fs_sb_info *);
-void stop_gc_thread(struct f2fs_sb_info *);
-block_t start_bidx_of_node(unsigned int, struct f2fs_inode_info *);
-int f2fs_gc(struct f2fs_sb_info *, bool);
-void build_gc_manager(struct f2fs_sb_info *);
+int start_gc_thread(struct f2fs_sb_info *sbi);
+void stop_gc_thread(struct f2fs_sb_info *sbi);
+block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
+int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background,
+ unsigned int segno);
+void build_gc_manager(struct f2fs_sb_info *sbi);
/*
* recovery.c
*/
-int recover_fsync_data(struct f2fs_sb_info *);
-bool space_for_roll_forward(struct f2fs_sb_info *);
+int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
+bool space_for_roll_forward(struct f2fs_sb_info *sbi);
/*
* debug.c
int main_area_segs, main_area_sections, main_area_zones;
unsigned long long hit_largest, hit_cached, hit_rbtree;
unsigned long long hit_total, total_ext;
- int ext_tree, ext_node;
- int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
- int nats, dirty_nats, sits, dirty_sits, fnids;
+ int ext_tree, zombie_tree, ext_node;
+ int ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, ndirty_imeta;
+ int inmem_pages;
+ unsigned int ndirty_dirs, ndirty_files, ndirty_all;
+ int nats, dirty_nats, sits, dirty_sits;
+ int free_nids, avail_nids, alloc_nids;
int total_count, utilization;
- int bg_gc, inmem_pages, wb_pages;
- int inline_xattr, inline_inode, inline_dir;
- unsigned int valid_count, valid_node_count, valid_inode_count;
+ int bg_gc, nr_wb_cp_data, nr_wb_data;
+ int nr_flushing, nr_flushed, nr_discarding, nr_discarded;
+ int nr_discard_cmd;
+ unsigned int undiscard_blks;
+ int inline_xattr, inline_inode, inline_dir, append, update, orphans;
+ int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
+ unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
unsigned int bimodal, avg_vblocks;
int util_free, util_valid, util_invalid;
int rsvd_segs, overp_segs;
int dirty_count, node_pages, meta_pages;
- int prefree_count, call_count, cp_count;
+ int prefree_count, call_count, cp_count, bg_cp_count;
int tot_segs, node_segs, data_segs, free_segs, free_secs;
int bg_node_segs, bg_data_segs;
int tot_blks, data_blks, node_blks;
}
#define stat_inc_cp_count(si) ((si)->cp_count++)
+#define stat_inc_bg_cp_count(si) ((si)->bg_cp_count++)
#define stat_inc_call_count(si) ((si)->call_count++)
#define stat_inc_bggc_count(sbi) ((sbi)->bg_gc++)
-#define stat_inc_dirty_dir(sbi) ((sbi)->n_dirty_dirs++)
-#define stat_dec_dirty_dir(sbi) ((sbi)->n_dirty_dirs--)
+#define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++)
+#define stat_dec_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]--)
#define stat_inc_total_hit(sbi) (atomic64_inc(&(sbi)->total_hit_ext))
#define stat_inc_rbtree_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_rbtree))
#define stat_inc_largest_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_largest))
((sbi)->block_count[(curseg)->alloc_type]++)
#define stat_inc_inplace_blocks(sbi) \
(atomic_inc(&(sbi)->inplace_count))
+#define stat_inc_atomic_write(inode) \
+ (atomic_inc(&F2FS_I_SB(inode)->aw_cnt))
+#define stat_dec_atomic_write(inode) \
+ (atomic_dec(&F2FS_I_SB(inode)->aw_cnt))
+#define stat_update_max_atomic_write(inode) \
+ do { \
+ int cur = atomic_read(&F2FS_I_SB(inode)->aw_cnt); \
+ int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt); \
+ if (cur > max) \
+ atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \
+ } while (0)
+#define stat_inc_volatile_write(inode) \
+ (atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
+#define stat_dec_volatile_write(inode) \
+ (atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
+#define stat_update_max_volatile_write(inode) \
+ do { \
+ int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt); \
+ int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt); \
+ if (cur > max) \
+ atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur); \
+ } while (0)
#define stat_inc_seg_count(sbi, type, gc_type) \
do { \
struct f2fs_stat_info *si = F2FS_STAT(sbi); \
- (si)->tot_segs++; \
- if (type == SUM_TYPE_DATA) { \
+ si->tot_segs++; \
+ if ((type) == SUM_TYPE_DATA) { \
si->data_segs++; \
si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0; \
} else { \
} while (0)
#define stat_inc_tot_blk_count(si, blks) \
- (si->tot_blks += (blks))
+ ((si)->tot_blks += (blks))
#define stat_inc_data_blk_count(sbi, blks, gc_type) \
do { \
struct f2fs_stat_info *si = F2FS_STAT(sbi); \
stat_inc_tot_blk_count(si, blks); \
si->data_blks += (blks); \
- si->bg_data_blks += (gc_type == BG_GC) ? (blks) : 0; \
+ si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0; \
} while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type) \
struct f2fs_stat_info *si = F2FS_STAT(sbi); \
stat_inc_tot_blk_count(si, blks); \
si->node_blks += (blks); \
- si->bg_node_blks += (gc_type == BG_GC) ? (blks) : 0; \
+ si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0; \
} while (0)
-int f2fs_build_stats(struct f2fs_sb_info *);
-void f2fs_destroy_stats(struct f2fs_sb_info *);
-void __init f2fs_create_root_stats(void);
+int f2fs_build_stats(struct f2fs_sb_info *sbi);
+void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
+int __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
#else
-#define stat_inc_cp_count(si)
-#define stat_inc_call_count(si)
-#define stat_inc_bggc_count(si)
-#define stat_inc_dirty_dir(sbi)
-#define stat_dec_dirty_dir(sbi)
-#define stat_inc_total_hit(sb)
-#define stat_inc_rbtree_node_hit(sb)
-#define stat_inc_largest_node_hit(sbi)
-#define stat_inc_cached_node_hit(sbi)
-#define stat_inc_inline_xattr(inode)
-#define stat_dec_inline_xattr(inode)
-#define stat_inc_inline_inode(inode)
-#define stat_dec_inline_inode(inode)
-#define stat_inc_inline_dir(inode)
-#define stat_dec_inline_dir(inode)
-#define stat_inc_seg_type(sbi, curseg)
-#define stat_inc_block_count(sbi, curseg)
-#define stat_inc_inplace_blocks(sbi)
-#define stat_inc_seg_count(sbi, type, gc_type)
-#define stat_inc_tot_blk_count(si, blks)
-#define stat_inc_data_blk_count(sbi, blks, gc_type)
-#define stat_inc_node_blk_count(sbi, blks, gc_type)
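+/* No-op stubs; the do-while form keeps them safe inside if/else bodies. */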
+#define stat_inc_cp_count(si) do { } while (0)
+#define stat_inc_bg_cp_count(si) do { } while (0)
+#define stat_inc_call_count(si) do { } while (0)
+#define stat_inc_bggc_count(si) do { } while (0)
+#define stat_inc_dirty_inode(sbi, type) do { } while (0)
+#define stat_dec_dirty_inode(sbi, type) do { } while (0)
+#define stat_inc_total_hit(sb) do { } while (0)
+#define stat_inc_rbtree_node_hit(sb) do { } while (0)
+#define stat_inc_largest_node_hit(sbi) do { } while (0)
+#define stat_inc_cached_node_hit(sbi) do { } while (0)
+#define stat_inc_inline_xattr(inode) do { } while (0)
+#define stat_dec_inline_xattr(inode) do { } while (0)
+#define stat_inc_inline_inode(inode) do { } while (0)
+#define stat_dec_inline_inode(inode) do { } while (0)
+#define stat_inc_inline_dir(inode) do { } while (0)
+#define stat_dec_inline_dir(inode) do { } while (0)
+#define stat_inc_atomic_write(inode) do { } while (0)
+#define stat_dec_atomic_write(inode) do { } while (0)
+#define stat_update_max_atomic_write(inode) do { } while (0)
+#define stat_inc_volatile_write(inode) do { } while (0)
+#define stat_dec_volatile_write(inode) do { } while (0)
+#define stat_update_max_volatile_write(inode) do { } while (0)
+#define stat_inc_seg_type(sbi, curseg) do { } while (0)
+#define stat_inc_block_count(sbi, curseg) do { } while (0)
+#define stat_inc_inplace_blocks(sbi) do { } while (0)
+#define stat_inc_seg_count(sbi, type, gc_type) do { } while (0)
+#define stat_inc_tot_blk_count(si, blks) do { } while (0)
+#define stat_inc_data_blk_count(sbi, blks, gc_type) do { } while (0)
+#define stat_inc_node_blk_count(sbi, blks, gc_type) do { } while (0)
static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
-static inline void __init f2fs_create_root_stats(void) { }
+static inline int __init f2fs_create_root_stats(void) { return 0; }
static inline void f2fs_destroy_root_stats(void) { }
#endif
/*
* inline.c
*/
-bool f2fs_may_inline_data(struct inode *);
-bool f2fs_may_inline_dentry(struct inode *);
-void read_inline_data(struct page *, struct page *);
-bool truncate_inline_inode(struct page *, u64);
-int f2fs_read_inline_data(struct inode *, struct page *);
-int f2fs_convert_inline_page(struct dnode_of_data *, struct page *);
-int f2fs_convert_inline_inode(struct inode *);
-int f2fs_write_inline_data(struct inode *, struct page *);
-bool recover_inline_data(struct inode *, struct page *);
-struct f2fs_dir_entry *find_in_inline_dir(struct inode *,
- struct f2fs_filename *, struct page **);
-struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *, struct page **);
-int make_empty_inline_dir(struct inode *inode, struct inode *, struct page *);
-int f2fs_add_inline_entry(struct inode *, const struct qstr *, struct inode *,
- nid_t, umode_t);
-void f2fs_delete_inline_entry(struct f2fs_dir_entry *, struct page *,
- struct inode *, struct inode *);
-bool f2fs_empty_inline_dir(struct inode *);
-int f2fs_read_inline_dir(struct file *, struct dir_context *,
- struct f2fs_str *);
-int f2fs_inline_data_fiemap(struct inode *,
- struct fiemap_extent_info *, __u64, __u64);
+bool f2fs_may_inline_data(struct inode *inode);
+bool f2fs_may_inline_dentry(struct inode *inode);
+void read_inline_data(struct page *page, struct page *ipage);
+void truncate_inline_inode(struct inode *inode, struct page *ipage, u64 from);
+int f2fs_read_inline_data(struct inode *inode, struct page *page);
+int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
+int f2fs_convert_inline_inode(struct inode *inode);
+int f2fs_write_inline_data(struct inode *inode, struct page *page);
+bool recover_inline_data(struct inode *inode, struct page *npage);
+struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
+ struct fscrypt_name *fname, struct page **res_page);
+int make_empty_inline_dir(struct inode *inode, struct inode *parent,
+ struct page *ipage);
+int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
+ const struct qstr *orig_name,
+ struct inode *inode, nid_t ino, umode_t mode);
+void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
+ struct inode *dir, struct inode *inode);
+bool f2fs_empty_inline_dir(struct inode *dir);
+int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
+ struct fscrypt_str *fstr);
+int f2fs_inline_data_fiemap(struct inode *inode,
+ struct fiemap_extent_info *fieinfo,
+ __u64 start, __u64 len);
/*
* shrinker.c
*/
-unsigned long f2fs_shrink_count(struct shrinker *, struct shrink_control *);
-unsigned long f2fs_shrink_scan(struct shrinker *, struct shrink_control *);
-void f2fs_join_shrinker(struct f2fs_sb_info *);
-void f2fs_leave_shrinker(struct f2fs_sb_info *);
+unsigned long f2fs_shrink_count(struct shrinker *shrink,
+ struct shrink_control *sc);
+unsigned long f2fs_shrink_scan(struct shrinker *shrink,
+ struct shrink_control *sc);
+void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
+void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
/*
* extent_cache.c
*/
-unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *, int);
-void f2fs_drop_largest_extent(struct inode *, pgoff_t);
-void f2fs_init_extent_tree(struct inode *, struct f2fs_extent *);
-unsigned int f2fs_destroy_extent_node(struct inode *);
-void f2fs_destroy_extent_tree(struct inode *);
-bool f2fs_lookup_extent_cache(struct inode *, pgoff_t, struct extent_info *);
-void f2fs_update_extent_cache(struct dnode_of_data *);
+struct rb_entry *__lookup_rb_tree(struct rb_root *root,
+ struct rb_entry *cached_re, unsigned int ofs);
+struct rb_node **__lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
+ struct rb_root *root, struct rb_node **parent,
+ unsigned int ofs);
+struct rb_entry *__lookup_rb_tree_ret(struct rb_root *root,
+ struct rb_entry *cached_re, unsigned int ofs,
+ struct rb_entry **prev_entry, struct rb_entry **next_entry,
+ struct rb_node ***insert_p, struct rb_node **insert_parent,
+ bool force);
+bool __check_rb_tree_consistence(struct f2fs_sb_info *sbi,
+ struct rb_root *root);
+unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
+bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext);
+void f2fs_drop_extent_tree(struct inode *inode);
+unsigned int f2fs_destroy_extent_node(struct inode *inode);
+void f2fs_destroy_extent_tree(struct inode *inode);
+bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
+ struct extent_info *ei);
+void f2fs_update_extent_cache(struct dnode_of_data *dn);
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
- pgoff_t, block_t, unsigned int);
-void init_extent_cache_info(struct f2fs_sb_info *);
+ pgoff_t fofs, block_t blkaddr, unsigned int len);
+void init_extent_cache_info(struct f2fs_sb_info *sbi);
int __init create_extent_cache(void);
void destroy_extent_cache(void);
/*
+ * sysfs.c
+ */
+int __init f2fs_init_sysfs(void);
+void f2fs_exit_sysfs(void);
+int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
+void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);
+
+/*
* crypto support
*/
-static inline int f2fs_encrypted_inode(struct inode *inode)
+static inline bool f2fs_encrypted_inode(struct inode *inode)
{
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
return file_is_encrypt(inode);
-#else
- return 0;
-#endif
+}
+
+static inline bool f2fs_encrypted_file(struct inode *inode)
+{
+ return f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode);
}
static inline void f2fs_set_encrypted_inode(struct inode *inode)
static inline bool f2fs_bio_encrypted(struct bio *bio)
{
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- return unlikely(bio->bi_private != NULL);
-#else
- return false;
-#endif
+ return bio->bi_private != NULL;
}
static inline int f2fs_sb_has_crypto(struct super_block *sb)
{
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_ENCRYPT);
-#else
- return 0;
-#endif
}
-static inline bool f2fs_may_encrypt(struct inode *inode)
+static inline int f2fs_sb_mounted_blkzoned(struct super_block *sb)
{
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- mode_t mode = inode->i_mode;
+ return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_BLKZONED);
+}
- return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
-#else
- return 0;
-#endif
+static inline int f2fs_sb_has_extra_attr(struct super_block *sb)
+{
+ return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_EXTRA_ATTR);
}
-/* crypto_policy.c */
-int f2fs_is_child_context_consistent_with_parent(struct inode *,
- struct inode *);
-int f2fs_inherit_context(struct inode *, struct inode *, struct page *);
-int f2fs_process_policy(const struct f2fs_encryption_policy *, struct inode *);
-int f2fs_get_policy(struct inode *, struct f2fs_encryption_policy *);
-
-/* crypt.c */
-extern struct kmem_cache *f2fs_crypt_info_cachep;
-bool f2fs_valid_contents_enc_mode(uint32_t);
-uint32_t f2fs_validate_encryption_key_size(uint32_t, uint32_t);
-struct f2fs_crypto_ctx *f2fs_get_crypto_ctx(struct inode *);
-void f2fs_release_crypto_ctx(struct f2fs_crypto_ctx *);
-struct page *f2fs_encrypt(struct inode *, struct page *);
-int f2fs_decrypt(struct f2fs_crypto_ctx *, struct page *);
-int f2fs_decrypt_one(struct inode *, struct page *);
-void f2fs_end_io_crypto_work(struct f2fs_crypto_ctx *, struct bio *);
-
-/* crypto_key.c */
-void f2fs_free_encryption_info(struct inode *, struct f2fs_crypt_info *);
-int _f2fs_get_encryption_info(struct inode *inode);
-
-/* crypto_fname.c */
-bool f2fs_valid_filenames_enc_mode(uint32_t);
-u32 f2fs_fname_crypto_round_up(u32, u32);
-int f2fs_fname_crypto_alloc_buffer(struct inode *, u32, struct f2fs_str *);
-int f2fs_fname_disk_to_usr(struct inode *, f2fs_hash_t *,
- const struct f2fs_str *, struct f2fs_str *);
-int f2fs_fname_usr_to_disk(struct inode *, const struct qstr *,
- struct f2fs_str *);
+static inline int f2fs_sb_has_project_quota(struct super_block *sb)
+{
+ return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_PRJQUOTA);
+}
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
-void f2fs_restore_and_release_control_page(struct page **);
-void f2fs_restore_control_page(struct page *);
+static inline int f2fs_sb_has_inode_chksum(struct super_block *sb)
+{
+ return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_INODE_CHKSUM);
+}
-int __init f2fs_init_crypto(void);
-int f2fs_crypto_initialize(void);
-void f2fs_exit_crypto(void);
+#ifdef CONFIG_BLK_DEV_ZONED
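+/*
+ * Map a block address to the zone type of the device backing it:
+ * convert the address into a zone number, then find the matching
+ * block device among the registered devices.
+ */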
+static inline int get_blkz_type(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t blkaddr)
+{
+ unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;
+ int i;
-int f2fs_has_encryption_key(struct inode *);
+ for (i = 0; i < sbi->s_ndevs; i++)
+ if (FDEV(i).bdev == bdev)
+ return FDEV(i).blkz_type[zno];
+ return -EINVAL;
+}
+#endif
-static inline int f2fs_get_encryption_info(struct inode *inode)
+static inline bool f2fs_discard_en(struct f2fs_sb_info *sbi)
{
- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
+ struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
- if (!ci ||
- (ci->ci_keyring_key &&
- (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
- (1 << KEY_FLAG_REVOKED) |
- (1 << KEY_FLAG_DEAD)))))
- return _f2fs_get_encryption_info(inode);
- return 0;
+ return blk_queue_discard(q) || f2fs_sb_mounted_blkzoned(sbi->sb);
}
-void f2fs_fname_crypto_free_buffer(struct f2fs_str *);
-int f2fs_fname_setup_filename(struct inode *, const struct qstr *,
- int lookup, struct f2fs_filename *);
-void f2fs_fname_free_filename(struct f2fs_filename *);
-#else
-static inline void f2fs_restore_and_release_control_page(struct page **p) { }
-static inline void f2fs_restore_control_page(struct page *p) { }
-
-static inline int __init f2fs_init_crypto(void) { return 0; }
-static inline void f2fs_exit_crypto(void) { }
+static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt)
+{
+ clear_opt(sbi, ADAPTIVE);
+ clear_opt(sbi, LFS);
-static inline int f2fs_has_encryption_key(struct inode *i) { return 0; }
-static inline int f2fs_get_encryption_info(struct inode *i) { return 0; }
-static inline void f2fs_fname_crypto_free_buffer(struct f2fs_str *p) { }
+ switch (mt) {
+ case F2FS_MOUNT_ADAPTIVE:
+ set_opt(sbi, ADAPTIVE);
+ break;
+ case F2FS_MOUNT_LFS:
+ set_opt(sbi, LFS);
+ break;
+ }
+}
-static inline int f2fs_fname_setup_filename(struct inode *dir,
- const struct qstr *iname,
- int lookup, struct f2fs_filename *fname)
+static inline bool f2fs_may_encrypt(struct inode *inode)
{
- memset(fname, 0, sizeof(struct f2fs_filename));
- fname->usr_fname = iname;
- fname->disk_name.name = (unsigned char *)iname->name;
- fname->disk_name.len = iname->len;
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+ umode_t mode = inode->i_mode;
+
+ return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
+#else
return 0;
+#endif
}
-static inline void f2fs_fname_free_filename(struct f2fs_filename *fname) { }
-#endif
#endif
+++ /dev/null
-/*
- * linux/fs/f2fs/f2fs_crypto.h
- *
- * Copied from linux/fs/ext4/ext4_crypto.h
- *
- * Copyright (C) 2015, Google, Inc.
- *
- * This contains encryption header content for f2fs
- *
- * Written by Michael Halcrow, 2015.
- * Modified by Jaegeuk Kim, 2015.
- */
-#ifndef _F2FS_CRYPTO_H
-#define _F2FS_CRYPTO_H
-
-#include <linux/fs.h>
-
-#define F2FS_KEY_DESCRIPTOR_SIZE 8
-
-/* Policy provided via an ioctl on the topmost directory */
-struct f2fs_encryption_policy {
- char version;
- char contents_encryption_mode;
- char filenames_encryption_mode;
- char flags;
- char master_key_descriptor[F2FS_KEY_DESCRIPTOR_SIZE];
-} __attribute__((__packed__));
-
-#define F2FS_ENCRYPTION_CONTEXT_FORMAT_V1 1
-#define F2FS_KEY_DERIVATION_NONCE_SIZE 16
-
-#define F2FS_POLICY_FLAGS_PAD_4 0x00
-#define F2FS_POLICY_FLAGS_PAD_8 0x01
-#define F2FS_POLICY_FLAGS_PAD_16 0x02
-#define F2FS_POLICY_FLAGS_PAD_32 0x03
-#define F2FS_POLICY_FLAGS_PAD_MASK 0x03
-#define F2FS_POLICY_FLAGS_VALID 0x03
-
-/**
- * Encryption context for inode
- *
- * Protector format:
- * 1 byte: Protector format (1 = this version)
- * 1 byte: File contents encryption mode
- * 1 byte: File names encryption mode
- * 1 byte: Flags
- * 8 bytes: Master Key descriptor
- * 16 bytes: Encryption Key derivation nonce
- */
-struct f2fs_encryption_context {
- char format;
- char contents_encryption_mode;
- char filenames_encryption_mode;
- char flags;
- char master_key_descriptor[F2FS_KEY_DESCRIPTOR_SIZE];
- char nonce[F2FS_KEY_DERIVATION_NONCE_SIZE];
-} __attribute__((__packed__));
-
-/* Encryption parameters */
-#define F2FS_XTS_TWEAK_SIZE 16
-#define F2FS_AES_128_ECB_KEY_SIZE 16
-#define F2FS_AES_256_GCM_KEY_SIZE 32
-#define F2FS_AES_256_CBC_KEY_SIZE 32
-#define F2FS_AES_256_CTS_KEY_SIZE 32
-#define F2FS_AES_256_XTS_KEY_SIZE 64
-#define F2FS_MAX_KEY_SIZE 64
-
-#define F2FS_KEY_DESC_PREFIX "f2fs:"
-#define F2FS_KEY_DESC_PREFIX_SIZE 5
-
-struct f2fs_encryption_key {
- __u32 mode;
- char raw[F2FS_MAX_KEY_SIZE];
- __u32 size;
-} __attribute__((__packed__));
-
-struct f2fs_crypt_info {
- char ci_data_mode;
- char ci_filename_mode;
- char ci_flags;
- struct crypto_ablkcipher *ci_ctfm;
- struct key *ci_keyring_key;
- char ci_master_key[F2FS_KEY_DESCRIPTOR_SIZE];
-};
-
-#define F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
-#define F2FS_WRITE_PATH_FL 0x00000002
-
-struct f2fs_crypto_ctx {
- union {
- struct {
- struct page *bounce_page; /* Ciphertext page */
- struct page *control_page; /* Original page */
- } w;
- struct {
- struct bio *bio;
- struct work_struct work;
- } r;
- struct list_head free_list; /* Free list */
- };
- char flags; /* Flags */
-};
-
-struct f2fs_completion_result {
- struct completion completion;
- int res;
-};
-
-#define DECLARE_F2FS_COMPLETION_RESULT(ecr) \
- struct f2fs_completion_result ecr = { \
- COMPLETION_INITIALIZER((ecr).completion), 0 }
-
-static inline int f2fs_encryption_key_size(int mode)
-{
- switch (mode) {
- case F2FS_ENCRYPTION_MODE_AES_256_XTS:
- return F2FS_AES_256_XTS_KEY_SIZE;
- case F2FS_ENCRYPTION_MODE_AES_256_GCM:
- return F2FS_AES_256_GCM_KEY_SIZE;
- case F2FS_ENCRYPTION_MODE_AES_256_CBC:
- return F2FS_AES_256_CBC_KEY_SIZE;
- case F2FS_ENCRYPTION_MODE_AES_256_CTS:
- return F2FS_AES_256_CTS_KEY_SIZE;
- default:
- BUG();
- }
- return 0;
-}
-
-#define F2FS_FNAME_NUM_SCATTER_ENTRIES 4
-#define F2FS_CRYPTO_BLOCK_SIZE 16
-#define F2FS_FNAME_CRYPTO_DIGEST_SIZE 32
-
-/**
- * For encrypted symlinks, the ciphertext length is stored at the beginning
- * of the string in little-endian format.
- */
-struct f2fs_encrypted_symlink_data {
- __le16 len;
- char encrypted_path[1];
-} __attribute__((__packed__));
-
-/**
- * This function is used to calculate the disk space required to
- * store a filename of length l in encrypted symlink format.
- */
-static inline u32 encrypted_symlink_data_len(u32 l)
-{
- return (l + sizeof(struct f2fs_encrypted_symlink_data) - 1);
-}
-#endif /* _F2FS_CRYPTO_H */
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/random.h>
+#include <linux/uio.h>
+#include <linux/uuid.h>
+#include <linux/file.h>
#include "f2fs.h"
#include "node.h"
#include "trace.h"
#include <trace/events/f2fs.h>
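+/* Serialize page faults against truncate and hole-punching via i_mmap_sem. */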
+static int f2fs_filemap_fault(struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+{
+ struct inode *inode = file_inode(vma->vm_file);
+ int err;
+
+ down_read(&F2FS_I(inode)->i_mmap_sem);
+ err = filemap_fault(vma, vmf);
+ up_read(&F2FS_I(inode)->i_mmap_sem);
+
+ return err;
+}
+
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
struct vm_fault *vmf)
{
struct dnode_of_data dn;
int err;
- f2fs_balance_fs(sbi);
-
sb_start_pagefault(inode->i_sb);
f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
f2fs_put_dnode(&dn);
f2fs_unlock_op(sbi);
+ f2fs_balance_fs(sbi, dn.node_changed);
+
file_update_time(vma->vm_file);
+ down_read(&F2FS_I(inode)->i_mmap_sem);
lock_page(page);
if (unlikely(page->mapping != inode->i_mapping ||
page_offset(page) > i_size_read(inode) ||
!PageUptodate(page))) {
unlock_page(page);
err = -EFAULT;
- goto out;
+ goto out_sem;
}
/*
goto mapped;
/* page is wholly or partially inside EOF */
- if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
+ if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
i_size_read(inode)) {
unsigned offset;
- offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
- zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+ offset = i_size_read(inode) & ~PAGE_MASK;
+ zero_user_segment(page, offset, PAGE_SIZE);
}
set_page_dirty(page);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
+
+ f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
/* fill the page */
- f2fs_wait_on_page_writeback(page, DATA);
+ f2fs_wait_on_page_writeback(page, DATA, false);
/* wait for GCed encrypted page writeback */
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
- f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);
+ if (f2fs_encrypted_file(inode))
+ f2fs_wait_on_block_writeback(sbi, dn.data_blkaddr);
- /* if gced page is attached, don't write to cold segment */
- clear_cold_data(page);
+out_sem:
+ up_read(&F2FS_I(inode)->i_mmap_sem);
out:
sb_end_pagefault(inode->i_sb);
+ f2fs_update_time(sbi, REQ_TIME);
return block_page_mkwrite_return(err);
}
static const struct vm_operations_struct f2fs_file_vm_ops = {
- .fault = filemap_fault,
+ .fault = f2fs_filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = f2fs_vm_page_mkwrite,
};
if (!dentry)
return 0;
- if (update_dent_inode(inode, inode, &dentry->d_name)) {
- dput(dentry);
- return 0;
- }
-
*pino = parent_ino(dentry);
dput(dentry);
return 1;
if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
need_cp = true;
- else if (file_enc_name(inode) && need_dentry_mark(sbi, inode->i_ino))
+ else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
need_cp = true;
else if (file_wrong_pino(inode))
need_cp = true;
need_cp = true;
else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
need_cp = true;
- else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
- need_cp = true;
else if (test_opt(sbi, FASTBOOT))
need_cp = true;
else if (sbi->active_logs == 2)
nid_t pino;
down_write(&fi->i_sem);
- fi->xattr_ver = 0;
if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
get_parent_ino(inode, &pino)) {
- fi->i_pino = pino;
+ f2fs_i_pino_write(inode, pino);
file_got_pino(inode);
- up_write(&fi->i_sem);
-
- mark_inode_dirty_sync(inode);
- f2fs_write_inode(inode, NULL);
- } else {
- up_write(&fi->i_sem);
}
+ up_write(&fi->i_sem);
}
-int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
+ int datasync, bool atomic)
{
struct inode *inode = file->f_mapping->host;
- struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
nid_t ino = inode->i_ino;
int ret = 0;
trace_f2fs_sync_file_enter(inode);
/* if fdatasync is triggered, let's do in-place-update */
- if (get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
- set_inode_flag(fi, FI_NEED_IPU);
+ if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
+ set_inode_flag(inode, FI_NEED_IPU);
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
- clear_inode_flag(fi, FI_NEED_IPU);
+ clear_inode_flag(inode, FI_NEED_IPU);
if (ret) {
trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
}
/* if the inode is dirty, let's recover all the time */
- if (!datasync) {
+ if (!f2fs_skip_inode_update(inode, datasync)) {
f2fs_write_inode(inode, NULL);
goto go_write;
}
/*
 * if there is no written data, don't waste time writing recovery info.
*/
- if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&
+ if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
!exist_written_data(sbi, ino, APPEND_INO)) {
/* it may call write_inode just prior to fsync */
if (need_inode_page_update(sbi, ino))
goto go_write;
- if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||
+ if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
exist_written_data(sbi, ino, UPDATE_INO))
goto flush_out;
goto out;
}
go_write:
- /* guarantee free sections for fsync */
- f2fs_balance_fs(sbi);
-
/*
 * Data written by fdatasync() and fsync() can both be recovered
 * after a sudden power-off.
*/
- down_read(&fi->i_sem);
+ down_read(&F2FS_I(inode)->i_sem);
need_cp = need_do_checkpoint(inode);
- up_read(&fi->i_sem);
+ up_read(&F2FS_I(inode)->i_sem);
if (need_cp) {
/* all the dirty node pages should be flushed for POR */
* will be used only for fsynced inodes after checkpoint.
*/
try_to_fix_pino(inode);
- clear_inode_flag(fi, FI_APPEND_WRITE);
- clear_inode_flag(fi, FI_UPDATE_WRITE);
+ clear_inode_flag(inode, FI_APPEND_WRITE);
+ clear_inode_flag(inode, FI_UPDATE_WRITE);
goto out;
}
sync_nodes:
- sync_node_pages(sbi, ino, &wbc);
+ ret = fsync_node_pages(sbi, inode, &wbc, atomic);
+ if (ret)
+ goto out;
/* if cp_error was enabled, we should avoid infinite loop */
- if (unlikely(f2fs_cp_error(sbi)))
+ if (unlikely(f2fs_cp_error(sbi))) {
+ ret = -EIO;
goto out;
+ }
if (need_inode_block_update(sbi, ino)) {
- mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
f2fs_write_inode(inode, NULL);
goto sync_nodes;
}
- ret = wait_on_node_pages_writeback(sbi, ino);
- if (ret)
- goto out;
+ /*
+ * If it's an atomic write, keeping the write ordering is enough, so we
+ * don't need to wait for node write completion here: the node chain
+ * serializes node blocks. If any node write is reordered, we simply see
+ * a broken chain and stop roll-forward recovery, which means we recover
+ * either all or none of the node blocks covered by the fsync mark.
+ */
+ if (!atomic) {
+ ret = wait_on_node_pages_writeback(sbi, ino);
+ if (ret)
+ goto out;
+ }
 /* once the recovery info is written, we don't need to track this */
- remove_dirty_inode(sbi, ino, APPEND_INO);
- clear_inode_flag(fi, FI_APPEND_WRITE);
+ remove_ino_entry(sbi, ino, APPEND_INO);
+ clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
- remove_dirty_inode(sbi, ino, UPDATE_INO);
- clear_inode_flag(fi, FI_UPDATE_WRITE);
- ret = f2fs_issue_flush(sbi);
+ remove_ino_entry(sbi, ino, UPDATE_INO);
+ clear_inode_flag(inode, FI_UPDATE_WRITE);
+ if (!atomic)
+ ret = f2fs_issue_flush(sbi);
+ f2fs_update_time(sbi, REQ_TIME);
out:
trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
f2fs_trace_ios(NULL, 1);
return ret;
}
+int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+{
+ return f2fs_do_sync_file(file, start, end, datasync, false);
+}
+
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
pgoff_t pgofs, int whence)
{
pagevec_init(&pvec, 0);
nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
PAGECACHE_TAG_DIRTY, 1);
- pgofs = nr_pages ? pvec.pages[0]->index : LONG_MAX;
+ pgofs = nr_pages ? pvec.pages[0]->index : ULONG_MAX;
pagevec_release(&pvec);
return pgofs;
}
loff_t isize;
int err = 0;
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
isize = i_size_read(inode);
if (offset >= isize)
goto found;
}
- pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);
+ pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
- for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
+ for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
set_new_dnode(&dn, inode, NULL, NULL, 0);
- err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
+ err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
if (err && err != -ENOENT) {
goto fail;
} else if (err == -ENOENT) {
/* direct node does not exists */
if (whence == SEEK_DATA) {
- pgofs = PGOFS_OF_NEXT_DNODE(pgofs,
- F2FS_I(inode));
+ pgofs = get_next_page_offset(&dn, pgofs);
continue;
} else {
goto found;
}
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+ end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
/* find data/hole in dnode block */
for (; dn.ofs_in_node < end_offset;
dn.ofs_in_node++, pgofs++,
- data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
+ data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
block_t blkaddr;
- blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+ blkaddr = datablock_addr(dn.inode,
+ dn.node_page, dn.ofs_in_node);
if (__found_offset(blkaddr, dirty, pgofs, whence)) {
f2fs_put_dnode(&dn);
found:
if (whence == SEEK_HOLE && data_ofs > isize)
data_ofs = isize;
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
return vfs_setpos(file, data_ofs, maxbytes);
fail:
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
return -ENXIO;
}
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct inode *inode = file_inode(file);
-
- if (f2fs_encrypted_inode(inode)) {
- int err = f2fs_get_encryption_info(inode);
- if (err)
- return 0;
- }
+ int err;
/* we don't need to use inline_data strictly */
- if (f2fs_has_inline_data(inode)) {
- int err = f2fs_convert_inline_inode(inode);
- if (err)
- return err;
- }
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
file_accessed(file);
vma->vm_ops = &f2fs_file_vm_ops;
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
- int ret = generic_file_open(inode, filp);
+ struct dentry *dir;
- if (!ret && f2fs_encrypted_inode(inode)) {
- ret = f2fs_get_encryption_info(inode);
+ if (f2fs_encrypted_inode(inode)) {
+ int ret = fscrypt_get_encryption_info(inode);
if (ret)
- ret = -EACCES;
+ return -EACCES;
+ if (!fscrypt_has_encryption_key(inode))
+ return -ENOKEY;
}
- return ret;
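+ /* An encrypted child must be consistent with its parent's context. */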
+ dir = dget_parent(file_dentry(filp));
+ if (f2fs_encrypted_inode(d_inode(dir)) &&
+ !fscrypt_has_permitted_context(d_inode(dir), inode)) {
+ dput(dir);
+ return -EPERM;
+ }
+ dput(dir);
+ return dquot_file_open(inode, filp);
}
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
struct f2fs_node *raw_node;
int nr_free = 0, ofs = dn->ofs_in_node, len = count;
__le32 *addr;
+ int base = 0;
+
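+ /* With extra attributes, block addresses start after the attr area. */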
+ if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
+ base = get_extra_isize(dn->inode);
raw_node = F2FS_NODE(dn->node_page);
- addr = blkaddr_in_node(raw_node) + ofs;
+ addr = blkaddr_in_node(raw_node) + base + ofs;
for (; count > 0; count--, addr++, dn->ofs_in_node++) {
block_t blkaddr = le32_to_cpu(*addr);
set_data_blkaddr(dn);
invalidate_blocks(sbi, blkaddr);
if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
- clear_inode_flag(F2FS_I(dn->inode),
- FI_FIRST_BLOCK_WRITTEN);
+ clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
nr_free++;
}
* we will invalidate all blkaddr in the whole range.
*/
fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
- F2FS_I(dn->inode)) + ofs;
+ dn->inode) + ofs;
f2fs_update_extent_cache_range(dn, fofs, 0, len);
dec_valid_block_count(sbi, dn->inode, nr_free);
- set_page_dirty(dn->node_page);
- sync_inode_page(dn);
}
dn->ofs_in_node = ofs;
+ f2fs_update_time(sbi, REQ_TIME);
trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
dn->ofs_in_node, nr_free);
return nr_free;
static int truncate_partial_data_page(struct inode *inode, u64 from,
bool cache_only)
{
- unsigned offset = from & (PAGE_CACHE_SIZE - 1);
- pgoff_t index = from >> PAGE_CACHE_SHIFT;
+ unsigned offset = from & (PAGE_SIZE - 1);
+ pgoff_t index = from >> PAGE_SHIFT;
struct address_space *mapping = inode->i_mapping;
struct page *page;
return 0;
if (cache_only) {
- page = f2fs_grab_cache_page(mapping, index, false);
+ page = find_lock_page(mapping, index);
if (page && PageUptodate(page))
goto truncate_out;
f2fs_put_page(page, 1);
page = get_lock_data_page(inode, index, true);
if (IS_ERR(page))
- return 0;
+ return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
- f2fs_wait_on_page_writeback(page, DATA);
- zero_user(page, offset, PAGE_CACHE_SIZE - offset);
- if (!cache_only || !f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
+ f2fs_wait_on_page_writeback(page, DATA, true);
+ zero_user(page, offset, PAGE_SIZE - offset);
+
+ /* An encrypted inode should have a key and truncate the last page. */
+ f2fs_bug_on(F2FS_I_SB(inode), cache_only && f2fs_encrypted_inode(inode));
+ if (!cache_only)
set_page_dirty(page);
f2fs_put_page(page, 1);
return 0;
free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);
+ if (free_from >= sbi->max_file_blocks)
+ goto free_partial;
+
if (lock)
f2fs_lock_op(sbi);
}
if (f2fs_has_inline_data(inode)) {
- if (truncate_inline_inode(ipage, from))
- set_page_dirty(ipage);
+ truncate_inline_inode(inode, ipage, from);
f2fs_put_page(ipage, 1);
truncate_page = true;
goto out;
}
set_new_dnode(&dn, inode, ipage, NULL, 0);
- err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
+ err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
if (err) {
if (err == -ENOENT)
goto free_next;
goto out;
}
- count = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+ count = ADDRS_PER_PAGE(dn.node_page, inode);
count -= dn.ofs_in_node;
f2fs_bug_on(sbi, count < 0);
out:
if (lock)
f2fs_unlock_op(sbi);
-
+free_partial:
/* lastly zero out the first data page */
if (!err)
err = truncate_partial_data_page(inode, from, truncate_page);
return err;
}
-int f2fs_truncate(struct inode *inode, bool lock)
+int f2fs_truncate(struct inode *inode)
{
int err;
trace_f2fs_truncate(inode);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
+ f2fs_show_injection_info(FAULT_TRUNCATE);
+ return -EIO;
+ }
+#endif
/* we should check inline_data size */
- if (f2fs_has_inline_data(inode) && !f2fs_may_inline_data(inode)) {
+ if (!f2fs_may_inline_data(inode)) {
err = f2fs_convert_inline_inode(inode);
if (err)
return err;
}
- err = truncate_blocks(inode, i_size_read(inode), lock);
+ err = truncate_blocks(inode, i_size_read(inode), true);
if (err)
return err;
- inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- mark_inode_dirty(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
+ f2fs_mark_inode_dirty_sync(inode, false);
return 0;
}
{
struct inode *inode = d_inode(dentry);
generic_fillattr(inode, stat);
- stat->blocks <<= 3;
+
+ /* we need to show initial sectors used for inline_data/dentries */
+ if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
+ stat->blocks += (stat->size + 511) >> 9;
+
return 0;
}
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
- struct f2fs_inode_info *fi = F2FS_I(inode);
unsigned int ia_valid = attr->ia_valid;
if (ia_valid & ATTR_UID)
if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
mode &= ~S_ISGID;
- set_acl_inode(fi, mode);
+ set_acl_inode(inode, mode);
}
}
#else
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
- struct f2fs_inode_info *fi = F2FS_I(inode);
int err;
+ bool size_changed = false;
err = inode_change_ok(inode, attr);
if (err)
return err;
+ if (is_quota_modification(inode, attr)) {
+ err = dquot_initialize(inode);
+ if (err)
+ return err;
+ }
+ if ((attr->ia_valid & ATTR_UID &&
+ !uid_eq(attr->ia_uid, inode->i_uid)) ||
+ (attr->ia_valid & ATTR_GID &&
+ !gid_eq(attr->ia_gid, inode->i_gid))) {
+ err = dquot_transfer(inode, attr);
+ if (err)
+ return err;
+ }
+
if (attr->ia_valid & ATTR_SIZE) {
- if (f2fs_encrypted_inode(inode) &&
- f2fs_get_encryption_info(inode))
- return -EACCES;
+ if (f2fs_encrypted_inode(inode)) {
+ err = fscrypt_get_encryption_info(inode);
+ if (err)
+ return err;
+ if (!fscrypt_has_encryption_key(inode))
+ return -ENOKEY;
+ }
if (attr->ia_size <= i_size_read(inode)) {
+ down_write(&F2FS_I(inode)->i_mmap_sem);
truncate_setsize(inode, attr->ia_size);
- err = f2fs_truncate(inode, true);
+ err = f2fs_truncate(inode);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
if (err)
return err;
- f2fs_balance_fs(F2FS_I_SB(inode));
} else {
/*
* do not trim all blocks after i_size if target size is
* larger than i_size.
*/
+ down_write(&F2FS_I(inode)->i_mmap_sem);
truncate_setsize(inode, attr->ia_size);
- inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+
+ /* should convert inline inode here */
+ if (!f2fs_may_inline_data(inode)) {
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
+ }
+ inode->i_mtime = inode->i_ctime = current_time(inode);
}
+
+ size_changed = true;
}
__setattr_copy(inode, attr);
if (attr->ia_valid & ATTR_MODE) {
err = posix_acl_chmod(inode, get_inode_mode(inode));
- if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
- inode->i_mode = fi->i_acl_mode;
- clear_inode_flag(fi, FI_ACL_MODE);
+ if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
+ inode->i_mode = F2FS_I(inode)->i_acl_mode;
+ clear_inode_flag(inode, FI_ACL_MODE);
}
}
- mark_inode_dirty(inode);
+ /* file size may have changed here */
+ f2fs_mark_inode_dirty_sync(inode, size_changed);
+
+ /* inode change will produce dirty node pages flushed by checkpoint */
+ f2fs_balance_fs(F2FS_I_SB(inode), true);
+
return err;
}
if (!len)
return 0;
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
page = get_new_data_page(inode, NULL, index, false);
if (IS_ERR(page))
return PTR_ERR(page);
- f2fs_wait_on_page_writeback(page, DATA);
+ f2fs_wait_on_page_writeback(page, DATA, true);
zero_user(page, start, len);
set_page_dirty(page);
f2fs_put_page(page, 1);
return err;
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+ end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
{
pgoff_t pg_start, pg_end;
loff_t off_start, off_end;
- int ret = 0;
+ int ret;
- if (f2fs_has_inline_data(inode)) {
- ret = f2fs_convert_inline_inode(inode);
- if (ret)
- return ret;
- }
+ ret = f2fs_convert_inline_inode(inode);
+ if (ret)
+ return ret;
- pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
- pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+ pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+ pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
- off_start = offset & (PAGE_CACHE_SIZE - 1);
- off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+ off_start = offset & (PAGE_SIZE - 1);
+ off_end = (offset + len) & (PAGE_SIZE - 1);
if (pg_start == pg_end) {
ret = fill_zero(inode, pg_start, off_start,
} else {
if (off_start) {
ret = fill_zero(inode, pg_start++, off_start,
- PAGE_CACHE_SIZE - off_start);
+ PAGE_SIZE - off_start);
if (ret)
return ret;
}
loff_t blk_start, blk_end;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
- blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
- blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
+ blk_start = (loff_t)pg_start << PAGE_SHIFT;
+ blk_end = (loff_t)pg_end << PAGE_SHIFT;
+ down_write(&F2FS_I(inode)->i_mmap_sem);
truncate_inode_pages_range(mapping, blk_start,
blk_end - 1);
f2fs_lock_op(sbi);
ret = truncate_hole(inode, pg_start, pg_end);
f2fs_unlock_op(sbi);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
}
}
return ret;
}
-static int __exchange_data_block(struct inode *inode, pgoff_t src,
- pgoff_t dst, bool full)
+static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
+ int *do_replace, pgoff_t off, pgoff_t len)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
- block_t new_addr;
- bool do_replace = false;
- int ret;
+ int ret, done, i;
+next_dnode:
set_new_dnode(&dn, inode, NULL, NULL, 0);
- ret = get_dnode_of_data(&dn, src, LOOKUP_NODE_RA);
+ ret = get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
if (ret && ret != -ENOENT) {
return ret;
} else if (ret == -ENOENT) {
- new_addr = NULL_ADDR;
- } else {
- new_addr = dn.data_blkaddr;
- if (!is_checkpointed_data(sbi, new_addr)) {
- dn.data_blkaddr = NULL_ADDR;
+ if (dn.max_level == 0)
+ return -ENOENT;
+ done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
+ blkaddr += done;
+ do_replace += done;
+ goto next;
+ }
+
+ done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
+ dn.ofs_in_node, len);
+ for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
+ *blkaddr = datablock_addr(dn.inode,
+ dn.node_page, dn.ofs_in_node);
+ if (!is_checkpointed_data(sbi, *blkaddr)) {
+
+ if (test_opt(sbi, LFS)) {
+ f2fs_put_dnode(&dn);
+ return -ENOTSUPP;
+ }
+
/* do not invalidate this block address */
- set_data_blkaddr(&dn);
- f2fs_update_extent_cache(&dn);
- do_replace = true;
+ f2fs_update_data_blkaddr(&dn, NULL_ADDR);
+ *do_replace = 1;
}
- f2fs_put_dnode(&dn);
}
+ f2fs_put_dnode(&dn);
+next:
+ len -= done;
+ off += done;
+ if (len)
+ goto next_dnode;
+ return 0;
+}
- if (new_addr == NULL_ADDR)
- return full ? truncate_hole(inode, dst, dst + 1) : 0;
+static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
+ int *do_replace, pgoff_t off, int len)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct dnode_of_data dn;
+ int ret, i;
- if (do_replace) {
- struct page *ipage = get_node_page(sbi, inode->i_ino);
- struct node_info ni;
+ for (i = 0; i < len; i++, do_replace++, blkaddr++) {
+ if (*do_replace == 0)
+ continue;
- if (IS_ERR(ipage)) {
- ret = PTR_ERR(ipage);
- goto err_out;
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
+ if (ret) {
+ dec_valid_block_count(sbi, inode, 1);
+ invalidate_blocks(sbi, *blkaddr);
+ } else {
+ f2fs_update_data_blkaddr(&dn, *blkaddr);
+ }
+ f2fs_put_dnode(&dn);
+ }
+ return 0;
+}
+
+static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
+ block_t *blkaddr, int *do_replace,
+ pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
+ pgoff_t i = 0;
+ int ret;
+
+ while (i < len) {
+ if (blkaddr[i] == NULL_ADDR && !full) {
+ i++;
+ continue;
}
- set_new_dnode(&dn, inode, ipage, NULL, 0);
- ret = f2fs_reserve_block(&dn, dst);
- if (ret)
- goto err_out;
+ if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
+ struct dnode_of_data dn;
+ struct node_info ni;
+ size_t new_size;
+ pgoff_t ilen;
- truncate_data_blocks_range(&dn, 1);
+ set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
+ ret = get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
+ if (ret)
+ return ret;
- get_node_info(sbi, dn.nid, &ni);
- f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
- ni.version, true);
- f2fs_put_dnode(&dn);
- } else {
- struct page *psrc, *pdst;
+ get_node_info(sbi, dn.nid, &ni);
+ ilen = min((pgoff_t)
+ ADDRS_PER_PAGE(dn.node_page, dst_inode) -
+ dn.ofs_in_node, len - i);
+ do {
+ dn.data_blkaddr = datablock_addr(dn.inode,
+ dn.node_page, dn.ofs_in_node);
+ truncate_data_blocks_range(&dn, 1);
+
+ if (do_replace[i]) {
+ f2fs_i_blocks_write(src_inode,
+ 1, false, false);
+ f2fs_i_blocks_write(dst_inode,
+ 1, true, false);
+ f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
+ blkaddr[i], ni.version, true, false);
+
+ do_replace[i] = 0;
+ }
+ dn.ofs_in_node++;
+ i++;
+ new_size = (dst + i) << PAGE_SHIFT;
+ if (dst_inode->i_size < new_size)
+ f2fs_i_size_write(dst_inode, new_size);
+ } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
- psrc = get_lock_data_page(inode, src, true);
- if (IS_ERR(psrc))
- return PTR_ERR(psrc);
- pdst = get_new_data_page(inode, NULL, dst, false);
- if (IS_ERR(pdst)) {
+ f2fs_put_dnode(&dn);
+ } else {
+ struct page *psrc, *pdst;
+
+ psrc = get_lock_data_page(src_inode, src + i, true);
+ if (IS_ERR(psrc))
+ return PTR_ERR(psrc);
+ pdst = get_new_data_page(dst_inode, NULL, dst + i,
+ true);
+ if (IS_ERR(pdst)) {
+ f2fs_put_page(psrc, 1);
+ return PTR_ERR(pdst);
+ }
+ f2fs_copy_page(psrc, pdst);
+ set_page_dirty(pdst);
+ f2fs_put_page(pdst, 1);
f2fs_put_page(psrc, 1);
- return PTR_ERR(pdst);
- }
- f2fs_copy_page(psrc, pdst);
- set_page_dirty(pdst);
- f2fs_put_page(pdst, 1);
- f2fs_put_page(psrc, 1);
- return truncate_hole(inode, src, src + 1);
+ ret = truncate_hole(src_inode, src + i, src + i + 1);
+ if (ret)
+ return ret;
+ i++;
+ }
}
return 0;
+}
-err_out:
- if (!get_dnode_of_data(&dn, src, LOOKUP_NODE)) {
- dn.data_blkaddr = new_addr;
- set_data_blkaddr(&dn);
- f2fs_update_extent_cache(&dn);
- f2fs_put_dnode(&dn);
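+/*
+ * Exchange data blocks between two inodes in bounded chunks: read out
+ * the source block addresses, clone or move them into the destination,
+ * and roll the source back if anything fails midway.
+ */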
+static int __exchange_data_block(struct inode *src_inode,
+ struct inode *dst_inode, pgoff_t src, pgoff_t dst,
+ pgoff_t len, bool full)
+{
+ block_t *src_blkaddr;
+ int *do_replace;
+ pgoff_t olen;
+ int ret;
+
+ while (len) {
+ olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);
+
+ src_blkaddr = kvzalloc(sizeof(block_t) * olen, GFP_KERNEL);
+ if (!src_blkaddr)
+ return -ENOMEM;
+
+ do_replace = kvzalloc(sizeof(int) * olen, GFP_KERNEL);
+ if (!do_replace) {
+ kvfree(src_blkaddr);
+ return -ENOMEM;
+ }
+
+ ret = __read_out_blkaddrs(src_inode, src_blkaddr,
+ do_replace, src, olen);
+ if (ret)
+ goto roll_back;
+
+ ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
+ do_replace, src, dst, olen, full);
+ if (ret)
+ goto roll_back;
+
+ src += olen;
+ dst += olen;
+ len -= olen;
+
+ kvfree(src_blkaddr);
+ kvfree(do_replace);
}
+ return 0;
+
+roll_back:
+ __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, len);
+ kvfree(src_blkaddr);
+ kvfree(do_replace);
return ret;
}
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
- int ret = 0;
+ int ret;
- for (; end < nrpages; start++, end++) {
- f2fs_balance_fs(sbi);
- f2fs_lock_op(sbi);
- ret = __exchange_data_block(inode, end, start, true);
- f2fs_unlock_op(sbi);
- if (ret)
- break;
- }
+ f2fs_balance_fs(sbi, true);
+ f2fs_lock_op(sbi);
+
+ f2fs_drop_extent_tree(inode);
+
+ ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
+ f2fs_unlock_op(sbi);
return ret;
}
if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
return -EINVAL;
- f2fs_balance_fs(F2FS_I_SB(inode));
-
- if (f2fs_has_inline_data(inode)) {
- ret = f2fs_convert_inline_inode(inode);
- if (ret)
- return ret;
- }
+ ret = f2fs_convert_inline_inode(inode);
+ if (ret)
+ return ret;
- pg_start = offset >> PAGE_CACHE_SHIFT;
- pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
+ pg_start = offset >> PAGE_SHIFT;
+ pg_end = (offset + len) >> PAGE_SHIFT;
+ down_write(&F2FS_I(inode)->i_mmap_sem);
/* write out all dirty pages from offset */
ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
if (ret)
- return ret;
+ goto out;
truncate_pagecache(inode, offset);
ret = f2fs_do_collapse(inode, pg_start, pg_end);
if (ret)
- return ret;
+ goto out;
/* write out all moved pages, if possible */
filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
ret = truncate_blocks(inode, new_size, true);
if (!ret)
- i_size_write(inode, new_size);
+ f2fs_i_size_write(inode, new_size);
+
+out:
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+ return ret;
+}
+
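+/*
+ * Zero a range by first reserving blocks for the unallocated entries,
+ * then invalidating any existing blocks and marking every address as
+ * NEW_ADDR, so reads return zeroes without touching the page cache.
+ */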
+static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
+ pgoff_t end)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+ pgoff_t index = start;
+ unsigned int ofs_in_node = dn->ofs_in_node;
+ blkcnt_t count = 0;
+ int ret;
+
+ for (; index < end; index++, dn->ofs_in_node++) {
+ if (datablock_addr(dn->inode, dn->node_page,
+ dn->ofs_in_node) == NULL_ADDR)
+ count++;
+ }
+
+ dn->ofs_in_node = ofs_in_node;
+ ret = reserve_new_blocks(dn, count);
+ if (ret)
+ return ret;
+
+ dn->ofs_in_node = ofs_in_node;
+ for (index = start; index < end; index++, dn->ofs_in_node++) {
+ dn->data_blkaddr = datablock_addr(dn->inode,
+ dn->node_page, dn->ofs_in_node);
+ /*
+ * reserve_new_blocks does not guarantee that every block
+ * in the range was allocated.
+ */
+ if (dn->data_blkaddr == NULL_ADDR) {
+ ret = -ENOSPC;
+ break;
+ }
+ if (dn->data_blkaddr != NEW_ADDR) {
+ invalidate_blocks(sbi, dn->data_blkaddr);
+ dn->data_blkaddr = NEW_ADDR;
+ set_data_blkaddr(dn);
+ }
+ }
+
+ f2fs_update_extent_cache_range(dn, start, 0, index - start);
return ret;
}
if (ret)
return ret;
- f2fs_balance_fs(sbi);
-
- if (f2fs_has_inline_data(inode)) {
- ret = f2fs_convert_inline_inode(inode);
- if (ret)
- return ret;
- }
+ ret = f2fs_convert_inline_inode(inode);
+ if (ret)
+ return ret;
+ down_write(&F2FS_I(inode)->i_mmap_sem);
ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
if (ret)
- return ret;
+ goto out_sem;
truncate_pagecache_range(inode, offset, offset + len - 1);
- pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
- pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+ pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+ pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
- off_start = offset & (PAGE_CACHE_SIZE - 1);
- off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+ off_start = offset & (PAGE_SIZE - 1);
+ off_end = (offset + len) & (PAGE_SIZE - 1);
if (pg_start == pg_end) {
ret = fill_zero(inode, pg_start, off_start,
off_end - off_start);
if (ret)
- return ret;
+ goto out_sem;
- if (offset + len > new_size)
- new_size = offset + len;
new_size = max_t(loff_t, new_size, offset + len);
} else {
if (off_start) {
ret = fill_zero(inode, pg_start++, off_start,
- PAGE_CACHE_SIZE - off_start);
+ PAGE_SIZE - off_start);
if (ret)
- return ret;
+ goto out_sem;
new_size = max_t(loff_t, new_size,
- (loff_t)pg_start << PAGE_CACHE_SHIFT);
+ (loff_t)pg_start << PAGE_SHIFT);
}
- for (index = pg_start; index < pg_end; index++) {
+ for (index = pg_start; index < pg_end;) {
struct dnode_of_data dn;
- struct page *ipage;
+ unsigned int end_offset;
+ pgoff_t end;
f2fs_lock_op(sbi);
- ipage = get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage)) {
- ret = PTR_ERR(ipage);
- f2fs_unlock_op(sbi);
- goto out;
- }
-
- set_new_dnode(&dn, inode, ipage, NULL, 0);
- ret = f2fs_reserve_block(&dn, index);
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
if (ret) {
f2fs_unlock_op(sbi);
goto out;
}
- if (dn.data_blkaddr != NEW_ADDR) {
- invalidate_blocks(sbi, dn.data_blkaddr);
+ end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end = min(pg_end, end_offset - dn.ofs_in_node + index);
- dn.data_blkaddr = NEW_ADDR;
- set_data_blkaddr(&dn);
-
- dn.data_blkaddr = NULL_ADDR;
- f2fs_update_extent_cache(&dn);
- }
+ ret = f2fs_do_zero_range(&dn, index, end);
f2fs_put_dnode(&dn);
f2fs_unlock_op(sbi);
+ f2fs_balance_fs(sbi, dn.node_changed);
+
+ if (ret)
+ goto out;
+
+ index = end;
new_size = max_t(loff_t, new_size,
- (loff_t)(index + 1) << PAGE_CACHE_SHIFT);
+ (loff_t)index << PAGE_SHIFT);
}
if (off_end) {
}
out:
- if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) {
- i_size_write(inode, new_size);
- mark_inode_dirty(inode);
- update_inode_page(inode);
- }
+ if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
+ f2fs_i_size_write(inode, new_size);
+out_sem:
+ up_write(&F2FS_I(inode)->i_mmap_sem);
return ret;
}
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- pgoff_t pg_start, pg_end, delta, nrpages, idx;
+ pgoff_t nr, pg_start, pg_end, delta, idx;
loff_t new_size;
int ret = 0;
new_size = i_size_read(inode) + len;
- if (new_size > inode->i_sb->s_maxbytes)
- return -EFBIG;
+ ret = inode_newsize_ok(inode, new_size);
+ if (ret)
+ return ret;
if (offset >= i_size_read(inode))
return -EINVAL;
if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
return -EINVAL;
- f2fs_balance_fs(sbi);
+ ret = f2fs_convert_inline_inode(inode);
+ if (ret)
+ return ret;
- if (f2fs_has_inline_data(inode)) {
- ret = f2fs_convert_inline_inode(inode);
- if (ret)
- return ret;
- }
+ f2fs_balance_fs(sbi, true);
+ down_write(&F2FS_I(inode)->i_mmap_sem);
ret = truncate_blocks(inode, i_size_read(inode), true);
if (ret)
- return ret;
+ goto out;
/* write out all dirty pages from offset */
ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
if (ret)
- return ret;
+ goto out;
truncate_pagecache(inode, offset);
- pg_start = offset >> PAGE_CACHE_SHIFT;
- pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
+ pg_start = offset >> PAGE_SHIFT;
+ pg_end = (offset + len) >> PAGE_SHIFT;
delta = pg_end - pg_start;
- nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+ idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+
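+ /*
+ * Shift blocks toward the end of the file, walking backwards in
+ * chunks of at most 'delta' pages so the source and destination
+ * ranges of one exchange never overlap.
+ */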
+ while (!ret && idx > pg_start) {
+ nr = idx - pg_start;
+ if (nr > delta)
+ nr = delta;
+ idx -= nr;
- for (idx = nrpages - 1; idx >= pg_start && idx != -1; idx--) {
f2fs_lock_op(sbi);
- ret = __exchange_data_block(inode, idx, idx + delta, false);
+ f2fs_drop_extent_tree(inode);
+
+ ret = __exchange_data_block(inode, inode, idx,
+ idx + delta, nr, false);
f2fs_unlock_op(sbi);
- if (ret)
- break;
}
/* write out all moved pages, if possible */
truncate_pagecache(inode, offset);
if (!ret)
- i_size_write(inode, new_size);
+ f2fs_i_size_write(inode, new_size);
+out:
+ up_write(&F2FS_I(inode)->i_mmap_sem);
return ret;
}
loff_t len, int mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- pgoff_t index, pg_start, pg_end;
+ struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
+ pgoff_t pg_end;
loff_t new_size = i_size_read(inode);
- loff_t off_start, off_end;
- int ret = 0;
+ loff_t off_end;
+ int err;
- f2fs_balance_fs(sbi);
+ err = inode_newsize_ok(inode, (len + offset));
+ if (err)
+ return err;
- ret = inode_newsize_ok(inode, (len + offset));
- if (ret)
- return ret;
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
- if (f2fs_has_inline_data(inode)) {
- ret = f2fs_convert_inline_inode(inode);
- if (ret)
- return ret;
- }
+ f2fs_balance_fs(sbi, true);
- pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
- pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+ pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
+ off_end = (offset + len) & (PAGE_SIZE - 1);
- off_start = offset & (PAGE_CACHE_SIZE - 1);
- off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+ map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
+ map.m_len = pg_end - map.m_lblk;
+ if (off_end)
+ map.m_len++;
- f2fs_lock_op(sbi);
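+ /* Preallocate the whole requested range with a single mapping call. */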
+ err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+ if (err) {
+ pgoff_t last_off;
- for (index = pg_start; index <= pg_end; index++) {
- struct dnode_of_data dn;
+ if (!map.m_len)
+ return err;
- if (index == pg_end && !off_end)
- goto noalloc;
+ last_off = map.m_lblk + map.m_len - 1;
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- ret = f2fs_reserve_block(&dn, index);
- if (ret)
- break;
-noalloc:
- if (pg_start == pg_end)
- new_size = offset + len;
- else if (index == pg_start && off_start)
- new_size = (loff_t)(index + 1) << PAGE_CACHE_SHIFT;
- else if (index == pg_end)
- new_size = ((loff_t)index << PAGE_CACHE_SHIFT) +
- off_end;
- else
- new_size += PAGE_CACHE_SIZE;
- }
-
- if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- i_size_read(inode) < new_size) {
- i_size_write(inode, new_size);
- mark_inode_dirty(inode);
- update_inode_page(inode);
+ /* update new size to the failed position */
+ new_size = (last_off == pg_end) ? offset + len:
+ (loff_t)(last_off + 1) << PAGE_SHIFT;
+ } else {
+ new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
}
- f2fs_unlock_op(sbi);
- return ret;
+ if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
+ f2fs_i_size_write(inode, new_size);
+
+ return err;
}
static long f2fs_fallocate(struct file *file, int mode,
FALLOC_FL_INSERT_RANGE))
return -EOPNOTSUPP;
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
if (mode & FALLOC_FL_PUNCH_HOLE) {
if (offset >= inode->i_size)
}
if (!ret) {
- inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- mark_inode_dirty(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
+ f2fs_mark_inode_dirty_sync(inode, false);
+ if (mode & FALLOC_FL_KEEP_SIZE)
+ file_set_keep_isize(inode);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
}
out:
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
trace_f2fs_fallocate(inode, mode, offset, len, ret);
return ret;
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
+ /*
+ * f2fs_release_file is called on every close. So we should not
+ * drop any in-memory pages on a close issued by another process.
+ */
+ if (!(filp->f_mode & FMODE_WRITE) ||
+ atomic_read(&inode->i_writecount) != 1)
+ return 0;
+
/* some remained atomic pages should discarded */
if (f2fs_is_atomic_file(inode))
- commit_inmem_pages(inode, true);
+ drop_inmem_pages(inode);
if (f2fs_is_volatile_file(inode)) {
- set_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
+ clear_inode_flag(inode, FI_VOLATILE_FILE);
+ stat_dec_volatile_write(inode);
+ set_inode_flag(inode, FI_DROP_CACHE);
filemap_fdatawrite(inode->i_mapping);
- clear_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
+ clear_inode_flag(inode, FI_DROP_CACHE);
}
return 0;
}
-#define F2FS_REG_FLMASK (~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
-#define F2FS_OTHER_FLMASK (FS_NODUMP_FL | FS_NOATIME_FL)
-
-static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
+static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
- if (S_ISDIR(mode))
- return flags;
- else if (S_ISREG(mode))
- return flags & F2FS_REG_FLMASK;
- else
- return flags & F2FS_OTHER_FLMASK;
+ struct inode *inode = file_inode(file);
+
+ /*
+ * If the process doing a transaction crashes, we should roll back.
+ * Otherwise, other readers/writers can see a corrupted database until
+ * all the writers close the file. Since this has to happen before the
+ * file lock is dropped, it needs to be done in ->flush.
+ */
+ if (f2fs_is_atomic_file(inode) &&
+ F2FS_I(inode)->inmem_task == current)
+ drop_inmem_pages(inode);
+ return 0;
}
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct f2fs_inode_info *fi = F2FS_I(inode);
- unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
+ unsigned int flags;
unsigned int oldflags;
int ret;
+ if (!inode_owner_or_capable(inode))
+ return -EACCES;
+
+ if (get_user(flags, (int __user *)arg))
+ return -EFAULT;
+
ret = mnt_want_write_file(filp);
if (ret)
return ret;
- if (!inode_owner_or_capable(inode)) {
- ret = -EACCES;
- goto out;
- }
+ inode_lock(inode);
- if (get_user(flags, (int __user *)arg)) {
- ret = -EFAULT;
- goto out;
+ /* Is it quota file? Do not allow user to mess with it */
+ if (IS_NOQUOTA(inode)) {
+ ret = -EPERM;
+ goto unlock_out;
}
flags = f2fs_mask_flags(inode->i_mode, flags);
- mutex_lock(&inode->i_mutex);
-
oldflags = fi->i_flags;
if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
if (!capable(CAP_LINUX_IMMUTABLE)) {
- mutex_unlock(&inode->i_mutex);
ret = -EPERM;
- goto out;
+ goto unlock_out;
}
}
flags = flags & FS_FL_USER_MODIFIABLE;
flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
fi->i_flags = flags;
- mutex_unlock(&inode->i_mutex);
+ inode->i_ctime = current_time(inode);
f2fs_set_inode_flags(inode);
- inode->i_ctime = CURRENT_TIME;
- mark_inode_dirty(inode);
-out:
+ f2fs_mark_inode_dirty_sync(inode, false);
+unlock_out:
+ inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
}
if (!inode_owner_or_capable(inode))
return -EACCES;
- f2fs_balance_fs(F2FS_I_SB(inode));
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ inode_lock(inode);
if (f2fs_is_atomic_file(inode))
- return 0;
+ goto out;
ret = f2fs_convert_inline_inode(inode);
if (ret)
- return ret;
+ goto out;
- set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
- return 0;
+ set_inode_flag(inode, FI_ATOMIC_FILE);
+ set_inode_flag(inode, FI_HOT_DATA);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+
+ if (!get_dirty_pages(inode))
+ goto inc_stat;
+
+ f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
+ "Unexpected flush for atomic writes: ino=%lu, npages=%u",
+ inode->i_ino, get_dirty_pages(inode));
+ ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
+ if (ret) {
+ clear_inode_flag(inode, FI_ATOMIC_FILE);
+ clear_inode_flag(inode, FI_HOT_DATA);
+ goto out;
+ }
+
+inc_stat:
+ F2FS_I(inode)->inmem_task = current;
+ stat_inc_atomic_write(inode);
+ stat_update_max_atomic_write(inode);
+out:
+ inode_unlock(inode);
+ mnt_drop_write_file(filp);
+ return ret;
}
static int f2fs_ioc_commit_atomic_write(struct file *filp)
if (!inode_owner_or_capable(inode))
return -EACCES;
- if (f2fs_is_volatile_file(inode))
- return 0;
-
ret = mnt_want_write_file(filp);
if (ret)
return ret;
+ inode_lock(inode);
+
+ if (f2fs_is_volatile_file(inode))
+ goto err_out;
+
if (f2fs_is_atomic_file(inode)) {
- clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
- ret = commit_inmem_pages(inode, false);
+ ret = commit_inmem_pages(inode);
if (ret)
goto err_out;
- }
- ret = f2fs_sync_file(filp, 0, LLONG_MAX, 0);
+ ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
+ if (!ret) {
+ clear_inode_flag(inode, FI_ATOMIC_FILE);
+ clear_inode_flag(inode, FI_HOT_DATA);
+ stat_dec_atomic_write(inode);
+ }
+ } else {
+ ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
+ }
err_out:
+ inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
}
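
For reference, a minimal userspace sketch of how the start/commit pair above is meant to be driven, in the SQLite style this interface targets. The ioctl definitions are the ones this series declares in f2fs.h (F2FS_IOCTL_MAGIC is 0xf5); the path and payload are illustrative only, and a close without commit falls back to the rollback in f2fs_file_flush():

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#define F2FS_IOCTL_MAGIC		0xf5
	#define F2FS_IOC_START_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 1)
	#define F2FS_IOC_COMMIT_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 2)

	int atomic_update(void)
	{
		const char buf[] = "transaction payload";	/* illustrative */
		int fd = open("/data/db/journal", O_RDWR);	/* illustrative */

		if (fd < 0)
			return -1;
		/* pin all subsequent writes in memory until commit */
		if (ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE) < 0)
			goto abort;
		if (write(fd, buf, sizeof(buf)) != sizeof(buf))
			goto abort;
		/* write the pinned pages and sync them in one step */
		if (ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE) < 0)
			goto abort;
		close(fd);
		return 0;
	abort:
		/* closing without commit drops the in-memory pages via ->flush */
		close(fd);
		return -1;
	}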
if (!inode_owner_or_capable(inode))
return -EACCES;
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ inode_lock(inode);
+
if (f2fs_is_volatile_file(inode))
- return 0;
+ goto out;
ret = f2fs_convert_inline_inode(inode);
if (ret)
- return ret;
+ goto out;
- set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
- return 0;
+ stat_inc_volatile_write(inode);
+ stat_update_max_volatile_write(inode);
+
+ set_inode_flag(inode, FI_VOLATILE_FILE);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+out:
+ inode_unlock(inode);
+ mnt_drop_write_file(filp);
+ return ret;
}
static int f2fs_ioc_release_volatile_write(struct file *filp)
{
struct inode *inode = file_inode(filp);
+ int ret;
if (!inode_owner_or_capable(inode))
return -EACCES;
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ inode_lock(inode);
+
if (!f2fs_is_volatile_file(inode))
- return 0;
+ goto out;
- if (!f2fs_is_first_block_written(inode))
- return truncate_partial_data_page(inode, 0, true);
+ if (!f2fs_is_first_block_written(inode)) {
+ ret = truncate_partial_data_page(inode, 0, true);
+ goto out;
+ }
- return punch_hole(inode, 0, F2FS_BLKSIZE);
+ ret = punch_hole(inode, 0, F2FS_BLKSIZE);
+out:
+ inode_unlock(inode);
+ mnt_drop_write_file(filp);
+ return ret;
}
static int f2fs_ioc_abort_volatile_write(struct file *filp)
if (ret)
return ret;
- f2fs_balance_fs(F2FS_I_SB(inode));
+ inode_lock(inode);
+
+ if (f2fs_is_atomic_file(inode))
+ drop_inmem_pages(inode);
+ if (f2fs_is_volatile_file(inode)) {
+ clear_inode_flag(inode, FI_VOLATILE_FILE);
+ stat_dec_volatile_write(inode);
+ ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
+ }
- clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
- clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
- commit_inmem_pages(inode, true);
+ inode_unlock(inode);
mnt_drop_write_file(filp);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return ret;
}
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct super_block *sb = sbi->sb;
__u32 in;
+ int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (get_user(in, (__u32 __user *)arg))
return -EFAULT;
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
switch (in) {
case F2FS_GOING_DOWN_FULLSYNC:
sb = freeze_bdev(sb->s_bdev);
if (sb && !IS_ERR(sb)) {
- f2fs_stop_checkpoint(sbi);
+ f2fs_stop_checkpoint(sbi, false);
thaw_bdev(sb->s_bdev, sb);
}
break;
case F2FS_GOING_DOWN_METASYNC:
/* do checkpoint only */
f2fs_sync_fs(sb, 1);
- f2fs_stop_checkpoint(sbi);
+ f2fs_stop_checkpoint(sbi, false);
break;
case F2FS_GOING_DOWN_NOSYNC:
- f2fs_stop_checkpoint(sbi);
+ f2fs_stop_checkpoint(sbi, false);
break;
case F2FS_GOING_DOWN_METAFLUSH:
- sync_meta_pages(sbi, META, LONG_MAX);
- f2fs_stop_checkpoint(sbi);
+ sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
+ f2fs_stop_checkpoint(sbi, false);
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
- return 0;
+ f2fs_update_time(sbi, REQ_TIME);
+out:
+ mnt_drop_write_file(filp);
+ return ret;
}
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
sizeof(range)))
return -EFAULT;
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
range.minlen = max((unsigned int)range.minlen,
q->limits.discard_granularity);
ret = f2fs_trim_fs(F2FS_SB(sb), &range);
+ mnt_drop_write_file(filp);
if (ret < 0)
return ret;
if (copy_to_user((struct fstrim_range __user *)arg, &range,
sizeof(range)))
return -EFAULT;
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return 0;
}
static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- struct f2fs_encryption_policy policy;
struct inode *inode = file_inode(filp);
- int err;
-
- if (copy_from_user(&policy, (struct f2fs_encryption_policy __user *)arg,
- sizeof(policy)))
- return -EFAULT;
-
- mutex_lock(&inode->i_mutex);
- err = f2fs_process_policy(&policy, inode);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
- mutex_unlock(&inode->i_mutex);
-
- return err;
-#else
- return -EOPNOTSUPP;
-#endif
+ return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}
static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- struct f2fs_encryption_policy policy;
- struct inode *inode = file_inode(filp);
- int err;
-
- err = f2fs_get_policy(inode, &policy);
- if (err)
- return err;
-
- if (copy_to_user((struct f2fs_encryption_policy __user *)arg, &policy,
- sizeof(policy)))
- return -EFAULT;
- return 0;
-#else
- return -EOPNOTSUPP;
-#endif
+ return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}
static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
err = f2fs_commit_super(sbi, false);
-
- mnt_drop_write_file(filp);
if (err) {
/* undo new data */
memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
+ mnt_drop_write_file(filp);
return err;
}
+ mnt_drop_write_file(filp);
got_it:
if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
16))
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
__u32 sync;
+ int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (f2fs_readonly(sbi->sb))
return -EROFS;
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
if (!sync) {
- if (!mutex_trylock(&sbi->gc_mutex))
- return -EBUSY;
+ if (!mutex_trylock(&sbi->gc_mutex)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ } else {
+ mutex_lock(&sbi->gc_mutex);
+ }
+
+ ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
+out:
+ mnt_drop_write_file(filp);
+ return ret;
+}
+
+static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_gc_range range;
+ u64 end;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
+ sizeof(range)))
+ return -EFAULT;
+
+ if (f2fs_readonly(sbi->sb))
+ return -EROFS;
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+	end = range.start + range.len;
+	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) {
+		ret = -EINVAL;
+		/* don't return directly: drop the write ref taken above */
+		goto out;
+	}
+do_more:
+ if (!range.sync) {
+ if (!mutex_trylock(&sbi->gc_mutex)) {
+ ret = -EBUSY;
+ goto out;
+ }
} else {
mutex_lock(&sbi->gc_mutex);
}
- return f2fs_gc(sbi, sync);
+ ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
+ range.start += sbi->blocks_per_seg;
+ if (range.start <= end)
+ goto do_more;
+out:
+ mnt_drop_write_file(filp);
+ return ret;
}
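
A hedged sketch of driving the new range GC from userspace; the struct layout and ioctl number follow this series' f2fs.h, and the mountpoint path plus block range are illustrative:

	#include <fcntl.h>
	#include <linux/types.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	struct f2fs_gc_range {
		__u32 sync;	/* nonzero: block on each GC round */
		__u64 start;	/* first block address to collect */
		__u64 len;	/* length of the range in blocks */
	};

	#define F2FS_IOCTL_MAGIC	0xf5
	#define F2FS_IOC_GARBAGE_COLLECT_RANGE \
		_IOW(F2FS_IOCTL_MAGIC, 11, struct f2fs_gc_range)

	int gc_region(const char *mntpath, __u64 start, __u64 len)
	{
		struct f2fs_gc_range range = { .sync = 1, .start = start, .len = len };
		int fd = open(mntpath, O_RDONLY);
		int ret;

		if (fd < 0)
			return -1;
		/* the kernel iterates segment by segment (the do_more: loop above) */
		ret = ioctl(fd, F2FS_IOC_GARBAGE_COLLECT_RANGE, &range);
		close(fd);
		return ret;
	}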
static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct cp_control cpc;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (f2fs_readonly(sbi->sb))
+ return -EROFS;
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ ret = f2fs_sync_fs(sbi->sb, 1);
+
+ mnt_drop_write_file(filp);
+ return ret;
+}
+
+static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
+ struct file *filp,
+ struct f2fs_defragment *range)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
+ struct extent_info ei = {0,0,0};
+ pgoff_t pg_start, pg_end;
+ unsigned int blk_per_seg = sbi->blocks_per_seg;
+ unsigned int total = 0, sec_num;
+ block_t blk_end = 0;
+ bool fragmented = false;
+ int err;
+
+ /* if in-place-update policy is enabled, don't waste time here */
+ if (need_inplace_update_policy(inode, NULL))
+ return -EINVAL;
+
+ pg_start = range->start >> PAGE_SHIFT;
+ pg_end = (range->start + range->len) >> PAGE_SHIFT;
+
+ f2fs_balance_fs(sbi, true);
+
+ inode_lock(inode);
+
+ /* writeback all dirty pages in the range */
+ err = filemap_write_and_wait_range(inode->i_mapping, range->start,
+ range->start + range->len - 1);
+ if (err)
+ goto out;
+
+ /*
+	 * look up mapping info in the extent cache; skip defragmenting if the
+	 * physical block addresses are contiguous.
+ */
+ if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
+ if (ei.fofs + ei.len >= pg_end)
+ goto out;
+ }
+
+ map.m_lblk = pg_start;
+
+ /*
+	 * look up mapping info in the dnode page cache; skip defragmenting if
+	 * all physical block addresses are contiguous, even if there are
+	 * hole(s) in the logical blocks.
+ */
+ while (map.m_lblk < pg_end) {
+ map.m_len = pg_end - map.m_lblk;
+ err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
+ if (err)
+ goto out;
+
+ if (!(map.m_flags & F2FS_MAP_FLAGS)) {
+ map.m_lblk++;
+ continue;
+ }
+
+ if (blk_end && blk_end != map.m_pblk) {
+ fragmented = true;
+ break;
+ }
+ blk_end = map.m_pblk + map.m_len;
+
+ map.m_lblk += map.m_len;
+ }
+
+ if (!fragmented)
+ goto out;
+
+ map.m_lblk = pg_start;
+ map.m_len = pg_end - pg_start;
+
+ sec_num = (map.m_len + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);
+
+ /*
+	 * make sure there are enough free sections for LFS allocation; this
+	 * avoids running defragmentation in SSR mode when free sections are
+	 * being consumed intensively
+ */
+ if (has_not_enough_free_secs(sbi, 0, sec_num)) {
+ err = -EAGAIN;
+ goto out;
+ }
+
+ while (map.m_lblk < pg_end) {
+ pgoff_t idx;
+ int cnt = 0;
+
+do_map:
+ map.m_len = pg_end - map.m_lblk;
+ err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
+ if (err)
+ goto clear_out;
+
+ if (!(map.m_flags & F2FS_MAP_FLAGS)) {
+ map.m_lblk++;
+ continue;
+ }
+
+ set_inode_flag(inode, FI_DO_DEFRAG);
+
+ idx = map.m_lblk;
+ while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
+ struct page *page;
+
+ page = get_lock_data_page(inode, idx, true);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto clear_out;
+ }
+
+ set_page_dirty(page);
+ f2fs_put_page(page, 1);
+
+ idx++;
+ cnt++;
+ total++;
+ }
+
+ map.m_lblk = idx;
+
+ if (idx < pg_end && cnt < blk_per_seg)
+ goto do_map;
+
+ clear_inode_flag(inode, FI_DO_DEFRAG);
+
+ err = filemap_fdatawrite(inode->i_mapping);
+ if (err)
+ goto out;
+ }
+clear_out:
+ clear_inode_flag(inode, FI_DO_DEFRAG);
+out:
+ inode_unlock(inode);
+ if (!err)
+ range->len = (u64)total << PAGE_SHIFT;
+ return err;
+}
+
+static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_defragment range;
+ int err;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
+ return -EINVAL;
+
if (f2fs_readonly(sbi->sb))
return -EROFS;
- cpc.reason = __get_cp_reason(sbi);
+ if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
+ sizeof(range)))
+ return -EFAULT;
- mutex_lock(&sbi->gc_mutex);
- write_checkpoint(sbi, &cpc);
- mutex_unlock(&sbi->gc_mutex);
+ /* verify alignment of offset & size */
+ if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
+ return -EINVAL;
+
+ if (unlikely((range.start + range.len) >> PAGE_SHIFT >
+ sbi->max_file_blocks))
+ return -EINVAL;
+
+ err = mnt_want_write_file(filp);
+ if (err)
+ return err;
+
+ err = f2fs_defragment_range(sbi, filp, &range);
+ mnt_drop_write_file(filp);
+
+ f2fs_update_time(sbi, REQ_TIME);
+ if (err < 0)
+ return err;
+
+ if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
+ sizeof(range)))
+ return -EFAULT;
return 0;
}
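
To illustrate the interface, a sketch of invoking the defragmenter over the first megabyte of a file. The struct layout and ioctl number are as this series declares them in f2fs.h; the path and length are illustrative, and both offset and length must be block aligned as checked above:

	#include <fcntl.h>
	#include <linux/types.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	struct f2fs_defragment {
		__u64 start;	/* byte offset, block aligned */
		__u64 len;	/* byte length, block aligned */
	};

	#define F2FS_IOCTL_MAGIC	0xf5
	#define F2FS_IOC_DEFRAGMENT	_IOWR(F2FS_IOCTL_MAGIC, 8, struct f2fs_defragment)

	int defrag_first_mb(const char *path)
	{
		struct f2fs_defragment range = { .start = 0, .len = 1 << 20 };
		int fd = open(path, O_RDWR);
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, F2FS_IOC_DEFRAGMENT, &range);
		if (!ret)
			/* on success, range.len is rewritten with the bytes redirtied */
			printf("relocated %llu bytes\n", (unsigned long long)range.len);
		close(fd);
		return ret;
	}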
+static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out, size_t len)
+{
+ struct inode *src = file_inode(file_in);
+ struct inode *dst = file_inode(file_out);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(src);
+ size_t olen = len, dst_max_i_size = 0;
+ size_t dst_osize;
+ int ret;
+
+ if (file_in->f_path.mnt != file_out->f_path.mnt ||
+ src->i_sb != dst->i_sb)
+ return -EXDEV;
+
+ if (unlikely(f2fs_readonly(src->i_sb)))
+ return -EROFS;
+
+ if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
+ return -EINVAL;
+
+ if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
+ return -EOPNOTSUPP;
+
+ if (src == dst) {
+ if (pos_in == pos_out)
+ return 0;
+ if (pos_out > pos_in && pos_out < pos_in + len)
+ return -EINVAL;
+ }
+
+ inode_lock(src);
+ if (src != dst) {
+ if (!inode_trylock(dst)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ }
+
+ ret = -EINVAL;
+ if (pos_in + len > src->i_size || pos_in + len < pos_in)
+ goto out_unlock;
+ if (len == 0)
+ olen = len = src->i_size - pos_in;
+ if (pos_in + len == src->i_size)
+ len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
+ if (len == 0) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ dst_osize = dst->i_size;
+ if (pos_out + olen > dst->i_size)
+ dst_max_i_size = pos_out + olen;
+
+ /* verify the end result is block aligned */
+ if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
+ !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
+ !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
+ goto out_unlock;
+
+ ret = f2fs_convert_inline_inode(src);
+ if (ret)
+ goto out_unlock;
+
+ ret = f2fs_convert_inline_inode(dst);
+ if (ret)
+ goto out_unlock;
+
+ /* write out all dirty pages from offset */
+ ret = filemap_write_and_wait_range(src->i_mapping,
+ pos_in, pos_in + len);
+ if (ret)
+ goto out_unlock;
+
+ ret = filemap_write_and_wait_range(dst->i_mapping,
+ pos_out, pos_out + len);
+ if (ret)
+ goto out_unlock;
+
+ f2fs_balance_fs(sbi, true);
+ f2fs_lock_op(sbi);
+ ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
+ pos_out >> F2FS_BLKSIZE_BITS,
+ len >> F2FS_BLKSIZE_BITS, false);
+
+ if (!ret) {
+ if (dst_max_i_size)
+ f2fs_i_size_write(dst, dst_max_i_size);
+ else if (dst_osize != dst->i_size)
+ f2fs_i_size_write(dst, dst_osize);
+ }
+ f2fs_unlock_op(sbi);
+out_unlock:
+ if (src != dst)
+ inode_unlock(dst);
+out:
+ inode_unlock(src);
+ return ret;
+}
+
+static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
+{
+ struct f2fs_move_range range;
+ struct fd dst;
+ int err;
+
+ if (!(filp->f_mode & FMODE_READ) ||
+ !(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
+ if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
+ sizeof(range)))
+ return -EFAULT;
+
+ dst = fdget(range.dst_fd);
+ if (!dst.file)
+ return -EBADF;
+
+ if (!(dst.file->f_mode & FMODE_WRITE)) {
+ err = -EBADF;
+ goto err_out;
+ }
+
+ err = mnt_want_write_file(filp);
+ if (err)
+ goto err_out;
+
+ err = f2fs_move_file_range(filp, range.pos_in, dst.file,
+ range.pos_out, range.len);
+
+ mnt_drop_write_file(filp);
+ if (err)
+ goto err_out;
+
+ if (copy_to_user((struct f2fs_move_range __user *)arg,
+ &range, sizeof(range)))
+ err = -EFAULT;
+err_out:
+ fdput(dst);
+ return err;
+}
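
A brief sketch of the move-range call. Per the FMODE checks above, the source descriptor must be opened read/write and the destination must be writable; offsets and length must be block aligned, as enforced by f2fs_move_file_range(). The struct layout matches this series' f2fs.h, and the 4KB length assumes the default block size:

	#include <linux/types.h>
	#include <sys/ioctl.h>

	struct f2fs_move_range {
		__u32 dst_fd;	/* destination file descriptor */
		__u64 pos_in;	/* source offset, block aligned */
		__u64 pos_out;	/* destination offset, block aligned */
		__u64 len;	/* length in bytes */
	};

	#define F2FS_IOCTL_MAGIC	0xf5
	#define F2FS_IOC_MOVE_RANGE	_IOWR(F2FS_IOCTL_MAGIC, 9, struct f2fs_move_range)

	/* donate the first block of src_fd to dst_fd at offset 0 */
	int move_first_block(int src_fd, int dst_fd)
	{
		struct f2fs_move_range mr = {
			.dst_fd = dst_fd, .pos_in = 0, .pos_out = 0, .len = 4096,
		};
		return ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr);
	}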
+
+static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct sit_info *sm = SIT_I(sbi);
+ unsigned int start_segno = 0, end_segno = 0;
+ unsigned int dev_start_segno = 0, dev_end_segno = 0;
+ struct f2fs_flush_device range;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (f2fs_readonly(sbi->sb))
+ return -EROFS;
+
+ if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
+ sizeof(range)))
+ return -EFAULT;
+
+ if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
+ sbi->segs_per_sec != 1) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "Can't flush %u in %d for segs_per_sec %u != 1\n",
+ range.dev_num, sbi->s_ndevs,
+ sbi->segs_per_sec);
+ return -EINVAL;
+ }
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ if (range.dev_num != 0)
+ dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
+ dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
+
+ start_segno = sm->last_victim[FLUSH_DEVICE];
+ if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
+ start_segno = dev_start_segno;
+ end_segno = min(start_segno + range.segments, dev_end_segno);
+
+ while (start_segno < end_segno) {
+ if (!mutex_trylock(&sbi->gc_mutex)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ sm->last_victim[GC_CB] = end_segno + 1;
+ sm->last_victim[GC_GREEDY] = end_segno + 1;
+ sm->last_victim[ALLOC_NEXT] = end_segno + 1;
+ ret = f2fs_gc(sbi, true, true, start_segno);
+ if (ret == -EAGAIN)
+ ret = 0;
+ else if (ret < 0)
+ break;
+ start_segno++;
+ }
+out:
+ mnt_drop_write_file(filp);
+ return ret;
+}
+
+static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
+
+	/* Android's SQLite probes this bit to enable atomic writes, so always advertise it. */
+ sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
+
+ return put_user(sb_feature, (u32 __user *)arg);
+}
+
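And a sketch of the feature query from userspace; the ioctl number and the F2FS_FEATURE_ATOMIC_WRITE bit (0x0004) are taken from this series' f2fs.h and should be verified against your headers:

	#include <fcntl.h>
	#include <linux/types.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#define F2FS_IOCTL_MAGIC		0xf5
	#define F2FS_IOC_GET_FEATURES		_IOR(F2FS_IOCTL_MAGIC, 12, __u32)
	#define F2FS_FEATURE_ATOMIC_WRITE	0x0004

	int supports_atomic_write(const char *path)
	{
		__u32 features = 0;
		int fd = open(path, O_RDONLY);
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, F2FS_IOC_GET_FEATURES, &features);
		close(fd);
		if (ret)
			return -1;
		/* always set on this kernel: the handler above forces the bit */
		return !!(features & F2FS_FEATURE_ATOMIC_WRITE);
	}
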
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
return f2fs_ioc_get_encryption_pwsalt(filp, arg);
case F2FS_IOC_GARBAGE_COLLECT:
return f2fs_ioc_gc(filp, arg);
+ case F2FS_IOC_GARBAGE_COLLECT_RANGE:
+ return f2fs_ioc_gc_range(filp, arg);
case F2FS_IOC_WRITE_CHECKPOINT:
return f2fs_ioc_write_checkpoint(filp, arg);
+ case F2FS_IOC_DEFRAGMENT:
+ return f2fs_ioc_defragment(filp, arg);
+ case F2FS_IOC_MOVE_RANGE:
+ return f2fs_ioc_move_range(filp, arg);
+ case F2FS_IOC_FLUSH_DEVICE:
+ return f2fs_ioc_flush_device(filp, arg);
+ case F2FS_IOC_GET_FEATURES:
+ return f2fs_ioc_get_features(filp, arg);
default:
return -ENOTTY;
}
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
- struct inode *inode = file_inode(iocb->ki_filp);
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct blk_plug plug;
+ ssize_t ret;
- if (f2fs_encrypted_inode(inode) &&
- !f2fs_has_encryption_key(inode) &&
- f2fs_get_encryption_info(inode))
- return -EACCES;
+ inode_lock(inode);
+ ret = generic_write_checks(iocb, from);
+ if (ret > 0) {
+ int err;
+
+ if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
+ set_inode_flag(inode, FI_NO_PREALLOC);
+
+ err = f2fs_preallocate_blocks(iocb, from);
+ if (err) {
+ inode_unlock(inode);
+ return err;
+ }
+ blk_start_plug(&plug);
+ ret = __generic_file_write_iter(iocb, from);
+ blk_finish_plug(&plug);
+ clear_inode_flag(inode, FI_NO_PREALLOC);
- return generic_file_write_iter(iocb, from);
+ if (ret > 0)
+ f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
+ }
+ inode_unlock(inode);
+
+ if (ret > 0) {
+ ssize_t err;
+
+ err = generic_write_sync(file, iocb->ki_pos - ret, ret);
+ if (err < 0)
+ ret = err;
+ }
+ return ret;
}
#ifdef CONFIG_COMPAT
case F2FS_IOC32_SETFLAGS:
cmd = F2FS_IOC_SETFLAGS;
break;
+ case F2FS_IOC32_GETVERSION:
+ cmd = F2FS_IOC_GETVERSION;
+ break;
+ case F2FS_IOC_START_ATOMIC_WRITE:
+ case F2FS_IOC_COMMIT_ATOMIC_WRITE:
+ case F2FS_IOC_START_VOLATILE_WRITE:
+ case F2FS_IOC_RELEASE_VOLATILE_WRITE:
+ case F2FS_IOC_ABORT_VOLATILE_WRITE:
+ case F2FS_IOC_SHUTDOWN:
+ case F2FS_IOC_SET_ENCRYPTION_POLICY:
+ case F2FS_IOC_GET_ENCRYPTION_PWSALT:
+ case F2FS_IOC_GET_ENCRYPTION_POLICY:
+ case F2FS_IOC_GARBAGE_COLLECT:
+ case F2FS_IOC_GARBAGE_COLLECT_RANGE:
+ case F2FS_IOC_WRITE_CHECKPOINT:
+ case F2FS_IOC_DEFRAGMENT:
+ case F2FS_IOC_MOVE_RANGE:
+ case F2FS_IOC_FLUSH_DEVICE:
+ case F2FS_IOC_GET_FEATURES:
+ break;
default:
return -ENOIOCTLCMD;
}
.open = f2fs_file_open,
.release = f2fs_release_file,
.mmap = f2fs_file_mmap,
+ .flush = f2fs_file_flush,
.fsync = f2fs_sync_file,
.fallocate = f2fs_fallocate,
.unlocked_ioctl = f2fs_ioctl,
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
-#include <linux/blkdev.h>
#include "f2fs.h"
#include "node.h"
struct f2fs_sb_info *sbi = data;
struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
- long wait_ms;
+ unsigned int wait_ms;
wait_ms = gc_th->min_sleep_time;
+ set_freezable();
do {
+ wait_event_interruptible_timeout(*wq,
+ kthread_should_stop() || freezing(current) ||
+ gc_th->gc_wake,
+ msecs_to_jiffies(wait_ms));
+
+ /* give it a try one time */
+ if (gc_th->gc_wake)
+ gc_th->gc_wake = 0;
+
if (try_to_freeze())
continue;
- else
- wait_event_interruptible_timeout(*wq,
- kthread_should_stop(),
- msecs_to_jiffies(wait_ms));
if (kthread_should_stop())
break;
continue;
}
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
+ f2fs_show_injection_info(FAULT_CHECKPOINT);
+ f2fs_stop_checkpoint(sbi, false);
+ }
+#endif
+
+ if (!sb_start_write_trylock(sbi->sb))
+ continue;
+
/*
* [GC triggering condition]
* 0. GC is not conducted currently.
* So, I'd like to wait some time to collect dirty segments.
*/
if (!mutex_trylock(&sbi->gc_mutex))
- continue;
+ goto next;
+
+ if (gc_th->gc_urgent) {
+ wait_ms = gc_th->urgent_sleep_time;
+ goto do_gc;
+ }
if (!is_idle(sbi)) {
increase_sleep_time(gc_th, &wait_ms);
mutex_unlock(&sbi->gc_mutex);
- continue;
+ goto next;
}
if (has_enough_invalid_blocks(sbi))
decrease_sleep_time(gc_th, &wait_ms);
else
increase_sleep_time(gc_th, &wait_ms);
-
+do_gc:
stat_inc_bggc_count(sbi);
/* if return value is not zero, no victim was selected */
- if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC)))
+ if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
wait_ms = gc_th->no_gc_sleep_time;
trace_f2fs_background_gc(sbi->sb, wait_ms,
/* balancing f2fs's metadata periodically */
f2fs_balance_fs_bg(sbi);
+next:
+ sb_end_write(sbi->sb);
} while (!kthread_should_stop());
return 0;
dev_t dev = sbi->sb->s_bdev->bd_dev;
int err = 0;
- gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
+ gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
if (!gc_th) {
err = -ENOMEM;
goto out;
}
+ gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
gc_th->gc_idle = 0;
+ gc_th->gc_urgent = 0;
+	gc_th->gc_wake = 0;
sbi->gc_thread = gc_th;
init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
p->ofs_unit = sbi->segs_per_sec;
}
- if (p->max_search > sbi->max_victim_search)
+	/* we need to check every dirty segment in the FG_GC case */
+ if (gc_type != FG_GC && p->max_search > sbi->max_victim_search)
p->max_search = sbi->max_victim_search;
- p->offset = sbi->last_victim[p->gc_mode];
+ /* let's select beginning hot/small space first */
+ if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
+ p->offset = 0;
+ else
+ p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
{
/* SSR allocates in a segment unit */
if (p->alloc_mode == SSR)
- return 1 << sbi->log_blocks_per_seg;
+ return sbi->blocks_per_seg;
if (p->gc_mode == GC_GREEDY)
- return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
+ return 2 * sbi->blocks_per_seg * p->ofs_unit;
else if (p->gc_mode == GC_CB)
return UINT_MAX;
else /* No other gc_mode */
for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
if (sec_usage_check(sbi, secno))
continue;
+
+ if (no_fggc_candidate(sbi, secno))
+ continue;
+
clear_bit(secno, dirty_i->victim_secmap);
- return secno * sbi->segs_per_sec;
+ return GET_SEG_FROM_SEC(sbi, secno);
}
return NULL_SEGNO;
}
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
struct sit_info *sit_i = SIT_I(sbi);
- unsigned int secno = GET_SECNO(sbi, segno);
- unsigned int start = secno * sbi->segs_per_sec;
+ unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+ unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
unsigned long long mtime = 0;
unsigned int vblocks;
unsigned char age = 0;
for (i = 0; i < sbi->segs_per_sec; i++)
mtime += get_seg_entry(sbi, start + i)->mtime;
- vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);
+ vblocks = get_valid_blocks(sbi, segno, true);
mtime = div_u64(mtime, sbi->segs_per_sec);
vblocks = div_u64(vblocks, sbi->segs_per_sec);
return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
+static unsigned int get_greedy_cost(struct f2fs_sb_info *sbi,
+ unsigned int segno)
+{
+ unsigned int valid_blocks =
+ get_valid_blocks(sbi, segno, true);
+
+ return IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
+ valid_blocks * 2 : valid_blocks;
+}
+
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
unsigned int segno, struct victim_sel_policy *p)
{
/* alloc_mode == LFS */
if (p->gc_mode == GC_GREEDY)
- return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
+ return get_greedy_cost(sbi, segno);
else
return get_cb_cost(sbi, segno);
}
+static unsigned int count_bits(const unsigned long *addr,
+ unsigned int offset, unsigned int len)
+{
+ unsigned int end = offset + len, sum = 0;
+
+ while (offset < end) {
+ if (test_bit(offset++, addr))
+ ++sum;
+ }
+ return sum;
+}
+
/*
* This function is called from two paths.
* One is garbage collection and the other is SSR segment selection.
unsigned int *result, int gc_type, int type, char alloc_mode)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ struct sit_info *sm = SIT_I(sbi);
struct victim_sel_policy p;
- unsigned int secno, max_cost;
+ unsigned int secno, last_victim;
unsigned int last_segment = MAIN_SEGS(sbi);
- int nsearched = 0;
+ unsigned int nsearched = 0;
mutex_lock(&dirty_i->seglist_lock);
select_policy(sbi, gc_type, type, &p);
p.min_segno = NULL_SEGNO;
- p.min_cost = max_cost = get_max_cost(sbi, &p);
+ p.min_cost = get_max_cost(sbi, &p);
+
+ if (*result != NULL_SEGNO) {
+ if (IS_DATASEG(get_seg_entry(sbi, *result)->type) &&
+ get_valid_blocks(sbi, *result, false) &&
+ !sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
+ p.min_segno = *result;
+ goto out;
+ }
if (p.max_search == 0)
goto out;
+ last_victim = sm->last_victim[p.gc_mode];
if (p.alloc_mode == LFS && gc_type == FG_GC) {
p.min_segno = check_bg_victims(sbi);
if (p.min_segno != NULL_SEGNO)
segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
if (segno >= last_segment) {
- if (sbi->last_victim[p.gc_mode]) {
- last_segment = sbi->last_victim[p.gc_mode];
- sbi->last_victim[p.gc_mode] = 0;
+ if (sm->last_victim[p.gc_mode]) {
+ last_segment =
+ sm->last_victim[p.gc_mode];
+ sm->last_victim[p.gc_mode] = 0;
p.offset = 0;
continue;
}
}
p.offset = segno + p.ofs_unit;
- if (p.ofs_unit > 1)
+ if (p.ofs_unit > 1) {
p.offset -= segno % p.ofs_unit;
+ nsearched += count_bits(p.dirty_segmap,
+ p.offset - p.ofs_unit,
+ p.ofs_unit);
+ } else {
+ nsearched++;
+ }
- secno = GET_SECNO(sbi, segno);
+ secno = GET_SEC_FROM_SEG(sbi, segno);
if (sec_usage_check(sbi, secno))
- continue;
+ goto next;
if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
- continue;
+ goto next;
+ if (gc_type == FG_GC && p.alloc_mode == LFS &&
+ no_fggc_candidate(sbi, secno))
+ goto next;
cost = get_gc_cost(sbi, segno, &p);
if (p.min_cost > cost) {
p.min_segno = segno;
p.min_cost = cost;
- } else if (unlikely(cost == max_cost)) {
- continue;
}
-
- if (nsearched++ >= p.max_search) {
- sbi->last_victim[p.gc_mode] = segno;
+next:
+ if (nsearched >= p.max_search) {
+ if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
+ sm->last_victim[p.gc_mode] = last_victim + 1;
+ else
+ sm->last_victim[p.gc_mode] = segno + 1;
+ sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
break;
}
}
if (p.min_segno != NULL_SEGNO) {
got_it:
if (p.alloc_mode == LFS) {
- secno = GET_SECNO(sbi, p.min_segno);
+ secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
if (gc_type == FG_GC)
sbi->cur_victim_sec = secno;
else
* On validity, copy that node with cold status, otherwise (invalid node)
* ignore that.
*/
-static int gc_node_segment(struct f2fs_sb_info *sbi,
+static void gc_node_segment(struct f2fs_sb_info *sbi,
struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
- bool initial = true;
struct f2fs_summary *entry;
block_t start_addr;
int off;
+ int phase = 0;
start_addr = START_BLOCK(sbi, segno);
struct node_info ni;
/* stop BG_GC if there is not enough free sections. */
- if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
- return 0;
+ if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
+ return;
if (check_valid_map(sbi, segno, off) == 0)
continue;
- if (initial) {
+ if (phase == 0) {
+ ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
+ META_NAT, true);
+ continue;
+ }
+
+ if (phase == 1) {
ra_node_page(sbi, nid);
continue;
}
+
+ /* phase == 2 */
node_page = get_node_page(sbi, nid);
if (IS_ERR(node_page))
continue;
continue;
}
- /* set page dirty and write it */
- if (gc_type == FG_GC) {
- f2fs_wait_on_page_writeback(node_page, NODE);
- set_page_dirty(node_page);
- } else {
- if (!PageWriteback(node_page))
- set_page_dirty(node_page);
- }
- f2fs_put_page(node_page, 1);
+ move_node_page(node_page, gc_type);
stat_inc_node_blk_count(sbi, 1, gc_type);
}
- if (initial) {
- initial = false;
+ if (++phase < 3)
goto next_step;
- }
-
- if (gc_type == FG_GC) {
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .nr_to_write = LONG_MAX,
- .for_reclaim = 0,
- };
- sync_node_pages(sbi, 0, &wbc);
-
- /* return 1 only if FG_GC succefully reclaimed one */
- if (get_valid_blocks(sbi, segno, 1) == 0)
- return 1;
- }
- return 0;
}
/*
* as indirect or double indirect node blocks, are given, it must be a caller's
* bug.
*/
-block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
+block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
unsigned int bidx;
int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
bidx = node_ofs - 5 - dec;
}
- return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
+ return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
get_node_info(sbi, nid, dni);
if (sum->version != dni->version) {
- f2fs_put_page(node_page, 1);
- return false;
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: valid data with mismatched node version.",
+ __func__);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
}
*nofs = ofs_of_node(node_page);
- source_blkaddr = datablock_addr(node_page, ofs_in_node);
+ source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
f2fs_put_page(node_page, 1);
if (source_blkaddr != blkaddr)
return true;
}
-static void move_encrypted_block(struct inode *inode, block_t bidx)
+/*
+ * Move data block via META_MAPPING while keeping locked data page.
+ * This can be used to move blocks, aka LBAs, directly on disk.
+ */
+static void move_data_block(struct inode *inode, block_t bidx,
+ unsigned int segno, int off)
{
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(inode),
.type = DATA,
- .rw = READ_SYNC,
+ .temp = COLD,
+ .op = REQ_OP_READ,
+ .op_flags = REQ_SYNC,
.encrypted_page = NULL,
+ .in_list = false,
};
struct dnode_of_data dn;
struct f2fs_summary sum;
struct node_info ni;
struct page *page;
+ block_t newaddr;
int err;
/* do not read out */
if (!page)
return;
+ if (!check_valid_map(F2FS_I_SB(inode), segno, off))
+ goto out;
+
+ if (f2fs_is_atomic_file(inode))
+ goto out;
+
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
if (err)
	 * don't cache encrypted data in the meta inode until the previous dirty
	 * data has been written back, to avoid racing between GC and flush.
*/
- f2fs_wait_on_page_writeback(page, DATA);
+ f2fs_wait_on_page_writeback(page, DATA, true);
get_node_info(fio.sbi, dn.nid, &ni);
set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
/* read page */
fio.page = page;
- fio.blk_addr = dn.data_blkaddr;
+ fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
- fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi),
- fio.blk_addr,
- FGP_LOCK|FGP_CREAT,
- GFP_NOFS);
- if (!fio.encrypted_page)
- goto put_out;
+ allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
+ &sum, CURSEG_COLD_DATA, NULL, false);
+
+ fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), newaddr,
+ FGP_LOCK | FGP_CREAT, GFP_NOFS);
+ if (!fio.encrypted_page) {
+ err = -ENOMEM;
+ goto recover_block;
+ }
err = f2fs_submit_page_bio(&fio);
if (err)
/* write page */
lock_page(fio.encrypted_page);
- if (unlikely(!PageUptodate(fio.encrypted_page)))
+ if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
+ err = -EIO;
goto put_page_out;
- if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi)))
+ }
+ if (unlikely(!PageUptodate(fio.encrypted_page))) {
+ err = -EIO;
goto put_page_out;
+ }
set_page_dirty(fio.encrypted_page);
- f2fs_wait_on_page_writeback(fio.encrypted_page, DATA);
+ f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
if (clear_page_dirty_for_io(fio.encrypted_page))
dec_page_count(fio.sbi, F2FS_DIRTY_META);
set_page_writeback(fio.encrypted_page);
/* allocate block address */
- f2fs_wait_on_page_writeback(dn.node_page, NODE);
- allocate_data_block(fio.sbi, NULL, fio.blk_addr,
- &fio.blk_addr, &sum, CURSEG_COLD_DATA);
- fio.rw = WRITE_SYNC;
- f2fs_submit_page_mbio(&fio);
-
- dn.data_blkaddr = fio.blk_addr;
- set_data_blkaddr(&dn);
- f2fs_update_extent_cache(&dn);
- set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
+ f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
+
+ fio.op = REQ_OP_WRITE;
+ fio.op_flags = REQ_SYNC | REQ_NOIDLE;
+ fio.new_blkaddr = newaddr;
+ f2fs_submit_page_write(&fio);
+
+ f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);
+
+ f2fs_update_data_blkaddr(&dn, newaddr);
+ set_inode_flag(inode, FI_APPEND_WRITE);
if (page->index == 0)
- set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
+ set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
f2fs_put_page(fio.encrypted_page, 1);
+recover_block:
+ if (err)
+ __f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
+ true, true);
put_out:
f2fs_put_dnode(&dn);
out:
f2fs_put_page(page, 1);
}
-static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
+static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
+ unsigned int segno, int off)
{
struct page *page;
if (IS_ERR(page))
return;
+ if (!check_valid_map(F2FS_I_SB(inode), segno, off))
+ goto out;
+
+ if (f2fs_is_atomic_file(inode))
+ goto out;
+
if (gc_type == BG_GC) {
if (PageWriteback(page))
goto out;
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(inode),
.type = DATA,
- .rw = WRITE_SYNC,
+ .temp = COLD,
+ .op = REQ_OP_WRITE,
+ .op_flags = REQ_SYNC,
+ .old_blkaddr = NULL_ADDR,
.page = page,
.encrypted_page = NULL,
+ .need_lock = LOCK_REQ,
+ .io_type = FS_GC_DATA_IO,
};
+ bool is_dirty = PageDirty(page);
+ int err;
+
+retry:
set_page_dirty(page);
- f2fs_wait_on_page_writeback(page, DATA);
- if (clear_page_dirty_for_io(page))
+ f2fs_wait_on_page_writeback(page, DATA, true);
+ if (clear_page_dirty_for_io(page)) {
inode_dec_dirty_pages(inode);
+ remove_dirty_inode(inode);
+ }
+
set_cold_data(page);
- do_write_data_page(&fio);
- clear_cold_data(page);
+
+ err = do_write_data_page(&fio);
+ if (err == -ENOMEM && is_dirty) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry;
+ }
}
out:
f2fs_put_page(page, 1);
* If the parent node is not valid or the data block address is different,
* the victim data block is ignored.
*/
-static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
struct super_block *sb = sbi->sb;
struct node_info dni; /* dnode info for the data */
unsigned int ofs_in_node, nofs;
block_t start_bidx;
+ nid_t nid = le32_to_cpu(entry->nid);
/* stop BG_GC if there is not enough free sections. */
- if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
- return 0;
+ if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
+ return;
if (check_valid_map(sbi, segno, off) == 0)
continue;
if (phase == 0) {
- ra_node_page(sbi, le32_to_cpu(entry->nid));
+ ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
+ META_NAT, true);
+ continue;
+ }
+
+ if (phase == 1) {
+ ra_node_page(sbi, nid);
continue;
}
if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
continue;
- if (phase == 1) {
+ if (phase == 2) {
ra_node_page(sbi, dni.ino);
continue;
}
ofs_in_node = le16_to_cpu(entry->ofs_in_node);
- if (phase == 2) {
+ if (phase == 3) {
inode = f2fs_iget(sb, dni.ino);
if (IS_ERR(inode) || is_bad_inode(inode))
continue;
/* if encrypted inode, let's go phase 3 */
- if (f2fs_encrypted_inode(inode) &&
- S_ISREG(inode->i_mode)) {
+ if (f2fs_encrypted_file(inode)) {
add_gc_inode(gc_list, inode);
continue;
}
- start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
+ start_bidx = start_bidx_of_node(nofs, inode);
data_page = get_read_data_page(inode,
- start_bidx + ofs_in_node, READA, true);
+ start_bidx + ofs_in_node, REQ_RAHEAD,
+ true);
if (IS_ERR(data_page)) {
iput(inode);
continue;
continue;
}
- /* phase 3 */
+ /* phase 4 */
inode = find_gc_inode(gc_list, dni.ino);
if (inode) {
- start_bidx = start_bidx_of_node(nofs, F2FS_I(inode))
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ bool locked = false;
+
+ if (S_ISREG(inode->i_mode)) {
+ if (!down_write_trylock(&fi->dio_rwsem[READ]))
+ continue;
+ if (!down_write_trylock(
+ &fi->dio_rwsem[WRITE])) {
+ up_write(&fi->dio_rwsem[READ]);
+ continue;
+ }
+ locked = true;
+
+ /* wait for all inflight aio data */
+ inode_dio_wait(inode);
+ }
+
+ start_bidx = start_bidx_of_node(nofs, inode)
+ ofs_in_node;
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
- move_encrypted_block(inode, start_bidx);
+ if (f2fs_encrypted_file(inode))
+ move_data_block(inode, start_bidx, segno, off);
else
- move_data_page(inode, start_bidx, gc_type);
+ move_data_page(inode, start_bidx, gc_type,
+ segno, off);
+
+ if (locked) {
+ up_write(&fi->dio_rwsem[WRITE]);
+ up_write(&fi->dio_rwsem[READ]);
+ }
+
stat_inc_data_blk_count(sbi, 1, gc_type);
}
}
- if (++phase < 4)
+ if (++phase < 5)
goto next_step;
-
- if (gc_type == FG_GC) {
- f2fs_submit_merged_bio(sbi, DATA, WRITE);
-
- /* return 1 only if FG_GC succefully reclaimed one */
- if (get_valid_blocks(sbi, segno, 1) == 0)
- return 1;
- }
- return 0;
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
return ret;
}
-static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
+static int do_garbage_collect(struct f2fs_sb_info *sbi,
+ unsigned int start_segno,
struct gc_inode_list *gc_list, int gc_type)
{
struct page *sum_page;
struct f2fs_summary_block *sum;
struct blk_plug plug;
- int nfree = 0;
+ unsigned int segno = start_segno;
+ unsigned int end_segno = start_segno + sbi->segs_per_sec;
+ int seg_freed = 0;
+ unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
+ SUM_TYPE_DATA : SUM_TYPE_NODE;
+
+	/* read ahead multiple SSA blocks that have contiguous addresses */
+ if (sbi->segs_per_sec > 1)
+ ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
+ sbi->segs_per_sec, META_SSA, true);
- /* read segment summary of victim */
- sum_page = get_sum_page(sbi, segno);
+	/* reference all summary pages */
+ while (segno < end_segno) {
+ sum_page = get_sum_page(sbi, segno++);
+ unlock_page(sum_page);
+ }
blk_start_plug(&plug);
- sum = page_address(sum_page);
+ for (segno = start_segno; segno < end_segno; segno++) {
- /*
- * this is to avoid deadlock:
- * - lock_page(sum_page) - f2fs_replace_block
- * - check_valid_map() - mutex_lock(sentry_lock)
- * - mutex_lock(sentry_lock) - change_curseg()
- * - lock_page(sum_page)
- */
- unlock_page(sum_page);
-
- switch (GET_SUM_TYPE((&sum->footer))) {
- case SUM_TYPE_NODE:
- nfree = gc_node_segment(sbi, sum->entries, segno, gc_type);
- break;
- case SUM_TYPE_DATA:
- nfree = gc_data_segment(sbi, sum->entries, gc_list,
- segno, gc_type);
- break;
+ /* find segment summary of victim */
+ sum_page = find_get_page(META_MAPPING(sbi),
+ GET_SUM_BLOCK(sbi, segno));
+ f2fs_put_page(sum_page, 0);
+
+ if (get_valid_blocks(sbi, segno, false) == 0 ||
+ !PageUptodate(sum_page) ||
+ unlikely(f2fs_cp_error(sbi)))
+ goto next;
+
+ sum = page_address(sum_page);
+ f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
+
+ /*
+ * this is to avoid deadlock:
+ * - lock_page(sum_page) - f2fs_replace_block
+ * - check_valid_map() - mutex_lock(sentry_lock)
+ * - mutex_lock(sentry_lock) - change_curseg()
+ * - lock_page(sum_page)
+ */
+ if (type == SUM_TYPE_NODE)
+ gc_node_segment(sbi, sum->entries, segno, gc_type);
+ else
+ gc_data_segment(sbi, sum->entries, gc_list, segno,
+ gc_type);
+
+ stat_inc_seg_count(sbi, type, gc_type);
+
+ if (gc_type == FG_GC &&
+ get_valid_blocks(sbi, segno, false) == 0)
+ seg_freed++;
+next:
+ f2fs_put_page(sum_page, 0);
}
+
+ if (gc_type == FG_GC)
+ f2fs_submit_merged_write(sbi,
+ (type == SUM_TYPE_NODE) ? NODE : DATA);
+
blk_finish_plug(&plug);
- stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type);
stat_inc_call_count(sbi->stat_info);
- f2fs_put_page(sum_page, 0);
- return nfree;
+ return seg_freed;
}
-int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
+int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
+ bool background, unsigned int segno)
{
- unsigned int segno, i;
int gc_type = sync ? FG_GC : BG_GC;
- int sec_freed = 0;
- int ret = -EINVAL;
+ int sec_freed = 0, seg_freed = 0, total_freed = 0;
+ int ret = 0;
struct cp_control cpc;
+ unsigned int init_segno = segno;
struct gc_inode_list gc_list = {
.ilist = LIST_HEAD_INIT(gc_list.ilist),
.iroot = RADIX_TREE_INIT(GFP_NOFS),
};
+ trace_f2fs_gc_begin(sbi->sb, sync, background,
+ get_pages(sbi, F2FS_DIRTY_NODES),
+ get_pages(sbi, F2FS_DIRTY_DENTS),
+ get_pages(sbi, F2FS_DIRTY_IMETA),
+ free_sections(sbi),
+ free_segments(sbi),
+ reserved_segments(sbi),
+ prefree_segments(sbi));
+
cpc.reason = __get_cp_reason(sbi);
gc_more:
- segno = NULL_SEGNO;
-
- if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
+ if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) {
+ ret = -EINVAL;
goto stop;
- if (unlikely(f2fs_cp_error(sbi)))
- goto stop;
-
- if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed)) {
- gc_type = FG_GC;
- if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi))
- write_checkpoint(sbi, &cpc);
}
-
- if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
+ if (unlikely(f2fs_cp_error(sbi))) {
+ ret = -EIO;
goto stop;
- ret = 0;
-
- /* readahead multi ssa blocks those have contiguous address */
- if (sbi->segs_per_sec > 1)
- ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
- META_SSA, true);
+ }
- for (i = 0; i < sbi->segs_per_sec; i++) {
+ if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
/*
- * for FG_GC case, halt gcing left segments once failed one
- * of segments in selected section to avoid long latency.
+		 * For example, if there are many prefree_segments below the
+		 * given threshold, we can make them free by checkpoint. Then,
+		 * we secure free segments which no longer need foreground GC.
*/
- if (!do_garbage_collect(sbi, segno + i, &gc_list, gc_type) &&
- gc_type == FG_GC)
- break;
+ if (prefree_segments(sbi)) {
+ ret = write_checkpoint(sbi, &cpc);
+ if (ret)
+ goto stop;
+ }
+ if (has_not_enough_free_secs(sbi, 0, 0))
+ gc_type = FG_GC;
+ }
+
+ /* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
+ if (gc_type == BG_GC && !background) {
+ ret = -EINVAL;
+ goto stop;
+ }
+ if (!__get_victim(sbi, &segno, gc_type)) {
+ ret = -ENODATA;
+ goto stop;
}
- if (i == sbi->segs_per_sec && gc_type == FG_GC)
+ seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
+ if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
sec_freed++;
+ total_freed += seg_freed;
if (gc_type == FG_GC)
sbi->cur_victim_sec = NULL_SEGNO;
if (!sync) {
- if (has_not_enough_free_secs(sbi, sec_freed))
+ if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
+ segno = NULL_SEGNO;
goto gc_more;
+ }
if (gc_type == FG_GC)
- write_checkpoint(sbi, &cpc);
+ ret = write_checkpoint(sbi, &cpc);
}
stop:
+ SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
+ SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;
+
+ trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
+ get_pages(sbi, F2FS_DIRTY_NODES),
+ get_pages(sbi, F2FS_DIRTY_DENTS),
+ get_pages(sbi, F2FS_DIRTY_IMETA),
+ free_sections(sbi),
+ free_segments(sbi),
+ reserved_segments(sbi),
+ prefree_segments(sbi));
+
mutex_unlock(&sbi->gc_mutex);
put_gc_inode(&gc_list);
void build_gc_manager(struct f2fs_sb_info *sbi)
{
+ u64 main_count, resv_count, ovp_count;
+
DIRTY_I(sbi)->v_ops = &default_v_ops;
+
+ /* threshold of # of valid blocks in a section for victims of FG_GC */
+ main_count = SM_I(sbi)->main_segments << sbi->log_blocks_per_seg;
+ resv_count = SM_I(sbi)->reserved_segments << sbi->log_blocks_per_seg;
+ ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
+
+ sbi->fggc_threshold = div64_u64((main_count - ovp_count) *
+ BLKS_PER_SEC(sbi), (main_count - resv_count));
+
+ /* give warm/cold data area from slower device */
+ if (sbi->s_ndevs && sbi->segs_per_sec == 1)
+ SIT_I(sbi)->last_victim[ALLOC_NEXT] =
+ GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}
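
For concreteness, a worked instance of the threshold with illustrative geometry: 512 blocks per segment and one segment per section, so BLKS_PER_SEC(sbi) = 512; main_count = 5,120,000 blocks, ovp_count = 256,000 and resv_count = 102,400. Then fggc_threshold = (5,120,000 - 256,000) * 512 / (5,120,000 - 102,400) = 2,490,368,000 / 5,017,600 = 496 after integer division, so a section holding more than 496 of its 512 blocks as valid data is rejected by no_fggc_candidate() when FG_GC selects victims: copying it would cost more writes than it frees.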
* whether IO subsystem is idle
* or not
*/
+#define DEF_GC_THREAD_URGENT_SLEEP_TIME 500 /* 500 ms */
#define DEF_GC_THREAD_MIN_SLEEP_TIME 30000 /* milliseconds */
#define DEF_GC_THREAD_MAX_SLEEP_TIME 60000
#define DEF_GC_THREAD_NOGC_SLEEP_TIME 300000 /* wait 5 min */
wait_queue_head_t gc_wait_queue_head;
/* for gc sleep time */
+ unsigned int urgent_sleep_time;
unsigned int min_sleep_time;
unsigned int max_sleep_time;
unsigned int no_gc_sleep_time;
/* for changing gc mode */
unsigned int gc_idle;
+ unsigned int gc_urgent;
+ unsigned int gc_wake;
};
struct gc_inode_list {
}
static inline void increase_sleep_time(struct f2fs_gc_kthread *gc_th,
- long *wait)
+ unsigned int *wait)
{
+ unsigned int min_time = gc_th->min_sleep_time;
+ unsigned int max_time = gc_th->max_sleep_time;
+
if (*wait == gc_th->no_gc_sleep_time)
return;
- *wait += gc_th->min_sleep_time;
- if (*wait > gc_th->max_sleep_time)
- *wait = gc_th->max_sleep_time;
+ if ((long long)*wait + (long long)min_time > (long long)max_time)
+ *wait = max_time;
+ else
+ *wait += min_time;
}
static inline void decrease_sleep_time(struct f2fs_gc_kthread *gc_th,
- long *wait)
+ unsigned int *wait)
{
+ unsigned int min_time = gc_th->min_sleep_time;
+
if (*wait == gc_th->no_gc_sleep_time)
*wait = gc_th->max_sleep_time;
- *wait -= gc_th->min_sleep_time;
- if (*wait <= gc_th->min_sleep_time)
- *wait = gc_th->min_sleep_time;
+ if ((long long)*wait - (long long)min_time < (long long)min_time)
+ *wait = min_time;
+ else
+ *wait -= min_time;
}
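
The (long long) widening in both helpers matters because the sleep times are u32 sysfs tunables, so the intermediate sum or difference can wrap. A worked instance with deliberately extreme settings: min_sleep_time = 4,000,000,000, max_sleep_time = 2,000,000,000, *wait = 1,000,000,000. In 32-bit arithmetic, *wait + min_time wraps to 705,032,704 (that is, 5,000,000,000 - 2^32), which would pass an unwidened comparison against max_time; with the casts the comparison sees the true 5,000,000,000 and clamps *wait to max_time. The same reasoning guards the subtraction in decrease_sleep_time() against underflow.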
static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
return true;
return false;
}
-
-static inline int is_idle(struct f2fs_sb_info *sbi)
-{
- struct block_device *bdev = sbi->sb->s_bdev;
- struct request_queue *q = bdev_get_queue(bdev);
- struct request_list *rl = &q->root_rl;
- return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]);
-}
}
f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
- struct f2fs_filename *fname)
+ struct fscrypt_name *fname)
{
__u32 hash;
f2fs_hash_t f2fs_hash;
bool f2fs_may_inline_data(struct inode *inode)
{
- if (!test_opt(F2FS_I_SB(inode), INLINE_DATA))
- return false;
-
if (f2fs_is_atomic_file(inode))
return false;
if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
return false;
- if (i_size_read(inode) > MAX_INLINE_DATA)
+ if (i_size_read(inode) > MAX_INLINE_DATA(inode))
return false;
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+ if (f2fs_encrypted_file(inode))
return false;
return true;
void read_inline_data(struct page *page, struct page *ipage)
{
+ struct inode *inode = page->mapping->host;
void *src_addr, *dst_addr;
if (PageUptodate(page))
f2fs_bug_on(F2FS_P_SB(page), page->index);
- zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
+ zero_user_segment(page, MAX_INLINE_DATA(inode), PAGE_SIZE);
/* Copy the whole inline data block */
- src_addr = inline_data_addr(ipage);
+ src_addr = inline_data_addr(inode, ipage);
dst_addr = kmap_atomic(page);
- memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
+ memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
flush_dcache_page(page);
kunmap_atomic(dst_addr);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
}
-bool truncate_inline_inode(struct page *ipage, u64 from)
+void truncate_inline_inode(struct inode *inode, struct page *ipage, u64 from)
{
void *addr;
- if (from >= MAX_INLINE_DATA)
- return false;
+ if (from >= MAX_INLINE_DATA(inode))
+ return;
- addr = inline_data_addr(ipage);
+ addr = inline_data_addr(inode, ipage);
- f2fs_wait_on_page_writeback(ipage, NODE);
- memset(addr + from, 0, MAX_INLINE_DATA - from);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
+ memset(addr + from, 0, MAX_INLINE_DATA(inode) - from);
+ set_page_dirty(ipage);
- return true;
+ if (from == 0)
+ clear_inode_flag(inode, FI_DATA_EXIST);
}
int f2fs_read_inline_data(struct inode *inode, struct page *page)
}
if (page->index)
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ zero_user_segment(page, 0, PAGE_SIZE);
else
read_inline_data(page, ipage);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
f2fs_put_page(ipage, 1);
trace_android_fs_dataread_end(inode, page_offset(page),
PAGE_SIZE);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
- void *src_addr, *dst_addr;
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(dn->inode),
.type = DATA,
- .rw = WRITE_SYNC | REQ_PRIO,
+ .op = REQ_OP_WRITE,
+ .op_flags = REQ_SYNC | REQ_NOIDLE | REQ_PRIO,
.page = page,
.encrypted_page = NULL,
+ .io_type = FS_DATA_IO,
};
int dirty, err;
- f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);
-
if (!f2fs_exist_data(dn->inode))
goto clear_out;
if (err)
return err;
- f2fs_wait_on_page_writeback(page, DATA);
-
- if (PageUptodate(page))
- goto no_update;
-
- zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
+ f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
- /* Copy the whole inline data block */
- src_addr = inline_data_addr(dn->inode_page);
- dst_addr = kmap_atomic(page);
- memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
- flush_dcache_page(page);
- kunmap_atomic(dst_addr);
- SetPageUptodate(page);
-no_update:
+ read_inline_data(page, dn->inode_page);
set_page_dirty(page);
/* clear dirty state */
/* write data page to try to make data consistent */
set_page_writeback(page);
- fio.blk_addr = dn->data_blkaddr;
+ fio.old_blkaddr = dn->data_blkaddr;
+ set_inode_flag(dn->inode, FI_HOT_DATA);
write_data_page(dn, &fio);
- set_data_blkaddr(dn);
- f2fs_update_extent_cache(dn);
- f2fs_wait_on_page_writeback(page, DATA);
- if (dirty)
+ f2fs_wait_on_page_writeback(page, DATA, true);
+ if (dirty) {
inode_dec_dirty_pages(dn->inode);
+ remove_dirty_inode(dn->inode);
+ }
/* this converted inline_data should be recovered. */
- set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);
+ set_inode_flag(dn->inode, FI_APPEND_WRITE);
/* clear inline data and flag after data writeback */
- truncate_inline_inode(dn->inode_page, 0);
+ truncate_inline_inode(dn->inode, dn->inode_page, 0);
+ clear_inline_node(dn->inode_page);
clear_out:
stat_dec_inline_inode(dn->inode);
- f2fs_clear_inline_inode(dn->inode);
- sync_inode_page(dn);
+ clear_inode_flag(dn->inode, FI_INLINE_DATA);
f2fs_put_dnode(dn);
return 0;
}
struct page *ipage, *page;
int err = 0;
- page = grab_cache_page(inode->i_mapping, 0);
+ if (!f2fs_has_inline_data(inode))
+ return 0;
+
+ page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
if (!page)
return -ENOMEM;
f2fs_unlock_op(sbi);
f2fs_put_page(page, 1);
+
+ f2fs_balance_fs(sbi, dn.node_changed);
+
return err;
}
{
void *src_addr, *dst_addr;
struct dnode_of_data dn;
+ struct address_space *mapping = page_mapping(page);
+ unsigned long flags;
int err;
set_new_dnode(&dn, inode, NULL, NULL, 0);
f2fs_bug_on(F2FS_I_SB(inode), page->index);
- f2fs_wait_on_page_writeback(dn.inode_page, NODE);
+ f2fs_wait_on_page_writeback(dn.inode_page, NODE, true);
src_addr = kmap_atomic(page);
- dst_addr = inline_data_addr(dn.inode_page);
- memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
+ dst_addr = inline_data_addr(inode, dn.inode_page);
+ memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
kunmap_atomic(src_addr);
+ set_page_dirty(dn.inode_page);
+
+ spin_lock_irqsave(&mapping->tree_lock, flags);
+ radix_tree_tag_clear(&mapping->page_tree, page_index(page),
+ PAGECACHE_TAG_DIRTY);
+ spin_unlock_irqrestore(&mapping->tree_lock, flags);
- set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
- set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
+ set_inode_flag(inode, FI_APPEND_WRITE);
+ set_inode_flag(inode, FI_DATA_EXIST);
- sync_inode_page(&dn);
+ clear_inline_node(dn.inode_page);
f2fs_put_dnode(&dn);
return 0;
}
ipage = get_node_page(sbi, inode->i_ino);
f2fs_bug_on(sbi, IS_ERR(ipage));
- f2fs_wait_on_page_writeback(ipage, NODE);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
- src_addr = inline_data_addr(npage);
- dst_addr = inline_data_addr(ipage);
- memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
+ src_addr = inline_data_addr(inode, npage);
+ dst_addr = inline_data_addr(inode, ipage);
+ memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
- set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
- set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
+ set_inode_flag(inode, FI_INLINE_DATA);
+ set_inode_flag(inode, FI_DATA_EXIST);
- update_inode(inode, ipage);
+ set_page_dirty(ipage);
f2fs_put_page(ipage, 1);
return true;
}
if (f2fs_has_inline_data(inode)) {
ipage = get_node_page(sbi, inode->i_ino);
f2fs_bug_on(sbi, IS_ERR(ipage));
- if (!truncate_inline_inode(ipage, 0))
- return false;
- f2fs_clear_inline_inode(inode);
- update_inode(inode, ipage);
+ truncate_inline_inode(inode, ipage, 0);
+ clear_inode_flag(inode, FI_INLINE_DATA);
f2fs_put_page(ipage, 1);
} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
if (truncate_blocks(inode, 0, false))
}
struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
- struct f2fs_filename *fname, struct page **res_page)
+ struct fscrypt_name *fname, struct page **res_page)
{
struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
- struct f2fs_inline_dentry *inline_dentry;
struct qstr name = FSTR_TO_QSTR(&fname->disk_name);
struct f2fs_dir_entry *de;
struct f2fs_dentry_ptr d;
struct page *ipage;
+ void *inline_dentry;
f2fs_hash_t namehash;
ipage = get_node_page(sbi, dir->i_ino);
- if (IS_ERR(ipage))
+ if (IS_ERR(ipage)) {
+ *res_page = ipage;
return NULL;
+ }
namehash = f2fs_dentry_hash(&name, fname);
- inline_dentry = inline_data_addr(ipage);
+ inline_dentry = inline_data_addr(dir, ipage);
- make_dentry_ptr(NULL, &d, (void *)inline_dentry, 2);
+ make_dentry_ptr_inline(dir, &d, inline_dentry);
de = find_target_dentry(fname, namehash, NULL, &d);
unlock_page(ipage);
if (de)
else
f2fs_put_page(ipage, 0);
- /*
- * For the most part, it should be a bug when name_len is zero.
- * We stop here for figuring out where the bugs has occurred.
- */
- f2fs_bug_on(sbi, d.max < 0);
- return de;
-}
-
-struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *dir,
- struct page **p)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
- struct page *ipage;
- struct f2fs_dir_entry *de;
- struct f2fs_inline_dentry *dentry_blk;
-
- ipage = get_node_page(sbi, dir->i_ino);
- if (IS_ERR(ipage))
- return NULL;
-
- dentry_blk = inline_data_addr(ipage);
- de = &dentry_blk->dentry[1];
- *p = ipage;
- unlock_page(ipage);
return de;
}
int make_empty_inline_dir(struct inode *inode, struct inode *parent,
struct page *ipage)
{
- struct f2fs_inline_dentry *dentry_blk;
struct f2fs_dentry_ptr d;
+ void *inline_dentry;
- dentry_blk = inline_data_addr(ipage);
+ inline_dentry = inline_data_addr(inode, ipage);
- make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
+ make_dentry_ptr_inline(inode, &d, inline_dentry);
do_make_empty_dir(inode, parent, &d);
set_page_dirty(ipage);
/* update i_size to MAX_INLINE_DATA */
- if (i_size_read(inode) < MAX_INLINE_DATA) {
- i_size_write(inode, MAX_INLINE_DATA);
- set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
- }
+ if (i_size_read(inode) < MAX_INLINE_DATA(inode))
+ f2fs_i_size_write(inode, MAX_INLINE_DATA(inode));
return 0;
}
* NOTE: ipage is grabbed by caller, but if any error occurs, we should
* release ipage in this function.
*/
-static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
- struct f2fs_inline_dentry *inline_dentry)
+static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
+ void *inline_dentry)
{
struct page *page;
struct dnode_of_data dn;
struct f2fs_dentry_block *dentry_blk;
+ struct f2fs_dentry_ptr src, dst;
int err;
- page = grab_cache_page(dir->i_mapping, 0);
+ page = f2fs_grab_cache_page(dir->i_mapping, 0, false);
if (!page) {
f2fs_put_page(ipage, 1);
return -ENOMEM;
if (err)
goto out;
- f2fs_wait_on_page_writeback(page, DATA);
- zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
+ f2fs_wait_on_page_writeback(page, DATA, true);
+ zero_user_segment(page, MAX_INLINE_DATA(dir), PAGE_SIZE);
dentry_blk = kmap_atomic(page);
+ make_dentry_ptr_inline(dir, &src, inline_dentry);
+ make_dentry_ptr_block(dir, &dst, dentry_blk);
+
/* copy data from inline dentry block to new dentry block */
- memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap,
- INLINE_DENTRY_BITMAP_SIZE);
- memset(dentry_blk->dentry_bitmap + INLINE_DENTRY_BITMAP_SIZE, 0,
- SIZE_OF_DENTRY_BITMAP - INLINE_DENTRY_BITMAP_SIZE);
+ memcpy(dst.bitmap, src.bitmap, src.nr_bitmap);
+ memset(dst.bitmap + src.nr_bitmap, 0, dst.nr_bitmap - src.nr_bitmap);
/*
* we do not need to zero out the remainder of the dentry and filename
* fields, since the bitmap already marks their usage status; we can
* also skip copying/zeroing the reserved space of the dentry block,
* because it hasn't been used so far.
*/
- memcpy(dentry_blk->dentry, inline_dentry->dentry,
- sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY);
- memcpy(dentry_blk->filename, inline_dentry->filename,
- NR_INLINE_DENTRY * F2FS_SLOT_LEN);
+ memcpy(dst.dentry, src.dentry, SIZE_OF_DIR_ENTRY * src.max);
+ memcpy(dst.filename, src.filename, src.max * F2FS_SLOT_LEN);
kunmap_atomic(dentry_blk);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
set_page_dirty(page);
/* clear inline dir and flag after data writeback */
- truncate_inline_inode(ipage, 0);
+ truncate_inline_inode(dir, ipage, 0);
stat_dec_inline_dir(dir);
- clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);
+ clear_inode_flag(dir, FI_INLINE_DENTRY);
- if (i_size_read(dir) < PAGE_CACHE_SIZE) {
- i_size_write(dir, PAGE_CACHE_SIZE);
- set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
- }
-
- sync_inode_page(&dn);
+ f2fs_i_depth_write(dir, 1);
+ if (i_size_read(dir) < PAGE_SIZE)
+ f2fs_i_size_write(dir, PAGE_SIZE);
out:
f2fs_put_page(page, 1);
return err;
}
-int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
- struct inode *inode, nid_t ino, umode_t mode)
+static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry)
+{
+ struct f2fs_dentry_ptr d;
+ unsigned long bit_pos = 0;
+ int err = 0;
+
+ make_dentry_ptr_inline(dir, &d, inline_dentry);
+
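+	/* walk the inline bitmap and re-add each live entry through the regular dentry path */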
+ while (bit_pos < d.max) {
+ struct f2fs_dir_entry *de;
+ struct qstr new_name;
+ nid_t ino;
+ umode_t fake_mode;
+
+ if (!test_bit_le(bit_pos, d.bitmap)) {
+ bit_pos++;
+ continue;
+ }
+
+ de = &d.dentry[bit_pos];
+
+ if (unlikely(!de->name_len)) {
+ bit_pos++;
+ continue;
+ }
+
+ new_name.name = d.filename[bit_pos];
+ new_name.len = le16_to_cpu(de->name_len);
+
+ ino = le32_to_cpu(de->ino);
+ fake_mode = get_de_type(de) << S_SHIFT;
+
+ err = f2fs_add_regular_entry(dir, &new_name, NULL, NULL,
+ ino, fake_mode);
+ if (err)
+ goto punch_dentry_pages;
+
+ bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
+ }
+ return 0;
+punch_dentry_pages:
+ truncate_inode_pages(&dir->i_data, 0);
+ truncate_blocks(dir, 0, false);
+ remove_dirty_inode(dir);
+ return err;
+}
+
+static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
+ void *inline_dentry)
+{
+ void *backup_dentry;
+ int err;
+
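+	/* back up the inline dentries, truncate the inline area, then re-insert each entry; roll back from the backup on failure */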
+ backup_dentry = f2fs_kmalloc(F2FS_I_SB(dir),
+ MAX_INLINE_DATA(dir), GFP_F2FS_ZERO);
+ if (!backup_dentry) {
+ f2fs_put_page(ipage, 1);
+ return -ENOMEM;
+ }
+
+ memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA(dir));
+ truncate_inline_inode(dir, ipage, 0);
+
+ unlock_page(ipage);
+
+ err = f2fs_add_inline_entries(dir, backup_dentry);
+ if (err)
+ goto recover;
+
+ lock_page(ipage);
+
+ stat_dec_inline_dir(dir);
+ clear_inode_flag(dir, FI_INLINE_DENTRY);
+ kfree(backup_dentry);
+ return 0;
+recover:
+ lock_page(ipage);
+ memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
+ f2fs_i_depth_write(dir, 0);
+ f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
+ set_page_dirty(ipage);
+ f2fs_put_page(ipage, 1);
+
+ kfree(backup_dentry);
+ return err;
+}
+
+static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
+ void *inline_dentry)
+{
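+	/* a dir with i_dir_level == 0 can move its dentries slot-for-slot; a leveled dir has to rehash every entry */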
+ if (!F2FS_I(dir)->i_dir_level)
+ return f2fs_move_inline_dirents(dir, ipage, inline_dentry);
+ else
+ return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
+}
+
+int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
+ const struct qstr *orig_name,
+ struct inode *inode, nid_t ino, umode_t mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct page *ipage;
unsigned int bit_pos;
f2fs_hash_t name_hash;
- size_t namelen = name->len;
- struct f2fs_inline_dentry *dentry_blk = NULL;
+ void *inline_dentry = NULL;
struct f2fs_dentry_ptr d;
- int slots = GET_DENTRY_SLOTS(namelen);
+ int slots = GET_DENTRY_SLOTS(new_name->len);
struct page *page = NULL;
int err = 0;
if (IS_ERR(ipage))
return PTR_ERR(ipage);
- dentry_blk = inline_data_addr(ipage);
- bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
- slots, NR_INLINE_DENTRY);
- if (bit_pos >= NR_INLINE_DENTRY) {
- err = f2fs_convert_inline_dir(dir, ipage, dentry_blk);
+ inline_dentry = inline_data_addr(dir, ipage);
+ make_dentry_ptr_inline(dir, &d, inline_dentry);
+
+ bit_pos = room_for_filename(d.bitmap, slots, d.max);
+ if (bit_pos >= d.max) {
+ err = f2fs_convert_inline_dir(dir, ipage, inline_dentry);
if (err)
return err;
err = -EAGAIN;
if (inode) {
down_write(&F2FS_I(inode)->i_sem);
- page = init_inode_metadata(inode, dir, name, ipage);
+ page = init_inode_metadata(inode, dir, new_name,
+ orig_name, ipage);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
}
}
- f2fs_wait_on_page_writeback(ipage, NODE);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
- name_hash = f2fs_dentry_hash(name, NULL);
- make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
- f2fs_update_dentry(ino, mode, &d, name, name_hash, bit_pos);
+ name_hash = f2fs_dentry_hash(new_name, NULL);
+ f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);
set_page_dirty(ipage);
/* we don't need to mark_inode_dirty now */
if (inode) {
- F2FS_I(inode)->i_pino = dir->i_ino;
- update_inode(inode, page);
+ f2fs_i_pino_write(inode, dir->i_ino);
f2fs_put_page(page, 1);
}
fail:
if (inode)
up_write(&F2FS_I(inode)->i_sem);
-
- if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
- update_inode(dir, ipage);
- clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
- }
out:
f2fs_put_page(ipage, 1);
return err;
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
struct inode *dir, struct inode *inode)
{
- struct f2fs_inline_dentry *inline_dentry;
+ struct f2fs_dentry_ptr d;
+ void *inline_dentry;
int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
unsigned int bit_pos;
int i;
lock_page(page);
- f2fs_wait_on_page_writeback(page, NODE);
+ f2fs_wait_on_page_writeback(page, NODE, true);
- inline_dentry = inline_data_addr(page);
- bit_pos = dentry - inline_dentry->dentry;
+ inline_dentry = inline_data_addr(dir, page);
+ make_dentry_ptr_inline(dir, &d, inline_dentry);
+
+ bit_pos = dentry - d.dentry;
for (i = 0; i < slots; i++)
- test_and_clear_bit_le(bit_pos + i,
- &inline_dentry->dentry_bitmap);
+ __clear_bit_le(bit_pos + i, d.bitmap);
set_page_dirty(page);
+ f2fs_put_page(page, 1);
- dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+ dir->i_ctime = dir->i_mtime = current_time(dir);
+ f2fs_mark_inode_dirty_sync(dir, false);
if (inode)
- f2fs_drop_nlink(dir, inode, page);
-
- f2fs_put_page(page, 1);
+ f2fs_drop_nlink(dir, inode);
}
bool f2fs_empty_inline_dir(struct inode *dir)
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct page *ipage;
unsigned int bit_pos = 2;
- struct f2fs_inline_dentry *dentry_blk;
+ void *inline_dentry;
+ struct f2fs_dentry_ptr d;
ipage = get_node_page(sbi, dir->i_ino);
if (IS_ERR(ipage))
return false;
- dentry_blk = inline_data_addr(ipage);
- bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
- NR_INLINE_DENTRY,
- bit_pos);
+ inline_dentry = inline_data_addr(dir, ipage);
+ make_dentry_ptr_inline(dir, &d, inline_dentry);
+
+ bit_pos = find_next_bit_le(d.bitmap, d.max, bit_pos);
f2fs_put_page(ipage, 1);
- if (bit_pos < NR_INLINE_DENTRY)
+ if (bit_pos < d.max)
return false;
return true;
}
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
- struct f2fs_str *fstr)
+ struct fscrypt_str *fstr)
{
struct inode *inode = file_inode(file);
- struct f2fs_inline_dentry *inline_dentry = NULL;
struct page *ipage = NULL;
struct f2fs_dentry_ptr d;
+ void *inline_dentry = NULL;
+ int err;
+
+ make_dentry_ptr_inline(inode, &d, inline_dentry);
- if (ctx->pos == NR_INLINE_DENTRY)
+ if (ctx->pos == d.max)
return 0;
ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
if (IS_ERR(ipage))
return PTR_ERR(ipage);
- inline_dentry = inline_data_addr(ipage);
+ inline_dentry = inline_data_addr(inode, ipage);
- make_dentry_ptr(inode, &d, (void *)inline_dentry, 2);
+ make_dentry_ptr_inline(inode, &d, inline_dentry);
- if (!f2fs_fill_dentries(ctx, &d, 0, fstr))
- ctx->pos = NR_INLINE_DENTRY;
+ err = f2fs_fill_dentries(ctx, &d, 0, fstr);
+ if (!err)
+ ctx->pos = d.max;
f2fs_put_page(ipage, 1);
- return 0;
+ return err < 0 ? err : 0;
}
int f2fs_inline_data_fiemap(struct inode *inode,
goto out;
}
- ilen = min_t(size_t, MAX_INLINE_DATA, i_size_read(inode));
+ ilen = min_t(size_t, MAX_INLINE_DATA(inode), i_size_read(inode));
if (start >= ilen)
goto out;
if (start + len < ilen)
get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
- byteaddr += (char *)inline_data_addr(ipage) - (char *)F2FS_INODE(ipage);
+ byteaddr += (char *)inline_data_addr(inode, ipage) -
+ (char *)F2FS_INODE(ipage);
err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags);
out:
f2fs_put_page(ipage, 1);
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
+#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include "f2fs.h"
#include "node.h"
+#include "segment.h"
#include <trace/events/f2fs.h>
+void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
+{
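+	/* f2fs_inode_dirtied() returns true if the inode was already dirtied, so only newly dirtied inodes are passed on to the VFS */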
+ if (f2fs_inode_dirtied(inode, sync))
+ return;
+
+ mark_inode_dirty_sync(inode);
+}
+
void f2fs_set_inode_flags(struct inode *inode)
{
unsigned int flags = F2FS_I(inode)->i_flags;
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
+ int extra_size = get_extra_isize(inode);
+
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
- if (ri->i_addr[0])
- inode->i_rdev =
- old_decode_dev(le32_to_cpu(ri->i_addr[0]));
+ if (ri->i_addr[extra_size])
+ inode->i_rdev = old_decode_dev(
+ le32_to_cpu(ri->i_addr[extra_size]));
else
- inode->i_rdev =
- new_decode_dev(le32_to_cpu(ri->i_addr[1]));
+ inode->i_rdev = new_decode_dev(
+ le32_to_cpu(ri->i_addr[extra_size + 1]));
}
}
static bool __written_first_block(struct f2fs_inode *ri)
{
- block_t addr = le32_to_cpu(ri->i_addr[0]);
+ block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);
if (addr != NEW_ADDR && addr != NULL_ADDR)
return true;
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
+ int extra_size = get_extra_isize(inode);
+
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
if (old_valid_dev(inode->i_rdev)) {
- ri->i_addr[0] =
+ ri->i_addr[extra_size] =
cpu_to_le32(old_encode_dev(inode->i_rdev));
- ri->i_addr[1] = 0;
+ ri->i_addr[extra_size + 1] = 0;
} else {
- ri->i_addr[0] = 0;
- ri->i_addr[1] =
+ ri->i_addr[extra_size] = 0;
+ ri->i_addr[extra_size + 1] =
cpu_to_le32(new_encode_dev(inode->i_rdev));
- ri->i_addr[2] = 0;
+ ri->i_addr[extra_size + 2] = 0;
}
}
}
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
- void *inline_data = inline_data_addr(ipage);
+ void *inline_data = inline_data_addr(inode, ipage);
__le32 *start = inline_data;
- __le32 *end = start + MAX_INLINE_DATA / sizeof(__le32);
+ __le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);
while (start < end) {
if (*start++) {
- f2fs_wait_on_page_writeback(ipage, NODE);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
- set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
- set_raw_inline(F2FS_I(inode), F2FS_INODE(ipage));
+ set_inode_flag(inode, FI_DATA_EXIST);
+ set_raw_inline(inode, F2FS_INODE(ipage));
set_page_dirty(ipage);
return;
}
return;
}
+static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
+{
+ struct f2fs_inode *ri = &F2FS_NODE(page)->i;
+ int extra_isize = le32_to_cpu(ri->i_extra_isize);
+
+ if (!f2fs_sb_has_inode_chksum(sbi->sb))
+ return false;
+
+ if (!RAW_IS_INODE(F2FS_NODE(page)) || !(ri->i_inline & F2FS_EXTRA_ATTR))
+ return false;
+
+ if (!F2FS_FITS_IN_INODE(ri, extra_isize, i_inode_checksum))
+ return false;
+
+ return true;
+}
+
+static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
+{
+ struct f2fs_node *node = F2FS_NODE(page);
+ struct f2fs_inode *ri = &node->i;
+ __le32 ino = node->footer.ino;
+ __le32 gen = ri->i_generation;
+ __u32 chksum, chksum_seed;
+ __u32 dummy_cs = 0;
+ unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
+ unsigned int cs_size = sizeof(dummy_cs);
+
+ chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
+ sizeof(ino));
+ chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));
+
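+	/* checksum the raw inode with a zero placeholder substituted for the checksum field itself */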
+ chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
+ chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
+ offset += cs_size;
+ chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
+ F2FS_BLKSIZE - offset);
+ return chksum;
+}
+
+bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
+{
+ struct f2fs_inode *ri;
+ __u32 provided, calculated;
+
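+	/* dirty or in-flight pages may not carry an up-to-date checksum yet, so report them as valid */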
+ if (!f2fs_enable_inode_chksum(sbi, page) ||
+ PageDirty(page) || PageWriteback(page))
+ return true;
+
+ ri = &F2FS_NODE(page)->i;
+ provided = le32_to_cpu(ri->i_inode_checksum);
+ calculated = f2fs_inode_chksum(sbi, page);
+
+ if (provided != calculated)
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "checksum invalid, ino = %x, %x vs. %x",
+ ino_of_node(page), provided, calculated);
+
+ return provided == calculated;
+}
+
+void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
+{
+ struct f2fs_inode *ri = &F2FS_NODE(page)->i;
+
+ if (!f2fs_enable_inode_chksum(sbi, page))
+ return;
+
+ ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
+}
+
static int do_read_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct page *node_page;
struct f2fs_inode *ri;
+ projid_t i_projid;
/* Check if ino is within scope */
if (check_nid_range(sbi, inode->i_ino)) {
i_gid_write(inode, le32_to_cpu(ri->i_gid));
set_nlink(inode, le32_to_cpu(ri->i_links));
inode->i_size = le64_to_cpu(ri->i_size);
- inode->i_blocks = le64_to_cpu(ri->i_blocks);
+ inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);
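+	/* on-disk i_blocks counts the inode block itself, while the in-memory value excludes it and is kept in 512-byte sectors */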
inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
fi->i_pino = le32_to_cpu(ri->i_pino);
fi->i_dir_level = ri->i_dir_level;
- f2fs_init_extent_tree(inode, &ri->i_ext);
+ if (f2fs_init_extent_tree(inode, &ri->i_ext))
+ set_page_dirty(node_page);
+
+ get_inline_info(inode, ri);
- get_inline_info(fi, ri);
+ fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
+ le16_to_cpu(ri->i_extra_isize) : 0;
/* check data exist */
if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
__get_inode_rdev(inode, ri);
if (__written_first_block(ri))
- set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
+ set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+
+ if (!need_inode_block_update(sbi, inode->i_ino))
+ fi->last_disk_size = inode->i_size;
+
+ if (fi->i_flags & FS_PROJINHERIT_FL)
+ set_inode_flag(inode, FI_PROJ_INHERIT);
+
+ if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi->sb) &&
+ F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
+ i_projid = (projid_t)le32_to_cpu(ri->i_projid);
+ else
+ i_projid = F2FS_DEF_PROJID;
+ fi->i_projid = make_kprojid(&init_user_ns, i_projid);
f2fs_put_page(node_page, 1);
inode->i_op = &f2fs_encrypted_symlink_inode_operations;
else
inode->i_op = &f2fs_symlink_inode_operations;
+ inode_nohighmem(inode);
inode->i_mapping->a_ops = &f2fs_dblock_aops;
} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
ret = -EIO;
goto bad_inode;
}
+ f2fs_set_inode_flags(inode);
unlock_new_inode(inode);
trace_f2fs_iget(inode);
return inode;
return ERR_PTR(ret);
}
-void update_inode(struct inode *inode, struct page *node_page)
+struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
+{
+ struct inode *inode;
+retry:
+ inode = f2fs_iget(sb, ino);
+ if (IS_ERR(inode)) {
+ if (PTR_ERR(inode) == -ENOMEM) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry;
+ }
+ }
+ return inode;
+}
+
+int update_inode(struct inode *inode, struct page *node_page)
{
struct f2fs_inode *ri;
+ struct extent_tree *et = F2FS_I(inode)->extent_tree;
+
+ f2fs_inode_synced(inode);
- f2fs_wait_on_page_writeback(node_page, NODE);
+ f2fs_wait_on_page_writeback(node_page, NODE, true);
ri = F2FS_INODE(node_page);
ri->i_gid = cpu_to_le32(i_gid_read(inode));
ri->i_links = cpu_to_le32(inode->i_nlink);
ri->i_size = cpu_to_le64(i_size_read(inode));
- ri->i_blocks = cpu_to_le64(inode->i_blocks);
+ ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);
- if (F2FS_I(inode)->extent_tree)
- set_raw_extent(&F2FS_I(inode)->extent_tree->largest,
- &ri->i_ext);
- else
+ if (et) {
+ read_lock(&et->lock);
+ set_raw_extent(&et->largest, &ri->i_ext);
+ read_unlock(&et->lock);
+ } else {
memset(&ri->i_ext, 0, sizeof(ri->i_ext));
- set_raw_inline(F2FS_I(inode), ri);
+ }
+ set_raw_inline(inode, ri);
ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
ri->i_generation = cpu_to_le32(inode->i_generation);
ri->i_dir_level = F2FS_I(inode)->i_dir_level;
+ if (f2fs_has_extra_attr(inode)) {
+ ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);
+
+ if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) &&
+ F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
+ i_projid)) {
+ projid_t i_projid;
+
+ i_projid = from_kprojid(&init_user_ns,
+ F2FS_I(inode)->i_projid);
+ ri->i_projid = cpu_to_le32(i_projid);
+ }
+ }
+
__set_inode_rdev(inode, ri);
set_cold_node(inode, node_page);
- set_page_dirty(node_page);
- clear_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
+ /* deleted inode */
+ if (inode->i_nlink == 0)
+ clear_inline_node(node_page);
+
+ return set_page_dirty(node_page);
}
-void update_inode_page(struct inode *inode)
+int update_inode_page(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct page *node_page;
+ int ret = 0;
retry:
node_page = get_node_page(sbi, inode->i_ino);
if (IS_ERR(node_page)) {
cond_resched();
goto retry;
} else if (err != -ENOENT) {
- f2fs_stop_checkpoint(sbi);
+ f2fs_stop_checkpoint(sbi, false);
}
- return;
+ return 0;
}
- update_inode(inode, node_page);
+ ret = update_inode(inode, node_page);
f2fs_put_page(node_page, 1);
+ return ret;
}
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
inode->i_ino == F2FS_META_INO(sbi))
return 0;
- if (!is_inode_flag_set(F2FS_I(inode), FI_DIRTY_INODE))
+ if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
return 0;
/*
* during the urgent cleaning time when running out of free sections.
*/
update_inode_page(inode);
-
- f2fs_balance_fs(sbi);
+ if (wbc && wbc->nr_to_write)
+ f2fs_balance_fs(sbi, true);
return 0;
}
void f2fs_evict_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct f2fs_inode_info *fi = F2FS_I(inode);
- nid_t xnid = fi->i_xattr_nid;
+ nid_t xnid = F2FS_I(inode)->i_xattr_nid;
int err = 0;
/* some remained atomic pages should discarded */
if (f2fs_is_atomic_file(inode))
- commit_inmem_pages(inode, true);
+ drop_inmem_pages(inode);
trace_f2fs_evict_inode(inode);
truncate_inode_pages_final(&inode->i_data);
goto out_clear;
f2fs_bug_on(sbi, get_dirty_pages(inode));
- remove_dirty_dir_inode(inode);
+ remove_dirty_inode(inode);
f2fs_destroy_extent_tree(inode);
if (inode->i_nlink || is_bad_inode(inode))
goto no_delete;
+ dquot_initialize(inode);
+
+ remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
+ remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
+
sb_start_intwrite(inode->i_sb);
- set_inode_flag(fi, FI_NO_ALLOC);
+ set_inode_flag(inode, FI_NO_ALLOC);
i_size_write(inode, 0);
-
+retry:
if (F2FS_HAS_BLOCKS(inode))
- err = f2fs_truncate(inode, true);
+ err = f2fs_truncate(inode);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
+ f2fs_show_injection_info(FAULT_EVICT_INODE);
+ err = -EIO;
+ }
+#endif
if (!err) {
f2fs_lock_op(sbi);
err = remove_inode_page(inode);
f2fs_unlock_op(sbi);
+ if (err == -ENOENT)
+ err = 0;
+ }
+
+ /* give more chances, if ENOMEM case */
+ if (err == -ENOMEM) {
+ err = 0;
+ goto retry;
}
+ if (err)
+ update_inode_page(inode);
+ dquot_free_inode(inode);
sb_end_intwrite(inode->i_sb);
no_delete:
+ dquot_drop(inode);
+
stat_dec_inline_xattr(inode);
stat_dec_inline_dir(inode);
stat_dec_inline_inode(inode);
- invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
+ if (!is_set_ckpt_flags(sbi, CP_ERROR_FLAG))
+ f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
+
+ /* ino == 0, if f2fs_new_inode() failed */
+ if (inode->i_ino)
+ invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
+ inode->i_ino);
if (xnid)
invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
- if (is_inode_flag_set(fi, FI_APPEND_WRITE))
- add_dirty_inode(sbi, inode->i_ino, APPEND_INO);
- if (is_inode_flag_set(fi, FI_UPDATE_WRITE))
- add_dirty_inode(sbi, inode->i_ino, UPDATE_INO);
- if (is_inode_flag_set(fi, FI_FREE_NID)) {
- if (err && err != -ENOENT)
- alloc_nid_done(sbi, inode->i_ino);
- else
- alloc_nid_failed(sbi, inode->i_ino);
- clear_inode_flag(fi, FI_FREE_NID);
+ if (inode->i_nlink) {
+ if (is_inode_flag_set(inode, FI_APPEND_WRITE))
+ add_ino_entry(sbi, inode->i_ino, APPEND_INO);
+ if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
+ add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
}
-
- if (err && err != -ENOENT) {
- if (!exist_written_data(sbi, inode->i_ino, ORPHAN_INO)) {
- /*
- * get here because we failed to release resource
- * of inode previously, reminder our user to run fsck
- * for fixing.
- */
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_msg(sbi->sb, KERN_WARNING,
- "inode (ino:%lu) resource leak, run fsck "
- "to fix this issue!", inode->i_ino);
- }
+ if (is_inode_flag_set(inode, FI_FREE_NID)) {
+ alloc_nid_failed(sbi, inode->i_ino);
+ clear_inode_flag(inode, FI_FREE_NID);
+ } else {
+ f2fs_bug_on(sbi, err &&
+ !exist_written_data(sbi, inode->i_ino, ORPHAN_INO));
}
out_clear:
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- if (fi->i_crypt_info)
- f2fs_free_encryption_info(inode, fi->i_crypt_info);
-#endif
+ fscrypt_put_encryption_info(inode, NULL);
clear_inode(inode);
}
void handle_failed_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- int err = 0;
+ struct node_info ni;
+ /*
+ * clear nlink of the inode in order to release its resources
+ * immediately.
+ */
clear_nlink(inode);
- make_bad_inode(inode);
- unlock_new_inode(inode);
- i_size_write(inode, 0);
- if (F2FS_HAS_BLOCKS(inode))
- err = f2fs_truncate(inode, false);
+ /*
+ * we must call this to avoid the inode remaining dirty, which would
+ * result in a panic when flushing dirty inodes in gdirty_list.
+ */
+ update_inode_page(inode);
+ f2fs_inode_synced(inode);
- if (!err)
- err = remove_inode_page(inode);
+ /* don't make bad inode, since it becomes a regular file. */
+ unlock_new_inode(inode);
/*
- * if we skip truncate_node in remove_inode_page bacause we failed
- * before, it's better to find another way to release resource of
- * this inode (e.g. valid block count, node block or nid). Here we
- * choose to add this inode to orphan list, so that we can call iput
- * for releasing in orphan recovery flow.
- *
* Note: we should add inode to orphan list before f2fs_unlock_op()
* so we can prevent losing this orphan when encountering a checkpoint
* followed by a sudden power-off.
*/
- if (err && err != -ENOENT) {
- err = acquire_orphan_inode(sbi);
- if (!err)
- add_orphan_inode(sbi, inode->i_ino);
+ get_node_info(sbi, inode->i_ino, &ni);
+
+ if (ni.blk_addr != NULL_ADDR) {
+ int err = acquire_orphan_inode(sbi);
+ if (err) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "Too many orphan inodes, run fsck to fix.");
+ } else {
+ add_orphan_inode(inode);
+ }
+ alloc_nid_done(sbi, inode->i_ino);
+ } else {
+ set_inode_flag(inode, FI_FREE_NID);
}
- set_inode_flag(F2FS_I(inode), FI_FREE_NID);
f2fs_unlock_op(sbi);
/* iput will drop the inode object */
#include <linux/ctype.h>
#include <linux/dcache.h>
#include <linux/namei.h>
+#include <linux/quotaops.h>
#include "f2fs.h"
#include "node.h"
}
f2fs_unlock_op(sbi);
+ nid_free = true;
+
inode_init_owner(inode, dir, mode);
inode->i_ino = ino;
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+ inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
inode->i_generation = sbi->s_next_generation++;
err = insert_inode_locked(inode);
if (err) {
err = -EINVAL;
- nid_free = true;
goto fail;
}
+ if (f2fs_sb_has_project_quota(sbi->sb) &&
+ (F2FS_I(dir)->i_flags & FS_PROJINHERIT_FL))
+ F2FS_I(inode)->i_projid = F2FS_I(dir)->i_projid;
+ else
+ F2FS_I(inode)->i_projid = make_kprojid(&init_user_ns,
+ F2FS_DEF_PROJID);
+
+ err = dquot_initialize(inode);
+ if (err)
+ goto fail_drop;
+
+ err = dquot_alloc_inode(inode);
+ if (err)
+ goto fail_drop;
+
/* If the directory encrypted, then we should encrypt the inode. */
if (f2fs_encrypted_inode(dir) && f2fs_may_encrypt(inode))
f2fs_set_encrypted_inode(inode);
- if (f2fs_may_inline_data(inode))
- set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
+ set_inode_flag(inode, FI_NEW_INODE);
+
+ if (f2fs_sb_has_extra_attr(sbi->sb)) {
+ set_inode_flag(inode, FI_EXTRA_ATTR);
+ F2FS_I(inode)->i_extra_isize = F2FS_TOTAL_EXTRA_ATTR_SIZE;
+ }
+
+ if (test_opt(sbi, INLINE_XATTR))
+ set_inode_flag(inode, FI_INLINE_XATTR);
+ if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode))
+ set_inode_flag(inode, FI_INLINE_DATA);
if (f2fs_may_inline_dentry(inode))
- set_inode_flag(F2FS_I(inode), FI_INLINE_DENTRY);
+ set_inode_flag(inode, FI_INLINE_DENTRY);
f2fs_init_extent_tree(inode, NULL);
stat_inc_inline_inode(inode);
stat_inc_inline_dir(inode);
+ F2FS_I(inode)->i_flags =
+ f2fs_mask_flags(mode, F2FS_I(dir)->i_flags & F2FS_FL_INHERITED);
+
+ if (S_ISDIR(inode->i_mode))
+ F2FS_I(inode)->i_flags |= FS_INDEX_FL;
+
+ if (F2FS_I(inode)->i_flags & FS_PROJINHERIT_FL)
+ set_inode_flag(inode, FI_PROJ_INHERIT);
+
trace_f2fs_new_inode(inode, 0);
- mark_inode_dirty(inode);
return inode;
fail:
trace_f2fs_new_inode(inode, err);
make_bad_inode(inode);
if (nid_free)
- set_inode_flag(F2FS_I(inode), FI_FREE_NID);
+ set_inode_flag(inode, FI_FREE_NID);
+ iput(inode);
+ return ERR_PTR(err);
+fail_drop:
+ trace_f2fs_new_inode(inode, err);
+ dquot_drop(inode);
+ inode->i_flags |= S_NOQUOTA;
+ if (nid_free)
+ set_inode_flag(inode, FI_FREE_NID);
+ clear_nlink(inode);
+ unlock_new_inode(inode);
iput(inode);
return ERR_PTR(err);
}
{
size_t slen = strlen(s);
size_t sublen = strlen(sub);
+ int i;
/*
* filename format of multimedia file should be defined as:
- * "filename + '.' + extension".
+ * "filename + '.' + extension + (optional: '.' + temp extension)".
*/
if (slen < sublen + 2)
return 0;
- if (s[slen - sublen - 1] != '.')
- return 0;
+ for (i = 1; i < slen - sublen; i++) {
+ if (s[i] != '.')
+ continue;
+ if (!strncasecmp(s + i + 1, sub, sublen))
+ return 1;
+ }
- return !strncasecmp(s + slen - sublen, sub, sublen);
+ return 0;
}
/*
nid_t ino = 0;
int err;
- f2fs_balance_fs(sbi);
+ err = dquot_initialize(dir);
+ if (err)
+ return err;
inode = f2fs_new_inode(dir, mode);
if (IS_ERR(inode))
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
+
+ f2fs_balance_fs(sbi, true);
return 0;
out:
handle_failed_inode(inode);
int err;
if (f2fs_encrypted_inode(dir) &&
- !f2fs_is_child_context_consistent_with_parent(dir, inode))
+ !fscrypt_has_permitted_context(dir, inode))
return -EPERM;
- f2fs_balance_fs(sbi);
+ if (is_inode_flag_set(dir, FI_PROJ_INHERIT) &&
+ (!projid_eq(F2FS_I(dir)->i_projid,
+ F2FS_I(old_dentry->d_inode)->i_projid)))
+ return -EXDEV;
+
+ err = dquot_initialize(dir);
+ if (err)
+ return err;
+
+ f2fs_balance_fs(sbi, true);
- inode->i_ctime = CURRENT_TIME;
+ inode->i_ctime = current_time(inode);
ihold(inode);
- set_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ set_inode_flag(inode, FI_INC_LINK);
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
if (err)
f2fs_sync_fs(sbi->sb, 1);
return 0;
out:
- clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ clear_inode_flag(inode, FI_INC_LINK);
iput(inode);
f2fs_unlock_op(sbi);
return err;
struct dentry *f2fs_get_parent(struct dentry *child)
{
struct qstr dotdot = QSTR_INIT("..", 2);
- unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot);
- if (!ino)
+ struct page *page;
+ unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot, &page);
+ if (!ino) {
+ if (IS_ERR(page))
+ return ERR_CAST(page);
return ERR_PTR(-ENOENT);
- return d_obtain_alias(f2fs_iget(d_inode(child)->i_sb, ino));
+ }
+ return d_obtain_alias(f2fs_iget(child->d_sb, ino));
}
static int __recover_dot_dentries(struct inode *dir, nid_t pino)
struct page *page;
int err = 0;
+ if (f2fs_readonly(sbi->sb)) {
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "skip recovering inline_dots inode (ino:%lu, pino:%u) "
+ "in readonly mountpoint", dir->i_ino, pino);
+ return 0;
+ }
+
+ err = dquot_initialize(dir);
+ if (err)
+ return err;
+
+ f2fs_balance_fs(sbi, true);
+
f2fs_lock_op(sbi);
de = f2fs_find_entry(dir, &dot, &page);
if (de) {
f2fs_dentry_kunmap(dir, page);
f2fs_put_page(page, 0);
+ } else if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto out;
} else {
err = __f2fs_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR);
if (err)
if (de) {
f2fs_dentry_kunmap(dir, page);
f2fs_put_page(page, 0);
+ } else if (IS_ERR(page)) {
+ err = PTR_ERR(page);
} else {
err = __f2fs_add_link(dir, &dotdot, NULL, pino, S_IFDIR);
}
out:
- if (!err) {
- clear_inode_flag(F2FS_I(dir), FI_INLINE_DOTS);
- mark_inode_dirty(dir);
- }
+ if (!err)
+ clear_inode_flag(dir, FI_INLINE_DOTS);
f2fs_unlock_op(sbi);
return err;
struct page *page;
nid_t ino;
int err = 0;
+ unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir));
+
+ if (f2fs_encrypted_inode(dir)) {
+ int res = fscrypt_get_encryption_info(dir);
+
+ /*
+ * DCACHE_ENCRYPTED_WITH_KEY is set if the dentry is
+ * created while the directory was encrypted and we
+ * have access to the key.
+ */
+ if (fscrypt_has_encryption_key(dir))
+ fscrypt_set_encrypted_dentry(dentry);
+ fscrypt_set_d_op(dentry);
+ if (res && res != -ENOKEY)
+ return ERR_PTR(res);
+ }
if (dentry->d_name.len > F2FS_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
de = f2fs_find_entry(dir, &dentry->d_name, &page);
- if (!de)
+ if (!de) {
+ if (IS_ERR(page))
+ return (struct dentry *)page;
return d_splice_alias(inode, dentry);
+ }
ino = le32_to_cpu(de->ino);
f2fs_dentry_kunmap(dir, page);
if (IS_ERR(inode))
return ERR_CAST(inode);
+ if ((dir->i_ino == root_ino) && f2fs_has_inline_dots(dir)) {
+ err = __recover_dot_dentries(dir, root_ino);
+ if (err)
+ goto err_out;
+ }
+
if (f2fs_has_inline_dots(inode)) {
err = __recover_dot_dentries(inode, dir->i_ino);
if (err)
goto err_out;
}
+ if (f2fs_encrypted_inode(dir) &&
+ (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
+ !fscrypt_has_permitted_context(dir, inode)) {
+ f2fs_msg(inode->i_sb, KERN_WARNING,
+ "Inconsistent encryption contexts: %lu/%lu",
+ dir->i_ino, inode->i_ino);
+ err = -EPERM;
+ goto err_out;
+ }
return d_splice_alias(inode, dentry);
err_out:
- iget_failed(inode);
+ iput(inode);
return ERR_PTR(err);
}
int err = -ENOENT;
trace_f2fs_unlink_enter(dir, dentry);
- f2fs_balance_fs(sbi);
+
+ err = dquot_initialize(dir);
+ if (err)
+ return err;
de = f2fs_find_entry(dir, &dentry->d_name, &page);
- if (!de)
+ if (!de) {
+ if (IS_ERR(page))
+ err = PTR_ERR(page);
goto fail;
+ }
+
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
err = acquire_orphan_inode(sbi);
f2fs_delete_entry(de, page, dir, inode);
f2fs_unlock_op(sbi);
- /* In order to evict this inode, we set it dirty */
- mark_inode_dirty(inode);
-
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
fail:
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct inode *inode;
size_t len = strlen(symname);
- size_t p_len;
- char *p_str;
- struct f2fs_str disk_link = FSTR_INIT(NULL, 0);
- struct f2fs_encrypted_symlink_data *sd = NULL;
+ struct fscrypt_str disk_link = FSTR_INIT((char *)symname, len + 1);
+ struct fscrypt_symlink_data *sd = NULL;
int err;
- if (len > dir->i_sb->s_blocksize)
+ if (f2fs_encrypted_inode(dir)) {
+ err = fscrypt_get_encryption_info(dir);
+ if (err)
+ return err;
+
+ if (!fscrypt_has_encryption_key(dir))
+ return -ENOKEY;
+
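+	/* the on-disk link stores a fscrypt_symlink_data header followed by the encrypted target */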
+ disk_link.len = (fscrypt_fname_encrypted_size(dir, len) +
+ sizeof(struct fscrypt_symlink_data));
+ }
+
+ if (disk_link.len > dir->i_sb->s_blocksize)
return -ENAMETOOLONG;
- f2fs_balance_fs(sbi);
+ err = dquot_initialize(dir);
+ if (err)
+ return err;
inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO);
if (IS_ERR(inode))
inode->i_op = &f2fs_encrypted_symlink_inode_operations;
else
inode->i_op = &f2fs_symlink_inode_operations;
+ inode_nohighmem(inode);
inode->i_mapping->a_ops = &f2fs_dblock_aops;
f2fs_lock_op(sbi);
f2fs_unlock_op(sbi);
alloc_nid_done(sbi, inode->i_ino);
- if (f2fs_encrypted_inode(dir)) {
+ if (f2fs_encrypted_inode(inode)) {
struct qstr istr = QSTR_INIT(symname, len);
+ struct fscrypt_str ostr;
- err = f2fs_get_encryption_info(inode);
- if (err)
+ sd = kzalloc(disk_link.len, GFP_NOFS);
+ if (!sd) {
+ err = -ENOMEM;
goto err_out;
+ }
- err = f2fs_fname_crypto_alloc_buffer(inode, len, &disk_link);
+ err = fscrypt_get_encryption_info(inode);
if (err)
goto err_out;
- err = f2fs_fname_usr_to_disk(inode, &istr, &disk_link);
- if (err < 0)
- goto err_out;
-
- p_len = encrypted_symlink_data_len(disk_link.len) + 1;
-
- if (p_len > dir->i_sb->s_blocksize) {
- err = -ENAMETOOLONG;
+ if (!fscrypt_has_encryption_key(inode)) {
+ err = -ENOKEY;
goto err_out;
}
- sd = kzalloc(p_len, GFP_NOFS);
- if (!sd) {
- err = -ENOMEM;
+ ostr.name = sd->encrypted_path;
+ ostr.len = disk_link.len;
+ err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr);
+ if (err)
goto err_out;
- }
- memcpy(sd->encrypted_path, disk_link.name, disk_link.len);
- sd->len = cpu_to_le16(disk_link.len);
- p_str = (char *)sd;
- } else {
- p_len = len + 1;
- p_str = (char *)symname;
+
+ sd->len = cpu_to_le16(ostr.len);
+ disk_link.name = (char *)sd;
}
- err = page_symlink(inode, p_str, p_len);
+ err = page_symlink(inode, disk_link.name, disk_link.len);
err_out:
d_instantiate(dentry, inode);
* performance regression.
*/
if (!err) {
- filemap_write_and_wait_range(inode->i_mapping, 0, p_len - 1);
+ filemap_write_and_wait_range(inode->i_mapping, 0,
+ disk_link.len - 1);
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
}
kfree(sd);
- f2fs_fname_crypto_free_buffer(&disk_link);
+
+ f2fs_balance_fs(sbi, true);
return err;
out:
handle_failed_inode(inode);
struct inode *inode;
int err;
- f2fs_balance_fs(sbi);
+ err = dquot_initialize(dir);
+ if (err)
+ return err;
inode = f2fs_new_inode(dir, S_IFDIR | mode);
if (IS_ERR(inode))
inode->i_mapping->a_ops = &f2fs_dblock_aops;
mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);
- set_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ set_inode_flag(inode, FI_INC_LINK);
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
if (err)
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
+
+ f2fs_balance_fs(sbi, true);
return 0;
out_fail:
- clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ clear_inode_flag(inode, FI_INC_LINK);
handle_failed_inode(inode);
return err;
}
struct inode *inode;
int err = 0;
- f2fs_balance_fs(sbi);
+ err = dquot_initialize(dir);
+ if (err)
+ return err;
inode = f2fs_new_inode(dir, mode);
if (IS_ERR(inode))
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
+
+ f2fs_balance_fs(sbi, true);
return 0;
out:
handle_failed_inode(inode);
struct inode *inode;
int err;
- if (!whiteout)
- f2fs_balance_fs(sbi);
+ err = dquot_initialize(dir);
+ if (err)
+ return err;
inode = f2fs_new_inode(dir, mode);
if (IS_ERR(inode))
* add this non-linked tmpfile to orphan list, in this way we could
* remove all unused data of tmpfile after abnormal power-off.
*/
- add_orphan_inode(sbi, inode->i_ino);
- f2fs_unlock_op(sbi);
-
+ add_orphan_inode(inode);
alloc_nid_done(sbi, inode->i_ino);
if (whiteout) {
- inode_dec_link_count(inode);
+ f2fs_i_links_write(inode, false);
*whiteout = inode;
} else {
d_tmpfile(dentry, inode);
}
+ /* link_count was changed by d_tmpfile as well. */
+ f2fs_unlock_op(sbi);
unlock_new_inode(inode);
+
+ f2fs_balance_fs(sbi, true);
return 0;
release_out:
static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
if (f2fs_encrypted_inode(dir)) {
- int err = f2fs_get_encryption_info(dir);
+ int err = fscrypt_get_encryption_info(dir);
if (err)
return err;
}
struct f2fs_dir_entry *old_dir_entry = NULL;
struct f2fs_dir_entry *old_entry;
struct f2fs_dir_entry *new_entry;
+ bool is_old_inline = f2fs_has_inline_dentry(old_dir);
int err = -ENOENT;
+ if ((f2fs_encrypted_inode(old_dir) &&
+ !fscrypt_has_encryption_key(old_dir)) ||
+ (f2fs_encrypted_inode(new_dir) &&
+ !fscrypt_has_encryption_key(new_dir)))
+ return -ENOKEY;
+
if ((old_dir != new_dir) && f2fs_encrypted_inode(new_dir) &&
- !f2fs_is_child_context_consistent_with_parent(new_dir,
- old_inode)) {
+ !fscrypt_has_permitted_context(new_dir, old_inode)) {
err = -EPERM;
goto out;
}
- f2fs_balance_fs(sbi);
+ if (is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
+ (!projid_eq(F2FS_I(new_dir)->i_projid,
+ F2FS_I(old_dentry->d_inode)->i_projid)))
+ return -EXDEV;
+
+ err = dquot_initialize(old_dir);
+ if (err)
+ goto out;
+
+ err = dquot_initialize(new_dir);
+ if (err)
+ goto out;
old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
- if (!old_entry)
+ if (!old_entry) {
+ if (IS_ERR(old_page))
+ err = PTR_ERR(old_page);
goto out;
+ }
if (S_ISDIR(old_inode->i_mode)) {
- err = -EIO;
old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
- if (!old_dir_entry)
+ if (!old_dir_entry) {
+ if (IS_ERR(old_dir_page))
+ err = PTR_ERR(old_dir_page);
goto out_old;
+ }
}
if (flags & RENAME_WHITEOUT) {
err = -ENOENT;
new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
&new_page);
- if (!new_entry)
+ if (!new_entry) {
+ if (IS_ERR(new_page))
+ err = PTR_ERR(new_page);
goto out_whiteout;
+ }
+
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
if (err)
goto put_out_dir;
- if (update_dent_inode(old_inode, new_inode,
- &new_dentry->d_name)) {
- release_orphan_inode(sbi);
- goto put_out_dir;
- }
-
f2fs_set_link(new_dir, new_entry, new_page, old_inode);
- new_inode->i_ctime = CURRENT_TIME;
+ new_inode->i_ctime = current_time(new_inode);
down_write(&F2FS_I(new_inode)->i_sem);
if (old_dir_entry)
- drop_nlink(new_inode);
- drop_nlink(new_inode);
+ f2fs_i_links_write(new_inode, false);
+ f2fs_i_links_write(new_inode, false);
up_write(&F2FS_I(new_inode)->i_sem);
- mark_inode_dirty(new_inode);
-
if (!new_inode->i_nlink)
- add_orphan_inode(sbi, new_inode->i_ino);
+ add_orphan_inode(new_inode);
else
release_orphan_inode(sbi);
-
- update_inode_page(old_inode);
- update_inode_page(new_inode);
} else {
+ f2fs_balance_fs(sbi, true);
+
f2fs_lock_op(sbi);
err = f2fs_add_link(new_dentry, old_inode);
goto out_whiteout;
}
- if (old_dir_entry) {
- inc_nlink(new_dir);
- update_inode_page(new_dir);
+ if (old_dir_entry)
+ f2fs_i_links_write(new_dir, true);
+
+ /*
+ * old entry and new entry can locate in the same inline
+ * dentry in inode, when attaching new entry in inline dentry,
+ * it could force inline dentry conversion, after that,
+ * old_entry and old_page will point to wrong address, in
+ * order to avoid this, let's do the check and update here.
+ */
+ if (is_old_inline && !f2fs_has_inline_dentry(old_dir)) {
+ f2fs_put_page(old_page, 0);
+ old_page = NULL;
+
+ old_entry = f2fs_find_entry(old_dir,
+ &old_dentry->d_name, &old_page);
+ if (!old_entry) {
+ err = -ENOENT;
+ if (IS_ERR(old_page))
+ err = PTR_ERR(old_page);
+ f2fs_unlock_op(sbi);
+ goto out_whiteout;
+ }
}
}
down_write(&F2FS_I(old_inode)->i_sem);
- file_lost_pino(old_inode);
- if (new_inode && file_enc_name(new_inode))
- file_set_enc_name(old_inode);
+ if (!old_dir_entry || whiteout)
+ file_lost_pino(old_inode);
+ else
+ F2FS_I(old_inode)->i_pino = new_dir->i_ino;
up_write(&F2FS_I(old_inode)->i_sem);
- old_inode->i_ctime = CURRENT_TIME;
- mark_inode_dirty(old_inode);
+ old_inode->i_ctime = current_time(old_inode);
+ f2fs_mark_inode_dirty_sync(old_inode, false);
f2fs_delete_entry(old_entry, old_page, old_dir, NULL);
if (whiteout) {
whiteout->i_state |= I_LINKABLE;
- set_inode_flag(F2FS_I(whiteout), FI_INC_LINK);
+ set_inode_flag(whiteout, FI_INC_LINK);
err = f2fs_add_link(old_dentry, whiteout);
if (err)
goto put_out_dir;
if (old_dir != new_dir && !whiteout) {
f2fs_set_link(old_inode, old_dir_entry,
old_dir_page, new_dir);
- update_inode_page(old_inode);
} else {
f2fs_dentry_kunmap(old_inode, old_dir_page);
f2fs_put_page(old_dir_page, 0);
}
- drop_nlink(old_dir);
- mark_inode_dirty(old_dir);
- update_inode_page(old_dir);
+ f2fs_i_links_write(old_dir, false);
}
f2fs_unlock_op(sbi);
int old_nlink = 0, new_nlink = 0;
int err = -ENOENT;
+ if ((f2fs_encrypted_inode(old_dir) &&
+ !fscrypt_has_encryption_key(old_dir)) ||
+ (f2fs_encrypted_inode(new_dir) &&
+ !fscrypt_has_encryption_key(new_dir)))
+ return -ENOKEY;
+
if ((f2fs_encrypted_inode(old_dir) || f2fs_encrypted_inode(new_dir)) &&
- (old_dir != new_dir) &&
- (!f2fs_is_child_context_consistent_with_parent(new_dir,
- old_inode) ||
- !f2fs_is_child_context_consistent_with_parent(old_dir,
- new_inode)))
+ (old_dir != new_dir) &&
+ (!fscrypt_has_permitted_context(new_dir, old_inode) ||
+ !fscrypt_has_permitted_context(old_dir, new_inode)))
return -EPERM;
- f2fs_balance_fs(sbi);
+ if ((is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
+ !projid_eq(F2FS_I(new_dir)->i_projid,
+ F2FS_I(old_dentry->d_inode)->i_projid)) ||
+ (is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
+ !projid_eq(F2FS_I(old_dir)->i_projid,
+ F2FS_I(new_dentry->d_inode)->i_projid)))
+ return -EXDEV;
+
+ err = dquot_initialize(old_dir);
+ if (err)
+ goto out;
+
+ err = dquot_initialize(new_dir);
+ if (err)
+ goto out;
old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
- if (!old_entry)
+ if (!old_entry) {
+ if (IS_ERR(old_page))
+ err = PTR_ERR(old_page);
goto out;
+ }
new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page);
- if (!new_entry)
+ if (!new_entry) {
+ if (IS_ERR(new_page))
+ err = PTR_ERR(new_page);
goto out_old;
+ }
/* prepare for updating ".." directory entry info later */
if (old_dir != new_dir) {
if (S_ISDIR(old_inode->i_mode)) {
- err = -EIO;
old_dir_entry = f2fs_parent_dir(old_inode,
&old_dir_page);
- if (!old_dir_entry)
+ if (!old_dir_entry) {
+ if (IS_ERR(old_dir_page))
+ err = PTR_ERR(old_dir_page);
goto out_new;
+ }
}
if (S_ISDIR(new_inode->i_mode)) {
- err = -EIO;
new_dir_entry = f2fs_parent_dir(new_inode,
&new_dir_page);
- if (!new_dir_entry)
+ if (!new_dir_entry) {
+ if (IS_ERR(new_dir_page))
+ err = PTR_ERR(new_dir_page);
goto out_old_dir;
+ }
}
}
old_nlink = old_dir_entry ? -1 : 1;
new_nlink = -old_nlink;
err = -EMLINK;
- if ((old_nlink > 0 && old_inode->i_nlink >= F2FS_LINK_MAX) ||
- (new_nlink > 0 && new_inode->i_nlink >= F2FS_LINK_MAX))
+ if ((old_nlink > 0 && old_dir->i_nlink >= F2FS_LINK_MAX) ||
+ (new_nlink > 0 && new_dir->i_nlink >= F2FS_LINK_MAX))
goto out_new_dir;
}
- f2fs_lock_op(sbi);
-
- err = update_dent_inode(old_inode, new_inode, &new_dentry->d_name);
- if (err)
- goto out_unlock;
- if (file_enc_name(new_inode))
- file_set_enc_name(old_inode);
+ f2fs_balance_fs(sbi, true);
- err = update_dent_inode(new_inode, old_inode, &old_dentry->d_name);
- if (err)
- goto out_undo;
- if (file_enc_name(old_inode))
- file_set_enc_name(new_inode);
+ f2fs_lock_op(sbi);
/* update ".." directory entry info of old dentry */
if (old_dir_entry)
file_lost_pino(old_inode);
up_write(&F2FS_I(old_inode)->i_sem);
- update_inode_page(old_inode);
-
- old_dir->i_ctime = CURRENT_TIME;
+ old_dir->i_ctime = current_time(old_dir);
if (old_nlink) {
down_write(&F2FS_I(old_dir)->i_sem);
- if (old_nlink < 0)
- drop_nlink(old_dir);
- else
- inc_nlink(old_dir);
+ f2fs_i_links_write(old_dir, old_nlink > 0);
up_write(&F2FS_I(old_dir)->i_sem);
}
- mark_inode_dirty(old_dir);
- update_inode_page(old_dir);
+ f2fs_mark_inode_dirty_sync(old_dir, false);
/* update directory entry info of new dir inode */
f2fs_set_link(new_dir, new_entry, new_page, old_inode);
file_lost_pino(new_inode);
up_write(&F2FS_I(new_inode)->i_sem);
- update_inode_page(new_inode);
-
- new_dir->i_ctime = CURRENT_TIME;
+ new_dir->i_ctime = current_time(new_dir);
if (new_nlink) {
down_write(&F2FS_I(new_dir)->i_sem);
- if (new_nlink < 0)
- drop_nlink(new_dir);
- else
- inc_nlink(new_dir);
+ f2fs_i_links_write(new_dir, new_nlink > 0);
up_write(&F2FS_I(new_dir)->i_sem);
}
- mark_inode_dirty(new_dir);
- update_inode_page(new_dir);
+ f2fs_mark_inode_dirty_sync(new_dir, false);
f2fs_unlock_op(sbi);
if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
f2fs_sync_fs(sbi->sb, 1);
return 0;
-out_undo:
- /*
- * Still we may fail to recover name info of f2fs_inode here
- * Drop it, once its name is set as encrypted
- */
- update_dent_inode(old_inode, old_inode, &old_dentry->d_name);
-out_unlock:
- f2fs_unlock_op(sbi);
out_new_dir:
if (new_dir_entry) {
f2fs_dentry_kunmap(new_inode, new_dir_page);
return f2fs_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
}
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
static const char *f2fs_encrypted_follow_link(struct dentry *dentry, void **cookie)
{
struct page *cpage = NULL;
char *caddr, *paddr = NULL;
- struct f2fs_str cstr;
- struct f2fs_str pstr = FSTR_INIT(NULL, 0);
+ struct fscrypt_str cstr = FSTR_INIT(NULL, 0);
+ struct fscrypt_str pstr = FSTR_INIT(NULL, 0);
+ struct fscrypt_symlink_data *sd;
struct inode *inode = d_inode(dentry);
- struct f2fs_encrypted_symlink_data *sd;
- loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
u32 max_size = inode->i_sb->s_blocksize;
int res;
- res = f2fs_get_encryption_info(inode);
+ res = fscrypt_get_encryption_info(inode);
if (res)
return ERR_PTR(res);
cpage = read_mapping_page(inode->i_mapping, 0, NULL);
if (IS_ERR(cpage))
return ERR_CAST(cpage);
- caddr = kmap(cpage);
- caddr[size] = 0;
+ caddr = page_address(cpage);
/* Symlink is encrypted */
- sd = (struct f2fs_encrypted_symlink_data *)caddr;
+ sd = (struct fscrypt_symlink_data *)caddr;
+ cstr.name = sd->encrypted_path;
cstr.len = le16_to_cpu(sd->len);
- cstr.name = kmalloc(cstr.len, GFP_NOFS);
- if (!cstr.name) {
- res = -ENOMEM;
- goto errout;
- }
- memcpy(cstr.name, sd->encrypted_path, cstr.len);
/* this is broken symlink case */
- if (cstr.name[0] == 0 && cstr.len == 0) {
+ if (unlikely(cstr.len == 0)) {
res = -ENOENT;
goto errout;
}
- if ((cstr.len + sizeof(struct f2fs_encrypted_symlink_data) - 1) >
- max_size) {
+ if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) {
/* Symlink data on the disk is corrupted */
res = -EIO;
goto errout;
}
- res = f2fs_fname_crypto_alloc_buffer(inode, cstr.len, &pstr);
+ res = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr);
if (res)
goto errout;
- res = f2fs_fname_disk_to_usr(inode, NULL, &cstr, &pstr);
- if (res < 0)
+ res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
+ if (res)
goto errout;
- kfree(cstr.name);
+ /* this is broken symlink case */
+ if (unlikely(pstr.name[0] == 0)) {
+ res = -ENOENT;
+ goto errout;
+ }
paddr = pstr.name;
/* Null-terminate the name */
- paddr[res] = '\0';
+ paddr[pstr.len] = '\0';
- kunmap(cpage);
- page_cache_release(cpage);
+ put_page(cpage);
return *cookie = paddr;
errout:
- kfree(cstr.name);
- f2fs_fname_crypto_free_buffer(&pstr);
- kunmap(cpage);
- page_cache_release(cpage);
+ fscrypt_fname_free_buffer(&pstr);
+ put_page(cpage);
return ERR_PTR(res);
}
const struct inode_operations f2fs_encrypted_symlink_inode_operations = {
.readlink = generic_readlink,
- .follow_link = f2fs_encrypted_follow_link,
- .put_link = kfree_put_link,
+ .follow_link = f2fs_encrypted_follow_link,
+ .put_link = kfree_put_link,
.getattr = f2fs_getattr,
.setattr = f2fs_setattr,
+#ifdef CONFIG_F2FS_FS_XATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.listxattr = f2fs_listxattr,
.removexattr = generic_removexattr,
-};
#endif
+};
const struct inode_operations f2fs_dir_inode_operations = {
.create = f2fs_create,
#include "f2fs.h"
#include "node.h"
#include "segment.h"
+#include "xattr.h"
#include "trace.h"
#include <trace/events/f2fs.h>
-#define on_build_free_nids(nmi) mutex_is_locked(&nm_i->build_lock)
+#define on_build_free_nids(nmi) mutex_is_locked(&(nm_i)->build_lock)
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
* give 25%, 25%, 50%, 50%, 50% memory for each components respectively
*/
if (type == FREE_NIDS) {
- mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
- PAGE_CACHE_SHIFT;
+ mem_size = (nm_i->nid_cnt[FREE_NID_LIST] *
+ sizeof(struct free_nid)) >> PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
} else if (type == NAT_ENTRIES) {
mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
- PAGE_CACHE_SHIFT;
+ PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
+ if (excess_cached_nats(sbi))
+ res = false;
} else if (type == DIRTY_DENTS) {
if (sbi->sb->s_bdi->wb.dirty_exceeded)
return false;
int i;
for (i = 0; i <= UPDATE_INO; i++)
- mem_size += (sbi->im[i].ino_num *
- sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
+ mem_size += sbi->im[i].ino_num *
+ sizeof(struct ino_entry);
+ mem_size >>= PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
} else if (type == EXTENT_CACHE) {
- mem_size = (sbi->total_ext_tree * sizeof(struct extent_tree) +
+ mem_size = (atomic_read(&sbi->total_ext_tree) *
+ sizeof(struct extent_tree) +
atomic_read(&sbi->total_ext_node) *
- sizeof(struct extent_node)) >> PAGE_CACHE_SHIFT;
+ sizeof(struct extent_node)) >> PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
} else {
- if (sbi->sb->s_bdi->wb.dirty_exceeded)
- return false;
+ if (!sbi->sb->s_bdi->wb.dirty_exceeded)
+ return true;
}
return res;
}
src_addr = page_address(src_page);
dst_addr = page_address(dst_page);
- memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
+ memcpy(dst_addr, src_addr, PAGE_SIZE);
set_page_dirty(dst_page);
f2fs_put_page(src_page, 1);
nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
struct nat_entry_set *head;
- if (get_nat_flag(ne, IS_DIRTY))
- return;
-
head = radix_tree_lookup(&nm_i->nat_set_root, set);
if (!head) {
head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);
head->entry_cnt = 0;
f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
}
- list_move_tail(&ne->list, &head->entry_list);
+
+ if (get_nat_flag(ne, IS_DIRTY))
+ goto refresh_list;
+
nm_i->dirty_nat_cnt++;
head->entry_cnt++;
set_nat_flag(ne, IS_DIRTY, true);
+refresh_list:
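+	/* an entry still at NEW_ADDR has no on-disk slot to update yet, so keep it off the set's flush list */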
+ if (nat_get_blkaddr(ne) == NEW_ADDR)
+ list_del_init(&ne->list);
+ else
+ list_move_tail(&ne->list, &head->entry_list);
}
static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
- struct nat_entry *ne)
+ struct nat_entry_set *set, struct nat_entry *ne)
{
- nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
- struct nat_entry_set *head;
-
- head = radix_tree_lookup(&nm_i->nat_set_root, set);
- if (head) {
- list_move_tail(&ne->list, &nm_i->nat_entries);
- set_nat_flag(ne, IS_DIRTY, false);
- head->entry_cnt--;
- nm_i->dirty_nat_cnt--;
- }
+ list_move_tail(&ne->list, &nm_i->nat_entries);
+ set_nat_flag(ne, IS_DIRTY, false);
+ set->entry_cnt--;
+ nm_i->dirty_nat_cnt--;
}
static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
return need_update;
}
-static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
+static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
+ bool no_fail)
{
struct nat_entry *new;
- new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
- f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
+ if (no_fail) {
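+		/* f2fs_kmem_cache_alloc() retries internally, so the no_fail path does not return NULL */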
+ new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
+ f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
+ } else {
+ new = kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
+ if (!new)
+ return NULL;
+ if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
+ kmem_cache_free(nat_entry_slab, new);
+ return NULL;
+ }
+ }
+
memset(new, 0, sizeof(struct nat_entry));
nat_set_nid(new, nid);
nat_reset_flag(new);
return new;
}
-static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
+static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
struct f2fs_nat_entry *ne)
{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
struct nat_entry *e;
- down_write(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid);
if (!e) {
- e = grab_nat_entry(nm_i, nid);
- node_info_from_raw_nat(&e->ni, ne);
+ e = grab_nat_entry(nm_i, nid, false);
+ if (e)
+ node_info_from_raw_nat(&e->ni, ne);
+ } else {
+ f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
+ nat_get_blkaddr(e) !=
+ le32_to_cpu(ne->block_addr) ||
+ nat_get_version(e) != ne->version);
}
- up_write(&nm_i->nat_tree_lock);
}
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
down_write(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, ni->nid);
if (!e) {
- e = grab_nat_entry(nm_i, ni->nid);
+ e = grab_nat_entry(nm_i, ni->nid, true);
copy_node_info(&e->ni, ni);
f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
} else if (new_blkaddr == NEW_ADDR) {
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
nid_t start_nid = START_NID(nid);
struct f2fs_nat_block *nat_blk;
struct page *page = NULL;
struct f2fs_nat_entry ne;
struct nat_entry *e;
+ pgoff_t index;
int i;
ni->nid = nid;
ni->ino = nat_get_ino(e);
ni->blk_addr = nat_get_blkaddr(e);
ni->version = nat_get_version(e);
- }
- up_read(&nm_i->nat_tree_lock);
- if (e)
+ up_read(&nm_i->nat_tree_lock);
return;
+ }
memset(&ne, 0, sizeof(struct f2fs_nat_entry));
/* Check current segment summary */
- mutex_lock(&curseg->curseg_mutex);
- i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
+ down_read(&curseg->journal_rwsem);
+ i = lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
if (i >= 0) {
- ne = nat_in_journal(sum, i);
+ ne = nat_in_journal(journal, i);
node_info_from_raw_nat(ni, &ne);
}
- mutex_unlock(&curseg->curseg_mutex);
- if (i >= 0)
+ up_read(&curseg->journal_rwsem);
+ if (i >= 0) {
+ up_read(&nm_i->nat_tree_lock);
goto cache;
+ }
/* Fill node_info from nat page */
- page = get_current_nat_page(sbi, start_nid);
+ index = current_nat_addr(sbi, nid);
+ up_read(&nm_i->nat_tree_lock);
+
+ page = get_meta_page(sbi, index);
nat_blk = (struct f2fs_nat_block *)page_address(page);
ne = nat_blk->entries[nid - start_nid];
node_info_from_raw_nat(ni, &ne);
f2fs_put_page(page, 1);
cache:
/* cache nat entry */
- cache_nat_entry(NM_I(sbi), nid, &ne);
+ down_write(&nm_i->nat_tree_lock);
+ cache_nat_entry(sbi, nid, &ne);
+ up_write(&nm_i->nat_tree_lock);
+}
+
+/*
+ * Readahead up to @n node pages referenced by @parent, starting from
+ * slot @start and clamped to NIDS_PER_BLOCK.
+ */
+static void ra_node_pages(struct page *parent, int start, int n)
+{
+ struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
+ struct blk_plug plug;
+ int i, end;
+ nid_t nid;
+
+ blk_start_plug(&plug);
+
+ /* try readahead for siblings of the desired node */
+ end = start + n;
+ end = min(end, NIDS_PER_BLOCK);
+ for (i = start; i < end; i++) {
+ nid = get_nid(parent, i, false);
+ ra_node_page(sbi, nid);
+ }
+
+ blk_finish_plug(&plug);
+}
+
+pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
+{
+ const long direct_index = ADDRS_PER_INODE(dn->inode);
+ const long direct_blks = ADDRS_PER_BLOCK;
+ const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
+ unsigned int skipped_unit = ADDRS_PER_BLOCK;
+ int cur_level = dn->cur_level;
+ int max_level = dn->max_level;
+ pgoff_t base = 0;
+
+ if (!dn->max_level)
+ return pgofs + 1;
+
+ while (max_level-- > cur_level)
+ skipped_unit *= NIDS_PER_BLOCK;
+
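+ /* intentional fall-through: sum up the offsets covered by lower levels */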
+ switch (dn->max_level) {
+ case 3:
+ base += 2 * indirect_blks;
+ case 2:
+ base += 2 * direct_blks;
+ case 1:
+ base += direct_index;
+ break;
+ default:
+ f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
+ }
+
+ return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}
/*
* The maximum depth is four.
* Offset[0] will have raw inode offset.
*/
-static int get_node_path(struct f2fs_inode_info *fi, long block,
+static int get_node_path(struct inode *inode, long block,
int offset[4], unsigned int noffset[4])
{
- const long direct_index = ADDRS_PER_INODE(fi);
+ const long direct_index = ADDRS_PER_INODE(inode);
const long direct_blks = ADDRS_PER_BLOCK;
const long dptrs_per_blk = NIDS_PER_BLOCK;
const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
level = 3;
goto got;
} else {
- BUG();
+ return -E2BIG;
}
got:
return level;
int offset[4];
unsigned int noffset[4];
nid_t nids[4];
- int level, i;
+ int level, i = 0;
int err = 0;
- level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);
+ level = get_node_path(dn->inode, index, offset, noffset);
+ if (level < 0)
+ return level;
nids[0] = dn->inode->i_ino;
npage[0] = dn->inode_page;
}
dn->nid = nids[i];
- npage[i] = new_node_page(dn, noffset[i], NULL);
+ npage[i] = new_node_page(dn, noffset[i]);
if (IS_ERR(npage[i])) {
alloc_nid_failed(sbi, nids[i]);
err = PTR_ERR(npage[i]);
dn->nid = nids[level];
dn->ofs_in_node = offset[level];
dn->node_page = npage[level];
- dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
+ dn->data_blkaddr = datablock_addr(dn->inode,
+ dn->node_page, dn->ofs_in_node);
return 0;
release_pages:
release_out:
dn->inode_page = NULL;
dn->node_page = NULL;
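+ /*
+ * Record where the lookup stopped so that get_next_page_offset()
+ * can skip the whole hole covered by the missing node block.
+ */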
+ if (err == -ENOENT) {
+ dn->cur_level = i;
+ dn->max_level = level;
+ dn->ofs_in_node = offset[level];
+ }
return err;
}
struct node_info ni;
get_node_info(sbi, dn->nid, &ni);
- if (dn->inode->i_blocks == 0) {
- f2fs_bug_on(sbi, ni.blk_addr != NULL_ADDR);
- goto invalidate;
- }
f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
/* Deallocate node address */
invalidate_blocks(sbi, ni.blk_addr);
- dec_valid_node_count(sbi, dn->inode);
+ dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
set_node_addr(sbi, &ni, NULL_ADDR, false);
if (dn->nid == dn->inode->i_ino) {
remove_orphan_inode(sbi, dn->nid);
dec_valid_inode_count(sbi);
- } else {
- sync_inode_page(dn);
+ f2fs_inode_synced(dn->inode);
}
-invalidate:
+
clear_node_page_dirty(dn->node_page);
set_sbi_flag(sbi, SBI_IS_DIRTY);
return PTR_ERR(page);
}
+ ra_node_pages(page, ofs, NIDS_PER_BLOCK);
+
rn = F2FS_NODE(page);
if (depth < 3) {
for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
ret = truncate_dnode(&rdn);
if (ret < 0)
goto out_err;
- set_nid(page, i, 0, false);
+ if (set_nid(page, i, 0, false))
+ dn->node_changed = true;
}
} else {
child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
rdn.nid = child_nid;
ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
if (ret == (NIDS_PER_BLOCK + 1)) {
- set_nid(page, i, 0, false);
+ if (set_nid(page, i, 0, false))
+ dn->node_changed = true;
child_nofs += ret;
} else if (ret < 0 && ret != -ENOENT) {
goto out_err;
nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
}
+ ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
+
/* free direct nodes linked to a partial indirect node */
for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
child_nid = get_nid(pages[idx], i, false);
err = truncate_dnode(dn);
if (err < 0)
goto fail;
- set_nid(pages[idx], i, 0, false);
+ if (set_nid(pages[idx], i, 0, false))
+ dn->node_changed = true;
}
if (offset[idx + 1] == 0) {
trace_f2fs_truncate_inode_blocks_enter(inode, from);
- level = get_node_path(F2FS_I(inode), from, offset, noffset);
-restart:
+ level = get_node_path(inode, from, offset, noffset);
+ if (level < 0)
+ return level;
+
page = get_node_page(sbi, inode->i_ino);
if (IS_ERR(page)) {
trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
if (offset[1] == 0 &&
ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
lock_page(page);
- if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
- f2fs_put_page(page, 1);
- goto restart;
- }
- f2fs_wait_on_page_writeback(page, NODE);
+ BUG_ON(page->mapping != NODE_MAPPING(sbi));
+ f2fs_wait_on_page_writeback(page, NODE, true);
ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
set_page_dirty(page);
unlock_page(page);
if (IS_ERR(npage))
return PTR_ERR(npage);
- F2FS_I(inode)->i_xattr_nid = 0;
-
- /* need to do checkpoint during fsync */
- F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));
+ f2fs_i_xnid_write(inode, 0);
set_new_dnode(&dn, inode, page, npage, nid);
/* 0 is possible, after f2fs_new_inode() has failed */
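+ /* i_blocks is now counted in 512-byte sectors: one 4KB block is 8 */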
f2fs_bug_on(F2FS_I_SB(inode),
- inode->i_blocks != 0 && inode->i_blocks != 1);
+ inode->i_blocks != 0 && inode->i_blocks != 8);
/* will put inode & node pages */
truncate_node(&dn);
set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
/* caller should f2fs_put_page(page, 1); */
- return new_node_page(&dn, 0, NULL);
+ return new_node_page(&dn, 0);
}
-struct page *new_node_page(struct dnode_of_data *dn,
- unsigned int ofs, struct page *ipage)
+struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
- struct node_info old_ni, new_ni;
+ struct node_info new_ni;
struct page *page;
int err;
- if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
+ if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return ERR_PTR(-EPERM);
- page = grab_cache_page(NODE_MAPPING(sbi), dn->nid);
+ page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
if (!page)
return ERR_PTR(-ENOMEM);
- if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
- err = -ENOSPC;
+ if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
goto fail;
- }
- get_node_info(sbi, dn->nid, &old_ni);
-
- /* Reinitialize old_ni with new node page */
- f2fs_bug_on(sbi, old_ni.blk_addr != NULL_ADDR);
- new_ni = old_ni;
+#ifdef CONFIG_F2FS_CHECK_FS
+ get_node_info(sbi, dn->nid, &new_ni);
+ f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
+#endif
+ new_ni.nid = dn->nid;
new_ni.ino = dn->inode->i_ino;
+ new_ni.blk_addr = NULL_ADDR;
+ new_ni.flag = 0;
+ new_ni.version = 0;
set_node_addr(sbi, &new_ni, NEW_ADDR, false);
- f2fs_wait_on_page_writeback(page, NODE);
+ f2fs_wait_on_page_writeback(page, NODE, true);
fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
set_cold_node(dn->inode, page);
- SetPageUptodate(page);
- set_page_dirty(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
+ if (set_page_dirty(page))
+ dn->node_changed = true;
if (f2fs_has_xattr_block(ofs))
- F2FS_I(dn->inode)->i_xattr_nid = dn->nid;
+ f2fs_i_xnid_write(dn->inode, dn->nid);
- dn->node_page = page;
- if (ipage)
- update_inode(dn->inode, ipage);
- else
- sync_inode_page(dn);
if (ofs == 0)
inc_valid_inode_count(sbi);
-
return page;
fail:
* 0: f2fs_put_page(page, 0)
* LOCKED_PAGE or error: f2fs_put_page(page, 1)
*/
-static int read_node_page(struct page *page, int rw)
+static int read_node_page(struct page *page, int op_flags)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
struct node_info ni;
struct f2fs_io_info fio = {
.sbi = sbi,
.type = NODE,
- .rw = rw,
+ .op = REQ_OP_READ,
+ .op_flags = op_flags,
.page = page,
.encrypted_page = NULL,
};
+ if (PageUptodate(page))
+ return LOCKED_PAGE;
+
get_node_info(sbi, page->index, &ni);
if (unlikely(ni.blk_addr == NULL_ADDR)) {
return -ENOENT;
}
- if (PageUptodate(page))
- return LOCKED_PAGE;
-
- fio.blk_addr = ni.blk_addr;
+ fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
return f2fs_submit_page_bio(&fio);
}
struct page *apage;
int err;
- apage = find_get_page(NODE_MAPPING(sbi), nid);
- if (apage && PageUptodate(apage)) {
- f2fs_put_page(apage, 0);
+ if (!nid)
+ return;
+ f2fs_bug_on(sbi, check_nid_range(sbi, nid));
+
+ rcu_read_lock();
+ apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid);
+ rcu_read_unlock();
+ if (apage)
return;
- }
- f2fs_put_page(apage, 0);
- apage = grab_cache_page(NODE_MAPPING(sbi), nid);
+ apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
if (!apage)
return;
- err = read_node_page(apage, READA);
+ err = read_node_page(apage, REQ_RAHEAD);
f2fs_put_page(apage, err ? 1 : 0);
}
-struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
+static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
+ struct page *parent, int start)
{
struct page *page;
int err;
+
+ if (!nid)
+ return ERR_PTR(-ENOENT);
+ f2fs_bug_on(sbi, check_nid_range(sbi, nid));
repeat:
- page = grab_cache_page(NODE_MAPPING(sbi), nid);
+ page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
if (!page)
return ERR_PTR(-ENOMEM);
- err = read_node_page(page, READ_SYNC);
+ err = read_node_page(page, REQ_SYNC);
if (err < 0) {
f2fs_put_page(page, 1);
return ERR_PTR(err);
- } else if (err != LOCKED_PAGE) {
- lock_page(page);
+ } else if (err == LOCKED_PAGE) {
+ err = 0;
+ goto page_hit;
}
- if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
- ClearPageUptodate(page);
- f2fs_put_page(page, 1);
- return ERR_PTR(-EIO);
- }
+ if (parent)
+ ra_node_pages(parent, start + 1, MAX_RA_NODE);
+
+ lock_page(page);
+
if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
f2fs_put_page(page, 1);
goto repeat;
}
+
+ if (unlikely(!PageUptodate(page))) {
+ err = -EIO;
+ goto out_err;
+ }
+
+ if (!f2fs_inode_chksum_verify(sbi, page)) {
+ err = -EBADMSG;
+ goto out_err;
+ }
+page_hit:
+ if (unlikely(nid != nid_of_node(page))) {
+ f2fs_msg(sbi->sb, KERN_WARNING, "inconsistent node block, "
+ "nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
+ nid, nid_of_node(page), ino_of_node(page),
+ ofs_of_node(page), cpver_of_node(page),
+ next_blkaddr_of_node(page));
+ err = -EINVAL;
+out_err:
+ ClearPageUptodate(page);
+ f2fs_put_page(page, 1);
+ return ERR_PTR(err);
+ }
return page;
}
-/*
- * Return a locked page for the desired node page.
- * And, readahead MAX_RA_NODE number of node pages.
- */
+struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
+{
+ return __get_node_page(sbi, nid, NULL, 0);
+}
+
struct page *get_node_page_ra(struct page *parent, int start)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
- struct blk_plug plug;
+ nid_t nid = get_nid(parent, start, false);
+
+ return __get_node_page(sbi, nid, parent, start);
+}
+
+static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
+{
+ struct inode *inode;
struct page *page;
- int err, i, end;
- nid_t nid;
+ int ret;
- /* First, try getting the desired direct node. */
- nid = get_nid(parent, start, false);
- if (!nid)
- return ERR_PTR(-ENOENT);
-repeat:
- page = grab_cache_page(NODE_MAPPING(sbi), nid);
- if (!page)
- return ERR_PTR(-ENOMEM);
+ /* should flush inline_data before evict_inode */
+ inode = ilookup(sbi->sb, ino);
+ if (!inode)
+ return;
- err = read_node_page(page, READ_SYNC);
- if (err < 0) {
- f2fs_put_page(page, 1);
- return ERR_PTR(err);
- } else if (err == LOCKED_PAGE) {
- goto page_hit;
- }
+ page = pagecache_get_page(inode->i_mapping, 0, FGP_LOCK|FGP_NOWAIT, 0);
+ if (!page)
+ goto iput_out;
- blk_start_plug(&plug);
+ if (!PageUptodate(page))
+ goto page_out;
- /* Then, try readahead for siblings of the desired node */
- end = start + MAX_RA_NODE;
- end = min(end, NIDS_PER_BLOCK);
- for (i = start + 1; i < end; i++) {
- nid = get_nid(parent, i, false);
- if (!nid)
- continue;
- ra_node_page(sbi, nid);
- }
+ if (!PageDirty(page))
+ goto page_out;
- blk_finish_plug(&plug);
+ if (!clear_page_dirty_for_io(page))
+ goto page_out;
- lock_page(page);
- if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
- f2fs_put_page(page, 1);
- goto repeat;
- }
-page_hit:
- if (unlikely(!PageUptodate(page))) {
- f2fs_put_page(page, 1);
- return ERR_PTR(-EIO);
- }
- return page;
+ ret = f2fs_write_inline_data(inode, page);
+ inode_dec_dirty_pages(inode);
+ remove_dirty_inode(inode);
+ if (ret)
+ set_page_dirty(page);
+page_out:
+ f2fs_put_page(page, 1);
+iput_out:
+ iput(inode);
}
-void sync_inode_page(struct dnode_of_data *dn)
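+/*
+ * Flush a node page selected by GC: under FG_GC write it out synchronously,
+ * otherwise just redirty it and let the flusher write it back later.
+ */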
+void move_node_page(struct page *node_page, int gc_type)
{
- if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
- update_inode(dn->inode, dn->node_page);
- } else if (dn->inode_page) {
- if (!dn->inode_page_locked)
- lock_page(dn->inode_page);
- update_inode(dn->inode, dn->inode_page);
- if (!dn->inode_page_locked)
- unlock_page(dn->inode_page);
+ if (gc_type == FG_GC) {
+ struct f2fs_sb_info *sbi = F2FS_P_SB(node_page);
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = 1,
+ .for_reclaim = 0,
+ };
+
+ set_page_dirty(node_page);
+ f2fs_wait_on_page_writeback(node_page, NODE, true);
+
+ f2fs_bug_on(sbi, PageWriteback(node_page));
+ if (!clear_page_dirty_for_io(node_page))
+ goto out_page;
+
+ if (NODE_MAPPING(sbi)->a_ops->writepage(node_page, &wbc))
+ unlock_page(node_page);
+ goto release_page;
} else {
- update_inode_page(dn->inode);
- }
+ /* set page dirty and write it */
+ if (!PageWriteback(node_page))
+ set_page_dirty(node_page);
+ }
+out_page:
+ unlock_page(node_page);
+release_page:
+ f2fs_put_page(node_page, 0);
}
-int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
- struct writeback_control *wbc)
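+/*
+ * Scan the dirty node pages of @ino and return a referenced page for its
+ * last dirty dnode, so the caller can place the fsync mark there.
+ */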
+static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
pgoff_t index, end;
struct pagevec pvec;
- int step = ino ? 2 : 0;
- int nwritten = 0, wrote = 0;
+ struct page *last_page = NULL;
pagevec_init(&pvec, 0);
-
-next_step:
index = 0;
- end = LONG_MAX;
+ end = ULONG_MAX;
while (index <= end) {
int i, nr_pages;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
- /*
- * flushing sequence with step:
- * 0. indirect nodes
- * 1. dentry dnodes
- * 2. file dnodes
- */
- if (step == 0 && IS_DNODE(page))
- continue;
- if (step == 1 && (!IS_DNODE(page) ||
- is_cold_node(page)))
+ if (unlikely(f2fs_cp_error(sbi))) {
+ f2fs_put_page(last_page, 0);
+ pagevec_release(&pvec);
+ return ERR_PTR(-EIO);
+ }
+
+ if (!IS_DNODE(page) || !is_cold_node(page))
continue;
- if (step == 2 && (!IS_DNODE(page) ||
- !is_cold_node(page)))
+ if (ino_of_node(page) != ino)
continue;
- /*
- * If an fsync mode,
- * we should not skip writing node pages.
- */
- if (ino && ino_of_node(page) == ino)
- lock_page(page);
- else if (!trylock_page(page))
- continue;
+ lock_page(page);
if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
unlock_page(page);
continue;
}
- if (ino && ino_of_node(page) != ino)
+ if (ino_of_node(page) != ino)
goto continue_unlock;
if (!PageDirty(page)) {
goto continue_unlock;
}
- if (!clear_page_dirty_for_io(page))
- goto continue_unlock;
-
- /* called by fsync() */
- if (ino && IS_DNODE(page)) {
- set_fsync_mark(page, 1);
- if (IS_INODE(page))
- set_dentry_mark(page,
- need_dentry_mark(sbi, ino));
- nwritten++;
- } else {
- set_fsync_mark(page, 0);
- set_dentry_mark(page, 0);
- }
-
- if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
- unlock_page(page);
- else
- wrote++;
-
- if (--wbc->nr_to_write == 0)
- break;
- }
- pagevec_release(&pvec);
- cond_resched();
-
- if (wbc->nr_to_write == 0) {
- step = 2;
- break;
- }
- }
-
- if (step < 2) {
- step++;
- goto next_step;
- }
-
- if (wrote)
- f2fs_submit_merged_bio(sbi, NODE, WRITE);
- return nwritten;
-}
-
-int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
-{
- pgoff_t index = 0, end = LONG_MAX;
- struct pagevec pvec;
- int ret2 = 0, ret = 0;
-
- pagevec_init(&pvec, 0);
-
- while (index <= end) {
- int i, nr_pages;
- nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
- PAGECACHE_TAG_WRITEBACK,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
- if (nr_pages == 0)
- break;
-
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
-
- /* until radix tree lookup accepts end_index */
- if (unlikely(page->index > end))
- continue;
+ if (last_page)
+ f2fs_put_page(last_page, 0);
- if (ino && ino_of_node(page) == ino) {
- f2fs_wait_on_page_writeback(page, NODE);
- if (TestClearPageError(page))
- ret = -EIO;
- }
+ get_page(page);
+ last_page = page;
+ unlock_page(page);
}
pagevec_release(&pvec);
cond_resched();
}
-
- if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
- ret2 = -ENOSPC;
- if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
- ret2 = -EIO;
- if (!ret)
- ret = ret2;
- return ret;
+ return last_page;
}
-static int f2fs_write_node_page(struct page *page,
- struct writeback_control *wbc)
+static int __write_node_page(struct page *page, bool atomic, bool *submitted,
+ struct writeback_control *wbc, bool do_balance,
+ enum iostat_type io_type)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
nid_t nid;
struct f2fs_io_info fio = {
.sbi = sbi,
.type = NODE,
- .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
+ .op = REQ_OP_WRITE,
+ .op_flags = wbc_to_write_flags(wbc),
.page = page,
.encrypted_page = NULL,
+ .submitted = false,
+ .io_type = io_type,
};
trace_f2fs_writepage(page, NODE);
if (unlikely(f2fs_cp_error(sbi)))
goto redirty_out;
- f2fs_wait_on_page_writeback(page, NODE);
-
/* get old block addr of this node page */
nid = nid_of_node(page);
f2fs_bug_on(sbi, page->index != nid);
return 0;
}
+ if (atomic && !test_opt(sbi, NOBARRIER))
+ fio.op_flags |= WRITE_FLUSH_FUA;
+
set_page_writeback(page);
- fio.blk_addr = ni.blk_addr;
+ fio.old_blkaddr = ni.blk_addr;
write_node_page(nid, &fio);
- set_node_addr(sbi, &ni, fio.blk_addr, is_fsync_dnode(page));
+ set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
dec_page_count(sbi, F2FS_DIRTY_NODES);
up_read(&sbi->node_write);
+
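+ /* under reclaim, submit the pending merged node writes for this file */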
+ if (wbc->for_reclaim) {
+ f2fs_submit_merged_write_cond(sbi, page->mapping->host, 0,
+ page->index, NODE);
+ submitted = NULL;
+ }
+
unlock_page(page);
- if (wbc->for_reclaim)
- f2fs_submit_merged_bio(sbi, NODE, WRITE);
+ if (unlikely(f2fs_cp_error(sbi))) {
+ f2fs_submit_merged_write(sbi, NODE);
+ submitted = NULL;
+ }
+ if (submitted)
+ *submitted = fio.submitted;
+ if (do_balance)
+ f2fs_balance_fs(sbi, false);
return 0;
redirty_out:
return AOP_WRITEPAGE_ACTIVATE;
}
+static int f2fs_write_node_page(struct page *page,
+ struct writeback_control *wbc)
+{
+ return __write_node_page(page, false, NULL, wbc, false, FS_NODE_IO);
+}
+
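+/*
+ * Write the dirty dnodes of @inode for fsync. In atomic mode, every dnode
+ * up to the last one is written and only the last page carries the fsync
+ * mark; retry until that marked page really goes to disk.
+ */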
+int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
+ struct writeback_control *wbc, bool atomic)
+{
+ pgoff_t index, end;
+ pgoff_t last_idx = ULONG_MAX;
+ struct pagevec pvec;
+ int ret = 0;
+ struct page *last_page = NULL;
+ bool marked = false;
+ nid_t ino = inode->i_ino;
+
+ if (atomic) {
+ last_page = last_fsync_dnode(sbi, ino);
+ if (IS_ERR_OR_NULL(last_page))
+ return PTR_ERR_OR_ZERO(last_page);
+ }
+retry:
+ pagevec_init(&pvec, 0);
+ index = 0;
+ end = ULONG_MAX;
+
+ while (index <= end) {
+ int i, nr_pages;
+ nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+ PAGECACHE_TAG_DIRTY,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+ if (nr_pages == 0)
+ break;
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+ bool submitted = false;
+
+ if (unlikely(f2fs_cp_error(sbi))) {
+ f2fs_put_page(last_page, 0);
+ pagevec_release(&pvec);
+ ret = -EIO;
+ goto out;
+ }
+
+ if (!IS_DNODE(page) || !is_cold_node(page))
+ continue;
+ if (ino_of_node(page) != ino)
+ continue;
+
+ lock_page(page);
+
+ if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+continue_unlock:
+ unlock_page(page);
+ continue;
+ }
+ if (ino_of_node(page) != ino)
+ goto continue_unlock;
+
+ if (!PageDirty(page) && page != last_page) {
+ /* someone wrote it for us */
+ goto continue_unlock;
+ }
+
+ f2fs_wait_on_page_writeback(page, NODE, true);
+ BUG_ON(PageWriteback(page));
+
+ set_fsync_mark(page, 0);
+ set_dentry_mark(page, 0);
+
+ if (!atomic || page == last_page) {
+ set_fsync_mark(page, 1);
+ if (IS_INODE(page)) {
+ if (is_inode_flag_set(inode,
+ FI_DIRTY_INODE))
+ update_inode(inode, page);
+ set_dentry_mark(page,
+ need_dentry_mark(sbi, ino));
+ }
+ /* may be written by another thread */
+ if (!PageDirty(page))
+ set_page_dirty(page);
+ }
+
+ if (!clear_page_dirty_for_io(page))
+ goto continue_unlock;
+
+ ret = __write_node_page(page, atomic &&
+ page == last_page,
+ &submitted, wbc, true,
+ FS_NODE_IO);
+ if (ret) {
+ unlock_page(page);
+ f2fs_put_page(last_page, 0);
+ break;
+ } else if (submitted) {
+ last_idx = page->index;
+ }
+
+ if (page == last_page) {
+ f2fs_put_page(page, 0);
+ marked = true;
+ break;
+ }
+ }
+ pagevec_release(&pvec);
+ cond_resched();
+
+ if (ret || marked)
+ break;
+ }
+ if (!ret && atomic && !marked) {
+ f2fs_msg(sbi->sb, KERN_DEBUG,
+ "Retry to write fsync mark: ino=%u, idx=%lx",
+ ino, last_page->index);
+ lock_page(last_page);
+ f2fs_wait_on_page_writeback(last_page, NODE, true);
+ set_page_dirty(last_page);
+ unlock_page(last_page);
+ goto retry;
+ }
+out:
+ if (last_idx != ULONG_MAX)
+ f2fs_submit_merged_write_cond(sbi, NULL, ino, last_idx, NODE);
+ return ret ? -EIO: 0;
+}
+
+int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc,
+ bool do_balance, enum iostat_type io_type)
+{
+ pgoff_t index, end;
+ struct pagevec pvec;
+ int step = 0;
+ int nwritten = 0;
+ int ret = 0;
+
+ pagevec_init(&pvec, 0);
+
+next_step:
+ index = 0;
+ end = ULONG_MAX;
+
+ while (index <= end) {
+ int i, nr_pages;
+ nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+ PAGECACHE_TAG_DIRTY,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+ if (nr_pages == 0)
+ break;
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+ bool submitted = false;
+
+ if (unlikely(f2fs_cp_error(sbi))) {
+ pagevec_release(&pvec);
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * flushing sequence with step:
+ * 0. indirect nodes
+ * 1. dentry dnodes
+ * 2. file dnodes
+ */
+ if (step == 0 && IS_DNODE(page))
+ continue;
+ if (step == 1 && (!IS_DNODE(page) ||
+ is_cold_node(page)))
+ continue;
+ if (step == 2 && (!IS_DNODE(page) ||
+ !is_cold_node(page)))
+ continue;
+lock_node:
+ if (!trylock_page(page))
+ continue;
+
+ if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+continue_unlock:
+ unlock_page(page);
+ continue;
+ }
+
+ if (!PageDirty(page)) {
+ /* someone wrote it for us */
+ goto continue_unlock;
+ }
+
+ /* flush inline_data */
+ if (is_inline_node(page)) {
+ clear_inline_node(page);
+ unlock_page(page);
+ flush_inline_data(sbi, ino_of_node(page));
+ goto lock_node;
+ }
+
+ f2fs_wait_on_page_writeback(page, NODE, true);
+
+ BUG_ON(PageWriteback(page));
+ if (!clear_page_dirty_for_io(page))
+ goto continue_unlock;
+
+ set_fsync_mark(page, 0);
+ set_dentry_mark(page, 0);
+
+ ret = __write_node_page(page, false, &submitted,
+ wbc, do_balance, io_type);
+ if (ret)
+ unlock_page(page);
+ else if (submitted)
+ nwritten++;
+
+ if (--wbc->nr_to_write == 0)
+ break;
+ }
+ pagevec_release(&pvec);
+ cond_resched();
+
+ if (wbc->nr_to_write == 0) {
+ step = 2;
+ break;
+ }
+ }
+
+ if (step < 2) {
+ step++;
+ goto next_step;
+ }
+out:
+ if (nwritten)
+ f2fs_submit_merged_write(sbi, NODE);
+ return ret;
+}
+
+int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
+{
+ pgoff_t index = 0, end = ULONG_MAX;
+ struct pagevec pvec;
+ int ret2 = 0, ret = 0;
+
+ pagevec_init(&pvec, 0);
+
+ while (index <= end) {
+ int i, nr_pages;
+ nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+ PAGECACHE_TAG_WRITEBACK,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+ if (nr_pages == 0)
+ break;
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+
+ /* until radix tree lookup accepts end_index */
+ if (unlikely(page->index > end))
+ continue;
+
+ if (ino && ino_of_node(page) == ino) {
+ f2fs_wait_on_page_writeback(page, NODE, true);
+ if (TestClearPageError(page))
+ ret = -EIO;
+ }
+ }
+ pagevec_release(&pvec);
+ cond_resched();
+ }
+
+ if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
+ ret2 = -ENOSPC;
+ if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
+ ret2 = -EIO;
+ if (!ret)
+ ret = ret2;
+ return ret;
+}
+
static int f2fs_write_node_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+ struct blk_plug plug;
long diff;
- trace_f2fs_writepages(mapping->host, wbc, NODE);
+ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+ goto skip_write;
/* balancing f2fs's metadata in background */
f2fs_balance_fs_bg(sbi);
if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
goto skip_write;
+ trace_f2fs_writepages(mapping->host, wbc, NODE);
+
diff = nr_pages_to_write(sbi, NODE, wbc);
wbc->sync_mode = WB_SYNC_NONE;
- sync_node_pages(sbi, 0, wbc);
+ blk_start_plug(&plug);
+ sync_node_pages(sbi, wbc, true, FS_NODE_IO);
+ blk_finish_plug(&plug);
wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
return 0;
skip_write:
wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
+ trace_f2fs_writepages(mapping->host, wbc, NODE);
return 0;
}
{
trace_f2fs_set_page_dirty(page, NODE);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
if (!PageDirty(page)) {
- __set_page_dirty_nobuffers(page);
+ f2fs_set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
SetPagePrivate(page);
f2fs_trace_pid(page);
.set_page_dirty = f2fs_set_node_page_dirty,
.invalidatepage = f2fs_invalidate_page,
.releasepage = f2fs_release_page,
+#ifdef CONFIG_MIGRATION
+ .migratepage = f2fs_migrate_page,
+#endif
};
static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
return radix_tree_lookup(&nm_i->free_nid_root, n);
}
-static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
- struct free_nid *i)
+static int __insert_nid_to_list(struct f2fs_sb_info *sbi,
+ struct free_nid *i, enum nid_list list, bool new)
{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+ if (new) {
+ int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
+ if (err)
+ return err;
+ }
+
+ f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
+ i->state != NID_ALLOC);
+ nm_i->nid_cnt[list]++;
+ list_add_tail(&i->list, &nm_i->nid_list[list]);
+ return 0;
+}
+
+static void __remove_nid_from_list(struct f2fs_sb_info *sbi,
+ struct free_nid *i, enum nid_list list, bool reuse)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+ f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
+ i->state != NID_ALLOC);
+ nm_i->nid_cnt[list]--;
list_del(&i->list);
- radix_tree_delete(&nm_i->free_nid_root, i->nid);
+ if (!reuse)
+ radix_tree_delete(&nm_i->free_nid_root, i->nid);
}
-static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
+/* return if the nid is recognized as free */
+static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- struct free_nid *i;
+ struct free_nid *i, *e;
struct nat_entry *ne;
- bool allocated = false;
-
- if (!available_free_memory(sbi, FREE_NIDS))
- return -1;
+ int err = -EINVAL;
+ bool ret = false;
/* 0 nid should not be used */
if (unlikely(nid == 0))
- return 0;
-
- if (build) {
- /* do not add allocated nids */
- down_read(&nm_i->nat_tree_lock);
- ne = __lookup_nat_cache(nm_i, nid);
- if (ne &&
- (!get_nat_flag(ne, IS_CHECKPOINTED) ||
- nat_get_blkaddr(ne) != NULL_ADDR))
- allocated = true;
- up_read(&nm_i->nat_tree_lock);
- if (allocated)
- return 0;
- }
+ return false;
i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
i->nid = nid;
i->state = NID_NEW;
- if (radix_tree_preload(GFP_NOFS)) {
- kmem_cache_free(free_nid_slab, i);
- return 0;
- }
+ if (radix_tree_preload(GFP_NOFS))
+ goto err;
- spin_lock(&nm_i->free_nid_list_lock);
- if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
- spin_unlock(&nm_i->free_nid_list_lock);
- radix_tree_preload_end();
- kmem_cache_free(free_nid_slab, i);
- return 0;
+ spin_lock(&nm_i->nid_list_lock);
+
+ if (build) {
+ /*
+ * Thread A Thread B
+ * - f2fs_create
+ * - f2fs_new_inode
+ * - alloc_nid
+ * - __insert_nid_to_list(ALLOC_NID_LIST)
+ * - f2fs_balance_fs_bg
+ * - build_free_nids
+ * - __build_free_nids
+ * - scan_nat_page
+ * - add_free_nid
+ * - __lookup_nat_cache
+ * - f2fs_add_link
+ * - init_inode_metadata
+ * - new_inode_page
+ * - new_node_page
+ * - set_node_addr
+ * - alloc_nid_done
+ * - __remove_nid_from_list(ALLOC_NID_LIST)
+ * - __insert_nid_to_list(FREE_NID_LIST)
+ */
+ ne = __lookup_nat_cache(nm_i, nid);
+ if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
+ nat_get_blkaddr(ne) != NULL_ADDR))
+ goto err_out;
+
+ e = __lookup_free_nid_list(nm_i, nid);
+ if (e) {
+ if (e->state == NID_NEW)
+ ret = true;
+ goto err_out;
+ }
}
- list_add_tail(&i->list, &nm_i->free_nid_list);
- nm_i->fcnt++;
- spin_unlock(&nm_i->free_nid_list_lock);
+ ret = true;
+ err = __insert_nid_to_list(sbi, i, FREE_NID_LIST, true);
+err_out:
+ spin_unlock(&nm_i->nid_list_lock);
radix_tree_preload_end();
- return 1;
+err:
+ if (err)
+ kmem_cache_free(free_nid_slab, i);
+ return ret;
}
-static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
+static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i;
bool need_free = false;
- spin_lock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
i = __lookup_free_nid_list(nm_i, nid);
if (i && i->state == NID_NEW) {
- __del_from_free_nid_list(nm_i, i);
- nm_i->fcnt--;
+ __remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
need_free = true;
}
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_unlock(&nm_i->nid_list_lock);
if (need_free)
kmem_cache_free(free_nid_slab, i);
}
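+/*
+ * Maintain the per-NAT-block free nid bitmap; bits are only tracked for
+ * NAT blocks that have already been scanned (see nat_block_bitmap).
+ */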
+static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
+ bool set, bool build)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
+ unsigned int nid_ofs = nid - START_NID(nid);
+
+ if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
+ return;
+
+ if (set)
+ __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+ else
+ __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+
+ if (set)
+ nm_i->free_nid_count[nat_ofs]++;
+ else if (!build)
+ nm_i->free_nid_count[nat_ofs]--;
+}
+
static void scan_nat_page(struct f2fs_sb_info *sbi,
struct page *nat_page, nid_t start_nid)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct f2fs_nat_block *nat_blk = page_address(nat_page);
block_t blk_addr;
+ unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
int i;
+ if (test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
+ return;
+
+ __set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
+
i = start_nid % NAT_ENTRY_PER_BLOCK;
for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
+ bool freed = false;
if (unlikely(start_nid >= nm_i->max_nid))
break;
blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
- if (blk_addr == NULL_ADDR) {
- if (add_free_nid(sbi, start_nid, true) < 0)
- break;
+ if (blk_addr == NULL_ADDR)
+ freed = add_free_nid(sbi, start_nid, true);
+ spin_lock(&NM_I(sbi)->nid_list_lock);
+ update_free_nid_bitmap(sbi, start_nid, freed, true);
+ spin_unlock(&NM_I(sbi)->nid_list_lock);
+ }
+}
+
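+/*
+ * Refill the free nid list from the in-memory free_nid_bitmap instead of
+ * re-reading NAT pages, then fold in the nids cached in the NAT journal.
+ */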
+static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
+ struct f2fs_journal *journal = curseg->journal;
+ unsigned int i, idx;
+
+ down_read(&nm_i->nat_tree_lock);
+
+ for (i = 0; i < nm_i->nat_blocks; i++) {
+ if (!test_bit_le(i, nm_i->nat_block_bitmap))
+ continue;
+ if (!nm_i->free_nid_count[i])
+ continue;
+ for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
+ nid_t nid;
+
+ if (!test_bit_le(idx, nm_i->free_nid_bitmap[i]))
+ continue;
+
+ nid = i * NAT_ENTRY_PER_BLOCK + idx;
+ add_free_nid(sbi, nid, true);
+
+ if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS)
+ goto out;
}
}
+out:
+ down_read(&curseg->journal_rwsem);
+ for (i = 0; i < nats_in_cursum(journal); i++) {
+ block_t addr;
+ nid_t nid;
+
+ addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
+ nid = le32_to_cpu(nid_in_journal(journal, i));
+ if (addr == NULL_ADDR)
+ add_free_nid(sbi, nid, true);
+ else
+ remove_free_nid(sbi, nid);
+ }
+ up_read(&curseg->journal_rwsem);
+ up_read(&nm_i->nat_tree_lock);
}
-static void build_free_nids(struct f2fs_sb_info *sbi)
+static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
int i = 0;
nid_t nid = nm_i->next_scan_nid;
+ if (unlikely(nid >= nm_i->max_nid))
+ nid = 0;
+
/* Enough entries */
- if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
+ if (nm_i->nid_cnt[FREE_NID_LIST] >= NAT_ENTRY_PER_BLOCK)
return;
+ if (!sync && !available_free_memory(sbi, FREE_NIDS))
+ return;
+
+ if (!mount) {
+ /* try to find free nids in free_nid_bitmap */
+ scan_free_nid_bits(sbi);
+
+ if (nm_i->nid_cnt[FREE_NID_LIST])
+ return;
+ }
+
/* readahead nat pages to be scanned */
ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
META_NAT, true);
+ down_read(&nm_i->nat_tree_lock);
+
while (1) {
struct page *page = get_current_nat_page(sbi, nid);
nm_i->next_scan_nid = nid;
/* find free nids from current sum_pages */
- mutex_lock(&curseg->curseg_mutex);
- for (i = 0; i < nats_in_cursum(sum); i++) {
- block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
- nid = le32_to_cpu(nid_in_journal(sum, i));
+ down_read(&curseg->journal_rwsem);
+ for (i = 0; i < nats_in_cursum(journal); i++) {
+ block_t addr;
+
+ addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
+ nid = le32_to_cpu(nid_in_journal(journal, i));
if (addr == NULL_ADDR)
add_free_nid(sbi, nid, true);
else
- remove_free_nid(nm_i, nid);
+ remove_free_nid(sbi, nid);
}
- mutex_unlock(&curseg->curseg_mutex);
+ up_read(&curseg->journal_rwsem);
+ up_read(&nm_i->nat_tree_lock);
ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
nm_i->ra_nid_pages, META_NAT, false);
}
+void build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
+{
+ mutex_lock(&NM_I(sbi)->build_lock);
+ __build_free_nids(sbi, sync, mount);
+ mutex_unlock(&NM_I(sbi)->build_lock);
+}
+
/*
* If this function returns success, caller can obtain a new nid
* from second parameter of this function.
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i = NULL;
retry:
- if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
+ f2fs_show_injection_info(FAULT_ALLOC_NID);
return false;
+ }
+#endif
+ spin_lock(&nm_i->nid_list_lock);
- spin_lock(&nm_i->free_nid_list_lock);
+ if (unlikely(nm_i->available_nids == 0)) {
+ spin_unlock(&nm_i->nid_list_lock);
+ return false;
+ }
/* We should not use stale free nids created by build_free_nids */
- if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
- struct node_info ni;
-
- f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
- list_for_each_entry(i, &nm_i->free_nid_list, list)
- if (i->state == NID_NEW)
- break;
-
- f2fs_bug_on(sbi, i->state != NID_NEW);
+ if (nm_i->nid_cnt[FREE_NID_LIST] && !on_build_free_nids(nm_i)) {
+ f2fs_bug_on(sbi, list_empty(&nm_i->nid_list[FREE_NID_LIST]));
+ i = list_first_entry(&nm_i->nid_list[FREE_NID_LIST],
+ struct free_nid, list);
*nid = i->nid;
+
+ __remove_nid_from_list(sbi, i, FREE_NID_LIST, true);
i->state = NID_ALLOC;
- nm_i->fcnt--;
- spin_unlock(&nm_i->free_nid_list_lock);
-
- /* check nid is allocated already */
- get_node_info(sbi, *nid, &ni);
- if (ni.blk_addr != NULL_ADDR) {
- alloc_nid_done(sbi, *nid);
- goto retry;
- }
+ __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false);
+ nm_i->available_nids--;
+
+ update_free_nid_bitmap(sbi, *nid, false, false);
+
+ spin_unlock(&nm_i->nid_list_lock);
return true;
}
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_unlock(&nm_i->nid_list_lock);
/* Let's scan nat pages and its caches to get free nids */
- mutex_lock(&nm_i->build_lock);
- build_free_nids(sbi);
- mutex_unlock(&nm_i->build_lock);
+ build_free_nids(sbi, true, false);
goto retry;
}
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i;
- spin_lock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
i = __lookup_free_nid_list(nm_i, nid);
- f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
- __del_from_free_nid_list(nm_i, i);
- spin_unlock(&nm_i->free_nid_list_lock);
+ f2fs_bug_on(sbi, !i);
+ __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false);
+ spin_unlock(&nm_i->nid_list_lock);
kmem_cache_free(free_nid_slab, i);
}
if (!nid)
return;
- spin_lock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
i = __lookup_free_nid_list(nm_i, nid);
- f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
+ f2fs_bug_on(sbi, !i);
+
if (!available_free_memory(sbi, FREE_NIDS)) {
- __del_from_free_nid_list(nm_i, i);
+ __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false);
need_free = true;
} else {
+ __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, true);
i->state = NID_NEW;
- nm_i->fcnt++;
+ __insert_nid_to_list(sbi, i, FREE_NID_LIST, false);
}
- spin_unlock(&nm_i->free_nid_list_lock);
+
+ nm_i->available_nids++;
+
+ update_free_nid_bitmap(sbi, nid, true, false);
+
+ spin_unlock(&nm_i->nid_list_lock);
if (need_free)
kmem_cache_free(free_nid_slab, i);
struct free_nid *i, *next;
int nr = nr_shrink;
+ if (nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
+ return 0;
+
if (!mutex_trylock(&nm_i->build_lock))
return 0;
- spin_lock(&nm_i->free_nid_list_lock);
- list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
- if (nr_shrink <= 0 || nm_i->fcnt <= NAT_ENTRY_PER_BLOCK)
+ spin_lock(&nm_i->nid_list_lock);
+ list_for_each_entry_safe(i, next, &nm_i->nid_list[FREE_NID_LIST],
+ list) {
+ if (nr_shrink <= 0 ||
+ nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
break;
- if (i->state == NID_ALLOC)
- continue;
- __del_from_free_nid_list(nm_i, i);
+
+ __remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
kmem_cache_free(free_nid_slab, i);
- nm_i->fcnt--;
nr_shrink--;
}
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_unlock(&nm_i->nid_list_lock);
mutex_unlock(&nm_i->build_lock);
return nr - nr_shrink;
ri = F2FS_INODE(page);
if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
- clear_inode_flag(F2FS_I(inode), FI_INLINE_XATTR);
+ clear_inode_flag(inode, FI_INLINE_XATTR);
goto update_inode;
}
src_addr = inline_xattr_addr(page);
inline_size = inline_xattr_size(inode);
- f2fs_wait_on_page_writeback(ipage, NODE);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
memcpy(dst_addr, src_addr, inline_size);
update_inode:
update_inode(inode, ipage);
f2fs_put_page(ipage, 1);
}
-void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
+int recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
- nid_t new_xnid = nid_of_node(page);
+ nid_t new_xnid;
+ struct dnode_of_data dn;
struct node_info ni;
+ struct page *xpage;
- /* 1: invalidate the previous xattr nid */
if (!prev_xnid)
goto recover_xnid;
- /* Deallocate node address */
+ /* 1: invalidate the previous xattr nid */
get_node_info(sbi, prev_xnid, &ni);
f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
invalidate_blocks(sbi, ni.blk_addr);
- dec_valid_node_count(sbi, inode);
+ dec_valid_node_count(sbi, inode, false);
set_node_addr(sbi, &ni, NULL_ADDR, false);
recover_xnid:
- /* 2: allocate new xattr nid */
- if (unlikely(!inc_valid_node_count(sbi, inode)))
- f2fs_bug_on(sbi, 1);
-
- remove_free_nid(NM_I(sbi), new_xnid);
- get_node_info(sbi, new_xnid, &ni);
- ni.ino = inode->i_ino;
- set_node_addr(sbi, &ni, NEW_ADDR, false);
- F2FS_I(inode)->i_xattr_nid = new_xnid;
+ /* 2: update xattr nid in inode */
+ if (!alloc_nid(sbi, &new_xnid))
+ return -ENOSPC;
- /* 3: update xattr blkaddr */
- refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
- set_node_addr(sbi, &ni, blkaddr, false);
+ set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
+ xpage = new_node_page(&dn, XATTR_NODE_OFFSET);
+ if (IS_ERR(xpage)) {
+ alloc_nid_failed(sbi, new_xnid);
+ return PTR_ERR(xpage);
+ }
+ alloc_nid_done(sbi, new_xnid);
update_inode_page(inode);
+
+ /* 3: update and set xattr node page dirty */
+ memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
+
+ set_page_dirty(xpage);
+ f2fs_put_page(xpage, 1);
+
+ return 0;
}
int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
if (unlikely(old_ni.blk_addr != NULL_ADDR))
return -EINVAL;
-
- ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
- if (!ipage)
- return -ENOMEM;
+retry:
+ ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
+ if (!ipage) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry;
+ }
/* Should not use this inode from free nid list */
- remove_free_nid(NM_I(sbi), ino);
+ remove_free_nid(sbi, ino);
- SetPageUptodate(ipage);
+ if (!PageUptodate(ipage))
+ SetPageUptodate(ipage);
fill_node_footer(ipage, ino, ino, 0, true);
src = F2FS_INODE(page);
dst->i_blocks = cpu_to_le64(1);
dst->i_links = cpu_to_le32(1);
dst->i_xattr_nid = 0;
- dst->i_inline = src->i_inline & F2FS_INLINE_XATTR;
+ dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
+ if (dst->i_inline & F2FS_EXTRA_ATTR) {
+ dst->i_extra_isize = src->i_extra_isize;
+ if (f2fs_sb_has_project_quota(sbi->sb) &&
+ F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
+ i_projid))
+ dst->i_projid = src->i_projid;
+ }
new_ni = old_ni;
new_ni.ino = ino;
- if (unlikely(!inc_valid_node_count(sbi, NULL)))
+ if (unlikely(inc_valid_node_count(sbi, NULL, true)))
WARN_ON(1);
set_node_addr(sbi, &new_ni, NEW_ADDR, false);
inc_valid_inode_count(sbi);
struct f2fs_node *rn;
struct f2fs_summary *sum_entry;
block_t addr;
- int bio_blocks = MAX_BIO_BLOCKS(sbi);
int i, idx, last_offset, nrpages;
/* scan the node segment */
sum_entry = &sum->entries[0];
for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
- nrpages = min(last_offset - i, bio_blocks);
+ nrpages = min(last_offset - i, BIO_MAX_PAGES);
/* readahead node pages */
ra_meta_pages(sbi, addr, nrpages, META_POR, true);
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
int i;
- mutex_lock(&curseg->curseg_mutex);
- for (i = 0; i < nats_in_cursum(sum); i++) {
+ down_write(&curseg->journal_rwsem);
+ for (i = 0; i < nats_in_cursum(journal); i++) {
struct nat_entry *ne;
struct f2fs_nat_entry raw_ne;
- nid_t nid = le32_to_cpu(nid_in_journal(sum, i));
+ nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
- raw_ne = nat_in_journal(sum, i);
+ raw_ne = nat_in_journal(journal, i);
- down_write(&nm_i->nat_tree_lock);
ne = __lookup_nat_cache(nm_i, nid);
if (!ne) {
- ne = grab_nat_entry(nm_i, nid);
+ ne = grab_nat_entry(nm_i, nid, true);
node_info_from_raw_nat(&ne->ni, &raw_ne);
}
+
+ /*
+ * if a free nat in journal has not been used after last
+ * checkpoint, we should remove it from available nids,
+ * since later we will add it again.
+ */
+ if (!get_nat_flag(ne, IS_DIRTY) &&
+ le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
+ spin_lock(&nm_i->nid_list_lock);
+ nm_i->available_nids--;
+ spin_unlock(&nm_i->nid_list_lock);
+ }
+
__set_nat_cache_dirty(nm_i, ne);
- up_write(&nm_i->nat_tree_lock);
}
- update_nats_in_cursum(sum, -i);
- mutex_unlock(&curseg->curseg_mutex);
+ update_nats_in_cursum(journal, -i);
+ up_write(&curseg->journal_rwsem);
}
static void __adjust_nat_entry_set(struct nat_entry_set *nes,
list_add_tail(&nes->set_list, head);
}
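+/*
+ * Keep the nat_bits in sync while writing a NAT block: set the empty bit
+ * when no entry is valid, the full bit when all are, and clear both for
+ * anything in between.
+ */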
+static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
+ struct page *page)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
+ struct f2fs_nat_block *nat_blk = page_address(page);
+ int valid = 0;
+ int i;
+
+ if (!enabled_nat_bits(sbi, NULL))
+ return;
+
+ for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) {
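+ /* nid 0 is reserved and never allocated; count it as valid */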
+ if (start_nid == 0 && i == 0)
+ valid++;
+ if (nat_blk->entries[i].block_addr)
+ valid++;
+ }
+ if (valid == 0) {
+ __set_bit_le(nat_index, nm_i->empty_nat_bits);
+ __clear_bit_le(nat_index, nm_i->full_nat_bits);
+ return;
+ }
+
+ __clear_bit_le(nat_index, nm_i->empty_nat_bits);
+ if (valid == NAT_ENTRY_PER_BLOCK)
+ __set_bit_le(nat_index, nm_i->full_nat_bits);
+ else
+ __clear_bit_le(nat_index, nm_i->full_nat_bits);
+}
+
static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
- struct nat_entry_set *set)
+ struct nat_entry_set *set, struct cp_control *cpc)
{
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
bool to_journal = true;
struct f2fs_nat_block *nat_blk;
struct nat_entry *ne, *cur;
struct page *page = NULL;
- struct f2fs_nm_info *nm_i = NM_I(sbi);
/*
* there are two steps to flush nat entries:
* #1, flush nat entries to journal in current hot data summary block.
* #2, flush nat entries to nat page.
*/
- if (!__has_cursum_space(sum, set->entry_cnt, NAT_JOURNAL))
+ if (enabled_nat_bits(sbi, cpc) ||
+ !__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
to_journal = false;
if (to_journal) {
- mutex_lock(&curseg->curseg_mutex);
+ down_write(&curseg->journal_rwsem);
} else {
page = get_next_nat_page(sbi, start_nid);
nat_blk = page_address(page);
nid_t nid = nat_get_nid(ne);
int offset;
- if (nat_get_blkaddr(ne) == NEW_ADDR)
- continue;
+ f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
if (to_journal) {
- offset = lookup_journal_in_cursum(sum,
+ offset = lookup_journal_in_cursum(journal,
NAT_JOURNAL, nid, 1);
f2fs_bug_on(sbi, offset < 0);
- raw_ne = &nat_in_journal(sum, offset);
- nid_in_journal(sum, offset) = cpu_to_le32(nid);
+ raw_ne = &nat_in_journal(journal, offset);
+ nid_in_journal(journal, offset) = cpu_to_le32(nid);
} else {
raw_ne = &nat_blk->entries[nid - start_nid];
}
raw_nat_from_node_info(raw_ne, &ne->ni);
-
- down_write(&NM_I(sbi)->nat_tree_lock);
nat_reset_flag(ne);
- __clear_nat_cache_dirty(NM_I(sbi), ne);
- up_write(&NM_I(sbi)->nat_tree_lock);
-
- if (nat_get_blkaddr(ne) == NULL_ADDR)
+ __clear_nat_cache_dirty(NM_I(sbi), set, ne);
+ if (nat_get_blkaddr(ne) == NULL_ADDR) {
add_free_nid(sbi, nid, false);
+ spin_lock(&NM_I(sbi)->nid_list_lock);
+ NM_I(sbi)->available_nids++;
+ update_free_nid_bitmap(sbi, nid, true, false);
+ spin_unlock(&NM_I(sbi)->nid_list_lock);
+ } else {
+ spin_lock(&NM_I(sbi)->nid_list_lock);
+ update_free_nid_bitmap(sbi, nid, false, false);
+ spin_unlock(&NM_I(sbi)->nid_list_lock);
+ }
}
- if (to_journal)
- mutex_unlock(&curseg->curseg_mutex);
- else
+ if (to_journal) {
+ up_write(&curseg->journal_rwsem);
+ } else {
+ __update_nat_bits(sbi, start_nid, page);
f2fs_put_page(page, 1);
+ }
- f2fs_bug_on(sbi, set->entry_cnt);
-
- down_write(&nm_i->nat_tree_lock);
- radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
- up_write(&nm_i->nat_tree_lock);
- kmem_cache_free(nat_entry_set_slab, set);
+ /* Allow dirty nats by node block allocation in write_begin */
+ if (!set->entry_cnt) {
+ radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
+ kmem_cache_free(nat_entry_set_slab, set);
+ }
}
/*
* This function is called during the checkpointing process.
*/
-void flush_nat_entries(struct f2fs_sb_info *sbi)
+void flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
struct nat_entry_set *setvec[SETVEC_SIZE];
struct nat_entry_set *set, *tmp;
unsigned int found;
if (!nm_i->dirty_nat_cnt)
return;
+
+ down_write(&nm_i->nat_tree_lock);
+
/*
 * if there is not enough space in the journal to store dirty nat
* entries, remove all entries from journal and merge them
* into nat entry set.
*/
- if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt, NAT_JOURNAL))
+ if (enabled_nat_bits(sbi, cpc) ||
+ !__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
remove_nats_in_journal(sbi);
- down_write(&nm_i->nat_tree_lock);
while ((found = __gang_lookup_nat_set(nm_i,
set_idx, SETVEC_SIZE, setvec))) {
unsigned idx;
set_idx = setvec[found - 1]->set + 1;
for (idx = 0; idx < found; idx++)
__adjust_nat_entry_set(setvec[idx], &sets,
- MAX_NAT_JENTRIES(sum));
+ MAX_NAT_JENTRIES(journal));
}
- up_write(&nm_i->nat_tree_lock);
/* flush dirty nats in nat entry set */
list_for_each_entry_safe(set, tmp, &sets, set_list)
- __flush_nat_entry_set(sbi, set);
+ __flush_nat_entry_set(sbi, set, cpc);
- f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
+ up_write(&nm_i->nat_tree_lock);
+ /* Allow dirty nats by node block allocation in write_begin */
+}
+
+static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
+ unsigned int i;
+ __u64 cp_ver = cur_cp_version(ckpt);
+ block_t nat_bits_addr;
+
+ if (!enabled_nat_bits(sbi, NULL))
+ return 0;
+
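+ /*
+ * nat_bits layout: an 8-byte checkpoint version, then the
+ * full_nat_bits and empty_nat_bits arrays, rounded up to blocks.
+ */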
+ nm_i->nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
+ F2FS_BLKSIZE - 1);
+ nm_i->nat_bits = kzalloc(nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS,
+ GFP_KERNEL);
+ if (!nm_i->nat_bits)
+ return -ENOMEM;
+
+ nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
+ nm_i->nat_bits_blocks;
+ for (i = 0; i < nm_i->nat_bits_blocks; i++) {
+ struct page *page = get_meta_page(sbi, nat_bits_addr++);
+
+ memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
+ page_address(page), F2FS_BLKSIZE);
+ f2fs_put_page(page, 1);
+ }
+
+ cp_ver |= (cur_cp_crc(ckpt) << 32);
+ if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
+ disable_nat_bits(sbi, true);
+ return 0;
+ }
+
+ nm_i->full_nat_bits = nm_i->nat_bits + 8;
+ nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
+
+ f2fs_msg(sbi->sb, KERN_NOTICE, "Found nat_bits in checkpoint");
+ return 0;
+}
+
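+/*
+ * Prime the free nid bitmaps from nat_bits: every nid in an empty NAT
+ * block is free, and both empty and full blocks can be skipped by
+ * scan_nat_page() later.
+ */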
+static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ unsigned int i = 0;
+ nid_t nid, last_nid;
+
+ if (!enabled_nat_bits(sbi, NULL))
+ return;
+
+ for (i = 0; i < nm_i->nat_blocks; i++) {
+ i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
+ if (i >= nm_i->nat_blocks)
+ break;
+
+ __set_bit_le(i, nm_i->nat_block_bitmap);
+
+ nid = i * NAT_ENTRY_PER_BLOCK;
+ last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
+
+ spin_lock(&NM_I(sbi)->nid_list_lock);
+ for (; nid < last_nid; nid++)
+ update_free_nid_bitmap(sbi, nid, true, true);
+ spin_unlock(&NM_I(sbi)->nid_list_lock);
+ }
+
+ for (i = 0; i < nm_i->nat_blocks; i++) {
+ i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
+ if (i >= nm_i->nat_blocks)
+ break;
+
+ __set_bit_le(i, nm_i->nat_block_bitmap);
+ }
}
static int init_node_manager(struct f2fs_sb_info *sbi)
struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
struct f2fs_nm_info *nm_i = NM_I(sbi);
unsigned char *version_bitmap;
- unsigned int nat_segs, nat_blocks;
+ unsigned int nat_segs;
+ int err;
nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
/* segment_count_nat includes pair segment so divide to 2. */
nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
- nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
-
- nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
+ nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
+ nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
/* not used nids: 0, node, meta, (and root counted as valid node) */
- nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
- nm_i->fcnt = 0;
+ nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
+ F2FS_RESERVED_NODE_NUM;
+ nm_i->nid_cnt[FREE_NID_LIST] = 0;
+ nm_i->nid_cnt[ALLOC_NID_LIST] = 0;
nm_i->nat_cnt = 0;
nm_i->ram_thresh = DEF_RAM_THRESHOLD;
nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
+ nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
- INIT_LIST_HEAD(&nm_i->free_nid_list);
+ INIT_LIST_HEAD(&nm_i->nid_list[FREE_NID_LIST]);
+ INIT_LIST_HEAD(&nm_i->nid_list[ALLOC_NID_LIST]);
INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
INIT_LIST_HEAD(&nm_i->nat_entries);
mutex_init(&nm_i->build_lock);
- spin_lock_init(&nm_i->free_nid_list_lock);
+ spin_lock_init(&nm_i->nid_list_lock);
init_rwsem(&nm_i->nat_tree_lock);
nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
GFP_KERNEL);
if (!nm_i->nat_bitmap)
return -ENOMEM;
+
+ err = __get_nat_bitmaps(sbi);
+ if (err)
+ return err;
+
+#ifdef CONFIG_F2FS_CHECK_FS
+ nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
+ GFP_KERNEL);
+ if (!nm_i->nat_bitmap_mir)
+ return -ENOMEM;
+#endif
+
+ return 0;
+}
+
+static int init_free_nid_cache(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+ nm_i->free_nid_bitmap = kvzalloc(nm_i->nat_blocks *
+ NAT_ENTRY_BITMAP_SIZE, GFP_KERNEL);
+ if (!nm_i->free_nid_bitmap)
+ return -ENOMEM;
+
+ nm_i->nat_block_bitmap = kvzalloc(nm_i->nat_blocks / 8,
+ GFP_KERNEL);
+ if (!nm_i->nat_block_bitmap)
+ return -ENOMEM;
+
+ nm_i->free_nid_count = kvzalloc(nm_i->nat_blocks *
+ sizeof(unsigned short), GFP_KERNEL);
+ if (!nm_i->free_nid_count)
+ return -ENOMEM;
return 0;
}
if (err)
return err;
- build_free_nids(sbi);
+ err = init_free_nid_cache(sbi);
+ if (err)
+ return err;
+
+ /* load free nid status from nat_bits table */
+ load_free_nid_bitmap(sbi);
+
+ build_free_nids(sbi, true, true);
return 0;
}
return;
/* destroy free nid list */
- spin_lock(&nm_i->free_nid_list_lock);
- list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
- f2fs_bug_on(sbi, i->state == NID_ALLOC);
- __del_from_free_nid_list(nm_i, i);
- nm_i->fcnt--;
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
+ list_for_each_entry_safe(i, next_i, &nm_i->nid_list[FREE_NID_LIST],
+ list) {
+ __remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
+ spin_unlock(&nm_i->nid_list_lock);
kmem_cache_free(free_nid_slab, i);
- spin_lock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
}
- f2fs_bug_on(sbi, nm_i->fcnt);
- spin_unlock(&nm_i->free_nid_list_lock);
+ f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID_LIST]);
+ f2fs_bug_on(sbi, nm_i->nid_cnt[ALLOC_NID_LIST]);
+ f2fs_bug_on(sbi, !list_empty(&nm_i->nid_list[ALLOC_NID_LIST]));
+ spin_unlock(&nm_i->nid_list_lock);
/* destroy nat cache */
down_write(&nm_i->nat_tree_lock);
}
up_write(&nm_i->nat_tree_lock);
+ kvfree(nm_i->nat_block_bitmap);
+ kvfree(nm_i->free_nid_bitmap);
+ kvfree(nm_i->free_nid_count);
+
kfree(nm_i->nat_bitmap);
+ kfree(nm_i->nat_bits);
+#ifdef CONFIG_F2FS_CHECK_FS
+ kfree(nm_i->nat_bitmap_mir);
+#endif
sbi->nm_info = NULL;
kfree(nm_i);
}
* published by the Free Software Foundation.
*/
/* start node id of a node block dedicated to the given node id */
-#define START_NID(nid) ((nid / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)
+#define START_NID(nid) (((nid) / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)
/* node block offset on the NAT area dedicated to the given start node id */
-#define NAT_BLOCK_OFFSET(start_nid) (start_nid / NAT_ENTRY_PER_BLOCK)
+#define NAT_BLOCK_OFFSET(start_nid) ((start_nid) / NAT_ENTRY_PER_BLOCK)
/* # of pages to perform synchronous readahead before building free nids */
-#define FREE_NID_PAGES 4
+#define FREE_NID_PAGES 8
+#define MAX_FREE_NIDS (NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)
-#define DEF_RA_NID_PAGES 4 /* # of nid pages to be readaheaded */
+#define DEF_RA_NID_PAGES 0 /* # of nid pages to be read ahead */
/* maximum readahead size for node during getting data blocks */
#define MAX_RA_NODE 128
/* control the memory footprint threshold (10MB per 1GB ram) */
-#define DEF_RAM_THRESHOLD 10
+#define DEF_RAM_THRESHOLD 1
+
+/* control dirty nats ratio threshold (default: 10% of max nid count) */
+#define DEF_DIRTY_NAT_RATIO_THRESHOLD 10
+/* control total # of nats */
+#define DEF_NAT_CACHE_THRESHOLD 100000
/* vector size for gang look-up from nat cache that consists of radix tree */
#define NATVEC_SIZE 64
struct node_info ni; /* in-memory node information */
};
-#define nat_get_nid(nat) (nat->ni.nid)
-#define nat_set_nid(nat, n) (nat->ni.nid = n)
-#define nat_get_blkaddr(nat) (nat->ni.blk_addr)
-#define nat_set_blkaddr(nat, b) (nat->ni.blk_addr = b)
-#define nat_get_ino(nat) (nat->ni.ino)
-#define nat_set_ino(nat, i) (nat->ni.ino = i)
-#define nat_get_version(nat) (nat->ni.version)
-#define nat_set_version(nat, v) (nat->ni.version = v)
+#define nat_get_nid(nat) ((nat)->ni.nid)
+#define nat_set_nid(nat, n) ((nat)->ni.nid = (n))
+#define nat_get_blkaddr(nat) ((nat)->ni.blk_addr)
+#define nat_set_blkaddr(nat, b) ((nat)->ni.blk_addr = (b))
+#define nat_get_ino(nat) ((nat)->ni.ino)
+#define nat_set_ino(nat, i) ((nat)->ni.ino = (i))
+#define nat_get_version(nat) ((nat)->ni.version)
+#define nat_set_version(nat, v) ((nat)->ni.version = (v))
-#define inc_node_version(version) (++version)
+#define inc_node_version(version) (++(version))
static inline void copy_node_info(struct node_info *dst,
struct node_info *src)
raw_ne->version = ni->version;
}
+static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
+{
+ return NM_I(sbi)->dirty_nat_cnt >= NM_I(sbi)->max_nid *
+ NM_I(sbi)->dirty_nats_ratio / 100;
+}
+
+static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
+{
+ return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
+}
+
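/*
 * Illustration (not part of this patch): with the defaults above,
 * excess_dirty_nats() trips once dirty NAT entries pass
 * dirty_nats_ratio (10%) of max_nid, and excess_cached_nats() once
 * the cache holds DEF_NAT_CACHE_THRESHOLD (100,000) entries. A
 * hypothetical helper combining both pressure signals:
 */
static inline bool nat_cache_under_pressure(struct f2fs_sb_info *sbi)
{
	return excess_dirty_nats(sbi) || excess_cached_nats(sbi);
}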
enum mem_type {
FREE_NIDS, /* indicates the free nid list */
NAT_ENTRIES, /* indicates the cached nat entry */
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *fnid;
- spin_lock(&nm_i->free_nid_list_lock);
- if (nm_i->fcnt <= 0) {
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
+ if (nm_i->nid_cnt[FREE_NID_LIST] <= 0) {
+ spin_unlock(&nm_i->nid_list_lock);
return;
}
- fnid = list_entry(nm_i->free_nid_list.next, struct free_nid, list);
+ fnid = list_first_entry(&nm_i->nid_list[FREE_NID_LIST],
+ struct free_nid, list);
*nid = fnid->nid;
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_unlock(&nm_i->nid_list_lock);
}
/*
static inline void get_nat_bitmap(struct f2fs_sb_info *sbi, void *addr)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+#ifdef CONFIG_F2FS_CHECK_FS
+ if (memcmp(nm_i->nat_bitmap, nm_i->nat_bitmap_mir,
+ nm_i->bitmap_size))
+ f2fs_bug_on(sbi, 1);
+#endif
memcpy(addr, nm_i->nat_bitmap, nm_i->bitmap_size);
}
struct f2fs_nm_info *nm_i = NM_I(sbi);
pgoff_t block_off;
pgoff_t block_addr;
- int seg_off;
+ /*
+ * block_off = segment_off * 512 + off_in_segment
+ * OLD = (segment_off * 512) * 2 + off_in_segment
+ * NEW = 2 * (segment_off * 512 + off_in_segment) - off_in_segment
+ */
block_off = NAT_BLOCK_OFFSET(start);
- seg_off = block_off >> sbi->log_blocks_per_seg;
block_addr = (pgoff_t)(nm_i->nat_blkaddr +
- (seg_off << sbi->log_blocks_per_seg << 1) +
- (block_off & ((1 << sbi->log_blocks_per_seg) - 1)));
+ (block_off << 1) -
+ (block_off & (sbi->blocks_per_seg - 1)));
if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
block_addr += sbi->blocks_per_seg;
struct f2fs_nm_info *nm_i = NM_I(sbi);
block_addr -= nm_i->nat_blkaddr;
- if ((block_addr >> sbi->log_blocks_per_seg) % 2)
- block_addr -= sbi->blocks_per_seg;
- else
- block_addr += sbi->blocks_per_seg;
-
+ block_addr ^= 1 << sbi->log_blocks_per_seg;
return block_addr + nm_i->nat_blkaddr;
}
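/*
 * Worked example (illustrative; assume 512 blocks per segment, so
 * log_blocks_per_seg = 9): for block_off = 1000 we get segment_off = 1
 * and off_in_segment = 488, and both formulas agree:
 *   OLD: (1 << 9 << 1) + 488        = 1024 + 488 = 1512
 *   NEW: (1000 << 1) - (1000 & 511) = 2000 - 488 = 1512
 * The two copies of a NAT block live in adjacent segments, so the XOR
 * above toggles bit 9 of the relative address (1512 ^ 512 = 2024) and
 * replaces the old add/subtract branch.
 */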
unsigned int block_off = NAT_BLOCK_OFFSET(start_nid);
f2fs_change_bit(block_off, nm_i->nat_bitmap);
+#ifdef CONFIG_F2FS_CHECK_FS
+ f2fs_change_bit(block_off, nm_i->nat_bitmap_mir);
+#endif
+}
+
+static inline nid_t ino_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ return le32_to_cpu(rn->footer.ino);
+}
+
+static inline nid_t nid_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ return le32_to_cpu(rn->footer.nid);
+}
+
+static inline unsigned int ofs_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ unsigned flag = le32_to_cpu(rn->footer.flag);
+ return flag >> OFFSET_BIT_SHIFT;
+}
+
+static inline __u64 cpver_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ return le64_to_cpu(rn->footer.cp_ver);
+}
+
+static inline block_t next_blkaddr_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ return le32_to_cpu(rn->footer.next_blkaddr);
}
static inline void fill_node_footer(struct page *page, nid_t nid,
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
struct f2fs_node *rn = F2FS_NODE(page);
+ __u64 cp_ver = cur_cp_version(ckpt);
- rn->footer.cp_ver = ckpt->checkpoint_ver;
- rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
-}
-
-static inline nid_t ino_of_node(struct page *node_page)
-{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- return le32_to_cpu(rn->footer.ino);
-}
+ if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
+ cp_ver |= (cur_cp_crc(ckpt) << 32);
-static inline nid_t nid_of_node(struct page *node_page)
-{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- return le32_to_cpu(rn->footer.nid);
+ rn->footer.cp_ver = cpu_to_le64(cp_ver);
+ rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}
-static inline unsigned int ofs_of_node(struct page *node_page)
+static inline bool is_recoverable_dnode(struct page *page)
{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- unsigned flag = le32_to_cpu(rn->footer.flag);
- return flag >> OFFSET_BIT_SHIFT;
-}
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
+ __u64 cp_ver = cur_cp_version(ckpt);
-static inline unsigned long long cpver_of_node(struct page *node_page)
-{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- return le64_to_cpu(rn->footer.cp_ver);
-}
+ if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
+ cp_ver |= (cur_cp_crc(ckpt) << 32);
-static inline block_t next_blkaddr_of_node(struct page *node_page)
-{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- return le32_to_cpu(rn->footer.next_blkaddr);
+ return cp_ver == cpver_of_node(page);
}
/*
unsigned int ofs = ofs_of_node(node_page);
if (f2fs_has_xattr_block(ofs))
- return false;
+ return true;
if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK ||
ofs == 5 + 2 * NIDS_PER_BLOCK)
return true;
}
-static inline void set_nid(struct page *p, int off, nid_t nid, bool i)
+static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
{
struct f2fs_node *rn = F2FS_NODE(p);
- f2fs_wait_on_page_writeback(p, NODE);
+ f2fs_wait_on_page_writeback(p, NODE, true);
if (i)
rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
else
rn->in.nid[off] = cpu_to_le32(nid);
- set_page_dirty(p);
+ return set_page_dirty(p);
}
static inline nid_t get_nid(struct page *p, int off, bool i)
#define is_fsync_dnode(page) is_node(page, FSYNC_BIT_SHIFT)
#define is_dent_dnode(page) is_node(page, DENT_BIT_SHIFT)
+static inline int is_inline_node(struct page *page)
+{
+ return PageChecked(page);
+}
+
+static inline void set_inline_node(struct page *page)
+{
+ SetPageChecked(page);
+}
+
+static inline void clear_inline_node(struct page *page)
+{
+ ClearPageChecked(page);
+}
+
static inline void set_cold_node(struct inode *inode, struct page *page)
{
struct f2fs_node *rn = F2FS_NODE(page);
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
- if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
- > sbi->user_block_count)
+ s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);
+
+ if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
return false;
return true;
}
return NULL;
}
-static int recover_dentry(struct inode *inode, struct page *ipage)
+static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
+ struct list_head *head, nid_t ino, bool quota_inode)
+{
+ struct inode *inode;
+ struct fsync_inode_entry *entry;
+ int err;
+
+ inode = f2fs_iget_retry(sbi->sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+
+ err = dquot_initialize(inode);
+ if (err)
+ goto err_out;
+
+ if (quota_inode) {
+ err = dquot_alloc_inode(inode);
+ if (err)
+ goto err_out;
+ }
+
+ entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
+ entry->inode = inode;
+ list_add_tail(&entry->list, head);
+
+ return entry;
+err_out:
+ iput(inode);
+ return ERR_PTR(err);
+}
+
+static void del_fsync_inode(struct fsync_inode_entry *entry)
+{
+ iput(entry->inode);
+ list_del(&entry->list);
+ kmem_cache_free(fsync_entry_slab, entry);
+}
+
+static int recover_dentry(struct inode *inode, struct page *ipage,
+ struct list_head *dir_list)
{
struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
nid_t pino = le32_to_cpu(raw_inode->i_pino);
struct f2fs_dir_entry *de;
- struct qstr name;
+ struct fscrypt_name fname;
struct page *page;
struct inode *dir, *einode;
+ struct fsync_inode_entry *entry;
int err = 0;
+ char *name;
- dir = f2fs_iget(inode->i_sb, pino);
- if (IS_ERR(dir)) {
- err = PTR_ERR(dir);
- goto out;
+ entry = get_fsync_inode(dir_list, pino);
+ if (!entry) {
+ entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
+ pino, false);
+ if (IS_ERR(entry)) {
+ dir = ERR_CAST(entry);
+ err = PTR_ERR(entry);
+ goto out;
+ }
}
- if (file_enc_name(inode)) {
- iput(dir);
- return 0;
- }
+ dir = entry->inode;
- name.len = le32_to_cpu(raw_inode->i_namelen);
- name.name = raw_inode->i_name;
+ memset(&fname, 0, sizeof(struct fscrypt_name));
+ fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
+ fname.disk_name.name = raw_inode->i_name;
- if (unlikely(name.len > F2FS_NAME_LEN)) {
+ if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
WARN_ON(1);
err = -ENAMETOOLONG;
- goto out_err;
+ goto out;
}
retry:
- de = f2fs_find_entry(dir, &name, &page);
+ de = __f2fs_find_entry(dir, &fname, &page);
if (de && inode->i_ino == le32_to_cpu(de->ino))
goto out_unmap_put;
if (de) {
- einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
+ einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
if (IS_ERR(einode)) {
WARN_ON(1);
err = PTR_ERR(einode);
err = -EEXIST;
goto out_unmap_put;
}
+
+ err = dquot_initialize(einode);
+ if (err) {
+ iput(einode);
+ goto out_unmap_put;
+ }
+
err = acquire_orphan_inode(F2FS_I_SB(inode));
if (err) {
iput(einode);
f2fs_delete_entry(de, page, dir, einode);
iput(einode);
goto retry;
- }
- err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode);
- if (err)
- goto out_err;
-
- if (is_inode_flag_set(F2FS_I(dir), FI_DELAY_IPUT)) {
- iput(dir);
+ } else if (IS_ERR(page)) {
+ err = PTR_ERR(page);
} else {
- add_dirty_dir_inode(dir);
- set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
+ err = __f2fs_do_add_link(dir, &fname, inode,
+ inode->i_ino, inode->i_mode);
}
-
+ if (err == -ENOMEM)
+ goto retry;
goto out;
out_unmap_put:
f2fs_dentry_kunmap(dir, page);
f2fs_put_page(page, 0);
-out_err:
- iput(dir);
out:
+ if (file_enc_name(inode))
+ name = "<encrypted>";
+ else
+ name = raw_inode->i_name;
f2fs_msg(inode->i_sb, KERN_NOTICE,
"%s: ino = %x, name = %s, dir = %lx, err = %d",
- __func__, ino_of_node(ipage), raw_inode->i_name,
+ __func__, ino_of_node(ipage), name,
IS_ERR(dir) ? 0 : dir->i_ino, err);
return err;
}
char *name;
inode->i_mode = le16_to_cpu(raw->i_mode);
- i_size_write(inode, le64_to_cpu(raw->i_size));
- inode->i_atime.tv_sec = le64_to_cpu(raw->i_mtime);
+ f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
+ inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
- inode->i_atime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
+ inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
+ F2FS_I(inode)->i_advise = raw->i_advise;
+
if (file_enc_name(inode))
name = "<encrypted>";
else
ino_of_node(page), name);
}
-static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
+static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
+ bool check_only)
{
- unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
struct curseg_info *curseg;
struct page *page = NULL;
block_t blkaddr;
curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
- ra_meta_pages(sbi, blkaddr, 1, META_POR, true);
-
while (1) {
struct fsync_inode_entry *entry;
page = get_tmp_page(sbi, blkaddr);
- if (cp_ver != cpver_of_node(page))
+ if (!is_recoverable_dnode(page))
break;
if (!is_fsync_dnode(page))
entry = get_fsync_inode(head, ino_of_node(page));
if (!entry) {
- if (IS_INODE(page) && is_dent_dnode(page)) {
+ bool quota_inode = false;
+
+ if (!check_only &&
+ IS_INODE(page) && is_dent_dnode(page)) {
err = recover_inode_page(sbi, page);
if (err)
break;
+ quota_inode = true;
}
- /* add this fsync inode to the list */
- entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
- if (!entry) {
- err = -ENOMEM;
- break;
- }
/*
* CP | dnode(F) | inode(DF)
* For this case, we should not give up now.
*/
- entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
- if (IS_ERR(entry->inode)) {
- err = PTR_ERR(entry->inode);
- kmem_cache_free(fsync_entry_slab, entry);
+ entry = add_fsync_inode(sbi, head, ino_of_node(page),
+ quota_inode);
+ if (IS_ERR(entry)) {
+ err = PTR_ERR(entry);
if (err == -ENOENT) {
err = 0;
goto next;
}
break;
}
- list_add_tail(&entry->list, head);
}
entry->blkaddr = blkaddr;
- if (IS_INODE(page)) {
- entry->last_inode = blkaddr;
- if (is_dent_dnode(page))
- entry->last_dentry = blkaddr;
- }
+ if (IS_INODE(page) && is_dent_dnode(page))
+ entry->last_dentry = blkaddr;
next:
/* check next segment */
blkaddr = next_blkaddr_of_node(page);
{
struct fsync_inode_entry *entry, *tmp;
- list_for_each_entry_safe(entry, tmp, head, list) {
- iput(entry->inode);
- list_del(&entry->list);
- kmem_cache_free(fsync_entry_slab, entry);
- }
+ list_for_each_entry_safe(entry, tmp, head, list)
+ del_fsync_inode(entry);
}
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
return 0;
/* Get the previous summary */
- for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
+ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
struct curseg_info *curseg = CURSEG_I(sbi, i);
if (curseg->segno == segno) {
sum = curseg->sum_blk->entries[blkoff];
f2fs_put_page(node_page, 1);
if (ino != dn->inode->i_ino) {
+ int ret;
+
/* Deallocate previous index in the node page */
- inode = f2fs_iget(sbi->sb, ino);
+ inode = f2fs_iget_retry(sbi->sb, ino);
if (IS_ERR(inode))
return PTR_ERR(inode);
+
+ ret = dquot_initialize(inode);
+ if (ret) {
+ iput(inode);
+ return ret;
+ }
} else {
inode = dn->inode;
}
- bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
- le16_to_cpu(sum.ofs_in_node);
+ bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);
/*
* if inode page is locked, unlock temporarily, but its reference
return 0;
truncate_out:
- if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
+ if (datablock_addr(tdn.inode, tdn.node_page,
+ tdn.ofs_in_node) == blkaddr)
truncate_data_blocks_range(&tdn, 1);
if (dn->inode->i_ino == nid && !dn->inode_page_locked)
unlock_page(dn->inode_page);
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
struct page *page, block_t blkaddr)
{
- struct f2fs_inode_info *fi = F2FS_I(inode);
- unsigned int start, end;
struct dnode_of_data dn;
struct node_info ni;
+ unsigned int start, end;
int err = 0, recovered = 0;
/* step 1: recover xattr */
if (IS_INODE(page)) {
recover_inline_xattr(inode, page);
} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
- /*
- * Deprecated; xattr blocks should be found from cold log.
- * But, we should remain this for backward compatibility.
- */
- recover_xattr_data(inode, page, blkaddr);
+ err = recover_xattr_data(inode, page, blkaddr);
+ if (!err)
+ recovered++;
goto out;
}
goto out;
/* step 3: recover data indices */
- start = start_bidx_of_node(ofs_of_node(page), fi);
- end = start + ADDRS_PER_PAGE(page, fi);
+ start = start_bidx_of_node(ofs_of_node(page), inode);
+ end = start + ADDRS_PER_PAGE(page, inode);
set_new_dnode(&dn, inode, NULL, NULL, 0);
-
+retry_dn:
err = get_dnode_of_data(&dn, start, ALLOC_NODE);
- if (err)
+ if (err) {
+ if (err == -ENOMEM) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry_dn;
+ }
goto out;
+ }
- f2fs_wait_on_page_writeback(dn.node_page, NODE);
+ f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
get_node_info(sbi, dn.nid, &ni);
f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
for (; start < end; start++, dn.ofs_in_node++) {
block_t src, dest;
- src = datablock_addr(dn.node_page, dn.ofs_in_node);
- dest = datablock_addr(page, dn.ofs_in_node);
+ src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
+ dest = datablock_addr(dn.inode, page, dn.ofs_in_node);
/* skip recovering if dest is the same as src */
if (src == dest)
continue;
}
+ if (!file_keep_isize(inode) &&
+ (i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
+ f2fs_i_size_write(inode,
+ (loff_t)(start + 1) << PAGE_SHIFT);
+
/*
* dest is reserved block, invalidate src block
* and then reserve one new block in dnode page.
*/
if (dest == NEW_ADDR) {
truncate_data_blocks_range(&dn, 1);
- err = reserve_new_block(&dn);
- f2fs_bug_on(sbi, err);
+ reserve_new_block(&dn);
continue;
}
if (src == NULL_ADDR) {
err = reserve_new_block(&dn);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ while (err)
+ err = reserve_new_block(&dn);
+#endif
/* We should not get -ENOSPC */
f2fs_bug_on(sbi, err);
+ if (err)
+ goto err;
}
-
+retry_prev:
/* Check the previous node page having this index */
err = check_index_in_prev_nodes(sbi, dest, &dn);
- if (err)
+ if (err) {
+ if (err == -ENOMEM) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry_prev;
+ }
goto err;
+ }
/* write dummy data page */
f2fs_replace_block(sbi, &dn, src, dest,
- ni.version, false);
+ ni.version, false, false);
recovered++;
}
}
- if (IS_INODE(dn.node_page))
- sync_inode_page(&dn);
-
copy_node_footer(dn.node_page, page);
fill_node_footer(dn.node_page, dn.nid, ni.ino,
ofs_of_node(page), false);
f2fs_put_dnode(&dn);
out:
f2fs_msg(sbi->sb, KERN_NOTICE,
- "recover_data: ino = %lx, recovered = %d blocks, err = %d",
- inode->i_ino, recovered, err);
+ "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
+ inode->i_ino,
+ file_keep_isize(inode) ? "keep" : "recover",
+ recovered, err);
return err;
}
-static int recover_data(struct f2fs_sb_info *sbi,
- struct list_head *head, int type)
+static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
+ struct list_head *dir_list)
{
- unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
struct curseg_info *curseg;
struct page *page = NULL;
int err = 0;
block_t blkaddr;
/* get node pages in the current segment */
- curseg = CURSEG_I(sbi, type);
+ curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
while (1) {
page = get_tmp_page(sbi, blkaddr);
- if (cp_ver != cpver_of_node(page)) {
+ if (!is_recoverable_dnode(page)) {
f2fs_put_page(page, 1);
break;
}
- entry = get_fsync_inode(head, ino_of_node(page));
+ entry = get_fsync_inode(inode_list, ino_of_node(page));
if (!entry)
goto next;
/*
* In this case, we can lose the latest inode(x).
* So, call recover_inode for the inode update.
*/
- if (entry->last_inode == blkaddr)
+ if (IS_INODE(page))
recover_inode(entry->inode, page);
if (entry->last_dentry == blkaddr) {
- err = recover_dentry(entry->inode, page);
+ err = recover_dentry(entry->inode, page, dir_list);
if (err) {
f2fs_put_page(page, 1);
break;
break;
}
- if (entry->blkaddr == blkaddr) {
- iput(entry->inode);
- list_del(&entry->list);
- kmem_cache_free(fsync_entry_slab, entry);
- }
+ if (entry->blkaddr == blkaddr)
+ del_fsync_inode(entry);
next:
/* check next segment */
blkaddr = next_blkaddr_of_node(page);
return err;
}
-int recover_fsync_data(struct f2fs_sb_info *sbi)
+int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
- struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
struct list_head inode_list;
- block_t blkaddr;
+ struct list_head dir_list;
int err;
+ int ret = 0;
+ unsigned long s_flags = sbi->sb->s_flags;
bool need_writecp = false;
+ if (s_flags & MS_RDONLY) {
+ f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
+ sbi->sb->s_flags &= ~MS_RDONLY;
+ }
+
+#ifdef CONFIG_QUOTA
+ /* Needed for iput() to work correctly and not trash data */
+ sbi->sb->s_flags |= MS_ACTIVE;
+ /* Turn on quotas so that they are updated correctly */
+ f2fs_enable_quota_files(sbi);
+#endif
+
fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
sizeof(struct fsync_inode_entry));
- if (!fsync_entry_slab)
- return -ENOMEM;
+ if (!fsync_entry_slab) {
+ err = -ENOMEM;
+ goto out;
+ }
INIT_LIST_HEAD(&inode_list);
+ INIT_LIST_HEAD(&dir_list);
/* prevent checkpoint */
mutex_lock(&sbi->cp_mutex);
- blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
-
/* step #1: find fsynced inode numbers */
- err = find_fsync_dnodes(sbi, &inode_list);
- if (err)
- goto out;
+ err = find_fsync_dnodes(sbi, &inode_list, check_only);
+ if (err || list_empty(&inode_list))
+ goto skip;
- if (list_empty(&inode_list))
- goto out;
+ if (check_only) {
+ ret = 1;
+ goto skip;
+ }
need_writecp = true;
/* step #2: recover data */
- err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
+ err = recover_data(sbi, &inode_list, &dir_list);
if (!err)
f2fs_bug_on(sbi, !list_empty(&inode_list));
-out:
+skip:
destroy_fsync_dnodes(&inode_list);
- kmem_cache_destroy(fsync_entry_slab);
/* truncate meta pages to be used by the recovery */
truncate_inode_pages_range(META_MAPPING(sbi),
- (loff_t)MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);
+ (loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);
if (err) {
truncate_inode_pages_final(NODE_MAPPING(sbi));
}
clear_sbi_flag(sbi, SBI_POR_DOING);
- if (err) {
- bool invalidate = false;
+ mutex_unlock(&sbi->cp_mutex);
- if (discard_next_dnode(sbi, blkaddr))
- invalidate = true;
+ /* let's drop all the directory inodes for clean checkpoint */
+ destroy_fsync_dnodes(&dir_list);
- /* Flush all the NAT/SIT pages */
- while (get_pages(sbi, F2FS_DIRTY_META))
- sync_meta_pages(sbi, META, LONG_MAX);
-
- /* invalidate temporary meta page */
- if (invalidate)
- invalidate_mapping_pages(META_MAPPING(sbi),
- blkaddr, blkaddr);
-
- set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
- mutex_unlock(&sbi->cp_mutex);
- } else if (need_writecp) {
+ if (!err && need_writecp) {
struct cp_control cpc = {
.reason = CP_RECOVERY,
};
- mutex_unlock(&sbi->cp_mutex);
- write_checkpoint(sbi, &cpc);
- } else {
- mutex_unlock(&sbi->cp_mutex);
+ err = write_checkpoint(sbi, &cpc);
}
- return err;
+
+ kmem_cache_destroy(fsync_entry_slab);
+out:
+#ifdef CONFIG_QUOTA
+ /* Turn quotas off */
+ f2fs_quota_off_umount(sbi->sb);
+#endif
+ sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+
+ return ret ? ret : err;
}
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>
+#include <linux/freezer.h>
+#include <linux/sched.h>
#include "f2fs.h"
#include "segment.h"
#include "node.h"
+#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>
#define __reverse_ffz(x) __reverse_ffs(~(x))
static struct kmem_cache *discard_entry_slab;
+static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;
/*
* __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
* f2fs_set_bit makes MSB and LSB reversed in a byte.
+ * @size must be an integral multiple of BITS_PER_LONG.
* Example:
* MSB <--> LSB
* f2fs_set_bit(0, bitmap) => 1000 0000
unsigned long size, unsigned long offset)
{
const unsigned long *p = addr + BIT_WORD(offset);
- unsigned long result = offset & ~(BITS_PER_LONG - 1);
+ unsigned long result = size;
unsigned long tmp;
if (offset >= size)
return size;
- size -= result;
+ size -= (offset & ~(BITS_PER_LONG - 1));
offset %= BITS_PER_LONG;
- if (!offset)
- goto aligned;
-
- tmp = __reverse_ulong((unsigned char *)p);
- tmp &= ~0UL >> offset;
-
- if (size < BITS_PER_LONG)
- goto found_first;
- if (tmp)
- goto found_middle;
-
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- p++;
-aligned:
- while (size & ~(BITS_PER_LONG-1)) {
+
+ while (1) {
+ if (*p == 0)
+ goto pass;
+
tmp = __reverse_ulong((unsigned char *)p);
+
+ tmp &= ~0UL >> offset;
+ if (size < BITS_PER_LONG)
+ tmp &= (~0UL << (BITS_PER_LONG - size));
if (tmp)
- goto found_middle;
- result += BITS_PER_LONG;
+ goto found;
+pass:
+ if (size <= BITS_PER_LONG)
+ break;
size -= BITS_PER_LONG;
+ offset = 0;
p++;
}
- if (!size)
- return result;
-
- tmp = __reverse_ulong((unsigned char *)p);
-found_first:
- tmp &= (~0UL << (BITS_PER_LONG - size));
- if (!tmp) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + __reverse_ffs(tmp);
+ return result;
+found:
+ return result - size + __reverse_ffs(tmp);
}
static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
unsigned long size, unsigned long offset)
{
const unsigned long *p = addr + BIT_WORD(offset);
- unsigned long result = offset & ~(BITS_PER_LONG - 1);
+ unsigned long result = size;
unsigned long tmp;
if (offset >= size)
return size;
- size -= result;
+ size -= (offset & ~(BITS_PER_LONG - 1));
offset %= BITS_PER_LONG;
- if (!offset)
- goto aligned;
-
- tmp = __reverse_ulong((unsigned char *)p);
- tmp |= ~((~0UL << offset) >> offset);
-
- if (size < BITS_PER_LONG)
- goto found_first;
- if (tmp != ~0UL)
- goto found_middle;
-
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- p++;
-aligned:
- while (size & ~(BITS_PER_LONG - 1)) {
+
+ while (1) {
+ if (*p == ~0UL)
+ goto pass;
+
tmp = __reverse_ulong((unsigned char *)p);
+
+ if (offset)
+ tmp |= ~0UL << (BITS_PER_LONG - offset);
+ if (size < BITS_PER_LONG)
+ tmp |= ~0UL >> size;
if (tmp != ~0UL)
- goto found_middle;
- result += BITS_PER_LONG;
+ goto found;
+pass:
+ if (size <= BITS_PER_LONG)
+ break;
size -= BITS_PER_LONG;
+ offset = 0;
p++;
}
- if (!size)
- return result;
+ return result;
+found:
+ return result - size + __reverse_ffz(tmp);
+}
- tmp = __reverse_ulong((unsigned char *)p);
-found_first:
- tmp |= ~(~0UL << (BITS_PER_LONG - size));
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. */
-found_middle:
- return result + __reverse_ffz(tmp);
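/*
 * Semantics sketch (illustrative, not in the patch): f2fs_set_bit()
 * numbers bits MSB-first within each byte, which is why both searches
 * above byte-reverse each word with __reverse_ulong(). Setting bit 2
 * yields the byte 0010 0000, and:
 */
static void __maybe_unused example_reverse_bit_search(void)
{
	unsigned long map[1] = { 0 };

	f2fs_set_bit(2, (char *)map);
	/* first set bit from offset 0 is index 2 ... */
	BUG_ON(__find_rev_next_bit(map, BITS_PER_LONG, 0) != 2);
	/* ... and the first clear bit at or after it is index 3 */
	BUG_ON(__find_rev_next_zero_bit(map, BITS_PER_LONG, 2) != 3);
}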
+bool need_SSR(struct f2fs_sb_info *sbi)
+{
+ int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
+ int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
+ int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
+
+ if (test_opt(sbi, LFS))
+ return false;
+ if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
+ return true;
+
+ return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
+ 2 * reserved_sections(sbi));
}
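/*
 * Numeric illustration (made-up values): with one dirty-node section,
 * one dirty-dentry section, no dirty imeta and reserved_sections = 2,
 * SSR kicks in once free_sections(sbi) <= 1 + 2 * 1 + 0 + 2 * 2 = 7,
 * i.e. well before space is exhausted, unless LFS mode forbids SSR
 * outright or urgent GC forces it on unconditionally.
 */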
void register_inmem_page(struct inode *inode, struct page *page)
trace_f2fs_register_inmem_page(page, INMEM);
}
-int commit_inmem_pages(struct inode *inode, bool abort)
+static int __revoke_inmem_pages(struct inode *inode,
+ struct list_head *head, bool drop, bool recover)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct inmem_pages *cur, *tmp;
+ int err = 0;
+
+ list_for_each_entry_safe(cur, tmp, head, list) {
+ struct page *page = cur->page;
+
+ if (drop)
+ trace_f2fs_commit_inmem_page(page, INMEM_DROP);
+
+ lock_page(page);
+
+ if (recover) {
+ struct dnode_of_data dn;
+ struct node_info ni;
+
+ trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
+retry:
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
+ if (err) {
+ if (err == -ENOMEM) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ cond_resched();
+ goto retry;
+ }
+ err = -EAGAIN;
+ goto next;
+ }
+ get_node_info(sbi, dn.nid, &ni);
+ f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
+ cur->old_addr, ni.version, true, true);
+ f2fs_put_dnode(&dn);
+ }
+next:
+ /* we don't need to invalidate this in the successful case */
+ if (drop || recover)
+ ClearPageUptodate(page);
+ set_page_private(page, 0);
+ ClearPagePrivate(page);
+ f2fs_put_page(page, 1);
+
+ list_del(&cur->list);
+ kmem_cache_free(inmem_entry_slab, cur);
+ dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
+ }
+ return err;
+}
+
+void drop_inmem_pages(struct inode *inode)
+{
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+
+ mutex_lock(&fi->inmem_lock);
+ __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
+ mutex_unlock(&fi->inmem_lock);
+
+ clear_inode_flag(inode, FI_ATOMIC_FILE);
+ clear_inode_flag(inode, FI_HOT_DATA);
+ stat_dec_atomic_write(inode);
+}
+
+void drop_inmem_page(struct inode *inode, struct page *page)
+{
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct list_head *head = &fi->inmem_pages;
+ struct inmem_pages *cur = NULL;
+
+ f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));
+
+ mutex_lock(&fi->inmem_lock);
+ list_for_each_entry(cur, head, list) {
+ if (cur->page == page)
+ break;
+ }
+
+ f2fs_bug_on(sbi, !cur || cur->page != page);
+ list_del(&cur->list);
+ mutex_unlock(&fi->inmem_lock);
+
+ dec_page_count(sbi, F2FS_INMEM_PAGES);
+ kmem_cache_free(inmem_entry_slab, cur);
+
+ ClearPageUptodate(page);
+ set_page_private(page, 0);
+ ClearPagePrivate(page);
+ f2fs_put_page(page, 0);
+
+ trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
+}
+
+static int __commit_inmem_pages(struct inode *inode,
+ struct list_head *revoke_list)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct inmem_pages *cur, *tmp;
- bool submit_bio = false;
struct f2fs_io_info fio = {
.sbi = sbi,
.type = DATA,
- .rw = WRITE_SYNC | REQ_PRIO,
- .encrypted_page = NULL,
+ .op = REQ_OP_WRITE,
+ .op_flags = REQ_SYNC | REQ_PRIO,
+ .io_type = FS_DATA_IO,
};
+ pgoff_t last_idx = ULONG_MAX;
int err = 0;
- /*
- * The abort is true only when f2fs_evict_inode is called.
- * Basically, the f2fs_evict_inode doesn't produce any data writes, so
- * that we don't need to call f2fs_balance_fs.
- * Otherwise, f2fs_gc in f2fs_balance_fs can wait forever until this
- * inode becomes free by iget_locked in f2fs_iget.
- */
- if (!abort) {
- f2fs_balance_fs(sbi);
- f2fs_lock_op(sbi);
- }
-
- mutex_lock(&fi->inmem_lock);
list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
- lock_page(cur->page);
- if (!abort) {
- if (cur->page->mapping == inode->i_mapping) {
- set_page_dirty(cur->page);
- f2fs_wait_on_page_writeback(cur->page, DATA);
- if (clear_page_dirty_for_io(cur->page))
- inode_dec_dirty_pages(inode);
- trace_f2fs_commit_inmem_page(cur->page, INMEM);
- fio.page = cur->page;
- err = do_write_data_page(&fio);
- if (err) {
- unlock_page(cur->page);
- break;
+ struct page *page = cur->page;
+
+ lock_page(page);
+ if (page->mapping == inode->i_mapping) {
+ trace_f2fs_commit_inmem_page(page, INMEM);
+
+ set_page_dirty(page);
+ f2fs_wait_on_page_writeback(page, DATA, true);
+ if (clear_page_dirty_for_io(page)) {
+ inode_dec_dirty_pages(inode);
+ remove_dirty_inode(inode);
+ }
+retry:
+ fio.page = page;
+ fio.old_blkaddr = NULL_ADDR;
+ fio.encrypted_page = NULL;
+ fio.need_lock = LOCK_DONE;
+ err = do_write_data_page(&fio);
+ if (err) {
+ if (err == -ENOMEM) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ cond_resched();
+ goto retry;
}
- clear_cold_data(cur->page);
- submit_bio = true;
+ unlock_page(page);
+ break;
}
- } else {
- trace_f2fs_commit_inmem_page(cur->page, INMEM_DROP);
+ /* record old blkaddr for revoking */
+ cur->old_addr = fio.old_blkaddr;
+ last_idx = page->index;
}
- set_page_private(cur->page, 0);
- ClearPagePrivate(cur->page);
- f2fs_put_page(cur->page, 1);
+ unlock_page(page);
+ list_move_tail(&cur->list, revoke_list);
+ }
- list_del(&cur->list);
- kmem_cache_free(inmem_entry_slab, cur);
- dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
+ if (last_idx != ULONG_MAX)
+ f2fs_submit_merged_write_cond(sbi, inode, 0, last_idx, DATA);
+
+ if (!err)
+ __revoke_inmem_pages(inode, revoke_list, false, false);
+
+ return err;
+}
+
+int commit_inmem_pages(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct list_head revoke_list;
+ int err;
+
+ INIT_LIST_HEAD(&revoke_list);
+ f2fs_balance_fs(sbi, true);
+ f2fs_lock_op(sbi);
+
+ set_inode_flag(inode, FI_ATOMIC_COMMIT);
+
+ mutex_lock(&fi->inmem_lock);
+ err = __commit_inmem_pages(inode, &revoke_list);
+ if (err) {
+ int ret;
+ /*
+ * Try to revoke all committed pages. This can still fail due to
+ * lack of memory or other reasons; in that case -EAGAIN is
+ * returned, which means the transaction is no longer intact and
+ * the caller should use a journal to recover, or rewrite and
+ * commit the last transaction. For any other error number,
+ * revoking was done by the filesystem itself.
+ */
+ ret = __revoke_inmem_pages(inode, &revoke_list, false, true);
+ if (ret)
+ err = ret;
+
+ /* drop all uncommitted pages */
+ __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
}
mutex_unlock(&fi->inmem_lock);
- if (!abort) {
- f2fs_unlock_op(sbi);
- if (submit_bio)
- f2fs_submit_merged_bio(sbi, DATA, WRITE);
- }
+ clear_inode_flag(inode, FI_ATOMIC_COMMIT);
+
+ f2fs_unlock_op(sbi);
return err;
}
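/*
 * Hypothetical caller sketch (not from this patch): -EAGAIN from
 * commit_inmem_pages() means revoking also failed and the atomic
 * transaction is torn, so an ioctl path might drop the remaining
 * atomic state and report the error to userspace:
 */
static int example_commit_atomic_write(struct inode *inode)
{
	int err = commit_inmem_pages(inode);

	if (err == -EAGAIN)
		drop_inmem_pages(inode);	/* give up the torn transaction */
	return err;
}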
* This function balances dirty node and dentry pages.
* In addition, it controls garbage collection.
*/
-void f2fs_balance_fs(struct f2fs_sb_info *sbi)
+void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
+ f2fs_show_injection_info(FAULT_CHECKPOINT);
+ f2fs_stop_checkpoint(sbi, false);
+ }
+#endif
+
+ /* balance_fs_bg is able to be pending */
+ if (need && excess_cached_nats(sbi))
+ f2fs_balance_fs_bg(sbi);
+
/*
* We should do GC or end up with checkpoint, if there are so many dirty
* dir/node pages without enough free segments.
*/
- if (has_not_enough_free_secs(sbi, 0)) {
+ if (has_not_enough_free_secs(sbi, 0, 0)) {
mutex_lock(&sbi->gc_mutex);
- f2fs_gc(sbi, false);
+ f2fs_gc(sbi, false, false, NULL_SEGNO);
}
}
try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
if (!available_free_memory(sbi, FREE_NIDS))
- try_to_free_nids(sbi, NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES);
+ try_to_free_nids(sbi, MAX_FREE_NIDS);
+ else
+ build_free_nids(sbi, false, false);
+
+ if (!is_idle(sbi) && !excess_dirty_nats(sbi))
+ return;
/* checkpoint is the only way to shrink partial cached entries */
if (!available_free_memory(sbi, NAT_ENTRIES) ||
- excess_prefree_segs(sbi) ||
!available_free_memory(sbi, INO_ENTRIES) ||
- jiffies > sbi->cp_expires)
+ excess_prefree_segs(sbi) ||
+ excess_dirty_nats(sbi) ||
+ f2fs_time_over(sbi, CP_TIME)) {
+ if (test_opt(sbi, DATA_FLUSH)) {
+ struct blk_plug plug;
+
+ blk_start_plug(&plug);
+ sync_dirty_inodes(sbi, FILE_INODE);
+ blk_finish_plug(&plug);
+ }
f2fs_sync_fs(sbi->sb, true);
+ stat_inc_bg_cp_count(sbi->stat_info);
+ }
+}
+
+static int __submit_flush_wait(struct f2fs_sb_info *sbi,
+ struct block_device *bdev)
+{
+ struct bio *bio = f2fs_bio_alloc(0);
+ int ret;
+
+ bio->bi_rw = REQ_OP_WRITE;
+ bio->bi_bdev = bdev;
+ ret = submit_bio_wait(WRITE_FLUSH, bio);
+ bio_put(bio);
+
+ trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
+ test_opt(sbi, FLUSH_MERGE), ret);
+ return ret;
+}
+
+static int submit_flush_wait(struct f2fs_sb_info *sbi)
+{
+ int ret = __submit_flush_wait(sbi, sbi->sb->s_bdev);
+ int i;
+
+ if (!sbi->s_ndevs || ret)
+ return ret;
+
+ for (i = 1; i < sbi->s_ndevs; i++) {
+ ret = __submit_flush_wait(sbi, FDEV(i).bdev);
+ if (ret)
+ break;
+ }
+ return ret;
}
static int issue_flush_thread(void *data)
{
struct f2fs_sb_info *sbi = data;
- struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
+ struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
if (kthread_should_stop())
return 0;
+ sb_start_intwrite(sbi->sb);
+
if (!llist_empty(&fcc->issue_list)) {
- struct bio *bio;
struct flush_cmd *cmd, *next;
int ret;
- bio = f2fs_bio_alloc(0);
-
fcc->dispatch_list = llist_del_all(&fcc->issue_list);
fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
- bio->bi_bdev = sbi->sb->s_bdev;
- ret = submit_bio_wait(WRITE_FLUSH, bio);
+ ret = submit_flush_wait(sbi);
+ atomic_inc(&fcc->issued_flush);
llist_for_each_entry_safe(cmd, next,
fcc->dispatch_list, llnode) {
cmd->ret = ret;
complete(&cmd->wait);
}
- bio_put(bio);
fcc->dispatch_list = NULL;
}
+ sb_end_intwrite(sbi->sb);
+
wait_event_interruptible(*q,
kthread_should_stop() || !llist_empty(&fcc->issue_list));
goto repeat;
int f2fs_issue_flush(struct f2fs_sb_info *sbi)
{
- struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
+ struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
struct flush_cmd cmd;
-
- trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER),
- test_opt(sbi, FLUSH_MERGE));
+ int ret;
if (test_opt(sbi, NOBARRIER))
return 0;
if (!test_opt(sbi, FLUSH_MERGE)) {
- struct bio *bio = f2fs_bio_alloc(0);
- int ret;
+ ret = submit_flush_wait(sbi);
+ atomic_inc(&fcc->issued_flush);
+ return ret;
+ }
+
+ if (atomic_inc_return(&fcc->issing_flush) == 1) {
+ ret = submit_flush_wait(sbi);
+ atomic_dec(&fcc->issing_flush);
- bio->bi_bdev = sbi->sb->s_bdev;
- ret = submit_bio_wait(WRITE_FLUSH, bio);
- bio_put(bio);
+ atomic_inc(&fcc->issued_flush);
return ret;
}
llist_add(&cmd.llnode, &fcc->issue_list);
- if (!fcc->dispatch_list)
+ /* update issue_list before we wake up issue_flush thread */
+ smp_mb();
+
+ if (waitqueue_active(&fcc->flush_wait_queue))
wake_up(&fcc->flush_wait_queue);
- wait_for_completion(&cmd.wait);
+ if (fcc->f2fs_issue_flush) {
+ wait_for_completion(&cmd.wait);
+ atomic_dec(&fcc->issing_flush);
+ } else {
+ struct llist_node *list;
+
+ list = llist_del_all(&fcc->issue_list);
+ if (!list) {
+ wait_for_completion(&cmd.wait);
+ atomic_dec(&fcc->issing_flush);
+ } else {
+ struct flush_cmd *tmp, *next;
+
+ ret = submit_flush_wait(sbi);
+
+ llist_for_each_entry_safe(tmp, next, list, llnode) {
+ if (tmp == &cmd) {
+ cmd.ret = ret;
+ atomic_dec(&fcc->issing_flush);
+ continue;
+ }
+ tmp->ret = ret;
+ complete(&tmp->wait);
+ }
+ }
+ }
return cmd.ret;
}
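/*
 * Minimal waiter sketch (illustrative only) of the merge pattern the
 * slow path above relies on: enqueue a cmd, publish it, wake the
 * flusher, then sleep until some context completes every queued
 * waiter with the shared return code of a single FLUSH bio.
 */
static int example_wait_merged_flush(struct flush_cmd_control *fcc,
					struct flush_cmd *cmd)
{
	init_completion(&cmd->wait);
	llist_add(&cmd->llnode, &fcc->issue_list);
	smp_mb();	/* pair with the list check in issue_flush_thread() */
	if (waitqueue_active(&fcc->flush_wait_queue))
		wake_up(&fcc->flush_wait_queue);
	wait_for_completion(&cmd->wait);
	return cmd->ret;
}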
struct flush_cmd_control *fcc;
int err = 0;
+ if (SM_I(sbi)->fcc_info) {
+ fcc = SM_I(sbi)->fcc_info;
+ if (fcc->f2fs_issue_flush)
+ return err;
+ goto init_thread;
+ }
+
fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
if (!fcc)
return -ENOMEM;
+ atomic_set(&fcc->issued_flush, 0);
+ atomic_set(&fcc->issing_flush, 0);
init_waitqueue_head(&fcc->flush_wait_queue);
init_llist_head(&fcc->issue_list);
- SM_I(sbi)->cmd_control_info = fcc;
+ SM_I(sbi)->fcc_info = fcc;
+ if (!test_opt(sbi, FLUSH_MERGE))
+ return err;
+
+init_thread:
fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
if (IS_ERR(fcc->f2fs_issue_flush)) {
err = PTR_ERR(fcc->f2fs_issue_flush);
kfree(fcc);
- SM_I(sbi)->cmd_control_info = NULL;
+ SM_I(sbi)->fcc_info = NULL;
return err;
}
return err;
}
-void destroy_flush_cmd_control(struct f2fs_sb_info *sbi)
+void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
- struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
+ struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
- if (fcc && fcc->f2fs_issue_flush)
- kthread_stop(fcc->f2fs_issue_flush);
- kfree(fcc);
- SM_I(sbi)->cmd_control_info = NULL;
+ if (fcc && fcc->f2fs_issue_flush) {
+ struct task_struct *flush_thread = fcc->f2fs_issue_flush;
+
+ fcc->f2fs_issue_flush = NULL;
+ kthread_stop(flush_thread);
+ }
+ if (free) {
+ kfree(fcc);
+ SM_I(sbi)->fcc_info = NULL;
+ }
}
static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
dirty_i->nr_dirty[t]--;
- if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
- clear_bit(GET_SECNO(sbi, segno),
+ if (get_valid_blocks(sbi, segno, true) == 0)
+ clear_bit(GET_SEC_FROM_SEG(sbi, segno),
dirty_i->victim_secmap);
}
}
mutex_lock(&dirty_i->seglist_lock);
- valid_blocks = get_valid_blocks(sbi, segno, 0);
+ valid_blocks = get_valid_blocks(sbi, segno, false);
if (valid_blocks == 0) {
__locate_dirty_segment(sbi, segno, PRE);
__remove_dirty_segment(sbi, segno, DIRTY);
}
- mutex_unlock(&dirty_i->seglist_lock);
+ mutex_unlock(&dirty_i->seglist_lock);
+}
+
+static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t lstart,
+ block_t start, block_t len)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct list_head *pend_list;
+ struct discard_cmd *dc;
+
+ f2fs_bug_on(sbi, !len);
+
+ pend_list = &dcc->pend_list[plist_idx(len)];
+
+ dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
+ INIT_LIST_HEAD(&dc->list);
+ dc->bdev = bdev;
+ dc->lstart = lstart;
+ dc->start = start;
+ dc->len = len;
+ dc->ref = 0;
+ dc->state = D_PREP;
+ dc->error = 0;
+ init_completion(&dc->wait);
+ list_add_tail(&dc->list, pend_list);
+ atomic_inc(&dcc->discard_cmd_cnt);
+ dcc->undiscard_blks += len;
+
+ return dc;
+}
+
+static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t lstart,
+ block_t start, block_t len,
+ struct rb_node *parent, struct rb_node **p)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct discard_cmd *dc;
+
+ dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
+
+ rb_link_node(&dc->rb_node, parent, p);
+ rb_insert_color(&dc->rb_node, &dcc->root);
+
+ return dc;
+}
+
+static void __detach_discard_cmd(struct discard_cmd_control *dcc,
+ struct discard_cmd *dc)
+{
+ if (dc->state == D_DONE)
+ atomic_dec(&dcc->issing_discard);
+
+ list_del(&dc->list);
+ rb_erase(&dc->rb_node, &dcc->root);
+ dcc->undiscard_blks -= dc->len;
+
+ kmem_cache_free(discard_cmd_slab, dc);
+
+ atomic_dec(&dcc->discard_cmd_cnt);
+}
+
+static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
+ struct discard_cmd *dc)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+
+ f2fs_bug_on(sbi, dc->ref);
+
+ if (dc->error == -EOPNOTSUPP)
+ dc->error = 0;
+
+ if (dc->error)
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Issue discard(%u, %u, %u) failed, ret: %d",
+ dc->lstart, dc->start, dc->len, dc->error);
+ __detach_discard_cmd(dcc, dc);
+}
+
+static void f2fs_submit_discard_endio(struct bio *bio)
+{
+ struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
+
+ dc->error = bio->bi_error;
+ dc->state = D_DONE;
+ complete_all(&dc->wait);
+ bio_put(bio);
+}
+
+/* copied from block/blk-lib.c in 4.10-rc1 */
+static int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, int flags,
+ struct bio **biop)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+ struct bio *bio = *biop;
+ unsigned int granularity;
+ int op = REQ_WRITE | REQ_DISCARD;
+ int alignment;
+ sector_t bs_mask;
+
+ if (!q)
+ return -ENXIO;
+
+ if (!blk_queue_discard(q))
+ return -EOPNOTSUPP;
+
+ if (flags & BLKDEV_DISCARD_SECURE) {
+ if (!blk_queue_secdiscard(q))
+ return -EOPNOTSUPP;
+ op |= REQ_SECURE;
+ }
+
+ bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
+ if ((sector | nr_sects) & bs_mask)
+ return -EINVAL;
+
+ /* Zero-sector (unknown) and one-sector granularities are the same. */
+ granularity = max(q->limits.discard_granularity >> 9, 1U);
+ alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
+
+ while (nr_sects) {
+ unsigned int req_sects;
+ sector_t end_sect, tmp;
+
+ /* Make sure bi_size doesn't overflow */
+ req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
+
+ /**
+ * If splitting a request, and the next starting sector would be
+ * misaligned, stop the discard at the previous aligned sector.
+ */
+ end_sect = sector + req_sects;
+ tmp = end_sect;
+ if (req_sects < nr_sects &&
+ sector_div(tmp, granularity) != alignment) {
+ end_sect = end_sect - alignment;
+ sector_div(end_sect, granularity);
+ end_sect = end_sect * granularity + alignment;
+ req_sects = end_sect - sector;
+ }
+
+ if (bio) {
+ int ret = submit_bio_wait(op, bio);
+ bio_put(bio);
+ if (ret)
+ return ret;
+ }
+ bio = f2fs_bio_alloc(1);
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_bdev = bdev;
+ bio_set_op_attrs(bio, op, 0);
+
+ bio->bi_iter.bi_size = req_sects << 9;
+ nr_sects -= req_sects;
+ sector = end_sect;
+
+ /*
+ * We can loop for a long time in here, if someone does
+ * full device discards (like mkfs). Be nice and allow
+ * us to schedule out to avoid softlocking if preempt
+ * is disabled.
+ */
+ cond_resched();
+ }
+
+ *biop = bio;
+ return 0;
+}
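/*
 * Worked example for the alignment clamp above (illustrative): with
 * granularity = 8 and alignment = 0, a huge discard starting at
 * sector 0 is first capped at req_sects = UINT_MAX >> 9 = 8388607.
 * That end sector is misaligned (8388607 % 8 == 7), so it is rounded
 * down to 8388600 and the next loop iteration resumes there on a
 * granularity boundary; only the final chunk may end short.
 */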
+
+void __check_sit_bitmap(struct f2fs_sb_info *sbi,
+ block_t start, block_t end)
+{
+#ifdef CONFIG_F2FS_CHECK_FS
+ struct seg_entry *sentry;
+ unsigned int segno;
+ block_t blk = start;
+ unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
+ unsigned long *map;
+
+ while (blk < end) {
+ segno = GET_SEGNO(sbi, blk);
+ sentry = get_seg_entry(sbi, segno);
+ offset = GET_BLKOFF_FROM_SEG0(sbi, blk);
+
+ if (end < START_BLOCK(sbi, segno + 1))
+ size = GET_BLKOFF_FROM_SEG0(sbi, end);
+ else
+ size = max_blocks;
+ map = (unsigned long *)(sentry->cur_valid_map);
+ offset = __find_rev_next_bit(map, size, offset);
+ f2fs_bug_on(sbi, offset != size);
+ blk = START_BLOCK(sbi, segno + 1);
+ }
+#endif
+}
+
+/* this function is copied from blkdev_issue_discard() in block/blk-lib.c */
+static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
+ struct discard_cmd *dc)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct bio *bio = NULL;
+
+ if (dc->state != D_PREP)
+ return;
+
+ trace_f2fs_issue_discard(dc->bdev, dc->start, dc->len);
+
+ dc->error = __blkdev_issue_discard(dc->bdev,
+ SECTOR_FROM_BLOCK(dc->start),
+ SECTOR_FROM_BLOCK(dc->len),
+ GFP_NOFS, 0, &bio);
+ if (!dc->error) {
+ /* must be set before submission to avoid seeing D_DONE right away */
+ dc->state = D_SUBMIT;
+ atomic_inc(&dcc->issued_discard);
+ atomic_inc(&dcc->issing_discard);
+ if (bio) {
+ bio->bi_private = dc;
+ bio->bi_end_io = f2fs_submit_discard_endio;
+ submit_bio(REQ_SYNC, bio);
+ list_move_tail(&dc->list, &dcc->wait_list);
+ __check_sit_bitmap(sbi, dc->start, dc->start + dc->len);
+
+ f2fs_update_iostat(sbi, FS_DISCARD, 1);
+ }
+ } else {
+ __remove_discard_cmd(sbi, dc);
+ }
+}
+
+static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t lstart,
+ block_t start, block_t len,
+ struct rb_node **insert_p,
+ struct rb_node *insert_parent)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct rb_node **p = &dcc->root.rb_node;
+ struct rb_node *parent = NULL;
+ struct discard_cmd *dc = NULL;
+
+ if (insert_p && insert_parent) {
+ parent = insert_parent;
+ p = insert_p;
+ goto do_insert;
+ }
+
+ p = __lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart);
+do_insert:
+ dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, p);
+ if (!dc)
+ return NULL;
+
+ return dc;
+}
+
+static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
+ struct discard_cmd *dc)
+{
+ list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
+}
+
+static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
+ struct discard_cmd *dc, block_t blkaddr)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct discard_info di = dc->di;
+ bool modified = false;
+
+ if (dc->state == D_DONE || dc->len == 1) {
+ __remove_discard_cmd(sbi, dc);
+ return;
+ }
+
+ dcc->undiscard_blks -= di.len;
+
+ if (blkaddr > di.lstart) {
+ dc->len = blkaddr - dc->lstart;
+ dcc->undiscard_blks += dc->len;
+ __relocate_discard_cmd(dcc, dc);
+ modified = true;
+ }
+
+ if (blkaddr < di.lstart + di.len - 1) {
+ if (modified) {
+ __insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
+ di.start + blkaddr + 1 - di.lstart,
+ di.lstart + di.len - 1 - blkaddr,
+ NULL, NULL);
+ } else {
+ dc->lstart++;
+ dc->len--;
+ dc->start++;
+ dcc->undiscard_blks += dc->len;
+ __relocate_discard_cmd(dcc, dc);
+ }
+ }
+}
+
+static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t lstart,
+ block_t start, block_t len)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
+ struct discard_cmd *dc;
+ struct discard_info di = {0};
+ struct rb_node **insert_p = NULL, *insert_parent = NULL;
+ block_t end = lstart + len;
+
+ mutex_lock(&dcc->cmd_lock);
+
+ dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
+ NULL, lstart,
+ (struct rb_entry **)&prev_dc,
+ (struct rb_entry **)&next_dc,
+ &insert_p, &insert_parent, true);
+ if (dc)
+ prev_dc = dc;
+
+ if (!prev_dc) {
+ di.lstart = lstart;
+ di.len = next_dc ? next_dc->lstart - lstart : len;
+ di.len = min(di.len, len);
+ di.start = start;
+ }
+
+ while (1) {
+ struct rb_node *node;
+ bool merged = false;
+ struct discard_cmd *tdc = NULL;
+
+ if (prev_dc) {
+ di.lstart = prev_dc->lstart + prev_dc->len;
+ if (di.lstart < lstart)
+ di.lstart = lstart;
+ if (di.lstart >= end)
+ break;
+
+ if (!next_dc || next_dc->lstart > end)
+ di.len = end - di.lstart;
+ else
+ di.len = next_dc->lstart - di.lstart;
+ di.start = start + di.lstart - lstart;
+ }
+
+ if (!di.len)
+ goto next;
+
+ if (prev_dc && prev_dc->state == D_PREP &&
+ prev_dc->bdev == bdev &&
+ __is_discard_back_mergeable(&di, &prev_dc->di)) {
+ prev_dc->di.len += di.len;
+ dcc->undiscard_blks += di.len;
+ __relocate_discard_cmd(dcc, prev_dc);
+ di = prev_dc->di;
+ tdc = prev_dc;
+ merged = true;
+ }
+
+ if (next_dc && next_dc->state == D_PREP &&
+ next_dc->bdev == bdev &&
+ __is_discard_front_mergeable(&di, &next_dc->di)) {
+ next_dc->di.lstart = di.lstart;
+ next_dc->di.len += di.len;
+ next_dc->di.start = di.start;
+ dcc->undiscard_blks += di.len;
+ __relocate_discard_cmd(dcc, next_dc);
+ if (tdc)
+ __remove_discard_cmd(sbi, tdc);
+ merged = true;
+ }
+
+ if (!merged) {
+ __insert_discard_tree(sbi, bdev, di.lstart, di.start,
+ di.len, NULL, NULL);
+ }
+ next:
+ prev_dc = next_dc;
+ if (!prev_dc)
+ break;
+
+ node = rb_next(&prev_dc->rb_node);
+ next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
+ }
+
+ mutex_unlock(&dcc->cmd_lock);
+}
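/*
 * Assumed semantics of the mergeability helpers used above (sketch,
 * matching how they are called): a pending command can absorb a new
 * range only when both the logical and the physical extents are
 * exactly contiguous, e.g.
 *
 *	back:  prev->di.lstart + prev->di.len == di.lstart &&
 *	       prev->di.start  + prev->di.len == di.start
 *	front: di.lstart + di.len == next->di.lstart &&
 *	       di.start  + di.len == next->di.start
 *
 * so [0, 4) followed by [4, 8) becomes one 8-block pending command
 * and, later, one discard bio instead of two.
 */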
+
+static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t blkstart, block_t blklen)
+{
+ block_t lblkstart = blkstart;
+
+ trace_f2fs_queue_discard(bdev, blkstart, blklen);
+
+ if (sbi->s_ndevs) {
+ int devi = f2fs_target_device_index(sbi, blkstart);
+
+ blkstart -= FDEV(devi).start_blk;
+ }
+ __update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
+ return 0;
+}
+
+static int __issue_discard_cmd(struct f2fs_sb_info *sbi, bool issue_cond)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct list_head *pend_list;
+ struct discard_cmd *dc, *tmp;
+ struct blk_plug plug;
+ int iter = 0, issued = 0;
+ int i;
+ bool io_interrupted = false;
+
+ mutex_lock(&dcc->cmd_lock);
+ f2fs_bug_on(sbi,
+ !__check_rb_tree_consistence(sbi, &dcc->root));
+ blk_start_plug(&plug);
+ for (i = MAX_PLIST_NUM - 1;
+ i >= 0 && plist_issue(dcc->pend_list_tag[i]); i--) {
+ pend_list = &dcc->pend_list[i];
+ list_for_each_entry_safe(dc, tmp, pend_list, list) {
+ f2fs_bug_on(sbi, dc->state != D_PREP);
+
+ /* Hurry up to finish fstrim */
+ if (dcc->pend_list_tag[i] & P_TRIM) {
+ __submit_discard_cmd(sbi, dc);
+ issued++;
+
+ if (fatal_signal_pending(current))
+ break;
+ continue;
+ }
+
+ if (!issue_cond) {
+ __submit_discard_cmd(sbi, dc);
+ issued++;
+ continue;
+ }
+
+ if (is_idle(sbi)) {
+ __submit_discard_cmd(sbi, dc);
+ issued++;
+ } else {
+ io_interrupted = true;
+ }
+
+ if (++iter >= DISCARD_ISSUE_RATE)
+ goto out;
+ }
+ if (list_empty(pend_list) && dcc->pend_list_tag[i] & P_TRIM)
+ dcc->pend_list_tag[i] &= (~P_TRIM);
+ }
+out:
+ blk_finish_plug(&plug);
+ mutex_unlock(&dcc->cmd_lock);
+
+ if (!issued && io_interrupted)
+ issued = -1;
+
+ return issued;
+}
+
+static void __drop_discard_cmd(struct f2fs_sb_info *sbi)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct list_head *pend_list;
+ struct discard_cmd *dc, *tmp;
+ int i;
+
+ mutex_lock(&dcc->cmd_lock);
+ for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
+ pend_list = &dcc->pend_list[i];
+ list_for_each_entry_safe(dc, tmp, pend_list, list) {
+ f2fs_bug_on(sbi, dc->state != D_PREP);
+ __remove_discard_cmd(sbi, dc);
+ }
+ }
+ mutex_unlock(&dcc->cmd_lock);
+}
+
+static void __wait_one_discard_bio(struct f2fs_sb_info *sbi,
+ struct discard_cmd *dc)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+
+ wait_for_completion_io(&dc->wait);
+ mutex_lock(&dcc->cmd_lock);
+ f2fs_bug_on(sbi, dc->state != D_DONE);
+ dc->ref--;
+ if (!dc->ref)
+ __remove_discard_cmd(sbi, dc);
+ mutex_unlock(&dcc->cmd_lock);
+}
+
+static void __wait_discard_cmd(struct f2fs_sb_info *sbi, bool wait_cond)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct list_head *wait_list = &(dcc->wait_list);
+ struct discard_cmd *dc, *tmp;
+ bool need_wait;
+
+next:
+ need_wait = false;
+
+ mutex_lock(&dcc->cmd_lock);
+ list_for_each_entry_safe(dc, tmp, wait_list, list) {
+ if (!wait_cond || (dc->state == D_DONE && !dc->ref)) {
+ wait_for_completion_io(&dc->wait);
+ __remove_discard_cmd(sbi, dc);
+ } else {
+ dc->ref++;
+ need_wait = true;
+ break;
+ }
+ }
+ mutex_unlock(&dcc->cmd_lock);
+
+ if (need_wait) {
+ __wait_one_discard_bio(sbi, dc);
+ goto next;
+ }
+}
+
+/* This should be covered by global mutex, &sit_i->sentry_lock */
+void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct discard_cmd *dc;
+ bool need_wait = false;
+
+ mutex_lock(&dcc->cmd_lock);
+ dc = (struct discard_cmd *)__lookup_rb_tree(&dcc->root, NULL, blkaddr);
+ if (dc) {
+ if (dc->state == D_PREP) {
+ __punch_discard_cmd(sbi, dc, blkaddr);
+ } else {
+ dc->ref++;
+ need_wait = true;
+ }
+ }
+ mutex_unlock(&dcc->cmd_lock);
+
+ if (need_wait)
+ __wait_one_discard_bio(sbi, dc);
+}
+
+void stop_discard_thread(struct f2fs_sb_info *sbi)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+
+ if (dcc && dcc->f2fs_issue_discard) {
+ struct task_struct *discard_thread = dcc->f2fs_issue_discard;
+
+ dcc->f2fs_issue_discard = NULL;
+ kthread_stop(discard_thread);
+ }
+}
+
+/* This comes from f2fs_put_super and f2fs_trim_fs */
+void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount)
+{
+ __issue_discard_cmd(sbi, false);
+ __drop_discard_cmd(sbi);
+ __wait_discard_cmd(sbi, !umount);
+}
+
+static void mark_discard_range_all(struct f2fs_sb_info *sbi)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ int i;
+
+ mutex_lock(&dcc->cmd_lock);
+ for (i = 0; i < MAX_PLIST_NUM; i++)
+ dcc->pend_list_tag[i] |= P_TRIM;
+ mutex_unlock(&dcc->cmd_lock);
+}
+
+static int issue_discard_thread(void *data)
+{
+ struct f2fs_sb_info *sbi = data;
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ wait_queue_head_t *q = &dcc->discard_wait_queue;
+ unsigned int wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
+ int issued;
+
+ set_freezable();
+
+ do {
+ wait_event_interruptible_timeout(*q,
+ kthread_should_stop() || freezing(current) ||
+ dcc->discard_wake,
+ msecs_to_jiffies(wait_ms));
+ if (try_to_freeze())
+ continue;
+ if (kthread_should_stop())
+ return 0;
+
+ if (dcc->discard_wake) {
+ dcc->discard_wake = 0;
+ if (sbi->gc_thread && sbi->gc_thread->gc_urgent)
+ mark_discard_range_all(sbi);
+ }
+
+ sb_start_intwrite(sbi->sb);
+
+ issued = __issue_discard_cmd(sbi, true);
+ if (issued) {
+ __wait_discard_cmd(sbi, true);
+ wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
+ } else {
+ wait_ms = DEF_MAX_DISCARD_ISSUE_TIME;
+ }
+
+ sb_end_intwrite(sbi->sb);
+
+ } while (!kthread_should_stop());
+ return 0;
+}
+
+#ifdef CONFIG_BLK_DEV_ZONED
+static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t blkstart, block_t blklen)
+{
+ sector_t sector, nr_sects;
+ block_t lblkstart = blkstart;
+ int devi = 0;
+
+ if (sbi->s_ndevs) {
+ devi = f2fs_target_device_index(sbi, blkstart);
+ blkstart -= FDEV(devi).start_blk;
+ }
+
+ /*
+ * We need to know the type of the zone: for conventional zones,
+ * use regular discard if the drive supports it. For sequential
+ * zones, reset the zone write pointer.
+ */
+ switch (get_blkz_type(sbi, bdev, blkstart)) {
+
+ case BLK_ZONE_TYPE_CONVENTIONAL:
+ if (!blk_queue_discard(bdev_get_queue(bdev)))
+ return 0;
+ return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
+ case BLK_ZONE_TYPE_SEQWRITE_REQ:
+ case BLK_ZONE_TYPE_SEQWRITE_PREF:
+ sector = SECTOR_FROM_BLOCK(blkstart);
+ nr_sects = SECTOR_FROM_BLOCK(blklen);
+
+ if (sector & (bdev_zone_sectors(bdev) - 1) ||
+ nr_sects != bdev_zone_sectors(bdev)) {
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "(%d) %s: Unaligned discard attempted (block %x + %x)",
+ devi, sbi->s_ndevs ? FDEV(devi).path : "",
+ blkstart, blklen);
+ return -EIO;
+ }
+ trace_f2fs_issue_reset_zone(bdev, blkstart);
+ return blkdev_reset_zones(bdev, sector,
+ nr_sects, GFP_NOFS);
+ default:
+ /* Unknown zone type: broken device? */
+ return -EIO;
+ }
+}
+#endif
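A zone reset is only legal on exactly one whole zone, hence the check that the start sits on a zone boundary (the power-of-two test `sector & (zone_sectors - 1)`) and that the length equals the zone size. A sketch under an assumed 256MB zone:

/* Sketch of the zone alignment check (assumes power-of-two zone size). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool zone_resettable(uint64_t sector, uint64_t nr_sects,
			    uint64_t zone_sectors)
{
	/* start must sit on a zone boundary and cover exactly one zone */
	if (sector & (zone_sectors - 1))
		return false;
	return nr_sects == zone_sectors;
}

int main(void)
{
	uint64_t zs = 524288;	/* 256MB zone in 512B sectors, assumed */

	printf("%d\n", zone_resettable(0, zs, zs));	 /* 1: aligned, whole */
	printf("%d\n", zone_resettable(zs / 2, zs, zs)); /* 0: misaligned */
	printf("%d\n", zone_resettable(zs, zs / 2, zs)); /* 0: partial zone */
	return 0;
}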
+
+static int __issue_discard_async(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t blkstart, block_t blklen)
+{
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
+ bdev_zoned_model(bdev) != BLK_ZONED_NONE)
+ return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
+#endif
+ return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
}
static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
block_t blkstart, block_t blklen)
{
- sector_t start = SECTOR_FROM_BLOCK(blkstart);
- sector_t len = SECTOR_FROM_BLOCK(blklen);
+ sector_t start = blkstart, len = 0;
+ struct block_device *bdev;
struct seg_entry *se;
unsigned int offset;
block_t i;
+ int err = 0;
+
+ bdev = f2fs_target_device(sbi, blkstart, NULL);
+
+ for (i = blkstart; i < blkstart + blklen; i++, len++) {
+ if (i != start) {
+ struct block_device *bdev2 =
+ f2fs_target_device(sbi, i, NULL);
+
+ if (bdev2 != bdev) {
+ err = __issue_discard_async(sbi, bdev,
+ start, len);
+ if (err)
+ return err;
+ bdev = bdev2;
+ start = i;
+ len = 0;
+ }
+ }
- for (i = blkstart; i < blkstart + blklen; i++) {
se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
offset = GET_BLKOFF_FROM_SEG0(sbi, i);
if (!f2fs_test_and_set_bit(offset, se->discard_map))
sbi->discard_blks--;
}
- trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
- return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
-}
-
-bool discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
-{
- int err = -ENOTSUPP;
-
- if (test_opt(sbi, DISCARD)) {
- struct seg_entry *se = get_seg_entry(sbi,
- GET_SEGNO(sbi, blkaddr));
- unsigned int offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
-
- if (f2fs_test_bit(offset, se->discard_map))
- return false;
-
- err = f2fs_issue_discard(sbi, blkaddr, 1);
- }
-
- if (err) {
- update_meta_page(sbi, NULL, blkaddr);
- return true;
- }
- return false;
-}
-
-static void __add_discard_entry(struct f2fs_sb_info *sbi,
- struct cp_control *cpc, struct seg_entry *se,
- unsigned int start, unsigned int end)
-{
- struct list_head *head = &SM_I(sbi)->discard_list;
- struct discard_entry *new, *last;
-
- if (!list_empty(head)) {
- last = list_last_entry(head, struct discard_entry, list);
- if (START_BLOCK(sbi, cpc->trim_start) + start ==
- last->blkaddr + last->len) {
- last->len += end - start;
- goto done;
- }
- }
- new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
- INIT_LIST_HEAD(&new->list);
- new->blkaddr = START_BLOCK(sbi, cpc->trim_start) + start;
- new->len = end - start;
- list_add_tail(&new->list, head);
-done:
- SM_I(sbi)->nr_discards += end - start;
+ if (len)
+ err = __issue_discard_async(sbi, bdev, start, len);
+ return err;
}
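The rewritten f2fs_issue_discard() accumulates a run of blocks and flushes it whenever the target device changes, so one logical range becomes one discard per device it touches. A simplified model of the splitting, with a hypothetical two-device table:

/* Sketch of splitting a block range at device boundaries. */
#include <stdio.h>

struct dev { unsigned start_blk, end_blk; };	/* inclusive, assumed */

static int target_dev(const struct dev *tbl, int n, unsigned blk)
{
	for (int i = 0; i < n; i++)
		if (blk >= tbl[i].start_blk && blk <= tbl[i].end_blk)
			return i;
	return -1;
}

int main(void)
{
	struct dev tbl[] = { { 0, 999 }, { 1000, 1999 } };	/* hypothetical */
	unsigned blkstart = 990, blklen = 20;
	unsigned start = blkstart, len = 0;
	int cur = target_dev(tbl, 2, blkstart);

	for (unsigned i = blkstart; i < blkstart + blklen; i++, len++) {
		int d = target_dev(tbl, 2, i);

		if (d != cur) {	/* crossed a boundary: flush the run */
			printf("discard dev%d: blk %u +%u\n", cur, start, len);
			cur = d;
			start = i;
			len = 0;
		}
	}
	if (len)
		printf("discard dev%d: blk %u +%u\n", cur, start, len);
	return 0;
}

This prints "discard dev0: blk 990 +10" followed by "discard dev1: blk 1000 +10", mirroring the flush-then-restart pattern above.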
-static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
+ bool check_only)
{
int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
int max_blocks = sbi->blocks_per_seg;
unsigned long *discard_map = (unsigned long *)se->discard_map;
unsigned long *dmap = SIT_I(sbi)->tmp_map;
unsigned int start = 0, end = -1;
- bool force = (cpc->reason == CP_DISCARD);
+ bool force = (cpc->reason & CP_DISCARD);
+ struct discard_entry *de = NULL;
+ struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
int i;
- if (se->valid_blocks == max_blocks)
- return;
+ if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
+ return false;
if (!force) {
if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
- SM_I(sbi)->nr_discards >= SM_I(sbi)->max_discards)
- return;
+ SM_I(sbi)->dcc_info->nr_discards >=
+ SM_I(sbi)->dcc_info->max_discards)
+ return false;
}
/* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
- while (force || SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) {
+ while (force || SM_I(sbi)->dcc_info->nr_discards <=
+ SM_I(sbi)->dcc_info->max_discards) {
start = __find_rev_next_bit(dmap, max_blocks, end + 1);
if (start >= max_blocks)
break;
end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
- __add_discard_entry(sbi, cpc, se, start, end);
+ if (force && start && end != max_blocks
+ && (end - start) < cpc->trim_minlen)
+ continue;
+
+ if (check_only)
+ return true;
+
+ if (!de) {
+ de = f2fs_kmem_cache_alloc(discard_entry_slab,
+ GFP_F2FS_ZERO);
+ de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
+ list_add_tail(&de->list, head);
+ }
+
+ for (i = start; i < end; i++)
+ __set_bit_le(i, (void *)de->discard_map);
+
+ SM_I(sbi)->dcc_info->nr_discards += end - start;
}
+ return false;
}
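The candidate bitmap `dmap` is derived differently per mode: under FITRIM (force) it is every block that is neither checkpoint-valid nor already discarded, otherwise it is every block that was valid at the last checkpoint but has been freed since. The two expressions on one bitmap word, with made-up map values:

/* Sketch of the discard candidate map derivation on one bitmap word. */
#include <stdio.h>

int main(void)
{
	/* 1 bit per block: an assumed 8-block segment for readability */
	unsigned char cur_map     = 0x0F;	/* valid now */
	unsigned char ckpt_map    = 0x3F;	/* valid at last checkpoint */
	unsigned char discard_map = 0xC0;	/* already discarded */

	/* FITRIM: anything not checkpoint-valid and not yet discarded */
	unsigned char force_dmap = ~ckpt_map & ~discard_map;

	/* normal checkpoint: valid at last cp but freed since then */
	unsigned char cp_dmap = (cur_map ^ ckpt_map) & ckpt_map;

	printf("force : 0x%02X\n", force_dmap & 0xFF);	/* 0x00 here */
	printf("normal: 0x%02X\n", cp_dmap & 0xFF);	/* 0x30: blocks 4-5 */
	return 0;
}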
void release_discard_addrs(struct f2fs_sb_info *sbi)
{
- struct list_head *head = &(SM_I(sbi)->discard_list);
+ struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
struct discard_entry *entry, *this;
/* drop caches */
void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
- struct list_head *head = &(SM_I(sbi)->discard_list);
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct list_head *head = &dcc->entry_list;
struct discard_entry *entry, *this;
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
unsigned int start = 0, end = -1;
+ unsigned int secno, start_segno;
+ bool force = (cpc->reason & CP_DISCARD);
mutex_lock(&dirty_i->seglist_lock);
if (!test_opt(sbi, DISCARD))
continue;
- f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
+ if (force && start >= cpc->trim_start &&
+ (end - 1) <= cpc->trim_end)
+ continue;
+
+ if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
+ f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
(end - start) << sbi->log_blocks_per_seg);
+ continue;
+ }
+next:
+ secno = GET_SEC_FROM_SEG(sbi, start);
+ start_segno = GET_SEG_FROM_SEC(sbi, secno);
+ if (!IS_CURSEC(sbi, secno) &&
+ !get_valid_blocks(sbi, start, true))
+ f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
+ sbi->segs_per_sec << sbi->log_blocks_per_seg);
+
+ start = start_segno + sbi->segs_per_sec;
+ if (start < end)
+ goto next;
+ else
+ end = start - 1;
}
mutex_unlock(&dirty_i->seglist_lock);
/* send small discards */
list_for_each_entry_safe(entry, this, head, list) {
- if (cpc->reason == CP_DISCARD && entry->len < cpc->trim_minlen)
- goto skip;
- f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
- cpc->trimmed += entry->len;
+ unsigned int cur_pos = 0, next_pos, len, total_len = 0;
+ bool is_valid = test_bit_le(0, entry->discard_map);
+
+find_next:
+ if (is_valid) {
+ next_pos = find_next_zero_bit_le(entry->discard_map,
+ sbi->blocks_per_seg, cur_pos);
+ len = next_pos - cur_pos;
+
+ if (f2fs_sb_mounted_blkzoned(sbi->sb) ||
+ (force && len < cpc->trim_minlen))
+ goto skip;
+
+ f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
+ len);
+ cpc->trimmed += len;
+ total_len += len;
+ } else {
+ next_pos = find_next_bit_le(entry->discard_map,
+ sbi->blocks_per_seg, cur_pos);
+ }
skip:
+ cur_pos = next_pos;
+ is_valid = !is_valid;
+
+ if (cur_pos < sbi->blocks_per_seg)
+ goto find_next;
+
list_del(&entry->list);
- SM_I(sbi)->nr_discards -= entry->len;
+ dcc->nr_discards -= total_len;
kmem_cache_free(discard_entry_slab, entry);
}
+
+ wake_up_discard_thread(sbi, false);
+}
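The small-discard loop above alternates between runs of set and clear bits in a segment's discard map and issues one discard per set run. The same walk in standalone form, on an assumed 16-block segment:

/* Sketch of walking alternating bit runs in a per-segment discard map. */
#include <stdio.h>

#define BLOCKS_PER_SEG 16	/* assumed tiny segment for the demo */

/* find the next position >= pos whose bit equals want_set */
static int find_next(unsigned map, int pos, int want_set)
{
	for (; pos < BLOCKS_PER_SEG; pos++)
		if (!!(map & (1u << pos)) == want_set)
			return pos;
	return BLOCKS_PER_SEG;
}

int main(void)
{
	unsigned map = 0x0F0C;		/* bits 2-3 and 8-11 want discard */
	int cur = 0;
	int is_valid = map & 1;		/* does position 0 start a run? */

	while (cur < BLOCKS_PER_SEG) {
		int next = find_next(map, cur, !is_valid);

		if (is_valid)
			printf("discard blocks [%d, %d)\n", cur, next);
		cur = next;
		is_valid = !is_valid;
	}
	return 0;
}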
+
+static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
+{
+ dev_t dev = sbi->sb->s_bdev->bd_dev;
+ struct discard_cmd_control *dcc;
+ int err = 0, i;
+
+ if (SM_I(sbi)->dcc_info) {
+ dcc = SM_I(sbi)->dcc_info;
+ goto init_thread;
+ }
+
+ dcc = kzalloc(sizeof(struct discard_cmd_control), GFP_KERNEL);
+ if (!dcc)
+ return -ENOMEM;
+
+ dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
+ INIT_LIST_HEAD(&dcc->entry_list);
+ for (i = 0; i < MAX_PLIST_NUM; i++) {
+ INIT_LIST_HEAD(&dcc->pend_list[i]);
+ if (i >= dcc->discard_granularity - 1)
+ dcc->pend_list_tag[i] |= P_ACTIVE;
+ }
+ INIT_LIST_HEAD(&dcc->wait_list);
+ mutex_init(&dcc->cmd_lock);
+ atomic_set(&dcc->issued_discard, 0);
+ atomic_set(&dcc->issing_discard, 0);
+ atomic_set(&dcc->discard_cmd_cnt, 0);
+ dcc->nr_discards = 0;
+ dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
+ dcc->undiscard_blks = 0;
+ dcc->root = RB_ROOT;
+
+ init_waitqueue_head(&dcc->discard_wait_queue);
+ SM_I(sbi)->dcc_info = dcc;
+init_thread:
+ dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
+ "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
+ if (IS_ERR(dcc->f2fs_issue_discard)) {
+ err = PTR_ERR(dcc->f2fs_issue_discard);
+ kfree(dcc);
+ SM_I(sbi)->dcc_info = NULL;
+ return err;
+ }
+
+ return err;
+}
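Pending discards are bucketed by size into MAX_PLIST_NUM lists, and only the buckets at or above the configured granularity start out tagged P_ACTIVE, so sub-granularity discards sit idle until something (gc_urgent, fstrim) tags them P_TRIM. A sketch of that initialization; the constants here are assumed, not the kernel's:

/* Sketch of granularity-based activation of pending discard lists. */
#include <stdio.h>

#define MAX_PLIST_NUM			8	/* assumed */
#define DEFAULT_DISCARD_GRANULARITY	4	/* assumed, in blocks */
#define P_ACTIVE			0x1

int main(void)
{
	unsigned char tag[MAX_PLIST_NUM] = { 0 };

	/* list i holds discards of roughly i+1 blocks; small ones stay idle */
	for (int i = 0; i < MAX_PLIST_NUM; i++)
		if (i >= DEFAULT_DISCARD_GRANULARITY - 1)
			tag[i] |= P_ACTIVE;

	for (int i = 0; i < MAX_PLIST_NUM; i++)
		printf("pend_list[%d]: %s\n", i,
		       (tag[i] & P_ACTIVE) ? "active" : "idle");
	return 0;
}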
+
+static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+
+ if (!dcc)
+ return;
+
+ stop_discard_thread(sbi);
+
+ kfree(dcc);
+ SM_I(sbi)->dcc_info = NULL;
}
static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
struct seg_entry *se;
unsigned int segno, offset;
long int new_vblocks;
+ bool exist;
+#ifdef CONFIG_F2FS_CHECK_FS
+ bool mir_exist;
+#endif
segno = GET_SEGNO(sbi, blkaddr);
/* Update valid block bitmap */
if (del > 0) {
- if (f2fs_test_and_set_bit(offset, se->cur_valid_map))
+ exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
+#ifdef CONFIG_F2FS_CHECK_FS
+ mir_exist = f2fs_test_and_set_bit(offset,
+ se->cur_valid_map_mir);
+ if (unlikely(exist != mir_exist)) {
+			f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent bitmap "
+				"state when setting bit, blk:%u, old bit:%d",
+				blkaddr, exist);
f2fs_bug_on(sbi, 1);
- if (!f2fs_test_and_set_bit(offset, se->discard_map))
+ }
+#endif
+ if (unlikely(exist)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Bitmap was wrongly set, blk:%u", blkaddr);
+ f2fs_bug_on(sbi, 1);
+ se->valid_blocks--;
+ del = 0;
+ }
+
+ if (f2fs_discard_en(sbi) &&
+ !f2fs_test_and_set_bit(offset, se->discard_map))
sbi->discard_blks--;
+
+ /* don't overwrite by SSR to keep node chain */
+ if (se->type == CURSEG_WARM_NODE) {
+ if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
+ se->ckpt_valid_blocks++;
+ }
} else {
- if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map))
+ exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
+#ifdef CONFIG_F2FS_CHECK_FS
+ mir_exist = f2fs_test_and_clear_bit(offset,
+ se->cur_valid_map_mir);
+ if (unlikely(exist != mir_exist)) {
+			f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent bitmap "
+				"state when clearing bit, blk:%u, old bit:%d",
+				blkaddr, exist);
f2fs_bug_on(sbi, 1);
- if (f2fs_test_and_clear_bit(offset, se->discard_map))
+ }
+#endif
+ if (unlikely(!exist)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Bitmap was wrongly cleared, blk:%u", blkaddr);
+ f2fs_bug_on(sbi, 1);
+ se->valid_blocks++;
+ del = 0;
+ }
+
+ if (f2fs_discard_en(sbi) &&
+ f2fs_test_and_clear_bit(offset, se->discard_map))
sbi->discard_blks++;
}
if (!f2fs_test_bit(offset, se->ckpt_valid_map))
}
}
- sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
+ sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
SUM_FOOTER_SIZE) / SUMMARY_SIZE;
if (valid_sum_count <= sum_in_page)
return 1;
else if ((valid_sum_count - sum_in_page) <=
- (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
+ (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
return 2;
return 3;
}
void *dst = page_address(page);
if (src)
- memcpy(dst, src, PAGE_CACHE_SIZE);
+ memcpy(dst, src, PAGE_SIZE);
else
- memset(dst, 0, PAGE_CACHE_SIZE);
+ memset(dst, 0, PAGE_SIZE);
set_page_dirty(page);
f2fs_put_page(page, 1);
}
update_meta_page(sbi, (void *)sum_blk, blk_addr);
}
+static void write_current_sum_page(struct f2fs_sb_info *sbi,
+ int type, block_t blk_addr)
+{
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+ struct page *page = grab_meta_page(sbi, blk_addr);
+ struct f2fs_summary_block *src = curseg->sum_blk;
+ struct f2fs_summary_block *dst;
+
+ dst = (struct f2fs_summary_block *)page_address(page);
+
+ mutex_lock(&curseg->curseg_mutex);
+
+ down_read(&curseg->journal_rwsem);
+ memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
+ up_read(&curseg->journal_rwsem);
+
+ memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
+ memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
+
+ mutex_unlock(&curseg->curseg_mutex);
+
+ set_page_dirty(page);
+ f2fs_put_page(page, 1);
+}
+
static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
struct free_segmap_info *free_i = FREE_I(sbi);
unsigned int segno, secno, zoneno;
unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
- unsigned int hint = *newseg / sbi->segs_per_sec;
- unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
+ unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
+ unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
unsigned int left_start = hint;
bool init = true;
int go_left = 0;
if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
segno = find_next_zero_bit(free_i->free_segmap,
- MAIN_SEGS(sbi), *newseg + 1);
- if (segno - *newseg < sbi->segs_per_sec -
- (*newseg % sbi->segs_per_sec))
+ GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
+ if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
goto got_it;
}
find_other_zone:
secno = left_start;
skip_left:
hint = secno;
- segno = secno * sbi->segs_per_sec;
- zoneno = secno / sbi->secs_per_zone;
+ segno = GET_SEG_FROM_SEC(sbi, secno);
+ zoneno = GET_ZONE_FROM_SEC(sbi, secno);
/* give up on finding another zone */
if (!init)
struct summary_footer *sum_footer;
curseg->segno = curseg->next_segno;
- curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
+ curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
curseg->next_blkoff = 0;
curseg->next_segno = NULL_SEGNO;
__set_sit_entry_type(sbi, type, curseg->segno, modified);
}
+static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
+{
+	/* if segs_per_sec is larger than 1, we need to keep the original policy. */
+ if (sbi->segs_per_sec != 1)
+ return CURSEG_I(sbi, type)->segno;
+
+ if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
+ return 0;
+
+ if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
+ return SIT_I(sbi)->last_victim[ALLOC_NEXT];
+ return CURSEG_I(sbi, type)->segno;
+}
+
/*
* Allocate a current working segment.
* This function always allocates a free segment in LFS manner.
if (test_opt(sbi, NOHEAP))
dir = ALLOC_RIGHT;
+ segno = __get_next_segno(sbi, type);
get_new_segment(sbi, &segno, new_sec, dir);
curseg->next_segno = segno;
reset_curseg(sbi, type, 1);
* This function always allocates a used segment(from dirty seglist) by SSR
* manner, so it should recover the existing segment information of valid blocks
*/
-static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
+static void change_curseg(struct f2fs_sb_info *sbi, int type)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, type);
curseg->alloc_type = SSR;
__next_free_blkoff(sbi, curseg, 0);
- if (reuse) {
- sum_page = get_sum_page(sbi, new_segno);
- sum_node = (struct f2fs_summary_block *)page_address(sum_page);
- memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
- f2fs_put_page(sum_page, 1);
- }
+ sum_page = get_sum_page(sbi, new_segno);
+ sum_node = (struct f2fs_summary_block *)page_address(sum_page);
+ memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
+ f2fs_put_page(sum_page, 1);
}
static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
+ unsigned segno = NULL_SEGNO;
+ int i, cnt;
+ bool reversed = false;
+
+ /* need_SSR() already forces to do this */
+ if (v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) {
+ curseg->next_segno = segno;
+ return 1;
+ }
- if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
- return v_ops->get_victim(sbi,
- &(curseg)->next_segno, BG_GC, type, SSR);
+ /* For node segments, let's do SSR more intensively */
+ if (IS_NODESEG(type)) {
+ if (type >= CURSEG_WARM_NODE) {
+ reversed = true;
+ i = CURSEG_COLD_NODE;
+ } else {
+ i = CURSEG_HOT_NODE;
+ }
+ cnt = NR_CURSEG_NODE_TYPE;
+ } else {
+ if (type >= CURSEG_WARM_DATA) {
+ reversed = true;
+ i = CURSEG_COLD_DATA;
+ } else {
+ i = CURSEG_HOT_DATA;
+ }
+ cnt = NR_CURSEG_DATA_TYPE;
+ }
- /* For data segments, let's do SSR more intensively */
- for (; type >= CURSEG_HOT_DATA; type--)
- if (v_ops->get_victim(sbi, &(curseg)->next_segno,
- BG_GC, type, SSR))
+ for (; cnt-- > 0; reversed ? i-- : i++) {
+ if (i == type)
+ continue;
+ if (v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) {
+ curseg->next_segno = segno;
return 1;
+ }
+ }
return 0;
}
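The new get_ssr_segment() scans the other temperature classes starting nearest the caller's own: warm/cold callers walk downward from cold, hot callers walk upward from hot, and the caller's own type is skipped (it was already tried above). The ordering in isolation:

/* Sketch of the SSR scan order over temperature classes. */
#include <stdbool.h>
#include <stdio.h>

enum { HOT, WARM, COLD, NR_TEMP };	/* simplified class ids */

static void ssr_scan_order(int type)
{
	bool reversed = (type >= WARM);
	int i = reversed ? COLD : HOT;

	printf("type %d tries:", type);
	for (int cnt = NR_TEMP; cnt-- > 0; reversed ? i-- : i++) {
		if (i == type)
			continue;	/* never steal from yourself */
		printf(" %d", i);
	}
	printf("\n");
}

int main(void)
{
	for (int t = HOT; t < NR_TEMP; t++)
		ssr_scan_order(t);	/* HOT: 1 2, WARM: 2 0, COLD: 1 0 */
	return 0;
}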
if (force)
new_curseg(sbi, type, true);
- else if (type == CURSEG_WARM_NODE)
+ else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
+ type == CURSEG_WARM_NODE)
new_curseg(sbi, type, false);
else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
new_curseg(sbi, type, false);
else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
- change_curseg(sbi, type, true);
+ change_curseg(sbi, type);
else
new_curseg(sbi, type, false);
stat_inc_seg_type(sbi, curseg);
}
-static void __allocate_new_segments(struct f2fs_sb_info *sbi, int type)
-{
- struct curseg_info *curseg = CURSEG_I(sbi, type);
- unsigned int old_segno;
-
- old_segno = curseg->segno;
- SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
- locate_dirty_segment(sbi, old_segno);
-}
-
void allocate_new_segments(struct f2fs_sb_info *sbi)
{
+ struct curseg_info *curseg;
+ unsigned int old_segno;
int i;
- for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
- __allocate_new_segments(sbi, i);
+ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
+ curseg = CURSEG_I(sbi, i);
+ old_segno = curseg->segno;
+ SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
+ locate_dirty_segment(sbi, old_segno);
+ }
}
static const struct segment_allocation default_salloc_ops = {
.allocate_segment = allocate_segment_by_default,
};
+bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+{
+ __u64 trim_start = cpc->trim_start;
+ bool has_candidate = false;
+
+ mutex_lock(&SIT_I(sbi)->sentry_lock);
+ for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
+ if (add_discard_addrs(sbi, cpc, true)) {
+ has_candidate = true;
+ break;
+ }
+ }
+ mutex_unlock(&SIT_I(sbi)->sentry_lock);
+
+ cpc->trim_start = trim_start;
+ return has_candidate;
+}
+
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
{
__u64 start = F2FS_BYTES_TO_BLK(range->start);
__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
unsigned int start_segno, end_segno;
struct cp_control cpc;
+ int err = 0;
if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
return -EINVAL;
if (end <= MAIN_BLKADDR(sbi))
goto out;
+ if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "Found FS corruption, run fsck to fix.");
+ goto out;
+ }
+
/* start/end segment number in main_area */
start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
sbi->segs_per_sec) - 1, end_segno);
mutex_lock(&sbi->gc_mutex);
- write_checkpoint(sbi, &cpc);
+ err = write_checkpoint(sbi, &cpc);
mutex_unlock(&sbi->gc_mutex);
+ if (err)
+ break;
+
+ schedule();
}
+ /* It's time to issue all the filed discards */
+ mark_discard_range_all(sbi);
+ f2fs_wait_discard_bios(sbi, false);
out:
range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
- return 0;
+ return err;
}
static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
return false;
}
-static int __get_segment_type_2(struct page *page, enum page_type p_type)
+static int __get_segment_type_2(struct f2fs_io_info *fio)
{
- if (p_type == DATA)
+ if (fio->type == DATA)
return CURSEG_HOT_DATA;
else
return CURSEG_HOT_NODE;
}
-static int __get_segment_type_4(struct page *page, enum page_type p_type)
+static int __get_segment_type_4(struct f2fs_io_info *fio)
{
- if (p_type == DATA) {
- struct inode *inode = page->mapping->host;
+ if (fio->type == DATA) {
+ struct inode *inode = fio->page->mapping->host;
if (S_ISDIR(inode->i_mode))
return CURSEG_HOT_DATA;
else
return CURSEG_COLD_DATA;
} else {
- if (IS_DNODE(page) && is_cold_node(page))
+ if (IS_DNODE(fio->page) && is_cold_node(fio->page))
return CURSEG_WARM_NODE;
else
return CURSEG_COLD_NODE;
}
}
-static int __get_segment_type_6(struct page *page, enum page_type p_type)
+static int __get_segment_type_6(struct f2fs_io_info *fio)
{
- if (p_type == DATA) {
- struct inode *inode = page->mapping->host;
+ if (fio->type == DATA) {
+ struct inode *inode = fio->page->mapping->host;
- if (S_ISDIR(inode->i_mode))
- return CURSEG_HOT_DATA;
- else if (is_cold_data(page) || file_is_cold(inode))
+ if (is_cold_data(fio->page) || file_is_cold(inode))
return CURSEG_COLD_DATA;
- else
- return CURSEG_WARM_DATA;
+ if (is_inode_flag_set(inode, FI_HOT_DATA))
+ return CURSEG_HOT_DATA;
+ return CURSEG_WARM_DATA;
} else {
- if (IS_DNODE(page))
- return is_cold_node(page) ? CURSEG_WARM_NODE :
+ if (IS_DNODE(fio->page))
+ return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
CURSEG_HOT_NODE;
- else
- return CURSEG_COLD_NODE;
+ return CURSEG_COLD_NODE;
}
}
-static int __get_segment_type(struct page *page, enum page_type p_type)
+static int __get_segment_type(struct f2fs_io_info *fio)
{
- switch (F2FS_P_SB(page)->active_logs) {
+ int type = 0;
+
+ switch (fio->sbi->active_logs) {
case 2:
- return __get_segment_type_2(page, p_type);
+ type = __get_segment_type_2(fio);
+ break;
case 4:
- return __get_segment_type_4(page, p_type);
+ type = __get_segment_type_4(fio);
+ break;
+ case 6:
+ type = __get_segment_type_6(fio);
+ break;
+ default:
+ f2fs_bug_on(fio->sbi, true);
}
- /* NR_CURSEG_TYPE(6) logs by default */
- f2fs_bug_on(F2FS_P_SB(page),
- F2FS_P_SB(page)->active_logs != NR_CURSEG_TYPE);
- return __get_segment_type_6(page, p_type);
+
+ if (IS_HOT(type))
+ fio->temp = HOT;
+ else if (IS_WARM(type))
+ fio->temp = WARM;
+ else
+ fio->temp = COLD;
+ return type;
}
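Once a log type is chosen, fio->temp is derived from it with IS_HOT/IS_WARM, and that temperature selects the per-temperature write queue used later by allocate_data_block(). The mapping on its own, with the curseg enumeration assumed to match the kernel's ordering:

/* Sketch of mapping a curseg type to a write temperature. */
#include <stdio.h>

enum { CURSEG_HOT_DATA, CURSEG_WARM_DATA, CURSEG_COLD_DATA,
       CURSEG_HOT_NODE, CURSEG_WARM_NODE, CURSEG_COLD_NODE };
enum temp { HOT, WARM, COLD };

#define IS_HOT(t)  ((t) == CURSEG_HOT_NODE  || (t) == CURSEG_HOT_DATA)
#define IS_WARM(t) ((t) == CURSEG_WARM_NODE || (t) == CURSEG_WARM_DATA)

static enum temp temp_of(int type)
{
	if (IS_HOT(type))
		return HOT;
	if (IS_WARM(type))
		return WARM;
	return COLD;
}

int main(void)
{
	const char *name[] = { "HOT", "WARM", "COLD" };

	for (int t = CURSEG_HOT_DATA; t <= CURSEG_COLD_NODE; t++)
		printf("curseg %d -> %s\n", t, name[temp_of(t)]);
	return 0;
}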
void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
block_t old_blkaddr, block_t *new_blkaddr,
- struct f2fs_summary *sum, int type)
+ struct f2fs_summary *sum, int type,
+ struct f2fs_io_info *fio, bool add_list)
{
struct sit_info *sit_i = SIT_I(sbi);
- struct curseg_info *curseg;
- bool direct_io = (type == CURSEG_DIRECT_IO);
-
- type = direct_io ? CURSEG_WARM_DATA : type;
-
- curseg = CURSEG_I(sbi, type);
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
mutex_lock(&curseg->curseg_mutex);
mutex_lock(&sit_i->sentry_lock);
- /* direct_io'ed data is aligned to the segment for better performance */
- if (direct_io && curseg->next_blkoff &&
- !has_not_enough_free_secs(sbi, 0))
- __allocate_new_segments(sbi, type);
-
*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
+ f2fs_wait_discard_bio(sbi, *new_blkaddr);
+
/*
* __add_sum_entry should be resided under the curseg_mutex
* because, this function updates a summary entry in the
if (!__has_curseg_space(sbi, type))
sit_i->s_ops->allocate_segment(sbi, type, false);
/*
- * SIT information should be updated before segment allocation,
- * since SSR needs latest valid block information.
+ * SIT information should be updated after segment allocation,
+ * since we need to keep dirty segments precisely under SSR.
*/
refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
mutex_unlock(&sit_i->sentry_lock);
- if (page && IS_NODESEG(type))
+ if (page && IS_NODESEG(type)) {
fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
+ f2fs_inode_chksum_set(sbi, page);
+ }
+
+ if (add_list) {
+ struct f2fs_bio_info *io;
+
+ INIT_LIST_HEAD(&fio->list);
+ fio->in_list = true;
+ io = sbi->write_io[fio->type] + fio->temp;
+ spin_lock(&io->io_lock);
+ list_add_tail(&fio->list, &io->io_list);
+ spin_unlock(&io->io_lock);
+ }
+
mutex_unlock(&curseg->curseg_mutex);
}
static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
{
- int type = __get_segment_type(fio->page, fio->type);
+ int type = __get_segment_type(fio);
+ int err;
- allocate_data_block(fio->sbi, fio->page, fio->blk_addr,
- &fio->blk_addr, sum, type);
+reallocate:
+ allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
+ &fio->new_blkaddr, sum, type, fio, true);
/* writeout dirty page into bdev */
- f2fs_submit_page_mbio(fio);
+ err = f2fs_submit_page_write(fio);
+ if (err == -EAGAIN) {
+ fio->old_blkaddr = fio->new_blkaddr;
+ goto reallocate;
+ }
}
-void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
+void write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
+ enum iostat_type io_type)
{
struct f2fs_io_info fio = {
.sbi = sbi,
.type = META,
- .rw = WRITE_SYNC | REQ_META | REQ_PRIO,
- .blk_addr = page->index,
+ .op = REQ_OP_WRITE,
+ .op_flags = REQ_SYNC | REQ_NOIDLE | REQ_META | REQ_PRIO,
+ .old_blkaddr = page->index,
+ .new_blkaddr = page->index,
.page = page,
.encrypted_page = NULL,
+ .in_list = false,
};
if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
- fio.rw &= ~REQ_META;
+ fio.op_flags &= ~REQ_META;
set_page_writeback(page);
- f2fs_submit_page_mbio(&fio);
+ f2fs_submit_page_write(&fio);
+
+ f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
}
void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
set_summary(&sum, nid, 0, 0);
do_write_page(&sum, fio);
+
+ f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
}
void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
do_write_page(&sum, fio);
- dn->data_blkaddr = fio->blk_addr;
+ f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
+
+ f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
}
-void rewrite_data_page(struct f2fs_io_info *fio)
+int rewrite_data_page(struct f2fs_io_info *fio)
{
+ int err;
+
+ fio->new_blkaddr = fio->old_blkaddr;
stat_inc_inplace_blocks(fio->sbi);
- f2fs_submit_page_mbio(fio);
+
+ err = f2fs_submit_page_bio(fio);
+
+ f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
+
+ return err;
}
-static void __f2fs_replace_block(struct f2fs_sb_info *sbi,
- struct f2fs_summary *sum,
+void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
block_t old_blkaddr, block_t new_blkaddr,
- bool recover_curseg)
+ bool recover_curseg, bool recover_newaddr)
{
struct sit_info *sit_i = SIT_I(sbi);
struct curseg_info *curseg;
/* change the current segment */
if (segno != curseg->segno) {
curseg->next_segno = segno;
- change_curseg(sbi, type, true);
+ change_curseg(sbi, type);
}
curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
__add_sum_entry(sbi, type, sum);
- if (!recover_curseg)
+ if (!recover_curseg || recover_newaddr)
update_sit_entry(sbi, new_blkaddr, 1);
if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
update_sit_entry(sbi, old_blkaddr, -1);
if (recover_curseg) {
if (old_cursegno != curseg->segno) {
curseg->next_segno = old_cursegno;
- change_curseg(sbi, type, true);
+ change_curseg(sbi, type);
}
curseg->next_blkoff = old_blkoff;
}
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
block_t old_addr, block_t new_addr,
- unsigned char version, bool recover_curseg)
+ unsigned char version, bool recover_curseg,
+ bool recover_newaddr)
{
struct f2fs_summary sum;
set_summary(&sum, dn->nid, dn->ofs_in_node, version);
- __f2fs_replace_block(sbi, &sum, old_addr, new_addr, recover_curseg);
-
- dn->data_blkaddr = new_addr;
- set_data_blkaddr(dn);
- f2fs_update_extent_cache(dn);
-}
-
-static inline bool is_merged_page(struct f2fs_sb_info *sbi,
- struct page *page, enum page_type type)
-{
- enum page_type btype = PAGE_TYPE_OF_BIO(type);
- struct f2fs_bio_info *io = &sbi->write_io[btype];
- struct bio_vec *bvec;
- struct page *target;
- int i;
-
- down_read(&io->io_rwsem);
- if (!io->bio) {
- up_read(&io->io_rwsem);
- return false;
- }
-
- bio_for_each_segment_all(bvec, io->bio, i) {
+ __f2fs_replace_block(sbi, &sum, old_addr, new_addr,
+ recover_curseg, recover_newaddr);
- if (bvec->bv_page->mapping) {
- target = bvec->bv_page;
- } else {
- struct f2fs_crypto_ctx *ctx;
-
- /* encrypted page */
- ctx = (struct f2fs_crypto_ctx *)page_private(
- bvec->bv_page);
- target = ctx->w.control_page;
- }
-
- if (page == target) {
- up_read(&io->io_rwsem);
- return true;
- }
- }
-
- up_read(&io->io_rwsem);
- return false;
+ f2fs_update_data_blkaddr(dn, new_addr);
}
void f2fs_wait_on_page_writeback(struct page *page,
- enum page_type type)
+ enum page_type type, bool ordered)
{
if (PageWriteback(page)) {
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
- if (is_merged_page(sbi, page, type))
- f2fs_submit_merged_bio(sbi, type, WRITE);
- wait_on_page_writeback(page);
+ f2fs_submit_merged_write_cond(sbi, page->mapping->host,
+ 0, page->index, type);
+ if (ordered)
+ wait_on_page_writeback(page);
+ else
+ wait_for_stable_page(page);
}
}
-void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
- block_t blkaddr)
+void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr)
{
struct page *cpage;
- if (blkaddr == NEW_ADDR)
+ if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
return;
- f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
-
cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
if (cpage) {
- f2fs_wait_on_page_writeback(cpage, DATA);
+ f2fs_wait_on_page_writeback(cpage, DATA, true);
f2fs_put_page(cpage, 1);
}
}
/* Step 1: restore nat cache */
seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
- memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);
+ memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
/* Step 2: restore sit cache */
seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
- memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
- SUM_JOURNAL_SIZE);
+ memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
offset = 2 * SUM_JOURNAL_SIZE;
/* Step 3: restore summary entries */
s = (struct f2fs_summary *)(kaddr + offset);
seg_i->sum_blk->entries[j] = *s;
offset += SUMMARY_SIZE;
- if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
+ if (offset + SUMMARY_SIZE <= PAGE_SIZE -
SUM_FOOTER_SIZE)
continue;
/* set uncompleted segment to curseg */
curseg = CURSEG_I(sbi, type);
mutex_lock(&curseg->curseg_mutex);
- memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
+
+ /* update journal info */
+ down_write(&curseg->journal_rwsem);
+ memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
+ up_write(&curseg->journal_rwsem);
+
+ memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
+ memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
curseg->next_segno = segno;
reset_curseg(sbi, type, 0);
curseg->alloc_type = ckpt->alloc_type[type];
static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
- struct f2fs_summary_block *s_sits =
- CURSEG_I(sbi, CURSEG_COLD_DATA)->sum_blk;
- struct f2fs_summary_block *s_nats =
- CURSEG_I(sbi, CURSEG_HOT_DATA)->sum_blk;
+ struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
+ struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
int type = CURSEG_HOT_DATA;
int err;
- if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
+ if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
int npages = npages_for_summary_flush(sbi, true);
if (npages >= 2)
}
/* sanity check for summary blocks */
- if (nats_in_cursum(s_nats) > NAT_JOURNAL_ENTRIES ||
- sits_in_cursum(s_sits) > SIT_JOURNAL_ENTRIES)
+ if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
+ sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES)
return -EINVAL;
return 0;
/* Step 1: write nat cache */
seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
- memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
+ memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
written_size += SUM_JOURNAL_SIZE;
/* Step 2: write sit cache */
seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
- memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
- SUM_JOURNAL_SIZE);
+ memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
written_size += SUM_JOURNAL_SIZE;
/* Step 3: write summary entries */
*summary = seg_i->sum_blk->entries[j];
written_size += SUMMARY_SIZE;
- if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
+ if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
SUM_FOOTER_SIZE)
continue;
else
end = type + NR_CURSEG_NODE_TYPE;
- for (i = type; i < end; i++) {
- struct curseg_info *sum = CURSEG_I(sbi, i);
- mutex_lock(&sum->curseg_mutex);
- write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
- mutex_unlock(&sum->curseg_mutex);
- }
+ for (i = type; i < end; i++)
+ write_current_sum_page(sbi, i, blkaddr + (i - type));
}
void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
- if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
+ if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
write_compacted_summaries(sbi, start_blk);
else
write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}
-int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
+int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
unsigned int val, int alloc)
{
int i;
if (type == NAT_JOURNAL) {
- for (i = 0; i < nats_in_cursum(sum); i++) {
- if (le32_to_cpu(nid_in_journal(sum, i)) == val)
+ for (i = 0; i < nats_in_cursum(journal); i++) {
+ if (le32_to_cpu(nid_in_journal(journal, i)) == val)
return i;
}
- if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
- return update_nats_in_cursum(sum, 1);
+ if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
+ return update_nats_in_cursum(journal, 1);
} else if (type == SIT_JOURNAL) {
- for (i = 0; i < sits_in_cursum(sum); i++)
- if (le32_to_cpu(segno_in_journal(sum, i)) == val)
+ for (i = 0; i < sits_in_cursum(journal); i++)
+ if (le32_to_cpu(segno_in_journal(journal, i)) == val)
return i;
- if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
- return update_sits_in_cursum(sum, 1);
+ if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
+ return update_sits_in_cursum(journal, 1);
}
return -1;
}
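lookup_journal_in_cursum() is a plain linear scan, optionally appending one slot when the key is absent and __has_cursum_space() says there is room. A simplified standalone version of the pattern (NAT case only, with a tiny assumed journal):

/* Sketch of the journal lookup-or-allocate pattern (NAT case only). */
#include <stdio.h>

#define JOURNAL_ENTRIES 4	/* assumed tiny journal */

struct journal {
	int n_entries;
	unsigned keys[JOURNAL_ENTRIES];
};

static int lookup(struct journal *j, unsigned key, int alloc)
{
	for (int i = 0; i < j->n_entries; i++)
		if (j->keys[i] == key)
			return i;
	if (alloc && j->n_entries < JOURNAL_ENTRIES) {
		j->keys[j->n_entries] = key;
		return j->n_entries++;	/* slot index of the new entry */
	}
	return -1;			/* not found; no room or no alloc */
}

int main(void)
{
	struct journal j = { 2, { 10, 20 } };

	printf("%d\n", lookup(&j, 20, 0));	/*  1: existing entry */
	printf("%d\n", lookup(&j, 30, 0));	/* -1: lookup only */
	printf("%d\n", lookup(&j, 30, 1));	/*  2: allocated */
	return 0;
}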
src_addr = page_address(src_page);
dst_addr = page_address(dst_page);
- memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
+ memcpy(dst_addr, src_addr, PAGE_SIZE);
set_page_dirty(dst_page);
f2fs_put_page(src_page, 1);
static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
{
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
int i;
- for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
+ down_write(&curseg->journal_rwsem);
+ for (i = 0; i < sits_in_cursum(journal); i++) {
unsigned int segno;
bool dirtied;
- segno = le32_to_cpu(segno_in_journal(sum, i));
+ segno = le32_to_cpu(segno_in_journal(journal, i));
dirtied = __mark_sit_entry_dirty(sbi, segno);
if (!dirtied)
add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
}
- update_sits_in_cursum(sum, -sits_in_cursum(sum));
+ update_sits_in_cursum(journal, -i);
+ up_write(&curseg->journal_rwsem);
}
/*
struct sit_info *sit_i = SIT_I(sbi);
unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
struct sit_entry_set *ses, *tmp;
struct list_head *head = &SM_I(sbi)->sit_entry_set;
bool to_journal = true;
struct seg_entry *se;
- mutex_lock(&curseg->curseg_mutex);
mutex_lock(&sit_i->sentry_lock);
if (!sit_i->dirty_sentries)
* entries, remove all entries from journal and add and account
* them in sit entry set.
*/
- if (!__has_cursum_space(sum, sit_i->dirty_sentries, SIT_JOURNAL))
+ if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL))
remove_sits_in_journal(sbi);
/*
unsigned int segno = start_segno;
if (to_journal &&
- !__has_cursum_space(sum, ses->entry_cnt, SIT_JOURNAL))
+ !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
to_journal = false;
- if (!to_journal) {
+ if (to_journal) {
+ down_write(&curseg->journal_rwsem);
+ } else {
page = get_next_sit_page(sbi, start_segno);
raw_sit = page_address(page);
}
se = get_seg_entry(sbi, segno);
/* add discard candidates */
- if (cpc->reason != CP_DISCARD) {
+ if (!(cpc->reason & CP_DISCARD)) {
cpc->trim_start = segno;
- add_discard_addrs(sbi, cpc);
+ add_discard_addrs(sbi, cpc, false);
}
if (to_journal) {
- offset = lookup_journal_in_cursum(sum,
+ offset = lookup_journal_in_cursum(journal,
SIT_JOURNAL, segno, 1);
f2fs_bug_on(sbi, offset < 0);
- segno_in_journal(sum, offset) =
+ segno_in_journal(journal, offset) =
cpu_to_le32(segno);
seg_info_to_raw_sit(se,
- &sit_in_journal(sum, offset));
+ &sit_in_journal(journal, offset));
} else {
sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
seg_info_to_raw_sit(se,
ses->entry_cnt--;
}
- if (!to_journal)
+ if (to_journal)
+ up_write(&curseg->journal_rwsem);
+ else
f2fs_put_page(page, 1);
f2fs_bug_on(sbi, ses->entry_cnt);
f2fs_bug_on(sbi, !list_empty(head));
f2fs_bug_on(sbi, sit_i->dirty_sentries);
out:
- if (cpc->reason == CP_DISCARD) {
+ if (cpc->reason & CP_DISCARD) {
+ __u64 trim_start = cpc->trim_start;
+
for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
- add_discard_addrs(sbi, cpc);
+ add_discard_addrs(sbi, cpc, false);
+
+ cpc->trim_start = trim_start;
}
mutex_unlock(&sit_i->sentry_lock);
- mutex_unlock(&curseg->curseg_mutex);
set_prefree_as_free_segments(sbi);
}
static int build_sit_info(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
- struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
struct sit_info *sit_i;
unsigned int sit_segs, start;
- char *src_bitmap, *dst_bitmap;
+ char *src_bitmap;
unsigned int bitmap_size;
/* allocate memory for SIT information */
SM_I(sbi)->sit_info = sit_i;
- sit_i->sentries = f2fs_kvzalloc(MAIN_SEGS(sbi) *
+ sit_i->sentries = kvzalloc(MAIN_SEGS(sbi) *
sizeof(struct seg_entry), GFP_KERNEL);
if (!sit_i->sentries)
return -ENOMEM;
bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
- sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
+ sit_i->dirty_sentries_bitmap = kvzalloc(bitmap_size, GFP_KERNEL);
if (!sit_i->dirty_sentries_bitmap)
return -ENOMEM;
= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
sit_i->sentries[start].ckpt_valid_map
= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
- sit_i->sentries[start].discard_map
- = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
if (!sit_i->sentries[start].cur_valid_map ||
- !sit_i->sentries[start].ckpt_valid_map ||
- !sit_i->sentries[start].discard_map)
+ !sit_i->sentries[start].ckpt_valid_map)
+ return -ENOMEM;
+
+#ifdef CONFIG_F2FS_CHECK_FS
+ sit_i->sentries[start].cur_valid_map_mir
+ = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+ if (!sit_i->sentries[start].cur_valid_map_mir)
return -ENOMEM;
+#endif
+
+ if (f2fs_discard_en(sbi)) {
+ sit_i->sentries[start].discard_map
+ = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+ if (!sit_i->sentries[start].discard_map)
+ return -ENOMEM;
+ }
}
sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
return -ENOMEM;
if (sbi->segs_per_sec > 1) {
- sit_i->sec_entries = f2fs_kvzalloc(MAIN_SECS(sbi) *
+ sit_i->sec_entries = kvzalloc(MAIN_SECS(sbi) *
sizeof(struct sec_entry), GFP_KERNEL);
if (!sit_i->sec_entries)
return -ENOMEM;
bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
- dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
- if (!dst_bitmap)
+ sit_i->sit_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
+ if (!sit_i->sit_bitmap)
+ return -ENOMEM;
+
+#ifdef CONFIG_F2FS_CHECK_FS
+ sit_i->sit_bitmap_mir = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
+ if (!sit_i->sit_bitmap_mir)
return -ENOMEM;
+#endif
/* init SIT information */
sit_i->s_ops = &default_salloc_ops;
sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
- sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
- sit_i->sit_bitmap = dst_bitmap;
+ sit_i->written_valid_blocks = 0;
sit_i->bitmap_size = bitmap_size;
sit_i->dirty_sentries = 0;
sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
SM_I(sbi)->free_info = free_i;
bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
- free_i->free_segmap = f2fs_kvmalloc(bitmap_size, GFP_KERNEL);
+ free_i->free_segmap = kvmalloc(bitmap_size, GFP_KERNEL);
if (!free_i->free_segmap)
return -ENOMEM;
sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
- free_i->free_secmap = f2fs_kvmalloc(sec_bitmap_size, GFP_KERNEL);
+ free_i->free_secmap = kvmalloc(sec_bitmap_size, GFP_KERNEL);
if (!free_i->free_secmap)
return -ENOMEM;
for (i = 0; i < NR_CURSEG_TYPE; i++) {
mutex_init(&array[i].curseg_mutex);
- array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+ array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!array[i].sum_blk)
return -ENOMEM;
+ init_rwsem(&array[i].journal_rwsem);
+ array[i].journal = kzalloc(sizeof(struct f2fs_journal),
+ GFP_KERNEL);
+ if (!array[i].journal)
+ return -ENOMEM;
array[i].segno = NULL_SEGNO;
array[i].next_blkoff = 0;
}
{
struct sit_info *sit_i = SIT_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
+ struct seg_entry *se;
+ struct f2fs_sit_entry sit;
int sit_blk_cnt = SIT_BLK_CNT(sbi);
unsigned int i, start, end;
unsigned int readed, start_blk = 0;
- int nrpages = MAX_BIO_BLOCKS(sbi);
do {
- readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT, true);
+ readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
+ META_SIT, true);
start = start_blk * sit_i->sents_per_block;
end = (start_blk + readed) * sit_i->sents_per_block;
for (; start < end && start < MAIN_SEGS(sbi); start++) {
- struct seg_entry *se = &sit_i->sentries[start];
struct f2fs_sit_block *sit_blk;
- struct f2fs_sit_entry sit;
struct page *page;
- mutex_lock(&curseg->curseg_mutex);
- for (i = 0; i < sits_in_cursum(sum); i++) {
- if (le32_to_cpu(segno_in_journal(sum, i))
- == start) {
- sit = sit_in_journal(sum, i);
- mutex_unlock(&curseg->curseg_mutex);
- goto got_it;
- }
- }
- mutex_unlock(&curseg->curseg_mutex);
-
+ se = &sit_i->sentries[start];
page = get_current_sit_page(sbi, start);
sit_blk = (struct f2fs_sit_block *)page_address(page);
sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
f2fs_put_page(page, 1);
-got_it:
+
check_block_count(sbi, start, &sit);
seg_info_from_raw_sit(se, &sit);
/* build discard map only one time */
- memcpy(se->discard_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
- sbi->discard_blks += sbi->blocks_per_seg - se->valid_blocks;
-
- if (sbi->segs_per_sec > 1) {
- struct sec_entry *e = get_sec_entry(sbi, start);
- e->valid_blocks += se->valid_blocks;
+ if (f2fs_discard_en(sbi)) {
+ if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
+ memset(se->discard_map, 0xff,
+ SIT_VBLOCK_MAP_SIZE);
+ } else {
+ memcpy(se->discard_map,
+ se->cur_valid_map,
+ SIT_VBLOCK_MAP_SIZE);
+ sbi->discard_blks +=
+ sbi->blocks_per_seg -
+ se->valid_blocks;
+ }
}
+
+ if (sbi->segs_per_sec > 1)
+ get_sec_entry(sbi, start)->valid_blocks +=
+ se->valid_blocks;
}
start_blk += readed;
} while (start_blk < sit_blk_cnt);
+
+ down_read(&curseg->journal_rwsem);
+ for (i = 0; i < sits_in_cursum(journal); i++) {
+ unsigned int old_valid_blocks;
+
+ start = le32_to_cpu(segno_in_journal(journal, i));
+ se = &sit_i->sentries[start];
+ sit = sit_in_journal(journal, i);
+
+ old_valid_blocks = se->valid_blocks;
+
+ check_block_count(sbi, start, &sit);
+ seg_info_from_raw_sit(se, &sit);
+
+ if (f2fs_discard_en(sbi)) {
+ if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
+ memset(se->discard_map, 0xff,
+ SIT_VBLOCK_MAP_SIZE);
+ } else {
+ memcpy(se->discard_map, se->cur_valid_map,
+ SIT_VBLOCK_MAP_SIZE);
+ sbi->discard_blks += old_valid_blocks -
+ se->valid_blocks;
+ }
+ }
+
+ if (sbi->segs_per_sec > 1)
+ get_sec_entry(sbi, start)->valid_blocks +=
+ se->valid_blocks - old_valid_blocks;
+ }
+ up_read(&curseg->journal_rwsem);
}
static void init_free_segmap(struct f2fs_sb_info *sbi)
struct seg_entry *sentry = get_seg_entry(sbi, start);
if (!sentry->valid_blocks)
__set_free(sbi, start);
+ else
+ SIT_I(sbi)->written_valid_blocks +=
+ sentry->valid_blocks;
}
/* set use the current segments */
if (segno >= MAIN_SEGS(sbi))
break;
offset = segno + 1;
- valid_blocks = get_valid_blocks(sbi, segno, 0);
+ valid_blocks = get_valid_blocks(sbi, segno, false);
if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
continue;
if (valid_blocks > sbi->blocks_per_seg) {
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
- dirty_i->victim_secmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
+ dirty_i->victim_secmap = kvzalloc(bitmap_size, GFP_KERNEL);
if (!dirty_i->victim_secmap)
return -ENOMEM;
return 0;
bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
for (i = 0; i < NR_DIRTY_TYPE; i++) {
- dirty_i->dirty_segmap[i] = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
+ dirty_i->dirty_segmap[i] = kvzalloc(bitmap_size, GFP_KERNEL);
if (!dirty_i->dirty_segmap[i])
return -ENOMEM;
}
sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
sm_info->rec_prefree_segments = sm_info->main_segments *
DEF_RECLAIM_PREFREE_SEGMENTS / 100;
- sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
+ if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
+ sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
+
+ if (!test_opt(sbi, LFS))
+ sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
-
- INIT_LIST_HEAD(&sm_info->discard_list);
- sm_info->nr_discards = 0;
- sm_info->max_discards = 0;
+ sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
INIT_LIST_HEAD(&sm_info->sit_entry_set);
- if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
+ if (!f2fs_readonly(sbi->sb)) {
err = create_flush_cmd_control(sbi);
if (err)
return err;
}
+ err = create_discard_cmd_control(sbi);
+ if (err)
+ return err;
+
err = build_sit_info(sbi);
if (err)
return err;
if (!array)
return;
SM_I(sbi)->curseg_array = NULL;
- for (i = 0; i < NR_CURSEG_TYPE; i++)
+ for (i = 0; i < NR_CURSEG_TYPE; i++) {
kfree(array[i].sum_blk);
+ kfree(array[i].journal);
+ }
kfree(array);
}
if (sit_i->sentries) {
for (start = 0; start < MAIN_SEGS(sbi); start++) {
kfree(sit_i->sentries[start].cur_valid_map);
+#ifdef CONFIG_F2FS_CHECK_FS
+ kfree(sit_i->sentries[start].cur_valid_map_mir);
+#endif
kfree(sit_i->sentries[start].ckpt_valid_map);
kfree(sit_i->sentries[start].discard_map);
}
SM_I(sbi)->sit_info = NULL;
kfree(sit_i->sit_bitmap);
+#ifdef CONFIG_F2FS_CHECK_FS
+ kfree(sit_i->sit_bitmap_mir);
+#endif
kfree(sit_i);
}
if (!sm_info)
return;
- destroy_flush_cmd_control(sbi);
+ destroy_flush_cmd_control(sbi, true);
+ destroy_discard_cmd_control(sbi);
destroy_dirty_segmap(sbi);
destroy_curseg(sbi);
destroy_free_segmap(sbi);
if (!discard_entry_slab)
goto fail;
+ discard_cmd_slab = f2fs_kmem_cache_create("discard_cmd",
+ sizeof(struct discard_cmd));
+ if (!discard_cmd_slab)
+ goto destroy_discard_entry;
+
sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
sizeof(struct sit_entry_set));
if (!sit_entry_set_slab)
- goto destory_discard_entry;
+ goto destroy_discard_cmd;
inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
sizeof(struct inmem_pages));
destroy_sit_entry_set:
kmem_cache_destroy(sit_entry_set_slab);
-destory_discard_entry:
+destroy_discard_cmd:
+ kmem_cache_destroy(discard_cmd_slab);
+destroy_discard_entry:
kmem_cache_destroy(discard_entry_slab);
fail:
return -ENOMEM;
void destroy_segment_manager_caches(void)
{
kmem_cache_destroy(sit_entry_set_slab);
+ kmem_cache_destroy(discard_cmd_slab);
kmem_cache_destroy(discard_entry_slab);
kmem_cache_destroy(inmem_entry_slab);
}
#define NULL_SECNO ((unsigned int)(~0))
#define DEF_RECLAIM_PREFREE_SEGMENTS 5 /* 5% over total segments */
+#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS	4096	/* 8GB at most */
+
+#define F2FS_MIN_SEGMENTS 9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
/* L: Logical segment # in volume, R: Relative segment # in main area */
-#define GET_L2R_SEGNO(free_i, segno) (segno - free_i->start_segno)
-#define GET_R2L_SEGNO(free_i, segno) (segno + free_i->start_segno)
+#define GET_L2R_SEGNO(free_i, segno) ((segno) - (free_i)->start_segno)
+#define GET_R2L_SEGNO(free_i, segno) ((segno) + (free_i)->start_segno)
+
+#define IS_DATASEG(t) ((t) <= CURSEG_COLD_DATA)
+#define IS_NODESEG(t) ((t) >= CURSEG_HOT_NODE)
-#define IS_DATASEG(t) (t <= CURSEG_COLD_DATA)
-#define IS_NODESEG(t) (t >= CURSEG_HOT_NODE)
+#define IS_HOT(t) ((t) == CURSEG_HOT_NODE || (t) == CURSEG_HOT_DATA)
+#define IS_WARM(t) ((t) == CURSEG_WARM_NODE || (t) == CURSEG_WARM_DATA)
+#define IS_COLD(t) ((t) == CURSEG_COLD_NODE || (t) == CURSEG_COLD_DATA)
#define IS_CURSEG(sbi, seg) \
- ((seg == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \
- (seg == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \
- (seg == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \
- (seg == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \
- (seg == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \
- (seg == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))
+ (((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \
+ ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \
+ ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \
+ ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \
+ ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \
+ ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))
#define IS_CURSEC(sbi, secno) \
- ((secno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \
- sbi->segs_per_sec) || \
- (secno == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno / \
- sbi->segs_per_sec) || \
- (secno == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno / \
- sbi->segs_per_sec) || \
- (secno == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno / \
- sbi->segs_per_sec) || \
- (secno == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno / \
- sbi->segs_per_sec) || \
- (secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \
- sbi->segs_per_sec)) \
+ (((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \
+ (sbi)->segs_per_sec) || \
+ ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno / \
+ (sbi)->segs_per_sec) || \
+ ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno / \
+ (sbi)->segs_per_sec) || \
+ ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno / \
+ (sbi)->segs_per_sec) || \
+ ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno / \
+ (sbi)->segs_per_sec) || \
+ ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \
+ (sbi)->segs_per_sec)) \
#define MAIN_BLKADDR(sbi) (SM_I(sbi)->main_blkaddr)
#define SEG0_BLKADDR(sbi) (SM_I(sbi)->seg0_blkaddr)
#define MAIN_SEGS(sbi) (SM_I(sbi)->main_segments)
-#define MAIN_SECS(sbi) (sbi->total_sections)
+#define MAIN_SECS(sbi) ((sbi)->total_sections)
#define TOTAL_SEGS(sbi) (SM_I(sbi)->segment_count)
-#define TOTAL_BLKS(sbi) (TOTAL_SEGS(sbi) << sbi->log_blocks_per_seg)
+#define TOTAL_BLKS(sbi) (TOTAL_SEGS(sbi) << (sbi)->log_blocks_per_seg)
#define MAX_BLKADDR(sbi) (SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
-#define SEGMENT_SIZE(sbi) (1ULL << (sbi->log_blocksize + \
- sbi->log_blocks_per_seg))
+#define SEGMENT_SIZE(sbi) (1ULL << ((sbi)->log_blocksize + \
+ (sbi)->log_blocks_per_seg))
#define START_BLOCK(sbi, segno) (SEG0_BLKADDR(sbi) + \
- (GET_R2L_SEGNO(FREE_I(sbi), segno) << sbi->log_blocks_per_seg))
+ (GET_R2L_SEGNO(FREE_I(sbi), segno) << (sbi)->log_blocks_per_seg))
#define NEXT_FREE_BLKADDR(sbi, curseg) \
- (START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff)
+ (START_BLOCK(sbi, (curseg)->segno) + (curseg)->next_blkoff)
#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr) ((blk_addr) - SEG0_BLKADDR(sbi))
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr) \
- (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> sbi->log_blocks_per_seg)
+ (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> (sbi)->log_blocks_per_seg)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr) \
- (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (sbi->blocks_per_seg - 1))
+ (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))
#define GET_SEGNO(sbi, blk_addr) \
- (((blk_addr == NULL_ADDR) || (blk_addr == NEW_ADDR)) ? \
+ ((((blk_addr) == NULL_ADDR) || ((blk_addr) == NEW_ADDR)) ? \
NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \
GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
-#define GET_SECNO(sbi, segno) \
- ((segno) / sbi->segs_per_sec)
-#define GET_ZONENO_FROM_SEGNO(sbi, segno) \
- ((segno / sbi->segs_per_sec) / sbi->secs_per_zone)
+#define BLKS_PER_SEC(sbi) \
+ ((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
+#define GET_SEC_FROM_SEG(sbi, segno) \
+ ((segno) / (sbi)->segs_per_sec)
+#define GET_SEG_FROM_SEC(sbi, secno) \
+ ((secno) * (sbi)->segs_per_sec)
+#define GET_ZONE_FROM_SEC(sbi, secno) \
+ ((secno) / (sbi)->secs_per_zone)
+#define GET_ZONE_FROM_SEG(sbi, segno) \
+ GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
#define GET_SUM_BLOCK(sbi, segno) \
- ((sbi->sm_info->ssa_blkaddr) + segno)
+ ((sbi)->sm_info->ssa_blkaddr + (segno))
#define GET_SUM_TYPE(footer) ((footer)->entry_type)
-#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = type)
+#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type))
#define SIT_ENTRY_OFFSET(sit_i, segno) \
- (segno % sit_i->sents_per_block)
+ ((segno) % (sit_i)->sents_per_block)
#define SIT_BLOCK_OFFSET(segno) \
- (segno / SIT_ENTRY_PER_BLOCK)
+ ((segno) / SIT_ENTRY_PER_BLOCK)
#define START_SEGNO(segno) \
(SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
#define SIT_BLK_CNT(sbi) \
#define SECTOR_FROM_BLOCK(blk_addr) \
(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors) \
- (sectors >> F2FS_LOG_SECTORS_PER_BLOCK)
-#define MAX_BIO_BLOCKS(sbi) \
- ((int)min((int)max_hw_blocks(sbi), BIO_MAX_PAGES))
+ ((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK)
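These are pure shifts: with the usual 4KB block and 512B sector, F2FS_LOG_SECTORS_PER_BLOCK is 3, so block n maps to sector n*8. A quick round-trip check under those assumptions:

/* Sketch of the block<->sector shift arithmetic (4KB blocks assumed). */
#include <stdint.h>
#include <stdio.h>

#define F2FS_LOG_SECTORS_PER_BLOCK 3	/* 4096 / 512 = 8 sectors/block */

#define SECTOR_FROM_BLOCK(blk)	(((uint64_t)(blk)) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sec)	((sec) >> F2FS_LOG_SECTORS_PER_BLOCK)

int main(void)
{
	uint32_t blk = 1000;
	uint64_t sec = SECTOR_FROM_BLOCK(blk);	/* 8000 */

	printf("block %u -> sector %llu\n", blk, (unsigned long long)sec);
	printf("sector %llu -> block %llu\n", (unsigned long long)sec,
	       (unsigned long long)SECTOR_TO_BLOCK(sec));	/* round trip */
	return 0;
}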
/*
* indicate a block allocation direction: RIGHT and LEFT.
*/
enum {
GC_CB = 0,
- GC_GREEDY
+ GC_GREEDY,
+ ALLOC_NEXT,
+ FLUSH_DEVICE,
+ MAX_GC_POLICY,
};
/*
};
struct seg_entry {
- unsigned short valid_blocks; /* # of valid blocks */
+ unsigned int type:6; /* segment type like CURSEG_XXX_TYPE */
+ unsigned int valid_blocks:10; /* # of valid blocks */
+ unsigned int ckpt_valid_blocks:10; /* # of valid blocks last cp */
+ unsigned int padding:6; /* padding */
unsigned char *cur_valid_map; /* validity bitmap of blocks */
+#ifdef CONFIG_F2FS_CHECK_FS
+ unsigned char *cur_valid_map_mir; /* mirror of current valid bitmap */
+#endif
/*
	 * # of valid blocks and the validity bitmap stored in the last
* checkpoint pack. This information is used by the SSR mode.
*/
- unsigned short ckpt_valid_blocks;
- unsigned char *ckpt_valid_map;
+ unsigned char *ckpt_valid_map; /* validity bitmap of blocks last cp */
unsigned char *discard_map;
- unsigned char type; /* segment type like CURSEG_XXX_TYPE */
unsigned long long mtime; /* modification time of the segment */
};
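The reworked seg_entry packs the type and both block counters into a single 32-bit word (6+10+10+6 bits); 10 bits suffice because a segment holds at most 512 blocks. A structural check of that packing (the 4-byte result holds on common ABIs but bitfield layout is strictly implementation-defined):

/* Sketch checking the 6+10+10+6 bitfield packing of the counters. */
#include <stdio.h>

struct seg_counters {
	unsigned int type:6;
	unsigned int valid_blocks:10;		/* 0..1023 covers 512 blocks */
	unsigned int ckpt_valid_blocks:10;
	unsigned int padding:6;
};

int main(void)
{
	struct seg_counters sc = { .type = 5, .valid_blocks = 512 };

	/* all four fields share one 32-bit storage unit */
	printf("sizeof = %zu byte(s)\n", sizeof(sc));	/* 4 on common ABIs */
	printf("type=%u valid=%u\n", sc.type, sc.valid_blocks);
	return 0;
}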
* this value is set in page as a private data which indicate that
* the page is atomically written, and it is in inmem_pages list.
*/
-#define ATOMIC_WRITTEN_PAGE 0x0000ffff
+#define ATOMIC_WRITTEN_PAGE ((unsigned long)-1)
+#define DUMMY_WRITTEN_PAGE ((unsigned long)-2)
#define IS_ATOMIC_WRITTEN_PAGE(page) \
(page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
+#define IS_DUMMY_WRITTEN_PAGE(page) \
+ (page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE)
struct inmem_pages {
struct list_head list;
struct page *page;
+ block_t old_addr; /* for revoking when fail to commit */
};
struct sit_info {
block_t sit_blocks; /* # of blocks used by SIT area */
block_t written_valid_blocks; /* # of valid blocks in main area */
char *sit_bitmap; /* SIT bitmap pointer */
+#ifdef CONFIG_F2FS_CHECK_FS
+ char *sit_bitmap_mir; /* SIT bitmap mirror */
+#endif
unsigned int bitmap_size; /* SIT bitmap size */
	unsigned long *tmp_map;			/* bitmap for temporary use */
unsigned long long mounted_time; /* mount time */
unsigned long long min_mtime; /* min. modification time */
unsigned long long max_mtime; /* max. modification time */
+
+ unsigned int last_victim[MAX_GC_POLICY]; /* last victim segment # */
};
struct free_segmap_info {
struct curseg_info {
struct mutex curseg_mutex; /* lock for consistency */
struct f2fs_summary_block *sum_blk; /* cached summary block */
+ struct rw_semaphore journal_rwsem; /* protect journal area */
+ struct f2fs_journal *journal; /* cached journal info */
unsigned char alloc_type; /* current allocation type */
unsigned int segno; /* current segment number */
unsigned short next_blkoff; /* next block offset to write */
unsigned int segno)
{
struct sit_info *sit_i = SIT_I(sbi);
- return &sit_i->sec_entries[GET_SECNO(sbi, segno)];
+ return &sit_i->sec_entries[GET_SEC_FROM_SEG(sbi, segno)];
}
static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
- unsigned int segno, int section)
+ unsigned int segno, bool use_section)
{
/*
* In order to get # of valid blocks in a section instantly from many
* segments, f2fs manages two counting structures separately.
*/
- if (section > 1)
+ if (use_section && sbi->segs_per_sec > 1)
return get_sec_entry(sbi, segno)->valid_blocks;
else
return get_seg_entry(sbi, segno)->valid_blocks;
se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
+#ifdef CONFIG_F2FS_CHECK_FS
+ memcpy(se->cur_valid_map_mir, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
+#endif
se->type = GET_SIT_TYPE(rs);
se->mtime = le64_to_cpu(rs->mtime);
}
static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{
struct free_segmap_info *free_i = FREE_I(sbi);
- unsigned int secno = segno / sbi->segs_per_sec;
- unsigned int start_segno = secno * sbi->segs_per_sec;
+ unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+ unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
unsigned int next;
spin_lock(&free_i->segmap_lock);
unsigned int segno)
{
struct free_segmap_info *free_i = FREE_I(sbi);
- unsigned int secno = segno / sbi->segs_per_sec;
+ unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+
set_bit(segno, free_i->free_segmap);
free_i->free_segments--;
if (!test_and_set_bit(secno, free_i->free_secmap))
unsigned int segno)
{
struct free_segmap_info *free_i = FREE_I(sbi);
- unsigned int secno = segno / sbi->segs_per_sec;
- unsigned int start_segno = secno * sbi->segs_per_sec;
+ unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+ unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
unsigned int next;
spin_lock(&free_i->segmap_lock);
unsigned int segno)
{
struct free_segmap_info *free_i = FREE_I(sbi);
- unsigned int secno = segno / sbi->segs_per_sec;
+ unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+
spin_lock(&free_i->segmap_lock);
if (!test_and_set_bit(segno, free_i->free_segmap)) {
free_i->free_segments--;
void *dst_addr)
{
struct sit_info *sit_i = SIT_I(sbi);
+
+#ifdef CONFIG_F2FS_CHECK_FS
+ if (memcmp(sit_i->sit_bitmap, sit_i->sit_bitmap_mir,
+ sit_i->bitmap_size))
+ f2fs_bug_on(sbi, 1);
+#endif
memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}
return SM_I(sbi)->ovp_segments;
}
-static inline int overprovision_sections(struct f2fs_sb_info *sbi)
-{
- return ((unsigned int) overprovision_segments(sbi)) / sbi->segs_per_sec;
-}
-
static inline int reserved_sections(struct f2fs_sb_info *sbi)
{
- return ((unsigned int) reserved_segments(sbi)) / sbi->segs_per_sec;
-}
-
-static inline bool need_SSR(struct f2fs_sb_info *sbi)
-{
- int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
- int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
- return free_sections(sbi) <= (node_secs + 2 * dent_secs +
- reserved_sections(sbi) + 1);
+ return GET_SEC_FROM_SEG(sbi, (unsigned int)reserved_segments(sbi));
}
-static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
+static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
+ int freed, int needed)
{
int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
+ int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
return false;
- return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
- reserved_sections(sbi));
+ return (free_sections(sbi) + freed) <=
+ (node_secs + 2 * dent_secs + imeta_secs +
+ reserved_sections(sbi) + needed);
}
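
The inequality reserves enough free sections to flush everything currently
dirty: node pages, dentry pages (weighted x2, since flushing dentries can dirty
further node blocks), the newly counted dirty inode metadata, plus the reserved
pool and the caller's extra need. A standalone model with sample numbers
(hypothetical):

#include <stdbool.h>

static bool low_on_sections(int free_secs, int freed, int node_secs,
			    int dent_secs, int imeta_secs,
			    int reserved_secs, int needed)
{
	/* e.g. 3 + 2*2 + 1 + 6 + 0 = 14 sections of required headroom */
	return (free_secs + freed) <=
		(node_secs + 2 * dent_secs + imeta_secs +
		 reserved_secs + needed);
}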
static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
*/
#define DEF_MIN_IPU_UTIL 70
#define DEF_MIN_FSYNC_BLOCKS 8
+#define DEF_MIN_HOT_BLOCKS 16
enum {
F2FS_IPU_FORCE,
F2FS_IPU_UTIL,
F2FS_IPU_SSR_UTIL,
F2FS_IPU_FSYNC,
+ F2FS_IPU_ASYNC,
};
-static inline bool need_inplace_update(struct inode *inode)
+static inline bool need_inplace_update_policy(struct inode *inode,
+ struct f2fs_io_info *fio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
unsigned int policy = SM_I(sbi)->ipu_policy;
- /* IPU can be done only for the user data */
- if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
+ if (test_opt(sbi, LFS))
return false;
+ /* if this is cold file, we should overwrite to avoid fragmentation */
+ if (file_is_cold(inode))
+ return true;
+
if (policy & (0x1 << F2FS_IPU_FORCE))
return true;
if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
utilization(sbi) > SM_I(sbi)->min_ipu_util)
return true;
+ /*
+ * IPU for rewriting async pages
+ */
+ if (policy & (0x1 << F2FS_IPU_ASYNC) &&
+ fio && fio->op == REQ_OP_WRITE &&
+ !(fio->op_flags & REQ_SYNC) &&
+ !f2fs_encrypted_inode(inode))
+ return true;
+
/* this is only set during fdatasync */
if (policy & (0x1 << F2FS_IPU_FSYNC) &&
- is_inode_flag_set(F2FS_I(inode), FI_NEED_IPU))
+ is_inode_flag_set(inode, FI_NEED_IPU))
return true;
return false;
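
ipu_policy is a bit mask, so several triggers can be armed at once; the checks
above fall through from the unconditional FORCE bit down to the fdatasync-only
FSYNC bit. A sketch of the mask arithmetic, assuming the full F2FS_IPU_* enum
order (including the elided F2FS_IPU_SSR) mirrored here as a local enum:

enum { IPU_FORCE, IPU_SSR, IPU_UTIL, IPU_SSR_UTIL, IPU_FSYNC, IPU_ASYNC };

/* e.g. arm the utilization- and async-rewrite triggers together */
unsigned int policy = (0x1 << IPU_UTIL) | (0x1 << IPU_ASYNC);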
static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
{
- f2fs_bug_on(sbi, blk_addr < SEG0_BLKADDR(sbi)
- || blk_addr >= MAX_BLKADDR(sbi));
+ BUG_ON(blk_addr < SEG0_BLKADDR(sbi)
+ || blk_addr >= MAX_BLKADDR(sbi));
}
/*
check_seg_range(sbi, start);
+#ifdef CONFIG_F2FS_CHECK_FS
+ if (f2fs_test_bit(offset, sit_i->sit_bitmap) !=
+ f2fs_test_bit(offset, sit_i->sit_bitmap_mir))
+ f2fs_bug_on(sbi, 1);
+#endif
+
/* calculate sit block address */
if (f2fs_test_bit(offset, sit_i->sit_bitmap))
blk_addr += sit_i->sit_blocks;
unsigned int block_off = SIT_BLOCK_OFFSET(start);
f2fs_change_bit(block_off, sit_i->sit_bitmap);
+#ifdef CONFIG_F2FS_CHECK_FS
+ f2fs_change_bit(block_off, sit_i->sit_bitmap_mir);
+#endif
}
static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi)
- (base + 1) + type;
}
-static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
+static inline bool no_fggc_candidate(struct f2fs_sb_info *sbi,
+ unsigned int secno)
{
- if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
+ if (get_valid_blocks(sbi, GET_SEG_FROM_SEC(sbi, secno), true) >=
+ sbi->fggc_threshold)
return true;
return false;
}
-static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
+static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
{
- struct block_device *bdev = sbi->sb->s_bdev;
- struct request_queue *q = bdev_get_queue(bdev);
- return SECTOR_TO_BLOCK(queue_max_sectors(q));
+ if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
+ return true;
+ return false;
}
/*
* It is very important to gather dirty pages and write them at once, so that
* we can submit a big bio without interfering with other data writes.
* By default, 512 pages for directory data,
- * 512 pages (2MB) * 3 for three types of nodes, and
- * max_bio_blocks for meta are set.
+ * 512 pages (2MB) * 8 for nodes, and
+ * 256 pages * 8 for meta are set.
*/
static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
{
if (type == DATA)
return sbi->blocks_per_seg;
else if (type == NODE)
- return 3 * sbi->blocks_per_seg;
+ return 8 * sbi->blocks_per_seg;
else if (type == META)
- return MAX_BIO_BLOCKS(sbi);
+ return 8 * BIO_MAX_PAGES;
else
return 0;
}
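
With the usual geometry these thresholds work out as follows (assuming 4KB
blocks, 512 blocks per segment, and BIO_MAX_PAGES == 256 on kernels of this
vintage):

/*
 *   DATA: 512 pages            =  2MB
 *   NODE: 8 * 512 = 4096 pages = 16MB
 *   META: 8 * 256 = 2048 pages =  8MB
 */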
return 0;
nr_to_write = wbc->nr_to_write;
-
- if (type == DATA)
- desired = 4096;
- else if (type == NODE)
- desired = 3 * max_hw_blocks(sbi);
- else
- desired = MAX_BIO_BLOCKS(sbi);
+ desired = BIO_MAX_PAGES;
+ if (type == NODE)
+ desired <<= 1;
wbc->nr_to_write = desired;
return desired - nr_to_write;
}
+
+static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ bool wakeup = false;
+ int i;
+
+ if (force)
+ goto wake_up;
+
+ mutex_lock(&dcc->cmd_lock);
+ for (i = MAX_PLIST_NUM - 1;
+ i >= 0 && plist_issue(dcc->pend_list_tag[i]); i--) {
+ if (!list_empty(&dcc->pend_list[i])) {
+ wakeup = true;
+ break;
+ }
+ }
+ mutex_unlock(&dcc->cmd_lock);
+ if (!wakeup)
+ return;
+wake_up:
+ dcc->discard_wake = 1;
+ wake_up_interruptible_all(&dcc->discard_wait_queue);
+}
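
force=true skips the scan and wakes the thread unconditionally; force=false
walks the pending lists from the largest list downward and returns without
waking if nothing is queued. Hypothetical call site:

/* e.g. kick the discard thread unconditionally, as on unmount */
wake_up_discard_thread(sbi, true);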
#include <linux/f2fs_fs.h>
#include "f2fs.h"
+#include "node.h"
static LIST_HEAD(f2fs_list);
static DEFINE_SPINLOCK(f2fs_list_lock);
static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
{
- return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
+ long count = NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
+
+ return count > 0 ? count : 0;
}
static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
{
- if (NM_I(sbi)->fcnt > NAT_ENTRY_PER_BLOCK)
- return NM_I(sbi)->fcnt - NAT_ENTRY_PER_BLOCK;
- return 0;
+ long count = NM_I(sbi)->nid_cnt[FREE_NID_LIST] - MAX_FREE_NIDS;
+
+ return count > 0 ? count : 0;
}
static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
{
- return sbi->total_ext_tree + atomic_read(&sbi->total_ext_node);
+ return atomic_read(&sbi->total_zombie_tree) +
+ atomic_read(&sbi->total_ext_node);
}
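
Together these feed f2fs_shrink_count() below: clean NAT entries are
reclaimable, free nids count only above the MAX_FREE_NIDS watermark, and the
extent cache contributes zombie trees plus all extent nodes. The reported
total is, in sketch form:

/* reclaimable objects reported for one filesystem (illustrative) */
unsigned long count = __count_nat_entries(sbi) +
		      __count_free_nids(sbi) +
		      __count_extent_cache(sbi);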
unsigned long f2fs_shrink_count(struct shrinker *shrink,
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
+#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
+#include <linux/quota.h>
#include "f2fs.h"
#include "node.h"
#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
-static struct proc_dir_entry *f2fs_proc_root;
static struct kmem_cache *f2fs_inode_cachep;
-static struct kset *f2fs_kset;
+
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+
+char *fault_name[FAULT_MAX] = {
+ [FAULT_KMALLOC] = "kmalloc",
+ [FAULT_PAGE_ALLOC] = "page alloc",
+ [FAULT_ALLOC_NID] = "alloc nid",
+ [FAULT_ORPHAN] = "orphan",
+ [FAULT_BLOCK] = "no more block",
+ [FAULT_DIR_DEPTH] = "too big dir depth",
+ [FAULT_EVICT_INODE] = "evict_inode fail",
+ [FAULT_TRUNCATE] = "truncate fail",
+ [FAULT_IO] = "IO error",
+ [FAULT_CHECKPOINT] = "checkpoint error",
+};
+
+static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
+ unsigned int rate)
+{
+ struct f2fs_fault_info *ffi = &sbi->fault_info;
+
+ if (rate) {
+ atomic_set(&ffi->inject_ops, 0);
+ ffi->inject_rate = rate;
+ ffi->inject_type = (1 << FAULT_MAX) - 1;
+ } else {
+ memset(ffi, 0, sizeof(struct f2fs_fault_info));
+ }
+}
+#endif
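
A non-zero rate arms every fault type at once, since (1 << FAULT_MAX) - 1 sets
all FAULT_* bits, and roughly one in every rate instrumented operations then
fails. A simplified single-threaded model of the injector (hypothetical; the
kernel side uses an atomic counter):

#include <stdbool.h>

static bool should_inject(unsigned int *ops, unsigned int rate)
{
	if (!rate)
		return false;		/* injection disabled */
	if (++(*ops) < rate)
		return false;
	*ops = 0;			/* fail every rate-th call */
	return true;
}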
/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
Opt_disable_roll_forward,
Opt_norecovery,
Opt_discard,
+ Opt_nodiscard,
Opt_noheap,
+ Opt_heap,
Opt_user_xattr,
Opt_nouser_xattr,
Opt_acl,
Opt_active_logs,
Opt_disable_ext_identify,
Opt_inline_xattr,
+ Opt_noinline_xattr,
Opt_inline_data,
Opt_inline_dentry,
+ Opt_noinline_dentry,
Opt_flush_merge,
+ Opt_noflush_merge,
Opt_nobarrier,
Opt_fastboot,
Opt_extent_cache,
Opt_noextent_cache,
Opt_noinline_data,
+ Opt_data_flush,
+ Opt_mode,
+ Opt_io_size_bits,
+ Opt_fault_injection,
+ Opt_lazytime,
+ Opt_nolazytime,
+ Opt_quota,
+ Opt_noquota,
+ Opt_usrquota,
+ Opt_grpquota,
+ Opt_prjquota,
+ Opt_usrjquota,
+ Opt_grpjquota,
+ Opt_prjjquota,
+ Opt_offusrjquota,
+ Opt_offgrpjquota,
+ Opt_offprjjquota,
+ Opt_jqfmt_vfsold,
+ Opt_jqfmt_vfsv0,
+ Opt_jqfmt_vfsv1,
Opt_err,
};
{Opt_disable_roll_forward, "disable_roll_forward"},
{Opt_norecovery, "norecovery"},
{Opt_discard, "discard"},
+ {Opt_nodiscard, "nodiscard"},
{Opt_noheap, "no_heap"},
+ {Opt_heap, "heap"},
{Opt_user_xattr, "user_xattr"},
{Opt_nouser_xattr, "nouser_xattr"},
{Opt_acl, "acl"},
{Opt_active_logs, "active_logs=%u"},
{Opt_disable_ext_identify, "disable_ext_identify"},
{Opt_inline_xattr, "inline_xattr"},
+ {Opt_noinline_xattr, "noinline_xattr"},
{Opt_inline_data, "inline_data"},
{Opt_inline_dentry, "inline_dentry"},
+ {Opt_noinline_dentry, "noinline_dentry"},
{Opt_flush_merge, "flush_merge"},
+ {Opt_noflush_merge, "noflush_merge"},
{Opt_nobarrier, "nobarrier"},
{Opt_fastboot, "fastboot"},
{Opt_extent_cache, "extent_cache"},
{Opt_noextent_cache, "noextent_cache"},
{Opt_noinline_data, "noinline_data"},
+ {Opt_data_flush, "data_flush"},
+ {Opt_mode, "mode=%s"},
+ {Opt_io_size_bits, "io_bits=%u"},
+ {Opt_fault_injection, "fault_injection=%u"},
+ {Opt_lazytime, "lazytime"},
+ {Opt_nolazytime, "nolazytime"},
+ {Opt_quota, "quota"},
+ {Opt_noquota, "noquota"},
+ {Opt_usrquota, "usrquota"},
+ {Opt_grpquota, "grpquota"},
+ {Opt_prjquota, "prjquota"},
+ {Opt_usrjquota, "usrjquota=%s"},
+ {Opt_grpjquota, "grpjquota=%s"},
+ {Opt_prjjquota, "prjjquota=%s"},
+ {Opt_offusrjquota, "usrjquota="},
+ {Opt_offgrpjquota, "grpjquota="},
+ {Opt_offprjjquota, "prjjquota="},
+ {Opt_jqfmt_vfsold, "jqfmt=vfsold"},
+ {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
+ {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
{Opt_err, NULL},
};
-/* Sysfs support for f2fs */
-enum {
- GC_THREAD, /* struct f2fs_gc_thread */
- SM_INFO, /* struct f2fs_sm_info */
- NM_INFO, /* struct f2fs_nm_info */
- F2FS_SBI, /* struct f2fs_sb_info */
-};
-
-struct f2fs_attr {
- struct attribute attr;
- ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
- ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
- const char *, size_t);
- int struct_type;
- int offset;
-};
-
-static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
-{
- if (struct_type == GC_THREAD)
- return (unsigned char *)sbi->gc_thread;
- else if (struct_type == SM_INFO)
- return (unsigned char *)SM_I(sbi);
- else if (struct_type == NM_INFO)
- return (unsigned char *)NM_I(sbi);
- else if (struct_type == F2FS_SBI)
- return (unsigned char *)sbi;
- return NULL;
-}
-
-static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
- struct f2fs_sb_info *sbi, char *buf)
-{
- unsigned char *ptr = NULL;
- unsigned int *ui;
-
- ptr = __struct_ptr(sbi, a->struct_type);
- if (!ptr)
- return -EINVAL;
-
- ui = (unsigned int *)(ptr + a->offset);
-
- return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
-}
-
-static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
- struct f2fs_sb_info *sbi,
- const char *buf, size_t count)
-{
- unsigned char *ptr;
- unsigned long t;
- unsigned int *ui;
- ssize_t ret;
-
- ptr = __struct_ptr(sbi, a->struct_type);
- if (!ptr)
- return -EINVAL;
-
- ui = (unsigned int *)(ptr + a->offset);
-
- ret = kstrtoul(skip_spaces(buf), 0, &t);
- if (ret < 0)
- return ret;
- *ui = t;
- return count;
-}
-
-static ssize_t f2fs_attr_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
+void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
- struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
- s_kobj);
- struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
+ struct va_format vaf;
+ va_list args;
- return a->show ? a->show(a, sbi, buf) : 0;
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ printk_ratelimited("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
+ va_end(args);
}
-static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t len)
+static void init_once(void *foo)
{
- struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
- s_kobj);
- struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
+ struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
- return a->store ? a->store(a, sbi, buf, len) : 0;
+ inode_init_once(&fi->vfs_inode);
}
-static void f2fs_sb_release(struct kobject *kobj)
+#ifdef CONFIG_QUOTA
+static const char * const quotatypes[] = INITQFNAMES;
+#define QTYPE2NAME(t) (quotatypes[t])
+static int f2fs_set_qf_name(struct super_block *sb, int qtype,
+ substring_t *args)
{
- struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
- s_kobj);
- complete(&sbi->s_kobj_unregister);
-}
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ char *qname;
+ int ret = -EINVAL;
-#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
-static struct f2fs_attr f2fs_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .show = _show, \
- .store = _store, \
- .struct_type = _struct_type, \
- .offset = _offset \
+ if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) {
+ f2fs_msg(sb, KERN_ERR,
+ "Cannot change journaled "
+ "quota options when quota turned on");
+ return -EINVAL;
+ }
+ qname = match_strdup(args);
+ if (!qname) {
+ f2fs_msg(sb, KERN_ERR,
+ "Not enough memory for storing quotafile name");
+ return -EINVAL;
+ }
+ if (sbi->s_qf_names[qtype]) {
+ if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
+ ret = 0;
+ else
+ f2fs_msg(sb, KERN_ERR,
+ "%s quota file already specified",
+ QTYPE2NAME(qtype));
+ goto errout;
+ }
+ if (strchr(qname, '/')) {
+ f2fs_msg(sb, KERN_ERR,
+ "quotafile must be on filesystem root");
+ goto errout;
+ }
+ sbi->s_qf_names[qtype] = qname;
+ set_opt(sbi, QUOTA);
+ return 0;
+errout:
+ kfree(qname);
+ return ret;
}
-#define F2FS_RW_ATTR(struct_type, struct_name, name, elname) \
- F2FS_ATTR_OFFSET(struct_type, name, 0644, \
- f2fs_sbi_show, f2fs_sbi_store, \
- offsetof(struct struct_name, elname))
-
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
-F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
-F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, cp_interval);
-
-#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
-static struct attribute *f2fs_attrs[] = {
- ATTR_LIST(gc_min_sleep_time),
- ATTR_LIST(gc_max_sleep_time),
- ATTR_LIST(gc_no_gc_sleep_time),
- ATTR_LIST(gc_idle),
- ATTR_LIST(reclaim_segments),
- ATTR_LIST(max_small_discards),
- ATTR_LIST(batched_trim_sections),
- ATTR_LIST(ipu_policy),
- ATTR_LIST(min_ipu_util),
- ATTR_LIST(min_fsync_blocks),
- ATTR_LIST(max_victim_search),
- ATTR_LIST(dir_level),
- ATTR_LIST(ram_thresh),
- ATTR_LIST(ra_nid_pages),
- ATTR_LIST(cp_interval),
- NULL,
-};
-
-static const struct sysfs_ops f2fs_attr_ops = {
- .show = f2fs_attr_show,
- .store = f2fs_attr_store,
-};
-
-static struct kobj_type f2fs_ktype = {
- .default_attrs = f2fs_attrs,
- .sysfs_ops = &f2fs_attr_ops,
- .release = f2fs_sb_release,
-};
-
-void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
+static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
- struct va_format vaf;
- va_list args;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
- va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
- printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
- va_end(args);
+ if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) {
+ f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options"
+ " when quota turned on");
+ return -EINVAL;
+ }
+ kfree(sbi->s_qf_names[qtype]);
+ sbi->s_qf_names[qtype] = NULL;
+ return 0;
}
-static void init_once(void *foo)
+static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
- struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
+ /*
+ * We do the test below only for project quotas. 'usrquota' and
+ * 'grpquota' mount options are allowed even without the quota feature
+ * to support legacy quotas in quota files.
+ */
+ if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi->sb)) {
+ f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. "
+ "Cannot enable project quota enforcement.");
+ return -1;
+ }
+ if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA] ||
+ sbi->s_qf_names[PRJQUOTA]) {
+ if (test_opt(sbi, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
+ clear_opt(sbi, USRQUOTA);
+
+ if (test_opt(sbi, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
+ clear_opt(sbi, GRPQUOTA);
+
+ if (test_opt(sbi, PRJQUOTA) && sbi->s_qf_names[PRJQUOTA])
+ clear_opt(sbi, PRJQUOTA);
+
+ if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
+ test_opt(sbi, PRJQUOTA)) {
+ f2fs_msg(sbi->sb, KERN_ERR, "old and new quota "
+ "format mixing");
+ return -1;
+ }
- inode_init_once(&fi->vfs_inode);
+ if (!sbi->s_jquota_fmt) {
+ f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format "
+ "not specified");
+ return -1;
+ }
+ }
+ return 0;
}
+#endif
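
For example (hypothetical invocation), mounting with usrjquota=aquota.user
together with grpquota leaves GRPQUOTA set after the clearing pass above,
because no grpjquota file was named, and therefore fails with the
"old and new quota format mixing" message.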
static int parse_options(struct super_block *sb, char *options)
{
substring_t args[MAX_OPT_ARGS];
char *p, *name;
int arg = 0;
+#ifdef CONFIG_QUOTA
+ int ret;
+#endif
if (!options)
return 0;
q = bdev_get_queue(sb->s_bdev);
if (blk_queue_discard(q)) {
set_opt(sbi, DISCARD);
- } else {
+ } else if (!f2fs_sb_mounted_blkzoned(sb)) {
f2fs_msg(sb, KERN_WARNING,
"mounting with \"discard\" option, but "
"the device does not support discard");
}
break;
+ case Opt_nodiscard:
+ if (f2fs_sb_mounted_blkzoned(sb)) {
+ f2fs_msg(sb, KERN_WARNING,
+ "discard is required for zoned block devices");
+ return -EINVAL;
+ }
+ clear_opt(sbi, DISCARD);
+ break;
case Opt_noheap:
set_opt(sbi, NOHEAP);
break;
+ case Opt_heap:
+ clear_opt(sbi, NOHEAP);
+ break;
#ifdef CONFIG_F2FS_FS_XATTR
case Opt_user_xattr:
set_opt(sbi, XATTR_USER);
case Opt_inline_xattr:
set_opt(sbi, INLINE_XATTR);
break;
+ case Opt_noinline_xattr:
+ clear_opt(sbi, INLINE_XATTR);
+ break;
#else
case Opt_user_xattr:
f2fs_msg(sb, KERN_INFO,
f2fs_msg(sb, KERN_INFO,
"inline_xattr options not supported");
break;
+ case Opt_noinline_xattr:
+ f2fs_msg(sb, KERN_INFO,
+ "noinline_xattr options not supported");
+ break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
case Opt_acl:
case Opt_inline_dentry:
set_opt(sbi, INLINE_DENTRY);
break;
+ case Opt_noinline_dentry:
+ clear_opt(sbi, INLINE_DENTRY);
+ break;
case Opt_flush_merge:
set_opt(sbi, FLUSH_MERGE);
break;
+ case Opt_noflush_merge:
+ clear_opt(sbi, FLUSH_MERGE);
+ break;
case Opt_nobarrier:
set_opt(sbi, NOBARRIER);
break;
case Opt_noinline_data:
clear_opt(sbi, INLINE_DATA);
break;
+ case Opt_data_flush:
+ set_opt(sbi, DATA_FLUSH);
+ break;
+ case Opt_mode:
+ name = match_strdup(&args[0]);
+
+ if (!name)
+ return -ENOMEM;
+ if (strlen(name) == 8 &&
+ !strncmp(name, "adaptive", 8)) {
+ if (f2fs_sb_mounted_blkzoned(sb)) {
+ f2fs_msg(sb, KERN_WARNING,
+ "adaptive mode is not allowed with "
+ "zoned block device feature");
+ kfree(name);
+ return -EINVAL;
+ }
+ set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
+ } else if (strlen(name) == 3 &&
+ !strncmp(name, "lfs", 3)) {
+ set_opt_mode(sbi, F2FS_MOUNT_LFS);
+ } else {
+ kfree(name);
+ return -EINVAL;
+ }
+ kfree(name);
+ break;
+ case Opt_io_size_bits:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+ if (arg > __ilog2_u32(BIO_MAX_PAGES)) {
+ f2fs_msg(sb, KERN_WARNING,
+ "Not support %d, larger than %d",
+ 1 << arg, BIO_MAX_PAGES);
+ return -EINVAL;
+ }
+ sbi->write_io_size_bits = arg;
+ break;
+ case Opt_fault_injection:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ f2fs_build_fault_attr(sbi, arg);
+ set_opt(sbi, FAULT_INJECTION);
+#else
+ f2fs_msg(sb, KERN_INFO,
+ "FAULT_INJECTION was not selected");
+#endif
+ break;
+ case Opt_lazytime:
+ sb->s_flags |= MS_LAZYTIME;
+ break;
+ case Opt_nolazytime:
+ sb->s_flags &= ~MS_LAZYTIME;
+ break;
+#ifdef CONFIG_QUOTA
+ case Opt_quota:
+ case Opt_usrquota:
+ set_opt(sbi, USRQUOTA);
+ break;
+ case Opt_grpquota:
+ set_opt(sbi, GRPQUOTA);
+ break;
+ case Opt_prjquota:
+ set_opt(sbi, PRJQUOTA);
+ break;
+ case Opt_usrjquota:
+ ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
+ if (ret)
+ return ret;
+ break;
+ case Opt_grpjquota:
+ ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
+ if (ret)
+ return ret;
+ break;
+ case Opt_prjjquota:
+ ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
+ if (ret)
+ return ret;
+ break;
+ case Opt_offusrjquota:
+ ret = f2fs_clear_qf_name(sb, USRQUOTA);
+ if (ret)
+ return ret;
+ break;
+ case Opt_offgrpjquota:
+ ret = f2fs_clear_qf_name(sb, GRPQUOTA);
+ if (ret)
+ return ret;
+ break;
+ case Opt_offprjjquota:
+ ret = f2fs_clear_qf_name(sb, PRJQUOTA);
+ if (ret)
+ return ret;
+ break;
+ case Opt_jqfmt_vfsold:
+ sbi->s_jquota_fmt = QFMT_VFS_OLD;
+ break;
+ case Opt_jqfmt_vfsv0:
+ sbi->s_jquota_fmt = QFMT_VFS_V0;
+ break;
+ case Opt_jqfmt_vfsv1:
+ sbi->s_jquota_fmt = QFMT_VFS_V1;
+ break;
+ case Opt_noquota:
+ clear_opt(sbi, QUOTA);
+ clear_opt(sbi, USRQUOTA);
+ clear_opt(sbi, GRPQUOTA);
+ clear_opt(sbi, PRJQUOTA);
+ break;
+#else
+ case Opt_quota:
+ case Opt_usrquota:
+ case Opt_grpquota:
+ case Opt_prjquota:
+ case Opt_usrjquota:
+ case Opt_grpjquota:
+ case Opt_prjjquota:
+ case Opt_offusrjquota:
+ case Opt_offgrpjquota:
+ case Opt_offprjjquota:
+ case Opt_jqfmt_vfsold:
+ case Opt_jqfmt_vfsv0:
+ case Opt_jqfmt_vfsv1:
+ case Opt_noquota:
+ f2fs_msg(sb, KERN_INFO,
+ "quota operations not supported");
+ break;
+#endif
default:
f2fs_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" or missing value",
return -EINVAL;
}
}
+#ifdef CONFIG_QUOTA
+ if (f2fs_check_quota_options(sbi))
+ return -EINVAL;
+#endif
+
+ if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Should set mode=lfs with %uKB-sized IO",
+ F2FS_IO_SIZE_KB(sbi));
+ return -EINVAL;
+ }
return 0;
}
fi->i_current_depth = 1;
fi->i_advise = 0;
init_rwsem(&fi->i_sem);
+ INIT_LIST_HEAD(&fi->dirty_list);
+ INIT_LIST_HEAD(&fi->gdirty_list);
INIT_LIST_HEAD(&fi->inmem_pages);
mutex_init(&fi->inmem_lock);
-
- set_inode_flag(fi, FI_NEW_INODE);
-
- if (test_opt(F2FS_SB(sb), INLINE_XATTR))
- set_inode_flag(fi, FI_INLINE_XATTR);
-
+ init_rwsem(&fi->dio_rwsem[READ]);
+ init_rwsem(&fi->dio_rwsem[WRITE]);
+ init_rwsem(&fi->i_mmap_sem);
+ init_rwsem(&fi->i_xattr_sem);
+
+#ifdef CONFIG_QUOTA
+ memset(&fi->i_dquot, 0, sizeof(fi->i_dquot));
+ fi->i_reserved_quota = 0;
+#endif
/* Will be used by directory only */
fi->i_dir_level = F2FS_SB(sb)->dir_level;
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- fi->i_crypt_info = NULL;
-#endif
return &fi->vfs_inode;
}
static int f2fs_drop_inode(struct inode *inode)
{
+ int ret;
/*
* This is to avoid a deadlock condition like below.
* writeback_single_inode(inode)
* - f2fs_gc -> iput -> evict
* - inode_wait_for_writeback(inode)
*/
- if (!inode_unhashed(inode) && inode->i_state & I_SYNC) {
+ if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
if (!inode->i_nlink && !is_bad_inode(inode)) {
/* to avoid evict_inode call simultaneously */
atomic_inc(&inode->i_count);
/* some remained atomic pages should discarded */
if (f2fs_is_atomic_file(inode))
- commit_inmem_pages(inode, true);
+ drop_inmem_pages(inode);
/* should remain fi->extent_tree for writepage */
f2fs_destroy_extent_node(inode);
sb_start_intwrite(inode->i_sb);
- i_size_write(inode, 0);
+ f2fs_i_size_write(inode, 0);
if (F2FS_HAS_BLOCKS(inode))
- f2fs_truncate(inode, true);
+ f2fs_truncate(inode);
sb_end_intwrite(inode->i_sb);
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- if (F2FS_I(inode)->i_crypt_info)
- f2fs_free_encryption_info(inode,
- F2FS_I(inode)->i_crypt_info);
-#endif
+ fscrypt_put_encryption_info(inode, NULL);
spin_lock(&inode->i_lock);
atomic_dec(&inode->i_count);
}
+ trace_f2fs_drop_inode(inode, 0);
return 0;
}
- return generic_drop_inode(inode);
+ ret = generic_drop_inode(inode);
+ trace_f2fs_drop_inode(inode, ret);
+ return ret;
+}
+
+int f2fs_inode_dirtied(struct inode *inode, bool sync)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ int ret = 0;
+
+ spin_lock(&sbi->inode_lock[DIRTY_META]);
+ if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
+ ret = 1;
+ } else {
+ set_inode_flag(inode, FI_DIRTY_INODE);
+ stat_inc_dirty_inode(sbi, DIRTY_META);
+ }
+ if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
+ list_add_tail(&F2FS_I(inode)->gdirty_list,
+ &sbi->inode_list[DIRTY_META]);
+ inc_page_count(sbi, F2FS_DIRTY_IMETA);
+ }
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+ return ret;
+}
+
+void f2fs_inode_synced(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ spin_lock(&sbi->inode_lock[DIRTY_META]);
+ if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+ return;
+ }
+ if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
+ list_del_init(&F2FS_I(inode)->gdirty_list);
+ dec_page_count(sbi, F2FS_DIRTY_IMETA);
+ }
+ clear_inode_flag(inode, FI_DIRTY_INODE);
+ clear_inode_flag(inode, FI_AUTO_RECOVER);
+ stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
}
/*
*/
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
- set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ if (inode->i_ino == F2FS_NODE_INO(sbi) ||
+ inode->i_ino == F2FS_META_INO(sbi))
+ return;
+
+ if (flags == I_DIRTY_TIME)
+ return;
+
+ if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
+ clear_inode_flag(inode, FI_AUTO_RECOVER);
+
+ f2fs_inode_dirtied(inode, false);
}
static void f2fs_i_callback(struct rcu_head *head)
call_rcu(&inode->i_rcu, f2fs_i_callback);
}
-static void f2fs_put_super(struct super_block *sb)
+static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
- struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ percpu_counter_destroy(&sbi->alloc_valid_block_count);
+ percpu_counter_destroy(&sbi->total_valid_inode_count);
+}
+
+static void destroy_device_list(struct f2fs_sb_info *sbi)
+{
+ int i;
- if (sbi->s_proc) {
- remove_proc_entry("segment_info", sbi->s_proc);
- remove_proc_entry(sb->s_id, f2fs_proc_root);
+ for (i = 0; i < sbi->s_ndevs; i++) {
+ blkdev_put(FDEV(i).bdev, FMODE_EXCL);
+#ifdef CONFIG_BLK_DEV_ZONED
+ kfree(FDEV(i).blkz_type);
+#endif
}
- kobject_del(&sbi->s_kobj);
+ kfree(sbi->devs);
+}
- stop_gc_thread(sbi);
+static void f2fs_put_super(struct super_block *sb)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ int i;
+
+ f2fs_quota_off_umount(sb);
/* prevent remaining shrinker jobs */
mutex_lock(&sbi->umount_mutex);
* clean checkpoint again.
*/
if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
- !is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
+ !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
struct cp_control cpc = {
.reason = CP_UMOUNT,
};
write_checkpoint(sbi, &cpc);
}
+ /* be sure to wait for any on-going discard commands */
+ f2fs_wait_discard_bios(sbi, true);
+
+ if (f2fs_discard_en(sbi) && !sbi->discard_blks) {
+ struct cp_control cpc = {
+ .reason = CP_UMOUNT | CP_TRIMMED,
+ };
+ write_checkpoint(sbi, &cpc);
+ }
+
/* write_checkpoint can update stat information */
f2fs_destroy_stats(sbi);
* normally superblock is clean, so we need to release this.
* In addition, EIO will skip do checkpoint, we need this as well.
*/
- release_dirty_inode(sbi);
- release_discard_addrs(sbi);
+ release_ino_entry(sbi, true);
f2fs_leave_shrinker(sbi);
mutex_unlock(&sbi->umount_mutex);
+ /* our cp_error case, we can wait for any writeback page */
+ f2fs_flush_merged_writes(sbi);
+
iput(sbi->node_inode);
iput(sbi->meta_inode);
destroy_segment_manager(sbi);
kfree(sbi->ckpt);
- kobject_put(&sbi->s_kobj);
- wait_for_completion(&sbi->s_kobj_unregister);
+
+ f2fs_unregister_sysfs(sbi);
sb->s_fs_info = NULL;
- brelse(sbi->raw_super_buf);
+ if (sbi->s_chksum_driver)
+ crypto_free_shash(sbi->s_chksum_driver);
+ kfree(sbi->raw_super);
+
+ destroy_device_list(sbi);
+ if (sbi->write_io_dummy)
+ mempool_destroy(sbi->write_io_dummy);
+#ifdef CONFIG_QUOTA
+ for (i = 0; i < MAXQUOTAS; i++)
+ kfree(sbi->s_qf_names[i]);
+#endif
+ destroy_percpu_info(sbi);
+ for (i = 0; i < NR_PAGE_TYPE; i++)
+ kfree(sbi->write_io[i]);
kfree(sbi);
}
int f2fs_sync_fs(struct super_block *sb, int sync)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ int err = 0;
trace_f2fs_sync_fs(sb, sync);
+ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+ return -EAGAIN;
+
if (sync) {
struct cp_control cpc;
cpc.reason = __get_cp_reason(sbi);
mutex_lock(&sbi->gc_mutex);
- write_checkpoint(sbi, &cpc);
+ err = write_checkpoint(sbi, &cpc);
mutex_unlock(&sbi->gc_mutex);
- } else {
- f2fs_balance_fs(sbi);
}
f2fs_trace_ios(NULL, 1);
- return 0;
+ return err;
}
static int f2fs_freeze(struct super_block *sb)
{
- int err;
-
if (f2fs_readonly(sb))
return 0;
- err = f2fs_sync_fs(sb, 1);
- return err;
+ /* IO error happened before */
+ if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
+ return -EIO;
+
+ /* must be clean, since sync_filesystem() was already called */
+ if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
+ return -EINVAL;
+ return 0;
}
static int f2fs_unfreeze(struct super_block *sb)
return 0;
}
+#ifdef CONFIG_QUOTA
+static int f2fs_statfs_project(struct super_block *sb,
+ kprojid_t projid, struct kstatfs *buf)
+{
+ struct kqid qid;
+ struct dquot *dquot;
+ u64 limit;
+ u64 curblock;
+
+ qid = make_kqid_projid(projid);
+ dquot = dqget(sb, qid);
+ if (IS_ERR(dquot))
+ return PTR_ERR(dquot);
+ spin_lock(&dq_data_lock);
+
+ limit = (dquot->dq_dqb.dqb_bsoftlimit ?
+ dquot->dq_dqb.dqb_bsoftlimit :
+ dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
+ if (limit && buf->f_blocks > limit) {
+ curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
+ buf->f_blocks = limit;
+ buf->f_bfree = buf->f_bavail =
+ (buf->f_blocks > curblock) ?
+ (buf->f_blocks - curblock) : 0;
+ }
+
+ limit = dquot->dq_dqb.dqb_isoftlimit ?
+ dquot->dq_dqb.dqb_isoftlimit :
+ dquot->dq_dqb.dqb_ihardlimit;
+ if (limit && buf->f_files > limit) {
+ buf->f_files = limit;
+ buf->f_ffree =
+ (buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
+ (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
+ }
+
+ spin_unlock(&dq_data_lock);
+ dqput(dquot);
+ return 0;
+}
+#endif
+
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
block_t total_count, user_block_count, start_count, ovp_count;
+ u64 avail_node_count;
total_count = le64_to_cpu(sbi->raw_super->block_count);
user_block_count = sbi->user_block_count;
buf->f_bsize = sbi->blocksize;
buf->f_blocks = total_count - start_count;
- buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
- buf->f_bavail = user_block_count - valid_user_blocks(sbi);
+ buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
+ buf->f_bavail = user_block_count - valid_user_blocks(sbi) -
+ sbi->reserved_blocks;
+
+ avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
- buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
- buf->f_ffree = buf->f_files - valid_inode_count(sbi);
+ if (avail_node_count > user_block_count) {
+ buf->f_files = user_block_count;
+ buf->f_ffree = buf->f_bavail;
+ } else {
+ buf->f_files = avail_node_count;
+ buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
+ buf->f_bavail);
+ }
buf->f_namelen = F2FS_NAME_LEN;
buf->f_fsid.val[0] = (u32)id;
buf->f_fsid.val[1] = (u32)(id >> 32);
+#ifdef CONFIG_QUOTA
+ if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
+ sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
+ f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
+ }
+#endif
return 0;
}
+static inline void f2fs_show_quota_options(struct seq_file *seq,
+ struct super_block *sb)
+{
+#ifdef CONFIG_QUOTA
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+
+ if (sbi->s_jquota_fmt) {
+ char *fmtname = "";
+
+ switch (sbi->s_jquota_fmt) {
+ case QFMT_VFS_OLD:
+ fmtname = "vfsold";
+ break;
+ case QFMT_VFS_V0:
+ fmtname = "vfsv0";
+ break;
+ case QFMT_VFS_V1:
+ fmtname = "vfsv1";
+ break;
+ }
+ seq_printf(seq, ",jqfmt=%s", fmtname);
+ }
+
+ if (sbi->s_qf_names[USRQUOTA])
+ seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);
+
+ if (sbi->s_qf_names[GRPQUOTA])
+ seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
+
+ if (sbi->s_qf_names[PRJQUOTA])
+ seq_show_option(seq, "prjjquota", sbi->s_qf_names[PRJQUOTA]);
+#endif
+}
+
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
if (test_opt(sbi, DISCARD))
seq_puts(seq, ",discard");
if (test_opt(sbi, NOHEAP))
- seq_puts(seq, ",no_heap_alloc");
+ seq_puts(seq, ",no_heap");
+ else
+ seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
if (test_opt(sbi, XATTR_USER))
seq_puts(seq, ",user_xattr");
seq_puts(seq, ",nouser_xattr");
if (test_opt(sbi, INLINE_XATTR))
seq_puts(seq, ",inline_xattr");
+ else
+ seq_puts(seq, ",noinline_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
if (test_opt(sbi, POSIX_ACL))
seq_puts(seq, ",noinline_data");
if (test_opt(sbi, INLINE_DENTRY))
seq_puts(seq, ",inline_dentry");
+ else
+ seq_puts(seq, ",noinline_dentry");
if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
seq_puts(seq, ",flush_merge");
if (test_opt(sbi, NOBARRIER))
seq_puts(seq, ",extent_cache");
else
seq_puts(seq, ",noextent_cache");
+ if (test_opt(sbi, DATA_FLUSH))
+ seq_puts(seq, ",data_flush");
+
+ seq_puts(seq, ",mode=");
+ if (test_opt(sbi, ADAPTIVE))
+ seq_puts(seq, "adaptive");
+ else if (test_opt(sbi, LFS))
+ seq_puts(seq, "lfs");
seq_printf(seq, ",active_logs=%u", sbi->active_logs);
+ if (F2FS_IO_SIZE_BITS(sbi))
+ seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (test_opt(sbi, FAULT_INJECTION))
+ seq_printf(seq, ",fault_injection=%u",
+ sbi->fault_info.inject_rate);
+#endif
+#ifdef CONFIG_QUOTA
+ if (test_opt(sbi, QUOTA))
+ seq_puts(seq, ",quota");
+ if (test_opt(sbi, USRQUOTA))
+ seq_puts(seq, ",usrquota");
+ if (test_opt(sbi, GRPQUOTA))
+ seq_puts(seq, ",grpquota");
+ if (test_opt(sbi, PRJQUOTA))
+ seq_puts(seq, ",prjquota");
+#endif
+ f2fs_show_quota_options(seq, sbi->sb);
return 0;
}
-static int segment_info_seq_show(struct seq_file *seq, void *offset)
-{
- struct super_block *sb = seq->private;
- struct f2fs_sb_info *sbi = F2FS_SB(sb);
- unsigned int total_segs =
- le32_to_cpu(sbi->raw_super->segment_count_main);
- int i;
-
- seq_puts(seq, "format: segment_type|valid_blocks\n"
- "segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");
-
- for (i = 0; i < total_segs; i++) {
- struct seg_entry *se = get_seg_entry(sbi, i);
-
- if ((i % 10) == 0)
- seq_printf(seq, "%-10d", i);
- seq_printf(seq, "%d|%-3u", se->type,
- get_valid_blocks(sbi, i, 1));
- if ((i % 10) == 9 || i == (total_segs - 1))
- seq_putc(seq, '\n');
- else
- seq_putc(seq, ' ');
- }
-
- return 0;
-}
-
-static int segment_info_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, segment_info_seq_show, PDE_DATA(inode));
-}
-
-static const struct file_operations f2fs_seq_segment_info_fops = {
- .owner = THIS_MODULE,
- .open = segment_info_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static void default_options(struct f2fs_sb_info *sbi)
{
/* init some FS parameters */
sbi->active_logs = NR_CURSEG_TYPE;
set_opt(sbi, BG_GC);
+ set_opt(sbi, INLINE_XATTR);
set_opt(sbi, INLINE_DATA);
+ set_opt(sbi, INLINE_DENTRY);
set_opt(sbi, EXTENT_CACHE);
+ set_opt(sbi, NOHEAP);
+ sbi->sb->s_flags |= MS_LAZYTIME;
+ set_opt(sbi, FLUSH_MERGE);
+ if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
+ set_opt_mode(sbi, F2FS_MOUNT_LFS);
+ set_opt(sbi, DISCARD);
+ } else {
+ set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
+ }
#ifdef CONFIG_F2FS_FS_XATTR
set_opt(sbi, XATTR_USER);
#ifdef CONFIG_F2FS_FS_POSIX_ACL
set_opt(sbi, POSIX_ACL);
#endif
+
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ f2fs_build_fault_attr(sbi, 0);
+#endif
}
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
struct f2fs_mount_info org_mount_opt;
+ unsigned long old_sb_flags;
int err, active_logs;
bool need_restart_gc = false;
bool need_stop_gc = false;
bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
-
- sync_filesystem(sb);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ struct f2fs_fault_info ffi = sbi->fault_info;
+#endif
+#ifdef CONFIG_QUOTA
+ int s_jquota_fmt;
+ char *s_qf_names[MAXQUOTAS];
+ int i, j;
+#endif
/*
* Save the old mount options in case we
* need to restore them.
*/
org_mount_opt = sbi->mount_opt;
+ old_sb_flags = sb->s_flags;
active_logs = sbi->active_logs;
- sbi->mount_opt.opt = 0;
+#ifdef CONFIG_QUOTA
+ s_jquota_fmt = sbi->s_jquota_fmt;
+ for (i = 0; i < MAXQUOTAS; i++) {
+ if (sbi->s_qf_names[i]) {
+ s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
+ GFP_KERNEL);
+ if (!s_qf_names[i]) {
+ for (j = 0; j < i; j++)
+ kfree(s_qf_names[j]);
+ return -ENOMEM;
+ }
+ } else {
+ s_qf_names[i] = NULL;
+ }
+ }
+#endif
+
+ /* recover superblocks we couldn't write due to previous RO mount */
+ if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
+ err = f2fs_commit_super(sbi, false);
+ f2fs_msg(sb, KERN_INFO,
+ "Try to recover all the superblocks, ret: %d", err);
+ if (!err)
+ clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
+ }
+
default_options(sbi);
/* parse mount options */
if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
goto skip;
+ if (!f2fs_readonly(sb) && (*flags & MS_RDONLY)) {
+ err = dquot_suspend(sb, -1);
+ if (err < 0)
+ goto restore_opts;
+ } else {
+ /* dquot_resume needs RW */
+ sb->s_flags &= ~MS_RDONLY;
+ dquot_resume(sb, -1);
+ }
+
/* disallow enable/disable extent_cache dynamically */
if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
err = -EINVAL;
if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
if (sbi->gc_thread) {
stop_gc_thread(sbi);
- f2fs_sync_fs(sb, 1);
need_restart_gc = true;
}
} else if (!sbi->gc_thread) {
need_stop_gc = true;
}
- /*
- * We stop issue flush thread if FS is mounted as RO
- * or if flush_merge is not passed in mount option.
- */
- if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
- destroy_flush_cmd_control(sbi);
- } else if (!SM_I(sbi)->cmd_control_info) {
- err = create_flush_cmd_control(sbi);
- if (err)
- goto restore_gc;
- }
-skip:
- /* Update the POSIXACL Flag */
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
- (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
+ if (*flags & MS_RDONLY) {
+ writeback_inodes_sb(sb, WB_REASON_SYNC);
+ sync_inodes_sb(sb);
+
+ set_sbi_flag(sbi, SBI_IS_DIRTY);
+ set_sbi_flag(sbi, SBI_IS_CLOSE);
+ f2fs_sync_fs(sb, 1);
+ clear_sbi_flag(sbi, SBI_IS_CLOSE);
+ }
+
+ /*
+ * We stop the issue_flush thread if the FS is mounted as RO
+ * or if flush_merge is not passed as a mount option.
+ */
+ if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
+ clear_opt(sbi, FLUSH_MERGE);
+ destroy_flush_cmd_control(sbi, false);
+ } else {
+ err = create_flush_cmd_control(sbi);
+ if (err)
+ goto restore_gc;
+ }
+skip:
+#ifdef CONFIG_QUOTA
+ /* Release old quota file names */
+ for (i = 0; i < MAXQUOTAS; i++)
+ kfree(s_qf_names[i]);
+#endif
+ /* Update the POSIXACL Flag */
+ sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
+
+ return 0;
+restore_gc:
+ if (need_restart_gc) {
+ if (start_gc_thread(sbi))
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "background gc thread has stopped");
+ } else if (need_stop_gc) {
+ stop_gc_thread(sbi);
+ }
+restore_opts:
+#ifdef CONFIG_QUOTA
+ sbi->s_jquota_fmt = s_jquota_fmt;
+ for (i = 0; i < MAXQUOTAS; i++) {
+ kfree(sbi->s_qf_names[i]);
+ sbi->s_qf_names[i] = s_qf_names[i];
+ }
+#endif
+ sbi->mount_opt = org_mount_opt;
+ sbi->active_logs = active_logs;
+ sb->s_flags = old_sb_flags;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ sbi->fault_info = ffi;
+#endif
+ return err;
+}
+
+#ifdef CONFIG_QUOTA
+/* Read data from quotafile */
+static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
+ size_t len, loff_t off)
+{
+ struct inode *inode = sb_dqopt(sb)->files[type];
+ struct address_space *mapping = inode->i_mapping;
+ block_t blkidx = F2FS_BYTES_TO_BLK(off);
+ int offset = off & (sb->s_blocksize - 1);
+ int tocopy;
+ size_t toread;
+ loff_t i_size = i_size_read(inode);
+ struct page *page;
+ char *kaddr;
+
+ if (off > i_size)
+ return 0;
+
+ if (off + len > i_size)
+ len = i_size - off;
+ toread = len;
+ while (toread > 0) {
+ tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
+repeat:
+ page = read_mapping_page(mapping, blkidx, NULL);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ lock_page(page);
+
+ if (unlikely(page->mapping != mapping)) {
+ f2fs_put_page(page, 1);
+ goto repeat;
+ }
+ if (unlikely(!PageUptodate(page))) {
+ f2fs_put_page(page, 1);
+ return -EIO;
+ }
+
+ kaddr = kmap_atomic(page);
+ memcpy(data, kaddr + offset, tocopy);
+ kunmap_atomic(kaddr);
+ f2fs_put_page(page, 1);
+
+ offset = 0;
+ toread -= tocopy;
+ data += tocopy;
+ blkidx++;
+ }
+ return len;
+}
+
+/* Write to quotafile */
+static ssize_t f2fs_quota_write(struct super_block *sb, int type,
+ const char *data, size_t len, loff_t off)
+{
+ struct inode *inode = sb_dqopt(sb)->files[type];
+ struct address_space *mapping = inode->i_mapping;
+ const struct address_space_operations *a_ops = mapping->a_ops;
+ int offset = off & (sb->s_blocksize - 1);
+ size_t towrite = len;
+ struct page *page;
+ char *kaddr;
+ int err = 0;
+ int tocopy;
+
+ while (towrite > 0) {
+ tocopy = min_t(unsigned long, sb->s_blocksize - offset,
+ towrite);
+
+ err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
+ &page, NULL);
+ if (unlikely(err))
+ break;
+
+ kaddr = kmap_atomic(page);
+ memcpy(kaddr + offset, data, tocopy);
+ kunmap_atomic(kaddr);
+ flush_dcache_page(page);
+
+ a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
+ page, NULL);
+ offset = 0;
+ towrite -= tocopy;
+ off += tocopy;
+ data += tocopy;
+ cond_resched();
+ }
+
+ if (len == towrite)
+ return 0;
+ inode->i_version++;
+ inode->i_mtime = inode->i_ctime = current_time(inode);
+ f2fs_mark_inode_dirty_sync(inode, false);
+ return len - towrite;
+}
+
+static struct dquot **f2fs_get_dquots(struct inode *inode)
+{
+ return F2FS_I(inode)->i_dquot;
+}
+
+static qsize_t *f2fs_get_reserved_space(struct inode *inode)
+{
+ return &F2FS_I(inode)->i_reserved_quota;
+}
+
+static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
+{
+ return dquot_quota_on_mount(sbi->sb, sbi->s_qf_names[type],
+ sbi->s_jquota_fmt, type);
+}
+
+void f2fs_enable_quota_files(struct f2fs_sb_info *sbi)
+{
+ int i, ret;
+
+ for (i = 0; i < MAXQUOTAS; i++) {
+ if (sbi->s_qf_names[i]) {
+ ret = f2fs_quota_on_mount(sbi, i);
+ if (ret < 0)
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Cannot turn on journaled "
+ "quota: error %d", ret);
+ }
+ }
+}
+
+static int f2fs_quota_sync(struct super_block *sb, int type)
+{
+ struct quota_info *dqopt = sb_dqopt(sb);
+ int cnt;
+ int ret;
+
+ ret = dquot_writeback_dquots(sb, type);
+ if (ret)
+ return ret;
+
+ /*
+ * Now that everything is written, we can discard the pagecache so
+ * that userspace sees the changes.
+ */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (type != -1 && cnt != type)
+ continue;
+ if (!sb_has_quota_active(sb, cnt))
+ continue;
+
+ ret = filemap_write_and_wait(dqopt->files[cnt]->i_mapping);
+ if (ret)
+ return ret;
+
+ inode_lock(dqopt->files[cnt]);
+ truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
+ inode_unlock(dqopt->files[cnt]);
+ }
+ return 0;
+}
+
+static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
+ struct path *path)
+{
+ struct inode *inode;
+ int err;
+
+ err = f2fs_quota_sync(sb, type);
+ if (err)
+ return err;
+
+ err = dquot_quota_on(sb, type, format_id, path);
+ if (err)
+ return err;
+
+ inode = d_inode(path->dentry);
+
+ inode_lock(inode);
+ F2FS_I(inode)->i_flags |= FS_NOATIME_FL | FS_IMMUTABLE_FL;
+ inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
+ S_NOATIME | S_IMMUTABLE);
+ inode_unlock(inode);
+ f2fs_mark_inode_dirty_sync(inode, false);
+
return 0;
-restore_gc:
- if (need_restart_gc) {
- if (start_gc_thread(sbi))
- f2fs_msg(sbi->sb, KERN_WARNING,
- "background gc thread has stopped");
- } else if (need_stop_gc) {
- stop_gc_thread(sbi);
- }
-restore_opts:
- sbi->mount_opt = org_mount_opt;
- sbi->active_logs = active_logs;
+}
+
+static int f2fs_quota_off(struct super_block *sb, int type)
+{
+ struct inode *inode = sb_dqopt(sb)->files[type];
+ int err;
+
+ if (!inode || !igrab(inode))
+ return dquot_quota_off(sb, type);
+
+ f2fs_quota_sync(sb, type);
+
+ err = dquot_quota_off(sb, type);
+ if (err)
+ goto out_put;
+
+ inode_lock(inode);
+ F2FS_I(inode)->i_flags &= ~(FS_NOATIME_FL | FS_IMMUTABLE_FL);
+ inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
+ inode_unlock(inode);
+ f2fs_mark_inode_dirty_sync(inode, false);
+out_put:
+ iput(inode);
return err;
}
-static struct super_operations f2fs_sops = {
+void f2fs_quota_off_umount(struct super_block *sb)
+{
+ int type;
+
+ for (type = 0; type < MAXQUOTAS; type++)
+ f2fs_quota_off(sb, type);
+}
+
+#if 0
+int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
+{
+ *projid = F2FS_I(inode)->i_projid;
+ return 0;
+}
+#endif
+
+static const struct dquot_operations f2fs_quota_operations = {
+ .get_reserved_space = f2fs_get_reserved_space,
+ .write_dquot = dquot_commit,
+ .acquire_dquot = dquot_acquire,
+ .release_dquot = dquot_release,
+ .mark_dirty = dquot_mark_dquot_dirty,
+ .write_info = dquot_commit_info,
+ .alloc_dquot = dquot_alloc,
+ .destroy_dquot = dquot_destroy,
+#if 0
+ .get_projid = f2fs_get_projid,
+ .get_next_id = dquot_get_next_id,
+#endif
+};
+
+static const struct quotactl_ops f2fs_quotactl_ops = {
+ .quota_on = f2fs_quota_on,
+ .quota_off = f2fs_quota_off,
+ .quota_sync = f2fs_quota_sync,
+ .get_state = dquot_get_state,
+ .set_info = dquot_set_dqinfo,
+ .get_dqblk = dquot_get_dqblk,
+ .set_dqblk = dquot_set_dqblk,
+};
+#else
+void f2fs_quota_off_umount(struct super_block *sb)
+{
+}
+#endif
+
+static const struct super_operations f2fs_sops = {
.alloc_inode = f2fs_alloc_inode,
.drop_inode = f2fs_drop_inode,
.destroy_inode = f2fs_destroy_inode,
.write_inode = f2fs_write_inode,
.dirty_inode = f2fs_dirty_inode,
.show_options = f2fs_show_options,
+#ifdef CONFIG_QUOTA
+ .quota_read = f2fs_quota_read,
+ .quota_write = f2fs_quota_write,
+ .get_dquots = f2fs_get_dquots,
+#endif
.evict_inode = f2fs_evict_inode,
.put_super = f2fs_put_super,
.sync_fs = f2fs_sync_fs,
.remount_fs = f2fs_remount,
};
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
+{
+ return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
+ F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
+ ctx, len, NULL);
+}
+
+static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
+ void *fs_data)
+{
+ return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
+ F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
+ ctx, len, fs_data, XATTR_CREATE);
+}
+
+static unsigned f2fs_max_namelen(struct inode *inode)
+{
+ return S_ISLNK(inode->i_mode) ?
+ inode->i_sb->s_blocksize : F2FS_NAME_LEN;
+}
+
+static const struct fscrypt_operations f2fs_cryptops = {
+ .key_prefix = "f2fs:",
+ .get_context = f2fs_get_context,
+ .set_context = f2fs_set_context,
+ .is_encrypted = f2fs_encrypted_inode,
+ .empty_dir = f2fs_empty_dir,
+ .max_namelen = f2fs_max_namelen,
+};
+#else
+static const struct fscrypt_operations f2fs_cryptops = {
+ .is_encrypted = f2fs_encrypted_inode,
+};
+#endif
+
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
u64 ino, u32 generation)
{
.get_parent = f2fs_get_parent,
};
-loff_t max_file_size(unsigned bits)
+static loff_t max_file_blocks(void)
{
- loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS);
+ loff_t result = 0;
loff_t leaf_count = ADDRS_PER_BLOCK;
+ /*
+ * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
+ * F2FS_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
+ * space in inode.i_addr, so it is safer to reinitialize
+ * result to zero.
+ */
+
/* two direct node blocks */
result += (leaf_count * 2);
leaf_count *= NIDS_PER_BLOCK;
result += leaf_count;
- result <<= bits;
return result;
}
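
Counting in blocks (the caller multiplies by the block size later) keeps the
arithmetic overflow-safe. Including the indirect levels elided from this hunk,
and assuming the usual ADDRS_PER_BLOCK = NIDS_PER_BLOCK = 1018, the worked
total is:

  2 * 1018                 (two direct node blocks)
+ 2 * 1018 * 1018          (two indirect node blocks)
+ 1018 * 1018 * 1018       (one double-indirect node block)
= 1,057,052,516 blocks, i.e. about 3.94TB with 4KB blocks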
-static inline bool sanity_check_area_boundary(struct super_block *sb,
- struct f2fs_super_block *raw_super)
+static int __f2fs_commit_super(struct buffer_head *bh,
+ struct f2fs_super_block *super)
+{
+ lock_buffer(bh);
+ if (super)
+ memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
+ set_buffer_uptodate(bh);
+ set_buffer_dirty(bh);
+ unlock_buffer(bh);
+
+ /* it's a rare case, so we can do FUA all the time */
+ return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
+}
+
+static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
+ struct buffer_head *bh)
{
+ struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
+ (bh->b_data + F2FS_SUPER_OFFSET);
+ struct super_block *sb = sbi->sb;
u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
u32 segment_count = le32_to_cpu(raw_super->segment_count);
u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+ u64 main_end_blkaddr = main_blkaddr +
+ (segment_count_main << log_blocks_per_seg);
+ u64 seg_end_blkaddr = segment0_blkaddr +
+ (segment_count << log_blocks_per_seg);
if (segment0_blkaddr != cp_blkaddr) {
f2fs_msg(sb, KERN_INFO,
return true;
}
- if (main_blkaddr + (segment_count_main << log_blocks_per_seg) !=
- segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
+ if (main_end_blkaddr > seg_end_blkaddr) {
f2fs_msg(sb, KERN_INFO,
- "Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)",
+ "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
main_blkaddr,
- segment0_blkaddr + (segment_count << log_blocks_per_seg),
+ segment0_blkaddr +
+ (segment_count << log_blocks_per_seg),
segment_count_main << log_blocks_per_seg);
return true;
- }
+ } else if (main_end_blkaddr < seg_end_blkaddr) {
+ int err = 0;
+ char *res;
+
+ /* fix in-memory information all the time */
+ raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
+ segment0_blkaddr) >> log_blocks_per_seg);
+ if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
+ set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
+ res = "internally";
+ } else {
+ err = __f2fs_commit_super(bh, NULL);
+ res = err ? "failed" : "done";
+ }
+ f2fs_msg(sb, KERN_INFO,
+ "Fix alignment : %s, start(%u) end(%u) block(%u)",
+ res, main_blkaddr,
+ segment0_blkaddr +
+ (segment_count << log_blocks_per_seg),
+ segment_count_main << log_blocks_per_seg);
+ if (err)
+ return true;
+ }
return false;
}
-static int sanity_check_raw_super(struct super_block *sb,
- struct f2fs_super_block *raw_super)
+static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
+ struct buffer_head *bh)
{
+ struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
+ (bh->b_data + F2FS_SUPER_OFFSET);
+ struct super_block *sb = sbi->sb;
unsigned int blocksize;
if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
}
/* Currently, support only 4KB page cache size */
- if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
+ if (F2FS_BLKSIZE != PAGE_SIZE) {
f2fs_msg(sb, KERN_INFO,
"Invalid page_cache_size (%lu), supports only 4KB\n",
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
return 1;
}
}
/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
- if (sanity_check_area_boundary(sb, raw_super))
+ if (sanity_check_area_boundary(sbi, bh))
return 1;
return 0;
}
-static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
+int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
unsigned int total, fsmeta;
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ unsigned int ovp_segments, reserved_segments;
unsigned int main_segs, blocks_per_seg;
int i;
if (unlikely(fsmeta >= total))
return 1;
- main_segs = le32_to_cpu(sbi->raw_super->segment_count_main);
+ ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
+ reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
+
+ if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
+ ovp_segments == 0 || reserved_segments == 0)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Wrong layout: check mkfs.f2fs version");
+ return 1;
+ }
+
+ main_segs = le32_to_cpu(raw_super->segment_count_main);
blocks_per_seg = sbi->blocks_per_seg;
for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
- le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg) {
+ le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
return 1;
- }
}
for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
- le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg) {
+ le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
return 1;
- }
}
if (unlikely(f2fs_cp_error(sbi))) {
static void init_sb_info(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = sbi->raw_super;
- int i;
+ int i, j;
sbi->log_sectors_per_block =
le32_to_cpu(raw_super->log_sectors_per_block);
sbi->cur_victim_sec = NULL_SECNO;
sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
+ sbi->dir_level = DEF_DIR_LEVEL;
+ sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
+ sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
+ clear_sbi_flag(sbi, SBI_NEED_FSCK);
+
for (i = 0; i < NR_COUNT_TYPE; i++)
atomic_set(&sbi->nr_pages[i], 0);
- sbi->dir_level = DEF_DIR_LEVEL;
- sbi->cp_interval = DEF_CP_INTERVAL;
- clear_sbi_flag(sbi, SBI_NEED_FSCK);
+ atomic_set(&sbi->wb_sync_req, 0);
INIT_LIST_HEAD(&sbi->s_list);
mutex_init(&sbi->umount_mutex);
+ for (i = 0; i < NR_PAGE_TYPE - 1; i++)
+ for (j = HOT; j < NR_TEMP_TYPE; j++)
+ mutex_init(&sbi->wio_mutex[i][j]);
+ spin_lock_init(&sbi->cp_lock);
+}
+
+static int init_percpu_info(struct f2fs_sb_info *sbi)
+{
+ int err;
+
+ err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
+ if (err)
+ return err;
+
+ return percpu_counter_init(&sbi->total_valid_inode_count, 0,
+ GFP_KERNEL);
+}
+
+#ifdef CONFIG_BLK_DEV_ZONED
+static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
+{
+ struct block_device *bdev = FDEV(devi).bdev;
+ sector_t nr_sectors = bdev->bd_part->nr_sects;
+ sector_t sector = 0;
+ struct blk_zone *zones;
+ unsigned int i, nr_zones;
+ unsigned int n = 0;
+ int err = -EIO;
+
+ if (!f2fs_sb_mounted_blkzoned(sbi->sb))
+ return 0;
+
+ if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
+ SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
+ return -EINVAL;
+ sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
+ if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
+ __ilog2_u32(sbi->blocks_per_blkz))
+ return -EINVAL;
+ sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
+ FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
+ sbi->log_blocks_per_blkz;
+ if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
+ FDEV(devi).nr_blkz++;
+
+ FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
+ if (!FDEV(devi).blkz_type)
+ return -ENOMEM;
+
+#define F2FS_REPORT_NR_ZONES 4096
+
+ zones = kcalloc(F2FS_REPORT_NR_ZONES, sizeof(struct blk_zone),
+ GFP_KERNEL);
+ if (!zones)
+ return -ENOMEM;
+
+ /* Get block zones type */
+ while (zones && sector < nr_sectors) {
+
+ nr_zones = F2FS_REPORT_NR_ZONES;
+ err = blkdev_report_zones(bdev, sector,
+ zones, &nr_zones,
+ GFP_KERNEL);
+ if (err)
+ break;
+ if (!nr_zones) {
+ err = -EIO;
+ break;
+ }
+
+ for (i = 0; i < nr_zones; i++) {
+ FDEV(devi).blkz_type[n] = zones[i].type;
+ sector += zones[i].len;
+ n++;
+ }
+ }
+
+ kfree(zones);
+
+ return err;
}
+#endif
/*
* Read f2fs raw super block.
- * Because we have two copies of super block, so read the first one at first,
- * if the first one is invalid, move to read the second one.
+ * Because we have two copies of the super block, read both of them
+ * to get the first valid one. If either copy is broken, we pass a
+ * recovery flag back to the caller.
*/
-static int read_raw_super_block(struct super_block *sb,
+static int read_raw_super_block(struct f2fs_sb_info *sbi,
struct f2fs_super_block **raw_super,
- struct buffer_head **raw_super_buf,
- int *recovery)
+ int *valid_super_block, int *recovery)
{
- int block = 0;
- struct buffer_head *buffer;
+ struct super_block *sb = sbi->sb;
+ int block;
+ struct buffer_head *bh;
struct f2fs_super_block *super;
int err = 0;
-retry:
- buffer = sb_bread(sb, block);
- if (!buffer) {
- *recovery = 1;
- f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
+ super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
+ if (!super)
+ return -ENOMEM;
+
+ for (block = 0; block < 2; block++) {
+ bh = sb_bread(sb, block);
+ if (!bh) {
+ f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
block + 1);
- if (block == 0) {
- block++;
- goto retry;
- } else {
err = -EIO;
- goto out;
+ continue;
}
- }
-
- super = (struct f2fs_super_block *)
- ((char *)(buffer)->b_data + F2FS_SUPER_OFFSET);
- /* sanity checking of raw super */
- if (sanity_check_raw_super(sb, super)) {
- brelse(buffer);
- *recovery = 1;
- f2fs_msg(sb, KERN_ERR,
- "Can't find valid F2FS filesystem in %dth superblock",
- block + 1);
- if (block == 0) {
- block++;
- goto retry;
- } else {
+ /* sanity checking of raw super */
+ if (sanity_check_raw_super(sbi, bh)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Can't find valid F2FS filesystem in %dth superblock",
+ block + 1);
err = -EINVAL;
- goto out;
+ brelse(bh);
+ continue;
}
- }
- if (!*raw_super) {
- *raw_super_buf = buffer;
- *raw_super = super;
- } else {
- /* already have a valid superblock */
- brelse(buffer);
+ if (!*raw_super) {
+ memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
+ sizeof(*super));
+ *valid_super_block = block;
+ *raw_super = super;
+ }
+ brelse(bh);
}
- /* check the validity of the second superblock */
- if (block == 0) {
- block++;
- goto retry;
- }
+ /* Failed to read or validate at least one of the superblocks */
+ if (err < 0)
+ *recovery = 1;
-out:
/* No valid superblock */
if (!*raw_super)
- return err;
+ kfree(super);
+ else
+ err = 0;
- return 0;
+ return err;
}
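
The selection policy of the rewritten loop reduces to the following minimal, self-contained sketch (userspace C; the ok[] array and all names here are illustrative stand-ins for the sb_bread() and sanity_check_raw_super() results, not kernel code):

#include <errno.h>
#include <stdbool.h>

static int pick_super(const bool ok[2], int *valid_block, int *recovery)
{
	int block, err = 0;

	*valid_block = -1;
	for (block = 0; block < 2; block++) {
		if (!ok[block]) {		/* read or sanity check failed */
			err = -EIO;
			continue;
		}
		if (*valid_block < 0)		/* first valid copy wins */
			*valid_block = block;
	}
	if (err)
		*recovery = 1;	/* ask the caller to rewrite the bad copy */
	return *valid_block >= 0 ? 0 : err;
}
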
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
- struct buffer_head *sbh = sbi->raw_super_buf;
- sector_t block = sbh->b_blocknr;
+ struct buffer_head *bh;
int err;
- /* write back-up superblock first */
- sbh->b_blocknr = block ? 0 : 1;
- mark_buffer_dirty(sbh);
- err = sync_dirty_buffer(sbh);
+ if ((recover && f2fs_readonly(sbi->sb)) ||
+ bdev_read_only(sbi->sb->s_bdev)) {
+ set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
+ return -EROFS;
+ }
- sbh->b_blocknr = block;
+ /* write back-up superblock first */
+ bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0 : 1);
+ if (!bh)
+ return -EIO;
+ err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
+ brelse(bh);
/* if we are in recovery path, skip writing valid superblock */
if (recover || err)
- goto out;
+ return err;
/* write current valid superblock */
- mark_buffer_dirty(sbh);
- err = sync_dirty_buffer(sbh);
-out:
- clear_buffer_write_io_error(sbh);
- set_buffer_uptodate(sbh);
+ bh = sb_getblk(sbi->sb, sbi->valid_super_block);
+ if (!bh)
+ return -EIO;
+ err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
+ brelse(bh);
return err;
}
+static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
+ unsigned int max_devices = MAX_DEVICES;
+ int i;
+
+ /* Initialize single device information */
+ if (!RDEV(0).path[0]) {
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (!bdev_is_zoned(sbi->sb->s_bdev))
+ return 0;
+ max_devices = 1;
+#else
+ return 0;
+#endif
+ }
+
+ /*
+ * Initialize multiple devices information, or single
+ * zoned block device information.
+ */
+ sbi->devs = kcalloc(max_devices, sizeof(struct f2fs_dev_info),
+ GFP_KERNEL);
+ if (!sbi->devs)
+ return -ENOMEM;
+
+ for (i = 0; i < max_devices; i++) {
+
+ if (i > 0 && !RDEV(i).path[0])
+ break;
+
+ if (max_devices == 1) {
+ /* Single zoned block device mount */
+ FDEV(0).bdev =
+ blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
+ sbi->sb->s_mode, sbi->sb->s_type);
+ } else {
+ /* Multi-device mount */
+ memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
+ FDEV(i).total_segments =
+ le32_to_cpu(RDEV(i).total_segments);
+ if (i == 0) {
+ FDEV(i).start_blk = 0;
+ FDEV(i).end_blk = FDEV(i).start_blk +
+ (FDEV(i).total_segments <<
+ sbi->log_blocks_per_seg) - 1 +
+ le32_to_cpu(raw_super->segment0_blkaddr);
+ } else {
+ FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
+ FDEV(i).end_blk = FDEV(i).start_blk +
+ (FDEV(i).total_segments <<
+ sbi->log_blocks_per_seg) - 1;
+ }
+ FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
+ sbi->sb->s_mode, sbi->sb->s_type);
+ }
+ if (IS_ERR(FDEV(i).bdev))
+ return PTR_ERR(FDEV(i).bdev);
+
+ /* to release errored devices */
+ sbi->s_ndevs = i + 1;
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
+ !f2fs_sb_mounted_blkzoned(sbi->sb)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Zoned block device feature not enabled\n");
+ return -EINVAL;
+ }
+ if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
+ if (init_blkz_info(sbi, i)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Failed to initialize F2FS blkzone information");
+ return -EINVAL;
+ }
+ if (max_devices == 1)
+ break;
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
+ i, FDEV(i).path,
+ FDEV(i).total_segments,
+ FDEV(i).start_blk, FDEV(i).end_blk,
+ bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
+ "Host-aware" : "Host-managed");
+ continue;
+ }
+#endif
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Mount Device [%2d]: %20s, %8u, %8x - %8x",
+ i, FDEV(i).path,
+ FDEV(i).total_segments,
+ FDEV(i).start_blk, FDEV(i).end_blk);
+ }
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
+ return 0;
+}
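
To make the start_blk/end_blk arithmetic concrete, here is a worked example with made-up geometry (segment0_blkaddr = 512, log_blocks_per_seg = 9, i.e. 512 blocks per segment, and two devices of 1024 segments each):

/*
 * dev 0: start_blk = 0
 *        end_blk   = 0 + (1024 << 9) - 1 + 512 = 524799
 * dev 1: start_blk = 524799 + 1               = 524800
 *        end_blk   = 524800 + (1024 << 9) - 1 = 1049087
 *
 * Device 0 additionally covers the metadata area in front of main
 * segment 0; each later device starts right after its predecessor.
 */
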
+
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
struct f2fs_sb_info *sbi;
struct f2fs_super_block *raw_super;
- struct buffer_head *raw_super_buf;
struct inode *root;
- long err;
+ int err;
bool retry = true, need_fsck = false;
char *options = NULL;
- int recovery, i;
+ int recovery, i, valid_super_block;
+ struct curseg_info *seg_i;
try_onemore:
err = -EINVAL;
raw_super = NULL;
- raw_super_buf = NULL;
+ valid_super_block = -1;
recovery = 0;
/* allocate memory for f2fs-specific super block info */
if (!sbi)
return -ENOMEM;
+ sbi->sb = sb;
+
+ /* Load the checksum driver */
+ sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
+ if (IS_ERR(sbi->s_chksum_driver)) {
+ f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
+ err = PTR_ERR(sbi->s_chksum_driver);
+ sbi->s_chksum_driver = NULL;
+ goto free_sbi;
+ }
+
/* set a block size */
if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
goto free_sbi;
}
- err = read_raw_super_block(sb, &raw_super, &raw_super_buf, &recovery);
+ err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
+ &recovery);
if (err)
goto free_sbi;
sb->s_fs_info = sbi;
+ sbi->raw_super = raw_super;
+
+ /* precompute checksum seed for metadata */
+ if (f2fs_sb_has_inode_chksum(sb))
+ sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
+ sizeof(raw_super->uuid));
+
+ /*
+ * The BLKZONED feature indicates that the drive was formatted with
+ * zone alignment optimization. This is optional for host-aware
+ * devices, but mandatory for host-managed zoned block devices.
+ */
+#ifndef CONFIG_BLK_DEV_ZONED
+ if (f2fs_sb_mounted_blkzoned(sb)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Zoned block device support is not enabled\n");
+ err = -EOPNOTSUPP;
+ goto free_sb_buf;
+ }
+#endif
default_options(sbi);
/* parse mount options */
options = kstrdup((const char *)data, GFP_KERNEL);
if (err)
goto free_options;
- sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
+ sbi->max_file_blocks = max_file_blocks();
+ sb->s_maxbytes = sbi->max_file_blocks <<
+ le32_to_cpu(raw_super->log_blocksize);
sb->s_max_links = F2FS_LINK_MAX;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
+#ifdef CONFIG_QUOTA
+ sb->dq_op = &f2fs_quota_operations;
+ sb->s_qcop = &f2fs_quotactl_ops;
+ sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
+#endif
+
sb->s_op = &f2fs_sops;
+ sb->s_cop = &f2fs_cryptops;
sb->s_xattr = f2fs_xattr_handlers;
sb->s_export_op = &f2fs_export_ops;
sb->s_magic = F2FS_SUPER_MAGIC;
memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
/* init f2fs-specific super block info */
- sbi->sb = sb;
- sbi->raw_super = raw_super;
- sbi->raw_super_buf = raw_super_buf;
+ sbi->valid_super_block = valid_super_block;
mutex_init(&sbi->gc_mutex);
- mutex_init(&sbi->writepages);
mutex_init(&sbi->cp_mutex);
init_rwsem(&sbi->node_write);
+ init_rwsem(&sbi->node_change);
/* disallow all the data/node/meta page writes */
set_sbi_flag(sbi, SBI_POR_DOING);
spin_lock_init(&sbi->stat_lock);
- init_rwsem(&sbi->read_io.io_rwsem);
- sbi->read_io.sbi = sbi;
- sbi->read_io.bio = NULL;
+ /* init iostat info */
+ spin_lock_init(&sbi->iostat_lock);
+ sbi->iostat_enable = false;
+
for (i = 0; i < NR_PAGE_TYPE; i++) {
- init_rwsem(&sbi->write_io[i].io_rwsem);
- sbi->write_io[i].sbi = sbi;
- sbi->write_io[i].bio = NULL;
+ int n = (i == META) ? 1 : NR_TEMP_TYPE;
+ int j;
+
+ sbi->write_io[i] = kmalloc(n * sizeof(struct f2fs_bio_info),
+ GFP_KERNEL);
+ if (!sbi->write_io[i]) {
+ err = -ENOMEM;
+ goto free_options;
+ }
+
+ for (j = HOT; j < n; j++) {
+ init_rwsem(&sbi->write_io[i][j].io_rwsem);
+ sbi->write_io[i][j].sbi = sbi;
+ sbi->write_io[i][j].bio = NULL;
+ spin_lock_init(&sbi->write_io[i][j].io_lock);
+ INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
+ }
}
init_rwsem(&sbi->cp_rwsem);
init_waitqueue_head(&sbi->cp_wait);
init_sb_info(sbi);
+ err = init_percpu_info(sbi);
+ if (err)
+ goto free_options;
+
+ if (F2FS_IO_SIZE(sbi) > 1) {
+ sbi->write_io_dummy =
+ mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
+ if (!sbi->write_io_dummy) {
+ err = -ENOMEM;
+ goto free_options;
+ }
+ }
+
/* get an inode for meta space */
sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
if (IS_ERR(sbi->meta_inode)) {
f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
err = PTR_ERR(sbi->meta_inode);
- goto free_options;
+ goto free_io_dummy;
}
err = get_valid_checkpoint(sbi);
goto free_meta_inode;
}
- /* sanity checking of checkpoint */
- err = -EINVAL;
- if (sanity_check_ckpt(sbi)) {
- f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
- goto free_cp;
+ /* Initialize device list */
+ err = f2fs_scan_devices(sbi);
+ if (err) {
+ f2fs_msg(sb, KERN_ERR, "Failed to find devices");
+ goto free_devices;
}
sbi->total_valid_node_count =
le32_to_cpu(sbi->ckpt->valid_node_count);
- sbi->total_valid_inode_count =
- le32_to_cpu(sbi->ckpt->valid_inode_count);
+ percpu_counter_set(&sbi->total_valid_inode_count,
+ le32_to_cpu(sbi->ckpt->valid_inode_count));
sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
sbi->total_valid_block_count =
le64_to_cpu(sbi->ckpt->valid_block_count);
sbi->last_valid_block_count = sbi->total_valid_block_count;
- sbi->alloc_valid_block_count = 0;
- INIT_LIST_HEAD(&sbi->dir_inode_list);
- spin_lock_init(&sbi->dir_inode_lock);
+ sbi->reserved_blocks = 0;
+
+ for (i = 0; i < NR_INODE_TYPE; i++) {
+ INIT_LIST_HEAD(&sbi->inode_list[i]);
+ spin_lock_init(&sbi->inode_lock[i]);
+ }
init_extent_cache_info(sbi);
goto free_nm;
}
+ /* For write statistics */
+ if (sb->s_bdev->bd_part)
+ sbi->sectors_written_start =
+ (u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);
+
+ /* Read accumulated write IO statistics if exists */
+ seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
+ if (__exist_node_summaries(sbi))
+ sbi->kbytes_written =
+ le64_to_cpu(seg_i->journal->info.kbytes_written);
+
build_gc_manager(sbi);
/* get an inode for node space */
f2fs_join_shrinker(sbi);
- /* if there are nt orphan nodes free them */
- err = recover_orphan_inodes(sbi);
+ err = f2fs_build_stats(sbi);
if (err)
- goto free_node_inode;
+ goto free_nm;
/* read root inode and dentry */
root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
goto free_root_inode;
}
- err = f2fs_build_stats(sbi);
+ err = f2fs_register_sysfs(sbi);
if (err)
goto free_root_inode;
- if (f2fs_proc_root)
- sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
-
- if (sbi->s_proc)
- proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
- &f2fs_seq_segment_info_fops, sb);
-
- sbi->s_kobj.kset = f2fs_kset;
- init_completion(&sbi->s_kobj_unregister);
- err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
- "%s", sb->s_id);
+ /* if there are any orphan nodes, free them */
+ err = recover_orphan_inodes(sbi);
if (err)
- goto free_proc;
+ goto free_sysfs;
/* recover fsynced data */
if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
* previous checkpoint was not done by clean system shutdown.
*/
if (bdev_read_only(sb->s_bdev) &&
- !is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
+ !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
err = -EROFS;
- goto free_kobj;
+ goto free_meta;
}
if (need_fsck)
set_sbi_flag(sbi, SBI_NEED_FSCK);
- err = recover_fsync_data(sbi);
- if (err) {
+ if (!retry)
+ goto skip_recovery;
+
+ err = recover_fsync_data(sbi, false);
+ if (err < 0) {
need_fsck = true;
f2fs_msg(sb, KERN_ERR,
- "Cannot recover all fsync data errno=%ld", err);
- goto free_kobj;
+ "Cannot recover all fsync data errno=%d", err);
+ goto free_meta;
+ }
+ } else {
+ err = recover_fsync_data(sbi, true);
+
+ if (!f2fs_readonly(sb) && err > 0) {
+ err = -EINVAL;
+ f2fs_msg(sb, KERN_ERR,
+ "Need to recover fsync data");
+ goto free_sysfs;
}
}
+skip_recovery:
/* recover_fsync_data() cleared this already */
clear_sbi_flag(sbi, SBI_POR_DOING);
/* After POR, we can run background GC thread.*/
err = start_gc_thread(sbi);
if (err)
- goto free_kobj;
+ goto free_meta;
}
kfree(options);
/* recover broken superblock */
- if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) {
- f2fs_msg(sb, KERN_INFO, "Recover invalid superblock");
- f2fs_commit_super(sbi, true);
+ if (recovery) {
+ err = f2fs_commit_super(sbi, true);
+ f2fs_msg(sb, KERN_INFO,
+ "Try to recover %dth superblock, ret: %d",
+ sbi->valid_super_block ? 1 : 2, err);
}
- sbi->cp_expires = round_jiffies_up(jiffies);
-
+ f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
+ cur_cp_version(F2FS_CKPT(sbi)));
+ f2fs_update_time(sbi, CP_TIME);
+ f2fs_update_time(sbi, REQ_TIME);
return 0;
-free_kobj:
- kobject_del(&sbi->s_kobj);
-free_proc:
- if (sbi->s_proc) {
- remove_proc_entry("segment_info", sbi->s_proc);
- remove_proc_entry(sb->s_id, f2fs_proc_root);
- }
- f2fs_destroy_stats(sbi);
+free_meta:
+ f2fs_sync_inode_meta(sbi);
+ /*
+ * Some dirty meta pages can be produced by recover_orphan_inodes()
+ * when it fails with EIO. Then, iput(node_inode) can trigger balance_fs_bg()
+ * followed by write_checkpoint() through f2fs_write_node_pages(), which
+ * falls into an infinite loop in sync_meta_pages().
+ */
+ truncate_inode_pages_final(META_MAPPING(sbi));
+free_sysfs:
+ f2fs_unregister_sysfs(sbi);
free_root_inode:
dput(sb->s_root);
sb->s_root = NULL;
free_node_inode:
+ truncate_inode_pages_final(NODE_MAPPING(sbi));
mutex_lock(&sbi->umount_mutex);
+ release_ino_entry(sbi, true);
f2fs_leave_shrinker(sbi);
iput(sbi->node_inode);
mutex_unlock(&sbi->umount_mutex);
+ f2fs_destroy_stats(sbi);
free_nm:
destroy_node_manager(sbi);
free_sm:
destroy_segment_manager(sbi);
-free_cp:
+free_devices:
+ destroy_device_list(sbi);
kfree(sbi->ckpt);
free_meta_inode:
make_bad_inode(sbi->meta_inode);
iput(sbi->meta_inode);
+free_io_dummy:
+ mempool_destroy(sbi->write_io_dummy);
free_options:
+ for (i = 0; i < NR_PAGE_TYPE; i++)
+ kfree(sbi->write_io[i]);
+ destroy_percpu_info(sbi);
+#ifdef CONFIG_QUOTA
+ for (i = 0; i < MAXQUOTAS; i++)
+ kfree(sbi->s_qf_names[i]);
+#endif
kfree(options);
free_sb_buf:
- brelse(raw_super_buf);
+ kfree(raw_super);
free_sbi:
+ if (sbi->s_chksum_driver)
+ crypto_free_shash(sbi->s_chksum_driver);
kfree(sbi);
/* give only one another chance */
static void kill_f2fs_super(struct super_block *sb)
{
- if (sb->s_root)
+ if (sb->s_root) {
set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
+ stop_gc_thread(F2FS_SB(sb));
+ stop_discard_thread(F2FS_SB(sb));
+ }
kill_block_super(sb);
}
static int __init init_inodecache(void)
{
- f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",
- sizeof(struct f2fs_inode_info));
+ f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
+ sizeof(struct f2fs_inode_info), 0,
+ SLAB_RECLAIM_ACCOUNT, NULL);
if (!f2fs_inode_cachep)
return -ENOMEM;
return 0;
err = create_extent_cache();
if (err)
goto free_checkpoint_caches;
- f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
- if (!f2fs_kset) {
- err = -ENOMEM;
- goto free_extent_cache;
- }
- err = f2fs_init_crypto();
+ err = f2fs_init_sysfs();
if (err)
- goto free_kset;
-
+ goto free_extent_cache;
err = register_shrinker(&f2fs_shrinker_info);
if (err)
- goto free_crypto;
-
+ goto free_sysfs;
err = register_filesystem(&f2fs_fs_type);
if (err)
goto free_shrinker;
- f2fs_create_root_stats();
- f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
+ err = f2fs_create_root_stats();
+ if (err)
+ goto free_filesystem;
return 0;
+free_filesystem:
+ unregister_filesystem(&f2fs_fs_type);
free_shrinker:
unregister_shrinker(&f2fs_shrinker_info);
-free_crypto:
- f2fs_exit_crypto();
-free_kset:
- kset_unregister(f2fs_kset);
+free_sysfs:
+ f2fs_exit_sysfs();
free_extent_cache:
destroy_extent_cache();
free_checkpoint_caches:
static void __exit exit_f2fs_fs(void)
{
- remove_proc_entry("fs/f2fs", NULL);
f2fs_destroy_root_stats();
- unregister_shrinker(&f2fs_shrinker_info);
unregister_filesystem(&f2fs_fs_type);
- f2fs_exit_crypto();
+ unregister_shrinker(&f2fs_shrinker_info);
+ f2fs_exit_sysfs();
destroy_extent_cache();
destroy_checkpoint_caches();
destroy_segment_manager_caches();
destroy_node_manager_caches();
destroy_inodecache();
- kset_unregister(f2fs_kset);
f2fs_destroy_trace_ios();
}
MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");
+
--- /dev/null
+/*
+ * f2fs sysfs interface
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ * Copyright (c) 2017 Chao Yu <chao@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/proc_fs.h>
+#include <linux/f2fs_fs.h>
+#include <linux/seq_file.h>
+
+#include "f2fs.h"
+#include "segment.h"
+#include "gc.h"
+
+static struct proc_dir_entry *f2fs_proc_root;
+
+/* Sysfs support for f2fs */
+enum {
+ GC_THREAD, /* struct f2fs_gc_thread */
+ SM_INFO, /* struct f2fs_sm_info */
+ DCC_INFO, /* struct discard_cmd_control */
+ NM_INFO, /* struct f2fs_nm_info */
+ F2FS_SBI, /* struct f2fs_sb_info */
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ FAULT_INFO_RATE, /* struct f2fs_fault_info */
+ FAULT_INFO_TYPE, /* struct f2fs_fault_info */
+#endif
+ RESERVED_BLOCKS,
+};
+
+struct f2fs_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
+ ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
+ const char *, size_t);
+ int struct_type;
+ int offset;
+ int id;
+};
+
+static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
+{
+ if (struct_type == GC_THREAD)
+ return (unsigned char *)sbi->gc_thread;
+ else if (struct_type == SM_INFO)
+ return (unsigned char *)SM_I(sbi);
+ else if (struct_type == DCC_INFO)
+ return (unsigned char *)SM_I(sbi)->dcc_info;
+ else if (struct_type == NM_INFO)
+ return (unsigned char *)NM_I(sbi);
+ else if (struct_type == F2FS_SBI || struct_type == RESERVED_BLOCKS)
+ return (unsigned char *)sbi;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ else if (struct_type == FAULT_INFO_RATE ||
+ struct_type == FAULT_INFO_TYPE)
+ return (unsigned char *)&sbi->fault_info;
+#endif
+ return NULL;
+}
+
+static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ struct super_block *sb = sbi->sb;
+
+ if (!sb->s_bdev->bd_part)
+ return snprintf(buf, PAGE_SIZE, "0\n");
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ (unsigned long long)(sbi->kbytes_written +
+ BD_PART_WRITTEN(sbi)));
+}
+
+static ssize_t features_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ struct super_block *sb = sbi->sb;
+ int len = 0;
+
+ if (!sb->s_bdev->bd_part)
+ return snprintf(buf, PAGE_SIZE, "0\n");
+
+ if (f2fs_sb_has_crypto(sb))
+ len += snprintf(buf, PAGE_SIZE - len, "%s",
+ "encryption");
+ if (f2fs_sb_mounted_blkzoned(sb))
+ len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len ? ", " : "", "blkzoned");
+ if (f2fs_sb_has_extra_attr(sb))
+ len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len ? ", " : "", "extra_attr");
+ if (f2fs_sb_has_project_quota(sb))
+ len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len ? ", " : "", "projquota");
+ if (f2fs_sb_has_inode_chksum(sb))
+ len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len ? ", " : "", "inode_checksum");
+ len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ return len;
+}
+
+static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ unsigned char *ptr = NULL;
+ unsigned int *ui;
+
+ ptr = __struct_ptr(sbi, a->struct_type);
+ if (!ptr)
+ return -EINVAL;
+
+ ui = (unsigned int *)(ptr + a->offset);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
+}
+
+static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi,
+ const char *buf, size_t count)
+{
+ unsigned char *ptr;
+ unsigned long t;
+ unsigned int *ui;
+ ssize_t ret;
+
+ ptr = __struct_ptr(sbi, a->struct_type);
+ if (!ptr)
+ return -EINVAL;
+
+ ui = (unsigned int *)(ptr + a->offset);
+
+ ret = kstrtoul(skip_spaces(buf), 0, &t);
+ if (ret < 0)
+ return ret;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (a->struct_type == FAULT_INFO_TYPE && t >= (1 << FAULT_MAX))
+ return -EINVAL;
+#endif
+ if (a->struct_type == RESERVED_BLOCKS) {
+ spin_lock(&sbi->stat_lock);
+ if ((unsigned long)sbi->total_valid_block_count + t >
+ (unsigned long)sbi->user_block_count) {
+ spin_unlock(&sbi->stat_lock);
+ return -EINVAL;
+ }
+ *ui = t;
+ spin_unlock(&sbi->stat_lock);
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "discard_granularity")) {
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ int i;
+
+ if (t == 0 || t > MAX_PLIST_NUM)
+ return -EINVAL;
+ if (t == *ui)
+ return count;
+
+ mutex_lock(&dcc->cmd_lock);
+ for (i = 0; i < MAX_PLIST_NUM; i++) {
+ if (i >= t - 1)
+ dcc->pend_list_tag[i] |= P_ACTIVE;
+ else
+ dcc->pend_list_tag[i] &= (~P_ACTIVE);
+ }
+ mutex_unlock(&dcc->cmd_lock);
+
+ *ui = t;
+ return count;
+ }
+
+ *ui = t;
+
+ if (!strcmp(a->attr.name, "iostat_enable") && *ui == 0)
+ f2fs_reset_iostat(sbi);
+ if (!strcmp(a->attr.name, "gc_urgent") && t == 1 && sbi->gc_thread) {
+ sbi->gc_thread->gc_wake = 1;
+ wake_up_interruptible_all(&sbi->gc_thread->gc_wait_queue_head);
+ wake_up_discard_thread(sbi, true);
+ }
+
+ return count;
+}
+
+static ssize_t f2fs_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+ s_kobj);
+ struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
+
+ return a->show ? a->show(a, sbi, buf) : 0;
+}
+
+static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t len)
+{
+ struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+ s_kobj);
+ struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
+
+ return a->store ? a->store(a, sbi, buf, len) : 0;
+}
+
+static void f2fs_sb_release(struct kobject *kobj)
+{
+ struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+ s_kobj);
+ complete(&sbi->s_kobj_unregister);
+}
+
+enum feat_id {
+ FEAT_CRYPTO = 0,
+ FEAT_BLKZONED,
+ FEAT_ATOMIC_WRITE,
+ FEAT_EXTRA_ATTR,
+ FEAT_PROJECT_QUOTA,
+ FEAT_INODE_CHECKSUM,
+};
+
+static ssize_t f2fs_feature_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ switch (a->id) {
+ case FEAT_CRYPTO:
+ case FEAT_BLKZONED:
+ case FEAT_ATOMIC_WRITE:
+ case FEAT_EXTRA_ATTR:
+ case FEAT_PROJECT_QUOTA:
+ case FEAT_INODE_CHECKSUM:
+ return snprintf(buf, PAGE_SIZE, "supported\n");
+ }
+ return 0;
+}
+
+#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
+static struct f2fs_attr f2fs_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+ .struct_type = _struct_type, \
+ .offset = _offset \
+}
+
+#define F2FS_RW_ATTR(struct_type, struct_name, name, elname) \
+ F2FS_ATTR_OFFSET(struct_type, name, 0644, \
+ f2fs_sbi_show, f2fs_sbi_store, \
+ offsetof(struct struct_name, elname))
+
+#define F2FS_GENERAL_RO_ATTR(name) \
+static struct f2fs_attr f2fs_attr_##name = __ATTR(name, 0444, name##_show, NULL)
+
+#define F2FS_FEATURE_RO_ATTR(_name, _id) \
+static struct f2fs_attr f2fs_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = 0444 }, \
+ .show = f2fs_feature_show, \
+ .id = _id, \
+}
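
As a concrete example of the macro machinery, the F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh) line below expands (whitespace aside) to:

static struct f2fs_attr f2fs_attr_ram_thresh = {
	.attr		= { .name = "ram_thresh", .mode = 0644 },
	.show		= f2fs_sbi_show,
	.store		= f2fs_sbi_store,
	.struct_type	= NM_INFO,
	.offset		= offsetof(struct f2fs_nm_info, ram_thresh),
};
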
+
+F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_urgent_sleep_time,
+ urgent_sleep_time);
+F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
+F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
+F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
+F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
+F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_urgent, gc_urgent);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
+F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, max_small_discards, max_discards);
+F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, discard_granularity, discard_granularity);
+F2FS_RW_ATTR(RESERVED_BLOCKS, f2fs_sb_info, reserved_blocks, reserved_blocks);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_hot_blocks, min_hot_blocks);
+F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
+F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
+F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
+F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
+#endif
+F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
+F2FS_GENERAL_RO_ATTR(features);
+
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO);
+#endif
+#ifdef CONFIG_BLK_DEV_ZONED
+F2FS_FEATURE_RO_ATTR(block_zoned, FEAT_BLKZONED);
+#endif
+F2FS_FEATURE_RO_ATTR(atomic_write, FEAT_ATOMIC_WRITE);
+F2FS_FEATURE_RO_ATTR(extra_attr, FEAT_EXTRA_ATTR);
+F2FS_FEATURE_RO_ATTR(project_quota, FEAT_PROJECT_QUOTA);
+F2FS_FEATURE_RO_ATTR(inode_checksum, FEAT_INODE_CHECKSUM);
+
+#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
+static struct attribute *f2fs_attrs[] = {
+ ATTR_LIST(gc_urgent_sleep_time),
+ ATTR_LIST(gc_min_sleep_time),
+ ATTR_LIST(gc_max_sleep_time),
+ ATTR_LIST(gc_no_gc_sleep_time),
+ ATTR_LIST(gc_idle),
+ ATTR_LIST(gc_urgent),
+ ATTR_LIST(reclaim_segments),
+ ATTR_LIST(max_small_discards),
+ ATTR_LIST(discard_granularity),
+ ATTR_LIST(batched_trim_sections),
+ ATTR_LIST(ipu_policy),
+ ATTR_LIST(min_ipu_util),
+ ATTR_LIST(min_fsync_blocks),
+ ATTR_LIST(min_hot_blocks),
+ ATTR_LIST(max_victim_search),
+ ATTR_LIST(dir_level),
+ ATTR_LIST(ram_thresh),
+ ATTR_LIST(ra_nid_pages),
+ ATTR_LIST(dirty_nats_ratio),
+ ATTR_LIST(cp_interval),
+ ATTR_LIST(idle_interval),
+ ATTR_LIST(iostat_enable),
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ ATTR_LIST(inject_rate),
+ ATTR_LIST(inject_type),
+#endif
+ ATTR_LIST(lifetime_write_kbytes),
+ ATTR_LIST(features),
+ ATTR_LIST(reserved_blocks),
+ NULL,
+};
+
+static struct attribute *f2fs_feat_attrs[] = {
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+ ATTR_LIST(encryption),
+#endif
+#ifdef CONFIG_BLK_DEV_ZONED
+ ATTR_LIST(block_zoned),
+#endif
+ ATTR_LIST(atomic_write),
+ ATTR_LIST(extra_attr),
+ ATTR_LIST(project_quota),
+ ATTR_LIST(inode_checksum),
+ NULL,
+};
+
+static const struct sysfs_ops f2fs_attr_ops = {
+ .show = f2fs_attr_show,
+ .store = f2fs_attr_store,
+};
+
+static struct kobj_type f2fs_sb_ktype = {
+ .default_attrs = f2fs_attrs,
+ .sysfs_ops = &f2fs_attr_ops,
+ .release = f2fs_sb_release,
+};
+
+static struct kobj_type f2fs_ktype = {
+ .sysfs_ops = &f2fs_attr_ops,
+};
+
+static struct kset f2fs_kset = {
+ .kobj = {.ktype = &f2fs_ktype},
+};
+
+static struct kobj_type f2fs_feat_ktype = {
+ .default_attrs = f2fs_feat_attrs,
+ .sysfs_ops = &f2fs_attr_ops,
+};
+
+static struct kobject f2fs_feat = {
+ .kset = &f2fs_kset,
+};
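
Taken together, these objects produce the following sysfs layout once f2fs_init_sysfs() and f2fs_register_sysfs() below have run:

/*
 *   /sys/fs/f2fs/            <- f2fs_kset (named in f2fs_init_sysfs)
 *   /sys/fs/f2fs/features/   <- f2fs_feat + f2fs_feat_ktype
 *   /sys/fs/f2fs/<disk>/     <- sbi->s_kobj + f2fs_sb_ktype, one per
 *                               mounted superblock (named after sb->s_id)
 */
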
+
+static int segment_info_seq_show(struct seq_file *seq, void *offset)
+{
+ struct super_block *sb = seq->private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ unsigned int total_segs =
+ le32_to_cpu(sbi->raw_super->segment_count_main);
+ int i;
+
+ seq_puts(seq, "format: segment_type|valid_blocks\n"
+ "segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");
+
+ for (i = 0; i < total_segs; i++) {
+ struct seg_entry *se = get_seg_entry(sbi, i);
+
+ if ((i % 10) == 0)
+ seq_printf(seq, "%-10d", i);
+ seq_printf(seq, "%d|%-3u", se->type,
+ get_valid_blocks(sbi, i, false));
+ if ((i % 10) == 9 || i == (total_segs - 1))
+ seq_putc(seq, '\n');
+ else
+ seq_putc(seq, ' ');
+ }
+
+ return 0;
+}
+
+static int segment_bits_seq_show(struct seq_file *seq, void *offset)
+{
+ struct super_block *sb = seq->private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ unsigned int total_segs =
+ le32_to_cpu(sbi->raw_super->segment_count_main);
+ int i, j;
+
+ seq_puts(seq, "format: segment_type|valid_blocks|bitmaps\n"
+ "segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");
+
+ for (i = 0; i < total_segs; i++) {
+ struct seg_entry *se = get_seg_entry(sbi, i);
+
+ seq_printf(seq, "%-10d", i);
+ seq_printf(seq, "%d|%-3u|", se->type,
+ get_valid_blocks(sbi, i, false));
+ for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
+ seq_printf(seq, " %.2x", se->cur_valid_map[j]);
+ seq_putc(seq, '\n');
+ }
+ return 0;
+}
+
+static int iostat_info_seq_show(struct seq_file *seq, void *offset)
+{
+ struct super_block *sb = seq->private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ time64_t now = ktime_get_real_seconds();
+
+ if (!sbi->iostat_enable)
+ return 0;
+
+ seq_printf(seq, "time: %-16llu\n", now);
+
+ /* print app IOs */
+ seq_printf(seq, "app buffered: %-16llu\n",
+ sbi->write_iostat[APP_BUFFERED_IO]);
+ seq_printf(seq, "app direct: %-16llu\n",
+ sbi->write_iostat[APP_DIRECT_IO]);
+ seq_printf(seq, "app mapped: %-16llu\n",
+ sbi->write_iostat[APP_MAPPED_IO]);
+
+ /* print fs IOs */
+ seq_printf(seq, "fs data: %-16llu\n",
+ sbi->write_iostat[FS_DATA_IO]);
+ seq_printf(seq, "fs node: %-16llu\n",
+ sbi->write_iostat[FS_NODE_IO]);
+ seq_printf(seq, "fs meta: %-16llu\n",
+ sbi->write_iostat[FS_META_IO]);
+ seq_printf(seq, "fs gc data: %-16llu\n",
+ sbi->write_iostat[FS_GC_DATA_IO]);
+ seq_printf(seq, "fs gc node: %-16llu\n",
+ sbi->write_iostat[FS_GC_NODE_IO]);
+ seq_printf(seq, "fs cp data: %-16llu\n",
+ sbi->write_iostat[FS_CP_DATA_IO]);
+ seq_printf(seq, "fs cp node: %-16llu\n",
+ sbi->write_iostat[FS_CP_NODE_IO]);
+ seq_printf(seq, "fs cp meta: %-16llu\n",
+ sbi->write_iostat[FS_CP_META_IO]);
+ seq_printf(seq, "fs discard: %-16llu\n",
+ sbi->write_iostat[FS_DISCARD]);
+
+ return 0;
+}
+
+#define F2FS_PROC_FILE_DEF(_name) \
+static int _name##_open_fs(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, _name##_seq_show, PDE_DATA(inode)); \
+} \
+ \
+static const struct file_operations f2fs_seq_##_name##_fops = { \
+ .open = _name##_open_fs, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+};
+
+F2FS_PROC_FILE_DEF(segment_info);
+F2FS_PROC_FILE_DEF(segment_bits);
+F2FS_PROC_FILE_DEF(iostat_info);
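
For instance, F2FS_PROC_FILE_DEF(segment_info) expands (whitespace aside) to:

static int segment_info_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, segment_info_seq_show, PDE_DATA(inode));
}

static const struct file_operations f2fs_seq_segment_info_fops = {
	.open		= segment_info_open_fs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
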
+
+int __init f2fs_init_sysfs(void)
+{
+ int ret;
+
+ kobject_set_name(&f2fs_kset.kobj, "f2fs");
+ f2fs_kset.kobj.parent = fs_kobj;
+ ret = kset_register(&f2fs_kset);
+ if (ret)
+ return ret;
+
+ ret = kobject_init_and_add(&f2fs_feat, &f2fs_feat_ktype,
+ NULL, "features");
+ if (ret)
+ kset_unregister(&f2fs_kset);
+ else
+ f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
+ return ret;
+}
+
+void f2fs_exit_sysfs(void)
+{
+ kobject_put(&f2fs_feat);
+ kset_unregister(&f2fs_kset);
+ remove_proc_entry("fs/f2fs", NULL);
+ f2fs_proc_root = NULL;
+}
+
+int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
+{
+ struct super_block *sb = sbi->sb;
+ int err;
+
+ sbi->s_kobj.kset = &f2fs_kset;
+ init_completion(&sbi->s_kobj_unregister);
+ err = kobject_init_and_add(&sbi->s_kobj, &f2fs_sb_ktype, NULL,
+ "%s", sb->s_id);
+ if (err)
+ return err;
+
+ if (f2fs_proc_root)
+ sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
+
+ if (sbi->s_proc) {
+ proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
+ &f2fs_seq_segment_info_fops, sb);
+ proc_create_data("segment_bits", S_IRUGO, sbi->s_proc,
+ &f2fs_seq_segment_bits_fops, sb);
+ proc_create_data("iostat_info", S_IRUGO, sbi->s_proc,
+ &f2fs_seq_iostat_info_fops, sb);
+ }
+ return 0;
+}
+
+void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
+{
+ if (sbi->s_proc) {
+ remove_proc_entry("iostat_info", sbi->s_proc);
+ remove_proc_entry("segment_info", sbi->s_proc);
+ remove_proc_entry("segment_bits", sbi->s_proc);
+ remove_proc_entry(sbi->sb->s_id, f2fs_proc_root);
+ }
+ kobject_del(&sbi->s_kobj);
+}
if (!last_io.len)
return;
- trace_printk("%3x:%3x %4x %-16s %2x %5x %12x %4x\n",
+ trace_printk("%3x:%3x %4x %-16s %2x %5x %5x %12x %4x\n",
last_io.major, last_io.minor,
last_io.pid, "----------------",
last_io.type,
- last_io.fio.rw, last_io.fio.blk_addr,
+ last_io.fio.op, last_io.fio.op_flags,
+ last_io.fio.new_blkaddr,
last_io.len);
memset(&last_io, 0, sizeof(last_io));
}
pid_t pid = task_pid_nr(current);
void *p;
- page->private = pid;
+ set_page_private(page, (unsigned long)pid);
if (radix_tree_preload(GFP_NOFS))
return;
if (last_io.major == major && last_io.minor == minor &&
last_io.pid == pid &&
last_io.type == __file_type(inode, pid) &&
- last_io.fio.rw == fio->rw &&
- last_io.fio.blk_addr + last_io.len == fio->blk_addr) {
+ last_io.fio.op == fio->op &&
+ last_io.fio.op_flags == fio->op_flags &&
+ last_io.fio.new_blkaddr + last_io.len ==
+ fio->new_blkaddr) {
last_io.len++;
return;
}
radix_tree_for_each_slot(slot, &pids, &iter, first_index) {
results[ret] = iter.index;
- if (++ret == PIDVEC_SIZE)
+ if (++ret == max_items)
break;
}
return ret;
return -EINVAL;
F2FS_I(inode)->i_advise |= *(char *)value;
- mark_inode_dirty(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
return 0;
}
return entry;
}
-static void *read_all_xattrs(struct inode *inode, struct page *ipage)
+static struct f2fs_xattr_entry *__find_inline_xattr(void *base_addr,
+ void **last_addr, int index,
+ size_t len, const char *name)
+{
+ struct f2fs_xattr_entry *entry;
+ unsigned int inline_size = F2FS_INLINE_XATTR_ADDRS << 2;
+
+ list_for_each_xattr(entry, base_addr) {
+ if ((void *)entry + sizeof(__u32) > base_addr + inline_size ||
+ (void *)XATTR_NEXT_ENTRY(entry) + sizeof(__u32) >
+ base_addr + inline_size) {
+ *last_addr = entry;
+ return NULL;
+ }
+ if (entry->e_name_index != index)
+ continue;
+ if (entry->e_name_len != len)
+ continue;
+ if (!memcmp(entry->e_name, name, len))
+ break;
+ }
+ return entry;
+}
+
+static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
+ unsigned int index, unsigned int len,
+ const char *name, struct f2fs_xattr_entry **xe,
+ void **base_addr)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ void *cur_addr, *txattr_addr, *last_addr = NULL;
+ nid_t xnid = F2FS_I(inode)->i_xattr_nid;
+ unsigned int size = xnid ? VALID_XATTR_BLOCK_SIZE : 0;
+ unsigned int inline_size = inline_xattr_size(inode);
+ int err = 0;
+
+ if (!size && !inline_size)
+ return -ENODATA;
+
+ txattr_addr = kzalloc(inline_size + size + XATTR_PADDING_SIZE,
+ GFP_F2FS_ZERO);
+ if (!txattr_addr)
+ return -ENOMEM;
+
+ /* read from inline xattr */
+ if (inline_size) {
+ struct page *page = NULL;
+ void *inline_addr;
+
+ if (ipage) {
+ inline_addr = inline_xattr_addr(ipage);
+ } else {
+ page = get_node_page(sbi, inode->i_ino);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto out;
+ }
+ inline_addr = inline_xattr_addr(page);
+ }
+ memcpy(txattr_addr, inline_addr, inline_size);
+ f2fs_put_page(page, 1);
+
+ *xe = __find_inline_xattr(txattr_addr, &last_addr,
+ index, len, name);
+ if (*xe)
+ goto check;
+ }
+
+ /* read from xattr node block */
+ if (xnid) {
+ struct page *xpage;
+ void *xattr_addr;
+
+ /* The inode already has an extended attribute block. */
+ xpage = get_node_page(sbi, xnid);
+ if (IS_ERR(xpage)) {
+ err = PTR_ERR(xpage);
+ goto out;
+ }
+
+ xattr_addr = page_address(xpage);
+ memcpy(txattr_addr + inline_size, xattr_addr, size);
+ f2fs_put_page(xpage, 1);
+ }
+
+ if (last_addr)
+ cur_addr = XATTR_HDR(last_addr) - 1;
+ else
+ cur_addr = txattr_addr;
+
+ *xe = __find_xattr(cur_addr, index, len, name);
+check:
+ if (IS_XATTR_LAST_ENTRY(*xe)) {
+ err = -ENODATA;
+ goto out;
+ }
+
+ *base_addr = txattr_addr;
+ return 0;
+out:
+ kzfree(txattr_addr);
+ return err;
+}
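
The combined buffer assembled above (and in read_all_xattrs() below) has this layout; the sizes come from the macros added to xattr.h later in this series:

/*
 *   txattr_addr
 *   +------------------------+ 0
 *   | inline xattr area      |  inline_xattr_size(inode) bytes,
 *   |                        |  copied from the inode page
 *   +------------------------+ inline_size
 *   | xattr node block       |  VALID_XATTR_BLOCK_SIZE bytes, copied
 *   |                        |  from the node page at i_xattr_nid
 *   +------------------------+ inline_size + size
 *   | zero padding           |  XATTR_PADDING_SIZE bytes, keeps a
 *   |                        |  terminating null entry for the scan
 *   +------------------------+
 */
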
+
+static int read_all_xattrs(struct inode *inode, struct page *ipage,
+ void **base_addr)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_xattr_header *header;
- size_t size = PAGE_SIZE, inline_size = 0;
+ nid_t xnid = F2FS_I(inode)->i_xattr_nid;
+ unsigned int size = VALID_XATTR_BLOCK_SIZE;
+ unsigned int inline_size = inline_xattr_size(inode);
void *txattr_addr;
+ int err;
- inline_size = inline_xattr_size(inode);
-
- txattr_addr = kzalloc(inline_size + size, GFP_F2FS_ZERO);
+ txattr_addr = kzalloc(inline_size + size + XATTR_PADDING_SIZE,
+ GFP_F2FS_ZERO);
if (!txattr_addr)
- return NULL;
+ return -ENOMEM;
/* read from inline xattr */
if (inline_size) {
inline_addr = inline_xattr_addr(ipage);
} else {
page = get_node_page(sbi, inode->i_ino);
- if (IS_ERR(page))
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
goto fail;
+ }
inline_addr = inline_xattr_addr(page);
}
memcpy(txattr_addr, inline_addr, inline_size);
}
/* read from xattr node block */
- if (F2FS_I(inode)->i_xattr_nid) {
+ if (xnid) {
struct page *xpage;
void *xattr_addr;
/* The inode already has an extended attribute block. */
- xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
- if (IS_ERR(xpage))
+ xpage = get_node_page(sbi, xnid);
+ if (IS_ERR(xpage)) {
+ err = PTR_ERR(xpage);
goto fail;
+ }
xattr_addr = page_address(xpage);
- memcpy(txattr_addr + inline_size, xattr_addr, PAGE_SIZE);
+ memcpy(txattr_addr + inline_size, xattr_addr, size);
f2fs_put_page(xpage, 1);
}
header->h_magic = cpu_to_le32(F2FS_XATTR_MAGIC);
header->h_refcount = cpu_to_le32(1);
}
- return txattr_addr;
+ *base_addr = txattr_addr;
+ return 0;
fail:
kzfree(txattr_addr);
- return NULL;
+ return err;
}
static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
void *txattr_addr, struct page *ipage)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- size_t inline_size = 0;
+ size_t inline_size = inline_xattr_size(inode);
void *xattr_addr;
struct page *xpage;
nid_t new_nid = 0;
int err;
- inline_size = inline_xattr_size(inode);
-
if (hsize > inline_size && !F2FS_I(inode)->i_xattr_nid)
if (!alloc_nid(sbi, &new_nid))
return -ENOSPC;
if (ipage) {
inline_addr = inline_xattr_addr(ipage);
- f2fs_wait_on_page_writeback(ipage, NODE);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
+ set_page_dirty(ipage);
} else {
page = get_node_page(sbi, inode->i_ino);
if (IS_ERR(page)) {
return PTR_ERR(page);
}
inline_addr = inline_xattr_addr(page);
- f2fs_wait_on_page_writeback(page, NODE);
+ f2fs_wait_on_page_writeback(page, NODE, true);
}
memcpy(inline_addr, txattr_addr, inline_size);
f2fs_put_page(page, 1);
return PTR_ERR(xpage);
}
f2fs_bug_on(sbi, new_nid);
- f2fs_wait_on_page_writeback(xpage, NODE);
+ f2fs_wait_on_page_writeback(xpage, NODE, true);
} else {
struct dnode_of_data dn;
set_new_dnode(&dn, inode, NULL, NULL, new_nid);
- xpage = new_node_page(&dn, XATTR_NODE_OFFSET, ipage);
+ xpage = new_node_page(&dn, XATTR_NODE_OFFSET);
if (IS_ERR(xpage)) {
alloc_nid_failed(sbi, new_nid);
return PTR_ERR(xpage);
}
xattr_addr = page_address(xpage);
- memcpy(xattr_addr, txattr_addr + inline_size, PAGE_SIZE -
- sizeof(struct node_footer));
+ memcpy(xattr_addr, txattr_addr + inline_size, VALID_XATTR_BLOCK_SIZE);
set_page_dirty(xpage);
f2fs_put_page(xpage, 1);
- /* need to checkpoint during fsync */
- F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));
return 0;
}
int f2fs_getxattr(struct inode *inode, int index, const char *name,
void *buffer, size_t buffer_size, struct page *ipage)
{
- struct f2fs_xattr_entry *entry;
- void *base_addr;
+ struct f2fs_xattr_entry *entry = NULL;
int error = 0;
- size_t size, len;
+ unsigned int size, len;
+ void *base_addr = NULL;
if (name == NULL)
return -EINVAL;
if (len > F2FS_NAME_LEN)
return -ERANGE;
- base_addr = read_all_xattrs(inode, ipage);
- if (!base_addr)
- return -ENOMEM;
-
- entry = __find_xattr(base_addr, index, len, name);
- if (IS_XATTR_LAST_ENTRY(entry)) {
- error = -ENODATA;
- goto cleanup;
- }
+ down_read(&F2FS_I(inode)->i_xattr_sem);
+ error = lookup_all_xattrs(inode, ipage, index, len, name,
+ &entry, &base_addr);
+ up_read(&F2FS_I(inode)->i_xattr_sem);
+ if (error)
+ return error;
size = le16_to_cpu(entry->e_value_size);
if (buffer && size > buffer_size) {
error = -ERANGE;
- goto cleanup;
+ goto out;
}
if (buffer) {
memcpy(buffer, pval, size);
}
error = size;
-
-cleanup:
+out:
kzfree(base_addr);
return error;
}
int error = 0;
size_t rest = buffer_size;
- base_addr = read_all_xattrs(inode, NULL);
- if (!base_addr)
- return -ENOMEM;
+ down_read(&F2FS_I(inode)->i_xattr_sem);
+ error = read_all_xattrs(inode, NULL, &base_addr);
+ up_read(&F2FS_I(inode)->i_xattr_sem);
+ if (error)
+ return error;
list_for_each_xattr(entry, base_addr) {
const struct xattr_handler *handler =
return error;
}
+static bool f2fs_xattr_value_same(struct f2fs_xattr_entry *entry,
+ const void *value, size_t size)
+{
+ void *pval = entry->e_name + entry->e_name_len;
+
+ return (le16_to_cpu(entry->e_value_size) == size) &&
+ !memcmp(pval, value, size);
+}
+
static int __f2fs_setxattr(struct inode *inode, int index,
const char *name, const void *value, size_t size,
struct page *ipage, int flags)
{
- struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_xattr_entry *here, *last;
void *base_addr;
int found, newsize;
size_t len;
__u32 new_hsize;
- int error = -ENOMEM;
+ int error = 0;
if (name == NULL)
return -EINVAL;
if (size > MAX_VALUE_LEN(inode))
return -E2BIG;
- base_addr = read_all_xattrs(inode, ipage);
- if (!base_addr)
- goto exit;
+ error = read_all_xattrs(inode, ipage, &base_addr);
+ if (error)
+ return error;
/* find entry with wanted name. */
here = __find_xattr(base_addr, index, len, name);
found = IS_XATTR_LAST_ENTRY(here) ? 0 : 1;
- if ((flags & XATTR_REPLACE) && !found) {
+ if (found) {
+ if ((flags & XATTR_CREATE)) {
+ error = -EEXIST;
+ goto exit;
+ }
+
+ if (f2fs_xattr_value_same(here, value, size))
+ goto exit;
+ } else if ((flags & XATTR_REPLACE)) {
error = -ENODATA;
goto exit;
- } else if ((flags & XATTR_CREATE) && found) {
- error = -EEXIST;
- goto exit;
}
last = here;
free = free + ENTRY_SIZE(here);
if (unlikely(free < newsize)) {
- error = -ENOSPC;
+ error = -E2BIG;
goto exit;
}
}
* Before we come here, old entry is removed.
* We just write new entry.
*/
- memset(last, 0, newsize);
last->e_name_index = index;
last->e_name_len = len;
memcpy(last->e_name, name, len);
if (error)
goto exit;
- if (is_inode_flag_set(fi, FI_ACL_MODE)) {
- inode->i_mode = fi->i_acl_mode;
- inode->i_ctime = CURRENT_TIME;
- clear_inode_flag(fi, FI_ACL_MODE);
+ if (is_inode_flag_set(inode, FI_ACL_MODE)) {
+ inode->i_mode = F2FS_I(inode)->i_acl_mode;
+ inode->i_ctime = current_time(inode);
+ clear_inode_flag(inode, FI_ACL_MODE);
}
if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
!strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
f2fs_set_encrypted_inode(inode);
-
- if (ipage)
- update_inode(inode, ipage);
- else
- update_inode_page(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
+ if (!error && S_ISDIR(inode->i_mode))
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
exit:
kzfree(base_addr);
return error;
if (ipage)
return __f2fs_setxattr(inode, index, name, value,
size, ipage, flags);
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
/* protect xattr_ver */
down_write(&F2FS_I(inode)->i_sem);
+ down_write(&F2FS_I(inode)->i_xattr_sem);
err = __f2fs_setxattr(inode, index, name, value, size, ipage, flags);
+ up_write(&F2FS_I(inode)->i_xattr_sem);
up_write(&F2FS_I(inode)->i_sem);
f2fs_unlock_op(sbi);
+ f2fs_update_time(sbi, REQ_TIME);
return err;
}
#define XATTR_FIRST_ENTRY(ptr) (XATTR_ENTRY(XATTR_HDR(ptr) + 1))
#define XATTR_ROUND (3)
-#define XATTR_ALIGN(size) ((size + XATTR_ROUND) & ~XATTR_ROUND)
+#define XATTR_ALIGN(size) (((size) + XATTR_ROUND) & ~XATTR_ROUND)
#define ENTRY_SIZE(entry) (XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) + \
- entry->e_name_len + le16_to_cpu(entry->e_value_size)))
+ (entry)->e_name_len + le16_to_cpu((entry)->e_value_size)))
#define XATTR_NEXT_ENTRY(entry) ((struct f2fs_xattr_entry *)((char *)(entry) +\
ENTRY_SIZE(entry)))
for (entry = XATTR_FIRST_ENTRY(addr);\
!IS_XATTR_LAST_ENTRY(entry);\
entry = XATTR_NEXT_ENTRY(entry))
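
The added parentheses in XATTR_ALIGN() and ENTRY_SIZE() are not cosmetic: with a compound macro argument, the old expansion binds operators wrongly. A self-contained demonstration with hypothetical values:

#include <stdio.h>

#define XATTR_ROUND		(3)
#define XATTR_ALIGN_OLD(size)	((size + XATTR_ROUND) & ~XATTR_ROUND)	/* pre-patch */
#define XATTR_ALIGN_NEW(size)	(((size) + XATTR_ROUND) & ~XATTR_ROUND)	/* post-patch */

int main(void)
{
	/*
	 * `|` binds more loosely than `+`, so the unparenthesized macro
	 * evaluates only part of the expression before aligning: the
	 * argument 4 | 1 should align 5 up to 8.
	 */
	printf("%d\n", XATTR_ALIGN_OLD(4 | 1));	/* (4 | (1 + 3)) & ~3 == 4, wrong   */
	printf("%d\n", XATTR_ALIGN_NEW(4 | 1));	/* ((5 + 3) & ~3)     == 8, correct */
	return 0;
}
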
-
-#define MIN_OFFSET(i) XATTR_ALIGN(inline_xattr_size(i) + PAGE_SIZE - \
- sizeof(struct node_footer) - sizeof(__u32))
+#define VALID_XATTR_BLOCK_SIZE (PAGE_SIZE - sizeof(struct node_footer))
+#define XATTR_PADDING_SIZE (sizeof(__u32))
+#define MIN_OFFSET(i) XATTR_ALIGN(inline_xattr_size(i) + \
+ VALID_XATTR_BLOCK_SIZE)
#define MAX_VALUE_LEN(i) (MIN_OFFSET(i) - \
sizeof(struct f2fs_xattr_header) - \
#define f2fs_xattr_handlers NULL
static inline int f2fs_setxattr(struct inode *inode, int index,
- const char *name, const void *value, size_t size, int flags)
+ const char *name, const void *value, size_t size,
+ struct page *page, int flags)
{
return -EOPNOTSUPP;
}
static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
- do {
- gi->gl = rhashtable_walk_next(&gi->hti);
+ while ((gi->gl = rhashtable_walk_next(&gi->hti))) {
if (IS_ERR(gi->gl)) {
if (PTR_ERR(gi->gl) == -EAGAIN)
continue;
gi->gl = NULL;
+ return;
}
- /* Skip entries for other sb and dead entries */
- } while ((gi->gl) && ((gi->sdp != gi->gl->gl_name.ln_sbd) ||
- __lockref_is_dead(&gi->gl->gl_lockref)));
+ /* Skip entries for other sb and dead entries */
+ if (gi->sdp == gi->gl->gl_name.ln_sbd &&
+ !__lockref_is_dead(&gi->gl->gl_lockref))
+ return;
+ }
}
static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
struct gfs2_glock_iter *gi = seq->private;
loff_t n = *pos;
- int ret;
- if (gi->last_pos <= *pos)
- n = (*pos - gi->last_pos);
-
- ret = rhashtable_walk_start(&gi->hti);
- if (ret)
+ if (rhashtable_walk_init(&gl_hash_table, &gi->hti) != 0)
+ return NULL;
+ if (rhashtable_walk_start(&gi->hti) != 0)
return NULL;
do {
} while (gi->gl && n--);
gi->last_pos = *pos;
+
return gi->gl;
}
(*pos)++;
gi->last_pos = *pos;
gfs2_glock_iter_next(gi);
+
return gi->gl;
}
struct gfs2_glock_iter *gi = seq->private;
gi->gl = NULL;
- rhashtable_walk_stop(&gi->hti);
+ if (gi->hti.walker) {
+ rhashtable_walk_stop(&gi->hti);
+ rhashtable_walk_exit(&gi->hti);
+ }
}
static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
struct gfs2_glock_iter *gi = seq->private;
gi->sdp = inode->i_private;
- gi->last_pos = 0;
seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
if (seq->buf)
seq->size = GFS2_SEQ_GOODSIZE;
gi->gl = NULL;
- ret = rhashtable_walk_init(&gl_hash_table, &gi->hti);
}
return ret;
}
struct gfs2_glock_iter *gi = seq->private;
gi->gl = NULL;
- rhashtable_walk_exit(&gi->hti);
return seq_release_private(inode, file);
}
struct seq_file *seq = file->private_data;
struct gfs2_glock_iter *gi = seq->private;
gi->sdp = inode->i_private;
- gi->last_pos = 0;
seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
if (seq->buf)
seq->size = GFS2_SEQ_GOODSIZE;
gi->gl = NULL;
- ret = rhashtable_walk_init(&gl_hash_table, &gi->hti);
}
return ret;
}
* We set the bdi here to the queue backing, file systems can
* overwrite this in ->fill_super()
*/
- s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
+ s->s_bdi = bdev_get_queue(s->s_bdev)->backing_dev_info;
return 0;
}
mnt->mnt_count = 1;
mnt->mnt_writers = 0;
#endif
+ mnt->mnt.data = NULL;
INIT_HLIST_NODE(&mnt->mnt_hash);
INIT_LIST_HEAD(&mnt->mnt_child);
if (!mnt)
return ERR_PTR(-ENOMEM);
- mnt->mnt.data = NULL;
if (type->alloc_mnt_data) {
mnt->mnt.data = type->alloc_mnt_data();
if (!mnt->mnt.data) {
root = mount_fs(type, flags, name, &mnt->mnt, data);
if (IS_ERR(root)) {
- kfree(mnt->mnt.data);
mnt_free_id(mnt);
free_vfsmnt(mnt);
return ERR_CAST(root);
return mnt;
out_free:
- kfree(mnt->mnt.data);
mnt_free_id(mnt);
free_vfsmnt(mnt);
return ERR_PTR(err);
extern const struct nfs_pageio_ops nfs_pgio_rw_ops;
struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *);
void nfs_pgio_header_free(struct nfs_pgio_header *);
-void nfs_pgio_data_destroy(struct nfs_pgio_header *);
int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);
-/*
- * nfs_pgio_header_free - Free a read or write header
- * @hdr: The header to free
- */
-void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
-{
- hdr->rw_ops->rw_free_header(hdr);
-}
-EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
-
/**
* nfs_pgio_data_destroy - make @hdr suitable for reuse
*
*
* @hdr: A header that has had nfs_generic_pgio called
*/
-void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
+static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
if (hdr->args.context)
put_nfs_open_context(hdr->args.context);
if (hdr->page_array.pagevec != hdr->page_array.page_array)
kfree(hdr->page_array.pagevec);
}
-EXPORT_SYMBOL_GPL(nfs_pgio_data_destroy);
+
+/*
+ * nfs_pgio_header_free - Free a read or write header
+ * @hdr: The header to free
+ */
+void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
+{
+ nfs_pgio_data_destroy(hdr);
+ hdr->rw_ops->rw_free_header(hdr);
+}
+EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
/**
* nfs_pgio_rpcsetup - Set up arguments for a pageio call
u32 midx;
set_bit(NFS_IOHDR_REDO, &hdr->flags);
- nfs_pgio_data_destroy(hdr);
hdr->completion_ops->completion(hdr);
/* TODO: Make sure it's right to clean up all mirrors here
* and not just hdr->pgio_mirror_idx */
static void nfs_pgio_release(void *calldata)
{
struct nfs_pgio_header *hdr = calldata;
- nfs_pgio_data_destroy(hdr);
hdr->completion_ops->completion(hdr);
}
nfs_pageio_reset_write_mds(desc);
mirror->pg_recoalesce = 1;
}
- nfs_pgio_data_destroy(hdr);
hdr->release(hdr);
}
nfs_pageio_reset_read_mds(desc);
mirror->pg_recoalesce = 1;
}
- nfs_pgio_data_destroy(hdr);
hdr->release(hdr);
}
return 0;
}
+void cleanup_callback_cred(void)
+{
+ if (callback_cred) {
+ put_rpccred(callback_cred);
+ callback_cred = NULL;
+ }
+}
+
static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses)
{
if (clp->cl_minorversion == 0) {
static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
- struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
-
- lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
+ lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
list_del_init(&stp->st_locks);
nfs4_unhash_stid(&stp->st_stid);
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
- struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
+ struct nfs4_client *clp = stp->st_stid.sc_client;
bool unhashed;
- spin_lock(&oo->oo_owner.so_client->cl_lock);
+ spin_lock(&clp->cl_lock);
unhashed = unhash_lock_stateid(stp);
- spin_unlock(&oo->oo_owner.so_client->cl_lock);
+ spin_unlock(&clp->cl_lock);
if (unhashed)
nfs4_put_stid(&stp->st_stid);
}
ret = set_callback_cred();
if (ret)
- return -ENOMEM;
+ return ret;
+
laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
if (laundry_wq == NULL) {
ret = -ENOMEM;
- goto out_recovery;
+ goto out_cleanup_cred;
}
ret = nfsd4_create_callback_queue();
if (ret)
goto out_free_laundry;
set_max_delegations();
-
return 0;
out_free_laundry:
destroy_workqueue(laundry_wq);
-out_recovery:
+out_cleanup_cred:
+ cleanup_callback_cred();
return ret;
}
{
destroy_workqueue(laundry_wq);
nfsd4_destroy_callback_queue();
+ cleanup_callback_cred();
}
static void
extern __be32 nfs4_check_open_reclaim(clientid_t *clid,
struct nfsd4_compound_state *cstate, struct nfsd_net *nn);
extern int set_callback_cred(void);
+extern void cleanup_callback_cred(void);
extern void nfsd4_probe_callback(struct nfs4_client *clp);
extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
sb->s_time_gran = 1;
sb->s_max_links = NILFS_LINK_MAX;
- sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info;
+ sb->s_bdi = bdev_get_queue(sb->s_bdev)->backing_dev_info;
err = load_nilfs(nilfs, sb);
if (err)
init_waitqueue_head(&res->l_event);
INIT_LIST_HEAD(&res->l_blocked_list);
INIT_LIST_HEAD(&res->l_mask_waiters);
+ INIT_LIST_HEAD(&res->l_holders);
}
void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
res->l_flags = 0UL;
}
+/*
+ * Keep a list of processes who have interest in a lockres.
+ * Note: this is now only uesed for check recursive cluster locking.
+ */
+static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
+ struct ocfs2_lock_holder *oh)
+{
+ INIT_LIST_HEAD(&oh->oh_list);
+ oh->oh_owner_pid = get_pid(task_pid(current));
+
+ spin_lock(&lockres->l_lock);
+ list_add_tail(&oh->oh_list, &lockres->l_holders);
+ spin_unlock(&lockres->l_lock);
+}
+
+static inline void ocfs2_remove_holder(struct ocfs2_lock_res *lockres,
+ struct ocfs2_lock_holder *oh)
+{
+ spin_lock(&lockres->l_lock);
+ list_del(&oh->oh_list);
+ spin_unlock(&lockres->l_lock);
+
+ put_pid(oh->oh_owner_pid);
+}
+
+static inline int ocfs2_is_locked_by_me(struct ocfs2_lock_res *lockres)
+{
+ struct ocfs2_lock_holder *oh;
+ struct pid *pid;
+
+ /* look in the list of holders for one with the current task as owner */
+ spin_lock(&lockres->l_lock);
+ pid = task_pid(current);
+ list_for_each_entry(oh, &lockres->l_holders, oh_list) {
+ if (oh->oh_owner_pid == pid) {
+ spin_unlock(&lockres->l_lock);
+ return 1;
+ }
+ }
+ spin_unlock(&lockres->l_lock);
+
+ return 0;
+}
+
static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
int level)
{
goto getbh;
}
- if (ocfs2_mount_local(osb))
- goto local;
+ if ((arg_flags & OCFS2_META_LOCK_GETBH) ||
+ ocfs2_mount_local(osb))
+ goto update;
if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
ocfs2_wait_for_recovery(osb);
if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
ocfs2_wait_for_recovery(osb);
-local:
+update:
/*
* We only see this flag if we're being called from
* ocfs2_read_locked_inode(). It means we're locking an inode
ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
}
+/*
+ * These _tracker variants are introduced to deal with the recursive cluster
+ * locking issue. The idea is to keep track of a lock holder on the stack of
+ * the current process. If there's a lock holder on the stack, we know the
+ * task context is already protected by cluster locking. Currently, they're
+ * used in some VFS entry routines.
+ *
+ * Return < 0 on error, 0 if there was no lock holder on the stack before
+ * this call, and 1 if this call is a recursive locking.
+ */
+int ocfs2_inode_lock_tracker(struct inode *inode,
+ struct buffer_head **ret_bh,
+ int ex,
+ struct ocfs2_lock_holder *oh)
+{
+ int status;
+ int arg_flags = 0, has_locked;
+ struct ocfs2_lock_res *lockres;
+
+ lockres = &OCFS2_I(inode)->ip_inode_lockres;
+ has_locked = ocfs2_is_locked_by_me(lockres);
+ /* Just get buffer head if the cluster lock has been taken */
+ if (has_locked)
+ arg_flags = OCFS2_META_LOCK_GETBH;
+
+ if (likely(!has_locked || ret_bh)) {
+ status = ocfs2_inode_lock_full(inode, ret_bh, ex, arg_flags);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ return status;
+ }
+ }
+ if (!has_locked)
+ ocfs2_add_holder(lockres, oh);
+
+ return has_locked;
+}
+
+void ocfs2_inode_unlock_tracker(struct inode *inode,
+ int ex,
+ struct ocfs2_lock_holder *oh,
+ int had_lock)
+{
+ struct ocfs2_lock_res *lockres;
+
+ lockres = &OCFS2_I(inode)->ip_inode_lockres;
+ if (!had_lock) {
+ ocfs2_remove_holder(lockres, oh);
+ ocfs2_inode_unlock(inode, ex);
+ }
+}
+
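Example — a minimal sketch (not part of this patch) of how a VFS entry
routine might use the _tracker pair; everything other than the tracker
API itself is a hypothetical name used for illustration:

	static int example_vfs_entry(struct inode *inode)
	{
		struct ocfs2_lock_holder oh;
		struct buffer_head *bh = NULL;
		int had_lock;

		/* ex = 1: take (or piggyback on) an exclusive cluster lock */
		had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh);
		if (had_lock < 0)
			return had_lock;	/* cluster locking failed */

		/* ... work protected by the cluster lock ... */

		/* only drops the lock if this call actually took it */
		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
		brelse(bh);
		return 0;
	}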
int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
{
struct ocfs2_lock_res *lockres;
__be32 lvb_os_seqno;
};
+struct ocfs2_lock_holder {
+ struct list_head oh_list;
+ struct pid *oh_owner_pid;
+};
+
/* ocfs2_inode_lock_full() 'arg_flags' flags */
/* don't wait on recovery. */
#define OCFS2_META_LOCK_RECOVERY (0x01)
#define OCFS2_META_LOCK_NOQUEUE (0x02)
/* don't block waiting for the downconvert thread, instead return -EAGAIN */
#define OCFS2_LOCK_NONBLOCK (0x04)
+/* just get back the disk inode bh if we've already got the cluster lock. */
+#define OCFS2_META_LOCK_GETBH (0x08)
/* Locking subclasses of inode cluster lock */
enum {
/* To set the locking protocol on module initialization */
void ocfs2_set_locking_protocol(void);
+
+/* The _tracker pair is used to avoid cluster recursive locking */
+int ocfs2_inode_lock_tracker(struct inode *inode,
+ struct buffer_head **ret_bh,
+ int ex,
+ struct ocfs2_lock_holder *oh);
+void ocfs2_inode_unlock_tracker(struct inode *inode,
+ int ex,
+ struct ocfs2_lock_holder *oh,
+ int had_lock);
+
#endif /* DLMGLUE_H */
struct list_head l_blocked_list;
struct list_head l_mask_waiters;
+ struct list_head l_holders;
unsigned long l_flags;
char l_name[OCFS2_LOCK_ID_MAX_LEN];
* In the generic case the entire file is data, so as long as
* offset isn't at the end of the file then the offset is data.
*/
- if (offset >= eof)
+ if ((unsigned long long)offset >= eof)
return -ENXIO;
break;
case SEEK_HOLE:
* There is a virtual hole at the end of the file, so as long as
* offset isn't i_size or larger, return i_size.
*/
- if (offset >= eof)
+ if ((unsigned long long)offset >= eof)
return -ENXIO;
offset = eof;
break;
gid_t gid = sbi->options.fs_low_gid;
struct iattr newattrs;
+ if (!sbi->options.gid_derivation)
+ return;
+
info = SDCARDFS_I(d_inode(dentry));
info_d = info->data;
perm = info_d->perm;
if (!cred)
return NULL;
- if (data->under_obb)
- uid = AID_MEDIA_OBB;
- else
- uid = multiuser_get_uid(data->userid, sbi->options.fs_low_uid);
+ if (sbi->options.gid_derivation) {
+ if (data->under_obb)
+ uid = AID_MEDIA_OBB;
+ else
+ uid = multiuser_get_uid(data->userid, sbi->options.fs_low_uid);
+ } else {
+ uid = sbi->options.fs_low_uid;
+ }
cred->fsuid = make_kuid(&init_user_ns, uid);
cred->fsgid = make_kgid(&init_user_ns, sbi->options.fs_low_gid);
Opt_multiuser,
Opt_userid,
Opt_reserved_mb,
+ Opt_gid_derivation,
Opt_err,
};
{Opt_mask, "mask=%u"},
{Opt_userid, "userid=%d"},
{Opt_multiuser, "multiuser"},
+ {Opt_gid_derivation, "derive_gid"},
{Opt_reserved_mb, "reserved_mb=%u"},
{Opt_err, NULL}
};
vfsopts->gid = 0;
/* by default, 0MB is reserved */
opts->reserved_mb = 0;
+ /* by default, gid derivation is off */
+ opts->gid_derivation = false;
*debug = 0;
return 0;
opts->reserved_mb = option;
break;
+ case Opt_gid_derivation:
+ opts->gid_derivation = true;
+ break;
/* unknown option */
default:
if (!silent)
gid_t fs_low_gid;
userid_t fs_user_id;
bool multiuser;
+ bool gid_derivation;
unsigned int reserved_mb;
};
seq_printf(m, ",mask=%u", vfsopts->mask);
if (opts->fs_user_id)
seq_printf(m, ",userid=%u", opts->fs_user_id);
+ if (opts->gid_derivation)
+ seq_puts(m, ",derive_gid");
if (opts->reserved_mb != 0)
seq_printf(m, ",reserved=%uMB", opts->reserved_mb);
* We set the bdi here to the queue backing, file systems can
* overwrite this in ->fill_super()
*/
- s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
+ s->s_bdi = bdev_get_queue(s->s_bdev)->backing_dev_info;
return 0;
}
}
memcpy(value, buffer, len);
out:
- security_release_secctx(buffer, len);
+ kfree(buffer);
out_noalloc:
return len;
}
#include "kmem.h"
#include "xfs_message.h"
-/*
- * Greedy allocation. May fail and may return vmalloced memory.
- */
-void *
-kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
-{
- void *ptr;
- size_t kmsize = maxsize;
-
- while (!(ptr = vzalloc(kmsize))) {
- if ((kmsize >>= 1) <= minsize)
- kmsize = minsize;
- }
- if (ptr)
- *size = kmsize;
- return ptr;
-}
-
void *
kmem_alloc(size_t size, xfs_km_flags_t flags)
{
}
-extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
-
static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
xfs_agino_t agino; /* inode # in allocation group */
xfs_agnumber_t agno; /* allocation group number */
xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */
- size_t irbsize; /* size of irec buffer in bytes */
xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */
int nirbuf; /* size of irbuf */
int ubcount; /* size of user's buffer */
*ubcountp = 0;
*done = 0;
- irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
+ irbuf = kmem_zalloc_large(PAGE_SIZE * 4, KM_SLEEP);
if (!irbuf)
return -ENOMEM;
-
- nirbuf = irbsize / sizeof(*irbuf);
+ nirbuf = (PAGE_SIZE * 4) / sizeof(*irbuf);
/*
* Loop over the allocation groups, starting from the last
#endif /* DEBUG */
#ifdef CONFIG_XFS_RT
-#define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME)
+
+/*
+ * make sure we ignore the inode flag if the filesystem doesn't have a
+ * configured realtime device.
+ */
+#define XFS_IS_REALTIME_INODE(ip) \
+ (((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) && \
+ (ip)->i_mount->m_rtdev_targp)
#else
#define XFS_IS_REALTIME_INODE(ip) (0)
#endif
(__ret); \
})
-#define this_cpu_generic_read(pcp) \
+#define __this_cpu_generic_read_nopreempt(pcp) \
({ \
typeof(pcp) __ret; \
preempt_disable(); \
- __ret = *this_cpu_ptr(&(pcp)); \
+ __ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \
preempt_enable(); \
__ret; \
})
+#define __this_cpu_generic_read_noirq(pcp) \
+({ \
+ typeof(pcp) __ret; \
+ unsigned long __flags; \
+ raw_local_irq_save(__flags); \
+ __ret = *raw_cpu_ptr(&(pcp)); \
+ raw_local_irq_restore(__flags); \
+ __ret; \
+})
+
+#define this_cpu_generic_read(pcp) \
+({ \
+ typeof(pcp) __ret; \
+ if (__native_word(pcp)) \
+ __ret = __this_cpu_generic_read_nopreempt(pcp); \
+ else \
+ __ret = __this_cpu_generic_read_noirq(pcp); \
+ __ret; \
+})
+
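A worked note on why the read path is split (sizes assumed for a 32-bit
machine; not part of this patch): there, sizeof(u64) == 8 is not a native
word, so this_cpu_generic_read() takes the irq-disabled path to read both
halves consistently, while a u32 stays on the cheaper preempt-disable +
READ_ONCE() path and is still tearing-free.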
#define this_cpu_generic_to_op(pcp, val, op) \
do { \
unsigned long __flags; \
#define parent_node(node) ((void)(node),0)
#endif
#ifndef cpumask_of_node
-#define cpumask_of_node(node) ((void)node, cpu_online_mask)
+ #ifdef CONFIG_NEED_MULTIPLE_NODES
+ #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
+ #else
+ #define cpumask_of_node(node) ((void)node, cpu_online_mask)
+ #endif
#endif
#ifndef pcibus_to_node
#define pcibus_to_node(bus) ((void)(bus), -1)
return __audit_socketcall(nargs, args);
return 0;
}
+
+static inline int audit_socketcall_compat(int nargs, u32 *args)
+{
+ unsigned long a[AUDITSC_ARGS];
+ int i;
+
+ if (audit_dummy_context())
+ return 0;
+
+ for (i = 0; i < nargs; i++)
+ a[i] = (unsigned long)args[i];
+ return __audit_socketcall(nargs, a);
+}
+
static inline int audit_sockaddr(int len, void *addr)
{
if (unlikely(!audit_dummy_context()))
{
return 0;
}
+
+static inline int audit_socketcall_compat(int nargs, u32 *args)
+{
+ return 0;
+}
+
static inline void audit_fd_pair(int fd1, int fd2)
{ }
static inline int audit_sockaddr(int len, void *addr)
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include <linux/kref.h>
struct page;
struct device;
void *congested_data; /* Pointer to aux data for congested func */
char *name;
+ struct kref refcnt; /* Reference counter for the structure */
unsigned int min_ratio;
unsigned int max_ratio, max_prop_frac;
#include <linux/slab.h>
int __must_check bdi_init(struct backing_dev_info *bdi);
-void bdi_exit(struct backing_dev_info *bdi);
+
+static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
+{
+ kref_get(&bdi->refcnt);
+ return bdi;
+}
+
+void bdi_put(struct backing_dev_info *bdi);
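Example — a minimal sketch (the caller is an assumption, not part of this
patch) of pinning a queue's bdi so it can outlive the queue:

	static struct backing_dev_info *example_pin_bdi(struct request_queue *q)
	{
		/* take a reference; the caller must drop it with bdi_put() */
		return bdi_get(q->backing_dev_info);
	}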
__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void bdi_destroy(struct backing_dev_info *bdi);
+struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
bool range_cyclic, enum wb_reason reason);
*/
struct delayed_work delay_work;
- struct backing_dev_info backing_dev_info;
+ struct backing_dev_info *backing_dev_info;
/*
* The queue owner gets to use this for whatever they like.
#undef uninitialized_var
#define uninitialized_var(x) x = *(&(x))
#endif
+
+/* Same as gcc: this was present in clang-2.6, so we can assume it works
+ * with any clang version that can compile the kernel.
+ */
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
/*
* Force always-inline if the user requests it so via the .config,
- * or if gcc is too old:
+ * or if gcc is too old.
+ * GCC does not warn about unused static inline functions for
+ * -Wunused-function. This turns out to avoid the need for complex #ifdef
+ * directives. Suppress the warning in clang as well by using "unused"
+ * function attribute, which is redundant but not harmful for gcc.
*/
#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
!defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-#define inline inline __attribute__((always_inline)) notrace
-#define __inline__ __inline__ __attribute__((always_inline)) notrace
-#define __inline __inline __attribute__((always_inline)) notrace
+#define inline inline __attribute__((always_inline,unused)) notrace
+#define __inline__ __inline__ __attribute__((always_inline,unused)) notrace
+#define __inline __inline __attribute__((always_inline,unused)) notrace
#else
/* A lot of inline functions can cause havoc with function tracing */
-#define inline inline notrace
-#define __inline__ __inline__ notrace
-#define __inline __inline notrace
+#define inline inline __attribute__((unused)) notrace
+#define __inline__ __inline__ __attribute__((unused)) notrace
+#define __inline __inline __attribute__((unused)) notrace
#endif
#define __always_inline inline __attribute__((always_inline))
bool fast_switch_possible;
bool fast_switch_enabled;
- /* Cached frequency lookup from cpufreq_driver_resolve_freq. */
+ /*
+ * Preferred average time interval between consecutive invocations of
+ * the driver to set the frequency for this policy. To be set by the
+ * scaling driver (0, which is the default, means no preference).
+ */
+ unsigned int up_transition_delay_us;
+ unsigned int down_transition_delay_us;
+
+ /* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
int cached_resolved_idx;
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
+extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(bool cpu_online);
+extern void cpuset_wait_for_hotplug(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
+static inline void cpuset_force_rebuild(void) { }
+
static inline void cpuset_update_active_cpus(bool cpu_online)
{
partition_sched_domains(1, NULL, NULL);
}
+static inline void cpuset_wait_for_hotplug(void) { }
+
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{
#define DCACHE_MAY_FREE 0x00800000
#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
#define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */
+#define DCACHE_ENCRYPTED_WITH_KEY 0x04000000 /* dir is encrypted with a valid key */
#define DCACHE_OP_REAL 0x08000000
extern seqlock_t rename_lock;
#define F2FS_BLKSIZE 4096 /* support only 4KB block */
#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
-#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE)
+#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
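Worked example (plain arithmetic): F2FS_BLK_ALIGN(4097) == (4097 + 4095) >> 12 == 2,
the same ceiling division by the 4 KB block size as the old expression,
computed with a shift instead of a divide.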
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
#define F2FS_NODE_INO(sbi) (sbi->node_ino_num)
#define F2FS_META_INO(sbi) (sbi->meta_ino_num)
+#define F2FS_IO_SIZE(sbi) (1 << (sbi)->write_io_size_bits) /* Blocks */
+#define F2FS_IO_SIZE_KB(sbi) (1 << ((sbi)->write_io_size_bits + 2)) /* KB */
+#define F2FS_IO_SIZE_BYTES(sbi) (1 << ((sbi)->write_io_size_bits + 12)) /* B */
+#define F2FS_IO_SIZE_BITS(sbi) ((sbi)->write_io_size_bits) /* power of 2 */
+#define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1)
+
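Worked example (the write_io_size_bits value is assumed): with
(sbi)->write_io_size_bits == 3, F2FS_IO_SIZE() is 8 blocks,
F2FS_IO_SIZE_KB() is 32 (8 * 4 KB), F2FS_IO_SIZE_BYTES() is 32768, and
F2FS_IO_SIZE_MASK() is 7.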
/* This flag is used by node and meta inodes, and by recovery */
#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO)
#define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM)
#define MAX_ACTIVE_DATA_LOGS 8
#define VERSION_LEN 256
+#define MAX_VOLUME_NAME 512
+#define MAX_PATH_LEN 64
+#define MAX_DEVICES 8
/*
* For superblock
*/
+struct f2fs_device {
+ __u8 path[MAX_PATH_LEN];
+ __le32 total_segments;
+} __packed;
+
struct f2fs_super_block {
__le32 magic; /* Magic Number */
__le16 major_ver; /* Major Version */
__le32 node_ino; /* node inode number */
__le32 meta_ino; /* meta inode number */
__u8 uuid[16]; /* 128-bit uuid for volume */
- __le16 volume_name[512]; /* volume name */
+ __le16 volume_name[MAX_VOLUME_NAME]; /* volume name */
__le32 extension_count; /* # of extensions below */
__u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */
__le32 cp_payload;
__le32 feature; /* defined features */
__u8 encryption_level; /* versioning level for encryption */
__u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
- __u8 reserved[871]; /* valid reserved region */
+ struct f2fs_device devs[MAX_DEVICES]; /* device list */
+ __u8 reserved[327]; /* valid reserved region */
} __packed;
/*
* For checkpoint
*/
+#define CP_TRIMMED_FLAG 0x00000100
+#define CP_NAT_BITS_FLAG 0x00000080
+#define CP_CRC_RECOVERY_FLAG 0x00000040
#define CP_FASTBOOT_FLAG 0x00000020
#define CP_FSCK_FLAG 0x00000010
#define CP_ERROR_FLAG 0x00000008
#define F2FS_NAME_LEN 255
#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */
#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
+#define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \
+ get_extra_isize(inode))
#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
-#define ADDRS_PER_INODE(fi) addrs_per_inode(fi)
+#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
-#define ADDRS_PER_PAGE(page, fi) \
- (IS_INODE(page) ? ADDRS_PER_INODE(fi) : ADDRS_PER_BLOCK)
+#define ADDRS_PER_PAGE(page, inode) \
+ (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK)
#define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1)
#define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2)
#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */
#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
-
-#define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \
- F2FS_INLINE_XATTR_ADDRS - 1))
+#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */
struct f2fs_inode {
__le16 i_mode; /* file mode */
struct f2fs_extent i_ext; /* caching a largest extent */
- __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
-
+ union {
+ struct {
+ __le16 i_extra_isize; /* extra inode attribute size */
+ __le16 i_padding; /* padding */
+ __le32 i_projid; /* project id */
+ __le32 i_inode_checksum;/* inode meta checksum */
+ __le32 i_extra_end[0]; /* for attribute size calculation */
+ };
+ __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
+ };
__le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2),
double_indirect(1) node id */
} __packed;
/*
* For NAT entries
*/
-#define NAT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_nat_entry))
+#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
+#define NAT_ENTRY_BITMAP_SIZE ((NAT_ENTRY_PER_BLOCK + 7) / 8)
struct f2fs_nat_entry {
__u8 version; /* latest version of cached nat entry */
* Not allow to change this.
*/
#define SIT_VBLOCK_MAP_SIZE 64
-#define SIT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_sit_entry))
+#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
/*
* F2FS uses 4 bytes to represent block address. As a result, supported size of
struct summary_footer {
unsigned char entry_type; /* SUM_TYPE_XXX */
- __u32 check_sum; /* summary checksum */
+ __le32 check_sum; /* summary checksum */
} __packed;
#define SUM_JOURNAL_SIZE (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\
sizeof(struct sit_journal_entry))
#define SIT_JOURNAL_RESERVED ((SUM_JOURNAL_SIZE - 2) %\
sizeof(struct sit_journal_entry))
+
+/*
+ * The reserved area should make the size of f2fs_extra_info equal to
+ * that of nat_journal and sit_journal.
+ */
+#define EXTRA_INFO_RESERVED (SUM_JOURNAL_SIZE - 2 - 8)
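Size check (follows from the definitions here): the 2 accounts for the
two-byte n_nats/n_sits field and the 8 for the __le64 kbytes_written
below, so sizeof(struct f2fs_extra_info) == 8 + EXTRA_INFO_RESERVED ==
SUM_JOURNAL_SIZE - 2, keeping all three journal union members the same
size.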
+
/*
* frequently updated NAT/SIT entries can be stored in the spare area in
* summary blocks
__u8 reserved[SIT_JOURNAL_RESERVED];
} __packed;
-/* 4KB-sized summary block structure */
-struct f2fs_summary_block {
- struct f2fs_summary entries[ENTRIES_IN_SUM];
+struct f2fs_extra_info {
+ __le64 kbytes_written;
+ __u8 reserved[EXTRA_INFO_RESERVED];
+} __packed;
+
+struct f2fs_journal {
union {
__le16 n_nats;
__le16 n_sits;
};
- /* spare area is used by NAT or SIT journals */
+ /* spare area is used by NAT or SIT journals or extra info */
union {
struct nat_journal nat_j;
struct sit_journal sit_j;
+ struct f2fs_extra_info info;
};
+} __packed;
+
+/* 4KB-sized summary block structure */
+struct f2fs_summary_block {
+ struct f2fs_summary entries[ENTRIES_IN_SUM];
+ struct f2fs_journal journal;
struct summary_footer footer;
} __packed;
#define MAX_DIR_BUCKETS (1 << ((MAX_DIR_HASH_DEPTH / 2) - 1))
/*
- * space utilization of regular dentry and inline dentry
+ * space utilization of regular dentry and inline dentry (w/o extra reservation)
* regular dentry inline dentry
* bitmap 1 * 27 = 27 1 * 23 = 23
* reserved 1 * 3 = 3 1 * 7 = 7
__u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN];
} __packed;
-/* for inline dir */
-#define NR_INLINE_DENTRY (MAX_INLINE_DATA * BITS_PER_BYTE / \
- ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
- BITS_PER_BYTE + 1))
-#define INLINE_DENTRY_BITMAP_SIZE ((NR_INLINE_DENTRY + \
- BITS_PER_BYTE - 1) / BITS_PER_BYTE)
-#define INLINE_RESERVED_SIZE (MAX_INLINE_DATA - \
- ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
- NR_INLINE_DENTRY + INLINE_DENTRY_BITMAP_SIZE))
-
-/* inline directory entry structure */
-struct f2fs_inline_dentry {
- __u8 dentry_bitmap[INLINE_DENTRY_BITMAP_SIZE];
- __u8 reserved[INLINE_RESERVED_SIZE];
- struct f2fs_dir_entry dentry[NR_INLINE_DENTRY];
- __u8 filename[NR_INLINE_DENTRY][F2FS_SLOT_LEN];
-} __packed;
-
/* file types used in inode_info->flags */
enum {
F2FS_FT_UNKNOWN,
F2FS_FT_MAX
};
+#define S_SHIFT 12
+
+#define F2FS_DEF_PROJID 0 /* default project ID */
+
#endif /* _LINUX_F2FS_FS_H */
struct seq_file;
struct workqueue_struct;
struct iov_iter;
+struct fscrypt_info;
+struct fscrypt_operations;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
int bd_invalidated;
struct gendisk * bd_disk;
struct request_queue * bd_queue;
+ struct backing_dev_info *bd_bdi;
struct list_head bd_list;
/*
* Private data. You must have bd_claim'ed the block_device
struct hlist_head i_fsnotify_marks;
#endif
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ struct fscrypt_info *i_crypt_info;
+#endif
void *i_private; /* fs or device private pointer */
};
#endif
const struct xattr_handler **s_xattr;
+ const struct fscrypt_operations *s_cop;
+
struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
#ifdef CONFIG_BLOCK
extern int register_blkdev(unsigned int, const char *);
extern void unregister_blkdev(unsigned int, const char *);
+extern void bdev_unhash_inode(dev_t dev);
extern struct block_device *bdget(dev_t);
extern struct block_device *bdgrab(struct block_device *bdev);
extern void bd_set_size(struct block_device *, loff_t size);
--- /dev/null
+/*
+ * fscrypt_common.h: common declarations for per-file encryption
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * Written by Michael Halcrow, 2015.
+ * Modified by Jaegeuk Kim, 2015.
+ */
+
+#ifndef _LINUX_FSCRYPT_COMMON_H
+#define _LINUX_FSCRYPT_COMMON_H
+
+#include <linux/key.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/bio.h>
+#include <linux/dcache.h>
+#include <crypto/skcipher.h>
+#include <uapi/linux/fs.h>
+
+#define FS_CRYPTO_BLOCK_SIZE 16
+
+struct fscrypt_info;
+
+struct fscrypt_ctx {
+ union {
+ struct {
+ struct page *bounce_page; /* Ciphertext page */
+ struct page *control_page; /* Original page */
+ } w;
+ struct {
+ struct bio *bio;
+ struct work_struct work;
+ } r;
+ struct list_head free_list; /* Free list */
+ };
+ u8 flags; /* Flags */
+};
+
+/**
+ * For encrypted symlinks, the ciphertext length is stored at the beginning
+ * of the string in little-endian format.
+ */
+struct fscrypt_symlink_data {
+ __le16 len;
+ char encrypted_path[1];
+} __packed;
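Example — a sketch (not part of this patch) of recovering the ciphertext
length from the on-disk symlink data:

	static inline u16 example_symlink_ct_len(const struct fscrypt_symlink_data *sd)
	{
		/* the length is stored little-endian ahead of encrypted_path[] */
		return le16_to_cpu(sd->len);
	}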
+
+struct fscrypt_str {
+ unsigned char *name;
+ u32 len;
+};
+
+struct fscrypt_name {
+ const struct qstr *usr_fname;
+ struct fscrypt_str disk_name;
+ u32 hash;
+ u32 minor_hash;
+ struct fscrypt_str crypto_buf;
+};
+
+#define FSTR_INIT(n, l) { .name = n, .len = l }
+#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len)
+#define fname_name(p) ((p)->disk_name.name)
+#define fname_len(p) ((p)->disk_name.len)
+
+/*
+ * fscrypt superblock flags
+ */
+#define FS_CFLG_OWN_PAGES (1U << 1)
+
+/*
+ * crypto operations for filesystems
+ */
+struct fscrypt_operations {
+ unsigned int flags;
+ const char *key_prefix;
+ int (*get_context)(struct inode *, void *, size_t);
+ int (*set_context)(struct inode *, const void *, size_t, void *);
+ int (*dummy_context)(struct inode *);
+ bool (*is_encrypted)(struct inode *);
+ bool (*empty_dir)(struct inode *);
+ unsigned (*max_namelen)(struct inode *);
+};
+
+static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+{
+ if (inode->i_sb->s_cop->dummy_context &&
+ inode->i_sb->s_cop->dummy_context(inode))
+ return true;
+ return false;
+}
+
+static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
+ u32 filenames_mode)
+{
+ if (contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC &&
+ filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS)
+ return true;
+
+ if (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS &&
+ filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
+ return true;
+
+ return false;
+}
+
+static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
+{
+ if (str->len == 1 && str->name[0] == '.')
+ return true;
+
+ if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
+ return true;
+
+ return false;
+}
+
+static inline struct page *fscrypt_control_page(struct page *page)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
+#else
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-EINVAL);
+#endif
+}
+
+static inline int fscrypt_has_encryption_key(const struct inode *inode)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ return (inode->i_crypt_info != NULL);
+#else
+ return 0;
+#endif
+}
+
+#endif /* _LINUX_FSCRYPT_COMMON_H */
--- /dev/null
+/*
+ * fscrypt_notsupp.h
+ *
+ * This stubs out the fscrypt functions for filesystems configured without
+ * encryption support.
+ */
+
+#ifndef _LINUX_FSCRYPT_NOTSUPP_H
+#define _LINUX_FSCRYPT_NOTSUPP_H
+
+#include <linux/fscrypt_common.h>
+
+/* crypto.c */
+static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
+ gfp_t gfp_flags)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
+{
+ return;
+}
+
+static inline struct page *fscrypt_encrypt_page(const struct inode *inode,
+ struct page *page,
+ unsigned int len,
+ unsigned int offs,
+ u64 lblk_num, gfp_t gfp_flags)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int fscrypt_decrypt_page(const struct inode *inode,
+ struct page *page,
+ unsigned int len, unsigned int offs,
+ u64 lblk_num)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_restore_control_page(struct page *page)
+{
+ return;
+}
+
+static inline void fscrypt_set_d_op(struct dentry *dentry)
+{
+ return;
+}
+
+static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
+{
+ return;
+}
+
+/* policy.c */
+static inline int fscrypt_ioctl_set_policy(struct file *filp,
+ const void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_has_permitted_context(struct inode *parent,
+ struct inode *child)
+{
+ return 0;
+}
+
+static inline int fscrypt_inherit_context(struct inode *parent,
+ struct inode *child,
+ void *fs_data, bool preload)
+{
+ return -EOPNOTSUPP;
+}
+
+/* keyinfo.c */
+static inline int fscrypt_get_encryption_info(struct inode *inode)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_put_encryption_info(struct inode *inode,
+ struct fscrypt_info *ci)
+{
+ return;
+}
+
+/* fname.c */
+static inline int fscrypt_setup_filename(struct inode *dir,
+ const struct qstr *iname,
+ int lookup, struct fscrypt_name *fname)
+{
+ if (dir->i_sb->s_cop->is_encrypted(dir))
+ return -EOPNOTSUPP;
+
+ memset(fname, 0, sizeof(struct fscrypt_name));
+ fname->usr_fname = iname;
+ fname->disk_name.name = (unsigned char *)iname->name;
+ fname->disk_name.len = iname->len;
+ return 0;
+}
+
+static inline void fscrypt_free_filename(struct fscrypt_name *fname)
+{
+ return;
+}
+
+static inline u32 fscrypt_fname_encrypted_size(const struct inode *inode,
+ u32 ilen)
+{
+ /* never happens */
+ WARN_ON(1);
+ return 0;
+}
+
+static inline int fscrypt_fname_alloc_buffer(const struct inode *inode,
+ u32 ilen,
+ struct fscrypt_str *crypto_str)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
+{
+ return;
+}
+
+static inline int fscrypt_fname_disk_to_usr(struct inode *inode,
+ u32 hash, u32 minor_hash,
+ const struct fscrypt_str *iname,
+ struct fscrypt_str *oname)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_fname_usr_to_disk(struct inode *inode,
+ const struct qstr *iname,
+ struct fscrypt_str *oname)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
+ const u8 *de_name, u32 de_name_len)
+{
+ /* Encryption support disabled; use standard comparison */
+ if (de_name_len != fname->disk_name.len)
+ return false;
+ return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
+}
+
+/* bio.c */
+static inline void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx,
+ struct bio *bio)
+{
+ return;
+}
+
+static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
+{
+ return;
+}
+
+static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
+ sector_t pblk, unsigned int len)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* _LINUX_FSCRYPT_NOTSUPP_H */
--- /dev/null
+/*
+ * fscrypt_supp.h
+ *
+ * This is included by filesystems configured with encryption support.
+ */
+
+#ifndef _LINUX_FSCRYPT_SUPP_H
+#define _LINUX_FSCRYPT_SUPP_H
+
+#include <linux/fscrypt_common.h>
+
+/* crypto.c */
+extern struct kmem_cache *fscrypt_info_cachep;
+extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
+extern void fscrypt_release_ctx(struct fscrypt_ctx *);
+extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
+ unsigned int, unsigned int,
+ u64, gfp_t);
+extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
+ unsigned int, u64);
+extern void fscrypt_restore_control_page(struct page *);
+
+extern const struct dentry_operations fscrypt_d_ops;
+
+static inline void fscrypt_set_d_op(struct dentry *dentry)
+{
+ d_set_d_op(dentry, &fscrypt_d_ops);
+}
+
+static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
+{
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY;
+ spin_unlock(&dentry->d_lock);
+}
+
+/* policy.c */
+extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
+extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
+extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
+extern int fscrypt_inherit_context(struct inode *, struct inode *,
+ void *, bool);
+/* keyinfo.c */
+extern int fscrypt_get_encryption_info(struct inode *);
+extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
+
+/* fname.c */
+extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
+ int lookup, struct fscrypt_name *);
+
+static inline void fscrypt_free_filename(struct fscrypt_name *fname)
+{
+ kfree(fname->crypto_buf.name);
+}
+
+extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32);
+extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
+ struct fscrypt_str *);
+extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
+extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
+ const struct fscrypt_str *, struct fscrypt_str *);
+extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
+ struct fscrypt_str *);
+
+#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32
+
+/* Extracts the second-to-last ciphertext block; see explanation below */
+#define FSCRYPT_FNAME_DIGEST(name, len) \
+ ((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \
+ FS_CRYPTO_BLOCK_SIZE))
+
+#define FSCRYPT_FNAME_DIGEST_SIZE FS_CRYPTO_BLOCK_SIZE
+
+/**
+ * fscrypt_digested_name - alternate identifier for an on-disk filename
+ *
+ * When userspace lists an encrypted directory without access to the key,
+ * filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE
+ * bytes are shown in this abbreviated form (base64-encoded) rather than as the
+ * full ciphertext (base64-encoded). This is necessary to allow supporting
+ * filenames up to NAME_MAX bytes, since base64 encoding expands the length.
+ *
+ * To make it possible for filesystems to still find the correct directory entry
+ * despite not knowing the full on-disk name, we encode any filesystem-specific
+ * 'hash' and/or 'minor_hash' which the filesystem may need for its lookups,
+ * followed by the second-to-last ciphertext block of the filename. Due to the
+ * use of the CBC-CTS encryption mode, the second-to-last ciphertext block
+ * depends on the full plaintext. (Note that ciphertext stealing causes the
+ * last two blocks to appear "flipped".) This makes accidental collisions very
+ * unlikely: just a 1 in 2^128 chance for two filenames to collide even if they
+ * share the same filesystem-specific hashes.
+ *
+ * However, this scheme isn't immune to intentional collisions, which can be
+ * created by anyone able to create arbitrary plaintext filenames and view them
+ * without the key. Making the "digest" be a real cryptographic hash like
+ * SHA-256 over the full ciphertext would prevent this, although it would be
+ * less efficient and harder to implement, especially since the filesystem would
+ * need to calculate it for each directory entry examined during a search.
+ */
+struct fscrypt_digested_name {
+ u32 hash;
+ u32 minor_hash;
+ u8 digest[FSCRYPT_FNAME_DIGEST_SIZE];
+};
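Size note (derived from the definitions above): the digested form is
4 + 4 + FSCRYPT_FNAME_DIGEST_SIZE = 24 bytes of binary data regardless of
the original name length, and only names whose ciphertext exceeds
FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE (32) bytes are presented this way.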
+
+/**
+ * fscrypt_match_name() - test whether the given name matches a directory entry
+ * @fname: the name being searched for
+ * @de_name: the name from the directory entry
+ * @de_name_len: the length of @de_name in bytes
+ *
+ * Normally @fname->disk_name will be set, and in that case we simply compare
+ * that to the name stored in the directory entry. The only exception is that
+ * if we don't have the key for an encrypted directory and a filename in it is
+ * very long, then we won't have the full disk_name and we'll instead need to
+ * match against the fscrypt_digested_name.
+ *
+ * Return: %true if the name matches, otherwise %false.
+ */
+static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
+ const u8 *de_name, u32 de_name_len)
+{
+ if (unlikely(!fname->disk_name.name)) {
+ const struct fscrypt_digested_name *n =
+ (const void *)fname->crypto_buf.name;
+ if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_'))
+ return false;
+ if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE)
+ return false;
+ return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len),
+ n->digest, FSCRYPT_FNAME_DIGEST_SIZE);
+ }
+
+ if (de_name_len != fname->disk_name.len)
+ return false;
+ return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
+}
+
+/* bio.c */
+extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
+extern void fscrypt_pullback_bio_page(struct page **, bool);
+extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
+ unsigned int);
+
+#endif /* _LINUX_FSCRYPT_SUPP_H */
static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif
-#ifdef CONFIG_PREEMPT_TRACER
+#if defined(CONFIG_PREEMPT_TRACER) || \
+ (defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
struct hdcp_client_ops {
int (*wakeup)(struct hdmi_hdcp_wakeup_data *data);
void (*notify_lvl_change)(void *client_ctx, int min_lvl);
+ void (*srm_cb)(void *client_ctx);
};
enum hdcp_device_type {
int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
unsigned int size, unsigned int *val);
+int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
+ unsigned int reset_length);
+
int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, int *val);
int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta,
#define __LINUX_KBUILD_H
#define DEFINE(sym, val) \
- asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+ asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
-#define BLANK() asm volatile("\n->" : : )
+#define BLANK() asm volatile("\n.ascii \"->\"" : : )
#define OFFSET(sym, str, mem) \
DEFINE(sym, offsetof(struct str, mem))
#define COMMENT(x) \
- asm volatile("\n->#" x)
+ asm volatile("\n.ascii \"->#" x "\"")
#endif
#define KEY_FLAG_TRUSTED_ONLY 9 /* set if keyring only accepts links to trusted keys */
#define KEY_FLAG_BUILTIN 10 /* set if key is builtin */
#define KEY_FLAG_ROOT_CAN_INVAL 11 /* set if key can be invalidated by root without permission */
+#define KEY_FLAG_UID_KEYRING 12 /* set if key is a user or user session keyring */
/* the key type and key description string
* - the desc is used to match a key against search criteria
#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */
#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
#define KEY_ALLOC_TRUSTED 0x0004 /* Key should be flagged as trusted */
+#define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */
extern void key_revoke(struct key *key);
extern void key_invalidate(struct key *key);
{
struct ppa_addr l;
+ l.ppa = 0;
/*
* (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc.
*/
int memblock_is_memory(phys_addr_t addr);
int memblock_is_map_memory(phys_addr_t addr);
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
-int memblock_overlaps_memory(phys_addr_t base, phys_addr_t size);
+bool memblock_overlaps_memory(phys_addr_t base, phys_addr_t size);
int memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
+#include <linux/workqueue.h>
#include <asm/page.h>
#include <asm/mmu.h>
int app_setting;
#endif
+ struct work_struct async_put_work;
};
static inline void mm_init_cpumask(struct mm_struct *mm)
unsigned int state; /* function state */
#define SDIO_STATE_PRESENT (1<<0) /* present in sysfs */
- u8 tmpbuf[4]; /* DMA:able scratch buffer */
+ u8 *tmpbuf; /* DMA:able scratch buffer */
unsigned num_info; /* number of info strings */
const char **info; /* info strings */
/* Each module must use one module_init(). */
#define module_init(initfn) \
- static inline initcall_t __inittest(void) \
+ static inline initcall_t __maybe_unused __inittest(void) \
{ return initfn; } \
int init_module(void) __attribute__((alias(#initfn)));
/* This is only required if you want to be unloadable. */
#define module_exit(exitfn) \
- static inline exitcall_t __exittest(void) \
+ static inline exitcall_t __maybe_unused __exittest(void) \
{ return exitfn; } \
void cleanup_module(void) __attribute__((alias(#exitfn)));
#define PCI_DEVICE_ID_AMD_CS5536_EHC 0x2095
#define PCI_DEVICE_ID_AMD_CS5536_UDC 0x2096
#define PCI_DEVICE_ID_AMD_CS5536_UOC 0x2097
+#define PCI_DEVICE_ID_AMD_CS5536_DEV_IDE 0x2092
#define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A
#define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081
#define PCI_DEVICE_ID_AMD_LX_AES 0x2082
struct mmp_dma_platdata {
int dma_channels;
+ int nb_requestors;
};
#endif /* MMP_DMA_H */
-/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
struct mutex m_lock;
spinlock_t w_lock;
uint8_t pkt_owner;
+#ifdef CONFIG_MSM_QDSP6_APRV2_VM
+ uint16_t vm_dest_svc;
+ uint32_t vm_handle;
+#endif
};
struct apr_client {
--- /dev/null
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __APRV2_VM_H__
+#define __APRV2_VM_H__
+
+#define APRV2_VM_MAX_DNS_SIZE (31)
+ /* Includes NULL character. */
+#define APRV2_VM_PKT_SERVICE_ID_MASK (0x00FF)
+ /* Bitmask of the service ID field. */
+
+/* Packet Structure Definition */
+struct aprv2_vm_packet_t {
+ uint32_t header;
+ uint16_t src_addr;
+ uint16_t src_port;
+ uint16_t dst_addr;
+ uint16_t dst_port;
+ uint32_t token;
+ uint32_t opcode;
+};
+
+/**
+ * In order to send command/event via MM HAB, the following buffer
+ * format shall be followed, where the buffer is provided to the
+ * HAB send API.
+ * |-----cmd_id or evt_id -----| <- 32 bit, e.g. APRV2_VM_CMDID_REGISTER
+ * |-----cmd payload ----------| e.g. aprv2_vm_cmd_register_t
+ * | ... |
+ *
+ * In order to receive a command response or event ack, the following
+ * buffer format shall be followed, where the buffer is provided to
+ * the HAB receive API.
+ * |-----cmd response ---------| e.g. aprv2_vm_cmd_register_rsp_t
+ * | ... |
+ */
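Example — an illustrative sketch of the send-buffer layout described
above; the wrapper struct is an assumption, only the APRV2_VM_*
definitions come from this header:

	struct example_register_buf {
		uint32_t cmd_id;			/* APRV2_VM_CMDID_REGISTER */
		struct aprv2_vm_cmd_register_t cmd;	/* command payload */
	};

	/* the matching HAB receive buffer then begins with the
	 * response, e.g. struct aprv2_vm_cmd_register_rsp_t */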
+
+/* Registers a service with the backend APR driver. */
+#define APRV2_VM_CMDID_REGISTER (0x00000001)
+
+struct aprv2_vm_cmd_register_t {
+ uint32_t name_size;
+ /**< The service name string size in bytes. */
+ char name[APRV2_VM_MAX_DNS_SIZE];
+ /**<
+ * The service name string to register.
+ *
+ * A NULL name means the service does not have a name.
+ */
+ uint16_t addr;
+ /**<
+ * The address to register.
+ *
+ * A zero value means to auto-generate a free dynamic address.
+ * A non-zero value means to directly use the statically assigned address.
+ */
+};
+
+struct aprv2_vm_cmd_register_rsp_t {
+ int32_t status;
+ /**< The status of registration. */
+ uint32_t handle;
+ /**< The registered service handle. */
+ uint16_t addr;
+ /**< The actual registered address. */
+};
+
+#define APRV2_VM_CMDID_DEREGISTER (0x00000002)
+
+struct aprv2_vm_cmd_deregister_t {
+ uint32_t handle;
+ /**< The registered service handle. */
+};
+
+struct aprv2_vm_cmd_deregister_rsp_t {
+ int32_t status;
+ /**< The status of de-registration. */
+};
+
+#define APRV2_VM_CMDID_ASYNC_SEND (0x00000003)
+
+struct aprv2_vm_cmd_async_send_t {
+ uint32_t handle;
+ /**< The registered service handle. */
+ struct aprv2_vm_packet_t pkt_header;
+ /**< The packet header. */
+ /* The apr packet payload follows */
+};
+
+struct aprv2_vm_cmd_async_send_rsp_t {
+ int32_t status;
+ /**< The status of send. */
+};
+
+#define APRV2_VM_EVT_RX_PKT_AVAILABLE (0x00000004)
+
+struct aprv2_vm_evt_rx_pkt_available_t {
+ struct aprv2_vm_packet_t pkt_header;
+ /**< The packet header. */
+ /* The apr packet payload follows */
+};
+
+struct aprv2_vm_ack_rx_pkt_available_t {
+ int32_t status;
+};
+
+#endif /* __APRV2_VM_H__ */
/* mmput gets rid of the mappings and all user-space */
extern int mmput(struct mm_struct *);
+/* Same as above, but performs the slow path from an async context. Can
+ * be called from atomic context as well.
+ */
+extern void mmput_async(struct mm_struct *);
+
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
int perf_refcount;
struct hlist_head __percpu *perf_events;
struct bpf_prog *prog;
+ struct perf_event *bpf_prog_owner;
int (*perf_perm)(struct trace_event_call *,
struct perf_event *);
unsigned char **chars, size_t size);
extern void tty_flip_buffer_push(struct tty_port *port);
void tty_schedule_flip(struct tty_port *port);
+int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag);
static inline int tty_insert_flip_char(struct tty_port *port,
unsigned char ch, char flag)
*char_buf_ptr(tb, tb->used++) = ch;
return 1;
}
- return tty_insert_flip_string_flags(port, &ch, &flag, 1);
+ return __tty_insert_flip_char(port, ch, flag);
}
static inline int tty_insert_flip_string(struct tty_port *port,
__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
__WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
- __WQ_ORDERED_EXPLICIT = 1 << 18, /* internal: alloc_ordered_workqueue() */
+ __WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
static inline void inode_detach_wb(struct inode *inode)
{
if (inode->i_wb) {
+ WARN_ON_ONCE(!(inode->i_state & I_CLEAR));
wb_put(inode->i_wb);
inode->i_wb = NULL;
}
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__
-#include <linux/percpu_counter.h>
-
struct netns_frags {
- /* The percpu_counter "mem" need to be cacheline aligned.
- * mem.count must not share cacheline with other writers
- */
- struct percpu_counter mem ____cacheline_aligned_in_smp;
-
+ /* Keep atomic mem on separate cachelines in structs that include it */
+ atomic_t mem ____cacheline_aligned_in_smp;
/* sysctls */
int timeout;
int high_thresh;
int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);
-static inline int inet_frags_init_net(struct netns_frags *nf)
-{
- return percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
-}
-static inline void inet_frags_uninit_net(struct netns_frags *nf)
+static inline void inet_frags_init_net(struct netns_frags *nf)
{
- percpu_counter_destroy(&nf->mem);
+ atomic_set(&nf->mem, 0);
}
-
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
/* Memory Tracking Functions. */
-/* The default percpu_counter batch size is not big enough to scale to
- * fragmentation mem acct sizes.
- * The mem size of a 64K fragment is approx:
- * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
- */
-static unsigned int frag_percpu_counter_batch = 130000;
-
static inline int frag_mem_limit(struct netns_frags *nf)
{
- return percpu_counter_read(&nf->mem);
+ return atomic_read(&nf->mem);
}
static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
{
- __percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
+ atomic_sub(i, &nf->mem);
}
static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
{
- __percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
+ atomic_add(i, &nf->mem);
}
-static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
+static inline int sum_frag_mem_limit(struct netns_frags *nf)
{
- unsigned int res;
-
- local_bh_disable();
- res = percpu_counter_sum_positive(&nf->mem);
- local_bh_enable();
-
- return res;
+ return atomic_read(&nf->mem);
}
/* RFC 3168 support :
__u16 fn_flags;
int fn_sernum;
struct rt6_info *rr_ptr;
+ struct rcu_head rcu;
};
#ifndef CONFIG_IPV6_SUBTREES
* the same cache line.
*/
struct fib6_table *rt6i_table;
- struct fib6_node *rt6i_node;
+ struct fib6_node __rcu *rt6i_node;
struct in6_addr rt6i_gateway;
rt0->rt6i_flags |= RTF_EXPIRES;
}
+/* Safely get fn->sernum for the passed-in rt and store the result in the
+ * passed-in cookie. Return true if the cookie was obtained safely,
+ * false otherwise.
+ */
+static inline bool rt6_get_cookie_safe(const struct rt6_info *rt,
+ u32 *cookie)
+{
+ struct fib6_node *fn;
+ bool status = false;
+
+ rcu_read_lock();
+ fn = rcu_dereference(rt->rt6i_node);
+
+ if (fn) {
+ *cookie = fn->fn_sernum;
+ status = true;
+ }
+
+ rcu_read_unlock();
+ return status;
+}
+
static inline u32 rt6_get_cookie(const struct rt6_info *rt)
{
+ u32 cookie = 0;
+
if (rt->rt6i_flags & RTF_PCPU ||
(unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from))
rt = (struct rt6_info *)(rt->dst.from);
- return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+ rt6_get_cookie_safe(rt, &cookie);
+
+ return cookie;
}
static inline void ip6_rt_put(struct rt6_info *rt)
return ifindex;
}
+static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
+{
+ struct net_device *dev;
+ int rc = 0;
+
+ if (likely(ifindex)) {
+ rcu_read_lock();
+
+ dev = dev_get_by_index_rcu(net, ifindex);
+ if (dev)
+ rc = l3mdev_master_ifindex_rcu(dev);
+
+ rcu_read_unlock();
+ }
+
+ return rc;
+}
+
/* get index of an interface to use for FIB lookups. For devices
* enslaved to an L3 master device FIB lookups are based on the
* master index
return 0;
}
+static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
+{
+ return 0;
+}
+
static inline int l3mdev_fib_oif_rcu(struct net_device *dev)
{
return dev ? dev->ifindex : 0;
static inline int sctp_ulpevent_type_enabled(__u16 sn_type,
struct sctp_event_subscribe *mask)
{
+ int offset = sn_type - SCTP_SN_TYPE_BASE;
char *amask = (char *) mask;
- return amask[sn_type - SCTP_SN_TYPE_BASE];
+
+ if (offset >= sizeof(struct sctp_event_subscribe))
+ return 0;
+ return amask[offset];
}
/* Given an event subscription, is this event enabled? */
int header_len;
int trailer_len;
u32 extra_flags;
+ u32 output_mark;
} props;
struct xfrm_lifetime_cfg lft;
unsigned short family;
struct dst_ops *dst_ops;
void (*garbage_collect)(struct net *net);
- struct dst_entry *(*dst_lookup)(struct net *net, int tos,
+ struct dst_entry *(*dst_lookup)(struct net *net,
+ int tos, int oif,
const xfrm_address_t *saddr,
- const xfrm_address_t *daddr);
- int (*get_saddr)(struct net *net,
+ const xfrm_address_t *daddr,
+ u32 mark);
+ int (*get_saddr)(struct net *net, int oif,
xfrm_address_t *saddr,
- xfrm_address_t *daddr);
+ xfrm_address_t *daddr,
+ u32 mark);
void (*decode_session)(struct sk_buff *skb,
struct flowi *fl,
int reverse);
int port; /* created/attached port */
unsigned int flags; /* SNDRV_VIRMIDI_* */
rwlock_t filelist_lock;
+ struct rw_semaphore filelist_sem;
struct list_head filelist;
};
#include <linux/tracepoint.h>
-#define show_dev(entry) MAJOR(entry->dev), MINOR(entry->dev)
-#define show_dev_ino(entry) show_dev(entry), (unsigned long)entry->ino
+#define show_dev(dev) MAJOR(dev), MINOR(dev)
+#define show_dev_ino(entry) show_dev(entry->dev), (unsigned long)entry->ino
TRACE_DEFINE_ENUM(NODE);
TRACE_DEFINE_ENUM(DATA);
TRACE_DEFINE_ENUM(META_FLUSH);
TRACE_DEFINE_ENUM(INMEM);
TRACE_DEFINE_ENUM(INMEM_DROP);
+TRACE_DEFINE_ENUM(INMEM_INVALIDATE);
TRACE_DEFINE_ENUM(IPU);
TRACE_DEFINE_ENUM(OPU);
TRACE_DEFINE_ENUM(CURSEG_HOT_DATA);
TRACE_DEFINE_ENUM(CP_SYNC);
TRACE_DEFINE_ENUM(CP_RECOVERY);
TRACE_DEFINE_ENUM(CP_DISCARD);
+TRACE_DEFINE_ENUM(CP_TRIMMED);
#define show_block_type(type) \
__print_symbolic(type, \
{ META_FLUSH, "META_FLUSH" }, \
{ INMEM, "INMEM" }, \
{ INMEM_DROP, "INMEM_DROP" }, \
+ { INMEM_INVALIDATE, "INMEM_INVALIDATE" }, \
+ { INMEM_REVOKE, "INMEM_REVOKE" }, \
{ IPU, "IN-PLACE" }, \
{ OPU, "OUT-OF-PLACE" })
#define F2FS_BIO_MASK(t) (t & (READA | WRITE_FLUSH_FUA))
#define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO))
-#define show_bio_type(type) show_bio_base(type), show_bio_extra(type)
+#define show_bio_type(op, op_flags) \
+ show_bio_base((op|op_flags)), show_bio_extra((op|op_flags))
#define show_bio_base(type) \
__print_symbolic(F2FS_BIO_MASK(type), \
{ REQ_META | REQ_PRIO, "(MP)" }, \
{ 0, " \b" })
+#define show_block_temp(temp) \
+ __print_symbolic(temp, \
+ { HOT, "HOT" }, \
+ { WARM, "WARM" }, \
+ { COLD, "COLD" })
+
#define show_data_type(type) \
__print_symbolic(type, \
{ CURSEG_HOT_DATA, "Hot DATA" }, \
{ CP_FASTBOOT, "Fastboot" }, \
{ CP_SYNC, "Sync" }, \
{ CP_RECOVERY, "Recovery" }, \
- { CP_DISCARD, "Discard" })
+ { CP_DISCARD, "Discard" }, \
+ { CP_UMOUNT | CP_TRIMMED, "Umount,Trimmed" })
struct victim_sel_policy;
struct f2fs_map_blocks;
),
TP_printk("dev = (%d,%d), superblock is %s, wait = %d",
- show_dev(__entry),
+ show_dev(__entry->dev),
__entry->dirty ? "dirty" : "not dirty",
__entry->wait)
);
TP_ARGS(inode, ret)
);
+DEFINE_EVENT(f2fs__inode_exit, f2fs_drop_inode,
+
+ TP_PROTO(struct inode *inode, int ret),
+
+ TP_ARGS(inode, ret)
+);
+
DEFINE_EVENT(f2fs__inode, f2fs_truncate,
TP_PROTO(struct inode *inode),
TRACE_EVENT(f2fs_background_gc,
- TP_PROTO(struct super_block *sb, long wait_ms,
+ TP_PROTO(struct super_block *sb, unsigned int wait_ms,
unsigned int prefree, unsigned int free),
TP_ARGS(sb, wait_ms, prefree, free),
TP_STRUCT__entry(
__field(dev_t, dev)
- __field(long, wait_ms)
+ __field(unsigned int, wait_ms)
__field(unsigned int, prefree)
__field(unsigned int, free)
),
__entry->free = free;
),
- TP_printk("dev = (%d,%d), wait_ms = %ld, prefree = %u, free = %u",
- show_dev(__entry),
+ TP_printk("dev = (%d,%d), wait_ms = %u, prefree = %u, free = %u",
+ show_dev(__entry->dev),
__entry->wait_ms,
__entry->prefree,
__entry->free)
);
+TRACE_EVENT(f2fs_gc_begin,
+
+ TP_PROTO(struct super_block *sb, bool sync, bool background,
+ long long dirty_nodes, long long dirty_dents,
+ long long dirty_imeta, unsigned int free_sec,
+ unsigned int free_seg, int reserved_seg,
+ unsigned int prefree_seg),
+
+ TP_ARGS(sb, sync, background, dirty_nodes, dirty_dents, dirty_imeta,
+ free_sec, free_seg, reserved_seg, prefree_seg),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(bool, sync)
+ __field(bool, background)
+ __field(long long, dirty_nodes)
+ __field(long long, dirty_dents)
+ __field(long long, dirty_imeta)
+ __field(unsigned int, free_sec)
+ __field(unsigned int, free_seg)
+ __field(int, reserved_seg)
+ __field(unsigned int, prefree_seg)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->sync = sync;
+ __entry->background = background;
+ __entry->dirty_nodes = dirty_nodes;
+ __entry->dirty_dents = dirty_dents;
+ __entry->dirty_imeta = dirty_imeta;
+ __entry->free_sec = free_sec;
+ __entry->free_seg = free_seg;
+ __entry->reserved_seg = reserved_seg;
+ __entry->prefree_seg = prefree_seg;
+ ),
+
+ TP_printk("dev = (%d,%d), sync = %d, background = %d, nodes = %lld, "
+ "dents = %lld, imeta = %lld, free_sec:%u, free_seg:%u, "
+ "rsv_seg:%d, prefree_seg:%u",
+ show_dev(__entry->dev),
+ __entry->sync,
+ __entry->background,
+ __entry->dirty_nodes,
+ __entry->dirty_dents,
+ __entry->dirty_imeta,
+ __entry->free_sec,
+ __entry->free_seg,
+ __entry->reserved_seg,
+ __entry->prefree_seg)
+);
+
+TRACE_EVENT(f2fs_gc_end,
+
+ TP_PROTO(struct super_block *sb, int ret, int seg_freed,
+ int sec_freed, long long dirty_nodes,
+ long long dirty_dents, long long dirty_imeta,
+ unsigned int free_sec, unsigned int free_seg,
+ int reserved_seg, unsigned int prefree_seg),
+
+ TP_ARGS(sb, ret, seg_freed, sec_freed, dirty_nodes, dirty_dents,
+ dirty_imeta, free_sec, free_seg, reserved_seg, prefree_seg),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, ret)
+ __field(int, seg_freed)
+ __field(int, sec_freed)
+ __field(long long, dirty_nodes)
+ __field(long long, dirty_dents)
+ __field(long long, dirty_imeta)
+ __field(unsigned int, free_sec)
+ __field(unsigned int, free_seg)
+ __field(int, reserved_seg)
+ __field(unsigned int, prefree_seg)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->ret = ret;
+ __entry->seg_freed = seg_freed;
+ __entry->sec_freed = sec_freed;
+ __entry->dirty_nodes = dirty_nodes;
+ __entry->dirty_dents = dirty_dents;
+ __entry->dirty_imeta = dirty_imeta;
+ __entry->free_sec = free_sec;
+ __entry->free_seg = free_seg;
+ __entry->reserved_seg = reserved_seg;
+ __entry->prefree_seg = prefree_seg;
+ ),
+
+ TP_printk("dev = (%d,%d), ret = %d, seg_freed = %d, sec_freed = %d, "
+ "nodes = %lld, dents = %lld, imeta = %lld, free_sec:%u, "
+ "free_seg:%u, rsv_seg:%d, prefree_seg:%u",
+ show_dev(__entry->dev),
+ __entry->ret,
+ __entry->seg_freed,
+ __entry->sec_freed,
+ __entry->dirty_nodes,
+ __entry->dirty_dents,
+ __entry->dirty_imeta,
+ __entry->free_sec,
+ __entry->free_seg,
+ __entry->reserved_seg,
+ __entry->prefree_seg)
+);
+
TRACE_EVENT(f2fs_get_victim,
TP_PROTO(struct super_block *sb, int type, int gc_type,
TP_printk("dev = (%d,%d), type = %s, policy = (%s, %s, %s), victim = %u "
"ofs_unit = %u, pre_victim_secno = %d, prefree = %u, free = %u",
- show_dev(__entry),
+ show_dev(__entry->dev),
show_data_type(__entry->type),
show_gc_type(__entry->gc_type),
show_alloc_mode(__entry->alloc_mode),
__entry->ret)
);
-TRACE_EVENT(f2fs_reserve_new_block,
+TRACE_EVENT(f2fs_reserve_new_blocks,
- TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node),
+ TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node,
+ blkcnt_t count),
- TP_ARGS(inode, nid, ofs_in_node),
+ TP_ARGS(inode, nid, ofs_in_node, count),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(nid_t, nid)
__field(unsigned int, ofs_in_node)
+ __field(blkcnt_t, count)
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->nid = nid;
__entry->ofs_in_node = ofs_in_node;
+ __entry->count = count;
),
- TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u",
- show_dev(__entry),
+ TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u, count = %llu",
+ show_dev(__entry->dev),
(unsigned int)__entry->nid,
- __entry->ofs_in_node)
+ __entry->ofs_in_node,
+ (unsigned long long)__entry->count)
);
DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
__field(dev_t, dev)
__field(ino_t, ino)
__field(pgoff_t, index)
- __field(block_t, blkaddr)
- __field(int, rw)
+ __field(block_t, old_blkaddr)
+ __field(block_t, new_blkaddr)
+ __field(int, op)
+ __field(int, op_flags)
+ __field(int, temp)
__field(int, type)
),
__entry->dev = page->mapping->host->i_sb->s_dev;
__entry->ino = page->mapping->host->i_ino;
__entry->index = page->index;
- __entry->blkaddr = fio->blk_addr;
- __entry->rw = fio->rw;
+ __entry->old_blkaddr = fio->old_blkaddr;
+ __entry->new_blkaddr = fio->new_blkaddr;
+ __entry->op = fio->op;
+ __entry->op_flags = fio->op_flags;
+ __entry->temp = fio->temp;
__entry->type = fio->type;
),
TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
- "blkaddr = 0x%llx, rw = %s%s, type = %s",
+ "oldaddr = 0x%llx, newaddr = 0x%llx, rw = %s(%s), type = %s_%s",
show_dev_ino(__entry),
(unsigned long)__entry->index,
- (unsigned long long)__entry->blkaddr,
- show_bio_type(__entry->rw),
+ (unsigned long long)__entry->old_blkaddr,
+ (unsigned long long)__entry->new_blkaddr,
+ show_bio_type(__entry->op, __entry->op_flags),
+ show_block_temp(__entry->temp),
show_block_type(__entry->type))
);
TP_CONDITION(page->mapping)
);
-DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_mbio,
+DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_write,
TP_PROTO(struct page *page, struct f2fs_io_info *fio),
TP_CONDITION(page->mapping)
);
-DECLARE_EVENT_CLASS(f2fs__submit_bio,
+DECLARE_EVENT_CLASS(f2fs__bio,
- TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio,
- struct bio *bio),
+ TP_PROTO(struct super_block *sb, int type, struct bio *bio),
- TP_ARGS(sb, fio, bio),
+ TP_ARGS(sb, type, bio),
TP_STRUCT__entry(
__field(dev_t, dev)
- __field(int, rw)
+ __field(dev_t, target)
+ __field(int, op)
+ __field(int, op_flags)
__field(int, type)
__field(sector_t, sector)
__field(unsigned int, size)
TP_fast_assign(
__entry->dev = sb->s_dev;
- __entry->rw = fio->rw;
- __entry->type = fio->type;
+ __entry->target = bio->bi_bdev->bd_dev;
+ __entry->op = bio_op(bio);
+ __entry->op_flags = bio->bi_rw;
+ __entry->type = type;
__entry->sector = bio->bi_iter.bi_sector;
__entry->size = bio->bi_iter.bi_size;
),
- TP_printk("dev = (%d,%d), %s%s, %s, sector = %lld, size = %u",
- show_dev(__entry),
- show_bio_type(__entry->rw),
+ TP_printk("dev = (%d,%d)/(%d,%d), rw = %s%s, %s, sector = %lld, size = %u",
+ show_dev(__entry->target),
+ show_dev(__entry->dev),
+ show_bio_type(__entry->op, __entry->op_flags),
show_block_type(__entry->type),
(unsigned long long)__entry->sector,
__entry->size)
);
-DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_write_bio,
+DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_prepare_write_bio,
+
+ TP_PROTO(struct super_block *sb, int type, struct bio *bio),
+
+ TP_ARGS(sb, type, bio),
+
+ TP_CONDITION(bio)
+);
+
+DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_prepare_read_bio,
+
+ TP_PROTO(struct super_block *sb, int type, struct bio *bio),
+
+ TP_ARGS(sb, type, bio),
+
+ TP_CONDITION(bio)
+);
+
+DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_submit_read_bio,
- TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio,
- struct bio *bio),
+ TP_PROTO(struct super_block *sb, int type, struct bio *bio),
- TP_ARGS(sb, fio, bio),
+ TP_ARGS(sb, type, bio),
TP_CONDITION(bio)
);
-DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_read_bio,
+DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_submit_write_bio,
- TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio,
- struct bio *bio),
+ TP_PROTO(struct super_block *sb, int type, struct bio *bio),
- TP_ARGS(sb, fio, bio),
+ TP_ARGS(sb, type, bio),
TP_CONDITION(bio)
);
),
TP_printk("dev = (%d,%d), checkpoint for %s, state = %s",
- show_dev(__entry),
+ show_dev(__entry->dev),
show_cpreason(__entry->reason),
__entry->msg)
);
-TRACE_EVENT(f2fs_issue_discard,
+DECLARE_EVENT_CLASS(f2fs_discard,
- TP_PROTO(struct super_block *sb, block_t blkstart, block_t blklen),
+ TP_PROTO(struct block_device *dev, block_t blkstart, block_t blklen),
- TP_ARGS(sb, blkstart, blklen),
+ TP_ARGS(dev, blkstart, blklen),
TP_STRUCT__entry(
__field(dev_t, dev)
),
TP_fast_assign(
- __entry->dev = sb->s_dev;
+ __entry->dev = dev->bd_dev;
__entry->blkstart = blkstart;
__entry->blklen = blklen;
),
TP_printk("dev = (%d,%d), blkstart = 0x%llx, blklen = 0x%llx",
- show_dev(__entry),
+ show_dev(__entry->dev),
(unsigned long long)__entry->blkstart,
(unsigned long long)__entry->blklen)
);
+DEFINE_EVENT(f2fs_discard, f2fs_queue_discard,
+
+ TP_PROTO(struct block_device *dev, block_t blkstart, block_t blklen),
+
+ TP_ARGS(dev, blkstart, blklen)
+);
+
+DEFINE_EVENT(f2fs_discard, f2fs_issue_discard,
+
+ TP_PROTO(struct block_device *dev, block_t blkstart, block_t blklen),
+
+ TP_ARGS(dev, blkstart, blklen)
+);
+
+TRACE_EVENT(f2fs_issue_reset_zone,
+
+ TP_PROTO(struct block_device *dev, block_t blkstart),
+
+ TP_ARGS(dev, blkstart),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(block_t, blkstart)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->bd_dev;
+ __entry->blkstart = blkstart;
+ ),
+
+ TP_printk("dev = (%d,%d), reset zone at block = 0x%llx",
+ show_dev(__entry->dev),
+ (unsigned long long)__entry->blkstart)
+);
+
TRACE_EVENT(f2fs_issue_flush,
- TP_PROTO(struct super_block *sb, unsigned int nobarrier,
- unsigned int flush_merge),
+ TP_PROTO(struct block_device *dev, unsigned int nobarrier,
+ unsigned int flush_merge, int ret),
- TP_ARGS(sb, nobarrier, flush_merge),
+ TP_ARGS(dev, nobarrier, flush_merge, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(unsigned int, nobarrier)
__field(unsigned int, flush_merge)
+ __field(int, ret)
),
TP_fast_assign(
- __entry->dev = sb->s_dev;
+ __entry->dev = dev->bd_dev;
__entry->nobarrier = nobarrier;
__entry->flush_merge = flush_merge;
+ __entry->ret = ret;
),
- TP_printk("dev = (%d,%d), %s %s",
- show_dev(__entry),
+ TP_printk("dev = (%d,%d), %s %s, ret = %d",
+ show_dev(__entry->dev),
__entry->nobarrier ? "skip (nobarrier)" : "issue",
- __entry->flush_merge ? " with flush_merge" : "")
+ __entry->flush_merge ? " with flush_merge" : "",
+ __entry->ret)
);
TRACE_EVENT(f2fs_lookup_extent_tree_start,
),
TP_printk("dev = (%d,%d), shrunk: node_cnt = %u, tree_cnt = %u",
- show_dev(__entry),
+ show_dev(__entry->dev),
__entry->node_cnt,
__entry->tree_cnt)
);
__entry->node_cnt)
);
+DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes,
+
+ TP_PROTO(struct super_block *sb, int type, s64 count),
+
+ TP_ARGS(sb, type, count),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, type)
+ __field(s64, count)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->type = type;
+ __entry->count = count;
+ ),
+
+ TP_printk("dev = (%d,%d), %s, dirty count = %lld",
+ show_dev(__entry->dev),
+ show_file_type(__entry->type),
+ __entry->count)
+);
+
+DEFINE_EVENT(f2fs_sync_dirty_inodes, f2fs_sync_dirty_inodes_enter,
+
+ TP_PROTO(struct super_block *sb, int type, s64 count),
+
+ TP_ARGS(sb, type, count)
+);
+
+DEFINE_EVENT(f2fs_sync_dirty_inodes, f2fs_sync_dirty_inodes_exit,
+
+ TP_PROTO(struct super_block *sb, int type, s64 count),
+
+ TP_ARGS(sb, type, count)
+);
+
#endif /* _TRACE_F2FS_H */
/* This part must be outside protection */
--- /dev/null
+#ifdef CONFIG_PREEMPTIRQ_EVENTS
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM preemptirq
+
+#if !defined(_TRACE_PREEMPTIRQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PREEMPTIRQ_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+#include <linux/string.h>
+#include <asm/sections.h>
+
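+/*
+ * Store caller/parent IPs as 32-bit offsets from _stext to keep the
+ * event entry compact; TP_printk() reconstructs the full addresses.
+ */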
+DECLARE_EVENT_CLASS(preemptirq_template,
+
+ TP_PROTO(unsigned long ip, unsigned long parent_ip),
+
+ TP_ARGS(ip, parent_ip),
+
+ TP_STRUCT__entry(
+ __field(u32, caller_offs)
+ __field(u32, parent_offs)
+ ),
+
+ TP_fast_assign(
+ __entry->caller_offs = (u32)(ip - (unsigned long)_stext);
+ __entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext);
+ ),
+
+ TP_printk("caller=%pF parent=%pF",
+ (void *)((unsigned long)(_stext) + __entry->caller_offs),
+ (void *)((unsigned long)(_stext) + __entry->parent_offs))
+);
+
+#ifndef CONFIG_PROVE_LOCKING
+DEFINE_EVENT(preemptirq_template, irq_disable,
+ TP_PROTO(unsigned long ip, unsigned long parent_ip),
+ TP_ARGS(ip, parent_ip));
+
+DEFINE_EVENT(preemptirq_template, irq_enable,
+ TP_PROTO(unsigned long ip, unsigned long parent_ip),
+ TP_ARGS(ip, parent_ip));
+#endif
+
+#ifdef CONFIG_DEBUG_PREEMPT
+DEFINE_EVENT(preemptirq_template, preempt_disable,
+ TP_PROTO(unsigned long ip, unsigned long parent_ip),
+ TP_ARGS(ip, parent_ip));
+
+DEFINE_EVENT(preemptirq_template, preempt_enable,
+ TP_PROTO(unsigned long ip, unsigned long parent_ip),
+ TP_ARGS(ip, parent_ip));
+#endif
+
+#endif /* _TRACE_PREEMPTIRQ_H */
+
+#include <trace/define_trace.h>
+
+#else /* !CONFIG_PREEMPTIRQ_EVENTS */
+
+#define trace_irq_enable(...)
+#define trace_irq_disable(...)
+#define trace_preempt_enable(...)
+#define trace_preempt_disable(...)
+#define trace_irq_enable_rcuidle(...)
+#define trace_irq_disable_rcuidle(...)
+#define trace_preempt_enable_rcuidle(...)
+#define trace_preempt_disable_rcuidle(...)
+
+#endif
__entry->util_avg_pelt = cfs_rq->avg.util_avg;
__entry->util_avg_walt = 0;
#ifdef CONFIG_SCHED_WALT
- __entry->util_avg_walt =
- cpu_rq(cpu)->prev_runnable_sum << SCHED_LOAD_SHIFT;
- do_div(__entry->util_avg_walt, walt_ravg_window);
+ __entry->util_avg_walt =
+ div64_u64(cpu_rq(cpu)->cumulative_runnable_avg,
+ walt_ravg_window >> SCHED_LOAD_SHIFT);
if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
__entry->util_avg = __entry->util_avg_walt;
#endif
TP_printk("overutilized=%d",
__entry->overutilized ? 1 : 0)
);
+#ifdef CONFIG_SCHED_WALT
+struct rq;
+
+TRACE_EVENT(walt_update_task_ravg,
+
+ TP_PROTO(struct task_struct *p, struct rq *rq, int evt,
+ u64 wallclock, u64 irqtime),
+
+ TP_ARGS(p, rq, evt, wallclock, irqtime),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( pid_t, cur_pid )
+ __field( u64, wallclock )
+ __field( u64, mark_start )
+ __field( u64, delta_m )
+ __field( u64, win_start )
+ __field( u64, delta )
+ __field( u64, irqtime )
+ __field( int, evt )
+ __field(unsigned int, demand )
+ __field(unsigned int, sum )
+ __field( int, cpu )
+ __field( u64, cs )
+ __field( u64, ps )
+ __field(unsigned long, util )
+ __field( u32, curr_window )
+ __field( u32, prev_window )
+ __field( u64, nt_cs )
+ __field( u64, nt_ps )
+ __field( u32, active_windows )
+ ),
+
+ TP_fast_assign(
+ __entry->wallclock = wallclock;
+ __entry->win_start = rq->window_start;
+ __entry->delta = (wallclock - rq->window_start);
+ __entry->evt = evt;
+ __entry->cpu = rq->cpu;
+ __entry->cur_pid = rq->curr->pid;
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->mark_start = p->ravg.mark_start;
+ __entry->delta_m = (wallclock - p->ravg.mark_start);
+ __entry->demand = p->ravg.demand;
+ __entry->sum = p->ravg.sum;
+ __entry->irqtime = irqtime;
+ __entry->cs = rq->curr_runnable_sum;
+ __entry->ps = rq->prev_runnable_sum;
+ __entry->util = rq->prev_runnable_sum << SCHED_LOAD_SHIFT;
+ do_div(__entry->util, walt_ravg_window);
+ __entry->curr_window = p->ravg.curr_window;
+ __entry->prev_window = p->ravg.prev_window;
+ __entry->nt_cs = rq->nt_curr_runnable_sum;
+ __entry->nt_ps = rq->nt_prev_runnable_sum;
+ __entry->active_windows = p->ravg.active_windows;
+ ),
+
+ TP_printk("wc %llu ws %llu delta %llu event %d cpu %d cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu"
+ " cs %llu ps %llu util %lu cur_window %u prev_window %u active_wins %u"
+ , __entry->wallclock, __entry->win_start, __entry->delta,
+ __entry->evt, __entry->cpu, __entry->cur_pid,
+ __entry->pid, __entry->comm, __entry->mark_start,
+ __entry->delta_m, __entry->demand,
+ __entry->sum, __entry->irqtime,
+ __entry->cs, __entry->ps, __entry->util,
+ __entry->curr_window, __entry->prev_window,
+ __entry->active_windows
+ )
+);
+
+TRACE_EVENT(walt_update_history,
+
+ TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int samples,
+ int evt),
+
+ TP_ARGS(rq, p, runtime, samples, evt),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field(unsigned int, runtime )
+ __field( int, samples )
+ __field( int, evt )
+ __field( u64, demand )
+ __field( u64, walt_avg )
+ __field(unsigned int, pelt_avg )
+ __array( u32, hist, RAVG_HIST_SIZE_MAX)
+ __field( int, cpu )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->runtime = runtime;
+ __entry->samples = samples;
+ __entry->evt = evt;
+ __entry->demand = p->ravg.demand;
+ __entry->walt_avg = (__entry->demand << 10) / walt_ravg_window;
+ __entry->pelt_avg = p->se.avg.util_avg;
+ memcpy(__entry->hist, p->ravg.sum_history,
+ RAVG_HIST_SIZE_MAX * sizeof(u32));
+ __entry->cpu = rq->cpu;
+ ),
+
+ TP_printk("%d (%s): runtime %u samples %d event %d demand %llu"
+ " walt %llu pelt %u (hist: %u %u %u %u %u) cpu %d",
+ __entry->pid, __entry->comm,
+ __entry->runtime, __entry->samples, __entry->evt,
+ __entry->demand,
+ __entry->walt_avg,
+ __entry->pelt_avg,
+ __entry->hist[0], __entry->hist[1],
+ __entry->hist[2], __entry->hist[3],
+ __entry->hist[4], __entry->cpu)
+);
+
+TRACE_EVENT(walt_migration_update_sum,
+
+ TP_PROTO(struct rq *rq, struct task_struct *p),
+
+ TP_ARGS(rq, p),
+
+ TP_STRUCT__entry(
+ __field(int, cpu )
+ __field(int, pid )
+ __field( u64, cs )
+ __field( u64, ps )
+ __field( s64, nt_cs )
+ __field( s64, nt_ps )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu_of(rq);
+ __entry->cs = rq->curr_runnable_sum;
+ __entry->ps = rq->prev_runnable_sum;
+ __entry->nt_cs = (s64)rq->nt_curr_runnable_sum;
+ __entry->nt_ps = (s64)rq->nt_prev_runnable_sum;
+ __entry->pid = p->pid;
+ ),
+
+ TP_printk("cpu %d: cs %llu ps %llu nt_cs %lld nt_ps %lld pid %d",
+ __entry->cpu, __entry->cs, __entry->ps,
+ __entry->nt_cs, __entry->nt_ps, __entry->pid)
+);
+#endif /* CONFIG_SCHED_WALT */
#endif /* CONFIG_SMP */
/* Vendor Ids: */
#define DRM_FORMAT_MOD_NONE 0
+#define DRM_FORMAT_MOD_VENDOR_NONE 0
#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
#define DRM_FORMAT_MOD_VENDOR_NV 0x03
header-y += msm_audio_g711.h
header-y += msm_audio_g711_dec.h
header-y += msm_ion.h
+header-y += msm_ipc.h
header-y += msm_kgsl.h
header-y += msm_pft.h
header-y += msm_mdp.h
#define FS_IOC32_SETVERSION _IOW('v', 2, int)
/*
+ * File system encryption support
+ */
+/* Policy provided via an ioctl on the topmost directory */
+#define FS_KEY_DESCRIPTOR_SIZE 8
+
+#define FS_POLICY_FLAGS_PAD_4 0x00
+#define FS_POLICY_FLAGS_PAD_8 0x01
+#define FS_POLICY_FLAGS_PAD_16 0x02
+#define FS_POLICY_FLAGS_PAD_32 0x03
+#define FS_POLICY_FLAGS_PAD_MASK 0x03
+#define FS_POLICY_FLAGS_VALID 0x03
+
+/* Encryption algorithms */
+#define FS_ENCRYPTION_MODE_INVALID 0
+#define FS_ENCRYPTION_MODE_AES_256_XTS 1
+#define FS_ENCRYPTION_MODE_AES_256_GCM 2
+#define FS_ENCRYPTION_MODE_AES_256_CBC 3
+#define FS_ENCRYPTION_MODE_AES_256_CTS 4
+#define FS_ENCRYPTION_MODE_AES_128_CBC 5
+#define FS_ENCRYPTION_MODE_AES_128_CTS 6
+
+
+struct fscrypt_policy {
+ __u8 version;
+ __u8 contents_encryption_mode;
+ __u8 filenames_encryption_mode;
+ __u8 flags;
+ __u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+} __packed;
+
+#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy)
+#define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16])
+#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy)
+
+/* Parameters for passing an encryption key into the kernel keyring */
+#define FS_KEY_DESC_PREFIX "fscrypt:"
+#define FS_KEY_DESC_PREFIX_SIZE 8
+
+/* Structure that userspace passes to the kernel keyring */
+#define FS_MAX_KEY_SIZE 64
+
+struct fscrypt_key {
+ __u32 mode;
+ __u8 raw[FS_MAX_KEY_SIZE];
+ __u32 size;
+};
+
+/*
* Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
*/
#define FS_SECRM_FL 0x00000001 /* Secure deletion */
#include <linux/types.h>
#include <linux/sockios.h>
+#include <linux/in6.h> /* For struct sockaddr_in6. */
/*
* Based on the MROUTING 3.5 defines primarily to keep
#define _LINUX_RDS_H
#include <linux/types.h>
+#include <linux/socket.h> /* For __kernel_sockaddr_storage. */
#define RDS_IB_ABI_VERSION 0x301
};
struct rds_get_mr_for_dest_args {
- struct sockaddr_storage dest_addr;
+ struct __kernel_sockaddr_storage dest_addr;
struct rds_iovec vec;
uint64_t cookie_addr;
uint64_t flags;
__u8 iFunction;
} __attribute__ ((packed));
+#define USB_DT_INTERFACE_ASSOCIATION_SIZE 8
/*-------------------------------------------------------------------------*/
XFRMA_SA_EXTRA_FLAGS, /* __u32 */
XFRMA_PROTO, /* __u8 */
XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */
+ XFRMA_PAD,
+ XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */
+ XFRMA_OUTPUT_MARK, /* __u32 */
__XFRMA_MAX
#define XFRMA_MAX (__XFRMA_MAX - 1)
extern int
xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask);
+
+extern int
+xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs);
#endif /* __LINUX_SWIOTLB_XEN_H */
}
} else {
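+ /*
+ * BPF_END is only defined for the 32-bit BPF_ALU class;
+ * a BPF_ALU64 encoding uses reserved fields.
+ */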
if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
- (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
+ (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
+ BPF_CLASS(insn->code) == BPF_ALU64) {
verbose("BPF_END uses reserved fields\n");
return -EINVAL;
}
{
.name = "memory_pressure",
.read_u64 = cpuset_read_u64,
+ .private = FILE_MEMORY_PRESSURE,
},
{
mutex_unlock(&cpuset_mutex);
}
+static bool force_rebuild;
+
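+/*
+ * Force the next cpuset hotplug work to rebuild the sched domains even
+ * if cpus_allowed did not change.
+ */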
+void cpuset_force_rebuild(void)
+{
+ force_rebuild = true;
+}
+
/**
* cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
*
}
/* rebuild sched domains if cpus_allowed has changed */
- if (cpus_updated)
+ if (cpus_updated || force_rebuild) {
+ force_rebuild = false;
rebuild_sched_domains();
+ }
}
void cpuset_update_active_cpus(bool cpu_online)
schedule_work(&cpuset_hotplug_work);
}
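+/* Wait for any pending cpuset hotplug work to finish. */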
+void cpuset_wait_for_hotplug(void)
+{
+ flush_work(&cpuset_hotplug_work);
+}
+
/*
* Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
* Call this routine anytime after node_states[N_MEMORY] changes.
}
event->tp_event->prog = prog;
+ event->tp_event->bpf_prog_owner = event;
return 0;
}
return;
prog = event->tp_event->prog;
- if (prog) {
+ if (prog && event->tp_event->bpf_prog_owner == event) {
event->tp_event->prog = NULL;
bpf_prog_put_rcu(prog);
}
# else
static struct kmem_cache *thread_stack_cache;
-static struct thread_info *alloc_thread_stack_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
int node)
{
return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
}
-static void free_stack(unsigned long *stack)
+static void free_thread_stack(unsigned long *stack)
{
kmem_cache_free(thread_stack_cache, stack);
}
}
EXPORT_SYMBOL_GPL(__mmdrop);
+static inline void __mmput(struct mm_struct *mm)
+{
+ VM_BUG_ON(atomic_read(&mm->mm_users));
+
+ uprobe_clear_state(mm);
+ exit_aio(mm);
+ ksm_exit(mm);
+ khugepaged_exit(mm); /* must run before exit_mmap */
+ exit_mmap(mm);
+ set_mm_exe_file(mm, NULL);
+ if (!list_empty(&mm->mmlist)) {
+ spin_lock(&mmlist_lock);
+ list_del(&mm->mmlist);
+ spin_unlock(&mmlist_lock);
+ }
+ if (mm->binfmt)
+ module_put(mm->binfmt->module);
+ mmdrop(mm);
+}
+
/*
* Decrement the use count and release all resources for an mm.
*/
might_sleep();
if (atomic_dec_and_test(&mm->mm_users)) {
- uprobe_clear_state(mm);
- exit_aio(mm);
- ksm_exit(mm);
- khugepaged_exit(mm); /* must run before exit_mmap */
- exit_mmap(mm);
- set_mm_exe_file(mm, NULL);
- if (!list_empty(&mm->mmlist)) {
- spin_lock(&mmlist_lock);
- list_del(&mm->mmlist);
- spin_unlock(&mmlist_lock);
- }
- if (mm->binfmt)
- module_put(mm->binfmt->module);
- mmdrop(mm);
+ __mmput(mm);
mm_freed = 1;
}
return mm_freed;
}
EXPORT_SYMBOL_GPL(mmput);
+static void mmput_async_fn(struct work_struct *work)
+{
+ struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
+ __mmput(mm);
+}
+
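+/*
+ * Like mmput(), but defer the potentially sleeping __mmput() to a
+ * workqueue, so the final reference can be dropped from contexts that
+ * cannot sleep.
+ */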
+void mmput_async(struct mm_struct *mm)
+{
+ if (atomic_dec_and_test(&mm->mm_users)) {
+ INIT_WORK(&mm->async_put_work, mmput_async_fn);
+ schedule_work(&mm->async_put_work);
+ }
+}
+
/**
* set_mm_exe_file - change a reference to the mm's executable file
*
}
EXPORT_SYMBOL(__gcov_merge_icall_topn);
+void __gcov_exit(void)
+{
+ /* Unused. */
+}
+EXPORT_SYMBOL(__gcov_exit);
+
/**
* gcov_enable_events - enable event reporting through gcov_event()
*
#include <linux/vmalloc.h>
#include "gcov.h"
-#if __GNUC__ == 5 && __GNUC_MINOR__ >= 1
+#if (__GNUC__ >= 7)
+#define GCOV_COUNTERS 9
+#elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
#define GCOV_COUNTERS 10
#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
#define GCOV_COUNTERS 9
if (depth) {
hlock = curr->held_locks + depth - 1;
if (hlock->class_idx == class_idx && nest_lock) {
- if (hlock->references)
+ if (hlock->references) {
+ /*
+ * Check: unsigned int references:12, overflow.
+ */
+ if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12) - 1))
+ return 0;
+
hlock->references++;
- else
+ } else {
hlock->references = 2;
+ }
return 1;
}
else
lock_torture_print_module_parms(cxt.cur_ops,
"End of test: SUCCESS");
+ kfree(cxt.lwsa);
+ kfree(cxt.lrsa);
torture_cleanup_end();
}
GFP_KERNEL);
if (reader_tasks == NULL) {
VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
+ kfree(writer_tasks);
+ writer_tasks = NULL;
firsterr = -ENOMEM;
goto unwind;
}
#include <linux/kmod.h>
#include <trace/events/power.h>
#include <linux/wakeup_reason.h>
+#include <linux/cpuset.h>
-/*
+/*
* Timeout for stopping processes
*/
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
__usermodehelper_set_disable_depth(UMH_FREEZING);
thaw_workqueues();
+ cpuset_wait_for_hotplug();
+
read_lock(&tasklist_lock);
for_each_process_thread(g, p) {
/* No other threads should have PF_SUSPEND_TASK set */
local_irq_save(flags);
rdtp = this_cpu_ptr(&rcu_dynticks);
+
+ /* Page faults can happen in NMI handlers, so check... */
+ if (READ_ONCE(rdtp->dynticks_nmi_nesting))
+ return;
+
+ RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
oldval = rdtp->dynticks_nesting;
rdtp->dynticks_nesting--;
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
local_irq_save(flags);
rdtp = this_cpu_ptr(&rcu_dynticks);
+
+ /* Page faults can happen in NMI handlers, so check... */
+ if (READ_ONCE(rdtp->dynticks_nmi_nesting))
+ return;
+
+ RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
oldval = rdtp->dynticks_nesting;
rdtp->dynticks_nesting++;
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
p->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(src_rq, p, 0);
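+ /* Mark the task migrating for set_task_cpu(), queued again for activation. */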
+ p->on_rq = TASK_ON_RQ_MIGRATING;
set_task_cpu(p, cpu);
+ p->on_rq = TASK_ON_RQ_QUEUED;
activate_task(dst_rq, p, 0);
p->on_rq = TASK_ON_RQ_QUEUED;
check_preempt_curr(dst_rq, p, 0);
#ifdef CONFIG_SCHED_WALT
static void sched_freq_tick_walt(int cpu)
{
- unsigned long cpu_utilization = cpu_util(cpu);
+ unsigned long cpu_utilization = cpu_util_freq(cpu);
unsigned long capacity_curr = capacity_curr_of(cpu);
if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
return sched_freq_tick_pelt(cpu);
/*
- * Add a margin to the WALT utilization.
+ * Add a margin to the WALT utilization to check if we will need to
+ * increase frequency.
* NOTE: WALT tracks a single CPU signal for all the scheduling
* classes, thus this margin is going to be added to the DL class as
* well, which is something we do not do in sched_freq_tick_pelt case.
*/
- cpu_utilization = add_capacity_margin(cpu_utilization);
- if (cpu_utilization <= capacity_curr)
+ if (add_capacity_margin(cpu_utilization) <= capacity_curr)
return;
/*
static void sched_freq_tick(int cpu)
{
- unsigned long capacity_orig, capacity_curr;
-
if (!sched_freq())
return;
- capacity_orig = capacity_orig_of(cpu);
- capacity_curr = capacity_curr_of(cpu);
- if (capacity_curr == capacity_orig)
- return;
-
_sched_freq_tick(cpu);
}
#else
* operation in the resume sequence, just build a single sched
* domain, ignoring cpusets.
*/
- num_cpus_frozen--;
- if (likely(num_cpus_frozen)) {
- partition_sched_domains(1, NULL, NULL);
+ partition_sched_domains(1, NULL, NULL);
+ if (--num_cpus_frozen)
break;
- }
/*
* This is the last CPU online operation. So fall through and
* restore the original sched domains by considering the
* cpuset configurations.
*/
+ cpuset_force_rebuild();
case CPU_ONLINE:
cpuset_update_active_cpus(true);
cpufreq_cpu_put(policy);
}
+#ifdef CONFIG_SCHED_WALT
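+/*
+ * WALT's CPU utilization signal already accounts for all scheduling
+ * classes, so only add the RT request when the PELT signal is in use.
+ */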
+static inline unsigned long
+requested_capacity(struct sched_capacity_reqs *scr)
+{
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ return scr->cfs;
+ return scr->cfs + scr->rt;
+}
+#else
+#define requested_capacity(scr) (scr->cfs + scr->rt)
+#endif
+
void update_cpu_capacity_request(int cpu, bool request)
{
unsigned long new_capacity;
scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
- new_capacity = scr->cfs + scr->rt;
+ new_capacity = requested_capacity(scr);
new_capacity = new_capacity * capacity_margin
/ SCHED_CAPACITY_SCALE;
new_capacity += scr->dl;
struct update_util_data update_util;
struct sugov_policy *sg_policy;
- unsigned long iowait_boost;
- unsigned long iowait_boost_max;
+ bool iowait_boost_pending;
+ unsigned int iowait_boost;
+ unsigned int iowait_boost_max;
u64 last_update;
/* The fields below are only needed when sharing a policy. */
unsigned int flags)
{
if (flags & SCHED_CPUFREQ_IOWAIT) {
- sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+ if (sg_cpu->iowait_boost_pending)
+ return;
+
+ sg_cpu->iowait_boost_pending = true;
+
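+ /* Double the boost on each successive iowait wakeup, up to the max. */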
+ if (sg_cpu->iowait_boost) {
+ sg_cpu->iowait_boost <<= 1;
+ if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
+ sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+ } else {
+ sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
+ }
} else if (sg_cpu->iowait_boost) {
s64 delta_ns = time - sg_cpu->last_update;
/* Clear iowait_boost if the CPU appears to have been idle. */
- if (delta_ns > TICK_NSEC)
+ if (delta_ns > TICK_NSEC) {
sg_cpu->iowait_boost = 0;
+ sg_cpu->iowait_boost_pending = false;
+ }
}
}
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
unsigned long *max)
{
- unsigned long boost_util = sg_cpu->iowait_boost;
- unsigned long boost_max = sg_cpu->iowait_boost_max;
+ unsigned int boost_util, boost_max;
- if (!boost_util)
+ if (!sg_cpu->iowait_boost)
return;
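+ /* Apply a pending boost at full value once; otherwise decay it by half. */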
+ if (sg_cpu->iowait_boost_pending) {
+ sg_cpu->iowait_boost_pending = false;
+ } else {
+ sg_cpu->iowait_boost >>= 1;
+ if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
+ sg_cpu->iowait_boost = 0;
+ return;
+ }
+ }
+
+ boost_util = sg_cpu->iowait_boost;
+ boost_max = sg_cpu->iowait_boost_max;
+
if (*util * boost_max < *max * boost_util) {
*util = boost_util;
*max = boost_max;
}
- sg_cpu->iowait_boost >>= 1;
}
#ifdef CONFIG_NO_HZ_COMMON
sugov_update_commit(sg_policy, time, next_f);
}
-static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
+static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
struct cpufreq_policy *policy = sg_policy->policy;
- u64 last_freq_update_time = sg_policy->last_freq_update_time;
unsigned long util = 0, max = 1;
unsigned int j;
* enough, don't take the CPU into account as it probably is
* idle now (and clear iowait_boost for it).
*/
- delta_ns = last_freq_update_time - j_sg_cpu->last_update;
+ delta_ns = time - j_sg_cpu->last_update;
if (delta_ns > TICK_NSEC) {
j_sg_cpu->iowait_boost = 0;
+ j_sg_cpu->iowait_boost_pending = false;
continue;
}
if (j_sg_cpu->flags & SCHED_CPUFREQ_DL)
if (flags & SCHED_CPUFREQ_DL)
next_f = sg_policy->policy->cpuinfo.max_freq;
else
- next_f = sugov_next_freq_shared(sg_cpu);
+ next_f = sugov_next_freq_shared(sg_cpu, time);
sugov_update_commit(sg_policy, time, next_f);
}
{
struct sugov_policy *sg_policy;
struct sugov_tunables *tunables;
- unsigned int lat;
int ret = 0;
/* State should be equivalent to EXIT */
goto stop_kthread;
}
- tunables->up_rate_limit_us = LATENCY_MULTIPLIER;
- tunables->down_rate_limit_us = LATENCY_MULTIPLIER;
- lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
- if (lat) {
- tunables->up_rate_limit_us *= lat;
- tunables->down_rate_limit_us *= lat;
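+ /*
+ * Prefer driver-provided transition delays; otherwise derive the
+ * rate limits from the cpufreq transition latency.
+ */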
+ if (policy->up_transition_delay_us && policy->down_transition_delay_us) {
+ tunables->up_rate_limit_us = policy->up_transition_delay_us;
+ tunables->down_rate_limit_us = policy->down_transition_delay_us;
+ } else {
+ unsigned int lat;
+
+ tunables->up_rate_limit_us = LATENCY_MULTIPLIER;
+ tunables->down_rate_limit_us = LATENCY_MULTIPLIER;
+ lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
+ if (lat) {
+ tunables->up_rate_limit_us *= lat;
+ tunables->down_rate_limit_us *= lat;
+ }
}
policy->governor_data = sg_policy;
#include <linux/slab.h>
+#include "walt.h"
+
struct dl_bandwidth def_dl_bandwidth;
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
next_task->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(rq, next_task, 0);
clear_average_bw(&next_task->dl, &rq->dl);
+ next_task->on_rq = TASK_ON_RQ_MIGRATING;
set_task_cpu(next_task, later_rq->cpu);
+ next_task->on_rq = TASK_ON_RQ_QUEUED;
add_average_bw(&next_task->dl, &later_rq->dl);
activate_task(later_rq, next_task, 0);
next_task->on_rq = TASK_ON_RQ_QUEUED;
p->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(src_rq, p, 0);
clear_average_bw(&p->dl, &src_rq->dl);
+ p->on_rq = TASK_ON_RQ_MIGRATING;
set_task_cpu(p, this_cpu);
+ p->on_rq = TASK_ON_RQ_QUEUED;
add_average_bw(&p->dl, &this_rq->dl);
activate_task(this_rq, p, 0);
p->on_rq = TASK_ON_RQ_QUEUED;
#endif
#ifdef CONFIG_SMP
+static bool __cpu_overutilized(int cpu, int delta);
static bool cpu_overutilized(int cpu);
unsigned long boosted_cpu_util(int cpu);
#else
-#define boosted_cpu_util(cpu) cpu_util(cpu)
+#define boosted_cpu_util(cpu) cpu_util_freq(cpu)
#endif
#if defined(CONFIG_SMP) && defined(CONFIG_CPU_FREQ_GOV_SCHED)
*/
static int sched_group_energy(struct energy_env *eenv)
{
- struct sched_domain *sd;
- int cpu, total_energy = 0;
struct cpumask visit_cpus;
- struct sched_group *sg;
+ u64 total_energy = 0;
WARN_ON(!eenv->sg_top->sge);
while (!cpumask_empty(&visit_cpus)) {
struct sched_group *sg_shared_cap = NULL;
-
- cpu = cpumask_first(&visit_cpus);
+ int cpu = cpumask_first(&visit_cpus);
+ struct sched_domain *sd;
/*
* Is the group utilization affected by cpus outside this
sg_shared_cap = sd->parent->groups;
for_each_domain(cpu, sd) {
- sg = sd->groups;
+ struct sched_group *sg = sd->groups;
/* Has this sched_domain already been visited? */
if (sd->child && group_first_cpu(sg) != cpu)
idle_idx = group_idle_state(eenv, sg);
group_util = group_norm_util(eenv, sg);
- sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power)
- >> SCHED_CAPACITY_SHIFT;
+ sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power);
sg_idle_energy = ((SCHED_LOAD_SCALE-group_util)
- * sg->sge->idle_states[idle_idx].power)
- >> SCHED_CAPACITY_SHIFT;
+ * sg->sge->idle_states[idle_idx].power);
total_energy += sg_busy_energy + sg_idle_energy;
continue;
}
- eenv->energy = total_energy;
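+ /* Shift the accumulated energy once at the end to preserve precision. */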
+ eenv->energy = total_energy >> SCHED_CAPACITY_SHIFT;
return 0;
}
return __task_fits(p, cpu, 0);
}
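+/*
+ * Check whether @cpu would be overutilized if @delta extra utilization
+ * were placed on it, given the capacity margin.
+ */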
+static bool __cpu_overutilized(int cpu, int delta)
+{
+ return (capacity_of(cpu) * 1024) < ((cpu_util(cpu) + delta) * capacity_margin);
+}
+
static bool cpu_overutilized(int cpu)
{
- return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
+ return __cpu_overutilized(cpu, 0);
}
#ifdef CONFIG_SCHED_TUNE
unsigned long
boosted_cpu_util(int cpu)
{
- unsigned long util = cpu_util(cpu);
+ unsigned long util = cpu_util_freq(cpu);
long margin = schedtune_cpu_margin(util, cpu);
trace_sched_boost_cpu(cpu, util, margin);
{
struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
- RCU_LOCKDEP_WARN(rcu_read_lock_sched_held(),
- "sched RCU must be held");
-
return boosted ? rd->max_cap_orig_cpu : rd->min_cap_orig_cpu;
}
}
if (target_cpu != prev_cpu) {
+ int delta = 0;
struct energy_env eenv = {
.util_delta = task_util(p),
.src_cpu = prev_cpu,
.task = p,
};
+
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ delta = task_util(p);
+#endif
/* Not enough spare capacity on previous cpu */
- if (cpu_overutilized(prev_cpu)) {
+ if (__cpu_overutilized(prev_cpu, delta)) {
schedstat_inc(p, se.statistics.nr_wakeups_secb_insuff_cap);
schedstat_inc(this_rq(), eas_stats.secb_insuff_cap);
goto unlock;
if (energy_aware() &&
(capacity_of(env->src_cpu) < capacity_of(env->dst_cpu)) &&
+ (capacity_orig_of(env->src_cpu) < capacity_orig_of(env->dst_cpu)) &&
env->src_rq->cfs.h_nr_running == 1 &&
cpu_overutilized(env->src_cpu) &&
!cpu_overutilized(env->dst_cpu)) {
return true;
/* Do idle load balance if there have misfit task */
- if (energy_aware() && rq->misfit_task)
- return 1;
+ if (energy_aware())
+ return rq->misfit_task;
return (rq->nr_running >= 2);
}
#ifndef CONFIG_SCHED_HMP
rcu_read_lock();
sd = rcu_dereference(per_cpu(sd_busy, cpu));
- if (sd && !energy_aware()) {
+ if (sd) {
sgc = sd->groups->sgc;
nr_busy = atomic_read(&sgc->nr_busy_cpus);
next_task->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(rq, next_task, 0);
+ next_task->on_rq = TASK_ON_RQ_MIGRATING;
set_task_cpu(next_task, lowest_rq->cpu);
+ next_task->on_rq = TASK_ON_RQ_QUEUED;
activate_task(lowest_rq, next_task, 0);
next_task->on_rq = TASK_ON_RQ_QUEUED;
ret = 1;
p->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(src_rq, p, 0);
+ p->on_rq = TASK_ON_RQ_MIGRATING;
set_task_cpu(p, this_cpu);
+ p->on_rq = TASK_ON_RQ_QUEUED;
activate_task(this_rq, p, 0);
p->on_rq = TASK_ON_RQ_QUEUED;
/*
unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
unsigned long capacity = capacity_orig_of(cpu);
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ util = div64_u64(cpu_rq(cpu)->cumulative_runnable_avg,
+ walt_ravg_window >> SCHED_LOAD_SHIFT);
+#endif
+
delta += util;
if (delta < 0)
return 0;
return __cpu_util(cpu, 0);
}
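+/*
+ * CPU utilization for frequency selection: with WALT this is the
+ * previous window's runnable sum scaled to capacity, otherwise the
+ * PELT cfs utilization, clamped to the CPU's original capacity.
+ */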
+static inline unsigned long cpu_util_freq(int cpu)
+{
+ unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
+ unsigned long capacity = capacity_orig_of(cpu);
+
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ util = div64_u64(cpu_rq(cpu)->prev_runnable_sum,
+ walt_ravg_window >> SCHED_LOAD_SHIFT);
+#endif
+ return (util >= capacity) ? capacity : util;
+}
+
#endif
#ifdef CONFIG_CPU_FREQ_GOV_SCHED
*/
#include <linux/syscore_ops.h>
-#include <linux/cpufreq.h>
#include <trace/events/sched.h>
#include "sched.h"
#include "walt.h"
/* 1 -> use PELT based load stats, 0 -> use window-based load stats */
unsigned int __read_mostly walt_disabled = 0;
-static unsigned int max_possible_efficiency = 1024;
-static unsigned int min_possible_efficiency = 1024;
-
-/*
- * Maximum possible frequency across all cpus. Task demand and cpu
- * capacity (cpu_power) metrics are scaled in reference to it.
- */
-static unsigned int max_possible_freq = 1;
-
-/*
- * Minimum possible max_freq across all cpus. This will be same as
- * max_possible_freq on homogeneous systems and could be different from
- * max_possible_freq on heterogenous systems. min_max_freq is used to derive
- * capacity (cpu_power) of cpus.
- */
-static unsigned int min_max_freq = 1;
-
-static unsigned int max_load_scale_factor = 1024;
-static unsigned int max_possible_capacity = 1024;
-
-/* Mask of all CPUs that have max_possible_capacity */
-static cpumask_t mpc_mask = CPU_MASK_ALL;
-
/* Window size (in ns) */
__read_mostly unsigned int walt_ravg_window = 20000000;
static void
fixup_cumulative_runnable_avg(struct rq *rq,
- struct task_struct *p, s64 task_load_delta)
+ struct task_struct *p, u64 new_task_load)
{
+ s64 task_load_delta = (s64)new_task_load - task_load(p);
+
rq->cumulative_runnable_avg += task_load_delta;
if ((s64)rq->cumulative_runnable_avg < 0)
panic("cra less than zero: tld: %lld, task_load(p) = %u\n",
rq->window_start += (u64)nr_windows * (u64)walt_ravg_window;
}
+/*
+ * Translate absolute delta time accounted on a CPU
+ * to a scale where 1024 is the capacity of the most
+ * capable CPU running at FMAX
+ */
static u64 scale_exec_time(u64 delta, struct rq *rq)
{
- unsigned int cur_freq = rq->cur_freq;
- int sf;
-
- if (unlikely(cur_freq > max_possible_freq))
- cur_freq = rq->max_possible_freq;
-
- /* round up div64 */
- delta = div64_u64(delta * cur_freq + max_possible_freq - 1,
- max_possible_freq);
-
- sf = DIV_ROUND_UP(rq->efficiency * 1024, max_possible_efficiency);
+ unsigned long capcurr = capacity_curr_of(cpu_of(rq));
- delta *= sf;
- delta >>= 10;
-
- return delta;
+ return (delta * capcurr) >> SCHED_CAPACITY_SHIFT;
}
static int cpu_is_waiting_on_io(struct rq *rq)
p->ravg.mark_start = wallclock;
}
-unsigned long __weak arch_get_cpu_efficiency(int cpu)
-{
- return SCHED_LOAD_SCALE;
-}
-
-void walt_init_cpu_efficiency(void)
-{
- int i, efficiency;
- unsigned int max = 0, min = UINT_MAX;
-
- for_each_possible_cpu(i) {
- efficiency = arch_get_cpu_efficiency(i);
- cpu_rq(i)->efficiency = efficiency;
-
- if (efficiency > max)
- max = efficiency;
- if (efficiency < min)
- min = efficiency;
- }
-
- if (max)
- max_possible_efficiency = max;
-
- if (min)
- min_possible_efficiency = min;
-}
-
static void reset_task_stats(struct task_struct *p)
{
u32 sum = 0;
int cpu = cpu_of(rq);
struct rq *sync_rq = cpu_rq(sync_cpu);
- if (rq->window_start)
+ if (likely(rq->window_start))
return;
if (cpu == sync_cpu) {
- rq->window_start = walt_ktime_clock();
+ rq->window_start = 1;
} else {
raw_spin_unlock(&rq->lock);
double_rq_lock(rq, sync_rq);
double_rq_unlock(src_rq, dest_rq);
}
-/*
- * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
- * least efficient cpu gets capacity of 1024
- */
-static unsigned long capacity_scale_cpu_efficiency(int cpu)
-{
- return (1024 * cpu_rq(cpu)->efficiency) / min_possible_efficiency;
-}
-
-/*
- * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
- * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
- */
-static unsigned long capacity_scale_cpu_freq(int cpu)
-{
- return (1024 * cpu_rq(cpu)->max_freq) / min_max_freq;
-}
-
-/*
- * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
- * that "most" efficient cpu gets a load_scale_factor of 1
- */
-static unsigned long load_scale_cpu_efficiency(int cpu)
-{
- return DIV_ROUND_UP(1024 * max_possible_efficiency,
- cpu_rq(cpu)->efficiency);
-}
-
-/*
- * Return load_scale_factor of a cpu in reference to cpu with best max_freq
- * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
- * of 1.
- */
-static unsigned long load_scale_cpu_freq(int cpu)
-{
- return DIV_ROUND_UP(1024 * max_possible_freq, cpu_rq(cpu)->max_freq);
-}
-
-static int compute_capacity(int cpu)
-{
- int capacity = 1024;
-
- capacity *= capacity_scale_cpu_efficiency(cpu);
- capacity >>= 10;
-
- capacity *= capacity_scale_cpu_freq(cpu);
- capacity >>= 10;
-
- return capacity;
-}
-
-static int compute_load_scale_factor(int cpu)
-{
- int load_scale = 1024;
-
- /*
- * load_scale_factor accounts for the fact that task load
- * is in reference to "best" performing cpu. Task's load will need to be
- * scaled (up) by a factor to determine suitability to be placed on a
- * (little) cpu.
- */
- load_scale *= load_scale_cpu_efficiency(cpu);
- load_scale >>= 10;
-
- load_scale *= load_scale_cpu_freq(cpu);
- load_scale >>= 10;
-
- return load_scale;
-}
-
-static int cpufreq_notifier_policy(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
- int i, update_max = 0;
- u64 highest_mpc = 0, highest_mplsf = 0;
- const struct cpumask *cpus = policy->related_cpus;
- unsigned int orig_min_max_freq = min_max_freq;
- unsigned int orig_max_possible_freq = max_possible_freq;
- /* Initialized to policy->max in case policy->related_cpus is empty! */
- unsigned int orig_max_freq = policy->max;
-
- if (val != CPUFREQ_NOTIFY)
- return 0;
-
- for_each_cpu(i, policy->related_cpus) {
- cpumask_copy(&cpu_rq(i)->freq_domain_cpumask,
- policy->related_cpus);
- orig_max_freq = cpu_rq(i)->max_freq;
- cpu_rq(i)->min_freq = policy->min;
- cpu_rq(i)->max_freq = policy->max;
- cpu_rq(i)->cur_freq = policy->cur;
- cpu_rq(i)->max_possible_freq = policy->cpuinfo.max_freq;
- }
-
- max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);
- if (min_max_freq == 1)
- min_max_freq = UINT_MAX;
- min_max_freq = min(min_max_freq, policy->cpuinfo.max_freq);
- BUG_ON(!min_max_freq);
- BUG_ON(!policy->max);
-
- /* Changes to policy other than max_freq don't require any updates */
- if (orig_max_freq == policy->max)
- return 0;
-
- /*
- * A changed min_max_freq or max_possible_freq (possible during bootup)
- * needs to trigger re-computation of load_scale_factor and capacity for
- * all possible cpus (even those offline). It also needs to trigger
- * re-computation of nr_big_task count on all online cpus.
- *
- * A changed rq->max_freq otoh needs to trigger re-computation of
- * load_scale_factor and capacity for just the cluster of cpus involved.
- * Since small task definition depends on max_load_scale_factor, a
- * changed load_scale_factor of one cluster could influence
- * classification of tasks in another cluster. Hence a changed
- * rq->max_freq will need to trigger re-computation of nr_big_task
- * count on all online cpus.
- *
- * While it should be sufficient for nr_big_tasks to be
- * re-computed for only online cpus, we have inadequate context
- * information here (in policy notifier) with regard to hotplug-safety
- * context in which notification is issued. As a result, we can't use
- * get_online_cpus() here, as it can lead to deadlock. Until cpufreq is
- * fixed up to issue notification always in hotplug-safe context,
- * re-compute nr_big_task for all possible cpus.
- */
-
- if (orig_min_max_freq != min_max_freq ||
- orig_max_possible_freq != max_possible_freq) {
- cpus = cpu_possible_mask;
- update_max = 1;
- }
-
- /*
- * Changed load_scale_factor can trigger reclassification of tasks as
- * big or small. Make this change "atomic" so that tasks are accounted
- * properly due to changed load_scale_factor
- */
- for_each_cpu(i, cpus) {
- struct rq *rq = cpu_rq(i);
-
- rq->capacity = compute_capacity(i);
- rq->load_scale_factor = compute_load_scale_factor(i);
-
- if (update_max) {
- u64 mpc, mplsf;
-
- mpc = div_u64(((u64) rq->capacity) *
- rq->max_possible_freq, rq->max_freq);
- rq->max_possible_capacity = (int) mpc;
-
- mplsf = div_u64(((u64) rq->load_scale_factor) *
- rq->max_possible_freq, rq->max_freq);
-
- if (mpc > highest_mpc) {
- highest_mpc = mpc;
- cpumask_clear(&mpc_mask);
- cpumask_set_cpu(i, &mpc_mask);
- } else if (mpc == highest_mpc) {
- cpumask_set_cpu(i, &mpc_mask);
- }
-
- if (mplsf > highest_mplsf)
- highest_mplsf = mplsf;
- }
- }
-
- if (update_max) {
- max_possible_capacity = highest_mpc;
- max_load_scale_factor = highest_mplsf;
- }
-
- return 0;
-}
-
-static int cpufreq_notifier_trans(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data;
- unsigned int cpu = freq->cpu, new_freq = freq->new;
- unsigned long flags;
- int i;
-
- if (val != CPUFREQ_POSTCHANGE)
- return 0;
-
- BUG_ON(!new_freq);
-
- if (cpu_rq(cpu)->cur_freq == new_freq)
- return 0;
-
- for_each_cpu(i, &cpu_rq(cpu)->freq_domain_cpumask) {
- struct rq *rq = cpu_rq(i);
-
- raw_spin_lock_irqsave(&rq->lock, flags);
- walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
- walt_ktime_clock(), 0);
- rq->cur_freq = new_freq;
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- }
-
- return 0;
-}
-
-static struct notifier_block notifier_policy_block = {
- .notifier_call = cpufreq_notifier_policy
-};
-
-static struct notifier_block notifier_trans_block = {
- .notifier_call = cpufreq_notifier_trans
-};
-
-static int register_sched_callback(void)
-{
- int ret;
-
- ret = cpufreq_register_notifier(&notifier_policy_block,
- CPUFREQ_POLICY_NOTIFIER);
-
- if (!ret)
- ret = cpufreq_register_notifier(&notifier_trans_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-
- return 0;
-}
-
-/*
- * cpufreq callbacks can be registered at core_initcall or later time.
- * Any registration done prior to that is "forgotten" by cpufreq. See
- * initialization of variable init_cpufreq_transition_notifier_list_called
- * for further information.
- */
-core_initcall(register_sched_callback);
-
void walt_init_new_task_load(struct task_struct *p)
{
int i;
return 0;
}
+void __get_seccomp_filter(struct seccomp_filter *filter)
+{
+ /* Reference count is bounded by the number of total processes. */
+ atomic_inc(&filter->usage);
+}
+
/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
struct seccomp_filter *orig = tsk->seccomp.filter;
if (!orig)
return;
- /* Reference count is bounded by the number of total processes. */
- atomic_inc(&orig->usage);
+ __get_seccomp_filter(orig);
}
static inline void seccomp_filter_free(struct seccomp_filter *filter)
}
}
-/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
-void put_seccomp_filter(struct task_struct *tsk)
+static void __put_seccomp_filter(struct seccomp_filter *orig)
{
- struct seccomp_filter *orig = tsk->seccomp.filter;
/* Clean up single-reference branches iteratively. */
while (orig && atomic_dec_and_test(&orig->usage)) {
struct seccomp_filter *freeme = orig;
}
}
+/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
+void put_seccomp_filter(struct task_struct *tsk)
+{
+ __put_seccomp_filter(tsk->seccomp.filter);
+}
+
/**
* seccomp_send_sigsys - signals the task to allow in-process syscall emulation
* @syscall: syscall number to send to userland
if (!data)
goto out;
- get_seccomp_filter(task);
+ __get_seccomp_filter(filter);
spin_unlock_irq(&task->sighand->siglock);
if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
ret = -EFAULT;
- put_seccomp_filter(task);
+ __put_seccomp_filter(filter);
return ret;
out:
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = timer_migration_handler,
+ .extra1 = &zero,
+ .extra2 = &one,
},
#endif
#ifdef CONFIG_BPF_SYSCALL
int ret;
mutex_lock(&mutex);
- ret = proc_dointvec(table, write, buffer, lenp, ppos);
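+ /* Clamp timer_migration writes to the boolean range [0, 1]. */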
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (!ret && write)
timers_update_migration(false);
mutex_unlock(&mutex);
address on the current task structure into a stack of calls.
+config PREEMPTIRQ_EVENTS
+ bool "Enable trace events for preempt and irq disable/enable"
+ select TRACE_IRQFLAGS
+ depends on DEBUG_PREEMPT || !PROVE_LOCKING
+ default n
+ help
+ Enable tracing of disable and enable events for preemption and irqs.
+ For tracing preempt disable/enable events, DEBUG_PREEMPT must be
+ enabled. For tracing irq disable/enable events, PROVE_LOCKING must
+ be disabled.
+
config IRQSOFF_TRACER
bool "Interrupts-off Latency Tracer"
default n
obj-$(CONFIG_TRACING) += trace_printk.o
obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
+obj-$(CONFIG_PREEMPTIRQ_EVENTS) += trace_irqsoff.o
obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
if (!command || !ftrace_enabled) {
/*
- * If these are control ops, they still need their
- * per_cpu field freed. Since, function tracing is
+ * If these are dynamic or control ops, they still
+ * need their data freed. Since, function tracing is
* not currently active, we can just free them
* without synchronizing all CPUs.
*/
- if (ops->flags & FTRACE_OPS_FL_CONTROL)
- control_ops_free(ops);
+ if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL))
+ goto free_ops;
+
return 0;
}
if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
schedule_on_each_cpu(ftrace_sync);
+ free_ops:
arch_ftrace_trampoline_free(ops);
if (ops->flags & FTRACE_OPS_FL_CONTROL)
static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
-static unsigned long save_global_trampoline;
-static unsigned long save_global_flags;
-
static int __init set_graph_function(char *str)
{
strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
unregister_pm_notifier(&ftrace_suspend_notifier);
unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
-#ifdef CONFIG_DYNAMIC_FTRACE
- /*
- * Function graph does not allocate the trampoline, but
- * other global_ops do. We need to reset the ALLOC_TRAMP flag
- * if one was used.
- */
- global_ops.trampoline = save_global_trampoline;
- if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
- global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
-#endif
-
out:
mutex_unlock(&ftrace_lock);
}
/* If this file was open for write, then erase contents */
if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
int cpu = tracing_get_cpu(inode);
+ struct trace_buffer *trace_buf = &tr->trace_buffer;
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+ if (tr->current_trace->print_max)
+ trace_buf = &tr->max_buffer;
+#endif
if (cpu == RING_BUFFER_ALL_CPUS)
- tracing_reset_online_cpus(&tr->trace_buffer);
+ tracing_reset_online_cpus(trace_buf);
else
- tracing_reset(&tr->trace_buffer, cpu);
+ tracing_reset(trace_buf, cpu);
}
if (file->f_mode & FMODE_READ) {
*
* iter->pos will be 0 if we haven't read anything.
*/
- if (!tracing_is_on() && iter->pos)
+ if (!tracer_tracing_is_on(iter->tr) && iter->pos)
break;
mutex_unlock(&iter->mutex);
tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
- if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
+ if (tr->max_buffer.buffer)
ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
tracing_reset_online_cpus(&tr->max_buffer);
#endif
#include "trace.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/preemptirq.h>
+
+#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;
#else /* !CONFIG_PROVE_LOCKING */
/*
- * Stubs:
- */
-
-void trace_softirqs_on(unsigned long ip)
-{
-}
-
-void trace_softirqs_off(unsigned long ip)
-{
-}
-
-inline void print_irqtrace_events(struct task_struct *curr)
-{
-}
-
-/*
* We are only interested in hardirq on/off events:
*/
-void trace_hardirqs_on(void)
+static inline void tracer_hardirqs_on(void)
{
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
-EXPORT_SYMBOL(trace_hardirqs_on);
-void trace_hardirqs_off(void)
+static inline void tracer_hardirqs_off(void)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
-EXPORT_SYMBOL(trace_hardirqs_off);
-__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
+static inline void tracer_hardirqs_on_caller(unsigned long caller_addr)
{
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, caller_addr);
}
-EXPORT_SYMBOL(trace_hardirqs_on_caller);
-__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+static inline void tracer_hardirqs_off_caller(unsigned long caller_addr)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, caller_addr);
}
-EXPORT_SYMBOL(trace_hardirqs_off_caller);
#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */
#ifdef CONFIG_PREEMPT_TRACER
-void trace_preempt_on(unsigned long a0, unsigned long a1)
+static inline void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
if (preempt_trace() && !irq_trace())
stop_critical_timing(a0, a1);
}
-void trace_preempt_off(unsigned long a0, unsigned long a1)
+static inline void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
if (preempt_trace() && !irq_trace())
start_critical_timing(a0, a1);
return 0;
}
core_initcall(init_irqsoff_tracer);
+#endif /* IRQSOFF_TRACER || PREEMPT_TRACER */
+
+#ifndef CONFIG_IRQSOFF_TRACER
+static inline void tracer_hardirqs_on(void) { }
+static inline void tracer_hardirqs_off(void) { }
+static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) { }
+static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) { }
+#endif
+
+#ifndef CONFIG_PREEMPT_TRACER
+static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
+static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
+#endif
+
+/* Per-cpu variable to prevent redundant calls when IRQs already off */
+static DEFINE_PER_CPU(int, tracing_irq_cpu);
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
+void trace_hardirqs_on(void)
+{
+ if (!this_cpu_read(tracing_irq_cpu))
+ return;
+
+ trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+ tracer_hardirqs_on();
+
+ this_cpu_write(tracing_irq_cpu, 0);
+}
+EXPORT_SYMBOL(trace_hardirqs_on);
+
+void trace_hardirqs_off(void)
+{
+ if (this_cpu_read(tracing_irq_cpu))
+ return;
+
+ this_cpu_write(tracing_irq_cpu, 1);
+
+ trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+ tracer_hardirqs_off();
+}
+EXPORT_SYMBOL(trace_hardirqs_off);
+
+__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
+{
+ if (!this_cpu_read(tracing_irq_cpu))
+ return;
+
+ trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
+ tracer_hardirqs_on_caller(caller_addr);
+
+ this_cpu_write(tracing_irq_cpu, 0);
+}
+EXPORT_SYMBOL(trace_hardirqs_on_caller);
+
+__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+{
+ if (this_cpu_read(tracing_irq_cpu))
+ return;
+
+ this_cpu_write(tracing_irq_cpu, 1);
+
+ trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
+ tracer_hardirqs_off_caller(caller_addr);
+}
+EXPORT_SYMBOL(trace_hardirqs_off_caller);
+
+/*
+ * Stubs:
+ */
+
+void trace_softirqs_on(unsigned long ip)
+{
+}
+
+void trace_softirqs_off(unsigned long ip)
+{
+}
+
+inline void print_irqtrace_events(struct task_struct *curr)
+{
+}
+#endif
+
+#if defined(CONFIG_PREEMPT_TRACER) || \
+ (defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
+void trace_preempt_on(unsigned long a0, unsigned long a1)
+{
+ trace_preempt_enable_rcuidle(a0, a1);
+ tracer_preempt_on(a0, a1);
+}
+
+void trace_preempt_off(unsigned long a0, unsigned long a1)
+{
+ trace_preempt_disable_rcuidle(a0, a1);
+ tracer_preempt_off(a0, a1);
+}
+#endif
goto out_free;
if (cnt > 1) {
if (trace_selftest_test_global_cnt == 0)
- goto out;
+ goto out_free;
}
if (trace_selftest_test_dyn_cnt == 0)
goto out_free;
obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
-GCOV_PROFILE_hweight.o := n
-CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_BTREE) += btree.o
* The Hamming Weight of a number is the total number of bits set in it.
*/
+#ifndef __HAVE_ARCH_SW_HWEIGHT
unsigned int __sw_hweight32(unsigned int w)
{
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
#endif
}
EXPORT_SYMBOL(__sw_hweight32);
+#endif
unsigned int __sw_hweight16(unsigned int w)
{
}
EXPORT_SYMBOL(__sw_hweight8);
+#ifndef __HAVE_ARCH_SW_HWEIGHT
unsigned long __sw_hweight64(__u64 w)
{
#if BITS_PER_LONG == 32
#endif
}
EXPORT_SYMBOL(__sw_hweight64);
+#endif
u32 crc32c(u32 crc, const void *address, unsigned int length)
{
SHASH_DESC_ON_STACK(shash, tfm);
- u32 *ctx = (u32 *)shash_desc_ctx(shash);
+ u32 ret, *ctx = (u32 *)shash_desc_ctx(shash);
int err;
shash->tfm = tfm;
err = crypto_shash_update(shash, address, length);
BUG_ON(err);
- return *ctx;
+ ret = *ctx;
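+ /* Prevent the compiler from optimizing away the on-stack context. */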
+ barrier_data(ctx);
+ return ret;
}
EXPORT_SYMBOL(crc32c);
bdi_class->dev_groups = bdi_dev_groups;
bdi_debug_init();
+
return 0;
}
postcore_initcall(bdi_class_init);
bdi->dev = NULL;
+ kref_init(&bdi->refcnt);
bdi->min_ratio = 0;
bdi->max_ratio = 100;
bdi->max_prop_frac = FPROP_FRAC_BASE;
}
EXPORT_SYMBOL(bdi_init);
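+/* Allocate and initialize a backing_dev_info on the given NUMA node. */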
+struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
+{
+ struct backing_dev_info *bdi;
+
+ bdi = kmalloc_node(sizeof(struct backing_dev_info),
+ gfp_mask | __GFP_ZERO, node_id);
+ if (!bdi)
+ return NULL;
+
+ if (bdi_init(bdi)) {
+ kfree(bdi);
+ return NULL;
+ }
+ return bdi;
+}
+
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...)
{
}
}
-void bdi_exit(struct backing_dev_info *bdi)
+static void bdi_exit(struct backing_dev_info *bdi)
{
WARN_ON_ONCE(bdi->dev);
wb_exit(&bdi->wb);
}
+static void release_bdi(struct kref *ref)
+{
+ struct backing_dev_info *bdi =
+ container_of(ref, struct backing_dev_info, refcnt);
+
+ bdi_exit(bdi);
+ kfree(bdi);
+}
+
+void bdi_put(struct backing_dev_info *bdi)
+{
+ kref_put(&bdi->refcnt, release_bdi);
+}
+
void bdi_destroy(struct backing_dev_info *bdi)
{
bdi_unregister(bdi);
memblock.memory.regions[idx].size) >= end;
}
-int __init_memblock memblock_overlaps_memory(phys_addr_t base, phys_addr_t size)
+bool __init_memblock memblock_overlaps_memory(phys_addr_t base,
+ phys_addr_t size)
{
memblock_cap_size(base, &size);
- return memblock_overlaps_region(&memblock.memory, base, size) >= 0;
+ return memblock_overlaps_region(&memblock.memory, base, size);
}
/**
* We want to write everything out, not just down to the dirty
* threshold
*/
- if (!bdi_has_dirty_io(&q->backing_dev_info))
+ if (!bdi_has_dirty_io(q->backing_dev_info))
return;
rcu_read_lock();
- list_for_each_entry_rcu(wb, &q->backing_dev_info.wb_list, bdi_node)
+ list_for_each_entry_rcu(wb, &q->backing_dev_info->wb_list, bdi_node)
if (wb_has_dirty_io(wb))
wb_start_writeback(wb, nr_pages, true,
WB_REASON_LAPTOP_TIMER);
{
struct kmem_cache *s;
- if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
+ if (slab_nomerge)
return NULL;
if (ctor)
size = ALIGN(size, align);
flags = kmem_cache_flags(size, flags, name, NULL);
+ if (flags & SLAB_NEVER_MERGE)
+ return NULL;
+
list_for_each_entry_reverse(s, &slab_caches, list) {
if (slab_unmergeable(s))
continue;
u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
void *data);
-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}
return len;
}
-static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
+static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
struct l2cap_conf_opt *opt = *ptr;
BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
+ if (size < L2CAP_CONF_OPT_SIZE + len)
+ return;
+
opt->type = type;
opt->len = len;
*ptr += L2CAP_CONF_OPT_SIZE + len;
}
-static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
+static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
struct l2cap_conf_efs efs;
}
l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
- (unsigned long) &efs);
+ (unsigned long) &efs, size);
}
static void l2cap_ack_timeout(struct work_struct *work)
chan->ack_win = chan->tx_win;
}
-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
struct l2cap_conf_req *req = data;
struct l2cap_conf_rfc rfc = { .mode = chan->mode };
void *ptr = req->data;
+ void *endptr = data + data_size;
u16 size;
BT_DBG("chan %pK", chan);
done:
if (chan->imtu != L2CAP_DEFAULT_MTU)
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
switch (chan->mode) {
case L2CAP_MODE_BASIC:
rfc.max_pdu_size = 0;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
break;
case L2CAP_MODE_ERTM:
L2CAP_DEFAULT_TX_WINDOW);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
- l2cap_add_opt_efs(&ptr, chan);
+ l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
- chan->tx_win);
+ chan->tx_win, endptr - ptr);
if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
if (chan->fcs == L2CAP_FCS_NONE ||
test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
chan->fcs = L2CAP_FCS_NONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
- chan->fcs);
+ chan->fcs, endptr - ptr);
}
break;
rfc.max_pdu_size = cpu_to_le16(size);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
- l2cap_add_opt_efs(&ptr, chan);
+ l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
if (chan->fcs == L2CAP_FCS_NONE ||
test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
chan->fcs = L2CAP_FCS_NONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
- chan->fcs);
+ chan->fcs, endptr - ptr);
}
break;
}
return ptr - data;
}
-static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
struct l2cap_conf_rsp *rsp = data;
void *ptr = rsp->data;
+ void *endptr = data + data_size;
void *req = chan->conf_req;
int len = chan->conf_len;
int type, hint, olen;
return -ECONNREFUSED;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
}
if (result == L2CAP_CONF_SUCCESS) {
chan->omtu = mtu;
set_bit(CONF_MTU_DONE, &chan->conf_state);
}
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
if (remote_efs) {
if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
sizeof(efs),
- (unsigned long) &efs);
+ (unsigned long) &efs, endptr - ptr);
} else {
/* Send PENDING Conf Rsp */
result = L2CAP_CONF_PENDING;
set_bit(CONF_MODE_DONE, &chan->conf_state);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
+ sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
chan->remote_id = efs.id;
le32_to_cpu(efs.sdu_itime);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
sizeof(efs),
- (unsigned long) &efs);
+ (unsigned long) &efs, endptr - ptr);
}
break;
set_bit(CONF_MODE_DONE, &chan->conf_state);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
break;
}
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
- void *data, u16 *result)
+ void *data, size_t size, u16 *result)
{
struct l2cap_conf_req *req = data;
void *ptr = req->data;
+ void *endptr = data + size;
int type, olen;
unsigned long val;
struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
chan->imtu = L2CAP_DEFAULT_MIN_MTU;
} else
chan->imtu = val;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
break;
case L2CAP_CONF_FLUSH_TO:
chan->flush_to = val;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
- 2, chan->flush_to);
+ 2, chan->flush_to, endptr - ptr);
break;
case L2CAP_CONF_RFC:
chan->fcs = 0;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
+ sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
break;
case L2CAP_CONF_EWS:
chan->ack_win = min_t(u16, val, chan->ack_win);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
- chan->tx_win);
+ chan->tx_win, endptr - ptr);
break;
case L2CAP_CONF_EFS:
return -ECONNREFUSED;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
- (unsigned long) &efs);
+ (unsigned long) &efs, endptr - ptr);
break;
case L2CAP_CONF_FCS:
return;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}
u8 buf[128];
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}
break;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, req), req);
+ l2cap_build_conf_req(chan, req, sizeof(req)), req);
chan->num_conf_req++;
break;
}
/* Complete config. */
- len = l2cap_parse_conf_req(chan, rsp);
+ len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
if (len < 0) {
l2cap_send_disconn_req(chan, ECONNRESET);
goto unlock;
if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
u8 buf[64];
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}
char buf[64];
len = l2cap_parse_conf_rsp(chan, rsp->data, len,
- buf, &result);
+ buf, sizeof(buf), &result);
if (len < 0) {
l2cap_send_disconn_req(chan, ECONNRESET);
goto done;
/* throw out any old stored conf requests */
result = L2CAP_CONF_SUCCESS;
len = l2cap_parse_conf_rsp(chan, rsp->data, len,
- req, &result);
+ req, sizeof(req), &result);
if (len < 0) {
l2cap_send_disconn_req(chan, ECONNRESET);
goto done;
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}
}
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf),
+ l2cap_build_conf_req(chan, buf, sizeof(buf)),
buf);
chan->num_conf_req++;
}
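Every l2cap_add_conf_opt() call site above now passes the space left in the buffer (endptr - ptr), and the helper drops any option that would overflow it. The shape of that bounded append, reduced to a stand-alone sketch (the option layout is simplified; add_opt and OPT_HDR_SIZE are illustrative):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define OPT_HDR_SIZE 2	/* type + len, like L2CAP_CONF_OPT_SIZE */

/* Append a type/len/value option at *ptr, but only if it fits in the
 * remaining space -- the same guard added to l2cap_add_conf_opt(). */
static void add_opt(uint8_t **ptr, uint8_t type, uint8_t len,
		    const void *val, size_t space)
{
	if (space < (size_t)OPT_HDR_SIZE + len)
		return;			/* silently drop, as the patch does */
	(*ptr)[0] = type;
	(*ptr)[1] = len;
	memcpy(*ptr + OPT_HDR_SIZE, val, len);
	*ptr += OPT_HDR_SIZE + len;
}

int main(void)
{
	uint8_t buf[8], *ptr = buf, *endptr = buf + sizeof(buf);
	uint16_t mtu = 672;

	add_opt(&ptr, 0x01, sizeof(mtu), &mtu, (size_t)(endptr - ptr));	/* fits */
	add_opt(&ptr, 0x02, 16, "0123456789abcdef", (size_t)(endptr - ptr));	/* dropped */
	printf("used %zu of %zu bytes\n", (size_t)(ptr - buf), sizeof(buf));
	return 0;
}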
spin_unlock_bh(&br->lock);
}
- err = br_changelink(dev, tb, data);
+ err = register_netdevice(dev);
if (err)
return err;
- return register_netdevice(dev);
+ err = br_changelink(dev, tb, data);
+ if (err)
+ unregister_netdevice(dev);
+ return err;
}
static size_t br_get_size(const struct net_device *brdev)
#include <linux/filter.h>
#include <linux/compat.h>
#include <linux/security.h>
+#include <linux/audit.h>
#include <linux/export.h>
#include <net/scm.h>
COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
{
- int ret;
- u32 a[6];
+ u32 a[AUDITSC_ARGS];
+ unsigned int len;
u32 a0, a1;
+ int ret;
if (call < SYS_SOCKET || call > SYS_SENDMMSG)
return -EINVAL;
- if (copy_from_user(a, args, nas[call]))
+ len = nas[call];
+ if (len > sizeof(a))
+ return -EINVAL;
+
+ if (copy_from_user(a, args, len))
return -EFAULT;
+
+ ret = audit_socketcall_compat(len / sizeof(a[0]), a);
+ if (ret)
+ return ret;
+
a0 = a[0];
a1 = a[1];
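The compat socketcall() fix validates the per-call argument length against the destination array before copying, instead of trusting the table blindly. The same check-before-copy discipline in isolation (arg_bytes and fetch_args are illustrative, and memcpy stands in for copy_from_user()):

#include <stdint.h>
#include <string.h>
#include <errno.h>

static const unsigned char arg_bytes[3] = { 8, 12, 24 };	/* per-call sizes */

static int fetch_args(int call, const void *user, uint32_t a[6])
{
	unsigned int len;

	if (call < 0 || call >= 3)
		return -EINVAL;
	len = arg_bytes[call];
	if (len > 6 * sizeof(a[0]))	/* reject before touching the buffer */
		return -EINVAL;
	memcpy(a, user, len);		/* stands in for copy_from_user() */
	return 0;
}

int main(void)
{
	uint32_t a[6];
	uint32_t src[6] = { 1, 2, 3, 4, 5, 6 };

	return fetch_args(1, src, a);
}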
{
unsigned long flags;
+ if (unlikely(!skb))
+ return;
+
if (likely(atomic_read(&skb->users) == 1)) {
smp_rmb();
atomic_set(&skb->users, 0);
sock_copy(newsk, sk);
+ newsk->sk_prot_creator = sk->sk_prot;
+
/* SANITY */
if (likely(newsk->sk_net_refcnt))
get_net(sock_net(newsk));
{
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
- int res;
ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
- res = inet_frags_init_net(&ieee802154_lowpan->frags);
- if (res)
- return res;
- res = lowpan_frags_ns_sysctl_register(net);
- if (res)
- inet_frags_uninit_net(&ieee802154_lowpan->frags);
- return res;
+ inet_frags_init_net(&ieee802154_lowpan->frags);
+
+ return lowpan_frags_ns_sysctl_register(net);
}
static void __net_exit lowpan_frags_exit_net(struct net *net)
cond_resched();
if (read_seqretry(&f->rnd_seqlock, seq) ||
- percpu_counter_sum(&nf->mem))
+ sum_frag_mem_limit(nf))
goto evict_again;
-
- percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);
static int __net_init ipv4_frags_init_net(struct net *net)
{
- int res;
-
/* Fragment cache limits.
*
* The fragment memory accounting code, (tries to) account for
*/
net->ipv4.frags.timeout = IP_FRAG_TIME;
- res = inet_frags_init_net(&net->ipv4.frags);
- if (res)
- return res;
- res = ip4_frags_ns_ctl_register(net);
- if (res)
- inet_frags_uninit_net(&net->ipv4.frags);
- return res;
+ inet_frags_init_net(&net->ipv4.frags);
+
+ return ip4_frags_ns_ctl_register(net);
}
static void __net_exit ipv4_frags_exit_net(struct net *net)
struct ip_tunnel_parm *parms = &tunnel->parms;
struct dst_entry *dst = skb_dst(skb);
struct net_device *tdev; /* Device to other host */
+ int pkt_len = skb->len;
int err;
if (!dst) {
err = dst_output(tunnel->net, skb->sk, skb);
if (net_xmit_eval(err) == 0)
- err = skb->len;
+ err = pkt_len;
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
return NETDEV_TX_OK;
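dst_output() consumes the skb, so reading skb->len afterwards touches freed memory; the tunnel fixes snapshot the length into pkt_len first. The general rule -- copy what you still need before handing ownership away -- as a small sketch (struct pkt and transmit() are illustrative):

#include <stdlib.h>
#include <stdio.h>

struct pkt { size_t len; unsigned char *data; };

/* Takes ownership of p and frees it, like dst_output() consuming the skb. */
static int transmit(struct pkt *p)
{
	free(p->data);
	free(p);
	return 0;
}

static int send_one(struct pkt *p)
{
	size_t pkt_len = p->len;	/* snapshot before ownership transfer */
	int err = transmit(p);

	if (err == 0)
		printf("sent %zu bytes\n", pkt_len);	/* p is gone; use the copy */
	return err;
}

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));

	if (!p)
		return 1;
	p->len = 100;
	p->data = malloc(p->len);
	if (!p->data) {
		free(p);
		return 1;
	}
	return send_one(p);
}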
static void __exit nf_nat_snmp_basic_fini(void)
{
RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
+ synchronize_rcu();
nf_conntrack_helper_unregister(&snmp_trap_helper);
}
tcp_set_ca_state(sk, TCP_CA_Open);
tcp_clear_retrans(tp);
inet_csk_delack_init(sk);
+ /* Initialize rcv_mss to TCP_MIN_MSS to avoid a division-by-zero
+ * issue in __tcp_select_window()
+ */
+ icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
tcp_init_send_head(sk);
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);
static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
- int tos,
+ int tos, int oif,
const xfrm_address_t *saddr,
- const xfrm_address_t *daddr)
+ const xfrm_address_t *daddr,
+ u32 mark)
{
struct rtable *rt;
memset(fl4, 0, sizeof(*fl4));
fl4->daddr = daddr->a4;
fl4->flowi4_tos = tos;
+ fl4->flowi4_oif = l3mdev_master_ifindex_by_index(net, oif);
+ fl4->flowi4_mark = mark;
if (saddr)
fl4->saddr = saddr->a4;
return ERR_CAST(rt);
}
-static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
+static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, int oif,
const xfrm_address_t *saddr,
- const xfrm_address_t *daddr)
+ const xfrm_address_t *daddr,
+ u32 mark)
{
struct flowi4 fl4;
- return __xfrm4_dst_lookup(net, &fl4, tos, saddr, daddr);
+ return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr, mark);
}
-static int xfrm4_get_saddr(struct net *net,
- xfrm_address_t *saddr, xfrm_address_t *daddr)
+static int xfrm4_get_saddr(struct net *net, int oif,
+ xfrm_address_t *saddr, xfrm_address_t *daddr,
+ u32 mark)
{
struct dst_entry *dst;
struct flowi4 fl4;
- dst = __xfrm4_dst_lookup(net, &fl4, 0, NULL, daddr);
+ dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr, mark);
if (IS_ERR(dst))
return -EHOSTUNREACH;
* our DAD process, so we don't need
* to do it again
*/
- if (!(ifp->rt->rt6i_node))
+ if (!rcu_access_pointer(ifp->rt->rt6i_node))
ip6_ins_rt(ifp->rt);
if (ifp->idev->cnf.forwarding)
addrconf_join_anycast(ifp);
return fn;
}
-static void node_free(struct fib6_node *fn)
+static void node_free_immediate(struct fib6_node *fn)
+{
+ kmem_cache_free(fib6_node_kmem, fn);
+}
+
+static void node_free_rcu(struct rcu_head *head)
{
+ struct fib6_node *fn = container_of(head, struct fib6_node, rcu);
+
kmem_cache_free(fib6_node_kmem, fn);
}
+static void node_free(struct fib6_node *fn)
+{
+ call_rcu(&fn->rcu, node_free_rcu);
+}
+
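node_free() now defers the actual kmem_cache_free() to an RCU callback, which receives a pointer to the embedded rcu_head and recovers the enclosing node via container_of(). The pointer arithmetic behind that recovery, shown stand-alone (struct node is a stand-in, and the grace-period deferral itself is elided since it needs the RCU machinery):

#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { void (*func)(struct rcu_head *); };

struct node {
	int key;
	struct rcu_head rcu;	/* embedded, like fib6_node's rcu member */
};

/* The callback gets the embedded member and recovers the whole node. */
static void node_free_rcu(struct rcu_head *head)
{
	struct node *n = container_of(head, struct node, rcu);

	printf("freeing node with key %d\n", n->key);
	free(n);
}

int main(void)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return 1;
	n->key = 42;
	n->rcu.func = node_free_rcu;
	/* call_rcu() would run this after a grace period; invoke directly here */
	n->rcu.func(&n->rcu);
	return 0;
}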
static void rt6_rcu_free(struct rt6_info *rt)
{
call_rcu(&rt->dst.rcu_head, dst_rcu_free);
}
}
+static void fib6_free_table(struct fib6_table *table)
+{
+ inetpeer_invalidate_tree(&table->tb6_peers);
+ kfree(table);
+}
+
static void fib6_link_table(struct net *net, struct fib6_table *tb)
{
unsigned int h;
if (!in || !ln) {
if (in)
- node_free(in);
+ node_free_immediate(in);
if (ln)
- node_free(ln);
+ node_free_immediate(ln);
return ERR_PTR(-ENOMEM);
}
rt->dst.rt6_next = iter;
*ins = rt;
- rt->rt6i_node = fn;
+ rcu_assign_pointer(rt->rt6i_node, fn);
atomic_inc(&rt->rt6i_ref);
inet6_rt_notify(RTM_NEWROUTE, rt, info, 0);
info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
return err;
*ins = rt;
- rt->rt6i_node = fn;
+ rcu_assign_pointer(rt->rt6i_node, fn);
rt->dst.rt6_next = iter->dst.rt6_next;
atomic_inc(&rt->rt6i_ref);
inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
fn->fn_flags |= RTN_RTINFO;
}
nsiblings = iter->rt6i_nsiblings;
+ iter->rt6i_node = NULL;
fib6_purge_rt(iter, fn, info->nl_net);
if (fn->rr_ptr == iter)
fn->rr_ptr = NULL;
break;
if (rt6_qualify_for_ecmp(iter)) {
*ins = iter->dst.rt6_next;
+ iter->rt6i_node = NULL;
fib6_purge_rt(iter, fn, info->nl_net);
if (fn->rr_ptr == iter)
fn->rr_ptr = NULL;
root, and then (in failure) stale node
in main tree.
*/
- node_free(sfn);
+ node_free_immediate(sfn);
err = PTR_ERR(sn);
goto failure;
}
int fib6_del(struct rt6_info *rt, struct nl_info *info)
{
+ struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node,
+ lockdep_is_held(&rt->rt6i_table->tb6_lock));
struct net *net = info->nl_net;
- struct fib6_node *fn = rt->rt6i_node;
struct rt6_info **rtp;
#if RT6_DEBUG >= 2
if (res) {
#if RT6_DEBUG >= 2
pr_debug("%s: del failed: rt=%p@%p err=%d\n",
- __func__, rt, rt->rt6i_node, res);
+ __func__, rt,
+ rcu_access_pointer(rt->rt6i_node),
+ res);
#endif
continue;
}
static void fib6_net_exit(struct net *net)
{
+ unsigned int i;
+
rt6_ifdown(net, NULL);
del_timer_sync(&net->ipv6.ip6_fib_timer);
-#ifdef CONFIG_IPV6_MULTIPLE_TABLES
- inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
- kfree(net->ipv6.fib6_local_tbl);
-#endif
- inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
- kfree(net->ipv6.fib6_main_tbl);
+ for (i = 0; i < FIB6_TABLE_HASHSZ; i++) {
+ struct hlist_head *head = &net->ipv6.fib_table_hash[i];
+ struct hlist_node *tmp;
+ struct fib6_table *tb;
+
+ hlist_for_each_entry_safe(tb, tmp, head, tb6_hlist) {
+ hlist_del(&tb->tb6_hlist);
+ fib6_free_table(tb);
+ }
+ }
+
kfree(net->ipv6.fib_table_hash);
kfree(net->ipv6.rt6_stats);
}
}
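fib6_net_exit() now walks every bucket of the table hash with the _safe iterator so each table can be unlinked and freed while the walk continues. The same free-while-iterating pattern on a plain singly linked list (struct table is illustrative):

#include <stdlib.h>
#include <stdio.h>

struct table { int id; struct table *next; };

/* Free every node: cache ->next before freeing, which is exactly what
 * hlist_for_each_entry_safe()'s extra "tmp" cursor buys in the kernel. */
static void free_all(struct table **head)
{
	struct table *tb = *head, *tmp;

	while (tb) {
		tmp = tb->next;		/* grab the successor first */
		printf("freeing table %d\n", tb->id);
		free(tb);		/* safe: the walk no longer touches tb */
		tb = tmp;
	}
	*head = NULL;
}

int main(void)
{
	struct table *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct table *tb = malloc(sizeof(*tb));

		if (!tb)
			return 1;
		tb->id = i;
		tb->next = head;
		head = tb;
	}
	free_all(&head);
	return 0;
}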
static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type,
- const void *daddr, const void *saddr, unsigned int len)
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned int len)
{
struct ip6_tnl *t = netdev_priv(dev);
- struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen);
- __be16 *p = (__be16 *)(ipv6h+1);
+ struct ipv6hdr *ipv6h;
+ __be16 *p;
- ip6_flow_hdr(ipv6h, 0,
- ip6_make_flowlabel(dev_net(dev), skb,
- t->fl.u.ip6.flowlabel, true,
- &t->fl.u.ip6));
+ ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen + sizeof(*ipv6h));
+ ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
+ t->fl.u.ip6.flowlabel,
+ true, &t->fl.u.ip6));
ipv6h->hop_limit = t->parms.hop_limit;
ipv6h->nexthdr = NEXTHDR_GRE;
ipv6h->saddr = t->parms.laddr;
ipv6h->daddr = t->parms.raddr;
- p[0] = t->parms.o_flags;
- p[1] = htons(type);
+ p = (__be16 *)(ipv6h + 1);
+ p[0] = t->parms.o_flags;
+ p[1] = htons(type);
/*
* Set the source hardware address.
#ifdef CONFIG_IPV6_SUBTREES
ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
- (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
+ (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
+ (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
dst_release(dst);
dst = NULL;
}
struct dst_entry *dst = skb_dst(skb);
struct net_device *tdev;
struct xfrm_state *x;
+ int pkt_len = skb->len;
int err = -1;
int mtu;
struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
u64_stats_update_begin(&tstats->syncp);
- tstats->tx_bytes += skb->len;
+ tstats->tx_bytes += pkt_len;
tstats->tx_packets++;
u64_stats_update_end(&tstats->syncp);
} else {
static int nf_ct_net_init(struct net *net)
{
- int res;
-
net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
- res = inet_frags_init_net(&net->nf_frag.frags);
- if (res)
- return res;
- res = nf_ct_frag6_sysctl_register(net);
- if (res)
- inet_frags_uninit_net(&net->nf_frag.frags);
- return res;
+ inet_frags_init_net(&net->nf_frag.frags);
+
+ return nf_ct_frag6_sysctl_register(net);
}
static void nf_ct_net_exit(struct net *net)
while (offset <= packet_len) {
struct ipv6_opt_hdr *exthdr;
- unsigned int len;
switch (**nexthdr) {
exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
offset);
- len = ipv6_optlen(exthdr);
- if (len + offset >= IPV6_MAXPLEN)
+ offset += ipv6_optlen(exthdr);
+ if (offset > IPV6_MAXPLEN)
return -EINVAL;
- offset += len;
*nexthdr = &exthdr->nexthdr;
}
static int __net_init ipv6_frags_init_net(struct net *net)
{
- int res;
-
net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
- res = inet_frags_init_net(&net->ipv6.frags);
- if (res)
- return res;
- res = ip6_frags_ns_sysctl_register(net);
- if (res)
- inet_frags_uninit_net(&net->ipv6.frags);
- return res;
+ inet_frags_init_net(&net->ipv6.frags);
+
+ return ip6_frags_ns_sysctl_register(net);
}
static void __net_exit ipv6_frags_exit_net(struct net *net)
fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
saved_fn = fn;
+ if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
+ oif = 0;
+
redo_rt6_select:
rt = rt6_select(fn, oif, strict);
if (rt->rt6i_nsiblings)
static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
{
- if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
+ u32 rt_cookie;
+
+ if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie)
return NULL;
if (rt6_check_expired(rt))
if (rt->rt6i_flags & RTF_CACHE) {
dst_hold(&rt->dst);
ip6_del_rt(rt);
- } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
- rt->rt6i_node->fn_sernum = -1;
+ } else {
+ struct fib6_node *fn;
+
+ rcu_read_lock();
+ fn = rcu_dereference(rt->rt6i_node);
+ if (fn && (rt->rt6i_flags & RTF_DEFAULT))
+ fn->fn_sernum = -1;
+ rcu_read_unlock();
}
}
}
static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
{
return !(rt->rt6i_flags & RTF_CACHE) &&
- (rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
+ (rt->rt6i_flags & RTF_PCPU ||
+ rcu_access_pointer(rt->rt6i_node));
}
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
static struct xfrm_policy_afinfo xfrm6_policy_afinfo;
-static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
+static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
const xfrm_address_t *saddr,
- const xfrm_address_t *daddr)
+ const xfrm_address_t *daddr,
+ u32 mark)
{
struct flowi6 fl6;
struct dst_entry *dst;
int err;
memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_oif = l3mdev_master_ifindex_by_index(net, oif);
+ fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
+ fl6.flowi6_mark = mark;
memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
if (saddr)
memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr));
return dst;
}
-static int xfrm6_get_saddr(struct net *net,
- xfrm_address_t *saddr, xfrm_address_t *daddr)
+static int xfrm6_get_saddr(struct net *net, int oif,
+ xfrm_address_t *saddr, xfrm_address_t *daddr,
+ u32 mark)
{
struct dst_entry *dst;
struct net_device *dev;
- dst = xfrm6_dst_lookup(net, 0, NULL, daddr);
+ dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr, mark);
if (IS_ERR(dst))
return -EHOSTUNREACH;
struct sock *sk = NULL;
tunnel = container_of(work, struct l2tp_tunnel, del_work);
+
+ l2tp_tunnel_closeall(tunnel);
+
sk = l2tp_tunnel_sock_lookup(tunnel);
if (!sk)
goto out;
/* This function is used by the netlink TUNNEL_DELETE command.
*/
-int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
+void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
{
- l2tp_tunnel_inc_refcount(tunnel);
- l2tp_tunnel_closeall(tunnel);
- if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
- l2tp_tunnel_dec_refcount(tunnel);
- return 1;
+ if (!test_and_set_bit(0, &tunnel->dead)) {
+ l2tp_tunnel_inc_refcount(tunnel);
+ queue_work(l2tp_wq, &tunnel->del_work);
}
- return 0;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
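l2tp_tunnel_delete() becomes idempotent: test_and_set_bit() on the new dead flag guarantees that the refcount bump and the work queueing happen at most once, even when deletion races with itself. A C11 version of the same run-once gate (struct tunnel here is illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct tunnel { atomic_flag dead; };

static void tunnel_delete(struct tunnel *t)
{
	/* First caller wins; every later call sees the flag already set,
	 * mirroring test_and_set_bit(0, &tunnel->dead). */
	if (!atomic_flag_test_and_set(&t->dead))
		printf("queueing teardown work\n");
	else
		printf("already dying, nothing to do\n");
}

int main(void)
{
	struct tunnel t = { ATOMIC_FLAG_INIT };

	tunnel_delete(&t);	/* queues */
	tunnel_delete(&t);	/* no-op */
	return 0;
}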
struct l2tp_tunnel {
int magic; /* Should be L2TP_TUNNEL_MAGIC */
+
+ unsigned long dead;
+
struct rcu_head rcu;
rwlock_t hlist_lock; /* protect session_hlist */
struct hlist_head session_hlist[L2TP_HASH_SIZE];
u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
struct l2tp_tunnel **tunnelp);
void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
-int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
+void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
struct l2tp_session *l2tp_session_create(int priv_size,
struct l2tp_tunnel *tunnel,
u32 session_id, u32 peer_session_id,
struct ieee80211_roc_work *roc, *tmp;
LIST_HEAD(tmp_list);
+ flush_work(&local->hw_roc_start);
+
mutex_lock(&local->mtx);
list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
if (sdata && roc->sdata != sdata)
}
/* No need to do anything if the driver does all */
- if (ieee80211_hw_check(&local->hw, AP_LINK_PS))
+ if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim)
return;
if (sta->dead)
BUG_ON(notify != new);
RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
+ /* synchronize_rcu() is called from ctnetlink_exit. */
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
BUG_ON(notify != new);
RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
+ /* synchronize_rcu() is called from ctnetlink_exit. */
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
struct net *net = nf_ct_exp_net(expect);
struct hlist_node *next;
unsigned int h;
- int ret = 1;
+ int ret = 0;
if (!master_help) {
ret = -ESHUTDOWN;
spin_lock_bh(&nf_conntrack_expect_lock);
ret = __nf_ct_expect_check(expect);
- if (ret <= 0)
+ if (ret < 0)
goto out;
ret = nf_ct_expect_insert(expect);
#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
RCU_INIT_POINTER(nfnl_ct_hook, NULL);
#endif
+ synchronize_rcu();
}
module_init(ctnetlink_init);
#ifdef CONFIG_XFRM
RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
#endif
+ synchronize_rcu();
+
for (i = 0; i < NFPROTO_NUMPROTO; i++)
kfree(nf_nat_l4protos[i]);
synchronize_net();
int i, ret;
struct nf_conntrack_expect_policy *expect_policy;
struct nlattr *tb[NFCTH_POLICY_SET_MAX+1];
+ unsigned int class_max;
ret = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
nfnl_cthelper_expect_policy_set);
if (!tb[NFCTH_POLICY_SET_NUM])
return -EINVAL;
- helper->expect_class_max =
- ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
-
- if (helper->expect_class_max != 0 &&
- helper->expect_class_max > NF_CT_MAX_EXPECT_CLASSES)
+ class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
+ if (class_max == 0)
+ return -EINVAL;
+ if (class_max > NF_CT_MAX_EXPECT_CLASSES)
return -EOVERFLOW;
expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) *
- helper->expect_class_max, GFP_KERNEL);
+ class_max, GFP_KERNEL);
if (expect_policy == NULL)
return -ENOMEM;
- for (i=0; i<helper->expect_class_max; i++) {
+ for (i = 0; i < class_max; i++) {
if (!tb[NFCTH_POLICY_SET+i])
goto err;
if (ret < 0)
goto err;
}
+
+ helper->expect_class_max = class_max - 1;
helper->expect_policy = expect_policy;
return 0;
err:
goto nla_put_failure;
if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM,
- htonl(helper->expect_class_max)))
+ htonl(helper->expect_class_max + 1)))
goto nla_put_failure;
- for (i=0; i<helper->expect_class_max; i++) {
+ for (i = 0; i < helper->expect_class_max + 1; i++) {
nest_parms2 = nla_nest_start(skb,
(NFCTH_POLICY_SET+i) | NLA_F_NESTED);
if (nest_parms2 == NULL)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL);
RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL);
+ synchronize_rcu();
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- rcu_barrier();
}
module_init(cttimeout_init);
MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d fam=%d proto=%d\n",
par->hooknum, sk, got_sock, par->family, ipx_proto(skb, par));
-
- if (sk == NULL) {
+ if (!sk) {
/*
* Here, the qtaguid_find_sk() using connection tracking
* couldn't find the owner, so for now we just count them
}
sock_uid = sk->sk_uid;
if (do_tag_stat)
- account_for_uid(skb, sk, from_kuid(&init_user_ns, sock_uid), par);
+ account_for_uid(skb, sk, from_kuid(&init_user_ns, sock_uid),
+ par);
/*
* The following two tests fail the match when:
kuid_t uid_min = make_kuid(&init_user_ns, info->uid_min);
kuid_t uid_max = make_kuid(&init_user_ns, info->uid_max);
- if ((uid_gte(sk->sk_uid, uid_min) &&
- uid_lte(sk->sk_uid, uid_max)) ^
+ if ((uid_gte(sock_uid, uid_min) &&
+ uid_lte(sock_uid, uid_max)) ^
!(info->invert & XT_QTAGUID_UID)) {
MT_DEBUG("qtaguid[%d]: leaving uid not matching\n",
par->hooknum);
set_sk_callback_lock = true;
read_lock_bh(&sk->sk_callback_lock);
MT_DEBUG("qtaguid[%d]: sk=%pK->sk_socket=%pK->file=%pK\n",
- par->hooknum, sk, sk->sk_socket,
- sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
+ par->hooknum, sk, sk->sk_socket,
+ sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
filp = sk->sk_socket ? sk->sk_socket->file : NULL;
if (!filp) {
- res = ((info->match ^ info->invert) & XT_QTAGUID_GID) == 0;
+ res = ((info->match ^ info->invert) &
+ XT_QTAGUID_GID) == 0;
atomic64_inc(&qtu_events.match_no_sk_gid);
goto put_sock_ret_res;
}
MT_DEBUG("qtaguid[%d]: filp...uid=%u\n",
- par->hooknum, filp ? from_kuid(&init_user_ns, filp->f_cred->fsuid) : -1);
+ par->hooknum, filp ?
+ from_kuid(&init_user_ns, filp->f_cred->fsuid) : -1);
if ((gid_gte(filp->f_cred->fsgid, gid_min) &&
gid_lte(filp->f_cred->fsgid, gid_max)) ^
!(info->invert & XT_QTAGUID_GID)) {
mutex_lock(&fanout_mutex);
- err = -EINVAL;
- if (!po->running)
- goto out;
-
err = -EALREADY;
if (po->fanout)
goto out;
list_add(&match->list, &fanout_list);
}
err = -EINVAL;
- if (match->type == type &&
+
+ spin_lock(&po->bind_lock);
+ if (po->running &&
+ match->type == type &&
match->prot_hook.type == po->prot_hook.type &&
match->prot_hook.dev == po->prot_hook.dev) {
err = -ENOSPC;
err = 0;
}
}
+ spin_unlock(&po->bind_lock);
+
+ if (err && !atomic_read(&match->sk_ref)) {
+ list_del(&match->list);
+ kfree(match);
+ }
+
out:
if (err && rollover) {
kfree(rollover);
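fanout_add() now performs the po->running check and the group-compatibility test under po->bind_lock, so the socket cannot unbind between the check and the list insertion. A skeleton of check-then-act under a single lock (pthreads version; the names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bind_lock = PTHREAD_MUTEX_INITIALIZER;
static int running = 1;		/* analog of po->running */
static int fanout_members;

static int fanout_add(void)
{
	int err = -1;

	pthread_mutex_lock(&bind_lock);
	if (running) {			/* check ... */
		fanout_members++;	/* ... and act under the same lock */
		err = 0;
	}
	pthread_mutex_unlock(&bind_lock);
	return err;
}

int main(void)
{
	printf("add: %d\n", fanout_add());
	return 0;
}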
int vnet_hdr_len;
struct packet_sock *po = pkt_sk(sk);
unsigned short gso_type = 0;
+ bool has_vnet_hdr = false;
int hlen, tlen, linear;
int extra_len = 0;
ssize_t n;
goto out_unlock;
}
+ has_vnet_hdr = true;
}
if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
packet_pick_tx_queue(dev, skb);
- if (po->has_vnet_hdr) {
+ if (has_vnet_hdr) {
if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start);
u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset);
int ret = 0;
bool unlisted = false;
- if (po->fanout)
- return -EINVAL;
-
lock_sock(sk);
spin_lock(&po->bind_lock);
rcu_read_lock();
+ if (po->fanout) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
if (name) {
dev = dev_get_by_name_rcu(sock_net(sk), name);
if (!dev) {
case PACKET_HDRLEN:
if (len > sizeof(int))
len = sizeof(int);
+ if (len < sizeof(int))
+ return -EINVAL;
if (copy_from_user(&val, optval, len))
return -EFAULT;
switch (val) {
ret = PTR_ERR(ic->i_send_cq);
ic->i_send_cq = NULL;
rdsdebug("ib_create_cq send failed: %d\n", ret);
- goto out;
+ goto rds_ibdev_out;
}
cq_attr.cqe = ic->i_recv_ring.w_nr;
ret = PTR_ERR(ic->i_recv_cq);
ic->i_recv_cq = NULL;
rdsdebug("ib_create_cq recv failed: %d\n", ret);
- goto out;
+ goto send_cq_out;
}
ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
if (ret) {
rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
- goto out;
+ goto recv_cq_out;
}
ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
if (ret) {
rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
- goto out;
+ goto recv_cq_out;
}
/* XXX negotiate max send/recv with remote? */
ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
if (ret) {
rdsdebug("rdma_create_qp failed: %d\n", ret);
- goto out;
+ goto recv_cq_out;
}
ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
if (!ic->i_send_hdrs) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent send failed\n");
- goto out;
+ goto qp_out;
}
ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
if (!ic->i_recv_hdrs) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent recv failed\n");
- goto out;
+ goto send_hdrs_dma_out;
}
ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
if (!ic->i_ack) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent ack failed\n");
- goto out;
+ goto recv_hdrs_dma_out;
}
ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
if (!ic->i_sends) {
ret = -ENOMEM;
rdsdebug("send allocation failed\n");
- goto out;
+ goto ack_dma_out;
}
ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
if (!ic->i_recvs) {
ret = -ENOMEM;
rdsdebug("recv allocation failed\n");
- goto out;
+ goto sends_out;
}
rds_ib_recv_init_ack(ic);
rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
ic->i_send_cq, ic->i_recv_cq);
-out:
+ return ret;
+
+sends_out:
+ vfree(ic->i_sends);
+ack_dma_out:
+ ib_dma_free_coherent(dev, sizeof(struct rds_header),
+ ic->i_ack, ic->i_ack_dma);
+recv_hdrs_dma_out:
+ ib_dma_free_coherent(dev, ic->i_recv_ring.w_nr *
+ sizeof(struct rds_header),
+ ic->i_recv_hdrs, ic->i_recv_hdrs_dma);
+send_hdrs_dma_out:
+ ib_dma_free_coherent(dev, ic->i_send_ring.w_nr *
+ sizeof(struct rds_header),
+ ic->i_send_hdrs, ic->i_send_hdrs_dma);
+qp_out:
+ rdma_destroy_qp(ic->i_cm_id);
+recv_cq_out:
+ if (!ib_destroy_cq(ic->i_recv_cq))
+ ic->i_recv_cq = NULL;
+send_cq_out:
+ if (!ib_destroy_cq(ic->i_send_cq))
+ ic->i_send_cq = NULL;
+rds_ibdev_out:
+ rds_ib_remove_conn(rds_ibdev, conn);
rds_ib_dev_put(rds_ibdev);
+
return ret;
}
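The RDS/IB setup path now unwinds in strict reverse order of construction: each failure jumps to the label that releases everything acquired so far and falls through the remaining labels. The template, reduced to plain allocations (the label names echo the patch, but the resources here are just mallocs):

#include <stdlib.h>

static int setup(void)
{
	void *send_cq, *recv_cq, *hdrs;

	send_cq = malloc(16);
	if (!send_cq)
		goto out;		/* nothing acquired yet */
	recv_cq = malloc(16);
	if (!recv_cq)
		goto send_cq_out;	/* undo step 1 */
	hdrs = malloc(16);
	if (!hdrs)
		goto recv_cq_out;	/* undo step 2, then step 1 */

	/* success: real code keeps the resources; the sketch just
	 * releases them and reports success */
	free(hdrs);
	free(recv_cq);
	free(send_cq);
	return 0;

recv_cq_out:
	free(recv_cq);
send_cq_out:
	free(send_cq);
out:
	return -1;
}

int main(void) { return setup(); }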
complete(rm, notify_status);
}
-static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
- struct rm_data_op *op,
- int wc_status)
-{
- if (op->op_nents)
- ib_dma_unmap_sg(ic->i_cm_id->device,
- op->op_sg, op->op_nents,
- DMA_TO_DEVICE);
-}
-
static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
struct rm_rdma_op *op,
int wc_status)
rds_ib_stats_inc(s_ib_atomic_fadd);
}
+static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
+ struct rm_data_op *op,
+ int wc_status)
+{
+ struct rds_message *rm = container_of(op, struct rds_message, data);
+
+ if (op->op_nents)
+ ib_dma_unmap_sg(ic->i_cm_id->device,
+ op->op_sg, op->op_nents,
+ DMA_TO_DEVICE);
+
+ if (rm->rdma.op_active && rm->data.op_notify)
+ rds_ib_send_unmap_rdma(ic, &rm->rdma, wc_status);
+}
+
/*
* Unmap the resources associated with a struct send_work.
*
}
op->op_notifier->n_user_token = args->user_token;
op->op_notifier->n_status = RDS_RDMA_SUCCESS;
+
+ /* Enable rdma notification on the data operation for composite
+ * rds messages, and make sure notification is enabled only for
+ * the data operation that follows it, so that the application
+ * gets notified only after the full message gets delivered.
+ */
+ if (rm->data.op_sg) {
+ rm->rdma.op_notify = 0;
+ rm->data.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
+ }
}
/* The cookie contains the R_Key of the remote memory region, and
} rdma;
struct rm_data_op {
unsigned int op_active:1;
+ unsigned int op_notify:1;
unsigned int op_nents;
unsigned int op_count;
unsigned int op_dmasg;
struct rm_rdma_op *ro;
struct rds_notifier *notifier;
unsigned long flags;
+ unsigned int notify = 0;
spin_lock_irqsave(&rm->m_rs_lock, flags);
+ notify = rm->rdma.op_notify | rm->data.op_notify;
ro = &rm->rdma;
if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
- ro->op_active && ro->op_notify && ro->op_notifier) {
+ ro->op_active && notify && ro->op_notifier) {
notifier = ro->op_notifier;
rs = rm->m_rs;
sock_hold(rds_rs_to_sk(rs));
return false;
if (msg_errcode(msg))
return false;
- *err = -TIPC_ERR_NO_NAME;
+ *err = TIPC_ERR_NO_NAME;
if (skb_linearize(skb))
return false;
msg = buf_msg(skb);
},
};
+/* policy for packet pattern attributes */
+static const struct nla_policy
+nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = {
+ [NL80211_PKTPAT_MASK] = { .type = NLA_BINARY, },
+ [NL80211_PKTPAT_PATTERN] = { .type = NLA_BINARY, },
+ [NL80211_PKTPAT_OFFSET] = { .type = NLA_U32 },
+};
+
static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct cfg80211_registered_device **rdev,
u8 *mask_pat;
nla_parse(pat_tb, MAX_NL80211_PKTPAT, nla_data(pat),
- nla_len(pat), NULL);
+ nla_len(pat), nl80211_packet_pattern_policy);
err = -EINVAL;
if (!pat_tb[NL80211_PKTPAT_MASK] ||
!pat_tb[NL80211_PKTPAT_PATTERN])
u8 *mask_pat;
nla_parse(pat_tb, MAX_NL80211_PKTPAT, nla_data(pat),
- nla_len(pat), NULL);
+ nla_len(pat), nl80211_packet_pattern_policy);
if (!pat_tb[NL80211_PKTPAT_MASK] ||
!pat_tb[NL80211_PKTPAT_PATTERN])
return -EINVAL;
goto error_nolock;
}
+ if (x->props.output_mark)
+ skb->mark = x->props.output_mark;
+
err = x->outer_mode->output(x, skb);
if (err) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
rcu_read_unlock();
}
-static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
+static inline struct dst_entry *__xfrm_dst_lookup(struct net *net,
+ int tos, int oif,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr,
- int family)
+ int family, u32 mark)
{
struct xfrm_policy_afinfo *afinfo;
struct dst_entry *dst;
if (unlikely(afinfo == NULL))
return ERR_PTR(-EAFNOSUPPORT);
- dst = afinfo->dst_lookup(net, tos, saddr, daddr);
+ dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
xfrm_policy_put_afinfo(afinfo);
return dst;
}
-static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
+static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
+ int tos, int oif,
xfrm_address_t *prev_saddr,
xfrm_address_t *prev_daddr,
- int family)
+ int family, u32 mark)
{
struct net *net = xs_net(x);
xfrm_address_t *saddr = &x->props.saddr;
daddr = x->coaddr;
}
- dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);
+ dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
if (!IS_ERR(dst)) {
if (prev_saddr != saddr)
}
static int
-xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
- unsigned short family)
+xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
+ xfrm_address_t *remote, unsigned short family, u32 mark)
{
int err;
struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
if (unlikely(afinfo == NULL))
return -EINVAL;
- err = afinfo->get_saddr(net, local, remote);
+ err = afinfo->get_saddr(net, oif, local, remote, mark);
xfrm_policy_put_afinfo(afinfo);
return err;
}
remote = &tmpl->id.daddr;
local = &tmpl->saddr;
if (xfrm_addr_any(local, tmpl->encap_family)) {
- error = xfrm_get_saddr(net, &tmp, remote,
- tmpl->encap_family);
+ error = xfrm_get_saddr(net, fl->flowi_oif,
+ &tmp, remote,
+ tmpl->encap_family, 0);
if (error)
goto fail;
local = &tmp;
if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
family = xfrm[i]->props.family;
- dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
- family);
+ dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
+ &saddr, &daddr, family,
+ xfrm[i]->props.output_mark);
err = PTR_ERR(dst);
if (IS_ERR(dst))
goto put_states;
struct xfrm_state *x_new[XFRM_MAX_DEPTH];
struct xfrm_migrate *mp;
+ /* Stage 0 - sanity checks */
if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
goto out;
+ if (dir >= XFRM_POLICY_MAX) {
+ err = -EINVAL;
+ goto out;
+ }
+
/* Stage 1 - find policy */
if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
err = -ENOENT;
xfrm_mark_get(attrs, &x->mark);
+ if (attrs[XFRMA_OUTPUT_MARK])
+ x->props.output_mark = nla_get_u32(attrs[XFRMA_OUTPUT_MARK]);
+
err = __xfrm_init_state(x, false);
if (err)
goto error;
goto out;
if (x->security)
ret = copy_sec_ctx(x->security, skb);
+ if (x->props.output_mark) {
+ ret = nla_put_u32(skb, XFRMA_OUTPUT_MARK, x->props.output_mark);
+ if (ret)
+ goto out;
+ }
out:
return ret;
}
[XFRMA_SA_EXTRA_FLAGS] = { .type = NLA_U32 },
[XFRMA_PROTO] = { .type = NLA_U8 },
[XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) },
+ [XFRMA_OUTPUT_MARK] = { .type = NLA_U32 },
};
static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
l += nla_total_size(sizeof(*x->coaddr));
if (x->props.extra_flags)
l += nla_total_size(sizeof(x->props.extra_flags));
+ if (x->props.output_mark)
+ l += nla_total_size(sizeof(x->props.output_mark));
/* Must count x->lastused as it may become non-zero behind our back. */
l += nla_total_size(sizeof(u64));
as-instr = $(call try-run,\
printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
+# __cc-option
+# Usage: MY_CFLAGS += $(call __cc-option,$(CC),$(MY_CFLAGS),-march=winchip-c6,-march=i586)
+__cc-option = $(call try-run,\
+ $(1) -Werror $(2) $(3) -c -x c /dev/null -o "$$TMP",$(3),$(4))
+
+# Do not attempt to build with gcc plugins during cc-option tests.
+# (And this uses delayed resolution so the flags will be up to date.)
+CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
+
# cc-option
# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
-cc-option = $(call try-run,\
- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
+cc-option = $(call __cc-option, $(CC),\
+ $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS),$(1),$(2))
+
+# hostcc-option
+# Usage: cflags-y += $(call hostcc-option,-march=winchip-c6,-march=i586)
+hostcc-option = $(call __cc-option, $(HOSTCC),\
+ $(HOSTCFLAGS) $(HOST_EXTRACFLAGS),$(1),$(2))
# cc-option-yn
# Usage: flag := $(call cc-option-yn,-march=winchip-c6)
cc-option-yn = $(call try-run,\
- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
+ $(CC) -Werror $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
# cc-option-align
# Prefix align with either -falign or -malign
# cc-disable-warning
# Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
cc-disable-warning = $(call try-run,\
- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+ $(CC) -Werror $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
# cc-name
# Expands to either gcc or clang
$(obj)/%.symtypes : $(src)/%.c FORCE
$(call cmd,cc_symtypes_c)
+# LLVM assembly
+# Generate .ll files from .c
+quiet_cmd_cc_ll_c = CC $(quiet_modtag) $@
+ cmd_cc_ll_c = $(CC) $(c_flags) -emit-llvm -S -o $@ $<
+
+$(obj)/%.ll: $(src)/%.c FORCE
+ $(call if_changed_dep,cc_ll_c)
+
# C (.c) files
# The C file is compiled and updated dependency information is generated.
# (See cmd_cc_o_c + relevant part of rule_cc_o_c)
KBUILD_CFLAGS += $(call cc-disable-warning, initializer-overrides)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-value)
KBUILD_CFLAGS += $(call cc-disable-warning, format)
-KBUILD_CFLAGS += $(call cc-disable-warning, unknown-warning-option)
KBUILD_CFLAGS += $(call cc-disable-warning, sign-compare)
KBUILD_CFLAGS += $(call cc-disable-warning, format-zero-length)
KBUILD_CFLAGS += $(call cc-disable-warning, uninitialized)
cmd_xzmisc = (cat $(filter-out FORCE,$^) | \
xz --check=crc32 --lzma2=dict=1MiB) > $@ || \
(rm -f $@ ; false)
+
+# ASM offsets
+# ---------------------------------------------------------------------------
+
+# Default sed regexp - multiline due to syntax constraints
+#
+# Use [:space:] because LLVM's integrated assembler inserts <tab> around
+# the .ascii directive whereas GCC keeps the <space> as-is.
+define sed-offsets
+ 's:^[[:space:]]*\.ascii[[:space:]]*"\(.*\)".*:\1:; \
+ /^->/{s:->#\(.*\):/* \1 */:; \
+ s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
+ s:->::; p;}'
+endef
+
+# Use filechk to avoid rebuilds when a header changes, but the resulting file
+# does not
+define filechk_offsets
+ (set -e; \
+ echo "#ifndef $2"; \
+ echo "#define $2"; \
+ echo "/*"; \
+ echo " * DO NOT MODIFY."; \
+ echo " *"; \
+ echo " * This file was generated by Kbuild"; \
+ echo " */"; \
+ echo ""; \
+ sed -ne $(sed-offsets); \
+ echo ""; \
+ echo "#endif" )
+endef
devicetable-offsets-file := devicetable-offsets.h
-define sed-y
- "/^->/{s:->#\(.*\):/* \1 */:; \
- s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
- s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
- s:->::; p;}"
-endef
-
-quiet_cmd_offsets = GEN $@
-define cmd_offsets
- (set -e; \
- echo "#ifndef __DEVICETABLE_OFFSETS_H__"; \
- echo "#define __DEVICETABLE_OFFSETS_H__"; \
- echo "/*"; \
- echo " * DO NOT MODIFY."; \
- echo " *"; \
- echo " * This file was generated by Kbuild"; \
- echo " *"; \
- echo " */"; \
- echo ""; \
- sed -ne $(sed-y) $<; \
- echo ""; \
- echo "#endif" ) > $@
-endef
-
-$(obj)/$(devicetable-offsets-file): $(obj)/devicetable-offsets.s
- $(call if_changed,offsets)
+$(obj)/$(devicetable-offsets-file): $(obj)/devicetable-offsets.s FORCE
+ $(call filechk,offsets,__DEVICETABLE_OFFSETS_H__)
targets += $(devicetable-offsets-file) devicetable-offsets.s
extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx);
extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx);
-extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
+extern struct key *find_keyring_by_name(const char *name, bool uid_keyring);
extern int install_user_keyrings(void);
extern int install_thread_keyring_to_cred(struct cred *);
key->flags |= 1 << KEY_FLAG_IN_QUOTA;
if (flags & KEY_ALLOC_TRUSTED)
key->flags |= 1 << KEY_FLAG_TRUSTED;
+ if (flags & KEY_ALLOC_UID_KEYRING)
+ key->flags |= 1 << KEY_FLAG_UID_KEYRING;
#ifdef KEY_DEBUGGING
key->magic = KEY_DEBUG_MAGIC;
key = key_ref_to_ptr(key_ref);
+ if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
+ ret = -ENOKEY;
+ goto error2;
+ }
+
/* see if we can read it directly */
ret = key_permission(key_ref, KEY_NEED_READ);
if (ret == 0)
}
struct keyring_read_iterator_context {
- size_t qty;
+ size_t buflen;
size_t count;
key_serial_t __user *buffer;
};
int ret;
kenter("{%s,%d},,{%zu/%zu}",
- key->type->name, key->serial, ctx->count, ctx->qty);
+ key->type->name, key->serial, ctx->count, ctx->buflen);
- if (ctx->count >= ctx->qty)
+ if (ctx->count >= ctx->buflen)
return 1;
ret = put_user(key->serial, ctx->buffer);
return 0;
/* Calculate how much data we could return */
- ctx.qty = nr_keys * sizeof(key_serial_t);
-
if (!buffer || !buflen)
- return ctx.qty;
-
- if (buflen > ctx.qty)
- ctx.qty = buflen;
+ return nr_keys * sizeof(key_serial_t);
/* Copy the IDs of the subscribed keys into the buffer */
ctx.buffer = (key_serial_t __user *)buffer;
+ ctx.buflen = buflen;
ctx.count = 0;
ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
if (ret < 0) {
/*
* Find a keyring with the specified name.
*
- * All named keyrings in the current user namespace are searched, provided they
- * grant Search permission directly to the caller (unless this check is
- * skipped). Keyrings whose usage points have reached zero or who have been
- * revoked are skipped.
+ * Only keyrings that have a nonzero refcount, are not revoked, and are owned
+ * by a user in the current user namespace are considered. If @uid_keyring is %true,
+ * the keyring additionally must have been allocated as a user or user session
+ * keyring; otherwise, it must grant Search permission directly to the caller.
*
* Returns a pointer to the keyring with the keyring's refcount having being
* incremented on success. -ENOKEY is returned if a key could not be found.
*/
-struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
+struct key *find_keyring_by_name(const char *name, bool uid_keyring)
{
struct key *keyring;
int bucket;
if (strcmp(keyring->description, name) != 0)
continue;
- if (!skip_perm_check &&
- key_permission(make_key_ref(keyring, 0),
- KEY_NEED_SEARCH) < 0)
- continue;
+ if (uid_keyring) {
+ if (!test_bit(KEY_FLAG_UID_KEYRING,
+ &keyring->flags))
+ continue;
+ } else {
+ if (key_permission(make_key_ref(keyring, 0),
+ KEY_NEED_SEARCH) < 0)
+ continue;
+ }
/* we've got a match but we might end up racing with
* key_cleanup() if the keyring is currently 'dead'
if (IS_ERR(uid_keyring)) {
uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
cred, user_keyring_perm,
- KEY_ALLOC_IN_QUOTA, NULL);
+ KEY_ALLOC_UID_KEYRING |
+ KEY_ALLOC_IN_QUOTA,
+ NULL);
if (IS_ERR(uid_keyring)) {
ret = PTR_ERR(uid_keyring);
goto error;
session_keyring =
keyring_alloc(buf, user->uid, INVALID_GID,
cred, user_keyring_perm,
- KEY_ALLOC_IN_QUOTA, NULL);
+ KEY_ALLOC_UID_KEYRING |
+ KEY_ALLOC_IN_QUOTA,
+ NULL);
if (IS_ERR(session_keyring)) {
ret = PTR_ERR(session_keyring);
goto error_release;
return SECCLASS_KEY_SOCKET;
case PF_APPLETALK:
return SECCLASS_APPLETALK_SOCKET;
+ case PF_CAN:
+ return SECCLASS_CAN_SOCKET;
}
return SECCLASS_SOCKET;
{ COMMON_SOCK_PERMS, "attach_queue", NULL } },
{ "binder", { "impersonate", "call", "set_context_mgr", "transfer",
NULL } },
+ { "can_socket",
+ { COMMON_SOCK_PERMS, NULL } },
{ NULL }
};
* @inode: the object
* @name: attribute name
* @buffer: where to put the result
- * @alloc: unused
+ * @alloc: duplicate memory
*
* Returns the size of the attribute or an error code
*/
struct super_block *sbp;
struct inode *ip = (struct inode *)inode;
struct smack_known *isp;
- int ilen;
- int rc = 0;
- if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
+ if (strcmp(name, XATTR_SMACK_SUFFIX) == 0)
isp = smk_of_inode(inode);
- ilen = strlen(isp->smk_known);
- *buffer = isp->smk_known;
- return ilen;
- }
+ else {
+ /*
+ * The rest of the Smack xattrs are only on sockets.
+ */
+ sbp = ip->i_sb;
+ if (sbp->s_magic != SOCKFS_MAGIC)
+ return -EOPNOTSUPP;
- /*
- * The rest of the Smack xattrs are only on sockets.
- */
- sbp = ip->i_sb;
- if (sbp->s_magic != SOCKFS_MAGIC)
- return -EOPNOTSUPP;
+ sock = SOCKET_I(ip);
+ if (sock == NULL || sock->sk == NULL)
+ return -EOPNOTSUPP;
- sock = SOCKET_I(ip);
- if (sock == NULL || sock->sk == NULL)
- return -EOPNOTSUPP;
-
- ssp = sock->sk->sk_security;
+ ssp = sock->sk->sk_security;
- if (strcmp(name, XATTR_SMACK_IPIN) == 0)
- isp = ssp->smk_in;
- else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
- isp = ssp->smk_out;
- else
- return -EOPNOTSUPP;
+ if (strcmp(name, XATTR_SMACK_IPIN) == 0)
+ isp = ssp->smk_in;
+ else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
+ isp = ssp->smk_out;
+ else
+ return -EOPNOTSUPP;
+ }
- ilen = strlen(isp->smk_known);
- if (rc == 0) {
- *buffer = isp->smk_known;
- rc = ilen;
+ if (alloc) {
+ *buffer = kstrdup(isp->smk_known, GFP_KERNEL);
+ if (*buffer == NULL)
+ return -ENOMEM;
}
- return rc;
+ return strlen(isp->smk_known);
}
static int snd_compress_dev_register(struct snd_device *device)
{
int ret = -EINVAL;
- char str[16];
struct snd_compr *compr;
if (snd_BUG_ON(!device || !device->device_data))
return -EBADFD;
compr = device->device_data;
- pr_debug("reg %s for device %s, direction %d\n", str, compr->name,
+ pr_debug("reg device %s, direction %d\n", compr->name,
compr->direction);
/* register compressed device */
ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
struct snd_seq_client_port *port;
struct snd_seq_port_info info;
struct snd_seq_port_callback *callback;
+ int port_idx;
if (copy_from_user(&info, arg, sizeof(info)))
return -EFAULT;
return -ENOMEM;
if (client->type == USER_CLIENT && info.kernel) {
- snd_seq_delete_port(client, port->addr.port);
+ port_idx = port->addr.port;
+ snd_seq_port_unlock(port);
+ snd_seq_delete_port(client, port_idx);
return -EINVAL;
}
if (client->type == KERNEL_CLIENT) {
snd_seq_set_port_info(port, &info);
snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);
+ snd_seq_port_unlock(port);
if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT;
}
-/* create a port, port number is returned (-1 on failure) */
+/* create a port, port number is returned (-1 on failure);
+ * the caller needs to unref the port via snd_seq_port_unlock() appropriately
+ */
struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
int port)
{
snd_use_lock_init(&new_port->use_lock);
port_subs_info_init(&new_port->c_src);
port_subs_info_init(&new_port->c_dest);
+ snd_use_lock_use(&new_port->use_lock);
num = port >= 0 ? port : 0;
mutex_lock(&client->ports_mutex);
list_add_tail(&new_port->list, &p->list);
client->num_ports++;
new_port->addr.port = num; /* store the port number in the port */
+ sprintf(new_port->name, "port-%d", num);
write_unlock_irqrestore(&client->ports_lock, flags);
mutex_unlock(&client->ports_mutex);
- sprintf(new_port->name, "port-%d", num);
return new_port;
}
* decode input event and put to read buffer of each opened file
*/
static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
- struct snd_seq_event *ev)
+ struct snd_seq_event *ev,
+ bool atomic)
{
struct snd_virmidi *vmidi;
unsigned char msg[4];
int len;
- read_lock(&rdev->filelist_lock);
+ if (atomic)
+ read_lock(&rdev->filelist_lock);
+ else
+ down_read(&rdev->filelist_sem);
list_for_each_entry(vmidi, &rdev->filelist, list) {
if (!vmidi->trigger)
continue;
snd_rawmidi_receive(vmidi->substream, msg, len);
}
}
- read_unlock(&rdev->filelist_lock);
+ if (atomic)
+ read_unlock(&rdev->filelist_lock);
+ else
+ up_read(&rdev->filelist_sem);
return 0;
}
struct snd_virmidi_dev *rdev;
rdev = rmidi->private_data;
- return snd_virmidi_dev_receive_event(rdev, ev);
+ return snd_virmidi_dev_receive_event(rdev, ev, true);
}
#endif /* 0 */
rdev = private_data;
if (!(rdev->flags & SNDRV_VIRMIDI_USE))
return 0; /* ignored */
- return snd_virmidi_dev_receive_event(rdev, ev);
+ return snd_virmidi_dev_receive_event(rdev, ev, atomic);
}
/*
struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
struct snd_rawmidi_runtime *runtime = substream->runtime;
struct snd_virmidi *vmidi;
- unsigned long flags;
vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL);
if (vmidi == NULL)
vmidi->client = rdev->client;
vmidi->port = rdev->port;
runtime->private_data = vmidi;
- write_lock_irqsave(&rdev->filelist_lock, flags);
+ down_write(&rdev->filelist_sem);
+ write_lock_irq(&rdev->filelist_lock);
list_add_tail(&vmidi->list, &rdev->filelist);
- write_unlock_irqrestore(&rdev->filelist_lock, flags);
+ write_unlock_irq(&rdev->filelist_lock);
+ up_write(&rdev->filelist_sem);
vmidi->rdev = rdev;
return 0;
}
struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
struct snd_virmidi *vmidi = substream->runtime->private_data;
+ down_write(&rdev->filelist_sem);
write_lock_irq(&rdev->filelist_lock);
list_del(&vmidi->list);
write_unlock_irq(&rdev->filelist_lock);
+ up_write(&rdev->filelist_sem);
snd_midi_event_free(vmidi->parser);
substream->runtime->private_data = NULL;
kfree(vmidi);
rdev->rmidi = rmidi;
rdev->device = device;
rdev->client = -1;
+ init_rwsem(&rdev->filelist_sem);
rwlock_init(&rdev->filelist_lock);
INIT_LIST_HEAD(&rdev->filelist);
rdev->seq_mode = SNDRV_VIRMIDI_SEQ_DISPATCH;
unsigned long flags;
struct snd_msndmidi *mpu = mpuv;
void *pwMIDQData = mpu->dev->mappedbase + MIDQ_DATA_BUFF;
+ u16 head, tail, size;
spin_lock_irqsave(&mpu->input_lock, flags);
- while (readw(mpu->dev->MIDQ + JQS_wTail) !=
- readw(mpu->dev->MIDQ + JQS_wHead)) {
- u16 wTmp, val;
- val = readw(pwMIDQData + 2 * readw(mpu->dev->MIDQ + JQS_wHead));
-
- if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER,
- &mpu->mode))
- snd_rawmidi_receive(mpu->substream_input,
- (unsigned char *)&val, 1);
-
- wTmp = readw(mpu->dev->MIDQ + JQS_wHead) + 1;
- if (wTmp > readw(mpu->dev->MIDQ + JQS_wSize))
- writew(0, mpu->dev->MIDQ + JQS_wHead);
- else
- writew(wTmp, mpu->dev->MIDQ + JQS_wHead);
+ head = readw(mpu->dev->MIDQ + JQS_wHead);
+ tail = readw(mpu->dev->MIDQ + JQS_wTail);
+ size = readw(mpu->dev->MIDQ + JQS_wSize);
+ if (head > size || tail > size)
+ goto out;
+ while (head != tail) {
+ unsigned char val = readw(pwMIDQData + 2 * head);
+
+ if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER, &mpu->mode))
+ snd_rawmidi_receive(mpu->substream_input, &val, 1);
+ if (++head > size)
+ head = 0;
+ writew(head, mpu->dev->MIDQ + JQS_wHead);
}
+ out:
spin_unlock_irqrestore(&mpu->input_lock, flags);
}
EXPORT_SYMBOL(snd_msndmidi_input_read);
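The queue indices here live in memory shared with the device, so the fix reads head, tail, and size once, rejects out-of-range values before looping, and wraps the head exactly as the hardware protocol expects. The consume loop in isolation (a plain array stands in for the MMIO queue; QSIZE and drain() are illustrative):

#include <stdint.h>
#include <stdio.h>

#define QSIZE 8	/* maximum valid index, like JQS_wSize */

/* Drain entries between head and tail, refusing indices that fall
 * outside the ring -- the same sanity check the patch adds. */
static void drain(uint16_t *head, uint16_t tail, const uint16_t *data)
{
	uint16_t h = *head;

	if (h > QSIZE || tail > QSIZE)	/* corrupt/hostile indices: bail */
		return;
	while (h != tail) {
		printf("got 0x%02x\n", data[h] & 0xff);
		if (++h > QSIZE)	/* wrap, matching the kernel loop */
			h = 0;
		*head = h;		/* publish progress after each entry */
	}
}

int main(void)
{
	uint16_t data[QSIZE + 1] = { 0x90, 0x3c, 0x7f };
	uint16_t head = 0;

	drain(&head, 3, data);
	return 0;
}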
{
struct snd_msnd *chip = dev_id;
void *pwDSPQData = chip->mappedbase + DSPQ_DATA_BUFF;
+ u16 head, tail, size;
/* Send ack to DSP */
/* inb(chip->io + HP_RXL); */
/* Evaluate queued DSP messages */
- while (readw(chip->DSPQ + JQS_wTail) != readw(chip->DSPQ + JQS_wHead)) {
- u16 wTmp;
-
- snd_msnd_eval_dsp_msg(chip,
- readw(pwDSPQData + 2 * readw(chip->DSPQ + JQS_wHead)));
-
- wTmp = readw(chip->DSPQ + JQS_wHead) + 1;
- if (wTmp > readw(chip->DSPQ + JQS_wSize))
- writew(0, chip->DSPQ + JQS_wHead);
- else
- writew(wTmp, chip->DSPQ + JQS_wHead);
+ head = readw(chip->DSPQ + JQS_wHead);
+ tail = readw(chip->DSPQ + JQS_wTail);
+ size = readw(chip->DSPQ + JQS_wSize);
+ if (head > size || tail > size)
+ goto out;
+ while (head != tail) {
+ snd_msnd_eval_dsp_msg(chip, readw(pwDSPQData + 2 * head));
+ if (++head > size)
+ head = 0;
+ writew(head, chip->DSPQ + JQS_wHead);
}
+ out:
/* Send ack to DSP */
inb(chip->io + HP_RXL);
return IRQ_HANDLED;
stream->resources, en,
VORTEX_RESOURCE_SRC)) < 0) {
memset(stream->resources, 0,
- sizeof(unsigned char) *
- VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
return -EBUSY;
}
if (stream->type != VORTEX_PCM_A3D) {
VORTEX_RESOURCE_MIXIN)) < 0) {
memset(stream->resources,
0,
- sizeof(unsigned char) * VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
return -EBUSY;
}
}
stream->resources, en,
VORTEX_RESOURCE_A3D)) < 0) {
memset(stream->resources, 0,
- sizeof(unsigned char) *
- VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
dev_err(vortex->card->dev,
"out of A3D sources. Sorry\n");
return -EBUSY;
} else {
int src[2], mix[2];
+ if (nr_ch < 1)
+ return -EINVAL;
+
/* Get SRC and MIXER hardware resources. */
for (i = 0; i < nr_ch; i++) {
if ((mix[i] =
VORTEX_RESOURCE_MIXOUT))
< 0) {
memset(stream->resources, 0,
- sizeof(unsigned char) *
- VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
return -EBUSY;
}
if ((src[i] =
stream->resources, en,
VORTEX_RESOURCE_SRC)) < 0) {
memset(stream->resources, 0,
- sizeof(unsigned char) *
- VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
return -EBUSY;
}
}
MSM89XX_CDC_CORE_TX5_VOL_CTL_GAIN,
};
+#define SDM660_TX_UNMUTE_DELAY_MS 40
+static int tx_unmute_delay = SDM660_TX_UNMUTE_DELAY_MS;
+module_param(tx_unmute_delay, int,
+ S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(tx_unmute_delay, "delay to unmute the tx path");
+
static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
struct snd_soc_codec *registered_digcodec;
/* enable HPF */
snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x00);
+ schedule_delayed_work(
+ &msm_dig_cdc->tx_mute_dwork[decimator - 1].dwork,
+ msecs_to_jiffies(tx_unmute_delay));
if (tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq !=
CF_MIN_3DB_150HZ) {
snd_soc_read(codec,
tx_digital_gain_reg[w->shift + offset])
);
- if (pdata->lb_mode) {
- pr_debug("%s: loopback mode unmute the DEC\n",
- __func__);
- snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x00);
- }
- snd_soc_update_bits(codec, tx_vol_ctl_reg,
- 0x01, 0x00);
-
break;
case SND_SOC_DAPM_PRE_PMD:
snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x01);
msleep(20);
snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x08);
cancel_delayed_work_sync(&tx_hpf_work[decimator - 1].dwork);
+ cancel_delayed_work_sync(
+ &msm_dig_cdc->tx_mute_dwork[decimator - 1].dwork);
break;
case SND_SOC_DAPM_POST_PMD:
snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift,
}
EXPORT_SYMBOL(msm_dig_codec_info_create_codec_entry);
+static void sdm660_tx_mute_update_callback(struct work_struct *work)
+{
+ struct tx_mute_work *tx_mute_dwork;
+ struct snd_soc_codec *codec = NULL;
+ struct msm_dig_priv *dig_cdc;
+ struct delayed_work *delayed_work;
+ u16 tx_vol_ctl_reg = 0;
+ u8 decimator = 0, i;
+
+ delayed_work = to_delayed_work(work);
+ tx_mute_dwork = container_of(delayed_work, struct tx_mute_work, dwork);
+ dig_cdc = tx_mute_dwork->dig_cdc;
+ codec = dig_cdc->codec;
+
+ for (i = 0; i < (NUM_DECIMATORS - 1); i++) {
+ if (dig_cdc->dec_active[i])
+ decimator = i + 1;
+ if (decimator && decimator < NUM_DECIMATORS) {
+ /* unmute decimators corresponding to Tx DAIs */
+ tx_vol_ctl_reg =
+ MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG +
+ 32 * (decimator - 1);
+ snd_soc_update_bits(codec, tx_vol_ctl_reg,
+ 0x01, 0x00);
+ }
+ decimator = 0;
+ }
+}
+
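The unmute follows the usual delayed-work lifecycle: initialized once at probe, scheduled on power-up, and cancelled synchronously before the path is muted and torn down. Condensed, with the stages marked (the work item is the tx_mute_work defined further below):

/* at probe */
INIT_DELAYED_WORK(&work->dwork, sdm660_tx_mute_update_callback);

/* on SND_SOC_DAPM_POST_PMU, after the HPF is enabled */
schedule_delayed_work(&work->dwork, msecs_to_jiffies(tx_unmute_delay));

/* on SND_SOC_DAPM_PRE_PMD, so a pending unmute cannot race the mute */
cancel_delayed_work_sync(&work->dwork);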
static int msm_dig_cdc_soc_probe(struct snd_soc_codec *codec)
{
struct msm_dig_priv *msm_dig_cdc = dev_get_drvdata(codec->dev);
tx_hpf_work[i].decimator = i + 1;
INIT_DELAYED_WORK(&tx_hpf_work[i].dwork,
tx_hpf_corner_freq_callback);
+ msm_dig_cdc->tx_mute_dwork[i].dig_cdc = msm_dig_cdc;
+ msm_dig_cdc->tx_mute_dwork[i].decimator = i + 1;
+ INIT_DELAYED_WORK(&msm_dig_cdc->tx_mute_dwork[i].dwork,
+ sdm660_tx_mute_update_callback);
}
for (i = 0; i < MSM89XX_RX_MAX; i++)
MSM89XX_CDC_CORE_TX5_MUX_CTL, 3, 1, 0),
};
-static int msm_dig_cdc_digital_mute(struct snd_soc_dai *dai, int mute)
-{
- struct snd_soc_codec *codec = NULL;
- u16 tx_vol_ctl_reg = 0;
- u8 decimator = 0, i;
- struct msm_dig_priv *dig_cdc;
-
- pr_debug("%s: Digital Mute val = %d\n", __func__, mute);
-
- if (!dai || !dai->codec) {
- pr_err("%s: Invalid params\n", __func__);
- return -EINVAL;
- }
- codec = dai->codec;
- dig_cdc = snd_soc_codec_get_drvdata(codec);
-
- if (dai->id == AIF1_PB) {
- dev_dbg(codec->dev, "%s: Not capture use case skip\n",
- __func__);
- return 0;
- }
-
- mute = (mute) ? 1 : 0;
- if (!mute) {
- /*
- * 15 ms is an emperical value for the mute time
- * that was arrived by checking the pop level
- * to be inaudible
- */
- usleep_range(15000, 15010);
- }
-
- if (dai->id == AIF3_SVA) {
- snd_soc_update_bits(codec,
- MSM89XX_CDC_CORE_TX5_VOL_CTL_CFG, 0x01, mute);
- goto ret;
- }
- for (i = 0; i < (NUM_DECIMATORS - 1); i++) {
- if (dig_cdc->dec_active[i])
- decimator = i + 1;
- if (decimator && decimator < NUM_DECIMATORS) {
- /* mute/unmute decimators corresponding to Tx DAI's */
- tx_vol_ctl_reg =
- MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG +
- 32 * (decimator - 1);
- snd_soc_update_bits(codec, tx_vol_ctl_reg,
- 0x01, mute);
- }
- decimator = 0;
- }
-ret:
- return 0;
-}
-
static struct snd_soc_dai_ops msm_dig_dai_ops = {
.hw_params = msm_dig_cdc_hw_params,
- .digital_mute = msm_dig_cdc_digital_mute,
};
MSM89XX_RX_MAX,
};
+struct tx_mute_work {
+ struct msm_dig_priv *dig_cdc;
+ u32 decimator;
+ struct delayed_work dwork;
+};
+
struct msm_dig_priv {
struct snd_soc_codec *codec;
u32 comp_enabled[MSM89XX_RX_MAX];
int (*register_notifier)(void *handle,
struct notifier_block *nblock,
bool enable);
+ struct tx_mute_work tx_mute_dwork[NUM_DECIMATORS];
};
struct dig_ctrl_platform_data {
audio drivers. This includes q6asm, q6adm,
q6afe interfaces to DSP using apr.
+config SND_SOC_QDSP6V2_VM
+ tristate "SoC ALSA audio driver for QDSP6V2 virtualization"
+ depends on MSM_QDSP6_APRV2_VM
+ select SND_SOC_COMPRESS
+ help
+ To add support for MSM QDSP6V2 virtualization
+ SoC audio.
+ This will enable sound SoC platform-specific
+ audio drivers. This includes q6asm, q6adm,
+ q6afe interfaces to DSP using virtualized apr.
+
config SND_SOC_QDSP_DEBUG
bool "QDSP Audio Driver Debug Feature"
help
config QTI_PP
bool "Enable QTI PP"
- depends on SND_SOC_MSM_QDSP6V2_INTF
+ depends on SND_SOC_MSM_QDSP6V2_INTF || SND_SOC_QDSP6V2_VM
help
To add support for default QTI post processing.
This support is to configure the post processing
the machine driver and the corresponding
DAI-links
+config SND_SOC_MSM8996_VM
+ tristate "SoC Machine driver for MSM8996 virtualization"
+ select SND_SOC_QDSP6V2_VM
+ select SND_SOC_MSM_STUB
+ select SND_SOC_MSM_HOSTLESS_PCM
+ select SND_DYNAMIC_MINORS
+ select MSM_QDSP6_APRV2_VM
+ select QTI_PP
+ help
+ To add support for SoC audio on the MSM8996
+ virtualization platform.
+ This will enable sound SoC drivers which
+ interface with the DSP using virtualized apr;
+ it will also enable the machine driver and
+ the corresponding DAI-links.
+
config SND_SOC_MSM8998
tristate "SoC Machine driver for MSM8998 boards"
depends on ARCH_QCOM
obj-$(CONFIG_SND_SOC_MSM_HOSTLESS_PCM) += snd-soc-hostless-pcm.o
obj-$(CONFIG_SND_SOC_MSM_QDSP6V2_INTF) += qdsp6v2/
+obj-$(CONFIG_SND_SOC_QDSP6V2_VM) += qdsp6v2/
snd-soc-qdsp6v2-objs := msm-dai-fe.o
obj-$(CONFIG_SND_SOC_QDSP6V2) += snd-soc-qdsp6v2.o
+obj-$(CONFIG_SND_SOC_QDSP6V2_VM) += snd-soc-qdsp6v2.o
#for CPE drivers
snd-soc-cpe-objs := msm-cpe-lsm.o
snd-soc-msm8996-objs := msm8996.o apq8096-auto.o
obj-$(CONFIG_SND_SOC_MSM8996) += snd-soc-msm8996.o
+# for MSM8996 virtualization sound card driver
+snd-soc-msm8996-vm-objs := apq8096-auto.o
+obj-$(CONFIG_SND_SOC_MSM8996_VM) += snd-soc-msm8996-vm.o
+
# for MSM8998 sound card driver
snd-soc-msm8998-objs := msm8998.o
obj-$(CONFIG_SND_SOC_MSM8998) += snd-soc-msm8998.o
static int msm_proxy_rx_ch = 2;
static int hdmi_rx_sample_rate = SAMPLING_RATE_48KHZ;
static int msm_sec_mi2s_tx_ch = 2;
+static int msm_sec_mi2s_rx_ch = 2;
static int msm_tert_mi2s_tx_ch = 2;
static int msm_quat_mi2s_rx_ch = 2;
static int msm_sec_mi2s_tx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_sec_mi2s_rx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int msm_tert_mi2s_tx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int msm_quat_mi2s_rx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int msm_sec_mi2s_rate = SAMPLING_RATE_48KHZ;
static int msm_sec_tdm_tx_2_ch = 2;
static int msm_sec_tdm_tx_3_ch = 2;
+static int msm_sec_tdm_rx_0_ch = 6;
+static int msm_sec_tdm_rx_1_ch = 1;
+static int msm_sec_tdm_rx_2_ch = 1;
+static int msm_sec_tdm_rx_3_ch;
+
static int msm_tert_tdm_rx_0_ch = 2; /* ICC STREAM */
static int msm_tert_tdm_rx_1_ch = 2;
static int msm_tert_tdm_rx_2_ch = 2;
static int msm_sec_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int msm_sec_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_sec_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_sec_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_sec_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_sec_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+
static int msm_tert_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int msm_tert_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int msm_tert_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
{0xFFFF}, /* not used */
{0xFFFF}, /* not used */
/* SEC_TDM_RX */
- {0xFFFF}, /* not used */
- {0xFFFF}, /* not used */
- {0xFFFF}, /* not used */
+ {0, 4, 8, 12, 16, 20, 0xFFFF},
+ {24, 0xFFFF},
+ {28, 0xFFFF},
{0xFFFF}, /* not used */
{0xFFFF}, /* not used */
{0xFFFF}, /* not used */
return 0;
}
+static int msm_sec_mi2s_rx_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_sec_mi2s_rx_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_sec_mi2s_rx_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_sec_mi2s_rx_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_sec_mi2s_rx_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_sec_mi2s_rx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_sec_mi2s_rx_bit_format = %d\n",
+ __func__, msm_sec_mi2s_rx_bit_format);
+ return 0;
+}
+
static int msm_sec_mi2s_tx_bit_format_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
return 0;
}
+static int msm_sec_tdm_rx_0_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_sec_tdm_rx_0_ch = %d\n", __func__,
+ msm_sec_tdm_rx_0_ch);
+ ucontrol->value.integer.value[0] = msm_sec_tdm_rx_0_ch - 1;
+ return 0;
+}
+
+static int msm_sec_tdm_rx_0_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_sec_tdm_rx_0_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_sec_tdm_rx_0_ch = %d\n", __func__,
+ msm_sec_tdm_rx_0_ch);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_1_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_sec_tdm_rx_1_ch = %d\n", __func__,
+ msm_sec_tdm_rx_1_ch);
+ ucontrol->value.integer.value[0] = msm_sec_tdm_rx_1_ch - 1;
+ return 0;
+}
+
+static int msm_sec_tdm_rx_1_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_sec_tdm_rx_1_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_sec_tdm_rx_1_ch = %d\n", __func__,
+ msm_sec_tdm_rx_1_ch);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_2_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_sec_tdm_rx_2_ch = %d\n", __func__,
+ msm_sec_tdm_rx_2_ch);
+ ucontrol->value.integer.value[0] = msm_sec_tdm_rx_2_ch - 1;
+ return 0;
+}
+
+static int msm_sec_tdm_rx_2_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_sec_tdm_rx_2_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_sec_tdm_rx_2_ch = %d\n", __func__,
+ msm_sec_tdm_rx_2_ch);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_3_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_sec_tdm_rx_3_ch = %d\n", __func__,
+ msm_sec_tdm_rx_3_ch);
+ ucontrol->value.integer.value[0] = msm_sec_tdm_rx_3_ch - 1;
+ return 0;
+}
+
+static int msm_sec_tdm_rx_3_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_sec_tdm_rx_3_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_sec_tdm_rx_3_ch = %d\n", __func__,
+ msm_sec_tdm_rx_3_ch);
+ return 0;
+}
+
static int msm_sec_tdm_tx_0_ch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
return 0;
}
+static int msm_sec_tdm_rx_0_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_sec_tdm_rx_0_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_0_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_0_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_sec_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_sec_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_0_bit_format = %d\n",
+ __func__, msm_sec_tdm_rx_0_bit_format);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_1_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_sec_tdm_rx_1_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_1_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_1_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_sec_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_sec_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_1_bit_format = %d\n",
+ __func__, msm_sec_tdm_rx_1_bit_format);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_2_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_sec_tdm_rx_2_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_2_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_2_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_sec_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_sec_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_2_bit_format = %d\n",
+ __func__, msm_sec_tdm_rx_2_bit_format);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_3_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_sec_tdm_rx_3_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_3_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_3_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_sec_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_sec_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_3_bit_format = %d\n",
+ __func__, msm_sec_tdm_rx_3_bit_format);
+ return 0;
+}
+
static int msm_sec_tdm_tx_0_bit_format_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
static int msm_mi2s_rx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params)
{
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_interval *rate = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_RATE);
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
- pr_debug("%s: channel:%d\n", __func__, msm_quat_mi2s_rx_ch);
rate->min = rate->max = SAMPLING_RATE_48KHZ;
- channels->min = channels->max = msm_quat_mi2s_rx_ch;
- param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
- msm_quat_mi2s_rx_bit_format);
+
+ switch (cpu_dai->id) {
+ case 1: /* MSM_SEC_MI2S */
+ pr_debug("%s: channel:%d\n", __func__, msm_sec_mi2s_rx_ch);
+ rate->min = rate->max = msm_sec_mi2s_rate;
+ channels->min = channels->max = msm_sec_mi2s_rx_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_sec_mi2s_rx_bit_format);
+ break;
+ case 3: /* MSM_QUAT_MI2S */
+ pr_debug("%s: channel:%d\n", __func__, msm_quat_mi2s_rx_ch);
+ rate->min = rate->max = SAMPLING_RATE_48KHZ;
+ channels->min = channels->max = msm_quat_mi2s_rx_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_quat_mi2s_rx_bit_format);
+ break;
+ default:
+ pr_err("%s: dai id 0x%x not supported\n",
+ __func__, cpu_dai->id);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: dai id = 0x%x channels = %d rate = %d format = 0x%x\n",
+ __func__, cpu_dai->id, channels->max, rate->max,
+ params_format(params));
return 0;
}
msm_pri_tdm_rx_3_bit_format);
rate->min = rate->max = msm_pri_tdm_rate;
break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX:
+ channels->min = channels->max = msm_sec_tdm_rx_0_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_sec_tdm_rx_0_bit_format);
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX_1:
+ channels->min = channels->max = msm_sec_tdm_rx_1_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_sec_tdm_rx_1_bit_format);
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX_2:
+ channels->min = channels->max = msm_sec_tdm_rx_2_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_sec_tdm_rx_2_bit_format);
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX_3:
+ channels->min = channels->max = msm_sec_tdm_rx_3_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_sec_tdm_rx_3_bit_format);
+ break;
case AFE_PORT_ID_SECONDARY_TDM_TX:
channels->min = channels->max = msm_sec_tdm_tx_0_ch;
param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
msm_pri_tdm_rx_2_ch_get, msm_pri_tdm_rx_2_ch_put),
SOC_ENUM_EXT("PRI_TDM_RX_3 Channels", msm_snd_enum[5],
msm_pri_tdm_rx_3_ch_get, msm_pri_tdm_rx_3_ch_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_0 Channels", msm_snd_enum[5],
+ msm_sec_tdm_rx_0_ch_get, msm_sec_tdm_rx_0_ch_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_1 Channels", msm_snd_enum[5],
+ msm_sec_tdm_rx_1_ch_get, msm_sec_tdm_rx_1_ch_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_2 Channels", msm_snd_enum[5],
+ msm_sec_tdm_rx_2_ch_get, msm_sec_tdm_rx_2_ch_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_3 Channels", msm_snd_enum[5],
+ msm_sec_tdm_rx_3_ch_get, msm_sec_tdm_rx_3_ch_put),
SOC_ENUM_EXT("SEC_TDM_TX_0 Channels", msm_snd_enum[5],
msm_sec_tdm_tx_0_ch_get, msm_sec_tdm_tx_0_ch_put),
SOC_ENUM_EXT("SEC_TDM_TX_1 Channels", msm_snd_enum[5],
SOC_ENUM_EXT("PRI_TDM_RX_3 Bit Format", msm_snd_enum[6],
msm_pri_tdm_rx_3_bit_format_get,
msm_pri_tdm_rx_3_bit_format_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_0 Bit Format", msm_snd_enum[6],
+ msm_sec_tdm_rx_0_bit_format_get,
+ msm_sec_tdm_rx_0_bit_format_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_1 Bit Format", msm_snd_enum[6],
+ msm_sec_tdm_rx_1_bit_format_get,
+ msm_sec_tdm_rx_1_bit_format_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_2 Bit Format", msm_snd_enum[6],
+ msm_sec_tdm_rx_2_bit_format_get,
+ msm_sec_tdm_rx_2_bit_format_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_3 Bit Format", msm_snd_enum[6],
+ msm_sec_tdm_rx_3_bit_format_get,
+ msm_sec_tdm_rx_3_bit_format_put),
SOC_ENUM_EXT("SEC_TDM_TX_0 Bit Format", msm_snd_enum[6],
msm_sec_tdm_tx_0_bit_format_get,
msm_sec_tdm_tx_0_bit_format_put),
SOC_ENUM_EXT("TERT_MI2S_TX Bit Format", msm_snd_enum[7],
msm_tert_mi2s_tx_bit_format_get,
msm_tert_mi2s_tx_bit_format_put),
+ SOC_ENUM_EXT("SEC_MI2S_RX Bit Format", msm_snd_enum[7],
+ msm_sec_mi2s_rx_bit_format_get,
+ msm_sec_mi2s_rx_bit_format_put),
SOC_ENUM_EXT("SEC_MI2S_TX Bit Format", msm_snd_enum[7],
msm_sec_mi2s_tx_bit_format_get,
msm_sec_mi2s_tx_bit_format_put),
static struct snd_soc_dai_link apq8096_auto_be_dai_links[] = {
/* Backend DAI Links */
+ {
+ .name = LPASS_BE_SEC_MI2S_RX,
+ .stream_name = "Secondary MI2S Playback",
+ .cpu_dai_name = "msm-dai-q6-mi2s.1",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+ .be_hw_params_fixup = msm_mi2s_rx_be_hw_params_fixup,
+ .ops = &apq8096_mi2s_be_ops,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ },
{
.name = LPASS_BE_SEC_MI2S_TX,
.stream_name = "Secondary MI2S Capture",
.ignore_suspend = 1,
},
{
+ .name = LPASS_BE_SEC_TDM_RX_0,
+ .stream_name = "Secondary TDM0 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36880",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &apq8096_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_SEC_TDM_RX_1,
+ .stream_name = "Secondary TDM1 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36882",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_SEC_TDM_RX_1,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &apq8096_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_SEC_TDM_RX_2,
+ .stream_name = "Secondary TDM2 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36884",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_SEC_TDM_RX_2,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &apq8096_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_SEC_TDM_RX_3,
+ .stream_name = "Secondary TDM3 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36886",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_SEC_TDM_RX_3,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &apq8096_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
.name = LPASS_BE_SEC_TDM_TX_0,
.stream_name = "Secondary TDM0 Capture",
.cpu_dai_name = "msm-dai-q6-tdm.36881",
.capture = {
.stream_name = "MultiMedia17 Capture",
.aif_name = "MM_UL17",
- .rates = (SNDRV_PCM_RATE_8000_48000|
+ .rates = (SNDRV_PCM_RATE_8000_192000|
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 192000,
},
.ops = &msm_fe_Multimedia_dai_ops,
.compress_new = snd_soc_new_compress,
.capture = {
.stream_name = "MultiMedia18 Capture",
.aif_name = "MM_UL18",
- .rates = (SNDRV_PCM_RATE_8000_48000|
+ .rates = (SNDRV_PCM_RATE_8000_192000|
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
.capture = {
.stream_name = "MultiMedia19 Capture",
.aif_name = "MM_UL19",
- .rates = (SNDRV_PCM_RATE_8000_48000|
+ .rates = (SNDRV_PCM_RATE_8000_192000|
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 192000,
},
.ops = &msm_fe_Multimedia_dai_ops,
.compress_new = snd_soc_new_compress,
.name = "MultiMedia27",
.probe = fe_dai_probe,
},
+ {
+ .capture = {
+ .stream_name = "MultiMedia28 Capture",
+ .aif_name = "MM_UL28",
+ .rates = (SNDRV_PCM_RATE_8000_192000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .ops = &msm_fe_Multimedia_dai_ops,
+ .compress_new = snd_soc_new_compress,
+ .name = "MultiMedia28",
+ .probe = fe_dai_probe,
+ },
+ {
+ .capture = {
+ .stream_name = "MultiMedia29 Capture",
+ .aif_name = "MM_UL29",
+ .rates = (SNDRV_PCM_RATE_8000_192000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .ops = &msm_fe_Multimedia_dai_ops,
+ .compress_new = snd_soc_new_compress,
+ .name = "MultiMedia29",
+ .probe = fe_dai_probe,
+ },
};
static int msm_fe_dai_dev_probe(struct platform_device *pdev)
msm-pcm-voice-v2.o msm-dai-q6-hdmi-v2.o \
msm-lsm-client.o msm-pcm-host-voice-v2.o \
msm-audio-effects-q6-v2.o msm-pcm-loopback-v2.o \
- msm-dai-slim.o msm-transcode-loopback-q6-v2.o \
+ msm-transcode-loopback-q6-v2.o \
adsp_err.o
+obj-$(CONFIG_SLIMBUS) += msm-dai-slim.o audio_slimslave.o
obj-$(CONFIG_SND_SOC_QDSP6V2) += snd-soc-qdsp6v2.o msm-pcm-dtmf-v2.o \
msm-dai-stub-v2.o
+obj-$(CONFIG_SND_SOC_QDSP6V2_VM) += snd-soc-qdsp6v2.o msm-pcm-dtmf-v2.o \
+ msm-dai-stub-v2.o
obj-$(CONFIG_SND_HWDEP) += msm-pcm-routing-devdep.o
obj-$(CONFIG_DOLBY_DAP) += msm-dolby-dap-config.o
obj-$(CONFIG_DOLBY_DS2) += msm-ds2-dap-config.o
obj-$(CONFIG_DTS_SRS_TM) += msm-dts-srs-tm-config.o
obj-$(CONFIG_QTI_PP) += msm-qti-pp-config.o
obj-y += audio_calibration.o audio_cal_utils.o q6adm.o q6afe.o q6asm.o \
- q6audio-v2.o q6voice.o q6core.o rtac.o q6lsm.o audio_slimslave.o \
+ q6audio-v2.o q6voice.o q6core.o rtac.o q6lsm.o \
msm-pcm-q6-noirq.o
ocmem-audio-objs += audio_ocmem.o
obj-$(CONFIG_AUDIO_OCMEM) += ocmem-audio.o
return ret;
}
-
+#ifdef CONFIG_SND_HWDEP
static int msm_pcm_mmap_fd(struct snd_pcm_substream *substream,
struct snd_pcm_mmap_fd *mmap_fd)
{
}
return mmap_fd->fd < 0 ? -EFAULT : 0;
}
+#endif
static int msm_pcm_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg)
return 0;
}
+#ifdef CONFIG_SND_HWDEP
static int msm_pcm_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
unsigned int cmd, unsigned long arg)
{
hwdep->ops.ioctl_compat = msm_pcm_hwdep_compat_ioctl;
return 0;
}
+#endif
static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
pr_err("%s: Could not add app type controls failed %d\n",
__func__, ret);
}
+#ifdef CONFIG_SND_HWDEP
ret = msm_pcm_add_hwdep_dev(rtd);
if (ret)
pr_err("%s: Could not add hw dep node\n", __func__);
+#endif
pcm->nonatomic = true;
exit:
return ret;
/* MULTIMEDIA27 */
{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* MULTIMEDIA28 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* MULTIMEDIA29 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
/* CS_VOICE */
{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
msm_route_ec_ref_rx_enum[0],
msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul28 =
+ SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL28 MUX Mux",
+ msm_route_ec_ref_rx_enum[0],
+ msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul29 =
+ SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL29 MUX Mux",
+ msm_route_ec_ref_rx_enum[0],
+ msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
static int msm_routing_ext_ec_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_I2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_PRI_I2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_PRI_I2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new sec_i2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_I2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_SEC_I2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_SEC_I2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new spdif_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SPDIF_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_SPDIF_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_SPDIF_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new slimbus_2_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SLIMBUS_5_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new slimbus_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SLIMBUS_0_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new mi2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_MI2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new quaternary_mi2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new quinary_mi2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new tertiary_mi2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new secondary_mi2s_rx2_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new primary_mi2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_MI2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_PRI_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_PRI_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new int0_mi2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_HDMI_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new display_port_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_INT_BT_SCO_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new int_bt_a2dp_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_INT_FM_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_INT_FM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_INT_FM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new afe_pcm_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_AFE_PCM_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_AFE_PCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_AFE_PCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new auxpcm_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_AUXPCM_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_AUXPCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_AUXPCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new sec_auxpcm_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new tert_auxpcm_rx_mixer_controls[] = {
SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
msm_routing_put_audio_mixer),
};
+static const struct snd_kcontrol_new mmul28_mixer_controls[] = {
+ SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul29_mixer_controls[] = {
+ SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
static const struct snd_kcontrol_new pri_rx_voice_mixer_controls[] = {
SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_PRI_I2S_RX,
MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
mmul21_mixer_controls, ARRAY_SIZE(mmul21_mixer_controls)),
SND_SOC_DAPM_MIXER("MultiMedia27 Mixer", SND_SOC_NOPM, 0, 0,
mmul27_mixer_controls, ARRAY_SIZE(mmul27_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia28 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul28_mixer_controls, ARRAY_SIZE(mmul28_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia29 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul29_mixer_controls, ARRAY_SIZE(mmul29_mixer_controls)),
SND_SOC_DAPM_MIXER("AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
auxpcm_rx_mixer_controls, ARRAY_SIZE(auxpcm_rx_mixer_controls)),
SND_SOC_DAPM_MIXER("SEC_AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
{"MultiMedia17 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia18 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia19 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+ {"MultiMedia28 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+ {"MultiMedia29 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia8 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia2 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
{"MultiMedia4 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
{"MultiMedia17 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
{"MultiMedia18 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
{"MultiMedia19 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+ {"MultiMedia28 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+ {"MultiMedia29 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
{"MultiMedia8 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+ {"MultiMedia17 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"MultiMedia18 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"MultiMedia19 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"MultiMedia28 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"MultiMedia29 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"MultiMedia17 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
{"MultiMedia18 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"MultiMedia19 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"MultiMedia28 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"MultiMedia29 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
{"MultiMedia8 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
{"MultiMedia3 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia5 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia17 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia18 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia19 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+ {"MultiMedia28 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+ {"MultiMedia29 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia5 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia8 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia16 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia17 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia18 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia19 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+ {"MultiMedia28 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+ {"MultiMedia29 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia5 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia6 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia8 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia17 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia18 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia19 Mixer", "AFE_PCM_TX", "PCM_TX"},
+ {"MultiMedia28 Mixer", "AFE_PCM_TX", "PCM_TX"},
+ {"MultiMedia29 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia5 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia8 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia16 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MM_UL20", NULL, "MultiMedia20 Mixer"},
{"MM_UL21", NULL, "MultiMedia21 Mixer"},
{"MM_UL27", NULL, "MultiMedia27 Mixer"},
+ {"MM_UL28", NULL, "MultiMedia28 Mixer"},
+ {"MM_UL29", NULL, "MultiMedia29 Mixer"},
{"AUX_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
{"AUX_PCM_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
{"AUDIO_REF_EC_UL19 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
{"AUDIO_REF_EC_UL19 MUX", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"AUDIO_REF_EC_UL28 MUX", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+ {"AUDIO_REF_EC_UL28 MUX", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+ {"AUDIO_REF_EC_UL28 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"AUDIO_REF_EC_UL28 MUX", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+
+ {"AUDIO_REF_EC_UL29 MUX", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+ {"AUDIO_REF_EC_UL29 MUX", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+ {"AUDIO_REF_EC_UL29 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"AUDIO_REF_EC_UL29 MUX", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+
{"MM_UL1", NULL, "AUDIO_REF_EC_UL1 MUX"},
{"MM_UL2", NULL, "AUDIO_REF_EC_UL2 MUX"},
{"MM_UL3", NULL, "AUDIO_REF_EC_UL3 MUX"},
{"MM_UL17", NULL, "AUDIO_REF_EC_UL17 MUX"},
{"MM_UL18", NULL, "AUDIO_REF_EC_UL18 MUX"},
{"MM_UL19", NULL, "AUDIO_REF_EC_UL19 MUX"},
+ {"MM_UL28", NULL, "AUDIO_REF_EC_UL28 MUX"},
+ {"MM_UL29", NULL, "AUDIO_REF_EC_UL29 MUX"},
{"Voice_Tx Mixer", "PRI_TX_Voice", "PRI_I2S_TX"},
{"Voice_Tx Mixer", "PRI_MI2S_TX_Voice", "PRI_MI2S_TX"},
uint16_t ii;
uint16_t *dst_gain_ptr = NULL;
- if (be_id >= MSM_BACKEND_DAI_MAX) {
+ if (be_id < MSM_BACKEND_DAI_PRI_I2S_RX ||
+ be_id >= MSM_BACKEND_DAI_MAX) {
rc = -EINVAL;
return rc;
}
MSM_FRONTEND_DAI_MULTIMEDIA25,
MSM_FRONTEND_DAI_MULTIMEDIA26,
MSM_FRONTEND_DAI_MULTIMEDIA27,
+ MSM_FRONTEND_DAI_MULTIMEDIA28,
+ MSM_FRONTEND_DAI_MULTIMEDIA29,
MSM_FRONTEND_DAI_CS_VOICE,
MSM_FRONTEND_DAI_VOIP,
MSM_FRONTEND_DAI_AFE_RX,
MSM_FRONTEND_DAI_MAX,
};
-#define MSM_FRONTEND_DAI_MM_SIZE (MSM_FRONTEND_DAI_MULTIMEDIA27 + 1)
-#define MSM_FRONTEND_DAI_MM_MAX_ID MSM_FRONTEND_DAI_MULTIMEDIA27
+#define MSM_FRONTEND_DAI_MM_SIZE (MSM_FRONTEND_DAI_MULTIMEDIA29 + 1)
+#define MSM_FRONTEND_DAI_MM_MAX_ID MSM_FRONTEND_DAI_MULTIMEDIA29
enum {
MSM_BACKEND_DAI_PRI_I2S_RX = 0,
bytes_returned = ((u32 *)rtac_cal[ADM_RTAC_CAL].cal_data.
kvaddr)[2] + 3 * sizeof(u32);
+ if (bytes_returned > rtac_cal[ADM_RTAC_CAL].
+ map_data.map_size) {
+ pr_err("%s: Invalid data size = %d\n",
+ __func__, bytes_returned);
+ result = -EINVAL;
+ goto err;
+ }
+
if (bytes_returned > user_buf_size) {
pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n",
__func__, user_buf_size, bytes_returned);
bytes_returned = ((u32 *)rtac_cal[ASM_RTAC_CAL].cal_data.
kvaddr)[2] + 3 * sizeof(u32);
+ if (bytes_returned > rtac_cal[ASM_RTAC_CAL].
+ map_data.map_size) {
+ pr_err("%s: Invalid data size = %d\n",
+ __func__, bytes_returned);
+ result = -EINVAL;
+ goto err;
+ }
+
if (bytes_returned > user_buf_size) {
pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n",
__func__, user_buf_size, bytes_returned);
bytes_returned = get_resp->param_size +
sizeof(struct afe_port_param_data_v2);
+ if (bytes_returned > rtac_cal[AFE_RTAC_CAL].
+ map_data.map_size) {
+ pr_err("%s: Invalid data size = %d\n",
+ __func__, bytes_returned);
+ result = -EINVAL;
+ goto err;
+ }
+
if (bytes_returned > user_afe_buf.buf_size) {
pr_err("%s: user size = 0x%x, returned size = 0x%x\n",
__func__, user_afe_buf.buf_size,
bytes_returned = ((u32 *)rtac_cal[VOICE_RTAC_CAL].cal_data.
kvaddr)[2] + 3 * sizeof(u32);
+ if (bytes_returned > rtac_cal[VOICE_RTAC_CAL].
+ map_data.map_size) {
+ pr_err("%s: Invalid data size = %d\n",
+ __func__, bytes_returned);
+ result = -EINVAL;
+ goto err;
+ }
+
if (bytes_returned > user_buf_size) {
pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n",
__func__, user_buf_size, bytes_returned);
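All four RTAC read paths now apply the same rule: a size reported back by the DSP is checked against the mapped shared-memory region before it is compared with, or copied toward, the user buffer. Schematically (hypothetical names, a sketch of the pattern rather than the driver's code):

static int demo_copy_result(void __user *ubuf, u32 user_buf_size,
			    const void *kvaddr, u32 bytes, u32 map_size)
{
	if (bytes > map_size)		/* DSP-reported size exceeds mapping */
		return -EINVAL;
	if (bytes > user_buf_size)	/* caller's buffer is too small */
		return -EINVAL;
	return copy_to_user(ubuf, kvaddr, bytes) ? -EFAULT : 0;
}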
snd_soc_dapm_new_control_unlocked(widget->dapm,
&template);
kfree(name);
+ if (IS_ERR(data->widget)) {
+ ret = PTR_ERR(data->widget);
+ goto err_data;
+ }
if (!data->widget) {
ret = -ENOMEM;
goto err_data;
data->widget = snd_soc_dapm_new_control_unlocked(
widget->dapm, &template);
kfree(name);
+ if (IS_ERR(data->widget)) {
+ ret = PTR_ERR(data->widget);
+ goto err_data;
+ }
if (!data->widget) {
ret = -ENOMEM;
goto err_data;
mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
w = snd_soc_dapm_new_control_unlocked(dapm, widget);
+ /* Do not nag about probe deferrals */
+ if (IS_ERR(w)) {
+ int ret = PTR_ERR(w);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(dapm->dev,
+ "ASoC: Failed to create DAPM control %s (%d)\n",
+ widget->name, ret);
+ goto out_unlock;
+ }
if (!w)
dev_err(dapm->dev,
"ASoC: Failed to create DAPM control %s\n",
widget->name);
+out_unlock:
mutex_unlock(&dapm->card->dapm_mutex);
return w;
}
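These hunks teach the callers of snd_soc_dapm_new_control_unlocked() the standard ERR_PTR convention, so an errno such as -EPROBE_DEFER can travel through a pointer return and be re-raised without a scary log line. The convention in miniature (demo types):

static struct demo *demo_create(bool defer)
{
	if (defer)
		return ERR_PTR(-EPROBE_DEFER);	/* errno encoded in pointer */
	return NULL;				/* plain allocation failure */
}

static int demo_caller(void)
{
	struct demo *w = demo_create(false);

	if (IS_ERR(w))
		return PTR_ERR(w);	/* decode; stay quiet for -EPROBE_DEFER */
	if (!w)
		return -ENOMEM;
	return 0;
}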
w->regulator = devm_regulator_get(dapm->dev, w->name);
if (IS_ERR(w->regulator)) {
ret = PTR_ERR(w->regulator);
+ if (ret == -EPROBE_DEFER)
+ return ERR_PTR(ret);
dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n",
w->name, ret);
return NULL;
w->clk = devm_clk_get(dapm->dev, w->name);
if (IS_ERR(w->clk)) {
ret = PTR_ERR(w->clk);
+ if (ret == -EPROBE_DEFER)
+ return ERR_PTR(ret);
dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n",
w->name, ret);
return NULL;
mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_INIT);
for (i = 0; i < num; i++) {
w = snd_soc_dapm_new_control_unlocked(dapm, widget);
+ if (IS_ERR(w)) {
+ ret = PTR_ERR(w);
+ /* Do not nag about probe deferrals */
+ if (ret == -EPROBE_DEFER)
+ break;
+ dev_err(dapm->dev,
+ "ASoC: Failed to create DAPM control %s (%d)\n",
+ widget->name, ret);
+ break;
+ }
if (!w) {
dev_err(dapm->dev,
"ASoC: Failed to create DAPM control %s\n",
dev_dbg(card->dev, "ASoC: adding %s widget\n", link_name);
w = snd_soc_dapm_new_control_unlocked(&card->dapm, &template);
+ if (IS_ERR(w)) {
+ ret = PTR_ERR(w);
+ /* Do not nag about probe deferrals */
+ if (ret != -EPROBE_DEFER)
+ dev_err(card->dev,
+ "ASoC: Failed to create %s widget (%d)\n",
+ link_name, ret);
+ goto outfree_kcontrol_news;
+ }
if (!w) {
dev_err(card->dev, "ASoC: Failed to create %s widget\n",
link_name);
template.name);
w = snd_soc_dapm_new_control_unlocked(dapm, &template);
+ if (IS_ERR(w)) {
+ int ret = PTR_ERR(w);
+
+ /* Do not nag about probe deferrals */
+ if (ret != -EPROBE_DEFER)
+ dev_err(dapm->dev,
+ "ASoC: Failed to create %s widget (%d)\n",
+ dai->driver->playback.stream_name, ret);
+ return ret;
+ }
if (!w) {
dev_err(dapm->dev, "ASoC: Failed to create %s widget\n",
dai->driver->playback.stream_name);
template.name);
w = snd_soc_dapm_new_control_unlocked(dapm, &template);
+ if (IS_ERR(w)) {
+ int ret = PTR_ERR(w);
+
+ /* Do not nag about probe deferrals */
+ if (ret != -EPROBE_DEFER)
+ dev_err(dapm->dev,
+ "ASoC: Failed to create %s widget (%d)\n",
+ dai->driver->playback.stream_name, ret);
+ return ret;
+ }
if (!w) {
dev_err(dapm->dev, "ASoC: Failed to create %s widget\n",
dai->driver->capture.stream_name);
widget = snd_soc_dapm_new_control(dapm, &template);
else
widget = snd_soc_dapm_new_control_unlocked(dapm, &template);
+ if (IS_ERR(widget)) {
+ ret = PTR_ERR(widget);
+ /* Do not nag about probe deferrals */
+ if (ret != -EPROBE_DEFER)
+ dev_err(tplg->dev,
+ "ASoC: failed to create widget %s controls (%d)\n",
+ w->name, ret);
+ goto hdr_err;
+ }
if (widget == NULL) {
dev_err(tplg->dev, "ASoC: failed to create widget %s controls\n",
w->name);
err = snd_usb_caiaq_send_command(cdev, EP1_CMD_GET_DEVICE_INFO, NULL, 0);
if (err)
- return err;
+ goto err_kill_urb;
- if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ))
- return -ENODEV;
+ if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ)) {
+ err = -ENODEV;
+ goto err_kill_urb;
+ }
usb_string(usb_dev, usb_dev->descriptor.iManufacturer,
cdev->vendor_name, CAIAQ_USB_STR_LEN);
setup_card(cdev);
return 0;
+
+ err_kill_urb:
+ usb_kill_urb(&cdev->ep1_in_urb);
+ return err;
}
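The fix replaces the early returns with the usual goto-unwind shape, so the EP1 URB submitted earlier in probe is killed on every later failure path. The skeleton (hypothetical helpers):

static int demo_probe(void)
{
	int err;

	err = demo_submit_urb();	/* resource acquired first */
	if (err)
		return err;		/* nothing to unwind yet */

	err = demo_get_device_info();
	if (err)
		goto err_kill_urb;	/* later failures unwind the URB */

	return 0;

 err_kill_urb:
	demo_kill_urb();
	return err;
}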
static int snd_probe(struct usb_interface *intf,
case UAC_VERSION_1: {
void *control_header;
struct uac1_ac_header_descriptor *h1;
+ int rest_bytes;
control_header = snd_usb_find_csint_desc(host_iface->extra,
host_iface->extralen, NULL, UAC_HEADER);
return -EINVAL;
}
+ rest_bytes = (void *)(host_iface->extra + host_iface->extralen) -
+ control_header;
+
+ /* just to be sure -- this shouldn't hit at all */
+ if (rest_bytes <= 0) {
+ dev_err(&dev->dev, "invalid control header\n");
+ return -EINVAL;
+ }
+
h1 = control_header;
+
+ if (rest_bytes < sizeof(*h1)) {
+ dev_err(&dev->dev, "too short v1 buffer descriptor\n");
+ return -EINVAL;
+ }
+
if (!h1->bInCollection) {
dev_info(&dev->dev, "skipping empty audio interface (v1)\n");
return -EINVAL;
}
+ if (rest_bytes < h1->bLength) {
+ dev_err(&dev->dev, "invalid buffer length (v1)\n");
+ return -EINVAL;
+ }
+
if (h1->bLength < sizeof(*h1) + h1->bInCollection) {
dev_err(&dev->dev, "invalid UAC_HEADER (v1)\n");
return -EINVAL;
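The UAC1 parser now computes how many bytes of class-specific descriptor data actually remain and validates that count before every dereference: first the fixed sizeof(*h1) header, then the bLength the header claims, then the bInCollection array that claim implies. Generalized (hypothetical descriptor layout):

struct demo_hdr {
	u8 bLength;
	u8 bInCollection;
	/* ... trailing baInterfaceNr[] ... */
};

static int demo_parse(const u8 *p, int rest_bytes)
{
	const struct demo_hdr *h = (const void *)p;

	if (rest_bytes < (int)sizeof(*h))	/* fixed part present? */
		return -EINVAL;
	if (rest_bytes < h->bLength)		/* claimed length available? */
		return -EINVAL;
	if (h->bLength < sizeof(*h) + h->bInCollection)
		return -EINVAL;			/* claim self-consistent? */
	return 0;
}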
return 0;
error:
- if (line6->disconnect)
- line6->disconnect(line6);
- snd_card_free(card);
+ /* we can call the disconnect callback here because no close-sync
+ * is needed at this point
+ */
+ line6_disconnect(interface);
return ret;
}
EXPORT_SYMBOL_GPL(line6_probe);
int index, char *buf, int maxlen)
{
int len = usb_string(state->chip->dev, index, buf, maxlen - 1);
- buf[len] = 0;
return len;
}
static void snd_usb_mixer_free(struct usb_mixer_interface *mixer)
{
+ /* kill pending URBs */
+ snd_usb_mixer_disconnect(mixer);
+
kfree(mixer->id_elems);
if (mixer->urb) {
kfree(mixer->urb->transfer_buffer);
void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer)
{
- usb_kill_urb(mixer->urb);
- usb_kill_urb(mixer->rc_urb);
+ if (mixer->disconnected)
+ return;
+ if (mixer->urb)
+ usb_kill_urb(mixer->urb);
+ if (mixer->rc_urb)
+ usb_kill_urb(mixer->rc_urb);
+ mixer->disconnected = true;
}
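Since snd_usb_mixer_free() now invokes the disconnect handler itself, disconnect must tolerate running twice; the new flag turns any second call into a no-op. The idiom (demo struct with hypothetical fields):

static void demo_disconnect(struct demo_mixer *mixer)
{
	if (mixer->disconnected)
		return;			/* already torn down */
	if (mixer->urb)
		usb_kill_urb(mixer->urb);
	mixer->disconnected = true;	/* make repeated calls harmless */
}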
#ifdef CONFIG_PM
struct urb *rc_urb;
struct usb_ctrlrequest *rc_setup_packet;
u8 rc_buffer[6];
+
+ bool disconnected;
};
#define MAX_CHANNELS 16 /* max logical channels */
}
pg = get_order(read_size);
- sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg);
+ sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO|
+ __GFP_NOWARN, pg);
if (!sk->s) {
snd_printk(KERN_WARNING "couldn't __get_free_pages()\n");
goto out;
pg = get_order(write_size);
sk->write_page =
- (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg);
+ (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO|
+ __GFP_NOWARN, pg);
if (!sk->write_page) {
snd_printk(KERN_WARNING "couldn't __get_free_pages()\n");
usb_stream_free(sk);