OSDN Git Service

Merge "mm, kswapd: replace kswapd compaction with waking up kcompactd"
authorLinux Build Service Account <lnxbuild@localhost>
Sat, 25 Feb 2017 09:09:40 +0000 (01:09 -0800)
committerGerrit - the friendly Code Review server <code-review@localhost>
Sat, 25 Feb 2017 09:09:39 +0000 (01:09 -0800)
229 files changed:
Documentation/devicetree/bindings/clock/qcom,mmcc.txt
Documentation/devicetree/bindings/display/msm/hdmi.txt
Documentation/devicetree/bindings/drm/msm/hdmi-display.txt [new file with mode: 0644]
Documentation/devicetree/bindings/input/touchscreen/msg21xx-ts.txt [deleted file]
Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt
Documentation/devicetree/bindings/sound/wcd_codec.txt
Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
arch/arm/boot/dts/qcom/Makefile
arch/arm/boot/dts/qcom/apq8998-v2.1-mediabox.dts
arch/arm/boot/dts/qcom/msm-audio.dtsi
arch/arm/boot/dts/qcom/msm-smb138x.dtsi
arch/arm/boot/dts/qcom/msm8998-camera-sensor-skuk-hdk.dtsi [new file with mode: 0644]
arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi
arch/arm/boot/dts/qcom/msm8998-sde-display.dtsi [new file with mode: 0644]
arch/arm/boot/dts/qcom/msm8998-sde.dtsi [new file with mode: 0644]
arch/arm/boot/dts/qcom/msm8998-v2-qrd-skuk-hdk.dts
arch/arm/boot/dts/qcom/msm8998-v2.dtsi
arch/arm/boot/dts/qcom/msm8998-vidc.dtsi
arch/arm/boot/dts/qcom/msm8998.dtsi
arch/arm/boot/dts/qcom/sdm630-cdp.dtsi
arch/arm/boot/dts/qcom/sdm630-mdss-panels.dtsi
arch/arm/boot/dts/qcom/sdm630-mdss.dtsi
arch/arm/boot/dts/qcom/sdm630-mtp.dtsi
arch/arm/boot/dts/qcom/sdm630-qrd.dtsi
arch/arm/boot/dts/qcom/sdm630-regulator.dtsi
arch/arm/boot/dts/qcom/sdm630-usbc-audio-mtp.dts [new file with mode: 0644]
arch/arm/boot/dts/qcom/sdm630.dtsi
arch/arm/boot/dts/qcom/sdm660-audio.dtsi
arch/arm/boot/dts/qcom/sdm660-camera.dtsi
arch/arm/boot/dts/qcom/sdm660-common.dtsi
arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi
arch/arm/boot/dts/qcom/sdm660-mdss.dtsi
arch/arm/boot/dts/qcom/sdm660-mtp.dtsi
arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi
arch/arm/boot/dts/qcom/sdm660-pm.dtsi
arch/arm/boot/dts/qcom/sdm660-qrd.dtsi
arch/arm/boot/dts/qcom/sdm660-regulator.dtsi
arch/arm/boot/dts/qcom/sdm660-usbc-audio-mtp.dts [new file with mode: 0644]
arch/arm/boot/dts/qcom/sdm660.dtsi
arch/arm/configs/sdm660-perf_defconfig
arch/arm/configs/sdm660_defconfig
arch/arm64/configs/msmcortex-perf_defconfig
arch/arm64/configs/msmcortex_defconfig
arch/arm64/configs/sdm660-perf_defconfig
arch/arm64/configs/sdm660_defconfig
certs/Makefile
certs/verity.x509.pem [new file with mode: 0644]
drivers/base/dma-removed.c
drivers/bluetooth/btfm_slim.c
drivers/bluetooth/btfm_slim.h
drivers/bluetooth/btfm_slim_codec.c
drivers/bluetooth/btfm_slim_wcn3990.c
drivers/char/diag/diagchar_core.c
drivers/char/diag/diagfwd.c
drivers/char/diag/diagfwd.h
drivers/char/diag/diagfwd_glink.c
drivers/char/diag/diagfwd_glink.h
drivers/clk/msm/clock-generic.c
drivers/clk/qcom/clk-alpha-pll.c
drivers/clk/qcom/clk-rcg2.c
drivers/clk/qcom/clk-smd-rpm.c
drivers/clk/qcom/gcc-sdm660.c
drivers/clk/qcom/mmcc-sdm660.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/Makefile
drivers/gpu/drm/msm/adreno/a2xx.xml.h
drivers/gpu/drm/msm/adreno/a3xx.xml.h
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
drivers/gpu/drm/msm/adreno/a4xx.xml.h
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
drivers/gpu/drm/msm/adreno/a5xx.xml.h [new file with mode: 0644]
drivers/gpu/drm/msm/adreno/a5xx_gpu.c [new file with mode: 0644]
drivers/gpu/drm/msm/adreno/a5xx_gpu.h [new file with mode: 0644]
drivers/gpu/drm/msm/adreno/a5xx_power.c [new file with mode: 0644]
drivers/gpu/drm/msm/adreno/a5xx_preempt.c [new file with mode: 0644]
drivers/gpu/drm/msm/adreno/a5xx_snapshot.c [new file with mode: 0644]
drivers/gpu/drm/msm/adreno/adreno_common.xml.h
drivers/gpu/drm/msm/adreno/adreno_device.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.h
drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
drivers/gpu/drm/msm/dsi/dsi.h
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/dsi/dsi_manager.c
drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c [new file with mode: 0644]
drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h [new file with mode: 0644]
drivers/gpu/drm/msm/hdmi/hdmi.c
drivers/gpu/drm/msm/hdmi/hdmi.h
drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_fb.c
drivers/gpu/drm/msm/msm_fbdev.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem.h
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gem_vma.c [new file with mode: 0644]
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/msm/msm_iommu.c
drivers/gpu/drm/msm/msm_iommu.h [new file with mode: 0644]
drivers/gpu/drm/msm/msm_mmu.h
drivers/gpu/drm/msm/msm_rd.c
drivers/gpu/drm/msm/msm_ringbuffer.c
drivers/gpu/drm/msm/msm_ringbuffer.h
drivers/gpu/drm/msm/msm_smmu.c
drivers/gpu/drm/msm/msm_snapshot.c [new file with mode: 0644]
drivers/gpu/drm/msm/msm_snapshot.h [new file with mode: 0644]
drivers/gpu/drm/msm/msm_snapshot_api.h [new file with mode: 0644]
drivers/gpu/drm/msm/sde/sde_connector.c
drivers/gpu/drm/msm/sde/sde_connector.h
drivers/gpu/drm/msm/sde/sde_core_irq.c
drivers/gpu/drm/msm/sde/sde_core_irq.h
drivers/gpu/drm/msm/sde/sde_encoder_phys.h
drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
drivers/gpu/drm/msm/sde/sde_formats.c
drivers/gpu/drm/msm/sde/sde_formats.h
drivers/gpu/drm/msm/sde/sde_irq.c
drivers/gpu/drm/msm/sde/sde_kms.c
drivers/gpu/drm/msm/sde/sde_kms.h
drivers/gpu/drm/msm/sde/sde_plane.c
drivers/gpu/drm/msm/sde_io_util.c [new file with mode: 0644]
drivers/gpu/drm/msm/sde_power_handle.c
drivers/gpu/msm/adreno_dispatch.c
drivers/gpu/msm/kgsl.c
drivers/hwtracing/coresight/Kconfig
drivers/input/touchscreen/Kconfig
drivers/input/touchscreen/Makefile
drivers/input/touchscreen/msg21xx_ts.c [deleted file]
drivers/iommu/arm-smmu.c
drivers/iommu/io-pgtable-arm.c
drivers/iommu/io-pgtable.h
drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
drivers/mmc/card/block.c
drivers/mmc/core/core.c
drivers/mmc/host/cmdq_hci.c
drivers/mmc/host/cmdq_hci.h
drivers/mmc/host/sdhci-msm.c
drivers/net/ethernet/msm/msm_rmnet_mhi.c
drivers/net/wireless/ath/ath10k/ce.c
drivers/net/wireless/ath/wil6210/Makefile
drivers/net/wireless/ath/wil6210/pcie_bus.c
drivers/net/wireless/ath/wil6210/sysfs.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/pci/host/pci-msm.c
drivers/phy/phy-qcom-ufs.c
drivers/platform/msm/gsi/gsi.c
drivers/platform/msm/gsi/gsi.h
drivers/platform/msm/gsi/gsi_reg.h
drivers/platform/msm/ipa/ipa_v2/ipa_client.c
drivers/platform/msm/ipa/ipa_v2/ipa_i.h
drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
drivers/platform/msm/ipa/ipa_v3/ipa.c
drivers/platform/msm/mhi/mhi.h
drivers/platform/msm/mhi/mhi_bhi.c
drivers/platform/msm/mhi/mhi_event.c
drivers/platform/msm/mhi/mhi_hwio.h
drivers/platform/msm/mhi/mhi_iface.c
drivers/platform/msm/mhi/mhi_init.c
drivers/platform/msm/mhi/mhi_isr.c
drivers/platform/msm/mhi/mhi_macros.h
drivers/platform/msm/mhi/mhi_main.c
drivers/platform/msm/mhi/mhi_mmio_ops.c
drivers/platform/msm/mhi/mhi_pm.c
drivers/platform/msm/mhi/mhi_ring_ops.c
drivers/platform/msm/mhi/mhi_ssr.c
drivers/platform/msm/mhi/mhi_states.c
drivers/platform/msm/mhi/mhi_sys.c
drivers/platform/msm/mhi/mhi_sys.h
drivers/power/supply/qcom/battery.c
drivers/power/supply/qcom/qpnp-smb2.c
drivers/power/supply/qcom/smb-lib.h
drivers/power/supply/qcom/smb138x-charger.c
drivers/regulator/cprh-kbss-regulator.c
drivers/regulator/qpnp-labibb-regulator.c
drivers/scsi/ufs/ufs-qcom.c
drivers/scsi/ufs/ufs-qcom.h
drivers/soc/qcom/glink.c
drivers/soc/qcom/msm_smem.c
drivers/soc/qcom/peripheral-loader.c
drivers/soc/qcom/qdsp6v2/audio_notifier.c
drivers/soc/qcom/rpm_rail_stats.c
drivers/soc/qcom/smcinvoke.c
drivers/usb/gadget/function/u_ether.c
drivers/usb/pd/policy_engine.c
drivers/usb/phy/phy-msm-qusb.c
drivers/video/fbdev/msm/mdss.h
drivers/video/fbdev/msm/mdss_fb.c
drivers/video/fbdev/msm/mdss_mdp_ctl.c
drivers/video/fbdev/msm/mdss_smmu.c
include/dt-bindings/clock/qcom,gcc-sdm660.h
include/linux/iommu.h
include/linux/msm_gsi.h
include/linux/msm_mhi.h
include/linux/regulator/qpnp-labibb-regulator.h [new file with mode: 0644]
include/linux/sde_io_util.h [new file with mode: 0644]
include/uapi/drm/msm_drm.h
kernel/sched/core_ctl.c
mm/vmpressure.c
mm/vmscan.c
net/socket.c
scripts/checkpatch.pl
sound/soc/codecs/Kconfig
sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
sound/soc/codecs/wsa881x-regmap.c
sound/soc/codecs/wsa881x.c
sound/soc/codecs/wsa881x.h
sound/soc/msm/Kconfig
sound/soc/msm/msm8998.c
sound/soc/msm/qdsp6v2/Makefile
sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c [deleted file]
sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
sound/soc/msm/qdsp6v2/q6afe.c
sound/soc/msm/sdm660-common.c
sound/soc/msm/sdm660-ext-dai-links.c
sound/soc/msm/sdm660-internal.c

index 4b10572..f7b1bfd 100644 (file)
@@ -11,6 +11,7 @@ Required properties :
                        "qcom,mmcc-msm8974"
                        "qcom,mmcc-msm8996"
                        "qcom,mmcc-sdm660"
+                       "qcom,mmcc-sdm630"
 
 - reg : shall contain base register location and length
 - #clock-cells : shall contain 1
index 379ee2e..a0615ac 100644 (file)
@@ -3,6 +3,7 @@ Qualcomm adreno/snapdragon hdmi output
 Required properties:
 - compatible: one of the following
    * "qcom,hdmi-tx-8996"
+   * "qcom,hdmi-tx-8998"
    * "qcom,hdmi-tx-8994"
    * "qcom,hdmi-tx-8084"
    * "qcom,hdmi-tx-8974"
@@ -21,6 +22,7 @@ Required properties:
 
 Optional properties:
 - qcom,hdmi-tx-mux-en-gpio: hdmi mux enable pin
+- qcom,hdmi-tx-hpd5v-gpio: hdmi 5v boost pin
 - qcom,hdmi-tx-mux-sel-gpio: hdmi mux select pin
 - power-domains: reference to the power domain(s), if available.
 - pinctrl-names: the pin control state names; should contain "default"
diff --git a/Documentation/devicetree/bindings/drm/msm/hdmi-display.txt b/Documentation/devicetree/bindings/drm/msm/hdmi-display.txt
new file mode 100644 (file)
index 0000000..aaa3722
--- /dev/null
@@ -0,0 +1,59 @@
+Qualcomm Technologies, Inc. Adreno/Snapdragon hdmi display manager
+
+Required properties:
+- compatible: "qcom,hdmi-display"
+- label: label of this display manager
+
+Optional properties:
+- qcom,display-type: display type of this manager. It could be "primary",
+  "secondary", "tertiary", etc.
+- qcom,non-pluggable: Boolean to indicate if display is non pluggable.
+- qcom,customize-modes: Customized modes when it's non pluggable display.
+- qcom,customize-mode-id: Customized mode node.
+- qcom,mode-name: String which indicates the mode name which shall be used
+   by the connector in non pluggable mode. Refer the example below for details.
+   In pluggable mode, the modes shall be filled up
+   after edid parsing.
+- qcom,mode-h-active: Horizontal active pixels for this mode.
+- qcom,mode-h-front-porch: Horizontal front porch in pixels for this mode.
+- qcom,mode-h-pulse-width: Horizontal sync width in pixels for this mode.
+- qcom,mode-h-back-porch: Horizontal back porch in pixels for this mode.
+- qcom,mode-h-active-high: Boolean to indicate if mode horizontal polarity is active high.
+- qcom,mode-v-active: Vertical active lines for this mode.
+- qcom,mode-v-front-porch: Vertical front porch in lines for this mode.
+- qcom,mode-v-pulse-width: Vertical sync width in lines for this mode.
+- qcom,mode-v-back-porch: Vertical back porch in lines for this mode.
+- qcom,mode-v-active-high: Boolean to indicate if mode vertical polarity is active high.
+- qcom,mode-refersh-rate: Mode refresh rate in hertz.
+- qcom,mode-clock-in-khz: Mode pixel clock in KHz.
+
+Example:
+
+/ {
+       ...
+
+       hdmi_display: qcom,hdmi-display {
+               compatible = "qcom,hdmi-display";
+               label = "hdmi_display";
+               qcom,display-type = "secondary";
+               qcom,non-pluggable;
+               qcom,customize-modes {
+                       qcom,customize-mode-id@0 {
+                               qcom,mode-name = "3840x2160@30Hz";
+                               qcom,mode-h-active = <3840>;
+                               qcom,mode-h-front-porch = <176>;
+                               qcom,mode-h-pulse-width = <88>;
+                               qcom,mode-h-back-porch = <296>;
+                               qcom,mode-h-active-high;
+                               qcom,mode-v-active = <2160>;
+                               qcom,mode-v-front-porch = <8>;
+                               qcom,mode-v-pulse-width = <10>;
+                               qcom,mode-v-back-porch = <72>;
+                               qcom,mode-v-active-high;
+                               qcom,mode-refersh-rate = <30>;
+                               qcom,mode-clock-in-khz = <297000>;
+                       };
+               };
+       };
+
+};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/msg21xx-ts.txt b/Documentation/devicetree/bindings/input/touchscreen/msg21xx-ts.txt
deleted file mode 100644 (file)
index 7315aef..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-Mstar touch controller
-
-The mstar controller is connected to host processor
-via i2c. The controller generates interrupts when the
-user touches the panel. The host controller is expected
-to read the touch coordinates over i2c and pass the coordinates
-to the rest of the system.
-
-Required properties:
-
- - compatible          : should be "mstar,msg21xx".
- - reg                 : i2c slave address of the device.
- - interrupt-parent    : parent of interrupt.
- - interrupts          : touch sample interrupt to indicate presense or release
-                               of fingers on the panel.
- - vdd-supply          : Power supply needed to power up the device.
- - vcc_i2c-supply      : Power source required to power up i2c bus.
- - mstar,irq-gpio      : irq gpio which is to provide interrupts to host,
-                               same as "interrupts" node. It will also
-                               contain active low or active high information.
- - mstar,reset-gpio    : reset gpio to control the reset of chip.
- - mstar,display-coords : display coords in pixels. It is a four
-                               tuple consisting of min x, min y, max x and
-                               max y values.
- - pinctrl-names : This should be defined if a target uses pinctrl framework.
-                       See "pinctrl" in Documentation/devicetree/bindings/pinctrl/msm-pinctrl.txt.
-                       Specify the names of the configs that pinctrl can install in driver.
-                       Following are the pinctrl configs that can be installed:
-                       "pmx_ts_active" : Active configuration of pins, this should specify active
-                       config defined in pin groups of interrupt and reset gpio.
-                       "pmx_ts_suspend" : Disabled configuration of pins, this should specify sleep
-                       config defined in pin groups of interrupt and reset gpio.
-                       "pmx_ts_release" : Release configuration of pins, this should specify
-                       release config defined in pin groups of interrupt and reset gpio.
- - mstar,num-max-touches: It defines the maximum number of touch supported by the controller.
- - mstar,hard-reset-delay-ms : hard reset delay in ms
- - mstar,post-hard-reset-delay-ms : post hard reset delay in ms
-
-Optional properties:
-
- - mstar,button-map : button map of key codes. It is a three tuple consisting of key codes.
- - mstar,panel-coords : panel coords for the chip in pixels.
-                               It is a four tuple consisting of min x,
-                               min y, max x and max y values.
- - mstar,ic-type : It defines the ic-type of the controller. Values are as folows:
-                       1 -> msg2133.
-                       2 -> msg21xxA.
-                       3 -> msg26xxM.
-
-Example:
-       i2c@78b9000 { /* BLSP1 QUP5 */
-               mstar@26 {
-                       compatible = "mstar,msg21xx";
-                       reg = <0x26>;
-                       interrupt-parent = <&msm_gpio>;
-                       interrupts = <13 0x2008>;
-                       mstar,irq-gpio = <&msm_gpio 13 0x00000001>;
-                       mstar,reset-gpio = <&msm_gpio 12 0x0>;
-                       vdd-supply = <&pm8916_l17>;
-                       vcc_i2c-supply = <&pm8916_l6>;
-                       mstar,display-coords = <0 0 480 854>;
-                       pinctrl-names = "pmx_ts_active","pmx_ts_suspend";
-                       pinctrl-0 = <&ts_int_active &ts_reset_active>;
-                       pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
-                       mstar,button-map = <172 139 158>;
-                       mstar,ic-type = <2>;
-                       mstar,num_max_touches = <2>;
-                       mstar,hard-reset-delay-ms = <100>;
-                       mstar,post-hard-reset-delay-ms = <100>;
-               };
-       };
index d08ca95..c9cfc88 100644 (file)
@@ -149,6 +149,8 @@ LAB subnode optional properties:
                                        already. If it it not specified, then
                                        output voltage can be configured to
                                        any value in the allowed limit.
+- qcom,notify-lab-vreg-ok-sts:         A boolean property which upon set will
+                                       poll and notify the lab_vreg_ok status.
 
 Following properties are available only for PM660A:
 
index b7a81ef..5b5d43a 100644 (file)
@@ -514,10 +514,6 @@ Required properties:
               which is also existing driver WSA881x that represents
               soundwire slave devices.
 
-Optional Properties:
- - qcom,cache-always : Boolean. This property is used in  WSA slave
-                         device to use cacheable for all registers.
-
 Example:
 
 msm_sdw_codec: qcom,msm-sdw-codec@152c1000 {
@@ -535,7 +531,6 @@ msm_sdw_codec: qcom,msm-sdw-codec@152c1000 {
                        compatible = "qcom,wsa881x";
                        reg = <0x00 0x20170212>;
                        qcom,spkr-sd-n-gpio = <&tlmm 80 0>;
-                       qcom,cache-always;
                };
        };
 };
index a25961c..8b99dbc 100644 (file)
@@ -141,6 +141,9 @@ enabled and functional in the driver:
 - qcom,pm-qos-default-cpu:             PM QoS voting is based on the cpu associated with each IO request by the block layer.
                                        This defined the default cpu used for PM QoS voting in case a specific cpu value is not available.
 
+- qcom,vddp-ref-clk-supply      : reference clock to ufs device. Controlled by the host driver.
+- qcom,vddp-ref-clk-max-microamp : specifies max. load that can be drawn for
+                                  ref-clk supply.
 Example:
        ufshc@0xfc598000 {
                ...
index c6cff2e..db37dc6 100644 (file)
@@ -164,6 +164,7 @@ dtb-$(CONFIG_ARCH_SDM660) += sdm660-sim.dtb \
        sdm660-headset-jacktype-no-rcm.dtb \
        sdm660-pm660a-headset-jacktype-no-cdp.dtb \
        sdm660-pm660a-headset-jacktype-no-rcm.dtb \
+       sdm660-usbc-audio-mtp.dtb \
        sdm658-mtp.dtb \
        sdm658-cdp.dtb \
        sdm658-rcm.dtb \
@@ -188,6 +189,7 @@ dtb-$(CONFIG_ARCH_SDM660) += sdm660-sim.dtb \
 dtb-$(CONFIG_ARCH_SDM630) += sdm630-rumi.dtb \
        sdm630-pm660a-rumi.dtb \
        sdm630-mtp.dtb \
+       sdm630-usbc-audio-mtp.dtb \
        sdm630-cdp.dtb \
        sdm630-rcm.dtb \
        sdm630-internal-codec-mtp.dtb \
index daf43d4..00e3d0e 100644 (file)
 };
 
 &mdss_mdp {
+       status = "disabled";
        qcom,mdss-pref-prim-intf = "hdmi";
 };
 
+&sde_hdmi {
+       qcom,display-type = "primary";
+};
+
 &slim_aud {
        tasha_codec {
                wsa_spkr_sd1: msm_cdc_pinctrll {
index a8a1743..42cf30c 100644 (file)
                compatible = "qcom,msm-pcm-afe";
        };
 
+       dai_dp: qcom,msm-dai-q6-dp {
+               compatible = "qcom,msm-dai-q6-hdmi";
+               qcom,msm-dai-q6-dev-id = <24608>;
+       };
+
        loopback: qcom,msm-pcm-loopback {
                compatible = "qcom,msm-pcm-loopback";
        };
                qcom,wcn-btfm;
                qcom,mi2s-audio-intf;
                qcom,auxpcm-audio-intf;
+               qcom,ext-disp-audio-rx;
                qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
                qcom,audio-routing =
                        "AIF4 VI", "MCLK",
                                      "msm-pcm-routing", "msm-cpe-lsm",
                                      "msm-compr-dsp", "msm-pcm-dsp-noirq",
                                      "msm-cpe-lsm.3";
-               asoc-cpu = <&dai_mi2s0>, <&dai_mi2s1>,
+               asoc-cpu = <&dai_dp>, <&dai_mi2s0>,
+                               <&dai_mi2s1>,
                                <&dai_mi2s2>, <&dai_mi2s3>,
                                <&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
                                <&dai_tert_auxpcm>, <&dai_quat_auxpcm>,
                                <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
                                <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
                                <&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
-               asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+               asoc-cpu-names = "msm-dai-q6-dp.24608", "msm-dai-q6-mi2s.0",
+                               "msm-dai-q6-mi2s.1",
                                "msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
                                "msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
                                "msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4",
                                "msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36896",
                                "msm-dai-q6-tdm.36897", "msm-dai-q6-tdm.36912",
                                "msm-dai-q6-tdm.36913";
-               asoc-codec = <&stub_codec>;
-               asoc-codec-names = "msm-stub-codec.1";
+               asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>;
+               asoc-codec-names = "msm-stub-codec.1",
+                                       "msm-ext-disp-audio-codec-rx";
                qcom,wsa-max-devs = <2>;
                qcom,wsa-devs = <&wsa881x_211>, <&wsa881x_212>,
                                <&wsa881x_213>, <&wsa881x_214>;
                qcom,wcn-btfm;
                qcom,mi2s-audio-intf;
                qcom,auxpcm-audio-intf;
+               qcom,ext-disp-audio-rx;
                qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
                qcom,audio-routing =
                        "AIF4 VI", "MCLK",
                qcom,hph-en0-gpio = <&tavil_hph_en0>;
                qcom,hph-en1-gpio = <&tavil_hph_en1>;
                qcom,msm-mclk-freq = <9600000>;
+               qcom,usbc-analog-en1_gpio = <&wcd_usbc_analog_en1_gpio>;
+               qcom,usbc-analog-en2_n_gpio = <&wcd_usbc_analog_en2n_gpio>;
                asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
                                <&loopback>, <&compress>, <&hostless>,
                                <&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>,
                                "msm-pcm-afe", "msm-lsm-client",
                                "msm-pcm-routing", "msm-cpe-lsm",
                                "msm-compr-dsp", "msm-pcm-dsp-noirq";
-               asoc-cpu = <&dai_mi2s0>, <&dai_mi2s1>,
+               asoc-cpu = <&dai_dp>, <&dai_mi2s0>,
+                               <&dai_mi2s1>,
                                <&dai_mi2s2>, <&dai_mi2s3>,
                                <&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
                                <&dai_tert_auxpcm>, <&dai_quat_auxpcm>,
                                <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
                                <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
                                <&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
-               asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+               asoc-cpu-names = "msm-dai-q6-dp.24608", "msm-dai-q6-mi2s.0",
+                               "msm-dai-q6-mi2s.1",
                                "msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
                                "msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
                                "msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4",
                                "msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36896",
                                "msm-dai-q6-tdm.36897", "msm-dai-q6-tdm.36912",
                                "msm-dai-q6-tdm.36913";
-               asoc-codec = <&stub_codec>;
-               asoc-codec-names = "msm-stub-codec.1";
+               asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>;
+               asoc-codec-names = "msm-stub-codec.1",
+                                       "msm-ext-disp-audio-codec-rx";
                qcom,wsa-max-devs = <2>;
                qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>,
                                <&wsa881x_0213>, <&wsa881x_0214>;
                qcom,wcn-btfm;
                qcom,mi2s-audio-intf;
                qcom,auxpcm-audio-intf;
+               qcom,ext-disp-audio-rx;
                qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
                qcom,msm-mclk-freq = <9600000>;
                qcom,msm-mbhc-hphl-swh = <1>;
                        "DMIC4", "MIC BIAS External",
                        "MIC BIAS External", "Digital Mic4",
                        "SpkrLeft IN", "SPK1 OUT",
-                       "SpkrRight IN", "SPK2 OUT";
+                       "SpkrRight IN", "SPK2 OUT",
+                       "PDM_IN_RX1", "PDM_OUT_RX1",
+                       "PDM_IN_RX2", "PDM_OUT_RX2",
+                       "PDM_IN_RX3", "PDM_OUT_RX3",
+                       "ADC1_IN", "ADC1_OUT",
+                       "ADC2_IN", "ADC2_OUT",
+                       "ADC3_IN", "ADC3_OUT";
 
                asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
                                <&loopback>, <&compress>, <&hostless>,
                                "msm-pcm-afe", "msm-lsm-client",
                                "msm-pcm-routing", "msm-compr-dsp",
                                "msm-pcm-dsp-noirq";
-               asoc-cpu = <&dai_mi2s0>, <&dai_mi2s1>,
+               asoc-cpu = <&dai_dp>, <&dai_mi2s0>,
+                               <&dai_mi2s1>,
                                <&dai_mi2s2>, <&dai_mi2s3>,
                                <&dai_int_mi2s0>, <&dai_int_mi2s1>,
                                <&dai_int_mi2s2>, <&dai_int_mi2s3>,
                                <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
                                <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
                                <&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
-               asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+               asoc-cpu-names = "msm-dai-q6-dp.24608", "msm-dai-q6-mi2s.0",
+                               "msm-dai-q6-mi2s.1",
                                "msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
                                "msm-dai-q6-mi2s.7", "msm-dai-q6-mi2s.8",
                                "msm-dai-q6-mi2s.9", "msm-dai-q6-mi2s.10",
                                "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
                                "msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913";
                asoc-codec = <&stub_codec>, <&msm_digital_codec>,
-                               <&pmic_analog_codec>, <&msm_sdw_codec>;
+                               <&pmic_analog_codec>, <&msm_sdw_codec>,
+                               <&ext_disp_audio_codec>;
                asoc-codec-names = "msm-stub-codec.1", "msm-dig-codec",
-                                       "analog-codec", "msm_sdw_codec";
+                                       "analog-codec", "msm_sdw_codec",
+                                       "msm-ext-disp-audio-codec-rx";
 
                qcom,wsa-max-devs = <2>;
                qcom,wsa-devs = <&wsa881x_211_en>, <&wsa881x_212_en>,
index 8edc1fc..ea4f050 100644 (file)
                        #address-cells = <1>;
                        #size-cells = <1>;
                        interrupt-parent = <&smb138x>;
-                       io-channels = <&smb138x_tadc 2>,
-                                     <&smb138x_tadc 3>,
-                                     <&smb138x_tadc 14>,
-                                     <&smb138x_tadc 15>,
-                                     <&smb138x_tadc 16>,
-                                     <&smb138x_tadc 17>;
-                       io-channel-names = "charger_temp",
-                                          "batt_i",
-                                          "connector_temp_thr1",
-                                          "connector_temp_thr2",
-                                          "connector_temp_thr3",
-                                          "charger_temp_max";
+                       io-channels =
+                               <&smb138x_tadc 1>,
+                               <&smb138x_tadc 2>,
+                               <&smb138x_tadc 3>,
+                               <&smb138x_tadc 14>,
+                               <&smb138x_tadc 15>,
+                               <&smb138x_tadc 16>,
+                               <&smb138x_tadc 17>;
+                       io-channel-names =
+                               "connector_temp",
+                               "charger_temp",
+                               "batt_i",
+                               "connector_temp_thr1",
+                               "connector_temp_thr2",
+                               "connector_temp_thr3",
+                               "charger_temp_max";
 
                        qcom,chgr@1000 {
                                reg = <0x1000 0x100>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-skuk-hdk.dtsi b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-skuk-hdk.dtsi
new file mode 100644 (file)
index 0000000..4b3748e
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8998-camera-sensor-skuk-evt3.dtsi"
+
+&tlmm{
+       cam_sensor_mclk1_active: cam_sensor_mclk1_active {
+               /* MCLK1 */
+               mux {
+                       /* CLK, DATA */
+                       pins = "gpio14";
+                       function = "cam_mclk";
+               };
+
+               config {
+                       pins = "gpio14";
+                       bias-disable; /* No PULL */
+                       drive-strength = <8>; /* 8 MA */
+               };
+       };
+
+       cam_sensor_mclk2_active: cam_sensor_mclk2_active {
+               /* MCLK2 */
+               mux {
+                       /* CLK, DATA */
+                       pins = "gpio15";
+                       function = "cam_mclk";
+               };
+
+               config {
+                       pins = "gpio15";
+                       bias-disable; /* No PULL */
+                       drive-strength = <8>; /* 8 MA */
+               };
+       };
+};
index d1c9bce..aa95872 100644 (file)
        };
 };
 
+&i2c_5 {
+       status = "okay";
+       st_fts@49 {
+               compatible = "st,fts";
+               reg = <0x49>;
+               interrupt-parent = <&tlmm>;
+               interrupts = <125 0x2008>;
+               vdd-supply = <&pm8998_l6>;
+               avdd-supply = <&pm8998_l28>;
+               pinctrl-names = "pmx_ts_active", "pmx_ts_suspend";
+               pinctrl-0 = <&ts_active>;
+               pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
+               st,irq-gpio = <&tlmm 125 0x2008>;
+               st,reset-gpio = <&tlmm 89 0x00>;
+               st,regulator_dvdd = "vdd";
+               st,regulator_avdd = "avdd";
+       };
+};
+
 &soc {
        gpio_keys {
                compatible = "gpio-keys";
diff --git a/arch/arm/boot/dts/qcom/msm8998-sde-display.dtsi b/arch/arm/boot/dts/qcom/msm8998-sde-display.dtsi
new file mode 100644 (file)
index 0000000..6cef416
--- /dev/null
@@ -0,0 +1,31 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+
+       sde_wb: qcom,wb-display@0 {
+               compatible = "qcom,wb-display";
+               cell-index = <0>;
+               label = "wb_display";
+       };
+
+       sde_hdmi: qcom,hdmi-display {
+               compatible = "qcom,hdmi-display";
+               label = "sde_hdmi";
+               qcom,display-type = "secondary";
+       };
+
+};
+
+&sde_kms {
+       connectors = <&sde_hdmi_tx &sde_hdmi &sde_wb>;
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-sde.dtsi b/arch/arm/boot/dts/qcom/msm8998-sde.dtsi
new file mode 100644 (file)
index 0000000..6db6ec2
--- /dev/null
@@ -0,0 +1,217 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+       sde_kms: qcom,sde_kms@c900000 {
+               compatible = "qcom,sde-kms";
+               reg = <0x0c900000 0x90000>,
+                     <0x0c9b0000 0x1040>;
+               reg-names = "mdp_phys", "vbif_phys";
+
+               /* clock and supply entries */
+               clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+                       <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+                       <&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+                       <&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+                       <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+                        <&clock_mmss clk_mmss_mdss_ahb_clk>,
+                        <&clock_mmss clk_mmss_mdss_axi_clk>,
+                        <&clock_mmss clk_mdp_clk_src>,
+                        <&clock_mmss clk_mmss_mdss_mdp_clk>,
+                        <&clock_mmss clk_mmss_mdss_vsync_clk>,
+                        <&clock_mmss clk_mmss_mdss_mdp_lut_clk>;
+               clock-names = "mmss_noc_axi_clk",
+                                       "mmss_noc_ahb_clk",
+                                       "mmss_smmu_ahb_clk",
+                                       "mmss_smmu_axi_clk",
+                           "mnoc_clk", "iface_clk", "bus_clk",
+                               "core_clk_src", "core_clk", "vsync_clk",
+                               "lut_clk";
+               clock-rate = <0 0 0 0 0 0 0 330000000 0 0 0 0>;
+               clock-max-rate = <0 0 0 0 0 0 412500000 412500000 0 0 0 0>;
+               qcom,sde-max-bw-low-kbps = <6700000>;
+               qcom,sde-max-bw-high-kbps = <6700000>;
+
+               /* interrupt config */
+               interrupt-parent = <&intc>;
+               interrupts = <0 83 0>;
+               interrupt-controller;
+               #interrupt-cells = <1>;
+               iommus = <&mmss_smmu 0>;
+
+               /* hw blocks */
+               qcom,sde-off = <0x1000>;
+               qcom,sde-ctl-off = <0x2000 0x2200 0x2400
+                                    0x2600 0x2800>;
+               qcom,sde-mixer-off = <0x45000 0x46000 0x47000
+                                     0x48000 0x49000 0x4a000>;
+               qcom,sde-dspp-off = <0x55000 0x57000>;
+               qcom,sde-wb-off = <0x66000>;
+               qcom,sde-wb-id = <2>;
+               qcom,sde-wb-xin-id = <6>;
+               qcom,sde-wb-clk-ctrl = <0x2bc 0x10>;
+               qcom,sde-intf-off = <0x6b000 0x6b800
+                                       0x6c000 0x6c800>;
+               qcom,sde-intf-type = "dp", "dsi", "dsi", "hdmi";
+               qcom,sde-pp-off = <0x71000 0x71800
+                                         0x72000 0x72800>;
+               qcom,sde-te2-off = <0x2000 0x2000 0x0 0x0>;
+               qcom,sde-cdm-off = <0x7a200>;
+               qcom,sde-dsc-off = <0x10000 0x10000 0x0 0x0>;
+               qcom,sde-intf-max-prefetch-lines = <0x15 0x15 0x15 0x15>;
+
+               qcom,sde-sspp-type =  "vig", "vig", "vig", "vig",
+                                               "dma", "dma", "dma", "dma",
+                                               "cursor", "cursor";
+
+               qcom,sde-sspp-off = <0x5000 0x7000 0x9000 0xb000
+                                               0x25000 0x27000 0x29000 0x2b000
+                                               0x35000 0x37000>;
+
+               qcom,sde-sspp-xin-id = <0 4 8 12 1 5 9 13 2 10>;
+
+               /* offsets are relative to "mdp_phys" + qcom,sde-off */
+               qcom,sde-sspp-clk-ctrl = <0x2ac 0x8>, <0x2b4 0x8>,
+                                 <0x2c4 0x8>, <0x2c4 0xc>, <0x3a8 0x10>,
+                                 <0x3b0 0x10>;
+
+               qcom,sde-qseed-type = "qseedv3";
+               qcom,sde-mixer-linewidth = <2560>;
+               qcom,sde-sspp-linewidth = <2560>;
+               qcom,sde-mixer-blendstages = <0x7>;
+               qcom,sde-highest-bank-bit = <0x2>;
+               qcom,sde-panic-per-pipe;
+               qcom,sde-has-cdp;
+               qcom,sde-has-src-split;
+               qcom,sde-sspp-danger-lut = <0x000f 0xffff 0x0000>;
+               qcom,sde-sspp-safe-lut = <0xfffc 0xff00 0xffff>;
+
+               qcom,sde-vbif-off = <0>;
+               qcom,sde-vbif-id = <0>;
+               qcom,sde-vbif-default-ot-rd-limit = <32>;
+               qcom,sde-vbif-default-ot-wr-limit = <32>;
+               qcom,sde-vbif-dynamic-ot-rd-limit = <62208000 2>,
+                       <124416000 4>, <248832000 16>;
+               qcom,sde-vbif-dynamic-ot-wr-limit = <62208000 2>,
+                       <124416000 4>, <248832000 16>;
+
+               vdd-supply = <&gdsc_mdss>;
+               gdsc-mmagic-mdss-supply = <&gdsc_bimc_smmu>;
+               qcom,sde-csc-type = "csc-10bit";
+
+               qcom,sde-sspp-vig-blocks {
+                       qcom,sde-vig-csc-off = <0x1a00>;
+                       qcom,sde-vig-qseed-off = <0xa00>;
+               };
+
+               qcom,platform-supply-entries {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       qcom,platform-supply-entry@0 {
+                               reg = <0>;
+                               qcom,supply-name = "gdsc-mmagic-mdss";
+                               qcom,supply-min-voltage = <0>;
+                               qcom,supply-max-voltage = <0>;
+                               qcom,supply-enable-load = <0>;
+                               qcom,supply-disable-load = <0>;
+                       };
+
+                       qcom,platform-supply-entry@1 {
+                               reg = <1>;
+                               qcom,supply-name = "vdd";
+                               qcom,supply-min-voltage = <0>;
+                               qcom,supply-max-voltage = <0>;
+                               qcom,supply-enable-load = <0>;
+                               qcom,supply-disable-load = <0>;
+                       };
+               };
+
+               smmu_kms_unsec: qcom,smmu_kms_unsec_cb {
+                       compatible = "qcom,smmu_mdp_unsec";
+                       iommus = <&mmss_smmu 0>;
+               };
+
+               /* data and reg bus scale settings */
+               qcom,sde-data-bus {
+                       qcom,msm-bus,name = "mdss_sde";
+                       qcom,msm-bus,num-cases = <3>;
+                       qcom,msm-bus,num-paths = <2>;
+                       qcom,msm-bus,vectors-KBps =
+                               <22 512 0 0>, <23 512 0 0>,
+                               <22 512 0 6400000>, <23 512 0 6400000>,
+                               <22 512 0 6400000>, <23 512 0 6400000>;
+               };
+               qcom,sde-reg-bus {
+                       qcom,msm-bus,name = "mdss_reg";
+                       qcom,msm-bus,num-cases = <4>;
+                       qcom,msm-bus,num-paths = <1>;
+                       qcom,msm-bus,active-only;
+                       qcom,msm-bus,vectors-KBps =
+                               <1 590 0 0>,
+                               <1 590 0 76800>,
+                               <1 590 0 160000>,
+                               <1 590 0 320000>;
+               };
+       };
+
+       sde_hdmi_tx: qcom,hdmi_tx_8998@c9a0000 {
+               cell-index = <0>;
+               compatible = "qcom,hdmi-tx-8998";
+               reg =   <0xc9a0000 0x50c>,
+                       <0x780000 0x621c>,
+                       <0xc9e0000 0x28>;
+               reg-names = "core_physical", "qfprom_physical", "hdcp_physical";
+               interrupt-parent = <&sde_kms>;
+               interrupts = <8 0>;
+               qcom,hdmi-tx-ddc-clk-gpio = <&tlmm 32 0>;
+               qcom,hdmi-tx-ddc-data-gpio = <&tlmm 33 0>;
+               qcom,hdmi-tx-hpd-gpio = <&tlmm 34 0>;
+               qcom,hdmi-tx-hpd5v-gpio = <&tlmm 133 0>;
+               pinctrl-names = "default", "sleep";
+               pinctrl-0 = <&mdss_hdmi_hpd_active
+                       &mdss_hdmi_ddc_active
+                       &mdss_hdmi_cec_active
+                       &mdss_hdmi_5v_active>;
+               pinctrl-1 = <&mdss_hdmi_hpd_suspend
+                       &mdss_hdmi_ddc_suspend
+                       &mdss_hdmi_cec_suspend
+                       &mdss_hdmi_5v_suspend>;
+               hpd-gdsc-supply = <&gdsc_mdss>;
+               qcom,supply-names = "hpd-gdsc";
+               qcom,min-voltage-level = <0>;
+               qcom,max-voltage-level = <0>;
+               qcom,enable-load = <0>;
+               qcom,disable-load = <0>;
+
+               qcom,msm_ext_disp = <&msm_ext_disp>;
+
+               clocks = <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+                        <&clock_mmss clk_mmss_mdss_ahb_clk>,
+                        <&clock_mmss clk_mmss_mdss_hdmi_clk>,
+                        <&clock_mmss clk_mmss_mdss_mdp_clk>,
+                        <&clock_mmss clk_mmss_mdss_hdmi_dp_ahb_clk>,
+                        <&clock_mmss clk_mmss_mdss_extpclk_clk>,
+                        <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+                        <&clock_mmss clk_mmss_misc_ahb_clk>,
+                        <&clock_mmss clk_mmss_mdss_axi_clk>;
+               clock-names = "hpd_mnoc_clk", "hpd_iface_clk",
+                               "hpd_core_clk", "hpd_mdp_core_clk",
+                               "hpd_alt_iface_clk", "core_extp_clk",
+                               "mnoc_clk","hpd_misc_ahb_clk",
+                               "hpd_bus_clk";
+
+               /*qcom,mdss-fb-map = <&mdss_fb2>;*/
+               qcom,pluggable;
+       };
+};
+#include "msm8998-sde-display.dtsi"
index ace7fc1..73debc3 100644 (file)
@@ -15,7 +15,7 @@
 
 #include "msm8998-v2.dtsi"
 #include "msm8998-qrd-skuk.dtsi"
-#include "msm8998-camera-sensor-skuk.dtsi"
+#include "msm8998-camera-sensor-skuk-hdk.dtsi"
 
 / {
        model = "Qualcomm Technologies, Inc. MSM 8998 SKUK HDK";
        qcom,board-id = <0x06000b 0x10>;
 };
 
+&soc {
+       sound-tavil {
+               qcom,msm-mbhc-hphl-swh = <0>;
+       };
+};
+
 &pmx_mdss {
        mdss_dsi_active: mdss_dsi_active {
                mux {
        qcom,mdss-dsi-bl-max-level = <255>;
        qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
 };
+
+&i2c_5 {
+       status = "okay";
+       st_fts@49 {
+               compatible = "st,fts";
+               reg = <0x49>;
+               interrupt-parent = <&tlmm>;
+               interrupts = <125 0x2008>;
+               vdd-supply = <&pm8998_l6>;
+               avdd-supply = <&pm8998_l28>;
+               pinctrl-names = "pmx_ts_active", "pmx_ts_suspend";
+               pinctrl-0 = <&ts_active>;
+               pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
+               st,irq-gpio = <&tlmm 125 0x2008>;
+               st,reset-gpio = <&tlmm 89 0x00>;
+               st,regulator_dvdd = "vdd";
+               st,regulator_avdd = "avdd";
+       };
+};
+
+&soc {
+       /* HDK835 do not use improveTouch. If do not remove this node,
+        * legacy TOUCH could not work.
+        */
+       /delete-node/hbtp;
+};
index b6ddd54..348faf9 100644 (file)
 
 &pm8998_s13 {
        regulator-min-microvolt = <568000>;
-       regulator-max-microvolt = <1056000>;
+       regulator-max-microvolt = <1136000>;
 };
 
 &pcie0 {
index 3e7caca..05a8816 100644 (file)
                        qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
                        qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
                        qcom,bus-governor = "msm-vidc-ddr";
-                       qcom,bus-range-kbps = <1000 3388000>;
+                       qcom,bus-range-kbps = <1000 4946000>;
                };
 
                venus_bus_vmem {
index 81bf177..f33b8bc 100644 (file)
                qcom,irq-mask = <0x200>;
                interrupts = <0 157 1>;
                label = "lpass";
+               qcom,qos-config = <&glink_qos_adsp>;
+               qcom,ramp-time = <0xaf>;
+       };
+
+       glink_qos_adsp: qcom,glink-qos-config-adsp {
+               compatible = "qcom,glink-qos-config";
+               qcom,flow-info = <0x3c 0x0>,
+                               <0x3c 0x0>,
+                               <0x3c 0x0>,
+                               <0x3c 0x0>;
+               qcom,mtu-size = <0x800>;
+               qcom,tput-stats-cycle = <0xa>;
        };
 
        qcom,glink-smem-native-xprt-dsps@86000000 {
 #include "msm8998-blsp.dtsi"
 #include "msm8998-audio.dtsi"
 #include "msm-smb138x.dtsi"
+#include "msm8998-sde.dtsi"
index 060923a..37fce82 100644 (file)
@@ -47,6 +47,9 @@
 };
 
 &soc {
+       qcom,msm-ssc-sensors {
+               compatible = "qcom,msm-ssc-sensors";
+       };
 };
 
 &pm660_charger {
index a071184..3c19dc7 100644 (file)
@@ -22,7 +22,7 @@
                qcom,panel-supply-entry@0 {
                        reg = <0>;
                        qcom,supply-name = "wqhd-vddio";
-                       qcom,supply-min-voltage = <1880000>;
+                       qcom,supply-min-voltage = <1800000>;
                        qcom,supply-max-voltage = <1950000>;
                        qcom,supply-enable-load = <32000>;
                        qcom,supply-disable-load = <80>;
index d236dbb..3fb6993 100644 (file)
@@ -62,7 +62,7 @@
                qcom,max-bandwidth-per-pipe-kbps = <1 3200000>, /* Default */
                                                   <2 2400000>; /* Camera */
                qcom,max-clk-rate = <412500000>;
-               qcom,mdss-default-ot-rd-limit = <40>;
+               qcom,mdss-default-ot-rd-limit = <32>;
                qcom,mdss-default-ot-wr-limit = <32>;
                qcom,mdss-dram-channels = <2>;
 
                qcom,mdss-prefill-post-scaler-buffer-pixels = <2560>;
                qcom,mdss-prefill-pingpong-buffer-pixels = <5120>;
 
+               qcom,mdss-reg-bus {
+                       /* Reg Bus Scale Settings */
+                       qcom,msm-bus,name = "mdss_reg";
+                       qcom,msm-bus,num-cases = <4>;
+                       qcom,msm-bus,num-paths = <1>;
+                       qcom,msm-bus,active-only;
+                       qcom,msm-bus,vectors-KBps =
+                               <1 590 0 0>,
+                               <1 590 0 76800>,
+                               <1 590 0 160000>,
+                               <1 590 0 320000>;
+               };
+
                qcom,mdss-pp-offsets {
                        qcom,mdss-sspp-mdss-igc-lut-off = <0x2000>;
                        qcom,mdss-sspp-vig-pcc-off = <0x1b00>;
                qcom,mdss-rot-vbif-qos-setting = <1 1 1 1>;
                qcom,mdss-rot-xin-id = <14 15>;
 
-               qcom,mdss-default-ot-rd-limit = <40>;
+               qcom,mdss-default-ot-rd-limit = <32>;
                qcom,mdss-default-ot-wr-limit = <32>;
        };
 };
index 69dcb26..d13e1db 100644 (file)
@@ -56,6 +56,9 @@
 };
 
 &soc {
+       qcom,msm-ssc-sensors {
+               compatible = "qcom,msm-ssc-sensors";
+       };
 };
 
 &pm660_fg {
index b1fed58..f80562e 100644 (file)
        };
 };
 
+&pm660_gpios {
+       /* GPIO 11 for home key */
+       gpio@ca00 {
+               status = "ok";
+               qcom,mode = <0>;
+               qcom,pull = <0>;
+               qcom,vin-sel = <0>;
+               qcom,src-sel = <0>;
+               qcom,out-strength = <1>;
+       };
+};
+
 &soc {
        gpio_keys {
                compatible = "gpio-keys";
                        gpio-key,wakeup;
                        debounce-interval = <15>;
                };
+
+               home {
+                       label = "home";
+                       gpios = <&pm660_gpios 11 0x1>;
+                       linux,input-type = <1>;
+                       linux,code = <102>;
+                       gpio-key,wakeup;
+                       debounce-interval = <15>;
+               };
+
        };
 };
index 13b5619..c16c830 100644 (file)
                                regulator-min-microvolt = <1>;
                                regulator-max-microvolt = <8>;
 
-                               qcom,cpr-fuse-corners = <4>;
+                               qcom,cpr-fuse-corners = <3>;
                                qcom,cpr-fuse-combos = <24>;
                                qcom,cpr-speed-bins = <3>;
                                qcom,cpr-speed-bin-corners = <8 8 8>;
                                qcom,cpr-corners = <8>;
-                               qcom,cpr-corner-fmax-map = <2 4 8>;
+                               qcom,cpr-corner-fmax-map = <2 4 8>;
 
                                qcom,cpr-voltage-ceiling =
-                                       <724000  724000  724000  788000  868000
-                                        924000  988000 1068000>;
+                                       <724000   724000   788000  788000
+                                        1068000 1068000  1068000 1068000>;
 
                                qcom,cpr-voltage-floor =
-                                       <588000  588000  596000  652000  712000
-                                        744000  784000  844000>;
+                                       <588000   588000   596000   652000
+                                        712000   744000   784000   844000>;
 
                                qcom,corner-frequencies =
                                        <300000000  614400000  883200000
                                regulator-min-microvolt = <1>;
                                regulator-max-microvolt = <11>;
 
-                               qcom,cpr-fuse-corners = <4>;
+                               qcom,cpr-fuse-corners = <5>;
                                qcom,cpr-fuse-combos = <24>;
                                qcom,cpr-speed-bins = <3>;
                                qcom,cpr-speed-bin-corners = <10 10 11>;
 
                                qcom,cpr-corner-fmax-map =
                                        /* Speed bin 0 */
-                                       <2 4 6 10>,
+                                       <2 4 6 10>,
 
                                        /* Speed bin 1 */
-                                       <2 4 6 10>,
+                                       <2 4 6 10>,
 
                                        /* Speed bin 2 */
-                                       <2 4 6 11>;
+                                       <2 4 6 11>;
 
                                qcom,cpr-voltage-ceiling =
                                        /* Speed bin 0 */
                                        <724000  724000  724000  788000
-                                        868000  868000  924000  988000
+                                        868000  868000  988000  988000
                                         988000 1068000>,
 
                                        /* Speed bin 1 */
                                        <724000  724000  724000  788000
-                                        868000  868000  924000  988000
+                                        868000  868000  988000  988000
                                         988000 1068000>,
 
                                        /* Speed bin 2 */
                                        <724000  724000  724000  788000
-                                        868000  868000  924000  988000
-                                        988000 1068000 1140000>;
+                                        868000  868000  988000  988000
+                                        988000 1140000 1140000>;
 
                                qcom,cpr-voltage-floor =
                                        /* Speed bin 0 */
                                        <300000000   787200000 1113600000
                                         1344000000 1516800000 1670400000
                                         1881600000 2016000000 2150400000
-                                        2208000000 2515200000>;
+                                        2380800000 2515200000>;
 
                                qcom,allow-voltage-interpolation;
                                qcom,allow-quotient-interpolation;
diff --git a/arch/arm/boot/dts/qcom/sdm630-usbc-audio-mtp.dts b/arch/arm/boot/dts/qcom/sdm630-usbc-audio-mtp.dts
new file mode 100644 (file)
index 0000000..eb089b5
--- /dev/null
@@ -0,0 +1,31 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm630.dtsi"
+#include "sdm630-mtp.dtsi"
+#include "sdm660-external-codec.dtsi"
+
+/ {
+       model = "Qualcomm Technologies, Inc. SDM 630 PM660 + PM660L, USBC Audio MTP";
+       compatible = "qcom,sdm630-mtp", "qcom,sdm630", "qcom,mtp";
+       qcom,board-id = <8 2>;
+       qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+                       <0x0001001b 0x0201011a 0x0 0x0>;
+};
+
+&tavil_snd {
+       qcom,msm-mbhc-moist-cfg = <0>, <0>, <3>;
+       qcom,msm-mbhc-usbc-audio-supported = <1>;
+};
index dc53fb0..a53094c 100644 (file)
@@ -52,6 +52,7 @@
                        reg = <0x0 0x100>;
                        enable-method = "psci";
                        qcom,limits-info = <&mitigation_profile0>;
+                       qcom,lmh-dcvs = <&lmh_dcvs1>;
                        qcom,ea = <&ea0>;
                        efficiency = <1126>;
                        next-level-cache = <&L2_1>;
@@ -80,6 +81,7 @@
                        reg = <0x0 0x101>;
                        enable-method = "psci";
                        qcom,limits-info = <&mitigation_profile1>;
+                       qcom,lmh-dcvs = <&lmh_dcvs1>;
                        qcom,ea = <&ea1>;
                        efficiency = <1126>;
                        next-level-cache = <&L2_1>;
                        reg = <0x0 0x102>;
                        enable-method = "psci";
                        qcom,limits-info = <&mitigation_profile2>;
+                       qcom,lmh-dcvs = <&lmh_dcvs1>;
                        qcom,ea = <&ea2>;
                        efficiency = <1126>;
                        next-level-cache = <&L2_1>;
                        reg = <0x0 0x103>;
                        enable-method = "psci";
                        qcom,limits-info = <&mitigation_profile3>;
+                       qcom,lmh-dcvs = <&lmh_dcvs1>;
                        qcom,ea = <&ea3>;
                        efficiency = <1126>;
                        next-level-cache = <&L2_1>;
                        reg = <0x0 0x0>;
                        enable-method = "psci";
                        qcom,limits-info = <&mitigation_profile4>;
+                       qcom,lmh-dcvs = <&lmh_dcvs0>;
                        qcom,ea = <&ea4>;
                        efficiency = <1024>;
                        next-level-cache = <&L2_0>;
                        reg = <0x0 0x1>;
                        enable-method = "psci";
                        qcom,limits-info = <&mitigation_profile4>;
+                       qcom,lmh-dcvs = <&lmh_dcvs0>;
                        qcom,ea = <&ea5>;
                        efficiency = <1024>;
                        next-level-cache = <&L2_0>;
                        reg = <0x0 0x2>;
                        enable-method = "psci";
                        qcom,limits-info = <&mitigation_profile4>;
+                       qcom,lmh-dcvs = <&lmh_dcvs0>;
                        qcom,ea = <&ea6>;
                        efficiency = <1024>;
                        next-level-cache = <&L2_0>;
                        reg = <0x0 0x3>;
                        enable-method = "psci";
                        qcom,limits-info = <&mitigation_profile4>;
+                       qcom,lmh-dcvs = <&lmh_dcvs0>;
                        qcom,ea = <&ea7>;
                        efficiency = <1024>;
                        next-level-cache = <&L2_0>;
                qcom,synchronous-cluster-map = <0 4 &CPU4 &CPU5 &CPU6 &CPU7>,
                                                <1 4 &CPU0 &CPU1 &CPU2 &CPU3>;
 
+               clock-names = "osm";
+               clocks = <&clock_cpu PERFCL_CLK>;
                qcom,cxip-lm-enable = <0>;
                qcom,vdd-restriction-temp = <5>;
                qcom,vdd-restriction-temp-hysteresis = <10>;
        };
 
        wdog: qcom,wdt@17817000 {
-               status = "disabled";
                compatible = "qcom,msm-watchdog";
                reg = <0x17817000 0x1000>;
                reg-names = "wdt-base";
        };
 
        clock_mmss: clock-controller@c8c0000 {
-               compatible = "qcom,mmcc-sdm660";
+               compatible = "qcom,mmcc-sdm630";
                reg = <0xc8c0000 0x40000>;
                vdd_mx_mmss-supply = <&pm660l_s5_level>;
                vdd_dig_mmss-supply = <&pm660l_s3_level>;
 
        clock_cpu: qcom,clk-cpu-630@179c0000 {
                compatible = "qcom,clk-cpu-osm-sdm630";
-               status = "disabled";
                reg = <0x179c0000 0x4000>, <0x17916000 0x1000>,
                        <0x17816000 0x1000>, <0x179d1000 0x1000>,
                        <0x00784130 0x8>;
                reg-names = "osm", "pwrcl_pll", "perfcl_pll",
                        "apcs_common", "perfcl_efuse";
 
+               vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
+               vdd-perfcl-supply = <&apc1_perfcl_vreg>;
+
                interrupts = <GIC_SPI 35 IRQ_TYPE_EDGE_RISING>,
                        <GIC_SPI 36 IRQ_TYPE_EDGE_RISING>;
                interrupt-names = "pwrcl-irq", "perfcl-irq";
        };
 
        qcom,icnss@18800000 {
-               status = "disabled";
                compatible = "qcom,icnss";
                reg = <0x18800000 0x800000>,
-                     <0x10ac000 0x20>;
-               reg-names = "membase", "mpm_config";
+                     <0xa0000000 0x10000000>,
+                     <0xb0000000 0x10000>;
+               reg-names = "membase", "smmu_iova_base", "smmu_iova_ipa";
+               iommus = <&anoc2_smmu 0x1a00>,
+                        <&anoc2_smmu 0x1a01>;
                interrupts = <0 413 0>,   /* CE0 */
                             <0 414 0>,   /* CE1 */
                             <0 415 0>,   /* CE2 */
        status = "ok";
 };
 
+&clock_cpu {
+       lmh_dcvs0: qcom,limits-dcvs@0 {
+               compatible = "qcom,msm-hw-limits";
+               interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+       };
+
+       lmh_dcvs1: qcom,limits-dcvs@1 {
+               compatible = "qcom,msm-hw-limits";
+               interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+       };
+};
+
 &blsp2_uart1_hs {
        status = "ok";
 };
 
+&pm660l_gpios {
+       /* GPIO 7 for VOL_UP */
+       gpio@c600 {
+               status = "okay";
+               qcom,mode = <0>;
+               qcom,pull = <0>;
+               qcom,vin-sel = <0>;
+               qcom,src-sel = <0>;
+               qcom,out-strength = <1>;
+       };
+};
+
 &soc {
        gpio_keys {
                status = "okay";
                        linux,code = <0x2fe>;
                        debounce-interval = <15>;
                };
+
+               vol_up {
+                       label = "volume_up";
+                       gpios = <&pm660l_gpios 7 0x1>;
+                       linux,input-type = <1>;
+                       linux,code = <115>;
+                       gpio-key,wakeup;
+                       debounce-interval = <15>;
+               };
        };
 };
index e1244a4..0b216a1 100644 (file)
                        qcom,max-frequency = <24000000>;
                        qcom,mem-base-addr = <0x100000>;
                };
+
+               wcd_usbc_analog_en1_gpio: msm_cdc_pinctrl_usbc_audio_en1 {
+                       compatible = "qcom,msm-cdc-pinctrl";
+                       pinctrl-names = "aud_active", "aud_sleep";
+                       pinctrl-0 = <&wcd_usbc_analog_en1_active>;
+                       pinctrl-1 = <&wcd_usbc_analog_en1_idle>;
+               };
+
+               wcd_usbc_analog_en2n_gpio: msm_cdc_pinctrl_usbc_audio_en2 {
+                       compatible = "qcom,msm-cdc-pinctrl";
+                       pinctrl-names = "aud_active", "aud_sleep";
+                       pinctrl-0 = <&wcd_usbc_analog_en2n_active>;
+                       pinctrl-1 = <&wcd_usbc_analog_en2n_idle>;
+               };
        };
 };
 
                status = "disabled";
                compatible = "qcom,pmic-analog-codec";
                reg = <0xf000 0x200>;
+               #address-cells = <2>;
+               #size-cells = <0>;
                interrupt-parent = <&spmi_bus>;
                interrupts = <0x3 0xf0 0x0 IRQ_TYPE_NONE>,
                                <0x3 0xf0 0x1 IRQ_TYPE_NONE>,
                                compatible = "qcom,wsa881x";
                                reg = <0x0 0x20170211>;
                                qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
-                               qcom,cache-always;
                        };
 
                        wsa881x_212_en: wsa881x_en@20170212 {
                                compatible = "qcom,wsa881x";
                                reg = <0x0 0x20170212>;
                                qcom,spkr-sd-n-node = <&wsa_spkr_en2>;
-                               qcom,cache-always;
                        };
 
                        wsa881x_213_en: wsa881x_en@21170213 {
                                compatible = "qcom,wsa881x";
                                reg = <0x0 0x21170213>;
                                qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
-                               qcom,cache-always;
                        };
 
                        wsa881x_214_en: wsa881x_en@21170214 {
                                compatible = "qcom,wsa881x";
                                reg = <0x0 0x21170214>;
                                qcom,spkr-sd-n-node = <&wsa_spkr_en2>;
-                               qcom,cache-always;
                        };
                };
        };
index d6a3a14..10afca1 100644 (file)
                        "camss_vfe_ahb_clk", "camss_vfe_vbif_ahb_clk",
                        "camss_vfe_vbif_axi_clk", "vfe_clk_src",
                        "camss_csi_vfe_clk";
-               qcom,clock-rates = <0 0 0 0 0 0 0 0 0 0 0 256000000 0
+               qcom,clock-rates = <0 0 0 0 0 0 0 0 0 0 0 404000000 0
                                        0 0 0 0 0 0 0 0 0 0 0 480000000 0
                                        0 0 0 0 0 0 0 0 0 0 0 576000000 0>;
                status = "ok";
                        "camss_vfe_ahb_clk", "camss_vfe_vbif_ahb_clk",
                        "camss_vfe_vbif_axi_clk", "vfe_clk_src",
                        "camss_csi_vfe_clk";
-               qcom,clock-rates = <0 0 0 0 0 0 0 0 0 0 0 256000000 0
+               qcom,clock-rates = <0 0 0 0 0 0 0 0 0 0 0 404000000 0
                                        0 0 0 0 0 0 0 0 0 0 0 480000000 0
                                        0 0 0 0 0 0 0 0 0 0 0 576000000 0>;
                status = "ok";
index 05b7973..dc57ee6 100644 (file)
                        tx-fifo-resize;
                        snps,nominal-elastic-buffer;
                        snps,disable-clk-gating;
+                       snps,has-lpm-erratum;
                        snps,is-utmi-l1-suspend;
                        snps,hird-threshold = /bits/ 8 <0x0>;
                };
index 0fc24dc..9c3862c 100644 (file)
                23 18 07 08 04 03 04 a0];
        qcom,esd-check-enabled;
        qcom,mdss-dsi-panel-status-check-mode = "bta_check";
-       qcom,mdss-dsi-min-refresh-rate = <53>;
-       qcom,mdss-dsi-max-refresh-rate = <60>;
-       qcom,mdss-dsi-pan-enable-dynamic-fps;
-       qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
 };
 
 &dsi_dual_nt35597_truly_cmd {
index 8116f56..6e32c12 100644 (file)
@@ -61,7 +61,7 @@
                qcom,max-bandwidth-high-kbps = <6600000>;
                qcom,max-bandwidth-per-pipe-kbps = <3100000>;
                qcom,max-clk-rate = <412500000>;
-               qcom,mdss-default-ot-rd-limit = <40>;
+               qcom,mdss-default-ot-rd-limit = <32>;
                qcom,mdss-default-ot-wr-limit = <32>;
                qcom,mdss-dram-channels = <2>;
 
                qcom,mdss-prefill-post-scaler-buffer-pixels = <2560>;
                qcom,mdss-prefill-pingpong-buffer-pixels = <5120>;
 
+               qcom,mdss-reg-bus {
+                       /* Reg Bus Scale Settings */
+                       qcom,msm-bus,name = "mdss_reg";
+                       qcom,msm-bus,num-cases = <4>;
+                       qcom,msm-bus,num-paths = <1>;
+                       qcom,msm-bus,active-only;
+                       qcom,msm-bus,vectors-KBps =
+                               <1 590 0 0>,
+                               <1 590 0 76800>,
+                               <1 590 0 160000>,
+                               <1 590 0 320000>;
+               };
+
                qcom,mdss-pp-offsets {
                        qcom,mdss-sspp-mdss-igc-lut-off = <0x2000>;
                        qcom,mdss-sspp-vig-pcc-off = <0x1b00>;
                qcom,mdss-rot-vbif-qos-setting = <1 1 1 1>;
                qcom,mdss-rot-xin-id = <14 15>;
 
-               qcom,mdss-default-ot-rd-limit = <40>;
+               qcom,mdss-default-ot-rd-limit = <32>;
                qcom,mdss-default-ot-wr-limit = <32>;
        };
 
index 24bab18..941fe80 100644 (file)
                qcom,parallel-charger;
                qcom,float-voltage-mv = <4400>;
                qcom,recharge-mv = <100>;
-               qcom,parallel-en-pin-polarity = <0>;
+               qcom,parallel-en-pin-polarity = <1>;
        };
 };
index 7ecab69..c6e2f43 100644 (file)
                                };
                        };
                };
+               /* USB C analog configuration */
+               wcd_usbc_analog_en1 {
+                       wcd_usbc_analog_en1_idle: wcd_usbc_ana_en1_idle {
+                               mux {
+                                       pins = "gpio80";
+                                       function = "gpio";
+                               };
+
+                               config {
+                                       pins = "gpio80";
+                                       drive-strength = <2>;
+                                       bias-pull-down;
+                                       output-low;
+                               };
+                       };
+
+                       wcd_usbc_analog_en1_active: wcd_usbc_ana_en1_active {
+                               mux {
+                                       pins = "gpio80";
+                                       function = "gpio";
+                               };
+
+                               config {
+                                       pins = "gpio80";
+                                       drive-strength = <2>;
+                                       bias-disable;
+                                       output-high;
+                               };
+                       };
+               };
+
+               wcd_usbc_analog_en2n {
+                       wcd_usbc_analog_en2n_idle: wcd_usbc_ana_en2n_idle {
+                               mux {
+                                       pins = "gpio77";
+                                       function = "gpio";
+                               };
+
+                               config {
+                                       pins = "gpio77";
+                                       drive-strength = <2>;
+                                       bias-disable;
+                                       output-high;
+                               };
+                       };
+
+                       wcd_usbc_analog_en2n_active: wcd_usbc_ana_en2n_active {
+                               mux {
+                                       pins = "gpio77";
+                                       function = "gpio";
+                               };
+
+                               config {
+                                       pins = "gpio77";
+                                       drive-strength = <2>;
+                                       bias-pull-down;
+                                       output-low;
+                               };
+                       };
+               };
 
                sdw_clk_pin {
                        sdw_clk_sleep: sdw_clk_sleep {
index 6a9fc5b..1624975 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
                                reg = <0>;
                                label = "system-wfi";
                                qcom,psci-mode = <0x0>;
-                               qcom,latency-us = <100>;
-                               qcom,ss-power = <725>;
-                               qcom,energy-overhead = <85000>;
-                               qcom,time-overhead = <120>;
+                               qcom,latency-us = <1654>;
+                               qcom,ss-power = <219>;
+                               qcom,energy-overhead = <98750>;
+                               qcom,time-overhead = <2294>;
                        };
 
                        qcom,pm-cluster-level@1{ /* E3 */
                                reg = <1>;
                                label = "system-pc";
                                qcom,psci-mode = <0x3>;
-                               qcom,latency-us = <350>;
-                               qcom,ss-power = <530>;
-                               qcom,energy-overhead = <160000>;
-                               qcom,time-overhead = <550>;
+                               qcom,latency-us = <4506>;
+                               qcom,ss-power = <88>;
+                               qcom,energy-overhead = <1228536>;
+                               qcom,time-overhead = <15337>;
                                qcom,min-child-idx = <3>;
                                qcom,is-reset;
                                qcom,notify-rpm;
                                        reg = <0>;
                                        label = "pwr-l2-wfi";
                                        qcom,psci-mode = <0x1>;
-                                       qcom,latency-us = <40>;
-                                       qcom,ss-power = <740>;
-                                       qcom,energy-overhead = <65000>;
-                                       qcom,time-overhead = <85>;
+                                       qcom,latency-us = <51>;
+                                       qcom,ss-power = <250>;
+                                       qcom,energy-overhead = <83452>;
+                                       qcom,time-overhead = <89>;
                                };
                                qcom,pm-cluster-level@1{ /* D2D */
                                        reg = <1>;
                                        label = "pwr-l2-dynret";
                                        qcom,psci-mode = <0x2>;
-                                       qcom,latency-us = <60>;
-                                       qcom,ss-power = <700>;
-                                       qcom,energy-overhead = <85000>;
-                                       qcom,time-overhead = <85>;
+                                       qcom,latency-us = <421>;
+                                       qcom,ss-power = <235>;
+                                       qcom,energy-overhead = <219416>;
+                                       qcom,time-overhead = <781>;
                                        qcom,min-child-idx = <1>;
                                };
 
                                        reg = <2>;
                                        label = "pwr-l2-ret";
                                        qcom,psci-mode = <0x3>;
-                                       qcom,latency-us = <100>;
-                                       qcom,ss-power = <640>;
-                                       qcom,energy-overhead = <135000>;
-                                       qcom,time-overhead = <85>;
+                                       qcom,latency-us = <517>;
+                                       qcom,ss-power = <226>;
+                                       qcom,energy-overhead= <299405>;
+                                       qcom,time-overhead = <922>;
                                        qcom,min-child-idx = <2>;
                                };
 
                                        reg = <3>;
                                        label = "pwr-l2-pc";
                                        qcom,psci-mode = <0x4>;
-                                       qcom,latency-us = <700>;
-                                       qcom,ss-power = <450>;
-                                       qcom,energy-overhead = <210000>;
-                                       qcom,time-overhead = <11500>;
+                                       qcom,latency-us = <2118>;
+                                       qcom,ss-power = <210>;
+                                       qcom,energy-overhead = <833056>;
+                                       qcom,time-overhead = <2918>;
                                        qcom,min-child-idx = <2>;
                                        qcom,is-reset;
                                };
                                                reg = <0>;
                                                qcom,spm-cpu-mode = "wfi";
                                                qcom,psci-cpu-mode = <0x1>;
-                                               qcom,latency-us = <20>;
-                                               qcom,ss-power = <750>;
-                                               qcom,energy-overhead = <32000>;
-                                               qcom,time-overhead = <60>;
+                                               qcom,latency-us = <42>;
+                                               qcom,ss-power = <250>;
+                                               qcom,energy-overhead = <30562>;
+                                               qcom,time-overhead = <91>;
                                        };
 
                                        qcom,pm-cpu-level@1 { /* C2D */
                                                reg = <1>;
                                                qcom,psci-cpu-mode = <2>;
                                                qcom,spm-cpu-mode = "ret";
-                                               qcom,latency-us = <40>;
-                                               qcom,ss-power = <730>;
-                                               qcom,energy-overhead = <85500>;
-                                               qcom,time-overhead = <110>;
+                                               qcom,latency-us = <63>;
+                                               qcom,ss-power = <245>;
+                                               qcom,energy-overhead = <49239>;
+                                               qcom,time-overhead = <172>;
                                        };
 
                                        qcom,pm-cpu-level@2 {  /* C3 */
                                                reg = <2>;
                                                qcom,spm-cpu-mode = "pc";
                                                qcom,psci-cpu-mode = <0x3>;
-                                               qcom,latency-us = <80>;
-                                               qcom,ss-power = <700>;
-                                               qcom,energy-overhead = <126480>;
-                                               qcom,time-overhead = <160>;
+                                               qcom,latency-us = <376>;
+                                               qcom,ss-power = <237>;
+                                               qcom,energy-overhead = <181018>;
+                                               qcom,time-overhead = <666>;
                                                qcom,is-reset;
                                        };
                                };
                                        reg = <0>;
                                        label = "perf-l2-wfi";
                                        qcom,psci-mode = <0x1>;
-                                       qcom,latency-us = <40>;
-                                       qcom,ss-power = <740>;
-                                       qcom,energy-overhead = <70000>;
-                                       qcom,time-overhead = <80>;
+                                       qcom,latency-us = <51>;
+                                       qcom,ss-power = <283>;
+                                       qcom,energy-overhead = <83083>;
+                                       qcom,time-overhead = <89>;
                                };
 
                                qcom,pm-cluster-level@1{ /* D2D */
                                        reg = <1>;
                                        label = "perf-l2-dynret";
                                        qcom,psci-mode = <2>;
-                                       qcom,latency-us = <60>;
-                                       qcom,ss-power = <700>;
-                                       qcom,energy-overhead = <85000>;
-                                       qcom,time-overhead = <85>;
+                                       qcom,latency-us = <345>;
+                                       qcom,ss-power = <254>;
+                                       qcom,energy-overhead = <198349>;
+                                       qcom,time-overhead = <659>;
                                        qcom,min-child-idx = <1>;
                                };
 
                                        reg = <2>;
                                        label = "perf-l2-ret";
                                        qcom,psci-mode = <3>;
-                                       qcom,latency-us = <100>;
-                                       qcom,ss-power = <640>;
-                                       qcom,energy-overhead = <135000>;
-                                       qcom,time-overhead = <85>;
+                                       qcom,latency-us = <419>;
+                                       qcom,ss-power = <244>;
+                                       qcom,energy-overhead = <281921>;
+                                       qcom,time-overhead = <737>;
                                        qcom,min-child-idx = <2>;
                                };
 
                                        reg = <3>;
                                        label = "perf-l2-pc";
                                        qcom,psci-mode = <0x4>;
-                                       qcom,latency-us = <800>;
-                                       qcom,ss-power = <450>;
-                                       qcom,energy-overhead = <240000>;
-                                       qcom,time-overhead = <11500>;
+                                       qcom,latency-us = <1654>;
+                                       qcom,ss-power = <219>;
+                                       qcom,energy-overhead = <815573>;
+                                       qcom,time-overhead = <2294>;
                                        qcom,min-child-idx = <2>;
                                        qcom,is-reset;
                                };
                                                reg = <0>;
                                                qcom,spm-cpu-mode = "wfi";
                                                qcom,psci-cpu-mode = <0x1>;
-                                               qcom,latency-us = <25>;
-                                               qcom,ss-power = <750>;
-                                               qcom,energy-overhead = <37000>;
-                                               qcom,time-overhead = <50>;
+                                               qcom,latency-us = <39>;
+                                               qcom,ss-power = <292>;
+                                               qcom,energy-overhead = <37558>;
+                                               qcom,time-overhead = <68>;
                                        };
 
                                        qcom,pm-cpu-level@1 { /* C2D */
                                                reg = <1>;
                                                qcom,psci-cpu-mode = <2>;
                                                qcom,spm-cpu-mode = "ret";
-                                               qcom,latency-us = <40>;
-                                               qcom,ss-power = <730>;
-                                               qcom,energy-overhead = <85500>;
-                                               qcom,time-overhead = <110>;
+                                               qcom,latency-us = <60>;
+                                               qcom,ss-power = <275>;
+                                               qcom,energy-overhead = <70737>;
+                                               qcom,time-overhead = <181>;
                                        };
 
                                        qcom,pm-cpu-level@2 { /* C3 */
                                                reg = <2>;
                                                qcom,spm-cpu-mode = "pc";
                                                qcom,psci-cpu-mode = <0x3>;
-                                               qcom,latency-us = <80>;
-                                               qcom,ss-power = <700>;
-                                               qcom,energy-overhead = <136480>;
-                                               qcom,time-overhead = <160>;
+                                               qcom,latency-us = <324>;
+                                               qcom,ss-power = <263>;
+                                               qcom,energy-overhead = <213213>;
+                                               qcom,time-overhead = <621>;
                                                qcom,is-reset;
                                        };
                                };
index 49b58c8..b2f6ac7 100644 (file)
 };
 
 &soc {
+       qcom,msm-ssc-sensors {
+               compatible = "qcom,msm-ssc-sensors";
+       };
 };
 
 &pm660_gpios {
        qcom,fg-cutoff-voltage = <3700>;
 };
 
+&i2c_2 {
+       status = "ok";
+       smb1351-charger@1d {
+               compatible = "qcom,smb1351-charger";
+               reg = <0x1d>;
+               qcom,parallel-charger;
+               qcom,float-voltage-mv = <4400>;
+               qcom,recharge-mv = <100>;
+               qcom,parallel-en-pin-polarity = <0>;
+       };
+};
+
 &tasha_snd {
        qcom,model = "sdm660-tasha-skus-snd-card";
        qcom,audio-routing =
                "MIC BIAS2", "Headset Mic",
                "DMIC0", "MIC BIAS1",
                "MIC BIAS1", "Digital Mic0",
-               "DMIC2", "MIC BIAS3",
-               "MIC BIAS3", "Digital Mic2",
-               "DMIC4", "MIC BIAS3",
-               "MIC BIAS3", "Digital Mic4",
+               "DMIC3", "MIC BIAS3",
+               "MIC BIAS3", "Digital Mic3",
+               "DMIC5", "MIC BIAS3",
+               "MIC BIAS3", "Digital Mic5",
                "SpkrLeft IN", "SPK1 OUT";
        qcom,msm-mbhc-hphl-swh = <1>;
        /delete-property/ qcom,us-euro-gpios;
index 44796ee..47db57b 100644 (file)
                qcom,cpr-panic-reg-name-list =
                        "PWR_CPRH_STATUS", "APCLUS0_L2_SAW4_PMIC_STS";
 
+               qcom,cpr-enable;
+               qcom,cpr-hw-closed-loop;
+
                thread@0 {
                        qcom,cpr-thread-id = <0>;
                        qcom,cpr-consecutive-up = <0>;
                                qcom,allow-voltage-interpolation;
                                qcom,allow-quotient-interpolation;
                                qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+                               qcom,cpr-ro-scaling-factor =
+                                       <3600 3600 3830 2430 2520 2700 1790
+                                        1760 1970 1880 2110 2010 2510 4900
+                                        4370 4780>,
+                                       <3600 3600 3830 2430 2520 2700 1790
+                                        1760 1970 1880 2110 2010 2510 4900
+                                        4370 4780>,
+                                       <3600 3600 3830 2430 2520 2700 1790
+                                        1760 1970 1880 2110 2010 2510 4900
+                                        4370 4780>,
+                                       <3600 3600 3830 2430 2520 2700 1790
+                                        1760 1970 1880 2110 2010 2510 4900
+                                        4370 4780>,
+                                       <3600 3600 3830 2430 2520 2700 1790
+                                        1760 1970 1880 2110 2010 2510 4900
+                                        4370 4780>;
+
+                               qcom,cpr-open-loop-voltage-fuse-adjustment =
+                                       <13000 31000 27000 37000 21000>;
+
+                               qcom,cpr-closed-loop-voltage-fuse-adjustment =
+                                       <0     0     0     0      8000>;
+
+                               qcom,cpr-floor-to-ceiling-max-range =
+                                       <32000  32000  32000  40000  40000
+                                        40000  40000  40000>;
                        };
                };
        };
                qcom,cpr-panic-reg-name-list =
                        "PERF_CPRH_STATUS", "APCLUS1_L2_SAW4_PMIC_STS";
 
+               qcom,cpr-enable;
+               qcom,cpr-hw-closed-loop;
+
                thread@0 {
                        qcom,cpr-thread-id = <0>;
                        qcom,cpr-consecutive-up = <0>;
                                qcom,allow-voltage-interpolation;
                                qcom,allow-quotient-interpolation;
                                qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+                               qcom,cpr-ro-scaling-factor =
+                                       <4040 4230 0000 2210 2560 2450 2230
+                                        2220 2410 2300 2560 2470 1600 3120
+                                        2620 2280>,
+                                       <4040 4230 0000 2210 2560 2450 2230
+                                        2220 2410 2300 2560 2470 1600 3120
+                                        2620 2280>,
+                                       <4040 4230 0000 2210 2560 2450 2230
+                                        2220 2410 2300 2560 2470 1600 3120
+                                        2620 2280>,
+                                       <4040 4230 0000 2210 2560 2450 2230
+                                        2220 2410 2300 2560 2470 1600 3120
+                                        2620 2280>,
+                                       <4040 4230 0000 2210 2560 2450 2230
+                                        2220 2410 2300 2560 2470 1600 3120
+                                        2620 2280>;
+
+                               qcom,cpr-open-loop-voltage-fuse-adjustment =
+                                       <31000 39000 53000 40000 29000>;
+
+                               qcom,cpr-closed-loop-voltage-fuse-adjustment =
+                                       <0      3000 19000 23000 28000>;
+
+                               qcom,cpr-floor-to-ceiling-max-range =
+                                       <40000  40000  40000  40000
+                                        40000  40000  40000>;
                        };
                };
        };
diff --git a/arch/arm/boot/dts/qcom/sdm660-usbc-audio-mtp.dts b/arch/arm/boot/dts/qcom/sdm660-usbc-audio-mtp.dts
new file mode 100644 (file)
index 0000000..dff55d8
--- /dev/null
@@ -0,0 +1,31 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm660.dtsi"
+#include "sdm660-mtp.dtsi"
+#include "sdm660-external-codec.dtsi"
+
+/ {
+       model = "Qualcomm Technologies, Inc. SDM 660 PM660 + PM660L, USBC Audio MTP";
+       compatible = "qcom,sdm660-mtp", "qcom,sdm660", "qcom,mtp";
+       qcom,board-id = <8 2>;
+       qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+                       <0x0001001b 0x0201011a 0x0 0x0>;
+};
+
+&tavil_snd {
+       qcom,msm-mbhc-moist-cfg = <0>, <0>, <3>;
+       qcom,msm-mbhc-usbc-audio-supported = <1>;
+};
index 544f04a..9ba63be 100644 (file)
                interrupts = <0 184 0>, <0 430 0>;
                interrupt-names = "tsens-upper-lower", "tsens-critical";
                qcom,client-id = <0 1 2 3 4 5 6 7 8 9 10 11 12 13>;
-               qcom,sensor-id = <0 10 11 4 5 6 7 8 13 1 3 12 9 2>;
+               qcom,sensor-id = <0 10 11 4 5 6 7 8 13 2 3 12 9 1>;
                qcom,sensors = <14>;
                qcom,slope = <3200 3200 3200 3200 3200 3200 3200 3200
                                        3200 3200 3200 3200 3200 3200>;
                qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3>;
                qcom,target-dev = <&memlat_cpu0>;
                qcom,core-dev-table =
-                       < 633600 762 >,
+                       < 902400 762 >,
                        < 1401600 2086 >,
                        < 1881600 3879 >;
        };
index 1e965b2..cb20c31 100644 (file)
@@ -620,6 +620,8 @@ CONFIG_CORESIGHT_EVENT=y
 CONFIG_CORESIGHT_QCOM_REPLICATOR=y
 CONFIG_CORESIGHT_STM=y
 CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_CTI_SAVE_DISABLE=y
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
 CONFIG_CORESIGHT_QPDI=y
@@ -638,4 +640,5 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
 CONFIG_CRYPTO_DEV_QCEDEV=y
 CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
 CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
 CONFIG_XZ_DEC=y
index d8bdeb7..37ace58 100644 (file)
@@ -680,4 +680,5 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
 CONFIG_CRYPTO_DEV_QCEDEV=y
 CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
 CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
 CONFIG_XZ_DEC=y
index f64ca9a..5c07a8b 100644 (file)
@@ -287,6 +287,8 @@ CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
 CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
 CONFIG_SECURE_TOUCH=y
+CONFIG_TOUCHSCREEN_ST=y
+CONFIG_TOUCHSCREEN_ST_I2C=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_HBTP_INPUT=y
 CONFIG_INPUT_QPNP_POWER_ON=y
@@ -628,6 +630,7 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
 CONFIG_CRYPTO_DEV_QCEDEV=y
 CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
 CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
 CONFIG_CRYPTO_SHA2_ARM64_CE=y
index f08cd3b..28efc2f 100644 (file)
@@ -289,6 +289,8 @@ CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
 CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
 CONFIG_SECURE_TOUCH=y
+CONFIG_TOUCHSCREEN_ST=y
+CONFIG_TOUCHSCREEN_ST_I2C=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_HBTP_INPUT=y
 CONFIG_INPUT_QPNP_POWER_ON=y
@@ -694,6 +696,7 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
 CONFIG_CRYPTO_DEV_QCEDEV=y
 CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
 CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
 CONFIG_CRYPTO_SHA2_ARM64_CE=y
index d00772c..29b2c75 100644 (file)
@@ -627,6 +627,8 @@ CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
 CONFIG_CORESIGHT_QCOM_REPLICATOR=y
 CONFIG_CORESIGHT_STM=y
 CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_CTI_SAVE_DISABLE=y
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
 CONFIG_CORESIGHT_QPDI=y
@@ -645,6 +647,7 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
 CONFIG_CRYPTO_DEV_QCEDEV=y
 CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
 CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
 CONFIG_CRYPTO_SHA2_ARM64_CE=y
index 207f3b2..adfcc6c 100644 (file)
@@ -233,6 +233,7 @@ CONFIG_NFC_NQ=y
 CONFIG_IPC_ROUTER=y
 CONFIG_IPC_ROUTER_SECURITY=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
 CONFIG_DMA_CMA=y
 CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=y
@@ -714,6 +715,7 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
 CONFIG_CRYPTO_DEV_QCEDEV=y
 CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
 CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
 CONFIG_CRYPTO_SHA2_ARM64_CE=y
index 28ac694..f30d601 100644 (file)
@@ -17,6 +17,10 @@ AFLAGS_system_certificates.o := -I$(srctree)
 quiet_cmd_extract_certs  = EXTRACT_CERTS   $(patsubst "%",%,$(2))
       cmd_extract_certs  = scripts/extract-cert $(2) $@ || ( rm $@; exit 1)
 
+ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYS),"verity.x509.pem")
+SYSTEM_TRUSTED_KEYS_SRCPREFIX := $(srctree)/certs/
+endif
+
 targets += x509_certificate_list
 $(obj)/x509_certificate_list: scripts/extract-cert $(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(SYSTEM_TRUSTED_KEYS_FILENAME) FORCE
        $(call if_changed,extract_certs,$(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(CONFIG_SYSTEM_TRUSTED_KEYS))
diff --git a/certs/verity.x509.pem b/certs/verity.x509.pem
new file mode 100644 (file)
index 0000000..86399c3
--- /dev/null
@@ -0,0 +1,24 @@
+-----BEGIN CERTIFICATE-----
+MIID/TCCAuWgAwIBAgIJAJcPmDkJqolJMA0GCSqGSIb3DQEBBQUAMIGUMQswCQYD
+VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4g
+VmlldzEQMA4GA1UECgwHQW5kcm9pZDEQMA4GA1UECwwHQW5kcm9pZDEQMA4GA1UE
+AwwHQW5kcm9pZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTAe
+Fw0xNDExMDYxOTA3NDBaFw00MjAzMjQxOTA3NDBaMIGUMQswCQYDVQQGEwJVUzET
+MBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4gVmlldzEQMA4G
+A1UECgwHQW5kcm9pZDEQMA4GA1UECwwHQW5kcm9pZDEQMA4GA1UEAwwHQW5kcm9p
+ZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAOjreE0vTVSRenuzO9vnaWfk0eQzYab0gqpi
+6xAzi6dmD+ugoEKJmbPiuE5Dwf21isZ9uhUUu0dQM46dK4ocKxMRrcnmGxydFn6o
+fs3ODJMXOkv2gKXL/FdbEPdDbxzdu8z3yk+W67udM/fW7WbaQ3DO0knu+izKak/3
+T41c5uoXmQ81UNtAzRGzGchNVXMmWuTGOkg6U+0I2Td7K8yvUMWhAWPPpKLtVH9r
+AL5TzjYNR92izdKcz3AjRsI3CTjtpiVABGeX0TcjRSuZB7K9EK56HV+OFNS6I1NP
+jdD7FIShyGlqqZdUOkAUZYanbpgeT5N7QL6uuqcGpoTOkalu6kkCAwEAAaNQME4w
+HQYDVR0OBBYEFH5DM/m7oArf4O3peeKO0ZIEkrQPMB8GA1UdIwQYMBaAFH5DM/m7
+oArf4O3peeKO0ZIEkrQPMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB
+AHO3NSvDE5jFvMehGGtS8BnFYdFKRIglDMc4niWSzhzOVYRH4WajxdtBWc5fx0ix
+NF/+hVKVhP6AIOQa+++sk+HIi7RvioPPbhjcsVlZe7cUEGrLSSveGouQyc+j0+m6
+JF84kszIl5GGNMTnx0XRPO+g8t6h5LWfnVydgZfpGRRg+WHewk1U2HlvTjIceb0N
+dcoJ8WKJAFWdcuE7VIm4w+vF/DYX/A2Oyzr2+QRhmYSv1cusgAeC1tvH4ap+J1Lg
+UnOu5Kh/FqPLLSwNVQp4Bu7b9QFfqK8Moj84bj88NqRGZgDyqzuTrFxn6FW7dmyA
+yttuAJAEAymk1mipd9+zp38=
+-----END CERTIFICATE-----
index 87c7439..5fa3c6b 100644 (file)
@@ -1,6 +1,6 @@
 /*
  *
- *  Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *  Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *  Copyright (C) 2000-2004 Russell King
  *
  * This program is free software; you can redistribute it and/or modify
@@ -296,6 +296,7 @@ void removed_free(struct device *dev, size_t size, void *cpu_addr,
                                        attrs);
        struct removed_region *dma_mem = dev->removed_mem;
 
+       size = PAGE_ALIGN(size);
        if (!no_kernel_mapping)
                iounmap(cpu_addr);
        mutex_lock(&dma_mem->lock);
index 37cc628..969f755 100644 (file)
@@ -127,7 +127,7 @@ int btfm_slim_enable_ch(struct btfmslim *btfmslim, struct btfmslim_ch *ch,
        if (!btfmslim || !ch)
                return -EINVAL;
 
-       BTFMSLIM_DBG("port:%d", ch->port);
+       BTFMSLIM_DBG("port: %d ch: %d", ch->port, ch->ch);
 
        /* Define the channel with below parameters */
        prop.prot = SLIM_AUTO_ISO;
index dbb4c56..5d105fb 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,7 +13,7 @@
 #define BTFM_SLIM_H
 #include <linux/slimbus/slimbus.h>
 
-#define BTFMSLIM_DBG(fmt, arg...)  pr_debug("%s: " fmt "\n", __func__, ## arg)
+#define BTFMSLIM_DBG(fmt, arg...)  pr_debug(fmt "\n", ## arg)
 #define BTFMSLIM_INFO(fmt, arg...) pr_info("%s: " fmt "\n", __func__, ## arg)
 #define BTFMSLIM_ERR(fmt, arg...)  pr_err("%s: " fmt "\n", __func__, ## arg)
 
index d7d24ff..354b48b 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -54,8 +54,8 @@ static int btfm_slim_dai_startup(struct snd_pcm_substream *substream,
        int ret;
        struct btfmslim *btfmslim = dai->dev->platform_data;
 
-       BTFMSLIM_DBG("substream = %s  stream = %d",
-                substream->name, substream->stream);
+       BTFMSLIM_DBG("substream = %s  stream = %d dai name = %s",
+                substream->name, substream->stream, dai->name);
        ret = btfm_slim_hw_init(btfmslim);
        return ret;
 }
@@ -65,8 +65,8 @@ static void btfm_slim_dai_shutdown(struct snd_pcm_substream *substream,
 {
        struct btfmslim *btfmslim = dai->dev->platform_data;
 
-       BTFMSLIM_DBG("substream = %s  stream = %d",
-                substream->name, substream->stream);
+       BTFMSLIM_DBG("substream = %s  stream = %d dai name = %s",
+                substream->name, substream->stream, dai->name);
        btfm_slim_hw_deinit(btfmslim);
 }
 
@@ -74,7 +74,7 @@ static int btfm_slim_dai_hw_params(struct snd_pcm_substream *substream,
                            struct snd_pcm_hw_params *params,
                            struct snd_soc_dai *dai)
 {
-       BTFMSLIM_DBG("dai_name = %s DAI-ID %x rate %d num_ch %d",
+       BTFMSLIM_DBG("dai name = %s DAI-ID %x rate %d num_ch %d",
                dai->name, dai->id, params_rate(params),
                params_channels(params));
 
@@ -89,7 +89,7 @@ int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
        struct btfmslim_ch *ch;
        uint8_t rxport, grp = false, nchan = 1;
 
-       BTFMSLIM_DBG("dai->name:%s, dai->id: %d, dai->rate: %d", dai->name,
+       BTFMSLIM_DBG("dai name: %s, dai->id: %d, dai->rate: %d", dai->name,
                dai->id, dai->rate);
 
        switch (dai->id) {
@@ -137,7 +137,7 @@ int btfm_slim_dai_hw_free(struct snd_pcm_substream *substream,
        struct btfmslim_ch *ch;
        uint8_t rxport, grp = false, nchan = 1;
 
-       BTFMSLIM_DBG("dai->name:%s, dai->id: %d, dai->rate: %d", dai->name,
+       BTFMSLIM_DBG("dai name: %s, dai->id: %d, dai->rate: %d", dai->name,
                dai->id, dai->rate);
 
        switch (dai->id) {
@@ -384,7 +384,7 @@ static struct snd_soc_dai_driver btfmslim_dai[] = {
 static struct snd_soc_codec_driver btfmslim_codec = {
        .probe  = btfm_slim_codec_probe,
        .remove = btfm_slim_codec_remove,
-       .read           = btfm_slim_codec_read,
+       .read   = btfm_slim_codec_read,
        .write  = btfm_slim_codec_write,
 };
 
index 7d7bd24..72e28da 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -76,7 +76,7 @@ int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t port_num,
        uint8_t reg_val = 0;
        uint16_t reg;
 
-       BTFMSLIM_DBG("enable(%d)", enable);
+       BTFMSLIM_DBG("port(%d) enable(%d)", port_num, enable);
        if (rxport) {
                /* Port enable */
                reg = CHRK_SB_PGD_PORT_RX_CFGN(port_num - 0x10);
index facdb0f..3350643 100644 (file)
@@ -2606,7 +2606,8 @@ static int diag_user_process_raw_data(const char __user *buf, int len)
                info = diag_md_session_get_pid(current->tgid);
                ret = diag_process_apps_pkt(user_space_data, len, info);
                if (ret == 1)
-                       diag_send_error_rsp((void *)(user_space_data), len);
+                       diag_send_error_rsp((void *)(user_space_data), len,
+                                               info);
        }
 fail:
        diagmem_free(driver, user_space_data, mempool);
index a7069bc..99a16dd 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -239,10 +239,11 @@ void chk_logging_wakeup(void)
        }
 }
 
-static void pack_rsp_and_send(unsigned char *buf, int len)
+static void pack_rsp_and_send(unsigned char *buf, int len,
+                               struct diag_md_session_t *info)
 {
        int err;
-       int retry_count = 0;
+       int retry_count = 0, i, rsp_ctxt;
        uint32_t write_len = 0;
        unsigned long flags;
        unsigned char *rsp_ptr = driver->encoded_rsp_buf;
@@ -257,6 +258,15 @@ static void pack_rsp_and_send(unsigned char *buf, int len)
                return;
        }
 
+       if (info && info->peripheral_mask) {
+               for (i = 0; i <= NUM_PERIPHERALS; i++) {
+                       if (info->peripheral_mask & (1 << i))
+                               break;
+               }
+               rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, 1);
+       } else
+               rsp_ctxt = driver->rsp_buf_ctxt;
+
        /*
         * Keep trying till we get the buffer back. It should probably
         * take one or two iterations. When this loops till UINT_MAX, it
@@ -298,8 +308,7 @@ static void pack_rsp_and_send(unsigned char *buf, int len)
        *(uint8_t *)(rsp_ptr + write_len) = CONTROL_CHAR;
        write_len += sizeof(uint8_t);
 
-       err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, write_len,
-                            driver->rsp_buf_ctxt);
+       err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, write_len, rsp_ctxt);
        if (err) {
                pr_err("diag: In %s, unable to write to mux, err: %d\n",
                       __func__, err);
@@ -309,12 +318,13 @@ static void pack_rsp_and_send(unsigned char *buf, int len)
        }
 }
 
-static void encode_rsp_and_send(unsigned char *buf, int len)
+static void encode_rsp_and_send(unsigned char *buf, int len,
+                               struct diag_md_session_t *info)
 {
        struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
        struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
        unsigned char *rsp_ptr = driver->encoded_rsp_buf;
-       int err, retry_count = 0;
+       int err, i, rsp_ctxt, retry_count = 0;
        unsigned long flags;
 
        if (!rsp_ptr || !buf)
@@ -326,6 +336,15 @@ static void encode_rsp_and_send(unsigned char *buf, int len)
                return;
        }
 
+       if (info && info->peripheral_mask) {
+               for (i = 0; i <= NUM_PERIPHERALS; i++) {
+                       if (info->peripheral_mask & (1 << i))
+                               break;
+               }
+               rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, 1);
+       } else
+               rsp_ctxt = driver->rsp_buf_ctxt;
+
        /*
         * Keep trying till we get the buffer back. It should probably
         * take one or two iterations. When this loops till UINT_MAX, it
@@ -369,7 +388,7 @@ static void encode_rsp_and_send(unsigned char *buf, int len)
        diag_hdlc_encode(&send, &enc);
        driver->encoded_rsp_len = (int)(enc.dest - (void *)rsp_ptr);
        err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, driver->encoded_rsp_len,
-                            driver->rsp_buf_ctxt);
+                            rsp_ctxt);
        if (err) {
                pr_err("diag: In %s, Unable to write to device, err: %d\n",
                        __func__, err);
@@ -380,21 +399,22 @@ static void encode_rsp_and_send(unsigned char *buf, int len)
        memset(buf, '\0', DIAG_MAX_RSP_SIZE);
 }
 
-void diag_send_rsp(unsigned char *buf, int len)
+void diag_send_rsp(unsigned char *buf, int len, struct diag_md_session_t *info)
 {
        struct diag_md_session_t *session_info = NULL;
        uint8_t hdlc_disabled;
 
-       session_info = diag_md_session_get_peripheral(APPS_DATA);
+       session_info = (info) ? info :
+                               diag_md_session_get_peripheral(APPS_DATA);
        if (session_info)
                hdlc_disabled = session_info->hdlc_disabled;
        else
                hdlc_disabled = driver->hdlc_disabled;
 
        if (hdlc_disabled)
-               pack_rsp_and_send(buf, len);
+               pack_rsp_and_send(buf, len, session_info);
        else
-               encode_rsp_and_send(buf, len);
+               encode_rsp_and_send(buf, len, session_info);
 }
 
 void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type)
@@ -865,7 +885,8 @@ static int diag_cmd_disable_hdlc(unsigned char *src_buf, int src_len,
        return write_len;
 }
 
-void diag_send_error_rsp(unsigned char *buf, int len)
+void diag_send_error_rsp(unsigned char *buf, int len,
+                       struct diag_md_session_t *info)
 {
        /* -1 to accomodate the first byte 0x13 */
        if (len > (DIAG_MAX_RSP_SIZE - 1)) {
@@ -875,7 +896,7 @@ void diag_send_error_rsp(unsigned char *buf, int len)
 
        *(uint8_t *)driver->apps_rsp_buf = DIAG_CMD_ERROR;
        memcpy((driver->apps_rsp_buf + sizeof(uint8_t)), buf, len);
-       diag_send_rsp(driver->apps_rsp_buf, len + 1);
+       diag_send_rsp(driver->apps_rsp_buf, len + 1, info);
 }
 
 int diag_process_apps_pkt(unsigned char *buf, int len,
@@ -895,7 +916,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
        /* Check if the command is a supported mask command */
        mask_ret = diag_process_apps_masks(buf, len, info);
        if (mask_ret > 0) {
-               diag_send_rsp(driver->apps_rsp_buf, mask_ret);
+               diag_send_rsp(driver->apps_rsp_buf, mask_ret, info);
                return 0;
        }
 
@@ -917,7 +938,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
                                                   driver->apps_rsp_buf,
                                                   DIAG_MAX_RSP_SIZE);
                if (write_len > 0)
-                       diag_send_rsp(driver->apps_rsp_buf, write_len);
+                       diag_send_rsp(driver->apps_rsp_buf, write_len, info);
                return 0;
        }
 
@@ -933,7 +954,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
                } else {
                        if (MD_PERIPHERAL_MASK(reg_item->proc) &
                                driver->logging_mask)
-                               diag_send_error_rsp(buf, len);
+                               diag_send_error_rsp(buf, len, info);
                        else
                                write_len = diag_send_data(reg_item, buf, len);
                }
@@ -949,13 +970,13 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
                for (i = 0; i < 4; i++)
                        *(driver->apps_rsp_buf+i) = *(buf+i);
                *(uint32_t *)(driver->apps_rsp_buf+4) = DIAG_MAX_REQ_SIZE;
-               diag_send_rsp(driver->apps_rsp_buf, 8);
+               diag_send_rsp(driver->apps_rsp_buf, 8, info);
                return 0;
        } else if ((*buf == 0x4b) && (*(buf+1) == 0x12) &&
                (*(uint16_t *)(buf+2) == DIAG_DIAG_STM)) {
                len = diag_process_stm_cmd(buf, driver->apps_rsp_buf);
                if (len > 0) {
-                       diag_send_rsp(driver->apps_rsp_buf, len);
+                       diag_send_rsp(driver->apps_rsp_buf, len, info);
                        return 0;
                }
                return len;
@@ -968,7 +989,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
                                                        driver->apps_rsp_buf,
                                                        DIAG_MAX_RSP_SIZE);
                if (write_len > 0)
-                       diag_send_rsp(driver->apps_rsp_buf, write_len);
+                       diag_send_rsp(driver->apps_rsp_buf, write_len, info);
                return 0;
        }
        /* Check for time sync switch command */
@@ -979,14 +1000,14 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
                                                        driver->apps_rsp_buf,
                                                        DIAG_MAX_RSP_SIZE);
                if (write_len > 0)
-                       diag_send_rsp(driver->apps_rsp_buf, write_len);
+                       diag_send_rsp(driver->apps_rsp_buf, write_len, info);
                return 0;
        }
        /* Check for download command */
        else if ((chk_apps_master()) && (*buf == 0x3A)) {
                /* send response back */
                driver->apps_rsp_buf[0] = *buf;
-               diag_send_rsp(driver->apps_rsp_buf, 1);
+               diag_send_rsp(driver->apps_rsp_buf, 1, info);
                msleep(5000);
                /* call download API */
                msm_set_restart_mode(RESTART_DLOAD);
@@ -1006,7 +1027,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
                        for (i = 0; i < 13; i++)
                                driver->apps_rsp_buf[i+3] = 0;
 
-                       diag_send_rsp(driver->apps_rsp_buf, 16);
+                       diag_send_rsp(driver->apps_rsp_buf, 16, info);
                        return 0;
                }
        }
@@ -1015,7 +1036,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
                (*(buf+2) == 0x04) && (*(buf+3) == 0x0)) {
                memcpy(driver->apps_rsp_buf, buf, 4);
                driver->apps_rsp_buf[4] = wrap_enabled;
-               diag_send_rsp(driver->apps_rsp_buf, 5);
+               diag_send_rsp(driver->apps_rsp_buf, 5, info);
                return 0;
        }
        /* Wrap the Delayed Rsp ID */
@@ -1024,7 +1045,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
                wrap_enabled = true;
                memcpy(driver->apps_rsp_buf, buf, 4);
                driver->apps_rsp_buf[4] = wrap_count;
-               diag_send_rsp(driver->apps_rsp_buf, 6);
+               diag_send_rsp(driver->apps_rsp_buf, 6, info);
                return 0;
        }
        /* Mobile ID Rsp */
@@ -1035,7 +1056,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
                                                   driver->apps_rsp_buf,
                                                   DIAG_MAX_RSP_SIZE);
                if (write_len > 0) {
-                       diag_send_rsp(driver->apps_rsp_buf, write_len);
+                       diag_send_rsp(driver->apps_rsp_buf, write_len, info);
                        return 0;
                }
        }
@@ -1055,7 +1076,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
                        for (i = 0; i < 55; i++)
                                driver->apps_rsp_buf[i] = 0;
 
-                       diag_send_rsp(driver->apps_rsp_buf, 55);
+                       diag_send_rsp(driver->apps_rsp_buf, 55, info);
                        return 0;
                }
                /* respond to 0x7c command */
@@ -1068,14 +1089,14 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
                                                         chk_config_get_id();
                        *(unsigned char *)(driver->apps_rsp_buf + 12) = '\0';
                        *(unsigned char *)(driver->apps_rsp_buf + 13) = '\0';
-                       diag_send_rsp(driver->apps_rsp_buf, 14);
+                       diag_send_rsp(driver->apps_rsp_buf, 14, info);
                        return 0;
                }
        }
        write_len = diag_cmd_chk_stats(buf, len, driver->apps_rsp_buf,
                                       DIAG_MAX_RSP_SIZE);
        if (write_len > 0) {
-               diag_send_rsp(driver->apps_rsp_buf, write_len);
+               diag_send_rsp(driver->apps_rsp_buf, write_len, info);
                return 0;
        }
        write_len = diag_cmd_disable_hdlc(buf, len, driver->apps_rsp_buf,
@@ -1087,7 +1108,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
                 * before disabling HDLC encoding on Apps processor.
                 */
                mutex_lock(&driver->hdlc_disable_mutex);
-               diag_send_rsp(driver->apps_rsp_buf, write_len);
+               diag_send_rsp(driver->apps_rsp_buf, write_len, info);
                /*
                 * Set the value of hdlc_disabled after sending the response to
                 * the tools. This is required since the tools is expecting a
@@ -1107,7 +1128,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
 
        /* We have now come to the end of the function. */
        if (chk_apps_only())
-               diag_send_error_rsp(buf, len);
+               diag_send_error_rsp(buf, len, info);
 
        return 0;
 }
@@ -1190,7 +1211,7 @@ fail:
         * recovery algorithm. Send an error response if the
         * packet is not in expected format.
         */
-       diag_send_error_rsp(driver->hdlc_buf, driver->hdlc_buf_len);
+       diag_send_error_rsp(driver->hdlc_buf, driver->hdlc_buf_len, info);
        driver->hdlc_buf_len = 0;
 end:
        mutex_unlock(&driver->diag_hdlc_mutex);
@@ -1446,7 +1467,7 @@ start:
 
                if (actual_pkt->start != CONTROL_CHAR) {
                        diag_hdlc_start_recovery(buf, len, info);
-                       diag_send_error_rsp(buf, len);
+                       diag_send_error_rsp(buf, len, info);
                        goto end;
                }
 
@@ -1528,15 +1549,14 @@ static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt,
        case TYPE_CMD:
                if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
                        diagfwd_write_done(peripheral, type, num);
-               } else if (peripheral == APPS_DATA) {
+               }
+               if (peripheral == APPS_DATA ||
+                               ctxt == DIAG_MEMORY_DEVICE_MODE) {
                        spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
                        driver->rsp_buf_busy = 0;
                        driver->encoded_rsp_len = 0;
                        spin_unlock_irqrestore(&driver->rsp_buf_busy_lock,
                                               flags);
-               } else {
-                       pr_err_ratelimited("diag: Invalid peripheral %d in %s, type: %d\n",
-                                          peripheral, __func__, type);
                }
                break;
        default:
index 0023e06..4c6d86f 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2015, 2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -45,7 +45,8 @@ void diag_update_userspace_clients(unsigned int type);
 void diag_update_sleeping_process(int process_id, int data_type);
 int diag_process_apps_pkt(unsigned char *buf, int len,
                          struct diag_md_session_t *info);
-void diag_send_error_rsp(unsigned char *buf, int len);
+void diag_send_error_rsp(unsigned char *buf, int len,
+                        struct diag_md_session_t *info);
 void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type);
 int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf);
 void diag_md_hdlc_reset_timer_func(unsigned long pid);
index e761e6e..37f3bd2 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -462,6 +462,31 @@ static int  diag_glink_write(void *ctxt, unsigned char *buf, int len)
        return err;
 
 }
+
+static void diag_glink_connect_work_fn(struct work_struct *work)
+{
+       struct diag_glink_info *glink_info = container_of(work,
+                                                       struct diag_glink_info,
+                                                       connect_work);
+       if (!glink_info || glink_info->hdl)
+               return;
+       atomic_set(&glink_info->opened, 1);
+       diagfwd_channel_open(glink_info->fwd_ctxt);
+       diagfwd_late_open(glink_info->fwd_ctxt);
+}
+
+static void diag_glink_remote_disconnect_work_fn(struct work_struct *work)
+{
+       struct diag_glink_info *glink_info = container_of(work,
+                                                       struct diag_glink_info,
+                                                       remote_disconnect_work);
+       if (!glink_info || glink_info->hdl)
+               return;
+       atomic_set(&glink_info->opened, 0);
+       diagfwd_channel_close(glink_info->fwd_ctxt);
+       atomic_set(&glink_info->tx_intent_ready, 0);
+}
+
 static void diag_glink_transport_notify_state(void *handle, const void *priv,
                                          unsigned event)
 {
@@ -475,9 +500,7 @@ static void diag_glink_transport_notify_state(void *handle, const void *priv,
                DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
                        "%s received channel connect for periph:%d\n",
                         glink_info->name, glink_info->peripheral);
-               atomic_set(&glink_info->opened, 1);
-               diagfwd_channel_open(glink_info->fwd_ctxt);
-               diagfwd_late_open(glink_info->fwd_ctxt);
+               queue_work(glink_info->wq, &glink_info->connect_work);
                break;
        case GLINK_LOCAL_DISCONNECTED:
                DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
@@ -489,9 +512,7 @@ static void diag_glink_transport_notify_state(void *handle, const void *priv,
                DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
                        "%s received channel remote disconnect for periph:%d\n",
                         glink_info->name, glink_info->peripheral);
-               atomic_set(&glink_info->opened, 0);
-               diagfwd_channel_close(glink_info->fwd_ctxt);
-               atomic_set(&glink_info->tx_intent_ready, 0);
+               queue_work(glink_info->wq, &glink_info->remote_disconnect_work);
                break;
        default:
                DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
@@ -641,6 +662,9 @@ static void __diag_glink_init(struct diag_glink_info *glink_info)
        INIT_WORK(&(glink_info->open_work), diag_glink_open_work_fn);
        INIT_WORK(&(glink_info->close_work), diag_glink_close_work_fn);
        INIT_WORK(&(glink_info->read_work), diag_glink_read_work_fn);
+       INIT_WORK(&(glink_info->connect_work), diag_glink_connect_work_fn);
+       INIT_WORK(&(glink_info->remote_disconnect_work),
+               diag_glink_remote_disconnect_work_fn);
        link_info.glink_link_state_notif_cb = diag_glink_notify_cb;
        link_info.transport = NULL;
        link_info.edge = glink_info->edge;
@@ -681,6 +705,8 @@ int diag_glink_init(void)
        struct diag_glink_info *glink_info = NULL;
 
        for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+               if (peripheral != PERIPHERAL_WDSP)
+                       continue;
                glink_info = &glink_cntl[peripheral];
                __diag_glink_init(glink_info);
                diagfwd_cntl_register(TRANSPORT_GLINK, glink_info->peripheral,
@@ -719,6 +745,8 @@ void diag_glink_early_exit(void)
        int peripheral = 0;
 
        for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+               if (peripheral != PERIPHERAL_WDSP)
+                       continue;
                __diag_glink_exit(&glink_cntl[peripheral]);
                glink_unregister_link_state_cb(&glink_cntl[peripheral].hdl);
        }
@@ -729,6 +757,8 @@ void diag_glink_exit(void)
        int peripheral = 0;
 
        for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+               if (peripheral != PERIPHERAL_WDSP)
+                       continue;
                __diag_glink_exit(&glink_data[peripheral]);
                __diag_glink_exit(&glink_cmd[peripheral]);
                __diag_glink_exit(&glink_dci[peripheral]);
index bad4629..5c1abef 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,8 @@ struct diag_glink_info {
        struct work_struct open_work;
        struct work_struct close_work;
        struct work_struct read_work;
+       struct work_struct connect_work;
+       struct work_struct remote_disconnect_work;
        struct diagfwd_info *fwd_ctxt;
 };
 
index eeb940e..69e47a1 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -491,6 +491,9 @@ static long __slave_div_round_rate(struct clk *c, unsigned long rate,
        if (best_div)
                *best_div = div;
 
+       if (d->data.is_half_divider)
+               p_rate *= 2;
+
        return p_rate / div;
 }
 
@@ -530,9 +533,16 @@ static int slave_div_set_rate(struct clk *c, unsigned long rate)
 static unsigned long slave_div_get_rate(struct clk *c)
 {
        struct div_clk *d = to_div_clk(c);
+       unsigned long rate;
+
        if (!d->data.div)
                return 0;
-       return clk_get_rate(c->parent) / d->data.div;
+
+       rate = clk_get_rate(c->parent) / d->data.div;
+       if (d->data.is_half_divider)
+               rate *= 2;
+
+       return rate;
 }
 
 struct clk_ops clk_ops_slave_div = {
index e605444..1d9085c 100644 (file)
@@ -876,5 +876,6 @@ const struct clk_ops clk_alpha_pll_slew_ops = {
        .recalc_rate = clk_alpha_pll_recalc_rate,
        .round_rate = clk_alpha_pll_round_rate,
        .set_rate = clk_alpha_pll_slew_set_rate,
+       .list_registers = clk_alpha_pll_list_registers,
 };
 EXPORT_SYMBOL_GPL(clk_alpha_pll_slew_ops);
index a07deb9..9e5c0b6 100644 (file)
@@ -307,7 +307,18 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
 {
        u32 cfg, mask, old_cfg;
        struct clk_hw *hw = &rcg->clkr.hw;
-       int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+       int ret, index;
+
+       /*
+        * In case the frequency table of cxo_f is used, the src in parent_map
+        * and the source in cxo_f.src could be different. Update the index to
+        * '0' since it's assumed that CXO is always fed to port 0 of RCGs HLOS
+        * controls.
+        */
+       if (f == &cxo_f)
+               index = 0;
+       else
+               index = qcom_find_src_index(hw, rcg->parent_map, f->src);
 
        if (index < 0)
                return index;
@@ -507,6 +518,15 @@ static int clk_rcg2_enable(struct clk_hw *hw)
        if (!f)
                return -EINVAL;
 
+       /*
+        * If CXO is not listed as a supported frequency in the frequency
+        * table, the above API would return the lowest supported frequency
+        * instead. This will lead to incorrect configuration of the RCG.
+        * Check if the RCG rate is CXO and configure it accordingly.
+        */
+       if (rate == cxo_f.freq)
+               f = &cxo_f;
+
        clk_rcg_set_force_enable(hw);
        clk_rcg2_configure(rcg, f);
        clk_rcg_clear_force_enable(hw);
index 095530e..0d76b6b 100644 (file)
@@ -570,8 +570,6 @@ static DEFINE_CLK_VOTER(pnoc_msmbus_clk, pnoc_clk, LONG_MAX);
 static DEFINE_CLK_VOTER(pnoc_msmbus_a_clk, pnoc_a_clk, LONG_MAX);
 static DEFINE_CLK_VOTER(pnoc_pm_clk, pnoc_clk, LONG_MAX);
 static DEFINE_CLK_VOTER(pnoc_sps_clk, pnoc_clk, 0);
-static DEFINE_CLK_VOTER(mmssnoc_a_cpu_clk, mmssnoc_axi_a_clk,
-                                                       19200000);
 
 /* Voter Branch clocks */
 static DEFINE_CLK_BRANCH_VOTER(cxo_dwc3_clk, cxo);
@@ -740,7 +738,6 @@ static struct clk_hw *sdm660_clks[] = {
        [CXO_PIL_LPASS_CLK]     = &cxo_pil_lpass_clk.hw,
        [CXO_PIL_CDSP_CLK]      = &cxo_pil_cdsp_clk.hw,
        [CNOC_PERIPH_KEEPALIVE_A_CLK] = &cnoc_periph_keepalive_a_clk.hw,
-       [MMSSNOC_A_CLK_CPU_VOTE] = &mmssnoc_a_cpu_clk.hw
 };
 
 static const struct rpm_smd_clk_desc rpm_clk_sdm660 = {
@@ -861,7 +858,6 @@ static int rpm_smd_clk_probe(struct platform_device *pdev)
                /* Hold an active set vote for the cnoc_periph resource */
                clk_set_rate(cnoc_periph_keepalive_a_clk.hw.clk, 19200000);
                clk_prepare_enable(cnoc_periph_keepalive_a_clk.hw.clk);
-               clk_prepare_enable(mmssnoc_a_cpu_clk.hw.clk);
        }
 
        dev_info(&pdev->dev, "Registered RPM clocks\n");
index 3413859..c282253 100644 (file)
@@ -730,32 +730,6 @@ static struct clk_rcg2 gp3_clk_src = {
        },
 };
 
-static const struct freq_tbl ftbl_hmss_ahb_clk_src[] = {
-       F(19200000, P_XO, 1, 0, 0),
-       F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0),
-       F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
-       F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
-       { }
-};
-
-static struct clk_rcg2 hmss_ahb_clk_src = {
-       .cmd_rcgr = 0x48014,
-       .mnd_width = 0,
-       .hid_width = 5,
-       .parent_map = gcc_parent_map_1,
-       .freq_tbl = ftbl_hmss_ahb_clk_src,
-       .clkr.hw.init = &(struct clk_init_data){
-               .name = "hmss_ahb_clk_src",
-               .parent_names = gcc_parent_names_ao_1,
-               .num_parents = 3,
-               .ops = &clk_rcg2_ops,
-               VDD_DIG_FMAX_MAP3_AO(
-                               LOWER, 19200000,
-                               LOW, 50000000,
-                               NOMINAL, 100000000),
-       },
-};
-
 static const struct freq_tbl ftbl_hmss_gpll0_clk_src[] = {
        F(600000000, P_GPLL0_OUT_MAIN, 1, 0, 0),
        { }
@@ -1823,24 +1797,6 @@ static struct clk_branch gcc_gpu_gpll0_div_clk = {
        },
 };
 
-static struct clk_branch gcc_hmss_ahb_clk = {
-       .halt_reg = 0x48000,
-       .halt_check = BRANCH_HALT_VOTED,
-       .clkr = {
-               .enable_reg = 0x52004,
-               .enable_mask = BIT(21),
-               .hw.init = &(struct clk_init_data){
-                       .name = "gcc_hmss_ahb_clk",
-                       .parent_names = (const char *[]){
-                               "hmss_ahb_clk_src",
-                       },
-                       .num_parents = 1,
-                       .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
-                       .ops = &clk_branch2_ops,
-               },
-       },
-};
-
 static struct clk_branch gcc_hmss_dvm_bus_clk = {
        .halt_reg = 0x4808c,
        .halt_check = BRANCH_HALT,
@@ -2493,14 +2449,15 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
        },
 };
 
-static struct clk_gate2 gcc_usb3_phy_pipe_clk = {
-       .udelay = 50,
+static struct clk_branch gcc_usb3_phy_pipe_clk = {
+       .halt_reg = 0x50004,
+       .halt_check = BRANCH_HALT_DELAY,
        .clkr = {
                .enable_reg = 0x50004,
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
                        .name = "gcc_usb3_phy_pipe_clk",
-                       .ops = &clk_gate2_ops,
+                       .ops = &clk_branch2_ops,
                },
        },
 };
@@ -2640,7 +2597,6 @@ static struct clk_regmap *gcc_660_clocks[] = {
        [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
        [GCC_GPU_GPLL0_CLK] = &gcc_gpu_gpll0_clk.clkr,
        [GCC_GPU_GPLL0_DIV_CLK] = &gcc_gpu_gpll0_div_clk.clkr,
-       [GCC_HMSS_AHB_CLK] = &gcc_hmss_ahb_clk.clkr,
        [GCC_HMSS_DVM_BUS_CLK] = &gcc_hmss_dvm_bus_clk.clkr,
        [GCC_HMSS_RBCPR_CLK] = &gcc_hmss_rbcpr_clk.clkr,
        [GCC_MMSS_GPLL0_CLK] = &gcc_mmss_gpll0_clk.clkr,
@@ -2690,7 +2646,6 @@ static struct clk_regmap *gcc_660_clocks[] = {
        [GPLL1] = &gpll1_out_main.clkr,
        [GPLL4] = &gpll4_out_main.clkr,
        [HLOS1_VOTE_LPASS_ADSP_SMMU_CLK] = &hlos1_vote_lpass_adsp_smmu_clk.clkr,
-       [HMSS_AHB_CLK_SRC] = &hmss_ahb_clk_src.clkr,
        [HMSS_GPLL0_CLK_SRC] = &hmss_gpll0_clk_src.clkr,
        [HMSS_GPLL4_CLK_SRC] = &hmss_gpll4_clk_src.clkr,
        [HMSS_RBCPR_CLK_SRC] = &hmss_rbcpr_clk_src.clkr,
@@ -2764,12 +2719,6 @@ static int gcc_660_probe(struct platform_device *pdev)
        if (IS_ERR(regmap))
                return PTR_ERR(regmap);
 
-       /*
-        * Set the HMSS_AHB_CLK_SLEEP_ENA bit to allow the hmss_ahb_clk to be
-        * turned off by hardware during certain apps low power modes.
-        */
-       regmap_update_bits(regmap, 0x52008, BIT(21), BIT(21));
-
        vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig");
        if (IS_ERR(vdd_dig.regulator[0])) {
                if (!(PTR_ERR(vdd_dig.regulator[0]) == -EPROBE_DEFER))
@@ -2882,7 +2831,6 @@ static const char *const debug_mux_parent_names[] = {
        "gcc_gp3_clk",
        "gcc_gpu_bimc_gfx_clk",
        "gcc_gpu_cfg_ahb_clk",
-       "gcc_hmss_ahb_clk",
        "gcc_hmss_dvm_bus_clk",
        "gcc_hmss_rbcpr_clk",
        "gcc_mmss_noc_cfg_ahb_clk",
@@ -3061,7 +3009,6 @@ static struct clk_debug_mux gcc_debug_mux = {
                { "gcc_gp3_clk",                        0x0E1 },
                { "gcc_gpu_bimc_gfx_clk",               0x13F },
                { "gcc_gpu_cfg_ahb_clk",                0x13B },
-               { "gcc_hmss_ahb_clk",                   0x0BA },
                { "gcc_hmss_dvm_bus_clk",               0x0BF },
                { "gcc_hmss_rbcpr_clk",                 0x0BC },
                { "gcc_mmss_noc_cfg_ahb_clk",           0x020 },
index aec73d6..87e6f8c 100644 (file)
@@ -529,7 +529,7 @@ static struct clk_rcg2 ahb_clk_src = {
        .hid_width = 5,
        .parent_map = mmcc_parent_map_10,
        .freq_tbl = ftbl_ahb_clk_src,
-       .flags = FORCE_ENABLE_RCGR,
+       .enable_safe_config = true,
        .clkr.hw.init = &(struct clk_init_data){
                .name = "ahb_clk_src",
                .parent_names = mmcc_parent_names_10,
@@ -3002,6 +3002,7 @@ static const struct qcom_cc_desc mmcc_660_desc = {
 
 static const struct of_device_id mmcc_660_match_table[] = {
        { .compatible = "qcom,mmcc-sdm660" },
+       { .compatible = "qcom,mmcc-sdm630" },
        { }
 };
 MODULE_DEVICE_TABLE(of, mmcc_660_match_table);
@@ -3010,11 +3011,15 @@ static int mmcc_660_probe(struct platform_device *pdev)
 {
        int ret = 0;
        struct regmap *regmap;
+       bool is_sdm630 = 0;
 
        regmap = qcom_cc_map(pdev, &mmcc_660_desc);
        if (IS_ERR(regmap))
                return PTR_ERR(regmap);
 
+       is_sdm630 = of_device_is_compatible(pdev->dev.of_node,
+                                               "qcom,mmcc-sdm630");
+
        /* PLLs connected on Mx rails of MMSS_CC  */
        vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx_mmss");
        if (IS_ERR(vdd_mx.regulator[0])) {
@@ -3048,6 +3053,17 @@ static int mmcc_660_probe(struct platform_device *pdev)
        clk_alpha_pll_configure(&mmpll8_pll_out_main, regmap, &mmpll8_config);
        clk_alpha_pll_configure(&mmpll10_pll_out_main, regmap, &mmpll10_config);
 
+       if (is_sdm630) {
+               mmcc_660_desc.clks[BYTE1_CLK_SRC] = 0;
+               mmcc_660_desc.clks[MMSS_MDSS_BYTE1_CLK] = 0;
+               mmcc_660_desc.clks[MMSS_MDSS_BYTE1_INTF_DIV_CLK] = 0;
+               mmcc_660_desc.clks[MMSS_MDSS_BYTE1_INTF_CLK] = 0;
+               mmcc_660_desc.clks[ESC1_CLK_SRC] = 0;
+               mmcc_660_desc.clks[MMSS_MDSS_ESC1_CLK] = 0;
+               mmcc_660_desc.clks[PCLK1_CLK_SRC] = 0;
+               mmcc_660_desc.clks[MMSS_MDSS_PCLK1_CLK] = 0;
+       }
+
        ret = qcom_cc_really_probe(pdev, &mmcc_660_desc, regmap);
        if (ret) {
                dev_err(&pdev->dev, "Failed to register MMSS clocks\n");
index afd94a1..64ae44f 100644 (file)
@@ -88,3 +88,9 @@ config DRM_SDE_WB
        help
          Choose this option for writeback connector support.
 
+config DRM_SDE_HDMI
+       bool "Enable HDMI driver support in DRM SDE driver"
+       depends on DRM_MSM
+       default y
+       help
+         Choose this option if HDMI connector support is needed in SDE driver.
index 4ca16fc..4e968cf 100644 (file)
@@ -1,4 +1,6 @@
 ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -Idrivers/gpu/drm/msm/dsi-staging
+ccflags-y += -Idrivers/gpu/drm/msm/hdmi
+ccflags-y += -Idrivers/gpu/drm/msm/hdmi-staging
 ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
 ccflags-$(CONFIG_SYNC) += -Idrivers/staging/android
 ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
@@ -45,14 +47,19 @@ msm_drm-y := \
        sde/sde_backlight.o \
        sde/sde_color_processing.o \
        sde/sde_vbif.o \
-       sde_dbg_evtlog.o
+       sde_dbg_evtlog.o \
+       sde_io_util.o \
 
 # use drm gpu driver only if qcom_kgsl driver not available
 ifneq ($(CONFIG_QCOM_KGSL),y)
 msm_drm-y += adreno/adreno_device.o \
        adreno/adreno_gpu.o \
        adreno/a3xx_gpu.o \
-       adreno/a4xx_gpu.o
+       adreno/a4xx_gpu.o \
+       adreno/a5xx_gpu.o \
+       adreno/a5xx_power.o \
+       adreno/a5xx_preempt.o \
+       adreno/a5xx_snapshot.o
 endif
 
 msm_drm-$(CONFIG_DRM_MSM_MDP4) += mdp/mdp4/mdp4_crtc.o \
@@ -90,6 +97,9 @@ msm_drm-$(CONFIG_DRM_MSM_DSI_STAGING) += dsi-staging/dsi_phy.o \
                                dsi-staging/dsi_panel.o \
                                dsi-staging/dsi_display_test.o
 
+msm_drm-$(CONFIG_DRM_SDE_HDMI) += \
+       hdmi-staging/sde_hdmi.o
+
 msm_drm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/pll/dsi_pll.o \
                                dsi/pll/dsi_pll_28nm.o
 
@@ -121,12 +131,14 @@ msm_drm-$(CONFIG_DRM_MSM) += \
        msm_gem.o \
        msm_gem_prime.o \
        msm_gem_submit.o \
+       msm_gem_vma.o \
        msm_gpu.o \
        msm_iommu.o \
        msm_smmu.o \
        msm_perf.o \
        msm_rd.o \
        msm_ringbuffer.o \
-       msm_prop.o
+       msm_prop.o \
+       msm_snapshot.o
 
 obj-$(CONFIG_DRM_MSM)  += msm_drm.o
index fee2429..8d16b21 100644 (file)
@@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          ( 109916 bytes, from 2016-02-20 18:44:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2015-09-24 17:30:00)
-
-Copyright (C) 2013-2015 by the following authors:
+- ./adreno.xml               (    431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml  (   1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml          (  32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml (  12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml    (  19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml          (  83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml          ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml          (  81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml         (   1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2016 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
index 27dabd5..d521b13 100644 (file)
@@ -8,14 +8,15 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          ( 109916 bytes, from 2016-02-20 18:44:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2015-09-24 17:30:00)
+- ./adreno.xml               (    431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml  (   1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml          (  32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml (  12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml    (  19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml          (  83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml          ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml          (  81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml         (   1773 bytes, from 2016-10-24 21:12:27)
 
 Copyright (C) 2013-2016 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -129,10 +130,14 @@ enum a3xx_tex_fmt {
        TFMT_Z16_UNORM = 9,
        TFMT_X8Z24_UNORM = 10,
        TFMT_Z32_FLOAT = 11,
-       TFMT_NV12_UV_TILED = 17,
-       TFMT_NV12_Y_TILED = 19,
-       TFMT_NV12_UV = 21,
-       TFMT_NV12_Y = 23,
+       TFMT_UV_64X32 = 16,
+       TFMT_VU_64X32 = 17,
+       TFMT_Y_64X32 = 18,
+       TFMT_NV12_64X32 = 19,
+       TFMT_UV_LINEAR = 20,
+       TFMT_VU_LINEAR = 21,
+       TFMT_Y_LINEAR = 22,
+       TFMT_NV12_LINEAR = 23,
        TFMT_I420_Y = 24,
        TFMT_I420_U = 26,
        TFMT_I420_V = 27,
@@ -525,14 +530,6 @@ enum a3xx_uche_perfcounter_select {
        UCHE_UCHEPERF_ACTIVE_CYCLES = 20,
 };
 
-enum a3xx_rb_blend_opcode {
-       BLEND_DST_PLUS_SRC = 0,
-       BLEND_SRC_MINUS_DST = 1,
-       BLEND_DST_MINUS_SRC = 2,
-       BLEND_MIN_DST_SRC = 3,
-       BLEND_MAX_DST_SRC = 4,
-};
-
 enum a3xx_intp_mode {
        SMOOTH = 0,
        FLAT = 1,
@@ -1393,13 +1390,14 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mod
 {
        return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
 }
+#define A3XX_RB_COPY_CONTROL_MSAA_SRGB_DOWNSAMPLE              0x00000080
 #define A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK                   0x00000f00
 #define A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT                  8
 static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
 {
        return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
 }
-#define A3XX_RB_COPY_CONTROL_UNK12                             0x00001000
+#define A3XX_RB_COPY_CONTROL_DEPTH32_RESOLVE                   0x00001000
 #define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK                   0xffffc000
 #define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT                  14
 static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
@@ -1472,7 +1470,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
 {
        return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
 }
-#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE                                0x00000080
+#define A3XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE                   0x00000080
 #define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE                    0x80000000
 
 #define REG_A3XX_RB_DEPTH_CLEAR                                        0x00002101
index fd266ed..c4f886f 100644 (file)
 extern bool hang_debug;
 
 static void a3xx_dump(struct msm_gpu *gpu);
+static bool a3xx_idle(struct msm_gpu *gpu);
 
-static void a3xx_me_init(struct msm_gpu *gpu)
+static bool a3xx_me_init(struct msm_gpu *gpu)
 {
-       struct msm_ringbuffer *ring = gpu->rb;
+       struct msm_ringbuffer *ring = gpu->rb[0];
 
        OUT_PKT3(ring, CP_ME_INIT, 17);
        OUT_RING(ring, 0x000003f7);
@@ -64,8 +65,8 @@ static void a3xx_me_init(struct msm_gpu *gpu)
        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);
 
-       gpu->funcs->flush(gpu);
-       gpu->funcs->idle(gpu);
+       gpu->funcs->flush(gpu, ring);
+       return a3xx_idle(gpu);
 }
 
 static int a3xx_hw_init(struct msm_gpu *gpu)
@@ -294,9 +295,7 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
        /* clear ME_HALT to start micro engine */
        gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
 
-       a3xx_me_init(gpu);
-
-       return 0;
+       return a3xx_me_init(gpu) ? 0 : -EINVAL;
 }
 
 static void a3xx_recover(struct msm_gpu *gpu)
@@ -330,17 +329,22 @@ static void a3xx_destroy(struct msm_gpu *gpu)
        kfree(a3xx_gpu);
 }
 
-static void a3xx_idle(struct msm_gpu *gpu)
+static bool a3xx_idle(struct msm_gpu *gpu)
 {
        /* wait for ringbuffer to drain: */
-       adreno_idle(gpu);
+       if (!adreno_idle(gpu, gpu->rb[0]))
+               return false;
 
        /* then wait for GPU to finish: */
        if (spin_until(!(gpu_read(gpu, REG_A3XX_RBBM_STATUS) &
-                       A3XX_RBBM_STATUS_GPU_BUSY)))
+                       A3XX_RBBM_STATUS_GPU_BUSY))) {
                DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
 
-       /* TODO maybe we need to reset GPU here to recover from hang? */
+               /* TODO maybe we need to reset GPU here to recover from hang? */
+               return false;
+       }
+
+       return true;
 }
 
 static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
@@ -419,91 +423,13 @@ static void a3xx_dump(struct msm_gpu *gpu)
 }
 /* Register offset defines for A3XX */
 static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_AXXX_CP_DEBUG),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_AXXX_CP_ME_RAM_WADDR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_AXXX_CP_ME_RAM_DATA),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
-                       REG_A3XX_CP_PFP_UCODE_DATA),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
-                       REG_A3XX_CP_PFP_UCODE_ADDR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A3XX_CP_WFI_PEND_CTR),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
+       REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
+       REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A3XX_CP_PROTECT_CTRL),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_AXXX_CP_ME_CNTL),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_AXXX_CP_IB1_BASE),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_AXXX_CP_IB1_BUFSZ),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_AXXX_CP_IB2_BASE),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_AXXX_CP_IB2_BUFSZ),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_AXXX_CP_ME_RAM_RADDR),
-       REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_AXXX_SCRATCH_ADDR),
-       REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_AXXX_SCRATCH_UMSK),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A3XX_CP_ROQ_ADDR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A3XX_CP_ROQ_DATA),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A3XX_CP_MERCIU_ADDR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A3XX_CP_MERCIU_DATA),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A3XX_CP_MERCIU_DATA2),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A3XX_CP_MEQ_ADDR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A3XX_CP_MEQ_DATA),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A3XX_CP_HW_FAULT),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
-                       REG_A3XX_CP_PROTECT_STATUS),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A3XX_RBBM_STATUS),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
-                       REG_A3XX_RBBM_PERFCTR_CTL),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
-                       REG_A3XX_RBBM_PERFCTR_LOAD_CMD0),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
-                       REG_A3XX_RBBM_PERFCTR_LOAD_CMD1),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
-                       REG_A3XX_RBBM_PERFCTR_PWR_1_LO),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A3XX_RBBM_INT_0_MASK),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
-                       REG_A3XX_RBBM_INT_0_STATUS),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
-                       REG_A3XX_RBBM_AHB_ERROR_STATUS),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A3XX_RBBM_AHB_CMD),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
-                       REG_A3XX_RBBM_INT_CLEAR_CMD),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A3XX_RBBM_CLOCK_CTL),
-       REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
-                       REG_A3XX_VPC_VPC_DEBUG_RAM_SEL),
-       REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
-                       REG_A3XX_VPC_VPC_DEBUG_RAM_READ),
-       REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
-                       REG_A3XX_VSC_SIZE_ADDRESS),
-       REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A3XX_VFD_CONTROL_0),
-       REG_ADRENO_DEFINE(REG_ADRENO_VFD_INDEX_MAX, REG_A3XX_VFD_INDEX_MAX),
-       REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
-                       REG_A3XX_SP_VS_PVT_MEM_ADDR_REG),
-       REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
-                       REG_A3XX_SP_FS_PVT_MEM_ADDR_REG),
-       REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
-                       REG_A3XX_SP_VS_OBJ_START_REG),
-       REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
-                       REG_A3XX_SP_FS_OBJ_START_REG),
-       REG_ADRENO_DEFINE(REG_ADRENO_PA_SC_AA_CONFIG, REG_A3XX_PA_SC_AA_CONFIG),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PM_OVERRIDE2,
-                       REG_A3XX_RBBM_PM_OVERRIDE2),
-       REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_REG2, REG_AXXX_CP_SCRATCH_REG2),
-       REG_ADRENO_DEFINE(REG_ADRENO_SQ_GPR_MANAGEMENT,
-                       REG_A3XX_SQ_GPR_MANAGEMENT),
-       REG_ADRENO_DEFINE(REG_ADRENO_SQ_INST_STORE_MANAGMENT,
-                       REG_A3XX_SQ_INST_STORE_MANAGMENT),
-       REG_ADRENO_DEFINE(REG_ADRENO_TP0_CHICKEN, REG_A3XX_TP0_CHICKEN),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A3XX_RBBM_RBBM_CTL),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
-                       REG_A3XX_RBBM_SW_RESET_CMD),
-       REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
-                       REG_A3XX_UCHE_CACHE_INVALIDATE0_REG),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
-                       REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
-                       REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI),
 };
 
 static const struct adreno_gpu_funcs funcs = {
@@ -514,9 +440,10 @@ static const struct adreno_gpu_funcs funcs = {
                .pm_resume = msm_gpu_pm_resume,
                .recover = a3xx_recover,
                .last_fence = adreno_last_fence,
+               .submitted_fence = adreno_submitted_fence,
                .submit = adreno_submit,
                .flush = adreno_flush,
-               .idle = a3xx_idle,
+               .active_ring = adreno_active_ring,
                .irq = a3xx_irq,
                .destroy = a3xx_destroy,
 #ifdef CONFIG_DEBUG_FS
@@ -564,7 +491,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
        adreno_gpu->registers = a3xx_registers;
        adreno_gpu->reg_offsets = a3xx_register_offsets;
 
-       ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+       ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
        if (ret)
                goto fail;
 
@@ -583,7 +510,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
 #endif
        }
 
-       if (!gpu->mmu) {
+       if (!gpu->aspace) {
                /* TODO we think it is possible to configure the GPU to
                 * restrict access to VRAM carveout.  But the required
                 * registers are unknown.  For now just bail out and
index 3220b91..1004d4e 100644 (file)
@@ -8,14 +8,15 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          ( 109916 bytes, from 2016-02-20 18:44:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2015-09-24 17:30:00)
+- ./adreno.xml               (    431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml  (   1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml          (  32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml (  12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml    (  19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml          (  83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml          ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml          (  81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml         (   1773 bytes, from 2016-10-24 21:12:27)
 
 Copyright (C) 2013-2016 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -46,6 +47,9 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 enum a4xx_color_fmt {
        RB4_A8_UNORM = 1,
        RB4_R8_UNORM = 2,
+       RB4_R8_SNORM = 3,
+       RB4_R8_UINT = 4,
+       RB4_R8_SINT = 5,
        RB4_R4G4B4A4_UNORM = 8,
        RB4_R5G5B5A1_UNORM = 10,
        RB4_R5G6B5_UNORM = 14,
@@ -89,17 +93,10 @@ enum a4xx_color_fmt {
 
 enum a4xx_tile_mode {
        TILE4_LINEAR = 0,
+       TILE4_2 = 2,
        TILE4_3 = 3,
 };
 
-enum a4xx_rb_blend_opcode {
-       BLEND_DST_PLUS_SRC = 0,
-       BLEND_SRC_MINUS_DST = 1,
-       BLEND_DST_MINUS_SRC = 2,
-       BLEND_MIN_DST_SRC = 3,
-       BLEND_MAX_DST_SRC = 4,
-};
-
 enum a4xx_vtx_fmt {
        VFMT4_32_FLOAT = 1,
        VFMT4_32_32_FLOAT = 2,
@@ -940,6 +937,7 @@ static inline uint32_t A4XX_RB_MODE_CONTROL_HEIGHT(uint32_t val)
 {
        return ((val >> 5) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK;
 }
+#define A4XX_RB_MODE_CONTROL_ENABLE_GMEM                       0x00010000
 
 #define REG_A4XX_RB_RENDER_CONTROL                             0x000020a1
 #define A4XX_RB_RENDER_CONTROL_BINNING_PASS                    0x00000001
@@ -1043,7 +1041,7 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_b
 }
 #define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK       0x000000e0
 #define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT      5
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
 {
        return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
 }
@@ -1061,7 +1059,7 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb
 }
 #define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK     0x00e00000
 #define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT    21
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
 {
        return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
 }
@@ -1073,12 +1071,18 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_r
 }
 
 #define REG_A4XX_RB_BLEND_RED                                  0x000020f0
-#define A4XX_RB_BLEND_RED_UINT__MASK                           0x0000ffff
+#define A4XX_RB_BLEND_RED_UINT__MASK                           0x000000ff
 #define A4XX_RB_BLEND_RED_UINT__SHIFT                          0
 static inline uint32_t A4XX_RB_BLEND_RED_UINT(uint32_t val)
 {
        return ((val) << A4XX_RB_BLEND_RED_UINT__SHIFT) & A4XX_RB_BLEND_RED_UINT__MASK;
 }
+#define A4XX_RB_BLEND_RED_SINT__MASK                           0x0000ff00
+#define A4XX_RB_BLEND_RED_SINT__SHIFT                          8
+static inline uint32_t A4XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+       return ((val) << A4XX_RB_BLEND_RED_SINT__SHIFT) & A4XX_RB_BLEND_RED_SINT__MASK;
+}
 #define A4XX_RB_BLEND_RED_FLOAT__MASK                          0xffff0000
 #define A4XX_RB_BLEND_RED_FLOAT__SHIFT                         16
 static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val)
@@ -1095,12 +1099,18 @@ static inline uint32_t A4XX_RB_BLEND_RED_F32(float val)
 }
 
 #define REG_A4XX_RB_BLEND_GREEN                                        0x000020f2
-#define A4XX_RB_BLEND_GREEN_UINT__MASK                         0x0000ffff
+#define A4XX_RB_BLEND_GREEN_UINT__MASK                         0x000000ff
 #define A4XX_RB_BLEND_GREEN_UINT__SHIFT                                0
 static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val)
 {
        return ((val) << A4XX_RB_BLEND_GREEN_UINT__SHIFT) & A4XX_RB_BLEND_GREEN_UINT__MASK;
 }
+#define A4XX_RB_BLEND_GREEN_SINT__MASK                         0x0000ff00
+#define A4XX_RB_BLEND_GREEN_SINT__SHIFT                                8
+static inline uint32_t A4XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+       return ((val) << A4XX_RB_BLEND_GREEN_SINT__SHIFT) & A4XX_RB_BLEND_GREEN_SINT__MASK;
+}
 #define A4XX_RB_BLEND_GREEN_FLOAT__MASK                                0xffff0000
 #define A4XX_RB_BLEND_GREEN_FLOAT__SHIFT                       16
 static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val)
@@ -1117,12 +1127,18 @@ static inline uint32_t A4XX_RB_BLEND_GREEN_F32(float val)
 }
 
 #define REG_A4XX_RB_BLEND_BLUE                                 0x000020f4
-#define A4XX_RB_BLEND_BLUE_UINT__MASK                          0x0000ffff
+#define A4XX_RB_BLEND_BLUE_UINT__MASK                          0x000000ff
 #define A4XX_RB_BLEND_BLUE_UINT__SHIFT                         0
 static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val)
 {
        return ((val) << A4XX_RB_BLEND_BLUE_UINT__SHIFT) & A4XX_RB_BLEND_BLUE_UINT__MASK;
 }
+#define A4XX_RB_BLEND_BLUE_SINT__MASK                          0x0000ff00
+#define A4XX_RB_BLEND_BLUE_SINT__SHIFT                         8
+static inline uint32_t A4XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+       return ((val) << A4XX_RB_BLEND_BLUE_SINT__SHIFT) & A4XX_RB_BLEND_BLUE_SINT__MASK;
+}
 #define A4XX_RB_BLEND_BLUE_FLOAT__MASK                         0xffff0000
 #define A4XX_RB_BLEND_BLUE_FLOAT__SHIFT                                16
 static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val)
@@ -1139,12 +1155,18 @@ static inline uint32_t A4XX_RB_BLEND_BLUE_F32(float val)
 }
 
 #define REG_A4XX_RB_BLEND_ALPHA                                        0x000020f6
-#define A4XX_RB_BLEND_ALPHA_UINT__MASK                         0x0000ffff
+#define A4XX_RB_BLEND_ALPHA_UINT__MASK                         0x000000ff
 #define A4XX_RB_BLEND_ALPHA_UINT__SHIFT                                0
 static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val)
 {
        return ((val) << A4XX_RB_BLEND_ALPHA_UINT__SHIFT) & A4XX_RB_BLEND_ALPHA_UINT__MASK;
 }
+#define A4XX_RB_BLEND_ALPHA_SINT__MASK                         0x0000ff00
+#define A4XX_RB_BLEND_ALPHA_SINT__SHIFT                                8
+static inline uint32_t A4XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+       return ((val) << A4XX_RB_BLEND_ALPHA_SINT__SHIFT) & A4XX_RB_BLEND_ALPHA_SINT__MASK;
+}
 #define A4XX_RB_BLEND_ALPHA_FLOAT__MASK                                0xffff0000
 #define A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT                       16
 static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val)
@@ -1348,7 +1370,7 @@ static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
 {
        return ((val) << A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
 }
-#define A4XX_RB_DEPTH_CONTROL_BF_ENABLE                                0x00000080
+#define A4XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE                   0x00000080
 #define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE                  0x00010000
 #define A4XX_RB_DEPTH_CONTROL_FORCE_FRAGZ_TO_FS                        0x00020000
 #define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE                    0x80000000
@@ -2177,11 +2199,23 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
 
 #define REG_A4XX_CP_DRAW_STATE_ADDR                            0x00000232
 
-#define REG_A4XX_CP_PROTECT_REG_0                              0x00000240
-
 static inline uint32_t REG_A4XX_CP_PROTECT(uint32_t i0) { return 0x00000240 + 0x1*i0; }
 
 static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; }
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__MASK                    0x0001ffff
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT                   0
+static inline uint32_t A4XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+       return ((val) << A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A4XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A4XX_CP_PROTECT_REG_MASK_LEN__MASK                     0x1f000000
+#define A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT                    24
+static inline uint32_t A4XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+       return ((val) << A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A4XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A4XX_CP_PROTECT_REG_TRAP_WRITE                         0x20000000
+#define A4XX_CP_PROTECT_REG_TRAP_READ                          0x40000000
 
 #define REG_A4XX_CP_PROTECT_CTRL                               0x00000250
 
@@ -2272,7 +2306,7 @@ static inline uint32_t A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
 {
        return ((val) << A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
 }
-#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK            0x0003fc00
+#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK            0x0000fc00
 #define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT           10
 static inline uint32_t A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
 {
@@ -2420,7 +2454,7 @@ static inline uint32_t A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
 {
        return ((val) << A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
 }
-#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK            0x0003fc00
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK            0x0000fc00
 #define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT           10
 static inline uint32_t A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
 {
@@ -3117,6 +3151,8 @@ static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val)
 
 #define REG_A4XX_GRAS_CL_CLIP_CNTL                             0x00002000
 #define A4XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE                    0x00008000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZNEAR_CLIP_DISABLE              0x00010000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE               0x00020000
 #define A4XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z                 0x00400000
 
 #define REG_A4XX_GRAS_CLEAR_CNTL                               0x00002003
@@ -3670,6 +3706,8 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
 #define REG_A4XX_PC_BINNING_COMMAND                            0x00000d00
 #define A4XX_PC_BINNING_COMMAND_BINNING_ENABLE                 0x00000001
 
+#define REG_A4XX_PC_TESSFACTOR_ADDR                            0x00000d08
+
 #define REG_A4XX_PC_DRAWCALL_SETUP_OVERRIDE                    0x00000d0c
 
 #define REG_A4XX_PC_PERFCTR_PC_SEL_0                           0x00000d10
@@ -3690,6 +3728,20 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
 
 #define REG_A4XX_PC_BIN_BASE                                   0x000021c0
 
+#define REG_A4XX_PC_VSTREAM_CONTROL                            0x000021c2
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__MASK                     0x003f0000
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT                    16
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val)
+{
+       return ((val) << A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A4XX_PC_VSTREAM_CONTROL_SIZE__MASK;
+}
+#define A4XX_PC_VSTREAM_CONTROL_N__MASK                                0x07c00000
+#define A4XX_PC_VSTREAM_CONTROL_N__SHIFT                       22
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_N(uint32_t val)
+{
+       return ((val) << A4XX_PC_VSTREAM_CONTROL_N__SHIFT) & A4XX_PC_VSTREAM_CONTROL_N__MASK;
+}
+
 #define REG_A4XX_PC_PRIM_VTX_CNTL                              0x000021c4
 #define A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK                     0x0000000f
 #define A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT                    0
@@ -3752,12 +3804,8 @@ static inline uint32_t A4XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
 {
        return ((val) << A4XX_PC_HS_PARAM_SPACING__SHIFT) & A4XX_PC_HS_PARAM_SPACING__MASK;
 }
-#define A4XX_PC_HS_PARAM_PRIMTYPE__MASK                                0x01800000
-#define A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT                       23
-static inline uint32_t A4XX_PC_HS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
-{
-       return ((val) << A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT) & A4XX_PC_HS_PARAM_PRIMTYPE__MASK;
-}
+#define A4XX_PC_HS_PARAM_CW                                    0x00800000
+#define A4XX_PC_HS_PARAM_CONNECTED                             0x01000000
 
 #define REG_A4XX_VBIF_VERSION                                  0x00003000
 
index 1fa2c7c..534a7c3 100644 (file)
@@ -31,6 +31,7 @@
 
 extern bool hang_debug;
 static void a4xx_dump(struct msm_gpu *gpu);
+static bool a4xx_idle(struct msm_gpu *gpu);
 
 /*
  * a4xx_enable_hwcg() - Program the clock control registers
@@ -113,9 +114,9 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu)
 }
 
 
-static void a4xx_me_init(struct msm_gpu *gpu)
+static bool a4xx_me_init(struct msm_gpu *gpu)
 {
-       struct msm_ringbuffer *ring = gpu->rb;
+       struct msm_ringbuffer *ring = gpu->rb[0];
 
        OUT_PKT3(ring, CP_ME_INIT, 17);
        OUT_RING(ring, 0x000003f7);
@@ -136,8 +137,8 @@ static void a4xx_me_init(struct msm_gpu *gpu)
        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);
 
-       gpu->funcs->flush(gpu);
-       gpu->funcs->idle(gpu);
+       gpu->funcs->flush(gpu, ring);
+       return a4xx_idle(gpu);
 }
 
 static int a4xx_hw_init(struct msm_gpu *gpu)
@@ -292,9 +293,7 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
        /* clear ME_HALT to start micro engine */
        gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);
 
-       a4xx_me_init(gpu);
-
-       return 0;
+       return a4xx_me_init(gpu) ? 0 : -EINVAL;
 }
 
 static void a4xx_recover(struct msm_gpu *gpu)
@@ -328,17 +327,21 @@ static void a4xx_destroy(struct msm_gpu *gpu)
        kfree(a4xx_gpu);
 }
 
-static void a4xx_idle(struct msm_gpu *gpu)
+static bool a4xx_idle(struct msm_gpu *gpu)
 {
        /* wait for ringbuffer to drain: */
-       adreno_idle(gpu);
+       if (!adreno_idle(gpu, gpu->rb[0]))
+               return false;
 
        /* then wait for GPU to finish: */
        if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
-                                       A4XX_RBBM_STATUS_GPU_BUSY)))
+                                       A4XX_RBBM_STATUS_GPU_BUSY))) {
                DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+               /* TODO maybe we need to reset GPU here to recover from hang? */
+               return false;
+       }
 
-       /* TODO maybe we need to reset GPU here to recover from hang? */
+       return true;
 }
 
 static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
@@ -453,87 +456,13 @@ static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
 
 /* Register offset defines for A4XX, in order of enum adreno_regs */
 static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_A4XX_CP_DEBUG),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_A4XX_CP_ME_RAM_WADDR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_A4XX_CP_ME_RAM_DATA),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
-                       REG_A4XX_CP_PFP_UCODE_DATA),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
-                       REG_A4XX_CP_PFP_UCODE_ADDR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A4XX_CP_WFI_PEND_CTR),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
+       REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
+       REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A4XX_CP_PROTECT_CTRL),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_A4XX_CP_ME_CNTL),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_A4XX_CP_IB1_BASE),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_A4XX_CP_IB1_BUFSZ),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_A4XX_CP_IB2_BASE),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_A4XX_CP_IB2_BUFSZ),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_A4XX_CP_ME_RAM_RADDR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A4XX_CP_ROQ_ADDR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A4XX_CP_ROQ_DATA),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A4XX_CP_MERCIU_ADDR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A4XX_CP_MERCIU_DATA),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A4XX_CP_MERCIU_DATA2),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A4XX_CP_MEQ_ADDR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A4XX_CP_MEQ_DATA),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A4XX_CP_HW_FAULT),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
-                       REG_A4XX_CP_PROTECT_STATUS),
-       REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_A4XX_CP_SCRATCH_ADDR),
-       REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_A4XX_CP_SCRATCH_UMASK),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A4XX_RBBM_STATUS),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
-                       REG_A4XX_RBBM_PERFCTR_CTL),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
-                       REG_A4XX_RBBM_PERFCTR_LOAD_CMD0),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
-                       REG_A4XX_RBBM_PERFCTR_LOAD_CMD1),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
-                       REG_A4XX_RBBM_PERFCTR_LOAD_CMD2),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
-                       REG_A4XX_RBBM_PERFCTR_PWR_1_LO),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A4XX_RBBM_INT_0_MASK),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
-                       REG_A4XX_RBBM_INT_0_STATUS),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
-                       REG_A4XX_RBBM_AHB_ERROR_STATUS),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A4XX_RBBM_AHB_CMD),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A4XX_RBBM_CLOCK_CTL),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
-                       REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
-                       REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS),
-       REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
-                       REG_A4XX_VPC_DEBUG_RAM_SEL),
-       REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
-                       REG_A4XX_VPC_DEBUG_RAM_READ),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
-                       REG_A4XX_RBBM_INT_CLEAR_CMD),
-       REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
-                       REG_A4XX_VSC_SIZE_ADDRESS),
-       REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A4XX_VFD_CONTROL_0),
-       REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
-                       REG_A4XX_SP_VS_PVT_MEM_ADDR),
-       REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
-                       REG_A4XX_SP_FS_PVT_MEM_ADDR),
-       REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
-                       REG_A4XX_SP_VS_OBJ_START),
-       REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
-                       REG_A4XX_SP_FS_OBJ_START),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A4XX_RBBM_RBBM_CTL),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
-                       REG_A4XX_RBBM_SW_RESET_CMD),
-       REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
-                       REG_A4XX_UCHE_INVALIDATE0),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
-                       REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO),
-       REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
-                       REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI),
 };
 
 static void a4xx_dump(struct msm_gpu *gpu)
@@ -580,16 +509,8 @@ static int a4xx_pm_suspend(struct msm_gpu *gpu) {
 
 static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 {
-       uint32_t hi, lo, tmp;
-
-       tmp = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_HI);
-       do {
-               hi = tmp;
-               lo = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO);
-               tmp = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_HI);
-       } while (tmp != hi);
-
-       *value = (((uint64_t)hi) << 32) | lo;
+       *value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO,
+               REG_A4XX_RBBM_PERFCTR_CP_0_HI);
 
        return 0;
 }
@@ -602,9 +523,10 @@ static const struct adreno_gpu_funcs funcs = {
                .pm_resume = a4xx_pm_resume,
                .recover = a4xx_recover,
                .last_fence = adreno_last_fence,
+               .submitted_fence = adreno_submitted_fence,
                .submit = adreno_submit,
                .flush = adreno_flush,
-               .idle = a4xx_idle,
+               .active_ring = adreno_active_ring,
                .irq = a4xx_irq,
                .destroy = a4xx_destroy,
 #ifdef CONFIG_DEBUG_FS
@@ -646,7 +568,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
        adreno_gpu->registers = a4xx_registers;
        adreno_gpu->reg_offsets = a4xx_register_offsets;
 
-       ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+       ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
        if (ret)
                goto fail;
 
@@ -665,7 +587,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
 #endif
        }
 
-       if (!gpu->mmu) {
+       if (!gpu->aspace) {
                /* TODO we think it is possible to configure the GPU to
                 * restrict access to VRAM carveout.  But the required
                 * registers are unknown.  For now just bail out and
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
new file mode 100644 (file)
index 0000000..56dad22
--- /dev/null
@@ -0,0 +1,3497 @@
+#ifndef A5XX_XML
+#define A5XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /local3/projects/drm/envytools/rnndb//adreno.xml               (    431 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//freedreno_copyright.xml  (   1572 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/a2xx.xml          (  32901 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/adreno_common.xml (  12025 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/adreno_pm4.xml    (  19684 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/a3xx.xml          (  83840 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/a4xx.xml          ( 110708 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/a5xx.xml          (  81546 bytes, from 2016-10-31 16:38:41)
+- /local3/projects/drm/envytools/rnndb//adreno/ocmem.xml         (   1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2016 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a5xx_color_fmt {
+       RB5_R8_UNORM = 3,
+       RB5_R5G5B5A1_UNORM = 10,
+       RB5_R8G8B8A8_UNORM = 48,
+       RB5_R8G8B8_UNORM = 49,
+       RB5_R8G8B8A8_UINT = 51,
+       RB5_R10G10B10A2_UINT = 58,
+       RB5_R16G16B16A16_FLOAT = 98,
+};
+
+enum a5xx_tile_mode {
+       TILE5_LINEAR = 0,
+       TILE5_2 = 2,
+       TILE5_3 = 3,
+};
+
+enum a5xx_vtx_fmt {
+       VFMT5_8_UNORM = 3,
+       VFMT5_8_SNORM = 4,
+       VFMT5_8_UINT = 5,
+       VFMT5_8_SINT = 6,
+       VFMT5_8_8_UNORM = 15,
+       VFMT5_8_8_SNORM = 16,
+       VFMT5_8_8_UINT = 17,
+       VFMT5_8_8_SINT = 18,
+       VFMT5_16_UNORM = 21,
+       VFMT5_16_SNORM = 22,
+       VFMT5_16_FLOAT = 23,
+       VFMT5_16_UINT = 24,
+       VFMT5_16_SINT = 25,
+       VFMT5_8_8_8_UNORM = 33,
+       VFMT5_8_8_8_SNORM = 34,
+       VFMT5_8_8_8_UINT = 35,
+       VFMT5_8_8_8_SINT = 36,
+       VFMT5_8_8_8_8_UNORM = 48,
+       VFMT5_8_8_8_8_SNORM = 50,
+       VFMT5_8_8_8_8_UINT = 51,
+       VFMT5_8_8_8_8_SINT = 52,
+       VFMT5_16_16_UNORM = 67,
+       VFMT5_16_16_SNORM = 68,
+       VFMT5_16_16_FLOAT = 69,
+       VFMT5_16_16_UINT = 70,
+       VFMT5_16_16_SINT = 71,
+       VFMT5_32_UNORM = 72,
+       VFMT5_32_SNORM = 73,
+       VFMT5_32_FLOAT = 74,
+       VFMT5_32_UINT = 75,
+       VFMT5_32_SINT = 76,
+       VFMT5_32_FIXED = 77,
+       VFMT5_16_16_16_UNORM = 88,
+       VFMT5_16_16_16_SNORM = 89,
+       VFMT5_16_16_16_FLOAT = 90,
+       VFMT5_16_16_16_UINT = 91,
+       VFMT5_16_16_16_SINT = 92,
+       VFMT5_16_16_16_16_UNORM = 96,
+       VFMT5_16_16_16_16_SNORM = 97,
+       VFMT5_16_16_16_16_FLOAT = 98,
+       VFMT5_16_16_16_16_UINT = 99,
+       VFMT5_16_16_16_16_SINT = 100,
+       VFMT5_32_32_UNORM = 101,
+       VFMT5_32_32_SNORM = 102,
+       VFMT5_32_32_FLOAT = 103,
+       VFMT5_32_32_UINT = 104,
+       VFMT5_32_32_SINT = 105,
+       VFMT5_32_32_FIXED = 106,
+       VFMT5_32_32_32_UNORM = 112,
+       VFMT5_32_32_32_SNORM = 113,
+       VFMT5_32_32_32_UINT = 114,
+       VFMT5_32_32_32_SINT = 115,
+       VFMT5_32_32_32_FLOAT = 116,
+       VFMT5_32_32_32_FIXED = 117,
+       VFMT5_32_32_32_32_UNORM = 128,
+       VFMT5_32_32_32_32_SNORM = 129,
+       VFMT5_32_32_32_32_FLOAT = 130,
+       VFMT5_32_32_32_32_UINT = 131,
+       VFMT5_32_32_32_32_SINT = 132,
+       VFMT5_32_32_32_32_FIXED = 133,
+};
+
+enum a5xx_tex_fmt {
+       TFMT5_A8_UNORM = 2,
+       TFMT5_8_UNORM = 3,
+       TFMT5_4_4_4_4_UNORM = 8,
+       TFMT5_5_6_5_UNORM = 14,
+       TFMT5_L8_A8_UNORM = 19,
+       TFMT5_16_FLOAT = 23,
+       TFMT5_8_8_8_8_UNORM = 48,
+       TFMT5_10_10_10_2_UNORM = 54,
+       TFMT5_16_16_FLOAT = 69,
+       TFMT5_32_FLOAT = 74,
+       TFMT5_16_16_16_16_FLOAT = 98,
+       TFMT5_32_32_FLOAT = 103,
+       TFMT5_32_32_32_32_FLOAT = 130,
+       TFMT5_X8Z24_UNORM = 160,
+};
+
+enum a5xx_tex_fetchsize {
+       TFETCH5_1_BYTE = 0,
+       TFETCH5_2_BYTE = 1,
+       TFETCH5_4_BYTE = 2,
+       TFETCH5_8_BYTE = 3,
+       TFETCH5_16_BYTE = 4,
+};
+
+enum a5xx_depth_format {
+       DEPTH5_NONE = 0,
+       DEPTH5_16 = 1,
+       DEPTH5_24_8 = 2,
+       DEPTH5_32 = 4,
+};
+
+enum a5xx_debugbus {
+       A5XX_RBBM_DBGBUS_CP = 1,
+       A5XX_RBBM_DBGBUS_RBBM = 2,
+       A5XX_RBBM_DBGBUS_VBIF = 3,
+       A5XX_RBBM_DBGBUS_HLSQ = 4,
+       A5XX_RBBM_DBGBUS_UCHE = 5,
+       A5XX_RBBM_DBGBUS_DPM = 6,
+       A5XX_RBBM_DBGBUS_TESS = 7,
+       A5XX_RBBM_DBGBUS_PC = 8,
+       A5XX_RBBM_DBGBUS_VFDP = 9,
+       A5XX_RBBM_DBGBUS_VPC = 10,
+       A5XX_RBBM_DBGBUS_TSE = 11,
+       A5XX_RBBM_DBGBUS_RAS = 12,
+       A5XX_RBBM_DBGBUS_VSC = 13,
+       A5XX_RBBM_DBGBUS_COM = 14,
+       A5XX_RBBM_DBGBUS_DCOM = 15,
+       A5XX_RBBM_DBGBUS_LRZ = 16,
+       A5XX_RBBM_DBGBUS_A2D_DSP = 17,
+       A5XX_RBBM_DBGBUS_CCUFCHE = 18,
+       A5XX_RBBM_DBGBUS_GPMU = 19,
+       A5XX_RBBM_DBGBUS_RBP = 20,
+       A5XX_RBBM_DBGBUS_HM = 21,
+       A5XX_RBBM_DBGBUS_RBBM_CFG = 22,
+       A5XX_RBBM_DBGBUS_VBIF_CX = 23,
+       A5XX_RBBM_DBGBUS_GPC = 29,
+       A5XX_RBBM_DBGBUS_LARC = 30,
+       A5XX_RBBM_DBGBUS_HLSQ_SPTP = 31,
+       A5XX_RBBM_DBGBUS_RB_0 = 32,
+       A5XX_RBBM_DBGBUS_RB_1 = 33,
+       A5XX_RBBM_DBGBUS_RB_2 = 34,
+       A5XX_RBBM_DBGBUS_RB_3 = 35,
+       A5XX_RBBM_DBGBUS_CCU_0 = 40,
+       A5XX_RBBM_DBGBUS_CCU_1 = 41,
+       A5XX_RBBM_DBGBUS_CCU_2 = 42,
+       A5XX_RBBM_DBGBUS_CCU_3 = 43,
+       A5XX_RBBM_DBGBUS_A2D_RAS_0 = 48,
+       A5XX_RBBM_DBGBUS_A2D_RAS_1 = 49,
+       A5XX_RBBM_DBGBUS_A2D_RAS_2 = 50,
+       A5XX_RBBM_DBGBUS_A2D_RAS_3 = 51,
+       A5XX_RBBM_DBGBUS_VFD_0 = 56,
+       A5XX_RBBM_DBGBUS_VFD_1 = 57,
+       A5XX_RBBM_DBGBUS_VFD_2 = 58,
+       A5XX_RBBM_DBGBUS_VFD_3 = 59,
+       A5XX_RBBM_DBGBUS_SP_0 = 64,
+       A5XX_RBBM_DBGBUS_SP_1 = 65,
+       A5XX_RBBM_DBGBUS_SP_2 = 66,
+       A5XX_RBBM_DBGBUS_SP_3 = 67,
+       A5XX_RBBM_DBGBUS_TPL1_0 = 72,
+       A5XX_RBBM_DBGBUS_TPL1_1 = 73,
+       A5XX_RBBM_DBGBUS_TPL1_2 = 74,
+       A5XX_RBBM_DBGBUS_TPL1_3 = 75,
+};
+
+enum a5xx_shader_blocks {
+       A5XX_TP_W_MEMOBJ = 1,
+       A5XX_TP_W_SAMPLER = 2,
+       A5XX_TP_W_MIPMAP_BASE = 3,
+       A5XX_TP_W_MEMOBJ_TAG = 4,
+       A5XX_TP_W_SAMPLER_TAG = 5,
+       A5XX_TP_S_3D_MEMOBJ = 6,
+       A5XX_TP_S_3D_SAMPLER = 7,
+       A5XX_TP_S_3D_MEMOBJ_TAG = 8,
+       A5XX_TP_S_3D_SAMPLER_TAG = 9,
+       A5XX_TP_S_CS_MEMOBJ = 10,
+       A5XX_TP_S_CS_SAMPLER = 11,
+       A5XX_TP_S_CS_MEMOBJ_TAG = 12,
+       A5XX_TP_S_CS_SAMPLER_TAG = 13,
+       A5XX_SP_W_INSTR = 14,
+       A5XX_SP_W_CONST = 15,
+       A5XX_SP_W_UAV_SIZE = 16,
+       A5XX_SP_W_CB_SIZE = 17,
+       A5XX_SP_W_UAV_BASE = 18,
+       A5XX_SP_W_CB_BASE = 19,
+       A5XX_SP_W_INST_TAG = 20,
+       A5XX_SP_W_STATE = 21,
+       A5XX_SP_S_3D_INSTR = 22,
+       A5XX_SP_S_3D_CONST = 23,
+       A5XX_SP_S_3D_CB_BASE = 24,
+       A5XX_SP_S_3D_CB_SIZE = 25,
+       A5XX_SP_S_3D_UAV_BASE = 26,
+       A5XX_SP_S_3D_UAV_SIZE = 27,
+       A5XX_SP_S_CS_INSTR = 28,
+       A5XX_SP_S_CS_CONST = 29,
+       A5XX_SP_S_CS_CB_BASE = 30,
+       A5XX_SP_S_CS_CB_SIZE = 31,
+       A5XX_SP_S_CS_UAV_BASE = 32,
+       A5XX_SP_S_CS_UAV_SIZE = 33,
+       A5XX_SP_S_3D_INSTR_DIRTY = 34,
+       A5XX_SP_S_3D_CONST_DIRTY = 35,
+       A5XX_SP_S_3D_CB_BASE_DIRTY = 36,
+       A5XX_SP_S_3D_CB_SIZE_DIRTY = 37,
+       A5XX_SP_S_3D_UAV_BASE_DIRTY = 38,
+       A5XX_SP_S_3D_UAV_SIZE_DIRTY = 39,
+       A5XX_SP_S_CS_INSTR_DIRTY = 40,
+       A5XX_SP_S_CS_CONST_DIRTY = 41,
+       A5XX_SP_S_CS_CB_BASE_DIRTY = 42,
+       A5XX_SP_S_CS_CB_SIZE_DIRTY = 43,
+       A5XX_SP_S_CS_UAV_BASE_DIRTY = 44,
+       A5XX_SP_S_CS_UAV_SIZE_DIRTY = 45,
+       A5XX_HLSQ_ICB = 46,
+       A5XX_HLSQ_ICB_DIRTY = 47,
+       A5XX_HLSQ_ICB_CB_BASE_DIRTY = 48,
+       A5XX_SP_POWER_RESTORE_RAM = 64,
+       A5XX_SP_POWER_RESTORE_RAM_TAG = 65,
+       A5XX_TP_POWER_RESTORE_RAM = 66,
+       A5XX_TP_POWER_RESTORE_RAM_TAG = 67,
+};
+
+enum a5xx_tex_filter {
+       A5XX_TEX_NEAREST = 0,
+       A5XX_TEX_LINEAR = 1,
+       A5XX_TEX_ANISO = 2,
+};
+
+enum a5xx_tex_clamp {
+       A5XX_TEX_REPEAT = 0,
+       A5XX_TEX_CLAMP_TO_EDGE = 1,
+       A5XX_TEX_MIRROR_REPEAT = 2,
+       A5XX_TEX_CLAMP_TO_BORDER = 3,
+       A5XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a5xx_tex_aniso {
+       A5XX_TEX_ANISO_1 = 0,
+       A5XX_TEX_ANISO_2 = 1,
+       A5XX_TEX_ANISO_4 = 2,
+       A5XX_TEX_ANISO_8 = 3,
+       A5XX_TEX_ANISO_16 = 4,
+};
+
+enum a5xx_tex_swiz {
+       A5XX_TEX_X = 0,
+       A5XX_TEX_Y = 1,
+       A5XX_TEX_Z = 2,
+       A5XX_TEX_W = 3,
+       A5XX_TEX_ZERO = 4,
+       A5XX_TEX_ONE = 5,
+};
+
+enum a5xx_tex_type {
+       A5XX_TEX_1D = 0,
+       A5XX_TEX_2D = 1,
+       A5XX_TEX_CUBE = 2,
+       A5XX_TEX_3D = 3,
+};
+
+/* RBBM INT_0 interrupt status bits — one bit per interrupt source. */
+#define A5XX_INT0_RBBM_GPU_IDLE                                        0x00000001
+#define A5XX_INT0_RBBM_AHB_ERROR                               0x00000002
+#define A5XX_INT0_RBBM_TRANSFER_TIMEOUT                                0x00000004
+#define A5XX_INT0_RBBM_ME_MS_TIMEOUT                           0x00000008
+#define A5XX_INT0_RBBM_PFP_MS_TIMEOUT                          0x00000010
+#define A5XX_INT0_RBBM_ETS_MS_TIMEOUT                          0x00000020
+#define A5XX_INT0_RBBM_ATB_ASYNC_OVERFLOW                      0x00000040
+#define A5XX_INT0_RBBM_GPC_ERROR                               0x00000080
+#define A5XX_INT0_CP_SW                                                0x00000100
+#define A5XX_INT0_CP_HW_ERROR                                  0x00000200
+#define A5XX_INT0_CP_CCU_FLUSH_DEPTH_TS                                0x00000400
+#define A5XX_INT0_CP_CCU_FLUSH_COLOR_TS                                0x00000800
+#define A5XX_INT0_CP_CCU_RESOLVE_TS                            0x00001000
+#define A5XX_INT0_CP_IB2                                       0x00002000
+#define A5XX_INT0_CP_IB1                                       0x00004000
+#define A5XX_INT0_CP_RB                                                0x00008000
+#define A5XX_INT0_CP_UNUSED_1                                  0x00010000
+#define A5XX_INT0_CP_RB_DONE_TS                                        0x00020000
+#define A5XX_INT0_CP_WT_DONE_TS                                        0x00040000
+#define A5XX_INT0_UNKNOWN_1                                    0x00080000
+#define A5XX_INT0_CP_CACHE_FLUSH_TS                            0x00100000
+#define A5XX_INT0_UNUSED_2                                     0x00200000
+#define A5XX_INT0_RBBM_ATB_BUS_OVERFLOW                                0x00400000
+#define A5XX_INT0_MISC_HANG_DETECT                             0x00800000
+#define A5XX_INT0_UCHE_OOB_ACCESS                              0x01000000
+#define A5XX_INT0_UCHE_TRAP_INTR                               0x02000000
+#define A5XX_INT0_DEBBUS_INTR_0                                        0x04000000
+#define A5XX_INT0_DEBBUS_INTR_1                                        0x08000000
+#define A5XX_INT0_GPMU_VOLTAGE_DROOP                           0x10000000
+#define A5XX_INT0_GPMU_FIRMWARE                                        0x20000000
+#define A5XX_INT0_ISDB_CPU_IRQ                                 0x40000000
+#define A5XX_INT0_ISDB_UNDER_DEBUG                             0x80000000
+/* CP error interrupt bits — presumably read from REG_A5XX_CP_INTERRUPT_STATUS
+ * (defined below); confirm against the rnndb database.
+ */
+#define A5XX_CP_INT_CP_OPCODE_ERROR                            0x00000001
+#define A5XX_CP_INT_CP_RESERVED_BIT_ERROR                      0x00000002
+#define A5XX_CP_INT_CP_HW_FAULT_ERROR                          0x00000004
+#define A5XX_CP_INT_CP_DMA_ERROR                               0x00000008
+#define A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR               0x00000010
+#define A5XX_CP_INT_CP_AHB_ERROR                               0x00000020
+/* Command Processor (CP) register offsets: ringbuffer, indirect buffers,
+ * firmware/debug access, and context-switch state.
+ */
+#define REG_A5XX_CP_RB_BASE                                    0x00000800
+
+#define REG_A5XX_CP_RB_BASE_HI                                 0x00000801
+
+#define REG_A5XX_CP_RB_CNTL                                    0x00000802
+
+#define REG_A5XX_CP_RB_RPTR_ADDR                               0x00000804
+
+#define REG_A5XX_CP_RB_RPTR_ADDR_HI                            0x00000805
+
+#define REG_A5XX_CP_RB_RPTR                                    0x00000806
+
+#define REG_A5XX_CP_RB_WPTR                                    0x00000807
+
+#define REG_A5XX_CP_PFP_STAT_ADDR                              0x00000808
+
+#define REG_A5XX_CP_PFP_STAT_DATA                              0x00000809
+
+#define REG_A5XX_CP_DRAW_STATE_ADDR                            0x0000080b
+
+#define REG_A5XX_CP_DRAW_STATE_DATA                            0x0000080c
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_LO                       0x00000817
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_HI                       0x00000818
+
+#define REG_A5XX_CP_CRASH_DUMP_CNTL                            0x00000819
+
+#define REG_A5XX_CP_ME_STAT_ADDR                               0x0000081a
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_1                           0x0000081f
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_2                           0x00000820
+
+#define REG_A5XX_CP_ROQ_DBG_ADDR                               0x00000821
+
+#define REG_A5XX_CP_ROQ_DBG_DATA                               0x00000822
+
+#define REG_A5XX_CP_MEQ_DBG_ADDR                               0x00000823
+
+#define REG_A5XX_CP_MEQ_DBG_DATA                               0x00000824
+
+#define REG_A5XX_CP_MEQ_THRESHOLDS                             0x00000825
+
+#define REG_A5XX_CP_MERCIU_SIZE                                        0x00000826
+
+#define REG_A5XX_CP_MERCIU_DBG_ADDR                            0x00000827
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_1                          0x00000828
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_2                          0x00000829
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_ADDR                         0x0000082a
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_DATA                         0x0000082b
+
+#define REG_A5XX_CP_ME_UCODE_DBG_ADDR                          0x0000082f
+
+#define REG_A5XX_CP_ME_UCODE_DBG_DATA                          0x00000830
+
+#define REG_A5XX_CP_CNTL                                       0x00000831
+
+#define REG_A5XX_CP_PFP_ME_CNTL                                        0x00000832
+
+#define REG_A5XX_CP_CHICKEN_DBG                                        0x00000833
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_LO                          0x00000835
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_HI                          0x00000836
+
+#define REG_A5XX_CP_ME_INSTR_BASE_LO                           0x00000838
+
+#define REG_A5XX_CP_ME_INSTR_BASE_HI                           0x00000839
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_CNTL                                0x0000083b
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO             0x0000083c
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI             0x0000083d
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO                        0x0000083e
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI                        0x0000083f
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO                        0x00000840
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI                        0x00000841
+
+#define REG_A5XX_CP_ADDR_MODE_CNTL                             0x00000860
+
+#define REG_A5XX_CP_ME_STAT_DATA                               0x00000b14
+
+#define REG_A5XX_CP_WFI_PEND_CTR                               0x00000b15
+
+#define REG_A5XX_CP_INTERRUPT_STATUS                           0x00000b18
+
+#define REG_A5XX_CP_HW_FAULT                                   0x00000b1a
+
+#define REG_A5XX_CP_PROTECT_STATUS                             0x00000b1c
+
+#define REG_A5XX_CP_IB1_BASE                                   0x00000b1f
+
+#define REG_A5XX_CP_IB1_BASE_HI                                        0x00000b20
+
+#define REG_A5XX_CP_IB1_BUFSZ                                  0x00000b21
+
+#define REG_A5XX_CP_IB2_BASE                                   0x00000b22
+
+#define REG_A5XX_CP_IB2_BASE_HI                                        0x00000b23
+
+#define REG_A5XX_CP_IB2_BUFSZ                                  0x00000b24
+
+/* CP scratch registers: array at 0x0b78, stride 1 dword. */
+static inline uint32_t REG_A5XX_CP_SCRATCH(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+/* CP protect registers: array at 0x0880, stride 1 dword. The field helpers
+ * below pack the protected base address and mask length into a protect entry.
+ */
+static inline uint32_t REG_A5XX_CP_PROTECT(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__MASK                    0x0001ffff
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT                   0
+/* Pack a base address into bits [16:0] of a protect entry. */
+static inline uint32_t A5XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+       return ((val) << A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A5XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A5XX_CP_PROTECT_REG_MASK_LEN__MASK                     0x1f000000
+#define A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT                    24
+/* Pack a mask length into bits [28:24] of a protect entry. */
+static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+       return ((val) << A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A5XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A5XX_CP_PROTECT_REG_TRAP_WRITE                         0x20000000
+#define A5XX_CP_PROTECT_REG_TRAP_READ                          0x40000000
+
+/* CP protect control, AHB fault status, and CP performance/power counter
+ * select registers.
+ */
+#define REG_A5XX_CP_PROTECT_CNTL                               0x000008a0
+
+#define REG_A5XX_CP_AHB_FAULT                                  0x00000b1b
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_0                           0x00000bb0
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_1                           0x00000bb1
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_2                           0x00000bb2
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_3                           0x00000bb3
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_4                           0x00000bb4
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_5                           0x00000bb5
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_6                           0x00000bb6
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_7                           0x00000bb7
+
+#define REG_A5XX_VSC_ADDR_MODE_CNTL                            0x00000bc1
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_0                          0x00000bba
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_1                          0x00000bbb
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_2                          0x00000bbc
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_3                          0x00000bbd
+
+/* RBBM debug-bus configuration registers. */
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_A                         0x00000004
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__MASK            0x000000ff
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT           0
+/* Pack the ping index into bits [7:0] of DBGBUS_SEL_A. */
+static inline uint32_t A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT) & A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__MASK;
+}
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK          0x0000ff00
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT         8
+/* Pack the ping block select into bits [15:8] of DBGBUS_SEL_A. */
+static inline uint32_t A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT) & A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK;
+}
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_B                         0x00000005
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_C                         0x00000006
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_D                         0x00000007
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLT                         0x00000008
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLM                         0x00000009
+#define A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__MASK                        0x0f000000
+#define A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__SHIFT               24
+/* Pack the enable field into bits [27:24] of DBGBUS_CNTLM. */
+static inline uint32_t A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+       return ((val) << A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+/* NOTE(review): value 0x18 (= 24) equals the CNTLM ENABLE shift above, and the
+ * name says "DEBBUS"/"CTLTM" rather than "DBGBUS"/"CNTLM" — this looks like a
+ * field shift constant carrying a REG_ prefix, not a register offset. File is
+ * autogenerated; confirm against the rnndb source before relying on it.
+ */
+#define REG_A5XX_RBBM_CFG_DEBBUS_CTLTM_ENABLE_SHIFT            0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPL                           0x0000000a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPE                           0x0000000b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_0                                0x0000000c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_1                                0x0000000d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_2                                0x0000000e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_3                                0x0000000f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_0                       0x00000010
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_1                       0x00000011
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_2                       0x00000012
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_3                       0x00000013
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_0                       0x00000014
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_1                       0x00000015
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_0                                0x00000016
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_1                                0x00000017
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_2                                0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_3                                0x00000019
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_0                       0x0000001a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_1                       0x0000001b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_2                       0x0000001c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_3                       0x0000001d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_NIBBLEE                       0x0000001e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC0                         0x0000001f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC1                         0x00000020
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADREG                       0x00000021
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IDX                           0x00000022
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CLRC                          0x00000023
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADIVT                       0x00000024
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL                  0x0000002f
+
+#define REG_A5XX_RBBM_INT_CLEAR_CMD                            0x00000037
+
+/* RBBM interrupt enable mask; bit positions mirror the A5XX_INT0_* status
+ * bits defined earlier (the UNUSED/UNKNOWN bits have no mask counterpart).
+ */
+#define REG_A5XX_RBBM_INT_0_MASK                               0x00000038
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE                     0x00000001
+#define A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR                    0x00000002
+#define A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT             0x00000004
+#define A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT                        0x00000008
+#define A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT               0x00000010
+#define A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT               0x00000020
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW           0x00000040
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR                    0x00000080
+#define A5XX_RBBM_INT_0_MASK_CP_SW                             0x00000100
+#define A5XX_RBBM_INT_0_MASK_CP_HW_ERROR                       0x00000200
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS             0x00000400
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS             0x00000800
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS                 0x00001000
+#define A5XX_RBBM_INT_0_MASK_CP_IB2                            0x00002000
+#define A5XX_RBBM_INT_0_MASK_CP_IB1                            0x00004000
+#define A5XX_RBBM_INT_0_MASK_CP_RB                             0x00008000
+#define A5XX_RBBM_INT_0_MASK_CP_RB_DONE_TS                     0x00020000
+#define A5XX_RBBM_INT_0_MASK_CP_WT_DONE_TS                     0x00040000
+#define A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS                 0x00100000
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW             0x00400000
+#define A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT                  0x00800000
+#define A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS                   0x01000000
+#define A5XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR                    0x02000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_0                     0x04000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_1                     0x08000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP                        0x10000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_FIRMWARE                     0x20000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ                      0x40000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG                  0x80000000
+
+/* RBBM debug/reset/AHB control and per-block clock gating registers
+ * (CNTL / HYST / DELAY variants for TP, SP, RB, CCU, UCHE, GPC, VFD, GPMU...).
+ */
+#define REG_A5XX_RBBM_AHB_DBG_CNTL                             0x0000003f
+
+#define REG_A5XX_RBBM_EXT_VBIF_DBG_CNTL                                0x00000041
+
+#define REG_A5XX_RBBM_SW_RESET_CMD                             0x00000043
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD                       0x00000045
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD2                      0x00000046
+
+#define REG_A5XX_RBBM_DBG_LO_HI_GPIO                           0x00000048
+
+#define REG_A5XX_RBBM_EXT_TRACE_BUS_CNTL                       0x00000049
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP0                           0x0000004a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP1                           0x0000004b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP2                           0x0000004c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP3                           0x0000004d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP0                          0x0000004e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP1                          0x0000004f
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP2                          0x00000050
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP3                          0x00000051
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP0                          0x00000052
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP1                          0x00000053
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP2                          0x00000054
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP3                          0x00000055
+
+#define REG_A5XX_RBBM_READ_AHB_THROUGH_DBG                     0x00000059
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_UCHE                          0x0000005a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_UCHE                         0x0000005b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_UCHE                         0x0000005c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL4_UCHE                         0x0000005d
+
+#define REG_A5XX_RBBM_CLOCK_HYST_UCHE                          0x0000005e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_UCHE                         0x0000005f
+
+#define REG_A5XX_RBBM_CLOCK_MODE_GPC                           0x00000060
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPC                          0x00000061
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPC                           0x00000062
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM                  0x00000063
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM                  0x00000064
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM                 0x00000065
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_HLSQ                         0x00000066
+
+#define REG_A5XX_RBBM_CLOCK_CNTL                               0x00000067
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP0                           0x00000068
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP1                           0x00000069
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP2                           0x0000006a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP3                           0x0000006b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP0                          0x0000006c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP1                          0x0000006d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP2                          0x0000006e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP3                          0x0000006f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP0                           0x00000070
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP1                           0x00000071
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP2                           0x00000072
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP3                           0x00000073
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP0                          0x00000074
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP1                          0x00000075
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP2                          0x00000076
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP3                          0x00000077
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB0                           0x00000078
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB1                           0x00000079
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB2                           0x0000007a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB3                           0x0000007b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB0                          0x0000007c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB1                          0x0000007d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB2                          0x0000007e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB3                          0x0000007f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RAC                           0x00000080
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RAC                          0x00000081
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU0                          0x00000082
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU1                          0x00000083
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU2                          0x00000084
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU3                          0x00000085
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0                       0x00000086
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1                       0x00000087
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2                       0x00000088
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3                       0x00000089
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RAC                           0x0000008a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RAC                          0x0000008b
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0                  0x0000008c
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1                  0x0000008d
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2                  0x0000008e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3                  0x0000008f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_VFD                           0x00000090
+
+#define REG_A5XX_RBBM_CLOCK_MODE_VFD                           0x00000091
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_VFD                          0x00000092
+
+#define REG_A5XX_RBBM_AHB_CNTL0                                        0x00000093
+
+#define REG_A5XX_RBBM_AHB_CNTL1                                        0x00000094
+
+#define REG_A5XX_RBBM_AHB_CNTL2                                        0x00000095
+
+#define REG_A5XX_RBBM_AHB_CMD                                  0x00000096
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11               0x0000009c
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12               0x0000009d
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13               0x0000009e
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14               0x0000009f
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15               0x000000a0
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16               0x000000a1
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17               0x000000a2
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18               0x000000a3
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP0                          0x000000a4
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP1                          0x000000a5
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP2                          0x000000a6
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP3                          0x000000a7
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP0                         0x000000a8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP1                         0x000000a9
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP2                         0x000000aa
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP3                         0x000000ab
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP0                         0x000000ac
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP1                         0x000000ad
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP2                         0x000000ae
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP3                         0x000000af
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP0                           0x000000b0
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP1                           0x000000b1
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP2                           0x000000b2
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP3                           0x000000b3
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP0                          0x000000b4
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP1                          0x000000b5
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP2                          0x000000b6
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP3                          0x000000b7
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP0                          0x000000b8
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP1                          0x000000b9
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP2                          0x000000ba
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP3                          0x000000bb
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_GPMU                          0x000000c8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPMU                         0x000000c9
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPMU                          0x000000ca
+
+/* RBBM performance counters, exposed as _LO/_HI register pairs per hardware
+ * unit (CP, RBBM, PC, VFD, HLSQ, VPC, CCU, TSE, RAS, UCHE, TP, ...).
+ */
+#define REG_A5XX_RBBM_PERFCTR_CP_0_LO                          0x000003a0
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_HI                          0x000003a1
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_LO                          0x000003a2
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_HI                          0x000003a3
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_LO                          0x000003a4
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_HI                          0x000003a5
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_LO                          0x000003a6
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_HI                          0x000003a7
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_LO                          0x000003a8
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_HI                          0x000003a9
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_LO                          0x000003aa
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_HI                          0x000003ab
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_LO                          0x000003ac
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_HI                          0x000003ad
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_LO                          0x000003ae
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_HI                          0x000003af
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_LO                                0x000003b0
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_HI                                0x000003b1
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_LO                                0x000003b2
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_HI                                0x000003b3
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_LO                                0x000003b4
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_HI                                0x000003b5
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_LO                                0x000003b6
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_HI                                0x000003b7
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_LO                          0x000003b8
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_HI                          0x000003b9
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_LO                          0x000003ba
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_HI                          0x000003bb
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_LO                          0x000003bc
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_HI                          0x000003bd
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_LO                          0x000003be
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_HI                          0x000003bf
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_LO                          0x000003c0
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_HI                          0x000003c1
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_LO                          0x000003c2
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_HI                          0x000003c3
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_LO                          0x000003c4
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_HI                          0x000003c5
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_LO                          0x000003c6
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_HI                          0x000003c7
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_LO                         0x000003c8
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_HI                         0x000003c9
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_LO                         0x000003ca
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_HI                         0x000003cb
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_LO                         0x000003cc
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_HI                         0x000003cd
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_LO                         0x000003ce
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_HI                         0x000003cf
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_LO                         0x000003d0
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_HI                         0x000003d1
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_LO                         0x000003d2
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_HI                         0x000003d3
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_LO                         0x000003d4
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_HI                         0x000003d5
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_LO                         0x000003d6
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_HI                         0x000003d7
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_LO                                0x000003d8
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_HI                                0x000003d9
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_LO                                0x000003da
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_HI                                0x000003db
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_LO                                0x000003dc
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_HI                                0x000003dd
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_LO                                0x000003de
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_HI                                0x000003df
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_LO                                0x000003e0
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_HI                                0x000003e1
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_LO                                0x000003e2
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_HI                                0x000003e3
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_LO                                0x000003e4
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_HI                                0x000003e5
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_LO                                0x000003e6
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_HI                                0x000003e7
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_LO                         0x000003e8
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_HI                         0x000003e9
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_LO                         0x000003ea
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_HI                         0x000003eb
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_LO                         0x000003ec
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_HI                         0x000003ed
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_LO                         0x000003ee
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_HI                         0x000003ef
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_LO                         0x000003f0
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_HI                         0x000003f1
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_LO                         0x000003f2
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_HI                         0x000003f3
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_LO                         0x000003f4
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_HI                         0x000003f5
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_LO                         0x000003f6
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_HI                         0x000003f7
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_LO                         0x000003f8
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_HI                         0x000003f9
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_LO                         0x000003fa
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_HI                         0x000003fb
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_LO                         0x000003fc
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_HI                         0x000003fd
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_LO                         0x000003fe
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_HI                         0x000003ff
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_LO                         0x00000400
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_HI                         0x00000401
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_LO                         0x00000402
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_HI                         0x00000403
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_LO                         0x00000404
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_HI                         0x00000405
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_LO                         0x00000406
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_HI                         0x00000407
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_LO                                0x00000408
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_HI                                0x00000409
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_LO                                0x0000040a
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_HI                                0x0000040b
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_LO                                0x0000040c
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_HI                                0x0000040d
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_LO                                0x0000040e
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_HI                                0x0000040f
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_LO                                0x00000410
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_HI                                0x00000411
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_LO                                0x00000412
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_HI                                0x00000413
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_LO                                0x00000414
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_HI                                0x00000415
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_LO                                0x00000416
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_HI                                0x00000417
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_LO                          0x00000418
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_HI                          0x00000419
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_LO                          0x0000041a
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_HI                          0x0000041b
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_LO                          0x0000041c
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_HI                          0x0000041d
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_LO                          0x0000041e
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_HI                          0x0000041f
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_LO                          0x00000420
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_HI                          0x00000421
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_LO                          0x00000422
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_HI                          0x00000423
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_LO                          0x00000424
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_HI                          0x00000425
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_LO                          0x00000426
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_HI                          0x00000427
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_LO                          0x00000428
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_HI                          0x00000429
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_LO                          0x0000042a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_HI                          0x0000042b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_LO                          0x0000042c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_HI                          0x0000042d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_LO                          0x0000042e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_HI                          0x0000042f
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_LO                          0x00000430
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_HI                          0x00000431
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_LO                          0x00000432
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_HI                          0x00000433
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_LO                          0x00000434
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_HI                          0x00000435
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_LO                          0x00000436
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_HI                          0x00000437
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_LO                          0x00000438
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_HI                          0x00000439
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_LO                          0x0000043a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_HI                          0x0000043b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_LO                         0x0000043c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_HI                         0x0000043d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_LO                         0x0000043e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_HI                         0x0000043f
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_LO                          0x00000440
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_HI                          0x00000441
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_LO                          0x00000442
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_HI                          0x00000443
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_LO                          0x00000444
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_HI                          0x00000445
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_LO                          0x00000446
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_HI                          0x00000447
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_LO                          0x00000448
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_HI                          0x00000449
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_LO                          0x0000044a
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_HI                          0x0000044b
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_LO                          0x0000044c
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_HI                          0x0000044d
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_LO                          0x0000044e
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_HI                          0x0000044f
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_LO                         0x00000450
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_HI                         0x00000451
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_LO                         0x00000452
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_HI                         0x00000453
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_LO                         0x00000454
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_HI                         0x00000455
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_LO                         0x00000456
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_HI                         0x00000457
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_LO                         0x00000458
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_HI                         0x00000459
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_LO                         0x0000045a
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_HI                         0x0000045b
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_LO                         0x0000045c
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_HI                         0x0000045d
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_LO                         0x0000045e
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_HI                         0x0000045f
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_LO                         0x00000460
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_HI                         0x00000461
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_LO                         0x00000462
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_HI                         0x00000463
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0                       0x0000046b
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1                       0x0000046c
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2                       0x0000046d
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3                       0x0000046e
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_LO                      0x000004d2
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_HI                      0x000004d3
+
+#define REG_A5XX_RBBM_STATUS                                   0x000004f5
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB                      0x80000000
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP                   0x40000000
+#define A5XX_RBBM_STATUS_HLSQ_BUSY                             0x20000000
+#define A5XX_RBBM_STATUS_VSC_BUSY                              0x10000000
+#define A5XX_RBBM_STATUS_TPL1_BUSY                             0x08000000
+#define A5XX_RBBM_STATUS_SP_BUSY                               0x04000000
+#define A5XX_RBBM_STATUS_UCHE_BUSY                             0x02000000
+#define A5XX_RBBM_STATUS_VPC_BUSY                              0x01000000
+#define A5XX_RBBM_STATUS_VFDP_BUSY                             0x00800000
+#define A5XX_RBBM_STATUS_VFD_BUSY                              0x00400000
+#define A5XX_RBBM_STATUS_TESS_BUSY                             0x00200000
+#define A5XX_RBBM_STATUS_PC_VSD_BUSY                           0x00100000
+#define A5XX_RBBM_STATUS_PC_DCALL_BUSY                         0x00080000
+#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY                       0x00040000
+#define A5XX_RBBM_STATUS_DCOM_BUSY                             0x00020000
+#define A5XX_RBBM_STATUS_COM_BUSY                              0x00010000
+#define A5XX_RBBM_STATUS_LRZ_BUZY                              0x00008000
+#define A5XX_RBBM_STATUS_A2D_DSP_BUSY                          0x00004000
+#define A5XX_RBBM_STATUS_CCUFCHE_BUSY                          0x00002000
+#define A5XX_RBBM_STATUS_RB_BUSY                               0x00001000
+#define A5XX_RBBM_STATUS_RAS_BUSY                              0x00000800
+#define A5XX_RBBM_STATUS_TSE_BUSY                              0x00000400
+#define A5XX_RBBM_STATUS_VBIF_BUSY                             0x00000200
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST                 0x00000100
+#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST                      0x00000080
+#define A5XX_RBBM_STATUS_CP_BUSY                               0x00000040
+#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY                      0x00000020
+#define A5XX_RBBM_STATUS_CP_CRASH_BUSY                         0x00000010
+#define A5XX_RBBM_STATUS_CP_ETS_BUSY                           0x00000008
+#define A5XX_RBBM_STATUS_CP_PFP_BUSY                           0x00000004
+#define A5XX_RBBM_STATUS_CP_ME_BUSY                            0x00000002
+#define A5XX_RBBM_STATUS_HI_BUSY                               0x00000001
+
+#define REG_A5XX_RBBM_STATUS3                                  0x00000530
+
+#define REG_A5XX_RBBM_INT_0_STATUS                             0x000004e1
+
+#define REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS                      0x000004f0
+
+#define REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS                     0x000004f1
+
+#define REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS                     0x000004f3
+
+#define REG_A5XX_RBBM_AHB_ERROR_STATUS                         0x000004f4
+
+#define REG_A5XX_RBBM_PERFCTR_CNTL                             0x00000464
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD0                                0x00000465
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD1                                0x00000466
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD2                                0x00000467
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD3                                0x00000468
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_LO                    0x00000469
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_HI                    0x0000046a
+
+/*
+ * NOTE(review): REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0..3 are also defined
+ * earlier in this header with the same values (0x46b-0x46e).  Identical
+ * redefinition is legal C and harmless at compile time, but this header is
+ * generated — the duplicate run should be removed in the rules-ng-ng XML
+ * source rather than hand-edited here.  TODO confirm against generator.
+ */
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0                       0x0000046b
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1                       0x0000046c
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2                       0x0000046d
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3                       0x0000046e
+
+#define REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED                  0x0000046f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_EVENT_LOGIC                   0x00000504
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OVER                          0x00000505
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT0                                0x00000506
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT1                                0x00000507
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT2                                0x00000508
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT3                                0x00000509
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT4                                0x0000050a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT5                                0x0000050b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_ADDR                    0x0000050c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF0                    0x0000050d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1                    0x0000050e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2                    0x0000050f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF3                    0x00000510
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF4                    0x00000511
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR0                         0x00000512
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR1                         0x00000513
+
+#define REG_A5XX_RBBM_ISDB_CNT                                 0x00000533
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CONFIG                      0x0000f000
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CNTL                                0x0000f400
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO               0x0000f800
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI               0x0000f801
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE                  0x0000f802
+
+#define REG_A5XX_RBBM_SECVID_TSB_CNTL                          0x0000f803
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_LO                        0x0000f804
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_HI                        0x0000f805
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_LO                        0x0000f806
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_HI                        0x0000f807
+
+#define REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL                        0x0000f810
+
+#define REG_A5XX_VSC_PIPE_DATA_LENGTH_0                                0x00000c00
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_0                         0x00000c60
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_1                         0x00000c61
+
+#define REG_A5XX_VSC_BIN_SIZE                                  0x00000cdd
+#define A5XX_VSC_BIN_SIZE_WINDOW_OFFSET_DISABLE                        0x80000000
+#define A5XX_VSC_BIN_SIZE_X__MASK                              0x00007fff
+#define A5XX_VSC_BIN_SIZE_X__SHIFT                             0
+/* Pack @val into the VSC_BIN_SIZE X field: bits [14:0] (shift 0, mask 0x7fff). */
+static inline uint32_t A5XX_VSC_BIN_SIZE_X(uint32_t val)
+{
+       return ((val) << A5XX_VSC_BIN_SIZE_X__SHIFT) & A5XX_VSC_BIN_SIZE_X__MASK;
+}
+#define A5XX_VSC_BIN_SIZE_Y__MASK                              0x7fff0000
+#define A5XX_VSC_BIN_SIZE_Y__SHIFT                             16
+/* Pack @val into the VSC_BIN_SIZE Y field: bits [30:16] (shift 16, mask 0x7fff0000). */
+static inline uint32_t A5XX_VSC_BIN_SIZE_Y(uint32_t val)
+{
+       return ((val) << A5XX_VSC_BIN_SIZE_Y__SHIFT) & A5XX_VSC_BIN_SIZE_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_ADDR_MODE_CNTL                           0x00000c81
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_0                                0x00000c90
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_1                                0x00000c91
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_2                                0x00000c92
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_3                                0x00000c93
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_0                                0x00000c94
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_1                                0x00000c95
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_2                                0x00000c96
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_3                                0x00000c97
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0                                0x00000c98
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1                                0x00000c99
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2                                0x00000c9a
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3                                0x00000c9b
+
+#define REG_A5XX_RB_DBG_ECO_CNTL                               0x00000cc4
+
+#define REG_A5XX_RB_ADDR_MODE_CNTL                             0x00000cc5
+
+#define REG_A5XX_RB_MODE_CNTL                                  0x00000cc6
+
+#define REG_A5XX_RB_CCU_CNTL                                   0x00000cc7
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_0                           0x00000cd0
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_1                           0x00000cd1
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_2                           0x00000cd2
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_3                           0x00000cd3
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_4                           0x00000cd4
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_5                           0x00000cd5
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_6                           0x00000cd6
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_7                           0x00000cd7
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_0                          0x00000cd8
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_1                          0x00000cd9
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_2                          0x00000cda
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_3                          0x00000cdb
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_0                          0x00000ce0
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_1                          0x00000ce1
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_2                          0x00000ce2
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_3                          0x00000ce3
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_0                         0x00000ce4
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_1                         0x00000ce5
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_0                          0x00000cec
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_1                          0x00000ced
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_2                          0x00000cee
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_3                          0x00000cef
+
+#define REG_A5XX_PC_DBG_ECO_CNTL                               0x00000d00
+#define A5XX_PC_DBG_ECO_CNTL_TWOPASSUSEWFI                     0x00000100
+
+#define REG_A5XX_PC_ADDR_MODE_CNTL                             0x00000d01
+
+#define REG_A5XX_PC_MODE_CNTL                                  0x00000d02
+
+#define REG_A5XX_UNKNOWN_0D08                                  0x00000d08
+
+#define REG_A5XX_UNKNOWN_0D09                                  0x00000d09
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_0                           0x00000d10
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_1                           0x00000d11
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_2                           0x00000d12
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_3                           0x00000d13
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_4                           0x00000d14
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_5                           0x00000d15
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_6                           0x00000d16
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_7                           0x00000d17
+
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_0                      0x00000e00
+
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_1                      0x00000e01
+
+#define REG_A5XX_HLSQ_ADDR_MODE_CNTL                           0x00000e05
+
+#define REG_A5XX_HLSQ_MODE_CNTL                                        0x00000e06
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0                       0x00000e10
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1                       0x00000e11
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2                       0x00000e12
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3                       0x00000e13
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4                       0x00000e14
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5                       0x00000e15
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6                       0x00000e16
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7                       0x00000e17
+
+#define REG_A5XX_HLSQ_SPTP_RDSEL                               0x00000f08
+
+#define REG_A5XX_HLSQ_DBG_READ_SEL                             0x0000bc00
+#define A5XX_HLSQ_DBG_READ_SEL_STATETYPE__MASK                 0x0000ff00
+#define A5XX_HLSQ_DBG_READ_SEL_STATETYPE__SHIFT                        8
+/* Pack @val into the HLSQ_DBG_READ_SEL STATETYPE field: bits [15:8] (shift 8, mask 0xff00). */
+static inline uint32_t A5XX_HLSQ_DBG_READ_SEL_STATETYPE(uint32_t val)
+{
+       return ((val) << A5XX_HLSQ_DBG_READ_SEL_STATETYPE__SHIFT) & A5XX_HLSQ_DBG_READ_SEL_STATETYPE__MASK;
+}
+
+#define REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE                    0x0000a000
+
+#define REG_A5XX_VFD_ADDR_MODE_CNTL                            0x00000e41
+
+#define REG_A5XX_VFD_MODE_CNTL                                 0x00000e42
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_0                         0x00000e50
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_1                         0x00000e51
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_2                         0x00000e52
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_3                         0x00000e53
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_4                         0x00000e54
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_5                         0x00000e55
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_6                         0x00000e56
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_7                         0x00000e57
+
+#define REG_A5XX_VPC_DBG_ECO_CNTL                              0x00000e60
+
+#define REG_A5XX_VPC_ADDR_MODE_CNTL                            0x00000e61
+
+#define REG_A5XX_VPC_MODE_CNTL                                 0x00000e62
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_0                         0x00000e64
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_1                         0x00000e65
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_2                         0x00000e66
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_3                         0x00000e67
+
+#define REG_A5XX_UCHE_ADDR_MODE_CNTL                           0x00000e80
+
+#define REG_A5XX_UCHE_SVM_CNTL                                 0x00000e82
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_LO                       0x00000e87
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_HI                       0x00000e88
+
+#define REG_A5XX_UCHE_TRAP_BASE_LO                             0x00000e89
+
+#define REG_A5XX_UCHE_TRAP_BASE_HI                             0x00000e8a
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_LO                                0x00000e8b
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_HI                                0x00000e8c
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_LO                                0x00000e8d
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_HI                                0x00000e8e
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL_2                           0x00000e8f
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL                             0x00000e90
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO                  0x00000e91
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_HI                  0x00000e92
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_LO                  0x00000e93
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_HI                  0x00000e94
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE                         0x00000e95
+
+#define REG_A5XX_UCHE_CACHE_WAYS                               0x00000e96
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0                       0x00000ea0
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1                       0x00000ea1
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2                       0x00000ea2
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3                       0x00000ea3
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4                       0x00000ea4
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5                       0x00000ea5
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6                       0x00000ea6
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7                       0x00000ea7
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_0                      0x00000ea8
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_1                      0x00000ea9
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_2                      0x00000eaa
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_3                      0x00000eab
+
+#define REG_A5XX_UCHE_TRAP_LOG_LO                              0x00000eb1
+
+#define REG_A5XX_UCHE_TRAP_LOG_HI                              0x00000eb2
+
+#define REG_A5XX_SP_DBG_ECO_CNTL                               0x00000ec0
+
+#define REG_A5XX_SP_ADDR_MODE_CNTL                             0x00000ec1
+
+#define REG_A5XX_SP_MODE_CNTL                                  0x00000ec2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_0                           0x00000ed0
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_1                           0x00000ed1
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_2                           0x00000ed2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_3                           0x00000ed3
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_4                           0x00000ed4
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_5                           0x00000ed5
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_6                           0x00000ed6
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_7                           0x00000ed7
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_8                           0x00000ed8
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_9                           0x00000ed9
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_10                          0x00000eda
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_11                          0x00000edb
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_0                          0x00000edc
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_1                          0x00000edd
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_2                          0x00000ede
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_3                          0x00000edf
+
+#define REG_A5XX_TPL1_ADDR_MODE_CNTL                           0x00000f01
+
+#define REG_A5XX_TPL1_MODE_CNTL                                        0x00000f02
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_0                         0x00000f10
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_1                         0x00000f11
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_2                         0x00000f12
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_3                         0x00000f13
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_4                         0x00000f14
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_5                         0x00000f15
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_6                         0x00000f16
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_7                         0x00000f17
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_0                                0x00000f18
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_1                                0x00000f19
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_2                                0x00000f1a
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_3                                0x00000f1b
+
+#define REG_A5XX_VBIF_VERSION                                  0x00003000
+
+#define REG_A5XX_VBIF_CLKON                                    0x00003001
+#define A5XX_VBIF_CLKON_FORCE_ON                               0x00000001
+#define A5XX_VBIF_CLKON_FORCE_ON_TESTBUS                       0x00000002
+
+#define REG_A5XX_VBIF_ABIT_SORT                                        0x00003028
+
+#define REG_A5XX_VBIF_ABIT_SORT_CONF                           0x00003029
+
+#define REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB                      0x00003049
+
+#define REG_A5XX_VBIF_GATE_OFF_WRREQ_EN                                0x0000302a
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF0                          0x0000302c
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF1                          0x0000302d
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL0                           0x00003080
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL1                           0x00003081
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT_CTRL                                0x00003084
+#define A5XX_VBIF_TEST_BUS_OUT_CTRL_TEST_BUS_CTRL_EN           0x00000001
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL0                          0x00003085
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL1                          0x00003086
+#define A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__MASK     0x0000000f
+#define A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__SHIFT    0
+/* Pack @val into the VBIF TEST_BUS1 data-select field: bits [3:0] (shift 0, mask 0xf). */
+static inline uint32_t A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL(uint32_t val)
+{
+       return ((val) << A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__SHIFT) & A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__MASK;
+}
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL0                          0x00003087
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL1                          0x00003088
+#define A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__MASK     0x0000001f
+#define A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__SHIFT    0
+/* Pack @val into the VBIF TEST_BUS2 data-select field: bits [4:0] (shift 0, mask 0x1f). */
+static inline uint32_t A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL(uint32_t val)
+{
+       return ((val) << A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__SHIFT) & A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__MASK;
+}
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT                             0x0000308c
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL0                            0x000030d0
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL1                            0x000030d1
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL2                            0x000030d2
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL3                            0x000030d3
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW0                            0x000030d8
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW1                            0x000030d9
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW2                            0x000030da
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW3                            0x000030db
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH0                           0x000030e0
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH1                           0x000030e1
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH2                           0x000030e2
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH3                           0x000030e3
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN0                         0x00003100
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN1                         0x00003101
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN2                         0x00003102
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW0                                0x00003110
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW1                                0x00003111
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW2                                0x00003112
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH0                       0x00003118
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH1                       0x00003119
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH2                       0x0000311a
+
+#define REG_A5XX_GPMU_INST_RAM_BASE                            0x00008800
+
+#define REG_A5XX_GPMU_DATA_RAM_BASE                            0x00009800
+
+#define REG_A5XX_GPMU_SP_POWER_CNTL                            0x0000a881
+
+#define REG_A5XX_GPMU_RBCCU_CLOCK_CNTL                         0x0000a886
+
+#define REG_A5XX_GPMU_RBCCU_POWER_CNTL                         0x0000a887
+
+#define REG_A5XX_GPMU_SP_PWR_CLK_STATUS                                0x0000a88b
+#define A5XX_GPMU_SP_PWR_CLK_STATUS_PWR_ON                     0x00100000
+
+#define REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS                     0x0000a88d
+#define A5XX_GPMU_RBCCU_PWR_CLK_STATUS_PWR_ON                  0x00100000
+
+#define REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY                    0x0000a891
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL                 0x0000a892
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST                 0x0000a893
+
+#define REG_A5XX_GPMU_PWR_COL_BINNING_CTRL                     0x0000a894
+
+#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL                      0x0000a8a3
+
+#define REG_A5XX_GPMU_WFI_CONFIG                               0x0000a8c1
+
+#define REG_A5XX_GPMU_RBBM_INTR_INFO                           0x0000a8d6
+
+#define REG_A5XX_GPMU_CM3_SYSRESET                             0x0000a8d8
+
+#define REG_A5XX_GPMU_GENERAL_0                                        0x0000a8e0
+
+#define REG_A5XX_GPMU_GENERAL_1                                        0x0000a8e1
+
+#define REG_A5XX_SP_POWER_COUNTER_0_LO                         0x0000a840
+
+#define REG_A5XX_SP_POWER_COUNTER_0_HI                         0x0000a841
+
+#define REG_A5XX_SP_POWER_COUNTER_1_LO                         0x0000a842
+
+#define REG_A5XX_SP_POWER_COUNTER_1_HI                         0x0000a843
+
+#define REG_A5XX_SP_POWER_COUNTER_2_LO                         0x0000a844
+
+#define REG_A5XX_SP_POWER_COUNTER_2_HI                         0x0000a845
+
+#define REG_A5XX_SP_POWER_COUNTER_3_LO                         0x0000a846
+
+#define REG_A5XX_SP_POWER_COUNTER_3_HI                         0x0000a847
+
+#define REG_A5XX_TP_POWER_COUNTER_0_LO                         0x0000a848
+
+#define REG_A5XX_TP_POWER_COUNTER_0_HI                         0x0000a849
+
+#define REG_A5XX_TP_POWER_COUNTER_1_LO                         0x0000a84a
+
+#define REG_A5XX_TP_POWER_COUNTER_1_HI                         0x0000a84b
+
+#define REG_A5XX_TP_POWER_COUNTER_2_LO                         0x0000a84c
+
+#define REG_A5XX_TP_POWER_COUNTER_2_HI                         0x0000a84d
+
+#define REG_A5XX_TP_POWER_COUNTER_3_LO                         0x0000a84e
+
+#define REG_A5XX_TP_POWER_COUNTER_3_HI                         0x0000a84f
+
+#define REG_A5XX_RB_POWER_COUNTER_0_LO                         0x0000a850
+
+#define REG_A5XX_RB_POWER_COUNTER_0_HI                         0x0000a851
+
+#define REG_A5XX_RB_POWER_COUNTER_1_LO                         0x0000a852
+
+#define REG_A5XX_RB_POWER_COUNTER_1_HI                         0x0000a853
+
+#define REG_A5XX_RB_POWER_COUNTER_2_LO                         0x0000a854
+
+#define REG_A5XX_RB_POWER_COUNTER_2_HI                         0x0000a855
+
+#define REG_A5XX_RB_POWER_COUNTER_3_LO                         0x0000a856
+
+#define REG_A5XX_RB_POWER_COUNTER_3_HI                         0x0000a857
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_LO                                0x0000a858
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_HI                                0x0000a859
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_LO                                0x0000a85a
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_HI                                0x0000a85b
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_LO                       0x0000a85c
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_HI                       0x0000a85d
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_LO                       0x0000a85e
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_HI                       0x0000a85f
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_LO                       0x0000a860
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_HI                       0x0000a861
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_LO                       0x0000a862
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_HI                       0x0000a863
+
+#define REG_A5XX_CP_POWER_COUNTER_0_LO                         0x0000a864
+
+#define REG_A5XX_CP_POWER_COUNTER_0_HI                         0x0000a865
+
+#define REG_A5XX_CP_POWER_COUNTER_1_LO                         0x0000a866
+
+#define REG_A5XX_CP_POWER_COUNTER_1_HI                         0x0000a867
+
+#define REG_A5XX_CP_POWER_COUNTER_2_LO                         0x0000a868
+
+#define REG_A5XX_CP_POWER_COUNTER_2_HI                         0x0000a869
+
+#define REG_A5XX_CP_POWER_COUNTER_3_LO                         0x0000a86a
+
+#define REG_A5XX_CP_POWER_COUNTER_3_HI                         0x0000a86b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_LO                       0x0000a86c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_HI                       0x0000a86d
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_LO                       0x0000a86e
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_HI                       0x0000a86f
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_LO                       0x0000a870
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_HI                       0x0000a871
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_LO                       0x0000a872
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_HI                       0x0000a873
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_LO                       0x0000a874
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_HI                       0x0000a875
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_LO                       0x0000a876
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_HI                       0x0000a877
+
+#define REG_A5XX_GPMU_POWER_COUNTER_ENABLE                     0x0000a878
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_LO                     0x0000a879
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_HI                     0x0000a87a
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_RESET                  0x0000a87b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_0                   0x0000a87c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_1                   0x0000a87d
+
+#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL                      0x0000a8a3
+
+#define REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL               0x0000a8a8
+
+/* GPMU on-die temperature sensor block (0xac00 range).
+ * NOTE(review): this header is rnndb-style generated code (mask/shift
+ * defines plus packing helpers) -- prefer regenerating from the register
+ * database over hand-editing.
+ */
+#define REG_A5XX_GPMU_TEMP_SENSOR_ID                           0x0000ac00
+
+#define REG_A5XX_GPMU_TEMP_SENSOR_CONFIG                       0x0000ac01
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__MASK       0x0000000f
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__SHIFT      0
+/* Pack an ISENSE status value into bits [3:0] of TEMP_SENSOR_CONFIG. */
+static inline uint32_t A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS(uint32_t val)
+{
+       return ((val) << A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__SHIFT) & A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__MASK;
+}
+/* NOTE(review): BCL_ENABLED (bit 1) falls inside the ISENSE_STATUS mask
+ * above (0xf) -- presumably intentional in the hardware layout; confirm
+ * against the register database before using both fields together.
+ */
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_BCL_ENABLED               0x00000002
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_LLM_ENABLED               0x00000200
+
+#define REG_A5XX_GPMU_TEMP_VAL                                 0x0000ac02
+
+#define REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD                     0x0000ac03
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_STATUS               0x0000ac05
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK              0x0000ac06
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_0_1                   0x0000ac40
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_2_3                   0x0000ac41
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_0_1                    0x0000ac42
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_2_3                    0x0000ac43
+
+#define REG_A5XX_GPMU_BASE_LEAKAGE                             0x0000ac46
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE                             0x0000ac60
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_STATUS                 0x0000ac61
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK                        0x0000ac62
+
+#define REG_A5XX_GPMU_GPMU_PWR_THRESHOLD                       0x0000ac80
+
+/* GPMU limits-management (LLM/GLM) sleep handshake: a request register
+ * and a matching status/ack register.
+ */
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL                  0x0000acc4
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_IDLE_FULL_LM         0x00000001
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__MASK 0x00000030
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__SHIFT        4
+/* Pack STATE_OF_CHILD into bits [5:4] of SLEEP_CTRL. */
+static inline uint32_t A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD(uint32_t val)
+{
+       return ((val) << A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__SHIFT) & A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__MASK;
+}
+
+/* Acknowledge bits reported back for the sleep request above. */
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS                        0x0000acc5
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS_IDLE_FULL_ACK      0x00000001
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS_WAKEUP_ACK         0x00000002
+
+#define REG_A5XX_GDPM_CONFIG1                                  0x0000b80c
+
+#define REG_A5XX_GDPM_CONFIG2                                  0x0000b80d
+
+#define REG_A5XX_GDPM_INT_EN                                   0x0000b80f
+
+#define REG_A5XX_GDPM_INT_MASK                                 0x0000b811
+
+#define REG_A5XX_GPMU_BEC_ENABLE                               0x0000b9a0
+
+#define REG_A5XX_GPU_CS_SENSOR_GENERAL_STATUS                  0x0000c41a
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_0              0x0000c41d
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_2              0x0000c41f
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_4              0x0000c421
+
+#define REG_A5XX_GPU_CS_ENABLE_REG                             0x0000c520
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_CONTROL1               0x0000c557
+
+#define REG_A5XX_GRAS_CL_CNTL                                  0x0000e000
+
+#define REG_A5XX_UNKNOWN_E001                                  0x0000e001
+
+#define REG_A5XX_UNKNOWN_E004                                  0x0000e004
+
+#define REG_A5XX_GRAS_CLEAR_CNTL                               0x0000e005
+#define A5XX_GRAS_CLEAR_CNTL_NOT_FASTCLEAR                     0x00000001
+
+/* Guardband clip adjust: 10-bit horizontal (bits [9:0]) and 10-bit
+ * vertical (bits [19:10]) adjustments packed into one register.
+ */
+#define REG_A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ                    0x0000e006
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK             0x000003ff
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT            0
+/* Pack the horizontal guardband adjust into bits [9:0]. */
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
+{
+       return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
+}
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK             0x000ffc00
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT            10
+/* Pack the vertical guardband adjust into bits [19:10]. */
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
+{
+       return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
+}
+
+/* Viewport transform (viewport 0): per-axis offset and scale, one full
+ * 32-bit register each.  Each helper writes the value as raw IEEE-754
+ * float bits via fui() (float-to-uint bit reinterpretation helper,
+ * defined elsewhere in the driver).
+ */
+#define REG_A5XX_GRAS_CL_VPORT_XOFFSET_0                       0x0000e010
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK                     0xffffffff
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT                    0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+       return ((fui(val)) << A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_XSCALE_0                                0x0000e011
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__MASK                      0xffffffff
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT                     0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+       return ((fui(val)) << A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YOFFSET_0                       0x0000e012
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK                     0xffffffff
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT                    0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+       return ((fui(val)) << A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YSCALE_0                                0x0000e013
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__MASK                      0xffffffff
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT                     0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+       return ((fui(val)) << A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZOFFSET_0                       0x0000e014
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK                     0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT                    0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+       return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZSCALE_0                                0x0000e015
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK                      0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT                     0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+       return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_CNTL                                  0x0000e090
+#define A5XX_GRAS_SU_CNTL_FRONT_CW                             0x00000004
+#define A5XX_GRAS_SU_CNTL_POLY_OFFSET                          0x00000800
+
+/* Point sprite limits and size.  Values are stored as val*16, i.e. a
+ * fixed-point encoding with 4 fractional bits.
+ */
+#define REG_A5XX_GRAS_SU_POINT_MINMAX                          0x0000e091
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK                    0x0000ffff
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT                   0
+/* Minimum point size in bits [15:0], scaled by 16. */
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+       return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK                    0xffff0000
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT                   16
+/* Maximum point size in bits [31:16], scaled by 16. */
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+       return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POINT_SIZE                            0x0000e092
+#define A5XX_GRAS_SU_POINT_SIZE__MASK                          0xffffffff
+#define A5XX_GRAS_SU_POINT_SIZE__SHIFT                         0
+/* Point size, scaled by 16.  Note the int32_t cast here (unlike the
+ * uint32_t casts above) -- generated that way, presumably to allow a
+ * signed intermediate; verify against the register database if relied on.
+ */
+static inline uint32_t A5XX_GRAS_SU_POINT_SIZE(float val)
+{
+       return ((((int32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_SIZE__SHIFT) & A5XX_GRAS_SU_POINT_SIZE__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E093                                  0x0000e093
+
+/* Depth plane control plus polygon-offset parameters.  The poly-offset
+ * scale/offset/clamp registers take raw IEEE-754 float bits via fui().
+ */
+#define REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL                      0x0000e094
+#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_ALPHA_TEST_ENABLE                0x00000001
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_SCALE                     0x0000e095
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK                   0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT                  0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+       return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET                    0x0000e096
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK                  0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT                 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+       return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP              0x0000e097
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK            0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT           0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
+{
+       return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
+}
+
+/* Depth buffer format selector: 3-bit enum a5xx_depth_format in [2:0]. */
+#define REG_A5XX_GRAS_SU_DEPTH_BUFFER_INFO                     0x0000e098
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK      0x00000007
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT     0
+static inline uint32_t A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+       return ((val) << A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_CONSERVATIVE_RAS_CNTL                 0x0000e099
+
+#define REG_A5XX_GRAS_SC_CNTL                                  0x0000e0a0
+
+#define REG_A5XX_GRAS_SC_BIN_CNTL                              0x0000e0a1
+
+/* Scan-converter MSAA configuration: 2-bit sample-count enum
+ * (a3xx_msaa_samples) for rasterization and for the destination, plus a
+ * destination MSAA-disable bit.
+ */
+#define REG_A5XX_GRAS_SC_RAS_MSAA_CNTL                         0x0000e0a2
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK               0x00000003
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT              0
+static inline uint32_t A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+       return ((val) << A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_DEST_MSAA_CNTL                                0x0000e0a3
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK              0x00000003
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT             0
+static inline uint32_t A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+       return ((val) << A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_MSAA_DISABLE               0x00000004
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_CNTL                   0x0000e0a4
+
+/* Scissor rectangles (screen, viewport, window), each as a TL/BR
+ * register pair.  All pairs share one layout: X in bits [14:0], Y in
+ * bits [30:16], and bit 31 disables the window offset.
+ */
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0                   0x0000e0aa
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK               0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT              0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(uint32_t val)
+{
+       return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK               0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT              16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(uint32_t val)
+{
+       return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0                   0x0000e0ab
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK               0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT              0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(uint32_t val)
+{
+       return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK               0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT              16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(uint32_t val)
+{
+       return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0                 0x0000e0ca
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE       0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK             0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT            0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(uint32_t val)
+{
+       return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK             0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT            16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(uint32_t val)
+{
+       return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0                 0x0000e0cb
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE       0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK             0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT            0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X(uint32_t val)
+{
+       return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK             0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT            16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y(uint32_t val)
+{
+       return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL                     0x0000e0ea
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE   0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK                 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT                        0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+       return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK                 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT                        16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+       return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_BR                     0x0000e0eb
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE   0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK                 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT                        0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+       return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK                 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT                        16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+       return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_LRZ_CNTL                                 0x0000e100
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_LO                       0x0000e101
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_HI                       0x0000e102
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_PITCH                         0x0000e103
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO            0x0000e104
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI            0x0000e105
+
+/* Render-backend control.  WIDTH and HEIGHT are encoded as val >> 5,
+ * i.e. in units of 32 pixels.
+ */
+#define REG_A5XX_RB_CNTL                                       0x0000e140
+#define A5XX_RB_CNTL_WIDTH__MASK                               0x000000ff
+#define A5XX_RB_CNTL_WIDTH__SHIFT                              0
+static inline uint32_t A5XX_RB_CNTL_WIDTH(uint32_t val)
+{
+       return ((val >> 5) << A5XX_RB_CNTL_WIDTH__SHIFT) & A5XX_RB_CNTL_WIDTH__MASK;
+}
+#define A5XX_RB_CNTL_HEIGHT__MASK                              0x0001fe00
+#define A5XX_RB_CNTL_HEIGHT__SHIFT                             9
+static inline uint32_t A5XX_RB_CNTL_HEIGHT(uint32_t val)
+{
+       return ((val >> 5) << A5XX_RB_CNTL_HEIGHT__SHIFT) & A5XX_RB_CNTL_HEIGHT__MASK;
+}
+#define A5XX_RB_CNTL_BYPASS                                    0x00020000
+
+/* Bitmask of enabled render targets, packed twice: bits [23:16] and a
+ * second copy in bits [31:24].
+ */
+#define REG_A5XX_RB_RENDER_CNTL                                        0x0000e141
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS__MASK                 0x00ff0000
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS__SHIFT                        16
+static inline uint32_t A5XX_RB_RENDER_CNTL_ENABLED_MRTS(uint32_t val)
+{
+       return ((val) << A5XX_RB_RENDER_CNTL_ENABLED_MRTS__SHIFT) & A5XX_RB_RENDER_CNTL_ENABLED_MRTS__MASK;
+}
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__MASK                        0xff000000
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__SHIFT               24
+static inline uint32_t A5XX_RB_RENDER_CNTL_ENABLED_MRTS2(uint32_t val)
+{
+       return ((val) << A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__SHIFT) & A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__MASK;
+}
+
+/* Render-backend MSAA sample counts (2-bit a3xx_msaa_samples enum),
+ * render-control flag bits, and fragment-shader output control.
+ */
+#define REG_A5XX_RB_RAS_MSAA_CNTL                              0x0000e142
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK                    0x00000003
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT                   0
+static inline uint32_t A5XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+       return ((val) << A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_RB_DEST_MSAA_CNTL                             0x0000e143
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK                   0x00000003
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT                  0
+static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+       return ((val) << A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE                    0x00000004
+
+/* Flags selecting which interpolated inputs the RB provides. */
+#define REG_A5XX_RB_RENDER_CONTROL0                            0x0000e144
+#define A5XX_RB_RENDER_CONTROL0_VARYING                                0x00000001
+#define A5XX_RB_RENDER_CONTROL0_XCOORD                         0x00000040
+#define A5XX_RB_RENDER_CONTROL0_YCOORD                         0x00000080
+#define A5XX_RB_RENDER_CONTROL0_ZCOORD                         0x00000100
+#define A5XX_RB_RENDER_CONTROL0_WCOORD                         0x00000200
+
+#define REG_A5XX_RB_RENDER_CONTROL1                            0x0000e145
+#define A5XX_RB_RENDER_CONTROL1_FACENESS                       0x00000002
+
+/* Fragment-shader output control: MRT count in [3:0], plus a flag set
+ * when the shader writes depth.
+ */
+#define REG_A5XX_RB_FS_OUTPUT_CNTL                             0x0000e146
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK                       0x0000000f
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT                      0
+static inline uint32_t A5XX_RB_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+       return ((val) << A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_RB_FS_OUTPUT_CNTL_FRAG_WRITES_Z                   0x00000020
+
+/* Per-render-target (MRT) register array starting at 0xe150 with a
+ * stride of 7 registers per target; i0 selects the target index.
+ */
+static inline uint32_t REG_A5XX_RB_MRT(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_CONTROL(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+#define A5XX_RB_MRT_CONTROL_BLEND                              0x00000001
+#define A5XX_RB_MRT_CONTROL_BLEND2                             0x00000002
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK             0x00000780
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT            7
+/* Per-channel write-enable bits in [10:7]. */
+static inline uint32_t A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+       return ((val) << A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+/* Blend equation: 5-bit src/dst factors and 3-bit opcodes for the RGB
+ * and alpha halves, all packed into one register.
+ */
+static inline uint32_t REG_A5XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x0000e151 + 0x7*i0; }
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK         0x0000001f
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT                0
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+       return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK       0x000000e0
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT      5
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+       return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK                0x00001f00
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT       8
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+       return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK       0x001f0000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT      16
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+       return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK     0x00e00000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT    21
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+       return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK      0x1f000000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT     24
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+       return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+
+/* Color buffer description: format, tile mode, sRGB flag. */
+static inline uint32_t REG_A5XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x0000e152 + 0x7*i0; }
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK                        0x0000007f
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT               0
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+       return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK             0x00000300
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT            8
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a5xx_tile_mode val)
+{
+       return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SRGB                                0x00008000
+
+/* Pitch encoded as val >> 4 (units of 16 bytes). */
+static inline uint32_t REG_A5XX_RB_MRT_PITCH(uint32_t i0) { return 0x0000e153 + 0x7*i0; }
+#define A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__MASK                        0x0007ffff
+#define A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__SHIFT               0
+static inline uint32_t A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH(uint32_t val)
+{
+       return ((val >> 4) << A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__SHIFT) & A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__MASK;
+}
+
+/* Array pitch encoded as val >> 6 (units of 64 bytes). */
+static inline uint32_t REG_A5XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x0000e154 + 0x7*i0; }
+#define A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__MASK           0x01ffffff
+#define A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__SHIFT          0
+static inline uint32_t A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE(uint32_t val)
+{
+       return ((val >> 6) << A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__SHIFT) & A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__MASK;
+}
+
+/* 64-bit color buffer base address, split low/high. */
+static inline uint32_t REG_A5XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x0000e155 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x0000e156 + 0x7*i0; }
+
+/* Blend constant color, one register pair per channel: a packed form
+ * (UINT byte in [7:0], SINT byte in [15:8], half-float in [31:16] via
+ * util_float_to_half(), defined elsewhere) plus a full-precision F32
+ * register written as raw IEEE-754 bits via fui().
+ */
+#define REG_A5XX_RB_BLEND_RED                                  0x0000e1a0
+#define A5XX_RB_BLEND_RED_UINT__MASK                           0x000000ff
+#define A5XX_RB_BLEND_RED_UINT__SHIFT                          0
+static inline uint32_t A5XX_RB_BLEND_RED_UINT(uint32_t val)
+{
+       return ((val) << A5XX_RB_BLEND_RED_UINT__SHIFT) & A5XX_RB_BLEND_RED_UINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_SINT__MASK                           0x0000ff00
+#define A5XX_RB_BLEND_RED_SINT__SHIFT                          8
+static inline uint32_t A5XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+       return ((val) << A5XX_RB_BLEND_RED_SINT__SHIFT) & A5XX_RB_BLEND_RED_SINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_FLOAT__MASK                          0xffff0000
+#define A5XX_RB_BLEND_RED_FLOAT__SHIFT                         16
+static inline uint32_t A5XX_RB_BLEND_RED_FLOAT(float val)
+{
+       return ((util_float_to_half(val)) << A5XX_RB_BLEND_RED_FLOAT__SHIFT) & A5XX_RB_BLEND_RED_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_RED_F32                              0x0000e1a1
+#define A5XX_RB_BLEND_RED_F32__MASK                            0xffffffff
+#define A5XX_RB_BLEND_RED_F32__SHIFT                           0
+static inline uint32_t A5XX_RB_BLEND_RED_F32(float val)
+{
+       return ((fui(val)) << A5XX_RB_BLEND_RED_F32__SHIFT) & A5XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_GREEN                                        0x0000e1a2
+#define A5XX_RB_BLEND_GREEN_UINT__MASK                         0x000000ff
+#define A5XX_RB_BLEND_GREEN_UINT__SHIFT                                0
+static inline uint32_t A5XX_RB_BLEND_GREEN_UINT(uint32_t val)
+{
+       return ((val) << A5XX_RB_BLEND_GREEN_UINT__SHIFT) & A5XX_RB_BLEND_GREEN_UINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_SINT__MASK                         0x0000ff00
+#define A5XX_RB_BLEND_GREEN_SINT__SHIFT                                8
+static inline uint32_t A5XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+       return ((val) << A5XX_RB_BLEND_GREEN_SINT__SHIFT) & A5XX_RB_BLEND_GREEN_SINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_FLOAT__MASK                                0xffff0000
+#define A5XX_RB_BLEND_GREEN_FLOAT__SHIFT                       16
+static inline uint32_t A5XX_RB_BLEND_GREEN_FLOAT(float val)
+{
+       return ((util_float_to_half(val)) << A5XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A5XX_RB_BLEND_GREEN_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_GREEN_F32                            0x0000e1a3
+#define A5XX_RB_BLEND_GREEN_F32__MASK                          0xffffffff
+#define A5XX_RB_BLEND_GREEN_F32__SHIFT                         0
+static inline uint32_t A5XX_RB_BLEND_GREEN_F32(float val)
+{
+       return ((fui(val)) << A5XX_RB_BLEND_GREEN_F32__SHIFT) & A5XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE                                 0x0000e1a4
+#define A5XX_RB_BLEND_BLUE_UINT__MASK                          0x000000ff
+#define A5XX_RB_BLEND_BLUE_UINT__SHIFT                         0
+static inline uint32_t A5XX_RB_BLEND_BLUE_UINT(uint32_t val)
+{
+       return ((val) << A5XX_RB_BLEND_BLUE_UINT__SHIFT) & A5XX_RB_BLEND_BLUE_UINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_SINT__MASK                          0x0000ff00
+#define A5XX_RB_BLEND_BLUE_SINT__SHIFT                         8
+static inline uint32_t A5XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+       return ((val) << A5XX_RB_BLEND_BLUE_SINT__SHIFT) & A5XX_RB_BLEND_BLUE_SINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_FLOAT__MASK                         0xffff0000
+#define A5XX_RB_BLEND_BLUE_FLOAT__SHIFT                                16
+static inline uint32_t A5XX_RB_BLEND_BLUE_FLOAT(float val)
+{
+       return ((util_float_to_half(val)) << A5XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A5XX_RB_BLEND_BLUE_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE_F32                             0x0000e1a5
+#define A5XX_RB_BLEND_BLUE_F32__MASK                           0xffffffff
+#define A5XX_RB_BLEND_BLUE_F32__SHIFT                          0
+static inline uint32_t A5XX_RB_BLEND_BLUE_F32(float val)
+{
+       return ((fui(val)) << A5XX_RB_BLEND_BLUE_F32__SHIFT) & A5XX_RB_BLEND_BLUE_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA                                        0x0000e1a6
+#define A5XX_RB_BLEND_ALPHA_UINT__MASK                         0x000000ff
+#define A5XX_RB_BLEND_ALPHA_UINT__SHIFT                                0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_UINT(uint32_t val)
+{
+       return ((val) << A5XX_RB_BLEND_ALPHA_UINT__SHIFT) & A5XX_RB_BLEND_ALPHA_UINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_SINT__MASK                         0x0000ff00
+#define A5XX_RB_BLEND_ALPHA_SINT__SHIFT                                8
+static inline uint32_t A5XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+       return ((val) << A5XX_RB_BLEND_ALPHA_SINT__SHIFT) & A5XX_RB_BLEND_ALPHA_SINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_FLOAT__MASK                                0xffff0000
+#define A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT                       16
+static inline uint32_t A5XX_RB_BLEND_ALPHA_FLOAT(float val)
+{
+       return ((util_float_to_half(val)) << A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A5XX_RB_BLEND_ALPHA_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA_F32                            0x0000e1a7
+#define A5XX_RB_BLEND_ALPHA_F32__MASK                          0xffffffff
+#define A5XX_RB_BLEND_ALPHA_F32__SHIFT                         0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_F32(float val)
+{
+       return ((fui(val)) << A5XX_RB_BLEND_ALPHA_F32__SHIFT) & A5XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
+#define REG_A5XX_RB_ALPHA_CONTROL                              0x0000e1a8
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK                  0x000000ff
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT                 0
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+       return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST                       0x00000100
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK            0x00000e00
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT           9
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+       return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_CNTL                                 0x0000e1a9
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK                  0x000000ff
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT                 0
+static inline uint32_t A5XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+       return ((val) << A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A5XX_RB_BLEND_CNTL_INDEPENDENT_BLEND                   0x00000100
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK                   0xffff0000
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT                  16
+static inline uint32_t A5XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
+{
+       return ((val) << A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_PLANE_CNTL                           0x0000e1b0
+#define A5XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z                 0x00000001
+
+#define REG_A5XX_RB_DEPTH_CNTL                                 0x0000e1b1
+#define A5XX_RB_DEPTH_CNTL_Z_ENABLE                            0x00000001
+#define A5XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE                      0x00000002
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__MASK                         0x0000001c
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT                                2
+static inline uint32_t A5XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
+{
+       return ((val) << A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A5XX_RB_DEPTH_CNTL_ZFUNC__MASK;
+}
+#define A5XX_RB_DEPTH_CNTL_Z_TEST_ENABLE                       0x00000040
+
+#define REG_A5XX_RB_DEPTH_BUFFER_INFO                          0x0000e1b2
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK           0x00000007
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT          0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+       return ((val) << A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_LO                       0x0000e1b3
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_HI                       0x0000e1b4
+
+#define REG_A5XX_RB_DEPTH_BUFFER_PITCH                         0x0000e1b5
+#define A5XX_RB_DEPTH_BUFFER_PITCH__MASK                       0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT                      0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
+{
+       return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH                   0x0000e1b6
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK                 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT                        0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+       return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_CONTROL                            0x0000e1c0
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE                 0x00000001
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF              0x00000002
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_READ                   0x00000004
+#define A5XX_RB_STENCIL_CONTROL_FUNC__MASK                     0x00000700
+#define A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT                    8
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+       return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL__MASK                     0x00003800
+#define A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT                    11
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+       return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__MASK                    0x0001c000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT                   14
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+       return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK                    0x000e0000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT                   17
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+       return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK                  0x00700000
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT                 20
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+       return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK                  0x03800000
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT                 23
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+       return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK                 0x1c000000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT                        26
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+       return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK                 0xe0000000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT                        29
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+       return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_INFO                               0x0000e1c2
+#define A5XX_RB_STENCIL_INFO_SEPARATE_STENCIL                  0x00000001
+#define A5XX_RB_STENCIL_INFO_STENCIL_BASE__MASK                        0xfffff000
+#define A5XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT               12
+static inline uint32_t A5XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
+{
+       return ((val >> 12) << A5XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A5XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E1C3                                  0x0000e1c3
+
+#define REG_A5XX_RB_STENCILREFMASK                             0x0000e1c6
+#define A5XX_RB_STENCILREFMASK_STENCILREF__MASK                        0x000000ff
+#define A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT               0
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+       return ((val) << A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__MASK               0x0000ff00
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT              8
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+       return ((val) << A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK          0x00ff0000
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT         16
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+       return ((val) << A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A5XX_RB_WINDOW_OFFSET                              0x0000e1d0
+#define A5XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE            0x80000000
+#define A5XX_RB_WINDOW_OFFSET_X__MASK                          0x00007fff
+#define A5XX_RB_WINDOW_OFFSET_X__SHIFT                         0
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+       return ((val) << A5XX_RB_WINDOW_OFFSET_X__SHIFT) & A5XX_RB_WINDOW_OFFSET_X__MASK;
+}
+#define A5XX_RB_WINDOW_OFFSET_Y__MASK                          0x7fff0000
+#define A5XX_RB_WINDOW_OFFSET_Y__SHIFT                         16
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+       return ((val) << A5XX_RB_WINDOW_OFFSET_Y__SHIFT) & A5XX_RB_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_1                             0x0000e211
+#define A5XX_RB_RESOLVE_CNTL_1_WINDOW_OFFSET_DISABLE           0x80000000
+#define A5XX_RB_RESOLVE_CNTL_1_X__MASK                         0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_1_X__SHIFT                                0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_X(uint32_t val)
+{
+       return ((val) << A5XX_RB_RESOLVE_CNTL_1_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_1_Y__MASK                         0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT                                16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_Y(uint32_t val)
+{
+       return ((val) << A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_2                             0x0000e212
+#define A5XX_RB_RESOLVE_CNTL_2_WINDOW_OFFSET_DISABLE           0x80000000
+#define A5XX_RB_RESOLVE_CNTL_2_X__MASK                         0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_2_X__SHIFT                                0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_X(uint32_t val)
+{
+       return ((val) << A5XX_RB_RESOLVE_CNTL_2_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_2_Y__MASK                         0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT                                16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_Y(uint32_t val)
+{
+       return ((val) << A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_Y__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_LO                  0x0000e240
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_HI                  0x0000e241
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_PITCH                    0x0000e242
+
+#define REG_A5XX_VPC_CNTL_0                                    0x0000e280
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK                    0x0000007f
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT                   0
+static inline uint32_t A5XX_VPC_CNTL_0_STRIDE_IN_VPC(uint32_t val)
+{
+       return ((val) << A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT) & A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK;
+}
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VAR(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+#define REG_A5XX_VPC_GS_SIV_CNTL                               0x0000e298
+
+#define REG_A5XX_VPC_PACK                                      0x0000e29d
+#define A5XX_VPC_PACK_NUMNONPOSVAR__MASK                       0x000000ff
+#define A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT                      0
+static inline uint32_t A5XX_VPC_PACK_NUMNONPOSVAR(uint32_t val)
+{
+       return ((val) << A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A5XX_VPC_PACK_NUMNONPOSVAR__MASK;
+}
+
+#define REG_A5XX_VPC_FS_PRIMITIVEID_CNTL                       0x0000e2a0
+
+#define REG_A5XX_VPC_SO_OVERRIDE                               0x0000e2a2
+
+#define REG_A5XX_VPC_SO_BUFFER_BASE_LO_0                       0x0000e2a7
+
+#define REG_A5XX_VPC_SO_BUFFER_BASE_HI_0                       0x0000e2a8
+
+#define REG_A5XX_VPC_SO_BUFFER_SIZE_0                          0x0000e2a9
+
+#define REG_A5XX_VPC_SO_FLUSH_BASE_LO_0                                0x0000e2ac
+
+#define REG_A5XX_VPC_SO_FLUSH_BASE_HI_0                                0x0000e2ad
+
+#define REG_A5XX_PC_PRIMITIVE_CNTL                             0x0000e384
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK             0x0000007f
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT            0
+static inline uint32_t A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+       return ((val) << A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
+}
+
+#define REG_A5XX_PC_RASTER_CNTL                                        0x0000e388
+
+#define REG_A5XX_PC_RESTART_INDEX                              0x0000e38c
+
+#define REG_A5XX_PC_GS_PARAM                                   0x0000e38e
+
+#define REG_A5XX_PC_HS_PARAM                                   0x0000e38f
+
+#define REG_A5XX_PC_POWER_CNTL                                 0x0000e3b0
+
+#define REG_A5XX_VFD_CONTROL_0                                 0x0000e400
+#define A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK               0x0000003f
+#define A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT              0
+static inline uint32_t A5XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
+{
+       return ((val) << A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_1                                 0x0000e401
+#define A5XX_VFD_CONTROL_1_REGID4INST__MASK                    0x0000ff00
+#define A5XX_VFD_CONTROL_1_REGID4INST__SHIFT                   8
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+       return ((val) << A5XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A5XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+#define A5XX_VFD_CONTROL_1_REGID4VTX__MASK                     0x00ff0000
+#define A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT                    16
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+       return ((val) << A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A5XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_2                                 0x0000e402
+
+#define REG_A5XX_VFD_CONTROL_3                                 0x0000e403
+
+#define REG_A5XX_VFD_CONTROL_4                                 0x0000e404
+
+#define REG_A5XX_VFD_CONTROL_5                                 0x0000e405
+
+#define REG_A5XX_VFD_INDEX_OFFSET                              0x0000e408
+
+#define REG_A5XX_VFD_INSTANCE_START_OFFSET                     0x0000e409
+
+static inline uint32_t REG_A5XX_VFD_FETCH(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000e40b + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000e40c + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000e40d + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+#define A5XX_VFD_DECODE_INSTR_IDX__MASK                                0x0000001f
+#define A5XX_VFD_DECODE_INSTR_IDX__SHIFT                       0
+static inline uint32_t A5XX_VFD_DECODE_INSTR_IDX(uint32_t val)
+{
+       return ((val) << A5XX_VFD_DECODE_INSTR_IDX__SHIFT) & A5XX_VFD_DECODE_INSTR_IDX__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK                     0x3ff00000
+#define A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT                    20
+static inline uint32_t A5XX_VFD_DECODE_INSTR_FORMAT(enum a5xx_vtx_fmt val)
+{
+       return ((val) << A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A5XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+
+static inline uint32_t REG_A5XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000e48b + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK               0x0000000f
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT              0
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val)
+{
+       return ((val) << A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK;
+}
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK                   0x00000ff0
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT                  4
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
+{
+       return ((val) << A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK;
+}
+
+#define REG_A5XX_VFD_POWER_CNTL                                        0x0000e4f0
+
+#define REG_A5XX_SP_SP_CNTL                                    0x0000e580
+
+#define REG_A5XX_SP_VS_CONTROL_REG                             0x0000e584
+#define A5XX_SP_VS_CONTROL_REG_ENABLED                         0x00000001
+#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK         0x000000fe
+#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT                1
+static inline uint32_t A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+       return ((val) << A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK           0x00007f00
+#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT          8
+static inline uint32_t A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+       return ((val) << A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_FS_CONTROL_REG                             0x0000e585
+#define A5XX_SP_FS_CONTROL_REG_ENABLED                         0x00000001
+#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK         0x000000fe
+#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT                1
+static inline uint32_t A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+       return ((val) << A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK           0x00007f00
+#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT          8
+static inline uint32_t A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+       return ((val) << A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_HS_CONTROL_REG                             0x0000e586
+#define A5XX_SP_HS_CONTROL_REG_ENABLED                         0x00000001
+#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK         0x000000fe
+#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT                1
+static inline uint32_t A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+       return ((val) << A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK           0x00007f00
+#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT          8
+static inline uint32_t A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+       return ((val) << A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_DS_CONTROL_REG                             0x0000e587
+#define A5XX_SP_DS_CONTROL_REG_ENABLED                         0x00000001
+#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK         0x000000fe
+#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT                1
+static inline uint32_t A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+       return ((val) << A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK           0x00007f00
+#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT          8
+static inline uint32_t A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+       return ((val) << A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_GS_CONTROL_REG                             0x0000e588
+#define A5XX_SP_GS_CONTROL_REG_ENABLED                         0x00000001
+#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK         0x000000fe
+#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT                1
+static inline uint32_t A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+       return ((val) << A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK           0x00007f00
+#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT          8
+static inline uint32_t A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+       return ((val) << A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_CS_CONFIG                                  0x0000e589
+
+#define REG_A5XX_SP_VS_CONFIG_MAX_CONST                                0x0000e58a
+
+#define REG_A5XX_SP_FS_CONFIG_MAX_CONST                                0x0000e58b
+
+#define REG_A5XX_SP_VS_CTRL_REG0                               0x0000e590
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK            0x000003f0
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT           4
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+       return ((val) << A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK            0x0000fc00
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT           10
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+       return ((val) << A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_VARYING                           0x00010000
+#define A5XX_SP_VS_CTRL_REG0_PIXLODENABLE                      0x00100000
+
+static inline uint32_t REG_A5XX_SP_VS_OUT(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+#define A5XX_SP_VS_OUT_REG_A_REGID__MASK                       0x000000ff
+#define A5XX_SP_VS_OUT_REG_A_REGID__SHIFT                      0
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+       return ((val) << A5XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK                    0x00000f00
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT                   8
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+       return ((val) << A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_REGID__MASK                       0x00ff0000
+#define A5XX_SP_VS_OUT_REG_B_REGID__SHIFT                      16
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+       return ((val) << A5XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK                    0x0f000000
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT                   24
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+       return ((val) << A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK                   0x000000ff
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT                  0
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+       return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK                   0x0000ff00
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT                  8
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+       return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK                   0x00ff0000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT                  16
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+       return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK                   0xff000000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT                  24
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+       return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A5XX_SP_VS_OBJ_START_LO                            0x0000e5ac
+
+#define REG_A5XX_SP_VS_OBJ_START_HI                            0x0000e5ad
+
+#define REG_A5XX_SP_FS_CTRL_REG0                               0x0000e5c0
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK            0x000003f0
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT           4
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+       return ((val) << A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK            0x0000fc00
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT           10
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+       return ((val) << A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_VARYING                           0x00010000
+#define A5XX_SP_FS_CTRL_REG0_PIXLODENABLE                      0x00100000
+
+#define REG_A5XX_SP_FS_OBJ_START_LO                            0x0000e5c3
+
+#define REG_A5XX_SP_FS_OBJ_START_HI                            0x0000e5c4
+
+#define REG_A5XX_SP_BLEND_CNTL                                 0x0000e5c9
+
+#define REG_A5XX_SP_FS_OUTPUT_CNTL                             0x0000e5ca
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK                       0x0000000f
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT                      0
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+       return ((val) << A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK               0x00001fe0
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT              5
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID(uint32_t val)
+{
+       return ((val) << A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK          0x001fe000
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT         13
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID(uint32_t val)
+{
+       return ((val) << A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+#define A5XX_SP_FS_OUTPUT_REG_REGID__MASK                      0x000000ff
+#define A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT                     0
+static inline uint32_t A5XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
+{
+       return ((val) << A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_REG_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_REG_HALF_PRECISION                   0x00000100
+
+static inline uint32_t REG_A5XX_SP_FS_MRT(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK                  0x0000007f
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT                 0
+static inline uint32_t A5XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+       return ((val) << A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
+}
+
+#define REG_A5XX_SP_CS_CNTL_0                                  0x0000e5f0
+
+#define REG_A5XX_TPL1_TP_RAS_MSAA_CNTL                         0x0000e704
+
+#define REG_A5XX_TPL1_TP_DEST_MSAA_CNTL                                0x0000e705
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_LO                           0x0000e722
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_HI                           0x0000e723
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_LO                          0x0000e72a
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_HI                          0x0000e72b
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_LO                          0x0000e75a
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_HI                          0x0000e75b
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_LO                           0x0000e75e
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_HI                           0x0000e75f
+
+#define REG_A5XX_TPL1_TP_FS_ROTATION_CNTL                      0x0000e764
+
+#define REG_A5XX_HLSQ_CONTROL_0_REG                            0x0000e784
+
+#define REG_A5XX_HLSQ_CONTROL_1_REG                            0x0000e785
+
+#define REG_A5XX_HLSQ_CONTROL_2_REG                            0x0000e786
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK                        0x000000ff
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT               0
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+       return ((val) << A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_3_REG                            0x0000e787
+#define A5XX_HLSQ_CONTROL_3_REG_REGID__MASK                    0x000000ff
+#define A5XX_HLSQ_CONTROL_3_REG_REGID__SHIFT                   0
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
+{
+       return ((val) << A5XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_REGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_4_REG                            0x0000e788
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK             0x00ff0000
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT            16
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
+{
+       return ((val) << A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK             0xff000000
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT            24
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
+{
+       return ((val) << A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_UPDATE_CNTL                              0x0000e78a
+
+#define REG_A5XX_HLSQ_VS_CONTROL_REG                           0x0000e78b
+#define A5XX_HLSQ_VS_CONTROL_REG_ENABLED                       0x00000001
+#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK       0x000000fe
+#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT      1
+static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+       return ((val) << A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK         0x00007f00
+#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT                8
+static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+       return ((val) << A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_FS_CONTROL_REG                           0x0000e78c
+#define A5XX_HLSQ_FS_CONTROL_REG_ENABLED                       0x00000001
+#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK       0x000000fe
+#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT      1
+static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+       return ((val) << A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK         0x00007f00
+#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT                8
+static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+       return ((val) << A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_HS_CONTROL_REG                           0x0000e78d
+#define A5XX_HLSQ_HS_CONTROL_REG_ENABLED                       0x00000001
+#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK       0x000000fe
+#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT      1
+static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+       return ((val) << A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK         0x00007f00
+#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT                8
+static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+       return ((val) << A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_DS_CONTROL_REG                           0x0000e78e
+#define A5XX_HLSQ_DS_CONTROL_REG_ENABLED                       0x00000001
+#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK       0x000000fe
+#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT      1
+static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+       return ((val) << A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK         0x00007f00
+#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT                8
+static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+       return ((val) << A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_GS_CONTROL_REG                           0x0000e78f
+#define A5XX_HLSQ_GS_CONTROL_REG_ENABLED                       0x00000001
+#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK       0x000000fe
+#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT      1
+static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+       return ((val) << A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK         0x00007f00
+#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT                8
+static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+       return ((val) << A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CONFIG                                        0x0000e790
+
+#define REG_A5XX_HLSQ_VS_CNTL                                  0x0000e791
+
+#define REG_A5XX_HLSQ_FS_CNTL                                  0x0000e792
+
+#define REG_A5XX_HLSQ_CS_CNTL                                  0x0000e796
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_X                                0x0000e7b9
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Y                                0x0000e7ba
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Z                                0x0000e7bb
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_0                             0x0000e7b0
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_1                             0x0000e7b1
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_2                             0x0000e7b2
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_3                             0x0000e7b3
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_4                             0x0000e7b4
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_5                             0x0000e7b5
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_6                             0x0000e7b6
+
+#define REG_A5XX_HLSQ_CS_CNTL_0                                        0x0000e7b7
+
+#define REG_A5XX_HLSQ_CS_CNTL_1                                        0x0000e7b8
+
+#define REG_A5XX_HLSQ_VS_CONSTLEN                              0x0000e7c3
+
+#define REG_A5XX_HLSQ_VS_INSTRLEN                              0x0000e7c4
+
+#define REG_A5XX_HLSQ_FS_CONSTLEN                              0x0000e7d7
+
+#define REG_A5XX_HLSQ_FS_INSTRLEN                              0x0000e7d8
+
+#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_3                   0x0000e7dc
+
+#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_4                   0x0000e7dd
+
+#define REG_A5XX_TEX_SAMP_0                                    0x00000000
+#define A5XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR                  0x00000001
+#define A5XX_TEX_SAMP_0_XY_MAG__MASK                           0x00000006
+#define A5XX_TEX_SAMP_0_XY_MAG__SHIFT                          1
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MAG(enum a5xx_tex_filter val)
+{
+       return ((val) << A5XX_TEX_SAMP_0_XY_MAG__SHIFT) & A5XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A5XX_TEX_SAMP_0_XY_MIN__MASK                           0x00000018
+#define A5XX_TEX_SAMP_0_XY_MIN__SHIFT                          3
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MIN(enum a5xx_tex_filter val)
+{
+       return ((val) << A5XX_TEX_SAMP_0_XY_MIN__SHIFT) & A5XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_S__MASK                           0x000000e0
+#define A5XX_TEX_SAMP_0_WRAP_S__SHIFT                          5
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_S(enum a5xx_tex_clamp val)
+{
+       return ((val) << A5XX_TEX_SAMP_0_WRAP_S__SHIFT) & A5XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_T__MASK                           0x00000700
+#define A5XX_TEX_SAMP_0_WRAP_T__SHIFT                          8
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_T(enum a5xx_tex_clamp val)
+{
+       return ((val) << A5XX_TEX_SAMP_0_WRAP_T__SHIFT) & A5XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_R__MASK                           0x00003800
+#define A5XX_TEX_SAMP_0_WRAP_R__SHIFT                          11
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_R(enum a5xx_tex_clamp val)
+{
+       return ((val) << A5XX_TEX_SAMP_0_WRAP_R__SHIFT) & A5XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A5XX_TEX_SAMP_0_ANISO__MASK                            0x0001c000
+#define A5XX_TEX_SAMP_0_ANISO__SHIFT                           14
+static inline uint32_t A5XX_TEX_SAMP_0_ANISO(enum a5xx_tex_aniso val)
+{
+       return ((val) << A5XX_TEX_SAMP_0_ANISO__SHIFT) & A5XX_TEX_SAMP_0_ANISO__MASK;
+}
+#define A5XX_TEX_SAMP_0_LOD_BIAS__MASK                         0xfff80000
+#define A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT                                19
+static inline uint32_t A5XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+       return ((((int32_t)(val * 256.0))) << A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A5XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_1                                    0x00000001
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK                     0x0000000e
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT                    1
+static inline uint32_t A5XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+       return ((val) << A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A5XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF                 0x00000010
+#define A5XX_TEX_SAMP_1_UNNORM_COORDS                          0x00000020
+#define A5XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR                   0x00000040
+#define A5XX_TEX_SAMP_1_MAX_LOD__MASK                          0x000fff00
+#define A5XX_TEX_SAMP_1_MAX_LOD__SHIFT                         8
+static inline uint32_t A5XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+       return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A5XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A5XX_TEX_SAMP_1_MIN_LOD__MASK                          0xfff00000
+#define A5XX_TEX_SAMP_1_MIN_LOD__SHIFT                         20
+static inline uint32_t A5XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+       return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A5XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_2                                    0x00000002
+
+#define REG_A5XX_TEX_SAMP_3                                    0x00000003
+
+#define REG_A5XX_TEX_CONST_0                                   0x00000000
+#define A5XX_TEX_CONST_0_TILED                                 0x00000001
+#define A5XX_TEX_CONST_0_SRGB                                  0x00000004
+#define A5XX_TEX_CONST_0_SWIZ_X__MASK                          0x00000070
+#define A5XX_TEX_CONST_0_SWIZ_X__SHIFT                         4
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_X(enum a5xx_tex_swiz val)
+{
+       return ((val) << A5XX_TEX_CONST_0_SWIZ_X__SHIFT) & A5XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Y__MASK                          0x00000380
+#define A5XX_TEX_CONST_0_SWIZ_Y__SHIFT                         7
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Y(enum a5xx_tex_swiz val)
+{
+       return ((val) << A5XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Z__MASK                          0x00001c00
+#define A5XX_TEX_CONST_0_SWIZ_Z__SHIFT                         10
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Z(enum a5xx_tex_swiz val)
+{
+       return ((val) << A5XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_W__MASK                          0x0000e000
+#define A5XX_TEX_CONST_0_SWIZ_W__SHIFT                         13
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_W(enum a5xx_tex_swiz val)
+{
+       return ((val) << A5XX_TEX_CONST_0_SWIZ_W__SHIFT) & A5XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A5XX_TEX_CONST_0_FMT__MASK                             0x3fc00000
+#define A5XX_TEX_CONST_0_FMT__SHIFT                            22
+static inline uint32_t A5XX_TEX_CONST_0_FMT(enum a5xx_tex_fmt val)
+{
+       return ((val) << A5XX_TEX_CONST_0_FMT__SHIFT) & A5XX_TEX_CONST_0_FMT__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_1                                   0x00000001
+#define A5XX_TEX_CONST_1_WIDTH__MASK                           0x00007fff
+#define A5XX_TEX_CONST_1_WIDTH__SHIFT                          0
+static inline uint32_t A5XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+       return ((val) << A5XX_TEX_CONST_1_WIDTH__SHIFT) & A5XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A5XX_TEX_CONST_1_HEIGHT__MASK                          0x3fff8000
+#define A5XX_TEX_CONST_1_HEIGHT__SHIFT                         15
+static inline uint32_t A5XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+       return ((val) << A5XX_TEX_CONST_1_HEIGHT__SHIFT) & A5XX_TEX_CONST_1_HEIGHT__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_2                                   0x00000002
+#define A5XX_TEX_CONST_2_FETCHSIZE__MASK                       0x0000000f
+#define A5XX_TEX_CONST_2_FETCHSIZE__SHIFT                      0
+static inline uint32_t A5XX_TEX_CONST_2_FETCHSIZE(enum a5xx_tex_fetchsize val)
+{
+       return ((val) << A5XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A5XX_TEX_CONST_2_FETCHSIZE__MASK;
+}
+#define A5XX_TEX_CONST_2_PITCH__MASK                           0x1fffff00
+#define A5XX_TEX_CONST_2_PITCH__SHIFT                          8
+static inline uint32_t A5XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+       return ((val) << A5XX_TEX_CONST_2_PITCH__SHIFT) & A5XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A5XX_TEX_CONST_2_TYPE__MASK                            0x60000000
+#define A5XX_TEX_CONST_2_TYPE__SHIFT                           29
+static inline uint32_t A5XX_TEX_CONST_2_TYPE(enum a5xx_tex_type val)
+{
+       return ((val) << A5XX_TEX_CONST_2_TYPE__SHIFT) & A5XX_TEX_CONST_2_TYPE__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_3                                   0x00000003
+#define A5XX_TEX_CONST_3_LAYERSZ__MASK                         0x00003fff
+#define A5XX_TEX_CONST_3_LAYERSZ__SHIFT                                0
+static inline uint32_t A5XX_TEX_CONST_3_LAYERSZ(uint32_t val)
+{
+       return ((val >> 12) << A5XX_TEX_CONST_3_LAYERSZ__SHIFT) & A5XX_TEX_CONST_3_LAYERSZ__MASK;
+}
+#define A5XX_TEX_CONST_3_LAYERSZ2__MASK                                0xff800000
+#define A5XX_TEX_CONST_3_LAYERSZ2__SHIFT                       23
+static inline uint32_t A5XX_TEX_CONST_3_LAYERSZ2(uint32_t val)
+{
+       return ((val >> 12) << A5XX_TEX_CONST_3_LAYERSZ2__SHIFT) & A5XX_TEX_CONST_3_LAYERSZ2__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_4                                   0x00000004
+#define A5XX_TEX_CONST_4_BASE__MASK                            0xffffffe0
+#define A5XX_TEX_CONST_4_BASE__SHIFT                           5
+static inline uint32_t A5XX_TEX_CONST_4_BASE(uint32_t val)
+{
+       return ((val >> 5) << A5XX_TEX_CONST_4_BASE__SHIFT) & A5XX_TEX_CONST_4_BASE__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_5                                   0x00000005
+#define A5XX_TEX_CONST_5_DEPTH__MASK                           0x3ffe0000
+#define A5XX_TEX_CONST_5_DEPTH__SHIFT                          17
+static inline uint32_t A5XX_TEX_CONST_5_DEPTH(uint32_t val)
+{
+       return ((val) << A5XX_TEX_CONST_5_DEPTH__SHIFT) & A5XX_TEX_CONST_5_DEPTH__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_6                                   0x00000006
+
+#define REG_A5XX_TEX_CONST_7                                   0x00000007
+
+#define REG_A5XX_TEX_CONST_8                                   0x00000008
+
+#define REG_A5XX_TEX_CONST_9                                   0x00000009
+
+#define REG_A5XX_TEX_CONST_10                                  0x0000000a
+
+#define REG_A5XX_TEX_CONST_11                                  0x0000000b
+
+
+#endif /* A5XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
new file mode 100644 (file)
index 0000000..f5847bc
--- /dev/null
@@ -0,0 +1,1345 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gem.h"
+#include "msm_iommu.h"
+#include "a5xx_gpu.h"
+
+static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+       uint32_t wptr;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ring->lock, flags);
+
+       /* Copy the shadow to the actual register */
+       ring->cur = ring->next;
+
+       /* Make sure to wrap wptr if we need to */
+       wptr = get_wptr(ring);
+
+       spin_unlock_irqrestore(&ring->lock, flags);
+
+       /* Make sure everything is posted before making a decision */
+       mb();
+
+       /* Update HW if this is the current ring and we are not in preempt */
+       if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu))
+               gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
+}
+
+static void a5xx_set_pagetable(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+       struct msm_gem_address_space *aspace)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct msm_mmu *mmu = aspace->mmu;
+       struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+       if (!iommu->ttbr0)
+               return;
+
+       /* Turn off protected mode */
+       OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+       OUT_RING(ring, 0);
+
+       /* Turn on APIV mode to access critical regions */
+       OUT_PKT4(ring, REG_A5XX_CP_CNTL, 1);
+       OUT_RING(ring, 1);
+
+       /* Make sure the ME is syncronized before staring the update */
+       OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);
+
+       /* Execute the table update */
+       OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 3);
+       OUT_RING(ring, lower_32_bits(iommu->ttbr0));
+       OUT_RING(ring, upper_32_bits(iommu->ttbr0));
+       OUT_RING(ring, iommu->contextidr);
+
+       /*
+        * Write the new TTBR0 to the preemption records - this will be used to
+        * reload the pagetable if the current ring gets preempted out.
+        */
+       OUT_PKT7(ring, CP_MEM_WRITE, 4);
+       OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id, ttbr0)));
+       OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, ttbr0)));
+       OUT_RING(ring, lower_32_bits(iommu->ttbr0));
+       OUT_RING(ring, upper_32_bits(iommu->ttbr0));
+
+       /* Also write the current contextidr (ASID) */
+       OUT_PKT7(ring, CP_MEM_WRITE, 3);
+       OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id,
+               contextidr)));
+       OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id,
+               contextidr)));
+       OUT_RING(ring, iommu->contextidr);
+
+       /* Invalidate the draw state so we start off fresh */
+       OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
+       OUT_RING(ring, 0x40000);
+       OUT_RING(ring, 1);
+       OUT_RING(ring, 0);
+
+       /* Turn off APRIV */
+       OUT_PKT4(ring, REG_A5XX_CP_CNTL, 1);
+       OUT_RING(ring, 0);
+
+       /* Turn off protected mode */
+       OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+       OUT_RING(ring, 1);
+}
+
+static int a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+       struct msm_ringbuffer *ring = gpu->rb[submit->ring];
+       unsigned int i, ibs = 0;
+
+       a5xx_set_pagetable(gpu, ring, submit->aspace);
+
+       OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+       OUT_RING(ring, 0x02);
+
+       /* Turn off protected mode to write to special registers */
+       OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+       OUT_RING(ring, 0);
+
+       /* Set the save preemption record for the ring/command */
+       OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+       OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring]));
+       OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring]));
+
+       /* Turn back on protected mode */
+       OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+       OUT_RING(ring, 1);
+
+       /* Enable local preemption for finegrain preemption */
+       OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+       OUT_RING(ring, 0x02);
+
+       /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
+       OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+       OUT_RING(ring, 0x02);
+
+       /* Submit the commands */
+       for (i = 0; i < submit->nr_cmds; i++) {
+               switch (submit->cmd[i].type) {
+               case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+                       break;
+               case MSM_SUBMIT_CMD_BUF:
+                       OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+                       OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+                       OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+                       OUT_RING(ring, submit->cmd[i].size);
+                       ibs++;
+                       break;
+               }
+       }
+
+       /*
+        * Write the render mode to NULL (0) to indicate to the CP that the IBs
+        * are done rendering - otherwise a lucky preemption would start
+        * replaying from the last checkpoint
+        */
+       OUT_PKT7(ring, CP_SET_RENDER_MODE, 5);
+       OUT_RING(ring, 0);
+       OUT_RING(ring, 0);
+       OUT_RING(ring, 0);
+       OUT_RING(ring, 0);
+       OUT_RING(ring, 0);
+
+       /* Turn off IB level preemptions */
+       OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+       OUT_RING(ring, 0x01);
+
+       /* Write the fence to the scratch register */
+       OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
+       OUT_RING(ring, submit->fence);
+
+       /*
+        * Execute a CACHE_FLUSH_TS event. This will ensure that the
+        * timestamp is written to the memory and then triggers the interrupt
+        */
+       OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+       OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
+
+       OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id, fence)));
+       OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, fence)));
+       OUT_RING(ring, submit->fence);
+
+       /* Yield the floor on command completion */
+       OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+       /*
+        * If dword[2:1] are non zero, they specify an address for the CP to
+        * write the value of dword[3] to on preemption complete. Write 0 to
+        * skip the write
+        */
+       OUT_RING(ring, 0x00);
+       OUT_RING(ring, 0x00);
+       /* Data value - not used if the address above is 0 */
+       OUT_RING(ring, 0x01);
+       /* Set bit 0 to trigger an interrupt on preempt complete */
+       OUT_RING(ring, 0x01);
+
+       a5xx_flush(gpu, ring);
+
+       /* Check to see if we need to start preemption */
+       a5xx_preempt_trigger(gpu);
+
+       return 0;
+}
+
+static const struct {
+       u32 offset;
+       u32 value;
+} a5xx_hwcg[] = {
+       {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+       {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+       {REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
+       {REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
+       {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+       {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+       {REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
+       {REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
+       {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+       {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+       {REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
+       {REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
+       {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+       {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
+       {REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
+       {REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
+       {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+       {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+       {REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+       {REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+       {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+       {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+       {REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+       {REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+       {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+       {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
+       {REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
+       {REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
+       {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+       {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+       {REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+       {REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+       {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+       {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+       {REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+       {REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+       {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+       {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
+       {REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
+       {REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
+       {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+       {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
+       {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+       {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
+       {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+       {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
+       {REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
+       {REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
+       {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+       {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+       {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+       {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
+       {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
+       {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
+       {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+       {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+       {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
+       {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
+       {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
+       {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+       {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+       {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+       {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+       {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+       {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+       {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+       {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+       {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+       {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+       {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
+};
+
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
+               gpu_write(gpu, a5xx_hwcg[i].offset,
+                       state ? a5xx_hwcg[i].value : 0);
+
+       /* There are a few additional registers just for A540 */
+       if (adreno_is_a540(adreno_gpu)) {
+               gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU,
+                       state  ? 0x770 : 0);
+               gpu_write(gpu, REG_A5XX_RBBM_CLOCK_HYST_GPMU,
+                       state ? 0x004 : 0);
+       }
+
+       gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
+       gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
+}
+
/*
 * a5xx_me_init() - send CP_ME_INIT to initialize the CP microengine
 * @gpu: the GPU to initialize
 *
 * Emits an 8-dword CP_ME_INIT packet on ring 0, flushes it and waits for
 * the GPU to go idle.  Returns 0 on success, -EINVAL if the GPU did not
 * idle afterwards.
 */
static int a5xx_me_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_ringbuffer *ring = gpu->rb[0];

	OUT_PKT7(ring, CP_ME_INIT, 8);

	OUT_RING(ring, 0x0000002F);

	/* Enable multiple hardware contexts */
	OUT_RING(ring, 0x00000003);

	/* Enable error detection */
	OUT_RING(ring, 0x20000000);

	/* Don't enable header dump */
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	/* Specify workarounds for various microcode issues */
	if (adreno_is_a530(adreno_gpu)) {
		/* Workaround for token end syncs
		 * Force a WFI after every direct-render 3D mode draw and every
		 * 2D mode 3 draw
		 */
		OUT_RING(ring, 0x0000000B);
	} else {
		/* No workarounds enabled */
		OUT_RING(ring, 0x00000000);
	}

	/* Trailing dwords fill out the 8-dword packet (presumably padding) */
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	gpu->funcs->flush(gpu, ring);
	return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}
+
/*
 * a5xx_preempt_start() - submit the initial preemption setup commands
 * @gpu: the GPU
 *
 * On multi-ring configurations, programs the preemption save record for
 * ring 0 and enables local preemption and yield, then yields the floor so
 * the preemption machinery starts in a known state.  A no-op when only one
 * ring exists.  Returns 0 on success, -EINVAL if the GPU failed to idle.
 */
static int a5xx_preempt_start(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring = gpu->rb[0];

	/* Preemption is not possible with a single ring; nothing to set up */
	if (gpu->nr_rings == 1)
		return 0;

	/* Turn off protected mode to write to special registers */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 0);

	/* Set the save preemption record for the ring/command */
	OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
	OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
	OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));

	/* Turn back on protected mode */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 1);

	OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x00);

	OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
	OUT_RING(ring, 0x01);

	OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
	OUT_RING(ring, 0x01);

	/* Yield the floor on command completion */
	OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
	OUT_RING(ring, 0x00);
	OUT_RING(ring, 0x00);
	OUT_RING(ring, 0x01);
	OUT_RING(ring, 0x01);

	gpu->funcs->flush(gpu, ring);

	return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}
+
+
/*
 * a5xx_ucode_load_bo() - copy a firmware image into a new GEM buffer
 * @gpu:  the GPU (provides the DRM device and address space)
 * @fw:   the firmware image to copy
 * @iova: if non-NULL, also pin the buffer and return its GPU address here
 *
 * The first 4 bytes of the firmware blob are skipped - the copy starts at
 * data[4] (presumably a version/header word; confirm against the ucode
 * format).  Returns the new buffer object, or an ERR_PTR on failure; the
 * object reference is dropped on every error path.
 */
static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
		const struct firmware *fw, u64 *iova)
{
	struct drm_device *drm = gpu->dev;
	struct drm_gem_object *bo;
	void *ptr;

	/* msm_gem_new() is called under struct_mutex */
	mutex_lock(&drm->struct_mutex);
	bo = msm_gem_new(drm, fw->size - 4,
		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY);
	mutex_unlock(&drm->struct_mutex);

	if (IS_ERR(bo))
		return bo;

	ptr = msm_gem_vaddr(bo);
	if (!ptr) {
		drm_gem_object_unreference_unlocked(bo);
		return ERR_PTR(-ENOMEM);
	}

	if (iova) {
		int ret = msm_gem_get_iova(bo, gpu->aspace, iova);

		if (ret) {
			drm_gem_object_unreference_unlocked(bo);
			return ERR_PTR(ret);
		}
	}

	memcpy(ptr, &fw->data[4], fw->size - 4);
	return bo;
}
+
+static int a5xx_ucode_init(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+       int ret;
+
+       if (!a5xx_gpu->pm4_bo) {
+               a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pm4,
+                       &a5xx_gpu->pm4_iova);
+
+               if (IS_ERR(a5xx_gpu->pm4_bo)) {
+                       ret = PTR_ERR(a5xx_gpu->pm4_bo);
+                       a5xx_gpu->pm4_bo = NULL;
+                       dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
+                               ret);
+                       return ret;
+               }
+       }
+
+       if (!a5xx_gpu->pfp_bo) {
+               a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pfp,
+                       &a5xx_gpu->pfp_iova);
+
+               if (IS_ERR(a5xx_gpu->pfp_bo)) {
+                       ret = PTR_ERR(a5xx_gpu->pfp_bo);
+                       a5xx_gpu->pfp_bo = NULL;
+                       dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
+                               ret);
+                       return ret;
+               }
+       }
+
+       gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
+               REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);
+
+       gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO,
+               REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova);
+
+       return 0;
+}
+
+#ifdef CONFIG_MSM_SUBSYSTEM_RESTART
+
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/scm.h>
+
+static void a5xx_zap_shader_init(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+       const char *name;
+       void *ptr;
+
+       /* If no zap shader was defined, we'll assume that none is needed */
+       if (of_property_read_string(GPU_OF_NODE(gpu), "qcom,zap-shader", &name))
+               return;
+
+       /*
+        * If the zap shader has already been loaded then just ask the SCM to
+        * re-initialize the registers (not needed if CPZ retention is a thing)
+        */
+       if (test_bit(A5XX_ZAP_SHADER_LOADED, &a5xx_gpu->flags)) {
+               int ret;
+               struct scm_desc desc = { 0 };
+
+               if (of_property_read_bool(GPU_OF_NODE(gpu),
+                       "qcom,cpz-retention"))
+                       return;
+
+               desc.args[0] = 0;
+               desc.args[1] = 13;
+               desc.arginfo = SCM_ARGS(2);
+
+               ret = scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT, 0x0A), &desc);
+               if (ret)
+                       DRM_ERROR(
+                               "%s: zap-shader resume failed with error %d\n",
+                               gpu->name, ret);
+
+               return;
+       }
+
+       ptr = subsystem_get(name);
+
+       if (IS_ERR_OR_NULL(ptr)) {
+               DRM_ERROR("%s: Unable to load the zap shader: %ld\n", gpu->name,
+                       IS_ERR(ptr) ? PTR_ERR(ptr) : -ENODEV);
+       } else {
+               set_bit(A5XX_ZAP_SHADER_LOADED, &a5xx_gpu->flags);
+       }
+}
+#else
+static void a5xx_zap_shader_init(struct msm_gpu *gpu)
+{
+       if (of_find_property(GPU_OF_NODE(gpu), "qcom,zap-shader", NULL))
+               return;
+
+       DRM_INFO_ONCE("%s: Zap shader is defined but loader isn't available\n",
+               gpu->name);
+}
+#endif
+
/*
 * Interrupt sources left unmasked during normal operation: fatal bus and
 * CP errors, hang detection, CP software interrupts (routed to the
 * preemption handler), cache-flush timestamps (retirement) and GPMU
 * voltage droop.
 */
#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
	  A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
	  A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
	  A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
	  A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
	  A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
	  A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
	  A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
	  A5XX_RBBM_INT_0_MASK_CP_SW | \
	  A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
	  A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
	  A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+
+static int a5xx_hw_init(struct msm_gpu *gpu)
+{
+       struct msm_drm_private *priv = gpu->dev->dev_private;
+       struct platform_device *pdev = priv->gpu_pdev;
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+       int ret, bit = 0;
+
+       pm_qos_update_request(&gpu->pm_qos_req_dma, 101);
+
+       gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+       if (adreno_is_a540(adreno_gpu))
+               gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
+
+       /* Make all blocks contribute to the GPU BUSY perf counter */
+       gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
+
+       /* Enable RBBM error reporting bits */
+       gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);
+
+       if (adreno_gpu->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
+               /*
+                * Mask out the activity signals from RB1-3 to avoid false
+                * positives
+                */
+
+               gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
+                       0xF0000000);
+               gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
+                       0xFFFFFFFF);
+               gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
+                       0xFFFFFFFF);
+               gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
+                       0xFFFFFFFF);
+               gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
+                       0xFFFFFFFF);
+               gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
+                       0xFFFFFFFF);
+               gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
+                       0xFFFFFFFF);
+               gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
+                       0xFFFFFFFF);
+       }
+
+       /* Enable fault detection */
+       gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
+               (1 << 30) | 0xFFFF);
+
+       /* Turn on performance counters */
+       gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);
+
+       /* Increase VFD cache access so LRZ and other data gets evicted less */
+       gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
+
+       /* Disable L2 bypass in the UCHE */
+       gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
+       gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
+       gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
+       gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
+
+       /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
+       gpu_write64(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO,
+               REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
+
+       gpu_write64(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
+               REG_A5XX_UCHE_GMEM_RANGE_MAX_HI,
+               0x00100000 + adreno_gpu->gmem - 1);
+
+       gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
+       gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
+       gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
+       gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
+
+       gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));
+
+       if (adreno_gpu->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
+               gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
+
+       gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
+
+       /* Enable USE_RETENTION_FLOPS */
+       gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
+
+       /* Enable ME/PFP split notification */
+       gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
+
+       /* Enable HWCG */
+       a5xx_set_hwcg(gpu, true);
+
+       gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
+
+       /* Set the highest bank bit if specified in the device tree */
+       if (!of_property_read_u32(pdev->dev.of_node, "qcom,highest-bank-bit",
+               &bit)) {
+               if (bit >= 13 && bit <= 16) {
+                       bit -= 13;
+
+                       gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, bit << 7);
+                       gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, bit << 1);
+
+                       if (adreno_is_a540(adreno_gpu))
+                               gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2,
+                                       bit);
+               }
+       }
+
+       /* Try to load and initialize the zap shader if applicable */
+       a5xx_zap_shader_init(gpu);
+
+       /* Protect registers from the CP */
+       gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
+
+       /* RBBM */
+       gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
+       gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
+       gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
+       gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
+       gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
+       gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));
+
+       /* Content protect */
+       gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
+               ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+                       16));
+       gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
+               ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));
+
+       /* CP */
+       gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
+       gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
+       gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
+       gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));
+
+       /* RB */
+       gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
+       gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));
+
+       /* VPC */
+       gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
+       gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4));
+
+       /* UCHE */
+       gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
+
+       if (adreno_is_a530(adreno_gpu))
+               gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
+                       ADRENO_PROTECT_RW(0x10000, 0x8000));
+
+       gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
+       /*
+        * Disable the trusted memory range - we don't actually supported secure
+        * memory rendering at this point in time and we don't want to block off
+        * part of the virtual memory space.
+        */
+       gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+               REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
+       gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+
+       /* Put the GPU into 64 bit by default */
+       gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1);
+       gpu_write(gpu, REG_A5XX_VSC_ADDR_MODE_CNTL, 0x1);
+       gpu_write(gpu, REG_A5XX_GRAS_ADDR_MODE_CNTL, 0x1);
+       gpu_write(gpu, REG_A5XX_RB_ADDR_MODE_CNTL, 0x1);
+       gpu_write(gpu, REG_A5XX_PC_ADDR_MODE_CNTL, 0x1);
+       gpu_write(gpu, REG_A5XX_HLSQ_ADDR_MODE_CNTL, 0x1);
+       gpu_write(gpu, REG_A5XX_VFD_ADDR_MODE_CNTL, 0x1);
+       gpu_write(gpu, REG_A5XX_VPC_ADDR_MODE_CNTL, 0x1);
+       gpu_write(gpu, REG_A5XX_UCHE_ADDR_MODE_CNTL, 0x1);
+       gpu_write(gpu, REG_A5XX_SP_ADDR_MODE_CNTL, 0x1);
+       gpu_write(gpu, REG_A5XX_TPL1_ADDR_MODE_CNTL, 0x1);
+       gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
+
+       /* Load the GPMU firmware before starting the HW init */
+       a5xx_gpmu_ucode_init(gpu);
+
+       ret = adreno_hw_init(gpu);
+       if (ret)
+               return ret;
+
+       a5xx_preempt_hw_init(gpu);
+
+       ret = a5xx_ucode_init(gpu);
+       if (ret)
+               return ret;
+
+       /* Disable the interrupts through the initial bringup stage */
+       gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
+
+       /* Clear ME_HALT to start the micro engine */
+       gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
+       ret = a5xx_me_init(gpu);
+       if (ret)
+               return ret;
+
+       /*
+        * Send a pipeline event stat to get misbehaving counters to start
+        * ticking correctly
+        */
+       if (adreno_is_a530(adreno_gpu)) {
+               OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
+               OUT_RING(gpu->rb[0], 0x0F);
+
+               gpu->funcs->flush(gpu, gpu->rb[0]);
+               if (!a5xx_idle(gpu, gpu->rb[0]))
+                       return -EINVAL;
+       }
+
+       /*
+        * If a zap shader was specified in the device tree, assume that we are
+        * on a secure device that blocks access to the RBBM_SECVID registers
+        * so we need to use the CP to switch out of secure mode. If a zap
+        * shader was NOT specified then we assume we are on an unlocked device.
+        * If we guessed wrong then the access to the register will probably
+        * cause a XPU violation.
+        */
+       if (test_bit(A5XX_ZAP_SHADER_LOADED, &a5xx_gpu->flags)) {
+               struct msm_ringbuffer *ring = gpu->rb[0];
+
+               OUT_PKT7(ring, CP_SET_SECURE_MODE, 1);
+               OUT_RING(ring, 0x00000000);
+
+               gpu->funcs->flush(gpu, gpu->rb[0]);
+               if (!a5xx_idle(gpu, gpu->rb[0]))
+                       return -EINVAL;
+       } else {
+               /* Print a warning so if we die, we know why */
+               dev_warn_once(gpu->dev->dev,
+                       "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
+               gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+       }
+
+       /* Next, start the power */
+       ret = a5xx_power_init(gpu);
+       if (ret)
+               return ret;
+
+
+       /* Last step - yield the ringbuffer */
+       a5xx_preempt_start(gpu);
+
+       pm_qos_update_request(&gpu->pm_qos_req_dma, 501);
+
+       return 0;
+}
+
/*
 * a5xx_recover() - reset and restart the GPU after a hang
 * @gpu: the GPU to recover
 *
 * Dumps state and takes a snapshot for postmortem analysis, pulses the
 * RBBM software reset, then hands off to the common adreno recovery path.
 */
static void a5xx_recover(struct msm_gpu *gpu)
{
	adreno_dump_info(gpu);

	msm_gpu_snapshot(gpu, gpu->snapshot);

	/* Reset the GPU so it can work again; the read back presumably posts
	 * the write before the reset is deasserted (standard MMIO idiom)
	 */
	gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
	gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
	gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);

	adreno_recover(gpu);
}
+
/*
 * a5xx_destroy() - tear down the A5XX GPU instance
 * @gpu: the GPU to destroy
 *
 * Shuts down preemption, releases the PM4/PFP/GPMU microcode buffers
 * (unpinning their GPU mappings first where one was created), then runs
 * the common adreno cleanup and frees the device structure.
 */
static void a5xx_destroy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

	DBG("%s", gpu->name);

	a5xx_preempt_fini(gpu);

	if (a5xx_gpu->pm4_bo) {
		if (a5xx_gpu->pm4_iova)
			msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
		drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
	}

	if (a5xx_gpu->pfp_bo) {
		if (a5xx_gpu->pfp_iova)
			msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
		drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
	}

	if (a5xx_gpu->gpmu_bo) {
		if (a5xx_gpu->gpmu_iova)
			msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
		drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
	}

	adreno_gpu_cleanup(adreno_gpu);
	kfree(a5xx_gpu);
}
+
+static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
+{
+       if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
+               return false;
+
+       /*
+        * Nearly every abnormality ends up pausing the GPU and triggering a
+        * fault so we can safely just watch for this one interrupt to fire
+        */
+       return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
+               A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
+}
+
/*
 * a5xx_idle() - wait for the GPU to drain a ring and go idle
 * @gpu:  the GPU
 * @ring: the ringbuffer to wait on (must be the current ring)
 *
 * Waits for the CP to drain the ringbuffer, then spins until the RBBM
 * status and hang-detect interrupt indicate the hardware is quiet.
 * Returns true if the GPU idled, false on timeout or when called on a
 * ring that does not currently own the hardware.
 */
bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

	/* Only the ring that owns the hardware can be meaningfully idled */
	if (ring != a5xx_gpu->cur_ring) {
		WARN(1, "Tried to idle a non-current ringbuffer\n");
		return false;
	}

	/* wait for CP to drain ringbuffer: */
	if (!adreno_idle(gpu, ring))
		return false;

	if (spin_until(_a5xx_check_idle(gpu))) {
		DRM_ERROR(
			"%s: timeout waiting for GPU RB %d to idle: status %8.8X rptr/wptr: %4.4X/%4.4X irq %8.8X\n",
			gpu->name, ring->id,
			gpu_read(gpu, REG_A5XX_RBBM_STATUS),
			gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
			gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
			gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS));

		return false;
	}

	return true;
}
+
+static void a5xx_cp_err_irq(struct msm_gpu *gpu)
+{
+       u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
+
+       if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
+               u32 val;
+
+               gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);
+
+               /*
+                * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
+                * read it twice
+                */
+
+               gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+               val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+
+               dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",
+                       val);
+       }
+
+       if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
+               dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
+                       gpu_read(gpu, REG_A5XX_CP_HW_FAULT));
+
+       if (status & A5XX_CP_INT_CP_DMA_ERROR)
+               dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");
+
+       if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
+               u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);
+
+               dev_err_ratelimited(gpu->dev->dev,
+                       "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
+                       val & (1 << 24) ? "WRITE" : "READ",
+                       (val & 0xFFFFF) >> 2, val);
+       }
+
+       if (status & A5XX_CP_INT_CP_AHB_ERROR) {
+               u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
+               const char *access[16] = { "reserved", "reserved",
+                       "timestamp lo", "timestamp hi", "pfp read", "pfp write",
+                       "", "", "me read", "me write", "", "", "crashdump read",
+                       "crashdump write" };
+
+               dev_err_ratelimited(gpu->dev->dev,
+                       "CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
+                       status & 0xFFFFF, access[(status >> 24) & 0xF],
+                       (status & (1 << 31)), status);
+       }
+}
+
/*
 * a5xx_rbbm_err_irq() - decode and log RBBM error interrupt sources
 * @gpu:    the GPU
 * @status: RBBM_INT_0_STATUS value captured by a5xx_irq()
 *
 * The AHB error is special: a5xx_irq() deliberately leaves its interrupt
 * bit set and it is only acked here, after the error condition itself is
 * cleared, so it cannot storm.  Note ATB_BUS_OVERFLOW is reported here
 * even though it is not part of RBBM_ERROR_MASK.
 */
static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
{
	if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
		u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);

		dev_err_ratelimited(gpu->dev->dev,
			"RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
			val & (1 << 28) ? "WRITE" : "READ",
			(val & 0xFFFFF) >> 2, (val >> 20) & 0x3,
			(val >> 24) & 0xF);

		/* Clear the error */
		gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));

		/* Clear the interrupt */
		gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
			A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
	}

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
			gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
			gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
			gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
}
+
+static void a5xx_uche_err_irq(struct msm_gpu *gpu)
+{
+       uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI);
+
+       addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);
+
+       dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",
+               addr);
+}
+
/* Log the GPMU voltage droop interrupt; no recovery action is taken here */
static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
{
	dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
}
+
/*
 * a5xx_fault_detect_irq() - handle the MISC_HANG_DETECT interrupt
 * @gpu: the GPU that faulted
 *
 * Logs the active ring, submitted fence and CP/RBBM state, stops the
 * hangcheck timer (recovery is already underway, no need for it to
 * re-trigger), then queues the recover worker to reset the GPU.
 */
static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);

	dev_err(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
		ring ? ring->id : -1, adreno_submitted_fence(gpu, ring),
		gpu_read(gpu, REG_A5XX_RBBM_STATUS),
		gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
		gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
		gpu_read64(gpu, REG_A5XX_CP_IB1_BASE, REG_A5XX_CP_IB1_BASE_HI),
		gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
		gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI),
		gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));

	/* Turn off the hangcheck timer to keep it from bothering us */
	del_timer(&gpu->hangcheck_timer);

	queue_work(priv->wq, &gpu->recover_work);
}
+
/*
 * RBBM interrupt bits that a5xx_irq() routes to a5xx_rbbm_err_irq().
 * (ATB_BUS_OVERFLOW is also reported there but is not part of this mask.)
 */
#define RBBM_ERROR_MASK \
	(A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
	A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
+
/*
 * a5xx_irq() - top-level A5XX interrupt handler
 * @gpu: the GPU that raised the interrupt
 *
 * Acks everything except RBBM_AHB_ERROR up front (that one is cleared by
 * a5xx_rbbm_err_irq() only after the underlying error is cleared, to
 * avoid an interrupt storm), then dispatches each pending source to its
 * handler.  Always returns IRQ_HANDLED.
 */
static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
{
	u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);

	/*
	 * Clear all the interrupts except for RBBM_AHB_ERROR
	 * which needs to be cleared after the error condition
	 * is cleared otherwise it will storm
	 */
	gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
			status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);

	if (status & RBBM_ERROR_MASK)
		a5xx_rbbm_err_irq(gpu, status);

	if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
		a5xx_cp_err_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT)
		a5xx_fault_detect_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
		a5xx_uche_err_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
		a5xx_gpmu_err_irq(gpu);

	/* Cache flush timestamps signal submission retirement */
	if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
		msm_gpu_retire(gpu);

	/* CP software interrupts drive the preemption state machine */
	if (status & A5XX_RBBM_INT_0_MASK_CP_SW)
		a5xx_preempt_irq(gpu);

	return IRQ_HANDLED;
}
+
/*
 * Map the generic REG_ADRENO_* identifiers used by the shared adreno code
 * to their A5XX-specific ringbuffer register offsets.
 */
static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
		REG_A5XX_CP_RB_RPTR_ADDR_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL),
};
+
/*
 * Inclusive [start, end] pairs of A5XX register ranges, terminated by ~0.
 * NOTE(review): presumably consumed by the common adreno dump/snapshot
 * code - confirm against the shared adreno_gpu implementation.
 */
static const u32 a5xx_registers[] = {
	0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002b,
	0x002e, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
	0x0097, 0x00bb, 0x03a0, 0x0464, 0x0469, 0x046f, 0x04d2, 0x04d3,
	0x04e0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081a, 0x081f, 0x0841,
	0x0860, 0x0860, 0x0880, 0x08a0, 0x0b00, 0x0b12, 0x0b14, 0x0b28,
	0x0b78, 0x0b7f, 0x0bb0, 0x0bbd, 0x0bc0, 0x0bc6, 0x0bd0, 0x0c53,
	0x0c60, 0x0c61, 0x0c80, 0x0c82, 0x0c84, 0x0c85, 0x0c90, 0x0c9b,
	0x0ca0, 0x0ca0, 0x0cb0, 0x0cb2, 0x0cc1, 0x0cc1, 0x0cc4, 0x0cc7,
	0x0ccc, 0x0ccc, 0x0cd0, 0x0cdb, 0x0ce0, 0x0ce5, 0x0ce8, 0x0ce8,
	0x0cec, 0x0cf1, 0x0cfb, 0x0d0e, 0x0d10, 0x0d17, 0x0d20, 0x0d23,
	0x0d30, 0x0d30, 0x0e40, 0x0e43, 0x0e4a, 0x0e4a, 0x0e50, 0x0e57,
	0x0e60, 0x0e7c, 0x0e80, 0x0e8e, 0x0e90, 0x0e96, 0x0ea0, 0x0eab,
	0x0eb0, 0x0eb2, 0x2100, 0x211e, 0x2140, 0x2145, 0x2180, 0x2185,
	0x2500, 0x251e, 0x2540, 0x2545, 0x2580, 0x2585, 0x3000, 0x3014,
	0x3018, 0x302c, 0x3030, 0x3030, 0x3034, 0x3036, 0x303c, 0x303d,
	0x3040, 0x3040, 0x3042, 0x3042, 0x3049, 0x3049, 0x3058, 0x3058,
	0x305a, 0x3061, 0x3064, 0x3068, 0x306c, 0x306d, 0x3080, 0x3088,
	0x308b, 0x308c, 0x3090, 0x3094, 0x3098, 0x3098, 0x309c, 0x309c,
	0x3124, 0x3124, 0x340c, 0x340c, 0x3410, 0x3410, 0x3800, 0x3801,
	0xa800, 0xa800, 0xa820, 0xa828, 0xa840, 0xa87d, 0xa880, 0xa88d,
	0xa890, 0xa8a3, 0xa8a8, 0xa8aa, 0xa8c0, 0xa8c3, 0xa8c6, 0xa8ca,
	0xa8cc, 0xa8cf, 0xa8d1, 0xa8d8, 0xa8dc, 0xa8dc, 0xa8e0, 0xa8f5,
	0xac00, 0xac06, 0xac40, 0xac47, 0xac60, 0xac62, 0xac80, 0xac82,
	0xb800, 0xb808, 0xb80c, 0xb812, 0xb814, 0xb817, 0xb900, 0xb904,
	0xb906, 0xb90a, 0xb90c, 0xb90f, 0xb920, 0xb924, 0xb926, 0xb92a,
	0xb92c, 0xb92f, 0xb940, 0xb944, 0xb946, 0xb94a, 0xb94c, 0xb94f,
	0xb960, 0xb964, 0xb966, 0xb96a, 0xb96c, 0xb96f, 0xb980, 0xb984,
	0xb986, 0xb98a, 0xb98c, 0xb98f, 0xb9a0, 0xb9b0, 0xb9b8, 0xb9ba,
	0xd200, 0xd23f, 0xe000, 0xe006, 0xe010, 0xe09a, 0xe0a0, 0xe0a4,
	0xe0aa, 0xe0eb, 0xe100, 0xe105, 0xe140, 0xe147, 0xe150, 0xe187,
	0xe1a0, 0xe1a9, 0xe1b0, 0xe1b6, 0xe1c0, 0xe1c7, 0xe1d0, 0xe1d1,
	0xe200, 0xe201, 0xe210, 0xe21c, 0xe240, 0xe268, 0xe280, 0xe280,
	0xe282, 0xe2a3, 0xe2a5, 0xe2c2, 0xe380, 0xe38f, 0xe3b0, 0xe3b0,
	0xe400, 0xe405, 0xe408, 0xe4e9, 0xe4f0, 0xe4f0, 0xe800, 0xe806,
	0xe810, 0xe89a, 0xe8a0, 0xe8a4, 0xe8aa, 0xe8eb, 0xe900, 0xe905,
	0xe940, 0xe947, 0xe950, 0xe987, 0xe9a0, 0xe9a9, 0xe9b0, 0xe9b6,
	0xe9c0, 0xe9c7, 0xe9d0, 0xe9d1, 0xea00, 0xea01, 0xea10, 0xea1c,
	0xea40, 0xea68, 0xea80, 0xea80, 0xea82, 0xeaa3, 0xeaa5, 0xeac2,
	0xeb80, 0xeb8f, 0xebb0, 0xebb0, 0xec00, 0xec05, 0xec08, 0xece9,
	0xecf0, 0xecf0, 0xf400, 0xf400, 0xf800, 0xf807,
	~0
};
+
/*
 * a5xx_pm_resume() - power up the GPU core and its internal power domains
 * @gpu: the GPU
 *
 * After the generic core power-up, turns on the RBCCU domain first (to
 * limit the chance of voltage droop), polls bit 20 of its power/clock
 * status for up to 20us, then does the same for the SP domain.  Returns
 * 0 on success or a negative error code on failure/timeout.
 */
static int a5xx_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	/* Turn on the core power */
	ret = msm_gpu_pm_resume(gpu);
	if (ret)
		return ret;

	/* Turn the RBCCU domain first to limit the chances of voltage droop */
	gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);

	/* Wait 3 usecs before polling */
	udelay(3);

	ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
		(1 << 20), (1 << 20));
	if (ret) {
		DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
			gpu->name,
			gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
		return ret;
	}

	/* Turn on the SP domain */
	gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
	ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
		(1 << 20), (1 << 20));
	if (ret)
		DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
			gpu->name);

	return ret;
}
+
+/*
+ * a5xx_pm_suspend() - quiesce the GPU and power it down.
+ *
+ * Halts the VBIF XIN ports and waits for the halt to be acknowledged so
+ * no transactions are in flight, optionally resets the VBIF on A530, and
+ * finally hands off to the generic msm_gpu_pm_suspend().
+ */
+static int a5xx_pm_suspend(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+       /* Clear the VBIF pipe before shutting down */
+
+       gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
+       /* Wait until all four XIN halt acks are set */
+       spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF);
+
+       gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
+
+       /*
+        * Reset the VBIF before power collapse to avoid issue with FIFO
+        * entries
+        */
+
+       if (adreno_is_a530(adreno_gpu)) {
+               /* These only need to be done for A530 */
+               gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
+               gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
+       }
+
+       return msm_gpu_pm_suspend(gpu);
+}
+
+/*
+ * a5xx_get_timestamp() - read the 64-bit CP performance counter 0 as the
+ * GPU timestamp. Always succeeds.
+ */
+static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+       *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
+               REG_A5XX_RBBM_PERFCTR_CP_0_HI);
+
+       return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * a5xx_show() - debugfs dump: briefly resume the GPU to sample RBBM_STATUS,
+ * suspend again, then delegate to the common adreno_show() output.
+ */
+static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
+{
+       gpu->funcs->pm_resume(gpu);
+
+       seq_printf(m, "status:   %08x\n",
+                       gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+       gpu->funcs->pm_suspend(gpu);
+
+       adreno_show(gpu, m);
+}
+#endif
+
+/* Return the ringbuffer currently scheduled on the hardware */
+static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+       return a5xx_gpu->cur_ring;
+}
+
+/*
+ * Function table wiring the a5xx-specific implementations into the generic
+ * msm/adreno GPU core; generic adreno_* helpers are used where no a5xx
+ * specialization is needed.
+ */
+static const struct adreno_gpu_funcs funcs = {
+       .base = {
+               .get_param = adreno_get_param,
+               .hw_init = a5xx_hw_init,
+               .pm_suspend = a5xx_pm_suspend,
+               .pm_resume = a5xx_pm_resume,
+               .recover = a5xx_recover,
+               .last_fence = adreno_last_fence,
+               .submitted_fence = adreno_submitted_fence,
+               .submit = a5xx_submit,
+               .flush = a5xx_flush,
+               .active_ring = a5xx_active_ring,
+               .irq = a5xx_irq,
+               .destroy = a5xx_destroy,
+#ifdef CONFIG_DEBUG_FS
+               .show = a5xx_show,
+#endif
+               .snapshot = a5xx_snapshot,
+       },
+       .get_timestamp = a5xx_get_timestamp,
+};
+
+/*
+ * Read the limits management leakage from the efuses.
+ *
+ * Computes the base leakage value from fuse rows 0 and 2 and the DT-provided
+ * coefficient, and stores it in a5xx_gpu->lm_leakage for later programming
+ * into REG_A5XX_GPMU_BASE_LEAKAGE. Leaves the default leakage untouched if
+ * the fuse region is too small or the DT property is absent.
+ */
+static void a530_efuse_leakage(struct platform_device *pdev,
+               struct adreno_gpu *adreno_gpu, void *base,
+               size_t size)
+{
+       struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+       unsigned int row0, row2;
+       unsigned int leakage_pwr_on, coeff;
+
+       /* Both fuse rows (0x134 and 0x144) must be inside the mapping */
+       if (size < 0x148)
+               return;
+
+       /* Leakage */
+       row0 = readl_relaxed(base + 0x134);
+       row2 = readl_relaxed(base + 0x144);
+
+       /* Read barrier to get the previous two reads */
+       rmb();
+
+       /*
+        * Get the leakage coefficient from device tree. Note: the property
+        * name keeps the existing "coefficent" spelling to match shipped DTs.
+        */
+       if (of_property_read_u32(pdev->dev.of_node,
+               "qcom,base-leakage-coefficent", &coeff))
+               return;
+
+       /*
+        * The multiplier exponent is fuse bits [2:1] of row0. The mask must
+        * be applied to the shifted fuse value before the 1 << shift; the
+        * previous expression masked the shift result instead because &
+        * binds looser than << in C.
+        */
+       leakage_pwr_on = ((row2 >> 2) & 0xFF) * (1 << ((row0 >> 1) & 0x03));
+
+       /* High half: power-on leakage; low half: scaled by the coefficient */
+       a5xx_gpu->lm_leakage = (leakage_pwr_on << 16) |
+               ((leakage_pwr_on * coeff) / 100);
+}
+
+/*
+ * Read the speed bin from the efuses.
+ *
+ * The DT property "qcom,gpu-speed-bin" is a triplet: { fuse byte offset,
+ * bit mask, shift }. The fuse word at the given offset is masked and
+ * shifted to produce adreno_gpu->speed_bin. Silently does nothing if the
+ * property is missing or the offset falls outside the mapped region.
+ */
+static void a530_efuse_bin(struct platform_device *pdev,
+               struct adreno_gpu *adreno_gpu, void *base,
+               size_t size)
+{
+       uint32_t speed_bin[3];
+       uint32_t val;
+
+       if (of_property_read_u32_array(pdev->dev.of_node,
+               "qcom,gpu-speed-bin", speed_bin, 3))
+               return;
+
+       /* Make sure the full 32-bit word at the offset is readable */
+       if (size < speed_bin[0] + 4)
+               return;
+
+       val = readl_relaxed(base + speed_bin[0]);
+
+       adreno_gpu->speed_bin = (val & speed_bin[1]) >> speed_bin[2];
+}
+
+/*
+ * Read target specific configuration from the efuses.
+ *
+ * Maps the "qfprom_memory" resource and extracts the speed bin and leakage
+ * values for A530 targets. All failures are silently ignored - efuse data
+ * is optional and defaults remain in place.
+ */
+static void a5xx_efuses_read(struct platform_device *pdev,
+               struct adreno_gpu *adreno_gpu)
+{
+       struct adreno_platform_config *config = pdev->dev.platform_data;
+       const struct adreno_info *info = adreno_info(config->rev);
+       struct resource *res;
+       void *base;
+
+       /*
+        * The adreno_gpu->revn mechanism isn't set up yet so we need to check
+        * it directly here
+        */
+       if (info->revn != 530)
+               return;
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+               "qfprom_memory");
+       if (!res)
+               return;
+
+       base = ioremap(res->start, resource_size(res));
+       if (!base)
+               return;
+
+       a530_efuse_bin(pdev, adreno_gpu, base, resource_size(res));
+       a530_efuse_leakage(pdev, adreno_gpu, base, resource_size(res));
+
+       iounmap(base);
+}
+
+/*
+ * a5xx_gpu_init() - allocate and initialize the a5xx GPU instance.
+ *
+ * Sets up the register lists, seeds the default LM leakage value (which
+ * the efuses may override), initializes the common adreno layer with 4
+ * ringbuffers, and prepares preemption state.
+ *
+ * Return: the new msm_gpu on success, ERR_PTR on failure.
+ */
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+{
+       struct msm_drm_private *priv = dev->dev_private;
+       struct platform_device *pdev = priv->gpu_pdev;
+       struct a5xx_gpu *a5xx_gpu = NULL;
+       struct adreno_gpu *adreno_gpu;
+       struct msm_gpu *gpu;
+       int ret;
+
+       if (!pdev) {
+               dev_err(dev->dev, "No A5XX device is defined\n");
+               return ERR_PTR(-ENXIO);
+       }
+
+       a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
+       if (!a5xx_gpu)
+               return ERR_PTR(-ENOMEM);
+
+       adreno_gpu = &a5xx_gpu->base;
+       gpu = &adreno_gpu->base;
+
+       a5xx_gpu->pdev = pdev;
+       adreno_gpu->registers = a5xx_registers;
+       adreno_gpu->reg_offsets = a5xx_register_offsets;
+
+       /* Default leakage; a530_efuse_leakage() may replace it below */
+       a5xx_gpu->lm_leakage = 0x4E001A;
+
+       /* Check the efuses for some configuration */
+       a5xx_efuses_read(pdev, adreno_gpu);
+
+       /* 4 = number of ringbuffers (one per priority level) */
+       ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
+       if (ret) {
+               a5xx_destroy(&(a5xx_gpu->base.base));
+               return ERR_PTR(ret);
+       }
+
+       /* Set up the preemption specific bits and pieces for each ringbuffer */
+       a5xx_preempt_init(gpu);
+
+       return gpu;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
new file mode 100644 (file)
index 0000000..3de14fe
--- /dev/null
@@ -0,0 +1,187 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __A5XX_GPU_H__
+#define __A5XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* Bringing over the hack from the previous targets */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a5xx.xml.h"
+
+/* Bits for a5xx_gpu::flags */
+enum {
+       A5XX_ZAP_SHADER_LOADED = 1,
+};
+
+/* a5xx-specific GPU state wrapping the common adreno_gpu */
+struct a5xx_gpu {
+       unsigned long flags;
+
+       struct adreno_gpu base;
+       struct platform_device *pdev;
+
+       /* PM4 (ME) microcode buffer and its GPU address */
+       struct drm_gem_object *pm4_bo;
+       uint64_t pm4_iova;
+
+       /* PFP microcode buffer and its GPU address */
+       struct drm_gem_object *pfp_bo;
+       uint64_t pfp_iova;
+
+       /* Staged GPMU microcode commands; gpmu_dwords == 0 means no GPMU */
+       struct drm_gem_object *gpmu_bo;
+       uint64_t gpmu_iova;
+       uint32_t gpmu_dwords;
+
+       /* Limits-management base leakage, from efuses or the default */
+       uint32_t lm_leakage;
+
+       /* Ring currently on hardware and the ring being preempted to */
+       struct msm_ringbuffer *cur_ring;
+       struct msm_ringbuffer *next_ring;
+
+       /* Per-ring preemption records shared with the CP */
+       struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
+       struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
+       uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
+
+       /* State machine (enum preempt_state) and watchdog timer */
+       atomic_t preempt_state;
+       struct timer_list preempt_timer;
+
+       /* Shared SMMU pagetable info block used during preemption */
+       struct a5xx_smmu_info *smmu_info;
+       struct drm_gem_object *smmu_info_bo;
+       uint64_t smmu_info_iova;
+};
+
+#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
+
+/*
+ * In order to do lockless preemption we use a simple state machine to progress
+ * through the process.
+ *
+ * PREEMPT_NONE - no preemption in progress.  Next state START.
+ * PREEMPT_START - The trigger is evaluating if preemption is possible. Next
+ * states: TRIGGERED, NONE
+ * PREEMPT_TRIGGERED: A preemption has been executed on the hardware. Next
+ * states: FAULTED, PENDING
+ * PREEMPT_FAULTED: A preemption timed out (never completed). This will trigger
+ * recovery.  Next state: N/A
+ * PREEMPT_PENDING: Preemption complete interrupt fired - the callback is
+ * checking the success of the operation. Next state: FAULTED, NONE.
+ */
+
+enum preempt_state {
+       PREEMPT_NONE = 0,
+       PREEMPT_START,
+       PREEMPT_TRIGGERED,
+       PREEMPT_FAULTED,
+       PREEMPT_PENDING,
+};
+
+/*
+ * struct a5xx_preempt_record is a shared buffer between the microcode and the
+ * CPU to store the state for preemption. The record itself is much larger
+ * (64k) but most of that is used by the CP for storage.
+ *
+ * There is a preemption record assigned per ringbuffer. When the CPU triggers a
+ * preemption, it fills out the record with the useful information (wptr, ring
+ * base, etc) and the microcode uses that information to set up the CP following
+ * the preemption.  When a ring is switched out, the CP will save the ringbuffer
+ * state back to the record. In this way, once the records are properly set up
+ * the CPU can quickly switch back and forth between ringbuffers by only
+ * updating a few registers (often only the wptr).
+ *
+ * These are the CPU aware registers in the record:
+ * @magic: Must always be 0x27C4BAFC
+ * @info: Type of the record - written 0 by the CPU, updated by the CP
+ * @data: Data field from SET_RENDER_MODE or a checkpoint. Written and used by
+ * the CP
+ * @cntl: Value of RB_CNTL written by CPU, save/restored by CP
+ * @rptr: Value of RB_RPTR written by CPU, save/restored by CP
+ * @wptr: Value of RB_WPTR written by CPU, save/restored by CP
+ * @rptr_addr: Value of RB_RPTR_ADDR written by CPU, save/restored by CP
+ * @rbase: Value of RB_BASE written by CPU, save/restored by CP
+ * @counter: GPU address of the storage area for the performance counters
+ */
+struct a5xx_preempt_record {
+       uint32_t magic;
+       uint32_t info;
+       uint32_t data;
+       uint32_t cntl;
+       uint32_t rptr;
+       uint32_t wptr;
+       uint64_t rptr_addr;
+       uint64_t rbase;
+       uint64_t counter;
+};
+
+/* Magic identifier for the preemption record */
+#define A5XX_PREEMPT_RECORD_MAGIC 0x27C4BAFCUL
+
+/*
+ * Even though the structure above is only a few bytes, we need a full 64k to
+ * store the entire preemption record from the CP
+ */
+#define A5XX_PREEMPT_RECORD_SIZE (64 * 1024)
+
+/*
+ * The preemption counter block is a storage area for the value of the
+ * preemption counters that are saved immediately before context switch. We
+ * append it on to the end of the allocation for the preemption record.
+ */
+#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4)
+
+/*
+ * This is a global structure that the preemption code uses to switch in the
+ * pagetable for the preempted process - the code switches in whatever
+ * pagetable is required after preempting to a new ring.
+ */
+struct a5xx_smmu_info {
+       uint32_t  magic;
+       uint32_t  _pad4;
+       uint64_t  ttbr0;
+       uint32_t  asid;
+       uint32_t  contextidr;
+};
+
+#define A5XX_SMMU_INFO_MAGIC 0x3618CDA3UL
+
+/* GPMU power setup entry points (see a5xx_power.c in this series) */
+int a5xx_power_init(struct msm_gpu *gpu);
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
+
+/*
+ * Poll @reg once per microsecond for at most @usecs microseconds, waiting
+ * for the bits selected by @mask to equal @value.
+ *
+ * Return: 0 as soon as the value matches, -ETIMEDOUT if it never does.
+ */
+static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
+               uint32_t reg, uint32_t mask, uint32_t value)
+{
+       uint32_t elapsed;
+
+       for (elapsed = 0; elapsed < usecs; elapsed++) {
+               udelay(1);
+               if ((gpu_read(gpu, reg) & mask) == value)
+                       return 0;
+               cpu_relax();
+       }
+
+       return -ETIMEDOUT;
+}
+
+/* Hardware clock gating control and CP idle wait */
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
+bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+
+/* Preemption lifecycle hooks (see a5xx_preempt.c in this series) */
+void a5xx_preempt_init(struct msm_gpu *gpu);
+void a5xx_preempt_hw_init(struct msm_gpu *gpu);
+void a5xx_preempt_trigger(struct msm_gpu *gpu);
+void a5xx_preempt_irq(struct msm_gpu *gpu);
+void a5xx_preempt_fini(struct msm_gpu *gpu);
+
+int a5xx_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+
+/* Return true if a preemption is currently in flight on this GPU */
+static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
+{
+       return atomic_read(&a5xx_gpu->preempt_state) != PREEMPT_NONE;
+}
+
+#endif /* __A5XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
new file mode 100644 (file)
index 0000000..e04feaa
--- /dev/null
@@ -0,0 +1,509 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/pm_opp.h>
+#include "a5xx_gpu.h"
+
+/*
+ * The GPMU data block is a block of shared registers that can be used to
+ * communicate back and forth. These "registers" are by convention with the GPMU
+ * firmware and not bound to any specific hardware design
+ */
+
+/* Layout of the AGC mailbox inside the GPMU data RAM */
+#define AGC_INIT_BASE REG_A5XX_GPMU_DATA_RAM_BASE
+#define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5)
+#define AGC_MSG_BASE (AGC_INIT_BASE + 7)
+
+#define AGC_MSG_STATE (AGC_MSG_BASE + 0)
+#define AGC_MSG_COMMAND (AGC_MSG_BASE + 1)
+#define AGC_MSG_PAYLOAD_SIZE (AGC_MSG_BASE + 3)
+#define AGC_MSG_PAYLOAD(_o) ((AGC_MSG_BASE + 5) + (_o))
+
+#define AGC_POWER_CONFIG_PRODUCTION_ID 1
+#define AGC_INIT_MSG_VALUE 0xBABEFACE
+
+/* AGC_LM_CONFIG (A540+) */
+#define AGC_LM_CONFIG (136/4)
+#define AGC_LM_CONFIG_GPU_VERSION_SHIFT 17
+#define AGC_LM_CONFIG_ENABLE_GPMU_ADAPTIVE 1
+#define AGC_LM_CONFIG_THROTTLE_DISABLE (2 << 8)
+#define AGC_LM_CONFIG_ISENSE_ENABLE (1 << 4)
+#define AGC_LM_CONFIG_ENABLE_ERROR (3 << 4)
+#define AGC_LM_CONFIG_LLM_ENABLED (1 << 16)
+#define AGC_LM_CONFIG_BCL_DISABLED (1 << 24)
+
+#define AGC_LEVEL_CONFIG (140/4)
+
+/*
+ * Register/value pairs programmed into the GPMU block by a530_lm_setup()
+ * before limits management starts. The values are an opaque,
+ * hardware-provided sequence; presumably order-sensitive, so do not
+ * reorder without confirming against the power-up sequence documentation.
+ */
+static struct {
+       uint32_t reg;
+       uint32_t value;
+} a5xx_sequence_regs[] = {
+       { 0xB9A1, 0x00010303 },
+       { 0xB9A2, 0x13000000 },
+       { 0xB9A3, 0x00460020 },
+       { 0xB9A4, 0x10000000 },
+       { 0xB9A5, 0x040A1707 },
+       { 0xB9A6, 0x00010000 },
+       { 0xB9A7, 0x0E000904 },
+       { 0xB9A8, 0x10000000 },
+       { 0xB9A9, 0x01165000 },
+       { 0xB9AA, 0x000E0002 },
+       { 0xB9AB, 0x03884141 },
+       { 0xB9AC, 0x10000840 },
+       { 0xB9AD, 0x572A5000 },
+       { 0xB9AE, 0x00000003 },
+       { 0xB9AF, 0x00000000 },
+       { 0xB9B0, 0x10000000 },
+       { 0xB828, 0x6C204010 },
+       { 0xB829, 0x6C204011 },
+       { 0xB82A, 0x6C204012 },
+       { 0xB82B, 0x6C204013 },
+       { 0xB82C, 0x6C204014 },
+       { 0xB90F, 0x00000004 },
+       { 0xB910, 0x00000002 },
+       { 0xB911, 0x00000002 },
+       { 0xB912, 0x00000002 },
+       { 0xB913, 0x00000002 },
+       { 0xB92F, 0x00000004 },
+       { 0xB930, 0x00000005 },
+       { 0xB931, 0x00000005 },
+       { 0xB932, 0x00000005 },
+       { 0xB933, 0x00000005 },
+       { 0xB96F, 0x00000001 },
+       { 0xB970, 0x00000003 },
+       { 0xB94F, 0x00000004 },
+       { 0xB950, 0x0000000B },
+       { 0xB951, 0x0000000B },
+       { 0xB952, 0x0000000B },
+       { 0xB953, 0x0000000B },
+       { 0xB907, 0x00000019 },
+       { 0xB927, 0x00000019 },
+       { 0xB947, 0x00000019 },
+       { 0xB967, 0x00000019 },
+       { 0xB987, 0x00000019 },
+       { 0xB906, 0x00220001 },
+       { 0xB926, 0x00220001 },
+       { 0xB946, 0x00220001 },
+       { 0xB966, 0x00220001 },
+       { 0xB986, 0x00300000 },
+       { 0xAC40, 0x0340FF41 },
+       { 0xAC41, 0x03BEFED0 },
+       { 0xAC42, 0x00331FED },
+       { 0xAC43, 0x021FFDD3 },
+       { 0xAC44, 0x5555AAAA },
+       { 0xAC45, 0x5555AAAA },
+       { 0xB9BA, 0x00000008 },
+};
+
+/*
+ * Get the actual voltage value for the operating point at the specified
+ * frequency.
+ *
+ * Looks up the exact-frequency OPP on the GPU platform device and converts
+ * its microvolt value to millivolts. Returns 0 if no matching OPP exists.
+ */
+static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
+{
+       struct drm_device *dev = gpu->dev;
+       struct msm_drm_private *priv = dev->dev_private;
+       struct platform_device *pdev = priv->gpu_pdev;
+       struct dev_pm_opp *opp;
+
+       opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true);
+
+       /* dev_pm_opp_get_voltage() reports microvolts; scale to mV */
+       return (!IS_ERR(opp)) ? dev_pm_opp_get_voltage(opp) / 1000 : 0;
+}
+
+/* Convert a payload length in dwords to bytes */
+#define PAYLOAD_SIZE(_size) ((_size) * sizeof(u32))
+#define LM_DCVS_LIMIT 1
+#define LEVEL_CONFIG ~(0x303)
+
+/*
+ * Setup thermal limit management for A540.
+ *
+ * Builds and posts the AGC init message: configuration flags, the
+ * DT-provided max power ("qcom,lm-max-power"), and a single voltage/
+ * frequency level for the currently active clock level.
+ */
+static void a540_lm_setup(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       u32 max_power = 0;
+       u32 rate = gpu->gpufreq[gpu->active_level];
+       u32 config;
+
+       /* The battery current limiter isn't enabled for A540 */
+       config = AGC_LM_CONFIG_BCL_DISABLED;
+       config |= adreno_gpu->rev.patchid << AGC_LM_CONFIG_GPU_VERSION_SHIFT;
+
+       /* For now disable GPMU side throttling */
+       config |= AGC_LM_CONFIG_THROTTLE_DISABLE;
+
+       /* Get the max-power from the device tree */
+       of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-max-power", &max_power);
+
+       gpu_write(gpu, AGC_MSG_STATE, 0x80000001);
+       gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
+
+       /*
+        * For now just write the one voltage level - we will do more when we
+        * can do scaling
+        */
+       gpu_write(gpu, AGC_MSG_PAYLOAD(0), max_power);
+       gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
+
+       gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, rate));
+       gpu_write(gpu, AGC_MSG_PAYLOAD(3), rate / 1000000);
+
+       gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LM_CONFIG), config);
+       gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LEVEL_CONFIG), LEVEL_CONFIG);
+       gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE,
+               PAYLOAD_SIZE(AGC_LEVEL_CONFIG + 1));
+
+       /* Writing the magic hands the message off to the GPMU */
+       gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
+}
+
+/*
+ * Setup thermal limit management for A530.
+ *
+ * Writes the hardware-provided GPMU sequence registers, programs the
+ * temperature sensor and leakage values, then posts an AGC message with
+ * the DT max power and a single voltage/frequency level.
+ */
+static void a530_lm_setup(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+       uint32_t rate = gpu->gpufreq[gpu->active_level];
+       uint32_t tsens = 0;
+       uint32_t max_power = 0;
+       unsigned int i;
+
+       /* Write the block of sequence registers */
+       for (i = 0; i < ARRAY_SIZE(a5xx_sequence_regs); i++)
+               gpu_write(gpu, a5xx_sequence_regs[i].reg,
+                       a5xx_sequence_regs[i].value);
+
+       of_property_read_u32(GPU_OF_NODE(gpu), "qcom,gpmu-tsens", &tsens);
+
+       gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, tsens);
+       gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
+       gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);
+
+       gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage);
+
+       gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+       gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x00201FF1);
+
+       /* Write the voltage table */
+
+       /* Get the max-power from the device tree */
+       of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-max-power", &max_power);
+
+       /*
+        * NOTE(review): BEC_ENABLE and GDPM_CONFIG1 are programmed a second
+        * time here with the same values as above - possibly redundant;
+        * confirm against the A530 power-up sequence before removing.
+        */
+       gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+       gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x201FF1);
+
+       gpu_write(gpu, AGC_MSG_STATE, 1);
+       gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
+
+       /*
+        * For now just write the one voltage level - we will do more when we
+        * can do scaling
+        */
+       gpu_write(gpu, AGC_MSG_PAYLOAD(0), max_power);
+       gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
+
+       gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, rate));
+       gpu_write(gpu, AGC_MSG_PAYLOAD(3), rate / 1000000);
+
+       gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 4 * sizeof(uint32_t));
+       gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
+}
+
+/* Enable SP/TP power collapse (inter-frame hysteresis and stagger delays) */
+static void a5xx_pc_init(struct msm_gpu *gpu)
+{
+       gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x7F);
+       gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
+       gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0xA0080);
+       gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x600040);
+}
+
+/*
+ * Enable the GPMU microcontroller.
+ *
+ * Streams the microcode staged by a5xx_gpmu_ucode_init() to the hardware
+ * through an indirect buffer on ring 0 (with protected mode temporarily
+ * disabled), takes the GPMU out of reset and waits for the firmware
+ * handshake value (0xBABEFACE) in GPMU_GENERAL_0.
+ *
+ * Return: 0 on success or if the GPMU merely failed to start (advanced
+ * power collapse is lost but the GPU still works); -EINVAL if the CP was
+ * left in a bad state.
+ */
+static int a5xx_gpmu_init(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+       struct msm_ringbuffer *ring = gpu->rb[0];
+
+       /* No microcode staged - the GPMU is optional for this target */
+       if (!a5xx_gpu->gpmu_dwords)
+               return 0;
+
+       /* Turn off protected mode for this operation */
+       OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+       OUT_RING(ring, 0);
+
+       /* Kick off the IB to load the GPMU microcode */
+       OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+       OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
+       OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
+       OUT_RING(ring, a5xx_gpu->gpmu_dwords);
+
+       /* Turn back on protected mode */
+       OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+       OUT_RING(ring, 1);
+
+       gpu->funcs->flush(gpu, ring);
+
+       /* This is "fatal" because the CP is left in a bad state */
+       if (!a5xx_idle(gpu, ring)) {
+               DRM_ERROR("%s: Unable to load GPMU firmware\n",
+                       gpu->name);
+               return -EINVAL;
+       }
+
+       /* Clock gating setup for A530 targets */
+       if (adreno_is_a530(adreno_gpu))
+               gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);
+
+       /* Kick off the GPMU */
+       gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);
+
+       /*
+        * Wait for the GPMU to respond. It isn't fatal if it doesn't, we just
+        * won't have advanced power collapse.
+        */
+       if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF,
+               0xBABEFACE)) {
+               DRM_ERROR("%s: GPMU firmware initialization timed out\n",
+                       gpu->name);
+               return 0;
+       }
+
+       if (!adreno_is_a530(adreno_gpu)) {
+               u32 val = gpu_read(gpu, REG_A5XX_GPMU_GENERAL_1);
+
+               if (val)
+                       DRM_ERROR("%s: GPMU firmware initialization failed: %d\n",
+                               gpu->name, val);
+       }
+
+       /* FIXME: Clear GPMU interrupts? */
+       return 0;
+}
+
+/*
+ * Enable limits management.
+ *
+ * Unmasks the GDPM / voltage / temperature-threshold interrupts and turns
+ * on clock throttling. This init sequence only applies to A530.
+ */
+static void a5xx_lm_enable(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+       /* This init sequence only applies to A530 */
+       if (!adreno_is_a530(adreno_gpu))
+               return;
+
+       gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
+       gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
+       gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
+       gpu_write(gpu, REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK, 0x50000);
+       gpu_write(gpu, REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL, 0x30000);
+
+       gpu_write(gpu, REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL, 0x011);
+}
+
+/*
+ * a5xx_power_init() - configure limits management, SP/TP power collapse
+ * and the GPMU microcontroller.
+ *
+ * The power threshold defaults to 6000 (presumably mW - confirm against
+ * DT bindings) and may be overridden by the "qcom,lm-limit" DT property.
+ *
+ * Return: 0 on success or a negative error code from a5xx_gpmu_init().
+ */
+int a5xx_power_init(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       int ret;
+       u32 lm_limit = 6000;
+
+       /*
+        * Set up the limit management
+        * first, do some generic setup:
+        */
+       gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);
+
+       of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-limit", &lm_limit);
+       gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | lm_limit);
+
+       /* Now do the target specific setup */
+       if (adreno_is_a530(adreno_gpu))
+               a530_lm_setup(gpu);
+       else
+               a540_lm_setup(gpu);
+
+       /* Set up SP/TP power collapse */
+       a5xx_pc_init(gpu);
+
+       /* Start the GPMU */
+       ret = a5xx_gpmu_init(gpu);
+       if (ret)
+               return ret;
+
+       /* Start the limits management */
+       a5xx_lm_enable(gpu);
+
+       return 0;
+}
+
+/*
+ * Parse the GPMU firmware header: dword 0 is the header size in dwords,
+ * followed by (id, value) pairs. Id 1 carries the major version, id 2 the
+ * minor version; unknown ids are skipped.
+ *
+ * Return: 0 on success, -EINVAL if the header is truncated, odd-sized,
+ * larger than 10 dwords, or bigger than the firmware image.
+ */
+static int _read_header(unsigned int *data, uint32_t fwsize,
+               unsigned int *major, unsigned int *minor)
+{
+       uint32_t hdr_dwords;
+       unsigned int idx;
+
+       /* Need at least the size dword itself */
+       if (fwsize < 4)
+               return -EINVAL;
+
+       hdr_dwords = data[0];
+
+       /* Reject odd-sized, oversized, or out-of-image headers */
+       if ((hdr_dwords % 2) || (hdr_dwords > 10) ||
+               hdr_dwords > (fwsize >> 2))
+               return -EINVAL;
+
+       /* Walk the (id, value) pairs */
+       for (idx = 1; idx < hdr_dwords; idx += 2) {
+               if (data[idx] == 1)
+                       *major = data[idx + 1];
+               else if (data[idx] == 2)
+                       *minor = data[idx + 1];
+               /* Invalid ids are non fatal - skip them */
+       }
+
+       return 0;
+}
+
+/*
+ * Return true when the loaded firmware version (cur_major.cur_minor) is
+ * greater than or equal to the minimum allowable version
+ * (min_major.min_minor).
+ */
+static inline bool _check_gpmu_version(uint32_t cur_major, uint32_t cur_minor,
+               uint32_t min_major, uint32_t min_minor)
+{
+       if (cur_major != min_major)
+               return cur_major > min_major;
+
+       /* Same major: compare minors */
+       return cur_minor >= min_minor;
+}
+
+/*
+ * a5xx_gpmu_ucode_init() - fetch and stage the GPMU microcode.
+ *
+ * Requests the firmware named by the "qcom,gpmu-firmware" DT property,
+ * validates its size/ID/version header, and packs the command payload into
+ * a GPU-readable BO as a series of TYPE4 packets targeting
+ * GPMU_INST_RAM_BASE. On success gpmu_bo/gpmu_iova/gpmu_dwords are set for
+ * a5xx_gpmu_init() to consume; on any failure the GPMU is simply left
+ * disabled.
+ */
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+       struct drm_device *drm = gpu->dev;
+       const char *name;
+       const struct firmware *fw;
+       uint32_t version[2] = { 0, 0 };
+       uint32_t dwords = 0, offset = 0;
+       uint32_t major = 0, minor = 0, bosize;
+       unsigned int *data, *ptr, *cmds;
+       unsigned int cmds_size;
+
+       /* Already staged - nothing to do */
+       if (a5xx_gpu->gpmu_bo)
+               return;
+
+       /*
+        * Read the firmware name from the device tree - if it doesn't exist
+        * then don't initialize the GPMU for this target
+        */
+       if (of_property_read_string(GPU_OF_NODE(gpu), "qcom,gpmu-firmware",
+               &name))
+               return;
+
+       /*
+        * The version isn't mandatory, but if it exists, we need to enforce
+        * that the version of the GPMU firmware matches or is newer than the
+        * value
+        */
+       of_property_read_u32_array(GPU_OF_NODE(gpu), "qcom,gpmu-version",
+               version, 2);
+
+       /* Get the firmware */
+       if (request_firmware(&fw, name, drm->dev)) {
+               DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n",
+                       gpu->name);
+               return;
+       }
+
+       data = (unsigned int *) fw->data;
+
+       /*
+        * The first dword is the size of the remaining data in dwords. Use it
+        * as a checksum of sorts and make sure it matches the actual size of
+        * the firmware that we read
+        */
+
+       if (fw->size < 8 || (data[0] < 2) || (data[0] >= (fw->size >> 2)))
+               goto out;
+
+       /* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
+       if (data[1] != 2)
+               goto out;
+
+       /* Read the header and get the major/minor of the read firmware */
+       if (_read_header(&data[2], fw->size - 8, &major, &minor))
+               goto out;
+
+       if (!_check_gpmu_version(major, minor, version[0], version[1])) {
+               DRM_ERROR("%s: Loaded GPMU version %d.%d is too old\n",
+                       gpu->name, major, minor);
+               goto out;
+       }
+
+       /* Commands follow the 2-dword preamble plus the data[2]-dword header */
+       cmds = data + data[2] + 3;
+       cmds_size = data[0] - data[2] - 2;
+
+       /*
+        * A single type4 opcode can only have so many values attached so
+        * add enough opcodes to load all the commands
+        */
+       bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
+
+       mutex_lock(&drm->struct_mutex);
+       a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize,
+               MSM_BO_UNCACHED | MSM_BO_GPU_READONLY);
+       mutex_unlock(&drm->struct_mutex);
+
+       if (IS_ERR(a5xx_gpu->gpmu_bo))
+               goto err;
+
+       if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
+               &a5xx_gpu->gpmu_iova))
+               goto err;
+
+       ptr = msm_gem_vaddr(a5xx_gpu->gpmu_bo);
+       if (!ptr)
+               goto err;
+
+       /* Chunk the payload into TYPE4 packets of at most TYPE4_MAX_PAYLOAD */
+       while (cmds_size > 0) {
+               int i;
+               uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
+                       TYPE4_MAX_PAYLOAD : cmds_size;
+
+               ptr[dwords++] = PKT4(REG_A5XX_GPMU_INST_RAM_BASE + offset,
+                       _size);
+
+               for (i = 0; i < _size; i++)
+                       ptr[dwords++] = *cmds++;
+
+               offset += _size;
+               cmds_size -= _size;
+       }
+
+       a5xx_gpu->gpmu_dwords = dwords;
+
+       goto out;
+
+err:
+       /* Roll back any partial setup so the GPMU stays disabled */
+       if (a5xx_gpu->gpmu_iova)
+               msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+       if (a5xx_gpu->gpmu_bo)
+               drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+
+       a5xx_gpu->gpmu_bo = NULL;
+       a5xx_gpu->gpmu_iova = 0;
+       a5xx_gpu->gpmu_dwords = 0;
+
+out:
+       /* No need to keep that firmware laying around anymore */
+       release_firmware(fw);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
new file mode 100644 (file)
index 0000000..648494c
--- /dev/null
@@ -0,0 +1,383 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gem.h"
+#include "msm_iommu.h"
+#include "a5xx_gpu.h"
+
+/*
+ * Allocate a kernel-owned GEM buffer object, pin it into the GPU address
+ * space and return a CPU-side mapping.  On success, *bo and *iova (when
+ * non-NULL) receive the new object and its GPU address; on failure an
+ * ERR_PTR() encoded error is returned and the object is released.
+ */
+static void *alloc_kernel_bo(struct drm_device *drm, struct msm_gpu *gpu,
+               size_t size, uint32_t flags, struct drm_gem_object **bo,
+               u64 *iova)
+{
+       struct drm_gem_object *_bo;
+       u64 _iova;
+       void *ptr;
+       int ret;
+
+       /* msm_gem_new() requires struct_mutex to be held */
+       mutex_lock(&drm->struct_mutex);
+       _bo = msm_gem_new(drm, size, flags);
+       mutex_unlock(&drm->struct_mutex);
+
+       /* ERR_PTR-encoded pointer propagates directly through the void * */
+       if (IS_ERR(_bo))
+               return _bo;
+
+       ret = msm_gem_get_iova(_bo, gpu->aspace, &_iova);
+       if (ret)
+               goto out;
+
+       ptr = msm_gem_vaddr(_bo);
+       if (!ptr) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       if (bo)
+               *bo = _bo;
+       if (iova)
+               *iova = _iova;
+
+       return ptr;
+out:
+       /*
+        * NOTE(review): on the vaddr-failure path the iova pinned above is
+        * not explicitly put before dropping the reference — confirm the
+        * final unreference releases it, otherwise this leaks the mapping.
+        */
+       drm_gem_object_unreference_unlocked(_bo);
+       return ERR_PTR(ret);
+}
+
+/*
+ * Try to transition the preemption state from old to new. Return
+ * true on success or false if the original state wasn't 'old'.
+ * The compare-and-exchange makes the transition atomic with respect to
+ * the IRQ handler and other CPUs racing to trigger a preemption.
+ */
+static inline bool try_preempt_state(struct a5xx_gpu *a5xx_gpu,
+               enum preempt_state old, enum preempt_state new)
+{
+       enum preempt_state cur = atomic_cmpxchg(&a5xx_gpu->preempt_state,
+               old, new);
+
+       return (cur == old);
+}
+
+/*
+ * Force the preemption state to the specified state.  This is used in cases
+ * where the current state is known and won't change
+ */
+static inline void set_preempt_state(struct a5xx_gpu *gpu,
+               enum preempt_state new)
+{
+       /*
+        * preempt_state may be read by other cores trying to trigger a
+        * preemption or in the interrupt handler so barriers are needed
+        * before...
+        */
+       smp_mb__before_atomic();
+       atomic_set(&gpu->preempt_state, new);
+       /* ... and after */
+       smp_mb__after_atomic();
+}
+
+/* Write the most recent wptr for the given ring into the hardware */
+static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+       unsigned long flags;
+       uint32_t wptr;
+
+       /* Callers may legitimately pass NULL (no ring to update) */
+       if (!ring)
+               return;
+
+       /* Snapshot wptr under the ring lock so submits can't race the read */
+       spin_lock_irqsave(&ring->lock, flags);
+       wptr = get_wptr(ring);
+       spin_unlock_irqrestore(&ring->lock, flags);
+
+       gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
+}
+
+/*
+ * Return the highest priority ringbuffer with something in it.
+ * Rings are scanned from index nr_rings-1 down to 0, so higher indices
+ * take priority; returns NULL when every ring is empty.
+ */
+static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       unsigned long flags;
+       int i;
+
+       for (i = gpu->nr_rings - 1; i >= 0; i--) {
+               bool empty;
+               struct msm_ringbuffer *ring = gpu->rb[i];
+
+               /* A ring is empty when wptr has caught up with rptr */
+               spin_lock_irqsave(&ring->lock, flags);
+               empty = (get_wptr(ring) == adreno_gpu->memptrs->rptr[ring->id]);
+               spin_unlock_irqrestore(&ring->lock, flags);
+
+               if (!empty)
+                       return ring;
+       }
+
+       return NULL;
+}
+
+/*
+ * Watchdog timer callback armed by a5xx_preempt_trigger(): if the hardware
+ * never raises the context-switch interrupt, mark the preemption FAULTED
+ * and kick off GPU recovery.  (Old-style timer API: context arrives as an
+ * unsigned long.)
+ */
+static void a5xx_preempt_timer(unsigned long data)
+{
+       struct a5xx_gpu *a5xx_gpu = (struct a5xx_gpu *) data;
+       struct msm_gpu *gpu = &a5xx_gpu->base.base;
+       struct drm_device *dev = gpu->dev;
+       struct msm_drm_private *priv = dev->dev_private;
+
+       /* If the IRQ already moved the state on, the preemption completed */
+       if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
+               return;
+
+       dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
+       queue_work(priv->wq, &gpu->recover_work);
+}
+
+/*
+ * Try to trigger a preemption switch to the highest-priority non-empty
+ * ring.  No-op when preemption is disabled (single ring), when another
+ * preemption is already in flight, or when the best candidate is the
+ * ring already running (in which case only the wptr is refreshed).
+ */
+void a5xx_preempt_trigger(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+       unsigned long flags;
+       struct msm_ringbuffer *ring;
+
+       if (gpu->nr_rings == 1)
+               return;
+
+       /*
+        * Try to start preemption by moving from NONE to START. If
+        * unsuccessful, a preemption is already in flight
+        */
+       if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
+               return;
+
+       /* Get the next ring to preempt to */
+       ring = get_next_ring(gpu);
+
+       /*
+        * If no ring is populated or the highest priority ring is the current
+        * one do nothing except to update the wptr to the latest and greatest
+        */
+       if (!ring || (a5xx_gpu->cur_ring == ring)) {
+               update_wptr(gpu, ring);
+
+               /* Set the state back to NONE */
+               set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+               return;
+       }
+
+       /* Make sure the wptr doesn't update while we're in motion */
+       spin_lock_irqsave(&ring->lock, flags);
+       a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
+       spin_unlock_irqrestore(&ring->lock, flags);
+
+       /* Do read barrier to make sure we have updated pagetable info */
+       rmb();
+
+       /* Set the SMMU info for the preemption (only present when the IOMMU
+        * supports dynamic/per-process pagetables, see a5xx_preempt_init) */
+       if (a5xx_gpu->smmu_info) {
+               a5xx_gpu->smmu_info->ttbr0 =
+                       adreno_gpu->memptrs->ttbr0[ring->id];
+               a5xx_gpu->smmu_info->contextidr =
+                       adreno_gpu->memptrs->contextidr[ring->id];
+       }
+
+       /* Set the address of the incoming preemption record */
+       gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
+               REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI,
+               a5xx_gpu->preempt_iova[ring->id]);
+
+       a5xx_gpu->next_ring = ring;
+
+       /* Start a timer to catch a stuck preemption (10 second budget) */
+       mod_timer(&a5xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000));
+
+       /* Set the preemption state to triggered */
+       set_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED);
+
+       /* Make sure everything is written before hitting the button */
+       wmb();
+
+       /* And actually start the preemption */
+       gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
+}
+
+/*
+ * Interrupt handler for a completed context switch: verify the hardware
+ * really finished, promote next_ring to cur_ring, re-sync the wptr and
+ * return the state machine to NONE so new preemptions can be triggered.
+ */
+void a5xx_preempt_irq(struct msm_gpu *gpu)
+{
+       uint32_t status;
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+       struct drm_device *dev = gpu->dev;
+       struct msm_drm_private *priv = dev->dev_private;
+
+       /* Spurious interrupt if no preemption was in the TRIGGERED state */
+       if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING))
+               return;
+
+       /* Delete the preemption watchdog timer */
+       del_timer(&a5xx_gpu->preempt_timer);
+
+       /*
+        * The hardware should be setting CP_CONTEXT_SWITCH_CNTL to zero before
+        * firing the interrupt, but there is a non zero chance of a hardware
+        * condition or a software race that could set it again before we have a
+        * chance to finish. If that happens, log and go for recovery
+        */
+       status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
+       if (unlikely(status)) {
+               set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
+               dev_err(dev->dev, "%s: Preemption failed to complete\n",
+                       gpu->name);
+               queue_work(priv->wq, &gpu->recover_work);
+               return;
+       }
+
+       a5xx_gpu->cur_ring = a5xx_gpu->next_ring;
+       a5xx_gpu->next_ring = NULL;
+
+       /* Push any wptr updates that arrived while the switch was in flight */
+       update_wptr(gpu, a5xx_gpu->cur_ring);
+
+       set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+}
+
+/*
+ * (Re)initialize preemption hardware state.  Called at GPU init/recovery:
+ * zeroes the per-ring preemption records, points the CP at the smmu_info
+ * buffer and resets the software state machine to NONE on ring 0.
+ */
+void a5xx_preempt_hw_init(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+       struct msm_ringbuffer *ring;
+       int i;
+
+       if (gpu->nr_rings > 1) {
+               /* Clear the preemption records */
+               FOR_EACH_RING(gpu, ring, i) {
+                       if (ring) {
+                               a5xx_gpu->preempt[ring->id]->wptr = 0;
+                               a5xx_gpu->preempt[ring->id]->rptr = 0;
+                               a5xx_gpu->preempt[ring->id]->rbase = ring->iova;
+                       }
+               }
+       }
+
+       /*
+        * Tell the CP where to find the smmu_info buffer.
+        * NOTE(review): written unconditionally — smmu_info_iova is 0 when
+        * dynamic IOMMU is unavailable; confirm the CP treats 0 as "none".
+        */
+       gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
+               REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
+               a5xx_gpu->smmu_info_iova);
+
+       /* Reset the preemption state */
+       set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+
+       /* Always come up on rb 0 */
+       a5xx_gpu->cur_ring = gpu->rb[0];
+}
+
+/*
+ * Allocate and initialize the preemption record (plus trailing counter
+ * area) for one ring.  Returns 0 on success or a negative errno from the
+ * buffer allocation.
+ */
+static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
+               struct msm_ringbuffer *ring)
+{
+       struct adreno_gpu *adreno_gpu = &a5xx_gpu->base;
+       struct msm_gpu *gpu = &adreno_gpu->base;
+       struct a5xx_preempt_record *ptr;
+       struct drm_gem_object *bo;
+       u64 iova;
+
+       /* Record and counter space live in a single GPU-readable buffer */
+       ptr = alloc_kernel_bo(gpu->dev, gpu,
+               A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
+               MSM_BO_UNCACHED | MSM_BO_PRIVILEGED,
+               &bo, &iova);
+
+       if (IS_ERR(ptr))
+               return PTR_ERR(ptr);
+
+       a5xx_gpu->preempt_bo[ring->id] = bo;
+       a5xx_gpu->preempt_iova[ring->id] = iova;
+       a5xx_gpu->preempt[ring->id] = ptr;
+
+       /* Set up the defaults on the preemption record */
+
+       ptr->magic = A5XX_PREEMPT_RECORD_MAGIC;
+       ptr->info = 0;
+       ptr->data = 0;
+       ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
+       ptr->rptr_addr = rbmemptr(adreno_gpu, ring->id, rptr);
+       /* Counter area starts immediately after the fixed-size record */
+       ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE;
+
+       return 0;
+}
+
+/*
+ * Tear down all preemption buffers: the per-ring preemption records and
+ * the optional smmu_info buffer.  Safe to call on a partially-initialized
+ * state (used as the failure path of a5xx_preempt_init).
+ */
+void a5xx_preempt_fini(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+       struct msm_ringbuffer *ring;
+       int i;
+
+       FOR_EACH_RING(gpu, ring, i) {
+               if (!ring || !a5xx_gpu->preempt_bo[i])
+                       continue;
+
+               if (a5xx_gpu->preempt_iova[i])
+                       msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace);
+
+               drm_gem_object_unreference_unlocked(a5xx_gpu->preempt_bo[i]);
+
+               a5xx_gpu->preempt_bo[i] = NULL;
+       }
+
+       if (a5xx_gpu->smmu_info_bo) {
+               if (a5xx_gpu->smmu_info_iova)
+                       msm_gem_put_iova(a5xx_gpu->smmu_info_bo, gpu->aspace);
+               drm_gem_object_unreference_unlocked(a5xx_gpu->smmu_info_bo);
+               a5xx_gpu->smmu_info_bo = NULL;
+       }
+}
+
+/*
+ * One-time preemption setup: allocate a preemption record per ring, the
+ * optional SMMU info buffer (only when the IOMMU supports dynamic domains)
+ * and arm the watchdog timer.  On any failure preemption is disabled by
+ * forcing nr_rings back to 1 rather than propagating an error.
+ */
+void a5xx_preempt_init(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+       struct msm_ringbuffer *ring;
+       struct a5xx_smmu_info *ptr;
+       struct drm_gem_object *bo;
+       uint64_t iova;
+       int i;
+
+       /* No preemption if we only have one ring */
+       if (gpu->nr_rings <= 1)
+               return;
+
+       FOR_EACH_RING(gpu, ring, i) {
+               if (!ring)
+                       continue;
+
+               if (preempt_init_ring(a5xx_gpu, ring))
+                       goto fail;
+       }
+
+       /*
+        * The CP needs SMMU info only when per-process (dynamic) pagetables
+        * are available; otherwise smmu_info stays NULL and the trigger path
+        * skips it.
+        */
+       if (msm_iommu_allow_dynamic(gpu->aspace->mmu)) {
+               ptr = alloc_kernel_bo(gpu->dev, gpu,
+                       sizeof(struct a5xx_smmu_info),
+                       MSM_BO_UNCACHED | MSM_BO_PRIVILEGED,
+                       &bo, &iova);
+
+               if (IS_ERR(ptr))
+                       goto fail;
+
+               ptr->magic = A5XX_SMMU_INFO_MAGIC;
+
+               a5xx_gpu->smmu_info_bo = bo;
+               a5xx_gpu->smmu_info_iova = iova;
+               a5xx_gpu->smmu_info = ptr;
+       }
+
+       setup_timer(&a5xx_gpu->preempt_timer, a5xx_preempt_timer,
+               (unsigned long) a5xx_gpu);
+
+       return;
+fail:
+       /*
+        * On any failure our adventure is over. Clean up and
+        * set nr_rings to 1 to force preemption off
+        */
+       a5xx_preempt_fini(gpu);
+       gpu->nr_rings = 1;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c b/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c
new file mode 100644 (file)
index 0000000..5a2edb0
--- /dev/null
@@ -0,0 +1,796 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gpu.h"
+#include "msm_gem.h"
+#include "a5xx_gpu.h"
+#include "msm_snapshot_api.h"
+
+#define A5XX_NR_SHADER_BANKS 4
+
+/*
+ * These are a list of the registers that need to be read through the HLSQ
+ * aperture through the crashdumper.  These are not nominally accessible from
+ * the CPU on a secure platform.
+ * Fields: aperture statetype selector, register offset, dword count.
+ */
+static const struct {
+       u32 type;
+       u32 regoffset;
+       u32 count;
+} a5xx_hlsq_aperture_regs[] = {
+       { 0x35, 0xE00, 0x32 },   /* HLSQ non-context */
+       { 0x31, 0x2080, 0x1 },   /* HLSQ 2D context 0 */
+       { 0x33, 0x2480, 0x1 },   /* HLSQ 2D context 1 */
+       { 0x32, 0xE780, 0x62 },  /* HLSQ 3D context 0 */
+       { 0x34, 0xEF80, 0x62 },  /* HLSQ 3D context 1 */
+       { 0x3f, 0x0EC0, 0x40 },  /* SP non-context */
+       { 0x3d, 0x2040, 0x1 },   /* SP 2D context 0 */
+       { 0x3b, 0x2440, 0x1 },   /* SP 2D context 1 */
+       { 0x3e, 0xE580, 0x180 }, /* SP 3D context 0 */
+       { 0x3c, 0xED80, 0x180 }, /* SP 3D context 1 */
+       { 0x3a, 0x0F00, 0x1c },  /* TP non-context */
+       { 0x38, 0x2000, 0xa },   /* TP 2D context 0 */
+       { 0x36, 0x2400, 0xa },   /* TP 2D context 1 */
+       { 0x39, 0xE700, 0x80 },  /* TP 3D context 0 */
+       { 0x37, 0xEF00, 0x80 },  /* TP 3D context 1 */
+};
+
+/*
+ * The debugbus registers contain device state that presumably makes
+ * sense to the hardware designers. 'count' is the number of indexes to read,
+ * each index value is 64 bits.  Every block here uses the same 0x100 count.
+ */
+static const struct {
+       enum a5xx_debugbus id;
+       u32 count;
+} a5xx_debugbus_blocks[] = {
+       {  A5XX_RBBM_DBGBUS_CP, 0x100, },
+       {  A5XX_RBBM_DBGBUS_RBBM, 0x100, },
+       {  A5XX_RBBM_DBGBUS_HLSQ, 0x100, },
+       {  A5XX_RBBM_DBGBUS_UCHE, 0x100, },
+       {  A5XX_RBBM_DBGBUS_DPM, 0x100, },
+       {  A5XX_RBBM_DBGBUS_TESS, 0x100, },
+       {  A5XX_RBBM_DBGBUS_PC, 0x100, },
+       {  A5XX_RBBM_DBGBUS_VFDP, 0x100, },
+       {  A5XX_RBBM_DBGBUS_VPC, 0x100, },
+       {  A5XX_RBBM_DBGBUS_TSE, 0x100, },
+       {  A5XX_RBBM_DBGBUS_RAS, 0x100, },
+       {  A5XX_RBBM_DBGBUS_VSC, 0x100, },
+       {  A5XX_RBBM_DBGBUS_COM, 0x100, },
+       {  A5XX_RBBM_DBGBUS_DCOM, 0x100, },
+       {  A5XX_RBBM_DBGBUS_LRZ, 0x100, },
+       {  A5XX_RBBM_DBGBUS_A2D_DSP, 0x100, },
+       {  A5XX_RBBM_DBGBUS_CCUFCHE, 0x100, },
+       {  A5XX_RBBM_DBGBUS_GPMU, 0x100, },
+       {  A5XX_RBBM_DBGBUS_RBP, 0x100, },
+       {  A5XX_RBBM_DBGBUS_HM, 0x100, },
+       {  A5XX_RBBM_DBGBUS_RBBM_CFG, 0x100, },
+       {  A5XX_RBBM_DBGBUS_VBIF_CX, 0x100, },
+       {  A5XX_RBBM_DBGBUS_GPC, 0x100, },
+       {  A5XX_RBBM_DBGBUS_LARC, 0x100, },
+       {  A5XX_RBBM_DBGBUS_HLSQ_SPTP, 0x100, },
+       {  A5XX_RBBM_DBGBUS_RB_0, 0x100, },
+       {  A5XX_RBBM_DBGBUS_RB_1, 0x100, },
+       {  A5XX_RBBM_DBGBUS_RB_2, 0x100, },
+       {  A5XX_RBBM_DBGBUS_RB_3, 0x100, },
+       {  A5XX_RBBM_DBGBUS_CCU_0, 0x100, },
+       {  A5XX_RBBM_DBGBUS_CCU_1, 0x100, },
+       {  A5XX_RBBM_DBGBUS_CCU_2, 0x100, },
+       {  A5XX_RBBM_DBGBUS_CCU_3, 0x100, },
+       {  A5XX_RBBM_DBGBUS_A2D_RAS_0, 0x100, },
+       {  A5XX_RBBM_DBGBUS_A2D_RAS_1, 0x100, },
+       {  A5XX_RBBM_DBGBUS_A2D_RAS_2, 0x100, },
+       {  A5XX_RBBM_DBGBUS_A2D_RAS_3, 0x100, },
+       {  A5XX_RBBM_DBGBUS_VFD_0, 0x100, },
+       {  A5XX_RBBM_DBGBUS_VFD_1, 0x100, },
+       {  A5XX_RBBM_DBGBUS_VFD_2, 0x100, },
+       {  A5XX_RBBM_DBGBUS_VFD_3, 0x100, },
+       {  A5XX_RBBM_DBGBUS_SP_0, 0x100, },
+       {  A5XX_RBBM_DBGBUS_SP_1, 0x100, },
+       {  A5XX_RBBM_DBGBUS_SP_2, 0x100, },
+       {  A5XX_RBBM_DBGBUS_SP_3, 0x100, },
+       {  A5XX_RBBM_DBGBUS_TPL1_0, 0x100, },
+       {  A5XX_RBBM_DBGBUS_TPL1_1, 0x100, },
+       {  A5XX_RBBM_DBGBUS_TPL1_2, 0x100, },
+       {  A5XX_RBBM_DBGBUS_TPL1_3, 0x100, },
+};
+
+/*
+ * The shader blocks are read from the HLSQ aperture - each one has its own
+ * identifier for the aperture read.  'size' is in dwords per bank; each
+ * block is dumped once per bank (A5XX_NR_SHADER_BANKS banks).
+ */
+static const struct {
+       enum a5xx_shader_blocks id;
+       u32 size;
+} a5xx_shader_blocks[] = {
+       {A5XX_TP_W_MEMOBJ,              0x200},
+       {A5XX_TP_W_MIPMAP_BASE,         0x3C0},
+       {A5XX_TP_W_SAMPLER_TAG,          0x40},
+       {A5XX_TP_S_3D_SAMPLER,           0x80},
+       {A5XX_TP_S_3D_SAMPLER_TAG,       0x20},
+       {A5XX_TP_S_CS_SAMPLER,           0x40},
+       {A5XX_TP_S_CS_SAMPLER_TAG,       0x10},
+       {A5XX_SP_W_CONST,               0x800},
+       {A5XX_SP_W_CB_SIZE,              0x30},
+       {A5XX_SP_W_CB_BASE,              0xF0},
+       {A5XX_SP_W_STATE,                 0x1},
+       {A5XX_SP_S_3D_CONST,            0x800},
+       {A5XX_SP_S_3D_CB_SIZE,           0x28},
+       {A5XX_SP_S_3D_UAV_SIZE,          0x80},
+       {A5XX_SP_S_CS_CONST,            0x400},
+       {A5XX_SP_S_CS_CB_SIZE,            0x8},
+       {A5XX_SP_S_CS_UAV_SIZE,          0x80},
+       {A5XX_SP_S_3D_CONST_DIRTY,       0x12},
+       {A5XX_SP_S_3D_CB_SIZE_DIRTY,      0x1},
+       {A5XX_SP_S_3D_UAV_SIZE_DIRTY,     0x2},
+       {A5XX_SP_S_CS_CONST_DIRTY,        0xA},
+       {A5XX_SP_S_CS_CB_SIZE_DIRTY,      0x1},
+       {A5XX_SP_S_CS_UAV_SIZE_DIRTY,     0x2},
+       {A5XX_HLSQ_ICB_DIRTY,             0xB},
+       {A5XX_SP_POWER_RESTORE_RAM_TAG,   0xA},
+       {A5XX_TP_POWER_RESTORE_RAM_TAG,   0xA},
+       {A5XX_TP_W_SAMPLER,              0x80},
+       {A5XX_TP_W_MEMOBJ_TAG,           0x40},
+       {A5XX_TP_S_3D_MEMOBJ,           0x200},
+       {A5XX_TP_S_3D_MEMOBJ_TAG,        0x20},
+       {A5XX_TP_S_CS_MEMOBJ,           0x100},
+       {A5XX_TP_S_CS_MEMOBJ_TAG,        0x10},
+       {A5XX_SP_W_INSTR,               0x800},
+       {A5XX_SP_W_UAV_SIZE,             0x80},
+       {A5XX_SP_W_UAV_BASE,             0x80},
+       {A5XX_SP_W_INST_TAG,             0x40},
+       {A5XX_SP_S_3D_INSTR,            0x800},
+       {A5XX_SP_S_3D_CB_BASE,           0xC8},
+       {A5XX_SP_S_3D_UAV_BASE,          0x80},
+       {A5XX_SP_S_CS_INSTR,            0x400},
+       {A5XX_SP_S_CS_CB_BASE,           0x28},
+       {A5XX_SP_S_CS_UAV_BASE,          0x80},
+       {A5XX_SP_S_3D_INSTR_DIRTY,        0x1},
+       {A5XX_SP_S_3D_CB_BASE_DIRTY,      0x5},
+       {A5XX_SP_S_3D_UAV_BASE_DIRTY,     0x2},
+       {A5XX_SP_S_CS_INSTR_DIRTY,        0x1},
+       {A5XX_SP_S_CS_CB_BASE_DIRTY,      0x1},
+       {A5XX_SP_S_CS_UAV_BASE_DIRTY,     0x2},
+       {A5XX_HLSQ_ICB,                 0x200},
+       {A5XX_HLSQ_ICB_CB_BASE_DIRTY,     0x4},
+       {A5XX_SP_POWER_RESTORE_RAM,     0x140},
+       {A5XX_TP_POWER_RESTORE_RAM,      0x40},
+};
+
+/*
+ * The A5XX architecture has a built in engine to asynchronously dump
+ * registers from the GPU. It is used to accelerate the copy of hundreds
+ * (thousands) of registers and as a safe way to access registers that might
+ * have secure data in them (if the GPU is in secure, the crashdumper returns
+ * bogus values for those registers). On a fully secured device the CPU will be
+ * blocked from accessing those registers directly and so the crashdump is the
+ * only way that we can access context registers and the shader banks for debug
+ * purposes.
+ *
+ * The downside of the crashdump is that it requires access to GPU accessible
+ * memory (so the VBIF and the bus and the SMMU need to be up and working) and
+ * you need enough memory to write the script for the crashdumper and to store
+ * the data that you are dumping so there is a balancing act between the work to
+ * set up a crash dumper and the value we get out of it.
+ */
+
+/*
+ * The crashdump uses a pseudo-script format to read and write registers.  Each
+ * operation is two 64 bit values.
+ *
+ * READ:
+ *  [qword 0] [64:00] - The absolute IOVA address target for the register value
+ *  [qword 1] [63:44] - the dword address of the register offset to read
+ *            [15:00] - Number of dwords to read at once
+ *
+ * WRITE:
+ *  [qword 0] [31:0] 32 bit value to write to the register
+ *  [qword 1] [63:44] - the dword address of the register offset to write
+ *            [21:21] - set 1 to write
+ *            [15:00] - Number of dwords to write (usually 1)
+ *
+ * At the bottom of the script, write quadword zeros to trigger the end.
+ */
+struct crashdump {
+       struct drm_gem_object *bo;      /* Backing GEM object (1 MB) */
+       void *ptr;                      /* CPU mapping of the buffer */
+       u64 iova;                       /* GPU address of the buffer */
+       u32 index;                      /* Current byte offset into the script */
+};
+
+/* Layout: first 256K holds the crashdump script, the rest holds the data */
+#define CRASHDUMP_BO_SIZE (SZ_1M)
+#define CRASHDUMP_SCRIPT_SIZE (256 * SZ_1K)
+#define CRASHDUMP_DATA_SIZE (CRASHDUMP_BO_SIZE - CRASHDUMP_SCRIPT_SIZE)
+
+/*
+ * Allocate, map and pin the crashdumper buffer.  Returns 0 on success or a
+ * negative errno; on failure crashdump->bo is left NULL so the caller can
+ * fall back to CPU-only register dumps.
+ */
+static int crashdump_init(struct msm_gpu *gpu, struct crashdump *crashdump)
+{
+       struct drm_device *drm = gpu->dev;
+       int ret = -ENOMEM;  /* default covers the !vaddr path below */
+
+       crashdump->bo = msm_gem_new(drm, CRASHDUMP_BO_SIZE, MSM_BO_UNCACHED);
+       if (IS_ERR(crashdump->bo)) {
+               ret = PTR_ERR(crashdump->bo);
+               crashdump->bo = NULL;
+               return ret;
+       }
+
+       crashdump->ptr = msm_gem_vaddr_locked(crashdump->bo);
+       if (!crashdump->ptr)
+               goto out;
+
+       ret = msm_gem_get_iova_locked(crashdump->bo, gpu->aspace,
+               &crashdump->iova);
+
+out:
+       if (ret) {
+               drm_gem_object_unreference(crashdump->bo);
+               crashdump->bo = NULL;
+       }
+
+       return ret;
+}
+
+/*
+ * Point the CP at the previously-built script and kick off the crashdumper,
+ * then spin until the hardware reports completion (bit 2 of CNTL).
+ * Returns -EINVAL when no script has been written, otherwise the result of
+ * the completion poll.
+ */
+static int crashdump_run(struct msm_gpu *gpu, struct crashdump *crashdump)
+{
+       if (!crashdump->ptr || !crashdump->index)
+               return -EINVAL;
+
+       gpu_write(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO,
+               lower_32_bits(crashdump->iova));
+       gpu_write(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_HI,
+               upper_32_bits(crashdump->iova));
+
+       gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
+
+       return spin_until(gpu_read(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL) & 0x04);
+}
+
+/* Release the crashdumper buffer and reset the bookkeeping to zero */
+static void crashdump_destroy(struct msm_gpu *gpu, struct crashdump *crashdump)
+{
+       if (!crashdump->bo)
+               return;
+
+       if (crashdump->iova)
+               msm_gem_put_iova(crashdump->bo, gpu->aspace);
+
+       drm_gem_object_unreference(crashdump->bo);
+
+       memset(crashdump, 0, sizeof(*crashdump));
+}
+
+/*
+ * Append a "write register" opcode (two qwords) to the crashdump script:
+ * qword 0 is the 32-bit value, qword 1 encodes the register dword address,
+ * the write flag (bit 21) and a size of 1 dword.
+ */
+static inline void CRASHDUMP_SCRIPT_WRITE(struct crashdump *crashdump,
+               u32 reg, u32 val)
+{
+       u64 *ptr = crashdump->ptr + crashdump->index;
+
+       /* Silently drop the opcode rather than overflow the script area */
+       if (WARN_ON(crashdump->index + (2 * sizeof(u64))
+               >= CRASHDUMP_SCRIPT_SIZE))
+               return;
+
+       /* This is the value to write */
+       ptr[0] = (u64) val;
+
+       /*
+        * This triggers a write to the specified register.  1 is the size of
+        * the write in dwords
+        */
+       ptr[1] = (((u64) reg) << 44) | (1 << 21) | 1;
+
+       crashdump->index += 2 * sizeof(u64);
+}
+
+/*
+ * Append a "read registers" opcode (two qwords) to the crashdump script:
+ * qword 0 is the absolute GPU address in the data area where the values
+ * land ('offset' bytes into it), qword 1 encodes the starting register and
+ * the number of dwords to read.
+ */
+static inline void CRASHDUMP_SCRIPT_READ(struct crashdump *crashdump,
+               u32 reg, u32 count, u32 offset)
+{
+       u64 *ptr = crashdump->ptr + crashdump->index;
+
+       /* Bounds-check both the script area and the destination data area */
+       if (WARN_ON(crashdump->index + (2 * sizeof(u64))
+               >= CRASHDUMP_SCRIPT_SIZE))
+               return;
+
+       if (WARN_ON(offset + (count * sizeof(u32)) >= CRASHDUMP_DATA_SIZE))
+               return;
+
+       ptr[0] = (u64) crashdump->iova + CRASHDUMP_SCRIPT_SIZE + offset;
+       ptr[1] = (((u64) reg) << 44) | count;
+
+       crashdump->index += 2 * sizeof(u64);
+}
+
+/* CPU pointer to 'offset' bytes into the crashdump data area, or NULL if
+ * the buffer is unmapped or the offset is out of range */
+static inline void *CRASHDUMP_DATA_PTR(struct crashdump *crashdump, u32 offset)
+{
+       if (WARN_ON(!crashdump->ptr || offset >= CRASHDUMP_DATA_SIZE))
+               return NULL;
+
+       return crashdump->ptr + CRASHDUMP_SCRIPT_SIZE + offset;
+}
+
+/* Read one u32 from the data area.
+ * NOTE(review): dereferences CRASHDUMP_DATA_PTR() without a NULL check —
+ * callers must guarantee the offset is valid and the buffer is mapped. */
+static inline u32 CRASHDUMP_DATA_READ(struct crashdump *crashdump, u32 offset)
+{
+       return *((u32 *) CRASHDUMP_DATA_PTR(crashdump, offset));
+}
+
+/* Rewind the script write cursor so a new script can be built */
+static inline void CRASHDUMP_RESET(struct crashdump *crashdump)
+{
+       crashdump->index = 0;
+}
+
+/* Terminate the script: a pair of zero qwords tells the crashdumper to stop */
+static inline void CRASHDUMP_END(struct crashdump *crashdump)
+{
+       u64 *ptr = crashdump->ptr + crashdump->index;
+
+       if (WARN_ON((crashdump->index + (2 * sizeof(u64)))
+               >= CRASHDUMP_SCRIPT_SIZE))
+               return;
+
+       ptr[0] = 0;
+       ptr[1] = 0;
+
+       crashdump->index += 2 * sizeof(u64);
+}
+
+/*
+ * Script two opcodes: select the HLSQ aperture (statetype + bank) and then
+ * read 'count' dwords from it into the data area at 'offset'.  Returns the
+ * number of data bytes the read will produce so callers can advance offset.
+ */
+static u32 _crashdump_read_hlsq_aperture(struct crashdump *crashdump,
+               u32 offset, u32 statetype, u32 bank,
+               u32 count)
+{
+       CRASHDUMP_SCRIPT_WRITE(crashdump, REG_A5XX_HLSQ_DBG_READ_SEL,
+               A5XX_HLSQ_DBG_READ_SEL_STATETYPE(statetype) | bank);
+
+       CRASHDUMP_SCRIPT_READ(crashdump, REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE,
+               count, offset);
+
+       return count * sizeof(u32);
+}
+
+/*
+ * Copy one register group from the crashdump data area into the snapshot:
+ * a (count << 16 | reg) group header dword followed by the raw values.
+ * Returns the number of data-area bytes consumed.
+ */
+static u32 _copy_registers(struct msm_snapshot *snapshot,
+               struct crashdump *crashdump, u32 reg, u32 count,
+               u32 offset)
+{
+       int i;
+       u32 *ptr = (u32 *) (crashdump->ptr + CRASHDUMP_SCRIPT_SIZE + offset);
+       /*
+        * Write the offset of the first register of the group and the number of
+        * registers in the group
+        */
+       SNAPSHOT_WRITE_U32(snapshot, ((count << 16) | reg));
+
+       /* Followed by each register value in the group */
+       for (i = 0; i < count; i++)
+               SNAPSHOT_WRITE_U32(snapshot, ptr[i]);
+
+       return count * sizeof(u32);
+}
+
+/*
+ * Return the number of registers in each register group from the
+ * adreno_gpu->registers
+ */
+/* ptr points at a {first, last} register-range pair; count is inclusive */
+static inline u32 REG_COUNT(const unsigned int *ptr)
+{
+       return (ptr[1] - ptr[0]) + 1;
+}
+
+/*
+ * Capture what registers we can from the CPU in case the crashdumper is
+ * unavailable or broken.  This will omit the SP,TP and HLSQ registers, but
+ * you'll get everything else and that ain't bad.
+ * adreno_gpu->registers is a ~0-terminated list of {first, last} pairs.
+ */
+static void a5xx_snapshot_registers_cpu(struct msm_gpu *gpu,
+               struct msm_snapshot *snapshot)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct msm_snapshot_regs header;
+       u32 regcount = 0, groups = 0;
+       int i;
+
+       /*
+        * Before we write the section we need to figure out how big our data
+        * section will be
+        */
+       for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+               regcount += REG_COUNT(&(adreno_gpu->registers[i]));
+               groups++;
+       }
+
+       header.count = groups;
+
+       /*
+        * We need one dword for each group and then one dword for each register
+        * value in that group
+        */
+       if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_REGS_V2,
+               regcount + groups))
+               return;
+
+       for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+               u32 count = REG_COUNT(&(adreno_gpu->registers[i]));
+               u32 reg = adreno_gpu->registers[i];
+               int j;
+
+               /* Write the offset and count for the group */
+               SNAPSHOT_WRITE_U32(snapshot, (count << 16) | reg);
+
+               /* Write each value in the group */
+               for (j = 0; j < count; j++)
+                       SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu, reg++));
+       }
+}
+
+/*
+ * Snapshot all GPU registers: first the CPU-accessible set, then (when a
+ * crashdumper buffer is available in snapshot->priv) the HLSQ-aperture
+ * registers that only the crashdumper can read.
+ */
+static void a5xx_snapshot_registers(struct msm_gpu *gpu,
+               struct msm_snapshot *snapshot)
+{
+       struct msm_snapshot_regs header;
+       struct crashdump *crashdump = snapshot->priv;
+       u32 offset = 0, regcount = 0, groups = 0;
+       int i;
+
+       /*
+        * First snapshot all the registers that we can from the CPU.  Do this
+        * because the crashdumper has a tendency to "taint" the value of some
+        * of the registers (because the GPU implements the crashdumper) so we
+        * only want to use the crash dump facility if we have to
+        */
+       a5xx_snapshot_registers_cpu(gpu, snapshot);
+
+       if (!crashdump)
+               return;
+
+       CRASHDUMP_RESET(crashdump);
+
+       /* HLSQ and context registers behind the aperture */
+       for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+               u32 count = a5xx_hlsq_aperture_regs[i].count;
+
+               offset += _crashdump_read_hlsq_aperture(crashdump, offset,
+                       a5xx_hlsq_aperture_regs[i].type, 0, count);
+               regcount += count;
+
+               groups++;
+       }
+
+       CRASHDUMP_END(crashdump);
+
+       /* If the dump engine fails, skip the aperture section entirely */
+       if (crashdump_run(gpu, crashdump))
+               return;
+
+       header.count = groups;
+
+       /*
+        * The size of the data will be one dword for each "group" of registers,
+        * and then one dword for each of the registers in that group
+        */
+       if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_REGS_V2,
+               groups + regcount))
+               return;
+
+       /* Copy the registers to the snapshot (offset mirrors the build loop) */
+       for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++)
+               offset += _copy_registers(snapshot, crashdump,
+                       a5xx_hlsq_aperture_regs[i].regoffset,
+                       a5xx_hlsq_aperture_regs[i].count, offset);
+}
+
+/*
+ * Emit one snapshot section for a single bank of one shader block, copying
+ * 'size' dwords out of the crashdump data area at 'offset'.
+ */
+static void _a5xx_snapshot_shader_bank(struct msm_snapshot *snapshot,
+               struct crashdump *crashdump, u32 block, u32 bank,
+               u32 size, u32 offset)
+{
+       void *src;
+
+       struct msm_snapshot_shader header = {
+               .type = block,
+               .index = bank,
+               .size = size,
+       };
+
+       if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_SHADER, size))
+               return;
+
+       src = CRASHDUMP_DATA_PTR(crashdump, offset);
+
+       /* src is NULL on a bad offset; in that case the section has a header
+        * but no payload */
+       if (src)
+               SNAPSHOT_MEMCPY(snapshot, src, size * sizeof(u32));
+}
+
+/*
+ * Dump every bank of every shader block via the crashdumper (the only path
+ * to shader memory), then emit one snapshot section per block/bank pair.
+ */
+static void a5xx_snapshot_shader_memory(struct msm_gpu *gpu,
+               struct msm_snapshot *snapshot)
+{
+       struct crashdump *crashdump = snapshot->priv;
+       u32 offset = 0;
+       int i;
+
+       /* We can only get shader memory through the crashdump */
+       if (!crashdump)
+               return;
+
+       CRASHDUMP_RESET(crashdump);
+
+       /* For each shader block */
+       for (i = 0; i < ARRAY_SIZE(a5xx_shader_blocks); i++) {
+               int j;
+
+               /* For each block, dump 4 banks */
+               for (j = 0; j < A5XX_NR_SHADER_BANKS; j++)
+                       offset += _crashdump_read_hlsq_aperture(crashdump,
+                               offset, a5xx_shader_blocks[i].id, j,
+                               a5xx_shader_blocks[i].size);
+       }
+
+       CRASHDUMP_END(crashdump);
+
+       /* If the crashdump fails we can't get shader memory any other way */
+       if (crashdump_run(gpu, crashdump))
+               return;
+
+       /* Each bank of each shader gets its own snapshot section; the offset
+        * walk here must match the script-build loop above exactly */
+       for (offset = 0, i = 0; i < ARRAY_SIZE(a5xx_shader_blocks); i++) {
+               int j;
+
+               for (j = 0; j < A5XX_NR_SHADER_BANKS; j++) {
+                       _a5xx_snapshot_shader_bank(snapshot, crashdump,
+                               a5xx_shader_blocks[i].id, j,
+                               a5xx_shader_blocks[i].size, offset);
+                       offset += a5xx_shader_blocks[i].size * sizeof(u32);
+               }
+       }
+}
+
+#define A5XX_NUM_AXI_ARB_BLOCKS 2
+#define A5XX_NUM_XIN_BLOCKS     4
+#define VBIF_DATA_SIZE ((16 * A5XX_NUM_AXI_ARB_BLOCKS) + \
+       (18 * A5XX_NUM_XIN_BLOCKS) + (12 * A5XX_NUM_XIN_BLOCKS))
+
+static void a5xx_snapshot_debugbus_vbif(struct msm_gpu *gpu,
+               struct msm_snapshot *snapshot)
+{
+       int i;
+       struct msm_snapshot_debugbus header = {
+               .id = A5XX_RBBM_DBGBUS_VBIF,
+               .count = VBIF_DATA_SIZE,
+       };
+
+       if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUGBUS,
+               VBIF_DATA_SIZE))
+               return;
+
+       gpu_rmw(gpu, REG_A5XX_VBIF_CLKON, A5XX_VBIF_CLKON_FORCE_ON_TESTBUS,
+               A5XX_VBIF_CLKON_FORCE_ON_TESTBUS);
+
+       gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS1_CTRL0, 0);
+       gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS_OUT_CTRL,
+               A5XX_VBIF_TEST_BUS_OUT_CTRL_TEST_BUS_CTRL_EN);
+
+       for (i = 0; i < A5XX_NUM_AXI_ARB_BLOCKS; i++) {
+               int j;
+
+               gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL0, 1 << (i + 16));
+               for (j = 0; j < 16; j++) {
+                       gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL1,
+                       A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL(j));
+                       SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu,
+                               REG_A5XX_VBIF_TEST_BUS_OUT));
+               }
+       }
+
+       for (i = 0; i < A5XX_NUM_XIN_BLOCKS; i++) {
+               int j;
+
+               gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL0, 1 << i);
+               for (j = 0; j < 18; j++) {
+                       gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL1,
+                       A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL(j));
+                       SNAPSHOT_WRITE_U32(snapshot,
+                               gpu_read(gpu, REG_A5XX_VBIF_TEST_BUS_OUT));
+               }
+       }
+
+       for (i = 0; i < A5XX_NUM_XIN_BLOCKS; i++) {
+               int j;
+
+               gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS1_CTRL0, 1 << i);
+               for (j = 0; j < 12; j++) {
+                       gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS1_CTRL1,
+                       A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL(j));
+                       SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu,
+                               REG_A5XX_VBIF_TEST_BUS_OUT));
+               }
+       }
+
+}
+
+static void a5xx_snapshot_debugbus_block(struct msm_gpu *gpu,
+               struct msm_snapshot *snapshot, u32 block, u32 count)
+{
+       int i;
+       struct msm_snapshot_debugbus header = {
+               .id = block,
+               .count = count * 2, /* Each value is 2 dwords */
+       };
+
+       if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUGBUS,
+               (count * 2)))
+               return;
+
+       for (i = 0; i < count; i++) {
+               u32 reg = A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX(i) |
+                       A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+
+               gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_A, reg);
+               gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_B, reg);
+               gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_C, reg);
+               gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_D, reg);
+
+               /* Each debugbus entry is a quad word */
+               SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu,
+                       REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2));
+               SNAPSHOT_WRITE_U32(snapshot,
+                       gpu_read(gpu, REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1));
+       }
+}
+
+static void a5xx_snapshot_debugbus(struct msm_gpu *gpu,
+               struct msm_snapshot *snapshot)
+{
+       int i;
+
+       gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_CNTLM,
+               A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE(0xF));
+
+       for (i = 0; i < ARRAY_SIZE(a5xx_debugbus_blocks); i++)
+               a5xx_snapshot_debugbus_block(gpu, snapshot,
+                       a5xx_debugbus_blocks[i].id,
+                       a5xx_debugbus_blocks[i].count);
+
+       /* VBIF is special and not in a good way */
+       a5xx_snapshot_debugbus_vbif(gpu, snapshot);
+}
+
+static void a5xx_snapshot_cp_merciu(struct msm_gpu *gpu,
+               struct msm_snapshot *snapshot)
+{
+       unsigned int i;
+       struct msm_snapshot_debug header = {
+               .type = SNAPSHOT_DEBUG_CP_MERCIU,
+               .size = 64 << 1, /* Data size is 2 dwords per entry */
+       };
+
+       if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUG, 64 << 1))
+               return;
+
+       gpu_write(gpu, REG_A5XX_CP_MERCIU_DBG_ADDR, 0);
+       for (i = 0; i < 64; i++) {
+               SNAPSHOT_WRITE_U32(snapshot,
+                       gpu_read(gpu, REG_A5XX_CP_MERCIU_DBG_DATA_1));
+               SNAPSHOT_WRITE_U32(snapshot,
+                       gpu_read(gpu, REG_A5XX_CP_MERCIU_DBG_DATA_2));
+       }
+}
+
+static void a5xx_snapshot_cp_roq(struct msm_gpu *gpu,
+               struct msm_snapshot *snapshot)
+{
+       int i;
+       struct msm_snapshot_debug header = {
+               .type = SNAPSHOT_DEBUG_CP_ROQ,
+               .size = 512,
+       };
+
+       if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUG, 512))
+               return;
+
+       gpu_write(gpu, REG_A5XX_CP_ROQ_DBG_ADDR, 0);
+       for (i = 0; i < 512; i++)
+               SNAPSHOT_WRITE_U32(snapshot,
+                       gpu_read(gpu, REG_A5XX_CP_ROQ_DBG_DATA));
+}
+
+static void a5xx_snapshot_cp_meq(struct msm_gpu *gpu,
+               struct msm_snapshot *snapshot)
+{
+       int i;
+       struct msm_snapshot_debug header = {
+               .type = SNAPSHOT_DEBUG_CP_MEQ,
+               .size = 64,
+       };
+
+       if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUG, 64))
+               return;
+
+       gpu_write(gpu, REG_A5XX_CP_MEQ_DBG_ADDR, 0);
+       for (i = 0; i < 64; i++)
+               SNAPSHOT_WRITE_U32(snapshot,
+                       gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));
+}
+
+static void a5xx_snapshot_indexed_registers(struct msm_gpu *gpu,
+               struct msm_snapshot *snapshot, u32 addr, u32 data,
+               u32 count)
+{
+       unsigned int i;
+       struct msm_snapshot_indexed_regs header = {
+               .index_reg = addr,
+               .data_reg = data,
+               .start = 0,
+               .count = count,
+       };
+
+       if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_INDEXED_REGS,
+               count))
+               return;
+
+       for (i = 0; i < count; i++) {
+               gpu_write(gpu, addr, i);
+               SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu, data));
+       }
+}
+
+int a5xx_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+       struct crashdump crashdump = { 0 };
+
+       if (!crashdump_init(gpu, &crashdump))
+               snapshot->priv = &crashdump;
+
+       /* To accurately read all registers, disable hardware clock gating */
+       a5xx_set_hwcg(gpu, false);
+
+       /* Kick it up to the generic level */
+       adreno_snapshot(gpu, snapshot);
+
+       /* Read the GPU registers */
+       a5xx_snapshot_registers(gpu, snapshot);
+
+       /* Read the shader memory banks */
+       a5xx_snapshot_shader_memory(gpu, snapshot);
+
+       /* Read the debugbus registers */
+       a5xx_snapshot_debugbus(gpu, snapshot);
+
+       /* PFP data */
+       a5xx_snapshot_indexed_registers(gpu, snapshot,
+               REG_A5XX_CP_PFP_STAT_ADDR, REG_A5XX_CP_PFP_STAT_DATA, 36);
+
+       /* ME data */
+       a5xx_snapshot_indexed_registers(gpu, snapshot,
+               REG_A5XX_CP_ME_STAT_ADDR, REG_A5XX_CP_ME_STAT_DATA, 29);
+
+       /* DRAW_STATE data */
+       a5xx_snapshot_indexed_registers(gpu, snapshot,
+               REG_A5XX_CP_DRAW_STATE_ADDR, REG_A5XX_CP_DRAW_STATE_DATA,
+               256);
+
+       /* ME cache */
+       a5xx_snapshot_indexed_registers(gpu, snapshot,
+               REG_A5XX_CP_ME_UCODE_DBG_ADDR, REG_A5XX_CP_ME_UCODE_DBG_DATA,
+               0x53F);
+
+       /* PFP cache */
+       a5xx_snapshot_indexed_registers(gpu, snapshot,
+               REG_A5XX_CP_PFP_UCODE_DBG_ADDR, REG_A5XX_CP_PFP_UCODE_DBG_DATA,
+               0x53F);
+
+       /* ME queue */
+       a5xx_snapshot_cp_meq(gpu, snapshot);
+
+       /* CP ROQ */
+       a5xx_snapshot_cp_roq(gpu, snapshot);
+
+       /* CP MERCIU */
+       a5xx_snapshot_cp_merciu(gpu, snapshot);
+
+       crashdump_destroy(gpu, &crashdump);
+       snapshot->priv = NULL;
+
+       /* Re-enable HWCG */
+       a5xx_set_hwcg(gpu, true);
+       return 0;
+}
index e81481d..1cf8447 100644 (file)
@@ -8,14 +8,15 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          ( 109916 bytes, from 2016-02-20 18:44:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2015-09-24 17:30:00)
+- ./adreno.xml               (    431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml  (   1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml          (  32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml (  12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml    (  19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml          (  83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml          ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml          (  81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml         (   1773 bytes, from 2016-10-24 21:12:27)
 
 Copyright (C) 2013-2016 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -127,11 +128,13 @@ enum a3xx_rop_code {
        ROP_COPY_INVERTED = 3,
        ROP_AND_REVERSE = 4,
        ROP_INVERT = 5,
+       ROP_XOR = 6,
        ROP_NAND = 7,
        ROP_AND = 8,
        ROP_EQUIV = 9,
        ROP_NOOP = 10,
        ROP_OR_INVERTED = 11,
+       ROP_COPY = 12,
        ROP_OR_REVERSE = 13,
        ROP_OR = 14,
        ROP_SET = 15,
@@ -172,6 +175,14 @@ enum a3xx_color_swap {
        XYZW = 3,
 };
 
+enum a3xx_rb_blend_opcode {
+       BLEND_DST_PLUS_SRC = 0,
+       BLEND_SRC_MINUS_DST = 1,
+       BLEND_DST_MINUS_SRC = 2,
+       BLEND_MIN_DST_SRC = 3,
+       BLEND_MAX_DST_SRC = 4,
+};
+
 #define REG_AXXX_CP_RB_BASE                                    0x000001c0
 
 #define REG_AXXX_CP_RB_CNTL                                    0x000001c1
index 5127b75..a498a60 100644 (file)
@@ -27,6 +27,7 @@ module_param_named(hang_debug, hang_debug, bool, 0600);
 
 struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
 struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
 
 static const struct adreno_info gpulist[] = {
        {
@@ -77,6 +78,22 @@ static const struct adreno_info gpulist[] = {
                .pfpfw = "a420_pfp.fw",
                .gmem  = (SZ_1M + SZ_512K),
                .init  = a4xx_gpu_init,
+       }, {
+               .rev = ADRENO_REV(5, 3, 0, ANY_ID),
+               .revn = 530,
+               .name = "A530",
+               .pm4fw = "a530_pm4.fw",
+               .pfpfw = "a530_pfp.fw",
+               .gmem = SZ_1M,
+               .init = a5xx_gpu_init,
+       }, {
+               .rev = ADRENO_REV(5, 4, 0, ANY_ID),
+               .revn = 540,
+               .name = "A540",
+               .pm4fw = "a530_pm4.fw",
+               .pfpfw = "a530_pfp.fw",
+               .gmem = SZ_1M,
+               .init = a5xx_gpu_init,
        },
 };
 
@@ -86,6 +103,8 @@ MODULE_FIRMWARE("a330_pm4.fw");
 MODULE_FIRMWARE("a330_pfp.fw");
 MODULE_FIRMWARE("a420_pm4.fw");
 MODULE_FIRMWARE("a420_pfp.fw");
+MODULE_FIRMWARE("a530_fm4.fw");
+MODULE_FIRMWARE("a530_pfp.fw");
 
 static inline bool _rev_match(uint8_t entry, uint8_t id)
 {
@@ -148,12 +167,16 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
                mutex_lock(&dev->struct_mutex);
                gpu->funcs->pm_resume(gpu);
                mutex_unlock(&dev->struct_mutex);
+
+               disable_irq(gpu->irq);
+
                ret = gpu->funcs->hw_init(gpu);
                if (ret) {
                        dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
                        gpu->funcs->destroy(gpu);
                        gpu = NULL;
                } else {
+                       enable_irq(gpu->irq);
                        /* give inactive pm a chance to kick in: */
                        msm_gpu_retire(gpu);
                }
@@ -172,11 +195,16 @@ static void set_gpu_pdev(struct drm_device *dev,
 static int adreno_bind(struct device *dev, struct device *master, void *data)
 {
        static struct adreno_platform_config config = {};
-       struct device_node *child, *node = dev->of_node;
-       u32 val;
+       uint32_t val = 0;
        int ret;
 
-       ret = of_property_read_u32(node, "qcom,chipid", &val);
+       /*
+        * Read the chip ID from the device tree at bind time - we use this
+        * information to load the correct functions. All the rest of the
+        * (extensive) device tree probing should happen in the GPU specific
+        * code
+        */
+       ret = of_property_read_u32(dev->of_node, "qcom,chipid", &val);
        if (ret) {
                dev_err(dev, "could not find chipid: %d\n", ret);
                return ret;
@@ -185,29 +213,6 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
        config.rev = ADRENO_REV((val >> 24) & 0xff,
                        (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);
 
-       /* find clock rates: */
-       config.fast_rate = 0;
-       config.slow_rate = ~0;
-       for_each_child_of_node(node, child) {
-               if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
-                       struct device_node *pwrlvl;
-                       for_each_child_of_node(child, pwrlvl) {
-                               ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
-                               if (ret) {
-                                       dev_err(dev, "could not find gpu-freq: %d\n", ret);
-                                       return ret;
-                               }
-                               config.fast_rate = max(config.fast_rate, val);
-                               config.slow_rate = min(config.slow_rate, val);
-                       }
-               }
-       }
-
-       if (!config.fast_rate) {
-               dev_err(dev, "could not find clk rates\n");
-               return -ENXIO;
-       }
-
        dev->platform_data = &config;
        set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
        return 0;
index 4951172..f188382 100644 (file)
@@ -2,7 +2,7 @@
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2016-2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/utsname.h>
 #include "adreno_gpu.h"
+#include "msm_snapshot.h"
 #include "msm_gem.h"
 #include "msm_mmu.h"
 
-#define RB_SIZE    SZ_32K
-#define RB_BLKSIZE 16
 
 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
 {
@@ -35,6 +35,9 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
        case MSM_PARAM_GMEM_SIZE:
                *value = adreno_gpu->gmem;
                return 0;
+       case MSM_PARAM_GMEM_BASE:
+               *value = 0x100000;
+               return 0;
        case MSM_PARAM_CHIP_ID:
                *value = adreno_gpu->rev.patchid |
                                (adreno_gpu->rev.minor << 8) |
@@ -42,7 +45,7 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
                                (adreno_gpu->rev.core << 24);
                return 0;
        case MSM_PARAM_MAX_FREQ:
-               *value = adreno_gpu->base.fast_rate;
+               *value = gpu->gpufreq[gpu->active_level];
                return 0;
        case MSM_PARAM_TIMESTAMP:
                if (adreno_gpu->funcs->get_timestamp)
@@ -54,91 +57,125 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
        }
 }
 
-#define rbmemptr(adreno_gpu, member)  \
-       ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
-
 int adreno_hw_init(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-       int ret;
+       int i;
 
        DBG("%s", gpu->name);
 
-       ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
-       if (ret) {
-               gpu->rb_iova = 0;
-               dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
-               return ret;
+       for (i = 0; i < gpu->nr_rings; i++) {
+               int ret = msm_gem_get_iova(gpu->rb[i]->bo, gpu->aspace,
+                       &gpu->rb[i]->iova);
+               if (ret) {
+                       gpu->rb[i]->iova = 0;
+                       dev_err(gpu->dev->dev,
+                               "could not map ringbuffer %d: %d\n", i, ret);
+                       return ret;
+               }
        }
 
-       /* Setup REG_CP_RB_CNTL: */
+       /*
+        * Setup REG_CP_RB_CNTL.  The same value is used across targets (with
+        * the excpetion of A430 that disables the RPTR shadow) - the cacluation
+        * for the ringbuffer size and block size is moved to msm_gpu.h for the
+        * pre-processor to deal with and the A430 variant is ORed in here
+        */
        adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
-                       /* size is log2(quad-words): */
-                       AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
-                       AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)) |
-                       (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
+               MSM_GPU_RB_CNTL_DEFAULT |
+               (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
 
-       /* Setup ringbuffer address: */
-       adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
+       /* Setup ringbuffer address - use ringbuffer[0] for GPU init */
+       adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
+               REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
 
-       if (!adreno_is_a430(adreno_gpu))
-               adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
-                                               rbmemptr(adreno_gpu, rptr));
+       adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
+               REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(adreno_gpu, 0, rptr));
 
        return 0;
 }
 
-static uint32_t get_wptr(struct msm_ringbuffer *ring)
+/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
+static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
+               struct msm_ringbuffer *ring)
 {
-       return ring->cur - ring->start;
+       if (adreno_is_a430(adreno_gpu)) {
+               /*
+                * If index is anything but 0 this will probably break horribly,
+                * but I think that we have enough infrastructure in place to
+                * ensure that it won't be. If not then this is why your
+                * a430 stopped working.
+                */
+               return adreno_gpu->memptrs->rptr[ring->id] = adreno_gpu_read(
+                       adreno_gpu, REG_ADRENO_CP_RB_RPTR);
+       } else
+               return adreno_gpu->memptrs->rptr[ring->id];
 }
 
-/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
-static uint32_t get_rptr(struct adreno_gpu *adreno_gpu)
+struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
 {
-       if (adreno_is_a430(adreno_gpu))
-               return adreno_gpu->memptrs->rptr = adreno_gpu_read(
-                       adreno_gpu, REG_ADRENO_CP_RB_RPTR);
-       else
-               return adreno_gpu->memptrs->rptr;
+       return gpu->rb[0];
+}
+
+uint32_t adreno_submitted_fence(struct msm_gpu *gpu,
+               struct msm_ringbuffer *ring)
+{
+       if (!ring)
+               return 0;
+
+       return ring->submitted_fence;
 }
 
-uint32_t adreno_last_fence(struct msm_gpu *gpu)
+uint32_t adreno_last_fence(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-       return adreno_gpu->memptrs->fence;
+
+       if (!ring)
+               return 0;
+
+       return adreno_gpu->memptrs->fence[ring->id];
 }
 
 void adreno_recover(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct drm_device *dev = gpu->dev;
-       int ret;
+       struct msm_ringbuffer *ring;
+       int ret, i;
 
        gpu->funcs->pm_suspend(gpu);
 
-       /* reset ringbuffer: */
-       gpu->rb->cur = gpu->rb->start;
+       /* reset ringbuffer(s): */
+
+       FOR_EACH_RING(gpu, ring, i) {
+               if (!ring)
+                       continue;
 
-       /* reset completed fence seqno, just discard anything pending: */
-       adreno_gpu->memptrs->fence = gpu->submitted_fence;
-       adreno_gpu->memptrs->rptr  = 0;
-       adreno_gpu->memptrs->wptr  = 0;
+               /* No need for a lock here, nobody else is peeking in */
+               ring->cur = ring->start;
+               ring->next = ring->start;
+
+               /* reset completed fence seqno, discard anything pending: */
+               adreno_gpu->memptrs->fence[ring->id] =
+                       adreno_submitted_fence(gpu, ring);
+               adreno_gpu->memptrs->rptr[ring->id]  = 0;
+       }
 
        gpu->funcs->pm_resume(gpu);
+
+       disable_irq(gpu->irq);
        ret = gpu->funcs->hw_init(gpu);
        if (ret) {
                dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
                /* hmm, oh well? */
        }
+       enable_irq(gpu->irq);
 }
 
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-               struct msm_file_private *ctx)
+int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-       struct msm_drm_private *priv = gpu->dev->dev_private;
-       struct msm_ringbuffer *ring = gpu->rb;
+       struct msm_ringbuffer *ring = gpu->rb[submit->ring];
        unsigned i, ibs = 0;
 
        for (i = 0; i < submit->nr_cmds; i++) {
@@ -147,13 +184,11 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                        /* ignore IB-targets */
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       /* ignore if there has not been a ctx switch: */
-                       if (priv->lastctx == ctx)
                                break;
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
                                CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
-                       OUT_RING(ring, submit->cmd[i].iova);
+                       OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
                        OUT_RING(ring, submit->cmd[i].size);
                        ibs++;
                        break;
@@ -184,7 +219,7 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 
        OUT_PKT3(ring, CP_EVENT_WRITE, 3);
        OUT_RING(ring, CACHE_FLUSH_TS);
-       OUT_RING(ring, rbmemptr(adreno_gpu, fence));
+       OUT_RING(ring, rbmemptr(adreno_gpu, ring->id, fence));
        OUT_RING(ring, submit->fence);
 
        /* we could maybe be clever and only CP_COND_EXEC the interrupt: */
@@ -211,15 +246,25 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
        }
 #endif
 
-       gpu->funcs->flush(gpu);
+       gpu->funcs->flush(gpu, ring);
 
        return 0;
 }
 
-void adreno_flush(struct msm_gpu *gpu)
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-       uint32_t wptr = get_wptr(gpu->rb);
+       uint32_t wptr;
+
+       /* Copy the shadow to the actual register */
+       ring->cur = ring->next;
+
+       /*
+        * Mask the wptr value that we calculate to fit in the HW range. This is
+        * to account for the possibility that the last command fit exactly into
+        * the ringbuffer and rb->next hasn't wrapped to zero yet
+        */
+       wptr = get_wptr(ring);
 
        /* ensure writes to ringbuffer have hit system memory: */
        mb();
@@ -227,25 +272,27 @@ void adreno_flush(struct msm_gpu *gpu)
        adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
 }
 
-void adreno_idle(struct msm_gpu *gpu)
+bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-       uint32_t wptr = get_wptr(gpu->rb);
-       int ret;
+       uint32_t wptr = get_wptr(ring);
 
        /* wait for CP to drain ringbuffer: */
-       ret = spin_until(get_rptr(adreno_gpu) == wptr);
-
-       if (ret)
-               DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
+       if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
+               return true;
 
        /* TODO maybe we need to reset GPU here to recover from hang? */
+       DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
+               gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);
+
+       return false;
 }
 
 #ifdef CONFIG_DEBUG_FS
 void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct msm_ringbuffer *ring;
        int i;
 
        seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
@@ -253,11 +300,18 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
                        adreno_gpu->rev.major, adreno_gpu->rev.minor,
                        adreno_gpu->rev.patchid);
 
-       seq_printf(m, "fence:    %d/%d\n", adreno_gpu->memptrs->fence,
-                       gpu->submitted_fence);
-       seq_printf(m, "rptr:     %d\n", get_rptr(adreno_gpu));
-       seq_printf(m, "wptr:     %d\n", adreno_gpu->memptrs->wptr);
-       seq_printf(m, "rb wptr:  %d\n", get_wptr(gpu->rb));
+       FOR_EACH_RING(gpu, ring, i) {
+               if (!ring)
+                       continue;
+
+               seq_printf(m, "rb %d: fence:    %d/%d\n", i,
+                       adreno_last_fence(gpu, ring),
+                       adreno_submitted_fence(gpu, ring));
+
+               seq_printf(m, "      rptr:     %d\n",
+                       get_rptr(adreno_gpu, ring));
+               seq_printf(m, "rb wptr:  %d\n", get_wptr(ring));
+       }
 
        gpu->funcs->pm_resume(gpu);
 
@@ -286,22 +340,29 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
  */
 void adreno_dump_info(struct msm_gpu *gpu)
 {
+       struct drm_device *dev = gpu->dev;
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct msm_ringbuffer *ring;
        int i;
 
-       printk("revision: %d (%d.%d.%d.%d)\n",
+       dev_err(dev->dev, "revision: %d (%d.%d.%d.%d)\n",
                        adreno_gpu->info->revn, adreno_gpu->rev.core,
                        adreno_gpu->rev.major, adreno_gpu->rev.minor,
                        adreno_gpu->rev.patchid);
 
-       printk("fence:    %d/%d\n", adreno_gpu->memptrs->fence,
-                       gpu->submitted_fence);
-       printk("rptr:     %d\n", get_rptr(adreno_gpu));
-       printk("wptr:     %d\n", adreno_gpu->memptrs->wptr);
-       printk("rb wptr:  %d\n", get_wptr(gpu->rb));
+       FOR_EACH_RING(gpu, ring, i) {
+               if (!ring)
+                       continue;
+
+               dev_err(dev->dev, " ring %d: fence %d/%d rptr/wptr %x/%x\n", i,
+                       adreno_last_fence(gpu, ring),
+                       adreno_submitted_fence(gpu, ring),
+                       get_rptr(adreno_gpu, ring),
+                       get_wptr(ring));
+       }
 
        for (i = 0; i < 8; i++) {
-               printk("CP_SCRATCH_REG%d: %u\n", i,
+               pr_err("CP_SCRATCH_REG%d: %u\n", i,
                        gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
        }
 }
@@ -326,30 +387,146 @@ void adreno_dump(struct msm_gpu *gpu)
        }
 }
 
-static uint32_t ring_freewords(struct msm_gpu *gpu)
+static uint32_t ring_freewords(struct msm_ringbuffer *ring)
 {
-       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-       uint32_t size = gpu->rb->size / 4;
-       uint32_t wptr = get_wptr(gpu->rb);
-       uint32_t rptr = get_rptr(adreno_gpu);
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
+       uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
+       /* Use ring->next to calculate free size */
+       uint32_t wptr = ring->next - ring->start;
+       uint32_t rptr = get_rptr(adreno_gpu, ring);
        return (rptr + (size - 1) - wptr) % size;
 }
 
-void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
+void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
 {
-       if (spin_until(ring_freewords(gpu) >= ndwords))
-               DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
+       if (spin_until(ring_freewords(ring) >= ndwords))
+               DRM_ERROR("%s: timeout waiting for space in ringbuffer %d\n",
+                       ring->gpu->name, ring->id);
 }
 
 static const char *iommu_ports[] = {
-               "gfx3d_user", "gfx3d_priv",
-               "gfx3d1_user", "gfx3d1_priv",
+               "gfx3d_user",
 };
 
+/* Read the set of powerlevels */
+static int _adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *node)
+{
+       struct device_node *child;
+
+       gpu->active_level = 1;
+
+       /* The device tree will tell us the best clock to initialize with */
+       of_property_read_u32(node, "qcom,initial-pwrlevel", &gpu->active_level);
+
+       if (gpu->active_level >= ARRAY_SIZE(gpu->gpufreq))
+               gpu->active_level = 1;
+
+       for_each_child_of_node(node, child) {
+               unsigned int index;
+
+               if (of_property_read_u32(child, "reg", &index))
+                       return -EINVAL;
+
+               if (index >= ARRAY_SIZE(gpu->gpufreq))
+                       continue;
+
+               gpu->nr_pwrlevels = max(gpu->nr_pwrlevels, index + 1);
+
+               of_property_read_u32(child, "qcom,gpu-freq",
+                       &gpu->gpufreq[index]);
+               of_property_read_u32(child, "qcom,bus-freq",
+                       &gpu->busfreq[index]);
+       }
+
+       DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
+               gpu->gpufreq[gpu->active_level],
+               gpu->gpufreq[gpu->nr_pwrlevels - 1],
+               gpu->busfreq[gpu->active_level]);
+
+       return 0;
+}
+
+/*
+ * Escape valve for targets that don't define the binning nodes. Get the
+ * first powerlevel node and parse it
+ */
+static int adreno_get_legacy_pwrlevels(struct msm_gpu *gpu,
+               struct device_node *parent)
+{
+       struct device_node *child;
+
+       child = of_find_node_by_name(parent, "qcom,gpu-pwrlevels");
+       if (child)
+               return _adreno_get_pwrlevels(gpu, child);
+
+       dev_err(gpu->dev->dev, "Unable to parse any powerlevels\n");
+       return -EINVAL;
+}
+
+/* Get the powerlevels for the target */
+static int adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *parent)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct device_node *node, *child;
+
+       /* See if the target has defined a number of power bins */
+       node = of_find_node_by_name(parent, "qcom,gpu-pwrlevel-bins");
+       if (!node) {
+               /* If not look for the qcom,gpu-pwrlevels node */
+               return adreno_get_legacy_pwrlevels(gpu, parent);
+       }
+
+       for_each_child_of_node(node, child) {
+               unsigned int bin;
+
+               if (of_property_read_u32(child, "qcom,speed-bin", &bin))
+                       continue;
+
+               /*
+                * If the bin matches the bin specified by the fuses, then we
+                * have a winner - parse it
+                */
+               if (adreno_gpu->speed_bin == bin)
+                       return _adreno_get_pwrlevels(gpu, child);
+       }
+
+       return -ENODEV;
+}
+
+static const struct {
+       const char *str;
+       uint32_t flag;
+} quirks[] = {
+       { "qcom,gpu-quirk-two-pass-use-wfi", ADRENO_QUIRK_TWO_PASS_USE_WFI },
+       { "qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK },
+};
+
+/* Parse the statistics from the device tree */
+static int adreno_of_parse(struct platform_device *pdev, struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct device_node *node = pdev->dev.of_node;
+       int i, ret;
+
+       /* Probe the powerlevels */
+       ret = adreno_get_pwrlevels(gpu, node);
+       if (ret)
+               return ret;
+
+       /* Check to see if any quirks were specified in the device tree */
+       for (i = 0; i < ARRAY_SIZE(quirks); i++)
+               if (of_property_read_bool(node, quirks[i].str))
+                       adreno_gpu->quirks |= quirks[i].flag;
+
+       return 0;
+}
+
 int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
-               struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
+               struct adreno_gpu *adreno_gpu,
+               const struct adreno_gpu_funcs *funcs, int nr_rings)
 {
        struct adreno_platform_config *config = pdev->dev.platform_data;
+       struct msm_gpu_config adreno_gpu_config  = { 0 };
        struct msm_gpu *gpu = &adreno_gpu->base;
        struct msm_mmu *mmu;
        int ret;
@@ -360,19 +537,29 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
        adreno_gpu->revn = adreno_gpu->info->revn;
        adreno_gpu->rev = config->rev;
 
-       gpu->fast_rate = config->fast_rate;
-       gpu->slow_rate = config->slow_rate;
-       gpu->bus_freq  = config->bus_freq;
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-       gpu->bus_scale_table = config->bus_scale_table;
-#endif
+       /* Get the rest of the target configuration from the device tree */
+       adreno_of_parse(pdev, gpu);
 
-       DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
-                       gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
+       adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
+       adreno_gpu_config.irqname = "kgsl_3d0_irq";
+       adreno_gpu_config.nr_rings = nr_rings;
+
+       adreno_gpu_config.va_start = SZ_16M;
+       adreno_gpu_config.va_end = 0xffffffff;
+
+       if (adreno_gpu->revn >= 500) {
+               /* 5XX targets use a 64 bit region */
+               adreno_gpu_config.va_start = 0x800000000;
+               adreno_gpu_config.va_end = 0x8ffffffff;
+       } else {
+               adreno_gpu_config.va_start = 0x300000;
+               adreno_gpu_config.va_end = 0xffffffff;
+       }
+
+       adreno_gpu_config.nr_rings = nr_rings;
 
        ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
-                       adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
-                       RB_SIZE);
+                       adreno_gpu->info->name, &adreno_gpu_config);
        if (ret)
                return ret;
 
@@ -390,7 +577,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                return ret;
        }
 
-       mmu = gpu->mmu;
+       mmu = gpu->aspace->mmu;
        if (mmu) {
                ret = mmu->funcs->attach(mmu, iommu_ports,
                                ARRAY_SIZE(iommu_ports));
@@ -415,7 +602,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                return -ENOMEM;
        }
 
-       ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
+       ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
                        &adreno_gpu->memptrs_iova);
        if (ret) {
                dev_err(drm->dev, "could not map memptrs: %d\n", ret);
@@ -427,12 +614,98 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 
 void adreno_gpu_cleanup(struct adreno_gpu *gpu)
 {
+       struct msm_gem_address_space *aspace = gpu->base.aspace;
+
        if (gpu->memptrs_bo) {
                if (gpu->memptrs_iova)
-                       msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+                       msm_gem_put_iova(gpu->memptrs_bo, aspace);
                drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
        }
        release_firmware(gpu->pm4);
        release_firmware(gpu->pfp);
+
        msm_gpu_cleanup(&gpu->base);
+
+       if (aspace) {
+               aspace->mmu->funcs->detach(aspace->mmu);
+               msm_gem_address_space_put(aspace);
+       }
+}
+
+static void adreno_snapshot_os(struct msm_gpu *gpu,
+               struct msm_snapshot *snapshot)
+{
+       struct msm_snapshot_linux header;
+
+       memset(&header, 0, sizeof(header));
+
+       header.osid = SNAPSHOT_OS_LINUX_V3;
+       strlcpy(header.release, utsname()->release, sizeof(header.release));
+       strlcpy(header.version, utsname()->version, sizeof(header.version));
+
+       header.seconds = get_seconds();
+       header.ctxtcount = 0;
+
+       SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_OS, 0);
+}
+
+static void adreno_snapshot_ringbuffer(struct msm_gpu *gpu,
+               struct msm_snapshot *snapshot, struct msm_ringbuffer *ring)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct msm_snapshot_ringbuffer header;
+       unsigned int i, end = 0;
+       unsigned int *data = ring->start;
+
+       memset(&header, 0, sizeof(header));
+
+       /*
+        * We only want to copy the active contents of each ring, so find the
+        * last valid entry in the ringbuffer
+        */
+       for (i = 0; i < MSM_GPU_RINGBUFFER_SZ >> 2; i++) {
+               if (data[i])
+                       end = i;
+       }
+
+       /* The dump always starts at 0 */
+       header.start = 0;
+       header.end = end;
+
+       /* This is the number of dwords being dumped */
+       header.count = end + 1;
+
+       /* This is the size of the actual ringbuffer */
+       header.rbsize = MSM_GPU_RINGBUFFER_SZ >> 2;
+
+       header.id = ring->id;
+       header.gpuaddr = ring->iova;
+       header.rptr = get_rptr(adreno_gpu, ring);
+       header.wptr = get_wptr(ring);
+       header.timestamp_queued = adreno_submitted_fence(gpu, ring);
+       header.timestamp_retired = adreno_last_fence(gpu, ring);
+
+       /* Write the header even if the ringbuffer data is empty */
+       if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_RB_V2,
+               header.count))
+               return;
+
+       SNAPSHOT_MEMCPY(snapshot, ring->start, header.count * sizeof(u32));
+}
+
+static void adreno_snapshot_ringbuffers(struct msm_gpu *gpu,
+               struct msm_snapshot *snapshot)
+{
+       struct msm_ringbuffer *ring;
+       int i;
+
+       /* Write a new section for each ringbuffer */
+       FOR_EACH_RING(gpu, ring, i)
+               adreno_snapshot_ringbuffer(gpu, snapshot, ring);
+}
+
+void adreno_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+       adreno_snapshot_os(gpu, snapshot);
+       adreno_snapshot_ringbuffers(gpu, snapshot);
 }
index 1d07511..3046111 100644 (file)
@@ -2,7 +2,7 @@
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2016-2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
 
 #include "msm_gpu.h"
 
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
 #include "adreno_common.xml.h"
 #include "adreno_pm4.xml.h"
 
 #define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
+#define REG_SKIP ~0
+#define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP
+
 /**
  * adreno_regs: List of registers that are used in across all
  * 3D devices. Each device type has different offset value for the same
  * and are indexed by the enumeration values defined in this enum
  */
 enum adreno_regs {
-       REG_ADRENO_CP_DEBUG,
-       REG_ADRENO_CP_ME_RAM_WADDR,
-       REG_ADRENO_CP_ME_RAM_DATA,
-       REG_ADRENO_CP_PFP_UCODE_DATA,
-       REG_ADRENO_CP_PFP_UCODE_ADDR,
-       REG_ADRENO_CP_WFI_PEND_CTR,
        REG_ADRENO_CP_RB_BASE,
+       REG_ADRENO_CP_RB_BASE_HI,
        REG_ADRENO_CP_RB_RPTR_ADDR,
+       REG_ADRENO_CP_RB_RPTR_ADDR_HI,
        REG_ADRENO_CP_RB_RPTR,
        REG_ADRENO_CP_RB_WPTR,
-       REG_ADRENO_CP_PROTECT_CTRL,
-       REG_ADRENO_CP_ME_CNTL,
        REG_ADRENO_CP_RB_CNTL,
-       REG_ADRENO_CP_IB1_BASE,
-       REG_ADRENO_CP_IB1_BUFSZ,
-       REG_ADRENO_CP_IB2_BASE,
-       REG_ADRENO_CP_IB2_BUFSZ,
-       REG_ADRENO_CP_TIMESTAMP,
-       REG_ADRENO_CP_ME_RAM_RADDR,
-       REG_ADRENO_CP_ROQ_ADDR,
-       REG_ADRENO_CP_ROQ_DATA,
-       REG_ADRENO_CP_MERCIU_ADDR,
-       REG_ADRENO_CP_MERCIU_DATA,
-       REG_ADRENO_CP_MERCIU_DATA2,
-       REG_ADRENO_CP_MEQ_ADDR,
-       REG_ADRENO_CP_MEQ_DATA,
-       REG_ADRENO_CP_HW_FAULT,
-       REG_ADRENO_CP_PROTECT_STATUS,
-       REG_ADRENO_SCRATCH_ADDR,
-       REG_ADRENO_SCRATCH_UMSK,
-       REG_ADRENO_SCRATCH_REG2,
-       REG_ADRENO_RBBM_STATUS,
-       REG_ADRENO_RBBM_PERFCTR_CTL,
-       REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
-       REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
-       REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
-       REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
-       REG_ADRENO_RBBM_INT_0_MASK,
-       REG_ADRENO_RBBM_INT_0_STATUS,
-       REG_ADRENO_RBBM_AHB_ERROR_STATUS,
-       REG_ADRENO_RBBM_PM_OVERRIDE2,
-       REG_ADRENO_RBBM_AHB_CMD,
-       REG_ADRENO_RBBM_INT_CLEAR_CMD,
-       REG_ADRENO_RBBM_SW_RESET_CMD,
-       REG_ADRENO_RBBM_CLOCK_CTL,
-       REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
-       REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
-       REG_ADRENO_VPC_DEBUG_RAM_SEL,
-       REG_ADRENO_VPC_DEBUG_RAM_READ,
-       REG_ADRENO_VSC_SIZE_ADDRESS,
-       REG_ADRENO_VFD_CONTROL_0,
-       REG_ADRENO_VFD_INDEX_MAX,
-       REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
-       REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
-       REG_ADRENO_SP_VS_OBJ_START_REG,
-       REG_ADRENO_SP_FS_OBJ_START_REG,
-       REG_ADRENO_PA_SC_AA_CONFIG,
-       REG_ADRENO_SQ_GPR_MANAGEMENT,
-       REG_ADRENO_SQ_INST_STORE_MANAGMENT,
-       REG_ADRENO_TP0_CHICKEN,
-       REG_ADRENO_RBBM_RBBM_CTL,
-       REG_ADRENO_UCHE_INVALIDATE0,
-       REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
-       REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
        REG_ADRENO_REGISTER_MAX,
 };
 
+enum adreno_quirks {
+       ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
+       ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
+};
+
 struct adreno_rev {
        uint8_t  core;
        uint8_t  major;
@@ -128,10 +83,20 @@ struct adreno_info {
 
 const struct adreno_info *adreno_info(struct adreno_rev rev);
 
+#define _sizeof(member) \
+       sizeof(((struct adreno_rbmemptrs *) 0)->member[0])
+
+#define _base(adreno_gpu, member)  \
+       ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
+
+#define rbmemptr(adreno_gpu, index, member) \
+       (_base((adreno_gpu), member) + ((index) * _sizeof(member)))
+
 struct adreno_rbmemptrs {
-       volatile uint32_t rptr;
-       volatile uint32_t wptr;
-       volatile uint32_t fence;
+       volatile uint32_t rptr[MSM_GPU_MAX_RINGS];
+       volatile uint32_t fence[MSM_GPU_MAX_RINGS];
+       volatile uint64_t ttbr0[MSM_GPU_MAX_RINGS];
+       volatile unsigned int contextidr[MSM_GPU_MAX_RINGS];
 };
 
 struct adreno_gpu {
@@ -153,7 +118,7 @@ struct adreno_gpu {
        // different for z180..
        struct adreno_rbmemptrs *memptrs;
        struct drm_gem_object *memptrs_bo;
-       uint32_t memptrs_iova;
+       uint64_t memptrs_iova;
 
        /*
         * Register offsets are different between some GPUs.
@@ -161,16 +126,15 @@ struct adreno_gpu {
         * code (a3xx_gpu.c) and stored in this common location.
         */
        const unsigned int *reg_offsets;
+
+       uint32_t quirks;
+       uint32_t speed_bin;
 };
 #define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
 
 /* platform config data (ie. from DT, or pdata) */
 struct adreno_platform_config {
        struct adreno_rev rev;
-       uint32_t fast_rate, slow_rate, bus_freq;
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-       struct msm_bus_scale_pdata *bus_scale_table;
-#endif
 };
 
 #define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -187,6 +151,9 @@ struct adreno_platform_config {
        __ret;                                             \
 })
 
+#define GPU_OF_NODE(_g) \
+       (((struct msm_drm_private *) \
+         ((_g)->dev->dev_private))->gpu_pdev->dev.of_node)
 
 static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
 {
@@ -234,32 +201,46 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu)
        return gpu->revn == 430;
 }
 
+static inline int adreno_is_a530(struct adreno_gpu *gpu)
+{
+       return gpu->revn == 530;
+}
+
+static inline int adreno_is_a540(struct adreno_gpu *gpu)
+{
+       return gpu->revn == 540;
+}
+
 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
 int adreno_hw_init(struct msm_gpu *gpu);
-uint32_t adreno_last_fence(struct msm_gpu *gpu);
+uint32_t adreno_last_fence(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+uint32_t adreno_submitted_fence(struct msm_gpu *gpu,
+               struct msm_ringbuffer *ring);
 void adreno_recover(struct msm_gpu *gpu);
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-               struct msm_file_private *ctx);
-void adreno_flush(struct msm_gpu *gpu);
-void adreno_idle(struct msm_gpu *gpu);
+int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
 #ifdef CONFIG_DEBUG_FS
 void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
 #endif
 void adreno_dump_info(struct msm_gpu *gpu);
 void adreno_dump(struct msm_gpu *gpu);
-void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords);
+void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
+struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
 
 int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
-               struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs);
+               struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
+               int nr_rings);
 void adreno_gpu_cleanup(struct adreno_gpu *gpu);
 
+void adreno_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
 
 /* ringbuffer helpers (the parts that are adreno specific) */
 
 static inline void
 OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
 {
-       adreno_wait_ring(ring->gpu, cnt+1);
+       adreno_wait_ring(ring, cnt+1);
        OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
 }
 
@@ -267,19 +248,49 @@ OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
 static inline void
 OUT_PKT2(struct msm_ringbuffer *ring)
 {
-       adreno_wait_ring(ring->gpu, 1);
+       adreno_wait_ring(ring, 1);
        OUT_RING(ring, CP_TYPE2_PKT);
 }
 
 static inline void
 OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
 {
-       adreno_wait_ring(ring->gpu, cnt+1);
+       adreno_wait_ring(ring, cnt+1);
        OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
 }
 
+static inline u32 PM4_PARITY(u32 val)
+{
+       return (0x9669 >> (0xF & (val ^
+               (val >> 4) ^ (val >> 8) ^ (val >> 12) ^
+               (val >> 16) ^ ((val) >> 20) ^ (val >> 24) ^
+               (val >> 28)))) & 1;
+}
+
+/* Maximum number of values that can be executed for one opcode */
+#define TYPE4_MAX_PAYLOAD 127
+
+#define PKT4(_reg, _cnt) \
+       (CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
+        (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))
+
+static inline void
+OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
+{
+       adreno_wait_ring(ring, cnt + 1);
+       OUT_RING(ring, PKT4(regindx, cnt));
+}
+
+static inline void
+OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
+{
+       adreno_wait_ring(ring, cnt + 1);
+       OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
+               ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
+}
+
 /*
- * adreno_checkreg_off() - Checks the validity of a register enum
+ * adreno_reg_check() - Checks the validity of a register enum
  * @gpu:               Pointer to struct adreno_gpu
  * @offset_name:       The register enum that is checked
  */
@@ -290,6 +301,16 @@ static inline bool adreno_reg_check(struct adreno_gpu *gpu,
                        !gpu->reg_offsets[offset_name]) {
                BUG();
        }
+
+       /*
+        * REG_SKIP is a special value that tells us that the register in
+        * question isn't implemented on target but don't trigger a BUG(). This
+        * is used to cleanly implement adreno_gpu_write64() and
+        * adreno_gpu_read64() in a generic fashion
+        */
+       if (gpu->reg_offsets[offset_name] == REG_SKIP)
+               return false;
+
        return true;
 }
 
@@ -311,4 +332,40 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
                gpu_write(&gpu->base, reg - 1, data);
 }
 
+static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
+               enum adreno_regs lo, enum adreno_regs hi, u64 data)
+{
+       adreno_gpu_write(gpu, lo, lower_32_bits(data));
+       adreno_gpu_write(gpu, hi, upper_32_bits(data));
+}
+
+static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
+{
+       return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
+}
+
+/*
+ * Given a register and a count, return a value to program into
+ * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
+ * registers starting at _reg.
+ *
+ * The register base needs to be a multiple of the length. If it is not, the
+ * hardware will quietly mask off the bits for you and shift the size. For
+ * example, if you intend the protection to start at 0x07 for a length of 4
+ * (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might
+ * expose registers you intended to protect!
+ */
+#define ADRENO_PROTECT_RW(_reg, _len) \
+       ((1 << 30) | (1 << 29) | \
+       ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
+/*
+ * Same as above, but allow reads over the range. For areas of mixed use (such
+ * as performance counters) this allows us to protect a much larger range with a
+ * single register
+ */
+#define ADRENO_PROTECT_RDONLY(_reg, _len) \
+       ((1 << 29) | \
+       ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
 #endif /* __ADRENO_GPU_H__ */
index d7477ff..9911a18 100644 (file)
@@ -8,14 +8,15 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          ( 109916 bytes, from 2016-02-20 18:44:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2015-09-24 17:30:00)
+- ./adreno.xml               (    431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml  (   1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml          (  32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml (  12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml    (  19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml          (  83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml          ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml          (  81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml         (   1773 bytes, from 2016-10-24 21:12:27)
 
 Copyright (C) 2013-2016 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -58,6 +59,7 @@ enum vgt_event_type {
        RST_PIX_CNT = 13,
        RST_VTX_CNT = 14,
        TILE_FLUSH = 15,
+       STAT_EVENT = 16,
        CACHE_FLUSH_AND_INV_TS_EVENT = 20,
        ZPASS_DONE = 21,
        CACHE_FLUSH_AND_INV_EVENT = 22,
@@ -82,7 +84,6 @@ enum pc_di_primtype {
        DI_PT_LINESTRIP_ADJ = 11,
        DI_PT_TRI_ADJ = 12,
        DI_PT_TRISTRIP_ADJ = 13,
-       DI_PT_PATCHES = 34,
 };
 
 enum pc_di_src_sel {
@@ -110,11 +111,15 @@ enum adreno_pm4_packet_type {
        CP_TYPE1_PKT = 0x40000000,
        CP_TYPE2_PKT = 0x80000000,
        CP_TYPE3_PKT = 0xc0000000,
+       CP_TYPE4_PKT = 0x40000000,
+       CP_TYPE7_PKT = 0x70000000,
 };
 
 enum adreno_pm4_type3_packets {
        CP_ME_INIT = 72,
        CP_NOP = 16,
+       CP_PREEMPT_ENABLE = 28,
+       CP_PREEMPT_TOKEN = 30,
        CP_INDIRECT_BUFFER = 63,
        CP_INDIRECT_BUFFER_PFD = 55,
        CP_WAIT_FOR_IDLE = 38,
@@ -163,6 +168,7 @@ enum adreno_pm4_type3_packets {
        CP_TEST_TWO_MEMS = 113,
        CP_REG_WR_NO_CTXT = 120,
        CP_RECORD_PFP_TIMESTAMP = 17,
+       CP_SET_SECURE_MODE = 102,
        CP_WAIT_FOR_ME = 19,
        CP_SET_DRAW_STATE = 67,
        CP_DRAW_INDX_OFFSET = 56,
@@ -178,6 +184,21 @@ enum adreno_pm4_type3_packets {
        CP_WAIT_MEM_WRITES = 18,
        CP_COND_REG_EXEC = 71,
        CP_MEM_TO_REG = 66,
+       CP_EXEC_CS = 51,
+       CP_PERFCOUNTER_ACTION = 80,
+       CP_SMMU_TABLE_UPDATE = 83,
+       CP_CONTEXT_REG_BUNCH = 92,
+       CP_YIELD_ENABLE = 28,
+       CP_SKIP_IB2_ENABLE_GLOBAL = 29,
+       CP_SKIP_IB2_ENABLE_LOCAL = 35,
+       CP_SET_SUBDRAW_SIZE = 53,
+       CP_SET_VISIBILITY_OVERRIDE = 100,
+       CP_PREEMPT_ENABLE_GLOBAL = 105,
+       CP_PREEMPT_ENABLE_LOCAL = 106,
+       CP_CONTEXT_SWITCH_YIELD = 107,
+       CP_SET_RENDER_MODE = 108,
+       CP_COMPUTE_CHECKPOINT = 110,
+       CP_MEM_TO_MEM = 115,
        IN_IB_PREFETCH_END = 23,
        IN_SUBBLK_PREFETCH = 31,
        IN_INSTR_PREFETCH = 32,
@@ -196,6 +217,7 @@ enum adreno_state_block {
        SB_VERT_SHADER = 4,
        SB_GEOM_SHADER = 5,
        SB_FRAG_SHADER = 6,
+       SB_COMPUTE_SHADER = 7,
 };
 
 enum adreno_state_type {
@@ -389,7 +411,12 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel va
 {
        return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK;
 }
-#define CP_DRAW_INDX_OFFSET_0_TESSELLATE                       0x00000100
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK                   0x00000300
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT                  8
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+       return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK;
+}
 #define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK                 0x00000c00
 #define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT                        10
 static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val)
@@ -533,5 +560,78 @@ static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val)
        return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK;
 }
 
+#define REG_CP_DISPATCH_COMPUTE_0                              0x00000000
+
+#define REG_CP_DISPATCH_COMPUTE_1                              0x00000001
+#define CP_DISPATCH_COMPUTE_1_X__MASK                          0xffffffff
+#define CP_DISPATCH_COMPUTE_1_X__SHIFT                         0
+static inline uint32_t CP_DISPATCH_COMPUTE_1_X(uint32_t val)
+{
+       return ((val) << CP_DISPATCH_COMPUTE_1_X__SHIFT) & CP_DISPATCH_COMPUTE_1_X__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_2                              0x00000002
+#define CP_DISPATCH_COMPUTE_2_Y__MASK                          0xffffffff
+#define CP_DISPATCH_COMPUTE_2_Y__SHIFT                         0
+static inline uint32_t CP_DISPATCH_COMPUTE_2_Y(uint32_t val)
+{
+       return ((val) << CP_DISPATCH_COMPUTE_2_Y__SHIFT) & CP_DISPATCH_COMPUTE_2_Y__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_3                              0x00000003
+#define CP_DISPATCH_COMPUTE_3_Z__MASK                          0xffffffff
+#define CP_DISPATCH_COMPUTE_3_Z__SHIFT                         0
+static inline uint32_t CP_DISPATCH_COMPUTE_3_Z(uint32_t val)
+{
+       return ((val) << CP_DISPATCH_COMPUTE_3_Z__SHIFT) & CP_DISPATCH_COMPUTE_3_Z__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_0                               0x00000000
+
+#define REG_CP_SET_RENDER_MODE_1                               0x00000001
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK                   0xffffffff
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT                  0
+static inline uint32_t CP_SET_RENDER_MODE_1_ADDR_0_LO(uint32_t val)
+{
+       return ((val) << CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT) & CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_2                               0x00000002
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK                   0xffffffff
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT                  0
+static inline uint32_t CP_SET_RENDER_MODE_2_ADDR_0_HI(uint32_t val)
+{
+       return ((val) << CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT) & CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_3                               0x00000003
+#define CP_SET_RENDER_MODE_3_GMEM_ENABLE                       0x00000010
+
+#define REG_CP_SET_RENDER_MODE_4                               0x00000004
+
+#define REG_CP_SET_RENDER_MODE_5                               0x00000005
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK                  0xffffffff
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT                 0
+static inline uint32_t CP_SET_RENDER_MODE_5_ADDR_1_LEN(uint32_t val)
+{
+       return ((val) << CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT) & CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_6                               0x00000006
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK                   0xffffffff
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT                  0
+static inline uint32_t CP_SET_RENDER_MODE_6_ADDR_1_LO(uint32_t val)
+{
+       return ((val) << CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT) & CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_7                               0x00000007
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK                   0xffffffff
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT                  0
+static inline uint32_t CP_SET_RENDER_MODE_7_ADDR_1_HI(uint32_t val)
+{
+       return ((val) << CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT) & CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK;
+}
+
 
 #endif /* ADRENO_PM4_XML */
index b8520aa..fb9617c 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1186,7 +1186,7 @@ static int dsi_ctrl_buffer_deinit(struct dsi_ctrl *dsi_ctrl)
 int dsi_ctrl_buffer_init(struct dsi_ctrl *dsi_ctrl)
 {
        int rc = 0;
-       u32 iova = 0;
+       u64 iova = 0;
 
        dsi_ctrl->tx_cmd_buf = msm_gem_new(dsi_ctrl->drm_dev,
                                           SZ_4K,
index 5f5a373..4cb4764 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -89,7 +89,7 @@ int msm_dsi_manager_phy_enable(int id,
                u32 *clk_pre, u32 *clk_post);
 void msm_dsi_manager_phy_disable(int id);
 int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
-bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len);
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u64 iova, u32 len);
 int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
 void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
 
@@ -143,7 +143,7 @@ int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
 int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
                                        const struct mipi_dsi_msg *msg);
 void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
-                                       u32 iova, u32 len);
+                                       u64 iova, u32 len);
 int msm_dsi_host_enable(struct mipi_dsi_host *host);
 int msm_dsi_host_disable(struct mipi_dsi_host *host);
 int msm_dsi_host_power_on(struct mipi_dsi_host *host);
index 4c49868..4580a6e 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -836,7 +836,7 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
 {
        struct drm_device *dev = msm_host->dev;
        int ret;
-       u32 iova;
+       u64 iova;
 
        mutex_lock(&dev->struct_mutex);
        msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
@@ -974,7 +974,7 @@ static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
 static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
 {
        int ret;
-       u32 iova;
+       uint64_t iova;
        bool triggered;
 
        ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova);
@@ -1750,11 +1750,12 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
        return ret;
 }
 
-void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len)
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u64 iova, u32 len)
 {
        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
 
-       dsi_write(msm_host, REG_DSI_DMA_BASE, iova);
+       /* FIXME: Verify that the iova < 32 bits? */
+       dsi_write(msm_host, REG_DSI_DMA_BASE, lower_32_bits(iova));
        dsi_write(msm_host, REG_DSI_DMA_LEN, len);
        dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
 
index 0455ff7..2091b74 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -774,7 +774,7 @@ restore_host0:
        return ret;
 }
 
-bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len)
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u64 iova, u32 len)
 {
        struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
        struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
new file mode 100644 (file)
index 0000000..6a020b3
--- /dev/null
@@ -0,0 +1,1156 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)    "sde-hdmi:[%s] " fmt, __func__
+
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/gpio.h>
+#include <linux/of_irq.h>
+
+#include "sde_kms.h"
+#include "msm_drv.h"
+#include "sde_hdmi.h"
+
+static DEFINE_MUTEX(sde_hdmi_list_lock);
+static LIST_HEAD(sde_hdmi_list);
+
+static const struct of_device_id sde_hdmi_dt_match[] = {
+       {.compatible = "qcom,hdmi-display"},
+       {}
+};
+
+static ssize_t _sde_hdmi_debugfs_dump_info_read(struct file *file,
+                                               char __user *buff,
+                                               size_t count,
+                                               loff_t *ppos)
+{
+       struct sde_hdmi *display = file->private_data;
+       char *buf;
+       u32 len = 0;
+
+       if (!display)
+               return -ENODEV;
+
+       if (*ppos)
+               return 0;
+
+       buf = kzalloc(SZ_1K, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       len += snprintf(buf, SZ_4K, "name = %s\n", display->name);
+
+       if (copy_to_user(buff, buf, len)) {
+               kfree(buf);
+               return -EFAULT;
+       }
+
+       *ppos += len;
+
+       kfree(buf);
+       return len;
+}
+
+
+static const struct file_operations dump_info_fops = {
+       .open = simple_open,
+       .read = _sde_hdmi_debugfs_dump_info_read,
+};
+
+static int _sde_hdmi_debugfs_init(struct sde_hdmi *display)
+{
+       int rc = 0;
+       struct dentry *dir, *dump_file;
+
+       dir = debugfs_create_dir(display->name, NULL);
+       if (!dir) {
+               rc = -ENOMEM;
+               SDE_ERROR("[%s]debugfs create dir failed, rc = %d\n",
+                       display->name, rc);
+               goto error;
+       }
+
+       dump_file = debugfs_create_file("dump_info",
+                                       0444,
+                                       dir,
+                                       display,
+                                       &dump_info_fops);
+       if (IS_ERR_OR_NULL(dump_file)) {
+               rc = PTR_ERR(dump_file);
+               SDE_ERROR("[%s]debugfs create file failed, rc=%d\n",
+                      display->name, rc);
+               goto error_remove_dir;
+       }
+
+       display->root = dir;
+       return rc;
+error_remove_dir:
+       debugfs_remove(dir);
+error:
+       return rc;
+}
+
+static void _sde_hdmi_debugfs_deinit(struct sde_hdmi *display)
+{
+       debugfs_remove(display->root);
+}
+
+static void _sde_hdmi_phy_reset(struct hdmi *hdmi)
+{
+       unsigned int val;
+
+       val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
+       if (val & HDMI_PHY_CTRL_SW_RESET_LOW)
+               hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+                               val & ~HDMI_PHY_CTRL_SW_RESET);
+        else
+                hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+                               val | HDMI_PHY_CTRL_SW_RESET);
+
+       if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW)
+               hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+                               val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+       else
+               hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+                               val | HDMI_PHY_CTRL_SW_RESET_PLL);
+
+       if (val & HDMI_PHY_CTRL_SW_RESET_LOW)
+               hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+                               val | HDMI_PHY_CTRL_SW_RESET);
+        else
+               hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+                               val & ~HDMI_PHY_CTRL_SW_RESET);
+
+       if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW)
+               hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+                               val | HDMI_PHY_CTRL_SW_RESET_PLL);
+       else
+               hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+                               val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+}
+
+static int _sde_hdmi_gpio_config(struct hdmi *hdmi, bool on)
+{
+       const struct hdmi_platform_config *config = hdmi->config;
+       int ret;
+
+       if (on) {
+               if (config->ddc_clk_gpio != -1) {
+                       ret = gpio_request(config->ddc_clk_gpio,
+                               "HDMI_DDC_CLK");
+                       if (ret) {
+                               SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+                                       "HDMI_DDC_CLK", config->ddc_clk_gpio,
+                                       ret);
+                               goto error_ddc_clk_gpio;
+                       }
+                       gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
+               }
+
+               if (config->ddc_data_gpio != -1) {
+                       ret = gpio_request(config->ddc_data_gpio,
+                               "HDMI_DDC_DATA");
+                       if (ret) {
+                               SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+                                       "HDMI_DDC_DATA", config->ddc_data_gpio,
+                                       ret);
+                               goto error_ddc_data_gpio;
+                       }
+                       gpio_set_value_cansleep(config->ddc_data_gpio, 1);
+               }
+
+               ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
+               if (ret) {
+                       SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+                               "HDMI_HPD", config->hpd_gpio, ret);
+                       goto error_hpd_gpio;
+               }
+               gpio_direction_output(config->hpd_gpio, 1);
+               if (config->hpd5v_gpio != -1) {
+                       ret = gpio_request(config->hpd5v_gpio, "HDMI_HPD_5V");
+                       if (ret) {
+                               SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+                                                 "HDMI_HPD_5V",
+                                                 config->hpd5v_gpio,
+                                                 ret);
+                               goto error_hpd5v_gpio;
+                       }
+                       gpio_set_value_cansleep(config->hpd5v_gpio, 1);
+               }
+
+               if (config->mux_en_gpio != -1) {
+                       ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN");
+                       if (ret) {
+                               SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+                                       "HDMI_MUX_EN", config->mux_en_gpio,
+                                       ret);
+                               goto error_en_gpio;
+                       }
+                       gpio_set_value_cansleep(config->mux_en_gpio, 1);
+               }
+
+               if (config->mux_sel_gpio != -1) {
+                       ret = gpio_request(config->mux_sel_gpio,
+                               "HDMI_MUX_SEL");
+                       if (ret) {
+                               SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+                                       "HDMI_MUX_SEL", config->mux_sel_gpio,
+                                       ret);
+                               goto error_sel_gpio;
+                       }
+                       gpio_set_value_cansleep(config->mux_sel_gpio, 0);
+               }
+
+               if (config->mux_lpm_gpio != -1) {
+                       ret = gpio_request(config->mux_lpm_gpio,
+                                       "HDMI_MUX_LPM");
+                       if (ret) {
+                               SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+                                       "HDMI_MUX_LPM",
+                                       config->mux_lpm_gpio, ret);
+                               goto error_lpm_gpio;
+                       }
+                       gpio_set_value_cansleep(config->mux_lpm_gpio, 1);
+               }
+               SDE_DEBUG("gpio on");
+       } else {
+               if (config->ddc_clk_gpio != -1)
+                       gpio_free(config->ddc_clk_gpio);
+
+               if (config->ddc_data_gpio != -1)
+                       gpio_free(config->ddc_data_gpio);
+
+               gpio_free(config->hpd_gpio);
+
+               if (config->mux_en_gpio != -1) {
+                       gpio_set_value_cansleep(config->mux_en_gpio, 0);
+                       gpio_free(config->mux_en_gpio);
+               }
+
+               if (config->mux_sel_gpio != -1) {
+                       gpio_set_value_cansleep(config->mux_sel_gpio, 1);
+                       gpio_free(config->mux_sel_gpio);
+               }
+
+               if (config->mux_lpm_gpio != -1) {
+                       gpio_set_value_cansleep(config->mux_lpm_gpio, 0);
+                       gpio_free(config->mux_lpm_gpio);
+               }
+               SDE_DEBUG("gpio off");
+       }
+
+       return 0;
+
+error_lpm_gpio:
+       if (config->mux_sel_gpio != -1)
+               gpio_free(config->mux_sel_gpio);
+error_sel_gpio:
+       if (config->mux_en_gpio != -1)
+               gpio_free(config->mux_en_gpio);
+error_en_gpio:
+       gpio_free(config->hpd5v_gpio);
+error_hpd5v_gpio:
+       gpio_free(config->hpd_gpio);
+error_hpd_gpio:
+       if (config->ddc_data_gpio != -1)
+               gpio_free(config->ddc_data_gpio);
+error_ddc_data_gpio:
+       if (config->ddc_clk_gpio != -1)
+               gpio_free(config->ddc_clk_gpio);
+error_ddc_clk_gpio:
+       return ret;
+}
+
+static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi)
+{
+       struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+       const struct hdmi_platform_config *config = hdmi->config;
+       struct device *dev = &hdmi->pdev->dev;
+       uint32_t hpd_ctrl;
+       int i, ret;
+       unsigned long flags;
+
+       for (i = 0; i < config->hpd_reg_cnt; i++) {
+               ret = regulator_enable(hdmi->hpd_regs[i]);
+               if (ret) {
+                       SDE_ERROR("failed to enable hpd regulator: %s (%d)\n",
+                                       config->hpd_reg_names[i], ret);
+                       goto fail;
+               }
+       }
+
+       ret = pinctrl_pm_select_default_state(dev);
+       if (ret) {
+               SDE_ERROR("pinctrl state chg failed: %d\n", ret);
+               goto fail;
+       }
+
+       ret = _sde_hdmi_gpio_config(hdmi, true);
+       if (ret) {
+               SDE_ERROR("failed to configure GPIOs: %d\n", ret);
+               goto fail;
+       }
+
+       for (i = 0; i < config->hpd_clk_cnt; i++) {
+               if (config->hpd_freq && config->hpd_freq[i]) {
+                       ret = clk_set_rate(hdmi->hpd_clks[i],
+                                       config->hpd_freq[i]);
+                       if (ret)
+                               pr_warn("failed to set clk %s (%d)\n",
+                                               config->hpd_clk_names[i], ret);
+               }
+
+               ret = clk_prepare_enable(hdmi->hpd_clks[i]);
+               if (ret) {
+                       SDE_ERROR("failed to enable hpd clk: %s (%d)\n",
+                                       config->hpd_clk_names[i], ret);
+                       goto fail;
+               }
+       }
+
+       hdmi_set_mode(hdmi, false);
+       _sde_hdmi_phy_reset(hdmi);
+       hdmi_set_mode(hdmi, true);
+
+       hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
+
+       /* set timeout to 4.1ms (max) for hardware debounce */
+       spin_lock_irqsave(&hdmi->reg_lock, flags);
+       hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+       hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
+
+       hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+                       HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+
+       /* enable HPD events: */
+       hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
+                       HDMI_HPD_INT_CTRL_INT_CONNECT |
+                       HDMI_HPD_INT_CTRL_INT_EN);
+
+       /* Toggle HPD circuit to trigger HPD sense */
+       hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+                       ~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
+       hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+                       HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+       spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+       return 0;
+
+fail:
+       return ret;
+}
+
+static void _sde_hdmi_hdp_disable(struct sde_hdmi *sde_hdmi)
+{
+       struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+       const struct hdmi_platform_config *config = hdmi->config;
+       struct device *dev = &hdmi->pdev->dev;
+       int i, ret = 0;
+
+       /* Disable HPD interrupt */
+       hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
+
+       hdmi_set_mode(hdmi, false);
+
+       for (i = 0; i < config->hpd_clk_cnt; i++)
+               clk_disable_unprepare(hdmi->hpd_clks[i]);
+
+       ret = _sde_hdmi_gpio_config(hdmi, false);
+       if (ret)
+               pr_warn("failed to unconfigure GPIOs: %d\n", ret);
+
+       ret = pinctrl_pm_select_sleep_state(dev);
+       if (ret)
+               pr_warn("pinctrl state chg failed: %d\n", ret);
+
+       for (i = 0; i < config->hpd_reg_cnt; i++) {
+               ret = regulator_disable(hdmi->hpd_regs[i]);
+               if (ret)
+                       pr_warn("failed to disable hpd regulator: %s (%d)\n",
+                                       config->hpd_reg_names[i], ret);
+       }
+}
+
+static void _sde_hdmi_hotplug_work(struct work_struct *work)
+{
+       struct sde_hdmi *sde_hdmi =
+               container_of(work, struct sde_hdmi, hpd_work);
+       struct drm_connector *connector;
+
+       if (!sde_hdmi || !sde_hdmi->ctrl.ctrl ||
+               !sde_hdmi->ctrl.ctrl->connector) {
+               SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+                               sde_hdmi);
+               return;
+       }
+
+       connector = sde_hdmi->ctrl.ctrl->connector;
+       drm_helper_hpd_irq_event(connector->dev);
+}
+
+static void _sde_hdmi_connector_irq(struct sde_hdmi *sde_hdmi)
+{
+       struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+       uint32_t hpd_int_status, hpd_int_ctrl;
+
+       /* Process HPD: */
+       hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+       hpd_int_ctrl   = hdmi_read(hdmi, REG_HDMI_HPD_INT_CTRL);
+
+       if ((hpd_int_ctrl & HDMI_HPD_INT_CTRL_INT_EN) &&
+                       (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
+               sde_hdmi->connected = !!(hpd_int_status &
+                                       HDMI_HPD_INT_STATUS_CABLE_DETECTED);
+               /* ack & disable (temporarily) HPD events: */
+               hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
+                       HDMI_HPD_INT_CTRL_INT_ACK);
+
+               DRM_DEBUG("status=%04x, ctrl=%04x", hpd_int_status,
+                               hpd_int_ctrl);
+
+               /* detect disconnect if we are connected or visa versa: */
+               hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
+               if (!sde_hdmi->connected)
+                       hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
+               hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
+
+               queue_work(hdmi->workq, &sde_hdmi->hpd_work);
+       }
+}
+
+static irqreturn_t _sde_hdmi_irq(int irq, void *dev_id)
+{
+       struct sde_hdmi *sde_hdmi = dev_id;
+       struct hdmi *hdmi;
+
+       if (!sde_hdmi || !sde_hdmi->ctrl.ctrl) {
+               SDE_ERROR("sde_hdmi=%p or hdmi is NULL\n", sde_hdmi);
+               return IRQ_NONE;
+       }
+       hdmi = sde_hdmi->ctrl.ctrl;
+       /* Process HPD: */
+       _sde_hdmi_connector_irq(sde_hdmi);
+
+       /* Process DDC: */
+       hdmi_i2c_irq(hdmi->i2c);
+
+       /* Process HDCP: */
+       if (hdmi->hdcp_ctrl && hdmi->is_hdcp_supported)
+               hdmi_hdcp_ctrl_irq(hdmi->hdcp_ctrl);
+
+       /* TODO audio.. */
+
+       return IRQ_HANDLED;
+}
+
+int sde_hdmi_get_info(struct msm_display_info *info,
+                               void *display)
+{
+       int rc = 0;
+       struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+       struct hdmi *hdmi = hdmi_display->ctrl.ctrl;
+
+       if (!display || !info) {
+               SDE_ERROR("display=%p or info=%p is NULL\n", display, info);
+               return -EINVAL;
+       }
+
+       mutex_lock(&hdmi_display->display_lock);
+
+       info->intf_type = DRM_MODE_CONNECTOR_HDMIA;
+       info->num_of_h_tiles = 1;
+       info->h_tile_instance[0] = 0;
+       if (hdmi_display->non_pluggable) {
+               info->capabilities = MSM_DISPLAY_CAP_VID_MODE;
+               hdmi_display->connected = true;
+               hdmi->hdmi_mode = true;
+       } else {
+               info->capabilities = MSM_DISPLAY_CAP_HOT_PLUG |
+                               MSM_DISPLAY_CAP_EDID | MSM_DISPLAY_CAP_VID_MODE;
+       }
+       info->is_connected = hdmi_display->connected;
+       info->max_width = 1920;
+       info->max_height = 1080;
+       info->compression = MSM_DISPLAY_COMPRESS_NONE;
+
+       mutex_unlock(&hdmi_display->display_lock);
+       return rc;
+}
+
+u32 sde_hdmi_get_num_of_displays(void)
+{
+       u32 count = 0;
+       struct sde_hdmi *display;
+
+       mutex_lock(&sde_hdmi_list_lock);
+
+       list_for_each_entry(display, &sde_hdmi_list, list)
+               count++;
+
+       mutex_unlock(&sde_hdmi_list_lock);
+       return count;
+}
+
+int sde_hdmi_get_displays(void **display_array, u32 max_display_count)
+{
+       struct sde_hdmi *display;
+       int i = 0;
+
+       SDE_DEBUG("\n");
+
+       if (!display_array || !max_display_count) {
+               if (!display_array)
+                       SDE_ERROR("invalid param\n");
+               return 0;
+       }
+
+       mutex_lock(&sde_hdmi_list_lock);
+       list_for_each_entry(display, &sde_hdmi_list, list) {
+               if (i >= max_display_count)
+                       break;
+               display_array[i++] = display;
+       }
+       mutex_unlock(&sde_hdmi_list_lock);
+
+       return i;
+}
+
+int sde_hdmi_connector_pre_deinit(struct drm_connector *connector,
+               void *display)
+{
+       struct sde_hdmi *sde_hdmi = (struct sde_hdmi *)display;
+
+       if (!sde_hdmi || !sde_hdmi->ctrl.ctrl) {
+               SDE_ERROR("sde_hdmi=%p or hdmi is NULL\n", sde_hdmi);
+               return -EINVAL;
+       }
+
+       _sde_hdmi_hdp_disable(sde_hdmi);
+
+       return 0;
+}
+
+int sde_hdmi_connector_post_init(struct drm_connector *connector,
+               void *info,
+               void *display)
+{
+       int rc = 0;
+       struct sde_hdmi *sde_hdmi = (struct sde_hdmi *)display;
+       struct hdmi *hdmi;
+
+       if (!sde_hdmi) {
+               SDE_ERROR("sde_hdmi is NULL\n");
+               return -EINVAL;
+       }
+
+       hdmi = sde_hdmi->ctrl.ctrl;
+       if (!hdmi) {
+               SDE_ERROR("hdmi is NULL\n");
+               return -EINVAL;
+       }
+
+       if (info)
+               sde_kms_info_add_keystr(info,
+                               "DISPLAY_TYPE",
+                               sde_hdmi->display_type);
+
+       hdmi->connector = connector;
+       INIT_WORK(&sde_hdmi->hpd_work, _sde_hdmi_hotplug_work);
+
+       /* Enable HPD detection */
+       rc = _sde_hdmi_hpd_enable(sde_hdmi);
+       if (rc)
+               SDE_ERROR("failed to enable HPD: %d\n", rc);
+
+       return rc;
+}
+
+enum drm_connector_status
+sde_hdmi_connector_detect(struct drm_connector *connector,
+               bool force,
+               void *display)
+{
+       enum drm_connector_status status = connector_status_unknown;
+       struct msm_display_info info;
+       int rc;
+
+       if (!connector || !display) {
+               SDE_ERROR("connector=%p or display=%p is NULL\n",
+                       connector, display);
+               return status;
+       }
+
+       SDE_DEBUG("\n");
+
+       /* get display dsi_info */
+       memset(&info, 0x0, sizeof(info));
+       rc = sde_hdmi_get_info(&info, display);
+       if (rc) {
+               SDE_ERROR("failed to get display info, rc=%d\n", rc);
+               return connector_status_disconnected;
+       }
+
+       if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
+               status = (info.is_connected ? connector_status_connected :
+                                             connector_status_disconnected);
+       else
+               status = connector_status_connected;
+
+       connector->display_info.width_mm = info.width_mm;
+       connector->display_info.height_mm = info.height_mm;
+
+       return status;
+}
+
+int sde_hdmi_connector_get_modes(struct drm_connector *connector, void *display)
+{
+       struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+       struct hdmi *hdmi;
+       struct edid *edid;
+       struct drm_display_mode *mode, *m;
+       uint32_t hdmi_ctrl;
+       int ret = 0;
+
+       if (!connector || !display) {
+               SDE_ERROR("connector=%p or display=%p is NULL\n",
+                       connector, display);
+               return 0;
+       }
+
+       SDE_DEBUG("\n");
+
+       hdmi = hdmi_display->ctrl.ctrl;
+       if (hdmi_display->non_pluggable) {
+               list_for_each_entry(mode, &hdmi_display->mode_list, head) {
+                       m = drm_mode_duplicate(connector->dev, mode);
+                       if (!m) {
+                               SDE_ERROR("failed to add hdmi mode %dx%d\n",
+                                       mode->hdisplay, mode->vdisplay);
+                               break;
+                       }
+                       drm_mode_probed_add(connector, m);
+               }
+               ret = hdmi_display->num_of_modes;
+       } else {
+               /* Read EDID */
+               hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
+               hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
+
+               edid = drm_get_edid(connector, hdmi->i2c);
+
+               hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
+
+               hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
+               drm_mode_connector_update_edid_property(connector, edid);
+
+               if (edid) {
+                       ret = drm_add_edid_modes(connector, edid);
+                       kfree(edid);
+               }
+       }
+
+       return ret;
+}
+
+enum drm_mode_status sde_hdmi_mode_valid(struct drm_connector *connector,
+               struct drm_display_mode *mode,
+               void *display)
+{
+       struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+       struct hdmi *hdmi;
+       struct msm_drm_private *priv;
+       struct msm_kms *kms;
+       long actual, requested;
+
+       if (!connector || !display || !mode) {
+               SDE_ERROR("connector=%p or display=%p or mode=%p is NULL\n",
+                       connector, display, mode);
+               return 0;
+       }
+
+       SDE_DEBUG("\n");
+
+       hdmi = hdmi_display->ctrl.ctrl;
+       priv = connector->dev->dev_private;
+       kms = priv->kms;
+       requested = 1000 * mode->clock;
+       actual = kms->funcs->round_pixclk(kms,
+                       requested, hdmi->encoder);
+
+       SDE_DEBUG("requested=%ld, actual=%ld", requested, actual);
+
+       if (actual != requested)
+               return MODE_CLOCK_RANGE;
+
+       return MODE_OK;
+}
+
+int sde_hdmi_dev_init(struct sde_hdmi *display)
+{
+       if (!display) {
+               SDE_ERROR("Invalid params\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
+int sde_hdmi_dev_deinit(struct sde_hdmi *display)
+{
+       if (!display) {
+               SDE_ERROR("Invalid params\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int sde_hdmi_bind(struct device *dev, struct device *master, void *data)
+{
+       int rc = 0;
+       struct sde_hdmi_ctrl *display_ctrl = NULL;
+       struct sde_hdmi *display = NULL;
+       struct drm_device *drm = NULL;
+       struct msm_drm_private *priv = NULL;
+       struct platform_device *pdev = to_platform_device(dev);
+
+       SDE_ERROR("E\n");
+       if (!dev || !pdev || !master) {
+               pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
+                       dev, pdev, master);
+               return -EINVAL;
+       }
+
+       drm = dev_get_drvdata(master);
+       display = platform_get_drvdata(pdev);
+       if (!drm || !display) {
+               pr_err("invalid param(s), drm %pK, display %pK\n",
+                          drm, display);
+               return -EINVAL;
+       }
+
+       priv = drm->dev_private;
+       mutex_lock(&display->display_lock);
+
+       rc = _sde_hdmi_debugfs_init(display);
+       if (rc) {
+               SDE_ERROR("[%s]Debugfs init failed, rc=%d\n",
+                               display->name, rc);
+               goto error;
+       }
+
+       display_ctrl = &display->ctrl;
+       display_ctrl->ctrl = priv->hdmi;
+       SDE_ERROR("display_ctrl->ctrl=%p\n", display_ctrl->ctrl);
+       display->drm_dev = drm;
+
+error:
+       mutex_unlock(&display->display_lock);
+       return rc;
+}
+
+
+static void sde_hdmi_unbind(struct device *dev, struct device *master,
+               void *data)
+{
+       struct sde_hdmi *display = NULL;
+
+       if (!dev) {
+               SDE_ERROR("invalid params\n");
+               return;
+       }
+
+       display = platform_get_drvdata(to_platform_device(dev));
+       if (!display) {
+               SDE_ERROR("Invalid display device\n");
+               return;
+       }
+       mutex_lock(&display->display_lock);
+       (void)_sde_hdmi_debugfs_deinit(display);
+       display->drm_dev = NULL;
+       mutex_unlock(&display->display_lock);
+}
+
+static const struct component_ops sde_hdmi_comp_ops = {
+       .bind = sde_hdmi_bind,
+       .unbind = sde_hdmi_unbind,
+};
+
+static int _sde_hdmi_parse_dt_modes(struct device_node *np,
+                                       struct list_head *head,
+                                       u32 *num_of_modes)
+{
+       int rc = 0;
+       struct drm_display_mode *mode;
+       u32 mode_count = 0;
+       struct device_node *node = NULL;
+       struct device_node *root_node = NULL;
+       const char *name;
+       u32 h_front_porch, h_pulse_width, h_back_porch;
+       u32 v_front_porch, v_pulse_width, v_back_porch;
+       bool h_active_high, v_active_high;
+       u32 flags = 0;
+
+       root_node = of_get_child_by_name(np, "qcom,customize-modes");
+       if (!root_node) {
+               root_node = of_parse_phandle(np, "qcom,customize-modes", 0);
+               if (!root_node) {
+                       DRM_INFO("No entry present for qcom,customize-modes");
+                       goto end;
+               }
+       }
+       for_each_child_of_node(root_node, node) {
+               rc = 0;
+               mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+               if (!mode) {
+                       SDE_ERROR("Out of memory\n");
+                       rc =  -ENOMEM;
+                       continue;
+               }
+
+               rc = of_property_read_string(node, "qcom,mode-name",
+                                               &name);
+               if (rc) {
+                       SDE_ERROR("failed to read qcom,mode-name, rc=%d\n", rc);
+                       goto fail;
+               }
+               strlcpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+
+               rc = of_property_read_u32(node, "qcom,mode-h-active",
+                                               &mode->hdisplay);
+               if (rc) {
+                       SDE_ERROR("failed to read h-active, rc=%d\n", rc);
+                       goto fail;
+               }
+
+               rc = of_property_read_u32(node, "qcom,mode-h-front-porch",
+                                               &h_front_porch);
+               if (rc) {
+                       SDE_ERROR("failed to read h-front-porch, rc=%d\n", rc);
+                       goto fail;
+               }
+
+               rc = of_property_read_u32(node, "qcom,mode-h-pulse-width",
+                                               &h_pulse_width);
+               if (rc) {
+                       SDE_ERROR("failed to read h-pulse-width, rc=%d\n", rc);
+                       goto fail;
+               }
+
+               rc = of_property_read_u32(node, "qcom,mode-h-back-porch",
+                                               &h_back_porch);
+               if (rc) {
+                       SDE_ERROR("failed to read h-back-porch, rc=%d\n", rc);
+                       goto fail;
+               }
+
+               h_active_high = of_property_read_bool(node,
+                                               "qcom,mode-h-active-high");
+
+               rc = of_property_read_u32(node, "qcom,mode-v-active",
+                                               &mode->vdisplay);
+               if (rc) {
+                       SDE_ERROR("failed to read v-active, rc=%d\n", rc);
+                       goto fail;
+               }
+
+               rc = of_property_read_u32(node, "qcom,mode-v-front-porch",
+                                               &v_front_porch);
+               if (rc) {
+                       SDE_ERROR("failed to read v-front-porch, rc=%d\n", rc);
+                       goto fail;
+               }
+
+               rc = of_property_read_u32(node, "qcom,mode-v-pulse-width",
+                                               &v_pulse_width);
+               if (rc) {
+                       SDE_ERROR("failed to read v-pulse-width, rc=%d\n", rc);
+                       goto fail;
+               }
+
+               rc = of_property_read_u32(node, "qcom,mode-v-back-porch",
+                                               &v_back_porch);
+               if (rc) {
+                       SDE_ERROR("failed to read v-back-porch, rc=%d\n", rc);
+                       goto fail;
+               }
+
+               v_active_high = of_property_read_bool(node,
+                                               "qcom,mode-v-active-high");
+
+               rc = of_property_read_u32(node, "qcom,mode-refersh-rate",
+                                               &mode->vrefresh);
+               if (rc) {
+                       SDE_ERROR("failed to read refersh-rate, rc=%d\n", rc);
+                       goto fail;
+               }
+
+               rc = of_property_read_u32(node, "qcom,mode-clock-in-khz",
+                                               &mode->clock);
+               if (rc) {
+                       SDE_ERROR("failed to read clock, rc=%d\n", rc);
+                       goto fail;
+               }
+
+               mode->hsync_start = mode->hdisplay + h_front_porch;
+               mode->hsync_end = mode->hsync_start + h_pulse_width;
+               mode->htotal = mode->hsync_end + h_back_porch;
+               mode->vsync_start = mode->vdisplay + v_front_porch;
+               mode->vsync_end = mode->vsync_start + v_pulse_width;
+               mode->vtotal = mode->vsync_end + v_back_porch;
+               if (h_active_high)
+                       flags |= DRM_MODE_FLAG_PHSYNC;
+               else
+                       flags |= DRM_MODE_FLAG_NHSYNC;
+               if (v_active_high)
+                       flags |= DRM_MODE_FLAG_PVSYNC;
+               else
+                       flags |= DRM_MODE_FLAG_NVSYNC;
+               mode->flags = flags;
+
+               if (!rc) {
+                       mode_count++;
+                       list_add_tail(&mode->head, head);
+               }
+
+               SDE_DEBUG("mode[%d] h[%d,%d,%d,%d] v[%d,%d,%d,%d] %d %xH %d\n",
+                       mode_count - 1, mode->hdisplay, mode->hsync_start,
+                       mode->hsync_end, mode->htotal, mode->vdisplay,
+                       mode->vsync_start, mode->vsync_end, mode->vtotal,
+                       mode->vrefresh, mode->flags, mode->clock);
+fail:
+               if (rc) {
+                       kfree(mode);
+                       continue;
+               }
+       }
+
+       if (num_of_modes)
+               *num_of_modes = mode_count;
+
+end:
+       return rc;
+}
+
+static int _sde_hdmi_parse_dt(struct device_node *node,
+                               struct sde_hdmi *display)
+{
+       int rc = 0;
+
+       display->name = of_get_property(node, "label", NULL);
+
+       display->display_type = of_get_property(node,
+                                               "qcom,display-type", NULL);
+       if (!display->display_type)
+               display->display_type = "unknown";
+
+       display->non_pluggable = of_property_read_bool(node,
+                                               "qcom,non-pluggable");
+
+       rc = _sde_hdmi_parse_dt_modes(node, &display->mode_list,
+                                       &display->num_of_modes);
+       if (rc)
+               SDE_ERROR("parse_dt_modes failed rc=%d\n", rc);
+
+       return rc;
+}
+
+static int _sde_hdmi_dev_probe(struct platform_device *pdev)
+{
+       int rc;
+       struct sde_hdmi *display;
+       int ret = 0;
+
+
+       SDE_DEBUG("\n");
+
+       if (!pdev || !pdev->dev.of_node) {
+               SDE_ERROR("pdev not found\n");
+               return -ENODEV;
+       }
+
+       display = devm_kzalloc(&pdev->dev, sizeof(*display), GFP_KERNEL);
+       if (!display)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&display->mode_list);
+       rc = _sde_hdmi_parse_dt(pdev->dev.of_node, display);
+       if (rc)
+               SDE_ERROR("parse dt failed, rc=%d\n", rc);
+
+       mutex_init(&display->display_lock);
+       display->pdev = pdev;
+       platform_set_drvdata(pdev, display);
+       mutex_lock(&sde_hdmi_list_lock);
+       list_add(&display->list, &sde_hdmi_list);
+       mutex_unlock(&sde_hdmi_list_lock);
+       if (!sde_hdmi_dev_init(display)) {
+               ret = component_add(&pdev->dev, &sde_hdmi_comp_ops);
+               if (ret) {
+                       pr_err("component add failed\n");
+                       goto out;
+               }
+       }
+       return 0;
+
+out:
+       if (rc)
+               devm_kfree(&pdev->dev, display);
+       return rc;
+}
+
+static int _sde_hdmi_dev_remove(struct platform_device *pdev)
+{
+       struct sde_hdmi *display;
+       struct sde_hdmi *pos, *tmp;
+       struct drm_display_mode *mode, *n;
+
+       if (!pdev) {
+               SDE_ERROR("Invalid device\n");
+               return -EINVAL;
+       }
+
+       display = platform_get_drvdata(pdev);
+
+       mutex_lock(&sde_hdmi_list_lock);
+       list_for_each_entry_safe(pos, tmp, &sde_hdmi_list, list) {
+               if (pos == display) {
+                       list_del(&display->list);
+                       break;
+               }
+       }
+       mutex_unlock(&sde_hdmi_list_lock);
+
+       list_for_each_entry_safe(mode, n, &display->mode_list, head) {
+               list_del(&mode->head);
+               kfree(mode);
+       }
+
+       platform_set_drvdata(pdev, NULL);
+       devm_kfree(&pdev->dev, display);
+       return 0;
+}
+
+static struct platform_driver sde_hdmi_driver = {
+       .probe = _sde_hdmi_dev_probe,
+       .remove = _sde_hdmi_dev_remove,
+       .driver = {
+               .name = "sde_hdmi",
+               .of_match_table = sde_hdmi_dt_match,
+       },
+};
+
+int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc)
+{
+       int rc = 0;
+       struct msm_drm_private *priv = NULL;
+       struct hdmi *hdmi;
+       struct platform_device *pdev;
+
+       DBG("");
+       if (!display || !display->drm_dev || !enc) {
+               SDE_ERROR("display=%p or enc=%p or drm_dev is NULL\n",
+                       display, enc);
+               return -EINVAL;
+       }
+
+       mutex_lock(&display->display_lock);
+       priv = display->drm_dev->dev_private;
+       hdmi = display->ctrl.ctrl;
+
+       if (!priv || !hdmi) {
+               SDE_ERROR("priv=%p or hdmi=%p is NULL\n",
+                       priv, hdmi);
+               mutex_unlock(&display->display_lock);
+               return -EINVAL;
+       }
+
+       pdev = hdmi->pdev;
+       hdmi->dev = display->drm_dev;
+       hdmi->encoder = enc;
+
+       hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
+
+       hdmi->bridge = hdmi_bridge_init(hdmi);
+       if (IS_ERR(hdmi->bridge)) {
+               rc = PTR_ERR(hdmi->bridge);
+               SDE_ERROR("failed to create HDMI bridge: %d\n", rc);
+               hdmi->bridge = NULL;
+               goto error;
+       }
+       hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+       if (hdmi->irq < 0) {
+               rc = hdmi->irq;
+               SDE_ERROR("failed to get irq: %d\n", rc);
+               goto error;
+       }
+
+       rc = devm_request_irq(&pdev->dev, hdmi->irq,
+                       _sde_hdmi_irq, IRQF_TRIGGER_HIGH,
+                       "sde_hdmi_isr", display);
+       if (rc < 0) {
+               SDE_ERROR("failed to request IRQ%u: %d\n",
+                               hdmi->irq, rc);
+               goto error;
+       }
+
+       enc->bridge = hdmi->bridge;
+       priv->bridges[priv->num_bridges++] = hdmi->bridge;
+
+       mutex_unlock(&display->display_lock);
+       return 0;
+
+error:
+       /* bridge is normally destroyed by drm: */
+       if (hdmi->bridge) {
+               hdmi_bridge_destroy(hdmi->bridge);
+               hdmi->bridge = NULL;
+       }
+       mutex_unlock(&display->display_lock);
+       return rc;
+}
+
+int sde_hdmi_drm_deinit(struct sde_hdmi *display)
+{
+       int rc = 0;
+
+       if (!display) {
+               SDE_ERROR("Invalid params\n");
+               return -EINVAL;
+       }
+
+       return rc;
+}
+
+static int __init sde_hdmi_register(void)
+{
+       int rc = 0;
+
+       DBG("");
+       rc = platform_driver_register(&sde_hdmi_driver);
+       return rc;
+}
+
+static void __exit sde_hdmi_unregister(void)
+{
+       platform_driver_unregister(&sde_hdmi_driver);
+}
+
+module_init(sde_hdmi_register);
+module_exit(sde_hdmi_unregister);
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
new file mode 100644 (file)
index 0000000..ce3937f
--- /dev/null
@@ -0,0 +1,302 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_HDMI_H_
+#define _SDE_HDMI_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/of_device.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "hdmi.h"
+
+/**
+ * struct sde_hdmi_info - defines hdmi display properties
+ * @display_type:      Display type as defined by device tree.
+ * @is_hot_pluggable:  Can panel be hot plugged.
+ * @is_connected:      Is panel connected.
+ * @is_edid_supported: Does panel support reading EDID information.
+ * @width_mm:          Physical width of panel in millimeters.
+ * @height_mm:         Physical height of panel in millimeters.
+ */
+struct sde_hdmi_info {
+       const char *display_type;
+
+       /* HPD */
+       bool is_hot_pluggable;
+       bool is_connected;
+       bool is_edid_supported;
+
+       /* Physical properties */
+       u32 width_mm;
+       u32 height_mm;
+};
+
+/**
+ * struct sde_hdmi_ctrl - hdmi ctrl/phy information for the display
+ * @ctrl:           Handle to the HDMI controller device.
+ * @ctrl_of_node:   pHandle to the HDMI controller device.
+ * @hdmi_ctrl_idx:   HDMI controller instance id.
+ */
+struct sde_hdmi_ctrl {
+       /* controller info */
+       struct hdmi *ctrl;
+       struct device_node *ctrl_of_node;
+       u32 hdmi_ctrl_idx;
+};
+
+/**
+ * struct sde_hdmi - hdmi display information
+ * @pdev:             Pointer to platform device.
+ * @drm_dev:          DRM device associated with the display.
+ * @name:             Name of the display.
+ * @display_type:     Display type as defined in device tree.
+ * @list:             List pointer.
+ * @display_lock:     Mutex for sde_hdmi interface.
+ * @ctrl:             Controller information for HDMI display.
+ * @non_pluggable:    If HDMI display is non pluggable
+ * @num_of_modes:     Number of modes supported by display if non pluggable.
+ * @mode_list:        Mode list if non pluggable.
+ * @connected:        If HDMI display is connected.
+ * @is_tpg_enabled:   TPG state.
+ * @hpd_work:         HPD work structure.
+ * @root:             Debug fs root entry.
+ */
+struct sde_hdmi {
+       struct platform_device *pdev;
+       struct drm_device *drm_dev;
+
+       const char *name;
+       const char *display_type;
+       struct list_head list;
+       struct mutex display_lock;
+
+       struct sde_hdmi_ctrl ctrl;
+
+       bool non_pluggable;
+       u32 num_of_modes;
+       struct list_head mode_list;
+       bool connected;
+       bool is_tpg_enabled;
+
+       struct work_struct hpd_work;
+
+       /* DEBUG FS */
+       struct dentry *root;
+};
+
+#ifdef CONFIG_DRM_SDE_HDMI
+/**
+ * sde_hdmi_get_num_of_displays() - returns number of display devices
+ *                                    supported.
+ *
+ * Return: number of displays.
+ */
+u32 sde_hdmi_get_num_of_displays(void);
+
+/**
+ * sde_hdmi_get_displays() - returns the display list that's available.
+ * @display_array: Pointer to display list
+ * @max_display_count: Number of maximum displays in the list
+ *
+ * Return: number of available displays.
+ */
+int sde_hdmi_get_displays(void **display_array, u32 max_display_count);
+
+/**
+ * sde_hdmi_connector_pre_deinit()- perform additional deinitialization steps
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ *
+ * Return: error code
+ */
+int sde_hdmi_connector_pre_deinit(struct drm_connector *connector,
+               void *display);
+
+/**
+ * sde_hdmi_connector_post_init()- perform additional initialization steps
+ * @connector: Pointer to drm connector structure
+ * @info: Pointer to sde connector info structure
+ * @display: Pointer to private display handle
+ *
+ * Return: error code
+ */
+int sde_hdmi_connector_post_init(struct drm_connector *connector,
+               void *info,
+               void *display);
+
+/**
+ * sde_hdmi_connector_detect()- determine if connector is connected
+ * @connector: Pointer to drm connector structure
+ * @force: Force detect setting from drm framework
+ * @display: Pointer to private display handle
+ *
+ * Return: error code
+ */
+enum drm_connector_status
+sde_hdmi_connector_detect(struct drm_connector *connector,
+               bool force,
+               void *display);
+
+/**
+ * sde_hdmi_connector_get_modes - add drm modes via drm_mode_probed_add()
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+
+ * Returns: Number of modes added
+ */
+int sde_hdmi_connector_get_modes(struct drm_connector *connector,
+               void *display);
+
+/**
+ * sde_hdmi_mode_valid - determine if specified mode is valid
+ * @connector: Pointer to drm connector structure
+ * @mode: Pointer to drm mode structure
+ * @display: Pointer to private display handle
+ *
+ * Returns: Validity status for specified mode
+ */
+enum drm_mode_status sde_hdmi_mode_valid(struct drm_connector *connector,
+               struct drm_display_mode *mode,
+               void *display);
+
+/**
+ * sde_hdmi_dev_init() - Initializes the display device
+ * @display:         Handle to the display.
+ *
+ * Initialization will acquire references to the resources required for the
+ * display hardware to function.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_dev_init(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_dev_deinit() - Deinitializes the display device
+ * @display:        Handle to the display.
+ *
+ * All the resources acquired during device init will be released.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_dev_deinit(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_drm_init() - initializes DRM objects for the display device.
+ * @display:            Handle to the display.
+ * @enc:                Pointer to the encoder object which is connected to the
+ *                     display.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_drm_init(struct sde_hdmi *display,
+                               struct drm_encoder *enc);
+
+/**
+ * sde_hdmi_drm_deinit() - destroys DRM objects associated with the display
+ * @display:        Handle to the display.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_drm_deinit(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_get_info() - returns the display properties
+ * @display:          Handle to the display.
+ * @info:             Pointer to the structure where info is stored.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_get_info(struct msm_display_info *info,
+                               void *display);
+
+#else /*#ifdef CONFIG_DRM_SDE_HDMI*/
+
+static inline u32 sde_hdmi_get_num_of_displays(void)
+{
+       return 0;
+}
+
+static inline int sde_hdmi_get_displays(void **display_array,
+               u32 max_display_count)
+{
+       return 0;
+}
+
+static inline int sde_hdmi_connector_pre_deinit(struct drm_connector *connector,
+               void *display)
+{
+       return 0;
+}
+
+static inline int sde_hdmi_connector_post_init(struct drm_connector *connector,
+               void *info,
+               void *display)
+{
+       return 0;
+}
+
+static inline enum drm_connector_status
+sde_hdmi_connector_detect(struct drm_connector *connector,
+               bool force,
+               void *display)
+{
+       return connector_status_disconnected;
+}
+
+static inline int sde_hdmi_connector_get_modes(struct drm_connector *connector,
+               void *display)
+{
+       return 0;
+}
+
+static inline enum drm_mode_status sde_hdmi_mode_valid(
+               struct drm_connector *connector,
+               struct drm_display_mode *mode,
+               void *display)
+{
+       return MODE_OK;
+}
+
+static inline int sde_hdmi_dev_init(struct sde_hdmi *display)
+{
+       return 0;
+}
+
+static inline int sde_hdmi_dev_deinit(struct sde_hdmi *display)
+{
+       return 0;
+}
+
+static inline int sde_hdmi_drm_init(struct sde_hdmi *display,
+                               struct drm_encoder *enc)
+{
+       return 0;
+}
+
+static inline int sde_hdmi_drm_deinit(struct sde_hdmi *display)
+{
+       return 0;
+}
+
+static inline int sde_hdmi_get_info(struct msm_display_info *info,
+                               void *display)
+{
+       return 0;
+}
+#endif /*#else of CONFIG_DRM_SDE_HDMI*/
+#endif /* _SDE_HDMI_H_ */
index ba59211..7915562 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -234,6 +234,11 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
                dev_warn(&pdev->dev, "failed to init hdcp: disabled\n");
                hdmi->hdcp_ctrl = NULL;
        }
+       /*making it false currently to avoid ifdefs
+        *will get rid of this flag when HDCP SW
+        *support gets added to HDMI DRM driver
+        */
+       hdmi->is_hdcp_supported = false;
 
        return hdmi;
 
@@ -389,7 +394,34 @@ static struct hdmi_platform_config hdmi_tx_8996_config = {
                .hpd_freq      = hpd_clk_freq_8x74,
 };
 
+/*TO DO*/
+static const char *pwr_reg_names_8x98[] = {"core-vdda", "core-vcc"};
+/*TO DO*/
+static const char *hpd_reg_names_8x98[] = {"hpd-gdsc", "hpd-5v"};
+
+static const char *pwr_clk_names_8x98[] = {"core_extp_clk",
+                                  "hpd_alt_iface_clk"};
+
+static const char *hpd_clk_names_8x98[] = {"hpd_iface_clk",
+                                  "hpd_core_clk",
+                                  "hpd_mdp_core_clk",
+                                  "mnoc_clk",
+                                  "hpd_misc_ahb_clk",
+                                  "hpd_bus_clk"};
+
+static unsigned long hpd_clk_freq_8x98[] = {0, 19200000, 0, 0, 0, 0};
+
+static struct hdmi_platform_config hdmi_tx_8998_config = {
+               .phy_init = NULL,
+               HDMI_CFG(pwr_reg, 8x98),
+               HDMI_CFG(hpd_reg, 8x98),
+               HDMI_CFG(pwr_clk, 8x98),
+               HDMI_CFG(hpd_clk, 8x98),
+               .hpd_freq      = hpd_clk_freq_8x98,
+};
+
 static const struct of_device_id dt_match[] = {
+       { .compatible = "qcom,hdmi-tx-8998", .data = &hdmi_tx_8998_config },
        { .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8996_config },
        { .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
        { .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
@@ -425,7 +457,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 #ifdef CONFIG_OF
        struct device_node *of_node = dev->of_node;
        const struct of_device_id *match;
-
        match = of_match_node(dt_match, of_node);
        if (match && match->data) {
                hdmi_cfg = (struct hdmi_platform_config *)match->data;
@@ -443,12 +474,13 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
        hdmi_cfg->mux_en_gpio   = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
        hdmi_cfg->mux_sel_gpio  = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
        hdmi_cfg->mux_lpm_gpio  = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
-
+       hdmi_cfg->hpd5v_gpio    = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd5v");
 #else
        static struct hdmi_platform_config config = {};
        static const char *hpd_clk_names[] = {
                        "core_clk", "master_iface_clk", "slave_iface_clk",
        };
+
        if (cpu_is_apq8064()) {
                static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
                config.phy_init      = hdmi_phy_8960_init;
index e22ddcd..9ce8ff5 100644 (file)
@@ -70,7 +70,7 @@ struct hdmi {
        struct drm_encoder *encoder;
 
        bool hdmi_mode;               /* are we in hdmi mode? */
-
+       bool is_hdcp_supported;
        int irq;
        struct workqueue_struct *workq;
 
@@ -110,7 +110,9 @@ struct hdmi_platform_config {
        int pwr_clk_cnt;
 
        /* gpio's: */
-       int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
+       int ddc_clk_gpio, ddc_data_gpio;
+       int hpd_gpio, mux_en_gpio;
+       int mux_sel_gpio, hpd5v_gpio;
        int mux_lpm_gpio;
 };
 
index 6ac9aa1..caad2be 100644 (file)
@@ -40,7 +40,7 @@ struct mdp4_crtc {
                uint32_t x, y;
 
                /* next cursor to scan-out: */
-               uint32_t next_iova;
+               uint64_t next_iova;
                struct drm_gem_object *next_bo;
 
                /* current cursor being scanned out: */
@@ -133,7 +133,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
                container_of(work, struct mdp4_crtc, unref_cursor_work);
        struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
 
-       msm_gem_put_iova(val, mdp4_kms->id);
+       msm_gem_put_iova(val, mdp4_kms->aspace);
        drm_gem_object_unreference_unlocked(val);
 }
 
@@ -387,25 +387,28 @@ static void update_cursor(struct drm_crtc *crtc)
        if (mdp4_crtc->cursor.stale) {
                struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
                struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
-               uint32_t iova = mdp4_crtc->cursor.next_iova;
+               uint64_t iova = mdp4_crtc->cursor.next_iova;
 
                if (next_bo) {
                        /* take a obj ref + iova ref when we start scanning out: */
                        drm_gem_object_reference(next_bo);
-                       msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);
+                       msm_gem_get_iova_locked(next_bo, mdp4_kms->aspace,
+                               &iova);
 
                        /* enable cursor: */
                        mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
                                        MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
                                        MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
-                       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
+                       /* FIXME: Make sure iova < 32 bits */
+                       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
+                               lower_32_bits(iova));
                        mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
                                        MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
                                        MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
                } else {
                        /* disable cursor: */
                        mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
-                                       mdp4_kms->blank_cursor_iova);
+                               lower_32_bits(mdp4_kms->blank_cursor_iova));
                }
 
                /* and drop the iova ref + obj rev when done scanning out: */
@@ -432,7 +435,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
        struct drm_device *dev = crtc->dev;
        struct drm_gem_object *cursor_bo, *old_bo;
        unsigned long flags;
-       uint32_t iova;
+       uint64_t iova;
        int ret;
 
        if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
@@ -449,7 +452,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
        }
 
        if (cursor_bo) {
-               ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
+               ret = msm_gem_get_iova(cursor_bo, mdp4_kms->aspace, &iova);
                if (ret)
                        goto fail;
        } else {
index f0f66ac..b6cddee 100644 (file)
@@ -17,6 +17,7 @@
 
 
 #include "msm_drv.h"
+#include "msm_gem.h"
 #include "msm_mmu.h"
 #include "mdp4_kms.h"
 
@@ -177,18 +178,35 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
        struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
        unsigned i;
+       struct msm_gem_address_space *aspace = mdp4_kms->aspace;
 
        for (i = 0; i < priv->num_crtcs; i++)
                mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
+
+       if (aspace) {
+               aspace->mmu->funcs->detach(aspace->mmu,
+                               iommu_ports, ARRAY_SIZE(iommu_ports));
+               msm_gem_address_space_destroy(aspace);
+       }
 }
 
 static void mdp4_destroy(struct msm_kms *kms)
 {
+       struct device *dev = mdp4_kms->dev->dev;
+       struct msm_gem_address_space *aspace = mdp4_kms->aspace;
+
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
        if (mdp4_kms->blank_cursor_iova)
-               msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
+               msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace);
        if (mdp4_kms->blank_cursor_bo)
                drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+
+       if (aspace) {
+               aspace->mmu->funcs->detach(aspace->mmu,
+                               iommu_ports, ARRAY_SIZE(iommu_ports));
+               msm_gem_address_space_put(aspace);
+       }
+
        kfree(mdp4_kms);
 }
 
@@ -408,7 +426,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
        struct mdp4_platform_config *config = mdp4_get_config(pdev);
        struct mdp4_kms *mdp4_kms;
        struct msm_kms *kms = NULL;
-       struct msm_mmu *mmu;
+       struct msm_gem_address_space *aspace;
        int ret;
 
        mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
@@ -497,26 +515,30 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
        mdelay(16);
 
        if (config->iommu) {
-               mmu = msm_iommu_new(&pdev->dev, config->iommu);
+               struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, config->iommu);
+
                if (IS_ERR(mmu)) {
                        ret = PTR_ERR(mmu);
                        goto fail;
                }
-               ret = mmu->funcs->attach(mmu, iommu_ports,
+
+               aspace = msm_gem_address_space_create(&pdev->dev,
+                               mmu, "mdp4", 0x1000, 0xffffffff);
+               if (IS_ERR(aspace)) {
+                       ret = PTR_ERR(aspace);
+                       goto fail;
+               }
+
+               mdp4_kms->aspace = aspace;
+
+               ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
                                ARRAY_SIZE(iommu_ports));
                if (ret)
                        goto fail;
        } else {
                dev_info(dev->dev, "no iommu, fallback to phys "
                                "contig buffers for scanout\n");
-               mmu = NULL;
-       }
-
-       mdp4_kms->id = msm_register_mmu(dev, mmu);
-       if (mdp4_kms->id < 0) {
-               ret = mdp4_kms->id;
-               dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
-               goto fail;
+               aspace = NULL;
        }
 
        ret = modeset_init(mdp4_kms);
@@ -535,7 +557,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
                goto fail;
        }
 
-       ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
+       ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace,
                        &mdp4_kms->blank_cursor_iova);
        if (ret) {
                dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
@@ -562,6 +584,7 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
        /* TODO */
        config.max_clk = 266667000;
        config.iommu = iommu_domain_alloc(msm_iommu_get_bus(&dev->dev));
+
 #else
        if (cpu_is_apq8064())
                config.max_clk = 266667000;
index 8a7f6e1..5cf03e5 100644 (file)
@@ -33,8 +33,6 @@ struct mdp4_kms {
        int rev;
 
        /* mapper-id used to request GEM buffer mapped for scanout: */
-       int id;
-
        void __iomem *mmio;
 
        struct regulator *dsi_pll_vdda;
@@ -45,12 +43,13 @@ struct mdp4_kms {
        struct clk *pclk;
        struct clk *lut_clk;
        struct clk *axi_clk;
+       struct msm_gem_address_space *aspace;
 
        struct mdp_irq error_handler;
 
        /* empty/blank cursor bo to use when cursor is "disabled" */
        struct drm_gem_object *blank_cursor_bo;
-       uint32_t blank_cursor_iova;
+       uint64_t blank_cursor_iova;
 };
 #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
 
index 30d57e7..bc1ece2 100644 (file)
@@ -109,7 +109,7 @@ static int mdp4_plane_prepare_fb(struct drm_plane *plane,
                return 0;
 
        DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
-       return msm_framebuffer_prepare(fb, mdp4_kms->id);
+       return msm_framebuffer_prepare(fb, mdp4_kms->aspace);
 }
 
 static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
@@ -123,7 +123,7 @@ static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
                return;
 
        DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
-       msm_framebuffer_cleanup(fb, mdp4_kms->id);
+       msm_framebuffer_cleanup(fb, mdp4_kms->aspace);
 }
 
 
@@ -172,13 +172,13 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
                        MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
 
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
-                       msm_framebuffer_iova(fb, mdp4_kms->id, 0));
+                       msm_framebuffer_iova(fb, mdp4_kms->aspace, 0));
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
-                       msm_framebuffer_iova(fb, mdp4_kms->id, 1));
+                       msm_framebuffer_iova(fb, mdp4_kms->aspace, 1));
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
-                       msm_framebuffer_iova(fb, mdp4_kms->id, 2));
+                       msm_framebuffer_iova(fb, mdp4_kms->aspace, 2));
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
-                       msm_framebuffer_iova(fb, mdp4_kms->id, 3));
+                       msm_framebuffer_iova(fb, mdp4_kms->aspace, 3));
 
        plane->fb = fb;
 }
index bb1225a..89305ad 100644 (file)
@@ -547,7 +547,7 @@ fail:
        if (cfg_handler)
                mdp5_cfg_destroy(cfg_handler);
 
-       return NULL;
+       return ERR_PTR(ret);
 }
 
 static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
index 7f9f4ac..422a9a7 100644 (file)
@@ -171,7 +171,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
                container_of(work, struct mdp5_crtc, unref_cursor_work);
        struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
 
-       msm_gem_put_iova(val, mdp5_kms->id);
+       msm_gem_put_iova(val, mdp5_kms->aspace);
        drm_gem_object_unreference_unlocked(val);
 }
 
@@ -509,7 +509,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
        struct drm_device *dev = crtc->dev;
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct drm_gem_object *cursor_bo, *old_bo = NULL;
-       uint32_t blendcfg, cursor_addr, stride;
+       uint32_t blendcfg, stride;
+       uint64_t cursor_addr;
        int ret, bpp, lm;
        unsigned int depth;
        enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
@@ -536,7 +537,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
        if (!cursor_bo)
                return -ENOENT;
 
-       ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
+       ret = msm_gem_get_iova(cursor_bo, mdp5_kms->aspace, &cursor_addr);
        if (ret)
                return -EINVAL;
 
index f7aebf5..e4e69eb 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -18,6 +18,7 @@
 
 
 #include "msm_drv.h"
+#include "msm_gem.h"
 #include "msm_mmu.h"
 #include "mdp5_kms.h"
 
@@ -130,13 +131,13 @@ static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
 static void mdp5_destroy(struct msm_kms *kms)
 {
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-       struct msm_mmu *mmu = mdp5_kms->mmu;
+       struct msm_gem_address_space *aspace = mdp5_kms->aspace;
 
        mdp5_irq_domain_fini(mdp5_kms);
 
-       if (mmu) {
-               mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
-               mmu->funcs->destroy(mmu);
+       if (aspace) {
+               aspace->mmu->funcs->detach(aspace->mmu);
+               msm_gem_address_space_put(aspace);
        }
 
        if (mdp5_kms->ctlm)
@@ -474,7 +475,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
        struct mdp5_cfg *config;
        struct mdp5_kms *mdp5_kms;
        struct msm_kms *kms = NULL;
-       struct msm_mmu *mmu;
+       struct msm_gem_address_space *aspace;
        uint32_t major, minor;
        int i, ret;
 
@@ -595,34 +596,34 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
        mdelay(16);
 
        if (config->platform.iommu) {
-               mmu = msm_smmu_new(&pdev->dev,
+               struct msm_mmu *mmu = msm_smmu_new(&pdev->dev,
                                MSM_SMMU_DOMAIN_UNSECURE);
                if (IS_ERR(mmu)) {
                        ret = PTR_ERR(mmu);
                        dev_err(dev->dev, "failed to init iommu: %d\n", ret);
                        iommu_domain_free(config->platform.iommu);
+               }
+
+               aspace = msm_gem_smmu_address_space_create(&pdev->dev,
+                               mmu, "mdp5");
+               if (IS_ERR(aspace)) {
+                       ret = PTR_ERR(aspace);
                        goto fail;
                }
 
-               ret = mmu->funcs->attach(mmu, iommu_ports,
+               mdp5_kms->aspace = aspace;
+
+               ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
                                ARRAY_SIZE(iommu_ports));
                if (ret) {
-                       dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
-                       mmu->funcs->destroy(mmu);
+                       dev_err(&pdev->dev, "failed to attach iommu: %d\n",
+                               ret);
                        goto fail;
                }
        } else {
-               dev_info(dev->dev, "no iommu, fallback to phys "
-                               "contig buffers for scanout\n");
-               mmu = NULL;
-       }
-       mdp5_kms->mmu = mmu;
-
-       mdp5_kms->id = msm_register_mmu(dev, mmu);
-       if (mdp5_kms->id < 0) {
-               ret = mdp5_kms->id;
-               dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
-               goto fail;
+               dev_info(&pdev->dev,
+                        "no iommu, fallback to phys contig buffers for scanout\n");
+               aspace = NULL;
        }
 
        ret = modeset_init(mdp5_kms);
index 84f65d4..c1aa86d 100644 (file)
@@ -36,8 +36,7 @@ struct mdp5_kms {
 
 
        /* mapper-id used to request GEM buffer mapped for scanout: */
-       int id;
-       struct msm_mmu *mmu;
+       struct msm_gem_address_space *aspace;
 
        struct mdp5_smp *smp;
        struct mdp5_ctl_manager *ctlm;
index 81cd490..873ab11 100644 (file)
@@ -260,7 +260,7 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane,
                return 0;
 
        DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
-       return msm_framebuffer_prepare(fb, mdp5_kms->id);
+       return msm_framebuffer_prepare(fb, mdp5_kms->aspace);
 }
 
 static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
@@ -274,7 +274,7 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
                return;
 
        DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
-       msm_framebuffer_cleanup(fb, mdp5_kms->id);
+       msm_framebuffer_cleanup(fb, mdp5_kms->aspace);
 }
 
 static int mdp5_plane_atomic_check(struct drm_plane *plane,
@@ -400,13 +400,13 @@ static void set_scanout_locked(struct drm_plane *plane,
                        MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
 
        mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
-                       msm_framebuffer_iova(fb, mdp5_kms->id, 0));
+                       msm_framebuffer_iova(fb, mdp5_kms->aspace, 0));
        mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
-                       msm_framebuffer_iova(fb, mdp5_kms->id, 1));
+                       msm_framebuffer_iova(fb, mdp5_kms->aspace, 1));
        mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
-                       msm_framebuffer_iova(fb, mdp5_kms->id, 2));
+                       msm_framebuffer_iova(fb, mdp5_kms->aspace, 2));
        mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
-                       msm_framebuffer_iova(fb, mdp5_kms->id, 3));
+                       msm_framebuffer_iova(fb, mdp5_kms->aspace, 3));
 
        plane->fb = fb;
 }
index 1dcf88a..f821a81 100644 (file)
@@ -23,6 +23,8 @@
 #include "sde_wb.h"
 
 #define TEARDOWN_DEADLOCK_RETRY_MAX 5
+#include "msm_gem.h"
+#include "msm_mmu.h"
 
 static void msm_fb_output_poll_changed(struct drm_device *dev)
 {
@@ -38,42 +40,6 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
        .atomic_commit = msm_atomic_commit,
 };
 
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
-{
-       struct msm_drm_private *priv = dev->dev_private;
-       int idx = priv->num_mmus++;
-
-       if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
-               return -EINVAL;
-
-       priv->mmus[idx] = mmu;
-
-       return idx;
-}
-
-void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu)
-{
-       struct msm_drm_private *priv = dev->dev_private;
-       int idx;
-
-       if (priv->num_mmus <= 0) {
-               dev_err(dev->dev, "invalid num mmus %d\n", priv->num_mmus);
-               return;
-       }
-
-       idx = priv->num_mmus - 1;
-
-       /* only support reverse-order deallocation */
-       if (priv->mmus[idx] != mmu) {
-               dev_err(dev->dev, "unexpected mmu at idx %d\n", idx);
-               return;
-       }
-
-       --priv->num_mmus;
-       priv->mmus[idx] = 0;
-}
-
-
 #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
 static bool reglog = false;
 MODULE_PARM_DESC(reglog, "Enable register read/write logging");
@@ -588,29 +554,65 @@ static void load_gpu(struct drm_device *dev)
 }
 #endif
 
-static int msm_open(struct drm_device *dev, struct drm_file *file)
+static struct msm_file_private *setup_pagetable(struct msm_drm_private *priv)
 {
        struct msm_file_private *ctx;
 
+       if (!priv || !priv->gpu)
+               return NULL;
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return ERR_PTR(-ENOMEM);
+
+       ctx->aspace = msm_gem_address_space_create_instance(
+               priv->gpu->aspace->mmu, "gpu", 0x100000000, 0x1ffffffff);
+
+       if (IS_ERR(ctx->aspace)) {
+               int ret = PTR_ERR(ctx->aspace);
+
+               /*
+                * If dynamic domains are not supported, everybody uses the
+                * same pagetable
+                */
+               if (ret != -EOPNOTSUPP) {
+                       kfree(ctx);
+                       return ERR_PTR(ret);
+               }
+
+               ctx->aspace = priv->gpu->aspace;
+       }
+
+       ctx->aspace->mmu->funcs->attach(ctx->aspace->mmu, NULL, 0);
+       return ctx;
+}
+
+static int msm_open(struct drm_device *dev, struct drm_file *file)
+{
+       struct msm_file_private *ctx = NULL;
+       struct msm_drm_private *priv;
+       struct msm_kms *kms;
+
+       if (!dev || !dev->dev_private)
+               return -ENODEV;
+
+       priv = dev->dev_private;
        /* For now, load gpu on open.. to avoid the requirement of having
         * firmware in the initrd.
         */
        load_gpu(dev);
 
-       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-       if (!ctx)
-               return -ENOMEM;
+       ctx = setup_pagetable(priv);
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
 
        file->driver_priv = ctx;
 
-       if (dev && dev->dev_private) {
-               struct msm_drm_private *priv = dev->dev_private;
-               struct msm_kms *kms;
+       kms = priv->kms;
+
+       if (kms && kms->funcs && kms->funcs->postopen)
+               kms->funcs->postopen(kms, file);
 
-               kms = priv->kms;
-               if (kms && kms->funcs && kms->funcs->postopen)
-                       kms->funcs->postopen(kms, file);
-       }
        return 0;
 }
 
@@ -633,8 +635,10 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
                kms->funcs->postclose(kms, file);
 
        mutex_lock(&dev->struct_mutex);
-       if (ctx == priv->lastctx)
-               priv->lastctx = NULL;
+       if (ctx && ctx->aspace && ctx->aspace != priv->gpu->aspace) {
+               ctx->aspace->mmu->funcs->detach(ctx->aspace->mmu);
+               msm_gem_address_space_put(ctx->aspace);
+       }
        mutex_unlock(&dev->struct_mutex);
 
        kfree(ctx);
@@ -833,6 +837,13 @@ static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
        return 0;
 }
 
+static int msm_snapshot_show(struct drm_device *dev, struct seq_file *m)
+{
+       struct msm_drm_private *priv = dev->dev_private;
+
+       return msm_snapshot_write(priv->gpu, m);
+}
+
 static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
 {
        struct msm_drm_private *priv = dev->dev_private;
@@ -897,11 +908,22 @@ static int show_locked(struct seq_file *m, void *arg)
        return ret;
 }
 
+static int show_unlocked(struct seq_file *m, void *arg)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       int (*show)(struct drm_device *dev, struct seq_file *m) =
+                       node->info_ent->data;
+
+       return show(dev, m);
+}
+
 static struct drm_info_list msm_debugfs_list[] = {
                {"gpu", show_locked, 0, msm_gpu_show},
                {"gem", show_locked, 0, msm_gem_show},
                { "mm", show_locked, 0, msm_mm_show },
                { "fb", show_locked, 0, msm_fb_show },
+               { "snapshot", show_unlocked, 0, msm_snapshot_show },
 };
 
 static int late_init_minor(struct drm_minor *minor)
@@ -975,14 +997,23 @@ int msm_wait_fence(struct drm_device *dev, uint32_t fence,
                ktime_t *timeout , bool interruptible)
 {
        struct msm_drm_private *priv = dev->dev_private;
+       struct msm_gpu *gpu = priv->gpu;
+       int index = FENCE_RING(fence);
+       uint32_t submitted;
        int ret;
 
-       if (!priv->gpu)
-               return 0;
+       if (!gpu)
+               return -ENXIO;
 
-       if (fence > priv->gpu->submitted_fence) {
+       if (index > MSM_GPU_MAX_RINGS || index >= gpu->nr_rings ||
+               !gpu->rb[index])
+               return -EINVAL;
+
+       submitted = gpu->funcs->submitted_fence(gpu, gpu->rb[index]);
+
+       if (fence > submitted) {
                DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
-                               fence, priv->gpu->submitted_fence);
+                       fence, submitted);
                return -EINVAL;
        }
 
@@ -1012,7 +1043,7 @@ int msm_wait_fence(struct drm_device *dev, uint32_t fence,
 
                if (ret == 0) {
                        DBG("timeout waiting for fence: %u (completed: %u)",
-                                       fence, priv->completed_fence);
+                                       fence, priv->completed_fence[index]);
                        ret = -ETIMEDOUT;
                } else if (ret != -ERESTARTSYS) {
                        ret = 0;
@@ -1026,12 +1057,13 @@ int msm_queue_fence_cb(struct drm_device *dev,
                struct msm_fence_cb *cb, uint32_t fence)
 {
        struct msm_drm_private *priv = dev->dev_private;
+       int index = FENCE_RING(fence);
        int ret = 0;
 
        mutex_lock(&dev->struct_mutex);
        if (!list_empty(&cb->work.entry)) {
                ret = -EINVAL;
-       } else if (fence > priv->completed_fence) {
+       } else if (fence > priv->completed_fence[index]) {
                cb->fence = fence;
                list_add_tail(&cb->work.entry, &priv->fence_cbs);
        } else {
@@ -1046,21 +1078,21 @@ int msm_queue_fence_cb(struct drm_device *dev,
 void msm_update_fence(struct drm_device *dev, uint32_t fence)
 {
        struct msm_drm_private *priv = dev->dev_private;
+       struct msm_fence_cb *cb, *tmp;
+       int index = FENCE_RING(fence);
 
-       mutex_lock(&dev->struct_mutex);
-       priv->completed_fence = max(fence, priv->completed_fence);
-
-       while (!list_empty(&priv->fence_cbs)) {
-               struct msm_fence_cb *cb;
-
-               cb = list_first_entry(&priv->fence_cbs,
-                               struct msm_fence_cb, work.entry);
+       if (index >= MSM_GPU_MAX_RINGS)
+               return;
 
-               if (cb->fence > priv->completed_fence)
-                       break;
+       mutex_lock(&dev->struct_mutex);
+       priv->completed_fence[index] = max(fence, priv->completed_fence[index]);
 
-               list_del_init(&cb->work.entry);
-               queue_work(priv->wq, &cb->work);
+       list_for_each_entry_safe(cb, tmp, &priv->fence_cbs, work.entry) {
+               if (COMPARE_FENCE_LTE(cb->fence,
+                       priv->completed_fence[index])) {
+                       list_del_init(&cb->work.entry);
+                       queue_work(priv->wq, &cb->work);
+               }
        }
 
        mutex_unlock(&dev->struct_mutex);
@@ -1165,16 +1197,28 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
 {
        struct drm_msm_gem_info *args = data;
        struct drm_gem_object *obj;
+       struct msm_file_private *ctx = file->driver_priv;
        int ret = 0;
 
-       if (args->pad)
+       if (args->flags & ~MSM_INFO_FLAGS)
+               return -EINVAL;
+
+       if (!ctx || !ctx->aspace)
                return -EINVAL;
 
        obj = drm_gem_object_lookup(dev, file, args->handle);
        if (!obj)
                return -ENOENT;
 
-       args->offset = msm_gem_mmap_offset(obj);
+       if (args->flags & MSM_INFO_IOVA) {
+               uint64_t iova;
+
+               ret = msm_gem_get_iova(obj, ctx->aspace, &iova);
+               if (!ret)
+                       args->offset = iova;
+       } else {
+               args->offset = msm_gem_mmap_offset(obj);
+       }
 
        drm_gem_object_unreference_unlocked(obj);
 
@@ -1185,13 +1229,24 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
                struct drm_file *file)
 {
        struct drm_msm_wait_fence *args = data;
-       ktime_t timeout = to_ktime(args->timeout);
+       ktime_t timeout;
+
 
        if (args->pad) {
                DRM_ERROR("invalid pad: %08x\n", args->pad);
                return -EINVAL;
        }
 
+       /*
+        * Special case - if the user passes a timeout of 0.0 just return the
+        * current fence status (0 for retired, -EBUSY for active) with no
+        * accompanying kernel logs. This can be a poor man's way of
+        * determining the status of a fence.
+        */
+       if (args->timeout.tv_sec == 0 && args->timeout.tv_nsec == 0)
+               return msm_wait_fence(dev, args->fence, NULL, true);
+
+       timeout = to_ktime(args->timeout);
        return msm_wait_fence(dev, args->fence, &timeout, true);
 }
 
index a267888..d8a4c34 100644 (file)
@@ -32,7 +32,8 @@
 #include <linux/iommu.h>
 #include <linux/types.h>
 #include <linux/of_graph.h>
-#include <linux/mdss_io_util.h>
+#include <linux/of_device.h>
+#include <linux/sde_io_util.h>
 #include <asm/sizes.h>
 #include <linux/kthread.h>
 
@@ -63,6 +64,8 @@ struct msm_mmu;
 struct msm_rd_state;
 struct msm_perf_state;
 struct msm_gem_submit;
+struct msm_gem_address_space;
+struct msm_gem_vma;
 
 #define NUM_DOMAINS    4    /* one for KMS, then one per gpu core (?) */
 #define MAX_CRTCS      8
@@ -72,11 +75,7 @@ struct msm_gem_submit;
 #define MAX_CONNECTORS 8
 
 struct msm_file_private {
-       /* currently we don't do anything useful with this.. but when
-        * per-context address spaces are supported we'd keep track of
-        * the context's page-tables here.
-        */
-       int dummy;
+       struct msm_gem_address_space *aspace;
 };
 
 enum msm_mdp_plane_property {
@@ -248,6 +247,8 @@ struct msm_drm_commit {
        struct kthread_worker worker;
 };
 
+#define MSM_GPU_MAX_RINGS 4
+
 struct msm_drm_private {
 
        struct msm_kms *kms;
@@ -274,11 +275,12 @@ struct msm_drm_private {
 
        /* when we have more than one 'msm_gpu' these need to be an array: */
        struct msm_gpu *gpu;
-       struct msm_file_private *lastctx;
 
        struct drm_fb_helper *fbdev;
 
-       uint32_t next_fence, completed_fence;
+       uint32_t next_fence[MSM_GPU_MAX_RINGS];
+       uint32_t completed_fence[MSM_GPU_MAX_RINGS];
+
        wait_queue_head_t fence_event;
 
        struct msm_rd_state *rd;
@@ -296,9 +298,13 @@ struct msm_drm_private {
        uint32_t pending_crtcs;
        wait_queue_head_t pending_crtcs_event;
 
-       /* registered MMUs: */
-       unsigned int num_mmus;
-       struct msm_mmu *mmus[NUM_DOMAINS];
+       /* Registered address spaces.. currently this is fixed per # of
+        * iommu's.  Ie. one for display block and one for gpu block.
+        * Eventually, to do per-process gpu pagetables, we'll want one
+        * of these per-process.
+        */
+       unsigned int num_aspaces;
+       struct msm_gem_address_space *aspace[NUM_DOMAINS];
 
        unsigned int num_planes;
        struct drm_plane *planes[MAX_PLANES];
@@ -345,6 +351,31 @@ struct msm_format {
        uint32_t pixel_format;
 };
 
+/*
+ * Some GPU targets can support multiple ringbuffers and preempt between them.
+ * In order to do this without massive API changes we will steal two bits from
+ * the top of the fence and use them to identify the ringbuffer, (0x00000001 for
+ * riug 0, 0x40000001 for ring 1, 0x50000001 for ring 2, etc). If you are going
+ * to do a fence comparision you have to make sure you are only comparing
+ * against fences from the same ring, but since fences within a ringbuffer are
+ * still contigious you can still use straight comparisons (i.e 0x40000001 is
+ * older than 0x40000002). Mathmatically there will be 0x3FFFFFFF timestamps
+ * per ring or ~103 days of 120 interrupts per second (two interrupts per frame
+ * at 60 FPS).
+ */
+#define FENCE_RING(_fence) ((_fence >> 30) & 3)
+#define FENCE(_ring, _fence) ((((_ring) & 3) << 30) | ((_fence) & 0x3FFFFFFF))
+
+static inline bool COMPARE_FENCE_LTE(uint32_t a, uint32_t b)
+{
+       return ((FENCE_RING(a) == FENCE_RING(b)) && a <= b);
+}
+
+static inline bool COMPARE_FENCE_LT(uint32_t a, uint32_t b)
+{
+       return ((FENCE_RING(a) == FENCE_RING(b)) && a < b);
+}
+
 /* callback from wq once fence has passed: */
 struct msm_fence_cb {
        struct work_struct work;
@@ -362,15 +393,34 @@ void __msm_fence_worker(struct work_struct *work);
 int msm_atomic_commit(struct drm_device *dev,
                struct drm_atomic_state *state, bool async);
 
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-
 int msm_wait_fence(struct drm_device *dev, uint32_t fence,
                ktime_t *timeout, bool interruptible);
 int msm_queue_fence_cb(struct drm_device *dev,
                struct msm_fence_cb *cb, uint32_t fence);
 void msm_update_fence(struct drm_device *dev, uint32_t fence);
 
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+               struct msm_gem_vma *vma, struct sg_table *sgt,
+               void *priv);
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+               struct msm_gem_vma *vma, struct sg_table *sgt,
+               void *priv, unsigned int flags);
+
+void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
+
+/* For GPU and legacy display */
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+               const char *name);
+struct msm_gem_address_space *
+msm_gem_address_space_create_instance(struct msm_mmu *parent, const char *name,
+               uint64_t start, uint64_t end);
+
+/* For SDE  display */
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+               const char *name);
+
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                struct drm_file *file);
 
@@ -379,13 +429,16 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
-               uint32_t *iova);
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_get_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace, uint64_t *iova);
+uint64_t msm_gem_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace);
 struct page **msm_gem_get_pages(struct drm_gem_object *obj);
 void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj, int id);
+void msm_gem_put_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace);
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                struct drm_mode_create_dumb *args);
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -416,9 +469,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
                uint32_t size, struct sg_table *sgt);
 
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+               struct msm_gem_address_space *aspace);
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+               struct msm_gem_address_space *aspace);
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+               struct msm_gem_address_space *aspace, int plane);
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
 const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
 struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
@@ -502,7 +558,8 @@ u32 msm_readl(const void __iomem *addr);
 static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
 {
        struct msm_drm_private *priv = dev->dev_private;
-       return priv->completed_fence >= fence;
+
+       return priv->completed_fence[FENCE_RING(fence)] >= fence;
 }
 
 static inline int align_pitch(int width, int bpp)
index dca4de3..a3f0392 100644 (file)
@@ -92,15 +92,16 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
  * should be fine, since only the scanout (mdpN) side of things needs
  * this, the gpu doesn't care about fb's.
  */
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+               struct msm_gem_address_space *aspace)
 {
        struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
        int ret, i, n = drm_format_num_planes(fb->pixel_format);
-       uint32_t iova;
+       uint64_t iova;
 
        for (i = 0; i < n; i++) {
-               ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
-               DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
+               ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
+               DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
                if (ret)
                        return ret;
        }
@@ -108,21 +109,30 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
        return 0;
 }
 
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+               struct msm_gem_address_space *aspace)
 {
        struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
        int i, n = drm_format_num_planes(fb->pixel_format);
 
        for (i = 0; i < n; i++)
-               msm_gem_put_iova(msm_fb->planes[i], id);
+               msm_gem_put_iova(msm_fb->planes[i], aspace);
 }
 
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
+/* FIXME: Leave this as a uint32_t and just return the lower 32 bits? */
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+               struct msm_gem_address_space *aspace, int plane)
 {
        struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+       uint64_t iova;
+
        if (!msm_fb->planes[plane])
                return 0;
-       return msm_gem_iova(msm_fb->planes[plane], id) + fb->offsets[plane];
+
+       iova = msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
+
+       /* FIXME: Make sure it is < 32 bits */
+       return lower_32_bits(iova);
 }
 
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
index 3f6ec07..cf12725 100644 (file)
@@ -85,7 +85,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
        struct drm_framebuffer *fb = NULL;
        struct fb_info *fbi = NULL;
        struct drm_mode_fb_cmd2 mode_cmd = {0};
-       uint32_t paddr;
+       uint64_t paddr;
        int ret, size;
 
        DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
@@ -160,11 +160,12 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
        drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
        drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
 
-       dev->mode_config.fb_base = paddr;
+       /* FIXME: Verify paddr < 32 bits? */
+       dev->mode_config.fb_base = lower_32_bits(paddr);
 
        fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
        fbi->screen_size = fbdev->bo->size;
-       fbi->fix.smem_start = paddr;
+       fbi->fix.smem_start = lower_32_bits(paddr);
        fbi->fix.smem_len = fbdev->bo->size;
 
        DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
index 4e69fba..63128d1 100644 (file)
 #include "msm_gpu.h"
 #include "msm_mmu.h"
 
+static void *get_dmabuf_ptr(struct drm_gem_object *obj)
+{
+       return (obj && obj->import_attach) ? obj->import_attach->dmabuf : NULL;
+}
+
 static dma_addr_t physaddr(struct drm_gem_object *obj)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -268,35 +273,67 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
        return offset;
 }
 
+static void obj_remove_domain(struct msm_gem_vma *domain)
+{
+       if (domain) {
+               list_del(&domain->list);
+               kfree(domain);
+       }
+}
+
 static void
 put_iova(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       struct msm_drm_private *priv = obj->dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
-       int id;
+       struct msm_gem_vma *domain, *tmp;
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-       for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
-               struct msm_mmu *mmu = priv->mmus[id];
+       list_for_each_entry_safe(domain, tmp, &msm_obj->domains, list) {
+               if (iommu_present(&platform_bus_type)) {
+                       msm_gem_unmap_vma(domain->aspace, domain,
+                               msm_obj->sgt, get_dmabuf_ptr(obj));
+               }
 
-               if (!mmu || !msm_obj->domain[id].iova)
-                       continue;
+               obj_remove_domain(domain);
+       }
+}
 
-               if (obj->import_attach) {
-                       if (mmu->funcs->unmap_dma_buf)
-                               mmu->funcs->unmap_dma_buf(mmu, msm_obj->sgt,
-                                       obj->import_attach->dmabuf,
-                                       DMA_BIDIRECTIONAL);
-               } else
-                       mmu->funcs->unmap_sg(mmu, msm_obj->sgt,
-                               DMA_BIDIRECTIONAL);
+static struct msm_gem_vma *obj_add_domain(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace)
+{
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       struct msm_gem_vma *domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+
+       if (!domain)
+               return ERR_PTR(-ENOMEM);
+
+       domain->aspace = aspace;
+
+       list_add_tail(&domain->list, &msm_obj->domains);
+
+       return domain;
+}
+
+static struct msm_gem_vma *obj_get_domain(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace)
+{
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       struct msm_gem_vma *domain;
 
-               msm_obj->domain[id].iova = 0;
+       list_for_each_entry(domain, &msm_obj->domains, list) {
+               if (domain->aspace == aspace)
+                       return domain;
        }
+
+       return NULL;
 }
 
+#ifndef IOMMU_PRIV
+#define IOMMU_PRIV 0
+#endif
+
 /* should be called under struct_mutex.. although it can be called
  * from atomic context without struct_mutex to acquire an extra
  * iova ref if you know one is already held.
@@ -304,69 +341,64 @@ put_iova(struct drm_gem_object *obj)
  * That means when I do eventually need to add support for unpinning
  * the refcnt counter needs to be atomic_t.
  */
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
-               uint32_t *iova)
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace, uint64_t *iova)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       struct page **pages;
+       struct msm_gem_vma *domain;
        int ret = 0;
 
-       if (!msm_obj->domain[id].iova) {
-               struct msm_drm_private *priv = obj->dev->dev_private;
-               struct page **pages = get_pages(obj);
+       if (!iommu_present(&platform_bus_type)) {
+               pages = get_pages(obj);
 
                if (IS_ERR(pages))
                        return PTR_ERR(pages);
 
-               if (iommu_present(&platform_bus_type)) {
-                       struct msm_mmu *mmu = priv->mmus[id];
-
-                       if (WARN_ON(!mmu))
-                               return -EINVAL;
-
-                       if (obj->import_attach && mmu->funcs->map_dma_buf) {
-                               ret = mmu->funcs->map_dma_buf(mmu, msm_obj->sgt,
-                                               obj->import_attach->dmabuf,
-                                               DMA_BIDIRECTIONAL);
-                               if (ret) {
-                                       DRM_ERROR("Unable to map dma buf\n");
-                                       return ret;
-                               }
-                       } else {
-                               ret = mmu->funcs->map_sg(mmu, msm_obj->sgt,
-                                       DMA_BIDIRECTIONAL);
-                       }
-
-                       if (!ret)
-                               msm_obj->domain[id].iova =
-                                       sg_dma_address(msm_obj->sgt->sgl);
-               } else {
-                       WARN_ONCE(1, "physical address being used\n");
-                       msm_obj->domain[id].iova = physaddr(obj);
+               *iova = (uint64_t) physaddr(obj);
+               return 0;
+       }
+
+       domain = obj_get_domain(obj, aspace);
+
+       if (!domain) {
+               domain = obj_add_domain(obj, aspace);
+               if (IS_ERR(domain))
+                       return PTR_ERR(domain);
+
+               pages = get_pages(obj);
+               if (IS_ERR(pages)) {
+                       obj_remove_domain(domain);
+                       return PTR_ERR(pages);
                }
+
+               ret = msm_gem_map_vma(aspace, domain, msm_obj->sgt,
+                       get_dmabuf_ptr(obj), msm_obj->flags);
        }
 
        if (!ret)
-               *iova = msm_obj->domain[id].iova;
+               *iova = domain->iova;
+       else
+               obj_remove_domain(domain);
 
        return ret;
 }
 
 /* get iova, taking a reference.  Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace, uint64_t *iova)
 {
-       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       struct msm_gem_vma *domain;
        int ret;
 
-       /* this is safe right now because we don't unmap until the
-        * bo is deleted:
-        */
-       if (msm_obj->domain[id].iova) {
-               *iova = msm_obj->domain[id].iova;
+       domain = obj_get_domain(obj, aspace);
+       if (domain) {
+               *iova = domain->iova;
                return 0;
        }
 
        mutex_lock(&obj->dev->struct_mutex);
-       ret = msm_gem_get_iova_locked(obj, id, iova);
+       ret = msm_gem_get_iova_locked(obj, aspace, iova);
        mutex_unlock(&obj->dev->struct_mutex);
        return ret;
 }
@@ -374,14 +406,18 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
 /* get iova without taking a reference, used in places where you have
  * already done a 'msm_gem_get_iova()'.
  */
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
+uint64_t msm_gem_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace)
 {
-       struct msm_gem_object *msm_obj = to_msm_bo(obj);
-       WARN_ON(!msm_obj->domain[id].iova);
-       return msm_obj->domain[id].iova;
+       struct msm_gem_vma *domain = obj_get_domain(obj, aspace);
+
+       WARN_ON(!domain);
+
+       return domain ? domain->iova : 0;
 }
 
-void msm_gem_put_iova(struct drm_gem_object *obj, int id)
+void msm_gem_put_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace)
 {
        // XXX TODO ..
        // NOTE: probably don't need a _locked() version.. we wouldn't
@@ -515,14 +551,21 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       struct msm_gem_vma *domain;
        uint64_t off = drm_vma_node_start(&obj->vma_node);
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-       seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %zu\n",
+       seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p\t",
                        msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
                        msm_obj->read_fence, msm_obj->write_fence,
                        obj->name, obj->refcount.refcount.counter,
-                       off, msm_obj->vaddr, obj->size);
+                       off, msm_obj->vaddr);
+
+       /* FIXME: we need to print the address space here too */
+       list_for_each_entry(domain, &msm_obj->domains, list)
+               seq_printf(m, " %08llx", domain->iova);
+
+       seq_puts(m, "\n");
 }
 
 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
@@ -559,7 +602,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 
        if (obj->import_attach) {
                if (msm_obj->vaddr)
-                       dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
+                       dma_buf_vunmap(obj->import_attach->dmabuf,
+                               msm_obj->vaddr);
 
                /* Don't drop the pages for imported dmabuf, as they are not
                 * ours, just free the array we allocated:
@@ -613,7 +657,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
 {
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj;
-       unsigned sz;
        bool use_vram = false;
 
        switch (flags & MSM_BO_CACHE_MASK) {
@@ -635,16 +678,16 @@ static int msm_gem_new_impl(struct drm_device *dev,
        if (WARN_ON(use_vram && !priv->vram.size))
                return -EINVAL;
 
-       sz = sizeof(*msm_obj);
-       if (use_vram)
-               sz += sizeof(struct drm_mm_node);
-
-       msm_obj = kzalloc(sz, GFP_KERNEL);
+       msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
        if (!msm_obj)
                return -ENOMEM;
 
-       if (use_vram)
-               msm_obj->vram_node = (void *)&msm_obj[1];
+       if (use_vram) {
+               struct msm_gem_vma *domain = obj_add_domain(&msm_obj->base, 0);
+
+               if (!IS_ERR(domain))
+                       msm_obj->vram_node = &domain->node;
+       }
 
        msm_obj->flags = flags;
 
@@ -652,6 +695,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
        reservation_object_init(msm_obj->resv);
 
        INIT_LIST_HEAD(&msm_obj->submit_entry);
+       INIT_LIST_HEAD(&msm_obj->domains);
+
        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 
        *obj = &msm_obj->base;
index 2e4ae6b..ac46c47 100644 (file)
 #ifndef __MSM_GEM_H__
 #define __MSM_GEM_H__
 
+#include <linux/kref.h>
 #include <linux/reservation.h>
 #include "msm_drv.h"
 
 /* Additional internal-use only BO flags: */
 #define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
 
+struct msm_gem_aspace_ops {
+       int (*map)(struct msm_gem_address_space *, struct msm_gem_vma *,
+               struct sg_table *sgt, void *priv, unsigned int flags);
+
+       void (*unmap)(struct msm_gem_address_space *, struct msm_gem_vma *,
+               struct sg_table *sgt, void *priv);
+
+       void (*destroy)(struct msm_gem_address_space *);
+};
+
+struct msm_gem_address_space {
+       const char *name;
+       struct msm_mmu *mmu;
+       const struct msm_gem_aspace_ops *ops;
+       struct kref kref;
+};
+
+struct msm_gem_vma {
+       /* Node used by the GPU address space, but not the SDE address space */
+       struct drm_mm_node node;
+       struct msm_gem_address_space *aspace;
+       uint64_t iova;
+       struct list_head list;
+};
+
 struct msm_gem_object {
        struct drm_gem_object base;
 
@@ -52,9 +78,7 @@ struct msm_gem_object {
        struct sg_table *sgt;
        void *vaddr;
 
-       struct {
-               dma_addr_t iova;
-       } domain[NUM_DOMAINS];
+       struct list_head domains;
 
        /* normally (resv == &_resv) except for imported bo's */
        struct reservation_object *resv;
@@ -94,24 +118,25 @@ static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
  */
 struct msm_gem_submit {
        struct drm_device *dev;
-       struct msm_gpu *gpu;
+       struct msm_gem_address_space *aspace;
        struct list_head node;   /* node in gpu submit_list */
        struct list_head bo_list;
        struct ww_acquire_ctx ticket;
        uint32_t fence;
+       int ring;
        bool valid;
        unsigned int nr_cmds;
        unsigned int nr_bos;
        struct {
                uint32_t type;
                uint32_t size;  /* in dwords */
-               uint32_t iova;
+               uint64_t iova;
                uint32_t idx;   /* cmdstream buffer idx in bos[] */
        } cmd[MAX_CMDS];
        struct {
                uint32_t flags;
                struct msm_gem_object *obj;
-               uint32_t iova;
+               uint64_t iova;
        } bos[0];
 };
 
index 1847f83..0566cef 100644 (file)
@@ -34,7 +34,7 @@ static inline void __user *to_user_ptr(u64 address)
 }
 
 static struct msm_gem_submit *submit_create(struct drm_device *dev,
-               struct msm_gpu *gpu, int nr)
+               struct msm_gem_address_space *aspace, int nr)
 {
        struct msm_gem_submit *submit;
        int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
@@ -42,7 +42,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
        submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
        if (submit) {
                submit->dev = dev;
-               submit->gpu = gpu;
+               submit->aspace = aspace;
 
                /* initially, until copy_from_user() and bo lookup succeeds: */
                submit->nr_bos = 0;
@@ -90,7 +90,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
                        pagefault_disable();
                }
 
-               if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
+               if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
+                       !(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
                        DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
                        ret = -EINVAL;
                        goto out_unlock;
@@ -141,7 +142,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
        struct msm_gem_object *msm_obj = submit->bos[i].obj;
 
        if (submit->bos[i].flags & BO_PINNED)
-               msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
+               msm_gem_put_iova(&msm_obj->base, submit->aspace);
 
        if (submit->bos[i].flags & BO_LOCKED)
                ww_mutex_unlock(&msm_obj->resv->lock);
@@ -162,7 +163,7 @@ retry:
 
        for (i = 0; i < submit->nr_bos; i++) {
                struct msm_gem_object *msm_obj = submit->bos[i].obj;
-               uint32_t iova;
+               uint64_t iova;
 
                if (slow_locked == i)
                        slow_locked = -1;
@@ -180,7 +181,7 @@ retry:
 
                /* if locking succeeded, pin bo: */
                ret = msm_gem_get_iova_locked(&msm_obj->base,
-                               submit->gpu->id, &iova);
+                               submit->aspace, &iova);
 
                /* this would break the logic in the fail path.. there is no
                 * reason for this to happen, but just to be on the safe side
@@ -229,7 +230,7 @@ fail:
 }
 
 static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
-               struct msm_gem_object **obj, uint32_t *iova, bool *valid)
+               struct msm_gem_object **obj, uint64_t *iova, bool *valid)
 {
        if (idx >= submit->nr_bos) {
                DRM_ERROR("invalid buffer index: %u (out of %u)\n",
@@ -275,7 +276,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
                struct drm_msm_gem_submit_reloc submit_reloc;
                void __user *userptr =
                        to_user_ptr(relocs + (i * sizeof(submit_reloc)));
-               uint32_t iova, off;
+               uint64_t iova;
+               uint32_t off;
                bool valid;
 
                ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
@@ -347,17 +349,19 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        /* for now, we just have 3d pipe.. eventually this would need to
         * be more clever to dispatch to appropriate gpu module:
         */
-       if (args->pipe != MSM_PIPE_3D0)
+       if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
                return -EINVAL;
 
        gpu = priv->gpu;
+       if (!gpu)
+               return -ENXIO;
 
        if (args->nr_cmds > MAX_CMDS)
                return -EINVAL;
 
        mutex_lock(&dev->struct_mutex);
 
-       submit = submit_create(dev, gpu, args->nr_bos);
+       submit = submit_create(dev, ctx->aspace, args->nr_bos);
        if (!submit) {
                ret = -ENOMEM;
                goto out;
@@ -376,7 +380,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                void __user *userptr =
                        to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
                struct msm_gem_object *msm_obj;
-               uint32_t iova;
+               uint64_t iova;
 
                ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
                if (ret) {
@@ -408,8 +412,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                        goto out;
                }
 
-               if ((submit_cmd.size + submit_cmd.submit_offset) >=
-                               msm_obj->base.size) {
+               if (!(submit_cmd.size) ||
+                       ((submit_cmd.size + submit_cmd.submit_offset) >
+                               msm_obj->base.size)) {
                        DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
                        ret = -EINVAL;
                        goto out;
@@ -431,7 +436,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 
        submit->nr_cmds = i;
 
-       ret = msm_gpu_submit(gpu, submit, ctx);
+       /* Clamp the user submitted ring to the range of available rings */
+       submit->ring = clamp_t(uint32_t,
+               (args->flags & MSM_SUBMIT_RING_MASK) >> MSM_SUBMIT_RING_SHIFT,
+               0, gpu->nr_rings - 1);
+
+       ret = msm_gpu_submit(gpu, submit);
 
        args->fence = submit->fence;
 
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
new file mode 100644 (file)
index 0000000..7ca9683
--- /dev/null
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+static void
+msm_gem_address_space_destroy(struct kref *kref)
+{
+       struct msm_gem_address_space *aspace = container_of(kref,
+                       struct msm_gem_address_space, kref);
+
+       if (aspace->ops->destroy)
+               aspace->ops->destroy(aspace);
+
+       kfree(aspace);
+}
+
+void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
+{
+       if (aspace)
+               kref_put(&aspace->kref, msm_gem_address_space_destroy);
+}
+
+/* SDE address space operations */
+static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
+               struct msm_gem_vma *vma, struct sg_table *sgt,
+               void *priv)
+{
+       struct dma_buf *buf = priv;
+
+       if (buf)
+               aspace->mmu->funcs->unmap_dma_buf(aspace->mmu,
+                       sgt, buf, DMA_BIDIRECTIONAL);
+       else
+               aspace->mmu->funcs->unmap_sg(aspace->mmu, sgt,
+                       DMA_BIDIRECTIONAL);
+
+       vma->iova = 0;
+
+       msm_gem_address_space_put(aspace);
+}
+
+
+static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace,
+               struct msm_gem_vma *vma, struct sg_table *sgt,
+               void *priv, unsigned int flags)
+{
+       struct dma_buf *buf = priv;
+       int ret;
+
+       if (buf)
+               ret = aspace->mmu->funcs->map_dma_buf(aspace->mmu, sgt, buf,
+                       DMA_BIDIRECTIONAL);
+       else
+               ret = aspace->mmu->funcs->map_sg(aspace->mmu, sgt,
+                       DMA_BIDIRECTIONAL);
+
+       if (!ret)
+               vma->iova = sg_dma_address(sgt->sgl);
+
+       /* Get a reference to the aspace to keep it around */
+       kref_get(&aspace->kref);
+
+       return ret;
+}
+
+static const struct msm_gem_aspace_ops smmu_aspace_ops = {
+       .map = smmu_aspace_map_vma,
+       .unmap = smmu_aspace_unmap_vma,
+};
+
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+               const char *name)
+{
+       struct msm_gem_address_space *aspace;
+
+       if (!mmu)
+               return ERR_PTR(-EINVAL);
+
+       aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+       if (!aspace)
+               return ERR_PTR(-ENOMEM);
+
+       aspace->name = name;
+       aspace->mmu = mmu;
+       aspace->ops = &smmu_aspace_ops;
+
+       kref_init(&aspace->kref);
+
+       return aspace;
+}
+
+/* GPU address space operations */
+struct msm_iommu_aspace {
+       struct msm_gem_address_space base;
+       struct drm_mm mm;
+};
+
+#define to_iommu_aspace(aspace) \
+       ((struct msm_iommu_aspace *) \
+        container_of(aspace, struct msm_iommu_aspace, base))
+
+static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
+               struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
+{
+       if (!vma->iova)
+               return;
+
+       if (aspace->mmu)
+               aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt);
+
+       drm_mm_remove_node(&vma->node);
+
+       vma->iova = 0;
+
+       msm_gem_address_space_put(aspace);
+}
+
+static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
+               struct msm_gem_vma *vma, struct sg_table *sgt, void *priv,
+               unsigned int flags)
+{
+       struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
+       size_t size = 0;
+       struct scatterlist *sg;
+       int ret, i;
+       int iommu_flags = IOMMU_READ;
+
+       if (!(flags & MSM_BO_GPU_READONLY))
+               iommu_flags |= IOMMU_WRITE;
+
+       if (flags & MSM_BO_PRIVILEGED)
+               iommu_flags |= IOMMU_PRIV;
+
+       if (WARN_ON(drm_mm_node_allocated(&vma->node)))
+               return 0;
+
+       for_each_sg(sgt->sgl, sg, sgt->nents, i)
+               size += sg->length + sg->offset;
+
+       ret = drm_mm_insert_node(&local->mm, &vma->node, size >> PAGE_SHIFT,
+                       0, DRM_MM_SEARCH_DEFAULT);
+       if (ret)
+               return ret;
+
+       vma->iova = vma->node.start << PAGE_SHIFT;
+
+       if (aspace->mmu)
+               ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
+                       iommu_flags);
+
+       /* Get a reference to the aspace to keep it around */
+       kref_get(&aspace->kref);
+
+       return ret;
+}
+
+static void iommu_aspace_destroy(struct msm_gem_address_space *aspace)
+{
+       struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
+
+       drm_mm_takedown(&local->mm);
+       aspace->mmu->funcs->destroy(aspace->mmu);
+}
+
+static const struct msm_gem_aspace_ops msm_iommu_aspace_ops = {
+       .map = iommu_aspace_map_vma,
+       .unmap = iommu_aspace_unmap_vma,
+       .destroy = iommu_aspace_destroy,
+};
+
+static struct msm_gem_address_space *
+msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
+               uint64_t start, uint64_t end)
+{
+       struct msm_iommu_aspace *local;
+
+       if (!mmu)
+               return ERR_PTR(-EINVAL);
+
+       local = kzalloc(sizeof(*local), GFP_KERNEL);
+       if (!local)
+               return ERR_PTR(-ENOMEM);
+
+       drm_mm_init(&local->mm, (start >> PAGE_SHIFT),
+               (end >> PAGE_SHIFT) - 1);
+
+       local->base.name = name;
+       local->base.mmu = mmu;
+       local->base.ops = &msm_iommu_aspace_ops;
+
+       kref_init(&local->base.kref);
+
+       return &local->base;
+}
+
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+               struct msm_gem_vma *vma, struct sg_table *sgt,
+               void *priv, unsigned int flags)
+{
+       if (aspace && aspace->ops->map)
+               return aspace->ops->map(aspace, vma, sgt, priv, flags);
+
+       return -EINVAL;
+}
+
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+               struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
+{
+       if (aspace && aspace->ops->unmap)
+               aspace->ops->unmap(aspace, vma, sgt, priv);
+}
+
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+               const char *name)
+{
+       struct msm_mmu *mmu = msm_iommu_new(dev, domain);
+
+       if (IS_ERR(mmu))
+               return (struct msm_gem_address_space *) mmu;
+
+       return msm_gem_address_space_new(mmu, name,
+               domain->geometry.aperture_start,
+               domain->geometry.aperture_end);
+}
+
+/* Create a new dynamic instance */
+struct msm_gem_address_space *
+msm_gem_address_space_create_instance(struct msm_mmu *parent, const char *name,
+               uint64_t start, uint64_t end)
+{
+       struct msm_mmu *child = msm_iommu_new_dynamic(parent);
+
+       if (IS_ERR(child))
+               return (struct msm_gem_address_space *) child;
+
+       return msm_gem_address_space_new(child, name, start, end);
+}
index 6b02ada..3176f30 100644 (file)
@@ -90,21 +90,20 @@ static int disable_pwrrail(struct msm_gpu *gpu)
 
 static int enable_clk(struct msm_gpu *gpu)
 {
-       struct clk *rate_clk = NULL;
+       uint32_t rate = gpu->gpufreq[gpu->active_level];
        int i;
 
-       /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
-       for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
-               if (gpu->grp_clks[i]) {
-                       clk_prepare(gpu->grp_clks[i]);
-                       rate_clk = gpu->grp_clks[i];
-               }
-       }
+       if (gpu->core_clk)
+               clk_set_rate(gpu->core_clk, rate);
 
-       if (rate_clk && gpu->fast_rate)
-               clk_set_rate(rate_clk, gpu->fast_rate);
+       if (gpu->rbbmtimer_clk)
+               clk_set_rate(gpu->rbbmtimer_clk, 19200000);
+
+       for (i = gpu->nr_clocks - 1; i >= 0; i--)
+               if (gpu->grp_clks[i])
+                       clk_prepare(gpu->grp_clks[i]);
 
-       for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+       for (i = gpu->nr_clocks - 1; i >= 0; i--)
                if (gpu->grp_clks[i])
                        clk_enable(gpu->grp_clks[i]);
 
@@ -113,24 +112,23 @@ static int enable_clk(struct msm_gpu *gpu)
 
 static int disable_clk(struct msm_gpu *gpu)
 {
-       struct clk *rate_clk = NULL;
+       uint32_t rate = gpu->gpufreq[gpu->nr_pwrlevels - 1];
        int i;
 
-       /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
-       for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
-               if (gpu->grp_clks[i]) {
+       for (i = gpu->nr_clocks - 1; i >= 0; i--)
+               if (gpu->grp_clks[i])
                        clk_disable(gpu->grp_clks[i]);
-                       rate_clk = gpu->grp_clks[i];
-               }
-       }
 
-       if (rate_clk && gpu->slow_rate)
-               clk_set_rate(rate_clk, gpu->slow_rate);
-
-       for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+       for (i = gpu->nr_clocks - 1; i >= 0; i--)
                if (gpu->grp_clks[i])
                        clk_unprepare(gpu->grp_clks[i]);
 
+       if (gpu->core_clk)
+               clk_set_rate(gpu->core_clk, rate);
+
+       if (gpu->rbbmtimer_clk)
+               clk_set_rate(gpu->rbbmtimer_clk, 0);
+
        return 0;
 }
 
@@ -138,8 +136,9 @@ static int enable_axi(struct msm_gpu *gpu)
 {
        if (gpu->ebi1_clk)
                clk_prepare_enable(gpu->ebi1_clk);
-       if (gpu->bus_freq)
-               bs_set(gpu, gpu->bus_freq);
+
+       if (gpu->busfreq[gpu->active_level])
+               bs_set(gpu, gpu->busfreq[gpu->active_level]);
        return 0;
 }
 
@@ -147,7 +146,8 @@ static int disable_axi(struct msm_gpu *gpu)
 {
        if (gpu->ebi1_clk)
                clk_disable_unprepare(gpu->ebi1_clk);
-       if (gpu->bus_freq)
+
+       if (gpu->busfreq[gpu->active_level])
                bs_set(gpu, 0);
        return 0;
 }
@@ -155,6 +155,8 @@ static int disable_axi(struct msm_gpu *gpu)
 int msm_gpu_pm_resume(struct msm_gpu *gpu)
 {
        struct drm_device *dev = gpu->dev;
+       struct msm_drm_private *priv = dev->dev_private;
+       struct platform_device *pdev = priv->gpu_pdev;
        int ret;
 
        DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
@@ -167,6 +169,8 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
        if (WARN_ON(gpu->active_cnt <= 0))
                return -EINVAL;
 
+       WARN_ON(pm_runtime_get_sync(&pdev->dev) < 0);
+
        ret = enable_pwrrail(gpu);
        if (ret)
                return ret;
@@ -185,6 +189,8 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
 int msm_gpu_pm_suspend(struct msm_gpu *gpu)
 {
        struct drm_device *dev = gpu->dev;
+       struct msm_drm_private *priv = dev->dev_private;
+       struct platform_device *pdev = priv->gpu_pdev;
        int ret;
 
        DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
@@ -209,6 +215,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
        if (ret)
                return ret;
 
+       pm_runtime_put(&pdev->dev);
        return 0;
 }
 
@@ -277,17 +284,35 @@ static void recover_worker(struct work_struct *work)
        mutex_lock(&dev->struct_mutex);
        if (msm_gpu_active(gpu)) {
                struct msm_gem_submit *submit;
-               uint32_t fence = gpu->funcs->last_fence(gpu);
-
-               /* retire completed submits, plus the one that hung: */
-               retire_submits(gpu, fence + 1);
+               struct msm_ringbuffer *ring;
+               int i;
 
                inactive_cancel(gpu);
+
+               FOR_EACH_RING(gpu, ring, i) {
+                       uint32_t fence;
+
+                       if (!ring)
+                               continue;
+
+                       fence = gpu->funcs->last_fence(gpu, ring);
+
+                       /*
+                        * Retire the faulting command on the active ring and
+                        * make sure the other rings are cleaned up
+                        */
+                       if (ring == gpu->funcs->active_ring(gpu))
+                               retire_submits(gpu, fence + 1);
+                       else
+                               retire_submits(gpu, fence);
+               }
+
+               /* Recover the GPU */
                gpu->funcs->recover(gpu);
 
-               /* replay the remaining submits after the one that hung: */
+               /* replay the remaining submits for all rings: */
                list_for_each_entry(submit, &gpu->submit_list, node) {
-                       gpu->funcs->submit(gpu, submit, NULL);
+                       gpu->funcs->submit(gpu, submit);
                }
        }
        mutex_unlock(&dev->struct_mutex);
@@ -307,25 +332,28 @@ static void hangcheck_handler(unsigned long data)
        struct msm_gpu *gpu = (struct msm_gpu *)data;
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
-       uint32_t fence = gpu->funcs->last_fence(gpu);
+       struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+       uint32_t fence = gpu->funcs->last_fence(gpu, ring);
+       uint32_t submitted = gpu->funcs->submitted_fence(gpu, ring);
 
-       if (fence != gpu->hangcheck_fence) {
+       if (fence != gpu->hangcheck_fence[ring->id]) {
                /* some progress has been made.. ya! */
-               gpu->hangcheck_fence = fence;
-       } else if (fence < gpu->submitted_fence) {
+               gpu->hangcheck_fence[ring->id] = fence;
+       } else if (fence < submitted) {
                /* no progress and not done.. hung! */
-               gpu->hangcheck_fence = fence;
-               dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
-                               gpu->name);
+               gpu->hangcheck_fence[ring->id] = fence;
+               dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
+                               gpu->name, ring->id);
                dev_err(dev->dev, "%s:     completed fence: %u\n",
                                gpu->name, fence);
                dev_err(dev->dev, "%s:     submitted fence: %u\n",
-                               gpu->name, gpu->submitted_fence);
+                               gpu->name, submitted);
+
                queue_work(priv->wq, &gpu->recover_work);
        }
 
        /* if still more pending work, reset the hangcheck timer: */
-       if (gpu->submitted_fence > gpu->hangcheck_fence)
+       if (submitted > gpu->hangcheck_fence[ring->id])
                hangcheck_timer_reset(gpu);
 
        /* workaround for missing irq: */
@@ -434,54 +462,66 @@ out:
 static void retire_submits(struct msm_gpu *gpu, uint32_t fence)
 {
        struct drm_device *dev = gpu->dev;
+       struct msm_gem_submit *submit, *tmp;
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-       while (!list_empty(&gpu->submit_list)) {
-               struct msm_gem_submit *submit;
-
-               submit = list_first_entry(&gpu->submit_list,
-                               struct msm_gem_submit, node);
+       /*
+        * Find and retire all the submits in the same ring that are older than
+        * or equal to 'fence'
+        */
 
-               if (submit->fence <= fence) {
+       list_for_each_entry_safe(submit, tmp, &gpu->submit_list, node) {
+               if (COMPARE_FENCE_LTE(submit->fence, fence)) {
                        list_del(&submit->node);
                        kfree(submit);
-               } else {
-                       break;
                }
        }
 }
 
-static void retire_worker(struct work_struct *work)
+static bool _fence_signaled(struct msm_gem_object *obj, uint32_t fence)
 {
-       struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
-       struct drm_device *dev = gpu->dev;
-       uint32_t fence = gpu->funcs->last_fence(gpu);
+       if (obj->write_fence & 0x3FFFFFFF)
+               return COMPARE_FENCE_LTE(obj->write_fence, fence);
 
-       msm_update_fence(gpu->dev, fence);
+       return COMPARE_FENCE_LTE(obj->read_fence, fence);
+}
 
-       mutex_lock(&dev->struct_mutex);
+static void _retire_ring(struct msm_gpu *gpu, uint32_t fence)
+{
+       struct msm_gem_object *obj, *tmp;
 
        retire_submits(gpu, fence);
 
-       while (!list_empty(&gpu->active_list)) {
-               struct msm_gem_object *obj;
-
-               obj = list_first_entry(&gpu->active_list,
-                               struct msm_gem_object, mm_list);
-
-               if ((obj->read_fence <= fence) &&
-                               (obj->write_fence <= fence)) {
-                       /* move to inactive: */
+       list_for_each_entry_safe(obj, tmp, &gpu->active_list, mm_list) {
+               if (_fence_signaled(obj, fence)) {
                        msm_gem_move_to_inactive(&obj->base);
-                       msm_gem_put_iova(&obj->base, gpu->id);
+                       msm_gem_put_iova(&obj->base, gpu->aspace);
                        drm_gem_object_unreference(&obj->base);
-               } else {
-                       break;
                }
        }
+}
 
-       mutex_unlock(&dev->struct_mutex);
+static void retire_worker(struct work_struct *work)
+{
+       struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
+       struct drm_device *dev = gpu->dev;
+       struct msm_ringbuffer *ring;
+       int i;
+
+       FOR_EACH_RING(gpu, ring, i) {
+               uint32_t fence;
+
+               if (!ring)
+                       continue;
+
+               fence = gpu->funcs->last_fence(gpu, ring);
+               msm_update_fence(gpu->dev, fence);
+
+               mutex_lock(&dev->struct_mutex);
+               _retire_ring(gpu, fence);
+               mutex_unlock(&dev->struct_mutex);
+       }
 
        if (!msm_gpu_active(gpu))
                inactive_start(gpu);
@@ -496,18 +536,16 @@ void msm_gpu_retire(struct msm_gpu *gpu)
 }
 
 /* add bo's to gpu's ring, and kick gpu: */
-int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-               struct msm_file_private *ctx)
+int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
+       struct msm_ringbuffer *ring = gpu->rb[submit->ring];
        int i, ret;
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-       submit->fence = ++priv->next_fence;
-
-       gpu->submitted_fence = submit->fence;
+       submit->fence = FENCE(submit->ring, ++priv->next_fence[submit->ring]);
 
        inactive_cancel(gpu);
 
@@ -515,7 +553,7 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 
        msm_rd_dump_submit(submit);
 
-       gpu->submitted_fence = submit->fence;
+       ring->submitted_fence = submit->fence;
 
        update_sw_cntrs(gpu);
 
@@ -528,12 +566,12 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
 
                if (!is_active(msm_obj)) {
-                       uint32_t iova;
+                       uint64_t iova;
 
                        /* ring takes a reference to the bo and iova: */
                        drm_gem_object_reference(&msm_obj->base);
                        msm_gem_get_iova_locked(&msm_obj->base,
-                                       submit->gpu->id, &iova);
+                                       submit->aspace, &iova);
                }
 
                if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
@@ -543,8 +581,7 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                        msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
        }
 
-       ret = gpu->funcs->submit(gpu, submit, ctx);
-       priv->lastctx = ctx;
+       ret = gpu->funcs->submit(gpu, submit);
 
        hangcheck_timer_reset(gpu);
 
@@ -561,17 +598,54 @@ static irqreturn_t irq_handler(int irq, void *data)
        return gpu->funcs->irq(gpu);
 }
 
-static const char *clk_names[] = {
-               "src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
-               "alt_mem_iface_clk",
-};
+static struct clk *get_clock(struct device *dev, const char *name)
+{
+       struct clk *clk = devm_clk_get(dev, name);
+
+       DBG("clks[%s]: %p", name, clk);
+
+       return IS_ERR(clk) ? NULL : clk;
+}
+
+static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
+{
+       struct device *dev = &pdev->dev;
+       struct property *prop;
+       const char *name;
+       int i = 0;
+
+       gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
+       if (gpu->nr_clocks < 1) {
+               gpu->nr_clocks = 0;
+               return 0;
+       }
+
+       gpu->grp_clks = devm_kcalloc(dev, sizeof(struct clk *), gpu->nr_clocks,
+               GFP_KERNEL);
+       if (!gpu->grp_clks)
+               return -ENOMEM;
+
+       of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
+               gpu->grp_clks[i] = get_clock(dev, name);
+
+               /* Remember the key clocks that we need to control later */
+               if (!strcmp(name, "core_clk"))
+                       gpu->core_clk = gpu->grp_clks[i];
+               else if (!strcmp(name, "rbbmtimer_clk"))
+                       gpu->rbbmtimer_clk = gpu->grp_clks[i];
+
+               ++i;
+       }
+
+       return 0;
+}
 
 int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
-               const char *name, const char *ioname, const char *irqname, int ringsz)
+               const char *name, struct msm_gpu_config *config)
 {
        struct iommu_domain *iommu;
-       int i, ret;
+       int i, ret, nr_rings;
 
        if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
                gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
@@ -595,17 +669,16 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 
        spin_lock_init(&gpu->perf_lock);
 
-       BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
 
        /* Map registers: */
-       gpu->mmio = msm_ioremap(pdev, ioname, name);
+       gpu->mmio = msm_ioremap(pdev, config->ioname, name);
        if (IS_ERR(gpu->mmio)) {
                ret = PTR_ERR(gpu->mmio);
                goto fail;
        }
 
        /* Get Interrupt: */
-       gpu->irq = platform_get_irq_byname(pdev, irqname);
+       gpu->irq = platform_get_irq_byname(pdev, config->irqname);
        if (gpu->irq < 0) {
                ret = gpu->irq;
                dev_err(drm->dev, "failed to get irq: %d\n", ret);
@@ -619,13 +692,11 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                goto fail;
        }
 
-       /* Acquire clocks: */
-       for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
-               gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
-               DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
-               if (IS_ERR(gpu->grp_clks[i]))
-                       gpu->grp_clks[i] = NULL;
-       }
+       pm_runtime_enable(&pdev->dev);
+
+       ret = get_clocks(pdev, gpu);
+       if (ret)
+               goto fail;
 
        gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
        DBG("ebi1_clk: %p", gpu->ebi1_clk);
@@ -649,12 +720,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
         */
        iommu = iommu_domain_alloc(&platform_bus_type);
        if (iommu) {
+               /* TODO 32b vs 64b address space.. */
+               iommu->geometry.aperture_start = config->va_start;
+               iommu->geometry.aperture_end = config->va_end;
+
                dev_info(drm->dev, "%s: using IOMMU\n", name);
-               gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
-               if (IS_ERR(gpu->mmu)) {
-                       ret = PTR_ERR(gpu->mmu);
+               gpu->aspace = msm_gem_address_space_create(&pdev->dev,
+                               iommu, "gpu");
+               if (IS_ERR(gpu->aspace)) {
+                       ret = PTR_ERR(gpu->aspace);
                        dev_err(drm->dev, "failed to init iommu: %d\n", ret);
-                       gpu->mmu = NULL;
+                       gpu->aspace = NULL;
                        iommu_domain_free(iommu);
                        goto fail;
                }
@@ -662,42 +738,80 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
        } else {
                dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
        }
-       gpu->id = msm_register_mmu(drm, gpu->mmu);
 
+       nr_rings = config->nr_rings;
 
-       /* Create ringbuffer: */
-       mutex_lock(&drm->struct_mutex);
-       gpu->rb = msm_ringbuffer_new(gpu, ringsz);
-       mutex_unlock(&drm->struct_mutex);
-       if (IS_ERR(gpu->rb)) {
-               ret = PTR_ERR(gpu->rb);
-               gpu->rb = NULL;
-               dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
-               goto fail;
+       if (nr_rings > ARRAY_SIZE(gpu->rb)) {
+               WARN(1, "Only creating %lu ringbuffers\n", ARRAY_SIZE(gpu->rb));
+               nr_rings = ARRAY_SIZE(gpu->rb);
        }
 
+       /* Create ringbuffer(s): */
+       for (i = 0; i < nr_rings; i++) {
+               mutex_lock(&drm->struct_mutex);
+               gpu->rb[i] = msm_ringbuffer_new(gpu, i);
+               mutex_unlock(&drm->struct_mutex);
+
+               if (IS_ERR(gpu->rb[i])) {
+                       ret = PTR_ERR(gpu->rb[i]);
+                       gpu->rb[i] = NULL;
+                       dev_err(drm->dev,
+                               "could not create ringbuffer %d: %d\n", i, ret);
+                       goto fail;
+               }
+       }
+
+       gpu->nr_rings = nr_rings;
+
+#ifdef CONFIG_SMP
+       gpu->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
+       gpu->pm_qos_req_dma.irq = gpu->irq;
+#endif
+
+       pm_qos_add_request(&gpu->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY,
+                       PM_QOS_DEFAULT_VALUE);
+
        bs_init(gpu);
 
+       gpu->snapshot = msm_snapshot_new(gpu);
+       if (IS_ERR(gpu->snapshot))
+               gpu->snapshot = NULL;
+
        return 0;
 
 fail:
+       for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
+               if (gpu->rb[i])
+                       msm_ringbuffer_destroy(gpu->rb[i]);
+       }
+
+       pm_runtime_disable(&pdev->dev);
        return ret;
 }
 
 void msm_gpu_cleanup(struct msm_gpu *gpu)
 {
+       struct drm_device *dev = gpu->dev;
+       struct msm_drm_private *priv = dev->dev_private;
+       struct platform_device *pdev = priv->gpu_pdev;
+       int i;
+
        DBG("%s", gpu->name);
 
        WARN_ON(!list_empty(&gpu->active_list));
 
        bs_fini(gpu);
 
-       if (gpu->rb) {
-               if (gpu->rb_iova)
-                       msm_gem_put_iova(gpu->rb->bo, gpu->id);
-               msm_ringbuffer_destroy(gpu->rb);
+       for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
+               if (!gpu->rb[i])
+                       continue;
+
+               if (gpu->rb[i]->iova)
+                       msm_gem_put_iova(gpu->rb[i]->bo, gpu->aspace);
+
+               msm_ringbuffer_destroy(gpu->rb[i]);
        }
 
-       if (gpu->mmu)
-               gpu->mmu->funcs->destroy(gpu->mmu);
+       msm_snapshot_destroy(gpu, gpu->snapshot);
+       pm_runtime_disable(&pdev->dev);
 }
index 2bbe85a..06dfaab 100644 (file)
 #define __MSM_GPU_H__
 
 #include <linux/clk.h>
+#include <linux/pm_qos.h>
 #include <linux/regulator/consumer.h>
 
 #include "msm_drv.h"
 #include "msm_ringbuffer.h"
+#include "msm_snapshot.h"
 
 struct msm_gem_submit;
 struct msm_gpu_perfcntr;
 
+struct msm_gpu_config {
+       const char *ioname;
+       const char *irqname;
+       int nr_rings;
+       uint64_t va_start;
+       uint64_t va_end;
+};
+
 /* So far, with hardware that I've seen to date, we can have:
  *  + zero, one, or two z180 2d cores
  *  + a3xx or a2xx 3d core, which share a common CP (the firmware
@@ -46,18 +56,21 @@ struct msm_gpu_funcs {
        int (*hw_init)(struct msm_gpu *gpu);
        int (*pm_suspend)(struct msm_gpu *gpu);
        int (*pm_resume)(struct msm_gpu *gpu);
-       int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-                       struct msm_file_private *ctx);
-       void (*flush)(struct msm_gpu *gpu);
-       void (*idle)(struct msm_gpu *gpu);
+       int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
+       void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
        irqreturn_t (*irq)(struct msm_gpu *irq);
-       uint32_t (*last_fence)(struct msm_gpu *gpu);
+       uint32_t (*last_fence)(struct msm_gpu *gpu,
+                       struct msm_ringbuffer *ring);
+       uint32_t (*submitted_fence)(struct msm_gpu *gpu,
+                       struct msm_ringbuffer *ring);
+       struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
        void (*recover)(struct msm_gpu *gpu);
        void (*destroy)(struct msm_gpu *gpu);
 #ifdef CONFIG_DEBUG_FS
        /* show GPU status in debugfs: */
        void (*show)(struct msm_gpu *gpu, struct seq_file *m);
 #endif
+       int (*snapshot)(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
 };
 
 struct msm_gpu {
@@ -77,14 +90,12 @@ struct msm_gpu {
        const struct msm_gpu_perfcntr *perfcntrs;
        uint32_t num_perfcntrs;
 
-       struct msm_ringbuffer *rb;
-       uint32_t rb_iova;
+       struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
+       int nr_rings;
 
        /* list of GEM active objects: */
        struct list_head active_list;
 
-       uint32_t submitted_fence;
-
        /* is gpu powered/active? */
        int active_cnt;
        bool inactive;
@@ -95,13 +106,20 @@ struct msm_gpu {
        void __iomem *mmio;
        int irq;
 
-       struct msm_mmu *mmu;
-       int id;
+       struct msm_gem_address_space *aspace;
 
        /* Power Control: */
        struct regulator *gpu_reg, *gpu_cx;
-       struct clk *ebi1_clk, *grp_clks[6];
-       uint32_t fast_rate, slow_rate, bus_freq;
+       struct clk **grp_clks;
+       struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
+       int nr_clocks;
+
+       uint32_t gpufreq[10];
+       uint32_t busfreq[10];
+       uint32_t nr_pwrlevels;
+       uint32_t active_level;
+
+       struct pm_qos_request pm_qos_req_dma;
 
 #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
        struct msm_bus_scale_pdata *bus_scale_table;
@@ -117,15 +135,44 @@ struct msm_gpu {
 #define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
 #define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
        struct timer_list hangcheck_timer;
-       uint32_t hangcheck_fence;
+       uint32_t hangcheck_fence[MSM_GPU_MAX_RINGS];
        struct work_struct recover_work;
 
        struct list_head submit_list;
+
+       struct msm_snapshot *snapshot;
 };
 
+/* It turns out that all targets use the same ringbuffer size. */
+#define MSM_GPU_RINGBUFFER_SZ SZ_32K
+#define MSM_GPU_RINGBUFFER_BLKSIZE 32
+
+#define MSM_GPU_RB_CNTL_DEFAULT \
+               (AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
+               AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
+
+static inline struct msm_ringbuffer *__get_ring(struct msm_gpu *gpu, int index)
+{
+       return (index < ARRAY_SIZE(gpu->rb) ? gpu->rb[index] : NULL);
+}
+
+#define FOR_EACH_RING(gpu, ring, index) \
+       for (index = 0, ring = (gpu)->rb[0]; \
+               index < (gpu)->nr_rings && index < ARRAY_SIZE((gpu)->rb); \
+               index++, ring = __get_ring(gpu, index))
+
 static inline bool msm_gpu_active(struct msm_gpu *gpu)
 {
-       return gpu->submitted_fence > gpu->funcs->last_fence(gpu);
+       struct msm_ringbuffer *ring;
+       int i;
+
+       FOR_EACH_RING(gpu, ring, i) {
+               if (gpu->funcs->submitted_fence(gpu, ring) >
+                       gpu->funcs->last_fence(gpu, ring))
+                       return true;
+       }
+
+       return false;
 }
 
 /* Perf-Counters:
@@ -151,6 +198,45 @@ static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
        return msm_readl(gpu->mmio + (reg << 2));
 }
 
+static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
+{
+       uint32_t val = gpu_read(gpu, reg);
+
+       val &= ~mask;
+       gpu_write(gpu, reg, val | or);
+}
+
+static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
+{
+       u64 val;
+
+       /*
+        * Why not a readq here? Two reasons: 1) many of the LO registers are
+        * not quad word aligned and 2) the GPU hardware designers have a bit
+        * of a history of putting registers where they fit, especially in
+        * spins. The longer a GPU family goes the higher the chance that
+        * we'll get burned.  We could do a series of validity checks if we
+        * wanted to, but really is a readq() that much better? Nah.
+        */
+
+       /*
+        * For some lo/hi registers (like perfcounters), the hi value is latched
+        * when the lo is read, so make sure to read the lo first to trigger
+        * that
+        */
+       val = (u64) msm_readl(gpu->mmio + (lo << 2));
+       val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);
+
+       return val;
+}
+
+static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
+{
+       /* Why not a writeq here? Read the screed above */
+       msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
+       msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
+}
+
 int msm_gpu_pm_suspend(struct msm_gpu *gpu);
 int msm_gpu_pm_resume(struct msm_gpu *gpu);
 
@@ -160,12 +246,12 @@ int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
                uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
 
 void msm_gpu_retire(struct msm_gpu *gpu);
-int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-               struct msm_file_private *ctx);
+int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
 
 int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
-               const char *name, const char *ioname, const char *irqname, int ringsz);
+               const char *name, struct msm_gpu_config *config);
+
 void msm_gpu_cleanup(struct msm_gpu *gpu);
 
 struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
index 7e64a62..3c16222 100644 (file)
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/of_platform.h>
 #include "msm_drv.h"
-#include "msm_mmu.h"
-
-struct msm_iommu {
-       struct msm_mmu base;
-       struct iommu_domain *domain;
-};
-#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
+#include "msm_iommu.h"
 
 static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
                unsigned long iova, int flags, void *arg)
 {
-       pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
+       pr_warn_ratelimited("*** fault: iova=%16llX, flags=%d\n", (u64) iova, flags);
        return 0;
 }
 
+/*
+ * Get and enable the IOMMU clocks so that we can make
+ * sure they stay on the entire duration so that we can
+ * safely change the pagetable from the GPU
+ */
+static void _get_iommu_clocks(struct msm_mmu *mmu, struct platform_device *pdev)
+{
+       struct msm_iommu *iommu = to_msm_iommu(mmu);
+       struct device *dev;
+       struct property *prop;
+       const char *name;
+       int i = 0;
+
+       if (WARN_ON(!pdev))
+               return;
+
+       dev = &pdev->dev;
+
+       iommu->nr_clocks =
+               of_property_count_strings(dev->of_node, "clock-names");
+
+       if (iommu->nr_clocks < 0) {
+               iommu->nr_clocks = 0;
+               return;
+       }
+
+       if (WARN_ON(iommu->nr_clocks > ARRAY_SIZE(iommu->clocks)))
+               iommu->nr_clocks = ARRAY_SIZE(iommu->clocks);
+
+       of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
+               if (i == iommu->nr_clocks)
+                       break;
+
+               iommu->clocks[i] =  clk_get(dev, name);
+               if (iommu->clocks[i])
+                       clk_prepare_enable(iommu->clocks[i]);
+
+               i++;
+       }
+}
+
+static int _attach_iommu_device(struct msm_mmu *mmu,
+               struct iommu_domain *domain, const char **names, int cnt)
+{
+       int i;
+
+       /* See if there is a iommus member in the current device.  If not, look
+        * for the names and see if there is one in there.
+        */
+
+       if (of_find_property(mmu->dev->of_node, "iommus", NULL))
+               return iommu_attach_device(domain, mmu->dev);
+
+       /* Look through the list of names for a target */
+       for (i = 0; i < cnt; i++) {
+               struct device_node *node =
+                       of_find_node_by_name(mmu->dev->of_node, names[i]);
+
+               if (!node)
+                       continue;
+
+               if (of_find_property(node, "iommus", NULL)) {
+                       struct platform_device *pdev;
+
+                       /* Get the platform device for the node */
+                       of_platform_populate(node->parent, NULL, NULL,
+                               mmu->dev);
+
+                       pdev = of_find_device_by_node(node);
+
+                       if (!pdev)
+                               continue;
+
+                       _get_iommu_clocks(mmu,
+                               of_find_device_by_node(node->parent));
+
+                       mmu->dev = &pdev->dev;
+
+                       return iommu_attach_device(domain, mmu->dev);
+               }
+       }
+
+       dev_err(mmu->dev, "Couldn't find a IOMMU device\n");
+       return -ENODEV;
+}
+
 static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
 {
        struct msm_iommu *iommu = to_msm_iommu(mmu);
-       return iommu_attach_device(iommu->domain, mmu->dev);
+       int val = 1, ret;
+
+       /* Hope springs eternal */
+       iommu->allow_dynamic = true;
+
+       /* per-instance pagetables need TTBR1 support in the IOMMU driver */
+       ret = iommu_domain_set_attr(iommu->domain,
+               DOMAIN_ATTR_ENABLE_TTBR1, &val);
+       if (ret)
+               iommu->allow_dynamic = false;
+
+       /* Attach the device to the domain */
+       ret = _attach_iommu_device(mmu, iommu->domain, names, cnt);
+       if (ret)
+               return ret;
+
+       /*
+        * Get the context bank for the base domain; this will be shared with
+        * the children.
+        */
+       iommu->cb = -1;
+       if (iommu_domain_get_attr(iommu->domain, DOMAIN_ATTR_CONTEXT_BANK,
+               &iommu->cb))
+               iommu->allow_dynamic = false;
+
+       return 0;
+}
+
+static int msm_iommu_attach_dynamic(struct msm_mmu *mmu, const char **names,
+               int cnt)
+{
+       static unsigned int procid;
+       struct msm_iommu *iommu = to_msm_iommu(mmu);
+       int ret;
+       unsigned int id;
+
+       /* Assign a unique procid for the domain to cut down on TLB churn */
+       id = ++procid;
+
+       iommu_domain_set_attr(iommu->domain, DOMAIN_ATTR_PROCID, &id);
+
+       ret = iommu_attach_device(iommu->domain, mmu->dev);
+       if (ret)
+               return ret;
+
+       /*
+        * Get the TTBR0 and the CONTEXTIDR - these will be used by the GPU to
+        * switch the pagetable on its own.
+        */
+       iommu_domain_get_attr(iommu->domain, DOMAIN_ATTR_TTBR0,
+               &iommu->ttbr0);
+       iommu_domain_get_attr(iommu->domain, DOMAIN_ATTR_CONTEXTIDR,
+               &iommu->contextidr);
+
+       return 0;
+}
+
+static void msm_iommu_detach(struct msm_mmu *mmu)
+{
+       struct msm_iommu *iommu = to_msm_iommu(mmu);
+       int i;
+
+       iommu_detach_device(iommu->domain, mmu->dev);
+
+       for (i = 0; i < iommu->nr_clocks; i++) {
+               if (iommu->clocks[i])
+                       clk_disable(iommu->clocks[i]);
+       }
 }
 
-static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
+static void msm_iommu_detach_dynamic(struct msm_mmu *mmu)
 {
        struct msm_iommu *iommu = to_msm_iommu(mmu);
        iommu_detach_device(iommu->domain, mmu->dev);
 }
 
-static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
+static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
                struct sg_table *sgt, int prot)
 {
        struct msm_iommu *iommu = to_msm_iommu(mmu);
        struct iommu_domain *domain = iommu->domain;
        struct scatterlist *sg;
-       unsigned int da = iova;
+       uint64_t da = iova;
        unsigned int i, j;
        int ret;
 
@@ -60,7 +208,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
                phys_addr_t pa = sg_phys(sg) - sg->offset;
                size_t bytes = sg->length + sg->offset;
 
-               VERB("map[%d]: %08x %pa(%zx)", i, iova, &pa, bytes);
+               VERB("map[%d]: %016llx %pa(%zx)", i, iova, &pa, bytes);
 
                ret = iommu_map(domain, da, pa, bytes, prot);
                if (ret)
@@ -82,13 +230,13 @@ fail:
        return ret;
 }
 
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
                struct sg_table *sgt)
 {
        struct msm_iommu *iommu = to_msm_iommu(mmu);
        struct iommu_domain *domain = iommu->domain;
        struct scatterlist *sg;
-       unsigned int da = iova;
+       uint64_t da = iova;
        int i;
 
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
@@ -99,7 +247,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
                if (unmapped < bytes)
                        return unmapped;
 
-               VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+               VERB("unmap[%d]: %016llx(%zx)", i, iova, bytes);
 
                BUG_ON(!PAGE_ALIGNED(bytes));
 
@@ -124,7 +272,16 @@ static const struct msm_mmu_funcs funcs = {
                .destroy = msm_iommu_destroy,
 };
 
-struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
+static const struct msm_mmu_funcs dynamic_funcs = {
+               .attach = msm_iommu_attach_dynamic,
+               .detach = msm_iommu_detach_dynamic,
+               .map = msm_iommu_map,
+               .unmap = msm_iommu_unmap,
+               .destroy = msm_iommu_destroy,
+};
+
+struct msm_mmu *_msm_iommu_new(struct device *dev, struct iommu_domain *domain,
+               const struct msm_mmu_funcs *funcs)
 {
        struct msm_iommu *iommu;
 
@@ -133,8 +290,54 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
                return ERR_PTR(-ENOMEM);
 
        iommu->domain = domain;
-       msm_mmu_init(&iommu->base, dev, &funcs);
+       msm_mmu_init(&iommu->base, dev, funcs);
        iommu_set_fault_handler(domain, msm_fault_handler, dev);
 
        return &iommu->base;
 }
+struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
+{
+       return _msm_iommu_new(dev, domain, &funcs);
+}
+
+/*
+ * Given a base domain that is attached to a IOMMU device try to create a
+ * dynamic domain that is also attached to the same device but allocates a new
+ * pagetable. This is used to allow multiple pagetables to be attached to the
+ * same device.
+ */
+struct msm_mmu *msm_iommu_new_dynamic(struct msm_mmu *base)
+{
+       struct msm_iommu *base_iommu = to_msm_iommu(base);
+       struct iommu_domain *domain;
+       struct msm_mmu *mmu;
+       int ret, val = 1;
+
+       /* Don't continue if the base domain didn't have the support we need */
+       if (!base || base_iommu->allow_dynamic == false)
+               return ERR_PTR(-EOPNOTSUPP);
+
+       domain = iommu_domain_alloc(&platform_bus_type);
+       if (!domain)
+               return ERR_PTR(-ENODEV);
+
+       mmu = _msm_iommu_new(base->dev, domain, &dynamic_funcs);
+
+       if (IS_ERR(mmu)) {
+               if (domain)
+                       iommu_domain_free(domain);
+               return mmu;
+       }
+
+       ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_DYNAMIC, &val);
+       if (ret) {
+               msm_iommu_destroy(mmu);
+               return ERR_PTR(ret);
+       }
+
+       /* Set the context bank to match the base domain */
+       iommu_domain_set_attr(domain, DOMAIN_ATTR_CONTEXT_BANK,
+               &base_iommu->cb);
+
+       return mmu;
+}
diff --git a/drivers/gpu/drm/msm/msm_iommu.h b/drivers/gpu/drm/msm/msm_iommu.h
new file mode 100644 (file)
index 0000000..d005cfb
--- /dev/null
@@ -0,0 +1,37 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_IOMMU_H_
+#define _MSM_IOMMU_H_
+
+#include "msm_mmu.h"
+
+struct msm_iommu {
+       struct msm_mmu base;
+       struct iommu_domain *domain;
+       int cb;
+       phys_addr_t ttbr0;
+       uint32_t contextidr;
+       bool allow_dynamic;
+
+       struct clk *clocks[5];
+       int nr_clocks;
+};
+#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
+
+static inline bool msm_iommu_allow_dynamic(struct msm_mmu *mmu)
+{
+       struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+       return iommu->allow_dynamic;
+}
+#endif
index c8c7755..501f12b 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/iommu.h>
 
 struct msm_mmu;
-struct msm_gpu;
 
 enum msm_mmu_domain_type {
        MSM_SMMU_DOMAIN_UNSECURE,
@@ -33,10 +32,10 @@ enum msm_mmu_domain_type {
 
 struct msm_mmu_funcs {
        int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
-       void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
-       int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
+       void (*detach)(struct msm_mmu *mmu);
+       int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
                        int prot);
-       int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt);
+       int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt);
        int (*map_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
                        enum dma_data_direction dir);
        void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
@@ -61,8 +60,8 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
 }
 
 struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
-struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
 struct msm_mmu *msm_smmu_new(struct device *dev,
        enum msm_mmu_domain_type domain);
+struct msm_mmu *msm_iommu_new_dynamic(struct msm_mmu *orig);
 
 #endif /* __MSM_MMU_H__ */
index 9a78c48..6dbb516 100644 (file)
@@ -307,7 +307,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
 
        for (i = 0; i < submit->nr_cmds; i++) {
                uint32_t idx  = submit->cmd[i].idx;
-               uint32_t iova = submit->cmd[i].iova;
+               uint64_t iova = submit->cmd[i].iova;
                uint32_t szd  = submit->cmd[i].size; /* in dwords */
                struct msm_gem_object *obj = submit->bos[idx].obj;
                const char *buf = msm_gem_vaddr_locked(&obj->base);
@@ -315,7 +315,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
                buf += iova - submit->bos[idx].iova;
 
                rd_write_section(rd, RD_GPUADDR,
-                               (uint32_t[2]){ iova, szd * 4 }, 8);
+                               (uint64_t[2]) { iova, szd * 4 }, 16);
                rd_write_section(rd, RD_BUFFER_CONTENTS,
                                buf, szd * 4);
 
@@ -329,7 +329,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                case MSM_SUBMIT_CMD_BUF:
                        rd_write_section(rd, RD_CMDSTREAM_ADDR,
-                                       (uint32_t[2]){ iova, szd }, 8);
+                                       (uint64_t[2]) { iova, szd }, 16);
                        break;
                }
        }
index 1f14b90..14a16c4 100644 (file)
 #include "msm_ringbuffer.h"
 #include "msm_gpu.h"
 
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id)
 {
        struct msm_ringbuffer *ring;
        int ret;
 
-       size = ALIGN(size, 4);   /* size should be dword aligned */
+       /* We assume everywhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */
+       BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ));
 
        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring) {
@@ -32,7 +33,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
        }
 
        ring->gpu = gpu;
-       ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
+       ring->id = id;
+       ring->bo = msm_gem_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ, MSM_BO_WC);
        if (IS_ERR(ring->bo)) {
                ret = PTR_ERR(ring->bo);
                ring->bo = NULL;
@@ -40,10 +42,11 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
        }
 
        ring->start = msm_gem_vaddr_locked(ring->bo);
-       ring->end   = ring->start + (size / 4);
+       ring->end   = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
+       ring->next  = ring->start;
        ring->cur   = ring->start;
 
-       ring->size = size;
+       spin_lock_init(&ring->lock);
 
        return ring;
 
index 6e0e104..1e84905 100644 (file)
 
 struct msm_ringbuffer {
        struct msm_gpu *gpu;
-       int size;
+       int id;
        struct drm_gem_object *bo;
-       uint32_t *start, *end, *cur;
+       uint32_t *start, *end, *cur, *next;
+       uint64_t iova;
+       uint32_t submitted_fence;
+       spinlock_t lock;
 };
 
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size);
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id);
 void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
 
 /* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
@@ -35,9 +38,13 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
 static inline void
 OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
 {
-       if (ring->cur == ring->end)
-               ring->cur = ring->start;
-       *(ring->cur++) = data;
+       /*
+        * ring->next points to the current command being written - it won't be
+        * committed as ring->cur until the flush
+        */
+       if (ring->next == ring->end)
+               ring->next = ring->start;
+       *(ring->next++) = data;
 }
 
 #endif /* __MSM_RINGBUFFER_H__ */
index e3db669..c99f51e 100644 (file)
@@ -86,7 +86,7 @@ static int msm_smmu_attach(struct msm_mmu *mmu, const char **names, int cnt)
        return 0;
 }
 
-static void msm_smmu_detach(struct msm_mmu *mmu, const char **names, int cnt)
+static void msm_smmu_detach(struct msm_mmu *mmu)
 {
        struct msm_smmu *smmu = to_msm_smmu(mmu);
        struct msm_smmu_client *client = msm_smmu_to_client(smmu);
@@ -104,14 +104,14 @@ static void msm_smmu_detach(struct msm_mmu *mmu, const char **names, int cnt)
        dev_dbg(client->dev, "iommu domain detached\n");
 }
 
-static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova,
+static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova,
                struct sg_table *sgt, int prot)
 {
        struct msm_smmu *smmu = to_msm_smmu(mmu);
        struct msm_smmu_client *client = msm_smmu_to_client(smmu);
        struct iommu_domain *domain;
        struct scatterlist *sg;
-       unsigned int da = iova;
+       uint64_t da = iova;
        unsigned int i, j;
        int ret;
 
@@ -126,7 +126,7 @@ static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova,
                u32 pa = sg_phys(sg) - sg->offset;
                size_t bytes = sg->length + sg->offset;
 
-               VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
+               VERB("map[%d]: %16llx %08x(%zx)", i, iova, pa, bytes);
 
                ret = iommu_map(domain, da, pa, bytes, prot);
                if (ret)
@@ -172,14 +172,14 @@ static void msm_smmu_unmap_sg(struct msm_mmu *mmu, struct sg_table *sgt,
        dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir);
 }
 
-static int msm_smmu_unmap(struct msm_mmu *mmu, uint32_t iova,
+static int msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
                struct sg_table *sgt)
 {
        struct msm_smmu *smmu = to_msm_smmu(mmu);
        struct msm_smmu_client *client = msm_smmu_to_client(smmu);
        struct iommu_domain *domain;
        struct scatterlist *sg;
-       unsigned int da = iova;
+       uint64_t da = iova;
        int i;
 
        if (!client)
@@ -197,7 +197,7 @@ static int msm_smmu_unmap(struct msm_mmu *mmu, uint32_t iova,
                if (unmapped < bytes)
                        return unmapped;
 
-               VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+               VERB("unmap[%d]: %16llx(%zx)", i, iova, bytes);
 
                WARN_ON(!PAGE_ALIGNED(bytes));
 
diff --git a/drivers/gpu/drm/msm/msm_snapshot.c b/drivers/gpu/drm/msm/msm_snapshot.c
new file mode 100644 (file)
index 0000000..30f3e5c
--- /dev/null
@@ -0,0 +1,105 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_gpu.h"
+#include "msm_gem.h"
+#include "msm_snapshot_api.h"
+
+void msm_snapshot_destroy(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+       struct drm_device *dev = gpu->dev;
+       struct msm_drm_private *priv = dev->dev_private;
+       struct platform_device *pdev = priv->gpu_pdev;
+
+       if (!snapshot)
+               return;
+
+       dma_free_coherent(&pdev->dev, SZ_1M, snapshot->ptr,
+               snapshot->physaddr);
+
+       kfree(snapshot);
+}
+
+struct msm_snapshot *msm_snapshot_new(struct msm_gpu *gpu)
+{
+       struct drm_device *dev = gpu->dev;
+       struct msm_drm_private *priv = dev->dev_private;
+       struct platform_device *pdev = priv->gpu_pdev;
+       struct msm_snapshot *snapshot;
+
+       snapshot = kzalloc(sizeof(*snapshot), GFP_KERNEL);
+       if (!snapshot)
+               return ERR_PTR(-ENOMEM);
+
+       snapshot->ptr = dma_alloc_coherent(&pdev->dev, SZ_1M,
+               &snapshot->physaddr, GFP_KERNEL);
+
+       if (!snapshot->ptr) {
+               kfree(snapshot);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       seq_buf_init(&snapshot->buf, snapshot->ptr, SZ_1M);
+
+       return snapshot;
+}
+
+int msm_gpu_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+       int ret;
+       struct msm_snapshot_header header;
+       uint64_t val;
+
+       if (!snapshot)
+               return -ENOMEM;
+
+       /*
+        * For now, blow away the snapshot and take a new one  - the most
+        * interesting hang is the last one we saw
+        */
+       seq_buf_init(&snapshot->buf, snapshot->ptr, SZ_1M);
+
+       header.magic = SNAPSHOT_MAGIC;
+       gpu->funcs->get_param(gpu, MSM_PARAM_GPU_ID, &val);
+       header.gpuid = lower_32_bits(val);
+
+       gpu->funcs->get_param(gpu, MSM_PARAM_CHIP_ID, &val);
+       header.chipid = lower_32_bits(val);
+
+       seq_buf_putmem(&snapshot->buf, &header, sizeof(header));
+
+       ret = gpu->funcs->snapshot(gpu, snapshot);
+
+       if (!ret) {
+               struct msm_snapshot_section_header end;
+
+               end.magic = SNAPSHOT_SECTION_MAGIC;
+               end.id = SNAPSHOT_SECTION_END;
+               end.size = sizeof(end);
+
+               seq_buf_putmem(&snapshot->buf, &end, sizeof(end));
+
+               dev_info(gpu->dev->dev, "GPU snapshot created [0x%pa (%d bytes)]\n",
+                       &snapshot->physaddr, seq_buf_used(&snapshot->buf));
+       }
+
+       return ret;
+}
+
+int msm_snapshot_write(struct msm_gpu *gpu, struct seq_file *m)
+{
+       if (gpu && gpu->snapshot)
+               seq_write(m, gpu->snapshot->ptr,
+                       seq_buf_used(&gpu->snapshot->buf));
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/msm/msm_snapshot.h b/drivers/gpu/drm/msm/msm_snapshot.h
new file mode 100644 (file)
index 0000000..247e135
--- /dev/null
@@ -0,0 +1,85 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_SNAPSHOT_H_
+#define MSM_SNAPSHOT_H_
+
+#include <linux/string.h>
+#include <linux/seq_buf.h>
+#include "msm_snapshot_api.h"
+
+struct msm_snapshot {
+       void *ptr;
+       struct seq_buf buf;
+       phys_addr_t physaddr;
+       uint32_t index;
+       uint32_t remain;
+       unsigned long timestamp;
+       void *priv;
+};
+
+/* Write a uint32_t value to the next position in the snapshot buffer */
+static inline void SNAPSHOT_WRITE_U32(struct msm_snapshot *snapshot,
+               uint32_t value)
+{
+       seq_buf_putmem(&snapshot->buf, &value, sizeof(value));
+}
+
+/* Copy a block of memory to the next position in the snapshot buffer */
+static inline void SNAPSHOT_MEMCPY(struct msm_snapshot *snapshot, void *src,
+               uint32_t size)
+{
+       if (size)
+               seq_buf_putmem(&snapshot->buf, src, size);
+}
+
+static inline bool _snapshot_header(struct msm_snapshot *snapshot,
+               struct msm_snapshot_section_header *header,
+               u32 headsz, u32 datasz, u32 id)
+{
+       u32 size = headsz + datasz;
+
+       if (seq_buf_buffer_left(&snapshot->buf) <= size)
+               return false;
+
+       /* Write the section header */
+       header->magic = SNAPSHOT_SECTION_MAGIC;
+       header->id = id;
+       header->size = headsz + datasz;
+
+       /* Write the section header */
+       seq_buf_putmem(&snapshot->buf, header, headsz);
+
+       /* The caller will fill in the data from here */
+       return true;
+}
+
+/* SNAPSHOT_HEADER
+ * _snapshot: pointer to struct msm_snapshot
+ * _header: Local variable containing the sub-section header
+ * _id: Section ID to write
+ * _dwords: Size of the data section (in dwords)
+ */
+#define SNAPSHOT_HEADER(_snapshot, _header, _id, _dwords) \
+       _snapshot_header((_snapshot), \
+               (struct msm_snapshot_section_header *) &(header), \
+               sizeof(header), (_dwords) << 2, (_id))
+
+struct msm_gpu;
+
+struct msm_snapshot *msm_snapshot_new(struct msm_gpu *gpu);
+void msm_snapshot_destroy(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+int msm_gpu_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+int msm_snapshot_write(struct msm_gpu *gpu, struct seq_file *m);
+
+#endif
+
diff --git a/drivers/gpu/drm/msm/msm_snapshot_api.h b/drivers/gpu/drm/msm/msm_snapshot_api.h
new file mode 100644 (file)
index 0000000..9f0adb9
--- /dev/null
@@ -0,0 +1,121 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_SNAPSHOT_API_H_
+#define MSM_SNAPSHOT_API_H_
+
+#include <linux/types.h>
+
+/* High word is the magic, low word is the snapshot header version */
+#define SNAPSHOT_MAGIC 0x504D0002
+
+struct msm_snapshot_header {
+       __u32 magic;
+       __u32 gpuid;
+       __u32 chipid;
+} __packed;
+
+#define SNAPSHOT_SECTION_MAGIC 0xABCD
+
+struct msm_snapshot_section_header {
+       __u16 magic;
+       __u16 id;
+       __u32 size;
+} __packed;
+
+/* Section identifiers */
+#define SNAPSHOT_SECTION_OS            0x0101
+#define SNAPSHOT_SECTION_REGS_V2       0x0202
+#define SNAPSHOT_SECTION_RB_V2         0x0302
+#define SNAPSHOT_SECTION_IB_V2         0x0402
+#define SNAPSHOT_SECTION_INDEXED_REGS  0x0501
+#define SNAPSHOT_SECTION_DEBUG         0x0901
+#define SNAPSHOT_SECTION_DEBUGBUS      0x0A01
+#define SNAPSHOT_SECTION_GPU_OBJECT_V2 0x0B02
+#define SNAPSHOT_SECTION_MEMLIST_V2    0x0E02
+#define SNAPSHOT_SECTION_SHADER                0x1201
+#define SNAPSHOT_SECTION_END           0xFFFF
+
+#define SNAPSHOT_OS_LINUX_V3          0x00000202
+
+struct msm_snapshot_linux {
+       struct msm_snapshot_section_header header;
+       int osid;
+       __u32 seconds;
+       __u32 power_flags;
+       __u32 power_level;
+       __u32 power_interval_timeout;
+       __u32 grpclk;
+       __u32 busclk;
+       __u64 ptbase;
+       __u32 pid;
+       __u32 current_context;
+       __u32 ctxtcount;
+       unsigned char release[32];
+       unsigned char version[32];
+       unsigned char comm[16];
+} __packed;
+
+struct msm_snapshot_ringbuffer {
+       struct msm_snapshot_section_header header;
+       int start;
+       int end;
+       int rbsize;
+       int wptr;
+       int rptr;
+       int count;
+       __u32 timestamp_queued;
+       __u32 timestamp_retired;
+       __u64 gpuaddr;
+       __u32 id;
+} __packed;
+
+struct msm_snapshot_regs {
+       struct msm_snapshot_section_header header;
+       __u32 count;
+} __packed;
+
+struct msm_snapshot_indexed_regs {
+       struct msm_snapshot_section_header header;
+       __u32 index_reg;
+       __u32 data_reg;
+       __u32 start;
+       __u32 count;
+} __packed;
+
+#define SNAPSHOT_DEBUG_CP_MEQ          7
+#define SNAPSHOT_DEBUG_CP_PM4_RAM      8
+#define SNAPSHOT_DEBUG_CP_PFP_RAM      9
+#define SNAPSHOT_DEBUG_CP_ROQ          10
+#define SNAPSHOT_DEBUG_SHADER_MEMORY   11
+#define SNAPSHOT_DEBUG_CP_MERCIU       12
+
+struct msm_snapshot_debug {
+       struct msm_snapshot_section_header header;
+       __u32 type;
+       __u32 size;
+} __packed;
+
+struct msm_snapshot_debugbus {
+       struct msm_snapshot_section_header header;
+       __u32 id;
+       __u32 count;
+} __packed;
+
+struct msm_snapshot_shader {
+       struct msm_snapshot_section_header header;
+       __u32 type;
+       __u32 index;
+       __u32 size;
+} __packed;
+
+#endif
index ac9997c..31cf25a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -65,8 +65,12 @@ static void sde_connector_destroy(struct drm_connector *connector)
 
        c_conn = to_sde_connector(connector);
 
+       if (c_conn->ops.pre_deinit)
+               c_conn->ops.pre_deinit(connector, c_conn->display);
+
        if (c_conn->blob_caps)
                drm_property_unreference_blob(c_conn->blob_caps);
+
        msm_property_destroy(&c_conn->property_info);
 
        drm_connector_unregister(connector);
@@ -88,8 +92,7 @@ static void _sde_connector_destroy_fb(struct sde_connector *c_conn,
                return;
        }
 
-       msm_framebuffer_cleanup(c_state->out_fb,
-                       c_state->mmu_id);
+       msm_framebuffer_cleanup(c_state->out_fb, c_state->aspace);
        drm_framebuffer_unreference(c_state->out_fb);
        c_state->out_fb = NULL;
 
@@ -193,7 +196,7 @@ sde_connector_atomic_duplicate_state(struct drm_connector *connector)
        if (c_state->out_fb) {
                drm_framebuffer_reference(c_state->out_fb);
                rc = msm_framebuffer_prepare(c_state->out_fb,
-                               c_state->mmu_id);
+                               c_state->aspace);
                if (rc)
                        SDE_ERROR("failed to prepare fb, %d\n", rc);
        }
@@ -241,14 +244,14 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector,
                        rc = -EFAULT;
                } else {
                        if (c_state->out_fb->flags & DRM_MODE_FB_SECURE)
-                               c_state->mmu_id =
-                               c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE];
+                               c_state->aspace =
+                               c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE];
                        else
-                               c_state->mmu_id =
-                               c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE];
+                               c_state->aspace =
+                               c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
 
                        rc = msm_framebuffer_prepare(c_state->out_fb,
-                                       c_state->mmu_id);
+                                       c_state->aspace);
                        if (rc)
                                SDE_ERROR("prep fb failed, %d\n", rc);
                }
@@ -492,18 +495,17 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
        c_conn->panel = panel;
        c_conn->display = display;
 
-       /* cache mmu_id's for later */
        sde_kms = to_sde_kms(priv->kms);
        if (sde_kms->vbif[VBIF_NRT]) {
-               c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
-                       sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE];
-               c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
-                       sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE];
+               c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+                       sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+               c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+                       sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
        } else {
-               c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
-                       sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
-               c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
-                       sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE];
+               c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+                       sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+               c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+                       sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
        }
 
        if (ops)
index 9580282..3f26ee7 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -44,6 +44,15 @@ struct sde_connector_ops {
                        void *display);
 
        /**
+        * pre_deinit - perform additional deinitialization steps
+        * @connector: Pointer to drm connector structure
+        * @display: Pointer to private display handle
+        * Returns: Zero on success
+        */
+       int (*pre_deinit)(struct drm_connector *connector,
+                       void *display);
+
+       /**
         * detect - determine if connector is connected
         * @connector: Pointer to drm connector structure
         * @force: Force detect setting from drm framework
@@ -140,7 +149,7 @@ struct sde_connector {
        struct drm_panel *panel;
        void *display;
 
-       int mmu_id[SDE_IOMMU_DOMAIN_MAX];
+       struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
 
        char name[SDE_CONNECTOR_NAME_SIZE];
 
@@ -195,13 +204,13 @@ struct sde_connector {
  * struct sde_connector_state - private connector status structure
  * @base: Base drm connector structure
  * @out_fb: Pointer to output frame buffer, if applicable
- * @mmu_id: MMU ID for accessing frame buffer objects, if applicable
+ * @aspace: Address space for accessing frame buffer objects, if applicable
  * @property_values: Local cache of current connector property values
  */
 struct sde_connector_state {
        struct drm_connector_state base;
        struct drm_framebuffer *out_fb;
-       int mmu_id;
+       struct msm_gem_address_space *aspace;
        uint64_t property_values[CONNECTOR_PROP_COUNT];
 };
 
index b2853e8..dbfc2dd 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -432,6 +432,99 @@ void sde_core_irq_uninstall(struct sde_kms *sde_kms)
        sde_kms->irq_obj.total_irqs = 0;
 }
 
+static void sde_hw_irq_mask(struct irq_data *irqd)
+{
+       struct sde_kms *sde_kms;
+
+       if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
+               SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
+               return;
+       }
+       sde_kms = irq_data_get_irq_chip_data(irqd);
+
+       smp_mb__before_atomic();
+       clear_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
+       smp_mb__after_atomic();
+}
+
+static void sde_hw_irq_unmask(struct irq_data *irqd)
+{
+       struct sde_kms *sde_kms;
+
+       if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
+               SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
+               return;
+       }
+       sde_kms = irq_data_get_irq_chip_data(irqd);
+
+       smp_mb__before_atomic();
+       set_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
+       smp_mb__after_atomic();
+}
+
+static struct irq_chip sde_hw_irq_chip = {
+       .name = "sde",
+       .irq_mask = sde_hw_irq_mask,
+       .irq_unmask = sde_hw_irq_unmask,
+};
+
+static int sde_hw_irqdomain_map(struct irq_domain *domain,
+               unsigned int irq, irq_hw_number_t hwirq)
+{
+       struct sde_kms *sde_kms;
+       int rc;
+
+       if (!domain || !domain->host_data) {
+               SDE_ERROR("invalid parameters domain %d\n", domain != 0);
+               return -EINVAL;
+       }
+       sde_kms = domain->host_data;
+
+       irq_set_chip_and_handler(irq, &sde_hw_irq_chip, handle_level_irq);
+       rc = irq_set_chip_data(irq, sde_kms);
+
+       return rc;
+}
+
+static struct irq_domain_ops sde_hw_irqdomain_ops = {
+       .map = sde_hw_irqdomain_map,
+       .xlate = irq_domain_xlate_onecell,
+};
+
+int sde_core_irq_domain_add(struct sde_kms *sde_kms)
+{
+       struct device *dev;
+       struct irq_domain *domain;
+
+       if (!sde_kms->dev || !sde_kms->dev->dev) {
+               pr_err("invalid device handles\n");
+               return -EINVAL;
+       }
+
+       dev = sde_kms->dev->dev;
+
+       domain = irq_domain_add_linear(dev->of_node, 32,
+                       &sde_hw_irqdomain_ops, sde_kms);
+       if (!domain) {
+               pr_err("failed to add irq_domain\n");
+               return -EINVAL;
+       }
+
+       sde_kms->irq_controller.enabled_mask = 0;
+       sde_kms->irq_controller.domain = domain;
+
+       return 0;
+}
+
+int sde_core_irq_domain_fini(struct sde_kms *sde_kms)
+{
+       if (sde_kms->irq_controller.domain) {
+               irq_domain_remove(sde_kms->irq_controller.domain);
+               sde_kms->irq_controller.domain = NULL;
+       }
+       return 0;
+}
+
 irqreturn_t sde_core_irq(struct sde_kms *sde_kms)
 {
        /*
index 92642e7..ee1b9bd 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -38,6 +38,20 @@ int sde_core_irq_postinstall(struct sde_kms *sde_kms);
 void sde_core_irq_uninstall(struct sde_kms *sde_kms);
 
 /**
+ * sde_core_irq_domain_add - Add core IRQ domain for SDE
+ * @sde_kms:           SDE handle
+ * @return:            0 if success; error code otherwise
+ */
+int sde_core_irq_domain_add(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq_domain_fini - uninstall core IRQ domain
+ * @sde_kms:           SDE handle
+ * @return:            0 if success; error code otherwise
+ */
+int sde_core_irq_domain_fini(struct sde_kms *sde_kms);
+
+/**
  * sde_core_irq - core IRQ handler
  * @sde_kms:           SDE handle
  * @return:            interrupt handling status
index ed4b7be..2205dd9 100644 (file)
@@ -264,7 +264,7 @@ struct sde_encoder_phys_cmd {
  * @wb_fmt:            Writeback pixel format
  * @frame_count:       Counter of completed writeback operations
  * @kickoff_count:     Counter of issued writeback operations
- * @mmu_id:            mmu identifier for non-secure/secure domain
+ * @aspace:            address space identifier for non-secure/secure domain
  * @wb_dev:            Pointer to writeback device
  * @start_time:                Start time of writeback latest request
  * @end_time:          End time of writeback latest request
@@ -285,7 +285,7 @@ struct sde_encoder_phys_wb {
        const struct sde_format *wb_fmt;
        u32 frame_count;
        u32 kickoff_count;
-       int mmu_id[SDE_IOMMU_DOMAIN_MAX];
+       struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
        struct sde_wb_device *wb_dev;
        ktime_t start_time;
        ktime_t end_time;
index 9943e39..9368c49 100644 (file)
@@ -180,7 +180,8 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
        struct sde_hw_wb *hw_wb;
        struct sde_hw_wb_cfg *wb_cfg;
        const struct msm_format *format;
-       int ret, mmu_id;
+       int ret;
+       struct msm_gem_address_space *aspace;
 
        if (!phys_enc) {
                SDE_ERROR("invalid encoder\n");
@@ -193,9 +194,9 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
 
        wb_cfg->intf_mode = phys_enc->intf_mode;
        wb_cfg->is_secure = (fb->flags & DRM_MODE_FB_SECURE) ? true : false;
-       mmu_id = (wb_cfg->is_secure) ?
-                       wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] :
-                       wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE];
+       aspace = (wb_cfg->is_secure) ?
+                       wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] :
+                       wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
 
        SDE_DEBUG("[fb_secure:%d]\n", wb_cfg->is_secure);
 
@@ -217,7 +218,7 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
        wb_cfg->roi = *wb_roi;
 
        if (hw_wb->caps->features & BIT(SDE_WB_XY_ROI_OFFSET)) {
-               ret = sde_format_populate_layout(mmu_id, fb, &wb_cfg->dest);
+               ret = sde_format_populate_layout(aspace, fb, &wb_cfg->dest);
                if (ret) {
                        SDE_DEBUG("failed to populate layout %d\n", ret);
                        return;
@@ -226,7 +227,7 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
                wb_cfg->dest.height = fb->height;
                wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
        } else {
-               ret = sde_format_populate_layout_with_roi(mmu_id, fb, wb_roi,
+               ret = sde_format_populate_layout_with_roi(aspace, fb, wb_roi,
                        &wb_cfg->dest);
                if (ret) {
                        /* this error should be detected during atomic_check */
@@ -1017,15 +1018,15 @@ struct sde_encoder_phys *sde_encoder_phys_wb_init(
        phys_enc = &wb_enc->base;
 
        if (p->sde_kms->vbif[VBIF_NRT]) {
-               wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
-                       p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE];
-               wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
-                       p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE];
+               wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+                       p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+               wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+                       p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
        } else {
-               wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
-                       p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
-               wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
-                       p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE];
+               wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+                       p->sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+               wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+                       p->sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
        }
 
        hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
index 41180f5..42bbbdc 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -630,7 +630,7 @@ static int _sde_format_get_plane_sizes(
 }
 
 static int _sde_format_populate_addrs_ubwc(
-               int mmu_id,
+               struct msm_gem_address_space *aspace,
                struct drm_framebuffer *fb,
                struct sde_hw_fmt_layout *layout)
 {
@@ -641,7 +641,7 @@ static int _sde_format_populate_addrs_ubwc(
                return -EINVAL;
        }
 
-       base_addr = msm_framebuffer_iova(fb, mmu_id, 0);
+       base_addr = msm_framebuffer_iova(fb, aspace, 0);
        if (!base_addr) {
                DRM_ERROR("failed to retrieve base addr\n");
                return -EFAULT;
@@ -711,7 +711,7 @@ static int _sde_format_populate_addrs_ubwc(
 }
 
 static int _sde_format_populate_addrs_linear(
-               int mmu_id,
+               struct msm_gem_address_space *aspace,
                struct drm_framebuffer *fb,
                struct sde_hw_fmt_layout *layout)
 {
@@ -728,7 +728,7 @@ static int _sde_format_populate_addrs_linear(
 
        /* Populate addresses for simple formats here */
        for (i = 0; i < layout->num_planes; ++i) {
-               layout->plane_addr[i] = msm_framebuffer_iova(fb, mmu_id, i);
+               layout->plane_addr[i] = msm_framebuffer_iova(fb, aspace, i);
                if (!layout->plane_addr[i]) {
                        DRM_ERROR("failed to retrieve base addr\n");
                        return -EFAULT;
@@ -739,7 +739,7 @@ static int _sde_format_populate_addrs_linear(
 }
 
 int sde_format_populate_layout(
-               int mmu_id,
+               struct msm_gem_address_space *aspace,
                struct drm_framebuffer *fb,
                struct sde_hw_fmt_layout *layout)
 {
@@ -770,9 +770,9 @@ int sde_format_populate_layout(
 
        /* Populate the addresses given the fb */
        if (SDE_FORMAT_IS_UBWC(layout->format))
-               ret = _sde_format_populate_addrs_ubwc(mmu_id, fb, layout);
+               ret = _sde_format_populate_addrs_ubwc(aspace, fb, layout);
        else
-               ret = _sde_format_populate_addrs_linear(mmu_id, fb, layout);
+               ret = _sde_format_populate_addrs_linear(aspace, fb, layout);
 
        /* check if anything changed */
        if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
@@ -814,14 +814,14 @@ static void _sde_format_calc_offset_linear(struct sde_hw_fmt_layout *source,
 }
 
 int sde_format_populate_layout_with_roi(
-               int mmu_id,
+               struct msm_gem_address_space *aspace,
                struct drm_framebuffer *fb,
                struct sde_rect *roi,
                struct sde_hw_fmt_layout *layout)
 {
        int ret;
 
-       ret = sde_format_populate_layout(mmu_id, fb, layout);
+       ret = sde_format_populate_layout(aspace, fb, layout);
        if (ret || !roi)
                return ret;
 
index 5dcdfbb..0de081d 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
 #define _SDE_FORMATS_H
 
 #include <drm/drm_fourcc.h>
+#include "msm_gem.h"
 #include "sde_hw_mdss.h"
 
 /**
@@ -76,7 +77,7 @@ int sde_format_check_modified_format(
 /**
  * sde_format_populate_layout - populate the given format layout based on
  *                     mmu, fb, and format found in the fb
- * @mmu_id:            mmu id handle
+ * @aspace:            address space pointer
  * @fb:                framebuffer pointer
  * @fmtl:              format layout structure to populate
  *
@@ -84,14 +85,14 @@ int sde_format_check_modified_format(
  *         are the same as before or 0 if new addresses were populated
  */
 int sde_format_populate_layout(
-               int mmu_id,
+               struct msm_gem_address_space *aspace,
                struct drm_framebuffer *fb,
                struct sde_hw_fmt_layout *fmtl);
 
 /**
  * sde_format_populate_layout_with_roi - populate the given format layout
  *                     based on mmu, fb, roi, and format found in the fb
- * @mmu_id:            mmu id handle
+ * @aspace:            mmu id handle
  * @fb:                framebuffer pointer
  * @roi:               region of interest (optional)
  * @fmtl:              format layout structure to populate
@@ -99,7 +100,7 @@ int sde_format_populate_layout(
  * Return: error code on failure, 0 on success
  */
 int sde_format_populate_layout_with_roi(
-               int mmu_id,
+               struct msm_gem_address_space *aspace,
                struct drm_framebuffer *fb,
                struct sde_rect *roi,
                struct sde_hw_fmt_layout *fmtl);
index 909d6df..eeb7a00 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -49,86 +49,14 @@ irqreturn_t sde_irq(struct msm_kms *kms)
        return IRQ_HANDLED;
 }
 
-static void sde_hw_irq_mask(struct irq_data *irqd)
-{
-       struct sde_kms *sde_kms;
-
-       if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
-               SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
-               return;
-       }
-       sde_kms = irq_data_get_irq_chip_data(irqd);
-
-       smp_mb__before_atomic();
-       clear_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
-       smp_mb__after_atomic();
-}
-
-static void sde_hw_irq_unmask(struct irq_data *irqd)
-{
-       struct sde_kms *sde_kms;
-
-       if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
-               SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
-               return;
-       }
-       sde_kms = irq_data_get_irq_chip_data(irqd);
-
-       smp_mb__before_atomic();
-       set_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
-       smp_mb__after_atomic();
-}
-
-static struct irq_chip sde_hw_irq_chip = {
-       .name = "sde",
-       .irq_mask = sde_hw_irq_mask,
-       .irq_unmask = sde_hw_irq_unmask,
-};
-
-static int sde_hw_irqdomain_map(struct irq_domain *domain,
-               unsigned int irq, irq_hw_number_t hwirq)
-{
-       struct sde_kms *sde_kms;
-       int rc;
-
-       if (!domain || !domain->host_data) {
-               SDE_ERROR("invalid parameters domain %d\n", domain != 0);
-               return -EINVAL;
-       }
-       sde_kms = domain->host_data;
-
-       irq_set_chip_and_handler(irq, &sde_hw_irq_chip, handle_level_irq);
-       rc = irq_set_chip_data(irq, sde_kms);
-
-       return rc;
-}
-
-static struct irq_domain_ops sde_hw_irqdomain_ops = {
-       .map = sde_hw_irqdomain_map,
-       .xlate = irq_domain_xlate_onecell,
-};
-
 void sde_irq_preinstall(struct msm_kms *kms)
 {
        struct sde_kms *sde_kms = to_sde_kms(kms);
-       struct device *dev;
-       struct irq_domain *domain;
 
        if (!sde_kms->dev || !sde_kms->dev->dev) {
                pr_err("invalid device handles\n");
                return;
        }
-       dev = sde_kms->dev->dev;
-
-       domain = irq_domain_add_linear(dev->of_node, 32,
-                       &sde_hw_irqdomain_ops, sde_kms);
-       if (!domain) {
-               pr_err("failed to add irq_domain\n");
-               return;
-       }
-
-       sde_kms->irq_controller.enabled_mask = 0;
-       sde_kms->irq_controller.domain = domain;
 
        sde_core_irq_preinstall(sde_kms);
 }
@@ -158,9 +86,5 @@ void sde_irq_uninstall(struct msm_kms *kms)
        }
 
        sde_core_irq_uninstall(sde_kms);
-
-       if (sde_kms->irq_controller.domain) {
-               irq_domain_remove(sde_kms->irq_controller.domain);
-               sde_kms->irq_controller.domain = NULL;
-       }
+       sde_core_irq_domain_fini(sde_kms);
 }
index afe90d1..581918d 100644 (file)
@@ -27,6 +27,7 @@
 #include "dsi_display.h"
 #include "dsi_drm.h"
 #include "sde_wb.h"
+#include "sde_hdmi.h"
 
 #include "sde_kms.h"
 #include "sde_core_irq.h"
@@ -504,8 +505,30 @@ static int _sde_kms_get_displays(struct sde_kms *sde_kms)
                        wb_display_get_displays(sde_kms->wb_displays,
                                        sde_kms->wb_display_count);
        }
+
+       /* hdmi */
+       sde_kms->hdmi_displays = NULL;
+       sde_kms->hdmi_display_count = sde_hdmi_get_num_of_displays();
+       SDE_DEBUG("hdmi display count=%d", sde_kms->hdmi_display_count);
+       if (sde_kms->hdmi_display_count) {
+               sde_kms->hdmi_displays = kcalloc(sde_kms->hdmi_display_count,
+                                 sizeof(void *),
+                                 GFP_KERNEL);
+               if (!sde_kms->hdmi_displays) {
+                       SDE_ERROR("failed to allocate hdmi displays\n");
+                       goto exit_deinit_hdmi;
+               }
+               sde_kms->hdmi_display_count =
+                       sde_hdmi_get_displays(sde_kms->hdmi_displays,
+                               sde_kms->hdmi_display_count);
+       }
+
        return 0;
 
+exit_deinit_hdmi:
+       sde_kms->hdmi_display_count = 0;
+       sde_kms->hdmi_displays = NULL;
+
 exit_deinit_wb:
        kfree(sde_kms->wb_displays);
        sde_kms->wb_display_count = 0;
@@ -528,6 +551,9 @@ static void _sde_kms_release_displays(struct sde_kms *sde_kms)
                SDE_ERROR("invalid sde kms\n");
                return;
        }
+       kfree(sde_kms->hdmi_displays);
+       sde_kms->hdmi_display_count = 0;
+       sde_kms->hdmi_displays = NULL;
 
        kfree(sde_kms->wb_displays);
        sde_kms->wb_displays = NULL;
@@ -565,18 +591,30 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
                .set_property = sde_wb_connector_set_property,
                .get_info =     sde_wb_get_info,
        };
-       struct msm_display_info info;
+       static const struct sde_connector_ops hdmi_ops = {
+               .pre_deinit = sde_hdmi_connector_pre_deinit,
+               .post_init =  sde_hdmi_connector_post_init,
+               .detect =     sde_hdmi_connector_detect,
+               .get_modes =  sde_hdmi_connector_get_modes,
+               .mode_valid = sde_hdmi_mode_valid,
+               .get_info =   sde_hdmi_get_info,
+       };
+       struct msm_display_info info = {0};
        struct drm_encoder *encoder;
        void *display, *connector;
        int i, max_encoders;
        int rc = 0;
+       int connector_poll;
 
        if (!dev || !priv || !sde_kms) {
                SDE_ERROR("invalid argument(s)\n");
                return -EINVAL;
        }
 
-       max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count;
+       max_encoders = sde_kms->dsi_display_count +
+               sde_kms->wb_display_count +
+               sde_kms->hdmi_display_count;
+
        if (max_encoders > ARRAY_SIZE(priv->encoders)) {
                max_encoders = ARRAY_SIZE(priv->encoders);
                SDE_ERROR("capping number of displays to %d", max_encoders);
@@ -666,6 +704,57 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
                }
        }
 
+       /* hdmi */
+       for (i = 0; i < sde_kms->hdmi_display_count &&
+               priv->num_encoders < max_encoders; ++i) {
+               display = sde_kms->hdmi_displays[i];
+               encoder = NULL;
+
+               memset(&info, 0x0, sizeof(info));
+               rc = sde_hdmi_dev_init(display);
+               if (rc) {
+                       SDE_ERROR("hdmi dev_init %d failed\n", i);
+                       continue;
+               }
+               rc = sde_hdmi_get_info(&info, display);
+               if (rc) {
+                       SDE_ERROR("hdmi get_info %d failed\n", i);
+                       continue;
+               }
+               if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
+                       connector_poll = DRM_CONNECTOR_POLL_HPD;
+               else
+                       connector_poll = 0;
+               encoder = sde_encoder_init(dev, &info);
+               if (IS_ERR_OR_NULL(encoder)) {
+                       SDE_ERROR("encoder init failed for hdmi %d\n", i);
+                       continue;
+               }
+
+               rc = sde_hdmi_drm_init(display, encoder);
+               if (rc) {
+                       SDE_ERROR("hdmi drm %d init failed, %d\n", i, rc);
+                       sde_encoder_destroy(encoder);
+                       continue;
+               }
+
+               connector = sde_connector_init(dev,
+                                       encoder,
+                                       0,
+                                       display,
+                                       &hdmi_ops,
+                                       connector_poll,
+                                       DRM_MODE_CONNECTOR_HDMIA);
+               if (connector) {
+                       priv->encoders[priv->num_encoders++] = encoder;
+               } else {
+                       SDE_ERROR("hdmi %d connector init failed\n", i);
+                       sde_hdmi_dev_deinit(display);
+                       sde_hdmi_drm_deinit(display);
+                       sde_encoder_destroy(encoder);
+               }
+       }
+
        return 0;
 }
 
@@ -726,6 +815,9 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
        priv = dev->dev_private;
        catalog = sde_kms->catalog;
 
+       ret = sde_core_irq_domain_add(sde_kms);
+       if (ret)
+               goto fail_irq;
        /*
         * Query for underlying display drivers, and create connectors,
         * bridges and encoders for them.
@@ -784,6 +876,8 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
        return 0;
 fail:
        _sde_kms_drm_obj_destroy(sde_kms);
+fail_irq:
+       sde_core_irq_domain_fini(sde_kms);
        return ret;
 }
 
@@ -940,17 +1034,16 @@ static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
        struct msm_mmu *mmu;
        int i;
 
-       for (i = ARRAY_SIZE(sde_kms->mmu_id) - 1; i >= 0; i--) {
-               if (!sde_kms->mmu[i])
+       for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
+               if (!sde_kms->aspace[i])
                        continue;
 
-               mmu = sde_kms->mmu[i];
-               msm_unregister_mmu(sde_kms->dev, mmu);
-               mmu->funcs->detach(mmu, (const char **)iommu_ports,
-                               ARRAY_SIZE(iommu_ports));
-               mmu->funcs->destroy(mmu);
-               sde_kms->mmu[i] = 0;
-               sde_kms->mmu_id[i] = 0;
+               mmu = sde_kms->aspace[i]->mmu;
+
+               mmu->funcs->detach(mmu);
+               msm_gem_address_space_put(sde_kms->aspace[i]);
+
+               sde_kms->aspace[i] = NULL;
        }
 
        return 0;
@@ -962,6 +1055,8 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
        int i, ret;
 
        for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
+               struct msm_gem_address_space *aspace;
+
                mmu = msm_smmu_new(sde_kms->dev->dev, i);
                if (IS_ERR(mmu)) {
                        /* MMU's can be optional depending on platform */
@@ -971,25 +1066,24 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
                        continue;
                }
 
-               ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
-                               ARRAY_SIZE(iommu_ports));
-               if (ret) {
-                       SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
+               aspace = msm_gem_smmu_address_space_create(sde_kms->dev->dev,
+                       mmu, "sde");
+               if (IS_ERR(aspace)) {
+                       ret = PTR_ERR(aspace);
                        mmu->funcs->destroy(mmu);
                        goto fail;
                }
 
-               sde_kms->mmu_id[i] = msm_register_mmu(sde_kms->dev, mmu);
-               if (sde_kms->mmu_id[i] < 0) {
-                       ret = sde_kms->mmu_id[i];
-                       SDE_ERROR("failed to register sde iommu %d: %d\n",
-                                       i, ret);
-                       mmu->funcs->detach(mmu, (const char **)iommu_ports,
-                                       ARRAY_SIZE(iommu_ports));
+               sde_kms->aspace[i] = aspace;
+
+               ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
+                               ARRAY_SIZE(iommu_ports));
+               if (ret) {
+                       SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
+                       msm_gem_address_space_put(aspace);
                        goto fail;
                }
 
-               sde_kms->mmu[i] = mmu;
        }
 
        return 0;
@@ -1134,6 +1228,14 @@ static int sde_kms_hw_init(struct msm_kms *kms)
                goto perf_err;
        }
 
+       sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
+       if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
+               rc = PTR_ERR(sde_kms->hw_intr);
+               SDE_ERROR("hw_intr init failed: %d\n", rc);
+               sde_kms->hw_intr = NULL;
+               goto hw_intr_init_err;
+       }
+
        /*
         * _sde_kms_drm_obj_init should create the DRM related objects
         * i.e. CRTCs, planes, encoders, connectors and so forth
@@ -1159,21 +1261,12 @@ static int sde_kms_hw_init(struct msm_kms *kms)
         */
        dev->mode_config.allow_fb_modifiers = true;
 
-       sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
-       if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
-               rc = PTR_ERR(sde_kms->hw_intr);
-               SDE_ERROR("hw_intr init failed: %d\n", rc);
-               sde_kms->hw_intr = NULL;
-               goto hw_intr_init_err;
-       }
-
        sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
        return 0;
 
-hw_intr_init_err:
-       _sde_kms_drm_obj_destroy(sde_kms);
 drm_obj_init_err:
        sde_core_perf_destroy(&sde_kms->perf);
+hw_intr_init_err:
 perf_err:
 power_error:
        sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
index bf127ff..44f6be9 100644 (file)
@@ -22,6 +22,7 @@
 #include "msm_drv.h"
 #include "msm_kms.h"
 #include "msm_mmu.h"
+#include "msm_gem.h"
 #include "sde_dbg.h"
 #include "sde_hw_catalog.h"
 #include "sde_hw_ctl.h"
@@ -121,8 +122,7 @@ struct sde_kms {
        int core_rev;
        struct sde_mdss_cfg *catalog;
 
-       struct msm_mmu *mmu[MSM_SMMU_DOMAIN_MAX];
-       int mmu_id[MSM_SMMU_DOMAIN_MAX];
+       struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX];
        struct sde_power_client *core_client;
 
        /* directory entry for debugfs */
@@ -154,8 +154,9 @@ struct sde_kms {
        void **dsi_displays;
        int wb_display_count;
        void **wb_displays;
-
        bool has_danger_ctrl;
+       void **hdmi_displays;
+       int hdmi_display_count;
 };
 
 struct vsync_info {
index b3de453..114acfd 100644 (file)
@@ -86,7 +86,7 @@ enum sde_plane_qos {
 struct sde_plane {
        struct drm_plane base;
 
-       int mmu_id;
+       struct msm_gem_address_space *aspace;
 
        struct mutex lock;
 
@@ -580,7 +580,7 @@ static inline void _sde_plane_set_scanout(struct drm_plane *plane,
                return;
        }
 
-       ret = sde_format_populate_layout(psde->mmu_id, fb, &pipe_cfg->layout);
+       ret = sde_format_populate_layout(psde->aspace, fb, &pipe_cfg->layout);
        if (ret == -EAGAIN)
                SDE_DEBUG_PLANE(psde, "not updating same src addrs\n");
        else if (ret)
@@ -1285,7 +1285,7 @@ static int sde_plane_prepare_fb(struct drm_plane *plane,
                return 0;
 
        SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
-       return msm_framebuffer_prepare(fb, psde->mmu_id);
+       return msm_framebuffer_prepare(fb, psde->aspace);
 }
 
 static void sde_plane_cleanup_fb(struct drm_plane *plane,
@@ -1294,11 +1294,11 @@ static void sde_plane_cleanup_fb(struct drm_plane *plane,
        struct drm_framebuffer *fb = old_state ? old_state->fb : NULL;
        struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
 
-       if (!fb)
+       if (!fb || !psde)
                return;
 
        SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
-       msm_framebuffer_cleanup(fb, psde->mmu_id);
+       msm_framebuffer_cleanup(fb, psde->aspace);
 }
 
 static void _sde_plane_atomic_check_mode_changed(struct sde_plane *psde,
@@ -2384,7 +2384,7 @@ struct drm_plane *sde_plane_init(struct drm_device *dev,
        /* cache local stuff for later */
        plane = &psde->base;
        psde->pipe = pipe;
-       psde->mmu_id = kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
+       psde->aspace = kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
 
        /* initialize underlying h/w driver */
        psde->pipe_hw = sde_hw_sspp_init(pipe, kms->mmio, kms->catalog);
diff --git a/drivers/gpu/drm/msm/sde_io_util.c b/drivers/gpu/drm/msm/sde_io_util.c
new file mode 100644 (file)
index 0000000..70a4225
--- /dev/null
@@ -0,0 +1,502 @@
+/* Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+#include <linux/sde_io_util.h>
+
+#define MAX_I2C_CMDS  16
+void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug)
+{
+       u32 in_val;
+
+       if (!io || !io->base) {
+               DEV_ERR("%pS->%s: invalid input\n",
+                       __builtin_return_address(0), __func__);
+               return;
+       }
+
+       if (offset > io->len) {
+               DEV_ERR("%pS->%s: offset out of range\n",
+                       __builtin_return_address(0), __func__);
+               return;
+       }
+
+       writel_relaxed(value, io->base + offset);
+       if (debug) {
+               in_val = readl_relaxed(io->base + offset);
+               DEV_DBG("[%08x] => %08x [%08x]\n",
+                       (u32)(unsigned long)(io->base + offset),
+                       value, in_val);
+       }
+} /* dss_reg_w */
+EXPORT_SYMBOL(dss_reg_w);
+
+u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug)
+{
+       u32 value;
+
+       if (!io || !io->base) {
+               DEV_ERR("%pS->%s: invalid input\n",
+                       __builtin_return_address(0), __func__);
+               return -EINVAL;
+       }
+
+       if (offset > io->len) {
+               DEV_ERR("%pS->%s: offset out of range\n",
+                       __builtin_return_address(0), __func__);
+               return -EINVAL;
+       }
+
+       value = readl_relaxed(io->base + offset);
+       if (debug)
+               DEV_DBG("[%08x] <= %08x\n",
+                       (u32)(unsigned long)(io->base + offset), value);
+
+       return value;
+} /* dss_reg_r */
+EXPORT_SYMBOL(dss_reg_r);
+
+void dss_reg_dump(void __iomem *base, u32 length, const char *prefix,
+       u32 debug)
+{
+       if (debug)
+               print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 32, 4,
+                       (void *)base, length, false);
+} /* dss_reg_dump */
+EXPORT_SYMBOL(dss_reg_dump);
+
+static struct resource *msm_dss_get_res_byname(struct platform_device *pdev,
+       unsigned int type, const char *name)
+{
+       struct resource *res = NULL;
+
+       res = platform_get_resource_byname(pdev, type, name);
+       if (!res)
+               DEV_ERR("%s: '%s' resource not found\n", __func__, name);
+
+       return res;
+} /* msm_dss_get_res_byname */
+EXPORT_SYMBOL(msm_dss_get_res_byname);
+
+int msm_dss_ioremap_byname(struct platform_device *pdev,
+       struct dss_io_data *io_data, const char *name)
+{
+       struct resource *res = NULL;
+
+       if (!pdev || !io_data) {
+               DEV_ERR("%pS->%s: invalid input\n",
+                       __builtin_return_address(0), __func__);
+               return -EINVAL;
+       }
+
+       res = msm_dss_get_res_byname(pdev, IORESOURCE_MEM, name);
+       if (!res) {
+               DEV_ERR("%pS->%s: '%s' msm_dss_get_res_byname failed\n",
+                       __builtin_return_address(0), __func__, name);
+               return -ENODEV;
+       }
+
+       io_data->len = (u32)resource_size(res);
+       io_data->base = ioremap(res->start, io_data->len);
+       if (!io_data->base) {
+               DEV_ERR("%pS->%s: '%s' ioremap failed\n",
+                       __builtin_return_address(0), __func__, name);
+               return -EIO;
+       }
+
+       return 0;
+} /* msm_dss_ioremap_byname */
+EXPORT_SYMBOL(msm_dss_ioremap_byname);
+
+void msm_dss_iounmap(struct dss_io_data *io_data)
+{
+       if (!io_data) {
+               DEV_ERR("%pS->%s: invalid input\n",
+                       __builtin_return_address(0), __func__);
+               return;
+       }
+
+       if (io_data->base) {
+               iounmap(io_data->base);
+               io_data->base = NULL;
+       }
+       io_data->len = 0;
+} /* msm_dss_iounmap */
+EXPORT_SYMBOL(msm_dss_iounmap);
+
+int msm_dss_config_vreg(struct device *dev, struct dss_vreg *in_vreg,
+       int num_vreg, int config)
+{
+       int i = 0, rc = 0;
+       struct dss_vreg *curr_vreg = NULL;
+       enum dss_vreg_type type;
+
+       if (!in_vreg || !num_vreg)
+               return rc;
+
+       if (config) {
+               for (i = 0; i < num_vreg; i++) {
+                       curr_vreg = &in_vreg[i];
+                       curr_vreg->vreg = regulator_get(dev,
+                               curr_vreg->vreg_name);
+                       rc = PTR_RET(curr_vreg->vreg);
+                       if (rc) {
+                               DEV_ERR("%pS->%s: %s get failed. rc=%d\n",
+                                        __builtin_return_address(0), __func__,
+                                        curr_vreg->vreg_name, rc);
+                               curr_vreg->vreg = NULL;
+                               goto vreg_get_fail;
+                       }
+                       type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+                                       ? DSS_REG_LDO : DSS_REG_VS;
+                       if (type == DSS_REG_LDO) {
+                               rc = regulator_set_voltage(
+                                       curr_vreg->vreg,
+                                       curr_vreg->min_voltage,
+                                       curr_vreg->max_voltage);
+                               if (rc < 0) {
+                                       DEV_ERR("%pS->%s: %s set vltg fail\n",
+                                               __builtin_return_address(0),
+                                               __func__,
+                                               curr_vreg->vreg_name);
+                                       goto vreg_set_voltage_fail;
+                               }
+                       }
+               }
+       } else {
+               for (i = num_vreg-1; i >= 0; i--) {
+                       curr_vreg = &in_vreg[i];
+                       if (curr_vreg->vreg) {
+                               type = (regulator_count_voltages(
+                                       curr_vreg->vreg) > 0)
+                                       ? DSS_REG_LDO : DSS_REG_VS;
+                               if (type == DSS_REG_LDO) {
+                                       regulator_set_voltage(curr_vreg->vreg,
+                                               0, curr_vreg->max_voltage);
+                               }
+                               regulator_put(curr_vreg->vreg);
+                               curr_vreg->vreg = NULL;
+                       }
+               }
+       }
+       return 0;
+
+vreg_unconfig:
+if (type == DSS_REG_LDO)
+       regulator_set_load(curr_vreg->vreg, 0);
+
+vreg_set_voltage_fail:
+       regulator_put(curr_vreg->vreg);
+       curr_vreg->vreg = NULL;
+
+vreg_get_fail:
+       for (i--; i >= 0; i--) {
+               curr_vreg = &in_vreg[i];
+               type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+                       ? DSS_REG_LDO : DSS_REG_VS;
+               goto vreg_unconfig;
+       }
+       return rc;
+} /* msm_dss_config_vreg */
+EXPORT_SYMBOL(msm_dss_config_vreg);
+
+int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable)
+{
+       int i = 0, rc = 0;
+       bool need_sleep;
+
+       if (enable) {
+               for (i = 0; i < num_vreg; i++) {
+                       rc = PTR_RET(in_vreg[i].vreg);
+                       if (rc) {
+                               DEV_ERR("%pS->%s: %s regulator error. rc=%d\n",
+                                       __builtin_return_address(0), __func__,
+                                       in_vreg[i].vreg_name, rc);
+                               goto vreg_set_opt_mode_fail;
+                       }
+                       need_sleep = !regulator_is_enabled(in_vreg[i].vreg);
+                       if (in_vreg[i].pre_on_sleep && need_sleep)
+                               usleep_range(in_vreg[i].pre_on_sleep * 1000,
+                                       in_vreg[i].pre_on_sleep * 1000);
+                       rc = regulator_set_load(in_vreg[i].vreg,
+                               in_vreg[i].enable_load);
+                       if (rc < 0) {
+                               DEV_ERR("%pS->%s: %s set opt m fail\n",
+                                       __builtin_return_address(0), __func__,
+                                       in_vreg[i].vreg_name);
+                               goto vreg_set_opt_mode_fail;
+                       }
+                       rc = regulator_enable(in_vreg[i].vreg);
+                       if (in_vreg[i].post_on_sleep && need_sleep)
+                               usleep_range(in_vreg[i].post_on_sleep * 1000,
+                                       in_vreg[i].post_on_sleep * 1000);
+                       if (rc < 0) {
+                               DEV_ERR("%pS->%s: %s enable failed\n",
+                                       __builtin_return_address(0), __func__,
+                                       in_vreg[i].vreg_name);
+                               goto disable_vreg;
+                       }
+               }
+       } else {
+               for (i = num_vreg-1; i >= 0; i--) {
+                       if (in_vreg[i].pre_off_sleep)
+                               usleep_range(in_vreg[i].pre_off_sleep * 1000,
+                                       in_vreg[i].pre_off_sleep * 1000);
+                       regulator_set_load(in_vreg[i].vreg,
+                               in_vreg[i].disable_load);
+                       regulator_disable(in_vreg[i].vreg);
+                       if (in_vreg[i].post_off_sleep)
+                               usleep_range(in_vreg[i].post_off_sleep * 1000,
+                                       in_vreg[i].post_off_sleep * 1000);
+               }
+       }
+       return rc;
+
+disable_vreg:
+       regulator_set_load(in_vreg[i].vreg, in_vreg[i].disable_load);
+
+vreg_set_opt_mode_fail:
+       for (i--; i >= 0; i--) {
+               if (in_vreg[i].pre_off_sleep)
+                       usleep_range(in_vreg[i].pre_off_sleep * 1000,
+                               in_vreg[i].pre_off_sleep * 1000);
+               regulator_set_load(in_vreg[i].vreg,
+                       in_vreg[i].disable_load);
+               regulator_disable(in_vreg[i].vreg);
+               if (in_vreg[i].post_off_sleep)
+                       usleep_range(in_vreg[i].post_off_sleep * 1000,
+                               in_vreg[i].post_off_sleep * 1000);
+       }
+
+       return rc;
+} /* msm_dss_enable_vreg */
+EXPORT_SYMBOL(msm_dss_enable_vreg);
+
+int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable)
+{
+       int i = 0, rc = 0;
+
+       if (enable) {
+               for (i = 0; i < num_gpio; i++) {
+                       DEV_DBG("%pS->%s: %s enable\n",
+                               __builtin_return_address(0), __func__,
+                               in_gpio[i].gpio_name);
+
+                       rc = gpio_request(in_gpio[i].gpio,
+                               in_gpio[i].gpio_name);
+                       if (rc < 0) {
+                               DEV_ERR("%pS->%s: %s enable failed\n",
+                                       __builtin_return_address(0), __func__,
+                                       in_gpio[i].gpio_name);
+                               goto disable_gpio;
+                       }
+                       gpio_set_value(in_gpio[i].gpio, in_gpio[i].value);
+               }
+       } else {
+               for (i = num_gpio-1; i >= 0; i--) {
+                       DEV_DBG("%pS->%s: %s disable\n",
+                               __builtin_return_address(0), __func__,
+                               in_gpio[i].gpio_name);
+                       if (in_gpio[i].gpio)
+                               gpio_free(in_gpio[i].gpio);
+               }
+       }
+       return rc;
+
+disable_gpio:
+       for (i--; i >= 0; i--)
+               if (in_gpio[i].gpio)
+                       gpio_free(in_gpio[i].gpio);
+
+       return rc;
+} /* msm_dss_enable_gpio */
+EXPORT_SYMBOL(msm_dss_enable_gpio);
+
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
+{
+       int i;
+
+       for (i = num_clk - 1; i >= 0; i--) {
+               if (clk_arry[i].clk)
+                       clk_put(clk_arry[i].clk);
+               clk_arry[i].clk = NULL;
+       }
+} /* msm_dss_put_clk */
+EXPORT_SYMBOL(msm_dss_put_clk);
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
+{
+       int i, rc = 0;
+
+       for (i = 0; i < num_clk; i++) {
+               clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
+               rc = PTR_RET(clk_arry[i].clk);
+               if (rc) {
+                       DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
+                               __builtin_return_address(0), __func__,
+                               clk_arry[i].clk_name, rc);
+                       goto error;
+               }
+       }
+
+       return rc;
+
+error:
+       msm_dss_put_clk(clk_arry, num_clk);
+
+       return rc;
+} /* msm_dss_get_clk */
+EXPORT_SYMBOL(msm_dss_get_clk);
+
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
+{
+       int i, rc = 0;
+
+       for (i = 0; i < num_clk; i++) {
+               if (clk_arry[i].clk) {
+                       if (clk_arry[i].type != DSS_CLK_AHB) {
+                               DEV_DBG("%pS->%s: '%s' rate %ld\n",
+                                       __builtin_return_address(0), __func__,
+                                       clk_arry[i].clk_name,
+                                       clk_arry[i].rate);
+                               rc = clk_set_rate(clk_arry[i].clk,
+                                       clk_arry[i].rate);
+                               if (rc) {
+                                       DEV_ERR("%pS->%s: %s failed. rc=%d\n",
+                                               __builtin_return_address(0),
+                                               __func__,
+                                               clk_arry[i].clk_name, rc);
+                                       break;
+                               }
+                       }
+               } else {
+                       DEV_ERR("%pS->%s: '%s' is not available\n",
+                               __builtin_return_address(0), __func__,
+                               clk_arry[i].clk_name);
+                       rc = -EPERM;
+                       break;
+               }
+       }
+
+       return rc;
+} /* msm_dss_clk_set_rate */
+EXPORT_SYMBOL(msm_dss_clk_set_rate);
+
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
+{
+       int i, rc = 0;
+
+       if (enable) {
+               for (i = 0; i < num_clk; i++) {
+                       DEV_DBG("%pS->%s: enable '%s'\n",
+                               __builtin_return_address(0), __func__,
+                               clk_arry[i].clk_name);
+                       if (clk_arry[i].clk) {
+                               rc = clk_prepare_enable(clk_arry[i].clk);
+                               if (rc)
+                                       DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+                                               __builtin_return_address(0),
+                                               __func__,
+                                               clk_arry[i].clk_name, rc);
+                       } else {
+                               DEV_ERR("%pS->%s: '%s' is not available\n",
+                                       __builtin_return_address(0), __func__,
+                                       clk_arry[i].clk_name);
+                               rc = -EPERM;
+                       }
+
+                       if (rc) {
+                               msm_dss_enable_clk(&clk_arry[i],
+                                       i, false);
+                               break;
+                       }
+               }
+       } else {
+               for (i = num_clk - 1; i >= 0; i--) {
+                       DEV_DBG("%pS->%s: disable '%s'\n",
+                               __builtin_return_address(0), __func__,
+                               clk_arry[i].clk_name);
+
+                       if (clk_arry[i].clk)
+                               clk_disable_unprepare(clk_arry[i].clk);
+                       else
+                               DEV_ERR("%pS->%s: '%s' is not available\n",
+                                       __builtin_return_address(0), __func__,
+                                       clk_arry[i].clk_name);
+               }
+       }
+
+       return rc;
+} /* msm_dss_enable_clk */
+EXPORT_SYMBOL(msm_dss_enable_clk);
+
+
+int sde_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+                       uint8_t reg_offset, uint8_t *read_buf)
+{
+       struct i2c_msg msgs[2];
+       int ret = -1;
+
+       pr_debug("%s: reading from slave_addr=[%x] and offset=[%x]\n",
+                __func__, slave_addr, reg_offset);
+
+       msgs[0].addr = slave_addr >> 1;
+       msgs[0].flags = 0;
+       msgs[0].buf = &reg_offset;
+       msgs[0].len = 1;
+
+       msgs[1].addr = slave_addr >> 1;
+       msgs[1].flags = I2C_M_RD;
+       msgs[1].buf = read_buf;
+       msgs[1].len = 1;
+
+       ret = i2c_transfer(client->adapter, msgs, 2);
+       if (ret < 1) {
+               pr_err("%s: I2C READ FAILED=[%d]\n", __func__, ret);
+               return -EACCES;
+       }
+       pr_debug("%s: i2c buf is [%x]\n", __func__, *read_buf);
+       return 0;
+}
+EXPORT_SYMBOL(sde_i2c_byte_read);
+
+int sde_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+                       uint8_t reg_offset, uint8_t *value)
+{
+       struct i2c_msg msgs[1];
+       uint8_t data[2];
+       int status = -EACCES;
+
+       pr_debug("%s: writing from slave_addr=[%x] and offset=[%x]\n",
+                __func__, slave_addr, reg_offset);
+
+       data[0] = reg_offset;
+       data[1] = *value;
+
+       msgs[0].addr = slave_addr >> 1;
+       msgs[0].flags = 0;
+       msgs[0].len = 2;
+       msgs[0].buf = data;
+
+       status = i2c_transfer(client->adapter, msgs, 1);
+       if (status < 1) {
+               pr_err("I2C WRITE FAILED=[%d]\n", status);
+               return -EACCES;
+       }
+       pr_debug("%s: I2C write status=%x\n", __func__, status);
+       return status;
+}
+EXPORT_SYMBOL(sde_i2c_byte_write);
index 3c82a26..ab65283 100644 (file)
@@ -24,7 +24,7 @@
 
 #include <linux/msm-bus.h>
 #include <linux/msm-bus-board.h>
-#include <linux/mdss_io_util.h>
+#include <linux/sde_io_util.h>
 
 #include "sde_power_handle.h"
 #include "sde_trace.h"
index 0d068e9..89218b6 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2327,10 +2327,6 @@ static int adreno_dispatch_process_drawqueue(struct adreno_device *adreno_dev,
        if (adreno_drawqueue_is_empty(drawqueue))
                return count;
 
-       /* Don't update the drawqueue timeout if we are about to preempt out */
-       if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE))
-               return count;
-
        /* Don't update the drawqueue timeout if it isn't active */
        if (!drawqueue_is_current(drawqueue))
                return count;
index 2b227f2..601e7a2 100644 (file)
@@ -4379,8 +4379,9 @@ kgsl_get_unmapped_area(struct file *file, unsigned long addr,
                 val = _get_svm_area(private, entry, addr, len, flags);
                 if (IS_ERR_VALUE(val))
                        KGSL_MEM_ERR(device,
-                               "_get_svm_area: pid %d addr %lx pgoff %lx len %ld failed error %d\n",
-                               private->pid, addr, pgoff, len, (int) val);
+                               "_get_svm_area: pid %d mmap_base %lx addr %lx pgoff %lx len %ld failed error %d\n",
+                               private->pid, current->mm->mmap_base, addr,
+                               pgoff, len, (int) val);
        }
 
 put:
index ed70a98..6e72cda 100644 (file)
@@ -136,6 +136,17 @@ config CORESIGHT_CTI
          hardware component to another. It can also be used to pass
          software generated events.
 
+config CORESIGHT_CTI_SAVE_DISABLE
+       bool "Turn off CTI save and restore"
+       depends on CORESIGHT_CTI
+       help
+         Turns off CoreSight CTI save and restore support for cpu CTIs. This
+         avoids voting for the clocks during probe as well as the associated
+         save and restore latency at the cost of breaking cpu CTI support on
+         targets where cpu CTIs have to be preserved across power collapse.
+
+         If unsure, say 'N' here to avoid breaking cpu CTI support.
+
 config CORESIGHT_TPDA
        bool "CoreSight Trace, Profiling & Diagnostics Aggregator driver"
        help
index f13017c..99de400 100644 (file)
@@ -1128,17 +1128,6 @@ config TOUCHSCREEN_FT5X06_GESTURE
 
          If unsure, say N.
 
-config TOUCHSCREEN_MSTAR21XX
-       tristate "Mstar touchscreens"
-       depends on I2C
-       help
-        Say Y here if you have a mstar touchscreen.
-
-        If unsure, say N.
-
-        To compile this driver as a module, choose M here: the
-        module will be called msg21xx_ts.
-
 config TOUCHSCREEN_ROHM_BU21023
        tristate "ROHM BU21023/24 Dual touch support resistive touchscreens"
        depends on I2C
index 7606fe5..a32132c 100644 (file)
@@ -98,6 +98,5 @@ obj-$(CONFIG_TOUCHSCREEN_TPS6507X)    += tps6507x-ts.o
 obj-$(CONFIG_TOUCHSCREEN_ZFORCE)       += zforce_ts.o
 obj-$(CONFIG_TOUCHSCREEN_COLIBRI_VF50) += colibri-vf50-ts.o
 obj-$(CONFIG_TOUCHSCREEN_ROHM_BU21023) += rohm_bu21023.o
-obj-$(CONFIG_TOUCHSCREEN_MSTAR21XX)    += msg21xx_ts.o
 obj-$(CONFIG_TOUCHSCREEN_GT9XX)                += gt9xx/
 obj-$(CONFIG_TOUCHSCREEN_ST)           += st/
diff --git a/drivers/input/touchscreen/msg21xx_ts.c b/drivers/input/touchscreen/msg21xx_ts.c
deleted file mode 100644 (file)
index fe8c6e1..0000000
+++ /dev/null
@@ -1,2260 +0,0 @@
-/*
- * MStar MSG21XX touchscreen driver
- *
- * Copyright (c) 2006-2012 MStar Semiconductor, Inc.
- *
- * Copyright (C) 2012 Bruce Ding <bruce.ding@mstarsemi.com>
- *
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/input.h>
-#include <linux/input/mt.h>
-#include <linux/interrupt.h>
-#include <linux/i2c.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-#include <linux/sysfs.h>
-#include <linux/init.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/firmware.h>
-#include <linux/debugfs.h>
-#include <linux/regulator/consumer.h>
-
-#if defined(CONFIG_FB)
-#include <linux/notifier.h>
-#include <linux/fb.h>
-#endif
-#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
-#include <linux/input/vir_ps.h>
-#endif
-
-/* Macro Definition */
-
-#define TOUCH_DRIVER_DEBUG 0
-#if (TOUCH_DRIVER_DEBUG == 1)
-#define DBG(fmt, arg...) pr_info(fmt, ##arg)
-#else
-#define DBG(fmt, arg...)
-#endif
-
-/* Constant Value & Variable Definition */
-
-#define MSTAR_VTG_MIN_UV       2800000
-#define MSTAR_VTG_MAX_UV       3300000
-#define MSTAR_I2C_VTG_MIN_UV   1800000
-#define MSTAR_I2C_VTG_MAX_UV   1800000
-
-#define MAX_BUTTONS            4
-#define FT_COORDS_ARR_SIZE     4
-#define MSTAR_FW_NAME_MAX_LEN  50
-
-#define MSTAR_CHIPTOP_REGISTER_BANK    0x1E
-#define MSTAR_CHIPTOP_REGISTER_ICTYPE 0xCC
-#define MSTAR_INIT_SW_ID 0x7FF
-#define MSTAR_DEBUG_DIR_NAME "ts_debug"
-
-#define MSG_FW_FILE_MAJOR_VERSION(x) \
-       (((x)->data[0x7f4f] << 8) + ((x)->data[0x7f4e]))
-
-#define MSG_FW_FILE_MINOR_VERSION(x) \
-       (((x)->data[0x7f51] << 8) + ((x)->data[0x7f50]))
-
-/*
- * Note.
- * Please do not change the below setting.
- */
-#define TPD_WIDTH   (2048)
-#define TPD_HEIGHT  (2048)
-
-#ifdef FIRMWARE_AUTOUPDATE
-enum {
-       SWID_START = 1,
-       SWID_TRULY = SWID_START,
-       SWID_NULL,
-};
-
-static unsigned char MSG_FIRMWARE[1][33*1024] = { {
-               #include "msg21xx_truly_update_bin.h"
-       }
-};
-#endif
-
-#define CONFIG_TP_HAVE_KEY
-#define PINCTRL_STATE_ACTIVE   "pmx_ts_active"
-#define PINCTRL_STATE_SUSPEND  "pmx_ts_suspend"
-#define PINCTRL_STATE_RELEASE  "pmx_ts_release"
-
-#define SLAVE_I2C_ID_DBBUS              (0xC4>>1)
-
-#define DEMO_MODE_PACKET_LENGTH        (8)
-
-#define TP_PRINT
-
-static char *fw_version; /* customer firmware version */
-static unsigned short fw_version_major;
-static unsigned short fw_version_minor;
-static unsigned char temp[94][1024];
-static unsigned int crc32_table[256];
-
-static unsigned short fw_file_major, fw_file_minor;
-static unsigned short main_sw_id = MSTAR_INIT_SW_ID;
-static unsigned short info_sw_id = MSTAR_INIT_SW_ID;
-static unsigned int bin_conf_crc32;
-
-struct msg21xx_ts_platform_data {
-       const char *name;
-       char fw_name[MSTAR_FW_NAME_MAX_LEN];
-       u8 fw_version_major;
-       u8 fw_version_minor;
-       u32 irq_gpio;
-       u32 irq_gpio_flags;
-       u32 reset_gpio;
-       u32 reset_gpio_flags;
-       u32 x_max;
-       u32 y_max;
-       u32 x_min;
-       u32 y_min;
-       u32 panel_minx;
-       u32 panel_miny;
-       u32 panel_maxx;
-       u32 panel_maxy;
-       u32 num_max_touches;
-       bool no_force_update;
-       bool i2c_pull_up;
-       bool ignore_id_check;
-       int (*power_init)(bool);
-       int (*power_on)(bool);
-       int (*power_init)(bool);
-       int (*power_on)(bool);
-       u8 ic_type;
-       u32 button_map[MAX_BUTTONS];
-       u32 num_buttons;
-       u32 hard_reset_delay_ms;
-       u32 post_hard_reset_delay_ms;
-       bool updating_fw;
-};
-
-/* Touch Data Type Definition */
-struct touchPoint_t {
-       unsigned short x;
-       unsigned short y;
-};
-
-struct touchInfo_t {
-       struct touchPoint_t *point;
-       unsigned char count;
-       unsigned char keycode;
-};
-
-struct msg21xx_ts_data {
-       struct i2c_client *client;
-       struct input_dev *input_dev;
-       struct msg21xx_ts_platform_data *pdata;
-       struct regulator *vdd;
-       struct regulator *vcc_i2c;
-       bool suspended;
-#if defined(CONFIG_FB)
-       struct notifier_block fb_notif;
-#endif
-       struct pinctrl *ts_pinctrl;
-       struct pinctrl_state *pinctrl_state_active;
-       struct pinctrl_state *pinctrl_state_suspend;
-       struct pinctrl_state *pinctrl_state_release;
-       struct mutex ts_mutex;
-       struct touchInfo_t info;
-};
-
-#if defined(CONFIG_FB)
-static int fb_notifier_callback(struct notifier_block *self,
-                       unsigned long event, void *data);
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
-static unsigned char bEnableTpProximity;
-static unsigned char bFaceClosingTp;
-#endif
-
-#ifdef TP_PRINT
-static int tp_print_proc_read(struct msg21xx_ts_data *ts_data);
-static void tp_print_create_entry(struct msg21xx_ts_data *ts_data);
-#endif
-
-static void _ReadBinConfig(struct msg21xx_ts_data *ts_data);
-static unsigned int _CalMainCRC32(struct msg21xx_ts_data *ts_data);
-
-static struct mutex msg21xx_mutex;
-
-enum EMEM_TYPE_t {
-       EMEM_ALL = 0,
-       EMEM_MAIN,
-       EMEM_INFO,
-};
-
-/* Function Definition */
-
-static unsigned int _CRC_doReflect(unsigned int ref, signed char ch)
-{
-       unsigned int value = 0;
-       unsigned int i = 0;
-
-       for (i = 1; i < (ch + 1); i++) {
-               if (ref & 1)
-                       value |= 1 << (ch - i);
-               ref >>= 1;
-       }
-
-       return value;
-}
-
-static unsigned int _CRC_getValue(unsigned int text, unsigned int prevCRC)
-{
-       unsigned int ulCRC = prevCRC;
-
-       ulCRC = (ulCRC >> 8) ^ crc32_table[(ulCRC & 0xFF) ^ text];
-
-       return ulCRC;
-}
-
-static void _CRC_initTable(void)
-{
-       unsigned int magic_number = 0x04c11db7;
-       unsigned int i, j;
-
-       for (i = 0; i <= 0xFF; i++) {
-               crc32_table[i] = _CRC_doReflect(i, 8) << 24;
-               for (j = 0; j < 8; j++)
-                       crc32_table[i] = (crc32_table[i] << 1) ^
-                               (crc32_table[i] & (0x80000000L) ?
-                                       magic_number : 0);
-               crc32_table[i] = _CRC_doReflect(crc32_table[i], 32);
-       }
-}
-
-static void msg21xx_reset_hw(struct msg21xx_ts_platform_data *pdata)
-{
-       gpio_direction_output(pdata->reset_gpio, 1);
-       gpio_set_value_cansleep(pdata->reset_gpio, 0);
-       /* Note that the RST must be in LOW 10ms at least */
-       usleep(pdata->hard_reset_delay_ms * 1000);
-       gpio_set_value_cansleep(pdata->reset_gpio, 1);
-       /* Enable the interrupt service thread/routine for INT after 50ms */
-       usleep(pdata->post_hard_reset_delay_ms * 1000);
-}
-
-static int read_i2c_seq(struct msg21xx_ts_data *ts_data, unsigned char addr,
-                       unsigned char *buf, unsigned short size)
-{
-       int rc = 0;
-       struct i2c_msg msgs[] = {
-               {
-                       .addr = addr,
-                       .flags = I2C_M_RD, /* read flag */
-                       .len = size,
-                       .buf = buf,
-               },
-       };
-
-       /* If everything went ok (i.e. 1 msg transmitted), return #bytes
-        * transmitted, else error code.
-        */
-       if (ts_data->client != NULL) {
-               rc = i2c_transfer(ts_data->client->adapter, msgs, 1);
-               if (rc < 0)
-                       dev_err(&ts_data->client->dev,
-                               "%s error %d\n", __func__, rc);
-       } else {
-               dev_err(&ts_data->client->dev, "ts_data->client is NULL\n");
-       }
-
-       return rc;
-}
-
-static int write_i2c_seq(struct msg21xx_ts_data *ts_data, unsigned char addr,
-                       unsigned char *buf, unsigned short size)
-{
-       int rc = 0;
-       struct i2c_msg msgs[] = {
-               {
-                       .addr = addr,
-                       /*
-                        * if read flag is undefined,
-                        * then it means write flag.
-                        */
-                       .flags = 0,
-                       .len = size,
-                       .buf = buf,
-               },
-       };
-
-       /*
-        * If everything went ok (i.e. 1 msg transmitted), return #bytes
-        * transmitted, else error code.
-        */
-       if (ts_data->client != NULL) {
-               rc = i2c_transfer(ts_data->client->adapter, msgs, 1);
-               if (rc < 0)
-                       dev_err(&ts_data->client->dev,
-                               "%s error %d\n", __func__, rc);
-       } else {
-               dev_err(&ts_data->client->dev, "ts_data->client is NULL\n");
-       }
-
-       return rc;
-}
-
-static unsigned short read_reg(struct msg21xx_ts_data *ts_data,
-                                       unsigned char bank, unsigned char addr)
-{
-       unsigned char tx_data[3] = {0x10, bank, addr};
-       unsigned char rx_data[2] = {0};
-
-       write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, tx_data, sizeof(tx_data));
-       read_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, rx_data, sizeof(rx_data));
-
-       return rx_data[1] << 8 | rx_data[0];
-}
-
-static void write_reg(struct msg21xx_ts_data *ts_data, unsigned char bank,
-                               unsigned char addr,
-                                               unsigned short data)
-{
-       unsigned char tx_data[5] = {0x10, bank, addr, data & 0xFF, data >> 8};
-
-       write_i2c_seq(SLAVE_I2C_ID_DBBUS, &tx_data[0], 5);
-       write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, tx_data, sizeof(tx_data));
-}
-
-static void write_reg_8bit(struct msg21xx_ts_data *ts_data, unsigned char bank,
-                               unsigned char addr,
-                                               unsigned char data)
-{
-       unsigned char tx_data[4] = {0x10, bank, addr, data};
-
-       write_i2c_seq(SLAVE_I2C_ID_DBBUS, &tx_data[0], 4);
-       write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, tx_data, sizeof(tx_data));
-}
-
-static void dbbusDWIICEnterSerialDebugMode(struct msg21xx_ts_data *ts_data)
-{
-       unsigned char data[5];
-
-       /* Enter the Serial Debug Mode */
-       data[0] = 0x53;
-       data[1] = 0x45;
-       data[2] = 0x52;
-       data[3] = 0x44;
-       data[4] = 0x42;
-
-       write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, data, sizeof(data));
-}
-
-static void dbbusDWIICStopMCU(struct msg21xx_ts_data *ts_data)
-{
-       unsigned char data[1];
-
-       /* Stop the MCU */
-       data[0] = 0x37;
-
-       write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, data, sizeof(data));
-}
-
-static void dbbusDWIICIICUseBus(struct msg21xx_ts_data *ts_data)
-{
-       unsigned char data[1];
-
-       /* IIC Use Bus */
-       data[0] = 0x35;
-
-       write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, data, sizeof(data));
-}
-
-static void dbbusDWIICIICReshape(struct msg21xx_ts_data *ts_data)
-{
-       unsigned char data[1];
-
-       /* IIC Re-shape */
-       data[0] = 0x71;
-
-       write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, data, sizeof(data));
-}
-
-static unsigned char msg21xx_get_ic_type(struct msg21xx_ts_data *ts_data)
-{
-       unsigned char ic_type = 0;
-       unsigned char bank;
-       unsigned char addr;
-
-       msg21xx_reset_hw(ts_data->pdata);
-       dbbusDWIICEnterSerialDebugMode(ts_data);
-       dbbusDWIICStopMCU(ts_data);
-       dbbusDWIICIICUseBus(ts_data);
-       dbbusDWIICIICReshape(ts_data);
-       msleep(300);
-
-       /* stop mcu */
-       write_reg_8bit(ts_data, 0x0F, 0xE6, 0x01);
-       /* disable watch dog */
-       write_reg(ts_data, 0x3C, 0x60, 0xAA55);
-       /* get ic type */
-       bank = MSTAR_CHIPTOP_REGISTER_BANK;
-       addr = MSTAR_CHIPTOP_REGISTER_ICTYPE;
-       ic_type = (0xff)&(read_reg(ts_data, bank, addr));
-
-       if (ic_type != ts_data->pdata->ic_type)
-               ic_type = 0;
-
-       msg21xx_reset_hw(ts_data->pdata);
-
-       return ic_type;
-}
-
-static int msg21xx_read_firmware_id(struct msg21xx_ts_data *ts_data)
-{
-       unsigned char command[3] = { 0x53, 0x00, 0x2A};
-       unsigned char response[4] = { 0 };
-
-       mutex_lock(&msg21xx_mutex);
-       write_i2c_seq(ts_data, ts_data->client->addr, command, sizeof(command));
-       read_i2c_seq(ts_data, ts_data->client->addr, response,
-                               sizeof(response));
-       mutex_unlock(&msg21xx_mutex);
-       ts_data->pdata->fw_version_major = (response[1]<<8) + response[0];
-       ts_data->pdata->fw_version_minor = (response[3]<<8) + response[2];
-
-       dev_info(&ts_data->client->dev, "major num = %d, minor num = %d\n",
-                       ts_data->pdata->fw_version_major,
-                       ts_data->pdata->fw_version_minor);
-
-       return 0;
-}
-
-static int firmware_erase_c33(struct msg21xx_ts_data *ts_data,
-                                       enum EMEM_TYPE_t emem_type)
-{
-       /* stop mcu */
-       write_reg(ts_data, 0x0F, 0xE6, 0x0001);
-
-       /* disable watch dog */
-       write_reg_8bit(ts_data, 0x3C, 0x60, 0x55);
-       write_reg_8bit(ts_data, 0x3C, 0x61, 0xAA);
-
-       /* set PROGRAM password */
-       write_reg_8bit(ts_data, 0x16, 0x1A, 0xBA);
-       write_reg_8bit(ts_data, 0x16, 0x1B, 0xAB);
-
-       write_reg_8bit(ts_data, 0x16, 0x18, 0x80);
-
-       if (emem_type == EMEM_ALL)
-               write_reg_8bit(ts_data, 0x16, 0x08, 0x10);
-
-       write_reg_8bit(ts_data, 0x16, 0x18, 0x40);
-       msleep(20);
-
-       /* clear pce */
-       write_reg_8bit(0x16, 0x18, 0x80);
-
-       /* erase trigger */
-       if (emem_type == EMEM_MAIN)
-               write_reg_8bit(ts_data, 0x16, 0x0E, 0x04); /* erase main */
-       else
-               write_reg_8bit(0x16, 0x0E, 0x08); /* erase all block */
-
-       return 1;
-}
-
-static void _ReadBinConfig(void);
-static unsigned int _CalMainCRC32(void);
-
-static int check_fw_update(void)
-{
-       int ret = 0;
-
-       msg21xx_read_firmware_id();
-       _ReadBinConfig();
-       if (main_sw_id == info_sw_id) {
-               if (_CalMainCRC32() == bin_conf_crc32) {
-                       /*check upgrading*/
-                       if ((update_bin_major == pdata->fw_version_major) &&
-                               (update_bin_minor > pdata->fw_version_minor)) {
-                               ret = 1;
-                       }
-               }
-       }
-       return ret;
-               write_reg_8bit(ts_data, 0x16, 0x0E, 0x08); /* erase all block */
-
-       return 0;
-}
-
-static ssize_t firmware_update_c33(struct device *dev,
-                                               struct device_attribute *attr,
-                                               const char *buf, size_t size,
-                                               enum EMEM_TYPE_t emem_type,
-                                               bool isForce) {
-       unsigned int i, j;
-       unsigned int crc_main, crc_main_tp;
-       unsigned int crc_info, crc_info_tp;
-       unsigned short reg_data = 0;
-       int update_pass = 1;
-       bool fw_upgrade = false;
-       struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
-       crc_main = 0xffffffff;
-       crc_info = 0xffffffff;
-
-       msg21xx_reset_hw(ts_data->pdata);
-
-       msg21xx_read_firmware_id(ts_data);
-       _ReadBinConfig(ts_data);
-       if ((main_sw_id == info_sw_id) &&
-               (_CalMainCRC32(ts_data) == bin_conf_crc32) &&
-               (fw_file_major == ts_data->pdata->fw_version_major) &&
-               (fw_file_minor > ts_data->pdata->fw_version_minor)) {
-               fw_upgrade = true;
-       }
-
-       if (!fw_upgrade && !isForce) {
-               dev_dbg(dev, "no need to update\n");
-               msg21xx_reset_hw(ts_data->pdata);
-               return size;
-       }
-       msg21xx_reset_hw(ts_data->pdata);
-       msleep(300);
-
-       dbbusDWIICEnterSerialDebugMode(ts_data);
-       dbbusDWIICStopMCU(ts_data);
-       dbbusDWIICIICUseBus(ts_data);
-       dbbusDWIICIICReshape(ts_data);
-       msleep(300);
-
-       /* erase main */
-       firmware_erase_c33(ts_data, EMEM_MAIN);
-       msleep(1000);
-
-       msg21xx_reset_hw(ts_data->pdata);
-       dbbusDWIICEnterSerialDebugMode(ts_data);
-       dbbusDWIICStopMCU(ts_data);
-       dbbusDWIICIICUseBus(ts_data);
-       dbbusDWIICIICReshape(ts_data);
-       msleep(300);
-       /*
-        * Program
-        */
-       /* polling 0x3CE4 is 0x1C70 */
-       if ((emem_type == EMEM_ALL) || (emem_type == EMEM_MAIN)) {
-               do {
-                       reg_data = read_reg(ts_data, 0x3C, 0xE4);
-               } while (reg_data != 0x1C70);
-       }
-
-       switch (emem_type) {
-       case EMEM_ALL:
-               write_reg(ts_data, 0x3C, 0xE4, 0xE38F);  /* for all-blocks */
-               break;
-       case EMEM_MAIN:
-               write_reg(ts_data, 0x3C, 0xE4, 0x7731);  /* for main block */
-               break;
-       case EMEM_INFO:
-               write_reg(ts_data, 0x3C, 0xE4, 0x7731);  /* for info block */
-
-               write_reg_8bit(ts_data, 0x0F, 0xE6, 0x01);
-
-               write_reg_8bit(ts_data, 0x3C, 0xE4, 0xC5);
-               write_reg_8bit(ts_data, 0x3C, 0xE5, 0x78);
-
-               write_reg_8bit(ts_data, MSTAR_CHIPTOP_REGISTER_BANK,
-                                               0x04, 0x9F);
-               write_reg_8bit(ts_data, MSTAR_CHIPTOP_REGISTER_BANK,
-                                               0x05, 0x82);
-
-               write_reg_8bit(ts_data, 0x0F, 0xE6, 0x00);
-               msleep(100);
-               break;
-       }
-
-       /* polling 0x3CE4 is 0x2F43 */
-       do {
-               reg_data = read_reg(ts_data, 0x3C, 0xE4);
-       } while (reg_data != 0x2F43);
-
-       /* calculate CRC 32 */
-       _CRC_initTable();
-
-       /* total  32 KB : 2 byte per R/W */
-       for (i = 0; i < 32; i++) {
-               if (i == 31) {
-                       fw_bin_data[i][1014] = 0x5A;
-                       fw_bin_data[i][1015] = 0xA5;
-
-                       for (j = 0; j < 1016; j++)
-                               crc_main = _CRC_getValue(fw_bin_data[i][j],
-                                                       crc_main);
-               } else {
-                       for (j = 0; j < 1024; j++)
-                               crc_main = _CRC_getValue(fw_bin_data[i][j],
-                                                       crc_main);
-               }
-
-               for (j = 0; j < 8; j++)
-                       write_i2c_seq(ts_data, ts_data->client->addr,
-                                               &fw_bin_data[i][j * 128], 128);
-               msleep(100);
-
-               /* polling 0x3CE4 is 0xD0BC */
-               do {
-                       reg_data = read_reg(ts_data, 0x3C, 0xE4);
-               } while (reg_data != 0xD0BC);
-
-               write_reg(ts_data, 0x3C, 0xE4, 0x2F43);
-       }
-
-       if ((emem_type == EMEM_ALL) || (emem_type == EMEM_MAIN)) {
-               /* write file done and check crc */
-               write_reg(ts_data, 0x3C, 0xE4, 0x1380);
-       }
-       msleep(20);
-
-       if ((emem_type == EMEM_ALL) || (emem_type == EMEM_MAIN)) {
-               /* polling 0x3CE4 is 0x9432 */
-               do {
-                       reg_data = read_reg(ts_data, 0x3C, 0xE4);
-               } while (reg_data != 0x9432);
-       }
-
-       crc_main = crc_main ^ 0xffffffff;
-       crc_info = crc_info ^ 0xffffffff;
-
-       if ((emem_type == EMEM_ALL) || (emem_type == EMEM_MAIN)) {
-               /* CRC Main from TP */
-               crc_main_tp = read_reg(ts_data, 0x3C, 0x80);
-               crc_main_tp = (crc_main_tp << 16) |
-                                               read_reg(ts_data, 0x3C, 0x82);
-
-               /* CRC Info from TP */
-               crc_info_tp = read_reg(ts_data, 0x3C, 0xA0);
-               crc_info_tp = (crc_info_tp << 16) |
-                                               read_reg(ts_data, 0x3C, 0xA2);
-       }
-
-       update_pass = 1;
-       if ((emem_type == EMEM_ALL) || (emem_type == EMEM_MAIN)) {
-               if (crc_main_tp != crc_main)
-                       update_pass = 0;
-       }
-
-       if (!update_pass) {
-               dev_err(dev, "update_C33 failed\n");
-               msg21xx_reset_hw(ts_data->pdata);
-               return 0;
-       }
-
-       dev_dbg(dev, "update_C33 OK\n");
-       msg21xx_reset_hw(ts_data->pdata);
-       return size;
-}
-
-static unsigned int _CalMainCRC32(struct msg21xx_ts_data *ts_data)
-{
-       unsigned int ret = 0;
-       unsigned short reg_data = 0;
-
-       msg21xx_reset_hw(ts_data->pdata);
-
-       dbbusDWIICEnterSerialDebugMode(ts_data);
-       dbbusDWIICStopMCU(ts_data);
-       dbbusDWIICIICUseBus(ts_data);
-       dbbusDWIICIICReshape(ts_data);
-       msleep(100);
-
-       /* Stop MCU */
-       write_reg(ts_data, 0x0F, 0xE6, 0x0001);
-
-       /* Stop Watchdog */
-       write_reg_8bit(ts_data, 0x3C, 0x60, 0x55);
-       write_reg_8bit(ts_data, 0x3C, 0x61, 0xAA);
-
-       /* cmd */
-       write_reg(ts_data, 0x3C, 0xE4, 0xDF4C);
-       write_reg(ts_data, MSTAR_CHIPTOP_REGISTER_BANK, 0x04, 0x7d60);
-       /* TP SW reset */
-       write_reg(ts_data, MSTAR_CHIPTOP_REGISTER_BANK, 0x04, 0x829F);
-
-       /* MCU run */
-       write_reg(ts_data, 0x0F, 0xE6, 0x0000);
-
-       /* polling 0x3CE4 */
-       do {
-               reg_data = read_reg(ts_data, 0x3C, 0xE4);
-       } while (reg_data != 0x9432);
-
-       /* Cal CRC Main from TP */
-       ret = read_reg(ts_data, 0x3C, 0x80);
-       ret = (ret << 16) | read_reg(ts_data, 0x3C, 0x82);
-
-       dev_dbg(&ts_data->client->dev,
-                       "[21xxA]:Current main crc32=0x%x\n", ret);
-       return ret;
-}
-
-static void _ReadBinConfig(struct msg21xx_ts_data *ts_data)
-{
-       unsigned char dbbus_tx_data[5] = {0};
-       unsigned char dbbus_rx_data[4] = {0};
-       unsigned short reg_data = 0;
-
-       msg21xx_reset_hw(ts_data->pdata);
-
-       dbbusDWIICEnterSerialDebugMode(ts_data);
-       dbbusDWIICStopMCU(ts_data);
-       dbbusDWIICIICUseBus(ts_data);
-       dbbusDWIICIICReshape(ts_data);
-       msleep(100);
-
-       /* Stop MCU */
-       write_reg(ts_data, 0x0F, 0xE6, 0x0001);
-
-       /* Stop Watchdog */
-       write_reg_8bit(ts_data, 0x3C, 0x60, 0x55);
-       write_reg_8bit(ts_data, 0x3C, 0x61, 0xAA);
-
-       /* cmd */
-       write_reg(ts_data, 0x3C, 0xE4, 0xA4AB);
-       write_reg(ts_data, MSTAR_CHIPTOP_REGISTER_BANK, 0x04, 0x7d60);
-
-       /* TP SW reset */
-       write_reg(ts_data, MSTAR_CHIPTOP_REGISTER_BANK, 0x04, 0x829F);
-
-       /* MCU run */
-       write_reg(ts_data, 0x0F, 0xE6, 0x0000);
-
-       /* polling 0x3CE4 */
-       do {
-               reg_data = read_reg(ts_data, 0x3C, 0xE4);
-       } while (reg_data != 0x5B58);
-
-       dbbus_tx_data[0] = 0x72;
-       dbbus_tx_data[1] = 0x7F;
-       dbbus_tx_data[2] = 0x55;
-       dbbus_tx_data[3] = 0x00;
-       dbbus_tx_data[4] = 0x04;
-       write_i2c_seq(ts_data, ts_data->client->addr, &dbbus_tx_data[0], 5);
-       read_i2c_seq(ts_data, ts_data->client->addr, &dbbus_rx_data[0], 4);
-       if ((dbbus_rx_data[0] >= 0x30 && dbbus_rx_data[0] <= 0x39)
-               && (dbbus_rx_data[1] >= 0x30 && dbbus_rx_data[1] <= 0x39)
-               && (dbbus_rx_data[2] >= 0x31 && dbbus_rx_data[2] <= 0x39)) {
-               main_sw_id = (dbbus_rx_data[0] - 0x30) * 100 +
-                                       (dbbus_rx_data[1] - 0x30) * 10 +
-                                       (dbbus_rx_data[2] - 0x30);
-       }
-
-       dbbus_tx_data[0] = 0x72;
-       dbbus_tx_data[1] = 0x7F;
-       dbbus_tx_data[2] = 0xFC;
-       dbbus_tx_data[3] = 0x00;
-       dbbus_tx_data[4] = 0x04;
-       write_i2c_seq(ts_data, ts_data->client->addr, &dbbus_tx_data[0], 5);
-       read_i2c_seq(ts_data, ts_data->client->addr, &dbbus_rx_data[0], 4);
-       bin_conf_crc32 = (dbbus_rx_data[0] << 24) |
-                       (dbbus_rx_data[1] << 16) |
-                       (dbbus_rx_data[2] << 8) |
-                       (dbbus_rx_data[3]);
-
-       dbbus_tx_data[0] = 0x72;
-       dbbus_tx_data[1] = 0x83;
-       dbbus_tx_data[2] = 0x00;
-       dbbus_tx_data[3] = 0x00;
-       dbbus_tx_data[4] = 0x04;
-       write_i2c_seq(ts_data, ts_data->client->addr, &dbbus_tx_data[0], 5);
-       read_i2c_seq(ts_data, ts_data->client->addr, &dbbus_rx_data[0], 4);
-       if ((dbbus_rx_data[0] >= 0x30 && dbbus_rx_data[0] <= 0x39)
-               && (dbbus_rx_data[1] >= 0x30 && dbbus_rx_data[1] <= 0x39)
-               && (dbbus_rx_data[2] >= 0x31 && dbbus_rx_data[2] <= 0x39)) {
-               info_sw_id = (dbbus_rx_data[0] - 0x30) * 100 +
-                                       (dbbus_rx_data[1] - 0x30) * 10 +
-                                       (dbbus_rx_data[2] - 0x30);
-       }
-
-       dev_dbg(&ts_data->client->dev,
-               "[21xxA]:main_sw_id = %d, info_sw_id = %d, bin_conf_crc32 = 0x%x\n",
-               main_sw_id, info_sw_id, bin_conf_crc32);
-}
-
-static ssize_t firmware_update_show(struct device *dev,
-                                               struct device_attribute *attr,
-                                               char *buf)
-{
-       struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
-       return snprintf(buf, 3, "%d\n", ts_data->pdata->updating_fw);
-}
-
-static ssize_t firmware_update_store(struct device *dev,
-                                               struct device_attribute *attr,
-                                               const char *buf,
-                                               size_t size)
-{
-       struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
-       ts_data->pdata->updating_fw = true;
-       disable_irq(ts_data->client->irq);
-
-       size = firmware_update_c33(dev, attr, buf, size, EMEM_MAIN, false);
-
-       enable_irq(ts_data->client->irq);
-       ts_data->pdata->updating_fw = false;
-
-       return size;
-}
-
-static DEVICE_ATTR(update, (S_IRUGO | S_IWUSR),
-                                       firmware_update_show,
-                                       firmware_update_store);
-
-static int prepare_fw_data(struct device *dev)
-{
-       int count;
-       int i;
-       int ret;
-       const struct firmware *fw = NULL;
-       struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
-       ret = request_firmware(&fw, ts_data->pdata->fw_name, dev);
-       if (ret < 0) {
-               dev_err(dev, "Request firmware failed - %s (%d)\n",
-                                               ts_data->pdata->fw_name, ret);
-               return ret;
-       }
-
-       count = fw->size / 1024;
-
-       for (i = 0; i < count; i++)
-               memcpy(fw_bin_data[i], fw->data + (i * 1024), 1024);
-
-       fw_file_major = MSG_FW_FILE_MAJOR_VERSION(fw);
-       fw_file_minor = MSG_FW_FILE_MINOR_VERSION(fw);
-       dev_dbg(dev, "New firmware: %d.%d",
-                       fw_file_major, fw_file_minor);
-
-       return fw->size;
-}
-
-static ssize_t firmware_update_smart_store(struct device *dev,
-                                               struct device_attribute *attr,
-                                               const char *buf,
-                                               size_t size)
-{
-       int ret;
-       struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
-       ret = prepare_fw_data(dev);
-       if (ret < 0) {
-               dev_err(dev, "Request firmware failed -(%d)\n", ret);
-               return ret;
-       }
-       ts_data->pdata->updating_fw = true;
-       disable_irq(ts_data->client->irq);
-
-       ret = firmware_update_c33(dev, attr, buf, size, EMEM_MAIN, false);
-       if (ret == 0)
-               dev_err(dev, "firmware_update_c33 ret = %d\n", ret);
-
-       enable_irq(ts_data->client->irq);
-       ts_data->pdata->updating_fw = false;
-
-       return ret;
-}
-
-static ssize_t firmware_force_update_smart_store(struct device *dev,
-                                               struct device_attribute *attr,
-                                               const char *buf,
-                                               size_t size)
-{
-       int ret;
-       struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
-       ret = prepare_fw_data(dev);
-       if (ret < 0) {
-               dev_err(dev, "Request firmware failed -(%d)\n", ret);
-               return ret;
-       }
-       ts_data->pdata->updating_fw = true;
-       disable_irq(ts_data->client->irq);
-
-       ret = firmware_update_c33(dev, attr, buf, size, EMEM_MAIN, true);
-       if (ret == 0)
-               dev_err(dev, "firmware_update_c33 et = %d\n", ret);
-
-       enable_irq(ts_data->client->irq);
-       ts_data->pdata->updating_fw = false;
-
-       return ret;
-}
-
-static DEVICE_ATTR(update_fw, (S_IRUGO | S_IWUSR),
-                                       firmware_update_show,
-                                       firmware_update_smart_store);
-
-static DEVICE_ATTR(force_update_fw, (S_IRUGO | S_IWUSR),
-                                       firmware_update_show,
-                                       firmware_force_update_smart_store);
-
-static ssize_t firmware_version_show(struct device *dev,
-                                       struct device_attribute *attr,
-                                       char *buf)
-{
-       struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
-       msg21xx_read_firmware_id(ts_data);
-       return snprintf(buf, sizeof(char) * 8, "%03d%03d\n",
-                       ts_data->pdata->fw_version_major,
-                       ts_data->pdata->fw_version_minor);
-}
-
-static DEVICE_ATTR(version, S_IRUGO,
-                                       firmware_version_show,
-                                       NULL);
-
-
-static ssize_t msg21xx_fw_name_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
-       return snprintf(buf, MSTAR_FW_NAME_MAX_LEN - 1,
-                               "%s\n", ts_data->pdata->fw_name);
-}
-
-static ssize_t msg21xx_fw_name_store(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf, size_t size)
-{
-       struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
-       if (size > MSTAR_FW_NAME_MAX_LEN - 1)
-               return -EINVAL;
-
-       strlcpy(ts_data->pdata->fw_name, buf, size);
-       if (ts_data->pdata->fw_name[size - 1] == '\n')
-               ts_data->pdata->fw_name[size - 1] = 0;
-
-       return size;
-}
-
-static DEVICE_ATTR(fw_name, (S_IRUGO | S_IWUSR),
-                       msg21xx_fw_name_show, msg21xx_fw_name_store);
-
-static ssize_t firmware_data_store(struct device *dev,
-                                       struct device_attribute *attr,
-                                       const char *buf,
-                                       size_t size)
-{
-       int count = size / 1024;
-       int i;
-
-       for (i = 0; i < count; i++)
-               memcpy(fw_bin_data[i], buf + (i * 1024), 1024);
-
-       if (buf != NULL)
-               dev_dbg(dev, "buf[0] = %c\n", buf[0]);
-
-       return size;
-}
-
-static DEVICE_ATTR(data, S_IWUSR, NULL, firmware_data_store);
-
-static ssize_t tp_print_show(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
-{
-       struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
-       tp_print_proc_read(ts_data);
-
-       return snprintf(buf, 3, "%d\n", ts_data->suspended);
-}
-
-static ssize_t tp_print_store(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf,
-                               size_t size)
-{
-       return size;
-}
-
-static DEVICE_ATTR(tpp, (S_IRUGO | S_IWUSR),
-                               tp_print_show, tp_print_store);
-
-#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
-static void _msg_enable_proximity(void)
-{
-       unsigned char tx_data[4] = {0};
-
-       tx_data[0] = 0x52;
-       tx_data[1] = 0x00;
-       tx_data[2] = 0x47;
-       tx_data[3] = 0xa0;
-       mutex_lock(&msg21xx_mutex);
-       write_i2c_seq(ts_data->client->addr, &tx_data[0], 4);
-       mutex_unlock(&msg21xx_mutex);
-
-       bEnableTpProximity = 1;
-}
-
-static void _msg_disable_proximity(void)
-{
-       unsigned char tx_data[4] = {0};
-
-       tx_data[0] = 0x52;
-       tx_data[1] = 0x00;
-       tx_data[2] = 0x47;
-       tx_data[3] = 0xa1;
-       mutex_lock(&msg21xx_mutex);
-       write_i2c_seq(ts_data->client->addr, &tx_data[0], 4);
-       mutex_unlock(&msg21xx_mutex);
-
-       bEnableTpProximity = 0;
-       bFaceClosingTp = 0;
-}
-
-static void tsps_msg21xx_enable(int en)
-{
-       if (en)
-               _msg_enable_proximity();
-       else
-               _msg_disable_proximity();
-}
-
-static int tsps_msg21xx_data(void)
-{
-       return bFaceClosingTp;
-}
-#endif
-
-static int msg21xx_pinctrl_init(struct msg21xx_ts_data *ts_data)
-{
-       int retval;
-
-       /* Get pinctrl if target uses pinctrl */
-       ts_data->ts_pinctrl = devm_pinctrl_get(&(ts_data->client->dev));
-       if (IS_ERR_OR_NULL(ts_data->ts_pinctrl)) {
-               retval = PTR_ERR(ts_data->ts_pinctrl);
-               dev_dbg(&ts_data->client->dev,
-                       "Target does not use pinctrl %d\n", retval);
-               goto err_pinctrl_get;
-       }
-
-       ts_data->pinctrl_state_active = pinctrl_lookup_state(
-                       ts_data->ts_pinctrl, PINCTRL_STATE_ACTIVE);
-       if (IS_ERR_OR_NULL(ts_data->pinctrl_state_active)) {
-               retval = PTR_ERR(ts_data->pinctrl_state_active);
-               dev_dbg(&ts_data->client->dev,
-                       "Can't lookup %s pinstate %d\n",
-                       PINCTRL_STATE_ACTIVE, retval);
-               goto err_pinctrl_lookup;
-       }
-
-       ts_data->pinctrl_state_suspend = pinctrl_lookup_state(
-                       ts_data->ts_pinctrl, PINCTRL_STATE_SUSPEND);
-       if (IS_ERR_OR_NULL(ts_data->pinctrl_state_suspend)) {
-               retval = PTR_ERR(ts_data->pinctrl_state_suspend);
-               dev_dbg(&ts_data->client->dev,
-                       "Can't lookup %s pinstate %d\n",
-                       PINCTRL_STATE_SUSPEND, retval);
-               goto err_pinctrl_lookup;
-       }
-
-       ts_data->pinctrl_state_release = pinctrl_lookup_state(
-                       ts_data->ts_pinctrl, PINCTRL_STATE_RELEASE);
-       if (IS_ERR_OR_NULL(ts_data->pinctrl_state_release)) {
-               retval = PTR_ERR(ts_data->pinctrl_state_release);
-               dev_dbg(&ts_data->client->dev,
-                       "Can't lookup %s pinstate %d\n",
-                       PINCTRL_STATE_RELEASE, retval);
-       }
-
-       return 0;
-
-err_pinctrl_lookup:
-       devm_pinctrl_put(ts_data->ts_pinctrl);
-err_pinctrl_get:
-       ts_data->ts_pinctrl = NULL;
-       return retval;
-}
-
-static unsigned char calculate_checksum(unsigned char *msg, int length)
-{
-       int checksum = 0, i;
-
-       for (i = 0; i < length; i++)
-               checksum += msg[i];
-
-       return (unsigned char)((-checksum) & 0xFF);
-}
-
-static int parse_info(struct msg21xx_ts_data *ts_data)
-{
-       unsigned char data[DEMO_MODE_PACKET_LENGTH] = {0};
-       unsigned char checksum = 0;
-       unsigned int x = 0, y = 0;
-       unsigned int x2 = 0, y2 = 0;
-       unsigned int delta_x = 0, delta_y = 0;
-
-       mutex_lock(&msg21xx_mutex);
-       read_i2c_seq(ts_data, ts_data->client->addr, &data[0],
-                               DEMO_MODE_PACKET_LENGTH);
-       mutex_unlock(&msg21xx_mutex);
-       checksum = calculate_checksum(&data[0], (DEMO_MODE_PACKET_LENGTH-1));
-       dev_dbg(&ts_data->client->dev, "check sum: [%x] == [%x]?\n",
-                       data[DEMO_MODE_PACKET_LENGTH-1], checksum);
-
-       if (data[DEMO_MODE_PACKET_LENGTH-1] != checksum) {
-               dev_err(&ts_data->client->dev, "WRONG CHECKSUM\n");
-               return -EINVAL;
-       }
-
-       if (data[0] != 0x52) {
-               dev_err(&ts_data->client->dev, "WRONG HEADER\n");
-               return -EINVAL;
-       }
-
-       ts_data->info.keycode = 0xFF;
-       if ((data[1] == 0xFF) && (data[2] == 0xFF) &&
-               (data[3] == 0xFF) && (data[4] == 0xFF) &&
-               (data[6] == 0xFF)) {
-               if ((data[5] == 0xFF) || (data[5] == 0)) {
-                       ts_data->info.keycode = 0xFF;
-               } else if ((data[5] == 1) || (data[5] == 2) ||
-                               (data[5] == 4) || (data[5] == 8)) {
-                       ts_data->info.keycode = data[5] >> 1;
-
-                       dev_dbg(&ts_data->client->dev,
-                               "ts_data->info.keycode index %d\n",
-                               ts_data->info.keycode);
-               }
-       #ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
-               else if (bEnableTpProximity && ((data[5] == 0x80) ||
-                                       (data[5] == 0x40))) {
-                       if (data[5] == 0x80)
-                               bFaceClosingTp = 1;
-                       else if (data[5] == 0x40)
-                               bFaceClosingTp = 0;
-
-                       return -EINVAL;
-               }
-       #endif
-               else {
-                       dev_err(&ts_data->client->dev, "WRONG KEY\n");
-                       return -EINVAL;
-               }
-       } else {
-               x = (((data[1] & 0xF0) << 4) | data[2]);
-               y = (((data[1] & 0x0F) << 8) | data[3]);
-               delta_x = (((data[4] & 0xF0) << 4) | data[5]);
-               delta_y = (((data[4] & 0x0F) << 8) | data[6]);
-
-               if ((delta_x == 0) && (delta_y == 0)) {
-                       ts_data->info.point[0].x =
-                               x * ts_data->pdata->x_max / TPD_WIDTH;
-                       ts_data->info.point[0].y =
-                               y * ts_data->pdata->y_max / TPD_HEIGHT;
-                       ts_data->info.count = 1;
-               } else {
-                       if (delta_x > 2048)
-                               delta_x -= 4096;
-
-                       if (delta_y > 2048)
-                               delta_y -= 4096;
-
-                       x2 = (unsigned int)((signed short)x +
-                                               (signed short)delta_x);
-                       y2 = (unsigned int)((signed short)y +
-                                               (signed short)delta_y);
-                       ts_data->info.point[0].x =
-                               x * ts_data->pdata->x_max / TPD_WIDTH;
-                       ts_data->info.point[0].y =
-                               y * ts_data->pdata->y_max / TPD_HEIGHT;
-                       ts_data->info.point[1].x =
-                               x2 * ts_data->pdata->x_max / TPD_WIDTH;
-                       ts_data->info.point[1].y =
-                               y2 * ts_data->pdata->y_max / TPD_HEIGHT;
-                       ts_data->info.count = ts_data->pdata->num_max_touches;
-               }
-       }
-
-       return 0;
-}
-
-static void touch_driver_touch_released(struct msg21xx_ts_data *ts_data)
-{
-       int i;
-
-       for (i = 0; i < ts_data->pdata->num_max_touches; i++) {
-               input_mt_slot(ts_data->input_dev, i);
-               input_mt_report_slot_state(ts_data->input_dev,
-                                               MT_TOOL_FINGER, 0);
-       }
-
-       input_report_key(ts_data->input_dev, BTN_TOUCH, 0);
-       input_report_key(ts_data->input_dev, BTN_TOOL_FINGER, 0);
-       input_sync(ts_data->input_dev);
-}
-
-/* read data through I2C then report data to input
- *sub-system when interrupt occurred
- */
-static irqreturn_t msg21xx_ts_interrupt(int irq, void *dev_id)
-{
-       int i = 0;
-       static int last_keycode = 0xFF;
-       static int last_count;
-       struct msg21xx_ts_data *ts_data = dev_id;
-
-       ts_data->info.count = 0;
-       if (parse_info(ts_data) == 0) {
-               if (ts_data->info.keycode != 0xFF) {   /* key touch pressed */
-                       if (ts_data->info.keycode <
-                                       ts_data->pdata->num_buttons) {
-                               if (ts_data->info.keycode != last_keycode) {
-                                       dev_dbg(&ts_data->client->dev,
-                                               "key touch pressed");
-
-                                       input_report_key(ts_data->input_dev,
-                                                       BTN_TOUCH, 1);
-                                       input_report_key(ts_data->input_dev,
-                                               ts_data->pdata->button_map[
-                                               ts_data->info.keycode], 1);
-
-                                       last_keycode = ts_data->info.keycode;
-                               } else {
-                                       /* pass duplicate key-pressing */
-                                       dev_dbg(&ts_data->client->dev,
-                                               "REPEATED KEY\n");
-                               }
-                       } else {
-                               dev_dbg(&ts_data->client->dev, "WRONG KEY\n");
-                       }
-               } else {  /* key touch released */
-                       if (last_keycode != 0xFF) {
-                               dev_dbg(&ts_data->client->dev, "key touch released");
-
-                               input_report_key(ts_data->input_dev,
-                                               BTN_TOUCH, 0);
-                               input_report_key(ts_data->input_dev,
-                               ts_data->pdata->button_map[last_keycode],
-                               0);
-
-                               last_keycode = 0xFF;
-                       }
-               }
-
-               if (ts_data->info.count > 0) { /* point touch pressed */
-                       for (i = 0; i < ts_data->info.count; i++) {
-                               input_mt_slot(ts_data->input_dev, i);
-                               input_mt_report_slot_state(ts_data->input_dev,
-                                       MT_TOOL_FINGER, 1);
-                               input_report_abs(ts_data->input_dev,
-                                       ABS_MT_TOUCH_MAJOR, 1);
-                               input_report_abs(ts_data->input_dev,
-                                       ABS_MT_POSITION_X,
-                                       ts_data->info.point[i].x);
-                               input_report_abs(ts_data->input_dev,
-                                       ABS_MT_POSITION_Y,
-                                       ts_data->info.point[i].y);
-                       }
-               }
-
-               if (last_count > info.count) {
-                       for (i = info.count; i < MAX_TOUCH_NUM; i++) {
-                               input_mt_slot(input_dev, i);
-                               input_mt_report_slot_state(input_dev,
-               }
-
-               if (last_count > ts_data->info.count) {
-                       for (i = ts_data->info.count;
-                               i < ts_data->pdata->num_max_touches;
-                               i++) {
-                               input_mt_slot(ts_data->input_dev, i);
-                               input_mt_report_slot_state(ts_data->input_dev,
-                                       MT_TOOL_FINGER, 0);
-                       }
-               }
-               last_count = ts_data->info.count;
-
-               input_report_key(ts_data->input_dev, BTN_TOUCH,
-                                               ts_data->info.count > 0);
-               input_report_key(ts_data->input_dev, BTN_TOOL_FINGER,
-                                               ts_data->info.count > 0);
-
-               input_sync(ts_data->input_dev);
-       }
-
-       return IRQ_HANDLED;
-}
-
-static int msg21xx_ts_power_init(struct msg21xx_ts_data *ts_data, bool init)
-{
-       int rc;
-
-       if (init) {
-               ts_data->vdd = regulator_get(&ts_data->client->dev,
-                                                                       "vdd");
-               if (IS_ERR(ts_data->vdd)) {
-                       rc = PTR_ERR(ts_data->vdd);
-                       dev_err(&ts_data->client->dev,
-                               "Regulator get failed vdd rc=%d\n", rc);
-                       return rc;
-               }
-
-               if (regulator_count_voltages(ts_data->vdd) > 0) {
-                       rc = regulator_set_voltage(ts_data->vdd,
-                                                       MSTAR_VTG_MIN_UV,
-                                                       MSTAR_VTG_MAX_UV);
-                       if (rc) {
-                               dev_err(&ts_data->client->dev,
-                                       "Regulator set_vtg failed vdd rc=%d\n",
-                                       rc);
-                               goto reg_vdd_put;
-                       }
-               }
-
-               ts_data->vcc_i2c = regulator_get(&ts_data->client->dev,
-                                                               "vcc_i2c");
-               if (IS_ERR(ts_data->vcc_i2c)) {
-                       rc = PTR_ERR(ts_data->vcc_i2c);
-                       dev_err(&ts_data->client->dev,
-                               "Regulator get failed vcc_i2c rc=%d\n", rc);
-                       goto reg_vdd_set_vtg;
-               }
-
-               if (regulator_count_voltages(ts_data->vcc_i2c) > 0) {
-                       rc = regulator_set_voltage(ts_data->vcc_i2c,
-                                               MSTAR_I2C_VTG_MIN_UV,
-                                               MSTAR_I2C_VTG_MAX_UV);
-                       if (rc) {
-                               dev_err(&ts_data->client->dev,
-                               "Regulator set_vtg failed vcc_i2c rc=%d\n", rc);
-                               goto reg_vcc_i2c_put;
-                       }
-               }
-       } else {
-               if (regulator_count_voltages(ts_data->vdd) > 0)
-                       regulator_set_voltage(ts_data->vdd, 0,
-                                                       MSTAR_VTG_MAX_UV);
-
-               regulator_put(ts_data->vdd);
-
-               if (regulator_count_voltages(ts_data->vcc_i2c) > 0)
-                       regulator_set_voltage(ts_data->vcc_i2c, 0,
-                                               MSTAR_I2C_VTG_MAX_UV);
-
-               regulator_put(ts_data->vcc_i2c);
-       }
-
-       return 0;
-
-reg_vcc_i2c_put:
-       regulator_put(ts_data->vcc_i2c);
-reg_vdd_set_vtg:
-       if (regulator_count_voltages(ts_data->vdd) > 0)
-               regulator_set_voltage(ts_data->vdd, 0, MSTAR_VTG_MAX_UV);
-reg_vdd_put:
-       regulator_put(ts_data->vdd);
-       return rc;
-}
-
-static int msg21xx_ts_power_on(struct msg21xx_ts_data *ts_data, bool on)
-{
-       int rc;
-
-       if (!on)
-               goto power_off;
-
-       rc = regulator_enable(ts_data->vdd);
-       if (rc) {
-               dev_err(&ts_data->client->dev,
-                       "Regulator vdd enable failed rc=%d\n", rc);
-               return rc;
-       }
-
-       rc = regulator_enable(ts_data->vcc_i2c);
-       if (rc) {
-               dev_err(&ts_data->client->dev,
-                       "Regulator vcc_i2c enable failed rc=%d\n", rc);
-               regulator_disable(ts_data->vdd);
-       }
-
-       return rc;
-
-       DBG("*** %s ***\n", __func__);
-       rc = regulator_disable(vdd);
-power_off:
-       rc = regulator_disable(ts_data->vdd);
-       if (rc) {
-               dev_err(&ts_data->client->dev,
-                       "Regulator vdd disable failed rc=%d\n", rc);
-               return rc;
-       }
-
-       rc = regulator_disable(ts_data->vcc_i2c);
-       if (rc) {
-               dev_err(&ts_data->client->dev,
-                       "Regulator vcc_i2c disable failed rc=%d\n", rc);
-               rc = regulator_enable(ts_data->vdd);
-       }
-
-       return rc;
-}
-
-static int msg21xx_ts_gpio_configure(struct msg21xx_ts_data *ts_data, bool on)
-{
-       int ret = 0;
-
-       if (!on)
-               goto pwr_deinit;
-
-       if (gpio_is_valid(ts_data->pdata->irq_gpio)) {
-               ret = gpio_request(ts_data->pdata->irq_gpio,
-                                               "msg21xx_irq_gpio");
-               if (ret) {
-                       dev_err(&ts_data->client->dev,
-                               "Failed to request GPIO[%d], %d\n",
-                               ts_data->pdata->irq_gpio, ret);
-                       goto err_irq_gpio_req;
-               }
-               ret = gpio_direction_input(ts_data->pdata->irq_gpio);
-               if (ret) {
-                       dev_err(&ts_data->client->dev,
-                               "Failed to set direction for gpio[%d], %d\n",
-                               ts_data->pdata->irq_gpio, ret);
-                       goto err_irq_gpio_dir;
-               }
-               gpio_set_value_cansleep(ts_data->pdata->irq_gpio, 1);
-       } else {
-               dev_err(&ts_data->client->dev, "irq gpio not provided\n");
-               goto err_irq_gpio_req;
-       }
-
-       if (gpio_is_valid(ts_data->pdata->reset_gpio)) {
-               ret = gpio_request(ts_data->pdata->reset_gpio,
-                                       "msg21xx_reset_gpio");
-               if (ret) {
-                       dev_err(&ts_data->client->dev,
-                               "Failed to request GPIO[%d], %d\n",
-                               ts_data->pdata->reset_gpio, ret);
-                       goto err_reset_gpio_req;
-               }
-
-       } else {
-               if (gpio_is_valid(pdata->irq_gpio))
-                       gpio_free(pdata->irq_gpio);
-               if (gpio_is_valid(pdata->reset_gpio)) {
-                       gpio_set_value_cansleep(pdata->reset_gpio, 0);
-                       ret = gpio_direction_input(pdata->reset_gpio);
-                       if (ret)
-                               dev_err(&i2c_client->dev,
-                                       "Unable to set direction for gpio [%d]\n",
-                                       pdata->reset_gpio);
-                       gpio_free(pdata->reset_gpio);
-               }
-       }
-       return 0;
-               /* power on TP */
-               ret = gpio_direction_output(
-                                       ts_data->pdata->reset_gpio, 1);
-               if (ret) {
-                       dev_err(&ts_data->client->dev,
-                               "Failed to set direction for GPIO[%d], %d\n",
-                               ts_data->pdata->reset_gpio, ret);
-                       goto err_reset_gpio_dir;
-               }
-               msleep(100);
-               gpio_set_value_cansleep(ts_data->pdata->reset_gpio, 0);
-               msleep(20);
-               gpio_set_value_cansleep(ts_data->pdata->reset_gpio, 1);
-               msleep(200);
-       } else {
-               dev_err(&ts_data->client->dev, "reset gpio not provided\n");
-               goto err_reset_gpio_req;
-       }
-
-       return 0;
-
-err_reset_gpio_dir:
-       if (gpio_is_valid(ts_data->pdata->reset_gpio))
-               gpio_free(ts_data->pdata->irq_gpio);
-err_reset_gpio_req:
-err_irq_gpio_dir:
-       if (gpio_is_valid(ts_data->pdata->irq_gpio))
-               gpio_free(ts_data->pdata->irq_gpio);
-err_irq_gpio_req:
-       return ret;
-
-pwr_deinit:
-       if (gpio_is_valid(ts_data->pdata->irq_gpio))
-               gpio_free(ts_data->pdata->irq_gpio);
-       if (gpio_is_valid(ts_data->pdata->reset_gpio)) {
-               gpio_set_value_cansleep(ts_data->pdata->reset_gpio, 0);
-               ret = gpio_direction_input(ts_data->pdata->reset_gpio);
-               if (ret)
-                       dev_err(&ts_data->client->dev,
-                               "Unable to set direction for gpio [%d]\n",
-                               ts_data->pdata->reset_gpio);
-               gpio_free(ts_data->pdata->reset_gpio);
-       }
-       return 0;
-}
-
-#ifdef CONFIG_PM
-static int msg21xx_ts_resume(struct device *dev)
-{
-       int retval;
-       struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
-       if (!ts_data->suspended) {
-               dev_info(dev, "msg21xx_ts already in resume\n");
-               return 0;
-       }
-
-       mutex_lock(&ts_data->ts_mutex);
-
-       retval = msg21xx_ts_power_on(ts_data, true);
-       if (retval) {
-               dev_err(dev, "msg21xx_ts power on failed");
-               mutex_unlock(&ts_data->ts_mutex);
-               return retval;
-       }
-
-       if (ts_data->ts_pinctrl) {
-               retval = pinctrl_select_state(ts_data->ts_pinctrl,
-                               ts_data->pinctrl_state_active);
-               if (retval < 0) {
-                       dev_err(dev, "Cannot get active pinctrl state\n");
-                       mutex_unlock(&ts_data->ts_mutex);
-                       return retval;
-               }
-       }
-
-       retval = msg21xx_ts_gpio_configure(ts_data, true);
-       if (retval) {
-               dev_err(dev, "Failed to put gpios in active state %d",
-                               retval);
-               mutex_unlock(&ts_data->ts_mutex);
-               return retval;
-       }
-
-       enable_irq(ts_data->client->irq);
-       ts_data->suspended = false;
-
-       mutex_unlock(&ts_data->ts_mutex);
-
-       return 0;
-}
-
-static int msg21xx_ts_suspend(struct device *dev)
-{
-       int retval;
-       struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
-       if (ts_data->pdata->updating_fw) {
-               dev_info(dev, "Firmware loading in progress\n");
-               return 0;
-       }
-
-       if (ts_data->suspended) {
-               dev_info(dev, "msg21xx_ts already in suspend\n");
-               return 0;
-       }
-
-#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
-       if (bEnableTpProximity) {
-               dev_dbg(dev, "suspend bEnableTpProximity=%d\n",
-                               bEnableTpProximity);
-               return 0;
-       }
-#endif
-
-       mutex_lock(&ts_data->ts_mutex);
-
-       disable_irq(ts_data->client->irq);
-
-       touch_driver_touch_released(ts_data);
-
-       if (ts_data->ts_pinctrl) {
-               retval = pinctrl_select_state(ts_data->ts_pinctrl,
-                               ts_data->pinctrl_state_suspend);
-               if (retval < 0) {
-                       dev_err(dev, "Cannot get idle pinctrl state %d\n",
-                               retval);
-                       mutex_unlock(&ts_data->ts_mutex);
-                       return retval;
-               }
-       }
-
-       retval = msg21xx_ts_gpio_configure(ts_data, false);
-       if (retval) {
-               dev_err(dev, "Failed to put gpios in idle state %d",
-                               retval);
-               mutex_unlock(&ts_data->ts_mutex);
-               return retval;
-       }
-
-       retval = msg21xx_ts_power_on(ts_data, false);
-       if (retval) {
-               dev_err(dev, "msg21xx_ts power off failed");
-               mutex_unlock(&ts_data->ts_mutex);
-               return retval;
-       }
-
-       ts_data->suspended = true;
-
-       mutex_unlock(&ts_data->ts_mutex);
-
-       return 0;
-}
-#else
-static int msg21xx_ts_resume(struct device *dev)
-{
-       return 0;
-}
-static int msg21xx_ts_suspend(struct device *dev)
-{
-       return 0;
-}
-#endif
-
-static int msg21xx_debug_suspend_set(void *_data, u64 val)
-{
-       struct msg21xx_ts_data *data = _data;
-
-       mutex_lock(&data->input_dev->mutex);
-
-       if (val)
-               msg21xx_ts_suspend(&data->client->dev);
-       else
-               msg21xx_ts_resume(&data->client->dev);
-
-       mutex_unlock(&data->input_dev->mutex);
-
-       return 0;
-}
-
-static int msg21xx_debug_suspend_get(void *_data, u64 *val)
-{
-       struct msg21xx_ts_data *data = _data;
-
-       mutex_lock(&data->input_dev->mutex);
-       *val = data->suspended;
-       mutex_unlock(&data->input_dev->mutex);
-
-       return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(debug_suspend_fops, msg21xx_debug_suspend_get,
-                       msg21xx_debug_suspend_set, "%lld\n");
-
-
-#if defined(CONFIG_FB)
-static int fb_notifier_callback(struct notifier_block *self,
-                               unsigned long event, void *data)
-{
-       struct fb_event *evdata = data;
-       int *blank;
-       struct msg21xx_ts_data *ts_data =
-               container_of(self, struct msg21xx_ts_data, fb_notif);
-
-       if (evdata && evdata->data && event == FB_EVENT_BLANK) {
-               blank = evdata->data;
-               if (*blank == FB_BLANK_UNBLANK)
-                       msg21xx_ts_resume(&ts_data->client->dev);
-               else if (*blank == FB_BLANK_POWERDOWN)
-                       msg21xx_ts_suspend(&ts_data->client->dev);
-       }
-
-       return 0;
-}
-#endif
-
-static int msg21xx_get_dt_coords(struct device *dev, char *name,
-                               struct msg21xx_ts_platform_data *pdata)
-{
-       u32 coords[FT_COORDS_ARR_SIZE];
-       struct property *prop;
-       struct device_node *np = dev->of_node;
-       int coords_size, rc;
-
-       prop = of_find_property(np, name, NULL);
-       if (!prop)
-               return -EINVAL;
-       if (!prop->value)
-               return -ENODATA;
-
-       coords_size = prop->length / sizeof(u32);
-       if (coords_size != FT_COORDS_ARR_SIZE) {
-               dev_err(dev, "invalid %s\n", name);
-               return -EINVAL;
-       }
-
-       rc = of_property_read_u32_array(np, name, coords, coords_size);
-       if (rc && (rc != -EINVAL)) {
-               dev_err(dev, "Unable to read %s\n", name);
-               return rc;
-       }
-
-       if (!strcmp(name, "mstar,panel-coords")) {
-               pdata->panel_minx = coords[0];
-               pdata->panel_miny = coords[1];
-               pdata->panel_maxx = coords[2];
-               pdata->panel_maxy = coords[3];
-       } else if (!strcmp(name, "mstar,display-coords")) {
-               pdata->x_min = coords[0];
-               pdata->y_min = coords[1];
-               pdata->x_max = coords[2];
-               pdata->y_max = coords[3];
-       } else {
-               dev_err(dev, "unsupported property %s\n", name);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int msg21xx_parse_dt(struct device *dev,
-                       struct msg21xx_ts_platform_data *pdata)
-{
-       int rc;
-       struct device_node *np = dev->of_node;
-       struct property *prop;
-       u32 temp_val;
-
-       rc = msg21xx_get_dt_coords(dev, "mstar,panel-coords", pdata);
-       if (rc && (rc != -EINVAL))
-               return rc;
-
-       rc = msg21xx_get_dt_coords(dev, "mstar,display-coords", pdata);
-       if (rc)
-               return rc;
-
-       rc = of_property_read_u32(np, "mstar,hard-reset-delay-ms",
-                                                       &temp_val);
-       if (!rc)
-               pdata->hard_reset_delay_ms = temp_val;
-       else
-               return rc;
-
-       rc = of_property_read_u32(np, "mstar,post-hard-reset-delay-ms",
-                                                       &temp_val);
-       if (!rc)
-               pdata->post_hard_reset_delay_ms = temp_val;
-       else
-               return rc;
-
-       /* reset, irq gpio info */
-       pdata->reset_gpio = of_get_named_gpio_flags(np, "mstar,reset-gpio",
-                               0, &pdata->reset_gpio_flags);
-       if (pdata->reset_gpio < 0)
-               return pdata->reset_gpio;
-
-       pdata->irq_gpio = of_get_named_gpio_flags(np, "mstar,irq-gpio",
-                               0, &pdata->irq_gpio_flags);
-       if (pdata->irq_gpio < 0)
-               return pdata->irq_gpio;
-
-       rc = of_property_read_u32(np, "mstar,ic-type", &temp_val);
-       if (rc && (rc != -EINVAL))
-               return rc;
-
-       pdata->ic_type = temp_val;
-
-       rc = of_property_read_u32(np, "mstar,num-max-touches", &temp_val);
-       if (!rc)
-               pdata->num_max_touches = temp_val;
-       else
-               return rc;
-
-       prop = of_find_property(np, "mstar,button-map", NULL);
-       if (prop) {
-               pdata->num_buttons = prop->length / sizeof(temp_val);
-               if (pdata->num_buttons > MAX_BUTTONS)
-                       return -EINVAL;
-
-               rc = of_property_read_u32_array(np,
-                       "mstar,button-map", pdata->button_map,
-                       pdata->num_buttons);
-               if (rc) {
-                       dev_err(dev, "Unable to read key codes\n");
-                       return rc;
-               }
-       }
-
-       return 0;
-}
-
-/* probe function is used for matching and initializing input device */
-static int msg21xx_ts_probe(struct i2c_client *client,
-               const struct i2c_device_id *id) {
-
-       int ret = 0, i;
-       struct dentry *temp, *dir;
-       struct input_dev *input_dev;
-       struct msg21xx_ts_data *ts_data;
-       struct msg21xx_ts_platform_data *pdata;
-
-       if (client->dev.of_node) {
-               pdata = devm_kzalloc(&client->dev,
-                       sizeof(struct msg21xx_ts_platform_data), GFP_KERNEL);
-               if (!pdata)
-                       return -ENOMEM;
-
-               ret = msg21xx_parse_dt(&client->dev, pdata);
-               if (ret) {
-                       dev_err(&client->dev, "DT parsing failed\n");
-                       return ret;
-               }
-       } else
-               pdata = client->dev.platform_data;
-
-       if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
-               dev_err(&client->dev, "I2C not supported\n");
-               return -ENODEV;
-       }
-
-       ts_data = devm_kzalloc(&client->dev,
-                       sizeof(struct msg21xx_ts_data), GFP_KERNEL);
-       if (!ts_data)
-               return -ENOMEM;
-
-       ts_data->client = client;
-       ts_data->info.point = devm_kzalloc(&client->dev,
-               sizeof(struct touchPoint_t) * pdata->num_max_touches,
-               GFP_KERNEL);
-       if (!ts_data->info.point) {
-               dev_err(&client->dev, "Not enough memory\n");
-               return -ENOMEM;
-       }
-
-       /* allocate an input device */
-       input_dev = input_allocate_device();
-       if (!input_dev) {
-               ret = -ENOMEM;
-               dev_err(&client->dev, "input device allocation failed\n");
-               goto err_input_allocate_dev;
-       }
-
-       input_dev->name = client->name;
-       input_dev->phys = "I2C";
-       input_dev->dev.parent = &client->dev;
-       input_dev->id.bustype = BUS_I2C;
-
-       ts_data->input_dev = input_dev;
-       ts_data->client = client;
-       ts_data->pdata = pdata;
-
-       input_set_drvdata(input_dev, ts_data);
-       i2c_set_clientdata(client, ts_data);
-
-       ret = msg21xx_ts_power_init(ts_data, true);
-       if (ret) {
-               dev_err(&client->dev, "Mstar power init failed\n");
-               return ret;
-       }
-
-       ret = msg21xx_ts_power_on(ts_data, true);
-       if (ret) {
-               dev_err(&client->dev, "Mstar power on failed\n");
-               goto exit_deinit_power;
-       }
-
-       ret = msg21xx_pinctrl_init(ts_data);
-       if (!ret && ts_data->ts_pinctrl) {
-               /*
-               * Pinctrl handle is optional. If pinctrl handle is found
-               * let pins to be configured in active state. If not
-               * found continue further without error.
-               */
-               ret = pinctrl_select_state(ts_data->ts_pinctrl,
-                               ts_data->pinctrl_state_active);
-               if (ret < 0)
-                       dev_err(&client->dev,
-                               "Failed to select %s pinatate %d\n",
-                               PINCTRL_STATE_ACTIVE, ret);
-       }
-
-       ret = msg21xx_ts_gpio_configure(ts_data, true);
-       if (ret) {
-               dev_err(&client->dev, "Failed to configure gpio %d\n", ret);
-               goto exit_gpio_config;
-       }
-
-       if (msg21xx_get_ic_type(ts_data) == 0) {
-               dev_err(&client->dev, "The current IC is not Mstar\n");
-               ret = -1;
-               goto err_wrong_ic_type;
-       }
-
-       mutex_init(&msg21xx_mutex);
-       mutex_init(&ts_data->ts_mutex);
-
-       /* set the supported event type for input device */
-       set_bit(EV_ABS, input_dev->evbit);
-       set_bit(EV_SYN, input_dev->evbit);
-       set_bit(EV_KEY, input_dev->evbit);
-       set_bit(BTN_TOUCH, input_dev->keybit);
-       set_bit(BTN_TOOL_FINGER, input_dev->keybit);
-       set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
-
-       for (i = 0; i < pdata->num_buttons; i++)
-               input_set_capability(input_dev, EV_KEY, pdata->button_map[i]);
-
-       input_set_drvdata(input_dev, ts_data);
-       i2c_set_clientdata(client, ts_data);
-
-#ifdef CONFIG_TP_HAVE_KEY
-       {
-               int i;
-
-               for (i = 0; i < num_buttons; i++)
-                       input_set_capability(input_dev, EV_KEY, button_map[i]);
-       }
-#endif
-
-       input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
-                               0, 2, 0, 0);
-       input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 2, 0, 0);
-       input_set_abs_params(input_dev, ABS_MT_POSITION_X,
-                       0, pdata->x_max, 0, 0);
-       input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
-                       0, pdata->y_max, 0, 0);
-       ret = input_mt_init_slots(input_dev,  pdata->num_max_touches, 0);
-       if (ret) {
-               dev_err(&client->dev,
-                       "Error %d initialising slots\n", ret);
-               goto err_free_mem;
-       }
-
-       /* register the input device to input sub-system */
-       ret = input_register_device(input_dev);
-       if (ret < 0) {
-               dev_err(&client->dev,
-                       "Unable to register ms-touchscreen input device\n");
-               goto err_input_reg_dev;
-       }
-
-       /* version */
-       if (device_create_file(&client->dev, &dev_attr_version) < 0) {
-               dev_err(&client->dev,
-                       "Failed to create device file(%s)!\n",
-                       dev_attr_version.attr.name);
-               goto err_create_fw_ver_file;
-       }
-       /* update */
-       if (device_create_file(&client->dev, &dev_attr_update) < 0) {
-               dev_err(&client->dev,
-                       "Failed to create device file(%s)!\n",
-                       dev_attr_update.attr.name);
-               goto err_create_fw_update_file;
-       }
-       /* data */
-       if (device_create_file(&client->dev, &dev_attr_data) < 0) {
-               dev_err(&client->dev,
-                       "Failed to create device file(%s)!\n",
-                       dev_attr_data.attr.name);
-               goto err_create_fw_data_file;
-       }
-       /* fw name */
-       if (device_create_file(&client->dev, &dev_attr_fw_name) < 0) {
-               dev_err(&client->dev,
-                       "Failed to create device file(%s)!\n",
-                       dev_attr_fw_name.attr.name);
-               goto err_create_fw_name_file;
-       }
-       /* smart fw update */
-       if (device_create_file(&client->dev, &dev_attr_update_fw) < 0) {
-               dev_err(&client->dev,
-                       "Failed to create device file(%s)!\n",
-                       dev_attr_update_fw.attr.name);
-               goto err_create_update_fw_file;
-       }
-       /* smart fw force update */
-       if (device_create_file(&client->dev,
-                                       &dev_attr_force_update_fw) < 0) {
-               dev_err(&client->dev,
-                       "Failed to create device file(%s)!\n",
-                       dev_attr_force_update_fw.attr.name);
-               goto err_create_force_update_fw_file;
-       }
-       dir = debugfs_create_dir(MSTAR_DEBUG_DIR_NAME, NULL);
-       temp = debugfs_create_file("suspend", S_IRUSR | S_IWUSR, dir,
-                                       ts_data, &debug_suspend_fops);
-       if (temp == NULL || IS_ERR(temp)) {
-               dev_err(&client->dev,
-                       "debugfs_create_file failed: rc=%ld\n", PTR_ERR(temp));
-               goto free_debug_dir;
-       }
-
-#ifdef TP_PRINT
-       tp_print_create_entry(ts_data);
-#endif
-
-       ret = request_threaded_irq(client->irq, NULL,
-                               msg21xx_ts_interrupt,
-                               pdata->irq_gpio_flags | IRQF_ONESHOT,
-                               "msg21xx", ts_data);
-       if (ret)
-               goto err_req_irq;
-
-       disable_irq(client->irq);
-
-#if defined(CONFIG_FB)
-       ts_data->fb_notif.notifier_call = fb_notifier_callback;
-       ret = fb_register_client(&ts_data->fb_notif);
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
-       tsps_assist_register_callback("msg21xx", &tsps_msg21xx_enable,
-                               &tsps_msg21xx_data);
-#endif
-
-       dev_dbg(&client->dev, "mstar touch screen registered\n");
-       enable_irq(client->irq);
-       return 0;
-
-err_req_irq:
-       free_irq(client->irq, ts_data);
-       device_remove_file(&client->dev, &dev_attr_data);
-free_debug_dir:
-       debugfs_remove_recursive(dir);
-err_create_fw_data_file:
-       device_remove_file(&client->dev, &dev_attr_update);
-err_create_fw_update_file:
-       device_remove_file(&client->dev, &dev_attr_version);
-err_create_fw_name_file:
-       device_remove_file(&client->dev, &dev_attr_fw_name);
-err_create_update_fw_file:
-       device_remove_file(&client->dev, &dev_attr_update_fw);
-err_create_force_update_fw_file:
-       device_remove_file(&client->dev, &dev_attr_force_update_fw);
-err_create_fw_ver_file:
-       input_unregister_device(input_dev);
-
-err_input_reg_dev:
-       input_free_device(input_dev);
-       input_dev = NULL;
-err_input_allocate_dev:
-       mutex_destroy(&msg21xx_mutex);
-       mutex_destroy(&ts_data->ts_mutex);
-
-err_wrong_ic_type:
-       msg21xx_ts_gpio_configure(ts_data, false);
-exit_gpio_config:
-       if (ts_data->ts_pinctrl) {
-               if (IS_ERR_OR_NULL(ts_data->pinctrl_state_release)) {
-                       devm_pinctrl_put(ts_data->ts_pinctrl);
-                       ts_data->ts_pinctrl = NULL;
-               } else {
-                       ret = pinctrl_select_state(ts_data->ts_pinctrl,
-                                       ts_data->pinctrl_state_release);
-                       if (ret < 0)
-                               dev_err(&ts_data->client->dev,
-                                       "Cannot get release pinctrl state\n");
-               }
-       }
-       msg21xx_ts_power_on(ts_data, false);
-exit_deinit_power:
-       msg21xx_ts_power_init(ts_data, false);
-err_free_mem:
-       input_free_device(input_dev);
-
-       return ret;
-}
-
-/* remove function is triggered when the input device is removed
- *from input sub-system
- */
-static int touch_driver_remove(struct i2c_client *client)
-{
-       int retval = 0;
-       struct msg21xx_ts_data *ts_data = i2c_get_clientdata(client);
-
-       free_irq(ts_data->client->irq, ts_data);
-       gpio_free(ts_data->pdata->irq_gpio);
-       gpio_free(ts_data->pdata->reset_gpio);
-
-       if (ts_data->ts_pinctrl) {
-               if (IS_ERR_OR_NULL(ts_data->pinctrl_state_release)) {
-                       devm_pinctrl_put(ts_data->ts_pinctrl);
-                       ts_data->ts_pinctrl = NULL;
-               } else {
-                       retval = pinctrl_select_state(ts_data->ts_pinctrl,
-                                       ts_data->pinctrl_state_release);
-                       if (retval < 0)
-                               dev_err(&ts_data->client->dev,
-                                       "Cannot get release pinctrl state\n");
-               }
-       }
-
-       input_unregister_device(ts_data->input_dev);
-       mutex_destroy(&msg21xx_mutex);
-       mutex_destroy(&ts_data->ts_mutex);
-
-       return retval;
-}
-
-/* The I2C device list is used for matching I2C device
- *and I2C device driver.
- */
-static const struct i2c_device_id touch_device_id[] = {
-       {"msg21xx", 0},
-       {}, /* should not omitted */
-};
-
-static const struct of_device_id msg21xx_match_table[] = {
-       { .compatible = "mstar,msg21xx", },
-       { },
-};
-
-MODULE_DEVICE_TABLE(i2c, touch_device_id);
-
-static struct i2c_driver touch_device_driver = {
-       .driver = {
-               .name = "ms-msg21xx",
-               .owner = THIS_MODULE,
-               .of_match_table = msg21xx_match_table,
-       },
-       .probe = msg21xx_ts_probe,
-       .remove = touch_driver_remove,
-       .id_table = touch_device_id,
-};
-
-module_i2c_driver(touch_device_driver);
-
-#ifdef TP_PRINT
-#include <linux/proc_fs.h>
-
-static unsigned short InfoAddr = 0x0F, PoolAddr = 0x10, TransLen = 256;
-static unsigned char row, units, cnt;
-
-static int tp_print_proc_read(struct msg21xx_ts_data *ts_data)
-{
-       unsigned short i, j;
-       unsigned short left, offset = 0;
-       unsigned char dbbus_tx_data[3] = {0};
-       unsigned char u8Data;
-       signed short s16Data;
-       int s32Data;
-       char *buf = NULL;
-
-       left = cnt*row*units;
-       if ((ts_data->suspended == 0) &&
-                               (InfoAddr != 0x0F) &&
-                               (PoolAddr != 0x10) &&
-                               (left > 0)) {
-               buf = kmalloc(left, GFP_KERNEL);
-               if (buf != NULL) {
-
-                       while (left > 0) {
-                               dbbus_tx_data[0] = 0x53;
-                               dbbus_tx_data[1] = ((PoolAddr + offset) >> 8)
-                                                                       & 0xFF;
-                               dbbus_tx_data[2] = (PoolAddr + offset) & 0xFF;
-                               mutex_lock(&msg21xx_mutex);
-                               write_i2c_seq(ts_data, ts_data->client->addr,
-                                                       &dbbus_tx_data[0], 3);
-                               read_i2c_seq(ts_data, ts_data->client->addr,
-                                       &buf[offset],
-                                       left > TransLen ? TransLen : left);
-                               mutex_unlock(&msg21xx_mutex);
-
-                               if (left > TransLen) {
-                                       left -= TransLen;
-                                       offset += TransLen;
-                               } else {
-                                       left = 0;
-                               }
-                       }
-
-                       for (i = 0; i < cnt; i++) {
-                               for (j = 0; j < row; j++) {
-                                       if (units == 1) {
-                                               u8Data = buf[i * row * units +
-                                                               j * units];
-                                       } else if (units == 2) {
-                                               s16Data = buf[i * row * units +
-                                               j * units] +
-                                               (buf[i * row * units +
-                                               j * units + 1] << 8);
-                                       } else if (units == 4) {
-                                               s32Data = buf[i * row * units +
-                                               j * units] +
-                                               (buf[i * row * units +
-                                               j * units + 1] << 8) +
-                                               (buf[i * row * units +
-                                               j * units + 2] << 16) +
-                                               (buf[i * row * units +
-                                               j * units + 3] << 24);
-                                       }
-                               }
-                       }
-
-                       kfree(buf);
-               }
-       }
-
-       return 0;
-}
-
-static void tp_print_create_entry(struct msg21xx_ts_data *ts_data)
-{
-       unsigned char dbbus_tx_data[3] = {0};
-       unsigned char dbbus_rx_data[8] = {0};
-
-       dbbus_tx_data[0] = 0x53;
-       dbbus_tx_data[1] = 0x00;
-       dbbus_tx_data[2] = 0x58;
-       mutex_lock(&msg21xx_mutex);
-       write_i2c_seq(ts_data, ts_data->client->addr, &dbbus_tx_data[0], 3);
-       read_i2c_seq(ts_data, ts_data->client->addr, &dbbus_rx_data[0], 4);
-       mutex_unlock(&msg21xx_mutex);
-       InfoAddr = (dbbus_rx_data[1]<<8) + dbbus_rx_data[0];
-       PoolAddr = (dbbus_rx_data[3]<<8) + dbbus_rx_data[2];
-
-       if ((InfoAddr != 0x0F) && (PoolAddr != 0x10)) {
-               msleep(20);
-               dbbus_tx_data[0] = 0x53;
-               dbbus_tx_data[1] = (InfoAddr >> 8) & 0xFF;
-               dbbus_tx_data[2] = InfoAddr & 0xFF;
-               mutex_lock(&msg21xx_mutex);
-               write_i2c_seq(ts_data, ts_data->client->addr,
-                                               &dbbus_tx_data[0], 3);
-               read_i2c_seq(ts_data, ts_data->client->addr,
-                                               &dbbus_rx_data[0], 8);
-               mutex_unlock(&msg21xx_mutex);
-
-               units = dbbus_rx_data[0];
-               row = dbbus_rx_data[1];
-               cnt = dbbus_rx_data[2];
-               TransLen = (dbbus_rx_data[7]<<8) + dbbus_rx_data[6];
-
-               if (device_create_file(&ts_data->client->dev,
-                                               &dev_attr_tpp) < 0)
-                       dev_err(&ts_data->client->dev, "Failed to create device file(%s)!\n",
-                                       dev_attr_tpp.attr.name);
-       }
-}
-#endif
-
-MODULE_AUTHOR("MStar Semiconductor, Inc.");
-MODULE_LICENSE("GPL v2");
index ce15e15..ce1eb56 100644 (file)
 #define RESUME_RETRY                   (0 << 0)
 #define RESUME_TERMINATE               (1 << 0)
 
-#define TTBCR2_SEP_SHIFT               15
-#define TTBCR2_SEP_UPSTREAM            (0x7 << TTBCR2_SEP_SHIFT)
-
-#define TTBCR2_SEP_31                  0
-#define TTBCR2_SEP_35                  1
-#define TTBCR2_SEP_39                  2
-#define TTBCR2_SEP_41                  3
-#define TTBCR2_SEP_43                  4
-#define TTBCR2_SEP_47                  5
-#define TTBCR2_SEP_NOSIGN              7
-
 #define TTBRn_ASID_SHIFT               48
 
 #define FSR_MULTI                      (1 << 31)
@@ -1614,7 +1603,6 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
                writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
                if (smmu->version > ARM_SMMU_V1) {
                        reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
-                       reg |= TTBCR2_SEP_UPSTREAM;
                        writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
                }
        } else {
@@ -1745,7 +1733,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
-       unsigned long quirks = 0;
+       unsigned long quirks =
+               smmu_domain->attributes & (1 << DOMAIN_ATTR_ENABLE_TTBR1) ?
+                       IO_PGTABLE_QUIRK_ARM_TTBR1 : 0;
 
        if (smmu_domain->smmu)
                goto out;
@@ -1837,6 +1827,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                };
                fmt = ARM_MSM_SECURE;
        } else {
+
                smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
                        .quirks         = quirks,
                        .pgsize_bitmap  = arm_smmu_ops.pgsize_bitmap,
@@ -3140,6 +3131,12 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
                        & (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
                ret = 0;
                break;
+       case DOMAIN_ATTR_ENABLE_TTBR1:
+               *((int *)data) = !!(smmu_domain->attributes
+                                       & (1 << DOMAIN_ATTR_ENABLE_TTBR1));
+               ret = 0;
+               break;
+
        default:
                ret = -ENODEV;
                break;
@@ -3283,6 +3280,12 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
                ret = 0;
                break;
        }
+       case DOMAIN_ATTR_ENABLE_TTBR1:
+               if (*((int *)data))
+                       smmu_domain->attributes |=
+                               1 << DOMAIN_ATTR_ENABLE_TTBR1;
+               ret = 0;
+               break;
        default:
                ret = -ENODEV;
                break;
index 0d057ca..5f2b662 100644 (file)
 #define ARM_LPAE_TCR_TG0_64K           (1 << 14)
 #define ARM_LPAE_TCR_TG0_16K           (2 << 14)
 
+#define ARM_LPAE_TCR_TG1_16K            1ULL
+#define ARM_LPAE_TCR_TG1_4K             2ULL
+#define ARM_LPAE_TCR_TG1_64K            3ULL
+
 #define ARM_LPAE_TCR_SH0_SHIFT         12
 #define ARM_LPAE_TCR_SH0_MASK          0x3
+#define ARM_LPAE_TCR_SH1_SHIFT         28
 #define ARM_LPAE_TCR_SH_NS             0
 #define ARM_LPAE_TCR_SH_OS             2
 #define ARM_LPAE_TCR_SH_IS             3
 
 #define ARM_LPAE_TCR_ORGN0_SHIFT       10
+#define ARM_LPAE_TCR_ORGN1_SHIFT       26
 #define ARM_LPAE_TCR_IRGN0_SHIFT       8
+#define ARM_LPAE_TCR_IRGN1_SHIFT       24
 #define ARM_LPAE_TCR_RGN_MASK          0x3
 #define ARM_LPAE_TCR_RGN_NC            0
 #define ARM_LPAE_TCR_RGN_WBWA          1
 #define ARM_LPAE_TCR_T0SZ_SHIFT                0
 #define ARM_LPAE_TCR_SZ_MASK           0xf
 
+#define ARM_LPAE_TCR_T1SZ_SHIFT         16
+#define ARM_LPAE_TCR_T1SZ_MASK          0x3f
+
 #define ARM_LPAE_TCR_PS_SHIFT          16
 #define ARM_LPAE_TCR_PS_MASK           0x7
 
 #define ARM_LPAE_TCR_EPD1_SHIFT                23
 #define ARM_LPAE_TCR_EPD1_FAULT                1
 
+#define ARM_LPAE_TCR_SEP_SHIFT         (15 + 32)
+
+#define ARM_LPAE_TCR_SEP_31            0ULL
+#define ARM_LPAE_TCR_SEP_35            1ULL
+#define ARM_LPAE_TCR_SEP_39            2ULL
+#define ARM_LPAE_TCR_SEP_41            3ULL
+#define ARM_LPAE_TCR_SEP_43            4ULL
+#define ARM_LPAE_TCR_SEP_47            5ULL
+#define ARM_LPAE_TCR_SEP_UPSTREAM      7ULL
+
 #define ARM_LPAE_MAIR_ATTR_SHIFT(n)    ((n) << 3)
 #define ARM_LPAE_MAIR_ATTR_MASK                0xff
 #define ARM_LPAE_MAIR_ATTR_DEVICE      0x04
@@ -206,7 +226,7 @@ struct arm_lpae_io_pgtable {
        unsigned long           pg_shift;
        unsigned long           bits_per_level;
 
-       void                    *pgd;
+       void                    *pgd[2];
 };
 
 typedef u64 arm_lpae_iopte;
@@ -524,14 +544,26 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
        return pte;
 }
 
+static inline arm_lpae_iopte *arm_lpae_get_table(
+               struct arm_lpae_io_pgtable *data, unsigned long iova)
+{
+       struct io_pgtable_cfg *cfg = &data->iop.cfg;
+
+       return ((cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) &&
+               (iova & (1UL << (cfg->ias - 1)))) ?
+               data->pgd[1] : data->pgd[0];
+}
+
 static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
                        phys_addr_t paddr, size_t size, int iommu_prot)
 {
        struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
-       arm_lpae_iopte *ptep = data->pgd;
+       arm_lpae_iopte *ptep;
        int ret, lvl = ARM_LPAE_START_LVL(data);
        arm_lpae_iopte prot;
 
+       ptep = arm_lpae_get_table(data, iova);
+
        /* If no access, then nothing to do */
        if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
                return 0;
@@ -554,7 +586,7 @@ static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
 {
        struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
        struct io_pgtable_cfg *cfg = &data->iop.cfg;
-       arm_lpae_iopte *ptep = data->pgd;
+       arm_lpae_iopte *ptep;
        int lvl = ARM_LPAE_START_LVL(data);
        arm_lpae_iopte prot;
        struct scatterlist *s;
@@ -563,6 +595,8 @@ static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
        unsigned int min_pagesz;
        struct map_state ms;
 
+       ptep = arm_lpae_get_table(data, iova);
+
        /* If no access, then nothing to do */
        if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
                goto out_err;
@@ -672,7 +706,10 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop)
 {
        struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
 
-       __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
+       __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd[0]);
+       if (data->pgd[1])
+               __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data),
+                       data->pgd[1]);
        kfree(data);
 }
 
@@ -800,9 +837,11 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
        size_t unmapped = 0;
        struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
        struct io_pgtable *iop = &data->iop;
-       arm_lpae_iopte *ptep = data->pgd;
+       arm_lpae_iopte *ptep;
        int lvl = ARM_LPAE_START_LVL(data);
 
+       ptep = arm_lpae_get_table(data, iova);
+
        while (unmapped < size) {
                size_t ret, size_to_unmap, remaining;
 
@@ -828,7 +867,10 @@ static int arm_lpae_iova_to_pte(struct arm_lpae_io_pgtable *data,
                                unsigned long iova, int *plvl_ret,
                                arm_lpae_iopte *ptep_ret)
 {
-       arm_lpae_iopte pte, *ptep = data->pgd;
+       arm_lpae_iopte pte, *ptep;
+
+       ptep = arm_lpae_get_table(data, iova);
+
        *plvl_ret = ARM_LPAE_START_LVL(data);
        *ptep_ret = 0;
 
@@ -994,6 +1036,71 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
        return data;
 }
 
+static u64 arm64_lpae_setup_ttbr1(struct io_pgtable_cfg *cfg,
+               struct arm_lpae_io_pgtable *data)
+
+{
+       u64 reg;
+
+       /* If TTBR1 is disabled, disable speculative walks through TTBR1 */
+       if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)) {
+               reg = ARM_LPAE_TCR_EPD1;
+               reg |= (ARM_LPAE_TCR_SEP_UPSTREAM << ARM_LPAE_TCR_SEP_SHIFT);
+               return reg;
+       }
+
+       if (cfg->iommu_dev && cfg->iommu_dev->archdata.dma_coherent)
+               reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH1_SHIFT) |
+                       (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN1_SHIFT) |
+                       (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN1_SHIFT);
+       else
+               reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH1_SHIFT) |
+                       (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN1_SHIFT) |
+                       (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN1_SHIFT);
+
+       switch (1 << data->pg_shift) {
+       case SZ_4K:
+               reg |= (ARM_LPAE_TCR_TG1_4K << 30);
+               break;
+       case SZ_16K:
+               reg |= (ARM_LPAE_TCR_TG1_16K << 30);
+               break;
+       case SZ_64K:
+               reg |= (ARM_LPAE_TCR_TG1_64K << 30);
+               break;
+       }
+
+       /* Set T1SZ */
+       reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T1SZ_SHIFT;
+
+       /* Set the SEP bit based on the size */
+       switch (cfg->ias) {
+       case 32:
+               reg |= (ARM_LPAE_TCR_SEP_31 << ARM_LPAE_TCR_SEP_SHIFT);
+               break;
+       case 36:
+               reg |= (ARM_LPAE_TCR_SEP_35 << ARM_LPAE_TCR_SEP_SHIFT);
+               break;
+       case 40:
+               reg |= (ARM_LPAE_TCR_SEP_39 << ARM_LPAE_TCR_SEP_SHIFT);
+               break;
+       case 42:
+               reg |= (ARM_LPAE_TCR_SEP_41 << ARM_LPAE_TCR_SEP_SHIFT);
+               break;
+       case 44:
+               reg |= (ARM_LPAE_TCR_SEP_43 << ARM_LPAE_TCR_SEP_SHIFT);
+               break;
+       case 48:
+               reg |= (ARM_LPAE_TCR_SEP_47 << ARM_LPAE_TCR_SEP_SHIFT);
+               break;
+       default:
+               reg |= (ARM_LPAE_TCR_SEP_UPSTREAM << ARM_LPAE_TCR_SEP_SHIFT);
+               break;
+       }
+
+       return reg;
+}
+
 static struct io_pgtable *
 arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 {
@@ -1050,8 +1157,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 
        reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
 
-       /* Disable speculative walks through TTBR1 */
-       reg |= ARM_LPAE_TCR_EPD1;
+       /* Bring in the TTBR1 configuration */
+       reg |= arm64_lpae_setup_ttbr1(cfg, data);
+
        cfg->arm_lpae_s1_cfg.tcr = reg;
 
        /* MAIRs */
@@ -1066,16 +1174,33 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
        cfg->arm_lpae_s1_cfg.mair[1] = 0;
 
        /* Looking good; allocate a pgd */
-       data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg, cookie);
-       if (!data->pgd)
+       data->pgd[0] = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg,
+               cookie);
+       if (!data->pgd[0])
                goto out_free_data;
 
+
+       if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {
+               data->pgd[1] = __arm_lpae_alloc_pages(data->pgd_size,
+                       GFP_KERNEL, cfg, cookie);
+               if (!data->pgd[1]) {
+                       __arm_lpae_free_pages(data->pgd[0], data->pgd_size, cfg,
+                               cookie);
+                       goto out_free_data;
+               }
+       } else {
+               data->pgd[1] = NULL;
+       }
+
        /* Ensure the empty pgd is visible before any actual TTBR write */
        wmb();
 
        /* TTBRs */
-       cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
-       cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
+       cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd[0]);
+
+       if (data->pgd[1])
+               cfg->arm_lpae_s1_cfg.ttbr[1] = virt_to_phys(data->pgd[1]);
+
        return &data->iop;
 
 out_free_data:
@@ -1155,15 +1280,16 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
        cfg->arm_lpae_s2_cfg.vtcr = reg;
 
        /* Allocate pgd pages */
-       data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg, cookie);
-       if (!data->pgd)
+       data->pgd[0] = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg,
+               cookie);
+       if (!data->pgd[0])
                goto out_free_data;
 
        /* Ensure the empty pgd is visible before any actual TTBR write */
        wmb();
 
        /* VTTBR */
-       cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
+       cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd[0]);
        return &data->iop;
 
 out_free_data:
@@ -1261,7 +1387,7 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
                cfg->pgsize_bitmap, cfg->ias);
        pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
                data->levels, data->pgd_size, data->pg_shift,
-               data->bits_per_level, data->pgd);
+               data->bits_per_level, data->pgd[0]);
 }
 
 #define __FAIL(ops, i) ({                                              \
index a3f366f..f453304 100644 (file)
@@ -62,6 +62,7 @@ struct io_pgtable_cfg {
         */
        #define IO_PGTABLE_QUIRK_ARM_NS (1 << 0)        /* Set NS bit in PTEs */
        #define IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT (1 << 1)
+       #define IO_PGTABLE_QUIRK_ARM_TTBR1 (1 << 2)     /* Allocate TTBR1 PT */
        int                             quirks;
        unsigned long                   pgsize_bitmap;
        unsigned int                    ias;
index 5cbdc03..9ba0b7d 100644 (file)
@@ -122,6 +122,7 @@ struct sde_smmu_client {
        struct sde_module_power mp;
        struct reg_bus_client *reg_bus_clt;
        bool domain_attached;
+       bool domain_reattach;
        int domain;
 };
 
index 9b1175d..915126f 100644 (file)
@@ -247,6 +247,14 @@ int sde_smmu_attach(struct sde_rot_data_type *mdata)
                                        goto err;
                                }
                                sde_smmu->domain_attached = true;
+                               if (sde_smmu->domain_reattach) {
+                                       SDEROT_DBG(
+                                               "domain[%i] re-attach\n",
+                                               i);
+                                       /* remove extra vote */
+                                       sde_smmu_enable_power(sde_smmu, false);
+                                       sde_smmu->domain_reattach = false;
+                               }
                                SDEROT_DBG("iommu v2 domain[%i] attached\n", i);
                        }
                } else {
@@ -292,6 +300,12 @@ int sde_smmu_detach(struct sde_rot_data_type *mdata)
                                arm_iommu_detach_device(sde_smmu->dev);
                                SDEROT_DBG("iommu domain[%i] detached\n", i);
                                sde_smmu->domain_attached = false;
+
+                               /*
+                                * since we are leaving the clock vote, on
+                                * re-attaching do not vote for clocks
+                                */
+                               sde_smmu->domain_reattach = true;
                                }
                        else {
                                sde_smmu_enable_power(sde_smmu, false);
index 52e3e9b..60b02f2 100644 (file)
@@ -3482,15 +3482,23 @@ static void mmc_blk_cmdq_err(struct mmc_queue *mq)
        /* RED error - Fatal: requires reset */
        if (mrq->cmdq_req->resp_err) {
                err = mrq->cmdq_req->resp_err;
+               goto reset;
+       }
+
+       /*
+        * TIMEOUT errors can happen because of execution error
+        * in the last command. So send cmd 13 to get device status
+        */
+       if ((mrq->cmd && (mrq->cmd->error == -ETIMEDOUT)) ||
+                       (mrq->data && (mrq->data->error == -ETIMEDOUT))) {
                if (mmc_host_halt(host) || mmc_host_cq_disable(host)) {
                        ret = get_card_status(host->card, &status, 0);
                        if (ret)
                                pr_err("%s: CMD13 failed with err %d\n",
                                                mmc_hostname(host), ret);
                }
-               pr_err("%s: Response error detected with device status 0x%08x\n",
+               pr_err("%s: Timeout error detected with device status 0x%08x\n",
                        mmc_hostname(host), status);
-               goto reset;
        }
 
        /*
index 9f5c67e..26e57f3 100644 (file)
@@ -3095,7 +3095,6 @@ int mmc_resume_bus(struct mmc_host *host)
        pr_debug("%s: Starting deferred resume\n", mmc_hostname(host));
        spin_lock_irqsave(&host->lock, flags);
        host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
-       host->rescan_disable = 0;
        spin_unlock_irqrestore(&host->lock, flags);
 
        mmc_bus_get(host);
@@ -4041,6 +4040,7 @@ EXPORT_SYMBOL(mmc_detect_card_removed);
 
 void mmc_rescan(struct work_struct *work)
 {
+       unsigned long flags;
        struct mmc_host *host =
                container_of(work, struct mmc_host, detect.work);
 
@@ -4049,8 +4049,12 @@ void mmc_rescan(struct work_struct *work)
                host->trigger_card_event = false;
        }
 
-       if (host->rescan_disable)
+       spin_lock_irqsave(&host->lock, flags);
+       if (host->rescan_disable) {
+               spin_unlock_irqrestore(&host->lock, flags);
                return;
+       }
+       spin_unlock_irqrestore(&host->lock, flags);
 
        /* If there is a non-removable card registered, only scan once */
        if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
@@ -4297,10 +4301,6 @@ int mmc_pm_notify(struct notifier_block *notify_block,
        case PM_SUSPEND_PREPARE:
        case PM_RESTORE_PREPARE:
                spin_lock_irqsave(&host->lock, flags);
-               if (mmc_bus_needs_resume(host)) {
-                       spin_unlock_irqrestore(&host->lock, flags);
-                       break;
-               }
                host->rescan_disable = 1;
                spin_unlock_irqrestore(&host->lock, flags);
                cancel_delayed_work_sync(&host->detect);
index a754647..96d4fbf 100644 (file)
@@ -33,8 +33,8 @@
 #define DCMD_SLOT 31
 #define NUM_SLOTS 32
 
-/* 1 sec */
-#define HALT_TIMEOUT_MS 1000
+/* 10 sec */
+#define HALT_TIMEOUT_MS 10000
 
 static int cmdq_halt_poll(struct mmc_host *mmc, bool halt);
 static int cmdq_halt(struct mmc_host *mmc, bool halt);
@@ -197,6 +197,10 @@ static void cmdq_dump_adma_mem(struct cmdq_host *cq_host)
 static void cmdq_dumpregs(struct cmdq_host *cq_host)
 {
        struct mmc_host *mmc = cq_host->mmc;
+       int offset = 0;
+
+       if (cq_host->offset_changed)
+               offset = CQ_V5_VENDOR_CFG;
 
        MMC_TRACE(mmc,
        "%s: 0x0C=0x%08x 0x10=0x%08x 0x14=0x%08x 0x18=0x%08x 0x28=0x%08x 0x2C=0x%08x 0x30=0x%08x 0x34=0x%08x 0x54=0x%08x 0x58=0x%08x 0x5C=0x%08x 0x48=0x%08x\n",
@@ -243,7 +247,7 @@ static void cmdq_dumpregs(struct cmdq_host *cq_host)
                cmdq_readl(cq_host, CQCRI),
                cmdq_readl(cq_host, CQCRA));
        pr_err(DRV_NAME": Vendor cfg 0x%08x\n",
-              cmdq_readl(cq_host, CQ_VENDOR_CFG));
+              cmdq_readl(cq_host, CQ_VENDOR_CFG + offset));
        pr_err(DRV_NAME ": ===========================================\n");
 
        cmdq_dump_task_history(cq_host);
@@ -384,6 +388,12 @@ static int cmdq_enable(struct mmc_host *mmc)
                cq_host->caps |= CMDQ_CAP_CRYPTO_SUPPORT |
                                 CMDQ_TASK_DESC_SZ_128;
                cqcfg |= CQ_ICE_ENABLE;
+               /*
+                * For SDHC v5.0 onwards, ICE 3.0 specific registers are added
+                * in CQ register space, due to which a few CQ registers are
+                * shifted. Set offset_changed boolean to use updated address.
+                */
+               cq_host->offset_changed = true;
        }
 
        cmdq_writel(cq_host, cqcfg, CQCFG);
@@ -818,14 +828,18 @@ static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag)
 {
        struct mmc_request *mrq;
        struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+       int offset = 0;
 
+       if (cq_host->offset_changed)
+               offset = CQ_V5_VENDOR_CFG;
        mrq = get_req_by_tag(cq_host, tag);
        if (tag == cq_host->dcmd_slot)
                mrq->cmd->resp[0] = cmdq_readl(cq_host, CQCRDCT);
 
        if (mrq->cmdq_req->cmdq_req_flags & DCMD)
-               cmdq_writel(cq_host, cmdq_readl(cq_host, CQ_VENDOR_CFG) |
-                           CMDQ_SEND_STATUS_TRIGGER, CQ_VENDOR_CFG);
+               cmdq_writel(cq_host,
+                       cmdq_readl(cq_host, CQ_VENDOR_CFG + offset) |
+                       CMDQ_SEND_STATUS_TRIGGER, CQ_VENDOR_CFG + offset);
 
        cmdq_runtime_pm_put(cq_host);
        if (!(cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) &&
@@ -845,7 +859,6 @@ irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
        u32 dbr_set = 0;
 
        status = cmdq_readl(cq_host, CQIS);
-       cmdq_writel(cq_host, status, CQIS);
 
        if (!status && !err)
                return IRQ_NONE;
@@ -868,6 +881,17 @@ irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
                if (ret)
                        pr_err("%s: %s: halt failed ret=%d\n",
                                        mmc_hostname(mmc), __func__, ret);
+
+               /*
+                * Clear the CQIS after halting in case of error. This is
+                * done because if CQIS is cleared before halting, the CQ
+                * will continue issuing commands for the rest of the
+                * requests with the doorbell rung. This would overwrite
+                * the Resp Arg register. So the CQ must be halted first
+                * and then CQIS cleared in case of error.
+                */
+               cmdq_writel(cq_host, status, CQIS);
+
                cmdq_dumpregs(cq_host);
 
                if (!err_info) {
@@ -956,13 +980,16 @@ skip_cqterri:
 
                        mrq->cmdq_req->resp_err = true;
                        pr_err("%s: Response error (0x%08x) from card !!!",
-                               mmc_hostname(mmc), status);
+                               mmc_hostname(mmc), cmdq_readl(cq_host, CQCRA));
+
                } else {
                        mrq->cmdq_req->resp_idx = cmdq_readl(cq_host, CQCRI);
                        mrq->cmdq_req->resp_arg = cmdq_readl(cq_host, CQCRA);
                }
 
                cmdq_finish_data(mmc, tag);
+       } else {
+               cmdq_writel(cq_host, status, CQIS);
        }
 
        if (status & CQIS_TCC) {
@@ -995,6 +1022,9 @@ skip_cqterri:
        if (status & CQIS_HAC) {
                if (cq_host->ops->post_cqe_halt)
                        cq_host->ops->post_cqe_halt(mmc);
+               /* halt done: re-enable legacy interrupts */
+               if (cq_host->ops->clear_set_irqs)
+                       cq_host->ops->clear_set_irqs(mmc, false);
                /* halt is completed, wakeup waiting thread */
                complete(&cq_host->halt_comp);
        }
@@ -1052,6 +1082,7 @@ static int cmdq_halt(struct mmc_host *mmc, bool halt)
 {
        struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
        u32 ret = 0;
+       u32 config = 0;
        int retries = 3;
 
        cmdq_runtime_pm_get(cq_host);
@@ -1061,16 +1092,31 @@ static int cmdq_halt(struct mmc_host *mmc, bool halt)
                                    CQCTL);
                        ret = wait_for_completion_timeout(&cq_host->halt_comp,
                                          msecs_to_jiffies(HALT_TIMEOUT_MS));
-                       if (!ret && !(cmdq_readl(cq_host, CQCTL) & HALT)) {
-                               retries--;
-                               continue;
+                       if (!ret) {
+                               pr_warn("%s: %s: HAC int timeout\n",
+                                       mmc_hostname(mmc), __func__);
+                               if ((cmdq_readl(cq_host, CQCTL) & HALT)) {
+                                       /*
+                                        * Don't retry if CQE is halted but irq
+                                        * is not triggered in timeout period.
+                                        * And since we are returning error,
+                                        * un-halt CQE. Since irq was not fired
+                                        * yet, no need to set other params
+                                        */
+                                       retries = 0;
+                                       config = cmdq_readl(cq_host, CQCTL);
+                                       config &= ~HALT;
+                                       cmdq_writel(cq_host, config, CQCTL);
+                               } else {
+                                       pr_warn("%s: %s: retryng halt (%d)\n",
+                                               mmc_hostname(mmc), __func__,
+                                               retries);
+                                       retries--;
+                                       continue;
+                               }
                        } else {
                                MMC_TRACE(mmc, "%s: halt done , retries: %d\n",
                                        __func__, retries);
-                               /* halt done: re-enable legacy interrupts */
-                               if (cq_host->ops->clear_set_irqs)
-                                       cq_host->ops->clear_set_irqs(mmc,
-                                                               false);
                                break;
                        }
                }
index 05c924a..6c10ab3 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
 #define DAT_ADDR_LO(x) ((x & 0xFFFFFFFF) << 32)
 #define DAT_ADDR_HI(x) ((x & 0xFFFFFFFF) << 0)
 
+/*
+ * Add new macro for updated CQ vendor specific
+ * register address for SDHC v5.0 onwards.
+ */
+#define CQ_V5_VENDOR_CFG       0x900
 #define CQ_VENDOR_CFG  0x100
 #define CMDQ_SEND_STATUS_TRIGGER (1 << 31)
 
@@ -175,6 +180,7 @@ struct cmdq_host {
        bool enabled;
        bool halted;
        bool init_done;
+       bool offset_changed;
 
        u8 *desc_base;
 
index 127a260..15d1efa 100644 (file)
@@ -310,6 +310,9 @@ void sdhci_msm_writel_relaxed(u32 val, struct sdhci_host *host, u32 offset)
        writel_relaxed(val, base_addr + offset);
 }
 
+/* Timeout value to avoid infinite waiting for pwr_irq */
+#define MSM_PWR_IRQ_TIMEOUT_MS 5000
+
 static const u32 tuning_block_64[] = {
        0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
        0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
@@ -2779,8 +2782,10 @@ static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
         */
        if (done)
                init_completion(&msm_host->pwr_irq_completion);
-       else
-               wait_for_completion(&msm_host->pwr_irq_completion);
+       else if (!wait_for_completion_timeout(&msm_host->pwr_irq_completion,
+                               msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
+               __WARN_printf("%s: request(%d) timed out waiting for pwr_irq\n",
+                                       mmc_hostname(host->mmc), req_type);
 
        pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
                        __func__, req_type);
@@ -3284,6 +3289,8 @@ static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
        /* registers offset changed starting from 4.2.0 */
        int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
 
+       if (cq_host->offset_changed)
+               offset += CQ_V5_VENDOR_CFG;
        pr_err("---- Debug RAM dump ----\n");
        pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
               cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
index a55a26b..50d8e72 100644 (file)
@@ -625,6 +625,9 @@ static int rmnet_mhi_xmit(struct sk_buff *skb, struct net_device *dev)
                                tx_ring_full_count[rmnet_mhi_ptr->dev_index]++;
                                netif_stop_queue(dev);
                                rmnet_log(MSG_VERBOSE, "Stopping Queue\n");
+                               write_unlock_irqrestore(
+                                           &rmnet_mhi_ptr->out_chan_full_lock,
+                                           flags);
                                goto rmnet_mhi_xmit_error_cleanup;
                        } else {
                                retry = 1;
@@ -652,7 +655,6 @@ static int rmnet_mhi_xmit(struct sk_buff *skb, struct net_device *dev)
 
 rmnet_mhi_xmit_error_cleanup:
        rmnet_log(MSG_VERBOSE, "Ring full\n");
-       write_unlock_irqrestore(&rmnet_mhi_ptr->out_chan_full_lock, flags);
        return NETDEV_TX_BUSY;
 }
 
index 7e5e25e..f1ead7c 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013, 2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index a0f7658..4874c5b 100644 (file)
@@ -5,6 +5,7 @@ wil6210-y += netdev.o
 wil6210-y += cfg80211.o
 wil6210-y += pcie_bus.o
 wil6210-y += debugfs.o
+wil6210-y += sysfs.o
 wil6210-y += wmi.o
 wil6210-y += interrupt.o
 wil6210-y += txrx.o
index 31b4591..d472e13 100644 (file)
@@ -212,21 +212,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                return rc;
        }
 
-       /* device supports 48 bit addresses */
-       rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
-       if (rc) {
-               dev_err(dev, "dma_set_mask_and_coherent(48) failed: %d\n", rc);
-               rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-               if (rc) {
-                       dev_err(dev,
-                               "dma_set_mask_and_coherent(32) failed: %d\n",
-                               rc);
-                       goto if_free;
-               }
-       } else {
-               wil->use_extended_dma_addr = 1;
-       }
-
        wil->pdev = pdev;
        pci_set_drvdata(pdev, wil);
        /* rollback to if_free */
@@ -240,6 +225,21 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
        /* rollback to err_plat */
 
+       /* device supports 48bit addresses */
+       rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+       if (rc) {
+               dev_err(dev, "dma_set_mask_and_coherent(48) failed: %d\n", rc);
+               rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+               if (rc) {
+                       dev_err(dev,
+                               "dma_set_mask_and_coherent(32) failed: %d\n",
+                               rc);
+                       goto err_plat;
+               }
+       } else {
+               wil->use_extended_dma_addr = 1;
+       }
+
        rc = pci_enable_device(pdev);
        if (rc) {
                wil_err(wil,
@@ -303,7 +303,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 #endif /* CONFIG_PM */
 
        wil6210_debugfs_init(wil);
-
+       wil6210_sysfs_init(wil);
 
        return 0;
 
@@ -337,6 +337,7 @@ static void wil_pcie_remove(struct pci_dev *pdev)
 #endif /* CONFIG_PM_SLEEP */
 #endif /* CONFIG_PM */
 
+       wil6210_sysfs_remove(wil);
        wil6210_debugfs_remove(wil);
        rtnl_lock();
        wil_p2p_wdev_free(wil);
diff --git a/drivers/net/wireless/ath/wil6210/sysfs.c b/drivers/net/wireless/ath/wil6210/sysfs.c
new file mode 100644 (file)
index 0000000..0faa26c
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/sysfs.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+
+static ssize_t
+wil_ftm_txrx_offset_sysfs_show(struct device *dev,
+                              struct device_attribute *attr,
+                              char *buf)
+{
+       struct wil6210_priv *wil = dev_get_drvdata(dev);
+       struct {
+               struct wmi_cmd_hdr wmi;
+               struct wmi_tof_get_tx_rx_offset_event evt;
+       } __packed reply;
+       int rc;
+       ssize_t len;
+
+       if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
+               return -EOPNOTSUPP;
+
+       memset(&reply, 0, sizeof(reply));
+       rc = wmi_call(wil, WMI_TOF_GET_TX_RX_OFFSET_CMDID, NULL, 0,
+                     WMI_TOF_GET_TX_RX_OFFSET_EVENTID,
+                     &reply, sizeof(reply), 100);
+       if (rc < 0)
+               return rc;
+       if (reply.evt.status) {
+               wil_err(wil, "get_tof_tx_rx_offset failed, error %d\n",
+                       reply.evt.status);
+               return -EIO;
+       }
+       len = snprintf(buf, PAGE_SIZE, "%u %u\n",
+                      le32_to_cpu(reply.evt.tx_offset),
+                      le32_to_cpu(reply.evt.rx_offset));
+       return len;
+}
+
+static ssize_t
+wil_ftm_txrx_offset_sysfs_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t count)
+{
+       struct wil6210_priv *wil = dev_get_drvdata(dev);
+       struct wmi_tof_set_tx_rx_offset_cmd cmd;
+       struct {
+               struct wmi_cmd_hdr wmi;
+               struct wmi_tof_set_tx_rx_offset_event evt;
+       } __packed reply;
+       unsigned int tx_offset, rx_offset;
+       int rc;
+
+       if (sscanf(buf, "%u %u", &tx_offset, &rx_offset) != 2)
+               return -EINVAL;
+
+       if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
+               return -EOPNOTSUPP;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.tx_offset = cpu_to_le32(tx_offset);
+       cmd.rx_offset = cpu_to_le32(rx_offset);
+       memset(&reply, 0, sizeof(reply));
+       rc = wmi_call(wil, WMI_TOF_SET_TX_RX_OFFSET_CMDID, &cmd, sizeof(cmd),
+                     WMI_TOF_SET_TX_RX_OFFSET_EVENTID,
+                     &reply, sizeof(reply), 100);
+       if (rc < 0)
+               return rc;
+       if (reply.evt.status) {
+               wil_err(wil, "set_tof_tx_rx_offset failed, error %d\n",
+                       reply.evt.status);
+               return -EIO;
+       }
+       return count;
+}
+
+static DEVICE_ATTR(ftm_txrx_offset, 0644,
+                  wil_ftm_txrx_offset_sysfs_show,
+                  wil_ftm_txrx_offset_sysfs_store);
+
+static struct attribute *wil6210_sysfs_entries[] = {
+       &dev_attr_ftm_txrx_offset.attr,
+       NULL
+};
+
+static struct attribute_group wil6210_attribute_group = {
+       .name = "wil6210",
+       .attrs = wil6210_sysfs_entries,
+};
+
+int wil6210_sysfs_init(struct wil6210_priv *wil)
+{
+       struct device *dev = wil_to_dev(wil);
+       int err;
+
+       err = sysfs_create_group(&dev->kobj, &wil6210_attribute_group);
+       if (err) {
+               wil_err(wil, "failed to create sysfs group: %d\n", err);
+               return err;
+       }
+
+       return 0;
+}
+
+void wil6210_sysfs_remove(struct wil6210_priv *wil)
+{
+       struct device *dev = wil_to_dev(wil);
+
+       sysfs_remove_group(&dev->kobj, &wil6210_attribute_group);
+}
index 96062be..fcfbcf7 100644 (file)
@@ -893,6 +893,8 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 
 int wil6210_debugfs_init(struct wil6210_priv *wil);
 void wil6210_debugfs_remove(struct wil6210_priv *wil);
+int wil6210_sysfs_init(struct wil6210_priv *wil);
+void wil6210_sysfs_remove(struct wil6210_priv *wil);
 int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
                       struct station_info *sinfo);
 
index 7c8b5e3..cd105a0 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2414,8 +2414,16 @@ static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
                        dev->res[base_sel - 1].base,
                        wr_offset, wr_mask, wr_value);
 
-               msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
-                       wr_offset, wr_mask, wr_value);
+               base_sel_size = resource_size(dev->res[base_sel - 1].resource);
+
+               if (wr_offset >  base_sel_size - 4 ||
+                       msm_pcie_check_align(dev, wr_offset))
+                       PCIE_DBG_FS(dev,
+                               "PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
+                               dev->rc_idx, wr_offset, base_sel_size - 4);
+               else
+                       msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
+                               wr_offset, wr_mask, wr_value);
 
                break;
        case 13: /* dump all registers of base_sel */
index 6e06fef..c3cb57e 100644 (file)
@@ -267,6 +267,14 @@ static int __ufs_qcom_phy_init_vreg(struct phy *phy,
 
        char prop_name[MAX_PROP_NAME];
 
+       if (dev->of_node) {
+               snprintf(prop_name, MAX_PROP_NAME, "%s-supply", name);
+               if (!of_parse_phandle(dev->of_node, prop_name, 0)) {
+                       dev_dbg(dev, "No vreg data found for %s\n", prop_name);
+                       return optional ? err : -ENODATA;
+               }
+       }
+
        vreg->name = kstrdup(name, GFP_KERNEL);
        if (!vreg->name) {
                err = -ENOMEM;
index 188388b..23b0428 100644 (file)
@@ -264,6 +264,11 @@ static void gsi_handle_glob_err(uint32_t err)
        }
 }
 
+static void gsi_handle_gp_int1(void)
+{
+       complete(&gsi_ctx->gen_ee_cmd_compl);
+}
+
 static void gsi_handle_glob_ee(int ee)
 {
        uint32_t val;
@@ -288,8 +293,7 @@ static void gsi_handle_glob_ee(int ee)
        }
 
        if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK) {
-               notify.evt_id = GSI_PER_EVT_GLOB_GP1;
-               gsi_ctx->per.notify_cb(&notify);
+               gsi_handle_gp_int1();
        }
 
        if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_BMSK) {
@@ -2745,6 +2749,67 @@ void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
 }
 EXPORT_SYMBOL(gsi_get_inst_ram_offset_and_size);
 
+int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
+{
+       enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_HALT_CHANNEL;
+       uint32_t val;
+       int res;
+
+       if (!gsi_ctx) {
+               pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+               return -GSI_STATUS_NODEV;
+       }
+
+       if (chan_idx >= gsi_ctx->max_ch || !code) {
+               GSIERR("bad params chan_idx=%d\n", chan_idx);
+               return -GSI_STATUS_INVALID_PARAMS;
+       }
+
+       mutex_lock(&gsi_ctx->mlock);
+       reinit_completion(&gsi_ctx->gen_ee_cmd_compl);
+
+       /* invalidate the response */
+       gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
+                       GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+       gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
+       gsi_writel(gsi_ctx->scratch.word0.val, gsi_ctx->base +
+                       GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+
+       gsi_ctx->gen_ee_cmd_dbg.halt_channel++;
+       val = (((op << GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT) &
+               GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK) |
+               ((chan_idx << GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT) &
+                       GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK) |
+               ((ee << GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT) &
+                       GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK));
+       gsi_writel(val, gsi_ctx->base +
+               GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(gsi_ctx->per.ee));
+
+       res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
+               msecs_to_jiffies(GSI_CMD_TIMEOUT));
+       if (res == 0) {
+               GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
+               res = -GSI_STATUS_TIMED_OUT;
+               goto free_lock;
+       }
+
+       gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
+               GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+       if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
+               GSIERR("No response received\n");
+               res = -GSI_STATUS_ERROR;
+               goto free_lock;
+       }
+
+       res = GSI_STATUS_SUCCESS;
+       *code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;
+free_lock:
+       mutex_unlock(&gsi_ctx->mlock);
+
+       return res;
+}
+EXPORT_SYMBOL(gsi_halt_channel_ee);
+
 static int msm_gsi_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -2757,6 +2822,7 @@ static int msm_gsi_probe(struct platform_device *pdev)
        }
 
        gsi_ctx->dev = dev;
+       init_completion(&gsi_ctx->gen_ee_cmd_compl);
        gsi_debugfs_init();
 
        return 0;
index 750ae2b..d0eb162 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -115,9 +115,12 @@ struct gsi_evt_ctx {
 struct gsi_ee_scratch {
        union __packed {
                struct {
-                       uint32_t resvd1:15;
+                       uint32_t inter_ee_cmd_return_code:3;
+                       uint32_t resvd1:2;
+                       uint32_t generic_ee_cmd_return_code:3;
+                       uint32_t resvd2:7;
                        uint32_t max_usb_pkt_size:1;
-                       uint32_t resvd2:8;
+                       uint32_t resvd3:8;
                        uint32_t mhi_base_chan_idx:8;
                } s;
                uint32_t val;
@@ -135,6 +138,10 @@ struct ch_debug_stats {
        unsigned long cmd_completed;
 };
 
+struct gsi_generic_ee_cmd_debug_stats {
+       unsigned long halt_channel;
+};
+
 struct gsi_ctx {
        void __iomem *base;
        struct device *dev;
@@ -143,6 +150,7 @@ struct gsi_ctx {
        struct gsi_chan_ctx chan[GSI_CHAN_MAX];
        struct ch_debug_stats ch_dbg[GSI_CHAN_MAX];
        struct gsi_evt_ctx evtr[GSI_EVT_RING_MAX];
+       struct gsi_generic_ee_cmd_debug_stats gen_ee_cmd_dbg;
        struct mutex mlock;
        spinlock_t slock;
        unsigned long evt_bmap;
@@ -154,6 +162,7 @@ struct gsi_ctx {
        struct workqueue_struct *dp_stat_wq;
        u32 max_ch;
        u32 max_ev;
+       struct completion gen_ee_cmd_compl;
 };
 
 enum gsi_re_type {
@@ -227,6 +236,18 @@ enum gsi_evt_ch_cmd_opcode {
        GSI_EVT_DE_ALLOC = 0xa,
 };
 
+enum gsi_generic_ee_cmd_opcode {
+       GSI_GEN_EE_CMD_HALT_CHANNEL = 0x1,
+};
+
+enum gsi_generic_ee_cmd_return_code {
+       GSI_GEN_EE_CMD_RETURN_CODE_SUCCESS = 0x1,
+       GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING = 0x2,
+       GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_DIRECTION = 0x3,
+       GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_TYPE = 0x4,
+       GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_INDEX = 0x5,
+};
+
 extern struct gsi_ctx *gsi_ctx;
 void gsi_debugfs_init(void);
 uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr);
index 1acaf74..d0462aa 100644 (file)
        (GSI_GSI_REG_BASE_OFFS + 0x0001f018 + 0x4000 * (n))
 #define GSI_EE_n_GSI_EE_GENERIC_CMD_RMSK 0xffffffff
 #define GSI_EE_n_GSI_EE_GENERIC_CMD_MAXn 3
-#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK 0xffffffff
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK 0x1f
 #define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT 0x0
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK 0x3e0
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT 0x5
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK 0x3c00
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT 0xa
 
 /* v1.0 */
 #define GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(n) \
index 66e329a..74e7394 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -95,6 +95,46 @@ int ipa_disable_data_path(u32 clnt_hdl)
        return res;
 }
 
+int ipa2_enable_force_clear(u32 request_id, bool throttle_source,
+       u32 source_pipe_bitmask)
+{
+       struct ipa_enable_force_clear_datapath_req_msg_v01 req;
+       int result;
+
+       memset(&req, 0, sizeof(req));
+       req.request_id = request_id;
+       req.source_pipe_bitmask = source_pipe_bitmask;
+       if (throttle_source) {
+               req.throttle_source_valid = 1;
+               req.throttle_source = 1;
+       }
+       result = qmi_enable_force_clear_datapath_send(&req);
+       if (result) {
+               IPAERR("qmi_enable_force_clear_datapath_send failed %d\n",
+                       result);
+               return result;
+       }
+
+       return 0;
+}
+
+int ipa2_disable_force_clear(u32 request_id)
+{
+       struct ipa_disable_force_clear_datapath_req_msg_v01 req;
+       int result;
+
+       memset(&req, 0, sizeof(req));
+       req.request_id = request_id;
+       result = qmi_disable_force_clear_datapath_send(&req);
+       if (result) {
+               IPAERR("qmi_disable_force_clear_datapath_send failed %d\n",
+                       result);
+               return result;
+       }
+
+       return 0;
+}
+
 static int ipa2_smmu_map_peer_bam(unsigned long dev)
 {
        phys_addr_t base;
index b45e748..94d76db 100644 (file)
@@ -1803,6 +1803,9 @@ void ipa_delete_dflt_flt_rules(u32 ipa_ep_idx);
 
 int ipa_enable_data_path(u32 clnt_hdl);
 int ipa_disable_data_path(u32 clnt_hdl);
+int ipa2_enable_force_clear(u32 request_id, bool throttle_source,
+       u32 source_pipe_bitmask);
+int ipa2_disable_force_clear(u32 request_id);
 int ipa_id_alloc(void *ptr);
 void *ipa_id_find(u32 id);
 void ipa_id_remove(u32 id);
index d88e5a6..e2ac9bf 100644 (file)
@@ -39,6 +39,8 @@
 #define QMI_SEND_STATS_REQ_TIMEOUT_MS 5000
 #define QMI_SEND_REQ_TIMEOUT_MS 60000
 
+#define QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS 1000
+
 static struct qmi_handle *ipa_svc_handle;
 static void ipa_a5_svc_recv_msg(struct work_struct *work);
 static DECLARE_DELAYED_WORK(work_recv_msg, ipa_a5_svc_recv_msg);
@@ -583,7 +585,8 @@ int qmi_enable_force_clear_datapath_send(
                        &req_desc,
                        req,
                        sizeof(*req),
-                       &resp_desc, &resp, sizeof(resp), 0);
+                       &resp_desc, &resp, sizeof(resp),
+                       QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS);
        if (rc < 0) {
                IPAWANERR("send req failed %d\n", rc);
                return rc;
@@ -628,7 +631,8 @@ int qmi_disable_force_clear_datapath_send(
                        &req_desc,
                        req,
                        sizeof(*req),
-                       &resp_desc, &resp, sizeof(resp), 0);
+                       &resp_desc, &resp, sizeof(resp),
+                       QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS);
        if (rc < 0) {
                IPAWANERR("send req failed %d\n", rc);
                return rc;
index 5bda4cb..f606691 100644 (file)
@@ -1404,7 +1404,6 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl)
        union IpaHwWdiCommonChCmdData_t disable;
        struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
        u32 prod_hdl;
-       int i;
 
        if (unlikely(!ipa_ctx)) {
                IPAERR("IPA driver was not initialized\n");
@@ -1421,28 +1420,6 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl)
        if (result)
                return result;
 
-       /* checking rdy_ring_rp_pa matches the rdy_comp_ring_wp_pa on WDI2.0 */
-       if (ipa_ctx->ipa_wdi2) {
-               for (i = 0; i < IPA_UC_FINISH_MAX; i++) {
-                       IPADBG("(%d) rp_value(%u), comp_wp_value(%u)\n",
-                                       i,
-                                       *ipa_ctx->uc_ctx.rdy_ring_rp_va,
-                                       *ipa_ctx->uc_ctx.rdy_comp_ring_wp_va);
-                       if (*ipa_ctx->uc_ctx.rdy_ring_rp_va !=
-                               *ipa_ctx->uc_ctx.rdy_comp_ring_wp_va) {
-                               usleep_range(IPA_UC_WAIT_MIN_SLEEP,
-                                       IPA_UC_WAII_MAX_SLEEP);
-                       } else {
-                               break;
-                       }
-               }
-               /* In case ipa_uc still haven't processed all
-               * pending descriptors, we have to assert
-               */
-               if (i == IPA_UC_FINISH_MAX)
-                       BUG();
-       }
-
        IPADBG("ep=%d\n", clnt_hdl);
 
        ep = &ipa_ctx->ep[clnt_hdl];
@@ -1468,6 +1445,11 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl)
         * holb on IPA Producer pipe
         */
        if (IPA_CLIENT_IS_PROD(ep->client)) {
+
+               IPADBG("Stopping PROD channel - hdl=%d clnt=%d\n",
+                       clnt_hdl, ep->client);
+
+               /* remove delay on wlan-prod pipe*/
                memset(&ep_cfg_ctrl, 0 , sizeof(struct ipa_ep_cfg_ctrl));
                ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
 
@@ -1594,6 +1576,8 @@ int ipa2_suspend_wdi_pipe(u32 clnt_hdl)
        struct ipa_ep_context *ep;
        union IpaHwWdiCommonChCmdData_t suspend;
        struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+       u32 source_pipe_bitmask = 0;
+       bool disable_force_clear = false;
 
        if (unlikely(!ipa_ctx)) {
                IPAERR("IPA driver was not initialized\n");
@@ -1623,6 +1607,31 @@ int ipa2_suspend_wdi_pipe(u32 clnt_hdl)
        suspend.params.ipa_pipe_number = clnt_hdl;
 
        if (IPA_CLIENT_IS_PROD(ep->client)) {
+               /*
+                * For WDI 2.0 need to ensure pipe will be empty before suspend
+                * as IPA uC will fail to suspend the pipe otherwise.
+                */
+               if (ipa_ctx->ipa_wdi2) {
+                       source_pipe_bitmask = 1 <<
+                               ipa_get_ep_mapping(ep->client);
+                       result = ipa2_enable_force_clear(clnt_hdl,
+                               false, source_pipe_bitmask);
+                       if (result) {
+                               /*
+                                * assuming here modem SSR, AP can remove
+                                * the delay in this case
+                                */
+                               IPAERR("failed to force clear %d\n", result);
+                               IPAERR("remove delay from SCND reg\n");
+                               memset(&ep_cfg_ctrl, 0,
+                                       sizeof(struct ipa_ep_cfg_ctrl));
+                               ep_cfg_ctrl.ipa_ep_delay = false;
+                               ep_cfg_ctrl.ipa_ep_suspend = false;
+                               ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+                       } else {
+                               disable_force_clear = true;
+                       }
+               }
                IPADBG("Post suspend event first for IPA Producer\n");
                IPADBG("Client: %d clnt_hdl: %d\n", ep->client, clnt_hdl);
                result = ipa_uc_send_cmd(suspend.raw32b,
@@ -1667,6 +1676,9 @@ int ipa2_suspend_wdi_pipe(u32 clnt_hdl)
                }
        }
 
+       if (disable_force_clear)
+               ipa2_disable_force_clear(clnt_hdl);
+
        ipa_ctx->tag_process_before_gating = true;
        IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
        ep->uc_offload_state &= ~IPA_WDI_RESUMED;
index 8ca42c7..aa681d3 100644 (file)
@@ -1878,6 +1878,45 @@ static void ipa3_q6_avoid_holb(void)
        }
 }
 
+static void ipa3_halt_q6_cons_gsi_channels(void)
+{
+       int ep_idx;
+       int client_idx;
+       struct ipa_gsi_ep_config *gsi_ep_cfg;
+       int ret;
+       int code = 0;
+
+       for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+               if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
+                       ep_idx = ipa3_get_ep_mapping(client_idx);
+                       if (ep_idx == -1)
+                               continue;
+
+                       gsi_ep_cfg = ipa3_get_gsi_ep_info(ep_idx);
+                       if (!gsi_ep_cfg) {
+                               IPAERR("failed to get GSI config\n");
+                               ipa_assert();
+                               return;
+                       }
+
+                       ret = gsi_halt_channel_ee(
+                               gsi_ep_cfg->ipa_gsi_chan_num, gsi_ep_cfg->ee,
+                               &code);
+                       if (ret == GSI_STATUS_SUCCESS)
+                               IPADBG("halted gsi ch %d ee %d with code %d\n",
+                               gsi_ep_cfg->ipa_gsi_chan_num,
+                               gsi_ep_cfg->ee,
+                               code);
+                       else
+                               IPAERR("failed to halt ch %d ee %d code %d\n",
+                               gsi_ep_cfg->ipa_gsi_chan_num,
+                               gsi_ep_cfg->ee,
+                               code);
+               }
+       }
+}
+
+
 static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
        enum ipa_rule_type rlt)
 {
@@ -2312,6 +2351,7 @@ void ipa3_q6_post_shutdown_cleanup(void)
 
        /* Handle the issue where SUSPEND was removed for some reason */
        ipa3_q6_avoid_holb();
+       ipa3_halt_q6_cons_gsi_channels();
 
        for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
                if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
index c6fa39d..6165d30 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/completion.h>
 #include <linux/atomic.h>
 #include <linux/spinlock.h>
+#include <linux/interrupt.h>
 #include <linux/sched.h>
 #include <linux/cdev.h>
 #include <linux/msm_pcie.h>
@@ -29,6 +30,7 @@
 #include <linux/dma-mapping.h>
 
 extern struct mhi_pcie_devices mhi_devices;
+struct mhi_device_ctxt;
 
 enum MHI_DEBUG_LEVEL {
        MHI_MSG_RAW = 0x1,
@@ -125,6 +127,31 @@ enum MHI_STATE {
        MHI_STATE_reserved = 0x80000000
 };
 
+enum MHI_BRSTMODE {
+       /* BRST Mode Enable for HW Channels, SW Channel Disabled */
+       MHI_BRSTMODE_DEFAULT = 0x0,
+       MHI_BRSTMODE_RESERVED = 0x1,
+       MHI_BRSTMODE_DISABLE = 0x2,
+       MHI_BRSTMODE_ENABLE = 0x3
+};
+
+enum MHI_PM_STATE {
+       MHI_PM_DISABLE = 0x0, /* MHI is not enabled */
+       MHI_PM_POR = 0x1, /* Power On Reset State */
+       MHI_PM_M0 = 0x2,
+       MHI_PM_M1 = 0x4,
+       MHI_PM_M1_M2_TRANSITION = 0x8, /* Register access not allowed */
+       MHI_PM_M2 = 0x10,
+       MHI_PM_M3_ENTER = 0x20,
+       MHI_PM_M3 = 0x40,
+       MHI_PM_M3_EXIT = 0x80,
+};
+
+#define MHI_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | MHI_PM_M1))
+#define MHI_WAKE_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
+                                                       MHI_PM_M1 | MHI_PM_M2))
+#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state > MHI_PM_DISABLE) && \
+                                       (pm_state < MHI_PM_M3_EXIT))
 struct __packed mhi_event_ctxt {
        u32 mhi_intmodt;
        u32 mhi_event_er_type;
@@ -136,8 +163,11 @@ struct __packed mhi_event_ctxt {
 };
 
 struct __packed mhi_chan_ctxt {
-       enum MHI_CHAN_STATE mhi_chan_state;
-       enum MHI_CHAN_DIR mhi_chan_type;
+       u32 chstate : 8;
+       u32 brstmode : 2;
+       u32 pollcfg : 6;
+       u32 reserved : 16;
+       u32 chtype;
        u32 mhi_event_ring_index;
        u64 mhi_trb_ring_base_addr;
        u64 mhi_trb_ring_len;
@@ -172,7 +202,6 @@ enum MHI_PKT_TYPE {
        MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10,
        MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11,
        MHI_PKT_TYPE_START_CHAN_CMD = 0x12,
-       MHI_PKT_TYPE_RESET_CHAN_DEFER_CMD = 0x1F,
        MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20,
        MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21,
        MHI_PKT_TYPE_TX_EVENT = 0x22,
@@ -261,6 +290,17 @@ enum MHI_EVENT_CCS {
        MHI_EVENT_CC_BAD_TRE = 0x11,
 };
 
+struct db_mode {
+       /* if set do not reset DB_Mode during M0 resume */
+       u32 preserve_db_state : 1;
+       u32 db_mode : 1;
+       enum MHI_BRSTMODE brstmode;
+       void (*process_db)(struct mhi_device_ctxt *mhi_dev_ctxt,
+                          void __iomem *io_addr,
+                          uintptr_t chan,
+                          u32 val);
+};
+
 struct mhi_ring {
        void *base;
        void *wp;
@@ -270,6 +310,10 @@ struct mhi_ring {
        uintptr_t el_size;
        u32 overwrite_en;
        enum MHI_CHAN_DIR dir;
+       struct db_mode db_mode;
+       u32 msi_disable_cntr;
+       u32 msi_enable_cntr;
+       spinlock_t ring_lock;
 };
 
 enum MHI_CMD_STATUS {
@@ -327,12 +371,19 @@ struct mhi_chan_info {
        u32 flags;
 };
 
+struct mhi_chan_cfg {
+       enum MHI_COMMAND current_cmd;
+       struct mutex chan_lock;
+       spinlock_t event_lock; /* completion event lock */
+       struct completion cmd_complete;
+       struct mhi_cmd_complete_event_pkt cmd_event_pkt;
+       union mhi_cmd_pkt cmd_pkt;
+};
+
 struct mhi_client_handle {
        struct mhi_chan_info chan_info;
        struct mhi_device_ctxt *mhi_dev_ctxt;
        struct mhi_client_info_t client_info;
-       struct completion chan_reset_complete;
-       struct completion chan_open_complete;
        void *user_data;
        struct mhi_result result;
        u32 device_index;
@@ -344,12 +395,6 @@ struct mhi_client_handle {
        int event_ring_index;
 };
 
-enum MHI_EVENT_POLLING {
-       MHI_EVENT_POLLING_DISABLED = 0x0,
-       MHI_EVENT_POLLING_ENABLED = 0x1,
-       MHI_EVENT_POLLING_reserved = 0x80000000
-};
-
 enum MHI_TYPE_EVENT_RING {
        MHI_ER_DATA_TYPE = 0x1,
        MHI_ER_CTRL_TYPE = 0x2,
@@ -375,46 +420,27 @@ struct mhi_buf_info {
 
 struct mhi_counters {
        u32 m0_m1;
-       u32 m1_m0;
        u32 m1_m2;
        u32 m2_m0;
        u32 m0_m3;
-       u32 m3_m0;
        u32 m1_m3;
-       u32 mhi_reset_cntr;
-       u32 mhi_ready_cntr;
-       u32 m3_event_timeouts;
-       u32 m0_event_timeouts;
-       u32 m2_event_timeouts;
-       u32 msi_disable_cntr;
-       u32 msi_enable_cntr;
-       u32 nr_irq_migrations;
-       u32 *msi_counter;
-       u32 *ev_counter;
-       atomic_t outbound_acks;
+       u32 m3_m0;
        u32 chan_pkts_xferd[MHI_MAX_CHANNELS];
        u32 bb_used[MHI_MAX_CHANNELS];
+       atomic_t device_wake;
+       atomic_t outbound_acks;
+       atomic_t events_pending;
+       u32 *msi_counter;
+       u32 mhi_reset_cntr;
 };
 
 struct mhi_flags {
        u32 mhi_initialized;
-       u32 pending_M3;
-       u32 pending_M0;
        u32 link_up;
-       u32 kill_threads;
-       atomic_t data_pending;
-       atomic_t events_pending;
-       atomic_t pending_resume;
-       atomic_t pending_ssr;
-       atomic_t pending_powerup;
-       atomic_t m2_transition;
        int stop_threads;
-       atomic_t device_wake;
-       u32 ssr;
+       u32 kill_threads;
        u32 ev_thread_stopped;
        u32 st_thread_stopped;
-       u32 uldl_enabled;
-       u32 db_mode[MHI_MAX_CHANNELS];
 };
 
 struct mhi_wait_queues {
@@ -458,44 +484,35 @@ struct mhi_dev_space {
 };
 
 struct mhi_device_ctxt {
-       enum MHI_STATE mhi_state;
+       enum MHI_PM_STATE mhi_pm_state; /* Host driver state */
+       enum MHI_STATE mhi_state; /* protocol state */
        enum MHI_EXEC_ENV dev_exec_env;
 
        struct mhi_dev_space dev_space;
        struct mhi_pcie_dev_info *dev_info;
        struct pcie_core_info *dev_props;
        struct mhi_ring chan_bb_list[MHI_MAX_CHANNELS];
-
        struct mhi_ring mhi_local_chan_ctxt[MHI_MAX_CHANNELS];
 
        struct mhi_ring *mhi_local_event_ctxt;
        struct mhi_ring mhi_local_cmd_ctxt[NR_OF_CMD_RINGS];
+       struct mhi_chan_cfg mhi_chan_cfg[MHI_MAX_CHANNELS];
+
 
-       struct mutex *mhi_chan_mutex;
-       struct mutex mhi_link_state;
-       spinlock_t *mhi_ev_spinlock_list;
-       struct mutex *mhi_cmd_mutex_list;
        struct mhi_client_handle *client_handle_list[MHI_MAX_CHANNELS];
        struct mhi_event_ring_cfg *ev_ring_props;
        struct task_struct *event_thread_handle;
        struct task_struct *st_thread_handle;
+       struct tasklet_struct ev_task; /* Process control Events */
+       struct work_struct process_m1_worker;
        struct mhi_wait_queues mhi_ev_wq;
        struct dev_mmio_info mmio_info;
 
-       u32 mhi_chan_db_order[MHI_MAX_CHANNELS];
-       u32 mhi_ev_db_order[MHI_MAX_CHANNELS];
-       spinlock_t *db_write_lock;
-
        struct mhi_state_work_queue state_change_work_item_list;
-       enum MHI_CMD_STATUS mhi_chan_pend_cmd_ack[MHI_MAX_CHANNELS];
 
-       u32 cmd_ring_order;
        struct mhi_counters counters;
        struct mhi_flags flags;
 
-       u32 device_wake_asserted;
-
-       rwlock_t xfer_lock;
        struct hrtimer m1_timer;
        ktime_t m1_timeout;
 
@@ -508,11 +525,12 @@ struct mhi_device_ctxt {
 
        unsigned long esoc_notif;
        enum STATE_TRANSITION base_state;
-       atomic_t outbound_acks;
+
+       rwlock_t pm_xfer_lock; /* lock to control PM State */
+       spinlock_t dev_wake_lock; /* lock to set wake bit */
        struct mutex pm_lock;
        struct wakeup_source w_lock;
 
-       int enable_lpm;
        char *chan_info;
        struct dentry *mhi_parent_folder;
 };
@@ -577,7 +595,9 @@ int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
                                   enum MHI_CHAN_DIR chan_type,
                                   u32 event_ring,
                                   struct mhi_ring *ring,
-                                  enum MHI_CHAN_STATE chan_state);
+                                  enum MHI_CHAN_STATE chan_state,
+                                  bool preserve_db_state,
+                                  enum MHI_BRSTMODE brstmode);
 int mhi_populate_event_cfg(struct mhi_device_ctxt *mhi_dev_ctxt);
 int mhi_get_event_ring_for_channel(struct mhi_device_ctxt *mhi_dev_ctxt,
                                              u32 chan);
@@ -622,8 +642,9 @@ void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
                                                enum MHI_CB_REASON reason);
 void mhi_notify_client(struct mhi_client_handle *client_handle,
                       enum MHI_CB_REASON reason);
-int mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
-int mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
+void mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
+void mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt,
+                           bool force_set);
 int mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt);
 int mhi_cpu_notifier_cb(struct notifier_block *nfb, unsigned long action,
                        void *hcpu);
@@ -635,6 +656,14 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt);
 int mhi_set_bus_request(struct mhi_device_ctxt *mhi_dev_ctxt,
                                        int index);
 int start_chan_sync(struct mhi_client_handle *client_handle);
+void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
+                            void __iomem *io_addr,
+                            uintptr_t chan,
+                            u32 val);
+void mhi_process_db_brstmode_disable(struct mhi_device_ctxt *mhi_dev_ctxt,
+                                    void __iomem *io_addr,
+                                    uintptr_t chan,
+                                    u32 val);
 void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt, void __iomem *io_addr,
                  uintptr_t io_offset, u32 val);
 void mhi_reg_write_field(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -651,12 +680,19 @@ int mhi_runtime_suspend(struct device *dev);
 int get_chan_props(struct mhi_device_ctxt *mhi_dev_ctxt, int chan,
                   struct mhi_chan_info *chan_info);
 int mhi_runtime_resume(struct device *dev);
-int mhi_trigger_reset(struct mhi_device_ctxt *mhi_dev_ctxt);
+int mhi_runtime_idle(struct device *dev);
 int init_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt,
                  enum MHI_TYPE_EVENT_RING type);
 void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
                                int index);
 void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt);
 int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt);
+enum MHI_STATE mhi_get_m_state(struct mhi_device_ctxt *mhi_dev_ctxt);
+void process_m1_transition(struct work_struct *work);
+int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev);
+void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
+                    enum MHI_STATE new_state);
+const char *state_transition_str(enum STATE_TRANSITION state);
+void mhi_ctrl_ev_task(unsigned long data);
 
 #endif
index 4b63e88..113791a 100644 (file)
@@ -41,6 +41,9 @@ static ssize_t bhi_write(struct file *file,
        size_t amount_copied = 0;
        uintptr_t align_len = 0x1000;
        u32 tx_db_val = 0;
+       rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
+       const long bhi_timeout_ms = 1000;
+       long timeout;
 
        if (buf == NULL || 0 == count)
                return -EIO;
@@ -48,8 +51,12 @@ static ssize_t bhi_write(struct file *file,
        if (count > BHI_MAX_IMAGE_SIZE)
                return -ENOMEM;
 
-       wait_event_interruptible(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
-                       mhi_dev_ctxt->mhi_state == MHI_STATE_BHI);
+       timeout = wait_event_interruptible_timeout(
+                               *mhi_dev_ctxt->mhi_ev_wq.bhi_event,
+                               mhi_dev_ctxt->mhi_state == MHI_STATE_BHI,
+                               msecs_to_jiffies(bhi_timeout_ms));
+       if (timeout <= 0 && mhi_dev_ctxt->mhi_state != MHI_STATE_BHI)
+               return -EIO;
 
        mhi_log(MHI_MSG_INFO, "Entered. User Image size 0x%zx\n", count);
 
@@ -95,6 +102,11 @@ static ssize_t bhi_write(struct file *file,
        bhi_ctxt->image_size = count;
 
        /* Write the image size */
+       read_lock_bh(pm_xfer_lock);
+       if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+               read_unlock_bh(pm_xfer_lock);
+               goto bhi_copy_error;
+       }
        pcie_word_val = HIGH_WORD(bhi_ctxt->phy_image_loc);
        mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base,
                                BHI_IMGADDR_HIGH,
@@ -119,10 +131,15 @@ static ssize_t bhi_write(struct file *file,
                        BHI_IMGTXDB, 0xFFFFFFFF, 0, ++pcie_word_val);
 
        mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHI_INTVEC, 0);
-
+       read_unlock_bh(pm_xfer_lock);
        for (i = 0; i < BHI_POLL_NR_RETRIES; ++i) {
                u32 err = 0, errdbg1 = 0, errdbg2 = 0, errdbg3 = 0;
 
+               read_lock_bh(pm_xfer_lock);
+               if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+                       read_unlock_bh(pm_xfer_lock);
+                       goto bhi_copy_error;
+               }
                err = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRCODE);
                errdbg1 = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRDBG1);
                errdbg2 = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRDBG2);
@@ -131,6 +148,7 @@ static ssize_t bhi_write(struct file *file,
                                                BHI_STATUS,
                                                BHI_STATUS_MASK,
                                                BHI_STATUS_SHIFT);
+               read_unlock_bh(pm_xfer_lock);
                mhi_log(MHI_MSG_CRITICAL,
                "BHI STATUS 0x%x, err:0x%x errdbg1:0x%x errdbg2:0x%x errdbg3:0x%x\n",
                        tx_db_val, err, errdbg1, errdbg2, errdbg3);
@@ -176,9 +194,6 @@ int bhi_probe(struct mhi_pcie_dev_info *mhi_pcie_device)
            || 0 == mhi_pcie_device->core.bar0_end)
                return -EIO;
 
-       mhi_log(MHI_MSG_INFO,
-               "Successfully registered char dev. bhi base is: 0x%p.\n",
-               bhi_ctxt->bhi_base);
        ret_val = alloc_chrdev_region(&bhi_ctxt->bhi_dev, 0, 1, "bhi");
        if (IS_ERR_VALUE(ret_val)) {
                mhi_log(MHI_MSG_CRITICAL,
index aa8500d..7843062 100644 (file)
@@ -89,32 +89,31 @@ dt_error:
 int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
        int r = 0;
+       int i;
 
        mhi_dev_ctxt->mhi_local_event_ctxt = kzalloc(sizeof(struct mhi_ring)*
                                        mhi_dev_ctxt->mmio_info.nr_event_rings,
                                        GFP_KERNEL);
-
        if (!mhi_dev_ctxt->mhi_local_event_ctxt)
                return -ENOMEM;
 
-       mhi_dev_ctxt->counters.ev_counter = kzalloc(sizeof(u32) *
-                                    mhi_dev_ctxt->mmio_info.nr_event_rings,
-                                    GFP_KERNEL);
-       if (!mhi_dev_ctxt->counters.ev_counter) {
-               r = -ENOMEM;
-               goto free_local_ec_list;
-       }
        mhi_dev_ctxt->counters.msi_counter = kzalloc(sizeof(u32) *
                                     mhi_dev_ctxt->mmio_info.nr_event_rings,
                                     GFP_KERNEL);
        if (!mhi_dev_ctxt->counters.msi_counter) {
                r = -ENOMEM;
-               goto free_ev_counter;
+               goto free_local_ec_list;
        }
+
+       for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; i++) {
+               struct mhi_ring *mhi_ring = &mhi_dev_ctxt->
+                       mhi_local_event_ctxt[i];
+
+               spin_lock_init(&mhi_ring->ring_lock);
+       }
+
        return r;
 
-free_ev_counter:
-       kfree(mhi_dev_ctxt->counters.ev_counter);
 free_local_ec_list:
        kfree(mhi_dev_ctxt->mhi_local_event_ctxt);
        return r;
@@ -129,13 +128,18 @@ void ring_ev_db(struct mhi_device_ctxt *mhi_dev_ctxt, u32 event_ring_index)
        db_value = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_EVENT_RING,
                                                event_ring_index,
                                                (uintptr_t) event_ctxt->wp);
-       mhi_process_db(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.event_db_addr,
-                                       event_ring_index, db_value);
+       event_ctxt->db_mode.process_db(mhi_dev_ctxt,
+                                   mhi_dev_ctxt->mmio_info.event_db_addr,
+                                   event_ring_index,
+                                   db_value);
 }
 
 static int mhi_event_ring_init(struct mhi_event_ctxt *ev_list,
-                               struct mhi_ring *ring, u32 el_per_ring,
-                               u32 intmodt_val, u32 msi_vec)
+                              struct mhi_ring *ring,
+                              u32 el_per_ring,
+                              u32 intmodt_val,
+                              u32 msi_vec,
+                              enum MHI_BRSTMODE brstmode)
 {
        ev_list->mhi_event_er_type  = MHI_EVENT_RING_TYPE_VALID;
        ev_list->mhi_msi_vector     = msi_vec;
@@ -144,6 +148,20 @@ static int mhi_event_ring_init(struct mhi_event_ctxt *ev_list,
        ring->len = ((size_t)(el_per_ring)*sizeof(union mhi_event_pkt));
        ring->el_size = sizeof(union mhi_event_pkt);
        ring->overwrite_en = 0;
+
+       ring->db_mode.db_mode = 1;
+       ring->db_mode.brstmode = brstmode;
+       switch (ring->db_mode.brstmode) {
+       case MHI_BRSTMODE_ENABLE:
+               ring->db_mode.process_db = mhi_process_db_brstmode;
+               break;
+       case MHI_BRSTMODE_DISABLE:
+               ring->db_mode.process_db = mhi_process_db_brstmode_disable;
+               break;
+       default:
+               ring->db_mode.process_db = mhi_process_db;
+       }
+
        /* Flush writes to MMIO */
        wmb();
        return 0;
@@ -159,9 +177,12 @@ void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt)
                event_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i];
                mhi_local_event_ctxt = &mhi_dev_ctxt->mhi_local_event_ctxt[i];
                mhi_event_ring_init(event_ctxt, mhi_local_event_ctxt,
-                       mhi_dev_ctxt->ev_ring_props[i].nr_desc,
-                       mhi_dev_ctxt->ev_ring_props[i].intmod,
-                       mhi_dev_ctxt->ev_ring_props[i].msi_vec);
+                                   mhi_dev_ctxt->ev_ring_props[i].nr_desc,
+                                   mhi_dev_ctxt->ev_ring_props[i].intmod,
+                                   mhi_dev_ctxt->ev_ring_props[i].msi_vec,
+                                   GET_EV_PROPS(EV_BRSTMODE,
+                                                mhi_dev_ctxt->
+                                                ev_ring_props[i].flags));
        }
 }
 
@@ -219,10 +240,9 @@ int mhi_init_local_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
        u32 i = 0;
        unsigned long flags = 0;
        int ret_val = 0;
-       spinlock_t *lock =
-               &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
        struct mhi_ring *event_ctxt =
                &mhi_dev_ctxt->mhi_local_event_ctxt[ring_index];
+       spinlock_t *lock = &event_ctxt->ring_lock;
 
        if (NULL == mhi_dev_ctxt || 0 == nr_ev_el) {
                mhi_log(MHI_MSG_ERROR, "Bad Input data, quitting\n");
index 370ee0e..88a6a74 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
 
 
 #define MHICFG (0x10)
-#define MHICFG_RESERVED_BITS31_24_MASK 0xff000000
-#define MHICFG_RESERVED_BITS31_24_SHIFT 0x18
-#define MHICFG_NER_MASK 0xff0000
-#define MHICFG_NER_SHIFT 0x10
-#define MHICFG_RESERVED_BITS15_8_MASK 0xff00
-#define MHICFG_RESERVED_BITS15_8_SHIFT 0x8
-#define MHICFG_NCH_MASK 0xff
-#define MHICFG_NCH_SHIFT 0x0
+#define MHICFG_NHWER_MASK (0xff000000)
+#define MHICFG_NHWER_SHIFT (24)
+#define MHICFG_NER_MASK (0xff0000)
+#define MHICFG_NER_SHIFT (16)
+#define MHICFG_NHWCH_MASK (0xff00)
+#define MHICFG_NHWCH_SHIFT (8)
+#define MHICFG_NCH_MASK (0xff)
+#define MHICFG_NCH_SHIFT (0)
 
 
 #define CHDBOFF (0x18)
index b3fff19..395e19c 100644 (file)
@@ -96,22 +96,6 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
                                "Failed to register with esoc ret %d.\n",
                                ret_val);
        }
-       mhi_pcie_dev->mhi_ctxt.bus_scale_table =
-                               msm_bus_cl_get_pdata(mhi_pcie_dev->plat_dev);
-       mhi_pcie_dev->mhi_ctxt.bus_client =
-               msm_bus_scale_register_client(
-                               mhi_pcie_dev->mhi_ctxt.bus_scale_table);
-       if (!mhi_pcie_dev->mhi_ctxt.bus_client) {
-               mhi_log(MHI_MSG_CRITICAL,
-                       "Could not register for bus control ret: %d.\n",
-                       mhi_pcie_dev->mhi_ctxt.bus_client);
-       } else {
-               ret_val = mhi_set_bus_request(&mhi_pcie_dev->mhi_ctxt, 1);
-               if (ret_val)
-                       mhi_log(MHI_MSG_CRITICAL,
-                               "Could not set bus frequency ret: %d\n",
-                               ret_val);
-       }
 
        device_disable_async_suspend(&pcie_device->dev);
        ret_val = pci_enable_msi_range(pcie_device, 1, requested_msi_number);
@@ -188,9 +172,7 @@ mhi_state_transition_error:
                   mhi_dev_ctxt->dev_space.dev_mem_len,
                   mhi_dev_ctxt->dev_space.dev_mem_start,
                   mhi_dev_ctxt->dev_space.dma_dev_mem_start);
-       kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
-       kfree(mhi_dev_ctxt->mhi_chan_mutex);
-       kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
+
        kfree(mhi_dev_ctxt->ev_ring_props);
        mhi_rem_pm_sysfs(&pcie_device->dev);
 sysfs_config_err:
@@ -203,7 +185,9 @@ msi_config_err:
 }
 
 static const struct dev_pm_ops pm_ops = {
-       SET_RUNTIME_PM_OPS(mhi_runtime_suspend, mhi_runtime_resume, NULL)
+       SET_RUNTIME_PM_OPS(mhi_runtime_suspend,
+                          mhi_runtime_resume,
+                          mhi_runtime_idle)
        SET_SYSTEM_SLEEP_PM_OPS(mhi_pci_suspend, mhi_pci_resume)
 };
 
@@ -217,14 +201,15 @@ static struct pci_driver mhi_pcie_driver = {
 };
 
 static int mhi_pci_probe(struct pci_dev *pcie_device,
-               const struct pci_device_id *mhi_device_id)
+                        const struct pci_device_id *mhi_device_id)
 {
        int ret_val = 0;
        struct mhi_pcie_dev_info *mhi_pcie_dev = NULL;
        struct platform_device *plat_dev;
+       struct mhi_device_ctxt *mhi_dev_ctxt;
        u32 nr_dev = mhi_devices.nr_of_devices;
 
-       mhi_log(MHI_MSG_INFO, "Entering.\n");
+       mhi_log(MHI_MSG_INFO, "Entering\n");
        mhi_pcie_dev = &mhi_devices.device_list[mhi_devices.nr_of_devices];
        if (mhi_devices.nr_of_devices + 1 > MHI_MAX_SUPPORTED_DEVICES) {
                mhi_log(MHI_MSG_ERROR, "Error: Too many devices\n");
@@ -234,29 +219,120 @@ static int mhi_pci_probe(struct pci_dev *pcie_device,
        mhi_devices.nr_of_devices++;
        plat_dev = mhi_devices.device_list[nr_dev].plat_dev;
        pcie_device->dev.of_node = plat_dev->dev.of_node;
-       pm_runtime_put_noidle(&pcie_device->dev);
+       mhi_dev_ctxt = &mhi_devices.device_list[nr_dev].mhi_ctxt;
+       mhi_dev_ctxt->mhi_pm_state = MHI_PM_DISABLE;
+       INIT_WORK(&mhi_dev_ctxt->process_m1_worker, process_m1_transition);
+       mutex_init(&mhi_dev_ctxt->pm_lock);
+       rwlock_init(&mhi_dev_ctxt->pm_xfer_lock);
+       spin_lock_init(&mhi_dev_ctxt->dev_wake_lock);
+       tasklet_init(&mhi_dev_ctxt->ev_task,
+                    mhi_ctrl_ev_task,
+                    (unsigned long)mhi_dev_ctxt);
+
+       mhi_dev_ctxt->flags.link_up = 1;
+       ret_val = mhi_set_bus_request(mhi_dev_ctxt, 1);
        mhi_pcie_dev->pcie_device = pcie_device;
        mhi_pcie_dev->mhi_pcie_driver = &mhi_pcie_driver;
        mhi_pcie_dev->mhi_pci_link_event.events =
-                       (MSM_PCIE_EVENT_LINKDOWN | MSM_PCIE_EVENT_LINKUP |
-                        MSM_PCIE_EVENT_WAKEUP);
+                       (MSM_PCIE_EVENT_LINKDOWN | MSM_PCIE_EVENT_WAKEUP);
        mhi_pcie_dev->mhi_pci_link_event.user = pcie_device;
        mhi_pcie_dev->mhi_pci_link_event.callback = mhi_link_state_cb;
        mhi_pcie_dev->mhi_pci_link_event.notify.data = mhi_pcie_dev;
        ret_val = msm_pcie_register_event(&mhi_pcie_dev->mhi_pci_link_event);
-       if (ret_val)
+       if (ret_val) {
                mhi_log(MHI_MSG_ERROR,
                        "Failed to register for link notifications %d.\n",
                        ret_val);
+               return ret_val;
+       }
+
+       /* Initialize MHI CNTXT */
+       ret_val = mhi_ctxt_init(mhi_pcie_dev);
+       if (ret_val) {
+               mhi_log(MHI_MSG_ERROR,
+                       "MHI Initialization failed, ret %d\n",
+                       ret_val);
+               goto deregister_pcie;
+       }
+       pci_set_master(mhi_pcie_dev->pcie_device);
+
+       mutex_lock(&mhi_dev_ctxt->pm_lock);
+       write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+       mhi_dev_ctxt->mhi_pm_state = MHI_PM_POR;
+       ret_val = set_mhi_base_state(mhi_pcie_dev);
+       write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+       if (ret_val) {
+               mhi_log(MHI_MSG_ERROR,
+                       "Error Setting MHI Base State %d\n", ret_val);
+               goto unlock_pm_lock;
+       }
+
+       if (mhi_dev_ctxt->base_state == STATE_TRANSITION_BHI) {
+               ret_val = bhi_probe(mhi_pcie_dev);
+               if (ret_val) {
+                       mhi_log(MHI_MSG_ERROR,
+                               "Error with bhi_probe ret:%d", ret_val);
+                       goto unlock_pm_lock;
+               }
+       }
+
+       init_mhi_base_state(mhi_dev_ctxt);
+
+       pm_runtime_set_autosuspend_delay(&pcie_device->dev,
+                                        MHI_RPM_AUTOSUSPEND_TMR_VAL_MS);
+       pm_runtime_use_autosuspend(&pcie_device->dev);
+       pm_suspend_ignore_children(&pcie_device->dev, true);
+
+       /*
+        * pci framework will increment usage count (twice) before
+        * calling local device driver probe function.
+        * 1st pci.c pci_pm_init() calls pm_runtime_forbid
+        * 2nd pci-driver.c local_pci_probe calls pm_runtime_get_sync
+        * Framework expect pci device driver to call pm_runtime_put_noidle
+        * to decrement usage count after successful probe and
+        * and call pm_runtime_allow to enable runtime suspend.
+        * MHI will allow runtime after entering AMSS state.
+        */
+       pm_runtime_mark_last_busy(&pcie_device->dev);
+       pm_runtime_put_noidle(&pcie_device->dev);
+
+       /*
+        * Keep the MHI state in Active (M0) state until AMSS because EP
+        * would error fatal if we try to enter M1 before entering
+        * AMSS state.
+        */
+       read_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+       mhi_assert_device_wake(mhi_dev_ctxt, false);
+       read_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
+       mutex_unlock(&mhi_dev_ctxt->pm_lock);
+
+       return 0;
+
+unlock_pm_lock:
+       mutex_unlock(&mhi_dev_ctxt->pm_lock);
+deregister_pcie:
+       msm_pcie_deregister_event(&mhi_pcie_dev->mhi_pci_link_event);
        return ret_val;
 }
 
 static int mhi_plat_probe(struct platform_device *pdev)
 {
        u32 nr_dev = mhi_devices.nr_of_devices;
+       struct mhi_device_ctxt *mhi_dev_ctxt;
        int r = 0;
 
        mhi_log(MHI_MSG_INFO, "Entered\n");
+       mhi_dev_ctxt = &mhi_devices.device_list[nr_dev].mhi_ctxt;
+
+       mhi_dev_ctxt->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+       if (!mhi_dev_ctxt->bus_scale_table)
+               return -ENODATA;
+       mhi_dev_ctxt->bus_client = msm_bus_scale_register_client
+               (mhi_dev_ctxt->bus_scale_table);
+       if (!mhi_dev_ctxt->bus_client)
+               return -EINVAL;
+
        mhi_devices.device_list[nr_dev].plat_dev = pdev;
        r = dma_set_mask(&pdev->dev, MHI_DMA_MASK);
        if (r)
index 93c3de3..52afc46 100644 (file)
@@ -27,46 +27,21 @@ static int mhi_init_sync(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
        int i;
 
-       mhi_dev_ctxt->mhi_ev_spinlock_list = kmalloc(sizeof(spinlock_t) *
-                                       mhi_dev_ctxt->mmio_info.nr_event_rings,
-                                       GFP_KERNEL);
-       if (NULL == mhi_dev_ctxt->mhi_ev_spinlock_list)
-               goto ev_mutex_free;
-       mhi_dev_ctxt->mhi_chan_mutex = kmalloc(sizeof(struct mutex) *
-                                               MHI_MAX_CHANNELS, GFP_KERNEL);
-       if (NULL == mhi_dev_ctxt->mhi_chan_mutex)
-               goto chan_mutex_free;
-       mhi_dev_ctxt->mhi_cmd_mutex_list = kmalloc(sizeof(struct mutex) *
-                                               NR_OF_CMD_RINGS, GFP_KERNEL);
-       if (NULL == mhi_dev_ctxt->mhi_cmd_mutex_list)
-               goto cmd_mutex_free;
-
-       mhi_dev_ctxt->db_write_lock = kmalloc(sizeof(spinlock_t) *
-                                               MHI_MAX_CHANNELS, GFP_KERNEL);
-       if (NULL == mhi_dev_ctxt->db_write_lock)
-               goto db_write_lock_free;
-       for (i = 0; i < MHI_MAX_CHANNELS; ++i)
-               mutex_init(&mhi_dev_ctxt->mhi_chan_mutex[i]);
-       for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i)
-               spin_lock_init(&mhi_dev_ctxt->mhi_ev_spinlock_list[i]);
-       for (i = 0; i < NR_OF_CMD_RINGS; ++i)
-               mutex_init(&mhi_dev_ctxt->mhi_cmd_mutex_list[i]);
-       for (i = 0; i < MHI_MAX_CHANNELS; ++i)
-               spin_lock_init(&mhi_dev_ctxt->db_write_lock[i]);
-       rwlock_init(&mhi_dev_ctxt->xfer_lock);
-       mutex_init(&mhi_dev_ctxt->mhi_link_state);
-       mutex_init(&mhi_dev_ctxt->pm_lock);
-       atomic_set(&mhi_dev_ctxt->flags.m2_transition, 0);
-       return 0;
+       for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
+               struct mhi_ring *ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[i];
 
-db_write_lock_free:
-       kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
-cmd_mutex_free:
-       kfree(mhi_dev_ctxt->mhi_chan_mutex);
-chan_mutex_free:
-       kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
-ev_mutex_free:
-       return -ENOMEM;
+               mutex_init(&mhi_dev_ctxt->mhi_chan_cfg[i].chan_lock);
+               spin_lock_init(&mhi_dev_ctxt->mhi_chan_cfg[i].event_lock);
+               spin_lock_init(&ring->ring_lock);
+       }
+
+       for (i = 0; i < NR_OF_CMD_RINGS; i++) {
+               struct mhi_ring *ring = &mhi_dev_ctxt->mhi_local_cmd_ctxt[i];
+
+               spin_lock_init(&ring->ring_lock);
+       }
+
+       return 0;
 }
 
 size_t calculate_mhi_space(struct mhi_device_ctxt *mhi_dev_ctxt)
@@ -115,7 +90,7 @@ void init_dev_chan_ctxt(struct mhi_chan_ctxt *chan_ctxt,
        chan_ctxt->mhi_trb_write_ptr = p_base_addr;
        chan_ctxt->mhi_trb_ring_len = len;
        /* Prepulate the channel ctxt */
-       chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED;
+       chan_ctxt->chstate = MHI_CHAN_STATE_ENABLED;
        chan_ctxt->mhi_event_ring_index = ev_index;
 }
 
@@ -173,6 +148,8 @@ static int mhi_cmd_ring_init(struct mhi_cmd_ctxt *cmd_ctxt,
        ring[PRIMARY_CMD_RING].len = ring_size;
        ring[PRIMARY_CMD_RING].el_size = sizeof(union mhi_cmd_pkt);
        ring[PRIMARY_CMD_RING].overwrite_en = 0;
+       ring[PRIMARY_CMD_RING].db_mode.process_db =
+               mhi_process_db_brstmode_disable;
        return 0;
 }
 
@@ -547,7 +524,6 @@ int mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
        }
        init_event_ctxt_array(mhi_dev_ctxt);
        mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
-       mhi_dev_ctxt->enable_lpm = 1;
 
        r = mhi_spawn_threads(mhi_dev_ctxt);
        if (r) {
@@ -573,9 +549,6 @@ error_wq_init:
                   mhi_dev_ctxt->dev_space.dma_dev_mem_start);
 error_during_dev_mem_init:
 error_during_local_ev_ctxt:
-       kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
-       kfree(mhi_dev_ctxt->mhi_chan_mutex);
-       kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
 error_during_sync:
        kfree(mhi_dev_ctxt->ev_ring_props);
 error_during_props:
@@ -585,24 +558,28 @@ error_during_props:
 /**
  * @brief Initialize the channel context and shadow context
  *
- * @cc_list:           Context to initialize
- * @trb_list_phy:      Physical base address for the TRE ring
- * @trb_list_virt:     Virtual base address for the TRE ring
- * @el_per_ring:       Number of TREs this ring will contain
- * @chan_type:         Type of channel IN/OUT
- * @event_ring:         Event ring to be mapped to this channel context
- * @ring:               Shadow context to be initialized alongside
- *
+ * @cc_list: Context to initialize
+ * @trb_list_phy: Physical base address for the TRE ring
+ * @trb_list_virt: Virtual base address for the TRE ring
+ * @el_per_ring: Number of TREs this ring will contain
+ * @chan_type: Type of channel IN/OUT
+ * @event_ring: Event ring to be mapped to this channel context
+ * @ring: Shadow context to be initialized alongside
+ * @chan_state: Channel state
+ * @preserve_db_state: Do not reset DB state during resume
  * @Return errno
  */
 int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
-               uintptr_t trb_list_phy, uintptr_t trb_list_virt,
-               u64 el_per_ring, enum MHI_CHAN_DIR chan_type,
-               u32 event_ring, struct mhi_ring *ring,
-               enum MHI_CHAN_STATE chan_state)
+                      uintptr_t trb_list_phy, uintptr_t trb_list_virt,
+                      u64 el_per_ring, enum MHI_CHAN_DIR chan_type,
+                      u32 event_ring, struct mhi_ring *ring,
+                      enum MHI_CHAN_STATE chan_state,
+                      bool preserve_db_state,
+                      enum MHI_BRSTMODE brstmode)
 {
-       cc_list->mhi_chan_state = chan_state;
-       cc_list->mhi_chan_type = chan_type;
+       cc_list->brstmode = brstmode;
+       cc_list->chstate = chan_state;
+       cc_list->chtype = chan_type;
        cc_list->mhi_event_ring_index = event_ring;
        cc_list->mhi_trb_ring_base_addr = trb_list_phy;
        cc_list->mhi_trb_ring_len =
@@ -617,6 +594,21 @@ int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
        ring->el_size = sizeof(struct mhi_tx_pkt);
        ring->overwrite_en = 0;
        ring->dir = chan_type;
+       ring->db_mode.db_mode = 1;
+       ring->db_mode.preserve_db_state = (preserve_db_state) ? 1 : 0;
+       ring->db_mode.brstmode = brstmode;
+
+       switch (ring->db_mode.brstmode) {
+       case MHI_BRSTMODE_ENABLE:
+               ring->db_mode.process_db = mhi_process_db_brstmode;
+               break;
+       case MHI_BRSTMODE_DISABLE:
+               ring->db_mode.process_db = mhi_process_db_brstmode_disable;
+               break;
+       default:
+               ring->db_mode.process_db = mhi_process_db;
+       }
+
        /* Flush writes to MMIO */
        wmb();
        return 0;
index 5336b9f..7a4c560 100644 (file)
 #include "mhi_sys.h"
 #include "mhi_trace.h"
 
-irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
-{
-       struct device *mhi_device = dev_id;
-       struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
-
-       if (!mhi_dev_ctxt) {
-               mhi_log(MHI_MSG_ERROR, "Failed to get a proper context\n");
-               return IRQ_HANDLED;
-       }
-       mhi_dev_ctxt->counters.msi_counter[
-                       IRQ_TO_MSI(mhi_dev_ctxt, irq_number)]++;
-       mhi_log(MHI_MSG_VERBOSE,
-               "Got MSI 0x%x\n", IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
-       trace_mhi_msi(IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
-       atomic_inc(&mhi_dev_ctxt->flags.events_pending);
-       wake_up_interruptible(
-               mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
-       return IRQ_HANDLED;
-}
-
-irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id)
-{
-       struct device *mhi_device = dev_id;
-       u32 client_index;
-       struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
-       struct mhi_client_handle *client_handle;
-       struct mhi_client_info_t *client_info;
-       struct mhi_cb_info cb_info;
-       int msi_num = (IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
-
-       mhi_dev_ctxt->counters.msi_counter[msi_num]++;
-       mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi_num);
-       trace_mhi_msi(msi_num);
-       client_index = MHI_MAX_CHANNELS -
-                       (mhi_dev_ctxt->mmio_info.nr_event_rings - msi_num);
-       client_handle = mhi_dev_ctxt->client_handle_list[client_index];
-       client_info = &client_handle->client_info;
-       if (likely(NULL != client_handle)) {
-               client_handle->result.user_data =
-                               client_handle->user_data;
-       if (likely(NULL != &client_info->mhi_client_cb)) {
-                       cb_info.result = &client_handle->result;
-                       cb_info.cb_reason = MHI_CB_XFER;
-                       cb_info.chan = client_handle->chan_info.chan_nr;
-                       cb_info.result->transaction_status = 0;
-                       client_info->mhi_client_cb(&cb_info);
-               }
-       }
-       return IRQ_HANDLED;
-}
-
 static int mhi_process_event_ring(
                struct mhi_device_ctxt *mhi_dev_ctxt,
                u32 ev_index,
@@ -76,12 +25,17 @@ static int mhi_process_event_ring(
        union mhi_event_pkt event_to_process;
        int ret_val = 0;
        struct mhi_event_ctxt *ev_ctxt = NULL;
-       union mhi_cmd_pkt *cmd_pkt = NULL;
-       union mhi_event_pkt *ev_ptr = NULL;
        struct mhi_ring *local_ev_ctxt =
                &mhi_dev_ctxt->mhi_local_event_ctxt[ev_index];
-       u32 event_code;
 
+       read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+       if (unlikely(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE)) {
+               mhi_log(MHI_MSG_ERROR, "Invalid MHI PM State\n");
+               read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+               return -EIO;
+       }
+       mhi_assert_device_wake(mhi_dev_ctxt, false);
+       read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
        ev_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[ev_index];
 
        device_rp = (union mhi_event_pkt *)mhi_p2v_addr(
@@ -96,59 +50,84 @@ static int mhi_process_event_ring(
 
        while ((local_rp != device_rp) && (event_quota > 0) &&
                        (device_rp != NULL) && (local_rp != NULL)) {
+
                event_to_process = *local_rp;
-               ev_ptr = &event_to_process;
-               event_code = get_cmd_pkt(mhi_dev_ctxt,
-                                       ev_ptr, &cmd_pkt, ev_index);
-               if (((MHI_TRB_READ_INFO(EV_TRB_TYPE, (&event_to_process)) ==
-                   MHI_PKT_TYPE_CMD_COMPLETION_EVENT)) &&
-                   (event_code == MHI_EVENT_CC_SUCCESS)) {
-                       mhi_log(MHI_MSG_INFO, "Command Completion event\n");
-                       if ((MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt) ==
-                            MHI_PKT_TYPE_RESET_CHAN_CMD)) {
-                               mhi_log(MHI_MSG_INFO, "First Reset CC event\n");
-                               MHI_TRB_SET_INFO(CMD_TRB_TYPE, cmd_pkt,
-                                       MHI_PKT_TYPE_RESET_CHAN_DEFER_CMD);
-                               ret_val = -EINPROGRESS;
-                               break;
-                       } else if ((MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt)
-                                   == MHI_PKT_TYPE_RESET_CHAN_DEFER_CMD)) {
-                               MHI_TRB_SET_INFO(CMD_TRB_TYPE, cmd_pkt,
-                                                MHI_PKT_TYPE_RESET_CHAN_CMD);
-                               mhi_log(MHI_MSG_INFO,
-                                       "Processing Reset CC event\n");
-                       }
-               }
-               if (unlikely(0 != recycle_trb_and_ring(mhi_dev_ctxt,
-                                               local_ev_ctxt,
-                                               MHI_RING_TYPE_EVENT_RING,
-                                               ev_index)))
-                       mhi_log(MHI_MSG_ERROR, "Failed to recycle ev pkt\n");
-               switch (MHI_TRB_READ_INFO(EV_TRB_TYPE, (&event_to_process))) {
+               read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+               recycle_trb_and_ring(mhi_dev_ctxt,
+                                    local_ev_ctxt,
+                                    MHI_RING_TYPE_EVENT_RING,
+                                    ev_index);
+               read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+               switch (MHI_TRB_READ_INFO(EV_TRB_TYPE, &event_to_process)) {
                case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
-                       mhi_log(MHI_MSG_INFO,
-                                       "MHI CCE received ring 0x%x\n",
-                                       ev_index);
+               {
+                       union mhi_cmd_pkt *cmd_pkt;
+                       u32 chan;
+                       struct mhi_chan_cfg *cfg;
+                       unsigned long flags;
+                       struct mhi_ring *cmd_ring = &mhi_dev_ctxt->
+                               mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
                        __pm_stay_awake(&mhi_dev_ctxt->w_lock);
                        __pm_relax(&mhi_dev_ctxt->w_lock);
-                       ret_val = parse_cmd_event(mhi_dev_ctxt,
-                                       &event_to_process, ev_index);
+                       get_cmd_pkt(mhi_dev_ctxt,
+                                   &event_to_process,
+                                   &cmd_pkt, ev_index);
+                       MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
+                       cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
+                       mhi_log(MHI_MSG_INFO,
+                               "MHI CCE received ring 0x%x chan:%u\n",
+                               ev_index,
+                               chan);
+                       spin_lock_irqsave(&cfg->event_lock, flags);
+                       cfg->cmd_pkt = *cmd_pkt;
+                       cfg->cmd_event_pkt =
+                               event_to_process.cmd_complete_event_pkt;
+                       complete(&cfg->cmd_complete);
+                       spin_unlock_irqrestore(&cfg->event_lock, flags);
+                       spin_lock_irqsave(&cmd_ring->ring_lock,
+                                         flags);
+                       ctxt_del_element(cmd_ring, NULL);
+                       spin_unlock_irqrestore(&cmd_ring->ring_lock,
+                                              flags);
                        break;
+               }
                case MHI_PKT_TYPE_TX_EVENT:
                        __pm_stay_awake(&mhi_dev_ctxt->w_lock);
                        parse_xfer_event(mhi_dev_ctxt,
-                                               &event_to_process, ev_index);
+                                        &event_to_process,
+                                        ev_index);
                        __pm_relax(&mhi_dev_ctxt->w_lock);
                        break;
                case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
                {
                        enum STATE_TRANSITION new_state;
-
+                       unsigned long flags;
                        new_state = MHI_READ_STATE(&event_to_process);
                        mhi_log(MHI_MSG_INFO,
-                                       "MHI STE received ring 0x%x\n",
-                                       ev_index);
-                       mhi_init_state_transition(mhi_dev_ctxt, new_state);
+                               "MHI STE received ring 0x%x State:%s\n",
+                               ev_index,
+                               state_transition_str(new_state));
+
+                       /* If transitioning to M1 schedule worker thread */
+                       if (new_state == STATE_TRANSITION_M1) {
+                               write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock,
+                                                  flags);
+                               mhi_dev_ctxt->mhi_state =
+                                       mhi_get_m_state(mhi_dev_ctxt);
+                               if (mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
+                                       mhi_dev_ctxt->mhi_pm_state = MHI_PM_M1;
+                                       mhi_dev_ctxt->counters.m0_m1++;
+                                       schedule_work(&mhi_dev_ctxt->
+                                                     process_m1_worker);
+                               }
+                               write_unlock_irqrestore(&mhi_dev_ctxt->
+                                                       pm_xfer_lock,
+                                                       flags);
+                       } else {
+                               mhi_init_state_transition(mhi_dev_ctxt,
+                                                         new_state);
+                       }
                        break;
                }
                case MHI_PKT_TYPE_EE_EVENT:
@@ -178,10 +157,7 @@ static int mhi_process_event_ring(
                        mhi_log(MHI_MSG_INFO,
                           "MHI System Error Detected. Triggering Reset\n");
                        BUG();
-                       if (!mhi_trigger_reset(mhi_dev_ctxt))
-                               mhi_log(MHI_MSG_ERROR,
-                               "Failed to reset for SYSERR recovery\n");
-               break;
+                       break;
                default:
                        mhi_log(MHI_MSG_ERROR,
                                "Unsupported packet type code 0x%x\n",
@@ -198,6 +174,9 @@ static int mhi_process_event_ring(
                ret_val = 0;
                --event_quota;
        }
+       read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+       mhi_deassert_device_wake(mhi_dev_ctxt);
+       read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
        return ret_val;
 }
 
@@ -207,7 +186,7 @@ int parse_event_thread(void *ctxt)
        u32 i = 0;
        int ret_val = 0;
        int ret_val_process_event = 0;
-       atomic_t *ev_pen_ptr = &mhi_dev_ctxt->flags.events_pending;
+       atomic_t *ev_pen_ptr = &mhi_dev_ctxt->counters.events_pending;
 
        /* Go through all event rings */
        for (;;) {
@@ -215,7 +194,7 @@ int parse_event_thread(void *ctxt)
                        wait_event_interruptible(
                                *mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq,
                                ((atomic_read(
-                               &mhi_dev_ctxt->flags.events_pending) > 0) &&
+                               &mhi_dev_ctxt->counters.events_pending) > 0) &&
                                        !mhi_dev_ctxt->flags.stop_threads) ||
                                mhi_dev_ctxt->flags.kill_threads ||
                                (mhi_dev_ctxt->flags.stop_threads &&
@@ -237,27 +216,45 @@ int parse_event_thread(void *ctxt)
                        break;
                }
                mhi_dev_ctxt->flags.ev_thread_stopped = 0;
-               atomic_dec(&mhi_dev_ctxt->flags.events_pending);
-               for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
+               atomic_dec(&mhi_dev_ctxt->counters.events_pending);
+               for (i = 1; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
                        if (mhi_dev_ctxt->mhi_state == MHI_STATE_SYS_ERR) {
                                mhi_log(MHI_MSG_INFO,
-                                  "SYS_ERR detected, not processing events\n");
-                               atomic_set(&mhi_dev_ctxt->flags.events_pending,
+                               "SYS_ERR detected, not processing events\n");
+                               atomic_set(&mhi_dev_ctxt->
+                                          counters.events_pending,
                                           0);
                                break;
                        }
                        if (GET_EV_PROPS(EV_MANAGED,
-                                       mhi_dev_ctxt->ev_ring_props[i].flags)){
+                                       mhi_dev_ctxt->ev_ring_props[i].flags)) {
                                ret_val_process_event =
-                                   mhi_process_event_ring(mhi_dev_ctxt, i,
-                                    mhi_dev_ctxt->ev_ring_props[i].nr_desc);
-                               if (ret_val_process_event ==
-                                       -EINPROGRESS)
+                                   mhi_process_event_ring(mhi_dev_ctxt,
+                                                 i,
+                                                 mhi_dev_ctxt->
+                                                 ev_ring_props[i].nr_desc);
+                               if (ret_val_process_event == -EINPROGRESS)
                                        atomic_inc(ev_pen_ptr);
                        }
                }
        }
-       return ret_val;
+}
+
+void mhi_ctrl_ev_task(unsigned long data)
+{
+       struct mhi_device_ctxt *mhi_dev_ctxt =
+               (struct mhi_device_ctxt *)data;
+       const unsigned CTRL_EV_RING = 0;
+       struct mhi_event_ring_cfg *ring_props =
+               &mhi_dev_ctxt->ev_ring_props[CTRL_EV_RING];
+
+       mhi_log(MHI_MSG_VERBOSE, "Enter\n");
+       /* Process control event ring */
+       mhi_process_event_ring(mhi_dev_ctxt,
+                              CTRL_EV_RING,
+                              ring_props->nr_desc);
+       enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, CTRL_EV_RING));
+       mhi_log(MHI_MSG_VERBOSE, "Exit\n");
 }
 
 struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
@@ -268,8 +265,8 @@ struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
        client_handle->result.bytes_xferd = 0;
        client_handle->result.transaction_status = 0;
        ret_val = mhi_process_event_ring(client_handle->mhi_dev_ctxt,
-                               client_handle->event_ring_index,
-                               1);
+                                        client_handle->event_ring_index,
+                                        1);
        if (ret_val)
                mhi_log(MHI_MSG_INFO, "NAPI failed to process event ring\n");
        return &(client_handle->result);
@@ -277,20 +274,79 @@ struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
 
 void mhi_mask_irq(struct mhi_client_handle *client_handle)
 {
-       disable_irq_nosync(MSI_TO_IRQ(client_handle->mhi_dev_ctxt,
-                                       client_handle->msi_vec));
-       client_handle->mhi_dev_ctxt->counters.msi_disable_cntr++;
-       if (client_handle->mhi_dev_ctxt->counters.msi_disable_cntr >
-                  (client_handle->mhi_dev_ctxt->counters.msi_enable_cntr + 1))
-               mhi_log(MHI_MSG_INFO, "No nested IRQ disable Allowed\n");
+       struct mhi_device_ctxt *mhi_dev_ctxt =
+               client_handle->mhi_dev_ctxt;
+       struct mhi_ring *ev_ring = &mhi_dev_ctxt->
+               mhi_local_event_ctxt[client_handle->event_ring_index];
+
+       disable_irq_nosync(MSI_TO_IRQ(mhi_dev_ctxt, client_handle->msi_vec));
+       ev_ring->msi_disable_cntr++;
 }
 
 void mhi_unmask_irq(struct mhi_client_handle *client_handle)
 {
-       client_handle->mhi_dev_ctxt->counters.msi_enable_cntr++;
-       enable_irq(MSI_TO_IRQ(client_handle->mhi_dev_ctxt,
-                       client_handle->msi_vec));
-       if (client_handle->mhi_dev_ctxt->counters.msi_enable_cntr >
-                  client_handle->mhi_dev_ctxt->counters.msi_disable_cntr)
-               mhi_log(MHI_MSG_INFO, "No nested IRQ enable Allowed\n");
+       struct mhi_device_ctxt *mhi_dev_ctxt =
+               client_handle->mhi_dev_ctxt;
+       struct mhi_ring *ev_ring = &mhi_dev_ctxt->
+               mhi_local_event_ctxt[client_handle->event_ring_index];
+
+       ev_ring->msi_enable_cntr++;
+       enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, client_handle->msi_vec));
+}
+
+irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
+{
+       struct device *mhi_device = dev_id;
+       struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
+       int msi = IRQ_TO_MSI(mhi_dev_ctxt, irq_number);
+
+       if (!mhi_dev_ctxt) {
+               mhi_log(MHI_MSG_ERROR, "Failed to get a proper context\n");
+               return IRQ_HANDLED;
+       }
+       mhi_dev_ctxt->counters.msi_counter[
+                       IRQ_TO_MSI(mhi_dev_ctxt, irq_number)]++;
+       mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi);
+       trace_mhi_msi(IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
+
+       if (msi) {
+               atomic_inc(&mhi_dev_ctxt->counters.events_pending);
+               wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
+       } else  {
+               disable_irq_nosync(irq_number);
+               tasklet_schedule(&mhi_dev_ctxt->ev_task);
+       }
+
+       return IRQ_HANDLED;
+}
+
+irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id)
+{
+       struct device *mhi_device = dev_id;
+       u32 client_index;
+       struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
+       struct mhi_client_handle *client_handle;
+       struct mhi_client_info_t *client_info;
+       struct mhi_cb_info cb_info;
+       int msi_num = (IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
+
+       mhi_dev_ctxt->counters.msi_counter[msi_num]++;
+       mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi_num);
+       trace_mhi_msi(msi_num);
+       client_index = MHI_MAX_CHANNELS -
+                       (mhi_dev_ctxt->mmio_info.nr_event_rings - msi_num);
+       client_handle = mhi_dev_ctxt->client_handle_list[client_index];
+       client_info = &client_handle->client_info;
+       if (likely(client_handle)) {
+               client_handle->result.user_data =
+                               client_handle->user_data;
+               if (likely(client_info->mhi_client_cb)) {
+                       cb_info.result = &client_handle->result;
+                       cb_info.cb_reason = MHI_CB_XFER;
+                       cb_info.chan = client_handle->chan_info.chan_nr;
+                       cb_info.result->transaction_status = 0;
+                       client_info->mhi_client_cb(&cb_info);
+               }
+       }
+       return IRQ_HANDLED;
 }
index bb47f53..133c0ee 100644 (file)
@@ -96,7 +96,6 @@
                                ((_mhi_dev_ctxt)->mmio_info.nr_event_rings - \
                                ((_mhi_dev_ctxt)->mmio_info.nr_hw_event_rings)))
 
-
 /* MHI Transfer Ring Elements 7.4.1*/
 #define TX_TRB_LEN
 #define MHI_TX_TRB_LEN__SHIFT (0)
 #define MHI_CHAN_TYPE__MASK (3)
 #define MHI_CHAN_TYPE__SHIFT (6)
 
+#define PRESERVE_DB_STATE
+#define MHI_PRESERVE_DB_STATE__MASK (1)
+#define MHI_PRESERVE_DB_STATE__SHIFT (8)
+
+#define BRSTMODE
+#define MHI_BRSTMODE__MASK (3)
+#define MHI_BRSTMODE__SHIFT (9)
+
 #define GET_CHAN_PROPS(_FIELD, _VAL) \
        (((_VAL) >> MHI_##_FIELD ## __SHIFT) & MHI_##_FIELD ## __MASK)
 
+#define EV_BRSTMODE
+#define MHI_EV_BRSTMODE__MASK (3)
+#define MHI_EV_BRSTMODE__SHIFT (5)
+
 #define EV_TYPE
 #define MHI_EV_TYPE__MASK (3)
 #define MHI_EV_TYPE__SHIFT (3)
index c949745..a873ea9 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,9 @@
 #include "mhi_macros.h"
 #include "mhi_trace.h"
 
+static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
+                         union mhi_cmd_pkt *cmd_pkt);
+
 static int enable_bb_ctxt(struct mhi_ring *bb_ctxt, int nr_el)
 {
        bb_ctxt->el_size = sizeof(struct mhi_buf_info);
@@ -212,7 +215,9 @@ int mhi_release_chan_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
                        ring->len, ring->base,
                         cc_list->mhi_trb_ring_base_addr);
        mhi_init_chan_ctxt(cc_list, 0, 0, 0, 0, 0, ring,
-                               MHI_CHAN_STATE_DISABLED);
+                          MHI_CHAN_STATE_DISABLED,
+                          false,
+                          MHI_BRSTMODE_DEFAULT);
        return 0;
 }
 
@@ -259,7 +264,11 @@ static int populate_tre_ring(struct mhi_client_handle *client_handle)
                                client_handle->chan_info.flags),
                           client_handle->chan_info.ev_ring,
                           &mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
-                          MHI_CHAN_STATE_ENABLED);
+                          MHI_CHAN_STATE_ENABLED,
+                          GET_CHAN_PROPS(PRESERVE_DB_STATE,
+                                         client_handle->chan_info.flags),
+                          GET_CHAN_PROPS(BRSTMODE,
+                                         client_handle->chan_info.flags));
        mhi_log(MHI_MSG_INFO, "Exited\n");
        return 0;
 }
@@ -268,48 +277,58 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
 {
        int ret_val = 0;
        struct mhi_device_ctxt *mhi_dev_ctxt;
-       int r = 0;
        int chan;
+       struct mhi_chan_cfg *cfg;
+       struct mhi_cmd_complete_event_pkt cmd_event_pkt;
+       union mhi_cmd_pkt cmd_pkt;
+       enum MHI_EVENT_CCS ev_code;
 
-       if (NULL == client_handle ||
-           client_handle->magic != MHI_HANDLE_MAGIC)
+       if (!client_handle || client_handle->magic != MHI_HANDLE_MAGIC)
                return -EINVAL;
 
        mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
-       r = get_chan_props(mhi_dev_ctxt,
-                           client_handle->chan_info.chan_nr,
-                          &client_handle->chan_info);
-       if (r)
-               return r;
+       ret_val = get_chan_props(mhi_dev_ctxt,
+                                client_handle->chan_info.chan_nr,
+                                &client_handle->chan_info);
+       if (ret_val)
+               return ret_val;
 
        chan = client_handle->chan_info.chan_nr;
+       cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
+       mutex_lock(&cfg->chan_lock);
        mhi_log(MHI_MSG_INFO,
                "Entered: Client opening chan 0x%x\n", chan);
        if (mhi_dev_ctxt->dev_exec_env <
                GET_CHAN_PROPS(CHAN_BRINGUP_STAGE,
-                                   client_handle->chan_info.flags)) {
+                              client_handle->chan_info.flags)) {
                mhi_log(MHI_MSG_INFO,
                        "Chan %d, MHI exec_env %d, not ready!\n",
-                       chan, mhi_dev_ctxt->dev_exec_env);
+                       chan,
+                       mhi_dev_ctxt->dev_exec_env);
+               mutex_unlock(&cfg->chan_lock);
                return -ENOTCONN;
        }
-       r = populate_tre_ring(client_handle);
-       if (r) {
+       ret_val = populate_tre_ring(client_handle);
+       if (ret_val) {
                mhi_log(MHI_MSG_ERROR,
                        "Failed to initialize tre ring chan %d ret %d\n",
-                       chan, r);
-               return r;
+                       chan,
+                       ret_val);
+               mutex_unlock(&cfg->chan_lock);
+               return ret_val;
        }
        client_handle->event_ring_index =
-               mhi_dev_ctxt->dev_space.ring_ctxt.
-                               cc_list[chan].mhi_event_ring_index;
-       r = enable_bb_ctxt(&mhi_dev_ctxt->chan_bb_list[chan],
-                       client_handle->chan_info.max_desc);
-       if (r) {
+               mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan].
+               mhi_event_ring_index;
+       ret_val = enable_bb_ctxt(&mhi_dev_ctxt->chan_bb_list[chan],
+                                client_handle->chan_info.max_desc);
+       if (ret_val) {
                mhi_log(MHI_MSG_ERROR,
                        "Failed to initialize bb ctxt chan %d ret %d\n",
-                       chan, r);
-               return r;
+                       chan,
+                       ret_val);
+               mutex_unlock(&cfg->chan_lock);
+               return ret_val;
        }
 
        client_handle->msi_vec =
@@ -319,16 +338,67 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
                mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[
                        client_handle->event_ring_index].mhi_intmodt;
 
-       init_completion(&client_handle->chan_open_complete);
-       ret_val = start_chan_sync(client_handle);
+       init_completion(&cfg->cmd_complete);
+       read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+       if (unlikely(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE)) {
+               mhi_log(MHI_MSG_ERROR,
+                       "MHI State is disabled\n");
+               read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+               mutex_unlock(&cfg->chan_lock);
+               return -EIO;
+       }
+       WARN_ON(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE);
+       mhi_assert_device_wake(mhi_dev_ctxt, false);
+       read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+       pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
 
-       if (0 != ret_val)
+       ret_val = mhi_send_cmd(client_handle->mhi_dev_ctxt,
+                              MHI_COMMAND_START_CHAN,
+                              chan);
+       if (ret_val) {
                mhi_log(MHI_MSG_ERROR,
-                       "Failed to start chan 0x%x, ret %d\n", chan, ret_val);
-       BUG_ON(ret_val);
+                       "Failed to send start cmd for chan %d ret %d\n",
+                       chan, ret_val);
+               goto error_completion;
+       }
+       ret_val = wait_for_completion_timeout(&cfg->cmd_complete,
+                               msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT));
+       if (!ret_val) {
+               mhi_log(MHI_MSG_ERROR,
+                       "Failed to receive cmd completion for %d\n",
+                       chan);
+               goto error_completion;
+       } else {
+               ret_val = 0;
+       }
+
+       spin_lock(&cfg->event_lock);
+       cmd_event_pkt = cfg->cmd_event_pkt;
+       cmd_pkt = cfg->cmd_pkt;
+       spin_unlock(&cfg->event_lock);
+
+       ev_code = MHI_EV_READ_CODE(EV_TRB_CODE,
+                                  ((union mhi_event_pkt *)&cmd_event_pkt));
+       if (ev_code != MHI_EVENT_CC_SUCCESS) {
+               mhi_log(MHI_MSG_ERROR,
+                       "Error to receive event completion ev_code:0x%x\n",
+                       ev_code);
+               ret_val = -EIO;
+               goto error_completion;
+       }
+
        client_handle->chan_status = 1;
+
+error_completion:
+
+       read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+       mhi_deassert_device_wake(mhi_dev_ctxt);
+       read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+       pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+       mutex_unlock(&cfg->chan_lock);
+
        mhi_log(MHI_MSG_INFO,
-               "Exited chan 0x%x\n", chan);
+               "Exited chan 0x%x ret:%d\n", chan, ret_val);
        return ret_val;
 }
 EXPORT_SYMBOL(mhi_open_channel);
@@ -387,46 +457,79 @@ EXPORT_SYMBOL(mhi_register_channel);
 void mhi_close_channel(struct mhi_client_handle *client_handle)
 {
        u32 chan;
-       int r = 0;
        int ret_val = 0;
+       struct mhi_chan_cfg *cfg;
+       struct mhi_device_ctxt *mhi_dev_ctxt;
+       struct mhi_cmd_complete_event_pkt cmd_event_pkt;
+       union mhi_cmd_pkt cmd_pkt;
+       enum MHI_EVENT_CCS ev_code;
 
        if (!client_handle ||
            client_handle->magic != MHI_HANDLE_MAGIC ||
            !client_handle->chan_status)
                return;
 
+       mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
        chan = client_handle->chan_info.chan_nr;
+       cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
 
        mhi_log(MHI_MSG_INFO, "Client attempting to close chan 0x%x\n", chan);
-       init_completion(&client_handle->chan_reset_complete);
-       if (!atomic_read(&client_handle->mhi_dev_ctxt->flags.pending_ssr)) {
-               ret_val = mhi_send_cmd(client_handle->mhi_dev_ctxt,
-                                       MHI_COMMAND_RESET_CHAN, chan);
-               if (ret_val != 0) {
-                       mhi_log(MHI_MSG_ERROR,
-                               "Failed to send reset cmd for chan %d ret %d\n",
-                               chan, ret_val);
-               }
-               r = wait_for_completion_timeout(
-                               &client_handle->chan_reset_complete,
+       mutex_lock(&cfg->chan_lock);
+       init_completion(&cfg->cmd_complete);
+       read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+       WARN_ON(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE);
+       mhi_assert_device_wake(mhi_dev_ctxt, false);
+       read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+       pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+       ret_val = mhi_send_cmd(client_handle->mhi_dev_ctxt,
+                              MHI_COMMAND_RESET_CHAN,
+                              chan);
+       if (ret_val) {
+               mhi_log(MHI_MSG_ERROR,
+                       "Failed to send reset cmd for chan %d ret %d\n",
+                       chan,
+                       ret_val);
+               goto error_completion;
+       }
+       ret_val = wait_for_completion_timeout(&cfg->cmd_complete,
                                msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT));
-               if (!r)
-                       mhi_log(MHI_MSG_ERROR,
-                                       "Failed to reset chan %d ret %d\n",
-                                       chan, r);
-       } else {
-               /*
-                * Assumption: Device is not playing with our
-                * buffers after BEFORE_SHUTDOWN
-                */
-               mhi_log(MHI_MSG_INFO,
-                       "Pending SSR local free only chan %d.\n", chan);
+       if (!ret_val) {
+               mhi_log(MHI_MSG_ERROR,
+                       "Failed to receive cmd completion for %d\n",
+                       chan);
+               goto error_completion;
+       }
+
+       spin_lock_irq(&cfg->event_lock);
+       cmd_event_pkt = cfg->cmd_event_pkt;
+       cmd_pkt = cfg->cmd_pkt;
+       spin_unlock_irq(&cfg->event_lock);
+       ev_code = MHI_EV_READ_CODE(EV_TRB_CODE,
+                                  ((union mhi_event_pkt *)&cmd_event_pkt));
+       if (ev_code != MHI_EVENT_CC_SUCCESS) {
+               mhi_log(MHI_MSG_ERROR,
+                       "Error to receive event completion ev_cod:0x%x\n",
+                       ev_code);
+               goto error_completion;
        }
 
+       ret_val = reset_chan_cmd(mhi_dev_ctxt, &cmd_pkt);
+       if (ret_val)
+               mhi_log(MHI_MSG_ERROR,
+                       "Error resetting cmd ret:%d\n",
+                       ret_val);
+
+error_completion:
+
+       read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+       mhi_deassert_device_wake(mhi_dev_ctxt);
+       read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+       pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
        mhi_log(MHI_MSG_INFO, "Freeing ring for chan 0x%x\n", chan);
        free_tre_ring(client_handle);
        mhi_log(MHI_MSG_INFO, "Chan 0x%x confirmed closed.\n", chan);
        client_handle->chan_status = 0;
+       mutex_unlock(&cfg->chan_lock);
 }
 EXPORT_SYMBOL(mhi_close_channel);
 
@@ -439,93 +542,47 @@ void mhi_update_chan_db(struct mhi_device_ctxt *mhi_dev_ctxt,
        chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
        db_value = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_XFER_RING, chan,
                                                (uintptr_t) chan_ctxt->wp);
-       mhi_dev_ctxt->mhi_chan_db_order[chan]++;
-               mhi_process_db(mhi_dev_ctxt,
-                               mhi_dev_ctxt->mmio_info.chan_db_addr,
-                               chan, db_value);
-}
-
-int mhi_check_m2_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
-       int ret_val = 0;
+       chan_ctxt->db_mode.process_db(mhi_dev_ctxt,
+                                     mhi_dev_ctxt->mmio_info.chan_db_addr,
+                                     chan,
+                                     db_value);
 
-       if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
-               mhi_log(MHI_MSG_INFO, "M2 Transition flag value = %d\n",
-                       (atomic_read(&mhi_dev_ctxt->flags.m2_transition)));
-               if ((atomic_read(&mhi_dev_ctxt->flags.m2_transition)) == 0) {
-                       if (mhi_dev_ctxt->flags.link_up) {
-                               mhi_assert_device_wake(mhi_dev_ctxt);
-                               ret_val = -ENOTCONN;
-                       }
-               } else{
-                       mhi_log(MHI_MSG_INFO, "M2 transition flag is set\n");
-                       ret_val = -ENOTCONN;
-               }
-       } else {
-               ret_val = 0;
-       }
-
-       return ret_val;
 }
 
 static inline int mhi_queue_tre(struct mhi_device_ctxt
-                                                       *mhi_dev_ctxt,
-                                           u32 chan,
-                                           enum MHI_RING_TYPE type)
+                               *mhi_dev_ctxt,
+                               u32 chan,
+                               enum MHI_RING_TYPE type)
 {
        struct mhi_chan_ctxt *chan_ctxt;
        unsigned long flags = 0;
-       int ret_val = 0;
        u64 db_value = 0;
 
        chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
-       mhi_dev_ctxt->counters.m1_m0++;
 
-       if (type == MHI_RING_TYPE_CMD_RING)
-               atomic_inc(&mhi_dev_ctxt->counters.outbound_acks);
+       if (!MHI_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state))
+               return -EACCES;
 
-       ret_val = mhi_check_m2_transition(mhi_dev_ctxt);
-       if (likely(((ret_val == 0) &&
-           (((mhi_dev_ctxt->mhi_state == MHI_STATE_M0) ||
-             (mhi_dev_ctxt->mhi_state == MHI_STATE_M1))) &&
-           (chan_ctxt->mhi_chan_state != MHI_CHAN_STATE_ERROR)) &&
-           (!mhi_dev_ctxt->flags.pending_M3))) {
-               if (likely(type == MHI_RING_TYPE_XFER_RING)) {
-                       spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan],
-                                          flags);
-                       db_value =
-                        mhi_v2p_addr(
-                               mhi_dev_ctxt,
-                               MHI_RING_TYPE_XFER_RING,
-                               chan,
-                       (uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp);
-                       mhi_dev_ctxt->mhi_chan_db_order[chan]++;
-                       mhi_update_chan_db(mhi_dev_ctxt, chan);
-                       spin_unlock_irqrestore(
-                          &mhi_dev_ctxt->db_write_lock[chan], flags);
-               } else if (type == MHI_RING_TYPE_CMD_RING) {
-                       db_value = mhi_v2p_addr(mhi_dev_ctxt,
-                                               MHI_RING_TYPE_CMD_RING,
-                                               PRIMARY_CMD_RING,
-                                               (uintptr_t)
-                       mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING].wp);
-                       mhi_dev_ctxt->cmd_ring_order++;
-                       mhi_process_db(mhi_dev_ctxt,
-                               mhi_dev_ctxt->mmio_info.cmd_db_addr,
-                               0, db_value);
-               } else {
-                       mhi_log(MHI_MSG_VERBOSE,
-                       "Wrong type of packet = %d\n", type);
-                       ret_val = -EPROTO;
-               }
+       if (likely(type == MHI_RING_TYPE_XFER_RING)) {
+               struct mhi_ring *mhi_ring =
+                       &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+               spin_lock_irqsave(&mhi_ring->ring_lock, flags);
+               mhi_update_chan_db(mhi_dev_ctxt, chan);
+               spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
        } else {
-               mhi_log(MHI_MSG_VERBOSE,
-                       "Wakeup, pending data state %d chan state %d\n",
-                                                mhi_dev_ctxt->mhi_state,
-                                                chan_ctxt->mhi_chan_state);
-                       ret_val = 0;
+               struct mhi_ring *cmd_ring = &mhi_dev_ctxt->
+                               mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
+               db_value = mhi_v2p_addr(mhi_dev_ctxt,
+                                       MHI_RING_TYPE_CMD_RING,
+                                       PRIMARY_CMD_RING,
+                                       (uintptr_t)cmd_ring->wp);
+               cmd_ring->db_mode.process_db
+                       (mhi_dev_ctxt,
+                        mhi_dev_ctxt->mmio_info.cmd_db_addr,
+                        0,
+                        db_value);
        }
-       return ret_val;
+       return 0;
 }
 static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
                  int chan, void *buf, size_t buf_len,
@@ -645,17 +702,11 @@ static int mhi_queue_dma_xfer(
        int ret_val;
        enum MHI_CLIENT_CHANNEL chan;
        struct mhi_device_ctxt *mhi_dev_ctxt;
-       unsigned long flags;
-
-       if (!client_handle || !buf || !buf_len)
-               return -EINVAL;
 
        mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
        MHI_ASSERT(VALID_BUF(buf, buf_len, mhi_dev_ctxt),
                        "Client buffer is of invalid length\n");
        chan = client_handle->chan_info.chan_nr;
-       mhi_log(MHI_MSG_INFO, "Getting Reference %d", chan);
-       pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
 
        pkt_loc = mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp;
        pkt_loc->data_tx_pkt.buffer_ptr = buf;
@@ -682,24 +733,14 @@ static int mhi_queue_dma_xfer(
                                (void *)&pkt_loc);
        if (unlikely(0 != ret_val)) {
                mhi_log(MHI_MSG_VERBOSE,
-                               "Failed to insert trb in xfer ring\n");
-               goto error;
+                       "Failed to insert trb in xfer ring\n");
+               return ret_val;
        }
-       read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-       atomic_inc(&mhi_dev_ctxt->flags.data_pending);
+
        if (MHI_OUT ==
            GET_CHAN_PROPS(CHAN_DIR, client_handle->chan_info.flags))
                atomic_inc(&mhi_dev_ctxt->counters.outbound_acks);
-       ret_val = mhi_queue_tre(mhi_dev_ctxt, chan, MHI_RING_TYPE_XFER_RING);
-       if (unlikely(ret_val))
-               mhi_log(MHI_MSG_VERBOSE, "Failed queue TRE.\n");
-       atomic_dec(&mhi_dev_ctxt->flags.data_pending);
-       read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
 
-error:
-       pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
-       mhi_log(MHI_MSG_INFO, "Putting Reference %d", chan);
-       pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
        return ret_val;
 }
 
@@ -709,10 +750,28 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
        int r;
        enum dma_data_direction dma_dir;
        struct mhi_buf_info *bb;
+       struct mhi_device_ctxt *mhi_dev_ctxt;
+       u32 chan;
+       unsigned long flags;
 
        if (!client_handle || !buf || !buf_len)
                return -EINVAL;
 
+       mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
+       chan = client_handle->chan_info.chan_nr;
+
+       read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+       if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE) {
+               read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+               mhi_log(MHI_MSG_ERROR,
+                       "MHI is not in active state\n");
+               return -EINVAL;
+       }
+
+       pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+       mhi_assert_device_wake(mhi_dev_ctxt, false);
+       read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+
        if (MHI_OUT == GET_CHAN_PROPS(CHAN_DIR, client_handle->chan_info.flags))
                dma_dir = DMA_TO_DEVICE;
        else
@@ -723,8 +782,16 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
                                buf, buf_len, dma_dir, &bb);
        if (r) {
                mhi_log(MHI_MSG_VERBOSE,
-                               "Failed to create BB, chan %d ret %d\n",
-                               client_handle->chan_info.chan_nr, r);
+                       "Failed to create BB, chan %d ret %d\n",
+                       chan,
+                       r);
+               pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+                                         dev_info->pcie_device->dev);
+               pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->
+                                     pcie_device->dev);
+               read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+               mhi_deassert_device_wake(mhi_dev_ctxt);
+               read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
                return r;
        }
 
@@ -733,9 +800,9 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
                        buf, buf_len, (u64)bb->bb_p_addr,
                        client_handle->chan_info.chan_nr);
        r = mhi_queue_dma_xfer(client_handle,
-                               bb->bb_p_addr,
-                               bb->buf_len,
-                               mhi_flags);
+                              bb->bb_p_addr,
+                              bb->buf_len,
+                              mhi_flags);
 
        /*
         * Assumption: If create_bounce_buffer did not fail, we do not
@@ -743,48 +810,47 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
         * out of sync with the descriptor list which is problematic.
         */
        BUG_ON(r);
-       return r;
+
+       read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+       mhi_queue_tre(mhi_dev_ctxt, chan, MHI_RING_TYPE_XFER_RING);
+       if (dma_dir == DMA_FROM_DEVICE) {
+               pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+                                         dev_info->pcie_device->dev);
+               pm_runtime_put_noidle(&mhi_dev_ctxt->
+                                     dev_info->pcie_device->dev);
+               mhi_deassert_device_wake(mhi_dev_ctxt);
+       }
+       read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+       return 0;
 }
 EXPORT_SYMBOL(mhi_queue_xfer);
 
 int mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
                        enum MHI_COMMAND cmd, u32 chan)
 {
-       unsigned long flags = 0;
        union mhi_cmd_pkt *cmd_pkt = NULL;
        enum MHI_CHAN_STATE from_state = MHI_CHAN_STATE_DISABLED;
        enum MHI_CHAN_STATE to_state = MHI_CHAN_STATE_DISABLED;
        enum MHI_PKT_TYPE ring_el_type = MHI_PKT_TYPE_NOOP_CMD;
-       struct mutex *chan_mutex = NULL;
        int ret_val = 0;
+       unsigned long flags, flags2;
+       struct mhi_ring *mhi_ring = &mhi_dev_ctxt->
+               mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
 
-       if (chan >= MHI_MAX_CHANNELS ||
-               cmd >= MHI_COMMAND_MAX_NR || mhi_dev_ctxt == NULL) {
+       if (chan >= MHI_MAX_CHANNELS || cmd >= MHI_COMMAND_MAX_NR) {
                mhi_log(MHI_MSG_ERROR,
-                       "Invalid channel id, received id: 0x%x", chan);
+                       "Invalid channel id, received id: 0x%x",
+                       chan);
                return -EINVAL;
        }
 
        mhi_log(MHI_MSG_INFO,
-               "Entered, MHI state %d dev_exec_env %d chan %d cmd %d\n",
-                       mhi_dev_ctxt->mhi_state,
-                       mhi_dev_ctxt->dev_exec_env,
-                       chan, cmd);
-       mhi_log(MHI_MSG_INFO, "Getting Reference %d", chan);
-       pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
-       /*
-        * If there is a cmd pending a device confirmation,
-        * do not send anymore for this channel
-        */
-       if (MHI_CMD_PENDING == mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan]) {
-               mhi_log(MHI_MSG_ERROR, "Cmd Pending on chan %d", chan);
-               ret_val = -EALREADY;
-               goto error_invalid;
-       }
+               "Entered, MHI state %s dev_exec_env %d chan %d cmd %d\n",
+               TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
+               mhi_dev_ctxt->dev_exec_env, chan, cmd);
 
-       atomic_inc(&mhi_dev_ctxt->flags.data_pending);
        from_state =
-           mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan].mhi_chan_state;
+           mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan].chstate;
 
        switch (cmd) {
                break;
@@ -812,34 +878,26 @@ int mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
                mhi_log(MHI_MSG_ERROR, "Bad command received\n");
        }
 
-       mutex_lock(&mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING]);
-       ret_val = ctxt_add_element(mhi_dev_ctxt->mhi_local_cmd_ctxt,
-                       (void *)&cmd_pkt);
+       spin_lock_irqsave(&mhi_ring->ring_lock, flags);
+       ret_val = ctxt_add_element(mhi_ring, (void *)&cmd_pkt);
        if (ret_val) {
                mhi_log(MHI_MSG_ERROR, "Failed to insert element\n");
-               goto error_general;
+               spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
+               return ret_val;
        }
-       chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan];
-       mutex_lock(chan_mutex);
+
        MHI_TRB_SET_INFO(CMD_TRB_TYPE, cmd_pkt, ring_el_type);
        MHI_TRB_SET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
-       mutex_unlock(chan_mutex);
-       mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_PENDING;
-
-       read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
+       read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags2);
        mhi_queue_tre(mhi_dev_ctxt, 0, MHI_RING_TYPE_CMD_RING);
-       read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+       read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags2);
+       spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
 
-       mhi_log(MHI_MSG_VERBOSE, "Sent command 0x%x for chan %d\n",
-                                                               cmd, chan);
-error_general:
-       mutex_unlock(&mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING]);
+       mhi_log(MHI_MSG_VERBOSE,
+               "Sent command 0x%x for chan %d\n",
+               cmd,
+               chan);
 error_invalid:
-       pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
-       mhi_log(MHI_MSG_INFO, "Putting Reference %d", chan);
-       pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
-
-       atomic_dec(&mhi_dev_ctxt->flags.data_pending);
        mhi_log(MHI_MSG_INFO, "Exited ret %d.\n", ret_val);
        return ret_val;
 }
@@ -870,7 +928,6 @@ static void parse_inbound_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
 
        result->buf_addr = bb->client_buf;
        result->bytes_xferd = bb->filled_size;
-       result->transaction_status = 0;
 
        /* At this point the bounce buffer is no longer necessary
         * Whatever was received from the device was copied back to the
@@ -922,6 +979,9 @@ static int parse_outbound(struct mhi_device_ctxt *mhi_dev_ctxt,
 
        mhi_log(MHI_MSG_RAW, "Removing BB from head, chan %d\n", chan);
        atomic_dec(&mhi_dev_ctxt->counters.outbound_acks);
+       mhi_deassert_device_wake(mhi_dev_ctxt);
+       pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+       pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
        ret_val = ctxt_del_element(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
                                                NULL);
        BUG_ON(ret_val);
@@ -1018,16 +1078,8 @@ static int parse_inbound(struct mhi_device_ctxt *mhi_dev_ctxt,
                                ctxt_index_rp, ctxt_index_wp, chan);
                        BUG_ON(bb_index != ctxt_index_rp);
                } else  {
-                       /* Hardware Channel, no client registerered,
-                               drop data */
-                       recycle_trb_and_ring(mhi_dev_ctxt,
-                                &mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
-                                MHI_RING_TYPE_XFER_RING,
-                                chan);
                        BUG();
-                       /* No bounce buffer to recycle as no user request
-                        * can be present.
-                        */
+
                }
        }
        return 0;
@@ -1120,6 +1172,7 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
                        mhi_log(MHI_MSG_CRITICAL,
                                "Failed to get nr available trbs ret: %d.\n",
                                ret_val);
+                       panic("critical error");
                        return ret_val;
                }
                do {
@@ -1166,24 +1219,27 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
        {
                struct mhi_ring *chan_ctxt = NULL;
                u64 db_value = 0;
+               unsigned long flags;
 
-               mhi_dev_ctxt->flags.uldl_enabled = 1;
                chan = MHI_EV_READ_CHID(EV_CHID, event);
-               mhi_dev_ctxt->flags.db_mode[chan] = 1;
                chan_ctxt =
                        &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+
                mhi_log(MHI_MSG_INFO, "DB_MODE/OOB Detected chan %d.\n", chan);
+               spin_lock_irqsave(&chan_ctxt->ring_lock, flags);
+               chan_ctxt->db_mode.db_mode = 1;
                if (chan_ctxt->wp != chan_ctxt->rp) {
                        db_value = mhi_v2p_addr(mhi_dev_ctxt,
                                                MHI_RING_TYPE_XFER_RING, chan,
                                                (uintptr_t) chan_ctxt->wp);
-                       mhi_process_db(mhi_dev_ctxt,
+                       chan_ctxt->db_mode.process_db(mhi_dev_ctxt,
                                     mhi_dev_ctxt->mmio_info.chan_db_addr, chan,
                                     db_value);
                }
                client_handle = mhi_dev_ctxt->client_handle_list[chan];
-                       if (NULL != client_handle)
-                               result->transaction_status = -ENOTCONN;
+               if (client_handle)
+                       result->transaction_status = -ENOTCONN;
+               spin_unlock_irqrestore(&chan_ctxt->ring_lock, flags);
                break;
        }
        case MHI_EVENT_CC_BAD_TRE:
@@ -1216,6 +1272,10 @@ int recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
        u64 db_value = 0;
        void *removed_element = NULL;
        void *added_element = NULL;
+       spinlock_t *lock;
+       unsigned long flags;
+       struct mhi_ring *mhi_ring = &mhi_dev_ctxt->
+               mhi_local_event_ctxt[ring_index];
 
        ret_val = ctxt_del_element(ring, &removed_element);
 
@@ -1224,102 +1284,27 @@ int recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
                return ret_val;
        }
        ret_val = ctxt_add_element(ring, &added_element);
-       if (0 != ret_val)
+       if (ret_val) {
                mhi_log(MHI_MSG_ERROR, "Could not add element to ring\n");
-       db_value = mhi_v2p_addr(mhi_dev_ctxt, ring_type, ring_index,
-                                                       (uintptr_t) ring->wp);
-       if (0 != ret_val)
                return ret_val;
-       if (MHI_RING_TYPE_XFER_RING == ring_type) {
-               union mhi_xfer_pkt *removed_xfer_pkt =
-                       (union mhi_xfer_pkt *)removed_element;
-               union mhi_xfer_pkt *added_xfer_pkt =
-                       (union mhi_xfer_pkt *)added_element;
-               added_xfer_pkt->data_tx_pkt =
-                               *(struct mhi_tx_pkt *)removed_xfer_pkt;
-       } else if (MHI_RING_TYPE_EVENT_RING == ring_type) {
-
-               spinlock_t *lock;
-               unsigned long flags;
-
-               if (ring_index >= mhi_dev_ctxt->mmio_info.nr_event_rings)
-                       return -ERANGE;
-               lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
-               spin_lock_irqsave(lock, flags);
-               db_value = mhi_v2p_addr(mhi_dev_ctxt, ring_type, ring_index,
-                                                       (uintptr_t) ring->wp);
-               mhi_log(MHI_MSG_INFO,
-                       "Updating ctxt, ring index %d\n", ring_index);
-               mhi_update_ctxt(mhi_dev_ctxt,
-                               mhi_dev_ctxt->mmio_info.event_db_addr,
-                               ring_index, db_value);
-               mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
-               mhi_dev_ctxt->counters.ev_counter[ring_index]++;
-               spin_unlock_irqrestore(lock, flags);
-       }
-       atomic_inc(&mhi_dev_ctxt->flags.data_pending);
-       /* Asserting Device Wake here, will imediately wake mdm */
-       if ((MHI_STATE_M0 == mhi_dev_ctxt->mhi_state ||
-            MHI_STATE_M1 == mhi_dev_ctxt->mhi_state) &&
-            mhi_dev_ctxt->flags.link_up) {
-               switch (ring_type) {
-               case MHI_RING_TYPE_CMD_RING:
-               {
-                       struct mutex *cmd_mutex = NULL;
-
-                       cmd_mutex =
-                               &mhi_dev_ctxt->
-                               mhi_cmd_mutex_list[PRIMARY_CMD_RING];
-                       mutex_lock(cmd_mutex);
-                       mhi_dev_ctxt->cmd_ring_order = 1;
-                       mhi_process_db(mhi_dev_ctxt,
-                               mhi_dev_ctxt->mmio_info.cmd_db_addr,
-                               ring_index, db_value);
-                       mutex_unlock(cmd_mutex);
-                       break;
-               }
-               case MHI_RING_TYPE_EVENT_RING:
-               {
-                       spinlock_t *lock = NULL;
-                       unsigned long flags = 0;
-
-                       lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
-                       spin_lock_irqsave(lock, flags);
-                       mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
-                       if ((mhi_dev_ctxt->counters.ev_counter[ring_index] %
-                                               MHI_EV_DB_INTERVAL) == 0) {
-                               db_value = mhi_v2p_addr(mhi_dev_ctxt, ring_type,
-                                                       ring_index,
-                                                       (uintptr_t) ring->wp);
-                               mhi_process_db(mhi_dev_ctxt,
-                                       mhi_dev_ctxt->mmio_info.event_db_addr,
-                                       ring_index, db_value);
-                       }
-                       spin_unlock_irqrestore(lock, flags);
-                       break;
-               }
-               case MHI_RING_TYPE_XFER_RING:
-               {
-                       unsigned long flags = 0;
-
-                       spin_lock_irqsave(
-                               &mhi_dev_ctxt->db_write_lock[ring_index],
-                               flags);
-                       mhi_dev_ctxt->mhi_chan_db_order[ring_index] = 1;
-                       mhi_process_db(mhi_dev_ctxt,
-                                       mhi_dev_ctxt->mmio_info.chan_db_addr,
-                                       ring_index, db_value);
-                       spin_unlock_irqrestore(
-                               &mhi_dev_ctxt->db_write_lock[ring_index],
-                               flags);
-                       break;
-               }
-               default:
-                       mhi_log(MHI_MSG_ERROR, "Bad ring type\n");
-               }
        }
-       atomic_dec(&mhi_dev_ctxt->flags.data_pending);
-       return ret_val;
+
+       if (!MHI_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state))
+               return -EACCES;
+
+       lock = &mhi_ring->ring_lock;
+       spin_lock_irqsave(lock, flags);
+       db_value = mhi_v2p_addr(mhi_dev_ctxt,
+                               ring_type,
+                               ring_index,
+                               (uintptr_t) ring->wp);
+       mhi_ring->db_mode.process_db(mhi_dev_ctxt,
+                                    mhi_dev_ctxt->mmio_info.event_db_addr,
+                                    ring_index, db_value);
+       spin_unlock_irqrestore(lock, flags);
+
+       return 0;
+
 }
 
 static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -1330,13 +1315,11 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
        struct mhi_ring *local_chan_ctxt;
        struct mhi_chan_ctxt *chan_ctxt;
        struct mhi_client_handle *client_handle = NULL;
-       struct mutex *chan_mutex;
-       int pending_el = 0;
+       int pending_el = 0, i;
        struct mhi_ring *bb_ctxt;
 
        MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
 
-
        if (!VALID_CHAN_NR(chan)) {
                mhi_log(MHI_MSG_ERROR,
                        "Bad channel number for CCE\n");
@@ -1344,8 +1327,6 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
        }
 
        bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
-       chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan];
-       mutex_lock(chan_mutex);
        client_handle = mhi_dev_ctxt->client_handle_list[chan];
        local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
        chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
@@ -1365,6 +1346,18 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
                                chan, pending_el);
 
        atomic_sub(pending_el, &mhi_dev_ctxt->counters.outbound_acks);
+       read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+       for (i = 0; i < pending_el; i++)
+               mhi_deassert_device_wake(mhi_dev_ctxt);
+
+       read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+       for (i = 0; i < pending_el; i++) {
+               pm_runtime_put_noidle(&mhi_dev_ctxt->
+                                     dev_info->pcie_device->dev);
+               pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+                                         dev_info->pcie_device->dev);
+       }
 
        /* Reset the local channel context */
        local_chan_ctxt->rp = local_chan_ctxt->base;
@@ -1372,37 +1365,17 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
        local_chan_ctxt->ack_rp = local_chan_ctxt->base;
 
        /* Reset the mhi channel context */
-       chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_DISABLED;
+       chan_ctxt->chstate = MHI_CHAN_STATE_DISABLED;
        chan_ctxt->mhi_trb_read_ptr = chan_ctxt->mhi_trb_ring_base_addr;
        chan_ctxt->mhi_trb_write_ptr = chan_ctxt->mhi_trb_ring_base_addr;
 
        mhi_log(MHI_MSG_INFO, "Cleaning up BB list\n");
        reset_bb_ctxt(mhi_dev_ctxt, bb_ctxt);
 
-       mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_NOT_PENDING;
-       mutex_unlock(chan_mutex);
        mhi_log(MHI_MSG_INFO, "Reset complete.\n");
-       if (NULL != client_handle)
-               complete(&client_handle->chan_reset_complete);
        return ret_val;
 }
 
-static int start_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
-                                               union mhi_cmd_pkt *cmd_pkt)
-{
-       u32 chan;
-
-       MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
-       if (!VALID_CHAN_NR(chan))
-               mhi_log(MHI_MSG_ERROR, "Bad chan: 0x%x\n", chan);
-       mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] =
-                                       MHI_CMD_NOT_PENDING;
-       mhi_log(MHI_MSG_INFO, "Processed START CMD chan %d\n", chan);
-       if (NULL != mhi_dev_ctxt->client_handle_list[chan])
-               complete(
-               &mhi_dev_ctxt->client_handle_list[chan]->chan_open_complete);
-       return 0;
-}
 enum MHI_EVENT_CCS get_cmd_pkt(struct mhi_device_ctxt *mhi_dev_ctxt,
                                union mhi_event_pkt *ev_pkt,
                                union mhi_cmd_pkt **cmd_pkt,
@@ -1421,68 +1394,13 @@ enum MHI_EVENT_CCS get_cmd_pkt(struct mhi_device_ctxt *mhi_dev_ctxt,
        return MHI_EV_READ_CODE(EV_TRB_CODE, ev_pkt);
 }
 
-int parse_cmd_event(struct mhi_device_ctxt *mhi_dev_ctxt,
-                               union mhi_event_pkt *ev_pkt, u32 event_index)
-{
-       int ret_val = 0;
-       union mhi_cmd_pkt *cmd_pkt = NULL;
-       u32 event_code = 0;
-
-       event_code = get_cmd_pkt(mhi_dev_ctxt, ev_pkt, &cmd_pkt, event_index);
-       switch (event_code) {
-       case MHI_EVENT_CC_SUCCESS:
-       {
-               u32 chan = 0;
-
-               MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
-               switch (MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt)) {
-               mhi_log(MHI_MSG_INFO, "CCE chan %d cmd %d\n", chan,
-                               MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt));
-               case MHI_PKT_TYPE_RESET_CHAN_CMD:
-                       ret_val = reset_chan_cmd(mhi_dev_ctxt, cmd_pkt);
-                       if (ret_val)
-                               mhi_log(MHI_MSG_INFO,
-                               "Failed to process reset cmd ret %d\n",
-                               ret_val);
-                       break;
-               case MHI_PKT_TYPE_STOP_CHAN_CMD:
-                       if (ret_val) {
-                               mhi_log(MHI_MSG_INFO,
-                                               "Failed to set chan state\n");
-                               return ret_val;
-                       }
-                       break;
-               case MHI_PKT_TYPE_START_CHAN_CMD:
-                       ret_val = start_chan_cmd(mhi_dev_ctxt, cmd_pkt);
-                       if (ret_val)
-                               mhi_log(MHI_MSG_INFO,
-                                       "Failed to process reset cmd\n");
-                       break;
-               default:
-                       mhi_log(MHI_MSG_INFO,
-                               "Bad cmd type 0x%x\n",
-                               MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt));
-                       break;
-               }
-               mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_NOT_PENDING;
-               atomic_dec(&mhi_dev_ctxt->counters.outbound_acks);
-               break;
-       }
-       default:
-               mhi_log(MHI_MSG_INFO, "Unhandled mhi completion code\n");
-               break;
-       }
-       ctxt_del_element(mhi_dev_ctxt->mhi_local_cmd_ctxt, NULL);
-       return 0;
-}
-
 int mhi_poll_inbound(struct mhi_client_handle *client_handle,
                     struct mhi_result *result)
 {
        struct mhi_tx_pkt *pending_trb = 0;
        struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
        struct mhi_ring *local_chan_ctxt = NULL;
-       struct mutex *chan_mutex = NULL;
+       struct mhi_chan_cfg *cfg;
        struct mhi_ring *bb_ctxt = NULL;
        struct mhi_buf_info *bb = NULL;
        int  chan = 0, r = 0;
@@ -1495,10 +1413,10 @@ int mhi_poll_inbound(struct mhi_client_handle *client_handle,
        mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
        chan = client_handle->chan_info.chan_nr;
        local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
-       chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan];
+       cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
        bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
 
-       mutex_lock(chan_mutex);
+       mutex_lock(&cfg->chan_lock);
        if (bb_ctxt->rp != bb_ctxt->ack_rp) {
                pending_trb = (struct mhi_tx_pkt *)(local_chan_ctxt->ack_rp);
                result->flags = pending_trb->info;
@@ -1524,7 +1442,7 @@ int mhi_poll_inbound(struct mhi_client_handle *client_handle,
                result->bytes_xferd = 0;
                r = -ENODATA;
        }
-       mutex_unlock(chan_mutex);
+       mutex_unlock(&cfg->chan_lock);
        mhi_log(MHI_MSG_VERBOSE,
                "Exited Result: Buf addr: 0x%p Bytes xfed 0x%zx chan %d\n",
                result->buf_addr, result->bytes_xferd, chan);
@@ -1581,49 +1499,80 @@ int mhi_get_epid(struct mhi_client_handle *client_handle)
        return MHI_EPID;
 }
 
-int mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
+/*
+ * mhi_assert_device_wake - Set WAKE_DB register
+ * force_set - if true, will set bit regardless of counts
+ */
+void mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt,
+                           bool force_set)
 {
-       if ((mhi_dev_ctxt->mmio_info.chan_db_addr) &&
-              (mhi_dev_ctxt->flags.link_up)) {
-                       mhi_log(MHI_MSG_VERBOSE, "LPM %d\n",
-                               mhi_dev_ctxt->enable_lpm);
-                       atomic_set(&mhi_dev_ctxt->flags.device_wake, 1);
+       unsigned long flags;
+
+       if (unlikely(force_set)) {
+               spin_lock_irqsave(&mhi_dev_ctxt->dev_wake_lock, flags);
+               atomic_inc(&mhi_dev_ctxt->counters.device_wake);
+               mhi_write_db(mhi_dev_ctxt,
+                            mhi_dev_ctxt->mmio_info.chan_db_addr,
+                            MHI_DEV_WAKE_DB, 1);
+               spin_unlock_irqrestore(&mhi_dev_ctxt->dev_wake_lock, flags);
+       } else {
+               if (likely(atomic_add_unless(&mhi_dev_ctxt->
+                                            counters.device_wake,
+                                            1,
+                                            0)))
+                       return;
+
+               spin_lock_irqsave(&mhi_dev_ctxt->dev_wake_lock, flags);
+               if ((atomic_inc_return(&mhi_dev_ctxt->counters.device_wake)
+                    == 1) &&
+                   MHI_WAKE_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
                        mhi_write_db(mhi_dev_ctxt,
                                     mhi_dev_ctxt->mmio_info.chan_db_addr,
-                                    MHI_DEV_WAKE_DB, 1);
-                       mhi_dev_ctxt->device_wake_asserted = 1;
-       } else {
-               mhi_log(MHI_MSG_VERBOSE, "LPM %d\n", mhi_dev_ctxt->enable_lpm);
+                                    MHI_DEV_WAKE_DB,
+                                    1);
+               }
+               spin_unlock_irqrestore(&mhi_dev_ctxt->dev_wake_lock, flags);
        }
-       return 0;
 }
 
-inline int mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
+void mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
-       if ((mhi_dev_ctxt->enable_lpm) &&
-           (atomic_read(&mhi_dev_ctxt->flags.device_wake)) &&
-           (mhi_dev_ctxt->mmio_info.chan_db_addr != NULL) &&
-           (mhi_dev_ctxt->flags.link_up)) {
-               mhi_log(MHI_MSG_VERBOSE, "LPM %d\n", mhi_dev_ctxt->enable_lpm);
-               atomic_set(&mhi_dev_ctxt->flags.device_wake, 0);
-               mhi_write_db(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.chan_db_addr,
-                               MHI_DEV_WAKE_DB, 0);
-               mhi_dev_ctxt->device_wake_asserted = 0;
-       } else {
-               mhi_log(MHI_MSG_VERBOSE, "LPM %d DEV_WAKE %d link %d\n",
-                               mhi_dev_ctxt->enable_lpm,
-                               atomic_read(&mhi_dev_ctxt->flags.device_wake),
-                               mhi_dev_ctxt->flags.link_up);
-       }
-       return 0;
+       unsigned long flags;
+
+       WARN_ON(atomic_read(&mhi_dev_ctxt->counters.device_wake) == 0);
+
+       if (likely(atomic_add_unless
+                  (&mhi_dev_ctxt->counters.device_wake, -1, 1)))
+               return;
+
+       spin_lock_irqsave(&mhi_dev_ctxt->dev_wake_lock, flags);
+       if ((atomic_dec_return(&mhi_dev_ctxt->counters.device_wake) == 0) &&
+           MHI_WAKE_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state))
+               mhi_write_db(mhi_dev_ctxt,
+                            mhi_dev_ctxt->mmio_info.chan_db_addr,
+                            MHI_DEV_WAKE_DB,
+                            0);
+       spin_unlock_irqrestore(&mhi_dev_ctxt->dev_wake_lock, flags);
 }
 
-int mhi_set_lpm(struct mhi_client_handle *client_handle, int enable_lpm)
+int mhi_set_lpm(struct mhi_client_handle *client_handle, bool enable_lpm)
 {
-       mhi_log(MHI_MSG_VERBOSE, "LPM Set %d\n", enable_lpm);
-       client_handle->mhi_dev_ctxt->enable_lpm = enable_lpm ? 1 : 0;
+       struct mhi_device_ctxt *mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
+       unsigned long flags;
+
+       read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+
+       /* Disable low power mode by asserting Wake */
+       if (enable_lpm == false)
+               mhi_assert_device_wake(mhi_dev_ctxt, false);
+       else
+               mhi_deassert_device_wake(mhi_dev_ctxt);
+
+       read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+
        return 0;
 }
+EXPORT_SYMBOL(mhi_set_lpm);
 
 int mhi_set_bus_request(struct mhi_device_ctxt *mhi_dev_ctxt,
                                int index)
@@ -1648,10 +1597,57 @@ int mhi_deregister_channel(struct mhi_client_handle
 }
 EXPORT_SYMBOL(mhi_deregister_channel);
 
+void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
+                            void __iomem *io_addr,
+                            uintptr_t chan,
+                            u32 val)
+{
+       struct mhi_ring *ring_ctxt =
+               &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+
+       if (io_addr == mhi_dev_ctxt->mmio_info.chan_db_addr)
+               ring_ctxt = &mhi_dev_ctxt->
+                       mhi_local_chan_ctxt[chan];
+       else
+               ring_ctxt = &mhi_dev_ctxt->
+                       mhi_local_event_ctxt[chan];
+
+       mhi_log(MHI_MSG_VERBOSE,
+                       "db.set addr: %p io_offset 0x%lx val:0x%x\n",
+                       io_addr, chan, val);
+
+       mhi_update_ctxt(mhi_dev_ctxt, io_addr, chan, val);
+
+       if (ring_ctxt->db_mode.db_mode) {
+               mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
+               ring_ctxt->db_mode.db_mode = 0;
+       } else {
+               mhi_log(MHI_MSG_INFO,
+                       "Not ringing xfer db, chan %ld, brstmode %d db_mode %d\n",
+                       chan,
+                       ring_ctxt->db_mode.brstmode,
+                       ring_ctxt->db_mode.db_mode);
+       }
+}
+
+void mhi_process_db_brstmode_disable(struct mhi_device_ctxt *mhi_dev_ctxt,
+                            void __iomem *io_addr,
+                            uintptr_t chan,
+                            u32 val)
+{
+       mhi_log(MHI_MSG_VERBOSE,
+                       "db.set addr: %p io_offset 0x%lx val:0x%x\n",
+                       io_addr, chan, val);
+
+       mhi_update_ctxt(mhi_dev_ctxt, io_addr, chan, val);
+       mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
+}
+
 void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
                  void __iomem *io_addr,
                  uintptr_t chan, u32 val)
 {
+
        mhi_log(MHI_MSG_VERBOSE,
                        "db.set addr: %p io_offset 0x%lx val:0x%x\n",
                        io_addr, chan, val);
@@ -1660,28 +1656,29 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
 
        /* Channel Doorbell and Polling Mode Disabled or Software Channel*/
        if (io_addr == mhi_dev_ctxt->mmio_info.chan_db_addr) {
+               struct mhi_ring *chan_ctxt =
+                       &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
                if (!(IS_HARDWARE_CHANNEL(chan) &&
-                   mhi_dev_ctxt->flags.uldl_enabled &&
-                   !mhi_dev_ctxt->flags.db_mode[chan])) {
+                   !chan_ctxt->db_mode.db_mode)) {
                        mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
-                       mhi_dev_ctxt->flags.db_mode[chan] = 0;
+                       chan_ctxt->db_mode.db_mode = 0;
                } else {
                        mhi_log(MHI_MSG_INFO,
-                        "Not ringing xfer db, chan %ld, ul_dl %d db_mode %d\n",
-                         chan, mhi_dev_ctxt->flags.uldl_enabled,
-                               mhi_dev_ctxt->flags.db_mode[chan]);
+                               "Not ringing xfer db, chan %ld, brstmode %d db_mode %d\n",
+                               chan, chan_ctxt->db_mode.brstmode,
+                               chan_ctxt->db_mode.db_mode);
                }
        /* Event Doorbell and Polling mode Disabled */
        } else if (io_addr == mhi_dev_ctxt->mmio_info.event_db_addr) {
-               /* Only ring for software channel */
-               if (IS_SW_EV_RING(mhi_dev_ctxt, chan) ||
-                   !mhi_dev_ctxt->flags.uldl_enabled) {
+               struct mhi_ring *ev_ctxt =
+                       &mhi_dev_ctxt->mhi_local_event_ctxt[chan];
+               /* Only ring for software channel or db mode*/
+               if (!(IS_HW_EV_RING(mhi_dev_ctxt, chan) &&
+                   !ev_ctxt->db_mode.db_mode)) {
                        mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
-                       mhi_dev_ctxt->flags.db_mode[chan] = 0;
                }
        } else {
                mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
-               mhi_dev_ctxt->flags.db_mode[chan] = 0;
        }
 }
 
index ddf18e4..b444737 100644 (file)
@@ -9,6 +9,19 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  * GNU General Public License for more details.
  */
+#include <linux/completion.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/msm-bus.h>
+#include <linux/cpu.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/pm_runtime.h>
 #include "mhi_sys.h"
 #include "mhi_hwio.h"
 #include "mhi.h"
@@ -17,25 +30,40 @@ int mhi_test_for_device_reset(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
        u32 pcie_word_val = 0;
        u32 expiry_counter;
+       unsigned long flags;
+       rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
 
        mhi_log(MHI_MSG_INFO, "Waiting for MMIO RESET bit to be cleared.\n");
+       read_lock_irqsave(pm_xfer_lock, flags);
+       if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+               read_unlock_irqrestore(pm_xfer_lock, flags);
+               return -EIO;
+       }
        pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
-                                       MHISTATUS);
+                                    MHISTATUS);
        MHI_READ_FIELD(pcie_word_val,
                        MHICTRL_RESET_MASK,
                        MHICTRL_RESET_SHIFT);
+       read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
        if (pcie_word_val == 0xFFFFFFFF)
                return -ENOTCONN;
+
        while (MHI_STATE_RESET != pcie_word_val && expiry_counter < 100) {
                expiry_counter++;
                mhi_log(MHI_MSG_ERROR,
                        "Device is not RESET, sleeping and retrying.\n");
                msleep(MHI_READY_STATUS_TIMEOUT_MS);
+               read_lock_irqsave(pm_xfer_lock, flags);
+               if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+                       read_unlock_irqrestore(pm_xfer_lock, flags);
+                       return -EIO;
+               }
                pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
                                                        MHICTRL);
                MHI_READ_FIELD(pcie_word_val,
                                MHICTRL_RESET_MASK,
                                MHICTRL_RESET_SHIFT);
+               read_unlock_irqrestore(pm_xfer_lock, flags);
        }
 
        if (MHI_STATE_READY != pcie_word_val)
@@ -47,15 +75,23 @@ int mhi_test_for_device_ready(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
        u32 pcie_word_val = 0;
        u32 expiry_counter;
+       unsigned long flags;
+       rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
 
        mhi_log(MHI_MSG_INFO, "Waiting for MMIO Ready bit to be set\n");
 
+       read_lock_irqsave(pm_xfer_lock, flags);
+       if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+               read_unlock_irqrestore(pm_xfer_lock, flags);
+               return -EIO;
+       }
        /* Read MMIO and poll for READY bit to be set */
        pcie_word_val = mhi_reg_read(
                        mhi_dev_ctxt->mmio_info.mmio_addr, MHISTATUS);
        MHI_READ_FIELD(pcie_word_val,
                        MHISTATUS_READY_MASK,
                        MHISTATUS_READY_SHIFT);
+       read_unlock_irqrestore(pm_xfer_lock, flags);
 
        if (pcie_word_val == 0xFFFFFFFF)
                return -ENOTCONN;
@@ -65,10 +101,16 @@ int mhi_test_for_device_ready(struct mhi_device_ctxt *mhi_dev_ctxt)
                mhi_log(MHI_MSG_ERROR,
                        "Device is not ready, sleeping and retrying.\n");
                msleep(MHI_READY_STATUS_TIMEOUT_MS);
+               read_lock_irqsave(pm_xfer_lock, flags);
+               if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+                       read_unlock_irqrestore(pm_xfer_lock, flags);
+                       return -EIO;
+               }
                pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
                                             MHISTATUS);
                MHI_READ_FIELD(pcie_word_val,
                                MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT);
+               read_unlock_irqrestore(pm_xfer_lock, flags);
        }
 
        if (pcie_word_val != MHI_STATE_READY)
@@ -102,21 +144,20 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
        mhi_dev_ctxt->dev_props->mhi_ver = mhi_reg_read(
                                mhi_dev_ctxt->mmio_info.mmio_addr, MHIVER);
        if (MHI_VERSION != mhi_dev_ctxt->dev_props->mhi_ver) {
-               mhi_log(MHI_MSG_CRITICAL, "Bad MMIO version, 0x%x\n",
-                                       mhi_dev_ctxt->dev_props->mhi_ver);
-               if (mhi_dev_ctxt->dev_props->mhi_ver == 0xFFFFFFFF)
-                       ret_val = mhi_wait_for_mdm(mhi_dev_ctxt);
-               if (ret_val)
+               mhi_log(MHI_MSG_CRITICAL,
+                       "Bad MMIO version, 0x%x\n",
+                       mhi_dev_ctxt->dev_props->mhi_ver);
                        return ret_val;
        }
+
        /* Enable the channels */
        for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
                        struct mhi_chan_ctxt *chan_ctxt =
                                &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[i];
                if (VALID_CHAN_NR(i))
-                       chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED;
+                       chan_ctxt->chstate = MHI_CHAN_STATE_ENABLED;
                else
-                       chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_DISABLED;
+                       chan_ctxt->chstate = MHI_CHAN_STATE_DISABLED;
        }
        mhi_log(MHI_MSG_INFO,
                        "Read back MMIO Ready bit successfully. Moving on..\n");
@@ -144,6 +185,11 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
                                MHICFG,
                                MHICFG_NER_MASK, MHICFG_NER_SHIFT,
                                mhi_dev_ctxt->mmio_info.nr_event_rings);
+       mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.mmio_addr,
+                           MHICFG,
+                           MHICFG_NHWER_MASK,
+                           MHICFG_NHWER_SHIFT,
+                           mhi_dev_ctxt->mmio_info.nr_hw_event_rings);
 
        pcie_dword_val = mhi_dev_ctxt->dev_space.ring_ctxt.dma_cc_list;
        pcie_word_val = HIGH_WORD(pcie_dword_val);
index a928c05..2f44601 100644 (file)
 #include "mhi_hwio.h"
 
 /* Write only sysfs attributes */
-static DEVICE_ATTR(MHI_M3, S_IWUSR, NULL, sysfs_init_m3);
 static DEVICE_ATTR(MHI_M0, S_IWUSR, NULL, sysfs_init_m0);
-static DEVICE_ATTR(MHI_RESET, S_IWUSR, NULL, sysfs_init_mhi_reset);
 
 /* Read only sysfs attributes */
 
 static struct attribute *mhi_attributes[] = {
-       &dev_attr_MHI_M3.attr,
        &dev_attr_MHI_M0.attr,
-       &dev_attr_MHI_RESET.attr,
        NULL,
 };
 
@@ -42,21 +38,20 @@ static struct attribute_group mhi_attribute_group = {
 int mhi_pci_suspend(struct device *dev)
 {
        int r = 0;
-       struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
 
-       if (NULL == mhi_dev_ctxt)
-               return -EINVAL;
-       mhi_log(MHI_MSG_INFO, "Entered, MHI state %d\n",
-                       mhi_dev_ctxt->mhi_state);
-       atomic_set(&mhi_dev_ctxt->flags.pending_resume, 1);
+       mhi_log(MHI_MSG_INFO, "Entered\n");
 
-       r = mhi_initiate_m3(mhi_dev_ctxt);
+       /* if rpm status still active then force suspend */
+       if (!pm_runtime_status_suspended(dev)) {
+               r = mhi_runtime_suspend(dev);
+               if (r)
+                       return r;
+       }
 
-       if (!r)
-               return r;
+       pm_runtime_set_suspended(dev);
+       pm_runtime_disable(dev);
 
-       atomic_set(&mhi_dev_ctxt->flags.pending_resume, 0);
-       mhi_log(MHI_MSG_INFO, "Exited, ret %d\n", r);
+       mhi_log(MHI_MSG_INFO, "Exit\n");
        return r;
 }
 
@@ -65,61 +60,150 @@ int mhi_runtime_suspend(struct device *dev)
        int r = 0;
        struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
 
-       mhi_log(MHI_MSG_INFO, "Entered\n");
-       r = mhi_initiate_m3(mhi_dev_ctxt);
-       if (r)
-               mhi_log(MHI_MSG_ERROR, "Init M3 failed ret %d\n", r);
+       mutex_lock(&mhi_dev_ctxt->pm_lock);
+       read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+       mhi_log(MHI_MSG_INFO, "Entered with State:0x%x %s\n",
+               mhi_dev_ctxt->mhi_pm_state,
+               TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+
+       /* Link is already disabled */
+       if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE ||
+          mhi_dev_ctxt->mhi_pm_state == MHI_PM_M3) {
+               mhi_log(MHI_MSG_INFO, "Already in active state, exiting\n");
+               read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+               mutex_unlock(&mhi_dev_ctxt->pm_lock);
+               return 0;
+       }
+
+       if (unlikely(atomic_read(&mhi_dev_ctxt->counters.device_wake))) {
+               mhi_log(MHI_MSG_INFO, "Busy, Aborting Runtime Suspend\n");
+               read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+               mutex_unlock(&mhi_dev_ctxt->pm_lock);
+               return -EBUSY;
+       }
+
+       mhi_assert_device_wake(mhi_dev_ctxt, false);
+       read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+       r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
+                              mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
+                              mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
+                              msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
+       if (!r) {
+               mhi_log(MHI_MSG_CRITICAL,
+                       "Failed to get M0||M1 event, timeout, current state:%s\n",
+                       TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+               r = -EIO;
+               goto rpm_suspend_exit;
+       }
+
+       mhi_log(MHI_MSG_INFO, "Allowing M3 State\n");
+       write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+       mhi_deassert_device_wake(mhi_dev_ctxt);
+       mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_ENTER;
+       mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
+       write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+       mhi_log(MHI_MSG_INFO,
+                       "Waiting for M3 completion.\n");
+       r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
+                              mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
+                              msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
+       if (!r) {
+               mhi_log(MHI_MSG_CRITICAL,
+                       "Failed to get M3 event, timeout, current state:%s\n",
+                       TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+               r = -EIO;
+               goto rpm_suspend_exit;
+       }
+
+       r = mhi_turn_off_pcie_link(mhi_dev_ctxt);
+       if (r) {
+               mhi_log(MHI_MSG_ERROR,
+                       "Failed to Turn off link ret:%d\n", r);
+       }
 
-       pm_runtime_mark_last_busy(dev);
+rpm_suspend_exit:
        mhi_log(MHI_MSG_INFO, "Exited\n");
+       mutex_unlock(&mhi_dev_ctxt->pm_lock);
        return r;
 }
 
+int mhi_runtime_idle(struct device *dev)
+{
+       mhi_log(MHI_MSG_INFO, "Entered returning -EBUSY\n");
+
+       /*
+        * RPM framework during runtime resume always calls
+        * rpm_idle to see if device ready to suspend.
+        * If dev.power usage_count count is 0, rpm fw will call
+        * rpm_idle cb to see if device is ready to suspend.
+        * if cb return 0, or cb not defined the framework will
+        * assume device driver is ready to suspend;
+        * therefore, fw will schedule runtime suspend.
+        * In MHI power management, MHI host shall go to
+        * runtime suspend only after entering MHI State M2, even if
+        * usage count is 0.  Return -EBUSY to disable automatic suspend.
+        */
+       return -EBUSY;
+}
+
 int mhi_runtime_resume(struct device *dev)
 {
        int r = 0;
        struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
 
-       mhi_log(MHI_MSG_INFO, "Entered\n");
-       r = mhi_initiate_m0(mhi_dev_ctxt);
-       if (r)
-               mhi_log(MHI_MSG_ERROR, "Init M0 failed ret %d\n", r);
-       pm_runtime_mark_last_busy(dev);
-       mhi_log(MHI_MSG_INFO, "Exited\n");
+       mutex_lock(&mhi_dev_ctxt->pm_lock);
+       read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+       WARN_ON(mhi_dev_ctxt->mhi_pm_state != MHI_PM_M3);
+       read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+       /* turn on link */
+       r = mhi_turn_on_pcie_link(mhi_dev_ctxt);
+       if (r) {
+               mhi_log(MHI_MSG_CRITICAL,
+                       "Failed to resume link\n");
+               goto rpm_resume_exit;
+       }
+
+       write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+       mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_EXIT;
+       write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
+       /* Set and wait for M0 Event */
+       write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+       mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0);
+       write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+       r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
+                              mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
+                              mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
+                              msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
+       if (!r) {
+               mhi_log(MHI_MSG_ERROR,
+                       "Failed to get M0 event, timeout\n");
+               r = -EIO;
+               goto rpm_resume_exit;
+       }
+       r = 0; /* no errors */
+
+rpm_resume_exit:
+       mutex_unlock(&mhi_dev_ctxt->pm_lock);
+       mhi_log(MHI_MSG_INFO, "Exited with :%d\n", r);
        return r;
 }
 
 int mhi_pci_resume(struct device *dev)
 {
        int r = 0;
-       struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
 
-       r = mhi_initiate_m0(mhi_dev_ctxt);
-       if (r)
-               goto exit;
-       r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
-                       mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
-                       mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
-                       msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
-       switch (r) {
-       case 0:
-               mhi_log(MHI_MSG_CRITICAL,
-                       "Timeout: No M0 event after %d ms\n",
-                       MHI_MAX_SUSPEND_TIMEOUT);
-               mhi_dev_ctxt->counters.m0_event_timeouts++;
-               r = -ETIME;
-               break;
-       case -ERESTARTSYS:
+       r = mhi_runtime_resume(dev);
+       if (r) {
                mhi_log(MHI_MSG_CRITICAL,
-                       "Going Down...\n");
-               break;
-       default:
-               mhi_log(MHI_MSG_INFO,
-                       "Wait complete state: %d\n", mhi_dev_ctxt->mhi_state);
-               r = 0;
+                       "Failed to resume link\n");
+       } else {
+               pm_runtime_set_active(dev);
+               pm_runtime_enable(dev);
        }
-exit:
-       atomic_set(&mhi_dev_ctxt->flags.pending_resume, 0);
+
        return r;
 }
 
@@ -133,57 +217,15 @@ void mhi_rem_pm_sysfs(struct device *dev)
        return sysfs_remove_group(&dev->kobj, &mhi_attribute_group);
 }
 
-ssize_t sysfs_init_m3(struct device *dev, struct device_attribute *attr,
-                       const char *buf, size_t count)
-{
-       int r = 0;
-       struct mhi_device_ctxt *mhi_dev_ctxt =
-               &mhi_devices.device_list[0].mhi_ctxt;
-       r = mhi_initiate_m3(mhi_dev_ctxt);
-       if (r) {
-               mhi_log(MHI_MSG_CRITICAL,
-                               "Failed to suspend %d\n", r);
-               return r;
-       }
-       r = mhi_turn_off_pcie_link(mhi_dev_ctxt);
-       if (r)
-               mhi_log(MHI_MSG_CRITICAL,
-                               "Failed to turn off link ret %d\n", r);
-
-       return count;
-}
-ssize_t sysfs_init_mhi_reset(struct device *dev, struct device_attribute *attr,
-                       const char *buf, size_t count)
-{
-       struct mhi_device_ctxt *mhi_dev_ctxt =
-               &mhi_devices.device_list[0].mhi_ctxt;
-       int r = 0;
-
-       mhi_log(MHI_MSG_INFO, "Triggering MHI Reset.\n");
-       r = mhi_trigger_reset(mhi_dev_ctxt);
-       if (r != 0)
-               mhi_log(MHI_MSG_CRITICAL,
-                       "Failed to trigger MHI RESET ret %d\n",
-                       r);
-       else
-               mhi_log(MHI_MSG_INFO, "Triggered! MHI RESET\n");
-       return count;
-}
 ssize_t sysfs_init_m0(struct device *dev, struct device_attribute *attr,
                        const char *buf, size_t count)
 {
        struct mhi_device_ctxt *mhi_dev_ctxt =
                &mhi_devices.device_list[0].mhi_ctxt;
-       if (0 != mhi_turn_on_pcie_link(mhi_dev_ctxt)) {
-               mhi_log(MHI_MSG_CRITICAL,
-                               "Failed to resume link\n");
-               return count;
-       }
-       mhi_initiate_m0(mhi_dev_ctxt);
-       mhi_log(MHI_MSG_CRITICAL,
-                       "Current mhi_state = 0x%x\n",
-                       mhi_dev_ctxt->mhi_state);
 
+       pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+       pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+       pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
        return count;
 }
 
@@ -194,35 +236,42 @@ int mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
 
        mhi_log(MHI_MSG_INFO, "Entered...\n");
        pcie_dev = mhi_dev_ctxt->dev_info->pcie_device;
-       mutex_lock(&mhi_dev_ctxt->mhi_link_state);
+
        if (0 == mhi_dev_ctxt->flags.link_up) {
                mhi_log(MHI_MSG_CRITICAL,
                        "Link already marked as down, nothing to do\n");
                goto exit;
        }
-       /* Disable shadow to avoid restoring D3 hot struct device */
-       r = msm_pcie_shadow_control(mhi_dev_ctxt->dev_info->pcie_device, 0);
-       if (r)
-               mhi_log(MHI_MSG_CRITICAL,
-                       "Failed to stop shadow config space: %d\n", r);
 
-       r = pci_set_power_state(mhi_dev_ctxt->dev_info->pcie_device, PCI_D3hot);
+       r = pci_save_state(pcie_dev);
        if (r) {
                mhi_log(MHI_MSG_CRITICAL,
-                       "Failed to set pcie power state to D3 hotret: %x\n", r);
-               goto exit;
+                       "Failed to save pcie state ret: %d\n",
+                       r);
        }
+       mhi_dev_ctxt->dev_props->pcie_state = pci_store_saved_state(pcie_dev);
+       pci_disable_device(pcie_dev);
+       r = pci_set_power_state(pcie_dev, PCI_D3hot);
+       if (r) {
+               mhi_log(MHI_MSG_CRITICAL,
+                       "Failed to set pcie power state to D3 hot ret: %d\n",
+                       r);
+       }
+
        r = msm_pcie_pm_control(MSM_PCIE_SUSPEND,
-                       mhi_dev_ctxt->dev_info->pcie_device->bus->number,
-                       mhi_dev_ctxt->dev_info->pcie_device,
+                               pcie_dev->bus->number,
+                               pcie_dev,
                        NULL,
                        0);
        if (r)
                mhi_log(MHI_MSG_CRITICAL,
-                               "Failed to suspend pcie bus ret 0x%x\n", r);
+                       "Failed to suspend pcie bus ret 0x%x\n", r);
+
+       r = mhi_set_bus_request(mhi_dev_ctxt, 0);
+       if (r)
+               mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
        mhi_dev_ctxt->flags.link_up = 0;
 exit:
-       mutex_unlock(&mhi_dev_ctxt->mhi_link_state);
        mhi_log(MHI_MSG_INFO, "Exited...\n");
        return 0;
 }
@@ -234,37 +283,40 @@ int mhi_turn_on_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
 
        pcie_dev = mhi_dev_ctxt->dev_info->pcie_device;
 
-       mutex_lock(&mhi_dev_ctxt->mhi_link_state);
        mhi_log(MHI_MSG_INFO, "Entered...\n");
        if (mhi_dev_ctxt->flags.link_up)
                goto exit;
+
+       r  = mhi_set_bus_request(mhi_dev_ctxt, 1);
+       if (r)
+               mhi_log(MHI_MSG_CRITICAL,
+                       "Could not set bus frequency ret: %d\n",
+                       r);
+
        r = msm_pcie_pm_control(MSM_PCIE_RESUME,
-                       mhi_dev_ctxt->dev_info->pcie_device->bus->number,
-                       mhi_dev_ctxt->dev_info->pcie_device,
-                       NULL, 0);
+                               pcie_dev->bus->number,
+                               pcie_dev,
+                               NULL,
+                               0);
        if (r) {
                mhi_log(MHI_MSG_CRITICAL,
                                "Failed to resume pcie bus ret %d\n", r);
                goto exit;
        }
 
-       r = pci_set_power_state(mhi_dev_ctxt->dev_info->pcie_device,
-                               PCI_D0);
-       if (r) {
-               mhi_log(MHI_MSG_CRITICAL,
-                               "Failed to load stored state %d\n", r);
-               goto exit;
-       }
-       r = msm_pcie_recover_config(mhi_dev_ctxt->dev_info->pcie_device);
-       if (r) {
+       r = pci_enable_device(pcie_dev);
+       if (r)
                mhi_log(MHI_MSG_CRITICAL,
-                               "Failed to Recover config space ret: %d\n", r);
-               goto exit;
-       }
+                       "Failed to enable device ret:%d\n",
+                       r);
+
+       pci_load_and_free_saved_state(pcie_dev,
+                                     &mhi_dev_ctxt->dev_props->pcie_state);
+       pci_restore_state(pcie_dev);
+       pci_set_master(pcie_dev);
+
        mhi_dev_ctxt->flags.link_up = 1;
 exit:
-       mutex_unlock(&mhi_dev_ctxt->mhi_link_state);
        mhi_log(MHI_MSG_INFO, "Exited...\n");
        return r;
 }
-
index e5a6dd5..07d0098 100644 (file)
@@ -48,6 +48,9 @@ static int add_element(struct mhi_ring *ring, void **rp,
                *assigned_addr = (char *)ring->wp;
        *wp = (void *)(((d_wp + 1) % ring_size) * ring->el_size +
                                                (uintptr_t)ring->base);
+
+       /* force update visible to other cores */
+       smp_wmb();
        return 0;
 }
 
@@ -101,6 +104,9 @@ int delete_element(struct mhi_ring *ring, void **rp,
 
        *rp = (void *)(((d_rp + 1) % ring_size) * ring->el_size +
                                                (uintptr_t)ring->base);
+
+       /* force update visible to other cores */
+       smp_wmb();
        return 0;
 }
 
@@ -108,6 +114,7 @@ int mhi_get_free_desc(struct mhi_client_handle *client_handle)
 {
        u32 chan;
        struct mhi_device_ctxt *ctxt;
+       int bb_ring, ch_ring;
 
        if (!client_handle || MHI_HANDLE_MAGIC != client_handle->magic ||
            !client_handle->mhi_dev_ctxt)
@@ -115,7 +122,10 @@ int mhi_get_free_desc(struct mhi_client_handle *client_handle)
        ctxt = client_handle->mhi_dev_ctxt;
        chan = client_handle->chan_info.chan_nr;
 
-       return get_nr_avail_ring_elements(&ctxt->mhi_local_chan_ctxt[chan]);
+       bb_ring = get_nr_avail_ring_elements(&ctxt->chan_bb_list[chan]);
+       ch_ring = get_nr_avail_ring_elements(&ctxt->mhi_local_chan_ctxt[chan]);
+
+       return min(bb_ring, ch_ring);
 }
 EXPORT_SYMBOL(mhi_get_free_desc);
 
index 8ee3ded..defd6f4 100644 (file)
@@ -10,6 +10,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/pm_runtime.h>
 #include <mhi_sys.h>
 #include <mhi.h>
 #include <mhi_bhi.h>
 static int mhi_ssr_notify_cb(struct notifier_block *nb,
                        unsigned long action, void *data)
 {
-       int ret_val = 0;
-       struct mhi_device_ctxt *mhi_dev_ctxt =
-               &mhi_devices.device_list[0].mhi_ctxt;
-       struct mhi_pcie_dev_info *mhi_pcie_dev = NULL;
 
-       mhi_pcie_dev = &mhi_devices.device_list[mhi_devices.nr_of_devices];
-       if (NULL != mhi_dev_ctxt)
-               mhi_dev_ctxt->esoc_notif = action;
        switch (action) {
        case SUBSYS_BEFORE_POWERUP:
                mhi_log(MHI_MSG_INFO,
                        "Received Subsystem event BEFORE_POWERUP\n");
-               atomic_set(&mhi_dev_ctxt->flags.pending_powerup, 1);
-               ret_val = init_mhi_base_state(mhi_dev_ctxt);
-               if (0 != ret_val)
-                       mhi_log(MHI_MSG_CRITICAL,
-                               "Failed to transition to base state %d.\n",
-                               ret_val);
                break;
        case SUBSYS_AFTER_POWERUP:
                mhi_log(MHI_MSG_INFO,
@@ -148,7 +136,7 @@ void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
        }
 }
 
-static int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
+int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
 {
        u32 pcie_word_val = 0;
        int r = 0;
@@ -159,13 +147,11 @@ static int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
        mhi_pcie_dev->bhi_ctxt.bhi_base += pcie_word_val;
        pcie_word_val = mhi_reg_read(mhi_pcie_dev->bhi_ctxt.bhi_base,
                                     BHI_EXECENV);
+       mhi_dev_ctxt->dev_exec_env = pcie_word_val;
        if (pcie_word_val == MHI_EXEC_ENV_AMSS) {
                mhi_dev_ctxt->base_state = STATE_TRANSITION_RESET;
        } else if (pcie_word_val == MHI_EXEC_ENV_PBL) {
                mhi_dev_ctxt->base_state = STATE_TRANSITION_BHI;
-               r = bhi_probe(mhi_pcie_dev);
-               if (r)
-                       mhi_log(MHI_MSG_ERROR, "Failed to initialize BHI.\n");
        } else {
                mhi_log(MHI_MSG_ERROR, "Invalid EXEC_ENV: 0x%x\n",
                        pcie_word_val);
@@ -178,10 +164,9 @@ static int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
 
 void mhi_link_state_cb(struct msm_pcie_notify *notify)
 {
-       int ret_val = 0;
+
        struct mhi_pcie_dev_info *mhi_pcie_dev;
        struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
-       int r = 0;
 
        if (NULL == notify || NULL == notify->data) {
                mhi_log(MHI_MSG_CRITICAL,
@@ -198,32 +183,6 @@ void mhi_link_state_cb(struct msm_pcie_notify *notify)
        case MSM_PCIE_EVENT_LINKUP:
                mhi_log(MHI_MSG_INFO,
                        "Received MSM_PCIE_EVENT_LINKUP\n");
-               if (0 == mhi_pcie_dev->link_up_cntr) {
-                       mhi_log(MHI_MSG_INFO,
-                               "Initializing MHI for the first time\n");
-                               r = mhi_ctxt_init(mhi_pcie_dev);
-                               if (r) {
-                                       mhi_log(MHI_MSG_ERROR,
-                                       "MHI initialization failed, ret %d.\n",
-                                       r);
-                                       r = msm_pcie_register_event(
-                                       &mhi_pcie_dev->mhi_pci_link_event);
-                                       mhi_log(MHI_MSG_ERROR,
-                                       "Deregistered from PCIe notif r %d.\n",
-                                       r);
-                                       return;
-                               }
-                               mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt;
-                               mhi_pcie_dev->mhi_ctxt.flags.link_up = 1;
-                               pci_set_master(mhi_pcie_dev->pcie_device);
-                               r = set_mhi_base_state(mhi_pcie_dev);
-                               if (r)
-                                       return;
-                               init_mhi_base_state(mhi_dev_ctxt);
-               } else {
-                       mhi_log(MHI_MSG_INFO,
-                               "Received Link Up Callback\n");
-               }
                mhi_pcie_dev->link_up_cntr++;
                break;
        case MSM_PCIE_EVENT_WAKEUP:
@@ -231,17 +190,14 @@ void mhi_link_state_cb(struct msm_pcie_notify *notify)
                        "Received MSM_PCIE_EVENT_WAKE\n");
                __pm_stay_awake(&mhi_dev_ctxt->w_lock);
                __pm_relax(&mhi_dev_ctxt->w_lock);
-               if (atomic_read(&mhi_dev_ctxt->flags.pending_resume)) {
-                       mhi_log(MHI_MSG_INFO,
-                               "There is a pending resume, doing nothing.\n");
-                       return;
-               }
-               ret_val = mhi_init_state_transition(mhi_dev_ctxt,
-                               STATE_TRANSITION_WAKE);
-               if (0 != ret_val) {
-                       mhi_log(MHI_MSG_CRITICAL,
-                               "Failed to init state transition, to %d\n",
-                               STATE_TRANSITION_WAKE);
+
+               if (mhi_dev_ctxt->flags.mhi_initialized) {
+                       pm_runtime_get(&mhi_dev_ctxt->
+                                      dev_info->pcie_device->dev);
+                       pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+                                                 dev_info->pcie_device->dev);
+                       pm_runtime_put_noidle(&mhi_dev_ctxt->
+                                             dev_info->pcie_device->dev);
                }
                break;
        default:
@@ -255,12 +211,6 @@ int init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
        int r = 0;
 
-       mhi_assert_device_wake(mhi_dev_ctxt);
-       mhi_dev_ctxt->flags.link_up = 1;
-       r = mhi_set_bus_request(mhi_dev_ctxt, 1);
-       if (r)
-               mhi_log(MHI_MSG_INFO,
-                       "Failed to scale bus request to active set.\n");
        r = mhi_init_state_transition(mhi_dev_ctxt, mhi_dev_ctxt->base_state);
        if (r) {
                mhi_log(MHI_MSG_CRITICAL,
index ca4520a..1021a56 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 
-static inline void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
+const char *state_transition_str(enum STATE_TRANSITION state)
+{
+       static const char * const mhi_states_transition_str[] = {
+               "RESET",
+               "READY",
+               "M0",
+               "M1",
+               "M2",
+               "M3",
+               "BHI",
+               "SBL",
+               "AMSS",
+               "LINK_DOWN",
+               "WAKE"
+       };
+
+       if (state == STATE_TRANSITION_SYS_ERR)
+               return "SYS_ERR";
+
+       return (state <= STATE_TRANSITION_WAKE) ?
+               mhi_states_transition_str[state] : "Invalid";
+}
+
+enum MHI_STATE mhi_get_m_state(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+       u32 state = mhi_reg_read_field(mhi_dev_ctxt->mmio_info.mmio_addr,
+                                      MHISTATUS,
+                                      MHISTATUS_MHISTATE_MASK,
+                                      MHISTATUS_MHISTATE_SHIFT);
+
+       return (state >= MHI_STATE_LIMIT) ? MHI_STATE_LIMIT : state;
+}
+
+void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
                                        enum MHI_STATE new_state)
 {
        if (MHI_STATE_RESET == new_state) {
@@ -41,23 +74,22 @@ static void conditional_chan_db_write(
 {
        u64 db_value;
        unsigned long flags;
-
-       mhi_dev_ctxt->mhi_chan_db_order[chan] = 0;
-       spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan], flags);
-       if (0 == mhi_dev_ctxt->mhi_chan_db_order[chan]) {
-               db_value =
-               mhi_v2p_addr(mhi_dev_ctxt,
-                       MHI_RING_TYPE_XFER_RING, chan,
-                       (uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp);
-               mhi_process_db(mhi_dev_ctxt,
-                              mhi_dev_ctxt->mmio_info.chan_db_addr,
-                              chan, db_value);
-       }
-       mhi_dev_ctxt->mhi_chan_db_order[chan] = 0;
-       spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[chan], flags);
+       struct mhi_ring *mhi_ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+
+       spin_lock_irqsave(&mhi_ring->ring_lock, flags);
+       db_value = mhi_v2p_addr(mhi_dev_ctxt,
+                               MHI_RING_TYPE_XFER_RING,
+                               chan,
+                               (uintptr_t)mhi_ring->wp);
+       mhi_ring->db_mode.process_db(mhi_dev_ctxt,
+                                    mhi_dev_ctxt->mmio_info.chan_db_addr,
+                                    chan,
+                                    db_value);
+       spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
 }
 
-static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
+static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt,
+                             bool reset_db_mode)
 {
        u32 i = 0;
        struct mhi_ring *local_ctxt = NULL;
@@ -66,40 +98,38 @@ static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
        for (i = 0; i < MHI_MAX_CHANNELS; ++i)
                if (VALID_CHAN_NR(i)) {
                        local_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[i];
-                       if (IS_HARDWARE_CHANNEL(i))
-                               mhi_dev_ctxt->flags.db_mode[i] = 1;
-                       if ((local_ctxt->wp != local_ctxt->rp) ||
-                          ((local_ctxt->wp != local_ctxt->rp) &&
-                           (local_ctxt->dir == MHI_IN)))
+
+                       /* Reset the DB Mode state to DB Mode */
+                       if (local_ctxt->db_mode.preserve_db_state == 0
+                           && reset_db_mode)
+                               local_ctxt->db_mode.db_mode = 1;
+
+                       if (local_ctxt->wp != local_ctxt->rp)
                                conditional_chan_db_write(mhi_dev_ctxt, i);
                }
 }
 
 static void ring_all_cmd_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
-       struct mutex *cmd_mutex = NULL;
        u64 db_value;
        u64 rp = 0;
        struct mhi_ring *local_ctxt = NULL;
 
        mhi_log(MHI_MSG_VERBOSE, "Ringing chan dbs\n");
-       cmd_mutex = &mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING];
-       mhi_dev_ctxt->cmd_ring_order = 0;
-       mutex_lock(cmd_mutex);
+
        local_ctxt = &mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
        rp = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_CMD_RING,
                                                PRIMARY_CMD_RING,
                                                (uintptr_t)local_ctxt->rp);
-       db_value =
-               mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_CMD_RING,
-                       PRIMARY_CMD_RING,
-                       (uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt[0].wp);
-       if (0 == mhi_dev_ctxt->cmd_ring_order && rp != db_value)
-               mhi_process_db(mhi_dev_ctxt,
-                              mhi_dev_ctxt->mmio_info.cmd_db_addr,
-                                                       0, db_value);
-       mhi_dev_ctxt->cmd_ring_order = 0;
-       mutex_unlock(cmd_mutex);
+       db_value = mhi_v2p_addr(mhi_dev_ctxt,
+                               MHI_RING_TYPE_CMD_RING,
+                               PRIMARY_CMD_RING,
+                               (uintptr_t)local_ctxt->wp);
+       if (rp != db_value)
+               local_ctxt->db_mode.process_db(mhi_dev_ctxt,
+                               mhi_dev_ctxt->mmio_info.cmd_db_addr,
+                               0,
+                               db_value);
 }
 
 static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
@@ -107,24 +137,23 @@ static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
        u32 i;
        u64 db_value = 0;
        struct mhi_event_ctxt *event_ctxt = NULL;
+       struct mhi_ring *mhi_ring;
        spinlock_t *lock = NULL;
        unsigned long flags;
 
        for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
-               lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[i];
-               mhi_dev_ctxt->mhi_ev_db_order[i] = 0;
+               mhi_ring = &mhi_dev_ctxt->mhi_local_event_ctxt[i];
+               lock = &mhi_ring->ring_lock;
                spin_lock_irqsave(lock, flags);
                event_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i];
-               db_value =
-                mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_EVENT_RING,
-                       i,
-                       (uintptr_t)mhi_dev_ctxt->mhi_local_event_ctxt[i].wp);
-               if (0 == mhi_dev_ctxt->mhi_ev_db_order[i]) {
-                       mhi_process_db(mhi_dev_ctxt,
-                                      mhi_dev_ctxt->mmio_info.event_db_addr,
-                                      i, db_value);
-               }
-               mhi_dev_ctxt->mhi_ev_db_order[i] = 0;
+               db_value = mhi_v2p_addr(mhi_dev_ctxt,
+                                       MHI_RING_TYPE_EVENT_RING,
+                                       i,
+                                       (uintptr_t)mhi_ring->wp);
+               mhi_ring->db_mode.process_db(mhi_dev_ctxt,
+                               mhi_dev_ctxt->mmio_info.event_db_addr,
+                               i,
+                               db_value);
                spin_unlock_irqrestore(lock, flags);
        }
 }
@@ -133,168 +162,121 @@ static int process_m0_transition(
                        struct mhi_device_ctxt *mhi_dev_ctxt,
                        enum STATE_TRANSITION cur_work_item)
 {
-       unsigned long flags;
-       int r = 0;
 
-       mhi_log(MHI_MSG_INFO, "Entered\n");
+       mhi_log(MHI_MSG_INFO, "Entered With State %s\n",
+               TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
 
-       if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
+       switch (mhi_dev_ctxt->mhi_state) {
+       case MHI_STATE_M2:
                mhi_dev_ctxt->counters.m2_m0++;
-       } else if (mhi_dev_ctxt->mhi_state == MHI_STATE_M3) {
-                       mhi_dev_ctxt->counters.m3_m0++;
-       } else if (mhi_dev_ctxt->mhi_state == MHI_STATE_READY) {
-               mhi_log(MHI_MSG_INFO,
-                       "Transitioning from READY.\n");
-       } else if (mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
-               mhi_log(MHI_MSG_INFO,
-                       "Transitioning from M1.\n");
-       } else {
-               mhi_log(MHI_MSG_INFO,
-                       "MHI State %d link state %d. Quitting\n",
-                       mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.link_up);
+               break;
+       case MHI_STATE_M3:
+               mhi_dev_ctxt->counters.m3_m0++;
+               break;
+       default:
+               break;
        }
 
-       read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
+       write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
        mhi_dev_ctxt->mhi_state = MHI_STATE_M0;
-       atomic_inc(&mhi_dev_ctxt->flags.data_pending);
-       mhi_assert_device_wake(mhi_dev_ctxt);
-       read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+       mhi_dev_ctxt->mhi_pm_state = MHI_PM_M0;
+       write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+       read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+       mhi_assert_device_wake(mhi_dev_ctxt, true);
 
        if (mhi_dev_ctxt->flags.mhi_initialized) {
                ring_all_ev_dbs(mhi_dev_ctxt);
-               ring_all_chan_dbs(mhi_dev_ctxt);
+               ring_all_chan_dbs(mhi_dev_ctxt, true);
                ring_all_cmd_dbs(mhi_dev_ctxt);
        }
-       atomic_dec(&mhi_dev_ctxt->flags.data_pending);
-       r  = mhi_set_bus_request(mhi_dev_ctxt, 1);
-       if (r)
-               mhi_log(MHI_MSG_CRITICAL,
-                       "Could not set bus frequency ret: %d\n",
-                       r);
-       mhi_dev_ctxt->flags.pending_M0 = 0;
-       if (atomic_read(&mhi_dev_ctxt->flags.pending_powerup)) {
-               atomic_set(&mhi_dev_ctxt->flags.pending_ssr, 0);
-               atomic_set(&mhi_dev_ctxt->flags.pending_powerup, 0);
-       }
-       wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.m0_event);
-       write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-       if (!mhi_dev_ctxt->flags.pending_M3 &&
-            mhi_dev_ctxt->flags.link_up &&
-            mhi_dev_ctxt->flags.mhi_initialized)
-               mhi_deassert_device_wake(mhi_dev_ctxt);
-       write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
 
+       mhi_deassert_device_wake(mhi_dev_ctxt);
+       read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+       wake_up(mhi_dev_ctxt->mhi_ev_wq.m0_event);
        mhi_log(MHI_MSG_INFO, "Exited\n");
        return 0;
 }
 
-static int process_m1_transition(
-               struct mhi_device_ctxt  *mhi_dev_ctxt,
-               enum STATE_TRANSITION cur_work_item)
+void process_m1_transition(struct work_struct *work)
 {
-       unsigned long flags = 0;
-       int r = 0;
+       struct mhi_device_ctxt *mhi_dev_ctxt;
+
+       mhi_dev_ctxt = container_of(work,
+                                   struct mhi_device_ctxt,
+                                   process_m1_worker);
+       mutex_lock(&mhi_dev_ctxt->pm_lock);
+       write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 
        mhi_log(MHI_MSG_INFO,
-                       "Processing M1 state transition from state %d\n",
-                       mhi_dev_ctxt->mhi_state);
-
-       write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-       if (!mhi_dev_ctxt->flags.pending_M3) {
-               mhi_log(MHI_MSG_INFO, "Setting M2 Transition flag\n");
-               atomic_inc(&mhi_dev_ctxt->flags.m2_transition);
-               mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
-               mhi_log(MHI_MSG_INFO, "Allowing transition to M2\n");
-               mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M2);
-               mhi_dev_ctxt->counters.m1_m2++;
+               "Processing M1 state transition from state %s\n",
+               TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+
+       /* We either Entered M3 or we did M3->M0 Exit */
+       if (mhi_dev_ctxt->mhi_pm_state != MHI_PM_M1) {
+               write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+               mutex_unlock(&mhi_dev_ctxt->pm_lock);
+               return;
        }
-       write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-       r = mhi_set_bus_request(mhi_dev_ctxt, 0);
-       if (r)
-               mhi_log(MHI_MSG_INFO, "Failed to update bus request\n");
 
-       mhi_log(MHI_MSG_INFO, "Debouncing M2\n");
+       mhi_log(MHI_MSG_INFO, "Transitioning  to M2 Transition\n");
+       mhi_dev_ctxt->mhi_pm_state = MHI_PM_M1_M2_TRANSITION;
+       mhi_dev_ctxt->counters.m1_m2++;
+       mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
+       mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M2);
+       write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
        msleep(MHI_M2_DEBOUNCE_TMR_MS);
+       write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 
-       write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-       mhi_log(MHI_MSG_INFO, "Pending acks %d\n",
-               atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
-       if (atomic_read(&mhi_dev_ctxt->counters.outbound_acks) ||
-                        mhi_dev_ctxt->flags.pending_M3) {
-               mhi_assert_device_wake(mhi_dev_ctxt);
-       } else {
-               pm_runtime_mark_last_busy(
-                               &mhi_dev_ctxt->dev_info->pcie_device->dev);
-               r = pm_request_autosuspend(
-                               &mhi_dev_ctxt->dev_info->pcie_device->dev);
-               if (r && r != -EAGAIN) {
-                       mhi_log(MHI_MSG_ERROR,
-                               "Failed to remove counter ret %d\n", r);
-                       BUG_ON(mhi_dev_ctxt->dev_info->
-                               pcie_device->dev.power.runtime_error);
-               }
+       /* During DEBOUNCE Time We could be receiving M0 Event */
+       if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_M1_M2_TRANSITION) {
+               mhi_log(MHI_MSG_INFO, "Entered M2 State\n");
+               mhi_dev_ctxt->mhi_pm_state = MHI_PM_M2;
        }
-       atomic_set(&mhi_dev_ctxt->flags.m2_transition, 0);
-       mhi_log(MHI_MSG_INFO, "M2 transition complete.\n");
-       write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-       BUG_ON(atomic_read(&mhi_dev_ctxt->outbound_acks) < 0);
+       write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 
-       return 0;
+       if (unlikely(atomic_read(&mhi_dev_ctxt->counters.device_wake))) {
+               mhi_log(MHI_MSG_INFO, "Exiting M2 Immediately, count:%d\n",
+                       atomic_read(&mhi_dev_ctxt->counters.device_wake));
+               read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+               mhi_assert_device_wake(mhi_dev_ctxt, true);
+               mhi_deassert_device_wake(mhi_dev_ctxt);
+               read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+       } else {
+               mhi_log(MHI_MSG_INFO, "Schedule RPM suspend");
+               pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+                                         dev_info->pcie_device->dev);
+               pm_request_autosuspend(&mhi_dev_ctxt->
+                                      dev_info->pcie_device->dev);
+       }
+       mutex_unlock(&mhi_dev_ctxt->pm_lock);
 }
 
 static int process_m3_transition(
                struct mhi_device_ctxt *mhi_dev_ctxt,
                enum STATE_TRANSITION cur_work_item)
 {
-       unsigned long flags;
 
        mhi_log(MHI_MSG_INFO,
-                       "Processing M3 state transition\n");
-       write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-       mhi_dev_ctxt->mhi_state = MHI_STATE_M3;
-       mhi_dev_ctxt->flags.pending_M3 = 0;
-       wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.m3_event);
-       write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-       mhi_dev_ctxt->counters.m0_m3++;
-       return 0;
-}
-
-static int mhi_process_link_down(
-               struct mhi_device_ctxt *mhi_dev_ctxt)
-{
-       unsigned long flags;
-       int r;
-
-       mhi_log(MHI_MSG_INFO, "Entered.\n");
-       if (NULL == mhi_dev_ctxt)
-               return -EINVAL;
-
-       write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-       mhi_dev_ctxt->flags.mhi_initialized = 0;
-       mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
-       mhi_deassert_device_wake(mhi_dev_ctxt);
-       write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-
-       mhi_dev_ctxt->flags.stop_threads = 1;
+               "Entered with State %s\n",
+               TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
 
-       while (!mhi_dev_ctxt->flags.ev_thread_stopped) {
-               wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
-               mhi_log(MHI_MSG_INFO,
-                       "Waiting for threads to SUSPEND EVT: %d, STT: %d\n",
-                       mhi_dev_ctxt->flags.st_thread_stopped,
-                       mhi_dev_ctxt->flags.ev_thread_stopped);
-               msleep(20);
+       switch (mhi_dev_ctxt->mhi_state) {
+       case MHI_STATE_M1:
+               mhi_dev_ctxt->counters.m1_m3++;
+               break;
+       case MHI_STATE_M0:
+               mhi_dev_ctxt->counters.m0_m3++;
+               break;
+       default:
+               break;
        }
 
-       r = mhi_set_bus_request(mhi_dev_ctxt, 0);
-       if (r)
-               mhi_log(MHI_MSG_INFO,
-                       "Failed to scale bus request to sleep set.\n");
-       mhi_turn_off_pcie_link(mhi_dev_ctxt);
-       mhi_dev_ctxt->dev_info->link_down_cntr++;
-       atomic_set(&mhi_dev_ctxt->flags.data_pending, 0);
-       mhi_log(MHI_MSG_INFO, "Exited.\n");
-
+       write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+       mhi_dev_ctxt->mhi_state = MHI_STATE_M3;
+       mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3;
+       write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+       wake_up(mhi_dev_ctxt->mhi_ev_wq.m3_event);
        return 0;
 }
 
@@ -302,51 +284,20 @@ static int process_link_down_transition(
                        struct mhi_device_ctxt *mhi_dev_ctxt,
                        enum STATE_TRANSITION cur_work_item)
 {
-       mhi_log(MHI_MSG_INFO, "Entered\n");
-       if (0 !=
-                       mhi_process_link_down(mhi_dev_ctxt)) {
-               mhi_log(MHI_MSG_CRITICAL,
-                       "Failed to process link down\n");
-       }
-       mhi_log(MHI_MSG_INFO, "Exited.\n");
-       return 0;
+       mhi_log(MHI_MSG_INFO,
+               "Entered with State %s\n",
+               TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+       return -EIO;
 }
 
 static int process_wake_transition(
                        struct mhi_device_ctxt *mhi_dev_ctxt,
                        enum STATE_TRANSITION cur_work_item)
 {
-       int r = 0;
-
-       mhi_log(MHI_MSG_INFO, "Entered\n");
-       __pm_stay_awake(&mhi_dev_ctxt->w_lock);
-
-       if (atomic_read(&mhi_dev_ctxt->flags.pending_ssr)) {
-               mhi_log(MHI_MSG_CRITICAL,
-                       "Pending SSR, Ignoring.\n");
-               goto exit;
-       }
-       if (mhi_dev_ctxt->flags.mhi_initialized) {
-               r = pm_request_resume(
-                               &mhi_dev_ctxt->dev_info->pcie_device->dev);
-               mhi_log(MHI_MSG_VERBOSE,
-                       "MHI is initialized, transitioning to M0, ret %d\n", r);
-       }
-
-       if (!mhi_dev_ctxt->flags.mhi_initialized) {
-               mhi_log(MHI_MSG_INFO,
-                       "MHI is not initialized transitioning to base.\n");
-               r = init_mhi_base_state(mhi_dev_ctxt);
-               if (0 != r)
-                       mhi_log(MHI_MSG_CRITICAL,
-                               "Failed to transition to base state %d.\n",
-                               r);
-       }
-
-exit:
-       __pm_relax(&mhi_dev_ctxt->w_lock);
-       mhi_log(MHI_MSG_INFO, "Exited.\n");
-       return r;
+       mhi_log(MHI_MSG_INFO,
+               "Entered with State %s\n",
+               TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+       return -EIO;
 
 }
 
@@ -354,9 +305,10 @@ static int process_bhi_transition(
                        struct mhi_device_ctxt *mhi_dev_ctxt,
                        enum STATE_TRANSITION cur_work_item)
 {
-       mhi_turn_on_pcie_link(mhi_dev_ctxt);
        mhi_log(MHI_MSG_INFO, "Entered\n");
+       write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
        mhi_dev_ctxt->mhi_state = MHI_STATE_BHI;
+       write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
        wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
        mhi_log(MHI_MSG_INFO, "Exited\n");
        return 0;
@@ -369,36 +321,42 @@ static int process_ready_transition(
        int r = 0;
 
        mhi_log(MHI_MSG_INFO, "Processing READY state transition\n");
-       mhi_dev_ctxt->mhi_state = MHI_STATE_READY;
 
        r = mhi_reset_all_thread_queues(mhi_dev_ctxt);
-
-       if (r)
+       if (r) {
                mhi_log(MHI_MSG_ERROR,
                        "Failed to reset thread queues\n");
+               return r;
+       }
+
+       write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+       mhi_dev_ctxt->mhi_state = MHI_STATE_READY;
        r = mhi_init_mmio(mhi_dev_ctxt);
+       write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
        /* Initialize MMIO */
        if (r) {
                mhi_log(MHI_MSG_ERROR,
                        "Failure during MMIO initialization\n");
                return r;
        }
+       read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
        r = mhi_add_elements_to_event_rings(mhi_dev_ctxt,
                                cur_work_item);
-
+       read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
        if (r) {
                mhi_log(MHI_MSG_ERROR,
                        "Failure during event ring init\n");
                return r;
        }
 
+       write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
        mhi_dev_ctxt->flags.stop_threads = 0;
-       mhi_assert_device_wake(mhi_dev_ctxt);
        mhi_reg_write_field(mhi_dev_ctxt,
                        mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRL,
                        MHICTRL_MHISTATE_MASK,
                        MHICTRL_MHISTATE_SHIFT,
                        MHI_STATE_M0);
+       write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
        return r;
 }
 
@@ -421,37 +379,22 @@ static int process_reset_transition(
                        enum STATE_TRANSITION cur_work_item)
 {
        int r = 0, i = 0;
-       unsigned long flags = 0;
-
        mhi_log(MHI_MSG_INFO, "Processing RESET state transition\n");
-       write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
+       write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
        mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
-       write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+       write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
        mhi_dev_ctxt->counters.mhi_reset_cntr++;
-       mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_PBL;
        r = mhi_test_for_device_reset(mhi_dev_ctxt);
        if (r)
                mhi_log(MHI_MSG_INFO, "Device not RESET ret %d\n", r);
        r = mhi_test_for_device_ready(mhi_dev_ctxt);
-       switch (r) {
-       case 0:
-               break;
-       case -ENOTCONN:
-               mhi_log(MHI_MSG_CRITICAL, "Link down detected\n");
-               break;
-       case -ETIMEDOUT:
-               r = mhi_init_state_transition(mhi_dev_ctxt,
-                                       STATE_TRANSITION_RESET);
-               if (0 != r)
-                       mhi_log(MHI_MSG_CRITICAL,
-                               "Failed to initiate 0x%x state trans\n",
-                               STATE_TRANSITION_RESET);
-               break;
-       default:
-               mhi_log(MHI_MSG_CRITICAL,
-                       "Unexpected ret code detected for\n");
-               break;
+       if (r) {
+               mhi_log(MHI_MSG_ERROR, "timed out waiting for ready ret:%d\n",
+                       r);
+               return r;
        }
+
        for (i = 0; i < NR_OF_CMD_RINGS; ++i) {
                mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp =
                                mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;
@@ -475,8 +418,8 @@ static int process_reset_transition(
                                STATE_TRANSITION_READY);
        if (0 != r)
                mhi_log(MHI_MSG_CRITICAL,
-               "Failed to initiate 0x%x state trans\n",
-               STATE_TRANSITION_READY);
+                       "Failed to initiate %s state trans\n",
+                       state_transition_str(STATE_TRANSITION_READY));
        return r;
 }
 
@@ -484,45 +427,10 @@ static int process_syserr_transition(
                        struct mhi_device_ctxt *mhi_dev_ctxt,
                        enum STATE_TRANSITION cur_work_item)
 {
-       int r = 0;
-
-       mhi_log(MHI_MSG_CRITICAL, "Received SYS ERROR. Resetting MHI\n");
-       mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
-       r = mhi_init_state_transition(mhi_dev_ctxt,
-                                       STATE_TRANSITION_RESET);
-       if (r) {
-               mhi_log(MHI_MSG_ERROR,
-                       "Failed to init state transition to RESET ret %d\n", r);
-               mhi_log(MHI_MSG_CRITICAL, "Failed to reset mhi\n");
-       }
-       return r;
-}
-
-int start_chan_sync(struct mhi_client_handle *client_handle)
-{
-       int r = 0;
-       int chan = client_handle->chan_info.chan_nr;
-
-       init_completion(&client_handle->chan_open_complete);
-       r = mhi_send_cmd(client_handle->mhi_dev_ctxt,
-                              MHI_COMMAND_START_CHAN,
-                              chan);
-       if (r != 0) {
-               mhi_log(MHI_MSG_ERROR,
-                       "Failed to send start command for chan %d ret %d\n",
-                       chan, r);
-               return r;
-       }
-       r = wait_for_completion_timeout(
-                       &client_handle->chan_open_complete,
-                       msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT));
-       if (!r) {
-               mhi_log(MHI_MSG_ERROR,
-                          "Timed out waiting for chan %d start completion\n",
-                           chan);
-               r = -ETIME;
-       }
-       return 0;
+       mhi_log(MHI_MSG_INFO,
+               "Entered with State %s\n",
+               TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+       return -EIO;
 }
 
 static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -546,8 +454,7 @@ static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
                                                chan_info.flags))
                        mhi_notify_client(client_handle, MHI_CB_MHI_ENABLED);
        }
-       if (exec_env == MHI_EXEC_ENV_AMSS)
-               mhi_deassert_device_wake(mhi_dev_ctxt);
+
        mhi_log(MHI_MSG_INFO, "Done.\n");
 }
 
@@ -555,36 +462,25 @@ static int process_sbl_transition(
                                struct mhi_device_ctxt *mhi_dev_ctxt,
                                enum STATE_TRANSITION cur_work_item)
 {
-       int r = 0;
-       pm_runtime_set_autosuspend_delay(
-                                &mhi_dev_ctxt->dev_info->pcie_device->dev,
-                                MHI_RPM_AUTOSUSPEND_TMR_VAL_MS);
-       pm_runtime_use_autosuspend(&mhi_dev_ctxt->dev_info->pcie_device->dev);
-       r = pm_runtime_set_active(&mhi_dev_ctxt->dev_info->pcie_device->dev);
-       if (r) {
-               mhi_log(MHI_MSG_ERROR,
-               "Failed to activate runtime pm ret %d\n", r);
-       }
-       pm_runtime_enable(&mhi_dev_ctxt->dev_info->pcie_device->dev);
-       mhi_log(MHI_MSG_INFO, "Enabled runtime pm autosuspend\n");
+       mhi_log(MHI_MSG_INFO, "Enabled\n");
+       write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
        mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_SBL;
+       write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
        enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
-       pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
        return 0;
-
 }
 
 static int process_amss_transition(
                                struct mhi_device_ctxt *mhi_dev_ctxt,
                                enum STATE_TRANSITION cur_work_item)
 {
-       int r = 0, i = 0;
-       struct mhi_client_handle *client_handle = NULL;
+       int r = 0;
 
        mhi_log(MHI_MSG_INFO, "Processing AMSS state transition\n");
+       write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
        mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_AMSS;
-       atomic_inc(&mhi_dev_ctxt->flags.data_pending);
-       mhi_assert_device_wake(mhi_dev_ctxt);
+       write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
        if (!mhi_dev_ctxt->flags.mhi_initialized) {
                r = mhi_add_elements_to_event_rings(mhi_dev_ctxt,
                                        cur_work_item);
@@ -592,54 +488,40 @@ static int process_amss_transition(
                if (r) {
                        mhi_log(MHI_MSG_CRITICAL,
                                "Failed to set local chan state ret %d\n", r);
+                       mhi_deassert_device_wake(mhi_dev_ctxt);
                        return r;
                }
-               ring_all_chan_dbs(mhi_dev_ctxt);
+               read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+               ring_all_chan_dbs(mhi_dev_ctxt, true);
+               read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
                mhi_log(MHI_MSG_INFO,
                        "Notifying clients that MHI is enabled\n");
                enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
        } else {
                mhi_log(MHI_MSG_INFO, "MHI is initialized\n");
-               for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
-                       client_handle = mhi_dev_ctxt->client_handle_list[i];
-                       if (client_handle && client_handle->chan_status)
-                               r = start_chan_sync(client_handle);
-                               WARN(r, "Failed to start chan %d ret %d\n",
-                                       i, r);
-                               return r;
-               }
-               ring_all_chan_dbs(mhi_dev_ctxt);
        }
+
+       read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
        ring_all_ev_dbs(mhi_dev_ctxt);
-       atomic_dec(&mhi_dev_ctxt->flags.data_pending);
-       if (!mhi_dev_ctxt->flags.pending_M3 &&
-            mhi_dev_ctxt->flags.link_up)
-               mhi_deassert_device_wake(mhi_dev_ctxt);
-       mhi_log(MHI_MSG_INFO, "Exited\n");
-       return 0;
-}
+       read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
 
-int mhi_trigger_reset(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
-       int r = 0;
-       unsigned long flags = 0;
+       /*
+        * runtime_allow will decrement usage_count, counts were
+        * incremented by pci fw pci_pm_init() or by
+        * mhi shutdown/ssr apis.
+        */
+       mhi_log(MHI_MSG_INFO, "Allow runtime suspend\n");
 
-       mhi_log(MHI_MSG_INFO, "Entered\n");
-       write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-       mhi_dev_ctxt->mhi_state = MHI_STATE_SYS_ERR;
-       write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+       pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+       pm_runtime_allow(&mhi_dev_ctxt->dev_info->pcie_device->dev);
 
-       mhi_log(MHI_MSG_INFO, "Setting RESET to MDM.\n");
-       mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_RESET);
-       mhi_log(MHI_MSG_INFO, "Transitioning state to RESET\n");
-       r = mhi_init_state_transition(mhi_dev_ctxt,
-                                           STATE_TRANSITION_RESET);
-       if (0 != r)
-               mhi_log(MHI_MSG_CRITICAL,
-                       "Failed to initiate 0x%x state trans ret %d\n",
-                       STATE_TRANSITION_RESET, r);
-       mhi_log(MHI_MSG_INFO, "Exiting\n");
-       return r;
+       /* During probe we incremented, releasing that count */
+       read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+       mhi_deassert_device_wake(mhi_dev_ctxt);
+       read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+       mhi_log(MHI_MSG_INFO, "Exited\n");
+       return 0;
 }
 
 static int process_stt_work_item(
@@ -648,8 +530,8 @@ static int process_stt_work_item(
 {
        int r = 0;
 
-       mhi_log(MHI_MSG_INFO, "Transitioning to %d\n",
-                               (int)cur_work_item);
+       mhi_log(MHI_MSG_INFO, "Transitioning to %s\n",
+               state_transition_str(cur_work_item));
        trace_mhi_state(cur_work_item);
        switch (cur_work_item) {
        case STATE_TRANSITION_BHI:
@@ -670,9 +552,6 @@ static int process_stt_work_item(
        case STATE_TRANSITION_M0:
                r = process_m0_transition(mhi_dev_ctxt, cur_work_item);
                break;
-       case STATE_TRANSITION_M1:
-               r = process_m1_transition(mhi_dev_ctxt, cur_work_item);
-               break;
        case STATE_TRANSITION_M3:
                r = process_m3_transition(mhi_dev_ctxt, cur_work_item);
                break;
@@ -689,7 +568,8 @@ static int process_stt_work_item(
                break;
        default:
                mhi_log(MHI_MSG_ERROR,
-                               "Unrecongized state: %d\n", cur_work_item);
+                       "Unrecongized state: %s\n",
+                       state_transition_str(cur_work_item));
                break;
        }
        return r;
@@ -762,8 +642,8 @@ int mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
 
        BUG_ON(nr_avail_work_items <= 0);
        mhi_log(MHI_MSG_VERBOSE,
-               "Processing state transition %x\n",
-               new_state);
+               "Processing state transition %s\n",
+               state_transition_str(new_state));
        *(enum STATE_TRANSITION *)stt_ring->wp = new_state;
        r = ctxt_add_element(stt_ring, (void **)&cur_work_item);
        BUG_ON(r);
@@ -771,216 +651,3 @@ int mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
        wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
        return r;
 }
-
-int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
-       int r = 0;
-       unsigned long flags;
-
-       mhi_log(MHI_MSG_INFO,
-               "Entered MHI state %d, Pending M0 %d Pending M3 %d\n",
-               mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0,
-                                       mhi_dev_ctxt->flags.pending_M3);
-       mutex_lock(&mhi_dev_ctxt->pm_lock);
-       mhi_log(MHI_MSG_INFO,
-               "Waiting for M0 M1 or M3. Currently %d...\n",
-                                       mhi_dev_ctxt->mhi_state);
-
-       r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
-                       mhi_dev_ctxt->mhi_state == MHI_STATE_M3 ||
-                       mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
-                       mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
-               msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
-       switch (r) {
-       case 0:
-               mhi_log(MHI_MSG_CRITICAL,
-                       "Timeout: State %d after %d ms\n",
-                               mhi_dev_ctxt->mhi_state,
-                               MHI_MAX_SUSPEND_TIMEOUT);
-               mhi_dev_ctxt->counters.m0_event_timeouts++;
-               r = -ETIME;
-               goto exit;
-       case -ERESTARTSYS:
-               mhi_log(MHI_MSG_CRITICAL,
-                       "Going Down...\n");
-               goto exit;
-       default:
-               mhi_log(MHI_MSG_INFO,
-                       "Wait complete state: %d\n", mhi_dev_ctxt->mhi_state);
-               r = 0;
-               break;
-       }
-       if (mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
-           mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
-               mhi_assert_device_wake(mhi_dev_ctxt);
-               mhi_log(MHI_MSG_INFO,
-                               "MHI state %d, done\n",
-                                       mhi_dev_ctxt->mhi_state);
-               goto exit;
-       } else {
-               if (0 != mhi_turn_on_pcie_link(mhi_dev_ctxt)) {
-                       mhi_log(MHI_MSG_CRITICAL,
-                                       "Failed to resume link\n");
-                       r = -EIO;
-                       goto exit;
-               }
-
-               write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-               mhi_log(MHI_MSG_VERBOSE, "Setting M0 ...\n");
-               if (mhi_dev_ctxt->flags.pending_M3) {
-                       mhi_log(MHI_MSG_INFO,
-                               "Pending M3 detected, aborting M0 procedure\n");
-                       write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock,
-                                                               flags);
-                       r = -EPERM;
-                       goto exit;
-               }
-               if (mhi_dev_ctxt->flags.link_up) {
-                       mhi_dev_ctxt->flags.pending_M0 = 1;
-                       mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0);
-               }
-               write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-               r = wait_event_interruptible_timeout(
-                               *mhi_dev_ctxt->mhi_ev_wq.m0_event,
-                               mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
-                               mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
-                               msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
-               WARN_ON(!r || -ERESTARTSYS == r);
-               if (!r || -ERESTARTSYS == r)
-                       mhi_log(MHI_MSG_ERROR,
-                               "Failed to get M0 event ret %d\n", r);
-               r = 0;
-       }
-exit:
-       mutex_unlock(&mhi_dev_ctxt->pm_lock);
-       mhi_log(MHI_MSG_INFO, "Exited...\n");
-       return r;
-}
-
-int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
-
-       unsigned long flags;
-       int r = 0, abort_m3 = 0;
-
-       mhi_log(MHI_MSG_INFO,
-               "Entered MHI state %d, Pending M0 %d Pending M3 %d\n",
-               mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0,
-                                       mhi_dev_ctxt->flags.pending_M3);
-       mutex_lock(&mhi_dev_ctxt->pm_lock);
-       switch (mhi_dev_ctxt->mhi_state) {
-       case MHI_STATE_RESET:
-               mhi_log(MHI_MSG_INFO,
-                               "MHI in RESET turning link off and quitting\n");
-               mhi_turn_off_pcie_link(mhi_dev_ctxt);
-               r = mhi_set_bus_request(mhi_dev_ctxt, 0);
-               if (r)
-                       mhi_log(MHI_MSG_INFO,
-                                       "Failed to set bus freq ret %d\n", r);
-               goto exit;
-       case MHI_STATE_M0:
-       case MHI_STATE_M1:
-       case MHI_STATE_M2:
-               mhi_log(MHI_MSG_INFO,
-                       "Triggering wake out of M2\n");
-               write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-               mhi_dev_ctxt->flags.pending_M3 = 1;
-               if ((atomic_read(&mhi_dev_ctxt->flags.m2_transition)) == 0) {
-                       mhi_log(MHI_MSG_INFO,
-                               "M2 transition not set\n");
-                       mhi_assert_device_wake(mhi_dev_ctxt);
-               }
-               write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-               r = wait_event_interruptible_timeout(
-                               *mhi_dev_ctxt->mhi_ev_wq.m0_event,
-                               mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
-                               mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
-                               msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
-               if (0 == r || -ERESTARTSYS == r) {
-                       mhi_log(MHI_MSG_CRITICAL,
-                               "MDM failed to come out of M2.\n");
-                       mhi_dev_ctxt->counters.m2_event_timeouts++;
-                       r = -EAGAIN;
-                       goto exit;
-               }
-               break;
-       case MHI_STATE_M3:
-               mhi_log(MHI_MSG_INFO,
-                       "MHI state %d, link state %d.\n",
-                               mhi_dev_ctxt->mhi_state,
-                               mhi_dev_ctxt->flags.link_up);
-               if (mhi_dev_ctxt->flags.link_up)
-                       r = -EAGAIN;
-               else
-                       r = 0;
-               goto exit;
-       default:
-               mhi_log(MHI_MSG_INFO,
-                       "MHI state %d, link state %d.\n",
-                               mhi_dev_ctxt->mhi_state,
-                               mhi_dev_ctxt->flags.link_up);
-               break;
-       }
-       while (atomic_read(&mhi_dev_ctxt->counters.outbound_acks)) {
-               mhi_log(MHI_MSG_INFO,
-                       "There are still %d acks pending from device\n",
-                       atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
-                       __pm_stay_awake(&mhi_dev_ctxt->w_lock);
-                       __pm_relax(&mhi_dev_ctxt->w_lock);
-               abort_m3 = 1;
-               r = -EAGAIN;
-               goto exit;
-       }
-
-       if (atomic_read(&mhi_dev_ctxt->flags.data_pending)) {
-               abort_m3 = 1;
-               r = -EAGAIN;
-               goto exit;
-       }
-       write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-       if (mhi_dev_ctxt->flags.pending_M0) {
-               write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-               r = -EAGAIN;
-               goto exit;
-       }
-       mhi_dev_ctxt->flags.pending_M3 = 1;
-
-       mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
-       write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-
-       mhi_log(MHI_MSG_INFO,
-                       "Waiting for M3 completion.\n");
-       r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
-                       mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
-               msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
-       switch (r) {
-       case 0:
-               mhi_log(MHI_MSG_CRITICAL,
-                       "MDM failed to suspend after %d ms\n",
-                       MHI_MAX_SUSPEND_TIMEOUT);
-               mhi_dev_ctxt->counters.m3_event_timeouts++;
-               mhi_dev_ctxt->flags.pending_M3 = 0;
-               goto exit;
-       default:
-               mhi_log(MHI_MSG_INFO,
-                       "M3 completion received\n");
-               break;
-       }
-       mhi_turn_off_pcie_link(mhi_dev_ctxt);
-       r = mhi_set_bus_request(mhi_dev_ctxt, 0);
-       if (r)
-               mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
-exit:
-       if (abort_m3) {
-               write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-               atomic_inc(&mhi_dev_ctxt->flags.data_pending);
-               write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-               ring_all_chan_dbs(mhi_dev_ctxt);
-               ring_all_cmd_dbs(mhi_dev_ctxt);
-               atomic_dec(&mhi_dev_ctxt->flags.data_pending);
-               mhi_deassert_device_wake(mhi_dev_ctxt);
-       }
-       mhi_dev_ctxt->flags.pending_M3 = 0;
-       mutex_unlock(&mhi_dev_ctxt->pm_lock);
-       return r;
-}
index b865277..c5c025b 100644 (file)
@@ -21,9 +21,9 @@
 enum MHI_DEBUG_LEVEL mhi_msg_lvl = MHI_MSG_ERROR;
 
 #ifdef CONFIG_MSM_MHI_DEBUG
-       enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_VERBOSE;
+enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_VERBOSE;
 #else
-       enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_ERROR;
+enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_ERROR;
 #endif
 
 unsigned int mhi_log_override;
@@ -34,6 +34,18 @@ MODULE_PARM_DESC(mhi_msg_lvl, "dbg lvl");
 module_param(mhi_ipc_log_lvl, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(mhi_ipc_log_lvl, "dbg lvl");
 
+const char * const mhi_states_str[MHI_STATE_LIMIT] = {
+       "RESET",
+       "READY",
+       "M0",
+       "M1",
+       "M2",
+       "M3",
+       "Reserved: 0x06",
+       "BHI",
+       "SYS_ERR",
+};
+
 static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
                                size_t count, loff_t *offp)
 {
@@ -46,6 +58,7 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
        int valid_chan = 0;
        struct mhi_chan_ctxt *cc_list;
        struct mhi_client_handle *client_handle;
+       int pkts_queued;
 
        if (NULL == mhi_dev_ctxt)
                return -EIO;
@@ -74,35 +87,37 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
                        mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp,
                        &v_wp_index);
 
+       pkts_queued = client_handle->chan_info.max_desc -
+               get_nr_avail_ring_elements(&mhi_dev_ctxt->
+                                          mhi_local_chan_ctxt[*offp]) - 1;
        amnt_copied =
        scnprintf(mhi_dev_ctxt->chan_info,
-               MHI_LOG_SIZE,
-               "%s0x%x %s %d %s 0x%x %s 0x%llx %s %p %s %p %s %lu %s %p %s %lu %s %d %s %d %s %u\n",
-               "chan:",
-               (unsigned int)*offp,
-               "pkts from dev:",
-               mhi_dev_ctxt->counters.chan_pkts_xferd[*offp],
-               "state:",
-               chan_ctxt->mhi_chan_state,
-               "p_base:",
-               chan_ctxt->mhi_trb_ring_base_addr,
-               "v_base:",
-               mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].base,
-               "v_wp:",
-               mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp,
-               "index:",
-               v_wp_index,
-               "v_rp:",
-               mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].rp,
-               "index:",
-               v_rp_index,
-               "pkts_queued",
-               get_nr_avail_ring_elements(
-               &mhi_dev_ctxt->mhi_local_chan_ctxt[*offp]),
-               "/",
-               client_handle->chan_info.max_desc,
-               "bb_used:",
-               mhi_dev_ctxt->counters.bb_used[*offp]);
+                 MHI_LOG_SIZE,
+                 "%s0x%x %s %d %s 0x%x %s 0x%llx %s %p %s %p %s %lu %s %p %s %lu %s %d %s %d %s %u\n",
+                 "chan:",
+                 (unsigned int)*offp,
+                 "pkts from dev:",
+                 mhi_dev_ctxt->counters.chan_pkts_xferd[*offp],
+                 "state:",
+                 chan_ctxt->chstate,
+                 "p_base:",
+                 chan_ctxt->mhi_trb_ring_base_addr,
+                 "v_base:",
+                 mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].base,
+                 "v_wp:",
+                 mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp,
+                 "index:",
+                 v_wp_index,
+                 "v_rp:",
+                 mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].rp,
+                 "index:",
+                 v_rp_index,
+                 "pkts_queued",
+                 pkts_queued,
+                 "/",
+                 client_handle->chan_info.max_desc,
+                 "bb_used:",
+                 mhi_dev_ctxt->counters.bb_used[*offp]);
 
        *offp += 1;
 
@@ -224,39 +239,37 @@ static ssize_t mhi_dbgfs_state_read(struct file *fp, char __user *buf,
        msleep(100);
        amnt_copied =
        scnprintf(mhi_dev_ctxt->chan_info,
-                       MHI_LOG_SIZE,
-                       "%s %u %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d, %s, %d, %s %d\n",
-                       "Our State:",
-                       mhi_dev_ctxt->mhi_state,
-                       "M0->M1:",
-                       mhi_dev_ctxt->counters.m0_m1,
-                       "M0<-M1:",
-                       mhi_dev_ctxt->counters.m1_m0,
-                       "M1->M2:",
-                       mhi_dev_ctxt->counters.m1_m2,
-                       "M0<-M2:",
-                       mhi_dev_ctxt->counters.m2_m0,
-                       "M0->M3:",
-                       mhi_dev_ctxt->counters.m0_m3,
-                       "M0<-M3:",
-                       mhi_dev_ctxt->counters.m3_m0,
-                       "M3_ev_TO:",
-                       mhi_dev_ctxt->counters.m3_event_timeouts,
-                       "M0_ev_TO:",
-                       mhi_dev_ctxt->counters.m0_event_timeouts,
-                       "MSI_d:",
-                       mhi_dev_ctxt->counters.msi_disable_cntr,
-                       "MSI_e:",
-                       mhi_dev_ctxt->counters.msi_enable_cntr,
-                       "outstanding_acks:",
-                       atomic_read(&mhi_dev_ctxt->counters.outbound_acks),
-                       "LPM:",
-                       mhi_dev_ctxt->enable_lpm);
+                 MHI_LOG_SIZE,
+                 "%s %s %s 0x%02x %s %u %s %u %s %u %s %u %s %u %s %u %s %d %s %d %s %d\n",
+                 "MHI State:",
+                 TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
+                 "PM State:",
+                 mhi_dev_ctxt->mhi_pm_state,
+                 "M0->M1:",
+                 mhi_dev_ctxt->counters.m0_m1,
+                 "M1->M2:",
+                 mhi_dev_ctxt->counters.m1_m2,
+                 "M2->M0:",
+                 mhi_dev_ctxt->counters.m2_m0,
+                 "M0->M3:",
+                 mhi_dev_ctxt->counters.m0_m3,
+                 "M1->M3:",
+                 mhi_dev_ctxt->counters.m1_m3,
+                 "M3->M0:",
+                 mhi_dev_ctxt->counters.m3_m0,
+                 "device_wake:",
+                 atomic_read(&mhi_dev_ctxt->counters.device_wake),
+                 "usage_count:",
+                 atomic_read(&mhi_dev_ctxt->dev_info->pcie_device->dev.
+                             power.usage_count),
+                 "outbound_acks:",
+                 atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
        if (amnt_copied < count)
                return amnt_copied - copy_to_user(buf,
                                mhi_dev_ctxt->chan_info, amnt_copied);
        else
                return -ENOMEM;
+       return 0;
 }
 
 static const struct file_operations mhi_dbgfs_state_fops = {
index 765fd46..a948a23 100644 (file)
@@ -46,6 +46,10 @@ extern void *mhi_ipc_log;
                               "[%s] " _msg, __func__, ##__VA_ARGS__);  \
 } while (0)
 
+extern const char * const mhi_states_str[MHI_STATE_LIMIT];
+#define TO_MHI_STATE_STR(state) (((state) >= MHI_STATE_LIMIT) ? \
+                                "INVALID_STATE" : mhi_states_str[state])
+
 irqreturn_t mhi_msi_handlr(int msi_number, void *dev_id);
 
 struct mhi_meminfo {
index 4039b43..4973f91 100644 (file)
@@ -635,14 +635,16 @@ static void handle_main_charge_type(struct pl_data *chip)
 }
 
 #define MIN_ICL_CHANGE_DELTA_UA                300000
-static void handle_settled_aicl_split(struct pl_data *chip)
+static void handle_settled_icl_change(struct pl_data *chip)
 {
        union power_supply_propval pval = {0, };
        int rc;
 
-       if (!get_effective_result(chip->pl_disable_votable)
-               && (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN
-                       || chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT)) {
+       if (get_effective_result(chip->pl_disable_votable))
+               return;
+
+       if (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN
+                       || chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT) {
                /*
                 * call aicl split only when USBIN_USBIN and enabled
                 * and if aicl changed
@@ -659,6 +661,8 @@ static void handle_settled_aicl_split(struct pl_data *chip)
                if (abs((chip->main_settled_ua - chip->pl_settled_ua)
                                - pval.intval) > MIN_ICL_CHANGE_DELTA_UA)
                        split_settled(chip);
+       } else {
+               rerun_election(chip->fcc_votable);
        }
 }
 
@@ -705,7 +709,7 @@ static void status_change_work(struct work_struct *work)
        is_parallel_available(chip);
 
        handle_main_charge_type(chip);
-       handle_settled_aicl_split(chip);
+       handle_settled_icl_change(chip);
        handle_parallel_in_taper(chip);
 }
 
@@ -719,7 +723,8 @@ static int pl_notifier_call(struct notifier_block *nb,
                return NOTIFY_OK;
 
        if ((strcmp(psy->desc->name, "parallel") == 0)
-           || (strcmp(psy->desc->name, "battery") == 0))
+           || (strcmp(psy->desc->name, "battery") == 0)
+           || (strcmp(psy->desc->name, "main") == 0))
                schedule_work(&chip->status_change_work);
 
        return NOTIFY_OK;
index 7e5b239..64f4d46 100644 (file)
@@ -992,6 +992,9 @@ static int smb2_batt_set_prop(struct power_supply *psy,
                /* Not in ship mode as long as the device is active */
                if (!val->intval)
                        break;
+               if (chg->pl.psy)
+                       power_supply_set_property(chg->pl.psy,
+                               POWER_SUPPLY_PROP_SET_SHIP_MODE, val);
                rc = smblib_set_prop_ship_mode(chg, val);
                break;
        case POWER_SUPPLY_PROP_RERUN_AICL:
index 7c29954..fa6fd3e 100644 (file)
@@ -203,6 +203,7 @@ struct smb_iio {
        struct iio_channel      *usbin_i_chan;
        struct iio_channel      *usbin_v_chan;
        struct iio_channel      *batt_i_chan;
+       struct iio_channel      *connector_temp_chan;
        struct iio_channel      *connector_temp_thr1_chan;
        struct iio_channel      *connector_temp_thr2_chan;
        struct iio_channel      *connector_temp_thr3_chan;
index 6af9ce1..37dc154 100644 (file)
@@ -312,6 +312,7 @@ static enum power_supply_property smb138x_batt_props[] = {
        POWER_SUPPLY_PROP_CAPACITY,
        POWER_SUPPLY_PROP_CHARGER_TEMP,
        POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+       POWER_SUPPLY_PROP_SET_SHIP_MODE,
 };
 
 static int smb138x_batt_get_prop(struct power_supply *psy,
@@ -347,6 +348,10 @@ static int smb138x_batt_get_prop(struct power_supply *psy,
        case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
                rc = smblib_get_prop_charger_temp_max(chg, val);
                break;
+       case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+               /* Not in ship mode as long as device is active */
+               val->intval = 0;
+               break;
        default:
                pr_err("batt power supply get prop %d not supported\n", prop);
                return -EINVAL;
@@ -375,6 +380,12 @@ static int smb138x_batt_set_prop(struct power_supply *psy,
        case POWER_SUPPLY_PROP_CAPACITY:
                rc = smblib_set_prop_batt_capacity(chg, val);
                break;
+       case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+               /* Not in ship mode as long as the device is active */
+               if (!val->intval)
+                       break;
+               rc = smblib_set_prop_ship_mode(chg, val);
+               break;
        default:
                pr_err("batt power supply set prop %d not supported\n", prop);
                return -EINVAL;
@@ -430,6 +441,57 @@ static int smb138x_init_batt_psy(struct smb138x *chip)
  * PARALLEL PSY REGISTRATION *
  *****************************/
 
+static int smb138x_get_prop_connector_health(struct smb138x *chip)
+{
+       struct smb_charger *chg = &chip->chg;
+       int rc, lb_mdegc, ub_mdegc, rst_mdegc, connector_mdegc;
+
+       if (!chg->iio.connector_temp_chan ||
+               PTR_ERR(chg->iio.connector_temp_chan) == -EPROBE_DEFER)
+               chg->iio.connector_temp_chan = iio_channel_get(chg->dev,
+                                                       "connector_temp");
+
+       if (IS_ERR(chg->iio.connector_temp_chan))
+               return POWER_SUPPLY_HEALTH_UNKNOWN;
+
+       rc = iio_read_channel_processed(chg->iio.connector_temp_thr1_chan,
+                                                       &lb_mdegc);
+       if (rc < 0) {
+               pr_err("Couldn't read connector lower bound rc=%d\n", rc);
+               return POWER_SUPPLY_HEALTH_UNKNOWN;
+       }
+
+       rc = iio_read_channel_processed(chg->iio.connector_temp_thr2_chan,
+                                                       &ub_mdegc);
+       if (rc < 0) {
+               pr_err("Couldn't read connector upper bound rc=%d\n", rc);
+               return POWER_SUPPLY_HEALTH_UNKNOWN;
+       }
+
+       rc = iio_read_channel_processed(chg->iio.connector_temp_thr3_chan,
+                                                       &rst_mdegc);
+       if (rc < 0) {
+               pr_err("Couldn't read connector reset bound rc=%d\n", rc);
+               return POWER_SUPPLY_HEALTH_UNKNOWN;
+       }
+
+       rc = iio_read_channel_processed(chg->iio.connector_temp_chan,
+                                                       &connector_mdegc);
+       if (rc < 0) {
+               pr_err("Couldn't read connector temperature rc=%d\n", rc);
+               return POWER_SUPPLY_HEALTH_UNKNOWN;
+       }
+
+       if (connector_mdegc < lb_mdegc)
+               return POWER_SUPPLY_HEALTH_COOL;
+       else if (connector_mdegc < ub_mdegc)
+               return POWER_SUPPLY_HEALTH_WARM;
+       else if (connector_mdegc < rst_mdegc)
+               return POWER_SUPPLY_HEALTH_HOT;
+
+       return POWER_SUPPLY_HEALTH_OVERHEAT;
+}
+
 static enum power_supply_property smb138x_parallel_props[] = {
        POWER_SUPPLY_PROP_CHARGE_TYPE,
        POWER_SUPPLY_PROP_CHARGING_ENABLED,
@@ -443,6 +505,7 @@ static enum power_supply_property smb138x_parallel_props[] = {
        POWER_SUPPLY_PROP_MODEL_NAME,
        POWER_SUPPLY_PROP_PARALLEL_MODE,
        POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
+       POWER_SUPPLY_PROP_SET_SHIP_MODE,
 };
 
 static int smb138x_parallel_get_prop(struct power_supply *psy,
@@ -496,7 +559,11 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
                val->intval = POWER_SUPPLY_PL_USBMID_USBMID;
                break;
        case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
-               rc = smblib_get_prop_die_health(chg, val);
+               val->intval = smb138x_get_prop_connector_health(chip);
+               break;
+       case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+               /* Not in ship mode as long as device is active */
+               val->intval = 0;
                break;
        default:
                pr_err("parallel power supply get prop %d not supported\n",
@@ -558,6 +625,12 @@ static int smb138x_parallel_set_prop(struct power_supply *psy,
                rc = smblib_set_charge_param(chg, &chg->param.freq_buck,
                                             val->intval);
                break;
+       case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+               /* Not in ship mode as long as the device is active */
+               if (!val->intval)
+                       break;
+               rc = smblib_set_prop_ship_mode(chg, val);
+               break;
        default:
                pr_err("parallel power supply set prop %d not supported\n",
                        prop);
index 9be8bb9..2307da9 100644 (file)
@@ -37,7 +37,8 @@
 
 #define MSM8998_KBSS_FUSE_CORNERS      4
 #define SDM660_KBSS_FUSE_CORNERS       5
-#define SDM630_KBSS_FUSE_CORNERS       4
+#define SDM630_POWER_KBSS_FUSE_CORNERS 3
+#define SDM630_PERF_KBSS_FUSE_CORNERS  5
 
 /**
  * struct cprh_kbss_fuses - KBSS specific fuse data
@@ -131,18 +132,32 @@ static const char * const cprh_sdm660_perf_kbss_fuse_corner_name[] = {
        [CPRH_SDM660_PERF_KBSS_FUSE_CORNER_TURBO_L2]    = "TURBO_L2",
 };
 
-enum cprh_sdm630_kbss_fuse_corner {
-       CPRH_SDM630_KBSS_FUSE_CORNER_LOWSVS     = 0,
-       CPRH_SDM630_KBSS_FUSE_CORNER_SVSPLUS    = 1,
-       CPRH_SDM630_KBSS_FUSE_CORNER_NOM        = 2,
-       CPRH_SDM630_KBSS_FUSE_CORNER_TURBO_L1   = 3,
+enum cprh_sdm630_power_kbss_fuse_corner {
+       CPRH_SDM630_POWER_KBSS_FUSE_CORNER_LOWSVS       = 0,
+       CPRH_SDM630_POWER_KBSS_FUSE_CORNER_SVSPLUS      = 1,
+       CPRH_SDM630_POWER_KBSS_FUSE_CORNER_TURBO_L1     = 2,
 };
 
-static const char * const cprh_sdm630_kbss_fuse_corner_name[] = {
-       [CPRH_SDM630_KBSS_FUSE_CORNER_LOWSVS]   = "LowSVS",
-       [CPRH_SDM630_KBSS_FUSE_CORNER_SVSPLUS]  = "SVSPLUS",
-       [CPRH_SDM630_KBSS_FUSE_CORNER_NOM]      = "NOM",
-       [CPRH_SDM630_KBSS_FUSE_CORNER_TURBO_L1] = "TURBO_L1",
+static const char * const cprh_sdm630_power_kbss_fuse_corner_name[] = {
+       [CPRH_SDM630_POWER_KBSS_FUSE_CORNER_LOWSVS]     = "LowSVS",
+       [CPRH_SDM630_POWER_KBSS_FUSE_CORNER_SVSPLUS]    = "SVSPLUS",
+       [CPRH_SDM630_POWER_KBSS_FUSE_CORNER_TURBO_L1]   = "TURBO_L1",
+};
+
+enum cprh_sdm630_perf_kbss_fuse_corner {
+       CPRH_SDM630_PERF_KBSS_FUSE_CORNER_LOWSVS        = 0,
+       CPRH_SDM630_PERF_KBSS_FUSE_CORNER_SVSPLUS       = 1,
+       CPRH_SDM630_PERF_KBSS_FUSE_CORNER_NOM           = 2,
+       CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO         = 3,
+       CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO_L2      = 4,
+};
+
+static const char * const cprh_sdm630_perf_kbss_fuse_corner_name[] = {
+       [CPRH_SDM630_PERF_KBSS_FUSE_CORNER_LOWSVS]      = "LowSVS",
+       [CPRH_SDM630_PERF_KBSS_FUSE_CORNER_SVSPLUS]     = "SVSPLUS",
+       [CPRH_SDM630_PERF_KBSS_FUSE_CORNER_NOM]         = "NOM",
+       [CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO]       = "TURBO",
+       [CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO_L2]    = "TURBO_L2",
 };
 
 /* KBSS cluster IDs */
@@ -202,17 +217,17 @@ sdm660_kbss_ro_sel_param[2][SDM660_KBSS_FUSE_CORNERS][3] = {
 };
 
 static const struct cpr3_fuse_param
-sdm630_kbss_ro_sel_param[2][SDM630_KBSS_FUSE_CORNERS][3] = {
+sdm630_kbss_ro_sel_param[2][SDM630_PERF_KBSS_FUSE_CORNERS][3] = {
        [CPRH_KBSS_POWER_CLUSTER_ID] = {
                {{67, 12, 15}, {} },
                {{65, 56, 59}, {} },
-               {{67,  4,  7}, {} },
                {{67,  0,  3}, {} },
        },
        [CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
                {{68, 61, 63}, {69,  0,  0} },
                {{69,  1,  4}, {} },
                {{68, 57, 60}, {} },
+               {{68, 53, 56}, {} },
                {{66, 14, 17}, {} },
        },
 };
@@ -252,17 +267,17 @@ sdm660_kbss_init_voltage_param[2][SDM660_KBSS_FUSE_CORNERS][2] = {
 };
 
 static const struct cpr3_fuse_param
-sdm630_kbss_init_voltage_param[2][SDM630_KBSS_FUSE_CORNERS][2] = {
+sdm630_kbss_init_voltage_param[2][SDM630_PERF_KBSS_FUSE_CORNERS][2] = {
        [CPRH_KBSS_POWER_CLUSTER_ID] = {
                {{67, 34, 39}, {} },
                {{71,  3,  8}, {} },
-               {{67, 22, 27}, {} },
                {{67, 16, 21}, {} },
        },
        [CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
                {{69, 17, 22}, {} },
                {{69, 23, 28}, {} },
                {{69, 11, 16}, {} },
+               {{69,  5, 10}, {} },
                {{70, 42, 47}, {} },
        },
 };
@@ -302,17 +317,17 @@ sdm660_kbss_target_quot_param[2][SDM660_KBSS_FUSE_CORNERS][3] = {
 };
 
 static const struct cpr3_fuse_param
-sdm630_kbss_target_quot_param[2][SDM630_KBSS_FUSE_CORNERS][3] = {
+sdm630_kbss_target_quot_param[2][SDM630_PERF_KBSS_FUSE_CORNERS][3] = {
        [CPRH_KBSS_POWER_CLUSTER_ID] = {
                {{68, 12, 23}, {} },
                {{71,  9, 20}, {} },
-               {{67, 52, 63}, {} },
                {{67, 40, 51}, {} },
        },
        [CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
                {{69, 53, 63}, {70,  0,  0}, {} },
                {{70,  1, 12}, {} },
                {{69, 41, 52}, {} },
+               {{69, 29, 40}, {} },
                {{70, 48, 59}, {} },
        },
 };
@@ -352,17 +367,17 @@ sdm660_kbss_quot_offset_param[2][SDM660_KBSS_FUSE_CORNERS][3] = {
 };
 
 static const struct cpr3_fuse_param
-sdm630_kbss_quot_offset_param[2][SDM630_KBSS_FUSE_CORNERS][3] = {
+sdm630_kbss_quot_offset_param[2][SDM630_PERF_KBSS_FUSE_CORNERS][3] = {
        [CPRH_KBSS_POWER_CLUSTER_ID] = {
                {{} },
                {{71, 21, 27}, {} },
-               {{68, 31, 37}, {} },
                {{68, 24, 30}, {} },
        },
        [CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
                {{} },
                {{70, 27, 33}, {} },
                {{70, 20, 26}, {} },
+               {{70, 13, 19}, {} },
                {{70, 60, 63}, {71,  0,  2}, {} },
        },
 };
@@ -484,18 +499,27 @@ sdm660_kbss_fuse_ref_volt[2][SDM660_KBSS_FUSE_CORNERS] = {
  * Open loop voltage fuse reference voltages in microvolts for SDM630
  */
 static const int
-sdm630_kbss_fuse_ref_volt[SDM630_KBSS_FUSE_CORNERS] = {
-       644000,
-       788000,
-       868000,
-       1068000,
+sdm630_kbss_fuse_ref_volt[2][SDM630_PERF_KBSS_FUSE_CORNERS] = {
+       [CPRH_KBSS_POWER_CLUSTER_ID] = {
+               644000,
+               788000,
+               1068000,
+       },
+       [CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+               644000,
+               788000,
+               868000,
+               988000,
+               1068000,
+       },
 };
 
 static const int
-sdm630_kbss_speed_bin_2_fuse_ref_volt[SDM630_KBSS_FUSE_CORNERS] = {
+sdm630_perf_kbss_speed_bin_2_fuse_ref_volt[SDM630_PERF_KBSS_FUSE_CORNERS] = {
        644000,
        788000,
        868000,
+       988000,
        1140000,
 };
 
@@ -552,7 +576,7 @@ sdm630_kbss_speed_bin_2_fuse_ref_volt[SDM630_KBSS_FUSE_CORNERS] = {
  * sdm630 configuration
  */
 #define SDM630_KBSS_POWER_CPR_SENSOR_COUNT             6
-#define SDM630_KBSS_PERFORMANCE_CPR_SENSOR_COUNT       9
+#define SDM630_KBSS_PERFORMANCE_CPR_SENSOR_COUNT       6
 
 /*
  * SOC IDs
@@ -760,7 +784,7 @@ static int cprh_sdm630_kbss_read_fuse_data(struct cpr3_regulator *vreg,
                struct cprh_kbss_fuses *fuse)
 {
        void __iomem *base = vreg->thread->ctrl->fuse_base;
-       int i, id, rc;
+       int i, id, rc, fuse_corners;
 
        rc = cpr3_read_fuse_param(base, sdm630_cpr_fusing_rev_param,
                                &fuse->cpr_fusing_rev);
@@ -772,7 +796,12 @@ static int cprh_sdm630_kbss_read_fuse_data(struct cpr3_regulator *vreg,
        cpr3_info(vreg, "CPR fusing revision = %llu\n", fuse->cpr_fusing_rev);
 
        id = vreg->thread->ctrl->ctrl_id;
-       for (i = 0; i < SDM630_KBSS_FUSE_CORNERS; i++) {
+       if (id == CPRH_KBSS_POWER_CLUSTER_ID)
+               fuse_corners = SDM630_POWER_KBSS_FUSE_CORNERS;
+       else
+               fuse_corners = SDM630_PERF_KBSS_FUSE_CORNERS;
+
+       for (i = 0; i < fuse_corners; i++) {
                rc = cpr3_read_fuse_param(base,
                                sdm630_kbss_init_voltage_param[id][i],
                                &fuse->init_voltage[i]);
@@ -856,7 +885,10 @@ static int cprh_kbss_read_fuse_data(struct cpr3_regulator *vreg)
                fuse_corners = SDM660_KBSS_FUSE_CORNERS;
                break;
        case SDM630_SOC_ID:
-               fuse_corners = SDM630_KBSS_FUSE_CORNERS;
+               if (vreg->thread->ctrl->ctrl_id == CPRH_KBSS_POWER_CLUSTER_ID)
+                       fuse_corners = SDM630_POWER_KBSS_FUSE_CORNERS;
+               else
+                       fuse_corners = SDM630_PERF_KBSS_FUSE_CORNERS;
                break;
        case MSM8998_V1_SOC_ID:
        case MSM8998_V2_SOC_ID:
@@ -1008,10 +1040,15 @@ static int cprh_kbss_calculate_open_loop_voltages(struct cpr3_regulator *vreg)
                        corner_name = cprh_sdm660_perf_kbss_fuse_corner_name;
                break;
        case SDM630_SOC_ID:
-               ref_volt = sdm630_kbss_fuse_ref_volt;
-               if (vreg->speed_bin_fuse == 2)
-                       ref_volt = sdm630_kbss_speed_bin_2_fuse_ref_volt;
-               corner_name = cprh_sdm630_kbss_fuse_corner_name;
+               ref_volt = sdm630_kbss_fuse_ref_volt[id];
+               if (id == CPRH_KBSS_PERFORMANCE_CLUSTER_ID
+                       && vreg->speed_bin_fuse == 2)
+                       ref_volt = sdm630_perf_kbss_speed_bin_2_fuse_ref_volt;
+
+               if (id == CPRH_KBSS_POWER_CLUSTER_ID)
+                       corner_name = cprh_sdm630_power_kbss_fuse_corner_name;
+               else
+                       corner_name = cprh_sdm630_perf_kbss_fuse_corner_name;
                break;
        case MSM8998_V1_SOC_ID:
                ref_volt = msm8998_v1_kbss_fuse_ref_volt;
@@ -1581,11 +1618,19 @@ static int cprh_kbss_calculate_target_quotients(struct cpr3_regulator *vreg)
                }
                break;
        case SDM630_SOC_ID:
-               corner_name = cprh_sdm630_kbss_fuse_corner_name;
-               lowest_fuse_corner =
-                       CPRH_SDM630_KBSS_FUSE_CORNER_LOWSVS;
-               highest_fuse_corner =
-                       CPRH_SDM630_KBSS_FUSE_CORNER_TURBO_L1;
+               if (vreg->thread->ctrl->ctrl_id == CPRH_KBSS_POWER_CLUSTER_ID) {
+                       corner_name = cprh_sdm630_power_kbss_fuse_corner_name;
+                       lowest_fuse_corner =
+                               CPRH_SDM630_POWER_KBSS_FUSE_CORNER_LOWSVS;
+                       highest_fuse_corner =
+                               CPRH_SDM630_POWER_KBSS_FUSE_CORNER_TURBO_L1;
+               } else {
+                       corner_name = cprh_sdm630_perf_kbss_fuse_corner_name;
+                       lowest_fuse_corner =
+                               CPRH_SDM630_PERF_KBSS_FUSE_CORNER_LOWSVS;
+                       highest_fuse_corner =
+                               CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO_L2;
+               }
                break;
        case MSM8998_V1_SOC_ID:
        case MSM8998_V2_SOC_ID:
index cf8f000..dbe2a08 100644 (file)
 #include <linux/kernel.h>
 #include <linux/regmap.h>
 #include <linux/module.h>
+#include <linux/notifier.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/spmi.h>
 #include <linux/platform_device.h>
 #include <linux/string.h>
+#include <linux/workqueue.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
 #include <linux/regulator/of_regulator.h>
 #include <linux/qpnp/qpnp-revid.h>
+#include <linux/regulator/qpnp-labibb-regulator.h>
 
 #define QPNP_LABIBB_REGULATOR_DRIVER_NAME      "qcom,qpnp-labibb-regulator"
 
@@ -594,6 +597,7 @@ struct qpnp_labibb {
        const struct lab_ver_ops        *lab_ver_ops;
        struct mutex                    bus_mutex;
        enum qpnp_labibb_mode           mode;
+       struct work_struct              lab_vreg_ok_work;
        bool                            standalone;
        bool                            ttw_en;
        bool                            in_ttw_mode;
@@ -603,10 +607,13 @@ struct qpnp_labibb {
        bool                            ttw_force_lab_on;
        bool                            skip_2nd_swire_cmd;
        bool                            pfm_enable;
+       bool                            notify_lab_vreg_ok_sts;
        u32                             swire_2nd_cmd_delay;
        u32                             swire_ibb_ps_enable_delay;
 };
 
+static RAW_NOTIFIER_HEAD(labibb_notifier);
+
 struct ibb_ver_ops {
        int (*set_default_voltage)(struct qpnp_labibb *labibb,
                        bool use_default);
@@ -2124,6 +2131,36 @@ static int qpnp_labibb_regulator_ttw_mode_exit(struct qpnp_labibb *labibb)
        return rc;
 }
 
+static void qpnp_lab_vreg_notifier_work(struct work_struct *work)
+{
+       int rc = 0;
+       u16 retries = 1000, dly = 5000;
+       u8 val;
+       struct qpnp_labibb *labibb  = container_of(work, struct qpnp_labibb,
+                                                       lab_vreg_ok_work);
+
+       while (retries--) {
+               rc = qpnp_labibb_read(labibb, labibb->lab_base +
+                                       REG_LAB_STATUS1, &val, 1);
+               if (rc < 0) {
+                       pr_err("read register %x failed rc = %d\n",
+                               REG_LAB_STATUS1, rc);
+                       return;
+               }
+
+               if (val & LAB_STATUS1_VREG_OK) {
+                       raw_notifier_call_chain(&labibb_notifier,
+                                               LAB_VREG_OK, NULL);
+                       break;
+               }
+
+               usleep_range(dly, dly + 100);
+       }
+
+       if (!retries)
+               pr_err("LAB_VREG_OK not set, failed to notify\n");
+}
+
 static int qpnp_labibb_regulator_enable(struct qpnp_labibb *labibb)
 {
        int rc;
@@ -2326,6 +2363,9 @@ static int qpnp_lab_regulator_enable(struct regulator_dev *rdev)
                labibb->lab_vreg.vreg_enabled = 1;
        }
 
+       if (labibb->notify_lab_vreg_ok_sts)
+               schedule_work(&labibb->lab_vreg_ok_work);
+
        return 0;
 }
 
@@ -2578,6 +2618,9 @@ static int register_qpnp_lab_regulator(struct qpnp_labibb *labibb,
                return rc;
        }
 
+       labibb->notify_lab_vreg_ok_sts = of_property_read_bool(of_node,
+                                       "qcom,notify-lab-vreg-ok-sts");
+
        rc = of_property_read_u32(of_node, "qcom,qpnp-lab-soft-start",
                                        &(labibb->lab_vreg.soft_start));
        if (!rc) {
@@ -3817,6 +3860,8 @@ static int qpnp_labibb_regulator_probe(struct platform_device *pdev)
                        goto fail_registration;
                }
        }
+
+       INIT_WORK(&labibb->lab_vreg_ok_work, qpnp_lab_vreg_notifier_work);
        dev_set_drvdata(&pdev->dev, labibb);
        pr_info("LAB/IBB registered successfully, lab_vreg enable=%d ibb_vreg enable=%d swire_control=%d\n",
                                                labibb->lab_vreg.vreg_enabled,
@@ -3834,6 +3879,18 @@ fail_registration:
        return rc;
 }
 
+int qpnp_labibb_notifier_register(struct notifier_block *nb)
+{
+       return raw_notifier_chain_register(&labibb_notifier, nb);
+}
+EXPORT_SYMBOL(qpnp_labibb_notifier_register);
+
+int qpnp_labibb_notifier_unregister(struct notifier_block *nb)
+{
+       return raw_notifier_chain_unregister(&labibb_notifier, nb);
+}
+EXPORT_SYMBOL(qpnp_labibb_notifier_unregister);
+
 static int qpnp_labibb_regulator_remove(struct platform_device *pdev)
 {
        struct qpnp_labibb *labibb = dev_get_drvdata(&pdev->dev);
@@ -3843,6 +3900,8 @@ static int qpnp_labibb_regulator_remove(struct platform_device *pdev)
                        regulator_unregister(labibb->lab_vreg.rdev);
                if (labibb->ibb_vreg.rdev)
                        regulator_unregister(labibb->ibb_vreg.rdev);
+
+               cancel_work_sync(&labibb->lab_vreg_ok_work);
        }
        return 0;
 }
index 7369478..bc00a7e 100644 (file)
@@ -35,6 +35,9 @@
 #include "ufs-qcom-debugfs.h"
 #include <linux/clk/msm-clk.h>
 
+#define MAX_PROP_SIZE             32
+#define VDDP_REF_CLK_MIN_UV        1200000
+#define VDDP_REF_CLK_MAX_UV        1200000
 /* TODO: further tuning for this parameter may be required */
 #define UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US      (10000) /* microseconds */
 
@@ -709,40 +712,105 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
        return err;
 }
 
-static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+
+static int ufs_qcom_config_vreg(struct device *dev,
+               struct ufs_vreg *vreg, bool on)
 {
-       struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-       struct phy *phy = host->generic_phy;
        int ret = 0;
+       struct regulator *reg;
+       int min_uV, uA_load;
 
-       if (ufs_qcom_is_link_off(hba)) {
-               /*
-                * Disable the tx/rx lane symbol clocks before PHY is
-                * powered down as the PLL source should be disabled
-                * after downstream clocks are disabled.
-                */
-               ufs_qcom_disable_lane_clks(host);
-               phy_power_off(phy);
-               ret = ufs_qcom_ice_suspend(host);
+       if (!vreg) {
+               WARN_ON(1);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       reg = vreg->reg;
+       if (regulator_count_voltages(reg) > 0) {
+               min_uV = on ? vreg->min_uV : 0;
+               ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+               if (ret) {
+                       dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+                                       __func__, vreg->name, ret);
+                       goto out;
+               }
+
+               uA_load = on ? vreg->max_uA : 0;
+               ret = regulator_set_load(vreg->reg, uA_load);
                if (ret)
-                       dev_err(hba->dev, "%s: failed ufs_qcom_ice_suspend %d\n",
-                                       __func__, ret);
+                       goto out;
+       }
+out:
+       return ret;
+}
+
+static int ufs_qcom_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+       int ret = 0;
+
+       if (vreg->enabled)
+               return ret;
 
-               /* Assert PHY soft reset */
-               ufs_qcom_assert_reset(hba);
+       ret = ufs_qcom_config_vreg(dev, vreg, true);
+       if (ret)
+               goto out;
+
+       ret = regulator_enable(vreg->reg);
+       if (ret)
+               goto out;
+
+       vreg->enabled = true;
+out:
+       return ret;
+}
+
+static int ufs_qcom_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+       int ret = 0;
+
+       if (!vreg->enabled)
+               return ret;
+
+       ret = regulator_disable(vreg->reg);
+       if (ret)
+               goto out;
+
+       ret = ufs_qcom_config_vreg(dev, vreg, false);
+       if (ret)
                goto out;
-       }
+
+       vreg->enabled = false;
+out:
+       return ret;
+}
+
+static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+       struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+       struct phy *phy = host->generic_phy;
+       int ret = 0;
 
        /*
-        * If UniPro link is not active, PHY ref_clk, main PHY analog power
-        * rail and low noise analog power rail for PLL can be switched off.
+        * If UniPro link is not active or OFF, PHY ref_clk, main PHY analog
+        * power rail and low noise analog power rail for PLL can be
+        * switched off.
         */
        if (!ufs_qcom_is_link_active(hba)) {
                ufs_qcom_disable_lane_clks(host);
                phy_power_off(phy);
+
+               if (host->vddp_ref_clk && ufs_qcom_is_link_off(hba))
+                       ret = ufs_qcom_disable_vreg(hba->dev,
+                                       host->vddp_ref_clk);
                ufs_qcom_ice_suspend(host);
-       }
 
+               if (ufs_qcom_is_link_off(hba)) {
+                       /* Assert PHY soft reset */
+                       ufs_qcom_assert_reset(hba);
+                       goto out;
+               }
+       }
        /* Unvote PM QoS */
        ufs_qcom_pm_qos_suspend(host);
 
@@ -763,6 +831,11 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
                goto out;
        }
 
+       if (host->vddp_ref_clk && (hba->rpm_lvl > UFS_PM_LVL_3 ||
+                                  hba->spm_lvl > UFS_PM_LVL_3))
+               ufs_qcom_enable_vreg(hba->dev,
+                                     host->vddp_ref_clk);
+
        err = ufs_qcom_enable_lane_clks(host);
        if (err)
                goto out;
@@ -1951,6 +2024,57 @@ static void ufs_qcom_parse_lpm(struct ufs_qcom_host *host)
                pr_info("%s: will disable all LPM modes\n", __func__);
 }
 
+static int ufs_qcom_parse_reg_info(struct ufs_qcom_host *host, char *name,
+                                  struct ufs_vreg **out_vreg)
+{
+       int ret = 0;
+       char prop_name[MAX_PROP_SIZE];
+       struct ufs_vreg *vreg = NULL;
+       struct device *dev = host->hba->dev;
+       struct device_node *np = dev->of_node;
+
+       if (!np) {
+               dev_err(dev, "%s: non DT initialization\n", __func__);
+               goto out;
+       }
+
+       snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
+       if (!of_parse_phandle(np, prop_name, 0)) {
+               dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
+                        __func__, prop_name);
+               ret = -ENODEV;
+               goto out;
+       }
+
+       vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+       if (!vreg)
+               return -ENOMEM;
+
+       vreg->name = name;
+
+       snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
+       ret = of_property_read_u32(np, prop_name, &vreg->max_uA);
+       if (ret) {
+               dev_err(dev, "%s: unable to find %s err %d\n",
+                       __func__, prop_name, ret);
+               goto out;
+       }
+
+       vreg->reg = devm_regulator_get(dev, vreg->name);
+       if (IS_ERR(vreg->reg)) {
+               ret = PTR_ERR(vreg->reg);
+               dev_err(dev, "%s: %s get failed, err=%d\n",
+                       __func__, vreg->name, ret);
+       }
+       vreg->min_uV = VDDP_REF_CLK_MIN_UV;
+       vreg->max_uV = VDDP_REF_CLK_MAX_UV;
+
+out:
+       if (!ret)
+               *out_vreg = vreg;
+       return ret;
+}
+
 /**
  * ufs_qcom_init - bind phy with controller
  * @hba: host controller instance
@@ -2068,14 +2192,24 @@ static int ufs_qcom_init(struct ufs_hba *hba)
        ufs_qcom_phy_save_controller_version(host->generic_phy,
                host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
 
+       err = ufs_qcom_parse_reg_info(host, "qcom,vddp-ref-clk",
+                                     &host->vddp_ref_clk);
        phy_init(host->generic_phy);
        err = phy_power_on(host->generic_phy);
        if (err)
                goto out_unregister_bus;
+       if (host->vddp_ref_clk) {
+               err = ufs_qcom_enable_vreg(dev, host->vddp_ref_clk);
+               if (err) {
+                       dev_err(dev, "%s: failed enabling ref clk supply: %d\n",
+                               __func__, err);
+                       goto out_disable_phy;
+               }
+       }
 
        err = ufs_qcom_init_lane_clks(host);
        if (err)
-               goto out_disable_phy;
+               goto out_disable_vddp;
 
        ufs_qcom_parse_lpm(host);
        if (host->disable_lpm)
@@ -2100,6 +2234,9 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 
        goto out;
 
+out_disable_vddp:
+       if (host->vddp_ref_clk)
+               ufs_qcom_disable_vreg(dev, host->vddp_ref_clk);
 out_disable_phy:
        phy_power_off(host->generic_phy);
 out_unregister_bus:
index 74d8a7a..12154b2 100644 (file)
@@ -373,6 +373,7 @@ struct ufs_qcom_host {
        spinlock_t ice_work_lock;
        struct work_struct ice_cfg_work;
        struct request *req_pending;
+       struct ufs_vreg *vddp_ref_clk;
 };
 
 static inline u32
index 33cf484..09c50a8 100644 (file)
@@ -2048,7 +2048,7 @@ static struct glink_core_xprt_ctx *find_open_transport(const char *edge,
                                                       uint16_t *best_id)
 {
        struct glink_core_xprt_ctx *xprt;
-       struct glink_core_xprt_ctx *best_xprt;
+       struct glink_core_xprt_ctx *best_xprt = NULL;
        struct glink_core_xprt_ctx *ret;
        bool first = true;
 
@@ -5502,7 +5502,7 @@ static void tx_func(struct kthread_work *work)
 {
        struct channel_ctx *ch_ptr;
        uint32_t prio;
-       uint32_t tx_ready_head_prio;
+       uint32_t tx_ready_head_prio = 0;
        int ret;
        struct channel_ctx *tx_ready_head = NULL;
        bool transmitted_successfully = true;
index d3f44ab..a94f741 100644 (file)
@@ -1157,7 +1157,7 @@ static void smem_module_init_notify(uint32_t state, void *data)
 static void smem_init_security_partition(struct smem_toc_entry *entry,
                                                                uint32_t num)
 {
-       uint16_t remote_host;
+       uint16_t remote_host = 0;
        struct smem_partition_header *hdr;
        bool is_comm_partition = false;
 
index 4d9767b..21e9f17 100644 (file)
@@ -595,6 +595,22 @@ static void pil_release_mmap(struct pil_desc *desc)
        struct pil_priv *priv = desc->priv;
        struct pil_seg *p, *tmp;
        u64 zero = 0ULL;
+
+       if (priv->info) {
+               __iowrite32_copy(&priv->info->start, &zero,
+                                       sizeof(zero) / 4);
+               writel_relaxed(0, &priv->info->size);
+       }
+
+       list_for_each_entry_safe(p, tmp, &priv->segs, list) {
+               list_del(&p->list);
+               kfree(p);
+       }
+}
+
+static void pil_clear_segment(struct pil_desc *desc)
+{
+       struct pil_priv *priv = desc->priv;
        u8 __iomem *buf;
 
        struct pil_map_fw_info map_fw_info = {
@@ -613,16 +629,6 @@ static void pil_release_mmap(struct pil_desc *desc)
        desc->unmap_fw_mem(buf, (priv->region_end - priv->region_start),
                                                                map_data);
 
-       if (priv->info) {
-               __iowrite32_copy(&priv->info->start, &zero,
-                                       sizeof(zero) / 4);
-               writel_relaxed(0, &priv->info->size);
-       }
-
-       list_for_each_entry_safe(p, tmp, &priv->segs, list) {
-               list_del(&p->list);
-               kfree(p);
-       }
 }
 
 #define IOMAP_SIZE SZ_1M
@@ -914,6 +920,7 @@ out:
                                        &desc->attrs);
                        priv->region = NULL;
                }
+               pil_clear_segment(desc);
                pil_release_mmap(desc);
        }
        return ret;
index 47adc3b..b120883 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -394,8 +394,8 @@ static void audio_notifer_reg_all_clients(void)
        int ret;
 
        list_for_each_safe(ptr, next, &client_list) {
-               client_data = list_entry(ptr,
-                       struct client_data, list);
+               client_data = list_entry(ptr, struct client_data, list);
+
                ret = audio_notifer_reg_client(client_data);
                if (IS_ERR_VALUE(ret))
                        pr_err("%s: audio_notifer_reg_client failed for client %s, ret %d\n",
@@ -518,9 +518,8 @@ int audio_notifier_deregister(char *client_name)
                goto done;
        }
        mutex_lock(&notifier_mutex);
-       list_for_each_safe(ptr, next, &client_data->list) {
-               client_data = list_entry(ptr, struct client_data,
-                                       list);
+       list_for_each_safe(ptr, next, &client_list) {
+               client_data = list_entry(ptr, struct client_data, list);
                if (!strcmp(client_name, client_data->client_name)) {
                        ret2 = audio_notifer_dereg_client(client_data);
                        if (ret2 < 0) {
index d80a9fc..9ef96dc 100644 (file)
@@ -27,7 +27,7 @@
 
 #include "rpm_stats.h"
 
-#define RPM_RAIL_BUF_LEN 600
+#define RPM_RAIL_BUF_LEN 1300
 
 #define SNPRINTF(buf, size, format, ...) \
 { \
@@ -118,7 +118,7 @@ static int msm_rpm_rail_type_copy(void __iomem **base, char **buf, int count)
        rail[NAMELEN - 1] = '\0';
        memcpy(rail, &rt.rail, NAMELEN - 1);
        SNPRINTF(*buf, count,
-               "\trail:%-2s num_corners:%-2u current_corner:%-2u last_entered:%-8u\n",
+               "\trail:%-2s \tnum_corners:%-2u current_corner:%-2u last_entered:%-8u\n",
                rail, rt.num_corners, rt.current_corner, rt.last_entered);
 
        *base += sizeof(rt);
index d0fef9d..99ae247 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -207,12 +207,11 @@ static int prepare_send_scm_msg(const uint8_t *in_buf, size_t in_buf_len,
                ret = qseecom_process_listener_from_smcinvoke(&desc);
 
        *smcinvoke_result = (int32_t)desc.ret[1];
-       if (ret || desc.ret[1] || desc.ret[2] || desc.ret[0]) {
+       if (ret || desc.ret[1] || desc.ret[2] || desc.ret[0])
                pr_err("SCM call failed with ret val = %d %d %d %d\n",
                                                ret, (int)desc.ret[0],
                                (int)desc.ret[1], (int)desc.ret[2]);
-               ret = ret | desc.ret[0] | desc.ret[1] | desc.ret[2];
-       }
+
        dmac_inv_range(in_buf, in_buf + inbuf_flush_size);
        dmac_inv_range(out_buf, out_buf + outbuf_flush_size);
        return ret;
index 7244dc9..81ce22e 100644 (file)
@@ -889,7 +889,7 @@ static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
        if (len < 18)
                return -EINVAL;
 
-       snprintf(str, len, "%pKM", dev_addr);
+       snprintf(str, len, "%pM", dev_addr);
        return 18;
 }
 
index 087cd9f..ea27878 100644 (file)
@@ -1931,6 +1931,11 @@ static void usbpd_sm(struct work_struct *w)
 
        case PE_SNK_SELECT_CAPABILITY:
                if (IS_CTRL(rx_msg, MSG_ACCEPT)) {
+                       u32 pdo = pd->received_pdos[pd->requested_pdo - 1];
+                       bool same_pps = (pd->selected_pdo == pd->requested_pdo)
+                               && (PD_SRC_PDO_TYPE(pdo) ==
+                                               PD_SRC_PDO_TYPE_AUGMENTED);
+
                        usbpd_set_state(pd, PE_SNK_TRANSITION_SINK);
 
                        /* prepare for voltage increase/decrease */
@@ -1942,11 +1947,12 @@ static void usbpd_sm(struct work_struct *w)
                                        &val);
 
                        /*
-                        * if we are changing voltages, we must lower input
-                        * current to pSnkStdby (2.5W). Calculate it and set
-                        * PD_CURRENT_MAX accordingly.
+                        * if changing voltages (not within the same PPS PDO),
+                        * we must lower input current to pSnkStdby (2.5W).
+                        * Calculate it and set PD_CURRENT_MAX accordingly.
                         */
-                       if (pd->requested_voltage != pd->current_voltage) {
+                       if (!same_pps &&
+                               pd->requested_voltage != pd->current_voltage) {
                                int mv = max(pd->requested_voltage,
                                                pd->current_voltage) / 1000;
                                val.intval = (2500000 / mv) * 1000;
index 6430e41..5867c6c 100644 (file)
@@ -72,6 +72,7 @@
 #define QUSB2PHY_PORT_TUNE2             0x84
 #define QUSB2PHY_PORT_TUNE3             0x88
 #define QUSB2PHY_PORT_TUNE4             0x8C
+#define QUSB2PHY_PORT_TUNE5             0x90
 
 /* In case Efuse register shows zero, use this value */
 #define TUNE2_DEFAULT_HIGH_NIBBLE      0xB
 
 #define QUSB2PHY_REFCLK_ENABLE         BIT(0)
 
+unsigned int tune1;
+module_param(tune1, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune1, "QUSB PHY TUNE1");
+
 unsigned int tune2;
 module_param(tune2, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(tune2, "QUSB PHY TUNE2");
 
+unsigned int tune3;
+module_param(tune3, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune3, "QUSB PHY TUNE3");
+
+unsigned int tune4;
+module_param(tune4, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune4, "QUSB PHY TUNE4");
+
+unsigned int tune5;
+module_param(tune5, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune5, "QUSB PHY TUNE5");
+
+
 struct qusb_phy {
        struct usb_phy          phy;
        void __iomem            *base;
@@ -562,13 +580,29 @@ static int qusb_phy_init(struct usb_phy *phy)
                                qphy->base + QUSB2PHY_PORT_TUNE2);
        }
 
-       /* If tune2 modparam set, override tune2 value */
-       if (tune2) {
-               pr_debug("%s(): (modparam) TUNE2 val:0x%02x\n",
-                                               __func__, tune2);
+       /* If tune modparam set, override tune value */
+
+       pr_debug("%s():userspecified modparams TUNEX val:0x%x %x %x %x %x\n",
+                               __func__, tune1, tune2, tune3, tune4, tune5);
+       if (tune1)
+               writel_relaxed(tune1,
+                               qphy->base + QUSB2PHY_PORT_TUNE1);
+
+       if (tune2)
                writel_relaxed(tune2,
                                qphy->base + QUSB2PHY_PORT_TUNE2);
-       }
+
+       if (tune3)
+               writel_relaxed(tune3,
+                               qphy->base + QUSB2PHY_PORT_TUNE3);
+
+       if (tune4)
+               writel_relaxed(tune4,
+                               qphy->base + QUSB2PHY_PORT_TUNE4);
+
+       if (tune5)
+               writel_relaxed(tune5,
+                               qphy->base + QUSB2PHY_PORT_TUNE5);
 
        /* ensure above writes are completed before re-enabling PHY */
        wmb();
index 6182aa2..bf4dc39 100644 (file)
@@ -222,6 +222,7 @@ struct mdss_smmu_client {
        struct dss_module_power mp;
        struct reg_bus_client *reg_bus_clt;
        bool domain_attached;
+       bool domain_reattach;
        bool handoff_pending;
        void __iomem *mmu_base;
        struct list_head _client;
index a183fd7..f4c4c50 100644 (file)
@@ -4276,8 +4276,6 @@ static int mdss_fb_handle_buf_sync_ioctl(struct msm_sync_pt_data *sync_pt_data,
                goto buf_sync_err_2;
        }
 
-       sync_fence_install(rel_fence, rel_fen_fd);
-
        ret = copy_to_user(buf_sync->rel_fen_fd, &rel_fen_fd, sizeof(int));
        if (ret) {
                pr_err("%s: copy_to_user failed\n", sync_pt_data->fence_name);
@@ -4314,8 +4312,6 @@ static int mdss_fb_handle_buf_sync_ioctl(struct msm_sync_pt_data *sync_pt_data,
                goto buf_sync_err_3;
        }
 
-       sync_fence_install(retire_fence, retire_fen_fd);
-
        ret = copy_to_user(buf_sync->retire_fen_fd, &retire_fen_fd,
                        sizeof(int));
        if (ret) {
@@ -4326,6 +4322,9 @@ static int mdss_fb_handle_buf_sync_ioctl(struct msm_sync_pt_data *sync_pt_data,
                goto buf_sync_err_3;
        }
 
+       sync_fence_install(rel_fence, rel_fen_fd);
+       sync_fence_install(retire_fence, retire_fen_fd);
+
 skip_retire_fence:
        mutex_unlock(&sync_pt_data->sync_mutex);
 
index b8f7a74..e258f25 100644 (file)
@@ -4770,6 +4770,8 @@ static void __mdss_mdp_mixer_get_offsets(u32 mixer_num,
 
 static inline int __mdss_mdp_mixer_get_hw_num(struct mdss_mdp_mixer *mixer)
 {
+       struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
        /*
         * mapping to hardware expectation of actual mixer programming to
         * happen on following registers:
@@ -4777,6 +4779,11 @@ static inline int __mdss_mdp_mixer_get_hw_num(struct mdss_mdp_mixer *mixer)
         *  WB: 3, 4
         * With some exceptions on certain revisions
         */
+
+       if (mdata->mdp_rev == MDSS_MDP_HW_REV_330
+                       && mixer->num == MDSS_MDP_INTF_LAYERMIXER1)
+               return MDSS_MDP_INTF_LAYERMIXER2;
+
        if (mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK) {
                u32 wb_offset;
 
index 6930444..62e2550 100644 (file)
@@ -299,7 +299,6 @@ static int mdss_smmu_attach_v2(struct mdss_data_type *mdata)
        for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
                if (!mdss_smmu_is_valid_domain_type(mdata, i))
                        continue;
-
                mdss_smmu = mdss_smmu_get_cb(i);
                if (mdss_smmu && mdss_smmu->base.dev) {
                        if (!mdss_smmu->handoff_pending) {
@@ -326,6 +325,14 @@ static int mdss_smmu_attach_v2(struct mdss_data_type *mdata)
                                        goto err;
                                }
                                mdss_smmu->domain_attached = true;
+                               if (mdss_smmu->domain_reattach) {
+                                       pr_debug("iommu v2 domain[%i] remove extra vote\n",
+                                                       i);
+                                       /* remove extra power vote */
+                                       mdss_smmu_enable_power(mdss_smmu,
+                                               false);
+                                       mdss_smmu->domain_reattach = false;
+                               }
                                pr_debug("iommu v2 domain[%i] attached\n", i);
                        }
                } else {
@@ -379,6 +386,11 @@ static int mdss_smmu_detach_v2(struct mdss_data_type *mdata)
                                 */
                                arm_iommu_detach_device(mdss_smmu->base.dev);
                                mdss_smmu->domain_attached = false;
+                               /*
+                                * Since we are leaving the clocks on, do
+                                * not vote for clocks again on re-attach.
+                                */
+                               mdss_smmu->domain_reattach = true;
                                pr_debug("iommu v2 domain[%i] detached\n", i);
                        } else {
                                mdss_smmu_enable_power(mdss_smmu, false);
index cd5b78e..b622a66 100644 (file)
@@ -80,7 +80,6 @@
 #define GCC_GPU_CFG_AHB_CLK                    66
 #define GCC_GPU_GPLL0_CLK                      67
 #define GCC_GPU_GPLL0_DIV_CLK                  68
-#define GCC_HMSS_AHB_CLK                       70
 #define GCC_HMSS_DVM_BUS_CLK                   71
 #define GCC_HMSS_RBCPR_CLK                     72
 #define GCC_MMSS_GPLL0_CLK                     73
 #define GPLL6_OUT_MAIN                         157
 #define GPLL6_OUT_TEST                         158
 #define HLOS1_VOTE_LPASS_ADSP_SMMU_CLK         159
-#define HMSS_AHB_CLK_SRC                       160
 #define HMSS_GPLL0_CLK_SRC                     161
 #define HMSS_GPLL4_CLK_SRC                     162
 #define HMSS_RBCPR_CLK_SRC                     163
index 1b3f20e..c4c2565 100644 (file)
@@ -136,6 +136,7 @@ enum iommu_attr {
        DOMAIN_ATTR_EARLY_MAP,
        DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
        DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
+       DOMAIN_ATTR_ENABLE_TTBR1,
        DOMAIN_ATTR_MAX,
 };
 
index 6037fbf..d4b4cc7 100644 (file)
@@ -1053,6 +1053,18 @@ int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size);
 void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
                unsigned long *size);
 
+/**
+ * gsi_halt_channel_ee - Peripheral should call this function
+ * to stop another EE's channel. This is usually used in SSR cleanup.
+ *
+ * @chan_idx: Virtual channel index
+ * @ee: EE
+ * @code: [out] response code for operation
+ *
+ * @Return gsi_status
+ */
+int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code);
+
 /*
  * Here is a typical sequence of calls
  *
@@ -1250,5 +1262,11 @@ static inline void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
                unsigned long *size)
 {
 }
+
+static inline int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee,
+        int *code)
+{
+       return -GSI_STATUS_UNSUPPORTED_OP;
+}
 #endif
 #endif
index b8b2226..f8ba31e 100644 (file)
@@ -63,9 +63,10 @@ enum MHI_CLIENT_CHANNEL {
        MHI_CLIENT_RESERVED_1_UPPER = 99,
        MHI_CLIENT_IP_HW_0_OUT = 100,
        MHI_CLIENT_IP_HW_0_IN = 101,
-       MHI_CLIENT_RESERVED_2_LOWER = 102,
+       MHI_CLIENT_IP_HW_ADPL_IN = 102,
+       MHI_CLIENT_RESERVED_2_LOWER = 103,
        MHI_CLIENT_RESERVED_2_UPPER = 127,
-       MHI_MAX_CHANNELS = 102
+       MHI_MAX_CHANNELS = 103
 };
 
 enum MHI_CB_REASON {
@@ -214,7 +215,7 @@ int mhi_get_max_desc(struct mhi_client_handle *client_handle);
 /* RmNET Reserved APIs, This APIs are reserved for use by the linux network
 * stack only. Use by other clients will introduce system wide issues
 */
-int mhi_set_lpm(struct mhi_client_handle *client_handle, int enable_lpm);
+int mhi_set_lpm(struct mhi_client_handle *client_handle, bool enable_lpm);
 int mhi_get_epid(struct mhi_client_handle *mhi_handle);
 struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle);
 void mhi_mask_irq(struct mhi_client_handle *client_handle);
diff --git a/include/linux/regulator/qpnp-labibb-regulator.h b/include/linux/regulator/qpnp-labibb-regulator.h
new file mode 100644 (file)
index 0000000..2470695
--- /dev/null
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QPNP_LABIBB_REGULATOR_H
+#define _QPNP_LABIBB_REGULATOR_H
+
+enum labibb_notify_event {
+       LAB_VREG_OK = 1,
+};
+
+int qpnp_labibb_notifier_register(struct notifier_block *nb);
+int qpnp_labibb_notifier_unregister(struct notifier_block *nb);
+
+#endif
diff --git a/include/linux/sde_io_util.h b/include/linux/sde_io_util.h
new file mode 100644 (file)
index 0000000..6bd5c16
--- /dev/null
@@ -0,0 +1,113 @@
+/* Copyright (c) 2012, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_IO_UTIL_H__
+#define __SDE_IO_UTIL_H__
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+#ifdef DEBUG
+#define DEV_DBG(fmt, args...)   pr_err(fmt, ##args)
+#else
+#define DEV_DBG(fmt, args...)   pr_debug(fmt, ##args)
+#endif
+#define DEV_INFO(fmt, args...)  pr_info(fmt, ##args)
+#define DEV_WARN(fmt, args...)  pr_warn(fmt, ##args)
+#define DEV_ERR(fmt, args...)   pr_err(fmt, ##args)
+
+struct dss_io_data {
+       u32 len;
+       void __iomem *base;
+};
+
+void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug);
+u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug);
+void dss_reg_dump(void __iomem *base, u32 len, const char *prefix, u32 debug);
+
+#define DSS_REG_W_ND(io, offset, val)  dss_reg_w(io, offset, val, false)
+#define DSS_REG_W(io, offset, val)     dss_reg_w(io, offset, val, true)
+#define DSS_REG_R_ND(io, offset)       dss_reg_r(io, offset, false)
+#define DSS_REG_R(io, offset)          dss_reg_r(io, offset, true)
+
+enum dss_vreg_type {
+       DSS_REG_LDO,
+       DSS_REG_VS,
+};
+
+struct dss_vreg {
+       struct regulator *vreg; /* vreg handle */
+       char vreg_name[32];
+       int min_voltage;
+       int max_voltage;
+       int enable_load;
+       int disable_load;
+       int pre_on_sleep;
+       int post_on_sleep;
+       int pre_off_sleep;
+       int post_off_sleep;
+};
+
+struct dss_gpio {
+       unsigned int gpio;
+       unsigned int value;
+       char gpio_name[32];
+};
+
+enum dss_clk_type {
+       DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
+       DSS_CLK_PCLK,
+       DSS_CLK_OTHER,
+};
+
+struct dss_clk {
+       struct clk *clk; /* clk handle */
+       char clk_name[32];
+       enum dss_clk_type type;
+       unsigned long rate;
+       unsigned long max_rate;
+};
+
+struct dss_module_power {
+       unsigned int num_vreg;
+       struct dss_vreg *vreg_config;
+       unsigned int num_gpio;
+       struct dss_gpio *gpio_config;
+       unsigned int num_clk;
+       struct dss_clk *clk_config;
+};
+
+int msm_dss_ioremap_byname(struct platform_device *pdev,
+       struct dss_io_data *io_data, const char *name);
+void msm_dss_iounmap(struct dss_io_data *io_data);
+
+int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable);
+int msm_dss_gpio_enable(struct dss_gpio *in_gpio, int num_gpio, int enable);
+
+int msm_dss_config_vreg(struct device *dev, struct dss_vreg *in_vreg,
+       int num_vreg, int config);
+int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg,        int enable);
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
+
+int sde_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+                      uint8_t reg_offset, uint8_t *read_buf);
+int sde_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+                       uint8_t reg_offset, uint8_t *value);
+
+#endif /* __SDE_IO_UTIL_H__ */
index 4fe580f..d2f19ac 100644 (file)
 #define MSM_PIPE_2D1         0x02
 #define MSM_PIPE_3D0         0x10
 
+/* The pipe-id just uses the lower bits, so can be OR'd with flags in
+ * the upper 16 bits (which could be extended further, if needed, maybe
+ * we extend/overload the pipe-id some day to deal with multiple rings,
+ * but even then I don't think we need the full lower 16 bits).
+ */
+#define MSM_PIPE_ID_MASK     0xffff
+#define MSM_PIPE_ID(x)       ((x) & MSM_PIPE_ID_MASK)
+#define MSM_PIPE_FLAGS(x)    ((x) & ~MSM_PIPE_ID_MASK)
+
 /* timeouts are specified in clock-monotonic absolute times (to simplify
  * restarting interrupted ioctls).  The following struct is logically the
  * same as 'struct timespec' but 32/64b ABI safe.
@@ -54,6 +63,7 @@ struct drm_msm_timespec {
 #define MSM_PARAM_CHIP_ID    0x03
 #define MSM_PARAM_MAX_FREQ   0x04
 #define MSM_PARAM_TIMESTAMP  0x05
+#define MSM_PARAM_GMEM_BASE  0x06
 
 struct drm_msm_param {
        __u32 pipe;           /* in, MSM_PIPE_x */
@@ -67,6 +77,7 @@ struct drm_msm_param {
 
 #define MSM_BO_SCANOUT       0x00000001     /* scanout capable */
 #define MSM_BO_GPU_READONLY  0x00000002
+#define MSM_BO_PRIVILEGED    0x00000004
 #define MSM_BO_CACHE_MASK    0x000f0000
 /* cache modes */
 #define MSM_BO_CACHED        0x00010000
@@ -85,10 +96,14 @@ struct drm_msm_gem_new {
        __u32 handle;         /* out */
 };
 
+#define MSM_INFO_IOVA  0x01
+
+#define MSM_INFO_FLAGS (MSM_INFO_IOVA)
+
 struct drm_msm_gem_info {
        __u32 handle;         /* in */
-       __u32 pad;
-       __u64 offset;         /* out, offset to pass to mmap() */
+       __u32 flags;          /* in - combination of MSM_INFO_* flags */
+       __u64 offset;         /* out, mmap() offset or iova */
 };
 
 #define MSM_PREP_READ        0x01
@@ -173,12 +188,18 @@ struct drm_msm_gem_submit_bo {
        __u64 presumed;       /* in/out, presumed buffer address */
 };
 
+/* Valid submit ioctl flags: */
+#define MSM_SUBMIT_RING_MASK 0x000F0000
+#define MSM_SUBMIT_RING_SHIFT 16
+
+#define MSM_SUBMIT_FLAGS (MSM_SUBMIT_RING_MASK)
+
 /* Each cmdstream submit consists of a table of buffers involved, and
  * one or more cmdstream buffers.  This allows for conditional execution
  * (context-restore), and IB buffers needed for per tile/bin draw cmds.
  */
 struct drm_msm_gem_submit {
-       __u32 pipe;           /* in, MSM_PIPE_x */
+       __u32 flags;          /* MSM_PIPE_x | MSM_SUBMIT_x */
        __u32 fence;          /* out */
        __u32 nr_bos;         /* in, number of submit_bo's */
        __u32 nr_cmds;        /* in, number of submit_cmd's */
index 133b412..1e3accd 100644 (file)
@@ -277,9 +277,6 @@ static ssize_t show_global_state(const struct cluster_data *state, char *buf)
 
        for_each_possible_cpu(cpu) {
                c = &per_cpu(cpu_state, cpu);
-               if (!c->cluster)
-                       continue;
-
                cluster = c->cluster;
                if (!cluster || !cluster->inited)
                        continue;
@@ -301,6 +298,9 @@ static ssize_t show_global_state(const struct cluster_data *state, char *buf)
                count += snprintf(buf + count, PAGE_SIZE - count,
                                        "\tIs busy: %u\n", c->is_busy);
                count += snprintf(buf + count, PAGE_SIZE - count,
+                                       "\tNot preferred: %u\n",
+                                               c->not_preferred);
+               count += snprintf(buf + count, PAGE_SIZE - count,
                                        "\tNr running: %u\n", cluster->nrrun);
                count += snprintf(buf + count, PAGE_SIZE - count,
                        "\tActive CPUs: %u\n", get_active_cpu_count(cluster));
@@ -323,13 +323,14 @@ static ssize_t store_not_preferred(struct cluster_data *state,
        int ret;
 
        ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
-       if (ret != 1 && ret != state->num_cpus)
+       if (ret != state->num_cpus)
                return -EINVAL;
 
-       i = 0;
        spin_lock_irqsave(&state_lock, flags);
-       list_for_each_entry(c, &state->lru, sib)
-               c->not_preferred = val[i++];
+       for (i = 0; i < state->num_cpus; i++) {
+               c = &per_cpu(cpu_state, i + state->first_cpu);
+               c->not_preferred = val[i];
+       }
        spin_unlock_irqrestore(&state_lock, flags);
 
        return count;
@@ -340,11 +341,14 @@ static ssize_t show_not_preferred(const struct cluster_data *state, char *buf)
        struct cpu_data *c;
        ssize_t count = 0;
        unsigned long flags;
+       int i;
 
        spin_lock_irqsave(&state_lock, flags);
-       list_for_each_entry(c, &state->lru, sib)
-               count += snprintf(buf + count, PAGE_SIZE - count,
-                               "\tCPU:%d %u\n", c->cpu, c->not_preferred);
+       for (i = 0; i < state->num_cpus; i++) {
+               c = &per_cpu(cpu_state, i + state->first_cpu);
+               count += scnprintf(buf + count, PAGE_SIZE - count,
+                               "CPU#%d: %u\n", c->cpu, c->not_preferred);
+       }
        spin_unlock_irqrestore(&state_lock, flags);
 
        return count;
index f514dc4..f5383e4 100644 (file)
@@ -149,9 +149,16 @@ static unsigned long vmpressure_calc_pressure(unsigned long scanned,
                                                    unsigned long reclaimed)
 {
        unsigned long scale = scanned + reclaimed;
-       unsigned long pressure;
+       unsigned long pressure = 0;
 
        /*
+        * reclaimed can be greater than scanned in cases
+        * like THP, where the scanned count is 1 and the
+        * reclaimed count could be 512
+        */
+       if (reclaimed >= scanned)
+               goto out;
+       /*
         * We calculate the ratio (in percents) of how many pages were
         * scanned vs. reclaimed in a given time frame (window). Note that
         * time is in VM reclaimer's "ticks", i.e. number of pages
@@ -161,6 +168,7 @@ static unsigned long vmpressure_calc_pressure(unsigned long scanned,
        pressure = scale - (reclaimed * scale / scanned);
        pressure = pressure * 100 / scale;
 
+out:
        pr_debug("%s: %3lu  (s: %lu  r: %lu)\n", __func__, pressure,
                 scanned, reclaimed);
 
index 196ae84..2baffcb 100644 (file)
@@ -2547,15 +2547,23 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc,
                                    sc->nr_scanned - nr_scanned,
                                    zone_lru_pages);
 
+               /*
+                * Record the subtree's reclaim efficiency. The reclaimed
+                * pages from slab is excluded here because the corresponding
+                * scanned pages is not accounted. Moreover, freeing a page
+                * by slab shrinking depends on each slab's object population,
+                * making the cost model (i.e. scan:free) different from that
+                * of LRU.
+                */
+               vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
+                          sc->nr_scanned - nr_scanned,
+                          sc->nr_reclaimed - nr_reclaimed);
+
                if (reclaim_state) {
                        sc->nr_reclaimed += reclaim_state->reclaimed_slab;
                        reclaim_state->reclaimed_slab = 0;
                }
 
-               vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
-                          sc->nr_scanned - nr_scanned,
-                          sc->nr_reclaimed - nr_reclaimed);
-
                if (sc->nr_reclaimed - nr_reclaimed)
                        reclaimable = true;
 
index 5211c40..1cdfe02 100644 (file)
@@ -1664,6 +1664,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
 
        if (len > INT_MAX)
                len = INT_MAX;
+       if (unlikely(!access_ok(VERIFY_READ, buff, len)))
+               return -EFAULT;
 
        err = import_single_range(WRITE, buff, len, &iov, &msg.msg_iter);
        if (unlikely(err))
@@ -1723,6 +1725,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
 
        if (size > INT_MAX)
                size = INT_MAX;
+       if (unlikely(!access_ok(VERIFY_WRITE, ubuf, size)))
+               return -EFAULT;
        err = import_single_range(READ, ubuf, size, &iov, &msg.msg_iter);
        if (unlikely(err))
                return err;
index 17a3a6d..1dcd4ed 100755 (executable)
@@ -60,6 +60,8 @@ my $spelling_file = "$D/spelling.txt";
 my $codespell = 0;
 my $codespellfile = "/usr/share/codespell/dictionary.txt";
 my $color = 1;
+my $qca_sign_off = 0;
+my $codeaurora_sign_off = 0;
 
 sub help {
        my ($exitcode) = @_;
@@ -2429,10 +2431,16 @@ sub process {
                                             "email address '$email' might be better as '$suggested_email$comment'\n" . $herecurr);
                                }
                        }
-                       if ($chk_author && $line =~ /^\s*signed-off-by:.*(quicinc|qualcomm)\.com/i) {
-                               WARN("BAD_SIGN_OFF",
-                                    "invalid Signed-off-by identity\n" . $line );
-                       }                       
+                       if ($chk_author) {
+                               if ($line =~ /^\s*signed-off-by:.*qca\.qualcomm\.com/i) {
+                                       $qca_sign_off = 1;
+                               } elsif ($line =~ /^\s*signed-off-by:.*codeaurora\.org/i) {
+                                       $codeaurora_sign_off = 1;
+                               } elsif ($line =~ /^\s*signed-off-by:.*(quicinc|qualcomm)\.com/i) {
+                                       WARN("BAD_SIGN_OFF",
+                                            "invalid Signed-off-by identity\n" . $line );
+                               }
+                       }
 
 # Check for duplicate signatures
                        my $sig_nospace = $line;
@@ -2558,7 +2566,8 @@ sub process {
                }
 
 #check the patch for invalid author credentials
-               if ($chk_author && $line =~ /^From:.*(quicinc|qualcomm)\.com/) {
+               if ($chk_author && !($line =~ /^From:.*qca\.qualcomm\.com/) &&
+                   $line =~ /^From:.*(quicinc|qualcomm)\.com/) {
                        WARN("BAD_AUTHOR", "invalid author identity\n" . $line );
                }
 
@@ -6042,6 +6051,11 @@ sub process {
                }
        }
 
+       if ($chk_author && $qca_sign_off && !$codeaurora_sign_off) {
+               WARN("BAD_SIGN_OFF",
+                    "QCA Signed-off-by requires CODEAURORA Signed-off-by\n" . $line );
+       }
+
        # If we have no input at all, then there is nothing to report on
        # so just keep quiet.
        if ($#rawlines == -1) {
index 8d623d0..566bcb0 100644 (file)
@@ -989,7 +989,7 @@ config SND_SOC_MSM_STUB
 
 config SND_SOC_MSM_HDMI_CODEC_RX
        bool "HDMI Audio Playback"
-       depends on FB_MSM_MDSS_HDMI_PANEL && (SND_SOC_APQ8084 || SND_SOC_MSM8994 || SND_SOC_MSM8996 || SND_SOC_MSM8998)
+       depends on FB_MSM_MDSS_HDMI_PANEL && (SND_SOC_APQ8084 || SND_SOC_MSM8994 || SND_SOC_MSM8996 || SND_SOC_MSM8998 || SND_SOC_SDM660_COMMON)
        help
        HDMI audio drivers should be built only if the platform
         supports hdmi panel.
index 8502387..2667d9e 100644 (file)
 #define MSM_SDW_VERSION_1_0 0x0001
 #define MSM_SDW_VERSION_ENTRY_SIZE 32
 
+/*
+ * 200 milliseconds is sufficient for DSP bring-up in the modem
+ * after Subsystem Restart (SSR)
+ */
+#define ADSP_STATE_READY_TIMEOUT_MS 200
+
 static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
 static struct snd_soc_dai_driver msm_sdw_dai[];
-static bool initial_boot = true;
-static bool is_ssr_en;
 static bool skip_irq = true;
 
 static int msm_sdw_config_ear_spkr_gain(struct snd_soc_codec *codec,
@@ -1036,6 +1040,13 @@ static int msm_sdw_swrm_read(void *handle, int reg)
                __func__, reg);
        sdw_rd_addr_base = MSM_SDW_AHB_BRIDGE_RD_ADDR_0;
        sdw_rd_data_base = MSM_SDW_AHB_BRIDGE_RD_DATA_0;
+
+       /*
+        * Add sleep as SWR slave access read takes time.
+        * Allow for RD_DONE to complete for previous register if any.
+        */
+       usleep_range(50, 55);
+
        /* read_lock */
        mutex_lock(&msm_sdw->sdw_read_lock);
        ret = regmap_bulk_write(msm_sdw->regmap, sdw_rd_addr_base,
@@ -1629,6 +1640,8 @@ static int msm_sdw_notifier_service_cb(struct notifier_block *nb,
        struct msm_sdw_priv *msm_sdw = container_of(nb,
                                                    struct msm_sdw_priv,
                                                    service_nb);
+       bool adsp_ready = false;
+       unsigned long timeout;
 
        pr_debug("%s: Service opcode 0x%lx\n", __func__, opcode);
 
@@ -1641,15 +1654,34 @@ static int msm_sdw_notifier_service_cb(struct notifier_block *nb,
                                        SWR_DEVICE_DOWN, NULL);
                break;
        case AUDIO_NOTIFIER_SERVICE_UP:
-               if (initial_boot) {
-                       initial_boot = false;
-                       break;
+               if (!q6core_is_adsp_ready()) {
+                       dev_dbg(msm_sdw->dev, "ADSP isn't ready\n");
+                       timeout = jiffies +
+                                 msecs_to_jiffies(ADSP_STATE_READY_TIMEOUT_MS);
+                       while (!time_after(jiffies, timeout)) {
+                               if (!q6core_is_adsp_ready()) {
+                                       dev_dbg(msm_sdw->dev,
+                                               "ADSP isn't ready\n");
+                               } else {
+                                       dev_dbg(msm_sdw->dev,
+                                               "ADSP is ready\n");
+                                       adsp_ready = true;
+                                       goto powerup;
+                               }
+                       }
+               } else {
+                       adsp_ready = true;
+                       dev_dbg(msm_sdw->dev, "%s: DSP is ready\n", __func__);
+               }
+powerup:
+               if (adsp_ready) {
+                       msm_sdw->dev_up = true;
+                       msm_sdw_init_reg(msm_sdw->codec);
+                       regcache_mark_dirty(msm_sdw->regmap);
+                       regcache_sync(msm_sdw->regmap);
+                       msm_sdw_set_spkr_mode(msm_sdw->codec,
+                                             msm_sdw->spkr_mode);
                }
-               msm_sdw->dev_up = true;
-               msm_sdw_init_reg(msm_sdw->codec);
-               regcache_mark_dirty(msm_sdw->regmap);
-               regcache_sync(msm_sdw->regmap);
-               msm_sdw_set_spkr_mode(msm_sdw->codec, msm_sdw->spkr_mode);
                break;
        default:
                break;
@@ -1676,17 +1708,14 @@ static int msm_sdw_codec_probe(struct snd_soc_codec *codec)
        msm_sdw_init_reg(codec);
        msm_sdw->version = MSM_SDW_VERSION_1_0;
 
-       if (is_ssr_en) {
-               msm_sdw->service_nb.notifier_call = msm_sdw_notifier_service_cb;
-               ret = audio_notifier_register("msm_sdw",
-                                       AUDIO_NOTIFIER_ADSP_DOMAIN,
-                                       &msm_sdw->service_nb);
-               if (ret < 0)
-                       dev_err(msm_sdw->dev,
-                               "%s: Audio notifier register failed ret = %d\n",
-                               __func__, ret);
-       }
-
+       msm_sdw->service_nb.notifier_call = msm_sdw_notifier_service_cb;
+       ret = audio_notifier_register("msm_sdw",
+                               AUDIO_NOTIFIER_ADSP_DOMAIN,
+                               &msm_sdw->service_nb);
+       if (ret < 0)
+               dev_err(msm_sdw->dev,
+                       "%s: Audio notifier register failed ret = %d\n",
+                       __func__, ret);
        return 0;
 }
 
index 8f7db4d..5ecd787 100644 (file)
@@ -93,8 +93,6 @@ static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
 static struct snd_soc_dai_driver msm_anlg_cdc_i2s_dai[];
 /* By default enable the internal speaker boost */
 static bool spkr_boost_en = true;
-static bool initial_boot = true;
-static bool is_ssr_en;
 
 static char on_demand_supply_name[][MAX_ON_DEMAND_SUPPLY_NAME_LENGTH] = {
        "cdc-vdd-mic-bias",
@@ -1453,7 +1451,6 @@ static int msm_anlg_cdc_codec_enable_clock_block(struct snd_soc_codec *codec,
        } else {
                snd_soc_update_bits(codec,
                        MSM89XX_PMIC_DIGITAL_CDC_TOP_CLK_CTL, 0x0C, 0x00);
-               msm_anlg_cdc_dig_notifier_call(codec, DIG_CDC_EVENT_CLK_OFF);
        }
        return 0;
 }
@@ -3500,18 +3497,24 @@ static const struct snd_soc_dapm_widget msm_anlg_cdc_dapm_widgets[] = {
        SND_SOC_DAPM_INPUT("AMIC1"),
        SND_SOC_DAPM_INPUT("AMIC2"),
        SND_SOC_DAPM_INPUT("AMIC3"),
-       SND_SOC_DAPM_INPUT("PDM_IN_RX1"),
-       SND_SOC_DAPM_INPUT("PDM_IN_RX2"),
-       SND_SOC_DAPM_INPUT("PDM_IN_RX3"),
+       SND_SOC_DAPM_AIF_IN("PDM_IN_RX1", "PDM Playback",
+               0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_IN("PDM_IN_RX2", "PDM Playback",
+               0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_IN("PDM_IN_RX3", "PDM Playback",
+               0, SND_SOC_NOPM, 0, 0),
 
        SND_SOC_DAPM_OUTPUT("EAR"),
        SND_SOC_DAPM_OUTPUT("WSA_SPK OUT"),
        SND_SOC_DAPM_OUTPUT("HEADPHONE"),
        SND_SOC_DAPM_OUTPUT("SPK_OUT"),
        SND_SOC_DAPM_OUTPUT("LINEOUT"),
-       SND_SOC_DAPM_OUTPUT("ADC1_OUT"),
-       SND_SOC_DAPM_OUTPUT("ADC2_OUT"),
-       SND_SOC_DAPM_OUTPUT("ADC3_OUT"),
+       SND_SOC_DAPM_AIF_OUT("ADC1_OUT", "PDM Capture",
+               0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_OUT("ADC2_OUT", "PDM Capture",
+               0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_OUT("ADC3_OUT", "PDM Capture",
+               0, SND_SOC_NOPM, 0, 0),
 };
 
 static const struct sdm660_cdc_reg_mask_val msm_anlg_cdc_reg_defaults[] = {
@@ -3772,7 +3775,6 @@ static int msm_anlg_cdc_device_down(struct snd_soc_codec *codec)
        snd_soc_write(codec,
                MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x93);
 
-       msm_anlg_cdc_bringup(codec);
        atomic_set(&pdata->int_mclk0_enabled, false);
        msm_anlg_cdc_dig_notifier_call(codec, DIG_CDC_EVENT_SSR_DOWN);
        set_bit(BUS_DOWN, &sdm660_cdc_priv->status_mask);
@@ -3794,14 +3796,6 @@ static int msm_anlg_cdc_device_up(struct snd_soc_codec *codec)
        /* delay is required to make sure sound card state updated */
        usleep_range(5000, 5100);
 
-       msm_anlg_cdc_codec_init_reg(codec);
-       msm_anlg_cdc_update_reg_defaults(codec);
-
-       regcache_mark_dirty(codec->component.regmap);
-       regcache_sync_region(codec->component.regmap,
-                            MSM89XX_PMIC_DIGITAL_REVISION1,
-                            MSM89XX_PMIC_CDC_MAX_REGISTER);
-
        snd_soc_write(codec, MSM89XX_PMIC_DIGITAL_INT_EN_SET,
                                MSM89XX_PMIC_DIGITAL_INT_EN_SET__POR);
        snd_soc_write(codec, MSM89XX_PMIC_DIGITAL_INT_EN_CLR,
@@ -3850,10 +3844,6 @@ static int sdm660_cdc_notifier_service_cb(struct notifier_block *nb,
                msm_anlg_cdc_device_down(codec);
                break;
        case AUDIO_NOTIFIER_SERVICE_UP:
-               if (initial_boot) {
-                       initial_boot = false;
-                       break;
-               }
                dev_dbg(codec->dev,
                        "ADSP is about to power up. bring up codec\n");
 
@@ -4053,17 +4043,16 @@ int msm_anlg_codec_info_create_codec_entry(struct snd_info_entry *codec_root,
                return -ENOMEM;
        }
        sdm660_cdc_priv->version_entry = version_entry;
-       if (is_ssr_en) {
-               sdm660_cdc_priv->audio_ssr_nb.notifier_call =
-                                       sdm660_cdc_notifier_service_cb;
-               ret = audio_notifier_register("pmic_analog_cdc",
-                                             AUDIO_NOTIFIER_ADSP_DOMAIN,
-                                             &sdm660_cdc_priv->audio_ssr_nb);
-               if (ret < 0) {
-                       pr_err("%s: Audio notifier register failed ret = %d\n",
-                               __func__, ret);
-                       return ret;
-               }
+
+       sdm660_cdc_priv->audio_ssr_nb.notifier_call =
+                               sdm660_cdc_notifier_service_cb;
+       ret = audio_notifier_register("pmic_analog_cdc",
+                                     AUDIO_NOTIFIER_ADSP_DOMAIN,
+                                     &sdm660_cdc_priv->audio_ssr_nb);
+       if (ret < 0) {
+               pr_err("%s: Audio notifier register failed ret = %d\n",
+                       __func__, ret);
+               return ret;
        }
        return 0;
 }
index 5e078db..f1c3b40 100644 (file)
@@ -74,6 +74,7 @@ static int msm_digcdc_clock_control(bool flag)
 
        pdata = snd_soc_card_get_drvdata(registered_digcodec->component.card);
 
+       mutex_lock(&pdata->cdc_int_mclk0_mutex);
        if (flag) {
                if (atomic_read(&pdata->int_mclk0_enabled) == false) {
                        pdata->digital_cdc_core_clk.enable = 1;
@@ -83,6 +84,7 @@ static int msm_digcdc_clock_control(bool flag)
                        if (ret < 0) {
                                pr_err("%s:failed to enable the MCLK\n",
                                       __func__);
+                               mutex_unlock(&pdata->cdc_int_mclk0_mutex);
                                return ret;
                        }
                        pr_debug("enabled digital codec core clk\n");
@@ -94,6 +96,7 @@ static int msm_digcdc_clock_control(bool flag)
                dev_dbg(registered_digcodec->dev,
                        "disable MCLK, workq to disable set already\n");
        }
+       mutex_unlock(&pdata->cdc_int_mclk0_mutex);
        return 0;
 }
 
index faa4430..20dc350 100644 (file)
@@ -216,9 +216,6 @@ static bool wsa881x_readable_register(struct device *dev, unsigned int reg)
 
 static bool wsa881x_volatile_register(struct device *dev, unsigned int reg)
 {
-       if (cache_always)
-               return false;
-
        switch (reg) {
        case WSA881X_CHIP_ID0:
        case WSA881X_CHIP_ID1:
index ba74175..62547f2 100644 (file)
@@ -78,7 +78,6 @@ enum {
        WSA881X_DEV_UP,
 };
 
-bool cache_always;
 /*
  * Private data Structure for wsa881x. All parameters related to
  * WSA881X codec needs to be defined here.
@@ -1226,9 +1225,6 @@ static int wsa881x_swr_probe(struct swr_device *pdev)
                if (ret)
                        goto err;
        }
-
-       cache_always = of_property_read_bool(pdev->dev.of_node,
-                                       "qcom,cache-always");
        wsa881x_gpio_ctrl(wsa881x, true);
        wsa881x->state = WSA881X_DEV_UP;
 
index 1782375..9bd9f95 100644 (file)
@@ -20,7 +20,6 @@
 
 #define WSA881X_MAX_SWR_PORTS   4
 
-extern bool cache_always;
 extern int wsa881x_set_channel_map(struct snd_soc_codec *codec, u8 *port,
                                u8 num_port, unsigned int *ch_mask,
                                unsigned int *ch_rate);
index 6ecd300..eb650d1 100644 (file)
@@ -30,16 +30,6 @@ config SND_SOC_QDSP_DEBUG
         is inducing kernel panic upon encountering critical
         errors from DSP audio modules
 
-config DOLBY_DAP
-       bool "Enable Dolby DAP"
-       depends on SND_SOC_MSM_QDSP6V2_INTF
-       help
-        To add support for dolby DAP post processing.
-        This support is to configure the post processing parameters
-        to DSP. The configuration includes sending the end point
-        device, end point dependent post processing parameters and
-        the various posrt processing parameters
-
 config DOLBY_DS2
        bool "Enable Dolby DS2"
        depends on SND_SOC_MSM_QDSP6V2_INTF
@@ -122,9 +112,10 @@ config SND_SOC_INT_CODEC
        select MSM_CDC_PINCTRL
        select SND_SOC_MSM_SDW
        select SND_SOC_SDM660_CDC
+       select SND_SOC_MSM_HDMI_CODEC_RX
        select QTI_PP
        select DTS_SRS_TM
-       select DOLBY_DS2
+       select DOLBY_LICENSE
        select SND_HWDEP
        select MSM_ULTRASOUND
        select DTS_EAGLE
@@ -153,10 +144,11 @@ config SND_SOC_EXT_CODEC
        select SND_SOC_WCD9335
        select SND_SOC_WCD934X
        select SND_SOC_WSA881X
+       select SND_SOC_MSM_HDMI_CODEC_RX
        select MFD_CORE
        select QTI_PP
        select DTS_SRS_TM
-       select DOLBY_DS2
+       select DOLBY_LICENSE
        select SND_SOC_CPE
        select SND_SOC_WCD_CPE
        select SND_HWDEP
index e6fa114..e7b51c5 100644 (file)
@@ -421,7 +421,8 @@ static char const *usb_sample_rate_text[] = {"KHZ_8", "KHZ_11P025",
                                        "KHZ_88P2", "KHZ_96", "KHZ_176P4",
                                        "KHZ_192", "KHZ_352P8", "KHZ_384"};
 static char const *ext_disp_sample_rate_text[] = {"KHZ_48", "KHZ_96",
-                                                 "KHZ_192"};
+                                       "KHZ_192", "KHZ_32", "KHZ_44P1",
+                                       "KHZ_88P2", "KHZ_176P4"};
 static char const *tdm_ch_text[] = {"One", "Two", "Three", "Four",
                                    "Five", "Six", "Seven", "Eight"};
 static char const *tdm_bit_format_text[] = {"S16_LE", "S24_LE", "S32_LE"};
@@ -1500,6 +1501,22 @@ static int ext_disp_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
                return idx;
 
        switch (ext_disp_rx_cfg[idx].sample_rate) {
+       case SAMPLING_RATE_176P4KHZ:
+               sample_rate_val = 6;
+               break;
+
+       case SAMPLING_RATE_88P2KHZ:
+               sample_rate_val = 5;
+               break;
+
+       case SAMPLING_RATE_44P1KHZ:
+               sample_rate_val = 4;
+               break;
+
+       case SAMPLING_RATE_32KHZ:
+               sample_rate_val = 3;
+               break;
+
        case SAMPLING_RATE_192KHZ:
                sample_rate_val = 2;
                break;
@@ -1530,6 +1547,18 @@ static int ext_disp_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
                return idx;
 
        switch (ucontrol->value.integer.value[0]) {
+       case 6:
+               ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_176P4KHZ;
+               break;
+       case 5:
+               ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_88P2KHZ;
+               break;
+       case 4:
+               ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_44P1KHZ;
+               break;
+       case 3:
+               ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_32KHZ;
+               break;
        case 2:
                ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_192KHZ;
                break;
index 469ab1a..d4db55f 100644 (file)
@@ -10,7 +10,6 @@ obj-$(CONFIG_SND_SOC_QDSP6V2) += snd-soc-qdsp6v2.o msm-pcm-dtmf-v2.o \
                                 msm-dai-stub-v2.o
 obj-$(CONFIG_SND_HWDEP) += msm-pcm-routing-devdep.o
 obj-$(CONFIG_DTS_EAGLE) += msm-dts-eagle.o
-obj-$(CONFIG_DOLBY_DAP) += msm-dolby-dap-config.o
 obj-$(CONFIG_DOLBY_DS2) += msm-ds2-dap-config.o
 obj-$(CONFIG_DOLBY_LICENSE) += msm-ds2-dap-config.o
 obj-$(CONFIG_DTS_SRS_TM) += msm-dts-srs-tm-config.o
diff --git a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
deleted file mode 100644 (file)
index 4664d39..0000000
+++ /dev/null
@@ -1,1092 +0,0 @@
-/* Copyright (c) 2013-2014,2016-2017, The Linux Foundation. All rights reserved.
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License version 2 and
-* only version 2 as published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-* GNU General Public License for more details.
-*/
-
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/bitops.h>
-#include <sound/control.h>
-#include <sound/q6adm-v2.h>
-#include <sound/q6core.h>
-
-#include "msm-dolby-dap-config.h"
-
-#ifndef DOLBY_PARAM_VCNB_MAX_LENGTH
-#define DOLBY_PARAM_VCNB_MAX_LENGTH 40
-#endif
-
-/* dolby endp based parameters */
-struct dolby_dap_endp_params_s {
-       int device;
-       int device_ch_caps;
-       int dap_device;
-       int params_id[DOLBY_NUM_ENDP_DEPENDENT_PARAMS];
-       int params_len[DOLBY_NUM_ENDP_DEPENDENT_PARAMS];
-       int params_offset[DOLBY_NUM_ENDP_DEPENDENT_PARAMS];
-       int params_val[DOLBY_ENDDEP_PARAM_LENGTH];
-};
-
-const struct dolby_dap_endp_params_s
-                       dolby_dap_endp_params[NUM_DOLBY_ENDP_DEVICE] = {
-       {EARPIECE, 2, DOLBY_ENDP_EXT_SPEAKERS,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-320, -320, 144}
-       },
-       {SPEAKER, 2, DOLBY_ENDP_INT_SPEAKERS,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-320, -320, 144}
-       },
-       {WIRED_HEADSET, 2, DOLBY_ENDP_HEADPHONES,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-320, -320, 144}
-       },
-       {WIRED_HEADPHONE, 2, DOLBY_ENDP_HEADPHONES,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-320, -320, 144}
-       },
-       {BLUETOOTH_SCO, 2, DOLBY_ENDP_EXT_SPEAKERS,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-320, -320, 144}
-       },
-       {BLUETOOTH_SCO_HEADSET, 2, DOLBY_ENDP_EXT_SPEAKERS,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-320, -320, 144}
-       },
-       {BLUETOOTH_SCO_CARKIT, 2, DOLBY_ENDP_EXT_SPEAKERS,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-320, -320, 144}
-       },
-       {BLUETOOTH_A2DP, 2, DOLBY_ENDP_EXT_SPEAKERS,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-320, -320, 144}
-       },
-       {BLUETOOTH_A2DP_HEADPHONES, 2, DOLBY_ENDP_HEADPHONES,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-320, -320, 144}
-       },
-       {BLUETOOTH_A2DP_SPEAKER, 2, DOLBY_ENDP_EXT_SPEAKERS,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-320, -320, 144}
-       },
-       {AUX_DIGITAL, 2, DOLBY_ENDP_HDMI,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-496, -496, 0}
-       },
-       {AUX_DIGITAL, 6, DOLBY_ENDP_HDMI,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-496, -496, 0}
-       },
-       {AUX_DIGITAL, 8, DOLBY_ENDP_HDMI,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-496, -496, 0}
-       },
-       {ANLG_DOCK_HEADSET, 2, DOLBY_ENDP_HEADPHONES,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-320, -320, 144}
-       },
-       {DGTL_DOCK_HEADSET, 2, DOLBY_ENDP_HEADPHONES,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-320, -320, 144}
-       },
-       {USB_ACCESSORY, 2, DOLBY_ENDP_EXT_SPEAKERS,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-320, -320, 144}
-       },
-       {USB_DEVICE, 2, DOLBY_ENDP_EXT_SPEAKERS,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-320, -320, 144}
-       },
-       {REMOTE_SUBMIX, 2, DOLBY_ENDP_EXT_SPEAKERS,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-320, -320, 144}
-       },
-       {PROXY, 2, DOLBY_ENDP_EXT_SPEAKERS,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-320, -320, 144}
-       },
-       {PROXY, 6, DOLBY_ENDP_EXT_SPEAKERS,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-320, -320, 144}
-       },
-       {FM, 2, DOLBY_ENDP_EXT_SPEAKERS,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-320, -320, 144}
-       },
-       {FM_TX, 2, DOLBY_ENDP_EXT_SPEAKERS,
-               {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
-               {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
-                DOLBY_ENDDEP_PARAM_VMB_LENGTH},
-               {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
-                DOLBY_ENDDEP_PARAM_VMB_OFFSET},
-               {-320, -320, 144}
-       },
-};
-
-/* dolby param ids to/from dsp */
-static uint32_t        dolby_dap_params_id[ALL_DOLBY_PARAMS] = {
-       DOLBY_PARAM_ID_VDHE, DOLBY_PARAM_ID_VSPE, DOLBY_PARAM_ID_DSSF,
-       DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLE,
-       DOLBY_PARAM_ID_DVMC, DOLBY_PARAM_ID_DVME, DOLBY_PARAM_ID_IENB,
-       DOLBY_PARAM_ID_IEBF, DOLBY_PARAM_ID_IEON, DOLBY_PARAM_ID_DEON,
-       DOLBY_PARAM_ID_NGON, DOLBY_PARAM_ID_GEON, DOLBY_PARAM_ID_GENB,
-       DOLBY_PARAM_ID_GEBF, DOLBY_PARAM_ID_AONB, DOLBY_PARAM_ID_AOBF,
-       DOLBY_PARAM_ID_AOBG, DOLBY_PARAM_ID_AOON, DOLBY_PARAM_ID_ARNB,
-       DOLBY_PARAM_ID_ARBF, DOLBY_PARAM_ID_PLB,  DOLBY_PARAM_ID_PLMD,
-       DOLBY_PARAM_ID_DHSB, DOLBY_PARAM_ID_DHRG, DOLBY_PARAM_ID_DSSB,
-       DOLBY_PARAM_ID_DSSA, DOLBY_PARAM_ID_DVLA, DOLBY_PARAM_ID_IEBT,
-       DOLBY_PARAM_ID_IEA,  DOLBY_PARAM_ID_DEA,  DOLBY_PARAM_ID_DED,
-       DOLBY_PARAM_ID_GEBG, DOLBY_PARAM_ID_AOCC, DOLBY_PARAM_ID_ARBI,
-       DOLBY_PARAM_ID_ARBL, DOLBY_PARAM_ID_ARBH, DOLBY_PARAM_ID_AROD,
-       DOLBY_PARAM_ID_ARTP, DOLBY_PARAM_ID_VMON, DOLBY_PARAM_ID_VMB,
-       DOLBY_PARAM_ID_VCNB, DOLBY_PARAM_ID_VCBF, DOLBY_PARAM_ID_PREG,
-       DOLBY_PARAM_ID_VEN,  DOLBY_PARAM_ID_PSTG, DOLBY_COMMIT_ALL_TO_DSP,
-       DOLBY_COMMIT_TO_DSP, DOLBY_USE_CACHE, DOLBY_AUTO_ENDP,
-       DOLBY_AUTO_ENDDEP_PARAMS
-};
-
-/* modifed state:      0x00000000 - Not updated
-*                      > 0x00000000 && < 0x00010000
-*                              Updated and not commited to DSP
-*                      0x00010001 - Updated and commited to DSP
-*                      > 0x00010001 - Modified the commited value
-*/
-static int dolby_dap_params_modified[MAX_DOLBY_PARAMS] = { 0 };
-/* param offset */
-static uint32_t        dolby_dap_params_offset[MAX_DOLBY_PARAMS] = {
-       DOLBY_PARAM_VDHE_OFFSET, DOLBY_PARAM_VSPE_OFFSET,
-       DOLBY_PARAM_DSSF_OFFSET, DOLBY_PARAM_DVLI_OFFSET,
-       DOLBY_PARAM_DVLO_OFFSET, DOLBY_PARAM_DVLE_OFFSET,
-       DOLBY_PARAM_DVMC_OFFSET, DOLBY_PARAM_DVME_OFFSET,
-       DOLBY_PARAM_IENB_OFFSET, DOLBY_PARAM_IEBF_OFFSET,
-       DOLBY_PARAM_IEON_OFFSET, DOLBY_PARAM_DEON_OFFSET,
-       DOLBY_PARAM_NGON_OFFSET, DOLBY_PARAM_GEON_OFFSET,
-       DOLBY_PARAM_GENB_OFFSET, DOLBY_PARAM_GEBF_OFFSET,
-       DOLBY_PARAM_AONB_OFFSET, DOLBY_PARAM_AOBF_OFFSET,
-       DOLBY_PARAM_AOBG_OFFSET, DOLBY_PARAM_AOON_OFFSET,
-       DOLBY_PARAM_ARNB_OFFSET, DOLBY_PARAM_ARBF_OFFSET,
-       DOLBY_PARAM_PLB_OFFSET,  DOLBY_PARAM_PLMD_OFFSET,
-       DOLBY_PARAM_DHSB_OFFSET, DOLBY_PARAM_DHRG_OFFSET,
-       DOLBY_PARAM_DSSB_OFFSET, DOLBY_PARAM_DSSA_OFFSET,
-       DOLBY_PARAM_DVLA_OFFSET, DOLBY_PARAM_IEBT_OFFSET,
-       DOLBY_PARAM_IEA_OFFSET,  DOLBY_PARAM_DEA_OFFSET,
-       DOLBY_PARAM_DED_OFFSET,  DOLBY_PARAM_GEBG_OFFSET,
-       DOLBY_PARAM_AOCC_OFFSET, DOLBY_PARAM_ARBI_OFFSET,
-       DOLBY_PARAM_ARBL_OFFSET, DOLBY_PARAM_ARBH_OFFSET,
-       DOLBY_PARAM_AROD_OFFSET, DOLBY_PARAM_ARTP_OFFSET,
-       DOLBY_PARAM_VMON_OFFSET, DOLBY_PARAM_VMB_OFFSET,
-       DOLBY_PARAM_VCNB_OFFSET, DOLBY_PARAM_VCBF_OFFSET,
-       DOLBY_PARAM_PREG_OFFSET, DOLBY_PARAM_VEN_OFFSET,
-       DOLBY_PARAM_PSTG_OFFSET
-};
-/* param_length */
-static uint32_t        dolby_dap_params_length[MAX_DOLBY_PARAMS] = {
-       DOLBY_PARAM_VDHE_LENGTH, DOLBY_PARAM_VSPE_LENGTH,
-       DOLBY_PARAM_DSSF_LENGTH, DOLBY_PARAM_DVLI_LENGTH,
-       DOLBY_PARAM_DVLO_LENGTH, DOLBY_PARAM_DVLE_LENGTH,
-       DOLBY_PARAM_DVMC_LENGTH, DOLBY_PARAM_DVME_LENGTH,
-       DOLBY_PARAM_IENB_LENGTH, DOLBY_PARAM_IEBF_LENGTH,
-       DOLBY_PARAM_IEON_LENGTH, DOLBY_PARAM_DEON_LENGTH,
-       DOLBY_PARAM_NGON_LENGTH, DOLBY_PARAM_GEON_LENGTH,
-       DOLBY_PARAM_GENB_LENGTH, DOLBY_PARAM_GEBF_LENGTH,
-       DOLBY_PARAM_AONB_LENGTH, DOLBY_PARAM_AOBF_LENGTH,
-       DOLBY_PARAM_AOBG_LENGTH, DOLBY_PARAM_AOON_LENGTH,
-       DOLBY_PARAM_ARNB_LENGTH, DOLBY_PARAM_ARBF_LENGTH,
-       DOLBY_PARAM_PLB_LENGTH,  DOLBY_PARAM_PLMD_LENGTH,
-       DOLBY_PARAM_DHSB_LENGTH, DOLBY_PARAM_DHRG_LENGTH,
-       DOLBY_PARAM_DSSB_LENGTH, DOLBY_PARAM_DSSA_LENGTH,
-       DOLBY_PARAM_DVLA_LENGTH, DOLBY_PARAM_IEBT_LENGTH,
-       DOLBY_PARAM_IEA_LENGTH,  DOLBY_PARAM_DEA_LENGTH,
-       DOLBY_PARAM_DED_LENGTH,  DOLBY_PARAM_GEBG_LENGTH,
-       DOLBY_PARAM_AOCC_LENGTH, DOLBY_PARAM_ARBI_LENGTH,
-       DOLBY_PARAM_ARBL_LENGTH, DOLBY_PARAM_ARBH_LENGTH,
-       DOLBY_PARAM_AROD_LENGTH, DOLBY_PARAM_ARTP_LENGTH,
-       DOLBY_PARAM_VMON_LENGTH, DOLBY_PARAM_VMB_LENGTH,
-       DOLBY_PARAM_VCNB_LENGTH, DOLBY_PARAM_VCBF_LENGTH,
-       DOLBY_PARAM_PREG_LENGTH, DOLBY_PARAM_VEN_LENGTH,
-       DOLBY_PARAM_PSTG_LENGTH
-};
-
-/* param_value */
-static uint32_t        dolby_dap_params_value[TOTAL_LENGTH_DOLBY_PARAM] = {0};
-
-struct dolby_dap_params_get_s {
-       int32_t  port_id;
-       uint32_t device_id;
-       uint32_t param_id;
-       uint32_t offset;
-       uint32_t length;
-};
-
-struct dolby_dap_params_states_s {
-       bool use_cache;
-       bool auto_endp;
-       bool enddep_params;
-       int  port_id[AFE_MAX_PORTS];
-       int  copp_idx[AFE_MAX_PORTS];
-       int  port_open_count;
-       int  port_ids_dolby_can_be_enabled;
-       int  device;
-};
-
-static struct dolby_dap_params_get_s dolby_dap_params_get = {-1, DEVICE_OUT_ALL,
-                                                            0, 0, 0};
-static struct dolby_dap_params_states_s dolby_dap_params_states = { true, true,
-                                               true, {DOLBY_INVALID_PORT_ID},
-                                               {-1}, 0, DEVICE_OUT_ALL, 0 };
-/*
-port_ids_dolby_can_be_enabled is set to 0x7FFFFFFF.
-this needs to be removed after interface validation
-*/
-
-static int msm_dolby_dap_map_device_to_dolby_endpoint(int device)
-{
-       int i, dolby_dap_device = DOLBY_ENDP_EXT_SPEAKERS;
-       for (i = 0; i < NUM_DOLBY_ENDP_DEVICE; i++) {
-               if (dolby_dap_endp_params[i].device == device) {
-                       dolby_dap_device = dolby_dap_endp_params[i].dap_device;
-                       break;
-               }
-       }
-       /* default the endpoint to speaker if corresponding device entry */
-       /* not found */
-       if (i >= NUM_DOLBY_ENDP_DEVICE)
-               dolby_dap_params_states.device = SPEAKER;
-       return dolby_dap_device;
-}
-
-static int msm_dolby_dap_send_end_point(int port_id, int copp_idx)
-{
-       int rc = 0;
-       char *params_value;
-       int *update_params_value;
-       uint32_t params_length = (DOLBY_PARAM_INT_ENDP_LENGTH +
-                               DOLBY_PARAM_PAYLOAD_SIZE) * sizeof(uint32_t);
-
-       pr_debug("%s\n", __func__);
-       params_value = kzalloc(params_length, GFP_KERNEL);
-       if (!params_value) {
-               pr_err("%s, params memory alloc failed", __func__);
-               return -ENOMEM;
-       }
-       update_params_value = (int *)params_value;
-       *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
-       *update_params_value++ = DOLBY_PARAM_ID_INIT_ENDP;
-       *update_params_value++ = DOLBY_PARAM_INT_ENDP_LENGTH * sizeof(uint32_t);
-       *update_params_value++ =
-                msm_dolby_dap_map_device_to_dolby_endpoint(
-                                               dolby_dap_params_states.device);
-       rc = adm_dolby_dap_send_params(port_id, copp_idx, params_value,
-                                      params_length);
-       if (rc) {
-               pr_err("%s: send dolby params failed\n", __func__);
-               rc = -EINVAL;
-       }
-       kfree(params_value);
-       return rc;
-}
-
-static int msm_dolby_dap_send_enddep_params(int port_id, int copp_idx,
-                                           int device_channels)
-{
-       int i, j, rc = 0, idx, offset;
-       char *params_value;
-       int *update_params_value;
-       uint32_t params_length = (DOLBY_ENDDEP_PARAM_LENGTH +
-                                       DOLBY_NUM_ENDP_DEPENDENT_PARAMS *
-                                       DOLBY_PARAM_PAYLOAD_SIZE) *
-                               sizeof(uint32_t);
-
-       pr_debug("%s\n", __func__);
-       params_value = kzalloc(params_length, GFP_KERNEL);
-       if (!params_value) {
-               pr_err("%s, params memory alloc failed", __func__);
-               return -ENOMEM;
-       }
-       update_params_value = (int *)params_value;
-       for (idx = 0; idx < NUM_DOLBY_ENDP_DEVICE; idx++) {
-               if (dolby_dap_endp_params[idx].device ==
-                       dolby_dap_params_states.device) {
-                       if (dolby_dap_params_states.device == AUX_DIGITAL ||
-                           dolby_dap_params_states.device == PROXY) {
-                               if (dolby_dap_endp_params[idx].device_ch_caps ==
-                                       device_channels)
-                                       break;
-                       } else {
-                               break;
-                       }
-               }
-       }
-       if (idx >= NUM_DOLBY_ENDP_DEVICE) {
-               pr_err("%s: device is not set accordingly\n", __func__);
-               kfree(params_value);
-               return -EINVAL;
-       }
-       for (i = 0; i < DOLBY_ENDDEP_PARAM_LENGTH; i++) {
-               *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
-               *update_params_value++ =
-                               dolby_dap_endp_params[idx].params_id[i];
-               *update_params_value++ =
-                       dolby_dap_endp_params[idx].params_len[i] *
-                               sizeof(uint32_t);
-               offset = dolby_dap_endp_params[idx].params_offset[i];
-               for (j = 0; j < dolby_dap_endp_params[idx].params_len[i]; j++)
-                       *update_params_value++ =
-                               dolby_dap_endp_params[idx].params_val[offset+j];
-       }
-       rc = adm_dolby_dap_send_params(port_id, copp_idx, params_value,
-                                      params_length);
-       if (rc) {
-               pr_err("%s: send dolby params failed\n", __func__);
-               rc = -EINVAL;
-       }
-       kfree(params_value);
-       return rc;
-}
-
-static int msm_dolby_dap_send_cached_params(int port_id, int copp_idx,
-                                           int commit)
-{
-       char *params_value;
-       int *update_params_value, rc = 0;
-       uint32_t index_offset, i, j;
-       uint32_t params_length = (TOTAL_LENGTH_DOLBY_PARAM +
-                               MAX_DOLBY_PARAMS * DOLBY_PARAM_PAYLOAD_SIZE) *
-                               sizeof(uint32_t);
-
-       params_value = kzalloc(params_length, GFP_KERNEL);
-       if (!params_value) {
-               pr_err("%s, params memory alloc failed\n", __func__);
-               return -ENOMEM;
-       }
-       update_params_value = (int *)params_value;
-       params_length = 0;
-       for (i = 0; i < MAX_DOLBY_PARAMS; i++) {
-               if ((dolby_dap_params_modified[i] == 0) ||
-                   ((commit) &&
-                    ((dolby_dap_params_modified[i] & 0x00010000) &&
-                    ((dolby_dap_params_modified[i] & 0x0000FFFF) <= 1))))
-                       continue;
-               *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
-               *update_params_value++ = dolby_dap_params_id[i];
-               *update_params_value++ = dolby_dap_params_length[i] *
-                                               sizeof(uint32_t);
-               index_offset = dolby_dap_params_offset[i];
-               for (j = 0; j < dolby_dap_params_length[i]; j++) {
-                       *update_params_value++ =
-                                       dolby_dap_params_value[index_offset+j];
-               }
-               params_length += (DOLBY_PARAM_PAYLOAD_SIZE +
-                               dolby_dap_params_length[i]) * sizeof(uint32_t);
-       }
-       pr_debug("%s, valid param length: %d", __func__, params_length);
-       if (params_length) {
-               rc = adm_dolby_dap_send_params(port_id, copp_idx, params_value,
-                                               params_length);
-               if (rc) {
-                       pr_err("%s: send dolby params failed\n", __func__);
-                       kfree(params_value);
-                       return -EINVAL;
-               }
-               for (i = 0; i < MAX_DOLBY_PARAMS; i++) {
-                       if ((dolby_dap_params_modified[i] == 0) ||
-                           ((commit) &&
-                            ((dolby_dap_params_modified[i] & 0x00010000) &&
-                            ((dolby_dap_params_modified[i] & 0x0000FFFF) <= 1))
-                           ))
-                               continue;
-                       dolby_dap_params_modified[i] = 0x00010001;
-               }
-       }
-       kfree(params_value);
-       return 0;
-}
-
-int msm_dolby_dap_init(int port_id, int copp_idx, int channels,
-                      bool is_custom_stereo_on)
-{
-       int ret = 0;
-       int index = adm_validate_and_get_port_index(port_id);
-       if (index < 0) {
-               pr_err("%s: Invalid port idx %d port_id %#x\n", __func__, index,
-                       port_id);
-               return -EINVAL;
-       }
-       if ((port_id != DOLBY_INVALID_PORT_ID) &&
-           (port_id & dolby_dap_params_states.port_ids_dolby_can_be_enabled)) {
-               dolby_dap_params_states.port_id[index] = port_id;
-               dolby_dap_params_states.copp_idx[index] = copp_idx;
-               dolby_dap_params_states.port_open_count++;
-               if (dolby_dap_params_states.auto_endp) {
-                       ret = msm_dolby_dap_send_end_point(port_id, copp_idx);
-                       if (ret) {
-                               pr_err("%s: err sending endppoint\n", __func__);
-                               return ret;
-                       }
-               }
-               if (dolby_dap_params_states.use_cache) {
-                       ret = msm_dolby_dap_send_cached_params(port_id,
-                                                              copp_idx, 0);
-                       if (ret) {
-                               pr_err("%s: err sending cached params\n",
-                                       __func__);
-                               return ret;
-                       }
-               }
-               if (dolby_dap_params_states.enddep_params) {
-                       msm_dolby_dap_send_enddep_params(port_id, copp_idx,
-                                                        channels);
-                       if (ret) {
-                               pr_err("%s: err sending endp dependent params\n",
-                                       __func__);
-                               return ret;
-                       }
-               }
-               if (is_custom_stereo_on)
-                       dolby_dap_set_custom_stereo_onoff(port_id, copp_idx,
-                                                         is_custom_stereo_on);
-       }
-       return ret;
-}
-
-void msm_dolby_dap_deinit(int port_id)
-{
-       int index = adm_validate_and_get_port_index(port_id);
-       if (index < 0) {
-               pr_err("%s: Invalid port idx %d port_id %#x\n", __func__, index,
-                       port_id);
-               return;
-       }
-       dolby_dap_params_states.port_open_count--;
-       if ((dolby_dap_params_states.port_id[index] == port_id) &&
-               (!dolby_dap_params_states.port_open_count)) {
-               dolby_dap_params_states.port_id[index] = DOLBY_INVALID_PORT_ID;
-               dolby_dap_params_states.copp_idx[index] = -1;
-       }
-}
-
-static int msm_dolby_dap_set_vspe_vdhe(int port_id, int copp_idx,
-                                      bool is_custom_stereo_enabled)
-{
-       char *params_value;
-       int *update_params_value, rc = 0;
-       uint32_t index_offset, i, j;
-       uint32_t params_length = (TOTAL_LENGTH_DOLBY_PARAM +
-                               2 * DOLBY_PARAM_PAYLOAD_SIZE) *
-                               sizeof(uint32_t);
-       if (port_id == DOLBY_INVALID_PORT_ID) {
-               pr_err("%s: Not a Dolby topology. Do not set custom stereo mixing\n",
-                       __func__);
-               return -EINVAL;
-       }
-       params_value = kzalloc(params_length, GFP_KERNEL);
-       if (!params_value) {
-               pr_err("%s, params memory alloc failed\n", __func__);
-               return -ENOMEM;
-       }
-       update_params_value = (int *)params_value;
-       params_length = 0;
-       /* for VDHE and VSPE DAP params at index 0 and 1 in table */
-       for (i = 0; i < 2; i++) {
-               *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
-               *update_params_value++ = dolby_dap_params_id[i];
-               *update_params_value++ = dolby_dap_params_length[i] *
-                                       sizeof(uint32_t);
-               index_offset = dolby_dap_params_offset[i];
-               for (j = 0; j < dolby_dap_params_length[i]; j++) {
-                       if (is_custom_stereo_enabled)
-                               *update_params_value++ = 0;
-                       else
-                               *update_params_value++ =
-                                       dolby_dap_params_value[index_offset+j];
-               }
-               params_length += (DOLBY_PARAM_PAYLOAD_SIZE +
-                               dolby_dap_params_length[i]) * sizeof(uint32_t);
-       }
-       pr_debug("%s, valid param length: %d", __func__, params_length);
-       if (params_length) {
-               rc = adm_dolby_dap_send_params(port_id, copp_idx, params_value,
-                                              params_length);
-               if (rc) {
-                       pr_err("%s: send vdhe/vspe params failed with rc=%d\n",
-                               __func__, rc);
-                       kfree(params_value);
-                       return -EINVAL;
-               }
-       }
-       kfree(params_value);
-       return 0;
-}
-
-int dolby_dap_set_custom_stereo_onoff(int port_id, int copp_idx,
-                                     bool is_custom_stereo_enabled)
-{
-       char *params_value;
-       int *update_params_value, rc = 0;
-       uint32_t params_length = (TOTAL_LENGTH_DOLBY_PARAM +
-                               DOLBY_PARAM_PAYLOAD_SIZE) *
-                               sizeof(uint32_t);
-       if (port_id == DOLBY_INVALID_PORT_ID)
-               return -EINVAL;
-
-       msm_dolby_dap_set_vspe_vdhe(port_id, copp_idx,
-                                   is_custom_stereo_enabled);
-       params_value = kzalloc(params_length, GFP_KERNEL);
-       if (!params_value) {
-               pr_err("%s, params memory alloc failed\n", __func__);
-               return -ENOMEM;
-       }
-       update_params_value = (int *)params_value;
-       params_length = 0;
-       *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
-       *update_params_value++ = DOLBY_ENABLE_CUSTOM_STEREO;
-       *update_params_value++ = sizeof(uint32_t);
-       if (is_custom_stereo_enabled)
-               *update_params_value++ = 1;
-       else
-               *update_params_value++ = 0;
-       params_length += (DOLBY_PARAM_PAYLOAD_SIZE + 1) * sizeof(uint32_t);
-       pr_debug("%s, valid param length: %d", __func__, params_length);
-       if (params_length) {
-               rc = adm_dolby_dap_send_params(port_id, copp_idx, params_value,
-                                              params_length);
-               if (rc) {
-                       pr_err("%s: setting ds1 custom stereo param failed with rc=%d\n",
-                               __func__, rc);
-                       kfree(params_value);
-                       return -EINVAL;
-               }
-       }
-       kfree(params_value);
-       return 0;
-}
-
-static int msm_dolby_dap_map_device_to_port_id(int device)
-{
-       int port_id = SLIMBUS_0_RX;
-       device = DEVICE_OUT_ALL;
-       /*update the device when single stream to multiple device is handled*/
-       if (device == DEVICE_OUT_ALL) {
-               port_id = PRIMARY_I2S_RX | SLIMBUS_0_RX | HDMI_RX |
-                               INT_BT_SCO_RX | INT_FM_RX |
-                               RT_PROXY_PORT_001_RX |
-                               AFE_PORT_ID_PRIMARY_PCM_RX |
-                               MI2S_RX | SECONDARY_I2S_RX |
-                               SLIMBUS_1_RX | SLIMBUS_4_RX | SLIMBUS_3_RX |
-                               AFE_PORT_ID_SECONDARY_MI2S_RX;
-       } else {
-               /* update port_id based on the device */
-       }
-       return port_id;
-}
-
-int msm_dolby_dap_param_to_set_control_get(struct snd_kcontrol *kcontrol,
-                                          struct snd_ctl_elem_value *ucontrol)
-{
-       /* not used while setting the parameters */
-       return 0;
-}
-
-int msm_dolby_dap_param_to_set_control_put(struct snd_kcontrol *kcontrol,
-                                          struct snd_ctl_elem_value *ucontrol)
-{
-       int rc = 0, port_id, copp_idx;
-       uint32_t idx, j, current_offset;
-       uint32_t device = ucontrol->value.integer.value[0];
-       uint32_t param_id = ucontrol->value.integer.value[1];
-       uint32_t offset = ucontrol->value.integer.value[2];
-       uint32_t length = ucontrol->value.integer.value[3];
-
-       dolby_dap_params_states.port_ids_dolby_can_be_enabled =
-                               msm_dolby_dap_map_device_to_port_id(device);
-       for (idx = 0; idx < ALL_DOLBY_PARAMS; idx++) {
-               /*paramid from user space*/
-               if (param_id == dolby_dap_params_id[idx])
-                       break;
-       }
-       if (idx > ALL_DOLBY_PARAMS-1) {
-               pr_err("%s: invalid param id 0x%x to set\n", __func__,
-                       param_id);
-               return -EINVAL;
-       }
-       switch (idx) {
-               case DOLBY_COMMIT_ALL_IDX: {
-                       /* COMIIT ALL: Send all parameters to DSP */
-                       pr_debug("%s: COMMIT_ALL recvd\n", __func__);
-                       for (idx = 0; idx < AFE_MAX_PORTS; idx++) {
-                               port_id = dolby_dap_params_states.port_id[idx];
-                               copp_idx =
-                                       dolby_dap_params_states.copp_idx[idx];
-                               if ((copp_idx > 0) &&
-                                   (copp_idx < MAX_COPPS_PER_PORT) &&
-                                   (port_id != DOLBY_INVALID_PORT_ID))
-                                       rc |= msm_dolby_dap_send_cached_params(
-                                                                     port_id,
-                                                                     copp_idx,
-                                                                     0);
-                       }
-               }
-               break;
-               case DOLBY_COMMIT_IDX: {
-                       pr_debug("%s: COMMIT recvd\n", __func__);
-                       /* COMMIT: Send only modified paramters to DSP */
-                       for (idx = 0; idx < AFE_MAX_PORTS; idx++) {
-                               port_id = dolby_dap_params_states.port_id[idx];
-                               copp_idx =
-                                       dolby_dap_params_states.copp_idx[idx];
-                               if ((copp_idx > 0) &&
-                                   (copp_idx < MAX_COPPS_PER_PORT) &&
-                                   (port_id == DOLBY_INVALID_PORT_ID))
-                                       rc |= msm_dolby_dap_send_cached_params(
-                                                                     port_id,
-                                                                     copp_idx,
-                                                                     1);
-                       }
-               }
-               break;
-               case DOLBY_USE_CACHE_IDX: {
-                       pr_debug("%s: USE CACHE recvd val: %ld\n", __func__,
-                               ucontrol->value.integer.value[4]);
-                       dolby_dap_params_states.use_cache =
-                               ucontrol->value.integer.value[4];
-               }
-               break;
-               case DOLBY_AUTO_ENDP_IDX: {
-                       pr_debug("%s: AUTO_ENDP recvd val: %ld\n", __func__,
-                               ucontrol->value.integer.value[4]);
-                       dolby_dap_params_states.auto_endp =
-                               ucontrol->value.integer.value[4];
-               }
-               break;
-               case DOLBY_AUTO_ENDDEP_IDX: {
-                       pr_debug("%s: USE_ENDDEP_PARAMS recvd val: %ld\n",
-                               __func__, ucontrol->value.integer.value[4]);
-                       dolby_dap_params_states.enddep_params =
-                               ucontrol->value.integer.value[4];
-               }
-               break;
-               default: {
-                       /* cache the parameters */
-                       dolby_dap_params_modified[idx] += 1;
-                       current_offset = dolby_dap_params_offset[idx] + offset;
-                       if (current_offset >= TOTAL_LENGTH_DOLBY_PARAM) {
-                               pr_err("%s: invalid offset %d at idx %d\n",
-                               __func__, offset, idx);
-                               return -EINVAL;
-                       }
-                       if ((length == 0) || (current_offset + length - 1
-                               < current_offset) || (current_offset + length
-                               > TOTAL_LENGTH_DOLBY_PARAM)) {
-                               pr_err("%s: invalid length %d at idx %d\n",
-                               __func__, length, idx);
-                               return -EINVAL;
-                       }
-                       dolby_dap_params_length[idx] = length;
-                       pr_debug("%s: param recvd deviceId=0x%x paramId=0x%x offset=%d length=%d\n",
-                               __func__, device, param_id, offset, length);
-                       for (j = 0; j < length; j++) {
-                               dolby_dap_params_value[
-                                       dolby_dap_params_offset[idx] +
-                                       offset + j]
-                               = ucontrol->value.integer.value[4+j];
-                               pr_debug("value[%d]: %ld\n", j,
-                                       ucontrol->value.integer.value[4+j]);
-                       }
-               }
-       }
-
-       return rc;
-}
-
-int msm_dolby_dap_param_to_get_control_get(struct snd_kcontrol *kcontrol,
-                                          struct snd_ctl_elem_value *ucontrol)
-{
-       int rc = 0, i, index;
-       char *params_value;
-       int *update_params_value;
-       uint32_t params_length = DOLBY_MAX_LENGTH_INDIVIDUAL_PARAM *
-                                       sizeof(uint32_t);
-       uint32_t param_payload_len =
-                       DOLBY_PARAM_PAYLOAD_SIZE * sizeof(uint32_t);
-       int port_id = dolby_dap_params_get.port_id, copp_idx;
-
-       if (port_id == DOLBY_INVALID_PORT_ID) {
-               pr_err("%s, port_id not set, do not query ADM\n", __func__);
-               return -EINVAL;
-       }
-       index = adm_validate_and_get_port_index(port_id);
-       if (index < 0) {
-               pr_err("%s: Invalid port idx %d port_id %#x\n", __func__, index,
-                       port_id);
-               return -EINVAL;
-       }
-       copp_idx = dolby_dap_params_states.copp_idx[index];
-       if ((copp_idx < 0) || (copp_idx >= MAX_COPPS_PER_PORT)) {
-               pr_debug("%s: get params called before copp open.copp_idx:%d\n",
-                        __func__, copp_idx);
-               return -EINVAL;
-       }
-       if (dolby_dap_params_get.length > 128 - DOLBY_PARAM_PAYLOAD_SIZE) {
-               pr_err("%s: Incorrect parameter length", __func__);
-               return -EINVAL;
-       }
-       params_value = kzalloc(params_length + param_payload_len, GFP_KERNEL);
-       if (!params_value) {
-               pr_err("%s, params memory alloc failed\n", __func__);
-               return -ENOMEM;
-       }
-       if (DOLBY_PARAM_ID_VER == dolby_dap_params_get.param_id) {
-               rc = adm_get_params(port_id, copp_idx,
-                                   DOLBY_BUNDLE_MODULE_ID, DOLBY_PARAM_ID_VER,
-                                   params_length + param_payload_len,
-                                   params_value);
-       } else {
-               for (i = 0; i < MAX_DOLBY_PARAMS; i++)
-                       if (dolby_dap_params_id[i] ==
-                               dolby_dap_params_get.param_id)
-                               break;
-               if (i > MAX_DOLBY_PARAMS-1) {
-                       pr_err("%s: invalid param id to set", __func__);
-                       rc = -EINVAL;
-               } else {
-                       params_length = dolby_dap_params_length[i] *
-                                               sizeof(uint32_t);
-                       rc = adm_get_params(port_id, copp_idx,
-                                           DOLBY_BUNDLE_MODULE_ID,
-                                           dolby_dap_params_id[i],
-                                           params_length + param_payload_len,
-                                           params_value);
-               }
-       }
-       if (rc) {
-               pr_err("%s: get parameters failed rc:%d\n", __func__, rc);
-               kfree(params_value);
-               return -EINVAL;
-       }
-       update_params_value = (int *)params_value;
-       ucontrol->value.integer.value[0] = dolby_dap_params_get.device_id;
-       ucontrol->value.integer.value[1] = dolby_dap_params_get.param_id;
-       ucontrol->value.integer.value[2] = dolby_dap_params_get.offset;
-       ucontrol->value.integer.value[3] = dolby_dap_params_get.length;
-
-       pr_debug("%s: FROM DSP value[0] 0x%x value[1] %d value[2] 0x%x\n",
-                       __func__, update_params_value[0],
-                       update_params_value[1], update_params_value[2]);
-       for (i = 0; i < dolby_dap_params_get.length; i++) {
-               ucontrol->value.integer.value[DOLBY_PARAM_PAYLOAD_SIZE+i] =
-                       update_params_value[i];
-               pr_debug("value[%d]:%d\n", i, update_params_value[i]);
-       }
-       pr_debug("%s: Returning param_id=0x%x offset=%d length=%d\n",
-                       __func__, dolby_dap_params_get.param_id,
-                       dolby_dap_params_get.offset,
-                       dolby_dap_params_get.length);
-       kfree(params_value);
-       return 0;
-}
-
-int msm_dolby_dap_param_to_get_control_put(struct snd_kcontrol *kcontrol,
-                                          struct snd_ctl_elem_value *ucontrol)
-{
-       int port_id, idx, copp_idx;
-       dolby_dap_params_get.device_id = ucontrol->value.integer.value[0];
-       port_id = msm_dolby_dap_map_device_to_port_id(
-                                               dolby_dap_params_get.device_id);
-       for (idx = 0; idx < AFE_MAX_PORTS; idx++) {
-               port_id = dolby_dap_params_states.port_id[idx];
-               copp_idx = dolby_dap_params_states.copp_idx[idx];
-               if ((copp_idx < 0) ||
-                   (copp_idx >= MAX_COPPS_PER_PORT) ||
-                   (port_id == DOLBY_INVALID_PORT_ID))
-                       continue;
-               else
-                       break;
-       }
-       if (idx == AFE_MAX_PORTS)
-               port_id = SLIMBUS_0_RX;
-       dolby_dap_params_get.port_id = port_id;
-       dolby_dap_params_get.param_id = ucontrol->value.integer.value[1];
-       dolby_dap_params_get.offset = ucontrol->value.integer.value[2];
-       dolby_dap_params_get.length = ucontrol->value.integer.value[3];
-       pr_debug("%s: param_id=0x%x offset=%d length=%d\n", __func__,
-               dolby_dap_params_get.param_id, dolby_dap_params_get.offset,
-               dolby_dap_params_get.length);
-       return 0;
-}
-
-int msm_dolby_dap_param_visualizer_control_get(struct snd_kcontrol *kcontrol,
-                                          struct snd_ctl_elem_value *ucontrol)
-{
-       uint32_t length = dolby_dap_params_value[DOLBY_PARAM_VCNB_OFFSET];
-       char *visualizer_data;
-       int i, rc;
-       int *update_visualizer_data;
-       uint32_t offset, params_length =
-               (2*length + DOLBY_VIS_PARAM_HEADER_SIZE)*sizeof(uint32_t);
-       uint32_t param_payload_len =
-               DOLBY_PARAM_PAYLOAD_SIZE * sizeof(uint32_t);
-       int port_id, copp_idx, idx;
-       if (length > DOLBY_PARAM_VCNB_MAX_LENGTH || length <= 0) {
-               pr_err("%s Incorrect VCNB length", __func__);
-               ucontrol->value.integer.value[0] = 0;
-               return -EINVAL;
-       }
-       for (idx = 0; idx < AFE_MAX_PORTS; idx++) {
-               port_id = dolby_dap_params_states.port_id[idx];
-               copp_idx = dolby_dap_params_states.copp_idx[idx];
-               if ((copp_idx < 0) ||
-                   (copp_idx >= MAX_COPPS_PER_PORT) ||
-                   (port_id == DOLBY_INVALID_PORT_ID))
-                       continue;
-               else
-                       break;
-       }
-       if (idx == AFE_MAX_PORTS) {
-               pr_debug("%s, port_id not set, returning error", __func__);
-               ucontrol->value.integer.value[0] = 0;
-               return -EINVAL;
-       }
-       visualizer_data = kzalloc(params_length, GFP_KERNEL);
-       if (!visualizer_data) {
-               pr_err("%s, params memory alloc failed\n", __func__);
-               return -ENOMEM;
-       }
-       offset = 0;
-       params_length = length * sizeof(uint32_t);
-       rc = adm_get_params(port_id, copp_idx, DOLBY_BUNDLE_MODULE_ID,
-                           DOLBY_PARAM_ID_VCBG,
-                           params_length + param_payload_len,
-                           visualizer_data + offset);
-       if (rc) {
-               pr_err("%s: get parameters failed\n", __func__);
-               kfree(visualizer_data);
-               return -EINVAL;
-       }
-
-       offset = length * sizeof(uint32_t);
-       rc = adm_get_params(port_id, copp_idx, DOLBY_BUNDLE_MODULE_ID,
-                           DOLBY_PARAM_ID_VCBE,
-                           params_length + param_payload_len,
-                           visualizer_data + offset);
-       if (rc) {
-               pr_err("%s: get parameters failed\n", __func__);
-               kfree(visualizer_data);
-               return -EINVAL;
-       }
-
-       ucontrol->value.integer.value[0] = 2*length;
-       pr_debug("%s: visualizer data length %ld\n", __func__,
-                       ucontrol->value.integer.value[0]);
-       update_visualizer_data = (int *)visualizer_data;
-       for (i = 0; i < 2*length; i++) {
-               ucontrol->value.integer.value[1+i] = update_visualizer_data[i];
-               pr_debug("value[%d] %d\n", i, update_visualizer_data[i]);
-       }
-       kfree(visualizer_data);
-       return 0;
-}
-
-int msm_dolby_dap_param_visualizer_control_put(struct snd_kcontrol *kcontrol,
-                                          struct snd_ctl_elem_value *ucontrol)
-{
-       /* not used while getting the visualizer data */
-       return 0;
-}
-
-int msm_dolby_dap_endpoint_control_get(struct snd_kcontrol *kcontrol,
-                                      struct snd_ctl_elem_value *ucontrol)
-{
-       /* not used while setting the endpoint */
-       return 0;
-}
-
-int msm_dolby_dap_endpoint_control_put(struct snd_kcontrol *kcontrol,
-                                      struct snd_ctl_elem_value *ucontrol)
-{
-       int device = ucontrol->value.integer.value[0];
-       dolby_dap_params_states.device = device;
-       return 0;
-}
-
-int msm_dolby_dap_security_control_get(struct snd_kcontrol *kcontrol,
-                                      struct snd_ctl_elem_value *ucontrol)
-{
-       /* not used while setting the manfr id*/
-       return 0;
-}
-
-int msm_dolby_dap_security_control_put(struct snd_kcontrol *kcontrol,
-                                      struct snd_ctl_elem_value *ucontrol)
-{
-       int manufacturer_id = ucontrol->value.integer.value[0];
-       core_set_dolby_manufacturer_id(manufacturer_id);
-       return 0;
-}
-
-int msm_dolby_dap_license_control_get(struct snd_kcontrol *kcontrol,
-                               struct snd_ctl_elem_value *ucontrol)
-{
-       ucontrol->value.integer.value[0] =
-                       core_get_license_status(DOLBY_DS1_LICENSE_ID);
-       return 0;
-}
-
-int msm_dolby_dap_license_control_put(struct snd_kcontrol *kcontrol,
-                               struct snd_ctl_elem_value *ucontrol)
-{
-       return core_set_license(ucontrol->value.integer.value[0],
-                                               DOLBY_DS1_LICENSE_ID);
-}
-
-static const struct snd_kcontrol_new dolby_license_controls[] = {
-       SOC_SINGLE_MULTI_EXT("DS1 License", SND_SOC_NOPM, 0,
-       0xFFFFFFFF, 0, 1, msm_dolby_dap_license_control_get,
-       msm_dolby_dap_license_control_put),
-};
-
-static const struct snd_kcontrol_new dolby_security_controls[] = {
-       SOC_SINGLE_MULTI_EXT("DS1 Security", SND_SOC_NOPM, 0,
-       0xFFFFFFFF, 0, 1, msm_dolby_dap_security_control_get,
-       msm_dolby_dap_security_control_put),
-};
-
-static const struct snd_kcontrol_new dolby_dap_param_to_set_controls[] = {
-       SOC_SINGLE_MULTI_EXT("DS1 DAP Set Param", SND_SOC_NOPM, 0, 0xFFFFFFFF,
-       0, 128, msm_dolby_dap_param_to_set_control_get,
-       msm_dolby_dap_param_to_set_control_put),
-};
-
-static const struct snd_kcontrol_new dolby_dap_param_to_get_controls[] = {
-       SOC_SINGLE_MULTI_EXT("DS1 DAP Get Param", SND_SOC_NOPM, 0, 0xFFFFFFFF,
-       0, 128, msm_dolby_dap_param_to_get_control_get,
-       msm_dolby_dap_param_to_get_control_put),
-};
-
-static const struct snd_kcontrol_new dolby_dap_param_visualizer_controls[] = {
-       SOC_SINGLE_MULTI_EXT("DS1 DAP Get Visualizer", SND_SOC_NOPM, 0,
-       0xFFFFFFFF, 0, 41, msm_dolby_dap_param_visualizer_control_get,
-       msm_dolby_dap_param_visualizer_control_put),
-};
-
-static const struct snd_kcontrol_new dolby_dap_param_end_point_controls[] = {
-       SOC_SINGLE_MULTI_EXT("DS1 DAP Endpoint", SND_SOC_NOPM, 0,
-       0xFFFFFFFF, 0, 1, msm_dolby_dap_endpoint_control_get,
-       msm_dolby_dap_endpoint_control_put),
-};
-
-void msm_dolby_dap_add_controls(struct snd_soc_platform *platform)
-{
-       snd_soc_add_platform_controls(platform,
-                               dolby_license_controls,
-                       ARRAY_SIZE(dolby_license_controls));
-
-       snd_soc_add_platform_controls(platform,
-                               dolby_security_controls,
-                       ARRAY_SIZE(dolby_security_controls));
-
-       snd_soc_add_platform_controls(platform,
-                               dolby_dap_param_to_set_controls,
-                       ARRAY_SIZE(dolby_dap_param_to_set_controls));
-
-       snd_soc_add_platform_controls(platform,
-                               dolby_dap_param_to_get_controls,
-                       ARRAY_SIZE(dolby_dap_param_to_get_controls));
-
-       snd_soc_add_platform_controls(platform,
-                               dolby_dap_param_visualizer_controls,
-                       ARRAY_SIZE(dolby_dap_param_visualizer_controls));
-
-       snd_soc_add_platform_controls(platform,
-                               dolby_dap_param_end_point_controls,
-                       ARRAY_SIZE(dolby_dap_param_end_point_controls));
-}
index cfade42..727bd65 100644 (file)
@@ -10016,6 +10016,9 @@ static int msm_audio_sound_focus_derive_port_id(struct snd_kcontrol *kcontrol,
        } else if (!strcmp(kcontrol->id.name + strlen(prefix),
                                        "TERT_MI2S")) {
                *port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
+       } else if (!strcmp(kcontrol->id.name + strlen(prefix),
+                                       "INT3_MI2S")) {
+               *port_id = AFE_PORT_ID_INT3_MI2S_TX;
        } else {
                pr_err("%s: mixer ctl name=%s, could not derive valid port id\n",
                        __func__, kcontrol->id.name);
@@ -10220,6 +10223,36 @@ static const struct snd_kcontrol_new msm_source_tracking_controls[] = {
                .info   = msm_source_tracking_info,
                .get    = msm_audio_source_tracking_get,
        },
+       {
+               .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+               .iface  = SNDRV_CTL_ELEM_IFACE_MIXER,
+               .name   = "Sound Focus Voice Tx INT3_MI2S",
+               .info   = msm_sound_focus_info,
+               .get    = msm_voice_sound_focus_get,
+               .put    = msm_voice_sound_focus_put,
+       },
+       {
+               .access = SNDRV_CTL_ELEM_ACCESS_READ,
+               .iface  = SNDRV_CTL_ELEM_IFACE_MIXER,
+               .name   = "Source Tracking Voice Tx INT3_MI2S",
+               .info   = msm_source_tracking_info,
+               .get    = msm_voice_source_tracking_get,
+       },
+       {
+               .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+               .iface  = SNDRV_CTL_ELEM_IFACE_MIXER,
+               .name   = "Sound Focus Audio Tx INT3_MI2S",
+               .info   = msm_sound_focus_info,
+               .get    = msm_audio_sound_focus_get,
+               .put    = msm_audio_sound_focus_put,
+       },
+       {
+               .access = SNDRV_CTL_ELEM_ACCESS_READ,
+               .iface  = SNDRV_CTL_ELEM_IFACE_MIXER,
+               .name   = "Source Tracking Audio Tx INT3_MI2S",
+               .info   = msm_source_tracking_info,
+               .get    = msm_audio_source_tracking_get,
+       },
 };
 
 static int spkr_prot_put_vi_lch_port(struct snd_kcontrol *kcontrol,
index bc1d21a..fcff383 100644 (file)
@@ -1303,13 +1303,22 @@ static struct cal_block_data *afe_find_cal_topo_id_by_port(
                        MSM_AFE_PORT_TYPE_TX)?(TX_DEVICE):(RX_DEVICE));
                afe_top =
                (struct audio_cal_info_afe_top *)cal_block->cal_info;
-               if ((afe_top->path == path) &&
-                   (afe_top->acdb_id ==
-                    this_afe.dev_acdb_id[afe_port_index])) {
-                       pr_debug("%s: top_id:%x acdb_id:%d afe_port:%d\n",
+               if (afe_top->path == path) {
+                       if (this_afe.dev_acdb_id[afe_port_index] > 0) {
+                               if (afe_top->acdb_id ==
+                                   this_afe.dev_acdb_id[afe_port_index]) {
+                                       pr_debug("%s: top_id:%x acdb_id:%d afe_port_id:%d\n",
+                                                __func__, afe_top->topology,
+                                                afe_top->acdb_id,
+                                                q6audio_get_port_id(port_id));
+                                       return cal_block;
+                               }
+                       } else {
+                               pr_debug("%s: top_id:%x acdb_id:%d afe_port:%d\n",
                                 __func__, afe_top->topology, afe_top->acdb_id,
                                 q6audio_get_port_id(port_id));
-                       return cal_block;
+                               return cal_block;
+                       }
                }
        }
 
@@ -1497,6 +1506,8 @@ static void send_afe_cal_type(int cal_index, int port_id)
 {
        struct cal_block_data           *cal_block = NULL;
        int ret;
+       int afe_port_index = q6audio_get_port_index(port_id);
+
        pr_debug("%s:\n", __func__);
 
        if (this_afe.cal_data[cal_index] == NULL) {
@@ -1505,10 +1516,17 @@ static void send_afe_cal_type(int cal_index, int port_id)
                goto done;
        }
 
+       if (afe_port_index < 0) {
+               pr_err("%s: Error getting AFE port index %d\n",
+                       __func__, afe_port_index);
+               goto done;
+       }
+
        mutex_lock(&this_afe.cal_data[cal_index]->lock);
 
-       if ((cal_index == AFE_COMMON_RX_CAL) ||
-           (cal_index == AFE_COMMON_TX_CAL))
+       if (((cal_index == AFE_COMMON_RX_CAL) ||
+            (cal_index == AFE_COMMON_TX_CAL)) &&
+           (this_afe.dev_acdb_id[afe_port_index] > 0))
                cal_block = afe_find_cal(cal_index, port_id);
        else
                cal_block = cal_utils_get_only_cal_block(
index 3a8fdf8..edf5719 100644 (file)
@@ -36,6 +36,11 @@ struct dev_config {
        u32 channels;
 };
 
+enum {
+       DP_RX_IDX,
+       EXT_DISP_RX_IDX_MAX,
+};
+
 /* TDM default config */
 static struct dev_config tdm_rx_cfg[TDM_INTERFACE_MAX][TDM_PORT_MAX] = {
        { /* PRI TDM */
@@ -124,6 +129,10 @@ static struct dev_config tdm_tx_cfg[TDM_INTERFACE_MAX][TDM_PORT_MAX] = {
        }
 };
 
+/* Default configuration of external display BE */
+static struct dev_config ext_disp_rx_cfg[] = {
+       [DP_RX_IDX] =   {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
+};
 static struct dev_config usb_rx_cfg = {
        .sample_rate = SAMPLING_RATE_48KHZ,
        .bit_format = SNDRV_PCM_FORMAT_S16_LE,
@@ -251,6 +260,8 @@ static const char *const mi2s_ch_text[] = {"One", "Two", "Three", "Four",
                                           "Eight"};
 static char const *bit_format_text[] = {"S16_LE", "S24_LE", "S24_3LE",
                                          "S32_LE"};
+static char const *mi2s_format_text[] = {"S16_LE", "S24_LE", "S24_3LE",
+                                         "S32_LE"};
 static char const *tdm_ch_text[] = {"One", "Two", "Three", "Four",
                                    "Five", "Six", "Seven", "Eight"};
 static char const *tdm_bit_format_text[] = {"S16_LE", "S24_LE", "S32_LE"};
@@ -264,7 +275,11 @@ static char const *usb_sample_rate_text[] = {"KHZ_8", "KHZ_11P025",
                                        "KHZ_16", "KHZ_22P05",
                                        "KHZ_32", "KHZ_44P1", "KHZ_48",
                                        "KHZ_96", "KHZ_192", "KHZ_384"};
+static char const *ext_disp_bit_format_text[] = {"S16_LE", "S24_LE"};
+static char const *ext_disp_sample_rate_text[] = {"KHZ_48", "KHZ_96",
+                                                 "KHZ_192"};
 
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_rx_chs, ch_text);
 static SOC_ENUM_SINGLE_EXT_DECL(proxy_rx_chs, ch_text);
 static SOC_ENUM_SINGLE_EXT_DECL(prim_aux_pcm_rx_sample_rate, auxpcm_rate_text);
 static SOC_ENUM_SINGLE_EXT_DECL(sec_aux_pcm_rx_sample_rate, auxpcm_rate_text);
@@ -282,6 +297,14 @@ static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_tx_sample_rate, mi2s_rate_text);
 static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_tx_sample_rate, mi2s_rate_text);
 static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_tx_sample_rate, mi2s_rate_text);
 static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_tx_sample_rate, mi2s_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_rx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_rx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_rx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_rx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_tx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_tx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_tx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_tx_format, mi2s_format_text);
 static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_rx_chs, mi2s_ch_text);
 static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_tx_chs, mi2s_ch_text);
 static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_rx_chs, mi2s_ch_text);
@@ -294,8 +317,11 @@ static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_chs, usb_ch_text);
 static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_chs, usb_ch_text);
 static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_format, bit_format_text);
 static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_rx_format, ext_disp_bit_format_text);
 static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_sample_rate, usb_sample_rate_text);
 static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_sample_rate, usb_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_rx_sample_rate,
+                               ext_disp_sample_rate_text);
 static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_chs, tdm_ch_text);
 static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_format, tdm_bit_format_text);
 static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_sample_rate, tdm_sample_rate_text);
@@ -667,6 +693,54 @@ static int tdm_get_format_val(int format)
        return value;
 }
 
+static int mi2s_get_format(int value)
+{
+       int format = 0;
+
+       switch (value) {
+       case 0:
+               format = SNDRV_PCM_FORMAT_S16_LE;
+               break;
+       case 1:
+               format = SNDRV_PCM_FORMAT_S24_LE;
+               break;
+       case 2:
+               format = SNDRV_PCM_FORMAT_S24_3LE;
+               break;
+       case 3:
+               format = SNDRV_PCM_FORMAT_S32_LE;
+               break;
+       default:
+               format = SNDRV_PCM_FORMAT_S16_LE;
+               break;
+       }
+       return format;
+}
+
+static int mi2s_get_format_value(int format)
+{
+       int value = 0;
+
+       switch (format) {
+       case SNDRV_PCM_FORMAT_S16_LE:
+               value = 0;
+               break;
+       case SNDRV_PCM_FORMAT_S24_LE:
+               value = 1;
+               break;
+       case SNDRV_PCM_FORMAT_S24_3LE:
+               value = 2;
+               break;
+       case SNDRV_PCM_FORMAT_S32_LE:
+               value = 3;
+               break;
+       default:
+               value = 0;
+               break;
+       }
+       return value;
+}
+
 static int tdm_rx_format_get(struct snd_kcontrol *kcontrol,
                             struct snd_ctl_elem_value *ucontrol)
 {
@@ -1132,6 +1206,78 @@ static int mi2s_tx_sample_rate_get(struct snd_kcontrol *kcontrol,
        return 0;
 }
 
+static int mi2s_tx_format_put(struct snd_kcontrol *kcontrol,
+                               struct snd_ctl_elem_value *ucontrol)
+{
+       int idx = mi2s_get_port_idx(kcontrol);
+
+       if (idx < 0)
+               return idx;
+
+       mi2s_tx_cfg[idx].bit_format =
+               mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+       pr_debug("%s: idx[%d] _tx_format = %d, item = %d\n", __func__,
+                 idx, mi2s_tx_cfg[idx].bit_format,
+                 ucontrol->value.enumerated.item[0]);
+
+       return 0;
+}
+
+static int mi2s_tx_format_get(struct snd_kcontrol *kcontrol,
+                               struct snd_ctl_elem_value *ucontrol)
+{
+       int idx = mi2s_get_port_idx(kcontrol);
+
+       if (idx < 0)
+               return idx;
+
+       ucontrol->value.enumerated.item[0] =
+               mi2s_get_format_value(mi2s_tx_cfg[idx].bit_format);
+
+       pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+               idx, mi2s_tx_cfg[idx].bit_format,
+               ucontrol->value.enumerated.item[0]);
+
+       return 0;
+}
+
+static int mi2s_rx_format_put(struct snd_kcontrol *kcontrol,
+                               struct snd_ctl_elem_value *ucontrol)
+{
+       int idx = mi2s_get_port_idx(kcontrol);
+
+       if (idx < 0)
+               return idx;
+
+       mi2s_rx_cfg[idx].bit_format =
+               mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+       pr_debug("%s: idx[%d] _rx_format = %d, item = %d\n", __func__,
+                 idx, mi2s_rx_cfg[idx].bit_format,
+                 ucontrol->value.enumerated.item[0]);
+
+       return 0;
+}
+
+static int mi2s_rx_format_get(struct snd_kcontrol *kcontrol,
+                               struct snd_ctl_elem_value *ucontrol)
+{
+       int idx = mi2s_get_port_idx(kcontrol);
+
+       if (idx < 0)
+               return idx;
+
+       ucontrol->value.enumerated.item[0] =
+               mi2s_get_format_value(mi2s_rx_cfg[idx].bit_format);
+
+       pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+               idx, mi2s_rx_cfg[idx].bit_format,
+               ucontrol->value.enumerated.item[0]);
+
+       return 0;
+}
+
 static int msm_mi2s_rx_ch_get(struct snd_kcontrol *kcontrol,
                              struct snd_ctl_elem_value *ucontrol)
 {
@@ -1514,6 +1660,162 @@ static int usb_audio_tx_format_put(struct snd_kcontrol *kcontrol,
        return rc;
 }
 
+static int ext_disp_get_port_idx(struct snd_kcontrol *kcontrol)
+{
+       int idx;
+
+       if (strnstr(kcontrol->id.name, "Display Port RX",
+                        sizeof("Display Port RX")))
+               idx = DP_RX_IDX;
+       else {
+               pr_err("%s: unsupported BE: %s\n",
+                       __func__, kcontrol->id.name);
+               idx = -EINVAL;
+       }
+
+       return idx;
+}
+
+static int ext_disp_rx_format_get(struct snd_kcontrol *kcontrol,
+                                 struct snd_ctl_elem_value *ucontrol)
+{
+       int idx = ext_disp_get_port_idx(kcontrol);
+
+       if (idx < 0)
+               return idx;
+
+       switch (ext_disp_rx_cfg[idx].bit_format) {
+       case SNDRV_PCM_FORMAT_S24_LE:
+               ucontrol->value.integer.value[0] = 1;
+               break;
+
+       case SNDRV_PCM_FORMAT_S16_LE:
+       default:
+               ucontrol->value.integer.value[0] = 0;
+               break;
+       }
+
+       pr_debug("%s: ext_disp_rx[%d].format = %d, ucontrol value = %ld\n",
+                __func__, idx, ext_disp_rx_cfg[idx].bit_format,
+                ucontrol->value.integer.value[0]);
+       return 0;
+}
+
+static int ext_disp_rx_format_put(struct snd_kcontrol *kcontrol,
+                                 struct snd_ctl_elem_value *ucontrol)
+{
+       int idx = ext_disp_get_port_idx(kcontrol);
+
+       if (idx < 0)
+               return idx;
+
+       switch (ucontrol->value.integer.value[0]) {
+       case 1:
+               ext_disp_rx_cfg[idx].bit_format = SNDRV_PCM_FORMAT_S24_LE;
+               break;
+       case 0:
+       default:
+               ext_disp_rx_cfg[idx].bit_format = SNDRV_PCM_FORMAT_S16_LE;
+               break;
+       }
+       pr_debug("%s: ext_disp_rx[%d].format = %d, ucontrol value = %ld\n",
+                __func__, idx, ext_disp_rx_cfg[idx].bit_format,
+                ucontrol->value.integer.value[0]);
+
+       return 0;
+}
+
+static int ext_disp_rx_ch_get(struct snd_kcontrol *kcontrol,
+                             struct snd_ctl_elem_value *ucontrol)
+{
+       int idx = ext_disp_get_port_idx(kcontrol);
+
+       if (idx < 0)
+               return idx;
+
+       ucontrol->value.integer.value[0] =
+                       ext_disp_rx_cfg[idx].channels - 2;
+
+       pr_debug("%s: ext_disp_rx[%d].ch = %d\n", __func__,
+                idx, ext_disp_rx_cfg[idx].channels);
+
+       return 0;
+}
+
+static int ext_disp_rx_ch_put(struct snd_kcontrol *kcontrol,
+                             struct snd_ctl_elem_value *ucontrol)
+{
+       int idx = ext_disp_get_port_idx(kcontrol);
+
+       if (idx < 0)
+               return idx;
+
+       ext_disp_rx_cfg[idx].channels =
+                       ucontrol->value.integer.value[0] + 2;
+
+       pr_debug("%s: ext_disp_rx[%d].ch = %d\n", __func__,
+                idx, ext_disp_rx_cfg[idx].channels);
+       return 1;
+}
+
+static int ext_disp_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
+                                      struct snd_ctl_elem_value *ucontrol)
+{
+       int sample_rate_val;
+       int idx = ext_disp_get_port_idx(kcontrol);
+
+       if (idx < 0)
+               return idx;
+
+       switch (ext_disp_rx_cfg[idx].sample_rate) {
+       case SAMPLING_RATE_192KHZ:
+               sample_rate_val = 2;
+               break;
+
+       case SAMPLING_RATE_96KHZ:
+               sample_rate_val = 1;
+               break;
+
+       case SAMPLING_RATE_48KHZ:
+       default:
+               sample_rate_val = 0;
+               break;
+       }
+
+       ucontrol->value.integer.value[0] = sample_rate_val;
+       pr_debug("%s: ext_disp_rx[%d].sample_rate = %d\n", __func__,
+                idx, ext_disp_rx_cfg[idx].sample_rate);
+
+       return 0;
+}
+
+static int ext_disp_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
+                                      struct snd_ctl_elem_value *ucontrol)
+{
+       int idx = ext_disp_get_port_idx(kcontrol);
+
+       if (idx < 0)
+               return idx;
+
+       switch (ucontrol->value.integer.value[0]) {
+       case 2:
+               ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_192KHZ;
+               break;
+       case 1:
+               ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_96KHZ;
+               break;
+       case 0:
+       default:
+               ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_48KHZ;
+               break;
+       }
+
+       pr_debug("%s: control value = %ld, ext_disp_rx[%d].sample_rate = %d\n",
+                __func__, ucontrol->value.integer.value[0], idx,
+                ext_disp_rx_cfg[idx].sample_rate);
+       return 0;
+}
+
 const struct snd_kcontrol_new msm_common_snd_controls[] = {
        SOC_ENUM_EXT("PROXY_RX Channels", proxy_rx_chs,
                        proxy_rx_ch_get, proxy_rx_ch_put),
@@ -1565,6 +1867,30 @@ const struct snd_kcontrol_new msm_common_snd_controls[] = {
        SOC_ENUM_EXT("QUAT_MI2S_TX SampleRate", quat_mi2s_tx_sample_rate,
                        mi2s_tx_sample_rate_get,
                        mi2s_tx_sample_rate_put),
+       SOC_ENUM_EXT("PRIM_MI2S_RX Format", prim_mi2s_rx_format,
+                       mi2s_rx_format_get,
+                       mi2s_rx_format_put),
+       SOC_ENUM_EXT("SEC_MI2S_RX Format", sec_mi2s_rx_format,
+                       mi2s_rx_format_get,
+                       mi2s_rx_format_put),
+       SOC_ENUM_EXT("TERT_MI2S_RX Format", tert_mi2s_rx_format,
+                       mi2s_rx_format_get,
+                       mi2s_rx_format_put),
+       SOC_ENUM_EXT("QUAT_MI2S_RX Format", quat_mi2s_rx_format,
+                       mi2s_rx_format_get,
+                       mi2s_rx_format_put),
+       SOC_ENUM_EXT("PRIM_MI2S_TX Format", prim_mi2s_tx_format,
+                       mi2s_tx_format_get,
+                       mi2s_tx_format_put),
+       SOC_ENUM_EXT("SEC_MI2S_TX Format", sec_mi2s_tx_format,
+                       mi2s_tx_format_get,
+                       mi2s_tx_format_put),
+       SOC_ENUM_EXT("TERT_MI2S_TX Format", tert_mi2s_tx_format,
+                       mi2s_tx_format_get,
+                       mi2s_tx_format_put),
+       SOC_ENUM_EXT("QUAT_MI2S_TX Format", quat_mi2s_tx_format,
+                       mi2s_tx_format_get,
+                       mi2s_tx_format_put),
        SOC_ENUM_EXT("PRIM_MI2S_RX Channels", prim_mi2s_rx_chs,
                        msm_mi2s_rx_ch_get, msm_mi2s_rx_ch_put),
        SOC_ENUM_EXT("PRIM_MI2S_TX Channels", prim_mi2s_tx_chs,
@@ -1585,16 +1911,23 @@ const struct snd_kcontrol_new msm_common_snd_controls[] = {
                        usb_audio_rx_ch_get, usb_audio_rx_ch_put),
        SOC_ENUM_EXT("USB_AUDIO_TX Channels", usb_tx_chs,
                        usb_audio_tx_ch_get, usb_audio_tx_ch_put),
+       SOC_ENUM_EXT("Display Port RX Channels", ext_disp_rx_chs,
+                       ext_disp_rx_ch_get, ext_disp_rx_ch_put),
        SOC_ENUM_EXT("USB_AUDIO_RX Format", usb_rx_format,
                        usb_audio_rx_format_get, usb_audio_rx_format_put),
        SOC_ENUM_EXT("USB_AUDIO_TX Format", usb_tx_format,
                        usb_audio_tx_format_get, usb_audio_tx_format_put),
+       SOC_ENUM_EXT("Display Port RX Bit Format", ext_disp_rx_format,
+                       ext_disp_rx_format_get, ext_disp_rx_format_put),
        SOC_ENUM_EXT("USB_AUDIO_RX SampleRate", usb_rx_sample_rate,
                        usb_audio_rx_sample_rate_get,
                        usb_audio_rx_sample_rate_put),
        SOC_ENUM_EXT("USB_AUDIO_TX SampleRate", usb_tx_sample_rate,
                        usb_audio_tx_sample_rate_get,
                        usb_audio_tx_sample_rate_put),
+       SOC_ENUM_EXT("Display Port RX SampleRate", ext_disp_rx_sample_rate,
+                       ext_disp_rx_sample_rate_get,
+                       ext_disp_rx_sample_rate_put),
        SOC_ENUM_EXT("PRI_TDM_RX_0 SampleRate", tdm_rx_sample_rate,
                        tdm_rx_sample_rate_get,
                        tdm_rx_sample_rate_put),
@@ -1705,6 +2038,23 @@ static void param_set_mask(struct snd_pcm_hw_params *p, int n, unsigned bit)
        }
 }
 
+static int msm_ext_disp_get_idx_from_beid(int32_t be_id)
+{
+       int idx;
+
+       switch (be_id) {
+       case MSM_BACKEND_DAI_DISPLAY_PORT_RX:
+               idx = DP_RX_IDX;
+               break;
+       default:
+               pr_err("%s: Incorrect ext_disp be_id %d\n", __func__, be_id);
+               idx = -EINVAL;
+               break;
+       }
+
+       return idx;
+}
+
 /**
  * msm_common_be_hw_params_fixup - updates settings of ALSA BE hw params.
  *
@@ -1722,6 +2072,7 @@ int msm_common_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
        struct snd_interval *channels = hw_param_interval(params,
                                        SNDRV_PCM_HW_PARAM_CHANNELS);
        int rc = 0;
+       int idx;
 
        pr_debug("%s: format = %d, rate = %d\n",
                  __func__, params_format(params), params_rate(params));
@@ -1741,6 +2092,21 @@ int msm_common_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
                channels->min = channels->max = usb_tx_cfg.channels;
                break;
 
+       case MSM_BACKEND_DAI_DISPLAY_PORT_RX:
+               idx = msm_ext_disp_get_idx_from_beid(dai_link->be_id);
+               if (IS_ERR_VALUE(idx)) {
+                       pr_err("%s: Incorrect ext disp idx %d\n",
+                              __func__, idx);
+                       rc = idx;
+                       break;
+               }
+
+               param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+                               ext_disp_rx_cfg[idx].bit_format);
+               rate->min = rate->max = ext_disp_rx_cfg[idx].sample_rate;
+               channels->min = channels->max = ext_disp_rx_cfg[idx].channels;
+               break;
+
        case MSM_BACKEND_DAI_AFE_PCM_RX:
                channels->min = channels->max = proxy_rx_cfg.channels;
                rate->min = rate->max = SAMPLING_RATE_48KHZ;
@@ -1870,48 +2236,64 @@ int msm_common_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
                rate->min = rate->max = mi2s_rx_cfg[PRIM_MI2S].sample_rate;
                channels->min = channels->max =
                        mi2s_rx_cfg[PRIM_MI2S].channels;
+               param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+                              mi2s_rx_cfg[PRIM_MI2S].bit_format);
                break;
 
        case MSM_BACKEND_DAI_PRI_MI2S_TX:
                rate->min = rate->max = mi2s_tx_cfg[PRIM_MI2S].sample_rate;
                channels->min = channels->max =
                        mi2s_tx_cfg[PRIM_MI2S].channels;
+               param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+                              mi2s_tx_cfg[PRIM_MI2S].bit_format);
                break;
 
        case MSM_BACKEND_DAI_SECONDARY_MI2S_RX:
                rate->min = rate->max = mi2s_rx_cfg[SEC_MI2S].sample_rate;
                channels->min = channels->max =
                        mi2s_rx_cfg[SEC_MI2S].channels;
+               param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+                              mi2s_rx_cfg[SEC_MI2S].bit_format);
                break;
 
        case MSM_BACKEND_DAI_SECONDARY_MI2S_TX:
                rate->min = rate->max = mi2s_tx_cfg[SEC_MI2S].sample_rate;
                channels->min = channels->max =
                        mi2s_tx_cfg[SEC_MI2S].channels;
+               param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+                              mi2s_tx_cfg[SEC_MI2S].bit_format);
                break;
 
        case MSM_BACKEND_DAI_TERTIARY_MI2S_RX:
                rate->min = rate->max = mi2s_rx_cfg[TERT_MI2S].sample_rate;
                channels->min = channels->max =
                        mi2s_rx_cfg[TERT_MI2S].channels;
+               param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+                              mi2s_rx_cfg[TERT_MI2S].bit_format);
                break;
 
        case MSM_BACKEND_DAI_TERTIARY_MI2S_TX:
                rate->min = rate->max = mi2s_tx_cfg[TERT_MI2S].sample_rate;
                channels->min = channels->max =
                        mi2s_tx_cfg[TERT_MI2S].channels;
+               param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+                              mi2s_tx_cfg[TERT_MI2S].bit_format);
                break;
 
        case MSM_BACKEND_DAI_QUATERNARY_MI2S_RX:
                rate->min = rate->max = mi2s_rx_cfg[QUAT_MI2S].sample_rate;
                channels->min = channels->max =
                        mi2s_rx_cfg[QUAT_MI2S].channels;
+               param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+                              mi2s_rx_cfg[QUAT_MI2S].bit_format);
                break;
 
        case MSM_BACKEND_DAI_QUATERNARY_MI2S_TX:
                rate->min = rate->max = mi2s_tx_cfg[QUAT_MI2S].sample_rate;
                channels->min = channels->max =
                        mi2s_tx_cfg[QUAT_MI2S].channels;
+               param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+                              mi2s_tx_cfg[QUAT_MI2S].bit_format);
                break;
 
        default:
@@ -2001,6 +2383,7 @@ static u32 get_mi2s_bits_per_sample(u32 bit_format)
        u32 bit_per_sample;
 
        switch (bit_format) {
+       case SNDRV_PCM_FORMAT_S32_LE:
        case SNDRV_PCM_FORMAT_S24_3LE:
        case SNDRV_PCM_FORMAT_S24_LE:
                bit_per_sample = 32;
index 4cf5aef..f64074d 100644 (file)
@@ -1861,6 +1861,24 @@ static struct snd_soc_dai_link msm_wcn_be_dai_links[] = {
        },
 };
 
+static struct snd_soc_dai_link ext_disp_be_dai_link[] = {
+       /* DISP PORT BACK END DAI Link */
+       {
+               .name = LPASS_BE_DISPLAY_PORT,
+               .stream_name = "Display Port Playback",
+               .cpu_dai_name = "msm-dai-q6-dp.24608",
+               .platform_name = "msm-pcm-routing",
+               .codec_name = "msm-ext-disp-audio-codec-rx",
+               .codec_dai_name = "msm_dp_audio_codec_rx_dai",
+               .no_pcm = 1,
+               .dpcm_playback = 1,
+               .be_id = MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+               .be_hw_params_fixup = msm_common_be_hw_params_fixup,
+               .ignore_pmdown_time = 1,
+               .ignore_suspend = 1,
+       },
+};
+
 static struct snd_soc_dai_link msm_ext_tasha_dai_links[
 ARRAY_SIZE(msm_ext_common_fe_dai) +
 ARRAY_SIZE(msm_ext_tasha_fe_dai) +
@@ -1868,7 +1886,8 @@ ARRAY_SIZE(msm_ext_common_be_dai) +
 ARRAY_SIZE(msm_ext_tasha_be_dai) +
 ARRAY_SIZE(msm_mi2s_be_dai_links) +
 ARRAY_SIZE(msm_auxpcm_be_dai_links) +
-ARRAY_SIZE(msm_wcn_be_dai_links)];
+ARRAY_SIZE(msm_wcn_be_dai_links) +
+ARRAY_SIZE(ext_disp_be_dai_link)];
 
 static struct snd_soc_dai_link msm_ext_tavil_dai_links[
 ARRAY_SIZE(msm_ext_common_fe_dai) +
@@ -1877,7 +1896,8 @@ ARRAY_SIZE(msm_ext_common_be_dai) +
 ARRAY_SIZE(msm_ext_tavil_be_dai) +
 ARRAY_SIZE(msm_mi2s_be_dai_links) +
 ARRAY_SIZE(msm_auxpcm_be_dai_links) +
-ARRAY_SIZE(msm_wcn_be_dai_links)];
+ARRAY_SIZE(msm_wcn_be_dai_links) +
+ARRAY_SIZE(ext_disp_be_dai_link)];
 
 /**
  * populate_snd_card_dailinks - prepares dailink array and initializes card.
@@ -1951,6 +1971,15 @@ struct snd_soc_card *populate_snd_card_dailinks(struct device *dev,
                                   sizeof(msm_wcn_be_dai_links));
                        len4 += ARRAY_SIZE(msm_wcn_be_dai_links);
                }
+               if (of_property_read_bool(dev->of_node,
+                                         "qcom,ext-disp-audio-rx")) {
+                       dev_dbg(dev, "%s(): ext disp audio support present\n",
+                                       __func__);
+                       memcpy(msm_ext_tasha_dai_links + len4,
+                               ext_disp_be_dai_link,
+                               sizeof(ext_disp_be_dai_link));
+                       len4 += ARRAY_SIZE(ext_disp_be_dai_link);
+               }
                msm_ext_dai_links = msm_ext_tasha_dai_links;
        } else if (strnstr(card->name, "tavil", strlen(card->name))) {
                len1 = ARRAY_SIZE(msm_ext_common_fe_dai);
@@ -1987,6 +2016,15 @@ struct snd_soc_card *populate_snd_card_dailinks(struct device *dev,
                                   sizeof(msm_wcn_be_dai_links));
                        len4 += ARRAY_SIZE(msm_wcn_be_dai_links);
                }
+               if (of_property_read_bool(dev->of_node,
+                                         "qcom,ext-disp-audio-rx")) {
+                       dev_dbg(dev, "%s(): ext disp audio support present\n",
+                                       __func__);
+                       memcpy(msm_ext_tavil_dai_links + len4,
+                               ext_disp_be_dai_link,
+                               sizeof(ext_disp_be_dai_link));
+                       len4 += ARRAY_SIZE(ext_disp_be_dai_link);
+               }
                msm_ext_dai_links = msm_ext_tavil_dai_links;
        } else {
                dev_err(dev, "%s: failing as no matching card name\n",
index 2546380..c2ad89a 100644 (file)
@@ -2896,6 +2896,24 @@ static struct snd_soc_dai_link msm_wsa_be_dai_links[] = {
        },
 };
 
+static struct snd_soc_dai_link ext_disp_be_dai_link[] = {
+       /* DISP PORT BACK END DAI Link */
+       {
+               .name = LPASS_BE_DISPLAY_PORT,
+               .stream_name = "Display Port Playback",
+               .cpu_dai_name = "msm-dai-q6-dp.24608",
+               .platform_name = "msm-pcm-routing",
+               .codec_name = "msm-ext-disp-audio-codec-rx",
+               .codec_dai_name = "msm_dp_audio_codec_rx_dai",
+               .no_pcm = 1,
+               .dpcm_playback = 1,
+               .be_id = MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+               .be_hw_params_fixup = msm_common_be_hw_params_fixup,
+               .ignore_pmdown_time = 1,
+               .ignore_suspend = 1,
+       },
+};
+
 static struct snd_soc_dai_link msm_int_dai_links[
 ARRAY_SIZE(msm_int_dai) +
 ARRAY_SIZE(msm_int_wsa_dai) +
@@ -2903,7 +2921,8 @@ ARRAY_SIZE(msm_int_be_dai) +
 ARRAY_SIZE(msm_mi2s_be_dai_links) +
 ARRAY_SIZE(msm_auxpcm_be_dai_links)+
 ARRAY_SIZE(msm_wcn_be_dai_links) +
-ARRAY_SIZE(msm_wsa_be_dai_links)];
+ARRAY_SIZE(msm_wsa_be_dai_links) +
+ARRAY_SIZE(ext_disp_be_dai_link)];
 
 static struct snd_soc_card sdm660_card = {
        /* snd_soc_card_sdm660 */
@@ -3004,6 +3023,14 @@ static struct snd_soc_card *msm_int_populate_sndcard_dailinks(
                       sizeof(msm_wsa_be_dai_links));
                len1 += ARRAY_SIZE(msm_wsa_be_dai_links);
        }
+       if (of_property_read_bool(dev->of_node, "qcom,ext-disp-audio-rx")) {
+               dev_dbg(dev, "%s(): ext disp audio support present\n",
+                               __func__);
+               memcpy(dailink + len1,
+                       ext_disp_be_dai_link,
+                       sizeof(ext_disp_be_dai_link));
+               len1 += ARRAY_SIZE(ext_disp_be_dai_link);
+       }
        card->dai_link = dailink;
        card->num_links = len1;
        return card;