/*
 * Copyright (c) 2013-2017, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/time.h>
#include <linux/of.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>

#ifdef CONFIG_QCOM_BUS_SCALING
#include <linux/msm-bus.h>
#endif

#include <soc/qcom/scm.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-qcom-ufs.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
#include "ufs_quirks.h"
#include "ufs-qcom-ice.h"
#include "ufs-qcom-debugfs.h"
#include <linux/clk/msm-clk.h>
#define MAX_PROP_SIZE		32
#define VDDP_REF_CLK_MIN_UV	1200000
#define VDDP_REF_CLK_MAX_UV	1200000

#define UFS_QCOM_DEFAULT_DBG_PRINT_EN	\
	(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg);
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles);
static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host);
static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
			       char *prefix)
{
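	/*
	 * Dump @len 32-bit registers starting at @offset, 16 bytes (four
	 * words) per line; offsets are printed only for dumps longer than
	 * four words.
	 */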
	print_hex_dump(KERN_ERR, prefix,
		       len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
		       16, 4, hba->mmio_base + offset, len * 4, false);
}
static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
				       char *prefix, void *priv)
{
	ufs_qcom_dump_regs(hba, offset, len, prefix);
}
static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
	int err = 0;

	err = ufshcd_dme_get(hba,
			UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
				__func__, err);

	return err;
}
static int ufs_qcom_host_clk_get(struct device *dev,
		const char *name, struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk))
		err = PTR_ERR(clk);
	else
		*clk_out = clk;

	return err;
}
static int ufs_qcom_host_clk_enable(struct device *dev,
		const char *name, struct clk *clk)
{
	int err = 0;

	err = clk_prepare_enable(clk);
	if (err)
		dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

	return err;
}
static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
	if (!host->is_lane_clks_enabled)
		return;

	if (host->tx_l1_sync_clk)
		clk_disable_unprepare(host->tx_l1_sync_clk);
	clk_disable_unprepare(host->tx_l0_sync_clk);
	if (host->rx_l1_sync_clk)
		clk_disable_unprepare(host->rx_l1_sync_clk);
	clk_disable_unprepare(host->rx_l0_sync_clk);

	host->is_lane_clks_enabled = false;
}
static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (host->is_lane_clks_enabled)
		return 0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
		host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
		host->tx_l0_sync_clk);
	if (err)
		goto disable_rx_l0;

	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
			host->rx_l1_sync_clk);
		if (err)
			goto disable_tx_l0;

		/* The tx lane1 clk could be muxed, hence keep this optional */
		if (host->tx_l1_sync_clk)
			ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
				host->tx_l1_sync_clk);
	}
	host->is_lane_clks_enabled = true;
	goto out;

disable_tx_l0:
	clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
	clk_disable_unprepare(host->rx_l0_sync_clk);
out:
	return err;
}
static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	err = ufs_qcom_host_clk_get(dev,
			"rx_lane0_sync_clk", &host->rx_l0_sync_clk);
	if (err) {
		dev_err(dev, "%s: failed to get rx_lane0_sync_clk, err %d",
				__func__, err);
		goto out;
	}

	err = ufs_qcom_host_clk_get(dev,
			"tx_lane0_sync_clk", &host->tx_l0_sync_clk);
	if (err) {
		dev_err(dev, "%s: failed to get tx_lane0_sync_clk, err %d",
				__func__, err);
		goto out;
	}

	/* In case of single lane per direction, don't read lane1 clocks */
	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
			&host->rx_l1_sync_clk);
		if (err) {
			dev_err(dev, "%s: failed to get rx_lane1_sync_clk, err %d",
					__func__, err);
			goto out;
		}

		/* The tx lane1 clk could be muxed, hence keep this optional */
		ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
			&host->tx_l1_sync_clk);
	}
out:
	return err;
}
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
	int err = 0;
	u32 tx_fsm_val = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

	do {
		err = ufshcd_dme_get(hba,
			UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
				UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
			&tx_fsm_val);
		if (err || tx_fsm_val == TX_FSM_HIBERN8)
			break;

		/* sleep for max. 200us */
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	/*
	 * we might have scheduled out for a long time during polling, so
	 * check the state again.
	 */
	if (time_after(jiffies, timeout))
		err = ufshcd_dme_get(hba,
			UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
				UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
			&tx_fsm_val);

	if (err) {
		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
				__func__, err);
	} else if (tx_fsm_val != TX_FSM_HIBERN8) {
		err = tx_fsm_val;
		dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
				__func__, err);
	}

	return err;
}
static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
	ufshcd_rmwl(host->hba, QUNIPRO_SEL,
		   ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
		   REG_UFS_CFG1);
	/* make sure above configuration is applied before we return */
	mb();
}
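/*
 * ufs_qcom_power_up_sequence - bring up the UFS PHY
 *
 * Asserts PHY reset, applies the PHY calibration settings, de-asserts reset,
 * starts the serdes and waits for the PCS to become ready, then selects
 * QUniPro (or legacy UniPro) mode.
 */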
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;
	bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
							? true : false;

	/* Assert PHY reset and apply PHY calibration values */
	ufs_qcom_assert_reset(hba);
	/* provide 1ms delay to let the reset pulse propagate */
	usleep_range(1000, 1100);

	ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	/* De-assert PHY reset and start serdes */
	ufs_qcom_deassert_reset(hba);

	/*
	 * after reset de-assertion, phy will need all ref clocks,
	 * voltage, current to settle down before starting serdes.
	 */
	usleep_range(1000, 1100);
	ret = ufs_qcom_phy_start_serdes(phy);
	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	ret = ufs_qcom_phy_is_pcs_ready(phy);
	if (ret)
		dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
			__func__, ret);

	ufs_qcom_select_unipro_mode(host);

out:
	return ret;
}
/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
 * in a specific operation. UTP controller CGCs are disabled by default and
 * this function enables them (after every UFS link startup) to save some
 * power.
 *
 * UFS host controller v3.0.0 onwards also has an internal clock gating
 * mechanism in QUniPro; enable it to save additional power.
 */
static int ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	/* Enable UTP internal clock gating */
	ufshcd_writel(hba,
		ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
		REG_UFS_CFG2);

	/* Ensure that HW clock gating is enabled before next operations */
	mb();

	/* Enable Qunipro internal clock gating if supported */
	if (!ufs_qcom_cap_qunipro_clk_gating(host))
		goto out;

	/* Enable all the mask bits */
	err = ufshcd_dme_rmw(hba, DL_VS_CLK_CFG_MASK,
				DL_VS_CLK_CFG_MASK, DL_VS_CLK_CFG);
	if (err)
		goto out;

	err = ufshcd_dme_rmw(hba, PA_VS_CLK_CFG_REG_MASK,
				PA_VS_CLK_CFG_REG_MASK, PA_VS_CLK_CFG_REG);
	if (err)
		goto out;

	err = ufshcd_dme_rmw(hba, DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
				DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
				DME_VS_CORE_CLK_CTRL);
out:
	return err;
}
static void ufs_qcom_force_mem_config(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;

	/*
	 * Configure the behavior of ufs clocks core and peripheral
	 * memory state when they are turned off.
	 * This configuration is required to allow retaining
	 * ICE crypto configuration (including keys) when
	 * core_clk_ice is turned off, and powering down
	 * non-ICE RAMs of host controller.
	 */
	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk_ice"))
			clk_set_flags(clki->clk, CLKFLAG_RETAIN_MEM);
		else
			clk_set_flags(clki->clk, CLKFLAG_NORETAIN_MEM);
		clk_set_flags(clki->clk, CLKFLAG_NORETAIN_PERIPH);
		clk_set_flags(clki->clk, CLKFLAG_PERIPH_OFF_CLEAR);
	}
}
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_force_mem_config(hba);
		ufs_qcom_power_up_sequence(hba);
		/*
		 * The PHY PLL output is the source of tx/rx lane symbol
		 * clocks, hence, enable the lane clocks only after PHY
		 * is initialized.
		 */
		err = ufs_qcom_enable_lane_clks(host);
		if (!err && host->ice.pdev) {
			err = ufs_qcom_ice_init(host);
			if (err)
				dev_err(hba->dev, "%s: ICE init failed (%d)\n",
					__func__, err);
		}
		break;
	case POST_CHANGE:
		/* check if UFS PHY moved from DISABLED to HIBERN8 */
		err = ufs_qcom_check_hibern8(hba);
		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
		err = -EINVAL;
		break;
	}
	return err;
}
/*
 * Returns zero for success and non-zero in case of a failure.
 */
static int __ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
		u32 hs, u32 rate, bool update_link_startup_timer,
		bool is_pre_scale_up)
{
	int ret = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_clk_info *clki;
	u32 core_clk_period_in_ns;
	u32 tx_clk_cycles_per_us = 0;
	unsigned long core_clk_rate = 0;
	u32 core_clk_cycles_per_us = 0;

	static u32 pwm_fr_table[][2] = {
		{UFS_PWM_G1, 0x1},
		{UFS_PWM_G2, 0x1},
		{UFS_PWM_G3, 0x1},
		{UFS_PWM_G4, 0x1},
	};

	static u32 hs_fr_table_rA[][2] = {
		{UFS_HS_G1, 0x1F},
		{UFS_HS_G2, 0x3e},
		{UFS_HS_G3, 0x7D},
	};

	static u32 hs_fr_table_rB[][2] = {
		{UFS_HS_G1, 0x24},
		{UFS_HS_G2, 0x49},
		{UFS_HS_G3, 0x92},
	};

	/*
	 * The Qunipro controller does not use following registers:
	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
	 * UFS_REG_PA_LINK_STARTUP_TIMER.
	 * But UTP controller uses SYS1CLK_1US_REG register for Interrupt
	 * Aggregation / Auto hibern8 logic.
	 */
	if (ufs_qcom_cap_qunipro(host) &&
	    (!(ufshcd_is_intr_aggr_allowed(hba) ||
	       ufshcd_is_auto_hibern8_supported(hba))))
		goto out;

	if (gear == 0) {
		dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
		goto out_error;
	}

	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk")) {
			if (is_pre_scale_up)
				core_clk_rate = clki->max_freq;
			else
				core_clk_rate = clk_get_rate(clki->clk);
		}
	}

	/* If frequency is smaller than 1MHz, set to 1MHz */
	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
		core_clk_rate = DEFAULT_CLK_RATE_HZ;

	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
	if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
		ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (ufs_qcom_cap_qunipro(host))
		goto out;

	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
	core_clk_period_in_ns &= MASK_CLK_NS_REG;

	switch (hs) {
	case FASTAUTO_MODE:
	case FAST_MODE:
		if (rate == PA_HS_MODE_A) {
			if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rA));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
		} else if (rate == PA_HS_MODE_B) {
			if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rB));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
		} else {
			dev_err(hba->dev, "%s: invalid rate = %d\n",
				__func__, rate);
			goto out_error;
		}
		break;
	case SLOWAUTO_MODE:
	case SLOW_MODE:
		if (gear > ARRAY_SIZE(pwm_fr_table)) {
			dev_err(hba->dev,
				"%s: index %d exceeds table size %zu\n",
				__func__, gear,
				ARRAY_SIZE(pwm_fr_table));
			goto out_error;
		}
		tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
		break;
	case UNCHANGED:
	default:
		dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
		goto out_error;
	}

	if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
	    (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
		/* the two fields of this register shall be written at once */
		ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
			      REG_UFS_TX_SYMBOL_CLK_NS_US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (update_link_startup_timer) {
		ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
			      REG_UFS_PA_LINK_STARTUP_TIMER);
		/*
		 * make sure that this configuration is applied before
		 * we return
		 */
		mb();
	}
	goto out;

out_error:
	ret = -EINVAL;
out:
	return ret;
}
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
		u32 hs, u32 rate, bool update_link_startup_timer)
{
	return __ufs_qcom_cfg_timers(hba, gear, hs, rate,
				     update_link_startup_timer, false);
}
static int ufs_qcom_link_startup_pre_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	u32 unipro_ver;
	int err = 0;

	if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE, 0, true)) {
		dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
			__func__);
		err = -EINVAL;
		goto out;
	}

	/* make sure RX LineCfg is enabled before link startup */
	err = ufs_qcom_phy_ctrl_rx_linecfg(phy, true);
	if (err)
		goto out;

	if (ufs_qcom_cap_qunipro(host)) {
		/* set unipro core clock cycles to 150 & clear clock divider */
		err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
		if (err)
			goto out;
	}

	err = ufs_qcom_enable_hw_clk_gating(hba);
	if (err)
		goto out;

	/*
	 * Some UFS devices (and may be host) have issues if LCC is
	 * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
	 * before link startup which will make sure that both host
	 * and device TX LCC are disabled once link startup is
	 * completed.
	 */
	unipro_ver = ufshcd_get_local_unipro_ver(hba);
	if (unipro_ver != UFS_UNIPRO_VER_1_41)
		err = ufshcd_dme_set(hba,
				     UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
				     0);
	if (err)
		goto out;

	if (!ufs_qcom_cap_qunipro_clk_gating(host))
		goto out;

	/* Enable all the mask bits */
	err = ufshcd_dme_rmw(hba, SAVECONFIGTIME_MODE_MASK,
			     SAVECONFIGTIME_MODE_MASK,
			     PA_VS_CONFIG_REG1);
out:
	return err;
}
static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	u32 tx_lanes;
	int err = 0;

	err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
	if (err)
		goto out;

	err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
	if (err) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
			__func__);
		goto out;
	}

	/*
	 * Some UFS devices send incorrect LineCfg data as part of power mode
	 * change sequence which may cause host PHY to go into bad state.
	 * Disabling Rx LineCfg of host PHY should help avoid this.
	 */
	if (ufshcd_get_local_unipro_ver(hba) == UFS_UNIPRO_VER_1_41)
		err = ufs_qcom_phy_ctrl_rx_linecfg(phy, false);
	if (err) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_ctrl_rx_linecfg failed\n",
			__func__);
		goto out;
	}

	/*
	 * UFS controller has a *clk_req output to GCC, for each of the clocks
	 * entering it. When *clk_req for a specific clock is de-asserted,
	 * the corresponding clock from GCC is stopped. UFS controller
	 * de-asserts the *clk_req outputs when it is in Auto Hibernate state
	 * only if the Clock request feature is enabled.
	 * Enable the Clock request feature:
	 * - Enable HW clock control for UFS clocks in GCC (handled by the
	 *   clock driver as part of clk_prepare_enable).
	 * - Set the AH8_CFG.*CLK_REQ register bits to 1.
	 */
	if (ufshcd_is_auto_hibern8_supported(hba))
		ufshcd_writel(hba, ufshcd_readl(hba, UFS_AH8_CFG) |
			      UFS_HW_CLK_CTRL_EN,
			      UFS_AH8_CFG);
	/*
	 * Make sure clock request feature gets enabled for HW clk gating
	 * before further operations.
	 */
	mb();

out:
	return err;
}

static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		err = ufs_qcom_link_startup_pre_change(hba);
		break;
	case POST_CHANGE:
		err = ufs_qcom_link_startup_post_change(hba);
		break;
	default:
		break;
	}

	return err;
}
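/*
 * Configure the load and voltage of @vreg: when @on is true the regulator is
 * programmed with its full max_uA load and min_uV..max_uV voltage range;
 * otherwise the load is dropped to zero and the minimum voltage is relaxed
 * to 0 in preparation for disabling it.
 */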
static int ufs_qcom_config_vreg(struct device *dev,
		struct ufs_vreg *vreg, bool on)
{
	int ret = 0;
	struct regulator *reg;
	int min_uV, uA_load;

	if (!vreg)
		goto out;

	reg = vreg->reg;
	if (regulator_count_voltages(reg) > 0) {
		uA_load = on ? vreg->max_uA : 0;
		ret = regulator_set_load(vreg->reg, uA_load);
		if (ret)
			goto out;

		min_uV = on ? vreg->min_uV : 0;
		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
		if (ret) {
			dev_err(dev, "%s: %s set voltage failed, err=%d\n",
				__func__, vreg->name, ret);
			goto out;
		}
	}
out:
	return ret;
}

static int ufs_qcom_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (vreg->enabled)
		goto out;

	ret = ufs_qcom_config_vreg(dev, vreg, true);
	if (ret)
		goto out;

	ret = regulator_enable(vreg->reg);
	if (ret)
		goto out;

	vreg->enabled = true;
out:
	return ret;
}

static int ufs_qcom_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
	int ret = 0;

	if (!vreg->enabled)
		goto out;

	ret = regulator_disable(vreg->reg);
	if (ret)
		goto out;

	ret = ufs_qcom_config_vreg(dev, vreg, false);
	if (ret)
		goto out;

	vreg->enabled = false;
out:
	return ret;
}
static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;

	/*
	 * If UniPro link is not active or OFF, PHY ref_clk, main PHY analog
	 * power rail and low noise analog power rail for PLL can be
	 * switched off.
	 */
	if (!ufs_qcom_is_link_active(hba)) {
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);

		if (host->vddp_ref_clk && ufs_qcom_is_link_off(hba))
			ret = ufs_qcom_disable_vreg(hba->dev,
					host->vddp_ref_clk);
		ufs_qcom_ice_suspend(host);

		if (ufs_qcom_is_link_off(hba)) {
			/* Assert PHY soft reset */
			ufs_qcom_assert_reset(hba);
			goto out;
		}
	}

	ufs_qcom_pm_qos_suspend(host);

out:
	return ret;
}
static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int err;

	err = phy_power_on(phy);
	if (err) {
		dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
			__func__, err);
		goto out;
	}

	if (host->vddp_ref_clk && (hba->rpm_lvl > UFS_PM_LVL_3 ||
				   hba->spm_lvl > UFS_PM_LVL_3))
		ufs_qcom_enable_vreg(hba->dev,
				     host->vddp_ref_clk);

	err = ufs_qcom_enable_lane_clks(host);
	if (err)
		goto out;

	err = ufs_qcom_ice_resume(host);
	if (err) {
		dev_err(hba->dev, "%s: ufs_qcom_ice_resume failed, err = %d\n",
			__func__, err);
		goto out;
	}

	hba->is_sys_suspended = false;

out:
	return err;
}
static int ufs_qcom_full_reset(struct ufs_hba *hba)
{
	int ret = -ENOTSUPP;

	if (!hba->core_reset) {
		dev_err(hba->dev, "%s: failed, err = %d\n", __func__,
			ret);
		goto out;
	}

	ret = reset_control_assert(hba->core_reset);
	if (ret) {
		dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
			__func__, ret);
		goto out;
	}

	/*
	 * The hardware requirement for delay between assert/deassert
	 * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
	 * ~125us (4/32768). To be on the safe side add 200us delay.
	 */
	usleep_range(200, 210);

	ret = reset_control_deassert(hba->core_reset);
	if (ret)
		dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
			__func__, ret);

out:
	return ret;
}
#ifdef CONFIG_SCSI_UFS_QCOM_ICE
static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba,
	struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct request *req;
	int ret;

	if (lrbp->cmd && lrbp->cmd->request)
		req = lrbp->cmd->request;
	else
		return 0;

	/* Use request LBA as the DUN value */
	if (req->bio)
		*dun = (req->bio->bi_iter.bi_sector) >>
		       UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;

	ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);

	return ret;
}

static
int ufs_qcom_crytpo_engine_cfg_start(struct ufs_hba *hba, unsigned int task_tag)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	int err = 0;

	if (!host->ice.pdev ||
	    !lrbp->cmd || lrbp->command_type != UTP_CMD_TYPE_SCSI)
		goto out;

	err = ufs_qcom_ice_cfg_start(host, lrbp->cmd);
out:
	return err;
}

static
int ufs_qcom_crytpo_engine_cfg_end(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, struct request *req)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	if (!host->ice.pdev || lrbp->command_type != UTP_CMD_TYPE_SCSI)
		goto out;

	err = ufs_qcom_ice_cfg_end(host, req);
out:
	return err;
}

static
int ufs_qcom_crytpo_engine_reset(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	if (!host->ice.pdev)
		goto out;

	err = ufs_qcom_ice_reset(host);
out:
	return err;
}

static int ufs_qcom_crypto_engine_get_status(struct ufs_hba *hba, u32 *status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!status)
		return -EINVAL;

	return ufs_qcom_ice_get_status(host, status);
}
#else /* !CONFIG_SCSI_UFS_QCOM_ICE */
#define ufs_qcom_crypto_req_setup		NULL
#define ufs_qcom_crytpo_engine_cfg_start	NULL
#define ufs_qcom_crytpo_engine_cfg_end		NULL
#define ufs_qcom_crytpo_engine_reset		NULL
#define ufs_qcom_crypto_engine_get_status	NULL
#endif /* CONFIG_SCSI_UFS_QCOM_ICE */
struct ufs_qcom_dev_params {
	u32 pwm_rx_gear;	/* pwm rx gear to work in */
	u32 pwm_tx_gear;	/* pwm tx gear to work in */
	u32 hs_rx_gear;		/* hs rx gear to work in */
	u32 hs_tx_gear;		/* hs tx gear to work in */
	u32 rx_lanes;		/* number of rx lanes */
	u32 tx_lanes;		/* number of tx lanes */
	u32 rx_pwr_pwm;		/* rx pwm working pwr */
	u32 tx_pwr_pwm;		/* tx pwm working pwr */
	u32 rx_pwr_hs;		/* rx hs working pwr */
	u32 tx_pwr_hs;		/* tx hs working pwr */
	u32 hs_rate;		/* rate A/B to work in HS */
	u32 desired_working_mode;
};
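/*
 * Negotiate the power mode: intersect the device capabilities in @dev_max
 * with the vendor limits in @qcom_param and fill @agreed_pwr with the
 * agreed lanes, gear, power mode and HS rate.
 */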
static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
				      struct ufs_pa_layer_attr *dev_max,
				      struct ufs_pa_layer_attr *agreed_pwr)
{
	int min_qcom_gear;
	int min_dev_gear;
	bool is_dev_sup_hs = false;
	bool is_qcom_max_hs = false;

	if (dev_max->pwr_rx == FAST_MODE)
		is_dev_sup_hs = true;

	if (qcom_param->desired_working_mode == FAST) {
		is_qcom_max_hs = true;
		min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
				      qcom_param->hs_tx_gear);
	} else {
		min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
				      qcom_param->pwm_tx_gear);
	}

	/*
	 * device doesn't support HS but qcom_param->desired_working_mode is
	 * HS, thus device and qcom_param don't agree
	 */
	if (!is_dev_sup_hs && is_qcom_max_hs) {
		pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
			__func__);
		return -ENOTSUPP;
	} else if (is_dev_sup_hs && is_qcom_max_hs) {
		/*
		 * since device supports HS, it supports FAST_MODE.
		 * since qcom_param->desired_working_mode is also HS
		 * then final decision (FAST/FASTAUTO) is done according
		 * to qcom_params as it is the restricting factor
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
			qcom_param->rx_pwr_hs;
	} else {
		/*
		 * here qcom_param->desired_working_mode is PWM.
		 * it doesn't matter whether device supports HS or PWM,
		 * in both cases qcom_param->desired_working_mode will
		 * determine the mode
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
			qcom_param->rx_pwr_pwm;
	}

	/*
	 * we would like tx to work in the minimum number of lanes
	 * between device capability and vendor preferences.
	 * the same decision will be made for rx
	 */
	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
				    qcom_param->tx_lanes);
	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
				    qcom_param->rx_lanes);

	/* device maximum gear is the minimum between device rx and tx gears */
	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);

	/*
	 * if both device capabilities and vendor pre-defined preferences are
	 * both HS or both PWM then set the minimum gear to be the chosen
	 * working gear.
	 * if one is PWM and one is HS then the one that is PWM gets to decide
	 * what is the gear, as it is the one that also decided previously what
	 * pwr the device will be configured to.
	 */
	if ((is_dev_sup_hs && is_qcom_max_hs) ||
	    (!is_dev_sup_hs && !is_qcom_max_hs))
		agreed_pwr->gear_rx = agreed_pwr->gear_tx =
			min_t(u32, min_dev_gear, min_qcom_gear);
	else if (!is_dev_sup_hs)
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
	else
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;

	agreed_pwr->hs_rate = qcom_param->hs_rate;

	return 0;
}
#ifdef CONFIG_QCOM_BUS_SCALING
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
		const char *speed_mode)
{
	struct device *dev = host->hba->dev;
	struct device_node *np = dev->of_node;
	int err;
	const char *key = "qcom,bus-vector-names";

	if (!speed_mode) {
		err = -EINVAL;
		goto out;
	}

	if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
		err = of_property_match_string(np, key, "MAX");
	else
		err = of_property_match_string(np, key, speed_mode);

out:
	if (err < 0)
		dev_err(dev, "%s: Invalid %s mode %d\n",
				__func__, speed_mode, err);
	return err;
}
static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
{
	int gear = max_t(u32, p->gear_rx, p->gear_tx);
	int lanes = max_t(u32, p->lane_rx, p->lane_tx);
	int pwr;

	/* default to PWM Gear 1, Lane 1 if power mode is not initialized */
	if (!gear)
		gear = 1;

	if (!lanes)
		lanes = 1;

	if (!p->pwr_rx && !p->pwr_tx) {
		pwr = SLOWAUTO_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
	} else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
		   p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
		pwr = FAST_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
			 p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
	} else {
		pwr = SLOW_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
			 "PWM", gear, lanes);
	}
}
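/*
 * Issue a bus bandwidth vote to the msm bus driver, skipping the request
 * when it matches the currently applied vote.
 */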
static int __ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
	int err = 0;

	if (vote != host->bus_vote.curr_vote) {
		err = msm_bus_scale_client_update_request(
				host->bus_vote.client_handle, vote);
		if (err) {
			dev_err(host->hba->dev,
				"%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
				__func__, host->bus_vote.client_handle,
				vote, err);
			goto out;
		}

		host->bus_vote.curr_vote = vote;
	}
out:
	return err;
}

static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	int vote;
	int err = 0;
	char mode[BUS_VECTOR_NAME_LEN];

	ufs_qcom_get_speed_mode(&host->dev_req_params, mode);

	vote = ufs_qcom_get_bus_vote(host, mode);
	if (vote >= 0)
		err = __ufs_qcom_set_bus_vote(host, vote);
	else
		err = vote;

	if (err)
		dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
	else
		host->bus_vote.saved_vote = vote;
	return err;
}
static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int vote, err = 0;

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * This ufs_qcom_set_bus_vote() shall be called from
	 * ufs_qcom_init() after init is done.
	 */
	if (!host)
		return 0;

	if (on) {
		vote = host->bus_vote.saved_vote;
		if (vote == host->bus_vote.min_bw_vote)
			ufs_qcom_update_bus_bw_vote(host);
	} else {
		vote = host->bus_vote.min_bw_vote;
	}

	err = __ufs_qcom_set_bus_vote(host, vote);
	if (err)
		dev_err(hba->dev, "%s: set bus vote failed %d\n",
			__func__, err);

	return err;
}
static ssize_t
show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	return snprintf(buf, PAGE_SIZE, "%u\n",
			host->bus_vote.is_max_bw_needed);
}

static ssize_t
store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	u32 value;

	if (!kstrtou32(buf, 0, &value)) {
		host->bus_vote.is_max_bw_needed = !!value;
		ufs_qcom_update_bus_bw_vote(host);
	}

	return count;
}
static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	int err;
	struct msm_bus_scale_pdata *bus_pdata;
	struct device *dev = host->hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;

	bus_pdata = msm_bus_cl_get_pdata(pdev);
	if (!bus_pdata) {
		dev_err(dev, "%s: failed to get bus vectors\n", __func__);
		err = -ENODATA;
		goto out;
	}

	err = of_property_count_strings(np, "qcom,bus-vector-names");
	if (err < 0 || err != bus_pdata->num_usecases) {
		dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
			__func__, err);
		goto out;
	}

	host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
	if (!host->bus_vote.client_handle) {
		dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
			__func__);
		err = -EFAULT;
		goto out;
	}

	/* cache the vote index for minimum and maximum bandwidth */
	host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
	host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");

	host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
	host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
	sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
	host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
	host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
	err = device_create_file(dev, &host->bus_vote.max_bus_bw);
out:
	return err;
}
#else /* CONFIG_QCOM_BUS_SCALING */
static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	return 0;
}

static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
{
	return 0;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	return 0;
}

static inline void msm_bus_scale_unregister_client(uint32_t cl)
{
}
#endif /* CONFIG_QCOM_BUS_SCALING */
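/*
 * Gate or ungate the device reference clock by toggling the enable bit in
 * the dev_ref_clk_ctrl register. 1us delays are inserted so the device
 * always sees ref_clk active for at least 1us around hibern8 entry/exit.
 */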
static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
	if (host->dev_ref_clk_ctrl_mmio &&
	    (enable ^ host->is_dev_ref_clk_enabled)) {
		u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);

		if (enable)
			temp |= host->dev_ref_clk_en_mask;
		else
			temp &= ~host->dev_ref_clk_en_mask;

		/*
		 * If we are here to disable this clock it might be immediately
		 * after entering into hibern8 in which case we need to make
		 * sure that device ref_clk is active at least 1us after the
		 * hibern8 enter.
		 */
		if (!enable)
			udelay(1);

		writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);

		/* ensure that ref_clk is enabled/disabled before we return */
		wmb();

		/*
		 * If we call hibern8 exit after this, we need to make sure that
		 * device ref_clk is stable for at least 1us before the hibern8
		 * exit command.
		 */
		if (enable)
			udelay(1);

		host->is_dev_ref_clk_enabled = enable;
	}
}
static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
				enum ufs_notify_change_status status,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	u32 val;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	struct ufs_qcom_dev_params ufs_qcom_cap;
	int ret = 0;
	int res = 0;

	if (!dev_req_params) {
		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
		ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
		ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
		ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
		ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
		ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
		ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
		ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
		ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
		ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
		ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
		ufs_qcom_cap.desired_working_mode =
					UFS_QCOM_LIMIT_DESIRED_MODE;

		if (host->hw_ver.major == 0x1) {
			/*
			 * HS-G3 operations may not reliably work on legacy QCOM
			 * UFS host controller hardware even though capability
			 * exchange during link startup phase may end up
			 * negotiating maximum supported gear as G3.
			 * Hence downgrade the maximum supported gear to HS-G2.
			 */
			if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
			if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
		}

		/*
		 * Platforms using QRBTCv2 phy must limit link to PWM Gear-1
		 * and SLOW mode to successfully bring up the link.
		 */
		if (!strcmp(ufs_qcom_phy_name(phy), "ufs_phy_qrbtc_v2")) {
			ufs_qcom_cap.tx_lanes = 1;
			ufs_qcom_cap.rx_lanes = 1;
			ufs_qcom_cap.pwm_rx_gear = UFS_PWM_G1;
			ufs_qcom_cap.pwm_tx_gear = UFS_PWM_G1;
			ufs_qcom_cap.desired_working_mode = SLOW;
		}

		ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
						 dev_max_params,
						 dev_req_params);
		if (ret) {
			pr_err("%s: failed to determine capabilities\n",
					__func__);
			goto out;
		}

		/* enable the device ref clock before changing to HS mode */
		if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
			ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, true);
		break;
	case POST_CHANGE:
		if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
					dev_req_params->pwr_rx,
					dev_req_params->hs_rate, false)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			/*
			 * we return error code at the end of the routine,
			 * but continue to configure UFS_PHY_TX_LANE_ENABLE
			 * and bus voting as usual
			 */
			ret = -EINVAL;
		}

		val = ~(MAX_U32 << dev_req_params->lane_tx);
		res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
		if (res) {
			dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
				__func__, res);
			ret = res;
		}

		/* cache the power mode parameters to use internally */
		memcpy(&host->dev_req_params,
		       dev_req_params, sizeof(*dev_req_params));
		ufs_qcom_update_bus_bw_vote(host);

		/* disable the device ref clock if entered PWM mode */
		if (ufshcd_is_hs_mode(&hba->pwr_info) &&
			!ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, false);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
{
	int err = 0;
	u32 pa_vs_config_reg1;

	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			     &pa_vs_config_reg1);
	if (err)
		goto out;

	/* Allow extension of MSB bits of PA_SaveConfigTime attribute */
	err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			     (pa_vs_config_reg1 | (1 << 12)));

out:
	return err;
}

static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
		err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);

	return err;
}
static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x1)
		return UFSHCI_VERSION_11;
	else
		return UFSHCI_VERSION_20;
}
/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * QCOM UFS host controller might have some non-standard behaviours (quirks)
 * compared to what is specified by the UFSHCI specification. Advertise all
 * such quirks to the standard UFS host controller driver so that the
 * standard driver takes them into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x1) {
		hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
			      | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
			      | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE);

		if (host->hw_ver.minor == 0x001 && host->hw_ver.step == 0x0001)
			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;

		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
	}

	if (host->hw_ver.major == 0x2) {
		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

		if (!ufs_qcom_cap_qunipro(host))
			/* Legacy UniPro mode still needs the following quirks */
			hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
	}

	if (host->disable_lpm)
		hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
}
static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!host->disable_lpm) {
		hba->caps |= UFSHCD_CAP_CLK_GATING;
		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
		hba->caps |= UFSHCD_CAP_CLK_SCALING;
	}
	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;

	if (host->hw_ver.major >= 0x2) {
		if (!host->disable_lpm)
			hba->caps |= UFSHCD_CAP_POWER_COLLAPSE_DURING_HIBERN8;
		host->caps = UFS_QCOM_CAP_QUNIPRO |
			     UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
	}
	if (host->hw_ver.major >= 0x3) {
		host->caps |= UFS_QCOM_CAP_QUNIPRO_CLK_GATING;
		/*
		 * The UFS PHY attached to v3.0.0 controller supports entering
		 * deeper low power state of SVS2. This lets the controller
		 * run at much lower clock frequencies for saving power.
		 * Assuming this and any future revisions of the controller
		 * support this capability. Need to revisit this assumption if
		 * any future platform with this core doesn't support the
		 * capability, as there will be no benefit running at lower
		 * frequencies then.
		 */
		host->caps |= UFS_QCOM_CAP_SVS2;
	}
}
/**
 * ufs_qcom_setup_clocks - enables/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @is_gating_context: If true then it means this function is called from
 * aggressive clock gating context and we may only need to gate off important
 * clocks. If false then make sure to gate off all clocks.
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
				 bool is_gating_context)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * This ufs_qcom_setup_clocks() shall be called from
	 * ufs_qcom_init() after init is done.
	 */
	if (!host)
		return 0;

	if (on) {
		err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
		if (err)
			goto out;

		err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
		if (err) {
			dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
				__func__, err);
			ufs_qcom_phy_disable_iface_clk(host->generic_phy);
			goto out;
		}
		/* enable the device ref clock for HS mode */
		if (ufshcd_is_hs_mode(&hba->pwr_info))
			ufs_qcom_dev_ref_clk_ctrl(host, true);

		err = ufs_qcom_ice_resume(host);
		if (err)
			goto out;
	} else {
		err = ufs_qcom_ice_suspend(host);
		if (err)
			goto out;

		/* M-PHY RMMI interface clocks can be turned off */
		ufs_qcom_phy_disable_iface_clk(host->generic_phy);
		/*
		 * If auto hibern8 is supported then the link will already
		 * be in hibern8 state and the ref clock can be gated.
		 */
		if (ufshcd_is_auto_hibern8_supported(hba) ||
		    !ufs_qcom_is_link_active(hba)) {
			/* turn off UFS local PHY ref_clk */
			ufs_qcom_phy_disable_ref_clk(host->generic_phy);
			/* disable device ref_clk */
			ufs_qcom_dev_ref_clk_ctrl(host, false);
		}
	}

out:
	return err;
}
#ifdef CONFIG_SMP /* CONFIG_SMP */
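/*
 * Map a CPU number to the index of its PM QoS CPU group; CPUs that are out
 * of range or not covered by any group mask fall back to the default group.
 */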
static int ufs_qcom_cpu_to_group(struct ufs_qcom_host *host, int cpu)
{
	int i;

	if (cpu >= 0 && cpu < num_possible_cpus())
		for (i = 0; i < host->pm_qos.num_groups; i++)
			if (cpumask_test_cpu(cpu, &host->pm_qos.groups[i].mask))
				return i;

	return host->pm_qos.default_cpu;
}
static void ufs_qcom_pm_qos_req_start(struct ufs_hba *hba, struct request *req)
{
	unsigned long flags;
	struct ufs_qcom_host *host;
	struct ufs_qcom_pm_qos_cpu_group *group;

	if (!hba || !req)
		return;

	host = ufshcd_get_variant(hba);
	if (!host->pm_qos.groups)
		return;

	group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req->cpu)];

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!host->pm_qos.is_enabled)
		goto out;

	group->active_reqs++;
	if (group->state != PM_QOS_REQ_VOTE &&
	    group->state != PM_QOS_VOTED) {
		group->state = PM_QOS_REQ_VOTE;
		queue_work(host->pm_qos.workq, &group->vote_work);
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
/* hba->host->host_lock is assumed to be held by caller */
static void __ufs_qcom_pm_qos_req_end(struct ufs_qcom_host *host, int req_cpu)
{
	struct ufs_qcom_pm_qos_cpu_group *group;

	if (!host->pm_qos.groups || !host->pm_qos.is_enabled)
		return;

	group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req_cpu)];

	if (--group->active_reqs)
		return;

	group->state = PM_QOS_REQ_UNVOTE;
	queue_work(host->pm_qos.workq, &group->unvote_work);
}

static void ufs_qcom_pm_qos_req_end(struct ufs_hba *hba, struct request *req,
				    bool should_lock)
{
	unsigned long flags = 0;

	if (!hba || !req)
		return;

	if (should_lock)
		spin_lock_irqsave(hba->host->host_lock, flags);
	__ufs_qcom_pm_qos_req_end(ufshcd_get_variant(hba), req->cpu);
	if (should_lock)
		spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufs_qcom_pm_qos_vote_work(struct work_struct *work)
{
	struct ufs_qcom_pm_qos_cpu_group *group =
		container_of(work, struct ufs_qcom_pm_qos_cpu_group, vote_work);
	struct ufs_qcom_host *host = group->host;
	unsigned long flags;

	spin_lock_irqsave(host->hba->host->host_lock, flags);

	if (!host->pm_qos.is_enabled || !group->active_reqs) {
		spin_unlock_irqrestore(host->hba->host->host_lock, flags);
		return;
	}

	group->state = PM_QOS_VOTED;
	spin_unlock_irqrestore(host->hba->host->host_lock, flags);

	pm_qos_update_request(&group->req, group->latency_us);
}

static void ufs_qcom_pm_qos_unvote_work(struct work_struct *work)
{
	struct ufs_qcom_pm_qos_cpu_group *group = container_of(work,
		struct ufs_qcom_pm_qos_cpu_group, unvote_work);
	struct ufs_qcom_host *host = group->host;
	unsigned long flags;

	/*
	 * Check if new requests were submitted in the meantime and do not
	 * unvote if so.
	 */
	spin_lock_irqsave(host->hba->host->host_lock, flags);

	if (!host->pm_qos.is_enabled || group->active_reqs) {
		spin_unlock_irqrestore(host->hba->host->host_lock, flags);
		return;
	}

	group->state = PM_QOS_UNVOTED;
	spin_unlock_irqrestore(host->hba->host->host_lock, flags);

	pm_qos_update_request(&group->req, PM_QOS_DEFAULT_VALUE);
}
static ssize_t ufs_qcom_pm_qos_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev->parent);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	return snprintf(buf, PAGE_SIZE, "%d\n", host->pm_qos.is_enabled);
}

static ssize_t ufs_qcom_pm_qos_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev->parent);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	unsigned long value;
	unsigned long flags;
	bool enable;
	int i;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	enable = !!value;

	/*
	 * Must take the spinlock and save irqs before changing the enabled
	 * flag in order to keep correctness of PM QoS release.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (enable == host->pm_qos.is_enabled) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return count;
	}
	host->pm_qos.is_enabled = enable;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (!enable)
		for (i = 0; i < host->pm_qos.num_groups; i++) {
			cancel_work_sync(&host->pm_qos.groups[i].vote_work);
			cancel_work_sync(&host->pm_qos.groups[i].unvote_work);
			spin_lock_irqsave(hba->host->host_lock, flags);
			host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
			host->pm_qos.groups[i].active_reqs = 0;
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			pm_qos_update_request(&host->pm_qos.groups[i].req,
					      PM_QOS_DEFAULT_VALUE);
		}

	return count;
}
static ssize_t ufs_qcom_pm_qos_latency_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev->parent);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int ret;
	int i;
	int offset = 0;

	for (i = 0; i < host->pm_qos.num_groups; i++) {
		ret = snprintf(&buf[offset], PAGE_SIZE,
			"cpu group #%d(mask=0x%lx): %d\n", i,
			host->pm_qos.groups[i].mask.bits[0],
			host->pm_qos.groups[i].latency_us);
		if (ret > 0)
			offset += ret;
		else
			break;
	}

	return offset;
}
static ssize_t ufs_qcom_pm_qos_latency_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev->parent);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	unsigned long value;
	unsigned long flags;
	char *strbuf;
	char *strbuf_copy;
	char *token;
	int i;
	int ret;

	/* reserve one byte for null termination */
	strbuf = kmalloc(count + 1, GFP_KERNEL);
	if (!strbuf)
		return -ENOMEM;
	strbuf_copy = strbuf;
	strlcpy(strbuf, buf, count + 1);

	for (i = 0; i < host->pm_qos.num_groups; i++) {
		token = strsep(&strbuf, ",");
		if (!token)
			break;

		ret = kstrtoul(token, 0, &value);
		if (ret)
			break;

		spin_lock_irqsave(hba->host->host_lock, flags);
		host->pm_qos.groups[i].latency_us = value;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	kfree(strbuf_copy);
	return count;
}
static int ufs_qcom_pm_qos_init(struct ufs_qcom_host *host)
{
	struct device_node *node = host->hba->dev->of_node;
	struct device_attribute *attr;
	int ret = 0;
	int num_groups;
	int num_values;
	char wq_name[sizeof("ufs_pm_qos_00")];
	int i;

	num_groups = of_property_count_u32_elems(node,
		"qcom,pm-qos-cpu-groups");
	if (num_groups <= 0)
		goto no_pm_qos;

	num_values = of_property_count_u32_elems(node,
		"qcom,pm-qos-cpu-group-latency-us");
	if (num_values <= 0)
		goto no_pm_qos;

	if (num_values != num_groups || num_groups > num_possible_cpus()) {
		dev_err(host->hba->dev, "%s: invalid count: num_groups=%d, num_values=%d, num_possible_cpus=%d\n",
			__func__, num_groups, num_values, num_possible_cpus());
		goto no_pm_qos;
	}

	host->pm_qos.num_groups = num_groups;
	host->pm_qos.groups = kcalloc(host->pm_qos.num_groups,
			sizeof(struct ufs_qcom_pm_qos_cpu_group), GFP_KERNEL);
	if (!host->pm_qos.groups)
		return -ENOMEM;

	for (i = 0; i < host->pm_qos.num_groups; i++) {
		u32 mask;

		ret = of_property_read_u32_index(node, "qcom,pm-qos-cpu-groups",
			i, &mask);
		if (ret)
			goto free_groups;
		host->pm_qos.groups[i].mask.bits[0] = mask;
		if (!cpumask_subset(&host->pm_qos.groups[i].mask,
			cpu_possible_mask)) {
			dev_err(host->hba->dev, "%s: invalid mask 0x%x for cpu group\n",
				__func__, mask);
			goto free_groups;
		}

		ret = of_property_read_u32_index(node,
			"qcom,pm-qos-cpu-group-latency-us", i,
			&host->pm_qos.groups[i].latency_us);
		if (ret)
			goto free_groups;

		host->pm_qos.groups[i].req.type = PM_QOS_REQ_AFFINE_IRQ;
		host->pm_qos.groups[i].req.irq = host->hba->irq;
		host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
		host->pm_qos.groups[i].active_reqs = 0;
		host->pm_qos.groups[i].host = host;

		INIT_WORK(&host->pm_qos.groups[i].vote_work,
			  ufs_qcom_pm_qos_vote_work);
		INIT_WORK(&host->pm_qos.groups[i].unvote_work,
			  ufs_qcom_pm_qos_unvote_work);
	}

	ret = of_property_read_u32(node, "qcom,pm-qos-default-cpu",
				   &host->pm_qos.default_cpu);
	if (ret || host->pm_qos.default_cpu > num_possible_cpus())
		host->pm_qos.default_cpu = 0;

	/*
	 * Use a single-threaded workqueue to assure work submitted to the queue
	 * is performed in order. Consider the following 2 possible cases:
	 *
	 * 1. A new request arrives and voting work is scheduled for it. Before
	 *    the voting work is performed the request is finished and unvote
	 *    work is also scheduled.
	 * 2. A request is finished and unvote work is scheduled. Before the
	 *    work is performed a new request arrives and voting work is also
	 *    scheduled.
	 *
	 * In both cases a vote work and unvote work wait to be performed.
	 * If ordering is not guaranteed, then the end state might be the
	 * opposite of the desired state.
	 */
	snprintf(wq_name, ARRAY_SIZE(wq_name), "%s_%d", "ufs_pm_qos",
		 host->hba->host->host_no);
	host->pm_qos.workq = create_singlethread_workqueue(wq_name);
	if (!host->pm_qos.workq) {
		dev_err(host->hba->dev, "%s: failed to create the workqueue\n",
			__func__);
		ret = -ENOMEM;
		goto free_groups;
	}

	/* Initialization was ok, add all PM QoS requests */
	for (i = 0; i < host->pm_qos.num_groups; i++)
		pm_qos_add_request(&host->pm_qos.groups[i].req,
			PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	/* PM QoS latency sys-fs attribute */
	attr = &host->pm_qos.latency_attr;
	attr->show = ufs_qcom_pm_qos_latency_show;
	attr->store = ufs_qcom_pm_qos_latency_store;
	sysfs_attr_init(&attr->attr);
	attr->attr.name = "pm_qos_latency_us";
	attr->attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(host->hba->var->dev, attr))
		dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos_latency_us\n");

	/* PM QoS enable sys-fs attribute */
	attr = &host->pm_qos.enable_attr;
	attr->show = ufs_qcom_pm_qos_enable_show;
	attr->store = ufs_qcom_pm_qos_enable_store;
	sysfs_attr_init(&attr->attr);
	attr->attr.name = "pm_qos_enable";
	attr->attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(host->hba->var->dev, attr))
		dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos enable\n");

	host->pm_qos.is_enabled = true;

	return 0;

free_groups:
	kfree(host->pm_qos.groups);
no_pm_qos:
	host->pm_qos.groups = NULL;
	return ret ? ret : -ENOTSUPP;
}
static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host)
{
	int i;

	if (!host->pm_qos.groups)
		return;

	for (i = 0; i < host->pm_qos.num_groups; i++)
		flush_work(&host->pm_qos.groups[i].unvote_work);
}

static void ufs_qcom_pm_qos_remove(struct ufs_qcom_host *host)
{
	int i;

	if (!host->pm_qos.groups)
		return;

	for (i = 0; i < host->pm_qos.num_groups; i++)
		pm_qos_remove_request(&host->pm_qos.groups[i].req);
	destroy_workqueue(host->pm_qos.workq);

	kfree(host->pm_qos.groups);
	host->pm_qos.groups = NULL;
}
#endif /* CONFIG_SMP */
#define ANDROID_BOOT_DEV_MAX	30
static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
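/* Stash the androidboot.bootdevice= kernel command line argument. */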
#ifndef MODULE
static int __init get_android_boot_dev(char *str)
{
	strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
	return 1;
}
__setup("androidboot.bootdevice=", get_android_boot_dev);
#endif
/**
 * ufs_qcom_parse_lpm - read from DTS whether LPM modes should be disabled.
 */
static void ufs_qcom_parse_lpm(struct ufs_qcom_host *host)
{
	struct device_node *node = host->hba->dev->of_node;

	host->disable_lpm = of_property_read_bool(node, "qcom,disable-lpm");
	if (host->disable_lpm)
		pr_info("%s: will disable all LPM modes\n", __func__);
}
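/*
 * Parse the "<name>-supply" phandle and "<name>-max-microamp" property from
 * device tree and acquire the corresponding regulator; the voltage range is
 * fixed at VDDP_REF_CLK_MIN_UV/VDDP_REF_CLK_MAX_UV.
 */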
static int ufs_qcom_parse_reg_info(struct ufs_qcom_host *host, char *name,
				   struct ufs_vreg **out_vreg)
{
	int ret = 0;
	char prop_name[MAX_PROP_SIZE];
	struct ufs_vreg *vreg = NULL;
	struct device *dev = host->hba->dev;
	struct device_node *np = dev->of_node;

	if (!np) {
		dev_err(dev, "%s: non DT initialization\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
	if (!of_parse_phandle(np, prop_name, 0)) {
		dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
			 __func__, prop_name);
		ret = -ENODEV;
		goto out;
	}

	vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
	if (!vreg) {
		ret = -ENOMEM;
		goto out;
	}
	vreg->name = name;

	snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
	ret = of_property_read_u32(np, prop_name, &vreg->max_uA);
	if (ret) {
		dev_err(dev, "%s: unable to find %s err %d\n",
			__func__, prop_name, ret);
		goto out;
	}

	vreg->reg = devm_regulator_get(dev, vreg->name);
	if (IS_ERR(vreg->reg)) {
		ret = PTR_ERR(vreg->reg);
		dev_err(dev, "%s: %s get failed, err=%d\n",
			__func__, vreg->name, ret);
	}
	vreg->min_uV = VDDP_REF_CLK_MIN_UV;
	vreg->max_uV = VDDP_REF_CLK_MAX_UV;

out:
	if (!ret)
		*out_vreg = vreg;
	return ret;
}
/**
 * ufs_qcom_init - bind phy with controller
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_qcom_init(struct ufs_hba *hba)
{
	int err;
	struct device *dev = hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct ufs_qcom_host *host;
	struct resource *res;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
		goto out;
	}

	/* Make a two way bind between the qcom host and the hba */
	host->hba = hba;
	spin_lock_init(&host->ice_work_lock);

	ufshcd_set_variant(hba, host);

	err = ufs_qcom_ice_get_dev(host);
	if (err == -EPROBE_DEFER) {
		/*
		 * UFS driver might be probed before ICE driver does.
		 * In that case we would like to return EPROBE_DEFER code
		 * in order to delay its probing.
		 */
		dev_err(dev, "%s: required ICE device not probed yet err = %d\n",
			__func__, err);
		goto out_variant_clear;
	} else if (err == -ENODEV) {
		/*
		 * ICE device is not enabled in DTS file. No need for further
		 * initialization of ICE driver.
		 */
		dev_warn(dev, "%s: ICE device is not enabled",
			 __func__);
	} else if (err) {
		dev_err(dev, "%s: ufs_qcom_ice_get_dev failed %d\n",
			__func__, err);
		goto out_variant_clear;
	}

	host->generic_phy = devm_phy_get(dev, "ufsphy");

	if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * UFS driver might be probed before the phy driver does.
		 * In that case we would like to return EPROBE_DEFER code.
		 */
		err = -EPROBE_DEFER;
		dev_warn(dev, "%s: required phy device hasn't probed yet, err = %d\n",
			 __func__, err);
		goto out_variant_clear;
	} else if (IS_ERR(host->generic_phy)) {
		err = PTR_ERR(host->generic_phy);
		dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
		goto out_variant_clear;
	}

	err = ufs_qcom_pm_qos_init(host);
	if (err)
		dev_info(dev, "%s: PM QoS will be disabled\n", __func__);

	/* restore the secure configuration */
	ufs_qcom_update_sec_cfg(hba, true);

	err = ufs_qcom_bus_register(host);
	if (err)
		goto out_variant_clear;

	ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
		&host->hw_ver.minor, &host->hw_ver.step);

	/*
	 * for newer controllers, device reference clock control bit has
	 * moved inside UFS controller register address space itself.
	 */
	if (host->hw_ver.major >= 0x02) {
		host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
		host->dev_ref_clk_en_mask = BIT(26);
	} else {
		/* "dev_ref_clk_ctrl_mem" is optional resource */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (res) {
			host->dev_ref_clk_ctrl_mmio =
					devm_ioremap_resource(dev, res);
			if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
				dev_warn(dev,
					"%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
					__func__,
					PTR_ERR(host->dev_ref_clk_ctrl_mmio));
				host->dev_ref_clk_ctrl_mmio = NULL;
			}
			host->dev_ref_clk_en_mask = BIT(5);
		}
	}

	/* update phy revision information before calling phy_init() */
	ufs_qcom_phy_save_controller_version(host->generic_phy,
		host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);

	err = ufs_qcom_parse_reg_info(host, "qcom,vddp-ref-clk",
				      &host->vddp_ref_clk);
	phy_init(host->generic_phy);
	err = phy_power_on(host->generic_phy);
	if (err)
		goto out_unregister_bus;
	if (host->vddp_ref_clk) {
		err = ufs_qcom_enable_vreg(dev, host->vddp_ref_clk);
		if (err) {
			dev_err(dev, "%s: failed enabling ref clk supply: %d\n",
				__func__, err);
			goto out_disable_phy;
		}
	}

	err = ufs_qcom_init_lane_clks(host);
	if (err)
		goto out_disable_vddp;

	ufs_qcom_parse_lpm(host);
	if (host->disable_lpm)
		pm_runtime_forbid(host->hba->dev);
	ufs_qcom_set_caps(hba);
	ufs_qcom_advertise_quirks(hba);

	ufs_qcom_set_bus_vote(hba, true);
	ufs_qcom_setup_clocks(hba, true, false);

	if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
		ufs_qcom_hosts[hba->dev->id] = host;

	host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
	ufs_qcom_get_default_testbus_cfg(host);
	err = ufs_qcom_testbus_config(host);
	if (err) {
		dev_warn(dev, "%s: failed to configure the testbus %d\n",
			 __func__, err);
		err = 0;
	}

	goto out;

out_disable_vddp:
	if (host->vddp_ref_clk)
		ufs_qcom_disable_vreg(dev, host->vddp_ref_clk);
out_disable_phy:
	phy_power_off(host->generic_phy);
out_unregister_bus:
	phy_exit(host->generic_phy);
	msm_bus_scale_unregister_client(host->bus_vote.client_handle);
out_variant_clear:
	devm_kfree(dev, host);
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}
static void ufs_qcom_exit(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	msm_bus_scale_unregister_client(host->bus_vote.client_handle);
	ufs_qcom_disable_lane_clks(host);
	phy_power_off(host->generic_phy);
	ufs_qcom_pm_qos_remove(host);
}
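/*
 * Program the maximum core clock cycles per 1us into DME_VS_CORE_CLK_CTRL
 * and clear the core clock divider enable bit, using the DME get/set
 * accessors.
 */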
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles)
{
	int err;
	u32 core_clk_ctrl_reg;

	if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
		return -EINVAL;

	err = ufshcd_dme_get(hba,
			     UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			     &core_clk_ctrl_reg);
	if (err)
		goto out;

	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
	core_clk_ctrl_reg |= clk_cycles;

	/* Clear CORE_CLK_DIV_EN */
	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;

	err = ufshcd_dme_set(hba,
			     UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			     core_clk_ctrl_reg);
out:
	return err;
}
static inline int ufs_qcom_configure_lpm(struct ufs_hba *hba, bool enable)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int err = 0;

	/* The default low power mode configuration is SVS2 */
	if (!ufs_qcom_cap_svs2(host))
		goto out;

	if (!((host->hw_ver.major == 0x3) &&
	      (host->hw_ver.minor == 0x0) &&
	      (host->hw_ver.step == 0x0)))
		goto out;

	/*
	 * The link should be put in hibern8 state before
	 * configuring the PHY to enter/exit SVS2 mode.
	 */
	err = ufshcd_uic_hibern8_enter(hba);
	if (err)
		goto out;

	err = ufs_qcom_phy_configure_lpm(phy, enable);
	if (err)
		goto out;

	err = ufshcd_uic_hibern8_exit(hba);
out:
	return err;
}
static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_pa_layer_attr *attr = &host->dev_req_params;
	int err = 0;

	if (!ufs_qcom_cap_qunipro(host))
		goto out;

	err = ufs_qcom_configure_lpm(hba, false);
	if (err)
		goto out;

	if (attr)
		__ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
				      attr->hs_rate, false, true);

	/* set unipro core clock cycles to 150 and clear clock divider */
	err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
out:
	return err;
}

static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	return ufs_qcom_configure_lpm(hba, true);
}

static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_pa_layer_attr *attr = &host->dev_req_params;
	int err = 0;

	if (!ufs_qcom_cap_qunipro(host))
		goto out;

	if (attr)
		ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
				    attr->hs_rate, false);

	if (ufs_qcom_cap_svs2(host))
		/*
		 * For SVS2 set unipro core clock cycles to 37 and
		 * clear clock divider
		 */
		err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 37);
	else
		/*
		 * For SVS set unipro core clock cycles to 75 and
		 * clear clock divider
		 */
		err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);

out:
	return err;
}
static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
		bool scale_up, enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		if (scale_up)
			err = ufs_qcom_clk_scale_up_pre_change(hba);
		else
			err = ufs_qcom_clk_scale_down_pre_change(hba);
		break;
	case POST_CHANGE:
		if (!scale_up)
			err = ufs_qcom_clk_scale_down_post_change(hba);

		ufs_qcom_update_bus_bw_vote(host);
		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
		err = -EINVAL;
		break;
	}

	return err;
}
2406 * This function should be called to restore the security configuration of UFS
2407 * register space after coming out of UFS host core power collapse.
2409 * @hba: host controller instance
2410 * @restore_sec_cfg: Set "true" if secure configuration needs to be restored
2411 * and set "false" when secure configuration is lost.
static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg)
{
	int ret = 0;
	u64 scm_ret = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	/* scm command buffer structure */
	struct msm_scm_cmd_buf {
		unsigned int device_id;
		unsigned int spare;
	} cbuf = {0};
	#define RESTORE_SEC_CFG_CMD	0x2
	#define UFS_TZ_DEV_ID		19

	if (!host || !hba->vreg_info.vdd_hba ||
	    !(host->sec_cfg_updated ^ restore_sec_cfg)) {
		return 0;
	} else if (host->caps &
		   UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE) {
		return 0;
	} else if (!restore_sec_cfg) {
		/*
		 * Clear the flag so next time when this function is called
		 * with restore_sec_cfg set to true, we can restore the secure
		 * configuration.
		 */
		host->sec_cfg_updated = false;
		goto out;
	} else if (hba->clk_gating.state != CLKS_ON) {
		/*
		 * Clocks should be ON to restore the host controller secure
		 * configuration.
		 */
		goto out;
	}

	/*
	 * If we are here, the host controller clocks are running, the host
	 * controller power collapse feature is supported, and the host
	 * controller has just come out of power collapse.
	 */
	cbuf.device_id = UFS_TZ_DEV_ID;
	ret = scm_restore_sec_cfg(cbuf.device_id, cbuf.spare, &scm_ret);
	if (ret || scm_ret) {
		dev_err(hba->dev, "%s: failed, ret %d scm_ret %llu\n",
			__func__, ret, scm_ret);
		goto out;
	}

	host->sec_cfg_updated = true;

out:
	dev_dbg(hba->dev, "%s: ip: restore_sec_cfg %d, op: restore_sec_cfg %d, ret %d scm_ret %llu\n",
		__func__, restore_sec_cfg, host->sec_cfg_updated, ret, scm_ret);
	return ret;
}
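
/*
 * Annotation (not from the original source): the expected call pattern is
 * restore_sec_cfg=false as the UFS host core loses power (marks the secure
 * configuration as lost) and restore_sec_cfg=true once it comes back up,
 * which triggers the scm_restore_sec_cfg() call into the secure world.
 */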

static inline u32 ufs_qcom_get_scale_down_gear(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (ufs_qcom_cap_svs2(host))
		return UFS_HS_G1;

	/* Default SVS support @ HS G2 frequencies */
	return UFS_HS_G2;
}

void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv,
		void (*print_fn)(struct ufs_hba *hba, int offset, int num_regs,
				 char *str, void *priv))
{
	u32 reg;
	struct ufs_qcom_host *host;

	if (unlikely(!hba)) {
		pr_err("%s: hba is NULL\n", __func__);
		return;
	}
	if (unlikely(!print_fn)) {
		dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
		return;
	}

	host = ufshcd_get_variant(hba);
	if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
		return;

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
	print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);

	/* set bit 17 - UTP_DBG_RAMS_EN - to allow reading the debug RAMs */
	reg = ufshcd_readl(hba, REG_UFS_CFG1);
	reg |= UFS_BIT(17);
	ufshcd_writel(hba, reg, REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
	print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
	print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
	print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);

	/* clear bit 17 - UTP_DBG_RAMS_EN */
	ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
	print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
	print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
	print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
	print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
	print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
	print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
	print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
}
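
/*
 * Annotation (not from the original source): this walker is normally
 * driven through ufs_qcom_dump_regs_wrapper() (see ufs_qcom_dump_dbg_regs()
 * below); the print_fn/priv indirection presumably also lets the debugfs
 * code render the same register set to a different sink than the kernel
 * log.
 */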

static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
{
	if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
		ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
			    UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
		ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
	} else {
		ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
		ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
	}
}

static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
	/* provide a legal default configuration */
	host->testbus.select_major = TSTBUS_UNIPRO;
	host->testbus.select_minor = 37;
}

bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host,
		u8 select_major, u8 select_minor)
{
	if (select_major >= TSTBUS_MAX) {
		dev_err(host->hba->dev,
			"%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
			__func__, select_major);
		return false;
	}

	/*
	 * Not performing check for each individual select_major
	 * mappings of select_minor, since there is no harm in
	 * configuring a non-existent select_minor
	 */
	if (select_minor > 0xFF) {
		dev_err(host->hba->dev,
			"%s: 0x%05X is not a legal testbus option\n",
			__func__, select_minor);
		return false;
	}

	return true;
}

/*
 * The caller of this function must make sure that the controller
 * is out of runtime suspend and appropriate clocks are enabled
 * before accessing.
 */
int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
	int reg = 0;
	int offset = 0, ret = 0, testbus_sel_offset = 19;
	u32 mask = TEST_BUS_SUB_SEL_MASK;
	unsigned long flags;
	struct ufs_hba *hba;

	if (!host)
		return -EINVAL;

	hba = host->hba;
	spin_lock_irqsave(hba->host->host_lock, flags);
	switch (host->testbus.select_major) {
	case TSTBUS_UAWM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 24;
		break;
	case TSTBUS_UARM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 16;
		break;
	case TSTBUS_TXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 8;
		break;
	case TSTBUS_RXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 0;
		break;
	case TSTBUS_DFC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 24;
		break;
	case TSTBUS_TRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 16;
		break;
	case TSTBUS_TMRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 8;
		break;
	case TSTBUS_OCSC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 0;
		break;
	case TSTBUS_WRAPPER:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 16;
		break;
	case TSTBUS_COMBINED:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 8;
		break;
	case TSTBUS_UTP_HCI:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 0;
		break;
	case TSTBUS_UNIPRO:
		reg = UFS_UNIPRO_CFG;
		offset = 20;
		mask = 0xFFF;
		break;
	/*
	 * No need for a default case, since
	 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
	 * is legal
	 */
	}
	mask <<= offset;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (reg) {
		ufshcd_rmwl(host->hba, TEST_BUS_SEL,
			    (u32)host->testbus.select_major << testbus_sel_offset,
			    REG_UFS_CFG1);
		ufshcd_rmwl(host->hba, mask,
			    (u32)host->testbus.select_minor << offset,
			    reg);
	} else {
		dev_err(hba->dev, "%s: Problem setting minor\n", __func__);
		ret = -EINVAL;
		goto out;
	}
	ufs_qcom_enable_test_bus(host);
	/*
	 * Make sure the test bus configuration is
	 * committed before returning.
	 */
	mb();
out:
	return ret;
}
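
/*
 * Illustrative usage (annotation, not from the original source): to route
 * UniPro debug line 40 onto the test bus and sample it:
 *
 *	if (ufs_qcom_testbus_cfg_is_ok(host, TSTBUS_UNIPRO, 40)) {
 *		host->testbus.select_major = TSTBUS_UNIPRO;
 *		host->testbus.select_minor = 40;
 *		ufs_qcom_testbus_config(host);
 *		val = ufshcd_readl(host->hba, UFS_TEST_BUS);
 *	}
 */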

static void ufs_qcom_testbus_read(struct ufs_hba *hba)
{
	ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
}

static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	u32 *testbus = NULL;
	int i, nminor = 256, testbus_len = nminor * sizeof(u32);

	testbus = kmalloc(testbus_len, GFP_KERNEL);
	if (!testbus)
		return;

	host->testbus.select_major = TSTBUS_UNIPRO;
	for (i = 0; i < nminor; i++) {
		host->testbus.select_minor = i;
		ufs_qcom_testbus_config(host);
		testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
	}
	print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
		       16, 4, testbus, testbus_len, false);
	kfree(testbus);
}

static void ufs_qcom_print_utp_hci_testbus(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	u32 *testbus = NULL;
	int i, nminor = 32, testbus_len = nminor * sizeof(u32);

	testbus = kmalloc(testbus_len, GFP_KERNEL);
	if (!testbus)
		return;

	host->testbus.select_major = TSTBUS_UTP_HCI;
	for (i = 0; i < nminor; i++) {
		host->testbus.select_minor = i;
		ufs_qcom_testbus_config(host);
		testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
	}
	print_hex_dump(KERN_ERR, "UTP_HCI_TEST_BUS ", DUMP_PREFIX_OFFSET,
		       16, 4, testbus, testbus_len, false);
	kfree(testbus);
}
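
/*
 * Annotation (not from the original source): both dump helpers above leave
 * host->testbus pointing at the last minor line they sampled; anything
 * relying on a specific test bus routing afterwards needs to reconfigure
 * it (ufs_qcom_get_default_testbus_cfg() provides a safe default).
 */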

static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba, bool no_sleep)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;

	ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
			   "HCI Vendor Specific Registers ");
	ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);

	if (no_sleep)
		return;

	/* sleep a bit intermittently as we are dumping too much data */
	usleep_range(1000, 1100);
	ufs_qcom_testbus_read(hba);
	usleep_range(1000, 1100);
	ufs_qcom_print_unipro_testbus(hba);
	usleep_range(1000, 1100);
	ufs_qcom_print_utp_hci_testbus(hba);
	usleep_range(1000, 1100);
	ufs_qcom_phy_dbg_register_dump(phy);
	usleep_range(1000, 1100);
	ufs_qcom_ice_print_regs(host);
}
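
/*
 * Annotation (not from the original source): no_sleep is presumably set
 * when this dump runs in atomic context (e.g. an error handler with locks
 * held), where usleep_range() and the test bus walks -- which allocate
 * memory and sleep -- are not allowed.
 */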

/*
 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
	.init = ufs_qcom_init,
	.exit = ufs_qcom_exit,
	.get_ufs_hci_version = ufs_qcom_get_ufs_hci_version,
	.clk_scale_notify = ufs_qcom_clk_scale_notify,
	.setup_clocks = ufs_qcom_setup_clocks,
	.hce_enable_notify = ufs_qcom_hce_enable_notify,
	.link_startup_notify = ufs_qcom_link_startup_notify,
	.pwr_change_notify = ufs_qcom_pwr_change_notify,
	.apply_dev_quirks = ufs_qcom_apply_dev_quirks,
	.suspend = ufs_qcom_suspend,
	.resume = ufs_qcom_resume,
	.full_reset = ufs_qcom_full_reset,
	.update_sec_cfg = ufs_qcom_update_sec_cfg,
	.get_scale_down_gear = ufs_qcom_get_scale_down_gear,
	.set_bus_vote = ufs_qcom_set_bus_vote,
	.dbg_register_dump = ufs_qcom_dump_dbg_regs,
#ifdef CONFIG_DEBUG_FS
	.add_debugfs = ufs_qcom_dbg_add_debugfs,
#endif
};

/* Note: the "crytpo" spelling below matches the callee identifiers */
static struct ufs_hba_crypto_variant_ops ufs_hba_crypto_variant_ops = {
	.crypto_req_setup = ufs_qcom_crypto_req_setup,
	.crypto_engine_cfg_start = ufs_qcom_crytpo_engine_cfg_start,
	.crypto_engine_cfg_end = ufs_qcom_crytpo_engine_cfg_end,
	.crypto_engine_reset = ufs_qcom_crytpo_engine_reset,
	.crypto_engine_get_status = ufs_qcom_crypto_engine_get_status,
};

static struct ufs_hba_pm_qos_variant_ops ufs_hba_pm_qos_variant_ops = {
	.req_start = ufs_qcom_pm_qos_req_start,
	.req_end = ufs_qcom_pm_qos_req_end,
};

static struct ufs_hba_variant ufs_hba_qcom_variant = {
	.name = "qcom",
	.vops = &ufs_hba_qcom_vops,
	.crypto_vops = &ufs_hba_crypto_variant_ops,
	.pm_qos_vops = &ufs_hba_pm_qos_variant_ops,
};

/**
 * ufs_qcom_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Return zero for success and non-zero for failure
 */
static int ufs_qcom_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	/*
	 * On qcom platforms, bootdevice is the primary storage
	 * device. This device can either be eMMC or UFS.
	 * The type of device connected is detected at runtime.
	 * So, if an eMMC device is connected, and this function
	 * is invoked, it would turn off the regulators if it detects
	 * that the storage device is not UFS.
	 * These regulators are turned ON by the bootloaders and turning
	 * them off without sending PON may damage the connected device.
	 * Hence, check for the connected device early on and don't turn
	 * off the regulators.
	 */
	if (of_property_read_bool(np, "non-removable") &&
	    strlen(android_boot_dev) &&
	    strcmp(android_boot_dev, dev_name(dev)))
		return -ENODEV;

	/* Perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_variant);
	if (err)
		dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);

	return err;
}
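
/*
 * Annotation (not from the original source): a minimal, hypothetical DT
 * node this driver would bind against -- the register address is made up
 * and the full property set varies by SoC (see the ufshcd DT bindings):
 *
 *	ufshc@624000 {
 *		compatible = "qcom,ufshc";
 *		reg = <0x624000 0x2500>;
 *		non-removable;
 *	};
 */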

/**
 * ufs_qcom_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always return 0
 */
static int ufs_qcom_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);
	return 0;
}

static const struct of_device_id ufs_qcom_of_match[] = {
	{ .compatible = "qcom,ufshc" },
	{},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);

static const struct dev_pm_ops ufs_qcom_pm_ops = {
	.suspend = ufshcd_pltfrm_suspend,
	.resume = ufshcd_pltfrm_resume,
	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
	.runtime_resume = ufshcd_pltfrm_runtime_resume,
	.runtime_idle = ufshcd_pltfrm_runtime_idle,
};

static struct platform_driver ufs_qcom_pltform = {
	.probe = ufs_qcom_probe,
	.remove = ufs_qcom_remove,
	.shutdown = ufshcd_pltfrm_shutdown,
	.driver = {
		.name = "ufshcd-qcom",
		.pm = &ufs_qcom_pm_ops,
		.of_match_table = of_match_ptr(ufs_qcom_of_match),
	},
};
module_platform_driver(ufs_qcom_pltform);

MODULE_LICENSE("GPL v2");