sagit-ice-cold/kernel_xiaomi_msm8998.git: drivers/scsi/ufs/ufs-qcom.c
1 /*
2  * Copyright (c) 2013-2017, Linux Foundation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 and
6  * only version 2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  */
14
15 #include <linux/time.h>
16 #include <linux/of.h>
17 #include <linux/iopoll.h>
18 #include <linux/platform_device.h>
19
20 #ifdef CONFIG_QCOM_BUS_SCALING
21 #include <linux/msm-bus.h>
22 #endif
23
24 #include <soc/qcom/scm.h>
25 #include <linux/phy/phy.h>
26 #include <linux/phy/phy-qcom-ufs.h>
27
28 #include "ufshcd.h"
29 #include "ufshcd-pltfrm.h"
30 #include "unipro.h"
31 #include "ufs-qcom.h"
32 #include "ufshci.h"
33 #include "ufs_quirks.h"
34 #include "ufs-qcom-ice.h"
35 #include "ufs-qcom-debugfs.h"
36 #include <linux/clk/msm-clk.h>
37
38 #define MAX_PROP_SIZE              32
39 #define VDDP_REF_CLK_MIN_UV        1200000
40 #define VDDP_REF_CLK_MAX_UV        1200000
41
42 #define UFS_QCOM_DEFAULT_DBG_PRINT_EN   \
43         (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
44
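/*
 * Test bus select IDs: each value below appears to identify one internal
 * controller sub-module that can be routed onto the debug test bus (used by
 * ufs_qcom_get_default_testbus_cfg() and the debugfs support).
 */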
45 enum {
46         TSTBUS_UAWM,
47         TSTBUS_UARM,
48         TSTBUS_TXUC,
49         TSTBUS_RXUC,
50         TSTBUS_DFC,
51         TSTBUS_TRLUT,
52         TSTBUS_TMRLUT,
53         TSTBUS_OCSC,
54         TSTBUS_UTP_HCI,
55         TSTBUS_COMBINED,
56         TSTBUS_WRAPPER,
57         TSTBUS_UNIPRO,
58         TSTBUS_MAX,
59 };
60
61 static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
62
63 static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg);
64 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
65 static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
66                                                        u32 clk_cycles);
67 static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host);
68
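/*
 * Dump 'len' 32-bit host controller registers starting at 'offset' within the
 * UFS HCI register space, tagging each line of the hex dump with 'prefix'.
 */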
69 static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
70                 char *prefix)
71 {
72         print_hex_dump(KERN_ERR, prefix,
73                         len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
74                         16, 4, hba->mmio_base + offset, len * 4, false);
75 }
76
77 static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
78                 char *prefix, void *priv)
79 {
80         ufs_qcom_dump_regs(hba, offset, len, prefix);
81 }
82
83 static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
84 {
85         int err = 0;
86
87         err = ufshcd_dme_get(hba,
88                         UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
89         if (err)
90                 dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
91                                 __func__, err);
92
93         return err;
94 }
95
96 static int ufs_qcom_host_clk_get(struct device *dev,
97                 const char *name, struct clk **clk_out)
98 {
99         struct clk *clk;
100         int err = 0;
101
102         clk = devm_clk_get(dev, name);
103         if (IS_ERR(clk))
104                 err = PTR_ERR(clk);
105         else
106                 *clk_out = clk;
107
108         return err;
109 }
110
111 static int ufs_qcom_host_clk_enable(struct device *dev,
112                 const char *name, struct clk *clk)
113 {
114         int err = 0;
115
116         err = clk_prepare_enable(clk);
117         if (err)
118                 dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);
119
120         return err;
121 }
122
123 static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
124 {
125         if (!host->is_lane_clks_enabled)
126                 return;
127
128         if (host->tx_l1_sync_clk)
129                 clk_disable_unprepare(host->tx_l1_sync_clk);
130         clk_disable_unprepare(host->tx_l0_sync_clk);
131         if (host->rx_l1_sync_clk)
132                 clk_disable_unprepare(host->rx_l1_sync_clk);
133         clk_disable_unprepare(host->rx_l0_sync_clk);
134
135         host->is_lane_clks_enabled = false;
136 }
137
138 static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
139 {
140         int err = 0;
141         struct device *dev = host->hba->dev;
142
143         if (host->is_lane_clks_enabled)
144                 return 0;
145
146         err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
147                 host->rx_l0_sync_clk);
148         if (err)
149                 goto out;
150
151         err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
152                 host->tx_l0_sync_clk);
153         if (err)
154                 goto disable_rx_l0;
155
156         if (host->hba->lanes_per_direction > 1) {
157                 err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
158                         host->rx_l1_sync_clk);
159                 if (err)
160                         goto disable_tx_l0;
161
162                 /* The tx lane1 clk could be muxed, hence keep this optional */
163                 if (host->tx_l1_sync_clk)
164                         ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
165                                                  host->tx_l1_sync_clk);
166         }
167         host->is_lane_clks_enabled = true;
168         goto out;
169
170 disable_tx_l0:
171         clk_disable_unprepare(host->tx_l0_sync_clk);
172 disable_rx_l0:
173         clk_disable_unprepare(host->rx_l0_sync_clk);
174 out:
175         return err;
176 }
177
178 static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
179 {
180         int err = 0;
181         struct device *dev = host->hba->dev;
182
183         err = ufs_qcom_host_clk_get(dev,
184                         "rx_lane0_sync_clk", &host->rx_l0_sync_clk);
185         if (err) {
186                 dev_err(dev, "%s: failed to get rx_lane0_sync_clk, err %d",
187                                 __func__, err);
188                 goto out;
189         }
190
191         err = ufs_qcom_host_clk_get(dev,
192                         "tx_lane0_sync_clk", &host->tx_l0_sync_clk);
193         if (err) {
194                 dev_err(dev, "%s: failed to get tx_lane0_sync_clk, err %d",
195                                 __func__, err);
196                 goto out;
197         }
198
199         /* In case of single lane per direction, don't read lane1 clocks */
200         if (host->hba->lanes_per_direction > 1) {
201                 err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
202                         &host->rx_l1_sync_clk);
203                 if (err) {
204                         dev_err(dev, "%s: failed to get rx_lane1_sync_clk, err %d",
205                                         __func__, err);
206                         goto out;
207                 }
208
209                 /* The tx lane1 clk could be muxed, hence keep this optional */
210                 ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
211                                         &host->tx_l1_sync_clk);
212         }
213 out:
214         return err;
215 }
216
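/*
 * Poll the MPHY TX FSM state for up to HBRN8_POLL_TOUT_MS and verify that the
 * TX FSM has reached HIBERN8. Returns 0 on success; on failure, returns either
 * the DME access error code or the unexpected FSM state value.
 */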
217 static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
218 {
219         int err;
220         u32 tx_fsm_val = 0;
221         unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
222
223         do {
224                 err = ufshcd_dme_get(hba,
225                                 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
226                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
227                                 &tx_fsm_val);
228                 if (err || tx_fsm_val == TX_FSM_HIBERN8)
229                         break;
230
231                 /* sleep for max. 200us */
232                 usleep_range(100, 200);
233         } while (time_before(jiffies, timeout));
234
235         /*
236          * we might have been scheduled out for a long time during polling,
237          * so check the state again.
238          */
239         if (time_after(jiffies, timeout))
240                 err = ufshcd_dme_get(hba,
241                                 UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
242                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
243                                 &tx_fsm_val);
244
245         if (err) {
246                 dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
247                                 __func__, err);
248         } else if (tx_fsm_val != TX_FSM_HIBERN8) {
249                 err = tx_fsm_val;
250                 dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
251                                 __func__, err);
252         }
253
254         return err;
255 }
256
257 static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
258 {
259         ufshcd_rmwl(host->hba, QUNIPRO_SEL,
260                    ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
261                    REG_UFS_CFG1);
262         /* make sure above configuration is applied before we return */
263         mb();
264 }
265
266 static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
267 {
268         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
269         struct phy *phy = host->generic_phy;
270         int ret = 0;
271         bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
272                                                         ? true : false;
273
274         /* Assert PHY reset and apply PHY calibration values */
275         ufs_qcom_assert_reset(hba);
276         /* provide 1ms delay to let the reset pulse propagate */
277         usleep_range(1000, 1100);
278
279         ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
280
281         if (ret) {
282                 dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
283                         __func__, ret);
284                 goto out;
285         }
286
287         /* De-assert PHY reset and start serdes */
288         ufs_qcom_deassert_reset(hba);
289
290         /*
291          * After reset de-assertion, the PHY needs all ref clocks, voltage and
292          * current to settle down before the serdes is started.
293          */
294         usleep_range(1000, 1100);
295         ret = ufs_qcom_phy_start_serdes(phy);
296         if (ret) {
297                 dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
298                         __func__, ret);
299                 goto out;
300         }
301
302         ret = ufs_qcom_phy_is_pcs_ready(phy);
303         if (ret)
304                 dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
305                         __func__, ret);
306
307         ufs_qcom_select_unipro_mode(host);
308
309 out:
310         return ret;
311 }
312
313 /*
314  * The UTP controller has a number of internal clock gating cells (CGCs).
315  * Internal hardware sub-modules within the UTP controller control the CGCs.
316  * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
317  * in a specific operation. The UTP controller CGCs are disabled by default,
318  * so this function enables them (after every UFS link startup) to save some
319  * power leakage.
320  *
321  * UFS host controller v3.0.0 onwards also has an internal clock gating
322  * mechanism in Qunipro; enable it as well to save additional power.
323  */
324 static int ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
325 {
326         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
327         int err = 0;
328
329         /* Enable UTP internal clock gating */
330         ufshcd_writel(hba,
331                 ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
332                 REG_UFS_CFG2);
333
334         /* Ensure that HW clock gating is enabled before next operations */
335         mb();
336
337         /* Enable Qunipro internal clock gating if supported */
338         if (!ufs_qcom_cap_qunipro_clk_gating(host))
339                 goto out;
340
341         /* Enable all the mask bits */
342         err = ufshcd_dme_rmw(hba, DL_VS_CLK_CFG_MASK,
343                                 DL_VS_CLK_CFG_MASK, DL_VS_CLK_CFG);
344         if (err)
345                 goto out;
346
347         err = ufshcd_dme_rmw(hba, PA_VS_CLK_CFG_REG_MASK,
348                                 PA_VS_CLK_CFG_REG_MASK, PA_VS_CLK_CFG_REG);
349         if (err)
350                 goto out;
351
352         err = ufshcd_dme_rmw(hba, DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
353                                 DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
354                                 DME_VS_CORE_CLK_CTRL);
355 out:
356         return err;
357 }
358
359 static void ufs_qcom_force_mem_config(struct ufs_hba *hba)
360 {
361         struct ufs_clk_info *clki;
362
363         /*
364          * Configure the behavior of ufs clocks core and peripheral
365          * memory state when they are turned off.
366          * This configuration is required to allow retaining
367          * ICE crypto configuration (including keys) when
368          * core_clk_ice is turned off, and powering down
369          * non-ICE RAMs of host controller.
370          */
371         list_for_each_entry(clki, &hba->clk_list_head, list) {
372                 if (!strcmp(clki->name, "core_clk_ice"))
373                         clk_set_flags(clki->clk, CLKFLAG_RETAIN_MEM);
374                 else
375                         clk_set_flags(clki->clk, CLKFLAG_NORETAIN_MEM);
376                 clk_set_flags(clki->clk, CLKFLAG_NORETAIN_PERIPH);
377                 clk_set_flags(clki->clk, CLKFLAG_PERIPH_OFF_CLEAR);
378         }
379 }
380
381 static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
382                                       enum ufs_notify_change_status status)
383 {
384         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
385         int err = 0;
386
387         switch (status) {
388         case PRE_CHANGE:
389                 ufs_qcom_force_mem_config(hba);
390                 ufs_qcom_power_up_sequence(hba);
391                 /*
392                  * The PHY PLL output is the source of tx/rx lane symbol
393                  * clocks, hence, enable the lane clocks only after PHY
394                  * is initialized.
395                  */
396                 err = ufs_qcom_enable_lane_clks(host);
397                 if (!err && host->ice.pdev) {
398                         err = ufs_qcom_ice_init(host);
399                         if (err) {
400                                 dev_err(hba->dev, "%s: ICE init failed (%d)\n",
401                                         __func__, err);
402                                 err = -EINVAL;
403                         }
404                 }
405
406                 break;
407         case POST_CHANGE:
408                 /* check if UFS PHY moved from DISABLED to HIBERN8 */
409                 err = ufs_qcom_check_hibern8(hba);
410                 break;
411         default:
412                 dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
413                 err = -EINVAL;
414                 break;
415         }
416         return err;
417 }
418
419 /**
420  * Returns zero for success and non-zero in case of a failure
421  */
422 static int __ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
423                                u32 hs, u32 rate, bool update_link_startup_timer,
424                                bool is_pre_scale_up)
425 {
426         int ret = 0;
427         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
428         struct ufs_clk_info *clki;
429         u32 core_clk_period_in_ns;
430         u32 tx_clk_cycles_per_us = 0;
431         unsigned long core_clk_rate = 0;
432         u32 core_clk_cycles_per_us = 0;
433
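        /*
         * {gear, value} lookup tables: the second column is the number of TX
         * clock cycles per microsecond programmed into the low bits of
         * REG_UFS_TX_SYMBOL_CLK_NS_US for PWM, HS rate A and HS rate B.
         */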
434         static u32 pwm_fr_table[][2] = {
435                 {UFS_PWM_G1, 0x1},
436                 {UFS_PWM_G2, 0x1},
437                 {UFS_PWM_G3, 0x1},
438                 {UFS_PWM_G4, 0x1},
439         };
440
441         static u32 hs_fr_table_rA[][2] = {
442                 {UFS_HS_G1, 0x1F},
443                 {UFS_HS_G2, 0x3e},
444                 {UFS_HS_G3, 0x7D},
445         };
446
447         static u32 hs_fr_table_rB[][2] = {
448                 {UFS_HS_G1, 0x24},
449                 {UFS_HS_G2, 0x49},
450                 {UFS_HS_G3, 0x92},
451         };
452
453         /*
454          * The Qunipro controller does not use the following registers:
455          * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
456          * UFS_REG_PA_LINK_STARTUP_TIMER.
457          * However, the UTP controller uses the SYS1CLK_1US_REG register for
458          * Interrupt Aggregation / Auto-hibern8 logic.
459          */
460         if (ufs_qcom_cap_qunipro(host) &&
461             (!(ufshcd_is_intr_aggr_allowed(hba) ||
462                ufshcd_is_auto_hibern8_supported(hba))))
463                 goto out;
464
465         if (gear == 0) {
466                 dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
467                 goto out_error;
468         }
469
470         list_for_each_entry(clki, &hba->clk_list_head, list) {
471                 if (!strcmp(clki->name, "core_clk")) {
472                         if (is_pre_scale_up)
473                                 core_clk_rate = clki->max_freq;
474                         else
475                                 core_clk_rate = clk_get_rate(clki->clk);
476                 }
477         }
478
479         /* If frequency is smaller than 1MHz, set to 1MHz */
480         if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
481                 core_clk_rate = DEFAULT_CLK_RATE_HZ;
482
483         core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
484         if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
485                 ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
486                 /*
487                  * make sure above write gets applied before we return from
488                  * this function.
489                  */
490                 mb();
491         }
492
493         if (ufs_qcom_cap_qunipro(host))
494                 goto out;
495
496         core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
497         core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
498         core_clk_period_in_ns &= MASK_CLK_NS_REG;
499
500         switch (hs) {
501         case FASTAUTO_MODE:
502         case FAST_MODE:
503                 if (rate == PA_HS_MODE_A) {
504                         if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
505                                 dev_err(hba->dev,
506                                         "%s: index %d exceeds table size %zu\n",
507                                         __func__, gear,
508                                         ARRAY_SIZE(hs_fr_table_rA));
509                                 goto out_error;
510                         }
511                         tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
512                 } else if (rate == PA_HS_MODE_B) {
513                         if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
514                                 dev_err(hba->dev,
515                                         "%s: index %d exceeds table size %zu\n",
516                                         __func__, gear,
517                                         ARRAY_SIZE(hs_fr_table_rB));
518                                 goto out_error;
519                         }
520                         tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
521                 } else {
522                         dev_err(hba->dev, "%s: invalid rate = %d\n",
523                                 __func__, rate);
524                         goto out_error;
525                 }
526                 break;
527         case SLOWAUTO_MODE:
528         case SLOW_MODE:
529                 if (gear > ARRAY_SIZE(pwm_fr_table)) {
530                         dev_err(hba->dev,
531                                         "%s: index %d exceeds table size %zu\n",
532                                         __func__, gear,
533                                         ARRAY_SIZE(pwm_fr_table));
534                         goto out_error;
535                 }
536                 tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
537                 break;
538         case UNCHANGED:
539         default:
540                 dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
541                 goto out_error;
542         }
543
544         if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
545             (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
546                 /* these two register fields must be written at once */
547                 ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
548                               REG_UFS_TX_SYMBOL_CLK_NS_US);
549                 /*
550                  * make sure above write gets applied before we return from
551                  * this function.
552                  */
553                 mb();
554         }
555
556         if (update_link_startup_timer) {
557                 ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
558                               REG_UFS_PA_LINK_STARTUP_TIMER);
559                 /*
560                  * make sure that this configuration is applied before
561                  * we return
562                  */
563                 mb();
564         }
565         goto out;
566
567 out_error:
568         ret = -EINVAL;
569 out:
570         return ret;
571 }
572
573 static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
574                                u32 hs, u32 rate, bool update_link_startup_timer)
575 {
576         return  __ufs_qcom_cfg_timers(hba, gear, hs, rate,
577                                       update_link_startup_timer, false);
578 }
579
580 static int ufs_qcom_link_startup_pre_change(struct ufs_hba *hba)
581 {
582         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
583         struct phy *phy = host->generic_phy;
584         u32 unipro_ver;
585         int err = 0;
586
587         if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE, 0, true)) {
588                 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
589                         __func__);
590                 err = -EINVAL;
591                 goto out;
592         }
593
594         /* make sure RX LineCfg is enabled before link startup */
595         err = ufs_qcom_phy_ctrl_rx_linecfg(phy, true);
596         if (err)
597                 goto out;
598
599         if (ufs_qcom_cap_qunipro(host)) {
600                 /*
601                  * set unipro core clock cycles to 150 & clear clock divider
602                  */
603                 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
604                 if (err)
605                         goto out;
606         }
607
608         err = ufs_qcom_enable_hw_clk_gating(hba);
609         if (err)
610                 goto out;
611
612         /*
613          * Some UFS devices (and maybe the host) have issues if LCC is
614          * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
615          * before link startup which will make sure that both host
616          * and device TX LCC are disabled once link startup is
617          * completed.
618          */
619         unipro_ver = ufshcd_get_local_unipro_ver(hba);
620         if (unipro_ver != UFS_UNIPRO_VER_1_41)
621                 err = ufshcd_dme_set(hba,
622                                      UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
623                                      0);
624         if (err)
625                 goto out;
626
627         if (!ufs_qcom_cap_qunipro_clk_gating(host))
628                 goto out;
629
630         /* Enable all the mask bits */
631         err = ufshcd_dme_rmw(hba, SAVECONFIGTIME_MODE_MASK,
632                                 SAVECONFIGTIME_MODE_MASK,
633                                 PA_VS_CONFIG_REG1);
634 out:
635         return err;
636 }
637
638 static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
639 {
640         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
641         struct phy *phy = host->generic_phy;
642         u32 tx_lanes;
643         int err = 0;
644
645         err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
646         if (err)
647                 goto out;
648
649         err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
650         if (err) {
651                 dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
652                         __func__);
653                 goto out;
654         }
655
656         /*
657          * Some UFS devices send incorrect LineCfg data as part of power mode
658          * change sequence which may cause host PHY to go into bad state.
659          * Disabling Rx LineCfg of host PHY should help avoid this.
660          */
661         if (ufshcd_get_local_unipro_ver(hba) == UFS_UNIPRO_VER_1_41)
662                 err = ufs_qcom_phy_ctrl_rx_linecfg(phy, false);
663         if (err) {
664                 dev_err(hba->dev, "%s: ufs_qcom_phy_ctrl_rx_linecfg failed\n",
665                         __func__);
666                 goto out;
667         }
668
669         /*
670          * The UFS controller has a *clk_req output to GCC, one for each of the
671          * clocks entering it. When *clk_req for a specific clock is de-asserted,
672          * a corresponding clock from GCC is stopped. UFS controller de-asserts
673          * *clk_req outputs when it is in Auto Hibernate state only if the
674          * Clock request feature is enabled.
675          * Enable the Clock request feature:
676          * - Enable HW clock control for UFS clocks in GCC (handled by the
677          *   clock driver as part of clk_prepare_enable).
678          * - Set the AH8_CFG.*CLK_REQ register bits to 1.
679          */
680         if (ufshcd_is_auto_hibern8_supported(hba))
681                 ufshcd_writel(hba, ufshcd_readl(hba, UFS_AH8_CFG) |
682                                    UFS_HW_CLK_CTRL_EN,
683                                    UFS_AH8_CFG);
684         /*
685          * Make sure clock request feature gets enabled for HW clk gating
686          * before further operations.
687          */
688         mb();
689
690 out:
691         return err;
692 }
693
694 static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
695                                         enum ufs_notify_change_status status)
696 {
697         int err = 0;
698
699         switch (status) {
700         case PRE_CHANGE:
701                 err = ufs_qcom_link_startup_pre_change(hba);
702                 break;
703         case POST_CHANGE:
704                 err = ufs_qcom_link_startup_post_change(hba);
705                 break;
706         default:
707                 break;
708         }
709
710         return err;
711 }
712
713
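/*
 * Program the regulator load (uA) and minimum voltage for 'vreg': the real
 * values when 'on' is true, or zero load / zero minimum voltage when the rail
 * is being turned off.
 */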
714 static int ufs_qcom_config_vreg(struct device *dev,
715                 struct ufs_vreg *vreg, bool on)
716 {
717         int ret = 0;
718         struct regulator *reg;
719         int min_uV, uA_load;
720
721         if (!vreg) {
722                 WARN_ON(1);
723                 ret = -EINVAL;
724                 goto out;
725         }
726
727         reg = vreg->reg;
728         if (regulator_count_voltages(reg) > 0) {
729                 uA_load = on ? vreg->max_uA : 0;
730                 ret = regulator_set_load(vreg->reg, uA_load);
731                 if (ret)
732                         goto out;
733
734                 min_uV = on ? vreg->min_uV : 0;
735                 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
736                 if (ret) {
737                         dev_err(dev, "%s: %s set voltage failed, err=%d\n",
738                                         __func__, vreg->name, ret);
739                         goto out;
740                 }
741         }
742 out:
743         return ret;
744 }
745
746 static int ufs_qcom_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
747 {
748         int ret = 0;
749
750         if (vreg->enabled)
751                 return ret;
752
753         ret = ufs_qcom_config_vreg(dev, vreg, true);
754         if (ret)
755                 goto out;
756
757         ret = regulator_enable(vreg->reg);
758         if (ret)
759                 goto out;
760
761         vreg->enabled = true;
762 out:
763         return ret;
764 }
765
766 static int ufs_qcom_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
767 {
768         int ret = 0;
769
770         if (!vreg->enabled)
771                 return ret;
772
773         ret = regulator_disable(vreg->reg);
774         if (ret)
775                 goto out;
776
777         ret = ufs_qcom_config_vreg(dev, vreg, false);
778         if (ret)
779                 goto out;
780
781         vreg->enabled = false;
782 out:
783         return ret;
784 }
785
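/*
 * Variant suspend hook: if the UniPro link is not active, turn off the lane
 * clocks and the PHY and suspend ICE; if the link is fully off, additionally
 * disable the VDDP ref-clk rail (when present) and assert PHY reset, otherwise
 * fall through to releasing the PM QoS vote held for this host.
 */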
786 static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
787 {
788         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
789         struct phy *phy = host->generic_phy;
790         int ret = 0;
791
792         /*
793          * If UniPro link is not active or OFF, PHY ref_clk, main PHY analog
794          * power rail and low noise analog power rail for PLL can be
795          * switched off.
796          */
797         if (!ufs_qcom_is_link_active(hba)) {
798                 ufs_qcom_disable_lane_clks(host);
799                 phy_power_off(phy);
800
801                 if (host->vddp_ref_clk && ufs_qcom_is_link_off(hba))
802                         ret = ufs_qcom_disable_vreg(hba->dev,
803                                         host->vddp_ref_clk);
804                 ufs_qcom_ice_suspend(host);
805
806                 if (ufs_qcom_is_link_off(hba)) {
807                         /* Assert PHY soft reset */
808                         ufs_qcom_assert_reset(hba);
809                         goto out;
810                 }
811         }
812         /* Unvote PM QoS */
813         ufs_qcom_pm_qos_suspend(host);
814
815 out:
816         return ret;
817 }
818
819 static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
820 {
821         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
822         struct phy *phy = host->generic_phy;
823         int err;
824
825         err = phy_power_on(phy);
826         if (err) {
827                 dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
828                         __func__, err);
829                 goto out;
830         }
831
832         if (host->vddp_ref_clk && (hba->rpm_lvl > UFS_PM_LVL_3 ||
833                                    hba->spm_lvl > UFS_PM_LVL_3))
834                 ufs_qcom_enable_vreg(hba->dev,
835                                       host->vddp_ref_clk);
836
837         err = ufs_qcom_enable_lane_clks(host);
838         if (err)
839                 goto out;
840
841         err = ufs_qcom_ice_resume(host);
842         if (err) {
843                 dev_err(hba->dev, "%s: ufs_qcom_ice_resume failed, err = %d\n",
844                         __func__, err);
845                 goto out;
846         }
847
848         hba->is_sys_suspended = false;
849
850 out:
851         return err;
852 }
853
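/*
 * Perform a full host controller reset by asserting the core_reset line,
 * waiting well beyond the required 3-4 sleep clock cycles, and de-asserting
 * it again. Returns -ENOTSUPP if no core_reset handle is available.
 */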
854 static int ufs_qcom_full_reset(struct ufs_hba *hba)
855 {
856         int ret = -ENOTSUPP;
857
858         if (!hba->core_reset) {
859                 dev_err(hba->dev, "%s: failed, err = %d\n", __func__,
860                                 ret);
861                 goto out;
862         }
863
864         ret = reset_control_assert(hba->core_reset);
865         if (ret) {
866                 dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
867                                 __func__, ret);
868                 goto out;
869         }
870
871         /*
872          * The hardware requirement for delay between assert/deassert
873          * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
874          * ~125us (4/32768). To be on the safe side add 200us delay.
875          */
876         usleep_range(200, 210);
877
878         ret = reset_control_deassert(hba->core_reset);
879         if (ret)
880                 dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
881                                 __func__, ret);
882
883 out:
884         return ret;
885 }
886
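/*
 * Inline Crypto Engine (ICE) hooks. These are only built when
 * CONFIG_SCSI_UFS_QCOM_ICE is enabled; otherwise they are stubbed out to NULL
 * below so that crypto handling is simply skipped.
 */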
887 #ifdef CONFIG_SCSI_UFS_QCOM_ICE
888 static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba,
889         struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun)
890 {
891         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
892         struct request *req;
893         int ret;
894
895         if (lrbp->cmd && lrbp->cmd->request)
896                 req = lrbp->cmd->request;
897         else
898                 return 0;
899
900         /* Use request LBA as the DUN value */
901         if (req->bio)
902                 *dun = (req->bio->bi_iter.bi_sector) >>
903                                 UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
904
905         ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);
906
907         return ret;
908 }
909
910 static
911 int ufs_qcom_crytpo_engine_cfg_start(struct ufs_hba *hba, unsigned int task_tag)
912 {
913         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
914         struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
915         int err = 0;
916
917         if (!host->ice.pdev ||
918             !lrbp->cmd || lrbp->command_type != UTP_CMD_TYPE_SCSI)
919                 goto out;
920
921         err = ufs_qcom_ice_cfg_start(host, lrbp->cmd);
922 out:
923         return err;
924 }
925
926 static
927 int ufs_qcom_crytpo_engine_cfg_end(struct ufs_hba *hba,
928                 struct ufshcd_lrb *lrbp, struct request *req)
929 {
930         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
931         int err = 0;
932
933         if (!host->ice.pdev || lrbp->command_type != UTP_CMD_TYPE_SCSI)
934                 goto out;
935
936         err = ufs_qcom_ice_cfg_end(host, req);
937 out:
938         return err;
939 }
940
941 static
942 int ufs_qcom_crytpo_engine_reset(struct ufs_hba *hba)
943 {
944         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
945         int err = 0;
946
947         if (!host->ice.pdev)
948                 goto out;
949
950         err = ufs_qcom_ice_reset(host);
951 out:
952         return err;
953 }
954
955 static int ufs_qcom_crypto_engine_get_status(struct ufs_hba *hba, u32 *status)
956 {
957         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
958
959         if (!status)
960                 return -EINVAL;
961
962         return ufs_qcom_ice_get_status(host, status);
963 }
964 #else /* !CONFIG_SCSI_UFS_QCOM_ICE */
965 #define ufs_qcom_crypto_req_setup               NULL
966 #define ufs_qcom_crytpo_engine_cfg_start        NULL
967 #define ufs_qcom_crytpo_engine_cfg_end          NULL
968 #define ufs_qcom_crytpo_engine_reset            NULL
969 #define ufs_qcom_crypto_engine_get_status       NULL
970 #endif /* CONFIG_SCSI_UFS_QCOM_ICE */
971
972 struct ufs_qcom_dev_params {
973         u32 pwm_rx_gear;        /* pwm rx gear to work in */
974         u32 pwm_tx_gear;        /* pwm tx gear to work in */
975         u32 hs_rx_gear;         /* hs rx gear to work in */
976         u32 hs_tx_gear;         /* hs tx gear to work in */
977         u32 rx_lanes;           /* number of rx lanes */
978         u32 tx_lanes;           /* number of tx lanes */
979         u32 rx_pwr_pwm;         /* rx pwm working pwr */
980         u32 tx_pwr_pwm;         /* tx pwm working pwr */
981         u32 rx_pwr_hs;          /* rx hs working pwr */
982         u32 tx_pwr_hs;          /* tx hs working pwr */
983         u32 hs_rate;            /* rate A/B to work in HS */
984         u32 desired_working_mode;
985 };
986
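/*
 * Negotiate the power mode to use: combine the device's maximum capabilities
 * (dev_max) with the vendor limits in qcom_param and fill agreed_pwr with the
 * resulting power mode, gear, lane count and HS rate.
 */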
987 static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
988                                       struct ufs_pa_layer_attr *dev_max,
989                                       struct ufs_pa_layer_attr *agreed_pwr)
990 {
991         int min_qcom_gear;
992         int min_dev_gear;
993         bool is_dev_sup_hs = false;
994         bool is_qcom_max_hs = false;
995
996         if (dev_max->pwr_rx == FAST_MODE)
997                 is_dev_sup_hs = true;
998
999         if (qcom_param->desired_working_mode == FAST) {
1000                 is_qcom_max_hs = true;
1001                 min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
1002                                       qcom_param->hs_tx_gear);
1003         } else {
1004                 min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
1005                                       qcom_param->pwm_tx_gear);
1006         }
1007
1008         /*
1009          * The device doesn't support HS but qcom_param->desired_working_mode
1010          * is HS; thus the device and qcom_param don't agree.
1011          */
1012         if (!is_dev_sup_hs && is_qcom_max_hs) {
1013                 pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
1014                         __func__);
1015                 return -ENOTSUPP;
1016         } else if (is_dev_sup_hs && is_qcom_max_hs) {
1017                 /*
1018                  * since device supports HS, it supports FAST_MODE.
1019                  * since qcom_param->desired_working_mode is also HS
1020                  * then final decision (FAST/FASTAUTO) is done according
1021                  * to qcom_params as it is the restricting factor
1022                  */
1023                 agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
1024                                                 qcom_param->rx_pwr_hs;
1025         } else {
1026                 /*
1027                  * here qcom_param->desired_working_mode is PWM.
1028                  * it doesn't matter whether device supports HS or PWM,
1029                  * in both cases qcom_param->desired_working_mode will
1030                  * determine the mode
1031                  */
1032                  agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
1033                                                 qcom_param->rx_pwr_pwm;
1034         }
1035
1036         /*
1037          * we would like tx to work in the minimum number of lanes
1038          * between device capability and vendor preferences.
1039          * the same decision will be made for rx
1040          */
1041         agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
1042                                                 qcom_param->tx_lanes);
1043         agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
1044                                                 qcom_param->rx_lanes);
1045
1046         /* device maximum gear is the minimum between device rx and tx gears */
1047         min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
1048
1049         /*
1050          * If device capabilities and vendor pre-defined preferences are both
1051          * HS or both PWM, then set the minimum of the two gears as the chosen
1052          * working gear.
1053          * If one is PWM and one is HS, then the PWM side gets to decide the
1054          * gear, as it is also the one that previously decided which power mode
1055          * the device will be configured to.
1056          */
1057         if ((is_dev_sup_hs && is_qcom_max_hs) ||
1058             (!is_dev_sup_hs && !is_qcom_max_hs))
1059                 agreed_pwr->gear_rx = agreed_pwr->gear_tx =
1060                         min_t(u32, min_dev_gear, min_qcom_gear);
1061         else if (!is_dev_sup_hs)
1062                 agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
1063         else
1064                 agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;
1065
1066         agreed_pwr->hs_rate = qcom_param->hs_rate;
1067         return 0;
1068 }
1069
1070 #ifdef CONFIG_QCOM_BUS_SCALING
1071 static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
1072                 const char *speed_mode)
1073 {
1074         struct device *dev = host->hba->dev;
1075         struct device_node *np = dev->of_node;
1076         int err;
1077         const char *key = "qcom,bus-vector-names";
1078
1079         if (!speed_mode) {
1080                 err = -EINVAL;
1081                 goto out;
1082         }
1083
1084         if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
1085                 err = of_property_match_string(np, key, "MAX");
1086         else
1087                 err = of_property_match_string(np, key, speed_mode);
1088
1089 out:
1090         if (err < 0)
1091                 dev_err(dev, "%s: Invalid %s mode %d\n",
1092                                 __func__, speed_mode, err);
1093         return err;
1094 }
1095
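/*
 * Build the bus-vector name matching the current power mode, e.g. "HS_RB_G3_L2"
 * for HS rate B gear 3 with 2 lanes, "PWM_G1_L1" for PWM, or "MIN" when no
 * power mode has been negotiated yet. The string is matched against the
 * "qcom,bus-vector-names" DT property by ufs_qcom_get_bus_vote().
 */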
1096 static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
1097 {
1098         int gear = max_t(u32, p->gear_rx, p->gear_tx);
1099         int lanes = max_t(u32, p->lane_rx, p->lane_tx);
1100         int pwr;
1101
1102         /* default to PWM Gear 1, Lane 1 if power mode is not initialized */
1103         if (!gear)
1104                 gear = 1;
1105
1106         if (!lanes)
1107                 lanes = 1;
1108
1109         if (!p->pwr_rx && !p->pwr_tx) {
1110                 pwr = SLOWAUTO_MODE;
1111                 snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
1112         } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
1113                  p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
1114                 pwr = FAST_MODE;
1115                 snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
1116                          p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
1117         } else {
1118                 pwr = SLOW_MODE;
1119                 snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
1120                          "PWM", gear, lanes);
1121         }
1122 }
1123
1124 static int __ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
1125 {
1126         int err = 0;
1127
1128         if (vote != host->bus_vote.curr_vote) {
1129                 err = msm_bus_scale_client_update_request(
1130                                 host->bus_vote.client_handle, vote);
1131                 if (err) {
1132                         dev_err(host->hba->dev,
1133                                 "%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
1134                                 __func__, host->bus_vote.client_handle,
1135                                 vote, err);
1136                         goto out;
1137                 }
1138
1139                 host->bus_vote.curr_vote = vote;
1140         }
1141 out:
1142         return err;
1143 }
1144
1145 static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
1146 {
1147         int vote;
1148         int err = 0;
1149         char mode[BUS_VECTOR_NAME_LEN];
1150
1151         ufs_qcom_get_speed_mode(&host->dev_req_params, mode);
1152
1153         vote = ufs_qcom_get_bus_vote(host, mode);
1154         if (vote >= 0)
1155                 err = __ufs_qcom_set_bus_vote(host, vote);
1156         else
1157                 err = vote;
1158
1159         if (err)
1160                 dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
1161         else
1162                 host->bus_vote.saved_vote = vote;
1163         return err;
1164 }
1165
1166 static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
1167 {
1168         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1169         int vote, err;
1170
1171         /*
1172          * In case ufs_qcom_init() is not yet done, simply ignore.
1173          * This ufs_qcom_set_bus_vote() shall be called from
1174          * ufs_qcom_init() after init is done.
1175          */
1176         if (!host)
1177                 return 0;
1178
1179         if (on) {
1180                 vote = host->bus_vote.saved_vote;
1181                 if (vote == host->bus_vote.min_bw_vote)
1182                         ufs_qcom_update_bus_bw_vote(host);
1183         } else {
1184                 vote = host->bus_vote.min_bw_vote;
1185         }
1186
1187         err = __ufs_qcom_set_bus_vote(host, vote);
1188         if (err)
1189                 dev_err(hba->dev, "%s: set bus vote failed %d\n",
1190                                 __func__, err);
1191
1192         return err;
1193 }
1194
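/*
 * sysfs "max_bus_bw" attribute: writing a non-zero value makes
 * ufs_qcom_get_bus_vote() pick the "MAX" bus vector for any speed mode other
 * than "MIN"; writing zero restores voting based on the negotiated link speed.
 */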
1195 static ssize_t
1196 show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
1197                         char *buf)
1198 {
1199         struct ufs_hba *hba = dev_get_drvdata(dev);
1200         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1201
1202         return snprintf(buf, PAGE_SIZE, "%u\n",
1203                         host->bus_vote.is_max_bw_needed);
1204 }
1205
1206 static ssize_t
1207 store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
1208                 const char *buf, size_t count)
1209 {
1210         struct ufs_hba *hba = dev_get_drvdata(dev);
1211         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1212         uint32_t value;
1213
1214         if (!kstrtou32(buf, 0, &value)) {
1215                 host->bus_vote.is_max_bw_needed = !!value;
1216                 ufs_qcom_update_bus_bw_vote(host);
1217         }
1218
1219         return count;
1220 }
1221
1222 static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
1223 {
1224         int err;
1225         struct msm_bus_scale_pdata *bus_pdata;
1226         struct device *dev = host->hba->dev;
1227         struct platform_device *pdev = to_platform_device(dev);
1228         struct device_node *np = dev->of_node;
1229
1230         bus_pdata = msm_bus_cl_get_pdata(pdev);
1231         if (!bus_pdata) {
1232                 dev_err(dev, "%s: failed to get bus vectors\n", __func__);
1233                 err = -ENODATA;
1234                 goto out;
1235         }
1236
1237         err = of_property_count_strings(np, "qcom,bus-vector-names");
1238         if (err < 0 || err != bus_pdata->num_usecases) {
1239                 dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
1240                                 __func__, err);
1241                 goto out;
1242         }
1243
1244         host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
1245         if (!host->bus_vote.client_handle) {
1246                 dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
1247                                 __func__);
1248                 err = -EFAULT;
1249                 goto out;
1250         }
1251
1252         /* cache the vote index for minimum and maximum bandwidth */
1253         host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
1254         host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");
1255
1256         host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
1257         host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
1258         sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
1259         host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
1260         host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
1261         err = device_create_file(dev, &host->bus_vote.max_bus_bw);
1262 out:
1263         return err;
1264 }
1265 #else /* CONFIG_QCOM_BUS_SCALING */
1266 static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
1267 {
1268         return 0;
1269 }
1270
1271 static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
1272 {
1273         return 0;
1274 }
1275
1276 static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
1277 {
1278         return 0;
1279 }
1280 static inline void msm_bus_scale_unregister_client(uint32_t cl)
1281 {
1282 }
1283 #endif /* CONFIG_QCOM_BUS_SCALING */
1284
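/*
 * Gate or ungate the device reference clock by toggling the enable mask in the
 * dev_ref_clk_ctrl register, observing the 1us ordering requirements around
 * hibern8 entry/exit described in the comments below.
 */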
1285 static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
1286 {
1287         if (host->dev_ref_clk_ctrl_mmio &&
1288             (enable ^ host->is_dev_ref_clk_enabled)) {
1289                 u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);
1290
1291                 if (enable)
1292                         temp |= host->dev_ref_clk_en_mask;
1293                 else
1294                         temp &= ~host->dev_ref_clk_en_mask;
1295
1296                 /*
1297                  * If we are here to disable this clock it might be immediately
1298                  * after entering into hibern8 in which case we need to make
1299                  * sure that device ref_clk is active at least 1us after the
1300                  * hibern8 enter.
1301                  */
1302                 if (!enable)
1303                         udelay(1);
1304
1305                 writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
1306
1307                 /* ensure that ref_clk is enabled/disabled before we return */
1308                 wmb();
1309
1310                 /*
1311                  * If we call hibern8 exit after this, we need to make sure that
1312                  * device ref_clk is stable for at least 1us before the hibern8
1313                  * exit command.
1314                  */
1315                 if (enable)
1316                         udelay(1);
1317
1318                 host->is_dev_ref_clk_enabled = enable;
1319         }
1320 }
1321
1322 static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
1323                                 enum ufs_notify_change_status status,
1324                                 struct ufs_pa_layer_attr *dev_max_params,
1325                                 struct ufs_pa_layer_attr *dev_req_params)
1326 {
1327         u32 val;
1328         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1329         struct phy *phy = host->generic_phy;
1330         struct ufs_qcom_dev_params ufs_qcom_cap;
1331         int ret = 0;
1332         int res = 0;
1333
1334         if (!dev_req_params) {
1335                 pr_err("%s: incoming dev_req_params is NULL\n", __func__);
1336                 ret = -EINVAL;
1337                 goto out;
1338         }
1339
1340         switch (status) {
1341         case PRE_CHANGE:
1342                 ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
1343                 ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
1344                 ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
1345                 ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
1346                 ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
1347                 ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
1348                 ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
1349                 ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
1350                 ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
1351                 ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
1352                 ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
1353                 ufs_qcom_cap.desired_working_mode =
1354                                         UFS_QCOM_LIMIT_DESIRED_MODE;
1355
1356                 if (host->hw_ver.major == 0x1) {
1357                         /*
1358                          * HS-G3 operations may not reliably work on legacy QCOM
1359                          * UFS host controller hardware even though capability
1360                          * exchange during link startup phase may end up
1361                          * negotiating maximum supported gear as G3.
1362                          * Hence downgrade the maximum supported gear to HS-G2.
1363                          */
1364                         if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
1365                                 ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
1366                         if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
1367                                 ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
1368                 }
1369
1370                 /*
1371                  * Platforms using QRBTCv2 phy must limit link to PWM Gear-1
1372                  * and SLOW mode to successfully bring up the link.
1373                  */
1374                 if (!strcmp(ufs_qcom_phy_name(phy), "ufs_phy_qrbtc_v2")) {
1375                         ufs_qcom_cap.tx_lanes = 1;
1376                         ufs_qcom_cap.rx_lanes = 1;
1377                         ufs_qcom_cap.pwm_rx_gear = UFS_PWM_G1;
1378                         ufs_qcom_cap.pwm_tx_gear = UFS_PWM_G1;
1379                         ufs_qcom_cap.desired_working_mode = SLOW;
1380                 }
1381
1382                 ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
1383                                                  dev_max_params,
1384                                                  dev_req_params);
1385                 if (ret) {
1386                         pr_err("%s: failed to determine capabilities\n",
1387                                         __func__);
1388                         goto out;
1389                 }
1390
1391                 /* enable the device ref clock before changing to HS mode */
1392                 if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
1393                         ufshcd_is_hs_mode(dev_req_params))
1394                         ufs_qcom_dev_ref_clk_ctrl(host, true);
1395                 break;
1396         case POST_CHANGE:
1397                 if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
1398                                         dev_req_params->pwr_rx,
1399                                         dev_req_params->hs_rate, false)) {
1400                         dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
1401                                 __func__);
1402                         /*
1403                          * we return error code at the end of the routine,
1404                          * but continue to configure UFS_PHY_TX_LANE_ENABLE
1405                          * and bus voting as usual
1406                          */
1407                         ret = -EINVAL;
1408                 }
1409
1410                 val = ~(MAX_U32 << dev_req_params->lane_tx);
1411                 res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
1412                 if (res) {
1413                         dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
1414                                 __func__, res);
1415                         ret = res;
1416                 }
1417
1418                 /* cache the power mode parameters to use internally */
1419                 memcpy(&host->dev_req_params,
1420                                 dev_req_params, sizeof(*dev_req_params));
1421                 ufs_qcom_update_bus_bw_vote(host);
1422
1423                 /* disable the device ref clock if entered PWM mode */
1424                 if (ufshcd_is_hs_mode(&hba->pwr_info) &&
1425                         !ufshcd_is_hs_mode(dev_req_params))
1426                         ufs_qcom_dev_ref_clk_ctrl(host, false);
1427                 break;
1428         default:
1429                 ret = -EINVAL;
1430                 break;
1431         }
1432 out:
1433         return ret;
1434 }
1435
1436 static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
1437 {
1438         int err;
1439         u32 pa_vs_config_reg1;
1440
1441         err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
1442                              &pa_vs_config_reg1);
1443         if (err)
1444                 goto out;
1445
1446         /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
1447         err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
1448                             (pa_vs_config_reg1 | (1 << 12)));
1449
1450 out:
1451         return err;
1452 }
1453
1454 static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
1455 {
1456         int err = 0;
1457
1458         if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
1459                 err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
1460
1461         return err;
1462 }
1463
1464 static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
1465 {
1466         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1467
1468         if (host->hw_ver.major == 0x1)
1469                 return UFSHCI_VERSION_11;
1470         else
1471                 return UFSHCI_VERSION_20;
1472 }
1473
1474 /**
1475  * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
1476  * @hba: host controller instance
1477  *
1478  * The QCOM UFS host controller might have some non-standard behaviours (quirks)
1479  * compared to what is specified by the UFSHCI specification. Advertise all such
1480  * quirks to the standard UFS host controller driver so that it takes them into
1481  * account.
1482  */
1483 static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
1484 {
1485         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1486
1487         if (host->hw_ver.major == 0x1) {
1488                 hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
1489                               | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
1490                               | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE);
1491
1492                 if (host->hw_ver.minor == 0x001 && host->hw_ver.step == 0x0001)
1493                         hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
1494
1495                 hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
1496         }
1497
1498         if (host->hw_ver.major == 0x2) {
1499                 hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
1500
1501                 if (!ufs_qcom_cap_qunipro(host))
1502                         /* Legacy UniPro mode still needs the following quirks */
1503                         hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
1504                                 | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
1505                                 | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
1506         }
1507
1508         if (host->disable_lpm)
1509                 hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
1510 }
1511
1512 static void ufs_qcom_set_caps(struct ufs_hba *hba)
1513 {
1514         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1515
1516         if (!host->disable_lpm) {
1517                 hba->caps |= UFSHCD_CAP_CLK_GATING;
1518                 hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
1519                 hba->caps |= UFSHCD_CAP_CLK_SCALING;
1520         }
1521         hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
1522
1523         if (host->hw_ver.major >= 0x2) {
1524                 if (!host->disable_lpm)
1525                         hba->caps |= UFSHCD_CAP_POWER_COLLAPSE_DURING_HIBERN8;
1526                 host->caps = UFS_QCOM_CAP_QUNIPRO |
1527                              UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
1528         }
1529         if (host->hw_ver.major >= 0x3) {
1530                 host->caps |= UFS_QCOM_CAP_QUNIPRO_CLK_GATING;
1531                 /*
1532                  * The UFS PHY attached to the v3.0.0 controller supports entering
1533                  * the deeper low power state of SVS2. This lets the controller
1534                  * run at much lower clock frequencies for saving power.
1535                  * Assume this and any future revisions of the controller
1536                  * support this capability. Revisit this assumption if
1537                  * any future platform with this core doesn't support the
1538                  * capability, as there will be no benefit running at lower
1539                  * frequencies then.
1540                  */
1541                 host->caps |= UFS_QCOM_CAP_SVS2;
1542         }
1543 }
1544
1545 /**
1546  * ufs_qcom_setup_clocks - enable/disable clocks
1547  * @hba: host controller instance
1548  * @on: If true, enable clocks else disable them.
1549  * @is_gating_context: If true, this function is called from the aggressive
1550  * clock gating context and only the important clocks may need to be gated
1551  * off. If false, make sure to gate off all clocks.
1552  *
1553  * Returns 0 on success, non-zero on failure.
1554  */
1555 static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
1556                                  bool is_gating_context)
1557 {
1558         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1559         int err;
1560
1561         /*
1562          * In case ufs_qcom_init() is not yet done, simply ignore.
1563          * ufs_qcom_setup_clocks() will be called again from
1564          * ufs_qcom_init() once init is done.
1565          */
1566         if (!host)
1567                 return 0;
1568
1569         if (on) {
1570                 err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
1571                 if (err)
1572                         goto out;
1573
1574                 err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
1575                 if (err) {
1576                         dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
1577                                 __func__, err);
1578                         ufs_qcom_phy_disable_iface_clk(host->generic_phy);
1579                         goto out;
1580                 }
1581                 /* enable the device ref clock for HS mode */
1582                 if (ufshcd_is_hs_mode(&hba->pwr_info))
1583                         ufs_qcom_dev_ref_clk_ctrl(host, true);
1584
1585                 err = ufs_qcom_ice_resume(host);
1586                 if (err)
1587                         goto out;
1588         } else {
1589                 err = ufs_qcom_ice_suspend(host);
1590                 if (err)
1591                         goto out;
1592
1593                 /* M-PHY RMMI interface clocks can be turned off */
1594                 ufs_qcom_phy_disable_iface_clk(host->generic_phy);
1595                 /*
1596                  * If auto hibern8 is supported then the link will already
1597                  * be in hibern8 state and the ref clock can be gated.
1598                  */
1599                 if (ufshcd_is_auto_hibern8_supported(hba) ||
1600                     !ufs_qcom_is_link_active(hba)) {
1601                         /* turn off UFS local PHY ref_clk */
1602                         ufs_qcom_phy_disable_ref_clk(host->generic_phy);
1603                         /* disable device ref_clk */
1604                         ufs_qcom_dev_ref_clk_ctrl(host, false);
1605                 }
1606         }
1607
1608 out:
1609         return err;
1610 }
1611
1612 #ifdef CONFIG_SMP /* CONFIG_SMP */
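/*
 * Map a CPU number to the index of the PM QoS CPU group whose mask contains
 * it. If the CPU number is invalid or matches none of the configured group
 * masks, pm_qos.default_cpu is returned and used as the fallback group index.
 */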
1613 static int ufs_qcom_cpu_to_group(struct ufs_qcom_host *host, int cpu)
1614 {
1615         int i;
1616
1617         if (cpu >= 0 && cpu < num_possible_cpus())
1618                 for (i = 0; i < host->pm_qos.num_groups; i++)
1619                         if (cpumask_test_cpu(cpu, &host->pm_qos.groups[i].mask))
1620                                 return i;
1621
1622         return host->pm_qos.default_cpu;
1623 }
1624
1625 static void ufs_qcom_pm_qos_req_start(struct ufs_hba *hba, struct request *req)
1626 {
1627         unsigned long flags;
1628         struct ufs_qcom_host *host;
1629         struct ufs_qcom_pm_qos_cpu_group *group;
1630
1631         if (!hba || !req)
1632                 return;
1633
1634         host = ufshcd_get_variant(hba);
1635         if (!host->pm_qos.groups)
1636                 return;
1637
1638         group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req->cpu)];
1639
1640         spin_lock_irqsave(hba->host->host_lock, flags);
1641         if (!host->pm_qos.is_enabled)
1642                 goto out;
1643
1644         group->active_reqs++;
1645         if (group->state != PM_QOS_REQ_VOTE &&
1646                         group->state != PM_QOS_VOTED) {
1647                 group->state = PM_QOS_REQ_VOTE;
1648                 queue_work(host->pm_qos.workq, &group->vote_work);
1649         }
1650 out:
1651         spin_unlock_irqrestore(hba->host->host_lock, flags);
1652 }
1653
1654 /* hba->host->host_lock is assumed to be held by caller */
1655 static void __ufs_qcom_pm_qos_req_end(struct ufs_qcom_host *host, int req_cpu)
1656 {
1657         struct ufs_qcom_pm_qos_cpu_group *group;
1658
1659         if (!host->pm_qos.groups || !host->pm_qos.is_enabled)
1660                 return;
1661
1662         group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req_cpu)];
1663
1664         if (--group->active_reqs)
1665                 return;
1666         group->state = PM_QOS_REQ_UNVOTE;
1667         queue_work(host->pm_qos.workq, &group->unvote_work);
1668 }
1669
1670 static void ufs_qcom_pm_qos_req_end(struct ufs_hba *hba, struct request *req,
1671         bool should_lock)
1672 {
1673         unsigned long flags = 0;
1674
1675         if (!hba || !req)
1676                 return;
1677
1678         if (should_lock)
1679                 spin_lock_irqsave(hba->host->host_lock, flags);
1680         __ufs_qcom_pm_qos_req_end(ufshcd_get_variant(hba), req->cpu);
1681         if (should_lock)
1682                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1683 }
1684
1685 static void ufs_qcom_pm_qos_vote_work(struct work_struct *work)
1686 {
1687         struct ufs_qcom_pm_qos_cpu_group *group =
1688                 container_of(work, struct ufs_qcom_pm_qos_cpu_group, vote_work);
1689         struct ufs_qcom_host *host = group->host;
1690         unsigned long flags;
1691
1692         spin_lock_irqsave(host->hba->host->host_lock, flags);
1693
1694         if (!host->pm_qos.is_enabled || !group->active_reqs) {
1695                 spin_unlock_irqrestore(host->hba->host->host_lock, flags);
1696                 return;
1697         }
1698
1699         group->state = PM_QOS_VOTED;
1700         spin_unlock_irqrestore(host->hba->host->host_lock, flags);
1701
1702         pm_qos_update_request(&group->req, group->latency_us);
1703 }
1704
1705 static void ufs_qcom_pm_qos_unvote_work(struct work_struct *work)
1706 {
1707         struct ufs_qcom_pm_qos_cpu_group *group = container_of(work,
1708                 struct ufs_qcom_pm_qos_cpu_group, unvote_work);
1709         struct ufs_qcom_host *host = group->host;
1710         unsigned long flags;
1711
1712         /*
1713          * Check if new requests were submitted in the meantime and do not
1714          * unvote if so.
1715          */
1716         spin_lock_irqsave(host->hba->host->host_lock, flags);
1717
1718         if (!host->pm_qos.is_enabled || group->active_reqs) {
1719                 spin_unlock_irqrestore(host->hba->host->host_lock, flags);
1720                 return;
1721         }
1722
1723         group->state = PM_QOS_UNVOTED;
1724         spin_unlock_irqrestore(host->hba->host->host_lock, flags);
1725
1726         pm_qos_update_request(&group->req, PM_QOS_DEFAULT_VALUE);
1727 }
1728
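/*
 * "pm_qos_enable" sysfs attribute: reads back whether PM QoS voting is
 * currently enabled (0/1). Writing 0 disables voting, cancels any pending
 * vote/unvote work and drops all outstanding PM QoS requests; writing any
 * non-zero value re-enables voting.
 */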
1729 static ssize_t ufs_qcom_pm_qos_enable_show(struct device *dev,
1730                 struct device_attribute *attr, char *buf)
1731 {
1732         struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1733         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1734
1735         return snprintf(buf, PAGE_SIZE, "%d\n", host->pm_qos.is_enabled);
1736 }
1737
1738 static ssize_t ufs_qcom_pm_qos_enable_store(struct device *dev,
1739                 struct device_attribute *attr, const char *buf, size_t count)
1740 {
1741         struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1742         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1743         unsigned long value;
1744         unsigned long flags;
1745         bool enable;
1746         int i;
1747
1748         if (kstrtoul(buf, 0, &value))
1749                 return -EINVAL;
1750
1751         enable = !!value;
1752
1753         /*
1754          * Must take the spinlock and save irqs before changing the enabled
1755          * flag in order to keep correctness of PM QoS release.
1756          */
1757         spin_lock_irqsave(hba->host->host_lock, flags);
1758         if (enable == host->pm_qos.is_enabled) {
1759                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1760                 return count;
1761         }
1762         host->pm_qos.is_enabled = enable;
1763         spin_unlock_irqrestore(hba->host->host_lock, flags);
1764
1765         if (!enable)
1766                 for (i = 0; i < host->pm_qos.num_groups; i++) {
1767                         cancel_work_sync(&host->pm_qos.groups[i].vote_work);
1768                         cancel_work_sync(&host->pm_qos.groups[i].unvote_work);
1769                         spin_lock_irqsave(hba->host->host_lock, flags);
1770                         host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
1771                         host->pm_qos.groups[i].active_reqs = 0;
1772                         spin_unlock_irqrestore(hba->host->host_lock, flags);
1773                         pm_qos_update_request(&host->pm_qos.groups[i].req,
1774                                 PM_QOS_DEFAULT_VALUE);
1775                 }
1776
1777         return count;
1778 }
1779
1780 static ssize_t ufs_qcom_pm_qos_latency_show(struct device *dev,
1781                 struct device_attribute *attr, char *buf)
1782 {
1783         struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1784         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1785         int ret;
1786         int i;
1787         int offset = 0;
1788
1789         for (i = 0; i < host->pm_qos.num_groups; i++) {
1790                 ret = snprintf(&buf[offset], PAGE_SIZE - offset,
1791                         "cpu group #%d(mask=0x%lx): %d\n", i,
1792                         host->pm_qos.groups[i].mask.bits[0],
1793                         host->pm_qos.groups[i].latency_us);
1794                 if (ret > 0)
1795                         offset += ret;
1796                 else
1797                         break;
1798         }
1799
1800         return offset;
1801 }
1802
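/*
 * "pm_qos_latency_us" sysfs attribute: reads back the configured latency
 * vote (in microseconds) of each CPU group; writes accept a comma separated
 * list of values, one per group in group order. A usage sketch (the exact
 * sysfs path is an assumption and depends on how the variant device is
 * named on a given platform):
 *
 *      echo "100,200" > /sys/.../pm_qos_latency_us
 */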
1803 static ssize_t ufs_qcom_pm_qos_latency_store(struct device *dev,
1804                 struct device_attribute *attr, const char *buf, size_t count)
1805 {
1806         struct ufs_hba *hba = dev_get_drvdata(dev->parent);
1807         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1808         unsigned long value;
1809         unsigned long flags;
1810         char *strbuf;
1811         char *strbuf_copy;
1812         char *token;
1813         int i;
1814         int ret;
1815
1816         /* reserve one byte for null termination */
1817         strbuf = kmalloc(count + 1, GFP_KERNEL);
1818         if (!strbuf)
1819                 return -ENOMEM;
1820         strbuf_copy = strbuf;
1821         strlcpy(strbuf, buf, count + 1);
1822
1823         for (i = 0; i < host->pm_qos.num_groups; i++) {
1824                 token = strsep(&strbuf, ",");
1825                 if (!token)
1826                         break;
1827
1828                 ret = kstrtoul(token, 0, &value);
1829                 if (ret)
1830                         break;
1831
1832                 spin_lock_irqsave(hba->host->host_lock, flags);
1833                 host->pm_qos.groups[i].latency_us = value;
1834                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1835         }
1836
1837         kfree(strbuf_copy);
1838         return count;
1839 }
1840
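/*
 * ufs_qcom_pm_qos_init - set up per-CPU-group PM QoS voting from device tree.
 *
 * Each entry of "qcom,pm-qos-cpu-groups" is a CPU bitmask defining one group,
 * "qcom,pm-qos-cpu-group-latency-us" gives the matching latency vote per
 * group, and "qcom,pm-qos-default-cpu" selects the fallback group used when
 * a request's CPU matches none of the masks. An illustrative sketch of the
 * properties (the masks and latencies below are made-up values, not a
 * binding definition):
 *
 *      qcom,pm-qos-cpu-groups = <0x0f 0xf0>;
 *      qcom,pm-qos-cpu-group-latency-us = <200 300>;
 *      qcom,pm-qos-default-cpu = <0>;
 */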
1841 static int ufs_qcom_pm_qos_init(struct ufs_qcom_host *host)
1842 {
1843         struct device_node *node = host->hba->dev->of_node;
1844         struct device_attribute *attr;
1845         int ret = 0;
1846         int num_groups;
1847         int num_values;
1848         char wq_name[sizeof("ufs_pm_qos_00")];
1849         int i;
1850
1851         num_groups = of_property_count_u32_elems(node,
1852                 "qcom,pm-qos-cpu-groups");
1853         if (num_groups <= 0)
1854                 goto no_pm_qos;
1855
1856         num_values = of_property_count_u32_elems(node,
1857                 "qcom,pm-qos-cpu-group-latency-us");
1858         if (num_values <= 0)
1859                 goto no_pm_qos;
1860
1861         if (num_values != num_groups || num_groups > num_possible_cpus()) {
1862                 dev_err(host->hba->dev, "%s: invalid count: num_groups=%d, num_values=%d, num_possible_cpus=%d\n",
1863                         __func__, num_groups, num_values, num_possible_cpus());
1864                 goto no_pm_qos;
1865         }
1866
1867         host->pm_qos.num_groups = num_groups;
1868         host->pm_qos.groups = kcalloc(host->pm_qos.num_groups,
1869                         sizeof(struct ufs_qcom_pm_qos_cpu_group), GFP_KERNEL);
1870         if (!host->pm_qos.groups)
1871                 return -ENOMEM;
1872
1873         for (i = 0; i < host->pm_qos.num_groups; i++) {
1874                 u32 mask;
1875
1876                 ret = of_property_read_u32_index(node, "qcom,pm-qos-cpu-groups",
1877                         i, &mask);
1878                 if (ret)
1879                         goto free_groups;
1880                 host->pm_qos.groups[i].mask.bits[0] = mask;
1881                 if (!cpumask_subset(&host->pm_qos.groups[i].mask,
1882                         cpu_possible_mask)) {
1883                         dev_err(host->hba->dev, "%s: invalid mask 0x%x for cpu group\n",
1884                                 __func__, mask);
1885                         goto free_groups;
1886                 }
1887
1888                 ret = of_property_read_u32_index(node,
1889                         "qcom,pm-qos-cpu-group-latency-us", i,
1890                         &host->pm_qos.groups[i].latency_us);
1891                 if (ret)
1892                         goto free_groups;
1893
1894                 host->pm_qos.groups[i].req.type = PM_QOS_REQ_AFFINE_IRQ;
1895                 host->pm_qos.groups[i].req.irq = host->hba->irq;
1896                 host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
1897                 host->pm_qos.groups[i].active_reqs = 0;
1898                 host->pm_qos.groups[i].host = host;
1899
1900                 INIT_WORK(&host->pm_qos.groups[i].vote_work,
1901                         ufs_qcom_pm_qos_vote_work);
1902                 INIT_WORK(&host->pm_qos.groups[i].unvote_work,
1903                         ufs_qcom_pm_qos_unvote_work);
1904         }
1905
1906         ret = of_property_read_u32(node, "qcom,pm-qos-default-cpu",
1907                 &host->pm_qos.default_cpu);
1908         if (ret || host->pm_qos.default_cpu >= num_possible_cpus())
1909                 host->pm_qos.default_cpu = 0;
1910
1911         /*
1912          * Use a single-threaded workqueue to assure work submitted to the queue
1913          * is performed in order. Consider the following 2 possible cases:
1914          *
1915          * 1. A new request arrives and voting work is scheduled for it. Before
1916          *    the voting work is performed the request is finished and unvote
1917          *    work is also scheduled.
1918          * 2. A request is finished and unvote work is scheduled. Before the
1919          *    work is performed a new request arrives and voting work is also
1920          *    scheduled.
1921          *
1922          * In both cases a vote work and unvote work wait to be performed.
1923          * If ordering is not guaranteed, then the end state might be the
1924          * opposite of the desired state.
1925          */
1926         snprintf(wq_name, ARRAY_SIZE(wq_name), "%s_%d", "ufs_pm_qos",
1927                 host->hba->host->host_no);
1928         host->pm_qos.workq = create_singlethread_workqueue(wq_name);
1929         if (!host->pm_qos.workq) {
1930                 dev_err(host->hba->dev, "%s: failed to create the workqueue\n",
1931                                 __func__);
1932                 ret = -ENOMEM;
1933                 goto free_groups;
1934         }
1935
1936         /* Initialization was ok, add all PM QoS requests */
1937         for (i = 0; i < host->pm_qos.num_groups; i++)
1938                 pm_qos_add_request(&host->pm_qos.groups[i].req,
1939                         PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
1940
1941         /* PM QoS latency sys-fs attribute */
1942         attr = &host->pm_qos.latency_attr;
1943         attr->show = ufs_qcom_pm_qos_latency_show;
1944         attr->store = ufs_qcom_pm_qos_latency_store;
1945         sysfs_attr_init(&attr->attr);
1946         attr->attr.name = "pm_qos_latency_us";
1947         attr->attr.mode = S_IRUGO | S_IWUSR;
1948         if (device_create_file(host->hba->var->dev, attr))
1949                 dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos_latency_us\n");
1950
1951         /* PM QoS enable sys-fs attribute */
1952         attr = &host->pm_qos.enable_attr;
1953         attr->show = ufs_qcom_pm_qos_enable_show;
1954         attr->store = ufs_qcom_pm_qos_enable_store;
1955         sysfs_attr_init(&attr->attr);
1956         attr->attr.name = "pm_qos_enable";
1957         attr->attr.mode = S_IRUGO | S_IWUSR;
1958         if (device_create_file(host->hba->var->dev, attr))
1959                 dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos enable\n");
1960
1961         host->pm_qos.is_enabled = true;
1962
1963         return 0;
1964
1965 free_groups:
1966         kfree(host->pm_qos.groups);
1967 no_pm_qos:
1968         host->pm_qos.groups = NULL;
1969         return ret ? ret : -ENOTSUPP;
1970 }
1971
1972 static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host)
1973 {
1974         int i;
1975
1976         if (!host->pm_qos.groups)
1977                 return;
1978
1979         for (i = 0; i < host->pm_qos.num_groups; i++)
1980                 flush_work(&host->pm_qos.groups[i].unvote_work);
1981 }
1982
1983 static void ufs_qcom_pm_qos_remove(struct ufs_qcom_host *host)
1984 {
1985         int i;
1986
1987         if (!host->pm_qos.groups)
1988                 return;
1989
1990         for (i = 0; i < host->pm_qos.num_groups; i++)
1991                 pm_qos_remove_request(&host->pm_qos.groups[i].req);
1992         destroy_workqueue(host->pm_qos.workq);
1993
1994         kfree(host->pm_qos.groups);
1995         host->pm_qos.groups = NULL;
1996 }
1997 #endif /* CONFIG_SMP */
1998
1999 #define ANDROID_BOOT_DEV_MAX    30
2000 static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
2001
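/*
 * Capture the "androidboot.bootdevice=" kernel command line argument so that
 * ufs_qcom_probe() can compare the boot device name against this platform
 * device and bail out early when the boot device is not this UFS controller.
 */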
2002 #ifndef MODULE
2003 static int __init get_android_boot_dev(char *str)
2004 {
2005         strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
2006         return 1;
2007 }
2008 __setup("androidboot.bootdevice=", get_android_boot_dev);
2009 #endif
2010
2011 /*
2012  * ufs_qcom_parse_lpm - read from DTS whether LPM modes should be disabled.
2013  */
2014 static void ufs_qcom_parse_lpm(struct ufs_qcom_host *host)
2015 {
2016         struct device_node *node = host->hba->dev->of_node;
2017
2018         host->disable_lpm = of_property_read_bool(node, "qcom,disable-lpm");
2019         if (host->disable_lpm)
2020                 pr_info("%s: will disable all LPM modes\n", __func__);
2021 }
2022
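/*
 * Look up an optional regulator named @name in the device tree. For the
 * "qcom,vddp-ref-clk" supply parsed in ufs_qcom_init() below, the relevant
 * properties would look roughly like this (an illustrative sketch; the
 * phandle and current limit are placeholders, not recommended values):
 *
 *      qcom,vddp-ref-clk-supply = <&some_ldo>;
 *      qcom,vddp-ref-clk-max-microamp = <100>;
 *
 * The regulator's voltage range is fixed to VDDP_REF_CLK_MIN_UV /
 * VDDP_REF_CLK_MAX_UV rather than being read from the device tree.
 */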
2023 static int ufs_qcom_parse_reg_info(struct ufs_qcom_host *host, char *name,
2024                                    struct ufs_vreg **out_vreg)
2025 {
2026         int ret = 0;
2027         char prop_name[MAX_PROP_SIZE];
2028         struct ufs_vreg *vreg = NULL;
2029         struct device *dev = host->hba->dev;
2030         struct device_node *np = dev->of_node;
2031
2032         if (!np) {
2033                 dev_err(dev, "%s: non DT initialization\n", __func__);
2034                 goto out;
2035         }
2036
2037         snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
2038         if (!of_parse_phandle(np, prop_name, 0)) {
2039                 dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
2040                          __func__, prop_name);
2041                 ret = -ENODEV;
2042                 goto out;
2043         }
2044
2045         vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
2046         if (!vreg)
2047                 return -ENOMEM;
2048
2049         vreg->name = name;
2050
2051         snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
2052         ret = of_property_read_u32(np, prop_name, &vreg->max_uA);
2053         if (ret) {
2054                 dev_err(dev, "%s: unable to find %s err %d\n",
2055                         __func__, prop_name, ret);
2056                 goto out;
2057         }
2058
2059         vreg->reg = devm_regulator_get(dev, vreg->name);
2060         if (IS_ERR(vreg->reg)) {
2061                 ret = PTR_ERR(vreg->reg);
2062                 dev_err(dev, "%s: %s get failed, err=%d\n",
2063                         __func__, vreg->name, ret);
2064         }
2065         vreg->min_uV = VDDP_REF_CLK_MIN_UV;
2066         vreg->max_uV = VDDP_REF_CLK_MAX_UV;
2067
2068 out:
2069         if (!ret)
2070                 *out_vreg = vreg;
2071         return ret;
2072 }
2073
2074 /**
2075  * ufs_qcom_init - bind phy with controller
2076  * @hba: host controller instance
2077  *
2078  * Binds the PHY with the controller and powers up the PHY, enabling
2079  * clocks and regulators.
2080  *
2081  * Returns -EPROBE_DEFER if binding fails, returns negative error
2082  * on phy power up failure and returns zero on success.
2083  */
2084 static int ufs_qcom_init(struct ufs_hba *hba)
2085 {
2086         int err;
2087         struct device *dev = hba->dev;
2088         struct platform_device *pdev = to_platform_device(dev);
2089         struct ufs_qcom_host *host;
2090         struct resource *res;
2091
2092         host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2093         if (!host) {
2094                 err = -ENOMEM;
2095                 dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
2096                 goto out;
2097         }
2098
2099         /* Make a two-way bind between the qcom host and the hba */
2100         host->hba = hba;
2101         spin_lock_init(&host->ice_work_lock);
2102
2103         ufshcd_set_variant(hba, host);
2104
2105         err = ufs_qcom_ice_get_dev(host);
2106         if (err == -EPROBE_DEFER) {
2107                 /*
2108                  * The UFS driver might be probed before the ICE driver is.
2109                  * In that case we would like to return EPROBE_DEFER code
2110                  * in order to delay its probing.
2111                  */
2112                 dev_err(dev, "%s: required ICE device not probed yet err = %d\n",
2113                         __func__, err);
2114                 goto out_host_free;
2115
2116         } else if (err == -ENODEV) {
2117                  * The ICE device is not enabled in the DTS file. No need for
2118                  * further initialization of the ICE driver.
2119                  * initialization of ICE driver.
2120                  */
2121                 dev_warn(dev, "%s: ICE device is not enabled\n",
2122                         __func__);
2123         } else if (err) {
2124                 dev_err(dev, "%s: ufs_qcom_ice_get_dev failed %d\n",
2125                         __func__, err);
2126                 goto out_host_free;
2127         }
2128
2129         host->generic_phy = devm_phy_get(dev, "ufsphy");
2130
2131         if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
2132                 /*
2133                  * The UFS driver might be probed before the phy driver is.
2134                  * In that case we would like to return EPROBE_DEFER code.
2135                  */
2136                 err = -EPROBE_DEFER;
2137                 dev_warn(dev, "%s: required phy device hasn't probed yet, err = %d\n",
2138                         __func__, err);
2139                 goto out_host_free;
2140         } else if (IS_ERR(host->generic_phy)) {
2141                 err = PTR_ERR(host->generic_phy);
2142                 dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
2143                 goto out;
2144         }
2145
2146         err = ufs_qcom_pm_qos_init(host);
2147         if (err)
2148                 dev_info(dev, "%s: PM QoS will be disabled\n", __func__);
2149
2150         /* restore the secure configuration */
2151         ufs_qcom_update_sec_cfg(hba, true);
2152
2153         err = ufs_qcom_bus_register(host);
2154         if (err)
2155                 goto out_host_free;
2156
2157         ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
2158                 &host->hw_ver.minor, &host->hw_ver.step);
2159
2160         /*
2161          * For newer controllers, the device reference clock control bit has
2162          * moved inside the UFS controller register address space itself.
2163          */
2164         if (host->hw_ver.major >= 0x02) {
2165                 host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
2166                 host->dev_ref_clk_en_mask = BIT(26);
2167         } else {
2168                 /* "dev_ref_clk_ctrl_mem" is optional resource */
2169                 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2170                 if (res) {
2171                         host->dev_ref_clk_ctrl_mmio =
2172                                         devm_ioremap_resource(dev, res);
2173                         if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
2174                                 dev_warn(dev,
2175                                         "%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
2176                                         __func__,
2177                                         PTR_ERR(host->dev_ref_clk_ctrl_mmio));
2178                                 host->dev_ref_clk_ctrl_mmio = NULL;
2179                         }
2180                         host->dev_ref_clk_en_mask = BIT(5);
2181                 }
2182         }
2183
2184         /* update phy revision information before calling phy_init() */
2185         ufs_qcom_phy_save_controller_version(host->generic_phy,
2186                 host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
2187
2188         err = ufs_qcom_parse_reg_info(host, "qcom,vddp-ref-clk",
2189                                       &host->vddp_ref_clk);
2190         phy_init(host->generic_phy);
2191         err = phy_power_on(host->generic_phy);
2192         if (err)
2193                 goto out_unregister_bus;
2194         if (host->vddp_ref_clk) {
2195                 err = ufs_qcom_enable_vreg(dev, host->vddp_ref_clk);
2196                 if (err) {
2197                         dev_err(dev, "%s: failed enabling ref clk supply: %d\n",
2198                                 __func__, err);
2199                         goto out_disable_phy;
2200                 }
2201         }
2202
2203         err = ufs_qcom_init_lane_clks(host);
2204         if (err)
2205                 goto out_disable_vddp;
2206
2207         ufs_qcom_parse_lpm(host);
2208         if (host->disable_lpm)
2209                 pm_runtime_forbid(host->hba->dev);
2210         ufs_qcom_set_caps(hba);
2211         ufs_qcom_advertise_quirks(hba);
2212
2213         ufs_qcom_set_bus_vote(hba, true);
2214         ufs_qcom_setup_clocks(hba, true, false);
2215
2216         if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
2217                 ufs_qcom_hosts[hba->dev->id] = host;
2218
2219         host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
2220         ufs_qcom_get_default_testbus_cfg(host);
2221         err = ufs_qcom_testbus_config(host);
2222         if (err) {
2223                 dev_warn(dev, "%s: failed to configure the testbus %d\n",
2224                                 __func__, err);
2225                 err = 0;
2226         }
2227
2228         goto out;
2229
2230 out_disable_vddp:
2231         if (host->vddp_ref_clk)
2232                 ufs_qcom_disable_vreg(dev, host->vddp_ref_clk);
2233 out_disable_phy:
2234         phy_power_off(host->generic_phy);
2235 out_unregister_bus:
2236         phy_exit(host->generic_phy);
2237         msm_bus_scale_unregister_client(host->bus_vote.client_handle);
2238 out_host_free:
2239         devm_kfree(dev, host);
2240         ufshcd_set_variant(hba, NULL);
2241 out:
2242         return err;
2243 }
2244
2245 static void ufs_qcom_exit(struct ufs_hba *hba)
2246 {
2247         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2248
2249         msm_bus_scale_unregister_client(host->bus_vote.client_handle);
2250         ufs_qcom_disable_lane_clks(host);
2251         phy_power_off(host->generic_phy);
2252         ufs_qcom_pm_qos_remove(host);
2253 }
2254
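/*
 * Program the MAX_CORE_CLK_1US_CYCLES field of DME_VS_CORE_CLK_CTRL and
 * clear the core clock divider enable bit. @clk_cycles is the number of
 * unipro core clock cycles per microsecond; the callers below pass 150,
 * 75 or 37, presumably matching core clock rates of roughly 150, 75 and
 * 37.5 MHz for nominal, SVS and SVS2 operation (an inference from the
 * call sites, not from hardware documentation).
 */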
2255 static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
2256                                                        u32 clk_cycles)
2257 {
2258         int err;
2259         u32 core_clk_ctrl_reg;
2260
2261         if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
2262                 return -EINVAL;
2263
2264         err = ufshcd_dme_get(hba,
2265                             UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
2266                             &core_clk_ctrl_reg);
2267         if (err)
2268                 goto out;
2269
2270         core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
2271         core_clk_ctrl_reg |= clk_cycles;
2272
2273         /* Clear CORE_CLK_DIV_EN */
2274         core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
2275
2276         err = ufshcd_dme_set(hba,
2277                             UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
2278                             core_clk_ctrl_reg);
2279 out:
2280         return err;
2281 }
2282
2283 static inline int ufs_qcom_configure_lpm(struct ufs_hba *hba, bool enable)
2284 {
2285         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2286         struct phy *phy = host->generic_phy;
2287         int err = 0;
2288
2289         /* The default low power mode configuration is SVS2 */
2290         if (!ufs_qcom_cap_svs2(host))
2291                 goto out;
2292
2293         if (!((host->hw_ver.major == 0x3) &&
2294             (host->hw_ver.minor == 0x0) &&
2295             (host->hw_ver.step == 0x0)))
2296                 goto out;
2297
2298         /*
2299          * The link should be put in hibern8 state before
2300          * configuring the PHY to enter/exit SVS2 mode.
2301          */
2302         err = ufshcd_uic_hibern8_enter(hba);
2303         if (err)
2304                 goto out;
2305
2306         err = ufs_qcom_phy_configure_lpm(phy, enable);
2307         if (err)
2308                 goto out;
2309
2310         err = ufshcd_uic_hibern8_exit(hba);
2311 out:
2312         return err;
2313 }
2314
2315 static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
2316 {
2317         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2318         struct ufs_pa_layer_attr *attr = &host->dev_req_params;
2319         int err = 0;
2320
2321         if (!ufs_qcom_cap_qunipro(host))
2322                 goto out;
2323
2324         err = ufs_qcom_configure_lpm(hba, false);
2325         if (err)
2326                 goto out;
2327
2328         if (attr)
2329                 __ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
2330                                       attr->hs_rate, false, true);
2331
2332         /* set unipro core clock cycles to 150 and clear clock divider */
2333         err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
2334 out:
2335         return err;
2336 }
2337
2338 static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
2339 {
2340         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2341
2342         if (!ufs_qcom_cap_qunipro(host))
2343                 return 0;
2344
2345         return ufs_qcom_configure_lpm(hba, true);
2346 }
2347
2348 static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
2349 {
2350         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2351         struct ufs_pa_layer_attr *attr = &host->dev_req_params;
2352         int err = 0;
2353
2354         if (!ufs_qcom_cap_qunipro(host))
2355                 return 0;
2356
2357         if (attr)
2358                 ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
2359                                     attr->hs_rate, false);
2360
2361         if (ufs_qcom_cap_svs2(host))
2362                 /*
2363                  * For SVS2 set unipro core clock cycles to 37 and
2364                  * clear clock divider
2365                  */
2366                 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 37);
2367         else
2368                 /*
2369                  * For SVS set unipro core clock cycles to 75 and
2370                  * clear clock divider
2371                  */
2372                 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
2373
2374         return err;
2375 }
2376
2377 static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
2378                 bool scale_up, enum ufs_notify_change_status status)
2379 {
2380         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2381         int err = 0;
2382
2383         switch (status) {
2384         case PRE_CHANGE:
2385                 if (scale_up)
2386                         err = ufs_qcom_clk_scale_up_pre_change(hba);
2387                 else
2388                         err = ufs_qcom_clk_scale_down_pre_change(hba);
2389                 break;
2390         case POST_CHANGE:
2391                 if (!scale_up)
2392                         err = ufs_qcom_clk_scale_down_post_change(hba);
2393
2394                 ufs_qcom_update_bus_bw_vote(host);
2395                 break;
2396         default:
2397                 dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
2398                 err = -EINVAL;
2399                 break;
2400         }
2401
2402         return err;
2403 }
2404
2405 /*
2406  * This function should be called to restore the security configuration of UFS
2407  * register space after coming out of UFS host core power collapse.
2408  *
2409  * @hba: host controller instance
2410  * @restore_sec_cfg: Set "true" if secure configuration needs to be restored
2411  * and set "false" when secure configuration is lost.
2412  */
2413 static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg)
2414 {
2415         int ret = 0;
2416         u64 scm_ret = 0;
2417         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2418
2419         /* scm command buffer structure */
2420         struct msm_scm_cmd_buf {
2421                 unsigned int device_id;
2422                 unsigned int spare;
2423         } cbuf = {0};
2424         #define RESTORE_SEC_CFG_CMD     0x2
2425         #define UFS_TZ_DEV_ID           19
2426
2427         if (!host || !hba->vreg_info.vdd_hba ||
2428             !(host->sec_cfg_updated ^ restore_sec_cfg)) {
2429                 return 0;
2430         } else if (host->caps &
2431                    UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE) {
2432                 return 0;
2433         } else if (!restore_sec_cfg) {
2434                 /*
2435                  * Clear the flag so next time when this function is called
2436                  * with restore_sec_cfg set to true, we can restore the secure
2437                  * configuration.
2438                  */
2439                 host->sec_cfg_updated = false;
2440                 goto out;
2441         } else if (hba->clk_gating.state != CLKS_ON) {
2442                 /*
2443                  * Clocks should be ON to restore the host controller secure
2444                  * configuration.
2445                  */
2446                 goto out;
2447         }
2448
2449         /*
2450          * If we are here, the host controller clocks are running, the host
2451          * controller power collapse feature is supported and the host controller
2452          * has just come out of power collapse.
2453          */
2454         cbuf.device_id = UFS_TZ_DEV_ID;
2455         ret = scm_restore_sec_cfg(cbuf.device_id, cbuf.spare, &scm_ret);
2456         if (ret || scm_ret) {
2457                 dev_dbg(hba->dev, "%s: failed, ret %d scm_ret %llu\n",
2458                         __func__, ret, scm_ret);
2459                 if (!ret)
2460                         ret = scm_ret;
2461         } else {
2462                 host->sec_cfg_updated = true;
2463         }
2464
2465 out:
2466         dev_dbg(hba->dev, "%s: ip: restore_sec_cfg %d, op: restore_sec_cfg %d, ret %d scm_ret %llu\n",
2467                 __func__, restore_sec_cfg, host->sec_cfg_updated, ret, scm_ret);
2468         return ret;
2469 }
2470
2471
2472 static inline u32 ufs_qcom_get_scale_down_gear(struct ufs_hba *hba)
2473 {
2474         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2475
2476         if (ufs_qcom_cap_svs2(host))
2477                 return UFS_HS_G1;
2478         /* Default SVS support @ HS G2 frequencies */
2479         return UFS_HS_G2;
2480 }
2481
2482 void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv,
2483                 void (*print_fn)(struct ufs_hba *hba, int offset, int num_regs,
2484                                 char *str, void *priv))
2485 {
2486         u32 reg;
2487         struct ufs_qcom_host *host;
2488
2489         if (unlikely(!hba)) {
2490                 pr_err("%s: hba is NULL\n", __func__);
2491                 return;
2492         }
2493         if (unlikely(!print_fn)) {
2494                 dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
2495                 return;
2496         }
2497
2498         host = ufshcd_get_variant(hba);
2499         if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
2500                 return;
2501
2502         reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
2503         print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
2504
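        /* set bit 17 - UTP_DBG_RAMS_EN - before dumping the debug RAMs below */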
2505         reg = ufshcd_readl(hba, REG_UFS_CFG1);
2506         reg |= UFS_BIT(17);
2507         ufshcd_writel(hba, reg, REG_UFS_CFG1);
2508
2509         reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
2510         print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
2511
2512         reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
2513         print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
2514
2515         reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
2516         print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
2517
2518         /* clear bit 17 - UTP_DBG_RAMS_EN */
2519         ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1);
2520
2521         reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
2522         print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
2523
2524         reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
2525         print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
2526
2527         reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
2528         print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
2529
2530         reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
2531         print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
2532
2533         reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
2534         print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
2535
2536         reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
2537         print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
2538
2539         reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
2540         print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
2541 }
2542
2543 static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
2544 {
2545         if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
2546                 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
2547                                 UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
2548                 ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
2549         } else {
2550                 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
2551                 ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
2552         }
2553 }
2554
2555 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
2556 {
2557         /* provide a legal default configuration */
2558         host->testbus.select_major = TSTBUS_UNIPRO;
2559         host->testbus.select_minor = 37;
2560 }
2561
2562 bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host,
2563                 u8 select_major, u8 select_minor)
2564 {
2565         if (select_major >= TSTBUS_MAX) {
2566                 dev_err(host->hba->dev,
2567                         "%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
2568                         __func__, select_major);
2569                 return false;
2570         }
2571
2572         /*
2573          * Not performing a check for each individual select_major's
2574          * mapping of select_minor, since there is no harm in
2575          * configuring a non-existent select_minor.
2576          */
2577         if (select_minor > 0xFF) {
2578                 dev_err(host->hba->dev,
2579                         "%s: 0x%05X is not a legal testbus option\n",
2580                         __func__, select_minor);
2581                 return false;
2582         }
2583
2584         return true;
2585 }
2586
2587 /*
2588  * The caller of this function must make sure that the controller
2589  * is out of runtime suspend and appropriate clocks are enabled
2590  * before accessing.
2591  */
2592 int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
2593 {
2594         int reg = 0;
2595         int offset = 0, ret = 0, testbus_sel_offset = 19;
2596         u32 mask = TEST_BUS_SUB_SEL_MASK;
2597         unsigned long flags;
2598         struct ufs_hba *hba;
2599
2600         if (!host)
2601                 return -EINVAL;
2602         hba = host->hba;
2603         spin_lock_irqsave(hba->host->host_lock, flags);
2604         switch (host->testbus.select_major) {
2605         case TSTBUS_UAWM:
2606                 reg = UFS_TEST_BUS_CTRL_0;
2607                 offset = 24;
2608                 break;
2609         case TSTBUS_UARM:
2610                 reg = UFS_TEST_BUS_CTRL_0;
2611                 offset = 16;
2612                 break;
2613         case TSTBUS_TXUC:
2614                 reg = UFS_TEST_BUS_CTRL_0;
2615                 offset = 8;
2616                 break;
2617         case TSTBUS_RXUC:
2618                 reg = UFS_TEST_BUS_CTRL_0;
2619                 offset = 0;
2620                 break;
2621         case TSTBUS_DFC:
2622                 reg = UFS_TEST_BUS_CTRL_1;
2623                 offset = 24;
2624                 break;
2625         case TSTBUS_TRLUT:
2626                 reg = UFS_TEST_BUS_CTRL_1;
2627                 offset = 16;
2628                 break;
2629         case TSTBUS_TMRLUT:
2630                 reg = UFS_TEST_BUS_CTRL_1;
2631                 offset = 8;
2632                 break;
2633         case TSTBUS_OCSC:
2634                 reg = UFS_TEST_BUS_CTRL_1;
2635                 offset = 0;
2636                 break;
2637         case TSTBUS_WRAPPER:
2638                 reg = UFS_TEST_BUS_CTRL_2;
2639                 offset = 16;
2640                 break;
2641         case TSTBUS_COMBINED:
2642                 reg = UFS_TEST_BUS_CTRL_2;
2643                 offset = 8;
2644                 break;
2645         case TSTBUS_UTP_HCI:
2646                 reg = UFS_TEST_BUS_CTRL_2;
2647                 offset = 0;
2648                 break;
2649         case TSTBUS_UNIPRO:
2650                 reg = UFS_UNIPRO_CFG;
2651                 offset = 20;
2652                 mask = 0xFFF;
2653                 break;
2654         /*
2655          * No need for a default case, since
2656          * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
2657          * is legal
2658          */
2659         }
2660         mask <<= offset;
2661         spin_unlock_irqrestore(hba->host->host_lock, flags);
2662         if (reg) {
2663                 ufshcd_rmwl(host->hba, TEST_BUS_SEL,
2664                     (u32)host->testbus.select_major << testbus_sel_offset,
2665                     REG_UFS_CFG1);
2666                 ufshcd_rmwl(host->hba, mask,
2667                     (u32)host->testbus.select_minor << offset,
2668                     reg);
2669         } else {
2670                 dev_err(hba->dev, "%s: Problem setting minor\n", __func__);
2671                 ret = -EINVAL;
2672                 goto out;
2673         }
2674         ufs_qcom_enable_test_bus(host);
2675         /*
2676          * Make sure the test bus configuration is
2677          * committed before returning.
2678          */
2679         mb();
2680 out:
2681         return ret;
2682 }
2683
2684 static void ufs_qcom_testbus_read(struct ufs_hba *hba)
2685 {
2686         ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
2687 }
2688
2689 static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
2690 {
2691         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2692         u32 *testbus = NULL;
2693         int i, nminor = 256, testbus_len = nminor * sizeof(u32);
2694
2695         testbus = kmalloc(testbus_len, GFP_KERNEL);
2696         if (!testbus)
2697                 return;
2698
2699         host->testbus.select_major = TSTBUS_UNIPRO;
2700         for (i = 0; i < nminor; i++) {
2701                 host->testbus.select_minor = i;
2702                 ufs_qcom_testbus_config(host);
2703                 testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
2704         }
2705         print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
2706                         16, 4, testbus, testbus_len, false);
2707         kfree(testbus);
2708 }
2709
2710 static void ufs_qcom_print_utp_hci_testbus(struct ufs_hba *hba)
2711 {
2712         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2713         u32 *testbus = NULL;
2714         int i, nminor = 32, testbus_len = nminor * sizeof(u32);
2715
2716         testbus = kmalloc(testbus_len, GFP_KERNEL);
2717         if (!testbus)
2718                 return;
2719
2720         host->testbus.select_major = TSTBUS_UTP_HCI;
2721         for (i = 0; i < nminor; i++) {
2722                 host->testbus.select_minor = i;
2723                 ufs_qcom_testbus_config(host);
2724                 testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
2725         }
2726         print_hex_dump(KERN_ERR, "UTP_HCI_TEST_BUS ", DUMP_PREFIX_OFFSET,
2727                         16, 4, testbus, testbus_len, false);
2728         kfree(testbus);
2729 }
2730
2731 static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba, bool no_sleep)
2732 {
2733         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
2734         struct phy *phy = host->generic_phy;
2735
2736         ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
2737                         "HCI Vendor Specific Registers ");
2738         ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
2739
2740         if (no_sleep)
2741                 return;
2742
2743         /* sleep a bit intermittently as we are dumping too much data */
2744         usleep_range(1000, 1100);
2745         ufs_qcom_testbus_read(hba);
2746         usleep_range(1000, 1100);
2747         ufs_qcom_print_unipro_testbus(hba);
2748         usleep_range(1000, 1100);
2749         ufs_qcom_print_utp_hci_testbus(hba);
2750         usleep_range(1000, 1100);
2751         ufs_qcom_phy_dbg_register_dump(phy);
2752         usleep_range(1000, 1100);
2753         ufs_qcom_ice_print_regs(host);
2754 }
2755
2756 /**
2757  * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
2758  *
2759  * The variant operations configure the necessary controller and PHY
2760  * handshake during initialization.
2761  */
2762 static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
2763         .init                   = ufs_qcom_init,
2764         .exit                   = ufs_qcom_exit,
2765         .get_ufs_hci_version    = ufs_qcom_get_ufs_hci_version,
2766         .clk_scale_notify       = ufs_qcom_clk_scale_notify,
2767         .setup_clocks           = ufs_qcom_setup_clocks,
2768         .hce_enable_notify      = ufs_qcom_hce_enable_notify,
2769         .link_startup_notify    = ufs_qcom_link_startup_notify,
2770         .pwr_change_notify      = ufs_qcom_pwr_change_notify,
2771         .apply_dev_quirks       = ufs_qcom_apply_dev_quirks,
2772         .suspend                = ufs_qcom_suspend,
2773         .resume                 = ufs_qcom_resume,
2774         .full_reset             = ufs_qcom_full_reset,
2775         .update_sec_cfg         = ufs_qcom_update_sec_cfg,
2776         .get_scale_down_gear    = ufs_qcom_get_scale_down_gear,
2777         .set_bus_vote           = ufs_qcom_set_bus_vote,
2778         .dbg_register_dump      = ufs_qcom_dump_dbg_regs,
2779 #ifdef CONFIG_DEBUG_FS
2780         .add_debugfs            = ufs_qcom_dbg_add_debugfs,
2781 #endif
2782 };
2783
2784 static struct ufs_hba_crypto_variant_ops ufs_hba_crypto_variant_ops = {
2785         .crypto_req_setup       = ufs_qcom_crypto_req_setup,
2786         .crypto_engine_cfg_start        = ufs_qcom_crytpo_engine_cfg_start,
2787         .crypto_engine_cfg_end  = ufs_qcom_crytpo_engine_cfg_end,
2788         .crypto_engine_reset      = ufs_qcom_crytpo_engine_reset,
2789         .crypto_engine_get_status = ufs_qcom_crypto_engine_get_status,
2790 };
2791
2792 static struct ufs_hba_pm_qos_variant_ops ufs_hba_pm_qos_variant_ops = {
2793         .req_start      = ufs_qcom_pm_qos_req_start,
2794         .req_end        = ufs_qcom_pm_qos_req_end,
2795 };
2796
2797 static struct ufs_hba_variant ufs_hba_qcom_variant = {
2798         .name           = "qcom",
2799         .vops           = &ufs_hba_qcom_vops,
2800         .crypto_vops    = &ufs_hba_crypto_variant_ops,
2801         .pm_qos_vops    = &ufs_hba_pm_qos_variant_ops,
2802 };
2803
2804 /**
2805  * ufs_qcom_probe - probe routine of the driver
2806  * @pdev: pointer to Platform device handle
2807  *
2808  * Return zero for success and non-zero for failure
2809  */
2810 static int ufs_qcom_probe(struct platform_device *pdev)
2811 {
2812         int err;
2813         struct device *dev = &pdev->dev;
2814         struct device_node *np = dev->of_node;
2815
2816         /*
2817          * On qcom platforms, the boot device is the primary storage
2818          * device, which can be either eMMC or UFS.
2819          * The type of the connected device is detected at runtime.
2820          * So, if an eMMC device were connected and this function were
2821          * invoked, it would turn off the regulators once it detects
2822          * that the storage device is not UFS.
2823          * These regulators are turned on by the bootloaders, and turning
2824          * them off without sending PON may damage the connected device.
2825          * Hence, check for the connected device early on and don't turn
2826          * off the regulators.
2827          */
2828         if (of_property_read_bool(np, "non-removable") &&
2829             strlen(android_boot_dev) &&
2830             strcmp(android_boot_dev, dev_name(dev)))
2831                 return -ENODEV;
2832
2833         /* Perform generic probe */
2834         err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_variant);
2835         if (err)
2836                 dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
2837
2838         return err;
2839 }
2840
2841 /**
2842  * ufs_qcom_remove - set driver_data of the device to NULL
2843  * @pdev: pointer to platform device handle
2844  *
2845  * Always return 0
2846  */
2847 static int ufs_qcom_remove(struct platform_device *pdev)
2848 {
2849         struct ufs_hba *hba =  platform_get_drvdata(pdev);
2850
2851         pm_runtime_get_sync(&(pdev)->dev);
2852         ufshcd_remove(hba);
2853         return 0;
2854 }
2855
2856 static const struct of_device_id ufs_qcom_of_match[] = {
2857         { .compatible = "qcom,ufshc"},
2858         {},
2859 };
2860 MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
2861
2862 static const struct dev_pm_ops ufs_qcom_pm_ops = {
2863         .suspend        = ufshcd_pltfrm_suspend,
2864         .resume         = ufshcd_pltfrm_resume,
2865         .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
2866         .runtime_resume  = ufshcd_pltfrm_runtime_resume,
2867         .runtime_idle    = ufshcd_pltfrm_runtime_idle,
2868 };
2869
2870 static struct platform_driver ufs_qcom_pltform = {
2871         .probe  = ufs_qcom_probe,
2872         .remove = ufs_qcom_remove,
2873         .shutdown = ufshcd_pltfrm_shutdown,
2874         .driver = {
2875                 .name   = "ufshcd-qcom",
2876                 .pm     = &ufs_qcom_pm_ops,
2877                 .of_match_table = of_match_ptr(ufs_qcom_of_match),
2878         },
2879 };
2880 module_platform_driver(ufs_qcom_pltform);
2881
2882 MODULE_LICENSE("GPL v2");