OSDN Git Service

Merge "ASoC: sdm660_cdc: Prevent MICBIAS1 enable during headset record"
[sagit-ice-cold/kernel_xiaomi_msm8998.git] / drivers / phy / phy-qcom-ufs.c
1 /*
2  * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 and
6  * only version 2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  */
14
15 #include "phy-qcom-ufs-i.h"
16
17 #define MAX_PROP_NAME              32
18 #define VDDA_PHY_MIN_UV            800000
19 #define VDDA_PHY_MAX_UV            925000
20 #define VDDA_PLL_MIN_UV            1200000
21 #define VDDA_PLL_MAX_UV            1800000
22 #define VDDP_REF_CLK_MIN_UV        1200000
23 #define VDDP_REF_CLK_MAX_UV        1200000
24
25 static int __ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
26                                     const char *, bool);
27 static int ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
28                                   const char *);
29 static int ufs_qcom_phy_base_init(struct platform_device *pdev,
30                                   struct ufs_qcom_phy *phy_common);
31
32 void ufs_qcom_phy_write_tbl(struct ufs_qcom_phy *ufs_qcom_phy,
33                            struct ufs_qcom_phy_calibration *tbl,
34                            int tbl_size)
35 {
36         int i;
37
38         for (i = 0; i < tbl_size; i++)
39                 writel_relaxed(tbl[i].cfg_value,
40                                ufs_qcom_phy->mmio + tbl[i].reg_offset);
41 }
42 EXPORT_SYMBOL(ufs_qcom_phy_write_tbl);
43
44 int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
45                            struct ufs_qcom_phy_calibration *tbl_A,
46                            int tbl_size_A,
47                            struct ufs_qcom_phy_calibration *tbl_B,
48                            int tbl_size_B, bool is_rate_B)
49 {
50         int ret = 0;
51
52         if (!tbl_A) {
53                 dev_err(ufs_qcom_phy->dev, "%s: tbl_A is NULL", __func__);
54                 ret = EINVAL;
55                 goto out;
56         }
57
58         ufs_qcom_phy_write_tbl(ufs_qcom_phy, tbl_A, tbl_size_A);
59
60         /*
61          * In case we would like to work in rate B, we need
62          * to override a registers that were configured in rate A table
63          * with registers of rate B table.
64          * table.
65          */
66         if (is_rate_B) {
67                 if (!tbl_B) {
68                         dev_err(ufs_qcom_phy->dev, "%s: tbl_B is NULL",
69                                 __func__);
70                         ret = EINVAL;
71                         goto out;
72                 }
73
74                 ufs_qcom_phy_write_tbl(ufs_qcom_phy, tbl_B, tbl_size_B);
75         }
76
77         /* flush buffered writes */
78         mb();
79
80 out:
81         return ret;
82 }
83 EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate);
84
/*
 * Common probe helper for the Qualcomm UFS PHY drivers: maps the "phy_mem"
 * resource, registers a phy provider and creates the generic phy object.
 *
 * Returns the created phy on success, or NULL on any failure (callers are
 * expected to check for NULL, not an ERR_PTR).
 *
 * NOTE(review): the phy provider is registered before devm_phy_create()
 * succeeds, and phy drvdata is not set here — presumably the specific PHY
 * driver calls phy_set_drvdata() after this returns; verify against callers.
 */
struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
				struct ufs_qcom_phy *common_cfg,
				const struct phy_ops *ufs_qcom_phy_gen_ops,
				struct ufs_qcom_phy_specific_ops *phy_spec_ops)
{
	int err;
	struct device *dev = &pdev->dev;
	struct phy *generic_phy = NULL;
	struct phy_provider *phy_provider;

	err = ufs_qcom_phy_base_init(pdev, common_cfg);
	if (err) {
		dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
		goto out;
	}

	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
	if (IS_ERR(phy_provider)) {
		err = PTR_ERR(phy_provider);
		dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
		goto out;
	}

	generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
	if (IS_ERR(generic_phy)) {
		err =  PTR_ERR(generic_phy);
		dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
		/* NULL (not ERR_PTR) is this function's failure value. */
		generic_phy = NULL;
		goto out;
	}

	/*
	 * UFS PHY power management is managed by its parent (UFS host
	 * controller), hence set the no-runtime-PM-callbacks flag on the
	 * UFS PHY device to avoid any accidental attempt to call the
	 * PM callbacks for the PHY device.
	 */
	pm_runtime_no_callbacks(&generic_phy->dev);

	common_cfg->phy_spec_ops = phy_spec_ops;
	common_cfg->dev = dev;

out:
	return generic_phy;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);
131
/*
 * Retrieve the ufs_qcom_phy private data attached to a generic phy.
 *
 * This assumes the drvdata installed on generic_phy points at a structure
 * whose FIRST member is a "struct ufs_qcom_phy common_cfg"; keeping that
 * layout is crucial for this cast to be valid.
 */
struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy)
{
	void *priv = phy_get_drvdata(generic_phy);

	return (struct ufs_qcom_phy *)priv;
}
EXPORT_SYMBOL_GPL(get_ufs_qcom_phy);
143
144 static
145 int ufs_qcom_phy_base_init(struct platform_device *pdev,
146                            struct ufs_qcom_phy *phy_common)
147 {
148         struct device *dev = &pdev->dev;
149         struct resource *res;
150         int err = 0;
151
152         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_mem");
153         if (!res) {
154                 dev_err(dev, "%s: phy_mem resource not found\n", __func__);
155                 err = -ENOMEM;
156                 goto out;
157         }
158
159         phy_common->mmio = devm_ioremap_resource(dev, res);
160         if (IS_ERR((void const *)phy_common->mmio)) {
161                 err = PTR_ERR((void const *)phy_common->mmio);
162                 phy_common->mmio = NULL;
163                 dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n",
164                         __func__, err);
165         }
166 out:
167         return err;
168 }
169
170 static int __ufs_qcom_phy_clk_get(struct phy *phy,
171                          const char *name, struct clk **clk_out, bool err_print)
172 {
173         struct clk *clk;
174         int err = 0;
175         struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
176         struct device *dev = ufs_qcom_phy->dev;
177
178         clk = devm_clk_get(dev, name);
179         if (IS_ERR(clk)) {
180                 err = PTR_ERR(clk);
181                 if (err_print)
182                         dev_err(dev, "failed to get %s err %d", name, err);
183         } else {
184                 *clk_out = clk;
185         }
186
187         return err;
188 }
189
190 static
191 int ufs_qcom_phy_clk_get(struct phy *phy,
192                          const char *name, struct clk **clk_out)
193 {
194         return __ufs_qcom_phy_clk_get(phy, name, clk_out, true);
195 }
196
197 int
198 ufs_qcom_phy_init_clks(struct phy *generic_phy,
199                        struct ufs_qcom_phy *phy_common)
200 {
201         int err;
202
203         /*
204          * tx_iface_clk does not exist in newer version of ufs-phy HW,
205          * so don't return error if it is not found
206          */
207         __ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
208                                    &phy_common->tx_iface_clk, false);
209
210         /*
211          * rx_iface_clk does not exist in newer version of ufs-phy HW,
212          * so don't return error if it is not found
213          */
214         __ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
215                                    &phy_common->rx_iface_clk, false);
216
217         err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src",
218                                    &phy_common->ref_clk_src);
219         if (err)
220                 goto out;
221
222         /*
223          * "ref_clk_parent" is optional hence don't abort init if it's not
224          * found.
225          */
226         __ufs_qcom_phy_clk_get(generic_phy, "ref_clk_parent",
227                                    &phy_common->ref_clk_parent, false);
228
229         err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk",
230                                    &phy_common->ref_clk);
231         if (err)
232                 goto out;
233
234         /*
235          * "ref_aux_clk" is optional and only supported by certain
236          * phy versions, don't abort init if it's not found.
237          */
238          __ufs_qcom_phy_clk_get(generic_phy, "ref_aux_clk",
239                                    &phy_common->ref_aux_clk, false);
240 out:
241         return err;
242 }
243 EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_clks);
244
245 int
246 ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
247                               struct ufs_qcom_phy *phy_common)
248 {
249         int err;
250
251         err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_pll,
252                 "vdda-pll");
253         if (err)
254                 goto out;
255
256         err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_phy,
257                 "vdda-phy");
258         if (err)
259                 goto out;
260
261         /* vddp-ref-clk-* properties are optional */
262         __ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vddp_ref_clk,
263                                  "vddp-ref-clk", true);
264 out:
265         return err;
266 }
267 EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators);
268
269 static int __ufs_qcom_phy_init_vreg(struct phy *phy,
270                 struct ufs_qcom_phy_vreg *vreg, const char *name, bool optional)
271 {
272         int err = 0;
273         struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
274         struct device *dev = ufs_qcom_phy->dev;
275
276         char prop_name[MAX_PROP_NAME];
277
278         if (dev->of_node) {
279                 snprintf(prop_name, MAX_PROP_NAME, "%s-supply", name);
280                 if (!of_parse_phandle(dev->of_node, prop_name, 0)) {
281                         dev_dbg(dev, "No vreg data found for %s\n", prop_name);
282                         return optional ? err : -ENODATA;
283                 }
284         }
285
286         vreg->name = kstrdup(name, GFP_KERNEL);
287         if (!vreg->name) {
288                 err = -ENOMEM;
289                 goto out;
290         }
291
292         vreg->reg = devm_regulator_get(dev, name);
293         if (IS_ERR(vreg->reg)) {
294                 err = PTR_ERR(vreg->reg);
295                 vreg->reg = NULL;
296                 if (!optional)
297                         dev_err(dev, "failed to get %s, %d\n", name, err);
298                 goto out;
299         }
300
301         if (dev->of_node) {
302                 snprintf(prop_name, MAX_PROP_NAME, "%s-max-microamp", name);
303                 err = of_property_read_u32(dev->of_node,
304                                         prop_name, &vreg->max_uA);
305                 if (err && err != -EINVAL) {
306                         dev_err(dev, "%s: failed to read %s\n",
307                                         __func__, prop_name);
308                         goto out;
309                 } else if (err == -EINVAL || !vreg->max_uA) {
310                         if (regulator_count_voltages(vreg->reg) > 0) {
311                                 dev_err(dev, "%s: %s is mandatory\n",
312                                                 __func__, prop_name);
313                                 goto out;
314                         }
315                         err = 0;
316                 }
317                 snprintf(prop_name, MAX_PROP_NAME, "%s-always-on", name);
318                 if (of_get_property(dev->of_node, prop_name, NULL))
319                         vreg->is_always_on = true;
320                 else
321                         vreg->is_always_on = false;
322         }
323
324         if (!strcmp(name, "vdda-pll")) {
325                 vreg->max_uV = VDDA_PLL_MAX_UV;
326                 vreg->min_uV = VDDA_PLL_MIN_UV;
327         } else if (!strcmp(name, "vdda-phy")) {
328                 vreg->max_uV = VDDA_PHY_MAX_UV;
329                 vreg->min_uV = VDDA_PHY_MIN_UV;
330         } else if (!strcmp(name, "vddp-ref-clk")) {
331                 vreg->max_uV = VDDP_REF_CLK_MAX_UV;
332                 vreg->min_uV = VDDP_REF_CLK_MIN_UV;
333         }
334
335 out:
336         if (err)
337                 kfree(vreg->name);
338         return err;
339 }
340
341 static int ufs_qcom_phy_init_vreg(struct phy *phy,
342                         struct ufs_qcom_phy_vreg *vreg, const char *name)
343 {
344         return __ufs_qcom_phy_init_vreg(phy, vreg, name, false);
345 }
346
347 static
348 int ufs_qcom_phy_cfg_vreg(struct phy *phy,
349                           struct ufs_qcom_phy_vreg *vreg, bool on)
350 {
351         int ret = 0;
352         struct regulator *reg = vreg->reg;
353         const char *name = vreg->name;
354         int min_uV;
355         int uA_load;
356         struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
357         struct device *dev = ufs_qcom_phy->dev;
358
359         BUG_ON(!vreg);
360
361         if (regulator_count_voltages(reg) > 0) {
362                 min_uV = on ? vreg->min_uV : 0;
363                 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
364                 if (ret) {
365                         dev_err(dev, "%s: %s set voltage failed, err=%d\n",
366                                         __func__, name, ret);
367                         goto out;
368                 }
369                 uA_load = on ? vreg->max_uA : 0;
370                 ret = regulator_set_load(reg, uA_load);
371                 if (ret >= 0) {
372                         /*
373                          * regulator_set_load() returns new regulator
374                          * mode upon success.
375                          */
376                         ret = 0;
377                 } else {
378                         dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n",
379                                         __func__, name, uA_load, ret);
380                         goto out;
381                 }
382         }
383 out:
384         return ret;
385 }
386
387 static
388 int ufs_qcom_phy_enable_vreg(struct phy *phy,
389                              struct ufs_qcom_phy_vreg *vreg)
390 {
391         struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
392         struct device *dev = ufs_qcom_phy->dev;
393         int ret = 0;
394
395         if (!vreg || vreg->enabled)
396                 goto out;
397
398         ret = ufs_qcom_phy_cfg_vreg(phy, vreg, true);
399         if (ret) {
400                 dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n",
401                         __func__, ret);
402                 goto out;
403         }
404
405         ret = regulator_enable(vreg->reg);
406         if (ret) {
407                 dev_err(dev, "%s: enable failed, err=%d\n",
408                                 __func__, ret);
409                 goto out;
410         }
411
412         vreg->enabled = true;
413 out:
414         return ret;
415 }
416
/*
 * Ungate the PHY reference clock chain: ref_clk_src, then the optional
 * ref_clk_parent, then ref_clk, then the optional ref_aux_clk. On any
 * failure the stages already enabled are unwound in reverse order.
 *
 * Returns 0 on success (or if already enabled), otherwise the
 * clk_prepare_enable() error code.
 */
int ufs_qcom_phy_enable_ref_clk(struct phy *generic_phy)
{
	int ret = 0;
	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);

	/* Idempotent: a second enable call is a no-op. */
	if (phy->is_ref_clk_enabled)
		goto out;

	/*
	 * reference clock is propagated in a daisy-chained manner from
	 * source to phy, so ungate them at each stage.
	 */
	ret = clk_prepare_enable(phy->ref_clk_src);
	if (ret) {
		dev_err(phy->dev, "%s: ref_clk_src enable failed %d\n",
				__func__, ret);
		goto out;
	}

	/*
	 * "ref_clk_parent" is optional clock hence make sure that clk reference
	 * is available before trying to enable the clock.
	 */
	if (phy->ref_clk_parent) {
		ret = clk_prepare_enable(phy->ref_clk_parent);
		if (ret) {
			dev_err(phy->dev, "%s: ref_clk_parent enable failed %d\n",
					__func__, ret);
			goto out_disable_src;
		}
	}

	ret = clk_prepare_enable(phy->ref_clk);
	if (ret) {
		dev_err(phy->dev, "%s: ref_clk enable failed %d\n",
				__func__, ret);
		goto out_disable_parent;
	}

	/*
	 * "ref_aux_clk" is optional clock and only supported by certain
	 * phy versions, hence make sure that clk reference is available
	 * before trying to enable the clock.
	 */
	if (phy->ref_aux_clk) {
		ret = clk_prepare_enable(phy->ref_aux_clk);
		if (ret) {
			dev_err(phy->dev, "%s: ref_aux_clk enable failed %d\n",
					__func__, ret);
			goto out_disable_ref;
		}
	}

	phy->is_ref_clk_enabled = true;
	goto out;

out_disable_ref:
	/*
	 * NOTE(review): ref_clk was enabled unconditionally above, so this
	 * NULL check looks purely defensive — confirm it can ever be NULL.
	 */
	if (phy->ref_clk)
		clk_disable_unprepare(phy->ref_clk);
out_disable_parent:
	if (phy->ref_clk_parent)
		clk_disable_unprepare(phy->ref_clk_parent);
out_disable_src:
	clk_disable_unprepare(phy->ref_clk_src);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk);
485
486 static
487 int ufs_qcom_phy_disable_vreg(struct phy *phy,
488                               struct ufs_qcom_phy_vreg *vreg)
489 {
490         struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
491         struct device *dev = ufs_qcom_phy->dev;
492         int ret = 0;
493
494         if (!vreg || !vreg->enabled || vreg->is_always_on)
495                 goto out;
496
497         ret = regulator_disable(vreg->reg);
498
499         if (!ret) {
500                 /* ignore errors on applying disable config */
501                 ufs_qcom_phy_cfg_vreg(phy, vreg, false);
502                 vreg->enabled = false;
503         } else {
504                 dev_err(dev, "%s: %s disable failed, err=%d\n",
505                                 __func__, vreg->name, ret);
506         }
507 out:
508         return ret;
509 }
510
511 void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
512 {
513         struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
514
515         if (phy->is_ref_clk_enabled) {
516                 /*
517                  * "ref_aux_clk" is optional clock and only supported by
518                  * certain phy versions, hence make sure that clk reference
519                  * is available before trying to disable the clock.
520                  */
521                 if (phy->ref_aux_clk)
522                         clk_disable_unprepare(phy->ref_aux_clk);
523                 clk_disable_unprepare(phy->ref_clk);
524                 /*
525                  * "ref_clk_parent" is optional clock hence make sure that clk
526                  * reference is available before trying to disable the clock.
527                  */
528                 if (phy->ref_clk_parent)
529                         clk_disable_unprepare(phy->ref_clk_parent);
530                 clk_disable_unprepare(phy->ref_clk_src);
531                 phy->is_ref_clk_enabled = false;
532         }
533 }
534 EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk);
535
536 /* Turn ON M-PHY RMMI interface clocks */
537 int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
538 {
539         struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
540         int ret = 0;
541
542         if (phy->is_iface_clk_enabled)
543                 goto out;
544
545         if (!phy->tx_iface_clk)
546                 goto out;
547
548         ret = clk_prepare_enable(phy->tx_iface_clk);
549         if (ret) {
550                 dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n",
551                                 __func__, ret);
552                 goto out;
553         }
554         ret = clk_prepare_enable(phy->rx_iface_clk);
555         if (ret) {
556                 clk_disable_unprepare(phy->tx_iface_clk);
557                 dev_err(phy->dev, "%s: rx_iface_clk enable failed %d. disabling also tx_iface_clk\n",
558                                 __func__, ret);
559                 goto out;
560         }
561         phy->is_iface_clk_enabled = true;
562
563 out:
564         return ret;
565 }
566 EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk);
567
568 /* Turn OFF M-PHY RMMI interface clocks */
569 void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
570 {
571         struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
572
573         if (!phy->tx_iface_clk)
574                 return;
575
576         if (phy->is_iface_clk_enabled) {
577                 clk_disable_unprepare(phy->tx_iface_clk);
578                 clk_disable_unprepare(phy->rx_iface_clk);
579                 phy->is_iface_clk_enabled = false;
580         }
581 }
582 EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk);
583
584 int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
585 {
586         struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
587         int ret = 0;
588
589         if (!ufs_qcom_phy->phy_spec_ops->start_serdes) {
590                 dev_err(ufs_qcom_phy->dev, "%s: start_serdes() callback is not supported\n",
591                         __func__);
592                 ret = -ENOTSUPP;
593         } else {
594                 ufs_qcom_phy->phy_spec_ops->start_serdes(ufs_qcom_phy);
595         }
596
597         return ret;
598 }
599 EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes);
600
601 int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
602 {
603         struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
604         int ret = 0;
605
606         if (ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable)
607                 ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable(ufs_qcom_phy,
608                                                                tx_lanes);
609
610         return ret;
611 }
612 EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable);
613
614 int ufs_qcom_phy_ctrl_rx_linecfg(struct phy *generic_phy, bool ctrl)
615 {
616         struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
617         int ret = 0;
618
619         if (ufs_qcom_phy->phy_spec_ops->ctrl_rx_linecfg)
620                 ufs_qcom_phy->phy_spec_ops->ctrl_rx_linecfg(ufs_qcom_phy, ctrl);
621
622         return ret;
623 }
624 EXPORT_SYMBOL_GPL(ufs_qcom_phy_ctrl_rx_linecfg);
625
626 void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
627                                           u8 major, u16 minor, u16 step)
628 {
629         struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
630
631         ufs_qcom_phy->host_ctrl_rev_major = major;
632         ufs_qcom_phy->host_ctrl_rev_minor = minor;
633         ufs_qcom_phy->host_ctrl_rev_step = step;
634 }
635 EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version);
636
637 int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
638 {
639         struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
640         int ret = 0;
641
642         if (!ufs_qcom_phy->phy_spec_ops->calibrate_phy) {
643                 dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() callback is not supported\n",
644                         __func__);
645                 ret = -ENOTSUPP;
646         } else {
647                 ret = ufs_qcom_phy->phy_spec_ops->
648                                 calibrate_phy(ufs_qcom_phy, is_rate_B);
649                 if (ret)
650                         dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() failed %d\n",
651                                 __func__, ret);
652         }
653
654         return ret;
655 }
656 EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);
657
658 const char *ufs_qcom_phy_name(struct phy *phy)
659 {
660         struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
661
662         return ufs_qcom_phy->name;
663 }
664 EXPORT_SYMBOL(ufs_qcom_phy_name);
665
666 int ufs_qcom_phy_remove(struct phy *generic_phy,
667                         struct ufs_qcom_phy *ufs_qcom_phy)
668 {
669         phy_power_off(generic_phy);
670
671         kfree(ufs_qcom_phy->vdda_pll.name);
672         kfree(ufs_qcom_phy->vdda_phy.name);
673
674         return 0;
675 }
676 EXPORT_SYMBOL_GPL(ufs_qcom_phy_remove);
677
678 int ufs_qcom_phy_exit(struct phy *generic_phy)
679 {
680         struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
681
682         if (ufs_qcom_phy->is_powered_on)
683                 phy_power_off(generic_phy);
684
685         return 0;
686 }
687 EXPORT_SYMBOL_GPL(ufs_qcom_phy_exit);
688
689 int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
690 {
691         struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
692
693         if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) {
694                 dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n",
695                         __func__);
696                 return -ENOTSUPP;
697         }
698
699         return ufs_qcom_phy->phy_spec_ops->
700                         is_physical_coding_sublayer_ready(ufs_qcom_phy);
701 }
702 EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready);
703
/*
 * Power-up sequence for the PHY: vdda_phy supply, then the specific
 * driver's power_control(), then vdda_pll (which also feeds the ref
 * clock LDOs), then the reference clock chain, and finally the optional
 * vddp_ref_clk pad supply. Failures unwind in reverse order.
 *
 * Returns 0 on success or the first error encountered.
 */
int ufs_qcom_phy_power_on(struct phy *generic_phy)
{
	struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
	struct device *dev = phy_common->dev;
	int err;

	err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_phy);
	if (err) {
		dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
			__func__, err);
		goto out;
	}

	phy_common->phy_spec_ops->power_control(phy_common, true);

	/* vdda_pll also enables ref clock LDOs so enable it first */
	err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_pll);
	if (err) {
		dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
			__func__, err);
		goto out_disable_phy;
	}

	err = ufs_qcom_phy_enable_ref_clk(generic_phy);
	if (err) {
		dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
			__func__, err);
		goto out_disable_pll;
	}

	/* enable device PHY ref_clk pad rail */
	if (phy_common->vddp_ref_clk.reg) {
		err = ufs_qcom_phy_enable_vreg(generic_phy,
					       &phy_common->vddp_ref_clk);
		if (err) {
			dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
				__func__, err);
			goto out_disable_ref_clk;
		}
	}

	phy_common->is_powered_on = true;
	goto out;

out_disable_ref_clk:
	ufs_qcom_phy_disable_ref_clk(generic_phy);
out_disable_pll:
	ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
out_disable_phy:
	/*
	 * NOTE(review): power_control(true) is not undone on this path —
	 * presumably a later power_off() handles it; verify with the
	 * specific PHY drivers.
	 */
	ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
out:
	return err;
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_on);
758
759 int ufs_qcom_phy_power_off(struct phy *generic_phy)
760 {
761         struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
762
763         phy_common->phy_spec_ops->power_control(phy_common, false);
764
765         if (phy_common->vddp_ref_clk.reg)
766                 ufs_qcom_phy_disable_vreg(generic_phy,
767                                           &phy_common->vddp_ref_clk);
768         ufs_qcom_phy_disable_ref_clk(generic_phy);
769
770         ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
771         ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
772         phy_common->is_powered_on = false;
773
774         return 0;
775 }
776 EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off);
777
778 int ufs_qcom_phy_configure_lpm(struct phy *generic_phy, bool enable)
779 {
780         struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
781         int ret = 0;
782
783         if (ufs_qcom_phy->phy_spec_ops->configure_lpm) {
784                 ret = ufs_qcom_phy->phy_spec_ops->
785                                 configure_lpm(ufs_qcom_phy, enable);
786                 if (ret)
787                         dev_err(ufs_qcom_phy->dev,
788                                 "%s: configure_lpm(%s) failed %d\n",
789                                 __func__, enable ? "enable" : "disable", ret);
790         }
791
792         return ret;
793 }
794 EXPORT_SYMBOL(ufs_qcom_phy_configure_lpm);
795
796 void ufs_qcom_phy_dump_regs(struct ufs_qcom_phy *phy, int offset,
797                                 int len, char *prefix)
798 {
799         print_hex_dump(KERN_ERR, prefix,
800                         len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
801                         16, 4, phy->mmio + offset, len, false);
802 }
803 EXPORT_SYMBOL(ufs_qcom_phy_dump_regs);
804
805 void ufs_qcom_phy_dbg_register_dump(struct phy *generic_phy)
806 {
807         struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
808
809         if (ufs_qcom_phy->phy_spec_ops->dbg_register_dump)
810                 ufs_qcom_phy->phy_spec_ops->dbg_register_dump(ufs_qcom_phy);
811 }
812 EXPORT_SYMBOL(ufs_qcom_phy_dbg_register_dump);