2 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
15 #include "phy-qcom-ufs-i.h"
17 #define MAX_PROP_NAME 32
18 #define VDDA_PHY_MIN_UV 800000
19 #define VDDA_PHY_MAX_UV 925000
20 #define VDDA_PLL_MIN_UV 1200000
21 #define VDDA_PLL_MAX_UV 1800000
22 #define VDDP_REF_CLK_MIN_UV 1200000
23 #define VDDP_REF_CLK_MAX_UV 1200000
25 static int __ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
27 static int ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
29 static int ufs_qcom_phy_base_init(struct platform_device *pdev,
30 struct ufs_qcom_phy *phy_common);
/*
 * ufs_qcom_phy_write_tbl - program a calibration table into the PHY.
 * Writes each {reg_offset, cfg_value} entry via writel_relaxed() against
 * the PHY's mmio base; no barrier is issued here, the caller is expected
 * to flush buffered writes (see ufs_qcom_phy_calibrate()).
 * NOTE(review): this dump is missing lines (body braces, loop index decl).
 */
32 void ufs_qcom_phy_write_tbl(struct ufs_qcom_phy *ufs_qcom_phy,
33 struct ufs_qcom_phy_calibration *tbl,
38 for (i = 0; i < tbl_size; i++)
39 writel_relaxed(tbl[i].cfg_value,
40 ufs_qcom_phy->mmio + tbl[i].reg_offset);
42 EXPORT_SYMBOL(ufs_qcom_phy_write_tbl);
/*
 * ufs_qcom_phy_calibrate - calibrate the PHY for rate A and, optionally,
 * rate B operation. Table A is always written; when @is_rate_B is set,
 * table B is written on top of it to override rate-A settings.
 * Logs an error when a required table pointer is NULL.
 * NOTE(review): this dump is missing lines (NULL checks, return paths).
 */
44 int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
45 struct ufs_qcom_phy_calibration *tbl_A,
47 struct ufs_qcom_phy_calibration *tbl_B,
48 int tbl_size_B, bool is_rate_B)
53 dev_err(ufs_qcom_phy->dev, "%s: tbl_A is NULL", __func__);
58 ufs_qcom_phy_write_tbl(ufs_qcom_phy, tbl_A, tbl_size_A);
61 * In case we would like to work in rate B, we need
62 * to override registers that were configured in the rate A table
63 * with the registers of the rate B table.
68 dev_err(ufs_qcom_phy->dev, "%s: tbl_B is NULL",
74 ufs_qcom_phy_write_tbl(ufs_qcom_phy, tbl_B, tbl_size_B);
77 /* flush buffered writes */
83 EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate);
/*
 * ufs_qcom_phy_generic_probe - common probe path shared by the PHY
 * generation-specific drivers: maps the "phy_mem" resource, registers a
 * phy provider, creates the generic phy and stores the vendor-specific
 * ops in @common_cfg.
 * NOTE(review): this dump is missing lines (error-path braces/returns).
 */
85 struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
86 struct ufs_qcom_phy *common_cfg,
87 const struct phy_ops *ufs_qcom_phy_gen_ops,
88 struct ufs_qcom_phy_specific_ops *phy_spec_ops)
91 struct device *dev = &pdev->dev;
92 struct phy *generic_phy = NULL;
93 struct phy_provider *phy_provider;
95 err = ufs_qcom_phy_base_init(pdev, common_cfg);
97 dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
101 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
102 if (IS_ERR(phy_provider)) {
103 err = PTR_ERR(phy_provider);
104 dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
108 generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
109 if (IS_ERR(generic_phy)) {
110 err = PTR_ERR(generic_phy);
111 dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
117 * UFS PHY power management is managed by its parent (UFS host
118 * controller) hence set the "no runtime PM callbacks" flag
119 * on UFS PHY device to avoid any accidental attempt to call the
120 * PM callbacks for PHY device.
122 pm_runtime_no_callbacks(&generic_phy->dev);
124 common_cfg->phy_spec_ops = phy_spec_ops;
125 common_cfg->dev = dev;
130 EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);
/*
 * get_ufs_qcom_phy - fetch the ufs_qcom_phy private data stored as the
 * generic phy's drvdata.
 */
133 * This assumes the embedded phy structure inside generic_phy is of type
134 * struct ufs_qcom_phy. In order to function properly it's crucial
135 * to keep the embedded struct "struct ufs_qcom_phy common_cfg"
136 * as the first inside generic_phy.
138 struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy)
140 return (struct ufs_qcom_phy *)phy_get_drvdata(generic_phy);
142 EXPORT_SYMBOL_GPL(get_ufs_qcom_phy);
/*
 * ufs_qcom_phy_base_init - map the "phy_mem" platform resource into
 * phy_common->mmio. On ioremap failure mmio is reset to NULL so later
 * code can test it safely.
 * NOTE(review): this dump is missing lines (resource NULL check, returns).
 */
145 int ufs_qcom_phy_base_init(struct platform_device *pdev,
146 struct ufs_qcom_phy *phy_common)
148 struct device *dev = &pdev->dev;
149 struct resource *res;
152 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_mem");
154 dev_err(dev, "%s: phy_mem resource not found\n", __func__);
159 phy_common->mmio = devm_ioremap_resource(dev, res);
160 if (IS_ERR((void const *)phy_common->mmio)) {
161 err = PTR_ERR((void const *)phy_common->mmio);
162 phy_common->mmio = NULL;
163 dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n",
/*
 * __ufs_qcom_phy_clk_get - devm_clk_get() a named clock for the PHY device.
 * @err_print selects whether a lookup failure is logged (optional clocks
 * pass false so a missing clock stays silent).
 * NOTE(review): this dump is missing lines (error check, *clk_out store).
 */
170 static int __ufs_qcom_phy_clk_get(struct phy *phy,
171 const char *name, struct clk **clk_out, bool err_print)
175 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
176 struct device *dev = ufs_qcom_phy->dev;
178 clk = devm_clk_get(dev, name);
182 dev_err(dev, "failed to get %s err %d", name, err);
/* ufs_qcom_phy_clk_get - mandatory-clock wrapper: always logs on failure. */
191 int ufs_qcom_phy_clk_get(struct phy *phy,
192 const char *name, struct clk **clk_out)
194 return __ufs_qcom_phy_clk_get(phy, name, clk_out, true);
/*
 * ufs_qcom_phy_init_clks - look up all PHY clocks. tx/rx_iface_clk,
 * ref_clk_parent and ref_aux_clk are optional (their lookup errors are
 * ignored); ref_clk_src and ref_clk are mandatory.
 * NOTE(review): this dump is missing lines (error bail-outs, return).
 */
198 ufs_qcom_phy_init_clks(struct phy *generic_phy,
199 struct ufs_qcom_phy *phy_common)
204 * tx_iface_clk does not exist in newer version of ufs-phy HW,
205 * so don't return error if it is not found
207 __ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
208 &phy_common->tx_iface_clk, false);
211 * rx_iface_clk does not exist in newer version of ufs-phy HW,
212 * so don't return error if it is not found
214 __ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
215 &phy_common->rx_iface_clk, false);
217 err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src",
218 &phy_common->ref_clk_src);
223 * "ref_clk_parent" is optional hence don't abort init if it's not
226 __ufs_qcom_phy_clk_get(generic_phy, "ref_clk_parent",
227 &phy_common->ref_clk_parent, false);
229 err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk",
230 &phy_common->ref_clk);
235 * "ref_aux_clk" is optional and only supported by certain
236 * phy versions, don't abort init if it's not found.
238 __ufs_qcom_phy_clk_get(generic_phy, "ref_aux_clk",
239 &phy_common->ref_aux_clk, false);
243 EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_clks);
/*
 * ufs_qcom_phy_init_vregulators - init the PHY regulators. vdda-pll and
 * vdda-phy are mandatory; vddp-ref-clk is optional (initialized with the
 * "optional" variant so a missing supply is not an error).
 * NOTE(review): this dump is missing lines (error checks between calls).
 */
246 ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
247 struct ufs_qcom_phy *phy_common)
251 err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_pll,
256 err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_phy,
261 /* vddp-ref-clk-* properties are optional */
262 __ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vddp_ref_clk,
263 "vddp-ref-clk", true);
267 EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators);
/*
 * __ufs_qcom_phy_init_vreg - populate a ufs_qcom_phy_vreg from device-tree:
 * checks the "<name>-supply" phandle, gets the regulator, reads the
 * optional "<name>-max-microamp" load and "<name>-always-on" flag, and
 * assigns fixed min/max voltages keyed off the supply name.
 * When @optional is set, a missing "<name>-supply" is not an error.
 * NOTE(review): this dump is missing lines (kstrdup failure check,
 * return paths, closing braces).
 */
269 static int __ufs_qcom_phy_init_vreg(struct phy *phy,
270 struct ufs_qcom_phy_vreg *vreg, const char *name, bool optional)
273 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
274 struct device *dev = ufs_qcom_phy->dev;
276 char prop_name[MAX_PROP_NAME];
279 snprintf(prop_name, MAX_PROP_NAME, "%s-supply", name);
280 if (!of_parse_phandle(dev->of_node, prop_name, 0)) {
281 dev_dbg(dev, "No vreg data found for %s\n", prop_name);
282 return optional ? err : -ENODATA;
/* name is duplicated so it outlives the caller's string; freed in remove */
286 vreg->name = kstrdup(name, GFP_KERNEL);
292 vreg->reg = devm_regulator_get(dev, name);
293 if (IS_ERR(vreg->reg)) {
294 err = PTR_ERR(vreg->reg);
297 dev_err(dev, "failed to get %s, %d\n", name, err);
/* -EINVAL from of_property_read_u32 means "property absent" - tolerated */
302 snprintf(prop_name, MAX_PROP_NAME, "%s-max-microamp", name);
303 err = of_property_read_u32(dev->of_node,
304 prop_name, &vreg->max_uA);
305 if (err && err != -EINVAL) {
306 dev_err(dev, "%s: failed to read %s\n",
307 __func__, prop_name);
309 } else if (err == -EINVAL || !vreg->max_uA) {
310 if (regulator_count_voltages(vreg->reg) > 0) {
311 dev_err(dev, "%s: %s is mandatory\n",
312 __func__, prop_name);
317 snprintf(prop_name, MAX_PROP_NAME, "%s-always-on", name);
318 if (of_get_property(dev->of_node, prop_name, NULL))
319 vreg->is_always_on = true;
321 vreg->is_always_on = false;
/* fixed voltage envelopes per known supply name (see #defines above) */
324 if (!strcmp(name, "vdda-pll")) {
325 vreg->max_uV = VDDA_PLL_MAX_UV;
326 vreg->min_uV = VDDA_PLL_MIN_UV;
327 } else if (!strcmp(name, "vdda-phy")) {
328 vreg->max_uV = VDDA_PHY_MAX_UV;
329 vreg->min_uV = VDDA_PHY_MIN_UV;
330 } else if (!strcmp(name, "vddp-ref-clk")) {
331 vreg->max_uV = VDDP_REF_CLK_MAX_UV;
332 vreg->min_uV = VDDP_REF_CLK_MIN_UV;
/* ufs_qcom_phy_init_vreg - mandatory-supply wrapper (optional = false). */
341 static int ufs_qcom_phy_init_vreg(struct phy *phy,
342 struct ufs_qcom_phy_vreg *vreg, const char *name)
344 return __ufs_qcom_phy_init_vreg(phy, vreg, name, false);
/*
 * ufs_qcom_phy_cfg_vreg - apply the enable/disable regulator config:
 * sets voltage (min_uV..max_uV when @on, 0..max_uV when off) and load
 * (max_uA when @on, 0 when off) on regulators that support voltages.
 * NOTE(review): this dump is missing lines (error goto/return paths).
 */
348 int ufs_qcom_phy_cfg_vreg(struct phy *phy,
349 struct ufs_qcom_phy_vreg *vreg, bool on)
352 struct regulator *reg = vreg->reg;
353 const char *name = vreg->name;
356 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
357 struct device *dev = ufs_qcom_phy->dev;
361 if (regulator_count_voltages(reg) > 0) {
362 min_uV = on ? vreg->min_uV : 0;
363 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
365 dev_err(dev, "%s: %s set voltage failed, err=%d\n",
366 __func__, name, ret);
369 uA_load = on ? vreg->max_uA : 0;
370 ret = regulator_set_load(reg, uA_load);
373 * regulator_set_load() returns new regulator
378 dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n",
379 __func__, name, uA_load, ret);
/*
 * ufs_qcom_phy_enable_vreg - configure and enable a regulator; no-op if
 * the vreg is NULL or already enabled. Sets vreg->enabled on success.
 * NOTE(review): this dump is missing lines (early return, error gotos).
 */
388 int ufs_qcom_phy_enable_vreg(struct phy *phy,
389 struct ufs_qcom_phy_vreg *vreg)
391 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
392 struct device *dev = ufs_qcom_phy->dev;
395 if (!vreg || vreg->enabled)
398 ret = ufs_qcom_phy_cfg_vreg(phy, vreg, true);
400 dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n",
405 ret = regulator_enable(vreg->reg);
407 dev_err(dev, "%s: enable failed, err=%d\n",
412 vreg->enabled = true;
/*
 * ufs_qcom_phy_enable_ref_clk - ungate the reference-clock chain in
 * source -> parent -> ref_clk -> ref_aux order, unwinding already-enabled
 * stages on failure. Idempotent via is_ref_clk_enabled.
 * NOTE(review): this dump is missing lines (braces, error labels, return).
 */
417 int ufs_qcom_phy_enable_ref_clk(struct phy *generic_phy)
420 struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
422 if (phy->is_ref_clk_enabled)
426 * reference clock is propagated in a daisy-chained manner from
427 * source to phy, so ungate them at each stage.
429 ret = clk_prepare_enable(phy->ref_clk_src);
431 dev_err(phy->dev, "%s: ref_clk_src enable failed %d\n",
437 * "ref_clk_parent" is optional clock hence make sure that clk reference
438 * is available before trying to enable the clock.
440 if (phy->ref_clk_parent) {
441 ret = clk_prepare_enable(phy->ref_clk_parent);
443 dev_err(phy->dev, "%s: ref_clk_parent enable failed %d\n",
445 goto out_disable_src;
449 ret = clk_prepare_enable(phy->ref_clk);
451 dev_err(phy->dev, "%s: ref_clk enable failed %d\n",
453 goto out_disable_parent;
457 * "ref_aux_clk" is optional clock and only supported by certain
458 * phy versions, hence make sure that clk reference is available
459 * before trying to enable the clock.
461 if (phy->ref_aux_clk) {
462 ret = clk_prepare_enable(phy->ref_aux_clk);
464 dev_err(phy->dev, "%s: ref_aux_clk enable failed %d\n",
466 goto out_disable_ref;
470 phy->is_ref_clk_enabled = true;
/* error unwind: disable in reverse order of enabling */
475 clk_disable_unprepare(phy->ref_clk);
477 if (phy->ref_clk_parent)
478 clk_disable_unprepare(phy->ref_clk_parent);
480 clk_disable_unprepare(phy->ref_clk_src);
484 EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk);
/*
 * ufs_qcom_phy_disable_vreg - disable a regulator unless it is NULL,
 * already disabled, or marked always-on. Applies the "off" config
 * best-effort (its errors are ignored) and clears vreg->enabled.
 * NOTE(review): this dump is missing lines (braces, return statement).
 */
487 int ufs_qcom_phy_disable_vreg(struct phy *phy,
488 struct ufs_qcom_phy_vreg *vreg)
490 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
491 struct device *dev = ufs_qcom_phy->dev;
494 if (!vreg || !vreg->enabled || vreg->is_always_on)
497 ret = regulator_disable(vreg->reg);
500 /* ignore errors on applying disable config */
501 ufs_qcom_phy_cfg_vreg(phy, vreg, false);
502 vreg->enabled = false;
504 dev_err(dev, "%s: %s disable failed, err=%d\n",
505 __func__, vreg->name, ret);
/*
 * ufs_qcom_phy_disable_ref_clk - gate the reference-clock chain in the
 * reverse order of ufs_qcom_phy_enable_ref_clk(); no-op when the chain
 * is not currently enabled.
 */
511 void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
513 struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
515 if (phy->is_ref_clk_enabled) {
517 * "ref_aux_clk" is optional clock and only supported by
518 * certain phy versions, hence make sure that clk reference
519 * is available before trying to disable the clock.
521 if (phy->ref_aux_clk)
522 clk_disable_unprepare(phy->ref_aux_clk);
523 clk_disable_unprepare(phy->ref_clk);
525 * "ref_clk_parent" is optional clock hence make sure that clk
526 * reference is available before trying to disable the clock.
528 if (phy->ref_clk_parent)
529 clk_disable_unprepare(phy->ref_clk_parent);
530 clk_disable_unprepare(phy->ref_clk_src);
531 phy->is_ref_clk_enabled = false;
534 EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk);
536 /* Turn ON M-PHY RMMI interface clocks */
/*
 * Enables tx_iface_clk then rx_iface_clk; rolls back tx on rx failure.
 * Silently succeeds when tx_iface_clk is absent (newer PHY HW) or the
 * clocks are already on. NOTE(review): dump is missing braces/returns.
 */
537 int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
539 struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
542 if (phy->is_iface_clk_enabled)
545 if (!phy->tx_iface_clk)
548 ret = clk_prepare_enable(phy->tx_iface_clk);
550 dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n",
554 ret = clk_prepare_enable(phy->rx_iface_clk);
556 clk_disable_unprepare(phy->tx_iface_clk);
557 dev_err(phy->dev, "%s: rx_iface_clk enable failed %d. disabling also tx_iface_clk\n",
561 phy->is_iface_clk_enabled = true;
566 EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk);
568 /* Turn OFF M-PHY RMMI interface clocks */
/* Reverse of ufs_qcom_phy_enable_iface_clk(); no-op when tx_iface_clk is
 * absent or the clocks are not currently enabled. */
569 void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
571 struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
573 if (!phy->tx_iface_clk)
576 if (phy->is_iface_clk_enabled) {
577 clk_disable_unprepare(phy->tx_iface_clk);
578 clk_disable_unprepare(phy->rx_iface_clk);
579 phy->is_iface_clk_enabled = false;
582 EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk);
/*
 * ufs_qcom_phy_start_serdes - delegate to the vendor-specific
 * start_serdes() callback; logs an error when the callback is missing.
 * NOTE(review): dump is missing the error-return and final return lines.
 */
584 int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
586 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
589 if (!ufs_qcom_phy->phy_spec_ops->start_serdes) {
590 dev_err(ufs_qcom_phy->dev, "%s: start_serdes() callback is not supported\n",
594 ufs_qcom_phy->phy_spec_ops->start_serdes(ufs_qcom_phy);
599 EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes);
/* Delegate to the optional set_tx_lane_enable() vendor callback;
 * silently skipped when the callback is not provided. */
601 int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
603 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
606 if (ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable)
607 ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable(ufs_qcom_phy,
612 EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable);
/* Delegate to the optional ctrl_rx_linecfg() vendor callback;
 * silently skipped when the callback is not provided. */
614 int ufs_qcom_phy_ctrl_rx_linecfg(struct phy *generic_phy, bool ctrl)
616 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
619 if (ufs_qcom_phy->phy_spec_ops->ctrl_rx_linecfg)
620 ufs_qcom_phy->phy_spec_ops->ctrl_rx_linecfg(ufs_qcom_phy, ctrl);
624 EXPORT_SYMBOL_GPL(ufs_qcom_phy_ctrl_rx_linecfg);
/* Cache the UFS host controller revision (major/minor/step) in the PHY
 * private data so vendor code can pick revision-specific settings. */
626 void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
627 u8 major, u16 minor, u16 step)
629 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
631 ufs_qcom_phy->host_ctrl_rev_major = major;
632 ufs_qcom_phy->host_ctrl_rev_minor = minor;
633 ufs_qcom_phy->host_ctrl_rev_step = step;
635 EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version);
/*
 * ufs_qcom_phy_calibrate_phy - invoke the mandatory vendor calibrate_phy()
 * callback, logging when it is missing or fails.
 * NOTE(review): dump is missing the error-return and final return lines.
 */
637 int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
639 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
642 if (!ufs_qcom_phy->phy_spec_ops->calibrate_phy) {
643 dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() callback is not supported\n",
647 ret = ufs_qcom_phy->phy_spec_ops->
648 calibrate_phy(ufs_qcom_phy, is_rate_B);
650 dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() failed %d\n",
656 EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);
/* Return the name string stored in the PHY private data. */
658 const char *ufs_qcom_phy_name(struct phy *phy)
660 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
662 return ufs_qcom_phy->name;
664 EXPORT_SYMBOL(ufs_qcom_phy_name);
/*
 * ufs_qcom_phy_remove - power the PHY off and free the kstrdup()'d
 * regulator names allocated in __ufs_qcom_phy_init_vreg().
 * NOTE(review): vddp_ref_clk.name is not freed here in the visible code -
 * possibly a leak, but the dump is missing lines; confirm against full file.
 */
666 int ufs_qcom_phy_remove(struct phy *generic_phy,
667 struct ufs_qcom_phy *ufs_qcom_phy)
669 phy_power_off(generic_phy);
671 kfree(ufs_qcom_phy->vdda_pll.name);
672 kfree(ufs_qcom_phy->vdda_phy.name);
676 EXPORT_SYMBOL_GPL(ufs_qcom_phy_remove);
/* Power the PHY off if it is currently powered on. */
678 int ufs_qcom_phy_exit(struct phy *generic_phy)
680 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
682 if (ufs_qcom_phy->is_powered_on)
683 phy_power_off(generic_phy);
687 EXPORT_SYMBOL_GPL(ufs_qcom_phy_exit);
/*
 * ufs_qcom_phy_is_pcs_ready - poll the vendor callback reporting whether
 * the PHY's physical coding sublayer is ready; the callback is mandatory.
 * NOTE(review): dump is missing the error-return line after dev_err().
 */
689 int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
691 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
693 if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) {
694 dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n",
699 return ufs_qcom_phy->phy_spec_ops->
700 is_physical_coding_sublayer_ready(ufs_qcom_phy);
702 EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready);
/*
 * ufs_qcom_phy_power_on - bring the PHY up: enable vdda_phy, assert vendor
 * power_control, enable vdda_pll (which also feeds the ref-clock LDOs),
 * enable the reference clocks, and optionally the vddp_ref_clk pad rail.
 * Unwinds in reverse order on any failure; sets is_powered_on on success.
 * NOTE(review): dump is missing lines (braces, goto labels, return).
 */
704 int ufs_qcom_phy_power_on(struct phy *generic_phy)
706 struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
707 struct device *dev = phy_common->dev;
710 err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_phy);
712 dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
717 phy_common->phy_spec_ops->power_control(phy_common, true);
719 /* vdda_pll also enables ref clock LDOs so enable it first */
720 err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_pll);
722 dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
724 goto out_disable_phy;
727 err = ufs_qcom_phy_enable_ref_clk(generic_phy);
729 dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
731 goto out_disable_pll;
734 /* enable device PHY ref_clk pad rail */
735 if (phy_common->vddp_ref_clk.reg) {
736 err = ufs_qcom_phy_enable_vreg(generic_phy,
737 &phy_common->vddp_ref_clk);
739 dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
741 goto out_disable_ref_clk;
745 phy_common->is_powered_on = true;
/* error unwind: tear down in reverse order of bring-up */
749 ufs_qcom_phy_disable_ref_clk(generic_phy);
751 ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
753 ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
757 EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_on);
/*
 * ufs_qcom_phy_power_off - exact reverse of ufs_qcom_phy_power_on():
 * deassert vendor power_control, drop the optional pad rail, gate the
 * reference clocks, then disable vdda_pll and vdda_phy.
 */
759 int ufs_qcom_phy_power_off(struct phy *generic_phy)
761 struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
763 phy_common->phy_spec_ops->power_control(phy_common, false);
765 if (phy_common->vddp_ref_clk.reg)
766 ufs_qcom_phy_disable_vreg(generic_phy,
767 &phy_common->vddp_ref_clk);
768 ufs_qcom_phy_disable_ref_clk(generic_phy);
770 ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
771 ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
772 phy_common->is_powered_on = false;
776 EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off);
/*
 * ufs_qcom_phy_configure_lpm - enable/disable PHY low-power mode via the
 * optional vendor configure_lpm() callback; logs on callback failure.
 * NOTE(review): dump is missing lines (braces, return statement).
 */
778 int ufs_qcom_phy_configure_lpm(struct phy *generic_phy, bool enable)
780 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
783 if (ufs_qcom_phy->phy_spec_ops->configure_lpm) {
784 ret = ufs_qcom_phy->phy_spec_ops->
785 configure_lpm(ufs_qcom_phy, enable);
787 dev_err(ufs_qcom_phy->dev,
788 "%s: configure_lpm(%s) failed %d\n",
789 __func__, enable ? "enable" : "disable", ret);
794 EXPORT_SYMBOL(ufs_qcom_phy_configure_lpm);
/* Hex-dump @len bytes of PHY register space starting at @offset to the
 * kernel log (KERN_ERR); offsets are shown only for dumps longer than
 * 4 bytes. */
796 void ufs_qcom_phy_dump_regs(struct ufs_qcom_phy *phy, int offset,
797 int len, char *prefix)
799 print_hex_dump(KERN_ERR, prefix,
800 len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
801 16, 4, phy->mmio + offset, len, false);
803 EXPORT_SYMBOL(ufs_qcom_phy_dump_regs);
/* Delegate to the optional dbg_register_dump() vendor callback;
 * silently skipped when the callback is not provided. */
805 void ufs_qcom_phy_dbg_register_dump(struct phy *generic_phy)
807 struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
809 if (ufs_qcom_phy->phy_spec_ops->dbg_register_dump)
810 ufs_qcom_phy->phy_spec_ops->dbg_register_dump(ufs_qcom_phy);
812 EXPORT_SYMBOL(ufs_qcom_phy_dbg_register_dump);