From cd94470626a4bea3339965d4cdfc92bae8cc4c9c Mon Sep 17 00:00:00 2001
From: Hareesh Gundu
Date: Thu, 18 May 2017 17:06:33 +0530
Subject: [PATCH] msm: kgsl: Log clk set, enable and prepare failure
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

Handle the clk API return values so that unclocked GPU register
accesses are easy to catch.

Change-Id: I5a1a9a6cbd673394f126bb17b849393268a22b1b
Signed-off-by: Hareesh Gundu
---
 drivers/gpu/msm/kgsl_pwrctrl.c | 110 +++++++++++++++++++++++++++++++----------
 1 file changed, 85 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 0150d50c925b..50da3a128436 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -81,6 +81,12 @@ static void kgsl_pwrctrl_set_state(struct kgsl_device *device,
 static void kgsl_pwrctrl_request_state(struct kgsl_device *device,
 				unsigned int state);
 static int _isense_clk_set_rate(struct kgsl_pwrctrl *pwr, int level);
+static int kgsl_pwrctrl_clk_set_rate(struct clk *grp_clk, unsigned int freq,
+				const char *name);
+static void _gpu_clk_prepare_enable(struct kgsl_device *device,
+				struct clk *clk, const char *name);
+static void _bimc_clk_prepare_enable(struct kgsl_device *device,
+				struct clk *clk, const char *name);
 
 /**
  * _record_pwrevent() - Record the history of the new event
@@ -405,7 +411,8 @@ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
 	pwrlevel = &pwr->pwrlevels[pwr->active_pwrlevel];
 	/* Change register settings if any BEFORE pwrlevel change*/
 	kgsl_pwrctrl_pwrlevel_change_settings(device, 0);
-	clk_set_rate(pwr->grp_clks[0], pwrlevel->gpu_freq);
+	kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[0],
+		pwrlevel->gpu_freq, clocks[0]);
 	_isense_clk_set_rate(pwr, pwr->active_pwrlevel);
 
 	trace_kgsl_pwrlevel(device,
@@ -423,9 +430,12 @@ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
 	if (pwr->gpu_bimc_int_clk) {
 		if (pwr->active_pwrlevel == 0 &&
 				!pwr->gpu_bimc_interface_enabled) {
-			clk_set_rate(pwr->gpu_bimc_int_clk,
-				pwr->gpu_bimc_int_clk_freq);
-			clk_prepare_enable(pwr->gpu_bimc_int_clk);
+			kgsl_pwrctrl_clk_set_rate(pwr->gpu_bimc_int_clk,
+				pwr->gpu_bimc_int_clk_freq,
+				"bimc_gpu_clk");
+			_bimc_clk_prepare_enable(device,
+				pwr->gpu_bimc_int_clk,
+				"bimc_gpu_clk");
 			pwr->gpu_bimc_interface_enabled = 1;
 		} else if (pwr->previous_pwrlevel == 0 &&
 				pwr->gpu_bimc_interface_enabled) {
@@ -1650,9 +1660,9 @@ static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
 			(requested_state != KGSL_STATE_NAP)) {
 			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
 				clk_unprepare(pwr->grp_clks[i]);
-			clk_set_rate(pwr->grp_clks[0],
+			kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[0],
 				pwr->pwrlevels[pwr->num_pwrlevels - 1].
-				gpu_freq);
+				gpu_freq, clocks[0]);
 			_isense_clk_set_rate(pwr,
 				pwr->num_pwrlevels - 1);
 		}
@@ -1664,9 +1674,9 @@ static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
 		for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
 			clk_unprepare(pwr->grp_clks[i]);
 		if ((pwr->pwrlevels[0].gpu_freq > 0)) {
-			clk_set_rate(pwr->grp_clks[0],
+			kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[0],
 				pwr->pwrlevels[pwr->num_pwrlevels - 1].
-				gpu_freq);
+				gpu_freq, clocks[0]);
 			_isense_clk_set_rate(pwr,
 				pwr->num_pwrlevels - 1);
 		}
@@ -1679,29 +1689,31 @@ static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
 			/* High latency clock maintenance. */
 			if (device->state != KGSL_STATE_NAP) {
 				if (pwr->pwrlevels[0].gpu_freq > 0) {
-					clk_set_rate(pwr->grp_clks[0],
+					kgsl_pwrctrl_clk_set_rate(
+						pwr->grp_clks[0],
 						pwr->pwrlevels
 						[pwr->active_pwrlevel].
-						gpu_freq);
+						gpu_freq, clocks[0]);
 					_isense_clk_set_rate(pwr,
 						pwr->active_pwrlevel);
 				}
-
-				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
-					clk_prepare(pwr->grp_clks[i]);
 			}
-			/* as last step, enable grp_clk
-			   this is to let GPU interrupt to come */
+
 			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
-				clk_enable(pwr->grp_clks[i]);
+				_gpu_clk_prepare_enable(device,
+					pwr->grp_clks[i], clocks[i]);
+
 			/* Enable the gpu-bimc-interface clocks */
 			if (pwr->gpu_bimc_int_clk) {
 				if (pwr->active_pwrlevel == 0 &&
 					!pwr->gpu_bimc_interface_enabled) {
-					clk_set_rate(pwr->gpu_bimc_int_clk,
-						pwr->gpu_bimc_int_clk_freq);
-					clk_prepare_enable(
-						pwr->gpu_bimc_int_clk);
+					kgsl_pwrctrl_clk_set_rate(
+						pwr->gpu_bimc_int_clk,
+						pwr->gpu_bimc_int_clk_freq,
+						"bimc_gpu_clk");
+					_bimc_clk_prepare_enable(device,
+						pwr->gpu_bimc_int_clk,
+						"bimc_gpu_clk");
 					pwr->gpu_bimc_interface_enabled = 1;
 				}
 			}
@@ -2022,7 +2034,54 @@ static int _isense_clk_set_rate(struct kgsl_pwrctrl *pwr, int level)
 	rate = clk_round_rate(pwr->grp_clks[pwr->isense_clk_indx],
 		level > pwr->isense_clk_on_level ?
 		KGSL_XO_CLK_FREQ : KGSL_ISENSE_CLK_FREQ);
-	return clk_set_rate(pwr->grp_clks[pwr->isense_clk_indx], rate);
+	return kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[pwr->isense_clk_indx],
+			rate, clocks[pwr->isense_clk_indx]);
+}
+
+/*
+ * _gpu_clk_prepare_enable - Enable the specified GPU clock
+ * Try once to enable it and then BUG() for debug
+ */
+static void _gpu_clk_prepare_enable(struct kgsl_device *device,
+		struct clk *clk, const char *name)
+{
+	int ret;
+
+	if (device->state == KGSL_STATE_NAP) {
+		ret = clk_enable(clk);
+		if (ret)
+			goto err;
+		return;
+	}
+
+	ret = clk_prepare_enable(clk);
+	if (!ret)
+		return;
+err:
+	/* Failure is fatal so BUG() to facilitate debug */
+	KGSL_DRV_FATAL(device, "KGSL:%s enable error:%d\n", name, ret);
+}
+
+/*
+ * _bimc_clk_prepare_enable - Enable the GPU-BIMC interface clock
+ * Try once to enable it and then BUG() for debug
+ */
+static void _bimc_clk_prepare_enable(struct kgsl_device *device,
+		struct clk *clk, const char *name)
+{
+	int ret = clk_prepare_enable(clk);
+	/* Failure is fatal so BUG() to facilitate debug */
+	if (ret)
+		KGSL_DRV_FATAL(device, "KGSL:%s enable error:%d\n", name, ret);
+}
+
+static int kgsl_pwrctrl_clk_set_rate(struct clk *grp_clk, unsigned int freq,
+		const char *name)
+{
+	int ret = clk_set_rate(grp_clk, freq);
+
+	WARN(ret, "KGSL:%s set freq %d failed:%d\n", name, freq, ret);
+	return ret;
 }
 
 static inline void _close_pcl(struct kgsl_pwrctrl *pwr)
@@ -2117,11 +2176,12 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
 		pwr->pwrlevels[i].gpu_freq = freq;
 	}
 
-	clk_set_rate(pwr->grp_clks[0],
-		pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
+	kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[0],
+		pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq, clocks[0]);
 
-	clk_set_rate(pwr->grp_clks[6],
-		clk_round_rate(pwr->grp_clks[6], KGSL_RBBMTIMER_CLK_FREQ));
+	kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[6],
+		clk_round_rate(pwr->grp_clks[6], KGSL_RBBMTIMER_CLK_FREQ),
+		clocks[6]);
 
 	_isense_clk_set_rate(pwr, pwr->num_pwrlevels - 1);
 
-- 
2.11.0