OSDN Git Service

da91eb5b7c2ff81252438115d1e9c75353bf3332
[immortalwrt/immortalwrt.git] / target / linux / ipq806x / patches-5.15 / 098-1-cpufreq-add-Krait-dedicated-scaling-driver.patch
1 From cc41a266280cad0b55319e614167c88dff344248 Mon Sep 17 00:00:00 2001
2 From: Ansuel Smith <ansuelsmth@gmail.com>
3 Date: Sat, 22 Feb 2020 16:33:10 +0100
4 Subject: [PATCH 1/8] cpufreq: add Krait dedicated scaling driver
5
6 This new driver is based on generic cpufreq-dt driver.
7 Krait SoCs have 2-4 cpu and one shared L2 cache that can
8 operate at different frequency based on the maximum cpu clk
9 across all core.
10 L2 frequency and voltage are scaled on every frequency change
11 if needed. Krait SoCs have a bug that can cause transition
12 problems between frequency bins; to work around this, when
13 more than one bin is crossed, the L2 frequency is first set to the
14 base rate and then to the target rate.
15 The L2 frequency use the OPP framework and use the opp-level
16 bindings to link the l2 freq to different cpu freq. This is needed
17 as the Krait l2 clks are not mapped 1:1 to the core clks and some
18 of the l2 clks are set based on a range of the cpu clks. If the driver
19 finds a broken config (for example no opp-level set) the l2 scaling is
20 skipped.
21
22 Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
23 ---
24  drivers/cpufreq/Kconfig.arm          |  14 +-
25  drivers/cpufreq/Makefile             |   2 +
26  drivers/cpufreq/qcom-cpufreq-krait.c | 629 +++++++++++++++++++++++++++
27  3 files changed, 644 insertions(+), 1 deletion(-)
28  create mode 100644 drivers/cpufreq/qcom-cpufreq-krait.c
29
30 --- a/drivers/cpufreq/Kconfig.arm
31 +++ b/drivers/cpufreq/Kconfig.arm
32 @@ -150,6 +150,18 @@ config ARM_QCOM_CPUFREQ_HW
33           The driver implements the cpufreq interface for this HW engine.
34           Say Y if you want to support CPUFreq HW.
35  
36 +config ARM_QCOM_CPUFREQ_KRAIT
37 +       tristate "CPU Frequency scaling support for Krait SoCs"
38 +       depends on ARCH_QCOM || COMPILE_TEST
39 +       select PM_OPP
40 +       select ARM_QCOM_CPUFREQ_NVMEM
41 +       help
42 +         This adds the CPUFreq driver for Qualcomm Krait SoC based boards.
43 +         This scales the cache clk and regulator based on the different cpu
44 +         clks when scaling the different core clks.
45 +
46 +         If in doubt, say N.
47 +
48  config ARM_RASPBERRYPI_CPUFREQ
49         tristate "Raspberry Pi cpufreq support"
50         depends on CLK_RASPBERRYPI || COMPILE_TEST
51 @@ -339,4 +351,4 @@ config ARM_PXA2xx_CPUFREQ
52         help
53           This add the CPUFreq driver support for Intel PXA2xx SOCs.
54  
55 -         If in doubt, say N.
56 +         If in doubt, say N.
57 \ No newline at end of file
58 --- a/drivers/cpufreq/Makefile
59 +++ b/drivers/cpufreq/Makefile
60 @@ -63,6 +63,7 @@ obj-$(CONFIG_ARM_PXA2xx_CPUFREQ)      += pxa2
61  obj-$(CONFIG_PXA3xx)                   += pxa3xx-cpufreq.o
62  obj-$(CONFIG_ARM_QCOM_CPUFREQ_HW)      += qcom-cpufreq-hw.o
63  obj-$(CONFIG_ARM_QCOM_CPUFREQ_NVMEM)   += qcom-cpufreq-nvmem.o
64 +obj-$(CONFIG_ARM_QCOM_CPUFREQ_KRAIT)   += qcom-cpufreq-krait.o
65  obj-$(CONFIG_ARM_RASPBERRYPI_CPUFREQ)  += raspberrypi-cpufreq.o
66  obj-$(CONFIG_ARM_S3C2410_CPUFREQ)      += s3c2410-cpufreq.o
67  obj-$(CONFIG_ARM_S3C2412_CPUFREQ)      += s3c2412-cpufreq.o
68 @@ -86,6 +87,7 @@ obj-$(CONFIG_ARM_TEGRA186_CPUFREQ)    += te
69  obj-$(CONFIG_ARM_TEGRA194_CPUFREQ)     += tegra194-cpufreq.o
70  obj-$(CONFIG_ARM_TI_CPUFREQ)           += ti-cpufreq.o
71  obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
72 +obj-$(CONFIG_ARM_KRAIT_CPUFREQ)                += krait-cpufreq.o
73  
74  
75  ##################################################################################
76 --- /dev/null
77 +++ b/drivers/cpufreq/qcom-cpufreq-krait.c
78 @@ -0,0 +1,629 @@
79 +// SPDX-License-Identifier: GPL-2.0
80 +
81 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
82 +
83 +#include <linux/clk.h>
84 +#include <linux/cpu.h>
85 +#include <linux/cpufreq.h>
86 +#include <linux/cpumask.h>
87 +#include <linux/err.h>
88 +#include <linux/module.h>
89 +#include <linux/of.h>
90 +#include <linux/of_device.h>
91 +#include <linux/pm_opp.h>
92 +#include <linux/platform_device.h>
93 +#include <linux/regulator/consumer.h>
94 +#include <linux/slab.h>
95 +#include <linux/thermal.h>
96 +
97 +#include "cpufreq-dt.h"
98 +
99 +static struct device *l2_dev;
100 +static struct mutex lock;
101 +
102 +struct private_data {
103 +       struct opp_table *opp_table;
104 +       struct device *cpu_dev;
105 +       struct device *l2_dev;
106 +       const char *reg_name;
107 +       bool have_static_opps;
108 +};
109 +
110 +static int set_target(struct cpufreq_policy *policy, unsigned int index)
111 +{
112 +       struct private_data *priv = policy->driver_data;
113 +       unsigned long freq = policy->freq_table[index].frequency;
114 +       unsigned long target_freq = freq * 1000;
115 +       struct dev_pm_opp *opp;
116 +       unsigned int level;
117 +       int cpu, ret;
118 +
119 +       if (l2_dev) {
120 +               int policy_cpu = policy->cpu;
121 +
122 +               mutex_lock(&lock);
123 +
124 +               /* find the max freq across all core */
125 +               for_each_present_cpu(cpu)
126 +                       if (cpu != policy_cpu)
127 +                               target_freq = max(
128 +                                       target_freq,
129 +                                       (unsigned long)cpufreq_quick_get(cpu));
130 +
131 +               opp = dev_pm_opp_find_freq_exact(priv->cpu_dev, target_freq,
132 +                                                true);
133 +               if (IS_ERR(opp)) {
134 +                       dev_err(l2_dev, "failed to find OPP for %ld\n",
135 +                               target_freq);
136 +                       ret = PTR_ERR(opp);
137 +                       goto l2_scale_fail;
138 +               }
139 +               level = dev_pm_opp_get_level(opp);
140 +               dev_pm_opp_put(opp);
141 +
142 +               /*
143 +                * Hardware constraint:
144 +                * Krait CPU cannot operate at 384MHz with L2 at 1Ghz.
145 +                * Assume index 0 is the idle freq and any level > 0 is
146 +                * an L2 freq > 384MHz.
147 +                * Skip CPU freq change in this corner case.
148 +                */
149 +               if (unlikely(index == 0 && level != 0)) {
150 +                       dev_err(priv->cpu_dev, "Krait CPU can't operate at idle freq with L2 at 1GHz");
151 +                       ret = -EINVAL;
152 +                       goto l2_scale_fail;
153 +               }
154 +
155 +               opp = dev_pm_opp_find_level_exact(l2_dev, level);
156 +               if (IS_ERR(opp)) {
157 +                       dev_err(l2_dev,
158 +                               "failed to find level OPP for %d\n", level);
159 +                       ret = PTR_ERR(opp);
160 +                       goto l2_scale_fail;
161 +               }
162 +               target_freq = dev_pm_opp_get_freq(opp);
163 +               dev_pm_opp_put(opp);
164 +
165 +               ret = dev_pm_opp_set_rate(l2_dev, target_freq);
166 +               if (ret)
167 +                       goto l2_scale_fail;
168 +
169 +               mutex_unlock(&lock);
170 +       }
171 +
172 +       ret = dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
173 +       if (ret)
174 +               return ret;
175 +
176 +       arch_set_freq_scale(policy->related_cpus, freq,
177 +                           policy->cpuinfo.max_freq);
178 +
179 +       return 0;
180 +l2_scale_fail:
181 +       mutex_unlock(&lock);
182 +
183 +       return ret;
184 +}
185 +
186 +/*
187 + * An earlier version of opp-v1 bindings used to name the regulator
188 + * "cpu0-supply", we still need to handle that for backwards compatibility.
189 + */
190 +static const char *find_supply_name(struct device *dev)
191 +{
192 +       struct device_node *np;
193 +       struct property *pp;
194 +       int cpu = dev->id;
195 +       const char *name = NULL;
196 +
197 +       np = of_node_get(dev->of_node);
198 +
199 +       /* This must be valid for sure */
200 +       if (WARN_ON(!np))
201 +               return NULL;
202 +
203 +       /* Try "cpu0" for older DTs */
204 +       if (!cpu) {
205 +               pp = of_find_property(np, "cpu0-supply", NULL);
206 +               if (pp) {
207 +                       name = "cpu0";
208 +                       goto node_put;
209 +               }
210 +       }
211 +
212 +       pp = of_find_property(np, "cpu-supply", NULL);
213 +       if (pp) {
214 +               name = "cpu";
215 +               goto node_put;
216 +       }
217 +
218 +       dev_dbg(dev, "no regulator for cpu%d\n", cpu);
219 +node_put:
220 +       of_node_put(np);
221 +       return name;
222 +}
223 +
224 +static int resources_available(void)
225 +{
226 +       struct device *cpu_dev;
227 +       struct regulator *cpu_reg;
228 +       struct clk *cpu_clk;
229 +       int ret = 0;
230 +       const char *name;
231 +
232 +       cpu_dev = get_cpu_device(0);
233 +       if (!cpu_dev) {
234 +               pr_err("failed to get cpu0 device\n");
235 +               return -ENODEV;
236 +       }
237 +
238 +       cpu_clk = clk_get(cpu_dev, NULL);
239 +       ret = PTR_ERR_OR_ZERO(cpu_clk);
240 +       if (ret) {
241 +               /*
242 +                * If cpu's clk node is present, but clock is not yet
243 +                * registered, we should try deferring probe.
244 +                */
245 +               if (ret == -EPROBE_DEFER)
246 +                       dev_dbg(cpu_dev, "clock not ready, retry\n");
247 +               else
248 +                       dev_err(cpu_dev, "failed to get clock: %d\n", ret);
249 +
250 +               return ret;
251 +       }
252 +
253 +       clk_put(cpu_clk);
254 +
255 +       name = find_supply_name(cpu_dev);
256 +       /* Platform doesn't require regulator */
257 +       if (!name)
258 +               return 0;
259 +
260 +       cpu_reg = regulator_get_optional(cpu_dev, name);
261 +       ret = PTR_ERR_OR_ZERO(cpu_reg);
262 +       if (ret) {
263 +               /*
264 +                * If cpu's regulator supply node is present, but regulator is
265 +                * not yet registered, we should try deferring probe.
266 +                */
267 +               if (ret == -EPROBE_DEFER)
268 +                       dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
269 +               else
270 +                       dev_dbg(cpu_dev, "no regulator for cpu0: %d\n", ret);
271 +
272 +               return ret;
273 +       }
274 +
275 +       regulator_put(cpu_reg);
276 +       return 0;
277 +}
278 +
279 +static int cpufreq_init(struct cpufreq_policy *policy)
280 +{
281 +       struct cpufreq_frequency_table *freq_table;
282 +       struct opp_table *opp_table = NULL;
283 +       unsigned int transition_latency;
284 +       struct private_data *priv;
285 +       struct device *cpu_dev;
286 +       bool fallback = false;
287 +       struct clk *cpu_clk;
288 +       const char *name;
289 +       int ret;
290 +
291 +       cpu_dev = get_cpu_device(policy->cpu);
292 +       if (!cpu_dev) {
293 +               pr_err("failed to get cpu%d device\n", policy->cpu);
294 +               return -ENODEV;
295 +       }
296 +
297 +       cpu_clk = clk_get(cpu_dev, NULL);
298 +       if (IS_ERR(cpu_clk)) {
299 +               ret = PTR_ERR(cpu_clk);
300 +               dev_err(cpu_dev, "%s: failed to get clk: %d\n", __func__, ret);
301 +               return ret;
302 +       }
303 +
304 +       /* Get OPP-sharing information from "operating-points-v2" bindings */
305 +       ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
306 +       if (ret) {
307 +               if (ret != -ENOENT)
308 +                       goto out_put_clk;
309 +
310 +               /*
311 +                * operating-points-v2 not supported, fallback to old method of
312 +                * finding shared-OPPs for backward compatibility if the
313 +                * platform hasn't set sharing CPUs.
314 +                */
315 +               if (dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus))
316 +                       fallback = true;
317 +       }
318 +
319 +       /*
320 +        * OPP layer will be taking care of regulators now, but it needs to know
321 +        * the name of the regulator first.
322 +        */
323 +       name = find_supply_name(cpu_dev);
324 +       if (name) {
325 +               opp_table = dev_pm_opp_set_regulators(cpu_dev, &name, 1);
326 +               if (IS_ERR(opp_table)) {
327 +                       ret = PTR_ERR(opp_table);
328 +                       dev_err(cpu_dev,
329 +                               "Failed to set regulator for cpu%d: %d\n",
330 +                               policy->cpu, ret);
331 +                       goto out_put_clk;
332 +               }
333 +       }
334 +
335 +       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
336 +       if (!priv) {
337 +               ret = -ENOMEM;
338 +               goto out_put_regulator;
339 +       }
340 +
341 +       priv->reg_name = name;
342 +       priv->opp_table = opp_table;
343 +
344 +       /*
345 +        * Initialize OPP tables for all policy->cpus. They will be shared by
346 +        * all CPUs which have marked their CPUs shared with OPP bindings.
347 +        *
348 +        * For platforms not using operating-points-v2 bindings, we do this
349 +        * before updating policy->cpus. Otherwise, we will end up creating
350 +        * duplicate OPPs for policy->cpus.
351 +        *
352 +        * OPPs might be populated at runtime, don't check for error here
353 +        */
354 +       if (!dev_pm_opp_of_cpumask_add_table(policy->cpus))
355 +               priv->have_static_opps = true;
356 +
357 +       /*
358 +        * But we need OPP table to function so if it is not there let's
359 +        * give platform code chance to provide it for us.
360 +        */
361 +       ret = dev_pm_opp_get_opp_count(cpu_dev);
362 +       if (ret < 0) {
363 +               dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
364 +               ret = -EPROBE_DEFER;
365 +               goto out_free_opp;
366 +       }
367 +
368 +       if (fallback) {
369 +               cpumask_setall(policy->cpus);
370 +
371 +               /*
372 +                * OPP tables are initialized only for policy->cpu, do it for
373 +                * others as well.
374 +                */
375 +               ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
376 +               if (ret)
377 +                       dev_err(cpu_dev,
378 +                               "%s: failed to mark OPPs as shared: %d\n",
379 +                               __func__, ret);
380 +       }
381 +
382 +       ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
383 +       if (ret) {
384 +               dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
385 +               goto out_free_opp;
386 +       }
387 +
388 +       priv->cpu_dev = cpu_dev;
389 +
390 +       policy->driver_data = priv;
391 +       policy->clk = cpu_clk;
392 +       policy->freq_table = freq_table;
393 +
394 +       policy->suspend_freq = dev_pm_opp_get_suspend_opp_freq(cpu_dev) / 1000;
395 +
396 +       transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
397 +       if (!transition_latency)
398 +               transition_latency = CPUFREQ_ETERNAL;
399 +
400 +       policy->cpuinfo.transition_latency = transition_latency;
401 +       policy->dvfs_possible_from_any_cpu = true;
402 +
403 +       dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
404 +
405 +       return 0;
406 +
407 +out_free_opp:
408 +       if (priv->have_static_opps)
409 +               dev_pm_opp_of_cpumask_remove_table(policy->cpus);
410 +       kfree(priv);
411 +out_put_regulator:
412 +       if (name)
413 +               dev_pm_opp_put_regulators(opp_table);
414 +out_put_clk:
415 +       clk_put(cpu_clk);
416 +
417 +       return ret;
418 +}
419 +
420 +static int cpufreq_online(struct cpufreq_policy *policy)
421 +{
422 +       /* We did light-weight tear down earlier, nothing to do here */
423 +       return 0;
424 +}
425 +
426 +static int cpufreq_offline(struct cpufreq_policy *policy)
427 +{
428 +       /*
429 +        * Preserve policy->driver_data and don't free resources on light-weight
430 +        * tear down.
431 +        */
432 +       return 0;
433 +}
434 +
435 +static int cpufreq_exit(struct cpufreq_policy *policy)
436 +{
437 +       struct private_data *priv = policy->driver_data;
438 +
439 +       dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
440 +       if (priv->have_static_opps)
441 +               dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
442 +       if (priv->reg_name)
443 +               dev_pm_opp_put_regulators(priv->opp_table);
444 +
445 +       clk_put(policy->clk);
446 +       kfree(priv);
447 +
448 +       return 0;
449 +}
450 +
451 +static struct cpufreq_driver krait_cpufreq_driver = {
452 +       .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
453 +                CPUFREQ_IS_COOLING_DEV,
454 +       .verify = cpufreq_generic_frequency_table_verify,
455 +       .target_index = set_target,
456 +       .get = cpufreq_generic_get,
457 +       .init = cpufreq_init,
458 +       .exit = cpufreq_exit,
459 +       .online = cpufreq_online,
460 +       .offline = cpufreq_offline,
461 +       .name = "krait-cpufreq",
462 +       .suspend = cpufreq_generic_suspend,
463 +};
464 +
465 +struct krait_data {
466 +       unsigned long idle_freq;
467 +       bool regulator_enabled;
468 +};
469 +
470 +static int krait_cache_set_opp(struct dev_pm_set_opp_data *data)
471 +{
472 +       unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
473 +       struct dev_pm_opp_supply *supply = &data->new_opp.supplies[0];
474 +       struct regulator *reg = data->regulators[0];
475 +       struct clk *clk = data->clk;
476 +       struct krait_data *kdata;
477 +       unsigned long idle_freq;
478 +       int ret;
479 +
480 +       kdata = (struct krait_data *)dev_get_drvdata(data->dev);
481 +       idle_freq = kdata->idle_freq;
482 +
483 +       /* Scaling up? Scale voltage before frequency */
484 +       if (freq >= old_freq) {
485 +               ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
486 +                                                   supply->u_volt,
487 +                                                   supply->u_volt_max);
488 +               if (ret)
489 +                       goto exit;
490 +       }
491 +
492 +       /*
493 +        * Set to idle bin if switching from normal to high bin
494 +        * or vice versa. It has been noticed that a bug is triggered
495 +        * in cache scaling when more than one bin is scaled, to fix
496 +        * this we first need to transition to the base rate and then
497 +        * to target rate
498 +        */
499 +       if (likely(freq != idle_freq && old_freq != idle_freq)) {
500 +               ret = clk_set_rate(clk, idle_freq);
501 +               if (ret)
502 +                       goto exit;
503 +       }
504 +
505 +       ret = clk_set_rate(clk, freq);
506 +       if (ret)
507 +               goto exit;
508 +
509 +       /* Scaling down? Scale voltage after frequency */
510 +       if (freq < old_freq) {
511 +               ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
512 +                                                   supply->u_volt,
513 +                                                   supply->u_volt_max);
514 +       }
515 +
516 +       if (unlikely(!kdata->regulator_enabled)) {
517 +               ret = regulator_enable(reg);
518 +               if (ret < 0)
519 +                       dev_warn(data->dev, "Failed to enable regulator: %d", ret);
520 +               else
521 +                       kdata->regulator_enabled = true;
522 +       }
523 +
524 +exit:
525 +       return ret;
526 +};
527 +
528 +static int krait_cache_probe(struct platform_device *pdev)
529 +{
530 +       struct device *dev = &pdev->dev;
531 +       struct krait_data *data;
532 +       struct opp_table *table;
533 +       struct dev_pm_opp *opp;
534 +       struct device *cpu_dev;
535 +       int ret;
536 +
537 +       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
538 +       if (!data)
539 +               return -ENOMEM;
540 +
541 +       table = dev_pm_opp_set_regulators(dev, (const char *[]){ "l2" }, 1);
542 +       if (IS_ERR(table)) {
543 +               ret = PTR_ERR(table);
544 +               if (ret != -EPROBE_DEFER)
545 +                       dev_err(dev, "failed to set regulators %d\n", ret);
546 +
547 +               return ret;
548 +       }
549 +
550 +       ret = PTR_ERR_OR_ZERO(
551 +               dev_pm_opp_register_set_opp_helper(dev, krait_cache_set_opp));
552 +       if (ret)
553 +               return ret;
554 +
555 +       ret = dev_pm_opp_of_add_table(dev);
556 +       if (ret) {
557 +               dev_err(dev, "failed to parse L2 freq thresholds\n");
558 +               return ret;
559 +       }
560 +
561 +       opp = dev_pm_opp_find_freq_ceil(dev, &data->idle_freq);
562 +       dev_pm_opp_put(opp);
563 +
564 +       /*
565 +        * Check if we have at least opp-level 1, 0 should always be set to
566 +        * the idle freq
567 +        */
568 +       opp = dev_pm_opp_find_level_exact(dev, 1);
569 +       if (IS_ERR(opp)) {
570 +               ret = PTR_ERR(opp);
571 +               dev_err(dev,
572 +                       "Invalid configuration found of l2 opp. Can't find opp-level 1");
573 +               goto invalid_conf;
574 +       }
575 +       dev_pm_opp_put(opp);
576 +
577 +       /*
578 +        * Check opp-level configuration
579 +        * At least 2 levels must be set or the cache will always be scaled
580 +        * to the idle freq, causing some performance problems
581 +        *
582 +        * In case of invalid configuration, the l2 scaling is skipped
583 +        */
584 +       cpu_dev = get_cpu_device(0);
585 +       if (!cpu_dev) {
586 +               pr_err("failed to get cpu0 device\n");
587 +               return -ENODEV;
588 +       }
589 +
590 +       /* With opp error assume cpufreq still has to be registered. Defer probe. */
591 +       ret = dev_pm_opp_get_opp_count(cpu_dev);
592 +       if (ret < 0) {
593 +               ret = -EPROBE_DEFER;
594 +               goto invalid_conf;
595 +       }
596 +
597 +       /*
598 +        * Check if we have at least opp-level 1 in the cpu opp, 0 should always
599 +        * be set to the idle freq
600 +        */
601 +       opp = dev_pm_opp_find_level_exact(cpu_dev, 1);
602 +       if (IS_ERR(opp)) {
603 +               ret = PTR_ERR(opp);
604 +               if (ret != -EPROBE_DEFER)
605 +                       dev_err(dev,
606 +                               "Invalid configuration found of cpu opp. Can't find opp-level 1");
607 +               goto invalid_conf;
608 +       }
609 +       dev_pm_opp_put(opp);
610 +
611 +       platform_set_drvdata(pdev, data);
612 +
613 +       mutex_init(&lock);
614 +
615 +       /* The l2 scaling is enabled by linking the cpufreq driver */
616 +       l2_dev = dev;
617 +
618 +       return 0;
619 +
620 +invalid_conf:
621 +       dev_pm_opp_remove_table(dev);
622 +       dev_pm_opp_put_regulators(table);
623 +       dev_pm_opp_unregister_set_opp_helper(table);
624 +
625 +       return ret;
626 +};
627 +
628 +static int krait_cache_remove(struct platform_device *pdev)
629 +{
630 +       struct device *dev = &pdev->dev;
631 +       struct opp_table *table = dev_pm_opp_get_opp_table(dev);
632 +
633 +       dev_pm_opp_remove_table(dev);
634 +       dev_pm_opp_put_regulators(table);
635 +       dev_pm_opp_unregister_set_opp_helper(table);
636 +
637 +       return 0;
638 +};
639 +
640 +static const struct of_device_id krait_cache_match_table[] = {
641 +       { .compatible = "qcom,krait-cache" },
642 +       {}
643 +};
644 +
645 +static struct platform_driver krait_cache_driver = {
646 +       .driver = {
647 +               .name   = "krait-cache",
648 +               .of_match_table = krait_cache_match_table,
649 +       },
650 +       .probe          = krait_cache_probe,
651 +       .remove         = krait_cache_remove,
652 +};
653 +module_platform_driver(krait_cache_driver);
654 +
655 +static int krait_cpufreq_probe(struct platform_device *pdev)
656 +{
657 +       struct cpufreq_dt_platform_data *data = dev_get_platdata(&pdev->dev);
658 +       int ret;
659 +
660 +       /*
661 +        * All per-cluster (CPUs sharing clock/voltages) initialization is done
662 +        * from ->init(). In probe(), we just need to make sure that clk and
663 +        * regulators are available. Else defer probe and retry.
664 +        *
665 +        * FIXME: Is checking this only for CPU0 sufficient ?
666 +        */
667 +       ret = resources_available();
668 +       if (ret)
669 +               return ret;
670 +
671 +       if (data) {
672 +               if (data->have_governor_per_policy)
673 +                       krait_cpufreq_driver.flags |=
674 +                               CPUFREQ_HAVE_GOVERNOR_PER_POLICY;
675 +
676 +               krait_cpufreq_driver.resume = data->resume;
677 +               if (data->suspend)
678 +                       krait_cpufreq_driver.suspend = data->suspend;
679 +       }
680 +
681 +       ret = cpufreq_register_driver(&krait_cpufreq_driver);
682 +       if (ret)
683 +               dev_err(&pdev->dev, "failed register driver: %d\n", ret);
684 +
685 +       return ret;
686 +}
687 +
688 +static int krait_cpufreq_remove(struct platform_device *pdev)
689 +{
690 +       cpufreq_unregister_driver(&krait_cpufreq_driver);
691 +       return 0;
692 +}
693 +
694 +static struct platform_driver krait_cpufreq_platdrv = {
695 +       .driver = {
696 +               .name   = "krait-cpufreq",
697 +       },
698 +       .probe          = krait_cpufreq_probe,
699 +       .remove         = krait_cpufreq_remove,
700 +};
701 +
702 +module_platform_driver(krait_cpufreq_platdrv);
703 +
704 +MODULE_ALIAS("platform:krait-cpufreq");
705 +MODULE_AUTHOR("Ansuel Smith <ansuelsmth@gmail.com>");
706 +MODULE_DESCRIPTION("Dedicated Krait SoC cpufreq driver");
707 +MODULE_LICENSE("GPL");