2 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
3 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
9 * Standard functionality for the common clock API. See Documentation/clk.txt
12 #include <linux/clk-provider.h>
13 #include <linux/clk/clk-conf.h>
14 #include <linux/module.h>
15 #include <linux/mutex.h>
16 #include <linux/spinlock.h>
17 #include <linux/err.h>
18 #include <linux/list.h>
19 #include <linux/slab.h>
21 #include <linux/device.h>
22 #include <linux/init.h>
23 #include <linux/sched.h>
27 static DEFINE_SPINLOCK(enable_lock);
28 static DEFINE_MUTEX(prepare_lock);
30 static struct task_struct *prepare_owner;
31 static struct task_struct *enable_owner;
33 static int prepare_refcnt;
34 static int enable_refcnt;
36 static HLIST_HEAD(clk_root_list);
37 static HLIST_HEAD(clk_orphan_list);
38 static LIST_HEAD(clk_notifier_list);
40 static long clk_core_get_accuracy(struct clk_core *core);
41 static unsigned long clk_core_get_rate(struct clk_core *core);
42 static int clk_core_get_phase(struct clk_core *core);
43 static bool clk_core_is_prepared(struct clk_core *core);
44 static bool clk_core_is_enabled(struct clk_core *core);
45 static struct clk_core *clk_core_lookup(const char *name);
47 /*** private data structures ***/
51 const struct clk_ops *ops;
54 struct clk_core *parent;
55 const char **parent_names;
56 struct clk_core **parents;
60 unsigned long req_rate;
61 unsigned long new_rate;
62 struct clk_core *new_parent;
63 struct clk_core *new_child;
65 unsigned int enable_count;
66 unsigned int prepare_count;
67 unsigned long accuracy;
69 struct hlist_head children;
70 struct hlist_node child_node;
71 struct hlist_node debug_node;
72 struct hlist_head clks;
73 unsigned int notifier_count;
74 #ifdef CONFIG_DEBUG_FS
75 struct dentry *dentry;
80 #define CREATE_TRACE_POINTS
81 #include <trace/events/clk.h>
84 struct clk_core *core;
87 unsigned long min_rate;
88 unsigned long max_rate;
89 struct hlist_node clks_node;
93 static void clk_prepare_lock(void)
95 if (!mutex_trylock(&prepare_lock)) {
96 if (prepare_owner == current) {
100 mutex_lock(&prepare_lock);
102 WARN_ON_ONCE(prepare_owner != NULL);
103 WARN_ON_ONCE(prepare_refcnt != 0);
104 prepare_owner = current;
108 static void clk_prepare_unlock(void)
110 WARN_ON_ONCE(prepare_owner != current);
111 WARN_ON_ONCE(prepare_refcnt == 0);
113 if (--prepare_refcnt)
115 prepare_owner = NULL;
116 mutex_unlock(&prepare_lock);
119 static unsigned long clk_enable_lock(void)
123 if (!spin_trylock_irqsave(&enable_lock, flags)) {
124 if (enable_owner == current) {
128 spin_lock_irqsave(&enable_lock, flags);
130 WARN_ON_ONCE(enable_owner != NULL);
131 WARN_ON_ONCE(enable_refcnt != 0);
132 enable_owner = current;
137 static void clk_enable_unlock(unsigned long flags)
139 WARN_ON_ONCE(enable_owner != current);
140 WARN_ON_ONCE(enable_refcnt == 0);
145 spin_unlock_irqrestore(&enable_lock, flags);
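/*
 * Illustrative sketch, not part of the original file: the reentrant locking
 * pattern used by clk_prepare_lock()/clk_enable_lock() above, reduced to its
 * core idea. All example_* names are hypothetical.
 */
static DEFINE_MUTEX(example_lock);
static struct task_struct *example_owner;
static int example_refcnt;

static void __maybe_unused example_lock_reentrant(void)
{
        if (!mutex_trylock(&example_lock)) {
                if (example_owner == current) {
                        /* same task already holds the lock: just nest */
                        example_refcnt++;
                        return;
                }
                mutex_lock(&example_lock);
        }
        WARN_ON_ONCE(example_owner != NULL);
        WARN_ON_ONCE(example_refcnt != 0);
        example_owner = current;
        example_refcnt = 1;
}

static void __maybe_unused example_unlock_reentrant(void)
{
        WARN_ON_ONCE(example_owner != current);
        WARN_ON_ONCE(example_refcnt == 0);

        if (--example_refcnt)
                return;
        example_owner = NULL;
        mutex_unlock(&example_lock);
}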
148 /*** debugfs support ***/
150 #ifdef CONFIG_DEBUG_FS
151 #include <linux/debugfs.h>
153 static struct dentry *rootdir;
154 static int inited = 0;
155 static DEFINE_MUTEX(clk_debug_lock);
156 static HLIST_HEAD(clk_debug_list);
158 static struct hlist_head *all_lists[] = {
164 static struct hlist_head *orphan_list[] = {
169 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
175 seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
177 30 - level * 3, c->name,
178 c->enable_count, c->prepare_count, clk_core_get_rate(c),
179 clk_core_get_accuracy(c), clk_core_get_phase(c));
182 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
185 struct clk_core *child;
190 clk_summary_show_one(s, c, level);
192 hlist_for_each_entry(child, &c->children, child_node)
193 clk_summary_show_subtree(s, child, level + 1);
196 static int clk_summary_show(struct seq_file *s, void *data)
199 struct hlist_head **lists = (struct hlist_head **)s->private;
201 seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n");
202 seq_puts(s, "----------------------------------------------------------------------------------------\n");
206 for (; *lists; lists++)
207 hlist_for_each_entry(c, *lists, child_node)
208 clk_summary_show_subtree(s, c, 0);
210 clk_prepare_unlock();
216 static int clk_summary_open(struct inode *inode, struct file *file)
218 return single_open(file, clk_summary_show, inode->i_private);
221 static const struct file_operations clk_summary_fops = {
222 .open = clk_summary_open,
225 .release = single_release,
228 static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
233 seq_printf(s, "\"%s\": { ", c->name);
234 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
235 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
236 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
237 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
238 seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
241 static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
243 struct clk_core *child;
248 clk_dump_one(s, c, level);
250 hlist_for_each_entry(child, &c->children, child_node) {
252 clk_dump_subtree(s, child, level + 1);
258 static int clk_dump(struct seq_file *s, void *data)
261 bool first_node = true;
262 struct hlist_head **lists = (struct hlist_head **)s->private;
268 for (; *lists; lists++) {
269 hlist_for_each_entry(c, *lists, child_node) {
273 clk_dump_subtree(s, c, 0);
277 clk_prepare_unlock();
284 static int clk_dump_open(struct inode *inode, struct file *file)
286 return single_open(file, clk_dump, inode->i_private);
289 static const struct file_operations clk_dump_fops = {
290 .open = clk_dump_open,
293 .release = single_release,
296 static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
301 if (!core || !pdentry) {
306 d = debugfs_create_dir(core->name, pdentry);
312 d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry,
317 d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry,
318 (u32 *)&core->accuracy);
322 d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry,
323 (u32 *)&core->phase);
327 d = debugfs_create_x32("clk_flags", S_IRUGO, core->dentry,
328 (u32 *)&core->flags);
332 d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry,
333 (u32 *)&core->prepare_count);
337 d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry,
338 (u32 *)&core->enable_count);
342 d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry,
343 (u32 *)&core->notifier_count);
347 if (core->ops->debug_init) {
348 ret = core->ops->debug_init(core->hw, core->dentry);
357 debugfs_remove_recursive(core->dentry);
364 * clk_debug_register - add a clk node to the debugfs clk tree
365 * @core: the clk being added to the debugfs clk tree
367 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
368 * initialized. Otherwise it bails out early since the debugfs clk tree
369 * will be created lazily by clk_debug_init as part of a late_initcall.
371 static int clk_debug_register(struct clk_core *core)
375 mutex_lock(&clk_debug_lock);
376 hlist_add_head(&core->debug_node, &clk_debug_list);
381 ret = clk_debug_create_one(core, rootdir);
383 mutex_unlock(&clk_debug_lock);
389 * clk_debug_unregister - remove a clk node from the debugfs clk tree
390 * @core: the clk being removed from the debugfs clk tree
392 * Dynamically removes a clk and all of its child clk nodes from the
393 * debugfs clk tree if clk->dentry points to debugfs created by
394 * clk_debug_register in __clk_init.
396 static void clk_debug_unregister(struct clk_core *core)
398 mutex_lock(&clk_debug_lock);
399 hlist_del_init(&core->debug_node);
400 debugfs_remove_recursive(core->dentry);
402 mutex_unlock(&clk_debug_lock);
405 struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
406 void *data, const struct file_operations *fops)
408 struct dentry *d = NULL;
410 if (hw->core->dentry)
411 d = debugfs_create_file(name, mode, hw->core->dentry, data,
416 EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
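/*
 * Hedged usage sketch, not part of the original file: how a provider might
 * hang an extra debugfs file off its clk directory with the helper above.
 * The file name, the fops argument and the example_* name are hypothetical.
 */
static int __maybe_unused example_add_debug_file(struct clk_hw *hw,
                                        const struct file_operations *fops)
{
        struct dentry *d;

        d = clk_debugfs_add_file(hw, "example_state", S_IRUGO, hw, fops);

        return d ? 0 : -ENODEV;
}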
419 * clk_debug_init - lazily create the debugfs clk tree visualization
421 * clks are often initialized very early during boot before memory can
422 * be dynamically allocated and well before debugfs is set up.
423 * clk_debug_init walks the clk tree hierarchy while holding
424 * prepare_lock and creates the topology as part of a late_initcall,
425 * thus ensuring that clks initialized very early will still be
426 * represented in the debugfs clk tree. This function should only be
427 * called once at boot-time, and any clks added dynamically afterwards
428 * will be registered with clk_debug_register.
430 static int __init clk_debug_init(void)
432 struct clk_core *core;
435 rootdir = debugfs_create_dir("clk", NULL);
440 d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
445 d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
450 d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
451 &orphan_list, &clk_summary_fops);
455 d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
456 &orphan_list, &clk_dump_fops);
460 mutex_lock(&clk_debug_lock);
461 hlist_for_each_entry(core, &clk_debug_list, debug_node)
462 clk_debug_create_one(core, rootdir);
465 mutex_unlock(&clk_debug_lock);
469 late_initcall(clk_debug_init);
471 static inline int clk_debug_register(struct clk_core *core) { return 0; }
472 static inline void clk_debug_reparent(struct clk_core *core,
473 struct clk_core *new_parent)
476 static inline void clk_debug_unregister(struct clk_core *core)
481 /* caller must hold prepare_lock */
482 static void clk_unprepare_unused_subtree(struct clk_core *core)
484 struct clk_core *child;
486 lockdep_assert_held(&prepare_lock);
488 hlist_for_each_entry(child, &core->children, child_node)
489 clk_unprepare_unused_subtree(child);
491 if (core->prepare_count)
494 if (core->flags & CLK_IGNORE_UNUSED)
497 if (clk_core_is_prepared(core)) {
498 trace_clk_unprepare(core);
499 if (core->ops->unprepare_unused)
500 core->ops->unprepare_unused(core->hw);
501 else if (core->ops->unprepare)
502 core->ops->unprepare(core->hw);
503 trace_clk_unprepare_complete(core);
507 /* caller must hold prepare_lock */
508 static void clk_disable_unused_subtree(struct clk_core *core)
510 struct clk_core *child;
513 lockdep_assert_held(&prepare_lock);
515 hlist_for_each_entry(child, &core->children, child_node)
516 clk_disable_unused_subtree(child);
518 flags = clk_enable_lock();
520 if (core->enable_count)
523 if (core->flags & CLK_IGNORE_UNUSED)
527 * some gate clocks have special needs during the disable-unused
528 * sequence. Call .disable_unused if available, otherwise fall
531 if (clk_core_is_enabled(core)) {
532 trace_clk_disable(core);
533 if (core->ops->disable_unused)
534 core->ops->disable_unused(core->hw);
535 else if (core->ops->disable)
536 core->ops->disable(core->hw);
537 trace_clk_disable_complete(core);
541 clk_enable_unlock(flags);
544 static bool clk_ignore_unused;
545 static int __init clk_ignore_unused_setup(char *__unused)
547 clk_ignore_unused = true;
550 __setup("clk_ignore_unused", clk_ignore_unused_setup);
552 static int clk_disable_unused(void)
554 struct clk_core *core;
556 if (clk_ignore_unused) {
557 pr_warn("clk: Not disabling unused clocks\n");
563 hlist_for_each_entry(core, &clk_root_list, child_node)
564 clk_disable_unused_subtree(core);
566 hlist_for_each_entry(core, &clk_orphan_list, child_node)
567 clk_disable_unused_subtree(core);
569 hlist_for_each_entry(core, &clk_root_list, child_node)
570 clk_unprepare_unused_subtree(core);
572 hlist_for_each_entry(core, &clk_orphan_list, child_node)
573 clk_unprepare_unused_subtree(core);
575 clk_prepare_unlock();
579 late_initcall_sync(clk_disable_unused);
581 /*** helper functions ***/
583 const char *__clk_get_name(struct clk *clk)
585 return !clk ? NULL : clk->core->name;
587 EXPORT_SYMBOL_GPL(__clk_get_name);
589 struct clk_hw *__clk_get_hw(struct clk *clk)
591 return !clk ? NULL : clk->core->hw;
593 EXPORT_SYMBOL_GPL(__clk_get_hw);
595 u8 __clk_get_num_parents(struct clk *clk)
597 return !clk ? 0 : clk->core->num_parents;
599 EXPORT_SYMBOL_GPL(__clk_get_num_parents);
601 struct clk *__clk_get_parent(struct clk *clk)
606 /* TODO: Create a per-user clk and change callers to call clk_put */
607 return !clk->core->parent ? NULL : clk->core->parent->hw->clk;
609 EXPORT_SYMBOL_GPL(__clk_get_parent);
611 static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
614 if (!core || index >= core->num_parents)
616 else if (!core->parents)
617 return clk_core_lookup(core->parent_names[index]);
618 else if (!core->parents[index])
619 return core->parents[index] =
620 clk_core_lookup(core->parent_names[index]);
622 return core->parents[index];
625 struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
627 struct clk_core *parent;
632 parent = clk_core_get_parent_by_index(clk->core, index);
634 return !parent ? NULL : parent->hw->clk;
636 EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
638 unsigned int __clk_get_enable_count(struct clk *clk)
640 return !clk ? 0 : clk->core->enable_count;
643 static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
654 if (core->flags & CLK_IS_ROOT)
664 unsigned long __clk_get_rate(struct clk *clk)
669 return clk_core_get_rate_nolock(clk->core);
671 EXPORT_SYMBOL_GPL(__clk_get_rate);
673 static unsigned long __clk_get_accuracy(struct clk_core *core)
678 return core->accuracy;
681 unsigned long __clk_get_flags(struct clk *clk)
683 return !clk ? 0 : clk->core->flags;
685 EXPORT_SYMBOL_GPL(__clk_get_flags);
687 static bool clk_core_is_prepared(struct clk_core *core)
695 * .is_prepared is optional for clocks that can prepare;
696 * fall back to the software usage counter if it is missing
698 if (!core->ops->is_prepared) {
699 ret = core->prepare_count ? 1 : 0;
703 ret = core->ops->is_prepared(core->hw);
708 bool __clk_is_prepared(struct clk *clk)
713 return clk_core_is_prepared(clk->core);
716 static bool clk_core_is_enabled(struct clk_core *core)
724 * .is_enabled is only mandatory for clocks that gate;
725 * fall back to the software usage counter if .is_enabled is missing
727 if (!core->ops->is_enabled) {
728 ret = core->enable_count ? 1 : 0;
732 ret = core->ops->is_enabled(core->hw);
737 bool __clk_is_enabled(struct clk *clk)
742 return clk_core_is_enabled(clk->core);
744 EXPORT_SYMBOL_GPL(__clk_is_enabled);
746 static struct clk_core *__clk_lookup_subtree(const char *name,
747 struct clk_core *core)
749 struct clk_core *child;
750 struct clk_core *ret;
752 if (!strcmp(core->name, name))
755 hlist_for_each_entry(child, &core->children, child_node) {
756 ret = __clk_lookup_subtree(name, child);
764 static struct clk_core *clk_core_lookup(const char *name)
766 struct clk_core *root_clk;
767 struct clk_core *ret;
772 /* search the 'proper' clk tree first */
773 hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
774 ret = __clk_lookup_subtree(name, root_clk);
779 /* if not found, then search the orphan tree */
780 hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
781 ret = __clk_lookup_subtree(name, root_clk);
789 static bool mux_is_better_rate(unsigned long rate, unsigned long now,
790 unsigned long best, unsigned long flags)
792 if (flags & CLK_MUX_ROUND_CLOSEST)
793 return abs(now - rate) < abs(best - rate);
795 return now <= rate && now > best;
799 clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate,
800 unsigned long min_rate,
801 unsigned long max_rate,
802 unsigned long *best_parent_rate,
803 struct clk_hw **best_parent_p,
806 struct clk_core *core = hw->core, *parent, *best_parent = NULL;
808 unsigned long parent_rate, best = 0;
810 /* if NO_REPARENT flag set, pass through to current parent */
811 if (core->flags & CLK_SET_RATE_NO_REPARENT) {
812 parent = core->parent;
813 if (core->flags & CLK_SET_RATE_PARENT)
814 best = __clk_determine_rate(parent ? parent->hw : NULL,
815 rate, min_rate, max_rate);
817 best = clk_core_get_rate_nolock(parent);
819 best = clk_core_get_rate_nolock(core);
823 /* find the parent that can provide the fastest rate <= rate */
824 num_parents = core->num_parents;
825 for (i = 0; i < num_parents; i++) {
826 parent = clk_core_get_parent_by_index(core, i);
829 if (core->flags & CLK_SET_RATE_PARENT)
830 parent_rate = __clk_determine_rate(parent->hw, rate,
834 parent_rate = clk_core_get_rate_nolock(parent);
835 if (mux_is_better_rate(rate, parent_rate, best, flags)) {
836 best_parent = parent;
843 *best_parent_p = best_parent->hw;
844 *best_parent_rate = best;
849 struct clk *__clk_lookup(const char *name)
851 struct clk_core *core = clk_core_lookup(name);
853 return !core ? NULL : core->hw->clk;
856 static void clk_core_get_boundaries(struct clk_core *core,
857 unsigned long *min_rate,
858 unsigned long *max_rate)
860 struct clk *clk_user;
863 *max_rate = ULONG_MAX;
865 hlist_for_each_entry(clk_user, &core->clks, clks_node)
866 *min_rate = max(*min_rate, clk_user->min_rate);
868 hlist_for_each_entry(clk_user, &core->clks, clks_node)
869 *max_rate = min(*max_rate, clk_user->max_rate);
873 * Helper for finding the best parent to provide a given frequency. This can be used
874 * directly as a determine_rate callback (e.g. for a mux), or from a more
875 * complex clock that may combine a mux with other operations.
877 long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
878 unsigned long min_rate,
879 unsigned long max_rate,
880 unsigned long *best_parent_rate,
881 struct clk_hw **best_parent_p)
883 return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
887 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
889 long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
890 unsigned long min_rate,
891 unsigned long max_rate,
892 unsigned long *best_parent_rate,
893 struct clk_hw **best_parent_p)
895 return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
898 CLK_MUX_ROUND_CLOSEST);
900 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
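/*
 * Illustrative sketch, not part of the original file: a mux-style provider
 * wiring __clk_mux_determine_rate() straight into its clk_ops, as the helper
 * comment above describes. The example_mux_* names are hypothetical and the
 * hardware accesses are stubbed out.
 */
static u8 example_mux_get_parent(struct clk_hw *hw)
{
        /* a real mux would decode its parent select field here */
        return 0;
}

static int example_mux_set_parent(struct clk_hw *hw, u8 index)
{
        /* a real mux would program its parent select field here */
        return 0;
}

static const struct clk_ops example_mux_ops __maybe_unused = {
        .get_parent = example_mux_get_parent,
        .set_parent = example_mux_set_parent,
        .determine_rate = __clk_mux_determine_rate,
};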
904 static void clk_core_unprepare(struct clk_core *core)
909 if (WARN_ON(core->prepare_count == 0))
912 if (--core->prepare_count > 0)
915 WARN_ON(core->enable_count > 0);
917 trace_clk_unprepare(core);
919 if (core->ops->unprepare)
920 core->ops->unprepare(core->hw);
922 trace_clk_unprepare_complete(core);
923 clk_core_unprepare(core->parent);
927 * clk_unprepare - undo preparation of a clock source
928 * @clk: the clk being unprepared
930 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
931 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
932 * if the operation may sleep. One example is a clk which is accessed over
933 * I2C. In the complex case a clk gate operation may require a fast and a slow
934 * part. It is for this reason that clk_unprepare and clk_disable are not mutually
935 * exclusive. In fact clk_disable must be called before clk_unprepare.
937 void clk_unprepare(struct clk *clk)
939 if (IS_ERR_OR_NULL(clk))
943 clk_core_unprepare(clk->core);
944 clk_prepare_unlock();
946 EXPORT_SYMBOL_GPL(clk_unprepare);
948 static int clk_core_prepare(struct clk_core *core)
955 if (core->prepare_count == 0) {
956 ret = clk_core_prepare(core->parent);
960 trace_clk_prepare(core);
962 if (core->ops->prepare)
963 ret = core->ops->prepare(core->hw);
965 trace_clk_prepare_complete(core);
968 clk_core_unprepare(core->parent);
973 core->prepare_count++;
979 * clk_prepare - prepare a clock source
980 * @clk: the clk being prepared
982 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
983 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
984 * operation may sleep. One example is a clk which is accessed over I2C. In
985 * the complex case a clk ungate operation may require a fast and a slow part.
986 * It is for this reason that clk_prepare and clk_enable are not mutually
987 * exclusive. In fact clk_prepare must be called before clk_enable.
988 * Returns 0 on success, a negative error code otherwise.
990 int clk_prepare(struct clk *clk)
998 ret = clk_core_prepare(clk->core);
999 clk_prepare_unlock();
1003 EXPORT_SYMBOL_GPL(clk_prepare);
1005 static void clk_core_disable(struct clk_core *core)
1010 if (WARN_ON(core->enable_count == 0))
1013 if (--core->enable_count > 0)
1016 trace_clk_disable(core);
1018 if (core->ops->disable)
1019 core->ops->disable(core->hw);
1021 trace_clk_disable_complete(core);
1023 clk_core_disable(core->parent);
1027 * clk_disable - gate a clock
1028 * @clk: the clk being gated
1030 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
1031 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
1032 * clk if the operation is fast and will never sleep. One example is a
1033 * SoC-internal clk which is controlled via simple register writes. In the
1034 * complex case a clk gate operation may require a fast and a slow part. It is
1035 * for this reason that clk_unprepare and clk_disable are not mutually exclusive.
1036 * In fact clk_disable must be called before clk_unprepare.
1038 void clk_disable(struct clk *clk)
1040 unsigned long flags;
1042 if (IS_ERR_OR_NULL(clk))
1045 flags = clk_enable_lock();
1046 clk_core_disable(clk->core);
1047 clk_enable_unlock(flags);
1049 EXPORT_SYMBOL_GPL(clk_disable);
1051 static int clk_core_enable(struct clk_core *core)
1058 if (WARN_ON(core->prepare_count == 0))
1061 if (core->enable_count == 0) {
1062 ret = clk_core_enable(core->parent);
1067 trace_clk_enable(core);
1069 if (core->ops->enable)
1070 ret = core->ops->enable(core->hw);
1072 trace_clk_enable_complete(core);
1075 clk_core_disable(core->parent);
1080 core->enable_count++;
1085 * clk_enable - ungate a clock
1086 * @clk: the clk being ungated
1088 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
1089 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
1090 * if the operation will never sleep. One example is a SoC-internal clk which
1091 * is controlled via simple register writes. In the complex case a clk ungate
1092 * operation may require a fast and a slow part. It is for this reason that
1093 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
1094 * must be called before clk_enable. Returns 0 on success, a negative error code
1097 int clk_enable(struct clk *clk)
1099 unsigned long flags;
1105 flags = clk_enable_lock();
1106 ret = clk_core_enable(clk->core);
1107 clk_enable_unlock(flags);
1111 EXPORT_SYMBOL_GPL(clk_enable);
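/*
 * Hedged consumer-side sketch, not part of the original file, of the ordering
 * rules documented above: prepare before enable, disable before unprepare.
 * The example_* names are hypothetical; the clk is assumed to come from
 * clk_get().
 */
static int __maybe_unused example_clk_start(struct clk *clk)
{
        int ret;

        ret = clk_prepare(clk);         /* may sleep */
        if (ret)
                return ret;

        ret = clk_enable(clk);          /* must not sleep */
        if (ret)
                clk_unprepare(clk);

        return ret;
}

static void __maybe_unused example_clk_stop(struct clk *clk)
{
        clk_disable(clk);               /* fast path first */
        clk_unprepare(clk);             /* then the sleepable part */
}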
1113 static unsigned long clk_core_round_rate_nolock(struct clk_core *core,
1115 unsigned long min_rate,
1116 unsigned long max_rate)
1118 unsigned long parent_rate = 0;
1119 struct clk_core *parent;
1120 struct clk_hw *parent_hw;
1122 lockdep_assert_held(&prepare_lock);
1127 parent = core->parent;
1129 parent_rate = parent->rate;
1131 if (core->ops->determine_rate) {
1132 parent_hw = parent ? parent->hw : NULL;
1133 return core->ops->determine_rate(core->hw, rate,
1135 &parent_rate, &parent_hw);
1136 } else if (core->ops->round_rate)
1137 return core->ops->round_rate(core->hw, rate, &parent_rate);
1138 else if (core->flags & CLK_SET_RATE_PARENT)
1139 return clk_core_round_rate_nolock(core->parent, rate, min_rate,
1146 * __clk_determine_rate - get the closest rate actually supported by a clock
1147 * @hw: determine the rate of this clock
1148 * @rate: target rate
1149 * @min_rate: returned rate must not be less than this rate
1150 * @max_rate: returned rate must not be greater than this rate
1152 * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate and
1155 unsigned long __clk_determine_rate(struct clk_hw *hw,
1157 unsigned long min_rate,
1158 unsigned long max_rate)
1163 return clk_core_round_rate_nolock(hw->core, rate, min_rate, max_rate);
1165 EXPORT_SYMBOL_GPL(__clk_determine_rate);
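/*
 * Hedged provider sketch, not part of the original file: a .determine_rate
 * callback for a pass-through clock that simply asks its current parent what
 * it can deliver, using __clk_determine_rate() as described above. The
 * example_* name is hypothetical.
 */
static long __maybe_unused
example_passthrough_determine_rate(struct clk_hw *hw, unsigned long rate,
                                   unsigned long min_rate,
                                   unsigned long max_rate,
                                   unsigned long *best_parent_rate,
                                   struct clk_hw **best_parent_hw)
{
        unsigned long parent_rate;

        parent_rate = __clk_determine_rate(*best_parent_hw, rate,
                                           min_rate, max_rate);
        *best_parent_rate = parent_rate;

        return parent_rate;
}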
1168 * __clk_round_rate - round the given rate for a clk
1169 * @clk: round the rate of this clock
1170 * @rate: the rate which is to be rounded
1172 * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate
1174 unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
1176 unsigned long min_rate;
1177 unsigned long max_rate;
1182 clk_core_get_boundaries(clk->core, &min_rate, &max_rate);
1184 return clk_core_round_rate_nolock(clk->core, rate, min_rate, max_rate);
1186 EXPORT_SYMBOL_GPL(__clk_round_rate);
1189 * clk_round_rate - round the given rate for a clk
1190 * @clk: the clk for which we are rounding a rate
1191 * @rate: the rate which is to be rounded
1193 * Takes in a rate as input and rounds it to a rate that the clk can actually
1194 * use, which is then returned. If clk doesn't support the round_rate operation
1195 * then the parent rate is returned.
1197 long clk_round_rate(struct clk *clk, unsigned long rate)
1205 ret = __clk_round_rate(clk, rate);
1206 clk_prepare_unlock();
1210 EXPORT_SYMBOL_GPL(clk_round_rate);
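/*
 * Hedged consumer sketch, not part of the original file: probing what the
 * framework would actually give before committing via clk_set_rate(), per
 * the clk_round_rate() description above. The 48 MHz target and the
 * example_* name are arbitrary.
 */
static int __maybe_unused example_pick_rate(struct clk *clk)
{
        long rounded;

        rounded = clk_round_rate(clk, 48000000);
        if (rounded < 0)
                return rounded;

        return clk_set_rate(clk, rounded);
}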
1213 * __clk_notify - call clk notifier chain
1214 * @core: clk that is changing rate
1215 * @msg: clk notifier type (see include/linux/clk.h)
1216 * @old_rate: old clk rate
1217 * @new_rate: new clk rate
1219 * Triggers a notifier call chain on the clk rate-change notification
1220 * for 'clk'. Passes a pointer to the struct clk and the previous
1221 * and current rates to the notifier callback. Intended to be called by
1222 * internal clock code only. Returns NOTIFY_DONE from the last driver
1223 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
1224 * a driver returns that.
1226 static int __clk_notify(struct clk_core *core, unsigned long msg,
1227 unsigned long old_rate, unsigned long new_rate)
1229 struct clk_notifier *cn;
1230 struct clk_notifier_data cnd;
1231 int ret = NOTIFY_DONE;
1233 cnd.old_rate = old_rate;
1234 cnd.new_rate = new_rate;
1236 list_for_each_entry(cn, &clk_notifier_list, node) {
1237 if (cn->clk->core == core) {
1239 ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
1248 * __clk_recalc_accuracies
1249 * @core: first clk in the subtree
1251 * Walks the subtree of clks starting with clk and recalculates accuracies as
1252 * it goes. Note that if a clk does not implement the .recalc_accuracy
1253 * callback then it is assumed that the clock will take on the accuracy of its
1256 * Caller must hold prepare_lock.
1258 static void __clk_recalc_accuracies(struct clk_core *core)
1260 unsigned long parent_accuracy = 0;
1261 struct clk_core *child;
1263 lockdep_assert_held(&prepare_lock);
1266 parent_accuracy = core->parent->accuracy;
1268 if (core->ops->recalc_accuracy)
1269 core->accuracy = core->ops->recalc_accuracy(core->hw,
1272 core->accuracy = parent_accuracy;
1274 hlist_for_each_entry(child, &core->children, child_node)
1275 __clk_recalc_accuracies(child);
1278 static long clk_core_get_accuracy(struct clk_core *core)
1280 unsigned long accuracy;
1283 if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
1284 __clk_recalc_accuracies(core);
1286 accuracy = __clk_get_accuracy(core);
1287 clk_prepare_unlock();
1293 * clk_get_accuracy - return the accuracy of clk
1294 * @clk: the clk whose accuracy is being returned
1296 * Simply returns the cached accuracy of the clk, unless
1297 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will be
1299 * If clk is NULL then returns 0.
1301 long clk_get_accuracy(struct clk *clk)
1306 return clk_core_get_accuracy(clk->core);
1308 EXPORT_SYMBOL_GPL(clk_get_accuracy);
1310 static unsigned long clk_recalc(struct clk_core *core,
1311 unsigned long parent_rate)
1313 if (core->ops->recalc_rate)
1314 return core->ops->recalc_rate(core->hw, parent_rate);
1319 * __clk_recalc_rates
1320 * @core: first clk in the subtree
1321 * @msg: notification type (see include/linux/clk.h)
1323 * Walks the subtree of clks starting with clk and recalculates rates as it
1324 * goes. Note that if a clk does not implement the .recalc_rate callback then
1325 * it is assumed that the clock will take on the rate of its parent.
1327 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
1330 * Caller must hold prepare_lock.
1332 static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
1334 unsigned long old_rate;
1335 unsigned long parent_rate = 0;
1336 struct clk_core *child;
1338 lockdep_assert_held(&prepare_lock);
1340 old_rate = core->rate;
1343 parent_rate = core->parent->rate;
1345 core->rate = clk_recalc(core, parent_rate);
1348 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
1349 * & ABORT_RATE_CHANGE notifiers
1351 if (core->notifier_count && msg)
1352 __clk_notify(core, msg, old_rate, core->rate);
1354 hlist_for_each_entry(child, &core->children, child_node)
1355 __clk_recalc_rates(child, msg);
1358 static unsigned long clk_core_get_rate(struct clk_core *core)
1364 if (core && (core->flags & CLK_GET_RATE_NOCACHE))
1365 __clk_recalc_rates(core, 0);
1367 rate = clk_core_get_rate_nolock(core);
1368 clk_prepare_unlock();
1374 * clk_get_rate - return the rate of clk
1375 * @clk: the clk whose rate is being returned
1377 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
1378 * is set, which means a recalc_rate will be issued.
1379 * If clk is NULL then returns 0.
1381 unsigned long clk_get_rate(struct clk *clk)
1386 return clk_core_get_rate(clk->core);
1388 EXPORT_SYMBOL_GPL(clk_get_rate);
1390 static int clk_fetch_parent_index(struct clk_core *core,
1391 struct clk_core *parent)
1395 if (!core->parents) {
1396 core->parents = kcalloc(core->num_parents,
1397 sizeof(struct clk *), GFP_KERNEL);
1403 * find index of new parent clock using cached parent ptrs,
1404 * or if not yet cached, use string name comparison and cache
1405 * them now to avoid future calls to clk_core_lookup.
1407 for (i = 0; i < core->num_parents; i++) {
1408 if (core->parents[i] == parent)
1411 if (core->parents[i])
1414 if (!strcmp(core->parent_names[i], parent->name)) {
1415 core->parents[i] = clk_core_lookup(parent->name);
1423 static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
1425 hlist_del(&core->child_node);
1428 /* avoid duplicate POST_RATE_CHANGE notifications */
1429 if (new_parent->new_child == core)
1430 new_parent->new_child = NULL;
1432 hlist_add_head(&core->child_node, &new_parent->children);
1434 hlist_add_head(&core->child_node, &clk_orphan_list);
1437 core->parent = new_parent;
1440 static struct clk_core *__clk_set_parent_before(struct clk_core *core,
1441 struct clk_core *parent)
1443 unsigned long flags;
1444 struct clk_core *old_parent = core->parent;
1447 * Migrate prepare state between parents and prevent race with
1450 * If the clock is not prepared, then a race with
1451 * clk_enable/disable() is impossible since we already have the
1452 * prepare lock (future calls to clk_enable() need to be preceded by
1455 * If the clock is prepared, migrate the prepared state to the new
1456 * parent and also protect against a race with clk_enable() by
1457 * forcing the clock and the new parent on. This ensures that all
1458 * future calls to clk_enable() are practically NOPs with respect to
1459 * hardware and software states.
1461 * See also: Comment for clk_set_parent() below.
1463 if (core->prepare_count) {
1464 clk_core_prepare(parent);
1465 clk_core_enable(parent);
1466 clk_core_enable(core);
1469 /* update the clk tree topology */
1470 flags = clk_enable_lock();
1471 clk_reparent(core, parent);
1472 clk_enable_unlock(flags);
1477 static void __clk_set_parent_after(struct clk_core *core,
1478 struct clk_core *parent,
1479 struct clk_core *old_parent)
1482 * Finish the migration of prepare state and undo the changes done
1483 * for preventing a race with clk_enable().
1485 if (core->prepare_count) {
1486 clk_core_disable(core);
1487 clk_core_disable(old_parent);
1488 clk_core_unprepare(old_parent);
1492 static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
1495 unsigned long flags;
1497 struct clk_core *old_parent;
1499 old_parent = __clk_set_parent_before(core, parent);
1501 trace_clk_set_parent(core, parent);
1503 /* change clock input source */
1504 if (parent && core->ops->set_parent)
1505 ret = core->ops->set_parent(core->hw, p_index);
1507 trace_clk_set_parent_complete(core, parent);
1510 flags = clk_enable_lock();
1511 clk_reparent(core, old_parent);
1512 clk_enable_unlock(flags);
1514 if (core->prepare_count) {
1515 clk_core_disable(core);
1516 clk_core_disable(parent);
1517 clk_core_unprepare(parent);
1522 __clk_set_parent_after(core, parent, old_parent);
1528 * __clk_speculate_rates
1529 * @core: first clk in the subtree
1530 * @parent_rate: the "future" rate of clk's parent
1532 * Walks the subtree of clks starting with clk, speculating rates as it
1533 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
1535 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
1536 * pre-rate change notifications and returns early if no clks in the
1537 * subtree have subscribed to the notifications. Note that if a clk does not
1538 * implement the .recalc_rate callback then it is assumed that the clock will
1539 * take on the rate of its parent.
1541 * Caller must hold prepare_lock.
1543 static int __clk_speculate_rates(struct clk_core *core,
1544 unsigned long parent_rate)
1546 struct clk_core *child;
1547 unsigned long new_rate;
1548 int ret = NOTIFY_DONE;
1550 lockdep_assert_held(&prepare_lock);
1552 new_rate = clk_recalc(core, parent_rate);
1554 /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
1555 if (core->notifier_count)
1556 ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
1558 if (ret & NOTIFY_STOP_MASK) {
1559 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
1560 __func__, core->name, ret);
1564 hlist_for_each_entry(child, &core->children, child_node) {
1565 ret = __clk_speculate_rates(child, new_rate);
1566 if (ret & NOTIFY_STOP_MASK)
1574 static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
1575 struct clk_core *new_parent, u8 p_index)
1577 struct clk_core *child;
1579 core->new_rate = new_rate;
1580 core->new_parent = new_parent;
1581 core->new_parent_index = p_index;
1582 /* include clk in new parent's PRE_RATE_CHANGE notifications */
1583 core->new_child = NULL;
1584 if (new_parent && new_parent != core->parent)
1585 new_parent->new_child = core;
1587 hlist_for_each_entry(child, &core->children, child_node) {
1588 child->new_rate = clk_recalc(child, new_rate);
1589 clk_calc_subtree(child, child->new_rate, NULL, 0);
1594 * calculate the new rates returning the topmost clock that has to be
1597 static struct clk_core *clk_calc_new_rates(struct clk_core *core,
1600 struct clk_core *top = core;
1601 struct clk_core *old_parent, *parent;
1602 struct clk_hw *parent_hw;
1603 unsigned long best_parent_rate = 0;
1604 unsigned long new_rate;
1605 unsigned long min_rate;
1606 unsigned long max_rate;
1611 if (IS_ERR_OR_NULL(core))
1614 /* save parent rate, if it exists */
1615 parent = old_parent = core->parent;
1617 best_parent_rate = parent->rate;
1619 clk_core_get_boundaries(core, &min_rate, &max_rate);
1621 /* find the closest rate and parent clk/rate */
1622 if (core->ops->determine_rate) {
1623 parent_hw = parent ? parent->hw : NULL;
1624 ret = core->ops->determine_rate(core->hw, rate,
1633 parent = parent_hw ? parent_hw->core : NULL;
1634 } else if (core->ops->round_rate) {
1635 ret = core->ops->round_rate(core->hw, rate,
1641 if (new_rate < min_rate || new_rate > max_rate)
1643 } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
1644 /* pass-through clock without adjustable parent */
1645 core->new_rate = core->rate;
1648 /* pass-through clock with adjustable parent */
1649 top = clk_calc_new_rates(parent, rate);
1650 new_rate = parent->new_rate;
1654 /* some clocks must be gated to change parent */
1655 if (parent != old_parent &&
1656 (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
1657 pr_debug("%s: %s not gated but wants to reparent\n",
1658 __func__, core->name);
1662 /* try finding the new parent index */
1663 if (parent && core->num_parents > 1) {
1664 p_index = clk_fetch_parent_index(core, parent);
1666 pr_debug("%s: clk %s can not be parent of clk %s\n",
1667 __func__, parent->name, core->name);
1672 if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
1673 best_parent_rate != parent->rate)
1674 top = clk_calc_new_rates(parent, best_parent_rate);
1677 clk_calc_subtree(core, new_rate, parent, p_index);
1683 * Notify about rate changes in a subtree. Always walk down the whole tree
1684 * so that in case of an error we can walk down the whole tree again and
1687 static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
1688 unsigned long event)
1690 struct clk_core *child, *tmp_clk, *fail_clk = NULL;
1691 int ret = NOTIFY_DONE;
1693 if (core->rate == core->new_rate)
1696 if (core->notifier_count) {
1697 ret = __clk_notify(core, event, core->rate, core->new_rate);
1698 if (ret & NOTIFY_STOP_MASK)
1702 hlist_for_each_entry(child, &core->children, child_node) {
1703 /* Skip children who will be reparented to another clock */
1704 if (child->new_parent && child->new_parent != core)
1706 tmp_clk = clk_propagate_rate_change(child, event);
1711 /* handle the new child who might not be in core->children yet */
1712 if (core->new_child) {
1713 tmp_clk = clk_propagate_rate_change(core->new_child, event);
1722 * walk down a subtree and set the new rates notifying the rate
1725 static void clk_change_rate(struct clk_core *core)
1727 struct clk_core *child;
1728 struct hlist_node *tmp;
1729 unsigned long old_rate;
1730 unsigned long best_parent_rate = 0;
1731 bool skip_set_rate = false;
1732 struct clk_core *old_parent;
1734 old_rate = core->rate;
1736 if (core->new_parent)
1737 best_parent_rate = core->new_parent->rate;
1738 else if (core->parent)
1739 best_parent_rate = core->parent->rate;
1741 if (core->new_parent && core->new_parent != core->parent) {
1742 old_parent = __clk_set_parent_before(core, core->new_parent);
1743 trace_clk_set_parent(core, core->new_parent);
1745 if (core->ops->set_rate_and_parent) {
1746 skip_set_rate = true;
1747 core->ops->set_rate_and_parent(core->hw, core->new_rate,
1749 core->new_parent_index);
1750 } else if (core->ops->set_parent) {
1751 core->ops->set_parent(core->hw, core->new_parent_index);
1754 trace_clk_set_parent_complete(core, core->new_parent);
1755 __clk_set_parent_after(core, core->new_parent, old_parent);
1758 trace_clk_set_rate(core, core->new_rate);
1760 if (!skip_set_rate && core->ops->set_rate)
1761 core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
1763 trace_clk_set_rate_complete(core, core->new_rate);
1765 core->rate = clk_recalc(core, best_parent_rate);
1767 if (core->notifier_count && old_rate != core->rate)
1768 __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
1771 * Use safe iteration, as change_rate can actually swap parents
1772 * for certain clock types.
1774 hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
1775 /* Skip children who will be reparented to another clock */
1776 if (child->new_parent && child->new_parent != core)
1778 clk_change_rate(child);
1781 /* handle the new child who might not be in core->children yet */
1782 if (core->new_child)
1783 clk_change_rate(core->new_child);
1786 static int clk_core_set_rate_nolock(struct clk_core *core,
1787 unsigned long req_rate)
1789 struct clk_core *top, *fail_clk;
1790 unsigned long rate = req_rate;
1796 /* bail early if nothing to do */
1797 if (rate == clk_core_get_rate_nolock(core))
1800 if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
1803 /* calculate new rates and get the topmost changed clock */
1804 top = clk_calc_new_rates(core, rate);
1808 /* notify that we are about to change rates */
1809 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
1811 pr_debug("%s: failed to set %s rate\n", __func__,
1813 clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
1817 /* change the rates */
1818 clk_change_rate(top);
1820 core->req_rate = req_rate;
1826 * clk_set_rate - specify a new rate for clk
1827 * @clk: the clk whose rate is being changed
1828 * @rate: the new rate for clk
1830 * In the simplest case clk_set_rate will only adjust the rate of clk.
1832 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
1833 * propagate up to clk's parent; whether or not this happens depends on the
1834 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
1835 * after calling .round_rate then upstream parent propagation is ignored. If
1836 * *parent_rate comes back with a new rate for clk's parent then we propagate
1837 * up to clk's parent and set its rate. Upward propagation will continue
1838 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
1839 * .round_rate stops requesting changes to clk's parent_rate.
1841 * Rate changes are accomplished via tree traversal that also recalculates the
1842 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
1844 * Returns 0 on success, a negative error code otherwise.
1846 int clk_set_rate(struct clk *clk, unsigned long rate)
1853 /* prevent racing with updates to the clock topology */
1856 ret = clk_core_set_rate_nolock(clk->core, rate);
1858 clk_prepare_unlock();
1862 EXPORT_SYMBOL_GPL(clk_set_rate);
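/*
 * Hedged consumer-side sketch, not part of the original file, of the kind of
 * callback that receives the PRE/POST_RATE_CHANGE notifications mentioned
 * above. example_clk_notifier_cb and the 200 MHz limit are hypothetical;
 * registration would go through clk_notifier_register().
 */
static int __maybe_unused example_clk_notifier_cb(struct notifier_block *nb,
                                        unsigned long event, void *data)
{
        struct clk_notifier_data *ndata = data;

        switch (event) {
        case PRE_RATE_CHANGE:
                /* veto rates this consumer cannot cope with */
                if (ndata->new_rate > 200000000)
                        return NOTIFY_BAD;
                return NOTIFY_OK;
        case POST_RATE_CHANGE:
        case ABORT_RATE_CHANGE:
        default:
                return NOTIFY_DONE;
        }
}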
1865 * clk_set_rate_range - set a rate range for a clock source
1866 * @clk: clock source
1867 * @min: desired minimum clock rate in Hz, inclusive
1868 * @max: desired maximum clock rate in Hz, inclusive
1870 * Returns success (0) or negative errno.
1872 int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
1880 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
1881 __func__, clk->core->name, clk->dev_id, clk->con_id,
1888 if (min != clk->min_rate || max != clk->max_rate) {
1889 clk->min_rate = min;
1890 clk->max_rate = max;
1891 ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
1894 clk_prepare_unlock();
1898 EXPORT_SYMBOL_GPL(clk_set_rate_range);
1901 * clk_set_min_rate - set a minimum clock rate for a clock source
1902 * @clk: clock source
1903 * @rate: desired minimum clock rate in Hz, inclusive
1905 * Returns success (0) or negative errno.
1907 int clk_set_min_rate(struct clk *clk, unsigned long rate)
1912 return clk_set_rate_range(clk, rate, clk->max_rate);
1914 EXPORT_SYMBOL_GPL(clk_set_min_rate);
1917 * clk_set_max_rate - set a maximum clock rate for a clock source
1918 * @clk: clock source
1919 * @rate: desired maximum clock rate in Hz, inclusive
1921 * Returns success (0) or negative errno.
1923 int clk_set_max_rate(struct clk *clk, unsigned long rate)
1928 return clk_set_rate_range(clk, clk->min_rate, rate);
1930 EXPORT_SYMBOL_GPL(clk_set_max_rate);
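/*
 * Hedged consumer sketch, not part of the original file: constraining a
 * clock with the range helpers above before requesting a target rate. The
 * frequencies and the example_* name are arbitrary.
 */
static int __maybe_unused example_constrain_and_set(struct clk *clk)
{
        int ret;

        ret = clk_set_rate_range(clk, 100000000, 400000000);
        if (ret)
                return ret;

        /* rate requests are evaluated against the aggregated min/max */
        return clk_set_rate(clk, 200000000);
}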
1933 * clk_get_parent - return the parent of a clk
1934 * @clk: the clk whose parent gets returned
1936 * Simply returns clk->parent. Returns NULL if clk is NULL.
1938 struct clk *clk_get_parent(struct clk *clk)
1943 parent = __clk_get_parent(clk);
1944 clk_prepare_unlock();
1948 EXPORT_SYMBOL_GPL(clk_get_parent);
1951 * .get_parent is mandatory for clocks with multiple possible parents. It is
1952 * optional for single-parent clocks. Always call .get_parent if it is
1953 * available and WARN if it is missing for multi-parent clocks.
1955 * For single-parent clocks without .get_parent, first check to see if the
1956 * .parents array exists, and if so use it to avoid an expensive tree
1957 * traversal. If .parents does not exist then walk the tree.
1959 static struct clk_core *__clk_init_parent(struct clk_core *core)
1961 struct clk_core *ret = NULL;
1964 /* handle the trivial cases */
1966 if (!core->num_parents)
1969 if (core->num_parents == 1) {
1970 if (IS_ERR_OR_NULL(core->parent))
1971 core->parent = clk_core_lookup(core->parent_names[0]);
1976 if (!core->ops->get_parent) {
1977 WARN(!core->ops->get_parent,
1978 "%s: multi-parent clocks must implement .get_parent\n",
1984 * Do our best to cache parent clocks in core->parents. This prevents
1985 * unnecessary and expensive lookups. We don't set core->parent here;
1986 * that is done by the calling function.
1989 index = core->ops->get_parent(core->hw);
1993 kcalloc(core->num_parents, sizeof(struct clk *),
1996 ret = clk_core_get_parent_by_index(core, index);
2002 static void clk_core_reparent(struct clk_core *core,
2003 struct clk_core *new_parent)
2005 clk_reparent(core, new_parent);
2006 __clk_recalc_accuracies(core);
2007 __clk_recalc_rates(core, POST_RATE_CHANGE);
2011 * clk_has_parent - check if a clock is a possible parent for another
2012 * @clk: clock source
2013 * @parent: parent clock source
2015 * This function can be used in drivers that need to check that a clock can be
2016 * the parent of another without actually changing the parent.
2018 * Returns true if @parent is a possible parent for @clk, false otherwise.
2020 bool clk_has_parent(struct clk *clk, struct clk *parent)
2022 struct clk_core *core, *parent_core;
2025 /* NULL clocks should be nops, so return success if either is NULL. */
2026 if (!clk || !parent)
2030 parent_core = parent->core;
2032 /* Optimize for the case where the parent is already the parent. */
2033 if (core->parent == parent_core)
2036 for (i = 0; i < core->num_parents; i++)
2037 if (strcmp(core->parent_names[i], parent_core->name) == 0)
2042 EXPORT_SYMBOL_GPL(clk_has_parent);
2044 static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
2048 unsigned long p_rate = 0;
2053 /* prevent racing with updates to the clock topology */
2056 if (core->parent == parent)
2059 /* verify ops for multi-parent clks */
2060 if ((core->num_parents > 1) && (!core->ops->set_parent)) {
2065 /* check that we are allowed to re-parent if the clock is in use */
2066 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
2071 /* try finding the new parent index */
2073 p_index = clk_fetch_parent_index(core, parent);
2074 p_rate = parent->rate;
2076 pr_debug("%s: clk %s can not be parent of clk %s\n",
2077 __func__, parent->name, core->name);
2083 /* propagate PRE_RATE_CHANGE notifications */
2084 ret = __clk_speculate_rates(core, p_rate);
2086 /* abort if a driver objects */
2087 if (ret & NOTIFY_STOP_MASK)
2090 /* do the re-parent */
2091 ret = __clk_set_parent(core, parent, p_index);
2093 /* propagate rate and accuracy recalculation accordingly */
2095 __clk_recalc_rates(core, ABORT_RATE_CHANGE);
2097 __clk_recalc_rates(core, POST_RATE_CHANGE);
2098 __clk_recalc_accuracies(core);
2102 clk_prepare_unlock();
2108 * clk_set_parent - switch the parent of a mux clk
2109 * @clk: the mux clk whose input we are switching
2110 * @parent: the new input to clk
2112 * Re-parent clk to use parent as its new input source. If clk is in
2113 * prepared state, the clk will get enabled for the duration of this call. If
2114 * that's not acceptable for a specific clk (e.g. the consumer can't handle
2115 * that, the reparenting is glitchy in hardware, etc.), use the
2116 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
2118 * After successfully changing clk's parent clk_set_parent will update the
2119 * clk topology, sysfs topology and propagate rate recalculation via
2120 * __clk_recalc_rates.
2122 * Returns 0 on success, a negative error code otherwise.
2124 int clk_set_parent(struct clk *clk, struct clk *parent)
2129 return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
2131 EXPORT_SYMBOL_GPL(clk_set_parent);
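/*
 * Hedged consumer sketch, not part of the original file: validating a
 * candidate parent with clk_has_parent() before switching the mux with
 * clk_set_parent(), per the description above. The example_* name is
 * hypothetical.
 */
static int __maybe_unused example_reparent(struct clk *clk,
                                           struct clk *new_parent)
{
        if (!clk_has_parent(clk, new_parent))
                return -EINVAL;

        return clk_set_parent(clk, new_parent);
}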
2134 * clk_set_phase - adjust the phase shift of a clock signal
2135 * @clk: clock signal source
2136 * @degrees: number of degrees the signal is shifted
2138 * Shifts the phase of a clock signal by the specified number of
2139 * degrees. Returns 0 on success, a negative error code otherwise.
2141 * This function makes no distinction about the input or reference
2142 * signal that we adjust the clock signal phase against. For example,
2143 * with phase-locked-loop clock signal generators we may shift phase with
2144 * respect to the feedback clock signal input, but in other cases the
2145 * clock phase may be shifted with respect to some other, unspecified
2148 * Additionally the concept of phase shift does not propagate through
2149 * the clock tree hierarchy, which sets it apart from clock rates and
2150 * clock accuracy. A parent clock phase attribute does not have an
2151 * impact on the phase attribute of a child clock.
2153 int clk_set_phase(struct clk *clk, int degrees)
2160 /* sanity check degrees */
2167 trace_clk_set_phase(clk->core, degrees);
2169 if (clk->core->ops->set_phase)
2170 ret = clk->core->ops->set_phase(clk->core->hw, degrees);
2172 trace_clk_set_phase_complete(clk->core, degrees);
2175 clk->core->phase = degrees;
2177 clk_prepare_unlock();
2181 EXPORT_SYMBOL_GPL(clk_set_phase);
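/*
 * Hedged consumer sketch, not part of the original file: applying a 90
 * degree shift and reading it back, per the clk_set_phase()/clk_get_phase()
 * descriptions. The quadrature use case and the example_* name are
 * hypothetical.
 */
static int __maybe_unused example_quadrature_shift(struct clk *clk)
{
        int ret;

        ret = clk_set_phase(clk, 90);
        if (ret)
                return ret;

        return clk_get_phase(clk) == 90 ? 0 : -EIO;
}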
2183 static int clk_core_get_phase(struct clk_core *core)
2192 clk_prepare_unlock();
2197 EXPORT_SYMBOL_GPL(clk_get_phase);
2200 * clk_get_phase - return the phase shift of a clock signal
2201 * @clk: clock signal source
2203 * Returns the phase shift of a clock node in degrees, otherwise returns
2206 int clk_get_phase(struct clk *clk)
2211 return clk_core_get_phase(clk->core);
2215 * clk_is_match - check if two clk's point to the same hardware clock
2216 * @p: clk compared against q
2217 * @q: clk compared against p
2219 * Returns true if the two struct clk pointers both point to the same hardware
2220 * clock node. Put differently, returns true if struct clk *p and struct clk *q
2221 * share the same struct clk_core object.
2223 * Returns false otherwise. Note that two NULL clks are treated as matching.
2225 bool clk_is_match(const struct clk *p, const struct clk *q)
2227 /* trivial case: identical struct clk's or both NULL */
2231 /* true if clk->core pointers match. Avoid derefing garbage */
2232 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
2233 if (p->core == q->core)
2238 EXPORT_SYMBOL_GPL(clk_is_match);
2241 * __clk_init - initialize the data structures in a struct clk
2242 * @dev: device initializing this clk, placeholder for now
2243 * @clk: clk being initialized
2245 * Initializes the lists in struct clk_core, queries the hardware for the
2246 * parent and rate and sets them both.
2248 static int __clk_init(struct device *dev, struct clk *clk_user)
2251 struct clk_core *orphan;
2252 struct hlist_node *tmp2;
2253 struct clk_core *core;
2259 core = clk_user->core;
2263 /* check to see if a clock with this name is already registered */
2264 if (clk_core_lookup(core->name)) {
2265 pr_debug("%s: clk %s already initialized\n",
2266 __func__, core->name);
2271 /* check that clk_ops are sane. See Documentation/clk.txt */
2272 if (core->ops->set_rate &&
2273 !((core->ops->round_rate || core->ops->determine_rate) &&
2274 core->ops->recalc_rate)) {
2275 pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
2276 __func__, core->name);
2281 if (core->ops->set_parent && !core->ops->get_parent) {
2282 pr_warning("%s: %s must implement .get_parent & .set_parent\n",
2283 __func__, core->name);
2288 if (core->ops->set_rate_and_parent &&
2289 !(core->ops->set_parent && core->ops->set_rate)) {
2290 pr_warn("%s: %s must implement .set_parent & .set_rate\n",
2291 __func__, core->name);
2296 /* throw a WARN if any entries in parent_names are NULL */
2297 for (i = 0; i < core->num_parents; i++)
2298 WARN(!core->parent_names[i],
2299 "%s: invalid NULL in %s's .parent_names\n",
2300 __func__, core->name);
2303 * Allocate an array of struct clk *'s to avoid unnecessary string
2304 * look-ups of clk's possible parents. This can fail for clocks passed
2305 * in to clk_init during early boot; thus any access to core->parents[]
2306 * must always check for a NULL pointer and try to populate it if
2309 * If core->parents is not NULL we skip this entire block. This allows
2310 * for clock drivers to statically initialize core->parents.
2312 if (core->num_parents > 1 && !core->parents) {
2313 core->parents = kcalloc(core->num_parents, sizeof(struct clk *),
2316 * clk_core_lookup returns NULL for parents that have not been
2317 * clk_init'd; thus any access to clk->parents[] must check
2318 * for a NULL pointer. We can always perform lazy lookups for
2319 * missing parents later on.
2322 for (i = 0; i < core->num_parents; i++)
2324 clk_core_lookup(core->parent_names[i]);
2327 core->parent = __clk_init_parent(core);
2330 * Populate core->parent if parent has already been __clk_init'd. If
2331 * parent has not yet been __clk_init'd then place clk in the orphan
2332 * list. If clk has set the CLK_IS_ROOT flag then place it in the root
2335 * Every time a new clk is clk_init'd then we walk the list of orphan
2336 * clocks and re-parent any that are children of the clock currently
2340 hlist_add_head(&core->child_node,
2341 &core->parent->children);
2342 else if (core->flags & CLK_IS_ROOT)
2343 hlist_add_head(&core->child_node, &clk_root_list);
2345 hlist_add_head(&core->child_node, &clk_orphan_list);
2348 * Set clk's accuracy. The preferred method is to use
2349 * .recalc_accuracy. For simple clocks and lazy developers the default
2350 * fallback is to use the parent's accuracy. If a clock doesn't have a
2351 * parent (or is orphaned) then accuracy is set to zero (perfect
2354 if (core->ops->recalc_accuracy)
2355 core->accuracy = core->ops->recalc_accuracy(core->hw,
2356 __clk_get_accuracy(core->parent));
2357 else if (core->parent)
2358 core->accuracy = core->parent->accuracy;
2364 * Since a phase is by definition relative to its parent, just
2365 * query the current clock phase, or else assume it's in phase.
2367 if (core->ops->get_phase)
2368 core->phase = core->ops->get_phase(core->hw);
2373 * Set clk's rate. The preferred method is to use .recalc_rate. For
2374 * simple clocks and lazy developers the default fallback is to use the
2375 * parent's rate. If a clock doesn't have a parent (or is orphaned)
2376 * then rate is set to zero.
2378 if (core->ops->recalc_rate)
2379 rate = core->ops->recalc_rate(core->hw,
2380 clk_core_get_rate_nolock(core->parent));
2381 else if (core->parent)
2382 rate = core->parent->rate;
2385 core->rate = core->req_rate = rate;
2388 * walk the list of orphan clocks and reparent any that are children of
2391 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
2392 if (orphan->num_parents && orphan->ops->get_parent) {
2393 i = orphan->ops->get_parent(orphan->hw);
2394 if (!strcmp(core->name, orphan->parent_names[i]))
2395 clk_core_reparent(orphan, core);
2399 for (i = 0; i < orphan->num_parents; i++)
2400 if (!strcmp(core->name, orphan->parent_names[i])) {
2401 clk_core_reparent(orphan, core);
2407 * optional platform-specific magic
2409 * The .init callback is not used by any of the basic clock types, but
2410 * exists for weird hardware that must perform initialization magic.
2411 * Please consider other ways of solving initialization problems before
2412 * using this callback, as its use is discouraged.
2414 if (core->ops->init)
2415 core->ops->init(core->hw);
2417 kref_init(&core->ref);
2419 clk_prepare_unlock();
2422 clk_debug_register(core);
2427 struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
2432 /* This is to allow this function to be chained to others */
2433 if (!hw || IS_ERR(hw))
2434 return (struct clk *) hw;
2436 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
2438 return ERR_PTR(-ENOMEM);
2440 clk->core = hw->core;
2441 clk->dev_id = dev_id;
2442 clk->con_id = con_id;
2443 clk->max_rate = ULONG_MAX;
2446 hlist_add_head(&clk->clks_node, &hw->core->clks);
2447 clk_prepare_unlock();
2452 void __clk_free_clk(struct clk *clk)
2455 hlist_del(&clk->clks_node);
2456 clk_prepare_unlock();
2462 * clk_register - allocate a new clock, register it and return an opaque cookie
2463 * @dev: device that is registering this clock
2464 * @hw: link to hardware-specific clock data
2466 * clk_register is the primary interface for populating the clock tree with new
2467 * clock nodes. It returns a pointer to the newly allocated struct clk which
2468 * cannot be dereferenced by driver code but may be used in conjunction with the
2469 * rest of the clock API. In the event of an error clk_register will return an
2470 * error code; drivers must test for an error code after calling clk_register.
2472 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
2475 struct clk_core *core;
2477 core = kzalloc(sizeof(*core), GFP_KERNEL);
2479 pr_err("%s: could not allocate clk\n", __func__);
2484 core->name = kstrdup_const(hw->init->name, GFP_KERNEL);
2486 pr_err("%s: could not allocate clk->name\n", __func__);
2490 core->ops = hw->init->ops;
2491 if (dev && dev->driver)
2492 core->owner = dev->driver->owner;
2494 core->flags = hw->init->flags;
2495 core->num_parents = hw->init->num_parents;
2498 /* allocate local copy in case parent_names is __initdata */
2499 core->parent_names = kcalloc(core->num_parents, sizeof(char *),
2502 if (!core->parent_names) {
2503 pr_err("%s: could not allocate clk->parent_names\n", __func__);
2505 goto fail_parent_names;
2509 /* copy each string name in case parent_names is __initdata */
2510 for (i = 0; i < core->num_parents; i++) {
2511 core->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
2513 if (!core->parent_names[i]) {
2514 pr_err("%s: could not copy parent_names\n", __func__);
2516 goto fail_parent_names_copy;
2520 INIT_HLIST_HEAD(&core->clks);
2522 hw->clk = __clk_create_clk(hw, NULL, NULL);
2523 if (IS_ERR(hw->clk)) {
2524 pr_err("%s: could not allocate per-user clk\n", __func__);
2525 ret = PTR_ERR(hw->clk);
2526 goto fail_parent_names_copy;
2529 ret = __clk_init(dev, hw->clk);
2533 __clk_free_clk(hw->clk);
2536 fail_parent_names_copy:
2538 kfree_const(core->parent_names[i]);
2539 kfree(core->parent_names);
2541 kfree_const(core->name);
2545 return ERR_PTR(ret);
2547 EXPORT_SYMBOL_GPL(clk_register);
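/*
 * Example (illustrative sketch, not part of this file): registering the
 * hypothetical divider clock from the .recalc_rate sketch above. struct
 * clk_init_data and struct clk_hw come from <linux/clk-provider.h>; the
 * strings passed in init are duplicated by clk_register(), so the init data
 * may live on the stack.
 *
 *	static const struct clk_ops foo_clk_ops = {
 *		.recalc_rate = foo_recalc_rate,
 *	};
 *
 *	static struct clk *foo_register(struct device *dev, const char *parent)
 *	{
 *		static struct foo_clk foo;
 *		struct clk_init_data init = {
 *			.name = "foo",
 *			.ops = &foo_clk_ops,
 *			.parent_names = &parent,
 *			.num_parents = 1,
 *		};
 *
 *		foo.div = 2;
 *		foo.hw.init = &init;
 *
 *		return clk_register(dev, &foo.hw);
 *	}
 *
 * The returned struct clk * must only be tested with IS_ERR(); it is an
 * opaque cookie and must not be dereferenced by the caller.
 */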
2550 * Free memory allocated for a clock.
2551 * Caller must hold prepare_lock.
2553 static void __clk_release(struct kref *ref)
2555 struct clk_core *core = container_of(ref, struct clk_core, ref);
2556 int i = core->num_parents;
2558 lockdep_assert_held(&prepare_lock);
2560 kfree(core->parents);
2562 kfree_const(core->parent_names[i]);
2564 kfree(core->parent_names);
2565 kfree_const(core->name);
2570 * Empty clk_ops for unregistered clocks. These are used temporarily
2571 * after clk_unregister() was called on a clock and until last clock
2572 * consumer calls clk_put() and the struct clk object is freed.
2574 static int clk_nodrv_prepare_enable(struct clk_hw *hw)
2579 static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
2584 static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
2585 unsigned long parent_rate)
2590 static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
2595 static const struct clk_ops clk_nodrv_ops = {
2596 .enable = clk_nodrv_prepare_enable,
2597 .disable = clk_nodrv_disable_unprepare,
2598 .prepare = clk_nodrv_prepare_enable,
2599 .unprepare = clk_nodrv_disable_unprepare,
2600 .set_rate = clk_nodrv_set_rate,
2601 .set_parent = clk_nodrv_set_parent,
2605 * clk_unregister - unregister a currently registered clock
2606 * @clk: clock to unregister
2608 void clk_unregister(struct clk *clk)
2610 unsigned long flags;
2612 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
2615 clk_debug_unregister(clk->core);
2619 if (clk->core->ops == &clk_nodrv_ops) {
2620 pr_err("%s: unregistered clock: %s\n", __func__,
2625 * Assign empty clock ops for consumers that might still hold
2626 * a reference to this clock.
2628 flags = clk_enable_lock();
2629 clk->core->ops = &clk_nodrv_ops;
2630 clk_enable_unlock(flags);
2632 if (!hlist_empty(&clk->core->children)) {
2633 struct clk_core *child;
2634 struct hlist_node *t;
2636 /* Reparent all children to the orphan list. */
2637 hlist_for_each_entry_safe(child, t, &clk->core->children,
2639 clk_core_set_parent(child, NULL);
2642 hlist_del_init(&clk->core->child_node);
2644 if (clk->core->prepare_count)
2645 pr_warn("%s: unregistering prepared clock: %s\n",
2646 __func__, clk->core->name);
2647 kref_put(&clk->core->ref, __clk_release);
2649 clk_prepare_unlock();
2651 EXPORT_SYMBOL_GPL(clk_unregister);
2653 static void devm_clk_release(struct device *dev, void *res)
2655 clk_unregister(*(struct clk **)res);
2659 * devm_clk_register - resource managed clk_register()
2660 * @dev: device that is registering this clock
2661 * @hw: link to hardware-specific clock data
2663 * Managed clk_register(). Clocks returned from this function are
2664 * automatically clk_unregister()ed on driver detach. See clk_register() for
2667 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
2672 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
2674 return ERR_PTR(-ENOMEM);
2676 clk = clk_register(dev, hw);
2679 devres_add(dev, clkp);
2686 EXPORT_SYMBOL_GPL(devm_clk_register);
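/*
 * Example (illustrative sketch, not part of this file): using the managed
 * variant from a driver probe path avoids an explicit clk_unregister() in the
 * error and remove paths. foo_probe and foo_hw are hypothetical; foo_hw would
 * be prepared as in the clk_register() sketch above.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct clk *clk;
 *
 *		clk = devm_clk_register(&pdev->dev, &foo_hw);
 *		if (IS_ERR(clk))
 *			return PTR_ERR(clk);
 *
 *		return 0;
 *	}
 */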
2688 static int devm_clk_match(struct device *dev, void *res, void *data)
2690 struct clk *c = res;
2697 * devm_clk_unregister - resource managed clk_unregister()
2698 * @clk: clock to unregister
2700 * Deallocate a clock allocated with devm_clk_register(). Normally
2701 * this function will not need to be called and the resource management
2702 * code will ensure that the resource is freed.
2704 void devm_clk_unregister(struct device *dev, struct clk *clk)
2706 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
2708 EXPORT_SYMBOL_GPL(devm_clk_unregister);
2713 int __clk_get(struct clk *clk)
2715 struct clk_core *core = !clk ? NULL : clk->core;
2718 if (!try_module_get(core->owner))
2721 kref_get(&core->ref);
2726 void __clk_put(struct clk *clk)
2728 struct module *owner;
2730 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
2735 hlist_del(&clk->clks_node);
2736 if (clk->min_rate > clk->core->req_rate ||
2737 clk->max_rate < clk->core->req_rate)
2738 clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
2740 owner = clk->core->owner;
2741 kref_put(&clk->core->ref, __clk_release);
2743 clk_prepare_unlock();
2750 /*** clk rate change notifiers ***/
2753 * clk_notifier_register - add a clk rate change notifier
2754 * @clk: struct clk * to watch
2755 * @nb: struct notifier_block * with callback info
2757 * Request notification when clk's rate changes. This uses an SRCU
2758 * notifier because we want it to block and notifier unregistrations are
2759 * uncommon. The callbacks associated with the notifier must not
2760 * re-enter the clk framework by calling any top-level clk APIs;
2761 * doing so would recursively take the prepare_lock mutex and deadlock.
2763 * In all notification cases (pre, post and abort rate change) the
2764 * original clock rate is passed to the callback via struct
2765 * clk_notifier_data.old_rate and the new frequency is passed via struct
2766 * clk_notifier_data.new_rate.
2768 * clk_notifier_register() must be called from non-atomic context.
2769 * Returns -EINVAL if called with null arguments, -ENOMEM upon
2770 * allocation failure; otherwise, passes along the return value of
2771 * srcu_notifier_chain_register().
2773 int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
2775 struct clk_notifier *cn;
2783 /* search the list of notifiers for this clk */
2784 list_for_each_entry(cn, &clk_notifier_list, node)
2788 /* if clk wasn't in the notifier list, allocate new clk_notifier */
2789 if (cn->clk != clk) {
2790 cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
2795 srcu_init_notifier_head(&cn->notifier_head);
2797 list_add(&cn->node, &clk_notifier_list);
2800 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
2802 clk->core->notifier_count++;
2805 clk_prepare_unlock();
2809 EXPORT_SYMBOL_GPL(clk_notifier_register);
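/*
 * Example (illustrative sketch, not part of this file): a consumer reacting
 * to rate changes. As noted above, the callback must not call back into the
 * clk API. foo_clk_notify and foo_adjust_for_rate are hypothetical names.
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == POST_RATE_CHANGE)
 *			foo_adjust_for_rate(ndata->new_rate);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_clk_nb = {
 *		.notifier_call = foo_clk_notify,
 *	};
 *
 * and then, from non-atomic context on a previously obtained clk:
 *
 *	ret = clk_notifier_register(clk, &foo_clk_nb);
 */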
2812 * clk_notifier_unregister - remove a clk rate change notifier
2813 * @clk: struct clk *
2814 * @nb: struct notifier_block * with callback info
2816 * Request no further notification for changes to 'clk' and free the memory
2817 * allocated in clk_notifier_register().
2819 * Returns -EINVAL if called with null arguments; otherwise, passes
2820 * along the return value of srcu_notifier_chain_unregister().
2822 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
2824 struct clk_notifier *cn = NULL;
2832 list_for_each_entry(cn, &clk_notifier_list, node)
2836 if (cn->clk == clk) {
2837 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
2839 clk->core->notifier_count--;
2841 /* XXX the notifier code should handle this better */
2842 if (!cn->notifier_head.head) {
2843 srcu_cleanup_notifier_head(&cn->notifier_head);
2844 list_del(&cn->node);
2852 clk_prepare_unlock();
2856 EXPORT_SYMBOL_GPL(clk_notifier_unregister);
2860 * struct of_clk_provider - Clock provider registration structure
2861 * @link: Entry in global list of clock providers
2862 * @node: Pointer to device tree node of clock provider
2863 * @get: Get clock callback. Returns NULL or a struct clk for the
2864 * given clock specifier
2865 * @data: context pointer to be passed into @get callback
2867 struct of_clk_provider {
2868 struct list_head link;
2870 struct device_node *node;
2871 struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
2875 static const struct of_device_id __clk_of_table_sentinel
2876 __used __section(__clk_of_table_end);
2878 static LIST_HEAD(of_clk_providers);
2879 static DEFINE_MUTEX(of_clk_mutex);
2881 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
2886 EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
2888 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
2890 struct clk_onecell_data *clk_data = data;
2891 unsigned int idx = clkspec->args[0];
2893 if (idx >= clk_data->clk_num) {
2894 pr_err("%s: invalid clock index %d\n", __func__, idx);
2895 return ERR_PTR(-EINVAL);
2898 return clk_data->clks[idx];
2900 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
2903 * of_clk_add_provider() - Register a clock provider for a node
2904 * @np: Device node pointer associated with clock provider
2905 * @clk_src_get: callback for decoding clock
2906 * @data: context pointer for @clk_src_get callback.
2908 int of_clk_add_provider(struct device_node *np,
2909 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
2913 struct of_clk_provider *cp;
2916 cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
2920 cp->node = of_node_get(np);
2922 cp->get = clk_src_get;
2924 mutex_lock(&of_clk_mutex);
2925 list_add(&cp->link, &of_clk_providers);
2926 mutex_unlock(&of_clk_mutex);
2927 pr_debug("Added clock from %s\n", np->full_name);
2929 ret = of_clk_set_defaults(np, true);
2931 of_clk_del_provider(np);
2935 EXPORT_SYMBOL_GPL(of_clk_add_provider);
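/*
 * Example (illustrative sketch, not part of this file): exposing several
 * clocks to device tree consumers through the generic one-cell mapping.
 * foo_clks and FOO_NR_CLKS are hypothetical.
 *
 *	static struct clk *foo_clks[FOO_NR_CLKS];
 *	static struct clk_onecell_data foo_clk_data = {
 *		.clks = foo_clks,
 *		.clk_num = FOO_NR_CLKS,
 *	};
 *
 *	ret = of_clk_add_provider(np, of_clk_src_onecell_get, &foo_clk_data);
 *
 * A consumer node then references "clocks = <&foo_provider 3>;" and
 * of_clk_src_onecell_get() translates the cell value 3 into foo_clks[3].
 */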
2938 * of_clk_del_provider() - Remove a previously registered clock provider
2939 * @np: Device node pointer associated with clock provider
2941 void of_clk_del_provider(struct device_node *np)
2943 struct of_clk_provider *cp;
2945 mutex_lock(&of_clk_mutex);
2946 list_for_each_entry(cp, &of_clk_providers, link) {
2947 if (cp->node == np) {
2948 list_del(&cp->link);
2949 of_node_put(cp->node);
2954 mutex_unlock(&of_clk_mutex);
2956 EXPORT_SYMBOL_GPL(of_clk_del_provider);
2958 struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
2959 const char *dev_id, const char *con_id)
2961 struct of_clk_provider *provider;
2962 struct clk *clk = ERR_PTR(-EPROBE_DEFER);
2965 return ERR_PTR(-EINVAL);
2967 /* Check if we have such a provider in our array */
2968 mutex_lock(&of_clk_mutex);
2969 list_for_each_entry(provider, &of_clk_providers, link) {
2970 if (provider->node == clkspec->np)
2971 clk = provider->get(clkspec, provider->data);
2973 clk = __clk_create_clk(__clk_get_hw(clk), dev_id,
2976 if (!IS_ERR(clk) && !__clk_get(clk)) {
2977 __clk_free_clk(clk);
2978 clk = ERR_PTR(-ENOENT);
2984 mutex_unlock(&of_clk_mutex);
2990 * of_clk_get_from_provider() - Lookup a clock from a clock provider
2991 * @clkspec: pointer to a clock specifier data structure
2993 * This function looks up a struct clk from the registered list of clock
2994 * providers; the input is a clock specifier data structure as returned
2995 * by the of_parse_phandle_with_args() function call.
2997 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
2999 return __of_clk_get_from_provider(clkspec, NULL, __func__);
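/*
 * Example (illustrative sketch, not part of this file): resolving the first
 * entry of a node's "clocks" property by hand. Most drivers should simply use
 * of_clk_get() or clk_get() instead.
 *
 *	struct of_phandle_args clkspec;
 *	struct clk *clk;
 *	int ret;
 *
 *	ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0,
 *					 &clkspec);
 *	if (ret)
 *		return ret;
 *
 *	clk = of_clk_get_from_provider(&clkspec);
 *	of_node_put(clkspec.np);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */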
3002 int of_clk_get_parent_count(struct device_node *np)
3004 return of_count_phandle_with_args(np, "clocks", "#clock-cells");
3006 EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
3008 const char *of_clk_get_parent_name(struct device_node *np, int index)
3010 struct of_phandle_args clkspec;
3011 struct property *prop;
3012 const char *clk_name;
3021 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
3026 index = clkspec.args_count ? clkspec.args[0] : 0;
3029	/* if there is an indices property, use it to translate the index
3030	 * specified into an array offset for the clock-output-names property.
3032 of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
3040 if (of_property_read_string_index(clkspec.np, "clock-output-names",
3043 clk_name = clkspec.np->name;
3045 of_node_put(clkspec.np);
3048 EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
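/*
 * Example (illustrative sketch, not part of this file): collecting parent
 * names for a mux-style provider before registering it, typically from an
 * of_clk_init() callback. The parents[] sizing is hypothetical.
 *
 *	const char *parents[4];
 *	int i, num_parents;
 *
 *	num_parents = of_clk_get_parent_count(np);
 *	if (num_parents < 1 || num_parents > ARRAY_SIZE(parents))
 *		return;
 *
 *	for (i = 0; i < num_parents; i++)
 *		parents[i] = of_clk_get_parent_name(np, i);
 */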
3050 struct clock_provider {
3051 of_clk_init_cb_t clk_init_cb;
3052 struct device_node *np;
3053 struct list_head node;
3056 static LIST_HEAD(clk_provider_list);
3059 * This function looks for a parent clock. If there is one, it checks
3060 * whether the provider for this parent clock has been initialized; if
3061 * so, the parent clock is considered ready.
3063 static int parent_ready(struct device_node *np)
3068 struct clk *clk = of_clk_get(np, i);
3070		/* this parent is ready, so check the next one */
3077 /* at least one parent is not ready, we exit now */
3078 if (PTR_ERR(clk) == -EPROBE_DEFER)
3082		 * Here we assume that the device tree is written
3083		 * correctly, so any other error means that there are
3084		 * no more parents. Since we have not bailed out yet,
3085		 * the previously checked parents are all ready. If
3086		 * the clock has no parents at all, there is nothing
3087		 * to wait for and it can be considered ready as well.
3094 * of_clk_init() - Scan and init clock providers from the DT
3095 * @matches: array of compatible values and init functions for providers.
3097 * This function scans the device tree for matching clock providers
3098 * and calls their initialization functions, attempting to follow the
3099 * dependencies between providers when ordering the calls.
3101 void __init of_clk_init(const struct of_device_id *matches)
3103 const struct of_device_id *match;
3104 struct device_node *np;
3105 struct clock_provider *clk_provider, *next;
3110 matches = &__clk_of_table;
3112 /* First prepare the list of the clocks providers */
3113 for_each_matching_node_and_match(np, matches, &match) {
3114 struct clock_provider *parent =
3115 kzalloc(sizeof(struct clock_provider), GFP_KERNEL);
3117 parent->clk_init_cb = match->data;
3119 list_add_tail(&parent->node, &clk_provider_list);
3122 while (!list_empty(&clk_provider_list)) {
3123 is_init_done = false;
3124 list_for_each_entry_safe(clk_provider, next,
3125 &clk_provider_list, node) {
3126 if (force || parent_ready(clk_provider->np)) {
3128 clk_provider->clk_init_cb(clk_provider->np);
3129 of_clk_set_defaults(clk_provider->np, true);
3131 list_del(&clk_provider->node);
3132 kfree(clk_provider);
3133 is_init_done = true;
3138 * We didn't manage to initialize any of the
3139 * remaining providers during the last loop, so now we
3140 * initialize all the remaining ones unconditionally
3141			 * in case the clock parents were not mandatory