	clk_core_unprepare_lock(core);
}

-static void __init clk_unprepare_unused_subtree(struct clk_core *core)
+static void clk_unprepare_unused_subtree(struct clk_core *core,
+					 struct device *dev)
{
+	bool from_sync_state = !!dev;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
-		clk_unprepare_unused_subtree(child);
+		clk_unprepare_unused_subtree(child, dev);
+
+	if (from_sync_state && core->dev != dev)
+		return;
+
+	/*
+	 * The clock will be unprepared from its provider's sync_state
+	 * callback, so leave it as is for now.
+	 */
+	if (!from_sync_state && dev_has_sync_state(core->dev))
+		return;

	if (core->prepare_count)
		return;

	clk_pm_runtime_put(core);
}
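
/*
 * For reference: both subtree walkers key off dev_has_sync_state() from
 * <linux/device.h>, which is essentially:
 *
 *	static inline bool dev_has_sync_state(struct device *dev)
 *	{
 *		if (!dev)
 *			return false;
 *		if (dev->driver && dev->driver->sync_state)
 *			return true;
 *		if (dev->bus && dev->bus->sync_state)
 *			return true;
 *		return false;
 *	}
 *
 * During the boot-time sweep (dev == NULL), any clock whose provider has a
 * sync_state() callback is skipped; it is swept later, when that provider's
 * sync_state() fires and passes the provider device in via dev.
 */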

-static void __init clk_disable_unused_subtree(struct clk_core *core)
+static void clk_disable_unused_subtree(struct clk_core *core,
+				       struct device *dev)
{
+	bool from_sync_state = !!dev;
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
-		clk_disable_unused_subtree(child);
+		clk_disable_unused_subtree(child, dev);
+
+	if (from_sync_state && core->dev != dev)
+		return;
+
+	/*
+	 * The clock will be disabled from its provider's sync_state
+	 * callback, so leave it as is for now.
+	 */
+	if (!from_sync_state && dev_has_sync_state(core->dev))
+		return;

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(core->parent);

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(core->parent);
}
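
/*
 * For reference: CLK_OPS_PARENT_ENABLE marks clocks whose gate ops can
 * only be issued while the parent clock is running, which is why the
 * walker brackets the actual gating with clk_core_prepare_enable() and
 * clk_core_disable_unprepare() on core->parent.
 */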

-static bool clk_ignore_unused __initdata;
+static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

-static int __init clk_disable_unused(void)
+static void __clk_disable_unused(struct device *dev)
{
	struct clk_core *core;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
-		return 0;
+		return;
	}

	clk_prepare_lock();

	hlist_for_each_entry(core, &clk_root_list, child_node)
-		clk_disable_unused_subtree(core);
+		clk_disable_unused_subtree(core, dev);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
-		clk_disable_unused_subtree(core);
+		clk_disable_unused_subtree(core, dev);

	hlist_for_each_entry(core, &clk_root_list, child_node)
-		clk_unprepare_unused_subtree(core);
+		clk_unprepare_unused_subtree(core, dev);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
-		clk_unprepare_unused_subtree(core);
+		clk_unprepare_unused_subtree(core, dev);

	clk_prepare_unlock();
+}
+
+static int __init clk_disable_unused(void)
+{
+	__clk_disable_unused(NULL);
+
	return 0;
}
late_initcall_sync(clk_disable_unused);
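+
+/**
+ * clk_sync_state_disable_unused - disable a provider's unused clocks
+ * @dev: clock provider device whose sync_state callback has fired
+ *
+ * Forwards to __clk_disable_unused() with a non-NULL device so that the
+ * sweep only gates clocks owned by @dev.
+ */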
+void clk_sync_state_disable_unused(struct device *dev)
+{
+	__clk_disable_unused(dev);
+}
+EXPORT_SYMBOL_GPL(clk_sync_state_disable_unused);
+
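/*
 * Usage sketch (hypothetical provider driver; the foo_* names are made up
 * for illustration): a clock provider opts in by installing a sync_state
 * callback that forwards its device to clk_sync_state_disable_unused().
 * The driver core invokes sync_state() once all of the device's consumers
 * have probed, so the provider's unclaimed clocks stay on exactly as long
 * as a late-probing consumer might still need them.
 *
 *	static void foo_clk_sync_state(struct device *dev)
 *	{
 *		clk_sync_state_disable_unused(dev);
 *	}
 *
 *	static struct platform_driver foo_clk_driver = {
 *		.probe = foo_clk_probe,
 *		.driver = {
 *			.name = "foo-clk",
 *			.sync_state = foo_clk_sync_state,
 *		},
 *	};
 */
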
static int clk_core_determine_round_nolock(struct clk_core *core,
					   struct clk_rate_request *req)
{