ANDROID: GKI: clk: Add support for clock providers with sync state

Clock providers whose drivers implement sync_state() will disable their
clocks once all of their consumers have probed. So during
late_initcall_sync(), don't disable the unused clocks of these clock
providers.

Also, provide a clk_sync_state() API that clock providers can use to
disable all their unused clocks once they get their sync_state()
callback.

Bug: 144066914
Signed-off-by: Saravana Kannan <saravanak@google.com>
Change-Id: Id01bfc15347812faa19802463c088db162c7d117
This commit is contained in:
Saravana Kannan 2019-11-06 12:21:35 -08:00
parent 82e8a5fba7
commit 7a9ed11ba8
2 changed files with 68 additions and 0 deletions

View File

@ -72,6 +72,8 @@ struct clk_core {
unsigned long flags;
bool orphan;
bool rpm_enabled;
bool need_sync;
bool boot_enabled;
unsigned int enable_count;
unsigned int prepare_count;
unsigned int protect_count;
@ -1300,6 +1302,38 @@ static int clk_disable_unused(void)
}
late_initcall_sync(clk_disable_unused);
/*
 * Walk @core's subtree depth-first (children before the clock itself) and
 * drop the boot-time hold reference, taken in clk_core_hold_state(), on
 * every clock that belongs to provider @dev.
 */
static void clk_unprepare_disable_dev_subtree(struct clk_core *core,
					      struct device *dev)
{
	struct clk_core *c;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(c, &core->children, child_node)
		clk_unprepare_disable_dev_subtree(c, dev);

	/* Only release clocks that this provider is still holding. */
	if (core->need_sync && core->dev == dev)
		clk_core_disable_unprepare(core);
}
/**
 * clk_sync_state - release boot-held clocks of a provider after sync_state
 * @dev: clock provider device whose sync_state() callback has fired
 *
 * Drops the prepare/enable references taken at registration time for every
 * clock owned by @dev, now that all of the provider's consumers have probed.
 */
void clk_sync_state(struct device *dev)
{
	struct hlist_head *lists[] = { &clk_root_list, &clk_orphan_list };
	struct clk_core *core;
	unsigned int i;

	clk_prepare_lock();

	for (i = 0; i < ARRAY_SIZE(lists); i++)
		hlist_for_each_entry(core, lists[i], child_node)
			clk_unprepare_disable_dev_subtree(core, dev);

	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_sync_state);
static int clk_core_determine_round_nolock(struct clk_core *core,
struct clk_rate_request *req)
{
@ -1674,6 +1708,33 @@ static int clk_fetch_parent_index(struct clk_core *core,
return i;
}
static void clk_core_hold_state(struct clk_core *core)
{
if (core->need_sync || !core->boot_enabled)
return;
if (core->orphan || !dev_has_sync_state(core->dev))
return;
if (core->flags & CLK_DONT_HOLD_STATE)
return;
core->need_sync = !clk_core_prepare_enable(core);
}
/*
 * Re-evaluate the hold state of @core and its entire subtree, typically
 * after an orphan clock has been adopted by a parent. Subtrees that are
 * still orphaned are left untouched.
 */
static void __clk_core_update_orphan_hold_state(struct clk_core *core)
{
	struct clk_core *c;

	if (!core->orphan) {
		clk_core_hold_state(core);

		hlist_for_each_entry(c, &core->children, child_node)
			__clk_core_update_orphan_hold_state(c);
	}
}
/*
* Update the orphan status of @core and all its children.
*/
@ -3384,6 +3445,8 @@ static int __clk_core_init(struct clk_core *core)
rate = 0;
core->rate = core->req_rate = rate;
core->boot_enabled = clk_core_is_enabled(core);
/*
* Enable CLK_IS_CRITICAL clocks so newly added critical clocks
* don't get accidentally disabled when walking the orphan tree and
@ -3399,6 +3462,8 @@ static int __clk_core_init(struct clk_core *core)
clk_enable_unlock(flags);
}
clk_core_hold_state(core);
/*
* walk the list of orphan clocks and reparent any that newly finds a
* parent.
@ -3418,6 +3483,7 @@ static int __clk_core_init(struct clk_core *core)
__clk_set_parent_after(orphan, parent, NULL);
__clk_recalc_accuracies(orphan);
__clk_recalc_rates(orphan, 0);
__clk_core_update_orphan_hold_state(orphan);
}
}

View File

@ -32,6 +32,7 @@
#define CLK_OPS_PARENT_ENABLE BIT(12)
/* duty cycle call may be forwarded to the parent clock */
#define CLK_DUTY_CYCLE_PARENT BIT(13)
#define CLK_DONT_HOLD_STATE BIT(14) /* Don't hold the boot-enabled state until sync_state */
struct clk;
struct clk_hw;
@ -815,6 +816,7 @@ void devm_clk_unregister(struct device *dev, struct clk *clk);
void clk_hw_unregister(struct clk_hw *hw);
void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw);
void clk_sync_state(struct device *dev);
/* helper functions */
const char *__clk_get_name(const struct clk *clk);