author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:35:05 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:31 +0000
commit:    85c675d0d09a45a135bddd15d7b385f8758c32fb
tree:      76267dbc9b9a130337be3640948fe397b04ac629  /drivers/clk/clk-scmi.c
parent:    Adding upstream version 6.6.15.
download:  linux-85c675d0d09a45a135bddd15d7b385f8758c32fb.tar.xz, linux-85c675d0d09a45a135bddd15d7b385f8758c32fb.zip
Adding upstream version 6.7.7. (upstream/6.7.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/clk/clk-scmi.c')
-rw-r--r--  drivers/clk/clk-scmi.c | 95
1 file changed, 87 insertions(+), 8 deletions(-)
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index fdec715c9b..8cbe24789c 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -13,13 +13,18 @@
 #include <linux/scmi_protocol.h>
 #include <asm/div64.h>
 
+#define NOT_ATOMIC      false
+#define ATOMIC          true
+
 static const struct scmi_clk_proto_ops *scmi_proto_clk_ops;
 
 struct scmi_clk {
         u32 id;
+        struct device *dev;
         struct clk_hw hw;
         const struct scmi_clock_info *info;
         const struct scmi_protocol_handle *ph;
+        struct clk_parent_data *parent_data;
 };
 
 #define to_scmi_clk(clk) container_of(clk, struct scmi_clk, hw)
@@ -74,38 +79,89 @@ static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
         return scmi_proto_clk_ops->rate_set(clk->ph, clk->id, rate);
 }
 
+static int scmi_clk_set_parent(struct clk_hw *hw, u8 parent_index)
+{
+        struct scmi_clk *clk = to_scmi_clk(hw);
+
+        return scmi_proto_clk_ops->parent_set(clk->ph, clk->id, parent_index);
+}
+
+static u8 scmi_clk_get_parent(struct clk_hw *hw)
+{
+        struct scmi_clk *clk = to_scmi_clk(hw);
+        u32 parent_id, p_idx;
+        int ret;
+
+        ret = scmi_proto_clk_ops->parent_get(clk->ph, clk->id, &parent_id);
+        if (ret)
+                return 0;
+
+        for (p_idx = 0; p_idx < clk->info->num_parents; p_idx++) {
+                if (clk->parent_data[p_idx].index == parent_id)
+                        break;
+        }
+
+        if (p_idx == clk->info->num_parents)
+                return 0;
+
+        return p_idx;
+}
+
+static int scmi_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
+{
+        /*
+         * Suppose all the requested rates are supported, and let firmware
+         * to handle the left work.
+         */
+        return 0;
+}
+
 static int scmi_clk_enable(struct clk_hw *hw)
 {
         struct scmi_clk *clk = to_scmi_clk(hw);
 
-        return scmi_proto_clk_ops->enable(clk->ph, clk->id);
+        return scmi_proto_clk_ops->enable(clk->ph, clk->id, NOT_ATOMIC);
 }
 
 static void scmi_clk_disable(struct clk_hw *hw)
 {
         struct scmi_clk *clk = to_scmi_clk(hw);
 
-        scmi_proto_clk_ops->disable(clk->ph, clk->id);
+        scmi_proto_clk_ops->disable(clk->ph, clk->id, NOT_ATOMIC);
 }
 
 static int scmi_clk_atomic_enable(struct clk_hw *hw)
 {
         struct scmi_clk *clk = to_scmi_clk(hw);
 
-        return scmi_proto_clk_ops->enable_atomic(clk->ph, clk->id);
+        return scmi_proto_clk_ops->enable(clk->ph, clk->id, ATOMIC);
 }
 
 static void scmi_clk_atomic_disable(struct clk_hw *hw)
 {
         struct scmi_clk *clk = to_scmi_clk(hw);
 
-        scmi_proto_clk_ops->disable_atomic(clk->ph, clk->id);
+        scmi_proto_clk_ops->disable(clk->ph, clk->id, ATOMIC);
+}
+
+static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
+{
+        int ret;
+        bool enabled = false;
+        struct scmi_clk *clk = to_scmi_clk(hw);
+
+        ret = scmi_proto_clk_ops->state_get(clk->ph, clk->id, &enabled, ATOMIC);
+        if (ret)
+                dev_warn(clk->dev,
+                         "Failed to get state for clock ID %d\n", clk->id);
+
+        return !!enabled;
 }
 
 /*
- * We can provide enable/disable atomic callbacks only if the underlying SCMI
- * transport for an SCMI instance is configured to handle SCMI commands in an
- * atomic manner.
+ * We can provide enable/disable/is_enabled atomic callbacks only if the
+ * underlying SCMI transport for an SCMI instance is configured to handle
+ * SCMI commands in an atomic manner.
  *
  * When no SCMI atomic transport support is available we instead provide only
  * the prepare/unprepare API, as allowed by the clock framework when atomic
@@ -121,6 +177,9 @@ static const struct clk_ops scmi_clk_ops = {
         .set_rate = scmi_clk_set_rate,
         .prepare = scmi_clk_enable,
         .unprepare = scmi_clk_disable,
+        .set_parent = scmi_clk_set_parent,
+        .get_parent = scmi_clk_get_parent,
+        .determine_rate = scmi_clk_determine_rate,
 };
 
 static const struct clk_ops scmi_atomic_clk_ops = {
@@ -129,6 +188,10 @@ static const struct clk_ops scmi_atomic_clk_ops = {
         .set_rate = scmi_clk_set_rate,
         .enable = scmi_clk_atomic_enable,
         .disable = scmi_clk_atomic_disable,
+        .is_enabled = scmi_clk_atomic_is_enabled,
+        .set_parent = scmi_clk_set_parent,
+        .get_parent = scmi_clk_get_parent,
+        .determine_rate = scmi_clk_determine_rate,
 };
 
 static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
@@ -139,9 +202,10 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
 
         struct clk_init_data init = {
                 .flags = CLK_GET_RATE_NOCACHE,
-                .num_parents = 0,
+                .num_parents = sclk->info->num_parents,
                 .ops = scmi_ops,
                 .name = sclk->info->name,
+                .parent_data = sclk->parent_data,
         };
 
         sclk->hw.init = &init;
@@ -219,6 +283,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
 
                 sclk->id = idx;
                 sclk->ph = ph;
+                sclk->dev = dev;
 
                 /*
                  * Note that when transport is atomic but SCMI protocol did not
@@ -231,9 +296,23 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
                 else
                         scmi_ops = &scmi_clk_ops;
 
+                /* Initialize clock parent data. */
+                if (sclk->info->num_parents > 0) {
+                        sclk->parent_data = devm_kcalloc(dev, sclk->info->num_parents,
+                                                         sizeof(*sclk->parent_data), GFP_KERNEL);
+                        if (!sclk->parent_data)
+                                return -ENOMEM;
+
+                        for (int i = 0; i < sclk->info->num_parents; i++) {
+                                sclk->parent_data[i].index = sclk->info->parents[i];
+                                sclk->parent_data[i].hw = hws[sclk->info->parents[i]];
+                        }
+                }
+
                 err = scmi_clk_ops_init(dev, sclk, scmi_ops);
                 if (err) {
                         dev_err(dev, "failed to register clock %d\n", idx);
+                        devm_kfree(dev, sclk->parent_data);
                         devm_kfree(dev, sclk);
                         hws[idx] = NULL;
                 } else {
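
For readers following the change, here is a minimal consumer-side sketch (not part of this commit) of how the new callbacks are reached through the generic clock API. The device pointer and the clock names "func" and "pll_a" are made up for illustration only:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer: exercises the parent-selection path added above. */
static int example_scmi_clk_consumer(struct device *dev)
{
        struct clk *clk, *parent;
        int ret;

        clk = devm_clk_get(dev, "func");        /* illustrative clock name */
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        parent = devm_clk_get(dev, "pll_a");    /* illustrative parent name */
        if (IS_ERR(parent))
                return PTR_ERR(parent);

        /*
         * clk_set_parent() resolves the parent to an index in parent_data and
         * lands in scmi_clk_set_parent(), which forwards that index to the
         * SCMI firmware through parent_set().
         */
        ret = clk_set_parent(clk, parent);
        if (ret)
                return ret;

        /*
         * clk_prepare_enable() reaches either prepare/unprepare (scmi_clk_ops)
         * or enable/disable (scmi_atomic_clk_ops), depending on whether the
         * SCMI transport handles commands atomically.
         */
        return clk_prepare_enable(clk);
}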