Diffstat (limited to 'drivers/clk/st')
-rw-r--r--  drivers/clk/st/Makefile       |    2
-rw-r--r--  drivers/clk/st/clk-flexgen.c  |  749
-rw-r--r--  drivers/clk/st/clkgen-fsyn.c  | 1071
-rw-r--r--  drivers/clk/st/clkgen-mux.c   |  111
-rw-r--r--  drivers/clk/st/clkgen-pll.c   |  872
-rw-r--r--  drivers/clk/st/clkgen.h       |   51
6 files changed, 2856 insertions, 0 deletions
diff --git a/drivers/clk/st/Makefile b/drivers/clk/st/Makefile
new file mode 100644
index 0000000000..caf7789376
--- /dev/null
+++ b/drivers/clk/st/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += clkgen-mux.o clkgen-pll.o clkgen-fsyn.o clk-flexgen.o
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
new file mode 100644
index 0000000000..5292208c4d
--- /dev/null
+++ b/drivers/clk/st/clk-flexgen.c
@@ -0,0 +1,749 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * clk-flexgen.c
+ *
+ * Copyright (C) ST-Microelectronics SA 2013
+ * Author: Maxime Coquelin <maxime.coquelin@st.com> for ST-Microelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+struct clkgen_clk_out {
+ const char *name;
+ unsigned long flags;
+};
+
+struct clkgen_data {
+ unsigned long flags;
+ bool mode;
+ const struct clkgen_clk_out *outputs;
+ const unsigned int outputs_nb;
+};
+
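+/*
+ * Each flexgen output is modelled as a chain of basic clock elements that
+ * share this driver's register block: crossbar mux -> pre-divider gate ->
+ * pre-divider -> final divider gate -> final divider, plus a control bit
+ * for the (a)synchronous mode of the final stage.
+ */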
+struct flexgen {
+ struct clk_hw hw;
+
+ /* Crossbar */
+ struct clk_mux mux;
+ /* Pre-divisor's gate */
+ struct clk_gate pgate;
+ /* Pre-divisor */
+ struct clk_divider pdiv;
+ /* Final divisor's gate */
+ struct clk_gate fgate;
+ /* Final divisor */
+ struct clk_divider fdiv;
+ /* Asynchronous mode control */
+ struct clk_gate sync;
+ /* hw control flags */
+ bool control_mode;
+};
+
+#define to_flexgen(_hw) container_of(_hw, struct flexgen, hw)
+#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
+
+static int flexgen_enable(struct clk_hw *hw)
+{
+ struct flexgen *flexgen = to_flexgen(hw);
+ struct clk_hw *pgate_hw = &flexgen->pgate.hw;
+ struct clk_hw *fgate_hw = &flexgen->fgate.hw;
+
+ __clk_hw_set_clk(pgate_hw, hw);
+ __clk_hw_set_clk(fgate_hw, hw);
+
+ clk_gate_ops.enable(pgate_hw);
+
+ clk_gate_ops.enable(fgate_hw);
+
+ pr_debug("%s: flexgen output enabled\n", clk_hw_get_name(hw));
+ return 0;
+}
+
+static void flexgen_disable(struct clk_hw *hw)
+{
+ struct flexgen *flexgen = to_flexgen(hw);
+ struct clk_hw *fgate_hw = &flexgen->fgate.hw;
+
+ /* disable only the final gate */
+ __clk_hw_set_clk(fgate_hw, hw);
+
+ clk_gate_ops.disable(fgate_hw);
+
+ pr_debug("%s: flexgen output disabled\n", clk_hw_get_name(hw));
+}
+
+static int flexgen_is_enabled(struct clk_hw *hw)
+{
+ struct flexgen *flexgen = to_flexgen(hw);
+ struct clk_hw *fgate_hw = &flexgen->fgate.hw;
+
+ __clk_hw_set_clk(fgate_hw, hw);
+
+ if (!clk_gate_ops.is_enabled(fgate_hw))
+ return 0;
+
+ return 1;
+}
+
+static u8 flexgen_get_parent(struct clk_hw *hw)
+{
+ struct flexgen *flexgen = to_flexgen(hw);
+ struct clk_hw *mux_hw = &flexgen->mux.hw;
+
+ __clk_hw_set_clk(mux_hw, hw);
+
+ return clk_mux_ops.get_parent(mux_hw);
+}
+
+static int flexgen_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct flexgen *flexgen = to_flexgen(hw);
+ struct clk_hw *mux_hw = &flexgen->mux.hw;
+
+ __clk_hw_set_clk(mux_hw, hw);
+
+ return clk_mux_ops.set_parent(mux_hw, index);
+}
+
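+/*
+ * Round-to-nearest integer divider. For example, with parent_rate = 600 MHz
+ * and rate = 80 MHz: 600 / 80 = 7 remainder 40, and since 80 <= 2 * 40 the
+ * divider is rounded up to 8, giving an output of 75 MHz.
+ */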
+static inline unsigned long
+clk_best_div(unsigned long parent_rate, unsigned long rate)
+{
+ return parent_rate / rate + ((rate > (2*(parent_rate % rate))) ? 0 : 1);
+}
+
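+/*
+ * With CLK_SET_RATE_PARENT the requested rate is achieved by asking the
+ * parent for rate * div; otherwise the parent rate is treated as fixed and
+ * only the local dividers are adjusted.
+ */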
+static int flexgen_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ unsigned long div;
+
+ /* Round div according to exact prate and wished rate */
+ div = clk_best_div(req->best_parent_rate, req->rate);
+
+ if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
+ req->best_parent_rate = req->rate * div;
+ return 0;
+ }
+
+ req->rate = req->best_parent_rate / div;
+ return 0;
+}
+
+static unsigned long flexgen_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct flexgen *flexgen = to_flexgen(hw);
+ struct clk_hw *pdiv_hw = &flexgen->pdiv.hw;
+ struct clk_hw *fdiv_hw = &flexgen->fdiv.hw;
+ unsigned long mid_rate;
+
+ __clk_hw_set_clk(pdiv_hw, hw);
+ __clk_hw_set_clk(fdiv_hw, hw);
+
+ mid_rate = clk_divider_ops.recalc_rate(pdiv_hw, parent_rate);
+
+ return clk_divider_ops.recalc_rate(fdiv_hw, mid_rate);
+}
+
+static int flexgen_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct flexgen *flexgen = to_flexgen(hw);
+ struct clk_hw *pdiv_hw = &flexgen->pdiv.hw;
+ struct clk_hw *fdiv_hw = &flexgen->fdiv.hw;
+ struct clk_hw *sync_hw = &flexgen->sync.hw;
+ struct clk_gate *config = to_clk_gate(sync_hw);
+ unsigned long div = 0;
+ int ret = 0;
+ u32 reg;
+
+ __clk_hw_set_clk(pdiv_hw, hw);
+ __clk_hw_set_clk(fdiv_hw, hw);
+
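+ /*
+ * For outputs under hw control ("mode" set in the compatible data),
+ * clear the sync control bit (bit 7 of the final divider register)
+ * before reprogramming the dividers.
+ */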
+ if (flexgen->control_mode) {
+ reg = readl(config->reg);
+ reg &= ~BIT(config->bit_idx);
+ writel(reg, config->reg);
+ }
+
+ div = clk_best_div(parent_rate, rate);
+
+ /*
+ * pdiv is mainly targeted for low freq results, while fdiv
+ * should be used for div <= 64. The other way round can
+ * lead to 'duty cycle' issues.
+ */
+
+ if (div <= 64) {
+ clk_divider_ops.set_rate(pdiv_hw, parent_rate, parent_rate);
+ ret = clk_divider_ops.set_rate(fdiv_hw, rate, rate * div);
+ } else {
+ clk_divider_ops.set_rate(fdiv_hw, parent_rate, parent_rate);
+ ret = clk_divider_ops.set_rate(pdiv_hw, rate, rate * div);
+ }
+
+ return ret;
+}
+
+static const struct clk_ops flexgen_ops = {
+ .enable = flexgen_enable,
+ .disable = flexgen_disable,
+ .is_enabled = flexgen_is_enabled,
+ .get_parent = flexgen_get_parent,
+ .set_parent = flexgen_set_parent,
+ .determine_rate = flexgen_determine_rate,
+ .recalc_rate = flexgen_recalc_rate,
+ .set_rate = flexgen_set_rate,
+};
+
+static struct clk *clk_register_flexgen(const char *name,
+ const char **parent_names, u8 num_parents,
+ void __iomem *reg, spinlock_t *lock, u32 idx,
+ unsigned long flexgen_flags, bool mode)
+{
+ struct flexgen *fgxbar;
+ struct clk *clk;
+ struct clk_init_data init;
+ u32 xbar_shift;
+ void __iomem *xbar_reg, *fdiv_reg;
+
+ fgxbar = kzalloc(sizeof(struct flexgen), GFP_KERNEL);
+ if (!fgxbar)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &flexgen_ops;
+ init.flags = CLK_GET_RATE_NOCACHE | flexgen_flags;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
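+ /*
+ * Register layout implied by the offsets below: each 32-bit crossbar
+ * register starting at 0x18 packs four outputs (8 bits each: 6-bit
+ * parent select plus the pre-divider gate bit), pre-dividers start at
+ * 0x58 and the per-output final divider registers start at 0x164.
+ */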
+ xbar_reg = reg + 0x18 + (idx & ~0x3);
+ xbar_shift = (idx % 4) * 0x8;
+ fdiv_reg = reg + 0x164 + idx * 4;
+
+ /* Crossbar element config */
+ fgxbar->mux.lock = lock;
+ fgxbar->mux.mask = BIT(6) - 1;
+ fgxbar->mux.reg = xbar_reg;
+ fgxbar->mux.shift = xbar_shift;
+ fgxbar->mux.table = NULL;
+
+
+ /* Pre-divider's gate config (in xbar register)*/
+ fgxbar->pgate.lock = lock;
+ fgxbar->pgate.reg = xbar_reg;
+ fgxbar->pgate.bit_idx = xbar_shift + 6;
+
+ /* Pre-divider config */
+ fgxbar->pdiv.lock = lock;
+ fgxbar->pdiv.reg = reg + 0x58 + idx * 4;
+ fgxbar->pdiv.width = 10;
+
+ /* Final divider's gate config */
+ fgxbar->fgate.lock = lock;
+ fgxbar->fgate.reg = fdiv_reg;
+ fgxbar->fgate.bit_idx = 6;
+
+ /* Final divider config */
+ fgxbar->fdiv.lock = lock;
+ fgxbar->fdiv.reg = fdiv_reg;
+ fgxbar->fdiv.width = 6;
+
+ /* Final divider sync config */
+ fgxbar->sync.lock = lock;
+ fgxbar->sync.reg = fdiv_reg;
+ fgxbar->sync.bit_idx = 7;
+
+ fgxbar->control_mode = mode;
+
+ fgxbar->hw.init = &init;
+
+ clk = clk_register(NULL, &fgxbar->hw);
+ if (IS_ERR(clk))
+ kfree(fgxbar);
+ else
+ pr_debug("%s: parent %s rate %u\n",
+ __clk_get_name(clk),
+ __clk_get_name(clk_get_parent(clk)),
+ (unsigned int)clk_get_rate(clk));
+ return clk;
+}
+
+static const char ** __init flexgen_get_parents(struct device_node *np,
+ int *num_parents)
+{
+ const char **parents;
+ unsigned int nparents;
+
+ nparents = of_clk_get_parent_count(np);
+ if (WARN_ON(!nparents))
+ return NULL;
+
+ parents = kcalloc(nparents, sizeof(const char *), GFP_KERNEL);
+ if (!parents)
+ return NULL;
+
+ *num_parents = of_clk_parent_fill(np, parents, nparents);
+
+ return parents;
+}
+
+static const struct clkgen_data clkgen_audio = {
+ .flags = CLK_SET_RATE_PARENT,
+};
+
+static const struct clkgen_data clkgen_video = {
+ .flags = CLK_SET_RATE_PARENT,
+ .mode = 1,
+};
+
+static const struct clkgen_clk_out clkgen_stih407_a0_clk_out[] = {
+ /* This clk needs to be on so that memory interface is accessible */
+ { .name = "clk-ic-lmi0", .flags = CLK_IS_CRITICAL },
+};
+
+static const struct clkgen_data clkgen_stih407_a0 = {
+ .outputs = clkgen_stih407_a0_clk_out,
+ .outputs_nb = ARRAY_SIZE(clkgen_stih407_a0_clk_out),
+};
+
+static const struct clkgen_clk_out clkgen_stih410_a0_clk_out[] = {
+ /* Those clks need to be on so that memory interface is accessible */
+ { .name = "clk-ic-lmi0", .flags = CLK_IS_CRITICAL },
+ { .name = "clk-ic-lmi1", .flags = CLK_IS_CRITICAL },
+};
+
+static const struct clkgen_data clkgen_stih410_a0 = {
+ .outputs = clkgen_stih410_a0_clk_out,
+ .outputs_nb = ARRAY_SIZE(clkgen_stih410_a0_clk_out),
+};
+
+static const struct clkgen_clk_out clkgen_stih407_c0_clk_out[] = {
+ { .name = "clk-icn-gpu", },
+ { .name = "clk-fdma", },
+ { .name = "clk-nand", },
+ { .name = "clk-hva", },
+ { .name = "clk-proc-stfe", },
+ { .name = "clk-proc-tp", },
+ { .name = "clk-rx-icn-dmu", },
+ { .name = "clk-rx-icn-hva", },
+ /* This clk needs to be on to keep bus interconnect alive */
+ { .name = "clk-icn-cpu", .flags = CLK_IS_CRITICAL },
+ /* This clk needs to be on to keep bus interconnect alive */
+ { .name = "clk-tx-icn-dmu", .flags = CLK_IS_CRITICAL },
+ { .name = "clk-mmc-0", },
+ { .name = "clk-mmc-1", },
+ { .name = "clk-jpegdec", },
+ /* This clk needs to be on to keep A9 running */
+ { .name = "clk-ext2fa9", .flags = CLK_IS_CRITICAL },
+ { .name = "clk-ic-bdisp-0", },
+ { .name = "clk-ic-bdisp-1", },
+ { .name = "clk-pp-dmu", },
+ { .name = "clk-vid-dmu", },
+ { .name = "clk-dss-lpc", },
+ { .name = "clk-st231-aud-0", },
+ { .name = "clk-st231-gp-1", },
+ { .name = "clk-st231-dmu", },
+ /* This clk needs to be on to keep bus interconnect alive */
+ { .name = "clk-icn-lmi", .flags = CLK_IS_CRITICAL },
+ { .name = "clk-tx-icn-disp-1", },
+ /* This clk needs to be on to keep bus interconnect alive */
+ { .name = "clk-icn-sbc", .flags = CLK_IS_CRITICAL },
+ { .name = "clk-stfe-frc2", },
+ { .name = "clk-eth-phy", },
+ { .name = "clk-eth-ref-phyclk", },
+ { .name = "clk-flash-promip", },
+ { .name = "clk-main-disp", },
+ { .name = "clk-aux-disp", },
+ { .name = "clk-compo-dvp", },
+};
+
+static const struct clkgen_data clkgen_stih407_c0 = {
+ .outputs = clkgen_stih407_c0_clk_out,
+ .outputs_nb = ARRAY_SIZE(clkgen_stih407_c0_clk_out),
+};
+
+static const struct clkgen_clk_out clkgen_stih410_c0_clk_out[] = {
+ { .name = "clk-icn-gpu", },
+ { .name = "clk-fdma", },
+ { .name = "clk-nand", },
+ { .name = "clk-hva", },
+ { .name = "clk-proc-stfe", },
+ { .name = "clk-proc-tp", },
+ { .name = "clk-rx-icn-dmu", },
+ { .name = "clk-rx-icn-hva", },
+ /* This clk needs to be on to keep bus interconnect alive */
+ { .name = "clk-icn-cpu", .flags = CLK_IS_CRITICAL },
+ /* This clk needs to be on to keep bus interconnect alive */
+ { .name = "clk-tx-icn-dmu", .flags = CLK_IS_CRITICAL },
+ { .name = "clk-mmc-0", },
+ { .name = "clk-mmc-1", },
+ { .name = "clk-jpegdec", },
+ /* This clk needs to be on to keep A9 running */
+ { .name = "clk-ext2fa9", .flags = CLK_IS_CRITICAL },
+ { .name = "clk-ic-bdisp-0", },
+ { .name = "clk-ic-bdisp-1", },
+ { .name = "clk-pp-dmu", },
+ { .name = "clk-vid-dmu", },
+ { .name = "clk-dss-lpc", },
+ { .name = "clk-st231-aud-0", },
+ { .name = "clk-st231-gp-1", },
+ { .name = "clk-st231-dmu", },
+ /* This clk needs to be on to keep bus interconnect alive */
+ { .name = "clk-icn-lmi", .flags = CLK_IS_CRITICAL },
+ { .name = "clk-tx-icn-disp-1", },
+ /* This clk needs to be on to keep bus interconnect alive */
+ { .name = "clk-icn-sbc", .flags = CLK_IS_CRITICAL },
+ { .name = "clk-stfe-frc2", },
+ { .name = "clk-eth-phy", },
+ { .name = "clk-eth-ref-phyclk", },
+ { .name = "clk-flash-promip", },
+ { .name = "clk-main-disp", },
+ { .name = "clk-aux-disp", },
+ { .name = "clk-compo-dvp", },
+ { .name = "clk-tx-icn-hades", },
+ { .name = "clk-rx-icn-hades", },
+ /* This clk needs to be on to keep bus interconnect alive */
+ { .name = "clk-icn-reg-16", .flags = CLK_IS_CRITICAL },
+ { .name = "clk-pp-hades", },
+ { .name = "clk-clust-hades", },
+ { .name = "clk-hwpe-hades", },
+ { .name = "clk-fc-hades", },
+};
+
+static const struct clkgen_data clkgen_stih410_c0 = {
+ .outputs = clkgen_stih410_c0_clk_out,
+ .outputs_nb = ARRAY_SIZE(clkgen_stih410_c0_clk_out),
+};
+
+static const struct clkgen_clk_out clkgen_stih418_c0_clk_out[] = {
+ { .name = "clk-icn-gpu", },
+ { .name = "clk-fdma", },
+ { .name = "clk-nand", },
+ { .name = "clk-hva", },
+ { .name = "clk-proc-stfe", },
+ { .name = "clk-tp", },
+ /* This clk needs to be on to keep bus interconnect alive */
+ { .name = "clk-rx-icn-dmu", .flags = CLK_IS_CRITICAL },
+ /* This clk needs to be on to keep bus interconnect alive */
+ { .name = "clk-rx-icn-hva", .flags = CLK_IS_CRITICAL },
+ { .name = "clk-icn-cpu", .flags = CLK_IS_CRITICAL },
+ /* This clk needs to be on to keep bus interconnect alive */
+ { .name = "clk-tx-icn-dmu", .flags = CLK_IS_CRITICAL },
+ { .name = "clk-mmc-0", },
+ { .name = "clk-mmc-1", },
+ { .name = "clk-jpegdec", },
+ /* This clk needs to be on to keep bus interconnect alive */
+ { .name = "clk-icn-reg", .flags = CLK_IS_CRITICAL },
+ { .name = "clk-proc-bdisp-0", },
+ { .name = "clk-proc-bdisp-1", },
+ { .name = "clk-pp-dmu", },
+ { .name = "clk-vid-dmu", },
+ { .name = "clk-dss-lpc", },
+ { .name = "clk-st231-aud-0", },
+ { .name = "clk-st231-gp-1", },
+ { .name = "clk-st231-dmu", },
+ /* This clk needs to be on to keep bus interconnect alive */
+ { .name = "clk-icn-lmi", .flags = CLK_IS_CRITICAL },
+ /* This clk needs to be on to keep bus interconnect alive */
+ { .name = "clk-tx-icn-1", .flags = CLK_IS_CRITICAL },
+ /* This clk needs to be on to keep bus interconnect alive */
+ { .name = "clk-icn-sbc", .flags = CLK_IS_CRITICAL },
+ { .name = "clk-stfe-frc2", },
+ { .name = "clk-eth-phyref", },
+ { .name = "clk-eth-ref-phyclk", },
+ { .name = "clk-flash-promip", },
+ { .name = "clk-main-disp", },
+ { .name = "clk-aux-disp", },
+ { .name = "clk-compo-dvp", },
+ /* This clk needs to be on to keep bus interconnect alive */
+ { .name = "clk-tx-icn-hades", .flags = CLK_IS_CRITICAL },
+ /* This clk needs to be on to keep bus interconnect alive */
+ { .name = "clk-rx-icn-hades", .flags = CLK_IS_CRITICAL },
+ /* This clk needs to be on to keep bus interconnect alive */
+ { .name = "clk-icn-reg-16", .flags = CLK_IS_CRITICAL },
+ { .name = "clk-pp-hevc", },
+ { .name = "clk-clust-hevc", },
+ { .name = "clk-hwpe-hevc", },
+ { .name = "clk-fc-hevc", },
+ { .name = "clk-proc-mixer", },
+ { .name = "clk-proc-sc", },
+ { .name = "clk-avsp-hevc", },
+};
+
+static const struct clkgen_data clkgen_stih418_c0 = {
+ .outputs = clkgen_stih418_c0_clk_out,
+ .outputs_nb = ARRAY_SIZE(clkgen_stih418_c0_clk_out),
+};
+
+static const struct clkgen_clk_out clkgen_stih407_d0_clk_out[] = {
+ { .name = "clk-pcm-0", },
+ { .name = "clk-pcm-1", },
+ { .name = "clk-pcm-2", },
+ { .name = "clk-spdiff", },
+};
+
+static const struct clkgen_data clkgen_stih407_d0 = {
+ .flags = CLK_SET_RATE_PARENT,
+ .outputs = clkgen_stih407_d0_clk_out,
+ .outputs_nb = ARRAY_SIZE(clkgen_stih407_d0_clk_out),
+};
+
+static const struct clkgen_clk_out clkgen_stih410_d0_clk_out[] = {
+ { .name = "clk-pcm-0", },
+ { .name = "clk-pcm-1", },
+ { .name = "clk-pcm-2", },
+ { .name = "clk-spdiff", },
+ { .name = "clk-pcmr10-master", },
+ { .name = "clk-usb2-phy", },
+};
+
+static const struct clkgen_data clkgen_stih410_d0 = {
+ .flags = CLK_SET_RATE_PARENT,
+ .outputs = clkgen_stih410_d0_clk_out,
+ .outputs_nb = ARRAY_SIZE(clkgen_stih410_d0_clk_out),
+};
+
+static const struct clkgen_clk_out clkgen_stih407_d2_clk_out[] = {
+ { .name = "clk-pix-main-disp", },
+ { .name = "clk-pix-pip", },
+ { .name = "clk-pix-gdp1", },
+ { .name = "clk-pix-gdp2", },
+ { .name = "clk-pix-gdp3", },
+ { .name = "clk-pix-gdp4", },
+ { .name = "clk-pix-aux-disp", },
+ { .name = "clk-denc", },
+ { .name = "clk-pix-hddac", },
+ { .name = "clk-hddac", },
+ { .name = "clk-sddac", },
+ { .name = "clk-pix-dvo", },
+ { .name = "clk-dvo", },
+ { .name = "clk-pix-hdmi", },
+ { .name = "clk-tmds-hdmi", },
+ { .name = "clk-ref-hdmiphy", },
+};
+
+static const struct clkgen_data clkgen_stih407_d2 = {
+ .outputs = clkgen_stih407_d2_clk_out,
+ .outputs_nb = ARRAY_SIZE(clkgen_stih407_d2_clk_out),
+ .flags = CLK_SET_RATE_PARENT,
+ .mode = 1,
+};
+
+static const struct clkgen_clk_out clkgen_stih418_d2_clk_out[] = {
+ { .name = "clk-pix-main-disp", },
+ { .name = "", },
+ { .name = "", },
+ { .name = "", },
+ { .name = "", },
+ { .name = "clk-tmds-hdmi-div2", },
+ { .name = "clk-pix-aux-disp", },
+ { .name = "clk-denc", },
+ { .name = "clk-pix-hddac", },
+ { .name = "clk-hddac", },
+ { .name = "clk-sddac", },
+ { .name = "clk-pix-dvo", },
+ { .name = "clk-dvo", },
+ { .name = "clk-pix-hdmi", },
+ { .name = "clk-tmds-hdmi", },
+ { .name = "clk-ref-hdmiphy", },
+ { .name = "", }, { .name = "", }, { .name = "", }, { .name = "", },
+ { .name = "", }, { .name = "", }, { .name = "", }, { .name = "", },
+ { .name = "", }, { .name = "", }, { .name = "", }, { .name = "", },
+ { .name = "", }, { .name = "", }, { .name = "", }, { .name = "", },
+ { .name = "", }, { .name = "", }, { .name = "", }, { .name = "", },
+ { .name = "", }, { .name = "", }, { .name = "", }, { .name = "", },
+ { .name = "", }, { .name = "", }, { .name = "", }, { .name = "", },
+ { .name = "", }, { .name = "", }, { .name = "", },
+ { .name = "clk-vp9", },
+};
+
+static const struct clkgen_data clkgen_stih418_d2 = {
+ .outputs = clkgen_stih418_d2_clk_out,
+ .outputs_nb = ARRAY_SIZE(clkgen_stih418_d2_clk_out),
+ .flags = CLK_SET_RATE_PARENT,
+ .mode = 1,
+};
+
+static const struct clkgen_clk_out clkgen_stih407_d3_clk_out[] = {
+ { .name = "clk-stfe-frc1", },
+ { .name = "clk-tsout-0", },
+ { .name = "clk-tsout-1", },
+ { .name = "clk-mchi", },
+ { .name = "clk-vsens-compo", },
+ { .name = "clk-frc1-remote", },
+ { .name = "clk-lpc-0", },
+ { .name = "clk-lpc-1", },
+};
+
+static const struct clkgen_data clkgen_stih407_d3 = {
+ .outputs = clkgen_stih407_d3_clk_out,
+ .outputs_nb = ARRAY_SIZE(clkgen_stih407_d3_clk_out),
+};
+
+static const struct of_device_id flexgen_of_match[] = {
+ {
+ .compatible = "st,flexgen-audio",
+ .data = &clkgen_audio,
+ },
+ {
+ .compatible = "st,flexgen-video",
+ .data = &clkgen_video,
+ },
+ {
+ .compatible = "st,flexgen-stih407-a0",
+ .data = &clkgen_stih407_a0,
+ },
+ {
+ .compatible = "st,flexgen-stih410-a0",
+ .data = &clkgen_stih410_a0,
+ },
+ {
+ .compatible = "st,flexgen-stih407-c0",
+ .data = &clkgen_stih407_c0,
+ },
+ {
+ .compatible = "st,flexgen-stih410-c0",
+ .data = &clkgen_stih410_c0,
+ },
+ {
+ .compatible = "st,flexgen-stih418-c0",
+ .data = &clkgen_stih418_c0,
+ },
+ {
+ .compatible = "st,flexgen-stih407-d0",
+ .data = &clkgen_stih407_d0,
+ },
+ {
+ .compatible = "st,flexgen-stih410-d0",
+ .data = &clkgen_stih410_d0,
+ },
+ {
+ .compatible = "st,flexgen-stih407-d2",
+ .data = &clkgen_stih407_d2,
+ },
+ {
+ .compatible = "st,flexgen-stih418-d2",
+ .data = &clkgen_stih418_d2,
+ },
+ {
+ .compatible = "st,flexgen-stih407-d3",
+ .data = &clkgen_stih407_d3,
+ },
+ {}
+};
+
+static void __init st_of_flexgen_setup(struct device_node *np)
+{
+ struct device_node *pnode;
+ void __iomem *reg;
+ struct clk_onecell_data *clk_data;
+ const char **parents;
+ int num_parents, i;
+ spinlock_t *rlock = NULL;
+ const struct of_device_id *match;
+ struct clkgen_data *data = NULL;
+ unsigned long flex_flags = 0;
+ int ret;
+ bool clk_mode = 0;
+ const char *clk_name;
+
+ pnode = of_get_parent(np);
+ if (!pnode)
+ return;
+
+ reg = of_iomap(pnode, 0);
+ of_node_put(pnode);
+ if (!reg)
+ return;
+
+ parents = flexgen_get_parents(np, &num_parents);
+ if (!parents) {
+ iounmap(reg);
+ return;
+ }
+
+ match = of_match_node(flexgen_of_match, np);
+ if (match) {
+ data = (struct clkgen_data *)match->data;
+ flex_flags = data->flags;
+ clk_mode = data->mode;
+ }
+
+ clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ goto err;
+
+ /* First try to get output information from the compatible data */
+ if (!data || !data->outputs_nb || !data->outputs) {
+ ret = of_property_count_strings(np, "clock-output-names");
+ if (ret <= 0) {
+ pr_err("%s: Failed to get number of output clocks (%d)",
+ __func__, clk_data->clk_num);
+ goto err;
+ }
+ clk_data->clk_num = ret;
+ } else
+ clk_data->clk_num = data->outputs_nb;
+
+ clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *),
+ GFP_KERNEL);
+ if (!clk_data->clks)
+ goto err;
+
+ rlock = kzalloc(sizeof(spinlock_t), GFP_KERNEL);
+ if (!rlock)
+ goto err;
+
+ spin_lock_init(rlock);
+
+ for (i = 0; i < clk_data->clk_num; i++) {
+ struct clk *clk;
+
+ if (!data || !data->outputs_nb || !data->outputs) {
+ if (of_property_read_string_index(np,
+ "clock-output-names",
+ i, &clk_name))
+ break;
+ flex_flags &= ~CLK_IS_CRITICAL;
+ of_clk_detect_critical(np, i, &flex_flags);
+ } else {
+ clk_name = data->outputs[i].name;
+ flex_flags = data->flags | data->outputs[i].flags;
+ }
+
+ /*
+ * If we read an empty clock name then the output is unused
+ */
+ if (*clk_name == '\0')
+ continue;
+
+ clk = clk_register_flexgen(clk_name, parents, num_parents,
+ reg, rlock, i, flex_flags, clk_mode);
+
+ if (IS_ERR(clk))
+ goto err;
+
+ clk_data->clks[i] = clk;
+ }
+
+ kfree(parents);
+ of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
+
+ return;
+
+err:
+ iounmap(reg);
+ if (clk_data)
+ kfree(clk_data->clks);
+ kfree(clk_data);
+ kfree(parents);
+ kfree(rlock);
+}
+CLK_OF_DECLARE(flexgen, "st,flexgen", st_of_flexgen_setup);
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
new file mode 100644
index 0000000000..40df1db102
--- /dev/null
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -0,0 +1,1071 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 STMicroelectronics R&D Ltd
+ */
+
+/*
+ * Authors:
+ * Stephen Gallimore <stephen.gallimore@st.com>,
+ * Pankaj Dev <pankaj.dev@st.com>.
+ */
+
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+#include "clkgen.h"
+
+/*
+ * Maximum input clock to the PLL before we divide it down by 2,
+ * although in practice this has never been seen to be used.
+ */
+#define QUADFS_NDIV_THRESHOLD 30000000
+
+#define PLL_BW_GOODREF (0L)
+#define PLL_BW_VBADREF (1L)
+#define PLL_BW_BADREF (2L)
+#define PLL_BW_VGOODREF (3L)
+
+#define QUADFS_MAX_CHAN 4
+
+struct stm_fs {
+ unsigned long ndiv;
+ unsigned long mdiv;
+ unsigned long pe;
+ unsigned long sdiv;
+ unsigned long nsdiv;
+};
+
+struct clkgen_quadfs_data {
+ bool reset_present;
+ bool bwfilter_present;
+ bool lockstatus_present;
+ bool powerup_polarity;
+ bool standby_polarity;
+ bool nsdiv_present;
+ bool nrst_present;
+ struct clkgen_field ndiv;
+ struct clkgen_field ref_bw;
+ struct clkgen_field nreset;
+ struct clkgen_field npda;
+ struct clkgen_field lock_status;
+
+ struct clkgen_field nrst[QUADFS_MAX_CHAN];
+ struct clkgen_field nsb[QUADFS_MAX_CHAN];
+ struct clkgen_field en[QUADFS_MAX_CHAN];
+ struct clkgen_field mdiv[QUADFS_MAX_CHAN];
+ struct clkgen_field pe[QUADFS_MAX_CHAN];
+ struct clkgen_field sdiv[QUADFS_MAX_CHAN];
+ struct clkgen_field nsdiv[QUADFS_MAX_CHAN];
+
+ const struct clk_ops *pll_ops;
+ int (*get_params)(unsigned long, unsigned long, struct stm_fs *);
+ int (*get_rate)(unsigned long, const struct stm_fs *,
+ unsigned long *);
+};
+
+struct clkgen_clk_out {
+ const char *name;
+ unsigned long flags;
+};
+
+struct clkgen_quadfs_data_clks {
+ struct clkgen_quadfs_data *data;
+ const struct clkgen_clk_out *outputs;
+};
+
+static const struct clk_ops st_quadfs_pll_c32_ops;
+
+static int clk_fs660c32_dig_get_params(unsigned long input,
+ unsigned long output, struct stm_fs *fs);
+static int clk_fs660c32_dig_get_rate(unsigned long, const struct stm_fs *,
+ unsigned long *);
+
+static const struct clkgen_quadfs_data st_fs660c32_C = {
+ .nrst_present = true,
+ .nrst = { CLKGEN_FIELD(0x2f0, 0x1, 0),
+ CLKGEN_FIELD(0x2f0, 0x1, 1),
+ CLKGEN_FIELD(0x2f0, 0x1, 2),
+ CLKGEN_FIELD(0x2f0, 0x1, 3) },
+ .npda = CLKGEN_FIELD(0x2f0, 0x1, 12),
+ .nsb = { CLKGEN_FIELD(0x2f0, 0x1, 8),
+ CLKGEN_FIELD(0x2f0, 0x1, 9),
+ CLKGEN_FIELD(0x2f0, 0x1, 10),
+ CLKGEN_FIELD(0x2f0, 0x1, 11) },
+ .nsdiv_present = true,
+ .nsdiv = { CLKGEN_FIELD(0x304, 0x1, 24),
+ CLKGEN_FIELD(0x308, 0x1, 24),
+ CLKGEN_FIELD(0x30c, 0x1, 24),
+ CLKGEN_FIELD(0x310, 0x1, 24) },
+ .mdiv = { CLKGEN_FIELD(0x304, 0x1f, 15),
+ CLKGEN_FIELD(0x308, 0x1f, 15),
+ CLKGEN_FIELD(0x30c, 0x1f, 15),
+ CLKGEN_FIELD(0x310, 0x1f, 15) },
+ .en = { CLKGEN_FIELD(0x2fc, 0x1, 0),
+ CLKGEN_FIELD(0x2fc, 0x1, 1),
+ CLKGEN_FIELD(0x2fc, 0x1, 2),
+ CLKGEN_FIELD(0x2fc, 0x1, 3) },
+ .ndiv = CLKGEN_FIELD(0x2f4, 0x7, 16),
+ .pe = { CLKGEN_FIELD(0x304, 0x7fff, 0),
+ CLKGEN_FIELD(0x308, 0x7fff, 0),
+ CLKGEN_FIELD(0x30c, 0x7fff, 0),
+ CLKGEN_FIELD(0x310, 0x7fff, 0) },
+ .sdiv = { CLKGEN_FIELD(0x304, 0xf, 20),
+ CLKGEN_FIELD(0x308, 0xf, 20),
+ CLKGEN_FIELD(0x30c, 0xf, 20),
+ CLKGEN_FIELD(0x310, 0xf, 20) },
+ .lockstatus_present = true,
+ .lock_status = CLKGEN_FIELD(0x2f0, 0x1, 24),
+ .powerup_polarity = 1,
+ .standby_polarity = 1,
+ .pll_ops = &st_quadfs_pll_c32_ops,
+ .get_params = clk_fs660c32_dig_get_params,
+ .get_rate = clk_fs660c32_dig_get_rate,
+};
+
+static const struct clkgen_clk_out st_fs660c32_C_clks[] = {
+ { .name = "clk-s-c0-fs0-ch0", },
+ { .name = "clk-s-c0-fs0-ch1", },
+ { .name = "clk-s-c0-fs0-ch2", },
+ { .name = "clk-s-c0-fs0-ch3", },
+};
+
+static const struct clkgen_quadfs_data_clks st_fs660c32_C_data = {
+ .data = (struct clkgen_quadfs_data *)&st_fs660c32_C,
+ .outputs = st_fs660c32_C_clks,
+};
+
+static const struct clkgen_quadfs_data st_fs660c32_D = {
+ .nrst_present = true,
+ .nrst = { CLKGEN_FIELD(0x2a0, 0x1, 0),
+ CLKGEN_FIELD(0x2a0, 0x1, 1),
+ CLKGEN_FIELD(0x2a0, 0x1, 2),
+ CLKGEN_FIELD(0x2a0, 0x1, 3) },
+ .ndiv = CLKGEN_FIELD(0x2a4, 0x7, 16),
+ .pe = { CLKGEN_FIELD(0x2b4, 0x7fff, 0),
+ CLKGEN_FIELD(0x2b8, 0x7fff, 0),
+ CLKGEN_FIELD(0x2bc, 0x7fff, 0),
+ CLKGEN_FIELD(0x2c0, 0x7fff, 0) },
+ .sdiv = { CLKGEN_FIELD(0x2b4, 0xf, 20),
+ CLKGEN_FIELD(0x2b8, 0xf, 20),
+ CLKGEN_FIELD(0x2bc, 0xf, 20),
+ CLKGEN_FIELD(0x2c0, 0xf, 20) },
+ .npda = CLKGEN_FIELD(0x2a0, 0x1, 12),
+ .nsb = { CLKGEN_FIELD(0x2a0, 0x1, 8),
+ CLKGEN_FIELD(0x2a0, 0x1, 9),
+ CLKGEN_FIELD(0x2a0, 0x1, 10),
+ CLKGEN_FIELD(0x2a0, 0x1, 11) },
+ .nsdiv_present = true,
+ .nsdiv = { CLKGEN_FIELD(0x2b4, 0x1, 24),
+ CLKGEN_FIELD(0x2b8, 0x1, 24),
+ CLKGEN_FIELD(0x2bc, 0x1, 24),
+ CLKGEN_FIELD(0x2c0, 0x1, 24) },
+ .mdiv = { CLKGEN_FIELD(0x2b4, 0x1f, 15),
+ CLKGEN_FIELD(0x2b8, 0x1f, 15),
+ CLKGEN_FIELD(0x2bc, 0x1f, 15),
+ CLKGEN_FIELD(0x2c0, 0x1f, 15) },
+ .en = { CLKGEN_FIELD(0x2ac, 0x1, 0),
+ CLKGEN_FIELD(0x2ac, 0x1, 1),
+ CLKGEN_FIELD(0x2ac, 0x1, 2),
+ CLKGEN_FIELD(0x2ac, 0x1, 3) },
+ .lockstatus_present = true,
+ .lock_status = CLKGEN_FIELD(0x2A0, 0x1, 24),
+ .powerup_polarity = 1,
+ .standby_polarity = 1,
+ .pll_ops = &st_quadfs_pll_c32_ops,
+ .get_params = clk_fs660c32_dig_get_params,
+ .get_rate = clk_fs660c32_dig_get_rate,
+};
+
+static const struct clkgen_quadfs_data_clks st_fs660c32_D_data = {
+ .data = (struct clkgen_quadfs_data *)&st_fs660c32_D,
+};
+
+static const struct clkgen_clk_out st_fs660c32_D0_clks[] = {
+ { .name = "clk-s-d0-fs0-ch0", },
+ { .name = "clk-s-d0-fs0-ch1", },
+ { .name = "clk-s-d0-fs0-ch2", },
+ { .name = "clk-s-d0-fs0-ch3", },
+};
+
+static const struct clkgen_quadfs_data_clks st_fs660c32_D0_data = {
+ .data = (struct clkgen_quadfs_data *)&st_fs660c32_D,
+ .outputs = st_fs660c32_D0_clks,
+};
+
+static const struct clkgen_clk_out st_fs660c32_D2_clks[] = {
+ { .name = "clk-s-d2-fs0-ch0", },
+ { .name = "clk-s-d2-fs0-ch1", },
+ { .name = "clk-s-d2-fs0-ch2", },
+ { .name = "clk-s-d2-fs0-ch3", },
+};
+
+static const struct clkgen_quadfs_data_clks st_fs660c32_D2_data = {
+ .data = (struct clkgen_quadfs_data *)&st_fs660c32_D,
+ .outputs = st_fs660c32_D2_clks,
+};
+
+static const struct clkgen_clk_out st_fs660c32_D3_clks[] = {
+ { .name = "clk-s-d3-fs0-ch0", },
+ { .name = "clk-s-d3-fs0-ch1", },
+ { .name = "clk-s-d3-fs0-ch2", },
+ { .name = "clk-s-d3-fs0-ch3", },
+};
+
+static const struct clkgen_quadfs_data_clks st_fs660c32_D3_data = {
+ .data = (struct clkgen_quadfs_data *)&st_fs660c32_D,
+ .outputs = st_fs660c32_D3_clks,
+};
+
+/**
+ * DOC: A Frequency Synthesizer that multiplies its input clock by a fixed factor
+ *
+ * Traits of this clock:
+ * prepare - clk_(un)prepare only ensures parent is (un)prepared
+ * enable - clk_enable and clk_disable are functional & control the Fsyn
+ * rate - inherits rate from parent; set_rate/round_rate/recalc_rate are implemented
+ * parent - fixed parent. No clk_set_parent support
+ */
+
+/**
+ * struct st_clk_quadfs_pll - A pll which outputs a fixed multiplier of
+ * its parent clock, found inside a type of
+ * ST quad channel frequency synthesizer block
+ *
+ * @hw: handle between common and hardware-specific interfaces.
+ * @regs_base: base address of the configuration registers.
+ * @lock: spinlock.
+ * @data: local driver data
+ * @ndiv: regmap field for the ndiv control.
+ */
+struct st_clk_quadfs_pll {
+ struct clk_hw hw;
+ void __iomem *regs_base;
+ spinlock_t *lock;
+ struct clkgen_quadfs_data *data;
+ u32 ndiv;
+};
+
+#define to_quadfs_pll(_hw) container_of(_hw, struct st_clk_quadfs_pll, hw)
+
+static int quadfs_pll_enable(struct clk_hw *hw)
+{
+ struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
+ unsigned long flags = 0, timeout = jiffies + msecs_to_jiffies(10);
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ /*
+ * Bring block out of reset if we have reset control.
+ */
+ if (pll->data->reset_present)
+ CLKGEN_WRITE(pll, nreset, 1);
+
+ /*
+ * Use a fixed input clock noise bandwidth filter for the moment
+ */
+ if (pll->data->bwfilter_present)
+ CLKGEN_WRITE(pll, ref_bw, PLL_BW_GOODREF);
+
+
+ CLKGEN_WRITE(pll, ndiv, pll->ndiv);
+
+ /*
+ * Power up the PLL
+ */
+ CLKGEN_WRITE(pll, npda, !pll->data->powerup_polarity);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ if (pll->data->lockstatus_present)
+ while (!CLKGEN_READ(pll, lock_status)) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cpu_relax();
+ }
+
+ return 0;
+}
+
+static void quadfs_pll_disable(struct clk_hw *hw)
+{
+ struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
+ unsigned long flags = 0;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ /*
+ * Powerdown the PLL and then put block into soft reset if we have
+ * reset control.
+ */
+ CLKGEN_WRITE(pll, npda, pll->data->powerup_polarity);
+
+ if (pll->data->reset_present)
+ CLKGEN_WRITE(pll, nreset, 0);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+}
+
+static int quadfs_pll_is_enabled(struct clk_hw *hw)
+{
+ struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
+ u32 npda = CLKGEN_READ(pll, npda);
+
+ return pll->data->powerup_polarity ? !npda : !!npda;
+}
+
+static int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs,
+ unsigned long *rate)
+{
+ unsigned long nd = fs->ndiv + 16; /* ndiv value */
+
+ *rate = input * nd;
+
+ return 0;
+}
+
+static unsigned long quadfs_pll_fs660c32_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
+ unsigned long rate = 0;
+ struct stm_fs params;
+
+ params.ndiv = CLKGEN_READ(pll, ndiv);
+ if (clk_fs660c32_vco_get_rate(parent_rate, &params, &rate))
+ pr_err("%s:%s error calculating rate\n",
+ clk_hw_get_name(hw), __func__);
+
+ pll->ndiv = params.ndiv;
+
+ return rate;
+}
+
+static int clk_fs660c32_vco_get_params(unsigned long input,
+ unsigned long output, struct stm_fs *fs)
+{
+ /*
+ * Formula:
+ *   VCO frequency = (fin x ndiv) / pdiv
+ *   ndiv = VCOfreq * pdiv / fin
+ */
+ unsigned long pdiv = 1, n;
+
+ /* Output clock range: 384 MHz to 660 MHz */
+ if (output < 384000000 || output > 660000000)
+ return -EINVAL;
+
+ if (input > 40000000)
+ /*
+ * This means that PDIV would be 2 instead of 1.
+ * Not supported today.
+ */
+ return -EINVAL;
+
+ input /= 1000;
+ output /= 1000;
+
+ n = output * pdiv / input;
+ if (n < 16)
+ n = 16;
+ fs->ndiv = n - 16; /* Converting formula value to reg value */
+
+ return 0;
+}
+
+static long quadfs_pll_fs660c32_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *prate)
+{
+ struct stm_fs params;
+
+ if (clk_fs660c32_vco_get_params(*prate, rate, &params))
+ return rate;
+
+ clk_fs660c32_vco_get_rate(*prate, &params, &rate);
+
+ pr_debug("%s: %s new rate %ld [ndiv=%u]\n",
+ __func__, clk_hw_get_name(hw),
+ rate, (unsigned int)params.ndiv);
+
+ return rate;
+}
+
+static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
+ struct stm_fs params;
+ unsigned long hwrate = 0;
+ unsigned long flags = 0;
+ int ret;
+
+ if (!rate || !parent_rate)
+ return -EINVAL;
+
+ ret = clk_fs660c32_vco_get_params(parent_rate, rate, &params);
+ if (ret)
+ return ret;
+
+ clk_fs660c32_vco_get_rate(parent_rate, &params, &hwrate);
+
+ pr_debug("%s: %s new rate %ld [ndiv=0x%x]\n",
+ __func__, clk_hw_get_name(hw),
+ hwrate, (unsigned int)params.ndiv);
+
+ if (!hwrate)
+ return -EINVAL;
+
+ pll->ndiv = params.ndiv;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ CLKGEN_WRITE(pll, ndiv, pll->ndiv);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops st_quadfs_pll_c32_ops = {
+ .enable = quadfs_pll_enable,
+ .disable = quadfs_pll_disable,
+ .is_enabled = quadfs_pll_is_enabled,
+ .recalc_rate = quadfs_pll_fs660c32_recalc_rate,
+ .round_rate = quadfs_pll_fs660c32_round_rate,
+ .set_rate = quadfs_pll_fs660c32_set_rate,
+};
+
+static struct clk * __init st_clk_register_quadfs_pll(
+ const char *name, const char *parent_name,
+ struct clkgen_quadfs_data *quadfs, void __iomem *reg,
+ spinlock_t *lock)
+{
+ struct st_clk_quadfs_pll *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ /*
+ * Sanity check required pointers.
+ */
+ if (WARN_ON(!name || !parent_name))
+ return ERR_PTR(-EINVAL);
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = quadfs->pll_ops;
+ init.flags = CLK_GET_RATE_NOCACHE;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ pll->data = quadfs;
+ pll->regs_base = reg;
+ pll->lock = lock;
+ pll->hw.init = &init;
+
+ clk = clk_register(NULL, &pll->hw);
+
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
+
+/**
+ * DOC: A digital frequency synthesizer
+ *
+ * Traits of this clock:
+ * prepare - clk_(un)prepare only ensures parent is (un)prepared
+ * enable - clk_enable and clk_disable are functional
+ * rate - set rate is functional
+ * parent - fixed parent. No clk_set_parent support
+ */
+
+/*
+ * struct st_clk_quadfs_fsynth - One clock output from a four channel digital
+ * frequency synthesizer (fsynth) block.
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ *
+ * @nsb: regmap field in the output control register for the digital
+ * standby of this fsynth channel. This control is active low so
+ * the channel is in standby when the control bit is cleared.
+ *
+ * @nsdiv: regmap field in the output control register for the
+ * optional divide by 3 of this fsynth channel. This control
+ * is active low so the divide by 3 is active when the control bit is
+ * cleared and the divide is bypassed when the bit is set.
+ */
+struct st_clk_quadfs_fsynth {
+ struct clk_hw hw;
+ void __iomem *regs_base;
+ spinlock_t *lock;
+ struct clkgen_quadfs_data *data;
+
+ u32 chan;
+ /*
+ * Cached hardware values from set_rate so we can program the
+ * hardware in enable. There are two reasons for this:
+ *
+ * 1. The registers may not be writable until the parent has been
+ * enabled.
+ *
+ * 2. It restores the clock rate when a driver does an enable
+ * on PM restore, after a suspend to RAM has lost the hardware
+ * setup.
+ */
+ u32 md;
+ u32 pe;
+ u32 sdiv;
+ u32 nsdiv;
+};
+
+#define to_quadfs_fsynth(_hw) \
+ container_of(_hw, struct st_clk_quadfs_fsynth, hw)
+
+static void quadfs_fsynth_program_enable(struct st_clk_quadfs_fsynth *fs)
+{
+ /*
+ * Pulse the program enable register lsb to make the hardware take
+ * notice of the new md/pe values with a glitchless transition.
+ */
+ CLKGEN_WRITE(fs, en[fs->chan], 1);
+ CLKGEN_WRITE(fs, en[fs->chan], 0);
+}
+
+static void quadfs_fsynth_program_rate(struct st_clk_quadfs_fsynth *fs)
+{
+ unsigned long flags = 0;
+
+ /*
+ * Ensure the md/pe parameters are ignored while we are
+ * reprogramming them so we can get a glitchless change
+ * when fine tuning the speed of a running clock.
+ */
+ CLKGEN_WRITE(fs, en[fs->chan], 0);
+
+ CLKGEN_WRITE(fs, mdiv[fs->chan], fs->md);
+ CLKGEN_WRITE(fs, pe[fs->chan], fs->pe);
+ CLKGEN_WRITE(fs, sdiv[fs->chan], fs->sdiv);
+
+ if (fs->lock)
+ spin_lock_irqsave(fs->lock, flags);
+
+ if (fs->data->nsdiv_present)
+ CLKGEN_WRITE(fs, nsdiv[fs->chan], fs->nsdiv);
+
+ if (fs->lock)
+ spin_unlock_irqrestore(fs->lock, flags);
+}
+
+static int quadfs_fsynth_enable(struct clk_hw *hw)
+{
+ struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
+ unsigned long flags = 0;
+
+ pr_debug("%s: %s\n", __func__, clk_hw_get_name(hw));
+
+ quadfs_fsynth_program_rate(fs);
+
+ if (fs->lock)
+ spin_lock_irqsave(fs->lock, flags);
+
+ CLKGEN_WRITE(fs, nsb[fs->chan], !fs->data->standby_polarity);
+
+ if (fs->data->nrst_present)
+ CLKGEN_WRITE(fs, nrst[fs->chan], 0);
+
+ if (fs->lock)
+ spin_unlock_irqrestore(fs->lock, flags);
+
+ quadfs_fsynth_program_enable(fs);
+
+ return 0;
+}
+
+static void quadfs_fsynth_disable(struct clk_hw *hw)
+{
+ struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
+ unsigned long flags = 0;
+
+ pr_debug("%s: %s\n", __func__, clk_hw_get_name(hw));
+
+ if (fs->lock)
+ spin_lock_irqsave(fs->lock, flags);
+
+ CLKGEN_WRITE(fs, nsb[fs->chan], fs->data->standby_polarity);
+
+ if (fs->lock)
+ spin_unlock_irqrestore(fs->lock, flags);
+}
+
+static int quadfs_fsynth_is_enabled(struct clk_hw *hw)
+{
+ struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
+ u32 nsb = CLKGEN_READ(fs, nsb[fs->chan]);
+
+ pr_debug("%s: %s enable bit = 0x%x\n",
+ __func__, clk_hw_get_name(hw), nsb);
+
+ return fs->data->standby_polarity ? !nsb : !!nsb;
+}
+
+#define P20 (uint64_t)(1 << 20)
+
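+/*
+ * Rearranging the computation below, the fsynth output frequency is
+ *
+ *   fout = (32 * fin) / ((32 + mdiv + pe / 2^15) * 2^sdiv * ns)
+ *
+ * where ns is 1 or 3 depending on the nsdiv bit.
+ */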
+static int clk_fs660c32_dig_get_rate(unsigned long input,
+ const struct stm_fs *fs, unsigned long *rate)
+{
+ unsigned long s = (1 << fs->sdiv);
+ unsigned long ns;
+ uint64_t res;
+
+ /*
+ * 'nsdiv' is a register value ('BIN') which is translated
+ * to a decimal value according to the following rules.
+ *
+ * nsdiv ns.dec
+ * 0 3
+ * 1 1
+ */
+ ns = (fs->nsdiv == 1) ? 1 : 3;
+
+ res = (P20 * (32 + fs->mdiv) + 32 * fs->pe) * s * ns;
+ *rate = (unsigned long)div64_u64(input * P20 * 32, res);
+
+ return 0;
+}
+
+
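+/*
+ * For a candidate mdiv (m) and sdiv (si), solve the fsynth equation for pe,
+ * reject the combination if pe exceeds its 15-bit range, and keep the
+ * (mdiv, pe, sdiv) triple whenever it reduces the deviation from the
+ * requested output frequency.
+ */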
+static int clk_fs660c32_get_pe(int m, int si, unsigned long *deviation,
+ signed long input, unsigned long output, uint64_t *p,
+ struct stm_fs *fs)
+{
+ unsigned long new_freq, new_deviation;
+ struct stm_fs fs_tmp;
+ uint64_t val;
+
+ val = (uint64_t)output << si;
+
+ *p = (uint64_t)input * P20 - (32LL + (uint64_t)m) * val * (P20 / 32LL);
+
+ *p = div64_u64(*p, val);
+
+ if (*p > 32767LL)
+ return 1;
+
+ fs_tmp.mdiv = (unsigned long) m;
+ fs_tmp.pe = (unsigned long)*p;
+ fs_tmp.sdiv = si;
+ fs_tmp.nsdiv = 1;
+
+ clk_fs660c32_dig_get_rate(input, &fs_tmp, &new_freq);
+
+ new_deviation = abs(output - new_freq);
+
+ if (new_deviation < *deviation) {
+ fs->mdiv = m;
+ fs->pe = (unsigned long)*p;
+ fs->sdiv = si;
+ fs->nsdiv = 1;
+ *deviation = new_deviation;
+ }
+ return 0;
+}
+
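+/*
+ * Parameter search: for each sdiv in 0..8, probe the mdiv boundaries (0 and
+ * 31) to skip ranges with no solution, scan the remaining mdiv values for
+ * the smallest deviation, then fine-tune pe by +/-2 around the computed
+ * value if an exact match was not found.
+ */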
+static int clk_fs660c32_dig_get_params(unsigned long input,
+ unsigned long output, struct stm_fs *fs)
+{
+ int si; /* sdiv_reg (8 downto 0) */
+ int m; /* md value */
+ unsigned long new_freq, new_deviation;
+ /* initial condition to say: "infinite deviation" */
+ unsigned long deviation = ~0;
+ uint64_t p, p1, p2; /* pe value */
+ int r1, r2;
+
+ struct stm_fs fs_tmp;
+
+ for (si = 0; (si <= 8) && deviation; si++) {
+
+ /* Boundary test to avoid useless iteration */
+ r1 = clk_fs660c32_get_pe(0, si, &deviation,
+ input, output, &p1, fs);
+ r2 = clk_fs660c32_get_pe(31, si, &deviation,
+ input, output, &p2, fs);
+
+ /* No solution */
+ if (r1 && r2 && (p1 > p2))
+ continue;
+
+ /* Try to find best deviation */
+ for (m = 1; (m < 31) && deviation; m++)
+ clk_fs660c32_get_pe(m, si, &deviation,
+ input, output, &p, fs);
+
+ }
+
+ if (deviation == ~0) /* No solution found */
+ return -1;
+
+ /* pe fine tuning if deviation not 0: +/- 2 around computed pe value */
+ if (deviation) {
+ fs_tmp.mdiv = fs->mdiv;
+ fs_tmp.sdiv = fs->sdiv;
+ fs_tmp.nsdiv = fs->nsdiv;
+
+ if (fs->pe > 2)
+ p2 = fs->pe - 2;
+ else
+ p2 = 0;
+
+ for (; p2 < 32768ll && (p2 <= (fs->pe + 2)); p2++) {
+ fs_tmp.pe = (unsigned long)p2;
+
+ clk_fs660c32_dig_get_rate(input, &fs_tmp, &new_freq);
+
+ new_deviation = abs(output - new_freq);
+
+ /* Check if this is a better solution */
+ if (new_deviation < deviation) {
+ fs->pe = (unsigned long)p2;
+ deviation = new_deviation;
+
+ }
+ }
+ }
+ return 0;
+}
+
+static int quadfs_fsynt_get_hw_value_for_recalc(struct st_clk_quadfs_fsynth *fs,
+ struct stm_fs *params)
+{
+ /*
+ * Get the initial hardware values for recalc_rate
+ */
+ params->mdiv = CLKGEN_READ(fs, mdiv[fs->chan]);
+ params->pe = CLKGEN_READ(fs, pe[fs->chan]);
+ params->sdiv = CLKGEN_READ(fs, sdiv[fs->chan]);
+
+ if (fs->data->nsdiv_present)
+ params->nsdiv = CLKGEN_READ(fs, nsdiv[fs->chan]);
+ else
+ params->nsdiv = 1;
+
+ /*
+ * If all of them are zero then assume no clock rate is programmed.
+ */
+ if (!params->mdiv && !params->pe && !params->sdiv)
+ return 1;
+
+ fs->md = params->mdiv;
+ fs->pe = params->pe;
+ fs->sdiv = params->sdiv;
+ fs->nsdiv = params->nsdiv;
+
+ return 0;
+}
+
+static long quadfs_find_best_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate, struct stm_fs *params)
+{
+ struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
+ int (*clk_fs_get_rate)(unsigned long,
+ const struct stm_fs *, unsigned long *);
+ int (*clk_fs_get_params)(unsigned long, unsigned long, struct stm_fs *);
+ unsigned long rate = 0;
+
+ clk_fs_get_rate = fs->data->get_rate;
+ clk_fs_get_params = fs->data->get_params;
+
+ if (!clk_fs_get_params(prate, drate, params))
+ clk_fs_get_rate(prate, params, &rate);
+
+ return rate;
+}
+
+static unsigned long quadfs_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
+ unsigned long rate = 0;
+ struct stm_fs params;
+ int (*clk_fs_get_rate)(unsigned long,
+ const struct stm_fs *, unsigned long *);
+
+ clk_fs_get_rate = fs->data->get_rate;
+
+ if (quadfs_fsynt_get_hw_value_for_recalc(fs, &params))
+ return 0;
+
+ if (clk_fs_get_rate(parent_rate, &params, &rate)) {
+ pr_err("%s:%s error calculating rate\n",
+ clk_hw_get_name(hw), __func__);
+ }
+
+ pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate);
+
+ return rate;
+}
+
+static long quadfs_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct stm_fs params;
+
+ rate = quadfs_find_best_rate(hw, rate, *prate, &params);
+
+ pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n",
+ __func__, clk_hw_get_name(hw),
+ rate, (unsigned int)params.sdiv, (unsigned int)params.mdiv,
+ (unsigned int)params.pe, (unsigned int)params.nsdiv);
+
+ return rate;
+}
+
+
+static void quadfs_program_and_enable(struct st_clk_quadfs_fsynth *fs,
+ struct stm_fs *params)
+{
+ fs->md = params->mdiv;
+ fs->pe = params->pe;
+ fs->sdiv = params->sdiv;
+ fs->nsdiv = params->nsdiv;
+
+ /*
+ * In some integrations you can only change the fsynth programming when
+ * the parent entity containing it is enabled.
+ */
+ quadfs_fsynth_program_rate(fs);
+ quadfs_fsynth_program_enable(fs);
+}
+
+static int quadfs_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
+ struct stm_fs params;
+ long hwrate;
+
+ if (!rate || !parent_rate)
+ return -EINVAL;
+
+ memset(&params, 0, sizeof(struct stm_fs));
+
+ hwrate = quadfs_find_best_rate(hw, rate, parent_rate, &params);
+ if (!hwrate)
+ return -EINVAL;
+
+ quadfs_program_and_enable(fs, &params);
+
+ return 0;
+}
+
+
+
+static const struct clk_ops st_quadfs_ops = {
+ .enable = quadfs_fsynth_enable,
+ .disable = quadfs_fsynth_disable,
+ .is_enabled = quadfs_fsynth_is_enabled,
+ .round_rate = quadfs_round_rate,
+ .set_rate = quadfs_set_rate,
+ .recalc_rate = quadfs_recalc_rate,
+};
+
+static struct clk * __init st_clk_register_quadfs_fsynth(
+ const char *name, const char *parent_name,
+ struct clkgen_quadfs_data *quadfs, void __iomem *reg, u32 chan,
+ unsigned long flags, spinlock_t *lock)
+{
+ struct st_clk_quadfs_fsynth *fs;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ /*
+ * Sanity check required pointers, note that nsdiv3 is optional.
+ */
+ if (WARN_ON(!name || !parent_name))
+ return ERR_PTR(-EINVAL);
+
+ fs = kzalloc(sizeof(*fs), GFP_KERNEL);
+ if (!fs)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &st_quadfs_ops;
+ init.flags = flags | CLK_GET_RATE_NOCACHE;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ fs->data = quadfs;
+ fs->regs_base = reg;
+ fs->chan = chan;
+ fs->lock = lock;
+ fs->hw.init = &init;
+
+ clk = clk_register(NULL, &fs->hw);
+
+ if (IS_ERR(clk))
+ kfree(fs);
+
+ return clk;
+}
+
+static void __init st_of_create_quadfs_fsynths(
+ struct device_node *np, const char *pll_name,
+ struct clkgen_quadfs_data_clks *quadfs, void __iomem *reg,
+ spinlock_t *lock)
+{
+ struct clk_onecell_data *clk_data;
+ int fschan;
+
+ clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ return;
+
+ clk_data->clk_num = QUADFS_MAX_CHAN;
+ clk_data->clks = kcalloc(QUADFS_MAX_CHAN, sizeof(struct clk *),
+ GFP_KERNEL);
+
+ if (!clk_data->clks) {
+ kfree(clk_data);
+ return;
+ }
+
+ for (fschan = 0; fschan < QUADFS_MAX_CHAN; fschan++) {
+ struct clk *clk;
+ const char *clk_name;
+ unsigned long flags = 0;
+
+ if (quadfs->outputs) {
+ clk_name = quadfs->outputs[fschan].name;
+ flags = quadfs->outputs[fschan].flags;
+ } else {
+ if (of_property_read_string_index(np,
+ "clock-output-names",
+ fschan, &clk_name))
+ break;
+ of_clk_detect_critical(np, fschan, &flags);
+ }
+
+ /*
+ * If we read an empty clock name then the channel is unused
+ */
+ if (*clk_name == '\0')
+ continue;
+
+ clk = st_clk_register_quadfs_fsynth(clk_name, pll_name,
+ quadfs->data, reg, fschan,
+ flags, lock);
+
+ /*
+ * If there was an error registering this clock output, clean
+ * up and move on to the next one.
+ */
+ if (!IS_ERR(clk)) {
+ clk_data->clks[fschan] = clk;
+ pr_debug("%s: parent %s rate %u\n",
+ __clk_get_name(clk),
+ __clk_get_name(clk_get_parent(clk)),
+ (unsigned int)clk_get_rate(clk));
+ }
+ }
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
+}
+
+static void __init st_of_quadfs_setup(struct device_node *np,
+ struct clkgen_quadfs_data_clks *datac)
+{
+ struct clk *clk;
+ const char *pll_name, *clk_parent_name;
+ void __iomem *reg;
+ spinlock_t *lock;
+ struct device_node *parent_np;
+
+ /*
+ * First check for reg property within the node to keep backward
+ * compatibility, then if reg doesn't exist look at the parent node
+ */
+ reg = of_iomap(np, 0);
+ if (!reg) {
+ parent_np = of_get_parent(np);
+ reg = of_iomap(parent_np, 0);
+ of_node_put(parent_np);
+ if (!reg) {
+ pr_err("%s: Failed to get base address\n", __func__);
+ return;
+ }
+ }
+
+ clk_parent_name = of_clk_get_parent_name(np, 0);
+ if (!clk_parent_name)
+ return;
+
+ pll_name = kasprintf(GFP_KERNEL, "%pOFn.pll", np);
+ if (!pll_name)
+ return;
+
+ lock = kzalloc(sizeof(*lock), GFP_KERNEL);
+ if (!lock)
+ goto err_exit;
+
+ spin_lock_init(lock);
+
+ clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name, datac->data,
+ reg, lock);
+ if (IS_ERR(clk)) {
+ kfree(lock);
+ goto err_exit;
+ } else
+ pr_debug("%s: parent %s rate %u\n",
+ __clk_get_name(clk),
+ __clk_get_name(clk_get_parent(clk)),
+ (unsigned int)clk_get_rate(clk));
+
+ st_of_create_quadfs_fsynths(np, pll_name, datac, reg, lock);
+
+err_exit:
+ kfree(pll_name); /* No longer need local copy of the PLL name */
+}
+
+static void __init st_of_quadfs660C_setup(struct device_node *np)
+{
+ st_of_quadfs_setup(np,
+ (struct clkgen_quadfs_data_clks *) &st_fs660c32_C_data);
+}
+CLK_OF_DECLARE(quadfs660C, "st,quadfs-pll", st_of_quadfs660C_setup);
+
+static void __init st_of_quadfs660D_setup(struct device_node *np)
+{
+ st_of_quadfs_setup(np,
+ (struct clkgen_quadfs_data_clks *) &st_fs660c32_D_data);
+}
+CLK_OF_DECLARE(quadfs660D, "st,quadfs", st_of_quadfs660D_setup);
+
+static void __init st_of_quadfs660D0_setup(struct device_node *np)
+{
+ st_of_quadfs_setup(np,
+ (struct clkgen_quadfs_data_clks *) &st_fs660c32_D0_data);
+}
+CLK_OF_DECLARE(quadfs660D0, "st,quadfs-d0", st_of_quadfs660D0_setup);
+
+static void __init st_of_quadfs660D2_setup(struct device_node *np)
+{
+ st_of_quadfs_setup(np,
+ (struct clkgen_quadfs_data_clks *) &st_fs660c32_D2_data);
+}
+CLK_OF_DECLARE(quadfs660D2, "st,quadfs-d2", st_of_quadfs660D2_setup);
+
+static void __init st_of_quadfs660D3_setup(struct device_node *np)
+{
+ st_of_quadfs_setup(np,
+ (struct clkgen_quadfs_data_clks *) &st_fs660c32_D3_data);
+}
+CLK_OF_DECLARE(quadfs660D3, "st,quadfs-d3", st_of_quadfs660D3_setup);
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
new file mode 100644
index 0000000000..596e939ad9
--- /dev/null
+++ b/drivers/clk/st/clkgen-mux.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * clkgen-mux.c: ST GEN-MUX Clock driver
+ *
+ * Copyright (C) 2014 STMicroelectronics (R&D) Limited
+ *
+ * Authors: Stephen Gallimore <stephen.gallimore@st.com>
+ * Pankaj Dev <pankaj.dev@st.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include "clkgen.h"
+
+static const char ** __init clkgen_mux_get_parents(struct device_node *np,
+ int *num_parents)
+{
+ const char **parents;
+ unsigned int nparents;
+
+ nparents = of_clk_get_parent_count(np);
+ if (WARN_ON(!nparents))
+ return ERR_PTR(-EINVAL);
+
+ parents = kcalloc(nparents, sizeof(const char *), GFP_KERNEL);
+ if (!parents)
+ return ERR_PTR(-ENOMEM);
+
+ *num_parents = of_clk_parent_fill(np, parents, nparents);
+ return parents;
+}
+
+struct clkgen_mux_data {
+ u32 offset;
+ u8 shift;
+ u8 width;
+ spinlock_t *lock;
+ unsigned long clk_flags;
+ u8 mux_flags;
+};
+
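+/*
+ * STiH407 A9 (CPU) clock mux: 2-bit parent select at offset 0x1a4, sharing
+ * clkgen_a9_lock with the A9 PLL; the parent clocks come from the DT node.
+ */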
+static struct clkgen_mux_data stih407_a9_mux_data = {
+ .offset = 0x1a4,
+ .shift = 0,
+ .width = 2,
+ .lock = &clkgen_a9_lock,
+};
+
+static void __init st_of_clkgen_mux_setup(struct device_node *np,
+ struct clkgen_mux_data *data)
+{
+ struct clk *clk;
+ void __iomem *reg;
+ const char **parents;
+ int num_parents = 0;
+ struct device_node *parent_np;
+
+ /*
+ * First check for reg property within the node to keep backward
+ * compatibility, then if reg doesn't exist look at the parent node
+ */
+ reg = of_iomap(np, 0);
+ if (!reg) {
+ parent_np = of_get_parent(np);
+ reg = of_iomap(parent_np, 0);
+ of_node_put(parent_np);
+ if (!reg) {
+ pr_err("%s: Failed to get base address\n", __func__);
+ return;
+ }
+ }
+
+ parents = clkgen_mux_get_parents(np, &num_parents);
+ if (IS_ERR(parents)) {
+ pr_err("%s: Failed to get parents (%ld)\n",
+ __func__, PTR_ERR(parents));
+ goto err_parents;
+ }
+
+ clk = clk_register_mux(NULL, np->name, parents, num_parents,
+ data->clk_flags | CLK_SET_RATE_PARENT,
+ reg + data->offset,
+ data->shift, data->width, data->mux_flags,
+ data->lock);
+ if (IS_ERR(clk))
+ goto err;
+
+ pr_debug("%s: parent %s rate %u\n",
+ __clk_get_name(clk),
+ __clk_get_name(clk_get_parent(clk)),
+ (unsigned int)clk_get_rate(clk));
+
+ kfree(parents);
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ return;
+
+err:
+ kfree(parents);
+err_parents:
+ iounmap(reg);
+}
+
+static void __init st_of_clkgen_a9_mux_setup(struct device_node *np)
+{
+ st_of_clkgen_mux_setup(np, &stih407_a9_mux_data);
+}
+CLK_OF_DECLARE(clkgen_a9mux, "st,stih407-clkgen-a9-mux",
+ st_of_clkgen_a9_mux_setup);
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
new file mode 100644
index 0000000000..b36e4d8036
--- /dev/null
+++ b/drivers/clk/st/clkgen-pll.c
@@ -0,0 +1,872 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2014 STMicroelectronics (R&D) Limited
+ */
+
+/*
+ * Authors:
+ * Stephen Gallimore <stephen.gallimore@st.com>,
+ * Pankaj Dev <pankaj.dev@st.com>.
+ */
+
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/iopoll.h>
+
+#include "clkgen.h"
+
+static DEFINE_SPINLOCK(clkgena_c32_odf_lock);
+DEFINE_SPINLOCK(clkgen_a9_lock);
+
+/*
+ * PLL configuration register bits for PLL3200 C32
+ */
+#define C32_NDIV_MASK (0xff)
+#define C32_IDF_MASK (0x7)
+#define C32_ODF_MASK (0x3f)
+#define C32_LDF_MASK (0x7f)
+#define C32_CP_MASK (0x1f)
+
+#define C32_MAX_ODFS (4)
+
+/*
+ * PLL configuration register bits for PLL4600 C28
+ */
+#define C28_NDIV_MASK (0xff)
+#define C28_IDF_MASK (0x7)
+#define C28_ODF_MASK (0x3f)
+
+struct clkgen_pll_data {
+ struct clkgen_field pdn_status;
+ struct clkgen_field pdn_ctrl;
+ struct clkgen_field locked_status;
+ struct clkgen_field mdiv;
+ struct clkgen_field ndiv;
+ struct clkgen_field pdiv;
+ struct clkgen_field idf;
+ struct clkgen_field ldf;
+ struct clkgen_field cp;
+ unsigned int num_odfs;
+ struct clkgen_field odf[C32_MAX_ODFS];
+ struct clkgen_field odf_gate[C32_MAX_ODFS];
+ bool switch2pll_en;
+ struct clkgen_field switch2pll;
+ spinlock_t *lock;
+ const struct clk_ops *ops;
+};
+
+struct clkgen_clk_out {
+ const char *name;
+ unsigned long flags;
+};
+
+struct clkgen_pll_data_clks {
+ struct clkgen_pll_data *data;
+ const struct clkgen_clk_out *outputs;
+};
+
+
+static const struct clk_ops stm_pll3200c32_ops;
+static const struct clk_ops stm_pll3200c32_a9_ops;
+static const struct clk_ops stm_pll4600c28_ops;
+
+static const struct clkgen_pll_data st_pll3200c32_cx_0 = {
+ /* 407 C0 PLL0 */
+ .pdn_status = CLKGEN_FIELD(0x2a0, 0x1, 8),
+ .pdn_ctrl = CLKGEN_FIELD(0x2a0, 0x1, 8),
+ .locked_status = CLKGEN_FIELD(0x2a0, 0x1, 24),
+ .ndiv = CLKGEN_FIELD(0x2a4, C32_NDIV_MASK, 16),
+ .idf = CLKGEN_FIELD(0x2a4, C32_IDF_MASK, 0x0),
+ .num_odfs = 1,
+ .odf = { CLKGEN_FIELD(0x2b4, C32_ODF_MASK, 0) },
+ .odf_gate = { CLKGEN_FIELD(0x2b4, 0x1, 6) },
+ .ops = &stm_pll3200c32_ops,
+};
+
+static const struct clkgen_pll_data_clks st_pll3200c32_cx_0_legacy_data = {
+ .data = (struct clkgen_pll_data *)&st_pll3200c32_cx_0,
+};
+
+static const struct clkgen_clk_out st_pll3200c32_ax_0_clks[] = {
+ { .name = "clk-s-a0-pll-odf-0", },
+};
+
+static const struct clkgen_pll_data_clks st_pll3200c32_a0_data = {
+ .data = (struct clkgen_pll_data *)&st_pll3200c32_cx_0,
+ .outputs = st_pll3200c32_ax_0_clks,
+};
+
+static const struct clkgen_clk_out st_pll3200c32_cx_0_clks[] = {
+ { .name = "clk-s-c0-pll0-odf-0", },
+};
+
+static const struct clkgen_pll_data_clks st_pll3200c32_c0_data = {
+ .data = (struct clkgen_pll_data *)&st_pll3200c32_cx_0,
+ .outputs = st_pll3200c32_cx_0_clks,
+};
+
+static const struct clkgen_pll_data st_pll3200c32_cx_1 = {
+ /* 407 C0 PLL1 */
+ .pdn_status = CLKGEN_FIELD(0x2c8, 0x1, 8),
+ .pdn_ctrl = CLKGEN_FIELD(0x2c8, 0x1, 8),
+ .locked_status = CLKGEN_FIELD(0x2c8, 0x1, 24),
+ .ndiv = CLKGEN_FIELD(0x2cc, C32_NDIV_MASK, 16),
+ .idf = CLKGEN_FIELD(0x2cc, C32_IDF_MASK, 0x0),
+ .num_odfs = 1,
+ .odf = { CLKGEN_FIELD(0x2dc, C32_ODF_MASK, 0) },
+ .odf_gate = { CLKGEN_FIELD(0x2dc, 0x1, 6) },
+ .ops = &stm_pll3200c32_ops,
+};
+
+static const struct clkgen_pll_data_clks st_pll3200c32_cx_1_legacy_data = {
+ .data = (struct clkgen_pll_data *)&st_pll3200c32_cx_1,
+};
+
+static const struct clkgen_clk_out st_pll3200c32_cx_1_clks[] = {
+ { .name = "clk-s-c0-pll1-odf-0", },
+};
+
+static const struct clkgen_pll_data_clks st_pll3200c32_c1_data = {
+ .data = (struct clkgen_pll_data *)&st_pll3200c32_cx_1,
+ .outputs = st_pll3200c32_cx_1_clks,
+};
+
+static const struct clkgen_pll_data st_pll3200c32_407_a9 = {
+ /* 407 A9 */
+ .pdn_status = CLKGEN_FIELD(0x1a8, 0x1, 0),
+ .pdn_ctrl = CLKGEN_FIELD(0x1a8, 0x1, 0),
+ .locked_status = CLKGEN_FIELD(0x87c, 0x1, 0),
+ .ndiv = CLKGEN_FIELD(0x1b0, C32_NDIV_MASK, 0),
+ .idf = CLKGEN_FIELD(0x1a8, C32_IDF_MASK, 25),
+ .num_odfs = 1,
+ .odf = { CLKGEN_FIELD(0x1b0, C32_ODF_MASK, 8) },
+ .odf_gate = { CLKGEN_FIELD(0x1ac, 0x1, 28) },
+ .switch2pll_en = true,
+ .cp = CLKGEN_FIELD(0x1a8, C32_CP_MASK, 1),
+ .switch2pll = CLKGEN_FIELD(0x1a4, 0x1, 1),
+ .lock = &clkgen_a9_lock,
+ .ops = &stm_pll3200c32_a9_ops,
+};
+
+static const struct clkgen_clk_out st_pll3200c32_407_a9_clks[] = {
+ { .name = "clockgen-a9-pll-odf", },
+};
+
+static const struct clkgen_pll_data_clks st_pll3200c32_407_a9_data = {
+ .data = (struct clkgen_pll_data *)&st_pll3200c32_407_a9,
+ .outputs = st_pll3200c32_407_a9_clks,
+};
+
+static struct clkgen_pll_data st_pll4600c28_418_a9 = {
+ /* 418 A9 */
+ .pdn_status = CLKGEN_FIELD(0x1a8, 0x1, 0),
+ .pdn_ctrl = CLKGEN_FIELD(0x1a8, 0x1, 0),
+ .locked_status = CLKGEN_FIELD(0x87c, 0x1, 0),
+ .ndiv = CLKGEN_FIELD(0x1b0, C28_NDIV_MASK, 0),
+ .idf = CLKGEN_FIELD(0x1a8, C28_IDF_MASK, 25),
+ .num_odfs = 1,
+ .odf = { CLKGEN_FIELD(0x1b0, C28_ODF_MASK, 8) },
+ .odf_gate = { CLKGEN_FIELD(0x1ac, 0x1, 28) },
+ .switch2pll_en = true,
+ .switch2pll = CLKGEN_FIELD(0x1a4, 0x1, 1),
+ .lock = &clkgen_a9_lock,
+ .ops = &stm_pll4600c28_ops,
+};
+
+static const struct clkgen_clk_out st_pll4600c28_418_a9_clks[] = {
+ { .name = "clockgen-a9-pll-odf", },
+};
+
+static const struct clkgen_pll_data_clks st_pll4600c28_418_a9_data = {
+ .data = (struct clkgen_pll_data *)&st_pll4600c28_418_a9,
+ .outputs = st_pll4600c28_418_a9_clks,
+};
+
+/**
+ * DOC: Clock generated by the PLL, rate set and enabled by the bootloader
+ *
+ * Traits of this clock:
+ * prepare - clk_(un)prepare only ensures parent is (un)prepared
+ * enable - clk_enable/disable only ensures parent is enabled
+ * rate - rate is fixed as programmed by the bootloader; only the A9 PLL
+ *        variants provide clk_set_rate support
+ * parent - fixed parent. No clk_set_parent support
+ */
+
+/*
+ * PLL clock integrated in the ClockGen instances of the STiH407 and
+ * STiH418 SoCs.
+ *
+ * @hw: handle between common and hardware-specific interfaces.
+ * @data: SoC-specific PLL description (register fields and clock ops).
+ * @regs_base: base of the PLL configuration register(s).
+ * @lock: optional spinlock protecting PLL register accesses.
+ * @ndiv: cached NDIV value applied by the set_rate operations.
+ * @idf: cached IDF value applied by the set_rate operations.
+ * @cp: cached charge pump value (PLL3200 C32 A9 only).
+ */
+struct clkgen_pll {
+ struct clk_hw hw;
+ struct clkgen_pll_data *data;
+ void __iomem *regs_base;
+ spinlock_t *lock;
+
+ u32 ndiv;
+ u32 idf;
+ u32 cp;
+};
+
+#define to_clkgen_pll(_hw) container_of(_hw, struct clkgen_pll, hw)
+
+struct stm_pll {
+ unsigned long mdiv;
+ unsigned long ndiv;
+ unsigned long pdiv;
+ unsigned long odf;
+ unsigned long idf;
+ unsigned long ldf;
+ unsigned long cp;
+};
+
+static int clkgen_pll_is_locked(struct clk_hw *hw)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+ u32 locked = CLKGEN_READ(pll, locked_status);
+
+ return !!locked;
+}
+
+static int clkgen_pll_is_enabled(struct clk_hw *hw)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+ u32 poweroff = CLKGEN_READ(pll, pdn_status);
+ return !poweroff;
+}
+
+static int __clkgen_pll_enable(struct clk_hw *hw)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+ void __iomem *base = pll->regs_base;
+ struct clkgen_field *field = &pll->data->locked_status;
+ int ret = 0;
+ u32 reg;
+
+ if (clkgen_pll_is_enabled(hw))
+ return 0;
+
+ CLKGEN_WRITE(pll, pdn_ctrl, 0);
+
+ ret = readl_relaxed_poll_timeout(base + field->offset, reg,
+ !!((reg >> field->shift) & field->mask), 0, 10000);
+
+ if (!ret) {
+ if (pll->data->switch2pll_en)
+ CLKGEN_WRITE(pll, switch2pll, 0);
+
+ pr_debug("%s:%s enabled\n", __clk_get_name(hw->clk), __func__);
+ }
+
+ return ret;
+}
+
+static int clkgen_pll_enable(struct clk_hw *hw)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+ unsigned long flags = 0;
+ int ret = 0;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ ret = __clkgen_pll_enable(hw);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return ret;
+}
+
+static void __clkgen_pll_disable(struct clk_hw *hw)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+
+ if (!clkgen_pll_is_enabled(hw))
+ return;
+
+ if (pll->data->switch2pll_en)
+ CLKGEN_WRITE(pll, switch2pll, 1);
+
+ CLKGEN_WRITE(pll, pdn_ctrl, 1);
+
+ pr_debug("%s:%s disabled\n", __clk_get_name(hw->clk), __func__);
+}
+
+static void clkgen_pll_disable(struct clk_hw *hw)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+ unsigned long flags = 0;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ __clkgen_pll_disable(hw);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+}
+
+static int clk_pll3200c32_get_params(unsigned long input, unsigned long output,
+ struct stm_pll *pll)
+{
+ unsigned long i, n;
+ unsigned long deviation = ~0;
+ unsigned long new_freq;
+ long new_deviation;
+ /* Charge pump table: highest ndiv value for cp=6 to 25 */
+ static const unsigned char cp_table[] = {
+ 48, 56, 64, 72, 80, 88, 96, 104, 112, 120,
+ 128, 136, 144, 152, 160, 168, 176, 184, 192
+ };
+
+	/* Output clock range: 800 MHz to 1600 MHz */
+ if (output < 800000000 || output > 1600000000)
+ return -EINVAL;
+
+ input /= 1000;
+ output /= 1000;
+
+ for (i = 1; i <= 7 && deviation; i++) {
+ n = i * output / (2 * input);
+
+		/* NDIV must stay within the 8..200 range */
+ if (n < 8)
+ continue;
+ if (n > 200)
+ break;
+
+ new_freq = (input * 2 * n) / i;
+
+ new_deviation = abs(new_freq - output);
+
+ if (!new_deviation || new_deviation < deviation) {
+ pll->idf = i;
+ pll->ndiv = n;
+ deviation = new_deviation;
+ }
+ }
+
+ if (deviation == ~0) /* No solution found */
+ return -EINVAL;
+
+ /* Computing recommended charge pump value */
+ for (pll->cp = 6; pll->ndiv > cp_table[pll->cp-6]; (pll->cp)++)
+ ;
+
+ return 0;
+}
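+
+/*
+ * Worked example of the search above (illustrative values, not taken from a
+ * real board): input = 30 MHz, requested output = 1200 MHz.  With i = 1,
+ * n = 1 * 1200000 / (2 * 30000) = 20 and new_freq = (30000 * 2 * 20) / 1 =
+ * 1200000 kHz, an exact match, so the loop stops with idf = 1 and ndiv = 20.
+ * Since 20 <= cp_table[0] (48), the recommended charge pump value is cp = 6.
+ */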
+
+static int clk_pll3200c32_get_rate(unsigned long input, struct stm_pll *pll,
+ unsigned long *rate)
+{
+ if (!pll->idf)
+ pll->idf = 1;
+
+ *rate = ((2 * (input / 1000) * pll->ndiv) / pll->idf) * 1000;
+
+ return 0;
+}
+
+static unsigned long recalc_stm_pll3200c32(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+ unsigned long ndiv, idf;
+ unsigned long rate = 0;
+
+ if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw))
+ return 0;
+
+ ndiv = CLKGEN_READ(pll, ndiv);
+ idf = CLKGEN_READ(pll, idf);
+
+ if (idf)
+ /* Note: input is divided to avoid overflow */
+ rate = ((2 * (parent_rate/1000) * ndiv) / idf) * 1000;
+
+ pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate);
+
+ return rate;
+}
+
+static long round_rate_stm_pll3200c32(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct stm_pll params;
+
+	if (!clk_pll3200c32_get_params(*prate, rate, &params)) {
+		clk_pll3200c32_get_rate(*prate, &params, &rate);
+	} else {
+		pr_debug("%s: %s rate %ld Invalid\n", __func__,
+			 __clk_get_name(hw->clk), rate);
+		return 0;
+	}
+
+ pr_debug("%s: %s new rate %ld [ndiv=%u] [idf=%u]\n",
+ __func__, __clk_get_name(hw->clk),
+ rate, (unsigned int)params.ndiv,
+ (unsigned int)params.idf);
+
+ return rate;
+}
+
+static int set_rate_stm_pll3200c32(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+ struct stm_pll params;
+ long hwrate = 0;
+ unsigned long flags = 0;
+
+ if (!rate || !parent_rate)
+ return -EINVAL;
+
+	if (!clk_pll3200c32_get_params(parent_rate, rate, &params)) {
+		clk_pll3200c32_get_rate(parent_rate, &params, &hwrate);
+	} else {
+		pr_debug("%s: %s rate %ld Invalid\n", __func__,
+			 __clk_get_name(hw->clk), rate);
+		return -EINVAL;
+	}
+
+ pr_debug("%s: %s new rate %ld [ndiv=0x%x] [idf=0x%x]\n",
+ __func__, __clk_get_name(hw->clk),
+ hwrate, (unsigned int)params.ndiv,
+ (unsigned int)params.idf);
+
+ if (!hwrate)
+ return -EINVAL;
+
+ pll->ndiv = params.ndiv;
+ pll->idf = params.idf;
+ pll->cp = params.cp;
+
+ __clkgen_pll_disable(hw);
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ CLKGEN_WRITE(pll, ndiv, pll->ndiv);
+ CLKGEN_WRITE(pll, idf, pll->idf);
+ CLKGEN_WRITE(pll, cp, pll->cp);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ __clkgen_pll_enable(hw);
+
+ return 0;
+}
+
+/* PLL output structure
+ * FVCO >> /2 >> FVCOBY2 (no output)
+ *               |> Divider (ODF) >> PHI
+ *
+ * FVCOBY2 output = (input * 2 * NDIV) / IDF (assuming FRAC_CONTROL==L)
+ *
+ * Rules:
+ *   4 MHz <= INFF input <= 350 MHz
+ *   4 MHz <= INFIN (INFF / IDF) <= 50 MHz
+ *   19.05 MHz <= FVCOBY2 output (PHI with ODF = 1) <= 3000 MHz
+ *   1 <= i (register/dec value for IDF) <= 7
+ *   8 <= n (register/dec value for NDIV) <= 246
+ */
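+
+/*
+ * Illustrative example of the formula above (hypothetical values, not tied
+ * to a specific platform): INFF = 30 MHz with IDF = 2 gives INFIN = 15 MHz
+ * (inside the 4-50 MHz window); with NDIV = 50, FVCOBY2 = 15 MHz * 2 * 50 =
+ * 1500 MHz, which respects the 19.05-3000 MHz output constraint.
+ */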
+
+static int clk_pll4600c28_get_params(unsigned long input, unsigned long output,
+ struct stm_pll *pll)
+{
+ unsigned long i, infin, n;
+ unsigned long deviation = ~0;
+ unsigned long new_freq, new_deviation;
+
+	/* Output clock range: 19 MHz to 3000 MHz */
+ if (output < 19000000 || output > 3000000000u)
+ return -EINVAL;
+
+	/* For best jitter, keep IDF as small and NDIV as large as possible */
+ for (i = 1; i <= 7 && deviation; i++) {
+ /* INFIN checks */
+ infin = input / i;
+ if (infin < 4000000 || infin > 50000000)
+ continue; /* Invalid case */
+
+ n = output / (infin * 2);
+ if (n < 8 || n > 246)
+ continue; /* Invalid case */
+ if (n < 246)
+			n++; /* Round up to cover the fractional part when n = x.y */
+
+ for (; n >= 8 && deviation; n--) {
+ new_freq = infin * 2 * n;
+ if (new_freq < output)
+				break; /* Optimization: lower n only drops further below target */
+
+ new_deviation = new_freq - output;
+ if (!new_deviation || new_deviation < deviation) {
+ pll->idf = i;
+ pll->ndiv = n;
+ deviation = new_deviation;
+ }
+ }
+ }
+
+ if (deviation == ~0) /* No solution found */
+ return -EINVAL;
+
+ return 0;
+}
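+
+/*
+ * Illustrative trace of the rounding above (hypothetical request):
+ * input = 30 MHz, output = 1000 MHz.  With i = 1, n truncates to 16 and is
+ * bumped to 17; 30 MHz * 2 * 17 = 1020 MHz becomes the first candidate and
+ * the inner loop breaks at n = 16 (960 MHz < output).  The i = 3, n = 50
+ * iteration then finds the exact 1000 MHz solution.
+ */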
+
+static int clk_pll4600c28_get_rate(unsigned long input, struct stm_pll *pll,
+ unsigned long *rate)
+{
+ if (!pll->idf)
+ pll->idf = 1;
+
+ *rate = (input / pll->idf) * 2 * pll->ndiv;
+
+ return 0;
+}
+
+static unsigned long recalc_stm_pll4600c28(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+ struct stm_pll params;
+ unsigned long rate;
+
+ if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw))
+ return 0;
+
+ params.ndiv = CLKGEN_READ(pll, ndiv);
+ params.idf = CLKGEN_READ(pll, idf);
+
+ clk_pll4600c28_get_rate(parent_rate, &params, &rate);
+
+ pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
+
+ return rate;
+}
+
+static long round_rate_stm_pll4600c28(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct stm_pll params;
+
+ if (!clk_pll4600c28_get_params(*prate, rate, &params)) {
+ clk_pll4600c28_get_rate(*prate, &params, &rate);
+ } else {
+ pr_debug("%s: %s rate %ld Invalid\n", __func__,
+ __clk_get_name(hw->clk), rate);
+ return 0;
+ }
+
+ pr_debug("%s: %s new rate %ld [ndiv=%u] [idf=%u]\n",
+ __func__, __clk_get_name(hw->clk),
+ rate, (unsigned int)params.ndiv,
+ (unsigned int)params.idf);
+
+ return rate;
+}
+
+static int set_rate_stm_pll4600c28(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clkgen_pll *pll = to_clkgen_pll(hw);
+ struct stm_pll params;
+ long hwrate;
+ unsigned long flags = 0;
+
+ if (!rate || !parent_rate)
+ return -EINVAL;
+
+ if (!clk_pll4600c28_get_params(parent_rate, rate, &params)) {
+ clk_pll4600c28_get_rate(parent_rate, &params, &hwrate);
+ } else {
+ pr_debug("%s: %s rate %ld Invalid\n", __func__,
+ __clk_get_name(hw->clk), rate);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: %s new rate %ld [ndiv=0x%x] [idf=0x%x]\n",
+ __func__, __clk_get_name(hw->clk),
+ hwrate, (unsigned int)params.ndiv,
+ (unsigned int)params.idf);
+
+ if (!hwrate)
+ return -EINVAL;
+
+ pll->ndiv = params.ndiv;
+ pll->idf = params.idf;
+
+ __clkgen_pll_disable(hw);
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ CLKGEN_WRITE(pll, ndiv, pll->ndiv);
+ CLKGEN_WRITE(pll, idf, pll->idf);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ __clkgen_pll_enable(hw);
+
+ return 0;
+}
+
+static const struct clk_ops stm_pll3200c32_ops = {
+ .enable = clkgen_pll_enable,
+ .disable = clkgen_pll_disable,
+ .is_enabled = clkgen_pll_is_enabled,
+ .recalc_rate = recalc_stm_pll3200c32,
+};
+
+static const struct clk_ops stm_pll3200c32_a9_ops = {
+ .enable = clkgen_pll_enable,
+ .disable = clkgen_pll_disable,
+ .is_enabled = clkgen_pll_is_enabled,
+ .recalc_rate = recalc_stm_pll3200c32,
+ .round_rate = round_rate_stm_pll3200c32,
+ .set_rate = set_rate_stm_pll3200c32,
+};
+
+static const struct clk_ops stm_pll4600c28_ops = {
+ .enable = clkgen_pll_enable,
+ .disable = clkgen_pll_disable,
+ .is_enabled = clkgen_pll_is_enabled,
+ .recalc_rate = recalc_stm_pll4600c28,
+ .round_rate = round_rate_stm_pll4600c28,
+ .set_rate = set_rate_stm_pll4600c28,
+};
+
+static struct clk * __init clkgen_pll_register(const char *parent_name,
+ struct clkgen_pll_data *pll_data,
+ void __iomem *reg, unsigned long pll_flags,
+ const char *clk_name, spinlock_t *lock)
+{
+ struct clkgen_pll *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = clk_name;
+ init.ops = pll_data->ops;
+
+ init.flags = pll_flags | CLK_GET_RATE_NOCACHE;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ pll->data = pll_data;
+ pll->regs_base = reg;
+ pll->hw.init = &init;
+ pll->lock = lock;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk)) {
+ kfree(pll);
+ return clk;
+ }
+
+ pr_debug("%s: parent %s rate %lu\n",
+ __clk_get_name(clk),
+ __clk_get_name(clk_get_parent(clk)),
+ clk_get_rate(clk));
+
+ return clk;
+}
+
+static void __iomem * __init clkgen_get_register_base(
+ struct device_node *np)
+{
+ struct device_node *pnode;
+ void __iomem *reg = NULL;
+
+ pnode = of_get_parent(np);
+ if (!pnode)
+ return NULL;
+
+ reg = of_iomap(pnode, 0);
+
+ of_node_put(pnode);
+ return reg;
+}
+
+static struct clk * __init clkgen_odf_register(const char *parent_name,
+ void __iomem *reg,
+ struct clkgen_pll_data *pll_data,
+ unsigned long pll_flags, int odf,
+ spinlock_t *odf_lock,
+ const char *odf_name)
+{
+ struct clk *clk;
+ unsigned long flags;
+ struct clk_gate *gate;
+ struct clk_divider *div;
+
+ flags = pll_flags | CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT;
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ gate->flags = CLK_GATE_SET_TO_DISABLE;
+ gate->reg = reg + pll_data->odf_gate[odf].offset;
+ gate->bit_idx = pll_data->odf_gate[odf].shift;
+ gate->lock = odf_lock;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div) {
+ kfree(gate);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
+ div->reg = reg + pll_data->odf[odf].offset;
+ div->shift = pll_data->odf[odf].shift;
+ div->width = fls(pll_data->odf[odf].mask);
+ div->lock = odf_lock;
+
+ clk = clk_register_composite(NULL, odf_name, &parent_name, 1,
+ NULL, NULL,
+ &div->hw, &clk_divider_ops,
+ &gate->hw, &clk_gate_ops,
+ flags);
+ if (IS_ERR(clk))
+ return clk;
+
+ pr_debug("%s: parent %s rate %lu\n",
+ __clk_get_name(clk),
+ __clk_get_name(clk_get_parent(clk)),
+ clk_get_rate(clk));
+ return clk;
+}
+
+static void __init clkgen_c32_pll_setup(struct device_node *np,
+ struct clkgen_pll_data_clks *datac)
+{
+ struct clk *clk;
+ const char *parent_name, *pll_name;
+ void __iomem *pll_base;
+ int num_odfs, odf;
+ struct clk_onecell_data *clk_data;
+ unsigned long pll_flags = 0;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (!parent_name)
+ return;
+
+ pll_base = clkgen_get_register_base(np);
+ if (!pll_base)
+ return;
+
+ of_clk_detect_critical(np, 0, &pll_flags);
+
+ clk = clkgen_pll_register(parent_name, datac->data, pll_base, pll_flags,
+ np->name, datac->data->lock);
+ if (IS_ERR(clk))
+ return;
+
+ pll_name = __clk_get_name(clk);
+
+ num_odfs = datac->data->num_odfs;
+
+ clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ return;
+
+ clk_data->clk_num = num_odfs;
+ clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *),
+ GFP_KERNEL);
+
+ if (!clk_data->clks)
+ goto err;
+
+ for (odf = 0; odf < num_odfs; odf++) {
+ struct clk *clk;
+ const char *clk_name;
+ unsigned long odf_flags = 0;
+
+ if (datac->outputs) {
+ clk_name = datac->outputs[odf].name;
+ odf_flags = datac->outputs[odf].flags;
+ } else {
+ if (of_property_read_string_index(np,
+ "clock-output-names",
+ odf, &clk_name))
+ return;
+
+ of_clk_detect_critical(np, odf, &odf_flags);
+ }
+
+ clk = clkgen_odf_register(pll_name, pll_base, datac->data,
+ odf_flags, odf, &clkgena_c32_odf_lock,
+ clk_name);
+ if (IS_ERR(clk))
+ goto err;
+
+ clk_data->clks[odf] = clk;
+ }
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
+ return;
+
+err:
+ kfree(pll_name);
+ kfree(clk_data->clks);
+ kfree(clk_data);
+}
+
+static void __init clkgen_c32_pll0_setup(struct device_node *np)
+{
+ clkgen_c32_pll_setup(np,
+ (struct clkgen_pll_data_clks *) &st_pll3200c32_cx_0_legacy_data);
+}
+CLK_OF_DECLARE(c32_pll0, "st,clkgen-pll0", clkgen_c32_pll0_setup);
+
+static void __init clkgen_c32_pll0_a0_setup(struct device_node *np)
+{
+ clkgen_c32_pll_setup(np,
+ (struct clkgen_pll_data_clks *) &st_pll3200c32_a0_data);
+}
+CLK_OF_DECLARE(c32_pll0_a0, "st,clkgen-pll0-a0", clkgen_c32_pll0_a0_setup);
+
+static void __init clkgen_c32_pll0_c0_setup(struct device_node *np)
+{
+ clkgen_c32_pll_setup(np,
+ (struct clkgen_pll_data_clks *) &st_pll3200c32_c0_data);
+}
+CLK_OF_DECLARE(c32_pll0_c0, "st,clkgen-pll0-c0", clkgen_c32_pll0_c0_setup);
+
+static void __init clkgen_c32_pll1_setup(struct device_node *np)
+{
+ clkgen_c32_pll_setup(np,
+ (struct clkgen_pll_data_clks *) &st_pll3200c32_cx_1_legacy_data);
+}
+CLK_OF_DECLARE(c32_pll1, "st,clkgen-pll1", clkgen_c32_pll1_setup);
+
+static void __init clkgen_c32_pll1_c0_setup(struct device_node *np)
+{
+ clkgen_c32_pll_setup(np,
+ (struct clkgen_pll_data_clks *) &st_pll3200c32_c1_data);
+}
+CLK_OF_DECLARE(c32_pll1_c0, "st,clkgen-pll1-c0", clkgen_c32_pll1_c0_setup);
+
+static void __init clkgen_c32_plla9_setup(struct device_node *np)
+{
+ clkgen_c32_pll_setup(np,
+ (struct clkgen_pll_data_clks *) &st_pll3200c32_407_a9_data);
+}
+CLK_OF_DECLARE(c32_plla9, "st,stih407-clkgen-plla9", clkgen_c32_plla9_setup);
+
+static void __init clkgen_c28_plla9_setup(struct device_node *np)
+{
+ clkgen_c32_pll_setup(np,
+ (struct clkgen_pll_data_clks *) &st_pll4600c28_418_a9_data);
+}
+CLK_OF_DECLARE(c28_plla9, "st,stih418-clkgen-plla9", clkgen_c28_plla9_setup);
diff --git a/drivers/clk/st/clkgen.h b/drivers/clk/st/clkgen.h
new file mode 100644
index 0000000000..44302fc7ca
--- /dev/null
+++ b/drivers/clk/st/clkgen.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Clock H/w specific Information
+ *
+ * Author: Pankaj Dev <pankaj.dev@st.com>
+ *
+ * Copyright (C) 2014 STMicroelectronics
+ */
+
+#ifndef __CLKGEN_INFO_H
+#define __CLKGEN_INFO_H
+
+extern spinlock_t clkgen_a9_lock;
+
+struct clkgen_field {
+ unsigned int offset;
+ unsigned int mask;
+ unsigned int shift;
+};
+
+static inline unsigned long clkgen_read(void __iomem *base,
+ struct clkgen_field *field)
+{
+ return (readl(base + field->offset) >> field->shift) & field->mask;
+}
+
+static inline void clkgen_write(void __iomem *base, struct clkgen_field *field,
+ unsigned long val)
+{
+ writel((readl(base + field->offset) &
+ ~(field->mask << field->shift)) | (val << field->shift),
+ base + field->offset);
+}
+
+#define CLKGEN_FIELD(_offset, _mask, _shift) { \
+ .offset = _offset, \
+ .mask = _mask, \
+ .shift = _shift, \
+ }
+
+#define CLKGEN_READ(pll, field) clkgen_read(pll->regs_base, \
+ &pll->data->field)
+
+#define CLKGEN_WRITE(pll, field, val) clkgen_write(pll->regs_base, \
+ &pll->data->field, val)
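+
+/*
+ * Illustrative usage (sketch): a descriptor such as
+ * CLKGEN_FIELD(0x2a4, 0xff, 16) selects bits [23:16] of the register at
+ * offset 0x2a4; clkgen_read() returns just that field, while clkgen_write()
+ * performs a read-modify-write that leaves the other bits untouched.
+ */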
+
+#endif /* __CLKGEN_INFO_H */