author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/clk/ti
parent     Initial commit. (diff)
Adding upstream version 6.1.76. (upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/clk/ti')
-rw-r--r--  drivers/clk/ti/Kconfig        |    7
-rw-r--r--  drivers/clk/ti/Makefile       |   26
-rw-r--r--  drivers/clk/ti/adpll.c        |  967
-rw-r--r--  drivers/clk/ti/apll.c         |  415
-rw-r--r--  drivers/clk/ti/autoidle.c     |  257
-rw-r--r--  drivers/clk/ti/clk-2xxx.c     |  250
-rw-r--r--  drivers/clk/ti/clk-33xx.c     |  310
-rw-r--r--  drivers/clk/ti/clk-3xxx.c     |  353
-rw-r--r--  drivers/clk/ti/clk-43xx.c     |  301
-rw-r--r--  drivers/clk/ti/clk-44xx.c     |  830
-rw-r--r--  drivers/clk/ti/clk-54xx.c     |  658
-rw-r--r--  drivers/clk/ti/clk-7xx.c      |  976
-rw-r--r--  drivers/clk/ti/clk-814x.c     |  118
-rw-r--r--  drivers/clk/ti/clk-816x.c     |   79
-rw-r--r--  drivers/clk/ti/clk-dra7-atl.c |  309
-rw-r--r--  drivers/clk/ti/clk.c          |  659
-rw-r--r--  drivers/clk/ti/clkctrl.c      |  749
-rw-r--r--  drivers/clk/ti/clkt_dflt.c    |  289
-rw-r--r--  drivers/clk/ti/clkt_dpll.c    |  371
-rw-r--r--  drivers/clk/ti/clkt_iclk.c    |   97
-rw-r--r--  drivers/clk/ti/clock.h        |  307
-rw-r--r--  drivers/clk/ti/clockdomain.c  |  172
-rw-r--r--  drivers/clk/ti/composite.c    |  270
-rw-r--r--  drivers/clk/ti/divider.c      |  561
-rw-r--r--  drivers/clk/ti/dpll.c         |  726
-rw-r--r--  drivers/clk/ti/dpll3xxx.c     | 1115
-rw-r--r--  drivers/clk/ti/dpll44xx.c     |  226
-rw-r--r--  drivers/clk/ti/fapll.c        |  666
-rw-r--r--  drivers/clk/ti/fixed-factor.c |   61
-rw-r--r--  drivers/clk/ti/gate.c         |  263
-rw-r--r--  drivers/clk/ti/interface.c    |  144
-rw-r--r--  drivers/clk/ti/mux.c          |  287
32 files changed, 12819 insertions, 0 deletions
diff --git a/drivers/clk/ti/Kconfig b/drivers/clk/ti/Kconfig
new file mode 100644
index 000000000..497291230
--- /dev/null
+++ b/drivers/clk/ti/Kconfig
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config COMMON_CLK_TI_ADPLL
+ tristate "Clock driver for dm814x ADPLL"
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ default y if SOC_TI81XX
+ help
+ ADPLL clock driver for the dm814x SoC using the common clock framework.
diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile
new file mode 100644
index 000000000..2c6315cfd
--- /dev/null
+++ b/drivers/clk/ti/Makefile
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0
+ifeq ($(CONFIG_ARCH_OMAP2PLUS), y)
+
+obj-y += clk.o autoidle.o clockdomain.o
+clk-common = dpll.o composite.o divider.o gate.o \
+ fixed-factor.o mux.o apll.o \
+ clkt_dpll.o clkt_iclk.o clkt_dflt.o \
+ clkctrl.o
+obj-$(CONFIG_SOC_AM33XX) += $(clk-common) clk-33xx.o dpll3xxx.o
+obj-$(CONFIG_SOC_TI81XX) += $(clk-common) fapll.o clk-814x.o clk-816x.o
+obj-$(CONFIG_ARCH_OMAP2) += $(clk-common) interface.o clk-2xxx.o
+obj-$(CONFIG_ARCH_OMAP3) += $(clk-common) interface.o \
+ clk-3xxx.o dpll3xxx.o
+obj-$(CONFIG_ARCH_OMAP4) += $(clk-common) clk-44xx.o \
+ dpll3xxx.o dpll44xx.o
+obj-$(CONFIG_SOC_OMAP5) += $(clk-common) clk-54xx.o \
+ dpll3xxx.o dpll44xx.o
+obj-$(CONFIG_SOC_DRA7XX) += $(clk-common) clk-7xx.o \
+ clk-dra7-atl.o dpll3xxx.o \
+ dpll44xx.o
+
+obj-$(CONFIG_SOC_AM43XX) += $(clk-common) dpll3xxx.o clk-43xx.o
+
+endif # CONFIG_ARCH_OMAP2PLUS
+
+obj-$(CONFIG_COMMON_CLK_TI_ADPLL) += adpll.o
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
new file mode 100644
index 000000000..f5e7e2049
--- /dev/null
+++ b/drivers/clk/ti/adpll.c
@@ -0,0 +1,967 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/string.h>
+
+#define ADPLL_PLLSS_MMR_LOCK_OFFSET 0x00 /* Managed by MPUPLL */
+#define ADPLL_PLLSS_MMR_LOCK_ENABLED 0x1f125B64
+#define ADPLL_PLLSS_MMR_UNLOCK_MAGIC 0x1eda4c3d
+
+#define ADPLL_PWRCTRL_OFFSET 0x00
+#define ADPLL_PWRCTRL_PONIN 5
+#define ADPLL_PWRCTRL_PGOODIN 4
+#define ADPLL_PWRCTRL_RET 3
+#define ADPLL_PWRCTRL_ISORET 2
+#define ADPLL_PWRCTRL_ISOSCAN 1
+#define ADPLL_PWRCTRL_OFFMODE 0
+
+#define ADPLL_CLKCTRL_OFFSET 0x04
+#define ADPLL_CLKCTRL_CLKDCOLDOEN 29
+#define ADPLL_CLKCTRL_IDLE 23
+#define ADPLL_CLKCTRL_CLKOUTEN 20
+#define ADPLL_CLKINPHIFSEL_ADPLL_S 19 /* REVISIT: which bit? */
+#define ADPLL_CLKCTRL_CLKOUTLDOEN_ADPLL_LJ 19
+#define ADPLL_CLKCTRL_ULOWCLKEN 18
+#define ADPLL_CLKCTRL_CLKDCOLDOPWDNZ 17
+#define ADPLL_CLKCTRL_M2PWDNZ 16
+#define ADPLL_CLKCTRL_M3PWDNZ_ADPLL_S 15
+#define ADPLL_CLKCTRL_LOWCURRSTDBY_ADPLL_S 13
+#define ADPLL_CLKCTRL_LPMODE_ADPLL_S 12
+#define ADPLL_CLKCTRL_REGM4XEN_ADPLL_S 10
+#define ADPLL_CLKCTRL_SELFREQDCO_ADPLL_LJ 10
+#define ADPLL_CLKCTRL_TINITZ 0
+
+#define ADPLL_TENABLE_OFFSET 0x08
+#define ADPLL_TENABLEDIV_OFFSET 0x8c
+
+#define ADPLL_M2NDIV_OFFSET 0x10
+#define ADPLL_M2NDIV_M2 16
+#define ADPLL_M2NDIV_M2_ADPLL_S_WIDTH 5
+#define ADPLL_M2NDIV_M2_ADPLL_LJ_WIDTH 7
+
+#define ADPLL_MN2DIV_OFFSET 0x14
+#define ADPLL_MN2DIV_N2 16
+
+#define ADPLL_FRACDIV_OFFSET 0x18
+#define ADPLL_FRACDIV_REGSD 24
+#define ADPLL_FRACDIV_FRACTIONALM 0
+#define ADPLL_FRACDIV_FRACTIONALM_MASK 0x3ffff
+
+#define ADPLL_BWCTRL_OFFSET 0x1c
+#define ADPLL_BWCTRL_BWCONTROL 1
+#define ADPLL_BWCTRL_BW_INCR_DECRZ 0
+
+#define ADPLL_RESERVED_OFFSET 0x20
+
+#define ADPLL_STATUS_OFFSET 0x24
+#define ADPLL_STATUS_PONOUT 31
+#define ADPLL_STATUS_PGOODOUT 30
+#define ADPLL_STATUS_LDOPWDN 29
+#define ADPLL_STATUS_RECAL_BSTATUS3 28
+#define ADPLL_STATUS_RECAL_OPPIN 27
+#define ADPLL_STATUS_PHASELOCK 10
+#define ADPLL_STATUS_FREQLOCK 9
+#define ADPLL_STATUS_BYPASSACK 8
+#define ADPLL_STATUS_LOSSREF 6
+#define ADPLL_STATUS_CLKOUTENACK 5
+#define ADPLL_STATUS_LOCK2 4
+#define ADPLL_STATUS_M2CHANGEACK 3
+#define ADPLL_STATUS_HIGHJITTER 1
+#define ADPLL_STATUS_BYPASS 0
+#define ADPLL_STATUS_PREPARED_MASK (BIT(ADPLL_STATUS_PHASELOCK) | \
+ BIT(ADPLL_STATUS_FREQLOCK))
+
+#define ADPLL_M3DIV_OFFSET 0x28 /* Only on MPUPLL */
+#define ADPLL_M3DIV_M3 0
+#define ADPLL_M3DIV_M3_WIDTH 5
+#define ADPLL_M3DIV_M3_MASK 0x1f
+
+#define ADPLL_RAMPCTRL_OFFSET 0x2c /* Only on MPUPLL */
+#define ADPLL_RAMPCTRL_CLKRAMPLEVEL 19
+#define ADPLL_RAMPCTRL_CLKRAMPRATE 16
+#define ADPLL_RAMPCTRL_RELOCK_RAMP_EN 0
+
+#define MAX_ADPLL_INPUTS 3
+#define MAX_ADPLL_OUTPUTS 4
+#define ADPLL_MAX_RETRIES 5
+
+#define to_dco(_hw) container_of(_hw, struct ti_adpll_dco_data, hw)
+#define to_adpll(_hw) container_of(_hw, struct ti_adpll_data, dco)
+#define to_clkout(_hw) container_of(_hw, struct ti_adpll_clkout_data, hw)
+
+enum ti_adpll_clocks {
+ TI_ADPLL_DCO,
+ TI_ADPLL_DCO_GATE,
+ TI_ADPLL_N2,
+ TI_ADPLL_M2,
+ TI_ADPLL_M2_GATE,
+ TI_ADPLL_BYPASS,
+ TI_ADPLL_HIF,
+ TI_ADPLL_DIV2,
+ TI_ADPLL_CLKOUT,
+ TI_ADPLL_CLKOUT2,
+ TI_ADPLL_M3,
+};
+
+#define TI_ADPLL_NR_CLOCKS (TI_ADPLL_M3 + 1)
+
+enum ti_adpll_inputs {
+ TI_ADPLL_CLKINP,
+ TI_ADPLL_CLKINPULOW,
+ TI_ADPLL_CLKINPHIF,
+};
+
+enum ti_adpll_s_outputs {
+ TI_ADPLL_S_DCOCLKLDO,
+ TI_ADPLL_S_CLKOUT,
+ TI_ADPLL_S_CLKOUTX2,
+ TI_ADPLL_S_CLKOUTHIF,
+};
+
+enum ti_adpll_lj_outputs {
+ TI_ADPLL_LJ_CLKDCOLDO,
+ TI_ADPLL_LJ_CLKOUT,
+ TI_ADPLL_LJ_CLKOUTLDO,
+};
+
+struct ti_adpll_platform_data {
+ const bool is_type_s;
+ const int nr_max_inputs;
+ const int nr_max_outputs;
+ const int output_index;
+};
+
+struct ti_adpll_clock {
+ struct clk *clk;
+ struct clk_lookup *cl;
+ void (*unregister)(struct clk *clk);
+};
+
+struct ti_adpll_dco_data {
+ struct clk_hw hw;
+};
+
+struct ti_adpll_clkout_data {
+ struct ti_adpll_data *adpll;
+ struct clk_gate gate;
+ struct clk_hw hw;
+};
+
+struct ti_adpll_data {
+ struct device *dev;
+ const struct ti_adpll_platform_data *c;
+ struct device_node *np;
+ unsigned long pa;
+ void __iomem *iobase;
+ void __iomem *regs;
+ spinlock_t lock; /* For ADPLL shared register access */
+ const char *parent_names[MAX_ADPLL_INPUTS];
+ struct clk *parent_clocks[MAX_ADPLL_INPUTS];
+ struct ti_adpll_clock *clocks;
+ struct clk_onecell_data outputs;
+ struct ti_adpll_dco_data dco;
+};
+
+static const char *ti_adpll_clk_get_name(struct ti_adpll_data *d,
+ int output_index,
+ const char *postfix)
+{
+ const char *name;
+ int err;
+
+ if (output_index >= 0) {
+ err = of_property_read_string_index(d->np,
+ "clock-output-names",
+ output_index,
+ &name);
+ if (err)
+ return NULL;
+ } else {
+ name = devm_kasprintf(d->dev, GFP_KERNEL, "%08lx.adpll.%s",
+ d->pa, postfix);
+ }
+
+ return name;
+}
+
+#define ADPLL_MAX_CON_ID 16 /* See MAX_CON_ID */
+
+static int ti_adpll_setup_clock(struct ti_adpll_data *d, struct clk *clock,
+ int index, int output_index, const char *name,
+ void (*unregister)(struct clk *clk))
+{
+ struct clk_lookup *cl;
+ const char *postfix = NULL;
+ char con_id[ADPLL_MAX_CON_ID];
+
+ d->clocks[index].clk = clock;
+ d->clocks[index].unregister = unregister;
+
+ /* Separate con_id in format "pll040dcoclkldo" to fit MAX_CON_ID */
+ postfix = strrchr(name, '.');
+ if (postfix && strlen(postfix) > 1) {
+ if (strlen(postfix) > ADPLL_MAX_CON_ID)
+ dev_warn(d->dev, "clock %s con_id lookup may fail\n",
+ name);
+ snprintf(con_id, 16, "pll%03lx%s", d->pa & 0xfff, postfix + 1);
+ cl = clkdev_create(clock, con_id, NULL);
+ if (!cl)
+ return -ENOMEM;
+ d->clocks[index].cl = cl;
+ } else {
+ dev_warn(d->dev, "no con_id for clock %s\n", name);
+ }
+
+ if (output_index < 0)
+ return 0;
+
+ d->outputs.clks[output_index] = clock;
+ d->outputs.clk_num++;
+
+ return 0;
+}
+
+static int ti_adpll_init_divider(struct ti_adpll_data *d,
+ enum ti_adpll_clocks index,
+ int output_index, char *name,
+ struct clk *parent_clock,
+ void __iomem *reg,
+ u8 shift, u8 width,
+ u8 clk_divider_flags)
+{
+ const char *child_name;
+ const char *parent_name;
+ struct clk *clock;
+
+ child_name = ti_adpll_clk_get_name(d, output_index, name);
+ if (!child_name)
+ return -EINVAL;
+
+ parent_name = __clk_get_name(parent_clock);
+ clock = clk_register_divider(d->dev, child_name, parent_name, 0,
+ reg, shift, width, clk_divider_flags,
+ &d->lock);
+ if (IS_ERR(clock)) {
+ dev_err(d->dev, "failed to register divider %s: %li\n",
+ name, PTR_ERR(clock));
+ return PTR_ERR(clock);
+ }
+
+ return ti_adpll_setup_clock(d, clock, index, output_index, child_name,
+ clk_unregister_divider);
+}
+
+static int ti_adpll_init_mux(struct ti_adpll_data *d,
+ enum ti_adpll_clocks index,
+ char *name, struct clk *clk0,
+ struct clk *clk1,
+ void __iomem *reg,
+ u8 shift)
+{
+ const char *child_name;
+ const char *parents[2];
+ struct clk *clock;
+
+ child_name = ti_adpll_clk_get_name(d, -ENODEV, name);
+ if (!child_name)
+ return -ENOMEM;
+ parents[0] = __clk_get_name(clk0);
+ parents[1] = __clk_get_name(clk1);
+ clock = clk_register_mux(d->dev, child_name, parents, 2, 0,
+ reg, shift, 1, 0, &d->lock);
+ if (IS_ERR(clock)) {
+ dev_err(d->dev, "failed to register mux %s: %li\n",
+ name, PTR_ERR(clock));
+ return PTR_ERR(clock);
+ }
+
+ return ti_adpll_setup_clock(d, clock, index, -ENODEV, child_name,
+ clk_unregister_mux);
+}
+
+static int ti_adpll_init_gate(struct ti_adpll_data *d,
+ enum ti_adpll_clocks index,
+ int output_index, char *name,
+ struct clk *parent_clock,
+ void __iomem *reg,
+ u8 bit_idx,
+ u8 clk_gate_flags)
+{
+ const char *child_name;
+ const char *parent_name;
+ struct clk *clock;
+
+ child_name = ti_adpll_clk_get_name(d, output_index, name);
+ if (!child_name)
+ return -EINVAL;
+
+ parent_name = __clk_get_name(parent_clock);
+ clock = clk_register_gate(d->dev, child_name, parent_name, 0,
+ reg, bit_idx, clk_gate_flags,
+ &d->lock);
+ if (IS_ERR(clock)) {
+ dev_err(d->dev, "failed to register gate %s: %li\n",
+ name, PTR_ERR(clock));
+ return PTR_ERR(clock);
+ }
+
+ return ti_adpll_setup_clock(d, clock, index, output_index, child_name,
+ clk_unregister_gate);
+}
+
+static int ti_adpll_init_fixed_factor(struct ti_adpll_data *d,
+ enum ti_adpll_clocks index,
+ char *name,
+ struct clk *parent_clock,
+ unsigned int mult,
+ unsigned int div)
+{
+ const char *child_name;
+ const char *parent_name;
+ struct clk *clock;
+
+ child_name = ti_adpll_clk_get_name(d, -ENODEV, name);
+ if (!child_name)
+ return -ENOMEM;
+
+ parent_name = __clk_get_name(parent_clock);
+ clock = clk_register_fixed_factor(d->dev, child_name, parent_name,
+ 0, mult, div);
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ return ti_adpll_setup_clock(d, clock, index, -ENODEV, child_name,
+ clk_unregister);
+}
+
+static void ti_adpll_set_idle_bypass(struct ti_adpll_data *d)
+{
+ unsigned long flags;
+ u32 v;
+
+ spin_lock_irqsave(&d->lock, flags);
+ v = readl_relaxed(d->regs + ADPLL_CLKCTRL_OFFSET);
+ v |= BIT(ADPLL_CLKCTRL_IDLE);
+ writel_relaxed(v, d->regs + ADPLL_CLKCTRL_OFFSET);
+ spin_unlock_irqrestore(&d->lock, flags);
+}
+
+static void ti_adpll_clear_idle_bypass(struct ti_adpll_data *d)
+{
+ unsigned long flags;
+ u32 v;
+
+ spin_lock_irqsave(&d->lock, flags);
+ v = readl_relaxed(d->regs + ADPLL_CLKCTRL_OFFSET);
+ v &= ~BIT(ADPLL_CLKCTRL_IDLE);
+ writel_relaxed(v, d->regs + ADPLL_CLKCTRL_OFFSET);
+ spin_unlock_irqrestore(&d->lock, flags);
+}
+
+static bool ti_adpll_clock_is_bypass(struct ti_adpll_data *d)
+{
+ u32 v;
+
+ v = readl_relaxed(d->regs + ADPLL_STATUS_OFFSET);
+
+ return v & BIT(ADPLL_STATUS_BYPASS);
+}
+
+/*
+ * Locked and bypass are not actually mutually exclusive: if you only care
+ * about the DCO clock and not CLKOUT you can clear M2PWDNZ before enabling
+ * the PLL, resulting in status (FREQLOCK | PHASELOCK | BYPASS) after lock.
+ */
+static bool ti_adpll_is_locked(struct ti_adpll_data *d)
+{
+ u32 v = readl_relaxed(d->regs + ADPLL_STATUS_OFFSET);
+
+ return (v & ADPLL_STATUS_PREPARED_MASK) == ADPLL_STATUS_PREPARED_MASK;
+}
+
+static int ti_adpll_wait_lock(struct ti_adpll_data *d)
+{
+ int retries = ADPLL_MAX_RETRIES;
+
+ do {
+ if (ti_adpll_is_locked(d))
+ return 0;
+ usleep_range(200, 300);
+ } while (retries--);
+
+ dev_err(d->dev, "pll failed to lock\n");
+ return -ETIMEDOUT;
+}
+
+static int ti_adpll_prepare(struct clk_hw *hw)
+{
+ struct ti_adpll_dco_data *dco = to_dco(hw);
+ struct ti_adpll_data *d = to_adpll(dco);
+
+ ti_adpll_clear_idle_bypass(d);
+ ti_adpll_wait_lock(d);
+
+ return 0;
+}
+
+static void ti_adpll_unprepare(struct clk_hw *hw)
+{
+ struct ti_adpll_dco_data *dco = to_dco(hw);
+ struct ti_adpll_data *d = to_adpll(dco);
+
+ ti_adpll_set_idle_bypass(d);
+}
+
+static int ti_adpll_is_prepared(struct clk_hw *hw)
+{
+ struct ti_adpll_dco_data *dco = to_dco(hw);
+ struct ti_adpll_data *d = to_adpll(dco);
+
+ return ti_adpll_is_locked(d);
+}
+
+/*
+ * Note that the DCO clock is never subject to bypass: if the PLL is off,
+ * dcoclk is low.
+ */
+static unsigned long ti_adpll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ti_adpll_dco_data *dco = to_dco(hw);
+ struct ti_adpll_data *d = to_adpll(dco);
+ u32 frac_m, divider, v;
+ u64 rate;
+ unsigned long flags;
+
+ if (ti_adpll_clock_is_bypass(d))
+ return 0;
+
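+ /*
+ * The DCO rate is parent_rate * (M + fractional M / 2^18) / (N + 1):
+ * M is read from MN2DIV, the 18-bit fraction from FRACDIV and N from
+ * M2NDIV. Type S doubles the result, and multiplies it by four more
+ * when REGM4XEN is set.
+ */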
+ spin_lock_irqsave(&d->lock, flags);
+ frac_m = readl_relaxed(d->regs + ADPLL_FRACDIV_OFFSET);
+ frac_m &= ADPLL_FRACDIV_FRACTIONALM_MASK;
+ rate = (u64)readw_relaxed(d->regs + ADPLL_MN2DIV_OFFSET) << 18;
+ rate += frac_m;
+ rate *= parent_rate;
+ divider = (readw_relaxed(d->regs + ADPLL_M2NDIV_OFFSET) + 1) << 18;
+ spin_unlock_irqrestore(&d->lock, flags);
+
+ do_div(rate, divider);
+
+ if (d->c->is_type_s) {
+ v = readl_relaxed(d->regs + ADPLL_CLKCTRL_OFFSET);
+ if (v & BIT(ADPLL_CLKCTRL_REGM4XEN_ADPLL_S))
+ rate *= 4;
+ rate *= 2;
+ }
+
+ return rate;
+}
+
+/* PLL parent is always clkinp, bypass only affects the children */
+static u8 ti_adpll_get_parent(struct clk_hw *hw)
+{
+ return 0;
+}
+
+static const struct clk_ops ti_adpll_ops = {
+ .prepare = ti_adpll_prepare,
+ .unprepare = ti_adpll_unprepare,
+ .is_prepared = ti_adpll_is_prepared,
+ .recalc_rate = ti_adpll_recalc_rate,
+ .get_parent = ti_adpll_get_parent,
+};
+
+static int ti_adpll_init_dco(struct ti_adpll_data *d)
+{
+ struct clk_init_data init;
+ struct clk *clock;
+ const char *postfix;
+ int width, err;
+
+ d->outputs.clks = devm_kcalloc(d->dev,
+ MAX_ADPLL_OUTPUTS,
+ sizeof(struct clk *),
+ GFP_KERNEL);
+ if (!d->outputs.clks)
+ return -ENOMEM;
+
+ if (d->c->output_index < 0)
+ postfix = "dco";
+ else
+ postfix = NULL;
+
+ init.name = ti_adpll_clk_get_name(d, d->c->output_index, postfix);
+ if (!init.name)
+ return -EINVAL;
+
+ init.parent_names = d->parent_names;
+ init.num_parents = d->c->nr_max_inputs;
+ init.ops = &ti_adpll_ops;
+ init.flags = CLK_GET_RATE_NOCACHE;
+ d->dco.hw.init = &init;
+
+ if (d->c->is_type_s)
+ width = 5;
+ else
+ width = 4;
+
+ /* Internal input clock divider N2 */
+ err = ti_adpll_init_divider(d, TI_ADPLL_N2, -ENODEV, "n2",
+ d->parent_clocks[TI_ADPLL_CLKINP],
+ d->regs + ADPLL_MN2DIV_OFFSET,
+ ADPLL_MN2DIV_N2, width, 0);
+ if (err)
+ return err;
+
+ clock = devm_clk_register(d->dev, &d->dco.hw);
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ return ti_adpll_setup_clock(d, clock, TI_ADPLL_DCO, d->c->output_index,
+ init.name, NULL);
+}
+
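+/*
+ * The clkout outputs below are modelled as a read-only mux, where the PLL
+ * bypass status selects the active parent, combined with an optional gate.
+ * The gate callbacks reuse the generic clk_gate_ops on the embedded
+ * struct clk_gate.
+ */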
+static int ti_adpll_clkout_enable(struct clk_hw *hw)
+{
+ struct ti_adpll_clkout_data *co = to_clkout(hw);
+ struct clk_hw *gate_hw = &co->gate.hw;
+
+ __clk_hw_set_clk(gate_hw, hw);
+
+ return clk_gate_ops.enable(gate_hw);
+}
+
+static void ti_adpll_clkout_disable(struct clk_hw *hw)
+{
+ struct ti_adpll_clkout_data *co = to_clkout(hw);
+ struct clk_hw *gate_hw = &co->gate.hw;
+
+ __clk_hw_set_clk(gate_hw, hw);
+ clk_gate_ops.disable(gate_hw);
+}
+
+static int ti_adpll_clkout_is_enabled(struct clk_hw *hw)
+{
+ struct ti_adpll_clkout_data *co = to_clkout(hw);
+ struct clk_hw *gate_hw = &co->gate.hw;
+
+ __clk_hw_set_clk(gate_hw, hw);
+
+ return clk_gate_ops.is_enabled(gate_hw);
+}
+
+/* Setting PLL bypass puts clkout and clkoutx2 into bypass */
+static u8 ti_adpll_clkout_get_parent(struct clk_hw *hw)
+{
+ struct ti_adpll_clkout_data *co = to_clkout(hw);
+ struct ti_adpll_data *d = co->adpll;
+
+ return ti_adpll_clock_is_bypass(d);
+}
+
+static int ti_adpll_init_clkout(struct ti_adpll_data *d,
+ enum ti_adpll_clocks index,
+ int output_index, int gate_bit,
+ char *name, struct clk *clk0,
+ struct clk *clk1)
+{
+ struct ti_adpll_clkout_data *co;
+ struct clk_init_data init;
+ struct clk_ops *ops;
+ const char *parent_names[2];
+ const char *child_name;
+ struct clk *clock;
+ int err;
+
+ co = devm_kzalloc(d->dev, sizeof(*co), GFP_KERNEL);
+ if (!co)
+ return -ENOMEM;
+ co->adpll = d;
+
+ err = of_property_read_string_index(d->np,
+ "clock-output-names",
+ output_index,
+ &child_name);
+ if (err)
+ return err;
+
+ ops = devm_kzalloc(d->dev, sizeof(*ops), GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
+
+ init.name = child_name;
+ init.ops = ops;
+ init.flags = 0;
+ co->hw.init = &init;
+ parent_names[0] = __clk_get_name(clk0);
+ parent_names[1] = __clk_get_name(clk1);
+ init.parent_names = parent_names;
+ init.num_parents = 2;
+
+ ops->get_parent = ti_adpll_clkout_get_parent;
+ ops->determine_rate = __clk_mux_determine_rate;
+ if (gate_bit) {
+ co->gate.lock = &d->lock;
+ co->gate.reg = d->regs + ADPLL_CLKCTRL_OFFSET;
+ co->gate.bit_idx = gate_bit;
+ ops->enable = ti_adpll_clkout_enable;
+ ops->disable = ti_adpll_clkout_disable;
+ ops->is_enabled = ti_adpll_clkout_is_enabled;
+ }
+
+ clock = devm_clk_register(d->dev, &co->hw);
+ if (IS_ERR(clock)) {
+ dev_err(d->dev, "failed to register output %s: %li\n",
+ name, PTR_ERR(clock));
+ return PTR_ERR(clock);
+ }
+
+ return ti_adpll_setup_clock(d, clock, index, output_index, child_name,
+ NULL);
+}
+
+static int ti_adpll_init_children_adpll_s(struct ti_adpll_data *d)
+{
+ int err;
+
+ if (!d->c->is_type_s)
+ return 0;
+
+ /* Internal mux, sources from divider N2 or clkinpulow */
+ err = ti_adpll_init_mux(d, TI_ADPLL_BYPASS, "bypass",
+ d->clocks[TI_ADPLL_N2].clk,
+ d->parent_clocks[TI_ADPLL_CLKINPULOW],
+ d->regs + ADPLL_CLKCTRL_OFFSET,
+ ADPLL_CLKCTRL_ULOWCLKEN);
+ if (err)
+ return err;
+
+ /* Internal divider M2, sources DCO */
+ err = ti_adpll_init_divider(d, TI_ADPLL_M2, -ENODEV, "m2",
+ d->clocks[TI_ADPLL_DCO].clk,
+ d->regs + ADPLL_M2NDIV_OFFSET,
+ ADPLL_M2NDIV_M2,
+ ADPLL_M2NDIV_M2_ADPLL_S_WIDTH,
+ CLK_DIVIDER_ONE_BASED);
+ if (err)
+ return err;
+
+ /* Internal fixed divider, after M2 before clkout */
+ err = ti_adpll_init_fixed_factor(d, TI_ADPLL_DIV2, "div2",
+ d->clocks[TI_ADPLL_M2].clk,
+ 1, 2);
+ if (err)
+ return err;
+
+ /* Output clkout with a mux and gate, sources from div2 or bypass */
+ err = ti_adpll_init_clkout(d, TI_ADPLL_CLKOUT, TI_ADPLL_S_CLKOUT,
+ ADPLL_CLKCTRL_CLKOUTEN, "clkout",
+ d->clocks[TI_ADPLL_DIV2].clk,
+ d->clocks[TI_ADPLL_BYPASS].clk);
+ if (err)
+ return err;
+
+ /* Output clkoutx2 with a mux and gate, sources from M2 or bypass */
+ err = ti_adpll_init_clkout(d, TI_ADPLL_CLKOUT2, TI_ADPLL_S_CLKOUTX2, 0,
+ "clkout2", d->clocks[TI_ADPLL_M2].clk,
+ d->clocks[TI_ADPLL_BYPASS].clk);
+ if (err)
+ return err;
+
+ /* Internal mux, sources from DCO and clkinphif */
+ if (d->parent_clocks[TI_ADPLL_CLKINPHIF]) {
+ err = ti_adpll_init_mux(d, TI_ADPLL_HIF, "hif",
+ d->clocks[TI_ADPLL_DCO].clk,
+ d->parent_clocks[TI_ADPLL_CLKINPHIF],
+ d->regs + ADPLL_CLKCTRL_OFFSET,
+ ADPLL_CLKINPHIFSEL_ADPLL_S);
+ if (err)
+ return err;
+ }
+
+ /* Output clkouthif with a divider M3, sources from hif */
+ err = ti_adpll_init_divider(d, TI_ADPLL_M3, TI_ADPLL_S_CLKOUTHIF, "m3",
+ d->clocks[TI_ADPLL_HIF].clk,
+ d->regs + ADPLL_M3DIV_OFFSET,
+ ADPLL_M3DIV_M3,
+ ADPLL_M3DIV_M3_WIDTH,
+ CLK_DIVIDER_ONE_BASED);
+ if (err)
+ return err;
+
+ /* Output clock dcoclkldo is the DCO */
+
+ return 0;
+}
+
+static int ti_adpll_init_children_adpll_lj(struct ti_adpll_data *d)
+{
+ int err;
+
+ if (d->c->is_type_s)
+ return 0;
+
+ /* Output clkdcoldo, gated output of DCO */
+ err = ti_adpll_init_gate(d, TI_ADPLL_DCO_GATE, TI_ADPLL_LJ_CLKDCOLDO,
+ "clkdcoldo", d->clocks[TI_ADPLL_DCO].clk,
+ d->regs + ADPLL_CLKCTRL_OFFSET,
+ ADPLL_CLKCTRL_CLKDCOLDOEN, 0);
+ if (err)
+ return err;
+
+ /* Internal divider M2, sources from DCO */
+ err = ti_adpll_init_divider(d, TI_ADPLL_M2, -ENODEV,
+ "m2", d->clocks[TI_ADPLL_DCO].clk,
+ d->regs + ADPLL_M2NDIV_OFFSET,
+ ADPLL_M2NDIV_M2,
+ ADPLL_M2NDIV_M2_ADPLL_LJ_WIDTH,
+ CLK_DIVIDER_ONE_BASED);
+ if (err)
+ return err;
+
+ /* Output clkoutldo, gated output of M2 */
+ err = ti_adpll_init_gate(d, TI_ADPLL_M2_GATE, TI_ADPLL_LJ_CLKOUTLDO,
+ "clkoutldo", d->clocks[TI_ADPLL_M2].clk,
+ d->regs + ADPLL_CLKCTRL_OFFSET,
+ ADPLL_CLKCTRL_CLKOUTLDOEN_ADPLL_LJ,
+ 0);
+ if (err)
+ return err;
+
+ /* Internal mux, sources from divider N2 or clkinpulow */
+ err = ti_adpll_init_mux(d, TI_ADPLL_BYPASS, "bypass",
+ d->clocks[TI_ADPLL_N2].clk,
+ d->parent_clocks[TI_ADPLL_CLKINPULOW],
+ d->regs + ADPLL_CLKCTRL_OFFSET,
+ ADPLL_CLKCTRL_ULOWCLKEN);
+ if (err)
+ return err;
+
+ /* Output clkout, sources M2 or bypass */
+ err = ti_adpll_init_clkout(d, TI_ADPLL_CLKOUT, TI_ADPLL_S_CLKOUT,
+ ADPLL_CLKCTRL_CLKOUTEN, "clkout",
+ d->clocks[TI_ADPLL_M2].clk,
+ d->clocks[TI_ADPLL_BYPASS].clk);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void ti_adpll_free_resources(struct ti_adpll_data *d)
+{
+ int i;
+
+ for (i = TI_ADPLL_M3; i >= 0; i--) {
+ struct ti_adpll_clock *ac = &d->clocks[i];
+
+ if (!ac || IS_ERR_OR_NULL(ac->clk))
+ continue;
+ if (ac->cl)
+ clkdev_drop(ac->cl);
+ if (ac->unregister)
+ ac->unregister(ac->clk);
+ }
+}
+
+/* MPU PLL manages the lock register for all PLLs */
+static void ti_adpll_unlock_all(void __iomem *reg)
+{
+ u32 v;
+
+ v = readl_relaxed(reg);
+ if (v == ADPLL_PLLSS_MMR_LOCK_ENABLED)
+ writel_relaxed(ADPLL_PLLSS_MMR_UNLOCK_MAGIC, reg);
+}
+
+static int ti_adpll_init_registers(struct ti_adpll_data *d)
+{
+ int register_offset = 0;
+
+ if (d->c->is_type_s) {
+ register_offset = 8;
+ ti_adpll_unlock_all(d->iobase + ADPLL_PLLSS_MMR_LOCK_OFFSET);
+ }
+
+ d->regs = d->iobase + register_offset + ADPLL_PWRCTRL_OFFSET;
+
+ return 0;
+}
+
+static int ti_adpll_init_inputs(struct ti_adpll_data *d)
+{
+ static const char error[] = "need at least %i inputs";
+ struct clk *clock;
+ int nr_inputs;
+
+ nr_inputs = of_clk_get_parent_count(d->np);
+ if (nr_inputs < d->c->nr_max_inputs) {
+ dev_err(d->dev, error, d->c->nr_max_inputs);
+ return -EINVAL;
+ }
+ of_clk_parent_fill(d->np, d->parent_names, nr_inputs);
+
+ clock = devm_clk_get(d->dev, d->parent_names[0]);
+ if (IS_ERR(clock)) {
+ dev_err(d->dev, "could not get clkinp\n");
+ return PTR_ERR(clock);
+ }
+ d->parent_clocks[TI_ADPLL_CLKINP] = clock;
+
+ clock = devm_clk_get(d->dev, d->parent_names[1]);
+ if (IS_ERR(clock)) {
+ dev_err(d->dev, "could not get clkinpulow clock\n");
+ return PTR_ERR(clock);
+ }
+ d->parent_clocks[TI_ADPLL_CLKINPULOW] = clock;
+
+ if (d->c->is_type_s) {
+ clock = devm_clk_get(d->dev, d->parent_names[2]);
+ if (IS_ERR(clock)) {
+ dev_err(d->dev, "could not get clkinphif clock\n");
+ return PTR_ERR(clock);
+ }
+ d->parent_clocks[TI_ADPLL_CLKINPHIF] = clock;
+ }
+
+ return 0;
+}
+
+static const struct ti_adpll_platform_data ti_adpll_type_s = {
+ .is_type_s = true,
+ .nr_max_inputs = MAX_ADPLL_INPUTS,
+ .nr_max_outputs = MAX_ADPLL_OUTPUTS,
+ .output_index = TI_ADPLL_S_DCOCLKLDO,
+};
+
+static const struct ti_adpll_platform_data ti_adpll_type_lj = {
+ .is_type_s = false,
+ .nr_max_inputs = MAX_ADPLL_INPUTS - 1,
+ .nr_max_outputs = MAX_ADPLL_OUTPUTS - 1,
+ .output_index = -EINVAL,
+};
+
+static const struct of_device_id ti_adpll_match[] = {
+ { .compatible = "ti,dm814-adpll-s-clock", &ti_adpll_type_s },
+ { .compatible = "ti,dm814-adpll-lj-clock", &ti_adpll_type_lj },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ti_adpll_match);
+
+static int ti_adpll_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *match;
+ const struct ti_adpll_platform_data *pdata;
+ struct ti_adpll_data *d;
+ struct resource *res;
+ int err;
+
+ match = of_match_device(ti_adpll_match, dev);
+ if (match)
+ pdata = match->data;
+ else
+ return -ENODEV;
+
+ d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+ d->dev = dev;
+ d->np = node;
+ d->c = pdata;
+ dev_set_drvdata(d->dev, d);
+ spin_lock_init(&d->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ d->pa = res->start;
+
+ d->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(d->iobase))
+ return PTR_ERR(d->iobase);
+
+ err = ti_adpll_init_registers(d);
+ if (err)
+ return err;
+
+ err = ti_adpll_init_inputs(d);
+ if (err)
+ return err;
+
+ d->clocks = devm_kcalloc(d->dev,
+ TI_ADPLL_NR_CLOCKS,
+ sizeof(struct ti_adpll_clock),
+ GFP_KERNEL);
+ if (!d->clocks)
+ return -ENOMEM;
+
+ err = ti_adpll_init_dco(d);
+ if (err) {
+ dev_err(dev, "could not register dco: %i\n", err);
+ goto free;
+ }
+
+ err = ti_adpll_init_children_adpll_s(d);
+ if (err)
+ goto free;
+ err = ti_adpll_init_children_adpll_lj(d);
+ if (err)
+ goto free;
+
+ err = of_clk_add_provider(d->np, of_clk_src_onecell_get, &d->outputs);
+ if (err)
+ goto free;
+
+ return 0;
+
+free:
+ WARN_ON(1);
+ ti_adpll_free_resources(d);
+
+ return err;
+}
+
+static int ti_adpll_remove(struct platform_device *pdev)
+{
+ struct ti_adpll_data *d = dev_get_drvdata(&pdev->dev);
+
+ ti_adpll_free_resources(d);
+
+ return 0;
+}
+
+static struct platform_driver ti_adpll_driver = {
+ .driver = {
+ .name = "ti-adpll",
+ .of_match_table = ti_adpll_match,
+ },
+ .probe = ti_adpll_probe,
+ .remove = ti_adpll_remove,
+};
+
+static int __init ti_adpll_init(void)
+{
+ return platform_driver_register(&ti_adpll_driver);
+}
+core_initcall(ti_adpll_init);
+
+static void __exit ti_adpll_exit(void)
+{
+ platform_driver_unregister(&ti_adpll_driver);
+}
+module_exit(ti_adpll_exit);
+
+MODULE_DESCRIPTION("Clock driver for dm814x ADPLL");
+MODULE_ALIAS("platform:dm814-adpll-clock");
+MODULE_AUTHOR("Tony Lindgren <tony@atomide.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
new file mode 100644
index 000000000..93183287c
--- /dev/null
+++ b/drivers/clk/ti/apll.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * OMAP APLL clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * J Keerthy <j-keerthy@ti.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/log2.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+#include <linux/delay.h>
+
+#include "clock.h"
+
+#define APLL_FORCE_LOCK 0x1
+#define APLL_AUTO_IDLE 0x2
+#define MAX_APLL_WAIT_TRIES 1000000
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+static int dra7_apll_enable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ int r = 0, i = 0;
+ struct dpll_data *ad;
+ const char *clk_name;
+ u8 state = 1;
+ u32 v;
+
+ ad = clk->dpll_data;
+ if (!ad)
+ return -EINVAL;
+
+ clk_name = clk_hw_get_name(&clk->hw);
+
+ state <<= __ffs(ad->idlest_mask);
+
+ /* Check if the APLL is already locked */
+ v = ti_clk_ll_ops->clk_readl(&ad->idlest_reg);
+
+ if ((v & ad->idlest_mask) == state)
+ return r;
+
+ v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
+ v &= ~ad->enable_mask;
+ v |= APLL_FORCE_LOCK << __ffs(ad->enable_mask);
+ ti_clk_ll_ops->clk_writel(v, &ad->control_reg);
+
+ state <<= __ffs(ad->idlest_mask);
+
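+ /* Poll the idle status until the APLL locks or we run out of tries */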
+ while (1) {
+ v = ti_clk_ll_ops->clk_readl(&ad->idlest_reg);
+ if ((v & ad->idlest_mask) == state)
+ break;
+ if (i > MAX_APLL_WAIT_TRIES)
+ break;
+ i++;
+ udelay(1);
+ }
+
+ if (i == MAX_APLL_WAIT_TRIES) {
+ pr_warn("clock: %s failed transition to '%s'\n",
+ clk_name, (state) ? "locked" : "bypassed");
+ r = -EBUSY;
+ } else
+ pr_debug("clock: %s transition to '%s' in %d loops\n",
+ clk_name, (state) ? "locked" : "bypassed", i);
+
+ return r;
+}
+
+static void dra7_apll_disable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *ad;
+ u8 state = 1;
+ u32 v;
+
+ ad = clk->dpll_data;
+
+ state <<= __ffs(ad->idlest_mask);
+
+ v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
+ v &= ~ad->enable_mask;
+ v |= APLL_AUTO_IDLE << __ffs(ad->enable_mask);
+ ti_clk_ll_ops->clk_writel(v, &ad->control_reg);
+}
+
+static int dra7_apll_is_enabled(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *ad;
+ u32 v;
+
+ ad = clk->dpll_data;
+
+ v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
+ v &= ad->enable_mask;
+
+ v >>= __ffs(ad->enable_mask);
+
+ return v == APLL_AUTO_IDLE ? 0 : 1;
+}
+
+static u8 dra7_init_apll_parent(struct clk_hw *hw)
+{
+ return 0;
+}
+
+static const struct clk_ops apll_ck_ops = {
+ .enable = &dra7_apll_enable,
+ .disable = &dra7_apll_disable,
+ .is_enabled = &dra7_apll_is_enabled,
+ .get_parent = &dra7_init_apll_parent,
+};
+
+static void __init omap_clk_register_apll(void *user,
+ struct device_node *node)
+{
+ struct clk_hw *hw = user;
+ struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
+ struct dpll_data *ad = clk_hw->dpll_data;
+ const char *name;
+ struct clk *clk;
+ const struct clk_init_data *init = clk_hw->hw.init;
+
+ clk = of_clk_get(node, 0);
+ if (IS_ERR(clk)) {
+ pr_debug("clk-ref for %pOFn not ready, retry\n",
+ node);
+ if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
+ return;
+
+ goto cleanup;
+ }
+
+ ad->clk_ref = __clk_get_hw(clk);
+
+ clk = of_clk_get(node, 1);
+ if (IS_ERR(clk)) {
+ pr_debug("clk-bypass for %pOFn not ready, retry\n",
+ node);
+ if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
+ return;
+
+ goto cleanup;
+ }
+
+ ad->clk_bypass = __clk_get_hw(clk);
+
+ name = ti_dt_clk_name(node);
+ clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ kfree(init->parent_names);
+ kfree(init);
+ return;
+ }
+
+cleanup:
+ kfree(clk_hw->dpll_data);
+ kfree(init->parent_names);
+ kfree(init);
+ kfree(clk_hw);
+}
+
+static void __init of_dra7_apll_setup(struct device_node *node)
+{
+ struct dpll_data *ad = NULL;
+ struct clk_hw_omap *clk_hw = NULL;
+ struct clk_init_data *init = NULL;
+ const char **parent_names = NULL;
+ int ret;
+
+ ad = kzalloc(sizeof(*ad), GFP_KERNEL);
+ clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ init = kzalloc(sizeof(*init), GFP_KERNEL);
+ if (!ad || !clk_hw || !init)
+ goto cleanup;
+
+ clk_hw->dpll_data = ad;
+ clk_hw->hw.init = init;
+
+ init->name = ti_dt_clk_name(node);
+ init->ops = &apll_ck_ops;
+
+ init->num_parents = of_clk_get_parent_count(node);
+ if (init->num_parents < 1) {
+ pr_err("dra7 apll %pOFn must have parent(s)\n", node);
+ goto cleanup;
+ }
+
+ parent_names = kcalloc(init->num_parents, sizeof(char *), GFP_KERNEL);
+ if (!parent_names)
+ goto cleanup;
+
+ of_clk_parent_fill(node, parent_names, init->num_parents);
+
+ init->parent_names = parent_names;
+
+ ret = ti_clk_get_reg_addr(node, 0, &ad->control_reg);
+ ret |= ti_clk_get_reg_addr(node, 1, &ad->idlest_reg);
+
+ if (ret)
+ goto cleanup;
+
+ ad->idlest_mask = 0x1;
+ ad->enable_mask = 0x3;
+
+ omap_clk_register_apll(&clk_hw->hw, node);
+ return;
+
+cleanup:
+ kfree(parent_names);
+ kfree(ad);
+ kfree(clk_hw);
+ kfree(init);
+}
+CLK_OF_DECLARE(dra7_apll_clock, "ti,dra7-apll-clock", of_dra7_apll_setup);
+
+#define OMAP2_EN_APLL_LOCKED 0x3
+#define OMAP2_EN_APLL_STOPPED 0x0
+
+static int omap2_apll_is_enabled(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *ad = clk->dpll_data;
+ u32 v;
+
+ v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
+ v &= ad->enable_mask;
+
+ v >>= __ffs(ad->enable_mask);
+
+ return v == OMAP2_EN_APLL_LOCKED ? 1 : 0;
+}
+
+static unsigned long omap2_apll_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+
+ if (omap2_apll_is_enabled(hw))
+ return clk->fixed_rate;
+
+ return 0;
+}
+
+static int omap2_apll_enable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *ad = clk->dpll_data;
+ u32 v;
+ int i = 0;
+
+ v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
+ v &= ~ad->enable_mask;
+ v |= OMAP2_EN_APLL_LOCKED << __ffs(ad->enable_mask);
+ ti_clk_ll_ops->clk_writel(v, &ad->control_reg);
+
+ while (1) {
+ v = ti_clk_ll_ops->clk_readl(&ad->idlest_reg);
+ if (v & ad->idlest_mask)
+ break;
+ if (i > MAX_APLL_WAIT_TRIES)
+ break;
+ i++;
+ udelay(1);
+ }
+
+ if (i == MAX_APLL_WAIT_TRIES) {
+ pr_warn("%s failed to transition to locked\n",
+ clk_hw_get_name(&clk->hw));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void omap2_apll_disable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *ad = clk->dpll_data;
+ u32 v;
+
+ v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
+ v &= ~ad->enable_mask;
+ v |= OMAP2_EN_APLL_STOPPED << __ffs(ad->enable_mask);
+ ti_clk_ll_ops->clk_writel(v, &ad->control_reg);
+}
+
+static const struct clk_ops omap2_apll_ops = {
+ .enable = &omap2_apll_enable,
+ .disable = &omap2_apll_disable,
+ .is_enabled = &omap2_apll_is_enabled,
+ .recalc_rate = &omap2_apll_recalc,
+};
+
+static void omap2_apll_set_autoidle(struct clk_hw_omap *clk, u32 val)
+{
+ struct dpll_data *ad = clk->dpll_data;
+ u32 v;
+
+ v = ti_clk_ll_ops->clk_readl(&ad->autoidle_reg);
+ v &= ~ad->autoidle_mask;
+ v |= val << __ffs(ad->autoidle_mask);
+ ti_clk_ll_ops->clk_writel(v, &ad->autoidle_reg);
+}
+
+#define OMAP2_APLL_AUTOIDLE_LOW_POWER_STOP 0x3
+#define OMAP2_APLL_AUTOIDLE_DISABLE 0x0
+
+static void omap2_apll_allow_idle(struct clk_hw_omap *clk)
+{
+ omap2_apll_set_autoidle(clk, OMAP2_APLL_AUTOIDLE_LOW_POWER_STOP);
+}
+
+static void omap2_apll_deny_idle(struct clk_hw_omap *clk)
+{
+ omap2_apll_set_autoidle(clk, OMAP2_APLL_AUTOIDLE_DISABLE);
+}
+
+static const struct clk_hw_omap_ops omap2_apll_hwops = {
+ .allow_idle = &omap2_apll_allow_idle,
+ .deny_idle = &omap2_apll_deny_idle,
+};
+
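+/*
+ * The OMAP2 APLL binding supplies the control, autoidle and idlest
+ * registers (reg entries 0, 1 and 2) plus the enable and status bit
+ * positions. A sketch of a matching node (offsets and shifts are
+ * illustrative only, not taken from this file):
+ *
+ *   apll96_ck: apll96_ck@500 {
+ *           #clock-cells = <0>;
+ *           compatible = "ti,omap2-apll-clock";
+ *           clocks = <&sys_ck>;
+ *           ti,bit-shift = <2>;
+ *           ti,idlest-shift = <8>;
+ *           ti,clock-frequency = <96000000>;
+ *           reg = <0x0500>, <0x0530>, <0x0520>;
+ *   };
+ */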
+static void __init of_omap2_apll_setup(struct device_node *node)
+{
+ struct dpll_data *ad = NULL;
+ struct clk_hw_omap *clk_hw = NULL;
+ struct clk_init_data *init = NULL;
+ const char *name;
+ struct clk *clk;
+ const char *parent_name;
+ u32 val;
+ int ret;
+
+ ad = kzalloc(sizeof(*ad), GFP_KERNEL);
+ clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ init = kzalloc(sizeof(*init), GFP_KERNEL);
+
+ if (!ad || !clk_hw || !init)
+ goto cleanup;
+
+ clk_hw->dpll_data = ad;
+ clk_hw->hw.init = init;
+ init->ops = &omap2_apll_ops;
+ name = ti_dt_clk_name(node);
+ init->name = name;
+ clk_hw->ops = &omap2_apll_hwops;
+
+ init->num_parents = of_clk_get_parent_count(node);
+ if (init->num_parents != 1) {
+ pr_err("%pOFn must have one parent\n", node);
+ goto cleanup;
+ }
+
+ parent_name = of_clk_get_parent_name(node, 0);
+ init->parent_names = &parent_name;
+
+ if (of_property_read_u32(node, "ti,clock-frequency", &val)) {
+ pr_err("%pOFn missing clock-frequency\n", node);
+ goto cleanup;
+ }
+ clk_hw->fixed_rate = val;
+
+ if (of_property_read_u32(node, "ti,bit-shift", &val)) {
+ pr_err("%pOFn missing bit-shift\n", node);
+ goto cleanup;
+ }
+
+ clk_hw->enable_bit = val;
+ ad->enable_mask = 0x3 << val;
+ ad->autoidle_mask = 0x3 << val;
+
+ if (of_property_read_u32(node, "ti,idlest-shift", &val)) {
+ pr_err("%pOFn missing idlest-shift\n", node);
+ goto cleanup;
+ }
+
+ ad->idlest_mask = 1 << val;
+
+ ret = ti_clk_get_reg_addr(node, 0, &ad->control_reg);
+ ret |= ti_clk_get_reg_addr(node, 1, &ad->autoidle_reg);
+ ret |= ti_clk_get_reg_addr(node, 2, &ad->idlest_reg);
+
+ if (ret)
+ goto cleanup;
+
+ name = ti_dt_clk_name(node);
+ clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ kfree(init);
+ return;
+ }
+cleanup:
+ kfree(ad);
+ kfree(clk_hw);
+ kfree(init);
+}
+CLK_OF_DECLARE(omap2_apll_clock, "ti,omap2-apll-clock",
+ of_omap2_apll_setup);
diff --git a/drivers/clk/ti/autoidle.c b/drivers/clk/ti/autoidle.c
new file mode 100644
index 000000000..27e6b9cb1
--- /dev/null
+++ b/drivers/clk/ti/autoidle.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI clock autoidle support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+struct clk_ti_autoidle {
+ struct clk_omap_reg reg;
+ u8 shift;
+ u8 flags;
+ const char *name;
+ struct list_head node;
+};
+
+#define AUTOIDLE_LOW 0x1 /* autoidle enable bit is active low */
+
+static LIST_HEAD(autoidle_clks);
+
+/*
+ * Autoidle handling involves non-atomic read/modify/write sequences, so
+ * take a single lock that covers autoidle handling for all clocks.
+ */
+static DEFINE_SPINLOCK(autoidle_spinlock);
+
+static int _omap2_clk_deny_idle(struct clk_hw_omap *clk)
+{
+ if (clk->ops && clk->ops->deny_idle) {
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&autoidle_spinlock, irqflags);
+ clk->autoidle_count++;
+ if (clk->autoidle_count == 1)
+ clk->ops->deny_idle(clk);
+
+ spin_unlock_irqrestore(&autoidle_spinlock, irqflags);
+ }
+ return 0;
+}
+
+static int _omap2_clk_allow_idle(struct clk_hw_omap *clk)
+{
+ if (clk->ops && clk->ops->allow_idle) {
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&autoidle_spinlock, irqflags);
+ clk->autoidle_count--;
+ if (clk->autoidle_count == 0)
+ clk->ops->allow_idle(clk);
+
+ spin_unlock_irqrestore(&autoidle_spinlock, irqflags);
+ }
+ return 0;
+}
+
+/**
+ * omap2_clk_deny_idle - disable autoidle on an OMAP clock
+ * @clk: struct clk * to disable autoidle for
+ *
+ * Disable autoidle on an OMAP clock.
+ */
+int omap2_clk_deny_idle(struct clk *clk)
+{
+ struct clk_hw *hw;
+
+ if (!clk)
+ return -EINVAL;
+
+ hw = __clk_get_hw(clk);
+
+ if (omap2_clk_is_hw_omap(hw)) {
+ struct clk_hw_omap *c = to_clk_hw_omap(hw);
+
+ return _omap2_clk_deny_idle(c);
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * omap2_clk_allow_idle - enable autoidle on an OMAP clock
+ * @clk: struct clk * to enable autoidle for
+ *
+ * Enable autoidle on an OMAP clock.
+ */
+int omap2_clk_allow_idle(struct clk *clk)
+{
+ struct clk_hw *hw;
+
+ if (!clk)
+ return -EINVAL;
+
+ hw = __clk_get_hw(clk);
+
+ if (omap2_clk_is_hw_omap(hw)) {
+ struct clk_hw_omap *c = to_clk_hw_omap(hw);
+
+ return _omap2_clk_allow_idle(c);
+ }
+
+ return -EINVAL;
+}
+
+static void _allow_autoidle(struct clk_ti_autoidle *clk)
+{
+ u32 val;
+
+ val = ti_clk_ll_ops->clk_readl(&clk->reg);
+
+ if (clk->flags & AUTOIDLE_LOW)
+ val &= ~(1 << clk->shift);
+ else
+ val |= (1 << clk->shift);
+
+ ti_clk_ll_ops->clk_writel(val, &clk->reg);
+}
+
+static void _deny_autoidle(struct clk_ti_autoidle *clk)
+{
+ u32 val;
+
+ val = ti_clk_ll_ops->clk_readl(&clk->reg);
+
+ if (clk->flags & AUTOIDLE_LOW)
+ val |= (1 << clk->shift);
+ else
+ val &= ~(1 << clk->shift);
+
+ ti_clk_ll_ops->clk_writel(val, &clk->reg);
+}
+
+/**
+ * _clk_generic_allow_autoidle_all - enable autoidle for all clocks
+ *
+ * Enables hardware autoidle for all registered DT clocks, which have
+ * the feature.
+ */
+static void _clk_generic_allow_autoidle_all(void)
+{
+ struct clk_ti_autoidle *c;
+
+ list_for_each_entry(c, &autoidle_clks, node)
+ _allow_autoidle(c);
+}
+
+/**
+ * _clk_generic_deny_autoidle_all - disable autoidle for all clocks
+ *
+ * Disables hardware autoidle for all registered DT clocks, which have
+ * the feature.
+ */
+static void _clk_generic_deny_autoidle_all(void)
+{
+ struct clk_ti_autoidle *c;
+
+ list_for_each_entry(c, &autoidle_clks, node)
+ _deny_autoidle(c);
+}
+
+/**
+ * of_ti_clk_autoidle_setup - sets up hardware autoidle for a clock
+ * @node: pointer to the clock device node
+ *
+ * Checks if a clock has hardware autoidle support or not (check
+ * for presence of 'ti,autoidle-shift' property in the device tree
+ * node) and sets up the hardware autoidle feature for the clock
+ * if available. If autoidle is available, the clock is also added
+ * to the autoidle list for later processing. Returns 0 on success,
+ * negative error value on failure.
+ */
+int __init of_ti_clk_autoidle_setup(struct device_node *node)
+{
+ u32 shift;
+ struct clk_ti_autoidle *clk;
+ int ret;
+
+ /* Check if this clock has autoidle support or not */
+ if (of_property_read_u32(node, "ti,autoidle-shift", &shift))
+ return 0;
+
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+
+ if (!clk)
+ return -ENOMEM;
+
+ clk->shift = shift;
+ clk->name = ti_dt_clk_name(node);
+ ret = ti_clk_get_reg_addr(node, 0, &clk->reg);
+ if (ret) {
+ kfree(clk);
+ return ret;
+ }
+
+ if (of_property_read_bool(node, "ti,invert-autoidle-bit"))
+ clk->flags |= AUTOIDLE_LOW;
+
+ list_add(&clk->node, &autoidle_clks);
+
+ return 0;
+}
+
+/**
+ * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that
+ * support it
+ *
+ * Enable clock autoidle on all OMAP clocks that have allow_idle
+ * function pointers associated with them. This function is intended
+ * to be temporary until support for this is added to the common clock
+ * code. Returns 0.
+ */
+int omap2_clk_enable_autoidle_all(void)
+{
+ int ret;
+
+ ret = omap2_clk_for_each(_omap2_clk_allow_idle);
+ if (ret)
+ return ret;
+
+ _clk_generic_allow_autoidle_all();
+
+ return 0;
+}
+
+/**
+ * omap2_clk_disable_autoidle_all - disable autoidle on all OMAP clocks that
+ * support it
+ *
+ * Disable clock autoidle on all OMAP clocks that have allow_idle
+ * function pointers associated with them. This function is intended
+ * to be temporary until support for this is added to the common clock
+ * code. Returns 0.
+ */
+int omap2_clk_disable_autoidle_all(void)
+{
+ int ret;
+
+ ret = omap2_clk_for_each(_omap2_clk_deny_idle);
+ if (ret)
+ return ret;
+
+ _clk_generic_deny_autoidle_all();
+
+ return 0;
+}
diff --git a/drivers/clk/ti/clk-2xxx.c b/drivers/clk/ti/clk-2xxx.c
new file mode 100644
index 000000000..363c4fdbe
--- /dev/null
+++ b/drivers/clk/ti/clk-2xxx.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * OMAP2 Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc
+ * Tero Kristo (t-kristo@ti.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+static struct ti_dt_clk omap2xxx_clks[] = {
+ DT_CLK(NULL, "func_32k_ck", "func_32k_ck"),
+ DT_CLK(NULL, "secure_32k_ck", "secure_32k_ck"),
+ DT_CLK(NULL, "virt_12m_ck", "virt_12m_ck"),
+ DT_CLK(NULL, "virt_13m_ck", "virt_13m_ck"),
+ DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
+ DT_CLK(NULL, "virt_26m_ck", "virt_26m_ck"),
+ DT_CLK(NULL, "aplls_clkin_ck", "aplls_clkin_ck"),
+ DT_CLK(NULL, "aplls_clkin_x2_ck", "aplls_clkin_x2_ck"),
+ DT_CLK(NULL, "osc_ck", "osc_ck"),
+ DT_CLK(NULL, "sys_ck", "sys_ck"),
+ DT_CLK(NULL, "alt_ck", "alt_ck"),
+ DT_CLK(NULL, "mcbsp_clks", "mcbsp_clks"),
+ DT_CLK(NULL, "dpll_ck", "dpll_ck"),
+ DT_CLK(NULL, "apll96_ck", "apll96_ck"),
+ DT_CLK(NULL, "apll54_ck", "apll54_ck"),
+ DT_CLK(NULL, "func_54m_ck", "func_54m_ck"),
+ DT_CLK(NULL, "core_ck", "core_ck"),
+ DT_CLK(NULL, "func_96m_ck", "func_96m_ck"),
+ DT_CLK(NULL, "func_48m_ck", "func_48m_ck"),
+ DT_CLK(NULL, "func_12m_ck", "func_12m_ck"),
+ DT_CLK(NULL, "sys_clkout_src", "sys_clkout_src"),
+ DT_CLK(NULL, "sys_clkout", "sys_clkout"),
+ DT_CLK(NULL, "emul_ck", "emul_ck"),
+ DT_CLK(NULL, "mpu_ck", "mpu_ck"),
+ DT_CLK(NULL, "dsp_fck", "dsp_fck"),
+ DT_CLK(NULL, "gfx_3d_fck", "gfx_3d_fck"),
+ DT_CLK(NULL, "gfx_2d_fck", "gfx_2d_fck"),
+ DT_CLK(NULL, "gfx_ick", "gfx_ick"),
+ DT_CLK("omapdss_dss", "ick", "dss_ick"),
+ DT_CLK(NULL, "dss_ick", "dss_ick"),
+ DT_CLK(NULL, "dss1_fck", "dss1_fck"),
+ DT_CLK(NULL, "dss2_fck", "dss2_fck"),
+ DT_CLK(NULL, "dss_54m_fck", "dss_54m_fck"),
+ DT_CLK(NULL, "core_l3_ck", "core_l3_ck"),
+ DT_CLK(NULL, "ssi_fck", "ssi_ssr_sst_fck"),
+ DT_CLK(NULL, "usb_l4_ick", "usb_l4_ick"),
+ DT_CLK(NULL, "l4_ck", "l4_ck"),
+ DT_CLK(NULL, "ssi_l4_ick", "ssi_l4_ick"),
+ DT_CLK(NULL, "gpt1_ick", "gpt1_ick"),
+ DT_CLK(NULL, "gpt1_fck", "gpt1_fck"),
+ DT_CLK(NULL, "gpt2_ick", "gpt2_ick"),
+ DT_CLK(NULL, "gpt2_fck", "gpt2_fck"),
+ DT_CLK(NULL, "gpt3_ick", "gpt3_ick"),
+ DT_CLK(NULL, "gpt3_fck", "gpt3_fck"),
+ DT_CLK(NULL, "gpt4_ick", "gpt4_ick"),
+ DT_CLK(NULL, "gpt4_fck", "gpt4_fck"),
+ DT_CLK(NULL, "gpt5_ick", "gpt5_ick"),
+ DT_CLK(NULL, "gpt5_fck", "gpt5_fck"),
+ DT_CLK(NULL, "gpt6_ick", "gpt6_ick"),
+ DT_CLK(NULL, "gpt6_fck", "gpt6_fck"),
+ DT_CLK(NULL, "gpt7_ick", "gpt7_ick"),
+ DT_CLK(NULL, "gpt7_fck", "gpt7_fck"),
+ DT_CLK(NULL, "gpt8_ick", "gpt8_ick"),
+ DT_CLK(NULL, "gpt8_fck", "gpt8_fck"),
+ DT_CLK(NULL, "gpt9_ick", "gpt9_ick"),
+ DT_CLK(NULL, "gpt9_fck", "gpt9_fck"),
+ DT_CLK(NULL, "gpt10_ick", "gpt10_ick"),
+ DT_CLK(NULL, "gpt10_fck", "gpt10_fck"),
+ DT_CLK(NULL, "gpt11_ick", "gpt11_ick"),
+ DT_CLK(NULL, "gpt11_fck", "gpt11_fck"),
+ DT_CLK(NULL, "gpt12_ick", "gpt12_ick"),
+ DT_CLK(NULL, "gpt12_fck", "gpt12_fck"),
+ DT_CLK("omap-mcbsp.1", "ick", "mcbsp1_ick"),
+ DT_CLK(NULL, "mcbsp1_ick", "mcbsp1_ick"),
+ DT_CLK(NULL, "mcbsp1_fck", "mcbsp1_fck"),
+ DT_CLK("omap-mcbsp.2", "ick", "mcbsp2_ick"),
+ DT_CLK(NULL, "mcbsp2_ick", "mcbsp2_ick"),
+ DT_CLK(NULL, "mcbsp2_fck", "mcbsp2_fck"),
+ DT_CLK("omap2_mcspi.1", "ick", "mcspi1_ick"),
+ DT_CLK(NULL, "mcspi1_ick", "mcspi1_ick"),
+ DT_CLK(NULL, "mcspi1_fck", "mcspi1_fck"),
+ DT_CLK("omap2_mcspi.2", "ick", "mcspi2_ick"),
+ DT_CLK(NULL, "mcspi2_ick", "mcspi2_ick"),
+ DT_CLK(NULL, "mcspi2_fck", "mcspi2_fck"),
+ DT_CLK(NULL, "uart1_ick", "uart1_ick"),
+ DT_CLK(NULL, "uart1_fck", "uart1_fck"),
+ DT_CLK(NULL, "uart2_ick", "uart2_ick"),
+ DT_CLK(NULL, "uart2_fck", "uart2_fck"),
+ DT_CLK(NULL, "uart3_ick", "uart3_ick"),
+ DT_CLK(NULL, "uart3_fck", "uart3_fck"),
+ DT_CLK(NULL, "gpios_ick", "gpios_ick"),
+ DT_CLK(NULL, "gpios_fck", "gpios_fck"),
+ DT_CLK("omap_wdt", "ick", "mpu_wdt_ick"),
+ DT_CLK(NULL, "mpu_wdt_ick", "mpu_wdt_ick"),
+ DT_CLK(NULL, "mpu_wdt_fck", "mpu_wdt_fck"),
+ DT_CLK(NULL, "sync_32k_ick", "sync_32k_ick"),
+ DT_CLK(NULL, "wdt1_ick", "wdt1_ick"),
+ DT_CLK(NULL, "omapctrl_ick", "omapctrl_ick"),
+ DT_CLK("omap24xxcam", "fck", "cam_fck"),
+ DT_CLK(NULL, "cam_fck", "cam_fck"),
+ DT_CLK("omap24xxcam", "ick", "cam_ick"),
+ DT_CLK(NULL, "cam_ick", "cam_ick"),
+ DT_CLK(NULL, "mailboxes_ick", "mailboxes_ick"),
+ DT_CLK(NULL, "wdt4_ick", "wdt4_ick"),
+ DT_CLK(NULL, "wdt4_fck", "wdt4_fck"),
+ DT_CLK(NULL, "mspro_ick", "mspro_ick"),
+ DT_CLK(NULL, "mspro_fck", "mspro_fck"),
+ DT_CLK(NULL, "fac_ick", "fac_ick"),
+ DT_CLK(NULL, "fac_fck", "fac_fck"),
+ DT_CLK("omap_hdq.0", "ick", "hdq_ick"),
+ DT_CLK(NULL, "hdq_ick", "hdq_ick"),
+ DT_CLK("omap_hdq.0", "fck", "hdq_fck"),
+ DT_CLK(NULL, "hdq_fck", "hdq_fck"),
+ DT_CLK("omap_i2c.1", "ick", "i2c1_ick"),
+ DT_CLK(NULL, "i2c1_ick", "i2c1_ick"),
+ DT_CLK("omap_i2c.2", "ick", "i2c2_ick"),
+ DT_CLK(NULL, "i2c2_ick", "i2c2_ick"),
+ DT_CLK(NULL, "gpmc_fck", "gpmc_fck"),
+ DT_CLK(NULL, "sdma_fck", "sdma_fck"),
+ DT_CLK(NULL, "sdma_ick", "sdma_ick"),
+ DT_CLK(NULL, "sdrc_ick", "sdrc_ick"),
+ DT_CLK(NULL, "des_ick", "des_ick"),
+ DT_CLK("omap-sham", "ick", "sha_ick"),
+ DT_CLK(NULL, "sha_ick", "sha_ick"),
+ DT_CLK("omap_rng", "ick", "rng_ick"),
+ DT_CLK(NULL, "rng_ick", "rng_ick"),
+ DT_CLK("omap-aes", "ick", "aes_ick"),
+ DT_CLK(NULL, "aes_ick", "aes_ick"),
+ DT_CLK(NULL, "pka_ick", "pka_ick"),
+ DT_CLK(NULL, "usb_fck", "usb_fck"),
+ DT_CLK(NULL, "timer_32k_ck", "func_32k_ck"),
+ DT_CLK(NULL, "timer_sys_ck", "sys_ck"),
+ DT_CLK(NULL, "timer_ext_ck", "alt_ck"),
+ { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap2420_clks[] = {
+ DT_CLK(NULL, "sys_clkout2_src", "sys_clkout2_src"),
+ DT_CLK(NULL, "sys_clkout2", "sys_clkout2"),
+ DT_CLK(NULL, "dsp_ick", "dsp_ick"),
+ DT_CLK(NULL, "iva1_ifck", "iva1_ifck"),
+ DT_CLK(NULL, "iva1_mpu_int_ifck", "iva1_mpu_int_ifck"),
+ DT_CLK(NULL, "wdt3_ick", "wdt3_ick"),
+ DT_CLK(NULL, "wdt3_fck", "wdt3_fck"),
+ DT_CLK("mmci-omap.0", "ick", "mmc_ick"),
+ DT_CLK(NULL, "mmc_ick", "mmc_ick"),
+ DT_CLK("mmci-omap.0", "fck", "mmc_fck"),
+ DT_CLK(NULL, "mmc_fck", "mmc_fck"),
+ DT_CLK(NULL, "eac_ick", "eac_ick"),
+ DT_CLK(NULL, "eac_fck", "eac_fck"),
+ DT_CLK(NULL, "i2c1_fck", "i2c1_fck"),
+ DT_CLK(NULL, "i2c2_fck", "i2c2_fck"),
+ DT_CLK(NULL, "vlynq_ick", "vlynq_ick"),
+ DT_CLK(NULL, "vlynq_fck", "vlynq_fck"),
+ DT_CLK("musb-hdrc", "fck", "osc_ck"),
+ { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap2430_clks[] = {
+ DT_CLK("twl", "fck", "osc_ck"),
+ DT_CLK(NULL, "iva2_1_ick", "iva2_1_ick"),
+ DT_CLK(NULL, "mdm_ick", "mdm_ick"),
+ DT_CLK(NULL, "mdm_osc_ck", "mdm_osc_ck"),
+ DT_CLK("omap-mcbsp.3", "ick", "mcbsp3_ick"),
+ DT_CLK(NULL, "mcbsp3_ick", "mcbsp3_ick"),
+ DT_CLK(NULL, "mcbsp3_fck", "mcbsp3_fck"),
+ DT_CLK("omap-mcbsp.4", "ick", "mcbsp4_ick"),
+ DT_CLK(NULL, "mcbsp4_ick", "mcbsp4_ick"),
+ DT_CLK(NULL, "mcbsp4_fck", "mcbsp4_fck"),
+ DT_CLK("omap-mcbsp.5", "ick", "mcbsp5_ick"),
+ DT_CLK(NULL, "mcbsp5_ick", "mcbsp5_ick"),
+ DT_CLK(NULL, "mcbsp5_fck", "mcbsp5_fck"),
+ DT_CLK("omap2_mcspi.3", "ick", "mcspi3_ick"),
+ DT_CLK(NULL, "mcspi3_ick", "mcspi3_ick"),
+ DT_CLK(NULL, "mcspi3_fck", "mcspi3_fck"),
+ DT_CLK(NULL, "icr_ick", "icr_ick"),
+ DT_CLK(NULL, "i2chs1_fck", "i2chs1_fck"),
+ DT_CLK(NULL, "i2chs2_fck", "i2chs2_fck"),
+ DT_CLK("musb-omap2430", "ick", "usbhs_ick"),
+ DT_CLK(NULL, "usbhs_ick", "usbhs_ick"),
+ DT_CLK("omap_hsmmc.0", "ick", "mmchs1_ick"),
+ DT_CLK(NULL, "mmchs1_ick", "mmchs1_ick"),
+ DT_CLK(NULL, "mmchs1_fck", "mmchs1_fck"),
+ DT_CLK("omap_hsmmc.1", "ick", "mmchs2_ick"),
+ DT_CLK(NULL, "mmchs2_ick", "mmchs2_ick"),
+ DT_CLK(NULL, "mmchs2_fck", "mmchs2_fck"),
+ DT_CLK(NULL, "gpio5_ick", "gpio5_ick"),
+ DT_CLK(NULL, "gpio5_fck", "gpio5_fck"),
+ DT_CLK(NULL, "mdm_intc_ick", "mdm_intc_ick"),
+ DT_CLK("omap_hsmmc.0", "mmchsdb_fck", "mmchsdb1_fck"),
+ DT_CLK(NULL, "mmchsdb1_fck", "mmchsdb1_fck"),
+ DT_CLK("omap_hsmmc.1", "mmchsdb_fck", "mmchsdb2_fck"),
+ DT_CLK(NULL, "mmchsdb2_fck", "mmchsdb2_fck"),
+ { .node_name = NULL },
+};
+
+static const char *enable_init_clks[] = {
+ "apll96_ck",
+ "apll54_ck",
+ "sync_32k_ick",
+ "omapctrl_ick",
+ "gpmc_fck",
+ "sdrc_ick",
+};
+
+enum {
+ OMAP2_SOC_OMAP2420,
+ OMAP2_SOC_OMAP2430,
+};
+
+static int __init omap2xxx_dt_clk_init(int soc_type)
+{
+ ti_dt_clocks_register(omap2xxx_clks);
+
+ if (soc_type == OMAP2_SOC_OMAP2420)
+ ti_dt_clocks_register(omap2420_clks);
+ else
+ ti_dt_clocks_register(omap2430_clks);
+
+ omap2xxx_clkt_vps_init();
+
+ omap2_clk_disable_autoidle_all();
+
+ omap2_clk_enable_init_clocks(enable_init_clks,
+ ARRAY_SIZE(enable_init_clks));
+
+ pr_info("Clocking rate (Crystal/DPLL/MPU): %ld.%01ld/%ld/%ld MHz\n",
+ (clk_get_rate(clk_get_sys(NULL, "sys_ck")) / 1000000),
+ (clk_get_rate(clk_get_sys(NULL, "sys_ck")) / 100000) % 10,
+ (clk_get_rate(clk_get_sys(NULL, "dpll_ck")) / 1000000),
+ (clk_get_rate(clk_get_sys(NULL, "mpu_ck")) / 1000000));
+
+ return 0;
+}
+
+int __init omap2420_dt_clk_init(void)
+{
+ return omap2xxx_dt_clk_init(OMAP2_SOC_OMAP2420);
+}
+
+int __init omap2430_dt_clk_init(void)
+{
+ return omap2xxx_dt_clk_init(OMAP2_SOC_OMAP2430);
+}
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
new file mode 100644
index 000000000..85c50ea39
--- /dev/null
+++ b/drivers/clk/ti/clk-33xx.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AM33XX Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc
+ * Tero Kristo (t-kristo@ti.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+#include <dt-bindings/clock/am3.h>
+
+#include "clock.h"
+
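+/*
+ * Parent names of the form "<domain>-clkctrl:<offset>:<bit>" refer to
+ * clocks registered by the clkctrl provider (clkctrl.c) for the given
+ * clkctrl register offset and bit shift.
+ */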
+static const char * const am3_gpio1_dbclk_parents[] __initconst = {
+ "clk-24mhz-clkctrl:0000:0",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data am3_gpio2_bit_data[] __initconst = {
+ { 18, TI_CLK_GATE, am3_gpio1_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data am3_gpio3_bit_data[] __initconst = {
+ { 18, TI_CLK_GATE, am3_gpio1_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data am3_gpio4_bit_data[] __initconst = {
+ { 18, TI_CLK_GATE, am3_gpio1_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_l4ls_clkctrl_regs[] __initconst = {
+ { AM3_L4LS_UART6_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_L4LS_MMC1_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
+ { AM3_L4LS_ELM_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L4LS_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_L4LS_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_L4LS_SPI0_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_L4LS_SPI1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_L4LS_L4_LS_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L4LS_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_L4LS_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_L4LS_UART4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_L4LS_UART5_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM3_L4LS_TIMER7_CLKCTRL, NULL, CLKF_SW_SUP, "timer7_fck" },
+ { AM3_L4LS_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "timer2_fck" },
+ { AM3_L4LS_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "timer3_fck" },
+ { AM3_L4LS_TIMER4_CLKCTRL, NULL, CLKF_SW_SUP, "timer4_fck" },
+ { AM3_L4LS_RNG_CLKCTRL, NULL, CLKF_SW_SUP, "rng_fck" },
+ { AM3_L4LS_GPIO2_CLKCTRL, am3_gpio2_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L4LS_GPIO3_CLKCTRL, am3_gpio3_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L4LS_GPIO4_CLKCTRL, am3_gpio4_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L4LS_D_CAN0_CLKCTRL, NULL, CLKF_SW_SUP, "dcan0_fck" },
+ { AM3_L4LS_D_CAN1_CLKCTRL, NULL, CLKF_SW_SUP, "dcan1_fck" },
+ { AM3_L4LS_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L4LS_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L4LS_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L4LS_TIMER5_CLKCTRL, NULL, CLKF_SW_SUP, "timer5_fck" },
+ { AM3_L4LS_TIMER6_CLKCTRL, NULL, CLKF_SW_SUP, "timer6_fck" },
+ { AM3_L4LS_MMC2_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
+ { AM3_L4LS_SPINLOCK_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L4LS_MAILBOX_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM3_L4LS_OCPWP_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_l3s_clkctrl_regs[] __initconst = {
+ { AM3_L3S_USB_OTG_HS_CLKCTRL, NULL, CLKF_SW_SUP, "usbotg_fck" },
+ { AM3_L3S_GPMC_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk" },
+ { AM3_L3S_MCASP0_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp0_fck" },
+ { AM3_L3S_MCASP1_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp1_fck" },
+ { AM3_L3S_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_l3_clkctrl_regs[] __initconst = {
+ { AM3_L3_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM3_L3_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_div2_ck" },
+ { AM3_L3_OCMCRAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM3_L3_AES_CLKCTRL, NULL, CLKF_SW_SUP, "aes0_fck" },
+ { AM3_L3_SHAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM3_L3_TPCC_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM3_L3_L3_INSTR_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM3_L3_L3_MAIN_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM3_L3_TPTC1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM3_L3_TPTC2_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_l4hs_clkctrl_regs[] __initconst = {
+ { AM3_L4HS_L4_HS_CLKCTRL, NULL, CLKF_SW_SUP, "l4hs_gclk" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_pruss_ocp_clkctrl_regs[] __initconst = {
+ { AM3_PRUSS_OCP_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "pruss_ocp_gclk" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_cpsw_125mhz_clkctrl_regs[] __initconst = {
+ { AM3_CPSW_125MHZ_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_lcdc_clkctrl_regs[] __initconst = {
+ { AM3_LCDC_LCDC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "lcd_gclk" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_clk_24mhz_clkctrl_regs[] __initconst = {
+ { AM3_CLK_24MHZ_CLKDIV32K_CLKCTRL, NULL, CLKF_SW_SUP, "clkdiv32k_ck" },
+ { 0 },
+};
+
+static const char * const am3_gpio0_dbclk_parents[] __initconst = {
+ "gpio0_dbclk_mux_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data am3_gpio1_bit_data[] __initconst = {
+ { 18, TI_CLK_GATE, am3_gpio0_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_l4_wkup_clkctrl_regs[] __initconst = {
+ { AM3_L4_WKUP_CONTROL_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_m4_div2_ck" },
+ { AM3_L4_WKUP_GPIO1_CLKCTRL, am3_gpio1_bit_data, CLKF_SW_SUP, "dpll_core_m4_div2_ck" },
+ { AM3_L4_WKUP_L4_WKUP_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_m4_div2_ck" },
+ { AM3_L4_WKUP_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" },
+ { AM3_L4_WKUP_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" },
+ { AM3_L4_WKUP_ADC_TSC_CLKCTRL, NULL, CLKF_SW_SUP, "adc_tsc_fck" },
+ { AM3_L4_WKUP_SMARTREFLEX0_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex0_fck" },
+ { AM3_L4_WKUP_TIMER1_CLKCTRL, NULL, CLKF_SW_SUP, "timer1_fck" },
+ { AM3_L4_WKUP_SMARTREFLEX1_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex1_fck" },
+ { AM3_L4_WKUP_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "wdt1_fck" },
+ { 0 },
+};
+
+static const char * const am3_dbg_sysclk_ck_parents[] __initconst = {
+ "sys_clkin_ck",
+ NULL,
+};
+
+static const char * const am3_trace_pmd_clk_mux_ck_parents[] __initconst = {
+ "l3-aon-clkctrl:0000:19",
+ "l3-aon-clkctrl:0000:30",
+ NULL,
+};
+
+static const char * const am3_trace_clk_div_ck_parents[] __initconst = {
+ "l3-aon-clkctrl:0000:20",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data am3_trace_clk_div_ck_data __initconst = {
+ .max_div = 64,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+};
+
+static const char * const am3_stm_clk_div_ck_parents[] __initconst = {
+ "l3-aon-clkctrl:0000:22",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data am3_stm_clk_div_ck_data __initconst = {
+ .max_div = 64,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+};
+
+static const char * const am3_dbg_clka_ck_parents[] __initconst = {
+ "dpll_core_m4_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data am3_debugss_bit_data[] __initconst = {
+ { 19, TI_CLK_GATE, am3_dbg_sysclk_ck_parents, NULL },
+ { 20, TI_CLK_MUX, am3_trace_pmd_clk_mux_ck_parents, NULL },
+ { 22, TI_CLK_MUX, am3_trace_pmd_clk_mux_ck_parents, NULL },
+ { 24, TI_CLK_DIVIDER, am3_trace_clk_div_ck_parents, &am3_trace_clk_div_ck_data },
+ { 27, TI_CLK_DIVIDER, am3_stm_clk_div_ck_parents, &am3_stm_clk_div_ck_data },
+ { 30, TI_CLK_GATE, am3_dbg_clka_ck_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_l3_aon_clkctrl_regs[] __initconst = {
+ { AM3_L3_AON_DEBUGSS_CLKCTRL, am3_debugss_bit_data, CLKF_SW_SUP, "l3-aon-clkctrl:0000:24" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_l4_wkup_aon_clkctrl_regs[] __initconst = {
+ { AM3_L4_WKUP_AON_WKUP_M3_CLKCTRL, NULL, CLKF_NO_IDLEST, "dpll_core_m4_div2_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_mpu_clkctrl_regs[] __initconst = {
+ { AM3_MPU_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_mpu_m2_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_l4_rtc_clkctrl_regs[] __initconst = {
+ { AM3_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk-24mhz-clkctrl:0000:0" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_gfx_l3_clkctrl_regs[] __initconst = {
+ { AM3_GFX_L3_GFX_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "gfx_fck_div_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am3_l4_cefuse_clkctrl_regs[] __initconst = {
+ { AM3_L4_CEFUSE_CEFUSE_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck" },
+ { 0 },
+};
+
+const struct omap_clkctrl_data am3_clkctrl_data[] __initconst = {
+ { 0x44e00038, am3_l4ls_clkctrl_regs },
+ { 0x44e0001c, am3_l3s_clkctrl_regs },
+ { 0x44e00024, am3_l3_clkctrl_regs },
+ { 0x44e00120, am3_l4hs_clkctrl_regs },
+ { 0x44e000e8, am3_pruss_ocp_clkctrl_regs },
+ { 0x44e00000, am3_cpsw_125mhz_clkctrl_regs },
+ { 0x44e00018, am3_lcdc_clkctrl_regs },
+ { 0x44e0014c, am3_clk_24mhz_clkctrl_regs },
+ { 0x44e00400, am3_l4_wkup_clkctrl_regs },
+ { 0x44e00414, am3_l3_aon_clkctrl_regs },
+ { 0x44e004b0, am3_l4_wkup_aon_clkctrl_regs },
+ { 0x44e00600, am3_mpu_clkctrl_regs },
+ { 0x44e00800, am3_l4_rtc_clkctrl_regs },
+ { 0x44e00900, am3_gfx_l3_clkctrl_regs },
+ { 0x44e00a00, am3_l4_cefuse_clkctrl_regs },
+ { 0 },
+};
+
+static struct ti_dt_clk am33xx_clks[] = {
+ DT_CLK(NULL, "timer_32k_ck", "clk-24mhz-clkctrl:0000:0"),
+ DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK(NULL, "clkdiv32k_ick", "clk-24mhz-clkctrl:0000:0"),
+ DT_CLK(NULL, "dbg_clka_ck", "l3-aon-clkctrl:0000:30"),
+ DT_CLK(NULL, "dbg_sysclk_ck", "l3-aon-clkctrl:0000:19"),
+ DT_CLK(NULL, "gpio0_dbclk", "l4-wkup-clkctrl:0008:18"),
+ DT_CLK(NULL, "gpio1_dbclk", "l4ls-clkctrl:0074:18"),
+ DT_CLK(NULL, "gpio2_dbclk", "l4ls-clkctrl:0078:18"),
+ DT_CLK(NULL, "gpio3_dbclk", "l4ls-clkctrl:007c:18"),
+ DT_CLK(NULL, "stm_clk_div_ck", "l3-aon-clkctrl:0000:27"),
+ DT_CLK(NULL, "stm_pmd_clock_mux_ck", "l3-aon-clkctrl:0000:22"),
+ DT_CLK(NULL, "trace_clk_div_ck", "l3-aon-clkctrl:0000:24"),
+ DT_CLK(NULL, "trace_pmd_clk_mux_ck", "l3-aon-clkctrl:0000:20"),
+ { .node_name = NULL },
+};
+
+static const char *enable_init_clks[] = {
+ "dpll_ddr_m2_ck",
+ "dpll_mpu_m2_ck",
+ "l3_gclk",
+ /* AM3_L3_L3_MAIN_CLKCTRL, needed during suspend */
+ "l3-clkctrl:00bc:0",
+ "l4hs_gclk",
+ "l4fw_gclk",
+ "l4ls_gclk",
+ /* Required for external peripherals like audio codecs */
+ "clkout2_ck",
+};
+
+int __init am33xx_dt_clk_init(void)
+{
+ struct clk *clk1, *clk2;
+
+ ti_dt_clocks_register(am33xx_clks);
+
+ omap2_clk_disable_autoidle_all();
+
+ ti_clk_add_aliases();
+
+ omap2_clk_enable_init_clocks(enable_init_clks,
+ ARRAY_SIZE(enable_init_clks));
+
+ /*
+ * TRM ERRATA: The default parent (TCLKIN) of timers 3 & 6 may not
+ * always be physically present, in which case enabling the clock
+ * through hwmod fails with the default parent. The timer probe then
+ * assumes the clock is already enabled, which leads to a crash when
+ * the timer 3 & 6 registers are accessed during probe. Fix this by
+ * setting the parent of both timers to the master oscillator clock.
+ */
+
+ clk1 = clk_get_sys(NULL, "sys_clkin_ck");
+ clk2 = clk_get_sys(NULL, "timer3_fck");
+ clk_set_parent(clk2, clk1);
+
+ clk2 = clk_get_sys(NULL, "timer6_fck");
+ clk_set_parent(clk2, clk1);
+ /*
+ * The on-chip 32K RC oscillator is not an accurate clock source per
+ * the design/spec, so, for example, a timer expected to expire after
+ * 60s may expire after only ~40s, which no use case expects. Change
+ * the WDT1 clock source to the PRCM 32KHz clock instead.
+ */
+ clk1 = clk_get_sys(NULL, "wdt1_fck");
+ clk2 = clk_get_sys(NULL, "clkdiv32k_ick");
+ clk_set_parent(clk1, clk2);
+
+ return 0;
+}
diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
new file mode 100644
index 000000000..ae943ea63
--- /dev/null
+++ b/drivers/clk/ti/clk-3xxx.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * OMAP3 Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc
+ * Tero Kristo (t-kristo@ti.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+#define OMAP3430ES2_ST_DSS_IDLE_SHIFT 1
+#define OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT 5
+#define OMAP3430ES2_ST_SSI_IDLE_SHIFT 8
+
+#define OMAP34XX_CM_IDLEST_VAL 1
+
+/*
+ * In the AM35xx IPSS, the {ICK,FCK} enable bits for a module are exported
+ * in the same register, at a bit offset of 0x8 from each other. The EN_ACK
+ * for the ICK is at an offset of 4 from the ICK enable bit.
+ */
+#define AM35XX_IPSS_ICK_MASK 0xF
+#define AM35XX_IPSS_ICK_EN_ACK_OFFSET 0x4
+#define AM35XX_IPSS_ICK_FCK_OFFSET 0x8
+#define AM35XX_IPSS_CLK_IDLEST_VAL 0
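+
+/*
+ * For example, a module whose ICK enable bit is bit 1 has its ICK EN_ACK
+ * at bit 5 (1 + AM35XX_IPSS_ICK_EN_ACK_OFFSET) and its FCK enable at
+ * bit 9 (1 + AM35XX_IPSS_ICK_FCK_OFFSET) of the same register.
+ */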
+
+#define AM35XX_ST_IPSS_SHIFT 5
+
+/**
+ * omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI
+ * @clk: struct clk * being enabled
+ * @idlest_reg: struct clk_omap_reg * to store CM_IDLEST reg info into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
+ *
+ * The OMAP3430ES2 SSI target CM_IDLEST bit is at a different shift
+ * from the CM_{I,F}CLKEN bit. Pass back the correct info via
+ * @idlest_reg and @idlest_bit. No return value.
+ */
+static void omap3430es2_clk_ssi_find_idlest(struct clk_hw_omap *clk,
+ struct clk_omap_reg *idlest_reg,
+ u8 *idlest_bit,
+ u8 *idlest_val)
+{
+ memcpy(idlest_reg, &clk->enable_reg, sizeof(*idlest_reg));
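+ /* Map the CM_{I,F}CLKEN offset onto the corresponding CM_IDLEST offset */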
+ idlest_reg->offset &= ~0xf0;
+ idlest_reg->offset |= 0x20;
+ *idlest_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT;
+ *idlest_val = OMAP34XX_CM_IDLEST_VAL;
+}
+
+const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait = {
+ .allow_idle = omap2_clkt_iclk_allow_idle,
+ .deny_idle = omap2_clkt_iclk_deny_idle,
+ .find_idlest = omap3430es2_clk_ssi_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
+
+/**
+ * omap3430es2_clk_dss_usbhost_find_idlest - CM_IDLEST info for DSS, USBHOST
+ * @clk: struct clk * being enabled
+ * @idlest_reg: struct clk_omap_reg * to store CM_IDLEST reg info into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
+ *
+ * Some OMAP modules on OMAP3 ES2+ chips have both initiator and
+ * target IDLEST bits. For our purposes, we are concerned with the
+ * target IDLEST bits, which exist at a different bit position than
+ * the *CLKEN bit position for these modules (DSS and USBHOST). (The
+ * default find_idlest code assumes that they are at the same
+ * position.) No return value.
+ */
+static void
+omap3430es2_clk_dss_usbhost_find_idlest(struct clk_hw_omap *clk,
+ struct clk_omap_reg *idlest_reg,
+ u8 *idlest_bit, u8 *idlest_val)
+{
+ memcpy(idlest_reg, &clk->enable_reg, sizeof(*idlest_reg));
+
+ idlest_reg->offset &= ~0xf0;
+ idlest_reg->offset |= 0x20;
+ /* USBHOST_IDLE has the same shift */
+ *idlest_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT;
+ *idlest_val = OMAP34XX_CM_IDLEST_VAL;
+}
+
+const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait = {
+ .find_idlest = omap3430es2_clk_dss_usbhost_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
+
+const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait = {
+ .allow_idle = omap2_clkt_iclk_allow_idle,
+ .deny_idle = omap2_clkt_iclk_deny_idle,
+ .find_idlest = omap3430es2_clk_dss_usbhost_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
+
+/**
+ * omap3430es2_clk_hsotgusb_find_idlest - return CM_IDLEST info for HSOTGUSB
+ * @clk: struct clk * being enabled
+ * @idlest_reg: struct clk_omap_reg * to store CM_IDLEST reg info into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
+ *
+ * The OMAP3430ES2 HSOTGUSB target CM_IDLEST bit is at a different
+ * shift from the CM_{I,F}CLKEN bit. Pass back the correct info via
+ * @idlest_reg and @idlest_bit. No return value.
+ */
+static void
+omap3430es2_clk_hsotgusb_find_idlest(struct clk_hw_omap *clk,
+ struct clk_omap_reg *idlest_reg,
+ u8 *idlest_bit,
+ u8 *idlest_val)
+{
+ memcpy(idlest_reg, &clk->enable_reg, sizeof(*idlest_reg));
+ idlest_reg->offset &= ~0xf0;
+ idlest_reg->offset |= 0x20;
+ *idlest_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT;
+ *idlest_val = OMAP34XX_CM_IDLEST_VAL;
+}
+
+const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait = {
+ .allow_idle = omap2_clkt_iclk_allow_idle,
+ .deny_idle = omap2_clkt_iclk_deny_idle,
+ .find_idlest = omap3430es2_clk_hsotgusb_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
+
+/**
+ * am35xx_clk_find_idlest - return clock ACK info for AM35XX IPSS
+ * @clk: struct clk * being enabled
+ * @idlest_reg: struct clk_omap_reg * to store CM_IDLEST reg info into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
+ *
+ * The interface clocks on the AM35xx IPSS reflect the clock idle status
+ * in the enable register itself, at a bit offset of 4 from the enable
+ * bit. A value of 1 indicates that the clock is enabled.
+ */
+static void am35xx_clk_find_idlest(struct clk_hw_omap *clk,
+ struct clk_omap_reg *idlest_reg,
+ u8 *idlest_bit,
+ u8 *idlest_val)
+{
+ memcpy(idlest_reg, &clk->enable_reg, sizeof(*idlest_reg));
+ *idlest_bit = clk->enable_bit + AM35XX_IPSS_ICK_EN_ACK_OFFSET;
+ *idlest_val = AM35XX_IPSS_CLK_IDLEST_VAL;
+}
+
+/**
+ * am35xx_clk_find_companion - find companion clock to @clk
+ * @clk: struct clk * to find the companion clock of
+ * @other_reg: struct clk_omap_reg * to return the companion clock CM_*CLKEN reg info in
+ * @other_bit: u8 * to return the companion clock bit shift in
+ *
+ * Some clocks don't have companion clocks. For example, modules with
+ * only an interface clock (such as HECC) don't have a companion
+ * clock. Right now, this code relies on the hardware exporting a bit
+ * in the correct companion register that indicates that the
+ * nonexistent 'companion clock' is active. Future patches will
+ * associate this type of code with per-module data structures to
+ * avoid this issue, and remove the casts. No return value.
+ */
+static void am35xx_clk_find_companion(struct clk_hw_omap *clk,
+ struct clk_omap_reg *other_reg,
+ u8 *other_bit)
+{
+ memcpy(other_reg, &clk->enable_reg, sizeof(*other_reg));
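+ /* The companion ICK/FCK enable bit is 0x8 away in the same register */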
+ if (clk->enable_bit & AM35XX_IPSS_ICK_MASK)
+ *other_bit = clk->enable_bit + AM35XX_IPSS_ICK_FCK_OFFSET;
+ else
+ *other_bit = clk->enable_bit - AM35XX_IPSS_ICK_FCK_OFFSET;
+}
+
+const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait = {
+ .find_idlest = am35xx_clk_find_idlest,
+ .find_companion = am35xx_clk_find_companion,
+};
+
+/**
+ * am35xx_clk_ipss_find_idlest - return CM_IDLEST info for IPSS
+ * @clk: struct clk * being enabled
+ * @idlest_reg: struct clk_omap_reg * to store CM_IDLEST reg info into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
+ *
+ * The IPSS target CM_IDLEST bit is at a different shift from the
+ * CM_{I,F}CLKEN bit. Pass back the correct info via @idlest_reg
+ * and @idlest_bit. No return value.
+ */
+static void am35xx_clk_ipss_find_idlest(struct clk_hw_omap *clk,
+ struct clk_omap_reg *idlest_reg,
+ u8 *idlest_bit,
+ u8 *idlest_val)
+{
+ memcpy(idlest_reg, &clk->enable_reg, sizeof(*idlest_reg));
+
+ idlest_reg->offset &= ~0xf0;
+ idlest_reg->offset |= 0x20;
+ *idlest_bit = AM35XX_ST_IPSS_SHIFT;
+ *idlest_val = OMAP34XX_CM_IDLEST_VAL;
+}
+
+const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait = {
+ .allow_idle = omap2_clkt_iclk_allow_idle,
+ .deny_idle = omap2_clkt_iclk_deny_idle,
+ .find_idlest = am35xx_clk_ipss_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
+
+static struct ti_dt_clk omap3xxx_clks[] = {
+ DT_CLK(NULL, "timer_32k_ck", "omap_32k_fck"),
+ DT_CLK(NULL, "timer_sys_ck", "sys_ck"),
+ { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap36xx_omap3430es2plus_clks[] = {
+ DT_CLK(NULL, "ssi_ssr_fck", "ssi_ssr_fck_3430es2"),
+ DT_CLK(NULL, "ssi_sst_fck", "ssi_sst_fck_3430es2"),
+ DT_CLK(NULL, "hsotgusb_ick", "hsotgusb_ick_3430es2"),
+ DT_CLK(NULL, "ssi_ick", "ssi_ick_3430es2"),
+ { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap3430es1_clks[] = {
+ DT_CLK(NULL, "ssi_ssr_fck", "ssi_ssr_fck_3430es1"),
+ DT_CLK(NULL, "ssi_sst_fck", "ssi_sst_fck_3430es1"),
+ DT_CLK(NULL, "hsotgusb_ick", "hsotgusb_ick_3430es1"),
+ DT_CLK(NULL, "ssi_ick", "ssi_ick_3430es1"),
+ DT_CLK(NULL, "dss1_alwon_fck", "dss1_alwon_fck_3430es1"),
+ DT_CLK(NULL, "dss_ick", "dss_ick_3430es1"),
+ { .node_name = NULL },
+};
+
+static struct ti_dt_clk omap36xx_am35xx_omap3430es2plus_clks[] = {
+ DT_CLK(NULL, "dss1_alwon_fck", "dss1_alwon_fck_3430es2"),
+ DT_CLK(NULL, "dss_ick", "dss_ick_3430es2"),
+ { .node_name = NULL },
+};
+
+static struct ti_dt_clk am35xx_clks[] = {
+ DT_CLK(NULL, "hsotgusb_ick", "hsotgusb_ick_am35xx"),
+ DT_CLK(NULL, "hsotgusb_fck", "hsotgusb_fck_am35xx"),
+ DT_CLK(NULL, "uart4_ick", "uart4_ick_am35xx"),
+ DT_CLK(NULL, "uart4_fck", "uart4_fck_am35xx"),
+ { .node_name = NULL },
+};
+
+static const char *enable_init_clks[] = {
+ "sdrc_ick",
+ "gpmc_fck",
+ "omapctrl_ick",
+};
+
+enum {
+ OMAP3_SOC_AM35XX,
+ OMAP3_SOC_OMAP3430_ES1,
+ OMAP3_SOC_OMAP3430_ES2_PLUS,
+ OMAP3_SOC_OMAP3630,
+};
+
+/**
+ * omap3_clk_lock_dpll5 - locks DPLL5
+ *
+ * Locks DPLL5 to a pre-defined frequency. This is required for proper
+ * operation of USB.
+ */
+void __init omap3_clk_lock_dpll5(void)
+{
+ struct clk *dpll5_clk;
+ struct clk *dpll5_m2_clk;
+
+ /*
+ * Errata sprz319f advisory 2.1 documents a USB host clock drift issue
+ * that can be worked around using specially crafted dpll5 settings
+ * with a dpll5_m2 divider set to 8. Set the dpll5 rate to 8x the USB
+ * host clock rate; its .set_rate handler will then detect that frequency
+ * and use the errata settings.
+ */
+ dpll5_clk = clk_get(NULL, "dpll5_ck");
+ clk_set_rate(dpll5_clk, OMAP3_DPLL5_FREQ_FOR_USBHOST * 8);
+ clk_prepare_enable(dpll5_clk);
+
+ /* Program dpll5_m2_clk divider */
+ dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck");
+ clk_prepare_enable(dpll5_m2_clk);
+ clk_set_rate(dpll5_m2_clk, OMAP3_DPLL5_FREQ_FOR_USBHOST);
+
+ clk_disable_unprepare(dpll5_m2_clk);
+ clk_disable_unprepare(dpll5_clk);
+}
+
+static int __init omap3xxx_dt_clk_init(int soc_type)
+{
+ if (soc_type == OMAP3_SOC_AM35XX || soc_type == OMAP3_SOC_OMAP3630 ||
+ soc_type == OMAP3_SOC_OMAP3430_ES1 ||
+ soc_type == OMAP3_SOC_OMAP3430_ES2_PLUS)
+ ti_dt_clocks_register(omap3xxx_clks);
+
+ if (soc_type == OMAP3_SOC_AM35XX)
+ ti_dt_clocks_register(am35xx_clks);
+
+ if (soc_type == OMAP3_SOC_OMAP3630 || soc_type == OMAP3_SOC_AM35XX ||
+ soc_type == OMAP3_SOC_OMAP3430_ES2_PLUS)
+ ti_dt_clocks_register(omap36xx_am35xx_omap3430es2plus_clks);
+
+ if (soc_type == OMAP3_SOC_OMAP3430_ES1)
+ ti_dt_clocks_register(omap3430es1_clks);
+
+ if (soc_type == OMAP3_SOC_OMAP3430_ES2_PLUS ||
+ soc_type == OMAP3_SOC_OMAP3630)
+ ti_dt_clocks_register(omap36xx_omap3430es2plus_clks);
+
+ omap2_clk_disable_autoidle_all();
+
+ ti_clk_add_aliases();
+
+ omap2_clk_enable_init_clocks(enable_init_clks,
+ ARRAY_SIZE(enable_init_clks));
+
+ pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
+ (clk_get_rate(clk_get_sys(NULL, "osc_sys_ck")) / 1000000),
+ (clk_get_rate(clk_get_sys(NULL, "osc_sys_ck")) / 100000) % 10,
+ (clk_get_rate(clk_get_sys(NULL, "core_ck")) / 1000000),
+ (clk_get_rate(clk_get_sys(NULL, "arm_fck")) / 1000000));
+
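+ /* DPLL5 is an ES2+ feature, so it is not locked on OMAP3430 ES1 */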
+ if (soc_type != OMAP3_SOC_OMAP3430_ES1)
+ omap3_clk_lock_dpll5();
+
+ return 0;
+}
+
+int __init omap3430_dt_clk_init(void)
+{
+ return omap3xxx_dt_clk_init(OMAP3_SOC_OMAP3430_ES2_PLUS);
+}
+
+int __init omap3630_dt_clk_init(void)
+{
+ return omap3xxx_dt_clk_init(OMAP3_SOC_OMAP3630);
+}
+
+int __init am35xx_dt_clk_init(void)
+{
+ return omap3xxx_dt_clk_init(OMAP3_SOC_AM35XX);
+}
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
new file mode 100644
index 000000000..f24f6eb21
--- /dev/null
+++ b/drivers/clk/ti/clk-43xx.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AM43XX Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc
+ * Tero Kristo (t-kristo@ti.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+#include <dt-bindings/clock/am4.h>
+
+#include "clock.h"
+
+static const struct omap_clkctrl_reg_data am4_l3s_tsc_clkctrl_regs[] __initconst = {
+ { AM4_L3S_TSC_ADC_TSC_CLKCTRL, NULL, CLKF_SW_SUP, "adc_tsc_fck" },
+ { 0 },
+};
+
+static const char * const am4_synctimer_32kclk_parents[] __initconst = {
+ "mux_synctimer32k_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data am4_counter_32k_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, am4_synctimer_32kclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_l4_wkup_aon_clkctrl_regs[] __initconst = {
+ { AM4_L4_WKUP_AON_WKUP_M3_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "sys_clkin_ck" },
+ { AM4_L4_WKUP_AON_COUNTER_32K_CLKCTRL, am4_counter_32k_bit_data, CLKF_SW_SUP, "l4-wkup-aon-clkctrl:0008:8" },
+ { 0 },
+};
+
+static const char * const am4_gpio0_dbclk_parents[] __initconst = {
+ "gpio0_dbclk_mux_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data am4_gpio1_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, am4_gpio0_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_l4_wkup_clkctrl_regs[] __initconst = {
+ { AM4_L4_WKUP_L4_WKUP_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck" },
+ { AM4_L4_WKUP_TIMER1_CLKCTRL, NULL, CLKF_SW_SUP, "timer1_fck" },
+ { AM4_L4_WKUP_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "wdt1_fck" },
+ { AM4_L4_WKUP_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" },
+ { AM4_L4_WKUP_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_wkupdm_ck" },
+ { AM4_L4_WKUP_SMARTREFLEX0_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex0_fck" },
+ { AM4_L4_WKUP_SMARTREFLEX1_CLKCTRL, NULL, CLKF_SW_SUP, "smartreflex1_fck" },
+ { AM4_L4_WKUP_CONTROL_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin_ck" },
+ { AM4_L4_WKUP_GPIO1_CLKCTRL, am4_gpio1_bit_data, CLKF_SW_SUP, "sys_clkin_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_mpu_clkctrl_regs[] __initconst = {
+ { AM4_MPU_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_mpu_m2_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_gfx_l3_clkctrl_regs[] __initconst = {
+ { AM4_GFX_L3_GFX_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "gfx_fck_div_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_l4_rtc_clkctrl_regs[] __initconst = {
+ { AM4_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clkdiv32k_ick" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_l3_clkctrl_regs[] __initconst = {
+ { AM4_L3_L3_MAIN_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3_AES_CLKCTRL, NULL, CLKF_SW_SUP, "aes0_fck" },
+ { AM4_L3_DES_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3_L3_INSTR_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3_OCMCRAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3_SHAM_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3_TPCC_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3_TPTC1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3_TPTC2_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3_L4_HS_CLKCTRL, NULL, CLKF_SW_SUP, "l4hs_gclk" },
+ { 0 },
+};
+
+static const char * const am4_usb_otg_ss0_refclk960m_parents[] __initconst = {
+ "dpll_per_clkdcoldo",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data am4_usb_otg_ss0_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, am4_usb_otg_ss0_refclk960m_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data am4_usb_otg_ss1_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, am4_usb_otg_ss0_refclk960m_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_l3s_clkctrl_regs[] __initconst = {
+ { AM4_L3S_VPFE0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3S_VPFE1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk" },
+ { AM4_L3S_GPMC_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk" },
+ { AM4_L3S_ADC1_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk" },
+ { AM4_L3S_MCASP0_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp0_fck" },
+ { AM4_L3S_MCASP1_CLKCTRL, NULL, CLKF_SW_SUP, "mcasp1_fck" },
+ { AM4_L3S_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
+ { AM4_L3S_QSPI_CLKCTRL, NULL, CLKF_SW_SUP, "l3s_gclk" },
+ { AM4_L3S_USB_OTG_SS0_CLKCTRL, am4_usb_otg_ss0_bit_data, CLKF_SW_SUP, "l3s_gclk" },
+ { AM4_L3S_USB_OTG_SS1_CLKCTRL, am4_usb_otg_ss1_bit_data, CLKF_SW_SUP, "l3s_gclk" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_pruss_ocp_clkctrl_regs[] __initconst = {
+ { AM4_PRUSS_OCP_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "pruss_ocp_gclk" },
+ { 0 },
+};
+
+static const char * const am4_gpio1_dbclk_parents[] __initconst = {
+ "clkdiv32k_ick",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data am4_gpio2_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, am4_gpio1_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data am4_gpio3_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, am4_gpio1_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data am4_gpio4_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, am4_gpio1_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data am4_gpio5_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, am4_gpio1_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data am4_gpio6_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, am4_gpio1_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_l4ls_clkctrl_regs[] __initconst = {
+ { AM4_L4LS_L4_LS_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_D_CAN0_CLKCTRL, NULL, CLKF_SW_SUP, "dcan0_fck" },
+ { AM4_L4LS_D_CAN1_CLKCTRL, NULL, CLKF_SW_SUP, "dcan1_fck" },
+ { AM4_L4LS_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_EPWMSS3_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_EPWMSS4_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_EPWMSS5_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_ELM_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_GPIO2_CLKCTRL, am4_gpio2_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_GPIO3_CLKCTRL, am4_gpio3_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_GPIO4_CLKCTRL, am4_gpio4_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_GPIO5_CLKCTRL, am4_gpio5_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_GPIO6_CLKCTRL, am4_gpio6_bit_data, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_HDQ1W_CLKCTRL, NULL, CLKF_SW_SUP, "func_12m_clk" },
+ { AM4_L4LS_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_MAILBOX_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_MMC1_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
+ { AM4_L4LS_MMC2_CLKCTRL, NULL, CLKF_SW_SUP, "mmc_clk" },
+ { AM4_L4LS_RNG_CLKCTRL, NULL, CLKF_SW_SUP, "rng_fck" },
+ { AM4_L4LS_SPI0_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_SPI1_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_SPI2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_SPI3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_SPI4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_SPINLOCK_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "timer2_fck" },
+ { AM4_L4LS_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "timer3_fck" },
+ { AM4_L4LS_TIMER4_CLKCTRL, NULL, CLKF_SW_SUP, "timer4_fck" },
+ { AM4_L4LS_TIMER5_CLKCTRL, NULL, CLKF_SW_SUP, "timer5_fck" },
+ { AM4_L4LS_TIMER6_CLKCTRL, NULL, CLKF_SW_SUP, "timer6_fck" },
+ { AM4_L4LS_TIMER7_CLKCTRL, NULL, CLKF_SW_SUP, "timer7_fck" },
+ { AM4_L4LS_TIMER8_CLKCTRL, NULL, CLKF_SW_SUP, "timer8_fck" },
+ { AM4_L4LS_TIMER9_CLKCTRL, NULL, CLKF_SW_SUP, "timer9_fck" },
+ { AM4_L4LS_TIMER10_CLKCTRL, NULL, CLKF_SW_SUP, "timer10_fck" },
+ { AM4_L4LS_TIMER11_CLKCTRL, NULL, CLKF_SW_SUP, "timer11_fck" },
+ { AM4_L4LS_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_UART4_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_UART5_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_UART6_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_per_m2_div4_ck" },
+ { AM4_L4LS_OCP2SCP0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { AM4_L4LS_OCP2SCP1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_emif_clkctrl_regs[] __initconst = {
+ { AM4_EMIF_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_dss_clkctrl_regs[] __initconst = {
+ { AM4_DSS_DSS_CORE_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "disp_clk" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data am4_cpsw_125mhz_clkctrl_regs[] __initconst = {
+ { AM4_CPSW_125MHZ_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk" },
+ { 0 },
+};
+
+const struct omap_clkctrl_data am4_clkctrl_data[] __initconst = {
+ { 0x44df2920, am4_l3s_tsc_clkctrl_regs },
+ { 0x44df2a28, am4_l4_wkup_aon_clkctrl_regs },
+ { 0x44df2a20, am4_l4_wkup_clkctrl_regs },
+ { 0x44df8320, am4_mpu_clkctrl_regs },
+ { 0x44df8420, am4_gfx_l3_clkctrl_regs },
+ { 0x44df8520, am4_l4_rtc_clkctrl_regs },
+ { 0x44df8820, am4_l3_clkctrl_regs },
+ { 0x44df8868, am4_l3s_clkctrl_regs },
+ { 0x44df8b20, am4_pruss_ocp_clkctrl_regs },
+ { 0x44df8c20, am4_l4ls_clkctrl_regs },
+ { 0x44df8f20, am4_emif_clkctrl_regs },
+ { 0x44df9220, am4_dss_clkctrl_regs },
+ { 0x44df9320, am4_cpsw_125mhz_clkctrl_regs },
+ { 0 },
+};
+
+const struct omap_clkctrl_data am438x_clkctrl_data[] __initconst = {
+ { 0x44df2920, am4_l3s_tsc_clkctrl_regs },
+ { 0x44df2a28, am4_l4_wkup_aon_clkctrl_regs },
+ { 0x44df2a20, am4_l4_wkup_clkctrl_regs },
+ { 0x44df8320, am4_mpu_clkctrl_regs },
+ { 0x44df8420, am4_gfx_l3_clkctrl_regs },
+ { 0x44df8820, am4_l3_clkctrl_regs },
+ { 0x44df8868, am4_l3s_clkctrl_regs },
+ { 0x44df8b20, am4_pruss_ocp_clkctrl_regs },
+ { 0x44df8c20, am4_l4ls_clkctrl_regs },
+ { 0x44df8f20, am4_emif_clkctrl_regs },
+ { 0x44df9220, am4_dss_clkctrl_regs },
+ { 0x44df9320, am4_cpsw_125mhz_clkctrl_regs },
+ { 0 },
+};
+
+static struct ti_dt_clk am43xx_clks[] = {
+ DT_CLK(NULL, "timer_32k_ck", "clkdiv32k_ick"),
+ DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK(NULL, "gpio0_dbclk", "l4-wkup-clkctrl:0148:8"),
+ DT_CLK(NULL, "gpio1_dbclk", "l4ls-clkctrl:0058:8"),
+ DT_CLK(NULL, "gpio2_dbclk", "l4ls-clkctrl:0060:8"),
+ DT_CLK(NULL, "gpio3_dbclk", "l4ls-clkctrl:0068:8"),
+ DT_CLK(NULL, "gpio4_dbclk", "l4ls-clkctrl:0070:8"),
+ DT_CLK(NULL, "gpio5_dbclk", "l4ls-clkctrl:0078:8"),
+ DT_CLK(NULL, "synctimer_32kclk", "l4-wkup-aon-clkctrl:0008:8"),
+ DT_CLK(NULL, "usb_otg_ss0_refclk960m", "l3s-clkctrl:01f8:8"),
+ DT_CLK(NULL, "usb_otg_ss1_refclk960m", "l3s-clkctrl:0200:8"),
+ { .node_name = NULL },
+};
+
+static const char *enable_init_clks[] = {
+ /* AM4_L3_L3_MAIN_CLKCTRL, needed during suspend */
+ "l3-clkctrl:0000:0",
+};
+
+int __init am43xx_dt_clk_init(void)
+{
+ struct clk *clk1, *clk2;
+
+ ti_dt_clocks_register(am43xx_clks);
+
+ omap2_clk_disable_autoidle_all();
+
+ omap2_clk_enable_init_clocks(enable_init_clks,
+ ARRAY_SIZE(enable_init_clks));
+
+ ti_clk_add_aliases();
+
+ /*
+ * cpsw_cpts_rft_clk can be sourced from one of three clocks:
+ * dpll_core_m4_ck, dpll_core_m5_ck and dpll_disp_m2_ck. By default
+ * dpll_core_m4_ck is selected, but with that source the CPTS does not
+ * work properly and PTP runs report clockcheck errors such as
+ * "clockcheck: clock jumped backward or running slower than expected!".
+ * Selecting dpll_core_m5_ck as the clock source fixes this issue; on
+ * AM335x, dpll_core_m5_ck is already the default clock source.
+ */
+ clk1 = clk_get_sys(NULL, "cpsw_cpts_rft_clk");
+ clk2 = clk_get_sys(NULL, "dpll_core_m5_ck");
+ clk_set_parent(clk1, clk2);
+
+ return 0;
+}
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
new file mode 100644
index 000000000..9b2824ed7
--- /dev/null
+++ b/drivers/clk/ti/clk-44xx.c
@@ -0,0 +1,830 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * OMAP4 Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo (t-kristo@ti.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk/ti.h>
+#include <dt-bindings/clock/omap4.h>
+
+#include "clock.h"
+
+/*
+ * OMAP4 ABE DPLL default frequency. The OMAP4460 TRM version V, section
+ * "3.6.3.2.3 CM1_ABE Clock Generator", states that "DPLL_ABE_X2_CLK
+ * must be set to 196.608 MHz"; hence the DPLL locked frequency is half
+ * of this value (98.304 MHz).
+ */
+#define OMAP4_DPLL_ABE_DEFFREQ 98304000
+
+/*
+ * OMAP4 USB DPLL default frequency. The OMAP4430 TRM version V, section
+ * "3.6.3.9.5 DPLL_USB Preferred Settings", shows that the preferred
+ * locked frequency for the USB DPLL is 960 MHz.
+ */
+#define OMAP4_DPLL_USB_DEFFREQ 960000000
+
+static const struct omap_clkctrl_reg_data omap4_mpuss_clkctrl_regs[] __initconst = {
+ { OMAP4_MPU_CLKCTRL, NULL, 0, "dpll_mpu_m2_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_tesla_clkctrl_regs[] __initconst = {
+ { OMAP4_DSP_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_iva_m4x2_ck" },
+ { 0 },
+};
+
+static const char * const omap4_aess_fclk_parents[] __initconst = {
+ "abe_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data omap4_aess_fclk_data __initconst = {
+ .max_div = 2,
+};
+
+static const struct omap_clkctrl_bit_data omap4_aess_bit_data[] __initconst = {
+ { 24, TI_CLK_DIVIDER, omap4_aess_fclk_parents, &omap4_aess_fclk_data },
+ { 0 },
+};
+
+static const char * const omap4_func_dmic_abe_gfclk_parents[] __initconst = {
+ "abe-clkctrl:0018:26",
+ "pad_clks_ck",
+ "slimbus_clk",
+ NULL,
+};
+
+static const char * const omap4_dmic_sync_mux_ck_parents[] __initconst = {
+ "abe_24m_fclk",
+ "syc_clk_div_ck",
+ "func_24m_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_dmic_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_func_dmic_abe_gfclk_parents, NULL },
+ { 26, TI_CLK_MUX, omap4_dmic_sync_mux_ck_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_func_mcasp_abe_gfclk_parents[] __initconst = {
+ "abe-clkctrl:0020:26",
+ "pad_clks_ck",
+ "slimbus_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_mcasp_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_func_mcasp_abe_gfclk_parents, NULL },
+ { 26, TI_CLK_MUX, omap4_dmic_sync_mux_ck_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_func_mcbsp1_gfclk_parents[] __initconst = {
+ "abe-clkctrl:0028:26",
+ "pad_clks_ck",
+ "slimbus_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_mcbsp1_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_func_mcbsp1_gfclk_parents, NULL },
+ { 26, TI_CLK_MUX, omap4_dmic_sync_mux_ck_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_func_mcbsp2_gfclk_parents[] __initconst = {
+ "abe-clkctrl:0030:26",
+ "pad_clks_ck",
+ "slimbus_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_mcbsp2_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_func_mcbsp2_gfclk_parents, NULL },
+ { 26, TI_CLK_MUX, omap4_dmic_sync_mux_ck_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_func_mcbsp3_gfclk_parents[] __initconst = {
+ "abe-clkctrl:0038:26",
+ "pad_clks_ck",
+ "slimbus_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_mcbsp3_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_func_mcbsp3_gfclk_parents, NULL },
+ { 26, TI_CLK_MUX, omap4_dmic_sync_mux_ck_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_slimbus1_fclk_0_parents[] __initconst = {
+ "abe_24m_fclk",
+ NULL,
+};
+
+static const char * const omap4_slimbus1_fclk_1_parents[] __initconst = {
+ "func_24m_clk",
+ NULL,
+};
+
+static const char * const omap4_slimbus1_fclk_2_parents[] __initconst = {
+ "pad_clks_ck",
+ NULL,
+};
+
+static const char * const omap4_slimbus1_slimbus_clk_parents[] __initconst = {
+ "slimbus_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_slimbus1_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_slimbus1_fclk_0_parents, NULL },
+ { 9, TI_CLK_GATE, omap4_slimbus1_fclk_1_parents, NULL },
+ { 10, TI_CLK_GATE, omap4_slimbus1_fclk_2_parents, NULL },
+ { 11, TI_CLK_GATE, omap4_slimbus1_slimbus_clk_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_timer5_sync_mux_parents[] __initconst = {
+ "syc_clk_div_ck",
+ "sys_32k_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer5_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_timer5_sync_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer6_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_timer5_sync_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer7_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_timer5_sync_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer8_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_timer5_sync_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_abe_clkctrl_regs[] __initconst = {
+ { OMAP4_L4_ABE_CLKCTRL, NULL, 0, "ocp_abe_iclk" },
+ { OMAP4_AESS_CLKCTRL, omap4_aess_bit_data, CLKF_SW_SUP, "abe-clkctrl:0008:24" },
+ { OMAP4_MCPDM_CLKCTRL, NULL, CLKF_SW_SUP, "pad_clks_ck" },
+ { OMAP4_DMIC_CLKCTRL, omap4_dmic_bit_data, CLKF_SW_SUP, "abe-clkctrl:0018:24" },
+ { OMAP4_MCASP_CLKCTRL, omap4_mcasp_bit_data, CLKF_SW_SUP, "abe-clkctrl:0020:24" },
+ { OMAP4_MCBSP1_CLKCTRL, omap4_mcbsp1_bit_data, CLKF_SW_SUP, "abe-clkctrl:0028:24" },
+ { OMAP4_MCBSP2_CLKCTRL, omap4_mcbsp2_bit_data, CLKF_SW_SUP, "abe-clkctrl:0030:24" },
+ { OMAP4_MCBSP3_CLKCTRL, omap4_mcbsp3_bit_data, CLKF_SW_SUP, "abe-clkctrl:0038:24" },
+ { OMAP4_SLIMBUS1_CLKCTRL, omap4_slimbus1_bit_data, CLKF_SW_SUP, "abe-clkctrl:0040:8" },
+ { OMAP4_TIMER5_CLKCTRL, omap4_timer5_bit_data, CLKF_SW_SUP, "abe-clkctrl:0048:24" },
+ { OMAP4_TIMER6_CLKCTRL, omap4_timer6_bit_data, CLKF_SW_SUP, "abe-clkctrl:0050:24" },
+ { OMAP4_TIMER7_CLKCTRL, omap4_timer7_bit_data, CLKF_SW_SUP, "abe-clkctrl:0058:24" },
+ { OMAP4_TIMER8_CLKCTRL, omap4_timer8_bit_data, CLKF_SW_SUP, "abe-clkctrl:0060:24" },
+ { OMAP4_WD_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l4_ao_clkctrl_regs[] __initconst = {
+ { OMAP4_SMARTREFLEX_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "l4_wkup_clk_mux_ck" },
+ { OMAP4_SMARTREFLEX_IVA_CLKCTRL, NULL, CLKF_SW_SUP, "l4_wkup_clk_mux_ck" },
+ { OMAP4_SMARTREFLEX_CORE_CLKCTRL, NULL, CLKF_SW_SUP, "l4_wkup_clk_mux_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l3_1_clkctrl_regs[] __initconst = {
+ { OMAP4_L3_MAIN_1_CLKCTRL, NULL, 0, "l3_div_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l3_2_clkctrl_regs[] __initconst = {
+ { OMAP4_L3_MAIN_2_CLKCTRL, NULL, 0, "l3_div_ck" },
+ { OMAP4_GPMC_CLKCTRL, NULL, CLKF_HW_SUP, "l3_div_ck" },
+ { OMAP4_OCMC_RAM_CLKCTRL, NULL, 0, "l3_div_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_ducati_clkctrl_regs[] __initconst = {
+ { OMAP4_IPU_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "ducati_clk_mux_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l3_dma_clkctrl_regs[] __initconst = {
+ { OMAP4_DMA_SYSTEM_CLKCTRL, NULL, 0, "l3_div_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l3_emif_clkctrl_regs[] __initconst = {
+ { OMAP4_DMM_CLKCTRL, NULL, 0, "l3_div_ck" },
+ { OMAP4_EMIF1_CLKCTRL, NULL, CLKF_HW_SUP, "ddrphy_ck" },
+ { OMAP4_EMIF2_CLKCTRL, NULL, CLKF_HW_SUP, "ddrphy_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_d2d_clkctrl_regs[] __initconst = {
+ { OMAP4_C2C_CLKCTRL, NULL, 0, "div_core_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l4_cfg_clkctrl_regs[] __initconst = {
+ { OMAP4_L4_CFG_CLKCTRL, NULL, 0, "l4_div_ck" },
+ { OMAP4_SPINLOCK_CLKCTRL, NULL, 0, "l4_div_ck" },
+ { OMAP4_MAILBOX_CLKCTRL, NULL, 0, "l4_div_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l3_instr_clkctrl_regs[] __initconst = {
+ { OMAP4_L3_MAIN_3_CLKCTRL, NULL, CLKF_HW_SUP, "l3_div_ck" },
+ { OMAP4_L3_INSTR_CLKCTRL, NULL, CLKF_HW_SUP, "l3_div_ck" },
+ { OMAP4_OCP_WP_NOC_CLKCTRL, NULL, CLKF_HW_SUP, "l3_div_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_ivahd_clkctrl_regs[] __initconst = {
+ { OMAP4_IVA_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_iva_m5x2_ck" },
+ { OMAP4_SL2IF_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_m5x2_ck" },
+ { 0 },
+};
+
+static const char * const omap4_iss_ctrlclk_parents[] __initconst = {
+ "func_96m_fclk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_iss_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_iss_ctrlclk_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_fdif_fck_parents[] __initconst = {
+ "dpll_per_m4x2_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data omap4_fdif_fck_data __initconst = {
+ .max_div = 4,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+};
+
+static const struct omap_clkctrl_bit_data omap4_fdif_bit_data[] __initconst = {
+ { 24, TI_CLK_DIVIDER, omap4_fdif_fck_parents, &omap4_fdif_fck_data },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_iss_clkctrl_regs[] __initconst = {
+ { OMAP4_ISS_CLKCTRL, omap4_iss_bit_data, CLKF_SW_SUP, "ducati_clk_mux_ck" },
+ { OMAP4_FDIF_CLKCTRL, omap4_fdif_bit_data, CLKF_SW_SUP, "iss-clkctrl:0008:24" },
+ { 0 },
+};
+
+static const char * const omap4_dss_dss_clk_parents[] __initconst = {
+ "dpll_per_m5x2_ck",
+ NULL,
+};
+
+static const char * const omap4_dss_48mhz_clk_parents[] __initconst = {
+ "func_48mc_fclk",
+ NULL,
+};
+
+static const char * const omap4_dss_sys_clk_parents[] __initconst = {
+ "syc_clk_div_ck",
+ NULL,
+};
+
+static const char * const omap4_dss_tv_clk_parents[] __initconst = {
+ "extalt_clkin_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_dss_core_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_dss_dss_clk_parents, NULL },
+ { 9, TI_CLK_GATE, omap4_dss_48mhz_clk_parents, NULL },
+ { 10, TI_CLK_GATE, omap4_dss_sys_clk_parents, NULL },
+ { 11, TI_CLK_GATE, omap4_dss_tv_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l3_dss_clkctrl_regs[] __initconst = {
+ { OMAP4_DSS_CORE_CLKCTRL, omap4_dss_core_bit_data, CLKF_SW_SUP, "l3-dss-clkctrl:0000:8" },
+ { 0 },
+};
+
+static const char * const omap4_sgx_clk_mux_parents[] __initconst = {
+ "dpll_core_m7x2_ck",
+ "dpll_per_m7x2_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_gpu_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_sgx_clk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l3_gfx_clkctrl_regs[] __initconst = {
+ { OMAP4_GPU_CLKCTRL, omap4_gpu_bit_data, CLKF_SW_SUP, "l3-gfx-clkctrl:0000:24" },
+ { 0 },
+};
+
+static const char * const omap4_hsmmc1_fclk_parents[] __initconst = {
+ "func_64m_fclk",
+ "func_96m_fclk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_mmc1_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_hsmmc1_fclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_mmc2_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_hsmmc1_fclk_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_hsi_fck_parents[] __initconst = {
+ "dpll_per_m2x2_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data omap4_hsi_fck_data __initconst = {
+ .max_div = 4,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+};
+
+static const struct omap_clkctrl_bit_data omap4_hsi_bit_data[] __initconst = {
+ { 24, TI_CLK_DIVIDER, omap4_hsi_fck_parents, &omap4_hsi_fck_data },
+ { 0 },
+};
+
+static const char * const omap4_usb_host_hs_utmi_p1_clk_parents[] __initconst = {
+ "l3-init-clkctrl:0038:24",
+ NULL,
+};
+
+static const char * const omap4_usb_host_hs_utmi_p2_clk_parents[] __initconst = {
+ "l3-init-clkctrl:0038:25",
+ NULL,
+};
+
+static const char * const omap4_usb_host_hs_utmi_p3_clk_parents[] __initconst = {
+ "init_60m_fclk",
+ NULL,
+};
+
+static const char * const omap4_usb_host_hs_hsic480m_p1_clk_parents[] __initconst = {
+ "dpll_usb_m2_ck",
+ NULL,
+};
+
+static const char * const omap4_utmi_p1_gfclk_parents[] __initconst = {
+ "init_60m_fclk",
+ "xclk60mhsp1_ck",
+ NULL,
+};
+
+static const char * const omap4_utmi_p2_gfclk_parents[] __initconst = {
+ "init_60m_fclk",
+ "xclk60mhsp2_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_usb_host_hs_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_usb_host_hs_utmi_p1_clk_parents, NULL },
+ { 9, TI_CLK_GATE, omap4_usb_host_hs_utmi_p2_clk_parents, NULL },
+ { 10, TI_CLK_GATE, omap4_usb_host_hs_utmi_p3_clk_parents, NULL },
+ { 11, TI_CLK_GATE, omap4_usb_host_hs_utmi_p3_clk_parents, NULL },
+ { 12, TI_CLK_GATE, omap4_usb_host_hs_utmi_p3_clk_parents, NULL },
+ { 13, TI_CLK_GATE, omap4_usb_host_hs_hsic480m_p1_clk_parents, NULL },
+ { 14, TI_CLK_GATE, omap4_usb_host_hs_hsic480m_p1_clk_parents, NULL },
+ { 15, TI_CLK_GATE, omap4_dss_48mhz_clk_parents, NULL },
+ { 24, TI_CLK_MUX, omap4_utmi_p1_gfclk_parents, NULL },
+ { 25, TI_CLK_MUX, omap4_utmi_p2_gfclk_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_usb_otg_hs_xclk_parents[] __initconst = {
+ "l3-init-clkctrl:0040:24",
+ NULL,
+};
+
+static const char * const omap4_otg_60m_gfclk_parents[] __initconst = {
+ "utmi_phy_clkout_ck",
+ "xclk60motg_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_usb_otg_hs_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_usb_otg_hs_xclk_parents, NULL },
+ { 24, TI_CLK_MUX, omap4_otg_60m_gfclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_usb_tll_hs_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_usb_host_hs_utmi_p3_clk_parents, NULL },
+ { 9, TI_CLK_GATE, omap4_usb_host_hs_utmi_p3_clk_parents, NULL },
+ { 10, TI_CLK_GATE, omap4_usb_host_hs_utmi_p3_clk_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_ocp2scp_usb_phy_phy_48m_parents[] __initconst = {
+ "func_48m_fclk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_ocp2scp_usb_phy_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_ocp2scp_usb_phy_phy_48m_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l3_init_clkctrl_regs[] __initconst = {
+ { OMAP4_MMC1_CLKCTRL, omap4_mmc1_bit_data, CLKF_SW_SUP, "l3-init-clkctrl:0008:24" },
+ { OMAP4_MMC2_CLKCTRL, omap4_mmc2_bit_data, CLKF_SW_SUP, "l3-init-clkctrl:0010:24" },
+ { OMAP4_HSI_CLKCTRL, omap4_hsi_bit_data, CLKF_HW_SUP, "l3-init-clkctrl:0018:24" },
+ { OMAP4_USB_HOST_HS_CLKCTRL, omap4_usb_host_hs_bit_data, CLKF_SW_SUP, "init_60m_fclk" },
+ { OMAP4_USB_OTG_HS_CLKCTRL, omap4_usb_otg_hs_bit_data, CLKF_HW_SUP, "l3_div_ck" },
+ { OMAP4_USB_TLL_HS_CLKCTRL, omap4_usb_tll_hs_bit_data, CLKF_HW_SUP, "l4_div_ck" },
+ { OMAP4_USB_HOST_FS_CLKCTRL, NULL, CLKF_SW_SUP, "func_48mc_fclk" },
+ { OMAP4_OCP2SCP_USB_PHY_CLKCTRL, omap4_ocp2scp_usb_phy_bit_data, CLKF_HW_SUP, "l3-init-clkctrl:00c0:8" },
+ { 0 },
+};
+
+static const char * const omap4_cm2_dm10_mux_parents[] __initconst = {
+ "sys_clkin_ck",
+ "sys_32k_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer10_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_cm2_dm10_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer11_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_cm2_dm10_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer2_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_cm2_dm10_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer3_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_cm2_dm10_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer4_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_cm2_dm10_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer9_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_cm2_dm10_mux_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_gpio2_dbclk_parents[] __initconst = {
+ "sys_32k_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_gpio2_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_gpio2_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_gpio3_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_gpio2_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_gpio4_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_gpio2_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_gpio5_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_gpio2_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_gpio6_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_gpio2_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_per_mcbsp4_gfclk_parents[] __initconst = {
+ "l4-per-clkctrl:00c0:26",
+ "pad_clks_ck",
+ NULL,
+};
+
+static const char * const omap4_mcbsp4_sync_mux_ck_parents[] __initconst = {
+ "func_96m_fclk",
+ "per_abe_nc_fclk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_mcbsp4_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_per_mcbsp4_gfclk_parents, NULL },
+ { 26, TI_CLK_MUX, omap4_mcbsp4_sync_mux_ck_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_slimbus2_fclk_0_parents[] __initconst = {
+ "func_24mc_fclk",
+ NULL,
+};
+
+static const char * const omap4_slimbus2_fclk_1_parents[] __initconst = {
+ "per_abe_24m_fclk",
+ NULL,
+};
+
+static const char * const omap4_slimbus2_slimbus_clk_parents[] __initconst = {
+ "pad_slimbus_core_clks_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_slimbus2_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_slimbus2_fclk_0_parents, NULL },
+ { 9, TI_CLK_GATE, omap4_slimbus2_fclk_1_parents, NULL },
+ { 10, TI_CLK_GATE, omap4_slimbus2_slimbus_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l4_per_clkctrl_regs[] __initconst = {
+ { OMAP4_TIMER10_CLKCTRL, omap4_timer10_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0008:24" },
+ { OMAP4_TIMER11_CLKCTRL, omap4_timer11_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0010:24" },
+ { OMAP4_TIMER2_CLKCTRL, omap4_timer2_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0018:24" },
+ { OMAP4_TIMER3_CLKCTRL, omap4_timer3_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0020:24" },
+ { OMAP4_TIMER4_CLKCTRL, omap4_timer4_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0028:24" },
+ { OMAP4_TIMER9_CLKCTRL, omap4_timer9_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0030:24" },
+ { OMAP4_ELM_CLKCTRL, NULL, 0, "l4_div_ck" },
+ { OMAP4_GPIO2_CLKCTRL, omap4_gpio2_bit_data, CLKF_HW_SUP, "l4_div_ck" },
+ { OMAP4_GPIO3_CLKCTRL, omap4_gpio3_bit_data, CLKF_HW_SUP, "l4_div_ck" },
+ { OMAP4_GPIO4_CLKCTRL, omap4_gpio4_bit_data, CLKF_HW_SUP, "l4_div_ck" },
+ { OMAP4_GPIO5_CLKCTRL, omap4_gpio5_bit_data, CLKF_HW_SUP, "l4_div_ck" },
+ { OMAP4_GPIO6_CLKCTRL, omap4_gpio6_bit_data, CLKF_HW_SUP, "l4_div_ck" },
+ { OMAP4_HDQ1W_CLKCTRL, NULL, CLKF_SW_SUP, "func_12m_fclk" },
+ { OMAP4_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { OMAP4_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { OMAP4_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { OMAP4_I2C4_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { OMAP4_L4_PER_CLKCTRL, NULL, 0, "l4_div_ck" },
+ { OMAP4_MCBSP4_CLKCTRL, omap4_mcbsp4_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:00c0:24" },
+ { OMAP4_MCSPI1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP4_MCSPI2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP4_MCSPI3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP4_MCSPI4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP4_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP4_MMC4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP4_SLIMBUS2_CLKCTRL, omap4_slimbus2_bit_data, CLKF_SW_SUP, "l4-per-clkctrl:0118:8" },
+ { OMAP4_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP4_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP4_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP4_UART4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP4_MMC5_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { 0 },
+};
+
+static const struct
+omap_clkctrl_reg_data omap4_l4_secure_clkctrl_regs[] __initconst = {
+ { OMAP4_AES1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_div_ck" },
+ { OMAP4_AES2_CLKCTRL, NULL, CLKF_SW_SUP, "l3_div_ck" },
+ { OMAP4_DES3DES_CLKCTRL, NULL, CLKF_SW_SUP, "l4_div_ck" },
+ { OMAP4_PKA_CLKCTRL, NULL, CLKF_SW_SUP, "l4_div_ck" },
+ { OMAP4_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l4_div_ck" },
+ { OMAP4_SHA2MD5_CLKCTRL, NULL, CLKF_SW_SUP, "l3_div_ck" },
+ { OMAP4_CRYPTODMA_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l3_div_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_gpio1_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_gpio2_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer1_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_cm2_dm10_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l4_wkup_clkctrl_regs[] __initconst = {
+ { OMAP4_L4_WKUP_CLKCTRL, NULL, 0, "l4_wkup_clk_mux_ck" },
+ { OMAP4_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
+ { OMAP4_GPIO1_CLKCTRL, omap4_gpio1_bit_data, CLKF_HW_SUP, "l4_wkup_clk_mux_ck" },
+ { OMAP4_TIMER1_CLKCTRL, omap4_timer1_bit_data, CLKF_SW_SUP, "l4-wkup-clkctrl:0020:24" },
+ { OMAP4_COUNTER_32K_CLKCTRL, NULL, 0, "sys_32k_ck" },
+ { OMAP4_KBD_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
+ { 0 },
+};
+
+static const char * const omap4_pmd_stm_clock_mux_ck_parents[] __initconst = {
+ "sys_clkin_ck",
+ "dpll_core_m6x2_ck",
+ "tie_low_clock_ck",
+ NULL,
+};
+
+static const char * const omap4_trace_clk_div_div_ck_parents[] __initconst = {
+ "emu-sys-clkctrl:0000:22",
+ NULL,
+};
+
+static const int omap4_trace_clk_div_div_ck_divs[] __initconst = {
+ 0,
+ 1,
+ 2,
+ 0,
+ 4,
+ -1,
+};
+
+static const struct omap_clkctrl_div_data omap4_trace_clk_div_div_ck_data __initconst = {
+ .dividers = omap4_trace_clk_div_div_ck_divs,
+};
+
+static const char * const omap4_stm_clk_div_ck_parents[] __initconst = {
+ "emu-sys-clkctrl:0000:20",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data omap4_stm_clk_div_ck_data __initconst = {
+ .max_div = 64,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+};
+
+static const struct omap_clkctrl_bit_data omap4_debugss_bit_data[] __initconst = {
+ { 20, TI_CLK_MUX, omap4_pmd_stm_clock_mux_ck_parents, NULL },
+ { 22, TI_CLK_MUX, omap4_pmd_stm_clock_mux_ck_parents, NULL },
+ { 24, TI_CLK_DIVIDER, omap4_trace_clk_div_div_ck_parents, &omap4_trace_clk_div_div_ck_data },
+ { 27, TI_CLK_DIVIDER, omap4_stm_clk_div_ck_parents, &omap4_stm_clk_div_ck_data },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_emu_sys_clkctrl_regs[] __initconst = {
+ { OMAP4_DEBUGSS_CLKCTRL, omap4_debugss_bit_data, 0, "trace_clk_div_ck" },
+ { 0 },
+};
+
+const struct omap_clkctrl_data omap4_clkctrl_data[] __initconst = {
+ { 0x4a004320, omap4_mpuss_clkctrl_regs },
+ { 0x4a004420, omap4_tesla_clkctrl_regs },
+ { 0x4a004520, omap4_abe_clkctrl_regs },
+ { 0x4a008620, omap4_l4_ao_clkctrl_regs },
+ { 0x4a008720, omap4_l3_1_clkctrl_regs },
+ { 0x4a008820, omap4_l3_2_clkctrl_regs },
+ { 0x4a008920, omap4_ducati_clkctrl_regs },
+ { 0x4a008a20, omap4_l3_dma_clkctrl_regs },
+ { 0x4a008b20, omap4_l3_emif_clkctrl_regs },
+ { 0x4a008c20, omap4_d2d_clkctrl_regs },
+ { 0x4a008d20, omap4_l4_cfg_clkctrl_regs },
+ { 0x4a008e20, omap4_l3_instr_clkctrl_regs },
+ { 0x4a008f20, omap4_ivahd_clkctrl_regs },
+ { 0x4a009020, omap4_iss_clkctrl_regs },
+ { 0x4a009120, omap4_l3_dss_clkctrl_regs },
+ { 0x4a009220, omap4_l3_gfx_clkctrl_regs },
+ { 0x4a009320, omap4_l3_init_clkctrl_regs },
+ { 0x4a009420, omap4_l4_per_clkctrl_regs },
+ { 0x4a0095a0, omap4_l4_secure_clkctrl_regs },
+ { 0x4a307820, omap4_l4_wkup_clkctrl_regs },
+ { 0x4a307a20, omap4_emu_sys_clkctrl_regs },
+ { 0 },
+};
+
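+/*
+ * The alias targets below use the clkctrl clock naming convention
+ * "<clkctrl node name>:<register offset>:<bit shift>", e.g.
+ * "abe-clkctrl:0008:24" is bit 24 of the register at offset 0x8 in the
+ * ABE clkctrl range.
+ */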
+static struct ti_dt_clk omap44xx_clks[] = {
+ DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
+ /*
+ * XXX: All the clock aliases below are only needed for legacy
+ * hwmod support. Once hwmod is removed, these can be removed
+ * also.
+ */
+ DT_CLK(NULL, "aess_fclk", "abe-clkctrl:0008:24"),
+ DT_CLK(NULL, "cm2_dm10_mux", "l4-per-clkctrl:0008:24"),
+ DT_CLK(NULL, "cm2_dm11_mux", "l4-per-clkctrl:0010:24"),
+ DT_CLK(NULL, "cm2_dm2_mux", "l4-per-clkctrl:0018:24"),
+ DT_CLK(NULL, "cm2_dm3_mux", "l4-per-clkctrl:0020:24"),
+ DT_CLK(NULL, "cm2_dm4_mux", "l4-per-clkctrl:0028:24"),
+ DT_CLK(NULL, "cm2_dm9_mux", "l4-per-clkctrl:0030:24"),
+ DT_CLK(NULL, "dmic_sync_mux_ck", "abe-clkctrl:0018:26"),
+ DT_CLK(NULL, "dmt1_clk_mux", "l4-wkup-clkctrl:0020:24"),
+ DT_CLK(NULL, "dss_48mhz_clk", "l3-dss-clkctrl:0000:9"),
+ DT_CLK(NULL, "dss_dss_clk", "l3-dss-clkctrl:0000:8"),
+ DT_CLK(NULL, "dss_sys_clk", "l3-dss-clkctrl:0000:10"),
+ DT_CLK(NULL, "dss_tv_clk", "l3-dss-clkctrl:0000:11"),
+ DT_CLK(NULL, "fdif_fck", "iss-clkctrl:0008:24"),
+ DT_CLK(NULL, "func_dmic_abe_gfclk", "abe-clkctrl:0018:24"),
+ DT_CLK(NULL, "func_mcasp_abe_gfclk", "abe-clkctrl:0020:24"),
+ DT_CLK(NULL, "func_mcbsp1_gfclk", "abe-clkctrl:0028:24"),
+ DT_CLK(NULL, "func_mcbsp2_gfclk", "abe-clkctrl:0030:24"),
+ DT_CLK(NULL, "func_mcbsp3_gfclk", "abe-clkctrl:0038:24"),
+ DT_CLK(NULL, "gpio1_dbclk", "l4-wkup-clkctrl:0018:8"),
+ DT_CLK(NULL, "gpio2_dbclk", "l4-per-clkctrl:0040:8"),
+ DT_CLK(NULL, "gpio3_dbclk", "l4-per-clkctrl:0048:8"),
+ DT_CLK(NULL, "gpio4_dbclk", "l4-per-clkctrl:0050:8"),
+ DT_CLK(NULL, "gpio5_dbclk", "l4-per-clkctrl:0058:8"),
+ DT_CLK(NULL, "gpio6_dbclk", "l4-per-clkctrl:0060:8"),
+ DT_CLK(NULL, "hsi_fck", "l3-init-clkctrl:0018:24"),
+ DT_CLK(NULL, "hsmmc1_fclk", "l3-init-clkctrl:0008:24"),
+ DT_CLK(NULL, "hsmmc2_fclk", "l3-init-clkctrl:0010:24"),
+ DT_CLK(NULL, "iss_ctrlclk", "iss-clkctrl:0000:8"),
+ DT_CLK(NULL, "mcasp_sync_mux_ck", "abe-clkctrl:0020:26"),
+ DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"),
+ DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"),
+ DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"),
+ DT_CLK("40122000.mcbsp", "prcm_fck", "abe-clkctrl:0028:26"),
+ DT_CLK("40124000.mcbsp", "prcm_fck", "abe-clkctrl:0030:26"),
+ DT_CLK("40126000.mcbsp", "prcm_fck", "abe-clkctrl:0038:26"),
+ DT_CLK(NULL, "mcbsp4_sync_mux_ck", "l4-per-clkctrl:00c0:26"),
+ DT_CLK("48096000.mcbsp", "prcm_fck", "l4-per-clkctrl:00c0:26"),
+ DT_CLK(NULL, "ocp2scp_usb_phy_phy_48m", "l3-init-clkctrl:00c0:8"),
+ DT_CLK(NULL, "otg_60m_gfclk", "l3-init-clkctrl:0040:24"),
+ DT_CLK(NULL, "pad_fck", "pad_clks_ck"),
+ DT_CLK(NULL, "per_mcbsp4_gfclk", "l4-per-clkctrl:00c0:24"),
+ DT_CLK(NULL, "pmd_stm_clock_mux_ck", "emu-sys-clkctrl:0000:20"),
+ DT_CLK(NULL, "pmd_trace_clk_mux_ck", "emu-sys-clkctrl:0000:22"),
+ DT_CLK(NULL, "sgx_clk_mux", "l3-gfx-clkctrl:0000:24"),
+ DT_CLK(NULL, "slimbus1_fclk_0", "abe-clkctrl:0040:8"),
+ DT_CLK(NULL, "slimbus1_fclk_1", "abe-clkctrl:0040:9"),
+ DT_CLK(NULL, "slimbus1_fclk_2", "abe-clkctrl:0040:10"),
+ DT_CLK(NULL, "slimbus1_slimbus_clk", "abe-clkctrl:0040:11"),
+ DT_CLK(NULL, "slimbus2_fclk_0", "l4-per-clkctrl:0118:8"),
+ DT_CLK(NULL, "slimbus2_fclk_1", "l4-per-clkctrl:0118:9"),
+ DT_CLK(NULL, "slimbus2_slimbus_clk", "l4-per-clkctrl:0118:10"),
+ DT_CLK(NULL, "stm_clk_div_ck", "emu-sys-clkctrl:0000:27"),
+ DT_CLK(NULL, "timer5_sync_mux", "abe-clkctrl:0048:24"),
+ DT_CLK(NULL, "timer6_sync_mux", "abe-clkctrl:0050:24"),
+ DT_CLK(NULL, "timer7_sync_mux", "abe-clkctrl:0058:24"),
+ DT_CLK(NULL, "timer8_sync_mux", "abe-clkctrl:0060:24"),
+ DT_CLK(NULL, "trace_clk_div_div_ck", "emu-sys-clkctrl:0000:24"),
+ DT_CLK(NULL, "usb_host_hs_func48mclk", "l3-init-clkctrl:0038:15"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3-init-clkctrl:0038:13"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3-init-clkctrl:0038:14"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3-init-clkctrl:0038:11"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3-init-clkctrl:0038:12"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "l3-init-clkctrl:0038:8"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3-init-clkctrl:0038:9"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "l3_init-clkctrl:0038:10"),
+ DT_CLK(NULL, "usb_otg_hs_xclk", "l3-init-clkctrl:0040:8"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3-init-clkctrl:0048:8"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3-init-clkctrl:0048:9"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3-init-clkctrl:0048:10"),
+ DT_CLK(NULL, "utmi_p1_gfclk", "l3-init-clkctrl:0038:24"),
+ DT_CLK(NULL, "utmi_p2_gfclk", "l3-init-clkctrl:0038:25"),
+ { .node_name = NULL },
+};
+
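+/*
+ * Registers the OMAP4 DT clocks, disables clock autoidle, adds the
+ * clkctrl clock aliases and programs the default USB and ABE DPLL
+ * rates defined earlier in this file.
+ */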
+int __init omap4xxx_dt_clk_init(void)
+{
+ int rc;
+ struct clk *abe_dpll_ref, *abe_dpll, *sys_32k_ck, *usb_dpll;
+
+ ti_dt_clocks_register(omap44xx_clks);
+
+ omap2_clk_disable_autoidle_all();
+
+ ti_clk_add_aliases();
+
+ /*
+ * Lock USB DPLL on OMAP4 devices so that the L3INIT power
+ * domain can transition to retention state when not in use.
+ */
+ usb_dpll = clk_get_sys(NULL, "dpll_usb_ck");
+ rc = clk_set_rate(usb_dpll, OMAP4_DPLL_USB_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure USB DPLL!\n", __func__);
+
+ /*
+ * On OMAP4460 the ABE DPLL fails to turn on if it is in the idle
+ * low-power state when the ABE clock domain is woken up. Work around
+ * this by locking the ABE DPLL on boot.
+ * Lock the ABE DPLL in any case to avoid issues with audio.
+ */
+ abe_dpll_ref = clk_get_sys(NULL, "abe_dpll_refclk_mux_ck");
+ sys_32k_ck = clk_get_sys(NULL, "sys_32k_ck");
+ rc = clk_set_parent(abe_dpll_ref, sys_32k_ck);
+ abe_dpll = clk_get_sys(NULL, "dpll_abe_ck");
+ if (!rc)
+ rc = clk_set_rate(abe_dpll, OMAP4_DPLL_ABE_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure ABE DPLL!\n", __func__);
+
+ return 0;
+}
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
new file mode 100644
index 000000000..74dfd5823
--- /dev/null
+++ b/drivers/clk/ti/clk-54xx.c
@@ -0,0 +1,658 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * OMAP5 Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo (t-kristo@ti.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/clk/ti.h>
+#include <dt-bindings/clock/omap5.h>
+
+#include "clock.h"
+
+#define OMAP5_DPLL_ABE_DEFFREQ 98304000
+
+/*
+ * OMAP543x TRM, section "3.6.3.9.5 DPLL_USB Preferred Settings"
+ * states that DPLL_USB must run at 960 MHz.
+ */
+#define OMAP5_DPLL_USB_DEFFREQ 960000000
+
+static const struct omap_clkctrl_reg_data omap5_mpu_clkctrl_regs[] __initconst = {
+ { OMAP5_MPU_CLKCTRL, NULL, 0, "dpll_mpu_m2_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap5_dsp_clkctrl_regs[] __initconst = {
+ { OMAP5_MMU_DSP_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_iva_h11x2_ck" },
+ { 0 },
+};
+
+static const char * const omap5_aess_fclk_parents[] __initconst = {
+ "abe_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data omap5_aess_fclk_data __initconst = {
+ .max_div = 2,
+};
+
+static const struct omap_clkctrl_bit_data omap5_aess_bit_data[] __initconst = {
+ { 24, TI_CLK_DIVIDER, omap5_aess_fclk_parents, &omap5_aess_fclk_data },
+ { 0 },
+};
+
+static const char * const omap5_dmic_gfclk_parents[] __initconst = {
+ "abe-clkctrl:0018:26",
+ "pad_clks_ck",
+ "slimbus_clk",
+ NULL,
+};
+
+static const char * const omap5_dmic_sync_mux_ck_parents[] __initconst = {
+ "abe_24m_fclk",
+ "dss_syc_gfclk_div",
+ "func_24m_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap5_dmic_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap5_dmic_gfclk_parents, NULL },
+ { 26, TI_CLK_MUX, omap5_dmic_sync_mux_ck_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap5_mcbsp1_gfclk_parents[] __initconst = {
+ "abe-clkctrl:0028:26",
+ "pad_clks_ck",
+ "slimbus_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap5_mcbsp1_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap5_mcbsp1_gfclk_parents, NULL },
+ { 26, TI_CLK_MUX, omap5_dmic_sync_mux_ck_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap5_mcbsp2_gfclk_parents[] __initconst = {
+ "abe-clkctrl:0030:26",
+ "pad_clks_ck",
+ "slimbus_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap5_mcbsp2_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap5_mcbsp2_gfclk_parents, NULL },
+ { 26, TI_CLK_MUX, omap5_dmic_sync_mux_ck_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap5_mcbsp3_gfclk_parents[] __initconst = {
+ "abe-clkctrl:0038:26",
+ "pad_clks_ck",
+ "slimbus_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap5_mcbsp3_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap5_mcbsp3_gfclk_parents, NULL },
+ { 26, TI_CLK_MUX, omap5_dmic_sync_mux_ck_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap5_timer5_gfclk_mux_parents[] __initconst = {
+ "dss_syc_gfclk_div",
+ "sys_32k_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap5_timer5_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap5_timer5_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap5_timer6_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap5_timer5_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap5_timer7_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap5_timer5_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap5_timer8_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap5_timer5_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap5_abe_clkctrl_regs[] __initconst = {
+ { OMAP5_L4_ABE_CLKCTRL, NULL, 0, "abe_iclk" },
+ { OMAP5_AESS_CLKCTRL, omap5_aess_bit_data, CLKF_SW_SUP, "abe-clkctrl:0008:24" },
+ { OMAP5_MCPDM_CLKCTRL, NULL, CLKF_SW_SUP, "pad_clks_ck" },
+ { OMAP5_DMIC_CLKCTRL, omap5_dmic_bit_data, CLKF_SW_SUP, "abe-clkctrl:0018:24" },
+ { OMAP5_MCBSP1_CLKCTRL, omap5_mcbsp1_bit_data, CLKF_SW_SUP, "abe-clkctrl:0028:24" },
+ { OMAP5_MCBSP2_CLKCTRL, omap5_mcbsp2_bit_data, CLKF_SW_SUP, "abe-clkctrl:0030:24" },
+ { OMAP5_MCBSP3_CLKCTRL, omap5_mcbsp3_bit_data, CLKF_SW_SUP, "abe-clkctrl:0038:24" },
+ { OMAP5_TIMER5_CLKCTRL, omap5_timer5_bit_data, CLKF_SW_SUP, "abe-clkctrl:0048:24" },
+ { OMAP5_TIMER6_CLKCTRL, omap5_timer6_bit_data, CLKF_SW_SUP, "abe-clkctrl:0050:24" },
+ { OMAP5_TIMER7_CLKCTRL, omap5_timer7_bit_data, CLKF_SW_SUP, "abe-clkctrl:0058:24" },
+ { OMAP5_TIMER8_CLKCTRL, omap5_timer8_bit_data, CLKF_SW_SUP, "abe-clkctrl:0060:24" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap5_l3main1_clkctrl_regs[] __initconst = {
+ { OMAP5_L3_MAIN_1_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap5_l3main2_clkctrl_regs[] __initconst = {
+ { OMAP5_L3_MAIN_2_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { OMAP5_L3_MAIN_2_GPMC_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { OMAP5_L3_MAIN_2_OCMC_RAM_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap5_ipu_clkctrl_regs[] __initconst = {
+ { OMAP5_MMU_IPU_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_core_h22x2_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap5_dma_clkctrl_regs[] __initconst = {
+ { OMAP5_DMA_SYSTEM_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap5_emif_clkctrl_regs[] __initconst = {
+ { OMAP5_DMM_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { OMAP5_EMIF1_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h11x2_ck" },
+ { OMAP5_EMIF2_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h11x2_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap5_l4cfg_clkctrl_regs[] __initconst = {
+ { OMAP5_L4_CFG_CLKCTRL, NULL, 0, "l4_root_clk_div" },
+ { OMAP5_SPINLOCK_CLKCTRL, NULL, 0, "l4_root_clk_div" },
+ { OMAP5_MAILBOX_CLKCTRL, NULL, 0, "l4_root_clk_div" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap5_l3instr_clkctrl_regs[] __initconst = {
+ { OMAP5_L3_MAIN_3_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { OMAP5_L3_INSTR_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { 0 },
+};
+
+static const char * const omap5_timer10_gfclk_mux_parents[] __initconst = {
+ "sys_clkin",
+ "sys_32k_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap5_timer10_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap5_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap5_timer11_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap5_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap5_timer2_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap5_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap5_timer3_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap5_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap5_timer4_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap5_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap5_timer9_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap5_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap5_gpio2_dbclk_parents[] __initconst = {
+ "sys_32k_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap5_gpio2_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap5_gpio2_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap5_gpio3_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap5_gpio2_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap5_gpio4_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap5_gpio2_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap5_gpio5_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap5_gpio2_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap5_gpio6_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap5_gpio2_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap5_gpio7_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap5_gpio2_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap5_gpio8_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap5_gpio2_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap5_l4per_clkctrl_regs[] __initconst = {
+ { OMAP5_TIMER10_CLKCTRL, omap5_timer10_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0008:24" },
+ { OMAP5_TIMER11_CLKCTRL, omap5_timer11_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0010:24" },
+ { OMAP5_TIMER2_CLKCTRL, omap5_timer2_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0018:24" },
+ { OMAP5_TIMER3_CLKCTRL, omap5_timer3_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0020:24" },
+ { OMAP5_TIMER4_CLKCTRL, omap5_timer4_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0028:24" },
+ { OMAP5_TIMER9_CLKCTRL, omap5_timer9_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0030:24" },
+ { OMAP5_GPIO2_CLKCTRL, omap5_gpio2_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
+ { OMAP5_GPIO3_CLKCTRL, omap5_gpio3_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
+ { OMAP5_GPIO4_CLKCTRL, omap5_gpio4_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
+ { OMAP5_GPIO5_CLKCTRL, omap5_gpio5_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
+ { OMAP5_GPIO6_CLKCTRL, omap5_gpio6_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
+ { OMAP5_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { OMAP5_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { OMAP5_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { OMAP5_I2C4_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { OMAP5_L4_PER_CLKCTRL, NULL, 0, "l4_root_clk_div" },
+ { OMAP5_MCSPI1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP5_MCSPI2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP5_MCSPI3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP5_MCSPI4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP5_GPIO7_CLKCTRL, omap5_gpio7_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
+ { OMAP5_GPIO8_CLKCTRL, omap5_gpio8_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
+ { OMAP5_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP5_MMC4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP5_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP5_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP5_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP5_UART4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP5_MMC5_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { OMAP5_I2C5_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { OMAP5_UART5_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP5_UART6_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { 0 },
+};
+
+static const struct
+omap_clkctrl_reg_data omap5_l4_secure_clkctrl_regs[] __initconst = {
+ { OMAP5_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { OMAP5_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { OMAP5_DES3DES_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" },
+ { OMAP5_FPKA_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div" },
+ { OMAP5_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l4_root_clk_div" },
+ { OMAP5_SHA2MD5_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { OMAP5_DMA_CRYPTO_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l3_iclk_div" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap5_iva_clkctrl_regs[] __initconst = {
+ { OMAP5_IVA_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_h12x2_ck" },
+ { OMAP5_SL2IF_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_h12x2_ck" },
+ { 0 },
+};
+
+static const char * const omap5_dss_dss_clk_parents[] __initconst = {
+ "dpll_per_h12x2_ck",
+ NULL,
+};
+
+static const char * const omap5_dss_48mhz_clk_parents[] __initconst = {
+ "func_48m_fclk",
+ NULL,
+};
+
+static const char * const omap5_dss_sys_clk_parents[] __initconst = {
+ "dss_syc_gfclk_div",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap5_dss_core_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap5_dss_dss_clk_parents, NULL },
+ { 9, TI_CLK_GATE, omap5_dss_48mhz_clk_parents, NULL },
+ { 10, TI_CLK_GATE, omap5_dss_sys_clk_parents, NULL },
+ { 11, TI_CLK_GATE, omap5_gpio2_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap5_dss_clkctrl_regs[] __initconst = {
+ { OMAP5_DSS_CORE_CLKCTRL, omap5_dss_core_bit_data, CLKF_SW_SUP, "dss-clkctrl:0000:8" },
+ { 0 },
+};
+
+static const char * const omap5_gpu_core_mux_parents[] __initconst = {
+ "dpll_core_h14x2_ck",
+ "dpll_per_h14x2_ck",
+ NULL,
+};
+
+static const char * const omap5_gpu_hyd_mux_parents[] __initconst = {
+ "dpll_core_h14x2_ck",
+ "dpll_per_h14x2_ck",
+ NULL,
+};
+
+static const char * const omap5_gpu_sys_clk_parents[] __initconst = {
+ "sys_clkin",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data omap5_gpu_sys_clk_data __initconst = {
+ .max_div = 2,
+};
+
+static const struct omap_clkctrl_bit_data omap5_gpu_core_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap5_gpu_core_mux_parents, NULL },
+ { 25, TI_CLK_MUX, omap5_gpu_hyd_mux_parents, NULL },
+ { 26, TI_CLK_DIVIDER, omap5_gpu_sys_clk_parents, &omap5_gpu_sys_clk_data },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap5_gpu_clkctrl_regs[] __initconst = {
+ { OMAP5_GPU_CLKCTRL, omap5_gpu_core_bit_data, CLKF_SW_SUP, "gpu-clkctrl:0000:24" },
+ { 0 },
+};
+
+static const char * const omap5_mmc1_fclk_mux_parents[] __initconst = {
+ "func_128m_clk",
+ "dpll_per_m2x2_ck",
+ NULL,
+};
+
+static const char * const omap5_mmc1_fclk_parents[] __initconst = {
+ "l3init-clkctrl:0008:24",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data omap5_mmc1_fclk_data __initconst = {
+ .max_div = 2,
+};
+
+static const struct omap_clkctrl_bit_data omap5_mmc1_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap5_gpio2_dbclk_parents, NULL },
+ { 24, TI_CLK_MUX, omap5_mmc1_fclk_mux_parents, NULL },
+ { 25, TI_CLK_DIVIDER, omap5_mmc1_fclk_parents, &omap5_mmc1_fclk_data },
+ { 0 },
+};
+
+static const char * const omap5_mmc2_fclk_parents[] __initconst = {
+ "l3init-clkctrl:0010:24",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data omap5_mmc2_fclk_data __initconst = {
+ .max_div = 2,
+};
+
+static const struct omap_clkctrl_bit_data omap5_mmc2_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap5_mmc1_fclk_mux_parents, NULL },
+ { 25, TI_CLK_DIVIDER, omap5_mmc2_fclk_parents, &omap5_mmc2_fclk_data },
+ { 0 },
+};
+
+static const char * const omap5_usb_host_hs_hsic60m_p3_clk_parents[] __initconst = {
+ "l3init_60m_fclk",
+ NULL,
+};
+
+static const char * const omap5_usb_host_hs_hsic480m_p3_clk_parents[] __initconst = {
+ "dpll_usb_m2_ck",
+ NULL,
+};
+
+static const char * const omap5_usb_host_hs_utmi_p1_clk_parents[] __initconst = {
+ "l3init-clkctrl:0038:24",
+ NULL,
+};
+
+static const char * const omap5_usb_host_hs_utmi_p2_clk_parents[] __initconst = {
+ "l3init-clkctrl:0038:25",
+ NULL,
+};
+
+static const char * const omap5_utmi_p1_gfclk_parents[] __initconst = {
+ "l3init_60m_fclk",
+ "xclk60mhsp1_ck",
+ NULL,
+};
+
+static const char * const omap5_utmi_p2_gfclk_parents[] __initconst = {
+ "l3init_60m_fclk",
+ "xclk60mhsp2_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap5_usb_host_hs_bit_data[] __initconst = {
+ { 6, TI_CLK_GATE, omap5_usb_host_hs_hsic60m_p3_clk_parents, NULL },
+ { 7, TI_CLK_GATE, omap5_usb_host_hs_hsic480m_p3_clk_parents, NULL },
+ { 8, TI_CLK_GATE, omap5_usb_host_hs_utmi_p1_clk_parents, NULL },
+ { 9, TI_CLK_GATE, omap5_usb_host_hs_utmi_p2_clk_parents, NULL },
+ { 10, TI_CLK_GATE, omap5_usb_host_hs_hsic60m_p3_clk_parents, NULL },
+ { 11, TI_CLK_GATE, omap5_usb_host_hs_hsic60m_p3_clk_parents, NULL },
+ { 12, TI_CLK_GATE, omap5_usb_host_hs_hsic60m_p3_clk_parents, NULL },
+ { 13, TI_CLK_GATE, omap5_usb_host_hs_hsic480m_p3_clk_parents, NULL },
+ { 14, TI_CLK_GATE, omap5_usb_host_hs_hsic480m_p3_clk_parents, NULL },
+ { 24, TI_CLK_MUX, omap5_utmi_p1_gfclk_parents, NULL },
+ { 25, TI_CLK_MUX, omap5_utmi_p2_gfclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap5_usb_tll_hs_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap5_usb_host_hs_hsic60m_p3_clk_parents, NULL },
+ { 9, TI_CLK_GATE, omap5_usb_host_hs_hsic60m_p3_clk_parents, NULL },
+ { 10, TI_CLK_GATE, omap5_usb_host_hs_hsic60m_p3_clk_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap5_sata_ref_clk_parents[] __initconst = {
+ "sys_clkin",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap5_sata_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap5_sata_ref_clk_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap5_usb_otg_ss_refclk960m_parents[] __initconst = {
+ "dpll_usb_clkdcoldo",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap5_usb_otg_ss_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap5_usb_otg_ss_refclk960m_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap5_l3init_clkctrl_regs[] __initconst = {
+ { OMAP5_MMC1_CLKCTRL, omap5_mmc1_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0008:25" },
+ { OMAP5_MMC2_CLKCTRL, omap5_mmc2_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0010:25" },
+ { OMAP5_USB_HOST_HS_CLKCTRL, omap5_usb_host_hs_bit_data, CLKF_SW_SUP, "l3init_60m_fclk" },
+ { OMAP5_USB_TLL_HS_CLKCTRL, omap5_usb_tll_hs_bit_data, CLKF_HW_SUP, "l4_root_clk_div" },
+ { OMAP5_SATA_CLKCTRL, omap5_sata_bit_data, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP5_OCP2SCP1_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" },
+ { OMAP5_OCP2SCP3_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" },
+ { OMAP5_USB_OTG_SS_CLKCTRL, omap5_usb_otg_ss_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap5_gpio1_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap5_gpio2_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap5_timer1_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap5_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap5_wkupaon_clkctrl_regs[] __initconst = {
+ { OMAP5_L4_WKUP_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
+ { OMAP5_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
+ { OMAP5_GPIO1_CLKCTRL, omap5_gpio1_bit_data, CLKF_HW_SUP, "wkupaon_iclk_mux" },
+ { OMAP5_TIMER1_CLKCTRL, omap5_timer1_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0020:24" },
+ { OMAP5_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
+ { OMAP5_KBD_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
+ { 0 },
+};
+
+const struct omap_clkctrl_data omap5_clkctrl_data[] __initconst = {
+ { 0x4a004320, omap5_mpu_clkctrl_regs },
+ { 0x4a004420, omap5_dsp_clkctrl_regs },
+ { 0x4a004520, omap5_abe_clkctrl_regs },
+ { 0x4a008720, omap5_l3main1_clkctrl_regs },
+ { 0x4a008820, omap5_l3main2_clkctrl_regs },
+ { 0x4a008920, omap5_ipu_clkctrl_regs },
+ { 0x4a008a20, omap5_dma_clkctrl_regs },
+ { 0x4a008b20, omap5_emif_clkctrl_regs },
+ { 0x4a008d20, omap5_l4cfg_clkctrl_regs },
+ { 0x4a008e20, omap5_l3instr_clkctrl_regs },
+ { 0x4a009020, omap5_l4per_clkctrl_regs },
+ { 0x4a0091a0, omap5_l4_secure_clkctrl_regs },
+ { 0x4a009220, omap5_iva_clkctrl_regs },
+ { 0x4a009420, omap5_dss_clkctrl_regs },
+ { 0x4a009520, omap5_gpu_clkctrl_regs },
+ { 0x4a009620, omap5_l3init_clkctrl_regs },
+ { 0x4ae07920, omap5_wkupaon_clkctrl_regs },
+ { 0 },
+};
+
+static struct ti_dt_clk omap54xx_clks[] = {
+ DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
+ DT_CLK(NULL, "sys_clkin_ck", "sys_clkin"),
+ DT_CLK(NULL, "dmic_gfclk", "abe-clkctrl:0018:24"),
+ DT_CLK(NULL, "dmic_sync_mux_ck", "abe-clkctrl:0018:26"),
+ DT_CLK(NULL, "dss_32khz_clk", "dss-clkctrl:0000:11"),
+ DT_CLK(NULL, "dss_48mhz_clk", "dss-clkctrl:0000:9"),
+ DT_CLK(NULL, "dss_dss_clk", "dss-clkctrl:0000:8"),
+ DT_CLK(NULL, "dss_sys_clk", "dss-clkctrl:0000:10"),
+ DT_CLK(NULL, "gpio1_dbclk", "wkupaon-clkctrl:0018:8"),
+ DT_CLK(NULL, "gpio2_dbclk", "l4per-clkctrl:0040:8"),
+ DT_CLK(NULL, "gpio3_dbclk", "l4per-clkctrl:0048:8"),
+ DT_CLK(NULL, "gpio4_dbclk", "l4per-clkctrl:0050:8"),
+ DT_CLK(NULL, "gpio5_dbclk", "l4per-clkctrl:0058:8"),
+ DT_CLK(NULL, "gpio6_dbclk", "l4per-clkctrl:0060:8"),
+ DT_CLK(NULL, "gpio7_dbclk", "l4per-clkctrl:00f0:8"),
+ DT_CLK(NULL, "gpio8_dbclk", "l4per-clkctrl:00f8:8"),
+ DT_CLK(NULL, "mcbsp1_gfclk", "abe-clkctrl:0028:24"),
+ DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"),
+ DT_CLK("40122000.mcbsp", "prcm_fck", "abe-clkctrl:0028:26"),
+ DT_CLK(NULL, "mcbsp2_gfclk", "abe-clkctrl:0030:24"),
+ DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"),
+ DT_CLK("40124000.mcbsp", "prcm_fck", "abe-clkctrl:0030:26"),
+ DT_CLK(NULL, "mcbsp3_gfclk", "abe-clkctrl:0038:24"),
+ DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"),
+ DT_CLK("40126000.mcbsp", "prcm_fck", "abe-clkctrl:0038:26"),
+ DT_CLK(NULL, "mmc1_32khz_clk", "l3init-clkctrl:0008:8"),
+ DT_CLK(NULL, "mmc1_fclk", "l3init-clkctrl:0008:25"),
+ DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"),
+ DT_CLK(NULL, "mmc2_fclk", "l3init-clkctrl:0010:25"),
+ DT_CLK(NULL, "mmc2_fclk_mux", "l3init-clkctrl:0010:24"),
+ DT_CLK(NULL, "pad_fck", "pad_clks_ck"),
+ DT_CLK(NULL, "sata_ref_clk", "l3init-clkctrl:0068:8"),
+ DT_CLK(NULL, "timer10_gfclk_mux", "l4per-clkctrl:0008:24"),
+ DT_CLK(NULL, "timer11_gfclk_mux", "l4per-clkctrl:0010:24"),
+ DT_CLK(NULL, "timer1_gfclk_mux", "wkupaon-clkctrl:0020:24"),
+ DT_CLK(NULL, "timer2_gfclk_mux", "l4per-clkctrl:0018:24"),
+ DT_CLK(NULL, "timer3_gfclk_mux", "l4per-clkctrl:0020:24"),
+ DT_CLK(NULL, "timer4_gfclk_mux", "l4per-clkctrl:0028:24"),
+ DT_CLK(NULL, "timer5_gfclk_mux", "abe-clkctrl:0048:24"),
+ DT_CLK(NULL, "timer6_gfclk_mux", "abe-clkctrl:0050:24"),
+ DT_CLK(NULL, "timer7_gfclk_mux", "abe-clkctrl:0058:24"),
+ DT_CLK(NULL, "timer8_gfclk_mux", "abe-clkctrl:0060:24"),
+ DT_CLK(NULL, "timer9_gfclk_mux", "l4per-clkctrl:0030:24"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p1_clk", "l3init-clkctrl:0038:13"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p2_clk", "l3init-clkctrl:0038:14"),
+ DT_CLK(NULL, "usb_host_hs_hsic480m_p3_clk", "l3init-clkctrl:0038:7"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p1_clk", "l3init-clkctrl:0038:11"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p2_clk", "l3init-clkctrl:0038:12"),
+ DT_CLK(NULL, "usb_host_hs_hsic60m_p3_clk", "l3init-clkctrl:0038:6"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p1_clk", "l3init-clkctrl:0038:8"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p2_clk", "l3init-clkctrl:0038:9"),
+ DT_CLK(NULL, "usb_host_hs_utmi_p3_clk", "l3init-clkctrl:0038:10"),
+ DT_CLK(NULL, "usb_otg_ss_refclk960m", "l3init-clkctrl:00d0:8"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch0_clk", "l3init-clkctrl:0048:8"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch1_clk", "l3init-clkctrl:0048:9"),
+ DT_CLK(NULL, "usb_tll_hs_usb_ch2_clk", "l3init-clkctrl:0048:10"),
+ DT_CLK(NULL, "utmi_p1_gfclk", "l3init-clkctrl:0038:24"),
+ DT_CLK(NULL, "utmi_p2_gfclk", "l3init-clkctrl:0038:25"),
+ { .node_name = NULL },
+};
+
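+/*
+ * Registers the OMAP5 DT clocks and programs the preferred ABE and USB
+ * DPLL rates; the ABE configuration steps are chained on rc so the
+ * remaining ones are skipped once one fails.
+ */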
+int __init omap5xxx_dt_clk_init(void)
+{
+ int rc;
+ struct clk *abe_dpll_ref, *abe_dpll, *abe_dpll_byp, *sys_32k_ck, *usb_dpll;
+
+ ti_dt_clocks_register(omap54xx_clks);
+
+ omap2_clk_disable_autoidle_all();
+
+ ti_clk_add_aliases();
+
+ abe_dpll_ref = clk_get_sys(NULL, "abe_dpll_clk_mux");
+ sys_32k_ck = clk_get_sys(NULL, "sys_32k_ck");
+ rc = clk_set_parent(abe_dpll_ref, sys_32k_ck);
+
+ /*
+ * This mux must also be set to sys_32k_ck to match the reference
+ * clock, or the ABE DPLL will not lock on a warm reboot when ABE
+ * timers are used.
+ */
+ abe_dpll_byp = clk_get_sys(NULL, "abe_dpll_bypass_clk_mux");
+ if (!rc)
+ rc = clk_set_parent(abe_dpll_byp, sys_32k_ck);
+
+ abe_dpll = clk_get_sys(NULL, "dpll_abe_ck");
+ if (!rc)
+ rc = clk_set_rate(abe_dpll, OMAP5_DPLL_ABE_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure ABE DPLL!\n", __func__);
+
+ abe_dpll = clk_get_sys(NULL, "dpll_abe_m2x2_ck");
+ if (!rc)
+ rc = clk_set_rate(abe_dpll, OMAP5_DPLL_ABE_DEFFREQ * 2);
+ if (rc)
+ pr_err("%s: failed to configure ABE m2x2 DPLL!\n", __func__);
+
+ usb_dpll = clk_get_sys(NULL, "dpll_usb_ck");
+ rc = clk_set_rate(usb_dpll, OMAP5_DPLL_USB_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure USB DPLL!\n", __func__);
+
+ usb_dpll = clk_get_sys(NULL, "dpll_usb_m2_ck");
+ rc = clk_set_rate(usb_dpll, OMAP5_DPLL_USB_DEFFREQ/2);
+ if (rc)
+ pr_err("%s: failed to set USB_DPLL M2 OUT\n", __func__);
+
+ return 0;
+}
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
new file mode 100644
index 000000000..0f0994415
--- /dev/null
+++ b/drivers/clk/ti/clk-7xx.c
@@ -0,0 +1,976 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * DRA7 Clock init
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo (t-kristo@ti.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk/ti.h>
+#include <dt-bindings/clock/dra7.h>
+
+#include "clock.h"
+
+#define DRA7_DPLL_GMAC_DEFFREQ 1000000000
+#define DRA7_DPLL_USB_DEFFREQ 960000000
+
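+/*
+ * These default GMAC and USB DPLL rates are programmed by the DRA7
+ * clock init code later in this file.
+ */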
+static const struct omap_clkctrl_reg_data dra7_mpu_clkctrl_regs[] __initconst = {
+ { DRA7_MPU_MPU_CLKCTRL, NULL, 0, "dpll_mpu_m2_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_dsp1_clkctrl_regs[] __initconst = {
+ { DRA7_DSP1_MMU0_DSP1_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_dsp_m2_ck" },
+ { 0 },
+};
+
+static const char * const dra7_ipu1_gfclk_mux_parents[] __initconst = {
+ "dpll_abe_m2x2_ck",
+ "dpll_core_h22x2_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_mmu_ipu1_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_ipu1_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_ipu1_clkctrl_regs[] __initconst = {
+ { DRA7_IPU1_MMU_IPU1_CLKCTRL, dra7_mmu_ipu1_bit_data, CLKF_HW_SUP | CLKF_NO_IDLEST, "ipu1-clkctrl:0000:24" },
+ { 0 },
+};
+
+static const char * const dra7_mcasp1_aux_gfclk_mux_parents[] __initconst = {
+ "per_abe_x1_gfclk2_div",
+ "video1_clk2_div",
+ "video2_clk2_div",
+ "hdmi_clk2_div",
+ NULL,
+};
+
+static const char * const dra7_mcasp1_ahclkx_mux_parents[] __initconst = {
+ "abe_24m_fclk",
+ "abe_sys_clk_div",
+ "func_24m_clk",
+ "atl_clkin3_ck",
+ "atl_clkin2_ck",
+ "atl_clkin1_ck",
+ "atl_clkin0_ck",
+ "sys_clkin2",
+ "ref_clkin0_ck",
+ "ref_clkin1_ck",
+ "ref_clkin2_ck",
+ "ref_clkin3_ck",
+ "mlb_clk",
+ "mlbp_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_mcasp1_bit_data[] __initconst = {
+ { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
+ { 28, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
+ { 0 },
+};
+
+static const char * const dra7_timer5_gfclk_mux_parents[] __initconst = {
+ "timer_sys_clk_div",
+ "sys_32k_ck",
+ "sys_clkin2",
+ "ref_clkin0_ck",
+ "ref_clkin1_ck",
+ "ref_clkin2_ck",
+ "ref_clkin3_ck",
+ "abe_giclk_div",
+ "video1_div_clk",
+ "video2_div_clk",
+ "hdmi_div_clk",
+ "clkoutmux0_clk_mux",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer5_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer5_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer6_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer5_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer7_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer5_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer8_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer5_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const char * const dra7_uart6_gfclk_mux_parents[] __initconst = {
+ "func_48m_fclk",
+ "dpll_per_m2x2_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart6_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_ipu_clkctrl_regs[] __initconst = {
+ { DRA7_IPU_MCASP1_CLKCTRL, dra7_mcasp1_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0000:22" },
+ { DRA7_IPU_TIMER5_CLKCTRL, dra7_timer5_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0008:24" },
+ { DRA7_IPU_TIMER6_CLKCTRL, dra7_timer6_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0010:24" },
+ { DRA7_IPU_TIMER7_CLKCTRL, dra7_timer7_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0018:24" },
+ { DRA7_IPU_TIMER8_CLKCTRL, dra7_timer8_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0020:24" },
+ { DRA7_IPU_I2C5_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { DRA7_IPU_UART6_CLKCTRL, dra7_uart6_bit_data, CLKF_SW_SUP, "ipu-clkctrl:0030:24" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_dsp2_clkctrl_regs[] __initconst = {
+ { DRA7_DSP2_MMU0_DSP2_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_dsp_m2_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_rtc_clkctrl_regs[] __initconst = {
+ { DRA7_RTC_RTCSS_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
+ { 0 },
+};
+
+static const char * const dra7_cam_gfclk_mux_parents[] __initconst = {
+ "l3_iclk_div",
+ "core_iss_main_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_cam_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_cam_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_cam_clkctrl_regs[] __initconst = {
+ { DRA7_CAM_VIP1_CLKCTRL, dra7_cam_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_CAM_VIP2_CLKCTRL, dra7_cam_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_CAM_VIP3_CLKCTRL, dra7_cam_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_vpe_clkctrl_regs[] __initconst = {
+ { DRA7_VPE_VPE_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h23x2_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_coreaon_clkctrl_regs[] __initconst = {
+ { DRA7_COREAON_SMARTREFLEX_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" },
+ { DRA7_COREAON_SMARTREFLEX_CORE_CLKCTRL, NULL, CLKF_SW_SUP, "wkupaon_iclk_mux" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_l3main1_clkctrl_regs[] __initconst = {
+ { DRA7_L3MAIN1_L3_MAIN_1_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L3MAIN1_GPMC_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L3MAIN1_TPCC_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L3MAIN1_TPTC0_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L3MAIN1_TPTC1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L3MAIN1_VCP1_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L3MAIN1_VCP2_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_ipu2_clkctrl_regs[] __initconst = {
+ { DRA7_IPU2_MMU_IPU2_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_core_h22x2_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_dma_clkctrl_regs[] __initconst = {
+ { DRA7_DMA_DMA_SYSTEM_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_emif_clkctrl_regs[] __initconst = {
+ { DRA7_EMIF_DMM_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { 0 },
+};
+
+static const char * const dra7_atl_dpll_clk_mux_parents[] __initconst = {
+ "sys_32k_ck",
+ "video1_clkin_ck",
+ "video2_clkin_ck",
+ "hdmi_clkin_ck",
+ NULL,
+};
+
+static const char * const dra7_atl_gfclk_mux_parents[] __initconst = {
+ "l3_iclk_div",
+ "dpll_abe_m2_ck",
+ "atl-clkctrl:0000:24",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_atl_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_atl_dpll_clk_mux_parents, NULL },
+ { 26, TI_CLK_MUX, dra7_atl_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_atl_clkctrl_regs[] __initconst = {
+ { DRA7_ATL_ATL_CLKCTRL, dra7_atl_bit_data, CLKF_SW_SUP, "atl-clkctrl:0000:26" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_l4cfg_clkctrl_regs[] __initconst = {
+ { DRA7_L4CFG_L4_CFG_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_SPINLOCK_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX1_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX2_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX3_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX4_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX5_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX6_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX7_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX8_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX9_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX10_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX11_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX12_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4CFG_MAILBOX13_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_l3instr_clkctrl_regs[] __initconst = {
+ { DRA7_L3INSTR_L3_MAIN_2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L3INSTR_L3_INSTR_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_iva_clkctrl_regs[] __initconst = {
+ { DRA7_IVA_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_iva_h12x2_ck" },
+ { DRA7_SL2IF_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_h12x2_ck" },
+ { 0 },
+};
+
+static const char * const dra7_dss_dss_clk_parents[] __initconst = {
+ "dpll_per_h12x2_ck",
+ NULL,
+};
+
+static const char * const dra7_dss_48mhz_clk_parents[] __initconst = {
+ "func_48m_fclk",
+ NULL,
+};
+
+static const char * const dra7_dss_hdmi_clk_parents[] __initconst = {
+ "hdmi_dpll_clk_mux",
+ NULL,
+};
+
+static const char * const dra7_dss_32khz_clk_parents[] __initconst = {
+ "sys_32k_ck",
+ NULL,
+};
+
+static const char * const dra7_dss_video1_clk_parents[] __initconst = {
+ "video1_dpll_clk_mux",
+ NULL,
+};
+
+static const char * const dra7_dss_video2_clk_parents[] __initconst = {
+ "video2_dpll_clk_mux",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_dss_core_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_dss_clk_parents, NULL },
+ { 9, TI_CLK_GATE, dra7_dss_48mhz_clk_parents, NULL },
+ { 10, TI_CLK_GATE, dra7_dss_hdmi_clk_parents, NULL },
+ { 11, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 12, TI_CLK_GATE, dra7_dss_video1_clk_parents, NULL },
+ { 13, TI_CLK_GATE, dra7_dss_video2_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_dss_clkctrl_regs[] __initconst = {
+ { DRA7_DSS_DSS_CORE_CLKCTRL, dra7_dss_core_bit_data, CLKF_SW_SUP, "dss-clkctrl:0000:8" },
+ { DRA7_DSS_BB2D_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_core_h24x2_ck" },
+ { 0 },
+};
+
+static const char * const dra7_gpu_core_mux_parents[] __initconst = {
+ "dpll_core_h14x2_ck",
+ "dpll_per_h14x2_ck",
+ "dpll_gpu_m2_ck",
+ NULL,
+};
+
+static const char * const dra7_gpu_hyd_mux_parents[] __initconst = {
+ "dpll_core_h14x2_ck",
+ "dpll_per_h14x2_ck",
+ "dpll_gpu_m2_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_gpu_core_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_gpu_core_mux_parents, NULL, },
+ { 26, TI_CLK_MUX, dra7_gpu_hyd_mux_parents, NULL, },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_gpu_clkctrl_regs[] __initconst = {
+ { DRA7_GPU_CLKCTRL, dra7_gpu_core_bit_data, CLKF_SW_SUP, "gpu-clkctrl:0000:24", },
+ { 0 },
+};
+
+static const char * const dra7_mmc1_fclk_mux_parents[] __initconst = {
+ "func_128m_clk",
+ "dpll_per_m2x2_ck",
+ NULL,
+};
+
+static const char * const dra7_mmc1_fclk_div_parents[] __initconst = {
+ "l3init-clkctrl:0008:24",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data dra7_mmc1_fclk_div_data __initconst = {
+ .max_div = 4,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+};
+
+static const struct omap_clkctrl_bit_data dra7_mmc1_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_mmc1_fclk_mux_parents, NULL },
+ { 25, TI_CLK_DIVIDER, dra7_mmc1_fclk_div_parents, &dra7_mmc1_fclk_div_data },
+ { 0 },
+};
+
+static const char * const dra7_mmc2_fclk_div_parents[] __initconst = {
+ "l3init-clkctrl:0010:24",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data dra7_mmc2_fclk_div_data __initconst = {
+ .max_div = 4,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+};
+
+static const struct omap_clkctrl_bit_data dra7_mmc2_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_mmc1_fclk_mux_parents, NULL },
+ { 25, TI_CLK_DIVIDER, dra7_mmc2_fclk_div_parents, &dra7_mmc2_fclk_div_data },
+ { 0 },
+};
+
+static const char * const dra7_usb_otg_ss2_refclk960m_parents[] __initconst = {
+ "l3init_960m_gfclk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_usb_otg_ss2_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_usb_otg_ss2_refclk960m_parents, NULL },
+ { 0 },
+};
+
+static const char * const dra7_sata_ref_clk_parents[] __initconst = {
+ "sys_clkin1",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_sata_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_sata_ref_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_usb_otg_ss1_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_usb_otg_ss2_refclk960m_parents, NULL },
+ { 0 },
+};
+
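+/*
+ * Entries tagged CLKF_SOC_DRA74 or CLKF_SOC_DRA76 are registered only
+ * on the matching DRA7 variant; CLKF_SOC_NONSEC entries are registered
+ * only on general-purpose (non-HS) devices.
+ */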
+static const struct omap_clkctrl_reg_data dra7_l3init_clkctrl_regs[] __initconst = {
+ { DRA7_L3INIT_MMC1_CLKCTRL, dra7_mmc1_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0008:25" },
+ { DRA7_L3INIT_MMC2_CLKCTRL, dra7_mmc2_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0010:25" },
+ { DRA7_L3INIT_USB_OTG_SS2_CLKCTRL, dra7_usb_otg_ss2_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
+ { DRA7_L3INIT_USB_OTG_SS3_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
+ { DRA7_L3INIT_USB_OTG_SS4_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_DRA74 | CLKF_SOC_DRA76, "dpll_core_h13x2_ck" },
+ { DRA7_L3INIT_SATA_CLKCTRL, dra7_sata_bit_data, CLKF_SW_SUP, "func_48m_fclk" },
+ { DRA7_L3INIT_OCP2SCP1_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" },
+ { DRA7_L3INIT_OCP2SCP3_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" },
+ { DRA7_L3INIT_USB_OTG_SS1_CLKCTRL, dra7_usb_otg_ss1_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
+ { 0 },
+};
+
+static const char * const dra7_optfclk_pciephy1_clk_parents[] __initconst = {
+ "apll_pcie_ck",
+ NULL,
+};
+
+static const char * const dra7_optfclk_pciephy1_div_clk_parents[] __initconst = {
+ "optfclk_pciephy_div",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_pcie1_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 9, TI_CLK_GATE, dra7_optfclk_pciephy1_clk_parents, NULL },
+ { 10, TI_CLK_GATE, dra7_optfclk_pciephy1_div_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_pcie2_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 9, TI_CLK_GATE, dra7_optfclk_pciephy1_clk_parents, NULL },
+ { 10, TI_CLK_GATE, dra7_optfclk_pciephy1_div_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_pcie_clkctrl_regs[] __initconst = {
+ { DRA7_PCIE_PCIE1_CLKCTRL, dra7_pcie1_bit_data, CLKF_SW_SUP, "l4_root_clk_div" },
+ { DRA7_PCIE_PCIE2_CLKCTRL, dra7_pcie2_bit_data, CLKF_SW_SUP, "l4_root_clk_div" },
+ { 0 },
+};
+
+static const char * const dra7_rmii_50mhz_clk_mux_parents[] __initconst = {
+ "dpll_gmac_h11x2_ck",
+ "rmii_clk_ck",
+ NULL,
+};
+
+static const char * const dra7_gmac_rft_clk_mux_parents[] __initconst = {
+ "video1_clkin_ck",
+ "video2_clkin_ck",
+ "dpll_abe_m2_ck",
+ "hdmi_clkin_ck",
+ "l3_iclk_div",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_gmac_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_rmii_50mhz_clk_mux_parents, NULL },
+ { 25, TI_CLK_MUX, dra7_gmac_rft_clk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_gmac_clkctrl_regs[] __initconst = {
+ { DRA7_GMAC_GMAC_CLKCTRL, dra7_gmac_bit_data, CLKF_SW_SUP, "gmac_main_clk" },
+ { 0 },
+};
+
+static const char * const dra7_timer10_gfclk_mux_parents[] __initconst = {
+ "timer_sys_clk_div",
+ "sys_32k_ck",
+ "sys_clkin2",
+ "ref_clkin0_ck",
+ "ref_clkin1_ck",
+ "ref_clkin2_ck",
+ "ref_clkin3_ck",
+ "abe_giclk_div",
+ "video1_div_clk",
+ "video2_div_clk",
+ "hdmi_div_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer10_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer11_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer2_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer3_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer4_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer9_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_gpio2_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_gpio3_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_gpio4_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_gpio5_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_gpio6_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_gpio7_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_gpio8_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 0 },
+};
+
+static const char * const dra7_mmc3_gfclk_div_parents[] __initconst = {
+ "l4per-clkctrl:00f8:24",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data dra7_mmc3_gfclk_div_data __initconst = {
+ .max_div = 4,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+};
+
+static const struct omap_clkctrl_bit_data dra7_mmc3_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 25, TI_CLK_DIVIDER, dra7_mmc3_gfclk_div_parents, &dra7_mmc3_gfclk_div_data },
+ { 0 },
+};
+
+static const char * const dra7_mmc4_gfclk_div_parents[] __initconst = {
+ "l4per-clkctrl:0100:24",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data dra7_mmc4_gfclk_div_data __initconst = {
+ .max_div = 4,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+};
+
+static const struct omap_clkctrl_bit_data dra7_mmc4_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 25, TI_CLK_DIVIDER, dra7_mmc4_gfclk_div_parents, &dra7_mmc4_gfclk_div_data },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart1_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart2_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart3_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart4_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart5_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_l4per_clkctrl_regs[] __initconst = {
+ { DRA7_L4PER_TIMER10_CLKCTRL, dra7_timer10_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0000:24" },
+ { DRA7_L4PER_TIMER11_CLKCTRL, dra7_timer11_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0008:24" },
+ { DRA7_L4PER_TIMER2_CLKCTRL, dra7_timer2_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0010:24" },
+ { DRA7_L4PER_TIMER3_CLKCTRL, dra7_timer3_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0018:24" },
+ { DRA7_L4PER_TIMER4_CLKCTRL, dra7_timer4_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0020:24" },
+ { DRA7_L4PER_TIMER9_CLKCTRL, dra7_timer9_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0028:24" },
+ { DRA7_L4PER_ELM_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4PER_GPIO2_CLKCTRL, dra7_gpio2_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4PER_GPIO3_CLKCTRL, dra7_gpio3_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4PER_GPIO4_CLKCTRL, dra7_gpio4_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4PER_GPIO5_CLKCTRL, dra7_gpio5_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4PER_GPIO6_CLKCTRL, dra7_gpio6_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4PER_HDQ1W_CLKCTRL, NULL, CLKF_SW_SUP, "func_12m_fclk" },
+ { DRA7_L4PER_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { DRA7_L4PER_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { DRA7_L4PER_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { DRA7_L4PER_I2C4_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { DRA7_L4PER_L4_PER1_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4PER_MCSPI1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { DRA7_L4PER_MCSPI2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { DRA7_L4PER_MCSPI3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { DRA7_L4PER_MCSPI4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { DRA7_L4PER_GPIO7_CLKCTRL, dra7_gpio7_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4PER_GPIO8_CLKCTRL, dra7_gpio8_bit_data, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4PER_MMC3_CLKCTRL, dra7_mmc3_bit_data, CLKF_SW_SUP, "l4per-clkctrl:00f8:25" },
+ { DRA7_L4PER_MMC4_CLKCTRL, dra7_mmc4_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0100:25" },
+ { DRA7_L4PER_UART1_CLKCTRL, dra7_uart1_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0118:24" },
+ { DRA7_L4PER_UART2_CLKCTRL, dra7_uart2_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0120:24" },
+ { DRA7_L4PER_UART3_CLKCTRL, dra7_uart3_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0128:24" },
+ { DRA7_L4PER_UART4_CLKCTRL, dra7_uart4_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0130:24" },
+ { DRA7_L4PER_UART5_CLKCTRL, dra7_uart5_bit_data, CLKF_SW_SUP, "l4per-clkctrl:0148:24" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_l4sec_clkctrl_regs[] __initconst = {
+ { DRA7_L4SEC_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4SEC_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4SEC_DES_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4SEC_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l4_root_clk_div" },
+ { DRA7_L4SEC_SHAM_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4SEC_SHAM2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { 0 },
+};
+
+static const char * const dra7_qspi_gfclk_mux_parents[] __initconst = {
+ "func_128m_clk",
+ "dpll_per_h13x2_ck",
+ NULL,
+};
+
+static const char * const dra7_qspi_gfclk_div_parents[] __initconst = {
+ "l4per2-clkctrl:012c:24",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data dra7_qspi_gfclk_div_data __initconst = {
+ .max_div = 4,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+};
+
+static const struct omap_clkctrl_bit_data dra7_qspi_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_qspi_gfclk_mux_parents, NULL },
+ { 25, TI_CLK_DIVIDER, dra7_qspi_gfclk_div_parents, &dra7_qspi_gfclk_div_data },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_mcasp2_bit_data[] __initconst = {
+ { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
+ { 28, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_mcasp3_bit_data[] __initconst = {
+ { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_mcasp5_bit_data[] __initconst = {
+ { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_mcasp8_bit_data[] __initconst = {
+ { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_mcasp4_bit_data[] __initconst = {
+ { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart7_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart8_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart9_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_mcasp6_bit_data[] __initconst = {
+ { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_mcasp7_bit_data[] __initconst = {
+ { 22, TI_CLK_MUX, dra7_mcasp1_aux_gfclk_mux_parents, NULL },
+ { 24, TI_CLK_MUX, dra7_mcasp1_ahclkx_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_l4per2_clkctrl_regs[] __initconst = {
+ { DRA7_L4PER2_L4_PER2_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4PER2_PRUSS1_CLKCTRL, NULL, CLKF_SW_SUP, "" },
+ { DRA7_L4PER2_PRUSS2_CLKCTRL, NULL, CLKF_SW_SUP, "" },
+ { DRA7_L4PER2_EPWMSS1_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div" },
+ { DRA7_L4PER2_EPWMSS2_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div" },
+ { DRA7_L4PER2_EPWMSS0_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div" },
+ { DRA7_L4PER2_QSPI_CLKCTRL, dra7_qspi_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:012c:25" },
+ { DRA7_L4PER2_MCASP2_CLKCTRL, dra7_mcasp2_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0154:22" },
+ { DRA7_L4PER2_MCASP3_CLKCTRL, dra7_mcasp3_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:015c:22" },
+ { DRA7_L4PER2_MCASP5_CLKCTRL, dra7_mcasp5_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:016c:22" },
+ { DRA7_L4PER2_MCASP8_CLKCTRL, dra7_mcasp8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0184:22" },
+ { DRA7_L4PER2_MCASP4_CLKCTRL, dra7_mcasp4_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:018c:22" },
+ { DRA7_L4PER2_UART7_CLKCTRL, dra7_uart7_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01c4:24" },
+ { DRA7_L4PER2_UART8_CLKCTRL, dra7_uart8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01d4:24" },
+ { DRA7_L4PER2_UART9_CLKCTRL, dra7_uart9_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01dc:24" },
+ { DRA7_L4PER2_DCAN2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_clkin1" },
+ { DRA7_L4PER2_MCASP6_CLKCTRL, dra7_mcasp6_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01f8:22" },
+ { DRA7_L4PER2_MCASP7_CLKCTRL, dra7_mcasp7_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01fc:22" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer13_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer14_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer15_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer16_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_l4per3_clkctrl_regs[] __initconst = {
+ { DRA7_L4PER3_L4_PER3_CLKCTRL, NULL, 0, "l3_iclk_div" },
+ { DRA7_L4PER3_TIMER13_CLKCTRL, dra7_timer13_bit_data, CLKF_SW_SUP, "l4per3-clkctrl:00b4:24" },
+ { DRA7_L4PER3_TIMER14_CLKCTRL, dra7_timer14_bit_data, CLKF_SW_SUP, "l4per3-clkctrl:00bc:24" },
+ { DRA7_L4PER3_TIMER15_CLKCTRL, dra7_timer15_bit_data, CLKF_SW_SUP, "l4per3-clkctrl:00c4:24" },
+ { DRA7_L4PER3_TIMER16_CLKCTRL, dra7_timer16_bit_data, CLKF_SW_SUP, "l4per3-clkctrl:011c:24" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_gpio1_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, dra7_dss_32khz_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_timer1_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_timer10_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data dra7_uart10_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_uart6_gfclk_mux_parents, NULL },
+ { 0 },
+};
+
+static const char * const dra7_dcan1_sys_clk_mux_parents[] __initconst = {
+ "sys_clkin1",
+ "sys_clkin2",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data dra7_dcan1_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, dra7_dcan1_sys_clk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dra7_wkupaon_clkctrl_regs[] __initconst = {
+ { DRA7_WKUPAON_L4_WKUP_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
+ { DRA7_WKUPAON_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
+ { DRA7_WKUPAON_GPIO1_CLKCTRL, dra7_gpio1_bit_data, CLKF_HW_SUP, "wkupaon_iclk_mux" },
+ { DRA7_WKUPAON_TIMER1_CLKCTRL, dra7_timer1_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0020:24" },
+ { DRA7_WKUPAON_TIMER12_CLKCTRL, NULL, CLKF_SOC_NONSEC, "secure_32k_clk_src_ck" },
+ { DRA7_WKUPAON_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
+ { DRA7_WKUPAON_UART10_CLKCTRL, dra7_uart10_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0060:24" },
+ { DRA7_WKUPAON_DCAN1_CLKCTRL, dra7_dcan1_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0068:24" },
+ { DRA7_WKUPAON_ADC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SOC_DRA76, "mcan_clk" },
+ { 0 },
+};
+
+const struct omap_clkctrl_data dra7_clkctrl_data[] __initconst = {
+ { 0x4a005320, dra7_mpu_clkctrl_regs },
+ { 0x4a005420, dra7_dsp1_clkctrl_regs },
+ { 0x4a005520, dra7_ipu1_clkctrl_regs },
+ { 0x4a005550, dra7_ipu_clkctrl_regs },
+ { 0x4a005620, dra7_dsp2_clkctrl_regs },
+ { 0x4a005720, dra7_rtc_clkctrl_regs },
+ { 0x4a005760, dra7_vpe_clkctrl_regs },
+ { 0x4a008620, dra7_coreaon_clkctrl_regs },
+ { 0x4a008720, dra7_l3main1_clkctrl_regs },
+ { 0x4a008920, dra7_ipu2_clkctrl_regs },
+ { 0x4a008a20, dra7_dma_clkctrl_regs },
+ { 0x4a008b20, dra7_emif_clkctrl_regs },
+ { 0x4a008c00, dra7_atl_clkctrl_regs },
+ { 0x4a008d20, dra7_l4cfg_clkctrl_regs },
+ { 0x4a008e20, dra7_l3instr_clkctrl_regs },
+ { 0x4a008f20, dra7_iva_clkctrl_regs },
+ { 0x4a009020, dra7_cam_clkctrl_regs },
+ { 0x4a009120, dra7_dss_clkctrl_regs },
+ { 0x4a009220, dra7_gpu_clkctrl_regs },
+ { 0x4a009320, dra7_l3init_clkctrl_regs },
+ { 0x4a0093b0, dra7_pcie_clkctrl_regs },
+ { 0x4a0093d0, dra7_gmac_clkctrl_regs },
+ { 0x4a009728, dra7_l4per_clkctrl_regs },
+ { 0x4a0098a0, dra7_l4sec_clkctrl_regs },
+ { 0x4a00970c, dra7_l4per2_clkctrl_regs },
+ { 0x4a009714, dra7_l4per3_clkctrl_regs },
+ { 0x4ae07820, dra7_wkupaon_clkctrl_regs },
+ { 0 },
+};
+
+static struct ti_dt_clk dra7xx_clks[] = {
+ DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
+ DT_CLK(NULL, "sys_clkin_ck", "timer_sys_clk_div"),
+ DT_CLK(NULL, "sys_clkin", "sys_clkin1"),
+ DT_CLK(NULL, "atl_dpll_clk_mux", "atl-clkctrl:0000:24"),
+ DT_CLK(NULL, "atl_gfclk_mux", "atl-clkctrl:0000:26"),
+ DT_CLK(NULL, "dcan1_sys_clk_mux", "wkupaon-clkctrl:0068:24"),
+ DT_CLK(NULL, "dss_32khz_clk", "dss-clkctrl:0000:11"),
+ DT_CLK(NULL, "dss_48mhz_clk", "dss-clkctrl:0000:9"),
+ DT_CLK(NULL, "dss_dss_clk", "dss-clkctrl:0000:8"),
+ DT_CLK(NULL, "dss_hdmi_clk", "dss-clkctrl:0000:10"),
+ DT_CLK(NULL, "dss_video1_clk", "dss-clkctrl:0000:12"),
+ DT_CLK(NULL, "dss_video2_clk", "dss-clkctrl:0000:13"),
+ DT_CLK(NULL, "gmac_rft_clk_mux", "gmac-clkctrl:0000:25"),
+ DT_CLK(NULL, "gpio1_dbclk", "wkupaon-clkctrl:0018:8"),
+ DT_CLK(NULL, "gpio2_dbclk", "l4per-clkctrl:0038:8"),
+ DT_CLK(NULL, "gpio3_dbclk", "l4per-clkctrl:0040:8"),
+ DT_CLK(NULL, "gpio4_dbclk", "l4per-clkctrl:0048:8"),
+ DT_CLK(NULL, "gpio5_dbclk", "l4per-clkctrl:0050:8"),
+ DT_CLK(NULL, "gpio6_dbclk", "l4per-clkctrl:0058:8"),
+ DT_CLK(NULL, "gpio7_dbclk", "l4per-clkctrl:00e8:8"),
+ DT_CLK(NULL, "gpio8_dbclk", "l4per-clkctrl:00f0:8"),
+ DT_CLK(NULL, "ipu1_gfclk_mux", "ipu1-clkctrl:0000:24"),
+ DT_CLK(NULL, "mcasp1_ahclkr_mux", "ipu-clkctrl:0000:28"),
+ DT_CLK(NULL, "mcasp1_ahclkx_mux", "ipu-clkctrl:0000:24"),
+ DT_CLK(NULL, "mcasp1_aux_gfclk_mux", "ipu-clkctrl:0000:22"),
+ DT_CLK(NULL, "mcasp2_ahclkr_mux", "l4per2-clkctrl:0154:28"),
+ DT_CLK(NULL, "mcasp2_ahclkx_mux", "l4per2-clkctrl:0154:24"),
+ DT_CLK(NULL, "mcasp2_aux_gfclk_mux", "l4per2-clkctrl:0154:22"),
+ DT_CLK(NULL, "mcasp3_ahclkx_mux", "l4per2-clkctrl:015c:24"),
+ DT_CLK(NULL, "mcasp3_aux_gfclk_mux", "l4per2-clkctrl:015c:22"),
+ DT_CLK(NULL, "mcasp4_ahclkx_mux", "l4per2-clkctrl:018c:24"),
+ DT_CLK(NULL, "mcasp4_aux_gfclk_mux", "l4per2-clkctrl:018c:22"),
+ DT_CLK(NULL, "mcasp5_ahclkx_mux", "l4per2-clkctrl:016c:24"),
+ DT_CLK(NULL, "mcasp5_aux_gfclk_mux", "l4per2-clkctrl:016c:22"),
+ DT_CLK(NULL, "mcasp6_ahclkx_mux", "l4per2-clkctrl:01f8:24"),
+ DT_CLK(NULL, "mcasp6_aux_gfclk_mux", "l4per2-clkctrl:01f8:22"),
+ DT_CLK(NULL, "mcasp7_ahclkx_mux", "l4per2-clkctrl:01fc:24"),
+ DT_CLK(NULL, "mcasp7_aux_gfclk_mux", "l4per2-clkctrl:01fc:22"),
+ DT_CLK(NULL, "mcasp8_ahclkx_mux", "l4per2-clkctrl:0184:24"),
+ DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "l4per2-clkctrl:0184:22"),
+ DT_CLK(NULL, "mmc1_clk32k", "l3init-clkctrl:0008:8"),
+ DT_CLK(NULL, "mmc1_fclk_div", "l3init-clkctrl:0008:25"),
+ DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"),
+ DT_CLK(NULL, "mmc2_clk32k", "l3init-clkctrl:0010:8"),
+ DT_CLK(NULL, "mmc2_fclk_div", "l3init-clkctrl:0010:25"),
+ DT_CLK(NULL, "mmc2_fclk_mux", "l3init-clkctrl:0010:24"),
+ DT_CLK(NULL, "mmc3_clk32k", "l4per-clkctrl:00f8:8"),
+ DT_CLK(NULL, "mmc3_gfclk_div", "l4per-clkctrl:00f8:25"),
+ DT_CLK(NULL, "mmc3_gfclk_mux", "l4per-clkctrl:00f8:24"),
+ DT_CLK(NULL, "mmc4_clk32k", "l4per-clkctrl:0100:8"),
+ DT_CLK(NULL, "mmc4_gfclk_div", "l4per-clkctrl:0100:25"),
+ DT_CLK(NULL, "mmc4_gfclk_mux", "l4per-clkctrl:0100:24"),
+ DT_CLK(NULL, "optfclk_pciephy1_32khz", "pcie-clkctrl:0000:8"),
+ DT_CLK(NULL, "optfclk_pciephy1_clk", "pcie-clkctrl:0000:9"),
+ DT_CLK(NULL, "optfclk_pciephy1_div_clk", "pcie-clkctrl:0000:10"),
+ DT_CLK(NULL, "optfclk_pciephy2_32khz", "pcie-clkctrl:0008:8"),
+ DT_CLK(NULL, "optfclk_pciephy2_clk", "pcie-clkctrl:0008:9"),
+ DT_CLK(NULL, "optfclk_pciephy2_div_clk", "pcie-clkctrl:0008:10"),
+ DT_CLK(NULL, "qspi_gfclk_div", "l4per2-clkctrl:012c:25"),
+ DT_CLK(NULL, "qspi_gfclk_mux", "l4per2-clkctrl:012c:24"),
+ DT_CLK(NULL, "rmii_50mhz_clk_mux", "gmac-clkctrl:0000:24"),
+ DT_CLK(NULL, "sata_ref_clk", "l3init-clkctrl:0068:8"),
+ DT_CLK(NULL, "timer10_gfclk_mux", "l4per-clkctrl:0000:24"),
+ DT_CLK(NULL, "timer11_gfclk_mux", "l4per-clkctrl:0008:24"),
+ DT_CLK(NULL, "timer13_gfclk_mux", "l4per3-clkctrl:00b4:24"),
+ DT_CLK(NULL, "timer14_gfclk_mux", "l4per3-clkctrl:00bc:24"),
+ DT_CLK(NULL, "timer15_gfclk_mux", "l4per3-clkctrl:00c4:24"),
+ DT_CLK(NULL, "timer16_gfclk_mux", "l4per3-clkctrl:011c:24"),
+ DT_CLK(NULL, "timer1_gfclk_mux", "wkupaon-clkctrl:0020:24"),
+ DT_CLK(NULL, "timer2_gfclk_mux", "l4per-clkctrl:0010:24"),
+ DT_CLK(NULL, "timer3_gfclk_mux", "l4per-clkctrl:0018:24"),
+ DT_CLK(NULL, "timer4_gfclk_mux", "l4per-clkctrl:0020:24"),
+ DT_CLK(NULL, "timer5_gfclk_mux", "ipu-clkctrl:0008:24"),
+ DT_CLK(NULL, "timer6_gfclk_mux", "ipu-clkctrl:0010:24"),
+ DT_CLK(NULL, "timer7_gfclk_mux", "ipu-clkctrl:0018:24"),
+ DT_CLK(NULL, "timer8_gfclk_mux", "ipu-clkctrl:0020:24"),
+ DT_CLK(NULL, "timer9_gfclk_mux", "l4per-clkctrl:0028:24"),
+ DT_CLK(NULL, "uart10_gfclk_mux", "wkupaon-clkctrl:0060:24"),
+ DT_CLK(NULL, "uart1_gfclk_mux", "l4per-clkctrl:0118:24"),
+ DT_CLK(NULL, "uart2_gfclk_mux", "l4per-clkctrl:0120:24"),
+ DT_CLK(NULL, "uart3_gfclk_mux", "l4per-clkctrl:0128:24"),
+ DT_CLK(NULL, "uart4_gfclk_mux", "l4per-clkctrl:0130:24"),
+ DT_CLK(NULL, "uart5_gfclk_mux", "l4per-clkctrl:0148:24"),
+ DT_CLK(NULL, "uart6_gfclk_mux", "ipu-clkctrl:0030:24"),
+ DT_CLK(NULL, "uart7_gfclk_mux", "l4per2-clkctrl:01c4:24"),
+ DT_CLK(NULL, "uart8_gfclk_mux", "l4per2-clkctrl:01d4:24"),
+ DT_CLK(NULL, "uart9_gfclk_mux", "l4per2-clkctrl:01dc:24"),
+ DT_CLK(NULL, "usb_otg_ss1_refclk960m", "l3init-clkctrl:00d0:8"),
+ DT_CLK(NULL, "usb_otg_ss2_refclk960m", "l3init-clkctrl:0020:8"),
+ { .node_name = NULL },
+};
+
+int __init dra7xx_dt_clk_init(void)
+{
+ int rc;
+ struct clk *dpll_ck, *hdcp_ck;
+
+ ti_dt_clocks_register(dra7xx_clks);
+
+ omap2_clk_disable_autoidle_all();
+
+ ti_clk_add_aliases();
+
+ dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck");
+ rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure GMAC DPLL!\n", __func__);
+
+ dpll_ck = clk_get_sys(NULL, "dpll_usb_ck");
+ rc = clk_set_rate(dpll_ck, DRA7_DPLL_USB_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure USB DPLL!\n", __func__);
+
+ dpll_ck = clk_get_sys(NULL, "dpll_usb_m2_ck");
+ rc = clk_set_rate(dpll_ck, DRA7_DPLL_USB_DEFFREQ/2);
+ if (rc)
+ pr_err("%s: failed to set USB_DPLL M2 OUT\n", __func__);
+
+ hdcp_ck = clk_get_sys(NULL, "dss_deshdcp_clk");
+ rc = clk_prepare_enable(hdcp_ck);
+ if (rc)
+ pr_err("%s: failed to set dss_deshdcp_clk\n", __func__);
+
+ return rc;
+}
diff --git a/drivers/clk/ti/clk-814x.c b/drivers/clk/ti/clk-814x.c
new file mode 100644
index 000000000..4f8bd34ec
--- /dev/null
+++ b/drivers/clk/ti/clk-814x.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/clock/dm814.h>
+
+#include "clock.h"
+
+static const struct omap_clkctrl_reg_data dm814_default_clkctrl_regs[] __initconst = {
+ { DM814_USB_OTG_HS_CLKCTRL, NULL, CLKF_SW_SUP, "pll260dcoclkldo" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dm814_alwon_clkctrl_regs[] __initconst = {
+ { DM814_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk10_ck" },
+ { DM814_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk10_ck" },
+ { DM814_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk10_ck" },
+ { DM814_GPIO1_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk6_ck" },
+ { DM814_GPIO2_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk6_ck" },
+ { DM814_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk10_ck" },
+ { DM814_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk10_ck" },
+ { DM814_WD_TIMER_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "sysclk18_ck" },
+ { DM814_MCSPI1_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk10_ck" },
+ { DM814_GPMC_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk6_ck" },
+ { DM814_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "mpu_ck" },
+ { DM814_RTC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "sysclk18_ck" },
+ { DM814_TPCC_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk4_ck" },
+ { DM814_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk4_ck" },
+ { DM814_TPTC1_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk4_ck" },
+ { DM814_TPTC2_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk4_ck" },
+ { DM814_TPTC3_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk4_ck" },
+ { DM814_MMC1_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk8_ck" },
+ { DM814_MMC2_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk8_ck" },
+ { DM814_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk8_ck" },
+ { 0 },
+};
+
+static const struct
+omap_clkctrl_reg_data dm814_alwon_ethernet_clkctrl_regs[] __initconst = {
+ { 0, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk" },
+};
+
+const struct omap_clkctrl_data dm814_clkctrl_data[] __initconst = {
+ { 0x48180500, dm814_default_clkctrl_regs },
+ { 0x48181400, dm814_alwon_clkctrl_regs },
+ { 0x481815d4, dm814_alwon_ethernet_clkctrl_regs },
+ { 0 },
+};
+
+static struct ti_dt_clk dm814_clks[] = {
+ DT_CLK(NULL, "timer_sys_ck", "devosc_ck"),
+ { .node_name = NULL },
+};
+
+static bool timer_clocks_initialized;
+
+static int __init dm814x_adpll_early_init(void)
+{
+ struct device_node *np;
+
+ if (!timer_clocks_initialized)
+ return -ENODEV;
+
+ np = of_find_node_by_name(NULL, "pllss");
+ if (!np) {
+		pr_err("Could not find node for pllss\n");
+ return -ENODEV;
+ }
+
+ of_platform_populate(np, NULL, NULL, NULL);
+ of_node_put(np);
+
+ return 0;
+}
+core_initcall(dm814x_adpll_early_init);
+
+static const char * const init_clocks[] = {
+ "pll040clkout", /* MPU 481c5040.adpll.clkout */
+ "pll290clkout", /* DDR 481c5290.adpll.clkout */
+};
+
+static int __init dm814x_adpll_enable_init_clocks(void)
+{
+ int i, err;
+
+ if (!timer_clocks_initialized)
+ return -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(init_clocks); i++) {
+ struct clk *clock;
+
+ clock = clk_get(NULL, init_clocks[i]);
+ if (WARN(IS_ERR(clock), "could not find init clock %s\n",
+ init_clocks[i]))
+ continue;
+ err = clk_prepare_enable(clock);
+ if (WARN(err, "could not enable init clock %s\n",
+ init_clocks[i]))
+ continue;
+ }
+
+ return 0;
+}
+postcore_initcall(dm814x_adpll_enable_init_clocks);
+
+int __init dm814x_dt_clk_init(void)
+{
+ ti_dt_clocks_register(dm814_clks);
+ omap2_clk_disable_autoidle_all();
+ ti_clk_add_aliases();
+ omap2_clk_enable_init_clocks(NULL, 0);
+ timer_clocks_initialized = true;
+
+ return 0;
+}
diff --git a/drivers/clk/ti/clk-816x.c b/drivers/clk/ti/clk-816x.c
new file mode 100644
index 000000000..3b8e483ae
--- /dev/null
+++ b/drivers/clk/ti/clk-816x.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+#include <dt-bindings/clock/dm816.h>
+
+#include "clock.h"
+
+static const struct omap_clkctrl_reg_data dm816_default_clkctrl_regs[] __initconst = {
+ { DM816_USB_OTG_HS_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk6_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data dm816_alwon_clkctrl_regs[] __initconst = {
+ { DM816_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk10_ck" },
+ { DM816_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk10_ck" },
+ { DM816_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk10_ck" },
+ { DM816_GPIO1_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk6_ck" },
+ { DM816_GPIO2_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk6_ck" },
+ { DM816_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk10_ck" },
+ { DM816_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk10_ck" },
+ { DM816_TIMER1_CLKCTRL, NULL, CLKF_SW_SUP, "timer1_fck" },
+ { DM816_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "timer2_fck" },
+ { DM816_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "timer3_fck" },
+ { DM816_TIMER4_CLKCTRL, NULL, CLKF_SW_SUP, "timer4_fck" },
+ { DM816_TIMER5_CLKCTRL, NULL, CLKF_SW_SUP, "timer5_fck" },
+ { DM816_TIMER6_CLKCTRL, NULL, CLKF_SW_SUP, "timer6_fck" },
+ { DM816_TIMER7_CLKCTRL, NULL, CLKF_SW_SUP, "timer7_fck" },
+ { DM816_WD_TIMER_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "sysclk18_ck" },
+ { DM816_MCSPI1_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk10_ck" },
+ { DM816_MAILBOX_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk6_ck" },
+ { DM816_SPINBOX_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk6_ck" },
+ { DM816_MMC1_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk10_ck" },
+ { DM816_GPMC_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk6_ck" },
+ { DM816_DAVINCI_MDIO_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "sysclk24_ck" },
+ { DM816_EMAC1_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "sysclk24_ck" },
+ { DM816_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk2_ck" },
+ { DM816_RTC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "sysclk18_ck" },
+ { DM816_TPCC_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk4_ck" },
+ { DM816_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk4_ck" },
+ { DM816_TPTC1_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk4_ck" },
+ { DM816_TPTC2_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk4_ck" },
+ { DM816_TPTC3_CLKCTRL, NULL, CLKF_SW_SUP, "sysclk4_ck" },
+ { 0 },
+};
+
+const struct omap_clkctrl_data dm816_clkctrl_data[] __initconst = {
+ { 0x48180500, dm816_default_clkctrl_regs },
+ { 0x48181400, dm816_alwon_clkctrl_regs },
+ { 0 },
+};
+
+static struct ti_dt_clk dm816x_clks[] = {
+ DT_CLK(NULL, "sys_clkin", "sys_clkin_ck"),
+ DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
+ DT_CLK(NULL, "timer_32k_ck", "sysclk18_ck"),
+ DT_CLK(NULL, "timer_ext_ck", "tclkin_ck"),
+ { .node_name = NULL },
+};
+
+static const char *enable_init_clks[] = {
+ "ddr_pll_clk1",
+ "ddr_pll_clk2",
+ "ddr_pll_clk3",
+ "sysclk6_ck",
+};
+
+int __init dm816x_dt_clk_init(void)
+{
+ ti_dt_clocks_register(dm816x_clks);
+ omap2_clk_disable_autoidle_all();
+ ti_clk_add_aliases();
+ omap2_clk_enable_init_clocks(enable_init_clks,
+ ARRAY_SIZE(enable_init_clks));
+
+ return 0;
+}
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
new file mode 100644
index 000000000..1c576599f
--- /dev/null
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * DRA7 ATL (Audio Tracking Logic) clock driver
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+#define DRA7_ATL_INSTANCES 4
+
+#define DRA7_ATL_PPMR_REG(id) (0x200 + (id * 0x80))
+#define DRA7_ATL_BBSR_REG(id) (0x204 + (id * 0x80))
+#define DRA7_ATL_ATLCR_REG(id) (0x208 + (id * 0x80))
+#define DRA7_ATL_SWEN_REG(id) (0x210 + (id * 0x80))
+#define DRA7_ATL_BWSMUX_REG(id) (0x214 + (id * 0x80))
+#define DRA7_ATL_AWSMUX_REG(id) (0x218 + (id * 0x80))
+#define DRA7_ATL_PCLKMUX_REG(id) (0x21c + (id * 0x80))
+
+#define DRA7_ATL_SWEN BIT(0)
+#define DRA7_ATL_DIVIDER_MASK (0x1f)
+#define DRA7_ATL_PCLKMUX BIT(0)
+struct dra7_atl_clock_info;
+
+struct dra7_atl_desc {
+ struct clk *clk;
+ struct clk_hw hw;
+ struct dra7_atl_clock_info *cinfo;
+ int id;
+
+ bool probed; /* the driver for the IP has been loaded */
+ bool valid; /* configured */
+ bool enabled;
+ u32 bws; /* Baseband Word Select Mux */
+ u32 aws; /* Audio Word Select Mux */
+ u32 divider; /* Cached divider value */
+};
+
+struct dra7_atl_clock_info {
+ struct device *dev;
+ void __iomem *iobase;
+
+ struct dra7_atl_desc *cdesc;
+};
+
+#define to_atl_desc(_hw) container_of(_hw, struct dra7_atl_desc, hw)
+
+static inline void atl_write(struct dra7_atl_clock_info *cinfo, u32 reg,
+ u32 val)
+{
+ __raw_writel(val, cinfo->iobase + reg);
+}
+
+static inline int atl_read(struct dra7_atl_clock_info *cinfo, u32 reg)
+{
+ return __raw_readl(cinfo->iobase + reg);
+}
+
+static int atl_clk_enable(struct clk_hw *hw)
+{
+ struct dra7_atl_desc *cdesc = to_atl_desc(hw);
+
+ if (!cdesc->probed)
+ goto out;
+
+ if (unlikely(!cdesc->valid))
+ dev_warn(cdesc->cinfo->dev, "atl%d has not been configured\n",
+ cdesc->id);
+ pm_runtime_get_sync(cdesc->cinfo->dev);
+
+ atl_write(cdesc->cinfo, DRA7_ATL_ATLCR_REG(cdesc->id),
+ cdesc->divider - 1);
+ atl_write(cdesc->cinfo, DRA7_ATL_SWEN_REG(cdesc->id), DRA7_ATL_SWEN);
+
+out:
+ cdesc->enabled = true;
+
+ return 0;
+}
+
+static void atl_clk_disable(struct clk_hw *hw)
+{
+ struct dra7_atl_desc *cdesc = to_atl_desc(hw);
+
+ if (!cdesc->probed)
+ goto out;
+
+ atl_write(cdesc->cinfo, DRA7_ATL_SWEN_REG(cdesc->id), 0);
+ pm_runtime_put_sync(cdesc->cinfo->dev);
+
+out:
+ cdesc->enabled = false;
+}
+
+static int atl_clk_is_enabled(struct clk_hw *hw)
+{
+ struct dra7_atl_desc *cdesc = to_atl_desc(hw);
+
+ return cdesc->enabled;
+}
+
+static unsigned long atl_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct dra7_atl_desc *cdesc = to_atl_desc(hw);
+
+ return parent_rate / cdesc->divider;
+}
+
+static long atl_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned divider;
+
+ divider = (*parent_rate + rate / 2) / rate;
+ if (divider > DRA7_ATL_DIVIDER_MASK + 1)
+ divider = DRA7_ATL_DIVIDER_MASK + 1;
+
+ return *parent_rate / divider;
+}
+
+static int atl_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct dra7_atl_desc *cdesc;
+ u32 divider;
+
+ if (!hw || !rate)
+ return -EINVAL;
+
+ cdesc = to_atl_desc(hw);
+ divider = ((parent_rate + rate / 2) / rate) - 1;
+ if (divider > DRA7_ATL_DIVIDER_MASK)
+ divider = DRA7_ATL_DIVIDER_MASK;
+
+ cdesc->divider = divider + 1;
+
+ return 0;
+}
+
+static const struct clk_ops atl_clk_ops = {
+ .enable = atl_clk_enable,
+ .disable = atl_clk_disable,
+ .is_enabled = atl_clk_is_enabled,
+ .recalc_rate = atl_clk_recalc_rate,
+ .round_rate = atl_clk_round_rate,
+ .set_rate = atl_clk_set_rate,
+};
+
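+/*
+ * Consumer-side sketch (not part of this driver): an audio driver would
+ * typically control an ATL output through the generic clk API. The
+ * "atl_clkin2_ck" con-id and the 5.6448 MHz rate below are illustrative
+ * assumptions only.
+ *
+ *	struct clk *atl_clk = clk_get(dev, "atl_clkin2_ck");
+ *
+ *	if (!IS_ERR(atl_clk)) {
+ *		clk_set_rate(atl_clk, 5644800);
+ *		clk_prepare_enable(atl_clk);
+ *	}
+ *
+ * Note that atl_clk_set_rate() only caches the divider; it is written to
+ * the ATLCR register once atl_clk_enable() runs with the hw driver probed.
+ */
+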
+static void __init of_dra7_atl_clock_setup(struct device_node *node)
+{
+ struct dra7_atl_desc *clk_hw = NULL;
+ struct clk_init_data init = { NULL };
+ const char **parent_names = NULL;
+ const char *name;
+ struct clk *clk;
+
+ clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ if (!clk_hw) {
+ pr_err("%s: could not allocate dra7_atl_desc\n", __func__);
+ return;
+ }
+
+ clk_hw->hw.init = &init;
+ clk_hw->divider = 1;
+ name = ti_dt_clk_name(node);
+ init.name = name;
+ init.ops = &atl_clk_ops;
+ init.flags = CLK_IGNORE_UNUSED;
+ init.num_parents = of_clk_get_parent_count(node);
+
+ if (init.num_parents != 1) {
+ pr_err("%s: atl clock %pOFn must have 1 parent\n", __func__,
+ node);
+ goto cleanup;
+ }
+
+ parent_names = kzalloc(sizeof(char *), GFP_KERNEL);
+
+ if (!parent_names)
+ goto cleanup;
+
+ parent_names[0] = of_clk_get_parent_name(node, 0);
+
+ init.parent_names = parent_names;
+
+ clk = of_ti_clk_register(node, &clk_hw->hw, name);
+
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ kfree(parent_names);
+ return;
+ }
+cleanup:
+ kfree(parent_names);
+ kfree(clk_hw);
+}
+CLK_OF_DECLARE(dra7_atl_clock, "ti,dra7-atl-clock", of_dra7_atl_clock_setup);
+
+static int of_dra7_atl_clk_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct dra7_atl_clock_info *cinfo;
+ int i;
+ int ret = 0;
+
+ if (!node)
+ return -ENODEV;
+
+ cinfo = devm_kzalloc(&pdev->dev, sizeof(*cinfo), GFP_KERNEL);
+ if (!cinfo)
+ return -ENOMEM;
+
+ cinfo->iobase = of_iomap(node, 0);
+ cinfo->dev = &pdev->dev;
+ pm_runtime_enable(cinfo->dev);
+
+ pm_runtime_get_sync(cinfo->dev);
+ atl_write(cinfo, DRA7_ATL_PCLKMUX_REG(0), DRA7_ATL_PCLKMUX);
+
+ for (i = 0; i < DRA7_ATL_INSTANCES; i++) {
+ struct device_node *cfg_node;
+ char prop[5];
+ struct dra7_atl_desc *cdesc;
+ struct of_phandle_args clkspec;
+ struct clk *clk;
+ int rc;
+
+ rc = of_parse_phandle_with_args(node, "ti,provided-clocks",
+ NULL, i, &clkspec);
+
+ if (rc) {
+ pr_err("%s: failed to lookup atl clock %d\n", __func__,
+ i);
+ ret = -EINVAL;
+ goto pm_put;
+ }
+
+ clk = of_clk_get_from_provider(&clkspec);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to get atl clock %d from provider\n",
+ __func__, i);
+ ret = PTR_ERR(clk);
+ goto pm_put;
+ }
+
+ cdesc = to_atl_desc(__clk_get_hw(clk));
+ cdesc->cinfo = cinfo;
+ cdesc->id = i;
+
+ /* Get configuration for the ATL instances */
+ snprintf(prop, sizeof(prop), "atl%u", i);
+ cfg_node = of_get_child_by_name(node, prop);
+ if (cfg_node) {
+ ret = of_property_read_u32(cfg_node, "bws",
+ &cdesc->bws);
+ ret |= of_property_read_u32(cfg_node, "aws",
+ &cdesc->aws);
+ if (!ret) {
+ cdesc->valid = true;
+ atl_write(cinfo, DRA7_ATL_BWSMUX_REG(i),
+ cdesc->bws);
+ atl_write(cinfo, DRA7_ATL_AWSMUX_REG(i),
+ cdesc->aws);
+ }
+ of_node_put(cfg_node);
+ }
+
+ cdesc->probed = true;
+		/*
+		 * Enable the clock if it has been requested prior to loading
+		 * the hw driver
+		 */
+ if (cdesc->enabled)
+ atl_clk_enable(__clk_get_hw(clk));
+ }
+
+pm_put:
+ pm_runtime_put_sync(cinfo->dev);
+ return ret;
+}
+
+static const struct of_device_id of_dra7_atl_clk_match_tbl[] = {
+ { .compatible = "ti,dra7-atl", },
+ {},
+};
+
+static struct platform_driver dra7_atl_clk_driver = {
+ .driver = {
+ .name = "dra7-atl",
+ .suppress_bind_attrs = true,
+ .of_match_table = of_dra7_atl_clk_match_tbl,
+ },
+ .probe = of_dra7_atl_clk_probe,
+};
+builtin_platform_driver(dra7_atl_clk_driver);
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
new file mode 100644
index 000000000..269355010
--- /dev/null
+++ b/drivers/clk/ti/clk.c
@@ -0,0 +1,659 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/ti.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/list.h>
+#include <linux/regmap.h>
+#include <linux/memblock.h>
+#include <linux/device.h>
+
+#include "clock.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+static LIST_HEAD(clk_hw_omap_clocks);
+struct ti_clk_ll_ops *ti_clk_ll_ops;
+static struct device_node *clocks_node_ptr[CLK_MAX_MEMMAPS];
+
+struct ti_clk_features ti_clk_features;
+
+struct clk_iomap {
+ struct regmap *regmap;
+ void __iomem *mem;
+};
+
+static struct clk_iomap *clk_memmaps[CLK_MAX_MEMMAPS];
+
+static void clk_memmap_writel(u32 val, const struct clk_omap_reg *reg)
+{
+ struct clk_iomap *io = clk_memmaps[reg->index];
+
+ if (reg->ptr)
+ writel_relaxed(val, reg->ptr);
+ else if (io->regmap)
+ regmap_write(io->regmap, reg->offset, val);
+ else
+ writel_relaxed(val, io->mem + reg->offset);
+}
+
+static void _clk_rmw(u32 val, u32 mask, void __iomem *ptr)
+{
+ u32 v;
+
+ v = readl_relaxed(ptr);
+ v &= ~mask;
+ v |= val;
+ writel_relaxed(v, ptr);
+}
+
+static void clk_memmap_rmw(u32 val, u32 mask, const struct clk_omap_reg *reg)
+{
+ struct clk_iomap *io = clk_memmaps[reg->index];
+
+ if (reg->ptr) {
+ _clk_rmw(val, mask, reg->ptr);
+ } else if (io->regmap) {
+ regmap_update_bits(io->regmap, reg->offset, mask, val);
+ } else {
+ _clk_rmw(val, mask, io->mem + reg->offset);
+ }
+}
+
+static u32 clk_memmap_readl(const struct clk_omap_reg *reg)
+{
+ u32 val;
+ struct clk_iomap *io = clk_memmaps[reg->index];
+
+ if (reg->ptr)
+ val = readl_relaxed(reg->ptr);
+ else if (io->regmap)
+ regmap_read(io->regmap, reg->offset, &val);
+ else
+ val = readl_relaxed(io->mem + reg->offset);
+
+ return val;
+}
+
+/**
+ * ti_clk_setup_ll_ops - setup low level clock operations
+ * @ops: low level clock ops descriptor
+ *
+ * Sets up low level clock operations for the TI clock driver. This is used
+ * to provide various callbacks for the clock driver towards platform
+ * specific code. Returns 0 on success, -EBUSY if ll_ops have been
+ * registered already.
+ */
+int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops)
+{
+ if (ti_clk_ll_ops) {
+ pr_err("Attempt to register ll_ops multiple times.\n");
+ return -EBUSY;
+ }
+
+ ti_clk_ll_ops = ops;
+ ops->clk_readl = clk_memmap_readl;
+ ops->clk_writel = clk_memmap_writel;
+ ops->clk_rmw = clk_memmap_rmw;
+
+ return 0;
+}
+
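+/*
+ * Platform-side sketch (hypothetical, not in-tree code): the platform
+ * clockdomain layer is expected to hand over its callbacks once, early
+ * in boot; the register accessors are filled in by ti_clk_setup_ll_ops()
+ * itself. The my_clkdm_* names below are placeholders.
+ *
+ *	static struct ti_clk_ll_ops my_ll_ops = {
+ *		.clkdm_clk_enable  = my_clkdm_clk_enable,
+ *		.clkdm_clk_disable = my_clkdm_clk_disable,
+ *	};
+ *
+ *	ti_clk_setup_ll_ops(&my_ll_ops);
+ */
+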
+/*
+ * Eventually we could standardize to using '_' for clk-*.c files to follow the
+ * TRM naming and leave out the tmp name here.
+ */
+static struct device_node *ti_find_clock_provider(struct device_node *from,
+ const char *name)
+{
+ struct device_node *np;
+ bool found = false;
+ const char *n;
+ char *tmp;
+
+ tmp = kstrdup(name, GFP_KERNEL);
+ if (!tmp)
+ return NULL;
+ strreplace(tmp, '-', '_');
+
+ /* Node named "clock" with "clock-output-names" */
+ for_each_of_allnodes_from(from, np) {
+ if (of_property_read_string_index(np, "clock-output-names",
+ 0, &n))
+ continue;
+
+ if (!strncmp(n, tmp, strlen(tmp))) {
+ of_node_get(np);
+ found = true;
+ break;
+ }
+ }
+ kfree(tmp);
+
+ if (found) {
+ of_node_put(from);
+ return np;
+ }
+
+	/* Fall back to the old node-name based provider name */
+ return of_find_node_by_name(from, name);
+}
+
+/**
+ * ti_dt_clocks_register - register DT alias clocks during boot
+ * @oclks: list of clocks to register
+ *
+ * Register alias or non-standard DT clock entries during boot. By
+ * default, DT clocks are found based on their clock-output-names
+ * property, or the clock node name for legacy cases. If any
+ * additional con-id / dev-id -> clock mapping is required, use this
+ * function to list these.
+ */
+void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
+{
+ struct ti_dt_clk *c;
+ struct device_node *node, *parent, *child;
+ struct clk *clk;
+ struct of_phandle_args clkspec;
+ char buf[64];
+ char *ptr;
+ char *tags[2];
+ int i;
+ int num_args;
+ int ret;
+ static bool clkctrl_nodes_missing;
+ static bool has_clkctrl_data;
+ static bool compat_mode;
+
+ compat_mode = ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT;
+
+ for (c = oclks; c->node_name != NULL; c++) {
+ strcpy(buf, c->node_name);
+ ptr = buf;
+ for (i = 0; i < 2; i++)
+ tags[i] = NULL;
+ num_args = 0;
+ while (*ptr) {
+ if (*ptr == ':') {
+ if (num_args >= 2) {
+ pr_warn("Bad number of tags on %s\n",
+ c->node_name);
+ return;
+ }
+ tags[num_args++] = ptr + 1;
+ *ptr = 0;
+ }
+ ptr++;
+ }
+
+ if (num_args && clkctrl_nodes_missing)
+ continue;
+
+ node = ti_find_clock_provider(NULL, buf);
+ if (num_args && compat_mode) {
+ parent = node;
+ child = of_get_child_by_name(parent, "clock");
+ if (!child)
+ child = of_get_child_by_name(parent, "clk");
+ if (child) {
+ of_node_put(parent);
+ node = child;
+ }
+ }
+
+ clkspec.np = node;
+ clkspec.args_count = num_args;
+ for (i = 0; i < num_args; i++) {
+ ret = kstrtoint(tags[i], i ? 10 : 16, clkspec.args + i);
+ if (ret) {
+ pr_warn("Bad tag in %s at %d: %s\n",
+ c->node_name, i, tags[i]);
+ of_node_put(node);
+ return;
+ }
+ }
+ clk = of_clk_get_from_provider(&clkspec);
+ of_node_put(node);
+ if (!IS_ERR(clk)) {
+ c->lk.clk = clk;
+ clkdev_add(&c->lk);
+ } else {
+ if (num_args && !has_clkctrl_data) {
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL,
+ "ti,clkctrl");
+ if (np) {
+ has_clkctrl_data = true;
+ of_node_put(np);
+ } else {
+ clkctrl_nodes_missing = true;
+
+ pr_warn("missing clkctrl nodes, please update your dts.\n");
+ continue;
+ }
+ }
+
+ pr_warn("failed to lookup clock node %s, ret=%ld\n",
+ c->node_name, PTR_ERR(clk));
+ }
+ }
+}
+
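+/*
+ * Table sketch: entries passed to ti_dt_clocks_register() may append a
+ * register offset and bit index to the provider name as ":xxxx:d" tags,
+ * as the DT_CLK() tables above do. The "my_clks" name and the single
+ * entry below are illustrative only.
+ *
+ *	static struct ti_dt_clk my_clks[] = {
+ *		DT_CLK(NULL, "uart1_gfclk_mux", "l4per-clkctrl:0118:24"),
+ *		{ .node_name = NULL },
+ *	};
+ *
+ *	ti_dt_clocks_register(my_clks);
+ *
+ * The offset tag is parsed as hexadecimal and the bit tag as decimal.
+ */
+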
+struct clk_init_item {
+ struct device_node *node;
+ void *user;
+ ti_of_clk_init_cb_t func;
+ struct list_head link;
+};
+
+static LIST_HEAD(retry_list);
+
+/**
+ * ti_clk_retry_init - retries a failed clock init at later phase
+ * @node: device node for the clock
+ * @user: user data pointer
+ * @func: init function to be called for the clock
+ *
+ * Adds a failed clock init to the retry list. The retry list is parsed
+ * once all the other clocks have been initialized.
+ */
+int __init ti_clk_retry_init(struct device_node *node, void *user,
+ ti_of_clk_init_cb_t func)
+{
+ struct clk_init_item *retry;
+
+ pr_debug("%pOFn: adding to retry list...\n", node);
+ retry = kzalloc(sizeof(*retry), GFP_KERNEL);
+ if (!retry)
+ return -ENOMEM;
+
+ retry->node = node;
+ retry->func = func;
+ retry->user = user;
+ list_add(&retry->link, &retry_list);
+
+ return 0;
+}
+
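+/*
+ * Caller-side sketch (the _register_my_clk and do_register names are
+ * hypothetical): a setup callback that cannot resolve its parents yet
+ * defers itself via ti_clk_retry_init() and is invoked again from
+ * ti_dt_clk_init_retry_clks() once the other clocks are in place.
+ *
+ *	static void __init _register_my_clk(void *user, struct device_node *node)
+ *	{
+ *		struct clk *parent = of_clk_get(node, 0);
+ *
+ *		if (IS_ERR(parent)) {
+ *			ti_clk_retry_init(node, user, _register_my_clk);
+ *			return;
+ *		}
+ *		do_register(user, node);
+ *	}
+ */
+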
+/**
+ * ti_clk_get_reg_addr - get register address for a clock register
+ * @node: device node for the clock
+ * @index: register index from the clock node
+ * @reg: pointer to target register struct
+ *
+ * Builds clock register address from device tree information, and returns
+ * the data via the provided output pointer @reg. Returns 0 on success,
+ * negative error value on failure.
+ */
+int ti_clk_get_reg_addr(struct device_node *node, int index,
+ struct clk_omap_reg *reg)
+{
+ u32 val;
+ int i;
+
+ for (i = 0; i < CLK_MAX_MEMMAPS; i++) {
+ if (clocks_node_ptr[i] == node->parent)
+ break;
+ if (clocks_node_ptr[i] == node->parent->parent)
+ break;
+ }
+
+ if (i == CLK_MAX_MEMMAPS) {
+ pr_err("clk-provider not found for %pOFn!\n", node);
+ return -ENOENT;
+ }
+
+ reg->index = i;
+
+ if (of_property_read_u32_index(node, "reg", index, &val)) {
+ if (of_property_read_u32_index(node->parent, "reg",
+ index, &val)) {
+ pr_err("%pOFn or parent must have reg[%d]!\n",
+ node, index);
+ return -EINVAL;
+ }
+ }
+
+ reg->offset = val;
+ reg->ptr = NULL;
+
+ return 0;
+}
+
+void ti_clk_latch(struct clk_omap_reg *reg, s8 shift)
+{
+ u32 latch;
+
+ if (shift < 0)
+ return;
+
+ latch = 1 << shift;
+
+ ti_clk_ll_ops->clk_rmw(latch, latch, reg);
+ ti_clk_ll_ops->clk_rmw(0, latch, reg);
+ ti_clk_ll_ops->clk_readl(reg); /* OCP barrier */
+}
+
+/**
+ * omap2_clk_provider_init - init master clock provider
+ * @parent: master node
+ * @index: internal index for clk_reg_ops
+ * @syscon: syscon regmap pointer for accessing clock registers
+ * @mem: iomem pointer for the clock provider memory area, only used if
+ * syscon is not provided
+ *
+ * Initializes a master clock IP block. This basically sets up the
+ * mapping from the clocks node to the memory map index. All the clocks
+ * are then initialized through the common of_clk_init call, and the
+ * clocks will access their memory maps based on the node layout.
+ * Returns 0 on success.
+ */
+int __init omap2_clk_provider_init(struct device_node *parent, int index,
+ struct regmap *syscon, void __iomem *mem)
+{
+ struct device_node *clocks;
+ struct clk_iomap *io;
+
+ /* get clocks for this parent */
+ clocks = of_get_child_by_name(parent, "clocks");
+ if (!clocks) {
+ pr_err("%pOFn missing 'clocks' child node.\n", parent);
+ return -EINVAL;
+ }
+
+ /* add clocks node info */
+ clocks_node_ptr[index] = clocks;
+
+ io = kzalloc(sizeof(*io), GFP_KERNEL);
+ if (!io)
+ return -ENOMEM;
+
+ io->regmap = syscon;
+ io->mem = mem;
+
+ clk_memmaps[index] = io;
+
+ return 0;
+}
+
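+/*
+ * Caller sketch (assumed np/index values): SoC init code is expected to
+ * map each clock provider before of_clk_init() runs, using either a
+ * syscon regmap or a plain iomem pointer, e.g.:
+ *
+ *	void __iomem *mem = of_iomap(np, 0);
+ *
+ *	omap2_clk_provider_init(np, index, NULL, mem);
+ *
+ * of_clk_init() then initializes the individual clock nodes, which look
+ * up their registers through this mapping via ti_clk_get_reg_addr().
+ */
+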
+/**
+ * omap2_clk_legacy_provider_init - initialize a legacy clock provider
+ * @index: index for the clock provider
+ * @mem: iomem pointer for the clock provider memory area
+ *
+ * Initializes a legacy clock provider memory mapping.
+ */
+void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem)
+{
+ struct clk_iomap *io;
+
+ io = memblock_alloc(sizeof(*io), SMP_CACHE_BYTES);
+ if (!io)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*io));
+
+ io->mem = mem;
+
+ clk_memmaps[index] = io;
+}
+
+/**
+ * ti_dt_clk_init_retry_clks - init clocks from the retry list
+ *
+ * Initializes any clocks that failed to initialize earlier, typically
+ * because one or more of their parent clocks were not yet available at
+ * that point. This mainly concerns DPLLs, which need to have both of
+ * their parent clocks ready during init.
+ */
+void ti_dt_clk_init_retry_clks(void)
+{
+ struct clk_init_item *retry;
+ struct clk_init_item *tmp;
+ int retries = 5;
+
+ while (!list_empty(&retry_list) && retries) {
+ list_for_each_entry_safe(retry, tmp, &retry_list, link) {
+ pr_debug("retry-init: %pOFn\n", retry->node);
+ retry->func(retry->user, retry->node);
+ list_del(&retry->link);
+ kfree(retry);
+ }
+ retries--;
+ }
+}
+
+static const struct of_device_id simple_clk_match_table[] __initconst = {
+ { .compatible = "fixed-clock" },
+ { .compatible = "fixed-factor-clock" },
+ { }
+};
+
+/**
+ * ti_dt_clk_name - init clock name from first output name or node name
+ * @np: device node
+ *
+ * Use the first entry of the clock-output-names property as the clock
+ * name if present. Fall back to legacy naming based on the node name.
+ */
+const char *ti_dt_clk_name(struct device_node *np)
+{
+ const char *name;
+
+ if (!of_property_read_string_index(np, "clock-output-names", 0,
+ &name))
+ return name;
+
+ return np->name;
+}
+
+/**
+ * ti_clk_add_aliases - setup clock aliases
+ *
+ * Sets up any missing clock aliases. No return value.
+ */
+void __init ti_clk_add_aliases(void)
+{
+ struct device_node *np;
+ struct clk *clk;
+
+ for_each_matching_node(np, simple_clk_match_table) {
+ struct of_phandle_args clkspec;
+
+ clkspec.np = np;
+ clk = of_clk_get_from_provider(&clkspec);
+
+ ti_clk_add_alias(clk, ti_dt_clk_name(np));
+ }
+}
+
+/**
+ * ti_clk_setup_features - setup clock features flags
+ * @features: features definition to use
+ *
+ * Initializes the clock driver features flags based on platform
+ * provided data. No return value.
+ */
+void __init ti_clk_setup_features(struct ti_clk_features *features)
+{
+ memcpy(&ti_clk_features, features, sizeof(*features));
+}
+
+/**
+ * ti_clk_get_features - get clock driver features flags
+ *
+ * Get TI clock driver features description. Returns a pointer
+ * to the current feature setup.
+ */
+const struct ti_clk_features *ti_clk_get_features(void)
+{
+ return &ti_clk_features;
+}
+
+/**
+ * omap2_clk_enable_init_clocks - prepare & enable a list of clocks
+ * @clk_names: ptr to an array of strings of clock names to enable
+ * @num_clocks: number of clock names in @clk_names
+ *
+ * Prepare and enable a list of clocks, named by @clk_names. No
+ * return value. XXX Deprecated; only needed until these clocks are
+ * properly claimed and enabled by the drivers or core code that uses
+ * them. XXX What code disables & calls clk_put on these clocks?
+ */
+void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks)
+{
+ struct clk *init_clk;
+ int i;
+
+ for (i = 0; i < num_clocks; i++) {
+ init_clk = clk_get(NULL, clk_names[i]);
+ if (WARN(IS_ERR(init_clk), "could not find init clock %s\n",
+ clk_names[i]))
+ continue;
+ clk_prepare_enable(init_clk);
+ }
+}
+
+/**
+ * ti_clk_add_alias - add a clock alias for a TI clock
+ * @clk: clock handle to create alias for
+ * @con: connection ID for this clock
+ *
+ * Creates a clock alias for a TI clock. Allocates the clock lookup entry
+ * and assigns the data to it. Returns 0 if successful, negative error
+ * value otherwise.
+ */
+int ti_clk_add_alias(struct clk *clk, const char *con)
+{
+ struct clk_lookup *cl;
+
+ if (!clk)
+ return 0;
+
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return -ENOMEM;
+
+ cl->con_id = con;
+ cl->clk = clk;
+
+ clkdev_add(cl);
+
+ return 0;
+}
+
+/**
+ * of_ti_clk_register - register a TI clock to the common clock framework
+ * @node: device node for this clock
+ * @hw: hardware clock handle
+ * @con: connection ID for this clock
+ *
+ * Registers a TI clock to the common clock framework, and adds a clock
+ * alias for it. Returns a handle to the registered clock if successful,
+ * ERR_PTR value in failure.
+ */
+struct clk *of_ti_clk_register(struct device_node *node, struct clk_hw *hw,
+ const char *con)
+{
+ struct clk *clk;
+ int ret;
+
+ ret = of_clk_hw_register(node, hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ clk = hw->clk;
+ ret = ti_clk_add_alias(clk, con);
+ if (ret) {
+ clk_unregister(clk);
+ return ERR_PTR(ret);
+ }
+
+ return clk;
+}
+
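+/*
+ * Registration sketch: a clock setup path typically fills a
+ * struct clk_init_data, points the clk_hw at it and registers through
+ * this helper, much like of_dra7_atl_clock_setup() does. The my_hw,
+ * my_ops and parent_name identifiers below are placeholders.
+ *
+ *	struct clk_init_data init = { NULL };
+ *	struct clk *clk;
+ *
+ *	init.name = ti_dt_clk_name(node);
+ *	init.ops = &my_ops;
+ *	init.parent_names = &parent_name;
+ *	init.num_parents = 1;
+ *	my_hw->init = &init;
+ *
+ *	clk = of_ti_clk_register(node, my_hw, init.name);
+ *	if (!IS_ERR(clk))
+ *		of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ */
+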
+/**
+ * of_ti_clk_register_omap_hw - register a clk_hw_omap to the clock framework
+ * @node: device node for this clock
+ * @hw: hardware clock handle
+ * @con: connection ID for this clock
+ *
+ * Registers a clk_hw_omap clock to the clock framework, adds a clock alias
+ * for it, and adds the clock to the list of available clk_hw_omap type clocks.
+ * Returns a handle to the registered clock if successful, ERR_PTR value
+ * in failure.
+ */
+struct clk *of_ti_clk_register_omap_hw(struct device_node *node,
+ struct clk_hw *hw, const char *con)
+{
+ struct clk *clk;
+ struct clk_hw_omap *oclk;
+
+ clk = of_ti_clk_register(node, hw, con);
+ if (IS_ERR(clk))
+ return clk;
+
+ oclk = to_clk_hw_omap(hw);
+
+ list_add(&oclk->node, &clk_hw_omap_clocks);
+
+ return clk;
+}
+
+/**
+ * omap2_clk_for_each - call function for each registered clk_hw_omap
+ * @fn: pointer to a callback function
+ *
+ * Call @fn for each registered clk_hw_omap, passing a pointer to each
+ * clock to the callback. @fn must return 0 for success or any other
+ * value for failure. If @fn returns non-zero, the iteration across
+ * clocks will stop and the non-zero return value will be passed to
+ * the caller of omap2_clk_for_each().
+ */
+int omap2_clk_for_each(int (*fn)(struct clk_hw_omap *hw))
+{
+ int ret;
+ struct clk_hw_omap *hw;
+
+ list_for_each_entry(hw, &clk_hw_omap_clocks, node) {
+ ret = (*fn)(hw);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
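+/*
+ * Callback sketch (the _allow_autoidle name is only illustrative): power
+ * management code can walk every registered OMAP clock like this, which
+ * is how the autoidle support uses the iterator.
+ *
+ *	static int _allow_autoidle(struct clk_hw_omap *hw)
+ *	{
+ *		if (hw->ops && hw->ops->allow_idle)
+ *			hw->ops->allow_idle(hw);
+ *		return 0;
+ *	}
+ *
+ *	omap2_clk_for_each(_allow_autoidle);
+ */
+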
+/**
+ * omap2_clk_is_hw_omap - check if the provided clk_hw is an OMAP clock
+ * @hw: clk_hw to check
+ *
+ * Checks whether the provided clk_hw is an OMAP clock. Returns true if
+ * it is, false otherwise.
+ */
+bool omap2_clk_is_hw_omap(struct clk_hw *hw)
+{
+ struct clk_hw_omap *oclk;
+
+ list_for_each_entry(oclk, &clk_hw_omap_clocks, node) {
+ if (&oclk->hw == hw)
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
new file mode 100644
index 000000000..87e562478
--- /dev/null
+++ b/drivers/clk/ti/clkctrl.c
@@ -0,0 +1,749 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * OMAP clkctrl clock support
+ *
+ * Copyright (C) 2017 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+#include <linux/delay.h>
+#include <linux/timekeeping.h>
+#include "clock.h"
+
+#define NO_IDLEST 0
+
+#define OMAP4_MODULEMODE_MASK 0x3
+
+#define MODULEMODE_HWCTRL 0x1
+#define MODULEMODE_SWCTRL 0x2
+
+#define OMAP4_IDLEST_MASK (0x3 << 16)
+#define OMAP4_IDLEST_SHIFT 16
+
+#define OMAP4_STBYST_MASK BIT(18)
+#define OMAP4_STBYST_SHIFT 18
+
+#define CLKCTRL_IDLEST_FUNCTIONAL 0x0
+#define CLKCTRL_IDLEST_INTERFACE_IDLE 0x2
+#define CLKCTRL_IDLEST_DISABLED 0x3
+
+/* These timeouts are in us */
+#define OMAP4_MAX_MODULE_READY_TIME 2000
+#define OMAP4_MAX_MODULE_DISABLE_TIME 5000
+
+static bool _early_timeout = true;
+
+struct omap_clkctrl_provider {
+ void __iomem *base;
+ struct list_head clocks;
+ char *clkdm_name;
+};
+
+struct omap_clkctrl_clk {
+ struct clk_hw *clk;
+ u16 reg_offset;
+ int bit_offset;
+ struct list_head node;
+};
+
+union omap4_timeout {
+ u32 cycles;
+ ktime_t start;
+};
+
+static const struct omap_clkctrl_data default_clkctrl_data[] __initconst = {
+ { 0 },
+};
+
+static u32 _omap4_idlest(u32 val)
+{
+ val &= OMAP4_IDLEST_MASK;
+ val >>= OMAP4_IDLEST_SHIFT;
+
+ return val;
+}
+
+static bool _omap4_is_idle(u32 val)
+{
+ val = _omap4_idlest(val);
+
+ return val == CLKCTRL_IDLEST_DISABLED;
+}
+
+static bool _omap4_is_ready(u32 val)
+{
+ val = _omap4_idlest(val);
+
+ return val == CLKCTRL_IDLEST_FUNCTIONAL ||
+ val == CLKCTRL_IDLEST_INTERFACE_IDLE;
+}
+
+static bool _omap4_is_timeout(union omap4_timeout *time, u32 timeout)
+{
+ /*
+ * There are two special cases where ktime_to_ns() can't be
+ * used to track the timeouts. First one is during early boot
+ * when the timers haven't been initialized yet. The second
+ * one is during suspend-resume cycle while timekeeping is
+ * being suspended / resumed. Clocksource for the system
+ * can be from a timer that requires pm_runtime access, which
+ * will eventually bring us here with timekeeping_suspended,
+ * during both suspend entry and resume paths. This happens
+	 * at least on the am43xx platform. Account for flakiness
+ * with udelay() by multiplying the timeout value by 2.
+ */
+ if (unlikely(_early_timeout || timekeeping_suspended)) {
+ if (time->cycles++ < timeout) {
+ udelay(1 * 2);
+ return false;
+ }
+ } else {
+ if (!ktime_to_ns(time->start)) {
+ time->start = ktime_get();
+ return false;
+ }
+
+ if (ktime_us_delta(ktime_get(), time->start) < timeout) {
+ cpu_relax();
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int __init _omap4_disable_early_timeout(void)
+{
+ _early_timeout = false;
+
+ return 0;
+}
+arch_initcall(_omap4_disable_early_timeout);
+
+static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ u32 val;
+ int ret;
+ union omap4_timeout timeout = { 0 };
+
+ if (clk->clkdm) {
+ ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
+ if (ret) {
+ WARN(1,
+ "%s: could not enable %s's clockdomain %s: %d\n",
+ __func__, clk_hw_get_name(hw),
+ clk->clkdm_name, ret);
+ return ret;
+ }
+ }
+
+ if (!clk->enable_bit)
+ return 0;
+
+ val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
+
+ val &= ~OMAP4_MODULEMODE_MASK;
+ val |= clk->enable_bit;
+
+ ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);
+
+ if (test_bit(NO_IDLEST, &clk->flags))
+ return 0;
+
+ /* Wait until module is enabled */
+ while (!_omap4_is_ready(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
+ if (_omap4_is_timeout(&timeout, OMAP4_MAX_MODULE_READY_TIME)) {
+ pr_err("%s: failed to enable\n", clk_hw_get_name(hw));
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ u32 val;
+ union omap4_timeout timeout = { 0 };
+
+ if (!clk->enable_bit)
+ goto exit;
+
+ val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
+
+ val &= ~OMAP4_MODULEMODE_MASK;
+
+ ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);
+
+ if (test_bit(NO_IDLEST, &clk->flags))
+ goto exit;
+
+ /* Wait until module is disabled */
+ while (!_omap4_is_idle(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
+ if (_omap4_is_timeout(&timeout,
+ OMAP4_MAX_MODULE_DISABLE_TIME)) {
+ pr_err("%s: failed to disable\n", clk_hw_get_name(hw));
+ break;
+ }
+ }
+
+exit:
+ if (clk->clkdm)
+ ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
+}
+
+static int _omap4_clkctrl_clk_is_enabled(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ u32 val;
+
+ val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
+
+ if (val & clk->enable_bit)
+ return 1;
+
+ return 0;
+}
+
+static const struct clk_ops omap4_clkctrl_clk_ops = {
+ .enable = _omap4_clkctrl_clk_enable,
+ .disable = _omap4_clkctrl_clk_disable,
+ .is_enabled = _omap4_clkctrl_clk_is_enabled,
+ .init = omap2_init_clk_clkdm,
+};
+
+static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct omap_clkctrl_provider *provider = data;
+ struct omap_clkctrl_clk *entry = NULL, *iter;
+
+ if (clkspec->args_count != 2)
+ return ERR_PTR(-EINVAL);
+
+ pr_debug("%s: looking for %x:%x\n", __func__,
+ clkspec->args[0], clkspec->args[1]);
+
+ list_for_each_entry(iter, &provider->clocks, node) {
+ if (iter->reg_offset == clkspec->args[0] &&
+ iter->bit_offset == clkspec->args[1]) {
+ entry = iter;
+ break;
+ }
+ }
+
+ if (!entry)
+ return ERR_PTR(-EINVAL);
+
+ return entry->clk;
+}
+
+/* Get clkctrl clock base name based on clkctrl_name or dts node */
+static const char * __init clkctrl_get_clock_name(struct device_node *np,
+ const char *clkctrl_name,
+ int offset, int index,
+ bool legacy_naming)
+{
+ char *clock_name;
+
+ /* l4per-clkctrl:1234:0 style naming based on clkctrl_name */
+ if (clkctrl_name && !legacy_naming) {
+ clock_name = kasprintf(GFP_KERNEL, "%s-clkctrl:%04x:%d",
+ clkctrl_name, offset, index);
+ if (!clock_name)
+ return NULL;
+
+ strreplace(clock_name, '_', '-');
+
+ return clock_name;
+ }
+
+	/* l4per_cm:clk:1234:0 old style naming based on clkctrl_name */
+ if (clkctrl_name)
+ return kasprintf(GFP_KERNEL, "%s_cm:clk:%04x:%d",
+ clkctrl_name, offset, index);
+
+	/* l4per_cm:clk:1234:0 old style naming based on parent node name */
+ if (legacy_naming)
+ return kasprintf(GFP_KERNEL, "%pOFn:clk:%04x:%d",
+ np->parent, offset, index);
+
+ /* l4per-clkctrl:1234:0 style naming based on node name */
+ return kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", np, offset, index);
+}
+
+static int __init
+_ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
+ struct device_node *node, struct clk_hw *clk_hw,
+ u16 offset, u8 bit, const char * const *parents,
+ int num_parents, const struct clk_ops *ops,
+ const char *clkctrl_name)
+{
+ struct clk_init_data init = { NULL };
+ struct clk *clk;
+ struct omap_clkctrl_clk *clkctrl_clk;
+ int ret = 0;
+
+ init.name = clkctrl_get_clock_name(node, clkctrl_name, offset, bit,
+ ti_clk_get_features()->flags &
+ TI_CLK_CLKCTRL_COMPAT);
+
+ clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
+ if (!init.name || !clkctrl_clk) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ clk_hw->init = &init;
+ init.parent_names = parents;
+ init.num_parents = num_parents;
+ init.ops = ops;
+ init.flags = 0;
+
+ clk = of_ti_clk_register(node, clk_hw, init.name);
+ if (IS_ERR_OR_NULL(clk)) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ clkctrl_clk->reg_offset = offset;
+ clkctrl_clk->bit_offset = bit;
+ clkctrl_clk->clk = clk_hw;
+
+ list_add(&clkctrl_clk->node, &provider->clocks);
+
+ return 0;
+
+cleanup:
+ kfree(init.name);
+ kfree(clkctrl_clk);
+ return ret;
+}
+
+static void __init
+_ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider,
+ struct device_node *node, u16 offset,
+ const struct omap_clkctrl_bit_data *data,
+ void __iomem *reg, const char *clkctrl_name)
+{
+ struct clk_hw_omap *clk_hw;
+
+ clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ if (!clk_hw)
+ return;
+
+ clk_hw->enable_bit = data->bit;
+ clk_hw->enable_reg.ptr = reg;
+
+ if (_ti_clkctrl_clk_register(provider, node, &clk_hw->hw, offset,
+ data->bit, data->parents, 1,
+ &omap_gate_clk_ops, clkctrl_name))
+ kfree(clk_hw);
+}
+
+static void __init
+_ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider,
+ struct device_node *node, u16 offset,
+ const struct omap_clkctrl_bit_data *data,
+ void __iomem *reg, const char *clkctrl_name)
+{
+ struct clk_omap_mux *mux;
+ int num_parents = 0;
+ const char * const *pname;
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return;
+
+ pname = data->parents;
+ while (*pname) {
+ num_parents++;
+ pname++;
+ }
+
+ mux->mask = num_parents;
+ if (!(mux->flags & CLK_MUX_INDEX_ONE))
+ mux->mask--;
+
+ mux->mask = (1 << fls(mux->mask)) - 1;
+
+ mux->shift = data->bit;
+ mux->reg.ptr = reg;
+
+ if (_ti_clkctrl_clk_register(provider, node, &mux->hw, offset,
+ data->bit, data->parents, num_parents,
+ &ti_clk_mux_ops, clkctrl_name))
+ kfree(mux);
+}
+
+static void __init
+_ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
+ struct device_node *node, u16 offset,
+ const struct omap_clkctrl_bit_data *data,
+ void __iomem *reg, const char *clkctrl_name)
+{
+ struct clk_omap_divider *div;
+ const struct omap_clkctrl_div_data *div_data = data->data;
+ u8 div_flags = 0;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return;
+
+ div->reg.ptr = reg;
+ div->shift = data->bit;
+ div->flags = div_data->flags;
+
+ if (div->flags & CLK_DIVIDER_POWER_OF_TWO)
+ div_flags |= CLKF_INDEX_POWER_OF_TWO;
+
+ if (ti_clk_parse_divider_data((int *)div_data->dividers, 0,
+ div_data->max_div, div_flags,
+ div)) {
+ pr_err("%s: Data parsing for %pOF:%04x:%d failed\n", __func__,
+ node, offset, data->bit);
+ kfree(div);
+ return;
+ }
+
+ if (_ti_clkctrl_clk_register(provider, node, &div->hw, offset,
+ data->bit, data->parents, 1,
+ &ti_clk_divider_ops, clkctrl_name))
+ kfree(div);
+}
+
+static void __init
+_ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider,
+ struct device_node *node,
+ const struct omap_clkctrl_reg_data *data,
+ void __iomem *reg, const char *clkctrl_name)
+{
+ const struct omap_clkctrl_bit_data *bits = data->bit_data;
+
+ if (!bits)
+ return;
+
+ while (bits->bit) {
+ switch (bits->type) {
+ case TI_CLK_GATE:
+ _ti_clkctrl_setup_gate(provider, node, data->offset,
+ bits, reg, clkctrl_name);
+ break;
+
+ case TI_CLK_DIVIDER:
+ _ti_clkctrl_setup_div(provider, node, data->offset,
+ bits, reg, clkctrl_name);
+ break;
+
+ case TI_CLK_MUX:
+ _ti_clkctrl_setup_mux(provider, node, data->offset,
+ bits, reg, clkctrl_name);
+ break;
+
+ default:
+ pr_err("%s: bad subclk type: %d\n", __func__,
+ bits->type);
+ return;
+ }
+ bits++;
+ }
+}
+
+static void __init _clkctrl_add_provider(void *data,
+ struct device_node *np)
+{
+ of_clk_add_hw_provider(np, _ti_omap4_clkctrl_xlate, data);
+}
+
+/*
+ * Get clock name based on "clock-output-names" property or the
+ * compatible property for clkctrl.
+ */
+static const char * __init clkctrl_get_name(struct device_node *np)
+{
+ struct property *prop;
+ const int prefix_len = 11;
+ const char *compat;
+ const char *output;
+ char *name;
+
+ if (!of_property_read_string_index(np, "clock-output-names", 0,
+ &output)) {
+ const char *end;
+ int len;
+
+ len = strlen(output);
+ end = strstr(output, "_clkctrl");
+ if (end)
+ len -= strlen(end);
+ name = kstrndup(output, len, GFP_KERNEL);
+
+ return name;
+ }
+
+ of_property_for_each_string(np, "compatible", prop, compat) {
+ if (!strncmp("ti,clkctrl-", compat, prefix_len)) {
+ /* Two letter minimum name length for l3, l4 etc */
+ if (strnlen(compat + prefix_len, 16) < 2)
+ continue;
+ name = kasprintf(GFP_KERNEL, "%s", compat + prefix_len);
+ if (!name)
+ continue;
+ strreplace(name, '-', '_');
+
+ return name;
+ }
+ }
+
+ return NULL;
+}
+
+static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
+{
+ struct omap_clkctrl_provider *provider;
+ const struct omap_clkctrl_data *data = default_clkctrl_data;
+ const struct omap_clkctrl_reg_data *reg_data;
+ struct clk_init_data init = { NULL };
+ struct clk_hw_omap *hw;
+ struct clk *clk;
+ struct omap_clkctrl_clk *clkctrl_clk = NULL;
+ const __be32 *addrp;
+ bool legacy_naming;
+ const char *clkctrl_name;
+ u32 addr;
+ int ret;
+ char *c;
+ u16 soc_mask = 0;
+
+ addrp = of_get_address(node, 0, NULL, NULL);
+ addr = (u32)of_translate_address(node, addrp);
+
+#ifdef CONFIG_ARCH_OMAP4
+ if (of_machine_is_compatible("ti,omap4"))
+ data = omap4_clkctrl_data;
+#endif
+#ifdef CONFIG_SOC_OMAP5
+ if (of_machine_is_compatible("ti,omap5"))
+ data = omap5_clkctrl_data;
+#endif
+#ifdef CONFIG_SOC_DRA7XX
+ if (of_machine_is_compatible("ti,dra7"))
+ data = dra7_clkctrl_data;
+ if (of_machine_is_compatible("ti,dra72"))
+ soc_mask = CLKF_SOC_DRA72;
+ if (of_machine_is_compatible("ti,dra74"))
+ soc_mask = CLKF_SOC_DRA74;
+ if (of_machine_is_compatible("ti,dra76"))
+ soc_mask = CLKF_SOC_DRA76;
+#endif
+#ifdef CONFIG_SOC_AM33XX
+ if (of_machine_is_compatible("ti,am33xx"))
+ data = am3_clkctrl_data;
+#endif
+#ifdef CONFIG_SOC_AM43XX
+ if (of_machine_is_compatible("ti,am4372"))
+ data = am4_clkctrl_data;
+
+ if (of_machine_is_compatible("ti,am438x"))
+ data = am438x_clkctrl_data;
+#endif
+#ifdef CONFIG_SOC_TI81XX
+ if (of_machine_is_compatible("ti,dm814"))
+ data = dm814_clkctrl_data;
+
+ if (of_machine_is_compatible("ti,dm816"))
+ data = dm816_clkctrl_data;
+#endif
+
+ if (ti_clk_get_features()->flags & TI_CLK_DEVICE_TYPE_GP)
+ soc_mask |= CLKF_SOC_NONSEC;
+
+ while (data->addr) {
+ if (addr == data->addr)
+ break;
+
+ data++;
+ }
+
+ if (!data->addr) {
+ pr_err("%pOF not found from clkctrl data.\n", node);
+ return;
+ }
+
+ provider = kzalloc(sizeof(*provider), GFP_KERNEL);
+ if (!provider)
+ return;
+
+ provider->base = of_iomap(node, 0);
+
+ legacy_naming = ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT;
+ clkctrl_name = clkctrl_get_name(node);
+ if (clkctrl_name) {
+ provider->clkdm_name = kasprintf(GFP_KERNEL,
+ "%s_clkdm", clkctrl_name);
+ if (!provider->clkdm_name) {
+ kfree(provider);
+ return;
+ }
+ goto clkdm_found;
+ }
+
+ /*
+ * The code below can be removed when all clkctrl nodes use domain
+ * specific compatible property and standard clock node naming
+ */
+ if (legacy_naming) {
+ provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFnxxx", node->parent);
+ if (!provider->clkdm_name) {
+ kfree(provider);
+ return;
+ }
+
+ /*
+ * Create default clkdm name, replace _cm from end of parent
+ * node name with _clkdm
+ */
+ provider->clkdm_name[strlen(provider->clkdm_name) - 2] = 0;
+ } else {
+ provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFn", node);
+ if (!provider->clkdm_name) {
+ kfree(provider);
+ return;
+ }
+
+ /*
+ * Create default clkdm name, replace _clkctrl from end of
+ * node name with _clkdm
+ */
+ provider->clkdm_name[strlen(provider->clkdm_name) - 7] = 0;
+ }
+
+ strcat(provider->clkdm_name, "clkdm");
+
+ /* Replace any dash from the clkdm name with underscore */
+ c = provider->clkdm_name;
+
+ while (*c) {
+ if (*c == '-')
+ *c = '_';
+ c++;
+ }
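+
+	/*
+	 * Illustrative example of the fallback naming above: a node named
+	 * "l4ls-clkctrl" (no domain specific compatible) is truncated to
+	 * "l4ls-", "clkdm" is appended and the dash is converted, giving
+	 * the clockdomain name "l4ls_clkdm".
+	 */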
+clkdm_found:
+ INIT_LIST_HEAD(&provider->clocks);
+
+ /* Generate clocks */
+ reg_data = data->regs;
+
+ while (reg_data->parent) {
+ if ((reg_data->flags & CLKF_SOC_MASK) &&
+ (reg_data->flags & soc_mask) == 0) {
+ reg_data++;
+ continue;
+ }
+
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return;
+
+ hw->enable_reg.ptr = provider->base + reg_data->offset;
+
+ _ti_clkctrl_setup_subclks(provider, node, reg_data,
+ hw->enable_reg.ptr, clkctrl_name);
+
+ if (reg_data->flags & CLKF_SW_SUP)
+ hw->enable_bit = MODULEMODE_SWCTRL;
+ if (reg_data->flags & CLKF_HW_SUP)
+ hw->enable_bit = MODULEMODE_HWCTRL;
+ if (reg_data->flags & CLKF_NO_IDLEST)
+ set_bit(NO_IDLEST, &hw->flags);
+
+ if (reg_data->clkdm_name)
+ hw->clkdm_name = reg_data->clkdm_name;
+ else
+ hw->clkdm_name = provider->clkdm_name;
+
+ init.parent_names = &reg_data->parent;
+ init.num_parents = 1;
+ init.flags = 0;
+ if (reg_data->flags & CLKF_SET_RATE_PARENT)
+ init.flags |= CLK_SET_RATE_PARENT;
+
+ init.name = clkctrl_get_clock_name(node, clkctrl_name,
+ reg_data->offset, 0,
+ legacy_naming);
+ if (!init.name)
+ goto cleanup;
+
+ clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
+ if (!clkctrl_clk)
+ goto cleanup;
+
+ init.ops = &omap4_clkctrl_clk_ops;
+ hw->hw.init = &init;
+
+ clk = of_ti_clk_register_omap_hw(node, &hw->hw, init.name);
+ if (IS_ERR_OR_NULL(clk))
+ goto cleanup;
+
+ clkctrl_clk->reg_offset = reg_data->offset;
+ clkctrl_clk->clk = &hw->hw;
+
+ list_add(&clkctrl_clk->node, &provider->clocks);
+
+ reg_data++;
+ }
+
+ ret = of_clk_add_hw_provider(node, _ti_omap4_clkctrl_xlate, provider);
+ if (ret == -EPROBE_DEFER)
+ ti_clk_retry_init(node, provider, _clkctrl_add_provider);
+
+ kfree(clkctrl_name);
+
+ return;
+
+cleanup:
+ kfree(hw);
+ kfree(init.name);
+ kfree(clkctrl_name);
+ kfree(clkctrl_clk);
+}
+CLK_OF_DECLARE(ti_omap4_clkctrl_clock, "ti,clkctrl",
+ _ti_omap4_clkctrl_setup);
+
+/**
+ * ti_clk_is_in_standby - Check if clkctrl clock is in standby or not
+ * @clk: clock to check standby status for
+ *
+ * Finds whether the provided clock is in standby mode or not. Returns
+ * true if the provided clock is a clkctrl type clock and it is in standby,
+ * false otherwise.
+ */
+bool ti_clk_is_in_standby(struct clk *clk)
+{
+ struct clk_hw *hw;
+ struct clk_hw_omap *hwclk;
+ u32 val;
+
+ hw = __clk_get_hw(clk);
+
+ if (!omap2_clk_is_hw_omap(hw))
+ return false;
+
+ hwclk = to_clk_hw_omap(hw);
+
+ val = ti_clk_ll_ops->clk_readl(&hwclk->enable_reg);
+
+ if (val & OMAP4_STBYST_MASK)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(ti_clk_is_in_standby);
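+
+/*
+ * Illustrative usage sketch (not part of the driver): a consumer that has
+ * been handed a clkctrl clock could check the module standby status before
+ * gating it, e.g.:
+ *
+ *	if (!ti_clk_is_in_standby(fck))
+ *		pr_warn("module behind %s still active\n", __clk_get_name(fck));
+ *
+ * where "fck" is assumed to be a struct clk * obtained from this provider.
+ */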
diff --git a/drivers/clk/ti/clkt_dflt.c b/drivers/clk/ti/clkt_dflt.c
new file mode 100644
index 000000000..a756ab1a5
--- /dev/null
+++ b/drivers/clk/ti/clkt_dflt.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Default clock type
+ *
+ * Copyright (C) 2005-2008, 2015 Texas Instruments, Inc.
+ * Copyright (C) 2004-2010 Nokia Corporation
+ *
+ * Contacts:
+ * Richard Woodruff <r-woodruff2@ti.com>
+ * Paul Walmsley
+ * Tero Kristo <t-kristo@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/clk/ti.h>
+#include <linux/delay.h>
+
+#include "clock.h"
+
+/*
+ * MAX_MODULE_ENABLE_WAIT: maximum number of microseconds to wait
+ * for a module to indicate that it is no longer idle
+ */
+#define MAX_MODULE_ENABLE_WAIT 100000
+
+/*
+ * CM module register offsets, used for calculating the companion
+ * register addresses.
+ */
+#define CM_FCLKEN 0x0000
+#define CM_ICLKEN 0x0010
+
+/**
+ * _wait_idlest_generic - wait for a module to leave the idle state
+ * @clk: module clock to wait for (needed for register offsets)
+ * @reg: struct clk_omap_reg * of the module IDLEST register
+ * @mask: value to mask against to determine if the module is active
+ * @idlest: idle state indicator (0 or 1) for the clock
+ * @name: name of the clock (for printk)
+ *
+ * Wait for a module to leave idle, where its idle-status register is
+ * not inside the CM module. Returns 1 if the module left idle
+ * promptly, or 0 if the module did not leave idle before the timeout
+ * elapsed. XXX Deprecated - should be moved into drivers for the
+ * individual IP block that the IDLEST register exists in.
+ */
+static int _wait_idlest_generic(struct clk_hw_omap *clk,
+ struct clk_omap_reg *reg,
+ u32 mask, u8 idlest, const char *name)
+{
+ int i = 0, ena = 0;
+
+ ena = (idlest) ? 0 : mask;
+
+ /* Wait until module enters enabled state */
+ for (i = 0; i < MAX_MODULE_ENABLE_WAIT; i++) {
+ if ((ti_clk_ll_ops->clk_readl(reg) & mask) == ena)
+ break;
+ udelay(1);
+ }
+
+ if (i < MAX_MODULE_ENABLE_WAIT)
+ pr_debug("omap clock: module associated with clock %s ready after %d loops\n",
+ name, i);
+ else
+ pr_err("omap clock: module associated with clock %s didn't enable in %d tries\n",
+ name, MAX_MODULE_ENABLE_WAIT);
+
+ return (i < MAX_MODULE_ENABLE_WAIT) ? 1 : 0;
+}
+
+/**
+ * _omap2_module_wait_ready - wait for an OMAP module to leave IDLE
+ * @clk: struct clk * belonging to the module
+ *
+ * If the necessary clocks for the OMAP hardware IP block that
+ * corresponds to clock @clk are enabled, then wait for the module to
+ * indicate readiness (i.e., to leave IDLE). This code does not
+ * belong in the clock code and will be moved in the medium term to
+ * module-dependent code. No return value.
+ */
+static void _omap2_module_wait_ready(struct clk_hw_omap *clk)
+{
+ struct clk_omap_reg companion_reg, idlest_reg;
+ u8 other_bit, idlest_bit, idlest_val, idlest_reg_id;
+ s16 prcm_mod;
+ int r;
+
+ /* Not all modules have multiple clocks that their IDLEST depends on */
+ if (clk->ops->find_companion) {
+ clk->ops->find_companion(clk, &companion_reg, &other_bit);
+ if (!(ti_clk_ll_ops->clk_readl(&companion_reg) &
+ (1 << other_bit)))
+ return;
+ }
+
+ clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val);
+ r = ti_clk_ll_ops->cm_split_idlest_reg(&idlest_reg, &prcm_mod,
+ &idlest_reg_id);
+ if (r) {
+ /* IDLEST register not in the CM module */
+ _wait_idlest_generic(clk, &idlest_reg, (1 << idlest_bit),
+ idlest_val, clk_hw_get_name(&clk->hw));
+ } else {
+ ti_clk_ll_ops->cm_wait_module_ready(0, prcm_mod, idlest_reg_id,
+ idlest_bit);
+ }
+}
+
+/**
+ * omap2_clk_dflt_find_companion - find companion clock to @clk
+ * @clk: struct clk_hw_omap * to find the companion clock of
+ * @other_reg: struct clk_omap_reg * to return the companion CM_*CLKEN info in
+ * @other_bit: u8 * to return the companion clock bit shift in
+ *
+ * Note: We don't need special code here for INVERT_ENABLE for the
+ * time being since INVERT_ENABLE only applies to clocks enabled by
+ * CM_CLKEN_PLL
+ *
+ * Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes it's
+ * just a matter of XORing the bits.
+ *
+ * Some clocks don't have companion clocks. For example, modules with
+ * only an interface clock (such as MAILBOXES) don't have a companion
+ * clock. Right now, this code relies on the hardware exporting a bit
+ * in the correct companion register that indicates that the
+ * nonexistent 'companion clock' is active. Future patches will
+ * associate this type of code with per-module data structures to
+ * avoid this issue, and remove the casts. No return value.
+ */
+void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk,
+ struct clk_omap_reg *other_reg,
+ u8 *other_bit)
+{
+ memcpy(other_reg, &clk->enable_reg, sizeof(*other_reg));
+
+ /*
+ * Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes
+ * it's just a matter of XORing the bits.
+ */
+ other_reg->offset ^= (CM_FCLKEN ^ CM_ICLKEN);
+
+ *other_bit = clk->enable_bit;
+}
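+
+/*
+ * Worked example of the XOR conversion above: with CM_FCLKEN at offset
+ * 0x0000 and CM_ICLKEN at offset 0x0010, the XOR mask is 0x0010, so an
+ * enable register at 0x0010 (interface clock) maps to its functional
+ * clock companion at 0x0000, and vice versa.
+ */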
+
+/**
+ * omap2_clk_dflt_find_idlest - find CM_IDLEST reg va, bit shift for @clk
+ * @clk: struct clk_hw_omap * to find IDLEST info for
+ * @idlest_reg: struct clk_omap_reg * to return the CM_IDLEST info in
+ * @idlest_bit: u8 * to return the CM_IDLEST bit shift in
+ * @idlest_val: u8 * to return the idle status indicator
+ *
+ * Return the CM_IDLEST register address and bit shift corresponding
+ * to the module that "owns" this clock. This default code assumes
+ * that the CM_IDLEST bit shift is the CM_*CLKEN bit shift, and that
+ * the IDLEST register address ID corresponds to the CM_*CLKEN
+ * register address ID (e.g., that CM_FCLKEN2 corresponds to
+ * CM_IDLEST2). This is not true for all modules. No return value.
+ */
+void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk,
+ struct clk_omap_reg *idlest_reg, u8 *idlest_bit,
+ u8 *idlest_val)
+{
+ memcpy(idlest_reg, &clk->enable_reg, sizeof(*idlest_reg));
+
+ idlest_reg->offset &= ~0xf0;
+ idlest_reg->offset |= 0x20;
+
+ *idlest_bit = clk->enable_bit;
+
+ /*
+ * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
+ * 34xx reverses this, just to keep us on our toes
+ * AM35xx uses both, depending on the module.
+ */
+ *idlest_val = ti_clk_get_features()->cm_idlest_val;
+}
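+
+/*
+ * Worked example of the offset manipulation above: clearing bits 7:4 and
+ * setting 0x20 maps a CM_FCLKEN offset of 0x00 (or CM_ICLKEN at 0x10) to
+ * 0x20, and a hypothetical CM_FCLKEN2 at 0x04 to 0x24, i.e. the IDLEST
+ * register with the matching register address ID.
+ */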
+
+/**
+ * omap2_dflt_clk_enable - enable a clock in the hardware
+ * @hw: struct clk_hw * of the clock to enable
+ *
+ * Enable the clock @hw in the hardware. We first call into the OMAP
+ * clockdomain code to "enable" the corresponding clockdomain if this
+ * is the first enabled user of the clockdomain. Then program the
+ * hardware to enable the clock. Then wait for the IP block that uses
+ * this clock to leave idle (if applicable). Returns the error value
+ * from clkdm_clk_enable() if that call fails, or zero upon success.
+ */
+int omap2_dflt_clk_enable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk;
+ u32 v;
+ int ret = 0;
+ bool clkdm_control;
+
+ if (ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL)
+ clkdm_control = false;
+ else
+ clkdm_control = true;
+
+ clk = to_clk_hw_omap(hw);
+
+ if (clkdm_control && clk->clkdm) {
+ ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
+ if (ret) {
+ WARN(1,
+ "%s: could not enable %s's clockdomain %s: %d\n",
+ __func__, clk_hw_get_name(hw),
+ clk->clkdm_name, ret);
+ return ret;
+ }
+ }
+
+ /* FIXME should not have INVERT_ENABLE bit here */
+ v = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
+ if (clk->flags & INVERT_ENABLE)
+ v &= ~(1 << clk->enable_bit);
+ else
+ v |= (1 << clk->enable_bit);
+ ti_clk_ll_ops->clk_writel(v, &clk->enable_reg);
+ v = ti_clk_ll_ops->clk_readl(&clk->enable_reg); /* OCP barrier */
+
+ if (clk->ops && clk->ops->find_idlest)
+ _omap2_module_wait_ready(clk);
+
+ return 0;
+}
+
+/**
+ * omap2_dflt_clk_disable - disable a clock in the hardware
+ * @hw: struct clk_hw * of the clock to disable
+ *
+ * Disable the clock @hw in the hardware, and call into the OMAP
+ * clockdomain code to "disable" the corresponding clockdomain if all
+ * clocks/hwmods in that clockdomain are now disabled. No return
+ * value.
+ */
+void omap2_dflt_clk_disable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk;
+ u32 v;
+
+ clk = to_clk_hw_omap(hw);
+
+ v = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
+ if (clk->flags & INVERT_ENABLE)
+ v |= (1 << clk->enable_bit);
+ else
+ v &= ~(1 << clk->enable_bit);
+ ti_clk_ll_ops->clk_writel(v, &clk->enable_reg);
+ /* No OCP barrier needed here since it is a disable operation */
+
+ if (!(ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL) &&
+ clk->clkdm)
+ ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
+}
+
+/**
+ * omap2_dflt_clk_is_enabled - is clock enabled in the hardware?
+ * @hw: struct clk_hw * to check
+ *
+ * Return 1 if the clock represented by @hw is enabled in the
+ * hardware, or 0 otherwise. Intended for use in the struct
+ * clk_ops.is_enabled function pointer.
+ */
+int omap2_dflt_clk_is_enabled(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ u32 v;
+
+ v = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
+
+ if (clk->flags & INVERT_ENABLE)
+ v ^= BIT(clk->enable_bit);
+
+ v &= BIT(clk->enable_bit);
+
+ return v ? 1 : 0;
+}
+
+const struct clk_hw_omap_ops clkhwops_wait = {
+ .find_idlest = omap2_clk_dflt_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
diff --git a/drivers/clk/ti/clkt_dpll.c b/drivers/clk/ti/clkt_dpll.c
new file mode 100644
index 000000000..dfaa4d1f0
--- /dev/null
+++ b/drivers/clk/ti/clkt_dpll.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * OMAP2/3/4 DPLL clock functions
+ *
+ * Copyright (C) 2005-2008 Texas Instruments, Inc.
+ * Copyright (C) 2004-2010 Nokia Corporation
+ *
+ * Contacts:
+ * Richard Woodruff <r-woodruff2@ti.com>
+ * Paul Walmsley
+ */
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/clk/ti.h>
+
+#include <asm/div64.h>
+
+#include "clock.h"
+
+/* DPLL rate rounding: minimum DPLL multiplier, divider values */
+#define DPLL_MIN_MULTIPLIER 2
+#define DPLL_MIN_DIVIDER 1
+
+/* Possible error results from _dpll_test_mult */
+#define DPLL_MULT_UNDERFLOW -1
+
+/*
+ * Scale factor to mitigate roundoff errors in DPLL rate rounding.
+ * The higher the scale factor, the greater the risk of arithmetic overflow,
+ * but the closer the rounded rate to the target rate. DPLL_SCALE_FACTOR
+ * must be a power of DPLL_SCALE_BASE.
+ */
+#define DPLL_SCALE_FACTOR 64
+#define DPLL_SCALE_BASE 2
+#define DPLL_ROUNDING_VAL ((DPLL_SCALE_BASE / 2) * \
+ (DPLL_SCALE_FACTOR / DPLL_SCALE_BASE))
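+
+/*
+ * With the values above, DPLL_ROUNDING_VAL evaluates to
+ * (2 / 2) * (64 / 2) = 32, i.e. half of DPLL_SCALE_FACTOR: a scaled
+ * multiplier whose remainder modulo 64 is at least 32 is rounded up to
+ * the next integer multiplier in _dpll_test_mult().
+ */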
+
+/*
+ * DPLL valid Fint frequency range for OMAP36xx and OMAP4xxx.
+ * From device data manual section 4.3 "DPLL and DLL Specifications".
+ */
+#define OMAP3PLUS_DPLL_FINT_JTYPE_MIN 500000
+#define OMAP3PLUS_DPLL_FINT_JTYPE_MAX 2500000
+
+/* _dpll_test_fint() return codes */
+#define DPLL_FINT_UNDERFLOW -1
+#define DPLL_FINT_INVALID -2
+
+/* Private functions */
+
+/*
+ * _dpll_test_fint - test whether an Fint value is valid for the DPLL
+ * @clk: DPLL struct clk to test
+ * @n: divider value (N) to test
+ *
+ * Tests whether a particular divider @n will result in a valid DPLL
+ * internal clock frequency Fint. See the 34xx TRM 4.7.6.2 "DPLL Jitter
+ * Correction". Returns 0 if OK, -1 if the enclosing loop can terminate
+ * (assuming that it is counting N upwards), or -2 if the enclosing loop
+ * should skip to the next iteration (again assuming N is increasing).
+ */
+static int _dpll_test_fint(struct clk_hw_omap *clk, unsigned int n)
+{
+ struct dpll_data *dd;
+ long fint, fint_min, fint_max;
+ int ret = 0;
+
+ dd = clk->dpll_data;
+
+ /* DPLL divider must result in a valid jitter correction val */
+ fint = clk_hw_get_rate(clk_hw_get_parent(&clk->hw)) / n;
+
+ if (dd->flags & DPLL_J_TYPE) {
+ fint_min = OMAP3PLUS_DPLL_FINT_JTYPE_MIN;
+ fint_max = OMAP3PLUS_DPLL_FINT_JTYPE_MAX;
+ } else {
+ fint_min = ti_clk_get_features()->fint_min;
+ fint_max = ti_clk_get_features()->fint_max;
+ }
+
+ if (!fint_min || !fint_max) {
+ WARN(1, "No fint limits available!\n");
+ return DPLL_FINT_INVALID;
+ }
+
+	/* Compare against the (possibly J-type specific) limits chosen above */
+	if (fint < fint_min) {
+		pr_debug("rejecting n=%d due to Fint failure, lowering max_divider\n",
+			 n);
+		dd->max_divider = n;
+		ret = DPLL_FINT_UNDERFLOW;
+	} else if (fint > fint_max) {
+ pr_debug("rejecting n=%d due to Fint failure, boosting min_divider\n",
+ n);
+ dd->min_divider = n;
+ ret = DPLL_FINT_INVALID;
+ } else if (fint > ti_clk_get_features()->fint_band1_max &&
+ fint < ti_clk_get_features()->fint_band2_min) {
+ pr_debug("rejecting n=%d due to Fint failure\n", n);
+ ret = DPLL_FINT_INVALID;
+ }
+
+ return ret;
+}
+
+static unsigned long _dpll_compute_new_rate(unsigned long parent_rate,
+ unsigned int m, unsigned int n)
+{
+ unsigned long long num;
+
+ num = (unsigned long long)parent_rate * m;
+ do_div(num, n);
+ return num;
+}
+
+/*
+ * _dpll_test_mult - test a DPLL multiplier value
+ * @m: pointer to the DPLL m (multiplier) value under test
+ * @n: current DPLL n (divider) value under test
+ * @new_rate: pointer to storage for the resulting rounded rate
+ * @target_rate: the desired DPLL rate
+ * @parent_rate: the DPLL's parent clock rate
+ *
+ * This code tests a DPLL multiplier value, ensuring that the
+ * resulting rate will not be higher than the target_rate, and that
+ * the multiplier value itself is valid for the DPLL. Initially, the
+ * integer pointed to by the m argument should be prescaled by
+ * multiplying by DPLL_SCALE_FACTOR. The code will replace this with
+ * a non-scaled m upon return. This non-scaled m will result in a
+ * new_rate as close as possible to target_rate (but not greater than
+ * target_rate) given the current (parent_rate, n, prescaled m)
+ * triple. Returns DPLL_MULT_UNDERFLOW in the event that the
+ * non-scaled m attempted to underflow, which can allow the calling
+ * function to bail out early; or 0 upon success.
+ */
+static int _dpll_test_mult(int *m, int n, unsigned long *new_rate,
+ unsigned long target_rate,
+ unsigned long parent_rate)
+{
+ int r = 0, carry = 0;
+
+ /* Unscale m and round if necessary */
+ if (*m % DPLL_SCALE_FACTOR >= DPLL_ROUNDING_VAL)
+ carry = 1;
+ *m = (*m / DPLL_SCALE_FACTOR) + carry;
+
+ /*
+ * The new rate must be <= the target rate to avoid programming
+ * a rate that is impossible for the hardware to handle
+ */
+ *new_rate = _dpll_compute_new_rate(parent_rate, *m, n);
+ if (*new_rate > target_rate) {
+ (*m)--;
+ *new_rate = 0;
+ }
+
+ /* Guard against m underflow */
+ if (*m < DPLL_MIN_MULTIPLIER) {
+ *m = DPLL_MIN_MULTIPLIER;
+ *new_rate = 0;
+ r = DPLL_MULT_UNDERFLOW;
+ }
+
+ if (*new_rate == 0)
+ *new_rate = _dpll_compute_new_rate(parent_rate, *m, n);
+
+ return r;
+}
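+
+/*
+ * Worked example (illustrative numbers): with a prescaled *m of 2016 and
+ * DPLL_SCALE_FACTOR = 64, the remainder 2016 % 64 = 32 >= DPLL_ROUNDING_VAL,
+ * so a carry is added and the unscaled multiplier becomes 2016 / 64 + 1 = 32.
+ * If that multiplier then overshoots target_rate, it is decremented to 31
+ * and the rate recomputed.
+ */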
+
+/**
+ * _omap2_dpll_is_in_bypass - check if DPLL is in bypass mode or not
+ * @v: bitfield value of the DPLL enable
+ *
+ * Checks given DPLL enable bitfield to see whether the DPLL is in bypass
+ * mode or not. Returns 1 if the DPLL is in bypass, 0 otherwise.
+ */
+static int _omap2_dpll_is_in_bypass(u32 v)
+{
+ u8 mask, val;
+
+ mask = ti_clk_get_features()->dpll_bypass_vals;
+
+ /*
+ * Each set bit in the mask corresponds to a bypass value equal
+ * to the bitshift. Go through each set-bit in the mask and
+ * compare against the given register value.
+ */
+ while (mask) {
+ val = __ffs(mask);
+ mask ^= (1 << val);
+ if (v == val)
+ return 1;
+ }
+
+ return 0;
+}
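+
+/*
+ * Illustrative example (hypothetical mask): if dpll_bypass_vals were
+ * BIT(5) | BIT(6), the loop above would treat DPLL enable field values
+ * 5 and 6 as bypass and any other value (e.g. the locked value) as
+ * non-bypass.
+ */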
+
+/* Public functions */
+u8 omap2_init_dpll_parent(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ u32 v;
+ struct dpll_data *dd;
+
+ dd = clk->dpll_data;
+ if (!dd)
+ return -EINVAL;
+
+ v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
+ v &= dd->enable_mask;
+ v >>= __ffs(dd->enable_mask);
+
+ /* Reparent the struct clk in case the dpll is in bypass */
+ if (_omap2_dpll_is_in_bypass(v))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * omap2_get_dpll_rate - returns the current DPLL CLKOUT rate
+ * @clk: struct clk_hw_omap * of a DPLL
+ *
+ * DPLLs can be locked or bypassed - basically, enabled or disabled.
+ * When locked, the DPLL output depends on the M and N values. When
+ * bypassed, on OMAP2xxx, the output rate is either the 32KiHz clock
+ * or sys_clk. Bypass rates on OMAP3 depend on the DPLL: DPLLs 1 and
+ * 2 are bypassed with dpll1_fclk and dpll2_fclk respectively
+ * (generated by DPLL3), while DPLL 3, 4, and 5 bypass rates are sys_clk.
+ * Returns the current DPLL CLKOUT rate (*not* CLKOUTX2) if the DPLL is
+ * locked, or the appropriate bypass rate if the DPLL is bypassed, or 0
+ * if the clock @clk is not a DPLL.
+ */
+unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
+{
+ u64 dpll_clk;
+ u32 dpll_mult, dpll_div, v;
+ struct dpll_data *dd;
+
+ dd = clk->dpll_data;
+ if (!dd)
+ return 0;
+
+ /* Return bypass rate if DPLL is bypassed */
+ v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
+ v &= dd->enable_mask;
+ v >>= __ffs(dd->enable_mask);
+
+ if (_omap2_dpll_is_in_bypass(v))
+ return clk_hw_get_rate(dd->clk_bypass);
+
+ v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
+ dpll_mult = v & dd->mult_mask;
+ dpll_mult >>= __ffs(dd->mult_mask);
+ dpll_div = v & dd->div1_mask;
+ dpll_div >>= __ffs(dd->div1_mask);
+
+ dpll_clk = (u64)clk_hw_get_rate(dd->clk_ref) * dpll_mult;
+ do_div(dpll_clk, dpll_div + 1);
+
+ return dpll_clk;
+}
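+
+/*
+ * Worked example with purely illustrative register values: for a
+ * 19.2 MHz reference, a multiplier field M = 625 and a divider field
+ * N = 11, the locked CLKOUT rate is 19200000 * 625 / (11 + 1) = 1 GHz.
+ */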
+
+/* DPLL rate rounding code */
+
+/**
+ * omap2_dpll_round_rate - round a target rate for an OMAP DPLL
+ * @hw: struct clk_hw containing the struct clk * for a DPLL
+ * @target_rate: desired DPLL clock rate
+ * @parent_rate: parent's DPLL clock rate
+ *
+ * Given a DPLL and a desired target rate, round the target rate to a
+ * possible, programmable rate for this DPLL. Attempts to select the
+ * minimum possible n. Stores the computed (m, n) in the DPLL's
+ * dpll_data structure so set_rate() will not need to call this
+ * (expensive) function again. Returns ~0 if the target rate cannot
+ * be rounded, or the rounded rate upon success.
+ */
+long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
+ unsigned long *parent_rate)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ int m, n, r, scaled_max_m;
+ int min_delta_m = INT_MAX, min_delta_n = INT_MAX;
+ unsigned long scaled_rt_rp;
+ unsigned long new_rate = 0;
+ struct dpll_data *dd;
+ unsigned long ref_rate;
+ long delta;
+ long prev_min_delta = LONG_MAX;
+ const char *clk_name;
+
+ if (!clk || !clk->dpll_data)
+ return ~0;
+
+ dd = clk->dpll_data;
+
+ if (dd->max_rate && target_rate > dd->max_rate)
+ target_rate = dd->max_rate;
+
+ ref_rate = clk_hw_get_rate(dd->clk_ref);
+ clk_name = clk_hw_get_name(hw);
+ pr_debug("clock: %s: starting DPLL round_rate, target rate %lu\n",
+ clk_name, target_rate);
+
+ scaled_rt_rp = target_rate / (ref_rate / DPLL_SCALE_FACTOR);
+ scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR;
+
+ dd->last_rounded_rate = 0;
+
+ for (n = dd->min_divider; n <= dd->max_divider; n++) {
+ /* Is the (input clk, divider) pair valid for the DPLL? */
+ r = _dpll_test_fint(clk, n);
+ if (r == DPLL_FINT_UNDERFLOW)
+ break;
+ else if (r == DPLL_FINT_INVALID)
+ continue;
+
+ /* Compute the scaled DPLL multiplier, based on the divider */
+ m = scaled_rt_rp * n;
+
+ /*
+ * Since we're counting n up, a m overflow means we
+ * can bail out completely (since as n increases in
+ * the next iteration, there's no way that m can
+ * increase beyond the current m)
+ */
+ if (m > scaled_max_m)
+ break;
+
+ r = _dpll_test_mult(&m, n, &new_rate, target_rate,
+ ref_rate);
+
+ /* m can't be set low enough for this n - try with a larger n */
+ if (r == DPLL_MULT_UNDERFLOW)
+ continue;
+
+ /* skip rates above our target rate */
+ delta = target_rate - new_rate;
+ if (delta < 0)
+ continue;
+
+ if (delta < prev_min_delta) {
+ prev_min_delta = delta;
+ min_delta_m = m;
+ min_delta_n = n;
+ }
+
+ pr_debug("clock: %s: m = %d: n = %d: new_rate = %lu\n",
+ clk_name, m, n, new_rate);
+
+ if (delta == 0)
+ break;
+ }
+
+ if (prev_min_delta == LONG_MAX) {
+ pr_debug("clock: %s: cannot round to rate %lu\n",
+ clk_name, target_rate);
+ return ~0;
+ }
+
+ dd->last_rounded_m = min_delta_m;
+ dd->last_rounded_n = min_delta_n;
+ dd->last_rounded_rate = target_rate - prev_min_delta;
+
+ return dd->last_rounded_rate;
+}
diff --git a/drivers/clk/ti/clkt_iclk.c b/drivers/clk/ti/clkt_iclk.c
new file mode 100644
index 000000000..b738ee615
--- /dev/null
+++ b/drivers/clk/ti/clkt_iclk.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * OMAP2/3 interface clock control
+ *
+ * Copyright (C) 2011 Nokia Corporation
+ * Paul Walmsley
+ */
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+/* Register offsets */
+#define OMAP24XX_CM_FCLKEN2 0x04
+#define CM_AUTOIDLE 0x30
+#define CM_ICLKEN 0x10
+#define CM_IDLEST 0x20
+
+#define OMAP24XX_CM_IDLEST_VAL 0
+
+/* Private functions */
+
+/**
+ * omap2_clkt_iclk_allow_idle - enable hardware autoidle for an iclk
+ * @clk: interface clock to allow autoidle for
+ *
+ * Sets the bit corresponding to @clk's enable bit in the matching
+ * CM_AUTOIDLE register, letting the hardware autoidle the interface
+ * clock. No return value.
+ */
+void omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk)
+{
+ u32 v;
+ struct clk_omap_reg r;
+
+ memcpy(&r, &clk->enable_reg, sizeof(r));
+ r.offset ^= (CM_AUTOIDLE ^ CM_ICLKEN);
+
+ v = ti_clk_ll_ops->clk_readl(&r);
+ v |= (1 << clk->enable_bit);
+ ti_clk_ll_ops->clk_writel(v, &r);
+}
+
+/**
+ * omap2_clkt_iclk_deny_idle - disable hardware autoidle for an iclk
+ * @clk: interface clock to deny autoidle for
+ *
+ * Clears the bit corresponding to @clk's enable bit in the matching
+ * CM_AUTOIDLE register, preventing hardware autoidle of the interface
+ * clock. No return value.
+ */
+void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk)
+{
+ u32 v;
+ struct clk_omap_reg r;
+
+ memcpy(&r, &clk->enable_reg, sizeof(r));
+
+ r.offset ^= (CM_AUTOIDLE ^ CM_ICLKEN);
+
+ v = ti_clk_ll_ops->clk_readl(&r);
+ v &= ~(1 << clk->enable_bit);
+ ti_clk_ll_ops->clk_writel(v, &r);
+}
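+
+/*
+ * The XOR above relies on the fixed register layout defined at the top of
+ * this file: CM_AUTOIDLE ^ CM_ICLKEN = 0x30 ^ 0x10 = 0x20, so an interface
+ * clock enable register at module offset 0x10 has its autoidle bits at
+ * offset 0x30 (and one at 0x14 would map to 0x34).
+ */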
+
+/**
+ * omap2430_clk_i2chs_find_idlest - return CM_IDLEST info for 2430 I2CHS
+ * @clk: struct clk_hw_omap * being enabled
+ * @idlest_reg: struct clk_omap_reg * to store the CM_IDLEST register info into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
+ *
+ * OMAP2430 I2CHS CM_IDLEST bits are in CM_IDLEST1_CORE, but the
+ * CM_*CLKEN bits are in CM_{I,F}CLKEN2_CORE. This custom function
+ * passes back the correct CM_IDLEST register address for I2CHS
+ * modules. No return value.
+ */
+static void omap2430_clk_i2chs_find_idlest(struct clk_hw_omap *clk,
+ struct clk_omap_reg *idlest_reg,
+ u8 *idlest_bit,
+ u8 *idlest_val)
+{
+ memcpy(idlest_reg, &clk->enable_reg, sizeof(*idlest_reg));
+ idlest_reg->offset ^= (OMAP24XX_CM_FCLKEN2 ^ CM_IDLEST);
+ *idlest_bit = clk->enable_bit;
+ *idlest_val = OMAP24XX_CM_IDLEST_VAL;
+}
+
+/* Public data */
+
+const struct clk_hw_omap_ops clkhwops_iclk = {
+ .allow_idle = omap2_clkt_iclk_allow_idle,
+ .deny_idle = omap2_clkt_iclk_deny_idle,
+};
+
+const struct clk_hw_omap_ops clkhwops_iclk_wait = {
+ .allow_idle = omap2_clkt_iclk_allow_idle,
+ .deny_idle = omap2_clkt_iclk_deny_idle,
+ .find_idlest = omap2_clk_dflt_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
+
+/* 2430 I2CHS has non-standard IDLEST register */
+const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait = {
+ .find_idlest = omap2430_clk_i2chs_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
new file mode 100644
index 000000000..16a9f7c22
--- /dev/null
+++ b/drivers/clk/ti/clock.h
@@ -0,0 +1,307 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * TI Clock driver internal definitions
+ *
+ * Copyright (C) 2014 Texas Instruments, Inc
+ * Tero Kristo (t-kristo@ti.com)
+ */
+#ifndef __DRIVERS_CLK_TI_CLOCK__
+#define __DRIVERS_CLK_TI_CLOCK__
+
+struct clk_omap_divider {
+ struct clk_hw hw;
+ struct clk_omap_reg reg;
+ u8 shift;
+ u8 flags;
+ s8 latch;
+ u16 min;
+ u16 max;
+ u16 mask;
+ const struct clk_div_table *table;
+ u32 context;
+};
+
+#define to_clk_omap_divider(_hw) container_of(_hw, struct clk_omap_divider, hw)
+
+struct clk_omap_mux {
+ struct clk_hw hw;
+ struct clk_omap_reg reg;
+ u32 *table;
+ u32 mask;
+ u8 shift;
+ s8 latch;
+ u8 flags;
+ u8 saved_parent;
+};
+
+#define to_clk_omap_mux(_hw) container_of(_hw, struct clk_omap_mux, hw)
+
+enum {
+ TI_CLK_FIXED,
+ TI_CLK_MUX,
+ TI_CLK_DIVIDER,
+ TI_CLK_COMPOSITE,
+ TI_CLK_FIXED_FACTOR,
+ TI_CLK_GATE,
+ TI_CLK_DPLL,
+};
+
+/* Global flags */
+#define CLKF_INDEX_POWER_OF_TWO (1 << 0)
+#define CLKF_INDEX_STARTS_AT_ONE (1 << 1)
+#define CLKF_SET_RATE_PARENT (1 << 2)
+#define CLKF_OMAP3 (1 << 3)
+#define CLKF_AM35XX (1 << 4)
+
+/* Gate flags */
+#define CLKF_SET_BIT_TO_DISABLE (1 << 5)
+#define CLKF_INTERFACE (1 << 6)
+#define CLKF_SSI (1 << 7)
+#define CLKF_DSS (1 << 8)
+#define CLKF_HSOTGUSB (1 << 9)
+#define CLKF_WAIT (1 << 10)
+#define CLKF_NO_WAIT (1 << 11)
+#define CLKF_HSDIV (1 << 12)
+#define CLKF_CLKDM (1 << 13)
+
+/* DPLL flags */
+#define CLKF_LOW_POWER_STOP (1 << 5)
+#define CLKF_LOCK (1 << 6)
+#define CLKF_LOW_POWER_BYPASS (1 << 7)
+#define CLKF_PER (1 << 8)
+#define CLKF_CORE (1 << 9)
+#define CLKF_J_TYPE (1 << 10)
+
+/* CLKCTRL flags */
+#define CLKF_SW_SUP BIT(5)
+#define CLKF_HW_SUP BIT(6)
+#define CLKF_NO_IDLEST BIT(7)
+
+#define CLKF_SOC_MASK GENMASK(11, 8)
+
+#define CLKF_SOC_NONSEC BIT(8)
+#define CLKF_SOC_DRA72 BIT(9)
+#define CLKF_SOC_DRA74 BIT(10)
+#define CLKF_SOC_DRA76 BIT(11)
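+
+/*
+ * The CLKF_SOC_* bits are matched against the soc_mask assembled from the
+ * machine compatible in _ti_omap4_clkctrl_setup(): an entry carrying e.g.
+ * CLKF_SOC_DRA76 is only registered on dra76, while entries with no
+ * CLKF_SOC_MASK bits set are registered on every SoC variant.
+ */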
+
+#define CLK(dev, con, ck) \
+ { \
+ .lk = { \
+ .dev_id = dev, \
+ .con_id = con, \
+ }, \
+ .clk = ck, \
+ }
+
+struct ti_clk {
+ const char *name;
+ const char *clkdm_name;
+ int type;
+ void *data;
+ struct ti_clk *patch;
+ struct clk *clk;
+};
+
+struct ti_clk_mux {
+ u8 bit_shift;
+ int num_parents;
+ u16 reg;
+ u8 module;
+ const char * const *parents;
+ u16 flags;
+};
+
+struct ti_clk_divider {
+ const char *parent;
+ u8 bit_shift;
+ u16 max_div;
+ u16 reg;
+ u8 module;
+ int *dividers;
+ int num_dividers;
+ u16 flags;
+};
+
+struct ti_clk_gate {
+ const char *parent;
+ u8 bit_shift;
+ u16 reg;
+ u8 module;
+ u16 flags;
+};
+
+/* Composite clock component types */
+enum {
+ CLK_COMPONENT_TYPE_GATE = 0,
+ CLK_COMPONENT_TYPE_DIVIDER,
+ CLK_COMPONENT_TYPE_MUX,
+ CLK_COMPONENT_TYPE_MAX,
+};
+
+/**
+ * struct ti_dt_clk - OMAP DT clock alias declarations
+ * @lk: clock lookup definition
+ * @node_name: clock DT node to map to
+ */
+struct ti_dt_clk {
+ struct clk_lookup lk;
+ char *node_name;
+};
+
+#define DT_CLK(dev, con, name) \
+ { \
+ .lk = { \
+ .dev_id = dev, \
+ .con_id = con, \
+ }, \
+ .node_name = name, \
+ }
+
+/* CLKCTRL type definitions */
+struct omap_clkctrl_div_data {
+ const int *dividers;
+ int max_div;
+ u32 flags;
+};
+
+struct omap_clkctrl_bit_data {
+ u8 bit;
+ u8 type;
+ const char * const *parents;
+ const void *data;
+};
+
+struct omap_clkctrl_reg_data {
+ u16 offset;
+ const struct omap_clkctrl_bit_data *bit_data;
+ u16 flags;
+ const char *parent;
+ const char *clkdm_name;
+};
+
+struct omap_clkctrl_data {
+ u32 addr;
+ const struct omap_clkctrl_reg_data *regs;
+};
+
+extern const struct omap_clkctrl_data omap4_clkctrl_data[];
+extern const struct omap_clkctrl_data omap5_clkctrl_data[];
+extern const struct omap_clkctrl_data dra7_clkctrl_data[];
+extern const struct omap_clkctrl_data dra7_clkctrl_compat_data[];
+extern struct ti_dt_clk dra7xx_compat_clks[];
+extern const struct omap_clkctrl_data am3_clkctrl_data[];
+extern const struct omap_clkctrl_data am3_clkctrl_compat_data[];
+extern struct ti_dt_clk am33xx_compat_clks[];
+extern const struct omap_clkctrl_data am4_clkctrl_data[];
+extern const struct omap_clkctrl_data am438x_clkctrl_data[];
+extern const struct omap_clkctrl_data dm814_clkctrl_data[];
+extern const struct omap_clkctrl_data dm816_clkctrl_data[];
+
+typedef void (*ti_of_clk_init_cb_t)(void *, struct device_node *);
+
+struct clk *of_ti_clk_register(struct device_node *node, struct clk_hw *hw,
+ const char *con);
+struct clk *of_ti_clk_register_omap_hw(struct device_node *node,
+ struct clk_hw *hw, const char *con);
+const char *ti_dt_clk_name(struct device_node *np);
+int ti_clk_add_alias(struct clk *clk, const char *con);
+void ti_clk_add_aliases(void);
+
+void ti_clk_latch(struct clk_omap_reg *reg, s8 shift);
+
+struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup);
+
+int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
+ u8 flags, struct clk_omap_divider *div);
+
+int ti_clk_get_reg_addr(struct device_node *node, int index,
+ struct clk_omap_reg *reg);
+void ti_dt_clocks_register(struct ti_dt_clk *oclks);
+int ti_clk_retry_init(struct device_node *node, void *user,
+ ti_of_clk_init_cb_t func);
+int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type);
+
+int of_ti_clk_autoidle_setup(struct device_node *node);
+void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks);
+
+extern const struct clk_hw_omap_ops clkhwops_omap3_dpll;
+extern const struct clk_hw_omap_ops clkhwops_omap4_dpllmx;
+extern const struct clk_hw_omap_ops clkhwops_wait;
+extern const struct clk_hw_omap_ops clkhwops_iclk;
+extern const struct clk_hw_omap_ops clkhwops_iclk_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait;
+extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait;
+extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait;
+
+extern const struct clk_ops ti_clk_divider_ops;
+extern const struct clk_ops ti_clk_mux_ops;
+extern const struct clk_ops omap_gate_clk_ops;
+
+extern struct ti_clk_features ti_clk_features;
+
+int omap2_init_clk_clkdm(struct clk_hw *hw);
+int omap2_clkops_enable_clkdm(struct clk_hw *hw);
+void omap2_clkops_disable_clkdm(struct clk_hw *hw);
+
+int omap2_dflt_clk_enable(struct clk_hw *hw);
+void omap2_dflt_clk_disable(struct clk_hw *hw);
+int omap2_dflt_clk_is_enabled(struct clk_hw *hw);
+void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk,
+ struct clk_omap_reg *other_reg,
+ u8 *other_bit);
+void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk,
+ struct clk_omap_reg *idlest_reg,
+ u8 *idlest_bit, u8 *idlest_val);
+
+void omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk);
+void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk);
+
+u8 omap2_init_dpll_parent(struct clk_hw *hw);
+int omap3_noncore_dpll_enable(struct clk_hw *hw);
+void omap3_noncore_dpll_disable(struct clk_hw *hw);
+int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index);
+int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate,
+ u8 index);
+int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req);
+long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
+ unsigned long *parent_rate);
+unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
+ unsigned long parent_rate);
+
+/*
+ * OMAP3_DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks
+ * that are sourced by DPLL5, and both of these require this clock
+ * to be at 120 MHz for proper operation.
+ */
+#define OMAP3_DPLL5_FREQ_FOR_USBHOST 120000000
+
+unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
+int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate,
+ unsigned long parent_rate);
+int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate, u8 index);
+int omap3_dpll5_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+void omap3_clk_lock_dpll5(void);
+
+unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
+ unsigned long parent_rate);
+long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
+ unsigned long target_rate,
+ unsigned long *parent_rate);
+int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req);
+int omap2_clk_for_each(int (*fn)(struct clk_hw_omap *hw));
+
+extern struct ti_clk_ll_ops *ti_clk_ll_ops;
+
+#endif
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
new file mode 100644
index 000000000..c897ad7e6
--- /dev/null
+++ b/drivers/clk/ti/clockdomain.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * OMAP clockdomain support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+/**
+ * omap2_clkops_enable_clkdm - increment usecount on clkdm of @hw
+ * @hw: struct clk_hw * of the clock being enabled
+ *
+ * Increment the usecount of the clockdomain of the clock pointed to
+ * by @hw; if the usecount is 1, the clockdomain will be "enabled."
+ * Only needed for clocks that don't use omap2_dflt_clk_enable() as
+ * their enable function pointer. Passes along the return value of
+ * clkdm_clk_enable(), -EINVAL if @hw is not associated with a
+ * clockdomain, or 0 if clock framework-based clockdomain control is
+ * not implemented.
+ */
+int omap2_clkops_enable_clkdm(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk;
+ int ret = 0;
+
+ clk = to_clk_hw_omap(hw);
+
+ if (unlikely(!clk->clkdm)) {
+ pr_err("%s: %s: no clkdm set ?!\n", __func__,
+ clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ if (ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL) {
+ pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
+ __func__, clk_hw_get_name(hw));
+ return 0;
+ }
+
+ ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
+ WARN(ret, "%s: could not enable %s's clockdomain %s: %d\n",
+ __func__, clk_hw_get_name(hw), clk->clkdm_name, ret);
+
+ return ret;
+}
+
+/**
+ * omap2_clkops_disable_clkdm - decrement usecount on clkdm of @hw
+ * @hw: struct clk_hw * of the clock being disabled
+ *
+ * Decrement the usecount of the clockdomain of the clock pointed to
+ * by @hw; if the usecount is 0, the clockdomain will be "disabled."
+ * Only needed for clocks that don't use omap2_dflt_clk_disable() as their
+ * disable function pointer. No return value.
+ */
+void omap2_clkops_disable_clkdm(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk;
+
+ clk = to_clk_hw_omap(hw);
+
+ if (unlikely(!clk->clkdm)) {
+ pr_err("%s: %s: no clkdm set ?!\n", __func__,
+ clk_hw_get_name(hw));
+ return;
+ }
+
+ if (ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL) {
+ pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
+ __func__, clk_hw_get_name(hw));
+ return;
+ }
+
+ ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
+}
+
+/**
+ * omap2_init_clk_clkdm - look up a clockdomain name, store pointer in clk
+ * @hw: struct clk_hw * of the clock whose clockdomain should be resolved
+ *
+ * Convert the clockdomain name stored in the clock's struct clk_hw_omap
+ * into a clockdomain pointer and save it back into that structure.
+ * Intended to be called during clk_register(). Always returns 0; if the
+ * clockdomain cannot be looked up, the clock is left without a
+ * clockdomain association.
+ */
+int omap2_init_clk_clkdm(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct clockdomain *clkdm;
+ const char *clk_name;
+
+ if (!clk->clkdm_name)
+ return 0;
+
+ clk_name = __clk_get_name(hw->clk);
+
+ clkdm = ti_clk_ll_ops->clkdm_lookup(clk->clkdm_name);
+ if (clkdm) {
+ pr_debug("clock: associated clk %s to clkdm %s\n",
+ clk_name, clk->clkdm_name);
+ clk->clkdm = clkdm;
+ } else {
+ pr_debug("clock: could not associate clk %s to clkdm %s\n",
+ clk_name, clk->clkdm_name);
+ }
+
+ return 0;
+}
+
+static void __init of_ti_clockdomain_setup(struct device_node *node)
+{
+ struct clk *clk;
+ struct clk_hw *clk_hw;
+ const char *clkdm_name = ti_dt_clk_name(node);
+ int i;
+ unsigned int num_clks;
+
+ num_clks = of_clk_get_parent_count(node);
+
+ for (i = 0; i < num_clks; i++) {
+ clk = of_clk_get(node, i);
+ if (IS_ERR(clk)) {
+			pr_err("%s: failed to get %pOF clock nr %d (%ld)\n",
+ __func__, node, i, PTR_ERR(clk));
+ continue;
+ }
+ clk_hw = __clk_get_hw(clk);
+ if (!omap2_clk_is_hw_omap(clk_hw)) {
+ pr_warn("can't setup clkdm for basic clk %s\n",
+ __clk_get_name(clk));
+ clk_put(clk);
+ continue;
+ }
+ to_clk_hw_omap(clk_hw)->clkdm_name = clkdm_name;
+ omap2_init_clk_clkdm(clk_hw);
+ clk_put(clk);
+ }
+}
+
+static const struct of_device_id ti_clkdm_match_table[] __initconst = {
+ { .compatible = "ti,clockdomain" },
+ { }
+};
+
+/**
+ * ti_dt_clockdomains_setup - setup device tree clockdomains
+ *
+ * Initializes clockdomain nodes for a SoC. This parses all nodes with
+ * compatible = "ti,clockdomain" and adds the clockdomain info for every
+ * clock listed under them. This function shall be called after the rest
+ * of the DT clock init has completed and all clock nodes have been
+ * registered.
+ */
+void __init ti_dt_clockdomains_setup(void)
+{
+	struct device_node *np;
+
+ for_each_matching_node(np, ti_clkdm_match_table) {
+ of_ti_clockdomain_setup(np);
+ }
+}
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
new file mode 100644
index 000000000..b85382c37
--- /dev/null
+++ b/drivers/clk/ti/composite.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI composite clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+#include <linux/list.h>
+
+#include "clock.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+static unsigned long ti_composite_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return ti_clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static long ti_composite_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return -EINVAL;
+}
+
+static int ti_composite_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return -EINVAL;
+}
+
+static const struct clk_ops ti_composite_divider_ops = {
+ .recalc_rate = &ti_composite_recalc_rate,
+ .round_rate = &ti_composite_round_rate,
+ .set_rate = &ti_composite_set_rate,
+};
+
+static const struct clk_ops ti_composite_gate_ops = {
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+};
+
+struct component_clk {
+ int num_parents;
+ const char **parent_names;
+ struct device_node *node;
+ int type;
+ struct clk_hw *hw;
+ struct list_head link;
+};
+
+static const char * const component_clk_types[] __initconst = {
+ "gate", "divider", "mux"
+};
+
+static LIST_HEAD(component_clks);
+
+static struct device_node *_get_component_node(struct device_node *node, int i)
+{
+ int rc;
+ struct of_phandle_args clkspec;
+
+ rc = of_parse_phandle_with_args(node, "clocks", "#clock-cells", i,
+ &clkspec);
+ if (rc)
+ return NULL;
+
+ return clkspec.np;
+}
+
+static struct component_clk *_lookup_component(struct device_node *node)
+{
+ struct component_clk *comp;
+
+ list_for_each_entry(comp, &component_clks, link) {
+ if (comp->node == node)
+ return comp;
+ }
+ return NULL;
+}
+
+struct clk_hw_omap_comp {
+ struct clk_hw hw;
+ struct device_node *comp_nodes[CLK_COMPONENT_TYPE_MAX];
+ struct component_clk *comp_clks[CLK_COMPONENT_TYPE_MAX];
+};
+
+static inline struct clk_hw *_get_hw(struct clk_hw_omap_comp *clk, int idx)
+{
+ if (!clk)
+ return NULL;
+
+ if (!clk->comp_clks[idx])
+ return NULL;
+
+ return clk->comp_clks[idx]->hw;
+}
+
+#define to_clk_hw_comp(_hw) container_of(_hw, struct clk_hw_omap_comp, hw)
+
+static void __init _register_composite(void *user,
+ struct device_node *node)
+{
+ struct clk_hw *hw = user;
+ struct clk *clk;
+ struct clk_hw_omap_comp *cclk = to_clk_hw_comp(hw);
+ struct component_clk *comp;
+ int num_parents = 0;
+ const char **parent_names = NULL;
+ const char *name;
+ int i;
+ int ret;
+
+ /* Check for presence of each component clock */
+ for (i = 0; i < CLK_COMPONENT_TYPE_MAX; i++) {
+ if (!cclk->comp_nodes[i])
+ continue;
+
+ comp = _lookup_component(cclk->comp_nodes[i]);
+ if (!comp) {
+ pr_debug("component %s not ready for %pOFn, retry\n",
+ cclk->comp_nodes[i]->name, node);
+ if (!ti_clk_retry_init(node, hw,
+ _register_composite))
+ return;
+
+ goto cleanup;
+ }
+ if (cclk->comp_clks[comp->type] != NULL) {
+ pr_err("duplicate component types for %pOFn (%s)!\n",
+ node, component_clk_types[comp->type]);
+ goto cleanup;
+ }
+
+ cclk->comp_clks[comp->type] = comp;
+
+ /* Mark this node as found */
+ cclk->comp_nodes[i] = NULL;
+ }
+
+	/* All components exist, proceed with registration */
+ for (i = CLK_COMPONENT_TYPE_MAX - 1; i >= 0; i--) {
+ comp = cclk->comp_clks[i];
+ if (!comp)
+ continue;
+ if (comp->num_parents) {
+ num_parents = comp->num_parents;
+ parent_names = comp->parent_names;
+ break;
+ }
+ }
+
+ if (!num_parents) {
+ pr_err("%s: no parents found for %pOFn!\n", __func__, node);
+ goto cleanup;
+ }
+
+ name = ti_dt_clk_name(node);
+ clk = clk_register_composite(NULL, name,
+ parent_names, num_parents,
+ _get_hw(cclk, CLK_COMPONENT_TYPE_MUX),
+ &ti_clk_mux_ops,
+ _get_hw(cclk, CLK_COMPONENT_TYPE_DIVIDER),
+ &ti_composite_divider_ops,
+ _get_hw(cclk, CLK_COMPONENT_TYPE_GATE),
+ &ti_composite_gate_ops, 0);
+
+ if (!IS_ERR(clk)) {
+ ret = ti_clk_add_alias(clk, name);
+ if (ret) {
+ clk_unregister(clk);
+ goto cleanup;
+ }
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ }
+
+cleanup:
+ /* Free component clock list entries */
+ for (i = 0; i < CLK_COMPONENT_TYPE_MAX; i++) {
+ if (!cclk->comp_clks[i])
+ continue;
+ list_del(&cclk->comp_clks[i]->link);
+ kfree(cclk->comp_clks[i]->parent_names);
+ kfree(cclk->comp_clks[i]);
+ }
+
+ kfree(cclk);
+}
+
+static void __init of_ti_composite_clk_setup(struct device_node *node)
+{
+ unsigned int num_clks;
+ int i;
+ struct clk_hw_omap_comp *cclk;
+
+ /* Number of component clocks to be put inside this clock */
+ num_clks = of_clk_get_parent_count(node);
+
+ if (!num_clks) {
+ pr_err("composite clk %pOFn must have component(s)\n", node);
+ return;
+ }
+
+ cclk = kzalloc(sizeof(*cclk), GFP_KERNEL);
+ if (!cclk)
+ return;
+
+ /* Get device node pointers for each component clock */
+ for (i = 0; i < num_clks; i++)
+ cclk->comp_nodes[i] = _get_component_node(node, i);
+
+ _register_composite(&cclk->hw, node);
+}
+CLK_OF_DECLARE(ti_composite_clock, "ti,composite-clock",
+ of_ti_composite_clk_setup);
+
+/**
+ * ti_clk_add_component - add a component clock to the pool
+ * @node: device node of the component clock
+ * @hw: hardware clock definition for the component clock
+ * @type: type of the component clock
+ *
+ * Adds a component clock to the list of available components, so that
+ * it can later be registered as part of a composite clock. Returns 0 on
+ * success or a negative error value on failure.
+ */
+int __init ti_clk_add_component(struct device_node *node, struct clk_hw *hw,
+ int type)
+{
+ unsigned int num_parents;
+ const char **parent_names;
+ struct component_clk *clk;
+
+ num_parents = of_clk_get_parent_count(node);
+
+ if (!num_parents) {
+ pr_err("component-clock %pOFn must have parent(s)\n", node);
+ return -EINVAL;
+ }
+
+ parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
+ if (!parent_names)
+ return -ENOMEM;
+
+ of_clk_parent_fill(node, parent_names, num_parents);
+
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk) {
+ kfree(parent_names);
+ return -ENOMEM;
+ }
+
+ clk->num_parents = num_parents;
+ clk->parent_names = parent_names;
+ clk->hw = hw;
+ clk->node = node;
+ clk->type = type;
+ list_add(&clk->link, &component_clks);
+
+ return 0;
+}
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
new file mode 100644
index 000000000..5d5bb123b
--- /dev/null
+++ b/drivers/clk/ti/divider.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI Divider Clock
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+#include "clock.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+static unsigned int _get_table_div(const struct clk_div_table *table,
+ unsigned int val)
+{
+ const struct clk_div_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->val == val)
+ return clkt->div;
+ return 0;
+}
+
+static void _setup_mask(struct clk_omap_divider *divider)
+{
+ u16 mask;
+ u32 max_val;
+ const struct clk_div_table *clkt;
+
+ if (divider->table) {
+ max_val = 0;
+
+ for (clkt = divider->table; clkt->div; clkt++)
+ if (clkt->val > max_val)
+ max_val = clkt->val;
+ } else {
+ max_val = divider->max;
+
+ if (!(divider->flags & CLK_DIVIDER_ONE_BASED) &&
+ !(divider->flags & CLK_DIVIDER_POWER_OF_TWO))
+ max_val--;
+ }
+
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ mask = fls(max_val) - 1;
+ else
+ mask = max_val;
+
+ divider->mask = (1 << fls(mask)) - 1;
+}
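+
+/*
+ * Worked examples for _setup_mask() (illustrative dividers): a default
+ * divider with max = 8 stores div - 1 in the register, so max_val = 7 and
+ * the mask becomes (1 << fls(7)) - 1 = 0x7; a power-of-two divider with
+ * max = 4 stores log2(div), so the field maximum is fls(4) - 1 = 2 and the
+ * mask becomes (1 << fls(2)) - 1 = 0x3.
+ */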
+
+static unsigned int _get_div(struct clk_omap_divider *divider, unsigned int val)
+{
+ if (divider->flags & CLK_DIVIDER_ONE_BASED)
+ return val;
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ return 1 << val;
+ if (divider->table)
+ return _get_table_div(divider->table, val);
+ return val + 1;
+}
+
+static unsigned int _get_table_val(const struct clk_div_table *table,
+ unsigned int div)
+{
+ const struct clk_div_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->div == div)
+ return clkt->val;
+ return 0;
+}
+
+static unsigned int _get_val(struct clk_omap_divider *divider, u8 div)
+{
+ if (divider->flags & CLK_DIVIDER_ONE_BASED)
+ return div;
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ return __ffs(div);
+ if (divider->table)
+ return _get_table_val(divider->table, div);
+ return div - 1;
+}
+
+static unsigned long ti_clk_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_omap_divider *divider = to_clk_omap_divider(hw);
+ unsigned int div, val;
+
+ val = ti_clk_ll_ops->clk_readl(&divider->reg) >> divider->shift;
+ val &= divider->mask;
+
+ div = _get_div(divider, val);
+ if (!div) {
+ WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
+ "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
+ clk_hw_get_name(hw));
+ return parent_rate;
+ }
+
+ return DIV_ROUND_UP(parent_rate, div);
+}
+
+/*
+ * The reverse of DIV_ROUND_UP: the largest number that still yields r
+ * when (integer-)divided by m
+ */
+#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1)
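+
+/*
+ * Example: MULT_ROUND_UP(100, 8) = 807, and 807 / 8 = 100 with integer
+ * division, while 808 / 8 would already give 101. This is used below to
+ * ask the parent for the highest rate that still divides down to the
+ * requested rate.
+ */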
+
+static bool _is_valid_table_div(const struct clk_div_table *table,
+ unsigned int div)
+{
+ const struct clk_div_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->div == div)
+ return true;
+ return false;
+}
+
+static bool _is_valid_div(struct clk_omap_divider *divider, unsigned int div)
+{
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ return is_power_of_2(div);
+ if (divider->table)
+ return _is_valid_table_div(divider->table, div);
+ return true;
+}
+
+static int _div_round_up(const struct clk_div_table *table,
+ unsigned long parent_rate, unsigned long rate)
+{
+ const struct clk_div_table *clkt;
+ int up = INT_MAX;
+ int div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
+
+ for (clkt = table; clkt->div; clkt++) {
+ if (clkt->div == div)
+ return clkt->div;
+ else if (clkt->div < div)
+ continue;
+
+ if ((clkt->div - div) < (up - div))
+ up = clkt->div;
+ }
+
+ return up;
+}
+
+static int _div_round(const struct clk_div_table *table,
+ unsigned long parent_rate, unsigned long rate)
+{
+ if (!table)
+ return DIV_ROUND_UP(parent_rate, rate);
+
+ return _div_round_up(table, parent_rate, rate);
+}
+
+static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate)
+{
+ struct clk_omap_divider *divider = to_clk_omap_divider(hw);
+ int i, bestdiv = 0;
+ unsigned long parent_rate, best = 0, now, maxdiv;
+ unsigned long parent_rate_saved = *best_parent_rate;
+
+ if (!rate)
+ rate = 1;
+
+ maxdiv = divider->max;
+
+ if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
+ parent_rate = *best_parent_rate;
+ bestdiv = _div_round(divider->table, parent_rate, rate);
+ bestdiv = bestdiv == 0 ? 1 : bestdiv;
+ bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
+ return bestdiv;
+ }
+
+ /*
+ * The maximum divider we can use without overflowing
+ * unsigned long in rate * i below
+ */
+ maxdiv = min(ULONG_MAX / rate, maxdiv);
+
+ for (i = 1; i <= maxdiv; i++) {
+ if (!_is_valid_div(divider, i))
+ continue;
+ if (rate * i == parent_rate_saved) {
+ /*
+ * It's the most ideal case if the requested rate can be
+ * divided from parent clock without needing to change
+ * parent rate, so return the divider immediately.
+ */
+ *best_parent_rate = parent_rate_saved;
+ return i;
+ }
+ parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
+ MULT_ROUND_UP(rate, i));
+ now = DIV_ROUND_UP(parent_rate, i);
+ if (now <= rate && now > best) {
+ bestdiv = i;
+ best = now;
+ *best_parent_rate = parent_rate;
+ }
+ }
+
+ if (!bestdiv) {
+ bestdiv = divider->max;
+ *best_parent_rate =
+ clk_hw_round_rate(clk_hw_get_parent(hw), 1);
+ }
+
+ return bestdiv;
+}
+
+static long ti_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int div;
+ div = ti_clk_divider_bestdiv(hw, rate, prate);
+
+ return DIV_ROUND_UP(*prate, div);
+}
+
+static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_omap_divider *divider;
+ unsigned int div, value;
+ u32 val;
+
+ if (!hw || !rate)
+ return -EINVAL;
+
+ divider = to_clk_omap_divider(hw);
+
+ div = DIV_ROUND_UP(parent_rate, rate);
+
+ if (div > divider->max)
+ div = divider->max;
+ if (div < divider->min)
+ div = divider->min;
+
+ value = _get_val(divider, div);
+
+ val = ti_clk_ll_ops->clk_readl(&divider->reg);
+ val &= ~(divider->mask << divider->shift);
+ val |= value << divider->shift;
+ ti_clk_ll_ops->clk_writel(val, &divider->reg);
+
+ ti_clk_latch(&divider->reg, divider->latch);
+
+ return 0;
+}
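+
+/*
+ * Worked example (illustrative rates): dividing a 192 MHz parent down to
+ * 48 MHz gives div = 4; with the default encoding (register value is
+ * div - 1) the value 3 is written to the divider field and then latched
+ * if a latch bit is configured.
+ */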
+
+/**
+ * clk_divider_save_context - Save the divider value
+ * @hw: pointer to struct clk_hw
+ *
+ * Save the current divider field value so it can be restored later
+ */
+static int clk_divider_save_context(struct clk_hw *hw)
+{
+ struct clk_omap_divider *divider = to_clk_omap_divider(hw);
+ u32 val;
+
+ val = ti_clk_ll_ops->clk_readl(&divider->reg) >> divider->shift;
+ divider->context = val & divider->mask;
+
+ return 0;
+}
+
+/**
+ * clk_divider_restore_context - restore the saved divider value
+ * @hw: pointer to struct clk_hw
+ *
+ * Restore the previously saved divider field value
+ */
+static void clk_divider_restore_context(struct clk_hw *hw)
+{
+ struct clk_omap_divider *divider = to_clk_omap_divider(hw);
+ u32 val;
+
+ val = ti_clk_ll_ops->clk_readl(&divider->reg);
+ val &= ~(divider->mask << divider->shift);
+ val |= divider->context << divider->shift;
+ ti_clk_ll_ops->clk_writel(val, &divider->reg);
+}
+
+const struct clk_ops ti_clk_divider_ops = {
+ .recalc_rate = ti_clk_divider_recalc_rate,
+ .round_rate = ti_clk_divider_round_rate,
+ .set_rate = ti_clk_divider_set_rate,
+ .save_context = clk_divider_save_context,
+ .restore_context = clk_divider_restore_context,
+};
+
+static struct clk *_register_divider(struct device_node *node,
+ u32 flags,
+ struct clk_omap_divider *div)
+{
+ struct clk_init_data init;
+ const char *parent_name;
+ const char *name;
+
+ parent_name = of_clk_get_parent_name(node, 0);
+
+ name = ti_dt_clk_name(node);
+ init.name = name;
+ init.ops = &ti_clk_divider_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ div->hw.init = &init;
+
+ /* register the clock */
+ return of_ti_clk_register(node, &div->hw, name);
+}
+
+int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
+ u8 flags, struct clk_omap_divider *divider)
+{
+ int valid_div = 0;
+ int i;
+ struct clk_div_table *tmp;
+ u16 min_div = 0;
+
+ if (!div_table) {
+ divider->min = 1;
+ divider->max = max_div;
+ _setup_mask(divider);
+ return 0;
+ }
+
+ i = 0;
+
+ while (!num_dividers || i < num_dividers) {
+ if (div_table[i] == -1)
+ break;
+ if (div_table[i])
+ valid_div++;
+ i++;
+ }
+
+ num_dividers = i;
+
+ tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ valid_div = 0;
+
+ for (i = 0; i < num_dividers; i++)
+ if (div_table[i] > 0) {
+ tmp[valid_div].div = div_table[i];
+ tmp[valid_div].val = i;
+ valid_div++;
+ if (div_table[i] > max_div)
+ max_div = div_table[i];
+ if (!min_div || div_table[i] < min_div)
+ min_div = div_table[i];
+ }
+
+ divider->min = min_div;
+ divider->max = max_div;
+ divider->table = tmp;
+ _setup_mask(divider);
+
+ return 0;
+}
+
+static int __init ti_clk_get_div_table(struct device_node *node,
+ struct clk_omap_divider *div)
+{
+ struct clk_div_table *table;
+ const __be32 *divspec;
+ u32 val;
+ u32 num_div;
+ u32 valid_div;
+ int i;
+
+ divspec = of_get_property(node, "ti,dividers", &num_div);
+
+ if (!divspec)
+ return 0;
+
+ num_div /= 4;
+
+ valid_div = 0;
+
+ /* Determine required size for divider table */
+ for (i = 0; i < num_div; i++) {
+ of_property_read_u32_index(node, "ti,dividers", i, &val);
+ if (val)
+ valid_div++;
+ }
+
+ if (!valid_div) {
+ pr_err("no valid dividers for %pOFn table\n", node);
+ return -EINVAL;
+ }
+
+ table = kcalloc(valid_div + 1, sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ valid_div = 0;
+
+ for (i = 0; i < num_div; i++) {
+ of_property_read_u32_index(node, "ti,dividers", i, &val);
+ if (val) {
+ table[valid_div].div = val;
+ table[valid_div].val = i;
+ valid_div++;
+ }
+ }
+
+ div->table = table;
+
+ return 0;
+}
+
+static int _populate_divider_min_max(struct device_node *node,
+ struct clk_omap_divider *divider)
+{
+ u32 min_div = 0;
+ u32 max_div = 0;
+ u32 val;
+ const struct clk_div_table *clkt;
+
+ if (!divider->table) {
+ /* Clk divider table not provided, determine min/max divs */
+ if (of_property_read_u32(node, "ti,min-div", &min_div))
+ min_div = 1;
+
+ if (of_property_read_u32(node, "ti,max-div", &max_div)) {
+ pr_err("no max-div for %pOFn!\n", node);
+ return -EINVAL;
+ }
+ } else {
+
+ for (clkt = divider->table; clkt->div; clkt++) {
+ val = clkt->div;
+ if (val > max_div)
+ max_div = val;
+ if (!min_div || val < min_div)
+ min_div = val;
+ }
+ }
+
+ divider->min = min_div;
+ divider->max = max_div;
+ _setup_mask(divider);
+
+ return 0;
+}
+
+static int __init ti_clk_divider_populate(struct device_node *node,
+ struct clk_omap_divider *div,
+ u32 *flags)
+{
+ u32 val;
+ int ret;
+
+ ret = ti_clk_get_reg_addr(node, 0, &div->reg);
+ if (ret)
+ return ret;
+
+ if (!of_property_read_u32(node, "ti,bit-shift", &val))
+ div->shift = val;
+ else
+ div->shift = 0;
+
+ if (!of_property_read_u32(node, "ti,latch-bit", &val))
+ div->latch = val;
+ else
+ div->latch = -EINVAL;
+
+ *flags = 0;
+ div->flags = 0;
+
+ if (of_property_read_bool(node, "ti,index-starts-at-one"))
+ div->flags |= CLK_DIVIDER_ONE_BASED;
+
+ if (of_property_read_bool(node, "ti,index-power-of-two"))
+ div->flags |= CLK_DIVIDER_POWER_OF_TWO;
+
+ if (of_property_read_bool(node, "ti,set-rate-parent"))
+ *flags |= CLK_SET_RATE_PARENT;
+
+ ret = ti_clk_get_div_table(node, div);
+ if (ret)
+ return ret;
+
+ return _populate_divider_min_max(node, div);
+}
+
+/**
+ * of_ti_divider_clk_setup - Setup function for simple div rate clock
+ * @node: device node for this clock
+ *
+ * Sets up a basic divider clock.
+ */
+static void __init of_ti_divider_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+ u32 flags = 0;
+ struct clk_omap_divider *div;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return;
+
+ if (ti_clk_divider_populate(node, div, &flags))
+ goto cleanup;
+
+ clk = _register_divider(node, flags, div);
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ of_ti_clk_autoidle_setup(node);
+ return;
+ }
+
+cleanup:
+ kfree(div->table);
+ kfree(div);
+}
+CLK_OF_DECLARE(divider_clk, "ti,divider-clock", of_ti_divider_clk_setup);
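+/*
+ * Illustrative device-tree usage for the binding consumed above; the node
+ * name, clock handle and register offset are placeholders only:
+ *
+ *	example_div_ck: clock@190 {
+ *		compatible = "ti,divider-clock";
+ *		clocks = <&example_parent_ck>;
+ *		ti,max-div = <127>;
+ *		reg = <0x190>;
+ *		ti,index-starts-at-one;
+ *	};
+ */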
+
+static void __init of_ti_composite_divider_clk_setup(struct device_node *node)
+{
+ struct clk_omap_divider *div;
+ u32 tmp;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return;
+
+ if (ti_clk_divider_populate(node, div, &tmp))
+ goto cleanup;
+
+ if (!ti_clk_add_component(node, &div->hw, CLK_COMPONENT_TYPE_DIVIDER))
+ return;
+
+cleanup:
+ kfree(div->table);
+ kfree(div);
+}
+CLK_OF_DECLARE(ti_composite_divider_clk, "ti,composite-divider-clock",
+ of_ti_composite_divider_clk_setup);
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
new file mode 100644
index 000000000..403ec81f5
--- /dev/null
+++ b/drivers/clk/ti/dpll.c
@@ -0,0 +1,726 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * OMAP DPLL clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+#include "clock.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+ defined(CONFIG_SOC_DRA7XX)
+static const struct clk_ops dpll_m4xen_ck_ops = {
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .recalc_rate = &omap4_dpll_regm4xen_recalc,
+ .round_rate = &omap4_dpll_regm4xen_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .set_parent = &omap3_noncore_dpll_set_parent,
+ .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
+ .determine_rate = &omap4_dpll_regm4xen_determine_rate,
+ .get_parent = &omap2_init_dpll_parent,
+ .save_context = &omap3_core_dpll_save_context,
+ .restore_context = &omap3_core_dpll_restore_context,
+};
+#else
+static const struct clk_ops dpll_m4xen_ck_ops = {};
+#endif
+
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) || \
+ defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX) || \
+ defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
+static const struct clk_ops dpll_core_ck_ops = {
+ .recalc_rate = &omap3_dpll_recalc,
+ .get_parent = &omap2_init_dpll_parent,
+};
+
+static const struct clk_ops dpll_ck_ops = {
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .recalc_rate = &omap3_dpll_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .set_parent = &omap3_noncore_dpll_set_parent,
+ .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
+ .determine_rate = &omap3_noncore_dpll_determine_rate,
+ .get_parent = &omap2_init_dpll_parent,
+ .save_context = &omap3_noncore_dpll_save_context,
+ .restore_context = &omap3_noncore_dpll_restore_context,
+};
+
+static const struct clk_ops dpll_no_gate_ck_ops = {
+ .recalc_rate = &omap3_dpll_recalc,
+ .get_parent = &omap2_init_dpll_parent,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .set_parent = &omap3_noncore_dpll_set_parent,
+ .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
+ .determine_rate = &omap3_noncore_dpll_determine_rate,
+ .save_context = &omap3_noncore_dpll_save_context,
+ .restore_context = &omap3_noncore_dpll_restore_context
+};
+#else
+static const struct clk_ops dpll_core_ck_ops = {};
+static const struct clk_ops dpll_ck_ops = {};
+static const struct clk_ops dpll_no_gate_ck_ops = {};
+const struct clk_hw_omap_ops clkhwops_omap3_dpll = {};
+#endif
+
+#ifdef CONFIG_ARCH_OMAP2
+static const struct clk_ops omap2_dpll_core_ck_ops = {
+ .get_parent = &omap2_init_dpll_parent,
+ .recalc_rate = &omap2_dpllcore_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap2_reprogram_dpllcore,
+};
+#else
+static const struct clk_ops omap2_dpll_core_ck_ops = {};
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
+static const struct clk_ops omap3_dpll_core_ck_ops = {
+ .get_parent = &omap2_init_dpll_parent,
+ .recalc_rate = &omap3_dpll_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+};
+#else
+static const struct clk_ops omap3_dpll_core_ck_ops = {};
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
+static const struct clk_ops omap3_dpll_ck_ops = {
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .get_parent = &omap2_init_dpll_parent,
+ .recalc_rate = &omap3_dpll_recalc,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .set_parent = &omap3_noncore_dpll_set_parent,
+ .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
+ .determine_rate = &omap3_noncore_dpll_determine_rate,
+ .round_rate = &omap2_dpll_round_rate,
+};
+
+static const struct clk_ops omap3_dpll5_ck_ops = {
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .get_parent = &omap2_init_dpll_parent,
+ .recalc_rate = &omap3_dpll_recalc,
+ .set_rate = &omap3_dpll5_set_rate,
+ .set_parent = &omap3_noncore_dpll_set_parent,
+ .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
+ .determine_rate = &omap3_noncore_dpll_determine_rate,
+ .round_rate = &omap2_dpll_round_rate,
+};
+
+static const struct clk_ops omap3_dpll_per_ck_ops = {
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+ .get_parent = &omap2_init_dpll_parent,
+ .recalc_rate = &omap3_dpll_recalc,
+ .set_rate = &omap3_dpll4_set_rate,
+ .set_parent = &omap3_noncore_dpll_set_parent,
+ .set_rate_and_parent = &omap3_dpll4_set_rate_and_parent,
+ .determine_rate = &omap3_noncore_dpll_determine_rate,
+ .round_rate = &omap2_dpll_round_rate,
+};
+#endif
+
+static const struct clk_ops dpll_x2_ck_ops = {
+ .recalc_rate = &omap3_clkoutx2_recalc,
+};
+
+/**
+ * _register_dpll - low level registration of a DPLL clock
+ * @user: pointer to the hardware clock definition for the clock
+ * @node: device node for the clock
+ *
+ * Finalizes the DPLL registration process. In case of a failure (clk-ref or
+ * clk-bypass is missing), the clock is added to the retry list and the
+ * initialization is retried at a later stage.
+ */
+static void __init _register_dpll(void *user,
+ struct device_node *node)
+{
+ struct clk_hw *hw = user;
+ struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
+ struct dpll_data *dd = clk_hw->dpll_data;
+ const char *name;
+ struct clk *clk;
+ const struct clk_init_data *init = hw->init;
+
+ clk = of_clk_get(node, 0);
+ if (IS_ERR(clk)) {
+ pr_debug("clk-ref missing for %pOFn, retry later\n",
+ node);
+ if (!ti_clk_retry_init(node, hw, _register_dpll))
+ return;
+
+ goto cleanup;
+ }
+
+ dd->clk_ref = __clk_get_hw(clk);
+
+ clk = of_clk_get(node, 1);
+
+ if (IS_ERR(clk)) {
+ pr_debug("clk-bypass missing for %pOFn, retry later\n",
+ node);
+ if (!ti_clk_retry_init(node, hw, _register_dpll))
+ return;
+
+ goto cleanup;
+ }
+
+ dd->clk_bypass = __clk_get_hw(clk);
+
+ /* register the clock */
+ name = ti_dt_clk_name(node);
+ clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
+
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ kfree(init->parent_names);
+ kfree(init);
+ return;
+ }
+
+cleanup:
+ kfree(clk_hw->dpll_data);
+ kfree(init->parent_names);
+ kfree(init);
+ kfree(clk_hw);
+}
+
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+ defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
+ defined(CONFIG_SOC_AM43XX)
+/**
+ * _register_dpll_x2 - Registers a DPLLx2 clock
+ * @node: device node for this clock
+ * @ops: clk_ops for this clock
+ * @hw_ops: clk_hw_ops for this clock
+ *
+ * Initializes a DPLL x 2 clock from device tree data.
+ */
+static void _register_dpll_x2(struct device_node *node,
+ const struct clk_ops *ops,
+ const struct clk_hw_omap_ops *hw_ops)
+{
+ struct clk *clk;
+ struct clk_init_data init = { NULL };
+ struct clk_hw_omap *clk_hw;
+ const char *name = ti_dt_clk_name(node);
+ const char *parent_name;
+
+ parent_name = of_clk_get_parent_name(node, 0);
+ if (!parent_name) {
+ pr_err("%pOFn must have parent\n", node);
+ return;
+ }
+
+ clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ if (!clk_hw)
+ return;
+
+ clk_hw->ops = hw_ops;
+ clk_hw->hw.init = &init;
+
+ init.name = name;
+ init.ops = ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+ defined(CONFIG_SOC_DRA7XX)
+ if (hw_ops == &clkhwops_omap4_dpllmx) {
+ int ret;
+
+ /* Check if register defined, if not, drop hw-ops */
+ ret = of_property_count_elems_of_size(node, "reg", 1);
+ if (ret <= 0) {
+ clk_hw->ops = NULL;
+ } else if (ti_clk_get_reg_addr(node, 0, &clk_hw->clksel_reg)) {
+ kfree(clk_hw);
+ return;
+ }
+ }
+#endif
+
+ /* register the clock */
+ clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
+
+ if (IS_ERR(clk))
+ kfree(clk_hw);
+ else
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+}
+#endif
+
+/**
+ * of_ti_dpll_setup - Setup function for OMAP DPLL clocks
+ * @node: device node containing the DPLL info
+ * @ops: ops for the DPLL
+ * @ddt: DPLL data template to use
+ *
+ * Initializes a DPLL clock from device tree data.
+ */
+static void __init of_ti_dpll_setup(struct device_node *node,
+ const struct clk_ops *ops,
+ const struct dpll_data *ddt)
+{
+ struct clk_hw_omap *clk_hw = NULL;
+ struct clk_init_data *init = NULL;
+ const char **parent_names = NULL;
+ struct dpll_data *dd = NULL;
+ int ssc_clk_index;
+ u8 dpll_mode = 0;
+ u32 min_div;
+
+ dd = kmemdup(ddt, sizeof(*dd), GFP_KERNEL);
+ clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ init = kzalloc(sizeof(*init), GFP_KERNEL);
+ if (!dd || !clk_hw || !init)
+ goto cleanup;
+
+ clk_hw->dpll_data = dd;
+ clk_hw->ops = &clkhwops_omap3_dpll;
+ clk_hw->hw.init = init;
+
+ init->name = ti_dt_clk_name(node);
+ init->ops = ops;
+
+ init->num_parents = of_clk_get_parent_count(node);
+ if (!init->num_parents) {
+ pr_err("%pOFn must have parent(s)\n", node);
+ goto cleanup;
+ }
+
+ parent_names = kcalloc(init->num_parents, sizeof(char *), GFP_KERNEL);
+ if (!parent_names)
+ goto cleanup;
+
+ of_clk_parent_fill(node, parent_names, init->num_parents);
+
+ init->parent_names = parent_names;
+
+ if (ti_clk_get_reg_addr(node, 0, &dd->control_reg))
+ goto cleanup;
+
+ /*
+ * Special case for the OMAP2 DPLL: the register order is different
+ * because idlest_reg is missing, and clkhwops differs as well.
+ * Detected from the missing idlest_mask.
+ */
+ if (!dd->idlest_mask) {
+ if (ti_clk_get_reg_addr(node, 1, &dd->mult_div1_reg))
+ goto cleanup;
+#ifdef CONFIG_ARCH_OMAP2
+ clk_hw->ops = &clkhwops_omap2xxx_dpll;
+ omap2xxx_clkt_dpllcore_init(&clk_hw->hw);
+#endif
+ } else {
+ if (ti_clk_get_reg_addr(node, 1, &dd->idlest_reg))
+ goto cleanup;
+
+ if (ti_clk_get_reg_addr(node, 2, &dd->mult_div1_reg))
+ goto cleanup;
+ }
+
+ if (dd->autoidle_mask) {
+ if (ti_clk_get_reg_addr(node, 3, &dd->autoidle_reg))
+ goto cleanup;
+
+ ssc_clk_index = 4;
+ } else {
+ ssc_clk_index = 3;
+ }
+
+ if (dd->ssc_deltam_int_mask && dd->ssc_deltam_frac_mask &&
+ dd->ssc_modfreq_mant_mask && dd->ssc_modfreq_exp_mask) {
+ if (ti_clk_get_reg_addr(node, ssc_clk_index++,
+ &dd->ssc_deltam_reg))
+ goto cleanup;
+
+ if (ti_clk_get_reg_addr(node, ssc_clk_index++,
+ &dd->ssc_modfreq_reg))
+ goto cleanup;
+
+ of_property_read_u32(node, "ti,ssc-modfreq-hz",
+ &dd->ssc_modfreq);
+ of_property_read_u32(node, "ti,ssc-deltam", &dd->ssc_deltam);
+ dd->ssc_downspread =
+ of_property_read_bool(node, "ti,ssc-downspread");
+ }
+
+ if (of_property_read_bool(node, "ti,low-power-stop"))
+ dpll_mode |= 1 << DPLL_LOW_POWER_STOP;
+
+ if (of_property_read_bool(node, "ti,low-power-bypass"))
+ dpll_mode |= 1 << DPLL_LOW_POWER_BYPASS;
+
+ if (of_property_read_bool(node, "ti,lock"))
+ dpll_mode |= 1 << DPLL_LOCKED;
+
+ if (!of_property_read_u32(node, "ti,min-div", &min_div) &&
+ min_div > dd->min_divider)
+ dd->min_divider = min_div;
+
+ if (dpll_mode)
+ dd->modes = dpll_mode;
+
+ _register_dpll(&clk_hw->hw, node);
+ return;
+
+cleanup:
+ kfree(dd);
+ kfree(parent_names);
+ kfree(init);
+ kfree(clk_hw);
+}
+
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+ defined(CONFIG_SOC_DRA7XX)
+static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node)
+{
+ _register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
+}
+CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
+ of_ti_omap4_dpll_x2_setup);
+#endif
+
+#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
+static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
+{
+ _register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
+}
+CLK_OF_DECLARE(ti_am3_dpll_x2_clock, "ti,am3-dpll-x2-clock",
+ of_ti_am3_dpll_x2_setup);
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
+static void __init of_ti_omap3_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .freqsel_mask = 0xf0,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ if ((of_machine_is_compatible("ti,omap3630") ||
+ of_machine_is_compatible("ti,omap36xx")) &&
+ of_node_name_eq(node, "dpll5_ck"))
+ of_ti_dpll_setup(node, &omap3_dpll5_ck_ops, &dd);
+ else
+ of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
+}
+CLK_OF_DECLARE(ti_omap3_dpll_clock, "ti,omap3-dpll-clock",
+ of_ti_omap3_dpll_setup);
+
+static void __init of_ti_omap3_core_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 16,
+ .div1_mask = 0x7f << 8,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .freqsel_mask = 0xf0,
+ };
+
+ of_ti_dpll_setup(node, &omap3_dpll_core_ck_ops, &dd);
+}
+CLK_OF_DECLARE(ti_omap3_core_dpll_clock, "ti,omap3-dpll-core-clock",
+ of_ti_omap3_core_dpll_setup);
+
+static void __init of_ti_omap3_per_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1 << 1,
+ .enable_mask = 0x7 << 16,
+ .autoidle_mask = 0x7 << 3,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .freqsel_mask = 0xf00000,
+ .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
+}
+CLK_OF_DECLARE(ti_omap3_per_dpll_clock, "ti,omap3-dpll-per-clock",
+ of_ti_omap3_per_dpll_setup);
+
+static void __init of_ti_omap3_per_jtype_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1 << 1,
+ .enable_mask = 0x7 << 16,
+ .autoidle_mask = 0x7 << 3,
+ .mult_mask = 0xfff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 4095,
+ .max_divider = 128,
+ .min_divider = 1,
+ .sddiv_mask = 0xff << 24,
+ .dco_mask = 0xe << 20,
+ .flags = DPLL_J_TYPE,
+ .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
+}
+CLK_OF_DECLARE(ti_omap3_per_jtype_dpll_clock, "ti,omap3-dpll-per-j-type-clock",
+ of_ti_omap3_per_jtype_dpll_setup);
+#endif
+
+static void __init of_ti_omap4_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
+}
+CLK_OF_DECLARE(ti_omap4_dpll_clock, "ti,omap4-dpll-clock",
+ of_ti_omap4_dpll_setup);
+
+static void __init of_ti_omap5_mpu_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .dcc_mask = BIT(22),
+ .dcc_rate = 1400000000, /* DCC beyond 1.4GHz */
+ .min_divider = 1,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
+}
+CLK_OF_DECLARE(of_ti_omap5_mpu_dpll_clock, "ti,omap5-mpu-dpll-clock",
+ of_ti_omap5_mpu_dpll_setup);
+
+static void __init of_ti_omap4_core_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
+}
+CLK_OF_DECLARE(ti_omap4_core_dpll_clock, "ti,omap4-dpll-core-clock",
+ of_ti_omap4_core_dpll_setup);
+
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+ defined(CONFIG_SOC_DRA7XX)
+static void __init of_ti_omap4_m4xen_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .m4xen_mask = 0x800,
+ .lpmode_mask = 1 << 10,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
+}
+CLK_OF_DECLARE(ti_omap4_m4xen_dpll_clock, "ti,omap4-dpll-m4xen-clock",
+ of_ti_omap4_m4xen_dpll_setup);
+
+static void __init of_ti_omap4_jtype_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .autoidle_mask = 0x7,
+ .mult_mask = 0xfff << 8,
+ .div1_mask = 0xff,
+ .max_multiplier = 4095,
+ .max_divider = 256,
+ .min_divider = 1,
+ .sddiv_mask = 0xff << 24,
+ .flags = DPLL_J_TYPE,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
+}
+CLK_OF_DECLARE(ti_omap4_jtype_dpll_clock, "ti,omap4-dpll-j-type-clock",
+ of_ti_omap4_jtype_dpll_setup);
+#endif
+
+static void __init of_ti_am3_no_gate_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .ssc_enable_mask = 0x1 << 12,
+ .ssc_downspread_mask = 0x1 << 14,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .ssc_deltam_int_mask = 0x3 << 18,
+ .ssc_deltam_frac_mask = 0x3ffff,
+ .ssc_modfreq_mant_mask = 0x7f,
+ .ssc_modfreq_exp_mask = 0x7 << 8,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .max_rate = 1000000000,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
+}
+CLK_OF_DECLARE(ti_am3_no_gate_dpll_clock, "ti,am3-dpll-no-gate-clock",
+ of_ti_am3_no_gate_dpll_setup);
+
+static void __init of_ti_am3_jtype_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 4095,
+ .max_divider = 256,
+ .min_divider = 2,
+ .flags = DPLL_J_TYPE,
+ .max_rate = 2000000000,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
+}
+CLK_OF_DECLARE(ti_am3_jtype_dpll_clock, "ti,am3-dpll-j-type-clock",
+ of_ti_am3_jtype_dpll_setup);
+
+static void __init of_ti_am3_no_gate_jtype_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .max_rate = 2000000000,
+ .flags = DPLL_J_TYPE,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
+}
+CLK_OF_DECLARE(ti_am3_no_gate_jtype_dpll_clock,
+ "ti,am3-dpll-no-gate-j-type-clock",
+ of_ti_am3_no_gate_jtype_dpll_setup);
+
+static void __init of_ti_am3_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .ssc_enable_mask = 0x1 << 12,
+ .ssc_downspread_mask = 0x1 << 14,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .ssc_deltam_int_mask = 0x3 << 18,
+ .ssc_deltam_frac_mask = 0x3ffff,
+ .ssc_modfreq_mant_mask = 0x7f,
+ .ssc_modfreq_exp_mask = 0x7 << 8,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .max_rate = 1000000000,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
+}
+CLK_OF_DECLARE(ti_am3_dpll_clock, "ti,am3-dpll-clock", of_ti_am3_dpll_setup);
+
+static void __init of_ti_am3_core_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .idlest_mask = 0x1,
+ .enable_mask = 0x7,
+ .mult_mask = 0x7ff << 8,
+ .div1_mask = 0x7f,
+ .max_multiplier = 2047,
+ .max_divider = 128,
+ .min_divider = 1,
+ .max_rate = 1000000000,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ };
+
+ of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
+}
+CLK_OF_DECLARE(ti_am3_core_dpll_clock, "ti,am3-dpll-core-clock",
+ of_ti_am3_core_dpll_setup);
+
+static void __init of_ti_omap2_core_dpll_setup(struct device_node *node)
+{
+ const struct dpll_data dd = {
+ .enable_mask = 0x3,
+ .mult_mask = 0x3ff << 12,
+ .div1_mask = 0xf << 8,
+ .max_divider = 16,
+ .min_divider = 1,
+ };
+
+ of_ti_dpll_setup(node, &omap2_dpll_core_ck_ops, &dd);
+}
+CLK_OF_DECLARE(ti_omap2_core_dpll_clock, "ti,omap2-dpll-core-clock",
+ of_ti_omap2_core_dpll_setup);
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
new file mode 100644
index 000000000..e32b3515f
--- /dev/null
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -0,0 +1,1115 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * OMAP3/4 - specific DPLL control functions
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2010 Nokia Corporation
+ *
+ * Written by Paul Walmsley
+ * Testing and integration fixes by Jouni Högander
+ *
+ * 36xx support added by Vishwanath BS, Richard Woodruff, and Nishanth
+ * Menon
+ *
+ * Parts of this code are based on code written by
+ * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/clkdev.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+/* CM_AUTOIDLE_PLL*.AUTO_* bit values */
+#define DPLL_AUTOIDLE_DISABLE 0x0
+#define DPLL_AUTOIDLE_LOW_POWER_STOP 0x1
+
+#define MAX_DPLL_WAIT_TRIES 1000000
+
+#define OMAP3XXX_EN_DPLL_LOCKED 0x7
+
+/* Forward declarations */
+static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk);
+static void omap3_dpll_deny_idle(struct clk_hw_omap *clk);
+static void omap3_dpll_allow_idle(struct clk_hw_omap *clk);
+
+/* Private functions */
+
+/* _omap3_dpll_write_clken - write clken_bits arg to a DPLL's enable bits */
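+/*
+ * For example, with an enable_mask of 0x7 << 16 (the OMAP3 per-DPLL layout),
+ * __ffs() returns 16, so a clken value of 0x7 (locked) lands in bits 18:16
+ * of the control register.
+ */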
+static void _omap3_dpll_write_clken(struct clk_hw_omap *clk, u8 clken_bits)
+{
+ const struct dpll_data *dd;
+ u32 v;
+
+ dd = clk->dpll_data;
+
+ v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
+ v &= ~dd->enable_mask;
+ v |= clken_bits << __ffs(dd->enable_mask);
+ ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
+}
+
+/* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */
+static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state)
+{
+ const struct dpll_data *dd;
+ int i = 0;
+ int ret = -EINVAL;
+ const char *clk_name;
+
+ dd = clk->dpll_data;
+ clk_name = clk_hw_get_name(&clk->hw);
+
+ state <<= __ffs(dd->idlest_mask);
+
+ while (((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask)
+ != state) && i < MAX_DPLL_WAIT_TRIES) {
+ i++;
+ udelay(1);
+ }
+
+ if (i == MAX_DPLL_WAIT_TRIES) {
+ pr_err("clock: %s failed transition to '%s'\n",
+ clk_name, (state) ? "locked" : "bypassed");
+ } else {
+ pr_debug("clock: %s transition to '%s' in %d loops\n",
+ clk_name, (state) ? "locked" : "bypassed", i);
+
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/* From 3430 TRM ES2 4.7.6.2 */
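+/*
+ * Example based on the table below: a 26 MHz reference divided by n = 2
+ * gives fint = 13 MHz, which falls in the 12.5-15 MHz band and selects
+ * freqsel 0xD.
+ */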
+static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n)
+{
+ unsigned long fint;
+ u16 f = 0;
+
+ fint = clk_hw_get_rate(clk->dpll_data->clk_ref) / n;
+
+ pr_debug("clock: fint is %lu\n", fint);
+
+ if (fint >= 750000 && fint <= 1000000)
+ f = 0x3;
+ else if (fint > 1000000 && fint <= 1250000)
+ f = 0x4;
+ else if (fint > 1250000 && fint <= 1500000)
+ f = 0x5;
+ else if (fint > 1500000 && fint <= 1750000)
+ f = 0x6;
+ else if (fint > 1750000 && fint <= 2100000)
+ f = 0x7;
+ else if (fint > 7500000 && fint <= 10000000)
+ f = 0xB;
+ else if (fint > 10000000 && fint <= 12500000)
+ f = 0xC;
+ else if (fint > 12500000 && fint <= 15000000)
+ f = 0xD;
+ else if (fint > 15000000 && fint <= 17500000)
+ f = 0xE;
+ else if (fint > 17500000 && fint <= 21000000)
+ f = 0xF;
+ else
+ pr_debug("clock: unknown freqsel setting for %d\n", n);
+
+ return f;
+}
+
+/**
+ * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
+ * @clk: pointer to a DPLL struct clk
+ *
+ * Instructs a non-CORE DPLL to lock. Waits for the DPLL to report
+ * readiness before returning. Will save and restore the DPLL's
+ * autoidle state across the enable, per the CDP code. If the DPLL
+ * locked successfully, return 0; if the DPLL did not lock in the time
+ * allotted, or DPLL3 was passed in, return -EINVAL.
+ */
+static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk)
+{
+ const struct dpll_data *dd;
+ u8 ai;
+ u8 state = 1;
+ int r = 0;
+
+ pr_debug("clock: locking DPLL %s\n", clk_hw_get_name(&clk->hw));
+
+ dd = clk->dpll_data;
+ state <<= __ffs(dd->idlest_mask);
+
+ /* Check if already locked */
+ if ((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask) ==
+ state)
+ goto done;
+
+ ai = omap3_dpll_autoidle_read(clk);
+
+ if (ai)
+ omap3_dpll_deny_idle(clk);
+
+ _omap3_dpll_write_clken(clk, DPLL_LOCKED);
+
+ r = _omap3_wait_dpll_status(clk, 1);
+
+ if (ai)
+ omap3_dpll_allow_idle(clk);
+
+done:
+ return r;
+}
+
+/**
+ * _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness
+ * @clk: pointer to a DPLL struct clk
+ *
+ * Instructs a non-CORE DPLL to enter low-power bypass mode. In
+ * bypass mode, the DPLL's rate is set equal to its parent clock's
+ * rate. Waits for the DPLL to report readiness before returning.
+ * Will save and restore the DPLL's autoidle state across the enable,
+ * per the CDP code. If the DPLL entered bypass mode successfully,
+ * return 0; if the DPLL did not enter bypass in the time allotted, or
+ * DPLL3 was passed in, or the DPLL does not support low-power bypass,
+ * return -EINVAL.
+ */
+static int _omap3_noncore_dpll_bypass(struct clk_hw_omap *clk)
+{
+ int r;
+ u8 ai;
+
+ if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)))
+ return -EINVAL;
+
+ pr_debug("clock: configuring DPLL %s for low-power bypass\n",
+ clk_hw_get_name(&clk->hw));
+
+ ai = omap3_dpll_autoidle_read(clk);
+
+ _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS);
+
+ r = _omap3_wait_dpll_status(clk, 0);
+
+ if (ai)
+ omap3_dpll_allow_idle(clk);
+
+ return r;
+}
+
+/**
+ * _omap3_noncore_dpll_stop - instruct a DPLL to stop
+ * @clk: pointer to a DPLL struct clk
+ *
+ * Instructs a non-CORE DPLL to enter low-power stop. Will save and
+ * restore the DPLL's autoidle state across the stop, per the CDP
+ * code. If DPLL3 was passed in, or the DPLL does not support
+ * low-power stop, return -EINVAL; otherwise, return 0.
+ */
+static int _omap3_noncore_dpll_stop(struct clk_hw_omap *clk)
+{
+ u8 ai;
+
+ if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP)))
+ return -EINVAL;
+
+ pr_debug("clock: stopping DPLL %s\n", clk_hw_get_name(&clk->hw));
+
+ ai = omap3_dpll_autoidle_read(clk);
+
+ _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP);
+
+ if (ai)
+ omap3_dpll_allow_idle(clk);
+
+ return 0;
+}
+
+/**
+ * _lookup_dco - Lookup DCO used by j-type DPLL
+ * @clk: pointer to a DPLL struct clk
+ * @dco: digital control oscillator selector
+ * @m: DPLL multiplier to set
+ * @n: DPLL divider to set
+ *
+ * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
+ *
+ * XXX This code is not needed for 3430/AM35xx; can it be optimized
+ * out in non-multi-OMAP builds for those chips?
+ */
+static void _lookup_dco(struct clk_hw_omap *clk, u8 *dco, u16 m, u8 n)
+{
+ unsigned long fint, clkinp; /* watch out for overflow */
+
+ clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));
+ fint = (clkinp / n) * m;
+
+ if (fint < 1000000000)
+ *dco = 2;
+ else
+ *dco = 4;
+}
+
+/**
+ * _lookup_sddiv - Calculate sigma delta divider for j-type DPLL
+ * @clk: pointer to a DPLL struct clk
+ * @sd_div: target sigma-delta divider
+ * @m: DPLL multiplier to set
+ * @n: DPLL divider to set
+ *
+ * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
+ *
+ * XXX This code is not needed for 3430/AM35xx; can it be optimized
+ * out in non-multi-OMAP builds for those chips?
+ */
+static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n)
+{
+ unsigned long clkinp, sd; /* watch out for overflow */
+ int mod1, mod2;
+
+ clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));
+
+ /*
+ * target sigma-delta to near 250MHz
+ * sd = ceil[(m/(n+1)) * (clkinp_MHz / 250)]
+ */
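+ /*
+ * Worked example with hypothetical values: for clkinp = 38.4 MHz,
+ * m = 360 and n = 2, clkinp becomes 384 after the scaling below,
+ * sd = (384 * 360) / (250 * 2) = 276, then 27 after the /10 step,
+ * and is rounded up to 28 because both remainders are non-zero.
+ */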
+ clkinp /= 100000; /* shift from MHz to 10*Hz for 38.4 and 19.2 */
+ mod1 = (clkinp * m) % (250 * n);
+ sd = (clkinp * m) / (250 * n);
+ mod2 = sd % 10;
+ sd /= 10;
+
+ if (mod1 || mod2)
+ sd++;
+ *sd_div = sd;
+}
+
+/**
+ * omap3_noncore_dpll_ssc_program - set spread-spectrum clocking registers
+ * @clk: struct clk * of DPLL to set
+ *
+ * Enable the DPLL spread spectrum clocking if frequency modulation and
+ * frequency spreading have been set, otherwise disable it.
+ */
+static void omap3_noncore_dpll_ssc_program(struct clk_hw_omap *clk)
+{
+ struct dpll_data *dd = clk->dpll_data;
+ unsigned long ref_rate;
+ u32 v, ctrl, mod_freq_divider, exponent, mantissa;
+ u32 deltam_step, deltam_ceil;
+
+ ctrl = ti_clk_ll_ops->clk_readl(&dd->control_reg);
+
+ if (dd->ssc_modfreq && dd->ssc_deltam) {
+ ctrl |= dd->ssc_enable_mask;
+
+ if (dd->ssc_downspread)
+ ctrl |= dd->ssc_downspread_mask;
+ else
+ ctrl &= ~dd->ssc_downspread_mask;
+
+ ref_rate = clk_hw_get_rate(dd->clk_ref);
+ mod_freq_divider =
+ (ref_rate / dd->last_rounded_n) / (4 * dd->ssc_modfreq);
+ if (dd->ssc_modfreq > (ref_rate / 70))
+ pr_warn("clock: SSC modulation frequency of DPLL %s greater than %ld\n",
+ __clk_get_name(clk->hw.clk), ref_rate / 70);
+
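+ /*
+ * Normalize mod_freq_divider into a 7-bit mantissa and a 3-bit
+ * exponent. Hypothetical example: ref_rate = 24 MHz, n = 1 and
+ * ssc_modfreq = 31250 Hz give a divider of 192, stored as
+ * mantissa 96 with exponent 1 (96 * 2^1 = 192).
+ */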
+ exponent = 0;
+ mantissa = mod_freq_divider;
+ while ((mantissa > 127) && (exponent < 7)) {
+ exponent++;
+ mantissa /= 2;
+ }
+ if (mantissa > 127)
+ mantissa = 127;
+
+ v = ti_clk_ll_ops->clk_readl(&dd->ssc_modfreq_reg);
+ v &= ~(dd->ssc_modfreq_mant_mask | dd->ssc_modfreq_exp_mask);
+ v |= mantissa << __ffs(dd->ssc_modfreq_mant_mask);
+ v |= exponent << __ffs(dd->ssc_modfreq_exp_mask);
+ ti_clk_ll_ops->clk_writel(v, &dd->ssc_modfreq_reg);
+
+ deltam_step = dd->last_rounded_m * dd->ssc_deltam;
+ deltam_step /= 10;
+ if (dd->ssc_downspread)
+ deltam_step /= 2;
+
+ deltam_step <<= __ffs(dd->ssc_deltam_int_mask);
+ deltam_step /= 100;
+ deltam_step /= mod_freq_divider;
+ if (deltam_step > 0xFFFFF)
+ deltam_step = 0xFFFFF;
+
+ deltam_ceil = (deltam_step & dd->ssc_deltam_int_mask) >>
+ __ffs(dd->ssc_deltam_int_mask);
+ if (deltam_step & dd->ssc_deltam_frac_mask)
+ deltam_ceil++;
+
+ if ((dd->ssc_downspread &&
+ ((dd->last_rounded_m - (2 * deltam_ceil)) < 20 ||
+ dd->last_rounded_m > 2045)) ||
+ ((dd->last_rounded_m - deltam_ceil) < 20 ||
+ (dd->last_rounded_m + deltam_ceil) > 2045))
+ pr_warn("clock: SSC multiplier of DPLL %s is out of range\n",
+ __clk_get_name(clk->hw.clk));
+
+ v = ti_clk_ll_ops->clk_readl(&dd->ssc_deltam_reg);
+ v &= ~(dd->ssc_deltam_int_mask | dd->ssc_deltam_frac_mask);
+ v |= deltam_step << __ffs(dd->ssc_deltam_int_mask |
+ dd->ssc_deltam_frac_mask);
+ ti_clk_ll_ops->clk_writel(v, &dd->ssc_deltam_reg);
+ } else {
+ ctrl &= ~dd->ssc_enable_mask;
+ }
+
+ ti_clk_ll_ops->clk_writel(ctrl, &dd->control_reg);
+}
+
+/**
+ * omap3_noncore_dpll_program - set non-core DPLL M,N values directly
+ * @clk: struct clk * of DPLL to set
+ * @freqsel: FREQSEL value to set
+ *
+ * Program the DPLL with the last M, N values calculated, and wait for
+ * the DPLL to lock. Returns -EINVAL upon error, or 0 upon success.
+ */
+static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
+{
+ struct dpll_data *dd = clk->dpll_data;
+ u8 dco, sd_div, ai = 0;
+ u32 v;
+ bool errata_i810;
+
+ /* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */
+ _omap3_noncore_dpll_bypass(clk);
+
+ /*
+ * Set jitter correction. Jitter correction is applicable to OMAP343X
+ * only, since the freqsel field is no longer present on other devices.
+ */
+ if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
+ v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
+ v &= ~dd->freqsel_mask;
+ v |= freqsel << __ffs(dd->freqsel_mask);
+ ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
+ }
+
+ /* Set DPLL multiplier, divider */
+ v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
+
+ /* Handle Duty Cycle Correction */
+ if (dd->dcc_mask) {
+ if (dd->last_rounded_rate >= dd->dcc_rate)
+ v |= dd->dcc_mask; /* Enable DCC */
+ else
+ v &= ~dd->dcc_mask; /* Disable DCC */
+ }
+
+ v &= ~(dd->mult_mask | dd->div1_mask);
+ v |= dd->last_rounded_m << __ffs(dd->mult_mask);
+ v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);
+
+ /* Configure dco and sd_div for dplls that have these fields */
+ if (dd->dco_mask) {
+ _lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n);
+ v &= ~(dd->dco_mask);
+ v |= dco << __ffs(dd->dco_mask);
+ }
+ if (dd->sddiv_mask) {
+ _lookup_sddiv(clk, &sd_div, dd->last_rounded_m,
+ dd->last_rounded_n);
+ v &= ~(dd->sddiv_mask);
+ v |= sd_div << __ffs(dd->sddiv_mask);
+ }
+
+ /*
+ * Errata i810 - DPLL controller can get stuck while transitioning
+ * to a power saving state. Software must ensure the DPLL can not
+ * transition to a low power state while changing M/N values.
+ * Easiest way to accomplish this is to prevent DPLL autoidle
+ * before doing the M/N re-program.
+ */
+ errata_i810 = ti_clk_get_features()->flags & TI_CLK_ERRATA_I810;
+
+ if (errata_i810) {
+ ai = omap3_dpll_autoidle_read(clk);
+ if (ai) {
+ omap3_dpll_deny_idle(clk);
+
+ /* OCP barrier */
+ omap3_dpll_autoidle_read(clk);
+ }
+ }
+
+ ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg);
+
+ /* Set 4X multiplier and low-power mode */
+ if (dd->m4xen_mask || dd->lpmode_mask) {
+ v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
+
+ if (dd->m4xen_mask) {
+ if (dd->last_rounded_m4xen)
+ v |= dd->m4xen_mask;
+ else
+ v &= ~dd->m4xen_mask;
+ }
+
+ if (dd->lpmode_mask) {
+ if (dd->last_rounded_lpmode)
+ v |= dd->lpmode_mask;
+ else
+ v &= ~dd->lpmode_mask;
+ }
+
+ ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
+ }
+
+ if (dd->ssc_enable_mask)
+ omap3_noncore_dpll_ssc_program(clk);
+
+ /* We let the clock framework set the other output dividers later */
+
+ /* REVISIT: Set ramp-up delay? */
+
+ _omap3_noncore_dpll_lock(clk);
+
+ if (errata_i810 && ai)
+ omap3_dpll_allow_idle(clk);
+
+ return 0;
+}
+
+/* Public functions */
+
+/**
+ * omap3_dpll_recalc - recalculate DPLL rate
+ * @hw: struct clk_hw containing the DPLL struct clk
+ * @parent_rate: clock rate of the DPLL parent
+ *
+ * Recalculate and propagate the DPLL rate.
+ */
+unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+
+ return omap2_get_dpll_rate(clk);
+}
+
+/* Non-CORE DPLL (e.g., DPLLs that do not control SDRC) clock functions */
+
+/**
+ * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
+ * @hw: struct clk_hw containing the pointer to a DPLL struct clk
+ *
+ * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock.
+ * The choice of modes depends on the DPLL's programmed rate: if it is
+ * the same as the DPLL's parent clock, it will enter bypass;
+ * otherwise, it will enter lock. This code will wait for the DPLL to
+ * indicate readiness before returning, unless the DPLL takes too long
+ * to enter the target state. Intended to be used as the struct clk's
+ * enable function. If DPLL3 was passed in, or the DPLL does not
+ * support low-power stop, or if the DPLL took too long to enter
+ * bypass or lock, return -EINVAL; otherwise, return 0.
+ */
+int omap3_noncore_dpll_enable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ int r;
+ struct dpll_data *dd;
+ struct clk_hw *parent;
+
+ dd = clk->dpll_data;
+ if (!dd)
+ return -EINVAL;
+
+ if (clk->clkdm) {
+ r = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
+ if (r) {
+ WARN(1,
+ "%s: could not enable %s's clockdomain %s: %d\n",
+ __func__, clk_hw_get_name(hw),
+ clk->clkdm_name, r);
+ return r;
+ }
+ }
+
+ parent = clk_hw_get_parent(hw);
+
+ if (clk_hw_get_rate(hw) == clk_hw_get_rate(dd->clk_bypass)) {
+ WARN_ON(parent != dd->clk_bypass);
+ r = _omap3_noncore_dpll_bypass(clk);
+ } else {
+ WARN_ON(parent != dd->clk_ref);
+ r = _omap3_noncore_dpll_lock(clk);
+ }
+
+ return r;
+}
+
+/**
+ * omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop
+ * @hw: struct clk_hw containing the pointer to a DPLL struct clk
+ *
+ * Instructs a non-CORE DPLL to enter low-power stop. This function is
+ * intended for use in struct clkops. No return value.
+ */
+void omap3_noncore_dpll_disable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+
+ _omap3_noncore_dpll_stop(clk);
+ if (clk->clkdm)
+ ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
+}
+
+/* Non-CORE DPLL rate set code */
+
+/**
+ * omap3_noncore_dpll_determine_rate - determine rate for a DPLL
+ * @hw: pointer to the clock to determine rate for
+ * @req: target rate request
+ *
+ * Determines which DPLL mode to use for reaching a desired target rate.
+ * Checks whether the DPLL shall be in bypass or locked mode, and if
+ * locked, calculates the M,N values for the DPLL via round-rate.
+ * Returns 0 on success, or a negative error value on failure.
+ */
+int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *dd;
+
+ if (!req->rate)
+ return -EINVAL;
+
+ dd = clk->dpll_data;
+ if (!dd)
+ return -EINVAL;
+
+ if (clk_hw_get_rate(dd->clk_bypass) == req->rate &&
+ (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
+ req->best_parent_hw = dd->clk_bypass;
+ } else {
+ req->rate = omap2_dpll_round_rate(hw, req->rate,
+ &req->best_parent_rate);
+ req->best_parent_hw = dd->clk_ref;
+ }
+
+ req->best_parent_rate = req->rate;
+
+ return 0;
+}
+
+/**
+ * omap3_noncore_dpll_set_parent - set parent for a DPLL clock
+ * @hw: pointer to the clock to set parent for
+ * @index: parent index to select
+ *
+ * Sets the parent for a DPLL clock. This puts the DPLL into bypass or
+ * locked mode. Returns 0 on success, or a negative error value otherwise.
+ */
+int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ int ret;
+
+ if (!hw)
+ return -EINVAL;
+
+ if (index)
+ ret = _omap3_noncore_dpll_bypass(clk);
+ else
+ ret = _omap3_noncore_dpll_lock(clk);
+
+ return ret;
+}
+
+/**
+ * omap3_noncore_dpll_set_rate - set rate for a DPLL clock
+ * @hw: pointer to the clock to set parent for
+ * @rate: target rate for the clock
+ * @parent_rate: rate of the parent clock
+ *
+ * Sets the rate for a DPLL clock. First checks that the clock parent is
+ * the reference clock (in bypass mode the rate cannot be changed) and
+ * then proceeds with the rate change operation. Returns 0 on success,
+ * or a negative error value otherwise.
+ */
+int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *dd;
+ u16 freqsel = 0;
+ int ret;
+
+ if (!hw || !rate)
+ return -EINVAL;
+
+ dd = clk->dpll_data;
+ if (!dd)
+ return -EINVAL;
+
+ if (clk_hw_get_parent(hw) != dd->clk_ref)
+ return -EINVAL;
+
+ if (dd->last_rounded_rate == 0)
+ return -EINVAL;
+
+ /* Freqsel is available only on OMAP343X devices */
+ if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
+ freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n);
+ WARN_ON(!freqsel);
+ }
+
+ pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__,
+ clk_hw_get_name(hw), rate);
+
+ ret = omap3_noncore_dpll_program(clk, freqsel);
+
+ return ret;
+}
+
+/**
+ * omap3_noncore_dpll_set_rate_and_parent - set rate and parent for a DPLL clock
+ * @hw: pointer to the clock to set rate and parent for
+ * @rate: target rate for the DPLL
+ * @parent_rate: clock rate of the DPLL parent
+ * @index: new parent index for the DPLL, 0 - reference, 1 - bypass
+ *
+ * Sets the rate and parent for a DPLL clock. If the new parent is the
+ * bypass clock, only the parent is selected. Otherwise a rate change is
+ * performed, which effectively also changes the parent as the DPLL is
+ * put into locked mode. Returns 0 on success, or a negative error value
+ * otherwise.
+ */
+int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate,
+ u8 index)
+{
+ int ret;
+
+ if (!hw || !rate)
+ return -EINVAL;
+
+ /*
+ * clk-ref is at index 0, in which case we only need to set the rate;
+ * the parent is changed automatically as part of the lock sequence.
+ * In the clk-bypass case we only need to change the parent.
+ */
+ if (index)
+ ret = omap3_noncore_dpll_set_parent(hw, index);
+ else
+ ret = omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
+
+ return ret;
+}
+
+/* DPLL autoidle read/set code */
+
+/**
+ * omap3_dpll_autoidle_read - read a DPLL's autoidle bits
+ * @clk: struct clk * of the DPLL to read
+ *
+ * Return the DPLL's autoidle bits, shifted down to bit 0. Returns
+ * -EINVAL if passed a null pointer or if the struct clk does not
+ * appear to refer to a DPLL.
+ */
+static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk)
+{
+ const struct dpll_data *dd;
+ u32 v;
+
+ if (!clk || !clk->dpll_data)
+ return -EINVAL;
+
+ dd = clk->dpll_data;
+
+ if (!dd->autoidle_mask)
+ return -EINVAL;
+
+ v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
+ v &= dd->autoidle_mask;
+ v >>= __ffs(dd->autoidle_mask);
+
+ return v;
+}
+
+/**
+ * omap3_dpll_allow_idle - enable DPLL autoidle bits
+ * @clk: struct clk * of the DPLL to operate on
+ *
+ * Enable DPLL automatic idle control. This automatic idle mode
+ * switching takes effect only when the DPLL is locked, at least on
+ * OMAP3430. The DPLL will enter low-power stop when its downstream
+ * clocks are gated. No return value.
+ */
+static void omap3_dpll_allow_idle(struct clk_hw_omap *clk)
+{
+ const struct dpll_data *dd;
+ u32 v;
+
+ if (!clk || !clk->dpll_data)
+ return;
+
+ dd = clk->dpll_data;
+
+ if (!dd->autoidle_mask)
+ return;
+
+ /*
+ * REVISIT: CORE DPLL can optionally enter low-power bypass
+ * by writing 0x5 instead of 0x1. Add some mechanism to
+ * optionally enter this mode.
+ */
+ v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
+ v &= ~dd->autoidle_mask;
+ v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask);
+ ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg);
+}
+
+/**
+ * omap3_dpll_deny_idle - prevent DPLL from automatically idling
+ * @clk: struct clk * of the DPLL to operate on
+ *
+ * Disable DPLL automatic idle control. No return value.
+ */
+static void omap3_dpll_deny_idle(struct clk_hw_omap *clk)
+{
+ const struct dpll_data *dd;
+ u32 v;
+
+ if (!clk || !clk->dpll_data)
+ return;
+
+ dd = clk->dpll_data;
+
+ if (!dd->autoidle_mask)
+ return;
+
+ v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
+ v &= ~dd->autoidle_mask;
+ v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask);
+ ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg);
+}
+
+/* Clock control for DPLL outputs */
+
+/* Find the parent DPLL for the given clkoutx2 clock */
+static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
+{
+ struct clk_hw_omap *pclk = NULL;
+
+ /* Walk up the parents of clk, looking for a DPLL */
+ do {
+ do {
+ hw = clk_hw_get_parent(hw);
+ } while (hw && (!omap2_clk_is_hw_omap(hw)));
+ if (!hw)
+ break;
+ pclk = to_clk_hw_omap(hw);
+ } while (pclk && !pclk->dpll_data);
+
+ /* clk does not have a DPLL as a parent? error in the clock data */
+ if (!pclk) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return pclk;
+}
+
+/**
+ * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
+ * @hw: pointer to struct clk_hw
+ * @parent_rate: clock rate of the DPLL parent
+ *
+ * Using parent clock DPLL data, look up DPLL state. If locked, set our
+ * rate to the dpll_clk * 2; otherwise, just use dpll_clk.
+ */
+unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ const struct dpll_data *dd;
+ unsigned long rate;
+ u32 v;
+ struct clk_hw_omap *pclk = NULL;
+
+ if (!parent_rate)
+ return 0;
+
+ pclk = omap3_find_clkoutx2_dpll(hw);
+
+ if (!pclk)
+ return 0;
+
+ dd = pclk->dpll_data;
+
+ WARN_ON(!dd->enable_mask);
+
+ v = ti_clk_ll_ops->clk_readl(&dd->control_reg) & dd->enable_mask;
+ v >>= __ffs(dd->enable_mask);
+ if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE))
+ rate = parent_rate;
+ else
+ rate = parent_rate * 2;
+ return rate;
+}
+
+/**
+ * omap3_core_dpll_save_context - Save the m and n values of the divider
+ * @hw: pointer to struct clk_hw
+ *
+ * Before the DPLL registers are lost, save the last rounded rate m and n
+ * and the enable mask.
+ */
+int omap3_core_dpll_save_context(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *dd;
+ u32 v;
+
+ dd = clk->dpll_data;
+
+ v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
+ clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask);
+
+ if (clk->context == DPLL_LOCKED) {
+ v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
+ dd->last_rounded_m = (v & dd->mult_mask) >>
+ __ffs(dd->mult_mask);
+ dd->last_rounded_n = ((v & dd->div1_mask) >>
+ __ffs(dd->div1_mask)) + 1;
+ }
+
+ return 0;
+}
+
+/**
+ * omap3_core_dpll_restore_context - restore the m and n values of the divider
+ * @hw: pointer to struct clk_hw
+ *
+ * Restore the last rounded rate m and n
+ * and the enable mask.
+ */
+void omap3_core_dpll_restore_context(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ const struct dpll_data *dd;
+ u32 v;
+
+ dd = clk->dpll_data;
+
+ if (clk->context == DPLL_LOCKED) {
+ _omap3_dpll_write_clken(clk, 0x4);
+ _omap3_wait_dpll_status(clk, 0);
+
+ v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
+ v &= ~(dd->mult_mask | dd->div1_mask);
+ v |= dd->last_rounded_m << __ffs(dd->mult_mask);
+ v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);
+ ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg);
+
+ _omap3_dpll_write_clken(clk, DPLL_LOCKED);
+ _omap3_wait_dpll_status(clk, 1);
+ } else {
+ _omap3_dpll_write_clken(clk, clk->context);
+ }
+}
+
+/**
+ * omap3_noncore_dpll_save_context - Save the m and n values of the divider
+ * @hw: pointer to struct clk_hw
+ *
+ * Before the DPLL registers are lost, save the last rounded rate m and n
+ * and the enable mask.
+ */
+int omap3_noncore_dpll_save_context(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *dd;
+ u32 v;
+
+ dd = clk->dpll_data;
+
+ v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
+ clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask);
+
+ if (clk->context == DPLL_LOCKED) {
+ v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
+ dd->last_rounded_m = (v & dd->mult_mask) >>
+ __ffs(dd->mult_mask);
+ dd->last_rounded_n = ((v & dd->div1_mask) >>
+ __ffs(dd->div1_mask)) + 1;
+ }
+
+ return 0;
+}
+
+/**
+ * omap3_noncore_dpll_restore_context - restore the m and n values of the divider
+ * @hw: pointer to struct clk_hw
+ *
+ * Restore the last rounded rate m and n
+ * and the enable mask.
+ */
+void omap3_noncore_dpll_restore_context(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ const struct dpll_data *dd;
+ u32 ctrl, mult_div1;
+
+ dd = clk->dpll_data;
+
+ ctrl = ti_clk_ll_ops->clk_readl(&dd->control_reg);
+ mult_div1 = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
+
+ if (clk->context == ((ctrl & dd->enable_mask) >>
+ __ffs(dd->enable_mask)) &&
+ dd->last_rounded_m == ((mult_div1 & dd->mult_mask) >>
+ __ffs(dd->mult_mask)) &&
+ dd->last_rounded_n == ((mult_div1 & dd->div1_mask) >>
+ __ffs(dd->div1_mask)) + 1) {
+ /* nothing to be done */
+ return;
+ }
+
+ if (clk->context == DPLL_LOCKED)
+ omap3_noncore_dpll_program(clk, 0);
+ else
+ _omap3_dpll_write_clken(clk, clk->context);
+}
+
+/* OMAP3/4 non-CORE DPLL clkops */
+const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
+ .allow_idle = omap3_dpll_allow_idle,
+ .deny_idle = omap3_dpll_deny_idle,
+};
+
+/**
+ * omap3_dpll4_set_rate - set rate for omap3 per-dpll
+ * @hw: clock to change
+ * @rate: target rate for clock
+ * @parent_rate: clock rate of the DPLL parent
+ *
+ * Check if the current SoC supports the per-dpll reprogram operation
+ * or not, and then do the rate change if supported. Returns -EINVAL
+ * if not supported, 0 for success, and potential error codes from the
+ * clock rate change.
+ */
+int omap3_dpll4_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ /*
+ * According to the 12-5 CDP code from TI, "Limitation 2.5"
+ * on 3430ES1 prevents us from changing DPLL multipliers or dividers
+ * on DPLL4.
+ */
+ if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
+ pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
+ return -EINVAL;
+ }
+
+ return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
+}
+
+/**
+ * omap3_dpll4_set_rate_and_parent - set rate and parent for omap3 per-dpll
+ * @hw: clock to change
+ * @rate: target rate for clock
+ * @parent_rate: rate of the parent clock
+ * @index: parent index, 0 - reference clock, 1 - bypass clock
+ *
+ * Check if the current SoC supports the per-dpll reprogram operation
+ * or not, and then do the rate + parent change if supported. Returns
+ * -EINVAL if not supported, 0 for success, and potential error codes
+ * from the clock rate change.
+ */
+int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate, u8 index)
+{
+ if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
+ pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
+ return -EINVAL;
+ }
+
+ return omap3_noncore_dpll_set_rate_and_parent(hw, rate, parent_rate,
+ index);
+}
+
+/* Apply DM3730 errata sprz319 advisory 2.1. */
+static bool omap3_dpll5_apply_errata(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct omap3_dpll5_settings {
+ unsigned int rate, m, n;
+ };
+
+ static const struct omap3_dpll5_settings precomputed[] = {
+ /*
+ * From DM3730 errata advisory 2.1, table 35 and 36.
+ * The N value is increased by 1 compared to the tables as the
+ * errata lists register values while last_rounded_field is the
+ * real divider value.
+ */
+ { 12000000, 80, 0 + 1 },
+ { 13000000, 443, 5 + 1 },
+ { 19200000, 50, 0 + 1 },
+ { 26000000, 443, 11 + 1 },
+ { 38400000, 25, 0 + 1 }
+ };
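+ /*
+ * Each entry above targets roughly 8 x the USB-host rate checked in
+ * omap3_dpll5_set_rate(); the 12 MHz entry, for instance, yields
+ * 12 MHz * 80 / 1 = 960 MHz.
+ */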
+
+ const struct omap3_dpll5_settings *d;
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *dd;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(precomputed); ++i) {
+ if (parent_rate == precomputed[i].rate)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(precomputed))
+ return false;
+
+ d = &precomputed[i];
+
+ /* Update the M, N and rounded rate values and program the DPLL. */
+ dd = clk->dpll_data;
+ dd->last_rounded_m = d->m;
+ dd->last_rounded_n = d->n;
+ dd->last_rounded_rate = div_u64((u64)parent_rate * d->m, d->n);
+ omap3_noncore_dpll_program(clk, 0);
+
+ return true;
+}
+
+/**
+ * omap3_dpll5_set_rate - set rate for omap3 dpll5
+ * @hw: clock to change
+ * @rate: target rate for clock
+ * @parent_rate: rate of the parent clock
+ *
+ * Set rate for the DPLL5 clock. Apply the sprz319 advisory 2.1 on OMAP36xx if
+ * the DPLL is used for USB host (detected through the requested rate).
+ */
+int omap3_dpll5_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ if (rate == OMAP3_DPLL5_FREQ_FOR_USBHOST * 8) {
+ if (omap3_dpll5_apply_errata(hw, parent_rate))
+ return 0;
+ }
+
+ return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
+}
diff --git a/drivers/clk/ti/dpll44xx.c b/drivers/clk/ti/dpll44xx.c
new file mode 100644
index 000000000..3fc2cab69
--- /dev/null
+++ b/drivers/clk/ti/dpll44xx.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * OMAP4-specific DPLL control functions
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Rajendra Nayak
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+/*
+ * Maximum DPLL input frequency (FINT) and output frequency (FOUT) that
+ * can be supported when using the DPLL low-power mode. Frequencies are
+ * defined in OMAP4430/60 Public TRM section 3.6.3.3.2 "Enable Control,
+ * Status, and Low-Power Operation Mode".
+ */
+#define OMAP4_DPLL_LP_FINT_MAX 1000000
+#define OMAP4_DPLL_LP_FOUT_MAX 100000000
+
+/*
+ * Bitfield declarations
+ */
+#define OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK BIT(8)
+#define OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK BIT(10)
+#define OMAP4430_DPLL_REGM4XEN_MASK BIT(11)
+
+/* Static rate multiplier for OMAP4 REGM4XEN clocks */
+#define OMAP4430_REGM4XEN_MULT 4
+
+static void omap4_dpllmx_allow_gatectrl(struct clk_hw_omap *clk)
+{
+ u32 v;
+ u32 mask;
+
+ if (!clk)
+ return;
+
+ mask = clk->flags & CLOCK_CLKOUTX2 ?
+ OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK :
+ OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK;
+
+ v = ti_clk_ll_ops->clk_readl(&clk->clksel_reg);
+ /* Clear the bit to allow gatectrl */
+ v &= ~mask;
+ ti_clk_ll_ops->clk_writel(v, &clk->clksel_reg);
+}
+
+static void omap4_dpllmx_deny_gatectrl(struct clk_hw_omap *clk)
+{
+ u32 v;
+ u32 mask;
+
+ if (!clk)
+ return;
+
+ mask = clk->flags & CLOCK_CLKOUTX2 ?
+ OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK :
+ OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK;
+
+ v = ti_clk_ll_ops->clk_readl(&clk->clksel_reg);
+ /* Set the bit to deny gatectrl */
+ v |= mask;
+ ti_clk_ll_ops->clk_writel(v, &clk->clksel_reg);
+}
+
+const struct clk_hw_omap_ops clkhwops_omap4_dpllmx = {
+ .allow_idle = omap4_dpllmx_allow_gatectrl,
+ .deny_idle = omap4_dpllmx_deny_gatectrl,
+};
+
+/**
+ * omap4_dpll_lpmode_recalc - compute DPLL low-power setting
+ * @dd: pointer to the dpll data structure
+ *
+ * Calculates if low-power mode can be enabled based upon the last
+ * multiplier and divider values calculated. If low-power mode can be
+ * enabled, then the bit to enable low-power mode is stored in the
+ * last_rounded_lpmode variable. This implementation is based upon the
+ * criteria for enabling low-power mode as described in the OMAP4430/60
+ * Public TRM section 3.6.3.3.2 "Enable Control, Status, and Low-Power
+ * Operation Mode".
+ */
+static void omap4_dpll_lpmode_recalc(struct dpll_data *dd)
+{
+ long fint, fout;
+
+ fint = clk_hw_get_rate(dd->clk_ref) / (dd->last_rounded_n + 1);
+ fout = fint * dd->last_rounded_m;
+
+ if ((fint < OMAP4_DPLL_LP_FINT_MAX) && (fout < OMAP4_DPLL_LP_FOUT_MAX))
+ dd->last_rounded_lpmode = 1;
+ else
+ dd->last_rounded_lpmode = 0;
+}
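A minimal standalone sketch of the criterion above, assuming the same limits as OMAP4_DPLL_LP_FINT_MAX and OMAP4_DPLL_LP_FOUT_MAX:

#include <stdbool.h>

/* fint = ref / (N + 1) and fout = fint * M must both stay below the TRM limits. */
static bool dpll_lp_mode_usable(unsigned long ref_rate, unsigned long m,
				unsigned long n)
{
	unsigned long fint = ref_rate / (n + 1);
	unsigned long long fout = (unsigned long long)fint * m;

	return fint < 1000000 && fout < 100000000;
}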
+
+/**
+ * omap4_dpll_regm4xen_recalc - compute DPLL rate, considering REGM4XEN bit
+ * @hw: pointer to the clock to compute the rate for
+ * @parent_rate: clock rate of the DPLL parent
+ *
+ * Compute the output rate for the OMAP4 DPLL represented by @clk.
+ * Takes the REGM4XEN bit into consideration, which is needed for the
+ * OMAP4 ABE DPLL. Returns the DPLL's output rate (before M-dividers)
+ * upon success, or 0 upon error.
+ */
+unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ u32 v;
+ unsigned long rate;
+ struct dpll_data *dd;
+
+ if (!clk || !clk->dpll_data)
+ return 0;
+
+ dd = clk->dpll_data;
+
+ rate = omap2_get_dpll_rate(clk);
+
+ /* regm4xen adds a multiplier of 4 to DPLL calculations */
+ v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
+ if (v & OMAP4430_DPLL_REGM4XEN_MASK)
+ rate *= OMAP4430_REGM4XEN_MULT;
+
+ return rate;
+}
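In isolation, the REGM4XEN adjustment is a conditional multiply by four on the base M,N rate; a sketch with an illustrative helper name:

/* Apply the x4 multiplier when the REGM4XEN bit (bit 11) is set in the DPLL
 * control register value passed in.
 */
static unsigned long regm4xen_adjust(unsigned long base_rate, unsigned int ctrl)
{
	if (ctrl & (1u << 11))		/* OMAP4430_DPLL_REGM4XEN_MASK */
		return base_rate * 4;	/* OMAP4430_REGM4XEN_MULT */

	return base_rate;
}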
+
+/**
+ * omap4_dpll_regm4xen_round_rate - round DPLL rate, considering REGM4XEN bit
+ * @hw: struct clk_hw containing the struct clk * of the DPLL to round a rate for
+ * @target_rate: the desired rate of the DPLL
+ * @parent_rate: clock rate of the DPLL parent
+ *
+ * Compute the rate that would be programmed into the DPLL hardware
+ * for @clk if set_rate() were to be provided with the rate
+ * @target_rate. Takes the REGM4XEN bit into consideration, which is
+ * needed for the OMAP4 ABE DPLL. Returns the rounded rate (before
+ * M-dividers) upon success, -EINVAL if @clk is null or not a DPLL, or
+ * ~0 if an error occurred in omap2_dpll_round_rate().
+ */
+long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
+ unsigned long target_rate,
+ unsigned long *parent_rate)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *dd;
+ long r;
+
+ if (!clk || !clk->dpll_data)
+ return -EINVAL;
+
+ dd = clk->dpll_data;
+
+ dd->last_rounded_m4xen = 0;
+
+ /*
+ * First try to compute the DPLL configuration for
+ * target rate without using the 4X multiplier.
+ */
+ r = omap2_dpll_round_rate(hw, target_rate, NULL);
+ if (r != ~0)
+ goto out;
+
+ /*
+ * If we did not find a valid DPLL configuration, try again, but
+ * this time see if using the 4X multiplier can help. Enabling the
+ * 4X multiplier is equivalent to dividing the target rate by 4.
+ */
+ r = omap2_dpll_round_rate(hw, target_rate / OMAP4430_REGM4XEN_MULT,
+ NULL);
+ if (r == ~0)
+ return r;
+
+ dd->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;
+ dd->last_rounded_m4xen = 1;
+
+out:
+ omap4_dpll_lpmode_recalc(dd);
+
+ return dd->last_rounded_rate;
+}
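The two-pass strategy above can be condensed into the sketch below; round_mn() stands in for omap2_dpll_round_rate(), which reports failure as ~0.

/* Try the plain M,N search first, then retry with target/4 and scale back up. */
static long round_rate_with_m4xen(long (*round_mn)(unsigned long target),
				  unsigned long target)
{
	long r = round_mn(target);

	if (r != ~0)
		return r;

	r = round_mn(target / 4);
	if (r == ~0)
		return r;

	return r * 4;
}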
+
+/**
+ * omap4_dpll_regm4xen_determine_rate - determine rate for a DPLL
+ * @hw: pointer to the clock to determine rate for
+ * @req: target rate request
+ *
+ * Determines which DPLL mode to use for reaching a desired rate.
+ * Checks whether the DPLL shall be in bypass or locked mode, and if
+ * locked, calculates the M,N values for the DPLL via round-rate.
+ * Returns 0 on success and a negative error value otherwise.
+ */
+int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ struct dpll_data *dd;
+
+ if (!req->rate)
+ return -EINVAL;
+
+ dd = clk->dpll_data;
+ if (!dd)
+ return -EINVAL;
+
+ if (clk_hw_get_rate(dd->clk_bypass) == req->rate &&
+ (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
+ req->best_parent_hw = dd->clk_bypass;
+ } else {
+ req->rate = omap4_dpll_regm4xen_round_rate(hw, req->rate,
+ &req->best_parent_rate);
+ req->best_parent_hw = dd->clk_ref;
+ }
+
+ req->best_parent_rate = req->rate;
+
+ return 0;
+}
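The parent choice above reduces to one branch; this sketch uses illustrative names rather than the driver's data structures.

#include <stdbool.h>

/* Use bypass when the requested rate equals the bypass parent rate and
 * low-power bypass is one of the allowed modes; otherwise lock via round_rate().
 */
static bool dpll_should_bypass(unsigned long req_rate, unsigned long bypass_rate,
			       unsigned int modes, unsigned int lp_bypass_bit)
{
	return req_rate == bypass_rate && (modes & (1u << lp_bypass_bit));
}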
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
new file mode 100644
index 000000000..2db3fc4a4
--- /dev/null
+++ b/drivers/clk/ti/fapll.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/math64.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+/* FAPLL Control Register PLL_CTRL */
+#define FAPLL_MAIN_MULT_N_SHIFT 16
+#define FAPLL_MAIN_DIV_P_SHIFT 8
+#define FAPLL_MAIN_LOCK BIT(7)
+#define FAPLL_MAIN_PLLEN BIT(3)
+#define FAPLL_MAIN_BP BIT(2)
+#define FAPLL_MAIN_LOC_CTL BIT(0)
+
+#define FAPLL_MAIN_MAX_MULT_N 0xffff
+#define FAPLL_MAIN_MAX_DIV_P 0xff
+#define FAPLL_MAIN_CLEAR_MASK \
+ ((FAPLL_MAIN_MAX_MULT_N << FAPLL_MAIN_MULT_N_SHIFT) | \
+	 (FAPLL_MAIN_MAX_DIV_P << FAPLL_MAIN_DIV_P_SHIFT) | \
+ FAPLL_MAIN_LOC_CTL)
+
+/* FAPLL powerdown register PWD */
+#define FAPLL_PWD_OFFSET 4
+
+#define MAX_FAPLL_OUTPUTS 7
+#define FAPLL_MAX_RETRIES 1000
+
+#define to_fapll(_hw) container_of(_hw, struct fapll_data, hw)
+#define to_synth(_hw) container_of(_hw, struct fapll_synth, hw)
+
+/* The bypass bit is inverted on the ddr_pll.. */
+#define fapll_is_ddr_pll(va) (((u32)(va) & 0xffff) == 0x0440)
+
+/*
+ * The audio_pll_clk1 input is hardwired to the 27MHz bypass clock,
+ * and the audio_pll_clk1 synthesizer is hardwired to a 32.768 kHz output.
+ */
+#define is_ddr_pll_clk1(va) (((u32)(va) & 0xffff) == 0x044c)
+#define is_audio_pll_clk1(va) (((u32)(va) & 0xffff) == 0x04a8)
+
+/* Synthesizer divider register */
+#define SYNTH_LDMDIV1 BIT(8)
+
+/* Synthesizer frequency register */
+#define SYNTH_LDFREQ BIT(31)
+
+#define SYNTH_PHASE_K 8
+#define SYNTH_MAX_INT_DIV 0xf
+#define SYNTH_MAX_DIV_M 0xff
+
+struct fapll_data {
+ struct clk_hw hw;
+ void __iomem *base;
+ const char *name;
+ struct clk *clk_ref;
+ struct clk *clk_bypass;
+ struct clk_onecell_data outputs;
+ bool bypass_bit_inverted;
+};
+
+struct fapll_synth {
+ struct clk_hw hw;
+ struct fapll_data *fd;
+ int index;
+ void __iomem *freq;
+ void __iomem *div;
+ const char *name;
+ struct clk *clk_pll;
+};
+
+static bool ti_fapll_clock_is_bypass(struct fapll_data *fd)
+{
+ u32 v = readl_relaxed(fd->base);
+
+ if (fd->bypass_bit_inverted)
+ return !(v & FAPLL_MAIN_BP);
+ else
+ return !!(v & FAPLL_MAIN_BP);
+}
+
+static void ti_fapll_set_bypass(struct fapll_data *fd)
+{
+ u32 v = readl_relaxed(fd->base);
+
+ if (fd->bypass_bit_inverted)
+ v &= ~FAPLL_MAIN_BP;
+ else
+ v |= FAPLL_MAIN_BP;
+ writel_relaxed(v, fd->base);
+}
+
+static void ti_fapll_clear_bypass(struct fapll_data *fd)
+{
+ u32 v = readl_relaxed(fd->base);
+
+ if (fd->bypass_bit_inverted)
+ v |= FAPLL_MAIN_BP;
+ else
+ v &= ~FAPLL_MAIN_BP;
+ writel_relaxed(v, fd->base);
+}
+
+static int ti_fapll_wait_lock(struct fapll_data *fd)
+{
+ int retries = FAPLL_MAX_RETRIES;
+ u32 v;
+
+ while ((v = readl_relaxed(fd->base))) {
+ if (v & FAPLL_MAIN_LOCK)
+ return 0;
+
+ if (retries-- <= 0)
+ break;
+
+ udelay(1);
+ }
+
+ pr_err("%s failed to lock\n", fd->name);
+
+ return -ETIMEDOUT;
+}
+
+static int ti_fapll_enable(struct clk_hw *hw)
+{
+ struct fapll_data *fd = to_fapll(hw);
+ u32 v = readl_relaxed(fd->base);
+
+ v |= FAPLL_MAIN_PLLEN;
+ writel_relaxed(v, fd->base);
+ ti_fapll_wait_lock(fd);
+
+ return 0;
+}
+
+static void ti_fapll_disable(struct clk_hw *hw)
+{
+ struct fapll_data *fd = to_fapll(hw);
+ u32 v = readl_relaxed(fd->base);
+
+ v &= ~FAPLL_MAIN_PLLEN;
+ writel_relaxed(v, fd->base);
+}
+
+static int ti_fapll_is_enabled(struct clk_hw *hw)
+{
+ struct fapll_data *fd = to_fapll(hw);
+ u32 v = readl_relaxed(fd->base);
+
+ return v & FAPLL_MAIN_PLLEN;
+}
+
+static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct fapll_data *fd = to_fapll(hw);
+ u32 fapll_n, fapll_p, v;
+ u64 rate;
+
+ if (ti_fapll_clock_is_bypass(fd))
+ return parent_rate;
+
+ rate = parent_rate;
+
+ /* PLL pre-divider is P and multiplier is N */
+ v = readl_relaxed(fd->base);
+ fapll_p = (v >> 8) & 0xff;
+ if (fapll_p)
+ do_div(rate, fapll_p);
+ fapll_n = v >> 16;
+ if (fapll_n)
+ rate *= fapll_n;
+
+ return rate;
+}
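Written out, the decode above gives fout = (parent / P) * N, with P in bits 8..15 and N in bits 16..31 of PLL_CTRL; for example, a 27 MHz reference with P = 1 and N = 32 yields 864 MHz. A standalone sketch:

#include <stdint.h>

/* Decode P and N from a PLL_CTRL value and compute the locked output rate. */
static uint64_t fapll_main_rate(uint64_t parent_rate, uint32_t pll_ctrl)
{
	uint32_t p = (pll_ctrl >> 8) & 0xff;	/* pre-divider P */
	uint32_t n = pll_ctrl >> 16;		/* multiplier N */
	uint64_t rate = parent_rate;

	if (p)
		rate /= p;
	if (n)
		rate *= n;

	return rate;
}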
+
+static u8 ti_fapll_get_parent(struct clk_hw *hw)
+{
+ struct fapll_data *fd = to_fapll(hw);
+
+ if (ti_fapll_clock_is_bypass(fd))
+ return 1;
+
+ return 0;
+}
+
+static int ti_fapll_set_div_mult(unsigned long rate,
+ unsigned long parent_rate,
+ u32 *pre_div_p, u32 *mult_n)
+{
+ /*
+ * So far no luck getting decent clock with PLL divider,
+ * PLL does not seem to lock and the signal does not look
+ * right. It seems the divider can only be used together
+ * with the multiplier?
+ */
+ if (rate < parent_rate) {
+ pr_warn("FAPLL main divider rates unsupported\n");
+ return -EINVAL;
+ }
+
+ *mult_n = rate / parent_rate;
+ if (*mult_n > FAPLL_MAIN_MAX_MULT_N)
+ return -EINVAL;
+ *pre_div_p = 1;
+
+ return 0;
+}
+
+static long ti_fapll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ u32 pre_div_p, mult_n;
+ int error;
+
+ if (!rate)
+ return -EINVAL;
+
+ error = ti_fapll_set_div_mult(rate, *parent_rate,
+ &pre_div_p, &mult_n);
+ if (error)
+ return error;
+
+ rate = *parent_rate / pre_div_p;
+ rate *= mult_n;
+
+ return rate;
+}
+
+static int ti_fapll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct fapll_data *fd = to_fapll(hw);
+ u32 pre_div_p, mult_n, v;
+ int error;
+
+ if (!rate)
+ return -EINVAL;
+
+ error = ti_fapll_set_div_mult(rate, parent_rate,
+ &pre_div_p, &mult_n);
+ if (error)
+ return error;
+
+ ti_fapll_set_bypass(fd);
+ v = readl_relaxed(fd->base);
+ v &= ~FAPLL_MAIN_CLEAR_MASK;
+ v |= pre_div_p << FAPLL_MAIN_DIV_P_SHIFT;
+ v |= mult_n << FAPLL_MAIN_MULT_N_SHIFT;
+ writel_relaxed(v, fd->base);
+ if (ti_fapll_is_enabled(hw))
+ ti_fapll_wait_lock(fd);
+ ti_fapll_clear_bypass(fd);
+
+ return 0;
+}
+
+static const struct clk_ops ti_fapll_ops = {
+ .enable = ti_fapll_enable,
+ .disable = ti_fapll_disable,
+ .is_enabled = ti_fapll_is_enabled,
+ .recalc_rate = ti_fapll_recalc_rate,
+ .get_parent = ti_fapll_get_parent,
+ .round_rate = ti_fapll_round_rate,
+ .set_rate = ti_fapll_set_rate,
+};
+
+static int ti_fapll_synth_enable(struct clk_hw *hw)
+{
+ struct fapll_synth *synth = to_synth(hw);
+ u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
+
+ v &= ~(1 << synth->index);
+ writel_relaxed(v, synth->fd->base + FAPLL_PWD_OFFSET);
+
+ return 0;
+}
+
+static void ti_fapll_synth_disable(struct clk_hw *hw)
+{
+ struct fapll_synth *synth = to_synth(hw);
+ u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
+
+ v |= 1 << synth->index;
+ writel_relaxed(v, synth->fd->base + FAPLL_PWD_OFFSET);
+}
+
+static int ti_fapll_synth_is_enabled(struct clk_hw *hw)
+{
+ struct fapll_synth *synth = to_synth(hw);
+ u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
+
+ return !(v & (1 << synth->index));
+}
+
+/*
+ * See dm816x TRM chapter 1.10.3 Flying Adder PLL for more info
+ */
+static unsigned long ti_fapll_synth_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct fapll_synth *synth = to_synth(hw);
+ u32 synth_div_m;
+ u64 rate;
+
+	/* The audio_pll_clk1 is hardwired to produce a 32.768 kHz clock */
+ if (!synth->div)
+ return 32768;
+
+ /*
+ * PLL in bypass sets the synths in bypass mode too. The PLL rate
+	 * can also be set to 27MHz, so we can't use parent_rate to
+ * check for bypass mode.
+ */
+ if (ti_fapll_clock_is_bypass(synth->fd))
+ return parent_rate;
+
+ rate = parent_rate;
+
+ /*
+ * Synth frequency integer and fractional divider.
+ * Note that the phase output K is 8, so the result needs
+ * to be multiplied by SYNTH_PHASE_K.
+ */
+ if (synth->freq) {
+ u32 v, synth_int_div, synth_frac_div, synth_div_freq;
+
+ v = readl_relaxed(synth->freq);
+ synth_int_div = (v >> 24) & 0xf;
+ synth_frac_div = v & 0xffffff;
+ synth_div_freq = (synth_int_div * 10000000) + synth_frac_div;
+ rate *= 10000000;
+ do_div(rate, synth_div_freq);
+ rate *= SYNTH_PHASE_K;
+ }
+
+ /* Synth post-divider M */
+ synth_div_m = readl_relaxed(synth->div) & SYNTH_MAX_DIV_M;
+
+ return DIV_ROUND_UP_ULL(rate, synth_div_m);
+}
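Collapsed into one expression, the synthesizer rate above is parent * 10^7 / (int * 10^7 + frac) * K, rounded up after the post-divider M, with K = 8. A standalone sketch (the freq register is assumed non-zero and M at least 1):

#include <stdint.h>

/* freq register: integer divider in bits 24..27, fractional part in bits 0..23. */
static uint64_t fapll_synth_rate(uint64_t parent_rate, uint32_t freq_reg,
				 uint32_t div_m)
{
	uint32_t int_div = (freq_reg >> 24) & 0xf;
	uint32_t frac_div = freq_reg & 0xffffff;
	uint64_t div_freq = (uint64_t)int_div * 10000000 + frac_div;
	uint64_t rate = parent_rate * 10000000;

	rate /= div_freq;
	rate *= 8;				/* SYNTH_PHASE_K */

	return (rate + div_m - 1) / div_m;	/* DIV_ROUND_UP */
}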
+
+static unsigned long ti_fapll_synth_get_frac_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct fapll_synth *synth = to_synth(hw);
+ unsigned long current_rate, frac_rate;
+ u32 post_div_m;
+
+ current_rate = ti_fapll_synth_recalc_rate(hw, parent_rate);
+ post_div_m = readl_relaxed(synth->div) & SYNTH_MAX_DIV_M;
+ frac_rate = current_rate * post_div_m;
+
+ return frac_rate;
+}
+
+static u32 ti_fapll_synth_set_frac_rate(struct fapll_synth *synth,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ u32 post_div_m, synth_int_div = 0, synth_frac_div = 0, v;
+
+ post_div_m = DIV_ROUND_UP_ULL((u64)parent_rate * SYNTH_PHASE_K, rate);
+ post_div_m = post_div_m / SYNTH_MAX_INT_DIV;
+ if (post_div_m > SYNTH_MAX_DIV_M)
+ return -EINVAL;
+ if (!post_div_m)
+ post_div_m = 1;
+
+ for (; post_div_m < SYNTH_MAX_DIV_M; post_div_m++) {
+ synth_int_div = DIV_ROUND_UP_ULL((u64)parent_rate *
+ SYNTH_PHASE_K *
+ 10000000,
+ rate * post_div_m);
+ synth_frac_div = synth_int_div % 10000000;
+ synth_int_div /= 10000000;
+
+ if (synth_int_div <= SYNTH_MAX_INT_DIV)
+ break;
+ }
+
+ if (synth_int_div > SYNTH_MAX_INT_DIV)
+ return -EINVAL;
+
+ v = readl_relaxed(synth->freq);
+ v &= ~0x1fffffff;
+ v |= (synth_int_div & SYNTH_MAX_INT_DIV) << 24;
+ v |= (synth_frac_div & 0xffffff);
+ v |= SYNTH_LDFREQ;
+ writel_relaxed(v, synth->freq);
+
+ return post_div_m;
+}
+
+static long ti_fapll_synth_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct fapll_synth *synth = to_synth(hw);
+ struct fapll_data *fd = synth->fd;
+ unsigned long r;
+
+ if (ti_fapll_clock_is_bypass(fd) || !synth->div || !rate)
+ return -EINVAL;
+
+ /* Only post divider m available with no fractional divider? */
+ if (!synth->freq) {
+ unsigned long frac_rate;
+ u32 synth_post_div_m;
+
+ frac_rate = ti_fapll_synth_get_frac_rate(hw, *parent_rate);
+ synth_post_div_m = DIV_ROUND_UP(frac_rate, rate);
+ r = DIV_ROUND_UP(frac_rate, synth_post_div_m);
+ goto out;
+ }
+
+ r = *parent_rate * SYNTH_PHASE_K;
+ if (rate > r)
+ goto out;
+
+ r = DIV_ROUND_UP_ULL(r, SYNTH_MAX_INT_DIV * SYNTH_MAX_DIV_M);
+ if (rate < r)
+ goto out;
+
+ r = rate;
+out:
+ return r;
+}
+
+static int ti_fapll_synth_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct fapll_synth *synth = to_synth(hw);
+ struct fapll_data *fd = synth->fd;
+ unsigned long frac_rate, post_rate = 0;
+ u32 post_div_m = 0, v;
+
+ if (ti_fapll_clock_is_bypass(fd) || !synth->div || !rate)
+ return -EINVAL;
+
+ /* Produce the rate with just post divider M? */
+ frac_rate = ti_fapll_synth_get_frac_rate(hw, parent_rate);
+ if (frac_rate < rate) {
+ if (!synth->freq)
+ return -EINVAL;
+ } else {
+ post_div_m = DIV_ROUND_UP(frac_rate, rate);
+ if (post_div_m && (post_div_m <= SYNTH_MAX_DIV_M))
+ post_rate = DIV_ROUND_UP(frac_rate, post_div_m);
+ if (!synth->freq && !post_rate)
+ return -EINVAL;
+ }
+
+ /* Need to recalculate the fractional divider? */
+ if ((post_rate != rate) && synth->freq)
+ post_div_m = ti_fapll_synth_set_frac_rate(synth,
+ rate,
+ parent_rate);
+
+ v = readl_relaxed(synth->div);
+ v &= ~SYNTH_MAX_DIV_M;
+ v |= post_div_m;
+ v |= SYNTH_LDMDIV1;
+ writel_relaxed(v, synth->div);
+
+ return 0;
+}
+
+static const struct clk_ops ti_fapll_synt_ops = {
+ .enable = ti_fapll_synth_enable,
+ .disable = ti_fapll_synth_disable,
+ .is_enabled = ti_fapll_synth_is_enabled,
+ .recalc_rate = ti_fapll_synth_recalc_rate,
+ .round_rate = ti_fapll_synth_round_rate,
+ .set_rate = ti_fapll_synth_set_rate,
+};
+
+static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd,
+ void __iomem *freq,
+ void __iomem *div,
+ int index,
+ const char *name,
+ const char *parent,
+ struct clk *pll_clk)
+{
+ struct clk_init_data *init;
+ struct fapll_synth *synth;
+ struct clk *clk = ERR_PTR(-ENOMEM);
+
+ init = kzalloc(sizeof(*init), GFP_KERNEL);
+ if (!init)
+ return ERR_PTR(-ENOMEM);
+
+ init->ops = &ti_fapll_synt_ops;
+ init->name = name;
+ init->parent_names = &parent;
+ init->num_parents = 1;
+
+ synth = kzalloc(sizeof(*synth), GFP_KERNEL);
+ if (!synth)
+ goto free;
+
+ synth->fd = fd;
+ synth->index = index;
+ synth->freq = freq;
+ synth->div = div;
+ synth->name = name;
+ synth->hw.init = init;
+ synth->clk_pll = pll_clk;
+
+ clk = clk_register(NULL, &synth->hw);
+ if (IS_ERR(clk)) {
+ pr_err("failed to register clock\n");
+ goto free;
+ }
+
+ return clk;
+
+free:
+ kfree(synth);
+ kfree(init);
+
+ return clk;
+}
+
+static void __init ti_fapll_setup(struct device_node *node)
+{
+ struct fapll_data *fd;
+ struct clk_init_data *init = NULL;
+ const char *parent_name[2];
+ struct clk *pll_clk;
+ const char *name;
+ int i;
+
+ fd = kzalloc(sizeof(*fd), GFP_KERNEL);
+ if (!fd)
+ return;
+
+ fd->outputs.clks = kzalloc(sizeof(struct clk *) *
+				   (MAX_FAPLL_OUTPUTS + 1),
+ GFP_KERNEL);
+ if (!fd->outputs.clks)
+ goto free;
+
+ init = kzalloc(sizeof(*init), GFP_KERNEL);
+ if (!init)
+ goto free;
+
+ init->ops = &ti_fapll_ops;
+ name = ti_dt_clk_name(node);
+ init->name = name;
+
+ init->num_parents = of_clk_get_parent_count(node);
+ if (init->num_parents != 2) {
+ pr_err("%pOFn must have two parents\n", node);
+ goto free;
+ }
+
+ of_clk_parent_fill(node, parent_name, 2);
+ init->parent_names = parent_name;
+
+ fd->clk_ref = of_clk_get(node, 0);
+ if (IS_ERR(fd->clk_ref)) {
+ pr_err("%pOFn could not get clk_ref\n", node);
+ goto free;
+ }
+
+ fd->clk_bypass = of_clk_get(node, 1);
+ if (IS_ERR(fd->clk_bypass)) {
+ pr_err("%pOFn could not get clk_bypass\n", node);
+ goto free;
+ }
+
+ fd->base = of_iomap(node, 0);
+ if (!fd->base) {
+ pr_err("%pOFn could not get IO base\n", node);
+ goto free;
+ }
+
+ if (fapll_is_ddr_pll(fd->base))
+ fd->bypass_bit_inverted = true;
+
+ fd->name = name;
+ fd->hw.init = init;
+
+ /* Register the parent PLL */
+ pll_clk = clk_register(NULL, &fd->hw);
+ if (IS_ERR(pll_clk))
+ goto unmap;
+
+ fd->outputs.clks[0] = pll_clk;
+ fd->outputs.clk_num++;
+
+ /*
+ * Set up the child synthesizers starting at index 1 as the
+ * PLL output is at index 0. We need to check the clock-indices
+ * for numbering in case there are holes in the synth mapping,
+ * and then probe the synth register to see if it has a FREQ
+ * register available.
+ */
+ for (i = 0; i < MAX_FAPLL_OUTPUTS; i++) {
+ const char *output_name;
+ void __iomem *freq, *div;
+ struct clk *synth_clk;
+ int output_instance;
+ u32 v;
+
+ if (of_property_read_string_index(node, "clock-output-names",
+ i, &output_name))
+ continue;
+
+ if (of_property_read_u32_index(node, "clock-indices", i,
+ &output_instance))
+ output_instance = i;
+
+ freq = fd->base + (output_instance * 8);
+ div = freq + 4;
+
+ /* Check for hardwired audio_pll_clk1 */
+ if (is_audio_pll_clk1(freq)) {
+ freq = NULL;
+ div = NULL;
+ } else {
+ /* Does the synthesizer have a FREQ register? */
+ v = readl_relaxed(freq);
+ if (!v)
+ freq = NULL;
+ }
+ synth_clk = ti_fapll_synth_setup(fd, freq, div, output_instance,
+ output_name, name, pll_clk);
+ if (IS_ERR(synth_clk))
+ continue;
+
+ fd->outputs.clks[output_instance] = synth_clk;
+ fd->outputs.clk_num++;
+
+ clk_register_clkdev(synth_clk, output_name, NULL);
+ }
+
+ /* Register the child synthesizers as the FAPLL outputs */
+ of_clk_add_provider(node, of_clk_src_onecell_get, &fd->outputs);
+ /* Add clock alias for the outputs */
+
+ kfree(init);
+
+ return;
+
+unmap:
+ iounmap(fd->base);
+free:
+ if (fd->clk_bypass)
+ clk_put(fd->clk_bypass);
+ if (fd->clk_ref)
+ clk_put(fd->clk_ref);
+ kfree(fd->outputs.clks);
+ kfree(fd);
+ kfree(init);
+}
+
+CLK_OF_DECLARE(ti_fapll_clock, "ti,dm816-fapll-clock", ti_fapll_setup);
diff --git a/drivers/clk/ti/fixed-factor.c b/drivers/clk/ti/fixed-factor.c
new file mode 100644
index 000000000..c102c5320
--- /dev/null
+++ b/drivers/clk/ti/fixed-factor.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI Fixed Factor Clock
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+/**
+ * of_ti_fixed_factor_clk_setup - Setup function for TI fixed factor clock
+ * @node: device node for this clock
+ *
+ * Sets up a simple fixed factor clock based on device tree info.
+ */
+static void __init of_ti_fixed_factor_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+ const char *clk_name = ti_dt_clk_name(node);
+ const char *parent_name;
+ u32 div, mult;
+ u32 flags = 0;
+
+ if (of_property_read_u32(node, "ti,clock-div", &div)) {
+ pr_err("%pOFn must have a clock-div property\n", node);
+ return;
+ }
+
+ if (of_property_read_u32(node, "ti,clock-mult", &mult)) {
+ pr_err("%pOFn must have a clock-mult property\n", node);
+ return;
+ }
+
+ if (of_property_read_bool(node, "ti,set-rate-parent"))
+ flags |= CLK_SET_RATE_PARENT;
+
+ parent_name = of_clk_get_parent_name(node, 0);
+
+ clk = clk_register_fixed_factor(NULL, clk_name, parent_name, flags,
+ mult, div);
+
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ of_ti_clk_autoidle_setup(node);
+ ti_clk_add_alias(clk, clk_name);
+ }
+}
+CLK_OF_DECLARE(ti_fixed_factor_clk, "ti,fixed-factor-clock",
+ of_ti_fixed_factor_clk_setup);
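The registered clock simply scales its parent by the two DT properties, rate = parent * ti,clock-mult / ti,clock-div; a trivial sketch of that relation:

/* Fixed-factor relation: e.g. a 26 MHz parent with mult = 1, div = 2 gives 13 MHz. */
static unsigned long fixed_factor_rate(unsigned long parent_rate,
				       unsigned int mult, unsigned int div)
{
	return (unsigned long)(((unsigned long long)parent_rate * mult) / div);
}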
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
new file mode 100644
index 000000000..8e477d50d
--- /dev/null
+++ b/drivers/clk/ti/gate.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * OMAP gate clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *clk);
+
+static const struct clk_ops omap_gate_clkdm_clk_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_clkops_enable_clkdm,
+ .disable = &omap2_clkops_disable_clkdm,
+ .restore_context = clk_gate_restore_context,
+};
+
+const struct clk_ops omap_gate_clk_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+ .restore_context = clk_gate_restore_context,
+};
+
+static const struct clk_ops omap_gate_clk_hsdiv_restore_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap36xx_gate_clk_enable_with_hsdiv_restore,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+ .restore_context = clk_gate_restore_context,
+};
+
+/**
+ * omap36xx_gate_clk_enable_with_hsdiv_restore - enable clocks suffering
+ * from the HSDivider PWRDN problem. Implements Errata ID: i556.
+ * @hw: DPLL output struct clk_hw
+ *
+ * 3630 only: dpll3_m3_ck, dpll4_m2_ck, dpll4_m3_ck, dpll4_m4_ck,
+ * dpll4_m5_ck & dpll4_m6_ck dividers get loaded with their reset
+ * value after their respective PWRDN bits are set. Any dummy write
+ * (any value different from the read value) to the
+ * corresponding CM_CLKSEL register will refresh the dividers.
+ */
+static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *hw)
+{
+ struct clk_omap_divider *parent;
+ struct clk_hw *parent_hw;
+ u32 dummy_v, orig_v;
+ int ret;
+
+ /* Clear PWRDN bit of HSDIVIDER */
+ ret = omap2_dflt_clk_enable(hw);
+
+ /* Parent is the x2 node, get parent of parent for the m2 div */
+ parent_hw = clk_hw_get_parent(clk_hw_get_parent(hw));
+ parent = to_clk_omap_divider(parent_hw);
+
+ /* Restore the dividers */
+ if (!ret) {
+ orig_v = ti_clk_ll_ops->clk_readl(&parent->reg);
+ dummy_v = orig_v;
+
+ /* Write any other value different from the Read value */
+ dummy_v ^= (1 << parent->shift);
+ ti_clk_ll_ops->clk_writel(dummy_v, &parent->reg);
+
+ /* Write the original divider */
+ ti_clk_ll_ops->clk_writel(orig_v, &parent->reg);
+ }
+
+ return ret;
+}
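The refresh itself is just read, write any differing value, write the original back; a sketch with placeholder register accessors:

#include <stdint.h>

/* Errata i556 style refresh: toggle one bit of CM_CLKSEL and restore it so the
 * HSDIVIDER reloads its divider. reg_read()/reg_write() are placeholders.
 */
static void hsdiv_refresh(uint32_t (*reg_read)(void),
			  void (*reg_write)(uint32_t val), unsigned int shift)
{
	uint32_t orig = reg_read();

	reg_write(orig ^ (1u << shift));	/* any value != the read value */
	reg_write(orig);			/* write back the real divider */
}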
+
+static struct clk *_register_gate(struct device_node *node, const char *name,
+ const char *parent_name, unsigned long flags,
+ struct clk_omap_reg *reg, u8 bit_idx,
+ u8 clk_gate_flags, const struct clk_ops *ops,
+ const struct clk_hw_omap_ops *hw_ops)
+{
+ struct clk_init_data init = { NULL };
+ struct clk_hw_omap *clk_hw;
+ struct clk *clk;
+
+ clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ if (!clk_hw)
+ return ERR_PTR(-ENOMEM);
+
+ clk_hw->hw.init = &init;
+
+ init.name = name;
+ init.ops = ops;
+
+ memcpy(&clk_hw->enable_reg, reg, sizeof(*reg));
+ clk_hw->enable_bit = bit_idx;
+ clk_hw->ops = hw_ops;
+
+ clk_hw->flags = clk_gate_flags;
+
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ init.flags = flags;
+
+ clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
+
+ if (IS_ERR(clk))
+ kfree(clk_hw);
+
+ return clk;
+}
+
+static void __init _of_ti_gate_clk_setup(struct device_node *node,
+ const struct clk_ops *ops,
+ const struct clk_hw_omap_ops *hw_ops)
+{
+ struct clk *clk;
+ const char *parent_name;
+ struct clk_omap_reg reg;
+ const char *name;
+ u8 enable_bit = 0;
+ u32 val;
+ u32 flags = 0;
+ u8 clk_gate_flags = 0;
+
+ if (ops != &omap_gate_clkdm_clk_ops) {
+ if (ti_clk_get_reg_addr(node, 0, &reg))
+ return;
+
+ if (!of_property_read_u32(node, "ti,bit-shift", &val))
+ enable_bit = val;
+ }
+
+ if (of_clk_get_parent_count(node) != 1) {
+ pr_err("%pOFn must have 1 parent\n", node);
+ return;
+ }
+
+ parent_name = of_clk_get_parent_name(node, 0);
+
+ if (of_property_read_bool(node, "ti,set-rate-parent"))
+ flags |= CLK_SET_RATE_PARENT;
+
+ if (of_property_read_bool(node, "ti,set-bit-to-disable"))
+ clk_gate_flags |= INVERT_ENABLE;
+
+ name = ti_dt_clk_name(node);
+ clk = _register_gate(node, name, parent_name, flags, &reg,
+ enable_bit, clk_gate_flags, ops, hw_ops);
+
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+}
+
+static void __init
+_of_ti_composite_gate_clk_setup(struct device_node *node,
+ const struct clk_hw_omap_ops *hw_ops)
+{
+ struct clk_hw_omap *gate;
+ u32 val = 0;
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return;
+
+ if (ti_clk_get_reg_addr(node, 0, &gate->enable_reg))
+ goto cleanup;
+
+ of_property_read_u32(node, "ti,bit-shift", &val);
+
+ gate->enable_bit = val;
+ gate->ops = hw_ops;
+
+ if (!ti_clk_add_component(node, &gate->hw, CLK_COMPONENT_TYPE_GATE))
+ return;
+
+cleanup:
+ kfree(gate);
+}
+
+static void __init
+of_ti_composite_no_wait_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_composite_gate_clk_setup(node, NULL);
+}
+CLK_OF_DECLARE(ti_composite_no_wait_gate_clk, "ti,composite-no-wait-gate-clock",
+ of_ti_composite_no_wait_gate_clk_setup);
+
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+static void __init of_ti_composite_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_composite_gate_clk_setup(node, &clkhwops_iclk_wait);
+}
+CLK_OF_DECLARE(ti_composite_interface_clk, "ti,composite-interface-clock",
+ of_ti_composite_interface_clk_setup);
+#endif
+
+static void __init of_ti_composite_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_composite_gate_clk_setup(node, &clkhwops_wait);
+}
+CLK_OF_DECLARE(ti_composite_gate_clk, "ti,composite-gate-clock",
+ of_ti_composite_gate_clk_setup);
+
+
+static void __init of_ti_clkdm_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_gate_clk_setup(node, &omap_gate_clkdm_clk_ops, NULL);
+}
+CLK_OF_DECLARE(ti_clkdm_gate_clk, "ti,clkdm-gate-clock",
+ of_ti_clkdm_gate_clk_setup);
+
+static void __init of_ti_hsdiv_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_gate_clk_setup(node, &omap_gate_clk_hsdiv_restore_ops,
+ &clkhwops_wait);
+}
+CLK_OF_DECLARE(ti_hsdiv_gate_clk, "ti,hsdiv-gate-clock",
+ of_ti_hsdiv_gate_clk_setup);
+
+static void __init of_ti_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_gate_clk_setup(node, &omap_gate_clk_ops, NULL);
+}
+CLK_OF_DECLARE(ti_gate_clk, "ti,gate-clock", of_ti_gate_clk_setup);
+
+static void __init of_ti_wait_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_gate_clk_setup(node, &omap_gate_clk_ops, &clkhwops_wait);
+}
+CLK_OF_DECLARE(ti_wait_gate_clk, "ti,wait-gate-clock",
+ of_ti_wait_gate_clk_setup);
+
+#ifdef CONFIG_ARCH_OMAP3
+static void __init of_ti_am35xx_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_gate_clk_setup(node, &omap_gate_clk_ops,
+ &clkhwops_am35xx_ipss_module_wait);
+}
+CLK_OF_DECLARE(ti_am35xx_gate_clk, "ti,am35xx-gate-clock",
+ of_ti_am35xx_gate_clk_setup);
+
+static void __init of_ti_dss_gate_clk_setup(struct device_node *node)
+{
+ _of_ti_gate_clk_setup(node, &omap_gate_clk_ops,
+ &clkhwops_omap3430es2_dss_usbhost_wait);
+}
+CLK_OF_DECLARE(ti_dss_gate_clk, "ti,dss-gate-clock",
+ of_ti_dss_gate_clk_setup);
+#endif
diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
new file mode 100644
index 000000000..172301c64
--- /dev/null
+++ b/drivers/clk/ti/interface.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * OMAP interface clock support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+#include "clock.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+static const struct clk_ops ti_interface_clk_ops = {
+ .init = &omap2_init_clk_clkdm,
+ .enable = &omap2_dflt_clk_enable,
+ .disable = &omap2_dflt_clk_disable,
+ .is_enabled = &omap2_dflt_clk_is_enabled,
+};
+
+static struct clk *_register_interface(struct device_node *node,
+ const char *name,
+ const char *parent_name,
+ struct clk_omap_reg *reg, u8 bit_idx,
+ const struct clk_hw_omap_ops *ops)
+{
+ struct clk_init_data init = { NULL };
+ struct clk_hw_omap *clk_hw;
+ struct clk *clk;
+
+ clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ if (!clk_hw)
+ return ERR_PTR(-ENOMEM);
+
+ clk_hw->hw.init = &init;
+ clk_hw->ops = ops;
+ memcpy(&clk_hw->enable_reg, reg, sizeof(*reg));
+ clk_hw->enable_bit = bit_idx;
+
+ init.name = name;
+ init.ops = &ti_interface_clk_ops;
+ init.flags = 0;
+
+ init.num_parents = 1;
+ init.parent_names = &parent_name;
+
+ clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
+
+ if (IS_ERR(clk))
+ kfree(clk_hw);
+
+ return clk;
+}
+
+static void __init _of_ti_interface_clk_setup(struct device_node *node,
+ const struct clk_hw_omap_ops *ops)
+{
+ struct clk *clk;
+ const char *parent_name;
+ struct clk_omap_reg reg;
+ u8 enable_bit = 0;
+ const char *name;
+ u32 val;
+
+ if (ti_clk_get_reg_addr(node, 0, &reg))
+ return;
+
+ if (!of_property_read_u32(node, "ti,bit-shift", &val))
+ enable_bit = val;
+
+ parent_name = of_clk_get_parent_name(node, 0);
+ if (!parent_name) {
+ pr_err("%pOFn must have a parent\n", node);
+ return;
+ }
+
+ name = ti_dt_clk_name(node);
+ clk = _register_interface(node, name, parent_name, &reg,
+ enable_bit, ops);
+
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+}
+
+static void __init of_ti_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_interface_clk_setup(node, &clkhwops_iclk_wait);
+}
+CLK_OF_DECLARE(ti_interface_clk, "ti,omap3-interface-clock",
+ of_ti_interface_clk_setup);
+
+static void __init of_ti_no_wait_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_interface_clk_setup(node, &clkhwops_iclk);
+}
+CLK_OF_DECLARE(ti_no_wait_interface_clk, "ti,omap3-no-wait-interface-clock",
+ of_ti_no_wait_interface_clk_setup);
+
+#ifdef CONFIG_ARCH_OMAP3
+static void __init of_ti_hsotgusb_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_interface_clk_setup(node,
+ &clkhwops_omap3430es2_iclk_hsotgusb_wait);
+}
+CLK_OF_DECLARE(ti_hsotgusb_interface_clk, "ti,omap3-hsotgusb-interface-clock",
+ of_ti_hsotgusb_interface_clk_setup);
+
+static void __init of_ti_dss_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_interface_clk_setup(node,
+ &clkhwops_omap3430es2_iclk_dss_usbhost_wait);
+}
+CLK_OF_DECLARE(ti_dss_interface_clk, "ti,omap3-dss-interface-clock",
+ of_ti_dss_interface_clk_setup);
+
+static void __init of_ti_ssi_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_interface_clk_setup(node, &clkhwops_omap3430es2_iclk_ssi_wait);
+}
+CLK_OF_DECLARE(ti_ssi_interface_clk, "ti,omap3-ssi-interface-clock",
+ of_ti_ssi_interface_clk_setup);
+
+static void __init of_ti_am35xx_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_interface_clk_setup(node, &clkhwops_am35xx_ipss_wait);
+}
+CLK_OF_DECLARE(ti_am35xx_interface_clk, "ti,am35xx-interface-clock",
+ of_ti_am35xx_interface_clk_setup);
+#endif
+
+#ifdef CONFIG_SOC_OMAP2430
+static void __init of_ti_omap2430_interface_clk_setup(struct device_node *node)
+{
+ _of_ti_interface_clk_setup(node, &clkhwops_omap2430_i2chs_wait);
+}
+CLK_OF_DECLARE(ti_omap2430_interface_clk, "ti,omap2430-interface-clock",
+ of_ti_omap2430_interface_clk_setup);
+#endif
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
new file mode 100644
index 000000000..1ebafa386
--- /dev/null
+++ b/drivers/clk/ti/mux.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI Multiplexer Clock
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+#include "clock.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+static u8 ti_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct clk_omap_mux *mux = to_clk_omap_mux(hw);
+ int num_parents = clk_hw_get_num_parents(hw);
+ u32 val;
+
+ /*
+ * FIXME need a mux-specific flag to determine if val is bitwise or
+ * numeric. e.g. sys_clkin_ck's clksel field is 3 bits wide, but ranges
+ * from 0x1 to 0x7 (index starts at one)
+ * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so
+ * val = 0x4 really means "bit 2, index starts at bit 0"
+ */
+ val = ti_clk_ll_ops->clk_readl(&mux->reg) >> mux->shift;
+ val &= mux->mask;
+
+ if (mux->table) {
+ int i;
+
+ for (i = 0; i < num_parents; i++)
+ if (mux->table[i] == val)
+ return i;
+ return -EINVAL;
+ }
+
+ if (val && (mux->flags & CLK_MUX_INDEX_BIT))
+ val = ffs(val) - 1;
+
+ if (val && (mux->flags & CLK_MUX_INDEX_ONE))
+ val--;
+
+ if (val >= num_parents)
+ return -EINVAL;
+
+ return val;
+}
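To make the FIXME above concrete, the three interpretations of the clksel field differ as in this sketch (plain C, illustrative only):

#include <strings.h>	/* ffs() */

/* One-hot bit (CLK_MUX_INDEX_BIT), 1-based selector (CLK_MUX_INDEX_ONE),
 * or a plain 0-based index.
 */
static int decode_mux_index(unsigned int val, int index_bit, int index_one)
{
	if (val && index_bit)
		return ffs(val) - 1;	/* e.g. 0x4 -> parent index 2 */
	if (val && index_one)
		return val - 1;		/* e.g. 0x1 -> parent index 0 */
	return val;
}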
+
+static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_omap_mux *mux = to_clk_omap_mux(hw);
+ u32 val;
+
+ if (mux->table) {
+ index = mux->table[index];
+ } else {
+ if (mux->flags & CLK_MUX_INDEX_BIT)
+ index = (1 << ffs(index));
+
+ if (mux->flags & CLK_MUX_INDEX_ONE)
+ index++;
+ }
+
+ if (mux->flags & CLK_MUX_HIWORD_MASK) {
+ val = mux->mask << (mux->shift + 16);
+ } else {
+ val = ti_clk_ll_ops->clk_readl(&mux->reg);
+ val &= ~(mux->mask << mux->shift);
+ }
+ val |= index << mux->shift;
+ ti_clk_ll_ops->clk_writel(val, &mux->reg);
+ ti_clk_latch(&mux->reg, mux->latch);
+
+ return 0;
+}
+
+/**
+ * clk_mux_save_context - Save the parent selected in the mux
+ * @hw: pointer to struct clk_hw
+ *
+ * Save the parent mux value.
+ */
+static int clk_mux_save_context(struct clk_hw *hw)
+{
+ struct clk_omap_mux *mux = to_clk_omap_mux(hw);
+
+ mux->saved_parent = ti_clk_mux_get_parent(hw);
+ return 0;
+}
+
+/**
+ * clk_mux_restore_context - Restore the parent in the mux
+ * @hw: pointer to struct clk_hw
+ *
+ * Restore the saved parent mux value.
+ */
+static void clk_mux_restore_context(struct clk_hw *hw)
+{
+ struct clk_omap_mux *mux = to_clk_omap_mux(hw);
+
+ ti_clk_mux_set_parent(hw, mux->saved_parent);
+}
+
+const struct clk_ops ti_clk_mux_ops = {
+ .get_parent = ti_clk_mux_get_parent,
+ .set_parent = ti_clk_mux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+ .save_context = clk_mux_save_context,
+ .restore_context = clk_mux_restore_context,
+};
+
+static struct clk *_register_mux(struct device_node *node, const char *name,
+ const char * const *parent_names,
+ u8 num_parents, unsigned long flags,
+ struct clk_omap_reg *reg, u8 shift, u32 mask,
+ s8 latch, u8 clk_mux_flags, u32 *table)
+{
+ struct clk_omap_mux *mux;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ /* allocate the mux */
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &ti_clk_mux_ops;
+ init.flags = flags;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ /* struct clk_mux assignments */
+ memcpy(&mux->reg, reg, sizeof(*reg));
+ mux->shift = shift;
+ mux->mask = mask;
+ mux->latch = latch;
+ mux->flags = clk_mux_flags;
+ mux->table = table;
+ mux->hw.init = &init;
+
+ clk = of_ti_clk_register(node, &mux->hw, name);
+
+ if (IS_ERR(clk))
+ kfree(mux);
+
+ return clk;
+}
+
+/**
+ * of_mux_clk_setup - Setup function for simple mux rate clock
+ * @node: DT node for the clock
+ *
+ * Sets up a basic clock multiplexer.
+ */
+static void of_mux_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+ struct clk_omap_reg reg;
+ unsigned int num_parents;
+ const char **parent_names;
+ const char *name;
+ u8 clk_mux_flags = 0;
+ u32 mask = 0;
+ u32 shift = 0;
+ s32 latch = -EINVAL;
+ u32 flags = CLK_SET_RATE_NO_REPARENT;
+
+ num_parents = of_clk_get_parent_count(node);
+ if (num_parents < 2) {
+ pr_err("mux-clock %pOFn must have parents\n", node);
+ return;
+ }
+ parent_names = kzalloc((sizeof(char *) * num_parents), GFP_KERNEL);
+ if (!parent_names)
+ goto cleanup;
+
+ of_clk_parent_fill(node, parent_names, num_parents);
+
+ if (ti_clk_get_reg_addr(node, 0, &reg))
+ goto cleanup;
+
+ of_property_read_u32(node, "ti,bit-shift", &shift);
+
+ of_property_read_u32(node, "ti,latch-bit", &latch);
+
+ if (of_property_read_bool(node, "ti,index-starts-at-one"))
+ clk_mux_flags |= CLK_MUX_INDEX_ONE;
+
+ if (of_property_read_bool(node, "ti,set-rate-parent"))
+ flags |= CLK_SET_RATE_PARENT;
+
+ /* Generate bit-mask based on parent info */
+ mask = num_parents;
+ if (!(clk_mux_flags & CLK_MUX_INDEX_ONE))
+ mask--;
+
+ mask = (1 << fls(mask)) - 1;
+
+ name = ti_dt_clk_name(node);
+ clk = _register_mux(node, name, parent_names, num_parents,
+ flags, &reg, shift, mask, latch, clk_mux_flags,
+ NULL);
+
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+cleanup:
+ kfree(parent_names);
+}
+CLK_OF_DECLARE(mux_clk, "ti,mux-clock", of_mux_clk_setup);
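The mask derivation above simply picks the smallest all-ones value that covers the largest selector; for instance, five parents with 0-based indexing need fls(4) = 3 bits, so the mask is 0x7. A standalone sketch with a local fls():

/* Count the highest set bit position (1-based), like the kernel's fls(). */
static unsigned int fls_u32(unsigned int x)
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/* Bit mask wide enough to hold the largest mux selector value. */
static unsigned int mux_field_mask(unsigned int num_parents, int index_starts_at_one)
{
	unsigned int max_sel = index_starts_at_one ? num_parents : num_parents - 1;

	return (1u << fls_u32(max_sel)) - 1;
}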
+
+struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup)
+{
+ struct clk_omap_mux *mux;
+ int num_parents;
+
+ if (!setup)
+ return NULL;
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ mux->shift = setup->bit_shift;
+ mux->latch = -EINVAL;
+
+ mux->reg.index = setup->module;
+ mux->reg.offset = setup->reg;
+
+ if (setup->flags & CLKF_INDEX_STARTS_AT_ONE)
+ mux->flags |= CLK_MUX_INDEX_ONE;
+
+ num_parents = setup->num_parents;
+
+ mux->mask = num_parents - 1;
+ mux->mask = (1 << fls(mux->mask)) - 1;
+
+ return &mux->hw;
+}
+
+static void __init of_ti_composite_mux_clk_setup(struct device_node *node)
+{
+ struct clk_omap_mux *mux;
+ unsigned int num_parents;
+ u32 val;
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return;
+
+ if (ti_clk_get_reg_addr(node, 0, &mux->reg))
+ goto cleanup;
+
+ if (!of_property_read_u32(node, "ti,bit-shift", &val))
+ mux->shift = val;
+
+ if (of_property_read_bool(node, "ti,index-starts-at-one"))
+ mux->flags |= CLK_MUX_INDEX_ONE;
+
+ num_parents = of_clk_get_parent_count(node);
+
+ if (num_parents < 2) {
+ pr_err("%pOFn must have parents\n", node);
+ goto cleanup;
+ }
+
+ mux->mask = num_parents - 1;
+ mux->mask = (1 << fls(mux->mask)) - 1;
+
+ if (!ti_clk_add_component(node, &mux->hw, CLK_COMPONENT_TYPE_MUX))
+ return;
+
+cleanup:
+ kfree(mux);
+}
+CLK_OF_DECLARE(ti_composite_mux_clk_setup, "ti,composite-mux-clock",
+ of_ti_composite_mux_clk_setup);