author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/clk/bcm
parent    Initial commit. (diff)
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/clk/bcm')
All 24 files are new (mode -rw-r--r--, i.e. 100644):

 drivers/clk/bcm/Kconfig              |  111
 drivers/clk/bcm/Makefile             |   19
 drivers/clk/bcm/clk-bcm21664.c       |  282
 drivers/clk/bcm/clk-bcm2711-dvp.c    |  122
 drivers/clk/bcm/clk-bcm281xx.c       |  367
 drivers/clk/bcm/clk-bcm2835-aux.c    |   71
 drivers/clk/bcm/clk-bcm2835.c        | 2352
 drivers/clk/bcm/clk-bcm53573-ilp.c   |  145
 drivers/clk/bcm/clk-bcm63268-timer.c |  216
 drivers/clk/bcm/clk-bcm63xx-gate.c   |  576
 drivers/clk/bcm/clk-bcm63xx.c        |   12
 drivers/clk/bcm/clk-cygnus.c         |  304
 drivers/clk/bcm/clk-hr2.c            |   17
 drivers/clk/bcm/clk-iproc-armpll.c   |  273
 drivers/clk/bcm/clk-iproc-asiu.c     |  261
 drivers/clk/bcm/clk-iproc-pll.c      |  863
 drivers/clk/bcm/clk-iproc.h          |  214
 drivers/clk/bcm/clk-kona-setup.c     |  855
 drivers/clk/bcm/clk-kona.c           | 1270
 drivers/clk/bcm/clk-kona.h           |  502
 drivers/clk/bcm/clk-ns2.c            |  278
 drivers/clk/bcm/clk-nsp.c            |  129
 drivers/clk/bcm/clk-raspberrypi.c    |  468
 drivers/clk/bcm/clk-sr.c             |  421
24 files changed, 10128 insertions, 0 deletions
diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig
new file mode 100644
index 0000000000..a972d763eb
--- /dev/null
+++ b/drivers/clk/bcm/Kconfig
@@ -0,0 +1,111 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config CLK_BCM2711_DVP
+ tristate "Broadcom BCM2711 DVP support"
+ depends on ARCH_BCM2835 || COMPILE_TEST
+ depends on COMMON_CLK
+ default ARCH_BCM2835
+ select RESET_CONTROLLER
+ select RESET_SIMPLE
+ help
+ Enable common clock framework support for the Broadcom BCM2711
+ DVP Controller.
+
+config CLK_BCM2835
+ bool "Broadcom BCM2835 clock support"
+ depends on ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
+ depends on COMMON_CLK
+ default ARCH_BCM2835 || ARCH_BRCMSTB
+ help
+ Enable common clock framework support for Broadcom BCM2835
+ SoCs.
+
+config CLK_BCM_63XX
+ bool "Broadcom BCM63xx clock support"
+ depends on ARCH_BCMBCA || COMPILE_TEST
+ select COMMON_CLK_IPROC
+ default ARCH_BCMBCA
+ help
+ Enable common clock framework support for Broadcom BCM63xx DSL SoCs
+ based on the ARM architecture
+
+config CLK_BCM_63XX_GATE
+ bool "Broadcom BCM63xx gated clock support"
+ depends on BMIPS_GENERIC || COMPILE_TEST
+ default BMIPS_GENERIC
+ help
+ Enable common clock framework support for Broadcom BCM63xx DSL SoCs
+ based on the MIPS architecture
+
+config CLK_BCM63268_TIMER
+ bool "Broadcom BCM63268 timer clock and reset support"
+ depends on BMIPS_GENERIC || COMPILE_TEST
+ default BMIPS_GENERIC
+ select RESET_CONTROLLER
+ help
+ Enable timer clock and reset support for Broadcom BCM63268 DSL SoCs
+ based on the MIPS architecture.
+
+config CLK_BCM_KONA
+ bool "Broadcom Kona CCU clock support"
+ depends on ARCH_BCM_MOBILE || COMPILE_TEST
+ default ARCH_BCM_MOBILE
+ help
+ Enable common clock framework support for Broadcom SoCs
+ using "Kona" style clock control units, including those
+ in the BCM281xx and BCM21664 families.
+
+config COMMON_CLK_IPROC
+ bool
+ help
+ Enable common clock framework support for Broadcom SoCs
+ based on the iProc architecture
+
+config CLK_BCM_CYGNUS
+ bool "Broadcom Cygnus clock support"
+ depends on ARCH_BCM_CYGNUS || COMPILE_TEST
+ select COMMON_CLK_IPROC
+ default ARCH_BCM_CYGNUS
+ help
+ Enable common clock framework support for the Broadcom Cygnus SoC
+
+config CLK_BCM_HR2
+ bool "Broadcom Hurricane 2 clock support"
+ depends on ARCH_BCM_HR2 || COMPILE_TEST
+ select COMMON_CLK_IPROC
+ default ARCH_BCM_HR2
+ help
+ Enable common clock framework support for the Broadcom Hurricane 2
+ SoC
+
+config CLK_BCM_NSP
+ bool "Broadcom Northstar/Northstar Plus clock support"
+ depends on ARCH_BCM_5301X || ARCH_BCM_NSP || COMPILE_TEST
+ select COMMON_CLK_IPROC
+ default ARCH_BCM_5301X || ARCH_BCM_NSP
+ help
+ Enable common clock framework support for the Broadcom Northstar and
+ Northstar Plus SoCs
+
+config CLK_BCM_NS2
+ bool "Broadcom Northstar 2 clock support"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ select COMMON_CLK_IPROC
+ default ARCH_BCM_IPROC
+ help
+ Enable common clock framework support for the Broadcom Northstar 2 SoC
+
+config CLK_BCM_SR
+ bool "Broadcom Stingray clock support"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ select COMMON_CLK_IPROC
+ default ARCH_BCM_IPROC
+ help
+ Enable common clock framework support for the Broadcom Stingray SoC
+
+config CLK_RASPBERRYPI
+ tristate "Raspberry Pi firmware based clock support"
+ depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
+ help
+ Enable common clock framework support for Raspberry Pi's
+ firmware-dependent clocks.
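
All of these options register their clocks with the common clock framework, so consumer drivers stay SoC-agnostic. A minimal consumer-side sketch (illustrative only; it assumes a platform device whose DT node carries a clocks = <...> phandle pointing at one of the providers added by this patch):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	/* Look up the (first) clock referenced by this device's DT node. */
	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Ungate the clock (and, transitively, its parents). */
	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* Optionally request a rate; the framework picks dividers/parents. */
	ret = clk_set_rate(clk, 48 * 1000 * 1000);
	if (ret)
		clk_disable_unprepare(clk);

	return ret;
}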
diff --git a/drivers/clk/bcm/Makefile b/drivers/clk/bcm/Makefile
new file mode 100644
index 0000000000..d0b6f4b1fb
--- /dev/null
+++ b/drivers/clk/bcm/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CLK_BCM_63XX) += clk-bcm63xx.o
+obj-$(CONFIG_CLK_BCM_63XX_GATE) += clk-bcm63xx-gate.o
+obj-$(CONFIG_CLK_BCM63268_TIMER) += clk-bcm63268-timer.o
+obj-$(CONFIG_CLK_BCM_KONA) += clk-kona.o
+obj-$(CONFIG_CLK_BCM_KONA) += clk-kona-setup.o
+obj-$(CONFIG_CLK_BCM_KONA) += clk-bcm281xx.o
+obj-$(CONFIG_CLK_BCM_KONA) += clk-bcm21664.o
+obj-$(CONFIG_COMMON_CLK_IPROC) += clk-iproc-armpll.o clk-iproc-pll.o clk-iproc-asiu.o
+obj-$(CONFIG_CLK_BCM2711_DVP) += clk-bcm2711-dvp.o
+obj-$(CONFIG_CLK_BCM2835) += clk-bcm2835.o
+obj-$(CONFIG_CLK_BCM2835) += clk-bcm2835-aux.o
+obj-$(CONFIG_CLK_RASPBERRYPI) += clk-raspberrypi.o
+obj-$(CONFIG_ARCH_BCM_53573) += clk-bcm53573-ilp.o
+obj-$(CONFIG_CLK_BCM_CYGNUS) += clk-cygnus.o
+obj-$(CONFIG_CLK_BCM_HR2) += clk-hr2.o
+obj-$(CONFIG_CLK_BCM_NSP) += clk-nsp.o
+obj-$(CONFIG_CLK_BCM_NS2) += clk-ns2.o
+obj-$(CONFIG_CLK_BCM_SR) += clk-sr.o
diff --git a/drivers/clk/bcm/clk-bcm21664.c b/drivers/clk/bcm/clk-bcm21664.c
new file mode 100644
index 0000000000..520c3aeb4e
--- /dev/null
+++ b/drivers/clk/bcm/clk-bcm21664.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Broadcom Corporation
+ * Copyright 2014 Linaro Limited
+ */
+
+#include "clk-kona.h"
+#include "dt-bindings/clock/bcm21664.h"
+
+#define BCM21664_CCU_COMMON(_name, _capname) \
+ KONA_CCU_COMMON(BCM21664, _name, _capname)
+
+/* Root CCU */
+
+static struct peri_clk_data frac_1m_data = {
+ .gate = HW_SW_GATE(0x214, 16, 0, 1),
+ .clocks = CLOCKS("ref_crystal"),
+};
+
+static struct ccu_data root_ccu_data = {
+ BCM21664_CCU_COMMON(root, ROOT),
+ /* no policy control */
+ .kona_clks = {
+ [BCM21664_ROOT_CCU_FRAC_1M] =
+ KONA_CLK(root, frac_1m, peri),
+ [BCM21664_ROOT_CCU_CLOCK_COUNT] = LAST_KONA_CLK,
+ },
+};
+
+/* AON CCU */
+
+static struct peri_clk_data hub_timer_data = {
+ .gate = HW_SW_GATE(0x0414, 16, 0, 1),
+ .hyst = HYST(0x0414, 8, 9),
+ .clocks = CLOCKS("bbl_32k",
+ "frac_1m",
+ "dft_19_5m"),
+ .sel = SELECTOR(0x0a10, 0, 2),
+ .trig = TRIGGER(0x0a40, 4),
+};
+
+static struct ccu_data aon_ccu_data = {
+ BCM21664_CCU_COMMON(aon, AON),
+ .policy = {
+ .enable = CCU_LVM_EN(0x0034, 0),
+ .control = CCU_POLICY_CTL(0x000c, 0, 1, 2),
+ },
+ .kona_clks = {
+ [BCM21664_AON_CCU_HUB_TIMER] =
+ KONA_CLK(aon, hub_timer, peri),
+ [BCM21664_AON_CCU_CLOCK_COUNT] = LAST_KONA_CLK,
+ },
+};
+
+/* Master CCU */
+
+static struct peri_clk_data sdio1_data = {
+ .gate = HW_SW_GATE(0x0358, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_52m",
+ "ref_52m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a28, 0, 3),
+ .div = DIVIDER(0x0a28, 4, 14),
+ .trig = TRIGGER(0x0afc, 9),
+};
+
+static struct peri_clk_data sdio2_data = {
+ .gate = HW_SW_GATE(0x035c, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_52m",
+ "ref_52m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a2c, 0, 3),
+ .div = DIVIDER(0x0a2c, 4, 14),
+ .trig = TRIGGER(0x0afc, 10),
+};
+
+static struct peri_clk_data sdio3_data = {
+ .gate = HW_SW_GATE(0x0364, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_52m",
+ "ref_52m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a34, 0, 3),
+ .div = DIVIDER(0x0a34, 4, 14),
+ .trig = TRIGGER(0x0afc, 12),
+};
+
+static struct peri_clk_data sdio4_data = {
+ .gate = HW_SW_GATE(0x0360, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_52m",
+ "ref_52m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a30, 0, 3),
+ .div = DIVIDER(0x0a30, 4, 14),
+ .trig = TRIGGER(0x0afc, 11),
+};
+
+static struct peri_clk_data sdio1_sleep_data = {
+ .clocks = CLOCKS("ref_32k"), /* Verify */
+ .gate = HW_SW_GATE(0x0358, 18, 2, 3),
+};
+
+static struct peri_clk_data sdio2_sleep_data = {
+ .clocks = CLOCKS("ref_32k"), /* Verify */
+ .gate = HW_SW_GATE(0x035c, 18, 2, 3),
+};
+
+static struct peri_clk_data sdio3_sleep_data = {
+ .clocks = CLOCKS("ref_32k"), /* Verify */
+ .gate = HW_SW_GATE(0x0364, 18, 2, 3),
+};
+
+static struct peri_clk_data sdio4_sleep_data = {
+ .clocks = CLOCKS("ref_32k"), /* Verify */
+ .gate = HW_SW_GATE(0x0360, 18, 2, 3),
+};
+
+static struct ccu_data master_ccu_data = {
+ BCM21664_CCU_COMMON(master, MASTER),
+ .policy = {
+ .enable = CCU_LVM_EN(0x0034, 0),
+ .control = CCU_POLICY_CTL(0x000c, 0, 1, 2),
+ },
+ .kona_clks = {
+ [BCM21664_MASTER_CCU_SDIO1] =
+ KONA_CLK(master, sdio1, peri),
+ [BCM21664_MASTER_CCU_SDIO2] =
+ KONA_CLK(master, sdio2, peri),
+ [BCM21664_MASTER_CCU_SDIO3] =
+ KONA_CLK(master, sdio3, peri),
+ [BCM21664_MASTER_CCU_SDIO4] =
+ KONA_CLK(master, sdio4, peri),
+ [BCM21664_MASTER_CCU_SDIO1_SLEEP] =
+ KONA_CLK(master, sdio1_sleep, peri),
+ [BCM21664_MASTER_CCU_SDIO2_SLEEP] =
+ KONA_CLK(master, sdio2_sleep, peri),
+ [BCM21664_MASTER_CCU_SDIO3_SLEEP] =
+ KONA_CLK(master, sdio3_sleep, peri),
+ [BCM21664_MASTER_CCU_SDIO4_SLEEP] =
+ KONA_CLK(master, sdio4_sleep, peri),
+ [BCM21664_MASTER_CCU_CLOCK_COUNT] = LAST_KONA_CLK,
+ },
+};
+
+/* Slave CCU */
+
+static struct peri_clk_data uartb_data = {
+ .gate = HW_SW_GATE(0x0400, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_156m",
+ "ref_156m"),
+ .sel = SELECTOR(0x0a10, 0, 2),
+ .div = FRAC_DIVIDER(0x0a10, 4, 12, 8),
+ .trig = TRIGGER(0x0afc, 2),
+};
+
+static struct peri_clk_data uartb2_data = {
+ .gate = HW_SW_GATE(0x0404, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_156m",
+ "ref_156m"),
+ .sel = SELECTOR(0x0a14, 0, 2),
+ .div = FRAC_DIVIDER(0x0a14, 4, 12, 8),
+ .trig = TRIGGER(0x0afc, 3),
+};
+
+static struct peri_clk_data uartb3_data = {
+ .gate = HW_SW_GATE(0x0408, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_156m",
+ "ref_156m"),
+ .sel = SELECTOR(0x0a18, 0, 2),
+ .div = FRAC_DIVIDER(0x0a18, 4, 12, 8),
+ .trig = TRIGGER(0x0afc, 4),
+};
+
+static struct peri_clk_data bsc1_data = {
+ .gate = HW_SW_GATE(0x0458, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_13m",
+ "ref_13m"),
+ .sel = SELECTOR(0x0a64, 0, 3),
+ .trig = TRIGGER(0x0afc, 23),
+};
+
+static struct peri_clk_data bsc2_data = {
+ .gate = HW_SW_GATE(0x045c, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_13m",
+ "ref_13m"),
+ .sel = SELECTOR(0x0a68, 0, 3),
+ .trig = TRIGGER(0x0afc, 24),
+};
+
+static struct peri_clk_data bsc3_data = {
+ .gate = HW_SW_GATE(0x0470, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_13m",
+ "ref_13m"),
+ .sel = SELECTOR(0x0a7c, 0, 3),
+ .trig = TRIGGER(0x0afc, 18),
+};
+
+static struct peri_clk_data bsc4_data = {
+ .gate = HW_SW_GATE(0x0474, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_13m",
+ "ref_13m"),
+ .sel = SELECTOR(0x0a80, 0, 3),
+ .trig = TRIGGER(0x0afc, 19),
+};
+
+static struct ccu_data slave_ccu_data = {
+ BCM21664_CCU_COMMON(slave, SLAVE),
+ .policy = {
+ .enable = CCU_LVM_EN(0x0034, 0),
+ .control = CCU_POLICY_CTL(0x000c, 0, 1, 2),
+ },
+ .kona_clks = {
+ [BCM21664_SLAVE_CCU_UARTB] =
+ KONA_CLK(slave, uartb, peri),
+ [BCM21664_SLAVE_CCU_UARTB2] =
+ KONA_CLK(slave, uartb2, peri),
+ [BCM21664_SLAVE_CCU_UARTB3] =
+ KONA_CLK(slave, uartb3, peri),
+ [BCM21664_SLAVE_CCU_BSC1] =
+ KONA_CLK(slave, bsc1, peri),
+ [BCM21664_SLAVE_CCU_BSC2] =
+ KONA_CLK(slave, bsc2, peri),
+ [BCM21664_SLAVE_CCU_BSC3] =
+ KONA_CLK(slave, bsc3, peri),
+ [BCM21664_SLAVE_CCU_BSC4] =
+ KONA_CLK(slave, bsc4, peri),
+ [BCM21664_SLAVE_CCU_CLOCK_COUNT] = LAST_KONA_CLK,
+ },
+};
+
+/* Device tree match table callback functions */
+
+static void __init kona_dt_root_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(&root_ccu_data, node);
+}
+
+static void __init kona_dt_aon_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(&aon_ccu_data, node);
+}
+
+static void __init kona_dt_master_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(&master_ccu_data, node);
+}
+
+static void __init kona_dt_slave_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(&slave_ccu_data, node);
+}
+
+CLK_OF_DECLARE(bcm21664_root_ccu, BCM21664_DT_ROOT_CCU_COMPAT,
+ kona_dt_root_ccu_setup);
+CLK_OF_DECLARE(bcm21664_aon_ccu, BCM21664_DT_AON_CCU_COMPAT,
+ kona_dt_aon_ccu_setup);
+CLK_OF_DECLARE(bcm21664_master_ccu, BCM21664_DT_MASTER_CCU_COMPAT,
+ kona_dt_master_ccu_setup);
+CLK_OF_DECLARE(bcm21664_slave_ccu, BCM21664_DT_SLAVE_CCU_COMPAT,
+ kona_dt_slave_ccu_setup);
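
CLK_OF_DECLARE() places an of_device_id entry in the __clk_of_table linker section; of_clk_init() walks that table very early in boot, before the platform bus exists, and calls the setup function for every matching device-tree node. Roughly, and simplified (the real macro comes from include/linux/clk-provider.h via linux/of.h), the first declaration above expands to something like:

/* Simplified sketch of what CLK_OF_DECLARE(bcm21664_root_ccu, ...) becomes */
static const struct of_device_id __of_table_bcm21664_root_ccu
	__used __section("__clk_of_table") = {
	.compatible	= BCM21664_DT_ROOT_CCU_COMPAT,
	.data		= kona_dt_root_ccu_setup,
};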
diff --git a/drivers/clk/bcm/clk-bcm2711-dvp.c b/drivers/clk/bcm/clk-bcm2711-dvp.c
new file mode 100644
index 0000000000..e4fbbf3c40
--- /dev/null
+++ b/drivers/clk/bcm/clk-bcm2711-dvp.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright 2020 Cerno
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/reset/reset-simple.h>
+
+#define DVP_HT_RPI_SW_INIT 0x04
+#define DVP_HT_RPI_MISC_CONFIG 0x08
+
+#define NR_CLOCKS 2
+#define NR_RESETS 6
+
+struct clk_dvp {
+ struct clk_hw_onecell_data *data;
+ struct reset_simple_data reset;
+};
+
+static const struct clk_parent_data clk_dvp_parent = {
+ .index = 0,
+};
+
+static int clk_dvp_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *data;
+ struct clk_dvp *dvp;
+ void __iomem *base;
+ int ret;
+
+ dvp = devm_kzalloc(&pdev->dev, sizeof(*dvp), GFP_KERNEL);
+ if (!dvp)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, dvp);
+
+ dvp->data = devm_kzalloc(&pdev->dev,
+ struct_size(dvp->data, hws, NR_CLOCKS),
+ GFP_KERNEL);
+ if (!dvp->data)
+ return -ENOMEM;
+ data = dvp->data;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ dvp->reset.rcdev.owner = THIS_MODULE;
+ dvp->reset.rcdev.nr_resets = NR_RESETS;
+ dvp->reset.rcdev.ops = &reset_simple_ops;
+ dvp->reset.rcdev.of_node = pdev->dev.of_node;
+ dvp->reset.membase = base + DVP_HT_RPI_SW_INIT;
+ spin_lock_init(&dvp->reset.lock);
+
+ ret = devm_reset_controller_register(&pdev->dev, &dvp->reset.rcdev);
+ if (ret)
+ return ret;
+
+ data->hws[0] = clk_hw_register_gate_parent_data(&pdev->dev,
+ "hdmi0-108MHz",
+ &clk_dvp_parent, 0,
+ base + DVP_HT_RPI_MISC_CONFIG, 3,
+ CLK_GATE_SET_TO_DISABLE,
+ &dvp->reset.lock);
+ if (IS_ERR(data->hws[0]))
+ return PTR_ERR(data->hws[0]);
+
+ data->hws[1] = clk_hw_register_gate_parent_data(&pdev->dev,
+ "hdmi1-108MHz",
+ &clk_dvp_parent, 0,
+ base + DVP_HT_RPI_MISC_CONFIG, 4,
+ CLK_GATE_SET_TO_DISABLE,
+ &dvp->reset.lock);
+ if (IS_ERR(data->hws[1])) {
+ ret = PTR_ERR(data->hws[1]);
+ goto unregister_clk0;
+ }
+
+ data->num = NR_CLOCKS;
+ ret = of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_onecell_get,
+ data);
+ if (ret)
+ goto unregister_clk1;
+
+ return 0;
+
+unregister_clk1:
+ clk_hw_unregister_gate(data->hws[1]);
+
+unregister_clk0:
+ clk_hw_unregister_gate(data->hws[0]);
+ return ret;
+};
+
+static void clk_dvp_remove(struct platform_device *pdev)
+{
+ struct clk_dvp *dvp = platform_get_drvdata(pdev);
+ struct clk_hw_onecell_data *data = dvp->data;
+
+ clk_hw_unregister_gate(data->hws[1]);
+ clk_hw_unregister_gate(data->hws[0]);
+}
+
+static const struct of_device_id clk_dvp_dt_ids[] = {
+ { .compatible = "brcm,brcm2711-dvp", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, clk_dvp_dt_ids);
+
+static struct platform_driver clk_dvp_driver = {
+ .probe = clk_dvp_probe,
+ .remove_new = clk_dvp_remove,
+ .driver = {
+ .name = "brcm2711-dvp",
+ .of_match_table = clk_dvp_dt_ids,
+ },
+};
+module_platform_driver(clk_dvp_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
+MODULE_DESCRIPTION("BCM2711 DVP clock driver");
+MODULE_LICENSE("GPL");
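
The probe above hands its clk_hw_onecell_data to of_clk_add_hw_provider() together with the generic of_clk_hw_onecell_get() callback, which maps a consumer's one-cell clock specifier onto an index into data->hws. Conceptually it behaves like this sketch (not the kernel's exact implementation):

static struct clk_hw *onecell_get_sketch(struct of_phandle_args *clkspec,
					 void *data)
{
	struct clk_hw_onecell_data *hw_data = data;
	unsigned int idx = clkspec->args[0];

	/* The single cell in the consumer's "clocks" property is the index. */
	if (idx >= hw_data->num)
		return ERR_PTR(-EINVAL);

	return hw_data->hws[idx];
}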
diff --git a/drivers/clk/bcm/clk-bcm281xx.c b/drivers/clk/bcm/clk-bcm281xx.c
new file mode 100644
index 0000000000..823d5dfa31
--- /dev/null
+++ b/drivers/clk/bcm/clk-bcm281xx.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ * Copyright 2013 Linaro Limited
+ */
+
+#include "clk-kona.h"
+#include "dt-bindings/clock/bcm281xx.h"
+
+#define BCM281XX_CCU_COMMON(_name, _ucase_name) \
+ KONA_CCU_COMMON(BCM281XX, _name, _ucase_name)
+
+/* Root CCU */
+
+static struct peri_clk_data frac_1m_data = {
+ .gate = HW_SW_GATE(0x214, 16, 0, 1),
+ .trig = TRIGGER(0x0e04, 0),
+ .div = FRAC_DIVIDER(0x0e00, 0, 22, 16),
+ .clocks = CLOCKS("ref_crystal"),
+};
+
+static struct ccu_data root_ccu_data = {
+ BCM281XX_CCU_COMMON(root, ROOT),
+ .kona_clks = {
+ [BCM281XX_ROOT_CCU_FRAC_1M] =
+ KONA_CLK(root, frac_1m, peri),
+ [BCM281XX_ROOT_CCU_CLOCK_COUNT] = LAST_KONA_CLK,
+ },
+};
+
+/* AON CCU */
+
+static struct peri_clk_data hub_timer_data = {
+ .gate = HW_SW_GATE(0x0414, 16, 0, 1),
+ .clocks = CLOCKS("bbl_32k",
+ "frac_1m",
+ "dft_19_5m"),
+ .sel = SELECTOR(0x0a10, 0, 2),
+ .trig = TRIGGER(0x0a40, 4),
+};
+
+static struct peri_clk_data pmu_bsc_data = {
+ .gate = HW_SW_GATE(0x0418, 16, 0, 1),
+ .clocks = CLOCKS("ref_crystal",
+ "pmu_bsc_var",
+ "bbl_32k"),
+ .sel = SELECTOR(0x0a04, 0, 2),
+ .div = DIVIDER(0x0a04, 3, 4),
+ .trig = TRIGGER(0x0a40, 0),
+};
+
+static struct peri_clk_data pmu_bsc_var_data = {
+ .clocks = CLOCKS("var_312m",
+ "ref_312m"),
+ .sel = SELECTOR(0x0a00, 0, 2),
+ .div = DIVIDER(0x0a00, 4, 5),
+ .trig = TRIGGER(0x0a40, 2),
+};
+
+static struct ccu_data aon_ccu_data = {
+ BCM281XX_CCU_COMMON(aon, AON),
+ .kona_clks = {
+ [BCM281XX_AON_CCU_HUB_TIMER] =
+ KONA_CLK(aon, hub_timer, peri),
+ [BCM281XX_AON_CCU_PMU_BSC] =
+ KONA_CLK(aon, pmu_bsc, peri),
+ [BCM281XX_AON_CCU_PMU_BSC_VAR] =
+ KONA_CLK(aon, pmu_bsc_var, peri),
+ [BCM281XX_AON_CCU_CLOCK_COUNT] = LAST_KONA_CLK,
+ },
+};
+
+/* Hub CCU */
+
+static struct peri_clk_data tmon_1m_data = {
+ .gate = HW_SW_GATE(0x04a4, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "frac_1m"),
+ .sel = SELECTOR(0x0e74, 0, 2),
+ .trig = TRIGGER(0x0e84, 1),
+};
+
+static struct ccu_data hub_ccu_data = {
+ BCM281XX_CCU_COMMON(hub, HUB),
+ .kona_clks = {
+ [BCM281XX_HUB_CCU_TMON_1M] =
+ KONA_CLK(hub, tmon_1m, peri),
+ [BCM281XX_HUB_CCU_CLOCK_COUNT] = LAST_KONA_CLK,
+ },
+};
+
+/* Master CCU */
+
+static struct peri_clk_data sdio1_data = {
+ .gate = HW_SW_GATE(0x0358, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_52m",
+ "ref_52m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a28, 0, 3),
+ .div = DIVIDER(0x0a28, 4, 14),
+ .trig = TRIGGER(0x0afc, 9),
+};
+
+static struct peri_clk_data sdio2_data = {
+ .gate = HW_SW_GATE(0x035c, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_52m",
+ "ref_52m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a2c, 0, 3),
+ .div = DIVIDER(0x0a2c, 4, 14),
+ .trig = TRIGGER(0x0afc, 10),
+};
+
+static struct peri_clk_data sdio3_data = {
+ .gate = HW_SW_GATE(0x0364, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_52m",
+ "ref_52m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a34, 0, 3),
+ .div = DIVIDER(0x0a34, 4, 14),
+ .trig = TRIGGER(0x0afc, 12),
+};
+
+static struct peri_clk_data sdio4_data = {
+ .gate = HW_SW_GATE(0x0360, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_52m",
+ "ref_52m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a30, 0, 3),
+ .div = DIVIDER(0x0a30, 4, 14),
+ .trig = TRIGGER(0x0afc, 11),
+};
+
+static struct peri_clk_data usb_ic_data = {
+ .gate = HW_SW_GATE(0x0354, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_96m",
+ "ref_96m"),
+ .div = FIXED_DIVIDER(2),
+ .sel = SELECTOR(0x0a24, 0, 2),
+ .trig = TRIGGER(0x0afc, 7),
+};
+
+/* also called usbh_48m */
+static struct peri_clk_data hsic2_48m_data = {
+ .gate = HW_SW_GATE(0x0370, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a38, 0, 2),
+ .div = FIXED_DIVIDER(2),
+ .trig = TRIGGER(0x0afc, 5),
+};
+
+/* also called usbh_12m */
+static struct peri_clk_data hsic2_12m_data = {
+ .gate = HW_SW_GATE(0x0370, 20, 4, 5),
+ .div = DIVIDER(0x0a38, 12, 2),
+ .clocks = CLOCKS("ref_crystal",
+ "var_96m",
+ "ref_96m"),
+ .pre_div = FIXED_DIVIDER(2),
+ .sel = SELECTOR(0x0a38, 0, 2),
+ .trig = TRIGGER(0x0afc, 5),
+};
+
+static struct ccu_data master_ccu_data = {
+ BCM281XX_CCU_COMMON(master, MASTER),
+ .kona_clks = {
+ [BCM281XX_MASTER_CCU_SDIO1] =
+ KONA_CLK(master, sdio1, peri),
+ [BCM281XX_MASTER_CCU_SDIO2] =
+ KONA_CLK(master, sdio2, peri),
+ [BCM281XX_MASTER_CCU_SDIO3] =
+ KONA_CLK(master, sdio3, peri),
+ [BCM281XX_MASTER_CCU_SDIO4] =
+ KONA_CLK(master, sdio4, peri),
+ [BCM281XX_MASTER_CCU_USB_IC] =
+ KONA_CLK(master, usb_ic, peri),
+ [BCM281XX_MASTER_CCU_HSIC2_48M] =
+ KONA_CLK(master, hsic2_48m, peri),
+ [BCM281XX_MASTER_CCU_HSIC2_12M] =
+ KONA_CLK(master, hsic2_12m, peri),
+ [BCM281XX_MASTER_CCU_CLOCK_COUNT] = LAST_KONA_CLK,
+ },
+};
+
+/* Slave CCU */
+
+static struct peri_clk_data uartb_data = {
+ .gate = HW_SW_GATE(0x0400, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_156m",
+ "ref_156m"),
+ .sel = SELECTOR(0x0a10, 0, 2),
+ .div = FRAC_DIVIDER(0x0a10, 4, 12, 8),
+ .trig = TRIGGER(0x0afc, 2),
+};
+
+static struct peri_clk_data uartb2_data = {
+ .gate = HW_SW_GATE(0x0404, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_156m",
+ "ref_156m"),
+ .sel = SELECTOR(0x0a14, 0, 2),
+ .div = FRAC_DIVIDER(0x0a14, 4, 12, 8),
+ .trig = TRIGGER(0x0afc, 3),
+};
+
+static struct peri_clk_data uartb3_data = {
+ .gate = HW_SW_GATE(0x0408, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_156m",
+ "ref_156m"),
+ .sel = SELECTOR(0x0a18, 0, 2),
+ .div = FRAC_DIVIDER(0x0a18, 4, 12, 8),
+ .trig = TRIGGER(0x0afc, 4),
+};
+
+static struct peri_clk_data uartb4_data = {
+ .gate = HW_SW_GATE(0x0408, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_156m",
+ "ref_156m"),
+ .sel = SELECTOR(0x0a1c, 0, 2),
+ .div = FRAC_DIVIDER(0x0a1c, 4, 12, 8),
+ .trig = TRIGGER(0x0afc, 5),
+};
+
+static struct peri_clk_data ssp0_data = {
+ .gate = HW_SW_GATE(0x0410, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a20, 0, 3),
+ .div = DIVIDER(0x0a20, 4, 14),
+ .trig = TRIGGER(0x0afc, 6),
+};
+
+static struct peri_clk_data ssp2_data = {
+ .gate = HW_SW_GATE(0x0418, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_96m",
+ "ref_96m"),
+ .sel = SELECTOR(0x0a28, 0, 3),
+ .div = DIVIDER(0x0a28, 4, 14),
+ .trig = TRIGGER(0x0afc, 8),
+};
+
+static struct peri_clk_data bsc1_data = {
+ .gate = HW_SW_GATE(0x0458, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_13m",
+ "ref_13m"),
+ .sel = SELECTOR(0x0a64, 0, 3),
+ .trig = TRIGGER(0x0afc, 23),
+};
+
+static struct peri_clk_data bsc2_data = {
+ .gate = HW_SW_GATE(0x045c, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_13m",
+ "ref_13m"),
+ .sel = SELECTOR(0x0a68, 0, 3),
+ .trig = TRIGGER(0x0afc, 24),
+};
+
+static struct peri_clk_data bsc3_data = {
+ .gate = HW_SW_GATE(0x0484, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m",
+ "ref_104m",
+ "var_13m",
+ "ref_13m"),
+ .sel = SELECTOR(0x0a84, 0, 3),
+ .trig = TRIGGER(0x0b00, 2),
+};
+
+static struct peri_clk_data pwm_data = {
+ .gate = HW_SW_GATE(0x0468, 18, 2, 3),
+ .clocks = CLOCKS("ref_crystal",
+ "var_104m"),
+ .sel = SELECTOR(0x0a70, 0, 2),
+ .div = DIVIDER(0x0a70, 4, 3),
+ .trig = TRIGGER(0x0afc, 15),
+};
+
+static struct ccu_data slave_ccu_data = {
+ BCM281XX_CCU_COMMON(slave, SLAVE),
+ .kona_clks = {
+ [BCM281XX_SLAVE_CCU_UARTB] =
+ KONA_CLK(slave, uartb, peri),
+ [BCM281XX_SLAVE_CCU_UARTB2] =
+ KONA_CLK(slave, uartb2, peri),
+ [BCM281XX_SLAVE_CCU_UARTB3] =
+ KONA_CLK(slave, uartb3, peri),
+ [BCM281XX_SLAVE_CCU_UARTB4] =
+ KONA_CLK(slave, uartb4, peri),
+ [BCM281XX_SLAVE_CCU_SSP0] =
+ KONA_CLK(slave, ssp0, peri),
+ [BCM281XX_SLAVE_CCU_SSP2] =
+ KONA_CLK(slave, ssp2, peri),
+ [BCM281XX_SLAVE_CCU_BSC1] =
+ KONA_CLK(slave, bsc1, peri),
+ [BCM281XX_SLAVE_CCU_BSC2] =
+ KONA_CLK(slave, bsc2, peri),
+ [BCM281XX_SLAVE_CCU_BSC3] =
+ KONA_CLK(slave, bsc3, peri),
+ [BCM281XX_SLAVE_CCU_PWM] =
+ KONA_CLK(slave, pwm, peri),
+ [BCM281XX_SLAVE_CCU_CLOCK_COUNT] = LAST_KONA_CLK,
+ },
+};
+
+/* Device tree match table callback functions */
+
+static void __init kona_dt_root_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(&root_ccu_data, node);
+}
+
+static void __init kona_dt_aon_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(&aon_ccu_data, node);
+}
+
+static void __init kona_dt_hub_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(&hub_ccu_data, node);
+}
+
+static void __init kona_dt_master_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(&master_ccu_data, node);
+}
+
+static void __init kona_dt_slave_ccu_setup(struct device_node *node)
+{
+ kona_dt_ccu_setup(&slave_ccu_data, node);
+}
+
+CLK_OF_DECLARE(bcm281xx_root_ccu, BCM281XX_DT_ROOT_CCU_COMPAT,
+ kona_dt_root_ccu_setup);
+CLK_OF_DECLARE(bcm281xx_aon_ccu, BCM281XX_DT_AON_CCU_COMPAT,
+ kona_dt_aon_ccu_setup);
+CLK_OF_DECLARE(bcm281xx_hub_ccu, BCM281XX_DT_HUB_CCU_COMPAT,
+ kona_dt_hub_ccu_setup);
+CLK_OF_DECLARE(bcm281xx_master_ccu, BCM281XX_DT_MASTER_CCU_COMPAT,
+ kona_dt_master_ccu_setup);
+CLK_OF_DECLARE(bcm281xx_slave_ccu, BCM281XX_DT_SLAVE_CCU_COMPAT,
+ kona_dt_slave_ccu_setup);
diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c
new file mode 100644
index 0000000000..0fafa5cba4
--- /dev/null
+++ b/drivers/clk/bcm/clk-bcm2835-aux.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2015 Broadcom
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/bcm2835-aux.h>
+
+#define BCM2835_AUXIRQ 0x00
+#define BCM2835_AUXENB 0x04
+
+static int bcm2835_aux_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk_hw_onecell_data *onecell;
+ const char *parent;
+ struct clk *parent_clk;
+ void __iomem *reg, *gate;
+
+ parent_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(parent_clk))
+ return PTR_ERR(parent_clk);
+ parent = __clk_get_name(parent_clk);
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ onecell = devm_kmalloc(dev,
+ struct_size(onecell, hws,
+ BCM2835_AUX_CLOCK_COUNT),
+ GFP_KERNEL);
+ if (!onecell)
+ return -ENOMEM;
+ onecell->num = BCM2835_AUX_CLOCK_COUNT;
+
+ gate = reg + BCM2835_AUXENB;
+ onecell->hws[BCM2835_AUX_CLOCK_UART] =
+ clk_hw_register_gate(dev, "aux_uart", parent, 0, gate, 0, 0, NULL);
+
+ onecell->hws[BCM2835_AUX_CLOCK_SPI1] =
+ clk_hw_register_gate(dev, "aux_spi1", parent, 0, gate, 1, 0, NULL);
+
+ onecell->hws[BCM2835_AUX_CLOCK_SPI2] =
+ clk_hw_register_gate(dev, "aux_spi2", parent, 0, gate, 2, 0, NULL);
+
+ return of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_onecell_get,
+ onecell);
+}
+
+static const struct of_device_id bcm2835_aux_clk_of_match[] = {
+ { .compatible = "brcm,bcm2835-aux", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm2835_aux_clk_of_match);
+
+static struct platform_driver bcm2835_aux_clk_driver = {
+ .driver = {
+ .name = "bcm2835-aux-clk",
+ .of_match_table = bcm2835_aux_clk_of_match,
+ },
+ .probe = bcm2835_aux_clk_probe,
+};
+builtin_platform_driver(bcm2835_aux_clk_driver);
+
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_DESCRIPTION("BCM2835 auxiliary peripheral clock driver");
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
new file mode 100644
index 0000000000..fb04734afc
--- /dev/null
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -0,0 +1,2352 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2010,2015 Broadcom
+ * Copyright (C) 2012 Stephen Warren
+ */
+
+/**
+ * DOC: BCM2835 CPRMAN (clock manager for the "audio" domain)
+ *
+ * The clock tree on the 2835 has several levels. There's a root
+ * oscillator running at 19.2 MHz. After the oscillator there are 5
+ * PLLs, roughly divided as "camera", "ARM", "core", "DSI displays",
+ * and "HDMI displays". Those 5 PLLs each can divide their output to
+ * produce up to 4 channels. Finally, there is the level of clocks to
+ * be consumed by other hardware components (like "H264" or "HDMI
+ * state machine"), which divide off of some subset of the PLL
+ * channels.
+ *
+ * All of the clocks in the tree are exposed in the DT, because the DT
+ * may want to make assignments of the final layer of clocks to the
+ * PLL channels, and some components of the hardware will actually
+ * skip layers of the tree (for example, the pixel clock comes
+ * directly from the PLLH PIX channel without using a CM_*CTL clock
+ * generator).
+ */
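
For orientation, the tree sketched in the comment above looks roughly like this (channel names taken from the A2W_* register definitions later in this file; leaf clocks abbreviated):

  osc (19.2 MHz)
   +- plla -> plla_core, plla_per, plla_dsi0, plla_ccp2
   +- pllb -> pllb_arm
   +- pllc -> pllc_core0, pllc_core1, pllc_core2, pllc_per
   +- plld -> plld_core, plld_per, plld_dsi0, plld_dsi1
   +- pllh -> pllh_pix, pllh_aux, pllh_rcal

Leaf clocks (vpu, h264, hsm, uart, pwm, emmc, ...) then mux and divide from a subset of those PLL channels through the CM_*CTL/CM_*DIV register pairs defined below.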
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <dt-bindings/clock/bcm2835.h>
+
+#define CM_PASSWORD 0x5a000000
+
+#define CM_GNRICCTL 0x000
+#define CM_GNRICDIV 0x004
+# define CM_DIV_FRAC_BITS 12
+# define CM_DIV_FRAC_MASK GENMASK(CM_DIV_FRAC_BITS - 1, 0)
+
+#define CM_VPUCTL 0x008
+#define CM_VPUDIV 0x00c
+#define CM_SYSCTL 0x010
+#define CM_SYSDIV 0x014
+#define CM_PERIACTL 0x018
+#define CM_PERIADIV 0x01c
+#define CM_PERIICTL 0x020
+#define CM_PERIIDIV 0x024
+#define CM_H264CTL 0x028
+#define CM_H264DIV 0x02c
+#define CM_ISPCTL 0x030
+#define CM_ISPDIV 0x034
+#define CM_V3DCTL 0x038
+#define CM_V3DDIV 0x03c
+#define CM_CAM0CTL 0x040
+#define CM_CAM0DIV 0x044
+#define CM_CAM1CTL 0x048
+#define CM_CAM1DIV 0x04c
+#define CM_CCP2CTL 0x050
+#define CM_CCP2DIV 0x054
+#define CM_DSI0ECTL 0x058
+#define CM_DSI0EDIV 0x05c
+#define CM_DSI0PCTL 0x060
+#define CM_DSI0PDIV 0x064
+#define CM_DPICTL 0x068
+#define CM_DPIDIV 0x06c
+#define CM_GP0CTL 0x070
+#define CM_GP0DIV 0x074
+#define CM_GP1CTL 0x078
+#define CM_GP1DIV 0x07c
+#define CM_GP2CTL 0x080
+#define CM_GP2DIV 0x084
+#define CM_HSMCTL 0x088
+#define CM_HSMDIV 0x08c
+#define CM_OTPCTL 0x090
+#define CM_OTPDIV 0x094
+#define CM_PCMCTL 0x098
+#define CM_PCMDIV 0x09c
+#define CM_PWMCTL 0x0a0
+#define CM_PWMDIV 0x0a4
+#define CM_SLIMCTL 0x0a8
+#define CM_SLIMDIV 0x0ac
+#define CM_SMICTL 0x0b0
+#define CM_SMIDIV 0x0b4
+/* no definition for 0x0b8 and 0x0bc */
+#define CM_TCNTCTL 0x0c0
+# define CM_TCNT_SRC1_SHIFT 12
+#define CM_TCNTCNT 0x0c4
+#define CM_TECCTL 0x0c8
+#define CM_TECDIV 0x0cc
+#define CM_TD0CTL 0x0d0
+#define CM_TD0DIV 0x0d4
+#define CM_TD1CTL 0x0d8
+#define CM_TD1DIV 0x0dc
+#define CM_TSENSCTL 0x0e0
+#define CM_TSENSDIV 0x0e4
+#define CM_TIMERCTL 0x0e8
+#define CM_TIMERDIV 0x0ec
+#define CM_UARTCTL 0x0f0
+#define CM_UARTDIV 0x0f4
+#define CM_VECCTL 0x0f8
+#define CM_VECDIV 0x0fc
+#define CM_PULSECTL 0x190
+#define CM_PULSEDIV 0x194
+#define CM_SDCCTL 0x1a8
+#define CM_SDCDIV 0x1ac
+#define CM_ARMCTL 0x1b0
+#define CM_AVEOCTL 0x1b8
+#define CM_AVEODIV 0x1bc
+#define CM_EMMCCTL 0x1c0
+#define CM_EMMCDIV 0x1c4
+#define CM_EMMC2CTL 0x1d0
+#define CM_EMMC2DIV 0x1d4
+
+/* General bits for the CM_*CTL regs */
+# define CM_ENABLE BIT(4)
+# define CM_KILL BIT(5)
+# define CM_GATE_BIT 6
+# define CM_GATE BIT(CM_GATE_BIT)
+# define CM_BUSY BIT(7)
+# define CM_BUSYD BIT(8)
+# define CM_FRAC BIT(9)
+# define CM_SRC_SHIFT 0
+# define CM_SRC_BITS 4
+# define CM_SRC_MASK 0xf
+# define CM_SRC_GND 0
+# define CM_SRC_OSC 1
+# define CM_SRC_TESTDEBUG0 2
+# define CM_SRC_TESTDEBUG1 3
+# define CM_SRC_PLLA_CORE 4
+# define CM_SRC_PLLA_PER 4
+# define CM_SRC_PLLC_CORE0 5
+# define CM_SRC_PLLC_PER 5
+# define CM_SRC_PLLC_CORE1 8
+# define CM_SRC_PLLD_CORE 6
+# define CM_SRC_PLLD_PER 6
+# define CM_SRC_PLLH_AUX 7
+# define CM_SRC_PLLC_CORE1 8
+# define CM_SRC_PLLC_CORE2 9
+
+#define CM_OSCCOUNT 0x100
+
+#define CM_PLLA 0x104
+# define CM_PLL_ANARST BIT(8)
+# define CM_PLLA_HOLDPER BIT(7)
+# define CM_PLLA_LOADPER BIT(6)
+# define CM_PLLA_HOLDCORE BIT(5)
+# define CM_PLLA_LOADCORE BIT(4)
+# define CM_PLLA_HOLDCCP2 BIT(3)
+# define CM_PLLA_LOADCCP2 BIT(2)
+# define CM_PLLA_HOLDDSI0 BIT(1)
+# define CM_PLLA_LOADDSI0 BIT(0)
+
+#define CM_PLLC 0x108
+# define CM_PLLC_HOLDPER BIT(7)
+# define CM_PLLC_LOADPER BIT(6)
+# define CM_PLLC_HOLDCORE2 BIT(5)
+# define CM_PLLC_LOADCORE2 BIT(4)
+# define CM_PLLC_HOLDCORE1 BIT(3)
+# define CM_PLLC_LOADCORE1 BIT(2)
+# define CM_PLLC_HOLDCORE0 BIT(1)
+# define CM_PLLC_LOADCORE0 BIT(0)
+
+#define CM_PLLD 0x10c
+# define CM_PLLD_HOLDPER BIT(7)
+# define CM_PLLD_LOADPER BIT(6)
+# define CM_PLLD_HOLDCORE BIT(5)
+# define CM_PLLD_LOADCORE BIT(4)
+# define CM_PLLD_HOLDDSI1 BIT(3)
+# define CM_PLLD_LOADDSI1 BIT(2)
+# define CM_PLLD_HOLDDSI0 BIT(1)
+# define CM_PLLD_LOADDSI0 BIT(0)
+
+#define CM_PLLH 0x110
+# define CM_PLLH_LOADRCAL BIT(2)
+# define CM_PLLH_LOADAUX BIT(1)
+# define CM_PLLH_LOADPIX BIT(0)
+
+#define CM_LOCK 0x114
+# define CM_LOCK_FLOCKH BIT(12)
+# define CM_LOCK_FLOCKD BIT(11)
+# define CM_LOCK_FLOCKC BIT(10)
+# define CM_LOCK_FLOCKB BIT(9)
+# define CM_LOCK_FLOCKA BIT(8)
+
+#define CM_EVENT 0x118
+#define CM_DSI1ECTL 0x158
+#define CM_DSI1EDIV 0x15c
+#define CM_DSI1PCTL 0x160
+#define CM_DSI1PDIV 0x164
+#define CM_DFTCTL 0x168
+#define CM_DFTDIV 0x16c
+
+#define CM_PLLB 0x170
+# define CM_PLLB_HOLDARM BIT(1)
+# define CM_PLLB_LOADARM BIT(0)
+
+#define A2W_PLLA_CTRL 0x1100
+#define A2W_PLLC_CTRL 0x1120
+#define A2W_PLLD_CTRL 0x1140
+#define A2W_PLLH_CTRL 0x1160
+#define A2W_PLLB_CTRL 0x11e0
+# define A2W_PLL_CTRL_PRST_DISABLE BIT(17)
+# define A2W_PLL_CTRL_PWRDN BIT(16)
+# define A2W_PLL_CTRL_PDIV_MASK 0x000007000
+# define A2W_PLL_CTRL_PDIV_SHIFT 12
+# define A2W_PLL_CTRL_NDIV_MASK 0x0000003ff
+# define A2W_PLL_CTRL_NDIV_SHIFT 0
+
+#define A2W_PLLA_ANA0 0x1010
+#define A2W_PLLC_ANA0 0x1030
+#define A2W_PLLD_ANA0 0x1050
+#define A2W_PLLH_ANA0 0x1070
+#define A2W_PLLB_ANA0 0x10f0
+
+#define A2W_PLL_KA_SHIFT 7
+#define A2W_PLL_KA_MASK GENMASK(9, 7)
+#define A2W_PLL_KI_SHIFT 19
+#define A2W_PLL_KI_MASK GENMASK(21, 19)
+#define A2W_PLL_KP_SHIFT 15
+#define A2W_PLL_KP_MASK GENMASK(18, 15)
+
+#define A2W_PLLH_KA_SHIFT 19
+#define A2W_PLLH_KA_MASK GENMASK(21, 19)
+#define A2W_PLLH_KI_LOW_SHIFT 22
+#define A2W_PLLH_KI_LOW_MASK GENMASK(23, 22)
+#define A2W_PLLH_KI_HIGH_SHIFT 0
+#define A2W_PLLH_KI_HIGH_MASK GENMASK(0, 0)
+#define A2W_PLLH_KP_SHIFT 1
+#define A2W_PLLH_KP_MASK GENMASK(4, 1)
+
+#define A2W_XOSC_CTRL 0x1190
+# define A2W_XOSC_CTRL_PLLB_ENABLE BIT(7)
+# define A2W_XOSC_CTRL_PLLA_ENABLE BIT(6)
+# define A2W_XOSC_CTRL_PLLD_ENABLE BIT(5)
+# define A2W_XOSC_CTRL_DDR_ENABLE BIT(4)
+# define A2W_XOSC_CTRL_CPR1_ENABLE BIT(3)
+# define A2W_XOSC_CTRL_USB_ENABLE BIT(2)
+# define A2W_XOSC_CTRL_HDMI_ENABLE BIT(1)
+# define A2W_XOSC_CTRL_PLLC_ENABLE BIT(0)
+
+#define A2W_PLLA_FRAC 0x1200
+#define A2W_PLLC_FRAC 0x1220
+#define A2W_PLLD_FRAC 0x1240
+#define A2W_PLLH_FRAC 0x1260
+#define A2W_PLLB_FRAC 0x12e0
+# define A2W_PLL_FRAC_MASK ((1 << A2W_PLL_FRAC_BITS) - 1)
+# define A2W_PLL_FRAC_BITS 20
+
+#define A2W_PLL_CHANNEL_DISABLE BIT(8)
+#define A2W_PLL_DIV_BITS 8
+#define A2W_PLL_DIV_SHIFT 0
+
+#define A2W_PLLA_DSI0 0x1300
+#define A2W_PLLA_CORE 0x1400
+#define A2W_PLLA_PER 0x1500
+#define A2W_PLLA_CCP2 0x1600
+
+#define A2W_PLLC_CORE2 0x1320
+#define A2W_PLLC_CORE1 0x1420
+#define A2W_PLLC_PER 0x1520
+#define A2W_PLLC_CORE0 0x1620
+
+#define A2W_PLLD_DSI0 0x1340
+#define A2W_PLLD_CORE 0x1440
+#define A2W_PLLD_PER 0x1540
+#define A2W_PLLD_DSI1 0x1640
+
+#define A2W_PLLH_AUX 0x1360
+#define A2W_PLLH_RCAL 0x1460
+#define A2W_PLLH_PIX 0x1560
+#define A2W_PLLH_STS 0x1660
+
+#define A2W_PLLH_CTRLR 0x1960
+#define A2W_PLLH_FRACR 0x1a60
+#define A2W_PLLH_AUXR 0x1b60
+#define A2W_PLLH_RCALR 0x1c60
+#define A2W_PLLH_PIXR 0x1d60
+#define A2W_PLLH_STSR 0x1e60
+
+#define A2W_PLLB_ARM 0x13e0
+#define A2W_PLLB_SP0 0x14e0
+#define A2W_PLLB_SP1 0x15e0
+#define A2W_PLLB_SP2 0x16e0
+
+#define LOCK_TIMEOUT_NS 100000000
+#define BCM2835_MAX_FB_RATE 1750000000u
+
+#define SOC_BCM2835 BIT(0)
+#define SOC_BCM2711 BIT(1)
+#define SOC_ALL (SOC_BCM2835 | SOC_BCM2711)
+
+/*
+ * Names of clocks used within the driver that need to be replaced
+ * with an external parent's name. This array is in the order that
+ * the clocks node in the DT references external clocks.
+ */
+static const char *const cprman_parent_names[] = {
+ "xosc",
+ "dsi0_byte",
+ "dsi0_ddr2",
+ "dsi0_ddr",
+ "dsi1_byte",
+ "dsi1_ddr2",
+ "dsi1_ddr",
+};
+
+struct bcm2835_cprman {
+ struct device *dev;
+ void __iomem *regs;
+ spinlock_t regs_lock; /* spinlock for all clocks */
+ unsigned int soc;
+
+ /*
+ * Real names of cprman clock parents looked up through
+ * of_clk_get_parent_name(), which will be used in the
+ * parent_names[] arrays for clock registration.
+ */
+ const char *real_parent_names[ARRAY_SIZE(cprman_parent_names)];
+
+ /* Must be last */
+ struct clk_hw_onecell_data onecell;
+};
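
As the comment inside the struct says, the real parent names are looked up from the DT. A sketch of that lookup (illustrative; the actual probe code sits later in this file, outside this excerpt):

static void cprman_lookup_parents_sketch(struct bcm2835_cprman *cprman,
					 struct device_node *np)
{
	size_t i;

	/* The i-th entry of the node's clocks property names the i-th parent. */
	for (i = 0; i < ARRAY_SIZE(cprman_parent_names); i++)
		cprman->real_parent_names[i] = of_clk_get_parent_name(np, i);
}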
+
+struct cprman_plat_data {
+ unsigned int soc;
+};
+
+static inline void cprman_write(struct bcm2835_cprman *cprman, u32 reg, u32 val)
+{
+ writel(CM_PASSWORD | val, cprman->regs + reg);
+}
+
+static inline u32 cprman_read(struct bcm2835_cprman *cprman, u32 reg)
+{
+ return readl(cprman->regs + reg);
+}
+
+/* Does a cycle of measuring a clock through the TCNT clock, which may
+ * source from many other clocks in the system.
+ */
+static unsigned long bcm2835_measure_tcnt_mux(struct bcm2835_cprman *cprman,
+ u32 tcnt_mux)
+{
+ u32 osccount = 19200; /* 1ms */
+ u32 count;
+ ktime_t timeout;
+
+ spin_lock(&cprman->regs_lock);
+
+ cprman_write(cprman, CM_TCNTCTL, CM_KILL);
+
+ cprman_write(cprman, CM_TCNTCTL,
+ (tcnt_mux & CM_SRC_MASK) |
+ (tcnt_mux >> CM_SRC_BITS) << CM_TCNT_SRC1_SHIFT);
+
+ cprman_write(cprman, CM_OSCCOUNT, osccount);
+
+ /* do a short delay at the start */
+ mdelay(1);
+
+ /* Finish off whatever is left of OSCCOUNT */
+ timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS);
+ while (cprman_read(cprman, CM_OSCCOUNT)) {
+ if (ktime_after(ktime_get(), timeout)) {
+ dev_err(cprman->dev, "timeout waiting for OSCCOUNT\n");
+ count = 0;
+ goto out;
+ }
+ cpu_relax();
+ }
+
+ /* Wait for BUSY to clear. */
+ timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS);
+ while (cprman_read(cprman, CM_TCNTCTL) & CM_BUSY) {
+ if (ktime_after(ktime_get(), timeout)) {
+ dev_err(cprman->dev, "timeout waiting for !BUSY\n");
+ count = 0;
+ goto out;
+ }
+ cpu_relax();
+ }
+
+ count = cprman_read(cprman, CM_TCNTCNT);
+
+ cprman_write(cprman, CM_TCNTCTL, 0);
+
+out:
+ spin_unlock(&cprman->regs_lock);
+
+ return count * 1000;
+}
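
The closing arithmetic is straightforward: OSCCOUNT is loaded with 19200 cycles of the 19.2 MHz crystal, i.e. a 1 ms gate window, so the value read back from CM_TCNTCNT is pulses per millisecond, and multiplying by 1000 converts it to Hz.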
+
+static void bcm2835_debugfs_regset(struct bcm2835_cprman *cprman, u32 base,
+ const struct debugfs_reg32 *regs,
+ size_t nregs, struct dentry *dentry)
+{
+ struct debugfs_regset32 *regset;
+
+ regset = devm_kzalloc(cprman->dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return;
+
+ regset->regs = regs;
+ regset->nregs = nregs;
+ regset->base = cprman->regs + base;
+
+ debugfs_create_regset32("regdump", S_IRUGO, dentry, regset);
+}
+
+struct bcm2835_pll_data {
+ const char *name;
+ u32 cm_ctrl_reg;
+ u32 a2w_ctrl_reg;
+ u32 frac_reg;
+ u32 ana_reg_base;
+ u32 reference_enable_mask;
+ /* Bit in CM_LOCK to indicate when the PLL has locked. */
+ u32 lock_mask;
+ u32 flags;
+
+ const struct bcm2835_pll_ana_bits *ana;
+
+ unsigned long min_rate;
+ unsigned long max_rate;
+ /*
+ * Highest rate for the VCO before we have to use the
+ * pre-divide-by-2.
+ */
+ unsigned long max_fb_rate;
+};
+
+struct bcm2835_pll_ana_bits {
+ u32 mask0;
+ u32 set0;
+ u32 mask1;
+ u32 set1;
+ u32 mask3;
+ u32 set3;
+ u32 fb_prediv_mask;
+};
+
+static const struct bcm2835_pll_ana_bits bcm2835_ana_default = {
+ .mask0 = 0,
+ .set0 = 0,
+ .mask1 = A2W_PLL_KI_MASK | A2W_PLL_KP_MASK,
+ .set1 = (2 << A2W_PLL_KI_SHIFT) | (8 << A2W_PLL_KP_SHIFT),
+ .mask3 = A2W_PLL_KA_MASK,
+ .set3 = (2 << A2W_PLL_KA_SHIFT),
+ .fb_prediv_mask = BIT(14),
+};
+
+static const struct bcm2835_pll_ana_bits bcm2835_ana_pllh = {
+ .mask0 = A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK,
+ .set0 = (2 << A2W_PLLH_KA_SHIFT) | (2 << A2W_PLLH_KI_LOW_SHIFT),
+ .mask1 = A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK,
+ .set1 = (6 << A2W_PLLH_KP_SHIFT),
+ .mask3 = 0,
+ .set3 = 0,
+ .fb_prediv_mask = BIT(11),
+};
+
+struct bcm2835_pll_divider_data {
+ const char *name;
+ const char *source_pll;
+
+ u32 cm_reg;
+ u32 a2w_reg;
+
+ u32 load_mask;
+ u32 hold_mask;
+ u32 fixed_divider;
+ u32 flags;
+};
+
+struct bcm2835_clock_data {
+ const char *name;
+
+ const char *const *parents;
+ int num_mux_parents;
+
+ /* Bitmap encoding which parents accept rate change propagation. */
+ unsigned int set_rate_parent;
+
+ u32 ctl_reg;
+ u32 div_reg;
+
+ /* Number of integer bits in the divider */
+ u32 int_bits;
+ /* Number of fractional bits in the divider */
+ u32 frac_bits;
+
+ u32 flags;
+
+ bool is_vpu_clock;
+ bool is_mash_clock;
+ bool low_jitter;
+
+ u32 tcnt_mux;
+
+ bool round_up;
+};
+
+struct bcm2835_gate_data {
+ const char *name;
+ const char *parent;
+
+ u32 ctl_reg;
+};
+
+struct bcm2835_pll {
+ struct clk_hw hw;
+ struct bcm2835_cprman *cprman;
+ const struct bcm2835_pll_data *data;
+};
+
+static int bcm2835_pll_is_on(struct clk_hw *hw)
+{
+ struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
+ struct bcm2835_cprman *cprman = pll->cprman;
+ const struct bcm2835_pll_data *data = pll->data;
+
+ return cprman_read(cprman, data->a2w_ctrl_reg) &
+ A2W_PLL_CTRL_PRST_DISABLE;
+}
+
+static u32 bcm2835_pll_get_prediv_mask(struct bcm2835_cprman *cprman,
+ const struct bcm2835_pll_data *data)
+{
+ /*
+ * On BCM2711 there isn't a pre-divisor available in the PLL feedback
+ * loop. Bits 13:14 of ANA1 (PLLA,PLLB,PLLC,PLLD) have been re-purposed
+ * as VCO RANGE bits.
+ */
+ if (cprman->soc & SOC_BCM2711)
+ return 0;
+
+ return data->ana->fb_prediv_mask;
+}
+
+static void bcm2835_pll_choose_ndiv_and_fdiv(unsigned long rate,
+ unsigned long parent_rate,
+ u32 *ndiv, u32 *fdiv)
+{
+ u64 div;
+
+ div = (u64)rate << A2W_PLL_FRAC_BITS;
+ do_div(div, parent_rate);
+
+ *ndiv = div >> A2W_PLL_FRAC_BITS;
+ *fdiv = div & ((1 << A2W_PLL_FRAC_BITS) - 1);
+}
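
Worked example (illustrative numbers): with the 19.2 MHz crystal as parent and a requested 2.4 GHz VCO rate, div = (2400000000 << 20) / 19200000 = 125 << 20, giving ndiv = 125 and fdiv = 0; a non-integer ratio such as 2.0 GHz yields ndiv = 104 and fdiv = 174762 (about 0.1667 in 20-bit fixed point).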
+
+static long bcm2835_pll_rate_from_divisors(unsigned long parent_rate,
+ u32 ndiv, u32 fdiv, u32 pdiv)
+{
+ u64 rate;
+
+ if (pdiv == 0)
+ return 0;
+
+ rate = (u64)parent_rate * ((ndiv << A2W_PLL_FRAC_BITS) + fdiv);
+ do_div(rate, pdiv);
+ return rate >> A2W_PLL_FRAC_BITS;
+}
+
+static long bcm2835_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
+ const struct bcm2835_pll_data *data = pll->data;
+ u32 ndiv, fdiv;
+
+ rate = clamp(rate, data->min_rate, data->max_rate);
+
+ bcm2835_pll_choose_ndiv_and_fdiv(rate, *parent_rate, &ndiv, &fdiv);
+
+ return bcm2835_pll_rate_from_divisors(*parent_rate, ndiv, fdiv, 1);
+}
+
+static unsigned long bcm2835_pll_get_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
+ struct bcm2835_cprman *cprman = pll->cprman;
+ const struct bcm2835_pll_data *data = pll->data;
+ u32 a2wctrl = cprman_read(cprman, data->a2w_ctrl_reg);
+ u32 ndiv, pdiv, fdiv;
+ bool using_prediv;
+
+ if (parent_rate == 0)
+ return 0;
+
+ fdiv = cprman_read(cprman, data->frac_reg) & A2W_PLL_FRAC_MASK;
+ ndiv = (a2wctrl & A2W_PLL_CTRL_NDIV_MASK) >> A2W_PLL_CTRL_NDIV_SHIFT;
+ pdiv = (a2wctrl & A2W_PLL_CTRL_PDIV_MASK) >> A2W_PLL_CTRL_PDIV_SHIFT;
+ using_prediv = cprman_read(cprman, data->ana_reg_base + 4) &
+ bcm2835_pll_get_prediv_mask(cprman, data);
+
+ if (using_prediv) {
+ ndiv *= 2;
+ fdiv *= 2;
+ }
+
+ return bcm2835_pll_rate_from_divisors(parent_rate, ndiv, fdiv, pdiv);
+}
+
+static void bcm2835_pll_off(struct clk_hw *hw)
+{
+ struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
+ struct bcm2835_cprman *cprman = pll->cprman;
+ const struct bcm2835_pll_data *data = pll->data;
+
+ spin_lock(&cprman->regs_lock);
+ cprman_write(cprman, data->cm_ctrl_reg, CM_PLL_ANARST);
+ cprman_write(cprman, data->a2w_ctrl_reg,
+ cprman_read(cprman, data->a2w_ctrl_reg) |
+ A2W_PLL_CTRL_PWRDN);
+ spin_unlock(&cprman->regs_lock);
+}
+
+static int bcm2835_pll_on(struct clk_hw *hw)
+{
+ struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
+ struct bcm2835_cprman *cprman = pll->cprman;
+ const struct bcm2835_pll_data *data = pll->data;
+ ktime_t timeout;
+
+ cprman_write(cprman, data->a2w_ctrl_reg,
+ cprman_read(cprman, data->a2w_ctrl_reg) &
+ ~A2W_PLL_CTRL_PWRDN);
+
+ /* Take the PLL out of reset. */
+ spin_lock(&cprman->regs_lock);
+ cprman_write(cprman, data->cm_ctrl_reg,
+ cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST);
+ spin_unlock(&cprman->regs_lock);
+
+ /* Wait for the PLL to lock. */
+ timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS);
+ while (!(cprman_read(cprman, CM_LOCK) & data->lock_mask)) {
+ if (ktime_after(ktime_get(), timeout)) {
+ dev_err(cprman->dev, "%s: couldn't lock PLL\n",
+ clk_hw_get_name(hw));
+ return -ETIMEDOUT;
+ }
+
+ cpu_relax();
+ }
+
+ cprman_write(cprman, data->a2w_ctrl_reg,
+ cprman_read(cprman, data->a2w_ctrl_reg) |
+ A2W_PLL_CTRL_PRST_DISABLE);
+
+ return 0;
+}
+
+static void
+bcm2835_pll_write_ana(struct bcm2835_cprman *cprman, u32 ana_reg_base, u32 *ana)
+{
+ int i;
+
+ /*
+ * ANA register setup is done as a series of writes to
+ * ANA3-ANA0, in that order. This lets us write all 4
+ * registers as a single cycle of the serdes interface (taking
+ * 100 xosc clocks), whereas if we were to update ana0, 1, and
+ * 3 individually through their partial-write registers, each
+ * would be their own serdes cycle.
+ */
+ for (i = 3; i >= 0; i--)
+ cprman_write(cprman, ana_reg_base + i * 4, ana[i]);
+}
+
+static int bcm2835_pll_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
+ struct bcm2835_cprman *cprman = pll->cprman;
+ const struct bcm2835_pll_data *data = pll->data;
+ u32 prediv_mask = bcm2835_pll_get_prediv_mask(cprman, data);
+ bool was_using_prediv, use_fb_prediv, do_ana_setup_first;
+ u32 ndiv, fdiv, a2w_ctl;
+ u32 ana[4];
+ int i;
+
+ if (rate > data->max_fb_rate) {
+ use_fb_prediv = true;
+ rate /= 2;
+ } else {
+ use_fb_prediv = false;
+ }
+
+ bcm2835_pll_choose_ndiv_and_fdiv(rate, parent_rate, &ndiv, &fdiv);
+
+ for (i = 3; i >= 0; i--)
+ ana[i] = cprman_read(cprman, data->ana_reg_base + i * 4);
+
+ was_using_prediv = ana[1] & prediv_mask;
+
+ ana[0] &= ~data->ana->mask0;
+ ana[0] |= data->ana->set0;
+ ana[1] &= ~data->ana->mask1;
+ ana[1] |= data->ana->set1;
+ ana[3] &= ~data->ana->mask3;
+ ana[3] |= data->ana->set3;
+
+ if (was_using_prediv && !use_fb_prediv) {
+ ana[1] &= ~prediv_mask;
+ do_ana_setup_first = true;
+ } else if (!was_using_prediv && use_fb_prediv) {
+ ana[1] |= prediv_mask;
+ do_ana_setup_first = false;
+ } else {
+ do_ana_setup_first = true;
+ }
+
+ /* Unmask the reference clock from the oscillator. */
+ spin_lock(&cprman->regs_lock);
+ cprman_write(cprman, A2W_XOSC_CTRL,
+ cprman_read(cprman, A2W_XOSC_CTRL) |
+ data->reference_enable_mask);
+ spin_unlock(&cprman->regs_lock);
+
+ if (do_ana_setup_first)
+ bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana);
+
+ /* Set the PLL multiplier from the oscillator. */
+ cprman_write(cprman, data->frac_reg, fdiv);
+
+ a2w_ctl = cprman_read(cprman, data->a2w_ctrl_reg);
+ a2w_ctl &= ~A2W_PLL_CTRL_NDIV_MASK;
+ a2w_ctl |= ndiv << A2W_PLL_CTRL_NDIV_SHIFT;
+ a2w_ctl &= ~A2W_PLL_CTRL_PDIV_MASK;
+ a2w_ctl |= 1 << A2W_PLL_CTRL_PDIV_SHIFT;
+ cprman_write(cprman, data->a2w_ctrl_reg, a2w_ctl);
+
+ if (!do_ana_setup_first)
+ bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana);
+
+ return 0;
+}
+
+static void bcm2835_pll_debug_init(struct clk_hw *hw,
+ struct dentry *dentry)
+{
+ struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
+ struct bcm2835_cprman *cprman = pll->cprman;
+ const struct bcm2835_pll_data *data = pll->data;
+ struct debugfs_reg32 *regs;
+
+ regs = devm_kcalloc(cprman->dev, 7, sizeof(*regs), GFP_KERNEL);
+ if (!regs)
+ return;
+
+ regs[0].name = "cm_ctrl";
+ regs[0].offset = data->cm_ctrl_reg;
+ regs[1].name = "a2w_ctrl";
+ regs[1].offset = data->a2w_ctrl_reg;
+ regs[2].name = "frac";
+ regs[2].offset = data->frac_reg;
+ regs[3].name = "ana0";
+ regs[3].offset = data->ana_reg_base + 0 * 4;
+ regs[4].name = "ana1";
+ regs[4].offset = data->ana_reg_base + 1 * 4;
+ regs[5].name = "ana2";
+ regs[5].offset = data->ana_reg_base + 2 * 4;
+ regs[6].name = "ana3";
+ regs[6].offset = data->ana_reg_base + 3 * 4;
+
+ bcm2835_debugfs_regset(cprman, 0, regs, 7, dentry);
+}
+
+static const struct clk_ops bcm2835_pll_clk_ops = {
+ .is_prepared = bcm2835_pll_is_on,
+ .prepare = bcm2835_pll_on,
+ .unprepare = bcm2835_pll_off,
+ .recalc_rate = bcm2835_pll_get_rate,
+ .set_rate = bcm2835_pll_set_rate,
+ .round_rate = bcm2835_pll_round_rate,
+ .debug_init = bcm2835_pll_debug_init,
+};
+
+struct bcm2835_pll_divider {
+ struct clk_divider div;
+ struct bcm2835_cprman *cprman;
+ const struct bcm2835_pll_divider_data *data;
+};
+
+static struct bcm2835_pll_divider *
+bcm2835_pll_divider_from_hw(struct clk_hw *hw)
+{
+ return container_of(hw, struct bcm2835_pll_divider, div.hw);
+}
+
+static int bcm2835_pll_divider_is_on(struct clk_hw *hw)
+{
+ struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
+ struct bcm2835_cprman *cprman = divider->cprman;
+ const struct bcm2835_pll_divider_data *data = divider->data;
+
+ return !(cprman_read(cprman, data->a2w_reg) & A2W_PLL_CHANNEL_DISABLE);
+}
+
+static int bcm2835_pll_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return clk_divider_ops.determine_rate(hw, req);
+}
+
+static unsigned long bcm2835_pll_divider_get_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static void bcm2835_pll_divider_off(struct clk_hw *hw)
+{
+ struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
+ struct bcm2835_cprman *cprman = divider->cprman;
+ const struct bcm2835_pll_divider_data *data = divider->data;
+
+ spin_lock(&cprman->regs_lock);
+ cprman_write(cprman, data->cm_reg,
+ (cprman_read(cprman, data->cm_reg) &
+ ~data->load_mask) | data->hold_mask);
+ cprman_write(cprman, data->a2w_reg,
+ cprman_read(cprman, data->a2w_reg) |
+ A2W_PLL_CHANNEL_DISABLE);
+ spin_unlock(&cprman->regs_lock);
+}
+
+static int bcm2835_pll_divider_on(struct clk_hw *hw)
+{
+ struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
+ struct bcm2835_cprman *cprman = divider->cprman;
+ const struct bcm2835_pll_divider_data *data = divider->data;
+
+ spin_lock(&cprman->regs_lock);
+ cprman_write(cprman, data->a2w_reg,
+ cprman_read(cprman, data->a2w_reg) &
+ ~A2W_PLL_CHANNEL_DISABLE);
+
+ cprman_write(cprman, data->cm_reg,
+ cprman_read(cprman, data->cm_reg) & ~data->hold_mask);
+ spin_unlock(&cprman->regs_lock);
+
+ return 0;
+}
+
+static int bcm2835_pll_divider_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
+ struct bcm2835_cprman *cprman = divider->cprman;
+ const struct bcm2835_pll_divider_data *data = divider->data;
+ u32 cm, div, max_div = 1 << A2W_PLL_DIV_BITS;
+
+ div = DIV_ROUND_UP_ULL(parent_rate, rate);
+
+ div = min(div, max_div);
+ if (div == max_div)
+ div = 0;
+
+ cprman_write(cprman, data->a2w_reg, div);
+ cm = cprman_read(cprman, data->cm_reg);
+ cprman_write(cprman, data->cm_reg, cm | data->load_mask);
+ cprman_write(cprman, data->cm_reg, cm & ~data->load_mask);
+
+ return 0;
+}
+
+static void bcm2835_pll_divider_debug_init(struct clk_hw *hw,
+ struct dentry *dentry)
+{
+ struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
+ struct bcm2835_cprman *cprman = divider->cprman;
+ const struct bcm2835_pll_divider_data *data = divider->data;
+ struct debugfs_reg32 *regs;
+
+ regs = devm_kcalloc(cprman->dev, 7, sizeof(*regs), GFP_KERNEL);
+ if (!regs)
+ return;
+
+ regs[0].name = "cm";
+ regs[0].offset = data->cm_reg;
+ regs[1].name = "a2w";
+ regs[1].offset = data->a2w_reg;
+
+ bcm2835_debugfs_regset(cprman, 0, regs, 2, dentry);
+}
+
+static const struct clk_ops bcm2835_pll_divider_clk_ops = {
+ .is_prepared = bcm2835_pll_divider_is_on,
+ .prepare = bcm2835_pll_divider_on,
+ .unprepare = bcm2835_pll_divider_off,
+ .recalc_rate = bcm2835_pll_divider_get_rate,
+ .set_rate = bcm2835_pll_divider_set_rate,
+ .determine_rate = bcm2835_pll_divider_determine_rate,
+ .debug_init = bcm2835_pll_divider_debug_init,
+};
+
+/*
+ * The CM dividers do fixed-point division, so we can't use the
+ * generic integer divider code like the PLL dividers do (and we can't
+ * fake it by having some fixed shifts preceding it in the clock tree,
+ * because we'd run out of bits in a 32-bit unsigned long).
+ */
+struct bcm2835_clock {
+ struct clk_hw hw;
+ struct bcm2835_cprman *cprman;
+ const struct bcm2835_clock_data *data;
+};
+
+static struct bcm2835_clock *bcm2835_clock_from_hw(struct clk_hw *hw)
+{
+ return container_of(hw, struct bcm2835_clock, hw);
+}
+
+static int bcm2835_clock_is_on(struct clk_hw *hw)
+{
+ struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
+ struct bcm2835_cprman *cprman = clock->cprman;
+ const struct bcm2835_clock_data *data = clock->data;
+
+ return (cprman_read(cprman, data->ctl_reg) & CM_ENABLE) != 0;
+}
+
+static u32 bcm2835_clock_choose_div(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
+ const struct bcm2835_clock_data *data = clock->data;
+ u32 unused_frac_mask =
+ GENMASK(CM_DIV_FRAC_BITS - data->frac_bits, 0) >> 1;
+ u64 temp = (u64)parent_rate << CM_DIV_FRAC_BITS;
+ u32 div, mindiv, maxdiv;
+
+ do_div(temp, rate);
+ div = temp;
+ div &= ~unused_frac_mask;
+
+ /* different clamping limits apply for a mash clock */
+ if (data->is_mash_clock) {
+ /* clamp to min divider of 2 */
+ mindiv = 2 << CM_DIV_FRAC_BITS;
+ /* clamp to the highest possible integer divider */
+ maxdiv = (BIT(data->int_bits) - 1) << CM_DIV_FRAC_BITS;
+ } else {
+ /* clamp to min divider of 1 */
+ mindiv = 1 << CM_DIV_FRAC_BITS;
+ /* clamp to the highest possible fractional divider */
+ maxdiv = GENMASK(data->int_bits + CM_DIV_FRAC_BITS - 1,
+ CM_DIV_FRAC_BITS - data->frac_bits);
+ }
+
+ /* apply the clamping limits */
+ div = max_t(u32, div, mindiv);
+ div = min_t(u32, div, maxdiv);
+
+ return div;
+}
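
Worked example (illustrative): dividing a 500 MHz PLL channel down to a requested 48 MHz with int_bits = 12 and frac_bits = 12 gives div = (500000000 << 12) / 48000000 = 42666, i.e. an integer part of 10 and a fractional part of 1706/4096 ≈ 0.417; neither clamp applies, and bcm2835_clock_rate_from_divisor() maps this back to roughly 48.0008 MHz.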
+
+static unsigned long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock,
+ unsigned long parent_rate,
+ u32 div)
+{
+ const struct bcm2835_clock_data *data = clock->data;
+ u64 temp;
+
+ if (data->int_bits == 0 && data->frac_bits == 0)
+ return parent_rate;
+
+ /*
+ * The divisor is a 12.12 fixed point field, but only some of
+ * the bits are populated in any given clock.
+ */
+ div >>= CM_DIV_FRAC_BITS - data->frac_bits;
+ div &= (1 << (data->int_bits + data->frac_bits)) - 1;
+
+ if (div == 0)
+ return 0;
+
+ temp = (u64)parent_rate << data->frac_bits;
+
+ do_div(temp, div);
+
+ return temp;
+}
+
+static unsigned long bcm2835_round_rate(unsigned long rate)
+{
+ unsigned long scaler;
+ unsigned long limit;
+
+ limit = rate / 100000;
+
+ scaler = 1;
+ while (scaler < limit)
+ scaler *= 10;
+
+ /*
+ * If increasing a clock by less than 0.1% changes it
+ * from ..999.. to ..000.., round up.
+ */
+ if ((rate + scaler - 1) / scaler % 1000 == 0)
+ rate = roundup(rate, scaler);
+
+ return rate;
+}
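
For example, a computed rate of 47999999 Hz gives limit = 479 and scaler = 1000; (47999999 + 999) / 1000 = 48000, which is a multiple of 1000, so the rate is rounded up to 48000000 Hz, whereas 47990000 Hz would be left as-is.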
+
+static unsigned long bcm2835_clock_get_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
+ struct bcm2835_cprman *cprman = clock->cprman;
+ const struct bcm2835_clock_data *data = clock->data;
+ unsigned long rate;
+ u32 div;
+
+ if (data->int_bits == 0 && data->frac_bits == 0)
+ return parent_rate;
+
+ div = cprman_read(cprman, data->div_reg);
+
+ rate = bcm2835_clock_rate_from_divisor(clock, parent_rate, div);
+
+ if (data->round_up)
+ rate = bcm2835_round_rate(rate);
+
+ return rate;
+}
+
+static void bcm2835_clock_wait_busy(struct bcm2835_clock *clock)
+{
+ struct bcm2835_cprman *cprman = clock->cprman;
+ const struct bcm2835_clock_data *data = clock->data;
+ ktime_t timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS);
+
+ while (cprman_read(cprman, data->ctl_reg) & CM_BUSY) {
+ if (ktime_after(ktime_get(), timeout)) {
+ dev_err(cprman->dev, "%s: clock divider didn't go idle\n",
+ clk_hw_get_name(&clock->hw));
+ return;
+ }
+ cpu_relax();
+ }
+}
+
+static void bcm2835_clock_off(struct clk_hw *hw)
+{
+ struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
+ struct bcm2835_cprman *cprman = clock->cprman;
+ const struct bcm2835_clock_data *data = clock->data;
+
+ spin_lock(&cprman->regs_lock);
+ cprman_write(cprman, data->ctl_reg,
+ cprman_read(cprman, data->ctl_reg) & ~CM_ENABLE);
+ spin_unlock(&cprman->regs_lock);
+
+ /* BUSY will remain high until the divider completes its cycle. */
+ bcm2835_clock_wait_busy(clock);
+}
+
+static int bcm2835_clock_on(struct clk_hw *hw)
+{
+ struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
+ struct bcm2835_cprman *cprman = clock->cprman;
+ const struct bcm2835_clock_data *data = clock->data;
+
+ spin_lock(&cprman->regs_lock);
+ cprman_write(cprman, data->ctl_reg,
+ cprman_read(cprman, data->ctl_reg) |
+ CM_ENABLE |
+ CM_GATE);
+ spin_unlock(&cprman->regs_lock);
+
+ /* Debug code to measure the clock once it's turned on to see
+ * if it's ticking at the rate we expect.
+ */
+ if (data->tcnt_mux && false) {
+ dev_info(cprman->dev,
+ "clk %s: rate %ld, measure %ld\n",
+ data->name,
+ clk_hw_get_rate(hw),
+ bcm2835_measure_tcnt_mux(cprman, data->tcnt_mux));
+ }
+
+ return 0;
+}
+
+static int bcm2835_clock_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
+ struct bcm2835_cprman *cprman = clock->cprman;
+ const struct bcm2835_clock_data *data = clock->data;
+ u32 div = bcm2835_clock_choose_div(hw, rate, parent_rate);
+ u32 ctl;
+
+ spin_lock(&cprman->regs_lock);
+
+ /*
+ * Setting up frac support
+ *
+ * In principle it is recommended to stop/start the clock first,
+ * but as we set CLK_SET_RATE_GATE during registration of the
+ * clock this requirement should be taken care of by the
+ * clk-framework.
+ */
+ ctl = cprman_read(cprman, data->ctl_reg) & ~CM_FRAC;
+ ctl |= (div & CM_DIV_FRAC_MASK) ? CM_FRAC : 0;
+ cprman_write(cprman, data->ctl_reg, ctl);
+
+ cprman_write(cprman, data->div_reg, div);
+
+ spin_unlock(&cprman->regs_lock);
+
+ return 0;
+}
+
+static bool
+bcm2835_clk_is_pllc(struct clk_hw *hw)
+{
+ if (!hw)
+ return false;
+
+ return strncmp(clk_hw_get_name(hw), "pllc", 4) == 0;
+}
+
+static unsigned long bcm2835_clock_choose_div_and_prate(struct clk_hw *hw,
+ int parent_idx,
+ unsigned long rate,
+ u32 *div,
+ unsigned long *prate,
+ unsigned long *avgrate)
+{
+ struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
+ struct bcm2835_cprman *cprman = clock->cprman;
+ const struct bcm2835_clock_data *data = clock->data;
+ unsigned long best_rate = 0;
+ u32 curdiv, mindiv, maxdiv;
+ struct clk_hw *parent;
+
+ parent = clk_hw_get_parent_by_index(hw, parent_idx);
+
+ if (!(BIT(parent_idx) & data->set_rate_parent)) {
+ *prate = clk_hw_get_rate(parent);
+ *div = bcm2835_clock_choose_div(hw, rate, *prate);
+
+ *avgrate = bcm2835_clock_rate_from_divisor(clock, *prate, *div);
+
+ if (data->low_jitter && (*div & CM_DIV_FRAC_MASK)) {
+ unsigned long high, low;
+ u32 int_div = *div & ~CM_DIV_FRAC_MASK;
+
+ high = bcm2835_clock_rate_from_divisor(clock, *prate,
+ int_div);
+ int_div += CM_DIV_FRAC_MASK + 1;
+ low = bcm2835_clock_rate_from_divisor(clock, *prate,
+ int_div);
+
+ /*
+ * Return the average rate reduced by the maximum
+ * deviation from it, so that higher-jitter settings
+ * score worse in the rate comparison done by the caller.
+ */
+ return *avgrate - max(*avgrate - low, high - *avgrate);
+ }
+ return *avgrate;
+ }
+
+ if (data->frac_bits)
+ dev_warn(cprman->dev,
+ "frac bits are not used when propagating rate change");
+
+ /* clamp to min divider of 2 if we're dealing with a mash clock */
+ mindiv = data->is_mash_clock ? 2 : 1;
+ maxdiv = BIT(data->int_bits) - 1;
+
+ /* TODO: Be smart, and only test a subset of the available divisors. */
+ for (curdiv = mindiv; curdiv <= maxdiv; curdiv++) {
+ unsigned long tmp_rate;
+
+ tmp_rate = clk_hw_round_rate(parent, rate * curdiv);
+ tmp_rate /= curdiv;
+ if (curdiv == mindiv ||
+ (tmp_rate > best_rate && tmp_rate <= rate))
+ best_rate = tmp_rate;
+
+ if (best_rate == rate)
+ break;
+ }
+
+ *div = curdiv << CM_DIV_FRAC_BITS;
+ *prate = curdiv * best_rate;
+ *avgrate = best_rate;
+
+ return best_rate;
+}
+
+static int bcm2835_clock_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_hw *parent, *best_parent = NULL;
+ bool current_parent_is_pllc;
+ unsigned long rate, best_rate = 0;
+ unsigned long prate, best_prate = 0;
+ unsigned long avgrate, best_avgrate = 0;
+ size_t i;
+ u32 div;
+
+ current_parent_is_pllc = bcm2835_clk_is_pllc(clk_hw_get_parent(hw));
+
+ /*
+ * Select the parent clock that results in the rate closest to
+ * the requested one.
+ */
+ for (i = 0; i < clk_hw_get_num_parents(hw); ++i) {
+ parent = clk_hw_get_parent_by_index(hw, i);
+ if (!parent)
+ continue;
+
+ /*
+ * Don't choose a PLLC-derived clock as our parent
+ * unless it had been manually set that way. PLLC's
+ * frequency gets adjusted by the firmware due to
+ * over-temp or under-voltage conditions, without
+ * prior notification to our clock consumer.
+ */
+ if (bcm2835_clk_is_pllc(parent) && !current_parent_is_pllc)
+ continue;
+
+ rate = bcm2835_clock_choose_div_and_prate(hw, i, req->rate,
+ &div, &prate,
+ &avgrate);
+ if (abs(req->rate - rate) < abs(req->rate - best_rate)) {
+ best_parent = parent;
+ best_prate = prate;
+ best_rate = rate;
+ best_avgrate = avgrate;
+ }
+ }
+
+ if (!best_parent)
+ return -EINVAL;
+
+ req->best_parent_hw = best_parent;
+ req->best_parent_rate = best_prate;
+
+ req->rate = best_avgrate;
+
+ return 0;
+}
+
+static int bcm2835_clock_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
+ struct bcm2835_cprman *cprman = clock->cprman;
+ const struct bcm2835_clock_data *data = clock->data;
+ u8 src = (index << CM_SRC_SHIFT) & CM_SRC_MASK;
+
+ cprman_write(cprman, data->ctl_reg, src);
+ return 0;
+}
+
+static u8 bcm2835_clock_get_parent(struct clk_hw *hw)
+{
+ struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
+ struct bcm2835_cprman *cprman = clock->cprman;
+ const struct bcm2835_clock_data *data = clock->data;
+ u32 src = cprman_read(cprman, data->ctl_reg);
+
+ return (src & CM_SRC_MASK) >> CM_SRC_SHIFT;
+}
+
+static const struct debugfs_reg32 bcm2835_debugfs_clock_reg32[] = {
+ {
+ .name = "ctl",
+ .offset = 0,
+ },
+ {
+ .name = "div",
+ .offset = 4,
+ },
+};
+
+static void bcm2835_clock_debug_init(struct clk_hw *hw,
+ struct dentry *dentry)
+{
+ struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
+ struct bcm2835_cprman *cprman = clock->cprman;
+ const struct bcm2835_clock_data *data = clock->data;
+
+ bcm2835_debugfs_regset(cprman, data->ctl_reg,
+ bcm2835_debugfs_clock_reg32,
+ ARRAY_SIZE(bcm2835_debugfs_clock_reg32),
+ dentry);
+}
+
+static const struct clk_ops bcm2835_clock_clk_ops = {
+ .is_prepared = bcm2835_clock_is_on,
+ .prepare = bcm2835_clock_on,
+ .unprepare = bcm2835_clock_off,
+ .recalc_rate = bcm2835_clock_get_rate,
+ .set_rate = bcm2835_clock_set_rate,
+ .determine_rate = bcm2835_clock_determine_rate,
+ .set_parent = bcm2835_clock_set_parent,
+ .get_parent = bcm2835_clock_get_parent,
+ .debug_init = bcm2835_clock_debug_init,
+};
+
+static int bcm2835_vpu_clock_is_on(struct clk_hw *hw)
+{
+ return true;
+}
+
+/*
+ * The VPU clock can never be disabled (it doesn't have an ENABLE
+ * bit), so it gets its own set of clock ops.
+ */
+static const struct clk_ops bcm2835_vpu_clock_clk_ops = {
+ .is_prepared = bcm2835_vpu_clock_is_on,
+ .recalc_rate = bcm2835_clock_get_rate,
+ .set_rate = bcm2835_clock_set_rate,
+ .determine_rate = bcm2835_clock_determine_rate,
+ .set_parent = bcm2835_clock_set_parent,
+ .get_parent = bcm2835_clock_get_parent,
+ .debug_init = bcm2835_clock_debug_init,
+};
+
+static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman,
+ const void *data)
+{
+ const struct bcm2835_pll_data *pll_data = data;
+ struct bcm2835_pll *pll;
+ struct clk_init_data init;
+ int ret;
+
+ memset(&init, 0, sizeof(init));
+
+ /* All of the PLLs derive from the external oscillator. */
+ init.parent_names = &cprman->real_parent_names[0];
+ init.num_parents = 1;
+ init.name = pll_data->name;
+ init.ops = &bcm2835_pll_clk_ops;
+ init.flags = pll_data->flags | CLK_IGNORE_UNUSED;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return NULL;
+
+ pll->cprman = cprman;
+ pll->data = pll_data;
+ pll->hw.init = &init;
+
+ ret = devm_clk_hw_register(cprman->dev, &pll->hw);
+ if (ret) {
+ kfree(pll);
+ return NULL;
+ }
+ return &pll->hw;
+}
+
+static struct clk_hw *
+bcm2835_register_pll_divider(struct bcm2835_cprman *cprman,
+ const void *data)
+{
+ const struct bcm2835_pll_divider_data *divider_data = data;
+ struct bcm2835_pll_divider *divider;
+ struct clk_init_data init;
+ const char *divider_name;
+ int ret;
+
+ if (divider_data->fixed_divider != 1) {
+ divider_name = devm_kasprintf(cprman->dev, GFP_KERNEL,
+ "%s_prediv", divider_data->name);
+ if (!divider_name)
+ return NULL;
+ } else {
+ divider_name = divider_data->name;
+ }
+
+ memset(&init, 0, sizeof(init));
+
+ init.parent_names = &divider_data->source_pll;
+ init.num_parents = 1;
+ init.name = divider_name;
+ init.ops = &bcm2835_pll_divider_clk_ops;
+ init.flags = divider_data->flags | CLK_IGNORE_UNUSED;
+
+ divider = devm_kzalloc(cprman->dev, sizeof(*divider), GFP_KERNEL);
+ if (!divider)
+ return NULL;
+
+ divider->div.reg = cprman->regs + divider_data->a2w_reg;
+ divider->div.shift = A2W_PLL_DIV_SHIFT;
+ divider->div.width = A2W_PLL_DIV_BITS;
+ divider->div.flags = CLK_DIVIDER_MAX_AT_ZERO;
+ divider->div.lock = &cprman->regs_lock;
+ divider->div.hw.init = &init;
+ divider->div.table = NULL;
+
+ divider->cprman = cprman;
+ divider->data = divider_data;
+
+ ret = devm_clk_hw_register(cprman->dev, &divider->div.hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /*
+ * PLLH's channels have a fixed divide by 10 afterwards, which
+ * is what our consumers are actually using.
+ */
+ if (divider_data->fixed_divider != 1) {
+ return clk_hw_register_fixed_factor(cprman->dev,
+ divider_data->name,
+ divider_name,
+ CLK_SET_RATE_PARENT,
+ 1,
+ divider_data->fixed_divider);
+ }
+
+ return &divider->div.hw;
+}
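+
+/*
+ * For instance, the PLLH channels in the table below are registered with
+ * fixed_divider = 10: the A2W divider itself is registered as
+ * "<name>_prediv" and a fixed-factor 1/10 clock carrying the original
+ * name (e.g. "pllh_pix") is what consumers end up using.
+ */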
+
+static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman,
+ const void *data)
+{
+ const struct bcm2835_clock_data *clock_data = data;
+ struct bcm2835_clock *clock;
+ struct clk_init_data init;
+ const char *parents[1 << CM_SRC_BITS];
+ size_t i;
+ int ret;
+
+ /*
+ * Replace our strings referencing parent clocks with the
+ * actual clock-output-name of the parent.
+ */
+ for (i = 0; i < clock_data->num_mux_parents; i++) {
+ parents[i] = clock_data->parents[i];
+
+ ret = match_string(cprman_parent_names,
+ ARRAY_SIZE(cprman_parent_names),
+ parents[i]);
+ if (ret >= 0)
+ parents[i] = cprman->real_parent_names[ret];
+ }
+
+ memset(&init, 0, sizeof(init));
+ init.parent_names = parents;
+ init.num_parents = clock_data->num_mux_parents;
+ init.name = clock_data->name;
+ init.flags = clock_data->flags | CLK_IGNORE_UNUSED;
+
+ /*
+ * Pass the CLK_SET_RATE_PARENT flag if we are allowed to propagate
+ * rate changes on at least one of the parents.
+ */
+ if (clock_data->set_rate_parent)
+ init.flags |= CLK_SET_RATE_PARENT;
+
+ if (clock_data->is_vpu_clock) {
+ init.ops = &bcm2835_vpu_clock_clk_ops;
+ } else {
+ init.ops = &bcm2835_clock_clk_ops;
+ init.flags |= CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+
+ /* If the clock wasn't actually enabled at boot, it's not
+ * critical.
+ */
+ if (!(cprman_read(cprman, clock_data->ctl_reg) & CM_ENABLE))
+ init.flags &= ~CLK_IS_CRITICAL;
+ }
+
+ clock = devm_kzalloc(cprman->dev, sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return NULL;
+
+ clock->cprman = cprman;
+ clock->data = clock_data;
+ clock->hw.init = &init;
+
+ ret = devm_clk_hw_register(cprman->dev, &clock->hw);
+ if (ret)
+ return ERR_PTR(ret);
+ return &clock->hw;
+}
+
+static struct clk_hw *bcm2835_register_gate(struct bcm2835_cprman *cprman,
+ const void *data)
+{
+ const struct bcm2835_gate_data *gate_data = data;
+
+ return clk_hw_register_gate(cprman->dev, gate_data->name,
+ gate_data->parent,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+ cprman->regs + gate_data->ctl_reg,
+ CM_GATE_BIT, 0, &cprman->regs_lock);
+}
+
+struct bcm2835_clk_desc {
+ struct clk_hw *(*clk_register)(struct bcm2835_cprman *cprman,
+ const void *data);
+ unsigned int supported;
+ const void *data;
+};
+
+/* assignment helper macros for different clock types */
+#define _REGISTER(f, s, ...) { .clk_register = f, \
+ .supported = s, \
+ .data = __VA_ARGS__ }
+#define REGISTER_PLL(s, ...) _REGISTER(&bcm2835_register_pll, \
+ s, \
+ &(struct bcm2835_pll_data) \
+ {__VA_ARGS__})
+#define REGISTER_PLL_DIV(s, ...) _REGISTER(&bcm2835_register_pll_divider, \
+ s, \
+ &(struct bcm2835_pll_divider_data) \
+ {__VA_ARGS__})
+#define REGISTER_CLK(s, ...) _REGISTER(&bcm2835_register_clock, \
+ s, \
+ &(struct bcm2835_clock_data) \
+ {__VA_ARGS__})
+#define REGISTER_GATE(s, ...) _REGISTER(&bcm2835_register_gate, \
+ s, \
+ &(struct bcm2835_gate_data) \
+ {__VA_ARGS__})
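+
+/*
+ * As an illustration of how these helpers compose (hypothetical entry, not
+ * part of the table below): REGISTER_CLK(SOC_ALL, .name = "foo", ...)
+ * expands to a struct bcm2835_clk_desc whose .clk_register is
+ * bcm2835_register_clock, whose .supported is SOC_ALL, and whose .data
+ * points at an anonymous compound-literal struct bcm2835_clock_data filled
+ * in from the designated initializers passed through __VA_ARGS__.
+ */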
+
+/* parent mux arrays plus helper macros */
+
+/* main oscillator parent mux */
+static const char *const bcm2835_clock_osc_parents[] = {
+ "gnd",
+ "xosc",
+ "testdebug0",
+ "testdebug1"
+};
+
+#define REGISTER_OSC_CLK(s, ...) REGISTER_CLK( \
+ s, \
+ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_osc_parents), \
+ .parents = bcm2835_clock_osc_parents, \
+ __VA_ARGS__)
+
+/* main peripheral parent mux */
+static const char *const bcm2835_clock_per_parents[] = {
+ "gnd",
+ "xosc",
+ "testdebug0",
+ "testdebug1",
+ "plla_per",
+ "pllc_per",
+ "plld_per",
+ "pllh_aux",
+};
+
+#define REGISTER_PER_CLK(s, ...) REGISTER_CLK( \
+ s, \
+ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_per_parents), \
+ .parents = bcm2835_clock_per_parents, \
+ __VA_ARGS__)
+
+/*
+ * Restrict clock sources for the PCM peripheral to the oscillator and
+ * PLLD_PER because other sources may have varying rates or be switched
+ * off.
+ *
+ * Prevent other sources from being selected by replacing their names in
+ * the list of potential parents with dummy entries (entry index is
+ * significant).
+ */
+static const char *const bcm2835_pcm_per_parents[] = {
+ "-",
+ "xosc",
+ "-",
+ "-",
+ "-",
+ "-",
+ "plld_per",
+ "-",
+};
+
+#define REGISTER_PCM_CLK(s, ...) REGISTER_CLK( \
+ s, \
+ .num_mux_parents = ARRAY_SIZE(bcm2835_pcm_per_parents), \
+ .parents = bcm2835_pcm_per_parents, \
+ __VA_ARGS__)
+
+/* main vpu parent mux */
+static const char *const bcm2835_clock_vpu_parents[] = {
+ "gnd",
+ "xosc",
+ "testdebug0",
+ "testdebug1",
+ "plla_core",
+ "pllc_core0",
+ "plld_core",
+ "pllh_aux",
+ "pllc_core1",
+ "pllc_core2",
+};
+
+#define REGISTER_VPU_CLK(s, ...) REGISTER_CLK( \
+ s, \
+ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_vpu_parents), \
+ .parents = bcm2835_clock_vpu_parents, \
+ __VA_ARGS__)
+
+/*
+ * DSI parent clocks. The DSI byte/DDR/DDR2 clocks come from the DSI
+ * analog PHY. The _inv variants are generated internally to cprman,
+ * but we don't use them so they aren't hooked up.
+ */
+static const char *const bcm2835_clock_dsi0_parents[] = {
+ "gnd",
+ "xosc",
+ "testdebug0",
+ "testdebug1",
+ "dsi0_ddr",
+ "dsi0_ddr_inv",
+ "dsi0_ddr2",
+ "dsi0_ddr2_inv",
+ "dsi0_byte",
+ "dsi0_byte_inv",
+};
+
+static const char *const bcm2835_clock_dsi1_parents[] = {
+ "gnd",
+ "xosc",
+ "testdebug0",
+ "testdebug1",
+ "dsi1_ddr",
+ "dsi1_ddr_inv",
+ "dsi1_ddr2",
+ "dsi1_ddr2_inv",
+ "dsi1_byte",
+ "dsi1_byte_inv",
+};
+
+#define REGISTER_DSI0_CLK(s, ...) REGISTER_CLK( \
+ s, \
+ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_dsi0_parents), \
+ .parents = bcm2835_clock_dsi0_parents, \
+ __VA_ARGS__)
+
+#define REGISTER_DSI1_CLK(s, ...) REGISTER_CLK( \
+ s, \
+ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_dsi1_parents), \
+ .parents = bcm2835_clock_dsi1_parents, \
+ __VA_ARGS__)
+
+/*
+ * The real definitions of all the PLLs, PLL dividers and clocks;
+ * these make use of the REGISTER_* macros above.
+ */
+static const struct bcm2835_clk_desc clk_desc_array[] = {
+ /* the PLL + PLL dividers */
+
+ /*
+ * PLLA is the auxiliary PLL, used to drive the CCP2
+ * (Compact Camera Port 2) transmitter clock.
+ *
+ * It is in the PX LDO power domain, which is on when the
+ * AUDIO domain is on.
+ */
+ [BCM2835_PLLA] = REGISTER_PLL(
+ SOC_ALL,
+ .name = "plla",
+ .cm_ctrl_reg = CM_PLLA,
+ .a2w_ctrl_reg = A2W_PLLA_CTRL,
+ .frac_reg = A2W_PLLA_FRAC,
+ .ana_reg_base = A2W_PLLA_ANA0,
+ .reference_enable_mask = A2W_XOSC_CTRL_PLLA_ENABLE,
+ .lock_mask = CM_LOCK_FLOCKA,
+
+ .ana = &bcm2835_ana_default,
+
+ .min_rate = 600000000u,
+ .max_rate = 2400000000u,
+ .max_fb_rate = BCM2835_MAX_FB_RATE),
+ [BCM2835_PLLA_CORE] = REGISTER_PLL_DIV(
+ SOC_ALL,
+ .name = "plla_core",
+ .source_pll = "plla",
+ .cm_reg = CM_PLLA,
+ .a2w_reg = A2W_PLLA_CORE,
+ .load_mask = CM_PLLA_LOADCORE,
+ .hold_mask = CM_PLLA_HOLDCORE,
+ .fixed_divider = 1,
+ .flags = CLK_SET_RATE_PARENT),
+ [BCM2835_PLLA_PER] = REGISTER_PLL_DIV(
+ SOC_ALL,
+ .name = "plla_per",
+ .source_pll = "plla",
+ .cm_reg = CM_PLLA,
+ .a2w_reg = A2W_PLLA_PER,
+ .load_mask = CM_PLLA_LOADPER,
+ .hold_mask = CM_PLLA_HOLDPER,
+ .fixed_divider = 1,
+ .flags = CLK_SET_RATE_PARENT),
+ [BCM2835_PLLA_DSI0] = REGISTER_PLL_DIV(
+ SOC_ALL,
+ .name = "plla_dsi0",
+ .source_pll = "plla",
+ .cm_reg = CM_PLLA,
+ .a2w_reg = A2W_PLLA_DSI0,
+ .load_mask = CM_PLLA_LOADDSI0,
+ .hold_mask = CM_PLLA_HOLDDSI0,
+ .fixed_divider = 1),
+ [BCM2835_PLLA_CCP2] = REGISTER_PLL_DIV(
+ SOC_ALL,
+ .name = "plla_ccp2",
+ .source_pll = "plla",
+ .cm_reg = CM_PLLA,
+ .a2w_reg = A2W_PLLA_CCP2,
+ .load_mask = CM_PLLA_LOADCCP2,
+ .hold_mask = CM_PLLA_HOLDCCP2,
+ .fixed_divider = 1,
+ .flags = CLK_SET_RATE_PARENT),
+
+ /* PLLB is used for the ARM's clock. */
+ [BCM2835_PLLB] = REGISTER_PLL(
+ SOC_ALL,
+ .name = "pllb",
+ .cm_ctrl_reg = CM_PLLB,
+ .a2w_ctrl_reg = A2W_PLLB_CTRL,
+ .frac_reg = A2W_PLLB_FRAC,
+ .ana_reg_base = A2W_PLLB_ANA0,
+ .reference_enable_mask = A2W_XOSC_CTRL_PLLB_ENABLE,
+ .lock_mask = CM_LOCK_FLOCKB,
+
+ .ana = &bcm2835_ana_default,
+
+ .min_rate = 600000000u,
+ .max_rate = 3000000000u,
+ .max_fb_rate = BCM2835_MAX_FB_RATE,
+ .flags = CLK_GET_RATE_NOCACHE),
+ [BCM2835_PLLB_ARM] = REGISTER_PLL_DIV(
+ SOC_ALL,
+ .name = "pllb_arm",
+ .source_pll = "pllb",
+ .cm_reg = CM_PLLB,
+ .a2w_reg = A2W_PLLB_ARM,
+ .load_mask = CM_PLLB_LOADARM,
+ .hold_mask = CM_PLLB_HOLDARM,
+ .fixed_divider = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE),
+
+ /*
+ * PLLC is the core PLL, used to drive the core VPU clock.
+ *
+ * It is in the PX LDO power domain, which is on when the
+ * AUDIO domain is on.
+ */
+ [BCM2835_PLLC] = REGISTER_PLL(
+ SOC_ALL,
+ .name = "pllc",
+ .cm_ctrl_reg = CM_PLLC,
+ .a2w_ctrl_reg = A2W_PLLC_CTRL,
+ .frac_reg = A2W_PLLC_FRAC,
+ .ana_reg_base = A2W_PLLC_ANA0,
+ .reference_enable_mask = A2W_XOSC_CTRL_PLLC_ENABLE,
+ .lock_mask = CM_LOCK_FLOCKC,
+
+ .ana = &bcm2835_ana_default,
+
+ .min_rate = 600000000u,
+ .max_rate = 3000000000u,
+ .max_fb_rate = BCM2835_MAX_FB_RATE),
+ [BCM2835_PLLC_CORE0] = REGISTER_PLL_DIV(
+ SOC_ALL,
+ .name = "pllc_core0",
+ .source_pll = "pllc",
+ .cm_reg = CM_PLLC,
+ .a2w_reg = A2W_PLLC_CORE0,
+ .load_mask = CM_PLLC_LOADCORE0,
+ .hold_mask = CM_PLLC_HOLDCORE0,
+ .fixed_divider = 1,
+ .flags = CLK_SET_RATE_PARENT),
+ [BCM2835_PLLC_CORE1] = REGISTER_PLL_DIV(
+ SOC_ALL,
+ .name = "pllc_core1",
+ .source_pll = "pllc",
+ .cm_reg = CM_PLLC,
+ .a2w_reg = A2W_PLLC_CORE1,
+ .load_mask = CM_PLLC_LOADCORE1,
+ .hold_mask = CM_PLLC_HOLDCORE1,
+ .fixed_divider = 1,
+ .flags = CLK_SET_RATE_PARENT),
+ [BCM2835_PLLC_CORE2] = REGISTER_PLL_DIV(
+ SOC_ALL,
+ .name = "pllc_core2",
+ .source_pll = "pllc",
+ .cm_reg = CM_PLLC,
+ .a2w_reg = A2W_PLLC_CORE2,
+ .load_mask = CM_PLLC_LOADCORE2,
+ .hold_mask = CM_PLLC_HOLDCORE2,
+ .fixed_divider = 1,
+ .flags = CLK_SET_RATE_PARENT),
+ [BCM2835_PLLC_PER] = REGISTER_PLL_DIV(
+ SOC_ALL,
+ .name = "pllc_per",
+ .source_pll = "pllc",
+ .cm_reg = CM_PLLC,
+ .a2w_reg = A2W_PLLC_PER,
+ .load_mask = CM_PLLC_LOADPER,
+ .hold_mask = CM_PLLC_HOLDPER,
+ .fixed_divider = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
+
+ /*
+ * PLLD is the display PLL, used to drive DSI display panels.
+ *
+ * It is in the PX LDO power domain, which is on when the
+ * AUDIO domain is on.
+ */
+ [BCM2835_PLLD] = REGISTER_PLL(
+ SOC_ALL,
+ .name = "plld",
+ .cm_ctrl_reg = CM_PLLD,
+ .a2w_ctrl_reg = A2W_PLLD_CTRL,
+ .frac_reg = A2W_PLLD_FRAC,
+ .ana_reg_base = A2W_PLLD_ANA0,
+ .reference_enable_mask = A2W_XOSC_CTRL_DDR_ENABLE,
+ .lock_mask = CM_LOCK_FLOCKD,
+
+ .ana = &bcm2835_ana_default,
+
+ .min_rate = 600000000u,
+ .max_rate = 2400000000u,
+ .max_fb_rate = BCM2835_MAX_FB_RATE),
+ [BCM2835_PLLD_CORE] = REGISTER_PLL_DIV(
+ SOC_ALL,
+ .name = "plld_core",
+ .source_pll = "plld",
+ .cm_reg = CM_PLLD,
+ .a2w_reg = A2W_PLLD_CORE,
+ .load_mask = CM_PLLD_LOADCORE,
+ .hold_mask = CM_PLLD_HOLDCORE,
+ .fixed_divider = 1,
+ .flags = CLK_SET_RATE_PARENT),
+ /*
+ * VPU firmware assumes that PLLD_PER isn't disabled by the ARM core.
+ * Otherwise this could cause firmware lockups. That's why we mark
+ * it as critical.
+ */
+ [BCM2835_PLLD_PER] = REGISTER_PLL_DIV(
+ SOC_ALL,
+ .name = "plld_per",
+ .source_pll = "plld",
+ .cm_reg = CM_PLLD,
+ .a2w_reg = A2W_PLLD_PER,
+ .load_mask = CM_PLLD_LOADPER,
+ .hold_mask = CM_PLLD_HOLDPER,
+ .fixed_divider = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
+ [BCM2835_PLLD_DSI0] = REGISTER_PLL_DIV(
+ SOC_ALL,
+ .name = "plld_dsi0",
+ .source_pll = "plld",
+ .cm_reg = CM_PLLD,
+ .a2w_reg = A2W_PLLD_DSI0,
+ .load_mask = CM_PLLD_LOADDSI0,
+ .hold_mask = CM_PLLD_HOLDDSI0,
+ .fixed_divider = 1),
+ [BCM2835_PLLD_DSI1] = REGISTER_PLL_DIV(
+ SOC_ALL,
+ .name = "plld_dsi1",
+ .source_pll = "plld",
+ .cm_reg = CM_PLLD,
+ .a2w_reg = A2W_PLLD_DSI1,
+ .load_mask = CM_PLLD_LOADDSI1,
+ .hold_mask = CM_PLLD_HOLDDSI1,
+ .fixed_divider = 1),
+
+ /*
+ * PLLH is used to supply the pixel clock or the AUX clock for the
+ * TV encoder.
+ *
+ * It is in the HDMI power domain.
+ */
+ [BCM2835_PLLH] = REGISTER_PLL(
+ SOC_BCM2835,
+ "pllh",
+ .cm_ctrl_reg = CM_PLLH,
+ .a2w_ctrl_reg = A2W_PLLH_CTRL,
+ .frac_reg = A2W_PLLH_FRAC,
+ .ana_reg_base = A2W_PLLH_ANA0,
+ .reference_enable_mask = A2W_XOSC_CTRL_PLLC_ENABLE,
+ .lock_mask = CM_LOCK_FLOCKH,
+
+ .ana = &bcm2835_ana_pllh,
+
+ .min_rate = 600000000u,
+ .max_rate = 3000000000u,
+ .max_fb_rate = BCM2835_MAX_FB_RATE),
+ [BCM2835_PLLH_RCAL] = REGISTER_PLL_DIV(
+ SOC_BCM2835,
+ .name = "pllh_rcal",
+ .source_pll = "pllh",
+ .cm_reg = CM_PLLH,
+ .a2w_reg = A2W_PLLH_RCAL,
+ .load_mask = CM_PLLH_LOADRCAL,
+ .hold_mask = 0,
+ .fixed_divider = 10,
+ .flags = CLK_SET_RATE_PARENT),
+ [BCM2835_PLLH_AUX] = REGISTER_PLL_DIV(
+ SOC_BCM2835,
+ .name = "pllh_aux",
+ .source_pll = "pllh",
+ .cm_reg = CM_PLLH,
+ .a2w_reg = A2W_PLLH_AUX,
+ .load_mask = CM_PLLH_LOADAUX,
+ .hold_mask = 0,
+ .fixed_divider = 1,
+ .flags = CLK_SET_RATE_PARENT),
+ [BCM2835_PLLH_PIX] = REGISTER_PLL_DIV(
+ SOC_BCM2835,
+ .name = "pllh_pix",
+ .source_pll = "pllh",
+ .cm_reg = CM_PLLH,
+ .a2w_reg = A2W_PLLH_PIX,
+ .load_mask = CM_PLLH_LOADPIX,
+ .hold_mask = 0,
+ .fixed_divider = 10,
+ .flags = CLK_SET_RATE_PARENT),
+
+ /* the clocks */
+
+ /* clocks with oscillator parent mux */
+
+ /* One Time Programmable Memory clock. Maximum 10 MHz. */
+ [BCM2835_CLOCK_OTP] = REGISTER_OSC_CLK(
+ SOC_ALL,
+ .name = "otp",
+ .ctl_reg = CM_OTPCTL,
+ .div_reg = CM_OTPDIV,
+ .int_bits = 4,
+ .frac_bits = 0,
+ .tcnt_mux = 6),
+ /*
+ * Used for a 1 MHz clock for the system clocksource, and also used
+ * by the watchdog timer and the camera pulse generator.
+ */
+ [BCM2835_CLOCK_TIMER] = REGISTER_OSC_CLK(
+ SOC_ALL,
+ .name = "timer",
+ .ctl_reg = CM_TIMERCTL,
+ .div_reg = CM_TIMERDIV,
+ .int_bits = 6,
+ .frac_bits = 12),
+ /*
+ * Clock for the temperature sensor.
+ * Generally run at 2 MHz, max 5 MHz.
+ */
+ [BCM2835_CLOCK_TSENS] = REGISTER_OSC_CLK(
+ SOC_ALL,
+ .name = "tsens",
+ .ctl_reg = CM_TSENSCTL,
+ .div_reg = CM_TSENSDIV,
+ .int_bits = 5,
+ .frac_bits = 0),
+ [BCM2835_CLOCK_TEC] = REGISTER_OSC_CLK(
+ SOC_ALL,
+ .name = "tec",
+ .ctl_reg = CM_TECCTL,
+ .div_reg = CM_TECDIV,
+ .int_bits = 6,
+ .frac_bits = 0),
+
+ /* clocks with vpu parent mux */
+ [BCM2835_CLOCK_H264] = REGISTER_VPU_CLK(
+ SOC_ALL,
+ .name = "h264",
+ .ctl_reg = CM_H264CTL,
+ .div_reg = CM_H264DIV,
+ .int_bits = 4,
+ .frac_bits = 8,
+ .tcnt_mux = 1),
+ [BCM2835_CLOCK_ISP] = REGISTER_VPU_CLK(
+ SOC_ALL,
+ .name = "isp",
+ .ctl_reg = CM_ISPCTL,
+ .div_reg = CM_ISPDIV,
+ .int_bits = 4,
+ .frac_bits = 8,
+ .tcnt_mux = 2),
+
+ /*
+ * Secondary SDRAM clock. Used for low-voltage modes when the PLL
+ * in the SDRAM controller can't be used.
+ */
+ [BCM2835_CLOCK_SDRAM] = REGISTER_VPU_CLK(
+ SOC_ALL,
+ .name = "sdram",
+ .ctl_reg = CM_SDCCTL,
+ .div_reg = CM_SDCDIV,
+ .int_bits = 6,
+ .frac_bits = 0,
+ .tcnt_mux = 3),
+ [BCM2835_CLOCK_V3D] = REGISTER_VPU_CLK(
+ SOC_ALL,
+ .name = "v3d",
+ .ctl_reg = CM_V3DCTL,
+ .div_reg = CM_V3DDIV,
+ .int_bits = 4,
+ .frac_bits = 8,
+ .tcnt_mux = 4),
+ /*
+ * VPU clock. This doesn't have an enable bit, since it drives
+ * the bus for everything else, and is special so it doesn't need
+ * to be gated for rate changes. It is also known as "clk_audio"
+ * in various hardware documentation.
+ */
+ [BCM2835_CLOCK_VPU] = REGISTER_VPU_CLK(
+ SOC_ALL,
+ .name = "vpu",
+ .ctl_reg = CM_VPUCTL,
+ .div_reg = CM_VPUDIV,
+ .int_bits = 12,
+ .frac_bits = 8,
+ .flags = CLK_IS_CRITICAL,
+ .is_vpu_clock = true,
+ .tcnt_mux = 5),
+
+ /* clocks with per parent mux */
+ [BCM2835_CLOCK_AVEO] = REGISTER_PER_CLK(
+ SOC_ALL,
+ .name = "aveo",
+ .ctl_reg = CM_AVEOCTL,
+ .div_reg = CM_AVEODIV,
+ .int_bits = 4,
+ .frac_bits = 0,
+ .tcnt_mux = 38),
+ [BCM2835_CLOCK_CAM0] = REGISTER_PER_CLK(
+ SOC_ALL,
+ .name = "cam0",
+ .ctl_reg = CM_CAM0CTL,
+ .div_reg = CM_CAM0DIV,
+ .int_bits = 4,
+ .frac_bits = 8,
+ .tcnt_mux = 14),
+ [BCM2835_CLOCK_CAM1] = REGISTER_PER_CLK(
+ SOC_ALL,
+ .name = "cam1",
+ .ctl_reg = CM_CAM1CTL,
+ .div_reg = CM_CAM1DIV,
+ .int_bits = 4,
+ .frac_bits = 8,
+ .tcnt_mux = 15),
+ [BCM2835_CLOCK_DFT] = REGISTER_PER_CLK(
+ SOC_ALL,
+ .name = "dft",
+ .ctl_reg = CM_DFTCTL,
+ .div_reg = CM_DFTDIV,
+ .int_bits = 5,
+ .frac_bits = 0),
+ [BCM2835_CLOCK_DPI] = REGISTER_PER_CLK(
+ SOC_ALL,
+ .name = "dpi",
+ .ctl_reg = CM_DPICTL,
+ .div_reg = CM_DPIDIV,
+ .int_bits = 4,
+ .frac_bits = 8,
+ .tcnt_mux = 17),
+
+ /* Arasan EMMC clock */
+ [BCM2835_CLOCK_EMMC] = REGISTER_PER_CLK(
+ SOC_ALL,
+ .name = "emmc",
+ .ctl_reg = CM_EMMCCTL,
+ .div_reg = CM_EMMCDIV,
+ .int_bits = 4,
+ .frac_bits = 8,
+ .tcnt_mux = 39),
+
+ /* EMMC2 clock (only available for BCM2711) */
+ [BCM2711_CLOCK_EMMC2] = REGISTER_PER_CLK(
+ SOC_BCM2711,
+ .name = "emmc2",
+ .ctl_reg = CM_EMMC2CTL,
+ .div_reg = CM_EMMC2DIV,
+ .int_bits = 4,
+ .frac_bits = 8,
+ .tcnt_mux = 42),
+
+ /* General purpose (GPIO) clocks */
+ [BCM2835_CLOCK_GP0] = REGISTER_PER_CLK(
+ SOC_ALL,
+ .name = "gp0",
+ .ctl_reg = CM_GP0CTL,
+ .div_reg = CM_GP0DIV,
+ .int_bits = 12,
+ .frac_bits = 12,
+ .is_mash_clock = true,
+ .tcnt_mux = 20),
+ [BCM2835_CLOCK_GP1] = REGISTER_PER_CLK(
+ SOC_ALL,
+ .name = "gp1",
+ .ctl_reg = CM_GP1CTL,
+ .div_reg = CM_GP1DIV,
+ .int_bits = 12,
+ .frac_bits = 12,
+ .flags = CLK_IS_CRITICAL,
+ .is_mash_clock = true,
+ .tcnt_mux = 21),
+ [BCM2835_CLOCK_GP2] = REGISTER_PER_CLK(
+ SOC_ALL,
+ .name = "gp2",
+ .ctl_reg = CM_GP2CTL,
+ .div_reg = CM_GP2DIV,
+ .int_bits = 12,
+ .frac_bits = 12,
+ .flags = CLK_IS_CRITICAL),
+
+ /* HDMI state machine */
+ [BCM2835_CLOCK_HSM] = REGISTER_PER_CLK(
+ SOC_ALL,
+ .name = "hsm",
+ .ctl_reg = CM_HSMCTL,
+ .div_reg = CM_HSMDIV,
+ .int_bits = 4,
+ .frac_bits = 8,
+ .tcnt_mux = 22),
+ [BCM2835_CLOCK_PCM] = REGISTER_PCM_CLK(
+ SOC_ALL,
+ .name = "pcm",
+ .ctl_reg = CM_PCMCTL,
+ .div_reg = CM_PCMDIV,
+ .int_bits = 12,
+ .frac_bits = 12,
+ .is_mash_clock = true,
+ .low_jitter = true,
+ .tcnt_mux = 23),
+ [BCM2835_CLOCK_PWM] = REGISTER_PER_CLK(
+ SOC_ALL,
+ .name = "pwm",
+ .ctl_reg = CM_PWMCTL,
+ .div_reg = CM_PWMDIV,
+ .int_bits = 12,
+ .frac_bits = 12,
+ .is_mash_clock = true,
+ .tcnt_mux = 24),
+ [BCM2835_CLOCK_SLIM] = REGISTER_PER_CLK(
+ SOC_ALL,
+ .name = "slim",
+ .ctl_reg = CM_SLIMCTL,
+ .div_reg = CM_SLIMDIV,
+ .int_bits = 12,
+ .frac_bits = 12,
+ .is_mash_clock = true,
+ .tcnt_mux = 25),
+ [BCM2835_CLOCK_SMI] = REGISTER_PER_CLK(
+ SOC_ALL,
+ .name = "smi",
+ .ctl_reg = CM_SMICTL,
+ .div_reg = CM_SMIDIV,
+ .int_bits = 4,
+ .frac_bits = 8,
+ .tcnt_mux = 27),
+ [BCM2835_CLOCK_UART] = REGISTER_PER_CLK(
+ SOC_ALL,
+ .name = "uart",
+ .ctl_reg = CM_UARTCTL,
+ .div_reg = CM_UARTDIV,
+ .int_bits = 10,
+ .frac_bits = 12,
+ .tcnt_mux = 28,
+ .round_up = true),
+
+ /* TV encoder clock. Its only operating frequency is 108 MHz. */
+ [BCM2835_CLOCK_VEC] = REGISTER_PER_CLK(
+ SOC_ALL,
+ .name = "vec",
+ .ctl_reg = CM_VECCTL,
+ .div_reg = CM_VECDIV,
+ .int_bits = 4,
+ .frac_bits = 0,
+ /*
+ * Allow rate change propagation only on PLLH_AUX which is
+ * assigned index 7 in the parent array.
+ */
+ .set_rate_parent = BIT(7),
+ .tcnt_mux = 29),
+
+ /* dsi clocks */
+ [BCM2835_CLOCK_DSI0E] = REGISTER_PER_CLK(
+ SOC_ALL,
+ .name = "dsi0e",
+ .ctl_reg = CM_DSI0ECTL,
+ .div_reg = CM_DSI0EDIV,
+ .int_bits = 4,
+ .frac_bits = 8,
+ .tcnt_mux = 18),
+ [BCM2835_CLOCK_DSI1E] = REGISTER_PER_CLK(
+ SOC_ALL,
+ .name = "dsi1e",
+ .ctl_reg = CM_DSI1ECTL,
+ .div_reg = CM_DSI1EDIV,
+ .int_bits = 4,
+ .frac_bits = 8,
+ .tcnt_mux = 19),
+ [BCM2835_CLOCK_DSI0P] = REGISTER_DSI0_CLK(
+ SOC_ALL,
+ .name = "dsi0p",
+ .ctl_reg = CM_DSI0PCTL,
+ .div_reg = CM_DSI0PDIV,
+ .int_bits = 0,
+ .frac_bits = 0,
+ .tcnt_mux = 12),
+ [BCM2835_CLOCK_DSI1P] = REGISTER_DSI1_CLK(
+ SOC_ALL,
+ .name = "dsi1p",
+ .ctl_reg = CM_DSI1PCTL,
+ .div_reg = CM_DSI1PDIV,
+ .int_bits = 0,
+ .frac_bits = 0,
+ .tcnt_mux = 13),
+
+ /* the gates */
+
+ /*
+ * CM_PERIICTL (and CM_PERIACTL, CM_SYSCTL and CM_VPUCTL if
+ * you have the debug bit set in the power manager, which we
+ * don't bother exposing) are individual gates off of the
+ * non-stop vpu clock.
+ */
+ [BCM2835_CLOCK_PERI_IMAGE] = REGISTER_GATE(
+ SOC_ALL,
+ .name = "peri_image",
+ .parent = "vpu",
+ .ctl_reg = CM_PERIICTL),
+};
+
+/*
+ * Permanently take a reference on the parent of the SDRAM clock.
+ *
+ * While the SDRAM is being driven by its dedicated PLL most of the
+ * time, there is a little loop running in the firmware that
+ * periodically switches the SDRAM to using our CM clock to do PVT
+ * recalibration, with the assumption that the previously configured
+ * SDRAM parent is still enabled and running.
+ */
+static int bcm2835_mark_sdc_parent_critical(struct clk *sdc)
+{
+ struct clk *parent = clk_get_parent(sdc);
+
+ if (IS_ERR(parent))
+ return PTR_ERR(parent);
+
+ return clk_prepare_enable(parent);
+}
+
+static int bcm2835_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk_hw **hws;
+ struct bcm2835_cprman *cprman;
+ const struct bcm2835_clk_desc *desc;
+ const size_t asize = ARRAY_SIZE(clk_desc_array);
+ const struct cprman_plat_data *pdata;
+ size_t i;
+ int ret;
+
+ pdata = of_device_get_match_data(&pdev->dev);
+ if (!pdata)
+ return -ENODEV;
+
+ cprman = devm_kzalloc(dev,
+ struct_size(cprman, onecell.hws, asize),
+ GFP_KERNEL);
+ if (!cprman)
+ return -ENOMEM;
+
+ spin_lock_init(&cprman->regs_lock);
+ cprman->dev = dev;
+ cprman->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(cprman->regs))
+ return PTR_ERR(cprman->regs);
+
+ memcpy(cprman->real_parent_names, cprman_parent_names,
+ sizeof(cprman_parent_names));
+ of_clk_parent_fill(dev->of_node, cprman->real_parent_names,
+ ARRAY_SIZE(cprman_parent_names));
+
+ /*
+ * Make sure the external oscillator has been registered.
+ *
+ * The other (DSI) clocks are not present on older device
+ * trees, which we still need to support for backwards
+ * compatibility.
+ */
+ if (!cprman->real_parent_names[0])
+ return -ENODEV;
+
+ platform_set_drvdata(pdev, cprman);
+
+ cprman->onecell.num = asize;
+ cprman->soc = pdata->soc;
+ hws = cprman->onecell.hws;
+
+ for (i = 0; i < asize; i++) {
+ desc = &clk_desc_array[i];
+ if (desc->clk_register && desc->data &&
+ (desc->supported & pdata->soc)) {
+ hws[i] = desc->clk_register(cprman, desc->data);
+ }
+ }
+
+ ret = bcm2835_mark_sdc_parent_critical(hws[BCM2835_CLOCK_SDRAM]->clk);
+ if (ret)
+ return ret;
+
+ return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+ &cprman->onecell);
+}
+
+static const struct cprman_plat_data cprman_bcm2835_plat_data = {
+ .soc = SOC_BCM2835,
+};
+
+static const struct cprman_plat_data cprman_bcm2711_plat_data = {
+ .soc = SOC_BCM2711,
+};
+
+static const struct of_device_id bcm2835_clk_of_match[] = {
+ { .compatible = "brcm,bcm2835-cprman", .data = &cprman_bcm2835_plat_data },
+ { .compatible = "brcm,bcm2711-cprman", .data = &cprman_bcm2711_plat_data },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bcm2835_clk_of_match);
+
+static struct platform_driver bcm2835_clk_driver = {
+ .driver = {
+ .name = "bcm2835-clk",
+ .of_match_table = bcm2835_clk_of_match,
+ },
+ .probe = bcm2835_clk_probe,
+};
+
+builtin_platform_driver(bcm2835_clk_driver);
+
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_DESCRIPTION("BCM2835 clock driver");
diff --git a/drivers/clk/bcm/clk-bcm53573-ilp.c b/drivers/clk/bcm/clk-bcm53573-ilp.c
new file mode 100644
index 0000000000..84f2af736e
--- /dev/null
+++ b/drivers/clk/bcm/clk-bcm53573-ilp.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define PMU_XTAL_FREQ_RATIO 0x66c
+#define XTAL_ALP_PER_4ILP 0x00001fff
+#define XTAL_CTL_EN 0x80000000
+#define PMU_SLOW_CLK_PERIOD 0x6dc
+
+struct bcm53573_ilp {
+ struct clk_hw hw;
+ struct regmap *regmap;
+};
+
+static int bcm53573_ilp_enable(struct clk_hw *hw)
+{
+ struct bcm53573_ilp *ilp = container_of(hw, struct bcm53573_ilp, hw);
+
+ regmap_write(ilp->regmap, PMU_SLOW_CLK_PERIOD, 0x10199);
+ regmap_write(ilp->regmap, 0x674, 0x10000);
+
+ return 0;
+}
+
+static void bcm53573_ilp_disable(struct clk_hw *hw)
+{
+ struct bcm53573_ilp *ilp = container_of(hw, struct bcm53573_ilp, hw);
+
+ regmap_write(ilp->regmap, PMU_SLOW_CLK_PERIOD, 0);
+ regmap_write(ilp->regmap, 0x674, 0);
+}
+
+static unsigned long bcm53573_ilp_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct bcm53573_ilp *ilp = container_of(hw, struct bcm53573_ilp, hw);
+ struct regmap *regmap = ilp->regmap;
+ u32 last_val, cur_val;
+ int sum = 0, num = 0, loop_num = 0;
+ int avg;
+
+ /* Enable measurement */
+ regmap_write(regmap, PMU_XTAL_FREQ_RATIO, XTAL_CTL_EN);
+
+ /* Read initial value */
+ regmap_read(regmap, PMU_XTAL_FREQ_RATIO, &last_val);
+ last_val &= XTAL_ALP_PER_4ILP;
+
+ /*
+ * At minimum we should loop for a bit to let the hardware do the
+ * measurement. This isn't very accurate on its own, so for better
+ * precision collect 20 different readings and use their average.
+ */
+ while (num < 20) {
+ regmap_read(regmap, PMU_XTAL_FREQ_RATIO, &cur_val);
+ cur_val &= XTAL_ALP_PER_4ILP;
+
+ if (cur_val != last_val) {
+ /* Got different value, use it */
+ sum += cur_val;
+ num++;
+ loop_num = 0;
+ last_val = cur_val;
+ } else if (++loop_num > 5000) {
+ /* Same value over and over, give up */
+ sum += cur_val;
+ num++;
+ break;
+ }
+
+ cpu_relax();
+ }
+
+ /* Disable measurement to save power */
+ regmap_write(regmap, PMU_XTAL_FREQ_RATIO, 0x0);
+
+ avg = sum / num;
+
+ return parent_rate * 4 / avg;
+}
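+
+/*
+ * Rough numeric example (the parent rate here is only an assumption for
+ * illustration): with a 40 MHz ALP parent and an averaged reading of about
+ * 4883 ALP cycles per 4 ILP cycles, the reported rate is
+ * 40000000 * 4 / 4883, i.e. roughly 32.8 kHz.
+ */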
+
+static const struct clk_ops bcm53573_ilp_clk_ops = {
+ .enable = bcm53573_ilp_enable,
+ .disable = bcm53573_ilp_disable,
+ .recalc_rate = bcm53573_ilp_recalc_rate,
+};
+
+static void bcm53573_ilp_init(struct device_node *np)
+{
+ struct bcm53573_ilp *ilp;
+ struct clk_init_data init = { };
+ const char *parent_name;
+ int err;
+
+ ilp = kzalloc(sizeof(*ilp), GFP_KERNEL);
+ if (!ilp)
+ return;
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (!parent_name) {
+ err = -ENOENT;
+ goto err_free_ilp;
+ }
+
+ ilp->regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(ilp->regmap)) {
+ err = PTR_ERR(ilp->regmap);
+ goto err_free_ilp;
+ }
+
+ init.name = np->name;
+ init.ops = &bcm53573_ilp_clk_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ ilp->hw.init = &init;
+ err = clk_hw_register(NULL, &ilp->hw);
+ if (err)
+ goto err_free_ilp;
+
+ err = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &ilp->hw);
+ if (err)
+ goto err_clk_hw_unregister;
+
+ return;
+
+err_clk_hw_unregister:
+ clk_hw_unregister(&ilp->hw);
+err_free_ilp:
+ kfree(ilp);
+ pr_err("Failed to init ILP clock: %d\n", err);
+}
+
+/* We need this very early for arch code, before the device model gets ready */
+CLK_OF_DECLARE(bcm53573_ilp_clk, "brcm,bcm53573-ilp", bcm53573_ilp_init);
diff --git a/drivers/clk/bcm/clk-bcm63268-timer.c b/drivers/clk/bcm/clk-bcm63268-timer.c
new file mode 100644
index 0000000000..463710d272
--- /dev/null
+++ b/drivers/clk/bcm/clk-bcm63268-timer.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * BCM63268 Timer Clock and Reset Controller Driver
+ *
+ * Copyright (C) 2023 Álvaro Fernández Rojas <noltari@gmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/container_of.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/clock/bcm63268-clock.h>
+
+#define BCM63268_TIMER_RESET_SLEEP_MIN_US 10000
+#define BCM63268_TIMER_RESET_SLEEP_MAX_US 20000
+
+struct bcm63268_tclkrst_hw {
+ void __iomem *regs;
+ spinlock_t lock;
+
+ struct reset_controller_dev rcdev;
+ struct clk_hw_onecell_data data;
+};
+
+struct bcm63268_tclk_table_entry {
+ const char * const name;
+ u8 bit;
+};
+
+static const struct bcm63268_tclk_table_entry bcm63268_timer_clocks[] = {
+ {
+ .name = "ephy1",
+ .bit = BCM63268_TCLK_EPHY1,
+ }, {
+ .name = "ephy2",
+ .bit = BCM63268_TCLK_EPHY2,
+ }, {
+ .name = "ephy3",
+ .bit = BCM63268_TCLK_EPHY3,
+ }, {
+ .name = "gphy1",
+ .bit = BCM63268_TCLK_GPHY1,
+ }, {
+ .name = "dsl",
+ .bit = BCM63268_TCLK_DSL,
+ }, {
+ .name = "wakeon_ephy",
+ .bit = BCM63268_TCLK_WAKEON_EPHY,
+ }, {
+ .name = "wakeon_dsl",
+ .bit = BCM63268_TCLK_WAKEON_DSL,
+ }, {
+ .name = "fap1_pll",
+ .bit = BCM63268_TCLK_FAP1,
+ }, {
+ .name = "fap2_pll",
+ .bit = BCM63268_TCLK_FAP2,
+ }, {
+ .name = "uto_50",
+ .bit = BCM63268_TCLK_UTO_50,
+ }, {
+ .name = "uto_extin",
+ .bit = BCM63268_TCLK_UTO_EXTIN,
+ }, {
+ .name = "usb_ref",
+ .bit = BCM63268_TCLK_USB_REF,
+ }, {
+ /* sentinel */
+ }
+};
+
+static inline struct bcm63268_tclkrst_hw *
+to_bcm63268_timer_reset(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct bcm63268_tclkrst_hw, rcdev);
+}
+
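+/*
+ * The timer reset register bits are effectively active-low: clearing a bit
+ * asserts the reset for the corresponding block (see the update helper
+ * below), and the status callback reports a reset as asserted when the bit
+ * reads back as zero.
+ */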
+static int bcm63268_timer_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct bcm63268_tclkrst_hw *reset = to_bcm63268_timer_reset(rcdev);
+ unsigned long flags;
+ uint32_t val;
+
+ spin_lock_irqsave(&reset->lock, flags);
+ val = __raw_readl(reset->regs);
+ if (assert)
+ val &= ~BIT(id);
+ else
+ val |= BIT(id);
+ __raw_writel(val, reset->regs);
+ spin_unlock_irqrestore(&reset->lock, flags);
+
+ return 0;
+}
+
+static int bcm63268_timer_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return bcm63268_timer_reset_update(rcdev, id, true);
+}
+
+static int bcm63268_timer_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return bcm63268_timer_reset_update(rcdev, id, false);
+}
+
+static int bcm63268_timer_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ bcm63268_timer_reset_update(rcdev, id, true);
+ usleep_range(BCM63268_TIMER_RESET_SLEEP_MIN_US,
+ BCM63268_TIMER_RESET_SLEEP_MAX_US);
+
+ bcm63268_timer_reset_update(rcdev, id, false);
+ /*
+ * Ensure the component is taken out of the reset state by also
+ * sleeping after deasserting the reset. Otherwise, the component
+ * may not be ready for operation.
+ */
+ usleep_range(BCM63268_TIMER_RESET_SLEEP_MIN_US,
+ BCM63268_TIMER_RESET_SLEEP_MAX_US);
+
+ return 0;
+}
+
+static int bcm63268_timer_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct bcm63268_tclkrst_hw *reset = to_bcm63268_timer_reset(rcdev);
+
+ return !(__raw_readl(reset->regs) & BIT(id));
+}
+
+static const struct reset_control_ops bcm63268_timer_reset_ops = {
+ .assert = bcm63268_timer_reset_assert,
+ .deassert = bcm63268_timer_reset_deassert,
+ .reset = bcm63268_timer_reset_reset,
+ .status = bcm63268_timer_reset_status,
+};
+
+static int bcm63268_tclk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct bcm63268_tclk_table_entry *entry;
+ struct bcm63268_tclkrst_hw *hw;
+ struct clk_hw *clk;
+ u8 maxbit = 0;
+ int i, ret;
+
+ for (entry = bcm63268_timer_clocks; entry->name; entry++)
+ maxbit = max(maxbit, entry->bit);
+ maxbit++;
+
+ hw = devm_kzalloc(&pdev->dev, struct_size(hw, data.hws, maxbit),
+ GFP_KERNEL);
+ if (!hw)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, hw);
+
+ spin_lock_init(&hw->lock);
+
+ hw->data.num = maxbit;
+ for (i = 0; i < maxbit; i++)
+ hw->data.hws[i] = ERR_PTR(-ENODEV);
+
+ hw->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hw->regs))
+ return PTR_ERR(hw->regs);
+
+ for (entry = bcm63268_timer_clocks; entry->name; entry++) {
+ clk = devm_clk_hw_register_gate(dev, entry->name, NULL, 0,
+ hw->regs, entry->bit,
+ CLK_GATE_BIG_ENDIAN,
+ &hw->lock);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ hw->data.hws[entry->bit] = clk;
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &hw->data);
+ if (ret)
+ return ret;
+
+ hw->rcdev.of_node = dev->of_node;
+ hw->rcdev.ops = &bcm63268_timer_reset_ops;
+
+ ret = devm_reset_controller_register(dev, &hw->rcdev);
+ if (ret)
+ dev_err(dev, "Failed to register reset controller\n");
+
+ return 0;
+}
+
+static const struct of_device_id bcm63268_tclk_dt_ids[] = {
+ { .compatible = "brcm,bcm63268-timer-clocks" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver bcm63268_tclk = {
+ .probe = bcm63268_tclk_probe,
+ .driver = {
+ .name = "bcm63268-timer-clock",
+ .of_match_table = bcm63268_tclk_dt_ids,
+ },
+};
+builtin_platform_driver(bcm63268_tclk);
diff --git a/drivers/clk/bcm/clk-bcm63xx-gate.c b/drivers/clk/bcm/clk-bcm63xx-gate.c
new file mode 100644
index 0000000000..36c7b302e3
--- /dev/null
+++ b/drivers/clk/bcm/clk-bcm63xx-gate.c
@@ -0,0 +1,576 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/clk-provider.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/bcm3368-clock.h>
+#include <dt-bindings/clock/bcm6318-clock.h>
+#include <dt-bindings/clock/bcm6328-clock.h>
+#include <dt-bindings/clock/bcm6358-clock.h>
+#include <dt-bindings/clock/bcm6362-clock.h>
+#include <dt-bindings/clock/bcm6368-clock.h>
+#include <dt-bindings/clock/bcm63268-clock.h>
+
+struct clk_bcm63xx_table_entry {
+ const char * const name;
+ u8 bit;
+ unsigned long flags;
+};
+
+struct clk_bcm63xx_hw {
+ void __iomem *regs;
+ spinlock_t lock;
+
+ struct clk_hw_onecell_data data;
+};
+
+static const struct clk_bcm63xx_table_entry bcm3368_clocks[] = {
+ {
+ .name = "mac",
+ .bit = BCM3368_CLK_MAC,
+ }, {
+ .name = "tc",
+ .bit = BCM3368_CLK_TC,
+ }, {
+ .name = "us_top",
+ .bit = BCM3368_CLK_US_TOP,
+ }, {
+ .name = "ds_top",
+ .bit = BCM3368_CLK_DS_TOP,
+ }, {
+ .name = "acm",
+ .bit = BCM3368_CLK_ACM,
+ }, {
+ .name = "spi",
+ .bit = BCM3368_CLK_SPI,
+ }, {
+ .name = "usbs",
+ .bit = BCM3368_CLK_USBS,
+ }, {
+ .name = "bmu",
+ .bit = BCM3368_CLK_BMU,
+ }, {
+ .name = "pcm",
+ .bit = BCM3368_CLK_PCM,
+ }, {
+ .name = "ntp",
+ .bit = BCM3368_CLK_NTP,
+ }, {
+ .name = "acp_b",
+ .bit = BCM3368_CLK_ACP_B,
+ }, {
+ .name = "acp_a",
+ .bit = BCM3368_CLK_ACP_A,
+ }, {
+ .name = "emusb",
+ .bit = BCM3368_CLK_EMUSB,
+ }, {
+ .name = "enet0",
+ .bit = BCM3368_CLK_ENET0,
+ }, {
+ .name = "enet1",
+ .bit = BCM3368_CLK_ENET1,
+ }, {
+ .name = "usbsu",
+ .bit = BCM3368_CLK_USBSU,
+ }, {
+ .name = "ephy",
+ .bit = BCM3368_CLK_EPHY,
+ }, {
+ /* sentinel */
+ },
+};
+
+static const struct clk_bcm63xx_table_entry bcm6318_clocks[] = {
+ {
+ .name = "adsl_asb",
+ .bit = BCM6318_CLK_ADSL_ASB,
+ }, {
+ .name = "usb_asb",
+ .bit = BCM6318_CLK_USB_ASB,
+ }, {
+ .name = "mips_asb",
+ .bit = BCM6318_CLK_MIPS_ASB,
+ }, {
+ .name = "pcie_asb",
+ .bit = BCM6318_CLK_PCIE_ASB,
+ }, {
+ .name = "phymips_asb",
+ .bit = BCM6318_CLK_PHYMIPS_ASB,
+ }, {
+ .name = "robosw_asb",
+ .bit = BCM6318_CLK_ROBOSW_ASB,
+ }, {
+ .name = "sar_asb",
+ .bit = BCM6318_CLK_SAR_ASB,
+ }, {
+ .name = "sdr_asb",
+ .bit = BCM6318_CLK_SDR_ASB,
+ }, {
+ .name = "swreg_asb",
+ .bit = BCM6318_CLK_SWREG_ASB,
+ }, {
+ .name = "periph_asb",
+ .bit = BCM6318_CLK_PERIPH_ASB,
+ }, {
+ .name = "cpubus160",
+ .bit = BCM6318_CLK_CPUBUS160,
+ }, {
+ .name = "adsl",
+ .bit = BCM6318_CLK_ADSL,
+ }, {
+ .name = "sar125",
+ .bit = BCM6318_CLK_SAR125,
+ }, {
+ .name = "mips",
+ .bit = BCM6318_CLK_MIPS,
+ .flags = CLK_IS_CRITICAL,
+ }, {
+ .name = "pcie",
+ .bit = BCM6318_CLK_PCIE,
+ }, {
+ .name = "robosw250",
+ .bit = BCM6318_CLK_ROBOSW250,
+ }, {
+ .name = "robosw025",
+ .bit = BCM6318_CLK_ROBOSW025,
+ }, {
+ .name = "sdr",
+ .bit = BCM6318_CLK_SDR,
+ .flags = CLK_IS_CRITICAL,
+ }, {
+ .name = "usbd",
+ .bit = BCM6318_CLK_USBD,
+ }, {
+ .name = "hsspi",
+ .bit = BCM6318_CLK_HSSPI,
+ }, {
+ .name = "pcie25",
+ .bit = BCM6318_CLK_PCIE25,
+ }, {
+ .name = "phymips",
+ .bit = BCM6318_CLK_PHYMIPS,
+ }, {
+ .name = "afe",
+ .bit = BCM6318_CLK_AFE,
+ }, {
+ .name = "qproc",
+ .bit = BCM6318_CLK_QPROC,
+ }, {
+ /* sentinel */
+ },
+};
+
+static const struct clk_bcm63xx_table_entry bcm6318_ubus_clocks[] = {
+ {
+ .name = "adsl-ubus",
+ .bit = BCM6318_UCLK_ADSL,
+ }, {
+ .name = "arb-ubus",
+ .bit = BCM6318_UCLK_ARB,
+ .flags = CLK_IS_CRITICAL,
+ }, {
+ .name = "mips-ubus",
+ .bit = BCM6318_UCLK_MIPS,
+ .flags = CLK_IS_CRITICAL,
+ }, {
+ .name = "pcie-ubus",
+ .bit = BCM6318_UCLK_PCIE,
+ }, {
+ .name = "periph-ubus",
+ .bit = BCM6318_UCLK_PERIPH,
+ .flags = CLK_IS_CRITICAL,
+ }, {
+ .name = "phymips-ubus",
+ .bit = BCM6318_UCLK_PHYMIPS,
+ }, {
+ .name = "robosw-ubus",
+ .bit = BCM6318_UCLK_ROBOSW,
+ }, {
+ .name = "sar-ubus",
+ .bit = BCM6318_UCLK_SAR,
+ }, {
+ .name = "sdr-ubus",
+ .bit = BCM6318_UCLK_SDR,
+ }, {
+ .name = "usb-ubus",
+ .bit = BCM6318_UCLK_USB,
+ }, {
+ /* sentinel */
+ },
+};
+
+static const struct clk_bcm63xx_table_entry bcm6328_clocks[] = {
+ {
+ .name = "phy_mips",
+ .bit = BCM6328_CLK_PHYMIPS,
+ }, {
+ .name = "adsl_qproc",
+ .bit = BCM6328_CLK_ADSL_QPROC,
+ }, {
+ .name = "adsl_afe",
+ .bit = BCM6328_CLK_ADSL_AFE,
+ }, {
+ .name = "adsl",
+ .bit = BCM6328_CLK_ADSL,
+ }, {
+ .name = "mips",
+ .bit = BCM6328_CLK_MIPS,
+ .flags = CLK_IS_CRITICAL,
+ }, {
+ .name = "sar",
+ .bit = BCM6328_CLK_SAR,
+ }, {
+ .name = "pcm",
+ .bit = BCM6328_CLK_PCM,
+ }, {
+ .name = "usbd",
+ .bit = BCM6328_CLK_USBD,
+ }, {
+ .name = "usbh",
+ .bit = BCM6328_CLK_USBH,
+ }, {
+ .name = "hsspi",
+ .bit = BCM6328_CLK_HSSPI,
+ }, {
+ .name = "pcie",
+ .bit = BCM6328_CLK_PCIE,
+ }, {
+ .name = "robosw",
+ .bit = BCM6328_CLK_ROBOSW,
+ }, {
+ /* sentinel */
+ },
+};
+
+static const struct clk_bcm63xx_table_entry bcm6358_clocks[] = {
+ {
+ .name = "enet",
+ .bit = BCM6358_CLK_ENET,
+ }, {
+ .name = "adslphy",
+ .bit = BCM6358_CLK_ADSLPHY,
+ }, {
+ .name = "pcm",
+ .bit = BCM6358_CLK_PCM,
+ }, {
+ .name = "spi",
+ .bit = BCM6358_CLK_SPI,
+ }, {
+ .name = "usbs",
+ .bit = BCM6358_CLK_USBS,
+ }, {
+ .name = "sar",
+ .bit = BCM6358_CLK_SAR,
+ }, {
+ .name = "emusb",
+ .bit = BCM6358_CLK_EMUSB,
+ }, {
+ .name = "enet0",
+ .bit = BCM6358_CLK_ENET0,
+ }, {
+ .name = "enet1",
+ .bit = BCM6358_CLK_ENET1,
+ }, {
+ .name = "usbsu",
+ .bit = BCM6358_CLK_USBSU,
+ }, {
+ .name = "ephy",
+ .bit = BCM6358_CLK_EPHY,
+ }, {
+ /* sentinel */
+ },
+};
+
+static const struct clk_bcm63xx_table_entry bcm6362_clocks[] = {
+ {
+ .name = "adsl_qproc",
+ .bit = BCM6362_CLK_ADSL_QPROC,
+ }, {
+ .name = "adsl_afe",
+ .bit = BCM6362_CLK_ADSL_AFE,
+ }, {
+ .name = "adsl",
+ .bit = BCM6362_CLK_ADSL,
+ }, {
+ .name = "mips",
+ .bit = BCM6362_CLK_MIPS,
+ .flags = CLK_IS_CRITICAL,
+ }, {
+ .name = "wlan_ocp",
+ .bit = BCM6362_CLK_WLAN_OCP,
+ }, {
+ .name = "swpkt_usb",
+ .bit = BCM6362_CLK_SWPKT_USB,
+ }, {
+ .name = "swpkt_sar",
+ .bit = BCM6362_CLK_SWPKT_SAR,
+ }, {
+ .name = "sar",
+ .bit = BCM6362_CLK_SAR,
+ }, {
+ .name = "robosw",
+ .bit = BCM6362_CLK_ROBOSW,
+ }, {
+ .name = "pcm",
+ .bit = BCM6362_CLK_PCM,
+ }, {
+ .name = "usbd",
+ .bit = BCM6362_CLK_USBD,
+ }, {
+ .name = "usbh",
+ .bit = BCM6362_CLK_USBH,
+ }, {
+ .name = "ipsec",
+ .bit = BCM6362_CLK_IPSEC,
+ }, {
+ .name = "spi",
+ .bit = BCM6362_CLK_SPI,
+ }, {
+ .name = "hsspi",
+ .bit = BCM6362_CLK_HSSPI,
+ }, {
+ .name = "pcie",
+ .bit = BCM6362_CLK_PCIE,
+ }, {
+ .name = "fap",
+ .bit = BCM6362_CLK_FAP,
+ }, {
+ .name = "phymips",
+ .bit = BCM6362_CLK_PHYMIPS,
+ }, {
+ .name = "nand",
+ .bit = BCM6362_CLK_NAND,
+ }, {
+ /* sentinel */
+ },
+};
+
+static const struct clk_bcm63xx_table_entry bcm6368_clocks[] = {
+ {
+ .name = "vdsl_qproc",
+ .bit = BCM6368_CLK_VDSL_QPROC,
+ }, {
+ .name = "vdsl_afe",
+ .bit = BCM6368_CLK_VDSL_AFE,
+ }, {
+ .name = "vdsl_bonding",
+ .bit = BCM6368_CLK_VDSL_BONDING,
+ }, {
+ .name = "vdsl",
+ .bit = BCM6368_CLK_VDSL,
+ }, {
+ .name = "phymips",
+ .bit = BCM6368_CLK_PHYMIPS,
+ }, {
+ .name = "swpkt_usb",
+ .bit = BCM6368_CLK_SWPKT_USB,
+ }, {
+ .name = "swpkt_sar",
+ .bit = BCM6368_CLK_SWPKT_SAR,
+ }, {
+ .name = "spi",
+ .bit = BCM6368_CLK_SPI,
+ }, {
+ .name = "usbd",
+ .bit = BCM6368_CLK_USBD,
+ }, {
+ .name = "sar",
+ .bit = BCM6368_CLK_SAR,
+ }, {
+ .name = "robosw",
+ .bit = BCM6368_CLK_ROBOSW,
+ }, {
+ .name = "utopia",
+ .bit = BCM6368_CLK_UTOPIA,
+ }, {
+ .name = "pcm",
+ .bit = BCM6368_CLK_PCM,
+ }, {
+ .name = "usbh",
+ .bit = BCM6368_CLK_USBH,
+ }, {
+ .name = "disable_gless",
+ .bit = BCM6368_CLK_DIS_GLESS,
+ }, {
+ .name = "nand",
+ .bit = BCM6368_CLK_NAND,
+ }, {
+ .name = "ipsec",
+ .bit = BCM6368_CLK_IPSEC,
+ }, {
+ /* sentinel */
+ },
+};
+
+static const struct clk_bcm63xx_table_entry bcm63268_clocks[] = {
+ {
+ .name = "disable_gless",
+ .bit = BCM63268_CLK_DIS_GLESS,
+ }, {
+ .name = "vdsl_qproc",
+ .bit = BCM63268_CLK_VDSL_QPROC,
+ }, {
+ .name = "vdsl_afe",
+ .bit = BCM63268_CLK_VDSL_AFE,
+ }, {
+ .name = "vdsl",
+ .bit = BCM63268_CLK_VDSL,
+ }, {
+ .name = "mips",
+ .bit = BCM63268_CLK_MIPS,
+ .flags = CLK_IS_CRITICAL,
+ }, {
+ .name = "wlan_ocp",
+ .bit = BCM63268_CLK_WLAN_OCP,
+ }, {
+ .name = "dect",
+ .bit = BCM63268_CLK_DECT,
+ }, {
+ .name = "fap0",
+ .bit = BCM63268_CLK_FAP0,
+ }, {
+ .name = "fap1",
+ .bit = BCM63268_CLK_FAP1,
+ }, {
+ .name = "sar",
+ .bit = BCM63268_CLK_SAR,
+ }, {
+ .name = "robosw",
+ .bit = BCM63268_CLK_ROBOSW,
+ }, {
+ .name = "pcm",
+ .bit = BCM63268_CLK_PCM,
+ }, {
+ .name = "usbd",
+ .bit = BCM63268_CLK_USBD,
+ }, {
+ .name = "usbh",
+ .bit = BCM63268_CLK_USBH,
+ }, {
+ .name = "ipsec",
+ .bit = BCM63268_CLK_IPSEC,
+ }, {
+ .name = "spi",
+ .bit = BCM63268_CLK_SPI,
+ }, {
+ .name = "hsspi",
+ .bit = BCM63268_CLK_HSSPI,
+ }, {
+ .name = "pcie",
+ .bit = BCM63268_CLK_PCIE,
+ }, {
+ .name = "phymips",
+ .bit = BCM63268_CLK_PHYMIPS,
+ }, {
+ .name = "gmac",
+ .bit = BCM63268_CLK_GMAC,
+ }, {
+ .name = "nand",
+ .bit = BCM63268_CLK_NAND,
+ }, {
+ .name = "tbus",
+ .bit = BCM63268_CLK_TBUS,
+ }, {
+ .name = "robosw250",
+ .bit = BCM63268_CLK_ROBOSW250,
+ }, {
+ /* sentinel */
+ },
+};
+
+static int clk_bcm63xx_probe(struct platform_device *pdev)
+{
+ const struct clk_bcm63xx_table_entry *entry, *table;
+ struct clk_bcm63xx_hw *hw;
+ u8 maxbit = 0;
+ int i, ret;
+
+ table = of_device_get_match_data(&pdev->dev);
+ if (!table)
+ return -EINVAL;
+
+ for (entry = table; entry->name; entry++)
+ maxbit = max_t(u8, maxbit, entry->bit);
+ maxbit++;
+
+ hw = devm_kzalloc(&pdev->dev, struct_size(hw, data.hws, maxbit),
+ GFP_KERNEL);
+ if (!hw)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, hw);
+
+ spin_lock_init(&hw->lock);
+
+ hw->data.num = maxbit;
+ for (i = 0; i < maxbit; i++)
+ hw->data.hws[i] = ERR_PTR(-ENODEV);
+
+ hw->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hw->regs))
+ return PTR_ERR(hw->regs);
+
+ for (entry = table; entry->name; entry++) {
+ struct clk_hw *clk;
+
+ clk = clk_hw_register_gate(&pdev->dev, entry->name, NULL,
+ entry->flags, hw->regs, entry->bit,
+ CLK_GATE_BIG_ENDIAN, &hw->lock);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto out_err;
+ }
+
+ hw->data.hws[entry->bit] = clk;
+ }
+
+ ret = of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_onecell_get,
+ &hw->data);
+ if (!ret)
+ return 0;
+out_err:
+ for (i = 0; i < hw->data.num; i++) {
+ if (!IS_ERR(hw->data.hws[i]))
+ clk_hw_unregister_gate(hw->data.hws[i]);
+ }
+
+ return ret;
+}
+
+static void clk_bcm63xx_remove(struct platform_device *pdev)
+{
+ struct clk_bcm63xx_hw *hw = platform_get_drvdata(pdev);
+ int i;
+
+ of_clk_del_provider(pdev->dev.of_node);
+
+ for (i = 0; i < hw->data.num; i++) {
+ if (!IS_ERR(hw->data.hws[i]))
+ clk_hw_unregister_gate(hw->data.hws[i]);
+ }
+}
+
+static const struct of_device_id clk_bcm63xx_dt_ids[] = {
+ { .compatible = "brcm,bcm3368-clocks", .data = &bcm3368_clocks, },
+ { .compatible = "brcm,bcm6318-clocks", .data = &bcm6318_clocks, },
+ { .compatible = "brcm,bcm6318-ubus-clocks", .data = &bcm6318_ubus_clocks, },
+ { .compatible = "brcm,bcm6328-clocks", .data = &bcm6328_clocks, },
+ { .compatible = "brcm,bcm6358-clocks", .data = &bcm6358_clocks, },
+ { .compatible = "brcm,bcm6362-clocks", .data = &bcm6362_clocks, },
+ { .compatible = "brcm,bcm6368-clocks", .data = &bcm6368_clocks, },
+ { .compatible = "brcm,bcm63268-clocks", .data = &bcm63268_clocks, },
+ { }
+};
+
+static struct platform_driver clk_bcm63xx = {
+ .probe = clk_bcm63xx_probe,
+ .remove_new = clk_bcm63xx_remove,
+ .driver = {
+ .name = "bcm63xx-clock",
+ .of_match_table = clk_bcm63xx_dt_ids,
+ },
+};
+builtin_platform_driver(clk_bcm63xx);
diff --git a/drivers/clk/bcm/clk-bcm63xx.c b/drivers/clk/bcm/clk-bcm63xx.c
new file mode 100644
index 0000000000..c8383834fb
--- /dev/null
+++ b/drivers/clk/bcm/clk-bcm63xx.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2015 Broadcom Corporation
+#include <linux/init.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include "clk-iproc.h"
+
+static void __init bcm63138_armpll_init(struct device_node *node)
+{
+ iproc_armpll_setup(node);
+}
+CLK_OF_DECLARE(bcm63138_armpll, "brcm,bcm63138-armpll", bcm63138_armpll_init);
diff --git a/drivers/clk/bcm/clk-cygnus.c b/drivers/clk/bcm/clk-cygnus.c
new file mode 100644
index 0000000000..43b04fc4c4
--- /dev/null
+++ b/drivers/clk/bcm/clk-cygnus.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2014 Broadcom Corporation
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/clkdev.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+
+#include <dt-bindings/clock/bcm-cygnus.h>
+#include "clk-iproc.h"
+
+#define REG_VAL(o, s, w) { .offset = o, .shift = s, .width = w, }
+
+#define AON_VAL(o, pw, ps, is) { .offset = o, .pwr_width = pw, \
+ .pwr_shift = ps, .iso_shift = is }
+
+#define SW_CTRL_VAL(o, s) { .offset = o, .shift = s, }
+
+#define ASIU_DIV_VAL(o, es, hs, hw, ls, lw) \
+ { .offset = o, .en_shift = es, .high_shift = hs, \
+ .high_width = hw, .low_shift = ls, .low_width = lw }
+
+#define RESET_VAL(o, rs, prs) { .offset = o, .reset_shift = rs, \
+ .p_reset_shift = prs }
+
+#define DF_VAL(o, kis, kiw, kps, kpw, kas, kaw) { .offset = o, .ki_shift = kis,\
+ .ki_width = kiw, .kp_shift = kps, .kp_width = kpw, .ka_shift = kas, \
+ .ka_width = kaw }
+
+#define VCO_CTRL_VAL(uo, lo) { .u_offset = uo, .l_offset = lo }
+
+#define ENABLE_VAL(o, es, hs, bs) { .offset = o, .enable_shift = es, \
+ .hold_shift = hs, .bypass_shift = bs }
+
+#define ASIU_GATE_VAL(o, es) { .offset = o, .en_shift = es }
+
+static void __init cygnus_armpll_init(struct device_node *node)
+{
+ iproc_armpll_setup(node);
+}
+CLK_OF_DECLARE(cygnus_armpll, "brcm,cygnus-armpll", cygnus_armpll_init);
+
+static const struct iproc_pll_ctrl genpll = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_HAS_NDIV_FRAC |
+ IPROC_CLK_PLL_NEEDS_SW_CFG,
+ .aon = AON_VAL(0x0, 2, 1, 0),
+ .reset = RESET_VAL(0x0, 11, 10),
+ .dig_filter = DF_VAL(0x0, 4, 3, 0, 4, 7, 3),
+ .sw_ctrl = SW_CTRL_VAL(0x10, 31),
+ .ndiv_int = REG_VAL(0x10, 20, 10),
+ .ndiv_frac = REG_VAL(0x10, 0, 20),
+ .pdiv = REG_VAL(0x14, 0, 4),
+ .vco_ctrl = VCO_CTRL_VAL(0x18, 0x1c),
+ .status = REG_VAL(0x28, 12, 1),
+};
+
+static const struct iproc_clk_ctrl genpll_clk[] = {
+ [BCM_CYGNUS_GENPLL_AXI21_CLK] = {
+ .channel = BCM_CYGNUS_GENPLL_AXI21_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 6, 0, 12),
+ .mdiv = REG_VAL(0x20, 0, 8),
+ },
+ [BCM_CYGNUS_GENPLL_250MHZ_CLK] = {
+ .channel = BCM_CYGNUS_GENPLL_250MHZ_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 7, 1, 13),
+ .mdiv = REG_VAL(0x20, 10, 8),
+ },
+ [BCM_CYGNUS_GENPLL_IHOST_SYS_CLK] = {
+ .channel = BCM_CYGNUS_GENPLL_IHOST_SYS_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 8, 2, 14),
+ .mdiv = REG_VAL(0x20, 20, 8),
+ },
+ [BCM_CYGNUS_GENPLL_ENET_SW_CLK] = {
+ .channel = BCM_CYGNUS_GENPLL_ENET_SW_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 9, 3, 15),
+ .mdiv = REG_VAL(0x24, 0, 8),
+ },
+ [BCM_CYGNUS_GENPLL_AUDIO_125_CLK] = {
+ .channel = BCM_CYGNUS_GENPLL_AUDIO_125_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 10, 4, 16),
+ .mdiv = REG_VAL(0x24, 10, 8),
+ },
+ [BCM_CYGNUS_GENPLL_CAN_CLK] = {
+ .channel = BCM_CYGNUS_GENPLL_CAN_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 11, 5, 17),
+ .mdiv = REG_VAL(0x24, 20, 8),
+ },
+};
+
+static void __init cygnus_genpll_clk_init(struct device_node *node)
+{
+ iproc_pll_clk_setup(node, &genpll, NULL, 0, genpll_clk,
+ ARRAY_SIZE(genpll_clk));
+}
+CLK_OF_DECLARE(cygnus_genpll, "brcm,cygnus-genpll", cygnus_genpll_clk_init);
+
+static const struct iproc_pll_ctrl lcpll0 = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_NEEDS_SW_CFG,
+ .aon = AON_VAL(0x0, 2, 5, 4),
+ .reset = RESET_VAL(0x0, 31, 30),
+ .dig_filter = DF_VAL(0x0, 27, 3, 23, 4, 19, 4),
+ .sw_ctrl = SW_CTRL_VAL(0x4, 31),
+ .ndiv_int = REG_VAL(0x4, 16, 10),
+ .pdiv = REG_VAL(0x4, 26, 4),
+ .vco_ctrl = VCO_CTRL_VAL(0x10, 0x14),
+ .status = REG_VAL(0x18, 12, 1),
+};
+
+static const struct iproc_clk_ctrl lcpll0_clk[] = {
+ [BCM_CYGNUS_LCPLL0_PCIE_PHY_REF_CLK] = {
+ .channel = BCM_CYGNUS_LCPLL0_PCIE_PHY_REF_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 7, 1, 13),
+ .mdiv = REG_VAL(0x8, 0, 8),
+ },
+ [BCM_CYGNUS_LCPLL0_DDR_PHY_CLK] = {
+ .channel = BCM_CYGNUS_LCPLL0_DDR_PHY_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 8, 2, 14),
+ .mdiv = REG_VAL(0x8, 10, 8),
+ },
+ [BCM_CYGNUS_LCPLL0_SDIO_CLK] = {
+ .channel = BCM_CYGNUS_LCPLL0_SDIO_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 9, 3, 15),
+ .mdiv = REG_VAL(0x8, 20, 8),
+ },
+ [BCM_CYGNUS_LCPLL0_USB_PHY_REF_CLK] = {
+ .channel = BCM_CYGNUS_LCPLL0_USB_PHY_REF_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 10, 4, 16),
+ .mdiv = REG_VAL(0xc, 0, 8),
+ },
+ [BCM_CYGNUS_LCPLL0_SMART_CARD_CLK] = {
+ .channel = BCM_CYGNUS_LCPLL0_SMART_CARD_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 11, 5, 17),
+ .mdiv = REG_VAL(0xc, 10, 8),
+ },
+ [BCM_CYGNUS_LCPLL0_CH5_UNUSED] = {
+ .channel = BCM_CYGNUS_LCPLL0_CH5_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 12, 6, 18),
+ .mdiv = REG_VAL(0xc, 20, 8),
+ },
+};
+
+static void __init cygnus_lcpll0_clk_init(struct device_node *node)
+{
+ iproc_pll_clk_setup(node, &lcpll0, NULL, 0, lcpll0_clk,
+ ARRAY_SIZE(lcpll0_clk));
+}
+CLK_OF_DECLARE(cygnus_lcpll0, "brcm,cygnus-lcpll0", cygnus_lcpll0_clk_init);
+
+/*
+ * MIPI PLL VCO frequency parameter table
+ */
+static const struct iproc_pll_vco_param mipipll_vco_params[] = {
+ /* rate (Hz) ndiv_int ndiv_frac pdiv */
+ { 750000000UL, 30, 0, 1 },
+ { 1000000000UL, 40, 0, 1 },
+	{ 1350000000UL,  54,      0,        1    },
+ { 2000000000UL, 80, 0, 1 },
+ { 2100000000UL, 84, 0, 1 },
+ { 2250000000UL, 90, 0, 1 },
+ { 2500000000UL, 100, 0, 1 },
+ { 2700000000UL, 54, 0, 0 },
+ { 2975000000UL, 119, 0, 1 },
+ { 3100000000UL, 124, 0, 1 },
+ { 3150000000UL, 126, 0, 1 },
+};
+
+static const struct iproc_pll_ctrl mipipll = {
+ .flags = IPROC_CLK_PLL_ASIU | IPROC_CLK_PLL_HAS_NDIV_FRAC |
+ IPROC_CLK_NEEDS_READ_BACK,
+ .aon = AON_VAL(0x0, 4, 17, 16),
+ .asiu = ASIU_GATE_VAL(0x0, 3),
+ .reset = RESET_VAL(0x0, 11, 10),
+ .dig_filter = DF_VAL(0x0, 4, 3, 0, 4, 7, 4),
+ .ndiv_int = REG_VAL(0x10, 20, 10),
+ .ndiv_frac = REG_VAL(0x10, 0, 20),
+ .pdiv = REG_VAL(0x14, 0, 4),
+ .vco_ctrl = VCO_CTRL_VAL(0x18, 0x1c),
+ .status = REG_VAL(0x28, 12, 1),
+};
+
+static const struct iproc_clk_ctrl mipipll_clk[] = {
+ [BCM_CYGNUS_MIPIPLL_CH0_UNUSED] = {
+ .channel = BCM_CYGNUS_MIPIPLL_CH0_UNUSED,
+ .flags = IPROC_CLK_NEEDS_READ_BACK,
+ .enable = ENABLE_VAL(0x4, 12, 6, 18),
+ .mdiv = REG_VAL(0x20, 0, 8),
+ },
+ [BCM_CYGNUS_MIPIPLL_CH1_LCD] = {
+ .channel = BCM_CYGNUS_MIPIPLL_CH1_LCD,
+ .flags = IPROC_CLK_NEEDS_READ_BACK,
+ .enable = ENABLE_VAL(0x4, 13, 7, 19),
+ .mdiv = REG_VAL(0x20, 10, 8),
+ },
+ [BCM_CYGNUS_MIPIPLL_CH2_V3D] = {
+ .channel = BCM_CYGNUS_MIPIPLL_CH2_V3D,
+ .flags = IPROC_CLK_NEEDS_READ_BACK,
+ .enable = ENABLE_VAL(0x4, 14, 8, 20),
+ .mdiv = REG_VAL(0x20, 20, 8),
+ },
+ [BCM_CYGNUS_MIPIPLL_CH3_UNUSED] = {
+ .channel = BCM_CYGNUS_MIPIPLL_CH3_UNUSED,
+ .flags = IPROC_CLK_NEEDS_READ_BACK,
+ .enable = ENABLE_VAL(0x4, 15, 9, 21),
+ .mdiv = REG_VAL(0x24, 0, 8),
+ },
+ [BCM_CYGNUS_MIPIPLL_CH4_UNUSED] = {
+ .channel = BCM_CYGNUS_MIPIPLL_CH4_UNUSED,
+ .flags = IPROC_CLK_NEEDS_READ_BACK,
+ .enable = ENABLE_VAL(0x4, 16, 10, 22),
+ .mdiv = REG_VAL(0x24, 10, 8),
+ },
+ [BCM_CYGNUS_MIPIPLL_CH5_UNUSED] = {
+ .channel = BCM_CYGNUS_MIPIPLL_CH5_UNUSED,
+ .flags = IPROC_CLK_NEEDS_READ_BACK,
+ .enable = ENABLE_VAL(0x4, 17, 11, 23),
+ .mdiv = REG_VAL(0x24, 20, 8),
+ },
+};
+
+static void __init cygnus_mipipll_clk_init(struct device_node *node)
+{
+ iproc_pll_clk_setup(node, &mipipll, mipipll_vco_params,
+ ARRAY_SIZE(mipipll_vco_params), mipipll_clk,
+ ARRAY_SIZE(mipipll_clk));
+}
+CLK_OF_DECLARE(cygnus_mipipll, "brcm,cygnus-mipipll", cygnus_mipipll_clk_init);
+
+static const struct iproc_asiu_div asiu_div[] = {
+ [BCM_CYGNUS_ASIU_KEYPAD_CLK] = ASIU_DIV_VAL(0x0, 31, 16, 10, 0, 10),
+ [BCM_CYGNUS_ASIU_ADC_CLK] = ASIU_DIV_VAL(0x4, 31, 16, 10, 0, 10),
+ [BCM_CYGNUS_ASIU_PWM_CLK] = ASIU_DIV_VAL(0x8, 31, 16, 10, 0, 10),
+};
+
+static const struct iproc_asiu_gate asiu_gate[] = {
+ [BCM_CYGNUS_ASIU_KEYPAD_CLK] = ASIU_GATE_VAL(0x0, 7),
+ [BCM_CYGNUS_ASIU_ADC_CLK] = ASIU_GATE_VAL(0x0, 9),
+ [BCM_CYGNUS_ASIU_PWM_CLK] = ASIU_GATE_VAL(IPROC_CLK_INVALID_OFFSET, 0),
+};
+
+static void __init cygnus_asiu_init(struct device_node *node)
+{
+ iproc_asiu_setup(node, asiu_div, asiu_gate, ARRAY_SIZE(asiu_div));
+}
+CLK_OF_DECLARE(cygnus_asiu_clk, "brcm,cygnus-asiu-clk", cygnus_asiu_init);
+
+static const struct iproc_pll_ctrl audiopll = {
+ .flags = IPROC_CLK_PLL_NEEDS_SW_CFG | IPROC_CLK_PLL_HAS_NDIV_FRAC |
+ IPROC_CLK_PLL_USER_MODE_ON | IPROC_CLK_PLL_RESET_ACTIVE_LOW |
+ IPROC_CLK_PLL_CALC_PARAM,
+ .reset = RESET_VAL(0x5c, 0, 1),
+ .dig_filter = DF_VAL(0x48, 0, 3, 6, 4, 3, 3),
+ .sw_ctrl = SW_CTRL_VAL(0x4, 0),
+ .ndiv_int = REG_VAL(0x8, 0, 10),
+ .ndiv_frac = REG_VAL(0x8, 10, 20),
+ .pdiv = REG_VAL(0x44, 0, 4),
+ .vco_ctrl = VCO_CTRL_VAL(0x0c, 0x10),
+ .status = REG_VAL(0x54, 0, 1),
+ .macro_mode = REG_VAL(0x0, 0, 3),
+};
+
+static const struct iproc_clk_ctrl audiopll_clk[] = {
+ [BCM_CYGNUS_AUDIOPLL_CH0] = {
+ .channel = BCM_CYGNUS_AUDIOPLL_CH0,
+ .flags = IPROC_CLK_AON | IPROC_CLK_MCLK_DIV_BY_2,
+ .enable = ENABLE_VAL(0x14, 8, 10, 9),
+ .mdiv = REG_VAL(0x14, 0, 8),
+ },
+ [BCM_CYGNUS_AUDIOPLL_CH1] = {
+ .channel = BCM_CYGNUS_AUDIOPLL_CH1,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x18, 8, 10, 9),
+ .mdiv = REG_VAL(0x18, 0, 8),
+ },
+ [BCM_CYGNUS_AUDIOPLL_CH2] = {
+ .channel = BCM_CYGNUS_AUDIOPLL_CH2,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x1c, 8, 10, 9),
+ .mdiv = REG_VAL(0x1c, 0, 8),
+ },
+};
+
+static void __init cygnus_audiopll_clk_init(struct device_node *node)
+{
+ iproc_pll_clk_setup(node, &audiopll, NULL, 0,
+ audiopll_clk, ARRAY_SIZE(audiopll_clk));
+}
+CLK_OF_DECLARE(cygnus_audiopll, "brcm,cygnus-audiopll",
+ cygnus_audiopll_clk_init);
diff --git a/drivers/clk/bcm/clk-hr2.c b/drivers/clk/bcm/clk-hr2.c
new file mode 100644
index 0000000000..9f6318f375
--- /dev/null
+++ b/drivers/clk/bcm/clk-hr2.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2017 Broadcom
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "clk-iproc.h"
+
+static void __init hr2_armpll_init(struct device_node *node)
+{
+ iproc_armpll_setup(node);
+}
+CLK_OF_DECLARE(hr2_armpll, "brcm,hr2-armpll", hr2_armpll_init);
diff --git a/drivers/clk/bcm/clk-iproc-armpll.c b/drivers/clk/bcm/clk-iproc-armpll.c
new file mode 100644
index 0000000000..9e86c0c10b
--- /dev/null
+++ b/drivers/clk/bcm/clk-iproc-armpll.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2014 Broadcom Corporation
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/clkdev.h>
+#include <linux/of_address.h>
+
+#include "clk-iproc.h"
+
+#define IPROC_CLK_MAX_FREQ_POLICY 0x3
+#define IPROC_CLK_POLICY_FREQ_OFFSET 0x008
+#define IPROC_CLK_POLICY_FREQ_POLICY_FREQ_SHIFT 8
+#define IPROC_CLK_POLICY_FREQ_POLICY_FREQ_MASK 0x7
+
+#define IPROC_CLK_PLLARMA_OFFSET 0xc00
+#define IPROC_CLK_PLLARMA_LOCK_SHIFT 28
+#define IPROC_CLK_PLLARMA_PDIV_SHIFT 24
+#define IPROC_CLK_PLLARMA_PDIV_MASK 0xf
+#define IPROC_CLK_PLLARMA_NDIV_INT_SHIFT 8
+#define IPROC_CLK_PLLARMA_NDIV_INT_MASK 0x3ff
+
+#define IPROC_CLK_PLLARMB_OFFSET 0xc04
+#define IPROC_CLK_PLLARMB_NDIV_FRAC_MASK 0xfffff
+
+#define IPROC_CLK_PLLARMC_OFFSET 0xc08
+#define IPROC_CLK_PLLARMC_BYPCLK_EN_SHIFT 8
+#define IPROC_CLK_PLLARMC_MDIV_MASK 0xff
+
+#define IPROC_CLK_PLLARMCTL5_OFFSET 0xc20
+#define IPROC_CLK_PLLARMCTL5_H_MDIV_MASK 0xff
+
+#define IPROC_CLK_PLLARM_OFFSET_OFFSET 0xc24
+#define IPROC_CLK_PLLARM_SW_CTL_SHIFT 29
+#define IPROC_CLK_PLLARM_NDIV_INT_OFFSET_SHIFT 20
+#define IPROC_CLK_PLLARM_NDIV_INT_OFFSET_MASK 0xff
+#define IPROC_CLK_PLLARM_NDIV_FRAC_OFFSET_MASK 0xfffff
+
+#define IPROC_CLK_ARM_DIV_OFFSET 0xe00
+#define IPROC_CLK_ARM_DIV_PLL_SELECT_OVERRIDE_SHIFT 4
+#define IPROC_CLK_ARM_DIV_ARM_PLL_SELECT_MASK 0xf
+
+#define IPROC_CLK_POLICY_DBG_OFFSET 0xec0
+#define IPROC_CLK_POLICY_DBG_ACT_FREQ_SHIFT 12
+#define IPROC_CLK_POLICY_DBG_ACT_FREQ_MASK 0x7
+
+enum iproc_arm_pll_fid {
+ ARM_PLL_FID_CRYSTAL_CLK = 0,
+ ARM_PLL_FID_SYS_CLK = 2,
+ ARM_PLL_FID_CH0_SLOW_CLK = 6,
+ ARM_PLL_FID_CH1_FAST_CLK = 7
+};
+
+struct iproc_arm_pll {
+ struct clk_hw hw;
+ void __iomem *base;
+ unsigned long rate;
+};
+
+#define to_iproc_arm_pll(hw) container_of(hw, struct iproc_arm_pll, hw)
+
+static unsigned int __get_fid(struct iproc_arm_pll *pll)
+{
+ u32 val;
+ unsigned int policy, fid, active_fid;
+
+ val = readl(pll->base + IPROC_CLK_ARM_DIV_OFFSET);
+ if (val & (1 << IPROC_CLK_ARM_DIV_PLL_SELECT_OVERRIDE_SHIFT))
+ policy = val & IPROC_CLK_ARM_DIV_ARM_PLL_SELECT_MASK;
+ else
+ policy = 0;
+
+ /* something is seriously wrong */
+ BUG_ON(policy > IPROC_CLK_MAX_FREQ_POLICY);
+
+ val = readl(pll->base + IPROC_CLK_POLICY_FREQ_OFFSET);
+ fid = (val >> (IPROC_CLK_POLICY_FREQ_POLICY_FREQ_SHIFT * policy)) &
+ IPROC_CLK_POLICY_FREQ_POLICY_FREQ_MASK;
+
+ val = readl(pll->base + IPROC_CLK_POLICY_DBG_OFFSET);
+ active_fid = IPROC_CLK_POLICY_DBG_ACT_FREQ_MASK &
+ (val >> IPROC_CLK_POLICY_DBG_ACT_FREQ_SHIFT);
+ if (fid != active_fid) {
+ pr_debug("%s: fid override %u->%u\n", __func__, fid,
+ active_fid);
+ fid = active_fid;
+ }
+
+ pr_debug("%s: active fid: %u\n", __func__, fid);
+
+ return fid;
+}
+
+/*
+ * Determine the mdiv (post divider) based on the frequency ID being used.
+ * There are 4 sources that can be used to derive the output clock rate:
+ * - 25 MHz Crystal
+ * - System clock
+ * - PLL channel 0 (slow clock)
+ * - PLL channel 1 (fast clock)
+ */
+static int __get_mdiv(struct iproc_arm_pll *pll)
+{
+ unsigned int fid;
+ int mdiv;
+ u32 val;
+
+ fid = __get_fid(pll);
+
+ switch (fid) {
+ case ARM_PLL_FID_CRYSTAL_CLK:
+ case ARM_PLL_FID_SYS_CLK:
+ mdiv = 1;
+ break;
+
+ case ARM_PLL_FID_CH0_SLOW_CLK:
+ val = readl(pll->base + IPROC_CLK_PLLARMC_OFFSET);
+ mdiv = val & IPROC_CLK_PLLARMC_MDIV_MASK;
+ if (mdiv == 0)
+ mdiv = 256;
+ break;
+
+ case ARM_PLL_FID_CH1_FAST_CLK:
+ val = readl(pll->base + IPROC_CLK_PLLARMCTL5_OFFSET);
+ mdiv = val & IPROC_CLK_PLLARMCTL5_H_MDIV_MASK;
+ if (mdiv == 0)
+ mdiv = 256;
+ break;
+
+ default:
+ mdiv = -EFAULT;
+ }
+
+ return mdiv;
+}
+
+static unsigned int __get_ndiv(struct iproc_arm_pll *pll)
+{
+ u32 val;
+ unsigned int ndiv_int, ndiv_frac, ndiv;
+
+ val = readl(pll->base + IPROC_CLK_PLLARM_OFFSET_OFFSET);
+ if (val & (1 << IPROC_CLK_PLLARM_SW_CTL_SHIFT)) {
+ /*
+ * offset mode is active. Read the ndiv from the PLLARM OFFSET
+ * register
+ */
+ ndiv_int = (val >> IPROC_CLK_PLLARM_NDIV_INT_OFFSET_SHIFT) &
+ IPROC_CLK_PLLARM_NDIV_INT_OFFSET_MASK;
+ if (ndiv_int == 0)
+ ndiv_int = 256;
+
+ ndiv_frac = val & IPROC_CLK_PLLARM_NDIV_FRAC_OFFSET_MASK;
+ } else {
+ /* offset mode not active */
+ val = readl(pll->base + IPROC_CLK_PLLARMA_OFFSET);
+ ndiv_int = (val >> IPROC_CLK_PLLARMA_NDIV_INT_SHIFT) &
+ IPROC_CLK_PLLARMA_NDIV_INT_MASK;
+ if (ndiv_int == 0)
+ ndiv_int = 1024;
+
+ val = readl(pll->base + IPROC_CLK_PLLARMB_OFFSET);
+ ndiv_frac = val & IPROC_CLK_PLLARMB_NDIV_FRAC_MASK;
+ }
+
+ ndiv = (ndiv_int << 20) | ndiv_frac;
+
+ return ndiv;
+}
+
+/*
+ * The output frequency of the ARM PLL is calculated based on the ARM PLL
+ * divider values:
+ * pdiv = ARM PLL pre-divider
+ * ndiv = ARM PLL multiplier
+ * mdiv = ARM PLL post divider
+ *
+ * The frequency is calculated by:
+ * ((ndiv * parent clock rate) / pdiv) / mdiv
+ */
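+/*
+ * Worked example (illustrative numbers only, not taken from a datasheet):
+ * with a 25 MHz parent, ndiv_int = 64, ndiv_frac = 0, pdiv = 1 and mdiv = 2,
+ * the output is ((64 * 25000000) / 1) / 2 = 800 MHz.
+ */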
+static unsigned long iproc_arm_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct iproc_arm_pll *pll = to_iproc_arm_pll(hw);
+ u32 val;
+ int mdiv;
+ u64 ndiv;
+ unsigned int pdiv;
+
+ /* in bypass mode, use parent rate */
+ val = readl(pll->base + IPROC_CLK_PLLARMC_OFFSET);
+ if (val & (1 << IPROC_CLK_PLLARMC_BYPCLK_EN_SHIFT)) {
+ pll->rate = parent_rate;
+ return pll->rate;
+ }
+
+ /* PLL needs to be locked */
+ val = readl(pll->base + IPROC_CLK_PLLARMA_OFFSET);
+ if (!(val & (1 << IPROC_CLK_PLLARMA_LOCK_SHIFT))) {
+ pll->rate = 0;
+ return 0;
+ }
+
+ pdiv = (val >> IPROC_CLK_PLLARMA_PDIV_SHIFT) &
+ IPROC_CLK_PLLARMA_PDIV_MASK;
+ if (pdiv == 0)
+ pdiv = 16;
+
+ ndiv = __get_ndiv(pll);
+ mdiv = __get_mdiv(pll);
+ if (mdiv <= 0) {
+ pll->rate = 0;
+ return 0;
+ }
+ pll->rate = (ndiv * parent_rate) >> 20;
+ pll->rate = (pll->rate / pdiv) / mdiv;
+
+ pr_debug("%s: ARM PLL rate: %lu. parent rate: %lu\n", __func__,
+ pll->rate, parent_rate);
+ pr_debug("%s: ndiv_int: %u, pdiv: %u, mdiv: %d\n", __func__,
+ (unsigned int)(ndiv >> 20), pdiv, mdiv);
+
+ return pll->rate;
+}
+
+static const struct clk_ops iproc_arm_pll_ops = {
+ .recalc_rate = iproc_arm_pll_recalc_rate,
+};
+
+void __init iproc_armpll_setup(struct device_node *node)
+{
+ int ret;
+ struct iproc_arm_pll *pll;
+ struct clk_init_data init;
+ const char *parent_name;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (WARN_ON(!pll))
+ return;
+
+ pll->base = of_iomap(node, 0);
+ if (WARN_ON(!pll->base))
+ goto err_free_pll;
+
+ init.name = node->name;
+ init.ops = &iproc_arm_pll_ops;
+ init.flags = 0;
+ parent_name = of_clk_get_parent_name(node, 0);
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+ pll->hw.init = &init;
+
+ ret = clk_hw_register(NULL, &pll->hw);
+ if (WARN_ON(ret))
+ goto err_iounmap;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll->hw);
+ if (WARN_ON(ret))
+ goto err_clk_unregister;
+
+ return;
+
+err_clk_unregister:
+ clk_hw_unregister(&pll->hw);
+err_iounmap:
+ iounmap(pll->base);
+err_free_pll:
+ kfree(pll);
+}
diff --git a/drivers/clk/bcm/clk-iproc-asiu.c b/drivers/clk/bcm/clk-iproc-asiu.c
new file mode 100644
index 0000000000..dcacf55c55
--- /dev/null
+++ b/drivers/clk/bcm/clk-iproc-asiu.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2014 Broadcom Corporation
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/clkdev.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+
+#include "clk-iproc.h"
+
+struct iproc_asiu;
+
+struct iproc_asiu_clk {
+ struct clk_hw hw;
+ const char *name;
+ struct iproc_asiu *asiu;
+ unsigned long rate;
+ struct iproc_asiu_div div;
+ struct iproc_asiu_gate gate;
+};
+
+struct iproc_asiu {
+ void __iomem *div_base;
+ void __iomem *gate_base;
+
+ struct clk_hw_onecell_data *clk_data;
+ struct iproc_asiu_clk *clks;
+};
+
+#define to_asiu_clk(hw) container_of(hw, struct iproc_asiu_clk, hw)
+
+static int iproc_asiu_clk_enable(struct clk_hw *hw)
+{
+ struct iproc_asiu_clk *clk = to_asiu_clk(hw);
+ struct iproc_asiu *asiu = clk->asiu;
+ u32 val;
+
+ /* some clocks at the ASIU level are always enabled */
+ if (clk->gate.offset == IPROC_CLK_INVALID_OFFSET)
+ return 0;
+
+ val = readl(asiu->gate_base + clk->gate.offset);
+ val |= (1 << clk->gate.en_shift);
+ writel(val, asiu->gate_base + clk->gate.offset);
+
+ return 0;
+}
+
+static void iproc_asiu_clk_disable(struct clk_hw *hw)
+{
+ struct iproc_asiu_clk *clk = to_asiu_clk(hw);
+ struct iproc_asiu *asiu = clk->asiu;
+ u32 val;
+
+ /* some clocks at the ASIU level are always enabled */
+ if (clk->gate.offset == IPROC_CLK_INVALID_OFFSET)
+ return;
+
+ val = readl(asiu->gate_base + clk->gate.offset);
+ val &= ~(1 << clk->gate.en_shift);
+ writel(val, asiu->gate_base + clk->gate.offset);
+}
+
+static unsigned long iproc_asiu_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct iproc_asiu_clk *clk = to_asiu_clk(hw);
+ struct iproc_asiu *asiu = clk->asiu;
+ u32 val;
+ unsigned int div_h, div_l;
+
+ if (parent_rate == 0) {
+ clk->rate = 0;
+ return 0;
+ }
+
+ /* if clock divisor is not enabled, simply return parent rate */
+ val = readl(asiu->div_base + clk->div.offset);
+ if ((val & (1 << clk->div.en_shift)) == 0) {
+ clk->rate = parent_rate;
+ return parent_rate;
+ }
+
+	/* clock rate = parent rate / ((high_div + 1) + (low_div + 1)) */
+ div_h = (val >> clk->div.high_shift) & bit_mask(clk->div.high_width);
+ div_h++;
+ div_l = (val >> clk->div.low_shift) & bit_mask(clk->div.low_width);
+ div_l++;
+
+ clk->rate = parent_rate / (div_h + div_l);
+ pr_debug("%s: rate: %lu. parent rate: %lu div_h: %u div_l: %u\n",
+ __func__, clk->rate, parent_rate, div_h, div_l);
+
+ return clk->rate;
+}
+
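+/*
+ * Rounding example (illustrative): requesting 24 MHz from a 100 MHz parent
+ * gives DIV_ROUND_CLOSEST(100000000, 24000000) = 4, so the request is
+ * rounded to 100 MHz / 4 = 25 MHz.
+ */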
+static long iproc_asiu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned int div;
+
+ if (rate == 0 || *parent_rate == 0)
+ return -EINVAL;
+
+ if (rate == *parent_rate)
+ return *parent_rate;
+
+ div = DIV_ROUND_CLOSEST(*parent_rate, rate);
+ if (div < 2)
+ return *parent_rate;
+
+ return *parent_rate / div;
+}
+
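+/*
+ * Divider encoding example (illustrative): asking for 10 MHz from a 100 MHz
+ * parent gives div = 10, so both the high and low fields are programmed with
+ * 10 / 2 - 1 = 4 and the resulting rate is 100 MHz / ((4 + 1) + (4 + 1)) =
+ * 10 MHz. Odd divider values are effectively rounded down to the nearest
+ * even value by the div >> 1 split.
+ */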
+static int iproc_asiu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct iproc_asiu_clk *clk = to_asiu_clk(hw);
+ struct iproc_asiu *asiu = clk->asiu;
+ unsigned int div, div_h, div_l;
+ u32 val;
+
+ if (rate == 0 || parent_rate == 0)
+ return -EINVAL;
+
+ /* simply disable the divisor if one wants the same rate as parent */
+ if (rate == parent_rate) {
+ val = readl(asiu->div_base + clk->div.offset);
+ val &= ~(1 << clk->div.en_shift);
+ writel(val, asiu->div_base + clk->div.offset);
+ return 0;
+ }
+
+ div = DIV_ROUND_CLOSEST(parent_rate, rate);
+ if (div < 2)
+ return -EINVAL;
+
+ div_h = div_l = div >> 1;
+ div_h--;
+ div_l--;
+
+ val = readl(asiu->div_base + clk->div.offset);
+ val |= 1 << clk->div.en_shift;
+ if (div_h) {
+ val &= ~(bit_mask(clk->div.high_width)
+ << clk->div.high_shift);
+ val |= div_h << clk->div.high_shift;
+ } else {
+ val &= ~(bit_mask(clk->div.high_width)
+ << clk->div.high_shift);
+ }
+ if (div_l) {
+ val &= ~(bit_mask(clk->div.low_width) << clk->div.low_shift);
+ val |= div_l << clk->div.low_shift;
+ } else {
+ val &= ~(bit_mask(clk->div.low_width) << clk->div.low_shift);
+ }
+ writel(val, asiu->div_base + clk->div.offset);
+
+ return 0;
+}
+
+static const struct clk_ops iproc_asiu_ops = {
+ .enable = iproc_asiu_clk_enable,
+ .disable = iproc_asiu_clk_disable,
+ .recalc_rate = iproc_asiu_clk_recalc_rate,
+ .round_rate = iproc_asiu_clk_round_rate,
+ .set_rate = iproc_asiu_clk_set_rate,
+};
+
+void __init iproc_asiu_setup(struct device_node *node,
+ const struct iproc_asiu_div *div,
+ const struct iproc_asiu_gate *gate,
+ unsigned int num_clks)
+{
+ int i, ret;
+ struct iproc_asiu *asiu;
+
+ if (WARN_ON(!gate || !div))
+ return;
+
+ asiu = kzalloc(sizeof(*asiu), GFP_KERNEL);
+ if (WARN_ON(!asiu))
+ return;
+
+ asiu->clk_data = kzalloc(struct_size(asiu->clk_data, hws, num_clks),
+ GFP_KERNEL);
+ if (WARN_ON(!asiu->clk_data))
+ goto err_clks;
+ asiu->clk_data->num = num_clks;
+
+ asiu->clks = kcalloc(num_clks, sizeof(*asiu->clks), GFP_KERNEL);
+ if (WARN_ON(!asiu->clks))
+ goto err_asiu_clks;
+
+ asiu->div_base = of_iomap(node, 0);
+ if (WARN_ON(!asiu->div_base))
+ goto err_iomap_div;
+
+ asiu->gate_base = of_iomap(node, 1);
+ if (WARN_ON(!asiu->gate_base))
+ goto err_iomap_gate;
+
+ for (i = 0; i < num_clks; i++) {
+ struct clk_init_data init;
+ const char *parent_name;
+ struct iproc_asiu_clk *asiu_clk;
+ const char *clk_name;
+
+ ret = of_property_read_string_index(node, "clock-output-names",
+ i, &clk_name);
+ if (WARN_ON(ret))
+ goto err_clk_register;
+
+ asiu_clk = &asiu->clks[i];
+ asiu_clk->name = clk_name;
+ asiu_clk->asiu = asiu;
+ asiu_clk->div = div[i];
+ asiu_clk->gate = gate[i];
+ init.name = clk_name;
+ init.ops = &iproc_asiu_ops;
+ init.flags = 0;
+ parent_name = of_clk_get_parent_name(node, 0);
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+ asiu_clk->hw.init = &init;
+
+ ret = clk_hw_register(NULL, &asiu_clk->hw);
+ if (WARN_ON(ret))
+ goto err_clk_register;
+ asiu->clk_data->hws[i] = &asiu_clk->hw;
+ }
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
+ asiu->clk_data);
+ if (WARN_ON(ret))
+ goto err_clk_register;
+
+ return;
+
+err_clk_register:
+ while (--i >= 0)
+ clk_hw_unregister(asiu->clk_data->hws[i]);
+ iounmap(asiu->gate_base);
+
+err_iomap_gate:
+ iounmap(asiu->div_base);
+
+err_iomap_div:
+ kfree(asiu->clks);
+
+err_asiu_clks:
+ kfree(asiu->clk_data);
+
+err_clks:
+ kfree(asiu);
+}
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
new file mode 100644
index 0000000000..680f9d8d35
--- /dev/null
+++ b/drivers/clk/bcm/clk-iproc-pll.c
@@ -0,0 +1,863 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2014 Broadcom Corporation
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/clkdev.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+
+#include "clk-iproc.h"
+
+#define PLL_VCO_HIGH_SHIFT 19
+#define PLL_VCO_LOW_SHIFT 30
+
+/*
+ * PLL MACRO_SELECT modes 0 to 5 choose pre-calculated PLL output frequencies
+ * from a look-up table. Mode 7 allows the user to manipulate the PLL clock
+ * dividers directly
+ */
+#define PLL_USER_MODE 7
+
+/* number of delay loops waiting for PLL to lock */
+#define LOCK_DELAY 100
+
+/* number of VCO frequency bands */
+#define NUM_FREQ_BANDS 8
+
+#define NUM_KP_BANDS 3
+enum kp_band {
+ KP_BAND_MID = 0,
+ KP_BAND_HIGH,
+ KP_BAND_HIGH_HIGH
+};
+
+static const unsigned int kp_table[NUM_KP_BANDS][NUM_FREQ_BANDS] = {
+ { 5, 6, 6, 7, 7, 8, 9, 10 },
+ { 4, 4, 5, 5, 6, 7, 8, 9 },
+ { 4, 5, 5, 6, 7, 8, 9, 10 },
+};
+
+static const unsigned long ref_freq_table[NUM_FREQ_BANDS][2] = {
+ { 10000000, 12500000 },
+ { 12500000, 15000000 },
+ { 15000000, 20000000 },
+ { 20000000, 25000000 },
+ { 25000000, 50000000 },
+ { 50000000, 75000000 },
+ { 75000000, 100000000 },
+ { 100000000, 125000000 },
+};
+
+enum vco_freq_range {
+ VCO_LOW = 700000000U,
+ VCO_MID = 1200000000U,
+ VCO_HIGH = 2200000000U,
+ VCO_HIGH_HIGH = 3100000000U,
+ VCO_MAX = 4000000000U,
+};
+
+struct iproc_pll {
+ void __iomem *status_base;
+ void __iomem *control_base;
+ void __iomem *pwr_base;
+ void __iomem *asiu_base;
+
+ const struct iproc_pll_ctrl *ctrl;
+ const struct iproc_pll_vco_param *vco_param;
+ unsigned int num_vco_entries;
+};
+
+struct iproc_clk {
+ struct clk_hw hw;
+ struct iproc_pll *pll;
+ const struct iproc_clk_ctrl *ctrl;
+};
+
+#define to_iproc_clk(hw) container_of(hw, struct iproc_clk, hw)
+
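+/*
+ * Worked example for the parameter calculation below (illustrative values):
+ * a 1412500000 Hz target from a 25 MHz parent gives ndiv_int = 56 and a
+ * 12.5 MHz residual, which scales to ndiv_frac = 524288 (i.e. 0.5 * 2^20),
+ * so the computed VCO rate is 56.5 * 25 MHz = 1412500000 Hz.
+ */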
+static int pll_calc_param(unsigned long target_rate,
+ unsigned long parent_rate,
+ struct iproc_pll_vco_param *vco_out)
+{
+ u64 ndiv_int, ndiv_frac, residual;
+
+ ndiv_int = target_rate / parent_rate;
+
+ if (!ndiv_int || (ndiv_int > 255))
+ return -EINVAL;
+
+ residual = target_rate - (ndiv_int * parent_rate);
+ residual <<= 20;
+
+ /*
+ * Add half of the divisor so the result will be rounded to closest
+ * instead of rounded down.
+ */
+ residual += (parent_rate / 2);
+ ndiv_frac = div64_u64((u64)residual, (u64)parent_rate);
+
+ vco_out->ndiv_int = ndiv_int;
+ vco_out->ndiv_frac = ndiv_frac;
+ vco_out->pdiv = 1;
+
+ vco_out->rate = vco_out->ndiv_int * parent_rate;
+ residual = (u64)vco_out->ndiv_frac * (u64)parent_rate;
+ residual >>= 20;
+ vco_out->rate += residual;
+
+ return 0;
+}
+
+/*
+ * Based on the target frequency, find a match from the VCO frequency parameter
+ * table and return its index
+ */
+static int pll_get_rate_index(struct iproc_pll *pll, unsigned int target_rate)
+{
+ int i;
+
+ for (i = 0; i < pll->num_vco_entries; i++)
+ if (target_rate == pll->vco_param[i].rate)
+ break;
+
+ if (i >= pll->num_vco_entries)
+ return -EINVAL;
+
+ return i;
+}
+
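+/*
+ * Example (illustrative): a 25 MHz reference frequency falls in the fifth
+ * entry of ref_freq_table ([25 MHz, 50 MHz)), so a KP_BAND_MID lookup
+ * returns kp_table[KP_BAND_MID][4] = 7.
+ */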
+static int get_kp(unsigned long ref_freq, enum kp_band kp_index)
+{
+ int i;
+
+ if (ref_freq < ref_freq_table[0][0])
+ return -EINVAL;
+
+ for (i = 0; i < NUM_FREQ_BANDS; i++) {
+ if (ref_freq >= ref_freq_table[i][0] &&
+ ref_freq < ref_freq_table[i][1])
+ return kp_table[kp_index][i];
+ }
+ return -EINVAL;
+}
+
+static int pll_wait_for_lock(struct iproc_pll *pll)
+{
+ int i;
+ const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+
+ for (i = 0; i < LOCK_DELAY; i++) {
+ u32 val = readl(pll->status_base + ctrl->status.offset);
+
+ if (val & (1 << ctrl->status.shift))
+ return 0;
+ udelay(10);
+ }
+
+ return -EIO;
+}
+
+static void iproc_pll_write(const struct iproc_pll *pll, void __iomem *base,
+ const u32 offset, u32 val)
+{
+ const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+
+ writel(val, base + offset);
+
+ if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK &&
+ (base == pll->status_base || base == pll->control_base)))
+ val = readl(base + offset);
+}
+
+static void __pll_disable(struct iproc_pll *pll)
+{
+ const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+ u32 val;
+
+ if (ctrl->flags & IPROC_CLK_PLL_ASIU) {
+ val = readl(pll->asiu_base + ctrl->asiu.offset);
+ val &= ~(1 << ctrl->asiu.en_shift);
+ iproc_pll_write(pll, pll->asiu_base, ctrl->asiu.offset, val);
+ }
+
+ if (ctrl->flags & IPROC_CLK_EMBED_PWRCTRL) {
+ val = readl(pll->control_base + ctrl->aon.offset);
+ val |= bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift;
+ iproc_pll_write(pll, pll->control_base, ctrl->aon.offset, val);
+ }
+
+ if (pll->pwr_base) {
+ /* latch input value so core power can be shut down */
+ val = readl(pll->pwr_base + ctrl->aon.offset);
+ val |= 1 << ctrl->aon.iso_shift;
+ iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);
+
+ /* power down the core */
+ val &= ~(bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift);
+ iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);
+ }
+}
+
+static int __pll_enable(struct iproc_pll *pll)
+{
+ const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+ u32 val;
+
+ if (ctrl->flags & IPROC_CLK_EMBED_PWRCTRL) {
+ val = readl(pll->control_base + ctrl->aon.offset);
+ val &= ~(bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift);
+ iproc_pll_write(pll, pll->control_base, ctrl->aon.offset, val);
+ }
+
+ if (pll->pwr_base) {
+ /* power up the PLL and make sure it's not latched */
+ val = readl(pll->pwr_base + ctrl->aon.offset);
+ val |= bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift;
+ val &= ~(1 << ctrl->aon.iso_shift);
+ iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);
+ }
+
+ /* certain PLLs also need to be ungated from the ASIU top level */
+ if (ctrl->flags & IPROC_CLK_PLL_ASIU) {
+ val = readl(pll->asiu_base + ctrl->asiu.offset);
+ val |= (1 << ctrl->asiu.en_shift);
+ iproc_pll_write(pll, pll->asiu_base, ctrl->asiu.offset, val);
+ }
+
+ return 0;
+}
+
+static void __pll_put_in_reset(struct iproc_pll *pll)
+{
+ u32 val;
+ const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+ const struct iproc_pll_reset_ctrl *reset = &ctrl->reset;
+
+ val = readl(pll->control_base + reset->offset);
+ if (ctrl->flags & IPROC_CLK_PLL_RESET_ACTIVE_LOW)
+ val |= BIT(reset->reset_shift) | BIT(reset->p_reset_shift);
+ else
+ val &= ~(BIT(reset->reset_shift) | BIT(reset->p_reset_shift));
+ iproc_pll_write(pll, pll->control_base, reset->offset, val);
+}
+
+static void __pll_bring_out_reset(struct iproc_pll *pll, unsigned int kp,
+ unsigned int ka, unsigned int ki)
+{
+ u32 val;
+ const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+ const struct iproc_pll_reset_ctrl *reset = &ctrl->reset;
+ const struct iproc_pll_dig_filter_ctrl *dig_filter = &ctrl->dig_filter;
+
+ val = readl(pll->control_base + dig_filter->offset);
+ val &= ~(bit_mask(dig_filter->ki_width) << dig_filter->ki_shift |
+ bit_mask(dig_filter->kp_width) << dig_filter->kp_shift |
+ bit_mask(dig_filter->ka_width) << dig_filter->ka_shift);
+ val |= ki << dig_filter->ki_shift | kp << dig_filter->kp_shift |
+ ka << dig_filter->ka_shift;
+ iproc_pll_write(pll, pll->control_base, dig_filter->offset, val);
+
+ val = readl(pll->control_base + reset->offset);
+ if (ctrl->flags & IPROC_CLK_PLL_RESET_ACTIVE_LOW)
+ val &= ~(BIT(reset->reset_shift) | BIT(reset->p_reset_shift));
+ else
+ val |= BIT(reset->reset_shift) | BIT(reset->p_reset_shift);
+ iproc_pll_write(pll, pll->control_base, reset->offset, val);
+}
+
+/*
+ * Determines if the change to be applied to the PLL is minor (just an update
+ * of the fractional divider). If so, then we can avoid going through a
+ * disruptive reset and lock sequence.
+ */
+static bool pll_fractional_change_only(struct iproc_pll *pll,
+ struct iproc_pll_vco_param *vco)
+{
+ const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+ u32 val;
+ u32 ndiv_int;
+ unsigned int pdiv;
+
+ /* PLL needs to be locked */
+ val = readl(pll->status_base + ctrl->status.offset);
+ if ((val & (1 << ctrl->status.shift)) == 0)
+ return false;
+
+ val = readl(pll->control_base + ctrl->ndiv_int.offset);
+ ndiv_int = (val >> ctrl->ndiv_int.shift) &
+ bit_mask(ctrl->ndiv_int.width);
+
+ if (ndiv_int != vco->ndiv_int)
+ return false;
+
+ val = readl(pll->control_base + ctrl->pdiv.offset);
+ pdiv = (val >> ctrl->pdiv.shift) & bit_mask(ctrl->pdiv.width);
+
+ if (pdiv != vco->pdiv)
+ return false;
+
+ return true;
+}
+
+static int pll_set_rate(struct iproc_clk *clk, struct iproc_pll_vco_param *vco,
+ unsigned long parent_rate)
+{
+ struct iproc_pll *pll = clk->pll;
+ const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+ int ka = 0, ki, kp, ret;
+ unsigned long rate = vco->rate;
+ u32 val;
+ enum kp_band kp_index;
+ unsigned long ref_freq;
+ const char *clk_name = clk_hw_get_name(&clk->hw);
+
+ /*
+ * reference frequency = parent frequency / PDIV
+ * If PDIV = 0, then it becomes a multiplier (x2)
+ */
+ if (vco->pdiv == 0)
+ ref_freq = parent_rate * 2;
+ else
+ ref_freq = parent_rate / vco->pdiv;
+
+ /* determine Ki and Kp index based on target VCO frequency */
+ if (rate >= VCO_LOW && rate < VCO_HIGH) {
+ ki = 4;
+ kp_index = KP_BAND_MID;
+ } else if (rate >= VCO_HIGH && rate < VCO_HIGH_HIGH) {
+ ki = 3;
+ kp_index = KP_BAND_HIGH;
+ } else if (rate >= VCO_HIGH_HIGH && rate < VCO_MAX) {
+ ki = 3;
+ kp_index = KP_BAND_HIGH_HIGH;
+ } else {
+ pr_err("%s: pll: %s has invalid rate: %lu\n", __func__,
+ clk_name, rate);
+ return -EINVAL;
+ }
+
+ kp = get_kp(ref_freq, kp_index);
+ if (kp < 0) {
+ pr_err("%s: pll: %s has invalid kp\n", __func__, clk_name);
+ return kp;
+ }
+
+ ret = __pll_enable(pll);
+ if (ret) {
+ pr_err("%s: pll: %s fails to enable\n", __func__, clk_name);
+ return ret;
+ }
+
+ if (pll_fractional_change_only(clk->pll, vco)) {
+ /* program fractional part of NDIV */
+ if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
+ val = readl(pll->control_base + ctrl->ndiv_frac.offset);
+ val &= ~(bit_mask(ctrl->ndiv_frac.width) <<
+ ctrl->ndiv_frac.shift);
+ val |= vco->ndiv_frac << ctrl->ndiv_frac.shift;
+ iproc_pll_write(pll, pll->control_base,
+ ctrl->ndiv_frac.offset, val);
+ return 0;
+ }
+ }
+
+ /* put PLL in reset */
+ __pll_put_in_reset(pll);
+
+ /* set PLL in user mode before modifying PLL controls */
+ if (ctrl->flags & IPROC_CLK_PLL_USER_MODE_ON) {
+ val = readl(pll->control_base + ctrl->macro_mode.offset);
+ val &= ~(bit_mask(ctrl->macro_mode.width) <<
+ ctrl->macro_mode.shift);
+ val |= PLL_USER_MODE << ctrl->macro_mode.shift;
+ iproc_pll_write(pll, pll->control_base,
+ ctrl->macro_mode.offset, val);
+ }
+
+ iproc_pll_write(pll, pll->control_base, ctrl->vco_ctrl.u_offset, 0);
+
+ val = readl(pll->control_base + ctrl->vco_ctrl.l_offset);
+
+ if (rate >= VCO_LOW && rate < VCO_MID)
+ val |= (1 << PLL_VCO_LOW_SHIFT);
+
+ if (rate < VCO_HIGH)
+ val &= ~(1 << PLL_VCO_HIGH_SHIFT);
+ else
+ val |= (1 << PLL_VCO_HIGH_SHIFT);
+
+ iproc_pll_write(pll, pll->control_base, ctrl->vco_ctrl.l_offset, val);
+
+ /* program integer part of NDIV */
+ val = readl(pll->control_base + ctrl->ndiv_int.offset);
+ val &= ~(bit_mask(ctrl->ndiv_int.width) << ctrl->ndiv_int.shift);
+ val |= vco->ndiv_int << ctrl->ndiv_int.shift;
+ iproc_pll_write(pll, pll->control_base, ctrl->ndiv_int.offset, val);
+
+ /* program fractional part of NDIV */
+ if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
+ val = readl(pll->control_base + ctrl->ndiv_frac.offset);
+ val &= ~(bit_mask(ctrl->ndiv_frac.width) <<
+ ctrl->ndiv_frac.shift);
+ val |= vco->ndiv_frac << ctrl->ndiv_frac.shift;
+ iproc_pll_write(pll, pll->control_base, ctrl->ndiv_frac.offset,
+ val);
+ }
+
+ /* program PDIV */
+ val = readl(pll->control_base + ctrl->pdiv.offset);
+ val &= ~(bit_mask(ctrl->pdiv.width) << ctrl->pdiv.shift);
+ val |= vco->pdiv << ctrl->pdiv.shift;
+ iproc_pll_write(pll, pll->control_base, ctrl->pdiv.offset, val);
+
+ __pll_bring_out_reset(pll, kp, ka, ki);
+
+ ret = pll_wait_for_lock(pll);
+ if (ret < 0) {
+ pr_err("%s: pll: %s failed to lock\n", __func__, clk_name);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int iproc_pll_enable(struct clk_hw *hw)
+{
+ struct iproc_clk *clk = to_iproc_clk(hw);
+ struct iproc_pll *pll = clk->pll;
+
+ return __pll_enable(pll);
+}
+
+static void iproc_pll_disable(struct clk_hw *hw)
+{
+ struct iproc_clk *clk = to_iproc_clk(hw);
+ struct iproc_pll *pll = clk->pll;
+ const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+
+ if (ctrl->flags & IPROC_CLK_AON)
+ return;
+
+ __pll_disable(pll);
+}
+
+static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct iproc_clk *clk = to_iproc_clk(hw);
+ struct iproc_pll *pll = clk->pll;
+ const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+ u32 val;
+ u64 ndiv, ndiv_int, ndiv_frac;
+ unsigned int pdiv;
+ unsigned long rate;
+
+ if (parent_rate == 0)
+ return 0;
+
+ /* PLL needs to be locked */
+ val = readl(pll->status_base + ctrl->status.offset);
+ if ((val & (1 << ctrl->status.shift)) == 0)
+ return 0;
+
+ /*
+ * PLL output frequency =
+ *
+	 * (ndiv_int + ndiv_frac / 2^20) * (parent clock rate / pdiv)
+ */
+ val = readl(pll->control_base + ctrl->ndiv_int.offset);
+ ndiv_int = (val >> ctrl->ndiv_int.shift) &
+ bit_mask(ctrl->ndiv_int.width);
+ ndiv = ndiv_int << 20;
+
+ if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
+ val = readl(pll->control_base + ctrl->ndiv_frac.offset);
+ ndiv_frac = (val >> ctrl->ndiv_frac.shift) &
+ bit_mask(ctrl->ndiv_frac.width);
+ ndiv += ndiv_frac;
+ }
+
+ val = readl(pll->control_base + ctrl->pdiv.offset);
+ pdiv = (val >> ctrl->pdiv.shift) & bit_mask(ctrl->pdiv.width);
+
+ rate = (ndiv * parent_rate) >> 20;
+
+ if (pdiv == 0)
+ rate *= 2;
+ else
+ rate /= pdiv;
+
+ return rate;
+}
+
+static int iproc_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ unsigned int i;
+ struct iproc_clk *clk = to_iproc_clk(hw);
+ struct iproc_pll *pll = clk->pll;
+ const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+ unsigned long diff, best_diff;
+ unsigned int best_idx = 0;
+ int ret;
+
+ if (req->rate == 0 || req->best_parent_rate == 0)
+ return -EINVAL;
+
+ if (ctrl->flags & IPROC_CLK_PLL_CALC_PARAM) {
+ struct iproc_pll_vco_param vco_param;
+
+ ret = pll_calc_param(req->rate, req->best_parent_rate,
+ &vco_param);
+ if (ret)
+ return ret;
+
+ req->rate = vco_param.rate;
+ return 0;
+ }
+
+ if (!pll->vco_param)
+ return -EINVAL;
+
+ best_diff = ULONG_MAX;
+ for (i = 0; i < pll->num_vco_entries; i++) {
+ diff = abs(req->rate - pll->vco_param[i].rate);
+ if (diff <= best_diff) {
+ best_diff = diff;
+ best_idx = i;
+ }
+ /* break now if perfect match */
+ if (diff == 0)
+ break;
+ }
+
+ req->rate = pll->vco_param[best_idx].rate;
+
+ return 0;
+}
+
+static int iproc_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct iproc_clk *clk = to_iproc_clk(hw);
+ struct iproc_pll *pll = clk->pll;
+ const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+ struct iproc_pll_vco_param vco_param;
+ int rate_index, ret;
+
+ if (ctrl->flags & IPROC_CLK_PLL_CALC_PARAM) {
+ ret = pll_calc_param(rate, parent_rate, &vco_param);
+ if (ret)
+ return ret;
+ } else {
+ rate_index = pll_get_rate_index(pll, rate);
+ if (rate_index < 0)
+ return rate_index;
+
+ vco_param = pll->vco_param[rate_index];
+ }
+
+ ret = pll_set_rate(clk, &vco_param, parent_rate);
+ return ret;
+}
+
+static const struct clk_ops iproc_pll_ops = {
+ .enable = iproc_pll_enable,
+ .disable = iproc_pll_disable,
+ .recalc_rate = iproc_pll_recalc_rate,
+ .determine_rate = iproc_pll_determine_rate,
+ .set_rate = iproc_pll_set_rate,
+};
+
+static int iproc_clk_enable(struct clk_hw *hw)
+{
+ struct iproc_clk *clk = to_iproc_clk(hw);
+ const struct iproc_clk_ctrl *ctrl = clk->ctrl;
+ struct iproc_pll *pll = clk->pll;
+ u32 val;
+
+ /* channel enable is active low */
+ val = readl(pll->control_base + ctrl->enable.offset);
+ val &= ~(1 << ctrl->enable.enable_shift);
+ iproc_pll_write(pll, pll->control_base, ctrl->enable.offset, val);
+
+ /* also make sure channel is not held */
+ val = readl(pll->control_base + ctrl->enable.offset);
+ val &= ~(1 << ctrl->enable.hold_shift);
+ iproc_pll_write(pll, pll->control_base, ctrl->enable.offset, val);
+
+ return 0;
+}
+
+static void iproc_clk_disable(struct clk_hw *hw)
+{
+ struct iproc_clk *clk = to_iproc_clk(hw);
+ const struct iproc_clk_ctrl *ctrl = clk->ctrl;
+ struct iproc_pll *pll = clk->pll;
+ u32 val;
+
+ if (ctrl->flags & IPROC_CLK_AON)
+ return;
+
+ val = readl(pll->control_base + ctrl->enable.offset);
+ val |= 1 << ctrl->enable.enable_shift;
+ iproc_pll_write(pll, pll->control_base, ctrl->enable.offset, val);
+}
+
+static unsigned long iproc_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct iproc_clk *clk = to_iproc_clk(hw);
+ const struct iproc_clk_ctrl *ctrl = clk->ctrl;
+ struct iproc_pll *pll = clk->pll;
+ u32 val;
+ unsigned int mdiv;
+ unsigned long rate;
+
+ if (parent_rate == 0)
+ return 0;
+
+ val = readl(pll->control_base + ctrl->mdiv.offset);
+ mdiv = (val >> ctrl->mdiv.shift) & bit_mask(ctrl->mdiv.width);
+ if (mdiv == 0)
+ mdiv = 256;
+
+ if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
+ rate = parent_rate / (mdiv * 2);
+ else
+ rate = parent_rate / mdiv;
+
+ return rate;
+}
+
+static int iproc_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ unsigned int bestdiv;
+
+ if (req->rate == 0)
+ return -EINVAL;
+ if (req->rate == req->best_parent_rate)
+ return 0;
+
+ bestdiv = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
+	if (bestdiv < 2) {
+		req->rate = req->best_parent_rate;
+		return 0;
+	}
+
+ if (bestdiv > 256)
+ bestdiv = 256;
+
+ req->rate = req->best_parent_rate / bestdiv;
+
+ return 0;
+}
+
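+/*
+ * mdiv encoding note (illustrative, for a clock without
+ * IPROC_CLK_MCLK_DIV_BY_2): a field value of 0 is treated as divide-by-256,
+ * so requesting parent_rate / 256 clears the field, while e.g.
+ * parent_rate / 5 writes the value 5.
+ */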
+static int iproc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct iproc_clk *clk = to_iproc_clk(hw);
+ const struct iproc_clk_ctrl *ctrl = clk->ctrl;
+ struct iproc_pll *pll = clk->pll;
+ u32 val;
+ unsigned int div;
+
+ if (rate == 0 || parent_rate == 0)
+ return -EINVAL;
+
+ div = DIV_ROUND_CLOSEST(parent_rate, rate);
+ if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
+ div /= 2;
+
+ if (div > 256)
+ return -EINVAL;
+
+ val = readl(pll->control_base + ctrl->mdiv.offset);
+ if (div == 256) {
+ val &= ~(bit_mask(ctrl->mdiv.width) << ctrl->mdiv.shift);
+ } else {
+ val &= ~(bit_mask(ctrl->mdiv.width) << ctrl->mdiv.shift);
+ val |= div << ctrl->mdiv.shift;
+ }
+ iproc_pll_write(pll, pll->control_base, ctrl->mdiv.offset, val);
+
+ return 0;
+}
+
+static const struct clk_ops iproc_clk_ops = {
+ .enable = iproc_clk_enable,
+ .disable = iproc_clk_disable,
+ .recalc_rate = iproc_clk_recalc_rate,
+ .determine_rate = iproc_clk_determine_rate,
+ .set_rate = iproc_clk_set_rate,
+};
+
+/*
+ * Some PLLs require the PLL SW override bit to be set before changes can be
+ * applied to the PLL
+ */
+static void iproc_pll_sw_cfg(struct iproc_pll *pll)
+{
+ const struct iproc_pll_ctrl *ctrl = pll->ctrl;
+
+ if (ctrl->flags & IPROC_CLK_PLL_NEEDS_SW_CFG) {
+ u32 val;
+
+ val = readl(pll->control_base + ctrl->sw_ctrl.offset);
+ val |= BIT(ctrl->sw_ctrl.shift);
+ iproc_pll_write(pll, pll->control_base, ctrl->sw_ctrl.offset,
+ val);
+ }
+}
+
+void iproc_pll_clk_setup(struct device_node *node,
+ const struct iproc_pll_ctrl *pll_ctrl,
+ const struct iproc_pll_vco_param *vco,
+ unsigned int num_vco_entries,
+ const struct iproc_clk_ctrl *clk_ctrl,
+ unsigned int num_clks)
+{
+ int i, ret;
+ struct iproc_pll *pll;
+ struct iproc_clk *iclk;
+ struct clk_init_data init;
+ const char *parent_name;
+ struct iproc_clk *iclk_array;
+ struct clk_hw_onecell_data *clk_data;
+ const char *clk_name;
+
+ if (WARN_ON(!pll_ctrl) || WARN_ON(!clk_ctrl))
+ return;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (WARN_ON(!pll))
+ return;
+
+ clk_data = kzalloc(struct_size(clk_data, hws, num_clks), GFP_KERNEL);
+ if (WARN_ON(!clk_data))
+ goto err_clk_data;
+ clk_data->num = num_clks;
+
+ iclk_array = kcalloc(num_clks, sizeof(struct iproc_clk), GFP_KERNEL);
+ if (WARN_ON(!iclk_array))
+ goto err_clks;
+
+ pll->control_base = of_iomap(node, 0);
+ if (WARN_ON(!pll->control_base))
+ goto err_pll_iomap;
+
+ /* Some SoCs do not require the pwr_base, thus failing is not fatal */
+ pll->pwr_base = of_iomap(node, 1);
+
+ /* some PLLs require gating control at the top ASIU level */
+ if (pll_ctrl->flags & IPROC_CLK_PLL_ASIU) {
+ pll->asiu_base = of_iomap(node, 2);
+ if (WARN_ON(!pll->asiu_base))
+ goto err_asiu_iomap;
+ }
+
+ if (pll_ctrl->flags & IPROC_CLK_PLL_SPLIT_STAT_CTRL) {
+ /* Some SoCs have a split status/control. If this does not
+ * exist, assume they are unified.
+ */
+ pll->status_base = of_iomap(node, 2);
+ if (!pll->status_base)
+ goto err_status_iomap;
+	} else {
+		pll->status_base = pll->control_base;
+	}
+
+ /* initialize and register the PLL itself */
+ pll->ctrl = pll_ctrl;
+
+ iclk = &iclk_array[0];
+ iclk->pll = pll;
+
+ ret = of_property_read_string_index(node, "clock-output-names",
+ 0, &clk_name);
+ if (WARN_ON(ret))
+ goto err_pll_register;
+
+ init.name = clk_name;
+ init.ops = &iproc_pll_ops;
+ init.flags = 0;
+ parent_name = of_clk_get_parent_name(node, 0);
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+ iclk->hw.init = &init;
+
+ if (vco) {
+ pll->num_vco_entries = num_vco_entries;
+ pll->vco_param = vco;
+ }
+
+ iproc_pll_sw_cfg(pll);
+
+ ret = clk_hw_register(NULL, &iclk->hw);
+ if (WARN_ON(ret))
+ goto err_pll_register;
+
+ clk_data->hws[0] = &iclk->hw;
+ parent_name = clk_name;
+
+ /* now initialize and register all leaf clocks */
+ for (i = 1; i < num_clks; i++) {
+ memset(&init, 0, sizeof(init));
+
+ ret = of_property_read_string_index(node, "clock-output-names",
+ i, &clk_name);
+ if (WARN_ON(ret))
+ goto err_clk_register;
+
+ iclk = &iclk_array[i];
+ iclk->pll = pll;
+ iclk->ctrl = &clk_ctrl[i];
+
+ init.name = clk_name;
+ init.ops = &iproc_clk_ops;
+ init.flags = 0;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+ iclk->hw.init = &init;
+
+ ret = clk_hw_register(NULL, &iclk->hw);
+ if (WARN_ON(ret))
+ goto err_clk_register;
+
+ clk_data->hws[i] = &iclk->hw;
+ }
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (WARN_ON(ret))
+ goto err_clk_register;
+
+ return;
+
+err_clk_register:
+ while (--i >= 0)
+ clk_hw_unregister(clk_data->hws[i]);
+
+err_pll_register:
+ if (pll->status_base != pll->control_base)
+ iounmap(pll->status_base);
+
+err_status_iomap:
+ if (pll->asiu_base)
+ iounmap(pll->asiu_base);
+
+err_asiu_iomap:
+ if (pll->pwr_base)
+ iounmap(pll->pwr_base);
+
+ iounmap(pll->control_base);
+
+err_pll_iomap:
+ kfree(iclk_array);
+
+err_clks:
+ kfree(clk_data);
+
+err_clk_data:
+ kfree(pll);
+}
diff --git a/drivers/clk/bcm/clk-iproc.h b/drivers/clk/bcm/clk-iproc.h
new file mode 100644
index 0000000000..0151d6ae16
--- /dev/null
+++ b/drivers/clk/bcm/clk-iproc.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2014 Broadcom Corporation */
+
+#ifndef _CLK_IPROC_H
+#define _CLK_IPROC_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/clk-provider.h>
+
+#define IPROC_CLK_NAME_LEN 25
+#define IPROC_CLK_INVALID_OFFSET 0xffffffff
+#define bit_mask(width) ((1 << (width)) - 1)
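+/* e.g. bit_mask(4) == 0xf; callers shift the mask into position as needed */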
+
+/* clocks that should not be disabled at runtime */
+#define IPROC_CLK_AON BIT(0)
+
+/* PLL that requires gating through ASIU */
+#define IPROC_CLK_PLL_ASIU BIT(1)
+
+/* PLL that has fractional part of the NDIV */
+#define IPROC_CLK_PLL_HAS_NDIV_FRAC BIT(2)
+
+/*
+ * Some of the iProc PLL/clocks may have an ASIC bug that requires read back
+ * of the same register following the write to flush the write transaction into
+ * the intended register
+ */
+#define IPROC_CLK_NEEDS_READ_BACK BIT(3)
+
+/*
+ * Some PLLs require the PLL SW override bit to be set before changes can be
+ * applied to the PLL
+ */
+#define IPROC_CLK_PLL_NEEDS_SW_CFG BIT(4)
+
+/*
+ * Some PLLs use a different way to control clock power, via the PWRDWN bit in
+ * the PLL control register
+ */
+#define IPROC_CLK_EMBED_PWRCTRL BIT(5)
+
+/*
+ * Some PLLs have separate registers for Status and Control. Identify this to
+ * let the driver know if additional registers need to be used
+ */
+#define IPROC_CLK_PLL_SPLIT_STAT_CTRL BIT(6)
+
+/*
+ * Some PLLs have an additional divide by 2 in master clock calculation;
+ * MCLK = VCO_freq / (Mdiv * 2). Identify this to let the driver know
+ * of modified calculations
+ */
+#define IPROC_CLK_MCLK_DIV_BY_2 BIT(7)
+
+/*
+ * Some PLLs provide a look up table for the leaf clock frequencies and
+ * auto calculates VCO frequency parameters based on the provided leaf
+ * clock frequencies. They have a user mode that allows the divider
+ * controls to be determined by the user
+ */
+#define IPROC_CLK_PLL_USER_MODE_ON BIT(8)
+
+/*
+ * Some PLLs have an active low reset
+ */
+#define IPROC_CLK_PLL_RESET_ACTIVE_LOW BIT(9)
+
+/*
+ * Calculate the PLL parameters at runtime, instead of using a table
+ */
+#define IPROC_CLK_PLL_CALC_PARAM BIT(10)
+
+/*
+ * Parameters for VCO frequency configuration
+ *
+ * VCO frequency =
+ *   (ndiv_int + ndiv_frac / 2^20) * (ref frequency / pdiv)
+ */
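+/*
+ * Example entry (illustrative, not from a real table): with a 25 MHz
+ * reference, { 1412500000UL, 56, 524288, 1 } describes a
+ * (56 + 524288 / 2^20) * 25 MHz = 1.4125 GHz VCO setting.
+ */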
+struct iproc_pll_vco_param {
+ unsigned long rate;
+ unsigned int ndiv_int;
+ unsigned int ndiv_frac;
+ unsigned int pdiv;
+};
+
+struct iproc_clk_reg_op {
+ unsigned int offset;
+ unsigned int shift;
+ unsigned int width;
+};
+
+/*
+ * Clock gating control at the top ASIU level
+ */
+struct iproc_asiu_gate {
+ unsigned int offset;
+ unsigned int en_shift;
+};
+
+/*
+ * Control of powering on/off of a PLL
+ *
+ * Before powering off a PLL, input isolation (ISO) needs to be enabled
+ */
+struct iproc_pll_aon_pwr_ctrl {
+ unsigned int offset;
+ unsigned int pwr_width;
+ unsigned int pwr_shift;
+ unsigned int iso_shift;
+};
+
+/*
+ * Control of the PLL reset
+ */
+struct iproc_pll_reset_ctrl {
+ unsigned int offset;
+ unsigned int reset_shift;
+ unsigned int p_reset_shift;
+};
+
+/*
+ * Control of the Ki, Kp, and Ka parameters
+ */
+struct iproc_pll_dig_filter_ctrl {
+ unsigned int offset;
+ unsigned int ki_shift;
+ unsigned int ki_width;
+ unsigned int kp_shift;
+ unsigned int kp_width;
+ unsigned int ka_shift;
+ unsigned int ka_width;
+};
+
+/*
+ * To enable SW control of the PLL
+ */
+struct iproc_pll_sw_ctrl {
+ unsigned int offset;
+ unsigned int shift;
+};
+
+struct iproc_pll_vco_ctrl {
+ unsigned int u_offset;
+ unsigned int l_offset;
+};
+
+/*
+ * Main PLL control parameters
+ */
+struct iproc_pll_ctrl {
+ unsigned long flags;
+ struct iproc_pll_aon_pwr_ctrl aon;
+ struct iproc_asiu_gate asiu;
+ struct iproc_pll_reset_ctrl reset;
+ struct iproc_pll_dig_filter_ctrl dig_filter;
+ struct iproc_pll_sw_ctrl sw_ctrl;
+ struct iproc_clk_reg_op ndiv_int;
+ struct iproc_clk_reg_op ndiv_frac;
+ struct iproc_clk_reg_op pdiv;
+ struct iproc_pll_vco_ctrl vco_ctrl;
+ struct iproc_clk_reg_op status;
+ struct iproc_clk_reg_op macro_mode;
+};
+
+/*
+ * Controls enabling/disabling a PLL derived clock
+ */
+struct iproc_clk_enable_ctrl {
+ unsigned int offset;
+ unsigned int enable_shift;
+ unsigned int hold_shift;
+ unsigned int bypass_shift;
+};
+
+/*
+ * Main clock control parameters for clocks derived from the PLLs
+ */
+struct iproc_clk_ctrl {
+ unsigned int channel;
+ unsigned long flags;
+ struct iproc_clk_enable_ctrl enable;
+ struct iproc_clk_reg_op mdiv;
+};
+
+/*
+ * Divisor of the ASIU clocks
+ */
+struct iproc_asiu_div {
+ unsigned int offset;
+ unsigned int en_shift;
+ unsigned int high_shift;
+ unsigned int high_width;
+ unsigned int low_shift;
+ unsigned int low_width;
+};
+
+void iproc_armpll_setup(struct device_node *node);
+void iproc_pll_clk_setup(struct device_node *node,
+ const struct iproc_pll_ctrl *pll_ctrl,
+ const struct iproc_pll_vco_param *vco,
+ unsigned int num_vco_entries,
+ const struct iproc_clk_ctrl *clk_ctrl,
+ unsigned int num_clks);
+void iproc_asiu_setup(struct device_node *node,
+ const struct iproc_asiu_div *div,
+ const struct iproc_asiu_gate *gate,
+ unsigned int num_clks);
+
+#endif /* _CLK_IPROC_H */
diff --git a/drivers/clk/bcm/clk-kona-setup.c b/drivers/clk/bcm/clk-kona-setup.c
new file mode 100644
index 0000000000..338558f6fb
--- /dev/null
+++ b/drivers/clk/bcm/clk-kona-setup.c
@@ -0,0 +1,855 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ * Copyright 2013 Linaro Limited
+ */
+
+#include <linux/io.h>
+#include <linux/of_address.h>
+
+#include "clk-kona.h"
+
+/* These are used when a selector or trigger is found to be unneeded */
+#define selector_clear_exists(sel) ((sel)->width = 0)
+#define trigger_clear_exists(trig) FLAG_CLEAR(trig, TRIG, EXISTS)
+
+/* Validity checking */
+
+static bool ccu_data_offsets_valid(struct ccu_data *ccu)
+{
+ struct ccu_policy *ccu_policy = &ccu->policy;
+ u32 limit;
+
+ limit = ccu->range - sizeof(u32);
+ limit = round_down(limit, sizeof(u32));
+ if (ccu_policy_exists(ccu_policy)) {
+ if (ccu_policy->enable.offset > limit) {
+ pr_err("%s: bad policy enable offset for %s "
+ "(%u > %u)\n", __func__,
+ ccu->name, ccu_policy->enable.offset, limit);
+ return false;
+ }
+ if (ccu_policy->control.offset > limit) {
+ pr_err("%s: bad policy control offset for %s "
+ "(%u > %u)\n", __func__,
+ ccu->name, ccu_policy->control.offset, limit);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool clk_requires_trigger(struct kona_clk *bcm_clk)
+{
+ struct peri_clk_data *peri = bcm_clk->u.peri;
+ struct bcm_clk_sel *sel;
+ struct bcm_clk_div *div;
+
+ if (bcm_clk->type != bcm_clk_peri)
+ return false;
+
+ sel = &peri->sel;
+ if (sel->parent_count && selector_exists(sel))
+ return true;
+
+ div = &peri->div;
+ if (!divider_exists(div))
+ return false;
+
+ /* Fixed dividers don't need triggers */
+ if (!divider_is_fixed(div))
+ return true;
+
+ div = &peri->pre_div;
+
+ return divider_exists(div) && !divider_is_fixed(div);
+}
+
+static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk)
+{
+ struct peri_clk_data *peri;
+ struct bcm_clk_policy *policy;
+ struct bcm_clk_gate *gate;
+ struct bcm_clk_hyst *hyst;
+ struct bcm_clk_div *div;
+ struct bcm_clk_sel *sel;
+ struct bcm_clk_trig *trig;
+ const char *name;
+ u32 range;
+ u32 limit;
+
+ BUG_ON(bcm_clk->type != bcm_clk_peri);
+ peri = bcm_clk->u.peri;
+ name = bcm_clk->init_data.name;
+ range = bcm_clk->ccu->range;
+
+ limit = range - sizeof(u32);
+ limit = round_down(limit, sizeof(u32));
+
+ policy = &peri->policy;
+ if (policy_exists(policy)) {
+ if (policy->offset > limit) {
+ pr_err("%s: bad policy offset for %s (%u > %u)\n",
+ __func__, name, policy->offset, limit);
+ return false;
+ }
+ }
+
+ gate = &peri->gate;
+ hyst = &peri->hyst;
+ if (gate_exists(gate)) {
+ if (gate->offset > limit) {
+ pr_err("%s: bad gate offset for %s (%u > %u)\n",
+ __func__, name, gate->offset, limit);
+ return false;
+ }
+
+ if (hyst_exists(hyst)) {
+ if (hyst->offset > limit) {
+ pr_err("%s: bad hysteresis offset for %s "
+ "(%u > %u)\n", __func__,
+ name, hyst->offset, limit);
+ return false;
+ }
+ }
+ } else if (hyst_exists(hyst)) {
+ pr_err("%s: hysteresis but no gate for %s\n", __func__, name);
+ return false;
+ }
+
+ div = &peri->div;
+ if (divider_exists(div)) {
+ if (div->u.s.offset > limit) {
+ pr_err("%s: bad divider offset for %s (%u > %u)\n",
+ __func__, name, div->u.s.offset, limit);
+ return false;
+ }
+ }
+
+ div = &peri->pre_div;
+ if (divider_exists(div)) {
+ if (div->u.s.offset > limit) {
+ pr_err("%s: bad pre-divider offset for %s "
+ "(%u > %u)\n",
+ __func__, name, div->u.s.offset, limit);
+ return false;
+ }
+ }
+
+ sel = &peri->sel;
+ if (selector_exists(sel)) {
+ if (sel->offset > limit) {
+ pr_err("%s: bad selector offset for %s (%u > %u)\n",
+ __func__, name, sel->offset, limit);
+ return false;
+ }
+ }
+
+ trig = &peri->trig;
+ if (trigger_exists(trig)) {
+ if (trig->offset > limit) {
+ pr_err("%s: bad trigger offset for %s (%u > %u)\n",
+ __func__, name, trig->offset, limit);
+ return false;
+ }
+ }
+
+ trig = &peri->pre_trig;
+ if (trigger_exists(trig)) {
+ if (trig->offset > limit) {
+ pr_err("%s: bad pre-trigger offset for %s (%u > %u)\n",
+ __func__, name, trig->offset, limit);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* A bit position must be less than the number of bits in a 32-bit register. */
+static bool bit_posn_valid(u32 bit_posn, const char *field_name,
+ const char *clock_name)
+{
+ u32 limit = BITS_PER_BYTE * sizeof(u32) - 1;
+
+ if (bit_posn > limit) {
+ pr_err("%s: bad %s bit for %s (%u > %u)\n", __func__,
+ field_name, clock_name, bit_posn, limit);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * A bitfield must be at least 1 bit wide. Both the low-order and
+ * high-order bits must lie within a 32-bit register. We require
+ * fields to be less than 32 bits wide, mainly because we use
+ * shifting to produce field masks, and shifting a full word width
+ * is not well-defined by the C standard.
+ */
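+/*
+ * For instance (illustrative), a field with shift = 28 and width = 8 is
+ * rejected because 28 + 8 > 32, while shift = 24 and width = 8 is accepted.
+ */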
+static bool bitfield_valid(u32 shift, u32 width, const char *field_name,
+ const char *clock_name)
+{
+ u32 limit = BITS_PER_BYTE * sizeof(u32);
+
+ if (!width) {
+ pr_err("%s: bad %s field width 0 for %s\n", __func__,
+ field_name, clock_name);
+ return false;
+ }
+ if (shift + width > limit) {
+ pr_err("%s: bad %s for %s (%u + %u > %u)\n", __func__,
+ field_name, clock_name, shift, width, limit);
+ return false;
+ }
+ return true;
+}
+
+static bool
+ccu_policy_valid(struct ccu_policy *ccu_policy, const char *ccu_name)
+{
+ struct bcm_lvm_en *enable = &ccu_policy->enable;
+ struct bcm_policy_ctl *control;
+
+ if (!bit_posn_valid(enable->bit, "policy enable", ccu_name))
+ return false;
+
+ control = &ccu_policy->control;
+ if (!bit_posn_valid(control->go_bit, "policy control GO", ccu_name))
+ return false;
+
+ if (!bit_posn_valid(control->atl_bit, "policy control ATL", ccu_name))
+ return false;
+
+ if (!bit_posn_valid(control->ac_bit, "policy control AC", ccu_name))
+ return false;
+
+ return true;
+}
+
+static bool policy_valid(struct bcm_clk_policy *policy, const char *clock_name)
+{
+ if (!bit_posn_valid(policy->bit, "policy", clock_name))
+ return false;
+
+ return true;
+}
+
+/*
+ * All gates, if defined, have a status bit, and for hardware-only
+ * gates, that's it. Gates that can be software controlled also
+ * have an enable bit. And a gate that can be hardware or software
+ * controlled will have a hardware/software select bit.
+ */
+static bool gate_valid(struct bcm_clk_gate *gate, const char *field_name,
+ const char *clock_name)
+{
+ if (!bit_posn_valid(gate->status_bit, "gate status", clock_name))
+ return false;
+
+ if (gate_is_sw_controllable(gate)) {
+ if (!bit_posn_valid(gate->en_bit, "gate enable", clock_name))
+ return false;
+
+ if (gate_is_hw_controllable(gate)) {
+ if (!bit_posn_valid(gate->hw_sw_sel_bit,
+ "gate hw/sw select",
+ clock_name))
+ return false;
+ }
+ } else {
+ BUG_ON(!gate_is_hw_controllable(gate));
+ }
+
+ return true;
+}
+
+static bool hyst_valid(struct bcm_clk_hyst *hyst, const char *clock_name)
+{
+ if (!bit_posn_valid(hyst->en_bit, "hysteresis enable", clock_name))
+ return false;
+
+ if (!bit_posn_valid(hyst->val_bit, "hysteresis value", clock_name))
+ return false;
+
+ return true;
+}
+
+/*
+ * A selector bitfield must be valid. Its parent_sel array must
+ * also be reasonable for the field.
+ */
+static bool sel_valid(struct bcm_clk_sel *sel, const char *field_name,
+ const char *clock_name)
+{
+ if (!bitfield_valid(sel->shift, sel->width, field_name, clock_name))
+ return false;
+
+ if (sel->parent_count) {
+ u32 max_sel;
+ u32 limit;
+
+ /*
+ * Make sure the selector field can hold all the
+ * selector values we expect to be able to use. A
+ * clock only needs to have a selector defined if it
+ * has more than one parent. And in that case the
+ * highest selector value will be in the last entry
+ * in the array.
+ */
+ max_sel = sel->parent_sel[sel->parent_count - 1];
+ limit = (1 << sel->width) - 1;
+ if (max_sel > limit) {
+ pr_err("%s: bad selector for %s "
+ "(%u needs > %u bits)\n",
+ __func__, clock_name, max_sel,
+ sel->width);
+ return false;
+ }
+ } else {
+ pr_warn("%s: ignoring selector for %s (no parents)\n",
+ __func__, clock_name);
+ selector_clear_exists(sel);
+ kfree(sel->parent_sel);
+ sel->parent_sel = NULL;
+ }
+
+ return true;
+}
+
+/*
+ * A fixed divider just needs to be non-zero. A variable divider
+ * has to have a valid divider bitfield, and if it has a fraction,
+ * the width of the fraction must be no more than the width of
+ * the divider as a whole.
+ */
+static bool div_valid(struct bcm_clk_div *div, const char *field_name,
+ const char *clock_name)
+{
+ if (divider_is_fixed(div)) {
+ /* Any fixed divider value but 0 is OK */
+ if (div->u.fixed == 0) {
+ pr_err("%s: bad %s fixed value 0 for %s\n", __func__,
+ field_name, clock_name);
+ return false;
+ }
+ return true;
+ }
+ if (!bitfield_valid(div->u.s.shift, div->u.s.width,
+ field_name, clock_name))
+ return false;
+
+ if (divider_has_fraction(div))
+ if (div->u.s.frac_width > div->u.s.width) {
+ pr_warn("%s: bad %s fraction width for %s (%u > %u)\n",
+ __func__, field_name, clock_name,
+ div->u.s.frac_width, div->u.s.width);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * If a clock has two dividers, their combined fractional bit widths
+ * must not exceed 32. This is because we scale up a dividend using
+ * both dividers before dividing to improve accuracy, and the fully
+ * scaled value must not overflow.
+ */
+static bool kona_dividers_valid(struct kona_clk *bcm_clk)
+{
+ struct peri_clk_data *peri = bcm_clk->u.peri;
+ struct bcm_clk_div *div;
+ struct bcm_clk_div *pre_div;
+ u32 limit;
+
+ BUG_ON(bcm_clk->type != bcm_clk_peri);
+
+ if (!divider_exists(&peri->div) || !divider_exists(&peri->pre_div))
+ return true;
+
+ div = &peri->div;
+ pre_div = &peri->pre_div;
+ if (divider_is_fixed(div) || divider_is_fixed(pre_div))
+ return true;
+
+ limit = BITS_PER_BYTE * sizeof(u32);
+
+ return div->u.s.frac_width + pre_div->u.s.frac_width <= limit;
+}
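+
+/*
+ * For example (editor's note), a divider with a 20-bit fraction and
+ * a pre-divider with a 16-bit fraction would need 36 bits of
+ * fractional scaling, which the check above rejects; two 16-bit
+ * fractions (32 bits combined) are the most it allows.
+ */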
+
+
+/* A trigger just needs to represent a valid bit position */
+static bool trig_valid(struct bcm_clk_trig *trig, const char *field_name,
+ const char *clock_name)
+{
+ return bit_posn_valid(trig->bit, field_name, clock_name);
+}
+
+/* Determine whether the set of peripheral clock registers are valid. */
+static bool
+peri_clk_data_valid(struct kona_clk *bcm_clk)
+{
+ struct peri_clk_data *peri;
+ struct bcm_clk_policy *policy;
+ struct bcm_clk_gate *gate;
+ struct bcm_clk_hyst *hyst;
+ struct bcm_clk_sel *sel;
+ struct bcm_clk_div *div;
+ struct bcm_clk_div *pre_div;
+ struct bcm_clk_trig *trig;
+ const char *name;
+
+ BUG_ON(bcm_clk->type != bcm_clk_peri);
+
+ /*
+ * First validate register offsets. This is the only place
+ * where we need something from the ccu, so we do these
+ * together.
+ */
+ if (!peri_clk_data_offsets_valid(bcm_clk))
+ return false;
+
+ peri = bcm_clk->u.peri;
+ name = bcm_clk->init_data.name;
+
+ policy = &peri->policy;
+ if (policy_exists(policy) && !policy_valid(policy, name))
+ return false;
+
+ gate = &peri->gate;
+ if (gate_exists(gate) && !gate_valid(gate, "gate", name))
+ return false;
+
+ hyst = &peri->hyst;
+ if (hyst_exists(hyst) && !hyst_valid(hyst, name))
+ return false;
+
+ sel = &peri->sel;
+ if (selector_exists(sel)) {
+ if (!sel_valid(sel, "selector", name))
+ return false;
+
+ } else if (sel->parent_count > 1) {
+ pr_err("%s: multiple parents but no selector for %s\n",
+ __func__, name);
+
+ return false;
+ }
+
+ div = &peri->div;
+ pre_div = &peri->pre_div;
+ if (divider_exists(div)) {
+ if (!div_valid(div, "divider", name))
+ return false;
+
+ if (divider_exists(pre_div))
+ if (!div_valid(pre_div, "pre-divider", name))
+ return false;
+ } else if (divider_exists(pre_div)) {
+ pr_err("%s: pre-divider but no divider for %s\n", __func__,
+ name);
+ return false;
+ }
+
+ trig = &peri->trig;
+ if (trigger_exists(trig)) {
+ if (!trig_valid(trig, "trigger", name))
+ return false;
+
+ if (trigger_exists(&peri->pre_trig)) {
+ if (!trig_valid(&peri->pre_trig, "pre-trigger", name))
+ return false;
+ }
+ if (!clk_requires_trigger(bcm_clk)) {
+ pr_warn("%s: ignoring trigger for %s (not needed)\n",
+ __func__, name);
+ trigger_clear_exists(trig);
+ }
+ } else if (trigger_exists(&peri->pre_trig)) {
+ pr_err("%s: pre-trigger but no trigger for %s\n", __func__,
+ name);
+ return false;
+ } else if (clk_requires_trigger(bcm_clk)) {
+ pr_err("%s: required trigger missing for %s\n", __func__,
+ name);
+ return false;
+ }
+
+ return kona_dividers_valid(bcm_clk);
+}
+
+static bool kona_clk_valid(struct kona_clk *bcm_clk)
+{
+ switch (bcm_clk->type) {
+ case bcm_clk_peri:
+ if (!peri_clk_data_valid(bcm_clk))
+ return false;
+ break;
+ default:
+ pr_err("%s: unrecognized clock type (%d)\n", __func__,
+ (int)bcm_clk->type);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Scan an array of parent clock names to determine whether there
+ * are any entries containing BAD_CLK_NAME. Such entries are
+ * placeholders for non-supported clocks. Keep track of the
+ * position of each clock name in the original array.
+ *
+ * Allocates an array of pointers to hold the names of all defined
+ * (non-placeholder) entries in the original array, and returns a
+ * pointer to that array in *names. This will be used for registering the
+ * clock with the common clock code. On successful return,
+ * *count indicates how many entries are in that names array.
+ *
+ * If there is more than one entry in the resulting names array,
+ * another array is allocated to record the parent selector value
+ * for each (defined) parent clock. This is the value that
+ * represents this parent clock in the clock's source selector
+ * register. The position of the clock in the original parent array
+ * defines that selector value. The number of entries in this array
+ * is the same as the number of entries in the parent names array.
+ *
+ * The array of selector values is returned. If the clock has no
+ * parents, no selector is required and a null pointer is returned.
+ *
+ * Returns a null pointer if the clock names array supplied was
+ * null. (This is not an error.)
+ *
+ * Returns a pointer-coded error if an error occurs.
+ */
+static u32 *parent_process(const char *clocks[],
+ u32 *count, const char ***names)
+{
+ static const char **parent_names;
+ static u32 *parent_sel;
+ const char **clock;
+ u32 parent_count;
+ u32 bad_count = 0;
+ u32 orig_count;
+ u32 i;
+ u32 j;
+
+ *count = 0; /* In case of early return */
+ *names = NULL;
+ if (!clocks)
+ return NULL;
+
+ /*
+ * Count the number of names in the null-terminated array,
+ * and find out how many of those are actually clock names.
+ */
+ for (clock = clocks; *clock; clock++)
+ if (*clock == BAD_CLK_NAME)
+ bad_count++;
+ orig_count = (u32)(clock - clocks);
+ parent_count = orig_count - bad_count;
+
+ /* If all clocks are unsupported, we treat it as no clock */
+ if (!parent_count)
+ return NULL;
+
+ /* Avoid exceeding our parent clock limit */
+ if (parent_count > PARENT_COUNT_MAX) {
+ pr_err("%s: too many parents (%u > %u)\n", __func__,
+ parent_count, PARENT_COUNT_MAX);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * There is one parent name for each defined parent clock.
+ * We also maintain an array containing the selector value
+ * for each defined clock. If there's only one clock, the
+ * selector is not required, but we allocate space for the
+ * array anyway to keep things simple.
+ */
+ parent_names = kmalloc_array(parent_count, sizeof(*parent_names),
+ GFP_KERNEL);
+ if (!parent_names)
+ return ERR_PTR(-ENOMEM);
+
+ /* There is at least one parent, so allocate a selector array */
+ parent_sel = kmalloc_array(parent_count, sizeof(*parent_sel),
+ GFP_KERNEL);
+ if (!parent_sel) {
+ kfree(parent_names);
+
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Now fill in the parent names and selector arrays */
+ for (i = 0, j = 0; i < orig_count; i++) {
+ if (clocks[i] != BAD_CLK_NAME) {
+ parent_names[j] = clocks[i];
+ parent_sel[j] = i;
+ j++;
+ }
+ }
+ *names = parent_names;
+ *count = parent_count;
+
+ return parent_sel;
+}
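+
+/*
+ * Editor's illustrative sketch (the clock names and the placeholder
+ * position are hypothetical, not taken from any real CCU): it shows
+ * how BAD_CLK_NAME entries are skipped while their positions are kept
+ * as selector values.
+ */
+static int __maybe_unused parent_process_example(void)
+{
+ const char *clocks[] = { "ref_crystal", BAD_CLK_NAME, "var_96m", NULL };
+ const char **names = NULL;
+ u32 count = 0;
+ u32 *sel;
+
+ sel = parent_process(clocks, &count, &names);
+ if (IS_ERR(sel))
+ return PTR_ERR(sel);
+
+ /*
+ * Here count is 2, names[] is { "ref_crystal", "var_96m" } and
+ * sel[] is { 0, 2 }.
+ */
+ kfree(names);
+ kfree(sel);
+
+ return 0;
+}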
+
+static int
+clk_sel_setup(const char **clocks, struct bcm_clk_sel *sel,
+ struct clk_init_data *init_data)
+{
+ const char **parent_names = NULL;
+ u32 parent_count = 0;
+ u32 *parent_sel;
+
+ /*
+ * If a peripheral clock has multiple parents, the value
+ * used by the hardware to select that parent is represented
+ * by the parent clock's position in the "clocks" list. Some
+ * values don't have defined or supported clocks; these will
+ * have BAD_CLK_NAME entries in the parents[] array. The
+ * list is terminated by a NULL entry.
+ *
+ * We need to supply (only) the names of defined parent
+ * clocks when registering a clock though, so we use an
+ * array of parent selector values to map between the
+ * indexes the common clock code uses and the selector
+ * values we need.
+ */
+ parent_sel = parent_process(clocks, &parent_count, &parent_names);
+ if (IS_ERR(parent_sel)) {
+ int ret = PTR_ERR(parent_sel);
+
+ pr_err("%s: error processing parent clocks for %s (%d)\n",
+ __func__, init_data->name, ret);
+
+ return ret;
+ }
+
+ init_data->parent_names = parent_names;
+ init_data->num_parents = parent_count;
+
+ sel->parent_count = parent_count;
+ sel->parent_sel = parent_sel;
+
+ return 0;
+}
+
+static void clk_sel_teardown(struct bcm_clk_sel *sel,
+ struct clk_init_data *init_data)
+{
+ kfree(sel->parent_sel);
+ sel->parent_sel = NULL;
+ sel->parent_count = 0;
+
+ init_data->num_parents = 0;
+ kfree(init_data->parent_names);
+ init_data->parent_names = NULL;
+}
+
+static void peri_clk_teardown(struct peri_clk_data *data,
+ struct clk_init_data *init_data)
+{
+ clk_sel_teardown(&data->sel, init_data);
+}
+
+/*
+ * The caller is responsible for freeing the parent_names[] and
+ * parent_sel[] arrays that get assigned in the peripheral clock's
+ * "data" structure when the clock has one or more parent clocks
+ * associated with it.
+ */
+static int
+peri_clk_setup(struct peri_clk_data *data, struct clk_init_data *init_data)
+{
+ init_data->flags = CLK_IGNORE_UNUSED;
+
+ return clk_sel_setup(data->clocks, &data->sel, init_data);
+}
+
+static void bcm_clk_teardown(struct kona_clk *bcm_clk)
+{
+ switch (bcm_clk->type) {
+ case bcm_clk_peri:
+ peri_clk_teardown(bcm_clk->u.data, &bcm_clk->init_data);
+ break;
+ default:
+ break;
+ }
+ bcm_clk->u.data = NULL;
+ bcm_clk->type = bcm_clk_none;
+}
+
+static void kona_clk_teardown(struct clk_hw *hw)
+{
+ struct kona_clk *bcm_clk;
+
+ if (!hw)
+ return;
+
+ clk_hw_unregister(hw);
+
+ bcm_clk = to_kona_clk(hw);
+ bcm_clk_teardown(bcm_clk);
+}
+
+static int kona_clk_setup(struct kona_clk *bcm_clk)
+{
+ int ret;
+ struct clk_init_data *init_data = &bcm_clk->init_data;
+
+ switch (bcm_clk->type) {
+ case bcm_clk_peri:
+ ret = peri_clk_setup(bcm_clk->u.data, init_data);
+ if (ret)
+ return ret;
+ break;
+ default:
+ pr_err("%s: clock type %d invalid for %s\n", __func__,
+ (int)bcm_clk->type, init_data->name);
+ return -EINVAL;
+ }
+
+ /* Make sure everything makes sense before we set it up */
+ if (!kona_clk_valid(bcm_clk)) {
+ pr_err("%s: clock data invalid for %s\n", __func__,
+ init_data->name);
+ ret = -EINVAL;
+ goto out_teardown;
+ }
+
+ bcm_clk->hw.init = init_data;
+ ret = clk_hw_register(NULL, &bcm_clk->hw);
+ if (ret) {
+ pr_err("%s: error registering clock %s (%d)\n", __func__,
+ init_data->name, ret);
+ goto out_teardown;
+ }
+
+ return 0;
+out_teardown:
+ bcm_clk_teardown(bcm_clk);
+
+ return ret;
+}
+
+static void ccu_clks_teardown(struct ccu_data *ccu)
+{
+ u32 i;
+
+ for (i = 0; i < ccu->clk_num; i++)
+ kona_clk_teardown(&ccu->kona_clks[i].hw);
+}
+
+static void kona_ccu_teardown(struct ccu_data *ccu)
+{
+ if (!ccu->base)
+ return;
+
+ of_clk_del_provider(ccu->node); /* safe if never added */
+ ccu_clks_teardown(ccu);
+ of_node_put(ccu->node);
+ ccu->node = NULL;
+ iounmap(ccu->base);
+ ccu->base = NULL;
+}
+
+static bool ccu_data_valid(struct ccu_data *ccu)
+{
+ struct ccu_policy *ccu_policy;
+
+ if (!ccu_data_offsets_valid(ccu))
+ return false;
+
+ ccu_policy = &ccu->policy;
+ if (ccu_policy_exists(ccu_policy))
+ if (!ccu_policy_valid(ccu_policy, ccu->name))
+ return false;
+
+ return true;
+}
+
+static struct clk_hw *
+of_clk_kona_onecell_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct ccu_data *ccu = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= ccu->clk_num) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &ccu->kona_clks[idx].hw;
+}
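+
+/*
+ * Editor's illustrative sketch: a consumer whose device tree node
+ * contains "clocks = <&ccu 2>;" (phandle plus one cell) resolves here
+ * with clkspec->args[0] == 2 and is handed &ccu->kona_clks[2].hw.
+ */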
+
+/*
+ * Set up a CCU. Call the provided ccu_clks_setup callback to
+ * initialize the array of clocks provided by the CCU.
+ */
+void __init kona_dt_ccu_setup(struct ccu_data *ccu,
+ struct device_node *node)
+{
+ struct resource res = { 0 };
+ resource_size_t range;
+ unsigned int i;
+ int ret;
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ pr_err("%s: no valid CCU registers found for %pOFn\n", __func__,
+ node);
+ goto out_err;
+ }
+
+ range = resource_size(&res);
+ if (range > (resource_size_t)U32_MAX) {
+ pr_err("%s: address range too large for %pOFn\n", __func__,
+ node);
+ goto out_err;
+ }
+
+ ccu->range = (u32)range;
+
+ if (!ccu_data_valid(ccu)) {
+ pr_err("%s: ccu data not valid for %pOFn\n", __func__, node);
+ goto out_err;
+ }
+
+ ccu->base = ioremap(res.start, ccu->range);
+ if (!ccu->base) {
+ pr_err("%s: unable to map CCU registers for %pOFn\n", __func__,
+ node);
+ goto out_err;
+ }
+ ccu->node = of_node_get(node);
+
+ /*
+ * Set up each defined kona clock and save the result in
+ * the clock framework clock array (in ccu->kona_clks). Then
+ * register as a provider for these clocks.
+ */
+ for (i = 0; i < ccu->clk_num; i++) {
+ if (!ccu->kona_clks[i].ccu)
+ continue;
+ kona_clk_setup(&ccu->kona_clks[i]);
+ }
+
+ ret = of_clk_add_hw_provider(node, of_clk_kona_onecell_get, ccu);
+ if (ret) {
+ pr_err("%s: error adding ccu %pOFn as provider (%d)\n", __func__,
+ node, ret);
+ goto out_err;
+ }
+
+ if (!kona_ccu_init(ccu))
+ pr_err("Broadcom %pOFn initialization had errors\n", node);
+
+ return;
+out_err:
+ kona_ccu_teardown(ccu);
+ pr_err("Broadcom %pOFn setup aborted\n", node);
+}
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
new file mode 100644
index 0000000000..ec5749e301
--- /dev/null
+++ b/drivers/clk/bcm/clk-kona.c
@@ -0,0 +1,1270 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ * Copyright 2013 Linaro Limited
+ */
+
+#include "clk-kona.h"
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+
+/*
+ * "Policies" affect the frequencies of bus clocks provided by a
+ * CCU. (I believe these policies are named "Deep Sleep", "Economy",
+ * "Normal", and "Turbo".) A lower policy number has lower power
+ * consumption, and policy 2 is the default.
+ */
+#define CCU_POLICY_COUNT 4
+
+#define CCU_ACCESS_PASSWORD 0xA5A500
+#define CLK_GATE_DELAY_LOOP 2000
+
+/* Bitfield operations */
+
+/* Produces a mask of set bits covering a range of a 32-bit value */
+static inline u32 bitfield_mask(u32 shift, u32 width)
+{
+ return ((1 << width) - 1) << shift;
+}
+
+/* Extract the value of a bitfield found within a given register value */
+static inline u32 bitfield_extract(u32 reg_val, u32 shift, u32 width)
+{
+ return (reg_val & bitfield_mask(shift, width)) >> shift;
+}
+
+/* Replace the value of a bitfield found within a given register value */
+static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val)
+{
+ u32 mask = bitfield_mask(shift, width);
+
+ return (reg_val & ~mask) | (val << shift);
+}
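+
+/*
+ * Editor's illustrative sketch of the helpers above (the register
+ * value and field position are arbitrary): the 3-bit field at bit 4
+ * of 0xffffff0f reads back as 0, and replacing it with 5 yields
+ * 0xffffff5f.
+ */
+static u32 __maybe_unused bitfield_example(void)
+{
+ u32 reg_val = 0xffffff0f;
+ u32 field;
+
+ field = bitfield_extract(reg_val, 4, 3); /* 0 */
+
+ return bitfield_replace(reg_val, 4, 3, field + 5); /* 0xffffff5f */
+}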
+
+/* Divider and scaling helpers */
+
+/* Convert a divider into the scaled divisor value it represents. */
+static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
+{
+ return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
+}
+
+/*
+ * Build a scaled divider value as close as possible to the
+ * given whole part (div_value) and fractional part (expressed
+ * in billionths).
+ */
+u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
+{
+ u64 combined;
+
+ BUG_ON(!div_value);
+ BUG_ON(billionths >= BILLION);
+
+ combined = (u64)div_value * BILLION + billionths;
+ combined <<= div->u.s.frac_width;
+
+ return DIV_ROUND_CLOSEST_ULL(combined, BILLION);
+}
+
+/* The scaled minimum divisor representable by a divider */
+static inline u64
+scaled_div_min(struct bcm_clk_div *div)
+{
+ if (divider_is_fixed(div))
+ return (u64)div->u.fixed;
+
+ return scaled_div_value(div, 0);
+}
+
+/* The scaled maximum divisor representable by a divider */
+u64 scaled_div_max(struct bcm_clk_div *div)
+{
+ u32 reg_div;
+
+ if (divider_is_fixed(div))
+ return (u64)div->u.fixed;
+
+ reg_div = ((u32)1 << div->u.s.width) - 1;
+
+ return scaled_div_value(div, reg_div);
+}
+
+/*
+ * Convert a scaled divisor into its divider representation as
+ * stored in a divider register field.
+ */
+static inline u32
+divider(struct bcm_clk_div *div, u64 scaled_div)
+{
+ BUG_ON(scaled_div < scaled_div_min(div));
+ BUG_ON(scaled_div > scaled_div_max(div));
+
+ return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width));
+}
+
+/* Return a rate scaled for use when dividing by a scaled divisor. */
+static inline u64
+scale_rate(struct bcm_clk_div *div, u32 rate)
+{
+ if (divider_is_fixed(div))
+ return (u64)rate;
+
+ return (u64)rate << div->u.s.frac_width;
+}
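+
+/*
+ * Editor's illustrative sketch of the scaling helpers above, using a
+ * hypothetical variable divider with a 5-bit field of which 3 bits
+ * are fractional: the divisor 2.5 is scaled to 20, recorded in the
+ * register field as 12, and a 100 MHz rate is scaled to 800000000
+ * before any division takes place.
+ */
+static void __maybe_unused scaled_div_example(void)
+{
+ struct bcm_clk_div div = { };
+ u64 scaled;
+
+ div.u.s.width = 5;
+ div.u.s.frac_width = 3;
+
+ scaled = scaled_div_build(&div, 2, 500000000); /* 20 */
+ (void)divider(&div, scaled); /* 20 - (1 << 3) = 12 */
+ (void)scale_rate(&div, 100000000); /* 100000000 << 3 */
+}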
+
+/* CCU access */
+
+/* Read a 32-bit register value from a CCU's address space. */
+static inline u32 __ccu_read(struct ccu_data *ccu, u32 reg_offset)
+{
+ return readl(ccu->base + reg_offset);
+}
+
+/* Write a 32-bit register value into a CCU's address space. */
+static inline void
+__ccu_write(struct ccu_data *ccu, u32 reg_offset, u32 reg_val)
+{
+ writel(reg_val, ccu->base + reg_offset);
+}
+
+static inline unsigned long ccu_lock(struct ccu_data *ccu)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ccu->lock, flags);
+
+ return flags;
+}
+static inline void ccu_unlock(struct ccu_data *ccu, unsigned long flags)
+{
+ spin_unlock_irqrestore(&ccu->lock, flags);
+}
+
+/*
+ * Enable/disable write access to CCU protected registers. The
+ * WR_ACCESS register for all CCUs is at offset 0.
+ */
+static inline void __ccu_write_enable(struct ccu_data *ccu)
+{
+ if (ccu->write_enabled) {
+ pr_err("%s: access already enabled for %s\n", __func__,
+ ccu->name);
+ return;
+ }
+ ccu->write_enabled = true;
+ __ccu_write(ccu, 0, CCU_ACCESS_PASSWORD | 1);
+}
+
+static inline void __ccu_write_disable(struct ccu_data *ccu)
+{
+ if (!ccu->write_enabled) {
+ pr_err("%s: access wasn't enabled for %s\n", __func__,
+ ccu->name);
+ return;
+ }
+
+ __ccu_write(ccu, 0, CCU_ACCESS_PASSWORD);
+ ccu->write_enabled = false;
+}
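+
+/*
+ * Editor's illustrative sketch of the usual access pattern for
+ * protected CCU registers (the 0x0200 offset and the zero value
+ * written are hypothetical):
+ */
+static void __maybe_unused ccu_protected_write_example(struct ccu_data *ccu)
+{
+ unsigned long flags;
+
+ flags = ccu_lock(ccu);
+ __ccu_write_enable(ccu);
+
+ __ccu_write(ccu, 0x0200, 0);
+
+ __ccu_write_disable(ccu);
+ ccu_unlock(ccu, flags);
+}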
+
+/*
+ * Poll a register in a CCU's address space, returning when the
+ * specified bit in that register's value is set (or clear). Delay
+ * a microsecond after each read of the register. Returns true if
+ * successful, or false if we gave up trying.
+ *
+ * Caller must ensure the CCU lock is held.
+ */
+static inline bool
+__ccu_wait_bit(struct ccu_data *ccu, u32 reg_offset, u32 bit, bool want)
+{
+ unsigned int tries;
+ u32 bit_mask = 1 << bit;
+
+ for (tries = 0; tries < CLK_GATE_DELAY_LOOP; tries++) {
+ u32 val;
+ bool bit_val;
+
+ val = __ccu_read(ccu, reg_offset);
+ bit_val = (val & bit_mask) != 0;
+ if (bit_val == want)
+ return true;
+ udelay(1);
+ }
+ pr_warn("%s: %s/0x%04x bit %u was never %s\n", __func__,
+ ccu->name, reg_offset, bit, want ? "set" : "clear");
+
+ return false;
+}
+
+/* Policy operations */
+
+static bool __ccu_policy_engine_start(struct ccu_data *ccu, bool sync)
+{
+ struct bcm_policy_ctl *control = &ccu->policy.control;
+ u32 offset;
+ u32 go_bit;
+ u32 mask;
+ bool ret;
+
+ /* If we don't need to control policy for this CCU, we're done. */
+ if (!policy_ctl_exists(control))
+ return true;
+
+ offset = control->offset;
+ go_bit = control->go_bit;
+
+ /* Ensure we're not busy before we start */
+ ret = __ccu_wait_bit(ccu, offset, go_bit, false);
+ if (!ret) {
+ pr_err("%s: ccu %s policy engine wouldn't go idle\n",
+ __func__, ccu->name);
+ return false;
+ }
+
+ /*
+ * If it's a synchronous request, we'll wait for the voltage
+ * and frequency of the active load to stabilize before
+ * returning. To do this we select the active load by
+ * setting the ATL bit.
+ *
+ * An asynchronous request instead ramps the voltage in the
+ * background, and when that process stabilizes, the target
+ * load is copied to the active load and the CCU frequency
+ * is switched. We do this by selecting the target load
+ * (ATL bit clear) and setting the request auto-copy (AC bit
+ * set).
+ *
+ * Note, we do NOT read-modify-write this register.
+ */
+ mask = (u32)1 << go_bit;
+ if (sync)
+ mask |= 1 << control->atl_bit;
+ else
+ mask |= 1 << control->ac_bit;
+ __ccu_write(ccu, offset, mask);
+
+ /* Wait for indication that operation is complete. */
+ ret = __ccu_wait_bit(ccu, offset, go_bit, false);
+ if (!ret)
+ pr_err("%s: ccu %s policy engine never started\n",
+ __func__, ccu->name);
+
+ return ret;
+}
+
+static bool __ccu_policy_engine_stop(struct ccu_data *ccu)
+{
+ struct bcm_lvm_en *enable = &ccu->policy.enable;
+ u32 offset;
+ u32 enable_bit;
+ bool ret;
+
+ /* If we don't need to control policy for this CCU, we're done. */
+ if (!policy_lvm_en_exists(enable))
+ return true;
+
+ /* Ensure we're not busy before we start */
+ offset = enable->offset;
+ enable_bit = enable->bit;
+ ret = __ccu_wait_bit(ccu, offset, enable_bit, false);
+ if (!ret) {
+ pr_err("%s: ccu %s policy engine already stopped\n",
+ __func__, ccu->name);
+ return false;
+ }
+
+ /* Now set the bit to stop the engine (NO read-modify-write) */
+ __ccu_write(ccu, offset, (u32)1 << enable_bit);
+
+ /* Wait for indication that it has stopped. */
+ ret = __ccu_wait_bit(ccu, offset, enable_bit, false);
+ if (!ret)
+ pr_err("%s: ccu %s policy engine never stopped\n",
+ __func__, ccu->name);
+
+ return ret;
+}
+
+/*
+ * A CCU has four operating conditions ("policies"), and some clocks
+ * can be disabled or enabled based on which policy is currently in
+ * effect. Such clocks have a bit in a "policy mask" register for
+ * each policy indicating whether the clock is enabled for that
+ * policy or not. The bit position for a clock is the same for all
+ * four registers, and the 32-bit registers are at consecutive
+ * addresses.
+ */
+static bool policy_init(struct ccu_data *ccu, struct bcm_clk_policy *policy)
+{
+ u32 offset;
+ u32 mask;
+ int i;
+ bool ret;
+
+ if (!policy_exists(policy))
+ return true;
+
+ /*
+ * We need to stop the CCU policy engine to allow update
+ * of our policy bits.
+ */
+ if (!__ccu_policy_engine_stop(ccu)) {
+ pr_err("%s: unable to stop CCU %s policy engine\n",
+ __func__, ccu->name);
+ return false;
+ }
+
+ /*
+ * For now, if a clock defines its policy bit we just mark
+ * it "enabled" for all four policies.
+ */
+ offset = policy->offset;
+ mask = (u32)1 << policy->bit;
+ for (i = 0; i < CCU_POLICY_COUNT; i++) {
+ u32 reg_val;
+
+ reg_val = __ccu_read(ccu, offset);
+ reg_val |= mask;
+ __ccu_write(ccu, offset, reg_val);
+ offset += sizeof(u32);
+ }
+
+ /* We're done updating; fire up the policy engine again. */
+ ret = __ccu_policy_engine_start(ccu, true);
+ if (!ret)
+ pr_err("%s: unable to restart CCU %s policy engine\n",
+ __func__, ccu->name);
+
+ return ret;
+}
+
+/* Gate operations */
+
+/* Determine whether a clock is gated. CCU lock must be held. */
+static bool
+__is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
+{
+ u32 bit_mask;
+ u32 reg_val;
+
+ /* If there is no gate we can assume it's enabled. */
+ if (!gate_exists(gate))
+ return true;
+
+ bit_mask = 1 << gate->status_bit;
+ reg_val = __ccu_read(ccu, gate->offset);
+
+ return (reg_val & bit_mask) != 0;
+}
+
+/* Determine whether a clock is gated. */
+static bool
+is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
+{
+ unsigned long flags;
+ bool ret;
+
+ /* Avoid taking the lock if we can */
+ if (!gate_exists(gate))
+ return true;
+
+ flags = ccu_lock(ccu);
+ ret = __is_clk_gate_enabled(ccu, gate);
+ ccu_unlock(ccu, flags);
+
+ return ret;
+}
+
+/*
+ * Commit our desired gate state to the hardware.
+ * Returns true if successful, false otherwise.
+ */
+static bool
+__gate_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate)
+{
+ u32 reg_val;
+ u32 mask;
+ bool enabled = false;
+
+ BUG_ON(!gate_exists(gate));
+ if (!gate_is_sw_controllable(gate))
+ return true; /* Nothing we can change */
+
+ reg_val = __ccu_read(ccu, gate->offset);
+
+ /* For a hardware/software gate, set which is in control */
+ if (gate_is_hw_controllable(gate)) {
+ mask = (u32)1 << gate->hw_sw_sel_bit;
+ if (gate_is_sw_managed(gate))
+ reg_val |= mask;
+ else
+ reg_val &= ~mask;
+ }
+
+ /*
+ * If software is in control, enable or disable the gate.
+ * If hardware is, clear the enabled bit for good measure.
+ * If a software controlled gate can't be disabled, we're
+ * required to write a 0 into the enable bit (but the gate
+ * will be enabled).
+ */
+ mask = (u32)1 << gate->en_bit;
+ if (gate_is_sw_managed(gate) && (enabled = gate_is_enabled(gate)) &&
+ !gate_is_no_disable(gate))
+ reg_val |= mask;
+ else
+ reg_val &= ~mask;
+
+ __ccu_write(ccu, gate->offset, reg_val);
+
+ /* For a hardware controlled gate, we're done */
+ if (!gate_is_sw_managed(gate))
+ return true;
+
+ /* Otherwise wait for the gate to be in desired state */
+ return __ccu_wait_bit(ccu, gate->offset, gate->status_bit, enabled);
+}
+
+/*
+ * Initialize a gate. Our desired state (hardware/software select,
+ * and if software, its enable state) is committed to hardware
+ * without the usual checks to see if it's already set up that way.
+ * Returns true if successful, false otherwise.
+ */
+static bool gate_init(struct ccu_data *ccu, struct bcm_clk_gate *gate)
+{
+ if (!gate_exists(gate))
+ return true;
+ return __gate_commit(ccu, gate);
+}
+
+/*
+ * Set a gate to enabled or disabled state. Does nothing if the
+ * gate is not currently under software control, or if it is already
+ * in the requested state. Returns true if successful, false
+ * otherwise. CCU lock must be held.
+ */
+static bool
+__clk_gate(struct ccu_data *ccu, struct bcm_clk_gate *gate, bool enable)
+{
+ bool ret;
+
+ if (!gate_exists(gate) || !gate_is_sw_managed(gate))
+ return true; /* Nothing to do */
+
+ if (!enable && gate_is_no_disable(gate)) {
+ pr_warn("%s: invalid gate disable request (ignoring)\n",
+ __func__);
+ return true;
+ }
+
+ if (enable == gate_is_enabled(gate))
+ return true; /* No change */
+
+ gate_flip_enabled(gate);
+ ret = __gate_commit(ccu, gate);
+ if (!ret)
+ gate_flip_enabled(gate); /* Revert the change */
+
+ return ret;
+}
+
+/* Enable or disable a gate. Returns 0 if successful, -EIO otherwise */
+static int clk_gate(struct ccu_data *ccu, const char *name,
+ struct bcm_clk_gate *gate, bool enable)
+{
+ unsigned long flags;
+ bool success;
+
+ /*
+ * Avoid taking the lock if we can. We quietly ignore
+ * requests to change state that don't make sense.
+ */
+ if (!gate_exists(gate) || !gate_is_sw_managed(gate))
+ return 0;
+ if (!enable && gate_is_no_disable(gate))
+ return 0;
+
+ flags = ccu_lock(ccu);
+ __ccu_write_enable(ccu);
+
+ success = __clk_gate(ccu, gate, enable);
+
+ __ccu_write_disable(ccu);
+ ccu_unlock(ccu, flags);
+
+ if (success)
+ return 0;
+
+ pr_err("%s: failed to %s gate for %s\n", __func__,
+ enable ? "enable" : "disable", name);
+
+ return -EIO;
+}
+
+/* Hysteresis operations */
+
+/*
+ * If a clock gate requires a turn-off delay it will have
+ * "hysteresis" register bits defined. The first, if set, enables
+ * the delay; and if enabled, the second bit determines whether the
+ * delay is "low" or "high" (1 means high). For now, if it's
+ * defined for a clock, we set it.
+ */
+static bool hyst_init(struct ccu_data *ccu, struct bcm_clk_hyst *hyst)
+{
+ u32 offset;
+ u32 reg_val;
+ u32 mask;
+
+ if (!hyst_exists(hyst))
+ return true;
+
+ offset = hyst->offset;
+ mask = (u32)1 << hyst->en_bit;
+ mask |= (u32)1 << hyst->val_bit;
+
+ reg_val = __ccu_read(ccu, offset);
+ reg_val |= mask;
+ __ccu_write(ccu, offset, reg_val);
+
+ return true;
+}
+
+/* Trigger operations */
+
+/*
+ * Caller must ensure CCU lock is held and access is enabled.
+ * Returns true if successful, false otherwise.
+ */
+static bool __clk_trigger(struct ccu_data *ccu, struct bcm_clk_trig *trig)
+{
+ /* Trigger the clock and wait for it to finish */
+ __ccu_write(ccu, trig->offset, 1 << trig->bit);
+
+ return __ccu_wait_bit(ccu, trig->offset, trig->bit, false);
+}
+
+/* Divider operations */
+
+/* Read a divider value and return the scaled divisor it represents. */
+static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div)
+{
+ unsigned long flags;
+ u32 reg_val;
+ u32 reg_div;
+
+ if (divider_is_fixed(div))
+ return (u64)div->u.fixed;
+
+ flags = ccu_lock(ccu);
+ reg_val = __ccu_read(ccu, div->u.s.offset);
+ ccu_unlock(ccu, flags);
+
+ /* Extract the full divider field from the register value */
+ reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width);
+
+ /* Return the scaled divisor value it represents */
+ return scaled_div_value(div, reg_div);
+}
+
+/*
+ * Convert a divider's scaled divisor value into its recorded form
+ * and commit it into the hardware divider register.
+ *
+ * Returns 0 on success. Returns -EINVAL for invalid arguments.
+ * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
+ */
+static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
+ struct bcm_clk_div *div, struct bcm_clk_trig *trig)
+{
+ bool enabled;
+ u32 reg_div;
+ u32 reg_val;
+ int ret = 0;
+
+ BUG_ON(divider_is_fixed(div));
+
+ /*
+ * If we're just initializing the divider, and no initial
+ * state was defined in the device tree, we just find out
+ * what its current value is rather than updating it.
+ */
+ if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) {
+ reg_val = __ccu_read(ccu, div->u.s.offset);
+ reg_div = bitfield_extract(reg_val, div->u.s.shift,
+ div->u.s.width);
+ div->u.s.scaled_div = scaled_div_value(div, reg_div);
+
+ return 0;
+ }
+
+ /* Convert the scaled divisor to the value we need to record */
+ reg_div = divider(div, div->u.s.scaled_div);
+
+ /* Clock needs to be enabled before changing the rate */
+ enabled = __is_clk_gate_enabled(ccu, gate);
+ if (!enabled && !__clk_gate(ccu, gate, true)) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ /* Replace the divider value and record the result */
+ reg_val = __ccu_read(ccu, div->u.s.offset);
+ reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width,
+ reg_div);
+ __ccu_write(ccu, div->u.s.offset, reg_val);
+
+ /* If the trigger fails we still want to disable the gate */
+ if (!__clk_trigger(ccu, trig))
+ ret = -EIO;
+
+ /* Disable the clock again if it was disabled to begin with */
+ if (!enabled && !__clk_gate(ccu, gate, false))
+ ret = ret ? ret : -ENXIO; /* return first error */
+out:
+ return ret;
+}
+
+/*
+ * Initialize a divider by committing our desired state to hardware
+ * without the usual checks to see if it's already set up that way.
+ * Returns true if successful, false otherwise.
+ */
+static bool div_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
+ struct bcm_clk_div *div, struct bcm_clk_trig *trig)
+{
+ if (!divider_exists(div) || divider_is_fixed(div))
+ return true;
+ return !__div_commit(ccu, gate, div, trig);
+}
+
+static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
+ struct bcm_clk_div *div, struct bcm_clk_trig *trig,
+ u64 scaled_div)
+{
+ unsigned long flags;
+ u64 previous;
+ int ret;
+
+ BUG_ON(divider_is_fixed(div));
+
+ previous = div->u.s.scaled_div;
+ if (previous == scaled_div)
+ return 0; /* No change */
+
+ div->u.s.scaled_div = scaled_div;
+
+ flags = ccu_lock(ccu);
+ __ccu_write_enable(ccu);
+
+ ret = __div_commit(ccu, gate, div, trig);
+
+ __ccu_write_disable(ccu);
+ ccu_unlock(ccu, flags);
+
+ if (ret)
+ div->u.s.scaled_div = previous; /* Revert the change */
+
+ return ret;
+
+}
+
+/* Common clock rate helpers */
+
+/*
+ * Implement the common clock framework recalc_rate method, taking
+ * into account a divider and an optional pre-divider. The
+ * pre-divider register pointer may be NULL.
+ */
+static unsigned long clk_recalc_rate(struct ccu_data *ccu,
+ struct bcm_clk_div *div, struct bcm_clk_div *pre_div,
+ unsigned long parent_rate)
+{
+ u64 scaled_parent_rate;
+ u64 scaled_div;
+ u64 result;
+
+ if (!divider_exists(div))
+ return parent_rate;
+
+ if (parent_rate > (unsigned long)LONG_MAX)
+ return 0; /* actually this would be a caller bug */
+
+ /*
+ * If there is a pre-divider, divide the scaled parent rate
+ * by the pre-divider value first. In this case--to improve
+ * accuracy--scale the parent rate by *both* the pre-divider
+ * value and the divider before actually computing the
+ * result of the pre-divider.
+ *
+ * If there's only one divider, just scale the parent rate.
+ */
+ if (pre_div && divider_exists(pre_div)) {
+ u64 scaled_rate;
+
+ scaled_rate = scale_rate(pre_div, parent_rate);
+ scaled_rate = scale_rate(div, scaled_rate);
+ scaled_div = divider_read_scaled(ccu, pre_div);
+ scaled_parent_rate = DIV_ROUND_CLOSEST_ULL(scaled_rate,
+ scaled_div);
+ } else {
+ scaled_parent_rate = scale_rate(div, parent_rate);
+ }
+
+ /*
+ * Get the scaled divisor value, and divide the scaled
+ * parent rate by that to determine this clock's resulting
+ * rate.
+ */
+ scaled_div = divider_read_scaled(ccu, div);
+ result = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, scaled_div);
+
+ return (unsigned long)result;
+}
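+
+/*
+ * Editor's worked example of the computation above (hypothetical
+ * values): with a 100 MHz parent, a pre-divider whose scaled divisor
+ * reads back as 2 (no fractional bits) and a divider with a 3-bit
+ * fraction whose scaled divisor reads back as 20 (i.e. 2.5), the
+ * parent rate is first scaled to 100000000 * 8 = 800000000, then
+ * pre-divided to 400000000, and finally divided by 20 to give 20 MHz.
+ */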
+
+/*
+ * Compute the output rate produced when a given parent rate is fed
+ * into two dividers. The pre-divider can be NULL, and even if it's
+ * non-null it may be nonexistent. It's also OK for the divider to
+ * be nonexistent, and in that case the pre-divider is also ignored.
+ *
+ * If scaled_div is non-null, it is used to return the scaled divisor
+ * value used by the (downstream) divider to produce that rate.
+ */
+static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div,
+ struct bcm_clk_div *pre_div,
+ unsigned long rate, unsigned long parent_rate,
+ u64 *scaled_div)
+{
+ u64 scaled_parent_rate;
+ u64 min_scaled_div;
+ u64 max_scaled_div;
+ u64 best_scaled_div;
+ u64 result;
+
+ BUG_ON(!divider_exists(div));
+ BUG_ON(!rate);
+ BUG_ON(parent_rate > (u64)LONG_MAX);
+
+ /*
+ * If there is a pre-divider, divide the scaled parent rate
+ * by the pre-divider value first. In this case--to improve
+ * accuracy--scale the parent rate by *both* the pre-divider
+ * value and the divider before actually computing the
+ * result of the pre-divider.
+ *
+ * If there's only one divider, just scale the parent rate.
+ *
+ * For simplicity we treat the pre-divider as fixed (for now).
+ */
+ if (divider_exists(pre_div)) {
+ u64 scaled_rate;
+ u64 scaled_pre_div;
+
+ scaled_rate = scale_rate(pre_div, parent_rate);
+ scaled_rate = scale_rate(div, scaled_rate);
+ scaled_pre_div = divider_read_scaled(ccu, pre_div);
+ scaled_parent_rate = DIV_ROUND_CLOSEST_ULL(scaled_rate,
+ scaled_pre_div);
+ } else {
+ scaled_parent_rate = scale_rate(div, parent_rate);
+ }
+
+ /*
+ * Compute the best possible divider and ensure it is in
+ * range. A fixed divider can't be changed, so just report
+ * the best we can do.
+ */
+ if (!divider_is_fixed(div)) {
+ best_scaled_div = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate,
+ rate);
+ min_scaled_div = scaled_div_min(div);
+ max_scaled_div = scaled_div_max(div);
+ if (best_scaled_div > max_scaled_div)
+ best_scaled_div = max_scaled_div;
+ else if (best_scaled_div < min_scaled_div)
+ best_scaled_div = min_scaled_div;
+ } else {
+ best_scaled_div = divider_read_scaled(ccu, div);
+ }
+
+ /* OK, figure out the resulting rate */
+ result = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, best_scaled_div);
+
+ if (scaled_div)
+ *scaled_div = best_scaled_div;
+
+ return (long)result;
+}
+
+/* Common clock parent helpers */
+
+/*
+ * For a given parent selector (register field) value, find the
+ * index into a selector's parent_sel array that contains it.
+ * Returns the index, or BAD_CLK_INDEX if it's not found.
+ */
+static u8 parent_index(struct bcm_clk_sel *sel, u8 parent_sel)
+{
+ u8 i;
+
+ BUG_ON(sel->parent_count > (u32)U8_MAX);
+ for (i = 0; i < sel->parent_count; i++)
+ if (sel->parent_sel[i] == parent_sel)
+ return i;
+ return BAD_CLK_INDEX;
+}
+
+/*
+ * Fetch the current value of the selector, and translate that into
+ * its corresponding index in the parent array we registered with
+ * the clock framework.
+ *
+ * Returns parent array index that corresponds with the value found,
+ * or BAD_CLK_INDEX if the found value is out of range.
+ */
+static u8 selector_read_index(struct ccu_data *ccu, struct bcm_clk_sel *sel)
+{
+ unsigned long flags;
+ u32 reg_val;
+ u32 parent_sel;
+ u8 index;
+
+ /* If there's no selector, there's only one parent */
+ if (!selector_exists(sel))
+ return 0;
+
+ /* Get the value in the selector register */
+ flags = ccu_lock(ccu);
+ reg_val = __ccu_read(ccu, sel->offset);
+ ccu_unlock(ccu, flags);
+
+ parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);
+
+ /* Look up that selector's parent array index and return it */
+ index = parent_index(sel, parent_sel);
+ if (index == BAD_CLK_INDEX)
+ pr_err("%s: out-of-range parent selector %u (%s 0x%04x)\n",
+ __func__, parent_sel, ccu->name, sel->offset);
+
+ return index;
+}
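+
+/*
+ * Editor's illustrative sketch: with parent_sel[] = { 0, 2 } (as
+ * built by parent_process() in clk-kona-setup.c for a three-entry
+ * list whose middle entry is BAD_CLK_NAME), a selector field that
+ * reads back 2 maps to parent index 1, while a reading of 1 matches
+ * nothing and yields BAD_CLK_INDEX.
+ */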
+
+/*
+ * Commit our desired selector value to the hardware.
+ *
+ * Returns 0 on success. Returns -EINVAL for invalid arguments.
+ * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
+ */
+static int
+__sel_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
+ struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
+{
+ u32 parent_sel;
+ u32 reg_val;
+ bool enabled;
+ int ret = 0;
+
+ BUG_ON(!selector_exists(sel));
+
+ /*
+ * If we're just initializing the selector, and no initial
+ * state was defined in the device tree, we just find out
+ * what its current value is rather than updating it.
+ */
+ if (sel->clk_index == BAD_CLK_INDEX) {
+ u8 index;
+
+ reg_val = __ccu_read(ccu, sel->offset);
+ parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);
+ index = parent_index(sel, parent_sel);
+ if (index == BAD_CLK_INDEX)
+ return -EINVAL;
+ sel->clk_index = index;
+
+ return 0;
+ }
+
+ BUG_ON((u32)sel->clk_index >= sel->parent_count);
+ parent_sel = sel->parent_sel[sel->clk_index];
+
+ /* Clock needs to be enabled before changing the parent */
+ enabled = __is_clk_gate_enabled(ccu, gate);
+ if (!enabled && !__clk_gate(ccu, gate, true))
+ return -ENXIO;
+
+ /* Replace the selector value and record the result */
+ reg_val = __ccu_read(ccu, sel->offset);
+ reg_val = bitfield_replace(reg_val, sel->shift, sel->width, parent_sel);
+ __ccu_write(ccu, sel->offset, reg_val);
+
+ /* If the trigger fails we still want to disable the gate */
+ if (!__clk_trigger(ccu, trig))
+ ret = -EIO;
+
+ /* Disable the clock again if it was disabled to begin with */
+ if (!enabled && !__clk_gate(ccu, gate, false))
+ ret = ret ? ret : -ENXIO; /* return first error */
+
+ return ret;
+}
+
+/*
+ * Initialize a selector by committing our desired state to hardware
+ * without the usual checks to see if it's already set up that way.
+ * Returns true if successful, false otherwise.
+ */
+static bool sel_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
+ struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
+{
+ if (!selector_exists(sel))
+ return true;
+ return !__sel_commit(ccu, gate, sel, trig);
+}
+
+/*
+ * Write a new value into a selector register to switch to a
+ * different parent clock. Returns 0 on success, or an error code
+ * (from __sel_commit()) otherwise.
+ */
+static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
+ struct bcm_clk_sel *sel, struct bcm_clk_trig *trig,
+ u8 index)
+{
+ unsigned long flags;
+ u8 previous;
+ int ret;
+
+ previous = sel->clk_index;
+ if (previous == index)
+ return 0; /* No change */
+
+ sel->clk_index = index;
+
+ flags = ccu_lock(ccu);
+ __ccu_write_enable(ccu);
+
+ ret = __sel_commit(ccu, gate, sel, trig);
+
+ __ccu_write_disable(ccu);
+ ccu_unlock(ccu, flags);
+
+ if (ret)
+ sel->clk_index = previous; /* Revert the change */
+
+ return ret;
+}
+
+/* Clock operations */
+
+static int kona_peri_clk_enable(struct clk_hw *hw)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
+
+ return clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, true);
+}
+
+static void kona_peri_clk_disable(struct clk_hw *hw)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
+
+ (void)clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, false);
+}
+
+static int kona_peri_clk_is_enabled(struct clk_hw *hw)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
+
+ return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
+}
+
+static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct peri_clk_data *data = bcm_clk->u.peri;
+
+ return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
+ parent_rate);
+}
+
+static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct bcm_clk_div *div = &bcm_clk->u.peri->div;
+
+ if (!divider_exists(div))
+ return clk_hw_get_rate(hw);
+
+ /* Quietly avoid a zero rate */
+ return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div,
+ rate ? rate : 1, *parent_rate, NULL);
+}
+
+static int kona_peri_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct clk_hw *current_parent;
+ unsigned long parent_rate;
+ unsigned long best_delta;
+ unsigned long best_rate;
+ u32 parent_count;
+ long rate;
+ u32 which;
+
+ /*
+ * If there is no other parent to choose, use the current one.
+ * Note: We don't honor (or use) CLK_SET_RATE_NO_REPARENT.
+ */
+ WARN_ON_ONCE(bcm_clk->init_data.flags & CLK_SET_RATE_NO_REPARENT);
+ parent_count = (u32)bcm_clk->init_data.num_parents;
+ if (parent_count < 2) {
+ rate = kona_peri_clk_round_rate(hw, req->rate,
+ &req->best_parent_rate);
+ if (rate < 0)
+ return rate;
+
+ req->rate = rate;
+ return 0;
+ }
+
+ /* Unless we can do better, stick with current parent */
+ current_parent = clk_hw_get_parent(hw);
+ parent_rate = clk_hw_get_rate(current_parent);
+ best_rate = kona_peri_clk_round_rate(hw, req->rate, &parent_rate);
+ best_delta = abs(best_rate - req->rate);
+
+ /* Check whether any other parent clock can produce a better result */
+ for (which = 0; which < parent_count; which++) {
+ struct clk_hw *parent = clk_hw_get_parent_by_index(hw, which);
+ unsigned long delta;
+ unsigned long other_rate;
+
+ BUG_ON(!parent);
+ if (parent == current_parent)
+ continue;
+
+ /* We don't support CLK_SET_RATE_PARENT */
+ parent_rate = clk_hw_get_rate(parent);
+ other_rate = kona_peri_clk_round_rate(hw, req->rate,
+ &parent_rate);
+ delta = abs(other_rate - req->rate);
+ if (delta < best_delta) {
+ best_delta = delta;
+ best_rate = other_rate;
+ req->best_parent_hw = parent;
+ req->best_parent_rate = parent_rate;
+ }
+ }
+
+ req->rate = best_rate;
+ return 0;
+}
+
+static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct peri_clk_data *data = bcm_clk->u.peri;
+ struct bcm_clk_sel *sel = &data->sel;
+ struct bcm_clk_trig *trig;
+ int ret;
+
+ BUG_ON(index >= sel->parent_count);
+
+ /* If there's only one parent we don't require a selector */
+ if (!selector_exists(sel))
+ return 0;
+
+ /*
+ * The regular trigger is used by default, but if there's a
+ * pre-trigger we want to use that instead.
+ */
+ trig = trigger_exists(&data->pre_trig) ? &data->pre_trig
+ : &data->trig;
+
+ ret = selector_write(bcm_clk->ccu, &data->gate, sel, trig, index);
+ if (ret == -ENXIO) {
+ pr_err("%s: gating failure for %s\n", __func__,
+ bcm_clk->init_data.name);
+ ret = -EIO; /* Don't proliferate weird errors */
+ } else if (ret == -EIO) {
+ pr_err("%s: %strigger failed for %s\n", __func__,
+ trig == &data->pre_trig ? "pre-" : "",
+ bcm_clk->init_data.name);
+ }
+
+ return ret;
+}
+
+static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct peri_clk_data *data = bcm_clk->u.peri;
+ u8 index;
+
+ index = selector_read_index(bcm_clk->ccu, &data->sel);
+
+ /* Not all callers would handle an out-of-range value gracefully */
+ return index == BAD_CLK_INDEX ? 0 : index;
+}
+
+static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct kona_clk *bcm_clk = to_kona_clk(hw);
+ struct peri_clk_data *data = bcm_clk->u.peri;
+ struct bcm_clk_div *div = &data->div;
+ u64 scaled_div = 0;
+ int ret;
+
+ if (parent_rate > (unsigned long)LONG_MAX)
+ return -EINVAL;
+
+ if (rate == clk_hw_get_rate(hw))
+ return 0;
+
+ if (!divider_exists(div))
+ return rate == parent_rate ? 0 : -EINVAL;
+
+ /*
+ * A fixed divider can't be changed. (Nor can a fixed
+ * pre-divider be, but for now we never actually try to
+ * change that.) Tolerate a request for a no-op change.
+ */
+ if (divider_is_fixed(&data->div))
+ return rate == parent_rate ? 0 : -EINVAL;
+
+ /*
+ * Get the scaled divisor value needed to achieve a clock
+ * rate as close as possible to what was requested, given
+ * the parent clock rate supplied.
+ */
+ (void)round_rate(bcm_clk->ccu, div, &data->pre_div,
+ rate ? rate : 1, parent_rate, &scaled_div);
+
+ /*
+ * We aren't updating any pre-divider at this point, so
+ * we'll use the regular trigger.
+ */
+ ret = divider_write(bcm_clk->ccu, &data->gate, &data->div,
+ &data->trig, scaled_div);
+ if (ret == -ENXIO) {
+ pr_err("%s: gating failure for %s\n", __func__,
+ bcm_clk->init_data.name);
+ ret = -EIO; /* Don't proliferate weird errors */
+ } else if (ret == -EIO) {
+ pr_err("%s: trigger failed for %s\n", __func__,
+ bcm_clk->init_data.name);
+ }
+
+ return ret;
+}
+
+struct clk_ops kona_peri_clk_ops = {
+ .enable = kona_peri_clk_enable,
+ .disable = kona_peri_clk_disable,
+ .is_enabled = kona_peri_clk_is_enabled,
+ .recalc_rate = kona_peri_clk_recalc_rate,
+ .determine_rate = kona_peri_clk_determine_rate,
+ .set_parent = kona_peri_clk_set_parent,
+ .get_parent = kona_peri_clk_get_parent,
+ .set_rate = kona_peri_clk_set_rate,
+};
+
+/* Put a peripheral clock into its initial state */
+static bool __peri_clk_init(struct kona_clk *bcm_clk)
+{
+ struct ccu_data *ccu = bcm_clk->ccu;
+ struct peri_clk_data *peri = bcm_clk->u.peri;
+ const char *name = bcm_clk->init_data.name;
+ struct bcm_clk_trig *trig;
+
+ BUG_ON(bcm_clk->type != bcm_clk_peri);
+
+ if (!policy_init(ccu, &peri->policy)) {
+ pr_err("%s: error initializing policy for %s\n",
+ __func__, name);
+ return false;
+ }
+ if (!gate_init(ccu, &peri->gate)) {
+ pr_err("%s: error initializing gate for %s\n", __func__, name);
+ return false;
+ }
+ if (!hyst_init(ccu, &peri->hyst)) {
+ pr_err("%s: error initializing hyst for %s\n", __func__, name);
+ return false;
+ }
+ if (!div_init(ccu, &peri->gate, &peri->div, &peri->trig)) {
+ pr_err("%s: error initializing divider for %s\n", __func__,
+ name);
+ return false;
+ }
+
+ /*
+ * For the pre-divider and selector, the pre-trigger is used
+ * if it's present, otherwise we just use the regular trigger.
+ */
+ trig = trigger_exists(&peri->pre_trig) ? &peri->pre_trig
+ : &peri->trig;
+
+ if (!div_init(ccu, &peri->gate, &peri->pre_div, trig)) {
+ pr_err("%s: error initializing pre-divider for %s\n", __func__,
+ name);
+ return false;
+ }
+
+ if (!sel_init(ccu, &peri->gate, &peri->sel, trig)) {
+ pr_err("%s: error initializing selector for %s\n", __func__,
+ name);
+ return false;
+ }
+
+ return true;
+}
+
+static bool __kona_clk_init(struct kona_clk *bcm_clk)
+{
+ switch (bcm_clk->type) {
+ case bcm_clk_peri:
+ return __peri_clk_init(bcm_clk);
+ default:
+ BUG();
+ }
+ return false;
+}
+
+/* Set a CCU and all its clocks into their desired initial state */
+bool __init kona_ccu_init(struct ccu_data *ccu)
+{
+ unsigned long flags;
+ unsigned int which;
+ struct kona_clk *kona_clks = ccu->kona_clks;
+ bool success = true;
+
+ flags = ccu_lock(ccu);
+ __ccu_write_enable(ccu);
+
+ for (which = 0; which < ccu->clk_num; which++) {
+ struct kona_clk *bcm_clk = &kona_clks[which];
+
+ if (!bcm_clk->ccu)
+ continue;
+
+ success &= __kona_clk_init(bcm_clk);
+ }
+
+ __ccu_write_disable(ccu);
+ ccu_unlock(ccu, flags);
+ return success;
+}
diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h
new file mode 100644
index 0000000000..e09655024a
--- /dev/null
+++ b/drivers/clk/bcm/clk-kona.h
@@ -0,0 +1,502 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ * Copyright 2013 Linaro Limited
+ */
+
+#ifndef _CLK_KONA_H
+#define _CLK_KONA_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/clk-provider.h>
+
+#define BILLION 1000000000
+
+/* The common clock framework uses u8 to represent a parent index */
+#define PARENT_COUNT_MAX ((u32)U8_MAX)
+
+#define BAD_CLK_INDEX U8_MAX /* Can't ever be valid */
+#define BAD_CLK_NAME ((const char *)-1)
+
+#define BAD_SCALED_DIV_VALUE U64_MAX
+
+/*
+ * Utility macros for object flag management. If possible, flags
+ * should be defined such that 0 is the desired default value.
+ */
+#define FLAG(type, flag) BCM_CLK_ ## type ## _FLAGS_ ## flag
+#define FLAG_SET(obj, type, flag) ((obj)->flags |= FLAG(type, flag))
+#define FLAG_CLEAR(obj, type, flag) ((obj)->flags &= ~(FLAG(type, flag)))
+#define FLAG_FLIP(obj, type, flag) ((obj)->flags ^= FLAG(type, flag))
+#define FLAG_TEST(obj, type, flag) (!!((obj)->flags & FLAG(type, flag)))
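+
+/*
+ * For example (editor's note), FLAG(GATE, HW) expands to
+ * BCM_CLK_GATE_FLAGS_HW, so FLAG_TEST(gate, GATE, HW) tests that bit
+ * in gate->flags and FLAG_FLIP(gate, GATE, ENABLED) toggles the
+ * software-managed "enabled" bit.
+ */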
+
+/* CCU field state tests */
+
+#define ccu_policy_exists(ccu_policy) ((ccu_policy)->enable.offset != 0)
+
+/* Clock field state tests */
+
+#define policy_exists(policy) ((policy)->offset != 0)
+
+#define gate_exists(gate) FLAG_TEST(gate, GATE, EXISTS)
+#define gate_is_enabled(gate) FLAG_TEST(gate, GATE, ENABLED)
+#define gate_is_hw_controllable(gate) FLAG_TEST(gate, GATE, HW)
+#define gate_is_sw_controllable(gate) FLAG_TEST(gate, GATE, SW)
+#define gate_is_sw_managed(gate) FLAG_TEST(gate, GATE, SW_MANAGED)
+#define gate_is_no_disable(gate) FLAG_TEST(gate, GATE, NO_DISABLE)
+
+#define gate_flip_enabled(gate) FLAG_FLIP(gate, GATE, ENABLED)
+
+#define hyst_exists(hyst) ((hyst)->offset != 0)
+
+#define divider_exists(div) FLAG_TEST(div, DIV, EXISTS)
+#define divider_is_fixed(div) FLAG_TEST(div, DIV, FIXED)
+#define divider_has_fraction(div) (!divider_is_fixed(div) && \
+ (div)->u.s.frac_width > 0)
+
+#define selector_exists(sel) ((sel)->width != 0)
+#define trigger_exists(trig) FLAG_TEST(trig, TRIG, EXISTS)
+
+#define policy_lvm_en_exists(enable) ((enable)->offset != 0)
+#define policy_ctl_exists(control) ((control)->offset != 0)
+
+/* Clock type, used to tell common block what it's part of */
+enum bcm_clk_type {
+ bcm_clk_none, /* undefined clock type */
+ bcm_clk_bus,
+ bcm_clk_core,
+ bcm_clk_peri
+};
+
+/*
+ * CCU policy control for clocks. Clocks can be enabled or disabled
+ * based on the CCU policy in effect. One bit in each policy mask
+ * register (one per CCU policy) represents whether the clock is
+ * enabled when that policy is in effect or not. The CCU policy engine
+ * must be stopped to update these bits, and must be restarted again
+ * afterward.
+ */
+struct bcm_clk_policy {
+ u32 offset; /* first policy mask register offset */
+ u32 bit; /* bit used in all mask registers */
+};
+
+/* Policy initialization macro */
+
+#define POLICY(_offset, _bit) \
+ { \
+ .offset = (_offset), \
+ .bit = (_bit), \
+ }
+
+/*
+ * Gating control and status is managed by a 32-bit gate register.
+ *
+ * There are several types of gating available:
+ * - (no gate)
+ * A clock with no gate is assumed to be always enabled.
+ * - hardware-only gating (auto-gating)
+ * Enabling or disabling clocks with this type of gate is
+ * managed automatically by the hardware. Such clocks can be
+ * considered by the software to be enabled. The current status
+ * of auto-gated clocks can be read from the gate status bit.
+ * - software-only gating
+ * Auto-gating is not available for this type of clock.
+ * Instead, software manages whether it's enabled by setting or
+ * clearing the enable bit. The current gate status of a gate
+ * under software control can be read from the gate status bit.
+ * To ensure a change to the gating status is complete, the
+ * status bit can be polled to verify that the gate has entered
+ * the desired state.
+ * - selectable hardware or software gating
+ * Gating for this type of clock can be configured to be either
+ * under software or hardware control. Which type is in use is
+ * determined by the hw_sw_sel bit of the gate register.
+ */
+struct bcm_clk_gate {
+ u32 offset; /* gate register offset */
+ u32 status_bit; /* 0: gate is disabled; 1: gate is enabled */
+ u32 en_bit; /* 0: disable; 1: enable */
+ u32 hw_sw_sel_bit; /* 0: hardware gating; 1: software gating */
+ u32 flags; /* BCM_CLK_GATE_FLAGS_* below */
+};
+
+/*
+ * Gate flags:
+ * HW means this gate can be auto-gated
+ * SW means the state of this gate can be software controlled
+ * NO_DISABLE means this gate is (only) enabled if under software control
+ * SW_MANAGED means the status of this gate is under software control
+ * ENABLED means this software-managed gate is *supposed* to be enabled
+ */
+#define BCM_CLK_GATE_FLAGS_EXISTS ((u32)1 << 0) /* Gate is valid */
+#define BCM_CLK_GATE_FLAGS_HW ((u32)1 << 1) /* Can auto-gate */
+#define BCM_CLK_GATE_FLAGS_SW ((u32)1 << 2) /* Software control */
+#define BCM_CLK_GATE_FLAGS_NO_DISABLE ((u32)1 << 3) /* HW or enabled */
+#define BCM_CLK_GATE_FLAGS_SW_MANAGED ((u32)1 << 4) /* SW now in control */
+#define BCM_CLK_GATE_FLAGS_ENABLED ((u32)1 << 5) /* If SW_MANAGED */
+
+/*
+ * Gate initialization macros.
+ *
+ * Any gate initially under software control will be enabled.
+ */
+
+/* A hardware/software gate initially under software control */
+#define HW_SW_GATE(_offset, _status_bit, _en_bit, _hw_sw_sel_bit) \
+ { \
+ .offset = (_offset), \
+ .status_bit = (_status_bit), \
+ .en_bit = (_en_bit), \
+ .hw_sw_sel_bit = (_hw_sw_sel_bit), \
+ .flags = FLAG(GATE, HW)|FLAG(GATE, SW)| \
+ FLAG(GATE, SW_MANAGED)|FLAG(GATE, ENABLED)| \
+ FLAG(GATE, EXISTS), \
+ }
+
+/* A hardware/software gate initially under hardware control */
+#define HW_SW_GATE_AUTO(_offset, _status_bit, _en_bit, _hw_sw_sel_bit) \
+ { \
+ .offset = (_offset), \
+ .status_bit = (_status_bit), \
+ .en_bit = (_en_bit), \
+ .hw_sw_sel_bit = (_hw_sw_sel_bit), \
+ .flags = FLAG(GATE, HW)|FLAG(GATE, SW)| \
+ FLAG(GATE, EXISTS), \
+ }
+
+/* A hardware-or-enabled gate (enabled if not under hardware control) */
+#define HW_ENABLE_GATE(_offset, _status_bit, _en_bit, _hw_sw_sel_bit) \
+ { \
+ .offset = (_offset), \
+ .status_bit = (_status_bit), \
+ .en_bit = (_en_bit), \
+ .hw_sw_sel_bit = (_hw_sw_sel_bit), \
+ .flags = FLAG(GATE, HW)|FLAG(GATE, SW)| \
+ FLAG(GATE, NO_DISABLE)|FLAG(GATE, EXISTS), \
+ }
+
+/* A software-only gate */
+#define SW_ONLY_GATE(_offset, _status_bit, _en_bit) \
+ { \
+ .offset = (_offset), \
+ .status_bit = (_status_bit), \
+ .en_bit = (_en_bit), \
+ .flags = FLAG(GATE, SW)|FLAG(GATE, SW_MANAGED)| \
+ FLAG(GATE, ENABLED)|FLAG(GATE, EXISTS), \
+ }
+
+/* A hardware-only gate */
+#define HW_ONLY_GATE(_offset, _status_bit) \
+ { \
+ .offset = (_offset), \
+ .status_bit = (_status_bit), \
+ .flags = FLAG(GATE, HW)|FLAG(GATE, EXISTS), \
+ }
+
+/* Gate hysteresis for clocks */
+struct bcm_clk_hyst {
+ u32 offset; /* hyst register offset (normally CLKGATE) */
+ u32 en_bit; /* bit used to enable hysteresis */
+ u32 val_bit; /* if enabled: 0 = low delay; 1 = high delay */
+};
+
+/* Hysteresis initialization macro */
+
+#define HYST(_offset, _en_bit, _val_bit) \
+ { \
+ .offset = (_offset), \
+ .en_bit = (_en_bit), \
+ .val_bit = (_val_bit), \
+ }
+
+/*
+ * Each clock can have zero, one, or two dividers which change the
+ * output rate of the clock. Each divider can be either fixed or
+ * variable. If there are two dividers, they are the "pre-divider"
+ * and the "regular" or "downstream" divider. If there is only one,
+ * there is no pre-divider.
+ *
+ * A fixed divider is any non-zero (positive) value; the input
+ * rate is simply divided by that value.
+ *
+ * The value of a variable divider is maintained in a sub-field of a
+ * 32-bit divider register. The position of the field in the
+ * register is defined by its offset and width. The value recorded
+ * in this field is always 1 less than the value it represents.
+ *
+ * In addition, a variable divider can indicate that some subset
+ * of its bits represent a "fractional" part of the divider. Such
+ * bits comprise the low-order portion of the divider field, and can
+ * be viewed as representing the portion of the divider that lies to
+ * the right of the decimal point. Most variable dividers have zero
+ * fractional bits. Variable dividers with non-zero fraction width
+ * still record a value 1 less than the value they represent; the
+ * added 1 does *not* affect the low-order bit in this case, it
+ * affects the bits above the fractional part only. (Often in this
+ * code a divider field value is distinguished from the value it
+ * represents by referring to the latter as a "divisor".)
+ *
+ * In order to avoid dealing with fractions, divider arithmetic is
+ * performed using "scaled" values. A scaled value is one that's
+ * been left-shifted by the fractional width of a divider. Dividing
+ * a scaled value by a scaled divisor produces the desired quotient
+ * without loss of precision and without any other special handling
+ * for fractions.
+ *
+ * The recorded value of a variable divider can be modified. To
+ * modify either divider (or both), a clock must be enabled (i.e.,
+ * using its gate). In addition, a trigger register (described
+ * below) must be used to commit the change, and polled to verify
+ * the change is complete.
+ */
+struct bcm_clk_div {
+ union {
+ struct { /* variable divider */
+ u32 offset; /* divider register offset */
+ u32 shift; /* field shift */
+ u32 width; /* field width */
+ u32 frac_width; /* field fraction width */
+
+ u64 scaled_div; /* scaled divider value */
+ } s;
+ u32 fixed; /* non-zero fixed divider value */
+ } u;
+ u32 flags; /* BCM_CLK_DIV_FLAGS_* below */
+};
+
+/*
+ * Divider flags:
+ * EXISTS means this divider exists
+ * FIXED means it is a fixed-rate divider
+ */
+#define BCM_CLK_DIV_FLAGS_EXISTS ((u32)1 << 0) /* Divider is valid */
+#define BCM_CLK_DIV_FLAGS_FIXED ((u32)1 << 1) /* Fixed-value */
+
+/* Divider initialization macros */
+
+/* A fixed (non-zero) divider */
+#define FIXED_DIVIDER(_value) \
+ { \
+ .u.fixed = (_value), \
+ .flags = FLAG(DIV, EXISTS)|FLAG(DIV, FIXED), \
+ }
+
+/* A divider with an integral divisor */
+#define DIVIDER(_offset, _shift, _width) \
+ { \
+ .u.s.offset = (_offset), \
+ .u.s.shift = (_shift), \
+ .u.s.width = (_width), \
+ .u.s.scaled_div = BAD_SCALED_DIV_VALUE, \
+ .flags = FLAG(DIV, EXISTS), \
+ }
+
+/* A divider whose divisor has an integer and fractional part */
+#define FRAC_DIVIDER(_offset, _shift, _width, _frac_width) \
+ { \
+ .u.s.offset = (_offset), \
+ .u.s.shift = (_shift), \
+ .u.s.width = (_width), \
+ .u.s.frac_width = (_frac_width), \
+ .u.s.scaled_div = BAD_SCALED_DIV_VALUE, \
+ .flags = FLAG(DIV, EXISTS), \
+ }
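+
+/*
+ * Illustrative example of the scaled arithmetic described above
+ * (register offset, shift, and field value are hypothetical, not
+ * taken from a real CCU).  A divider declared as
+ *
+ *	FRAC_DIVIDER(0x0a10, 4, 5, 3)
+ *
+ * has a 5-bit field at bit 4 of register 0x0a10, of which the low
+ * 3 bits are fractional.  A recorded field value of 21 (10101b)
+ * yields a scaled divisor of 21 + (1 << 3) = 29, i.e. a divisor of
+ * 29 / 8 = 3.625.  Dividing a 100 MHz parent gives
+ * (100000000 << 3) / 29, or about 27586207 Hz, which matches
+ * 100 MHz / 3.625.
+ */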
+
+/*
+ * Clocks may have multiple "parent" clocks. If there is more than
+ * one, a selector must be specified to define which of the parent
+ * clocks is currently in use. The selected clock is indicated in a
+ * sub-field of a 32-bit selector register. The range of
+ * representable selector values typically exceeds the number of
+ * available parent clocks. Occasionally the reset value of a
+ * selector field is explicitly set to a (specific) value that does
+ * not correspond to a defined input clock.
+ *
+ * We register all known parent clocks with the common clock code
+ * using a packed array (i.e., no empty slots) of (parent) clock
+ * names, and refer to them later using indexes into that array.
+ * We maintain an array of selector values indexed by common clock
+ * index values in order to map between these common clock indexes
+ * and the selector values used by the hardware.
+ *
+ * Like dividers, a selector can be modified, but to do so a clock
+ * must be enabled, and a trigger must be used to commit the change.
+ */
+struct bcm_clk_sel {
+ u32 offset; /* selector register offset */
+ u32 shift; /* field shift */
+ u32 width; /* field width */
+
+ u32 parent_count; /* number of entries in parent_sel[] */
+ u32 *parent_sel; /* array of parent selector values */
+ u8 clk_index; /* current selected index in parent_sel[] */
+};
+
+/* Selector initialization macro */
+#define SELECTOR(_offset, _shift, _width) \
+ { \
+ .offset = (_offset), \
+ .shift = (_shift), \
+ .width = (_width), \
+ .clk_index = BAD_CLK_INDEX, \
+ }
+
+/*
+ * Making changes to a variable divider or a selector for a clock
+ * requires the use of a trigger. A trigger is defined by a single
+ * bit within a register. To signal a change, a 1 is written into
+ * that bit. To determine when the change has been completed, that
+ * trigger bit is polled; the read value will be 1 while the change
+ * is in progress, and 0 when it is complete.
+ *
+ * Occasionally a clock will have more than one trigger. In this
+ * case, the "pre-trigger" will be used when changing a clock's
+ * selector and/or its pre-divider.
+ */
+struct bcm_clk_trig {
+ u32 offset; /* trigger register offset */
+ u32 bit; /* trigger bit */
+ u32 flags; /* BCM_CLK_TRIG_FLAGS_* below */
+};
+
+/*
+ * Trigger flags:
+ * EXISTS means this trigger exists
+ */
+#define BCM_CLK_TRIG_FLAGS_EXISTS ((u32)1 << 0) /* Trigger is valid */
+
+/* Trigger initialization macro */
+#define TRIGGER(_offset, _bit) \
+ { \
+ .offset = (_offset), \
+ .bit = (_bit), \
+ .flags = FLAG(TRIG, EXISTS), \
+ }
+
+struct peri_clk_data {
+ struct bcm_clk_policy policy;
+ struct bcm_clk_gate gate;
+ struct bcm_clk_hyst hyst;
+ struct bcm_clk_trig pre_trig;
+ struct bcm_clk_div pre_div;
+ struct bcm_clk_trig trig;
+ struct bcm_clk_div div;
+ struct bcm_clk_sel sel;
+ const char *clocks[]; /* must be last; use CLOCKS() to declare */
+};
+#define CLOCKS(...) { __VA_ARGS__, NULL, }
+#define NO_CLOCKS { NULL, } /* Use when there are no parent clocks */
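+
+/*
+ * Sketch of how the macros above combine into a peripheral clock
+ * definition.  The names, register offsets, and bit positions here
+ * are hypothetical and for illustration only; the real definitions
+ * live in the per-SoC setup files.
+ *
+ *	static struct peri_clk_data example_uart_data = {
+ *		.gate	= HW_SW_GATE(0x0400, 18, 2, 3),
+ *		.div	= FRAC_DIVIDER(0x0a10, 4, 12, 8),
+ *		.sel	= SELECTOR(0x0a10, 20, 2),
+ *		.trig	= TRIGGER(0x0afc, 2),
+ *		.clocks	= CLOCKS("ref_crystal", "var_96m", "var_500m"),
+ *	};
+ */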
+
+struct kona_clk {
+ struct clk_hw hw;
+ struct clk_init_data init_data; /* includes name of this clock */
+ struct ccu_data *ccu; /* ccu this clock is associated with */
+ enum bcm_clk_type type;
+ union {
+ void *data;
+ struct peri_clk_data *peri;
+ } u;
+};
+#define to_kona_clk(_hw) \
+ container_of(_hw, struct kona_clk, hw)
+
+/* Initialization macro for an entry in a CCU's kona_clks[] array. */
+#define KONA_CLK(_ccu_name, _clk_name, _type) \
+ { \
+ .init_data = { \
+ .name = #_clk_name, \
+ .ops = &kona_ ## _type ## _clk_ops, \
+ }, \
+ .ccu = &_ccu_name ## _ccu_data, \
+ .type = bcm_clk_ ## _type, \
+ .u.data = &_clk_name ## _data, \
+ }
+#define LAST_KONA_CLK { .type = bcm_clk_none }
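+
+/*
+ * Example (hypothetical names) of how KONA_CLK() entries populate a
+ * CCU's kona_clks[] table; the real tables are defined in the
+ * per-SoC setup files:
+ *
+ *	.kona_clks = {
+ *		[EXAMPLE_CCU_UART_CLK] =
+ *			KONA_CLK(example, example_uart, peri),
+ *		LAST_KONA_CLK,
+ *	},
+ */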
+
+/*
+ * CCU policy control. To enable software update of the policy
+ * tables the CCU policy engine must be stopped by setting the
+ * software update enable bit (LVM_EN). After an update the engine
+ * is restarted using the GO bit and either the GO_ATL or GO_AC bit.
+ */
+struct bcm_lvm_en {
+ u32 offset; /* LVM_EN register offset */
+ u32 bit; /* POLICY_CONFIG_EN bit in register */
+};
+
+/* Policy enable initialization macro */
+#define CCU_LVM_EN(_offset, _bit) \
+ { \
+ .offset = (_offset), \
+ .bit = (_bit), \
+ }
+
+struct bcm_policy_ctl {
+ u32 offset; /* POLICY_CTL register offset */
+ u32 go_bit;
+ u32 atl_bit; /* GO, GO_ATL, and GO_AC bits */
+ u32 ac_bit;
+};
+
+/* Policy control initialization macro */
+#define CCU_POLICY_CTL(_offset, _go_bit, _ac_bit, _atl_bit) \
+ { \
+ .offset = (_offset), \
+ .go_bit = (_go_bit), \
+ .ac_bit = (_ac_bit), \
+ .atl_bit = (_atl_bit), \
+ }
+
+struct ccu_policy {
+ struct bcm_lvm_en enable;
+ struct bcm_policy_ctl control;
+};
+
+/*
+ * Each CCU defines a mapped area of memory containing registers
+ * used to manage clocks implemented by the CCU. Access to memory
+ * within the CCU's space is serialized by a spinlock. Before any
+ * (other) address can be written, a special access "password" value
+ * must be written to its WR_ACCESS register (located at the base
+ * address of the range). We keep track of the names of the CCUs as
+ * they are set up, and maintain them in a list.
+ */
+struct ccu_data {
+ void __iomem *base; /* base of mapped address space */
+ spinlock_t lock; /* serialization lock */
+ bool write_enabled; /* write access is currently enabled */
+ struct ccu_policy policy;
+ struct device_node *node;
+ size_t clk_num;
+ const char *name;
+ u32 range; /* byte range of address space */
+ struct kona_clk kona_clks[]; /* must be last */
+};
+
+/* Initialization for common fields in a Kona ccu_data structure */
+#define KONA_CCU_COMMON(_prefix, _name, _ccuname) \
+ .name = #_name "_ccu", \
+ .lock = __SPIN_LOCK_UNLOCKED(_name ## _ccu_data.lock), \
+ .clk_num = _prefix ## _ ## _ccuname ## _CCU_CLOCK_COUNT
+
+/* Exported globals */
+
+extern struct clk_ops kona_peri_clk_ops;
+
+/* Externally visible functions */
+
+extern u64 scaled_div_max(struct bcm_clk_div *div);
+extern u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value,
+ u32 billionths);
+
+extern void __init kona_dt_ccu_setup(struct ccu_data *ccu,
+ struct device_node *node);
+extern bool __init kona_ccu_init(struct ccu_data *ccu);
+
+#endif /* _CLK_KONA_H */
diff --git a/drivers/clk/bcm/clk-ns2.c b/drivers/clk/bcm/clk-ns2.c
new file mode 100644
index 0000000000..065f4290aa
--- /dev/null
+++ b/drivers/clk/bcm/clk-ns2.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2015 Broadcom Corporation
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <dt-bindings/clock/bcm-ns2.h>
+#include "clk-iproc.h"
+
+#define REG_VAL(o, s, w) { .offset = o, .shift = s, .width = w, }
+
+#define AON_VAL(o, pw, ps, is) { .offset = o, .pwr_width = pw, \
+ .pwr_shift = ps, .iso_shift = is }
+
+#define RESET_VAL(o, rs, prs) { .offset = o, .reset_shift = rs, \
+ .p_reset_shift = prs }
+
+#define DF_VAL(o, kis, kiw, kps, kpw, kas, kaw) { .offset = o, .ki_shift = kis,\
+ .ki_width = kiw, .kp_shift = kps, .kp_width = kpw, .ka_shift = kas, \
+ .ka_width = kaw }
+
+#define VCO_CTRL_VAL(uo, lo) { .u_offset = uo, .l_offset = lo }
+
+#define ENABLE_VAL(o, es, hs, bs) { .offset = o, .enable_shift = es, \
+ .hold_shift = hs, .bypass_shift = bs }
+
+static const struct iproc_pll_ctrl genpll_scr = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_SPLIT_STAT_CTRL,
+ .aon = AON_VAL(0x0, 1, 15, 12),
+ .reset = RESET_VAL(0x4, 2, 1),
+ .dig_filter = DF_VAL(0x0, 9, 3, 5, 4, 2, 3),
+ .ndiv_int = REG_VAL(0x8, 4, 10),
+ .pdiv = REG_VAL(0x8, 0, 4),
+ .vco_ctrl = VCO_CTRL_VAL(0x10, 0xc),
+ .status = REG_VAL(0x0, 27, 1),
+};
+
+static const struct iproc_clk_ctrl genpll_scr_clk[] = {
+ /* bypass_shift, the last value passed into ENABLE_VAL(), is not defined
+ * in NS2. However, it doesn't appear to be used anywhere, so it is
+ * set to 0 here.
+ */
+ [BCM_NS2_GENPLL_SCR_SCR_CLK] = {
+ .channel = BCM_NS2_GENPLL_SCR_SCR_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 18, 12, 0),
+ .mdiv = REG_VAL(0x18, 0, 8),
+ },
+ [BCM_NS2_GENPLL_SCR_FS_CLK] = {
+ .channel = BCM_NS2_GENPLL_SCR_FS_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 19, 13, 0),
+ .mdiv = REG_VAL(0x18, 8, 8),
+ },
+ [BCM_NS2_GENPLL_SCR_AUDIO_CLK] = {
+ .channel = BCM_NS2_GENPLL_SCR_AUDIO_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 20, 14, 0),
+ .mdiv = REG_VAL(0x14, 0, 8),
+ },
+ [BCM_NS2_GENPLL_SCR_CH3_UNUSED] = {
+ .channel = BCM_NS2_GENPLL_SCR_CH3_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 21, 15, 0),
+ .mdiv = REG_VAL(0x14, 8, 8),
+ },
+ [BCM_NS2_GENPLL_SCR_CH4_UNUSED] = {
+ .channel = BCM_NS2_GENPLL_SCR_CH4_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 22, 16, 0),
+ .mdiv = REG_VAL(0x14, 16, 8),
+ },
+ [BCM_NS2_GENPLL_SCR_CH5_UNUSED] = {
+ .channel = BCM_NS2_GENPLL_SCR_CH5_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 23, 17, 0),
+ .mdiv = REG_VAL(0x14, 24, 8),
+ },
+};
+
+static void __init ns2_genpll_scr_clk_init(struct device_node *node)
+{
+ iproc_pll_clk_setup(node, &genpll_scr, NULL, 0, genpll_scr_clk,
+ ARRAY_SIZE(genpll_scr_clk));
+}
+CLK_OF_DECLARE(ns2_genpll_src_clk, "brcm,ns2-genpll-scr",
+ ns2_genpll_scr_clk_init);
+
+static const struct iproc_pll_ctrl genpll_sw = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_SPLIT_STAT_CTRL,
+ .aon = AON_VAL(0x0, 1, 11, 10),
+ .reset = RESET_VAL(0x4, 2, 1),
+ .dig_filter = DF_VAL(0x0, 9, 3, 5, 4, 2, 3),
+ .ndiv_int = REG_VAL(0x8, 4, 10),
+ .pdiv = REG_VAL(0x8, 0, 4),
+ .vco_ctrl = VCO_CTRL_VAL(0x10, 0xc),
+ .status = REG_VAL(0x0, 13, 1),
+};
+
+static const struct iproc_clk_ctrl genpll_sw_clk[] = {
+ /* bypass_shift, the last value passed into ENABLE_VAL(), is not defined
+ * in NS2. However, it doesn't appear to be used anywhere, so it is
+ * set to 0 here.
+ */
+ [BCM_NS2_GENPLL_SW_RPE_CLK] = {
+ .channel = BCM_NS2_GENPLL_SW_RPE_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 18, 12, 0),
+ .mdiv = REG_VAL(0x18, 0, 8),
+ },
+ [BCM_NS2_GENPLL_SW_250_CLK] = {
+ .channel = BCM_NS2_GENPLL_SW_250_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 19, 13, 0),
+ .mdiv = REG_VAL(0x18, 8, 8),
+ },
+ [BCM_NS2_GENPLL_SW_NIC_CLK] = {
+ .channel = BCM_NS2_GENPLL_SW_NIC_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 20, 14, 0),
+ .mdiv = REG_VAL(0x14, 0, 8),
+ },
+ [BCM_NS2_GENPLL_SW_CHIMP_CLK] = {
+ .channel = BCM_NS2_GENPLL_SW_CHIMP_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 21, 15, 0),
+ .mdiv = REG_VAL(0x14, 8, 8),
+ },
+ [BCM_NS2_GENPLL_SW_PORT_CLK] = {
+ .channel = BCM_NS2_GENPLL_SW_PORT_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 22, 16, 0),
+ .mdiv = REG_VAL(0x14, 16, 8),
+ },
+ [BCM_NS2_GENPLL_SW_SDIO_CLK] = {
+ .channel = BCM_NS2_GENPLL_SW_SDIO_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 23, 17, 0),
+ .mdiv = REG_VAL(0x14, 24, 8),
+ },
+};
+
+static void __init ns2_genpll_sw_clk_init(struct device_node *node)
+{
+ iproc_pll_clk_setup(node, &genpll_sw, NULL, 0, genpll_sw_clk,
+ ARRAY_SIZE(genpll_sw_clk));
+}
+CLK_OF_DECLARE(ns2_genpll_sw_clk, "brcm,ns2-genpll-sw",
+ ns2_genpll_sw_clk_init);
+
+static const struct iproc_pll_ctrl lcpll_ddr = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_SPLIT_STAT_CTRL,
+ .aon = AON_VAL(0x0, 2, 1, 0),
+ .reset = RESET_VAL(0x4, 2, 1),
+ .dig_filter = DF_VAL(0x0, 9, 3, 5, 4, 1, 4),
+ .ndiv_int = REG_VAL(0x8, 4, 10),
+ .pdiv = REG_VAL(0x8, 0, 4),
+ .vco_ctrl = VCO_CTRL_VAL(0x10, 0xc),
+ .status = REG_VAL(0x0, 0, 1),
+};
+
+static const struct iproc_clk_ctrl lcpll_ddr_clk[] = {
+ /* bypass_shift, the last value passed into ENABLE_VAL(), is not defined
+ * in NS2. However, it doesn't appear to be used anywhere, so it is
+ * set to 0 here.
+ */
+ [BCM_NS2_LCPLL_DDR_PCIE_SATA_USB_CLK] = {
+ .channel = BCM_NS2_LCPLL_DDR_PCIE_SATA_USB_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 18, 12, 0),
+ .mdiv = REG_VAL(0x14, 0, 8),
+ },
+ [BCM_NS2_LCPLL_DDR_DDR_CLK] = {
+ .channel = BCM_NS2_LCPLL_DDR_DDR_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 19, 13, 0),
+ .mdiv = REG_VAL(0x14, 8, 8),
+ },
+ [BCM_NS2_LCPLL_DDR_CH2_UNUSED] = {
+ .channel = BCM_NS2_LCPLL_DDR_CH2_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 20, 14, 0),
+ .mdiv = REG_VAL(0x10, 0, 8),
+ },
+ [BCM_NS2_LCPLL_DDR_CH3_UNUSED] = {
+ .channel = BCM_NS2_LCPLL_DDR_CH3_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 21, 15, 0),
+ .mdiv = REG_VAL(0x10, 8, 8),
+ },
+ [BCM_NS2_LCPLL_DDR_CH4_UNUSED] = {
+ .channel = BCM_NS2_LCPLL_DDR_CH4_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 22, 16, 0),
+ .mdiv = REG_VAL(0x10, 16, 8),
+ },
+ [BCM_NS2_LCPLL_DDR_CH5_UNUSED] = {
+ .channel = BCM_NS2_LCPLL_DDR_CH5_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 23, 17, 0),
+ .mdiv = REG_VAL(0x10, 24, 8),
+ },
+};
+
+static void __init ns2_lcpll_ddr_clk_init(struct device_node *node)
+{
+ iproc_pll_clk_setup(node, &lcpll_ddr, NULL, 0, lcpll_ddr_clk,
+ ARRAY_SIZE(lcpll_ddr_clk));
+}
+CLK_OF_DECLARE(ns2_lcpll_ddr_clk, "brcm,ns2-lcpll-ddr",
+ ns2_lcpll_ddr_clk_init);
+
+static const struct iproc_pll_ctrl lcpll_ports = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_SPLIT_STAT_CTRL,
+ .aon = AON_VAL(0x0, 2, 5, 4),
+ .reset = RESET_VAL(0x4, 2, 1),
+ .dig_filter = DF_VAL(0x0, 9, 3, 5, 4, 1, 4),
+ .ndiv_int = REG_VAL(0x8, 4, 10),
+ .pdiv = REG_VAL(0x8, 0, 4),
+ .vco_ctrl = VCO_CTRL_VAL(0x10, 0xc),
+ .status = REG_VAL(0x0, 0, 1),
+};
+
+static const struct iproc_clk_ctrl lcpll_ports_clk[] = {
+ /* bypass_shift, the last value passed into ENABLE_VAL(), is not defined
+ * in NS2. However, it doesn't appear to be used anywhere, so it is
+ * set to 0 here.
+ */
+ [BCM_NS2_LCPLL_PORTS_WAN_CLK] = {
+ .channel = BCM_NS2_LCPLL_PORTS_WAN_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 18, 12, 0),
+ .mdiv = REG_VAL(0x14, 0, 8),
+ },
+ [BCM_NS2_LCPLL_PORTS_RGMII_CLK] = {
+ .channel = BCM_NS2_LCPLL_PORTS_RGMII_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 19, 13, 0),
+ .mdiv = REG_VAL(0x14, 8, 8),
+ },
+ [BCM_NS2_LCPLL_PORTS_CH2_UNUSED] = {
+ .channel = BCM_NS2_LCPLL_PORTS_CH2_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 20, 14, 0),
+ .mdiv = REG_VAL(0x10, 0, 8),
+ },
+ [BCM_NS2_LCPLL_PORTS_CH3_UNUSED] = {
+ .channel = BCM_NS2_LCPLL_PORTS_CH3_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 21, 15, 0),
+ .mdiv = REG_VAL(0x10, 8, 8),
+ },
+ [BCM_NS2_LCPLL_PORTS_CH4_UNUSED] = {
+ .channel = BCM_NS2_LCPLL_PORTS_CH4_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 22, 16, 0),
+ .mdiv = REG_VAL(0x10, 16, 8),
+ },
+ [BCM_NS2_LCPLL_PORTS_CH5_UNUSED] = {
+ .channel = BCM_NS2_LCPLL_PORTS_CH5_UNUSED,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 23, 17, 0),
+ .mdiv = REG_VAL(0x10, 24, 8),
+ },
+};
+
+static void __init ns2_lcpll_ports_clk_init(struct device_node *node)
+{
+ iproc_pll_clk_setup(node, &lcpll_ports, NULL, 0, lcpll_ports_clk,
+ ARRAY_SIZE(lcpll_ports_clk));
+}
+CLK_OF_DECLARE(ns2_lcpll_ports_clk, "brcm,ns2-lcpll-ports",
+ ns2_lcpll_ports_clk_init);
diff --git a/drivers/clk/bcm/clk-nsp.c b/drivers/clk/bcm/clk-nsp.c
new file mode 100644
index 0000000000..c24c9adbc6
--- /dev/null
+++ b/drivers/clk/bcm/clk-nsp.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2015 Broadcom Corporation
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <dt-bindings/clock/bcm-nsp.h>
+#include "clk-iproc.h"
+
+#define REG_VAL(o, s, w) { .offset = o, .shift = s, .width = w, }
+
+#define AON_VAL(o, pw, ps, is) { .offset = o, .pwr_width = pw, \
+ .pwr_shift = ps, .iso_shift = is }
+
+#define RESET_VAL(o, rs, prs) { .offset = o, .reset_shift = rs, \
+ .p_reset_shift = prs }
+
+#define DF_VAL(o, kis, kiw, kps, kpw, kas, kaw) { .offset = o, .ki_shift = kis,\
+ .ki_width = kiw, .kp_shift = kps, .kp_width = kpw, .ka_shift = kas, \
+ .ka_width = kaw }
+
+#define ENABLE_VAL(o, es, hs, bs) { .offset = o, .enable_shift = es, \
+ .hold_shift = hs, .bypass_shift = bs }
+
+static void __init nsp_armpll_init(struct device_node *node)
+{
+ iproc_armpll_setup(node);
+}
+CLK_OF_DECLARE(nsp_armpll, "brcm,nsp-armpll", nsp_armpll_init);
+
+static const struct iproc_pll_ctrl genpll = {
+ .flags = IPROC_CLK_PLL_HAS_NDIV_FRAC | IPROC_CLK_EMBED_PWRCTRL,
+ .aon = AON_VAL(0x0, 1, 12, 0),
+ .reset = RESET_VAL(0x0, 11, 10),
+ .dig_filter = DF_VAL(0x0, 4, 3, 0, 4, 7, 3),
+ .ndiv_int = REG_VAL(0x14, 20, 10),
+ .ndiv_frac = REG_VAL(0x14, 0, 20),
+ .pdiv = REG_VAL(0x18, 24, 3),
+ .status = REG_VAL(0x20, 12, 1),
+};
+
+static const struct iproc_clk_ctrl genpll_clk[] = {
+ [BCM_NSP_GENPLL_PHY_CLK] = {
+ .channel = BCM_NSP_GENPLL_PHY_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 12, 6, 18),
+ .mdiv = REG_VAL(0x18, 16, 8),
+ },
+ [BCM_NSP_GENPLL_ENET_SW_CLK] = {
+ .channel = BCM_NSP_GENPLL_ENET_SW_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 13, 7, 19),
+ .mdiv = REG_VAL(0x18, 8, 8),
+ },
+ [BCM_NSP_GENPLL_USB_PHY_REF_CLK] = {
+ .channel = BCM_NSP_GENPLL_USB_PHY_REF_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 14, 8, 20),
+ .mdiv = REG_VAL(0x18, 0, 8),
+ },
+ [BCM_NSP_GENPLL_IPROCFAST_CLK] = {
+ .channel = BCM_NSP_GENPLL_IPROCFAST_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 15, 9, 21),
+ .mdiv = REG_VAL(0x1c, 16, 8),
+ },
+ [BCM_NSP_GENPLL_SATA1_CLK] = {
+ .channel = BCM_NSP_GENPLL_SATA1_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 16, 10, 22),
+ .mdiv = REG_VAL(0x1c, 8, 8),
+ },
+ [BCM_NSP_GENPLL_SATA2_CLK] = {
+ .channel = BCM_NSP_GENPLL_SATA2_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 17, 11, 23),
+ .mdiv = REG_VAL(0x1c, 0, 8),
+ },
+};
+
+static void __init nsp_genpll_clk_init(struct device_node *node)
+{
+ iproc_pll_clk_setup(node, &genpll, NULL, 0, genpll_clk,
+ ARRAY_SIZE(genpll_clk));
+}
+CLK_OF_DECLARE(nsp_genpll_clk, "brcm,nsp-genpll", nsp_genpll_clk_init);
+
+static const struct iproc_pll_ctrl lcpll0 = {
+ .flags = IPROC_CLK_PLL_HAS_NDIV_FRAC | IPROC_CLK_EMBED_PWRCTRL,
+ .aon = AON_VAL(0x0, 1, 24, 0),
+ .reset = RESET_VAL(0x0, 23, 22),
+ .dig_filter = DF_VAL(0x0, 16, 3, 12, 4, 19, 4),
+ .ndiv_int = REG_VAL(0x4, 20, 8),
+ .ndiv_frac = REG_VAL(0x4, 0, 20),
+ .pdiv = REG_VAL(0x4, 28, 3),
+ .status = REG_VAL(0x10, 12, 1),
+};
+
+static const struct iproc_clk_ctrl lcpll0_clk[] = {
+ [BCM_NSP_LCPLL0_PCIE_PHY_REF_CLK] = {
+ .channel = BCM_NSP_LCPLL0_PCIE_PHY_REF_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 6, 3, 9),
+ .mdiv = REG_VAL(0x8, 24, 8),
+ },
+ [BCM_NSP_LCPLL0_SDIO_CLK] = {
+ .channel = BCM_NSP_LCPLL0_SDIO_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 7, 4, 10),
+ .mdiv = REG_VAL(0x8, 16, 8),
+ },
+ [BCM_NSP_LCPLL0_DDR_PHY_CLK] = {
+ .channel = BCM_NSP_LCPLL0_DDR_PHY_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 8, 5, 11),
+ .mdiv = REG_VAL(0x8, 8, 8),
+ },
+};
+
+static void __init nsp_lcpll0_clk_init(struct device_node *node)
+{
+ iproc_pll_clk_setup(node, &lcpll0, NULL, 0, lcpll0_clk,
+ ARRAY_SIZE(lcpll0_clk));
+}
+CLK_OF_DECLARE(nsp_lcpll0_clk, "brcm,nsp-lcpll0", nsp_lcpll0_clk_init);
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
new file mode 100644
index 0000000000..829406dc44
--- /dev/null
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Raspberry Pi driver for firmware controlled clocks
+ *
+ * Even though clk-bcm2835 provides an interface to the hardware registers for
+ * the system clocks, we've had to factor out 'pllb' as the firmware 'owns' it.
+ * We're not allowed to change it directly as we might race with the
+ * over-temperature and under-voltage protections provided by the firmware.
+ *
+ * Copyright (C) 2019 Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
+ */
+
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <soc/bcm2835/raspberrypi-firmware.h>
+
+static char *rpi_firmware_clk_names[] = {
+ [RPI_FIRMWARE_EMMC_CLK_ID] = "emmc",
+ [RPI_FIRMWARE_UART_CLK_ID] = "uart",
+ [RPI_FIRMWARE_ARM_CLK_ID] = "arm",
+ [RPI_FIRMWARE_CORE_CLK_ID] = "core",
+ [RPI_FIRMWARE_V3D_CLK_ID] = "v3d",
+ [RPI_FIRMWARE_H264_CLK_ID] = "h264",
+ [RPI_FIRMWARE_ISP_CLK_ID] = "isp",
+ [RPI_FIRMWARE_SDRAM_CLK_ID] = "sdram",
+ [RPI_FIRMWARE_PIXEL_CLK_ID] = "pixel",
+ [RPI_FIRMWARE_PWM_CLK_ID] = "pwm",
+ [RPI_FIRMWARE_HEVC_CLK_ID] = "hevc",
+ [RPI_FIRMWARE_EMMC2_CLK_ID] = "emmc2",
+ [RPI_FIRMWARE_M2MC_CLK_ID] = "m2mc",
+ [RPI_FIRMWARE_PIXEL_BVB_CLK_ID] = "pixel-bvb",
+ [RPI_FIRMWARE_VEC_CLK_ID] = "vec",
+};
+
+#define RPI_FIRMWARE_STATE_ENABLE_BIT BIT(0)
+#define RPI_FIRMWARE_STATE_WAIT_BIT BIT(1)
+
+struct raspberrypi_clk_variant;
+
+struct raspberrypi_clk {
+ struct device *dev;
+ struct rpi_firmware *firmware;
+ struct platform_device *cpufreq;
+};
+
+struct raspberrypi_clk_data {
+ struct clk_hw hw;
+
+ unsigned int id;
+ struct raspberrypi_clk_variant *variant;
+
+ struct raspberrypi_clk *rpi;
+};
+
+struct raspberrypi_clk_variant {
+ bool export;
+ char *clkdev;
+ unsigned long min_rate;
+ bool minimize;
+};
+
+static struct raspberrypi_clk_variant
+raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
+ [RPI_FIRMWARE_ARM_CLK_ID] = {
+ .export = true,
+ .clkdev = "cpu0",
+ },
+ [RPI_FIRMWARE_CORE_CLK_ID] = {
+ .export = true,
+
+ /*
+ * The clock is shared between the HVS and the CSI
+ * controllers on the BCM2711 and will change depending
+ * on the pixels composited on the HVS and the capture
+ * resolution on Unicam.
+ *
+ * Since the rate can get quite large, and we need to
+ * coordinate between both driver instances, let's
+ * always use the minimum the drivers will let us.
+ */
+ .minimize = true,
+ },
+ [RPI_FIRMWARE_M2MC_CLK_ID] = {
+ .export = true,
+
+ /*
+ * If we boot without any cable connected to any of the
+ * HDMI connectors, the firmware will skip the HSM
+ * initialization and leave it with a rate of 0,
+ * resulting in a bus lockup when we're accessing the
+ * registers even if it's enabled.
+ *
+ * Let's put a sensible default so that we don't end up
+ * in this situation.
+ */
+ .min_rate = 120000000,
+
+ /*
+ * The clock is shared between the two HDMI controllers
+ * on the BCM2711 and will change depending on the
+ * resolution output on each. Since the rate can get
+ * quite large, and we need to coordinate between both
+ * driver instances, let's always use the minimum the
+ * drivers will let us.
+ */
+ .minimize = true,
+ },
+ [RPI_FIRMWARE_V3D_CLK_ID] = {
+ .export = true,
+ },
+ [RPI_FIRMWARE_PIXEL_CLK_ID] = {
+ .export = true,
+ },
+ [RPI_FIRMWARE_HEVC_CLK_ID] = {
+ .export = true,
+ },
+ [RPI_FIRMWARE_PIXEL_BVB_CLK_ID] = {
+ .export = true,
+ },
+ [RPI_FIRMWARE_VEC_CLK_ID] = {
+ .export = true,
+ },
+};
+
+/*
+ * Structure of the message passed to Raspberry Pi's firmware in order to
+ * change clock rates. The 'disable_turbo' option is only available to the ARM
+ * clock (pllb) which we enable by default as turbo mode will alter multiple
+ * clocks at once.
+ *
+ * Even though we're able to access the clock registers directly we're bound to
+ * use the firmware interface as the firmware ultimately takes care of
+ * mitigating overheating/undervoltage situations and we would be changing
+ * frequencies behind its back.
+ *
+ * For more information on the firmware interface check:
+ * https://github.com/raspberrypi/firmware/wiki/Mailbox-property-interface
+ */
+struct raspberrypi_firmware_prop {
+ __le32 id;
+ __le32 val;
+ __le32 disable_turbo;
+} __packed;
+
+static int raspberrypi_clock_property(struct rpi_firmware *firmware,
+ const struct raspberrypi_clk_data *data,
+ u32 tag, u32 *val)
+{
+ struct raspberrypi_firmware_prop msg = {
+ .id = cpu_to_le32(data->id),
+ .val = cpu_to_le32(*val),
+ .disable_turbo = cpu_to_le32(1),
+ };
+ int ret;
+
+ ret = rpi_firmware_property(firmware, tag, &msg, sizeof(msg));
+ if (ret)
+ return ret;
+
+ *val = le32_to_cpu(msg.val);
+
+ return 0;
+}
+
+static int raspberrypi_fw_is_prepared(struct clk_hw *hw)
+{
+ struct raspberrypi_clk_data *data =
+ container_of(hw, struct raspberrypi_clk_data, hw);
+ struct raspberrypi_clk *rpi = data->rpi;
+ u32 val = 0;
+ int ret;
+
+ ret = raspberrypi_clock_property(rpi->firmware, data,
+ RPI_FIRMWARE_GET_CLOCK_STATE, &val);
+ if (ret)
+ return 0;
+
+ return !!(val & RPI_FIRMWARE_STATE_ENABLE_BIT);
+}
+
+static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct raspberrypi_clk_data *data =
+ container_of(hw, struct raspberrypi_clk_data, hw);
+ struct raspberrypi_clk *rpi = data->rpi;
+ u32 val = 0;
+ int ret;
+
+ ret = raspberrypi_clock_property(rpi->firmware, data,
+ RPI_FIRMWARE_GET_CLOCK_RATE, &val);
+ if (ret)
+ return 0;
+
+ return val;
+}
+
+static int raspberrypi_fw_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct raspberrypi_clk_data *data =
+ container_of(hw, struct raspberrypi_clk_data, hw);
+ struct raspberrypi_clk *rpi = data->rpi;
+ u32 _rate = rate;
+ int ret;
+
+ ret = raspberrypi_clock_property(rpi->firmware, data,
+ RPI_FIRMWARE_SET_CLOCK_RATE, &_rate);
+ if (ret)
+ dev_err_ratelimited(rpi->dev, "Failed to change %s frequency: %d\n",
+ clk_hw_get_name(hw), ret);
+
+ return ret;
+}
+
+static int raspberrypi_fw_dumb_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct raspberrypi_clk_data *data =
+ container_of(hw, struct raspberrypi_clk_data, hw);
+ struct raspberrypi_clk_variant *variant = data->variant;
+
+ /*
+ * The firmware will do the rounding but that isn't part of
+ * the interface with the firmware, so we just do our best
+ * here.
+ */
+
+ req->rate = clamp(req->rate, req->min_rate, req->max_rate);
+
+ /*
+ * We want to aggressively reduce the clock rate here, so let's
+ * just ignore the requested rate and return the bare minimum
+ * rate we can get away with.
+ */
+ if (variant->minimize && req->min_rate > 0)
+ req->rate = req->min_rate;
+
+ return 0;
+}
+
+static const struct clk_ops raspberrypi_firmware_clk_ops = {
+ .is_prepared = raspberrypi_fw_is_prepared,
+ .recalc_rate = raspberrypi_fw_get_rate,
+ .determine_rate = raspberrypi_fw_dumb_determine_rate,
+ .set_rate = raspberrypi_fw_set_rate,
+};
+
+static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
+ unsigned int parent,
+ unsigned int id,
+ struct raspberrypi_clk_variant *variant)
+{
+ struct raspberrypi_clk_data *data;
+ struct clk_init_data init = {};
+ u32 min_rate, max_rate;
+ int ret;
+
+ data = devm_kzalloc(rpi->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+ data->rpi = rpi;
+ data->id = id;
+ data->variant = variant;
+
+ init.name = devm_kasprintf(rpi->dev, GFP_KERNEL,
+ "fw-clk-%s",
+ rpi_firmware_clk_names[id]);
+ init.ops = &raspberrypi_firmware_clk_ops;
+ init.flags = CLK_GET_RATE_NOCACHE;
+
+ data->hw.init = &init;
+
+ ret = raspberrypi_clock_property(rpi->firmware, data,
+ RPI_FIRMWARE_GET_MIN_CLOCK_RATE,
+ &min_rate);
+ if (ret) {
+ dev_err(rpi->dev, "Failed to get clock %d min freq: %d\n",
+ id, ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = raspberrypi_clock_property(rpi->firmware, data,
+ RPI_FIRMWARE_GET_MAX_CLOCK_RATE,
+ &max_rate);
+ if (ret) {
+ dev_err(rpi->dev, "Failed to get clock %d max freq: %d\n",
+ id, ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = devm_clk_hw_register(rpi->dev, &data->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ clk_hw_set_rate_range(&data->hw, min_rate, max_rate);
+
+ if (variant->clkdev) {
+ ret = devm_clk_hw_register_clkdev(rpi->dev, &data->hw,
+ NULL, variant->clkdev);
+ if (ret) {
+ dev_err(rpi->dev, "Failed to initialize clkdev\n");
+ return ERR_PTR(ret);
+ }
+ }
+
+ if (variant->min_rate) {
+ unsigned long rate;
+
+ clk_hw_set_rate_range(&data->hw, variant->min_rate, max_rate);
+
+ rate = raspberrypi_fw_get_rate(&data->hw, 0);
+ if (rate < variant->min_rate) {
+ ret = raspberrypi_fw_set_rate(&data->hw, variant->min_rate, 0);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ }
+
+ return &data->hw;
+}
+
+struct rpi_firmware_get_clocks_response {
+ u32 parent;
+ u32 id;
+};
+
+static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
+ struct clk_hw_onecell_data *data)
+{
+ struct rpi_firmware_get_clocks_response *clks;
+ int ret;
+
+ /*
+ * The firmware doesn't guarantee that the last element of
+ * RPI_FIRMWARE_GET_CLOCKS is zeroed, so allocate an additional
+ * zeroed element as a sentinel.
+ */
+ clks = devm_kcalloc(rpi->dev,
+ RPI_FIRMWARE_NUM_CLK_ID + 1, sizeof(*clks),
+ GFP_KERNEL);
+ if (!clks)
+ return -ENOMEM;
+
+ ret = rpi_firmware_property(rpi->firmware, RPI_FIRMWARE_GET_CLOCKS,
+ clks,
+ sizeof(*clks) * RPI_FIRMWARE_NUM_CLK_ID);
+ if (ret)
+ return ret;
+
+ while (clks->id) {
+ struct raspberrypi_clk_variant *variant;
+
+ if (clks->id >= RPI_FIRMWARE_NUM_CLK_ID) {
+ dev_err(rpi->dev, "Unknown clock id: %u (max: %u)\n",
+ clks->id, RPI_FIRMWARE_NUM_CLK_ID - 1);
+ return -EINVAL;
+ }
+
+ variant = &raspberrypi_clk_variants[clks->id];
+ if (variant->export) {
+ struct clk_hw *hw;
+
+ hw = raspberrypi_clk_register(rpi, clks->parent,
+ clks->id, variant);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ data->hws[clks->id] = hw;
+ data->num = clks->id + 1;
+ }
+
+ clks++;
+ }
+
+ return 0;
+}
+
+static int raspberrypi_clk_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *firmware_node;
+ struct device *dev = &pdev->dev;
+ struct rpi_firmware *firmware;
+ struct raspberrypi_clk *rpi;
+ int ret;
+
+ /*
+ * We can be probed either through an old-fashioned
+ * platform device registration or through a DT node that is a
+ * child of the firmware node. Handle both cases.
+ */
+ if (dev->of_node)
+ firmware_node = of_get_parent(dev->of_node);
+ else
+ firmware_node = of_find_compatible_node(NULL, NULL,
+ "raspberrypi,bcm2835-firmware");
+ if (!firmware_node) {
+ dev_err(dev, "Missing firmware node\n");
+ return -ENOENT;
+ }
+
+ firmware = devm_rpi_firmware_get(&pdev->dev, firmware_node);
+ of_node_put(firmware_node);
+ if (!firmware)
+ return -EPROBE_DEFER;
+
+ rpi = devm_kzalloc(dev, sizeof(*rpi), GFP_KERNEL);
+ if (!rpi)
+ return -ENOMEM;
+
+ rpi->dev = dev;
+ rpi->firmware = firmware;
+ platform_set_drvdata(pdev, rpi);
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws,
+ RPI_FIRMWARE_NUM_CLK_ID),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = raspberrypi_discover_clocks(rpi, clk_data);
+ if (ret)
+ return ret;
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ clk_data);
+ if (ret)
+ return ret;
+
+ rpi->cpufreq = platform_device_register_data(dev, "raspberrypi-cpufreq",
+ -1, NULL, 0);
+
+ return 0;
+}
+
+static void raspberrypi_clk_remove(struct platform_device *pdev)
+{
+ struct raspberrypi_clk *rpi = platform_get_drvdata(pdev);
+
+ platform_device_unregister(rpi->cpufreq);
+}
+
+static const struct of_device_id raspberrypi_clk_match[] = {
+ { .compatible = "raspberrypi,firmware-clocks" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, raspberrypi_clk_match);
+
+static struct platform_driver raspberrypi_clk_driver = {
+ .driver = {
+ .name = "raspberrypi-clk",
+ .of_match_table = raspberrypi_clk_match,
+ },
+ .probe = raspberrypi_clk_probe,
+ .remove_new = raspberrypi_clk_remove,
+};
+module_platform_driver(raspberrypi_clk_driver);
+
+MODULE_AUTHOR("Nicolas Saenz Julienne <nsaenzjulienne@suse.de>");
+MODULE_DESCRIPTION("Raspberry Pi firmware clock driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:raspberrypi-clk");
diff --git a/drivers/clk/bcm/clk-sr.c b/drivers/clk/bcm/clk-sr.c
new file mode 100644
index 0000000000..3b2cf397b7
--- /dev/null
+++ b/drivers/clk/bcm/clk-sr.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2017 Broadcom
+ */
+
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/bcm-sr.h>
+#include "clk-iproc.h"
+
+#define REG_VAL(o, s, w) { .offset = o, .shift = s, .width = w, }
+
+#define AON_VAL(o, pw, ps, is) { .offset = o, .pwr_width = pw, \
+ .pwr_shift = ps, .iso_shift = is }
+
+#define SW_CTRL_VAL(o, s) { .offset = o, .shift = s, }
+
+#define RESET_VAL(o, rs, prs) { .offset = o, .reset_shift = rs, \
+ .p_reset_shift = prs }
+
+#define DF_VAL(o, kis, kiw, kps, kpw, kas, kaw) { .offset = o, \
+ .ki_shift = kis, .ki_width = kiw, .kp_shift = kps, .kp_width = kpw, \
+ .ka_shift = kas, .ka_width = kaw }
+
+#define VCO_CTRL_VAL(uo, lo) { .u_offset = uo, .l_offset = lo }
+
+#define ENABLE_VAL(o, es, hs, bs) { .offset = o, .enable_shift = es, \
+ .hold_shift = hs, .bypass_shift = bs }
+
+static const struct iproc_pll_ctrl sr_genpll0 = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_HAS_NDIV_FRAC |
+ IPROC_CLK_PLL_NEEDS_SW_CFG,
+ .aon = AON_VAL(0x0, 5, 1, 0),
+ .reset = RESET_VAL(0x0, 12, 11),
+ .dig_filter = DF_VAL(0x0, 4, 3, 0, 4, 7, 3),
+ .sw_ctrl = SW_CTRL_VAL(0x10, 31),
+ .ndiv_int = REG_VAL(0x10, 20, 10),
+ .ndiv_frac = REG_VAL(0x10, 0, 20),
+ .pdiv = REG_VAL(0x14, 0, 4),
+ .status = REG_VAL(0x30, 12, 1),
+};
+
+static const struct iproc_clk_ctrl sr_genpll0_clk[] = {
+ [BCM_SR_GENPLL0_125M_CLK] = {
+ .channel = BCM_SR_GENPLL0_125M_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 6, 0, 12),
+ .mdiv = REG_VAL(0x18, 0, 9),
+ },
+ [BCM_SR_GENPLL0_SCR_CLK] = {
+ .channel = BCM_SR_GENPLL0_SCR_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 7, 1, 13),
+ .mdiv = REG_VAL(0x18, 10, 9),
+ },
+ [BCM_SR_GENPLL0_250M_CLK] = {
+ .channel = BCM_SR_GENPLL0_250M_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 8, 2, 14),
+ .mdiv = REG_VAL(0x18, 20, 9),
+ },
+ [BCM_SR_GENPLL0_PCIE_AXI_CLK] = {
+ .channel = BCM_SR_GENPLL0_PCIE_AXI_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 9, 3, 15),
+ .mdiv = REG_VAL(0x1c, 0, 9),
+ },
+ [BCM_SR_GENPLL0_PAXC_AXI_X2_CLK] = {
+ .channel = BCM_SR_GENPLL0_PAXC_AXI_X2_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 10, 4, 16),
+ .mdiv = REG_VAL(0x1c, 10, 9),
+ },
+ [BCM_SR_GENPLL0_PAXC_AXI_CLK] = {
+ .channel = BCM_SR_GENPLL0_PAXC_AXI_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 11, 5, 17),
+ .mdiv = REG_VAL(0x1c, 20, 9),
+ },
+};
+
+static int sr_genpll0_clk_init(struct platform_device *pdev)
+{
+ iproc_pll_clk_setup(pdev->dev.of_node,
+ &sr_genpll0, NULL, 0, sr_genpll0_clk,
+ ARRAY_SIZE(sr_genpll0_clk));
+ return 0;
+}
+
+static const struct iproc_pll_ctrl sr_genpll2 = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_HAS_NDIV_FRAC |
+ IPROC_CLK_PLL_NEEDS_SW_CFG,
+ .aon = AON_VAL(0x0, 1, 13, 12),
+ .reset = RESET_VAL(0x0, 12, 11),
+ .dig_filter = DF_VAL(0x0, 4, 3, 0, 4, 7, 3),
+ .sw_ctrl = SW_CTRL_VAL(0x10, 31),
+ .ndiv_int = REG_VAL(0x10, 20, 10),
+ .ndiv_frac = REG_VAL(0x10, 0, 20),
+ .pdiv = REG_VAL(0x14, 0, 4),
+ .status = REG_VAL(0x30, 12, 1),
+};
+
+static const struct iproc_clk_ctrl sr_genpll2_clk[] = {
+ [BCM_SR_GENPLL2_NIC_CLK] = {
+ .channel = BCM_SR_GENPLL2_NIC_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 6, 0, 12),
+ .mdiv = REG_VAL(0x18, 0, 9),
+ },
+ [BCM_SR_GENPLL2_TS_500_CLK] = {
+ .channel = BCM_SR_GENPLL2_TS_500_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 7, 1, 13),
+ .mdiv = REG_VAL(0x18, 10, 9),
+ },
+ [BCM_SR_GENPLL2_125_NITRO_CLK] = {
+ .channel = BCM_SR_GENPLL2_125_NITRO_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 8, 2, 14),
+ .mdiv = REG_VAL(0x18, 20, 9),
+ },
+ [BCM_SR_GENPLL2_CHIMP_CLK] = {
+ .channel = BCM_SR_GENPLL2_CHIMP_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 9, 3, 15),
+ .mdiv = REG_VAL(0x1c, 0, 9),
+ },
+ [BCM_SR_GENPLL2_NIC_FLASH_CLK] = {
+ .channel = BCM_SR_GENPLL2_NIC_FLASH_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 10, 4, 16),
+ .mdiv = REG_VAL(0x1c, 10, 9),
+ },
+ [BCM_SR_GENPLL2_FS4_CLK] = {
+ .channel = BCM_SR_GENPLL2_FS4_CLK,
+ .enable = ENABLE_VAL(0x4, 11, 5, 17),
+ .mdiv = REG_VAL(0x1c, 20, 9),
+ },
+};
+
+static int sr_genpll2_clk_init(struct platform_device *pdev)
+{
+ iproc_pll_clk_setup(pdev->dev.of_node,
+ &sr_genpll2, NULL, 0, sr_genpll2_clk,
+ ARRAY_SIZE(sr_genpll2_clk));
+ return 0;
+}
+
+static const struct iproc_pll_ctrl sr_genpll3 = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_HAS_NDIV_FRAC |
+ IPROC_CLK_PLL_NEEDS_SW_CFG,
+ .aon = AON_VAL(0x0, 1, 19, 18),
+ .reset = RESET_VAL(0x0, 12, 11),
+ .dig_filter = DF_VAL(0x0, 4, 3, 0, 4, 7, 3),
+ .sw_ctrl = SW_CTRL_VAL(0x10, 31),
+ .ndiv_int = REG_VAL(0x10, 20, 10),
+ .ndiv_frac = REG_VAL(0x10, 0, 20),
+ .pdiv = REG_VAL(0x14, 0, 4),
+ .status = REG_VAL(0x30, 12, 1),
+};
+
+static const struct iproc_clk_ctrl sr_genpll3_clk[] = {
+ [BCM_SR_GENPLL3_HSLS_CLK] = {
+ .channel = BCM_SR_GENPLL3_HSLS_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 6, 0, 12),
+ .mdiv = REG_VAL(0x18, 0, 9),
+ },
+ [BCM_SR_GENPLL3_SDIO_CLK] = {
+ .channel = BCM_SR_GENPLL3_SDIO_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 7, 1, 13),
+ .mdiv = REG_VAL(0x18, 10, 9),
+ },
+};
+
+static void sr_genpll3_clk_init(struct device_node *node)
+{
+ iproc_pll_clk_setup(node, &sr_genpll3, NULL, 0, sr_genpll3_clk,
+ ARRAY_SIZE(sr_genpll3_clk));
+}
+CLK_OF_DECLARE(sr_genpll3_clk, "brcm,sr-genpll3", sr_genpll3_clk_init);
+
+static const struct iproc_pll_ctrl sr_genpll4 = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_HAS_NDIV_FRAC |
+ IPROC_CLK_PLL_NEEDS_SW_CFG,
+ .aon = AON_VAL(0x0, 1, 25, 24),
+ .reset = RESET_VAL(0x0, 12, 11),
+ .dig_filter = DF_VAL(0x0, 4, 3, 0, 4, 7, 3),
+ .sw_ctrl = SW_CTRL_VAL(0x10, 31),
+ .ndiv_int = REG_VAL(0x10, 20, 10),
+ .ndiv_frac = REG_VAL(0x10, 0, 20),
+ .pdiv = REG_VAL(0x14, 0, 4),
+ .status = REG_VAL(0x30, 12, 1),
+};
+
+static const struct iproc_clk_ctrl sr_genpll4_clk[] = {
+ [BCM_SR_GENPLL4_CCN_CLK] = {
+ .channel = BCM_SR_GENPLL4_CCN_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 6, 0, 12),
+ .mdiv = REG_VAL(0x18, 0, 9),
+ },
+ [BCM_SR_GENPLL4_TPIU_PLL_CLK] = {
+ .channel = BCM_SR_GENPLL4_TPIU_PLL_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 7, 1, 13),
+ .mdiv = REG_VAL(0x18, 10, 9),
+ },
+ [BCM_SR_GENPLL4_NOC_CLK] = {
+ .channel = BCM_SR_GENPLL4_NOC_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 8, 2, 14),
+ .mdiv = REG_VAL(0x18, 20, 9),
+ },
+ [BCM_SR_GENPLL4_CHCLK_FS4_CLK] = {
+ .channel = BCM_SR_GENPLL4_CHCLK_FS4_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 9, 3, 15),
+ .mdiv = REG_VAL(0x1c, 0, 9),
+ },
+ [BCM_SR_GENPLL4_BRIDGE_FSCPU_CLK] = {
+ .channel = BCM_SR_GENPLL4_BRIDGE_FSCPU_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 10, 4, 16),
+ .mdiv = REG_VAL(0x1c, 10, 9),
+ },
+};
+
+static int sr_genpll4_clk_init(struct platform_device *pdev)
+{
+ iproc_pll_clk_setup(pdev->dev.of_node,
+ &sr_genpll4, NULL, 0, sr_genpll4_clk,
+ ARRAY_SIZE(sr_genpll4_clk));
+ return 0;
+}
+
+static const struct iproc_pll_ctrl sr_genpll5 = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_HAS_NDIV_FRAC |
+ IPROC_CLK_PLL_NEEDS_SW_CFG,
+ .aon = AON_VAL(0x0, 1, 1, 0),
+ .reset = RESET_VAL(0x0, 12, 11),
+ .dig_filter = DF_VAL(0x0, 4, 3, 0, 4, 7, 3),
+ .sw_ctrl = SW_CTRL_VAL(0x10, 31),
+ .ndiv_int = REG_VAL(0x10, 20, 10),
+ .ndiv_frac = REG_VAL(0x10, 0, 20),
+ .pdiv = REG_VAL(0x14, 0, 4),
+ .status = REG_VAL(0x30, 12, 1),
+};
+
+static const struct iproc_clk_ctrl sr_genpll5_clk[] = {
+ [BCM_SR_GENPLL5_FS4_HF_CLK] = {
+ .channel = BCM_SR_GENPLL5_FS4_HF_CLK,
+ .enable = ENABLE_VAL(0x4, 6, 0, 12),
+ .mdiv = REG_VAL(0x18, 0, 9),
+ },
+ [BCM_SR_GENPLL5_CRYPTO_AE_CLK] = {
+ .channel = BCM_SR_GENPLL5_CRYPTO_AE_CLK,
+ .enable = ENABLE_VAL(0x4, 7, 1, 12),
+ .mdiv = REG_VAL(0x18, 10, 9),
+ },
+ [BCM_SR_GENPLL5_RAID_AE_CLK] = {
+ .channel = BCM_SR_GENPLL5_RAID_AE_CLK,
+ .enable = ENABLE_VAL(0x4, 8, 2, 14),
+ .mdiv = REG_VAL(0x18, 20, 9),
+ },
+};
+
+static int sr_genpll5_clk_init(struct platform_device *pdev)
+{
+ iproc_pll_clk_setup(pdev->dev.of_node,
+ &sr_genpll5, NULL, 0, sr_genpll5_clk,
+ ARRAY_SIZE(sr_genpll5_clk));
+ return 0;
+}
+
+static const struct iproc_pll_ctrl sr_lcpll0 = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_NEEDS_SW_CFG,
+ .aon = AON_VAL(0x0, 2, 19, 18),
+ .reset = RESET_VAL(0x0, 31, 30),
+ .sw_ctrl = SW_CTRL_VAL(0x4, 31),
+ .ndiv_int = REG_VAL(0x4, 16, 10),
+ .pdiv = REG_VAL(0x4, 26, 4),
+ .status = REG_VAL(0x38, 12, 1),
+};
+
+static const struct iproc_clk_ctrl sr_lcpll0_clk[] = {
+ [BCM_SR_LCPLL0_SATA_REFP_CLK] = {
+ .channel = BCM_SR_LCPLL0_SATA_REFP_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 7, 1, 13),
+ .mdiv = REG_VAL(0x14, 0, 9),
+ },
+ [BCM_SR_LCPLL0_SATA_REFN_CLK] = {
+ .channel = BCM_SR_LCPLL0_SATA_REFN_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 8, 2, 14),
+ .mdiv = REG_VAL(0x14, 10, 9),
+ },
+ [BCM_SR_LCPLL0_SATA_350_CLK] = {
+ .channel = BCM_SR_LCPLL0_SATA_350_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 9, 3, 15),
+ .mdiv = REG_VAL(0x14, 20, 9),
+ },
+ [BCM_SR_LCPLL0_SATA_500_CLK] = {
+ .channel = BCM_SR_LCPLL0_SATA_500_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 10, 4, 16),
+ .mdiv = REG_VAL(0x18, 0, 9),
+ },
+};
+
+static int sr_lcpll0_clk_init(struct platform_device *pdev)
+{
+ iproc_pll_clk_setup(pdev->dev.of_node,
+ &sr_lcpll0, NULL, 0, sr_lcpll0_clk,
+ ARRAY_SIZE(sr_lcpll0_clk));
+ return 0;
+}
+
+static const struct iproc_pll_ctrl sr_lcpll1 = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_NEEDS_SW_CFG,
+ .aon = AON_VAL(0x0, 2, 22, 21),
+ .reset = RESET_VAL(0x0, 31, 30),
+ .sw_ctrl = SW_CTRL_VAL(0x4, 31),
+ .ndiv_int = REG_VAL(0x4, 16, 10),
+ .pdiv = REG_VAL(0x4, 26, 4),
+ .status = REG_VAL(0x38, 12, 1),
+};
+
+static const struct iproc_clk_ctrl sr_lcpll1_clk[] = {
+ [BCM_SR_LCPLL1_WAN_CLK] = {
+ .channel = BCM_SR_LCPLL1_WAN_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 7, 1, 13),
+ .mdiv = REG_VAL(0x14, 0, 9),
+ },
+ [BCM_SR_LCPLL1_USB_REF_CLK] = {
+ .channel = BCM_SR_LCPLL1_USB_REF_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 8, 2, 14),
+ .mdiv = REG_VAL(0x14, 10, 9),
+ },
+ [BCM_SR_LCPLL1_CRMU_TS_CLK] = {
+ .channel = BCM_SR_LCPLL1_CRMU_TS_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 9, 3, 15),
+ .mdiv = REG_VAL(0x14, 20, 9),
+ },
+};
+
+static int sr_lcpll1_clk_init(struct platform_device *pdev)
+{
+ iproc_pll_clk_setup(pdev->dev.of_node,
+ &sr_lcpll1, NULL, 0, sr_lcpll1_clk,
+ ARRAY_SIZE(sr_lcpll1_clk));
+ return 0;
+}
+
+static const struct iproc_pll_ctrl sr_lcpll_pcie = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_NEEDS_SW_CFG,
+ .aon = AON_VAL(0x0, 2, 25, 24),
+ .reset = RESET_VAL(0x0, 31, 30),
+ .sw_ctrl = SW_CTRL_VAL(0x4, 31),
+ .ndiv_int = REG_VAL(0x4, 16, 10),
+ .pdiv = REG_VAL(0x4, 26, 4),
+ .status = REG_VAL(0x38, 12, 1),
+};
+
+static const struct iproc_clk_ctrl sr_lcpll_pcie_clk[] = {
+ [BCM_SR_LCPLL_PCIE_PHY_REF_CLK] = {
+ .channel = BCM_SR_LCPLL_PCIE_PHY_REF_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 7, 1, 13),
+ .mdiv = REG_VAL(0x14, 0, 9),
+ },
+};
+
+static int sr_lcpll_pcie_clk_init(struct platform_device *pdev)
+{
+ iproc_pll_clk_setup(pdev->dev.of_node,
+ &sr_lcpll_pcie, NULL, 0, sr_lcpll_pcie_clk,
+ ARRAY_SIZE(sr_lcpll_pcie_clk));
+ return 0;
+}
+
+static const struct of_device_id sr_clk_dt_ids[] = {
+ { .compatible = "brcm,sr-genpll0", .data = sr_genpll0_clk_init },
+ { .compatible = "brcm,sr-genpll2", .data = sr_genpll2_clk_init },
+ { .compatible = "brcm,sr-genpll4", .data = sr_genpll4_clk_init },
+ { .compatible = "brcm,sr-genpll5", .data = sr_genpll5_clk_init },
+ { .compatible = "brcm,sr-lcpll0", .data = sr_lcpll0_clk_init },
+ { .compatible = "brcm,sr-lcpll1", .data = sr_lcpll1_clk_init },
+ { .compatible = "brcm,sr-lcpll-pcie", .data = sr_lcpll_pcie_clk_init },
+ { /* sentinel */ }
+};
+
+static int sr_clk_probe(struct platform_device *pdev)
+{
+ int (*probe_func)(struct platform_device *);
+
+ probe_func = of_device_get_match_data(&pdev->dev);
+ if (!probe_func)
+ return -ENODEV;
+
+ return probe_func(pdev);
+}
+
+static struct platform_driver sr_clk_driver = {
+ .driver = {
+ .name = "sr-clk",
+ .of_match_table = sr_clk_dt_ids,
+ },
+ .probe = sr_clk_probe,
+};
+builtin_platform_driver(sr_clk_driver);