Diffstat
-rw-r--r-- | drivers/clk/mvebu/Kconfig | 51
-rw-r--r-- | drivers/clk/mvebu/Makefile | 18
-rw-r--r-- | drivers/clk/mvebu/ap806-system-controller.c | 223
-rw-r--r-- | drivers/clk/mvebu/armada-370.c | 186
-rw-r--r-- | drivers/clk/mvebu/armada-375.c | 184
-rw-r--r-- | drivers/clk/mvebu/armada-37xx-periph.c | 762
-rw-r--r-- | drivers/clk/mvebu/armada-37xx-tbg.c | 158
-rw-r--r-- | drivers/clk/mvebu/armada-37xx-xtal.c | 91
-rw-r--r-- | drivers/clk/mvebu/armada-38x.c | 168
-rw-r--r-- | drivers/clk/mvebu/armada-39x.c | 158
-rw-r--r-- | drivers/clk/mvebu/armada-xp.c | 236
-rw-r--r-- | drivers/clk/mvebu/clk-corediv.c | 338
-rw-r--r-- | drivers/clk/mvebu/clk-cpu.c | 255
-rw-r--r-- | drivers/clk/mvebu/common.c | 296
-rw-r--r-- | drivers/clk/mvebu/common.h | 58
-rw-r--r-- | drivers/clk/mvebu/cp110-system-controller.c | 452
-rw-r--r-- | drivers/clk/mvebu/dove-divider.c | 262
-rw-r--r-- | drivers/clk/mvebu/dove-divider.h | 7
-rw-r--r-- | drivers/clk/mvebu/dove.c | 203
-rw-r--r-- | drivers/clk/mvebu/kirkwood.c | 344
-rw-r--r-- | drivers/clk/mvebu/mv98dx3236.c | 182
-rw-r--r-- | drivers/clk/mvebu/orion.c | 280
22 files changed, 4912 insertions, 0 deletions
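All of the providers introduced below register their outputs with the common clock framework, so other drivers consume them through the standard clk API rather than anything MVEBU-specific. For reference before the patch body, here is a minimal, hypothetical consumer sketch; the driver, probe function and device-tree wiring are illustrative assumptions only and are not part of this series:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/*
 * Hypothetical consumer: takes whatever clock its DT node points at,
 * e.g. one of the gating-clock outputs registered by the drivers below.
 */
static int example_consumer_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);	/* first "clocks" phandle */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);		/* ungate the clock */
	if (ret)
		return ret;

	dev_info(&pdev->dev, "clock runs at %lu Hz\n", clk_get_rate(clk));

	clk_disable_unprepare(clk);
	return 0;
}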
diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig new file mode 100644 index 000000000..fddc8ac5f --- /dev/null +++ b/drivers/clk/mvebu/Kconfig @@ -0,0 +1,51 @@ +config MVEBU_CLK_COMMON + bool + +config MVEBU_CLK_CPU + bool + +config MVEBU_CLK_COREDIV + bool + +config ARMADA_370_CLK + bool + select MVEBU_CLK_COMMON + select MVEBU_CLK_CPU + +config ARMADA_375_CLK + bool + select MVEBU_CLK_COMMON + +config ARMADA_38X_CLK + bool + select MVEBU_CLK_COMMON + +config ARMADA_39X_CLK + bool + select MVEBU_CLK_COMMON + +config ARMADA_37XX_CLK + bool + +config ARMADA_XP_CLK + bool + select MVEBU_CLK_COMMON + select MVEBU_CLK_CPU + +config ARMADA_AP806_SYSCON + bool + +config ARMADA_CP110_SYSCON + bool + +config DOVE_CLK + bool + select MVEBU_CLK_COMMON + +config KIRKWOOD_CLK + bool + select MVEBU_CLK_COMMON + +config ORION_CLK + bool + select MVEBU_CLK_COMMON diff --git a/drivers/clk/mvebu/Makefile b/drivers/clk/mvebu/Makefile new file mode 100644 index 000000000..93ac36852 --- /dev/null +++ b/drivers/clk/mvebu/Makefile @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_MVEBU_CLK_COMMON) += common.o +obj-$(CONFIG_MVEBU_CLK_CPU) += clk-cpu.o +obj-$(CONFIG_MVEBU_CLK_COREDIV) += clk-corediv.o + +obj-$(CONFIG_ARMADA_370_CLK) += armada-370.o +obj-$(CONFIG_ARMADA_375_CLK) += armada-375.o +obj-$(CONFIG_ARMADA_38X_CLK) += armada-38x.o +obj-$(CONFIG_ARMADA_39X_CLK) += armada-39x.o +obj-$(CONFIG_ARMADA_37XX_CLK) += armada-37xx-xtal.o +obj-$(CONFIG_ARMADA_37XX_CLK) += armada-37xx-tbg.o +obj-$(CONFIG_ARMADA_37XX_CLK) += armada-37xx-periph.o +obj-$(CONFIG_ARMADA_XP_CLK) += armada-xp.o mv98dx3236.o +obj-$(CONFIG_ARMADA_AP806_SYSCON) += ap806-system-controller.o +obj-$(CONFIG_ARMADA_CP110_SYSCON) += cp110-system-controller.o +obj-$(CONFIG_DOVE_CLK) += dove.o dove-divider.o +obj-$(CONFIG_KIRKWOOD_CLK) += kirkwood.o +obj-$(CONFIG_ORION_CLK) += orion.o diff --git a/drivers/clk/mvebu/ap806-system-controller.c b/drivers/clk/mvebu/ap806-system-controller.c new file mode 100644 index 000000000..fa2fbd2ce --- /dev/null +++ b/drivers/clk/mvebu/ap806-system-controller.c @@ -0,0 +1,223 @@ +/* + * Marvell Armada AP806 System Controller + * + * Copyright (C) 2016 Marvell + * + * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#define pr_fmt(fmt) "ap806-system-controller: " fmt + +#include <linux/clk-provider.h> +#include <linux/mfd/syscon.h> +#include <linux/init.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#define AP806_SAR_REG 0x400 +#define AP806_SAR_CLKFREQ_MODE_MASK 0x1f + +#define AP806_CLK_NUM 5 + +static struct clk *ap806_clks[AP806_CLK_NUM]; + +static struct clk_onecell_data ap806_clk_data = { + .clks = ap806_clks, + .clk_num = AP806_CLK_NUM, +}; + +static char *ap806_unique_name(struct device *dev, struct device_node *np, + char *name) +{ + const __be32 *reg; + u64 addr; + + reg = of_get_property(np, "reg", NULL); + addr = of_translate_address(np, reg); + return devm_kasprintf(dev, GFP_KERNEL, "%llx-%s", + (unsigned long long)addr, name); +} + +static int ap806_syscon_common_probe(struct platform_device *pdev, + struct device_node *syscon_node) +{ + unsigned int freq_mode, cpuclk_freq; + const char *name, *fixedclk_name; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct regmap *regmap; + u32 reg; + int ret; + + regmap = syscon_node_to_regmap(syscon_node); + if (IS_ERR(regmap)) { + dev_err(dev, "cannot get regmap\n"); + return PTR_ERR(regmap); + } + + ret = regmap_read(regmap, AP806_SAR_REG, ®); + if (ret) { + dev_err(dev, "cannot read from regmap\n"); + return ret; + } + + freq_mode = reg & AP806_SAR_CLKFREQ_MODE_MASK; + switch (freq_mode) { + case 0x0: + case 0x1: + cpuclk_freq = 2000; + break; + case 0x6: + case 0x7: + cpuclk_freq = 1800; + break; + case 0x4: + case 0xB: + case 0xD: + cpuclk_freq = 1600; + break; + case 0x1a: + cpuclk_freq = 1400; + break; + case 0x14: + case 0x17: + cpuclk_freq = 1300; + break; + case 0x19: + cpuclk_freq = 1200; + break; + case 0x13: + case 0x1d: + cpuclk_freq = 1000; + break; + case 0x1c: + cpuclk_freq = 800; + break; + case 0x1b: + cpuclk_freq = 600; + break; + default: + dev_err(dev, "invalid SAR value\n"); + return -EINVAL; + } + + /* Convert to hertz */ + cpuclk_freq *= 1000 * 1000; + + /* CPU clocks depend on the Sample At Reset configuration */ + name = ap806_unique_name(dev, syscon_node, "cpu-cluster-0"); + ap806_clks[0] = clk_register_fixed_rate(dev, name, NULL, + 0, cpuclk_freq); + if (IS_ERR(ap806_clks[0])) { + ret = PTR_ERR(ap806_clks[0]); + goto fail0; + } + + name = ap806_unique_name(dev, syscon_node, "cpu-cluster-1"); + ap806_clks[1] = clk_register_fixed_rate(dev, name, NULL, 0, + cpuclk_freq); + if (IS_ERR(ap806_clks[1])) { + ret = PTR_ERR(ap806_clks[1]); + goto fail1; + } + + /* Fixed clock is always 1200 Mhz */ + fixedclk_name = ap806_unique_name(dev, syscon_node, "fixed"); + ap806_clks[2] = clk_register_fixed_rate(dev, fixedclk_name, NULL, + 0, 1200 * 1000 * 1000); + if (IS_ERR(ap806_clks[2])) { + ret = PTR_ERR(ap806_clks[2]); + goto fail2; + } + + /* MSS Clock is fixed clock divided by 6 */ + name = ap806_unique_name(dev, syscon_node, "mss"); + ap806_clks[3] = clk_register_fixed_factor(NULL, name, fixedclk_name, + 0, 1, 6); + if (IS_ERR(ap806_clks[3])) { + ret = PTR_ERR(ap806_clks[3]); + goto fail3; + } + + /* SDIO(/eMMC) Clock is fixed clock divided by 3 */ + name = ap806_unique_name(dev, syscon_node, "sdio"); + ap806_clks[4] = clk_register_fixed_factor(NULL, name, + fixedclk_name, + 0, 1, 3); + if (IS_ERR(ap806_clks[4])) { + ret = PTR_ERR(ap806_clks[4]); + goto fail4; + } + + of_clk_add_provider(np, of_clk_src_onecell_get, &ap806_clk_data); + ret = of_clk_add_provider(np, of_clk_src_onecell_get, &ap806_clk_data); + if (ret) 
+ goto fail_clk_add; + + return 0; + +fail_clk_add: + clk_unregister_fixed_factor(ap806_clks[4]); +fail4: + clk_unregister_fixed_factor(ap806_clks[3]); +fail3: + clk_unregister_fixed_rate(ap806_clks[2]); +fail2: + clk_unregister_fixed_rate(ap806_clks[1]); +fail1: + clk_unregister_fixed_rate(ap806_clks[0]); +fail0: + return ret; +} + +static int ap806_syscon_legacy_probe(struct platform_device *pdev) +{ + dev_warn(&pdev->dev, FW_WARN "Using legacy device tree binding\n"); + dev_warn(&pdev->dev, FW_WARN "Update your device tree:\n"); + dev_warn(&pdev->dev, FW_WARN + "This binding won't be supported in future kernel\n"); + + return ap806_syscon_common_probe(pdev, pdev->dev.of_node); + +} + +static int ap806_clock_probe(struct platform_device *pdev) +{ + return ap806_syscon_common_probe(pdev, pdev->dev.of_node->parent); +} + +static const struct of_device_id ap806_syscon_legacy_of_match[] = { + { .compatible = "marvell,ap806-system-controller", }, + { } +}; + +static struct platform_driver ap806_syscon_legacy_driver = { + .probe = ap806_syscon_legacy_probe, + .driver = { + .name = "marvell-ap806-system-controller", + .of_match_table = ap806_syscon_legacy_of_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(ap806_syscon_legacy_driver); + +static const struct of_device_id ap806_clock_of_match[] = { + { .compatible = "marvell,ap806-clock", }, + { } +}; + +static struct platform_driver ap806_clock_driver = { + .probe = ap806_clock_probe, + .driver = { + .name = "marvell-ap806-clock", + .of_match_table = ap806_clock_of_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(ap806_clock_driver); diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c new file mode 100644 index 000000000..8fdfa9790 --- /dev/null +++ b/drivers/clk/mvebu/armada-370.c @@ -0,0 +1,186 @@ +/* + * Marvell Armada 370 SoC clocks + * + * Copyright (C) 2012 Marvell + * + * Gregory CLEMENT <gregory.clement@free-electrons.com> + * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * Andrew Lunn <andrew@lunn.ch> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/kernel.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of.h> +#include "common.h" + +/* + * Core Clocks + */ + +#define SARL 0 /* Low part [0:31] */ +#define SARL_A370_SSCG_ENABLE BIT(10) +#define SARL_A370_PCLK_FREQ_OPT 11 +#define SARL_A370_PCLK_FREQ_OPT_MASK 0xF +#define SARL_A370_FAB_FREQ_OPT 15 +#define SARL_A370_FAB_FREQ_OPT_MASK 0x1F +#define SARL_A370_TCLK_FREQ_OPT 20 +#define SARL_A370_TCLK_FREQ_OPT_MASK 0x1 + +enum { A370_CPU_TO_NBCLK, A370_CPU_TO_HCLK, A370_CPU_TO_DRAMCLK }; + +static const struct coreclk_ratio a370_coreclk_ratios[] __initconst = { + { .id = A370_CPU_TO_NBCLK, .name = "nbclk" }, + { .id = A370_CPU_TO_HCLK, .name = "hclk" }, + { .id = A370_CPU_TO_DRAMCLK, .name = "dramclk" }, +}; + +static const u32 a370_tclk_freqs[] __initconst = { + 166000000, + 200000000, +}; + +static u32 __init a370_get_tclk_freq(void __iomem *sar) +{ + u8 tclk_freq_select = 0; + + tclk_freq_select = ((readl(sar) >> SARL_A370_TCLK_FREQ_OPT) & + SARL_A370_TCLK_FREQ_OPT_MASK); + return a370_tclk_freqs[tclk_freq_select]; +} + +static const u32 a370_cpu_freqs[] __initconst = { + 400000000, + 533000000, + 667000000, + 800000000, + 1000000000, + 1067000000, + 1200000000, +}; + +static u32 __init a370_get_cpu_freq(void __iomem *sar) +{ + u32 cpu_freq; + u8 cpu_freq_select = 0; + + cpu_freq_select = ((readl(sar) >> SARL_A370_PCLK_FREQ_OPT) & + SARL_A370_PCLK_FREQ_OPT_MASK); + if (cpu_freq_select >= ARRAY_SIZE(a370_cpu_freqs)) { + pr_err("CPU freq select unsupported %d\n", cpu_freq_select); + cpu_freq = 0; + } else + cpu_freq = a370_cpu_freqs[cpu_freq_select]; + + return cpu_freq; +} + +static const int a370_nbclk_ratios[32][2] __initconst = { + {0, 1}, {1, 2}, {2, 2}, {2, 2}, + {1, 2}, {1, 2}, {1, 1}, {2, 3}, + {0, 1}, {1, 2}, {2, 4}, {0, 1}, + {1, 2}, {0, 1}, {0, 1}, {2, 2}, + {0, 1}, {0, 1}, {0, 1}, {1, 1}, + {2, 3}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {1, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, +}; + +static const int a370_hclk_ratios[32][2] __initconst = { + {0, 1}, {1, 2}, {2, 6}, {2, 3}, + {1, 3}, {1, 4}, {1, 2}, {2, 6}, + {0, 1}, {1, 6}, {2, 10}, {0, 1}, + {1, 4}, {0, 1}, {0, 1}, {2, 5}, + {0, 1}, {0, 1}, {0, 1}, {1, 2}, + {2, 6}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {1, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, +}; + +static const int a370_dramclk_ratios[32][2] __initconst = { + {0, 1}, {1, 2}, {2, 3}, {2, 3}, + {1, 3}, {1, 2}, {1, 2}, {2, 6}, + {0, 1}, {1, 3}, {2, 5}, {0, 1}, + {1, 4}, {0, 1}, {0, 1}, {2, 5}, + {0, 1}, {0, 1}, {0, 1}, {1, 1}, + {2, 3}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {1, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, +}; + +static void __init a370_get_clk_ratio( + void __iomem *sar, int id, int *mult, int *div) +{ + u32 opt = ((readl(sar) >> SARL_A370_FAB_FREQ_OPT) & + SARL_A370_FAB_FREQ_OPT_MASK); + + switch (id) { + case A370_CPU_TO_NBCLK: + *mult = a370_nbclk_ratios[opt][0]; + *div = a370_nbclk_ratios[opt][1]; + break; + case A370_CPU_TO_HCLK: + *mult = a370_hclk_ratios[opt][0]; + *div = a370_hclk_ratios[opt][1]; + break; + case A370_CPU_TO_DRAMCLK: + *mult = a370_dramclk_ratios[opt][0]; + *div = a370_dramclk_ratios[opt][1]; + break; + } +} + +static bool a370_is_sscg_enabled(void __iomem *sar) +{ + return !(readl(sar) & SARL_A370_SSCG_ENABLE); +} + +static const struct coreclk_soc_desc a370_coreclks = { + .get_tclk_freq = a370_get_tclk_freq, + .get_cpu_freq = a370_get_cpu_freq, + .get_clk_ratio = a370_get_clk_ratio, + .is_sscg_enabled = a370_is_sscg_enabled, + .fix_sscg_deviation = 
kirkwood_fix_sscg_deviation, + .ratios = a370_coreclk_ratios, + .num_ratios = ARRAY_SIZE(a370_coreclk_ratios), +}; + +/* + * Clock Gating Control + */ + +static const struct clk_gating_soc_desc a370_gating_desc[] __initconst = { + { "audio", NULL, 0, 0 }, + { "pex0_en", NULL, 1, 0 }, + { "pex1_en", NULL, 2, 0 }, + { "ge1", NULL, 3, 0 }, + { "ge0", NULL, 4, 0 }, + { "pex0", "pex0_en", 5, 0 }, + { "pex1", "pex1_en", 9, 0 }, + { "sata0", NULL, 15, 0 }, + { "sdio", NULL, 17, 0 }, + { "crypto", NULL, 23, CLK_IGNORE_UNUSED }, + { "tdm", NULL, 25, 0 }, + { "ddr", NULL, 28, CLK_IGNORE_UNUSED }, + { "sata1", NULL, 30, 0 }, + { } +}; + +static void __init a370_clk_init(struct device_node *np) +{ + struct device_node *cgnp = + of_find_compatible_node(NULL, NULL, "marvell,armada-370-gating-clock"); + + mvebu_coreclk_setup(np, &a370_coreclks); + + if (cgnp) { + mvebu_clk_gating_setup(cgnp, a370_gating_desc); + of_node_put(cgnp); + } +} +CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init); + diff --git a/drivers/clk/mvebu/armada-375.c b/drivers/clk/mvebu/armada-375.c new file mode 100644 index 000000000..c7af2242b --- /dev/null +++ b/drivers/clk/mvebu/armada-375.c @@ -0,0 +1,184 @@ +/* + * Marvell Armada 375 SoC clocks + * + * Copyright (C) 2014 Marvell + * + * Gregory CLEMENT <gregory.clement@free-electrons.com> + * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * Andrew Lunn <andrew@lunn.ch> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/kernel.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of.h> +#include "common.h" + +/* + * Core Clocks + */ + +/* + * For the Armada 375 SoCs, the CPU, DDR and L2 clocks frequencies are + * all modified at the same time, and not separately as for the Armada + * 370 or the Armada XP SoCs. + * + * SAR1[21:17] : CPU frequency DDR frequency L2 frequency + * 6 = 400 MHz 400 MHz 200 MHz + * 15 = 600 MHz 600 MHz 300 MHz + * 21 = 800 MHz 534 MHz 400 MHz + * 25 = 1000 MHz 500 MHz 500 MHz + * others reserved. 
+ * + * SAR1[22] : TCLK frequency + * 0 = 166 MHz + * 1 = 200 MHz + */ + +#define SAR1_A375_TCLK_FREQ_OPT 22 +#define SAR1_A375_TCLK_FREQ_OPT_MASK 0x1 +#define SAR1_A375_CPU_DDR_L2_FREQ_OPT 17 +#define SAR1_A375_CPU_DDR_L2_FREQ_OPT_MASK 0x1F + +static const u32 armada_375_tclk_frequencies[] __initconst = { + 166000000, + 200000000, +}; + +static u32 __init armada_375_get_tclk_freq(void __iomem *sar) +{ + u8 tclk_freq_select; + + tclk_freq_select = ((readl(sar) >> SAR1_A375_TCLK_FREQ_OPT) & + SAR1_A375_TCLK_FREQ_OPT_MASK); + return armada_375_tclk_frequencies[tclk_freq_select]; +} + + +static const u32 armada_375_cpu_frequencies[] __initconst = { + 0, 0, 0, 0, 0, 0, + 400000000, + 0, 0, 0, 0, 0, 0, 0, 0, + 600000000, + 0, 0, 0, 0, 0, + 800000000, + 0, 0, 0, + 1000000000, +}; + +static u32 __init armada_375_get_cpu_freq(void __iomem *sar) +{ + u8 cpu_freq_select; + + cpu_freq_select = ((readl(sar) >> SAR1_A375_CPU_DDR_L2_FREQ_OPT) & + SAR1_A375_CPU_DDR_L2_FREQ_OPT_MASK); + if (cpu_freq_select >= ARRAY_SIZE(armada_375_cpu_frequencies)) { + pr_err("Selected CPU frequency (%d) unsupported\n", + cpu_freq_select); + return 0; + } else + return armada_375_cpu_frequencies[cpu_freq_select]; +} + +enum { A375_CPU_TO_DDR, A375_CPU_TO_L2 }; + +static const struct coreclk_ratio armada_375_coreclk_ratios[] __initconst = { + { .id = A375_CPU_TO_L2, .name = "l2clk" }, + { .id = A375_CPU_TO_DDR, .name = "ddrclk" }, +}; + +static const int armada_375_cpu_l2_ratios[32][2] __initconst = { + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {1, 2}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {1, 2}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {1, 2}, {0, 1}, {0, 1}, + {0, 1}, {1, 2}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, +}; + +static const int armada_375_cpu_ddr_ratios[32][2] __initconst = { + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {1, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {2, 3}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {2, 3}, {0, 1}, {0, 1}, + {0, 1}, {1, 2}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, +}; + +static void __init armada_375_get_clk_ratio( + void __iomem *sar, int id, int *mult, int *div) +{ + u32 opt = ((readl(sar) >> SAR1_A375_CPU_DDR_L2_FREQ_OPT) & + SAR1_A375_CPU_DDR_L2_FREQ_OPT_MASK); + + switch (id) { + case A375_CPU_TO_L2: + *mult = armada_375_cpu_l2_ratios[opt][0]; + *div = armada_375_cpu_l2_ratios[opt][1]; + break; + case A375_CPU_TO_DDR: + *mult = armada_375_cpu_ddr_ratios[opt][0]; + *div = armada_375_cpu_ddr_ratios[opt][1]; + break; + } +} + +static const struct coreclk_soc_desc armada_375_coreclks = { + .get_tclk_freq = armada_375_get_tclk_freq, + .get_cpu_freq = armada_375_get_cpu_freq, + .get_clk_ratio = armada_375_get_clk_ratio, + .ratios = armada_375_coreclk_ratios, + .num_ratios = ARRAY_SIZE(armada_375_coreclk_ratios), +}; + +static void __init armada_375_coreclk_init(struct device_node *np) +{ + mvebu_coreclk_setup(np, &armada_375_coreclks); +} +CLK_OF_DECLARE(armada_375_core_clk, "marvell,armada-375-core-clock", + armada_375_coreclk_init); + +/* + * Clock Gating Control + */ +static const struct clk_gating_soc_desc armada_375_gating_desc[] __initconst = { + { "mu", NULL, 2 }, + { "pp", NULL, 3 }, + { "ptp", NULL, 4 }, + { "pex0", NULL, 5 }, + { "pex1", NULL, 6 }, + { "audio", NULL, 8 }, + { "nd_clk", "nand", 11 }, + { "sata0_link", "sata0_core", 14 }, + { "sata0_core", NULL, 15 }, + { "usb3", NULL, 16 }, + { "sdio", NULL, 17 }, + { "usb", NULL, 18 }, + { "gop", NULL, 19 }, + { 
"sata1_link", "sata1_core", 20 }, + { "sata1_core", NULL, 21 }, + { "xor0", NULL, 22 }, + { "xor1", NULL, 23 }, + { "copro", NULL, 24 }, + { "tdm", NULL, 25 }, + { "crypto0_enc", NULL, 28 }, + { "crypto0_core", NULL, 29 }, + { "crypto1_enc", NULL, 30 }, + { "crypto1_core", NULL, 31 }, + { } +}; + +static void __init armada_375_clk_gating_init(struct device_node *np) +{ + mvebu_clk_gating_setup(np, armada_375_gating_desc); +} +CLK_OF_DECLARE(armada_375_clk_gating, "marvell,armada-375-gating-clock", + armada_375_clk_gating_init); diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c new file mode 100644 index 000000000..5d10733d6 --- /dev/null +++ b/drivers/clk/mvebu/armada-37xx-periph.c @@ -0,0 +1,762 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Marvell Armada 37xx SoC Peripheral clocks + * + * Copyright (C) 2016 Marvell + * + * Gregory CLEMENT <gregory.clement@free-electrons.com> + * + * Most of the peripheral clocks can be modelled like this: + * _____ _______ _______ + * TBG-A-P --| | | | | | ______ + * TBG-B-P --| Mux |--| /div1 |--| /div2 |--| Gate |--> perip_clk + * TBG-A-S --| | | | | | |______| + * TBG-B-S --|_____| |_______| |_______| + * + * However some clocks may use only one or two block or and use the + * xtal clock as parent. + */ + +#include <linux/clk-provider.h> +#include <linux/mfd/syscon.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/slab.h> + +#define TBG_SEL 0x0 +#define DIV_SEL0 0x4 +#define DIV_SEL1 0x8 +#define DIV_SEL2 0xC +#define CLK_SEL 0x10 +#define CLK_DIS 0x14 + +#define ARMADA_37XX_DVFS_LOAD_1 1 +#define LOAD_LEVEL_NR 4 + +#define ARMADA_37XX_NB_L0L1 0x18 +#define ARMADA_37XX_NB_L2L3 0x1C +#define ARMADA_37XX_NB_TBG_DIV_OFF 13 +#define ARMADA_37XX_NB_TBG_DIV_MASK 0x7 +#define ARMADA_37XX_NB_CLK_SEL_OFF 11 +#define ARMADA_37XX_NB_CLK_SEL_MASK 0x1 +#define ARMADA_37XX_NB_TBG_SEL_OFF 9 +#define ARMADA_37XX_NB_TBG_SEL_MASK 0x3 +#define ARMADA_37XX_NB_CONFIG_SHIFT 16 +#define ARMADA_37XX_NB_DYN_MOD 0x24 +#define ARMADA_37XX_NB_DFS_EN 31 +#define ARMADA_37XX_NB_CPU_LOAD 0x30 +#define ARMADA_37XX_NB_CPU_LOAD_MASK 0x3 +#define ARMADA_37XX_DVFS_LOAD_0 0 +#define ARMADA_37XX_DVFS_LOAD_1 1 +#define ARMADA_37XX_DVFS_LOAD_2 2 +#define ARMADA_37XX_DVFS_LOAD_3 3 + +struct clk_periph_driver_data { + struct clk_hw_onecell_data *hw_data; + spinlock_t lock; +}; + +struct clk_double_div { + struct clk_hw hw; + void __iomem *reg1; + u8 shift1; + void __iomem *reg2; + u8 shift2; +}; + +struct clk_pm_cpu { + struct clk_hw hw; + void __iomem *reg_mux; + u8 shift_mux; + u32 mask_mux; + void __iomem *reg_div; + u8 shift_div; + struct regmap *nb_pm_base; + unsigned long l1_expiration; +}; + +#define to_clk_double_div(_hw) container_of(_hw, struct clk_double_div, hw) +#define to_clk_pm_cpu(_hw) container_of(_hw, struct clk_pm_cpu, hw) + +struct clk_periph_data { + const char *name; + const char * const *parent_names; + int num_parents; + struct clk_hw *mux_hw; + struct clk_hw *rate_hw; + struct clk_hw *gate_hw; + struct clk_hw *muxrate_hw; + bool is_double_div; +}; + +static const struct clk_div_table clk_table6[] = { + { .val = 1, .div = 1, }, + { .val = 2, .div = 2, }, + { .val = 3, .div = 3, }, + { .val = 4, .div = 4, }, + { .val = 5, .div = 5, }, + { .val = 6, .div = 6, }, + { .val = 0, .div = 0, }, /* last entry */ +}; + +static const struct clk_div_table clk_table1[] = { + { .val = 0, .div = 1, }, + { .val = 1, .div = 2, }, + { .val = 0, .div = 0, }, 
/* last entry */ +}; + +static const struct clk_div_table clk_table2[] = { + { .val = 0, .div = 2, }, + { .val = 1, .div = 4, }, + { .val = 0, .div = 0, }, /* last entry */ +}; + +static const struct clk_ops clk_double_div_ops; +static const struct clk_ops clk_pm_cpu_ops; + +#define PERIPH_GATE(_name, _bit) \ +struct clk_gate gate_##_name = { \ + .reg = (void *)CLK_DIS, \ + .bit_idx = _bit, \ + .hw.init = &(struct clk_init_data){ \ + .ops = &clk_gate_ops, \ + } \ +}; + +#define PERIPH_MUX(_name, _shift) \ +struct clk_mux mux_##_name = { \ + .reg = (void *)TBG_SEL, \ + .shift = _shift, \ + .mask = 3, \ + .hw.init = &(struct clk_init_data){ \ + .ops = &clk_mux_ro_ops, \ + } \ +}; + +#define PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2) \ +struct clk_double_div rate_##_name = { \ + .reg1 = (void *)_reg1, \ + .reg2 = (void *)_reg2, \ + .shift1 = _shift1, \ + .shift2 = _shift2, \ + .hw.init = &(struct clk_init_data){ \ + .ops = &clk_double_div_ops, \ + } \ +}; + +#define PERIPH_DIV(_name, _reg, _shift, _table) \ +struct clk_divider rate_##_name = { \ + .reg = (void *)_reg, \ + .table = _table, \ + .shift = _shift, \ + .hw.init = &(struct clk_init_data){ \ + .ops = &clk_divider_ro_ops, \ + } \ +}; + +#define PERIPH_PM_CPU(_name, _shift1, _reg, _shift2) \ +struct clk_pm_cpu muxrate_##_name = { \ + .reg_mux = (void *)TBG_SEL, \ + .mask_mux = 3, \ + .shift_mux = _shift1, \ + .reg_div = (void *)_reg, \ + .shift_div = _shift2, \ + .hw.init = &(struct clk_init_data){ \ + .ops = &clk_pm_cpu_ops, \ + } \ +}; + +#define PERIPH_CLK_FULL_DD(_name, _bit, _shift, _reg1, _reg2, _shift1, _shift2)\ +static PERIPH_GATE(_name, _bit); \ +static PERIPH_MUX(_name, _shift); \ +static PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2); + +#define PERIPH_CLK_FULL(_name, _bit, _shift, _reg, _shift1, _table) \ +static PERIPH_GATE(_name, _bit); \ +static PERIPH_MUX(_name, _shift); \ +static PERIPH_DIV(_name, _reg, _shift1, _table); + +#define PERIPH_CLK_GATE_DIV(_name, _bit, _reg, _shift, _table) \ +static PERIPH_GATE(_name, _bit); \ +static PERIPH_DIV(_name, _reg, _shift, _table); + +#define PERIPH_CLK_MUX_DD(_name, _shift, _reg1, _reg2, _shift1, _shift2)\ +static PERIPH_MUX(_name, _shift); \ +static PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2); + +#define REF_CLK_FULL(_name) \ + { .name = #_name, \ + .parent_names = (const char *[]){ "TBG-A-P", \ + "TBG-B-P", "TBG-A-S", "TBG-B-S"}, \ + .num_parents = 4, \ + .mux_hw = &mux_##_name.hw, \ + .gate_hw = &gate_##_name.hw, \ + .rate_hw = &rate_##_name.hw, \ + } + +#define REF_CLK_FULL_DD(_name) \ + { .name = #_name, \ + .parent_names = (const char *[]){ "TBG-A-P", \ + "TBG-B-P", "TBG-A-S", "TBG-B-S"}, \ + .num_parents = 4, \ + .mux_hw = &mux_##_name.hw, \ + .gate_hw = &gate_##_name.hw, \ + .rate_hw = &rate_##_name.hw, \ + .is_double_div = true, \ + } + +#define REF_CLK_GATE(_name, _parent_name) \ + { .name = #_name, \ + .parent_names = (const char *[]){ _parent_name}, \ + .num_parents = 1, \ + .gate_hw = &gate_##_name.hw, \ + } + +#define REF_CLK_GATE_DIV(_name, _parent_name) \ + { .name = #_name, \ + .parent_names = (const char *[]){ _parent_name}, \ + .num_parents = 1, \ + .gate_hw = &gate_##_name.hw, \ + .rate_hw = &rate_##_name.hw, \ + } + +#define REF_CLK_PM_CPU(_name) \ + { .name = #_name, \ + .parent_names = (const char *[]){ "TBG-A-P", \ + "TBG-B-P", "TBG-A-S", "TBG-B-S"}, \ + .num_parents = 4, \ + .muxrate_hw = &muxrate_##_name.hw, \ + } + +#define REF_CLK_MUX_DD(_name) \ + { .name = #_name, \ + .parent_names = (const char *[]){ "TBG-A-P", \ 
+ "TBG-B-P", "TBG-A-S", "TBG-B-S"}, \ + .num_parents = 4, \ + .mux_hw = &mux_##_name.hw, \ + .rate_hw = &rate_##_name.hw, \ + .is_double_div = true, \ + } + +/* NB periph clocks */ +PERIPH_CLK_FULL_DD(mmc, 2, 0, DIV_SEL2, DIV_SEL2, 16, 13); +PERIPH_CLK_FULL_DD(sata_host, 3, 2, DIV_SEL2, DIV_SEL2, 10, 7); +PERIPH_CLK_FULL_DD(sec_at, 6, 4, DIV_SEL1, DIV_SEL1, 3, 0); +PERIPH_CLK_FULL_DD(sec_dap, 7, 6, DIV_SEL1, DIV_SEL1, 9, 6); +PERIPH_CLK_FULL_DD(tscem, 8, 8, DIV_SEL1, DIV_SEL1, 15, 12); +PERIPH_CLK_FULL(tscem_tmx, 10, 10, DIV_SEL1, 18, clk_table6); +static PERIPH_GATE(avs, 11); +PERIPH_CLK_FULL_DD(pwm, 13, 14, DIV_SEL0, DIV_SEL0, 3, 0); +PERIPH_CLK_FULL_DD(sqf, 12, 12, DIV_SEL1, DIV_SEL1, 27, 24); +static PERIPH_GATE(i2c_2, 16); +static PERIPH_GATE(i2c_1, 17); +PERIPH_CLK_GATE_DIV(ddr_phy, 19, DIV_SEL0, 18, clk_table2); +PERIPH_CLK_FULL_DD(ddr_fclk, 21, 16, DIV_SEL0, DIV_SEL0, 15, 12); +PERIPH_CLK_FULL(trace, 22, 18, DIV_SEL0, 20, clk_table6); +PERIPH_CLK_FULL(counter, 23, 20, DIV_SEL0, 23, clk_table6); +PERIPH_CLK_FULL_DD(eip97, 24, 24, DIV_SEL2, DIV_SEL2, 22, 19); +static PERIPH_PM_CPU(cpu, 22, DIV_SEL0, 28); + +static struct clk_periph_data data_nb[] = { + REF_CLK_FULL_DD(mmc), + REF_CLK_FULL_DD(sata_host), + REF_CLK_FULL_DD(sec_at), + REF_CLK_FULL_DD(sec_dap), + REF_CLK_FULL_DD(tscem), + REF_CLK_FULL(tscem_tmx), + REF_CLK_GATE(avs, "xtal"), + REF_CLK_FULL_DD(sqf), + REF_CLK_FULL_DD(pwm), + REF_CLK_GATE(i2c_2, "xtal"), + REF_CLK_GATE(i2c_1, "xtal"), + REF_CLK_GATE_DIV(ddr_phy, "TBG-A-S"), + REF_CLK_FULL_DD(ddr_fclk), + REF_CLK_FULL(trace), + REF_CLK_FULL(counter), + REF_CLK_FULL_DD(eip97), + REF_CLK_PM_CPU(cpu), + { }, +}; + +/* SB periph clocks */ +PERIPH_CLK_MUX_DD(gbe_50, 6, DIV_SEL2, DIV_SEL2, 6, 9); +PERIPH_CLK_MUX_DD(gbe_core, 8, DIV_SEL1, DIV_SEL1, 18, 21); +PERIPH_CLK_MUX_DD(gbe_125, 10, DIV_SEL1, DIV_SEL1, 6, 9); +static PERIPH_GATE(gbe1_50, 0); +static PERIPH_GATE(gbe0_50, 1); +static PERIPH_GATE(gbe1_125, 2); +static PERIPH_GATE(gbe0_125, 3); +PERIPH_CLK_GATE_DIV(gbe1_core, 4, DIV_SEL1, 13, clk_table1); +PERIPH_CLK_GATE_DIV(gbe0_core, 5, DIV_SEL1, 14, clk_table1); +PERIPH_CLK_GATE_DIV(gbe_bm, 12, DIV_SEL1, 0, clk_table1); +PERIPH_CLK_FULL_DD(sdio, 11, 14, DIV_SEL0, DIV_SEL0, 3, 6); +PERIPH_CLK_FULL_DD(usb32_usb2_sys, 16, 16, DIV_SEL0, DIV_SEL0, 9, 12); +PERIPH_CLK_FULL_DD(usb32_ss_sys, 17, 18, DIV_SEL0, DIV_SEL0, 15, 18); + +static struct clk_periph_data data_sb[] = { + REF_CLK_MUX_DD(gbe_50), + REF_CLK_MUX_DD(gbe_core), + REF_CLK_MUX_DD(gbe_125), + REF_CLK_GATE(gbe1_50, "gbe_50"), + REF_CLK_GATE(gbe0_50, "gbe_50"), + REF_CLK_GATE(gbe1_125, "gbe_125"), + REF_CLK_GATE(gbe0_125, "gbe_125"), + REF_CLK_GATE_DIV(gbe1_core, "gbe_core"), + REF_CLK_GATE_DIV(gbe0_core, "gbe_core"), + REF_CLK_GATE_DIV(gbe_bm, "gbe_core"), + REF_CLK_FULL_DD(sdio), + REF_CLK_FULL_DD(usb32_usb2_sys), + REF_CLK_FULL_DD(usb32_ss_sys), + { }, +}; + +static unsigned int get_div(void __iomem *reg, int shift) +{ + u32 val; + + val = (readl(reg) >> shift) & 0x7; + if (val > 6) + return 0; + return val; +} + +static unsigned long clk_double_div_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_double_div *double_div = to_clk_double_div(hw); + unsigned int div; + + div = get_div(double_div->reg1, double_div->shift1); + div *= get_div(double_div->reg2, double_div->shift2); + + return DIV_ROUND_UP_ULL((u64)parent_rate, div); +} + +static const struct clk_ops clk_double_div_ops = { + .recalc_rate = clk_double_div_recalc_rate, +}; + +static void armada_3700_pm_dvfs_update_regs(unsigned int 
load_level, + unsigned int *reg, + unsigned int *offset) +{ + if (load_level <= ARMADA_37XX_DVFS_LOAD_1) + *reg = ARMADA_37XX_NB_L0L1; + else + *reg = ARMADA_37XX_NB_L2L3; + + if (load_level == ARMADA_37XX_DVFS_LOAD_0 || + load_level == ARMADA_37XX_DVFS_LOAD_2) + *offset += ARMADA_37XX_NB_CONFIG_SHIFT; +} + +static bool armada_3700_pm_dvfs_is_enabled(struct regmap *base) +{ + unsigned int val, reg = ARMADA_37XX_NB_DYN_MOD; + + if (IS_ERR(base)) + return false; + + regmap_read(base, reg, &val); + + return !!(val & BIT(ARMADA_37XX_NB_DFS_EN)); +} + +static unsigned int armada_3700_pm_dvfs_get_cpu_div(struct regmap *base) +{ + unsigned int reg = ARMADA_37XX_NB_CPU_LOAD; + unsigned int offset = ARMADA_37XX_NB_TBG_DIV_OFF; + unsigned int load_level, div; + + /* + * This function is always called after the function + * armada_3700_pm_dvfs_is_enabled, so no need to check again + * if the base is valid. + */ + regmap_read(base, reg, &load_level); + + /* + * The register and the offset inside this register accessed to + * read the current divider depend on the load level + */ + load_level &= ARMADA_37XX_NB_CPU_LOAD_MASK; + armada_3700_pm_dvfs_update_regs(load_level, ®, &offset); + + regmap_read(base, reg, &div); + + return (div >> offset) & ARMADA_37XX_NB_TBG_DIV_MASK; +} + +static unsigned int armada_3700_pm_dvfs_get_cpu_parent(struct regmap *base) +{ + unsigned int reg = ARMADA_37XX_NB_CPU_LOAD; + unsigned int offset = ARMADA_37XX_NB_TBG_SEL_OFF; + unsigned int load_level, sel; + + /* + * This function is always called after the function + * armada_3700_pm_dvfs_is_enabled, so no need to check again + * if the base is valid + */ + regmap_read(base, reg, &load_level); + + /* + * The register and the offset inside this register accessed to + * read the current divider depend on the load level + */ + load_level &= ARMADA_37XX_NB_CPU_LOAD_MASK; + armada_3700_pm_dvfs_update_regs(load_level, ®, &offset); + + regmap_read(base, reg, &sel); + + return (sel >> offset) & ARMADA_37XX_NB_TBG_SEL_MASK; +} + +static u8 clk_pm_cpu_get_parent(struct clk_hw *hw) +{ + struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw); + u32 val; + + if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base)) { + val = armada_3700_pm_dvfs_get_cpu_parent(pm_cpu->nb_pm_base); + } else { + val = readl(pm_cpu->reg_mux) >> pm_cpu->shift_mux; + val &= pm_cpu->mask_mux; + } + + return val; +} + +static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw); + unsigned int div; + + if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base)) + div = armada_3700_pm_dvfs_get_cpu_div(pm_cpu->nb_pm_base); + else + div = get_div(pm_cpu->reg_div, pm_cpu->shift_div); + return DIV_ROUND_UP_ULL((u64)parent_rate, div); +} + +static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw); + struct regmap *base = pm_cpu->nb_pm_base; + unsigned int div = *parent_rate / rate; + unsigned int load_level; + /* only available when DVFS is enabled */ + if (!armada_3700_pm_dvfs_is_enabled(base)) + return -EINVAL; + + for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) { + unsigned int reg, val, offset = ARMADA_37XX_NB_TBG_DIV_OFF; + + armada_3700_pm_dvfs_update_regs(load_level, ®, &offset); + + regmap_read(base, reg, &val); + + val >>= offset; + val &= ARMADA_37XX_NB_TBG_DIV_MASK; + if (val == div) + /* + * We found a load level matching the target + * divider, switch to this load level and + * 
return. + */ + return *parent_rate / div; + } + + /* We didn't find any valid divider */ + return -EINVAL; +} + +/* + * Workaround when base CPU frequnecy is 1000 or 1200 MHz + * + * Switching the CPU from the L2 or L3 frequencies (250/300 or 200 MHz + * respectively) to L0 frequency (1/1.2 GHz) requires a significant + * amount of time to let VDD stabilize to the appropriate + * voltage. This amount of time is large enough that it cannot be + * covered by the hardware countdown register. Due to this, the CPU + * might start operating at L0 before the voltage is stabilized, + * leading to CPU stalls. + * + * To work around this problem, we prevent switching directly from the + * L2/L3 frequencies to the L0 frequency, and instead switch to the L1 + * frequency in-between. The sequence therefore becomes: + * 1. First switch from L2/L3 (200/250/300 MHz) to L1 (500/600 MHz) + * 2. Sleep 20ms for stabling VDD voltage + * 3. Then switch from L1 (500/600 MHz) to L0 (1000/1200 MHz). + */ +static void clk_pm_cpu_set_rate_wa(struct clk_pm_cpu *pm_cpu, + unsigned int new_level, unsigned long rate, + struct regmap *base) +{ + unsigned int cur_level; + + regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level); + cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK; + + if (cur_level == new_level) + return; + + /* + * System wants to go to L1 on its own. If we are going from L2/L3, + * remember when 20ms will expire. If from L0, set the value so that + * next switch to L0 won't have to wait. + */ + if (new_level == ARMADA_37XX_DVFS_LOAD_1) { + if (cur_level == ARMADA_37XX_DVFS_LOAD_0) + pm_cpu->l1_expiration = jiffies; + else + pm_cpu->l1_expiration = jiffies + msecs_to_jiffies(20); + return; + } + + /* + * If we are setting to L2/L3, just invalidate L1 expiration time, + * sleeping is not needed. + */ + if (rate < 1000*1000*1000) + goto invalidate_l1_exp; + + /* + * We are going to L0 with rate >= 1GHz. Check whether we have been at + * L1 for long enough time. If not, go to L1 for 20ms. + */ + if (pm_cpu->l1_expiration && jiffies >= pm_cpu->l1_expiration) + goto invalidate_l1_exp; + + regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD, + ARMADA_37XX_NB_CPU_LOAD_MASK, + ARMADA_37XX_DVFS_LOAD_1); + msleep(20); + +invalidate_l1_exp: + pm_cpu->l1_expiration = 0; +} + +static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw); + struct regmap *base = pm_cpu->nb_pm_base; + unsigned int div = parent_rate / rate; + unsigned int load_level; + + /* only available when DVFS is enabled */ + if (!armada_3700_pm_dvfs_is_enabled(base)) + return -EINVAL; + + for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) { + unsigned int reg, mask, val, + offset = ARMADA_37XX_NB_TBG_DIV_OFF; + + armada_3700_pm_dvfs_update_regs(load_level, ®, &offset); + + regmap_read(base, reg, &val); + val >>= offset; + val &= ARMADA_37XX_NB_TBG_DIV_MASK; + + if (val == div) { + /* + * We found a load level matching the target + * divider, switch to this load level and + * return. 
+ */ + reg = ARMADA_37XX_NB_CPU_LOAD; + mask = ARMADA_37XX_NB_CPU_LOAD_MASK; + + /* Apply workaround when base CPU frequency is 1000 or 1200 MHz */ + if (parent_rate >= 1000*1000*1000) + clk_pm_cpu_set_rate_wa(pm_cpu, load_level, rate, base); + + regmap_update_bits(base, reg, mask, load_level); + + return rate; + } + } + + /* We didn't find any valid divider */ + return -EINVAL; +} + +static const struct clk_ops clk_pm_cpu_ops = { + .get_parent = clk_pm_cpu_get_parent, + .round_rate = clk_pm_cpu_round_rate, + .set_rate = clk_pm_cpu_set_rate, + .recalc_rate = clk_pm_cpu_recalc_rate, +}; + +static const struct of_device_id armada_3700_periph_clock_of_match[] = { + { .compatible = "marvell,armada-3700-periph-clock-nb", + .data = data_nb, }, + { .compatible = "marvell,armada-3700-periph-clock-sb", + .data = data_sb, }, + { } +}; + +static int armada_3700_add_composite_clk(const struct clk_periph_data *data, + void __iomem *reg, spinlock_t *lock, + struct device *dev, struct clk_hw **hw) +{ + const struct clk_ops *mux_ops = NULL, *gate_ops = NULL, + *rate_ops = NULL; + struct clk_hw *mux_hw = NULL, *gate_hw = NULL, *rate_hw = NULL; + + if (data->mux_hw) { + struct clk_mux *mux; + + mux_hw = data->mux_hw; + mux = to_clk_mux(mux_hw); + mux->lock = lock; + mux_ops = mux_hw->init->ops; + mux->reg = reg + (u64)mux->reg; + } + + if (data->gate_hw) { + struct clk_gate *gate; + + gate_hw = data->gate_hw; + gate = to_clk_gate(gate_hw); + gate->lock = lock; + gate_ops = gate_hw->init->ops; + gate->reg = reg + (u64)gate->reg; + gate->flags = CLK_GATE_SET_TO_DISABLE; + } + + if (data->rate_hw) { + rate_hw = data->rate_hw; + rate_ops = rate_hw->init->ops; + if (data->is_double_div) { + struct clk_double_div *rate; + + rate = to_clk_double_div(rate_hw); + rate->reg1 = reg + (u64)rate->reg1; + rate->reg2 = reg + (u64)rate->reg2; + } else { + struct clk_divider *rate = to_clk_divider(rate_hw); + const struct clk_div_table *clkt; + int table_size = 0; + + rate->reg = reg + (u64)rate->reg; + for (clkt = rate->table; clkt->div; clkt++) + table_size++; + rate->width = order_base_2(table_size); + rate->lock = lock; + } + } + + if (data->muxrate_hw) { + struct clk_pm_cpu *pmcpu_clk; + struct clk_hw *muxrate_hw = data->muxrate_hw; + struct regmap *map; + + pmcpu_clk = to_clk_pm_cpu(muxrate_hw); + pmcpu_clk->reg_mux = reg + (u64)pmcpu_clk->reg_mux; + pmcpu_clk->reg_div = reg + (u64)pmcpu_clk->reg_div; + + mux_hw = muxrate_hw; + rate_hw = muxrate_hw; + mux_ops = muxrate_hw->init->ops; + rate_ops = muxrate_hw->init->ops; + + map = syscon_regmap_lookup_by_compatible( + "marvell,armada-3700-nb-pm"); + pmcpu_clk->nb_pm_base = map; + } + + *hw = clk_hw_register_composite(dev, data->name, data->parent_names, + data->num_parents, mux_hw, + mux_ops, rate_hw, rate_ops, + gate_hw, gate_ops, CLK_IGNORE_UNUSED); + + return PTR_ERR_OR_ZERO(*hw); +} + +static int armada_3700_periph_clock_probe(struct platform_device *pdev) +{ + struct clk_periph_driver_data *driver_data; + struct device_node *np = pdev->dev.of_node; + const struct clk_periph_data *data; + struct device *dev = &pdev->dev; + int num_periph = 0, i, ret; + struct resource *res; + void __iomem *reg; + + data = of_device_get_match_data(dev); + if (!data) + return -ENODEV; + + while (data[num_periph].name) + num_periph++; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + reg = devm_ioremap_resource(dev, res); + if (IS_ERR(reg)) + return PTR_ERR(reg); + + driver_data = devm_kzalloc(dev, sizeof(*driver_data), GFP_KERNEL); + if (!driver_data) + return -ENOMEM; + + 
driver_data->hw_data = devm_kzalloc(dev, + struct_size(driver_data->hw_data, + hws, num_periph), + GFP_KERNEL); + if (!driver_data->hw_data) + return -ENOMEM; + driver_data->hw_data->num = num_periph; + + spin_lock_init(&driver_data->lock); + + for (i = 0; i < num_periph; i++) { + struct clk_hw **hw = &driver_data->hw_data->hws[i]; + + if (armada_3700_add_composite_clk(&data[i], reg, + &driver_data->lock, dev, hw)) + dev_err(dev, "Can't register periph clock %s\n", + data[i].name); + } + + ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, + driver_data->hw_data); + if (ret) { + for (i = 0; i < num_periph; i++) + clk_hw_unregister(driver_data->hw_data->hws[i]); + return ret; + } + + platform_set_drvdata(pdev, driver_data); + return 0; +} + +static int armada_3700_periph_clock_remove(struct platform_device *pdev) +{ + struct clk_periph_driver_data *data = platform_get_drvdata(pdev); + struct clk_hw_onecell_data *hw_data = data->hw_data; + int i; + + of_clk_del_provider(pdev->dev.of_node); + + for (i = 0; i < hw_data->num; i++) + clk_hw_unregister(hw_data->hws[i]); + + return 0; +} + +static struct platform_driver armada_3700_periph_clock_driver = { + .probe = armada_3700_periph_clock_probe, + .remove = armada_3700_periph_clock_remove, + .driver = { + .name = "marvell-armada-3700-periph-clock", + .of_match_table = armada_3700_periph_clock_of_match, + }, +}; + +builtin_platform_driver(armada_3700_periph_clock_driver); diff --git a/drivers/clk/mvebu/armada-37xx-tbg.c b/drivers/clk/mvebu/armada-37xx-tbg.c new file mode 100644 index 000000000..7ff041f73 --- /dev/null +++ b/drivers/clk/mvebu/armada-37xx-tbg.c @@ -0,0 +1,158 @@ +/* + * Marvell Armada 37xx SoC Time Base Generator clocks + * + * Copyright (C) 2016 Marvell + * + * Gregory CLEMENT <gregory.clement@free-electrons.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2 or later. This program is licensed "as is" + * without any warranty of any kind, whether express or implied. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/clk.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#define NUM_TBG 4 + +#define TBG_CTRL0 0x4 +#define TBG_CTRL1 0x8 +#define TBG_CTRL7 0x20 +#define TBG_CTRL8 0x30 + +#define TBG_DIV_MASK 0x1FF + +#define TBG_A_REFDIV 0 +#define TBG_B_REFDIV 16 + +#define TBG_A_FBDIV 2 +#define TBG_B_FBDIV 18 + +#define TBG_A_VCODIV_SE 0 +#define TBG_B_VCODIV_SE 16 + +#define TBG_A_VCODIV_DIFF 1 +#define TBG_B_VCODIV_DIFF 17 + +struct tbg_def { + char *name; + u32 refdiv_offset; + u32 fbdiv_offset; + u32 vcodiv_reg; + u32 vcodiv_offset; +}; + +static const struct tbg_def tbg[NUM_TBG] = { + {"TBG-A-P", TBG_A_REFDIV, TBG_A_FBDIV, TBG_CTRL8, TBG_A_VCODIV_DIFF}, + {"TBG-B-P", TBG_B_REFDIV, TBG_B_FBDIV, TBG_CTRL8, TBG_B_VCODIV_DIFF}, + {"TBG-A-S", TBG_A_REFDIV, TBG_A_FBDIV, TBG_CTRL1, TBG_A_VCODIV_SE}, + {"TBG-B-S", TBG_B_REFDIV, TBG_B_FBDIV, TBG_CTRL1, TBG_B_VCODIV_SE}, +}; + +static unsigned int tbg_get_mult(void __iomem *reg, const struct tbg_def *ptbg) +{ + u32 val; + + val = readl(reg + TBG_CTRL0); + + return ((val >> ptbg->fbdiv_offset) & TBG_DIV_MASK) << 2; +} + +static unsigned int tbg_get_div(void __iomem *reg, const struct tbg_def *ptbg) +{ + u32 val; + unsigned int div; + + val = readl(reg + TBG_CTRL7); + + div = (val >> ptbg->refdiv_offset) & TBG_DIV_MASK; + if (div == 0) + div = 1; + val = readl(reg + ptbg->vcodiv_reg); + + div *= 1 << ((val >> ptbg->vcodiv_offset) & TBG_DIV_MASK); + + return div; +} + + +static int armada_3700_tbg_clock_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct clk_hw_onecell_data *hw_tbg_data; + struct device *dev = &pdev->dev; + const char *parent_name; + struct resource *res; + struct clk *parent; + void __iomem *reg; + int i, ret; + + hw_tbg_data = devm_kzalloc(&pdev->dev, + struct_size(hw_tbg_data, hws, NUM_TBG), + GFP_KERNEL); + if (!hw_tbg_data) + return -ENOMEM; + hw_tbg_data->num = NUM_TBG; + platform_set_drvdata(pdev, hw_tbg_data); + + parent = devm_clk_get(dev, NULL); + if (IS_ERR(parent)) { + dev_err(dev, "Could get the clock parent\n"); + return -EINVAL; + } + parent_name = __clk_get_name(parent); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + reg = devm_ioremap_resource(dev, res); + if (IS_ERR(reg)) + return PTR_ERR(reg); + + for (i = 0; i < NUM_TBG; i++) { + const char *name; + unsigned int mult, div; + + name = tbg[i].name; + mult = tbg_get_mult(reg, &tbg[i]); + div = tbg_get_div(reg, &tbg[i]); + hw_tbg_data->hws[i] = clk_hw_register_fixed_factor(NULL, name, + parent_name, 0, mult, div); + if (IS_ERR(hw_tbg_data->hws[i])) + dev_err(dev, "Can't register TBG clock %s\n", name); + } + + ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_tbg_data); + + return ret; +} + +static int armada_3700_tbg_clock_remove(struct platform_device *pdev) +{ + int i; + struct clk_hw_onecell_data *hw_tbg_data = platform_get_drvdata(pdev); + + of_clk_del_provider(pdev->dev.of_node); + for (i = 0; i < hw_tbg_data->num; i++) + clk_hw_unregister_fixed_factor(hw_tbg_data->hws[i]); + + return 0; +} + +static const struct of_device_id armada_3700_tbg_clock_of_match[] = { + { .compatible = "marvell,armada-3700-tbg-clock", }, + { } +}; + +static struct platform_driver armada_3700_tbg_clock_driver = { + .probe = armada_3700_tbg_clock_probe, + .remove = armada_3700_tbg_clock_remove, + .driver = { + .name = "marvell-armada-3700-tbg-clock", + .of_match_table = armada_3700_tbg_clock_of_match, + }, 
+}; + +builtin_platform_driver(armada_3700_tbg_clock_driver); diff --git a/drivers/clk/mvebu/armada-37xx-xtal.c b/drivers/clk/mvebu/armada-37xx-xtal.c new file mode 100644 index 000000000..537051495 --- /dev/null +++ b/drivers/clk/mvebu/armada-37xx-xtal.c @@ -0,0 +1,91 @@ +/* + * Marvell Armada 37xx SoC xtal clocks + * + * Copyright (C) 2016 Marvell + * + * Gregory CLEMENT <gregory.clement@free-electrons.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/clk-provider.h> +#include <linux/mfd/syscon.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#define NB_GPIO1_LATCH 0x8 +#define XTAL_MODE BIT(9) + +static int armada_3700_xtal_clock_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + const char *xtal_name = "xtal"; + struct device_node *parent; + struct regmap *regmap; + struct clk_hw *xtal_hw; + unsigned int rate; + u32 reg; + int ret; + + xtal_hw = devm_kzalloc(&pdev->dev, sizeof(*xtal_hw), GFP_KERNEL); + if (!xtal_hw) + return -ENOMEM; + + platform_set_drvdata(pdev, xtal_hw); + + parent = np->parent; + if (!parent) { + dev_err(&pdev->dev, "no parent\n"); + return -ENODEV; + } + + regmap = syscon_node_to_regmap(parent); + if (IS_ERR(regmap)) { + dev_err(&pdev->dev, "cannot get regmap\n"); + return PTR_ERR(regmap); + } + + ret = regmap_read(regmap, NB_GPIO1_LATCH, ®); + if (ret) { + dev_err(&pdev->dev, "cannot read from regmap\n"); + return ret; + } + + if (reg & XTAL_MODE) + rate = 40000000; + else + rate = 25000000; + + of_property_read_string_index(np, "clock-output-names", 0, &xtal_name); + xtal_hw = clk_hw_register_fixed_rate(NULL, xtal_name, NULL, 0, rate); + if (IS_ERR(xtal_hw)) + return PTR_ERR(xtal_hw); + ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, xtal_hw); + + return ret; +} + +static int armada_3700_xtal_clock_remove(struct platform_device *pdev) +{ + of_clk_del_provider(pdev->dev.of_node); + + return 0; +} + +static const struct of_device_id armada_3700_xtal_clock_of_match[] = { + { .compatible = "marvell,armada-3700-xtal-clock", }, + { } +}; + +static struct platform_driver armada_3700_xtal_clock_driver = { + .probe = armada_3700_xtal_clock_probe, + .remove = armada_3700_xtal_clock_remove, + .driver = { + .name = "marvell-armada-3700-xtal-clock", + .of_match_table = armada_3700_xtal_clock_of_match, + }, +}; + +builtin_platform_driver(armada_3700_xtal_clock_driver); diff --git a/drivers/clk/mvebu/armada-38x.c b/drivers/clk/mvebu/armada-38x.c new file mode 100644 index 000000000..9ff4ea639 --- /dev/null +++ b/drivers/clk/mvebu/armada-38x.c @@ -0,0 +1,168 @@ +/* + * Marvell Armada 380/385 SoC clocks + * + * Copyright (C) 2014 Marvell + * + * Gregory CLEMENT <gregory.clement@free-electrons.com> + * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * Andrew Lunn <andrew@lunn.ch> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/kernel.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of.h> +#include "common.h" + +/* + * SAR[14:10] : Ratios between PCLK0, NBCLK, HCLK and DRAM clocks + * + * SAR[15] : TCLK frequency + * 0 = 250 MHz + * 1 = 200 MHz + */ + +#define SAR_A380_TCLK_FREQ_OPT 15 +#define SAR_A380_TCLK_FREQ_OPT_MASK 0x1 +#define SAR_A380_CPU_DDR_L2_FREQ_OPT 10 +#define SAR_A380_CPU_DDR_L2_FREQ_OPT_MASK 0x1F + +static const u32 armada_38x_tclk_frequencies[] __initconst = { + 250000000, + 200000000, +}; + +static u32 __init armada_38x_get_tclk_freq(void __iomem *sar) +{ + u8 tclk_freq_select; + + tclk_freq_select = ((readl(sar) >> SAR_A380_TCLK_FREQ_OPT) & + SAR_A380_TCLK_FREQ_OPT_MASK); + return armada_38x_tclk_frequencies[tclk_freq_select]; +} + +static const u32 armada_38x_cpu_frequencies[] __initconst = { + 666 * 1000 * 1000, 0, 800 * 1000 * 1000, 0, + 1066 * 1000 * 1000, 0, 1200 * 1000 * 1000, 0, + 1332 * 1000 * 1000, 0, 0, 0, + 1600 * 1000 * 1000, 0, 0, 0, + 1866 * 1000 * 1000, 0, 0, 2000 * 1000 * 1000, +}; + +static u32 __init armada_38x_get_cpu_freq(void __iomem *sar) +{ + u8 cpu_freq_select; + + cpu_freq_select = ((readl(sar) >> SAR_A380_CPU_DDR_L2_FREQ_OPT) & + SAR_A380_CPU_DDR_L2_FREQ_OPT_MASK); + if (cpu_freq_select >= ARRAY_SIZE(armada_38x_cpu_frequencies)) { + pr_err("Selected CPU frequency (%d) unsupported\n", + cpu_freq_select); + return 0; + } + + return armada_38x_cpu_frequencies[cpu_freq_select]; +} + +enum { A380_CPU_TO_DDR, A380_CPU_TO_L2 }; + +static const struct coreclk_ratio armada_38x_coreclk_ratios[] __initconst = { + { .id = A380_CPU_TO_L2, .name = "l2clk" }, + { .id = A380_CPU_TO_DDR, .name = "ddrclk" }, +}; + +static const int armada_38x_cpu_l2_ratios[32][2] __initconst = { + {1, 2}, {0, 1}, {1, 2}, {0, 1}, + {1, 2}, {0, 1}, {1, 2}, {0, 1}, + {1, 2}, {0, 1}, {0, 1}, {0, 1}, + {1, 2}, {0, 1}, {0, 1}, {0, 1}, + {1, 2}, {0, 1}, {0, 1}, {1, 2}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, +}; + +static const int armada_38x_cpu_ddr_ratios[32][2] __initconst = { + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {1, 2}, {0, 1}, {0, 1}, {0, 1}, + {1, 2}, {0, 1}, {0, 1}, {0, 1}, + {1, 2}, {0, 1}, {0, 1}, {0, 1}, + {1, 2}, {0, 1}, {0, 1}, {7, 15}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, +}; + +static void __init armada_38x_get_clk_ratio( + void __iomem *sar, int id, int *mult, int *div) +{ + u32 opt = ((readl(sar) >> SAR_A380_CPU_DDR_L2_FREQ_OPT) & + SAR_A380_CPU_DDR_L2_FREQ_OPT_MASK); + + switch (id) { + case A380_CPU_TO_L2: + *mult = armada_38x_cpu_l2_ratios[opt][0]; + *div = armada_38x_cpu_l2_ratios[opt][1]; + break; + case A380_CPU_TO_DDR: + *mult = armada_38x_cpu_ddr_ratios[opt][0]; + *div = armada_38x_cpu_ddr_ratios[opt][1]; + break; + } +} + +static const struct coreclk_soc_desc armada_38x_coreclks = { + .get_tclk_freq = armada_38x_get_tclk_freq, + .get_cpu_freq = armada_38x_get_cpu_freq, + .get_clk_ratio = armada_38x_get_clk_ratio, + .ratios = armada_38x_coreclk_ratios, + .num_ratios = ARRAY_SIZE(armada_38x_coreclk_ratios), +}; + +static void __init armada_38x_coreclk_init(struct device_node *np) +{ + mvebu_coreclk_setup(np, &armada_38x_coreclks); +} +CLK_OF_DECLARE(armada_38x_core_clk, "marvell,armada-380-core-clock", + armada_38x_coreclk_init); + +/* + * Clock Gating Control + */ +static const struct clk_gating_soc_desc armada_38x_gating_desc[] __initconst = { + { "audio", NULL, 0 }, + { "ge2", NULL, 2 }, + { "ge1", NULL, 3 }, 
+ { "ge0", NULL, 4 }, + { "pex1", NULL, 5 }, + { "pex2", NULL, 6 }, + { "pex3", NULL, 7 }, + { "pex0", NULL, 8 }, + { "usb3h0", NULL, 9 }, + { "usb3h1", NULL, 10 }, + { "usb3d", NULL, 11 }, + { "bm", NULL, 13 }, + { "crypto0z", NULL, 14 }, + { "sata0", NULL, 15 }, + { "crypto1z", NULL, 16 }, + { "sdio", NULL, 17 }, + { "usb2", NULL, 18 }, + { "crypto1", NULL, 21 }, + { "xor0", NULL, 22 }, + { "crypto0", NULL, 23 }, + { "tdm", NULL, 25 }, + { "xor1", NULL, 28 }, + { "sata1", NULL, 30 }, + { } +}; + +static void __init armada_38x_clk_gating_init(struct device_node *np) +{ + mvebu_clk_gating_setup(np, armada_38x_gating_desc); +} +CLK_OF_DECLARE(armada_38x_clk_gating, "marvell,armada-380-gating-clock", + armada_38x_clk_gating_init); diff --git a/drivers/clk/mvebu/armada-39x.c b/drivers/clk/mvebu/armada-39x.c new file mode 100644 index 000000000..4fdfd3224 --- /dev/null +++ b/drivers/clk/mvebu/armada-39x.c @@ -0,0 +1,158 @@ +/* + * Marvell Armada 39x SoC clocks + * + * Copyright (C) 2015 Marvell + * + * Gregory CLEMENT <gregory.clement@free-electrons.com> + * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * Andrew Lunn <andrew@lunn.ch> + * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/kernel.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of.h> +#include "common.h" + +/* + * SARL[14:10] : Ratios between CPU, NBCLK, HCLK and DCLK. + * + * SARL[15] : TCLK frequency + * 0 = 250 MHz + * 1 = 200 MHz + * + * SARH[0] : Reference clock frequency + * 0 = 25 Mhz + * 1 = 40 Mhz + */ + +#define SARL 0 +#define SARL_A390_TCLK_FREQ_OPT 15 +#define SARL_A390_TCLK_FREQ_OPT_MASK 0x1 +#define SARL_A390_CPU_DDR_L2_FREQ_OPT 10 +#define SARL_A390_CPU_DDR_L2_FREQ_OPT_MASK 0x1F +#define SARH 4 +#define SARH_A390_REFCLK_FREQ BIT(0) + +static const u32 armada_39x_tclk_frequencies[] __initconst = { + 250000000, + 200000000, +}; + +static u32 __init armada_39x_get_tclk_freq(void __iomem *sar) +{ + u8 tclk_freq_select; + + tclk_freq_select = ((readl(sar + SARL) >> SARL_A390_TCLK_FREQ_OPT) & + SARL_A390_TCLK_FREQ_OPT_MASK); + return armada_39x_tclk_frequencies[tclk_freq_select]; +} + +static const u32 armada_39x_cpu_frequencies[] __initconst = { + [0x0] = 666 * 1000 * 1000, + [0x2] = 800 * 1000 * 1000, + [0x3] = 800 * 1000 * 1000, + [0x4] = 1066 * 1000 * 1000, + [0x5] = 1066 * 1000 * 1000, + [0x6] = 1200 * 1000 * 1000, + [0x8] = 1332 * 1000 * 1000, + [0xB] = 1600 * 1000 * 1000, + [0xC] = 1600 * 1000 * 1000, + [0x12] = 1800 * 1000 * 1000, + [0x1E] = 1800 * 1000 * 1000, +}; + +static u32 __init armada_39x_get_cpu_freq(void __iomem *sar) +{ + u8 cpu_freq_select; + + cpu_freq_select = ((readl(sar + SARL) >> SARL_A390_CPU_DDR_L2_FREQ_OPT) & + SARL_A390_CPU_DDR_L2_FREQ_OPT_MASK); + if (cpu_freq_select >= ARRAY_SIZE(armada_39x_cpu_frequencies)) { + pr_err("Selected CPU frequency (%d) unsupported\n", + cpu_freq_select); + return 0; + } + + return armada_39x_cpu_frequencies[cpu_freq_select]; +} + +enum { A390_CPU_TO_NBCLK, A390_CPU_TO_HCLK, A390_CPU_TO_DCLK }; + +static const struct coreclk_ratio armada_39x_coreclk_ratios[] __initconst = { + { .id = A390_CPU_TO_NBCLK, .name = "nbclk" }, + { .id = A390_CPU_TO_HCLK, .name = "hclk" }, + { .id = A390_CPU_TO_DCLK, .name = "dclk" }, +}; + +static void __init armada_39x_get_clk_ratio( + void __iomem *sar, int id, int *mult, int 
*div) +{ + switch (id) { + case A390_CPU_TO_NBCLK: + *mult = 1; + *div = 2; + break; + case A390_CPU_TO_HCLK: + *mult = 1; + *div = 4; + break; + case A390_CPU_TO_DCLK: + *mult = 1; + *div = 2; + break; + } +} + +static u32 __init armada_39x_refclk_ratio(void __iomem *sar) +{ + if (readl(sar + SARH) & SARH_A390_REFCLK_FREQ) + return 40 * 1000 * 1000; + else + return 25 * 1000 * 1000; +} + +static const struct coreclk_soc_desc armada_39x_coreclks = { + .get_tclk_freq = armada_39x_get_tclk_freq, + .get_cpu_freq = armada_39x_get_cpu_freq, + .get_clk_ratio = armada_39x_get_clk_ratio, + .get_refclk_freq = armada_39x_refclk_ratio, + .ratios = armada_39x_coreclk_ratios, + .num_ratios = ARRAY_SIZE(armada_39x_coreclk_ratios), +}; + +static void __init armada_39x_coreclk_init(struct device_node *np) +{ + mvebu_coreclk_setup(np, &armada_39x_coreclks); +} +CLK_OF_DECLARE(armada_39x_core_clk, "marvell,armada-390-core-clock", + armada_39x_coreclk_init); + +/* + * Clock Gating Control + */ +static const struct clk_gating_soc_desc armada_39x_gating_desc[] __initconst = { + { "pex1", NULL, 5 }, + { "pex2", NULL, 6 }, + { "pex3", NULL, 7 }, + { "pex0", NULL, 8 }, + { "usb3h0", NULL, 9 }, + { "usb3h1", NULL, 10 }, + { "sata0", NULL, 15 }, + { "sdio", NULL, 17 }, + { "xor0", NULL, 22 }, + { "xor1", NULL, 28 }, + { } +}; + +static void __init armada_39x_clk_gating_init(struct device_node *np) +{ + mvebu_clk_gating_setup(np, armada_39x_gating_desc); +} +CLK_OF_DECLARE(armada_39x_clk_gating, "marvell,armada-390-gating-clock", + armada_39x_clk_gating_init); diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c new file mode 100644 index 000000000..df529982a --- /dev/null +++ b/drivers/clk/mvebu/armada-xp.c @@ -0,0 +1,236 @@ +/* + * Marvell Armada XP SoC clocks + * + * Copyright (C) 2012 Marvell + * + * Gregory CLEMENT <gregory.clement@free-electrons.com> + * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * Andrew Lunn <andrew@lunn.ch> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/kernel.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of.h> +#include "common.h" + +/* + * Core Clocks + * + * Armada XP Sample At Reset is a 64 bit bitfiled split in two + * register of 32 bits + */ + +#define SARL 0 /* Low part [0:31] */ +#define SARL_AXP_PCLK_FREQ_OPT 21 +#define SARL_AXP_PCLK_FREQ_OPT_MASK 0x7 +#define SARL_AXP_FAB_FREQ_OPT 24 +#define SARL_AXP_FAB_FREQ_OPT_MASK 0xF +#define SARH 4 /* High part [32:63] */ +#define SARH_AXP_PCLK_FREQ_OPT (52-32) +#define SARH_AXP_PCLK_FREQ_OPT_MASK 0x1 +#define SARH_AXP_PCLK_FREQ_OPT_SHIFT 3 +#define SARH_AXP_FAB_FREQ_OPT (51-32) +#define SARH_AXP_FAB_FREQ_OPT_MASK 0x1 +#define SARH_AXP_FAB_FREQ_OPT_SHIFT 4 + +enum { AXP_CPU_TO_NBCLK, AXP_CPU_TO_HCLK, AXP_CPU_TO_DRAMCLK }; + +static const struct coreclk_ratio axp_coreclk_ratios[] __initconst = { + { .id = AXP_CPU_TO_NBCLK, .name = "nbclk" }, + { .id = AXP_CPU_TO_HCLK, .name = "hclk" }, + { .id = AXP_CPU_TO_DRAMCLK, .name = "dramclk" }, +}; + +/* Armada XP TCLK frequency is fixed to 250MHz */ +static u32 __init axp_get_tclk_freq(void __iomem *sar) +{ + return 250000000; +} + +/* MV98DX3236 TCLK frequency is fixed to 200MHz */ +static u32 __init mv98dx3236_get_tclk_freq(void __iomem *sar) +{ + return 200000000; +} + +static const u32 axp_cpu_freqs[] __initconst = { + 1000000000, + 1066000000, + 1200000000, + 1333000000, + 1500000000, + 1666000000, + 1800000000, + 2000000000, + 667000000, + 0, + 800000000, + 1600000000, +}; + +static u32 __init axp_get_cpu_freq(void __iomem *sar) +{ + u32 cpu_freq; + u8 cpu_freq_select = 0; + + cpu_freq_select = ((readl(sar + SARL) >> SARL_AXP_PCLK_FREQ_OPT) & + SARL_AXP_PCLK_FREQ_OPT_MASK); + /* + * The upper bit is not contiguous to the other ones and + * located in the high part of the SAR registers + */ + cpu_freq_select |= (((readl(sar + SARH) >> SARH_AXP_PCLK_FREQ_OPT) & + SARH_AXP_PCLK_FREQ_OPT_MASK) << SARH_AXP_PCLK_FREQ_OPT_SHIFT); + if (cpu_freq_select >= ARRAY_SIZE(axp_cpu_freqs)) { + pr_err("CPU freq select unsupported: %d\n", cpu_freq_select); + cpu_freq = 0; + } else + cpu_freq = axp_cpu_freqs[cpu_freq_select]; + + return cpu_freq; +} + +/* MV98DX3236 CLK frequency is fixed to 800MHz */ +static u32 __init mv98dx3236_get_cpu_freq(void __iomem *sar) +{ + return 800000000; +} + +static const int axp_nbclk_ratios[32][2] __initconst = { + {0, 1}, {1, 2}, {2, 2}, {2, 2}, + {1, 2}, {1, 2}, {1, 1}, {2, 3}, + {0, 1}, {1, 2}, {2, 4}, {0, 1}, + {1, 2}, {0, 1}, {0, 1}, {2, 2}, + {0, 1}, {0, 1}, {0, 1}, {1, 1}, + {2, 3}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {1, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, +}; + +static const int axp_hclk_ratios[32][2] __initconst = { + {0, 1}, {1, 2}, {2, 6}, {2, 3}, + {1, 3}, {1, 4}, {1, 2}, {2, 6}, + {0, 1}, {1, 6}, {2, 10}, {0, 1}, + {1, 4}, {0, 1}, {0, 1}, {2, 5}, + {0, 1}, {0, 1}, {0, 1}, {1, 2}, + {2, 6}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {1, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, +}; + +static const int axp_dramclk_ratios[32][2] __initconst = { + {0, 1}, {1, 2}, {2, 3}, {2, 3}, + {1, 3}, {1, 2}, {1, 2}, {2, 6}, + {0, 1}, {1, 3}, {2, 5}, {0, 1}, + {1, 4}, {0, 1}, {0, 1}, {2, 5}, + {0, 1}, {0, 1}, {0, 1}, {1, 1}, + {2, 3}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {1, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, +}; + +static void __init axp_get_clk_ratio( + void __iomem *sar, int id, int *mult, int *div) +{ + u32 opt = ((readl(sar + SARL) >> SARL_AXP_FAB_FREQ_OPT) & + SARL_AXP_FAB_FREQ_OPT_MASK); + /* + * The upper bit is not contiguous to 
the other ones and + * located in the high part of the SAR registers + */ + opt |= (((readl(sar + SARH) >> SARH_AXP_FAB_FREQ_OPT) & + SARH_AXP_FAB_FREQ_OPT_MASK) << SARH_AXP_FAB_FREQ_OPT_SHIFT); + + switch (id) { + case AXP_CPU_TO_NBCLK: + *mult = axp_nbclk_ratios[opt][0]; + *div = axp_nbclk_ratios[opt][1]; + break; + case AXP_CPU_TO_HCLK: + *mult = axp_hclk_ratios[opt][0]; + *div = axp_hclk_ratios[opt][1]; + break; + case AXP_CPU_TO_DRAMCLK: + *mult = axp_dramclk_ratios[opt][0]; + *div = axp_dramclk_ratios[opt][1]; + break; + } +} + +static const struct coreclk_soc_desc axp_coreclks = { + .get_tclk_freq = axp_get_tclk_freq, + .get_cpu_freq = axp_get_cpu_freq, + .get_clk_ratio = axp_get_clk_ratio, + .ratios = axp_coreclk_ratios, + .num_ratios = ARRAY_SIZE(axp_coreclk_ratios), +}; + +static const struct coreclk_soc_desc mv98dx3236_coreclks = { + .get_tclk_freq = mv98dx3236_get_tclk_freq, + .get_cpu_freq = mv98dx3236_get_cpu_freq, +}; + +/* + * Clock Gating Control + */ + +static const struct clk_gating_soc_desc axp_gating_desc[] __initconst = { + { "audio", NULL, 0, 0 }, + { "ge3", NULL, 1, 0 }, + { "ge2", NULL, 2, 0 }, + { "ge1", NULL, 3, 0 }, + { "ge0", NULL, 4, 0 }, + { "pex00", NULL, 5, 0 }, + { "pex01", NULL, 6, 0 }, + { "pex02", NULL, 7, 0 }, + { "pex03", NULL, 8, 0 }, + { "pex10", NULL, 9, 0 }, + { "pex11", NULL, 10, 0 }, + { "pex12", NULL, 11, 0 }, + { "pex13", NULL, 12, 0 }, + { "bp", NULL, 13, 0 }, + { "sata0lnk", NULL, 14, 0 }, + { "sata0", "sata0lnk", 15, 0 }, + { "lcd", NULL, 16, 0 }, + { "sdio", NULL, 17, 0 }, + { "usb0", NULL, 18, 0 }, + { "usb1", NULL, 19, 0 }, + { "usb2", NULL, 20, 0 }, + { "xor0", NULL, 22, 0 }, + { "crypto", NULL, 23, 0 }, + { "tdm", NULL, 25, 0 }, + { "pex20", NULL, 26, 0 }, + { "pex30", NULL, 27, 0 }, + { "xor1", NULL, 28, 0 }, + { "sata1lnk", NULL, 29, 0 }, + { "sata1", "sata1lnk", 30, 0 }, + { } +}; + +static const struct clk_gating_soc_desc mv98dx3236_gating_desc[] __initconst = { + { "ge1", NULL, 3, 0 }, + { "ge0", NULL, 4, 0 }, + { "pex00", NULL, 5, 0 }, + { "sdio", NULL, 17, 0 }, + { "xor0", NULL, 22, 0 }, + { } +}; + +static void __init axp_clk_init(struct device_node *np) +{ + struct device_node *cgnp = + of_find_compatible_node(NULL, NULL, "marvell,armada-xp-gating-clock"); + + mvebu_coreclk_setup(np, &axp_coreclks); + + if (cgnp) { + mvebu_clk_gating_setup(cgnp, axp_gating_desc); + of_node_put(cgnp); + } +} +CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init); diff --git a/drivers/clk/mvebu/clk-corediv.c b/drivers/clk/mvebu/clk-corediv.c new file mode 100644 index 000000000..68f05c53d --- /dev/null +++ b/drivers/clk/mvebu/clk-corediv.c @@ -0,0 +1,338 @@ +/* + * MVEBU Core divider clock + * + * Copyright (C) 2013 Marvell + * + * Ezequiel Garcia <ezequiel.garcia@free-electrons.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/kernel.h> +#include <linux/clk-provider.h> +#include <linux/of_address.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include "common.h" + +#define CORE_CLK_DIV_RATIO_MASK 0xff + +/* + * This structure describes the hardware details (bit offset and mask) + * to configure one particular core divider clock. Those hardware + * details may differ from one SoC to another. This structure is + * therefore typically instantiated statically to describe the + * hardware details. 
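+ *
+ * In mvebu_corediv_desc[] below, for instance, the NAND divider value
+ * lives in bits [13:8] of the ratio register (mask 0x3f, offset 8) and
+ * is gated and reloaded via field bit 1.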
+ */ +struct clk_corediv_desc { + unsigned int mask; + unsigned int offset; + unsigned int fieldbit; +}; + +/* + * This structure describes the hardware details to configure the core + * divider clocks on a given SoC. Amongst others, it points to the + * array of core divider clock descriptors for this SoC, as well as + * the corresponding operations to manipulate them. + */ +struct clk_corediv_soc_desc { + const struct clk_corediv_desc *descs; + unsigned int ndescs; + const struct clk_ops ops; + u32 ratio_reload; + u32 enable_bit_offset; + u32 ratio_offset; +}; + +/* + * This structure represents one core divider clock for the clock + * framework, and is dynamically allocated for each core divider clock + * existing in the current SoC. + */ +struct clk_corediv { + struct clk_hw hw; + void __iomem *reg; + const struct clk_corediv_desc *desc; + const struct clk_corediv_soc_desc *soc_desc; + spinlock_t lock; +}; + +static struct clk_onecell_data clk_data; + +/* + * Description of the core divider clocks available. For now, we + * support only NAND, and it is available at the same register + * locations regardless of the SoC. + */ +static const struct clk_corediv_desc mvebu_corediv_desc[] = { + { .mask = 0x3f, .offset = 8, .fieldbit = 1 }, /* NAND clock */ +}; + +static const struct clk_corediv_desc mv98dx3236_corediv_desc[] = { + { .mask = 0x0f, .offset = 6, .fieldbit = 27 }, /* NAND clock */ +}; + +#define to_corediv_clk(p) container_of(p, struct clk_corediv, hw) + +static int clk_corediv_is_enabled(struct clk_hw *hwclk) +{ + struct clk_corediv *corediv = to_corediv_clk(hwclk); + const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc; + const struct clk_corediv_desc *desc = corediv->desc; + u32 enable_mask = BIT(desc->fieldbit) << soc_desc->enable_bit_offset; + + return !!(readl(corediv->reg) & enable_mask); +} + +static int clk_corediv_enable(struct clk_hw *hwclk) +{ + struct clk_corediv *corediv = to_corediv_clk(hwclk); + const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc; + const struct clk_corediv_desc *desc = corediv->desc; + unsigned long flags = 0; + u32 reg; + + spin_lock_irqsave(&corediv->lock, flags); + + reg = readl(corediv->reg); + reg |= (BIT(desc->fieldbit) << soc_desc->enable_bit_offset); + writel(reg, corediv->reg); + + spin_unlock_irqrestore(&corediv->lock, flags); + + return 0; +} + +static void clk_corediv_disable(struct clk_hw *hwclk) +{ + struct clk_corediv *corediv = to_corediv_clk(hwclk); + const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc; + const struct clk_corediv_desc *desc = corediv->desc; + unsigned long flags = 0; + u32 reg; + + spin_lock_irqsave(&corediv->lock, flags); + + reg = readl(corediv->reg); + reg &= ~(BIT(desc->fieldbit) << soc_desc->enable_bit_offset); + writel(reg, corediv->reg); + + spin_unlock_irqrestore(&corediv->lock, flags); +} + +static unsigned long clk_corediv_recalc_rate(struct clk_hw *hwclk, + unsigned long parent_rate) +{ + struct clk_corediv *corediv = to_corediv_clk(hwclk); + const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc; + const struct clk_corediv_desc *desc = corediv->desc; + u32 reg, div; + + reg = readl(corediv->reg + soc_desc->ratio_offset); + div = (reg >> desc->offset) & desc->mask; + return parent_rate / div; +} + +static long clk_corediv_round_rate(struct clk_hw *hwclk, unsigned long rate, + unsigned long *parent_rate) +{ + /* Valid ratio are 1:4, 1:5, 1:6 and 1:8 */ + u32 div; + + div = *parent_rate / rate; + if (div < 4) + div = 4; + else if (div > 6) + div = 8; + + return 
*parent_rate / div; +} + +static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_corediv *corediv = to_corediv_clk(hwclk); + const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc; + const struct clk_corediv_desc *desc = corediv->desc; + unsigned long flags = 0; + u32 reg, div; + + div = parent_rate / rate; + + spin_lock_irqsave(&corediv->lock, flags); + + /* Write new divider to the divider ratio register */ + reg = readl(corediv->reg + soc_desc->ratio_offset); + reg &= ~(desc->mask << desc->offset); + reg |= (div & desc->mask) << desc->offset; + writel(reg, corediv->reg + soc_desc->ratio_offset); + + /* Set reload-force for this clock */ + reg = readl(corediv->reg) | BIT(desc->fieldbit); + writel(reg, corediv->reg); + + /* Now trigger the clock update */ + reg = readl(corediv->reg) | soc_desc->ratio_reload; + writel(reg, corediv->reg); + + /* + * Wait for clocks to settle down, and then clear all the + * ratios request and the reload request. + */ + udelay(1000); + reg &= ~(CORE_CLK_DIV_RATIO_MASK | soc_desc->ratio_reload); + writel(reg, corediv->reg); + udelay(1000); + + spin_unlock_irqrestore(&corediv->lock, flags); + + return 0; +} + +static const struct clk_corediv_soc_desc armada370_corediv_soc = { + .descs = mvebu_corediv_desc, + .ndescs = ARRAY_SIZE(mvebu_corediv_desc), + .ops = { + .enable = clk_corediv_enable, + .disable = clk_corediv_disable, + .is_enabled = clk_corediv_is_enabled, + .recalc_rate = clk_corediv_recalc_rate, + .round_rate = clk_corediv_round_rate, + .set_rate = clk_corediv_set_rate, + }, + .ratio_reload = BIT(8), + .enable_bit_offset = 24, + .ratio_offset = 0x8, +}; + +static const struct clk_corediv_soc_desc armada380_corediv_soc = { + .descs = mvebu_corediv_desc, + .ndescs = ARRAY_SIZE(mvebu_corediv_desc), + .ops = { + .enable = clk_corediv_enable, + .disable = clk_corediv_disable, + .is_enabled = clk_corediv_is_enabled, + .recalc_rate = clk_corediv_recalc_rate, + .round_rate = clk_corediv_round_rate, + .set_rate = clk_corediv_set_rate, + }, + .ratio_reload = BIT(8), + .enable_bit_offset = 16, + .ratio_offset = 0x4, +}; + +static const struct clk_corediv_soc_desc armada375_corediv_soc = { + .descs = mvebu_corediv_desc, + .ndescs = ARRAY_SIZE(mvebu_corediv_desc), + .ops = { + .recalc_rate = clk_corediv_recalc_rate, + .round_rate = clk_corediv_round_rate, + .set_rate = clk_corediv_set_rate, + }, + .ratio_reload = BIT(8), + .ratio_offset = 0x4, +}; + +static const struct clk_corediv_soc_desc mv98dx3236_corediv_soc = { + .descs = mv98dx3236_corediv_desc, + .ndescs = ARRAY_SIZE(mv98dx3236_corediv_desc), + .ops = { + .recalc_rate = clk_corediv_recalc_rate, + .round_rate = clk_corediv_round_rate, + .set_rate = clk_corediv_set_rate, + }, + .ratio_reload = BIT(10), + .ratio_offset = 0x8, +}; + +static void __init +mvebu_corediv_clk_init(struct device_node *node, + const struct clk_corediv_soc_desc *soc_desc) +{ + struct clk_init_data init; + struct clk_corediv *corediv; + struct clk **clks; + void __iomem *base; + const char *parent_name; + const char *clk_name; + int i; + + base = of_iomap(node, 0); + if (WARN_ON(!base)) + return; + + parent_name = of_clk_get_parent_name(node, 0); + + clk_data.clk_num = soc_desc->ndescs; + + /* clks holds the clock array */ + clks = kcalloc(clk_data.clk_num, sizeof(struct clk *), + GFP_KERNEL); + if (WARN_ON(!clks)) + goto err_unmap; + /* corediv holds the clock specific array */ + corediv = kcalloc(clk_data.clk_num, sizeof(struct clk_corediv), + GFP_KERNEL); + if 
(WARN_ON(!corediv)) + goto err_free_clks; + + spin_lock_init(&corediv->lock); + + for (i = 0; i < clk_data.clk_num; i++) { + of_property_read_string_index(node, "clock-output-names", + i, &clk_name); + init.num_parents = 1; + init.parent_names = &parent_name; + init.name = clk_name; + init.ops = &soc_desc->ops; + init.flags = 0; + + corediv[i].soc_desc = soc_desc; + corediv[i].desc = soc_desc->descs + i; + corediv[i].reg = base; + corediv[i].hw.init = &init; + + clks[i] = clk_register(NULL, &corediv[i].hw); + WARN_ON(IS_ERR(clks[i])); + } + + clk_data.clks = clks; + of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data); + return; + +err_free_clks: + kfree(clks); +err_unmap: + iounmap(base); +} + +static void __init armada370_corediv_clk_init(struct device_node *node) +{ + return mvebu_corediv_clk_init(node, &armada370_corediv_soc); +} +CLK_OF_DECLARE(armada370_corediv_clk, "marvell,armada-370-corediv-clock", + armada370_corediv_clk_init); + +static void __init armada375_corediv_clk_init(struct device_node *node) +{ + return mvebu_corediv_clk_init(node, &armada375_corediv_soc); +} +CLK_OF_DECLARE(armada375_corediv_clk, "marvell,armada-375-corediv-clock", + armada375_corediv_clk_init); + +static void __init armada380_corediv_clk_init(struct device_node *node) +{ + return mvebu_corediv_clk_init(node, &armada380_corediv_soc); +} +CLK_OF_DECLARE(armada380_corediv_clk, "marvell,armada-380-corediv-clock", + armada380_corediv_clk_init); + +static void __init mv98dx3236_corediv_clk_init(struct device_node *node) +{ + return mvebu_corediv_clk_init(node, &mv98dx3236_corediv_soc); +} +CLK_OF_DECLARE(mv98dx3236_corediv_clk, "marvell,mv98dx3236-corediv-clock", + mv98dx3236_corediv_clk_init); diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c new file mode 100644 index 000000000..072aa3837 --- /dev/null +++ b/drivers/clk/mvebu/clk-cpu.c @@ -0,0 +1,255 @@ +/* + * Marvell MVEBU CPU clock handling. + * + * Copyright (C) 2012 Marvell + * + * Gregory CLEMENT <gregory.clement@free-electrons.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/of_address.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/delay.h> +#include <linux/mvebu-pmsu.h> +#include <asm/smp_plat.h> + +#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET 0x0 +#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL 0xff +#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT 8 +#define SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET 0x8 +#define SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT 16 +#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET 0xC +#define SYS_CTRL_CLK_DIVIDER_MASK 0x3F + +#define PMU_DFS_RATIO_SHIFT 16 +#define PMU_DFS_RATIO_MASK 0x3F + +#define MAX_CPU 4 +struct cpu_clk { + struct clk_hw hw; + int cpu; + const char *clk_name; + const char *parent_name; + void __iomem *reg_base; + void __iomem *pmu_dfs; +}; + +static struct clk **clks; + +static struct clk_onecell_data clk_data; + +#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw) + +static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk, + unsigned long parent_rate) +{ + struct cpu_clk *cpuclk = to_cpu_clk(hwclk); + u32 reg, div; + + reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET); + div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK; + return parent_rate / div; +} + +static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate, + unsigned long *parent_rate) +{ + /* Valid ratio are 1:1, 1:2 and 1:3 */ + u32 div; + + div = *parent_rate / rate; + if (div == 0) + div = 1; + else if (div > 3) + div = 3; + + return *parent_rate / div; +} + +static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate, + unsigned long parent_rate) + +{ + struct cpu_clk *cpuclk = to_cpu_clk(hwclk); + u32 reg, div; + u32 reload_mask; + + div = parent_rate / rate; + reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET) + & (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8)))) + | (div << (cpuclk->cpu * 8)); + writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET); + /* Set clock divider reload smooth bit mask */ + reload_mask = 1 << (20 + cpuclk->cpu); + + reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET) + | reload_mask; + writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET); + + /* Now trigger the clock update */ + reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET) + | 1 << 24; + writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET); + + /* Wait for clocks to settle down then clear reload request */ + udelay(1000); + reg &= ~(reload_mask | 1 << 24); + writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET); + udelay(1000); + + return 0; +} + +static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate, + unsigned long parent_rate) +{ + u32 reg; + unsigned long fabric_div, target_div, cur_rate; + struct cpu_clk *cpuclk = to_cpu_clk(hwclk); + + /* + * PMU DFS registers are not mapped, Device Tree does not + * describes them. We cannot change the frequency dynamically. 
+ */ + if (!cpuclk->pmu_dfs) + return -ENODEV; + + cur_rate = clk_hw_get_rate(hwclk); + + reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET); + fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) & + SYS_CTRL_CLK_DIVIDER_MASK; + + /* Frequency is going up */ + if (rate == 2 * cur_rate) + target_div = fabric_div / 2; + /* Frequency is going down */ + else + target_div = fabric_div; + + if (target_div == 0) + target_div = 1; + + reg = readl(cpuclk->pmu_dfs); + reg &= ~(PMU_DFS_RATIO_MASK << PMU_DFS_RATIO_SHIFT); + reg |= (target_div << PMU_DFS_RATIO_SHIFT); + writel(reg, cpuclk->pmu_dfs); + + reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET); + reg |= (SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL << + SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT); + writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET); + + return mvebu_pmsu_dfs_request(cpuclk->cpu); +} + +static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate, + unsigned long parent_rate) +{ + if (__clk_is_enabled(hwclk->clk)) + return clk_cpu_on_set_rate(hwclk, rate, parent_rate); + else + return clk_cpu_off_set_rate(hwclk, rate, parent_rate); +} + +static const struct clk_ops cpu_ops = { + .recalc_rate = clk_cpu_recalc_rate, + .round_rate = clk_cpu_round_rate, + .set_rate = clk_cpu_set_rate, +}; + +static void __init of_cpu_clk_setup(struct device_node *node) +{ + struct cpu_clk *cpuclk; + void __iomem *clock_complex_base = of_iomap(node, 0); + void __iomem *pmu_dfs_base = of_iomap(node, 1); + int ncpus = 0; + struct device_node *dn; + + if (clock_complex_base == NULL) { + pr_err("%s: clock-complex base register not set\n", + __func__); + return; + } + + if (pmu_dfs_base == NULL) + pr_warn("%s: pmu-dfs base register not set, dynamic frequency scaling not available\n", + __func__); + + for_each_node_by_type(dn, "cpu") + ncpus++; + + cpuclk = kcalloc(ncpus, sizeof(*cpuclk), GFP_KERNEL); + if (WARN_ON(!cpuclk)) + goto cpuclk_out; + + clks = kcalloc(ncpus, sizeof(*clks), GFP_KERNEL); + if (WARN_ON(!clks)) + goto clks_out; + + for_each_node_by_type(dn, "cpu") { + struct clk_init_data init; + struct clk *clk; + char *clk_name = kzalloc(5, GFP_KERNEL); + int cpu, err; + + if (WARN_ON(!clk_name)) + goto bail_out; + + err = of_property_read_u32(dn, "reg", &cpu); + if (WARN_ON(err)) + goto bail_out; + + sprintf(clk_name, "cpu%d", cpu); + + cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0); + cpuclk[cpu].clk_name = clk_name; + cpuclk[cpu].cpu = cpu; + cpuclk[cpu].reg_base = clock_complex_base; + if (pmu_dfs_base) + cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu; + cpuclk[cpu].hw.init = &init; + + init.name = cpuclk[cpu].clk_name; + init.ops = &cpu_ops; + init.flags = 0; + init.parent_names = &cpuclk[cpu].parent_name; + init.num_parents = 1; + + clk = clk_register(NULL, &cpuclk[cpu].hw); + if (WARN_ON(IS_ERR(clk))) + goto bail_out; + clks[cpu] = clk; + } + clk_data.clk_num = MAX_CPU; + clk_data.clks = clks; + of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data); + + return; +bail_out: + kfree(clks); + while(ncpus--) + kfree(cpuclk[ncpus].clk_name); +clks_out: + kfree(cpuclk); +cpuclk_out: + iounmap(clock_complex_base); +} + +CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock", + of_cpu_clk_setup); + +static void __init of_mv98dx3236_cpu_clk_setup(struct device_node *node) +{ + of_clk_add_provider(node, of_clk_src_simple_get, NULL); +} + +CLK_OF_DECLARE(mv98dx3236_cpu_clock, "marvell,mv98dx3236-cpu-clock", + of_mv98dx3236_cpu_clk_setup); diff --git 
a/drivers/clk/mvebu/common.c b/drivers/clk/mvebu/common.c new file mode 100644 index 000000000..472c88b90 --- /dev/null +++ b/drivers/clk/mvebu/common.c @@ -0,0 +1,296 @@ +/* + * Marvell EBU SoC common clock handling + * + * Copyright (C) 2012 Marvell + * + * Gregory CLEMENT <gregory.clement@free-electrons.com> + * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * Andrew Lunn <andrew@lunn.ch> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/syscore_ops.h> + +#include "common.h" + +/* + * Core Clocks + */ + +#define SSCG_CONF_MODE(reg) (((reg) >> 16) & 0x3) +#define SSCG_SPREAD_DOWN 0x0 +#define SSCG_SPREAD_UP 0x1 +#define SSCG_SPREAD_CENTRAL 0x2 +#define SSCG_CONF_LOW(reg) (((reg) >> 8) & 0xFF) +#define SSCG_CONF_HIGH(reg) ((reg) & 0xFF) + +static struct clk_onecell_data clk_data; + +/* + * This function can be used by the Kirkwood, the Armada 370, the + * Armada XP and the Armada 375 SoC. The name of the function was + * chosen following the dt convention: using the first known SoC + * compatible with it. + */ +u32 kirkwood_fix_sscg_deviation(u32 system_clk) +{ + struct device_node *sscg_np = NULL; + void __iomem *sscg_map; + u32 sscg_reg; + s32 low_bound, high_bound; + u64 freq_swing_half; + + sscg_np = of_find_node_by_name(NULL, "sscg"); + if (sscg_np == NULL) { + pr_err("cannot get SSCG register node\n"); + return system_clk; + } + + sscg_map = of_iomap(sscg_np, 0); + if (sscg_map == NULL) { + pr_err("cannot map SSCG register\n"); + goto out; + } + + sscg_reg = readl(sscg_map); + high_bound = SSCG_CONF_HIGH(sscg_reg); + low_bound = SSCG_CONF_LOW(sscg_reg); + + if ((high_bound - low_bound) <= 0) + goto out; + /* + * From Marvell engineer we got the following formula (when + * this code was written, the datasheet was erroneous) + * Spread percentage = 1/96 * (H - L) / H + * H = SSCG_High_Boundary + * L = SSCG_Low_Boundary + * + * As the deviation is half of spread then it lead to the + * following formula in the code. + * + * To avoid an overflow and not lose any significant digit in + * the same time we have to use a 64 bit integer. 
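+ *
+ * Worked example with made-up register values, for illustration only:
+ * if H = 100, L = 88 and system_clk = 2000 MHz, the half swing is
+ * (100 - 88) * 2000000000 / (2 * 96 * 100) = 1.25 MHz, i.e. a 0.125%
+ * spread and a 0.0625% deviation.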
+ */ + + freq_swing_half = (((u64)high_bound - (u64)low_bound) + * (u64)system_clk); + do_div(freq_swing_half, (2 * 96 * high_bound)); + + switch (SSCG_CONF_MODE(sscg_reg)) { + case SSCG_SPREAD_DOWN: + system_clk -= freq_swing_half; + break; + case SSCG_SPREAD_UP: + system_clk += freq_swing_half; + break; + case SSCG_SPREAD_CENTRAL: + default: + break; + } + + iounmap(sscg_map); + +out: + of_node_put(sscg_np); + + return system_clk; +} + +void __init mvebu_coreclk_setup(struct device_node *np, + const struct coreclk_soc_desc *desc) +{ + const char *tclk_name = "tclk"; + const char *cpuclk_name = "cpuclk"; + void __iomem *base; + unsigned long rate; + int n; + + base = of_iomap(np, 0); + if (WARN_ON(!base)) + return; + + /* Allocate struct for TCLK, cpu clk, and core ratio clocks */ + clk_data.clk_num = 2 + desc->num_ratios; + + /* One more clock for the optional refclk */ + if (desc->get_refclk_freq) + clk_data.clk_num += 1; + + clk_data.clks = kcalloc(clk_data.clk_num, sizeof(*clk_data.clks), + GFP_KERNEL); + if (WARN_ON(!clk_data.clks)) { + iounmap(base); + return; + } + + /* Register TCLK */ + of_property_read_string_index(np, "clock-output-names", 0, + &tclk_name); + rate = desc->get_tclk_freq(base); + clk_data.clks[0] = clk_register_fixed_rate(NULL, tclk_name, NULL, 0, + rate); + WARN_ON(IS_ERR(clk_data.clks[0])); + + /* Register CPU clock */ + of_property_read_string_index(np, "clock-output-names", 1, + &cpuclk_name); + rate = desc->get_cpu_freq(base); + + if (desc->is_sscg_enabled && desc->fix_sscg_deviation + && desc->is_sscg_enabled(base)) + rate = desc->fix_sscg_deviation(rate); + + clk_data.clks[1] = clk_register_fixed_rate(NULL, cpuclk_name, NULL, 0, + rate); + WARN_ON(IS_ERR(clk_data.clks[1])); + + /* Register fixed-factor clocks derived from CPU clock */ + for (n = 0; n < desc->num_ratios; n++) { + const char *rclk_name = desc->ratios[n].name; + int mult, div; + + of_property_read_string_index(np, "clock-output-names", + 2+n, &rclk_name); + desc->get_clk_ratio(base, desc->ratios[n].id, &mult, &div); + clk_data.clks[2+n] = clk_register_fixed_factor(NULL, rclk_name, + cpuclk_name, 0, mult, div); + WARN_ON(IS_ERR(clk_data.clks[2+n])); + } + + /* Register optional refclk */ + if (desc->get_refclk_freq) { + const char *name = "refclk"; + of_property_read_string_index(np, "clock-output-names", + 2 + desc->num_ratios, &name); + rate = desc->get_refclk_freq(base); + clk_data.clks[2 + desc->num_ratios] = + clk_register_fixed_rate(NULL, name, NULL, 0, rate); + WARN_ON(IS_ERR(clk_data.clks[2 + desc->num_ratios])); + } + + /* SAR register isn't needed anymore */ + iounmap(base); + + of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); +} + +/* + * Clock Gating Control + */ + +DEFINE_SPINLOCK(ctrl_gating_lock); + +struct clk_gating_ctrl { + spinlock_t *lock; + struct clk **gates; + int num_gates; + void __iomem *base; + u32 saved_reg; +}; + +static struct clk_gating_ctrl *ctrl; + +static struct clk *clk_gating_get_src( + struct of_phandle_args *clkspec, void *data) +{ + int n; + + if (clkspec->args_count < 1) + return ERR_PTR(-EINVAL); + + for (n = 0; n < ctrl->num_gates; n++) { + struct clk_gate *gate = + to_clk_gate(__clk_get_hw(ctrl->gates[n])); + if (clkspec->args[0] == gate->bit_idx) + return ctrl->gates[n]; + } + return ERR_PTR(-ENODEV); +} + +static int mvebu_clk_gating_suspend(void) +{ + ctrl->saved_reg = readl(ctrl->base); + return 0; +} + +static void mvebu_clk_gating_resume(void) +{ + writel(ctrl->saved_reg, ctrl->base); +} + +static struct syscore_ops 
clk_gate_syscore_ops = { + .suspend = mvebu_clk_gating_suspend, + .resume = mvebu_clk_gating_resume, +}; + +void __init mvebu_clk_gating_setup(struct device_node *np, + const struct clk_gating_soc_desc *desc) +{ + struct clk *clk; + void __iomem *base; + const char *default_parent = NULL; + int n; + + if (ctrl) { + pr_err("mvebu-clk-gating: cannot instantiate more than one gatable clock device\n"); + return; + } + + base = of_iomap(np, 0); + if (WARN_ON(!base)) + return; + + clk = of_clk_get(np, 0); + if (!IS_ERR(clk)) { + default_parent = __clk_get_name(clk); + clk_put(clk); + } + + ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); + if (WARN_ON(!ctrl)) + goto ctrl_out; + + /* lock must already be initialized */ + ctrl->lock = &ctrl_gating_lock; + + ctrl->base = base; + + /* Count, allocate, and register clock gates */ + for (n = 0; desc[n].name;) + n++; + + ctrl->num_gates = n; + ctrl->gates = kcalloc(ctrl->num_gates, sizeof(*ctrl->gates), + GFP_KERNEL); + if (WARN_ON(!ctrl->gates)) + goto gates_out; + + for (n = 0; n < ctrl->num_gates; n++) { + const char *parent = + (desc[n].parent) ? desc[n].parent : default_parent; + ctrl->gates[n] = clk_register_gate(NULL, desc[n].name, parent, + desc[n].flags, base, desc[n].bit_idx, + 0, ctrl->lock); + WARN_ON(IS_ERR(ctrl->gates[n])); + } + + of_clk_add_provider(np, clk_gating_get_src, ctrl); + + register_syscore_ops(&clk_gate_syscore_ops); + + return; +gates_out: + kfree(ctrl); +ctrl_out: + iounmap(base); +} diff --git a/drivers/clk/mvebu/common.h b/drivers/clk/mvebu/common.h new file mode 100644 index 000000000..f0de6c8a4 --- /dev/null +++ b/drivers/clk/mvebu/common.h @@ -0,0 +1,58 @@ +/* + * Marvell EBU SoC common clock handling + * + * Copyright (C) 2012 Marvell + * + * Gregory CLEMENT <gregory.clement@free-electrons.com> + * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * Andrew Lunn <andrew@lunn.ch> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#ifndef __CLK_MVEBU_COMMON_H_ +#define __CLK_MVEBU_COMMON_H_ + +#include <linux/kernel.h> + +extern spinlock_t ctrl_gating_lock; + +struct device_node; + +struct coreclk_ratio { + int id; + const char *name; +}; + +struct coreclk_soc_desc { + u32 (*get_tclk_freq)(void __iomem *sar); + u32 (*get_cpu_freq)(void __iomem *sar); + void (*get_clk_ratio)(void __iomem *sar, int id, int *mult, int *div); + u32 (*get_refclk_freq)(void __iomem *sar); + bool (*is_sscg_enabled)(void __iomem *sar); + u32 (*fix_sscg_deviation)(u32 system_clk); + const struct coreclk_ratio *ratios; + int num_ratios; +}; + +struct clk_gating_soc_desc { + const char *name; + const char *parent; + int bit_idx; + unsigned long flags; +}; + +void __init mvebu_coreclk_setup(struct device_node *np, + const struct coreclk_soc_desc *desc); + +void __init mvebu_clk_gating_setup(struct device_node *np, + const struct clk_gating_soc_desc *desc); + +/* + * This function is shared among the Kirkwood, Armada 370, Armada XP + * and Armada 375 SoC + */ +u32 kirkwood_fix_sscg_deviation(u32 system_clk); +#endif diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c new file mode 100644 index 000000000..0153c76d4 --- /dev/null +++ b/drivers/clk/mvebu/cp110-system-controller.c @@ -0,0 +1,452 @@ +/* + * Marvell Armada CP110 System Controller + * + * Copyright (C) 2016 Marvell + * + * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +/* + * CP110 has 6 core clocks: + * + * - PLL0 (1 Ghz) + * - PPv2 core (1/3 PLL0) + * - x2 Core (1/2 PLL0) + * - Core (1/2 x2 Core) + * - SDIO (2/5 PLL0) + * + * - NAND clock, which is either: + * - Equal to SDIO clock + * - 2/5 PLL0 + * + * CP110 has 32 gatable clocks, for the various peripherals in the IP. 
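+ *
+ * Consumers address these clocks with a two-cell specifier: the first
+ * cell selects the type (0 = core, 1 = gatable) and the second cell
+ * the index, as decoded by cp110_of_clk_get() below.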
+ */ + +#define pr_fmt(fmt) "cp110-system-controller: " fmt + +#include <linux/clk-provider.h> +#include <linux/mfd/syscon.h> +#include <linux/init.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/slab.h> + +#define CP110_PM_CLOCK_GATING_REG 0x220 +#define CP110_NAND_FLASH_CLK_CTRL_REG 0x700 +#define NF_CLOCK_SEL_400_MASK BIT(0) + +enum { + CP110_CLK_TYPE_CORE, + CP110_CLK_TYPE_GATABLE, +}; + +#define CP110_MAX_CORE_CLOCKS 6 +#define CP110_MAX_GATABLE_CLOCKS 32 + +#define CP110_CLK_NUM \ + (CP110_MAX_CORE_CLOCKS + CP110_MAX_GATABLE_CLOCKS) + +#define CP110_CORE_PLL0 0 +#define CP110_CORE_PPV2 1 +#define CP110_CORE_X2CORE 2 +#define CP110_CORE_CORE 3 +#define CP110_CORE_NAND 4 +#define CP110_CORE_SDIO 5 + +/* A number of gatable clocks need special handling */ +#define CP110_GATE_AUDIO 0 +#define CP110_GATE_COMM_UNIT 1 +#define CP110_GATE_NAND 2 +#define CP110_GATE_PPV2 3 +#define CP110_GATE_SDIO 4 +#define CP110_GATE_MG 5 +#define CP110_GATE_MG_CORE 6 +#define CP110_GATE_XOR1 7 +#define CP110_GATE_XOR0 8 +#define CP110_GATE_GOP_DP 9 +#define CP110_GATE_PCIE_X1_0 11 +#define CP110_GATE_PCIE_X1_1 12 +#define CP110_GATE_PCIE_X4 13 +#define CP110_GATE_PCIE_XOR 14 +#define CP110_GATE_SATA 15 +#define CP110_GATE_SATA_USB 16 +#define CP110_GATE_MAIN 17 +#define CP110_GATE_SDMMC_GOP 18 +#define CP110_GATE_SLOW_IO 21 +#define CP110_GATE_USB3H0 22 +#define CP110_GATE_USB3H1 23 +#define CP110_GATE_USB3DEV 24 +#define CP110_GATE_EIP150 25 +#define CP110_GATE_EIP197 26 + +static const char * const gate_base_names[] = { + [CP110_GATE_AUDIO] = "audio", + [CP110_GATE_COMM_UNIT] = "communit", + [CP110_GATE_NAND] = "nand", + [CP110_GATE_PPV2] = "ppv2", + [CP110_GATE_SDIO] = "sdio", + [CP110_GATE_MG] = "mg-domain", + [CP110_GATE_MG_CORE] = "mg-core", + [CP110_GATE_XOR1] = "xor1", + [CP110_GATE_XOR0] = "xor0", + [CP110_GATE_GOP_DP] = "gop-dp", + [CP110_GATE_PCIE_X1_0] = "pcie_x10", + [CP110_GATE_PCIE_X1_1] = "pcie_x11", + [CP110_GATE_PCIE_X4] = "pcie_x4", + [CP110_GATE_PCIE_XOR] = "pcie-xor", + [CP110_GATE_SATA] = "sata", + [CP110_GATE_SATA_USB] = "sata-usb", + [CP110_GATE_MAIN] = "main", + [CP110_GATE_SDMMC_GOP] = "sd-mmc-gop", + [CP110_GATE_SLOW_IO] = "slow-io", + [CP110_GATE_USB3H0] = "usb3h0", + [CP110_GATE_USB3H1] = "usb3h1", + [CP110_GATE_USB3DEV] = "usb3dev", + [CP110_GATE_EIP150] = "eip150", + [CP110_GATE_EIP197] = "eip197" +}; + +struct cp110_gate_clk { + struct clk_hw hw; + struct regmap *regmap; + u8 bit_idx; +}; + +#define to_cp110_gate_clk(hw) container_of(hw, struct cp110_gate_clk, hw) + +static int cp110_gate_enable(struct clk_hw *hw) +{ + struct cp110_gate_clk *gate = to_cp110_gate_clk(hw); + + regmap_update_bits(gate->regmap, CP110_PM_CLOCK_GATING_REG, + BIT(gate->bit_idx), BIT(gate->bit_idx)); + + return 0; +} + +static void cp110_gate_disable(struct clk_hw *hw) +{ + struct cp110_gate_clk *gate = to_cp110_gate_clk(hw); + + regmap_update_bits(gate->regmap, CP110_PM_CLOCK_GATING_REG, + BIT(gate->bit_idx), 0); +} + +static int cp110_gate_is_enabled(struct clk_hw *hw) +{ + struct cp110_gate_clk *gate = to_cp110_gate_clk(hw); + u32 val; + + regmap_read(gate->regmap, CP110_PM_CLOCK_GATING_REG, &val); + + return val & BIT(gate->bit_idx); +} + +static const struct clk_ops cp110_gate_ops = { + .enable = cp110_gate_enable, + .disable = cp110_gate_disable, + .is_enabled = cp110_gate_is_enabled, +}; + +static struct clk_hw *cp110_register_gate(const char *name, + const char *parent_name, + struct regmap *regmap, u8 
bit_idx) +{ + struct cp110_gate_clk *gate; + struct clk_hw *hw; + struct clk_init_data init; + int ret; + + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) + return ERR_PTR(-ENOMEM); + + memset(&init, 0, sizeof(init)); + + init.name = name; + init.ops = &cp110_gate_ops; + init.parent_names = &parent_name; + init.num_parents = 1; + + gate->regmap = regmap; + gate->bit_idx = bit_idx; + gate->hw.init = &init; + + hw = &gate->hw; + ret = clk_hw_register(NULL, hw); + if (ret) { + kfree(gate); + hw = ERR_PTR(ret); + } + + return hw; +} + +static void cp110_unregister_gate(struct clk_hw *hw) +{ + clk_hw_unregister(hw); + kfree(to_cp110_gate_clk(hw)); +} + +static struct clk_hw *cp110_of_clk_get(struct of_phandle_args *clkspec, + void *data) +{ + struct clk_hw_onecell_data *clk_data = data; + unsigned int type = clkspec->args[0]; + unsigned int idx = clkspec->args[1]; + + if (type == CP110_CLK_TYPE_CORE) { + if (idx >= CP110_MAX_CORE_CLOCKS) + return ERR_PTR(-EINVAL); + return clk_data->hws[idx]; + } else if (type == CP110_CLK_TYPE_GATABLE) { + if (idx >= CP110_MAX_GATABLE_CLOCKS) + return ERR_PTR(-EINVAL); + return clk_data->hws[CP110_MAX_CORE_CLOCKS + idx]; + } + + return ERR_PTR(-EINVAL); +} + +static char *cp110_unique_name(struct device *dev, struct device_node *np, + const char *name) +{ + const __be32 *reg; + u64 addr; + + /* Do not create a name if there is no clock */ + if (!name) + return NULL; + + reg = of_get_property(np, "reg", NULL); + addr = of_translate_address(np, reg); + return devm_kasprintf(dev, GFP_KERNEL, "%llx-%s", + (unsigned long long)addr, name); +} + +static int cp110_syscon_common_probe(struct platform_device *pdev, + struct device_node *syscon_node) +{ + struct regmap *regmap; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + const char *ppv2_name, *pll0_name, *core_name, *x2core_name, *nand_name, + *sdio_name; + struct clk_hw_onecell_data *cp110_clk_data; + struct clk_hw *hw, **cp110_clks; + u32 nand_clk_ctrl; + int i, ret; + char *gate_name[ARRAY_SIZE(gate_base_names)]; + + regmap = syscon_node_to_regmap(syscon_node); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + ret = regmap_read(regmap, CP110_NAND_FLASH_CLK_CTRL_REG, + &nand_clk_ctrl); + if (ret) + return ret; + + cp110_clk_data = devm_kzalloc(dev, sizeof(*cp110_clk_data) + + sizeof(struct clk_hw *) * CP110_CLK_NUM, + GFP_KERNEL); + if (!cp110_clk_data) + return -ENOMEM; + + cp110_clks = cp110_clk_data->hws; + cp110_clk_data->num = CP110_CLK_NUM; + + /* Register the PLL0 which is the root of the hw tree */ + pll0_name = cp110_unique_name(dev, syscon_node, "pll0"); + hw = clk_hw_register_fixed_rate(NULL, pll0_name, NULL, 0, + 1000 * 1000 * 1000); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto fail_pll0; + } + + cp110_clks[CP110_CORE_PLL0] = hw; + + /* PPv2 is PLL0/3 */ + ppv2_name = cp110_unique_name(dev, syscon_node, "ppv2-core"); + hw = clk_hw_register_fixed_factor(NULL, ppv2_name, pll0_name, 0, 1, 3); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto fail_ppv2; + } + + cp110_clks[CP110_CORE_PPV2] = hw; + + /* X2CORE clock is PLL0/2 */ + x2core_name = cp110_unique_name(dev, syscon_node, "x2core"); + hw = clk_hw_register_fixed_factor(NULL, x2core_name, pll0_name, + 0, 1, 2); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto fail_eip; + } + + cp110_clks[CP110_CORE_X2CORE] = hw; + + /* Core clock is X2CORE/2 */ + core_name = cp110_unique_name(dev, syscon_node, "core"); + hw = clk_hw_register_fixed_factor(NULL, core_name, x2core_name, + 0, 1, 2); + if (IS_ERR(hw)) { + ret = 
PTR_ERR(hw); + goto fail_core; + } + + cp110_clks[CP110_CORE_CORE] = hw; + /* NAND can be either PLL0/2.5 or core clock */ + nand_name = cp110_unique_name(dev, syscon_node, "nand-core"); + if (nand_clk_ctrl & NF_CLOCK_SEL_400_MASK) + hw = clk_hw_register_fixed_factor(NULL, nand_name, + pll0_name, 0, 2, 5); + else + hw = clk_hw_register_fixed_factor(NULL, nand_name, + core_name, 0, 1, 1); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto fail_nand; + } + + cp110_clks[CP110_CORE_NAND] = hw; + + /* SDIO clock is PLL0/2.5 */ + sdio_name = cp110_unique_name(dev, syscon_node, "sdio-core"); + hw = clk_hw_register_fixed_factor(NULL, sdio_name, + pll0_name, 0, 2, 5); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto fail_sdio; + } + + cp110_clks[CP110_CORE_SDIO] = hw; + + /* create the unique name for all the gate clocks */ + for (i = 0; i < ARRAY_SIZE(gate_base_names); i++) + gate_name[i] = cp110_unique_name(dev, syscon_node, + gate_base_names[i]); + + for (i = 0; i < ARRAY_SIZE(gate_base_names); i++) { + const char *parent; + + if (gate_name[i] == NULL) + continue; + + switch (i) { + case CP110_GATE_NAND: + parent = nand_name; + break; + case CP110_GATE_MG: + case CP110_GATE_GOP_DP: + case CP110_GATE_PPV2: + parent = ppv2_name; + break; + case CP110_GATE_SDIO: + parent = sdio_name; + break; + case CP110_GATE_MAIN: + case CP110_GATE_PCIE_XOR: + case CP110_GATE_PCIE_X4: + case CP110_GATE_EIP150: + case CP110_GATE_EIP197: + parent = x2core_name; + break; + default: + parent = core_name; + break; + } + hw = cp110_register_gate(gate_name[i], parent, regmap, i); + + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto fail_gate; + } + + cp110_clks[CP110_MAX_CORE_CLOCKS + i] = hw; + } + + ret = of_clk_add_hw_provider(np, cp110_of_clk_get, cp110_clk_data); + if (ret) + goto fail_clk_add; + + platform_set_drvdata(pdev, cp110_clks); + + return 0; + +fail_clk_add: +fail_gate: + for (i = 0; i < CP110_MAX_GATABLE_CLOCKS; i++) { + hw = cp110_clks[CP110_MAX_CORE_CLOCKS + i]; + + if (hw) + cp110_unregister_gate(hw); + } + + clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_SDIO]); +fail_sdio: + clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_NAND]); +fail_nand: + clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_CORE]); +fail_core: + clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_X2CORE]); +fail_eip: + clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_PPV2]); +fail_ppv2: + clk_hw_unregister_fixed_rate(cp110_clks[CP110_CORE_PLL0]); +fail_pll0: + return ret; +} + +static int cp110_syscon_legacy_clk_probe(struct platform_device *pdev) +{ + dev_warn(&pdev->dev, FW_WARN "Using legacy device tree binding\n"); + dev_warn(&pdev->dev, FW_WARN "Update your device tree:\n"); + dev_warn(&pdev->dev, FW_WARN + "This binding won't be supported in future kernels\n"); + + return cp110_syscon_common_probe(pdev, pdev->dev.of_node); +} + +static int cp110_clk_probe(struct platform_device *pdev) +{ + return cp110_syscon_common_probe(pdev, pdev->dev.of_node->parent); +} + +static const struct of_device_id cp110_syscon_legacy_of_match[] = { + { .compatible = "marvell,cp110-system-controller0", }, + { } +}; + +static struct platform_driver cp110_syscon_legacy_driver = { + .probe = cp110_syscon_legacy_clk_probe, + .driver = { + .name = "marvell-cp110-system-controller0", + .of_match_table = cp110_syscon_legacy_of_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(cp110_syscon_legacy_driver); + +static const struct of_device_id cp110_clock_of_match[] = { + { .compatible = "marvell,cp110-clock", }, + { } +}; 
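+
+/*
+ * With the "marvell,cp110-clock" binding the clock node is a child of
+ * the system controller syscon node, whose regmap drives the gates
+ * (see cp110_clk_probe() above). An illustrative consumer, using a
+ * made-up label, would select the "sdio" gate (type 1, index 4) with:
+ *
+ *	clocks = <&cp0_clk 1 4>;
+ */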
+ +static struct platform_driver cp110_clock_driver = { + .probe = cp110_clk_probe, + .driver = { + .name = "marvell-cp110-clock", + .of_match_table = cp110_clock_of_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(cp110_clock_driver); diff --git a/drivers/clk/mvebu/dove-divider.c b/drivers/clk/mvebu/dove-divider.c new file mode 100644 index 000000000..7e35c891e --- /dev/null +++ b/drivers/clk/mvebu/dove-divider.c @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Marvell Dove PMU Core PLL divider driver + * + * Cleaned up by substantially rewriting, and converted to DT by + * Russell King. Origin is not known. + */ +#include <linux/clk-provider.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/of_address.h> + +#include "dove-divider.h" + +struct dove_clk { + const char *name; + struct clk_hw hw; + void __iomem *base; + spinlock_t *lock; + u8 div_bit_start; + u8 div_bit_end; + u8 div_bit_load; + u8 div_bit_size; + u32 *divider_table; +}; + +enum { + DIV_CTRL0 = 0, + DIV_CTRL1 = 4, + DIV_CTRL1_N_RESET_MASK = BIT(10), +}; + +#define to_dove_clk(hw) container_of(hw, struct dove_clk, hw) + +static void dove_load_divider(void __iomem *base, u32 val, u32 mask, u32 load) +{ + u32 v; + + v = readl_relaxed(base + DIV_CTRL1) | DIV_CTRL1_N_RESET_MASK; + writel_relaxed(v, base + DIV_CTRL1); + + v = (readl_relaxed(base + DIV_CTRL0) & ~(mask | load)) | val; + writel_relaxed(v, base + DIV_CTRL0); + writel_relaxed(v | load, base + DIV_CTRL0); + ndelay(250); + writel_relaxed(v, base + DIV_CTRL0); +} + +static unsigned int dove_get_divider(struct dove_clk *dc) +{ + unsigned int divider; + u32 val; + + val = readl_relaxed(dc->base + DIV_CTRL0); + val >>= dc->div_bit_start; + + divider = val & ~(~0 << dc->div_bit_size); + + if (dc->divider_table) + divider = dc->divider_table[divider]; + + return divider; +} + +static int dove_calc_divider(const struct dove_clk *dc, unsigned long rate, + unsigned long parent_rate, bool set) +{ + unsigned int divider, max; + + divider = DIV_ROUND_CLOSEST(parent_rate, rate); + + if (dc->divider_table) { + unsigned int i; + + for (i = 0; dc->divider_table[i]; i++) + if (divider == dc->divider_table[i]) { + divider = i; + break; + } + + if (!dc->divider_table[i]) + return -EINVAL; + } else { + max = 1 << dc->div_bit_size; + + if (set && (divider == 0 || divider >= max)) + return -EINVAL; + if (divider >= max) + divider = max - 1; + else if (divider == 0) + divider = 1; + } + + return divider; +} + +static unsigned long dove_recalc_rate(struct clk_hw *hw, unsigned long parent) +{ + struct dove_clk *dc = to_dove_clk(hw); + unsigned int divider = dove_get_divider(dc); + unsigned long rate = DIV_ROUND_CLOSEST(parent, divider); + + pr_debug("%s(): %s divider=%u parent=%lu rate=%lu\n", + __func__, dc->name, divider, parent, rate); + + return rate; +} + +static long dove_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent) +{ + struct dove_clk *dc = to_dove_clk(hw); + unsigned long parent_rate = *parent; + int divider; + + divider = dove_calc_divider(dc, rate, parent_rate, false); + if (divider < 0) + return divider; + + rate = DIV_ROUND_CLOSEST(parent_rate, divider); + + pr_debug("%s(): %s divider=%u parent=%lu rate=%lu\n", + __func__, dc->name, divider, parent_rate, rate); + + return rate; +} + +static int dove_set_clock(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct dove_clk *dc = to_dove_clk(hw); + u32 mask, load, div; + int 
divider; + + divider = dove_calc_divider(dc, rate, parent_rate, true); + if (divider < 0) + return divider; + + pr_debug("%s(): %s divider=%u parent=%lu rate=%lu\n", + __func__, dc->name, divider, parent_rate, rate); + + div = (u32)divider << dc->div_bit_start; + mask = ~(~0 << dc->div_bit_size) << dc->div_bit_start; + load = BIT(dc->div_bit_load); + + spin_lock(dc->lock); + dove_load_divider(dc->base, div, mask, load); + spin_unlock(dc->lock); + + return 0; +} + +static const struct clk_ops dove_divider_ops = { + .set_rate = dove_set_clock, + .round_rate = dove_round_rate, + .recalc_rate = dove_recalc_rate, +}; + +static struct clk *clk_register_dove_divider(struct device *dev, + struct dove_clk *dc, const char **parent_names, size_t num_parents, + void __iomem *base) +{ + char name[32]; + struct clk_init_data init = { + .name = name, + .ops = &dove_divider_ops, + .parent_names = parent_names, + .num_parents = num_parents, + }; + + strlcpy(name, dc->name, sizeof(name)); + + dc->hw.init = &init; + dc->base = base; + dc->div_bit_size = dc->div_bit_end - dc->div_bit_start + 1; + + return clk_register(dev, &dc->hw); +} + +static DEFINE_SPINLOCK(dove_divider_lock); + +static u32 axi_divider[] = {-1, 2, 1, 3, 4, 6, 5, 7, 8, 10, 9, 0}; + +static struct dove_clk dove_hw_clocks[4] = { + { + .name = "axi", + .lock = &dove_divider_lock, + .div_bit_start = 1, + .div_bit_end = 6, + .div_bit_load = 7, + .divider_table = axi_divider, + }, { + .name = "gpu", + .lock = &dove_divider_lock, + .div_bit_start = 8, + .div_bit_end = 13, + .div_bit_load = 14, + }, { + .name = "vmeta", + .lock = &dove_divider_lock, + .div_bit_start = 15, + .div_bit_end = 20, + .div_bit_load = 21, + }, { + .name = "lcd", + .lock = &dove_divider_lock, + .div_bit_start = 22, + .div_bit_end = 27, + .div_bit_load = 28, + }, +}; + +static const char *core_pll[] = { + "core-pll", +}; + +static int dove_divider_init(struct device *dev, void __iomem *base, + struct clk **clks) +{ + struct clk *clk; + int i; + + /* + * Create the core PLL clock. We treat this as a fixed rate + * clock as we don't know any better, and documentation is sparse. 
+ */ + clk = clk_register_fixed_rate(dev, core_pll[0], NULL, 0, 2000000000UL); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + for (i = 0; i < ARRAY_SIZE(dove_hw_clocks); i++) + clks[i] = clk_register_dove_divider(dev, &dove_hw_clocks[i], + core_pll, + ARRAY_SIZE(core_pll), base); + + return 0; +} + +static struct clk *dove_divider_clocks[4]; + +static struct clk_onecell_data dove_divider_data = { + .clks = dove_divider_clocks, + .clk_num = ARRAY_SIZE(dove_divider_clocks), +}; + +void __init dove_divider_clk_init(struct device_node *np) +{ + void __iomem *base; + + base = of_iomap(np, 0); + if (WARN_ON(!base)) + return; + + if (WARN_ON(dove_divider_init(NULL, base, dove_divider_clocks))) { + iounmap(base); + return; + } + + of_clk_add_provider(np, of_clk_src_onecell_get, &dove_divider_data); +} diff --git a/drivers/clk/mvebu/dove-divider.h b/drivers/clk/mvebu/dove-divider.h new file mode 100644 index 000000000..38ea37308 --- /dev/null +++ b/drivers/clk/mvebu/dove-divider.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DOVE_DIVIDER_H +#define DOVE_DIVIDER_H + +void __init dove_divider_clk_init(struct device_node *np); + +#endif diff --git a/drivers/clk/mvebu/dove.c b/drivers/clk/mvebu/dove.c new file mode 100644 index 000000000..5f258c9bb --- /dev/null +++ b/drivers/clk/mvebu/dove.c @@ -0,0 +1,203 @@ +/* + * Marvell Dove SoC clocks + * + * Copyright (C) 2012 Marvell + * + * Gregory CLEMENT <gregory.clement@free-electrons.com> + * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * Andrew Lunn <andrew@lunn.ch> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/kernel.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of.h> +#include "common.h" +#include "dove-divider.h" + +/* + * Core Clocks + * + * Dove PLL sample-at-reset configuration + * + * SAR0[8:5] : CPU frequency + * 5 = 1000 MHz + * 6 = 933 MHz + * 7 = 933 MHz + * 8 = 800 MHz + * 9 = 800 MHz + * 10 = 800 MHz + * 11 = 1067 MHz + * 12 = 667 MHz + * 13 = 533 MHz + * 14 = 400 MHz + * 15 = 333 MHz + * others reserved. + * + * SAR0[11:9] : CPU to L2 Clock divider ratio + * 0 = (1/1) * CPU + * 2 = (1/2) * CPU + * 4 = (1/3) * CPU + * 6 = (1/4) * CPU + * others reserved. + * + * SAR0[15:12] : CPU to DDR DRAM Clock divider ratio + * 0 = (1/1) * CPU + * 2 = (1/2) * CPU + * 3 = (2/5) * CPU + * 4 = (1/3) * CPU + * 6 = (1/4) * CPU + * 8 = (1/5) * CPU + * 10 = (1/6) * CPU + * 12 = (1/7) * CPU + * 14 = (1/8) * CPU + * 15 = (1/10) * CPU + * others reserved. + * + * SAR0[24:23] : TCLK frequency + * 0 = 166 MHz + * 1 = 125 MHz + * others reserved. 
+ */ + +#define SAR_DOVE_CPU_FREQ 5 +#define SAR_DOVE_CPU_FREQ_MASK 0xf +#define SAR_DOVE_L2_RATIO 9 +#define SAR_DOVE_L2_RATIO_MASK 0x7 +#define SAR_DOVE_DDR_RATIO 12 +#define SAR_DOVE_DDR_RATIO_MASK 0xf +#define SAR_DOVE_TCLK_FREQ 23 +#define SAR_DOVE_TCLK_FREQ_MASK 0x3 + +enum { DOVE_CPU_TO_L2, DOVE_CPU_TO_DDR }; + +static const struct coreclk_ratio dove_coreclk_ratios[] __initconst = { + { .id = DOVE_CPU_TO_L2, .name = "l2clk", }, + { .id = DOVE_CPU_TO_DDR, .name = "ddrclk", } +}; + +static const u32 dove_tclk_freqs[] __initconst = { + 166666667, + 125000000, + 0, 0 +}; + +static u32 __init dove_get_tclk_freq(void __iomem *sar) +{ + u32 opt = (readl(sar) >> SAR_DOVE_TCLK_FREQ) & + SAR_DOVE_TCLK_FREQ_MASK; + return dove_tclk_freqs[opt]; +} + +static const u32 dove_cpu_freqs[] __initconst = { + 0, 0, 0, 0, 0, + 1000000000, + 933333333, 933333333, + 800000000, 800000000, 800000000, + 1066666667, + 666666667, + 533333333, + 400000000, + 333333333 +}; + +static u32 __init dove_get_cpu_freq(void __iomem *sar) +{ + u32 opt = (readl(sar) >> SAR_DOVE_CPU_FREQ) & + SAR_DOVE_CPU_FREQ_MASK; + return dove_cpu_freqs[opt]; +} + +static const int dove_cpu_l2_ratios[8][2] __initconst = { + { 1, 1 }, { 0, 1 }, { 1, 2 }, { 0, 1 }, + { 1, 3 }, { 0, 1 }, { 1, 4 }, { 0, 1 } +}; + +static const int dove_cpu_ddr_ratios[16][2] __initconst = { + { 1, 1 }, { 0, 1 }, { 1, 2 }, { 2, 5 }, + { 1, 3 }, { 0, 1 }, { 1, 4 }, { 0, 1 }, + { 1, 5 }, { 0, 1 }, { 1, 6 }, { 0, 1 }, + { 1, 7 }, { 0, 1 }, { 1, 8 }, { 1, 10 } +}; + +static void __init dove_get_clk_ratio( + void __iomem *sar, int id, int *mult, int *div) +{ + switch (id) { + case DOVE_CPU_TO_L2: + { + u32 opt = (readl(sar) >> SAR_DOVE_L2_RATIO) & + SAR_DOVE_L2_RATIO_MASK; + *mult = dove_cpu_l2_ratios[opt][0]; + *div = dove_cpu_l2_ratios[opt][1]; + break; + } + case DOVE_CPU_TO_DDR: + { + u32 opt = (readl(sar) >> SAR_DOVE_DDR_RATIO) & + SAR_DOVE_DDR_RATIO_MASK; + *mult = dove_cpu_ddr_ratios[opt][0]; + *div = dove_cpu_ddr_ratios[opt][1]; + break; + } + } +} + +static const struct coreclk_soc_desc dove_coreclks = { + .get_tclk_freq = dove_get_tclk_freq, + .get_cpu_freq = dove_get_cpu_freq, + .get_clk_ratio = dove_get_clk_ratio, + .ratios = dove_coreclk_ratios, + .num_ratios = ARRAY_SIZE(dove_coreclk_ratios), +}; + +/* + * Clock Gating Control + */ + +static const struct clk_gating_soc_desc dove_gating_desc[] __initconst = { + { "usb0", NULL, 0, 0 }, + { "usb1", NULL, 1, 0 }, + { "ge", "gephy", 2, 0 }, + { "sata", NULL, 3, 0 }, + { "pex0", NULL, 4, 0 }, + { "pex1", NULL, 5, 0 }, + { "sdio0", NULL, 8, 0 }, + { "sdio1", NULL, 9, 0 }, + { "nand", NULL, 10, 0 }, + { "camera", NULL, 11, 0 }, + { "i2s0", NULL, 12, 0 }, + { "i2s1", NULL, 13, 0 }, + { "crypto", NULL, 15, 0 }, + { "ac97", NULL, 21, 0 }, + { "pdma", NULL, 22, 0 }, + { "xor0", NULL, 23, 0 }, + { "xor1", NULL, 24, 0 }, + { "gephy", NULL, 30, 0 }, + { } +}; + +static void __init dove_clk_init(struct device_node *np) +{ + struct device_node *cgnp = + of_find_compatible_node(NULL, NULL, "marvell,dove-gating-clock"); + struct device_node *ddnp = + of_find_compatible_node(NULL, NULL, "marvell,dove-divider-clock"); + + mvebu_coreclk_setup(np, &dove_coreclks); + + if (ddnp) { + dove_divider_clk_init(ddnp); + of_node_put(ddnp); + } + + if (cgnp) { + mvebu_clk_gating_setup(cgnp, dove_gating_desc); + of_node_put(cgnp); + } +} +CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init); diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c new file mode 100644 index 000000000..38612cd90 --- 
/dev/null +++ b/drivers/clk/mvebu/kirkwood.c @@ -0,0 +1,344 @@ +/* + * Marvell Kirkwood SoC clocks + * + * Copyright (C) 2012 Marvell + * + * Gregory CLEMENT <gregory.clement@free-electrons.com> + * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * Andrew Lunn <andrew@lunn.ch> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include "common.h" + +/* + * Core Clocks + * + * Kirkwood PLL sample-at-reset configuration + * (6180 has different SAR layout than other Kirkwood SoCs) + * + * SAR0[4:3,22,1] : CPU frequency (6281,6292,6282) + * 4 = 600 MHz + * 6 = 800 MHz + * 7 = 1000 MHz + * 9 = 1200 MHz + * 12 = 1500 MHz + * 13 = 1600 MHz + * 14 = 1800 MHz + * 15 = 2000 MHz + * others reserved. + * + * SAR0[19,10:9] : CPU to L2 Clock divider ratio (6281,6292,6282) + * 1 = (1/2) * CPU + * 3 = (1/3) * CPU + * 5 = (1/4) * CPU + * others reserved. + * + * SAR0[8:5] : CPU to DDR DRAM Clock divider ratio (6281,6292,6282) + * 2 = (1/2) * CPU + * 4 = (1/3) * CPU + * 6 = (1/4) * CPU + * 7 = (2/9) * CPU + * 8 = (1/5) * CPU + * 9 = (1/6) * CPU + * others reserved. + * + * SAR0[4:2] : Kirkwood 6180 cpu/l2/ddr clock configuration (6180 only) + * 5 = [CPU = 600 MHz, L2 = (1/2) * CPU, DDR = 200 MHz = (1/3) * CPU] + * 6 = [CPU = 800 MHz, L2 = (1/2) * CPU, DDR = 200 MHz = (1/4) * CPU] + * 7 = [CPU = 1000 MHz, L2 = (1/2) * CPU, DDR = 200 MHz = (1/5) * CPU] + * others reserved. + * + * SAR0[21] : TCLK frequency + * 0 = 200 MHz + * 1 = 166 MHz + * others reserved. + */ + +#define SAR_KIRKWOOD_CPU_FREQ(x) \ + (((x & (1 << 1)) >> 1) | \ + ((x & (1 << 22)) >> 21) | \ + ((x & (3 << 3)) >> 1)) +#define SAR_KIRKWOOD_L2_RATIO(x) \ + (((x & (3 << 9)) >> 9) | \ + (((x & (1 << 19)) >> 17))) +#define SAR_KIRKWOOD_DDR_RATIO 5 +#define SAR_KIRKWOOD_DDR_RATIO_MASK 0xf +#define SAR_MV88F6180_CLK 2 +#define SAR_MV88F6180_CLK_MASK 0x7 +#define SAR_KIRKWOOD_TCLK_FREQ 21 +#define SAR_KIRKWOOD_TCLK_FREQ_MASK 0x1 + +enum { KIRKWOOD_CPU_TO_L2, KIRKWOOD_CPU_TO_DDR }; + +static const struct coreclk_ratio kirkwood_coreclk_ratios[] __initconst = { + { .id = KIRKWOOD_CPU_TO_L2, .name = "l2clk", }, + { .id = KIRKWOOD_CPU_TO_DDR, .name = "ddrclk", } +}; + +static u32 __init kirkwood_get_tclk_freq(void __iomem *sar) +{ + u32 opt = (readl(sar) >> SAR_KIRKWOOD_TCLK_FREQ) & + SAR_KIRKWOOD_TCLK_FREQ_MASK; + return (opt) ? 
166666667 : 200000000; +} + +static const u32 kirkwood_cpu_freqs[] __initconst = { + 0, 0, 0, 0, + 600000000, + 0, + 800000000, + 1000000000, + 0, + 1200000000, + 0, 0, + 1500000000, + 1600000000, + 1800000000, + 2000000000 +}; + +static u32 __init kirkwood_get_cpu_freq(void __iomem *sar) +{ + u32 opt = SAR_KIRKWOOD_CPU_FREQ(readl(sar)); + return kirkwood_cpu_freqs[opt]; +} + +static const int kirkwood_cpu_l2_ratios[8][2] __initconst = { + { 0, 1 }, { 1, 2 }, { 0, 1 }, { 1, 3 }, + { 0, 1 }, { 1, 4 }, { 0, 1 }, { 0, 1 } +}; + +static const int kirkwood_cpu_ddr_ratios[16][2] __initconst = { + { 0, 1 }, { 0, 1 }, { 1, 2 }, { 0, 1 }, + { 1, 3 }, { 0, 1 }, { 1, 4 }, { 2, 9 }, + { 1, 5 }, { 1, 6 }, { 0, 1 }, { 0, 1 }, + { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 } +}; + +static void __init kirkwood_get_clk_ratio( + void __iomem *sar, int id, int *mult, int *div) +{ + switch (id) { + case KIRKWOOD_CPU_TO_L2: + { + u32 opt = SAR_KIRKWOOD_L2_RATIO(readl(sar)); + *mult = kirkwood_cpu_l2_ratios[opt][0]; + *div = kirkwood_cpu_l2_ratios[opt][1]; + break; + } + case KIRKWOOD_CPU_TO_DDR: + { + u32 opt = (readl(sar) >> SAR_KIRKWOOD_DDR_RATIO) & + SAR_KIRKWOOD_DDR_RATIO_MASK; + *mult = kirkwood_cpu_ddr_ratios[opt][0]; + *div = kirkwood_cpu_ddr_ratios[opt][1]; + break; + } + } +} + +static const u32 mv88f6180_cpu_freqs[] __initconst = { + 0, 0, 0, 0, 0, + 600000000, + 800000000, + 1000000000 +}; + +static u32 __init mv88f6180_get_cpu_freq(void __iomem *sar) +{ + u32 opt = (readl(sar) >> SAR_MV88F6180_CLK) & SAR_MV88F6180_CLK_MASK; + return mv88f6180_cpu_freqs[opt]; +} + +static const int mv88f6180_cpu_ddr_ratios[8][2] __initconst = { + { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 }, + { 0, 1 }, { 1, 3 }, { 1, 4 }, { 1, 5 } +}; + +static void __init mv88f6180_get_clk_ratio( + void __iomem *sar, int id, int *mult, int *div) +{ + switch (id) { + case KIRKWOOD_CPU_TO_L2: + { + /* mv88f6180 has a fixed 1:2 CPU-to-L2 ratio */ + *mult = 1; + *div = 2; + break; + } + case KIRKWOOD_CPU_TO_DDR: + { + u32 opt = (readl(sar) >> SAR_MV88F6180_CLK) & + SAR_MV88F6180_CLK_MASK; + *mult = mv88f6180_cpu_ddr_ratios[opt][0]; + *div = mv88f6180_cpu_ddr_ratios[opt][1]; + break; + } + } +} + +static const struct coreclk_soc_desc kirkwood_coreclks = { + .get_tclk_freq = kirkwood_get_tclk_freq, + .get_cpu_freq = kirkwood_get_cpu_freq, + .get_clk_ratio = kirkwood_get_clk_ratio, + .ratios = kirkwood_coreclk_ratios, + .num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios), +}; + +static const struct coreclk_soc_desc mv88f6180_coreclks = { + .get_tclk_freq = kirkwood_get_tclk_freq, + .get_cpu_freq = mv88f6180_get_cpu_freq, + .get_clk_ratio = mv88f6180_get_clk_ratio, + .ratios = kirkwood_coreclk_ratios, + .num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios), +}; + +/* + * Clock Gating Control + */ + +static const struct clk_gating_soc_desc kirkwood_gating_desc[] __initconst = { + { "ge0", NULL, 0, 0 }, + { "pex0", NULL, 2, 0 }, + { "usb0", NULL, 3, 0 }, + { "sdio", NULL, 4, 0 }, + { "tsu", NULL, 5, 0 }, + { "runit", NULL, 7, 0 }, + { "xor0", NULL, 8, 0 }, + { "audio", NULL, 9, 0 }, + { "sata0", NULL, 14, 0 }, + { "sata1", NULL, 15, 0 }, + { "xor1", NULL, 16, 0 }, + { "crypto", NULL, 17, 0 }, + { "pex1", NULL, 18, 0 }, + { "ge1", NULL, 19, 0 }, + { "tdm", NULL, 20, 0 }, + { } +}; + + +/* + * Clock Muxing Control + */ + +struct clk_muxing_soc_desc { + const char *name; + const char **parents; + int num_parents; + int shift; + int width; + unsigned long flags; +}; + +struct clk_muxing_ctrl { + spinlock_t *lock; + struct clk **muxes; + int num_muxes; +}; + 
+static const char *powersave_parents[] = { + "cpuclk", + "ddrclk", +}; + +static const struct clk_muxing_soc_desc kirkwood_mux_desc[] __initconst = { + { "powersave", powersave_parents, ARRAY_SIZE(powersave_parents), + 11, 1, 0 }, + { } +}; + +static struct clk *clk_muxing_get_src( + struct of_phandle_args *clkspec, void *data) +{ + struct clk_muxing_ctrl *ctrl = (struct clk_muxing_ctrl *)data; + int n; + + if (clkspec->args_count < 1) + return ERR_PTR(-EINVAL); + + for (n = 0; n < ctrl->num_muxes; n++) { + struct clk_mux *mux = + to_clk_mux(__clk_get_hw(ctrl->muxes[n])); + if (clkspec->args[0] == mux->shift) + return ctrl->muxes[n]; + } + return ERR_PTR(-ENODEV); +} + +static void __init kirkwood_clk_muxing_setup(struct device_node *np, + const struct clk_muxing_soc_desc *desc) +{ + struct clk_muxing_ctrl *ctrl; + void __iomem *base; + int n; + + base = of_iomap(np, 0); + if (WARN_ON(!base)) + return; + + ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); + if (WARN_ON(!ctrl)) + goto ctrl_out; + + /* lock must already be initialized */ + ctrl->lock = &ctrl_gating_lock; + + /* Count, allocate, and register clock muxes */ + for (n = 0; desc[n].name;) + n++; + + ctrl->num_muxes = n; + ctrl->muxes = kcalloc(ctrl->num_muxes, sizeof(struct clk *), + GFP_KERNEL); + if (WARN_ON(!ctrl->muxes)) + goto muxes_out; + + for (n = 0; n < ctrl->num_muxes; n++) { + ctrl->muxes[n] = clk_register_mux(NULL, desc[n].name, + desc[n].parents, desc[n].num_parents, + desc[n].flags, base, desc[n].shift, + desc[n].width, desc[n].flags, ctrl->lock); + WARN_ON(IS_ERR(ctrl->muxes[n])); + } + + of_clk_add_provider(np, clk_muxing_get_src, ctrl); + + return; +muxes_out: + kfree(ctrl); +ctrl_out: + iounmap(base); +} + +static void __init kirkwood_clk_init(struct device_node *np) +{ + struct device_node *cgnp = + of_find_compatible_node(NULL, NULL, "marvell,kirkwood-gating-clock"); + + + if (of_device_is_compatible(np, "marvell,mv88f6180-core-clock")) + mvebu_coreclk_setup(np, &mv88f6180_coreclks); + else + mvebu_coreclk_setup(np, &kirkwood_coreclks); + + if (cgnp) { + mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc); + kirkwood_clk_muxing_setup(cgnp, kirkwood_mux_desc); + + of_node_put(cgnp); + } +} +CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock", + kirkwood_clk_init); +CLK_OF_DECLARE(mv88f6180_clk, "marvell,mv88f6180-core-clock", + kirkwood_clk_init); diff --git a/drivers/clk/mvebu/mv98dx3236.c b/drivers/clk/mvebu/mv98dx3236.c new file mode 100644 index 000000000..c8a0d03d2 --- /dev/null +++ b/drivers/clk/mvebu/mv98dx3236.c @@ -0,0 +1,182 @@ +/* + * Marvell MV98DX3236 SoC clocks + * + * Copyright (C) 2012 Marvell + * + * Gregory CLEMENT <gregory.clement@free-electrons.com> + * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * Andrew Lunn <andrew@lunn.ch> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/kernel.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of.h> +#include "common.h" + + +/* + * For 98DX4251 Sample At Reset the CPU, DDR and Main PLL clocks are all + * defined at the same time + * + * SAR1[20:18] : CPU frequency DDR frequency MPLL frequency + * 0 = 400 MHz 400 MHz 800 MHz + * 2 = 667 MHz 667 MHz 2000 MHz + * 3 = 800 MHz 800 MHz 1600 MHz + * others reserved. 
+ * + * For 98DX3236 Sample At Reset the CPU, DDR and Main PLL clocks are all + * defined at the same time + * + * SAR1[20:18] : CPU frequency DDR frequency MPLL frequency + * 1 = 667 MHz 667 MHz 2000 MHz + * 2 = 400 MHz 400 MHz 400 MHz + * 3 = 800 MHz 800 MHz 800 MHz + * 5 = 800 MHz 400 MHz 800 MHz + * others reserved. + */ + +#define SAR1_MV98DX3236_CPU_DDR_MPLL_FREQ_OPT 18 +#define SAR1_MV98DX3236_CPU_DDR_MPLL_FREQ_OPT_MASK 0x7 + +static u32 __init mv98dx3236_get_tclk_freq(void __iomem *sar) +{ + /* Tclk = 200MHz, no SaR dependency */ + return 200000000; +} + +static const u32 mv98dx3236_cpu_frequencies[] __initconst = { + 0, + 667000000, + 400000000, + 800000000, + 0, + 800000000, + 0, 0, +}; + +static const u32 mv98dx4251_cpu_frequencies[] __initconst = { + 400000000, + 0, + 667000000, + 800000000, + 0, 0, 0, 0, +}; + +static u32 __init mv98dx3236_get_cpu_freq(void __iomem *sar) +{ + u32 cpu_freq = 0; + u8 cpu_freq_select = 0; + + cpu_freq_select = ((readl(sar) >> SAR1_MV98DX3236_CPU_DDR_MPLL_FREQ_OPT) & + SAR1_MV98DX3236_CPU_DDR_MPLL_FREQ_OPT_MASK); + + if (of_machine_is_compatible("marvell,armadaxp-98dx4251")) + cpu_freq = mv98dx4251_cpu_frequencies[cpu_freq_select]; + else if (of_machine_is_compatible("marvell,armadaxp-98dx3236")) + cpu_freq = mv98dx3236_cpu_frequencies[cpu_freq_select]; + + if (!cpu_freq) + pr_err("CPU freq select unsupported %d\n", cpu_freq_select); + + return cpu_freq; +} + +enum { + MV98DX3236_CPU_TO_DDR, + MV98DX3236_CPU_TO_MPLL +}; + +static const struct coreclk_ratio mv98dx3236_core_ratios[] __initconst = { + { .id = MV98DX3236_CPU_TO_DDR, .name = "ddrclk" }, + { .id = MV98DX3236_CPU_TO_MPLL, .name = "mpll" }, +}; + +static const int __initconst mv98dx3236_cpu_mpll_ratios[8][2] = { + {0, 1}, {3, 1}, {1, 1}, {1, 1}, + {0, 1}, {1, 1}, {0, 1}, {0, 1}, +}; + +static const int __initconst mv98dx3236_cpu_ddr_ratios[8][2] = { + {0, 1}, {1, 1}, {1, 1}, {1, 1}, + {0, 1}, {1, 2}, {0, 1}, {0, 1}, +}; + +static const int __initconst mv98dx4251_cpu_mpll_ratios[8][2] = { + {2, 1}, {0, 1}, {3, 1}, {2, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, +}; + +static const int __initconst mv98dx4251_cpu_ddr_ratios[8][2] = { + {1, 1}, {0, 1}, {1, 1}, {1, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, +}; + +static void __init mv98dx3236_get_clk_ratio( + void __iomem *sar, int id, int *mult, int *div) +{ + u32 opt = ((readl(sar) >> SAR1_MV98DX3236_CPU_DDR_MPLL_FREQ_OPT) & + SAR1_MV98DX3236_CPU_DDR_MPLL_FREQ_OPT_MASK); + + switch (id) { + case MV98DX3236_CPU_TO_DDR: + if (of_machine_is_compatible("marvell,armadaxp-98dx4251")) { + *mult = mv98dx4251_cpu_ddr_ratios[opt][0]; + *div = mv98dx4251_cpu_ddr_ratios[opt][1]; + } else if (of_machine_is_compatible("marvell,armadaxp-98dx3236")) { + *mult = mv98dx3236_cpu_ddr_ratios[opt][0]; + *div = mv98dx3236_cpu_ddr_ratios[opt][1]; + } + break; + case MV98DX3236_CPU_TO_MPLL: + if (of_machine_is_compatible("marvell,armadaxp-98dx4251")) { + *mult = mv98dx4251_cpu_mpll_ratios[opt][0]; + *div = mv98dx4251_cpu_mpll_ratios[opt][1]; + } else if (of_machine_is_compatible("marvell,armadaxp-98dx3236")) { + *mult = mv98dx3236_cpu_mpll_ratios[opt][0]; + *div = mv98dx3236_cpu_mpll_ratios[opt][1]; + } + break; + } +} + +static const struct coreclk_soc_desc mv98dx3236_core_clocks = { + .get_tclk_freq = mv98dx3236_get_tclk_freq, + .get_cpu_freq = mv98dx3236_get_cpu_freq, + .get_clk_ratio = mv98dx3236_get_clk_ratio, + .ratios = mv98dx3236_core_ratios, + .num_ratios = ARRAY_SIZE(mv98dx3236_core_ratios), +}; + + +/* + * Clock Gating Control + */ + +static const struct 
clk_gating_soc_desc mv98dx3236_gating_desc[] __initconst = { + { "ge1", NULL, 3, 0 }, + { "ge0", NULL, 4, 0 }, + { "pex00", NULL, 5, 0 }, + { "sdio", NULL, 17, 0 }, + { "usb0", NULL, 18, 0 }, + { "xor0", NULL, 22, 0 }, + { } +}; + +static void __init mv98dx3236_clk_init(struct device_node *np) +{ + struct device_node *cgnp = + of_find_compatible_node(NULL, NULL, "marvell,mv98dx3236-gating-clock"); + + mvebu_coreclk_setup(np, &mv98dx3236_core_clocks); + + if (cgnp) { + mvebu_clk_gating_setup(cgnp, mv98dx3236_gating_desc); + of_node_put(cgnp); + } +} +CLK_OF_DECLARE(mv98dx3236_clk, "marvell,mv98dx3236-core-clock", mv98dx3236_clk_init); diff --git a/drivers/clk/mvebu/orion.c b/drivers/clk/mvebu/orion.c new file mode 100644 index 000000000..a6e5bee23 --- /dev/null +++ b/drivers/clk/mvebu/orion.c @@ -0,0 +1,280 @@ +/* + * Marvell Orion SoC clocks + * + * Copyright (C) 2014 Thomas Petazzoni + * + * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/kernel.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of.h> +#include "common.h" + +static const struct coreclk_ratio orion_coreclk_ratios[] __initconst = { + { .id = 0, .name = "ddrclk", } +}; + +/* + * Orion 5181 + */ + +#define SAR_MV88F5181_TCLK_FREQ 8 +#define SAR_MV88F5181_TCLK_FREQ_MASK 0x3 + +static u32 __init mv88f5181_get_tclk_freq(void __iomem *sar) +{ + u32 opt = (readl(sar) >> SAR_MV88F5181_TCLK_FREQ) & + SAR_MV88F5181_TCLK_FREQ_MASK; + if (opt == 0) + return 133333333; + else if (opt == 1) + return 150000000; + else if (opt == 2) + return 166666667; + else + return 0; +} + +#define SAR_MV88F5181_CPU_FREQ 4 +#define SAR_MV88F5181_CPU_FREQ_MASK 0xf + +static u32 __init mv88f5181_get_cpu_freq(void __iomem *sar) +{ + u32 opt = (readl(sar) >> SAR_MV88F5181_CPU_FREQ) & + SAR_MV88F5181_CPU_FREQ_MASK; + if (opt == 0) + return 333333333; + else if (opt == 1 || opt == 2) + return 400000000; + else if (opt == 3) + return 500000000; + else + return 0; +} + +static void __init mv88f5181_get_clk_ratio(void __iomem *sar, int id, + int *mult, int *div) +{ + u32 opt = (readl(sar) >> SAR_MV88F5181_CPU_FREQ) & + SAR_MV88F5181_CPU_FREQ_MASK; + if (opt == 0 || opt == 1) { + *mult = 1; + *div = 2; + } else if (opt == 2 || opt == 3) { + *mult = 1; + *div = 3; + } else { + *mult = 0; + *div = 1; + } +} + +static const struct coreclk_soc_desc mv88f5181_coreclks = { + .get_tclk_freq = mv88f5181_get_tclk_freq, + .get_cpu_freq = mv88f5181_get_cpu_freq, + .get_clk_ratio = mv88f5181_get_clk_ratio, + .ratios = orion_coreclk_ratios, + .num_ratios = ARRAY_SIZE(orion_coreclk_ratios), +}; + +static void __init mv88f5181_clk_init(struct device_node *np) +{ + return mvebu_coreclk_setup(np, &mv88f5181_coreclks); +} + +CLK_OF_DECLARE(mv88f5181_clk, "marvell,mv88f5181-core-clock", mv88f5181_clk_init); + +/* + * Orion 5182 + */ + +#define SAR_MV88F5182_TCLK_FREQ 8 +#define SAR_MV88F5182_TCLK_FREQ_MASK 0x3 + +static u32 __init mv88f5182_get_tclk_freq(void __iomem *sar) +{ + u32 opt = (readl(sar) >> SAR_MV88F5182_TCLK_FREQ) & + SAR_MV88F5182_TCLK_FREQ_MASK; + if (opt == 1) + return 150000000; + else if (opt == 2) + return 166666667; + else + return 0; +} + +#define SAR_MV88F5182_CPU_FREQ 4 +#define SAR_MV88F5182_CPU_FREQ_MASK 0xf + +static u32 __init mv88f5182_get_cpu_freq(void __iomem *sar) +{ + u32 opt = (readl(sar) >> 
SAR_MV88F5182_CPU_FREQ) & + SAR_MV88F5182_CPU_FREQ_MASK; + if (opt == 0) + return 333333333; + else if (opt == 1 || opt == 2) + return 400000000; + else if (opt == 3) + return 500000000; + else + return 0; +} + +static void __init mv88f5182_get_clk_ratio(void __iomem *sar, int id, + int *mult, int *div) +{ + u32 opt = (readl(sar) >> SAR_MV88F5182_CPU_FREQ) & + SAR_MV88F5182_CPU_FREQ_MASK; + if (opt == 0 || opt == 1) { + *mult = 1; + *div = 2; + } else if (opt == 2 || opt == 3) { + *mult = 1; + *div = 3; + } else { + *mult = 0; + *div = 1; + } +} + +static const struct coreclk_soc_desc mv88f5182_coreclks = { + .get_tclk_freq = mv88f5182_get_tclk_freq, + .get_cpu_freq = mv88f5182_get_cpu_freq, + .get_clk_ratio = mv88f5182_get_clk_ratio, + .ratios = orion_coreclk_ratios, + .num_ratios = ARRAY_SIZE(orion_coreclk_ratios), +}; + +static void __init mv88f5182_clk_init(struct device_node *np) +{ + return mvebu_coreclk_setup(np, &mv88f5182_coreclks); +} + +CLK_OF_DECLARE(mv88f5182_clk, "marvell,mv88f5182-core-clock", mv88f5182_clk_init); + +/* + * Orion 5281 + */ + +static u32 __init mv88f5281_get_tclk_freq(void __iomem *sar) +{ + /* On 5281, tclk is always 166 Mhz */ + return 166666667; +} + +#define SAR_MV88F5281_CPU_FREQ 4 +#define SAR_MV88F5281_CPU_FREQ_MASK 0xf + +static u32 __init mv88f5281_get_cpu_freq(void __iomem *sar) +{ + u32 opt = (readl(sar) >> SAR_MV88F5281_CPU_FREQ) & + SAR_MV88F5281_CPU_FREQ_MASK; + if (opt == 1 || opt == 2) + return 400000000; + else if (opt == 3) + return 500000000; + else + return 0; +} + +static void __init mv88f5281_get_clk_ratio(void __iomem *sar, int id, + int *mult, int *div) +{ + u32 opt = (readl(sar) >> SAR_MV88F5281_CPU_FREQ) & + SAR_MV88F5281_CPU_FREQ_MASK; + if (opt == 1) { + *mult = 1; + *div = 2; + } else if (opt == 2 || opt == 3) { + *mult = 1; + *div = 3; + } else { + *mult = 0; + *div = 1; + } +} + +static const struct coreclk_soc_desc mv88f5281_coreclks = { + .get_tclk_freq = mv88f5281_get_tclk_freq, + .get_cpu_freq = mv88f5281_get_cpu_freq, + .get_clk_ratio = mv88f5281_get_clk_ratio, + .ratios = orion_coreclk_ratios, + .num_ratios = ARRAY_SIZE(orion_coreclk_ratios), +}; + +static void __init mv88f5281_clk_init(struct device_node *np) +{ + return mvebu_coreclk_setup(np, &mv88f5281_coreclks); +} + +CLK_OF_DECLARE(mv88f5281_clk, "marvell,mv88f5281-core-clock", mv88f5281_clk_init); + +/* + * Orion 6183 + */ + +#define SAR_MV88F6183_TCLK_FREQ 9 +#define SAR_MV88F6183_TCLK_FREQ_MASK 0x1 + +static u32 __init mv88f6183_get_tclk_freq(void __iomem *sar) +{ + u32 opt = (readl(sar) >> SAR_MV88F6183_TCLK_FREQ) & + SAR_MV88F6183_TCLK_FREQ_MASK; + if (opt == 0) + return 133333333; + else if (opt == 1) + return 166666667; + else + return 0; +} + +#define SAR_MV88F6183_CPU_FREQ 1 +#define SAR_MV88F6183_CPU_FREQ_MASK 0x3f + +static u32 __init mv88f6183_get_cpu_freq(void __iomem *sar) +{ + u32 opt = (readl(sar) >> SAR_MV88F6183_CPU_FREQ) & + SAR_MV88F6183_CPU_FREQ_MASK; + if (opt == 9) + return 333333333; + else if (opt == 17) + return 400000000; + else + return 0; +} + +static void __init mv88f6183_get_clk_ratio(void __iomem *sar, int id, + int *mult, int *div) +{ + u32 opt = (readl(sar) >> SAR_MV88F6183_CPU_FREQ) & + SAR_MV88F6183_CPU_FREQ_MASK; + if (opt == 9 || opt == 17) { + *mult = 1; + *div = 2; + } else { + *mult = 0; + *div = 1; + } +} + +static const struct coreclk_soc_desc mv88f6183_coreclks = { + .get_tclk_freq = mv88f6183_get_tclk_freq, + .get_cpu_freq = mv88f6183_get_cpu_freq, + .get_clk_ratio = mv88f6183_get_clk_ratio, + .ratios = 
orion_coreclk_ratios,
+	.num_ratios = ARRAY_SIZE(orion_coreclk_ratios),
+};
+
+
+static void __init mv88f6183_clk_init(struct device_node *np)
+{
+	return mvebu_coreclk_setup(np, &mv88f6183_coreclks);
+}
+
+CLK_OF_DECLARE(mv88f6183_clk, "marvell,mv88f6183-core-clock", mv88f6183_clk_init);
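
Every SoC file in this series follows the same mvebu core-clock pattern: decode the Sample-At-Reset (SAR) register through a small set of callbacks, collect them in a coreclk_soc_desc, and hand that descriptor to mvebu_coreclk_setup() from a CLK_OF_DECLARE() init hook. The sketch below restates that pattern for a hypothetical SoC; the SAR field positions, the frequency table, the compatible string and all example_* names are invented for illustration and are not part of the patch above.

#include <linux/kernel.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
#include "common.h"

/* Hypothetical SAR layout: bits [1:0] select the CPU frequency. */
#define SAR_EXAMPLE_CPU_FREQ		0
#define SAR_EXAMPLE_CPU_FREQ_MASK	0x3

enum { EXAMPLE_CPU_TO_DDR };

static const struct coreclk_ratio example_coreclk_ratios[] __initconst = {
	{ .id = EXAMPLE_CPU_TO_DDR, .name = "ddrclk", },
};

static u32 __init example_get_tclk_freq(void __iomem *sar)
{
	/* Fixed TCLK with no SAR dependency, as on mv98dx3236 above. */
	return 200000000;
}

static const u32 example_cpu_freqs[] __initconst = {
	400000000, 600000000, 800000000, 0 /* reserved */
};

static u32 __init example_get_cpu_freq(void __iomem *sar)
{
	u32 opt = (readl(sar) >> SAR_EXAMPLE_CPU_FREQ) &
		SAR_EXAMPLE_CPU_FREQ_MASK;
	return example_cpu_freqs[opt];
}

static void __init example_get_clk_ratio(void __iomem *sar, int id,
					 int *mult, int *div)
{
	/* Fixed 1:2 CPU-to-DDR ratio in this sketch (cf. mv88f6180's L2). */
	*mult = 1;
	*div = 2;
}

static const struct coreclk_soc_desc example_coreclks = {
	.get_tclk_freq = example_get_tclk_freq,
	.get_cpu_freq = example_get_cpu_freq,
	.get_clk_ratio = example_get_clk_ratio,
	.ratios = example_coreclk_ratios,
	.num_ratios = ARRAY_SIZE(example_coreclk_ratios),
};

static void __init example_clk_init(struct device_node *np)
{
	/* Registers tclk, cpuclk and ddrclk from the node's first reg range. */
	mvebu_coreclk_setup(np, &example_coreclks);
}
CLK_OF_DECLARE(example_clk, "vendor,example-core-clock", example_clk_init);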