author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/clk/baikal-t1
parent     Initial commit. (diff)
download   linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz, linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/clk/baikal-t1')
-rw-r--r--  drivers/clk/baikal-t1/Kconfig       |  52
-rw-r--r--  drivers/clk/baikal-t1/Makefile      |   4
-rw-r--r--  drivers/clk/baikal-t1/ccu-div.c     | 648
-rw-r--r--  drivers/clk/baikal-t1/ccu-div.h     | 121
-rw-r--r--  drivers/clk/baikal-t1/ccu-pll.c     | 558
-rw-r--r--  drivers/clk/baikal-t1/ccu-pll.h     |  72
-rw-r--r--  drivers/clk/baikal-t1/ccu-rst.c     | 217
-rw-r--r--  drivers/clk/baikal-t1/ccu-rst.h     |  67
-rw-r--r--  drivers/clk/baikal-t1/clk-ccu-div.c | 521
-rw-r--r--  drivers/clk/baikal-t1/clk-ccu-pll.c | 277
10 files changed, 2537 insertions, 0 deletions
diff --git a/drivers/clk/baikal-t1/Kconfig b/drivers/clk/baikal-t1/Kconfig new file mode 100644 index 000000000..f0b186830 --- /dev/null +++ b/drivers/clk/baikal-t1/Kconfig @@ -0,0 +1,52 @@ +# SPDX-License-Identifier: GPL-2.0-only +config CLK_BAIKAL_T1 + bool "Baikal-T1 Clocks Control Unit interface" + depends on (MIPS_BAIKAL_T1 && OF) || COMPILE_TEST + default MIPS_BAIKAL_T1 + help + Clocks Control Unit is the core of Baikal-T1 SoC System Controller + responsible for the chip subsystems clocking and resetting. It + consists of multiple global clock domains, which can be reset by + means of the CCU control registers. These domains and devices placed + in them are fed with clocks generated by a hierarchy of PLLs, + configurable and fixed clock dividers. Enable this option to be able + to select Baikal-T1 CCU PLLs and Dividers drivers. + +if CLK_BAIKAL_T1 + +config CLK_BT1_CCU_PLL + bool "Baikal-T1 CCU PLLs support" + select MFD_SYSCON + default MIPS_BAIKAL_T1 + help + Enable this to support the PLLs embedded into the Baikal-T1 SoC + System Controller. These are five PLLs placed at the root of the + clocks hierarchy, right after an external reference oscillator + (normally of 25MHz). They are used to generate high frequency + signals, which are either directly wired to the consumers (like + CPUs, DDR, etc.) or passed over the clock dividers to be only + then used as an individual reference clock of a target device. + +config CLK_BT1_CCU_DIV + bool "Baikal-T1 CCU Dividers support" + select MFD_SYSCON + default MIPS_BAIKAL_T1 + help + Enable this to support the CCU dividers used to distribute clocks + between AXI-bus and system devices coming from CCU PLLs of Baikal-T1 + SoC. CCU dividers can be either configurable or with fixed divider, + either gateable or ungateable. Some of the CCU dividers can be as well + used to reset the domains they're supplying clock to. + +config CLK_BT1_CCU_RST + bool "Baikal-T1 CCU Resets support" + select RESET_CONTROLLER + select MFD_SYSCON + default MIPS_BAIKAL_T1 + help + Enable this to support the CCU reset blocks responsible for the + AXI-bus and some subsystems reset. These are mainly the + self-deasserted reset controls but there are several lines which + can be directly asserted/de-asserted (PCIe and DDR sub-domains). 
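For context on how the blocks described by these Kconfig help texts are consumed, the following is a hypothetical consumer-side sketch using the generic Linux clk/reset consumer APIs; it is not part of this commit, and the device pointer, the "axi"/"arst" connection names and the helper function are placeholders that depend on the consumer's own DT binding.

/*
 * Hypothetical consumer-side sketch (not part of this commit): the clock
 * and reset connection IDs ("axi", "arst") are placeholders defined by the
 * consumer's own DT binding.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_ccu_consumer_init(struct device *dev)
{
	struct reset_control *rst;
	struct clk *clk;
	int ret;

	/* AXI-bus clock produced by one of the CCU dividers */
	clk = devm_clk_get(dev, "axi");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* Self-deasserted reset line of the same CCU clock domain */
	rst = devm_reset_control_get_exclusive(dev, "arst");
	if (IS_ERR(rst)) {
		clk_disable_unprepare(clk);
		return PTR_ERR(rst);
	}

	/* Pulse the domain reset once its clock is running */
	return reset_control_reset(rst);
}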
+ +endif diff --git a/drivers/clk/baikal-t1/Makefile b/drivers/clk/baikal-t1/Makefile new file mode 100644 index 000000000..9c3637de9 --- /dev/null +++ b/drivers/clk/baikal-t1/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CLK_BT1_CCU_PLL) += ccu-pll.o clk-ccu-pll.o +obj-$(CONFIG_CLK_BT1_CCU_DIV) += ccu-div.o clk-ccu-div.o +obj-$(CONFIG_CLK_BT1_CCU_RST) += ccu-rst.o diff --git a/drivers/clk/baikal-t1/ccu-div.c b/drivers/clk/baikal-t1/ccu-div.c new file mode 100644 index 000000000..8d5fc7158 --- /dev/null +++ b/drivers/clk/baikal-t1/ccu-div.c @@ -0,0 +1,648 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Authors: + * Serge Semin <Sergey.Semin@baikalelectronics.ru> + * Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru> + * + * Baikal-T1 CCU Dividers interface driver + */ + +#define pr_fmt(fmt) "bt1-ccu-div: " fmt + +#include <linux/kernel.h> +#include <linux/printk.h> +#include <linux/bits.h> +#include <linux/bitfield.h> +#include <linux/slab.h> +#include <linux/clk-provider.h> +#include <linux/of.h> +#include <linux/spinlock.h> +#include <linux/regmap.h> +#include <linux/delay.h> +#include <linux/time64.h> +#include <linux/debugfs.h> + +#include "ccu-div.h" + +#define CCU_DIV_CTL 0x00 +#define CCU_DIV_CTL_EN BIT(0) +#define CCU_DIV_CTL_RST BIT(1) +#define CCU_DIV_CTL_SET_CLKDIV BIT(2) +#define CCU_DIV_CTL_CLKDIV_FLD 4 +#define CCU_DIV_CTL_CLKDIV_MASK(_width) \ + GENMASK((_width) + CCU_DIV_CTL_CLKDIV_FLD - 1, CCU_DIV_CTL_CLKDIV_FLD) +#define CCU_DIV_CTL_LOCK_SHIFTED BIT(27) +#define CCU_DIV_CTL_GATE_REF_BUF BIT(28) +#define CCU_DIV_CTL_LOCK_NORMAL BIT(31) + +#define CCU_DIV_LOCK_CHECK_RETRIES 50 + +#define CCU_DIV_CLKDIV_MIN 0 +#define CCU_DIV_CLKDIV_MAX(_mask) \ + ((_mask) >> CCU_DIV_CTL_CLKDIV_FLD) + +/* + * Use the next two methods until there are generic field setter and + * getter available with non-constant mask support. + */ +static inline u32 ccu_div_get(u32 mask, u32 val) +{ + return (val & mask) >> CCU_DIV_CTL_CLKDIV_FLD; +} + +static inline u32 ccu_div_prep(u32 mask, u32 val) +{ + return (val << CCU_DIV_CTL_CLKDIV_FLD) & mask; +} + +static inline unsigned long ccu_div_lock_delay_ns(unsigned long ref_clk, + unsigned long div) +{ + u64 ns = 4ULL * (div ?: 1) * NSEC_PER_SEC; + + do_div(ns, ref_clk); + + return ns; +} + +static inline unsigned long ccu_div_calc_freq(unsigned long ref_clk, + unsigned long div) +{ + return ref_clk / (div ?: 1); +} + +static int ccu_div_var_update_clkdiv(struct ccu_div *div, + unsigned long parent_rate, + unsigned long divider) +{ + unsigned long nd; + u32 val = 0; + u32 lock; + int count; + + nd = ccu_div_lock_delay_ns(parent_rate, divider); + + if (div->features & CCU_DIV_LOCK_SHIFTED) + lock = CCU_DIV_CTL_LOCK_SHIFTED; + else + lock = CCU_DIV_CTL_LOCK_NORMAL; + + regmap_update_bits(div->sys_regs, div->reg_ctl, + CCU_DIV_CTL_SET_CLKDIV, CCU_DIV_CTL_SET_CLKDIV); + + /* + * Until there is nsec-version of readl_poll_timeout() is available + * we have to implement the next polling loop. 
+ */ + count = CCU_DIV_LOCK_CHECK_RETRIES; + do { + ndelay(nd); + regmap_read(div->sys_regs, div->reg_ctl, &val); + if (val & lock) + return 0; + } while (--count); + + return -ETIMEDOUT; +} + +static int ccu_div_var_enable(struct clk_hw *hw) +{ + struct clk_hw *parent_hw = clk_hw_get_parent(hw); + struct ccu_div *div = to_ccu_div(hw); + unsigned long flags; + u32 val = 0; + int ret; + + if (!parent_hw) { + pr_err("Can't enable '%s' with no parent", clk_hw_get_name(hw)); + return -EINVAL; + } + + regmap_read(div->sys_regs, div->reg_ctl, &val); + if (val & CCU_DIV_CTL_EN) + return 0; + + spin_lock_irqsave(&div->lock, flags); + ret = ccu_div_var_update_clkdiv(div, clk_hw_get_rate(parent_hw), + ccu_div_get(div->mask, val)); + if (!ret) + regmap_update_bits(div->sys_regs, div->reg_ctl, + CCU_DIV_CTL_EN, CCU_DIV_CTL_EN); + spin_unlock_irqrestore(&div->lock, flags); + if (ret) + pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw)); + + return ret; +} + +static int ccu_div_gate_enable(struct clk_hw *hw) +{ + struct ccu_div *div = to_ccu_div(hw); + unsigned long flags; + + spin_lock_irqsave(&div->lock, flags); + regmap_update_bits(div->sys_regs, div->reg_ctl, + CCU_DIV_CTL_EN, CCU_DIV_CTL_EN); + spin_unlock_irqrestore(&div->lock, flags); + + return 0; +} + +static void ccu_div_gate_disable(struct clk_hw *hw) +{ + struct ccu_div *div = to_ccu_div(hw); + unsigned long flags; + + spin_lock_irqsave(&div->lock, flags); + regmap_update_bits(div->sys_regs, div->reg_ctl, CCU_DIV_CTL_EN, 0); + spin_unlock_irqrestore(&div->lock, flags); +} + +static int ccu_div_gate_is_enabled(struct clk_hw *hw) +{ + struct ccu_div *div = to_ccu_div(hw); + u32 val = 0; + + regmap_read(div->sys_regs, div->reg_ctl, &val); + + return !!(val & CCU_DIV_CTL_EN); +} + +static int ccu_div_buf_enable(struct clk_hw *hw) +{ + struct ccu_div *div = to_ccu_div(hw); + unsigned long flags; + + spin_lock_irqsave(&div->lock, flags); + regmap_update_bits(div->sys_regs, div->reg_ctl, + CCU_DIV_CTL_GATE_REF_BUF, 0); + spin_unlock_irqrestore(&div->lock, flags); + + return 0; +} + +static void ccu_div_buf_disable(struct clk_hw *hw) +{ + struct ccu_div *div = to_ccu_div(hw); + unsigned long flags; + + spin_lock_irqsave(&div->lock, flags); + regmap_update_bits(div->sys_regs, div->reg_ctl, + CCU_DIV_CTL_GATE_REF_BUF, CCU_DIV_CTL_GATE_REF_BUF); + spin_unlock_irqrestore(&div->lock, flags); +} + +static int ccu_div_buf_is_enabled(struct clk_hw *hw) +{ + struct ccu_div *div = to_ccu_div(hw); + u32 val = 0; + + regmap_read(div->sys_regs, div->reg_ctl, &val); + + return !(val & CCU_DIV_CTL_GATE_REF_BUF); +} + +static unsigned long ccu_div_var_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ccu_div *div = to_ccu_div(hw); + unsigned long divider; + u32 val = 0; + + regmap_read(div->sys_regs, div->reg_ctl, &val); + divider = ccu_div_get(div->mask, val); + + return ccu_div_calc_freq(parent_rate, divider); +} + +static inline unsigned long ccu_div_var_calc_divider(unsigned long rate, + unsigned long parent_rate, + unsigned int mask) +{ + unsigned long divider; + + divider = parent_rate / rate; + return clamp_t(unsigned long, divider, CCU_DIV_CLKDIV_MIN, + CCU_DIV_CLKDIV_MAX(mask)); +} + +static long ccu_div_var_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct ccu_div *div = to_ccu_div(hw); + unsigned long divider; + + divider = ccu_div_var_calc_divider(rate, *parent_rate, div->mask); + + return ccu_div_calc_freq(*parent_rate, divider); +} + +/* + * This method is used for the clock 
divider blocks, which support the + * on-the-fly rate change. So due to lacking the EN bit functionality + * they can't be gated before the rate adjustment. + */ +static int ccu_div_var_set_rate_slow(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct ccu_div *div = to_ccu_div(hw); + unsigned long flags, divider; + u32 val; + int ret; + + divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask); + if (divider == 1 && div->features & CCU_DIV_SKIP_ONE) { + divider = 0; + } else if (div->features & CCU_DIV_SKIP_ONE_TO_THREE) { + if (divider == 1 || divider == 2) + divider = 0; + else if (divider == 3) + divider = 4; + } + + val = ccu_div_prep(div->mask, divider); + + spin_lock_irqsave(&div->lock, flags); + regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, val); + ret = ccu_div_var_update_clkdiv(div, parent_rate, divider); + spin_unlock_irqrestore(&div->lock, flags); + if (ret) + pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw)); + + return ret; +} + +/* + * This method is used for the clock divider blocks, which don't support + * the on-the-fly rate change. + */ +static int ccu_div_var_set_rate_fast(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct ccu_div *div = to_ccu_div(hw); + unsigned long flags, divider; + u32 val; + + divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask); + val = ccu_div_prep(div->mask, divider); + + /* + * Also disable the clock divider block if it was enabled by default + * or by the bootloader. + */ + spin_lock_irqsave(&div->lock, flags); + regmap_update_bits(div->sys_regs, div->reg_ctl, + div->mask | CCU_DIV_CTL_EN, val); + spin_unlock_irqrestore(&div->lock, flags); + + return 0; +} + +static unsigned long ccu_div_fixed_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ccu_div *div = to_ccu_div(hw); + + return ccu_div_calc_freq(parent_rate, div->divider); +} + +static long ccu_div_fixed_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct ccu_div *div = to_ccu_div(hw); + + return ccu_div_calc_freq(*parent_rate, div->divider); +} + +static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + return 0; +} + +#ifdef CONFIG_DEBUG_FS + +struct ccu_div_dbgfs_bit { + struct ccu_div *div; + const char *name; + u32 mask; +}; + +#define CCU_DIV_DBGFS_BIT_ATTR(_name, _mask) { \ + .name = _name, \ + .mask = _mask \ + } + +static const struct ccu_div_dbgfs_bit ccu_div_bits[] = { + CCU_DIV_DBGFS_BIT_ATTR("div_en", CCU_DIV_CTL_EN), + CCU_DIV_DBGFS_BIT_ATTR("div_rst", CCU_DIV_CTL_RST), + CCU_DIV_DBGFS_BIT_ATTR("div_bypass", CCU_DIV_CTL_SET_CLKDIV), + CCU_DIV_DBGFS_BIT_ATTR("div_buf", CCU_DIV_CTL_GATE_REF_BUF), + CCU_DIV_DBGFS_BIT_ATTR("div_lock", CCU_DIV_CTL_LOCK_NORMAL) +}; + +#define CCU_DIV_DBGFS_BIT_NUM ARRAY_SIZE(ccu_div_bits) + +/* + * It can be dangerous to change the Divider settings behind clock framework + * back, therefore we don't provide any kernel config based compile time option + * for this feature to enable. + */ +#undef CCU_DIV_ALLOW_WRITE_DEBUGFS +#ifdef CCU_DIV_ALLOW_WRITE_DEBUGFS + +static int ccu_div_dbgfs_bit_set(void *priv, u64 val) +{ + const struct ccu_div_dbgfs_bit *bit = priv; + struct ccu_div *div = bit->div; + unsigned long flags; + + spin_lock_irqsave(&div->lock, flags); + regmap_update_bits(div->sys_regs, div->reg_ctl, + bit->mask, val ? 
bit->mask : 0); + spin_unlock_irqrestore(&div->lock, flags); + + return 0; +} + +static int ccu_div_dbgfs_var_clkdiv_set(void *priv, u64 val) +{ + struct ccu_div *div = priv; + unsigned long flags; + u32 data; + + val = clamp_t(u64, val, CCU_DIV_CLKDIV_MIN, + CCU_DIV_CLKDIV_MAX(div->mask)); + data = ccu_div_prep(div->mask, val); + + spin_lock_irqsave(&div->lock, flags); + regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, data); + spin_unlock_irqrestore(&div->lock, flags); + + return 0; +} + +#define ccu_div_dbgfs_mode 0644 + +#else /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */ + +#define ccu_div_dbgfs_bit_set NULL +#define ccu_div_dbgfs_var_clkdiv_set NULL +#define ccu_div_dbgfs_mode 0444 + +#endif /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */ + +static int ccu_div_dbgfs_bit_get(void *priv, u64 *val) +{ + const struct ccu_div_dbgfs_bit *bit = priv; + struct ccu_div *div = bit->div; + u32 data = 0; + + regmap_read(div->sys_regs, div->reg_ctl, &data); + *val = !!(data & bit->mask); + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_bit_fops, + ccu_div_dbgfs_bit_get, ccu_div_dbgfs_bit_set, "%llu\n"); + +static int ccu_div_dbgfs_var_clkdiv_get(void *priv, u64 *val) +{ + struct ccu_div *div = priv; + u32 data = 0; + + regmap_read(div->sys_regs, div->reg_ctl, &data); + *val = ccu_div_get(div->mask, data); + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_var_clkdiv_fops, + ccu_div_dbgfs_var_clkdiv_get, ccu_div_dbgfs_var_clkdiv_set, "%llu\n"); + +static int ccu_div_dbgfs_fixed_clkdiv_get(void *priv, u64 *val) +{ + struct ccu_div *div = priv; + + *val = div->divider; + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_fixed_clkdiv_fops, + ccu_div_dbgfs_fixed_clkdiv_get, NULL, "%llu\n"); + +static void ccu_div_var_debug_init(struct clk_hw *hw, struct dentry *dentry) +{ + struct ccu_div *div = to_ccu_div(hw); + struct ccu_div_dbgfs_bit *bits; + int didx, bidx, num = 2; + const char *name; + + num += !!(div->flags & CLK_SET_RATE_GATE) + + !!(div->features & CCU_DIV_RESET_DOMAIN); + + bits = kcalloc(num, sizeof(*bits), GFP_KERNEL); + if (!bits) + return; + + for (didx = 0, bidx = 0; bidx < CCU_DIV_DBGFS_BIT_NUM; ++bidx) { + name = ccu_div_bits[bidx].name; + if (!(div->flags & CLK_SET_RATE_GATE) && + !strcmp("div_en", name)) { + continue; + } + + if (!(div->features & CCU_DIV_RESET_DOMAIN) && + !strcmp("div_rst", name)) { + continue; + } + + if (!strcmp("div_buf", name)) + continue; + + bits[didx] = ccu_div_bits[bidx]; + bits[didx].div = div; + + if (div->features & CCU_DIV_LOCK_SHIFTED && + !strcmp("div_lock", name)) { + bits[didx].mask = CCU_DIV_CTL_LOCK_SHIFTED; + } + + debugfs_create_file_unsafe(bits[didx].name, ccu_div_dbgfs_mode, + dentry, &bits[didx], + &ccu_div_dbgfs_bit_fops); + ++didx; + } + + debugfs_create_file_unsafe("div_clkdiv", ccu_div_dbgfs_mode, dentry, + div, &ccu_div_dbgfs_var_clkdiv_fops); +} + +static void ccu_div_gate_debug_init(struct clk_hw *hw, struct dentry *dentry) +{ + struct ccu_div *div = to_ccu_div(hw); + struct ccu_div_dbgfs_bit *bit; + + bit = kmalloc(sizeof(*bit), GFP_KERNEL); + if (!bit) + return; + + *bit = ccu_div_bits[0]; + bit->div = div; + debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit, + &ccu_div_dbgfs_bit_fops); + + debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div, + &ccu_div_dbgfs_fixed_clkdiv_fops); +} + +static void ccu_div_buf_debug_init(struct clk_hw *hw, struct dentry *dentry) +{ + struct ccu_div *div = to_ccu_div(hw); + struct ccu_div_dbgfs_bit *bit; + + bit = kmalloc(sizeof(*bit), GFP_KERNEL); + if (!bit) + return; 
+ + *bit = ccu_div_bits[3]; + bit->div = div; + debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit, + &ccu_div_dbgfs_bit_fops); +} + +static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry) +{ + struct ccu_div *div = to_ccu_div(hw); + + debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div, + &ccu_div_dbgfs_fixed_clkdiv_fops); +} + +#else /* !CONFIG_DEBUG_FS */ + +#define ccu_div_var_debug_init NULL +#define ccu_div_gate_debug_init NULL +#define ccu_div_buf_debug_init NULL +#define ccu_div_fixed_debug_init NULL + +#endif /* !CONFIG_DEBUG_FS */ + +static const struct clk_ops ccu_div_var_gate_to_set_ops = { + .enable = ccu_div_var_enable, + .disable = ccu_div_gate_disable, + .is_enabled = ccu_div_gate_is_enabled, + .recalc_rate = ccu_div_var_recalc_rate, + .round_rate = ccu_div_var_round_rate, + .set_rate = ccu_div_var_set_rate_fast, + .debug_init = ccu_div_var_debug_init +}; + +static const struct clk_ops ccu_div_var_nogate_ops = { + .recalc_rate = ccu_div_var_recalc_rate, + .round_rate = ccu_div_var_round_rate, + .set_rate = ccu_div_var_set_rate_slow, + .debug_init = ccu_div_var_debug_init +}; + +static const struct clk_ops ccu_div_gate_ops = { + .enable = ccu_div_gate_enable, + .disable = ccu_div_gate_disable, + .is_enabled = ccu_div_gate_is_enabled, + .recalc_rate = ccu_div_fixed_recalc_rate, + .round_rate = ccu_div_fixed_round_rate, + .set_rate = ccu_div_fixed_set_rate, + .debug_init = ccu_div_gate_debug_init +}; + +static const struct clk_ops ccu_div_buf_ops = { + .enable = ccu_div_buf_enable, + .disable = ccu_div_buf_disable, + .is_enabled = ccu_div_buf_is_enabled, + .debug_init = ccu_div_buf_debug_init +}; + +static const struct clk_ops ccu_div_fixed_ops = { + .recalc_rate = ccu_div_fixed_recalc_rate, + .round_rate = ccu_div_fixed_round_rate, + .set_rate = ccu_div_fixed_set_rate, + .debug_init = ccu_div_fixed_debug_init +}; + +struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init) +{ + struct clk_parent_data parent_data = { }; + struct clk_init_data hw_init = { }; + struct ccu_div *div; + int ret; + + if (!div_init) + return ERR_PTR(-EINVAL); + + div = kzalloc(sizeof(*div), GFP_KERNEL); + if (!div) + return ERR_PTR(-ENOMEM); + + /* + * Note since Baikal-T1 System Controller registers are MMIO-backed + * we won't check the regmap IO operations return status, because it + * must be zero anyway. 
+ */ + div->hw.init = &hw_init; + div->id = div_init->id; + div->reg_ctl = div_init->base + CCU_DIV_CTL; + div->sys_regs = div_init->sys_regs; + div->flags = div_init->flags; + div->features = div_init->features; + spin_lock_init(&div->lock); + + hw_init.name = div_init->name; + hw_init.flags = div_init->flags; + + if (div_init->type == CCU_DIV_VAR) { + if (hw_init.flags & CLK_SET_RATE_GATE) + hw_init.ops = &ccu_div_var_gate_to_set_ops; + else + hw_init.ops = &ccu_div_var_nogate_ops; + div->mask = CCU_DIV_CTL_CLKDIV_MASK(div_init->width); + } else if (div_init->type == CCU_DIV_GATE) { + hw_init.ops = &ccu_div_gate_ops; + div->divider = div_init->divider; + } else if (div_init->type == CCU_DIV_BUF) { + hw_init.ops = &ccu_div_buf_ops; + } else if (div_init->type == CCU_DIV_FIXED) { + hw_init.ops = &ccu_div_fixed_ops; + div->divider = div_init->divider; + } else { + ret = -EINVAL; + goto err_free_div; + } + + if (!div_init->parent_name) { + ret = -EINVAL; + goto err_free_div; + } + parent_data.fw_name = div_init->parent_name; + parent_data.name = div_init->parent_name; + hw_init.parent_data = &parent_data; + hw_init.num_parents = 1; + + ret = of_clk_hw_register(div_init->np, &div->hw); + if (ret) + goto err_free_div; + + return div; + +err_free_div: + kfree(div); + + return ERR_PTR(ret); +} + +void ccu_div_hw_unregister(struct ccu_div *div) +{ + clk_hw_unregister(&div->hw); + + kfree(div); +} diff --git a/drivers/clk/baikal-t1/ccu-div.h b/drivers/clk/baikal-t1/ccu-div.h new file mode 100644 index 000000000..76d8ee44d --- /dev/null +++ b/drivers/clk/baikal-t1/ccu-div.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Baikal-T1 CCU Dividers interface driver + */ +#ifndef __CLK_BT1_CCU_DIV_H__ +#define __CLK_BT1_CCU_DIV_H__ + +#include <linux/clk-provider.h> +#include <linux/spinlock.h> +#include <linux/regmap.h> +#include <linux/bits.h> +#include <linux/of.h> + +/* + * CCU Divider private clock IDs + * @CCU_SYS_SATA_CLK: CCU SATA internal clock + * @CCU_SYS_XGMAC_CLK: CCU XGMAC internal clock + */ +#define CCU_SYS_SATA_CLK -1 +#define CCU_SYS_XGMAC_CLK -2 + +/* + * CCU Divider private flags + * @CCU_DIV_BASIC: Basic divider clock required by the kernel as early as + * possible. + * @CCU_DIV_SKIP_ONE: Due to some reason divider can't be set to 1. + * It can be 0 though, which is functionally the same. + * @CCU_DIV_SKIP_ONE_TO_THREE: For some reason divider can't be within [1,3]. + * It can be either 0 or greater than 3. + * @CCU_DIV_LOCK_SHIFTED: Find lock-bit at non-standard position. + * @CCU_DIV_RESET_DOMAIN: There is a clock domain reset handle. + */ +#define CCU_DIV_BASIC BIT(0) +#define CCU_DIV_SKIP_ONE BIT(1) +#define CCU_DIV_SKIP_ONE_TO_THREE BIT(2) +#define CCU_DIV_LOCK_SHIFTED BIT(3) +#define CCU_DIV_RESET_DOMAIN BIT(4) + +/* + * enum ccu_div_type - CCU Divider types + * @CCU_DIV_VAR: Clocks gate with variable divider. + * @CCU_DIV_GATE: Clocks gate with fixed divider. + * @CCU_DIV_BUF: Clock gate with no divider. + * @CCU_DIV_FIXED: Ungateable clock with fixed divider. + */ +enum ccu_div_type { + CCU_DIV_VAR, + CCU_DIV_GATE, + CCU_DIV_BUF, + CCU_DIV_FIXED +}; + +/* + * struct ccu_div_init_data - CCU Divider initialization data + * @id: Clocks private identifier. + * @name: Clocks name. + * @parent_name: Parent clocks name in a fw node. + * @base: Divider register base address with respect to the sys_regs base. + * @sys_regs: Baikal-T1 System Controller registers map. 
+ * @np: Pointer to the node describing the CCU Dividers. + * @type: CCU divider type (variable, fixed with and without gate). + * @width: Divider width if it's variable. + * @divider: Divider fixed value. + * @flags: CCU Divider clock flags. + * @features: CCU Divider private features. + */ +struct ccu_div_init_data { + unsigned int id; + const char *name; + const char *parent_name; + unsigned int base; + struct regmap *sys_regs; + struct device_node *np; + enum ccu_div_type type; + union { + unsigned int width; + unsigned int divider; + }; + unsigned long flags; + unsigned long features; +}; + +/* + * struct ccu_div - CCU Divider descriptor + * @hw: clk_hw of the divider. + * @id: Clock private identifier. + * @reg_ctl: Divider control register base address. + * @sys_regs: Baikal-T1 System Controller registers map. + * @lock: Divider state change spin-lock. + * @mask: Divider field mask. + * @divider: Divider fixed value. + * @flags: Divider clock flags. + * @features: CCU Divider private features. + */ +struct ccu_div { + struct clk_hw hw; + unsigned int id; + unsigned int reg_ctl; + struct regmap *sys_regs; + spinlock_t lock; + union { + u32 mask; + unsigned int divider; + }; + unsigned long flags; + unsigned long features; +}; +#define to_ccu_div(_hw) container_of(_hw, struct ccu_div, hw) + +static inline struct clk_hw *ccu_div_get_clk_hw(struct ccu_div *div) +{ + return div ? &div->hw : NULL; +} + +struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *init); + +void ccu_div_hw_unregister(struct ccu_div *div); + +#endif /* __CLK_BT1_CCU_DIV_H__ */ diff --git a/drivers/clk/baikal-t1/ccu-pll.c b/drivers/clk/baikal-t1/ccu-pll.c new file mode 100644 index 000000000..13ef28001 --- /dev/null +++ b/drivers/clk/baikal-t1/ccu-pll.c @@ -0,0 +1,558 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Authors: + * Serge Semin <Sergey.Semin@baikalelectronics.ru> + * Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru> + * + * Baikal-T1 CCU PLL interface driver + */ + +#define pr_fmt(fmt) "bt1-ccu-pll: " fmt + +#include <linux/kernel.h> +#include <linux/printk.h> +#include <linux/limits.h> +#include <linux/bits.h> +#include <linux/bitfield.h> +#include <linux/slab.h> +#include <linux/clk-provider.h> +#include <linux/of.h> +#include <linux/spinlock.h> +#include <linux/regmap.h> +#include <linux/iopoll.h> +#include <linux/time64.h> +#include <linux/rational.h> +#include <linux/debugfs.h> + +#include "ccu-pll.h" + +#define CCU_PLL_CTL 0x000 +#define CCU_PLL_CTL_EN BIT(0) +#define CCU_PLL_CTL_RST BIT(1) +#define CCU_PLL_CTL_CLKR_FLD 2 +#define CCU_PLL_CTL_CLKR_MASK GENMASK(7, CCU_PLL_CTL_CLKR_FLD) +#define CCU_PLL_CTL_CLKF_FLD 8 +#define CCU_PLL_CTL_CLKF_MASK GENMASK(20, CCU_PLL_CTL_CLKF_FLD) +#define CCU_PLL_CTL_CLKOD_FLD 21 +#define CCU_PLL_CTL_CLKOD_MASK GENMASK(24, CCU_PLL_CTL_CLKOD_FLD) +#define CCU_PLL_CTL_BYPASS BIT(30) +#define CCU_PLL_CTL_LOCK BIT(31) +#define CCU_PLL_CTL1 0x004 +#define CCU_PLL_CTL1_BWADJ_FLD 3 +#define CCU_PLL_CTL1_BWADJ_MASK GENMASK(14, CCU_PLL_CTL1_BWADJ_FLD) + +#define CCU_PLL_LOCK_CHECK_RETRIES 50 + +#define CCU_PLL_NR_MAX \ + ((CCU_PLL_CTL_CLKR_MASK >> CCU_PLL_CTL_CLKR_FLD) + 1) +#define CCU_PLL_NF_MAX \ + ((CCU_PLL_CTL_CLKF_MASK >> (CCU_PLL_CTL_CLKF_FLD + 1)) + 1) +#define CCU_PLL_OD_MAX \ + ((CCU_PLL_CTL_CLKOD_MASK >> CCU_PLL_CTL_CLKOD_FLD) + 1) +#define CCU_PLL_NB_MAX \ + ((CCU_PLL_CTL1_BWADJ_MASK >> CCU_PLL_CTL1_BWADJ_FLD) + 1) +#define CCU_PLL_FDIV_MIN 427000UL +#define CCU_PLL_FDIV_MAX 3500000000UL 
+#define CCU_PLL_FOUT_MIN 200000000UL +#define CCU_PLL_FOUT_MAX 2500000000UL +#define CCU_PLL_FVCO_MIN 700000000UL +#define CCU_PLL_FVCO_MAX 3500000000UL +#define CCU_PLL_CLKOD_FACTOR 2 + +static inline unsigned long ccu_pll_lock_delay_us(unsigned long ref_clk, + unsigned long nr) +{ + u64 us = 500ULL * nr * USEC_PER_SEC; + + do_div(us, ref_clk); + + return us; +} + +static inline unsigned long ccu_pll_calc_freq(unsigned long ref_clk, + unsigned long nr, + unsigned long nf, + unsigned long od) +{ + u64 tmp = ref_clk; + + do_div(tmp, nr); + tmp *= nf; + do_div(tmp, od); + + return tmp; +} + +static int ccu_pll_reset(struct ccu_pll *pll, unsigned long ref_clk, + unsigned long nr) +{ + unsigned long ud, ut; + u32 val; + + ud = ccu_pll_lock_delay_us(ref_clk, nr); + ut = ud * CCU_PLL_LOCK_CHECK_RETRIES; + + regmap_update_bits(pll->sys_regs, pll->reg_ctl, + CCU_PLL_CTL_RST, CCU_PLL_CTL_RST); + + return regmap_read_poll_timeout_atomic(pll->sys_regs, pll->reg_ctl, val, + val & CCU_PLL_CTL_LOCK, ud, ut); +} + +static int ccu_pll_enable(struct clk_hw *hw) +{ + struct clk_hw *parent_hw = clk_hw_get_parent(hw); + struct ccu_pll *pll = to_ccu_pll(hw); + unsigned long flags; + u32 val = 0; + int ret; + + if (!parent_hw) { + pr_err("Can't enable '%s' with no parent", clk_hw_get_name(hw)); + return -EINVAL; + } + + regmap_read(pll->sys_regs, pll->reg_ctl, &val); + if (val & CCU_PLL_CTL_EN) + return 0; + + spin_lock_irqsave(&pll->lock, flags); + regmap_write(pll->sys_regs, pll->reg_ctl, val | CCU_PLL_CTL_EN); + ret = ccu_pll_reset(pll, clk_hw_get_rate(parent_hw), + FIELD_GET(CCU_PLL_CTL_CLKR_MASK, val) + 1); + spin_unlock_irqrestore(&pll->lock, flags); + if (ret) + pr_err("PLL '%s' reset timed out\n", clk_hw_get_name(hw)); + + return ret; +} + +static void ccu_pll_disable(struct clk_hw *hw) +{ + struct ccu_pll *pll = to_ccu_pll(hw); + unsigned long flags; + + spin_lock_irqsave(&pll->lock, flags); + regmap_update_bits(pll->sys_regs, pll->reg_ctl, CCU_PLL_CTL_EN, 0); + spin_unlock_irqrestore(&pll->lock, flags); +} + +static int ccu_pll_is_enabled(struct clk_hw *hw) +{ + struct ccu_pll *pll = to_ccu_pll(hw); + u32 val = 0; + + regmap_read(pll->sys_regs, pll->reg_ctl, &val); + + return !!(val & CCU_PLL_CTL_EN); +} + +static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ccu_pll *pll = to_ccu_pll(hw); + unsigned long nr, nf, od; + u32 val = 0; + + regmap_read(pll->sys_regs, pll->reg_ctl, &val); + nr = FIELD_GET(CCU_PLL_CTL_CLKR_MASK, val) + 1; + nf = FIELD_GET(CCU_PLL_CTL_CLKF_MASK, val) + 1; + od = FIELD_GET(CCU_PLL_CTL_CLKOD_MASK, val) + 1; + + return ccu_pll_calc_freq(parent_rate, nr, nf, od); +} + +static void ccu_pll_calc_factors(unsigned long rate, unsigned long parent_rate, + unsigned long *nr, unsigned long *nf, + unsigned long *od) +{ + unsigned long err, freq, min_err = ULONG_MAX; + unsigned long num, denom, n1, d1, nri; + unsigned long nr_max, nf_max, od_max; + + /* + * Make sure PLL is working with valid input signal (Fdiv). If + * you want to speed the function up just reduce CCU_PLL_NR_MAX. + * This will cause a worse approximation though. + */ + nri = (parent_rate / CCU_PLL_FDIV_MAX) + 1; + nr_max = min(parent_rate / CCU_PLL_FDIV_MIN, CCU_PLL_NR_MAX); + + /* + * Find a closest [nr;nf;od] vector taking into account the + * limitations like: 1) 700MHz <= Fvco <= 3.5GHz, 2) PLL Od is + * either 1 or even number within the acceptable range (alas 1s + * is also excluded by the next loop). 
+ */ + for (; nri <= nr_max; ++nri) { + /* Use Od factor to fulfill the limitation 2). */ + num = CCU_PLL_CLKOD_FACTOR * rate; + denom = parent_rate / nri; + + /* + * Make sure Fvco is within the acceptable range to fulfill + * the condition 1). Note due to the CCU_PLL_CLKOD_FACTOR value + * the actual upper limit is also divided by that factor. + * It's not big problem for us since practically there is no + * need in clocks with that high frequency. + */ + nf_max = min(CCU_PLL_FVCO_MAX / denom, CCU_PLL_NF_MAX); + od_max = CCU_PLL_OD_MAX / CCU_PLL_CLKOD_FACTOR; + + /* + * Bypass the out-of-bound values, which can't be properly + * handled by the rational fraction approximation algorithm. + */ + if (num / denom >= nf_max) { + n1 = nf_max; + d1 = 1; + } else if (denom / num >= od_max) { + n1 = 1; + d1 = od_max; + } else { + rational_best_approximation(num, denom, nf_max, od_max, + &n1, &d1); + } + + /* Select the best approximation of the target rate. */ + freq = ccu_pll_calc_freq(parent_rate, nri, n1, d1); + err = abs((int64_t)freq - num); + if (err < min_err) { + min_err = err; + *nr = nri; + *nf = n1; + *od = CCU_PLL_CLKOD_FACTOR * d1; + } + } +} + +static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + unsigned long nr = 1, nf = 1, od = 1; + + ccu_pll_calc_factors(rate, *parent_rate, &nr, &nf, &od); + + return ccu_pll_calc_freq(*parent_rate, nr, nf, od); +} + +/* + * This method is used for PLLs, which support the on-the-fly dividers + * adjustment. So there is no need in gating such clocks. + */ +static int ccu_pll_set_rate_reset(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct ccu_pll *pll = to_ccu_pll(hw); + unsigned long nr, nf, od; + unsigned long flags; + u32 mask, val; + int ret; + + ccu_pll_calc_factors(rate, parent_rate, &nr, &nf, &od); + + mask = CCU_PLL_CTL_CLKR_MASK | CCU_PLL_CTL_CLKF_MASK | + CCU_PLL_CTL_CLKOD_MASK; + val = FIELD_PREP(CCU_PLL_CTL_CLKR_MASK, nr - 1) | + FIELD_PREP(CCU_PLL_CTL_CLKF_MASK, nf - 1) | + FIELD_PREP(CCU_PLL_CTL_CLKOD_MASK, od - 1); + + spin_lock_irqsave(&pll->lock, flags); + regmap_update_bits(pll->sys_regs, pll->reg_ctl, mask, val); + ret = ccu_pll_reset(pll, parent_rate, nr); + spin_unlock_irqrestore(&pll->lock, flags); + if (ret) + pr_err("PLL '%s' reset timed out\n", clk_hw_get_name(hw)); + + return ret; +} + +/* + * This method is used for PLLs, which don't support the on-the-fly dividers + * adjustment. So the corresponding clocks are supposed to be gated first. + */ +static int ccu_pll_set_rate_norst(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct ccu_pll *pll = to_ccu_pll(hw); + unsigned long nr, nf, od; + unsigned long flags; + u32 mask, val; + + ccu_pll_calc_factors(rate, parent_rate, &nr, &nf, &od); + + /* + * Disable PLL if it was enabled by default or left enabled by the + * system bootloader. 
+ */ + mask = CCU_PLL_CTL_CLKR_MASK | CCU_PLL_CTL_CLKF_MASK | + CCU_PLL_CTL_CLKOD_MASK | CCU_PLL_CTL_EN; + val = FIELD_PREP(CCU_PLL_CTL_CLKR_MASK, nr - 1) | + FIELD_PREP(CCU_PLL_CTL_CLKF_MASK, nf - 1) | + FIELD_PREP(CCU_PLL_CTL_CLKOD_MASK, od - 1); + + spin_lock_irqsave(&pll->lock, flags); + regmap_update_bits(pll->sys_regs, pll->reg_ctl, mask, val); + spin_unlock_irqrestore(&pll->lock, flags); + + return 0; +} + +#ifdef CONFIG_DEBUG_FS + +struct ccu_pll_dbgfs_bit { + struct ccu_pll *pll; + const char *name; + unsigned int reg; + u32 mask; +}; + +struct ccu_pll_dbgfs_fld { + struct ccu_pll *pll; + const char *name; + unsigned int reg; + unsigned int lsb; + u32 mask; + u32 min; + u32 max; +}; + +#define CCU_PLL_DBGFS_BIT_ATTR(_name, _reg, _mask) \ + { \ + .name = _name, \ + .reg = _reg, \ + .mask = _mask \ + } + +#define CCU_PLL_DBGFS_FLD_ATTR(_name, _reg, _lsb, _mask, _min, _max) \ + { \ + .name = _name, \ + .reg = _reg, \ + .lsb = _lsb, \ + .mask = _mask, \ + .min = _min, \ + .max = _max \ + } + +static const struct ccu_pll_dbgfs_bit ccu_pll_bits[] = { + CCU_PLL_DBGFS_BIT_ATTR("pll_en", CCU_PLL_CTL, CCU_PLL_CTL_EN), + CCU_PLL_DBGFS_BIT_ATTR("pll_rst", CCU_PLL_CTL, CCU_PLL_CTL_RST), + CCU_PLL_DBGFS_BIT_ATTR("pll_bypass", CCU_PLL_CTL, CCU_PLL_CTL_BYPASS), + CCU_PLL_DBGFS_BIT_ATTR("pll_lock", CCU_PLL_CTL, CCU_PLL_CTL_LOCK) +}; + +#define CCU_PLL_DBGFS_BIT_NUM ARRAY_SIZE(ccu_pll_bits) + +static const struct ccu_pll_dbgfs_fld ccu_pll_flds[] = { + CCU_PLL_DBGFS_FLD_ATTR("pll_nr", CCU_PLL_CTL, CCU_PLL_CTL_CLKR_FLD, + CCU_PLL_CTL_CLKR_MASK, 1, CCU_PLL_NR_MAX), + CCU_PLL_DBGFS_FLD_ATTR("pll_nf", CCU_PLL_CTL, CCU_PLL_CTL_CLKF_FLD, + CCU_PLL_CTL_CLKF_MASK, 1, CCU_PLL_NF_MAX), + CCU_PLL_DBGFS_FLD_ATTR("pll_od", CCU_PLL_CTL, CCU_PLL_CTL_CLKOD_FLD, + CCU_PLL_CTL_CLKOD_MASK, 1, CCU_PLL_OD_MAX), + CCU_PLL_DBGFS_FLD_ATTR("pll_nb", CCU_PLL_CTL1, CCU_PLL_CTL1_BWADJ_FLD, + CCU_PLL_CTL1_BWADJ_MASK, 1, CCU_PLL_NB_MAX) +}; + +#define CCU_PLL_DBGFS_FLD_NUM ARRAY_SIZE(ccu_pll_flds) + +/* + * It can be dangerous to change the PLL settings behind clock framework back, + * therefore we don't provide any kernel config based compile time option for + * this feature to enable. + */ +#undef CCU_PLL_ALLOW_WRITE_DEBUGFS +#ifdef CCU_PLL_ALLOW_WRITE_DEBUGFS + +static int ccu_pll_dbgfs_bit_set(void *priv, u64 val) +{ + const struct ccu_pll_dbgfs_bit *bit = priv; + struct ccu_pll *pll = bit->pll; + unsigned long flags; + + spin_lock_irqsave(&pll->lock, flags); + regmap_update_bits(pll->sys_regs, pll->reg_ctl + bit->reg, + bit->mask, val ? 
bit->mask : 0); + spin_unlock_irqrestore(&pll->lock, flags); + + return 0; +} + +static int ccu_pll_dbgfs_fld_set(void *priv, u64 val) +{ + struct ccu_pll_dbgfs_fld *fld = priv; + struct ccu_pll *pll = fld->pll; + unsigned long flags; + u32 data; + + val = clamp_t(u64, val, fld->min, fld->max); + data = ((val - 1) << fld->lsb) & fld->mask; + + spin_lock_irqsave(&pll->lock, flags); + regmap_update_bits(pll->sys_regs, pll->reg_ctl + fld->reg, fld->mask, + data); + spin_unlock_irqrestore(&pll->lock, flags); + + return 0; +} + +#define ccu_pll_dbgfs_mode 0644 + +#else /* !CCU_PLL_ALLOW_WRITE_DEBUGFS */ + +#define ccu_pll_dbgfs_bit_set NULL +#define ccu_pll_dbgfs_fld_set NULL +#define ccu_pll_dbgfs_mode 0444 + +#endif /* !CCU_PLL_ALLOW_WRITE_DEBUGFS */ + +static int ccu_pll_dbgfs_bit_get(void *priv, u64 *val) +{ + struct ccu_pll_dbgfs_bit *bit = priv; + struct ccu_pll *pll = bit->pll; + u32 data = 0; + + regmap_read(pll->sys_regs, pll->reg_ctl + bit->reg, &data); + *val = !!(data & bit->mask); + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(ccu_pll_dbgfs_bit_fops, + ccu_pll_dbgfs_bit_get, ccu_pll_dbgfs_bit_set, "%llu\n"); + +static int ccu_pll_dbgfs_fld_get(void *priv, u64 *val) +{ + struct ccu_pll_dbgfs_fld *fld = priv; + struct ccu_pll *pll = fld->pll; + u32 data = 0; + + regmap_read(pll->sys_regs, pll->reg_ctl + fld->reg, &data); + *val = ((data & fld->mask) >> fld->lsb) + 1; + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(ccu_pll_dbgfs_fld_fops, + ccu_pll_dbgfs_fld_get, ccu_pll_dbgfs_fld_set, "%llu\n"); + +static void ccu_pll_debug_init(struct clk_hw *hw, struct dentry *dentry) +{ + struct ccu_pll *pll = to_ccu_pll(hw); + struct ccu_pll_dbgfs_bit *bits; + struct ccu_pll_dbgfs_fld *flds; + int idx; + + bits = kcalloc(CCU_PLL_DBGFS_BIT_NUM, sizeof(*bits), GFP_KERNEL); + if (!bits) + return; + + for (idx = 0; idx < CCU_PLL_DBGFS_BIT_NUM; ++idx) { + bits[idx] = ccu_pll_bits[idx]; + bits[idx].pll = pll; + + debugfs_create_file_unsafe(bits[idx].name, ccu_pll_dbgfs_mode, + dentry, &bits[idx], + &ccu_pll_dbgfs_bit_fops); + } + + flds = kcalloc(CCU_PLL_DBGFS_FLD_NUM, sizeof(*flds), GFP_KERNEL); + if (!flds) + return; + + for (idx = 0; idx < CCU_PLL_DBGFS_FLD_NUM; ++idx) { + flds[idx] = ccu_pll_flds[idx]; + flds[idx].pll = pll; + + debugfs_create_file_unsafe(flds[idx].name, ccu_pll_dbgfs_mode, + dentry, &flds[idx], + &ccu_pll_dbgfs_fld_fops); + } +} + +#else /* !CONFIG_DEBUG_FS */ + +#define ccu_pll_debug_init NULL + +#endif /* !CONFIG_DEBUG_FS */ + +static const struct clk_ops ccu_pll_gate_to_set_ops = { + .enable = ccu_pll_enable, + .disable = ccu_pll_disable, + .is_enabled = ccu_pll_is_enabled, + .recalc_rate = ccu_pll_recalc_rate, + .round_rate = ccu_pll_round_rate, + .set_rate = ccu_pll_set_rate_norst, + .debug_init = ccu_pll_debug_init +}; + +static const struct clk_ops ccu_pll_straight_set_ops = { + .enable = ccu_pll_enable, + .disable = ccu_pll_disable, + .is_enabled = ccu_pll_is_enabled, + .recalc_rate = ccu_pll_recalc_rate, + .round_rate = ccu_pll_round_rate, + .set_rate = ccu_pll_set_rate_reset, + .debug_init = ccu_pll_debug_init +}; + +struct ccu_pll *ccu_pll_hw_register(const struct ccu_pll_init_data *pll_init) +{ + struct clk_parent_data parent_data = { }; + struct clk_init_data hw_init = { }; + struct ccu_pll *pll; + int ret; + + if (!pll_init) + return ERR_PTR(-EINVAL); + + pll = kzalloc(sizeof(*pll), GFP_KERNEL); + if (!pll) + return ERR_PTR(-ENOMEM); + + /* + * Note since Baikal-T1 System Controller registers are MMIO-backed + * we won't check the regmap IO operations return status, because 
it + * must be zero anyway. + */ + pll->hw.init = &hw_init; + pll->reg_ctl = pll_init->base + CCU_PLL_CTL; + pll->reg_ctl1 = pll_init->base + CCU_PLL_CTL1; + pll->sys_regs = pll_init->sys_regs; + pll->id = pll_init->id; + spin_lock_init(&pll->lock); + + hw_init.name = pll_init->name; + hw_init.flags = pll_init->flags; + + if (hw_init.flags & CLK_SET_RATE_GATE) + hw_init.ops = &ccu_pll_gate_to_set_ops; + else + hw_init.ops = &ccu_pll_straight_set_ops; + + if (!pll_init->parent_name) { + ret = -EINVAL; + goto err_free_pll; + } + parent_data.fw_name = pll_init->parent_name; + hw_init.parent_data = &parent_data; + hw_init.num_parents = 1; + + ret = of_clk_hw_register(pll_init->np, &pll->hw); + if (ret) + goto err_free_pll; + + return pll; + +err_free_pll: + kfree(pll); + + return ERR_PTR(ret); +} + +void ccu_pll_hw_unregister(struct ccu_pll *pll) +{ + clk_hw_unregister(&pll->hw); + + kfree(pll); +} diff --git a/drivers/clk/baikal-t1/ccu-pll.h b/drivers/clk/baikal-t1/ccu-pll.h new file mode 100644 index 000000000..a71bfd7b9 --- /dev/null +++ b/drivers/clk/baikal-t1/ccu-pll.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Baikal-T1 CCU PLL interface driver + */ +#ifndef __CLK_BT1_CCU_PLL_H__ +#define __CLK_BT1_CCU_PLL_H__ + +#include <linux/clk-provider.h> +#include <linux/spinlock.h> +#include <linux/regmap.h> +#include <linux/bits.h> +#include <linux/of.h> + +/* + * CCU PLL private flags + * @CCU_PLL_BASIC: Basic PLL required by the kernel as early as possible. + */ +#define CCU_PLL_BASIC BIT(0) + +/* + * struct ccu_pll_init_data - CCU PLL initialization data + * @id: Clock private identifier. + * @name: Clocks name. + * @parent_name: Clocks parent name in a fw node. + * @base: PLL registers base address with respect to the sys_regs base. + * @sys_regs: Baikal-T1 System Controller registers map. + * @np: Pointer to the node describing the CCU PLLs. + * @flags: PLL clock flags. + * @features: PLL private features. + */ +struct ccu_pll_init_data { + unsigned int id; + const char *name; + const char *parent_name; + unsigned int base; + struct regmap *sys_regs; + struct device_node *np; + unsigned long flags; + unsigned long features; +}; + +/* + * struct ccu_pll - CCU PLL descriptor + * @hw: clk_hw of the PLL. + * @id: Clock private identifier. + * @reg_ctl: PLL control register base. + * @reg_ctl1: PLL control1 register base. + * @sys_regs: Baikal-T1 System Controller registers map. + * @lock: PLL state change spin-lock. + */ +struct ccu_pll { + struct clk_hw hw; + unsigned int id; + unsigned int reg_ctl; + unsigned int reg_ctl1; + struct regmap *sys_regs; + spinlock_t lock; +}; +#define to_ccu_pll(_hw) container_of(_hw, struct ccu_pll, hw) + +static inline struct clk_hw *ccu_pll_get_clk_hw(struct ccu_pll *pll) +{ + return pll ? 
&pll->hw : NULL; +} + +struct ccu_pll *ccu_pll_hw_register(const struct ccu_pll_init_data *init); + +void ccu_pll_hw_unregister(struct ccu_pll *pll); + +#endif /* __CLK_BT1_CCU_PLL_H__ */ diff --git a/drivers/clk/baikal-t1/ccu-rst.c b/drivers/clk/baikal-t1/ccu-rst.c new file mode 100644 index 000000000..40023ea67 --- /dev/null +++ b/drivers/clk/baikal-t1/ccu-rst.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 BAIKAL ELECTRONICS, JSC + * + * Authors: + * Serge Semin <Sergey.Semin@baikalelectronics.ru> + * + * Baikal-T1 CCU Resets interface driver + */ + +#define pr_fmt(fmt) "bt1-ccu-rst: " fmt + +#include <linux/bits.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/printk.h> +#include <linux/regmap.h> +#include <linux/reset-controller.h> +#include <linux/slab.h> + +#include <dt-bindings/reset/bt1-ccu.h> + +#include "ccu-rst.h" + +#define CCU_AXI_MAIN_BASE 0x030 +#define CCU_AXI_DDR_BASE 0x034 +#define CCU_AXI_SATA_BASE 0x038 +#define CCU_AXI_GMAC0_BASE 0x03C +#define CCU_AXI_GMAC1_BASE 0x040 +#define CCU_AXI_XGMAC_BASE 0x044 +#define CCU_AXI_PCIE_M_BASE 0x048 +#define CCU_AXI_PCIE_S_BASE 0x04C +#define CCU_AXI_USB_BASE 0x050 +#define CCU_AXI_HWA_BASE 0x054 +#define CCU_AXI_SRAM_BASE 0x058 + +#define CCU_SYS_DDR_BASE 0x02c +#define CCU_SYS_SATA_REF_BASE 0x060 +#define CCU_SYS_APB_BASE 0x064 +#define CCU_SYS_PCIE_BASE 0x144 + +#define CCU_RST_DELAY_US 1 + +#define CCU_RST_TRIG(_base, _ofs) \ + { \ + .type = CCU_RST_TRIG, \ + .base = _base, \ + .mask = BIT(_ofs), \ + } + +#define CCU_RST_DIR(_base, _ofs) \ + { \ + .type = CCU_RST_DIR, \ + .base = _base, \ + .mask = BIT(_ofs), \ + } + +struct ccu_rst_info { + enum ccu_rst_type type; + unsigned int base; + unsigned int mask; +}; + +/* + * Each AXI-bus clock divider is equipped with the corresponding clock-consumer + * domain reset (it's self-deasserted reset control). + */ +static const struct ccu_rst_info axi_rst_info[] = { + [CCU_AXI_MAIN_RST] = CCU_RST_TRIG(CCU_AXI_MAIN_BASE, 1), + [CCU_AXI_DDR_RST] = CCU_RST_TRIG(CCU_AXI_DDR_BASE, 1), + [CCU_AXI_SATA_RST] = CCU_RST_TRIG(CCU_AXI_SATA_BASE, 1), + [CCU_AXI_GMAC0_RST] = CCU_RST_TRIG(CCU_AXI_GMAC0_BASE, 1), + [CCU_AXI_GMAC1_RST] = CCU_RST_TRIG(CCU_AXI_GMAC1_BASE, 1), + [CCU_AXI_XGMAC_RST] = CCU_RST_TRIG(CCU_AXI_XGMAC_BASE, 1), + [CCU_AXI_PCIE_M_RST] = CCU_RST_TRIG(CCU_AXI_PCIE_M_BASE, 1), + [CCU_AXI_PCIE_S_RST] = CCU_RST_TRIG(CCU_AXI_PCIE_S_BASE, 1), + [CCU_AXI_USB_RST] = CCU_RST_TRIG(CCU_AXI_USB_BASE, 1), + [CCU_AXI_HWA_RST] = CCU_RST_TRIG(CCU_AXI_HWA_BASE, 1), + [CCU_AXI_SRAM_RST] = CCU_RST_TRIG(CCU_AXI_SRAM_BASE, 1), +}; + +/* + * SATA reference clock domain and APB-bus domain are connected with the + * sefl-deasserted reset control, which can be activated via the corresponding + * clock divider register. DDR and PCIe sub-domains can be reset with directly + * controlled reset signals. Resetting the DDR controller though won't end up + * well while the Linux kernel is working. 
+ */ +static const struct ccu_rst_info sys_rst_info[] = { + [CCU_SYS_SATA_REF_RST] = CCU_RST_TRIG(CCU_SYS_SATA_REF_BASE, 1), + [CCU_SYS_APB_RST] = CCU_RST_TRIG(CCU_SYS_APB_BASE, 1), + [CCU_SYS_DDR_FULL_RST] = CCU_RST_DIR(CCU_SYS_DDR_BASE, 1), + [CCU_SYS_DDR_INIT_RST] = CCU_RST_DIR(CCU_SYS_DDR_BASE, 2), + [CCU_SYS_PCIE_PCS_PHY_RST] = CCU_RST_DIR(CCU_SYS_PCIE_BASE, 0), + [CCU_SYS_PCIE_PIPE0_RST] = CCU_RST_DIR(CCU_SYS_PCIE_BASE, 4), + [CCU_SYS_PCIE_CORE_RST] = CCU_RST_DIR(CCU_SYS_PCIE_BASE, 8), + [CCU_SYS_PCIE_PWR_RST] = CCU_RST_DIR(CCU_SYS_PCIE_BASE, 9), + [CCU_SYS_PCIE_STICKY_RST] = CCU_RST_DIR(CCU_SYS_PCIE_BASE, 10), + [CCU_SYS_PCIE_NSTICKY_RST] = CCU_RST_DIR(CCU_SYS_PCIE_BASE, 11), + [CCU_SYS_PCIE_HOT_RST] = CCU_RST_DIR(CCU_SYS_PCIE_BASE, 12), +}; + +static int ccu_rst_reset(struct reset_controller_dev *rcdev, unsigned long idx) +{ + struct ccu_rst *rst = to_ccu_rst(rcdev); + const struct ccu_rst_info *info = &rst->rsts_info[idx]; + + if (info->type != CCU_RST_TRIG) + return -EOPNOTSUPP; + + regmap_update_bits(rst->sys_regs, info->base, info->mask, info->mask); + + /* The next delay must be enough to cover all the resets. */ + udelay(CCU_RST_DELAY_US); + + return 0; +} + +static int ccu_rst_set(struct reset_controller_dev *rcdev, + unsigned long idx, bool high) +{ + struct ccu_rst *rst = to_ccu_rst(rcdev); + const struct ccu_rst_info *info = &rst->rsts_info[idx]; + + if (info->type != CCU_RST_DIR) + return high ? -EOPNOTSUPP : 0; + + return regmap_update_bits(rst->sys_regs, info->base, + info->mask, high ? info->mask : 0); +} + +static int ccu_rst_assert(struct reset_controller_dev *rcdev, + unsigned long idx) +{ + return ccu_rst_set(rcdev, idx, true); +} + +static int ccu_rst_deassert(struct reset_controller_dev *rcdev, + unsigned long idx) +{ + return ccu_rst_set(rcdev, idx, false); +} + +static int ccu_rst_status(struct reset_controller_dev *rcdev, + unsigned long idx) +{ + struct ccu_rst *rst = to_ccu_rst(rcdev); + const struct ccu_rst_info *info = &rst->rsts_info[idx]; + u32 val; + + if (info->type != CCU_RST_DIR) + return -EOPNOTSUPP; + + regmap_read(rst->sys_regs, info->base, &val); + + return !!(val & info->mask); +} + +static const struct reset_control_ops ccu_rst_ops = { + .reset = ccu_rst_reset, + .assert = ccu_rst_assert, + .deassert = ccu_rst_deassert, + .status = ccu_rst_status, +}; + +struct ccu_rst *ccu_rst_hw_register(const struct ccu_rst_init_data *rst_init) +{ + struct ccu_rst *rst; + int ret; + + if (!rst_init) + return ERR_PTR(-EINVAL); + + rst = kzalloc(sizeof(*rst), GFP_KERNEL); + if (!rst) + return ERR_PTR(-ENOMEM); + + rst->sys_regs = rst_init->sys_regs; + if (of_device_is_compatible(rst_init->np, "baikal,bt1-ccu-axi")) { + rst->rcdev.nr_resets = ARRAY_SIZE(axi_rst_info); + rst->rsts_info = axi_rst_info; + } else if (of_device_is_compatible(rst_init->np, "baikal,bt1-ccu-sys")) { + rst->rcdev.nr_resets = ARRAY_SIZE(sys_rst_info); + rst->rsts_info = sys_rst_info; + } else { + pr_err("Incompatible DT node '%s' specified\n", + of_node_full_name(rst_init->np)); + ret = -EINVAL; + goto err_kfree_rst; + } + + rst->rcdev.owner = THIS_MODULE; + rst->rcdev.ops = &ccu_rst_ops; + rst->rcdev.of_node = rst_init->np; + + ret = reset_controller_register(&rst->rcdev); + if (ret) { + pr_err("Couldn't register '%s' reset controller\n", + of_node_full_name(rst_init->np)); + goto err_kfree_rst; + } + + return rst; + +err_kfree_rst: + kfree(rst); + + return ERR_PTR(ret); +} + +void ccu_rst_hw_unregister(struct ccu_rst *rst) +{ + reset_controller_unregister(&rst->rcdev); + + kfree(rst); 
+} diff --git a/drivers/clk/baikal-t1/ccu-rst.h b/drivers/clk/baikal-t1/ccu-rst.h new file mode 100644 index 000000000..d6e8b2f67 --- /dev/null +++ b/drivers/clk/baikal-t1/ccu-rst.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 BAIKAL ELECTRONICS, JSC + * + * Baikal-T1 CCU Resets interface driver + */ +#ifndef __CLK_BT1_CCU_RST_H__ +#define __CLK_BT1_CCU_RST_H__ + +#include <linux/of.h> +#include <linux/regmap.h> +#include <linux/reset-controller.h> + +struct ccu_rst_info; + +/* + * enum ccu_rst_type - CCU Reset types + * @CCU_RST_TRIG: Self-deasserted reset signal. + * @CCU_RST_DIR: Directly controlled reset signal. + */ +enum ccu_rst_type { + CCU_RST_TRIG, + CCU_RST_DIR, +}; + +/* + * struct ccu_rst_init_data - CCU Resets initialization data + * @sys_regs: Baikal-T1 System Controller registers map. + * @np: Pointer to the node with the System CCU block. + */ +struct ccu_rst_init_data { + struct regmap *sys_regs; + struct device_node *np; +}; + +/* + * struct ccu_rst - CCU Reset descriptor + * @rcdev: Reset controller descriptor. + * @sys_regs: Baikal-T1 System Controller registers map. + * @rsts_info: Reset flag info (base address and mask). + */ +struct ccu_rst { + struct reset_controller_dev rcdev; + struct regmap *sys_regs; + const struct ccu_rst_info *rsts_info; +}; +#define to_ccu_rst(_rcdev) container_of(_rcdev, struct ccu_rst, rcdev) + +#ifdef CONFIG_CLK_BT1_CCU_RST + +struct ccu_rst *ccu_rst_hw_register(const struct ccu_rst_init_data *init); + +void ccu_rst_hw_unregister(struct ccu_rst *rst); + +#else + +static inline +struct ccu_rst *ccu_rst_hw_register(const struct ccu_rst_init_data *init) +{ + return NULL; +} + +static inline void ccu_rst_hw_unregister(struct ccu_rst *rst) {} + +#endif + +#endif /* __CLK_BT1_CCU_RST_H__ */ diff --git a/drivers/clk/baikal-t1/clk-ccu-div.c b/drivers/clk/baikal-t1/clk-ccu-div.c new file mode 100644 index 000000000..0e772e034 --- /dev/null +++ b/drivers/clk/baikal-t1/clk-ccu-div.c @@ -0,0 +1,521 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Authors: + * Serge Semin <Sergey.Semin@baikalelectronics.ru> + * Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru> + * + * Baikal-T1 CCU Dividers clock driver + */ + +#define pr_fmt(fmt) "bt1-ccu-div: " fmt + +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/printk.h> +#include <linux/slab.h> +#include <linux/clk-provider.h> +#include <linux/reset-controller.h> +#include <linux/mfd/syscon.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/ioport.h> +#include <linux/regmap.h> + +#include <dt-bindings/clock/bt1-ccu.h> + +#include "ccu-div.h" +#include "ccu-rst.h" + +#define CCU_AXI_MAIN_BASE 0x030 +#define CCU_AXI_DDR_BASE 0x034 +#define CCU_AXI_SATA_BASE 0x038 +#define CCU_AXI_GMAC0_BASE 0x03C +#define CCU_AXI_GMAC1_BASE 0x040 +#define CCU_AXI_XGMAC_BASE 0x044 +#define CCU_AXI_PCIE_M_BASE 0x048 +#define CCU_AXI_PCIE_S_BASE 0x04C +#define CCU_AXI_USB_BASE 0x050 +#define CCU_AXI_HWA_BASE 0x054 +#define CCU_AXI_SRAM_BASE 0x058 + +#define CCU_SYS_SATA_REF_BASE 0x060 +#define CCU_SYS_APB_BASE 0x064 +#define CCU_SYS_GMAC0_BASE 0x068 +#define CCU_SYS_GMAC1_BASE 0x06C +#define CCU_SYS_XGMAC_BASE 0x070 +#define CCU_SYS_USB_BASE 0x074 +#define CCU_SYS_PVT_BASE 0x078 +#define CCU_SYS_HWA_BASE 0x07C +#define CCU_SYS_UART_BASE 0x084 +#define CCU_SYS_TIMER0_BASE 0x088 +#define CCU_SYS_TIMER1_BASE 0x08C +#define CCU_SYS_TIMER2_BASE 0x090 
+#define CCU_SYS_WDT_BASE 0x150 + +#define CCU_DIV_VAR_INFO(_id, _name, _pname, _base, _width, _flags, _features) \ + { \ + .id = _id, \ + .name = _name, \ + .parent_name = _pname, \ + .base = _base, \ + .type = CCU_DIV_VAR, \ + .width = _width, \ + .flags = _flags, \ + .features = _features \ + } + +#define CCU_DIV_GATE_INFO(_id, _name, _pname, _base, _divider) \ + { \ + .id = _id, \ + .name = _name, \ + .parent_name = _pname, \ + .base = _base, \ + .type = CCU_DIV_GATE, \ + .divider = _divider \ + } + +#define CCU_DIV_BUF_INFO(_id, _name, _pname, _base, _flags) \ + { \ + .id = _id, \ + .name = _name, \ + .parent_name = _pname, \ + .base = _base, \ + .type = CCU_DIV_BUF, \ + .flags = _flags \ + } + +#define CCU_DIV_FIXED_INFO(_id, _name, _pname, _divider) \ + { \ + .id = _id, \ + .name = _name, \ + .parent_name = _pname, \ + .type = CCU_DIV_FIXED, \ + .divider = _divider \ + } + +struct ccu_div_info { + unsigned int id; + const char *name; + const char *parent_name; + unsigned int base; + enum ccu_div_type type; + union { + unsigned int width; + unsigned int divider; + }; + unsigned long flags; + unsigned long features; +}; + +struct ccu_div_data { + struct device_node *np; + struct regmap *sys_regs; + + unsigned int divs_num; + const struct ccu_div_info *divs_info; + struct ccu_div **divs; + + struct ccu_rst *rsts; +}; + +/* + * AXI Main Interconnect (axi_main_clk) and DDR AXI-bus (axi_ddr_clk) clocks + * must be left enabled in any case, since former one is responsible for + * clocking a bus between CPU cores and the rest of the SoC components, while + * the later is clocking the AXI-bus between DDR controller and the Main + * Interconnect. So should any of these clocks get to be disabled, the system + * will literally stop working. That's why we marked them as critical. + */ +static const struct ccu_div_info axi_info[] = { + CCU_DIV_VAR_INFO(CCU_AXI_MAIN_CLK, "axi_main_clk", "pcie_clk", + CCU_AXI_MAIN_BASE, 4, + CLK_IS_CRITICAL, CCU_DIV_RESET_DOMAIN), + CCU_DIV_VAR_INFO(CCU_AXI_DDR_CLK, "axi_ddr_clk", "sata_clk", + CCU_AXI_DDR_BASE, 4, + CLK_IS_CRITICAL | CLK_SET_RATE_GATE, + CCU_DIV_RESET_DOMAIN), + CCU_DIV_VAR_INFO(CCU_AXI_SATA_CLK, "axi_sata_clk", "sata_clk", + CCU_AXI_SATA_BASE, 4, + CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN), + CCU_DIV_VAR_INFO(CCU_AXI_GMAC0_CLK, "axi_gmac0_clk", "eth_clk", + CCU_AXI_GMAC0_BASE, 4, + CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN), + CCU_DIV_VAR_INFO(CCU_AXI_GMAC1_CLK, "axi_gmac1_clk", "eth_clk", + CCU_AXI_GMAC1_BASE, 4, + CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN), + CCU_DIV_VAR_INFO(CCU_AXI_XGMAC_CLK, "axi_xgmac_clk", "eth_clk", + CCU_AXI_XGMAC_BASE, 4, + CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN), + CCU_DIV_VAR_INFO(CCU_AXI_PCIE_M_CLK, "axi_pcie_m_clk", "pcie_clk", + CCU_AXI_PCIE_M_BASE, 4, + CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN), + CCU_DIV_VAR_INFO(CCU_AXI_PCIE_S_CLK, "axi_pcie_s_clk", "pcie_clk", + CCU_AXI_PCIE_S_BASE, 4, + CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN), + CCU_DIV_VAR_INFO(CCU_AXI_USB_CLK, "axi_usb_clk", "sata_clk", + CCU_AXI_USB_BASE, 4, + CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN), + CCU_DIV_VAR_INFO(CCU_AXI_HWA_CLK, "axi_hwa_clk", "sata_clk", + CCU_AXI_HWA_BASE, 4, + CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN), + CCU_DIV_VAR_INFO(CCU_AXI_SRAM_CLK, "axi_sram_clk", "eth_clk", + CCU_AXI_SRAM_BASE, 4, + CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN) +}; + +/* + * APB-bus clock is marked as critical since it's a main communication bus + * for the SoC devices registers IO-operations. 
+ */ +static const struct ccu_div_info sys_info[] = { + CCU_DIV_VAR_INFO(CCU_SYS_SATA_CLK, "sys_sata_clk", + "sata_clk", CCU_SYS_SATA_REF_BASE, 4, + CLK_SET_RATE_GATE, + CCU_DIV_SKIP_ONE | CCU_DIV_LOCK_SHIFTED | + CCU_DIV_RESET_DOMAIN), + CCU_DIV_BUF_INFO(CCU_SYS_SATA_REF_CLK, "sys_sata_ref_clk", + "sys_sata_clk", CCU_SYS_SATA_REF_BASE, + CLK_SET_RATE_PARENT), + CCU_DIV_VAR_INFO(CCU_SYS_APB_CLK, "sys_apb_clk", + "pcie_clk", CCU_SYS_APB_BASE, 5, + CLK_IS_CRITICAL, CCU_DIV_BASIC | CCU_DIV_RESET_DOMAIN), + CCU_DIV_GATE_INFO(CCU_SYS_GMAC0_TX_CLK, "sys_gmac0_tx_clk", + "eth_clk", CCU_SYS_GMAC0_BASE, 5), + CCU_DIV_FIXED_INFO(CCU_SYS_GMAC0_PTP_CLK, "sys_gmac0_ptp_clk", + "eth_clk", 10), + CCU_DIV_GATE_INFO(CCU_SYS_GMAC1_TX_CLK, "sys_gmac1_tx_clk", + "eth_clk", CCU_SYS_GMAC1_BASE, 5), + CCU_DIV_FIXED_INFO(CCU_SYS_GMAC1_PTP_CLK, "sys_gmac1_ptp_clk", + "eth_clk", 10), + CCU_DIV_GATE_INFO(CCU_SYS_XGMAC_CLK, "sys_xgmac_clk", + "eth_clk", CCU_SYS_XGMAC_BASE, 1), + CCU_DIV_FIXED_INFO(CCU_SYS_XGMAC_REF_CLK, "sys_xgmac_ref_clk", + "sys_xgmac_clk", 8), + CCU_DIV_FIXED_INFO(CCU_SYS_XGMAC_PTP_CLK, "sys_xgmac_ptp_clk", + "sys_xgmac_clk", 8), + CCU_DIV_GATE_INFO(CCU_SYS_USB_CLK, "sys_usb_clk", + "eth_clk", CCU_SYS_USB_BASE, 10), + CCU_DIV_VAR_INFO(CCU_SYS_PVT_CLK, "sys_pvt_clk", + "ref_clk", CCU_SYS_PVT_BASE, 5, + CLK_SET_RATE_GATE, 0), + CCU_DIV_VAR_INFO(CCU_SYS_HWA_CLK, "sys_hwa_clk", + "sata_clk", CCU_SYS_HWA_BASE, 4, + CLK_SET_RATE_GATE, 0), + CCU_DIV_VAR_INFO(CCU_SYS_UART_CLK, "sys_uart_clk", + "eth_clk", CCU_SYS_UART_BASE, 17, + CLK_SET_RATE_GATE, 0), + CCU_DIV_FIXED_INFO(CCU_SYS_I2C1_CLK, "sys_i2c1_clk", + "eth_clk", 10), + CCU_DIV_FIXED_INFO(CCU_SYS_I2C2_CLK, "sys_i2c2_clk", + "eth_clk", 10), + CCU_DIV_FIXED_INFO(CCU_SYS_GPIO_CLK, "sys_gpio_clk", + "ref_clk", 25), + CCU_DIV_VAR_INFO(CCU_SYS_TIMER0_CLK, "sys_timer0_clk", + "ref_clk", CCU_SYS_TIMER0_BASE, 17, + CLK_SET_RATE_GATE, CCU_DIV_BASIC), + CCU_DIV_VAR_INFO(CCU_SYS_TIMER1_CLK, "sys_timer1_clk", + "ref_clk", CCU_SYS_TIMER1_BASE, 17, + CLK_SET_RATE_GATE, CCU_DIV_BASIC), + CCU_DIV_VAR_INFO(CCU_SYS_TIMER2_CLK, "sys_timer2_clk", + "ref_clk", CCU_SYS_TIMER2_BASE, 17, + CLK_SET_RATE_GATE, CCU_DIV_BASIC), + CCU_DIV_VAR_INFO(CCU_SYS_WDT_CLK, "sys_wdt_clk", + "eth_clk", CCU_SYS_WDT_BASE, 17, + CLK_SET_RATE_GATE, CCU_DIV_SKIP_ONE_TO_THREE) +}; + +static struct ccu_div_data *axi_data; +static struct ccu_div_data *sys_data; + +static void ccu_div_set_data(struct ccu_div_data *data) +{ + struct device_node *np = data->np; + + if (of_device_is_compatible(np, "baikal,bt1-ccu-axi")) + axi_data = data; + else if (of_device_is_compatible(np, "baikal,bt1-ccu-sys")) + sys_data = data; + else + pr_err("Invalid DT node '%s' specified\n", of_node_full_name(np)); +} + +static struct ccu_div_data *ccu_div_get_data(struct device_node *np) +{ + if (of_device_is_compatible(np, "baikal,bt1-ccu-axi")) + return axi_data; + else if (of_device_is_compatible(np, "baikal,bt1-ccu-sys")) + return sys_data; + + pr_err("Invalid DT node '%s' specified\n", of_node_full_name(np)); + + return NULL; +} + +static struct ccu_div *ccu_div_find_desc(struct ccu_div_data *data, + unsigned int clk_id) +{ + int idx; + + for (idx = 0; idx < data->divs_num; ++idx) { + if (data->divs_info[idx].id == clk_id) + return data->divs[idx]; + } + + return ERR_PTR(-EINVAL); +} + +static struct ccu_div_data *ccu_div_create_data(struct device_node *np) +{ + struct ccu_div_data *data; + int ret; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return ERR_PTR(-ENOMEM); + + data->np = np; + if 
(of_device_is_compatible(np, "baikal,bt1-ccu-axi")) {
+ data->divs_num = ARRAY_SIZE(axi_info);
+ data->divs_info = axi_info;
+ } else if (of_device_is_compatible(np, "baikal,bt1-ccu-sys")) {
+ data->divs_num = ARRAY_SIZE(sys_info);
+ data->divs_info = sys_info;
+ } else {
+ pr_err("Incompatible DT node '%s' specified\n",
+ of_node_full_name(np));
+ ret = -EINVAL;
+ goto err_kfree_data;
+ }
+
+ data->divs = kcalloc(data->divs_num, sizeof(*data->divs), GFP_KERNEL);
+ if (!data->divs) {
+ ret = -ENOMEM;
+ goto err_kfree_data;
+ }
+
+ return data;
+
+err_kfree_data:
+ kfree(data);
+
+ return ERR_PTR(ret);
+}
+
+static void ccu_div_free_data(struct ccu_div_data *data)
+{
+ kfree(data->divs);
+
+ kfree(data);
+}
+
+static int ccu_div_find_sys_regs(struct ccu_div_data *data)
+{
+ data->sys_regs = syscon_node_to_regmap(data->np->parent);
+ if (IS_ERR(data->sys_regs)) {
+ pr_err("Failed to find syscon regs for '%s'\n",
+ of_node_full_name(data->np));
+ return PTR_ERR(data->sys_regs);
+ }
+
+ return 0;
+}
+
+static struct clk_hw *ccu_div_of_clk_hw_get(struct of_phandle_args *clkspec,
+ void *priv)
+{
+ struct ccu_div_data *data = priv;
+ struct ccu_div *div;
+ unsigned int clk_id;
+
+ clk_id = clkspec->args[0];
+ div = ccu_div_find_desc(data, clk_id);
+ if (IS_ERR(div)) {
+ if (div != ERR_PTR(-EPROBE_DEFER))
+ pr_info("Invalid clock ID %d specified\n", clk_id);
+
+ return ERR_CAST(div);
+ }
+
+ return ccu_div_get_clk_hw(div);
+}
+
+static int ccu_div_clk_register(struct ccu_div_data *data, bool defer)
+{
+ int idx, ret;
+
+ for (idx = 0; idx < data->divs_num; ++idx) {
+ const struct ccu_div_info *info = &data->divs_info[idx];
+ struct ccu_div_init_data init = {0};
+
+ /* Defer non-basic dividers registration to the probe stage */
+ if (!!(info->features & CCU_DIV_BASIC) ^ defer) {
+ if (!data->divs[idx])
+ data->divs[idx] = ERR_PTR(-EPROBE_DEFER);
+
+ continue;
+ }
+
+ init.id = info->id;
+ init.name = info->name;
+ init.parent_name = info->parent_name;
+ init.np = data->np;
+ init.type = info->type;
+ init.flags = info->flags;
+ init.features = info->features;
+
+ if (init.type == CCU_DIV_VAR) {
+ init.base = info->base;
+ init.sys_regs = data->sys_regs;
+ init.width = info->width;
+ } else if (init.type == CCU_DIV_GATE) {
+ init.base = info->base;
+ init.sys_regs = data->sys_regs;
+ init.divider = info->divider;
+ } else if (init.type == CCU_DIV_BUF) {
+ init.base = info->base;
+ init.sys_regs = data->sys_regs;
+ } else {
+ init.divider = info->divider;
+ }
+
+ data->divs[idx] = ccu_div_hw_register(&init);
+ if (IS_ERR(data->divs[idx])) {
+ ret = PTR_ERR(data->divs[idx]);
+ pr_err("Couldn't register divider '%s' hw\n",
+ init.name);
+ goto err_hw_unregister;
+ }
+ }
+
+ return 0;
+
+err_hw_unregister:
+ for (--idx; idx >= 0; --idx) {
+ if (!!(data->divs_info[idx].features & CCU_DIV_BASIC) ^ defer)
+ continue;
+
+ ccu_div_hw_unregister(data->divs[idx]);
+ }
+
+ return ret;
+}
+
+static void ccu_div_clk_unregister(struct ccu_div_data *data, bool defer)
+{
+ int idx;
+
+ /* Uninstall only the clocks registered on the specified stage */
+ for (idx = 0; idx < data->divs_num; ++idx) {
+ if (!!(data->divs_info[idx].features & CCU_DIV_BASIC) ^ defer)
+ continue;
+
+ ccu_div_hw_unregister(data->divs[idx]);
+ }
+}
+
+static int ccu_div_of_register(struct ccu_div_data *data)
+{
+ int ret;
+
+ ret = of_clk_add_hw_provider(data->np, ccu_div_of_clk_hw_get, data);
+ if (ret) {
+ pr_err("Couldn't register dividers '%s' clock provider\n",
+ of_node_full_name(data->np));
+ }
+
+ return ret;
+}
+
+static int ccu_div_rst_register(struct ccu_div_data *data)
+{
+ struct 
ccu_rst_init_data init = {0};
+
+ init.sys_regs = data->sys_regs;
+ init.np = data->np;
+
+ data->rsts = ccu_rst_hw_register(&init);
+ if (IS_ERR(data->rsts)) {
+ pr_err("Couldn't register divider '%s' reset controller\n",
+ of_node_full_name(data->np));
+ return PTR_ERR(data->rsts);
+ }
+
+ return 0;
+}
+
+static int ccu_div_probe(struct platform_device *pdev)
+{
+ struct ccu_div_data *data;
+ int ret;
+
+ data = ccu_div_get_data(dev_of_node(&pdev->dev));
+ if (!data)
+ return -EINVAL;
+
+ ret = ccu_div_clk_register(data, false);
+ if (ret)
+ return ret;
+
+ ret = ccu_div_rst_register(data);
+ if (ret)
+ goto err_clk_unregister;
+
+ return 0;
+
+err_clk_unregister:
+ ccu_div_clk_unregister(data, false);
+
+ return ret;
+}
+
+static const struct of_device_id ccu_div_of_match[] = {
+ { .compatible = "baikal,bt1-ccu-axi" },
+ { .compatible = "baikal,bt1-ccu-sys" },
+ { }
+};
+
+static struct platform_driver ccu_div_driver = {
+ .probe = ccu_div_probe,
+ .driver = {
+ .name = "clk-ccu-div",
+ .of_match_table = ccu_div_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(ccu_div_driver);
+
+static __init void ccu_div_init(struct device_node *np)
+{
+ struct ccu_div_data *data;
+ int ret;
+
+ data = ccu_div_create_data(np);
+ if (IS_ERR(data))
+ return;
+
+ ret = ccu_div_find_sys_regs(data);
+ if (ret)
+ goto err_free_data;
+
+ ret = ccu_div_clk_register(data, true);
+ if (ret)
+ goto err_free_data;
+
+ ret = ccu_div_of_register(data);
+ if (ret)
+ goto err_clk_unregister;
+
+ ccu_div_set_data(data);
+
+ return;
+
+err_clk_unregister:
+ ccu_div_clk_unregister(data, true);
+
+err_free_data:
+ ccu_div_free_data(data);
+}
+CLK_OF_DECLARE_DRIVER(ccu_axi, "baikal,bt1-ccu-axi", ccu_div_init);
+CLK_OF_DECLARE_DRIVER(ccu_sys, "baikal,bt1-ccu-sys", ccu_div_init);
diff --git a/drivers/clk/baikal-t1/clk-ccu-pll.c b/drivers/clk/baikal-t1/clk-ccu-pll.c
new file mode 100644
index 000000000..fce02ce77
--- /dev/null
+++ b/drivers/clk/baikal-t1/clk-ccu-pll.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ * Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
+ *
+ * Baikal-T1 CCU PLL clocks driver
+ */
+
+#define pr_fmt(fmt) "bt1-ccu-pll: " fmt
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/ioport.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/bt1-ccu.h>
+
+#include "ccu-pll.h"
+
+#define CCU_CPU_PLL_BASE 0x000
+#define CCU_SATA_PLL_BASE 0x008
+#define CCU_DDR_PLL_BASE 0x010
+#define CCU_PCIE_PLL_BASE 0x018
+#define CCU_ETH_PLL_BASE 0x020
+
+#define CCU_PLL_INFO(_id, _name, _pname, _base, _flags, _features) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .base = _base, \
+ .flags = _flags, \
+ .features = _features, \
+ }
+
+#define CCU_PLL_NUM ARRAY_SIZE(pll_info)
+
+struct ccu_pll_info {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ unsigned int base;
+ unsigned long flags;
+ unsigned long features;
+};
+
+/*
+ * Alas, we have to mark all the PLLs as critical. The CPU and DDR PLLs are
+ * the sources of the CPU cores and DDR controller reference clocks, so they
+ * must never be gated. The SATA and PCIe PLLs are the parents of the APB-bus
+ * and DDR controller AXI-bus clocks. 
If they are gated, the system will be
+ * unusable. Moreover, disabling the SATA and Ethernet PLLs causes an
+ * automatic reset of the corresponding subsystems. So until we are ready to
+ * re-initialize all the devices consuming those PLLs, they stay marked as
+ * critical too.
+ */
+static const struct ccu_pll_info pll_info[] = {
+ CCU_PLL_INFO(CCU_CPU_PLL, "cpu_pll", "ref_clk", CCU_CPU_PLL_BASE,
+ CLK_IS_CRITICAL, CCU_PLL_BASIC),
+ CCU_PLL_INFO(CCU_SATA_PLL, "sata_pll", "ref_clk", CCU_SATA_PLL_BASE,
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE, 0),
+ CCU_PLL_INFO(CCU_DDR_PLL, "ddr_pll", "ref_clk", CCU_DDR_PLL_BASE,
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE, 0),
+ CCU_PLL_INFO(CCU_PCIE_PLL, "pcie_pll", "ref_clk", CCU_PCIE_PLL_BASE,
+ CLK_IS_CRITICAL, CCU_PLL_BASIC),
+ CCU_PLL_INFO(CCU_ETH_PLL, "eth_pll", "ref_clk", CCU_ETH_PLL_BASE,
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE, 0)
+};
+
+struct ccu_pll_data {
+ struct device_node *np;
+ struct regmap *sys_regs;
+ struct ccu_pll *plls[CCU_PLL_NUM];
+};
+
+static struct ccu_pll_data *pll_data;
+
+static struct ccu_pll *ccu_pll_find_desc(struct ccu_pll_data *data,
+ unsigned int clk_id)
+{
+ int idx;
+
+ for (idx = 0; idx < CCU_PLL_NUM; ++idx) {
+ if (pll_info[idx].id == clk_id)
+ return data->plls[idx];
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static struct ccu_pll_data *ccu_pll_create_data(struct device_node *np)
+{
+ struct ccu_pll_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ data->np = np;
+
+ return data;
+}
+
+static void ccu_pll_free_data(struct ccu_pll_data *data)
+{
+ kfree(data);
+}
+
+static int ccu_pll_find_sys_regs(struct ccu_pll_data *data)
+{
+ data->sys_regs = syscon_node_to_regmap(data->np->parent);
+ if (IS_ERR(data->sys_regs)) {
+ pr_err("Failed to find syscon regs for '%s'\n",
+ of_node_full_name(data->np));
+ return PTR_ERR(data->sys_regs);
+ }
+
+ return 0;
+}
+
+static struct clk_hw *ccu_pll_of_clk_hw_get(struct of_phandle_args *clkspec,
+ void *priv)
+{
+ struct ccu_pll_data *data = priv;
+ struct ccu_pll *pll;
+ unsigned int clk_id;
+
+ clk_id = clkspec->args[0];
+ pll = ccu_pll_find_desc(data, clk_id);
+ if (IS_ERR(pll)) {
+ if (pll != ERR_PTR(-EPROBE_DEFER))
+ pr_info("Invalid PLL clock ID %d specified\n", clk_id);
+
+ return ERR_CAST(pll);
+ }
+
+ return ccu_pll_get_clk_hw(pll);
+}
+
+static int ccu_pll_clk_register(struct ccu_pll_data *data, bool defer)
+{
+ int idx, ret;
+
+ for (idx = 0; idx < CCU_PLL_NUM; ++idx) {
+ const struct ccu_pll_info *info = &pll_info[idx];
+ struct ccu_pll_init_data init = {0};
+
+ /* Defer non-basic PLLs registration to the probe stage */
+ if (!!(info->features & CCU_PLL_BASIC) ^ defer) {
+ if (!data->plls[idx])
+ data->plls[idx] = ERR_PTR(-EPROBE_DEFER);
+
+ continue;
+ }
+
+ init.id = info->id;
+ init.name = info->name;
+ init.parent_name = info->parent_name;
+ init.base = info->base;
+ init.sys_regs = data->sys_regs;
+ init.np = data->np;
+ init.flags = info->flags;
+ init.features = info->features;
+
+ data->plls[idx] = ccu_pll_hw_register(&init);
+ if (IS_ERR(data->plls[idx])) {
+ ret = PTR_ERR(data->plls[idx]);
+ pr_err("Couldn't register PLL hw '%s'\n",
+ init.name);
+ goto err_hw_unregister;
+ }
+ }
+
+ return 0;
+
+err_hw_unregister:
+ for (--idx; idx >= 0; --idx) {
+ if (!!(pll_info[idx].features & CCU_PLL_BASIC) ^ defer)
+ continue;
+
+ ccu_pll_hw_unregister(data->plls[idx]);
+ }
+
+ return ret;
+}
+
+static void ccu_pll_clk_unregister(struct ccu_pll_data *data, bool defer)
+{
+ int idx;
+
+ /* Uninstall only the clocks registered on the 
specified stage */
+ for (idx = 0; idx < CCU_PLL_NUM; ++idx) {
+ if (!!(pll_info[idx].features & CCU_PLL_BASIC) ^ defer)
+ continue;
+
+ ccu_pll_hw_unregister(data->plls[idx]);
+ }
+}
+
+static int ccu_pll_of_register(struct ccu_pll_data *data)
+{
+ int ret;
+
+ ret = of_clk_add_hw_provider(data->np, ccu_pll_of_clk_hw_get, data);
+ if (ret) {
+ pr_err("Couldn't register PLL provider of '%s'\n",
+ of_node_full_name(data->np));
+ }
+
+ return ret;
+}
+
+static int ccu_pll_probe(struct platform_device *pdev)
+{
+ struct ccu_pll_data *data = pll_data;
+
+ if (!data)
+ return -EINVAL;
+
+ return ccu_pll_clk_register(data, false);
+}
+
+static const struct of_device_id ccu_pll_of_match[] = {
+ { .compatible = "baikal,bt1-ccu-pll" },
+ { }
+};
+
+static struct platform_driver ccu_pll_driver = {
+ .probe = ccu_pll_probe,
+ .driver = {
+ .name = "clk-ccu-pll",
+ .of_match_table = ccu_pll_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(ccu_pll_driver);
+
+static __init void ccu_pll_init(struct device_node *np)
+{
+ struct ccu_pll_data *data;
+ int ret;
+
+ data = ccu_pll_create_data(np);
+ if (IS_ERR(data))
+ return;
+
+ ret = ccu_pll_find_sys_regs(data);
+ if (ret)
+ goto err_free_data;
+
+ ret = ccu_pll_clk_register(data, true);
+ if (ret)
+ goto err_free_data;
+
+ ret = ccu_pll_of_register(data);
+ if (ret)
+ goto err_clk_unregister;
+
+ pll_data = data;
+
+ return;
+
+err_clk_unregister:
+ ccu_pll_clk_unregister(data, true);
+
+err_free_data:
+ ccu_pll_free_data(data);
+}
+CLK_OF_DECLARE_DRIVER(ccu_pll, "baikal,bt1-ccu-pll", ccu_pll_init); |
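For context (not part of the patch above): peripheral drivers consume the clocks registered by these providers through the common clock framework, referencing the CCU node and a clock ID from dt-bindings/clock/bt1-ccu.h in their device tree node. A minimal, purely illustrative consumer sketch follows; the device and its DT wiring (e.g. "clocks = <&ccu_sys CCU_SYS_UART_CLK>;") are hypothetical, only standard clk API calls are used, and error handling is trimmed.

/* Illustrative sketch only -- hypothetical consumer of a Baikal-T1 CCU clock. */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	/* Look up the clock the CCU provider hands out for this device */
	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Ungate the divider output before touching the peripheral */
	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "running at %lu Hz\n", clk_get_rate(clk));

	platform_set_drvdata(pdev, clk);

	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	/* Re-gate the clock on teardown */
	clk_disable_unprepare(platform_get_drvdata(pdev));

	return 0;
}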