Diffstat (limited to 'drivers/phy/cadence')
-rw-r--r--   drivers/phy/cadence/Kconfig                    48
-rw-r--r--   drivers/phy/cadence/Makefile                    6
-rw-r--r--   drivers/phy/cadence/cdns-dphy-rx.c            255
-rw-r--r--   drivers/phy/cadence/cdns-dphy.c               488
-rw-r--r--   drivers/phy/cadence/phy-cadence-salvo.c       318
-rw-r--r--   drivers/phy/cadence/phy-cadence-sierra.c     2534
-rw-r--r--   drivers/phy/cadence/phy-cadence-torrent.c    4721
7 files changed, 8370 insertions, 0 deletions
diff --git a/drivers/phy/cadence/Kconfig b/drivers/phy/cadence/Kconfig
new file mode 100644
index 000000000..1adde2d99
--- /dev/null
+++ b/drivers/phy/cadence/Kconfig
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Phy drivers for Cadence PHYs
+#
+
+config PHY_CADENCE_TORRENT
+ tristate "Cadence Torrent PHY driver"
+ depends on OF
+ depends on HAS_IOMEM
+ depends on COMMON_CLK
+ select GENERIC_PHY
+ help
+ Support for Cadence Torrent PHY.
+
+config PHY_CADENCE_DPHY
+ tristate "Cadence D-PHY Support"
+ depends on HAS_IOMEM && OF
+ select GENERIC_PHY
+ select GENERIC_PHY_MIPI_DPHY
+ help
+ Choose this option if you have a Cadence D-PHY in your
+ system. If M is selected, the module will be called
+ cdns-dphy.
+
+config PHY_CADENCE_DPHY_RX
+ tristate "Cadence D-PHY Rx Support"
+ depends on HAS_IOMEM && OF
+ select GENERIC_PHY
+ select GENERIC_PHY_MIPI_DPHY
+ help
+ Support for Cadence D-PHY in Rx configuration.
+
+config PHY_CADENCE_SIERRA
+ tristate "Cadence Sierra PHY Driver"
+ depends on OF && HAS_IOMEM && RESET_CONTROLLER
+ depends on COMMON_CLK
+ select GENERIC_PHY
+ help
+ Enable this to support the Cadence Sierra PHY driver.
+
+config PHY_CADENCE_SALVO
+ tristate "Cadence Salvo PHY Driver"
+ depends on OF && HAS_IOMEM
+ select GENERIC_PHY
+ help
+ Enable this to support the Cadence SALVO PHY driver.
+ This is a legacy PHY used only for USB3 and USB2.
diff --git a/drivers/phy/cadence/Makefile b/drivers/phy/cadence/Makefile
new file mode 100644
index 000000000..e17f035dd
--- /dev/null
+++ b/drivers/phy/cadence/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_PHY_CADENCE_TORRENT) += phy-cadence-torrent.o
+obj-$(CONFIG_PHY_CADENCE_DPHY) += cdns-dphy.o
+obj-$(CONFIG_PHY_CADENCE_DPHY_RX) += cdns-dphy-rx.o
+obj-$(CONFIG_PHY_CADENCE_SIERRA) += phy-cadence-sierra.o
+obj-$(CONFIG_PHY_CADENCE_SALVO) += phy-cadence-salvo.o
diff --git a/drivers/phy/cadence/cdns-dphy-rx.c b/drivers/phy/cadence/cdns-dphy-rx.c
new file mode 100644
index 000000000..572c70089
--- /dev/null
+++ b/drivers/phy/cadence/cdns-dphy-rx.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-mipi-dphy.h>
+#include <linux/platform_device.h>
+
+#define DPHY_PMA_CMN(reg) (reg)
+#define DPHY_PCS(reg) (0xb00 + (reg))
+#define DPHY_ISO(reg) (0xc00 + (reg))
+
+#define DPHY_CMN_SSM DPHY_PMA_CMN(0x20)
+#define DPHY_CMN_RX_MODE_EN BIT(10)
+#define DPHY_CMN_RX_BANDGAP_TIMER_MASK GENMASK(8, 1)
+#define DPHY_CMN_SSM_EN BIT(0)
+
+#define DPHY_CMN_RX_BANDGAP_TIMER 0x14
+
+#define DPHY_BAND_CFG DPHY_PCS(0x0)
+#define DPHY_BAND_CFG_RIGHT_BAND GENMASK(9, 5)
+#define DPHY_BAND_CFG_LEFT_BAND GENMASK(4, 0)
+
+#define DPHY_POWER_ISLAND_EN_DATA DPHY_PCS(0x8)
+#define DPHY_POWER_ISLAND_EN_DATA_VAL 0xaaaaaaaa
+
+#define DPHY_POWER_ISLAND_EN_CLK DPHY_PCS(0xc)
+#define DPHY_POWER_ISLAND_EN_CLK_VAL 0xaa
+
+#define DPHY_ISO_CL_CTRL_L DPHY_ISO(0x10)
+#define DPHY_ISO_DL_CTRL_L0 DPHY_ISO(0x14)
+#define DPHY_ISO_DL_CTRL_L1 DPHY_ISO(0x20)
+#define DPHY_ISO_DL_CTRL_L2 DPHY_ISO(0x30)
+#define DPHY_ISO_DL_CTRL_L3 DPHY_ISO(0x3c)
+
+#define DPHY_ISO_LANE_READY_BIT 0
+#define DPHY_ISO_LANE_READY_TIMEOUT_MS 100UL
+
+#define DPHY_LANES_MIN 1
+#define DPHY_LANES_MAX 4
+
+struct cdns_dphy_rx {
+ void __iomem *regs;
+ struct device *dev;
+ struct phy *phy;
+};
+
+struct cdns_dphy_rx_band {
+ /* Rates are in Mbps. */
+ unsigned int min_rate;
+ unsigned int max_rate;
+};
+
+/* Order of bands is important since the index is the band number. */
+static const struct cdns_dphy_rx_band bands[] = {
+ { 80, 100 }, { 100, 120 }, { 120, 160 }, { 160, 200 }, { 200, 240 },
+ { 240, 280 }, { 280, 320 }, { 320, 360 }, { 360, 400 }, { 400, 480 },
+ { 480, 560 }, { 560, 640 }, { 640, 720 }, { 720, 800 }, { 800, 880 },
+ { 880, 1040 }, { 1040, 1200 }, { 1200, 1350 }, { 1350, 1500 },
+ { 1500, 1750 }, { 1750, 2000 }, { 2000, 2250 }, { 2250, 2500 }
+};
+
+static int cdns_dphy_rx_power_on(struct phy *phy)
+{
+ struct cdns_dphy_rx *dphy = phy_get_drvdata(phy);
+
+ /* Start RX state machine. */
+ writel(DPHY_CMN_SSM_EN | DPHY_CMN_RX_MODE_EN |
+ FIELD_PREP(DPHY_CMN_RX_BANDGAP_TIMER_MASK,
+ DPHY_CMN_RX_BANDGAP_TIMER),
+ dphy->regs + DPHY_CMN_SSM);
+
+ return 0;
+}
+
+static int cdns_dphy_rx_power_off(struct phy *phy)
+{
+ struct cdns_dphy_rx *dphy = phy_get_drvdata(phy);
+
+ writel(0, dphy->regs + DPHY_CMN_SSM);
+
+ return 0;
+}
+
+static int cdns_dphy_rx_get_band_ctrl(unsigned long hs_clk_rate)
+{
+ unsigned int rate, i;
+
+ rate = hs_clk_rate / 1000000UL;
+ /* Since CSI-2 clock is DDR, the bit rate is twice the clock rate. */
+ rate *= 2;
+
+ if (rate < bands[0].min_rate)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < ARRAY_SIZE(bands); i++)
+ if (rate < bands[i].max_rate)
+ return i;
+
+ return -EOPNOTSUPP;
+}
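
Aside, not part of the patch: a minimal standalone sketch of the band-selection
arithmetic in cdns_dphy_rx_get_band_ctrl() above. The 600 MHz HS clock is an
illustrative value only, and -1 stands in for the driver's -EOPNOTSUPP.

	#include <stdio.h>

	/* Band table copied from cdns-dphy-rx.c: index is the band number, rates in Mbps. */
	struct band { unsigned int min_rate, max_rate; };

	static const struct band bands[] = {
		{ 80, 100 }, { 100, 120 }, { 120, 160 }, { 160, 200 }, { 200, 240 },
		{ 240, 280 }, { 280, 320 }, { 320, 360 }, { 360, 400 }, { 400, 480 },
		{ 480, 560 }, { 560, 640 }, { 640, 720 }, { 720, 800 }, { 800, 880 },
		{ 880, 1040 }, { 1040, 1200 }, { 1200, 1350 }, { 1350, 1500 },
		{ 1500, 1750 }, { 1750, 2000 }, { 2000, 2250 }, { 2250, 2500 }
	};

	/* Same lookup as the driver: the HS clock is DDR, so the per-lane
	 * bit rate is twice the clock rate, and the first band whose upper
	 * bound exceeds that rate wins.
	 */
	static int get_band(unsigned long hs_clk_rate_hz)
	{
		unsigned int i, rate = (hs_clk_rate_hz / 1000000UL) * 2;

		if (rate < bands[0].min_rate)
			return -1;
		for (i = 0; i < sizeof(bands) / sizeof(bands[0]); i++)
			if (rate < bands[i].max_rate)
				return i;
		return -1;
	}

	int main(void)
	{
		/* A 600 MHz HS clock is 1200 Mbps per lane and selects band 17. */
		printf("band = %d\n", get_band(600000000UL));
		return 0;
	}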
+
+static inline int cdns_dphy_rx_wait_for_bit(void __iomem *addr,
+ unsigned int bit)
+{
+ u32 val;
+
+ return readl_relaxed_poll_timeout(addr, val, val & BIT(bit), 10,
+ DPHY_ISO_LANE_READY_TIMEOUT_MS * 1000);
+}
+
+static int cdns_dphy_rx_wait_lane_ready(struct cdns_dphy_rx *dphy,
+ unsigned int lanes)
+{
+ static const u32 data_lane_ctrl[] = {DPHY_ISO_DL_CTRL_L0,
+ DPHY_ISO_DL_CTRL_L1,
+ DPHY_ISO_DL_CTRL_L2,
+ DPHY_ISO_DL_CTRL_L3};
+ void __iomem *reg = dphy->regs;
+ unsigned int i;
+ int ret;
+
+ /* Clock lane */
+ ret = cdns_dphy_rx_wait_for_bit(reg + DPHY_ISO_CL_CTRL_L,
+ DPHY_ISO_LANE_READY_BIT);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < lanes; i++) {
+ ret = cdns_dphy_rx_wait_for_bit(reg + data_lane_ctrl[i],
+ DPHY_ISO_LANE_READY_BIT);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cdns_dphy_rx_configure(struct phy *phy,
+ union phy_configure_opts *opts)
+{
+ struct cdns_dphy_rx *dphy = phy_get_drvdata(phy);
+ unsigned int reg, lanes = opts->mipi_dphy.lanes;
+ int band_ctrl, ret;
+
+ /* Data lanes. Minimum one lane is mandatory. */
+ if (lanes < DPHY_LANES_MIN || lanes > DPHY_LANES_MAX)
+ return -EINVAL;
+
+ band_ctrl = cdns_dphy_rx_get_band_ctrl(opts->mipi_dphy.hs_clk_rate);
+ if (band_ctrl < 0)
+ return band_ctrl;
+
+ reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, band_ctrl) |
+ FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, band_ctrl);
+ writel(reg, dphy->regs + DPHY_BAND_CFG);
+
+ /*
+ * Set the required power island phase 2 time. This is mandated by DPHY
+ * specs.
+ */
+ reg = DPHY_POWER_ISLAND_EN_DATA_VAL;
+ writel(reg, dphy->regs + DPHY_POWER_ISLAND_EN_DATA);
+ reg = DPHY_POWER_ISLAND_EN_CLK_VAL;
+ writel(reg, dphy->regs + DPHY_POWER_ISLAND_EN_CLK);
+
+ ret = cdns_dphy_rx_wait_lane_ready(dphy, lanes);
+ if (ret) {
+ dev_err(dphy->dev, "DPHY wait for lane ready timeout\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cdns_dphy_rx_validate(struct phy *phy, enum phy_mode mode,
+ int submode, union phy_configure_opts *opts)
+{
+ int ret;
+
+ if (mode != PHY_MODE_MIPI_DPHY)
+ return -EINVAL;
+
+ ret = cdns_dphy_rx_get_band_ctrl(opts->mipi_dphy.hs_clk_rate);
+ if (ret < 0)
+ return ret;
+
+ return phy_mipi_dphy_config_validate(&opts->mipi_dphy);
+}
+
+static const struct phy_ops cdns_dphy_rx_ops = {
+ .power_on = cdns_dphy_rx_power_on,
+ .power_off = cdns_dphy_rx_power_off,
+ .configure = cdns_dphy_rx_configure,
+ .validate = cdns_dphy_rx_validate,
+};
+
+static int cdns_dphy_rx_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy_provider *provider;
+ struct cdns_dphy_rx *dphy;
+
+ dphy = devm_kzalloc(dev, sizeof(*dphy), GFP_KERNEL);
+ if (!dphy)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, dphy);
+ dphy->dev = dev;
+
+ dphy->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dphy->regs))
+ return PTR_ERR(dphy->regs);
+
+ dphy->phy = devm_phy_create(dev, NULL, &cdns_dphy_rx_ops);
+ if (IS_ERR(dphy->phy)) {
+ dev_err(dev, "Failed to create PHY: %ld\n", PTR_ERR(dphy->phy));
+ return PTR_ERR(dphy->phy);
+ }
+
+ phy_set_drvdata(dphy->phy, dphy);
+ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(provider)) {
+ dev_err(dev, "Failed to register PHY provider: %ld\n",
+ PTR_ERR(provider));
+ return PTR_ERR(provider);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id cdns_dphy_rx_of_match[] = {
+ { .compatible = "cdns,dphy-rx" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cdns_dphy_rx_of_match);
+
+static struct platform_driver cdns_dphy_rx_platform_driver = {
+ .probe = cdns_dphy_rx_probe,
+ .driver = {
+ .name = "cdns-mipi-dphy-rx",
+ .of_match_table = cdns_dphy_rx_of_match,
+ },
+};
+module_platform_driver(cdns_dphy_rx_platform_driver);
+
+MODULE_AUTHOR("Pratyush Yadav <p.yadav@ti.com>");
+MODULE_DESCRIPTION("Cadence D-PHY Rx Driver");
+MODULE_LICENSE("GPL");
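
Aside, not part of the patch: a rough sketch of how a CSI-2 receiver driver
might drive this PHY through the generic PHY framework. The helper name, its
parameters, and the call ordering are assumptions for illustration; only the
phy_* APIs themselves are real kernel interfaces.

	#include <linux/phy/phy.h>
	#include <linux/phy/phy-mipi-dphy.h>

	/*
	 * Hypothetical helper in a CSI-2 receiver driver: derive a D-PHY
	 * configuration from the incoming pixel stream, then bring the PHY up.
	 */
	static int csi2rx_setup_dphy(struct phy *dphy, unsigned long pixel_rate_hz,
				     unsigned int bpp, unsigned int lanes)
	{
		union phy_configure_opts opts = { };
		int ret;

		/* Fills hs_clk_rate and default MIPI D-PHY timing values. */
		ret = phy_mipi_dphy_get_default_config(pixel_rate_hz, bpp, lanes,
						       &opts.mipi_dphy);
		if (ret)
			return ret;

		ret = phy_set_mode(dphy, PHY_MODE_MIPI_DPHY);
		if (ret)
			return ret;

		/* Exercises cdns_dphy_rx_validate(): band lookup + timing checks. */
		ret = phy_validate(dphy, PHY_MODE_MIPI_DPHY, 0, &opts);
		if (ret)
			return ret;

		ret = phy_power_on(dphy);	/* starts the RX state machine */
		if (ret)
			return ret;

		ret = phy_configure(dphy, &opts); /* band config + lane-ready wait */
		if (ret)
			phy_power_off(dphy);

		return ret;
	}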
diff --git a/drivers/phy/cadence/cdns-dphy.c b/drivers/phy/cadence/cdns-dphy.c
new file mode 100644
index 000000000..3dfdfb33c
--- /dev/null
+++ b/drivers/phy/cadence/cdns-dphy.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright: 2017-2018 Cadence Design Systems, Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-mipi-dphy.h>
+
+#define REG_WAKEUP_TIME_NS 800
+#define DPHY_PLL_RATE_HZ 108000000
+#define POLL_TIMEOUT_US 1000
+
+/* DPHY registers */
+#define DPHY_PMA_CMN(reg) (reg)
+#define DPHY_PMA_LCLK(reg) (0x100 + (reg))
+#define DPHY_PMA_LDATA(lane, reg) (0x200 + ((lane) * 0x100) + (reg))
+#define DPHY_PMA_RCLK(reg) (0x600 + (reg))
+#define DPHY_PMA_RDATA(lane, reg) (0x700 + ((lane) * 0x100) + (reg))
+#define DPHY_PCS(reg) (0xb00 + (reg))
+
+#define DPHY_CMN_SSM DPHY_PMA_CMN(0x20)
+#define DPHY_CMN_SSM_EN BIT(0)
+#define DPHY_CMN_TX_MODE_EN BIT(9)
+
+#define DPHY_CMN_PWM DPHY_PMA_CMN(0x40)
+#define DPHY_CMN_PWM_DIV(x) ((x) << 20)
+#define DPHY_CMN_PWM_LOW(x) ((x) << 10)
+#define DPHY_CMN_PWM_HIGH(x) (x)
+
+#define DPHY_CMN_FBDIV DPHY_PMA_CMN(0x4c)
+#define DPHY_CMN_FBDIV_VAL(low, high) (((high) << 11) | ((low) << 22))
+#define DPHY_CMN_FBDIV_FROM_REG (BIT(10) | BIT(21))
+
+#define DPHY_CMN_OPIPDIV DPHY_PMA_CMN(0x50)
+#define DPHY_CMN_IPDIV_FROM_REG BIT(0)
+#define DPHY_CMN_IPDIV(x) ((x) << 1)
+#define DPHY_CMN_OPDIV_FROM_REG BIT(6)
+#define DPHY_CMN_OPDIV(x) ((x) << 7)
+
+#define DPHY_BAND_CFG DPHY_PCS(0x0)
+#define DPHY_BAND_CFG_LEFT_BAND GENMASK(4, 0)
+#define DPHY_BAND_CFG_RIGHT_BAND GENMASK(9, 5)
+
+#define DPHY_PSM_CFG DPHY_PCS(0x4)
+#define DPHY_PSM_CFG_FROM_REG BIT(0)
+#define DPHY_PSM_CLK_DIV(x) ((x) << 1)
+
+#define DSI_HBP_FRAME_OVERHEAD 12
+#define DSI_HSA_FRAME_OVERHEAD 14
+#define DSI_HFP_FRAME_OVERHEAD 6
+#define DSI_HSS_VSS_VSE_FRAME_OVERHEAD 4
+#define DSI_BLANKING_FRAME_OVERHEAD 6
+#define DSI_NULL_FRAME_OVERHEAD 6
+#define DSI_EOT_PKT_SIZE 4
+
+#define DPHY_TX_J721E_WIZ_PLL_CTRL 0xF04
+#define DPHY_TX_J721E_WIZ_STATUS 0xF08
+#define DPHY_TX_J721E_WIZ_RST_CTRL 0xF0C
+#define DPHY_TX_J721E_WIZ_PSM_FREQ 0xF10
+
+#define DPHY_TX_J721E_WIZ_IPDIV GENMASK(4, 0)
+#define DPHY_TX_J721E_WIZ_OPDIV GENMASK(13, 8)
+#define DPHY_TX_J721E_WIZ_FBDIV GENMASK(25, 16)
+#define DPHY_TX_J721E_WIZ_LANE_RSTB BIT(31)
+#define DPHY_TX_WIZ_PLL_LOCK BIT(31)
+#define DPHY_TX_WIZ_O_CMN_READY BIT(31)
+
+struct cdns_dphy_cfg {
+ u8 pll_ipdiv;
+ u8 pll_opdiv;
+ u16 pll_fbdiv;
+ unsigned int nlanes;
+};
+
+enum cdns_dphy_clk_lane_cfg {
+ DPHY_CLK_CFG_LEFT_DRIVES_ALL = 0,
+ DPHY_CLK_CFG_LEFT_DRIVES_RIGHT = 1,
+ DPHY_CLK_CFG_LEFT_DRIVES_LEFT = 2,
+ DPHY_CLK_CFG_RIGHT_DRIVES_ALL = 3,
+};
+
+struct cdns_dphy;
+struct cdns_dphy_ops {
+ int (*probe)(struct cdns_dphy *dphy);
+ void (*remove)(struct cdns_dphy *dphy);
+ void (*set_psm_div)(struct cdns_dphy *dphy, u8 div);
+ void (*set_clk_lane_cfg)(struct cdns_dphy *dphy,
+ enum cdns_dphy_clk_lane_cfg cfg);
+ void (*set_pll_cfg)(struct cdns_dphy *dphy,
+ const struct cdns_dphy_cfg *cfg);
+ unsigned long (*get_wakeup_time_ns)(struct cdns_dphy *dphy);
+};
+
+struct cdns_dphy {
+ struct cdns_dphy_cfg cfg;
+ void __iomem *regs;
+ struct clk *psm_clk;
+ struct clk *pll_ref_clk;
+ const struct cdns_dphy_ops *ops;
+ struct phy *phy;
+};
+
+/* Order of bands is important since the index is the band number. */
+static const unsigned int tx_bands[] = {
+ 80, 100, 120, 160, 200, 240, 320, 390, 450, 510, 560, 640, 690, 770,
+ 870, 950, 1000, 1200, 1400, 1600, 1800, 2000, 2200, 2500
+};
+
+static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy,
+ struct cdns_dphy_cfg *cfg,
+ struct phy_configure_opts_mipi_dphy *opts,
+ unsigned int *dsi_hfp_ext)
+{
+ unsigned long pll_ref_hz = clk_get_rate(dphy->pll_ref_clk);
+ u64 dlane_bps;
+
+ memset(cfg, 0, sizeof(*cfg));
+
+ if (pll_ref_hz < 9600000 || pll_ref_hz >= 150000000)
+ return -EINVAL;
+ else if (pll_ref_hz < 19200000)
+ cfg->pll_ipdiv = 1;
+ else if (pll_ref_hz < 38400000)
+ cfg->pll_ipdiv = 2;
+ else if (pll_ref_hz < 76800000)
+ cfg->pll_ipdiv = 4;
+ else
+ cfg->pll_ipdiv = 8;
+
+ dlane_bps = opts->hs_clk_rate;
+
+ if (dlane_bps > 2500000000UL || dlane_bps < 160000000UL)
+ return -EINVAL;
+ else if (dlane_bps >= 1250000000)
+ cfg->pll_opdiv = 1;
+ else if (dlane_bps >= 630000000)
+ cfg->pll_opdiv = 2;
+ else if (dlane_bps >= 320000000)
+ cfg->pll_opdiv = 4;
+ else if (dlane_bps >= 160000000)
+ cfg->pll_opdiv = 8;
+
+ cfg->pll_fbdiv = DIV_ROUND_UP_ULL(dlane_bps * 2 * cfg->pll_opdiv *
+ cfg->pll_ipdiv,
+ pll_ref_hz);
+
+ return 0;
+}
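
Aside, not part of the patch: a worked example of the divider selection in
cdns_dsi_get_dphy_pll_cfg() above, using an illustrative 25 MHz PLL reference
and a 1.5 Gbps per-lane rate (neither value comes from the patch).

	#include <stdio.h>
	#include <stdint.h>

	/* Reproduce the ipdiv/opdiv/fbdiv selection for one operating point. */
	int main(void)
	{
		uint64_t pll_ref_hz = 25000000;		/* assumed reference clock */
		uint64_t dlane_bps = 1500000000ULL;	/* assumed per-lane HS bit rate */
		unsigned int ipdiv, opdiv, fbdiv;

		ipdiv = (pll_ref_hz < 19200000) ? 1 :
			(pll_ref_hz < 38400000) ? 2 :
			(pll_ref_hz < 76800000) ? 4 : 8;

		opdiv = (dlane_bps >= 1250000000ULL) ? 1 :
			(dlane_bps >= 630000000ULL)  ? 2 :
			(dlane_bps >= 320000000ULL)  ? 4 : 8;

		/* Equivalent of DIV_ROUND_UP_ULL() in the driver. */
		fbdiv = (dlane_bps * 2 * opdiv * ipdiv + pll_ref_hz - 1) / pll_ref_hz;

		/* Prints ipdiv=2 opdiv=1 fbdiv=240 */
		printf("ipdiv=%u opdiv=%u fbdiv=%u\n", ipdiv, opdiv, fbdiv);
		return 0;
	}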
+
+static int cdns_dphy_setup_psm(struct cdns_dphy *dphy)
+{
+ unsigned long psm_clk_hz = clk_get_rate(dphy->psm_clk);
+ unsigned long psm_div;
+
+ if (!psm_clk_hz || psm_clk_hz > 100000000)
+ return -EINVAL;
+
+ psm_div = DIV_ROUND_CLOSEST(psm_clk_hz, 1000000);
+ if (dphy->ops->set_psm_div)
+ dphy->ops->set_psm_div(dphy, psm_div);
+
+ return 0;
+}
+
+static void cdns_dphy_set_clk_lane_cfg(struct cdns_dphy *dphy,
+ enum cdns_dphy_clk_lane_cfg cfg)
+{
+ if (dphy->ops->set_clk_lane_cfg)
+ dphy->ops->set_clk_lane_cfg(dphy, cfg);
+}
+
+static void cdns_dphy_set_pll_cfg(struct cdns_dphy *dphy,
+ const struct cdns_dphy_cfg *cfg)
+{
+ if (dphy->ops->set_pll_cfg)
+ dphy->ops->set_pll_cfg(dphy, cfg);
+}
+
+static unsigned long cdns_dphy_get_wakeup_time_ns(struct cdns_dphy *dphy)
+{
+ return dphy->ops->get_wakeup_time_ns(dphy);
+}
+
+static unsigned long cdns_dphy_ref_get_wakeup_time_ns(struct cdns_dphy *dphy)
+{
+ /* Default wakeup time is 800 ns (in a simulated environment). */
+ return 800;
+}
+
+static void cdns_dphy_ref_set_pll_cfg(struct cdns_dphy *dphy,
+ const struct cdns_dphy_cfg *cfg)
+{
+ u32 fbdiv_low, fbdiv_high;
+
+ fbdiv_low = (cfg->pll_fbdiv / 4) - 2;
+ fbdiv_high = cfg->pll_fbdiv - fbdiv_low - 2;
+
+ writel(DPHY_CMN_IPDIV_FROM_REG | DPHY_CMN_OPDIV_FROM_REG |
+ DPHY_CMN_IPDIV(cfg->pll_ipdiv) |
+ DPHY_CMN_OPDIV(cfg->pll_opdiv),
+ dphy->regs + DPHY_CMN_OPIPDIV);
+ writel(DPHY_CMN_FBDIV_FROM_REG |
+ DPHY_CMN_FBDIV_VAL(fbdiv_low, fbdiv_high),
+ dphy->regs + DPHY_CMN_FBDIV);
+ writel(DPHY_CMN_PWM_HIGH(6) | DPHY_CMN_PWM_LOW(0x101) |
+ DPHY_CMN_PWM_DIV(0x8),
+ dphy->regs + DPHY_CMN_PWM);
+}
+
+static void cdns_dphy_ref_set_psm_div(struct cdns_dphy *dphy, u8 div)
+{
+ writel(DPHY_PSM_CFG_FROM_REG | DPHY_PSM_CLK_DIV(div),
+ dphy->regs + DPHY_PSM_CFG);
+}
+
+static unsigned long cdns_dphy_j721e_get_wakeup_time_ns(struct cdns_dphy *dphy)
+{
+ /* Minimum wakeup time as per MIPI D-PHY spec v1.2 */
+ return 1000000;
+}
+
+static void cdns_dphy_j721e_set_pll_cfg(struct cdns_dphy *dphy,
+ const struct cdns_dphy_cfg *cfg)
+{
+ u32 status;
+
+ /*
+ * Set the PWM and PLL byte-clock divider settings to the recommended
+ * values, the same as those used in the reference ops.
+ */
+ writel(DPHY_CMN_PWM_HIGH(6) | DPHY_CMN_PWM_LOW(0x101) |
+ DPHY_CMN_PWM_DIV(0x8),
+ dphy->regs + DPHY_CMN_PWM);
+
+ writel((FIELD_PREP(DPHY_TX_J721E_WIZ_IPDIV, cfg->pll_ipdiv) |
+ FIELD_PREP(DPHY_TX_J721E_WIZ_OPDIV, cfg->pll_opdiv) |
+ FIELD_PREP(DPHY_TX_J721E_WIZ_FBDIV, cfg->pll_fbdiv)),
+ dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL);
+
+ writel(DPHY_TX_J721E_WIZ_LANE_RSTB,
+ dphy->regs + DPHY_TX_J721E_WIZ_RST_CTRL);
+
+ readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL, status,
+ (status & DPHY_TX_WIZ_PLL_LOCK), 0, POLL_TIMEOUT_US);
+
+ readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_STATUS, status,
+ (status & DPHY_TX_WIZ_O_CMN_READY), 0,
+ POLL_TIMEOUT_US);
+}
+
+static void cdns_dphy_j721e_set_psm_div(struct cdns_dphy *dphy, u8 div)
+{
+ writel(div, dphy->regs + DPHY_TX_J721E_WIZ_PSM_FREQ);
+}
+
+/*
+ * This is the reference implementation of the DPHY hooks. Specific
+ * integrations of this IP may have to re-implement some of them,
+ * depending on how things are wired up in the SoC.
+ */
+static const struct cdns_dphy_ops ref_dphy_ops = {
+ .get_wakeup_time_ns = cdns_dphy_ref_get_wakeup_time_ns,
+ .set_pll_cfg = cdns_dphy_ref_set_pll_cfg,
+ .set_psm_div = cdns_dphy_ref_set_psm_div,
+};
+
+static const struct cdns_dphy_ops j721e_dphy_ops = {
+ .get_wakeup_time_ns = cdns_dphy_j721e_get_wakeup_time_ns,
+ .set_pll_cfg = cdns_dphy_j721e_set_pll_cfg,
+ .set_psm_div = cdns_dphy_j721e_set_psm_div,
+};
+
+static int cdns_dphy_config_from_opts(struct phy *phy,
+ struct phy_configure_opts_mipi_dphy *opts,
+ struct cdns_dphy_cfg *cfg)
+{
+ struct cdns_dphy *dphy = phy_get_drvdata(phy);
+ unsigned int dsi_hfp_ext = 0;
+ int ret;
+
+ ret = phy_mipi_dphy_config_validate(opts);
+ if (ret)
+ return ret;
+
+ ret = cdns_dsi_get_dphy_pll_cfg(dphy, cfg,
+ opts, &dsi_hfp_ext);
+ if (ret)
+ return ret;
+
+ opts->wakeup = cdns_dphy_get_wakeup_time_ns(dphy) / 1000;
+
+ return 0;
+}
+
+static int cdns_dphy_tx_get_band_ctrl(unsigned long hs_clk_rate)
+{
+ unsigned int rate;
+ int i;
+
+ rate = hs_clk_rate / 1000000UL;
+
+ if (rate < tx_bands[0])
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < ARRAY_SIZE(tx_bands) - 1; i++) {
+ if (rate >= tx_bands[i] && rate < tx_bands[i + 1])
+ return i;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int cdns_dphy_validate(struct phy *phy, enum phy_mode mode, int submode,
+ union phy_configure_opts *opts)
+{
+ struct cdns_dphy_cfg cfg = { 0 };
+
+ if (mode != PHY_MODE_MIPI_DPHY)
+ return -EINVAL;
+
+ return cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg);
+}
+
+static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
+{
+ struct cdns_dphy *dphy = phy_get_drvdata(phy);
+ struct cdns_dphy_cfg cfg = { 0 };
+ int ret, band_ctrl;
+ unsigned int reg;
+
+ ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure the internal PSM clk divider so that the DPHY has a
+ * 1MHz clk (or something close).
+ */
+ ret = cdns_dphy_setup_psm(dphy);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure the attachment of clock lanes to data lanes: the DPHY has
+ * 2 clock lanes and 8 data lanes, and each clock lane can be attached
+ * to a different set of data lanes. The 2 groups are named 'left' and
+ * 'right', so here we just say that we want the 'left' clock lane to
+ * drive the 'left' data lanes.
+ */
+ cdns_dphy_set_clk_lane_cfg(dphy, DPHY_CLK_CFG_LEFT_DRIVES_LEFT);
+
+ /*
+ * Configure the DPHY PLL that will be used to generate the TX byte
+ * clk.
+ */
+ cdns_dphy_set_pll_cfg(dphy, &cfg);
+
+ band_ctrl = cdns_dphy_tx_get_band_ctrl(opts->mipi_dphy.hs_clk_rate);
+ if (band_ctrl < 0)
+ return band_ctrl;
+
+ reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, band_ctrl) |
+ FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, band_ctrl);
+ writel(reg, dphy->regs + DPHY_BAND_CFG);
+
+ return 0;
+}
+
+static int cdns_dphy_power_on(struct phy *phy)
+{
+ struct cdns_dphy *dphy = phy_get_drvdata(phy);
+
+ clk_prepare_enable(dphy->psm_clk);
+ clk_prepare_enable(dphy->pll_ref_clk);
+
+ /* Start TX state machine. */
+ writel(DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN,
+ dphy->regs + DPHY_CMN_SSM);
+
+ return 0;
+}
+
+static int cdns_dphy_power_off(struct phy *phy)
+{
+ struct cdns_dphy *dphy = phy_get_drvdata(phy);
+
+ clk_disable_unprepare(dphy->pll_ref_clk);
+ clk_disable_unprepare(dphy->psm_clk);
+
+ return 0;
+}
+
+static const struct phy_ops cdns_dphy_ops = {
+ .configure = cdns_dphy_configure,
+ .validate = cdns_dphy_validate,
+ .power_on = cdns_dphy_power_on,
+ .power_off = cdns_dphy_power_off,
+};
+
+static int cdns_dphy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct cdns_dphy *dphy;
+ int ret;
+
+ dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL);
+ if (!dphy)
+ return -ENOMEM;
+ dev_set_drvdata(&pdev->dev, dphy);
+
+ dphy->ops = of_device_get_match_data(&pdev->dev);
+ if (!dphy->ops)
+ return -EINVAL;
+
+ dphy->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dphy->regs))
+ return PTR_ERR(dphy->regs);
+
+ dphy->psm_clk = devm_clk_get(&pdev->dev, "psm");
+ if (IS_ERR(dphy->psm_clk))
+ return PTR_ERR(dphy->psm_clk);
+
+ dphy->pll_ref_clk = devm_clk_get(&pdev->dev, "pll_ref");
+ if (IS_ERR(dphy->pll_ref_clk))
+ return PTR_ERR(dphy->pll_ref_clk);
+
+ if (dphy->ops->probe) {
+ ret = dphy->ops->probe(dphy);
+ if (ret)
+ return ret;
+ }
+
+ dphy->phy = devm_phy_create(&pdev->dev, NULL, &cdns_dphy_ops);
+ if (IS_ERR(dphy->phy)) {
+ dev_err(&pdev->dev, "failed to create PHY\n");
+ if (dphy->ops->remove)
+ dphy->ops->remove(dphy);
+ return PTR_ERR(dphy->phy);
+ }
+
+ phy_set_drvdata(dphy->phy, dphy);
+ phy_provider = devm_of_phy_provider_register(&pdev->dev,
+ of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static int cdns_dphy_remove(struct platform_device *pdev)
+{
+ struct cdns_dphy *dphy = dev_get_drvdata(&pdev->dev);
+
+ if (dphy->ops->remove)
+ dphy->ops->remove(dphy);
+
+ return 0;
+}
+
+static const struct of_device_id cdns_dphy_of_match[] = {
+ { .compatible = "cdns,dphy", .data = &ref_dphy_ops },
+ { .compatible = "ti,j721e-dphy", .data = &j721e_dphy_ops },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cdns_dphy_of_match);
+
+static struct platform_driver cdns_dphy_platform_driver = {
+ .probe = cdns_dphy_probe,
+ .remove = cdns_dphy_remove,
+ .driver = {
+ .name = "cdns-mipi-dphy",
+ .of_match_table = cdns_dphy_of_match,
+ },
+};
+module_platform_driver(cdns_dphy_platform_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin.com>");
+MODULE_DESCRIPTION("Cadence MIPI D-PHY Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/cadence/phy-cadence-salvo.c b/drivers/phy/cadence/phy-cadence-salvo.c
new file mode 100644
index 000000000..e569f5f67
--- /dev/null
+++ b/drivers/phy/cadence/phy-cadence-salvo.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * The Salvo PHY is a legacy 28nm PHY used only for USB3 and USB2.
+ *
+ * Copyright (c) 2019-2020 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+/* PHY register definition */
+#define PHY_PMA_CMN_CTRL1 0xC800
+#define TB_ADDR_CMN_DIAG_HSCLK_SEL 0x01e0
+#define TB_ADDR_CMN_PLL0_VCOCAL_INIT_TMR 0x0084
+#define TB_ADDR_CMN_PLL0_VCOCAL_ITER_TMR 0x0085
+#define TB_ADDR_CMN_PLL0_INTDIV 0x0094
+#define TB_ADDR_CMN_PLL0_FRACDIV 0x0095
+#define TB_ADDR_CMN_PLL0_HIGH_THR 0x0096
+#define TB_ADDR_CMN_PLL0_SS_CTRL1 0x0098
+#define TB_ADDR_CMN_PLL0_SS_CTRL2 0x0099
+#define TB_ADDR_CMN_PLL0_DSM_DIAG 0x0097
+#define TB_ADDR_CMN_DIAG_PLL0_OVRD 0x01c2
+#define TB_ADDR_CMN_DIAG_PLL0_FBH_OVRD 0x01c0
+#define TB_ADDR_CMN_DIAG_PLL0_FBL_OVRD 0x01c1
+#define TB_ADDR_CMN_DIAG_PLL0_V2I_TUNE 0x01C5
+#define TB_ADDR_CMN_DIAG_PLL0_CP_TUNE 0x01C6
+#define TB_ADDR_CMN_DIAG_PLL0_LF_PROG 0x01C7
+#define TB_ADDR_CMN_DIAG_PLL0_TEST_MODE 0x01c4
+#define TB_ADDR_CMN_PSM_CLK_CTRL 0x0061
+#define TB_ADDR_XCVR_DIAG_RX_LANE_CAL_RST_TMR 0x40ea
+#define TB_ADDR_XCVR_PSM_RCTRL 0x4001
+#define TB_ADDR_TX_PSC_A0 0x4100
+#define TB_ADDR_TX_PSC_A1 0x4101
+#define TB_ADDR_TX_PSC_A2 0x4102
+#define TB_ADDR_TX_PSC_A3 0x4103
+#define TB_ADDR_TX_DIAG_ECTRL_OVRD 0x41f5
+#define TB_ADDR_TX_PSC_CAL 0x4106
+#define TB_ADDR_TX_PSC_RDY 0x4107
+#define TB_ADDR_RX_PSC_A0 0x8000
+#define TB_ADDR_RX_PSC_A1 0x8001
+#define TB_ADDR_RX_PSC_A2 0x8002
+#define TB_ADDR_RX_PSC_A3 0x8003
+#define TB_ADDR_RX_PSC_CAL 0x8006
+#define TB_ADDR_RX_PSC_RDY 0x8007
+#define TB_ADDR_TX_TXCC_MGNLS_MULT_000 0x4058
+#define TB_ADDR_TX_DIAG_BGREF_PREDRV_DELAY 0x41e7
+#define TB_ADDR_RX_SLC_CU_ITER_TMR 0x80e3
+#define TB_ADDR_RX_SIGDET_HL_FILT_TMR 0x8090
+#define TB_ADDR_RX_SAMP_DAC_CTRL 0x8058
+#define TB_ADDR_RX_DIAG_SIGDET_TUNE 0x81dc
+#define TB_ADDR_RX_DIAG_LFPSDET_TUNE2 0x81df
+#define TB_ADDR_RX_DIAG_BS_TM 0x81f5
+#define TB_ADDR_RX_DIAG_DFE_CTRL1 0x81d3
+#define TB_ADDR_RX_DIAG_ILL_IQE_TRIM4 0x81c7
+#define TB_ADDR_RX_DIAG_ILL_E_TRIM0 0x81c2
+#define TB_ADDR_RX_DIAG_ILL_IQ_TRIM0 0x81c1
+#define TB_ADDR_RX_DIAG_ILL_IQE_TRIM6 0x81c9
+#define TB_ADDR_RX_DIAG_RXFE_TM3 0x81f8
+#define TB_ADDR_RX_DIAG_RXFE_TM4 0x81f9
+#define TB_ADDR_RX_DIAG_LFPSDET_TUNE 0x81dd
+#define TB_ADDR_RX_DIAG_DFE_CTRL3 0x81d5
+#define TB_ADDR_RX_DIAG_SC2C_DELAY 0x81e1
+#define TB_ADDR_RX_REE_VGA_GAIN_NODFE 0x81bf
+#define TB_ADDR_XCVR_PSM_CAL_TMR 0x4002
+#define TB_ADDR_XCVR_PSM_A0BYP_TMR 0x4004
+#define TB_ADDR_XCVR_PSM_A0IN_TMR 0x4003
+#define TB_ADDR_XCVR_PSM_A1IN_TMR 0x4005
+#define TB_ADDR_XCVR_PSM_A2IN_TMR 0x4006
+#define TB_ADDR_XCVR_PSM_A3IN_TMR 0x4007
+#define TB_ADDR_XCVR_PSM_A4IN_TMR 0x4008
+#define TB_ADDR_XCVR_PSM_A5IN_TMR 0x4009
+#define TB_ADDR_XCVR_PSM_A0OUT_TMR 0x400a
+#define TB_ADDR_XCVR_PSM_A1OUT_TMR 0x400b
+#define TB_ADDR_XCVR_PSM_A2OUT_TMR 0x400c
+#define TB_ADDR_XCVR_PSM_A3OUT_TMR 0x400d
+#define TB_ADDR_XCVR_PSM_A4OUT_TMR 0x400e
+#define TB_ADDR_XCVR_PSM_A5OUT_TMR 0x400f
+#define TB_ADDR_TX_RCVDET_EN_TMR 0x4122
+#define TB_ADDR_TX_RCVDET_ST_TMR 0x4123
+#define TB_ADDR_XCVR_DIAG_LANE_FCM_EN_MGN_TMR 0x40f2
+#define TB_ADDR_TX_RCVDETSC_CTRL 0x4124
+
+/* TB_ADDR_TX_RCVDETSC_CTRL */
+#define RXDET_IN_P3_32KHZ BIT(0)
+
+struct cdns_reg_pairs {
+ u16 val;
+ u32 off;
+};
+
+struct cdns_salvo_data {
+ u8 reg_offset_shift;
+ const struct cdns_reg_pairs *init_sequence_val;
+ u8 init_sequence_length;
+};
+
+struct cdns_salvo_phy {
+ struct phy *phy;
+ struct clk *clk;
+ void __iomem *base;
+ struct cdns_salvo_data *data;
+};
+
+static const struct of_device_id cdns_salvo_phy_of_match[];
+static u16 cdns_salvo_read(struct cdns_salvo_phy *salvo_phy, u32 reg)
+{
+ return (u16)readl(salvo_phy->base +
+ reg * (1 << salvo_phy->data->reg_offset_shift));
+}
+
+static void cdns_salvo_write(struct cdns_salvo_phy *salvo_phy,
+ u32 reg, u16 val)
+{
+ writel(val, salvo_phy->base +
+ reg * (1 << salvo_phy->data->reg_offset_shift));
+}
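
Aside, not part of the patch: a quick check of the register addressing used by
cdns_salvo_read()/cdns_salvo_write() above. Each 16-bit PHY register is strided
by (1 << reg_offset_shift) bytes in the MMIO window; the shift of 2 comes from
the NXP match data further down in this file.

	#include <stdio.h>

	int main(void)
	{
		unsigned int reg_offset_shift = 2;	/* as in cdns_nxp_salvo_data */
		unsigned int regs[] = { 0xC800 /* PHY_PMA_CMN_CTRL1 */,
					0x4100 /* TB_ADDR_TX_PSC_A0 */ };

		for (unsigned int i = 0; i < 2; i++)
			printf("reg 0x%04x -> byte offset 0x%05x\n",
			       regs[i], regs[i] * (1u << reg_offset_shift));
		/* Prints byte offsets 0x32000 and 0x10400 respectively. */
		return 0;
	}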
+
+/*
+ * The bring-up register/value pairs below come from the Cadence PHY
+ * User Guide and NXP platform tuning results.
+ */
+static const struct cdns_reg_pairs cdns_nxp_sequence_pair[] = {
+ {0x0830, PHY_PMA_CMN_CTRL1},
+ {0x0010, TB_ADDR_CMN_DIAG_HSCLK_SEL},
+ {0x00f0, TB_ADDR_CMN_PLL0_VCOCAL_INIT_TMR},
+ {0x0018, TB_ADDR_CMN_PLL0_VCOCAL_ITER_TMR},
+ {0x00d0, TB_ADDR_CMN_PLL0_INTDIV},
+ {0x4aaa, TB_ADDR_CMN_PLL0_FRACDIV},
+ {0x0034, TB_ADDR_CMN_PLL0_HIGH_THR},
+ {0x01ee, TB_ADDR_CMN_PLL0_SS_CTRL1},
+ {0x7f03, TB_ADDR_CMN_PLL0_SS_CTRL2},
+ {0x0020, TB_ADDR_CMN_PLL0_DSM_DIAG},
+ {0x0000, TB_ADDR_CMN_DIAG_PLL0_OVRD},
+ {0x0000, TB_ADDR_CMN_DIAG_PLL0_FBH_OVRD},
+ {0x0000, TB_ADDR_CMN_DIAG_PLL0_FBL_OVRD},
+ {0x0007, TB_ADDR_CMN_DIAG_PLL0_V2I_TUNE},
+ {0x0027, TB_ADDR_CMN_DIAG_PLL0_CP_TUNE},
+ {0x0008, TB_ADDR_CMN_DIAG_PLL0_LF_PROG},
+ {0x0022, TB_ADDR_CMN_DIAG_PLL0_TEST_MODE},
+ {0x000a, TB_ADDR_CMN_PSM_CLK_CTRL},
+ {0x0139, TB_ADDR_XCVR_DIAG_RX_LANE_CAL_RST_TMR},
+ {0xbefc, TB_ADDR_XCVR_PSM_RCTRL},
+
+ {0x7799, TB_ADDR_TX_PSC_A0},
+ {0x7798, TB_ADDR_TX_PSC_A1},
+ {0x509b, TB_ADDR_TX_PSC_A2},
+ {0x0003, TB_ADDR_TX_DIAG_ECTRL_OVRD},
+ {0x509b, TB_ADDR_TX_PSC_A3},
+ {0x2090, TB_ADDR_TX_PSC_CAL},
+ {0x2090, TB_ADDR_TX_PSC_RDY},
+
+ {0xA6FD, TB_ADDR_RX_PSC_A0},
+ {0xA6FD, TB_ADDR_RX_PSC_A1},
+ {0xA410, TB_ADDR_RX_PSC_A2},
+ {0x2410, TB_ADDR_RX_PSC_A3},
+
+ {0x23FF, TB_ADDR_RX_PSC_CAL},
+ {0x2010, TB_ADDR_RX_PSC_RDY},
+
+ {0x0020, TB_ADDR_TX_TXCC_MGNLS_MULT_000},
+ {0x00ff, TB_ADDR_TX_DIAG_BGREF_PREDRV_DELAY},
+ {0x0002, TB_ADDR_RX_SLC_CU_ITER_TMR},
+ {0x0013, TB_ADDR_RX_SIGDET_HL_FILT_TMR},
+ {0x0000, TB_ADDR_RX_SAMP_DAC_CTRL},
+ {0x1004, TB_ADDR_RX_DIAG_SIGDET_TUNE},
+ {0x4041, TB_ADDR_RX_DIAG_LFPSDET_TUNE2},
+ {0x0480, TB_ADDR_RX_DIAG_BS_TM},
+ {0x8006, TB_ADDR_RX_DIAG_DFE_CTRL1},
+ {0x003f, TB_ADDR_RX_DIAG_ILL_IQE_TRIM4},
+ {0x543f, TB_ADDR_RX_DIAG_ILL_E_TRIM0},
+ {0x543f, TB_ADDR_RX_DIAG_ILL_IQ_TRIM0},
+ {0x0000, TB_ADDR_RX_DIAG_ILL_IQE_TRIM6},
+ {0x8000, TB_ADDR_RX_DIAG_RXFE_TM3},
+ {0x0003, TB_ADDR_RX_DIAG_RXFE_TM4},
+ {0x2408, TB_ADDR_RX_DIAG_LFPSDET_TUNE},
+ {0x05ca, TB_ADDR_RX_DIAG_DFE_CTRL3},
+ {0x0258, TB_ADDR_RX_DIAG_SC2C_DELAY},
+ {0x1fff, TB_ADDR_RX_REE_VGA_GAIN_NODFE},
+
+ {0x02c6, TB_ADDR_XCVR_PSM_CAL_TMR},
+ {0x0002, TB_ADDR_XCVR_PSM_A0BYP_TMR},
+ {0x02c6, TB_ADDR_XCVR_PSM_A0IN_TMR},
+ {0x0010, TB_ADDR_XCVR_PSM_A1IN_TMR},
+ {0x0010, TB_ADDR_XCVR_PSM_A2IN_TMR},
+ {0x0010, TB_ADDR_XCVR_PSM_A3IN_TMR},
+ {0x0010, TB_ADDR_XCVR_PSM_A4IN_TMR},
+ {0x0010, TB_ADDR_XCVR_PSM_A5IN_TMR},
+
+ {0x0002, TB_ADDR_XCVR_PSM_A0OUT_TMR},
+ {0x0002, TB_ADDR_XCVR_PSM_A1OUT_TMR},
+ {0x0002, TB_ADDR_XCVR_PSM_A2OUT_TMR},
+ {0x0002, TB_ADDR_XCVR_PSM_A3OUT_TMR},
+ {0x0002, TB_ADDR_XCVR_PSM_A4OUT_TMR},
+ {0x0002, TB_ADDR_XCVR_PSM_A5OUT_TMR},
+ /* Change rx detect parameter */
+ {0x0960, TB_ADDR_TX_RCVDET_EN_TMR},
+ {0x01e0, TB_ADDR_TX_RCVDET_ST_TMR},
+ {0x0090, TB_ADDR_XCVR_DIAG_LANE_FCM_EN_MGN_TMR},
+};
+
+static int cdns_salvo_phy_init(struct phy *phy)
+{
+ struct cdns_salvo_phy *salvo_phy = phy_get_drvdata(phy);
+ struct cdns_salvo_data *data = salvo_phy->data;
+ int ret, i;
+ u16 value;
+
+ ret = clk_prepare_enable(salvo_phy->clk);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < data->init_sequence_length; i++) {
+ const struct cdns_reg_pairs *reg_pair = data->init_sequence_val + i;
+
+ cdns_salvo_write(salvo_phy, reg_pair->off, reg_pair->val);
+ }
+
+ /* RXDET_IN_P3_32KHZ, Receiver detect slow clock enable */
+ value = cdns_salvo_read(salvo_phy, TB_ADDR_TX_RCVDETSC_CTRL);
+ value |= RXDET_IN_P3_32KHZ;
+ cdns_salvo_write(salvo_phy, TB_ADDR_TX_RCVDETSC_CTRL, value);
+
+ udelay(10);
+
+ clk_disable_unprepare(salvo_phy->clk);
+
+ return ret;
+}
+
+static int cdns_salvo_phy_power_on(struct phy *phy)
+{
+ struct cdns_salvo_phy *salvo_phy = phy_get_drvdata(phy);
+
+ return clk_prepare_enable(salvo_phy->clk);
+}
+
+static int cdns_salvo_phy_power_off(struct phy *phy)
+{
+ struct cdns_salvo_phy *salvo_phy = phy_get_drvdata(phy);
+
+ clk_disable_unprepare(salvo_phy->clk);
+
+ return 0;
+}
+
+static const struct phy_ops cdns_salvo_phy_ops = {
+ .init = cdns_salvo_phy_init,
+ .power_on = cdns_salvo_phy_power_on,
+ .power_off = cdns_salvo_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int cdns_salvo_phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ struct cdns_salvo_phy *salvo_phy;
+ struct cdns_salvo_data *data;
+
+ data = (struct cdns_salvo_data *)of_device_get_match_data(dev);
+ salvo_phy = devm_kzalloc(dev, sizeof(*salvo_phy), GFP_KERNEL);
+ if (!salvo_phy)
+ return -ENOMEM;
+
+ salvo_phy->data = data;
+ salvo_phy->clk = devm_clk_get_optional(dev, "salvo_phy_clk");
+ if (IS_ERR(salvo_phy->clk))
+ return PTR_ERR(salvo_phy->clk);
+
+ salvo_phy->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(salvo_phy->base))
+ return PTR_ERR(salvo_phy->base);
+
+ salvo_phy->phy = devm_phy_create(dev, NULL, &cdns_salvo_phy_ops);
+ if (IS_ERR(salvo_phy->phy))
+ return PTR_ERR(salvo_phy->phy);
+
+ phy_set_drvdata(salvo_phy->phy, salvo_phy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct cdns_salvo_data cdns_nxp_salvo_data = {
+ 2,
+ cdns_nxp_sequence_pair,
+ ARRAY_SIZE(cdns_nxp_sequence_pair),
+};
+
+static const struct of_device_id cdns_salvo_phy_of_match[] = {
+ {
+ .compatible = "nxp,salvo-phy",
+ .data = &cdns_nxp_salvo_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cdns_salvo_phy_of_match);
+
+static struct platform_driver cdns_salvo_phy_driver = {
+ .probe = cdns_salvo_phy_probe,
+ .driver = {
+ .name = "cdns-salvo-phy",
+ .of_match_table = cdns_salvo_phy_of_match,
+ }
+};
+module_platform_driver(cdns_salvo_phy_driver);
+
+MODULE_AUTHOR("Peter Chen <peter.chen@nxp.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence SALVO PHY Driver");
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
new file mode 100644
index 000000000..6e86a6517
--- /dev/null
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -0,0 +1,2534 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence Sierra PHY Driver
+ *
+ * Copyright (c) 2018 Cadence Design Systems
+ * Author: Alan Douglas <adouglas@cadence.com>
+ *
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/phy/phy-cadence.h>
+
+#define NUM_SSC_MODE 3
+#define NUM_PHY_TYPE 4
+
+/* PHY register offsets */
+#define SIERRA_COMMON_CDB_OFFSET 0x0
+#define SIERRA_MACRO_ID_REG 0x0
+#define SIERRA_CMN_PLLLC_GEN_PREG 0x42
+#define SIERRA_CMN_PLLLC_MODE_PREG 0x48
+#define SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG 0x49
+#define SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG 0x4A
+#define SIERRA_CMN_PLLLC_LOCK_CNTSTART_PREG 0x4B
+#define SIERRA_CMN_PLLLC_CLK1_PREG 0x4D
+#define SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG 0x4F
+#define SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG 0x50
+#define SIERRA_CMN_PLLLC_DSMCORR_PREG 0x51
+#define SIERRA_CMN_PLLLC_SS_PREG 0x52
+#define SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG 0x53
+#define SIERRA_CMN_PLLLC_SSTWOPT_PREG 0x54
+#define SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG 0x62
+#define SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG 0x63
+#define SIERRA_CMN_REFRCV_PREG 0x98
+#define SIERRA_CMN_REFRCV1_PREG 0xB8
+#define SIERRA_CMN_PLLLC1_GEN_PREG 0xC2
+#define SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG 0xCA
+#define SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG 0xD0
+#define SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG 0xE2
+
+#define SIERRA_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
+ ((0x4000 << (block_offset)) + \
+ (((ln) << 9) << (reg_offset)))
+
+#define SIERRA_DET_STANDEC_A_PREG 0x000
+#define SIERRA_DET_STANDEC_B_PREG 0x001
+#define SIERRA_DET_STANDEC_C_PREG 0x002
+#define SIERRA_DET_STANDEC_D_PREG 0x003
+#define SIERRA_DET_STANDEC_E_PREG 0x004
+#define SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG 0x008
+#define SIERRA_PSM_A0IN_TMR_PREG 0x009
+#define SIERRA_PSM_A3IN_TMR_PREG 0x00C
+#define SIERRA_PSM_DIAG_PREG 0x015
+#define SIERRA_PSC_LN_A3_PREG 0x023
+#define SIERRA_PSC_LN_A4_PREG 0x024
+#define SIERRA_PSC_LN_IDLE_PREG 0x026
+#define SIERRA_PSC_TX_A0_PREG 0x028
+#define SIERRA_PSC_TX_A1_PREG 0x029
+#define SIERRA_PSC_TX_A2_PREG 0x02A
+#define SIERRA_PSC_TX_A3_PREG 0x02B
+#define SIERRA_PSC_RX_A0_PREG 0x030
+#define SIERRA_PSC_RX_A1_PREG 0x031
+#define SIERRA_PSC_RX_A2_PREG 0x032
+#define SIERRA_PSC_RX_A3_PREG 0x033
+#define SIERRA_PLLCTRL_SUBRATE_PREG 0x03A
+#define SIERRA_PLLCTRL_GEN_A_PREG 0x03B
+#define SIERRA_PLLCTRL_GEN_D_PREG 0x03E
+#define SIERRA_PLLCTRL_CPGAIN_MODE_PREG 0x03F
+#define SIERRA_PLLCTRL_STATUS_PREG 0x044
+#define SIERRA_CLKPATH_BIASTRIM_PREG 0x04B
+#define SIERRA_DFE_BIASTRIM_PREG 0x04C
+#define SIERRA_DRVCTRL_ATTEN_PREG 0x06A
+#define SIERRA_DRVCTRL_BOOST_PREG 0x06F
+#define SIERRA_TX_RCVDET_OVRD_PREG 0x072
+#define SIERRA_CLKPATHCTRL_TMR_PREG 0x081
+#define SIERRA_RX_CREQ_FLTR_A_MODE3_PREG 0x085
+#define SIERRA_RX_CREQ_FLTR_A_MODE2_PREG 0x086
+#define SIERRA_RX_CREQ_FLTR_A_MODE1_PREG 0x087
+#define SIERRA_RX_CREQ_FLTR_A_MODE0_PREG 0x088
+#define SIERRA_CREQ_DCBIASATTEN_OVR_PREG 0x08C
+#define SIERRA_CREQ_CCLKDET_MODE01_PREG 0x08E
+#define SIERRA_RX_CTLE_CAL_PREG 0x08F
+#define SIERRA_RX_CTLE_MAINTENANCE_PREG 0x091
+#define SIERRA_CREQ_FSMCLK_SEL_PREG 0x092
+#define SIERRA_CREQ_EQ_CTRL_PREG 0x093
+#define SIERRA_CREQ_SPARE_PREG 0x096
+#define SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG 0x097
+#define SIERRA_CTLELUT_CTRL_PREG 0x098
+#define SIERRA_DFE_ECMP_RATESEL_PREG 0x0C0
+#define SIERRA_DFE_SMP_RATESEL_PREG 0x0C1
+#define SIERRA_DEQ_PHALIGN_CTRL 0x0C4
+#define SIERRA_DEQ_CONCUR_CTRL1_PREG 0x0C8
+#define SIERRA_DEQ_CONCUR_CTRL2_PREG 0x0C9
+#define SIERRA_DEQ_EPIPWR_CTRL2_PREG 0x0CD
+#define SIERRA_DEQ_FAST_MAINT_CYCLES_PREG 0x0CE
+#define SIERRA_DEQ_ERRCMP_CTRL_PREG 0x0D0
+#define SIERRA_DEQ_OFFSET_CTRL_PREG 0x0D8
+#define SIERRA_DEQ_GAIN_CTRL_PREG 0x0E0
+#define SIERRA_DEQ_VGATUNE_CTRL_PREG 0x0E1
+#define SIERRA_DEQ_GLUT0 0x0E8
+#define SIERRA_DEQ_GLUT1 0x0E9
+#define SIERRA_DEQ_GLUT2 0x0EA
+#define SIERRA_DEQ_GLUT3 0x0EB
+#define SIERRA_DEQ_GLUT4 0x0EC
+#define SIERRA_DEQ_GLUT5 0x0ED
+#define SIERRA_DEQ_GLUT6 0x0EE
+#define SIERRA_DEQ_GLUT7 0x0EF
+#define SIERRA_DEQ_GLUT8 0x0F0
+#define SIERRA_DEQ_GLUT9 0x0F1
+#define SIERRA_DEQ_GLUT10 0x0F2
+#define SIERRA_DEQ_GLUT11 0x0F3
+#define SIERRA_DEQ_GLUT12 0x0F4
+#define SIERRA_DEQ_GLUT13 0x0F5
+#define SIERRA_DEQ_GLUT14 0x0F6
+#define SIERRA_DEQ_GLUT15 0x0F7
+#define SIERRA_DEQ_GLUT16 0x0F8
+#define SIERRA_DEQ_ALUT0 0x108
+#define SIERRA_DEQ_ALUT1 0x109
+#define SIERRA_DEQ_ALUT2 0x10A
+#define SIERRA_DEQ_ALUT3 0x10B
+#define SIERRA_DEQ_ALUT4 0x10C
+#define SIERRA_DEQ_ALUT5 0x10D
+#define SIERRA_DEQ_ALUT6 0x10E
+#define SIERRA_DEQ_ALUT7 0x10F
+#define SIERRA_DEQ_ALUT8 0x110
+#define SIERRA_DEQ_ALUT9 0x111
+#define SIERRA_DEQ_ALUT10 0x112
+#define SIERRA_DEQ_ALUT11 0x113
+#define SIERRA_DEQ_ALUT12 0x114
+#define SIERRA_DEQ_ALUT13 0x115
+#define SIERRA_DEQ_DFETAP_CTRL_PREG 0x128
+#define SIERRA_DEQ_DFETAP0 0x129
+#define SIERRA_DEQ_DFETAP1 0x12B
+#define SIERRA_DEQ_DFETAP2 0x12D
+#define SIERRA_DEQ_DFETAP3 0x12F
+#define SIERRA_DEQ_DFETAP4 0x131
+#define SIERRA_DFE_EN_1010_IGNORE_PREG 0x134
+#define SIERRA_DEQ_PRECUR_PREG 0x138
+#define SIERRA_DEQ_POSTCUR_PREG 0x140
+#define SIERRA_DEQ_POSTCUR_DECR_PREG 0x142
+#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150
+#define SIERRA_DEQ_TAU_CTRL2_PREG 0x151
+#define SIERRA_DEQ_TAU_CTRL3_PREG 0x152
+#define SIERRA_DEQ_OPENEYE_CTRL_PREG 0x158
+#define SIERRA_DEQ_PICTRL_PREG 0x161
+#define SIERRA_CPICAL_TMRVAL_MODE1_PREG 0x170
+#define SIERRA_CPICAL_TMRVAL_MODE0_PREG 0x171
+#define SIERRA_CPICAL_PICNT_MODE1_PREG 0x174
+#define SIERRA_CPI_OUTBUF_RATESEL_PREG 0x17C
+#define SIERRA_CPI_RESBIAS_BIN_PREG 0x17E
+#define SIERRA_CPI_TRIM_PREG 0x17F
+#define SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG 0x183
+#define SIERRA_EPI_CTRL_PREG 0x187
+#define SIERRA_LFPSDET_SUPPORT_PREG 0x188
+#define SIERRA_LFPSFILT_NS_PREG 0x18A
+#define SIERRA_LFPSFILT_RD_PREG 0x18B
+#define SIERRA_LFPSFILT_MP_PREG 0x18C
+#define SIERRA_SIGDET_SUPPORT_PREG 0x190
+#define SIERRA_SDFILT_H2L_A_PREG 0x191
+#define SIERRA_SDFILT_L2H_PREG 0x193
+#define SIERRA_RXBUFFER_CTLECTRL_PREG 0x19E
+#define SIERRA_RXBUFFER_RCDFECTRL_PREG 0x19F
+#define SIERRA_RXBUFFER_DFECTRL_PREG 0x1A0
+#define SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG 0x14F
+#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150
+
+/* PHY PCS common registers */
+#define SIERRA_PHY_PCS_COMMON_OFFSET(block_offset) \
+ (0xc000 << (block_offset))
+#define SIERRA_PHY_PIPE_CMN_CTRL1 0x0
+#define SIERRA_PHY_PLL_CFG 0xe
+
+/* PHY PCS lane registers */
+#define SIERRA_PHY_PCS_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
+ ((0xD000 << (block_offset)) + \
+ (((ln) << 8) << (reg_offset)))
+
+#define SIERRA_PHY_ISO_LINK_CTRL 0xB
+
+/* PHY PMA common registers */
+#define SIERRA_PHY_PMA_COMMON_OFFSET(block_offset) \
+ (0xE000 << (block_offset))
+#define SIERRA_PHY_PMA_CMN_CTRL 0x000
+
+/* PHY PMA lane registers */
+#define SIERRA_PHY_PMA_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
+ ((0xF000 << (block_offset)) + \
+ (((ln) << 8) << (reg_offset)))
+
+#define SIERRA_PHY_PMA_XCVR_CTRL 0x000
+
+#define SIERRA_MACRO_ID 0x00007364
+#define SIERRA_MAX_LANES 16
+#define PLL_LOCK_TIME 100000
+
+#define CDNS_SIERRA_OUTPUT_CLOCKS 3
+#define CDNS_SIERRA_INPUT_CLOCKS 5
+enum cdns_sierra_clock_input {
+ PHY_CLK,
+ CMN_REFCLK_DIG_DIV,
+ CMN_REFCLK1_DIG_DIV,
+ PLL0_REFCLK,
+ PLL1_REFCLK,
+};
+
+#define SIERRA_NUM_CMN_PLLC 2
+#define SIERRA_NUM_CMN_PLLC_PARENTS 2
+
+static const struct reg_field macro_id_type =
+ REG_FIELD(SIERRA_MACRO_ID_REG, 0, 15);
+static const struct reg_field phy_pll_cfg_1 =
+ REG_FIELD(SIERRA_PHY_PLL_CFG, 1, 1);
+static const struct reg_field pma_cmn_ready =
+ REG_FIELD(SIERRA_PHY_PMA_CMN_CTRL, 0, 0);
+static const struct reg_field pllctrl_lock =
+ REG_FIELD(SIERRA_PLLCTRL_STATUS_PREG, 0, 0);
+static const struct reg_field phy_iso_link_ctrl_1 =
+ REG_FIELD(SIERRA_PHY_ISO_LINK_CTRL, 1, 1);
+static const struct reg_field cmn_plllc_clk1outdiv_preg =
+ REG_FIELD(SIERRA_CMN_PLLLC_CLK1_PREG, 0, 6);
+static const struct reg_field cmn_plllc_clk1_en_preg =
+ REG_FIELD(SIERRA_CMN_PLLLC_CLK1_PREG, 12, 12);
+
+static const char * const clk_names[] = {
+ [CDNS_SIERRA_PLL_CMNLC] = "pll_cmnlc",
+ [CDNS_SIERRA_PLL_CMNLC1] = "pll_cmnlc1",
+ [CDNS_SIERRA_DERIVED_REFCLK] = "refclk_der",
+};
+
+enum cdns_sierra_cmn_plllc {
+ CMN_PLLLC,
+ CMN_PLLLC1,
+};
+
+struct cdns_sierra_pll_mux_reg_fields {
+ struct reg_field pfdclk_sel_preg;
+ struct reg_field plllc1en_field;
+ struct reg_field termen_field;
+};
+
+static const struct cdns_sierra_pll_mux_reg_fields cmn_plllc_pfdclk1_sel_preg[] = {
+ [CMN_PLLLC] = {
+ .pfdclk_sel_preg = REG_FIELD(SIERRA_CMN_PLLLC_GEN_PREG, 1, 1),
+ .plllc1en_field = REG_FIELD(SIERRA_CMN_REFRCV1_PREG, 8, 8),
+ .termen_field = REG_FIELD(SIERRA_CMN_REFRCV1_PREG, 0, 0),
+ },
+ [CMN_PLLLC1] = {
+ .pfdclk_sel_preg = REG_FIELD(SIERRA_CMN_PLLLC1_GEN_PREG, 1, 1),
+ .plllc1en_field = REG_FIELD(SIERRA_CMN_REFRCV_PREG, 8, 8),
+ .termen_field = REG_FIELD(SIERRA_CMN_REFRCV_PREG, 0, 0),
+ },
+};
+
+struct cdns_sierra_pll_mux {
+ struct clk_hw hw;
+ struct regmap_field *pfdclk_sel_preg;
+ struct regmap_field *plllc1en_field;
+ struct regmap_field *termen_field;
+ struct clk_init_data clk_data;
+};
+
+#define to_cdns_sierra_pll_mux(_hw) \
+ container_of(_hw, struct cdns_sierra_pll_mux, hw)
+
+static const int pll_mux_parent_index[][SIERRA_NUM_CMN_PLLC_PARENTS] = {
+ [CMN_PLLLC] = { PLL0_REFCLK, PLL1_REFCLK },
+ [CMN_PLLLC1] = { PLL1_REFCLK, PLL0_REFCLK },
+};
+
+static u32 cdns_sierra_pll_mux_table[][SIERRA_NUM_CMN_PLLC_PARENTS] = {
+ [CMN_PLLLC] = { 0, 1 },
+ [CMN_PLLLC1] = { 1, 0 },
+};
+
+struct cdns_sierra_derived_refclk {
+ struct clk_hw hw;
+ struct regmap_field *cmn_plllc_clk1outdiv_preg;
+ struct regmap_field *cmn_plllc_clk1_en_preg;
+ struct clk_init_data clk_data;
+};
+
+#define to_cdns_sierra_derived_refclk(_hw) \
+ container_of(_hw, struct cdns_sierra_derived_refclk, hw)
+
+enum cdns_sierra_phy_type {
+ TYPE_NONE,
+ TYPE_PCIE,
+ TYPE_USB,
+ TYPE_QSGMII
+};
+
+enum cdns_sierra_ssc_mode {
+ NO_SSC,
+ EXTERNAL_SSC,
+ INTERNAL_SSC
+};
+
+struct cdns_sierra_inst {
+ struct phy *phy;
+ enum cdns_sierra_phy_type phy_type;
+ u32 num_lanes;
+ u32 mlane;
+ struct reset_control *lnk_rst;
+ enum cdns_sierra_ssc_mode ssc_mode;
+};
+
+struct cdns_reg_pairs {
+ u16 val;
+ u32 off;
+};
+
+struct cdns_sierra_vals {
+ const struct cdns_reg_pairs *reg_pairs;
+ u32 num_regs;
+};
+
+struct cdns_sierra_data {
+ u32 id_value;
+ u8 block_offset_shift;
+ u8 reg_offset_shift;
+ struct cdns_sierra_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_sierra_vals *phy_pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_sierra_vals *pma_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_sierra_vals *pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+};
+
+struct cdns_regmap_cdb_context {
+ struct device *dev;
+ void __iomem *base;
+ u8 reg_offset_shift;
+};
+
+struct cdns_sierra_phy {
+ struct device *dev;
+ const struct cdns_sierra_data *init_data;
+ struct cdns_sierra_inst phys[SIERRA_MAX_LANES];
+ struct reset_control *phy_rst;
+ struct reset_control *apb_rst;
+ struct regmap *regmap_lane_cdb[SIERRA_MAX_LANES];
+ struct regmap *regmap_phy_pcs_common_cdb;
+ struct regmap *regmap_phy_pcs_lane_cdb[SIERRA_MAX_LANES];
+ struct regmap *regmap_phy_pma_common_cdb;
+ struct regmap *regmap_phy_pma_lane_cdb[SIERRA_MAX_LANES];
+ struct regmap *regmap_common_cdb;
+ struct regmap_field *macro_id_type;
+ struct regmap_field *phy_pll_cfg_1;
+ struct regmap_field *pma_cmn_ready;
+ struct regmap_field *pllctrl_lock[SIERRA_MAX_LANES];
+ struct regmap_field *phy_iso_link_ctrl_1[SIERRA_MAX_LANES];
+ struct regmap_field *cmn_refrcv_refclk_plllc1en_preg[SIERRA_NUM_CMN_PLLC];
+ struct regmap_field *cmn_refrcv_refclk_termen_preg[SIERRA_NUM_CMN_PLLC];
+ struct regmap_field *cmn_plllc_pfdclk1_sel_preg[SIERRA_NUM_CMN_PLLC];
+ struct clk *input_clks[CDNS_SIERRA_INPUT_CLOCKS];
+ int nsubnodes;
+ u32 num_lanes;
+ bool autoconf;
+ int already_configured;
+ struct clk_onecell_data clk_data;
+ struct clk *output_clks[CDNS_SIERRA_OUTPUT_CLOCKS];
+};
+
+static int cdns_regmap_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct cdns_regmap_cdb_context *ctx = context;
+ u32 offset = reg << ctx->reg_offset_shift;
+
+ writew(val, ctx->base + offset);
+
+ return 0;
+}
+
+static int cdns_regmap_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct cdns_regmap_cdb_context *ctx = context;
+ u32 offset = reg << ctx->reg_offset_shift;
+
+ *val = readw(ctx->base + offset);
+ return 0;
+}
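
Aside, not part of the patch: the regmap_config tables below only carry these
reg_read/reg_write callbacks, and a regmap is then created with a NULL
regmap_bus so that those callbacks are used for all accesses. A minimal sketch
of that pattern, reusing struct cdns_regmap_cdb_context from above; the helper
name is an assumption, and the driver's own init code sits later in the file,
outside this excerpt.

	/* Wrap one CDB region in a regmap that goes through the
	 * cdns_regmap_read()/cdns_regmap_write() accessors above.
	 */
	static struct regmap *cdns_regmap_init_block(struct device *dev,
						     void __iomem *base,
						     u8 reg_offset_shift,
						     const struct regmap_config *config)
	{
		struct cdns_regmap_cdb_context *ctx;

		ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return ERR_PTR(-ENOMEM);

		ctx->dev = dev;
		ctx->base = base;
		ctx->reg_offset_shift = reg_offset_shift;

		/* No regmap_bus: reg_read/reg_write from @config are used instead. */
		return devm_regmap_init(dev, NULL, ctx, config);
	}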
+
+#define SIERRA_LANE_CDB_REGMAP_CONF(n) \
+{ \
+ .name = "sierra_lane" n "_cdb", \
+ .reg_stride = 1, \
+ .fast_io = true, \
+ .reg_write = cdns_regmap_write, \
+ .reg_read = cdns_regmap_read, \
+}
+
+static const struct regmap_config cdns_sierra_lane_cdb_config[] = {
+ SIERRA_LANE_CDB_REGMAP_CONF("0"),
+ SIERRA_LANE_CDB_REGMAP_CONF("1"),
+ SIERRA_LANE_CDB_REGMAP_CONF("2"),
+ SIERRA_LANE_CDB_REGMAP_CONF("3"),
+ SIERRA_LANE_CDB_REGMAP_CONF("4"),
+ SIERRA_LANE_CDB_REGMAP_CONF("5"),
+ SIERRA_LANE_CDB_REGMAP_CONF("6"),
+ SIERRA_LANE_CDB_REGMAP_CONF("7"),
+ SIERRA_LANE_CDB_REGMAP_CONF("8"),
+ SIERRA_LANE_CDB_REGMAP_CONF("9"),
+ SIERRA_LANE_CDB_REGMAP_CONF("10"),
+ SIERRA_LANE_CDB_REGMAP_CONF("11"),
+ SIERRA_LANE_CDB_REGMAP_CONF("12"),
+ SIERRA_LANE_CDB_REGMAP_CONF("13"),
+ SIERRA_LANE_CDB_REGMAP_CONF("14"),
+ SIERRA_LANE_CDB_REGMAP_CONF("15"),
+};
+
+static const struct regmap_config cdns_sierra_common_cdb_config = {
+ .name = "sierra_common_cdb",
+ .reg_stride = 1,
+ .fast_io = true,
+ .reg_write = cdns_regmap_write,
+ .reg_read = cdns_regmap_read,
+};
+
+static const struct regmap_config cdns_sierra_phy_pcs_cmn_cdb_config = {
+ .name = "sierra_phy_pcs_cmn_cdb",
+ .reg_stride = 1,
+ .fast_io = true,
+ .reg_write = cdns_regmap_write,
+ .reg_read = cdns_regmap_read,
+};
+
+#define SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF(n) \
+{ \
+ .name = "sierra_phy_pcs_lane" n "_cdb", \
+ .reg_stride = 1, \
+ .fast_io = true, \
+ .reg_write = cdns_regmap_write, \
+ .reg_read = cdns_regmap_read, \
+}
+
+static const struct regmap_config cdns_sierra_phy_pcs_lane_cdb_config[] = {
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("0"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("1"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("2"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("3"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("4"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("5"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("6"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("7"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("8"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("9"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("10"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("11"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("12"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("13"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("14"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("15"),
+};
+
+static const struct regmap_config cdns_sierra_phy_pma_cmn_cdb_config = {
+ .name = "sierra_phy_pma_cmn_cdb",
+ .reg_stride = 1,
+ .fast_io = true,
+ .reg_write = cdns_regmap_write,
+ .reg_read = cdns_regmap_read,
+};
+
+#define SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF(n) \
+{ \
+ .name = "sierra_phy_pma_lane" n "_cdb", \
+ .reg_stride = 1, \
+ .fast_io = true, \
+ .reg_write = cdns_regmap_write, \
+ .reg_read = cdns_regmap_read, \
+}
+
+static const struct regmap_config cdns_sierra_phy_pma_lane_cdb_config[] = {
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("0"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("1"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("2"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("3"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("4"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("5"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("6"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("7"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("8"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("9"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("10"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("11"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("12"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("13"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("14"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("15"),
+};
+
+static int cdns_sierra_phy_init(struct phy *gphy)
+{
+ struct cdns_sierra_inst *ins = phy_get_drvdata(gphy);
+ struct cdns_sierra_phy *phy = dev_get_drvdata(gphy->dev.parent);
+ const struct cdns_sierra_data *init_data = phy->init_data;
+ struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
+ enum cdns_sierra_phy_type phy_type = ins->phy_type;
+ enum cdns_sierra_ssc_mode ssc = ins->ssc_mode;
+ struct cdns_sierra_vals *phy_pma_ln_vals;
+ const struct cdns_reg_pairs *reg_pairs;
+ struct cdns_sierra_vals *pcs_cmn_vals;
+ struct regmap *regmap;
+ u32 num_regs;
+ int i, j;
+
+ /* Initialise the PHY registers, unless auto configured */
+ if (phy->autoconf || phy->already_configured || phy->nsubnodes > 1)
+ return 0;
+
+ clk_set_rate(phy->input_clks[CMN_REFCLK_DIG_DIV], 25000000);
+ clk_set_rate(phy->input_clks[CMN_REFCLK1_DIG_DIV], 25000000);
+
+ /* PHY PCS common registers configurations */
+ pcs_cmn_vals = init_data->pcs_cmn_vals[phy_type][TYPE_NONE][ssc];
+ if (pcs_cmn_vals) {
+ reg_pairs = pcs_cmn_vals->reg_pairs;
+ num_regs = pcs_cmn_vals->num_regs;
+ regmap = phy->regmap_phy_pcs_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
+ }
+
+ /* PHY PMA lane registers configurations */
+ phy_pma_ln_vals = init_data->phy_pma_ln_vals[phy_type][TYPE_NONE][ssc];
+ if (phy_pma_ln_vals) {
+ reg_pairs = phy_pma_ln_vals->reg_pairs;
+ num_regs = phy_pma_ln_vals->num_regs;
+ for (i = 0; i < ins->num_lanes; i++) {
+ regmap = phy->regmap_phy_pma_lane_cdb[i + ins->mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
+ }
+ }
+
+ /* PMA common registers configurations */
+ pma_cmn_vals = init_data->pma_cmn_vals[phy_type][TYPE_NONE][ssc];
+ if (pma_cmn_vals) {
+ reg_pairs = pma_cmn_vals->reg_pairs;
+ num_regs = pma_cmn_vals->num_regs;
+ regmap = phy->regmap_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
+ }
+
+ /* PMA lane registers configurations */
+ pma_ln_vals = init_data->pma_ln_vals[phy_type][TYPE_NONE][ssc];
+ if (pma_ln_vals) {
+ reg_pairs = pma_ln_vals->reg_pairs;
+ num_regs = pma_ln_vals->num_regs;
+ for (i = 0; i < ins->num_lanes; i++) {
+ regmap = phy->regmap_lane_cdb[i + ins->mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
+ }
+ }
+
+ return 0;
+}
+
+static int cdns_sierra_phy_on(struct phy *gphy)
+{
+ struct cdns_sierra_phy *sp = dev_get_drvdata(gphy->dev.parent);
+ struct cdns_sierra_inst *ins = phy_get_drvdata(gphy);
+ struct device *dev = sp->dev;
+ u32 val;
+ int ret;
+
+ if (sp->nsubnodes == 1) {
+ /* Take the PHY out of reset */
+ ret = reset_control_deassert(sp->phy_rst);
+ if (ret) {
+ dev_err(dev, "Failed to take the PHY out of reset\n");
+ return ret;
+ }
+ }
+
+ /* Take the PHY lane group out of reset */
+ ret = reset_control_deassert(ins->lnk_rst);
+ if (ret) {
+ dev_err(dev, "Failed to take the PHY lane out of reset\n");
+ return ret;
+ }
+
+ if (ins->phy_type == TYPE_PCIE || ins->phy_type == TYPE_USB) {
+ ret = regmap_field_read_poll_timeout(sp->phy_iso_link_ctrl_1[ins->mlane],
+ val, !val, 1000, PLL_LOCK_TIME);
+ if (ret) {
+ dev_err(dev, "Timeout waiting for PHY status ready\n");
+ return ret;
+ }
+ }
+
+ /*
+ * Wait for cmn_ready assertion
+ * PHY_PMA_CMN_CTRL[0] == 1
+ */
+ ret = regmap_field_read_poll_timeout(sp->pma_cmn_ready, val, val,
+ 1000, PLL_LOCK_TIME);
+ if (ret) {
+ dev_err(dev, "Timeout waiting for CMN ready\n");
+ return ret;
+ }
+
+ ret = regmap_field_read_poll_timeout(sp->pllctrl_lock[ins->mlane],
+ val, val, 1000, PLL_LOCK_TIME);
+ if (ret < 0)
+ dev_err(dev, "PLL lock of lane failed\n");
+
+ return ret;
+}
+
+static int cdns_sierra_phy_off(struct phy *gphy)
+{
+ struct cdns_sierra_inst *ins = phy_get_drvdata(gphy);
+
+ return reset_control_assert(ins->lnk_rst);
+}
+
+static int cdns_sierra_phy_reset(struct phy *gphy)
+{
+ struct cdns_sierra_phy *sp = dev_get_drvdata(gphy->dev.parent);
+
+ reset_control_assert(sp->phy_rst);
+ reset_control_deassert(sp->phy_rst);
+ return 0;
+};
+
+static const struct phy_ops ops = {
+ .init = cdns_sierra_phy_init,
+ .power_on = cdns_sierra_phy_on,
+ .power_off = cdns_sierra_phy_off,
+ .reset = cdns_sierra_phy_reset,
+ .owner = THIS_MODULE,
+};
+
+static int cdns_sierra_noop_phy_on(struct phy *gphy)
+{
+ usleep_range(5000, 10000);
+
+ return 0;
+}
+
+static const struct phy_ops noop_ops = {
+ .power_on = cdns_sierra_noop_phy_on,
+ .owner = THIS_MODULE,
+};
+
+static u8 cdns_sierra_pll_mux_get_parent(struct clk_hw *hw)
+{
+ struct cdns_sierra_pll_mux *mux = to_cdns_sierra_pll_mux(hw);
+ struct regmap_field *plllc1en_field = mux->plllc1en_field;
+ struct regmap_field *termen_field = mux->termen_field;
+ struct regmap_field *field = mux->pfdclk_sel_preg;
+ unsigned int val;
+ int index;
+
+ regmap_field_read(field, &val);
+
+ if (strstr(clk_hw_get_name(hw), clk_names[CDNS_SIERRA_PLL_CMNLC1])) {
+ index = clk_mux_val_to_index(hw, cdns_sierra_pll_mux_table[CMN_PLLLC1], 0, val);
+ if (index == 1) {
+ regmap_field_write(plllc1en_field, 1);
+ regmap_field_write(termen_field, 1);
+ }
+ } else {
+ index = clk_mux_val_to_index(hw, cdns_sierra_pll_mux_table[CMN_PLLLC], 0, val);
+ }
+
+ return index;
+}
+
+static int cdns_sierra_pll_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct cdns_sierra_pll_mux *mux = to_cdns_sierra_pll_mux(hw);
+ struct regmap_field *plllc1en_field = mux->plllc1en_field;
+ struct regmap_field *termen_field = mux->termen_field;
+ struct regmap_field *field = mux->pfdclk_sel_preg;
+ int val, ret;
+
+ ret = regmap_field_write(plllc1en_field, 0);
+ ret |= regmap_field_write(termen_field, 0);
+ if (index == 1) {
+ ret |= regmap_field_write(plllc1en_field, 1);
+ ret |= regmap_field_write(termen_field, 1);
+ }
+
+ if (strstr(clk_hw_get_name(hw), clk_names[CDNS_SIERRA_PLL_CMNLC1]))
+ val = cdns_sierra_pll_mux_table[CMN_PLLLC1][index];
+ else
+ val = cdns_sierra_pll_mux_table[CMN_PLLLC][index];
+
+ ret |= regmap_field_write(field, val);
+
+ return ret;
+}
+
+static const struct clk_ops cdns_sierra_pll_mux_ops = {
+ .set_parent = cdns_sierra_pll_mux_set_parent,
+ .get_parent = cdns_sierra_pll_mux_get_parent,
+};
+
+static int cdns_sierra_pll_mux_register(struct cdns_sierra_phy *sp,
+ struct regmap_field *pfdclk1_sel_field,
+ struct regmap_field *plllc1en_field,
+ struct regmap_field *termen_field,
+ int clk_index)
+{
+ struct cdns_sierra_pll_mux *mux;
+ struct device *dev = sp->dev;
+ struct clk_init_data *init;
+ const char **parent_names;
+ unsigned int num_parents;
+ char clk_name[100];
+ struct clk *clk;
+ int i;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ num_parents = SIERRA_NUM_CMN_PLLC_PARENTS;
+ parent_names = devm_kzalloc(dev, (sizeof(char *) * num_parents), GFP_KERNEL);
+ if (!parent_names)
+ return -ENOMEM;
+
+ for (i = 0; i < num_parents; i++) {
+ clk = sp->input_clks[pll_mux_parent_index[clk_index][i]];
+ if (IS_ERR_OR_NULL(clk)) {
+ dev_err(dev, "No parent clock for PLL mux clocks\n");
+ return IS_ERR(clk) ? PTR_ERR(clk) : -ENOENT;
+ }
+ parent_names[i] = __clk_get_name(clk);
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev), clk_names[clk_index]);
+
+ init = &mux->clk_data;
+
+ init->ops = &cdns_sierra_pll_mux_ops;
+ init->flags = CLK_SET_RATE_NO_REPARENT;
+ init->parent_names = parent_names;
+ init->num_parents = num_parents;
+ init->name = clk_name;
+
+ mux->pfdclk_sel_preg = pfdclk1_sel_field;
+ mux->plllc1en_field = plllc1en_field;
+ mux->termen_field = termen_field;
+ mux->hw.init = init;
+
+ clk = devm_clk_register(dev, &mux->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ sp->output_clks[clk_index] = clk;
+
+ return 0;
+}
+
+static int cdns_sierra_phy_register_pll_mux(struct cdns_sierra_phy *sp)
+{
+ struct regmap_field *pfdclk1_sel_field;
+ struct regmap_field *plllc1en_field;
+ struct regmap_field *termen_field;
+ struct device *dev = sp->dev;
+ int ret = 0, i, clk_index;
+
+ clk_index = CDNS_SIERRA_PLL_CMNLC;
+ for (i = 0; i < SIERRA_NUM_CMN_PLLC; i++, clk_index++) {
+ pfdclk1_sel_field = sp->cmn_plllc_pfdclk1_sel_preg[i];
+ plllc1en_field = sp->cmn_refrcv_refclk_plllc1en_preg[i];
+ termen_field = sp->cmn_refrcv_refclk_termen_preg[i];
+
+ ret = cdns_sierra_pll_mux_register(sp, pfdclk1_sel_field, plllc1en_field,
+ termen_field, clk_index);
+ if (ret) {
+ dev_err(dev, "Fail to register cmn plllc mux\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int cdns_sierra_derived_refclk_enable(struct clk_hw *hw)
+{
+ struct cdns_sierra_derived_refclk *derived_refclk = to_cdns_sierra_derived_refclk(hw);
+
+ regmap_field_write(derived_refclk->cmn_plllc_clk1_en_preg, 0x1);
+
+ /* Program the CLK1 output divider to get 100 MHz on ref_der_clk_out: 5 GHz VCO / 50 = 100 MHz */
+ regmap_field_write(derived_refclk->cmn_plllc_clk1outdiv_preg, 0x2E);
+
+ return 0;
+}
+
+static void cdns_sierra_derived_refclk_disable(struct clk_hw *hw)
+{
+ struct cdns_sierra_derived_refclk *derived_refclk = to_cdns_sierra_derived_refclk(hw);
+
+ regmap_field_write(derived_refclk->cmn_plllc_clk1_en_preg, 0);
+}
+
+static int cdns_sierra_derived_refclk_is_enabled(struct clk_hw *hw)
+{
+ struct cdns_sierra_derived_refclk *derived_refclk = to_cdns_sierra_derived_refclk(hw);
+ int val;
+
+ regmap_field_read(derived_refclk->cmn_plllc_clk1_en_preg, &val);
+
+ return !!val;
+}
+
+static const struct clk_ops cdns_sierra_derived_refclk_ops = {
+ .enable = cdns_sierra_derived_refclk_enable,
+ .disable = cdns_sierra_derived_refclk_disable,
+ .is_enabled = cdns_sierra_derived_refclk_is_enabled,
+};
+
+static int cdns_sierra_derived_refclk_register(struct cdns_sierra_phy *sp)
+{
+ struct cdns_sierra_derived_refclk *derived_refclk;
+ struct device *dev = sp->dev;
+ struct regmap_field *field;
+ struct clk_init_data *init;
+ struct regmap *regmap;
+ char clk_name[100];
+ struct clk *clk;
+
+ derived_refclk = devm_kzalloc(dev, sizeof(*derived_refclk), GFP_KERNEL);
+ if (!derived_refclk)
+ return -ENOMEM;
+
+ snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
+ clk_names[CDNS_SIERRA_DERIVED_REFCLK]);
+
+ init = &derived_refclk->clk_data;
+
+ init->ops = &cdns_sierra_derived_refclk_ops;
+ init->flags = 0;
+ init->name = clk_name;
+
+ regmap = sp->regmap_common_cdb;
+
+ field = devm_regmap_field_alloc(dev, regmap, cmn_plllc_clk1outdiv_preg);
+ if (IS_ERR(field)) {
+ dev_err(dev, "cmn_plllc_clk1outdiv_preg reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ derived_refclk->cmn_plllc_clk1outdiv_preg = field;
+
+ field = devm_regmap_field_alloc(dev, regmap, cmn_plllc_clk1_en_preg);
+ if (IS_ERR(field)) {
+ dev_err(dev, "cmn_plllc_clk1_en_preg reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ derived_refclk->cmn_plllc_clk1_en_preg = field;
+
+ derived_refclk->hw.init = init;
+
+ clk = devm_clk_register(dev, &derived_refclk->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ sp->output_clks[CDNS_SIERRA_DERIVED_REFCLK] = clk;
+
+ return 0;
+}
+
+static void cdns_sierra_clk_unregister(struct cdns_sierra_phy *sp)
+{
+ struct device *dev = sp->dev;
+ struct device_node *node = dev->of_node;
+
+ of_clk_del_provider(node);
+}
+
+static int cdns_sierra_clk_register(struct cdns_sierra_phy *sp)
+{
+ struct device *dev = sp->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+
+ ret = cdns_sierra_phy_register_pll_mux(sp);
+ if (ret) {
+ dev_err(dev, "Failed to pll mux clocks\n");
+ return ret;
+ }
+
+ ret = cdns_sierra_derived_refclk_register(sp);
+ if (ret) {
+ dev_err(dev, "Failed to register derived refclk\n");
+ return ret;
+ }
+
+ sp->clk_data.clks = sp->output_clks;
+ sp->clk_data.clk_num = CDNS_SIERRA_OUTPUT_CLOCKS;
+ ret = of_clk_add_provider(node, of_clk_src_onecell_get, &sp->clk_data);
+ if (ret)
+ dev_err(dev, "Failed to add clock provider: %s\n", node->name);
+
+ return ret;
+}
+
+static int cdns_sierra_get_optional(struct cdns_sierra_inst *inst,
+ struct device_node *child)
+{
+ u32 phy_type;
+
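+ /* "reg" gives the first (master) lane used by this link */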
+ if (of_property_read_u32(child, "reg", &inst->mlane))
+ return -EINVAL;
+
+ if (of_property_read_u32(child, "cdns,num-lanes", &inst->num_lanes))
+ return -EINVAL;
+
+ if (of_property_read_u32(child, "cdns,phy-type", &phy_type))
+ return -EINVAL;
+
+ switch (phy_type) {
+ case PHY_TYPE_PCIE:
+ inst->phy_type = TYPE_PCIE;
+ break;
+ case PHY_TYPE_USB3:
+ inst->phy_type = TYPE_USB;
+ break;
+ case PHY_TYPE_QSGMII:
+ inst->phy_type = TYPE_QSGMII;
+ break;
+ default:
+ return -EINVAL;
+ }
+
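+ /* Default to external SSC; "cdns,ssc-mode" may override this */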
+ inst->ssc_mode = EXTERNAL_SSC;
+ of_property_read_u32(child, "cdns,ssc-mode", &inst->ssc_mode);
+
+ return 0;
+}
+
+static struct regmap *cdns_regmap_init(struct device *dev, void __iomem *base,
+ u32 block_offset, u8 reg_offset_shift,
+ const struct regmap_config *config)
+{
+ struct cdns_regmap_cdb_context *ctx;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->dev = dev;
+ ctx->base = base + block_offset;
+ ctx->reg_offset_shift = reg_offset_shift;
+
+ return devm_regmap_init(dev, NULL, ctx, config);
+}
+
+static int cdns_regfield_init(struct cdns_sierra_phy *sp)
+{
+ struct device *dev = sp->dev;
+ struct regmap_field *field;
+ struct reg_field reg_field;
+ struct regmap *regmap;
+ int i;
+
+ regmap = sp->regmap_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, macro_id_type);
+ if (IS_ERR(field)) {
+ dev_err(dev, "MACRO_ID_TYPE reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ sp->macro_id_type = field;
+
+ for (i = 0; i < SIERRA_NUM_CMN_PLLC; i++) {
+ reg_field = cmn_plllc_pfdclk1_sel_preg[i].pfdclk_sel_preg;
+ field = devm_regmap_field_alloc(dev, regmap, reg_field);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PLLLC%d_PFDCLK1_SEL failed\n", i);
+ return PTR_ERR(field);
+ }
+ sp->cmn_plllc_pfdclk1_sel_preg[i] = field;
+
+ reg_field = cmn_plllc_pfdclk1_sel_preg[i].plllc1en_field;
+ field = devm_regmap_field_alloc(dev, regmap, reg_field);
+ if (IS_ERR(field)) {
+ dev_err(dev, "REFRCV%d_REFCLK_PLLLC1EN failed\n", i);
+ return PTR_ERR(field);
+ }
+ sp->cmn_refrcv_refclk_plllc1en_preg[i] = field;
+
+ reg_field = cmn_plllc_pfdclk1_sel_preg[i].termen_field;
+ field = devm_regmap_field_alloc(dev, regmap, reg_field);
+ if (IS_ERR(field)) {
+ dev_err(dev, "REFRCV%d_REFCLK_TERMEN failed\n", i);
+ return PTR_ERR(field);
+ }
+ sp->cmn_refrcv_refclk_termen_preg[i] = field;
+ }
+
+ regmap = sp->regmap_phy_pcs_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg_1);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_PLL_CFG_1 reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ sp->phy_pll_cfg_1 = field;
+
+ regmap = sp->regmap_phy_pma_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, pma_cmn_ready);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_PMA_CMN_CTRL reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ sp->pma_cmn_ready = field;
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ regmap = sp->regmap_lane_cdb[i];
+ field = devm_regmap_field_alloc(dev, regmap, pllctrl_lock);
+ if (IS_ERR(field)) {
+ dev_err(dev, "P%d_ENABLE reg field init failed\n", i);
+ return PTR_ERR(field);
+ }
+ sp->pllctrl_lock[i] = field;
+ }
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ regmap = sp->regmap_phy_pcs_lane_cdb[i];
+ field = devm_regmap_field_alloc(dev, regmap, phy_iso_link_ctrl_1);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_ISO_LINK_CTRL reg field init for lane %d failed\n", i);
+ return PTR_ERR(field);
+ }
+ sp->phy_iso_link_ctrl_1[i] = field;
+ }
+
+ return 0;
+}
+
+static int cdns_regmap_init_blocks(struct cdns_sierra_phy *sp,
+ void __iomem *base, u8 block_offset_shift,
+ u8 reg_offset_shift)
+{
+ struct device *dev = sp->dev;
+ struct regmap *regmap;
+ u32 block_offset;
+ int i;
+
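+ /* One regmap per lane CDB; the common, PCS and PMA CDB blocks follow below */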
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ block_offset = SIERRA_LANE_CDB_OFFSET(i, block_offset_shift,
+ reg_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset,
+ reg_offset_shift,
+ &cdns_sierra_lane_cdb_config[i]);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init lane CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_lane_cdb[i] = regmap;
+ }
+
+ regmap = cdns_regmap_init(dev, base, SIERRA_COMMON_CDB_OFFSET,
+ reg_offset_shift,
+ &cdns_sierra_common_cdb_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init common CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_common_cdb = regmap;
+
+ block_offset = SIERRA_PHY_PCS_COMMON_OFFSET(block_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset, reg_offset_shift,
+ &cdns_sierra_phy_pcs_cmn_cdb_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PCS common CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_phy_pcs_common_cdb = regmap;
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ block_offset = SIERRA_PHY_PCS_LANE_CDB_OFFSET(i, block_offset_shift,
+ reg_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset,
+ reg_offset_shift,
+ &cdns_sierra_phy_pcs_lane_cdb_config[i]);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PCS lane CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_phy_pcs_lane_cdb[i] = regmap;
+ }
+
+ block_offset = SIERRA_PHY_PMA_COMMON_OFFSET(block_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset, reg_offset_shift,
+ &cdns_sierra_phy_pma_cmn_cdb_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PMA common CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_phy_pma_common_cdb = regmap;
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ block_offset = SIERRA_PHY_PMA_LANE_CDB_OFFSET(i, block_offset_shift,
+ reg_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset,
+ reg_offset_shift,
+ &cdns_sierra_phy_pma_lane_cdb_config[i]);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PMA lane CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_phy_pma_lane_cdb[i] = regmap;
+ }
+
+ return 0;
+}
+
+static int cdns_sierra_phy_get_clocks(struct cdns_sierra_phy *sp,
+ struct device *dev)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get_optional(dev, "cmn_refclk_dig_div");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "cmn_refclk_dig_div clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->input_clks[CMN_REFCLK_DIG_DIV] = clk;
+
+ clk = devm_clk_get_optional(dev, "cmn_refclk1_dig_div");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "cmn_refclk1_dig_div clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->input_clks[CMN_REFCLK1_DIG_DIV] = clk;
+
+ clk = devm_clk_get_optional(dev, "pll0_refclk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "pll0_refclk clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->input_clks[PLL0_REFCLK] = clk;
+
+ clk = devm_clk_get_optional(dev, "pll1_refclk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "pll1_refclk clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->input_clks[PLL1_REFCLK] = clk;
+
+ return 0;
+}
+
+static int cdns_sierra_phy_clk(struct cdns_sierra_phy *sp)
+{
+ struct device *dev = sp->dev;
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get_optional(dev, "phy_clk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get clock phy_clk\n");
+ return PTR_ERR(clk);
+ }
+ sp->input_clks[PHY_CLK] = clk;
+
+ ret = clk_prepare_enable(sp->input_clks[PHY_CLK]);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int cdns_sierra_phy_enable_clocks(struct cdns_sierra_phy *sp)
+{
+ int ret;
+
+ ret = clk_prepare_enable(sp->output_clks[CDNS_SIERRA_PLL_CMNLC]);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(sp->output_clks[CDNS_SIERRA_PLL_CMNLC1]);
+ if (ret)
+ goto err_pll_cmnlc1;
+
+ return 0;
+
+err_pll_cmnlc1:
+ clk_disable_unprepare(sp->output_clks[CDNS_SIERRA_PLL_CMNLC]);
+
+ return ret;
+}
+
+static void cdns_sierra_phy_disable_clocks(struct cdns_sierra_phy *sp)
+{
+ clk_disable_unprepare(sp->output_clks[CDNS_SIERRA_PLL_CMNLC1]);
+ clk_disable_unprepare(sp->output_clks[CDNS_SIERRA_PLL_CMNLC]);
+ if (!sp->already_configured)
+ clk_disable_unprepare(sp->input_clks[PHY_CLK]);
+}
+
+static int cdns_sierra_phy_get_resets(struct cdns_sierra_phy *sp,
+ struct device *dev)
+{
+ struct reset_control *rst;
+
+ rst = devm_reset_control_get_exclusive(dev, "sierra_reset");
+ if (IS_ERR(rst)) {
+ dev_err(dev, "failed to get reset\n");
+ return PTR_ERR(rst);
+ }
+ sp->phy_rst = rst;
+
+ rst = devm_reset_control_get_optional_exclusive(dev, "sierra_apb");
+ if (IS_ERR(rst)) {
+ dev_err(dev, "failed to get apb reset\n");
+ return PTR_ERR(rst);
+ }
+ sp->apb_rst = rst;
+
+ return 0;
+}
+
+static int cdns_sierra_phy_configure_multilink(struct cdns_sierra_phy *sp)
+{
+ const struct cdns_sierra_data *init_data = sp->init_data;
+ struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
+ enum cdns_sierra_phy_type phy_t1, phy_t2;
+ struct cdns_sierra_vals *phy_pma_ln_vals;
+ const struct cdns_reg_pairs *reg_pairs;
+ struct cdns_sierra_vals *pcs_cmn_vals;
+ int i, j, node, mlane, num_lanes, ret;
+ enum cdns_sierra_ssc_mode ssc;
+ struct regmap *regmap;
+ u32 num_regs;
+
+ /* Maximum 2 links (subnodes) are supported */
+ if (sp->nsubnodes != 2)
+ return -EINVAL;
+
+ clk_set_rate(sp->input_clks[CMN_REFCLK_DIG_DIV], 25000000);
+ clk_set_rate(sp->input_clks[CMN_REFCLK1_DIG_DIV], 25000000);
+
+ /* PHY configured to use both PLL LC and LC1 */
+ regmap_field_write(sp->phy_pll_cfg_1, 0x1);
+
+ phy_t1 = sp->phys[0].phy_type;
+ phy_t2 = sp->phys[1].phy_type;
+
+ /*
+ * PHY configuration for multi-link operation is done in two steps.
+ * e.g. Consider a 4-lane PHY with PCIe using 2 lanes and QSGMII using the other 2 lanes.
+ * The Sierra PHY has 2 PLLs, viz. PLLLC and PLLLC1. In this case, PLLLC is used for PCIe
+ * and PLLLC1 is used for QSGMII. The PHY is configured in two steps as described below.
+ *
+ * [1] In the first step, phy_t1 = TYPE_PCIE and phy_t2 = TYPE_QSGMII,
+ * so the register values are selected as [TYPE_PCIE][TYPE_QSGMII][ssc].
+ * This configures the PHY registers associated with PCIe (i.e. the first protocol),
+ * involving the PLLLC registers and the registers for the first 2 lanes of the PHY.
+ * [2] In the second step, the variables phy_t1 and phy_t2 are swapped, so now
+ * phy_t1 = TYPE_QSGMII and phy_t2 = TYPE_PCIE, and the register values are selected as
+ * [TYPE_QSGMII][TYPE_PCIE][ssc].
+ * This configures the PHY registers associated with QSGMII (i.e. the second protocol),
+ * involving the PLLLC1 registers and the registers for the other 2 lanes of the PHY.
+ *
+ * This completes the PHY configuration for multilink operation. This approach enables
+ * dividing the large number of PHY register configurations into protocol-specific
+ * smaller groups.
+ */
+ for (node = 0; node < sp->nsubnodes; node++) {
+ if (node == 1) {
+ /*
+ * Once the first link with phy_t1 is configured, configure the PHY for
+ * the second link with phy_t2. Get the array values as [phy_t2][phy_t1][ssc].
+ */
+ swap(phy_t1, phy_t2);
+ }
+
+ mlane = sp->phys[node].mlane;
+ ssc = sp->phys[node].ssc_mode;
+ num_lanes = sp->phys[node].num_lanes;
+
+ /* PHY PCS common registers configurations */
+ pcs_cmn_vals = init_data->pcs_cmn_vals[phy_t1][phy_t2][ssc];
+ if (pcs_cmn_vals) {
+ reg_pairs = pcs_cmn_vals->reg_pairs;
+ num_regs = pcs_cmn_vals->num_regs;
+ regmap = sp->regmap_phy_pcs_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
+ }
+
+ /* PHY PMA lane registers configurations */
+ phy_pma_ln_vals = init_data->phy_pma_ln_vals[phy_t1][phy_t2][ssc];
+ if (phy_pma_ln_vals) {
+ reg_pairs = phy_pma_ln_vals->reg_pairs;
+ num_regs = phy_pma_ln_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = sp->regmap_phy_pma_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
+ }
+ }
+
+ /* PMA common registers configurations */
+ pma_cmn_vals = init_data->pma_cmn_vals[phy_t1][phy_t2][ssc];
+ if (pma_cmn_vals) {
+ reg_pairs = pma_cmn_vals->reg_pairs;
+ num_regs = pma_cmn_vals->num_regs;
+ regmap = sp->regmap_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
+ }
+
+ /* PMA lane registers configurations */
+ pma_ln_vals = init_data->pma_ln_vals[phy_t1][phy_t2][ssc];
+ if (pma_ln_vals) {
+ reg_pairs = pma_ln_vals->reg_pairs;
+ num_regs = pma_ln_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = sp->regmap_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
+ }
+ }
+
+ if (phy_t1 == TYPE_QSGMII)
+ reset_control_deassert(sp->phys[node].lnk_rst);
+ }
+
+ /* Take the PHY out of reset */
+ ret = reset_control_deassert(sp->phy_rst);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int cdns_sierra_phy_probe(struct platform_device *pdev)
+{
+ struct cdns_sierra_phy *sp;
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ const struct cdns_sierra_data *data;
+ unsigned int id_value;
+ int ret, node = 0;
+ void __iomem *base;
+ struct device_node *dn = dev->of_node, *child;
+
+ if (of_get_child_count(dn) == 0)
+ return -ENODEV;
+
+ /* Get init data for this PHY */
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL);
+ if (!sp)
+ return -ENOMEM;
+ dev_set_drvdata(dev, sp);
+ sp->dev = dev;
+ sp->init_data = data;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+ dev_err(dev, "missing \"reg\"\n");
+ return PTR_ERR(base);
+ }
+
+ ret = cdns_regmap_init_blocks(sp, base, data->block_offset_shift,
+ data->reg_offset_shift);
+ if (ret)
+ return ret;
+
+ ret = cdns_regfield_init(sp);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, sp);
+
+ ret = cdns_sierra_phy_get_clocks(sp, dev);
+ if (ret)
+ return ret;
+
+ ret = cdns_sierra_clk_register(sp);
+ if (ret)
+ return ret;
+
+ ret = cdns_sierra_phy_enable_clocks(sp);
+ if (ret)
+ goto unregister_clk;
+
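+ /*
+ * If the PMA common block already reports ready, the PHY has already been
+ * configured (e.g. by an earlier boot stage): skip the PHY clock and reset
+ * setup and register no-op PHY ops for the links below.
+ */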
+ regmap_field_read(sp->pma_cmn_ready, &sp->already_configured);
+
+ if (!sp->already_configured) {
+ ret = cdns_sierra_phy_clk(sp);
+ if (ret)
+ goto clk_disable;
+
+ ret = cdns_sierra_phy_get_resets(sp, dev);
+ if (ret)
+ goto clk_disable;
+
+ /* Enable APB */
+ reset_control_deassert(sp->apb_rst);
+ }
+
+ /* Check that PHY is present */
+ regmap_field_read(sp->macro_id_type, &id_value);
+ if (sp->init_data->id_value != id_value) {
+ ret = -EINVAL;
+ goto ctrl_assert;
+ }
+
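+ /*
+ * With "cdns,autoconf", the per-link DT properties are not parsed and the
+ * multilink register configuration below is skipped.
+ */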
+ sp->autoconf = of_property_read_bool(dn, "cdns,autoconf");
+
+ for_each_available_child_of_node(dn, child) {
+ struct phy *gphy;
+
+ if (!(of_node_name_eq(child, "phy") ||
+ of_node_name_eq(child, "link")))
+ continue;
+
+ sp->phys[node].lnk_rst =
+ of_reset_control_array_get_exclusive(child);
+
+ if (IS_ERR(sp->phys[node].lnk_rst)) {
+ dev_err(dev, "failed to get reset %s\n",
+ child->full_name);
+ ret = PTR_ERR(sp->phys[node].lnk_rst);
+ of_node_put(child);
+ goto put_control;
+ }
+
+ if (!sp->autoconf) {
+ ret = cdns_sierra_get_optional(&sp->phys[node], child);
+ if (ret) {
+ dev_err(dev, "missing property in node %s\n",
+ child->name);
+ of_node_put(child);
+ reset_control_put(sp->phys[node].lnk_rst);
+ goto put_control;
+ }
+ }
+
+ sp->num_lanes += sp->phys[node].num_lanes;
+
+ if (!sp->already_configured)
+ gphy = devm_phy_create(dev, child, &ops);
+ else
+ gphy = devm_phy_create(dev, child, &noop_ops);
+ if (IS_ERR(gphy)) {
+ ret = PTR_ERR(gphy);
+ of_node_put(child);
+ reset_control_put(sp->phys[node].lnk_rst);
+ goto put_control;
+ }
+ sp->phys[node].phy = gphy;
+ phy_set_drvdata(gphy, &sp->phys[node]);
+
+ node++;
+ }
+ sp->nsubnodes = node;
+
+ if (sp->num_lanes > SIERRA_MAX_LANES) {
+ ret = -EINVAL;
+ dev_err(dev, "Invalid lane configuration\n");
+ goto put_control;
+ }
+
+ /* If more than one subnode, configure the PHY as multilink */
+ if (!sp->already_configured && !sp->autoconf && sp->nsubnodes > 1) {
+ ret = cdns_sierra_phy_configure_multilink(sp);
+ if (ret)
+ goto put_control;
+ }
+
+ pm_runtime_enable(dev);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+ ret = PTR_ERR(phy_provider);
+ goto put_control;
+ }
+
+ return 0;
+
+put_control:
+ while (--node >= 0)
+ reset_control_put(sp->phys[node].lnk_rst);
+ctrl_assert:
+ if (!sp->already_configured)
+ reset_control_assert(sp->apb_rst);
+clk_disable:
+ cdns_sierra_phy_disable_clocks(sp);
+unregister_clk:
+ cdns_sierra_clk_unregister(sp);
+ return ret;
+}
+
+static int cdns_sierra_phy_remove(struct platform_device *pdev)
+{
+ struct cdns_sierra_phy *phy = platform_get_drvdata(pdev);
+ int i;
+
+ reset_control_assert(phy->phy_rst);
+ reset_control_assert(phy->apb_rst);
+ pm_runtime_disable(&pdev->dev);
+
+ cdns_sierra_phy_disable_clocks(phy);
+ /*
+ * The device-level resets will be put automatically.
+ * The subnode resets still need to be put here, though.
+ */
+ for (i = 0; i < phy->nsubnodes; i++) {
+ reset_control_assert(phy->phys[i].lnk_rst);
+ reset_control_put(phy->phys[i].lnk_rst);
+ }
+
+ cdns_sierra_clk_unregister(phy);
+
+ return 0;
+}
+
+/* QSGMII PHY PMA lane configuration */
+static struct cdns_reg_pairs qsgmii_phy_pma_ln_regs[] = {
+ {0x9010, SIERRA_PHY_PMA_XCVR_CTRL}
+};
+
+static struct cdns_sierra_vals qsgmii_phy_pma_ln_vals = {
+ .reg_pairs = qsgmii_phy_pma_ln_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_phy_pma_ln_regs),
+};
+
+/* QSGMII refclk 100MHz, 20b, opt1, No BW cal, no ssc, PLL LC1 */
+static const struct cdns_reg_pairs qsgmii_100_no_ssc_plllc1_cmn_regs[] = {
+ {0x2085, SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG}
+};
+
+static const struct cdns_reg_pairs qsgmii_100_no_ssc_plllc1_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x0252, SIERRA_DET_STANDEC_E_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x0FFE, SIERRA_PSC_RX_A0_PREG},
+ {0x0011, SIERRA_PLLCTRL_SUBRATE_PREG},
+ {0x0001, SIERRA_PLLCTRL_GEN_A_PREG},
+ {0x5233, SIERRA_PLLCTRL_CPGAIN_MODE_PREG},
+ {0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
+ {0x0089, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x3C3C, SIERRA_CREQ_CCLKDET_MODE01_PREG},
+ {0x3222, SIERRA_CREQ_FSMCLK_SEL_PREG},
+ {0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
+ {0x8422, SIERRA_CTLELUT_CTRL_PREG},
+ {0x4111, SIERRA_DFE_ECMP_RATESEL_PREG},
+ {0x4111, SIERRA_DFE_SMP_RATESEL_PREG},
+ {0x0002, SIERRA_DEQ_PHALIGN_CTRL},
+ {0x9595, SIERRA_DEQ_VGATUNE_CTRL_PREG},
+ {0x0186, SIERRA_DEQ_GLUT0},
+ {0x0186, SIERRA_DEQ_GLUT1},
+ {0x0186, SIERRA_DEQ_GLUT2},
+ {0x0186, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x0861, SIERRA_DEQ_ALUT0},
+ {0x07E0, SIERRA_DEQ_ALUT1},
+ {0x079E, SIERRA_DEQ_ALUT2},
+ {0x071D, SIERRA_DEQ_ALUT3},
+ {0x03F5, SIERRA_DEQ_DFETAP_CTRL_PREG},
+ {0x0C01, SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG},
+ {0x3C40, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C04, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0033, SIERRA_DEQ_PICTRL_PREG},
+ {0x0660, SIERRA_CPICAL_TMRVAL_MODE0_PREG},
+ {0x00D5, SIERRA_CPI_OUTBUF_RATESEL_PREG},
+ {0x0B6D, SIERRA_CPI_RESBIAS_BIN_PREG},
+ {0x0102, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x0002, SIERRA_RXBUFFER_RCDFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_cmn_vals = {
+ .reg_pairs = qsgmii_100_no_ssc_plllc1_cmn_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_cmn_regs),
+};
+
+static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_ln_vals = {
+ .reg_pairs = qsgmii_100_no_ssc_plllc1_ln_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_ln_regs),
+};
+
+/* PCIE PHY PCS common configuration */
+static struct cdns_reg_pairs pcie_phy_pcs_cmn_regs[] = {
+ {0x0430, SIERRA_PHY_PIPE_CMN_CTRL1}
+};
+
+static struct cdns_sierra_vals pcie_phy_pcs_cmn_vals = {
+ .reg_pairs = pcie_phy_pcs_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_phy_pcs_cmn_regs),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_no_ssc, pcie_links_using_plllc, pipe_bw_3 */
+static const struct cdns_reg_pairs pcie_100_no_ssc_plllc_cmn_regs[] = {
+ {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG}
+};
+
+/*
+ * refclk100MHz_32b_PCIe_ln_no_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ml_pcie_100_no_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_no_ssc_plllc_cmn_vals = {
+ .reg_pairs = pcie_100_no_ssc_plllc_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_100_no_ssc_plllc_cmn_regs),
+};
+
+static struct cdns_sierra_vals ml_pcie_100_no_ssc_ln_vals = {
+ .reg_pairs = ml_pcie_100_no_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_100_no_ssc_ln_regs),
+};
+
+/*
+ * TI J721E:
+ * refclk100MHz_32b_PCIe_ln_no_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ti_ml_pcie_100_no_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG},
+ {0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
+};
+
+static struct cdns_sierra_vals ti_ml_pcie_100_no_ssc_ln_vals = {
+ .reg_pairs = ti_ml_pcie_100_no_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ti_ml_pcie_100_no_ssc_ln_regs),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_int_ssc, pcie_links_using_plllc, pipe_bw_3 */
+static const struct cdns_reg_pairs pcie_100_int_ssc_plllc_cmn_regs[] = {
+ {0x000E, SIERRA_CMN_PLLLC_MODE_PREG},
+ {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x0581, SIERRA_CMN_PLLLC_DSMCORR_PREG},
+ {0x7F80, SIERRA_CMN_PLLLC_SS_PREG},
+ {0x0041, SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG},
+ {0x0464, SIERRA_CMN_PLLLC_SSTWOPT_PREG},
+ {0x0D0D, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG},
+ {0x0060, SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG}
+};
+
+/*
+ * refclk100MHz_32b_PCIe_ln_int_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ml_pcie_100_int_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_int_ssc_plllc_cmn_vals = {
+ .reg_pairs = pcie_100_int_ssc_plllc_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_100_int_ssc_plllc_cmn_regs),
+};
+
+static struct cdns_sierra_vals ml_pcie_100_int_ssc_ln_vals = {
+ .reg_pairs = ml_pcie_100_int_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_100_int_ssc_ln_regs),
+};
+
+/*
+ * TI J721E:
+ * refclk100MHz_32b_PCIe_ln_int_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ti_ml_pcie_100_int_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG},
+ {0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
+};
+
+static struct cdns_sierra_vals ti_ml_pcie_100_int_ssc_ln_vals = {
+ .reg_pairs = ti_ml_pcie_100_int_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ti_ml_pcie_100_int_ssc_ln_regs),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc, pcie_links_using_plllc, pipe_bw_3 */
+static const struct cdns_reg_pairs pcie_100_ext_ssc_plllc_cmn_regs[] = {
+ {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x1B1B, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}
+};
+
+/*
+ * refclk100MHz_32b_PCIe_ln_ext_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ml_pcie_100_ext_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_ext_ssc_plllc_cmn_vals = {
+ .reg_pairs = pcie_100_ext_ssc_plllc_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_100_ext_ssc_plllc_cmn_regs),
+};
+
+static struct cdns_sierra_vals ml_pcie_100_ext_ssc_ln_vals = {
+ .reg_pairs = ml_pcie_100_ext_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_100_ext_ssc_ln_regs),
+};
+
+/*
+ * TI J721E:
+ * refclk100MHz_32b_PCIe_ln_ext_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ti_ml_pcie_100_ext_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG},
+ {0x0002, SIERRA_TX_RCVDET_OVRD_PREG}
+};
+
+static struct cdns_sierra_vals ti_ml_pcie_100_ext_ssc_ln_vals = {
+ .reg_pairs = ti_ml_pcie_100_ext_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ti_ml_pcie_100_ext_ssc_ln_regs),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_no_ssc */
+static const struct cdns_reg_pairs cdns_pcie_cmn_regs_no_ssc[] = {
+ {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG}
+};
+
+/* refclk100MHz_32b_PCIe_ln_no_ssc */
+static const struct cdns_reg_pairs cdns_pcie_ln_regs_no_ssc[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_no_ssc_cmn_vals = {
+ .reg_pairs = cdns_pcie_cmn_regs_no_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_no_ssc),
+};
+
+static struct cdns_sierra_vals pcie_100_no_ssc_ln_vals = {
+ .reg_pairs = cdns_pcie_ln_regs_no_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_no_ssc),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_int_ssc */
+static const struct cdns_reg_pairs cdns_pcie_cmn_regs_int_ssc[] = {
+ {0x000E, SIERRA_CMN_PLLLC_MODE_PREG},
+ {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x0581, SIERRA_CMN_PLLLC_DSMCORR_PREG},
+ {0x7F80, SIERRA_CMN_PLLLC_SS_PREG},
+ {0x0041, SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG},
+ {0x0464, SIERRA_CMN_PLLLC_SSTWOPT_PREG},
+ {0x0D0D, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG},
+ {0x0060, SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG}
+};
+
+/* refclk100MHz_32b_PCIe_ln_int_ssc */
+static const struct cdns_reg_pairs cdns_pcie_ln_regs_int_ssc[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_int_ssc_cmn_vals = {
+ .reg_pairs = cdns_pcie_cmn_regs_int_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_int_ssc),
+};
+
+static struct cdns_sierra_vals pcie_100_int_ssc_ln_vals = {
+ .reg_pairs = cdns_pcie_ln_regs_int_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_int_ssc),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc */
+static const struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = {
+ {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x1B1B, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}
+};
+
+/* refclk100MHz_32b_PCIe_ln_ext_ssc */
+static const struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_ext_ssc_cmn_vals = {
+ .reg_pairs = cdns_pcie_cmn_regs_ext_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
+};
+
+static struct cdns_sierra_vals pcie_100_ext_ssc_ln_vals = {
+ .reg_pairs = cdns_pcie_ln_regs_ext_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
+};
+
+/* refclk100MHz_20b_USB_cmn_pll_ext_ssc */
+static const struct cdns_reg_pairs cdns_usb_cmn_regs_ext_ssc[] = {
+ {0x2085, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2085, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}
+};
+
+/* refclk100MHz_20b_USB_ln_ext_ssc */
+static const struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
+ {0xFE0A, SIERRA_DET_STANDEC_A_PREG},
+ {0x000F, SIERRA_DET_STANDEC_B_PREG},
+ {0x55A5, SIERRA_DET_STANDEC_C_PREG},
+ {0x69ad, SIERRA_DET_STANDEC_D_PREG},
+ {0x0241, SIERRA_DET_STANDEC_E_PREG},
+ {0x0110, SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG},
+ {0x0014, SIERRA_PSM_A0IN_TMR_PREG},
+ {0xCF00, SIERRA_PSM_DIAG_PREG},
+ {0x001F, SIERRA_PSC_TX_A0_PREG},
+ {0x0007, SIERRA_PSC_TX_A1_PREG},
+ {0x0003, SIERRA_PSC_TX_A2_PREG},
+ {0x0003, SIERRA_PSC_TX_A3_PREG},
+ {0x0FFF, SIERRA_PSC_RX_A0_PREG},
+ {0x0003, SIERRA_PSC_RX_A1_PREG},
+ {0x0003, SIERRA_PSC_RX_A2_PREG},
+ {0x0001, SIERRA_PSC_RX_A3_PREG},
+ {0x0001, SIERRA_PLLCTRL_SUBRATE_PREG},
+ {0x0406, SIERRA_PLLCTRL_GEN_D_PREG},
+ {0x5233, SIERRA_PLLCTRL_CPGAIN_MODE_PREG},
+ {0x00CA, SIERRA_CLKPATH_BIASTRIM_PREG},
+ {0x2512, SIERRA_DFE_BIASTRIM_PREG},
+ {0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
+ {0x823E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x078F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x078F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x7B3C, SIERRA_CREQ_CCLKDET_MODE01_PREG},
+ {0x023C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x3232, SIERRA_CREQ_FSMCLK_SEL_PREG},
+ {0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
+ {0x0000, SIERRA_CREQ_SPARE_PREG},
+ {0xCC44, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x8452, SIERRA_CTLELUT_CTRL_PREG},
+ {0x4121, SIERRA_DFE_ECMP_RATESEL_PREG},
+ {0x4121, SIERRA_DFE_SMP_RATESEL_PREG},
+ {0x0003, SIERRA_DEQ_PHALIGN_CTRL},
+ {0x3200, SIERRA_DEQ_CONCUR_CTRL1_PREG},
+ {0x5064, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x0030, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x0048, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x5A5A, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02F5, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02F5, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x9999, SIERRA_DEQ_VGATUNE_CTRL_PREG},
+ {0x0014, SIERRA_DEQ_GLUT0},
+ {0x0014, SIERRA_DEQ_GLUT1},
+ {0x0014, SIERRA_DEQ_GLUT2},
+ {0x0014, SIERRA_DEQ_GLUT3},
+ {0x0014, SIERRA_DEQ_GLUT4},
+ {0x0014, SIERRA_DEQ_GLUT5},
+ {0x0014, SIERRA_DEQ_GLUT6},
+ {0x0014, SIERRA_DEQ_GLUT7},
+ {0x0014, SIERRA_DEQ_GLUT8},
+ {0x0014, SIERRA_DEQ_GLUT9},
+ {0x0014, SIERRA_DEQ_GLUT10},
+ {0x0014, SIERRA_DEQ_GLUT11},
+ {0x0014, SIERRA_DEQ_GLUT12},
+ {0x0014, SIERRA_DEQ_GLUT13},
+ {0x0014, SIERRA_DEQ_GLUT14},
+ {0x0014, SIERRA_DEQ_GLUT15},
+ {0x0014, SIERRA_DEQ_GLUT16},
+ {0x0BAE, SIERRA_DEQ_ALUT0},
+ {0x0AEB, SIERRA_DEQ_ALUT1},
+ {0x0A28, SIERRA_DEQ_ALUT2},
+ {0x0965, SIERRA_DEQ_ALUT3},
+ {0x08A2, SIERRA_DEQ_ALUT4},
+ {0x07DF, SIERRA_DEQ_ALUT5},
+ {0x071C, SIERRA_DEQ_ALUT6},
+ {0x0659, SIERRA_DEQ_ALUT7},
+ {0x0596, SIERRA_DEQ_ALUT8},
+ {0x0514, SIERRA_DEQ_ALUT9},
+ {0x0492, SIERRA_DEQ_ALUT10},
+ {0x0410, SIERRA_DEQ_ALUT11},
+ {0x038E, SIERRA_DEQ_ALUT12},
+ {0x030C, SIERRA_DEQ_ALUT13},
+ {0x03F4, SIERRA_DEQ_DFETAP_CTRL_PREG},
+ {0x0001, SIERRA_DFE_EN_1010_IGNORE_PREG},
+ {0x3C01, SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG},
+ {0x3C40, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C08, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0033, SIERRA_DEQ_PICTRL_PREG},
+ {0x0400, SIERRA_CPICAL_TMRVAL_MODE1_PREG},
+ {0x0330, SIERRA_CPICAL_TMRVAL_MODE0_PREG},
+ {0x01FF, SIERRA_CPICAL_PICNT_MODE1_PREG},
+ {0x0009, SIERRA_CPI_OUTBUF_RATESEL_PREG},
+ {0x3232, SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG},
+ {0x0005, SIERRA_LFPSDET_SUPPORT_PREG},
+ {0x000F, SIERRA_LFPSFILT_NS_PREG},
+ {0x0009, SIERRA_LFPSFILT_RD_PREG},
+ {0x0001, SIERRA_LFPSFILT_MP_PREG},
+ {0x6013, SIERRA_SIGDET_SUPPORT_PREG},
+ {0x8013, SIERRA_SDFILT_H2L_A_PREG},
+ {0x8009, SIERRA_SDFILT_L2H_PREG},
+ {0x0024, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x0020, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4243, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals usb_100_ext_ssc_cmn_vals = {
+ .reg_pairs = cdns_usb_cmn_regs_ext_ssc,
+ .num_regs = ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
+};
+
+static struct cdns_sierra_vals usb_100_ext_ssc_ln_vals = {
+ .reg_pairs = cdns_usb_ln_regs_ext_ssc,
+ .num_regs = ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
+};
+
+static const struct cdns_sierra_data cdns_map_sierra = {
+ .id_value = SIERRA_MACRO_ID,
+ .block_offset_shift = 0x2,
+ .reg_offset_shift = 0x2,
+ .pcs_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ },
+ },
+ .pma_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_cmn_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ },
+ },
+ },
+ .pma_ln_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &ml_pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ },
+ },
+ },
+};
+
+static const struct cdns_sierra_data cdns_ti_map_sierra = {
+ .id_value = SIERRA_MACRO_ID,
+ .block_offset_shift = 0x0,
+ .reg_offset_shift = 0x1,
+ .pcs_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ },
+ },
+ .phy_pma_ln_vals = {
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_phy_pma_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_phy_pma_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_phy_pma_ln_vals,
+ },
+ },
+ },
+ .pma_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_cmn_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ },
+ },
+ },
+ .pma_ln_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &ti_ml_pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &ti_ml_pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &ti_ml_pcie_100_int_ssc_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ },
+ },
+ },
+};
+
+static const struct of_device_id cdns_sierra_id_table[] = {
+ {
+ .compatible = "cdns,sierra-phy-t0",
+ .data = &cdns_map_sierra,
+ },
+ {
+ .compatible = "ti,sierra-phy-t0",
+ .data = &cdns_ti_map_sierra,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cdns_sierra_id_table);
+
+static struct platform_driver cdns_sierra_driver = {
+ .probe = cdns_sierra_phy_probe,
+ .remove = cdns_sierra_phy_remove,
+ .driver = {
+ .name = "cdns-sierra-phy",
+ .of_match_table = cdns_sierra_id_table,
+ },
+};
+module_platform_driver(cdns_sierra_driver);
+
+MODULE_ALIAS("platform:cdns_sierra");
+MODULE_AUTHOR("Cadence Design Systems");
+MODULE_DESCRIPTION("CDNS sierra phy driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
new file mode 100644
index 000000000..f099053c5
--- /dev/null
+++ b/drivers/phy/cadence/phy-cadence-torrent.c
@@ -0,0 +1,4721 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Cadence Torrent SD0801 PHY driver.
+ *
+ * Copyright 2018 Cadence Design Systems, Inc.
+ *
+ */
+
+#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/phy/phy-cadence.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/regmap.h>
+
+#define REF_CLK_19_2MHZ 19200000
+#define REF_CLK_25MHZ 25000000
+#define REF_CLK_100MHZ 100000000
+
+#define MAX_NUM_LANES 4
+#define DEFAULT_MAX_BIT_RATE 8100 /* in Mbps */
+
+#define NUM_SSC_MODE 3
+#define NUM_REF_CLK 3
+#define NUM_PHY_TYPE 6
+
+#define POLL_TIMEOUT_US 5000
+#define PLL_LOCK_TIMEOUT 100000
+
+#define TORRENT_COMMON_CDB_OFFSET 0x0
+
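+/*
+ * Lane and common register block offsets below are scaled by the
+ * platform-specific block_offset_shift and reg_offset_shift values,
+ * e.g. TORRENT_TX_LANE_CDB_OFFSET(1, 0, 0) = 0x4000 + (1 << 9) = 0x4200.
+ */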
+#define TORRENT_TX_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
+ ((0x4000 << (block_offset)) + \
+ (((ln) << 9) << (reg_offset)))
+
+#define TORRENT_RX_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
+ ((0x8000 << (block_offset)) + \
+ (((ln) << 9) << (reg_offset)))
+
+#define TORRENT_PHY_PCS_COMMON_OFFSET(block_offset) \
+ (0xC000 << (block_offset))
+
+#define TORRENT_PHY_PCS_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
+ ((0xD000 << (block_offset)) + \
+ (((ln) << 8) << (reg_offset)))
+
+#define TORRENT_PHY_PMA_COMMON_OFFSET(block_offset) \
+ (0xE000 << (block_offset))
+
+#define TORRENT_DPTX_PHY_OFFSET 0x0
+
+/*
+ * register offsets from the DPTX PHY register block base (i.e. MHDP
+ * register base + 0x30a00)
+ */
+#define PHY_AUX_CTRL 0x04
+#define PHY_RESET 0x20
+#define PMA_TX_ELEC_IDLE_MASK 0xF0U
+#define PMA_TX_ELEC_IDLE_SHIFT 4
+#define PHY_L00_RESET_N_MASK 0x01U
+#define PHY_PMA_XCVR_PLLCLK_EN 0x24
+#define PHY_PMA_XCVR_PLLCLK_EN_ACK 0x28
+#define PHY_PMA_XCVR_POWER_STATE_REQ 0x2c
+#define PHY_POWER_STATE_LN_0 0x0000
+#define PHY_POWER_STATE_LN_1 0x0008
+#define PHY_POWER_STATE_LN_2 0x0010
+#define PHY_POWER_STATE_LN_3 0x0018
+#define PMA_XCVR_POWER_STATE_REQ_LN_MASK 0x3FU
+#define PHY_PMA_XCVR_POWER_STATE_ACK 0x30
+#define PHY_PMA_CMN_READY 0x34
+
+/*
+ * register offsets from the SD0801 PHY register block base (i.e. MHDP
+ * register base + 0x500000)
+ */
+#define CMN_SSM_BANDGAP_TMR 0x0021U
+#define CMN_SSM_BIAS_TMR 0x0022U
+#define CMN_PLLSM0_PLLPRE_TMR 0x002AU
+#define CMN_PLLSM0_PLLLOCK_TMR 0x002CU
+#define CMN_PLLSM1_PLLPRE_TMR 0x0032U
+#define CMN_PLLSM1_PLLLOCK_TMR 0x0034U
+#define CMN_CDIAG_CDB_PWRI_OVRD 0x0041U
+#define CMN_CDIAG_XCVRC_PWRI_OVRD 0x0047U
+#define CMN_CDIAG_REFCLK_OVRD 0x004CU
+#define CMN_CDIAG_REFCLK_DRV0_CTRL 0x0050U
+#define CMN_BGCAL_INIT_TMR 0x0064U
+#define CMN_BGCAL_ITER_TMR 0x0065U
+#define CMN_IBCAL_INIT_TMR 0x0074U
+#define CMN_PLL0_VCOCAL_TCTRL 0x0082U
+#define CMN_PLL0_VCOCAL_INIT_TMR 0x0084U
+#define CMN_PLL0_VCOCAL_ITER_TMR 0x0085U
+#define CMN_PLL0_VCOCAL_REFTIM_START 0x0086U
+#define CMN_PLL0_VCOCAL_PLLCNT_START 0x0088U
+#define CMN_PLL0_INTDIV_M0 0x0090U
+#define CMN_PLL0_FRACDIVL_M0 0x0091U
+#define CMN_PLL0_FRACDIVH_M0 0x0092U
+#define CMN_PLL0_HIGH_THR_M0 0x0093U
+#define CMN_PLL0_DSM_DIAG_M0 0x0094U
+#define CMN_PLL0_DSM_FBH_OVRD_M0 0x0095U
+#define CMN_PLL0_SS_CTRL1_M0 0x0098U
+#define CMN_PLL0_SS_CTRL2_M0 0x0099U
+#define CMN_PLL0_SS_CTRL3_M0 0x009AU
+#define CMN_PLL0_SS_CTRL4_M0 0x009BU
+#define CMN_PLL0_LOCK_REFCNT_START 0x009CU
+#define CMN_PLL0_LOCK_PLLCNT_START 0x009EU
+#define CMN_PLL0_LOCK_PLLCNT_THR 0x009FU
+#define CMN_PLL0_INTDIV_M1 0x00A0U
+#define CMN_PLL0_FRACDIVH_M1 0x00A2U
+#define CMN_PLL0_HIGH_THR_M1 0x00A3U
+#define CMN_PLL0_DSM_DIAG_M1 0x00A4U
+#define CMN_PLL0_SS_CTRL1_M1 0x00A8U
+#define CMN_PLL0_SS_CTRL2_M1 0x00A9U
+#define CMN_PLL0_SS_CTRL3_M1 0x00AAU
+#define CMN_PLL0_SS_CTRL4_M1 0x00ABU
+#define CMN_PLL1_VCOCAL_TCTRL 0x00C2U
+#define CMN_PLL1_VCOCAL_INIT_TMR 0x00C4U
+#define CMN_PLL1_VCOCAL_ITER_TMR 0x00C5U
+#define CMN_PLL1_VCOCAL_REFTIM_START 0x00C6U
+#define CMN_PLL1_VCOCAL_PLLCNT_START 0x00C8U
+#define CMN_PLL1_INTDIV_M0 0x00D0U
+#define CMN_PLL1_FRACDIVL_M0 0x00D1U
+#define CMN_PLL1_FRACDIVH_M0 0x00D2U
+#define CMN_PLL1_HIGH_THR_M0 0x00D3U
+#define CMN_PLL1_DSM_DIAG_M0 0x00D4U
+#define CMN_PLL1_DSM_FBH_OVRD_M0 0x00D5U
+#define CMN_PLL1_DSM_FBL_OVRD_M0 0x00D6U
+#define CMN_PLL1_SS_CTRL1_M0 0x00D8U
+#define CMN_PLL1_SS_CTRL2_M0 0x00D9U
+#define CMN_PLL1_SS_CTRL3_M0 0x00DAU
+#define CMN_PLL1_SS_CTRL4_M0 0x00DBU
+#define CMN_PLL1_LOCK_REFCNT_START 0x00DCU
+#define CMN_PLL1_LOCK_PLLCNT_START 0x00DEU
+#define CMN_PLL1_LOCK_PLLCNT_THR 0x00DFU
+#define CMN_TXPUCAL_TUNE 0x0103U
+#define CMN_TXPUCAL_INIT_TMR 0x0104U
+#define CMN_TXPUCAL_ITER_TMR 0x0105U
+#define CMN_TXPDCAL_TUNE 0x010BU
+#define CMN_TXPDCAL_INIT_TMR 0x010CU
+#define CMN_TXPDCAL_ITER_TMR 0x010DU
+#define CMN_RXCAL_INIT_TMR 0x0114U
+#define CMN_RXCAL_ITER_TMR 0x0115U
+#define CMN_SD_CAL_INIT_TMR 0x0124U
+#define CMN_SD_CAL_ITER_TMR 0x0125U
+#define CMN_SD_CAL_REFTIM_START 0x0126U
+#define CMN_SD_CAL_PLLCNT_START 0x0128U
+#define CMN_PDIAG_PLL0_CTRL_M0 0x01A0U
+#define CMN_PDIAG_PLL0_CLK_SEL_M0 0x01A1U
+#define CMN_PDIAG_PLL0_CP_PADJ_M0 0x01A4U
+#define CMN_PDIAG_PLL0_CP_IADJ_M0 0x01A5U
+#define CMN_PDIAG_PLL0_FILT_PADJ_M0 0x01A6U
+#define CMN_PDIAG_PLL0_CTRL_M1 0x01B0U
+#define CMN_PDIAG_PLL0_CLK_SEL_M1 0x01B1U
+#define CMN_PDIAG_PLL0_CP_PADJ_M1 0x01B4U
+#define CMN_PDIAG_PLL0_CP_IADJ_M1 0x01B5U
+#define CMN_PDIAG_PLL0_FILT_PADJ_M1 0x01B6U
+#define CMN_PDIAG_PLL1_CTRL_M0 0x01C0U
+#define CMN_PDIAG_PLL1_CLK_SEL_M0 0x01C1U
+#define CMN_PDIAG_PLL1_CP_PADJ_M0 0x01C4U
+#define CMN_PDIAG_PLL1_CP_IADJ_M0 0x01C5U
+#define CMN_PDIAG_PLL1_FILT_PADJ_M0 0x01C6U
+#define CMN_DIAG_BIAS_OVRD1 0x01E1U
+
+/* PMA TX Lane registers */
+#define TX_TXCC_CTRL 0x0040U
+#define TX_TXCC_CPOST_MULT_00 0x004CU
+#define TX_TXCC_CPOST_MULT_01 0x004DU
+#define TX_TXCC_MGNFS_MULT_000 0x0050U
+#define TX_TXCC_MGNFS_MULT_100 0x0054U
+#define DRV_DIAG_TX_DRV 0x00C6U
+#define XCVR_DIAG_PLLDRC_CTRL 0x00E5U
+#define XCVR_DIAG_HSCLK_SEL 0x00E6U
+#define XCVR_DIAG_HSCLK_DIV 0x00E7U
+#define XCVR_DIAG_RXCLK_CTRL 0x00E9U
+#define XCVR_DIAG_BIDI_CTRL 0x00EAU
+#define XCVR_DIAG_PSC_OVRD 0x00EBU
+#define TX_PSC_A0 0x0100U
+#define TX_PSC_A1 0x0101U
+#define TX_PSC_A2 0x0102U
+#define TX_PSC_A3 0x0103U
+#define TX_RCVDET_ST_TMR 0x0123U
+#define TX_DIAG_ACYA 0x01E7U
+#define TX_DIAG_ACYA_HBDC_MASK 0x0001U
+
+/* PMA RX Lane registers */
+#define RX_PSC_A0 0x0000U
+#define RX_PSC_A1 0x0001U
+#define RX_PSC_A2 0x0002U
+#define RX_PSC_A3 0x0003U
+#define RX_PSC_CAL 0x0006U
+#define RX_CDRLF_CNFG 0x0080U
+#define RX_CDRLF_CNFG3 0x0082U
+#define RX_SIGDET_HL_FILT_TMR 0x0090U
+#define RX_REE_GCSM1_CTRL 0x0108U
+#define RX_REE_GCSM1_EQENM_PH1 0x0109U
+#define RX_REE_GCSM1_EQENM_PH2 0x010AU
+#define RX_REE_GCSM2_CTRL 0x0110U
+#define RX_REE_PERGCSM_CTRL 0x0118U
+#define RX_REE_ATTEN_THR 0x0149U
+#define RX_REE_TAP1_CLIP 0x0171U
+#define RX_REE_TAP2TON_CLIP 0x0172U
+#define RX_REE_SMGM_CTRL1 0x0177U
+#define RX_REE_SMGM_CTRL2 0x0178U
+#define RX_DIAG_DFE_CTRL 0x01E0U
+#define RX_DIAG_DFE_AMP_TUNE_2 0x01E2U
+#define RX_DIAG_DFE_AMP_TUNE_3 0x01E3U
+#define RX_DIAG_NQST_CTRL 0x01E5U
+#define RX_DIAG_SIGDET_TUNE 0x01E8U
+#define RX_DIAG_PI_RATE 0x01F4U
+#define RX_DIAG_PI_CAP 0x01F5U
+#define RX_DIAG_ACYA 0x01FFU
+
+/* PHY PCS common registers */
+#define PHY_PIPE_CMN_CTRL1 0x0000U
+#define PHY_PLL_CFG 0x000EU
+#define PHY_PIPE_USB3_GEN2_PRE_CFG0 0x0020U
+#define PHY_PIPE_USB3_GEN2_POST_CFG0 0x0022U
+#define PHY_PIPE_USB3_GEN2_POST_CFG1 0x0023U
+
+/* PHY PCS lane registers */
+#define PHY_PCS_ISO_LINK_CTRL 0x000BU
+
+/* PHY PMA common registers */
+#define PHY_PMA_CMN_CTRL1 0x0000U
+#define PHY_PMA_CMN_CTRL2 0x0001U
+#define PHY_PMA_PLL_RAW_CTRL 0x0003U
+
+#define CDNS_TORRENT_OUTPUT_CLOCKS 3
+
+static const char * const clk_names[] = {
+ [CDNS_TORRENT_REFCLK_DRIVER] = "refclk-driver",
+ [CDNS_TORRENT_DERIVED_REFCLK] = "refclk-der",
+ [CDNS_TORRENT_RECEIVED_REFCLK] = "refclk-rec",
+};
+
+static const struct reg_field phy_pll_cfg =
+ REG_FIELD(PHY_PLL_CFG, 0, 1);
+
+static const struct reg_field phy_pma_cmn_ctrl_1 =
+ REG_FIELD(PHY_PMA_CMN_CTRL1, 0, 0);
+
+static const struct reg_field phy_pma_cmn_ctrl_2 =
+ REG_FIELD(PHY_PMA_CMN_CTRL2, 0, 7);
+
+static const struct reg_field phy_pma_pll_raw_ctrl =
+ REG_FIELD(PHY_PMA_PLL_RAW_CTRL, 0, 1);
+
+static const struct reg_field phy_reset_ctrl =
+ REG_FIELD(PHY_RESET, 8, 8);
+
+static const struct reg_field phy_pcs_iso_link_ctrl_1 =
+ REG_FIELD(PHY_PCS_ISO_LINK_CTRL, 1, 1);
+
+static const struct reg_field phy_pipe_cmn_ctrl1_0 = REG_FIELD(PHY_PIPE_CMN_CTRL1, 0, 0);
+
+static const struct reg_field cmn_cdiag_refclk_ovrd_4 =
+ REG_FIELD(CMN_CDIAG_REFCLK_OVRD, 4, 4);
+
+#define REFCLK_OUT_NUM_CMN_CONFIG 4
+
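+/*
+ * Individual control bits of CMN_CDIAG_REFCLK_DRV0_CTRL used by the
+ * reference clock output driver.
+ */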
+enum cdns_torrent_refclk_out_cmn {
+ CMN_CDIAG_REFCLK_DRV0_CTRL_1,
+ CMN_CDIAG_REFCLK_DRV0_CTRL_4,
+ CMN_CDIAG_REFCLK_DRV0_CTRL_5,
+ CMN_CDIAG_REFCLK_DRV0_CTRL_6,
+};
+
+static const struct reg_field refclk_out_cmn_cfg[] = {
+ [CMN_CDIAG_REFCLK_DRV0_CTRL_1] = REG_FIELD(CMN_CDIAG_REFCLK_DRV0_CTRL, 1, 1),
+ [CMN_CDIAG_REFCLK_DRV0_CTRL_4] = REG_FIELD(CMN_CDIAG_REFCLK_DRV0_CTRL, 4, 4),
+ [CMN_CDIAG_REFCLK_DRV0_CTRL_5] = REG_FIELD(CMN_CDIAG_REFCLK_DRV0_CTRL, 5, 5),
+ [CMN_CDIAG_REFCLK_DRV0_CTRL_6] = REG_FIELD(CMN_CDIAG_REFCLK_DRV0_CTRL, 6, 6),
+};
+
+static const int refclk_driver_parent_index[] = {
+ CDNS_TORRENT_DERIVED_REFCLK,
+ CDNS_TORRENT_RECEIVED_REFCLK
+};
+
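+/* Mux register value 1 selects the derived refclk, 0 the received refclk. */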
+static u32 cdns_torrent_refclk_driver_mux_table[] = { 1, 0 };
+
+enum cdns_torrent_phy_type {
+ TYPE_NONE,
+ TYPE_DP,
+ TYPE_PCIE,
+ TYPE_SGMII,
+ TYPE_QSGMII,
+ TYPE_USB,
+};
+
+enum cdns_torrent_ref_clk {
+ CLK_19_2_MHZ,
+ CLK_25_MHZ,
+ CLK_100_MHZ
+};
+
+enum cdns_torrent_ssc_mode {
+ NO_SSC,
+ EXTERNAL_SSC,
+ INTERNAL_SSC
+};
+
+struct cdns_torrent_inst {
+ struct phy *phy;
+ u32 mlane;
+ enum cdns_torrent_phy_type phy_type;
+ u32 num_lanes;
+ struct reset_control *lnk_rst;
+ enum cdns_torrent_ssc_mode ssc_mode;
+};
+
+struct cdns_torrent_phy {
+ void __iomem *base; /* DPTX registers base */
+ void __iomem *sd_base; /* SD0801 registers base */
+ u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */
+ struct reset_control *phy_rst;
+ struct reset_control *apb_rst;
+ struct device *dev;
+ struct clk *clk;
+ enum cdns_torrent_ref_clk ref_clk_rate;
+ struct cdns_torrent_inst phys[MAX_NUM_LANES];
+ int nsubnodes;
+ const struct cdns_torrent_data *init_data;
+ struct regmap *regmap_common_cdb;
+ struct regmap *regmap_phy_pcs_common_cdb;
+ struct regmap *regmap_phy_pma_common_cdb;
+ struct regmap *regmap_tx_lane_cdb[MAX_NUM_LANES];
+ struct regmap *regmap_rx_lane_cdb[MAX_NUM_LANES];
+ struct regmap *regmap_phy_pcs_lane_cdb[MAX_NUM_LANES];
+ struct regmap *regmap_dptx_phy_reg;
+ struct regmap_field *phy_pll_cfg;
+ struct regmap_field *phy_pipe_cmn_ctrl1_0;
+ struct regmap_field *cmn_cdiag_refclk_ovrd_4;
+ struct regmap_field *phy_pma_cmn_ctrl_1;
+ struct regmap_field *phy_pma_cmn_ctrl_2;
+ struct regmap_field *phy_pma_pll_raw_ctrl;
+ struct regmap_field *phy_reset_ctrl;
+ struct regmap_field *phy_pcs_iso_link_ctrl_1[MAX_NUM_LANES];
+ struct clk_hw_onecell_data *clk_hw_data;
+};
+
+enum phy_powerstate {
+ POWERSTATE_A0 = 0,
+ /* Powerstate A1 is unused */
+ POWERSTATE_A2 = 2,
+ POWERSTATE_A3 = 3,
+};
+
+struct cdns_torrent_refclk_driver {
+ struct clk_hw hw;
+ struct regmap_field *cmn_fields[REFCLK_OUT_NUM_CMN_CONFIG];
+ struct clk_init_data clk_data;
+};
+
+#define to_cdns_torrent_refclk_driver(_hw) \
+ container_of(_hw, struct cdns_torrent_refclk_driver, hw)
+
+struct cdns_torrent_derived_refclk {
+ struct clk_hw hw;
+ struct regmap_field *phy_pipe_cmn_ctrl1_0;
+ struct regmap_field *cmn_cdiag_refclk_ovrd_4;
+ struct clk_init_data clk_data;
+};
+
+#define to_cdns_torrent_derived_refclk(_hw) \
+ container_of(_hw, struct cdns_torrent_derived_refclk, hw)
+
+struct cdns_torrent_received_refclk {
+ struct clk_hw hw;
+ struct regmap_field *phy_pipe_cmn_ctrl1_0;
+ struct regmap_field *cmn_cdiag_refclk_ovrd_4;
+ struct clk_init_data clk_data;
+};
+
+#define to_cdns_torrent_received_refclk(_hw) \
+ container_of(_hw, struct cdns_torrent_received_refclk, hw)
+
+struct cdns_reg_pairs {
+ u32 val;
+ u32 off;
+};
+
+struct cdns_torrent_vals {
+ struct cdns_reg_pairs *reg_pairs;
+ u32 num_regs;
+};
+
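+/*
+ * Platform-specific register value tables, indexed by PHY type, the PHY type
+ * of the other link (TYPE_NONE when only one protocol is used) and SSC mode;
+ * the cmn, tx and rx lane tables are additionally indexed by reference clock
+ * rate.
+ */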
+struct cdns_torrent_data {
+ u8 block_offset_shift;
+ u8 reg_offset_shift;
+ struct cdns_torrent_vals *link_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_torrent_vals *xcvr_diag_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_torrent_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_torrent_vals *cmn_vals[NUM_REF_CLK][NUM_PHY_TYPE]
+ [NUM_PHY_TYPE][NUM_SSC_MODE];
+ struct cdns_torrent_vals *tx_ln_vals[NUM_REF_CLK][NUM_PHY_TYPE]
+ [NUM_PHY_TYPE][NUM_SSC_MODE];
+ struct cdns_torrent_vals *rx_ln_vals[NUM_REF_CLK][NUM_PHY_TYPE]
+ [NUM_PHY_TYPE][NUM_SSC_MODE];
+};
+
+struct cdns_regmap_cdb_context {
+ struct device *dev;
+ void __iomem *base;
+ u8 reg_offset_shift;
+};
+
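+/*
+ * PMA/PCS registers are 16 bits wide and accessed with readw()/writew() at
+ * (reg << reg_offset_shift); DPTX PHY registers are 32 bits wide and accessed
+ * with readl()/writel() at the register offset directly.
+ */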
+static int cdns_regmap_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct cdns_regmap_cdb_context *ctx = context;
+ u32 offset = reg << ctx->reg_offset_shift;
+
+ writew(val, ctx->base + offset);
+
+ return 0;
+}
+
+static int cdns_regmap_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct cdns_regmap_cdb_context *ctx = context;
+ u32 offset = reg << ctx->reg_offset_shift;
+
+ *val = readw(ctx->base + offset);
+ return 0;
+}
+
+static int cdns_regmap_dptx_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct cdns_regmap_cdb_context *ctx = context;
+ u32 offset = reg;
+
+ writel(val, ctx->base + offset);
+
+ return 0;
+}
+
+static int cdns_regmap_dptx_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct cdns_regmap_cdb_context *ctx = context;
+ u32 offset = reg;
+
+ *val = readl(ctx->base + offset);
+ return 0;
+}
+
+#define TORRENT_TX_LANE_CDB_REGMAP_CONF(n) \
+{ \
+ .name = "torrent_tx_lane" n "_cdb", \
+ .reg_stride = 1, \
+ .fast_io = true, \
+ .reg_write = cdns_regmap_write, \
+ .reg_read = cdns_regmap_read, \
+}
+
+#define TORRENT_RX_LANE_CDB_REGMAP_CONF(n) \
+{ \
+ .name = "torrent_rx_lane" n "_cdb", \
+ .reg_stride = 1, \
+ .fast_io = true, \
+ .reg_write = cdns_regmap_write, \
+ .reg_read = cdns_regmap_read, \
+}
+
+static const struct regmap_config cdns_torrent_tx_lane_cdb_config[] = {
+ TORRENT_TX_LANE_CDB_REGMAP_CONF("0"),
+ TORRENT_TX_LANE_CDB_REGMAP_CONF("1"),
+ TORRENT_TX_LANE_CDB_REGMAP_CONF("2"),
+ TORRENT_TX_LANE_CDB_REGMAP_CONF("3"),
+};
+
+static const struct regmap_config cdns_torrent_rx_lane_cdb_config[] = {
+ TORRENT_RX_LANE_CDB_REGMAP_CONF("0"),
+ TORRENT_RX_LANE_CDB_REGMAP_CONF("1"),
+ TORRENT_RX_LANE_CDB_REGMAP_CONF("2"),
+ TORRENT_RX_LANE_CDB_REGMAP_CONF("3"),
+};
+
+static const struct regmap_config cdns_torrent_common_cdb_config = {
+ .name = "torrent_common_cdb",
+ .reg_stride = 1,
+ .fast_io = true,
+ .reg_write = cdns_regmap_write,
+ .reg_read = cdns_regmap_read,
+};
+
+#define TORRENT_PHY_PCS_LANE_CDB_REGMAP_CONF(n) \
+{ \
+ .name = "torrent_phy_pcs_lane" n "_cdb", \
+ .reg_stride = 1, \
+ .fast_io = true, \
+ .reg_write = cdns_regmap_write, \
+ .reg_read = cdns_regmap_read, \
+}
+
+static const struct regmap_config cdns_torrent_phy_pcs_lane_cdb_config[] = {
+ TORRENT_PHY_PCS_LANE_CDB_REGMAP_CONF("0"),
+ TORRENT_PHY_PCS_LANE_CDB_REGMAP_CONF("1"),
+ TORRENT_PHY_PCS_LANE_CDB_REGMAP_CONF("2"),
+ TORRENT_PHY_PCS_LANE_CDB_REGMAP_CONF("3"),
+};
+
+static const struct regmap_config cdns_torrent_phy_pcs_cmn_cdb_config = {
+ .name = "torrent_phy_pcs_cmn_cdb",
+ .reg_stride = 1,
+ .fast_io = true,
+ .reg_write = cdns_regmap_write,
+ .reg_read = cdns_regmap_read,
+};
+
+static const struct regmap_config cdns_torrent_phy_pma_cmn_cdb_config = {
+ .name = "torrent_phy_pma_cmn_cdb",
+ .reg_stride = 1,
+ .fast_io = true,
+ .reg_write = cdns_regmap_write,
+ .reg_read = cdns_regmap_read,
+};
+
+static const struct regmap_config cdns_torrent_dptx_phy_config = {
+ .name = "torrent_dptx_phy",
+ .reg_stride = 1,
+ .fast_io = true,
+ .reg_write = cdns_regmap_dptx_write,
+ .reg_read = cdns_regmap_dptx_read,
+};
+
+/* PHY mmr access functions */
+
+static void cdns_torrent_phy_write(struct regmap *regmap, u32 offset, u32 val)
+{
+ regmap_write(regmap, offset, val);
+}
+
+static u32 cdns_torrent_phy_read(struct regmap *regmap, u32 offset)
+{
+ unsigned int val;
+
+ regmap_read(regmap, offset, &val);
+ return val;
+}
+
+/* DPTX mmr access functions */
+
+static void cdns_torrent_dp_write(struct regmap *regmap, u32 offset, u32 val)
+{
+ regmap_write(regmap, offset, val);
+}
+
+static u32 cdns_torrent_dp_read(struct regmap *regmap, u32 offset)
+{
+ u32 val;
+
+ regmap_read(regmap, offset, &val);
+ return val;
+}
+
+/*
+ * Structure used to store values of PHY registers for voltage-related
+ * coefficients, for a particular voltage swing and pre-emphasis level. Values
+ * are shared across all physical lanes.
+ */
+struct coefficients {
+ /* Value of DRV_DIAG_TX_DRV register to use */
+ u16 diag_tx_drv;
+ /* Value of TX_TXCC_MGNFS_MULT_000 register to use */
+ u16 mgnfs_mult;
+ /* Value of TX_TXCC_CPOST_MULT_00 register to use */
+ u16 cpost_mult;
+};
+
+/*
+ * This array holds the values of the voltage-related registers for the SD0801
+ * PHY. A value of 0xFFFF is a placeholder for an invalid combination and will
+ * never be used.
+ */
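+/* Rows are indexed by voltage swing level, columns by pre-emphasis level. */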
+static const struct coefficients vltg_coeff[4][4] = {
+ /* voltage swing 0, pre-emphasis 0->3 */
+ { {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x002A,
+ .cpost_mult = 0x0000},
+ {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x001F,
+ .cpost_mult = 0x0014},
+ {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0012,
+ .cpost_mult = 0x0020},
+ {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0000,
+ .cpost_mult = 0x002A}
+ },
+
+ /* voltage swing 1, pre-emphasis 0->3 */
+ { {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x001F,
+ .cpost_mult = 0x0000},
+ {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0013,
+ .cpost_mult = 0x0012},
+ {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0000,
+ .cpost_mult = 0x001F},
+ {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF,
+ .cpost_mult = 0xFFFF}
+ },
+
+ /* voltage swing 2, pre-emphasis 0->3 */
+ { {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0013,
+ .cpost_mult = 0x0000},
+ {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0000,
+ .cpost_mult = 0x0013},
+ {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF,
+ .cpost_mult = 0xFFFF},
+ {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF,
+ .cpost_mult = 0xFFFF}
+ },
+
+ /* voltage swing 3, pre-emphasis 0->3 */
+ { {.diag_tx_drv = 0x0003, .mgnfs_mult = 0x0000,
+ .cpost_mult = 0x0000},
+ {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF,
+ .cpost_mult = 0xFFFF},
+ {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF,
+ .cpost_mult = 0xFFFF},
+ {.diag_tx_drv = 0xFFFF, .mgnfs_mult = 0xFFFF,
+ .cpost_mult = 0xFFFF}
+ }
+};
+
+static const char *cdns_torrent_get_phy_type(enum cdns_torrent_phy_type phy_type)
+{
+ switch (phy_type) {
+ case TYPE_DP:
+ return "DisplayPort";
+ case TYPE_PCIE:
+ return "PCIe";
+ case TYPE_SGMII:
+ return "SGMII";
+ case TYPE_QSGMII:
+ return "QSGMII";
+ case TYPE_USB:
+ return "USB";
+ default:
+ return "None";
+ }
+}
+
+/*
+ * Set the registers responsible for enabling and configuring SSC, with the
+ * second and third register values provided as parameters.
+ */
+static
+void cdns_torrent_dp_enable_ssc_19_2mhz(struct cdns_torrent_phy *cdns_phy,
+ u32 ctrl2_val, u32 ctrl3_val)
+{
+ struct regmap *regmap = cdns_phy->regmap_common_cdb;
+
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0001);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, ctrl2_val);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, ctrl3_val);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0003);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0001);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, ctrl2_val);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, ctrl3_val);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0003);
+}
+
+static
+void cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(struct cdns_torrent_phy *cdns_phy,
+ u32 rate, bool ssc)
+{
+ struct regmap *regmap = cdns_phy->regmap_common_cdb;
+
+ /* Assumes 19.2 MHz refclock */
+ switch (rate) {
+ /* Setting VCO for 10.8GHz */
+ case 2700:
+ case 5400:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0119);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x4000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00BC);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0012);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0119);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x4000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00BC);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0012);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x033A, 0x006A);
+ break;
+ /* Setting VCO for 9.72GHz */
+ case 1620:
+ case 2430:
+ case 3240:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x01FA);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x4000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0152);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x01FA);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x4000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0152);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x05DD, 0x0069);
+ break;
+ /* Setting VCO for 8.64GHz */
+ case 2160:
+ case 4320:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x01C2);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x012C);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x01C2);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x012C);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x0536, 0x0069);
+ break;
+ /* Setting VCO for 8.1GHz */
+ case 8100:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x01A5);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0xE000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x011A);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x01A5);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0xE000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x011A);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_19_2mhz(cdns_phy, 0x04D7, 0x006A);
+ break;
+ }
+
+ if (ssc) {
+ cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_PLLCNT_START, 0x025E);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_THR, 0x0005);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_PLLCNT_START, 0x025E);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_THR, 0x0005);
+ } else {
+ cdns_torrent_phy_write(regmap, CMN_PLL0_VCOCAL_PLLCNT_START, 0x0260);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_VCOCAL_PLLCNT_START, 0x0260);
+ /* Set reset register values to disable SSC */
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL2_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL3_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_THR, 0x0003);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL2_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL3_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_THR, 0x0003);
+ }
+
+ cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_REFCNT_START, 0x0099);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_START, 0x0099);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_REFCNT_START, 0x0099);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_START, 0x0099);
+}
+
+/*
+ * Set the registers responsible for enabling and configuring SSC, with the
+ * second register value provided as a parameter.
+ */
+static void cdns_torrent_dp_enable_ssc_25mhz(struct cdns_torrent_phy *cdns_phy,
+ u32 ctrl2_val)
+{
+ struct regmap *regmap = cdns_phy->regmap_common_cdb;
+
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0001);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, ctrl2_val);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x007F);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0003);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0001);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, ctrl2_val);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x007F);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0003);
+}
+
+static
+void cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(struct cdns_torrent_phy *cdns_phy,
+ u32 rate, bool ssc)
+{
+ struct regmap *regmap = cdns_phy->regmap_common_cdb;
+
+ /* Assumes 25 MHz refclock */
+ switch (rate) {
+ /* Setting VCO for 10.8GHz */
+ case 2700:
+ case 5400:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x01B0);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0120);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x01B0);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0120);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x0423);
+ break;
+ /* Setting VCO for 9.72GHz */
+ case 1620:
+ case 2430:
+ case 3240:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0184);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0xCCCD);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0104);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0184);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0xCCCD);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0104);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x03B9);
+ break;
+ /* Setting VCO for 8.64GHz */
+ case 2160:
+ case 4320:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0159);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x999A);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00E7);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0159);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x999A);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00E7);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x034F);
+ break;
+ /* Setting VCO for 8.1GHz */
+ case 8100:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0144);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x00D8);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0144);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x00D8);
+ if (ssc)
+ cdns_torrent_dp_enable_ssc_25mhz(cdns_phy, 0x031A);
+ break;
+ }
+
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+
+ if (ssc) {
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_VCOCAL_PLLCNT_START, 0x0315);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_LOCK_PLLCNT_THR, 0x0005);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_VCOCAL_PLLCNT_START, 0x0315);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_LOCK_PLLCNT_THR, 0x0005);
+ } else {
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_VCOCAL_PLLCNT_START, 0x0317);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_VCOCAL_PLLCNT_START, 0x0317);
+ /* Set reset register values to disable SSC */
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL1_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL2_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL3_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_SS_CTRL4_M0, 0x0000);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL0_LOCK_PLLCNT_THR, 0x0003);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL1_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL2_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL3_M0, 0x0000);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_SS_CTRL4_M0, 0x0000);
+ cdns_torrent_phy_write(regmap,
+ CMN_PLL1_LOCK_PLLCNT_THR, 0x0003);
+ }
+
+ cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_REFCNT_START, 0x00C7);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_LOCK_PLLCNT_START, 0x00C7);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_REFCNT_START, 0x00C7);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_LOCK_PLLCNT_START, 0x00C7);
+}
+
+static
+void cdns_torrent_dp_pma_cmn_vco_cfg_100mhz(struct cdns_torrent_phy *cdns_phy,
+ u32 rate, bool ssc)
+{
+ struct regmap *regmap = cdns_phy->regmap_common_cdb;
+
+ /* Assumes 100 MHz refclock */
+ switch (rate) {
+ /* Setting VCO for 10.8GHz */
+ case 2700:
+ case 5400:
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0028);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_FBH_OVRD_M0, 0x0022);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_FBH_OVRD_M0, 0x0022);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_FBL_OVRD_M0, 0x000C);
+ break;
+ /* Setting VCO for 9.72GHz */
+ case 1620:
+ case 2430:
+ case 3240:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0061);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0061);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x3333);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x3333);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0042);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0042);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ break;
+ /* Setting VCO for 8.64GHz */
+ case 2160:
+ case 4320:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0056);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0056);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x6666);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x6666);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x003A);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x003A);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ break;
+ /* Setting VCO for 8.1GHz */
+ case 8100:
+ cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0051);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0051);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0036);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0036);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ break;
+ }
+}
+
+/*
+ * Enable or disable PLL for selected lanes.
+ */
+static int cdns_torrent_dp_set_pll_en(struct cdns_torrent_phy *cdns_phy,
+ struct phy_configure_opts_dp *dp,
+ bool enable)
+{
+ u32 rd_val;
+ int ret;
+ struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+
+ /*
+ * Used to determine which bits to check for or enable in the
+ * PHY_PMA_XCVR_PLLCLK_EN register.
+ */
+ u32 pll_bits;
+ /* Used to enable or disable lanes. */
+ u32 pll_val;
+
+ /*
+ * Select the register values and mask depending on the enabled lane
+ * count.
+ */
+ switch (dp->lanes) {
+ /* lane 0 */
+ case (1):
+ pll_bits = 0x00000001;
+ break;
+ /* lanes 0-1 */
+ case (2):
+ pll_bits = 0x00000003;
+ break;
+ /* lanes 0-3, all */
+ default:
+ pll_bits = 0x0000000F;
+ break;
+ }
+
+ if (enable)
+ pll_val = pll_bits;
+ else
+ pll_val = 0x00000000;
+
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, pll_val);
+
+ /* Wait for acknowledgment from PHY. */
+ ret = regmap_read_poll_timeout(regmap,
+ PHY_PMA_XCVR_PLLCLK_EN_ACK,
+ rd_val,
+ (rd_val & pll_bits) == pll_val,
+ 0, POLL_TIMEOUT_US);
+ ndelay(100);
+ return ret;
+}
+
+static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy,
+ u32 num_lanes,
+ enum phy_powerstate powerstate)
+{
+ /* Per-lane (single byte) value of the power state request register. */
+ u32 value_part;
+ u32 value;
+ u32 mask;
+ u32 read_val;
+ int ret;
+ struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+
+ switch (powerstate) {
+ case (POWERSTATE_A0):
+ value_part = 0x01U;
+ break;
+ case (POWERSTATE_A2):
+ value_part = 0x04U;
+ break;
+ default:
+ /* Powerstate A3 */
+ value_part = 0x08U;
+ break;
+ }
+
+ /*
+ * Select the register values and mask depending on the enabled lane
+ * count.
+ */
+ switch (num_lanes) {
+ /* lane 0 */
+ case (1):
+ value = value_part;
+ mask = 0x0000003FU;
+ break;
+ /* lanes 0-1 */
+ case (2):
+ value = (value_part
+ | (value_part << 8));
+ mask = 0x00003F3FU;
+ break;
+ /* lanes 0-3, all */
+ default:
+ value = (value_part
+ | (value_part << 8)
+ | (value_part << 16)
+ | (value_part << 24));
+ mask = 0x3F3F3F3FU;
+ break;
+ }
+
+ /* Set power state A<n>. */
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, value);
+ /* Wait until the PHY acknowledges power state completion. */
+ ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_POWER_STATE_ACK,
+ read_val, (read_val & mask) == value, 0,
+ POLL_TIMEOUT_US);
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, 0x00000000);
+ ndelay(100);
+
+ return ret;
+}
+
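+/*
+ * Wait for the master lane PLL clock enable ack, then bring the lanes to
+ * power state A0 via the intermediate A2 state.
+ */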
+static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy, u32 num_lanes)
+{
+ unsigned int read_val;
+ int ret;
+ struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+
+ /*
+ * waiting for ACK of pma_xcvr_pllclk_en_ln_*, only for the
+ * master lane
+ */
+ ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_PLLCLK_EN_ACK,
+ read_val, read_val & 1,
+ 0, POLL_TIMEOUT_US);
+ if (ret == -ETIMEDOUT) {
+ dev_err(cdns_phy->dev,
+ "timeout waiting for link PLL clock enable ack\n");
+ return ret;
+ }
+
+ ndelay(100);
+
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, num_lanes,
+ POWERSTATE_A2);
+ if (ret)
+ return ret;
+
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, num_lanes,
+ POWERSTATE_A0);
+
+ return ret;
+}
+
+static int cdns_torrent_dp_wait_pma_cmn_ready(struct cdns_torrent_phy *cdns_phy)
+{
+ unsigned int reg;
+ int ret;
+ struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+
+ ret = regmap_read_poll_timeout(regmap, PHY_PMA_CMN_READY, reg,
+ reg & 1, 0, POLL_TIMEOUT_US);
+ if (ret == -ETIMEDOUT) {
+ dev_err(cdns_phy->dev,
+ "timeout waiting for PMA common ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
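+/*
+ * Select the PLL clock output and high-speed clock divider for the requested
+ * DP link rate and program the same divider on every active TX lane.
+ */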
+static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy,
+ u32 rate, u32 num_lanes)
+{
+ unsigned int clk_sel_val = 0;
+ unsigned int hsclk_div_val = 0;
+ unsigned int i;
+
+ switch (rate) {
+ case 1620:
+ clk_sel_val = 0x0f01;
+ hsclk_div_val = 2;
+ break;
+ case 2160:
+ case 2430:
+ case 2700:
+ clk_sel_val = 0x0701;
+ hsclk_div_val = 1;
+ break;
+ case 3240:
+ clk_sel_val = 0x0b00;
+ hsclk_div_val = 2;
+ break;
+ case 4320:
+ case 5400:
+ clk_sel_val = 0x0301;
+ hsclk_div_val = 0;
+ break;
+ case 8100:
+ clk_sel_val = 0x0200;
+ hsclk_div_val = 0;
+ break;
+ }
+
+ cdns_torrent_phy_write(cdns_phy->regmap_common_cdb,
+ CMN_PDIAG_PLL0_CLK_SEL_M0, clk_sel_val);
+ cdns_torrent_phy_write(cdns_phy->regmap_common_cdb,
+ CMN_PDIAG_PLL1_CLK_SEL_M0, clk_sel_val);
+
+ /* PMA lane configuration to deal with multi-link operation */
+ for (i = 0; i < num_lanes; i++)
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[i],
+ XCVR_DIAG_HSCLK_DIV, hsclk_div_val);
+}
+
+/*
+ * Perform register operations related to setting link rate, once powerstate is
+ * set and PLL disable request was processed.
+ */
+static int cdns_torrent_dp_configure_rate(struct cdns_torrent_phy *cdns_phy,
+ struct phy_configure_opts_dp *dp)
+{
+ u32 read_val;
+ int ret;
+
+ /* Disable the cmn_pll0_en before re-programming the new data rate. */
+ regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, 0x0);
+
+ /*
+ * Wait for PLL ready de-assertion.
+ * For PLL0 - PHY_PMA_CMN_CTRL2[2] == 1
+ */
+ ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
+ read_val,
+ ((read_val >> 2) & 0x01) != 0,
+ 0, POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+ ndelay(200);
+
+ /* DP Rate Change - VCO Output settings. */
+ if (cdns_phy->ref_clk_rate == CLK_19_2_MHZ)
+ /* PMA common configuration 19.2MHz */
+ cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(cdns_phy, dp->link_rate, dp->ssc);
+ else if (cdns_phy->ref_clk_rate == CLK_25_MHZ)
+ /* PMA common configuration 25MHz */
+ cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(cdns_phy, dp->link_rate, dp->ssc);
+ else if (cdns_phy->ref_clk_rate == CLK_100_MHZ)
+ /* PMA common configuration 100MHz */
+ cdns_torrent_dp_pma_cmn_vco_cfg_100mhz(cdns_phy, dp->link_rate, dp->ssc);
+
+ cdns_torrent_dp_pma_cmn_rate(cdns_phy, dp->link_rate, dp->lanes);
+
+ /* Enable the cmn_pll0_en. */
+ regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, 0x3);
+
+ /*
+ * Wait for PLL ready assertion.
+ * For PLL0 - PHY_PMA_CMN_CTRL2[0] == 1
+ */
+ ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
+ read_val,
+ (read_val & 0x01) != 0,
+ 0, POLL_TIMEOUT_US);
+ return ret;
+}
+
+/*
+ * Verify that the parameters used to configure the PHY are valid.
+ */
+static int cdns_torrent_dp_verify_config(struct cdns_torrent_inst *inst,
+ struct phy_configure_opts_dp *dp)
+{
+ u8 i;
+
+ /* If a link rate change was requested, verify that the rate is supported. */
+ if (dp->set_rate) {
+ switch (dp->link_rate) {
+ case 1620:
+ case 2160:
+ case 2430:
+ case 2700:
+ case 3240:
+ case 4320:
+ case 5400:
+ case 8100:
+ /* valid bit rate */
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Verify lane count. */
+ switch (dp->lanes) {
+ case 1:
+ case 2:
+ case 4:
+ /* valid lane count. */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Check against actual number of PHY's lanes. */
+ if (dp->lanes > inst->num_lanes)
+ return -EINVAL;
+
+ /*
+ * If a voltage change is required, check the swing and pre-emphasis
+ * levels for each lane.
+ */
+ if (dp->set_voltages) {
+ /* Lane count verified previously. */
+ for (i = 0; i < dp->lanes; i++) {
+ if (dp->voltage[i] > 3 || dp->pre[i] > 3)
+ return -EINVAL;
+
+ /*
+ * Sum of voltage swing and pre-emphasis levels cannot
+ * exceed 3.
+ */
+ if (dp->voltage[i] + dp->pre[i] > 3)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/* Set power state A0 and PLL clock enable to 0 on enabled lanes. */
+static void cdns_torrent_dp_set_a0_pll(struct cdns_torrent_phy *cdns_phy,
+ u32 num_lanes)
+{
+ struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+ u32 pwr_state = cdns_torrent_dp_read(regmap,
+ PHY_PMA_XCVR_POWER_STATE_REQ);
+ u32 pll_clk_en = cdns_torrent_dp_read(regmap,
+ PHY_PMA_XCVR_PLLCLK_EN);
+
+ /* Lane 0 is always enabled. */
+ pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK <<
+ PHY_POWER_STATE_LN_0);
+ pll_clk_en &= ~0x01U;
+
+ if (num_lanes > 1) {
+ /* lane 1 */
+ pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK <<
+ PHY_POWER_STATE_LN_1);
+ pll_clk_en &= ~(0x01U << 1);
+ }
+
+ if (num_lanes > 2) {
+ /* lanes 2 and 3 */
+ pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK <<
+ PHY_POWER_STATE_LN_2);
+ pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK <<
+ PHY_POWER_STATE_LN_3);
+ pll_clk_en &= ~(0x01U << 2);
+ pll_clk_en &= ~(0x01U << 3);
+ }
+
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, pwr_state);
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, pll_clk_en);
+}
+
+/* Configure lane count as required. */
+static int cdns_torrent_dp_set_lanes(struct cdns_torrent_phy *cdns_phy,
+ struct phy_configure_opts_dp *dp)
+{
+ u32 value;
+ int ret;
+ struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+ u8 lane_mask = (1 << dp->lanes) - 1;
+
+ value = cdns_torrent_dp_read(regmap, PHY_RESET);
+ /* clear pma_tx_elec_idle_ln_* bits. */
+ value &= ~PMA_TX_ELEC_IDLE_MASK;
+ /* Assert pma_tx_elec_idle_ln_* for disabled lanes. */
+ value |= ((~lane_mask) << PMA_TX_ELEC_IDLE_SHIFT) &
+ PMA_TX_ELEC_IDLE_MASK;
+ cdns_torrent_dp_write(regmap, PHY_RESET, value);
+
+ /* reset the link by asserting phy_l00_reset_n low */
+ cdns_torrent_dp_write(regmap, PHY_RESET,
+ value & (~PHY_L00_RESET_N_MASK));
+
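+/*
+ * Reference clock output driver; its parent is muxed between the derived
+ * and received reference clocks.
+ */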
+ /*
+ * Assert lane reset on unused lanes and lane 0 so they remain in reset
+ * and powered down when re-enabling the link
+ */
+ value = (value & 0x0000FFF0) | (0x0000000E & lane_mask);
+ cdns_torrent_dp_write(regmap, PHY_RESET, value);
+
+ cdns_torrent_dp_set_a0_pll(cdns_phy, dp->lanes);
+
+ /* release phy_l0*_reset_n based on the used lane count */
+ value = (value & 0x0000FFF0) | (0x0000000F & lane_mask);
+ cdns_torrent_dp_write(regmap, PHY_RESET, value);
+
+ /* Wait until the PHY becomes ready after releasing the PHY reset signal. */
+ ret = cdns_torrent_dp_wait_pma_cmn_ready(cdns_phy);
+ if (ret)
+ return ret;
+
+ ndelay(100);
+
+ /* release pma_xcvr_pllclk_en_ln_*, only for the master lane */
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, 0x0001);
+
+ ret = cdns_torrent_dp_run(cdns_phy, dp->lanes);
+
+ return ret;
+}
+
+/* Configure link rate as required. */
+static int cdns_torrent_dp_set_rate(struct cdns_torrent_phy *cdns_phy,
+ struct phy_configure_opts_dp *dp)
+{
+ int ret;
+
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes,
+ POWERSTATE_A3);
+ if (ret)
+ return ret;
+ ret = cdns_torrent_dp_set_pll_en(cdns_phy, dp, false);
+ if (ret)
+ return ret;
+ ndelay(200);
+
+ ret = cdns_torrent_dp_configure_rate(cdns_phy, dp);
+ if (ret)
+ return ret;
+ ndelay(200);
+
+ ret = cdns_torrent_dp_set_pll_en(cdns_phy, dp, true);
+ if (ret)
+ return ret;
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes,
+ POWERSTATE_A2);
+ if (ret)
+ return ret;
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes,
+ POWERSTATE_A0);
+ if (ret)
+ return ret;
+ ndelay(900);
+
+ return ret;
+}
+
+/* Configure voltage swing and pre-emphasis for all enabled lanes. */
+static void cdns_torrent_dp_set_voltages(struct cdns_torrent_phy *cdns_phy,
+ struct phy_configure_opts_dp *dp)
+{
+ u8 lane;
+ u16 val;
+
+ for (lane = 0; lane < dp->lanes; lane++) {
+ val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[lane],
+ TX_DIAG_ACYA);
+ /*
+ * Write 1 to register bit TX_DIAG_ACYA[0] to freeze the
+ * current state of the analog TX driver.
+ */
+ val |= TX_DIAG_ACYA_HBDC_MASK;
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ TX_DIAG_ACYA, val);
+
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ TX_TXCC_CTRL, 0x08A4);
+ val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].diag_tx_drv;
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ DRV_DIAG_TX_DRV, val);
+ val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].mgnfs_mult;
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ TX_TXCC_MGNFS_MULT_000,
+ val);
+ val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].cpost_mult;
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ TX_TXCC_CPOST_MULT_00,
+ val);
+
+ val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[lane],
+ TX_DIAG_ACYA);
+ /*
+ * Write 0 to register bit TX_DIAG_ACYA[0] to allow the analog TX
+ * driver to reflect the newly programmed state.
+ */
+ val &= ~TX_DIAG_ACYA_HBDC_MASK;
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ TX_DIAG_ACYA, val);
+ }
+}
+
+static int cdns_torrent_dp_configure(struct phy *phy,
+ union phy_configure_opts *opts)
+{
+ struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
+ struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
+ int ret;
+
+ ret = cdns_torrent_dp_verify_config(inst, &opts->dp);
+ if (ret) {
+ dev_err(&phy->dev, "invalid params for phy configure\n");
+ return ret;
+ }
+
+ if (opts->dp.set_lanes) {
+ ret = cdns_torrent_dp_set_lanes(cdns_phy, &opts->dp);
+ if (ret) {
+ dev_err(&phy->dev, "cdns_torrent_dp_set_lanes failed\n");
+ return ret;
+ }
+ }
+
+ if (opts->dp.set_rate) {
+ ret = cdns_torrent_dp_set_rate(cdns_phy, &opts->dp);
+ if (ret) {
+ dev_err(&phy->dev, "cdns_torrent_dp_set_rate failed\n");
+ return ret;
+ }
+ }
+
+ if (opts->dp.set_voltages)
+ cdns_torrent_dp_set_voltages(cdns_phy, &opts->dp);
+
+ return ret;
+}
+
+static int cdns_torrent_phy_on(struct phy *phy)
+{
+ struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
+ struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
+ u32 read_val;
+ int ret;
+
+ if (cdns_phy->nsubnodes == 1) {
+ /* Take the PHY lane group out of reset */
+ reset_control_deassert(inst->lnk_rst);
+
+ /* Take the PHY out of reset */
+ ret = reset_control_deassert(cdns_phy->phy_rst);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Wait for cmn_ready assertion
+ * PHY_PMA_CMN_CTRL1[0] == 1
+ */
+ ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_1,
+ read_val, read_val, 1000,
+ PLL_LOCK_TIMEOUT);
+ if (ret) {
+ dev_err(cdns_phy->dev, "Timeout waiting for CMN ready\n");
+ return ret;
+ }
+
+ if (inst->phy_type == TYPE_PCIE || inst->phy_type == TYPE_USB) {
+ ret = regmap_field_read_poll_timeout(cdns_phy->phy_pcs_iso_link_ctrl_1[inst->mlane],
+ read_val, !read_val, 1000,
+ PLL_LOCK_TIMEOUT);
+ if (ret == -ETIMEDOUT) {
+ dev_err(cdns_phy->dev, "Timeout waiting for PHY status ready\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int cdns_torrent_phy_off(struct phy *phy)
+{
+ struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
+ struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
+ int ret;
+
+ if (cdns_phy->nsubnodes != 1)
+ return 0;
+
+ ret = reset_control_assert(cdns_phy->phy_rst);
+ if (ret)
+ return ret;
+
+ return reset_control_assert(inst->lnk_rst);
+}
+
+static void cdns_torrent_dp_common_init(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst)
+{
+ struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+ unsigned char lane_bits;
+
+ cdns_torrent_dp_write(regmap, PHY_AUX_CTRL, 0x0003); /* enable AUX */
+
+ /*
+ * Set the lane power states to A0
+ * Set the lane PLL clock enables to 0
+ */
+ cdns_torrent_dp_set_a0_pll(cdns_phy, inst->num_lanes);
+
+ /*
+ * release phy_l0*_reset_n and pma_tx_elec_idle_ln_* based on
+ * used lanes
+ */
+ lane_bits = (1 << inst->num_lanes) - 1;
+ cdns_torrent_dp_write(regmap, PHY_RESET,
+ ((0xF & ~lane_bits) << 4) | (0xF & lane_bits));
+
+ /* release pma_xcvr_pllclk_en_ln_*, only for the master lane */
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, 0x0001);
+
+ /*
+ * Call the PHY PMA register configuration functions to initialize the
+ * PHY with the maximum supported link rate, without SSC.
+ */
+ if (cdns_phy->ref_clk_rate == CLK_19_2_MHZ)
+ cdns_torrent_dp_pma_cmn_vco_cfg_19_2mhz(cdns_phy,
+ cdns_phy->max_bit_rate,
+ false);
+ else if (cdns_phy->ref_clk_rate == CLK_25_MHZ)
+ cdns_torrent_dp_pma_cmn_vco_cfg_25mhz(cdns_phy,
+ cdns_phy->max_bit_rate,
+ false);
+ else if (cdns_phy->ref_clk_rate == CLK_100_MHZ)
+ cdns_torrent_dp_pma_cmn_vco_cfg_100mhz(cdns_phy,
+ cdns_phy->max_bit_rate,
+ false);
+
+ cdns_torrent_dp_pma_cmn_rate(cdns_phy, cdns_phy->max_bit_rate,
+ inst->num_lanes);
+
+ /* take out of reset */
+ regmap_field_write(cdns_phy->phy_reset_ctrl, 0x1);
+}
+
+static int cdns_torrent_dp_start(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
+ struct phy *phy)
+{
+ int ret;
+
+ cdns_torrent_phy_on(phy);
+
+ ret = cdns_torrent_dp_wait_pma_cmn_ready(cdns_phy);
+ if (ret)
+ return ret;
+
+ ret = cdns_torrent_dp_run(cdns_phy, inst->num_lanes);
+
+ return ret;
+}
+
+static int cdns_torrent_dp_init(struct phy *phy)
+{
+ struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
+ struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
+
+ switch (cdns_phy->ref_clk_rate) {
+ case CLK_19_2_MHZ:
+ case CLK_25_MHZ:
+ case CLK_100_MHZ:
+ /* Valid Ref Clock Rate */
+ break;
+ default:
+ dev_err(cdns_phy->dev, "Unsupported Ref Clock Rate\n");
+ return -EINVAL;
+ }
+
+ cdns_torrent_dp_common_init(cdns_phy, inst);
+
+ return cdns_torrent_dp_start(cdns_phy, inst, phy);
+}
+
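+/*
+ * The derived reference clock output is controlled through the
+ * PHY_PIPE_CMN_CTRL1[0] and CMN_CDIAG_REFCLK_OVRD[4] register fields,
+ * which the clock ops below toggle.
+ */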
+static int cdns_torrent_derived_refclk_enable(struct clk_hw *hw)
+{
+ struct cdns_torrent_derived_refclk *derived_refclk = to_cdns_torrent_derived_refclk(hw);
+
+ regmap_field_write(derived_refclk->cmn_cdiag_refclk_ovrd_4, 1);
+ regmap_field_write(derived_refclk->phy_pipe_cmn_ctrl1_0, 1);
+
+ return 0;
+}
+
+static void cdns_torrent_derived_refclk_disable(struct clk_hw *hw)
+{
+ struct cdns_torrent_derived_refclk *derived_refclk = to_cdns_torrent_derived_refclk(hw);
+
+ regmap_field_write(derived_refclk->phy_pipe_cmn_ctrl1_0, 0);
+ regmap_field_write(derived_refclk->cmn_cdiag_refclk_ovrd_4, 0);
+}
+
+static int cdns_torrent_derived_refclk_is_enabled(struct clk_hw *hw)
+{
+ struct cdns_torrent_derived_refclk *derived_refclk = to_cdns_torrent_derived_refclk(hw);
+ int val;
+
+ regmap_field_read(derived_refclk->cmn_cdiag_refclk_ovrd_4, &val);
+
+ return !!val;
+}
+
+static const struct clk_ops cdns_torrent_derived_refclk_ops = {
+ .enable = cdns_torrent_derived_refclk_enable,
+ .disable = cdns_torrent_derived_refclk_disable,
+ .is_enabled = cdns_torrent_derived_refclk_is_enabled,
+};
+
+static int cdns_torrent_derived_refclk_register(struct cdns_torrent_phy *cdns_phy)
+{
+ struct cdns_torrent_derived_refclk *derived_refclk;
+ struct device *dev = cdns_phy->dev;
+ struct clk_init_data *init;
+ const char *parent_name;
+ char clk_name[100];
+ struct clk_hw *hw;
+ struct clk *clk;
+ int ret;
+
+ derived_refclk = devm_kzalloc(dev, sizeof(*derived_refclk), GFP_KERNEL);
+ if (!derived_refclk)
+ return -ENOMEM;
+
+ snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
+ clk_names[CDNS_TORRENT_DERIVED_REFCLK]);
+
+ clk = devm_clk_get_optional(dev, "phy_en_refclk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "No parent clock for derived_refclk\n");
+ return PTR_ERR(clk);
+ }
+
+ init = &derived_refclk->clk_data;
+
+ if (clk) {
+ parent_name = __clk_get_name(clk);
+ init->parent_names = &parent_name;
+ init->num_parents = 1;
+ }
+ init->ops = &cdns_torrent_derived_refclk_ops;
+ init->flags = 0;
+ init->name = clk_name;
+
+ derived_refclk->phy_pipe_cmn_ctrl1_0 = cdns_phy->phy_pipe_cmn_ctrl1_0;
+ derived_refclk->cmn_cdiag_refclk_ovrd_4 = cdns_phy->cmn_cdiag_refclk_ovrd_4;
+
+ derived_refclk->hw.init = init;
+
+ hw = &derived_refclk->hw;
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret)
+ return ret;
+
+ cdns_phy->clk_hw_data->hws[CDNS_TORRENT_DERIVED_REFCLK] = hw;
+
+ return 0;
+}
+
+static int cdns_torrent_received_refclk_enable(struct clk_hw *hw)
+{
+ struct cdns_torrent_received_refclk *received_refclk = to_cdns_torrent_received_refclk(hw);
+
+ regmap_field_write(received_refclk->phy_pipe_cmn_ctrl1_0, 1);
+
+ return 0;
+}
+
+static void cdns_torrent_received_refclk_disable(struct clk_hw *hw)
+{
+ struct cdns_torrent_received_refclk *received_refclk = to_cdns_torrent_received_refclk(hw);
+
+ regmap_field_write(received_refclk->phy_pipe_cmn_ctrl1_0, 0);
+}
+
+static int cdns_torrent_received_refclk_is_enabled(struct clk_hw *hw)
+{
+ struct cdns_torrent_received_refclk *received_refclk = to_cdns_torrent_received_refclk(hw);
+ int val, cmn_val;
+
+ regmap_field_read(received_refclk->phy_pipe_cmn_ctrl1_0, &val);
+ regmap_field_read(received_refclk->cmn_cdiag_refclk_ovrd_4, &cmn_val);
+
+ return val && !cmn_val;
+}
+
+static const struct clk_ops cdns_torrent_received_refclk_ops = {
+ .enable = cdns_torrent_received_refclk_enable,
+ .disable = cdns_torrent_received_refclk_disable,
+ .is_enabled = cdns_torrent_received_refclk_is_enabled,
+};
+
+static int cdns_torrent_received_refclk_register(struct cdns_torrent_phy *cdns_phy)
+{
+ struct cdns_torrent_received_refclk *received_refclk;
+ struct device *dev = cdns_phy->dev;
+ struct clk_init_data *init;
+ const char *parent_name;
+ char clk_name[100];
+ struct clk_hw *hw;
+ struct clk *clk;
+ int ret;
+
+ received_refclk = devm_kzalloc(dev, sizeof(*received_refclk), GFP_KERNEL);
+ if (!received_refclk)
+ return -ENOMEM;
+
+ snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
+ clk_names[CDNS_TORRENT_RECEIVED_REFCLK]);
+
+ clk = devm_clk_get_optional(dev, "phy_en_refclk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "No parent clock for received_refclk\n");
+ return PTR_ERR(clk);
+ }
+
+ init = &received_refclk->clk_data;
+
+ if (clk) {
+ parent_name = __clk_get_name(clk);
+ init->parent_names = &parent_name;
+ init->num_parents = 1;
+ }
+ init->ops = &cdns_torrent_received_refclk_ops;
+ init->flags = 0;
+ init->name = clk_name;
+
+ received_refclk->phy_pipe_cmn_ctrl1_0 = cdns_phy->phy_pipe_cmn_ctrl1_0;
+ received_refclk->cmn_cdiag_refclk_ovrd_4 = cdns_phy->cmn_cdiag_refclk_ovrd_4;
+
+ received_refclk->hw.init = init;
+
+ hw = &received_refclk->hw;
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret)
+ return ret;
+
+ cdns_phy->clk_hw_data->hws[CDNS_TORRENT_RECEIVED_REFCLK] = hw;
+
+ return 0;
+}
+
+static int cdns_torrent_refclk_driver_enable(struct clk_hw *hw)
+{
+ struct cdns_torrent_refclk_driver *refclk_driver = to_cdns_torrent_refclk_driver(hw);
+
+ regmap_field_write(refclk_driver->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_6], 0);
+ regmap_field_write(refclk_driver->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_5], 1);
+ regmap_field_write(refclk_driver->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_1], 0);
+
+ return 0;
+}
+
+static void cdns_torrent_refclk_driver_disable(struct clk_hw *hw)
+{
+ struct cdns_torrent_refclk_driver *refclk_driver = to_cdns_torrent_refclk_driver(hw);
+
+ regmap_field_write(refclk_driver->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_1], 1);
+}
+
+static int cdns_torrent_refclk_driver_is_enabled(struct clk_hw *hw)
+{
+ struct cdns_torrent_refclk_driver *refclk_driver = to_cdns_torrent_refclk_driver(hw);
+ int val;
+
+ regmap_field_read(refclk_driver->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_1], &val);
+
+ return !val;
+}
+
+static u8 cdns_torrent_refclk_driver_get_parent(struct clk_hw *hw)
+{
+ struct cdns_torrent_refclk_driver *refclk_driver = to_cdns_torrent_refclk_driver(hw);
+ unsigned int val;
+
+ regmap_field_read(refclk_driver->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_4], &val);
+ return clk_mux_val_to_index(hw, cdns_torrent_refclk_driver_mux_table, 0, val);
+}
+
+static int cdns_torrent_refclk_driver_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct cdns_torrent_refclk_driver *refclk_driver = to_cdns_torrent_refclk_driver(hw);
+ unsigned int val;
+
+ val = cdns_torrent_refclk_driver_mux_table[index];
+ return regmap_field_write(refclk_driver->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_4], val);
+}
+
+static const struct clk_ops cdns_torrent_refclk_driver_ops = {
+ .enable = cdns_torrent_refclk_driver_enable,
+ .disable = cdns_torrent_refclk_driver_disable,
+ .is_enabled = cdns_torrent_refclk_driver_is_enabled,
+ .set_parent = cdns_torrent_refclk_driver_set_parent,
+ .get_parent = cdns_torrent_refclk_driver_get_parent,
+};
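+
+/*
+ * In the refclk driver ops above, CMN_CDIAG_REFCLK_DRV0_CTRL_1 acts as the
+ * output gate (0 = enabled, 1 = disabled), while CMN_CDIAG_REFCLK_DRV0_CTRL_4
+ * selects the parent, translated through cdns_torrent_refclk_driver_mux_table.
+ */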
+
+static int cdns_torrent_refclk_driver_register(struct cdns_torrent_phy *cdns_phy)
+{
+ struct cdns_torrent_refclk_driver *refclk_driver;
+ struct device *dev = cdns_phy->dev;
+ struct regmap_field *field;
+ struct clk_init_data *init;
+ const char **parent_names;
+ unsigned int num_parents;
+ struct regmap *regmap;
+ char clk_name[100];
+ struct clk_hw *hw;
+ int i, ret;
+
+ refclk_driver = devm_kzalloc(dev, sizeof(*refclk_driver), GFP_KERNEL);
+ if (!refclk_driver)
+ return -ENOMEM;
+
+ num_parents = ARRAY_SIZE(refclk_driver_parent_index);
+ parent_names = devm_kzalloc(dev, (sizeof(char *) * num_parents), GFP_KERNEL);
+ if (!parent_names)
+ return -ENOMEM;
+
+ for (i = 0; i < num_parents; i++) {
+ hw = cdns_phy->clk_hw_data->hws[refclk_driver_parent_index[i]];
+ if (IS_ERR_OR_NULL(hw)) {
+ dev_err(dev, "No parent clock for refclk driver clock\n");
+ return IS_ERR(hw) ? PTR_ERR(hw) : -ENOENT;
+ }
+ parent_names[i] = clk_hw_get_name(hw);
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
+ clk_names[CDNS_TORRENT_REFCLK_DRIVER]);
+
+ init = &refclk_driver->clk_data;
+
+ init->ops = &cdns_torrent_refclk_driver_ops;
+ init->flags = CLK_SET_RATE_NO_REPARENT;
+ init->parent_names = parent_names;
+ init->num_parents = num_parents;
+ init->name = clk_name;
+
+ regmap = cdns_phy->regmap_common_cdb;
+
+ for (i = 0; i < REFCLK_OUT_NUM_CMN_CONFIG; i++) {
+ field = devm_regmap_field_alloc(dev, regmap, refclk_out_cmn_cfg[i]);
+ if (IS_ERR(field)) {
+ dev_err(dev, "Refclk driver CMN reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ refclk_driver->cmn_fields[i] = field;
+ }
+
+	/* Select the derived reference clock as the default parent */
+ regmap_field_write(refclk_driver->cmn_fields[CMN_CDIAG_REFCLK_DRV0_CTRL_4], 1);
+
+ refclk_driver->hw.init = init;
+
+ hw = &refclk_driver->hw;
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret)
+ return ret;
+
+ cdns_phy->clk_hw_data->hws[CDNS_TORRENT_REFCLK_DRIVER] = hw;
+
+ return 0;
+}
+
+static struct regmap *cdns_regmap_init(struct device *dev, void __iomem *base,
+ u32 block_offset,
+ u8 reg_offset_shift,
+ const struct regmap_config *config)
+{
+ struct cdns_regmap_cdb_context *ctx;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->dev = dev;
+ ctx->base = base + block_offset;
+ ctx->reg_offset_shift = reg_offset_shift;
+
+ return devm_regmap_init(dev, NULL, ctx, config);
+}
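+
+/*
+ * The context stores reg_offset_shift so that the regmap_config's custom
+ * reg_read/reg_write callbacks (defined earlier in this driver) can scale
+ * register offsets to the APB address stride of the SoC integration,
+ * presumably as address = base + (offset << reg_offset_shift).
+ */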
+
+static int cdns_torrent_dp_regfield_init(struct cdns_torrent_phy *cdns_phy)
+{
+ struct device *dev = cdns_phy->dev;
+ struct regmap_field *field;
+ struct regmap *regmap;
+
+ regmap = cdns_phy->regmap_dptx_phy_reg;
+ field = devm_regmap_field_alloc(dev, regmap, phy_reset_ctrl);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_RESET reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ cdns_phy->phy_reset_ctrl = field;
+
+ return 0;
+}
+
+static int cdns_torrent_regfield_init(struct cdns_torrent_phy *cdns_phy)
+{
+ struct device *dev = cdns_phy->dev;
+ struct regmap_field *field;
+ struct regmap *regmap;
+ int i;
+
+ regmap = cdns_phy->regmap_phy_pcs_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_PLL_CFG reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ cdns_phy->phy_pll_cfg = field;
+
+ regmap = cdns_phy->regmap_phy_pcs_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, phy_pipe_cmn_ctrl1_0);
+ if (IS_ERR(field)) {
+ dev_err(dev, "phy_pipe_cmn_ctrl1_0 reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ cdns_phy->phy_pipe_cmn_ctrl1_0 = field;
+
+ regmap = cdns_phy->regmap_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, cmn_cdiag_refclk_ovrd_4);
+ if (IS_ERR(field)) {
+ dev_err(dev, "cmn_cdiag_refclk_ovrd_4 reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ cdns_phy->cmn_cdiag_refclk_ovrd_4 = field;
+
+ regmap = cdns_phy->regmap_phy_pma_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, phy_pma_cmn_ctrl_1);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_PMA_CMN_CTRL1 reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ cdns_phy->phy_pma_cmn_ctrl_1 = field;
+
+ regmap = cdns_phy->regmap_phy_pma_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, phy_pma_cmn_ctrl_2);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_PMA_CMN_CTRL2 reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ cdns_phy->phy_pma_cmn_ctrl_2 = field;
+
+ regmap = cdns_phy->regmap_phy_pma_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, phy_pma_pll_raw_ctrl);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_PMA_PLL_RAW_CTRL reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ cdns_phy->phy_pma_pll_raw_ctrl = field;
+
+ for (i = 0; i < MAX_NUM_LANES; i++) {
+ regmap = cdns_phy->regmap_phy_pcs_lane_cdb[i];
+ field = devm_regmap_field_alloc(dev, regmap, phy_pcs_iso_link_ctrl_1);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_PCS_ISO_LINK_CTRL reg field init for ln %d failed\n", i);
+ return PTR_ERR(field);
+ }
+ cdns_phy->phy_pcs_iso_link_ctrl_1[i] = field;
+ }
+
+ return 0;
+}
+
+static int cdns_torrent_dp_regmap_init(struct cdns_torrent_phy *cdns_phy)
+{
+ void __iomem *base = cdns_phy->base;
+ struct device *dev = cdns_phy->dev;
+ struct regmap *regmap;
+ u8 reg_offset_shift;
+ u32 block_offset;
+
+ reg_offset_shift = cdns_phy->init_data->reg_offset_shift;
+
+ block_offset = TORRENT_DPTX_PHY_OFFSET;
+ regmap = cdns_regmap_init(dev, base, block_offset,
+ reg_offset_shift,
+ &cdns_torrent_dptx_phy_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init DPTX PHY regmap\n");
+ return PTR_ERR(regmap);
+ }
+ cdns_phy->regmap_dptx_phy_reg = regmap;
+
+ return 0;
+}
+
+static int cdns_torrent_regmap_init(struct cdns_torrent_phy *cdns_phy)
+{
+ void __iomem *sd_base = cdns_phy->sd_base;
+ u8 block_offset_shift, reg_offset_shift;
+ struct device *dev = cdns_phy->dev;
+ struct regmap *regmap;
+ u32 block_offset;
+ int i;
+
+ block_offset_shift = cdns_phy->init_data->block_offset_shift;
+ reg_offset_shift = cdns_phy->init_data->reg_offset_shift;
+
+ for (i = 0; i < MAX_NUM_LANES; i++) {
+ block_offset = TORRENT_TX_LANE_CDB_OFFSET(i, block_offset_shift,
+ reg_offset_shift);
+ regmap = cdns_regmap_init(dev, sd_base, block_offset,
+ reg_offset_shift,
+ &cdns_torrent_tx_lane_cdb_config[i]);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init tx lane CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ cdns_phy->regmap_tx_lane_cdb[i] = regmap;
+
+ block_offset = TORRENT_RX_LANE_CDB_OFFSET(i, block_offset_shift,
+ reg_offset_shift);
+ regmap = cdns_regmap_init(dev, sd_base, block_offset,
+ reg_offset_shift,
+ &cdns_torrent_rx_lane_cdb_config[i]);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init rx lane CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ cdns_phy->regmap_rx_lane_cdb[i] = regmap;
+
+ block_offset = TORRENT_PHY_PCS_LANE_CDB_OFFSET(i, block_offset_shift,
+ reg_offset_shift);
+ regmap = cdns_regmap_init(dev, sd_base, block_offset,
+ reg_offset_shift,
+ &cdns_torrent_phy_pcs_lane_cdb_config[i]);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PCS lane CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ cdns_phy->regmap_phy_pcs_lane_cdb[i] = regmap;
+ }
+
+ block_offset = TORRENT_COMMON_CDB_OFFSET;
+ regmap = cdns_regmap_init(dev, sd_base, block_offset,
+ reg_offset_shift,
+ &cdns_torrent_common_cdb_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init common CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ cdns_phy->regmap_common_cdb = regmap;
+
+ block_offset = TORRENT_PHY_PCS_COMMON_OFFSET(block_offset_shift);
+ regmap = cdns_regmap_init(dev, sd_base, block_offset,
+ reg_offset_shift,
+ &cdns_torrent_phy_pcs_cmn_cdb_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PCS common CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ cdns_phy->regmap_phy_pcs_common_cdb = regmap;
+
+ block_offset = TORRENT_PHY_PMA_COMMON_OFFSET(block_offset_shift);
+ regmap = cdns_regmap_init(dev, sd_base, block_offset,
+ reg_offset_shift,
+ &cdns_torrent_phy_pma_cmn_cdb_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PMA common CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ cdns_phy->regmap_phy_pma_common_cdb = regmap;
+
+ return 0;
+}
+
+static int cdns_torrent_phy_init(struct phy *phy)
+{
+ struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
+ const struct cdns_torrent_data *init_data = cdns_phy->init_data;
+ struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals;
+ enum cdns_torrent_ref_clk ref_clk = cdns_phy->ref_clk_rate;
+ struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals;
+ struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
+ enum cdns_torrent_phy_type phy_type = inst->phy_type;
+ enum cdns_torrent_ssc_mode ssc = inst->ssc_mode;
+ struct cdns_torrent_vals *pcs_cmn_vals;
+ struct cdns_reg_pairs *reg_pairs;
+ struct regmap *regmap;
+ u32 num_regs;
+ int i, j;
+
+ if (cdns_phy->nsubnodes > 1)
+ return 0;
+
+	/*
+	 * Spread spectrum generation is not required or supported
+	 * for SGMII/QSGMII.
+	 */
+ if (phy_type == TYPE_SGMII || phy_type == TYPE_QSGMII)
+ ssc = NO_SSC;
+
+ /* PHY configuration specific registers for single link */
+ link_cmn_vals = init_data->link_cmn_vals[phy_type][TYPE_NONE][ssc];
+ if (link_cmn_vals) {
+ reg_pairs = link_cmn_vals->reg_pairs;
+ num_regs = link_cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_common_cdb;
+
+		/*
+		 * The first array value in link_cmn_vals must be for the
+		 * PHY_PLL_CFG register.
+		 */
+ regmap_field_write(cdns_phy->phy_pll_cfg, reg_pairs[0].val);
+
+ for (i = 1; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
+
+ xcvr_diag_vals = init_data->xcvr_diag_vals[phy_type][TYPE_NONE][ssc];
+ if (xcvr_diag_vals) {
+ reg_pairs = xcvr_diag_vals->reg_pairs;
+ num_regs = xcvr_diag_vals->num_regs;
+ for (i = 0; i < inst->num_lanes; i++) {
+ regmap = cdns_phy->regmap_tx_lane_cdb[i + inst->mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off,
+ reg_pairs[j].val);
+ }
+ }
+
+ /* PHY PCS common registers configurations */
+ pcs_cmn_vals = init_data->pcs_cmn_vals[phy_type][TYPE_NONE][ssc];
+ if (pcs_cmn_vals) {
+ reg_pairs = pcs_cmn_vals->reg_pairs;
+ num_regs = pcs_cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_phy_pcs_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
+
+ /* PMA common registers configurations */
+ cmn_vals = init_data->cmn_vals[ref_clk][phy_type][TYPE_NONE][ssc];
+ if (cmn_vals) {
+ reg_pairs = cmn_vals->reg_pairs;
+ num_regs = cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
+
+ /* PMA TX lane registers configurations */
+ tx_ln_vals = init_data->tx_ln_vals[ref_clk][phy_type][TYPE_NONE][ssc];
+ if (tx_ln_vals) {
+ reg_pairs = tx_ln_vals->reg_pairs;
+ num_regs = tx_ln_vals->num_regs;
+ for (i = 0; i < inst->num_lanes; i++) {
+ regmap = cdns_phy->regmap_tx_lane_cdb[i + inst->mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off,
+ reg_pairs[j].val);
+ }
+ }
+
+ /* PMA RX lane registers configurations */
+ rx_ln_vals = init_data->rx_ln_vals[ref_clk][phy_type][TYPE_NONE][ssc];
+ if (rx_ln_vals) {
+ reg_pairs = rx_ln_vals->reg_pairs;
+ num_regs = rx_ln_vals->num_regs;
+ for (i = 0; i < inst->num_lanes; i++) {
+ regmap = cdns_phy->regmap_rx_lane_cdb[i + inst->mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off,
+ reg_pairs[j].val);
+ }
+ }
+
+ if (phy_type == TYPE_DP)
+ return cdns_torrent_dp_init(phy);
+
+ return 0;
+}
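+
+/*
+ * Example of the single-link lookup above: a "phy" subnode with
+ * cdns,phy-type = PHY_TYPE_SGMII and no SSC resolves link_cmn_vals to
+ * sl_sgmii_link_cmn_vals and xcvr_diag_vals to sl_sgmii_xcvr_diag_ln_vals
+ * via the [TYPE_SGMII][TYPE_NONE][NO_SSC] entries of the match data defined
+ * later in this file (e.g. cdns_map_torrent).
+ */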
+
+static const struct phy_ops cdns_torrent_phy_ops = {
+ .init = cdns_torrent_phy_init,
+ .configure = cdns_torrent_dp_configure,
+ .power_on = cdns_torrent_phy_on,
+ .power_off = cdns_torrent_phy_off,
+ .owner = THIS_MODULE,
+};
+
+static int cdns_torrent_noop_phy_on(struct phy *phy)
+{
+	/* Allow 5 ms to 10 ms for the PIPE clock to stabilize */
+ usleep_range(5000, 10000);
+
+ return 0;
+}
+
+static const struct phy_ops noop_ops = {
+ .power_on = cdns_torrent_noop_phy_on,
+ .owner = THIS_MODULE,
+};
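+
+/*
+ * noop_ops is used when the phy_pma_cmn_ctrl_1 field reads back non-zero in
+ * probe (already_configured), i.e. presumably when an earlier boot stage has
+ * already configured the PHY; only the PIPE clock settling delay is applied
+ * in that case.
+ */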
+
+static
+int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
+{
+ const struct cdns_torrent_data *init_data = cdns_phy->init_data;
+ struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals;
+ enum cdns_torrent_ref_clk ref_clk = cdns_phy->ref_clk_rate;
+ struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals;
+ enum cdns_torrent_phy_type phy_t1, phy_t2;
+ struct cdns_torrent_vals *pcs_cmn_vals;
+ int i, j, node, mlane, num_lanes, ret;
+ struct cdns_reg_pairs *reg_pairs;
+ enum cdns_torrent_ssc_mode ssc;
+ struct regmap *regmap;
+ u32 num_regs;
+
+ /* Maximum 2 links (subnodes) are supported */
+ if (cdns_phy->nsubnodes != 2)
+ return -EINVAL;
+
+ phy_t1 = cdns_phy->phys[0].phy_type;
+ phy_t2 = cdns_phy->phys[1].phy_type;
+
+	/*
+	 * First configure the PHY for the first link with phy_t1. Get the
+	 * array values as [phy_t1][phy_t2][ssc].
+	 */
+ for (node = 0; node < cdns_phy->nsubnodes; node++) {
+ if (node == 1) {
+			/*
+			 * Once the first link with phy_t1 is configured,
+			 * configure the PHY for the second link with phy_t2.
+			 * Get the array values as [phy_t2][phy_t1][ssc].
+			 */
+ swap(phy_t1, phy_t2);
+ }
+
+ mlane = cdns_phy->phys[node].mlane;
+ ssc = cdns_phy->phys[node].ssc_mode;
+ num_lanes = cdns_phy->phys[node].num_lanes;
+
+		/*
+		 * PHY configuration specific registers:
+		 * link_cmn_vals depend on the combination of PHY types being
+		 * configured and are common to both PHY types, so the array
+		 * values should be the same for [phy_t1][phy_t2][ssc] and
+		 * [phy_t2][phy_t1][ssc].
+		 * xcvr_diag_vals also depend on the combination of PHY types
+		 * being configured, but they can differ for a particular PHY
+		 * type and are applied per lane.
+		 */
+ link_cmn_vals = init_data->link_cmn_vals[phy_t1][phy_t2][ssc];
+ if (link_cmn_vals) {
+ reg_pairs = link_cmn_vals->reg_pairs;
+ num_regs = link_cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_common_cdb;
+
+			/*
+			 * The first array value in link_cmn_vals must be for
+			 * the PHY_PLL_CFG register.
+			 */
+ regmap_field_write(cdns_phy->phy_pll_cfg,
+ reg_pairs[0].val);
+
+ for (i = 1; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
+
+ xcvr_diag_vals = init_data->xcvr_diag_vals[phy_t1][phy_t2][ssc];
+ if (xcvr_diag_vals) {
+ reg_pairs = xcvr_diag_vals->reg_pairs;
+ num_regs = xcvr_diag_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off,
+ reg_pairs[j].val);
+ }
+ }
+
+ /* PHY PCS common registers configurations */
+ pcs_cmn_vals = init_data->pcs_cmn_vals[phy_t1][phy_t2][ssc];
+ if (pcs_cmn_vals) {
+ reg_pairs = pcs_cmn_vals->reg_pairs;
+ num_regs = pcs_cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_phy_pcs_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
+
+ /* PMA common registers configurations */
+ cmn_vals = init_data->cmn_vals[ref_clk][phy_t1][phy_t2][ssc];
+ if (cmn_vals) {
+ reg_pairs = cmn_vals->reg_pairs;
+ num_regs = cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
+
+ /* PMA TX lane registers configurations */
+ tx_ln_vals = init_data->tx_ln_vals[ref_clk][phy_t1][phy_t2][ssc];
+ if (tx_ln_vals) {
+ reg_pairs = tx_ln_vals->reg_pairs;
+ num_regs = tx_ln_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off,
+ reg_pairs[j].val);
+ }
+ }
+
+ /* PMA RX lane registers configurations */
+ rx_ln_vals = init_data->rx_ln_vals[ref_clk][phy_t1][phy_t2][ssc];
+ if (rx_ln_vals) {
+ reg_pairs = rx_ln_vals->reg_pairs;
+ num_regs = rx_ln_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = cdns_phy->regmap_rx_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off,
+ reg_pairs[j].val);
+ }
+ }
+
+ reset_control_deassert(cdns_phy->phys[node].lnk_rst);
+ }
+
+ /* Take the PHY out of reset */
+ ret = reset_control_deassert(cdns_phy->phy_rst);
+ if (ret)
+ return ret;
+
+ return 0;
+}
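+
+/*
+ * Example of the symmetric lookup above: for a PCIe + SGMII combination the
+ * first pass uses [TYPE_PCIE][TYPE_SGMII][ssc] and the second pass (after the
+ * swap) uses [TYPE_SGMII][TYPE_PCIE][ssc]; both resolve to
+ * pcie_sgmii_link_cmn_vals in cdns_map_torrent below, as expected for values
+ * that are common to the two links.
+ */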
+
+static void cdns_torrent_clk_cleanup(struct cdns_torrent_phy *cdns_phy)
+{
+ struct device *dev = cdns_phy->dev;
+
+ of_clk_del_provider(dev->of_node);
+}
+
+static int cdns_torrent_clk_register(struct cdns_torrent_phy *cdns_phy)
+{
+ struct device *dev = cdns_phy->dev;
+ struct device_node *node = dev->of_node;
+ struct clk_hw_onecell_data *data;
+ int ret;
+
+ data = devm_kzalloc(dev, struct_size(data, hws, CDNS_TORRENT_OUTPUT_CLOCKS), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->num = CDNS_TORRENT_OUTPUT_CLOCKS;
+ cdns_phy->clk_hw_data = data;
+
+ ret = cdns_torrent_derived_refclk_register(cdns_phy);
+ if (ret) {
+ dev_err(dev, "failed to register derived refclk\n");
+ return ret;
+ }
+
+ ret = cdns_torrent_received_refclk_register(cdns_phy);
+ if (ret) {
+ dev_err(dev, "failed to register received refclk\n");
+ return ret;
+ }
+
+ ret = cdns_torrent_refclk_driver_register(cdns_phy);
+ if (ret) {
+ dev_err(dev, "failed to register refclk driver\n");
+ return ret;
+ }
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, data);
+ if (ret) {
+ dev_err(dev, "Failed to add clock provider: %s\n", node->name);
+ return ret;
+ }
+
+ return 0;
+}
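+
+/*
+ * Illustrative consumer-side sketch (not part of this driver): the outputs
+ * registered above are exposed through of_clk_hw_onecell_get(), so a consumer
+ * selects one by index (the CDNS_TORRENT_* positions filled in above) in its
+ * "clocks" phandle and can then acquire it as usual, e.g. with a hypothetical
+ * clock-names entry:
+ *
+ *	struct clk *refclk = devm_clk_get(dev, "torrent_derived_refclk");
+ *
+ *	if (!IS_ERR(refclk))
+ *		clk_prepare_enable(refclk);
+ */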
+
+static int cdns_torrent_reset(struct cdns_torrent_phy *cdns_phy)
+{
+ struct device *dev = cdns_phy->dev;
+
+ cdns_phy->phy_rst = devm_reset_control_get_exclusive_by_index(dev, 0);
+ if (IS_ERR(cdns_phy->phy_rst)) {
+ dev_err(dev, "%s: failed to get reset\n",
+ dev->of_node->full_name);
+ return PTR_ERR(cdns_phy->phy_rst);
+ }
+
+ cdns_phy->apb_rst = devm_reset_control_get_optional_exclusive(dev, "torrent_apb");
+ if (IS_ERR(cdns_phy->apb_rst)) {
+ dev_err(dev, "%s: failed to get apb reset\n",
+ dev->of_node->full_name);
+ return PTR_ERR(cdns_phy->apb_rst);
+ }
+
+ return 0;
+}
+
+static int cdns_torrent_clk(struct cdns_torrent_phy *cdns_phy)
+{
+ struct device *dev = cdns_phy->dev;
+ unsigned long ref_clk_rate;
+ int ret;
+
+ cdns_phy->clk = devm_clk_get(dev, "refclk");
+ if (IS_ERR(cdns_phy->clk)) {
+ dev_err(dev, "phy ref clock not found\n");
+ return PTR_ERR(cdns_phy->clk);
+ }
+
+ ret = clk_prepare_enable(cdns_phy->clk);
+ if (ret) {
+ dev_err(cdns_phy->dev, "Failed to prepare ref clock\n");
+ return ret;
+ }
+
+ ref_clk_rate = clk_get_rate(cdns_phy->clk);
+ if (!ref_clk_rate) {
+ dev_err(cdns_phy->dev, "Failed to get ref clock rate\n");
+ clk_disable_unprepare(cdns_phy->clk);
+ return -EINVAL;
+ }
+
+ switch (ref_clk_rate) {
+ case REF_CLK_19_2MHZ:
+ cdns_phy->ref_clk_rate = CLK_19_2_MHZ;
+ break;
+ case REF_CLK_25MHZ:
+ cdns_phy->ref_clk_rate = CLK_25_MHZ;
+ break;
+ case REF_CLK_100MHZ:
+ cdns_phy->ref_clk_rate = CLK_100_MHZ;
+ break;
+ default:
+ dev_err(cdns_phy->dev, "Invalid Ref Clock Rate\n");
+ clk_disable_unprepare(cdns_phy->clk);
+ return -EINVAL;
+ }
+
+ return 0;
+}
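+
+/*
+ * The CLK_*_MHZ value derived above is the ref_clk index used by
+ * cdns_torrent_phy_init() and cdns_torrent_phy_configure_multilink() when
+ * selecting the per-rate cmn/tx/rx register tables.
+ */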
+
+static int cdns_torrent_phy_probe(struct platform_device *pdev)
+{
+ struct cdns_torrent_phy *cdns_phy;
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ const struct cdns_torrent_data *data;
+ struct device_node *child;
+ int ret, subnodes, node = 0, i;
+ u32 total_num_lanes = 0;
+ int already_configured;
+ u8 init_dp_regmap = 0;
+ u32 phy_type;
+
+ /* Get init data for this PHY */
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ cdns_phy = devm_kzalloc(dev, sizeof(*cdns_phy), GFP_KERNEL);
+ if (!cdns_phy)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, cdns_phy);
+ cdns_phy->dev = dev;
+ cdns_phy->init_data = data;
+
+ cdns_phy->sd_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(cdns_phy->sd_base))
+ return PTR_ERR(cdns_phy->sd_base);
+
+ subnodes = of_get_available_child_count(dev->of_node);
+ if (subnodes == 0) {
+ dev_err(dev, "No available link subnodes found\n");
+ return -EINVAL;
+ }
+
+ ret = cdns_torrent_regmap_init(cdns_phy);
+ if (ret)
+ return ret;
+
+ ret = cdns_torrent_regfield_init(cdns_phy);
+ if (ret)
+ return ret;
+
+ ret = cdns_torrent_clk_register(cdns_phy);
+ if (ret)
+ return ret;
+
+ regmap_field_read(cdns_phy->phy_pma_cmn_ctrl_1, &already_configured);
+
+ if (!already_configured) {
+ ret = cdns_torrent_reset(cdns_phy);
+ if (ret)
+ goto clk_cleanup;
+
+ ret = cdns_torrent_clk(cdns_phy);
+ if (ret)
+ goto clk_cleanup;
+
+ /* Enable APB */
+ reset_control_deassert(cdns_phy->apb_rst);
+ }
+
+ for_each_available_child_of_node(dev->of_node, child) {
+ struct phy *gphy;
+
+ /* PHY subnode name must be 'phy'. */
+ if (!(of_node_name_eq(child, "phy")))
+ continue;
+
+ cdns_phy->phys[node].lnk_rst =
+ of_reset_control_array_get_exclusive(child);
+ if (IS_ERR(cdns_phy->phys[node].lnk_rst)) {
+ dev_err(dev, "%s: failed to get reset\n",
+ child->full_name);
+ ret = PTR_ERR(cdns_phy->phys[node].lnk_rst);
+ goto put_lnk_rst;
+ }
+
+ if (of_property_read_u32(child, "reg",
+ &cdns_phy->phys[node].mlane)) {
+ dev_err(dev, "%s: No \"reg\"-property.\n",
+ child->full_name);
+ ret = -EINVAL;
+ goto put_child;
+ }
+
+ if (of_property_read_u32(child, "cdns,phy-type", &phy_type)) {
+ dev_err(dev, "%s: No \"cdns,phy-type\"-property.\n",
+ child->full_name);
+ ret = -EINVAL;
+ goto put_child;
+ }
+
+ switch (phy_type) {
+ case PHY_TYPE_PCIE:
+ cdns_phy->phys[node].phy_type = TYPE_PCIE;
+ break;
+ case PHY_TYPE_DP:
+ cdns_phy->phys[node].phy_type = TYPE_DP;
+ break;
+ case PHY_TYPE_SGMII:
+ cdns_phy->phys[node].phy_type = TYPE_SGMII;
+ break;
+ case PHY_TYPE_QSGMII:
+ cdns_phy->phys[node].phy_type = TYPE_QSGMII;
+ break;
+ case PHY_TYPE_USB3:
+ cdns_phy->phys[node].phy_type = TYPE_USB;
+ break;
+ default:
+ dev_err(dev, "Unsupported protocol\n");
+ ret = -EINVAL;
+ goto put_child;
+ }
+
+ if (of_property_read_u32(child, "cdns,num-lanes",
+ &cdns_phy->phys[node].num_lanes)) {
+ dev_err(dev, "%s: No \"cdns,num-lanes\"-property.\n",
+ child->full_name);
+ ret = -EINVAL;
+ goto put_child;
+ }
+
+ total_num_lanes += cdns_phy->phys[node].num_lanes;
+
+ /* Get SSC mode */
+ cdns_phy->phys[node].ssc_mode = NO_SSC;
+ of_property_read_u32(child, "cdns,ssc-mode",
+ &cdns_phy->phys[node].ssc_mode);
+
+ if (!already_configured)
+ gphy = devm_phy_create(dev, child, &cdns_torrent_phy_ops);
+ else
+ gphy = devm_phy_create(dev, child, &noop_ops);
+ if (IS_ERR(gphy)) {
+ ret = PTR_ERR(gphy);
+ goto put_child;
+ }
+
+ if (cdns_phy->phys[node].phy_type == TYPE_DP) {
+ switch (cdns_phy->phys[node].num_lanes) {
+ case 1:
+ case 2:
+ case 4:
+ /* valid number of lanes */
+ break;
+ default:
+ dev_err(dev, "unsupported number of lanes: %d\n",
+ cdns_phy->phys[node].num_lanes);
+ ret = -EINVAL;
+ goto put_child;
+ }
+
+ cdns_phy->max_bit_rate = DEFAULT_MAX_BIT_RATE;
+ of_property_read_u32(child, "cdns,max-bit-rate",
+ &cdns_phy->max_bit_rate);
+
+ switch (cdns_phy->max_bit_rate) {
+ case 1620:
+ case 2160:
+ case 2430:
+ case 2700:
+ case 3240:
+ case 4320:
+ case 5400:
+ case 8100:
+ /* valid bit rate */
+ break;
+ default:
+ dev_err(dev, "unsupported max bit rate: %dMbps\n",
+ cdns_phy->max_bit_rate);
+ ret = -EINVAL;
+ goto put_child;
+ }
+
+ /* DPTX registers */
+ cdns_phy->base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(cdns_phy->base)) {
+ ret = PTR_ERR(cdns_phy->base);
+ goto put_child;
+ }
+
+ if (!init_dp_regmap) {
+ ret = cdns_torrent_dp_regmap_init(cdns_phy);
+ if (ret)
+ goto put_child;
+
+ ret = cdns_torrent_dp_regfield_init(cdns_phy);
+ if (ret)
+ goto put_child;
+
+ init_dp_regmap++;
+ }
+
+ dev_dbg(dev, "DP max bit rate %d.%03d Gbps\n",
+ cdns_phy->max_bit_rate / 1000,
+ cdns_phy->max_bit_rate % 1000);
+
+ gphy->attrs.bus_width = cdns_phy->phys[node].num_lanes;
+ gphy->attrs.max_link_rate = cdns_phy->max_bit_rate;
+ gphy->attrs.mode = PHY_MODE_DP;
+ }
+
+ cdns_phy->phys[node].phy = gphy;
+ phy_set_drvdata(gphy, &cdns_phy->phys[node]);
+
+ node++;
+ }
+ cdns_phy->nsubnodes = node;
+
+ if (total_num_lanes > MAX_NUM_LANES) {
+ dev_err(dev, "Invalid lane configuration\n");
+ ret = -EINVAL;
+ goto put_lnk_rst;
+ }
+
+ if (cdns_phy->nsubnodes > 1 && !already_configured) {
+ ret = cdns_torrent_phy_configure_multilink(cdns_phy);
+ if (ret)
+ goto put_lnk_rst;
+ }
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+ ret = PTR_ERR(phy_provider);
+ goto put_lnk_rst;
+ }
+
+ if (cdns_phy->nsubnodes > 1)
+ dev_dbg(dev, "Multi-link: %s (%d lanes) & %s (%d lanes)",
+ cdns_torrent_get_phy_type(cdns_phy->phys[0].phy_type),
+ cdns_phy->phys[0].num_lanes,
+ cdns_torrent_get_phy_type(cdns_phy->phys[1].phy_type),
+ cdns_phy->phys[1].num_lanes);
+ else
+ dev_dbg(dev, "Single link: %s (%d lanes)",
+ cdns_torrent_get_phy_type(cdns_phy->phys[0].phy_type),
+ cdns_phy->phys[0].num_lanes);
+
+ return 0;
+
+put_child:
+ node++;
+put_lnk_rst:
+ for (i = 0; i < node; i++)
+ reset_control_put(cdns_phy->phys[i].lnk_rst);
+ of_node_put(child);
+ reset_control_assert(cdns_phy->apb_rst);
+ clk_disable_unprepare(cdns_phy->clk);
+clk_cleanup:
+ cdns_torrent_clk_cleanup(cdns_phy);
+ return ret;
+}
+
+static int cdns_torrent_phy_remove(struct platform_device *pdev)
+{
+ struct cdns_torrent_phy *cdns_phy = platform_get_drvdata(pdev);
+ int i;
+
+ reset_control_assert(cdns_phy->phy_rst);
+ reset_control_assert(cdns_phy->apb_rst);
+ for (i = 0; i < cdns_phy->nsubnodes; i++) {
+ reset_control_assert(cdns_phy->phys[i].lnk_rst);
+ reset_control_put(cdns_phy->phys[i].lnk_rst);
+ }
+
+ clk_disable_unprepare(cdns_phy->clk);
+ cdns_torrent_clk_cleanup(cdns_phy);
+
+ return 0;
+}
+
+/* Single DisplayPort (DP) link configuration */
+static struct cdns_reg_pairs sl_dp_link_cmn_regs[] = {
+ {0x0000, PHY_PLL_CFG},
+};
+
+static struct cdns_reg_pairs sl_dp_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals sl_dp_link_cmn_vals = {
+ .reg_pairs = sl_dp_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals sl_dp_xcvr_diag_ln_vals = {
+ .reg_pairs = sl_dp_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_xcvr_diag_ln_regs),
+};
+
+/* Single DP, 19.2 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs sl_dp_19_2_no_ssc_cmn_regs[] = {
+ {0x0014, CMN_SSM_BIAS_TMR},
+ {0x0027, CMN_PLLSM0_PLLPRE_TMR},
+ {0x00A1, CMN_PLLSM0_PLLLOCK_TMR},
+ {0x0027, CMN_PLLSM1_PLLPRE_TMR},
+ {0x00A1, CMN_PLLSM1_PLLLOCK_TMR},
+ {0x0060, CMN_BGCAL_INIT_TMR},
+ {0x0060, CMN_BGCAL_ITER_TMR},
+ {0x0014, CMN_IBCAL_INIT_TMR},
+ {0x0018, CMN_TXPUCAL_INIT_TMR},
+ {0x0005, CMN_TXPUCAL_ITER_TMR},
+ {0x0018, CMN_TXPDCAL_INIT_TMR},
+ {0x0005, CMN_TXPDCAL_ITER_TMR},
+ {0x0240, CMN_RXCAL_INIT_TMR},
+ {0x0005, CMN_RXCAL_ITER_TMR},
+ {0x0002, CMN_SD_CAL_INIT_TMR},
+ {0x0002, CMN_SD_CAL_ITER_TMR},
+ {0x000B, CMN_SD_CAL_REFTIM_START},
+ {0x0137, CMN_SD_CAL_PLLCNT_START},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
+ {0x0004, CMN_PLL0_DSM_DIAG_M0},
+ {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
+ {0x0004, CMN_PLL1_DSM_DIAG_M0},
+ {0x00C0, CMN_PLL0_VCOCAL_INIT_TMR},
+ {0x0004, CMN_PLL0_VCOCAL_ITER_TMR},
+ {0x00C0, CMN_PLL1_VCOCAL_INIT_TMR},
+ {0x0004, CMN_PLL1_VCOCAL_ITER_TMR},
+ {0x0260, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0260, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL}
+};
+
+static struct cdns_reg_pairs sl_dp_19_2_no_ssc_tx_ln_regs[] = {
+ {0x0780, TX_RCVDET_ST_TMR},
+ {0x00FB, TX_PSC_A0},
+ {0x04AA, TX_PSC_A2},
+ {0x04AA, TX_PSC_A3},
+ {0x000F, XCVR_DIAG_BIDI_CTRL}
+};
+
+static struct cdns_reg_pairs sl_dp_19_2_no_ssc_rx_ln_regs[] = {
+ {0x0000, RX_PSC_A0},
+ {0x0000, RX_PSC_A2},
+ {0x0000, RX_PSC_A3},
+ {0x0000, RX_PSC_CAL},
+ {0x0000, RX_REE_GCSM1_CTRL},
+ {0x0000, RX_REE_GCSM2_CTRL},
+ {0x0000, RX_REE_PERGCSM_CTRL}
+};
+
+static struct cdns_torrent_vals sl_dp_19_2_no_ssc_cmn_vals = {
+ .reg_pairs = sl_dp_19_2_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals sl_dp_19_2_no_ssc_tx_ln_vals = {
+ .reg_pairs = sl_dp_19_2_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals sl_dp_19_2_no_ssc_rx_ln_vals = {
+ .reg_pairs = sl_dp_19_2_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_rx_ln_regs),
+};
+
+/* Single DP, 25 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs sl_dp_25_no_ssc_cmn_regs[] = {
+ {0x0019, CMN_SSM_BIAS_TMR},
+ {0x0032, CMN_PLLSM0_PLLPRE_TMR},
+ {0x00D1, CMN_PLLSM0_PLLLOCK_TMR},
+ {0x0032, CMN_PLLSM1_PLLPRE_TMR},
+ {0x00D1, CMN_PLLSM1_PLLLOCK_TMR},
+ {0x007D, CMN_BGCAL_INIT_TMR},
+ {0x007D, CMN_BGCAL_ITER_TMR},
+ {0x0019, CMN_IBCAL_INIT_TMR},
+ {0x001E, CMN_TXPUCAL_INIT_TMR},
+ {0x0006, CMN_TXPUCAL_ITER_TMR},
+ {0x001E, CMN_TXPDCAL_INIT_TMR},
+ {0x0006, CMN_TXPDCAL_ITER_TMR},
+ {0x02EE, CMN_RXCAL_INIT_TMR},
+ {0x0006, CMN_RXCAL_ITER_TMR},
+ {0x0002, CMN_SD_CAL_INIT_TMR},
+ {0x0002, CMN_SD_CAL_ITER_TMR},
+ {0x000E, CMN_SD_CAL_REFTIM_START},
+ {0x012B, CMN_SD_CAL_PLLCNT_START},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
+ {0x0004, CMN_PLL0_DSM_DIAG_M0},
+ {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
+ {0x0004, CMN_PLL1_DSM_DIAG_M0},
+ {0x00FA, CMN_PLL0_VCOCAL_INIT_TMR},
+ {0x0004, CMN_PLL0_VCOCAL_ITER_TMR},
+ {0x00FA, CMN_PLL1_VCOCAL_INIT_TMR},
+ {0x0004, CMN_PLL1_VCOCAL_ITER_TMR},
+ {0x0317, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0317, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL}
+};
+
+static struct cdns_reg_pairs sl_dp_25_no_ssc_tx_ln_regs[] = {
+ {0x09C4, TX_RCVDET_ST_TMR},
+ {0x00FB, TX_PSC_A0},
+ {0x04AA, TX_PSC_A2},
+ {0x04AA, TX_PSC_A3},
+ {0x000F, XCVR_DIAG_BIDI_CTRL}
+};
+
+static struct cdns_reg_pairs sl_dp_25_no_ssc_rx_ln_regs[] = {
+ {0x0000, RX_PSC_A0},
+ {0x0000, RX_PSC_A2},
+ {0x0000, RX_PSC_A3},
+ {0x0000, RX_PSC_CAL},
+ {0x0000, RX_REE_GCSM1_CTRL},
+ {0x0000, RX_REE_GCSM2_CTRL},
+ {0x0000, RX_REE_PERGCSM_CTRL}
+};
+
+static struct cdns_torrent_vals sl_dp_25_no_ssc_cmn_vals = {
+ .reg_pairs = sl_dp_25_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals sl_dp_25_no_ssc_tx_ln_vals = {
+ .reg_pairs = sl_dp_25_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals sl_dp_25_no_ssc_rx_ln_vals = {
+ .reg_pairs = sl_dp_25_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_rx_ln_regs),
+};
+
+/* Single DP, 100 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs sl_dp_100_no_ssc_cmn_regs[] = {
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL}
+};
+
+static struct cdns_reg_pairs sl_dp_100_no_ssc_tx_ln_regs[] = {
+ {0x00FB, TX_PSC_A0},
+ {0x04AA, TX_PSC_A2},
+ {0x04AA, TX_PSC_A3},
+ {0x000F, XCVR_DIAG_BIDI_CTRL}
+};
+
+static struct cdns_reg_pairs sl_dp_100_no_ssc_rx_ln_regs[] = {
+ {0x0000, RX_PSC_A0},
+ {0x0000, RX_PSC_A2},
+ {0x0000, RX_PSC_A3},
+ {0x0000, RX_PSC_CAL},
+ {0x0000, RX_REE_GCSM1_CTRL},
+ {0x0000, RX_REE_GCSM2_CTRL},
+ {0x0000, RX_REE_PERGCSM_CTRL}
+};
+
+static struct cdns_torrent_vals sl_dp_100_no_ssc_cmn_vals = {
+ .reg_pairs = sl_dp_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals sl_dp_100_no_ssc_tx_ln_vals = {
+ .reg_pairs = sl_dp_100_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals sl_dp_100_no_ssc_rx_ln_vals = {
+ .reg_pairs = sl_dp_100_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_rx_ln_regs),
+};
+
+/* USB and SGMII/QSGMII link configuration */
+static struct cdns_reg_pairs usb_sgmii_link_cmn_regs[] = {
+ {0x0002, PHY_PLL_CFG},
+ {0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0},
+ {0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0}
+};
+
+static struct cdns_reg_pairs usb_sgmii_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0041, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_reg_pairs sgmii_usb_xcvr_diag_ln_regs[] = {
+ {0x0011, XCVR_DIAG_HSCLK_SEL},
+ {0x0003, XCVR_DIAG_HSCLK_DIV},
+ {0x009B, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals usb_sgmii_link_cmn_vals = {
+ .reg_pairs = usb_sgmii_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(usb_sgmii_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals usb_sgmii_xcvr_diag_ln_vals = {
+ .reg_pairs = usb_sgmii_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(usb_sgmii_xcvr_diag_ln_regs),
+};
+
+static struct cdns_torrent_vals sgmii_usb_xcvr_diag_ln_vals = {
+ .reg_pairs = sgmii_usb_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(sgmii_usb_xcvr_diag_ln_regs),
+};
+
+/* PCIe and USB Unique SSC link configuration */
+static struct cdns_reg_pairs pcie_usb_link_cmn_regs[] = {
+ {0x0003, PHY_PLL_CFG},
+ {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0},
+ {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1},
+ {0x8600, CMN_PDIAG_PLL1_CLK_SEL_M0}
+};
+
+static struct cdns_reg_pairs pcie_usb_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0012, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_reg_pairs usb_pcie_xcvr_diag_ln_regs[] = {
+ {0x0011, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x00C9, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals pcie_usb_link_cmn_vals = {
+ .reg_pairs = pcie_usb_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_usb_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals pcie_usb_xcvr_diag_ln_vals = {
+ .reg_pairs = pcie_usb_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(pcie_usb_xcvr_diag_ln_regs),
+};
+
+static struct cdns_torrent_vals usb_pcie_xcvr_diag_ln_vals = {
+ .reg_pairs = usb_pcie_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(usb_pcie_xcvr_diag_ln_regs),
+};
+
+/* USB 100 MHz Ref clk, internal SSC */
+static struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = {
+ {0x0004, CMN_PLL0_DSM_DIAG_M0},
+ {0x0004, CMN_PLL0_DSM_DIAG_M1},
+ {0x0004, CMN_PLL1_DSM_DIAG_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1},
+ {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1},
+ {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1},
+ {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
+ {0x0064, CMN_PLL0_INTDIV_M0},
+ {0x0050, CMN_PLL0_INTDIV_M1},
+ {0x0064, CMN_PLL1_INTDIV_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M1},
+ {0x0002, CMN_PLL1_FRACDIVH_M0},
+ {0x0044, CMN_PLL0_HIGH_THR_M0},
+ {0x0036, CMN_PLL0_HIGH_THR_M1},
+ {0x0044, CMN_PLL1_HIGH_THR_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M1},
+ {0x0002, CMN_PDIAG_PLL1_CTRL_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M1},
+ {0x0001, CMN_PLL1_SS_CTRL1_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M1},
+ {0x011B, CMN_PLL1_SS_CTRL2_M0},
+ {0x006E, CMN_PLL0_SS_CTRL3_M0},
+ {0x0058, CMN_PLL0_SS_CTRL3_M1},
+ {0x006E, CMN_PLL1_SS_CTRL3_M0},
+ {0x000E, CMN_PLL0_SS_CTRL4_M0},
+ {0x0012, CMN_PLL0_SS_CTRL4_M1},
+ {0x000E, CMN_PLL1_SS_CTRL4_M0},
+ {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
+ {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
+ {0x00C7, CMN_PLL0_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
+ {0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
+ {0x0005, CMN_PLL1_LOCK_PLLCNT_THR},
+ {0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
+ {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD},
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
+};
+
+static struct cdns_torrent_vals usb_100_int_ssc_cmn_vals = {
+ .reg_pairs = usb_100_int_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(usb_100_int_ssc_cmn_regs),
+};
+
+/* Single USB link configuration */
+static struct cdns_reg_pairs sl_usb_link_cmn_regs[] = {
+ {0x0000, PHY_PLL_CFG},
+ {0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0}
+};
+
+static struct cdns_reg_pairs sl_usb_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0041, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals sl_usb_link_cmn_vals = {
+ .reg_pairs = sl_usb_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_usb_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals sl_usb_xcvr_diag_ln_vals = {
+ .reg_pairs = sl_usb_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(sl_usb_xcvr_diag_ln_regs),
+};
+
+/* USB PHY PCS common configuration */
+static struct cdns_reg_pairs usb_phy_pcs_cmn_regs[] = {
+ {0x0A0A, PHY_PIPE_USB3_GEN2_PRE_CFG0},
+ {0x1000, PHY_PIPE_USB3_GEN2_POST_CFG0},
+ {0x0010, PHY_PIPE_USB3_GEN2_POST_CFG1}
+};
+
+static struct cdns_torrent_vals usb_phy_pcs_cmn_vals = {
+ .reg_pairs = usb_phy_pcs_cmn_regs,
+ .num_regs = ARRAY_SIZE(usb_phy_pcs_cmn_regs),
+};
+
+/* USB 100 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs sl_usb_100_no_ssc_cmn_regs[] = {
+ {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
+ {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL},
+ {0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
+ {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD}
+};
+
+static struct cdns_torrent_vals sl_usb_100_no_ssc_cmn_vals = {
+ .reg_pairs = sl_usb_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_usb_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_reg_pairs usb_100_no_ssc_cmn_regs[] = {
+ {0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
+ {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD},
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
+};
+
+static struct cdns_reg_pairs usb_100_no_ssc_tx_ln_regs[] = {
+ {0x02FF, TX_PSC_A0},
+ {0x06AF, TX_PSC_A1},
+ {0x06AE, TX_PSC_A2},
+ {0x06AE, TX_PSC_A3},
+ {0x2A82, TX_TXCC_CTRL},
+ {0x0014, TX_TXCC_CPOST_MULT_01},
+ {0x0003, XCVR_DIAG_PSC_OVRD}
+};
+
+static struct cdns_reg_pairs usb_100_no_ssc_rx_ln_regs[] = {
+ {0x0D1D, RX_PSC_A0},
+ {0x0D1D, RX_PSC_A1},
+ {0x0D00, RX_PSC_A2},
+ {0x0500, RX_PSC_A3},
+ {0x0013, RX_SIGDET_HL_FILT_TMR},
+ {0x0000, RX_REE_GCSM1_CTRL},
+ {0x0C02, RX_REE_ATTEN_THR},
+ {0x0330, RX_REE_SMGM_CTRL1},
+ {0x0300, RX_REE_SMGM_CTRL2},
+ {0x0019, RX_REE_TAP1_CLIP},
+ {0x0019, RX_REE_TAP2TON_CLIP},
+ {0x1004, RX_DIAG_SIGDET_TUNE},
+ {0x00F9, RX_DIAG_NQST_CTRL},
+ {0x0C01, RX_DIAG_DFE_AMP_TUNE_2},
+ {0x0002, RX_DIAG_DFE_AMP_TUNE_3},
+ {0x0000, RX_DIAG_PI_CAP},
+ {0x0031, RX_DIAG_PI_RATE},
+ {0x0001, RX_DIAG_ACYA},
+ {0x018C, RX_CDRLF_CNFG},
+ {0x0003, RX_CDRLF_CNFG3}
+};
+
+static struct cdns_torrent_vals usb_100_no_ssc_cmn_vals = {
+ .reg_pairs = usb_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(usb_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals usb_100_no_ssc_tx_ln_vals = {
+ .reg_pairs = usb_100_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(usb_100_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals usb_100_no_ssc_rx_ln_vals = {
+ .reg_pairs = usb_100_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(usb_100_no_ssc_rx_ln_regs),
+};
+
+/* Single link USB, 100 MHz Ref clk, internal SSC */
+static struct cdns_reg_pairs sl_usb_100_int_ssc_cmn_regs[] = {
+ {0x0004, CMN_PLL0_DSM_DIAG_M0},
+ {0x0004, CMN_PLL1_DSM_DIAG_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
+ {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
+ {0x0064, CMN_PLL0_INTDIV_M0},
+ {0x0064, CMN_PLL1_INTDIV_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M0},
+ {0x0002, CMN_PLL1_FRACDIVH_M0},
+ {0x0044, CMN_PLL0_HIGH_THR_M0},
+ {0x0044, CMN_PLL1_HIGH_THR_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M0},
+ {0x0002, CMN_PDIAG_PLL1_CTRL_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M0},
+ {0x0001, CMN_PLL1_SS_CTRL1_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M0},
+ {0x011B, CMN_PLL1_SS_CTRL2_M0},
+ {0x006E, CMN_PLL0_SS_CTRL3_M0},
+ {0x006E, CMN_PLL1_SS_CTRL3_M0},
+ {0x000E, CMN_PLL0_SS_CTRL4_M0},
+ {0x000E, CMN_PLL1_SS_CTRL4_M0},
+ {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
+ {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL},
+ {0x00C7, CMN_PLL0_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
+ {0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
+ {0x0005, CMN_PLL1_LOCK_PLLCNT_THR},
+ {0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
+ {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD}
+};
+
+static struct cdns_torrent_vals sl_usb_100_int_ssc_cmn_vals = {
+ .reg_pairs = sl_usb_100_int_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_usb_100_int_ssc_cmn_regs),
+};
+
+/* PCIe and SGMII/QSGMII Unique SSC link configuration */
+static struct cdns_reg_pairs pcie_sgmii_link_cmn_regs[] = {
+ {0x0003, PHY_PLL_CFG},
+ {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0},
+ {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1},
+ {0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0}
+};
+
+static struct cdns_reg_pairs pcie_sgmii_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0012, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_reg_pairs sgmii_pcie_xcvr_diag_ln_regs[] = {
+ {0x0011, XCVR_DIAG_HSCLK_SEL},
+ {0x0003, XCVR_DIAG_HSCLK_DIV},
+ {0x009B, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals pcie_sgmii_link_cmn_vals = {
+ .reg_pairs = pcie_sgmii_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_sgmii_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals pcie_sgmii_xcvr_diag_ln_vals = {
+ .reg_pairs = pcie_sgmii_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(pcie_sgmii_xcvr_diag_ln_regs),
+};
+
+static struct cdns_torrent_vals sgmii_pcie_xcvr_diag_ln_vals = {
+ .reg_pairs = sgmii_pcie_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(sgmii_pcie_xcvr_diag_ln_regs),
+};
+
+/* SGMII 100 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs sl_sgmii_100_no_ssc_cmn_regs[] = {
+ {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
+ {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL}
+};
+
+static struct cdns_torrent_vals sl_sgmii_100_no_ssc_cmn_vals = {
+ .reg_pairs = sl_sgmii_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_sgmii_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_reg_pairs sgmii_100_no_ssc_cmn_regs[] = {
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
+};
+
+static struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = {
+ {0x00F3, TX_PSC_A0},
+ {0x04A2, TX_PSC_A2},
+ {0x04A2, TX_PSC_A3},
+ {0x0000, TX_TXCC_CPOST_MULT_00},
+ {0x00B3, DRV_DIAG_TX_DRV}
+};
+
+static struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = {
+ {0x00F3, TX_PSC_A0},
+ {0x04A2, TX_PSC_A2},
+ {0x04A2, TX_PSC_A3},
+ {0x0000, TX_TXCC_CPOST_MULT_00},
+ {0x00B3, DRV_DIAG_TX_DRV},
+ {0x4000, XCVR_DIAG_RXCLK_CTRL},
+};
+
+static struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = {
+ {0x091D, RX_PSC_A0},
+ {0x0900, RX_PSC_A2},
+ {0x0100, RX_PSC_A3},
+ {0x03C7, RX_REE_GCSM1_EQENM_PH1},
+ {0x01C7, RX_REE_GCSM1_EQENM_PH2},
+ {0x0000, RX_DIAG_DFE_CTRL},
+ {0x0019, RX_REE_TAP1_CLIP},
+ {0x0019, RX_REE_TAP2TON_CLIP},
+ {0x0098, RX_DIAG_NQST_CTRL},
+ {0x0C01, RX_DIAG_DFE_AMP_TUNE_2},
+ {0x0000, RX_DIAG_DFE_AMP_TUNE_3},
+ {0x0000, RX_DIAG_PI_CAP},
+ {0x0010, RX_DIAG_PI_RATE},
+ {0x0001, RX_DIAG_ACYA},
+ {0x018C, RX_CDRLF_CNFG},
+};
+
+static struct cdns_torrent_vals sgmii_100_no_ssc_cmn_vals = {
+ .reg_pairs = sgmii_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals sgmii_100_no_ssc_tx_ln_vals = {
+ .reg_pairs = sgmii_100_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals ti_sgmii_100_no_ssc_tx_ln_vals = {
+ .reg_pairs = ti_sgmii_100_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(ti_sgmii_100_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals sgmii_100_no_ssc_rx_ln_vals = {
+ .reg_pairs = sgmii_100_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_rx_ln_regs),
+};
+
+/* SGMII 100 MHz Ref clk, internal SSC */
+static struct cdns_reg_pairs sgmii_100_int_ssc_cmn_regs[] = {
+ {0x0004, CMN_PLL0_DSM_DIAG_M0},
+ {0x0004, CMN_PLL0_DSM_DIAG_M1},
+ {0x0004, CMN_PLL1_DSM_DIAG_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1},
+ {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1},
+ {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1},
+ {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
+ {0x0064, CMN_PLL0_INTDIV_M0},
+ {0x0050, CMN_PLL0_INTDIV_M1},
+ {0x0064, CMN_PLL1_INTDIV_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M1},
+ {0x0002, CMN_PLL1_FRACDIVH_M0},
+ {0x0044, CMN_PLL0_HIGH_THR_M0},
+ {0x0036, CMN_PLL0_HIGH_THR_M1},
+ {0x0044, CMN_PLL1_HIGH_THR_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M1},
+ {0x0002, CMN_PDIAG_PLL1_CTRL_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M1},
+ {0x0001, CMN_PLL1_SS_CTRL1_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M1},
+ {0x011B, CMN_PLL1_SS_CTRL2_M0},
+ {0x006E, CMN_PLL0_SS_CTRL3_M0},
+ {0x0058, CMN_PLL0_SS_CTRL3_M1},
+ {0x006E, CMN_PLL1_SS_CTRL3_M0},
+ {0x000E, CMN_PLL0_SS_CTRL4_M0},
+ {0x0012, CMN_PLL0_SS_CTRL4_M1},
+ {0x000E, CMN_PLL1_SS_CTRL4_M0},
+ {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
+ {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
+ {0x00C7, CMN_PLL0_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
+ {0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
+ {0x0005, CMN_PLL1_LOCK_PLLCNT_THR},
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
+};
+
+static struct cdns_torrent_vals sgmii_100_int_ssc_cmn_vals = {
+ .reg_pairs = sgmii_100_int_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sgmii_100_int_ssc_cmn_regs),
+};
+
+/* QSGMII 100 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs sl_qsgmii_100_no_ssc_cmn_regs[] = {
+ {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
+ {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL}
+};
+
+static struct cdns_torrent_vals sl_qsgmii_100_no_ssc_cmn_vals = {
+ .reg_pairs = sl_qsgmii_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_qsgmii_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_reg_pairs qsgmii_100_no_ssc_cmn_regs[] = {
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
+};
+
+static struct cdns_reg_pairs qsgmii_100_no_ssc_tx_ln_regs[] = {
+ {0x00F3, TX_PSC_A0},
+ {0x04A2, TX_PSC_A2},
+ {0x04A2, TX_PSC_A3},
+ {0x0000, TX_TXCC_CPOST_MULT_00},
+ {0x0011, TX_TXCC_MGNFS_MULT_100},
+ {0x0003, DRV_DIAG_TX_DRV}
+};
+
+static struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = {
+ {0x00F3, TX_PSC_A0},
+ {0x04A2, TX_PSC_A2},
+ {0x04A2, TX_PSC_A3},
+ {0x0000, TX_TXCC_CPOST_MULT_00},
+ {0x0011, TX_TXCC_MGNFS_MULT_100},
+ {0x0003, DRV_DIAG_TX_DRV},
+ {0x4000, XCVR_DIAG_RXCLK_CTRL},
+};
+
+static struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = {
+ {0x091D, RX_PSC_A0},
+ {0x0900, RX_PSC_A2},
+ {0x0100, RX_PSC_A3},
+ {0x03C7, RX_REE_GCSM1_EQENM_PH1},
+ {0x01C7, RX_REE_GCSM1_EQENM_PH2},
+ {0x0000, RX_DIAG_DFE_CTRL},
+ {0x0019, RX_REE_TAP1_CLIP},
+ {0x0019, RX_REE_TAP2TON_CLIP},
+ {0x0098, RX_DIAG_NQST_CTRL},
+ {0x0C01, RX_DIAG_DFE_AMP_TUNE_2},
+ {0x0000, RX_DIAG_DFE_AMP_TUNE_3},
+ {0x0000, RX_DIAG_PI_CAP},
+ {0x0010, RX_DIAG_PI_RATE},
+ {0x0001, RX_DIAG_ACYA},
+ {0x018C, RX_CDRLF_CNFG},
+};
+
+static struct cdns_torrent_vals qsgmii_100_no_ssc_cmn_vals = {
+ .reg_pairs = qsgmii_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals qsgmii_100_no_ssc_tx_ln_vals = {
+ .reg_pairs = qsgmii_100_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals ti_qsgmii_100_no_ssc_tx_ln_vals = {
+ .reg_pairs = ti_qsgmii_100_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(ti_qsgmii_100_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals qsgmii_100_no_ssc_rx_ln_vals = {
+ .reg_pairs = qsgmii_100_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_rx_ln_regs),
+};
+
+/* QSGMII 100 MHz Ref clk, internal SSC */
+static struct cdns_reg_pairs qsgmii_100_int_ssc_cmn_regs[] = {
+ {0x0004, CMN_PLL0_DSM_DIAG_M0},
+ {0x0004, CMN_PLL0_DSM_DIAG_M1},
+ {0x0004, CMN_PLL1_DSM_DIAG_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1},
+ {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1},
+ {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1},
+ {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
+ {0x0064, CMN_PLL0_INTDIV_M0},
+ {0x0050, CMN_PLL0_INTDIV_M1},
+ {0x0064, CMN_PLL1_INTDIV_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M1},
+ {0x0002, CMN_PLL1_FRACDIVH_M0},
+ {0x0044, CMN_PLL0_HIGH_THR_M0},
+ {0x0036, CMN_PLL0_HIGH_THR_M1},
+ {0x0044, CMN_PLL1_HIGH_THR_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M1},
+ {0x0002, CMN_PDIAG_PLL1_CTRL_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M1},
+ {0x0001, CMN_PLL1_SS_CTRL1_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M1},
+ {0x011B, CMN_PLL1_SS_CTRL2_M0},
+ {0x006E, CMN_PLL0_SS_CTRL3_M0},
+ {0x0058, CMN_PLL0_SS_CTRL3_M1},
+ {0x006E, CMN_PLL1_SS_CTRL3_M0},
+ {0x000E, CMN_PLL0_SS_CTRL4_M0},
+ {0x0012, CMN_PLL0_SS_CTRL4_M1},
+ {0x000E, CMN_PLL1_SS_CTRL4_M0},
+ {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
+ {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
+ {0x00C7, CMN_PLL0_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
+ {0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
+ {0x0005, CMN_PLL1_LOCK_PLLCNT_THR},
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
+};
+
+static struct cdns_torrent_vals qsgmii_100_int_ssc_cmn_vals = {
+ .reg_pairs = qsgmii_100_int_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_100_int_ssc_cmn_regs),
+};
+
+/* Single SGMII/QSGMII link configuration */
+static struct cdns_reg_pairs sl_sgmii_link_cmn_regs[] = {
+ {0x0000, PHY_PLL_CFG},
+ {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0}
+};
+
+static struct cdns_reg_pairs sl_sgmii_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0003, XCVR_DIAG_HSCLK_DIV},
+ {0x0013, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals sl_sgmii_link_cmn_vals = {
+ .reg_pairs = sl_sgmii_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_sgmii_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals sl_sgmii_xcvr_diag_ln_vals = {
+ .reg_pairs = sl_sgmii_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(sl_sgmii_xcvr_diag_ln_regs),
+};
+
+/* Multi link PCIe, 100 MHz Ref clk, internal SSC */
+static struct cdns_reg_pairs pcie_100_int_ssc_cmn_regs[] = {
+ {0x0004, CMN_PLL0_DSM_DIAG_M0},
+ {0x0004, CMN_PLL0_DSM_DIAG_M1},
+ {0x0004, CMN_PLL1_DSM_DIAG_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1},
+ {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1},
+ {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1},
+ {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
+ {0x0064, CMN_PLL0_INTDIV_M0},
+ {0x0050, CMN_PLL0_INTDIV_M1},
+ {0x0064, CMN_PLL1_INTDIV_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M1},
+ {0x0002, CMN_PLL1_FRACDIVH_M0},
+ {0x0044, CMN_PLL0_HIGH_THR_M0},
+ {0x0036, CMN_PLL0_HIGH_THR_M1},
+ {0x0044, CMN_PLL1_HIGH_THR_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M1},
+ {0x0002, CMN_PDIAG_PLL1_CTRL_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M1},
+ {0x0001, CMN_PLL1_SS_CTRL1_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M1},
+ {0x011B, CMN_PLL1_SS_CTRL2_M0},
+ {0x006E, CMN_PLL0_SS_CTRL3_M0},
+ {0x0058, CMN_PLL0_SS_CTRL3_M1},
+ {0x006E, CMN_PLL1_SS_CTRL3_M0},
+ {0x000E, CMN_PLL0_SS_CTRL4_M0},
+ {0x0012, CMN_PLL0_SS_CTRL4_M1},
+ {0x000E, CMN_PLL1_SS_CTRL4_M0},
+ {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
+ {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
+ {0x00C7, CMN_PLL0_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
+ {0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
+ {0x0005, CMN_PLL1_LOCK_PLLCNT_THR}
+};
+
+static struct cdns_torrent_vals pcie_100_int_ssc_cmn_vals = {
+ .reg_pairs = pcie_100_int_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_100_int_ssc_cmn_regs),
+};
+
+/* Single link PCIe, 100 MHz Ref clk, internal SSC */
+static struct cdns_reg_pairs sl_pcie_100_int_ssc_cmn_regs[] = {
+ {0x0004, CMN_PLL0_DSM_DIAG_M0},
+ {0x0004, CMN_PLL0_DSM_DIAG_M1},
+ {0x0004, CMN_PLL1_DSM_DIAG_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1},
+ {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1},
+ {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1},
+ {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
+ {0x0064, CMN_PLL0_INTDIV_M0},
+ {0x0050, CMN_PLL0_INTDIV_M1},
+ {0x0050, CMN_PLL1_INTDIV_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M1},
+ {0x0002, CMN_PLL1_FRACDIVH_M0},
+ {0x0044, CMN_PLL0_HIGH_THR_M0},
+ {0x0036, CMN_PLL0_HIGH_THR_M1},
+ {0x0036, CMN_PLL1_HIGH_THR_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M1},
+ {0x0002, CMN_PDIAG_PLL1_CTRL_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M1},
+ {0x0001, CMN_PLL1_SS_CTRL1_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M1},
+ {0x011B, CMN_PLL1_SS_CTRL2_M0},
+ {0x006E, CMN_PLL0_SS_CTRL3_M0},
+ {0x0058, CMN_PLL0_SS_CTRL3_M1},
+ {0x0058, CMN_PLL1_SS_CTRL3_M0},
+ {0x000E, CMN_PLL0_SS_CTRL4_M0},
+ {0x0012, CMN_PLL0_SS_CTRL4_M1},
+ {0x0012, CMN_PLL1_SS_CTRL4_M0},
+ {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
+ {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
+ {0x00C7, CMN_PLL0_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
+ {0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
+ {0x0005, CMN_PLL1_LOCK_PLLCNT_THR}
+};
+
+static struct cdns_torrent_vals sl_pcie_100_int_ssc_cmn_vals = {
+ .reg_pairs = sl_pcie_100_int_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_pcie_100_int_ssc_cmn_regs),
+};
+
+/* PCIe, 100 MHz Ref clk, no SSC & external SSC */
+static struct cdns_reg_pairs pcie_100_ext_no_ssc_cmn_regs[] = {
+ {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0},
+ {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0}
+};
+
+static struct cdns_reg_pairs pcie_100_ext_no_ssc_rx_ln_regs[] = {
+ {0x0019, RX_REE_TAP1_CLIP},
+ {0x0019, RX_REE_TAP2TON_CLIP},
+ {0x0001, RX_DIAG_ACYA}
+};
+
+static struct cdns_torrent_vals pcie_100_no_ssc_cmn_vals = {
+ .reg_pairs = pcie_100_ext_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_100_ext_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals pcie_100_no_ssc_rx_ln_vals = {
+ .reg_pairs = pcie_100_ext_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(pcie_100_ext_no_ssc_rx_ln_regs),
+};
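+
+/*
+ * cdns_torrent_data describes one register-map flavour of the PHY.  The
+ * nested tables are indexed by the protocol configured on this link, the
+ * protocol configured on the other link of a multi-link setup (TYPE_NONE
+ * for single link) and the SSC mode; cmn_vals, tx_ln_vals and rx_ln_vals
+ * are additionally indexed by the reference clock rate.
+ *
+ * As a minimal sketch of how such a table is meant to be consulted (not a
+ * quote of the driver code: the index variables are illustrative, and the
+ * cdns_reg_pairs member names val/off are assumed from the initializer
+ * order used above):
+ *
+ *	struct cdns_torrent_vals *vals =
+ *		data->cmn_vals[ref_clk][phy_type][other_type][ssc_mode];
+ *
+ *	if (vals)
+ *		for (i = 0; i < vals->num_regs; i++)
+ *			regmap_write(regmap, vals->reg_pairs[i].off,
+ *				     vals->reg_pairs[i].val);
+ */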
+
+static const struct cdns_torrent_data cdns_map_torrent = {
+ .block_offset_shift = 0x2,
+ .reg_offset_shift = 0x2,
+ .link_cmn_vals = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_link_cmn_vals,
+ },
+ },
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &pcie_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &pcie_usb_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_sgmii_link_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &pcie_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &usb_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_sgmii_link_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &pcie_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &usb_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_usb_link_cmn_vals,
+ [EXTERNAL_SSC] = &sl_usb_link_cmn_vals,
+ [INTERNAL_SSC] = &sl_usb_link_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &pcie_usb_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ },
+ },
+ },
+ .xcvr_diag_vals = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_xcvr_diag_ln_vals,
+ },
+ },
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &pcie_usb_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_usb_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &usb_pcie_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ },
+ },
+ },
+ .pcs_cmn_vals = {
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &usb_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &usb_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ },
+ },
+ },
+ .cmn_vals = {
+ [CLK_19_2_MHZ] = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_19_2_no_ssc_cmn_vals,
+ },
+ },
+ },
+ [CLK_25_MHZ] = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_25_no_ssc_cmn_vals,
+ },
+ },
+ },
+ [CLK_100_MHZ] = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_100_no_ssc_cmn_vals,
+ },
+ },
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_sgmii_100_no_ssc_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sgmii_100_int_ssc_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_qsgmii_100_no_ssc_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &qsgmii_100_int_ssc_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
+ },
+ },
+ },
+ },
+ .tx_ln_vals = {
+ [CLK_19_2_MHZ] = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_19_2_no_ssc_tx_ln_vals,
+ },
+ },
+ },
+ [CLK_25_MHZ] = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_25_no_ssc_tx_ln_vals,
+ },
+ },
+ },
+ [CLK_100_MHZ] = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_100_no_ssc_tx_ln_vals,
+ },
+ },
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ },
+ },
+ },
+ },
+ .rx_ln_vals = {
+ [CLK_19_2_MHZ] = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_19_2_no_ssc_rx_ln_vals,
+ },
+ },
+ },
+ [CLK_25_MHZ] = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_25_no_ssc_rx_ln_vals,
+ },
+ },
+ },
+ [CLK_100_MHZ] = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_100_no_ssc_rx_ln_vals,
+ },
+ },
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ },
+ },
+ },
+ },
+};
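+
+/*
+ * The TI J721E flavour below reuses the same register tables; it differs
+ * from cdns_map_torrent in the block/register offset shifts (presumably
+ * reflecting how the PHY registers are strided in the SoC address map) and
+ * in picking the ti_sgmii / ti_qsgmii 100 MHz TX lane tables instead of
+ * the generic ones.
+ */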
+
+static const struct cdns_torrent_data ti_j721e_map_torrent = {
+ .block_offset_shift = 0x0,
+ .reg_offset_shift = 0x1,
+ .link_cmn_vals = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_link_cmn_vals,
+ },
+ },
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &pcie_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &pcie_usb_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_sgmii_link_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &pcie_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &usb_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_sgmii_link_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &pcie_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &usb_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_usb_link_cmn_vals,
+ [EXTERNAL_SSC] = &sl_usb_link_cmn_vals,
+ [INTERNAL_SSC] = &sl_usb_link_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &pcie_usb_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ },
+ },
+ },
+ .xcvr_diag_vals = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_xcvr_diag_ln_vals,
+ },
+ },
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &pcie_usb_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_usb_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &usb_pcie_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ },
+ },
+ },
+ .pcs_cmn_vals = {
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &usb_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &usb_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ },
+ },
+ },
+ .cmn_vals = {
+ [CLK_19_2_MHZ] = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_19_2_no_ssc_cmn_vals,
+ },
+ },
+ },
+ [CLK_25_MHZ] = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_25_no_ssc_cmn_vals,
+ },
+ },
+ },
+ [CLK_100_MHZ] = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_100_no_ssc_cmn_vals,
+ },
+ },
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_sgmii_100_no_ssc_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sgmii_100_int_ssc_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_qsgmii_100_no_ssc_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &qsgmii_100_int_ssc_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
+ },
+ },
+ },
+ },
+ .tx_ln_vals = {
+ [CLK_19_2_MHZ] = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_19_2_no_ssc_tx_ln_vals,
+ },
+ },
+ },
+ [CLK_25_MHZ] = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_25_no_ssc_tx_ln_vals,
+ },
+ },
+ },
+ [CLK_100_MHZ] = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_100_no_ssc_tx_ln_vals,
+ },
+ },
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ },
+ },
+ },
+ },
+ .rx_ln_vals = {
+ [CLK_19_2_MHZ] = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_19_2_no_ssc_rx_ln_vals,
+ },
+ },
+ },
+ [CLK_25_MHZ] = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_25_no_ssc_rx_ln_vals,
+ },
+ },
+ },
+ [CLK_100_MHZ] = {
+ [TYPE_DP] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_dp_100_no_ssc_rx_ln_vals,
+ },
+ },
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ },
+ },
+ },
+ },
+};
+
+static const struct of_device_id cdns_torrent_phy_of_match[] = {
+ {
+ .compatible = "cdns,torrent-phy",
+ .data = &cdns_map_torrent,
+ },
+ {
+ .compatible = "ti,j721e-serdes-10g",
+ .data = &ti_j721e_map_torrent,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cdns_torrent_phy_of_match);
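+
+/*
+ * The generic "cdns,torrent-phy" compatible selects cdns_map_torrent,
+ * while TI J721E SerDes instances bind through "ti,j721e-serdes-10g" and
+ * get the TI-specific map; the probe routine (defined earlier in this
+ * file) presumably picks the matched cdns_torrent_data up via
+ * of_device_get_match_data() or an equivalent of_match lookup.
+ */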
+
+static struct platform_driver cdns_torrent_phy_driver = {
+ .probe = cdns_torrent_phy_probe,
+ .remove = cdns_torrent_phy_remove,
+ .driver = {
+ .name = "cdns-torrent-phy",
+ .of_match_table = cdns_torrent_phy_of_match,
+ }
+};
+module_platform_driver(cdns_torrent_phy_driver);
+
+MODULE_AUTHOR("Cadence Design Systems, Inc.");
+MODULE_DESCRIPTION("Cadence Torrent PHY driver");
+MODULE_LICENSE("GPL v2");