author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/memory/tegra
parent    Initial commit. (diff)
download  linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
          linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip

Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/memory/tegra')
-rw-r--r--  drivers/memory/tegra/Kconfig                  |   64
-rw-r--r--  drivers/memory/tegra/Makefile                 |   25
-rw-r--r--  drivers/memory/tegra/mc.c                     | 1046
-rw-r--r--  drivers/memory/tegra/mc.h                     |  212
-rw-r--r--  drivers/memory/tegra/tegra114.c               | 1117
-rw-r--r--  drivers/memory/tegra/tegra124-emc.c           | 1535
-rw-r--r--  drivers/memory/tegra/tegra124.c               | 1311
-rw-r--r--  drivers/memory/tegra/tegra186-emc.c           |  421
-rw-r--r--  drivers/memory/tegra/tegra186.c               |  884
-rw-r--r--  drivers/memory/tegra/tegra194.c               | 1361
-rw-r--r--  drivers/memory/tegra/tegra20-emc.c            | 1279
-rw-r--r--  drivers/memory/tegra/tegra20.c                |  809
-rw-r--r--  drivers/memory/tegra/tegra210-emc-cc-r21021.c | 1774
-rw-r--r--  drivers/memory/tegra/tegra210-emc-core.c      | 2064
-rw-r--r--  drivers/memory/tegra/tegra210-emc-table.c     |   88
-rw-r--r--  drivers/memory/tegra/tegra210-emc.h           | 1016
-rw-r--r--  drivers/memory/tegra/tegra210-mc.h            |   50
-rw-r--r--  drivers/memory/tegra/tegra210.c               | 1290
-rw-r--r--  drivers/memory/tegra/tegra234.c               | 1065
-rw-r--r--  drivers/memory/tegra/tegra30-emc.c            | 1751
-rw-r--r--  drivers/memory/tegra/tegra30.c                | 1403
21 files changed, 20565 insertions, 0 deletions
diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig
new file mode 100644
index 000000000..3fe83d7c2
--- /dev/null
+++ b/drivers/memory/tegra/Kconfig
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config TEGRA_MC
+ bool "NVIDIA Tegra Memory Controller support"
+ default y
+ depends on ARCH_TEGRA || (COMPILE_TEST && COMMON_CLK)
+ select INTERCONNECT
+ help
+ This driver supports the Memory Controller (MC) hardware found on
+ NVIDIA Tegra SoCs.
+
+if TEGRA_MC
+
+config TEGRA20_EMC
+ tristate "NVIDIA Tegra20 External Memory Controller driver"
+ default y
+ depends on ARCH_TEGRA_2x_SOC || COMPILE_TEST
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select PM_DEVFREQ
+ select DDR
+ help
+ This driver is for the External Memory Controller (EMC) found on
+ Tegra20 chips. The EMC controls the external DRAM on the board.
+ This driver is required to change memory timings / clock rate for
+ external memory.
+
+config TEGRA30_EMC
+ tristate "NVIDIA Tegra30 External Memory Controller driver"
+ default y
+ depends on ARCH_TEGRA_3x_SOC || COMPILE_TEST
+ select PM_OPP
+ select DDR
+ help
+ This driver is for the External Memory Controller (EMC) found on
+ Tegra30 chips. The EMC controls the external DRAM on the board.
+ This driver is required to change memory timings / clock rate for
+ external memory.
+
+config TEGRA124_EMC
+ tristate "NVIDIA Tegra124 External Memory Controller driver"
+ default y
+ depends on ARCH_TEGRA_124_SOC || COMPILE_TEST
+ select TEGRA124_CLK_EMC if ARCH_TEGRA
+ select PM_OPP
+ help
+ This driver is for the External Memory Controller (EMC) found on
+ Tegra124 chips. The EMC controls the external DRAM on the board.
+ This driver is required to change memory timings / clock rate for
+ external memory.
+
+config TEGRA210_EMC_TABLE
+ bool
+ depends on ARCH_TEGRA_210_SOC || COMPILE_TEST
+
+config TEGRA210_EMC
+ tristate "NVIDIA Tegra210 External Memory Controller driver"
+ depends on ARCH_TEGRA_210_SOC || COMPILE_TEST
+ select TEGRA210_EMC_TABLE
+ help
+ This driver is for the External Memory Controller (EMC) found on
+ Tegra210 chips. The EMC controls the external DRAM on the board.
+ This driver is required to change memory timings / clock rate for
+ external memory.
+
+endif
diff --git a/drivers/memory/tegra/Makefile b/drivers/memory/tegra/Makefile
new file mode 100644
index 000000000..0750847da
--- /dev/null
+++ b/drivers/memory/tegra/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+tegra-mc-y := mc.o
+
+tegra-mc-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra20.o
+tegra-mc-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra30.o
+tegra-mc-$(CONFIG_ARCH_TEGRA_114_SOC) += tegra114.o
+tegra-mc-$(CONFIG_ARCH_TEGRA_124_SOC) += tegra124.o
+tegra-mc-$(CONFIG_ARCH_TEGRA_132_SOC) += tegra124.o
+tegra-mc-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210.o
+tegra-mc-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186.o
+tegra-mc-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra186.o tegra194.o
+tegra-mc-$(CONFIG_ARCH_TEGRA_234_SOC) += tegra186.o tegra234.o
+
+obj-$(CONFIG_TEGRA_MC) += tegra-mc.o
+
+obj-$(CONFIG_TEGRA20_EMC) += tegra20-emc.o
+obj-$(CONFIG_TEGRA30_EMC) += tegra30-emc.o
+obj-$(CONFIG_TEGRA124_EMC) += tegra124-emc.o
+obj-$(CONFIG_TEGRA210_EMC_TABLE) += tegra210-emc-table.o
+obj-$(CONFIG_TEGRA210_EMC) += tegra210-emc.o
+obj-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186-emc.o
+obj-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra186-emc.o
+obj-$(CONFIG_ARCH_TEGRA_234_SOC) += tegra186-emc.o
+
+tegra210-emc-y := tegra210-emc-core.o tegra210-emc-cc-r21021.o
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
new file mode 100644
index 000000000..67d6e70b4
--- /dev/null
+++ b/drivers/memory/tegra/mc.c
@@ -0,0 +1,1046 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/tegra-icc.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "mc.h"
+
+static const struct of_device_id tegra_mc_of_match[] = {
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ { .compatible = "nvidia,tegra20-mc-gart", .data = &tegra20_mc_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+ { .compatible = "nvidia,tegra30-mc", .data = &tegra30_mc_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+ { .compatible = "nvidia,tegra114-mc", .data = &tegra114_mc_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+ { .compatible = "nvidia,tegra124-mc", .data = &tegra124_mc_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_132_SOC
+ { .compatible = "nvidia,tegra132-mc", .data = &tegra132_mc_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+ { .compatible = "nvidia,tegra210-mc", .data = &tegra210_mc_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_186_SOC
+ { .compatible = "nvidia,tegra186-mc", .data = &tegra186_mc_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_194_SOC
+ { .compatible = "nvidia,tegra194-mc", .data = &tegra194_mc_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_234_SOC
+ { .compatible = "nvidia,tegra234-mc", .data = &tegra234_mc_soc },
+#endif
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tegra_mc_of_match);
+
+static void tegra_mc_devm_action_put_device(void *data)
+{
+ struct tegra_mc *mc = data;
+
+ put_device(mc->dev);
+}
+
+/**
+ * devm_tegra_memory_controller_get() - get Tegra Memory Controller handle
+ * @dev: device pointer for the consumer device
+ *
+ * This function will search for the Memory Controller node in a device-tree
+ * and retrieve the Memory Controller handle.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a struct tegra_mc.
+ */
+struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct tegra_mc *mc;
+ int err;
+
+ np = of_parse_phandle(dev->of_node, "nvidia,memory-controller", 0);
+ if (!np)
+ return ERR_PTR(-ENOENT);
+
+ pdev = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ mc = platform_get_drvdata(pdev);
+ if (!mc) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ err = devm_add_action_or_reset(dev, tegra_mc_devm_action_put_device, mc);
+ if (err)
+ return ERR_PTR(err);
+
+ return mc;
+}
+EXPORT_SYMBOL_GPL(devm_tegra_memory_controller_get);
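
/*
 * A minimal consumer-side sketch of the helper above, assuming a hypothetical
 * "foo" client driver whose device-tree node carries an
 * "nvidia,memory-controller" phandle; only devm_tegra_memory_controller_get()
 * and tegra_mc_probe_device() are taken from this file.
 */
#include <linux/err.h>
#include <linux/platform_device.h>
#include <soc/tegra/mc.h>

static int foo_probe(struct platform_device *pdev)
{
	struct tegra_mc *mc;

	/* resolves the "nvidia,memory-controller" phandle in foo's DT node */
	mc = devm_tegra_memory_controller_get(&pdev->dev);
	if (IS_ERR(mc))
		return PTR_ERR(mc);

	/* let the MC perform any per-device setup for this client */
	return tegra_mc_probe_device(mc, &pdev->dev);
}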
+
+int tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev)
+{
+ if (mc->soc->ops && mc->soc->ops->probe_device)
+ return mc->soc->ops->probe_device(mc, dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_mc_probe_device);
+
+int tegra_mc_get_carveout_info(struct tegra_mc *mc, unsigned int id,
+ phys_addr_t *base, u64 *size)
+{
+ u32 offset;
+
+ if (id < 1 || id >= mc->soc->num_carveouts)
+ return -EINVAL;
+
+ if (id < 6)
+ offset = 0xc0c + 0x50 * (id - 1);
+ else
+ offset = 0x2004 + 0x50 * (id - 6);
+
+ *base = mc_ch_readl(mc, MC_BROADCAST_CHANNEL, offset + 0x0);
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ *base |= (phys_addr_t)mc_ch_readl(mc, MC_BROADCAST_CHANNEL, offset + 0x4) << 32;
+#endif
+
+ if (size)
+ *size = mc_ch_readl(mc, MC_BROADCAST_CHANNEL, offset + 0x8) << 17;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_mc_get_carveout_info);
+
+static int tegra_mc_block_dma_common(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst)
+{
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&mc->lock, flags);
+
+ value = mc_readl(mc, rst->control) | BIT(rst->bit);
+ mc_writel(mc, value, rst->control);
+
+ spin_unlock_irqrestore(&mc->lock, flags);
+
+ return 0;
+}
+
+static bool tegra_mc_dma_idling_common(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst)
+{
+ return (mc_readl(mc, rst->status) & BIT(rst->bit)) != 0;
+}
+
+static int tegra_mc_unblock_dma_common(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst)
+{
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&mc->lock, flags);
+
+ value = mc_readl(mc, rst->control) & ~BIT(rst->bit);
+ mc_writel(mc, value, rst->control);
+
+ spin_unlock_irqrestore(&mc->lock, flags);
+
+ return 0;
+}
+
+static int tegra_mc_reset_status_common(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst)
+{
+ return (mc_readl(mc, rst->control) & BIT(rst->bit)) != 0;
+}
+
+const struct tegra_mc_reset_ops tegra_mc_reset_ops_common = {
+ .block_dma = tegra_mc_block_dma_common,
+ .dma_idling = tegra_mc_dma_idling_common,
+ .unblock_dma = tegra_mc_unblock_dma_common,
+ .reset_status = tegra_mc_reset_status_common,
+};
+
+static inline struct tegra_mc *reset_to_mc(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct tegra_mc, reset);
+}
+
+static const struct tegra_mc_reset *tegra_mc_reset_find(struct tegra_mc *mc,
+ unsigned long id)
+{
+ unsigned int i;
+
+ for (i = 0; i < mc->soc->num_resets; i++)
+ if (mc->soc->resets[i].id == id)
+ return &mc->soc->resets[i];
+
+ return NULL;
+}
+
+static int tegra_mc_hotreset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct tegra_mc *mc = reset_to_mc(rcdev);
+ const struct tegra_mc_reset_ops *rst_ops;
+ const struct tegra_mc_reset *rst;
+ int retries = 500;
+ int err;
+
+ rst = tegra_mc_reset_find(mc, id);
+ if (!rst)
+ return -ENODEV;
+
+ rst_ops = mc->soc->reset_ops;
+ if (!rst_ops)
+ return -ENODEV;
+
+ /* DMA flushing will fail if reset is already asserted */
+ if (rst_ops->reset_status) {
+ /* check whether reset is asserted */
+ if (rst_ops->reset_status(mc, rst))
+ return 0;
+ }
+
+ if (rst_ops->block_dma) {
+ /* block clients DMA requests */
+ err = rst_ops->block_dma(mc, rst);
+ if (err) {
+ dev_err(mc->dev, "failed to block %s DMA: %d\n",
+ rst->name, err);
+ return err;
+ }
+ }
+
+ if (rst_ops->dma_idling) {
+ /* wait for completion of the outstanding DMA requests */
+ while (!rst_ops->dma_idling(mc, rst)) {
+ if (!retries--) {
+ dev_err(mc->dev, "failed to flush %s DMA\n",
+ rst->name);
+ return -EBUSY;
+ }
+
+ usleep_range(10, 100);
+ }
+ }
+
+ if (rst_ops->hotreset_assert) {
+ /* clear clients DMA requests sitting before arbitration */
+ err = rst_ops->hotreset_assert(mc, rst);
+ if (err) {
+ dev_err(mc->dev, "failed to hot reset %s: %d\n",
+ rst->name, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int tegra_mc_hotreset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct tegra_mc *mc = reset_to_mc(rcdev);
+ const struct tegra_mc_reset_ops *rst_ops;
+ const struct tegra_mc_reset *rst;
+ int err;
+
+ rst = tegra_mc_reset_find(mc, id);
+ if (!rst)
+ return -ENODEV;
+
+ rst_ops = mc->soc->reset_ops;
+ if (!rst_ops)
+ return -ENODEV;
+
+ if (rst_ops->hotreset_deassert) {
+ /* take out client from hot reset */
+ err = rst_ops->hotreset_deassert(mc, rst);
+ if (err) {
+ dev_err(mc->dev, "failed to deassert hot reset %s: %d\n",
+ rst->name, err);
+ return err;
+ }
+ }
+
+ if (rst_ops->unblock_dma) {
+ /* allow new DMA requests to proceed to arbitration */
+ err = rst_ops->unblock_dma(mc, rst);
+ if (err) {
+ dev_err(mc->dev, "failed to unblock %s DMA : %d\n",
+ rst->name, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int tegra_mc_hotreset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct tegra_mc *mc = reset_to_mc(rcdev);
+ const struct tegra_mc_reset_ops *rst_ops;
+ const struct tegra_mc_reset *rst;
+
+ rst = tegra_mc_reset_find(mc, id);
+ if (!rst)
+ return -ENODEV;
+
+ rst_ops = mc->soc->reset_ops;
+ if (!rst_ops)
+ return -ENODEV;
+
+ return rst_ops->reset_status(mc, rst);
+}
+
+static const struct reset_control_ops tegra_mc_reset_ops = {
+ .assert = tegra_mc_hotreset_assert,
+ .deassert = tegra_mc_hotreset_deassert,
+ .status = tegra_mc_hotreset_status,
+};
+
+static int tegra_mc_reset_setup(struct tegra_mc *mc)
+{
+ int err;
+
+ mc->reset.ops = &tegra_mc_reset_ops;
+ mc->reset.owner = THIS_MODULE;
+ mc->reset.of_node = mc->dev->of_node;
+ mc->reset.of_reset_n_cells = 1;
+ mc->reset.nr_resets = mc->soc->num_resets;
+
+ err = reset_controller_register(&mc->reset);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
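
/*
 * A consumer-side sketch, assuming a hypothetical client device with a "mem"
 * reset phandle pointing at the controller registered above. The assert path
 * blocks the client's DMA, waits for it to idle and then holds the module in
 * hot reset, as implemented by tegra_mc_hotreset_assert(); all names below
 * other than the generic reset API are illustrative.
 */
#include <linux/device.h>
#include <linux/reset.h>

static int foo_hotreset_cycle(struct device *dev)
{
	struct reset_control *rst;
	int err;

	rst = devm_reset_control_get_exclusive(dev, "mem");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	err = reset_control_assert(rst);	/* flush + block DMA, assert hot reset */
	if (err)
		return err;

	return reset_control_deassert(rst);	/* release hot reset, unblock DMA */
}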
+
+int tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate)
+{
+ unsigned int i;
+ struct tegra_mc_timing *timing = NULL;
+
+ for (i = 0; i < mc->num_timings; i++) {
+ if (mc->timings[i].rate == rate) {
+ timing = &mc->timings[i];
+ break;
+ }
+ }
+
+ if (!timing) {
+ dev_err(mc->dev, "no memory timing registered for rate %lu\n",
+ rate);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mc->soc->num_emem_regs; ++i)
+ mc_writel(mc, timing->emem_data[i], mc->soc->emem_regs[i]);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_mc_write_emem_configuration);
+
+unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc)
+{
+ u8 dram_count;
+
+ dram_count = mc_readl(mc, MC_EMEM_ADR_CFG);
+ dram_count &= MC_EMEM_ADR_CFG_EMEM_NUMDEV;
+ dram_count++;
+
+ return dram_count;
+}
+EXPORT_SYMBOL_GPL(tegra_mc_get_emem_device_count);
+
+#if defined(CONFIG_ARCH_TEGRA_3x_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_114_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_124_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_132_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_210_SOC)
+static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
+{
+ unsigned long long tick;
+ unsigned int i;
+ u32 value;
+
+ /* compute the number of MC clock cycles per tick */
+ tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk);
+ do_div(tick, NSEC_PER_SEC);
+
+ value = mc_readl(mc, MC_EMEM_ARB_CFG);
+ value &= ~MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK;
+ value |= MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(tick);
+ mc_writel(mc, value, MC_EMEM_ARB_CFG);
+
+ /* write latency allowance defaults */
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ const struct tegra_mc_client *client = &mc->soc->clients[i];
+ u32 value;
+
+ value = mc_readl(mc, client->regs.la.reg);
+ value &= ~(client->regs.la.mask << client->regs.la.shift);
+ value |= (client->regs.la.def & client->regs.la.mask) << client->regs.la.shift;
+ mc_writel(mc, value, client->regs.la.reg);
+ }
+
+ /* latch new values */
+ mc_writel(mc, MC_TIMING_UPDATE, MC_TIMING_CONTROL);
+
+ return 0;
+}
+
+static int load_one_timing(struct tegra_mc *mc,
+ struct tegra_mc_timing *timing,
+ struct device_node *node)
+{
+ int err;
+ u32 tmp;
+
+ err = of_property_read_u32(node, "clock-frequency", &tmp);
+ if (err) {
+ dev_err(mc->dev,
+ "timing %pOFn: failed to read rate\n", node);
+ return err;
+ }
+
+ timing->rate = tmp;
+ timing->emem_data = devm_kcalloc(mc->dev, mc->soc->num_emem_regs,
+ sizeof(u32), GFP_KERNEL);
+ if (!timing->emem_data)
+ return -ENOMEM;
+
+ err = of_property_read_u32_array(node, "nvidia,emem-configuration",
+ timing->emem_data,
+ mc->soc->num_emem_regs);
+ if (err) {
+ dev_err(mc->dev,
+ "timing %pOFn: failed to read EMEM configuration\n",
+ node);
+ return err;
+ }
+
+ return 0;
+}
+
+static int load_timings(struct tegra_mc *mc, struct device_node *node)
+{
+ struct device_node *child;
+ struct tegra_mc_timing *timing;
+ int child_count = of_get_child_count(node);
+ int i = 0, err;
+
+ mc->timings = devm_kcalloc(mc->dev, child_count, sizeof(*timing),
+ GFP_KERNEL);
+ if (!mc->timings)
+ return -ENOMEM;
+
+ mc->num_timings = child_count;
+
+ for_each_child_of_node(node, child) {
+ timing = &mc->timings[i++];
+
+ err = load_one_timing(mc, timing, child);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int tegra_mc_setup_timings(struct tegra_mc *mc)
+{
+ struct device_node *node;
+ u32 ram_code, node_ram_code;
+ int err;
+
+ ram_code = tegra_read_ram_code();
+
+ mc->num_timings = 0;
+
+ for_each_child_of_node(mc->dev->of_node, node) {
+ err = of_property_read_u32(node, "nvidia,ram-code",
+ &node_ram_code);
+ if (err || (node_ram_code != ram_code))
+ continue;
+
+ err = load_timings(mc, node);
+ of_node_put(node);
+ if (err)
+ return err;
+ break;
+ }
+
+ if (mc->num_timings == 0)
+ dev_warn(mc->dev,
+ "no memory timings for RAM code %u registered\n",
+ ram_code);
+
+ return 0;
+}
+
+int tegra30_mc_probe(struct tegra_mc *mc)
+{
+ int err;
+
+ mc->clk = devm_clk_get_optional(mc->dev, "mc");
+ if (IS_ERR(mc->clk)) {
+ dev_err(mc->dev, "failed to get MC clock: %ld\n", PTR_ERR(mc->clk));
+ return PTR_ERR(mc->clk);
+ }
+
+ /* ensure that debug features are disabled */
+ mc_writel(mc, 0x00000000, MC_TIMING_CONTROL_DBG);
+
+ err = tegra_mc_setup_latency_allowance(mc);
+ if (err < 0) {
+ dev_err(mc->dev, "failed to setup latency allowance: %d\n", err);
+ return err;
+ }
+
+ err = tegra_mc_setup_timings(mc);
+ if (err < 0) {
+ dev_err(mc->dev, "failed to setup timings: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+const struct tegra_mc_ops tegra30_mc_ops = {
+ .probe = tegra30_mc_probe,
+ .handle_irq = tegra30_mc_handle_irq,
+};
+#endif
+
+static int mc_global_intstatus_to_channel(const struct tegra_mc *mc, u32 status,
+ unsigned int *mc_channel)
+{
+ if ((status & mc->soc->ch_intmask) == 0)
+ return -EINVAL;
+
+ *mc_channel = __ffs((status & mc->soc->ch_intmask) >>
+ mc->soc->global_intstatus_channel_shift);
+
+ return 0;
+}
+
+static u32 mc_channel_to_global_intstatus(const struct tegra_mc *mc,
+ unsigned int channel)
+{
+ return BIT(channel) << mc->soc->global_intstatus_channel_shift;
+}
+
+irqreturn_t tegra30_mc_handle_irq(int irq, void *data)
+{
+ struct tegra_mc *mc = data;
+ unsigned int bit, channel;
+ unsigned long status;
+
+ if (mc->soc->num_channels) {
+ u32 global_status;
+ int err;
+
+ global_status = mc_ch_readl(mc, MC_BROADCAST_CHANNEL, MC_GLOBAL_INTSTATUS);
+ err = mc_global_intstatus_to_channel(mc, global_status, &channel);
+ if (err < 0) {
+ dev_err_ratelimited(mc->dev, "unknown interrupt channel 0x%08x\n",
+ global_status);
+ return IRQ_NONE;
+ }
+
+ /* mask all interrupts to avoid flooding */
+ status = mc_ch_readl(mc, channel, MC_INTSTATUS) & mc->soc->intmask;
+ } else {
+ status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask;
+ }
+
+ if (!status)
+ return IRQ_NONE;
+
+ for_each_set_bit(bit, &status, 32) {
+ const char *error = tegra_mc_status_names[bit] ?: "unknown";
+ const char *client = "unknown", *desc;
+ const char *direction, *secure;
+ u32 status_reg, addr_reg;
+ u32 intmask = BIT(bit);
+ phys_addr_t addr = 0;
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ u32 addr_hi_reg = 0;
+#endif
+ unsigned int i;
+ char perm[7];
+ u8 id, type;
+ u32 value;
+
+ switch (intmask) {
+ case MC_INT_DECERR_VPR:
+ status_reg = MC_ERR_VPR_STATUS;
+ addr_reg = MC_ERR_VPR_ADR;
+ break;
+
+ case MC_INT_SECERR_SEC:
+ status_reg = MC_ERR_SEC_STATUS;
+ addr_reg = MC_ERR_SEC_ADR;
+ break;
+
+ case MC_INT_DECERR_MTS:
+ status_reg = MC_ERR_MTS_STATUS;
+ addr_reg = MC_ERR_MTS_ADR;
+ break;
+
+ case MC_INT_DECERR_GENERALIZED_CARVEOUT:
+ status_reg = MC_ERR_GENERALIZED_CARVEOUT_STATUS;
+ addr_reg = MC_ERR_GENERALIZED_CARVEOUT_ADR;
+ break;
+
+ case MC_INT_DECERR_ROUTE_SANITY:
+ status_reg = MC_ERR_ROUTE_SANITY_STATUS;
+ addr_reg = MC_ERR_ROUTE_SANITY_ADR;
+ break;
+
+ default:
+ status_reg = MC_ERR_STATUS;
+ addr_reg = MC_ERR_ADR;
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (mc->soc->has_addr_hi_reg)
+ addr_hi_reg = MC_ERR_ADR_HI;
+#endif
+ break;
+ }
+
+ if (mc->soc->num_channels)
+ value = mc_ch_readl(mc, channel, status_reg);
+ else
+ value = mc_readl(mc, status_reg);
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (mc->soc->num_address_bits > 32) {
+ if (addr_hi_reg) {
+ if (mc->soc->num_channels)
+ addr = mc_ch_readl(mc, channel, addr_hi_reg);
+ else
+ addr = mc_readl(mc, addr_hi_reg);
+ } else {
+ addr = ((value >> MC_ERR_STATUS_ADR_HI_SHIFT) &
+ MC_ERR_STATUS_ADR_HI_MASK);
+ }
+ addr <<= 32;
+ }
+#endif
+
+ if (value & MC_ERR_STATUS_RW)
+ direction = "write";
+ else
+ direction = "read";
+
+ if (value & MC_ERR_STATUS_SECURITY)
+ secure = "secure ";
+ else
+ secure = "";
+
+ id = value & mc->soc->client_id_mask;
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ if (mc->soc->clients[i].id == id) {
+ client = mc->soc->clients[i].name;
+ break;
+ }
+ }
+
+ type = (value & MC_ERR_STATUS_TYPE_MASK) >>
+ MC_ERR_STATUS_TYPE_SHIFT;
+ desc = tegra_mc_error_names[type];
+
+ switch (value & MC_ERR_STATUS_TYPE_MASK) {
+ case MC_ERR_STATUS_TYPE_INVALID_SMMU_PAGE:
+ perm[0] = ' ';
+ perm[1] = '[';
+
+ if (value & MC_ERR_STATUS_READABLE)
+ perm[2] = 'R';
+ else
+ perm[2] = '-';
+
+ if (value & MC_ERR_STATUS_WRITABLE)
+ perm[3] = 'W';
+ else
+ perm[3] = '-';
+
+ if (value & MC_ERR_STATUS_NONSECURE)
+ perm[4] = '-';
+ else
+ perm[4] = 'S';
+
+ perm[5] = ']';
+ perm[6] = '\0';
+ break;
+
+ default:
+ perm[0] = '\0';
+ break;
+ }
+
+ if (mc->soc->num_channels)
+ value = mc_ch_readl(mc, channel, addr_reg);
+ else
+ value = mc_readl(mc, addr_reg);
+ addr |= value;
+
+ dev_err_ratelimited(mc->dev, "%s: %s%s @%pa: %s (%s%s)\n",
+ client, secure, direction, &addr, error,
+ desc, perm);
+ }
+
+ /* clear interrupts */
+ if (mc->soc->num_channels) {
+ mc_ch_writel(mc, channel, status, MC_INTSTATUS);
+ mc_ch_writel(mc, MC_BROADCAST_CHANNEL,
+ mc_channel_to_global_intstatus(mc, channel),
+ MC_GLOBAL_INTSTATUS);
+ } else {
+ mc_writel(mc, status, MC_INTSTATUS);
+ }
+
+ return IRQ_HANDLED;
+}
+
+const char *const tegra_mc_status_names[32] = {
+ [ 1] = "External interrupt",
+ [ 6] = "EMEM address decode error",
+ [ 7] = "GART page fault",
+ [ 8] = "Security violation",
+ [ 9] = "EMEM arbitration error",
+ [10] = "Page fault",
+ [11] = "Invalid APB ASID update",
+ [12] = "VPR violation",
+ [13] = "Secure carveout violation",
+ [16] = "MTS carveout violation",
+ [17] = "Generalized carveout violation",
+ [20] = "Route Sanity error",
+};
+
+const char *const tegra_mc_error_names[8] = {
+ [2] = "EMEM decode error",
+ [3] = "TrustZone violation",
+ [4] = "Carveout violation",
+ [6] = "SMMU translation error",
+};
+
+struct icc_node *tegra_mc_icc_xlate(struct of_phandle_args *spec, void *data)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
+ struct icc_node *node;
+
+ list_for_each_entry(node, &mc->provider.nodes, node_list) {
+ if (node->id == spec->args[0])
+ return node;
+ }
+
+ /*
+ * If a client driver calls devm_of_icc_get() before the MC driver
+ * is probed, then return EPROBE_DEFER to the client driver.
+ */
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+static int tegra_mc_icc_get(struct icc_node *node, u32 *average, u32 *peak)
+{
+ *average = 0;
+ *peak = 0;
+
+ return 0;
+}
+
+static int tegra_mc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ return 0;
+}
+
+const struct tegra_mc_icc_ops tegra_mc_icc_ops = {
+ .xlate = tegra_mc_icc_xlate,
+ .aggregate = icc_std_aggregate,
+ .get_bw = tegra_mc_icc_get,
+ .set = tegra_mc_icc_set,
+};
+
+/*
+ * Memory Controller (MC) has few Memory Clients that are issuing memory
+ * bandwidth allocation requests to the MC interconnect provider. The MC
+ * provider aggregates the requests and then sends the aggregated request
+ * up to the External Memory Controller (EMC) interconnect provider which
+ * re-configures hardware interface to External Memory (EMEM) in accordance
+ * to the required bandwidth. Each MC interconnect node represents an
+ * individual Memory Client.
+ *
+ * Memory interconnect topology:
+ *
+ * +----+
+ * +--------+ | |
+ * | TEXSRD +--->+ |
+ * +--------+ | |
+ * | | +-----+ +------+
+ * ... | MC +--->+ EMC +--->+ EMEM |
+ * | | +-----+ +------+
+ * +--------+ | |
+ * | DISP.. +--->+ |
+ * +--------+ | |
+ * +----+
+ */
+static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
+{
+ struct icc_node *node;
+ unsigned int i;
+ int err;
+
+ /* older device-trees don't have interconnect properties */
+ if (!device_property_present(mc->dev, "#interconnect-cells") ||
+ !mc->soc->icc_ops)
+ return 0;
+
+ mc->provider.dev = mc->dev;
+ mc->provider.data = &mc->provider;
+ mc->provider.set = mc->soc->icc_ops->set;
+ mc->provider.aggregate = mc->soc->icc_ops->aggregate;
+ mc->provider.get_bw = mc->soc->icc_ops->get_bw;
+ mc->provider.xlate = mc->soc->icc_ops->xlate;
+ mc->provider.xlate_extended = mc->soc->icc_ops->xlate_extended;
+
+ icc_provider_init(&mc->provider);
+
+ /* create Memory Controller node */
+ node = icc_node_create(TEGRA_ICC_MC);
+ if (IS_ERR(node))
+ return PTR_ERR(node);
+
+ node->name = "Memory Controller";
+ icc_node_add(node, &mc->provider);
+
+ /* link Memory Controller to External Memory Controller */
+ err = icc_link_create(node, TEGRA_ICC_EMC);
+ if (err)
+ goto remove_nodes;
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ /* create MC client node */
+ node = icc_node_create(mc->soc->clients[i].id);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto remove_nodes;
+ }
+
+ node->name = mc->soc->clients[i].name;
+ icc_node_add(node, &mc->provider);
+
+ /* link Memory Client to Memory Controller */
+ err = icc_link_create(node, TEGRA_ICC_MC);
+ if (err)
+ goto remove_nodes;
+
+ node->data = (struct tegra_mc_client *)&(mc->soc->clients[i]);
+ }
+
+ err = icc_provider_register(&mc->provider);
+ if (err)
+ goto remove_nodes;
+
+ return 0;
+
+remove_nodes:
+ icc_nodes_remove(&mc->provider);
+
+ return err;
+}
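
/*
 * A sketch of how a memory client might use the provider set up above,
 * assuming a hypothetical device whose DT node has an "interconnects"
 * property pointing at one of the MC client nodes; the "dma-mem" path name
 * and the bandwidth figures are illustrative only.
 */
#include <linux/interconnect.h>

static int foo_request_bandwidth(struct device *dev)
{
	struct icc_path *path;

	path = devm_of_icc_get(dev, "dma-mem");
	if (IS_ERR(path))
		return PTR_ERR(path);	/* -EPROBE_DEFER until the MC has probed */

	/* request 500 MB/s average and 1000 MB/s peak towards EMEM */
	return icc_set_bw(path, MBps_to_icc(500), MBps_to_icc(1000));
}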
+
+static void tegra_mc_num_channel_enabled(struct tegra_mc *mc)
+{
+ unsigned int i;
+ u32 value;
+
+ value = mc_ch_readl(mc, 0, MC_EMEM_ADR_CFG_CHANNEL_ENABLE);
+ if (value <= 0) {
+ mc->num_channels = mc->soc->num_channels;
+ return;
+ }
+
+ for (i = 0; i < 32; i++) {
+ if (value & BIT(i))
+ mc->num_channels++;
+ }
+}
+
+static int tegra_mc_probe(struct platform_device *pdev)
+{
+ struct tegra_mc *mc;
+ u64 mask;
+ int err;
+
+ mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
+ if (!mc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mc);
+ spin_lock_init(&mc->lock);
+ mc->soc = of_device_get_match_data(&pdev->dev);
+ mc->dev = &pdev->dev;
+
+ mask = DMA_BIT_MASK(mc->soc->num_address_bits);
+
+ err = dma_coerce_mask_and_coherent(&pdev->dev, mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
+ /* length of MC tick in nanoseconds */
+ mc->tick = 30;
+
+ mc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mc->regs))
+ return PTR_ERR(mc->regs);
+
+ mc->debugfs.root = debugfs_create_dir("mc", NULL);
+
+ if (mc->soc->ops && mc->soc->ops->probe) {
+ err = mc->soc->ops->probe(mc);
+ if (err < 0)
+ return err;
+ }
+
+ tegra_mc_num_channel_enabled(mc);
+
+ if (mc->soc->ops && mc->soc->ops->handle_irq) {
+ mc->irq = platform_get_irq(pdev, 0);
+ if (mc->irq < 0)
+ return mc->irq;
+
+ WARN(!mc->soc->client_id_mask, "missing client ID mask for this SoC\n");
+
+ if (mc->soc->num_channels)
+ mc_ch_writel(mc, MC_BROADCAST_CHANNEL, mc->soc->intmask,
+ MC_INTMASK);
+ else
+ mc_writel(mc, mc->soc->intmask, MC_INTMASK);
+
+ err = devm_request_irq(&pdev->dev, mc->irq, mc->soc->ops->handle_irq, 0,
+ dev_name(&pdev->dev), mc);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", mc->irq,
+ err);
+ return err;
+ }
+ }
+
+ if (mc->soc->reset_ops) {
+ err = tegra_mc_reset_setup(mc);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to register reset controller: %d\n", err);
+ }
+
+ err = tegra_mc_interconnect_setup(mc);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to initialize interconnect: %d\n",
+ err);
+
+ if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU) && mc->soc->smmu) {
+ mc->smmu = tegra_smmu_probe(&pdev->dev, mc->soc->smmu, mc);
+ if (IS_ERR(mc->smmu)) {
+ dev_err(&pdev->dev, "failed to probe SMMU: %ld\n",
+ PTR_ERR(mc->smmu));
+ mc->smmu = NULL;
+ }
+ }
+
+ if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && !mc->soc->smmu) {
+ mc->gart = tegra_gart_probe(&pdev->dev, mc);
+ if (IS_ERR(mc->gart)) {
+ dev_err(&pdev->dev, "failed to probe GART: %ld\n",
+ PTR_ERR(mc->gart));
+ mc->gart = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_mc_suspend(struct device *dev)
+{
+ struct tegra_mc *mc = dev_get_drvdata(dev);
+
+ if (mc->soc->ops && mc->soc->ops->suspend)
+ return mc->soc->ops->suspend(mc);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_mc_resume(struct device *dev)
+{
+ struct tegra_mc *mc = dev_get_drvdata(dev);
+
+ if (mc->soc->ops && mc->soc->ops->resume)
+ return mc->soc->ops->resume(mc);
+
+ return 0;
+}
+
+static void tegra_mc_sync_state(struct device *dev)
+{
+ struct tegra_mc *mc = dev_get_drvdata(dev);
+
+ /* check whether ICC provider is registered */
+ if (mc->provider.dev == dev)
+ icc_sync_state(dev);
+}
+
+static const struct dev_pm_ops tegra_mc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_mc_suspend, tegra_mc_resume)
+};
+
+static struct platform_driver tegra_mc_driver = {
+ .driver = {
+ .name = "tegra-mc",
+ .of_match_table = tegra_mc_of_match,
+ .pm = &tegra_mc_pm_ops,
+ .suppress_bind_attrs = true,
+ .sync_state = tegra_mc_sync_state,
+ },
+ .prevent_deferred_probe = true,
+ .probe = tegra_mc_probe,
+};
+
+static int tegra_mc_init(void)
+{
+ return platform_driver_register(&tegra_mc_driver);
+}
+arch_initcall(tegra_mc_init);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra Memory Controller driver");
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
new file mode 100644
index 000000000..c3f6655be
--- /dev/null
+++ b/drivers/memory/tegra/mc.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef MEMORY_TEGRA_MC_H
+#define MEMORY_TEGRA_MC_H
+
+#include <linux/bits.h>
+#include <linux/io.h>
+#include <linux/types.h>
+
+#include <soc/tegra/mc.h>
+
+#define MC_INTSTATUS 0x00
+#define MC_INTMASK 0x04
+#define MC_ERR_STATUS 0x08
+#define MC_ERR_ADR 0x0c
+#define MC_GART_ERROR_REQ 0x30
+#define MC_EMEM_ADR_CFG 0x54
+#define MC_DECERR_EMEM_OTHERS_STATUS 0x58
+#define MC_SECURITY_VIOLATION_STATUS 0x74
+#define MC_EMEM_ARB_CFG 0x90
+#define MC_EMEM_ARB_OUTSTANDING_REQ 0x94
+#define MC_EMEM_ARB_TIMING_RCD 0x98
+#define MC_EMEM_ARB_TIMING_RP 0x9c
+#define MC_EMEM_ARB_TIMING_RC 0xa0
+#define MC_EMEM_ARB_TIMING_RAS 0xa4
+#define MC_EMEM_ARB_TIMING_FAW 0xa8
+#define MC_EMEM_ARB_TIMING_RRD 0xac
+#define MC_EMEM_ARB_TIMING_RAP2PRE 0xb0
+#define MC_EMEM_ARB_TIMING_WAP2PRE 0xb4
+#define MC_EMEM_ARB_TIMING_R2R 0xb8
+#define MC_EMEM_ARB_TIMING_W2W 0xbc
+#define MC_EMEM_ARB_TIMING_R2W 0xc0
+#define MC_EMEM_ARB_TIMING_W2R 0xc4
+#define MC_EMEM_ARB_MISC2 0xc8
+#define MC_EMEM_ARB_DA_TURNS 0xd0
+#define MC_EMEM_ARB_DA_COVERS 0xd4
+#define MC_EMEM_ARB_MISC0 0xd8
+#define MC_EMEM_ARB_MISC1 0xdc
+#define MC_EMEM_ARB_RING1_THROTTLE 0xe0
+#define MC_EMEM_ARB_OVERRIDE 0xe8
+#define MC_TIMING_CONTROL_DBG 0xf8
+#define MC_TIMING_CONTROL 0xfc
+#define MC_ERR_VPR_STATUS 0x654
+#define MC_ERR_VPR_ADR 0x658
+#define MC_ERR_SEC_STATUS 0x67c
+#define MC_ERR_SEC_ADR 0x680
+#define MC_ERR_MTS_STATUS 0x9b0
+#define MC_ERR_MTS_ADR 0x9b4
+#define MC_ERR_ROUTE_SANITY_STATUS 0x9c0
+#define MC_ERR_ROUTE_SANITY_ADR 0x9c4
+#define MC_ERR_GENERALIZED_CARVEOUT_STATUS 0xc00
+#define MC_ERR_GENERALIZED_CARVEOUT_ADR 0xc04
+#define MC_EMEM_ADR_CFG_CHANNEL_ENABLE 0xdf8
+#define MC_GLOBAL_INTSTATUS 0xf24
+#define MC_ERR_ADR_HI 0x11fc
+
+#define MC_INT_DECERR_ROUTE_SANITY BIT(20)
+#define MC_INT_DECERR_GENERALIZED_CARVEOUT BIT(17)
+#define MC_INT_DECERR_MTS BIT(16)
+#define MC_INT_SECERR_SEC BIT(13)
+#define MC_INT_DECERR_VPR BIT(12)
+#define MC_INT_INVALID_APB_ASID_UPDATE BIT(11)
+#define MC_INT_INVALID_SMMU_PAGE BIT(10)
+#define MC_INT_ARBITRATION_EMEM BIT(9)
+#define MC_INT_SECURITY_VIOLATION BIT(8)
+#define MC_INT_INVALID_GART_PAGE BIT(7)
+#define MC_INT_DECERR_EMEM BIT(6)
+
+#define MC_ERR_STATUS_TYPE_SHIFT 28
+#define MC_ERR_STATUS_TYPE_INVALID_SMMU_PAGE (0x6 << 28)
+#define MC_ERR_STATUS_TYPE_MASK (0x7 << 28)
+#define MC_ERR_STATUS_READABLE BIT(27)
+#define MC_ERR_STATUS_WRITABLE BIT(26)
+#define MC_ERR_STATUS_NONSECURE BIT(25)
+#define MC_ERR_STATUS_ADR_HI_SHIFT 20
+#define MC_ERR_STATUS_ADR_HI_MASK 0x3
+#define MC_ERR_STATUS_SECURITY BIT(17)
+#define MC_ERR_STATUS_RW BIT(16)
+
+#define MC_EMEM_ADR_CFG_EMEM_NUMDEV BIT(0)
+
+#define MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(x) ((x) & 0x1ff)
+#define MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK 0x1ff
+
+#define MC_EMEM_ARB_OUTSTANDING_REQ_MAX_MASK 0x1ff
+#define MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE BIT(30)
+#define MC_EMEM_ARB_OUTSTANDING_REQ_LIMIT_ENABLE BIT(31)
+
+#define MC_EMEM_ARB_OVERRIDE_EACK_MASK 0x3
+
+#define MC_TIMING_UPDATE BIT(0)
+
+#define MC_BROADCAST_CHANNEL ~0
+
+static inline u32 tegra_mc_scale_percents(u64 val, unsigned int percents)
+{
+ val = val * percents;
+ do_div(val, 100);
+
+ return min_t(u64, val, U32_MAX);
+}
+
+static inline struct tegra_mc *
+icc_provider_to_tegra_mc(struct icc_provider *provider)
+{
+ return container_of(provider, struct tegra_mc, provider);
+}
+
+static inline u32 mc_ch_readl(const struct tegra_mc *mc, int ch,
+ unsigned long offset)
+{
+ if (!mc->bcast_ch_regs)
+ return 0;
+
+ if (ch == MC_BROADCAST_CHANNEL)
+ return readl_relaxed(mc->bcast_ch_regs + offset);
+
+ return readl_relaxed(mc->ch_regs[ch] + offset);
+}
+
+static inline void mc_ch_writel(const struct tegra_mc *mc, int ch,
+ u32 value, unsigned long offset)
+{
+ if (!mc->bcast_ch_regs)
+ return;
+
+ if (ch == MC_BROADCAST_CHANNEL)
+ writel_relaxed(value, mc->bcast_ch_regs + offset);
+ else
+ writel_relaxed(value, mc->ch_regs[ch] + offset);
+}
+
+static inline u32 mc_readl(const struct tegra_mc *mc, unsigned long offset)
+{
+ return readl_relaxed(mc->regs + offset);
+}
+
+static inline void mc_writel(const struct tegra_mc *mc, u32 value,
+ unsigned long offset)
+{
+ writel_relaxed(value, mc->regs + offset);
+}
+
+extern const struct tegra_mc_reset_ops tegra_mc_reset_ops_common;
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+extern const struct tegra_mc_soc tegra20_mc_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+extern const struct tegra_mc_soc tegra30_mc_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+extern const struct tegra_mc_soc tegra114_mc_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+extern const struct tegra_mc_soc tegra124_mc_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_132_SOC
+extern const struct tegra_mc_soc tegra132_mc_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+extern const struct tegra_mc_soc tegra210_mc_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_186_SOC
+extern const struct tegra_mc_soc tegra186_mc_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_194_SOC
+extern const struct tegra_mc_soc tegra194_mc_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_234_SOC
+extern const struct tegra_mc_soc tegra234_mc_soc;
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_3x_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_114_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_124_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_132_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_210_SOC)
+int tegra30_mc_probe(struct tegra_mc *mc);
+extern const struct tegra_mc_ops tegra30_mc_ops;
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_186_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_194_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_234_SOC)
+extern const struct tegra_mc_ops tegra186_mc_ops;
+#endif
+
+irqreturn_t tegra30_mc_handle_irq(int irq, void *data);
+extern const char * const tegra_mc_status_names[32];
+extern const char * const tegra_mc_error_names[8];
+
+/*
+ * These IDs are for internal use of Tegra ICC drivers. The ID numbers are
+ * chosen such that they don't conflict with the device-tree ICC node IDs.
+ */
+#define TEGRA_ICC_MC 1000
+#define TEGRA_ICC_EMC 1001
+#define TEGRA_ICC_EMEM 1002
+
+#endif /* MEMORY_TEGRA_MC_H */
diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
new file mode 100644
index 000000000..41350570c
--- /dev/null
+++ b/drivers/memory/tegra/tegra114.c
@@ -0,0 +1,1117 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/mm.h>
+
+#include <dt-bindings/memory/tegra114-mc.h>
+
+#include "mc.h"
+
+static const struct tegra_mc_client tegra114_mc_clients[] = {
+ {
+ .id = 0x00,
+ .name = "ptcr",
+ .swgroup = TEGRA_SWGROUP_PTC,
+ .regs = {
+ .la = {
+ .reg = 0x34c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0,
+ },
+ },
+ }, {
+ .id = 0x01,
+ .name = "display0a",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ }, {
+ .id = 0x02,
+ .name = "display0ab",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ }, {
+ .id = 0x03,
+ .name = "display0b",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ }, {
+ .id = 0x04,
+ .name = "display0bb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ }, {
+ .id = 0x05,
+ .name = "display0c",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x2ec,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ }, {
+ .id = 0x06,
+ .name = "display0cb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x2f8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ }, {
+ .id = 0x09,
+ .name = "eppup",
+ .swgroup = TEGRA_SWGROUP_EPP,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 9,
+ },
+ .la = {
+ .reg = 0x300,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x33,
+ },
+ },
+ }, {
+ .id = 0x0a,
+ .name = "g2pr",
+ .swgroup = TEGRA_SWGROUP_G2,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x308,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x09,
+ },
+ },
+ }, {
+ .id = 0x0b,
+ .name = "g2sr",
+ .swgroup = TEGRA_SWGROUP_G2,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x308,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x09,
+ },
+ },
+ }, {
+ .id = 0x0f,
+ .name = "avpcarm7r",
+ .swgroup = TEGRA_SWGROUP_AVPC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x10,
+ .name = "displayhc",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x2f0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x68,
+ },
+ },
+ }, {
+ .id = 0x11,
+ .name = "displayhcb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2fc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x68,
+ },
+ },
+ }, {
+ .id = 0x12,
+ .name = "fdcdrd",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x334,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0c,
+ },
+ },
+ }, {
+ .id = 0x13,
+ .name = "fdcdrd2",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 19,
+ },
+ .la = {
+ .reg = 0x33c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0c,
+ },
+ },
+ }, {
+ .id = 0x14,
+ .name = "g2dr",
+ .swgroup = TEGRA_SWGROUP_G2,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x30c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0a,
+ },
+ },
+ }, {
+ .id = 0x15,
+ .name = "hdar",
+ .swgroup = TEGRA_SWGROUP_HDA,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x16,
+ .name = "host1xdmar",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x10,
+ },
+ },
+ }, {
+ .id = 0x17,
+ .name = "host1xr",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xa5,
+ },
+ },
+ }, {
+ .id = 0x18,
+ .name = "idxsrd",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 24,
+ },
+ .la = {
+ .reg = 0x334,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0b,
+ },
+ },
+ }, {
+ .id = 0x1c,
+ .name = "msencsrd",
+ .swgroup = TEGRA_SWGROUP_MSENC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x1d,
+ .name = "ppcsahbdmar",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x1e,
+ .name = "ppcsahbslvr",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xe8,
+ },
+ },
+ }, {
+ .id = 0x20,
+ .name = "texl2srd",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x338,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0c,
+ },
+ },
+ }, {
+ .id = 0x22,
+ .name = "vdebsevr",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x354,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x23,
+ .name = "vdember",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x354,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x24,
+ .name = "vdemcer",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x358,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xb8,
+ },
+ },
+ }, {
+ .id = 0x25,
+ .name = "vdetper",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x358,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xee,
+ },
+ },
+ }, {
+ .id = 0x26,
+ .name = "mpcorelpr",
+ .swgroup = TEGRA_SWGROUP_MPCORELP,
+ .regs = {
+ .la = {
+ .reg = 0x324,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x27,
+ .name = "mpcorer",
+ .swgroup = TEGRA_SWGROUP_MPCORE,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x28,
+ .name = "eppu",
+ .swgroup = TEGRA_SWGROUP_EPP,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 8,
+ },
+ .la = {
+ .reg = 0x300,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x33,
+ },
+ },
+ }, {
+ .id = 0x29,
+ .name = "eppv",
+ .swgroup = TEGRA_SWGROUP_EPP,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 9,
+ },
+ .la = {
+ .reg = 0x304,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x6c,
+ },
+ },
+ }, {
+ .id = 0x2a,
+ .name = "eppy",
+ .swgroup = TEGRA_SWGROUP_EPP,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x304,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x6c,
+ },
+ },
+ }, {
+ .id = 0x2b,
+ .name = "msencswr",
+ .swgroup = TEGRA_SWGROUP_MSENC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x2c,
+ .name = "viwsb",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x364,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x47,
+ },
+ },
+ }, {
+ .id = 0x2d,
+ .name = "viwu",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x368,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x2e,
+ .name = "viwv",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x368,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x2f,
+ .name = "viwy",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x36c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x47,
+ },
+ },
+ }, {
+ .id = 0x30,
+ .name = "g2dw",
+ .swgroup = TEGRA_SWGROUP_G2,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x30c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x9,
+ },
+ },
+ }, {
+ .id = 0x32,
+ .name = "avpcarm7w",
+ .swgroup = TEGRA_SWGROUP_AVPC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0e,
+ },
+ },
+ }, {
+ .id = 0x33,
+ .name = "fdcdwr",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 19,
+ },
+ .la = {
+ .reg = 0x338,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x10,
+ },
+ },
+ }, {
+ .id = 0x34,
+ .name = "fdcdwr2",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x340,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x10,
+ },
+ },
+ }, {
+ .id = 0x35,
+ .name = "hdaw",
+ .swgroup = TEGRA_SWGROUP_HDA,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x36,
+ .name = "host1xw",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x314,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x25,
+ },
+ },
+ }, {
+ .id = 0x37,
+ .name = "ispw",
+ .swgroup = TEGRA_SWGROUP_ISP,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x31c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x38,
+ .name = "mpcorelpw",
+ .swgroup = TEGRA_SWGROUP_MPCORELP,
+ .regs = {
+ .la = {
+ .reg = 0x324,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x39,
+ .name = "mpcorew",
+ .swgroup = TEGRA_SWGROUP_MPCORE,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0e,
+ },
+ },
+ }, {
+ .id = 0x3b,
+ .name = "ppcsahbdmaw",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 27,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xa5,
+ },
+ },
+ }, {
+ .id = 0x3c,
+ .name = "ppcsahbslvw",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xe8,
+ },
+ },
+ }, {
+ .id = 0x3e,
+ .name = "vdebsevw",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x35c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x3f,
+ .name = "vdedbgw",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x35c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x40,
+ .name = "vdembew",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x360,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x89,
+ },
+ },
+ }, {
+ .id = 0x41,
+ .name = "vdetpmw",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x360,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x59,
+ },
+ },
+ }, {
+ .id = 0x4a,
+ .name = "xusb_hostr",
+ .swgroup = TEGRA_SWGROUP_XUSB_HOST,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x37c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xa5,
+ },
+ },
+ }, {
+ .id = 0x4b,
+ .name = "xusb_hostw",
+ .swgroup = TEGRA_SWGROUP_XUSB_HOST,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x37c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xa5,
+ },
+ },
+ }, {
+ .id = 0x4c,
+ .name = "xusb_devr",
+ .swgroup = TEGRA_SWGROUP_XUSB_DEV,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x380,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xa5,
+ },
+ },
+ }, {
+ .id = 0x4d,
+ .name = "xusb_devw",
+ .swgroup = TEGRA_SWGROUP_XUSB_DEV,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x380,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xa5,
+ },
+ },
+ }, {
+ .id = 0x4e,
+ .name = "fdcdwr3",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x388,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x10,
+ },
+ },
+ }, {
+ .id = 0x4f,
+ .name = "fdcdrd3",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x384,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0c,
+ },
+ },
+ }, {
+ .id = 0x50,
+ .name = "fdcwr4",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x388,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x10,
+ },
+ },
+ }, {
+ .id = 0x51,
+ .name = "fdcrd4",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x384,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0c,
+ },
+ },
+ }, {
+ .id = 0x52,
+ .name = "emucifr",
+ .swgroup = TEGRA_SWGROUP_EMUCIF,
+ .regs = {
+ .la = {
+ .reg = 0x38c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x53,
+ .name = "emucifw",
+ .swgroup = TEGRA_SWGROUP_EMUCIF,
+ .regs = {
+ .la = {
+ .reg = 0x38c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0e,
+ },
+ },
+ }, {
+ .id = 0x54,
+ .name = "tsecsrd",
+ .swgroup = TEGRA_SWGROUP_TSEC,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x390,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x55,
+ .name = "tsecswr",
+ .swgroup = TEGRA_SWGROUP_TSEC,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x390,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ },
+};
+
+static const struct tegra_smmu_swgroup tegra114_swgroups[] = {
+ { .name = "dc", .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 },
+ { .name = "dcb", .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 },
+ { .name = "epp", .swgroup = TEGRA_SWGROUP_EPP, .reg = 0x248 },
+ { .name = "g2", .swgroup = TEGRA_SWGROUP_G2, .reg = 0x24c },
+ { .name = "avpc", .swgroup = TEGRA_SWGROUP_AVPC, .reg = 0x23c },
+ { .name = "nv", .swgroup = TEGRA_SWGROUP_NV, .reg = 0x268 },
+ { .name = "hda", .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 },
+ { .name = "hc", .swgroup = TEGRA_SWGROUP_HC, .reg = 0x250 },
+ { .name = "msenc", .swgroup = TEGRA_SWGROUP_MSENC, .reg = 0x264 },
+ { .name = "ppcs", .swgroup = TEGRA_SWGROUP_PPCS, .reg = 0x270 },
+ { .name = "vde", .swgroup = TEGRA_SWGROUP_VDE, .reg = 0x27c },
+ { .name = "vi", .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 },
+ { .name = "isp", .swgroup = TEGRA_SWGROUP_ISP, .reg = 0x258 },
+ { .name = "xusb_host", .swgroup = TEGRA_SWGROUP_XUSB_HOST, .reg = 0x288 },
+ { .name = "xusb_dev", .swgroup = TEGRA_SWGROUP_XUSB_DEV, .reg = 0x28c },
+ { .name = "tsec", .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 },
+};
+
+static const unsigned int tegra114_group_drm[] = {
+ TEGRA_SWGROUP_DC,
+ TEGRA_SWGROUP_DCB,
+ TEGRA_SWGROUP_G2,
+ TEGRA_SWGROUP_NV,
+};
+
+static const struct tegra_smmu_group_soc tegra114_groups[] = {
+ {
+ .name = "drm",
+ .swgroups = tegra114_group_drm,
+ .num_swgroups = ARRAY_SIZE(tegra114_group_drm),
+ },
+};
+
+static const struct tegra_smmu_soc tegra114_smmu_soc = {
+ .clients = tegra114_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra114_mc_clients),
+ .swgroups = tegra114_swgroups,
+ .num_swgroups = ARRAY_SIZE(tegra114_swgroups),
+ .groups = tegra114_groups,
+ .num_groups = ARRAY_SIZE(tegra114_groups),
+ .supports_round_robin_arbitration = false,
+ .supports_request_limit = false,
+ .num_tlb_lines = 32,
+ .num_asids = 4,
+};
+
+#define TEGRA114_MC_RESET(_name, _control, _status, _bit) \
+ { \
+ .name = #_name, \
+ .id = TEGRA114_MC_RESET_##_name, \
+ .control = _control, \
+ .status = _status, \
+ .bit = _bit, \
+ }
+
+static const struct tegra_mc_reset tegra114_mc_resets[] = {
+ TEGRA114_MC_RESET(AVPC, 0x200, 0x204, 1),
+ TEGRA114_MC_RESET(DC, 0x200, 0x204, 2),
+ TEGRA114_MC_RESET(DCB, 0x200, 0x204, 3),
+ TEGRA114_MC_RESET(EPP, 0x200, 0x204, 4),
+ TEGRA114_MC_RESET(2D, 0x200, 0x204, 5),
+ TEGRA114_MC_RESET(HC, 0x200, 0x204, 6),
+ TEGRA114_MC_RESET(HDA, 0x200, 0x204, 7),
+ TEGRA114_MC_RESET(ISP, 0x200, 0x204, 8),
+ TEGRA114_MC_RESET(MPCORE, 0x200, 0x204, 9),
+ TEGRA114_MC_RESET(MPCORELP, 0x200, 0x204, 10),
+ TEGRA114_MC_RESET(MPE, 0x200, 0x204, 11),
+ TEGRA114_MC_RESET(3D, 0x200, 0x204, 12),
+ TEGRA114_MC_RESET(3D2, 0x200, 0x204, 13),
+ TEGRA114_MC_RESET(PPCS, 0x200, 0x204, 14),
+ TEGRA114_MC_RESET(VDE, 0x200, 0x204, 16),
+ TEGRA114_MC_RESET(VI, 0x200, 0x204, 17),
+};
+
+const struct tegra_mc_soc tegra114_mc_soc = {
+ .clients = tegra114_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra114_mc_clients),
+ .num_address_bits = 32,
+ .atom_size = 32,
+ .client_id_mask = 0x7f,
+ .smmu = &tegra114_smmu_soc,
+ .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION |
+ MC_INT_DECERR_EMEM,
+ .reset_ops = &tegra_mc_reset_ops_common,
+ .resets = tegra114_mc_resets,
+ .num_resets = ARRAY_SIZE(tegra114_mc_resets),
+ .ops = &tegra30_mc_ops,
+};
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
new file mode 100644
index 000000000..00ed2b6a0
--- /dev/null
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -0,0 +1,1535 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author:
+ * Mikko Perttunen <mperttunen@nvidia.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk/tegra.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/sort.h>
+#include <linux/string.h>
+
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/mc.h>
+
+#include "mc.h"
+
+#define EMC_FBIO_CFG5 0x104
+#define EMC_FBIO_CFG5_DRAM_TYPE_MASK 0x3
+#define EMC_FBIO_CFG5_DRAM_TYPE_SHIFT 0
+#define EMC_FBIO_CFG5_DRAM_WIDTH_X64 BIT(4)
+
+#define EMC_INTSTATUS 0x0
+#define EMC_INTSTATUS_CLKCHANGE_COMPLETE BIT(4)
+
+#define EMC_CFG 0xc
+#define EMC_CFG_DRAM_CLKSTOP_PD BIT(31)
+#define EMC_CFG_DRAM_CLKSTOP_SR BIT(30)
+#define EMC_CFG_DRAM_ACPD BIT(29)
+#define EMC_CFG_DYN_SREF BIT(28)
+#define EMC_CFG_PWR_MASK ((0xF << 28) | BIT(18))
+#define EMC_CFG_DSR_VTTGEN_DRV_EN BIT(18)
+
+#define EMC_REFCTRL 0x20
+#define EMC_REFCTRL_DEV_SEL_SHIFT 0
+#define EMC_REFCTRL_ENABLE BIT(31)
+
+#define EMC_TIMING_CONTROL 0x28
+#define EMC_RC 0x2c
+#define EMC_RFC 0x30
+#define EMC_RAS 0x34
+#define EMC_RP 0x38
+#define EMC_R2W 0x3c
+#define EMC_W2R 0x40
+#define EMC_R2P 0x44
+#define EMC_W2P 0x48
+#define EMC_RD_RCD 0x4c
+#define EMC_WR_RCD 0x50
+#define EMC_RRD 0x54
+#define EMC_REXT 0x58
+#define EMC_WDV 0x5c
+#define EMC_QUSE 0x60
+#define EMC_QRST 0x64
+#define EMC_QSAFE 0x68
+#define EMC_RDV 0x6c
+#define EMC_REFRESH 0x70
+#define EMC_BURST_REFRESH_NUM 0x74
+#define EMC_PDEX2WR 0x78
+#define EMC_PDEX2RD 0x7c
+#define EMC_PCHG2PDEN 0x80
+#define EMC_ACT2PDEN 0x84
+#define EMC_AR2PDEN 0x88
+#define EMC_RW2PDEN 0x8c
+#define EMC_TXSR 0x90
+#define EMC_TCKE 0x94
+#define EMC_TFAW 0x98
+#define EMC_TRPAB 0x9c
+#define EMC_TCLKSTABLE 0xa0
+#define EMC_TCLKSTOP 0xa4
+#define EMC_TREFBW 0xa8
+#define EMC_ODT_WRITE 0xb0
+#define EMC_ODT_READ 0xb4
+#define EMC_WEXT 0xb8
+#define EMC_CTT 0xbc
+#define EMC_RFC_SLR 0xc0
+#define EMC_MRS_WAIT_CNT2 0xc4
+
+#define EMC_MRS_WAIT_CNT 0xc8
+#define EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT 0
+#define EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK \
+ (0x3FF << EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT)
+#define EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT 16
+#define EMC_MRS_WAIT_CNT_LONG_WAIT_MASK \
+ (0x3FF << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT)
+
+#define EMC_MRS 0xcc
+#define EMC_MODE_SET_DLL_RESET BIT(8)
+#define EMC_MODE_SET_LONG_CNT BIT(26)
+#define EMC_EMRS 0xd0
+#define EMC_REF 0xd4
+#define EMC_PRE 0xd8
+
+#define EMC_SELF_REF 0xe0
+#define EMC_SELF_REF_CMD_ENABLED BIT(0)
+#define EMC_SELF_REF_DEV_SEL_SHIFT 30
+
+#define EMC_MRW 0xe8
+
+#define EMC_MRR 0xec
+#define EMC_MRR_MA_SHIFT 16
+#define LPDDR2_MR4_TEMP_SHIFT 0
+
+#define EMC_XM2DQSPADCTRL3 0xf8
+#define EMC_FBIO_SPARE 0x100
+
+#define EMC_FBIO_CFG6 0x114
+#define EMC_EMRS2 0x12c
+#define EMC_MRW2 0x134
+#define EMC_MRW4 0x13c
+#define EMC_EINPUT 0x14c
+#define EMC_EINPUT_DURATION 0x150
+#define EMC_PUTERM_EXTRA 0x154
+#define EMC_TCKESR 0x158
+#define EMC_TPD 0x15c
+
+#define EMC_AUTO_CAL_CONFIG 0x2a4
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_START BIT(31)
+#define EMC_AUTO_CAL_INTERVAL 0x2a8
+#define EMC_AUTO_CAL_STATUS 0x2ac
+#define EMC_AUTO_CAL_STATUS_ACTIVE BIT(31)
+#define EMC_STATUS 0x2b4
+#define EMC_STATUS_TIMING_UPDATE_STALLED BIT(23)
+
+#define EMC_CFG_2 0x2b8
+#define EMC_CFG_2_MODE_SHIFT 0
+#define EMC_CFG_2_DIS_STP_OB_CLK_DURING_NON_WR BIT(6)
+
+#define EMC_CFG_DIG_DLL 0x2bc
+#define EMC_CFG_DIG_DLL_PERIOD 0x2c0
+#define EMC_RDV_MASK 0x2cc
+#define EMC_WDV_MASK 0x2d0
+#define EMC_CTT_DURATION 0x2d8
+#define EMC_CTT_TERM_CTRL 0x2dc
+#define EMC_ZCAL_INTERVAL 0x2e0
+#define EMC_ZCAL_WAIT_CNT 0x2e4
+
+#define EMC_ZQ_CAL 0x2ec
+#define EMC_ZQ_CAL_CMD BIT(0)
+#define EMC_ZQ_CAL_LONG BIT(4)
+#define EMC_ZQ_CAL_LONG_CMD_DEV0 \
+ (DRAM_DEV_SEL_0 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD)
+#define EMC_ZQ_CAL_LONG_CMD_DEV1 \
+ (DRAM_DEV_SEL_1 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD)
+
+#define EMC_XM2CMDPADCTRL 0x2f0
+#define EMC_XM2DQSPADCTRL 0x2f8
+#define EMC_XM2DQSPADCTRL2 0x2fc
+#define EMC_XM2DQSPADCTRL2_RX_FT_REC_ENABLE BIT(0)
+#define EMC_XM2DQSPADCTRL2_VREF_ENABLE BIT(5)
+#define EMC_XM2DQPADCTRL 0x300
+#define EMC_XM2DQPADCTRL2 0x304
+#define EMC_XM2CLKPADCTRL 0x308
+#define EMC_XM2COMPPADCTRL 0x30c
+#define EMC_XM2VTTGENPADCTRL 0x310
+#define EMC_XM2VTTGENPADCTRL2 0x314
+#define EMC_XM2VTTGENPADCTRL3 0x318
+#define EMC_XM2DQSPADCTRL4 0x320
+#define EMC_DLL_XFORM_DQS0 0x328
+#define EMC_DLL_XFORM_DQS1 0x32c
+#define EMC_DLL_XFORM_DQS2 0x330
+#define EMC_DLL_XFORM_DQS3 0x334
+#define EMC_DLL_XFORM_DQS4 0x338
+#define EMC_DLL_XFORM_DQS5 0x33c
+#define EMC_DLL_XFORM_DQS6 0x340
+#define EMC_DLL_XFORM_DQS7 0x344
+#define EMC_DLL_XFORM_QUSE0 0x348
+#define EMC_DLL_XFORM_QUSE1 0x34c
+#define EMC_DLL_XFORM_QUSE2 0x350
+#define EMC_DLL_XFORM_QUSE3 0x354
+#define EMC_DLL_XFORM_QUSE4 0x358
+#define EMC_DLL_XFORM_QUSE5 0x35c
+#define EMC_DLL_XFORM_QUSE6 0x360
+#define EMC_DLL_XFORM_QUSE7 0x364
+#define EMC_DLL_XFORM_DQ0 0x368
+#define EMC_DLL_XFORM_DQ1 0x36c
+#define EMC_DLL_XFORM_DQ2 0x370
+#define EMC_DLL_XFORM_DQ3 0x374
+#define EMC_DLI_TRIM_TXDQS0 0x3a8
+#define EMC_DLI_TRIM_TXDQS1 0x3ac
+#define EMC_DLI_TRIM_TXDQS2 0x3b0
+#define EMC_DLI_TRIM_TXDQS3 0x3b4
+#define EMC_DLI_TRIM_TXDQS4 0x3b8
+#define EMC_DLI_TRIM_TXDQS5 0x3bc
+#define EMC_DLI_TRIM_TXDQS6 0x3c0
+#define EMC_DLI_TRIM_TXDQS7 0x3c4
+#define EMC_STALL_THEN_EXE_AFTER_CLKCHANGE 0x3cc
+#define EMC_SEL_DPD_CTRL 0x3d8
+#define EMC_SEL_DPD_CTRL_DATA_SEL_DPD BIT(8)
+#define EMC_SEL_DPD_CTRL_ODT_SEL_DPD BIT(5)
+#define EMC_SEL_DPD_CTRL_RESET_SEL_DPD BIT(4)
+#define EMC_SEL_DPD_CTRL_CA_SEL_DPD BIT(3)
+#define EMC_SEL_DPD_CTRL_CLK_SEL_DPD BIT(2)
+#define EMC_SEL_DPD_CTRL_DDR3_MASK \
+ ((0xf << 2) | BIT(8))
+#define EMC_SEL_DPD_CTRL_MASK \
+ ((0x3 << 2) | BIT(5) | BIT(8))
+#define EMC_PRE_REFRESH_REQ_CNT 0x3dc
+#define EMC_DYN_SELF_REF_CONTROL 0x3e0
+#define EMC_TXSRDLL 0x3e4
+#define EMC_CCFIFO_ADDR 0x3e8
+#define EMC_CCFIFO_DATA 0x3ec
+#define EMC_CCFIFO_STATUS 0x3f0
+#define EMC_CDB_CNTL_1 0x3f4
+#define EMC_CDB_CNTL_2 0x3f8
+#define EMC_XM2CLKPADCTRL2 0x3fc
+#define EMC_AUTO_CAL_CONFIG2 0x458
+#define EMC_AUTO_CAL_CONFIG3 0x45c
+#define EMC_IBDLY 0x468
+#define EMC_DLL_XFORM_ADDR0 0x46c
+#define EMC_DLL_XFORM_ADDR1 0x470
+#define EMC_DLL_XFORM_ADDR2 0x474
+#define EMC_DSR_VTTGEN_DRV 0x47c
+#define EMC_TXDSRVTTGEN 0x480
+#define EMC_XM2CMDPADCTRL4 0x484
+#define EMC_XM2CMDPADCTRL5 0x488
+#define EMC_DLL_XFORM_DQS8 0x4a0
+#define EMC_DLL_XFORM_DQS9 0x4a4
+#define EMC_DLL_XFORM_DQS10 0x4a8
+#define EMC_DLL_XFORM_DQS11 0x4ac
+#define EMC_DLL_XFORM_DQS12 0x4b0
+#define EMC_DLL_XFORM_DQS13 0x4b4
+#define EMC_DLL_XFORM_DQS14 0x4b8
+#define EMC_DLL_XFORM_DQS15 0x4bc
+#define EMC_DLL_XFORM_QUSE8 0x4c0
+#define EMC_DLL_XFORM_QUSE9 0x4c4
+#define EMC_DLL_XFORM_QUSE10 0x4c8
+#define EMC_DLL_XFORM_QUSE11 0x4cc
+#define EMC_DLL_XFORM_QUSE12 0x4d0
+#define EMC_DLL_XFORM_QUSE13 0x4d4
+#define EMC_DLL_XFORM_QUSE14 0x4d8
+#define EMC_DLL_XFORM_QUSE15 0x4dc
+#define EMC_DLL_XFORM_DQ4 0x4e0
+#define EMC_DLL_XFORM_DQ5 0x4e4
+#define EMC_DLL_XFORM_DQ6 0x4e8
+#define EMC_DLL_XFORM_DQ7 0x4ec
+#define EMC_DLI_TRIM_TXDQS8 0x520
+#define EMC_DLI_TRIM_TXDQS9 0x524
+#define EMC_DLI_TRIM_TXDQS10 0x528
+#define EMC_DLI_TRIM_TXDQS11 0x52c
+#define EMC_DLI_TRIM_TXDQS12 0x530
+#define EMC_DLI_TRIM_TXDQS13 0x534
+#define EMC_DLI_TRIM_TXDQS14 0x538
+#define EMC_DLI_TRIM_TXDQS15 0x53c
+#define EMC_CDB_CNTL_3 0x540
+#define EMC_XM2DQSPADCTRL5 0x544
+#define EMC_XM2DQSPADCTRL6 0x548
+#define EMC_XM2DQPADCTRL3 0x54c
+#define EMC_DLL_XFORM_ADDR3 0x550
+#define EMC_DLL_XFORM_ADDR4 0x554
+#define EMC_DLL_XFORM_ADDR5 0x558
+#define EMC_CFG_PIPE 0x560
+#define EMC_QPOP 0x564
+#define EMC_QUSE_WIDTH 0x568
+#define EMC_PUTERM_WIDTH 0x56c
+#define EMC_BGBIAS_CTL0 0x570
+#define EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_RX BIT(3)
+#define EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_VTTGEN BIT(2)
+#define EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD BIT(1)
+#define EMC_PUTERM_ADJ 0x574
+
+#define DRAM_DEV_SEL_ALL 0
+#define DRAM_DEV_SEL_0 BIT(31)
+#define DRAM_DEV_SEL_1 BIT(30)
+
+#define EMC_CFG_POWER_FEATURES_MASK \
+ (EMC_CFG_DYN_SREF | EMC_CFG_DRAM_ACPD | EMC_CFG_DRAM_CLKSTOP_SR | \
+ EMC_CFG_DRAM_CLKSTOP_PD | EMC_CFG_DSR_VTTGEN_DRV_EN)
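+/*
+ * Device-select helpers: with more than one DRAM device present both devices
+ * are addressed, otherwise only device 0 is selected.
+ */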
+#define EMC_REFCTRL_DEV_SEL(n) (((n > 1) ? 0 : 2) << EMC_REFCTRL_DEV_SEL_SHIFT)
+#define EMC_DRAM_DEV_SEL(n) ((n > 1) ? DRAM_DEV_SEL_ALL : DRAM_DEV_SEL_0)
+
+/* Maximum amount of time in microseconds to wait for changes to become effective */
+#define EMC_STATUS_UPDATE_TIMEOUT 1000
+
+enum emc_dram_type {
+ DRAM_TYPE_DDR3 = 0,
+ DRAM_TYPE_DDR1 = 1,
+ DRAM_TYPE_LPDDR3 = 2,
+ DRAM_TYPE_DDR2 = 3
+};
+
+enum emc_dll_change {
+ DLL_CHANGE_NONE,
+ DLL_CHANGE_ON,
+ DLL_CHANGE_OFF
+};
+
+static const unsigned long emc_burst_regs[] = {
+ EMC_RC,
+ EMC_RFC,
+ EMC_RFC_SLR,
+ EMC_RAS,
+ EMC_RP,
+ EMC_R2W,
+ EMC_W2R,
+ EMC_R2P,
+ EMC_W2P,
+ EMC_RD_RCD,
+ EMC_WR_RCD,
+ EMC_RRD,
+ EMC_REXT,
+ EMC_WEXT,
+ EMC_WDV,
+ EMC_WDV_MASK,
+ EMC_QUSE,
+ EMC_QUSE_WIDTH,
+ EMC_IBDLY,
+ EMC_EINPUT,
+ EMC_EINPUT_DURATION,
+ EMC_PUTERM_EXTRA,
+ EMC_PUTERM_WIDTH,
+ EMC_PUTERM_ADJ,
+ EMC_CDB_CNTL_1,
+ EMC_CDB_CNTL_2,
+ EMC_CDB_CNTL_3,
+ EMC_QRST,
+ EMC_QSAFE,
+ EMC_RDV,
+ EMC_RDV_MASK,
+ EMC_REFRESH,
+ EMC_BURST_REFRESH_NUM,
+ EMC_PRE_REFRESH_REQ_CNT,
+ EMC_PDEX2WR,
+ EMC_PDEX2RD,
+ EMC_PCHG2PDEN,
+ EMC_ACT2PDEN,
+ EMC_AR2PDEN,
+ EMC_RW2PDEN,
+ EMC_TXSR,
+ EMC_TXSRDLL,
+ EMC_TCKE,
+ EMC_TCKESR,
+ EMC_TPD,
+ EMC_TFAW,
+ EMC_TRPAB,
+ EMC_TCLKSTABLE,
+ EMC_TCLKSTOP,
+ EMC_TREFBW,
+ EMC_FBIO_CFG6,
+ EMC_ODT_WRITE,
+ EMC_ODT_READ,
+ EMC_FBIO_CFG5,
+ EMC_CFG_DIG_DLL,
+ EMC_CFG_DIG_DLL_PERIOD,
+ EMC_DLL_XFORM_DQS0,
+ EMC_DLL_XFORM_DQS1,
+ EMC_DLL_XFORM_DQS2,
+ EMC_DLL_XFORM_DQS3,
+ EMC_DLL_XFORM_DQS4,
+ EMC_DLL_XFORM_DQS5,
+ EMC_DLL_XFORM_DQS6,
+ EMC_DLL_XFORM_DQS7,
+ EMC_DLL_XFORM_DQS8,
+ EMC_DLL_XFORM_DQS9,
+ EMC_DLL_XFORM_DQS10,
+ EMC_DLL_XFORM_DQS11,
+ EMC_DLL_XFORM_DQS12,
+ EMC_DLL_XFORM_DQS13,
+ EMC_DLL_XFORM_DQS14,
+ EMC_DLL_XFORM_DQS15,
+ EMC_DLL_XFORM_QUSE0,
+ EMC_DLL_XFORM_QUSE1,
+ EMC_DLL_XFORM_QUSE2,
+ EMC_DLL_XFORM_QUSE3,
+ EMC_DLL_XFORM_QUSE4,
+ EMC_DLL_XFORM_QUSE5,
+ EMC_DLL_XFORM_QUSE6,
+ EMC_DLL_XFORM_QUSE7,
+ EMC_DLL_XFORM_ADDR0,
+ EMC_DLL_XFORM_ADDR1,
+ EMC_DLL_XFORM_ADDR2,
+ EMC_DLL_XFORM_ADDR3,
+ EMC_DLL_XFORM_ADDR4,
+ EMC_DLL_XFORM_ADDR5,
+ EMC_DLL_XFORM_QUSE8,
+ EMC_DLL_XFORM_QUSE9,
+ EMC_DLL_XFORM_QUSE10,
+ EMC_DLL_XFORM_QUSE11,
+ EMC_DLL_XFORM_QUSE12,
+ EMC_DLL_XFORM_QUSE13,
+ EMC_DLL_XFORM_QUSE14,
+ EMC_DLL_XFORM_QUSE15,
+ EMC_DLI_TRIM_TXDQS0,
+ EMC_DLI_TRIM_TXDQS1,
+ EMC_DLI_TRIM_TXDQS2,
+ EMC_DLI_TRIM_TXDQS3,
+ EMC_DLI_TRIM_TXDQS4,
+ EMC_DLI_TRIM_TXDQS5,
+ EMC_DLI_TRIM_TXDQS6,
+ EMC_DLI_TRIM_TXDQS7,
+ EMC_DLI_TRIM_TXDQS8,
+ EMC_DLI_TRIM_TXDQS9,
+ EMC_DLI_TRIM_TXDQS10,
+ EMC_DLI_TRIM_TXDQS11,
+ EMC_DLI_TRIM_TXDQS12,
+ EMC_DLI_TRIM_TXDQS13,
+ EMC_DLI_TRIM_TXDQS14,
+ EMC_DLI_TRIM_TXDQS15,
+ EMC_DLL_XFORM_DQ0,
+ EMC_DLL_XFORM_DQ1,
+ EMC_DLL_XFORM_DQ2,
+ EMC_DLL_XFORM_DQ3,
+ EMC_DLL_XFORM_DQ4,
+ EMC_DLL_XFORM_DQ5,
+ EMC_DLL_XFORM_DQ6,
+ EMC_DLL_XFORM_DQ7,
+ EMC_XM2CMDPADCTRL,
+ EMC_XM2CMDPADCTRL4,
+ EMC_XM2CMDPADCTRL5,
+ EMC_XM2DQPADCTRL2,
+ EMC_XM2DQPADCTRL3,
+ EMC_XM2CLKPADCTRL,
+ EMC_XM2CLKPADCTRL2,
+ EMC_XM2COMPPADCTRL,
+ EMC_XM2VTTGENPADCTRL,
+ EMC_XM2VTTGENPADCTRL2,
+ EMC_XM2VTTGENPADCTRL3,
+ EMC_XM2DQSPADCTRL3,
+ EMC_XM2DQSPADCTRL4,
+ EMC_XM2DQSPADCTRL5,
+ EMC_XM2DQSPADCTRL6,
+ EMC_DSR_VTTGEN_DRV,
+ EMC_TXDSRVTTGEN,
+ EMC_FBIO_SPARE,
+ EMC_ZCAL_WAIT_CNT,
+ EMC_MRS_WAIT_CNT2,
+ EMC_CTT,
+ EMC_CTT_DURATION,
+ EMC_CFG_PIPE,
+ EMC_DYN_SELF_REF_CONTROL,
+ EMC_QPOP
+};
+
+struct emc_timing {
+ unsigned long rate;
+
+ u32 emc_burst_data[ARRAY_SIZE(emc_burst_regs)];
+
+ u32 emc_auto_cal_config;
+ u32 emc_auto_cal_config2;
+ u32 emc_auto_cal_config3;
+ u32 emc_auto_cal_interval;
+ u32 emc_bgbias_ctl0;
+ u32 emc_cfg;
+ u32 emc_cfg_2;
+ u32 emc_ctt_term_ctrl;
+ u32 emc_mode_1;
+ u32 emc_mode_2;
+ u32 emc_mode_4;
+ u32 emc_mode_reset;
+ u32 emc_mrs_wait_cnt;
+ u32 emc_sel_dpd_ctrl;
+ u32 emc_xm2dqspadctrl2;
+ u32 emc_zcal_cnt_long;
+ u32 emc_zcal_interval;
+};
+
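+/* Origins of a rate request: the debugfs knobs and the interconnect framework */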
+enum emc_rate_request_type {
+ EMC_RATE_DEBUG,
+ EMC_RATE_ICC,
+ EMC_RATE_TYPE_MAX,
+};
+
+struct emc_rate_request {
+ unsigned long min_rate;
+ unsigned long max_rate;
+};
+
+struct tegra_emc {
+ struct device *dev;
+
+ struct tegra_mc *mc;
+
+ void __iomem *regs;
+
+ struct clk *clk;
+
+ enum emc_dram_type dram_type;
+ unsigned int dram_bus_width;
+ unsigned int dram_num;
+
+ struct emc_timing last_timing;
+ struct emc_timing *timings;
+ unsigned int num_timings;
+
+ struct {
+ struct dentry *root;
+ unsigned long min_rate;
+ unsigned long max_rate;
+ } debugfs;
+
+ struct icc_provider provider;
+
+ /*
+	 * There are multiple sources in the EMC driver which could request
+	 * a min/max clock rate; these rates are contained in this array.
+ */
+ struct emc_rate_request requested_rate[EMC_RATE_TYPE_MAX];
+
+ /* protect shared rate-change code path */
+ struct mutex rate_lock;
+};
+
+/* Timing change sequence functions */
+
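+/*
+ * Writes pushed through the CCFIFO are queued by the hardware and only take
+ * effect as part of the next clock-change sequence, so a whole set of timing
+ * updates can be staged ahead of the actual rate switch.
+ */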
+static void emc_ccfifo_writel(struct tegra_emc *emc, u32 value,
+ unsigned long offset)
+{
+ writel(value, emc->regs + EMC_CCFIFO_DATA);
+ writel(offset, emc->regs + EMC_CCFIFO_ADDR);
+}
+
+static void emc_seq_update_timing(struct tegra_emc *emc)
+{
+ unsigned int i;
+ u32 value;
+
+ writel(1, emc->regs + EMC_TIMING_CONTROL);
+
+ for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; ++i) {
+ value = readl(emc->regs + EMC_STATUS);
+ if ((value & EMC_STATUS_TIMING_UPDATE_STALLED) == 0)
+ return;
+ udelay(1);
+ }
+
+ dev_err(emc->dev, "timing update timed out\n");
+}
+
+static void emc_seq_disable_auto_cal(struct tegra_emc *emc)
+{
+ unsigned int i;
+ u32 value;
+
+ writel(0, emc->regs + EMC_AUTO_CAL_INTERVAL);
+
+ for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; ++i) {
+ value = readl(emc->regs + EMC_AUTO_CAL_STATUS);
+ if ((value & EMC_AUTO_CAL_STATUS_ACTIVE) == 0)
+ return;
+ udelay(1);
+ }
+
+ dev_err(emc->dev, "auto cal disable timed out\n");
+}
+
+static void emc_seq_wait_clkchange(struct tegra_emc *emc)
+{
+ unsigned int i;
+ u32 value;
+
+ for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; ++i) {
+ value = readl(emc->regs + EMC_INTSTATUS);
+ if (value & EMC_INTSTATUS_CLKCHANGE_COMPLETE)
+ return;
+ udelay(1);
+ }
+
+ dev_err(emc->dev, "clock change timed out\n");
+}
+
+static struct emc_timing *tegra_emc_find_timing(struct tegra_emc *emc,
+ unsigned long rate)
+{
+ struct emc_timing *timing = NULL;
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate == rate) {
+ timing = &emc->timings[i];
+ break;
+ }
+ }
+
+ if (!timing) {
+ dev_err(emc->dev, "no timing for rate %lu\n", rate);
+ return NULL;
+ }
+
+ return timing;
+}
+
+static int tegra_emc_prepare_timing_change(struct tegra_emc *emc,
+ unsigned long rate)
+{
+ struct emc_timing *timing = tegra_emc_find_timing(emc, rate);
+ struct emc_timing *last = &emc->last_timing;
+ enum emc_dll_change dll_change;
+ unsigned int pre_wait = 0;
+ u32 val, val2, mask;
+ bool update = false;
+ unsigned int i;
+
+ if (!timing)
+ return -ENOENT;
+
+ if ((last->emc_mode_1 & 0x1) == (timing->emc_mode_1 & 0x1))
+ dll_change = DLL_CHANGE_NONE;
+ else if (timing->emc_mode_1 & 0x1)
+ dll_change = DLL_CHANGE_ON;
+ else
+ dll_change = DLL_CHANGE_OFF;
+
+ /* Clear CLKCHANGE_COMPLETE interrupts */
+ writel(EMC_INTSTATUS_CLKCHANGE_COMPLETE, emc->regs + EMC_INTSTATUS);
+
+ /* Disable dynamic self-refresh */
+ val = readl(emc->regs + EMC_CFG);
+ if (val & EMC_CFG_PWR_MASK) {
+ val &= ~EMC_CFG_POWER_FEATURES_MASK;
+ writel(val, emc->regs + EMC_CFG);
+
+ pre_wait = 5;
+ }
+
+ /* Disable SEL_DPD_CTRL for clock change */
+ if (emc->dram_type == DRAM_TYPE_DDR3)
+ mask = EMC_SEL_DPD_CTRL_DDR3_MASK;
+ else
+ mask = EMC_SEL_DPD_CTRL_MASK;
+
+ val = readl(emc->regs + EMC_SEL_DPD_CTRL);
+ if (val & mask) {
+ val &= ~mask;
+ writel(val, emc->regs + EMC_SEL_DPD_CTRL);
+ }
+
+ /* Prepare DQ/DQS for clock change */
+ val = readl(emc->regs + EMC_BGBIAS_CTL0);
+ val2 = last->emc_bgbias_ctl0;
+ if (!(timing->emc_bgbias_ctl0 &
+ EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_RX) &&
+ (val & EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_RX)) {
+ val2 &= ~EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_RX;
+ update = true;
+ }
+
+ if ((val & EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD) ||
+ (val & EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_VTTGEN)) {
+ update = true;
+ }
+
+ if (update) {
+ writel(val2, emc->regs + EMC_BGBIAS_CTL0);
+ if (pre_wait < 5)
+ pre_wait = 5;
+ }
+
+ update = false;
+ val = readl(emc->regs + EMC_XM2DQSPADCTRL2);
+ if (timing->emc_xm2dqspadctrl2 & EMC_XM2DQSPADCTRL2_VREF_ENABLE &&
+ !(val & EMC_XM2DQSPADCTRL2_VREF_ENABLE)) {
+ val |= EMC_XM2DQSPADCTRL2_VREF_ENABLE;
+ update = true;
+ }
+
+ if (timing->emc_xm2dqspadctrl2 & EMC_XM2DQSPADCTRL2_RX_FT_REC_ENABLE &&
+ !(val & EMC_XM2DQSPADCTRL2_RX_FT_REC_ENABLE)) {
+ val |= EMC_XM2DQSPADCTRL2_RX_FT_REC_ENABLE;
+ update = true;
+ }
+
+ if (update) {
+ writel(val, emc->regs + EMC_XM2DQSPADCTRL2);
+ if (pre_wait < 30)
+ pre_wait = 30;
+ }
+
+ /* Wait to settle */
+ if (pre_wait) {
+ emc_seq_update_timing(emc);
+ udelay(pre_wait);
+ }
+
+ /* Program CTT_TERM control */
+ if (last->emc_ctt_term_ctrl != timing->emc_ctt_term_ctrl) {
+ emc_seq_disable_auto_cal(emc);
+ writel(timing->emc_ctt_term_ctrl,
+ emc->regs + EMC_CTT_TERM_CTRL);
+ emc_seq_update_timing(emc);
+ }
+
+ /* Program burst shadow registers */
+ for (i = 0; i < ARRAY_SIZE(timing->emc_burst_data); ++i)
+ writel(timing->emc_burst_data[i],
+ emc->regs + emc_burst_regs[i]);
+
+ writel(timing->emc_xm2dqspadctrl2, emc->regs + EMC_XM2DQSPADCTRL2);
+ writel(timing->emc_zcal_interval, emc->regs + EMC_ZCAL_INTERVAL);
+
+ tegra_mc_write_emem_configuration(emc->mc, timing->rate);
+
+ val = timing->emc_cfg & ~EMC_CFG_POWER_FEATURES_MASK;
+ emc_ccfifo_writel(emc, val, EMC_CFG);
+
+ /* Program AUTO_CAL_CONFIG */
+ if (timing->emc_auto_cal_config2 != last->emc_auto_cal_config2)
+ emc_ccfifo_writel(emc, timing->emc_auto_cal_config2,
+ EMC_AUTO_CAL_CONFIG2);
+
+ if (timing->emc_auto_cal_config3 != last->emc_auto_cal_config3)
+ emc_ccfifo_writel(emc, timing->emc_auto_cal_config3,
+ EMC_AUTO_CAL_CONFIG3);
+
+ if (timing->emc_auto_cal_config != last->emc_auto_cal_config) {
+ val = timing->emc_auto_cal_config;
+ val &= EMC_AUTO_CAL_CONFIG_AUTO_CAL_START;
+ emc_ccfifo_writel(emc, val, EMC_AUTO_CAL_CONFIG);
+ }
+
+ /* DDR3: predict MRS long wait count */
+ if (emc->dram_type == DRAM_TYPE_DDR3 &&
+ dll_change == DLL_CHANGE_ON) {
+ u32 cnt = 512;
+
+ if (timing->emc_zcal_interval != 0 &&
+ last->emc_zcal_interval == 0)
+ cnt -= emc->dram_num * 256;
+
+ val = (timing->emc_mrs_wait_cnt
+ & EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK)
+ >> EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT;
+ if (cnt < val)
+ cnt = val;
+
+ val = timing->emc_mrs_wait_cnt
+ & ~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
+ val |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT)
+ & EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
+
+ writel(val, emc->regs + EMC_MRS_WAIT_CNT);
+ }
+
+ val = timing->emc_cfg_2;
+ val &= ~EMC_CFG_2_DIS_STP_OB_CLK_DURING_NON_WR;
+ emc_ccfifo_writel(emc, val, EMC_CFG_2);
+
+ /* DDR3: Turn off DLL and enter self-refresh */
+ if (emc->dram_type == DRAM_TYPE_DDR3 && dll_change == DLL_CHANGE_OFF)
+ emc_ccfifo_writel(emc, timing->emc_mode_1, EMC_EMRS);
+
+ /* Disable refresh controller */
+ emc_ccfifo_writel(emc, EMC_REFCTRL_DEV_SEL(emc->dram_num),
+ EMC_REFCTRL);
+ if (emc->dram_type == DRAM_TYPE_DDR3)
+ emc_ccfifo_writel(emc, EMC_DRAM_DEV_SEL(emc->dram_num) |
+ EMC_SELF_REF_CMD_ENABLED,
+ EMC_SELF_REF);
+
+ /* Flow control marker */
+ emc_ccfifo_writel(emc, 1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE);
+
+ /* DDR3: Exit self-refresh */
+ if (emc->dram_type == DRAM_TYPE_DDR3)
+ emc_ccfifo_writel(emc, EMC_DRAM_DEV_SEL(emc->dram_num),
+ EMC_SELF_REF);
+ emc_ccfifo_writel(emc, EMC_REFCTRL_DEV_SEL(emc->dram_num) |
+ EMC_REFCTRL_ENABLE,
+ EMC_REFCTRL);
+
+ /* Set DRAM mode registers */
+ if (emc->dram_type == DRAM_TYPE_DDR3) {
+ if (timing->emc_mode_1 != last->emc_mode_1)
+ emc_ccfifo_writel(emc, timing->emc_mode_1, EMC_EMRS);
+ if (timing->emc_mode_2 != last->emc_mode_2)
+ emc_ccfifo_writel(emc, timing->emc_mode_2, EMC_EMRS2);
+
+ if ((timing->emc_mode_reset != last->emc_mode_reset) ||
+ dll_change == DLL_CHANGE_ON) {
+ val = timing->emc_mode_reset;
+ if (dll_change == DLL_CHANGE_ON) {
+ val |= EMC_MODE_SET_DLL_RESET;
+ val |= EMC_MODE_SET_LONG_CNT;
+ } else {
+ val &= ~EMC_MODE_SET_DLL_RESET;
+ }
+ emc_ccfifo_writel(emc, val, EMC_MRS);
+ }
+ } else {
+ if (timing->emc_mode_2 != last->emc_mode_2)
+ emc_ccfifo_writel(emc, timing->emc_mode_2, EMC_MRW2);
+ if (timing->emc_mode_1 != last->emc_mode_1)
+ emc_ccfifo_writel(emc, timing->emc_mode_1, EMC_MRW);
+ if (timing->emc_mode_4 != last->emc_mode_4)
+ emc_ccfifo_writel(emc, timing->emc_mode_4, EMC_MRW4);
+ }
+
+ /* Issue ZCAL command if turning ZCAL on */
+ if (timing->emc_zcal_interval != 0 && last->emc_zcal_interval == 0) {
+ emc_ccfifo_writel(emc, EMC_ZQ_CAL_LONG_CMD_DEV0, EMC_ZQ_CAL);
+ if (emc->dram_num > 1)
+ emc_ccfifo_writel(emc, EMC_ZQ_CAL_LONG_CMD_DEV1,
+ EMC_ZQ_CAL);
+ }
+
+ /* Write to RO register to remove stall after change */
+ emc_ccfifo_writel(emc, 0, EMC_CCFIFO_STATUS);
+
+ if (timing->emc_cfg_2 & EMC_CFG_2_DIS_STP_OB_CLK_DURING_NON_WR)
+ emc_ccfifo_writel(emc, timing->emc_cfg_2, EMC_CFG_2);
+
+ /* Disable AUTO_CAL for clock change */
+ emc_seq_disable_auto_cal(emc);
+
+ /* Read register to wait until programming has settled */
+ readl(emc->regs + EMC_INTSTATUS);
+
+ return 0;
+}
+
+static void tegra_emc_complete_timing_change(struct tegra_emc *emc,
+ unsigned long rate)
+{
+ struct emc_timing *timing = tegra_emc_find_timing(emc, rate);
+ struct emc_timing *last = &emc->last_timing;
+ u32 val;
+
+ if (!timing)
+ return;
+
+ /* Wait until the state machine has settled */
+ emc_seq_wait_clkchange(emc);
+
+ /* Restore AUTO_CAL */
+ if (timing->emc_ctt_term_ctrl != last->emc_ctt_term_ctrl)
+ writel(timing->emc_auto_cal_interval,
+ emc->regs + EMC_AUTO_CAL_INTERVAL);
+
+ /* Restore dynamic self-refresh */
+ if (timing->emc_cfg & EMC_CFG_PWR_MASK)
+ writel(timing->emc_cfg, emc->regs + EMC_CFG);
+
+ /* Set ZCAL wait count */
+ writel(timing->emc_zcal_cnt_long, emc->regs + EMC_ZCAL_WAIT_CNT);
+
+ /* LPDDR3: Turn off BGBIAS if low frequency */
+ if (emc->dram_type == DRAM_TYPE_LPDDR3 &&
+ timing->emc_bgbias_ctl0 &
+ EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_RX) {
+ val = timing->emc_bgbias_ctl0;
+ val |= EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_VTTGEN;
+ val |= EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD;
+ writel(val, emc->regs + EMC_BGBIAS_CTL0);
+ } else {
+ if (emc->dram_type == DRAM_TYPE_DDR3 &&
+ readl(emc->regs + EMC_BGBIAS_CTL0) !=
+ timing->emc_bgbias_ctl0) {
+ writel(timing->emc_bgbias_ctl0,
+ emc->regs + EMC_BGBIAS_CTL0);
+ }
+
+ writel(timing->emc_auto_cal_interval,
+ emc->regs + EMC_AUTO_CAL_INTERVAL);
+ }
+
+ /* Wait for timing to settle */
+ udelay(2);
+
+ /* Reprogram SEL_DPD_CTRL */
+ writel(timing->emc_sel_dpd_ctrl, emc->regs + EMC_SEL_DPD_CTRL);
+ emc_seq_update_timing(emc);
+
+ emc->last_timing = *timing;
+}
+
+/* Initialization and deinitialization */
+
+static void emc_read_current_timing(struct tegra_emc *emc,
+ struct emc_timing *timing)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(emc_burst_regs); ++i)
+ timing->emc_burst_data[i] =
+ readl(emc->regs + emc_burst_regs[i]);
+
+ timing->emc_cfg = readl(emc->regs + EMC_CFG);
+
+ timing->emc_auto_cal_interval = 0;
+ timing->emc_zcal_cnt_long = 0;
+ timing->emc_mode_1 = 0;
+ timing->emc_mode_2 = 0;
+ timing->emc_mode_4 = 0;
+ timing->emc_mode_reset = 0;
+}
+
+static int emc_init(struct tegra_emc *emc)
+{
+ emc->dram_type = readl(emc->regs + EMC_FBIO_CFG5);
+
+ if (emc->dram_type & EMC_FBIO_CFG5_DRAM_WIDTH_X64)
+ emc->dram_bus_width = 64;
+ else
+ emc->dram_bus_width = 32;
+
+ dev_info_once(emc->dev, "%ubit DRAM bus\n", emc->dram_bus_width);
+
+ emc->dram_type &= EMC_FBIO_CFG5_DRAM_TYPE_MASK;
+ emc->dram_type >>= EMC_FBIO_CFG5_DRAM_TYPE_SHIFT;
+
+ emc->dram_num = tegra_mc_get_emem_device_count(emc->mc);
+
+ emc_read_current_timing(emc, &emc->last_timing);
+
+ return 0;
+}
+
+static int load_one_timing_from_dt(struct tegra_emc *emc,
+ struct emc_timing *timing,
+ struct device_node *node)
+{
+ u32 value;
+ int err;
+
+ err = of_property_read_u32(node, "clock-frequency", &value);
+ if (err) {
+ dev_err(emc->dev, "timing %pOFn: failed to read rate: %d\n",
+ node, err);
+ return err;
+ }
+
+ timing->rate = value;
+
+ err = of_property_read_u32_array(node, "nvidia,emc-configuration",
+ timing->emc_burst_data,
+ ARRAY_SIZE(timing->emc_burst_data));
+ if (err) {
+ dev_err(emc->dev,
+ "timing %pOFn: failed to read emc burst data: %d\n",
+ node, err);
+ return err;
+ }
+
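+/*
+ * Helper macro: read a single u32 DT property into the corresponding field of
+ * the timing structure, bailing out of this function on error.
+ */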
+#define EMC_READ_PROP(prop, dtprop) { \
+ err = of_property_read_u32(node, dtprop, &timing->prop); \
+ if (err) { \
+ dev_err(emc->dev, "timing %pOFn: failed to read " #prop ": %d\n", \
+ node, err); \
+ return err; \
+ } \
+}
+
+ EMC_READ_PROP(emc_auto_cal_config, "nvidia,emc-auto-cal-config")
+ EMC_READ_PROP(emc_auto_cal_config2, "nvidia,emc-auto-cal-config2")
+ EMC_READ_PROP(emc_auto_cal_config3, "nvidia,emc-auto-cal-config3")
+ EMC_READ_PROP(emc_auto_cal_interval, "nvidia,emc-auto-cal-interval")
+ EMC_READ_PROP(emc_bgbias_ctl0, "nvidia,emc-bgbias-ctl0")
+ EMC_READ_PROP(emc_cfg, "nvidia,emc-cfg")
+ EMC_READ_PROP(emc_cfg_2, "nvidia,emc-cfg-2")
+ EMC_READ_PROP(emc_ctt_term_ctrl, "nvidia,emc-ctt-term-ctrl")
+ EMC_READ_PROP(emc_mode_1, "nvidia,emc-mode-1")
+ EMC_READ_PROP(emc_mode_2, "nvidia,emc-mode-2")
+ EMC_READ_PROP(emc_mode_4, "nvidia,emc-mode-4")
+ EMC_READ_PROP(emc_mode_reset, "nvidia,emc-mode-reset")
+ EMC_READ_PROP(emc_mrs_wait_cnt, "nvidia,emc-mrs-wait-cnt")
+ EMC_READ_PROP(emc_sel_dpd_ctrl, "nvidia,emc-sel-dpd-ctrl")
+ EMC_READ_PROP(emc_xm2dqspadctrl2, "nvidia,emc-xm2dqspadctrl2")
+ EMC_READ_PROP(emc_zcal_cnt_long, "nvidia,emc-zcal-cnt-long")
+ EMC_READ_PROP(emc_zcal_interval, "nvidia,emc-zcal-interval")
+
+#undef EMC_READ_PROP
+
+ return 0;
+}
+
+static int cmp_timings(const void *_a, const void *_b)
+{
+ const struct emc_timing *a = _a;
+ const struct emc_timing *b = _b;
+
+ if (a->rate < b->rate)
+ return -1;
+ else if (a->rate == b->rate)
+ return 0;
+ else
+ return 1;
+}
+
+static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
+ struct device_node *node)
+{
+ int child_count = of_get_child_count(node);
+ struct device_node *child;
+ struct emc_timing *timing;
+ unsigned int i = 0;
+ int err;
+
+ emc->timings = devm_kcalloc(emc->dev, child_count, sizeof(*timing),
+ GFP_KERNEL);
+ if (!emc->timings)
+ return -ENOMEM;
+
+ emc->num_timings = child_count;
+
+ for_each_child_of_node(node, child) {
+ timing = &emc->timings[i++];
+
+ err = load_one_timing_from_dt(emc, timing, child);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
+ }
+
+ sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
+ NULL);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_emc_of_match[] = {
+ { .compatible = "nvidia,tegra124-emc" },
+ { .compatible = "nvidia,tegra132-emc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tegra_emc_of_match);
+
+static struct device_node *
+tegra_emc_find_node_by_ram_code(struct device_node *node, u32 ram_code)
+{
+ struct device_node *np;
+ int err;
+
+ for_each_child_of_node(node, np) {
+ u32 value;
+
+ err = of_property_read_u32(np, "nvidia,ram-code", &value);
+ if (err || (value != ram_code))
+ continue;
+
+ return np;
+ }
+
+ return NULL;
+}
+
+static void tegra_emc_rate_requests_init(struct tegra_emc *emc)
+{
+ unsigned int i;
+
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++) {
+ emc->requested_rate[i].min_rate = 0;
+ emc->requested_rate[i].max_rate = ULONG_MAX;
+ }
+}
+
+static int emc_request_rate(struct tegra_emc *emc,
+ unsigned long new_min_rate,
+ unsigned long new_max_rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = emc->requested_rate;
+ unsigned long min_rate = 0, max_rate = ULONG_MAX;
+ unsigned int i;
+ int err;
+
+ /* select minimum and maximum rates among the requested rates */
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++, req++) {
+ if (i == type) {
+ min_rate = max(new_min_rate, min_rate);
+ max_rate = min(new_max_rate, max_rate);
+ } else {
+ min_rate = max(req->min_rate, min_rate);
+ max_rate = min(req->max_rate, max_rate);
+ }
+ }
+
+ if (min_rate > max_rate) {
+ dev_err_ratelimited(emc->dev, "%s: type %u: out of range: %lu %lu\n",
+ __func__, type, min_rate, max_rate);
+ return -ERANGE;
+ }
+
+ /*
+	 * EMC rate changes should go via the OPP API because it manages voltage
+ * changes.
+ */
+ err = dev_pm_opp_set_rate(emc->dev, min_rate);
+ if (err)
+ return err;
+
+ emc->requested_rate[type].min_rate = new_min_rate;
+ emc->requested_rate[type].max_rate = new_max_rate;
+
+ return 0;
+}
+
+static int emc_set_min_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, rate, req->max_rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
+static int emc_set_max_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, req->min_rate, rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
+/*
+ * debugfs interface
+ *
+ * The memory controller driver exposes some files in debugfs that can be used
+ * to control the EMC frequency. The top-level directory can be found here:
+ *
+ * /sys/kernel/debug/emc
+ *
+ * It contains the following files:
+ *
+ * - available_rates: This file contains a list of valid, space-separated
+ * EMC frequencies.
+ *
+ * - min_rate: Writing a value to this file sets the given frequency as the
+ * floor of the permitted range. If this is higher than the currently
+ * configured EMC frequency, this will cause the frequency to be
+ * increased so that it stays within the valid range.
+ *
+ * - max_rate: Similarly to the min_rate file, writing a value to this file
+ * sets the given frequency as the ceiling of the permitted range. If
+ * the value is lower than the currently configured EMC frequency, this
+ * will cause the frequency to be decreased so that it stays within the
+ * valid range.
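+ *
+ * For example, assuming 204 MHz (204000000 Hz) is listed in available_rates
+ * (the exact rates are board-specific):
+ *
+ *   echo 204000000 > /sys/kernel/debug/emc/min_rate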
+ */
+
+static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++)
+ if (rate == emc->timings[i].rate)
+ return true;
+
+ return false;
+}
+
+static int tegra_emc_debug_available_rates_show(struct seq_file *s,
+ void *data)
+{
+ struct tegra_emc *emc = s->private;
+ const char *prefix = "";
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ seq_printf(s, "%s%lu", prefix, emc->timings[i].rate);
+ prefix = " ";
+ }
+
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(tegra_emc_debug_available_rates);
+
+static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
+{
+ struct tegra_emc *emc = data;
+
+ *rate = emc->debugfs.min_rate;
+
+ return 0;
+}
+
+static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
+{
+ struct tegra_emc *emc = data;
+ int err;
+
+ if (!tegra_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.min_rate = rate;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
+ tegra_emc_debug_min_rate_get,
+ tegra_emc_debug_min_rate_set, "%llu\n");
+
+static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
+{
+ struct tegra_emc *emc = data;
+
+ *rate = emc->debugfs.max_rate;
+
+ return 0;
+}
+
+static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
+{
+ struct tegra_emc *emc = data;
+ int err;
+
+ if (!tegra_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.max_rate = rate;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
+ tegra_emc_debug_max_rate_get,
+ tegra_emc_debug_max_rate_set, "%llu\n");
+
+static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc)
+{
+ unsigned int i;
+ int err;
+
+ emc->debugfs.min_rate = ULONG_MAX;
+ emc->debugfs.max_rate = 0;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate < emc->debugfs.min_rate)
+ emc->debugfs.min_rate = emc->timings[i].rate;
+
+ if (emc->timings[i].rate > emc->debugfs.max_rate)
+ emc->debugfs.max_rate = emc->timings[i].rate;
+ }
+
+ if (!emc->num_timings) {
+ emc->debugfs.min_rate = clk_get_rate(emc->clk);
+ emc->debugfs.max_rate = emc->debugfs.min_rate;
+ }
+
+ err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
+ emc->debugfs.max_rate);
+ if (err < 0) {
+ dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
+ emc->debugfs.min_rate, emc->debugfs.max_rate,
+ emc->clk);
+ return;
+ }
+
+ emc->debugfs.root = debugfs_create_dir("emc", NULL);
+
+ debugfs_create_file("available_rates", 0444, emc->debugfs.root, emc,
+ &tegra_emc_debug_available_rates_fops);
+ debugfs_create_file("min_rate", 0644, emc->debugfs.root,
+ emc, &tegra_emc_debug_min_rate_fops);
+ debugfs_create_file("max_rate", 0644, emc->debugfs.root,
+ emc, &tegra_emc_debug_max_rate_fops);
+}
+
+static inline struct tegra_emc *
+to_tegra_emc_provider(struct icc_provider *provider)
+{
+ return container_of(provider, struct tegra_emc, provider);
+}
+
+static struct icc_node_data *
+emc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct icc_provider *provider = data;
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ /* External Memory is the only possible ICC route */
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ if (node->id != TEGRA_ICC_EMEM)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * SRC and DST nodes should have matching TAG in order to have
+ * it set by default for a requested path.
+ */
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ ndata->node = node;
+
+ return ndata;
+ }
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+static int emc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct tegra_emc *emc = to_tegra_emc_provider(dst->provider);
+ unsigned long long peak_bw = icc_units_to_bps(dst->peak_bw);
+ unsigned long long avg_bw = icc_units_to_bps(dst->avg_bw);
+ unsigned long long rate = max(avg_bw, peak_bw);
+ unsigned int dram_data_bus_width_bytes;
+ const unsigned int ddr = 2;
+ int err;
+
+ /*
+	 * The Tegra124 EMC runs at the SDRAM bus clock rate. Because data is
+	 * sampled on both EMC clock edges (DDR), the EMC clock rate is half
+	 * of the peak data rate.
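+	 *
+	 * For example, with a 64-bit (8-byte) bus, a requested peak bandwidth
+	 * of 12800000 kByte/s works out to 12.8e9 / (2 * 8) = 800 MHz.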
+ */
+ dram_data_bus_width_bytes = emc->dram_bus_width / 8;
+ do_div(rate, ddr * dram_data_bus_width_bytes);
+ rate = min_t(u64, rate, U32_MAX);
+
+ err = emc_set_min_rate(emc, rate, EMC_RATE_ICC);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+{
+ const struct tegra_mc_soc *soc = emc->mc->soc;
+ struct icc_node *node;
+ int err;
+
+ emc->provider.dev = emc->dev;
+ emc->provider.set = emc_icc_set;
+ emc->provider.data = &emc->provider;
+ emc->provider.aggregate = soc->icc_ops->aggregate;
+ emc->provider.xlate_extended = emc_of_icc_xlate_extended;
+
+ icc_provider_init(&emc->provider);
+
+ /* create External Memory Controller node */
+ node = icc_node_create(TEGRA_ICC_EMC);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto err_msg;
+ }
+
+ node->name = "External Memory Controller";
+ icc_node_add(node, &emc->provider);
+
+ /* link External Memory Controller to External Memory (DRAM) */
+ err = icc_link_create(node, TEGRA_ICC_EMEM);
+ if (err)
+ goto remove_nodes;
+
+ /* create External Memory node */
+ node = icc_node_create(TEGRA_ICC_EMEM);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto remove_nodes;
+ }
+
+ node->name = "External Memory (DRAM)";
+ icc_node_add(node, &emc->provider);
+
+ err = icc_provider_register(&emc->provider);
+ if (err)
+ goto remove_nodes;
+
+ return 0;
+
+remove_nodes:
+ icc_nodes_remove(&emc->provider);
+err_msg:
+ dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
+
+ return err;
+}
+
+static int tegra_emc_opp_table_init(struct tegra_emc *emc)
+{
+ u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
+ int opp_token, err;
+
+ err = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
+ if (err < 0) {
+ dev_err(emc->dev, "failed to set OPP supported HW: %d\n", err);
+ return err;
+ }
+ opp_token = err;
+
+ err = dev_pm_opp_of_add_table(emc->dev);
+ if (err) {
+ if (err == -ENODEV)
+ dev_err(emc->dev, "OPP table not found, please update your device tree\n");
+ else
+ dev_err(emc->dev, "failed to add OPP table: %d\n", err);
+
+ goto put_hw_table;
+ }
+
+ dev_info_once(emc->dev, "OPP HW ver. 0x%x, current clock rate %lu MHz\n",
+ hw_version, clk_get_rate(emc->clk) / 1000000);
+
+ /* first dummy rate-set initializes voltage state */
+ err = dev_pm_opp_set_rate(emc->dev, clk_get_rate(emc->clk));
+ if (err) {
+ dev_err(emc->dev, "failed to initialize OPP clock: %d\n", err);
+ goto remove_table;
+ }
+
+ return 0;
+
+remove_table:
+ dev_pm_opp_of_remove_table(emc->dev);
+put_hw_table:
+ dev_pm_opp_put_supported_hw(opp_token);
+
+ return err;
+}
+
+static void devm_tegra_emc_unset_callback(void *data)
+{
+ tegra124_clk_set_emc_callbacks(NULL, NULL);
+}
+
+static int tegra_emc_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct tegra_emc *emc;
+ u32 ram_code;
+ int err;
+
+ emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
+ if (!emc)
+ return -ENOMEM;
+
+ mutex_init(&emc->rate_lock);
+ emc->dev = &pdev->dev;
+
+ emc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(emc->regs))
+ return PTR_ERR(emc->regs);
+
+ emc->mc = devm_tegra_memory_controller_get(&pdev->dev);
+ if (IS_ERR(emc->mc))
+ return PTR_ERR(emc->mc);
+
+ ram_code = tegra_read_ram_code();
+
+ np = tegra_emc_find_node_by_ram_code(pdev->dev.of_node, ram_code);
+ if (np) {
+ err = tegra_emc_load_timings_from_dt(emc, np);
+ of_node_put(np);
+ if (err)
+ return err;
+ } else {
+ dev_info_once(&pdev->dev,
+ "no memory timings for RAM code %u found in DT\n",
+ ram_code);
+ }
+
+ err = emc_init(emc);
+ if (err) {
+ dev_err(&pdev->dev, "EMC initialization failed: %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, emc);
+
+ tegra124_clk_set_emc_callbacks(tegra_emc_prepare_timing_change,
+ tegra_emc_complete_timing_change);
+
+ err = devm_add_action_or_reset(&pdev->dev, devm_tegra_emc_unset_callback,
+ NULL);
+ if (err)
+ return err;
+
+ emc->clk = devm_clk_get(&pdev->dev, "emc");
+ if (IS_ERR(emc->clk)) {
+ err = PTR_ERR(emc->clk);
+ dev_err(&pdev->dev, "failed to get EMC clock: %d\n", err);
+ return err;
+ }
+
+ err = tegra_emc_opp_table_init(emc);
+ if (err)
+ return err;
+
+ tegra_emc_rate_requests_init(emc);
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ emc_debugfs_init(&pdev->dev, emc);
+
+ tegra_emc_interconnect_init(emc);
+
+ /*
+	 * Don't allow the kernel module to be unloaded. Unloading adds extra
+	 * complexity that isn't really worth the effort for this driver.
+ */
+ try_module_get(THIS_MODULE);
+
+ return 0;
+}
+
+static struct platform_driver tegra_emc_driver = {
+ .probe = tegra_emc_probe,
+ .driver = {
+ .name = "tegra-emc",
+ .of_match_table = tegra_emc_of_match,
+ .suppress_bind_attrs = true,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(tegra_emc_driver);
+
+MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra124 EMC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
new file mode 100644
index 000000000..470b7dbab
--- /dev/null
+++ b/drivers/memory/tegra/tegra124.c
@@ -0,0 +1,1311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/memory/tegra124-mc.h>
+
+#include "mc.h"
+
+static const struct tegra_mc_client tegra124_mc_clients[] = {
+ {
+ .id = 0x00,
+ .name = "ptcr",
+ .swgroup = TEGRA_SWGROUP_PTC,
+ .regs = {
+ .la = {
+ .reg = 0x34c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0,
+ },
+ },
+ }, {
+ .id = 0x01,
+ .name = "display0a",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xc2,
+ },
+ },
+ }, {
+ .id = 0x02,
+ .name = "display0ab",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xc6,
+ },
+ },
+ }, {
+ .id = 0x03,
+ .name = "display0b",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x04,
+ .name = "display0bb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x05,
+ .name = "display0c",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x2ec,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x06,
+ .name = "display0cb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x2f8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x0e,
+ .name = "afir",
+ .swgroup = TEGRA_SWGROUP_AFI,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x2e0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x13,
+ },
+ },
+ }, {
+ .id = 0x0f,
+ .name = "avpcarm7r",
+ .swgroup = TEGRA_SWGROUP_AVPC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x10,
+ .name = "displayhc",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x2f0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x11,
+ .name = "displayhcb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2fc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x15,
+ .name = "hdar",
+ .swgroup = TEGRA_SWGROUP_HDA,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x24,
+ },
+ },
+ }, {
+ .id = 0x16,
+ .name = "host1xdmar",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x17,
+ .name = "host1xr",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x1c,
+ .name = "msencsrd",
+ .swgroup = TEGRA_SWGROUP_MSENC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x23,
+ },
+ },
+ }, {
+ .id = 0x1d,
+ .name = "ppcsahbdmar",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
+ },
+ }, {
+ .id = 0x1e,
+ .name = "ppcsahbslvr",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
+ },
+ }, {
+ .id = 0x1f,
+ .name = "satar",
+ .swgroup = TEGRA_SWGROUP_SATA,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x350,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x65,
+ },
+ },
+ }, {
+ .id = 0x22,
+ .name = "vdebsevr",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x354,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4f,
+ },
+ },
+ }, {
+ .id = 0x23,
+ .name = "vdember",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x354,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x3d,
+ },
+ },
+ }, {
+ .id = 0x24,
+ .name = "vdemcer",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x358,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x66,
+ },
+ },
+ }, {
+ .id = 0x25,
+ .name = "vdetper",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x358,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xa5,
+ },
+ },
+ }, {
+ .id = 0x26,
+ .name = "mpcorelpr",
+ .swgroup = TEGRA_SWGROUP_MPCORELP,
+ .regs = {
+ .la = {
+ .reg = 0x324,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x27,
+ .name = "mpcorer",
+ .swgroup = TEGRA_SWGROUP_MPCORE,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x2b,
+ .name = "msencswr",
+ .swgroup = TEGRA_SWGROUP_MSENC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x31,
+ .name = "afiw",
+ .swgroup = TEGRA_SWGROUP_AFI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2e0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x32,
+ .name = "avpcarm7w",
+ .swgroup = TEGRA_SWGROUP_AVPC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x35,
+ .name = "hdaw",
+ .swgroup = TEGRA_SWGROUP_HDA,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x36,
+ .name = "host1xw",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x314,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x38,
+ .name = "mpcorelpw",
+ .swgroup = TEGRA_SWGROUP_MPCORELP,
+ .regs = {
+ .la = {
+ .reg = 0x324,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x39,
+ .name = "mpcorew",
+ .swgroup = TEGRA_SWGROUP_MPCORE,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x3b,
+ .name = "ppcsahbdmaw",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 27,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x3c,
+ .name = "ppcsahbslvw",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x3d,
+ .name = "sataw",
+ .swgroup = TEGRA_SWGROUP_SATA,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x350,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x65,
+ },
+ },
+ }, {
+ .id = 0x3e,
+ .name = "vdebsevw",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x35c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x3f,
+ .name = "vdedbgw",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x35c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x40,
+ .name = "vdembew",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x360,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x41,
+ .name = "vdetpmw",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x360,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x44,
+ .name = "ispra",
+ .swgroup = TEGRA_SWGROUP_ISP2,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x370,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x18,
+ },
+ },
+ }, {
+ .id = 0x46,
+ .name = "ispwa",
+ .swgroup = TEGRA_SWGROUP_ISP2,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x374,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x47,
+ .name = "ispwb",
+ .swgroup = TEGRA_SWGROUP_ISP2,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 7,
+ },
+ .la = {
+ .reg = 0x374,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x4a,
+ .name = "xusb_hostr",
+ .swgroup = TEGRA_SWGROUP_XUSB_HOST,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x37c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x39,
+ },
+ },
+ }, {
+ .id = 0x4b,
+ .name = "xusb_hostw",
+ .swgroup = TEGRA_SWGROUP_XUSB_HOST,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x37c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x4c,
+ .name = "xusb_devr",
+ .swgroup = TEGRA_SWGROUP_XUSB_DEV,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x380,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x39,
+ },
+ },
+ }, {
+ .id = 0x4d,
+ .name = "xusb_devw",
+ .swgroup = TEGRA_SWGROUP_XUSB_DEV,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x380,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x4e,
+ .name = "isprab",
+ .swgroup = TEGRA_SWGROUP_ISP2B,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x384,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x18,
+ },
+ },
+ }, {
+ .id = 0x50,
+ .name = "ispwab",
+ .swgroup = TEGRA_SWGROUP_ISP2B,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x388,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x51,
+ .name = "ispwbb",
+ .swgroup = TEGRA_SWGROUP_ISP2B,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x388,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x54,
+ .name = "tsecsrd",
+ .swgroup = TEGRA_SWGROUP_TSEC,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x390,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x9b,
+ },
+ },
+ }, {
+ .id = 0x55,
+ .name = "tsecswr",
+ .swgroup = TEGRA_SWGROUP_TSEC,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x390,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x56,
+ .name = "a9avpscr",
+ .swgroup = TEGRA_SWGROUP_A9AVP,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x3a4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x57,
+ .name = "a9avpscw",
+ .swgroup = TEGRA_SWGROUP_A9AVP,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x3a4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x58,
+ .name = "gpusrd",
+ .swgroup = TEGRA_SWGROUP_GPU,
+ .regs = {
+ .smmu = {
+ /* read-only */
+ .reg = 0x230,
+ .bit = 24,
+ },
+ .la = {
+ .reg = 0x3c8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
+ },
+ }, {
+ .id = 0x59,
+ .name = "gpuswr",
+ .swgroup = TEGRA_SWGROUP_GPU,
+ .regs = {
+ .smmu = {
+ /* read-only */
+ .reg = 0x230,
+ .bit = 25,
+ },
+ .la = {
+ .reg = 0x3c8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x5a,
+ .name = "displayt",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 26,
+ },
+ .la = {
+ .reg = 0x2f0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x60,
+ .name = "sdmmcra",
+ .swgroup = TEGRA_SWGROUP_SDMMC1A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x3b8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
+ },
+ }, {
+ .id = 0x61,
+ .name = "sdmmcraa",
+ .swgroup = TEGRA_SWGROUP_SDMMC2A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x3bc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
+ },
+ }, {
+ .id = 0x62,
+ .name = "sdmmcr",
+ .swgroup = TEGRA_SWGROUP_SDMMC3A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x3c0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
+ },
+ }, {
+ .id = 0x63,
+		.name = "sdmmcrab",
+		.swgroup = TEGRA_SWGROUP_SDMMC4A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x3c4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
+ },
+ }, {
+ .id = 0x64,
+ .name = "sdmmcwa",
+ .swgroup = TEGRA_SWGROUP_SDMMC1A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x3b8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x65,
+ .name = "sdmmcwaa",
+ .swgroup = TEGRA_SWGROUP_SDMMC2A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x3bc,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x66,
+ .name = "sdmmcw",
+ .swgroup = TEGRA_SWGROUP_SDMMC3A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x3c0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x67,
+ .name = "sdmmcwab",
+ .swgroup = TEGRA_SWGROUP_SDMMC4A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 7,
+ },
+ .la = {
+ .reg = 0x3c4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x6c,
+ .name = "vicsrd",
+ .swgroup = TEGRA_SWGROUP_VIC,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x394,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
+ },
+ }, {
+ .id = 0x6d,
+ .name = "vicswr",
+ .swgroup = TEGRA_SWGROUP_VIC,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x394,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x72,
+ .name = "viw",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x398,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x73,
+ .name = "displayd",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 19,
+ },
+ .la = {
+ .reg = 0x3c8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ },
+};
+
+static const struct tegra_smmu_swgroup tegra124_swgroups[] = {
+ { .name = "dc", .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 },
+ { .name = "dcb", .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 },
+ { .name = "afi", .swgroup = TEGRA_SWGROUP_AFI, .reg = 0x238 },
+ { .name = "avpc", .swgroup = TEGRA_SWGROUP_AVPC, .reg = 0x23c },
+ { .name = "hda", .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 },
+ { .name = "hc", .swgroup = TEGRA_SWGROUP_HC, .reg = 0x250 },
+ { .name = "msenc", .swgroup = TEGRA_SWGROUP_MSENC, .reg = 0x264 },
+ { .name = "ppcs", .swgroup = TEGRA_SWGROUP_PPCS, .reg = 0x270 },
+ { .name = "sata", .swgroup = TEGRA_SWGROUP_SATA, .reg = 0x274 },
+ { .name = "vde", .swgroup = TEGRA_SWGROUP_VDE, .reg = 0x27c },
+ { .name = "isp2", .swgroup = TEGRA_SWGROUP_ISP2, .reg = 0x258 },
+ { .name = "xusb_host", .swgroup = TEGRA_SWGROUP_XUSB_HOST, .reg = 0x288 },
+ { .name = "xusb_dev", .swgroup = TEGRA_SWGROUP_XUSB_DEV, .reg = 0x28c },
+ { .name = "isp2b", .swgroup = TEGRA_SWGROUP_ISP2B, .reg = 0xaa4 },
+ { .name = "tsec", .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 },
+ { .name = "a9avp", .swgroup = TEGRA_SWGROUP_A9AVP, .reg = 0x290 },
+ { .name = "gpu", .swgroup = TEGRA_SWGROUP_GPU, .reg = 0xaac },
+ { .name = "sdmmc1a", .swgroup = TEGRA_SWGROUP_SDMMC1A, .reg = 0xa94 },
+ { .name = "sdmmc2a", .swgroup = TEGRA_SWGROUP_SDMMC2A, .reg = 0xa98 },
+ { .name = "sdmmc3a", .swgroup = TEGRA_SWGROUP_SDMMC3A, .reg = 0xa9c },
+ { .name = "sdmmc4a", .swgroup = TEGRA_SWGROUP_SDMMC4A, .reg = 0xaa0 },
+ { .name = "vic", .swgroup = TEGRA_SWGROUP_VIC, .reg = 0x284 },
+ { .name = "vi", .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 },
+};
+
+static const unsigned int tegra124_group_drm[] = {
+ TEGRA_SWGROUP_DC,
+ TEGRA_SWGROUP_DCB,
+ TEGRA_SWGROUP_VIC,
+};
+
+static const struct tegra_smmu_group_soc tegra124_groups[] = {
+ {
+ .name = "drm",
+ .swgroups = tegra124_group_drm,
+ .num_swgroups = ARRAY_SIZE(tegra124_group_drm),
+ },
+};
+
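+/*
+ * Describes one memory client hot reset: the control/status register offsets
+ * and the bit position within them.
+ */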
+#define TEGRA124_MC_RESET(_name, _control, _status, _bit) \
+ { \
+ .name = #_name, \
+ .id = TEGRA124_MC_RESET_##_name, \
+ .control = _control, \
+ .status = _status, \
+ .bit = _bit, \
+ }
+
+static const struct tegra_mc_reset tegra124_mc_resets[] = {
+ TEGRA124_MC_RESET(AFI, 0x200, 0x204, 0),
+ TEGRA124_MC_RESET(AVPC, 0x200, 0x204, 1),
+ TEGRA124_MC_RESET(DC, 0x200, 0x204, 2),
+ TEGRA124_MC_RESET(DCB, 0x200, 0x204, 3),
+ TEGRA124_MC_RESET(HC, 0x200, 0x204, 6),
+ TEGRA124_MC_RESET(HDA, 0x200, 0x204, 7),
+ TEGRA124_MC_RESET(ISP2, 0x200, 0x204, 8),
+ TEGRA124_MC_RESET(MPCORE, 0x200, 0x204, 9),
+ TEGRA124_MC_RESET(MPCORELP, 0x200, 0x204, 10),
+ TEGRA124_MC_RESET(MSENC, 0x200, 0x204, 11),
+ TEGRA124_MC_RESET(PPCS, 0x200, 0x204, 14),
+ TEGRA124_MC_RESET(SATA, 0x200, 0x204, 15),
+ TEGRA124_MC_RESET(VDE, 0x200, 0x204, 16),
+ TEGRA124_MC_RESET(VI, 0x200, 0x204, 17),
+ TEGRA124_MC_RESET(VIC, 0x200, 0x204, 18),
+ TEGRA124_MC_RESET(XUSB_HOST, 0x200, 0x204, 19),
+ TEGRA124_MC_RESET(XUSB_DEV, 0x200, 0x204, 20),
+ TEGRA124_MC_RESET(TSEC, 0x200, 0x204, 21),
+ TEGRA124_MC_RESET(SDMMC1, 0x200, 0x204, 22),
+ TEGRA124_MC_RESET(SDMMC2, 0x200, 0x204, 23),
+ TEGRA124_MC_RESET(SDMMC3, 0x200, 0x204, 25),
+ TEGRA124_MC_RESET(SDMMC4, 0x970, 0x974, 0),
+ TEGRA124_MC_RESET(ISP2B, 0x970, 0x974, 1),
+ TEGRA124_MC_RESET(GPU, 0x970, 0x974, 2),
+};
+
+static int tegra124_mc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ /* TODO: program PTSA */
+ return 0;
+}
+
+static int tegra124_mc_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ /*
+ * ISO clients need to reserve extra bandwidth up-front because
+ * there could be high bandwidth pressure during initial filling
+ * of the client's FIFO buffers. Secondly, we need to take into
+	 * account inefficiencies of the memory subsystem.
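+	 * The 400% scaling below therefore reserves four times the requested
+	 * ISO peak bandwidth.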
+ */
+ if (tag & TEGRA_MC_ICC_TAG_ISO)
+ peak_bw = tegra_mc_scale_percents(peak_bw, 400);
+
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
+
+static struct icc_node_data *
+tegra124_mc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
+ const struct tegra_mc_client *client;
+ unsigned int i, idx = spec->args[0];
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ list_for_each_entry(node, &mc->provider.nodes, node_list) {
+ if (node->id != idx)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ client = &mc->soc->clients[idx];
+ ndata->node = node;
+
+ switch (client->swgroup) {
+ case TEGRA_SWGROUP_DC:
+ case TEGRA_SWGROUP_DCB:
+ case TEGRA_SWGROUP_PTC:
+ case TEGRA_SWGROUP_VI:
+ /* these clients are isochronous by default */
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ break;
+
+ default:
+ ndata->tag = TEGRA_MC_ICC_TAG_DEFAULT;
+ break;
+ }
+
+ return ndata;
+ }
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ if (mc->soc->clients[i].id == idx)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ dev_err(mc->dev, "invalid ICC client ID %u\n", idx);
+
+ return ERR_PTR(-EINVAL);
+}
+
+static const struct tegra_mc_icc_ops tegra124_mc_icc_ops = {
+ .xlate_extended = tegra124_mc_of_icc_xlate_extended,
+ .aggregate = tegra124_mc_icc_aggregate,
+ .set = tegra124_mc_icc_set,
+};
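
For context, a memory-client device driver consumes this provider through the generic interconnect API; below is a minimal sketch of the consumer side (the "dma-mem" path name and the bandwidth figures are illustrative assumptions, not values taken from this driver):

#include <linux/device.h>
#include <linux/interconnect.h>

/* Request an EMC bandwidth floor on behalf of a hypothetical client device. */
static int example_request_memory_bandwidth(struct device *dev)
{
	struct icc_path *path;
	int err;

	/* Resolved via the client's "interconnects"/"interconnect-names" DT properties. */
	path = of_icc_get(dev, "dma-mem");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/*
	 * avg/peak bandwidth in kB/s; the provider aggregates the requests
	 * (including the ISO boost in tegra124_mc_icc_aggregate()) and the
	 * EMC clock is adjusted accordingly.
	 */
	err = icc_set_bw(path, 1000000, 2000000);
	if (err)
		icc_put(path);

	return err;
}
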
+
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+static const unsigned long tegra124_mc_emem_regs[] = {
+ MC_EMEM_ARB_CFG,
+ MC_EMEM_ARB_OUTSTANDING_REQ,
+ MC_EMEM_ARB_TIMING_RCD,
+ MC_EMEM_ARB_TIMING_RP,
+ MC_EMEM_ARB_TIMING_RC,
+ MC_EMEM_ARB_TIMING_RAS,
+ MC_EMEM_ARB_TIMING_FAW,
+ MC_EMEM_ARB_TIMING_RRD,
+ MC_EMEM_ARB_TIMING_RAP2PRE,
+ MC_EMEM_ARB_TIMING_WAP2PRE,
+ MC_EMEM_ARB_TIMING_R2R,
+ MC_EMEM_ARB_TIMING_W2W,
+ MC_EMEM_ARB_TIMING_R2W,
+ MC_EMEM_ARB_TIMING_W2R,
+ MC_EMEM_ARB_DA_TURNS,
+ MC_EMEM_ARB_DA_COVERS,
+ MC_EMEM_ARB_MISC0,
+ MC_EMEM_ARB_MISC1,
+ MC_EMEM_ARB_RING1_THROTTLE
+};
+
+static const struct tegra_smmu_soc tegra124_smmu_soc = {
+ .clients = tegra124_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra124_mc_clients),
+ .swgroups = tegra124_swgroups,
+ .num_swgroups = ARRAY_SIZE(tegra124_swgroups),
+ .groups = tegra124_groups,
+ .num_groups = ARRAY_SIZE(tegra124_groups),
+ .supports_round_robin_arbitration = true,
+ .supports_request_limit = true,
+ .num_tlb_lines = 32,
+ .num_asids = 128,
+};
+
+const struct tegra_mc_soc tegra124_mc_soc = {
+ .clients = tegra124_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra124_mc_clients),
+ .num_address_bits = 34,
+ .atom_size = 32,
+ .client_id_mask = 0x7f,
+ .smmu = &tegra124_smmu_soc,
+ .emem_regs = tegra124_mc_emem_regs,
+ .num_emem_regs = ARRAY_SIZE(tegra124_mc_emem_regs),
+ .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
+ MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
+ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
+ .reset_ops = &tegra_mc_reset_ops_common,
+ .resets = tegra124_mc_resets,
+ .num_resets = ARRAY_SIZE(tegra124_mc_resets),
+ .icc_ops = &tegra124_mc_icc_ops,
+ .ops = &tegra30_mc_ops,
+};
+#endif /* CONFIG_ARCH_TEGRA_124_SOC */
+
+#ifdef CONFIG_ARCH_TEGRA_132_SOC
+static const struct tegra_smmu_soc tegra132_smmu_soc = {
+ .clients = tegra124_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra124_mc_clients),
+ .swgroups = tegra124_swgroups,
+ .num_swgroups = ARRAY_SIZE(tegra124_swgroups),
+ .groups = tegra124_groups,
+ .num_groups = ARRAY_SIZE(tegra124_groups),
+ .supports_round_robin_arbitration = true,
+ .supports_request_limit = true,
+ .num_tlb_lines = 32,
+ .num_asids = 128,
+};
+
+const struct tegra_mc_soc tegra132_mc_soc = {
+ .clients = tegra124_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra124_mc_clients),
+ .num_address_bits = 34,
+ .atom_size = 32,
+ .client_id_mask = 0x7f,
+ .smmu = &tegra132_smmu_soc,
+ .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
+ MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
+ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
+ .reset_ops = &tegra_mc_reset_ops_common,
+ .resets = tegra124_mc_resets,
+ .num_resets = ARRAY_SIZE(tegra124_mc_resets),
+ .icc_ops = &tegra124_mc_icc_ops,
+ .ops = &tegra30_mc_ops,
+};
+#endif /* CONFIG_ARCH_TEGRA_132_SOC */
diff --git a/drivers/memory/tegra/tegra186-emc.c b/drivers/memory/tegra/tegra186-emc.c
new file mode 100644
index 000000000..4007f4e16
--- /dev/null
+++ b/drivers/memory/tegra/tegra186-emc.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/bpmp.h>
+#include "mc.h"
+
+struct tegra186_emc_dvfs {
+ unsigned long latency;
+ unsigned long rate;
+};
+
+struct tegra186_emc {
+ struct tegra_bpmp *bpmp;
+ struct device *dev;
+ struct clk *clk;
+
+ struct tegra186_emc_dvfs *dvfs;
+ unsigned int num_dvfs;
+
+ struct {
+ struct dentry *root;
+ unsigned long min_rate;
+ unsigned long max_rate;
+ } debugfs;
+
+ struct icc_provider provider;
+};
+
+static inline struct tegra186_emc *to_tegra186_emc(struct icc_provider *provider)
+{
+ return container_of(provider, struct tegra186_emc, provider);
+}
+
+/*
+ * debugfs interface
+ *
+ * The memory controller driver exposes some files in debugfs that can be used
+ * to control the EMC frequency. The top-level directory can be found here:
+ *
+ * /sys/kernel/debug/emc
+ *
+ * It contains the following files:
+ *
+ * - available_rates: This file contains a list of valid, space-separated
+ * EMC frequencies.
+ *
+ * - min_rate: Writing a value to this file sets the given frequency as the
+ * floor of the permitted range. If this is higher than the currently
+ * configured EMC frequency, this will cause the frequency to be
+ * increased so that it stays within the valid range.
+ *
+ * - max_rate: Similarly to the min_rate file, writing a value to this file
+ * sets the given frequency as the ceiling of the permitted range. If
+ * the value is lower than the currently configured EMC frequency, this
+ * will cause the frequency to be decreased so that it stays within the
+ * valid range.
+ */
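
As a usage illustration of this interface, here is a small hypothetical user-space sketch; it assumes debugfs is mounted at /sys/kernel/debug and that the requested rate is one of the values listed in available_rates:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/emc/min_rate";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}

	/*
	 * Pin the EMC frequency floor to 204 MHz. The write is rejected with
	 * EINVAL if the value does not match an entry in available_rates.
	 */
	fprintf(f, "%lu\n", 204000000UL);
	fclose(f);

	return 0;
}
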
+
+static bool tegra186_emc_validate_rate(struct tegra186_emc *emc,
+ unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < emc->num_dvfs; i++)
+ if (rate == emc->dvfs[i].rate)
+ return true;
+
+ return false;
+}
+
+static int tegra186_emc_debug_available_rates_show(struct seq_file *s,
+ void *data)
+{
+ struct tegra186_emc *emc = s->private;
+ const char *prefix = "";
+ unsigned int i;
+
+ for (i = 0; i < emc->num_dvfs; i++) {
+ seq_printf(s, "%s%lu", prefix, emc->dvfs[i].rate);
+ prefix = " ";
+ }
+
+ seq_puts(s, "\n");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(tegra186_emc_debug_available_rates);
+
+static int tegra186_emc_debug_min_rate_get(void *data, u64 *rate)
+{
+ struct tegra186_emc *emc = data;
+
+ *rate = emc->debugfs.min_rate;
+
+ return 0;
+}
+
+static int tegra186_emc_debug_min_rate_set(void *data, u64 rate)
+{
+ struct tegra186_emc *emc = data;
+ int err;
+
+ if (!tegra186_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = clk_set_min_rate(emc->clk, rate);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.min_rate = rate;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(tegra186_emc_debug_min_rate_fops,
+ tegra186_emc_debug_min_rate_get,
+ tegra186_emc_debug_min_rate_set, "%llu\n");
+
+static int tegra186_emc_debug_max_rate_get(void *data, u64 *rate)
+{
+ struct tegra186_emc *emc = data;
+
+ *rate = emc->debugfs.max_rate;
+
+ return 0;
+}
+
+static int tegra186_emc_debug_max_rate_set(void *data, u64 rate)
+{
+ struct tegra186_emc *emc = data;
+ int err;
+
+ if (!tegra186_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = clk_set_max_rate(emc->clk, rate);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.max_rate = rate;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(tegra186_emc_debug_max_rate_fops,
+ tegra186_emc_debug_max_rate_get,
+ tegra186_emc_debug_max_rate_set, "%llu\n");
+
+static int tegra186_emc_get_emc_dvfs_latency(struct tegra186_emc *emc)
+{
+ struct mrq_emc_dvfs_latency_response response;
+ struct tegra_bpmp_message msg;
+ unsigned int i;
+ int err;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_EMC_DVFS_LATENCY;
+ msg.tx.data = NULL;
+ msg.tx.size = 0;
+ msg.rx.data = &response;
+ msg.rx.size = sizeof(response);
+
+ err = tegra_bpmp_transfer(emc->bpmp, &msg);
+ if (err < 0) {
+ dev_err(emc->dev, "failed to get EMC DVFS pairs: %d\n", err);
+ return err;
+ }
+ if (msg.rx.ret < 0) {
+ dev_err(emc->dev, "EMC DVFS MRQ failed: %d (BPMP error code)\n", msg.rx.ret);
+ return -EINVAL;
+ }
+
+ emc->debugfs.min_rate = ULONG_MAX;
+ emc->debugfs.max_rate = 0;
+
+ emc->num_dvfs = response.num_pairs;
+
+ emc->dvfs = devm_kmalloc_array(emc->dev, emc->num_dvfs, sizeof(*emc->dvfs), GFP_KERNEL);
+ if (!emc->dvfs)
+ return -ENOMEM;
+
+ dev_dbg(emc->dev, "%u DVFS pairs:\n", emc->num_dvfs);
+
+ for (i = 0; i < emc->num_dvfs; i++) {
+ emc->dvfs[i].rate = response.pairs[i].freq * 1000;
+ emc->dvfs[i].latency = response.pairs[i].latency;
+
+ if (emc->dvfs[i].rate < emc->debugfs.min_rate)
+ emc->debugfs.min_rate = emc->dvfs[i].rate;
+
+ if (emc->dvfs[i].rate > emc->debugfs.max_rate)
+ emc->debugfs.max_rate = emc->dvfs[i].rate;
+
+ dev_dbg(emc->dev, " %2u: %lu Hz -> %lu us\n", i,
+ emc->dvfs[i].rate, emc->dvfs[i].latency);
+ }
+
+ err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate, emc->debugfs.max_rate);
+ if (err < 0) {
+ dev_err(emc->dev, "failed to set rate range [%lu-%lu] for %pC\n",
+ emc->debugfs.min_rate, emc->debugfs.max_rate, emc->clk);
+ return err;
+ }
+
+ emc->debugfs.root = debugfs_create_dir("emc", NULL);
+ debugfs_create_file("available_rates", 0444, emc->debugfs.root, emc,
+ &tegra186_emc_debug_available_rates_fops);
+ debugfs_create_file("min_rate", 0644, emc->debugfs.root, emc,
+ &tegra186_emc_debug_min_rate_fops);
+ debugfs_create_file("max_rate", 0644, emc->debugfs.root, emc,
+ &tegra186_emc_debug_max_rate_fops);
+
+ return 0;
+}
+
+/*
+ * tegra_emc_icc_set_bw() - Set bandwidth callback for the EMC provider
+ * @src: ICC node for External Memory Controller (EMC)
+ * @dst: ICC node for External Memory (DRAM)
+ *
+ * Do nothing here, as the bandwidth info is now passed to BPMP-FW in the BW
+ * set function of the MC driver. BPMP-FW sets the final frequency based on
+ * the passed values.
+ */
+static int tegra_emc_icc_set_bw(struct icc_node *src, struct icc_node *dst)
+{
+ return 0;
+}
+
+static struct icc_node *
+tegra_emc_of_icc_xlate(struct of_phandle_args *spec, void *data)
+{
+ struct icc_provider *provider = data;
+ struct icc_node *node;
+
+ /* External Memory is the only possible ICC route */
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ if (node->id != TEGRA_ICC_EMEM)
+ continue;
+
+ return node;
+ }
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+static int tegra_emc_icc_get_init_bw(struct icc_node *node, u32 *avg, u32 *peak)
+{
+ *avg = 0;
+ *peak = 0;
+
+ return 0;
+}
+
+static int tegra_emc_interconnect_init(struct tegra186_emc *emc)
+{
+ struct tegra_mc *mc = dev_get_drvdata(emc->dev->parent);
+ const struct tegra_mc_soc *soc = mc->soc;
+ struct icc_node *node;
+ int err;
+
+ emc->provider.dev = emc->dev;
+ emc->provider.set = tegra_emc_icc_set_bw;
+ emc->provider.data = &emc->provider;
+ emc->provider.aggregate = soc->icc_ops->aggregate;
+ emc->provider.xlate = tegra_emc_of_icc_xlate;
+ emc->provider.get_bw = tegra_emc_icc_get_init_bw;
+
+ icc_provider_init(&emc->provider);
+
+ /* create External Memory Controller node */
+ node = icc_node_create(TEGRA_ICC_EMC);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto err_msg;
+ }
+
+ node->name = "External Memory Controller";
+ icc_node_add(node, &emc->provider);
+
+ /* link External Memory Controller to External Memory (DRAM) */
+ err = icc_link_create(node, TEGRA_ICC_EMEM);
+ if (err)
+ goto remove_nodes;
+
+ /* create External Memory node */
+ node = icc_node_create(TEGRA_ICC_EMEM);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto remove_nodes;
+ }
+
+ node->name = "External Memory (DRAM)";
+ icc_node_add(node, &emc->provider);
+
+ err = icc_provider_register(&emc->provider);
+ if (err)
+ goto remove_nodes;
+
+ return 0;
+
+remove_nodes:
+ icc_nodes_remove(&emc->provider);
+err_msg:
+ dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
+
+ return err;
+}
+
+static int tegra186_emc_probe(struct platform_device *pdev)
+{
+ struct tegra_mc *mc = dev_get_drvdata(pdev->dev.parent);
+ struct tegra186_emc *emc;
+ int err;
+
+ emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
+ if (!emc)
+ return -ENOMEM;
+
+ emc->bpmp = tegra_bpmp_get(&pdev->dev);
+ if (IS_ERR(emc->bpmp))
+ return dev_err_probe(&pdev->dev, PTR_ERR(emc->bpmp), "failed to get BPMP\n");
+
+ emc->clk = devm_clk_get(&pdev->dev, "emc");
+ if (IS_ERR(emc->clk)) {
+ err = PTR_ERR(emc->clk);
+ dev_err(&pdev->dev, "failed to get EMC clock: %d\n", err);
+ goto put_bpmp;
+ }
+
+ platform_set_drvdata(pdev, emc);
+ emc->dev = &pdev->dev;
+
+ if (tegra_bpmp_mrq_is_supported(emc->bpmp, MRQ_EMC_DVFS_LATENCY)) {
+ err = tegra186_emc_get_emc_dvfs_latency(emc);
+ if (err)
+ goto put_bpmp;
+ }
+
+ if (mc && mc->soc->icc_ops) {
+ if (tegra_bpmp_mrq_is_supported(emc->bpmp, MRQ_BWMGR_INT)) {
+ mc->bwmgr_mrq_supported = true;
+
+ /*
+ * The MC driver probe can't get a BPMP reference because the MC
+ * is probed earlier than the BPMP. So, save the BPMP reference
+ * obtained from the EMC DT node in mc->bpmp and use it in the
+ * MC's icc_set hook.
+ */
+ mc->bpmp = emc->bpmp;
+ barrier();
+ }
+
+ /*
+ * Initialize the ICC even if BPMP-FW doesn't support 'MRQ_BWMGR_INT'.
+ * The MC driver checks the 'mc->bwmgr_mrq_supported' flag and returns
+ * -EINVAL instead of passing the request on to BPMP-FW when a client
+ * later makes a BW request via 'icc_set_bw()'.
+ */
+ err = tegra_emc_interconnect_init(emc);
+ if (err) {
+ mc->bpmp = NULL;
+ goto put_bpmp;
+ }
+ }
+
+ return 0;
+
+put_bpmp:
+ tegra_bpmp_put(emc->bpmp);
+ return err;
+}
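
For reference, here is a minimal sketch (an assumed shape, not the actual MC driver code) of how the MC-side icc_set hook would honour the 'bwmgr_mrq_supported' flag and the BPMP reference saved by the probe above:

#include <linux/interconnect-provider.h>
#include <soc/tegra/mc.h>

static int example_mc_icc_set(struct icc_node *src, struct icc_node *dst)
{
	/* icc_provider_to_tegra_mc() is the helper already used by the MC providers above. */
	struct tegra_mc *mc = icc_provider_to_tegra_mc(src->provider);

	/* Reject bandwidth requests if BPMP-FW can't service MRQ_BWMGR_INT. */
	if (!mc->bwmgr_mrq_supported)
		return -EINVAL;

	/* ... otherwise forward the aggregated bandwidth to BPMP-FW via mc->bpmp ... */
	return 0;
}
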
+
+static int tegra186_emc_remove(struct platform_device *pdev)
+{
+ struct tegra_mc *mc = dev_get_drvdata(pdev->dev.parent);
+ struct tegra186_emc *emc = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(emc->debugfs.root);
+
+ mc->bpmp = NULL;
+ tegra_bpmp_put(emc->bpmp);
+
+ return 0;
+}
+
+static const struct of_device_id tegra186_emc_of_match[] = {
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
+ { .compatible = "nvidia,tegra186-emc" },
+#endif
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
+ { .compatible = "nvidia,tegra194-emc" },
+#endif
+#if defined(CONFIG_ARCH_TEGRA_234_SOC)
+ { .compatible = "nvidia,tegra234-emc" },
+#endif
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tegra186_emc_of_match);
+
+static struct platform_driver tegra186_emc_driver = {
+ .driver = {
+ .name = "tegra186-emc",
+ .of_match_table = tegra186_emc_of_match,
+ .suppress_bind_attrs = true,
+ .sync_state = icc_sync_state,
+ },
+ .probe = tegra186_emc_probe,
+ .remove = tegra186_emc_remove,
+};
+module_platform_driver(tegra186_emc_driver);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra186 External Memory Controller driver");
diff --git a/drivers/memory/tegra/tegra186.c b/drivers/memory/tegra/tegra186.c
new file mode 100644
index 000000000..533f85a4b
--- /dev/null
+++ b/drivers/memory/tegra/tegra186.c
@@ -0,0 +1,884 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2021 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/mc.h>
+
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
+#include <dt-bindings/memory/tegra186-mc.h>
+#endif
+
+#include "mc.h"
+
+#define MC_SID_STREAMID_OVERRIDE_MASK GENMASK(7, 0)
+#define MC_SID_STREAMID_SECURITY_WRITE_ACCESS_DISABLED BIT(16)
+#define MC_SID_STREAMID_SECURITY_OVERRIDE BIT(8)
+
+static int tegra186_mc_probe(struct tegra_mc *mc)
+{
+ struct platform_device *pdev = to_platform_device(mc->dev);
+ unsigned int i;
+ char name[8];
+ int err;
+
+ mc->bcast_ch_regs = devm_platform_ioremap_resource_byname(pdev, "broadcast");
+ if (IS_ERR(mc->bcast_ch_regs)) {
+ if (PTR_ERR(mc->bcast_ch_regs) == -EINVAL) {
+ dev_warn(&pdev->dev,
+ "Broadcast channel is missing, please update your device-tree\n");
+ mc->bcast_ch_regs = NULL;
+ goto populate;
+ }
+
+ return PTR_ERR(mc->bcast_ch_regs);
+ }
+
+ mc->ch_regs = devm_kcalloc(mc->dev, mc->soc->num_channels, sizeof(*mc->ch_regs),
+ GFP_KERNEL);
+ if (!mc->ch_regs)
+ return -ENOMEM;
+
+ for (i = 0; i < mc->soc->num_channels; i++) {
+ snprintf(name, sizeof(name), "ch%u", i);
+
+ mc->ch_regs[i] = devm_platform_ioremap_resource_byname(pdev, name);
+ if (IS_ERR(mc->ch_regs[i]))
+ return PTR_ERR(mc->ch_regs[i]);
+ }
+
+populate:
+ err = of_platform_populate(mc->dev->of_node, NULL, NULL, mc->dev);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void tegra186_mc_remove(struct tegra_mc *mc)
+{
+ of_platform_depopulate(mc->dev);
+}
+
+#if IS_ENABLED(CONFIG_IOMMU_API)
+static void tegra186_mc_client_sid_override(struct tegra_mc *mc,
+ const struct tegra_mc_client *client,
+ unsigned int sid)
+{
+ u32 value, old;
+
+ value = readl(mc->regs + client->regs.sid.security);
+ if ((value & MC_SID_STREAMID_SECURITY_OVERRIDE) == 0) {
+ /*
+ * If the secure firmware has locked down the override
+ * for this memory client, there's nothing we can do here.
+ */
+ if (value & MC_SID_STREAMID_SECURITY_WRITE_ACCESS_DISABLED)
+ return;
+
+ /*
+ * Otherwise, try to set the override itself. Typically the
+ * secure firmware will never have set this configuration.
+ * Instead, it will either have disabled write access to
+ * this field, or it will already have set an explicit
+ * override itself.
+ */
+ WARN_ON((value & MC_SID_STREAMID_SECURITY_OVERRIDE) == 0);
+
+ value |= MC_SID_STREAMID_SECURITY_OVERRIDE;
+ writel(value, mc->regs + client->regs.sid.security);
+ }
+
+ value = readl(mc->regs + client->regs.sid.override);
+ old = value & MC_SID_STREAMID_OVERRIDE_MASK;
+
+ if (old != sid) {
+ dev_dbg(mc->dev, "overriding SID %x for %s with %x\n", old,
+ client->name, sid);
+ writel(sid, mc->regs + client->regs.sid.override);
+ }
+}
+#endif
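
To make the bit semantics above concrete, here is a small standalone sketch (the sample security-register values are invented for illustration) of how the security register gates whether the SID override can be used:

#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_SID_SECURITY_OVERRIDE		(1u << 8)	/* BIT(8) */
#define EXAMPLE_SID_SECURITY_WRITE_DISABLED	(1u << 16)	/* BIT(16) */

/* Returns true if the kernel can use (or enable and then use) the override. */
static bool sid_override_usable(unsigned int security)
{
	if (security & EXAMPLE_SID_SECURITY_OVERRIDE)
		return true;	/* firmware already enabled the override */

	/* Not yet enabled: usable only if write access wasn't locked down. */
	return !(security & EXAMPLE_SID_SECURITY_WRITE_DISABLED);
}

int main(void)
{
	/* Invented sample values covering the three possible firmware states. */
	unsigned int locked  = EXAMPLE_SID_SECURITY_WRITE_DISABLED;
	unsigned int enabled = EXAMPLE_SID_SECURITY_OVERRIDE;
	unsigned int open    = 0;

	printf("locked: %d, enabled: %d, open: %d\n",
	       sid_override_usable(locked), sid_override_usable(enabled),
	       sid_override_usable(open));
	return 0;
}
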
+
+static int tegra186_mc_probe_device(struct tegra_mc *mc, struct device *dev)
+{
+#if IS_ENABLED(CONFIG_IOMMU_API)
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct of_phandle_args args;
+ unsigned int i, index = 0;
+
+ while (!of_parse_phandle_with_args(dev->of_node, "interconnects", "#interconnect-cells",
+ index, &args)) {
+ if (args.np == mc->dev->of_node && args.args_count != 0) {
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ const struct tegra_mc_client *client = &mc->soc->clients[i];
+
+ if (client->id == args.args[0]) {
+ u32 sid = fwspec->ids[0] & MC_SID_STREAMID_OVERRIDE_MASK;
+
+ tegra186_mc_client_sid_override(mc, client, sid);
+ }
+ }
+ }
+
+ index++;
+ }
+#endif
+
+ return 0;
+}
+
+const struct tegra_mc_ops tegra186_mc_ops = {
+ .probe = tegra186_mc_probe,
+ .remove = tegra186_mc_remove,
+ .probe_device = tegra186_mc_probe_device,
+ .handle_irq = tegra30_mc_handle_irq,
+};
+
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
+static const struct tegra_mc_client tegra186_mc_clients[] = {
+ {
+ .id = TEGRA186_MEMORY_CLIENT_PTCR,
+ .name = "ptcr",
+ .sid = TEGRA186_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x000,
+ .security = 0x004,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_AFIR,
+ .name = "afir",
+ .sid = TEGRA186_SID_AFI,
+ .regs = {
+ .sid = {
+ .override = 0x070,
+ .security = 0x074,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_HDAR,
+ .name = "hdar",
+ .sid = TEGRA186_SID_HDA,
+ .regs = {
+ .sid = {
+ .override = 0x0a8,
+ .security = 0x0ac,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_HOST1XDMAR,
+ .name = "host1xdmar",
+ .sid = TEGRA186_SID_HOST1X,
+ .regs = {
+ .sid = {
+ .override = 0x0b0,
+ .security = 0x0b4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_NVENCSRD,
+ .name = "nvencsrd",
+ .sid = TEGRA186_SID_NVENC,
+ .regs = {
+ .sid = {
+ .override = 0x0e0,
+ .security = 0x0e4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SATAR,
+ .name = "satar",
+ .sid = TEGRA186_SID_SATA,
+ .regs = {
+ .sid = {
+ .override = 0x0f8,
+ .security = 0x0fc,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_MPCORER,
+ .name = "mpcorer",
+ .sid = TEGRA186_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x138,
+ .security = 0x13c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_NVENCSWR,
+ .name = "nvencswr",
+ .sid = TEGRA186_SID_NVENC,
+ .regs = {
+ .sid = {
+ .override = 0x158,
+ .security = 0x15c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_AFIW,
+ .name = "afiw",
+ .sid = TEGRA186_SID_AFI,
+ .regs = {
+ .sid = {
+ .override = 0x188,
+ .security = 0x18c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_HDAW,
+ .name = "hdaw",
+ .sid = TEGRA186_SID_HDA,
+ .regs = {
+ .sid = {
+ .override = 0x1a8,
+ .security = 0x1ac,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_MPCOREW,
+ .name = "mpcorew",
+ .sid = TEGRA186_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x1c8,
+ .security = 0x1cc,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SATAW,
+ .name = "sataw",
+ .sid = TEGRA186_SID_SATA,
+ .regs = {
+ .sid = {
+ .override = 0x1e8,
+ .security = 0x1ec,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_ISPRA,
+ .name = "ispra",
+ .sid = TEGRA186_SID_ISP,
+ .regs = {
+ .sid = {
+ .override = 0x220,
+ .security = 0x224,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_ISPWA,
+ .name = "ispwa",
+ .sid = TEGRA186_SID_ISP,
+ .regs = {
+ .sid = {
+ .override = 0x230,
+ .security = 0x234,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_ISPWB,
+ .name = "ispwb",
+ .sid = TEGRA186_SID_ISP,
+ .regs = {
+ .sid = {
+ .override = 0x238,
+ .security = 0x23c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_XUSB_HOSTR,
+ .name = "xusb_hostr",
+ .sid = TEGRA186_SID_XUSB_HOST,
+ .regs = {
+ .sid = {
+ .override = 0x250,
+ .security = 0x254,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_XUSB_HOSTW,
+ .name = "xusb_hostw",
+ .sid = TEGRA186_SID_XUSB_HOST,
+ .regs = {
+ .sid = {
+ .override = 0x258,
+ .security = 0x25c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_XUSB_DEVR,
+ .name = "xusb_devr",
+ .sid = TEGRA186_SID_XUSB_DEV,
+ .regs = {
+ .sid = {
+ .override = 0x260,
+ .security = 0x264,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_XUSB_DEVW,
+ .name = "xusb_devw",
+ .sid = TEGRA186_SID_XUSB_DEV,
+ .regs = {
+ .sid = {
+ .override = 0x268,
+ .security = 0x26c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_TSECSRD,
+ .name = "tsecsrd",
+ .sid = TEGRA186_SID_TSEC,
+ .regs = {
+ .sid = {
+ .override = 0x2a0,
+ .security = 0x2a4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_TSECSWR,
+ .name = "tsecswr",
+ .sid = TEGRA186_SID_TSEC,
+ .regs = {
+ .sid = {
+ .override = 0x2a8,
+ .security = 0x2ac,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_GPUSRD,
+ .name = "gpusrd",
+ .sid = TEGRA186_SID_GPU,
+ .regs = {
+ .sid = {
+ .override = 0x2c0,
+ .security = 0x2c4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_GPUSWR,
+ .name = "gpuswr",
+ .sid = TEGRA186_SID_GPU,
+ .regs = {
+ .sid = {
+ .override = 0x2c8,
+ .security = 0x2cc,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCRA,
+ .name = "sdmmcra",
+ .sid = TEGRA186_SID_SDMMC1,
+ .regs = {
+ .sid = {
+ .override = 0x300,
+ .security = 0x304,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCRAA,
+ .name = "sdmmcraa",
+ .sid = TEGRA186_SID_SDMMC2,
+ .regs = {
+ .sid = {
+ .override = 0x308,
+ .security = 0x30c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCR,
+ .name = "sdmmcr",
+ .sid = TEGRA186_SID_SDMMC3,
+ .regs = {
+ .sid = {
+ .override = 0x310,
+ .security = 0x314,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCRAB,
+ .name = "sdmmcrab",
+ .sid = TEGRA186_SID_SDMMC4,
+ .regs = {
+ .sid = {
+ .override = 0x318,
+ .security = 0x31c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCWA,
+ .name = "sdmmcwa",
+ .sid = TEGRA186_SID_SDMMC1,
+ .regs = {
+ .sid = {
+ .override = 0x320,
+ .security = 0x324,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCWAA,
+ .name = "sdmmcwaa",
+ .sid = TEGRA186_SID_SDMMC2,
+ .regs = {
+ .sid = {
+ .override = 0x328,
+ .security = 0x32c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCW,
+ .name = "sdmmcw",
+ .sid = TEGRA186_SID_SDMMC3,
+ .regs = {
+ .sid = {
+ .override = 0x330,
+ .security = 0x334,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCWAB,
+ .name = "sdmmcwab",
+ .sid = TEGRA186_SID_SDMMC4,
+ .regs = {
+ .sid = {
+ .override = 0x338,
+ .security = 0x33c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_VICSRD,
+ .name = "vicsrd",
+ .sid = TEGRA186_SID_VIC,
+ .regs = {
+ .sid = {
+ .override = 0x360,
+ .security = 0x364,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_VICSWR,
+ .name = "vicswr",
+ .sid = TEGRA186_SID_VIC,
+ .regs = {
+ .sid = {
+ .override = 0x368,
+ .security = 0x36c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_VIW,
+ .name = "viw",
+ .sid = TEGRA186_SID_VI,
+ .regs = {
+ .sid = {
+ .override = 0x390,
+ .security = 0x394,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_NVDECSRD,
+ .name = "nvdecsrd",
+ .sid = TEGRA186_SID_NVDEC,
+ .regs = {
+ .sid = {
+ .override = 0x3c0,
+ .security = 0x3c4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_NVDECSWR,
+ .name = "nvdecswr",
+ .sid = TEGRA186_SID_NVDEC,
+ .regs = {
+ .sid = {
+ .override = 0x3c8,
+ .security = 0x3cc,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_APER,
+ .name = "aper",
+ .sid = TEGRA186_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x3d0,
+ .security = 0x3d4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_APEW,
+ .name = "apew",
+ .sid = TEGRA186_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x3d8,
+ .security = 0x3dc,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_NVJPGSRD,
+ .name = "nvjpgsrd",
+ .sid = TEGRA186_SID_NVJPG,
+ .regs = {
+ .sid = {
+ .override = 0x3f0,
+ .security = 0x3f4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_NVJPGSWR,
+ .name = "nvjpgswr",
+ .sid = TEGRA186_SID_NVJPG,
+ .regs = {
+ .sid = {
+ .override = 0x3f8,
+ .security = 0x3fc,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SESRD,
+ .name = "sesrd",
+ .sid = TEGRA186_SID_SE,
+ .regs = {
+ .sid = {
+ .override = 0x400,
+ .security = 0x404,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SESWR,
+ .name = "seswr",
+ .sid = TEGRA186_SID_SE,
+ .regs = {
+ .sid = {
+ .override = 0x408,
+ .security = 0x40c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_ETRR,
+ .name = "etrr",
+ .sid = TEGRA186_SID_ETR,
+ .regs = {
+ .sid = {
+ .override = 0x420,
+ .security = 0x424,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_ETRW,
+ .name = "etrw",
+ .sid = TEGRA186_SID_ETR,
+ .regs = {
+ .sid = {
+ .override = 0x428,
+ .security = 0x42c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_TSECSRDB,
+ .name = "tsecsrdb",
+ .sid = TEGRA186_SID_TSECB,
+ .regs = {
+ .sid = {
+ .override = 0x430,
+ .security = 0x434,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_TSECSWRB,
+ .name = "tsecswrb",
+ .sid = TEGRA186_SID_TSECB,
+ .regs = {
+ .sid = {
+ .override = 0x438,
+ .security = 0x43c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_GPUSRD2,
+ .name = "gpusrd2",
+ .sid = TEGRA186_SID_GPU,
+ .regs = {
+ .sid = {
+ .override = 0x440,
+ .security = 0x444,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_GPUSWR2,
+ .name = "gpuswr2",
+ .sid = TEGRA186_SID_GPU,
+ .regs = {
+ .sid = {
+ .override = 0x448,
+ .security = 0x44c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_AXISR,
+ .name = "axisr",
+ .sid = TEGRA186_SID_GPCDMA_0,
+ .regs = {
+ .sid = {
+ .override = 0x460,
+ .security = 0x464,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_AXISW,
+ .name = "axisw",
+ .sid = TEGRA186_SID_GPCDMA_0,
+ .regs = {
+ .sid = {
+ .override = 0x468,
+ .security = 0x46c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_EQOSR,
+ .name = "eqosr",
+ .sid = TEGRA186_SID_EQOS,
+ .regs = {
+ .sid = {
+ .override = 0x470,
+ .security = 0x474,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_EQOSW,
+ .name = "eqosw",
+ .sid = TEGRA186_SID_EQOS,
+ .regs = {
+ .sid = {
+ .override = 0x478,
+ .security = 0x47c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_UFSHCR,
+ .name = "ufshcr",
+ .sid = TEGRA186_SID_UFSHC,
+ .regs = {
+ .sid = {
+ .override = 0x480,
+ .security = 0x484,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_UFSHCW,
+ .name = "ufshcw",
+ .sid = TEGRA186_SID_UFSHC,
+ .regs = {
+ .sid = {
+ .override = 0x488,
+ .security = 0x48c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_NVDISPLAYR,
+ .name = "nvdisplayr",
+ .sid = TEGRA186_SID_NVDISPLAY,
+ .regs = {
+ .sid = {
+ .override = 0x490,
+ .security = 0x494,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_BPMPR,
+ .name = "bpmpr",
+ .sid = TEGRA186_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x498,
+ .security = 0x49c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_BPMPW,
+ .name = "bpmpw",
+ .sid = TEGRA186_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4a0,
+ .security = 0x4a4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_BPMPDMAR,
+ .name = "bpmpdmar",
+ .sid = TEGRA186_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4a8,
+ .security = 0x4ac,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_BPMPDMAW,
+ .name = "bpmpdmaw",
+ .sid = TEGRA186_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4b0,
+ .security = 0x4b4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_AONR,
+ .name = "aonr",
+ .sid = TEGRA186_SID_AON,
+ .regs = {
+ .sid = {
+ .override = 0x4b8,
+ .security = 0x4bc,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_AONW,
+ .name = "aonw",
+ .sid = TEGRA186_SID_AON,
+ .regs = {
+ .sid = {
+ .override = 0x4c0,
+ .security = 0x4c4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_AONDMAR,
+ .name = "aondmar",
+ .sid = TEGRA186_SID_AON,
+ .regs = {
+ .sid = {
+ .override = 0x4c8,
+ .security = 0x4cc,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_AONDMAW,
+ .name = "aondmaw",
+ .sid = TEGRA186_SID_AON,
+ .regs = {
+ .sid = {
+ .override = 0x4d0,
+ .security = 0x4d4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SCER,
+ .name = "scer",
+ .sid = TEGRA186_SID_SCE,
+ .regs = {
+ .sid = {
+ .override = 0x4d8,
+ .security = 0x4dc,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SCEW,
+ .name = "scew",
+ .sid = TEGRA186_SID_SCE,
+ .regs = {
+ .sid = {
+ .override = 0x4e0,
+ .security = 0x4e4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SCEDMAR,
+ .name = "scedmar",
+ .sid = TEGRA186_SID_SCE,
+ .regs = {
+ .sid = {
+ .override = 0x4e8,
+ .security = 0x4ec,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SCEDMAW,
+ .name = "scedmaw",
+ .sid = TEGRA186_SID_SCE,
+ .regs = {
+ .sid = {
+ .override = 0x4f0,
+ .security = 0x4f4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_APEDMAR,
+ .name = "apedmar",
+ .sid = TEGRA186_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x4f8,
+ .security = 0x4fc,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_APEDMAW,
+ .name = "apedmaw",
+ .sid = TEGRA186_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x500,
+ .security = 0x504,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_NVDISPLAYR1,
+ .name = "nvdisplayr1",
+ .sid = TEGRA186_SID_NVDISPLAY,
+ .regs = {
+ .sid = {
+ .override = 0x508,
+ .security = 0x50c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_VICSRD1,
+ .name = "vicsrd1",
+ .sid = TEGRA186_SID_VIC,
+ .regs = {
+ .sid = {
+ .override = 0x510,
+ .security = 0x514,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_NVDECSRD1,
+ .name = "nvdecsrd1",
+ .sid = TEGRA186_SID_NVDEC,
+ .regs = {
+ .sid = {
+ .override = 0x518,
+ .security = 0x51c,
+ },
+ },
+ },
+};
+
+const struct tegra_mc_soc tegra186_mc_soc = {
+ .num_clients = ARRAY_SIZE(tegra186_mc_clients),
+ .clients = tegra186_mc_clients,
+ .num_address_bits = 40,
+ .num_channels = 4,
+ .client_id_mask = 0xff,
+ .intmask = MC_INT_DECERR_GENERALIZED_CARVEOUT | MC_INT_DECERR_MTS |
+ MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
+ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
+ .ops = &tegra186_mc_ops,
+ .ch_intmask = 0x0000000f,
+ .global_intstatus_channel_shift = 0,
+};
+#endif
diff --git a/drivers/memory/tegra/tegra194.c b/drivers/memory/tegra/tegra194.c
new file mode 100644
index 000000000..26035ac3a
--- /dev/null
+++ b/drivers/memory/tegra/tegra194.c
@@ -0,0 +1,1361 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2021 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <soc/tegra/mc.h>
+
+#include <dt-bindings/memory/tegra194-mc.h>
+
+#include "mc.h"
+
+static const struct tegra_mc_client tegra194_mc_clients[] = {
+ {
+ .id = TEGRA194_MEMORY_CLIENT_PTCR,
+ .name = "ptcr",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x000,
+ .security = 0x004,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU7R,
+ .name = "miu7r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x008,
+ .security = 0x00c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU7W,
+ .name = "miu7w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x010,
+ .security = 0x014,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_HDAR,
+ .name = "hdar",
+ .sid = TEGRA194_SID_HDA,
+ .regs = {
+ .sid = {
+ .override = 0x0a8,
+ .security = 0x0ac,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_HOST1XDMAR,
+ .name = "host1xdmar",
+ .sid = TEGRA194_SID_HOST1X,
+ .regs = {
+ .sid = {
+ .override = 0x0b0,
+ .security = 0x0b4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVENCSRD,
+ .name = "nvencsrd",
+ .sid = TEGRA194_SID_NVENC,
+ .regs = {
+ .sid = {
+ .override = 0x0e0,
+ .security = 0x0e4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SATAR,
+ .name = "satar",
+ .sid = TEGRA194_SID_SATA,
+ .regs = {
+ .sid = {
+ .override = 0x0f8,
+ .security = 0x0fc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MPCORER,
+ .name = "mpcorer",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x138,
+ .security = 0x13c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVENCSWR,
+ .name = "nvencswr",
+ .sid = TEGRA194_SID_NVENC,
+ .regs = {
+ .sid = {
+ .override = 0x158,
+ .security = 0x15c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_HDAW,
+ .name = "hdaw",
+ .sid = TEGRA194_SID_HDA,
+ .regs = {
+ .sid = {
+ .override = 0x1a8,
+ .security = 0x1ac,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MPCOREW,
+ .name = "mpcorew",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x1c8,
+ .security = 0x1cc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SATAW,
+ .name = "sataw",
+ .sid = TEGRA194_SID_SATA,
+ .regs = {
+ .sid = {
+ .override = 0x1e8,
+ .security = 0x1ec,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ISPRA,
+ .name = "ispra",
+ .sid = TEGRA194_SID_ISP,
+ .regs = {
+ .sid = {
+ .override = 0x220,
+ .security = 0x224,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ISPFALR,
+ .name = "ispfalr",
+ .sid = TEGRA194_SID_ISP_FALCON,
+ .regs = {
+ .sid = {
+ .override = 0x228,
+ .security = 0x22c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ISPWA,
+ .name = "ispwa",
+ .sid = TEGRA194_SID_ISP,
+ .regs = {
+ .sid = {
+ .override = 0x230,
+ .security = 0x234,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ISPWB,
+ .name = "ispwb",
+ .sid = TEGRA194_SID_ISP,
+ .regs = {
+ .sid = {
+ .override = 0x238,
+ .security = 0x23c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_XUSB_HOSTR,
+ .name = "xusb_hostr",
+ .sid = TEGRA194_SID_XUSB_HOST,
+ .regs = {
+ .sid = {
+ .override = 0x250,
+ .security = 0x254,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_XUSB_HOSTW,
+ .name = "xusb_hostw",
+ .sid = TEGRA194_SID_XUSB_HOST,
+ .regs = {
+ .sid = {
+ .override = 0x258,
+ .security = 0x25c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_XUSB_DEVR,
+ .name = "xusb_devr",
+ .sid = TEGRA194_SID_XUSB_DEV,
+ .regs = {
+ .sid = {
+ .override = 0x260,
+ .security = 0x264,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_XUSB_DEVW,
+ .name = "xusb_devw",
+ .sid = TEGRA194_SID_XUSB_DEV,
+ .regs = {
+ .sid = {
+ .override = 0x268,
+ .security = 0x26c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SDMMCRA,
+ .name = "sdmmcra",
+ .sid = TEGRA194_SID_SDMMC1,
+ .regs = {
+ .sid = {
+ .override = 0x300,
+ .security = 0x304,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SDMMCR,
+ .name = "sdmmcr",
+ .sid = TEGRA194_SID_SDMMC3,
+ .regs = {
+ .sid = {
+ .override = 0x310,
+ .security = 0x314,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SDMMCRAB,
+ .name = "sdmmcrab",
+ .sid = TEGRA194_SID_SDMMC4,
+ .regs = {
+ .sid = {
+ .override = 0x318,
+ .security = 0x31c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SDMMCWA,
+ .name = "sdmmcwa",
+ .sid = TEGRA194_SID_SDMMC1,
+ .regs = {
+ .sid = {
+ .override = 0x320,
+ .security = 0x324,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SDMMCW,
+ .name = "sdmmcw",
+ .sid = TEGRA194_SID_SDMMC3,
+ .regs = {
+ .sid = {
+ .override = 0x330,
+ .security = 0x334,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SDMMCWAB,
+ .name = "sdmmcwab",
+ .sid = TEGRA194_SID_SDMMC4,
+ .regs = {
+ .sid = {
+ .override = 0x338,
+ .security = 0x33c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_VICSRD,
+ .name = "vicsrd",
+ .sid = TEGRA194_SID_VIC,
+ .regs = {
+ .sid = {
+ .override = 0x360,
+ .security = 0x364,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_VICSWR,
+ .name = "vicswr",
+ .sid = TEGRA194_SID_VIC,
+ .regs = {
+ .sid = {
+ .override = 0x368,
+ .security = 0x36c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_VIW,
+ .name = "viw",
+ .sid = TEGRA194_SID_VI,
+ .regs = {
+ .sid = {
+ .override = 0x390,
+ .security = 0x394,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDECSRD,
+ .name = "nvdecsrd",
+ .sid = TEGRA194_SID_NVDEC,
+ .regs = {
+ .sid = {
+ .override = 0x3c0,
+ .security = 0x3c4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDECSWR,
+ .name = "nvdecswr",
+ .sid = TEGRA194_SID_NVDEC,
+ .regs = {
+ .sid = {
+ .override = 0x3c8,
+ .security = 0x3cc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_APER,
+ .name = "aper",
+ .sid = TEGRA194_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x3d0,
+ .security = 0x3d4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_APEW,
+ .name = "apew",
+ .sid = TEGRA194_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x3d8,
+ .security = 0x3dc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVJPGSRD,
+ .name = "nvjpgsrd",
+ .sid = TEGRA194_SID_NVJPG,
+ .regs = {
+ .sid = {
+ .override = 0x3f0,
+ .security = 0x3f4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVJPGSWR,
+ .name = "nvjpgswr",
+ .sid = TEGRA194_SID_NVJPG,
+ .regs = {
+ .sid = {
+ .override = 0x3f8,
+ .security = 0x3fc,
+ },
+ },
+ }, {
+ .name = "axiapr",
+ .id = TEGRA194_MEMORY_CLIENT_AXIAPR,
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x410,
+ .security = 0x414,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_AXIAPW,
+ .name = "axiapw",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x418,
+ .security = 0x41c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ETRR,
+ .name = "etrr",
+ .sid = TEGRA194_SID_ETR,
+ .regs = {
+ .sid = {
+ .override = 0x420,
+ .security = 0x424,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ETRW,
+ .name = "etrw",
+ .sid = TEGRA194_SID_ETR,
+ .regs = {
+ .sid = {
+ .override = 0x428,
+ .security = 0x42c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_AXISR,
+ .name = "axisr",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x460,
+ .security = 0x464,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_AXISW,
+ .name = "axisw",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x468,
+ .security = 0x46c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_EQOSR,
+ .name = "eqosr",
+ .sid = TEGRA194_SID_EQOS,
+ .regs = {
+ .sid = {
+ .override = 0x470,
+ .security = 0x474,
+ },
+ },
+ }, {
+ .name = "eqosw",
+ .id = TEGRA194_MEMORY_CLIENT_EQOSW,
+ .sid = TEGRA194_SID_EQOS,
+ .regs = {
+ .sid = {
+ .override = 0x478,
+ .security = 0x47c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_UFSHCR,
+ .name = "ufshcr",
+ .sid = TEGRA194_SID_UFSHC,
+ .regs = {
+ .sid = {
+ .override = 0x480,
+ .security = 0x484,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_UFSHCW,
+ .name = "ufshcw",
+ .sid = TEGRA194_SID_UFSHC,
+ .regs = {
+ .sid = {
+ .override = 0x488,
+ .security = 0x48c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDISPLAYR,
+ .name = "nvdisplayr",
+ .sid = TEGRA194_SID_NVDISPLAY,
+ .regs = {
+ .sid = {
+ .override = 0x490,
+ .security = 0x494,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_BPMPR,
+ .name = "bpmpr",
+ .sid = TEGRA194_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x498,
+ .security = 0x49c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_BPMPW,
+ .name = "bpmpw",
+ .sid = TEGRA194_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4a0,
+ .security = 0x4a4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_BPMPDMAR,
+ .name = "bpmpdmar",
+ .sid = TEGRA194_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4a8,
+ .security = 0x4ac,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_BPMPDMAW,
+ .name = "bpmpdmaw",
+ .sid = TEGRA194_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4b0,
+ .security = 0x4b4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_AONR,
+ .name = "aonr",
+ .sid = TEGRA194_SID_AON,
+ .regs = {
+ .sid = {
+ .override = 0x4b8,
+ .security = 0x4bc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_AONW,
+ .name = "aonw",
+ .sid = TEGRA194_SID_AON,
+ .regs = {
+ .sid = {
+ .override = 0x4c0,
+ .security = 0x4c4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_AONDMAR,
+ .name = "aondmar",
+ .sid = TEGRA194_SID_AON,
+ .regs = {
+ .sid = {
+ .override = 0x4c8,
+ .security = 0x4cc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_AONDMAW,
+ .name = "aondmaw",
+ .sid = TEGRA194_SID_AON,
+ .regs = {
+ .sid = {
+ .override = 0x4d0,
+ .security = 0x4d4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SCER,
+ .name = "scer",
+ .sid = TEGRA194_SID_SCE,
+ .regs = {
+ .sid = {
+ .override = 0x4d8,
+ .security = 0x4dc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SCEW,
+ .name = "scew",
+ .sid = TEGRA194_SID_SCE,
+ .regs = {
+ .sid = {
+ .override = 0x4e0,
+ .security = 0x4e4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SCEDMAR,
+ .name = "scedmar",
+ .sid = TEGRA194_SID_SCE,
+ .regs = {
+ .sid = {
+ .override = 0x4e8,
+ .security = 0x4ec,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SCEDMAW,
+ .name = "scedmaw",
+ .sid = TEGRA194_SID_SCE,
+ .regs = {
+ .sid = {
+ .override = 0x4f0,
+ .security = 0x4f4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_APEDMAR,
+ .name = "apedmar",
+ .sid = TEGRA194_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x4f8,
+ .security = 0x4fc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_APEDMAW,
+ .name = "apedmaw",
+ .sid = TEGRA194_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x500,
+ .security = 0x504,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDISPLAYR1,
+ .name = "nvdisplayr1",
+ .sid = TEGRA194_SID_NVDISPLAY,
+ .regs = {
+ .sid = {
+ .override = 0x508,
+ .security = 0x50c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_VICSRD1,
+ .name = "vicsrd1",
+ .sid = TEGRA194_SID_VIC,
+ .regs = {
+ .sid = {
+ .override = 0x510,
+ .security = 0x514,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDECSRD1,
+ .name = "nvdecsrd1",
+ .sid = TEGRA194_SID_NVDEC,
+ .regs = {
+ .sid = {
+ .override = 0x518,
+ .security = 0x51c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU0R,
+ .name = "miu0r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x530,
+ .security = 0x534,
+ },
+ },
+ }, {
+ .name = "miu0w",
+ .id = TEGRA194_MEMORY_CLIENT_MIU0W,
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x538,
+ .security = 0x53c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU1R,
+ .name = "miu1r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x540,
+ .security = 0x544,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU1W,
+ .name = "miu1w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x548,
+ .security = 0x54c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU2R,
+ .name = "miu2r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x570,
+ .security = 0x574,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU2W,
+ .name = "miu2w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x578,
+ .security = 0x57c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU3R,
+ .name = "miu3r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x580,
+ .security = 0x584,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU3W,
+ .name = "miu3w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x588,
+ .security = 0x58c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU4R,
+ .name = "miu4r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x590,
+ .security = 0x594,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU4W,
+ .name = "miu4w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x598,
+ .security = 0x59c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DPMUR,
+ .name = "dpmur",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x598,
+ .security = 0x59c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_VIFALR,
+ .name = "vifalr",
+ .sid = TEGRA194_SID_VI_FALCON,
+ .regs = {
+ .sid = {
+ .override = 0x5e0,
+ .security = 0x5e4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_VIFALW,
+ .name = "vifalw",
+ .sid = TEGRA194_SID_VI_FALCON,
+ .regs = {
+ .sid = {
+ .override = 0x5e8,
+ .security = 0x5ec,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA0RDA,
+ .name = "dla0rda",
+ .sid = TEGRA194_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x5f0,
+ .security = 0x5f4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA0FALRDB,
+ .name = "dla0falrdb",
+ .sid = TEGRA194_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x5f8,
+ .security = 0x5fc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA0WRA,
+ .name = "dla0wra",
+ .sid = TEGRA194_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x600,
+ .security = 0x604,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA0FALWRB,
+ .name = "dla0falwrb",
+ .sid = TEGRA194_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x608,
+ .security = 0x60c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA1RDA,
+ .name = "dla1rda",
+ .sid = TEGRA194_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x610,
+ .security = 0x614,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA1FALRDB,
+ .name = "dla1falrdb",
+ .sid = TEGRA194_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x618,
+ .security = 0x61c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA1WRA,
+ .name = "dla1wra",
+ .sid = TEGRA194_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x620,
+ .security = 0x624,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA1FALWRB,
+ .name = "dla1falwrb",
+ .sid = TEGRA194_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x628,
+ .security = 0x62c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0RDA,
+ .name = "pva0rda",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x630,
+ .security = 0x634,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0RDB,
+ .name = "pva0rdb",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x638,
+ .security = 0x63c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0RDC,
+ .name = "pva0rdc",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x640,
+ .security = 0x644,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0WRA,
+ .name = "pva0wra",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x648,
+ .security = 0x64c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0WRB,
+ .name = "pva0wrb",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x650,
+ .security = 0x654,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0WRC,
+ .name = "pva0wrc",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x658,
+ .security = 0x65c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1RDA,
+ .name = "pva1rda",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x660,
+ .security = 0x664,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1RDB,
+ .name = "pva1rdb",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x668,
+ .security = 0x66c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1RDC,
+ .name = "pva1rdc",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x670,
+ .security = 0x674,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1WRA,
+ .name = "pva1wra",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x678,
+ .security = 0x67c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1WRB,
+ .name = "pva1wrb",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x680,
+ .security = 0x684,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1WRC,
+ .name = "pva1wrc",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x688,
+ .security = 0x68c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_RCER,
+ .name = "rcer",
+ .sid = TEGRA194_SID_RCE,
+ .regs = {
+ .sid = {
+ .override = 0x690,
+ .security = 0x694,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_RCEW,
+ .name = "rcew",
+ .sid = TEGRA194_SID_RCE,
+ .regs = {
+ .sid = {
+ .override = 0x698,
+ .security = 0x69c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_RCEDMAR,
+ .name = "rcedmar",
+ .sid = TEGRA194_SID_RCE,
+ .regs = {
+ .sid = {
+ .override = 0x6a0,
+ .security = 0x6a4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_RCEDMAW,
+ .name = "rcedmaw",
+ .sid = TEGRA194_SID_RCE,
+ .regs = {
+ .sid = {
+ .override = 0x6a8,
+ .security = 0x6ac,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVENC1SRD,
+ .name = "nvenc1srd",
+ .sid = TEGRA194_SID_NVENC1,
+ .regs = {
+ .sid = {
+ .override = 0x6b0,
+ .security = 0x6b4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVENC1SWR,
+ .name = "nvenc1swr",
+ .sid = TEGRA194_SID_NVENC1,
+ .regs = {
+ .sid = {
+ .override = 0x6b8,
+ .security = 0x6bc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE0R,
+ .name = "pcie0r",
+ .sid = TEGRA194_SID_PCIE0,
+ .regs = {
+ .sid = {
+ .override = 0x6c0,
+ .security = 0x6c4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE0W,
+ .name = "pcie0w",
+ .sid = TEGRA194_SID_PCIE0,
+ .regs = {
+ .sid = {
+ .override = 0x6c8,
+ .security = 0x6cc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE1R,
+ .name = "pcie1r",
+ .sid = TEGRA194_SID_PCIE1,
+ .regs = {
+ .sid = {
+ .override = 0x6d0,
+ .security = 0x6d4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE1W,
+ .name = "pcie1w",
+ .sid = TEGRA194_SID_PCIE1,
+ .regs = {
+ .sid = {
+ .override = 0x6d8,
+ .security = 0x6dc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE2AR,
+ .name = "pcie2ar",
+ .sid = TEGRA194_SID_PCIE2,
+ .regs = {
+ .sid = {
+ .override = 0x6e0,
+ .security = 0x6e4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE2AW,
+ .name = "pcie2aw",
+ .sid = TEGRA194_SID_PCIE2,
+ .regs = {
+ .sid = {
+ .override = 0x6e8,
+ .security = 0x6ec,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE3R,
+ .name = "pcie3r",
+ .sid = TEGRA194_SID_PCIE3,
+ .regs = {
+ .sid = {
+ .override = 0x6f0,
+ .security = 0x6f4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE3W,
+ .name = "pcie3w",
+ .sid = TEGRA194_SID_PCIE3,
+ .regs = {
+ .sid = {
+ .override = 0x6f8,
+ .security = 0x6fc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE4R,
+ .name = "pcie4r",
+ .sid = TEGRA194_SID_PCIE4,
+ .regs = {
+ .sid = {
+ .override = 0x700,
+ .security = 0x704,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE4W,
+ .name = "pcie4w",
+ .sid = TEGRA194_SID_PCIE4,
+ .regs = {
+ .sid = {
+ .override = 0x708,
+ .security = 0x70c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE5R,
+ .name = "pcie5r",
+ .sid = TEGRA194_SID_PCIE5,
+ .regs = {
+ .sid = {
+ .override = 0x710,
+ .security = 0x714,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE5W,
+ .name = "pcie5w",
+ .sid = TEGRA194_SID_PCIE5,
+ .regs = {
+ .sid = {
+ .override = 0x718,
+ .security = 0x71c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ISPFALW,
+ .name = "ispfalw",
+ .sid = TEGRA194_SID_ISP_FALCON,
+ .regs = {
+ .sid = {
+ .override = 0x720,
+ .security = 0x724,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA0RDA1,
+ .name = "dla0rda1",
+ .sid = TEGRA194_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x748,
+ .security = 0x74c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA1RDA1,
+ .name = "dla1rda1",
+ .sid = TEGRA194_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x750,
+ .security = 0x754,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0RDA1,
+ .name = "pva0rda1",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x758,
+ .security = 0x75c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0RDB1,
+ .name = "pva0rdb1",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x760,
+ .security = 0x764,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1RDA1,
+ .name = "pva1rda1",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x768,
+ .security = 0x76c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1RDB1,
+ .name = "pva1rdb1",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x770,
+ .security = 0x774,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE5R1,
+ .name = "pcie5r1",
+ .sid = TEGRA194_SID_PCIE5,
+ .regs = {
+ .sid = {
+ .override = 0x778,
+ .security = 0x77c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVENCSRD1,
+ .name = "nvencsrd1",
+ .sid = TEGRA194_SID_NVENC,
+ .regs = {
+ .sid = {
+ .override = 0x780,
+ .security = 0x784,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVENC1SRD1,
+ .name = "nvenc1srd1",
+ .sid = TEGRA194_SID_NVENC1,
+ .regs = {
+ .sid = {
+ .override = 0x788,
+ .security = 0x78c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ISPRA1,
+ .name = "ispra1",
+ .sid = TEGRA194_SID_ISP,
+ .regs = {
+ .sid = {
+ .override = 0x790,
+ .security = 0x794,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE0R1,
+ .name = "pcie0r1",
+ .sid = TEGRA194_SID_PCIE0,
+ .regs = {
+ .sid = {
+ .override = 0x798,
+ .security = 0x79c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDEC1SRD,
+ .name = "nvdec1srd",
+ .sid = TEGRA194_SID_NVDEC1,
+ .regs = {
+ .sid = {
+ .override = 0x7c8,
+ .security = 0x7cc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDEC1SRD1,
+ .name = "nvdec1srd1",
+ .sid = TEGRA194_SID_NVDEC1,
+ .regs = {
+ .sid = {
+ .override = 0x7d0,
+ .security = 0x7d4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDEC1SWR,
+ .name = "nvdec1swr",
+ .sid = TEGRA194_SID_NVDEC1,
+ .regs = {
+ .sid = {
+ .override = 0x7d8,
+ .security = 0x7dc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU5R,
+ .name = "miu5r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x7e0,
+ .security = 0x7e4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU5W,
+ .name = "miu5w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x7e8,
+ .security = 0x7ec,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU6R,
+ .name = "miu6r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x7f0,
+ .security = 0x7f4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU6W,
+ .name = "miu6w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x7f8,
+ .security = 0x7fc,
+ },
+ },
+ },
+};
+
+const struct tegra_mc_soc tegra194_mc_soc = {
+ .num_clients = ARRAY_SIZE(tegra194_mc_clients),
+ .clients = tegra194_mc_clients,
+ .num_address_bits = 40,
+ .num_channels = 16,
+ .client_id_mask = 0xff,
+ .intmask = MC_INT_DECERR_ROUTE_SANITY |
+ MC_INT_DECERR_GENERALIZED_CARVEOUT | MC_INT_DECERR_MTS |
+ MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
+ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
+ .has_addr_hi_reg = true,
+ .ops = &tegra186_mc_ops,
+ .icc_ops = &tegra_mc_icc_ops,
+ .ch_intmask = 0x00000f00,
+ .global_intstatus_channel_shift = 8,
+};
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
new file mode 100644
index 000000000..fd595c851
--- /dev/null
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -0,0 +1,1279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Tegra20 External Memory Controller driver
+ *
+ * Author: Dmitry Osipenko <digetx@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/debugfs.h>
+#include <linux/devfreq.h>
+#include <linux/err.h>
+#include <linux/interconnect-provider.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/types.h>
+
+#include <soc/tegra/common.h>
+#include <soc/tegra/fuse.h>
+
+#include "../jedec_ddr.h"
+#include "../of_memory.h"
+
+#include "mc.h"
+
+#define EMC_INTSTATUS 0x000
+#define EMC_INTMASK 0x004
+#define EMC_DBG 0x008
+#define EMC_ADR_CFG_0 0x010
+#define EMC_TIMING_CONTROL 0x028
+#define EMC_RC 0x02c
+#define EMC_RFC 0x030
+#define EMC_RAS 0x034
+#define EMC_RP 0x038
+#define EMC_R2W 0x03c
+#define EMC_W2R 0x040
+#define EMC_R2P 0x044
+#define EMC_W2P 0x048
+#define EMC_RD_RCD 0x04c
+#define EMC_WR_RCD 0x050
+#define EMC_RRD 0x054
+#define EMC_REXT 0x058
+#define EMC_WDV 0x05c
+#define EMC_QUSE 0x060
+#define EMC_QRST 0x064
+#define EMC_QSAFE 0x068
+#define EMC_RDV 0x06c
+#define EMC_REFRESH 0x070
+#define EMC_BURST_REFRESH_NUM 0x074
+#define EMC_PDEX2WR 0x078
+#define EMC_PDEX2RD 0x07c
+#define EMC_PCHG2PDEN 0x080
+#define EMC_ACT2PDEN 0x084
+#define EMC_AR2PDEN 0x088
+#define EMC_RW2PDEN 0x08c
+#define EMC_TXSR 0x090
+#define EMC_TCKE 0x094
+#define EMC_TFAW 0x098
+#define EMC_TRPAB 0x09c
+#define EMC_TCLKSTABLE 0x0a0
+#define EMC_TCLKSTOP 0x0a4
+#define EMC_TREFBW 0x0a8
+#define EMC_QUSE_EXTRA 0x0ac
+#define EMC_ODT_WRITE 0x0b0
+#define EMC_ODT_READ 0x0b4
+#define EMC_MRR 0x0ec
+#define EMC_FBIO_CFG5 0x104
+#define EMC_FBIO_CFG6 0x114
+#define EMC_STAT_CONTROL 0x160
+#define EMC_STAT_LLMC_CONTROL 0x178
+#define EMC_STAT_PWR_CLOCK_LIMIT 0x198
+#define EMC_STAT_PWR_CLOCKS 0x19c
+#define EMC_STAT_PWR_COUNT 0x1a0
+#define EMC_AUTO_CAL_INTERVAL 0x2a8
+#define EMC_CFG_2 0x2b8
+#define EMC_CFG_DIG_DLL 0x2bc
+#define EMC_DLL_XFORM_DQS 0x2c0
+#define EMC_DLL_XFORM_QUSE 0x2c4
+#define EMC_ZCAL_REF_CNT 0x2e0
+#define EMC_ZCAL_WAIT_CNT 0x2e4
+#define EMC_CFG_CLKTRIM_0 0x2d0
+#define EMC_CFG_CLKTRIM_1 0x2d4
+#define EMC_CFG_CLKTRIM_2 0x2d8
+
+#define EMC_CLKCHANGE_REQ_ENABLE BIT(0)
+#define EMC_CLKCHANGE_PD_ENABLE BIT(1)
+#define EMC_CLKCHANGE_SR_ENABLE BIT(2)
+
+#define EMC_TIMING_UPDATE BIT(0)
+
+#define EMC_REFRESH_OVERFLOW_INT BIT(3)
+#define EMC_CLKCHANGE_COMPLETE_INT BIT(4)
+#define EMC_MRR_DIVLD_INT BIT(5)
+
+#define EMC_DBG_READ_MUX_ASSEMBLY BIT(0)
+#define EMC_DBG_WRITE_MUX_ACTIVE BIT(1)
+#define EMC_DBG_FORCE_UPDATE BIT(2)
+#define EMC_DBG_READ_DQM_CTRL BIT(9)
+#define EMC_DBG_CFG_PRIORITY BIT(24)
+
+#define EMC_FBIO_CFG5_DRAM_WIDTH_X16 BIT(4)
+#define EMC_FBIO_CFG5_DRAM_TYPE GENMASK(1, 0)
+
+#define EMC_MRR_DEV_SELECTN GENMASK(31, 30)
+#define EMC_MRR_MRR_MA GENMASK(23, 16)
+#define EMC_MRR_MRR_DATA GENMASK(15, 0)
+
+#define EMC_ADR_CFG_0_EMEM_NUMDEV GENMASK(25, 24)
+
+#define EMC_PWR_GATHER_CLEAR (1 << 8)
+#define EMC_PWR_GATHER_DISABLE (2 << 8)
+#define EMC_PWR_GATHER_ENABLE (3 << 8)
+
+enum emc_dram_type {
+ DRAM_TYPE_RESERVED,
+ DRAM_TYPE_DDR1,
+ DRAM_TYPE_LPDDR2,
+ DRAM_TYPE_DDR2,
+};
+
+static const u16 emc_timing_registers[] = {
+ EMC_RC,
+ EMC_RFC,
+ EMC_RAS,
+ EMC_RP,
+ EMC_R2W,
+ EMC_W2R,
+ EMC_R2P,
+ EMC_W2P,
+ EMC_RD_RCD,
+ EMC_WR_RCD,
+ EMC_RRD,
+ EMC_REXT,
+ EMC_WDV,
+ EMC_QUSE,
+ EMC_QRST,
+ EMC_QSAFE,
+ EMC_RDV,
+ EMC_REFRESH,
+ EMC_BURST_REFRESH_NUM,
+ EMC_PDEX2WR,
+ EMC_PDEX2RD,
+ EMC_PCHG2PDEN,
+ EMC_ACT2PDEN,
+ EMC_AR2PDEN,
+ EMC_RW2PDEN,
+ EMC_TXSR,
+ EMC_TCKE,
+ EMC_TFAW,
+ EMC_TRPAB,
+ EMC_TCLKSTABLE,
+ EMC_TCLKSTOP,
+ EMC_TREFBW,
+ EMC_QUSE_EXTRA,
+ EMC_FBIO_CFG6,
+ EMC_ODT_WRITE,
+ EMC_ODT_READ,
+ EMC_FBIO_CFG5,
+ EMC_CFG_DIG_DLL,
+ EMC_DLL_XFORM_DQS,
+ EMC_DLL_XFORM_QUSE,
+ EMC_ZCAL_REF_CNT,
+ EMC_ZCAL_WAIT_CNT,
+ EMC_AUTO_CAL_INTERVAL,
+ EMC_CFG_CLKTRIM_0,
+ EMC_CFG_CLKTRIM_1,
+ EMC_CFG_CLKTRIM_2,
+};
+
+struct emc_timing {
+ unsigned long rate;
+ u32 data[ARRAY_SIZE(emc_timing_registers)];
+};
+
+enum emc_rate_request_type {
+ EMC_RATE_DEVFREQ,
+ EMC_RATE_DEBUG,
+ EMC_RATE_ICC,
+ EMC_RATE_TYPE_MAX,
+};
+
+struct emc_rate_request {
+ unsigned long min_rate;
+ unsigned long max_rate;
+};
+
+struct tegra_emc {
+ struct device *dev;
+ struct tegra_mc *mc;
+ struct icc_provider provider;
+ struct notifier_block clk_nb;
+ struct clk *clk;
+ void __iomem *regs;
+ unsigned int dram_bus_width;
+
+ struct emc_timing *timings;
+ unsigned int num_timings;
+
+ struct {
+ struct dentry *root;
+ unsigned long min_rate;
+ unsigned long max_rate;
+ } debugfs;
+
+ /*
+ * There are multiple sources in the EMC driver which could request
+	 * a min/max clock rate; these rates are contained in this array.
+ */
+ struct emc_rate_request requested_rate[EMC_RATE_TYPE_MAX];
+
+ /* protect shared rate-change code path */
+ struct mutex rate_lock;
+
+ struct devfreq_simple_ondemand_data ondemand_data;
+
+ /* memory chip identity information */
+ union lpddr2_basic_config4 basic_conf4;
+ unsigned int manufacturer_id;
+ unsigned int revision_id1;
+ unsigned int revision_id2;
+
+ bool mrr_error;
+};
+
+static irqreturn_t tegra_emc_isr(int irq, void *data)
+{
+ struct tegra_emc *emc = data;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT;
+ u32 status;
+
+ status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask;
+ if (!status)
+ return IRQ_NONE;
+
+ /* notify about HW problem */
+ if (status & EMC_REFRESH_OVERFLOW_INT)
+ dev_err_ratelimited(emc->dev,
+ "refresh request overflow timeout\n");
+
+ /* clear interrupts */
+ writel_relaxed(status, emc->regs + EMC_INTSTATUS);
+
+ return IRQ_HANDLED;
+}
+
+static struct emc_timing *tegra_emc_find_timing(struct tegra_emc *emc,
+ unsigned long rate)
+{
+ struct emc_timing *timing = NULL;
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate >= rate) {
+ timing = &emc->timings[i];
+ break;
+ }
+ }
+
+ if (!timing) {
+ dev_err(emc->dev, "no timing for rate %lu\n", rate);
+ return NULL;
+ }
+
+ return timing;
+}
+
+static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
+{
+ struct emc_timing *timing = tegra_emc_find_timing(emc, rate);
+ unsigned int i;
+
+ if (!timing)
+ return -EINVAL;
+
+ dev_dbg(emc->dev, "%s: using timing rate %lu for requested rate %lu\n",
+ __func__, timing->rate, rate);
+
+ /* program shadow registers */
+ for (i = 0; i < ARRAY_SIZE(timing->data); i++)
+ writel_relaxed(timing->data[i],
+ emc->regs + emc_timing_registers[i]);
+
+ /* wait until programming has settled */
+ readl_relaxed(emc->regs + emc_timing_registers[i - 1]);
+
+ return 0;
+}
+
+static int emc_complete_timing_change(struct tegra_emc *emc, bool flush)
+{
+ int err;
+ u32 v;
+
+ dev_dbg(emc->dev, "%s: flush %d\n", __func__, flush);
+
+ if (flush) {
+ /* manually initiate memory timing update */
+ writel_relaxed(EMC_TIMING_UPDATE,
+ emc->regs + EMC_TIMING_CONTROL);
+ return 0;
+ }
+
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, v,
+ v & EMC_CLKCHANGE_COMPLETE_INT,
+ 1, 100);
+ if (err) {
+ dev_err(emc->dev, "emc-car handshake timeout: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
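+/*
+ * The EMC rate is changed through the CCF clock notifier below: new timings
+ * are programmed into the shadow registers on PRE_RATE_CHANGE, latched via
+ * the EMC/CAR handshake once the clock has switched (POST_RATE_CHANGE), and,
+ * if the rate change is aborted, the old timings are restored and latched
+ * with a manual EMC_TIMING_UPDATE.
+ */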
+static int tegra_emc_clk_change_notify(struct notifier_block *nb,
+ unsigned long msg, void *data)
+{
+ struct tegra_emc *emc = container_of(nb, struct tegra_emc, clk_nb);
+ struct clk_notifier_data *cnd = data;
+ int err;
+
+ switch (msg) {
+ case PRE_RATE_CHANGE:
+ err = emc_prepare_timing_change(emc, cnd->new_rate);
+ break;
+
+ case ABORT_RATE_CHANGE:
+ err = emc_prepare_timing_change(emc, cnd->old_rate);
+ if (err)
+ break;
+
+ err = emc_complete_timing_change(emc, true);
+ break;
+
+ case POST_RATE_CHANGE:
+ err = emc_complete_timing_change(emc, false);
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return notifier_from_errno(err);
+}
+
+static int load_one_timing_from_dt(struct tegra_emc *emc,
+ struct emc_timing *timing,
+ struct device_node *node)
+{
+ u32 rate;
+ int err;
+
+ if (!of_device_is_compatible(node, "nvidia,tegra20-emc-table")) {
+ dev_err(emc->dev, "incompatible DT node: %pOF\n", node);
+ return -EINVAL;
+ }
+
+ err = of_property_read_u32(node, "clock-frequency", &rate);
+ if (err) {
+ dev_err(emc->dev, "timing %pOF: failed to read rate: %d\n",
+ node, err);
+ return err;
+ }
+
+ err = of_property_read_u32_array(node, "nvidia,emc-registers",
+ timing->data,
+ ARRAY_SIZE(emc_timing_registers));
+ if (err) {
+ dev_err(emc->dev,
+ "timing %pOF: failed to read emc timing data: %d\n",
+ node, err);
+ return err;
+ }
+
+ /*
+ * The EMC clock rate is twice the bus rate, and the bus rate is
+ * measured in kHz.
+ */
+ timing->rate = rate * 2 * 1000;
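+	/*
+	 * For example, a DT clock-frequency of 333000 (i.e. a 333 MHz bus)
+	 * yields an EMC rate of 666000000 Hz.
+	 */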
+
+ dev_dbg(emc->dev, "%s: %pOF: EMC rate %lu\n",
+ __func__, node, timing->rate);
+
+ return 0;
+}
+
+static int cmp_timings(const void *_a, const void *_b)
+{
+ const struct emc_timing *a = _a;
+ const struct emc_timing *b = _b;
+
+ if (a->rate < b->rate)
+ return -1;
+
+ if (a->rate > b->rate)
+ return 1;
+
+ return 0;
+}
+
+static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
+ struct device_node *node)
+{
+ struct device_node *child;
+ struct emc_timing *timing;
+ int child_count;
+ int err;
+
+ child_count = of_get_child_count(node);
+ if (!child_count) {
+ dev_err(emc->dev, "no memory timings in DT node: %pOF\n", node);
+ return -EINVAL;
+ }
+
+ emc->timings = devm_kcalloc(emc->dev, child_count, sizeof(*timing),
+ GFP_KERNEL);
+ if (!emc->timings)
+ return -ENOMEM;
+
+ timing = emc->timings;
+
+ for_each_child_of_node(node, child) {
+ if (of_node_name_eq(child, "lpddr2"))
+ continue;
+
+ err = load_one_timing_from_dt(emc, timing++, child);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
+
+ emc->num_timings++;
+ }
+
+ sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
+ NULL);
+
+ dev_info_once(emc->dev,
+ "got %u timings for RAM code %u (min %luMHz max %luMHz)\n",
+ emc->num_timings,
+ tegra_read_ram_code(),
+ emc->timings[0].rate / 1000000,
+ emc->timings[emc->num_timings - 1].rate / 1000000);
+
+ return 0;
+}
+
+static struct device_node *
+tegra_emc_find_node_by_ram_code(struct tegra_emc *emc)
+{
+ struct device *dev = emc->dev;
+ struct device_node *np;
+ u32 value, ram_code;
+ int err;
+
+ if (emc->mrr_error) {
+ dev_warn(dev, "memory timings skipped due to MRR error\n");
+ return NULL;
+ }
+
+ if (of_get_child_count(dev->of_node) == 0) {
+ dev_info_once(dev, "device-tree doesn't have memory timings\n");
+ return NULL;
+ }
+
+ if (!of_property_read_bool(dev->of_node, "nvidia,use-ram-code"))
+ return of_node_get(dev->of_node);
+
+ ram_code = tegra_read_ram_code();
+
+ for (np = of_find_node_by_name(dev->of_node, "emc-tables"); np;
+ np = of_find_node_by_name(np, "emc-tables")) {
+ err = of_property_read_u32(np, "nvidia,ram-code", &value);
+ if (err || value != ram_code) {
+ struct device_node *lpddr2_np;
+ bool cfg_mismatches = false;
+
+ lpddr2_np = of_find_node_by_name(np, "lpddr2");
+ if (lpddr2_np) {
+ const struct lpddr2_info *info;
+
+ info = of_lpddr2_get_info(lpddr2_np, dev);
+ if (info) {
+ if (info->manufacturer_id >= 0 &&
+ info->manufacturer_id != emc->manufacturer_id)
+ cfg_mismatches = true;
+
+ if (info->revision_id1 >= 0 &&
+ info->revision_id1 != emc->revision_id1)
+ cfg_mismatches = true;
+
+ if (info->revision_id2 >= 0 &&
+ info->revision_id2 != emc->revision_id2)
+ cfg_mismatches = true;
+
+ if (info->density != emc->basic_conf4.density)
+ cfg_mismatches = true;
+
+ if (info->io_width != emc->basic_conf4.io_width)
+ cfg_mismatches = true;
+
+ if (info->arch_type != emc->basic_conf4.arch_type)
+ cfg_mismatches = true;
+ } else {
+ dev_err(dev, "failed to parse %pOF\n", lpddr2_np);
+ cfg_mismatches = true;
+ }
+
+ of_node_put(lpddr2_np);
+ } else {
+ cfg_mismatches = true;
+ }
+
+ if (cfg_mismatches) {
+ of_node_put(np);
+ continue;
+ }
+ }
+
+ return np;
+ }
+
+ dev_err(dev, "no memory timings for RAM code %u found in device tree\n",
+ ram_code);
+
+ return NULL;
+}
+
+static int emc_read_lpddr_mode_register(struct tegra_emc *emc,
+ unsigned int emem_dev,
+ unsigned int register_addr,
+ unsigned int *register_data)
+{
+ u32 memory_dev = emem_dev ? 1 : 2;
+ u32 val, mr_mask = 0xff;
+ int err;
+
+ /* clear data-valid interrupt status */
+ writel_relaxed(EMC_MRR_DIVLD_INT, emc->regs + EMC_INTSTATUS);
+
+ /* issue mode register read request */
+ val = FIELD_PREP(EMC_MRR_DEV_SELECTN, memory_dev);
+ val |= FIELD_PREP(EMC_MRR_MRR_MA, register_addr);
+
+ writel_relaxed(val, emc->regs + EMC_MRR);
+
+ /* wait for the LPDDR2 data-valid interrupt */
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, val,
+ val & EMC_MRR_DIVLD_INT,
+ 1, 100);
+ if (err) {
+ dev_err(emc->dev, "mode register %u read failed: %d\n",
+ register_addr, err);
+ emc->mrr_error = true;
+ return err;
+ }
+
+ /* read out mode register data */
+ val = readl_relaxed(emc->regs + EMC_MRR);
+ *register_data = FIELD_GET(EMC_MRR_MRR_DATA, val) & mr_mask;
+
+ return 0;
+}
+
+static void emc_read_lpddr_sdram_info(struct tegra_emc *emc,
+ unsigned int emem_dev,
+ bool print_out)
+{
+ /* these registers are standard for all LPDDR JEDEC memory chips */
+ emc_read_lpddr_mode_register(emc, emem_dev, 5, &emc->manufacturer_id);
+ emc_read_lpddr_mode_register(emc, emem_dev, 6, &emc->revision_id1);
+ emc_read_lpddr_mode_register(emc, emem_dev, 7, &emc->revision_id2);
+ emc_read_lpddr_mode_register(emc, emem_dev, 8, &emc->basic_conf4.value);
+
+ if (!print_out)
+ return;
+
+ dev_info(emc->dev, "SDRAM[dev%u]: manufacturer: 0x%x (%s) rev1: 0x%x rev2: 0x%x prefetch: S%u density: %uMbit iowidth: %ubit\n",
+ emem_dev, emc->manufacturer_id,
+ lpddr2_jedec_manufacturer(emc->manufacturer_id),
+ emc->revision_id1, emc->revision_id2,
+ 4 >> emc->basic_conf4.arch_type,
+ 64 << emc->basic_conf4.density,
+ 32 >> emc->basic_conf4.io_width);
+}
+
+static int emc_setup_hw(struct tegra_emc *emc)
+{
+ u32 emc_cfg, emc_dbg, emc_fbio, emc_adr_cfg;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT;
+ static bool print_sdram_info_once;
+ enum emc_dram_type dram_type;
+ const char *dram_type_str;
+ unsigned int emem_numdev;
+
+ emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);
+
+ /*
+	 * Depending on the memory type, DRAM should enter either the
+	 * self-refresh or the power-down state on an EMC clock change.
+ */
+ if (!(emc_cfg & EMC_CLKCHANGE_PD_ENABLE) &&
+ !(emc_cfg & EMC_CLKCHANGE_SR_ENABLE)) {
+ dev_err(emc->dev,
+ "bootloader didn't specify DRAM auto-suspend mode\n");
+ return -EINVAL;
+ }
+
+ /* enable EMC and CAR to handshake on PLL divider/source changes */
+ emc_cfg |= EMC_CLKCHANGE_REQ_ENABLE;
+ writel_relaxed(emc_cfg, emc->regs + EMC_CFG_2);
+
+ /* initialize interrupt */
+ writel_relaxed(intmask, emc->regs + EMC_INTMASK);
+ writel_relaxed(intmask, emc->regs + EMC_INTSTATUS);
+
+ /* ensure that unwanted debug features are disabled */
+ emc_dbg = readl_relaxed(emc->regs + EMC_DBG);
+ emc_dbg |= EMC_DBG_CFG_PRIORITY;
+ emc_dbg &= ~EMC_DBG_READ_MUX_ASSEMBLY;
+ emc_dbg &= ~EMC_DBG_WRITE_MUX_ACTIVE;
+ emc_dbg &= ~EMC_DBG_FORCE_UPDATE;
+ writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
+
+ emc_fbio = readl_relaxed(emc->regs + EMC_FBIO_CFG5);
+
+ if (emc_fbio & EMC_FBIO_CFG5_DRAM_WIDTH_X16)
+ emc->dram_bus_width = 16;
+ else
+ emc->dram_bus_width = 32;
+
+ dram_type = FIELD_GET(EMC_FBIO_CFG5_DRAM_TYPE, emc_fbio);
+
+ switch (dram_type) {
+ case DRAM_TYPE_RESERVED:
+ dram_type_str = "INVALID";
+ break;
+ case DRAM_TYPE_DDR1:
+ dram_type_str = "DDR1";
+ break;
+ case DRAM_TYPE_LPDDR2:
+ dram_type_str = "LPDDR2";
+ break;
+ case DRAM_TYPE_DDR2:
+ dram_type_str = "DDR2";
+ break;
+ }
+
+ emc_adr_cfg = readl_relaxed(emc->regs + EMC_ADR_CFG_0);
+ emem_numdev = FIELD_GET(EMC_ADR_CFG_0_EMEM_NUMDEV, emc_adr_cfg) + 1;
+
+ dev_info_once(emc->dev, "%ubit DRAM bus, %u %s %s attached\n",
+ emc->dram_bus_width, emem_numdev, dram_type_str,
+ emem_numdev == 2 ? "devices" : "device");
+
+ if (dram_type == DRAM_TYPE_LPDDR2) {
+ while (emem_numdev--)
+ emc_read_lpddr_sdram_info(emc, emem_numdev,
+ !print_sdram_info_once);
+ print_sdram_info_once = true;
+ }
+
+ return 0;
+}
+
+static long emc_round_rate(unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
+ void *arg)
+{
+ struct emc_timing *timing = NULL;
+ struct tegra_emc *emc = arg;
+ unsigned int i;
+
+ if (!emc->num_timings)
+ return clk_get_rate(emc->clk);
+
+ min_rate = min(min_rate, emc->timings[emc->num_timings - 1].rate);
+
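+	/*
+	 * Timings are sorted by ascending rate, so pick the first entry that
+	 * satisfies the requested rate; if it exceeds max_rate, fall back to
+	 * the previous (slower) entry as long as it still satisfies min_rate.
+	 */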
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate < rate && i != emc->num_timings - 1)
+ continue;
+
+ if (emc->timings[i].rate > max_rate) {
+ i = max(i, 1u) - 1;
+
+ if (emc->timings[i].rate < min_rate)
+ break;
+ }
+
+ if (emc->timings[i].rate < min_rate)
+ continue;
+
+ timing = &emc->timings[i];
+ break;
+ }
+
+ if (!timing) {
+ dev_err(emc->dev, "no timing for rate %lu min %lu max %lu\n",
+ rate, min_rate, max_rate);
+ return -EINVAL;
+ }
+
+ return timing->rate;
+}
+
+static void tegra_emc_rate_requests_init(struct tegra_emc *emc)
+{
+ unsigned int i;
+
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++) {
+ emc->requested_rate[i].min_rate = 0;
+ emc->requested_rate[i].max_rate = ULONG_MAX;
+ }
+}
+
+static int emc_request_rate(struct tegra_emc *emc,
+ unsigned long new_min_rate,
+ unsigned long new_max_rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = emc->requested_rate;
+ unsigned long min_rate = 0, max_rate = ULONG_MAX;
+ unsigned int i;
+ int err;
+
+ /* select minimum and maximum rates among the requested rates */
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++, req++) {
+ if (i == type) {
+ min_rate = max(new_min_rate, min_rate);
+ max_rate = min(new_max_rate, max_rate);
+ } else {
+ min_rate = max(req->min_rate, min_rate);
+ max_rate = min(req->max_rate, max_rate);
+ }
+ }
+
+ if (min_rate > max_rate) {
+ dev_err_ratelimited(emc->dev, "%s: type %u: out of range: %lu %lu\n",
+ __func__, type, min_rate, max_rate);
+ return -ERANGE;
+ }
+
+ /*
+	 * EMC rate changes should go via the OPP API because it manages the
+	 * corresponding voltage changes.
+ */
+ err = dev_pm_opp_set_rate(emc->dev, min_rate);
+ if (err)
+ return err;
+
+ emc->requested_rate[type].min_rate = new_min_rate;
+ emc->requested_rate[type].max_rate = new_max_rate;
+
+ return 0;
+}
+
+static int emc_set_min_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, rate, req->max_rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
+static int emc_set_max_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, req->min_rate, rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
+/*
+ * debugfs interface
+ *
+ * The memory controller driver exposes some files in debugfs that can be used
+ * to control the EMC frequency. The top-level directory can be found here:
+ *
+ * /sys/kernel/debug/emc
+ *
+ * It contains the following files:
+ *
+ * - available_rates: This file contains a list of valid, space-separated
+ * EMC frequencies.
+ *
+ * - min_rate: Writing a value to this file sets the given frequency as the
+ * floor of the permitted range. If this is higher than the currently
+ * configured EMC frequency, this will cause the frequency to be
+ * increased so that it stays within the valid range.
+ *
+ * - max_rate: Similarly to the min_rate file, writing a value to this file
+ * sets the given frequency as the ceiling of the permitted range. If
+ * the value is lower than the currently configured EMC frequency, this
+ * will cause the frequency to be decreased so that it stays within the
+ * valid range.
+ */
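+
+/*
+ * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and that the
+ * example rates below are listed in available_rates on a given board):
+ *
+ *   cat /sys/kernel/debug/emc/available_rates
+ *   echo 300000000 > /sys/kernel/debug/emc/min_rate
+ *   echo 600000000 > /sys/kernel/debug/emc/max_rate
+ */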
+
+static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++)
+ if (rate == emc->timings[i].rate)
+ return true;
+
+ return false;
+}
+
+static int tegra_emc_debug_available_rates_show(struct seq_file *s, void *data)
+{
+ struct tegra_emc *emc = s->private;
+ const char *prefix = "";
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ seq_printf(s, "%s%lu", prefix, emc->timings[i].rate);
+ prefix = " ";
+ }
+
+ seq_puts(s, "\n");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(tegra_emc_debug_available_rates);
+
+static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
+{
+ struct tegra_emc *emc = data;
+
+ *rate = emc->debugfs.min_rate;
+
+ return 0;
+}
+
+static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
+{
+ struct tegra_emc *emc = data;
+ int err;
+
+ if (!tegra_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.min_rate = rate;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
+ tegra_emc_debug_min_rate_get,
+ tegra_emc_debug_min_rate_set, "%llu\n");
+
+static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
+{
+ struct tegra_emc *emc = data;
+
+ *rate = emc->debugfs.max_rate;
+
+ return 0;
+}
+
+static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
+{
+ struct tegra_emc *emc = data;
+ int err;
+
+ if (!tegra_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.max_rate = rate;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
+ tegra_emc_debug_max_rate_get,
+ tegra_emc_debug_max_rate_set, "%llu\n");
+
+static void tegra_emc_debugfs_init(struct tegra_emc *emc)
+{
+ struct device *dev = emc->dev;
+ unsigned int i;
+ int err;
+
+ emc->debugfs.min_rate = ULONG_MAX;
+ emc->debugfs.max_rate = 0;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate < emc->debugfs.min_rate)
+ emc->debugfs.min_rate = emc->timings[i].rate;
+
+ if (emc->timings[i].rate > emc->debugfs.max_rate)
+ emc->debugfs.max_rate = emc->timings[i].rate;
+ }
+
+ if (!emc->num_timings) {
+ emc->debugfs.min_rate = clk_get_rate(emc->clk);
+ emc->debugfs.max_rate = emc->debugfs.min_rate;
+ }
+
+ err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
+ emc->debugfs.max_rate);
+ if (err < 0) {
+ dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
+ emc->debugfs.min_rate, emc->debugfs.max_rate,
+ emc->clk);
+ }
+
+ emc->debugfs.root = debugfs_create_dir("emc", NULL);
+
+ debugfs_create_file("available_rates", 0444, emc->debugfs.root,
+ emc, &tegra_emc_debug_available_rates_fops);
+ debugfs_create_file("min_rate", 0644, emc->debugfs.root,
+ emc, &tegra_emc_debug_min_rate_fops);
+ debugfs_create_file("max_rate", 0644, emc->debugfs.root,
+ emc, &tegra_emc_debug_max_rate_fops);
+}
+
+static inline struct tegra_emc *
+to_tegra_emc_provider(struct icc_provider *provider)
+{
+ return container_of(provider, struct tegra_emc, provider);
+}
+
+static struct icc_node_data *
+emc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct icc_provider *provider = data;
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ /* External Memory is the only possible ICC route */
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ if (node->id != TEGRA_ICC_EMEM)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+		 * SRC and DST nodes should have matching TAGs in order to have
+		 * the tag set by default for a requested path.
+ */
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ ndata->node = node;
+
+ return ndata;
+ }
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+static int emc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct tegra_emc *emc = to_tegra_emc_provider(dst->provider);
+ unsigned long long peak_bw = icc_units_to_bps(dst->peak_bw);
+ unsigned long long avg_bw = icc_units_to_bps(dst->avg_bw);
+ unsigned long long rate = max(avg_bw, peak_bw);
+ unsigned int dram_data_bus_width_bytes;
+ int err;
+
+ /*
+	 * The Tegra20 EMC runs at twice the SDRAM bus clock rate because DDR
+	 * data is sampled on both clock edges. This means that the EMC clock
+	 * rate equals the peak data rate.
+ */
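+	/*
+	 * Illustration (not from the TRM): with a 32-bit (4-byte) wide DRAM
+	 * bus, a peak bandwidth request of 1 GB/s maps to an EMC clock floor
+	 * of 10^9 / 4 = 250 MHz.
+	 */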
+ dram_data_bus_width_bytes = emc->dram_bus_width / 8;
+ do_div(rate, dram_data_bus_width_bytes);
+ rate = min_t(u64, rate, U32_MAX);
+
+ err = emc_set_min_rate(emc, rate, EMC_RATE_ICC);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+{
+ const struct tegra_mc_soc *soc;
+ struct icc_node *node;
+ int err;
+
+ emc->mc = devm_tegra_memory_controller_get(emc->dev);
+ if (IS_ERR(emc->mc))
+ return PTR_ERR(emc->mc);
+
+ soc = emc->mc->soc;
+
+ emc->provider.dev = emc->dev;
+ emc->provider.set = emc_icc_set;
+ emc->provider.data = &emc->provider;
+ emc->provider.aggregate = soc->icc_ops->aggregate;
+ emc->provider.xlate_extended = emc_of_icc_xlate_extended;
+
+ icc_provider_init(&emc->provider);
+
+ /* create External Memory Controller node */
+ node = icc_node_create(TEGRA_ICC_EMC);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto err_msg;
+ }
+
+ node->name = "External Memory Controller";
+ icc_node_add(node, &emc->provider);
+
+ /* link External Memory Controller to External Memory (DRAM) */
+ err = icc_link_create(node, TEGRA_ICC_EMEM);
+ if (err)
+ goto remove_nodes;
+
+ /* create External Memory node */
+ node = icc_node_create(TEGRA_ICC_EMEM);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto remove_nodes;
+ }
+
+ node->name = "External Memory (DRAM)";
+ icc_node_add(node, &emc->provider);
+
+ err = icc_provider_register(&emc->provider);
+ if (err)
+ goto remove_nodes;
+
+ return 0;
+
+remove_nodes:
+ icc_nodes_remove(&emc->provider);
+err_msg:
+ dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
+
+ return err;
+}
+
+static void devm_tegra_emc_unset_callback(void *data)
+{
+ tegra20_clk_set_emc_round_callback(NULL, NULL);
+}
+
+static void devm_tegra_emc_unreg_clk_notifier(void *data)
+{
+ struct tegra_emc *emc = data;
+
+ clk_notifier_unregister(emc->clk, &emc->clk_nb);
+}
+
+static int tegra_emc_init_clk(struct tegra_emc *emc)
+{
+ int err;
+
+ tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
+
+ err = devm_add_action_or_reset(emc->dev, devm_tegra_emc_unset_callback,
+ NULL);
+ if (err)
+ return err;
+
+ emc->clk = devm_clk_get(emc->dev, NULL);
+ if (IS_ERR(emc->clk)) {
+ dev_err(emc->dev, "failed to get EMC clock: %pe\n", emc->clk);
+ return PTR_ERR(emc->clk);
+ }
+
+ err = clk_notifier_register(emc->clk, &emc->clk_nb);
+ if (err) {
+ dev_err(emc->dev, "failed to register clk notifier: %d\n", err);
+ return err;
+ }
+
+ err = devm_add_action_or_reset(emc->dev,
+ devm_tegra_emc_unreg_clk_notifier, emc);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra_emc_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct tegra_emc *emc = dev_get_drvdata(dev);
+ struct dev_pm_opp *opp;
+ unsigned long rate;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "failed to find opp for %lu Hz\n", *freq);
+ return PTR_ERR(opp);
+ }
+
+ rate = dev_pm_opp_get_freq(opp);
+ dev_pm_opp_put(opp);
+
+ return emc_set_min_rate(emc, rate, EMC_RATE_DEVFREQ);
+}
+
+static int tegra_emc_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct tegra_emc *emc = dev_get_drvdata(dev);
+
+ /* freeze counters */
+ writel_relaxed(EMC_PWR_GATHER_DISABLE, emc->regs + EMC_STAT_CONTROL);
+
+ /*
+	 * busy_time: number of clocks during which an EMC request was accepted
+	 * total_time: number of clocks during which PWR_GATHER was set to ENABLE
+ */
+ stat->busy_time = readl_relaxed(emc->regs + EMC_STAT_PWR_COUNT);
+ stat->total_time = readl_relaxed(emc->regs + EMC_STAT_PWR_CLOCKS);
+ stat->current_frequency = clk_get_rate(emc->clk);
+
+ /* clear counters and restart */
+ writel_relaxed(EMC_PWR_GATHER_CLEAR, emc->regs + EMC_STAT_CONTROL);
+ writel_relaxed(EMC_PWR_GATHER_ENABLE, emc->regs + EMC_STAT_CONTROL);
+
+ return 0;
+}
+
+static struct devfreq_dev_profile tegra_emc_devfreq_profile = {
+ .polling_ms = 30,
+ .target = tegra_emc_devfreq_target,
+ .get_dev_status = tegra_emc_devfreq_get_dev_status,
+};
+
+static int tegra_emc_devfreq_init(struct tegra_emc *emc)
+{
+ struct devfreq *devfreq;
+
+ /*
+ * PWR_COUNT is 1/2 of PWR_CLOCKS at max, and thus, the up-threshold
+ * should be less than 50. Secondly, multiple active memory clients
+	 * may cause over 20% of clock cycles to be lost due to stalls caused
+	 * by competing memory accesses. This means that the threshold should
+	 * be set to less than 30 in order to have a properly working governor.
+ */
+ emc->ondemand_data.upthreshold = 20;
+
+ /*
+	 * Reset the statistics gathering state, select global bandwidth for
+	 * the statistics collection mode and set the clocks counter saturation
+ * limit to maximum.
+ */
+ writel_relaxed(0x00000000, emc->regs + EMC_STAT_CONTROL);
+ writel_relaxed(0x00000000, emc->regs + EMC_STAT_LLMC_CONTROL);
+ writel_relaxed(0xffffffff, emc->regs + EMC_STAT_PWR_CLOCK_LIMIT);
+
+ devfreq = devm_devfreq_add_device(emc->dev, &tegra_emc_devfreq_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ &emc->ondemand_data);
+ if (IS_ERR(devfreq)) {
+		dev_err(emc->dev, "failed to initialize devfreq: %pe\n", devfreq);
+ return PTR_ERR(devfreq);
+ }
+
+ return 0;
+}
+
+static int tegra_emc_probe(struct platform_device *pdev)
+{
+ struct tegra_core_opp_params opp_params = {};
+ struct device_node *np;
+ struct tegra_emc *emc;
+ int irq, err;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "please update your device tree\n");
+ return irq;
+ }
+
+ emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
+ if (!emc)
+ return -ENOMEM;
+
+ mutex_init(&emc->rate_lock);
+ emc->clk_nb.notifier_call = tegra_emc_clk_change_notify;
+ emc->dev = &pdev->dev;
+
+ emc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(emc->regs))
+ return PTR_ERR(emc->regs);
+
+ err = emc_setup_hw(emc);
+ if (err)
+ return err;
+
+ np = tegra_emc_find_node_by_ram_code(emc);
+ if (np) {
+ err = tegra_emc_load_timings_from_dt(emc, np);
+ of_node_put(np);
+ if (err)
+ return err;
+ }
+
+ err = devm_request_irq(&pdev->dev, irq, tegra_emc_isr, 0,
+ dev_name(&pdev->dev), emc);
+ if (err) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ return err;
+ }
+
+ err = tegra_emc_init_clk(emc);
+ if (err)
+ return err;
+
+ opp_params.init_state = true;
+
+ err = devm_tegra_core_dev_init_opp_table(&pdev->dev, &opp_params);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, emc);
+ tegra_emc_rate_requests_init(emc);
+ tegra_emc_debugfs_init(emc);
+ tegra_emc_interconnect_init(emc);
+ tegra_emc_devfreq_init(emc);
+
+ /*
+ * Don't allow the kernel module to be unloaded. Unloading adds some
+	 * extra complexity which isn't really worth the effort in the case of
+	 * this driver.
+ */
+ try_module_get(THIS_MODULE);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_emc_of_match[] = {
+ { .compatible = "nvidia,tegra20-emc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tegra_emc_of_match);
+
+static struct platform_driver tegra_emc_driver = {
+ .probe = tegra_emc_probe,
+ .driver = {
+ .name = "tegra20-emc",
+ .of_match_table = tegra_emc_of_match,
+ .suppress_bind_attrs = true,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(tegra_emc_driver);
+
+MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra20 EMC driver");
+MODULE_SOFTDEP("pre: governor_simpleondemand");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra20.c b/drivers/memory/tegra/tegra20.c
new file mode 100644
index 000000000..544bfd216
--- /dev/null
+++ b/drivers/memory/tegra/tegra20.c
@@ -0,0 +1,809 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <dt-bindings/memory/tegra20-mc.h>
+
+#include "mc.h"
+
+#define MC_STAT_CONTROL 0x90
+#define MC_STAT_EMC_CLOCK_LIMIT 0xa0
+#define MC_STAT_EMC_CLOCKS 0xa4
+#define MC_STAT_EMC_CONTROL_0 0xa8
+#define MC_STAT_EMC_CONTROL_1 0xac
+#define MC_STAT_EMC_COUNT_0 0xb8
+#define MC_STAT_EMC_COUNT_1 0xbc
+
+#define MC_STAT_CONTROL_CLIENT_ID GENMASK(13, 8)
+#define MC_STAT_CONTROL_EVENT GENMASK(23, 16)
+#define MC_STAT_CONTROL_PRI_EVENT GENMASK(25, 24)
+#define MC_STAT_CONTROL_FILTER_CLIENT_ENABLE GENMASK(26, 26)
+#define MC_STAT_CONTROL_FILTER_PRI GENMASK(29, 28)
+
+#define MC_STAT_CONTROL_PRI_EVENT_HP 0
+#define MC_STAT_CONTROL_PRI_EVENT_TM 1
+#define MC_STAT_CONTROL_PRI_EVENT_BW 2
+
+#define MC_STAT_CONTROL_FILTER_PRI_DISABLE 0
+#define MC_STAT_CONTROL_FILTER_PRI_NO 1
+#define MC_STAT_CONTROL_FILTER_PRI_YES 2
+
+#define MC_STAT_CONTROL_EVENT_QUALIFIED 0
+#define MC_STAT_CONTROL_EVENT_ANY_READ 1
+#define MC_STAT_CONTROL_EVENT_ANY_WRITE 2
+#define MC_STAT_CONTROL_EVENT_RD_WR_CHANGE 3
+#define MC_STAT_CONTROL_EVENT_SUCCESSIVE 4
+#define MC_STAT_CONTROL_EVENT_ARB_BANK_AA 5
+#define MC_STAT_CONTROL_EVENT_ARB_BANK_BB 6
+#define MC_STAT_CONTROL_EVENT_PAGE_MISS 7
+#define MC_STAT_CONTROL_EVENT_AUTO_PRECHARGE 8
+
+#define EMC_GATHER_RST (0 << 8)
+#define EMC_GATHER_CLEAR (1 << 8)
+#define EMC_GATHER_DISABLE (2 << 8)
+#define EMC_GATHER_ENABLE (3 << 8)
+
+#define MC_STAT_SAMPLE_TIME_USEC 16000
+
+/* we store the collected statistics as fixed-point values */
+#define MC_FX_FRAC_SCALE 100
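+/* e.g. a stored value of 1234 corresponds to 12.34% */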
+
+static DEFINE_MUTEX(tegra20_mc_stat_lock);
+
+struct tegra20_mc_stat_gather {
+ unsigned int pri_filter;
+ unsigned int pri_event;
+ unsigned int result;
+ unsigned int client;
+ unsigned int event;
+ bool client_enb;
+};
+
+struct tegra20_mc_stat {
+ struct tegra20_mc_stat_gather gather0;
+ struct tegra20_mc_stat_gather gather1;
+ unsigned int sample_time_usec;
+ const struct tegra_mc *mc;
+};
+
+struct tegra20_mc_client_stat {
+ unsigned int events;
+ unsigned int arb_high_prio;
+ unsigned int arb_timeout;
+ unsigned int arb_bandwidth;
+ unsigned int rd_wr_change;
+ unsigned int successive;
+ unsigned int page_miss;
+ unsigned int auto_precharge;
+ unsigned int arb_bank_aa;
+ unsigned int arb_bank_bb;
+};
+
+static const struct tegra_mc_client tegra20_mc_clients[] = {
+ {
+ .id = 0x00,
+ .name = "display0a",
+ }, {
+ .id = 0x01,
+ .name = "display0ab",
+ }, {
+ .id = 0x02,
+ .name = "display0b",
+ }, {
+ .id = 0x03,
+ .name = "display0bb",
+ }, {
+ .id = 0x04,
+ .name = "display0c",
+ }, {
+ .id = 0x05,
+ .name = "display0cb",
+ }, {
+ .id = 0x06,
+ .name = "display1b",
+ }, {
+ .id = 0x07,
+ .name = "display1bb",
+ }, {
+ .id = 0x08,
+ .name = "eppup",
+ }, {
+ .id = 0x09,
+ .name = "g2pr",
+ }, {
+ .id = 0x0a,
+ .name = "g2sr",
+ }, {
+ .id = 0x0b,
+ .name = "mpeunifbr",
+ }, {
+ .id = 0x0c,
+ .name = "viruv",
+ }, {
+ .id = 0x0d,
+ .name = "avpcarm7r",
+ }, {
+ .id = 0x0e,
+ .name = "displayhc",
+ }, {
+ .id = 0x0f,
+ .name = "displayhcb",
+ }, {
+ .id = 0x10,
+ .name = "fdcdrd",
+ }, {
+ .id = 0x11,
+ .name = "g2dr",
+ }, {
+ .id = 0x12,
+ .name = "host1xdmar",
+ }, {
+ .id = 0x13,
+ .name = "host1xr",
+ }, {
+ .id = 0x14,
+ .name = "idxsrd",
+ }, {
+ .id = 0x15,
+ .name = "mpcorer",
+ }, {
+ .id = 0x16,
+ .name = "mpe_ipred",
+ }, {
+ .id = 0x17,
+ .name = "mpeamemrd",
+ }, {
+ .id = 0x18,
+ .name = "mpecsrd",
+ }, {
+ .id = 0x19,
+ .name = "ppcsahbdmar",
+ }, {
+ .id = 0x1a,
+ .name = "ppcsahbslvr",
+ }, {
+ .id = 0x1b,
+ .name = "texsrd",
+ }, {
+ .id = 0x1c,
+ .name = "vdebsevr",
+ }, {
+ .id = 0x1d,
+ .name = "vdember",
+ }, {
+ .id = 0x1e,
+ .name = "vdemcer",
+ }, {
+ .id = 0x1f,
+ .name = "vdetper",
+ }, {
+ .id = 0x20,
+ .name = "eppu",
+ }, {
+ .id = 0x21,
+ .name = "eppv",
+ }, {
+ .id = 0x22,
+ .name = "eppy",
+ }, {
+ .id = 0x23,
+ .name = "mpeunifbw",
+ }, {
+ .id = 0x24,
+ .name = "viwsb",
+ }, {
+ .id = 0x25,
+ .name = "viwu",
+ }, {
+ .id = 0x26,
+ .name = "viwv",
+ }, {
+ .id = 0x27,
+ .name = "viwy",
+ }, {
+ .id = 0x28,
+ .name = "g2dw",
+ }, {
+ .id = 0x29,
+ .name = "avpcarm7w",
+ }, {
+ .id = 0x2a,
+ .name = "fdcdwr",
+ }, {
+ .id = 0x2b,
+ .name = "host1xw",
+ }, {
+ .id = 0x2c,
+ .name = "ispw",
+ }, {
+ .id = 0x2d,
+ .name = "mpcorew",
+ }, {
+ .id = 0x2e,
+ .name = "mpecswr",
+ }, {
+ .id = 0x2f,
+ .name = "ppcsahbdmaw",
+ }, {
+ .id = 0x30,
+ .name = "ppcsahbslvw",
+ }, {
+ .id = 0x31,
+ .name = "vdebsevw",
+ }, {
+ .id = 0x32,
+ .name = "vdembew",
+ }, {
+ .id = 0x33,
+ .name = "vdetpmw",
+ },
+};
+
+#define TEGRA20_MC_RESET(_name, _control, _status, _reset, _bit) \
+ { \
+ .name = #_name, \
+ .id = TEGRA20_MC_RESET_##_name, \
+ .control = _control, \
+ .status = _status, \
+ .reset = _reset, \
+ .bit = _bit, \
+ }
+
+static const struct tegra_mc_reset tegra20_mc_resets[] = {
+ TEGRA20_MC_RESET(AVPC, 0x100, 0x140, 0x104, 0),
+ TEGRA20_MC_RESET(DC, 0x100, 0x144, 0x104, 1),
+ TEGRA20_MC_RESET(DCB, 0x100, 0x148, 0x104, 2),
+ TEGRA20_MC_RESET(EPP, 0x100, 0x14c, 0x104, 3),
+ TEGRA20_MC_RESET(2D, 0x100, 0x150, 0x104, 4),
+ TEGRA20_MC_RESET(HC, 0x100, 0x154, 0x104, 5),
+ TEGRA20_MC_RESET(ISP, 0x100, 0x158, 0x104, 6),
+ TEGRA20_MC_RESET(MPCORE, 0x100, 0x15c, 0x104, 7),
+ TEGRA20_MC_RESET(MPEA, 0x100, 0x160, 0x104, 8),
+ TEGRA20_MC_RESET(MPEB, 0x100, 0x164, 0x104, 9),
+ TEGRA20_MC_RESET(MPEC, 0x100, 0x168, 0x104, 10),
+ TEGRA20_MC_RESET(3D, 0x100, 0x16c, 0x104, 11),
+ TEGRA20_MC_RESET(PPCS, 0x100, 0x170, 0x104, 12),
+ TEGRA20_MC_RESET(VDE, 0x100, 0x174, 0x104, 13),
+ TEGRA20_MC_RESET(VI, 0x100, 0x178, 0x104, 14),
+};
+
+static int tegra20_mc_hotreset_assert(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst)
+{
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&mc->lock, flags);
+
+ value = mc_readl(mc, rst->reset);
+ mc_writel(mc, value & ~BIT(rst->bit), rst->reset);
+
+ spin_unlock_irqrestore(&mc->lock, flags);
+
+ return 0;
+}
+
+static int tegra20_mc_hotreset_deassert(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst)
+{
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&mc->lock, flags);
+
+ value = mc_readl(mc, rst->reset);
+ mc_writel(mc, value | BIT(rst->bit), rst->reset);
+
+ spin_unlock_irqrestore(&mc->lock, flags);
+
+ return 0;
+}
+
+static int tegra20_mc_block_dma(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst)
+{
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&mc->lock, flags);
+
+ value = mc_readl(mc, rst->control) & ~BIT(rst->bit);
+ mc_writel(mc, value, rst->control);
+
+ spin_unlock_irqrestore(&mc->lock, flags);
+
+ return 0;
+}
+
+static bool tegra20_mc_dma_idling(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst)
+{
+ return mc_readl(mc, rst->status) == 0;
+}
+
+static int tegra20_mc_reset_status(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst)
+{
+ return (mc_readl(mc, rst->reset) & BIT(rst->bit)) == 0;
+}
+
+static int tegra20_mc_unblock_dma(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst)
+{
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&mc->lock, flags);
+
+ value = mc_readl(mc, rst->control) | BIT(rst->bit);
+ mc_writel(mc, value, rst->control);
+
+ spin_unlock_irqrestore(&mc->lock, flags);
+
+ return 0;
+}
+
+static const struct tegra_mc_reset_ops tegra20_mc_reset_ops = {
+ .hotreset_assert = tegra20_mc_hotreset_assert,
+ .hotreset_deassert = tegra20_mc_hotreset_deassert,
+ .block_dma = tegra20_mc_block_dma,
+ .dma_idling = tegra20_mc_dma_idling,
+ .unblock_dma = tegra20_mc_unblock_dma,
+ .reset_status = tegra20_mc_reset_status,
+};
+
+static int tegra20_mc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ /*
+ * It should be possible to tune arbitration knobs here, but the
+ * default values are known to work well on all devices. Hence
+ * nothing to do here so far.
+ */
+ return 0;
+}
+
+static int tegra20_mc_icc_aggreate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ /*
+ * ISO clients need to reserve extra bandwidth up-front because
+ * there could be high bandwidth pressure during initial filling
+ * of the client's FIFO buffers. Secondly, we need to take into
+	 * account the inefficiencies of the memory subsystem.
+ */
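+	/* i.e. an ISO client's peak bandwidth is accounted at 300% of the request */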
+ if (tag & TEGRA_MC_ICC_TAG_ISO)
+ peak_bw = tegra_mc_scale_percents(peak_bw, 300);
+
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
+
+static struct icc_node_data *
+tegra20_mc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
+ unsigned int i, idx = spec->args[0];
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ list_for_each_entry(node, &mc->provider.nodes, node_list) {
+ if (node->id != idx)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ ndata->node = node;
+
+ /* these clients are isochronous by default */
+ if (strstarts(node->name, "display") ||
+ strstarts(node->name, "vi"))
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ else
+ ndata->tag = TEGRA_MC_ICC_TAG_DEFAULT;
+
+ return ndata;
+ }
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ if (mc->soc->clients[i].id == idx)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ dev_err(mc->dev, "invalid ICC client ID %u\n", idx);
+
+ return ERR_PTR(-EINVAL);
+}
+
+static const struct tegra_mc_icc_ops tegra20_mc_icc_ops = {
+ .xlate_extended = tegra20_mc_of_icc_xlate_extended,
+ .aggregate = tegra20_mc_icc_aggreate,
+ .set = tegra20_mc_icc_set,
+};
+
+static u32 tegra20_mc_stat_gather_control(const struct tegra20_mc_stat_gather *g)
+{
+ u32 control;
+
+ control = FIELD_PREP(MC_STAT_CONTROL_EVENT, g->event);
+ control |= FIELD_PREP(MC_STAT_CONTROL_CLIENT_ID, g->client);
+ control |= FIELD_PREP(MC_STAT_CONTROL_PRI_EVENT, g->pri_event);
+ control |= FIELD_PREP(MC_STAT_CONTROL_FILTER_PRI, g->pri_filter);
+ control |= FIELD_PREP(MC_STAT_CONTROL_FILTER_CLIENT_ENABLE, g->client_enb);
+
+ return control;
+}
+
+static void tegra20_mc_stat_gather(struct tegra20_mc_stat *stat)
+{
+ u32 clocks, count0, count1, control_0, control_1;
+ const struct tegra_mc *mc = stat->mc;
+
+ control_0 = tegra20_mc_stat_gather_control(&stat->gather0);
+ control_1 = tegra20_mc_stat_gather_control(&stat->gather1);
+
+ /*
+	 * Reset the statistics gathering state, select the statistics
+	 * collection mode and set the clocks counter saturation limit to maximum.
+ */
+ mc_writel(mc, 0x00000000, MC_STAT_CONTROL);
+ mc_writel(mc, control_0, MC_STAT_EMC_CONTROL_0);
+ mc_writel(mc, control_1, MC_STAT_EMC_CONTROL_1);
+ mc_writel(mc, 0xffffffff, MC_STAT_EMC_CLOCK_LIMIT);
+
+ mc_writel(mc, EMC_GATHER_ENABLE, MC_STAT_CONTROL);
+ fsleep(stat->sample_time_usec);
+ mc_writel(mc, EMC_GATHER_DISABLE, MC_STAT_CONTROL);
+
+ count0 = mc_readl(mc, MC_STAT_EMC_COUNT_0);
+ count1 = mc_readl(mc, MC_STAT_EMC_COUNT_1);
+ clocks = mc_readl(mc, MC_STAT_EMC_CLOCKS);
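+	/*
+	 * Scale the clock count so that DIV_ROUND_UP(count, clocks) below
+	 * yields utilization in percent with two fractional digits (see
+	 * MC_FX_FRAC_SCALE).
+	 */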
+ clocks = max(clocks / 100 / MC_FX_FRAC_SCALE, 1u);
+
+ stat->gather0.result = DIV_ROUND_UP(count0, clocks);
+ stat->gather1.result = DIV_ROUND_UP(count1, clocks);
+}
+
+static void tegra20_mc_stat_events(const struct tegra_mc *mc,
+ const struct tegra_mc_client *client0,
+ const struct tegra_mc_client *client1,
+ unsigned int pri_filter,
+ unsigned int pri_event,
+ unsigned int event,
+ unsigned int *result0,
+ unsigned int *result1)
+{
+ struct tegra20_mc_stat stat = {};
+
+ stat.gather0.client = client0 ? client0->id : 0;
+ stat.gather0.pri_filter = pri_filter;
+ stat.gather0.client_enb = !!client0;
+ stat.gather0.pri_event = pri_event;
+ stat.gather0.event = event;
+
+ stat.gather1.client = client1 ? client1->id : 0;
+ stat.gather1.pri_filter = pri_filter;
+ stat.gather1.client_enb = !!client1;
+ stat.gather1.pri_event = pri_event;
+ stat.gather1.event = event;
+
+ stat.sample_time_usec = MC_STAT_SAMPLE_TIME_USEC;
+ stat.mc = mc;
+
+ tegra20_mc_stat_gather(&stat);
+
+ *result0 = stat.gather0.result;
+ *result1 = stat.gather1.result;
+}
+
+static void tegra20_mc_collect_stats(const struct tegra_mc *mc,
+ struct tegra20_mc_client_stat *stats)
+{
+ const struct tegra_mc_client *client0, *client1;
+ unsigned int i;
+
+ /* collect memory controller utilization percent for each client */
+ for (i = 0; i < mc->soc->num_clients; i += 2) {
+ client0 = &mc->soc->clients[i];
+ client1 = &mc->soc->clients[i + 1];
+
+ if (i + 1 == mc->soc->num_clients)
+ client1 = NULL;
+
+ tegra20_mc_stat_events(mc, client0, client1,
+ MC_STAT_CONTROL_FILTER_PRI_DISABLE,
+ MC_STAT_CONTROL_PRI_EVENT_HP,
+ MC_STAT_CONTROL_EVENT_QUALIFIED,
+ &stats[i + 0].events,
+ &stats[i + 1].events);
+ }
+
+ /* collect more info from active clients */
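+	/*
+	 * The MC provides two statistics gather units (MC_STAT_EMC_CONTROL_0/1),
+	 * so events are sampled for two clients at a time; clients that showed
+	 * no activity in the pass above are skipped here.
+	 */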
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ unsigned int clienta, clientb = mc->soc->num_clients;
+
+ for (client0 = NULL; i < mc->soc->num_clients; i++) {
+ if (stats[i].events) {
+ client0 = &mc->soc->clients[i];
+ clienta = i++;
+ break;
+ }
+ }
+
+ for (client1 = NULL; i < mc->soc->num_clients; i++) {
+ if (stats[i].events) {
+ client1 = &mc->soc->clients[i];
+ clientb = i;
+ break;
+ }
+ }
+
+ if (!client0 && !client1)
+ break;
+
+ tegra20_mc_stat_events(mc, client0, client1,
+ MC_STAT_CONTROL_FILTER_PRI_YES,
+ MC_STAT_CONTROL_PRI_EVENT_HP,
+ MC_STAT_CONTROL_EVENT_QUALIFIED,
+ &stats[clienta].arb_high_prio,
+ &stats[clientb].arb_high_prio);
+
+ tegra20_mc_stat_events(mc, client0, client1,
+ MC_STAT_CONTROL_FILTER_PRI_YES,
+ MC_STAT_CONTROL_PRI_EVENT_TM,
+ MC_STAT_CONTROL_EVENT_QUALIFIED,
+ &stats[clienta].arb_timeout,
+ &stats[clientb].arb_timeout);
+
+ tegra20_mc_stat_events(mc, client0, client1,
+ MC_STAT_CONTROL_FILTER_PRI_YES,
+ MC_STAT_CONTROL_PRI_EVENT_BW,
+ MC_STAT_CONTROL_EVENT_QUALIFIED,
+ &stats[clienta].arb_bandwidth,
+ &stats[clientb].arb_bandwidth);
+
+ tegra20_mc_stat_events(mc, client0, client1,
+ MC_STAT_CONTROL_FILTER_PRI_DISABLE,
+ MC_STAT_CONTROL_PRI_EVENT_HP,
+ MC_STAT_CONTROL_EVENT_RD_WR_CHANGE,
+ &stats[clienta].rd_wr_change,
+ &stats[clientb].rd_wr_change);
+
+ tegra20_mc_stat_events(mc, client0, client1,
+ MC_STAT_CONTROL_FILTER_PRI_DISABLE,
+ MC_STAT_CONTROL_PRI_EVENT_HP,
+ MC_STAT_CONTROL_EVENT_SUCCESSIVE,
+ &stats[clienta].successive,
+ &stats[clientb].successive);
+
+ tegra20_mc_stat_events(mc, client0, client1,
+ MC_STAT_CONTROL_FILTER_PRI_DISABLE,
+ MC_STAT_CONTROL_PRI_EVENT_HP,
+ MC_STAT_CONTROL_EVENT_PAGE_MISS,
+ &stats[clienta].page_miss,
+ &stats[clientb].page_miss);
+ }
+}
+
+static void tegra20_mc_printf_percents(struct seq_file *s,
+ const char *fmt,
+ unsigned int percents_fx)
+{
+ char percents_str[8];
+
+ snprintf(percents_str, ARRAY_SIZE(percents_str), "%3u.%02u%%",
+ percents_fx / MC_FX_FRAC_SCALE, percents_fx % MC_FX_FRAC_SCALE);
+
+ seq_printf(s, fmt, percents_str);
+}
+
+static int tegra20_mc_stats_show(struct seq_file *s, void *unused)
+{
+ const struct tegra_mc *mc = dev_get_drvdata(s->private);
+ struct tegra20_mc_client_stat *stats;
+ unsigned int i;
+
+ stats = kcalloc(mc->soc->num_clients + 1, sizeof(*stats), GFP_KERNEL);
+ if (!stats)
+ return -ENOMEM;
+
+ mutex_lock(&tegra20_mc_stat_lock);
+
+ tegra20_mc_collect_stats(mc, stats);
+
+ mutex_unlock(&tegra20_mc_stat_lock);
+
+ seq_puts(s, "Memory client Events Timeout High priority Bandwidth ARB RW change Successive Page miss\n");
+ seq_puts(s, "-----------------------------------------------------------------------------------------------------\n");
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ seq_printf(s, "%-14s ", mc->soc->clients[i].name);
+
+		/* An event is generated when a client performs a read/write request. */
+ tegra20_mc_printf_percents(s, "%-9s", stats[i].events);
+
+ /*
+ * An event is generated based on the timeout (TM) signal
+ * accompanying a request for arbitration.
+ */
+ tegra20_mc_printf_percents(s, "%-10s", stats[i].arb_timeout);
+
+ /*
+ * An event is generated based on the high-priority (HP) signal
+ * accompanying a request for arbitration.
+ */
+ tegra20_mc_printf_percents(s, "%-16s", stats[i].arb_high_prio);
+
+ /*
+ * An event is generated based on the bandwidth (BW) signal
+ * accompanying a request for arbitration.
+ */
+ tegra20_mc_printf_percents(s, "%-16s", stats[i].arb_bandwidth);
+
+ /*
+ * An event is generated when the memory controller switches
+ * between making a read request to making a write request.
+ */
+ tegra20_mc_printf_percents(s, "%-12s", stats[i].rd_wr_change);
+
+ /*
+		 * An event is generated when the chosen client wins arbitration
+		 * and it was also the winner of the previous request. If a
+ * client makes N requests in a row that are honored, SUCCESSIVE
+ * will be counted (N-1) times. Large values for this event
+ * imply that if we were patient enough, all of those requests
+ * could have been coalesced.
+ */
+ tegra20_mc_printf_percents(s, "%-13s", stats[i].successive);
+
+ /*
+ * An event is generated when the memory controller detects a
+ * page miss for the current request.
+ */
+ tegra20_mc_printf_percents(s, "%-12s\n", stats[i].page_miss);
+ }
+
+ kfree(stats);
+
+ return 0;
+}
+
+static int tegra20_mc_probe(struct tegra_mc *mc)
+{
+ debugfs_create_devm_seqfile(mc->dev, "stats", mc->debugfs.root,
+ tegra20_mc_stats_show);
+
+ return 0;
+}
+
+static int tegra20_mc_suspend(struct tegra_mc *mc)
+{
+ int err;
+
+ if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart) {
+ err = tegra_gart_suspend(mc->gart);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra20_mc_resume(struct tegra_mc *mc)
+{
+ int err;
+
+ if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart) {
+ err = tegra_gart_resume(mc->gart);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static irqreturn_t tegra20_mc_handle_irq(int irq, void *data)
+{
+ struct tegra_mc *mc = data;
+ unsigned long status;
+ unsigned int bit;
+
+ /* mask all interrupts to avoid flooding */
+ status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask;
+ if (!status)
+ return IRQ_NONE;
+
+ for_each_set_bit(bit, &status, 32) {
+ const char *error = tegra_mc_status_names[bit];
+ const char *direction = "read", *secure = "";
+ const char *client, *desc;
+ phys_addr_t addr;
+ u32 value, reg;
+ u8 id, type;
+
+ switch (BIT(bit)) {
+ case MC_INT_DECERR_EMEM:
+ reg = MC_DECERR_EMEM_OTHERS_STATUS;
+ value = mc_readl(mc, reg);
+
+ id = value & mc->soc->client_id_mask;
+ desc = tegra_mc_error_names[2];
+
+ if (value & BIT(31))
+ direction = "write";
+ break;
+
+ case MC_INT_INVALID_GART_PAGE:
+ reg = MC_GART_ERROR_REQ;
+ value = mc_readl(mc, reg);
+
+ id = (value >> 1) & mc->soc->client_id_mask;
+ desc = tegra_mc_error_names[2];
+
+ if (value & BIT(0))
+ direction = "write";
+ break;
+
+ case MC_INT_SECURITY_VIOLATION:
+ reg = MC_SECURITY_VIOLATION_STATUS;
+ value = mc_readl(mc, reg);
+
+ id = value & mc->soc->client_id_mask;
+ type = (value & BIT(30)) ? 4 : 3;
+ desc = tegra_mc_error_names[type];
+ secure = "secure ";
+
+ if (value & BIT(31))
+ direction = "write";
+ break;
+
+ default:
+ continue;
+ }
+
+ client = mc->soc->clients[id].name;
+ addr = mc_readl(mc, reg + sizeof(u32));
+
+ dev_err_ratelimited(mc->dev, "%s: %s%s @%pa: %s (%s)\n",
+ client, secure, direction, &addr, error,
+ desc);
+ }
+
+ /* clear interrupts */
+ mc_writel(mc, status, MC_INTSTATUS);
+
+ return IRQ_HANDLED;
+}
+
+static const struct tegra_mc_ops tegra20_mc_ops = {
+ .probe = tegra20_mc_probe,
+ .suspend = tegra20_mc_suspend,
+ .resume = tegra20_mc_resume,
+ .handle_irq = tegra20_mc_handle_irq,
+};
+
+const struct tegra_mc_soc tegra20_mc_soc = {
+ .clients = tegra20_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra20_mc_clients),
+ .num_address_bits = 32,
+ .client_id_mask = 0x3f,
+ .intmask = MC_INT_SECURITY_VIOLATION | MC_INT_INVALID_GART_PAGE |
+ MC_INT_DECERR_EMEM,
+ .reset_ops = &tegra20_mc_reset_ops,
+ .resets = tegra20_mc_resets,
+ .num_resets = ARRAY_SIZE(tegra20_mc_resets),
+ .icc_ops = &tegra20_mc_icc_ops,
+ .ops = &tegra20_mc_ops,
+};
diff --git a/drivers/memory/tegra/tegra210-emc-cc-r21021.c b/drivers/memory/tegra/tegra210-emc-cc-r21021.c
new file mode 100644
index 000000000..4cb608c71
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-emc-cc-r21021.c
@@ -0,0 +1,1774 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+
+#include <soc/tegra/mc.h>
+
+#include "tegra210-emc.h"
+#include "tegra210-mc.h"
+
+/*
+ * Enable flags for specifying verbosity.
+ */
+#define INFO (1 << 0)
+#define STEPS (1 << 1)
+#define SUB_STEPS (1 << 2)
+#define PRELOCK (1 << 3)
+#define PRELOCK_STEPS (1 << 4)
+#define ACTIVE_EN (1 << 5)
+#define PRAMP_UP (1 << 6)
+#define PRAMP_DN (1 << 7)
+#define EMA_WRITES (1 << 10)
+#define EMA_UPDATES (1 << 11)
+#define PER_TRAIN (1 << 16)
+#define CC_PRINT (1 << 17)
+#define CCFIFO (1 << 29)
+#define REGS (1 << 30)
+#define REG_LISTS (1 << 31)
+
+#define emc_dbg(emc, flags, ...) dev_dbg(emc->dev, __VA_ARGS__)
+
+#define DVFS_CLOCK_CHANGE_VERSION 21021
+#define EMC_PRELOCK_VERSION 2101
+
+enum {
+ DVFS_SEQUENCE = 1,
+ WRITE_TRAINING_SEQUENCE = 2,
+ PERIODIC_TRAINING_SEQUENCE = 3,
+ DVFS_PT1 = 10,
+ DVFS_UPDATE = 11,
+ TRAINING_PT1 = 12,
+ TRAINING_UPDATE = 13,
+ PERIODIC_TRAINING_UPDATE = 14
+};
+
+/*
+ * PTFV defines - basically just indexes into the per-table PTFV array.
+ */
+#define PTFV_DQSOSC_MOVAVG_C0D0U0_INDEX 0
+#define PTFV_DQSOSC_MOVAVG_C0D0U1_INDEX 1
+#define PTFV_DQSOSC_MOVAVG_C0D1U0_INDEX 2
+#define PTFV_DQSOSC_MOVAVG_C0D1U1_INDEX 3
+#define PTFV_DQSOSC_MOVAVG_C1D0U0_INDEX 4
+#define PTFV_DQSOSC_MOVAVG_C1D0U1_INDEX 5
+#define PTFV_DQSOSC_MOVAVG_C1D1U0_INDEX 6
+#define PTFV_DQSOSC_MOVAVG_C1D1U1_INDEX 7
+#define PTFV_DVFS_SAMPLES_INDEX 9
+#define PTFV_MOVAVG_WEIGHT_INDEX 10
+#define PTFV_CONFIG_CTRL_INDEX 11
+
+#define PTFV_CONFIG_CTRL_USE_PREVIOUS_EMA (1 << 0)
+
+/*
+ * Do arithmetic in fixed point.
+ */
+#define MOVAVG_PRECISION_FACTOR 100
+
+/*
+ * The division portion of the average operation.
+ */
+#define __AVERAGE_PTFV(dev) \
+ ({ next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] = \
+ next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] / \
+ next->ptfv_list[PTFV_DVFS_SAMPLES_INDEX]; })
+
+/*
+ * Convert val to fixed point and add it to the temporary average.
+ */
+#define __INCREMENT_PTFV(dev, val) \
+ ({ next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] += \
+ ((val) * MOVAVG_PRECISION_FACTOR); })
+
+/*
+ * Convert a moving average back to integral form and return the value.
+ */
+#define __MOVAVG_AC(timing, dev) \
+ ((timing)->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] / \
+ MOVAVG_PRECISION_FACTOR)
+
+/* Weighted update. */
+#define __WEIGHTED_UPDATE_PTFV(dev, nval) \
+ do { \
+ int w = PTFV_MOVAVG_WEIGHT_INDEX; \
+ int dqs = PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX; \
+ \
+ next->ptfv_list[dqs] = \
+ ((nval * MOVAVG_PRECISION_FACTOR) + \
+ (next->ptfv_list[dqs] * \
+ next->ptfv_list[w])) / \
+ (next->ptfv_list[w] + 1); \
+ \
+ emc_dbg(emc, EMA_UPDATES, "%s: (s=%lu) EMA: %u\n", \
+ __stringify(dev), nval, next->ptfv_list[dqs]); \
+ } while (0)
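+
+/*
+ * Fixed-point scaling aside, the weighted update above is an exponential
+ * moving average: new = (sample + weight * old) / (weight + 1). Larger
+ * PTFV_MOVAVG_WEIGHT values therefore make the tracked clock-tree delay
+ * respond more slowly to new DQS oscillator samples.
+ */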
+
+/* Access a particular average. */
+#define __MOVAVG(timing, dev) \
+ ((timing)->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX])
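+
+/*
+ * update_clock_tree_delay() samples the DQS oscillator counts for each
+ * channel/device from mode registers 18/19 via MRR, converts them into
+ * clock-tree delay figures and feeds them into the moving averages above.
+ * The current_dram_clktree[] values are only rewritten when the measured
+ * delay drifts by more than the timing table's tree_margin.
+ */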
+
+static u32 update_clock_tree_delay(struct tegra210_emc *emc, int type)
+{
+ bool periodic_training_update = type == PERIODIC_TRAINING_UPDATE;
+ struct tegra210_emc_timing *last = emc->last;
+ struct tegra210_emc_timing *next = emc->next;
+ u32 last_timing_rate_mhz = last->rate / 1000;
+ u32 next_timing_rate_mhz = next->rate / 1000;
+ bool dvfs_update = type == DVFS_UPDATE;
+ s32 tdel = 0, tmdel = 0, adel = 0;
+ bool dvfs_pt1 = type == DVFS_PT1;
+ unsigned long cval = 0;
+ u32 temp[2][2], value;
+ unsigned int i;
+
+ /*
+ * Dev0 MSB.
+ */
+ if (dvfs_pt1 || periodic_training_update) {
+ value = tegra210_emc_mrr_read(emc, 2, 19);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ temp[i][0] = (value & 0x00ff) << 8;
+ temp[i][1] = (value & 0xff00) << 0;
+ value >>= 16;
+ }
+
+ /*
+ * Dev0 LSB.
+ */
+ value = tegra210_emc_mrr_read(emc, 2, 18);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ temp[i][0] |= (value & 0x00ff) >> 0;
+ temp[i][1] |= (value & 0xff00) >> 8;
+ value >>= 16;
+ }
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[0][0];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C0D0U0, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C0D0U0);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C0D0U0, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C0D0U0] -
+ __MOVAVG_AC(next, C0D0U0);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C0D0U0] =
+ __MOVAVG_AC(next, C0D0U0);
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[0][1];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C0D0U1, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C0D0U1);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C0D0U1, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C0D0U1] -
+ __MOVAVG_AC(next, C0D0U1);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C0D0U1] =
+ __MOVAVG_AC(next, C0D0U1);
+ }
+
+ if (emc->num_channels > 1) {
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[1][0];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C1D0U0, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C1D0U0);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C1D0U0, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C1D0U0] -
+ __MOVAVG_AC(next, C1D0U0);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C1D0U0] =
+ __MOVAVG_AC(next, C1D0U0);
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[1][1];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C1D0U1, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C1D0U1);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C1D0U1, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C1D0U1] -
+ __MOVAVG_AC(next, C1D0U1);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C1D0U1] =
+ __MOVAVG_AC(next, C1D0U1);
+ }
+ }
+
+ if (emc->num_devices < 2)
+ goto done;
+
+ /*
+ * Dev1 MSB.
+ */
+ if (dvfs_pt1 || periodic_training_update) {
+ value = tegra210_emc_mrr_read(emc, 1, 19);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ temp[i][0] = (value & 0x00ff) << 8;
+ temp[i][1] = (value & 0xff00) << 0;
+ value >>= 16;
+ }
+
+ /*
+ * Dev1 LSB.
+ */
+ value = tegra210_emc_mrr_read(emc, 1, 18);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ temp[i][0] |= (value & 0x00ff) >> 0;
+ temp[i][1] |= (value & 0xff00) >> 8;
+ value >>= 16;
+ }
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[0][0];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C0D1U0, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C0D1U0);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C0D1U0, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C0D1U0] -
+ __MOVAVG_AC(next, C0D1U0);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C0D1U0] =
+ __MOVAVG_AC(next, C0D1U0);
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[0][1];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C0D1U1, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C0D1U1);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C0D1U1, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C0D1U1] -
+ __MOVAVG_AC(next, C0D1U1);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C0D1U1] =
+ __MOVAVG_AC(next, C0D1U1);
+ }
+
+ if (emc->num_channels > 1) {
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[1][0];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C1D1U0, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C1D1U0);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C1D1U0, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C1D1U0] -
+ __MOVAVG_AC(next, C1D1U0);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C1D1U0] =
+ __MOVAVG_AC(next, C1D1U0);
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[1][1];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C1D1U1, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C1D1U1);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C1D1U1, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C1D1U1] -
+ __MOVAVG_AC(next, C1D1U1);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C1D1U1] =
+ __MOVAVG_AC(next, C1D1U1);
+ }
+ }
+
+done:
+ return adel;
+}
+
+static u32 periodic_compensation_handler(struct tegra210_emc *emc, u32 type,
+ struct tegra210_emc_timing *last,
+ struct tegra210_emc_timing *next)
+{
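+/*
+ * The moving-average accumulators hold the sum of PTFV_DVFS_SAMPLES
+ * samples, so seeding them from the previous table's (already averaged)
+ * value requires scaling by the sample count.
+ */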
+#define __COPY_EMA(nt, lt, dev) \
+ ({ __MOVAVG(nt, dev) = __MOVAVG(lt, dev) * \
+ (nt)->ptfv_list[PTFV_DVFS_SAMPLES_INDEX]; })
+
+ u32 i, adel = 0, samples = next->ptfv_list[PTFV_DVFS_SAMPLES_INDEX];
+ u32 delay;
+
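+ /*
+ * Time, in microseconds, for the DRAM oscillators to run for the
+ * programmed number of clocks, plus a small guard interval.
+ */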
+ delay = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ delay *= 1000;
+ delay = 2 + (delay / last->rate);
+
+ if (!next->periodic_training)
+ return 0;
+
+ if (type == DVFS_SEQUENCE) {
+ if (last->periodic_training &&
+ (next->ptfv_list[PTFV_CONFIG_CTRL_INDEX] &
+ PTFV_CONFIG_CTRL_USE_PREVIOUS_EMA)) {
+ /*
+ * If the previous frequency was using periodic
+ * calibration then we can reuse the previous
+ * frequency's EMA data.
+ */
+ __COPY_EMA(next, last, C0D0U0);
+ __COPY_EMA(next, last, C0D0U1);
+ __COPY_EMA(next, last, C1D0U0);
+ __COPY_EMA(next, last, C1D0U1);
+ __COPY_EMA(next, last, C0D1U0);
+ __COPY_EMA(next, last, C0D1U1);
+ __COPY_EMA(next, last, C1D1U0);
+ __COPY_EMA(next, last, C1D1U1);
+ } else {
+ /* Reset the EMA. */
+ __MOVAVG(next, C0D0U0) = 0;
+ __MOVAVG(next, C0D0U1) = 0;
+ __MOVAVG(next, C1D0U0) = 0;
+ __MOVAVG(next, C1D0U1) = 0;
+ __MOVAVG(next, C0D1U0) = 0;
+ __MOVAVG(next, C0D1U1) = 0;
+ __MOVAVG(next, C1D1U0) = 0;
+ __MOVAVG(next, C1D1U1) = 0;
+
+ for (i = 0; i < samples; i++) {
+ tegra210_emc_start_periodic_compensation(emc);
+ udelay(delay);
+
+ /*
+ * Generate next sample of data.
+ */
+ adel = update_clock_tree_delay(emc, DVFS_PT1);
+ }
+ }
+
+ /*
+ * Seems like it should be part of the
+ * 'if (last_timing->periodic_training)' conditional,
+ * since it is already done for the else clause.
+ */
+ adel = update_clock_tree_delay(emc, DVFS_UPDATE);
+ }
+
+ if (type == PERIODIC_TRAINING_SEQUENCE) {
+ tegra210_emc_start_periodic_compensation(emc);
+ udelay(delay);
+
+ adel = update_clock_tree_delay(emc, PERIODIC_TRAINING_UPDATE);
+ }
+
+ return adel;
+}
+
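+/*
+ * Periodic (temperature and voltage drift) compensation: kick off the DRAM
+ * oscillators, measure how far the clock tree has drifted and, if the drift
+ * exceeds the margin set in the timing table, rewrite the affected OB DDLL
+ * and barrel-shift trim registers.
+ */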
+static u32 tegra210_emc_r21021_periodic_compensation(struct tegra210_emc *emc)
+{
+ u32 emc_cfg, emc_cfg_o, emc_cfg_update, del, value;
+ static const u32 list[] = {
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3,
+ EMC_DATA_BRLSHFT_0,
+ EMC_DATA_BRLSHFT_1
+ };
+ struct tegra210_emc_timing *last = emc->last;
+ unsigned int items = ARRAY_SIZE(list), i;
+ unsigned long delay;
+
+ if (last->periodic_training) {
+ emc_dbg(emc, PER_TRAIN, "Periodic training starting\n");
+
+ value = emc_readl(emc, EMC_DBG);
+ emc_cfg_o = emc_readl(emc, EMC_CFG);
+ emc_cfg = emc_cfg_o & ~(EMC_CFG_DYN_SELF_REF |
+ EMC_CFG_DRAM_ACPD |
+ EMC_CFG_DRAM_CLKSTOP_PD);
+
+ /*
+ * 1. Power optimizations should be off.
+ */
+ emc_writel(emc, emc_cfg, EMC_CFG);
+
+ /* Does emc_timing_update() for above changes. */
+ tegra210_emc_dll_disable(emc);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_DRAM_IN_POWERDOWN_MASK,
+ 0);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK,
+ 0);
+
+ emc_cfg_update = value = emc_readl(emc, EMC_CFG_UPDATE);
+ value &= ~EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_MASK;
+ value |= (2 << EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_SHIFT);
+ emc_writel(emc, value, EMC_CFG_UPDATE);
+
+ /*
+ * 2. osc kick off - this assumes training and dvfs have set
+ * correct MR23.
+ */
+ tegra210_emc_start_periodic_compensation(emc);
+
+ /*
+ * 3. Let dram capture its clock tree delays.
+ */
+ delay = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ delay *= 1000;
+ delay /= last->rate + 1;
+ udelay(delay);
+
+ /*
+ * 4. Check delta wrt previous values (save value if margin
+ * exceeds what is set in table).
+ */
+ del = periodic_compensation_handler(emc,
+ PERIODIC_TRAINING_SEQUENCE,
+ last, last);
+
+ /*
+ * 5. Apply compensation w.r.t. trained values (if clock tree
+ * has drifted more than the set margin).
+ */
+ if (last->tree_margin < ((del * 128 * (last->rate / 1000)) / 1000000)) {
+ for (i = 0; i < items; i++) {
+ value = tegra210_emc_compensate(last, list[i]);
+ emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n",
+ list[i], value);
+ emc_writel(emc, value, list[i]);
+ }
+ }
+
+ emc_writel(emc, emc_cfg_o, EMC_CFG);
+
+ /*
+ * 6. Timing update actually applies the new trimmers.
+ */
+ tegra210_emc_timing_update(emc);
+
+ /* 6.1. Restore the UPDATE_DLL_IN_UPDATE field. */
+ emc_writel(emc, emc_cfg_update, EMC_CFG_UPDATE);
+
+ /* 6.2. Restore the DLL. */
+ tegra210_emc_dll_enable(emc);
+ }
+
+ return 0;
+}
+
+/*
+ * Do the clock change sequence.
+ */
+static void tegra210_emc_r21021_set_clock(struct tegra210_emc *emc, u32 clksrc)
+{
+ /* state variables */
+ static bool fsp_for_next_freq;
+ /* constant configuration parameters */
+ const bool save_restore_clkstop_pd = true;
+ const u32 zqcal_before_cc_cutoff = 2400;
+ const bool cya_allow_ref_cc = false;
+ const bool cya_issue_pc_ref = false;
+ const bool opt_cc_short_zcal = true;
+ const bool ref_b4_sref_en = false;
+ const u32 tZQCAL_lpddr4 = 1000000;
+ const bool opt_short_zcal = true;
+ const bool opt_do_sw_qrst = true;
+ const u32 opt_dvfs_mode = MAN_SR;
+ /*
+ * This is the timing table for the source frequency. It does _not_
+ * necessarily correspond to the actual timing values in the EMC at the
+ * moment. If the boot BCT differs from the table then this can happen.
+ * However, we need it for accessing the dram_timings (which are not
+ * really registers) array for the current frequency.
+ */
+ struct tegra210_emc_timing *fake, *last = emc->last, *next = emc->next;
+ u32 tRTM, RP_war, R2P_war, TRPab_war, deltaTWATM, W2P_war, tRPST;
+ u32 mr13_flip_fspwr, mr13_flip_fspop, ramp_up_wait, ramp_down_wait;
+ u32 zq_wait_long, zq_latch_dvfs_wait_time, tZQCAL_lpddr4_fc_adj;
+ u32 emc_auto_cal_config, auto_cal_en, emc_cfg, emc_sel_dpd_ctrl;
+ u32 tFC_lpddr4 = 1000 * next->dram_timings[T_FC_LPDDR4];
+ u32 bg_reg_mode_change, enable_bglp_reg, enable_bg_reg;
+ bool opt_zcal_en_cc = false, is_lpddr3 = false;
+ bool compensate_trimmer_applicable = false;
+ u32 emc_dbg, emc_cfg_pipe_clk, emc_pin;
+ u32 src_clk_period, dst_clk_period; /* in picoseconds */
+ bool shared_zq_resistor = false;
+ u32 value, dram_type;
+ u32 opt_dll_mode = 0;
+ unsigned long delay;
+ unsigned int i;
+
+ emc_dbg(emc, INFO, "Running clock change.\n");
+
+ /* XXX fake == last */
+ fake = tegra210_emc_find_timing(emc, last->rate * 1000UL);
+ fsp_for_next_freq = !fsp_for_next_freq;
+
+ value = emc_readl(emc, EMC_FBIO_CFG5) & EMC_FBIO_CFG5_DRAM_TYPE_MASK;
+ dram_type = value >> EMC_FBIO_CFG5_DRAM_TYPE_SHIFT;
+
+ if (last->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX] & BIT(31))
+ shared_zq_resistor = true;
+
+ if ((next->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0 &&
+ last->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0) ||
+ dram_type == DRAM_TYPE_LPDDR4)
+ opt_zcal_en_cc = true;
+
+ if (dram_type == DRAM_TYPE_DDR3)
+ opt_dll_mode = tegra210_emc_get_dll_state(next);
+
+ if ((next->burst_regs[EMC_FBIO_CFG5_INDEX] & BIT(25)) &&
+ (dram_type == DRAM_TYPE_LPDDR2))
+ is_lpddr3 = true;
+
+ emc_readl(emc, EMC_CFG);
+ emc_readl(emc, EMC_AUTO_CAL_CONFIG);
+
+ src_clk_period = 1000000000 / last->rate;
+ dst_clk_period = 1000000000 / next->rate;
+
+ if (dst_clk_period <= zqcal_before_cc_cutoff)
+ tZQCAL_lpddr4_fc_adj = tZQCAL_lpddr4 - tFC_lpddr4;
+ else
+ tZQCAL_lpddr4_fc_adj = tZQCAL_lpddr4;
+
+ tZQCAL_lpddr4_fc_adj /= dst_clk_period;
+
+ emc_dbg = emc_readl(emc, EMC_DBG);
+ emc_pin = emc_readl(emc, EMC_PIN);
+ emc_cfg_pipe_clk = emc_readl(emc, EMC_CFG_PIPE_CLK);
+
+ emc_cfg = next->burst_regs[EMC_CFG_INDEX];
+ emc_cfg &= ~(EMC_CFG_DYN_SELF_REF | EMC_CFG_DRAM_ACPD |
+ EMC_CFG_DRAM_CLKSTOP_SR | EMC_CFG_DRAM_CLKSTOP_PD);
+ emc_sel_dpd_ctrl = next->emc_sel_dpd_ctrl;
+ emc_sel_dpd_ctrl &= ~(EMC_SEL_DPD_CTRL_CLK_SEL_DPD_EN |
+ EMC_SEL_DPD_CTRL_CA_SEL_DPD_EN |
+ EMC_SEL_DPD_CTRL_RESET_SEL_DPD_EN |
+ EMC_SEL_DPD_CTRL_ODT_SEL_DPD_EN |
+ EMC_SEL_DPD_CTRL_DATA_SEL_DPD_EN);
+
+ emc_dbg(emc, INFO, "Clock change version: %d\n",
+ DVFS_CLOCK_CHANGE_VERSION);
+ emc_dbg(emc, INFO, "DRAM type = %d\n", dram_type);
+ emc_dbg(emc, INFO, "DRAM dev #: %u\n", emc->num_devices);
+ emc_dbg(emc, INFO, "Next EMC clksrc: 0x%08x\n", clksrc);
+ emc_dbg(emc, INFO, "DLL clksrc: 0x%08x\n", next->dll_clk_src);
+ emc_dbg(emc, INFO, "last rate: %u, next rate %u\n", last->rate,
+ next->rate);
+ emc_dbg(emc, INFO, "last period: %u, next period: %u\n",
+ src_clk_period, dst_clk_period);
+ emc_dbg(emc, INFO, " shared_zq_resistor: %d\n", !!shared_zq_resistor);
+ emc_dbg(emc, INFO, " num_channels: %u\n", emc->num_channels);
+ emc_dbg(emc, INFO, " opt_dll_mode: %d\n", opt_dll_mode);
+
+ /*
+ * Step 1:
+ * Pre DVFS SW sequence.
+ */
+ emc_dbg(emc, STEPS, "Step 1\n");
+ emc_dbg(emc, STEPS, "Step 1.1: Disable DLL temporarily.\n");
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+
+ tegra210_emc_timing_update(emc);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_CFG_DIG_DLL,
+ EMC_CFG_DIG_DLL_CFG_DLL_EN, 0);
+
+ emc_dbg(emc, STEPS, "Step 1.2: Disable AUTOCAL temporarily.\n");
+
+ emc_auto_cal_config = next->emc_auto_cal_config;
+ auto_cal_en = emc_auto_cal_config & EMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
+ emc_auto_cal_config &= ~EMC_AUTO_CAL_CONFIG_AUTO_CAL_START;
+ emc_auto_cal_config |= EMC_AUTO_CAL_CONFIG_AUTO_CAL_MEASURE_STALL;
+ emc_auto_cal_config |= EMC_AUTO_CAL_CONFIG_AUTO_CAL_UPDATE_STALL;
+ emc_auto_cal_config |= auto_cal_en;
+ emc_writel(emc, emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
+ emc_readl(emc, EMC_AUTO_CAL_CONFIG); /* Flush write. */
+
+ emc_dbg(emc, STEPS, "Step 1.3: Disable other power features.\n");
+
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc, emc_cfg, EMC_CFG);
+ emc_writel(emc, emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+
+ if (next->periodic_training) {
+ tegra210_emc_reset_dram_clktree_values(next);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_DRAM_IN_POWERDOWN_MASK,
+ 0);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK,
+ 0);
+
+ tegra210_emc_start_periodic_compensation(emc);
+
+ delay = 1000 * tegra210_emc_actual_osc_clocks(last->run_clocks);
+ udelay((delay / last->rate) + 2);
+
+ value = periodic_compensation_handler(emc, DVFS_SEQUENCE, fake,
+ next);
+ value = (value * 128 * next->rate / 1000) / 1000000;
+
+ if (next->periodic_training && value > next->tree_margin)
+ compensate_trimmer_applicable = true;
+ }
+
+ emc_writel(emc, EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc, emc_cfg, EMC_CFG);
+ emc_writel(emc, emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
+ emc_writel(emc, emc_cfg_pipe_clk | EMC_CFG_PIPE_CLK_CLK_ALWAYS_ON,
+ EMC_CFG_PIPE_CLK);
+ emc_writel(emc, next->emc_fdpd_ctrl_cmd_no_ramp &
+ ~EMC_FDPD_CTRL_CMD_NO_RAMP_CMD_DPD_NO_RAMP_ENABLE,
+ EMC_FDPD_CTRL_CMD_NO_RAMP);
+
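+ /*
+ * Work out whether the BG/BGLP bias regulators change power state
+ * between the current and the next table; if so, the regulator that
+ * will be needed is powered up ahead of the switch.
+ */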
+ bg_reg_mode_change =
+ ((next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD) ^
+ (last->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD)) ||
+ ((next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD) ^
+ (last->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD));
+ enable_bglp_reg =
+ (next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD) == 0;
+ enable_bg_reg =
+ (next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD) == 0;
+
+ if (bg_reg_mode_change) {
+ if (enable_bg_reg)
+ emc_writel(emc, last->burst_regs
+ [EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ ~EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD,
+ EMC_PMACRO_BG_BIAS_CTRL_0);
+
+ if (enable_bglp_reg)
+ emc_writel(emc, last->burst_regs
+ [EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ ~EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD,
+ EMC_PMACRO_BG_BIAS_CTRL_0);
+ }
+
+ /* Check if we need to turn on VREF generator. */
+ if ((((last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF) == 0) &&
+ ((next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF) == 1)) ||
+ (((last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) == 0) &&
+ ((next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) != 0))) {
+ u32 pad_tx_ctrl =
+ next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
+ u32 last_pad_tx_ctrl =
+ last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
+ u32 next_dq_e_ivref, next_dqs_e_ivref;
+
+ next_dqs_e_ivref = pad_tx_ctrl &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF;
+ next_dq_e_ivref = pad_tx_ctrl &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF;
+ value = (last_pad_tx_ctrl &
+ ~EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF &
+ ~EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) |
+ next_dq_e_ivref | next_dqs_e_ivref;
+ emc_writel(emc, value, EMC_PMACRO_DATA_PAD_TX_CTRL);
+ udelay(1);
+ } else if (bg_reg_mode_change) {
+ udelay(1);
+ }
+
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+
+ /*
+ * Step 2:
+ * Prelock the DLL.
+ */
+ emc_dbg(emc, STEPS, "Step 2\n");
+
+ if (next->burst_regs[EMC_CFG_DIG_DLL_INDEX] &
+ EMC_CFG_DIG_DLL_CFG_DLL_EN) {
+ emc_dbg(emc, INFO, "Prelock enabled for target frequency.\n");
+ value = tegra210_emc_dll_prelock(emc, clksrc);
+ emc_dbg(emc, INFO, "DLL out: 0x%03x\n", value);
+ } else {
+ emc_dbg(emc, INFO, "Disabling DLL for target frequency.\n");
+ tegra210_emc_dll_disable(emc);
+ }
+
+ /*
+ * Step 3:
+ * Prepare autocal for the clock change.
+ */
+ emc_dbg(emc, STEPS, "Step 3\n");
+
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc, next->emc_auto_cal_config2, EMC_AUTO_CAL_CONFIG2);
+ emc_writel(emc, next->emc_auto_cal_config3, EMC_AUTO_CAL_CONFIG3);
+ emc_writel(emc, next->emc_auto_cal_config4, EMC_AUTO_CAL_CONFIG4);
+ emc_writel(emc, next->emc_auto_cal_config5, EMC_AUTO_CAL_CONFIG5);
+ emc_writel(emc, next->emc_auto_cal_config6, EMC_AUTO_CAL_CONFIG6);
+ emc_writel(emc, next->emc_auto_cal_config7, EMC_AUTO_CAL_CONFIG7);
+ emc_writel(emc, next->emc_auto_cal_config8, EMC_AUTO_CAL_CONFIG8);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+
+ emc_auto_cal_config |= (EMC_AUTO_CAL_CONFIG_AUTO_CAL_COMPUTE_START |
+ auto_cal_en);
+ emc_writel(emc, emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
+
+ /*
+ * Step 4:
+ * Update EMC_CFG. (??)
+ */
+ emc_dbg(emc, STEPS, "Step 4\n");
+
+ if (src_clk_period > 50000 && dram_type == DRAM_TYPE_LPDDR4)
+ ccfifo_writel(emc, 1, EMC_SELF_REF, 0);
+ else
+ emc_writel(emc, next->emc_cfg_2, EMC_CFG_2);
+
+ /*
+ * Step 5:
+ * Prepare reference variables for ZQCAL regs.
+ */
+ emc_dbg(emc, STEPS, "Step 5\n");
+
+ if (dram_type == DRAM_TYPE_LPDDR4)
+ zq_wait_long = max((u32)1, div_o3(1000000, dst_clk_period));
+ else if (dram_type == DRAM_TYPE_LPDDR2 || is_lpddr3)
+ zq_wait_long = max(next->min_mrs_wait,
+ div_o3(360000, dst_clk_period)) + 4;
+ else if (dram_type == DRAM_TYPE_DDR3)
+ zq_wait_long = max((u32)256,
+ div_o3(320000, dst_clk_period) + 2);
+ else
+ zq_wait_long = 0;
+
+ /*
+ * Step 6:
+ * Training code - removed.
+ */
+ emc_dbg(emc, STEPS, "Step 6\n");
+
+ /*
+ * Step 7:
+ * Program FSP reference registers and send MRWs to new FSPWR.
+ */
+ emc_dbg(emc, STEPS, "Step 7\n");
+ emc_dbg(emc, SUB_STEPS, "Step 7.1: Bug 200024907 - Patch RP R2P\n");
+
+ /* WAR 200024907 */
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ u32 nRTP = 16;
+
+ if (src_clk_period >= 1000000 / 1866) /* 535.91 ps */
+ nRTP = 14;
+
+ if (src_clk_period >= 1000000 / 1600) /* 625.00 ps */
+ nRTP = 12;
+
+ if (src_clk_period >= 1000000 / 1333) /* 750.19 ps */
+ nRTP = 10;
+
+ if (src_clk_period >= 1000000 / 1066) /* 938.09 ps */
+ nRTP = 8;
+
+ deltaTWATM = max_t(u32, div_o3(7500, src_clk_period), 8);
+
+ /*
+ * Originally there was a + .5 in the tRPST calculation.
+ * However, since we can't do FP in the kernel and the tRTM
+ * computation was in a floating point ceiling function, adding
+ * one to tRTP should be ok. There is no other source of
+ * non-integer values, so the result was always going to be
+ * something of the form: f_ceil(N + .5) = N + 1;
+ */
+ tRPST = (last->emc_mrw & 0x80) >> 7;
+ tRTM = fake->dram_timings[RL] + div_o3(3600, src_clk_period) +
+ max_t(u32, div_o3(7500, src_clk_period), 8) + tRPST +
+ 1 + nRTP;
+
+ emc_dbg(emc, INFO, "tRTM = %u, EMC_RP = %u\n", tRTM,
+ next->burst_regs[EMC_RP_INDEX]);
+
+ if (last->burst_regs[EMC_RP_INDEX] < tRTM) {
+ if (tRTM > (last->burst_regs[EMC_R2P_INDEX] +
+ last->burst_regs[EMC_RP_INDEX])) {
+ R2P_war = tRTM - last->burst_regs[EMC_RP_INDEX];
+ RP_war = last->burst_regs[EMC_RP_INDEX];
+ TRPab_war = last->burst_regs[EMC_TRPAB_INDEX];
+
+ if (R2P_war > 63) {
+ RP_war = R2P_war +
+ last->burst_regs[EMC_RP_INDEX] - 63;
+
+ if (TRPab_war < RP_war)
+ TRPab_war = RP_war;
+
+ R2P_war = 63;
+ }
+ } else {
+ R2P_war = last->burst_regs[EMC_R2P_INDEX];
+ RP_war = last->burst_regs[EMC_RP_INDEX];
+ TRPab_war = last->burst_regs[EMC_TRPAB_INDEX];
+ }
+
+ if (RP_war < deltaTWATM) {
+ W2P_war = last->burst_regs[EMC_W2P_INDEX]
+ + deltaTWATM - RP_war;
+ if (W2P_war > 63) {
+ RP_war = RP_war + W2P_war - 63;
+ if (TRPab_war < RP_war)
+ TRPab_war = RP_war;
+ W2P_war = 63;
+ }
+ } else {
+ W2P_war = last->burst_regs[EMC_W2P_INDEX];
+ }
+
+ if ((last->burst_regs[EMC_W2P_INDEX] ^ W2P_war) ||
+ (last->burst_regs[EMC_R2P_INDEX] ^ R2P_war) ||
+ (last->burst_regs[EMC_RP_INDEX] ^ RP_war) ||
+ (last->burst_regs[EMC_TRPAB_INDEX] ^ TRPab_war)) {
+ emc_writel(emc, RP_war, EMC_RP);
+ emc_writel(emc, R2P_war, EMC_R2P);
+ emc_writel(emc, W2P_war, EMC_W2P);
+ emc_writel(emc, TRPab_war, EMC_TRPAB);
+ }
+
+ tegra210_emc_timing_update(emc);
+ } else {
+ emc_dbg(emc, INFO, "Skipped WAR\n");
+ }
+ }
+
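+ /*
+ * Alternate between the two LPDDR4 frequency set points on every
+ * clock change: mr13_flip_fspwr selects the set point targeted by
+ * the following mode register writes (FSP-WR), mr13_flip_fspop the
+ * set point the DRAM switches to when the change executes (FSP-OP).
+ */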
+ if (!fsp_for_next_freq) {
+ mr13_flip_fspwr = (next->emc_mrw3 & 0xffffff3f) | 0x80;
+ mr13_flip_fspop = (next->emc_mrw3 & 0xffffff3f) | 0x00;
+ } else {
+ mr13_flip_fspwr = (next->emc_mrw3 & 0xffffff3f) | 0x40;
+ mr13_flip_fspop = (next->emc_mrw3 & 0xffffff3f) | 0xc0;
+ }
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ emc_writel(emc, mr13_flip_fspwr, EMC_MRW3);
+ emc_writel(emc, next->emc_mrw, EMC_MRW);
+ emc_writel(emc, next->emc_mrw2, EMC_MRW2);
+ }
+
+ /*
+ * Step 8:
+ * Program the shadow registers.
+ */
+ emc_dbg(emc, STEPS, "Step 8\n");
+ emc_dbg(emc, SUB_STEPS, "Writing burst_regs\n");
+
+ for (i = 0; i < next->num_burst; i++) {
+ const u16 *offsets = emc->offsets->burst;
+ u16 offset;
+
+ if (!offsets[i])
+ continue;
+
+ value = next->burst_regs[i];
+ offset = offsets[i];
+
+ if (dram_type != DRAM_TYPE_LPDDR4 &&
+ (offset == EMC_MRW6 || offset == EMC_MRW7 ||
+ offset == EMC_MRW8 || offset == EMC_MRW9 ||
+ offset == EMC_MRW10 || offset == EMC_MRW11 ||
+ offset == EMC_MRW12 || offset == EMC_MRW13 ||
+ offset == EMC_MRW14 || offset == EMC_MRW15 ||
+ offset == EMC_TRAINING_CTRL))
+ continue;
+
+ /* Pain... And suffering. */
+ if (offset == EMC_CFG) {
+ value &= ~EMC_CFG_DRAM_ACPD;
+ value &= ~EMC_CFG_DYN_SELF_REF;
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ value &= ~EMC_CFG_DRAM_CLKSTOP_SR;
+ value &= ~EMC_CFG_DRAM_CLKSTOP_PD;
+ }
+ } else if (offset == EMC_MRS_WAIT_CNT &&
+ dram_type == DRAM_TYPE_LPDDR2 &&
+ opt_zcal_en_cc && !opt_cc_short_zcal &&
+ opt_short_zcal) {
+ value = (value & ~(EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK <<
+ EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT)) |
+ ((zq_wait_long & EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) <<
+ EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT);
+ } else if (offset == EMC_ZCAL_WAIT_CNT &&
+ dram_type == DRAM_TYPE_DDR3 && opt_zcal_en_cc &&
+ !opt_cc_short_zcal && opt_short_zcal) {
+ value = (value & ~(EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK <<
+ EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_SHIFT)) |
+ ((zq_wait_long & EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK) <<
+ EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT);
+ } else if (offset == EMC_ZCAL_INTERVAL && opt_zcal_en_cc) {
+ value = 0; /* EMC_ZCAL_INTERVAL reset value. */
+ } else if (offset == EMC_PMACRO_AUTOCAL_CFG_COMMON) {
+ value |= EMC_PMACRO_AUTOCAL_CFG_COMMON_E_CAL_BYPASS_DVFS;
+ } else if (offset == EMC_PMACRO_DATA_PAD_TX_CTRL) {
+ value &= ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC);
+ } else if (offset == EMC_PMACRO_CMD_PAD_TX_CTRL) {
+ value |= EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
+ value &= ~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC);
+ } else if (offset == EMC_PMACRO_BRICK_CTRL_RFU1) {
+ value &= 0xf800f800;
+ } else if (offset == EMC_PMACRO_COMMON_PAD_TX_CTRL) {
+ value &= 0xfffffff0;
+ }
+
+ emc_writel(emc, value, offset);
+ }
+
+ /* SW addition: do EMC refresh adjustment here. */
+ tegra210_emc_adjust_timing(emc, next);
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ value = (23 << EMC_MRW_MRW_MA_SHIFT) |
+ (next->run_clocks & EMC_MRW_MRW_OP_MASK);
+ emc_writel(emc, value, EMC_MRW);
+ }
+
+ /* Per channel burst registers. */
+ emc_dbg(emc, SUB_STEPS, "Writing burst_regs_per_ch\n");
+
+ for (i = 0; i < next->num_burst_per_ch; i++) {
+ const struct tegra210_emc_per_channel_regs *burst =
+ emc->offsets->burst_per_channel;
+
+ if (!burst[i].offset)
+ continue;
+
+ if (dram_type != DRAM_TYPE_LPDDR4 &&
+ (burst[i].offset == EMC_MRW6 ||
+ burst[i].offset == EMC_MRW7 ||
+ burst[i].offset == EMC_MRW8 ||
+ burst[i].offset == EMC_MRW9 ||
+ burst[i].offset == EMC_MRW10 ||
+ burst[i].offset == EMC_MRW11 ||
+ burst[i].offset == EMC_MRW12 ||
+ burst[i].offset == EMC_MRW13 ||
+ burst[i].offset == EMC_MRW14 ||
+ burst[i].offset == EMC_MRW15))
+ continue;
+
+ /* Filter out second channel if not in DUAL_CHANNEL mode. */
+ if (emc->num_channels < 2 && burst[i].bank >= 1)
+ continue;
+
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ next->burst_reg_per_ch[i], burst[i].offset);
+ emc_channel_writel(emc, burst[i].bank,
+ next->burst_reg_per_ch[i],
+ burst[i].offset);
+ }
+
+ /* Vref regs. */
+ emc_dbg(emc, SUB_STEPS, "Writing vref_regs\n");
+
+ for (i = 0; i < next->vref_num; i++) {
+ const struct tegra210_emc_per_channel_regs *vref =
+ emc->offsets->vref_per_channel;
+
+ if (!vref[i].offset)
+ continue;
+
+ if (emc->num_channels < 2 && vref[i].bank >= 1)
+ continue;
+
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ next->vref_perch_regs[i], vref[i].offset);
+ emc_channel_writel(emc, vref[i].bank, next->vref_perch_regs[i],
+ vref[i].offset);
+ }
+
+ /* Trimmers. */
+ emc_dbg(emc, SUB_STEPS, "Writing trim_regs\n");
+
+ for (i = 0; i < next->num_trim; i++) {
+ const u16 *offsets = emc->offsets->trim;
+
+ if (!offsets[i])
+ continue;
+
+ if (compensate_trimmer_applicable &&
+ (offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3 ||
+ offsets[i] == EMC_DATA_BRLSHFT_0 ||
+ offsets[i] == EMC_DATA_BRLSHFT_1)) {
+ value = tegra210_emc_compensate(next, offsets[i]);
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ value, offsets[i]);
+ emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n",
+ (u32)(u64)offsets[i], value);
+ emc_writel(emc, value, offsets[i]);
+ } else {
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ next->trim_regs[i], offsets[i]);
+ emc_writel(emc, next->trim_regs[i], offsets[i]);
+ }
+ }
+
+ /* Per channel trimmers. */
+ emc_dbg(emc, SUB_STEPS, "Writing trim_regs_per_ch\n");
+
+ for (i = 0; i < next->num_trim_per_ch; i++) {
+ const struct tegra210_emc_per_channel_regs *trim =
+ &emc->offsets->trim_per_channel[0];
+ unsigned int offset;
+
+ if (!trim[i].offset)
+ continue;
+
+ if (emc->num_channels < 2 && trim[i].bank >= 1)
+ continue;
+
+ offset = trim[i].offset;
+
+ if (compensate_trimmer_applicable &&
+ (offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3 ||
+ offset == EMC_DATA_BRLSHFT_0 ||
+ offset == EMC_DATA_BRLSHFT_1)) {
+ value = tegra210_emc_compensate(next, offset);
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ value, offset);
+ emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n", offset,
+ value);
+ emc_channel_writel(emc, trim[i].bank, value, offset);
+ } else {
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ next->trim_perch_regs[i], offset);
+ emc_channel_writel(emc, trim[i].bank,
+ next->trim_perch_regs[i], offset);
+ }
+ }
+
+ emc_dbg(emc, SUB_STEPS, "Writing burst_mc_regs\n");
+
+ for (i = 0; i < next->num_mc_regs; i++) {
+ const u16 *offsets = emc->offsets->burst_mc;
+ u32 *values = next->burst_mc_regs;
+
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ values[i], offsets[i]);
+ mc_writel(emc->mc, values[i], offsets[i]);
+ }
+
+ /* Registers to be programmed on the faster clock. */
+ if (next->rate < last->rate) {
+ const u16 *la = emc->offsets->la_scale;
+
+ emc_dbg(emc, SUB_STEPS, "Writing la_scale_regs\n");
+
+ for (i = 0; i < next->num_up_down; i++) {
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ next->la_scale_regs[i], la[i]);
+ mc_writel(emc->mc, next->la_scale_regs[i], la[i]);
+ }
+ }
+
+ /* Flush all the burst register writes. */
+ mc_readl(emc->mc, MC_EMEM_ADR_CFG);
+
+ /*
+ * Step 9:
+ * LPDDR4 section A.
+ */
+ emc_dbg(emc, STEPS, "Step 9\n");
+
+ value = next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX];
+ value &= ~EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK;
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ emc_writel(emc, 0, EMC_ZCAL_INTERVAL);
+ emc_writel(emc, value, EMC_ZCAL_WAIT_CNT);
+
+ value = emc_dbg | (EMC_DBG_WRITE_MUX_ACTIVE |
+ EMC_DBG_WRITE_ACTIVE_ONLY);
+
+ emc_writel(emc, value, EMC_DBG);
+ emc_writel(emc, 0, EMC_ZCAL_INTERVAL);
+ emc_writel(emc, emc_dbg, EMC_DBG);
+ }
+
+ /*
+ * Step 10:
+ * LPDDR4 and DDR3 common section.
+ */
+ emc_dbg(emc, STEPS, "Step 10\n");
+
+ if (opt_dvfs_mode == MAN_SR || dram_type == DRAM_TYPE_LPDDR4) {
+ if (dram_type == DRAM_TYPE_LPDDR4)
+ ccfifo_writel(emc, 0x101, EMC_SELF_REF, 0);
+ else
+ ccfifo_writel(emc, 0x1, EMC_SELF_REF, 0);
+
+ if (dram_type == DRAM_TYPE_LPDDR4 &&
+ dst_clk_period <= zqcal_before_cc_cutoff) {
+ ccfifo_writel(emc, mr13_flip_fspwr ^ 0x40, EMC_MRW3, 0);
+ ccfifo_writel(emc, (next->burst_regs[EMC_MRW6_INDEX] &
+ 0xFFFF3F3F) |
+ (last->burst_regs[EMC_MRW6_INDEX] &
+ 0x0000C0C0), EMC_MRW6, 0);
+ ccfifo_writel(emc, (next->burst_regs[EMC_MRW14_INDEX] &
+ 0xFFFF0707) |
+ (last->burst_regs[EMC_MRW14_INDEX] &
+ 0x00003838), EMC_MRW14, 0);
+
+ if (emc->num_devices > 1) {
+ ccfifo_writel(emc,
+ (next->burst_regs[EMC_MRW7_INDEX] &
+ 0xFFFF3F3F) |
+ (last->burst_regs[EMC_MRW7_INDEX] &
+ 0x0000C0C0), EMC_MRW7, 0);
+ ccfifo_writel(emc,
+ (next->burst_regs[EMC_MRW15_INDEX] &
+ 0xFFFF0707) |
+ (last->burst_regs[EMC_MRW15_INDEX] &
+ 0x00003838), EMC_MRW15, 0);
+ }
+
+ if (opt_zcal_en_cc) {
+ if (emc->num_devices < 2)
+ ccfifo_writel(emc,
+ 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT
+ | EMC_ZQ_CAL_ZQ_CAL_CMD,
+ EMC_ZQ_CAL, 0);
+ else if (shared_zq_resistor)
+ ccfifo_writel(emc,
+ 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT
+ | EMC_ZQ_CAL_ZQ_CAL_CMD,
+ EMC_ZQ_CAL, 0);
+ else
+ ccfifo_writel(emc,
+ EMC_ZQ_CAL_ZQ_CAL_CMD,
+ EMC_ZQ_CAL, 0);
+ }
+ }
+ }
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ value = (1000 * fake->dram_timings[T_RP]) / src_clk_period;
+ ccfifo_writel(emc, mr13_flip_fspop | 0x8, EMC_MRW3, value);
+ ccfifo_writel(emc, 0, 0, tFC_lpddr4 / src_clk_period);
+ }
+
+ if (dram_type == DRAM_TYPE_LPDDR4 || opt_dvfs_mode != MAN_SR) {
+ delay = 30;
+
+ if (cya_allow_ref_cc) {
+ delay += (1000 * fake->dram_timings[T_RP]) /
+ src_clk_period;
+ delay += 4000 * fake->dram_timings[T_RFC];
+ }
+
+ ccfifo_writel(emc, emc_pin & ~(EMC_PIN_PIN_CKE_PER_DEV |
+ EMC_PIN_PIN_CKEB |
+ EMC_PIN_PIN_CKE),
+ EMC_PIN, delay);
+ }
+
+ /* calculate reference delay multiplier */
+ value = 1;
+
+ if (ref_b4_sref_en)
+ value++;
+
+ if (cya_allow_ref_cc)
+ value++;
+
+ if (cya_issue_pc_ref)
+ value++;
+
+ if (dram_type != DRAM_TYPE_LPDDR4) {
+ delay = ((1000 * fake->dram_timings[T_RP] / src_clk_period) +
+ (1000 * fake->dram_timings[T_RFC] / src_clk_period));
+ delay = value * delay + 20;
+ } else {
+ delay = 0;
+ }
+
+ /*
+ * Step 11:
+ * Ramp down.
+ */
+ emc_dbg(emc, STEPS, "Step 11\n");
+
+ ccfifo_writel(emc, 0x0, EMC_CFG_SYNC, delay);
+
+ value = emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE | EMC_DBG_WRITE_ACTIVE_ONLY;
+ ccfifo_writel(emc, value, EMC_DBG, 0);
+
+ ramp_down_wait = tegra210_emc_dvfs_power_ramp_down(emc, src_clk_period,
+ 0);
+
+ /*
+ * Step 12:
+ * And finally - trigger the clock change.
+ */
+ emc_dbg(emc, STEPS, "Step 12\n");
+
+ ccfifo_writel(emc, 1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE, 0);
+ value &= ~EMC_DBG_WRITE_ACTIVE_ONLY;
+ ccfifo_writel(emc, value, EMC_DBG, 0);
+
+ /*
+ * Step 13:
+ * Ramp up.
+ */
+ emc_dbg(emc, STEPS, "Step 13\n");
+
+ ramp_up_wait = tegra210_emc_dvfs_power_ramp_up(emc, dst_clk_period, 0);
+ ccfifo_writel(emc, emc_dbg, EMC_DBG, 0);
+
+ /*
+ * Step 14:
+ * Bring up CKE pins.
+ */
+ emc_dbg(emc, STEPS, "Step 14\n");
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ value = emc_pin | EMC_PIN_PIN_CKE;
+
+ if (emc->num_devices <= 1)
+ value &= ~(EMC_PIN_PIN_CKEB | EMC_PIN_PIN_CKE_PER_DEV);
+ else
+ value |= EMC_PIN_PIN_CKEB | EMC_PIN_PIN_CKE_PER_DEV;
+
+ ccfifo_writel(emc, value, EMC_PIN, 0);
+ }
+
+ /*
+ * Step 15: (two step 15s ??)
+ * Calculate zqlatch wait time; has dependency on ramping times.
+ */
+ emc_dbg(emc, STEPS, "Step 15\n");
+
+ if (dst_clk_period <= zqcal_before_cc_cutoff) {
+ s32 t = (s32)(ramp_up_wait + ramp_down_wait) /
+ (s32)dst_clk_period;
+ zq_latch_dvfs_wait_time = (s32)tZQCAL_lpddr4_fc_adj - t;
+ } else {
+ zq_latch_dvfs_wait_time = tZQCAL_lpddr4_fc_adj -
+ div_o3(1000 * next->dram_timings[T_PDEX],
+ dst_clk_period);
+ }
+
+ emc_dbg(emc, INFO, "tZQCAL_lpddr4_fc_adj = %u\n", tZQCAL_lpddr4_fc_adj);
+ emc_dbg(emc, INFO, "dst_clk_period = %u\n",
+ dst_clk_period);
+ emc_dbg(emc, INFO, "next->dram_timings[T_PDEX] = %u\n",
+ next->dram_timings[T_PDEX]);
+ emc_dbg(emc, INFO, "zq_latch_dvfs_wait_time = %d\n",
+ max_t(s32, 0, zq_latch_dvfs_wait_time));
+
+ if (dram_type == DRAM_TYPE_LPDDR4 && opt_zcal_en_cc) {
+ delay = div_o3(1000 * next->dram_timings[T_PDEX],
+ dst_clk_period);
+
+ if (emc->num_devices < 2) {
+ if (dst_clk_period > zqcal_before_cc_cutoff)
+ ccfifo_writel(emc,
+ 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
+ delay);
+
+ value = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;
+ ccfifo_writel(emc, value, EMC_MRW3, delay);
+ ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
+ ccfifo_writel(emc, 0, EMC_REF, 0);
+ ccfifo_writel(emc, 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_LATCH_CMD,
+ EMC_ZQ_CAL,
+ max_t(s32, 0, zq_latch_dvfs_wait_time));
+ } else if (shared_zq_resistor) {
+ if (dst_clk_period > zqcal_before_cc_cutoff)
+ ccfifo_writel(emc,
+ 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
+ delay);
+
+ ccfifo_writel(emc, 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
+ max_t(s32, 0, zq_latch_dvfs_wait_time) +
+ delay);
+ ccfifo_writel(emc, 1UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_LATCH_CMD,
+ EMC_ZQ_CAL, 0);
+
+ value = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;
+ ccfifo_writel(emc, value, EMC_MRW3, 0);
+ ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
+ ccfifo_writel(emc, 0, EMC_REF, 0);
+
+ ccfifo_writel(emc, 1UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
+ tZQCAL_lpddr4 / dst_clk_period);
+ } else {
+ if (dst_clk_period > zqcal_before_cc_cutoff)
+ ccfifo_writel(emc, EMC_ZQ_CAL_ZQ_CAL_CMD,
+ EMC_ZQ_CAL, delay);
+
+ value = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;
+ ccfifo_writel(emc, value, EMC_MRW3, delay);
+ ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
+ ccfifo_writel(emc, 0, EMC_REF, 0);
+
+ ccfifo_writel(emc, EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
+ max_t(s32, 0, zq_latch_dvfs_wait_time));
+ }
+ }
+
+ /* WAR: delay for zqlatch */
+ ccfifo_writel(emc, 0, 0, 10);
+
+ /*
+ * Step 16:
+ * LPDDR4 Conditional Training Kickoff. Removed.
+ */
+
+ /*
+ * Step 17:
+ * MANSR exit self refresh.
+ */
+ emc_dbg(emc, STEPS, "Step 17\n");
+
+ if (opt_dvfs_mode == MAN_SR && dram_type != DRAM_TYPE_LPDDR4)
+ ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
+
+ /*
+ * Step 18:
+ * Send MRWs to LPDDR3/DDR3.
+ */
+ emc_dbg(emc, STEPS, "Step 18\n");
+
+ if (dram_type == DRAM_TYPE_LPDDR2) {
+ ccfifo_writel(emc, next->emc_mrw2, EMC_MRW2, 0);
+ ccfifo_writel(emc, next->emc_mrw, EMC_MRW, 0);
+ if (is_lpddr3)
+ ccfifo_writel(emc, next->emc_mrw4, EMC_MRW4, 0);
+ } else if (dram_type == DRAM_TYPE_DDR3) {
+ if (opt_dll_mode)
+ ccfifo_writel(emc, next->emc_emrs &
+ ~EMC_EMRS_USE_EMRS_LONG_CNT, EMC_EMRS, 0);
+ ccfifo_writel(emc, next->emc_emrs2 &
+ ~EMC_EMRS2_USE_EMRS2_LONG_CNT, EMC_EMRS2, 0);
+ ccfifo_writel(emc, next->emc_mrs |
+ EMC_EMRS_USE_EMRS_LONG_CNT, EMC_MRS, 0);
+ }
+
+ /*
+ * Step 19:
+ * ZQCAL for LPDDR3/DDR3
+ */
+ emc_dbg(emc, STEPS, "Step 19\n");
+
+ if (opt_zcal_en_cc) {
+ if (dram_type == DRAM_TYPE_LPDDR2) {
+ value = opt_cc_short_zcal ? 90000 : 360000;
+ value = div_o3(value, dst_clk_period);
+ value = value <<
+ EMC_MRS_WAIT_CNT2_MRS_EXT2_WAIT_CNT_SHIFT |
+ value <<
+ EMC_MRS_WAIT_CNT2_MRS_EXT1_WAIT_CNT_SHIFT;
+ ccfifo_writel(emc, value, EMC_MRS_WAIT_CNT2, 0);
+
+ value = opt_cc_short_zcal ? 0x56 : 0xab;
+ ccfifo_writel(emc, 2 << EMC_MRW_MRW_DEV_SELECTN_SHIFT |
+ EMC_MRW_USE_MRW_EXT_CNT |
+ 10 << EMC_MRW_MRW_MA_SHIFT |
+ value << EMC_MRW_MRW_OP_SHIFT,
+ EMC_MRW, 0);
+
+ if (emc->num_devices > 1) {
+ value = 1 << EMC_MRW_MRW_DEV_SELECTN_SHIFT |
+ EMC_MRW_USE_MRW_EXT_CNT |
+ 10 << EMC_MRW_MRW_MA_SHIFT |
+ value << EMC_MRW_MRW_OP_SHIFT;
+ ccfifo_writel(emc, value, EMC_MRW, 0);
+ }
+ } else if (dram_type == DRAM_TYPE_DDR3) {
+ value = opt_cc_short_zcal ? 0 : EMC_ZQ_CAL_LONG;
+
+ ccfifo_writel(emc, value |
+ 2 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
+ 0);
+
+ if (emc->num_devices > 1) {
+ value = value | 1 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_CAL_CMD;
+ ccfifo_writel(emc, value, EMC_ZQ_CAL, 0);
+ }
+ }
+ }
+
+ if (bg_reg_mode_change) {
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+
+ if (ramp_up_wait <= 1250000)
+ delay = (1250000 - ramp_up_wait) / dst_clk_period;
+ else
+ delay = 0;
+
+ ccfifo_writel(emc,
+ next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX],
+ EMC_PMACRO_BG_BIAS_CTRL_0, delay);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+ }
+
+ /*
+ * Step 20:
+ * Issue ref and optional QRST.
+ */
+ emc_dbg(emc, STEPS, "Step 20\n");
+
+ if (dram_type != DRAM_TYPE_LPDDR4)
+ ccfifo_writel(emc, 0, EMC_REF, 0);
+
+ if (opt_do_sw_qrst) {
+ ccfifo_writel(emc, 1, EMC_ISSUE_QRST, 0);
+ ccfifo_writel(emc, 0, EMC_ISSUE_QRST, 2);
+ }
+
+ /*
+ * Step 21:
+ * Restore ZCAL and ZCAL interval.
+ */
+ emc_dbg(emc, STEPS, "Step 21\n");
+
+ if (save_restore_clkstop_pd || opt_zcal_en_cc) {
+ ccfifo_writel(emc, emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE,
+ EMC_DBG, 0);
+ if (opt_zcal_en_cc && dram_type != DRAM_TYPE_LPDDR4)
+ ccfifo_writel(emc, next->burst_regs[EMC_ZCAL_INTERVAL_INDEX],
+ EMC_ZCAL_INTERVAL, 0);
+
+ if (save_restore_clkstop_pd)
+ ccfifo_writel(emc, next->burst_regs[EMC_CFG_INDEX] &
+ ~EMC_CFG_DYN_SELF_REF,
+ EMC_CFG, 0);
+ ccfifo_writel(emc, emc_dbg, EMC_DBG, 0);
+ }
+
+ /*
+ * Step 22:
+ * Restore EMC_CFG_PIPE_CLK.
+ */
+ emc_dbg(emc, STEPS, "Step 22\n");
+
+ ccfifo_writel(emc, emc_cfg_pipe_clk, EMC_CFG_PIPE_CLK, 0);
+
+ if (bg_reg_mode_change) {
+ if (enable_bg_reg)
+ emc_writel(emc,
+ next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ ~EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD,
+ EMC_PMACRO_BG_BIAS_CTRL_0);
+ else
+ emc_writel(emc,
+ next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ ~EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD,
+ EMC_PMACRO_BG_BIAS_CTRL_0);
+ }
+
+ /*
+ * Step 23:
+ */
+ emc_dbg(emc, STEPS, "Step 23\n");
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
+ value = (value & ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK) |
+ (2 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+
+ tegra210_emc_do_clock_change(emc, clksrc);
+
+ /*
+ * Step 24:
+ * Save training results. Removed.
+ */
+
+ /*
+ * Step 25:
+ * Program MC updown registers.
+ */
+ emc_dbg(emc, STEPS, "Step 25\n");
+
+ if (next->rate > last->rate) {
+ for (i = 0; i < next->num_up_down; i++)
+ mc_writel(emc->mc, next->la_scale_regs[i],
+ emc->offsets->la_scale[i]);
+
+ tegra210_emc_timing_update(emc);
+ }
+
+ /*
+ * Step 26:
+ * Restore ZCAL registers.
+ */
+ emc_dbg(emc, STEPS, "Step 26\n");
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc, next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX],
+ EMC_ZCAL_WAIT_CNT);
+ emc_writel(emc, next->burst_regs[EMC_ZCAL_INTERVAL_INDEX],
+ EMC_ZCAL_INTERVAL);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+ }
+
+ if (dram_type != DRAM_TYPE_LPDDR4 && opt_zcal_en_cc &&
+ !opt_short_zcal && opt_cc_short_zcal) {
+ udelay(2);
+
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ if (dram_type == DRAM_TYPE_LPDDR2)
+ emc_writel(emc, next->burst_regs[EMC_MRS_WAIT_CNT_INDEX],
+ EMC_MRS_WAIT_CNT);
+ else if (dram_type == DRAM_TYPE_DDR3)
+ emc_writel(emc, next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX],
+ EMC_ZCAL_WAIT_CNT);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+ }
+
+ /*
+ * Step 27:
+ * Restore EMC_CFG, FDPD registers.
+ */
+ emc_dbg(emc, STEPS, "Step 27\n");
+
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc, next->burst_regs[EMC_CFG_INDEX], EMC_CFG);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+ emc_writel(emc, next->emc_fdpd_ctrl_cmd_no_ramp,
+ EMC_FDPD_CTRL_CMD_NO_RAMP);
+ emc_writel(emc, next->emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
+
+ /*
+ * Step 28:
+ * Training recover. Removed.
+ */
+ emc_dbg(emc, STEPS, "Step 28\n");
+
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc,
+ next->burst_regs[EMC_PMACRO_AUTOCAL_CFG_COMMON_INDEX],
+ EMC_PMACRO_AUTOCAL_CFG_COMMON);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+
+ /*
+ * Step 29:
+ * Power fix WAR.
+ */
+ emc_dbg(emc, STEPS, "Step 29\n");
+
+ emc_writel(emc, EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE0 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE1 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE2 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE3 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE4 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE5 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE6 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE7,
+ EMC_PMACRO_CFG_PM_GLOBAL_0);
+ emc_writel(emc, EMC_PMACRO_TRAINING_CTRL_0_CH0_TRAINING_E_WRPTR,
+ EMC_PMACRO_TRAINING_CTRL_0);
+ emc_writel(emc, EMC_PMACRO_TRAINING_CTRL_1_CH1_TRAINING_E_WRPTR,
+ EMC_PMACRO_TRAINING_CTRL_1);
+ emc_writel(emc, 0, EMC_PMACRO_CFG_PM_GLOBAL_0);
+
+ /*
+ * Step 30:
+ * Re-enable autocal.
+ */
+ emc_dbg(emc, STEPS, "Step 30: Re-enable DLL and AUTOCAL\n");
+
+ if (next->burst_regs[EMC_CFG_DIG_DLL_INDEX] & EMC_CFG_DIG_DLL_CFG_DLL_EN) {
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_EN;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;
+ value = (value & ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK) |
+ (2 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+ tegra210_emc_timing_update(emc);
+ }
+
+ emc_writel(emc, next->emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
+
+ /* Done! Yay. */
+}
+
+const struct tegra210_emc_sequence tegra210_emc_r21021 = {
+ .revision = 0x7,
+ .set_clock = tegra210_emc_r21021_set_clock,
+ .periodic_compensation = tegra210_emc_r21021_periodic_compensation,
+};
diff --git a/drivers/memory/tegra/tegra210-emc-core.c b/drivers/memory/tegra/tegra210-emc-core.c
new file mode 100644
index 000000000..3300bde47
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-emc-core.c
@@ -0,0 +1,2064 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/mc.h>
+
+#include "tegra210-emc.h"
+#include "tegra210-mc.h"
+
+/* CLK_RST_CONTROLLER_CLK_SOURCE_EMC */
+#define EMC_CLK_EMC_2X_CLK_SRC_SHIFT 29
+#define EMC_CLK_EMC_2X_CLK_SRC_MASK \
+ (0x7 << EMC_CLK_EMC_2X_CLK_SRC_SHIFT)
+#define EMC_CLK_SOURCE_PLLM_LJ 0x4
+#define EMC_CLK_SOURCE_PLLMB_LJ 0x5
+#define EMC_CLK_FORCE_CC_TRIGGER BIT(27)
+#define EMC_CLK_MC_EMC_SAME_FREQ BIT(16)
+#define EMC_CLK_EMC_2X_CLK_DIVISOR_SHIFT 0
+#define EMC_CLK_EMC_2X_CLK_DIVISOR_MASK \
+ (0xff << EMC_CLK_EMC_2X_CLK_DIVISOR_SHIFT)
+
+/* CLK_RST_CONTROLLER_CLK_SOURCE_EMC_DLL */
+#define DLL_CLK_EMC_DLL_CLK_SRC_SHIFT 29
+#define DLL_CLK_EMC_DLL_CLK_SRC_MASK \
+ (0x7 << DLL_CLK_EMC_DLL_CLK_SRC_SHIFT)
+#define DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT 10
+#define DLL_CLK_EMC_DLL_DDLL_CLK_SEL_MASK \
+ (0x3 << DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT)
+#define PLLM_VCOA 0
+#define PLLM_VCOB 1
+#define EMC_DLL_SWITCH_OUT 2
+#define DLL_CLK_EMC_DLL_CLK_DIVISOR_SHIFT 0
+#define DLL_CLK_EMC_DLL_CLK_DIVISOR_MASK \
+ (0xff << DLL_CLK_EMC_DLL_CLK_DIVISOR_SHIFT)
+
+/* MC_EMEM_ARB_MISC0 */
+#define MC_EMEM_ARB_MISC0_EMC_SAME_FREQ BIT(27)
+
+/* EMC_DATA_BRLSHFT_X */
+#define EMC0_EMC_DATA_BRLSHFT_0_INDEX 2
+#define EMC1_EMC_DATA_BRLSHFT_0_INDEX 3
+#define EMC0_EMC_DATA_BRLSHFT_1_INDEX 4
+#define EMC1_EMC_DATA_BRLSHFT_1_INDEX 5
+
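+/*
+ * TRIM_REG() computes the effective output delay for one byte lane: the
+ * long DQ DDLL trim from the trim registers plus the per-byte barrel
+ * shifter setting, scaled at 64 DDLL taps per barrel-shift step.
+ */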
+#define TRIM_REG(chan, rank, reg, byte) \
+ (((EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## reg ## \
+ _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte ## _MASK & \
+ next->trim_regs[EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## \
+ rank ## _ ## reg ## _INDEX]) >> \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## reg ## \
+ _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte ## _SHIFT) \
+ + \
+ (((EMC_DATA_BRLSHFT_ ## rank ## _RANK ## rank ## _BYTE ## \
+ byte ## _DATA_BRLSHFT_MASK & \
+ next->trim_perch_regs[EMC ## chan ## \
+ _EMC_DATA_BRLSHFT_ ## rank ## _INDEX]) >> \
+ EMC_DATA_BRLSHFT_ ## rank ## _RANK ## rank ## _BYTE ## \
+ byte ## _DATA_BRLSHFT_SHIFT) * 64))
+
+#define CALC_TEMP(rank, reg, byte1, byte2, n) \
+ (((new[n] << EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## \
+ reg ## _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte1 ## _SHIFT) & \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## reg ## \
+ _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte1 ## _MASK) \
+ | \
+ ((new[n + 1] << EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ##\
+ reg ## _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte2 ## _SHIFT) & \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## reg ## \
+ _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte2 ## _MASK))
+
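+/*
+ * Scale the 16-bit refresh field in the low half of a register value by
+ * the given speedup factor while leaving the upper control bits untouched.
+ */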
+#define REFRESH_SPEEDUP(value, speedup) \
+ (((value) & 0xffff0000) | ((value) & 0xffff) * (speedup))
+
+#define LPDDR2_MR4_SRR GENMASK(2, 0)
+
+static const struct tegra210_emc_sequence *tegra210_emc_sequences[] = {
+ &tegra210_emc_r21021,
+};
+
+static const struct tegra210_emc_table_register_offsets
+tegra210_emc_table_register_offsets = {
+ .burst = {
+ EMC_RC,
+ EMC_RFC,
+ EMC_RFCPB,
+ EMC_REFCTRL2,
+ EMC_RFC_SLR,
+ EMC_RAS,
+ EMC_RP,
+ EMC_R2W,
+ EMC_W2R,
+ EMC_R2P,
+ EMC_W2P,
+ EMC_R2R,
+ EMC_TPPD,
+ EMC_CCDMW,
+ EMC_RD_RCD,
+ EMC_WR_RCD,
+ EMC_RRD,
+ EMC_REXT,
+ EMC_WEXT,
+ EMC_WDV_CHK,
+ EMC_WDV,
+ EMC_WSV,
+ EMC_WEV,
+ EMC_WDV_MASK,
+ EMC_WS_DURATION,
+ EMC_WE_DURATION,
+ EMC_QUSE,
+ EMC_QUSE_WIDTH,
+ EMC_IBDLY,
+ EMC_OBDLY,
+ EMC_EINPUT,
+ EMC_MRW6,
+ EMC_EINPUT_DURATION,
+ EMC_PUTERM_EXTRA,
+ EMC_PUTERM_WIDTH,
+ EMC_QRST,
+ EMC_QSAFE,
+ EMC_RDV,
+ EMC_RDV_MASK,
+ EMC_RDV_EARLY,
+ EMC_RDV_EARLY_MASK,
+ EMC_REFRESH,
+ EMC_BURST_REFRESH_NUM,
+ EMC_PRE_REFRESH_REQ_CNT,
+ EMC_PDEX2WR,
+ EMC_PDEX2RD,
+ EMC_PCHG2PDEN,
+ EMC_ACT2PDEN,
+ EMC_AR2PDEN,
+ EMC_RW2PDEN,
+ EMC_CKE2PDEN,
+ EMC_PDEX2CKE,
+ EMC_PDEX2MRR,
+ EMC_TXSR,
+ EMC_TXSRDLL,
+ EMC_TCKE,
+ EMC_TCKESR,
+ EMC_TPD,
+ EMC_TFAW,
+ EMC_TRPAB,
+ EMC_TCLKSTABLE,
+ EMC_TCLKSTOP,
+ EMC_MRW7,
+ EMC_TREFBW,
+ EMC_ODT_WRITE,
+ EMC_FBIO_CFG5,
+ EMC_FBIO_CFG7,
+ EMC_CFG_DIG_DLL,
+ EMC_CFG_DIG_DLL_PERIOD,
+ EMC_PMACRO_IB_RXRT,
+ EMC_CFG_PIPE_1,
+ EMC_CFG_PIPE_2,
+ EMC_PMACRO_QUSE_DDLL_RANK0_4,
+ EMC_PMACRO_QUSE_DDLL_RANK0_5,
+ EMC_PMACRO_QUSE_DDLL_RANK1_4,
+ EMC_PMACRO_QUSE_DDLL_RANK1_5,
+ EMC_MRW8,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_4,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_5,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_3,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_4,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_5,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_3,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_4,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_5,
+ EMC_PMACRO_DDLL_LONG_CMD_0,
+ EMC_PMACRO_DDLL_LONG_CMD_1,
+ EMC_PMACRO_DDLL_LONG_CMD_2,
+ EMC_PMACRO_DDLL_LONG_CMD_3,
+ EMC_PMACRO_DDLL_LONG_CMD_4,
+ EMC_PMACRO_DDLL_SHORT_CMD_0,
+ EMC_PMACRO_DDLL_SHORT_CMD_1,
+ EMC_PMACRO_DDLL_SHORT_CMD_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_3,
+ EMC_TXDSRVTTGEN,
+ EMC_FDPD_CTRL_DQ,
+ EMC_FDPD_CTRL_CMD,
+ EMC_FBIO_SPARE,
+ EMC_ZCAL_INTERVAL,
+ EMC_ZCAL_WAIT_CNT,
+ EMC_MRS_WAIT_CNT,
+ EMC_MRS_WAIT_CNT2,
+ EMC_AUTO_CAL_CHANNEL,
+ EMC_DLL_CFG_0,
+ EMC_DLL_CFG_1,
+ EMC_PMACRO_AUTOCAL_CFG_COMMON,
+ EMC_PMACRO_ZCTRL,
+ EMC_CFG,
+ EMC_CFG_PIPE,
+ EMC_DYN_SELF_REF_CONTROL,
+ EMC_QPOP,
+ EMC_DQS_BRLSHFT_0,
+ EMC_DQS_BRLSHFT_1,
+ EMC_CMD_BRLSHFT_2,
+ EMC_CMD_BRLSHFT_3,
+ EMC_PMACRO_PAD_CFG_CTRL,
+ EMC_PMACRO_DATA_PAD_RX_CTRL,
+ EMC_PMACRO_CMD_PAD_RX_CTRL,
+ EMC_PMACRO_DATA_RX_TERM_MODE,
+ EMC_PMACRO_CMD_RX_TERM_MODE,
+ EMC_PMACRO_CMD_PAD_TX_CTRL,
+ EMC_PMACRO_DATA_PAD_TX_CTRL,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL,
+ EMC_PMACRO_VTTGEN_CTRL_0,
+ EMC_PMACRO_VTTGEN_CTRL_1,
+ EMC_PMACRO_VTTGEN_CTRL_2,
+ EMC_PMACRO_BRICK_CTRL_RFU1,
+ EMC_PMACRO_CMD_BRICK_CTRL_FDPD,
+ EMC_PMACRO_BRICK_CTRL_RFU2,
+ EMC_PMACRO_DATA_BRICK_CTRL_FDPD,
+ EMC_PMACRO_BG_BIAS_CTRL_0,
+ EMC_CFG_3,
+ EMC_PMACRO_TX_PWRD_0,
+ EMC_PMACRO_TX_PWRD_1,
+ EMC_PMACRO_TX_PWRD_2,
+ EMC_PMACRO_TX_PWRD_3,
+ EMC_PMACRO_TX_PWRD_4,
+ EMC_PMACRO_TX_PWRD_5,
+ EMC_CONFIG_SAMPLE_DELAY,
+ EMC_PMACRO_TX_SEL_CLK_SRC_0,
+ EMC_PMACRO_TX_SEL_CLK_SRC_1,
+ EMC_PMACRO_TX_SEL_CLK_SRC_2,
+ EMC_PMACRO_TX_SEL_CLK_SRC_3,
+ EMC_PMACRO_TX_SEL_CLK_SRC_4,
+ EMC_PMACRO_TX_SEL_CLK_SRC_5,
+ EMC_PMACRO_DDLL_BYPASS,
+ EMC_PMACRO_DDLL_PWRD_0,
+ EMC_PMACRO_DDLL_PWRD_1,
+ EMC_PMACRO_DDLL_PWRD_2,
+ EMC_PMACRO_CMD_CTRL_0,
+ EMC_PMACRO_CMD_CTRL_1,
+ EMC_PMACRO_CMD_CTRL_2,
+ EMC_TR_TIMING_0,
+ EMC_TR_DVFS,
+ EMC_TR_CTRL_1,
+ EMC_TR_RDV,
+ EMC_TR_QPOP,
+ EMC_TR_RDV_MASK,
+ EMC_MRW14,
+ EMC_TR_QSAFE,
+ EMC_TR_QRST,
+ EMC_TRAINING_CTRL,
+ EMC_TRAINING_SETTLE,
+ EMC_TRAINING_VREF_SETTLE,
+ EMC_TRAINING_CA_FINE_CTRL,
+ EMC_TRAINING_CA_CTRL_MISC,
+ EMC_TRAINING_CA_CTRL_MISC1,
+ EMC_TRAINING_CA_VREF_CTRL,
+ EMC_TRAINING_QUSE_CORS_CTRL,
+ EMC_TRAINING_QUSE_FINE_CTRL,
+ EMC_TRAINING_QUSE_CTRL_MISC,
+ EMC_TRAINING_QUSE_VREF_CTRL,
+ EMC_TRAINING_READ_FINE_CTRL,
+ EMC_TRAINING_READ_CTRL_MISC,
+ EMC_TRAINING_READ_VREF_CTRL,
+ EMC_TRAINING_WRITE_FINE_CTRL,
+ EMC_TRAINING_WRITE_CTRL_MISC,
+ EMC_TRAINING_WRITE_VREF_CTRL,
+ EMC_TRAINING_MPC,
+ EMC_MRW15,
+ },
+ .trim = {
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_0,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_1,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_2,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_3,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_0,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_1,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_2,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_3,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_2,
+ EMC_PMACRO_IB_VREF_DQS_0,
+ EMC_PMACRO_IB_VREF_DQS_1,
+ EMC_PMACRO_IB_VREF_DQ_0,
+ EMC_PMACRO_IB_VREF_DQ_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_4,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_5,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_2,
+ EMC_PMACRO_QUSE_DDLL_RANK0_0,
+ EMC_PMACRO_QUSE_DDLL_RANK0_1,
+ EMC_PMACRO_QUSE_DDLL_RANK0_2,
+ EMC_PMACRO_QUSE_DDLL_RANK0_3,
+ EMC_PMACRO_QUSE_DDLL_RANK1_0,
+ EMC_PMACRO_QUSE_DDLL_RANK1_1,
+ EMC_PMACRO_QUSE_DDLL_RANK1_2,
+ EMC_PMACRO_QUSE_DDLL_RANK1_3
+ },
+ .burst_mc = {
+ MC_EMEM_ARB_CFG,
+ MC_EMEM_ARB_OUTSTANDING_REQ,
+ MC_EMEM_ARB_REFPB_HP_CTRL,
+ MC_EMEM_ARB_REFPB_BANK_CTRL,
+ MC_EMEM_ARB_TIMING_RCD,
+ MC_EMEM_ARB_TIMING_RP,
+ MC_EMEM_ARB_TIMING_RC,
+ MC_EMEM_ARB_TIMING_RAS,
+ MC_EMEM_ARB_TIMING_FAW,
+ MC_EMEM_ARB_TIMING_RRD,
+ MC_EMEM_ARB_TIMING_RAP2PRE,
+ MC_EMEM_ARB_TIMING_WAP2PRE,
+ MC_EMEM_ARB_TIMING_R2R,
+ MC_EMEM_ARB_TIMING_W2W,
+ MC_EMEM_ARB_TIMING_R2W,
+ MC_EMEM_ARB_TIMING_CCDMW,
+ MC_EMEM_ARB_TIMING_W2R,
+ MC_EMEM_ARB_TIMING_RFCPB,
+ MC_EMEM_ARB_DA_TURNS,
+ MC_EMEM_ARB_DA_COVERS,
+ MC_EMEM_ARB_MISC0,
+ MC_EMEM_ARB_MISC1,
+ MC_EMEM_ARB_MISC2,
+ MC_EMEM_ARB_RING1_THROTTLE,
+ MC_EMEM_ARB_DHYST_CTRL,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_0,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_1,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_2,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_3,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_4,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_5,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_6,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_7,
+ },
+ .la_scale = {
+ MC_MLL_MPCORER_PTSA_RATE,
+ MC_FTOP_PTSA_RATE,
+ MC_PTSA_GRANT_DECREMENT,
+ MC_LATENCY_ALLOWANCE_XUSB_0,
+ MC_LATENCY_ALLOWANCE_XUSB_1,
+ MC_LATENCY_ALLOWANCE_TSEC_0,
+ MC_LATENCY_ALLOWANCE_SDMMCA_0,
+ MC_LATENCY_ALLOWANCE_SDMMCAA_0,
+ MC_LATENCY_ALLOWANCE_SDMMC_0,
+ MC_LATENCY_ALLOWANCE_SDMMCAB_0,
+ MC_LATENCY_ALLOWANCE_PPCS_0,
+ MC_LATENCY_ALLOWANCE_PPCS_1,
+ MC_LATENCY_ALLOWANCE_MPCORE_0,
+ MC_LATENCY_ALLOWANCE_HC_0,
+ MC_LATENCY_ALLOWANCE_HC_1,
+ MC_LATENCY_ALLOWANCE_AVPC_0,
+ MC_LATENCY_ALLOWANCE_GPU_0,
+ MC_LATENCY_ALLOWANCE_GPU2_0,
+ MC_LATENCY_ALLOWANCE_NVENC_0,
+ MC_LATENCY_ALLOWANCE_NVDEC_0,
+ MC_LATENCY_ALLOWANCE_VIC_0,
+ MC_LATENCY_ALLOWANCE_VI2_0,
+ MC_LATENCY_ALLOWANCE_ISP2_0,
+ MC_LATENCY_ALLOWANCE_ISP2_1,
+ },
+ .burst_per_channel = {
+ { .bank = 0, .offset = EMC_MRW10, },
+ { .bank = 1, .offset = EMC_MRW10, },
+ { .bank = 0, .offset = EMC_MRW11, },
+ { .bank = 1, .offset = EMC_MRW11, },
+ { .bank = 0, .offset = EMC_MRW12, },
+ { .bank = 1, .offset = EMC_MRW12, },
+ { .bank = 0, .offset = EMC_MRW13, },
+ { .bank = 1, .offset = EMC_MRW13, },
+ },
+ .trim_per_channel = {
+ { .bank = 0, .offset = EMC_CMD_BRLSHFT_0, },
+ { .bank = 1, .offset = EMC_CMD_BRLSHFT_1, },
+ { .bank = 0, .offset = EMC_DATA_BRLSHFT_0, },
+ { .bank = 1, .offset = EMC_DATA_BRLSHFT_0, },
+ { .bank = 0, .offset = EMC_DATA_BRLSHFT_1, },
+ { .bank = 1, .offset = EMC_DATA_BRLSHFT_1, },
+ { .bank = 0, .offset = EMC_QUSE_BRLSHFT_0, },
+ { .bank = 1, .offset = EMC_QUSE_BRLSHFT_1, },
+ { .bank = 0, .offset = EMC_QUSE_BRLSHFT_2, },
+ { .bank = 1, .offset = EMC_QUSE_BRLSHFT_3, },
+ },
+ .vref_per_channel = {
+ {
+ .bank = 0,
+ .offset = EMC_TRAINING_OPT_DQS_IB_VREF_RANK0,
+ }, {
+ .bank = 1,
+ .offset = EMC_TRAINING_OPT_DQS_IB_VREF_RANK0,
+ }, {
+ .bank = 0,
+ .offset = EMC_TRAINING_OPT_DQS_IB_VREF_RANK1,
+ }, {
+ .bank = 1,
+ .offset = EMC_TRAINING_OPT_DQS_IB_VREF_RANK1,
+ },
+ },
+};
+
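+/*
+ * Timer callback for periodic training: re-run the clock change sequence's
+ * periodic compensation (if any) under the EMC lock and re-arm the timer
+ * for the next training interval.
+ */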
+static void tegra210_emc_train(struct timer_list *timer)
+{
+ struct tegra210_emc *emc = from_timer(emc, timer, training);
+ unsigned long flags;
+
+ if (!emc->last)
+ return;
+
+ spin_lock_irqsave(&emc->lock, flags);
+
+ if (emc->sequence->periodic_compensation)
+ emc->sequence->periodic_compensation(emc);
+
+ spin_unlock_irqrestore(&emc->lock, flags);
+
+ mod_timer(&emc->training,
+ jiffies + msecs_to_jiffies(emc->training_interval));
+}
+
+static void tegra210_emc_training_start(struct tegra210_emc *emc)
+{
+ mod_timer(&emc->training,
+ jiffies + msecs_to_jiffies(emc->training_interval));
+}
+
+static void tegra210_emc_training_stop(struct tegra210_emc *emc)
+{
+ del_timer(&emc->training);
+}
+
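+/*
+ * Read the MR4 refresh-rate/temperature field from every DRAM device and
+ * return the highest value, which is used to pick the refresh derating
+ * state.
+ */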
+static unsigned int tegra210_emc_get_temperature(struct tegra210_emc *emc)
+{
+ unsigned long flags;
+ u32 value, max = 0;
+ unsigned int i;
+
+ spin_lock_irqsave(&emc->lock, flags);
+
+ for (i = 0; i < emc->num_devices; i++) {
+ value = tegra210_emc_mrr_read(emc, i, 4);
+
+ if (value & BIT(7))
+ dev_dbg(emc->dev,
+ "sensor reading changed for device %u: %08x\n",
+ i, value);
+
+ value = FIELD_GET(LPDDR2_MR4_SRR, value);
+ if (value > max)
+ max = value;
+ }
+
+ spin_unlock_irqrestore(&emc->lock, flags);
+
+ return max;
+}
+
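+/*
+ * Polling timer callback: map the DRAM temperature state (0-7, either read
+ * from MR4 or overridden via debugfs) to the nominal/2x/4x/throttle refresh
+ * setting and re-arm the timer while polling is enabled.
+ */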
+static void tegra210_emc_poll_refresh(struct timer_list *timer)
+{
+ struct tegra210_emc *emc = from_timer(emc, timer, refresh_timer);
+ unsigned int temperature;
+
+ if (!emc->debugfs.temperature)
+ temperature = tegra210_emc_get_temperature(emc);
+ else
+ temperature = emc->debugfs.temperature;
+
+ if (temperature == emc->temperature)
+ goto reset;
+
+ switch (temperature) {
+ case 0 ... 3:
+ /* temperature is fine, using regular refresh */
+ dev_dbg(emc->dev, "switching to nominal refresh...\n");
+ tegra210_emc_set_refresh(emc, TEGRA210_EMC_REFRESH_NOMINAL);
+ break;
+
+ case 4:
+ dev_dbg(emc->dev, "switching to 2x refresh...\n");
+ tegra210_emc_set_refresh(emc, TEGRA210_EMC_REFRESH_2X);
+ break;
+
+ case 5:
+ dev_dbg(emc->dev, "switching to 4x refresh...\n");
+ tegra210_emc_set_refresh(emc, TEGRA210_EMC_REFRESH_4X);
+ break;
+
+ case 6 ... 7:
+ dev_dbg(emc->dev, "switching to throttle refresh...\n");
+ tegra210_emc_set_refresh(emc, TEGRA210_EMC_REFRESH_THROTTLE);
+ break;
+
+ default:
+ WARN(1, "invalid DRAM temperature state %u\n", temperature);
+ return;
+ }
+
+ emc->temperature = temperature;
+
+reset:
+ if (atomic_read(&emc->refresh_poll) > 0) {
+ unsigned int interval = emc->refresh_poll_interval;
+ unsigned int timeout = msecs_to_jiffies(interval);
+
+ mod_timer(&emc->refresh_timer, jiffies + timeout);
+ }
+}
+
+static void tegra210_emc_poll_refresh_stop(struct tegra210_emc *emc)
+{
+ atomic_set(&emc->refresh_poll, 0);
+ del_timer_sync(&emc->refresh_timer);
+}
+
+static void tegra210_emc_poll_refresh_start(struct tegra210_emc *emc)
+{
+ atomic_set(&emc->refresh_poll, 1);
+
+ mod_timer(&emc->refresh_timer,
+ jiffies + msecs_to_jiffies(emc->refresh_poll_interval));
+}
+
+static int tegra210_emc_cd_max_state(struct thermal_cooling_device *cd,
+ unsigned long *state)
+{
+ *state = 1;
+
+ return 0;
+}
+
+static int tegra210_emc_cd_get_state(struct thermal_cooling_device *cd,
+ unsigned long *state)
+{
+ struct tegra210_emc *emc = cd->devdata;
+
+ *state = atomic_read(&emc->refresh_poll);
+
+ return 0;
+}
+
+static int tegra210_emc_cd_set_state(struct thermal_cooling_device *cd,
+ unsigned long state)
+{
+ struct tegra210_emc *emc = cd->devdata;
+
+ if (state == atomic_read(&emc->refresh_poll))
+ return 0;
+
+ if (state)
+ tegra210_emc_poll_refresh_start(emc);
+ else
+ tegra210_emc_poll_refresh_stop(emc);
+
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops tegra210_emc_cd_ops = {
+ .get_max_state = tegra210_emc_cd_max_state,
+ .get_cur_state = tegra210_emc_cd_get_state,
+ .set_cur_state = tegra210_emc_cd_set_state,
+};
+
+static void tegra210_emc_set_clock(struct tegra210_emc *emc, u32 clksrc)
+{
+ emc->sequence->set_clock(emc, clksrc);
+
+ if (emc->next->periodic_training)
+ tegra210_emc_training_start(emc);
+ else
+ tegra210_emc_training_stop(emc);
+}
+
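+/*
+ * Mirror the EMC clock source and divisor of the new clock selection into
+ * the DLL clock configuration and enable or disable the DLL clock as
+ * required by the target timing.
+ */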
+static void tegra210_change_dll_src(struct tegra210_emc *emc,
+ u32 clksrc)
+{
+ u32 dll_setting = emc->next->dll_clk_src;
+ u32 emc_clk_src;
+ u32 emc_clk_div;
+
+ emc_clk_src = (clksrc & EMC_CLK_EMC_2X_CLK_SRC_MASK) >>
+ EMC_CLK_EMC_2X_CLK_SRC_SHIFT;
+ emc_clk_div = (clksrc & EMC_CLK_EMC_2X_CLK_DIVISOR_MASK) >>
+ EMC_CLK_EMC_2X_CLK_DIVISOR_SHIFT;
+
+ dll_setting &= ~(DLL_CLK_EMC_DLL_CLK_SRC_MASK |
+ DLL_CLK_EMC_DLL_CLK_DIVISOR_MASK);
+ dll_setting |= emc_clk_src << DLL_CLK_EMC_DLL_CLK_SRC_SHIFT;
+ dll_setting |= emc_clk_div << DLL_CLK_EMC_DLL_CLK_DIVISOR_SHIFT;
+
+ dll_setting &= ~DLL_CLK_EMC_DLL_DDLL_CLK_SEL_MASK;
+ if (emc_clk_src == EMC_CLK_SOURCE_PLLMB_LJ)
+ dll_setting |= (PLLM_VCOB <<
+ DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT);
+ else if (emc_clk_src == EMC_CLK_SOURCE_PLLM_LJ)
+ dll_setting |= (PLLM_VCOA <<
+ DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT);
+ else
+ dll_setting |= (EMC_DLL_SWITCH_OUT <<
+ DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT);
+
+ tegra210_clk_emc_dll_update_setting(dll_setting);
+
+ if (emc->next->clk_out_enb_x_0_clk_enb_emc_dll)
+ tegra210_clk_emc_dll_enable(true);
+ else
+ tegra210_clk_emc_dll_enable(false);
+}
+
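+/*
+ * Switch the refresh setting. If this requires moving between the nominal
+ * and derated tables, a full clock change is performed at the current rate;
+ * otherwise the refresh-related registers are adjusted in place and, for
+ * non-nominal settings, an immediate refresh is issued.
+ */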
+int tegra210_emc_set_refresh(struct tegra210_emc *emc,
+ enum tegra210_emc_refresh refresh)
+{
+ struct tegra210_emc_timing *timings;
+ unsigned long flags;
+
+ if ((emc->dram_type != DRAM_TYPE_LPDDR2 &&
+ emc->dram_type != DRAM_TYPE_LPDDR4) ||
+ !emc->last)
+ return -ENODEV;
+
+ if (refresh > TEGRA210_EMC_REFRESH_THROTTLE)
+ return -EINVAL;
+
+ if (refresh == emc->refresh)
+ return 0;
+
+ spin_lock_irqsave(&emc->lock, flags);
+
+ if (refresh == TEGRA210_EMC_REFRESH_THROTTLE && emc->derated)
+ timings = emc->derated;
+ else
+ timings = emc->nominal;
+
+ if (timings != emc->timings) {
+ unsigned int index = emc->last - emc->timings;
+ u32 clksrc;
+
+ clksrc = emc->provider.configs[index].value |
+ EMC_CLK_FORCE_CC_TRIGGER;
+
+ emc->next = &timings[index];
+ emc->timings = timings;
+
+ tegra210_emc_set_clock(emc, clksrc);
+ } else {
+ tegra210_emc_adjust_timing(emc, emc->last);
+ tegra210_emc_timing_update(emc);
+
+ if (refresh != TEGRA210_EMC_REFRESH_NOMINAL)
+ emc_writel(emc, EMC_REF_REF_CMD, EMC_REF);
+ }
+
+ spin_unlock_irqrestore(&emc->lock, flags);
+
+ return 0;
+}
+
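+/*
+ * Issue a mode register read (MRR) to the given chip, wait for the data to
+ * become valid on every channel and concatenate the per-channel 16-bit
+ * results into the return value.
+ */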
+u32 tegra210_emc_mrr_read(struct tegra210_emc *emc, unsigned int chip,
+ unsigned int address)
+{
+ u32 value, ret = 0;
+ unsigned int i;
+
+ value = (chip & EMC_MRR_DEV_SEL_MASK) << EMC_MRR_DEV_SEL_SHIFT |
+ (address & EMC_MRR_MA_MASK) << EMC_MRR_MA_SHIFT;
+ emc_writel(emc, value, EMC_MRR);
+
+ for (i = 0; i < emc->num_channels; i++)
+ WARN(tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_MRR_DIVLD, 1),
+ "Timed out waiting for MRR %u (ch=%u)\n", address, i);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ value = emc_channel_readl(emc, i, EMC_MRR);
+ value &= EMC_MRR_DATA_MASK;
+
+ ret = (ret << 16) | value;
+ }
+
+ return ret;
+}
+
+void tegra210_emc_do_clock_change(struct tegra210_emc *emc, u32 clksrc)
+{
+ int err;
+
+ mc_readl(emc->mc, MC_EMEM_ADR_CFG);
+ emc_readl(emc, EMC_INTSTATUS);
+
+ tegra210_clk_emc_update_setting(clksrc);
+
+ err = tegra210_emc_wait_for_update(emc, 0, EMC_INTSTATUS,
+ EMC_INTSTATUS_CLKCHANGE_COMPLETE,
+ true);
+ if (err)
+ dev_warn(emc->dev, "clock change completion error: %d\n", err);
+}
+
+struct tegra210_emc_timing *tegra210_emc_find_timing(struct tegra210_emc *emc,
+ unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++)
+ if (emc->timings[i].rate * 1000UL == rate)
+ return &emc->timings[i];
+
+ return NULL;
+}
+
+int tegra210_emc_wait_for_update(struct tegra210_emc *emc, unsigned int channel,
+ unsigned int offset, u32 bit_mask, bool state)
+{
+ unsigned int i;
+ u32 value;
+
+ for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; i++) {
+ value = emc_channel_readl(emc, channel, offset);
+ if (!!(value & bit_mask) == state)
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+void tegra210_emc_set_shadow_bypass(struct tegra210_emc *emc, int set)
+{
+ u32 emc_dbg = emc_readl(emc, EMC_DBG);
+
+ if (set)
+ emc_writel(emc, emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
+ else
+ emc_writel(emc, emc_dbg & ~EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
+}
+
+u32 tegra210_emc_get_dll_state(struct tegra210_emc_timing *next)
+{
+ if (next->emc_emrs & 0x1)
+ return 0;
+
+ return 1;
+}
+
+void tegra210_emc_timing_update(struct tegra210_emc *emc)
+{
+ unsigned int i;
+ int err = 0;
+
+ emc_writel(emc, 0x1, EMC_TIMING_CONTROL);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ err |= tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_TIMING_UPDATE_STALLED,
+ false);
+ }
+
+ if (err)
+ dev_warn(emc->dev, "timing update error: %d\n", err);
+}
+
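+/*
+ * Decode the encoded oscillator count: values below 0x40 are a multiple of
+ * 16 clocks, larger values select fixed counts of 2048, 4096 or 8192.
+ */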
+unsigned long tegra210_emc_actual_osc_clocks(u32 in)
+{
+ if (in < 0x40)
+ return in * 16;
+ else if (in < 0x80)
+ return 2048;
+ else if (in < 0xc0)
+ return 4096;
+ else
+ return 8192;
+}
+
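+/*
+ * Kick off periodic compensation by issuing an MPC request; the read-back
+ * ensures the write has reached the hardware before continuing.
+ */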
+void tegra210_emc_start_periodic_compensation(struct tegra210_emc *emc)
+{
+ u32 mpc_req = 0x4b;
+
+ emc_writel(emc, mpc_req, EMC_MPC);
+ mpc_req = emc_readl(emc, EMC_MPC);
+}
+
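+/*
+ * Recompute the value of an OB DDLL long DQ or DATA_BRLSHFT register for
+ * the given timing, applying the drift between the current and trained
+ * DRAM clock-tree values (scaled by the clock rate) whenever it exceeds
+ * the tree margin.
+ */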
+u32 tegra210_emc_compensate(struct tegra210_emc_timing *next, u32 offset)
+{
+ u32 temp = 0, rate = next->rate / 1000;
+ s32 delta[4], delta_taps[4];
+ s32 new[] = {
+ TRIM_REG(0, 0, 0, 0),
+ TRIM_REG(0, 0, 0, 1),
+ TRIM_REG(0, 0, 1, 2),
+ TRIM_REG(0, 0, 1, 3),
+
+ TRIM_REG(1, 0, 2, 4),
+ TRIM_REG(1, 0, 2, 5),
+ TRIM_REG(1, 0, 3, 6),
+ TRIM_REG(1, 0, 3, 7),
+
+ TRIM_REG(0, 1, 0, 0),
+ TRIM_REG(0, 1, 0, 1),
+ TRIM_REG(0, 1, 1, 2),
+ TRIM_REG(0, 1, 1, 3),
+
+ TRIM_REG(1, 1, 2, 4),
+ TRIM_REG(1, 1, 2, 5),
+ TRIM_REG(1, 1, 3, 6),
+ TRIM_REG(1, 1, 3, 7)
+ };
+ unsigned i;
+
+ switch (offset) {
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3:
+ case EMC_DATA_BRLSHFT_0:
+ delta[0] = 128 * (next->current_dram_clktree[C0D0U0] -
+ next->trained_dram_clktree[C0D0U0]);
+ delta[1] = 128 * (next->current_dram_clktree[C0D0U1] -
+ next->trained_dram_clktree[C0D0U1]);
+ delta[2] = 128 * (next->current_dram_clktree[C1D0U0] -
+ next->trained_dram_clktree[C1D0U0]);
+ delta[3] = 128 * (next->current_dram_clktree[C1D0U1] -
+ next->trained_dram_clktree[C1D0U1]);
+
+ delta_taps[0] = (delta[0] * (s32)rate) / 1000000;
+ delta_taps[1] = (delta[1] * (s32)rate) / 1000000;
+ delta_taps[2] = (delta[2] * (s32)rate) / 1000000;
+ delta_taps[3] = (delta[3] * (s32)rate) / 1000000;
+
+ for (i = 0; i < 4; i++) {
+ if ((delta_taps[i] > next->tree_margin) ||
+ (delta_taps[i] < (-1 * next->tree_margin))) {
+ new[i * 2] = new[i * 2] + delta_taps[i];
+ new[i * 2 + 1] = new[i * 2 + 1] +
+ delta_taps[i];
+ }
+ }
+
+ if (offset == EMC_DATA_BRLSHFT_0) {
+ for (i = 0; i < 8; i++)
+ new[i] = new[i] / 64;
+ } else {
+ for (i = 0; i < 8; i++)
+ new[i] = new[i] % 64;
+ }
+
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3:
+ case EMC_DATA_BRLSHFT_1:
+ delta[0] = 128 * (next->current_dram_clktree[C0D1U0] -
+ next->trained_dram_clktree[C0D1U0]);
+ delta[1] = 128 * (next->current_dram_clktree[C0D1U1] -
+ next->trained_dram_clktree[C0D1U1]);
+ delta[2] = 128 * (next->current_dram_clktree[C1D1U0] -
+ next->trained_dram_clktree[C1D1U0]);
+ delta[3] = 128 * (next->current_dram_clktree[C1D1U1] -
+ next->trained_dram_clktree[C1D1U1]);
+
+ delta_taps[0] = (delta[0] * (s32)rate) / 1000000;
+ delta_taps[1] = (delta[1] * (s32)rate) / 1000000;
+ delta_taps[2] = (delta[2] * (s32)rate) / 1000000;
+ delta_taps[3] = (delta[3] * (s32)rate) / 1000000;
+
+ for (i = 0; i < 4; i++) {
+ if ((delta_taps[i] > next->tree_margin) ||
+ (delta_taps[i] < (-1 * next->tree_margin))) {
+ new[8 + i * 2] = new[8 + i * 2] +
+ delta_taps[i];
+ new[8 + i * 2 + 1] = new[8 + i * 2 + 1] +
+ delta_taps[i];
+ }
+ }
+
+ if (offset == EMC_DATA_BRLSHFT_1) {
+ for (i = 0; i < 8; i++)
+ new[i + 8] = new[i + 8] / 64;
+ } else {
+ for (i = 0; i < 8; i++)
+ new[i + 8] = new[i + 8] % 64;
+ }
+
+ break;
+ }
+
+ switch (offset) {
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0:
+ temp = CALC_TEMP(0, 0, 0, 1, 0);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1:
+ temp = CALC_TEMP(0, 1, 2, 3, 2);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2:
+ temp = CALC_TEMP(0, 2, 4, 5, 4);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3:
+ temp = CALC_TEMP(0, 3, 6, 7, 6);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0:
+ temp = CALC_TEMP(1, 0, 0, 1, 8);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1:
+ temp = CALC_TEMP(1, 1, 2, 3, 10);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2:
+ temp = CALC_TEMP(1, 2, 4, 5, 12);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3:
+ temp = CALC_TEMP(1, 3, 6, 7, 14);
+ break;
+
+ case EMC_DATA_BRLSHFT_0:
+ temp = ((new[0] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_MASK) |
+ ((new[1] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_MASK) |
+ ((new[2] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_MASK) |
+ ((new[3] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_MASK) |
+ ((new[4] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_MASK) |
+ ((new[5] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_MASK) |
+ ((new[6] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_MASK) |
+ ((new[7] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_MASK);
+ break;
+
+ case EMC_DATA_BRLSHFT_1:
+ temp = ((new[8] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_MASK) |
+ ((new[9] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_MASK) |
+ ((new[10] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_MASK) |
+ ((new[11] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_MASK) |
+ ((new[12] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_MASK) |
+ ((new[13] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_MASK) |
+ ((new[14] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_MASK) |
+ ((new[15] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_MASK);
+ break;
+
+ default:
+ break;
+ }
+
+ return temp;
+}
+
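+/*
+ * Prelock the digital DLL: disable it, program the DLL configuration and
+ * calibration start trim for the target rate, switch the DLL clock source,
+ * then re-enable the DLL and wait for it to report a lock. Returns the
+ * locked DLL output value.
+ */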
+u32 tegra210_emc_dll_prelock(struct tegra210_emc *emc, u32 clksrc)
+{
+ unsigned int i;
+ u32 value;
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_MASK;
+ value |= (3 << EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_SHIFT);
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK;
+ value |= (3 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+ emc_writel(emc, 1, EMC_TIMING_CONTROL);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_TIMING_UPDATE_STALLED,
+ 0);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ while (true) {
+ value = emc_channel_readl(emc, i, EMC_CFG_DIG_DLL);
+ if ((value & EMC_CFG_DIG_DLL_CFG_DLL_EN) == 0)
+ break;
+ }
+ }
+
+ value = emc->next->burst_regs[EMC_DLL_CFG_0_INDEX];
+ emc_writel(emc, value, EMC_DLL_CFG_0);
+
+ value = emc_readl(emc, EMC_DLL_CFG_1);
+ value &= EMC_DLL_CFG_1_DDLLCAL_CTRL_START_TRIM_MASK;
+
+ if (emc->next->rate >= 400000 && emc->next->rate < 600000)
+ value |= 150;
+ else if (emc->next->rate >= 600000 && emc->next->rate < 800000)
+ value |= 100;
+ else if (emc->next->rate >= 800000 && emc->next->rate < 1000000)
+ value |= 70;
+ else if (emc->next->rate >= 1000000 && emc->next->rate < 1200000)
+ value |= 30;
+ else
+ value |= 20;
+
+ emc_writel(emc, value, EMC_DLL_CFG_1);
+
+ tegra210_change_dll_src(emc, clksrc);
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_EN;
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+
+ tegra210_emc_timing_update(emc);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ while (true) {
+ value = emc_channel_readl(emc, 0, EMC_CFG_DIG_DLL);
+ if (value & EMC_CFG_DIG_DLL_CFG_DLL_EN)
+ break;
+ }
+ }
+
+ while (true) {
+ value = emc_readl(emc, EMC_DIG_DLL_STATUS);
+
+ if ((value & EMC_DIG_DLL_STATUS_DLL_PRIV_UPDATED) == 0)
+ continue;
+
+ if ((value & EMC_DIG_DLL_STATUS_DLL_LOCK) == 0)
+ continue;
+
+ break;
+ }
+
+ value = emc_readl(emc, EMC_DIG_DLL_STATUS);
+
+ return value & EMC_DIG_DLL_STATUS_DLL_OUT_MASK;
+}
+
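+/*
+ * Queue the pad ramp-up sequence for the new rate into the CCFIFO. The
+ * delays between the individual writes are derived from the target clock,
+ * and the accumulated ramp-up wait time is returned to the caller.
+ */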
+u32 tegra210_emc_dvfs_power_ramp_up(struct tegra210_emc *emc, u32 clk,
+ bool flip_backward)
+{
+ u32 cmd_pad, dq_pad, rfu1, cfg5, common_tx, ramp_up_wait = 0;
+ const struct tegra210_emc_timing *timing;
+
+ if (flip_backward)
+ timing = emc->last;
+ else
+ timing = emc->next;
+
+ cmd_pad = timing->burst_regs[EMC_PMACRO_CMD_PAD_TX_CTRL_INDEX];
+ dq_pad = timing->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
+ rfu1 = timing->burst_regs[EMC_PMACRO_BRICK_CTRL_RFU1_INDEX];
+ cfg5 = timing->burst_regs[EMC_FBIO_CFG5_INDEX];
+ common_tx = timing->burst_regs[EMC_PMACRO_COMMON_PAD_TX_CTRL_INDEX];
+
+ cmd_pad |= EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
+
+ if (clk < 1000000 / DVFS_FGCG_MID_SPEED_THRESHOLD) {
+ ccfifo_writel(emc, common_tx & 0xa,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, common_tx & 0xf,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL,
+ (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+ } else {
+ ccfifo_writel(emc, common_tx | 0x8,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL, 0);
+ }
+
+ if (clk < 1000000 / DVFS_FGCG_HIGH_SPEED_THRESHOLD) {
+ if (clk < 1000000 / IOBRICK_DCC_THRESHOLD) {
+ cmd_pad |=
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC;
+ cmd_pad &=
+ ~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC);
+ ccfifo_writel(emc, cmd_pad,
+ EMC_PMACRO_CMD_PAD_TX_CTRL,
+ (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+
+ dq_pad |=
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC;
+ dq_pad &=
+ ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC);
+ ccfifo_writel(emc, dq_pad,
+ EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, rfu1 & 0xfe40fe40,
+ EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+ } else {
+ ccfifo_writel(emc, rfu1 & 0xfe40fe40,
+ EMC_PMACRO_BRICK_CTRL_RFU1,
+ (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+ }
+
+ ccfifo_writel(emc, rfu1 & 0xfeedfeed,
+ EMC_PMACRO_BRICK_CTRL_RFU1, (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+
+ if (clk < 1000000 / IOBRICK_DCC_THRESHOLD) {
+ cmd_pad |=
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC;
+ ccfifo_writel(emc, cmd_pad,
+ EMC_PMACRO_CMD_PAD_TX_CTRL,
+ (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+
+ dq_pad |=
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC;
+ ccfifo_writel(emc, dq_pad,
+ EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, rfu1,
+ EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+ } else {
+ ccfifo_writel(emc, rfu1,
+ EMC_PMACRO_BRICK_CTRL_RFU1,
+ (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+ }
+
+ ccfifo_writel(emc, cfg5 & ~EMC_FBIO_CFG5_CMD_TX_DIS,
+ EMC_FBIO_CFG5, (100000 / clk) + 10);
+ ramp_up_wait += 100000 + (10 * clk);
+ } else if (clk < 1000000 / DVFS_FGCG_MID_SPEED_THRESHOLD) {
+ ccfifo_writel(emc, rfu1 | 0x06000600,
+ EMC_PMACRO_BRICK_CTRL_RFU1, (100000 / clk) + 1);
+ ccfifo_writel(emc, cfg5 & ~EMC_FBIO_CFG5_CMD_TX_DIS,
+ EMC_FBIO_CFG5, (100000 / clk) + 10);
+ ramp_up_wait += 100000 + 10 * clk;
+ } else {
+ ccfifo_writel(emc, rfu1 | 0x00000600,
+ EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+ ccfifo_writel(emc, cfg5 & ~EMC_FBIO_CFG5_CMD_TX_DIS,
+ EMC_FBIO_CFG5, 12);
+ ramp_up_wait += 12 * clk;
+ }
+
+ cmd_pad &= ~EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
+ ccfifo_writel(emc, cmd_pad, EMC_PMACRO_CMD_PAD_TX_CTRL, 5);
+
+ return ramp_up_wait;
+}
+
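+/*
+ * Queue the pad ramp-down sequence for the current rate into the CCFIFO,
+ * the inverse of the ramp-up sequence above, and return the accumulated
+ * ramp-down wait time.
+ */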
+u32 tegra210_emc_dvfs_power_ramp_down(struct tegra210_emc *emc, u32 clk,
+ bool flip_backward)
+{
+ u32 ramp_down_wait = 0, cmd_pad, dq_pad, rfu1, cfg5, common_tx;
+ const struct tegra210_emc_timing *entry;
+ u32 seq_wait;
+
+ if (flip_backward)
+ entry = emc->next;
+ else
+ entry = emc->last;
+
+ cmd_pad = entry->burst_regs[EMC_PMACRO_CMD_PAD_TX_CTRL_INDEX];
+ dq_pad = entry->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
+ rfu1 = entry->burst_regs[EMC_PMACRO_BRICK_CTRL_RFU1_INDEX];
+ cfg5 = entry->burst_regs[EMC_FBIO_CFG5_INDEX];
+ common_tx = entry->burst_regs[EMC_PMACRO_COMMON_PAD_TX_CTRL_INDEX];
+
+ cmd_pad |= EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
+
+ ccfifo_writel(emc, cmd_pad, EMC_PMACRO_CMD_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, cfg5 | EMC_FBIO_CFG5_CMD_TX_DIS,
+ EMC_FBIO_CFG5, 12);
+ ramp_down_wait = 12 * clk;
+
+ seq_wait = (100000 / clk) + 1;
+
+ if (clk < (1000000 / DVFS_FGCG_HIGH_SPEED_THRESHOLD)) {
+ if (clk < (1000000 / IOBRICK_DCC_THRESHOLD)) {
+ cmd_pad &=
+ ~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC);
+ cmd_pad |=
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC;
+ ccfifo_writel(emc, cmd_pad,
+ EMC_PMACRO_CMD_PAD_TX_CTRL, seq_wait);
+ ramp_down_wait += 100000;
+
+ dq_pad &=
+ ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC);
+ dq_pad |=
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC;
+ ccfifo_writel(emc, dq_pad,
+ EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, rfu1 & ~0x01120112,
+ EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+ } else {
+ ccfifo_writel(emc, rfu1 & ~0x01120112,
+ EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait);
+ ramp_down_wait += 100000;
+ }
+
+ ccfifo_writel(emc, rfu1 & ~0x01bf01bf,
+ EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait);
+ ramp_down_wait += 100000;
+
+ if (clk < (1000000 / IOBRICK_DCC_THRESHOLD)) {
+ cmd_pad &=
+ ~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC);
+ ccfifo_writel(emc, cmd_pad,
+ EMC_PMACRO_CMD_PAD_TX_CTRL, seq_wait);
+ ramp_down_wait += 100000;
+
+ dq_pad &=
+ ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC);
+ ccfifo_writel(emc, dq_pad,
+ EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, rfu1 & ~0x07ff07ff,
+ EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+ } else {
+ ccfifo_writel(emc, rfu1 & ~0x07ff07ff,
+ EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait);
+ ramp_down_wait += 100000;
+ }
+ } else {
+ ccfifo_writel(emc, rfu1 & ~0xffff07ff,
+ EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait + 19);
+ ramp_down_wait += 100000 + (20 * clk);
+ }
+
+ if (clk < (1000000 / DVFS_FGCG_MID_SPEED_THRESHOLD)) {
+ ramp_down_wait += 100000;
+ ccfifo_writel(emc, common_tx & ~0x5,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL, seq_wait);
+ ramp_down_wait += 100000;
+ ccfifo_writel(emc, common_tx & ~0xf,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL, seq_wait);
+ ramp_down_wait += 100000;
+ ccfifo_writel(emc, 0, 0, seq_wait);
+ ramp_down_wait += 100000;
+ } else {
+ ccfifo_writel(emc, common_tx & ~0xf,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL, seq_wait);
+ }
+
+ return ramp_down_wait;
+}
+
+void tegra210_emc_reset_dram_clktree_values(struct tegra210_emc_timing *timing)
+{
+ timing->current_dram_clktree[C0D0U0] =
+ timing->trained_dram_clktree[C0D0U0];
+ timing->current_dram_clktree[C0D0U1] =
+ timing->trained_dram_clktree[C0D0U1];
+ timing->current_dram_clktree[C1D0U0] =
+ timing->trained_dram_clktree[C1D0U0];
+ timing->current_dram_clktree[C1D0U1] =
+ timing->trained_dram_clktree[C1D0U1];
+ timing->current_dram_clktree[C1D1U0] =
+ timing->trained_dram_clktree[C1D1U0];
+ timing->current_dram_clktree[C1D1U1] =
+ timing->trained_dram_clktree[C1D1U1];
+}
+
+static void update_dll_control(struct tegra210_emc *emc, u32 value, bool state)
+{
+ unsigned int i;
+
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+ tegra210_emc_timing_update(emc);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_CFG_DIG_DLL,
+ EMC_CFG_DIG_DLL_CFG_DLL_EN,
+ state);
+}
+
+void tegra210_emc_dll_disable(struct tegra210_emc *emc)
+{
+ u32 value;
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
+
+ update_dll_control(emc, value, false);
+}
+
+void tegra210_emc_dll_enable(struct tegra210_emc *emc)
+{
+ u32 value;
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_EN;
+
+ update_dll_control(emc, value, true);
+}
+
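+/*
+ * Scale the refresh, pre-refresh request count and dynamic self-refresh
+ * control values of the given timing according to the currently selected
+ * refresh derating (2x or 4x) before writing them out.
+ */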
+void tegra210_emc_adjust_timing(struct tegra210_emc *emc,
+ struct tegra210_emc_timing *timing)
+{
+ u32 dsr_cntrl = timing->burst_regs[EMC_DYN_SELF_REF_CONTROL_INDEX];
+ u32 pre_ref = timing->burst_regs[EMC_PRE_REFRESH_REQ_CNT_INDEX];
+ u32 ref = timing->burst_regs[EMC_REFRESH_INDEX];
+
+ switch (emc->refresh) {
+ case TEGRA210_EMC_REFRESH_NOMINAL:
+ case TEGRA210_EMC_REFRESH_THROTTLE:
+ break;
+
+ case TEGRA210_EMC_REFRESH_2X:
+ ref = REFRESH_SPEEDUP(ref, 2);
+ pre_ref = REFRESH_SPEEDUP(pre_ref, 2);
+ dsr_cntrl = REFRESH_SPEEDUP(dsr_cntrl, 2);
+ break;
+
+ case TEGRA210_EMC_REFRESH_4X:
+ ref = REFRESH_SPEEDUP(ref, 4);
+ pre_ref = REFRESH_SPEEDUP(pre_ref, 4);
+ dsr_cntrl = REFRESH_SPEEDUP(dsr_cntrl, 4);
+ break;
+
+ default:
+ dev_warn(emc->dev, "failed to set refresh: %d\n", emc->refresh);
+ return;
+ }
+
+ emc_writel(emc, ref, emc->offsets->burst[EMC_REFRESH_INDEX]);
+ emc_writel(emc, pre_ref,
+ emc->offsets->burst[EMC_PRE_REFRESH_REQ_CNT_INDEX]);
+ emc_writel(emc, dsr_cntrl,
+ emc->offsets->burst[EMC_DYN_SELF_REF_CONTROL_INDEX]);
+}
+
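+/*
+ * Clock provider callback: look up the timing matching the requested rate,
+ * honour the minimum delay between consecutive clock changes and perform
+ * the actual switch under the EMC lock.
+ */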
+static int tegra210_emc_set_rate(struct device *dev,
+ const struct tegra210_clk_emc_config *config)
+{
+ struct tegra210_emc *emc = dev_get_drvdata(dev);
+ struct tegra210_emc_timing *timing = NULL;
+ unsigned long rate = config->rate;
+ s64 last_change_delay;
+ unsigned long flags;
+ unsigned int i;
+
+ if (rate == emc->last->rate * 1000UL)
+ return 0;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate * 1000UL == rate) {
+ timing = &emc->timings[i];
+ break;
+ }
+ }
+
+ if (!timing)
+ return -EINVAL;
+
+ if (rate > 204000000 && !timing->trained)
+ return -EINVAL;
+
+ emc->next = timing;
+ last_change_delay = ktime_us_delta(ktime_get(), emc->clkchange_time);
+
+ /* XXX use non-busy-looping sleep? */
+ if ((last_change_delay >= 0) &&
+ (last_change_delay < emc->clkchange_delay))
+ udelay(emc->clkchange_delay - (int)last_change_delay);
+
+ spin_lock_irqsave(&emc->lock, flags);
+ tegra210_emc_set_clock(emc, config->value);
+ emc->clkchange_time = ktime_get();
+ emc->last = timing;
+ spin_unlock_irqrestore(&emc->lock, flags);
+
+ return 0;
+}
+
+/*
+ * debugfs interface
+ *
+ * The memory controller driver exposes some files in debugfs that can be used
+ * to control the EMC frequency. The top-level directory can be found here:
+ *
+ * /sys/kernel/debug/emc
+ *
+ * It contains the following files:
+ *
+ * - available_rates: This file contains a list of valid, space-separated
+ * EMC frequencies.
+ *
+ * - min_rate: Writing a value to this file sets the given frequency as the
+ * floor of the permitted range. If this is higher than the currently
+ * configured EMC frequency, this will cause the frequency to be
+ * increased so that it stays within the valid range.
+ *
+ * - max_rate: Similar to the min_rate file, writing a value to this file
+ * sets the given frequency as the ceiling of the permitted range. If
+ * the value is lower than the currently configured EMC frequency, this
+ * will cause the frequency to be decreased so that it stays within the
+ * valid range.
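+ *
+ * Example usage (a sketch; the paths assume debugfs is mounted at
+ * /sys/kernel/debug):
+ *
+ *   cat /sys/kernel/debug/emc/available_rates
+ *   echo 204000000 > /sys/kernel/debug/emc/min_rate
+ *   echo 800000000 > /sys/kernel/debug/emc/max_rate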
+ */
+
+static bool tegra210_emc_validate_rate(struct tegra210_emc *emc,
+ unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++)
+ if (rate == emc->timings[i].rate * 1000UL)
+ return true;
+
+ return false;
+}
+
+static int tegra210_emc_debug_available_rates_show(struct seq_file *s,
+ void *data)
+{
+ struct tegra210_emc *emc = s->private;
+ const char *prefix = "";
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ seq_printf(s, "%s%u", prefix, emc->timings[i].rate * 1000);
+ prefix = " ";
+ }
+
+ seq_puts(s, "\n");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(tegra210_emc_debug_available_rates);
+
+static int tegra210_emc_debug_min_rate_get(void *data, u64 *rate)
+{
+ struct tegra210_emc *emc = data;
+
+ *rate = emc->debugfs.min_rate;
+
+ return 0;
+}
+
+static int tegra210_emc_debug_min_rate_set(void *data, u64 rate)
+{
+ struct tegra210_emc *emc = data;
+ int err;
+
+ if (!tegra210_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = clk_set_min_rate(emc->clk, rate);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.min_rate = rate;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(tegra210_emc_debug_min_rate_fops,
+ tegra210_emc_debug_min_rate_get,
+ tegra210_emc_debug_min_rate_set, "%llu\n");
+
+static int tegra210_emc_debug_max_rate_get(void *data, u64 *rate)
+{
+ struct tegra210_emc *emc = data;
+
+ *rate = emc->debugfs.max_rate;
+
+ return 0;
+}
+
+static int tegra210_emc_debug_max_rate_set(void *data, u64 rate)
+{
+ struct tegra210_emc *emc = data;
+ int err;
+
+ if (!tegra210_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = clk_set_max_rate(emc->clk, rate);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.max_rate = rate;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(tegra210_emc_debug_max_rate_fops,
+ tegra210_emc_debug_max_rate_get,
+ tegra210_emc_debug_max_rate_set, "%llu\n");
+
+static int tegra210_emc_debug_temperature_get(void *data, u64 *temperature)
+{
+ struct tegra210_emc *emc = data;
+ unsigned int value;
+
+ if (!emc->debugfs.temperature)
+ value = tegra210_emc_get_temperature(emc);
+ else
+ value = emc->debugfs.temperature;
+
+ *temperature = value;
+
+ return 0;
+}
+
+static int tegra210_emc_debug_temperature_set(void *data, u64 temperature)
+{
+ struct tegra210_emc *emc = data;
+
+ if (temperature > 7)
+ return -EINVAL;
+
+ emc->debugfs.temperature = temperature;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(tegra210_emc_debug_temperature_fops,
+ tegra210_emc_debug_temperature_get,
+ tegra210_emc_debug_temperature_set, "%llu\n");
+
+static void tegra210_emc_debugfs_init(struct tegra210_emc *emc)
+{
+ struct device *dev = emc->dev;
+ unsigned int i;
+ int err;
+
+ emc->debugfs.min_rate = ULONG_MAX;
+ emc->debugfs.max_rate = 0;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate * 1000UL < emc->debugfs.min_rate)
+ emc->debugfs.min_rate = emc->timings[i].rate * 1000UL;
+
+ if (emc->timings[i].rate * 1000UL > emc->debugfs.max_rate)
+ emc->debugfs.max_rate = emc->timings[i].rate * 1000UL;
+ }
+
+ if (!emc->num_timings) {
+ emc->debugfs.min_rate = clk_get_rate(emc->clk);
+ emc->debugfs.max_rate = emc->debugfs.min_rate;
+ }
+
+ err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
+ emc->debugfs.max_rate);
+ if (err < 0) {
+ dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
+ emc->debugfs.min_rate, emc->debugfs.max_rate,
+ emc->clk);
+ return;
+ }
+
+ emc->debugfs.root = debugfs_create_dir("emc", NULL);
+
+ debugfs_create_file("available_rates", 0444, emc->debugfs.root, emc,
+ &tegra210_emc_debug_available_rates_fops);
+ debugfs_create_file("min_rate", 0644, emc->debugfs.root, emc,
+ &tegra210_emc_debug_min_rate_fops);
+ debugfs_create_file("max_rate", 0644, emc->debugfs.root, emc,
+ &tegra210_emc_debug_max_rate_fops);
+ debugfs_create_file("temperature", 0644, emc->debugfs.root, emc,
+ &tegra210_emc_debug_temperature_fops);
+}
+
+static void tegra210_emc_detect(struct tegra210_emc *emc)
+{
+ u32 value;
+
+ /* probe the number of connected DRAM devices */
+ value = mc_readl(emc->mc, MC_EMEM_ADR_CFG);
+
+ if (value & MC_EMEM_ADR_CFG_EMEM_NUMDEV)
+ emc->num_devices = 2;
+ else
+ emc->num_devices = 1;
+
+ /* probe the type of DRAM */
+ value = emc_readl(emc, EMC_FBIO_CFG5);
+ emc->dram_type = value & 0x3;
+
+ /* probe the number of channels */
+ value = emc_readl(emc, EMC_FBIO_CFG7);
+
+ if ((value & EMC_FBIO_CFG7_CH1_ENABLE) &&
+ (value & EMC_FBIO_CFG7_CH0_ENABLE))
+ emc->num_channels = 2;
+ else
+ emc->num_channels = 1;
+}
+
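+/*
+ * Sanity check an EMC table: every entry must have a non-zero rate and the
+ * entries must be sorted by strictly increasing rate and non-decreasing
+ * minimum voltage.
+ */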
+static int tegra210_emc_validate_timings(struct tegra210_emc *emc,
+ struct tegra210_emc_timing *timings,
+ unsigned int num_timings)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_timings; i++) {
+ u32 min_volt = timings[i].min_volt;
+ u32 rate = timings[i].rate;
+
+ if (!rate)
+ return -EINVAL;
+
+ if ((i > 0) && ((rate <= timings[i - 1].rate) ||
+ (min_volt < timings[i - 1].min_volt)))
+ return -EINVAL;
+
+ if (timings[i].revision != timings[0].revision)
+ continue;
+ }
+
+ return 0;
+}
+
+static int tegra210_emc_probe(struct platform_device *pdev)
+{
+ struct thermal_cooling_device *cd;
+ unsigned long current_rate;
+ struct tegra210_emc *emc;
+ struct device_node *np;
+ unsigned int i;
+ int err;
+
+ emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
+ if (!emc)
+ return -ENOMEM;
+
+ emc->clk = devm_clk_get(&pdev->dev, "emc");
+ if (IS_ERR(emc->clk))
+ return PTR_ERR(emc->clk);
+
+ platform_set_drvdata(pdev, emc);
+ spin_lock_init(&emc->lock);
+ emc->dev = &pdev->dev;
+
+ emc->mc = devm_tegra_memory_controller_get(&pdev->dev);
+ if (IS_ERR(emc->mc))
+ return PTR_ERR(emc->mc);
+
+ emc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(emc->regs))
+ return PTR_ERR(emc->regs);
+
+ for (i = 0; i < 2; i++) {
+ emc->channel[i] = devm_platform_ioremap_resource(pdev, 1 + i);
+ if (IS_ERR(emc->channel[i]))
+ return PTR_ERR(emc->channel[i]);
+
+ }
+
+ tegra210_emc_detect(emc);
+ np = pdev->dev.of_node;
+
+ /* attach to the nominal and (optional) derated tables */
+ err = of_reserved_mem_device_init_by_name(emc->dev, np, "nominal");
+ if (err < 0) {
+ dev_err(emc->dev, "failed to get nominal EMC table: %d\n", err);
+ return err;
+ }
+
+ err = of_reserved_mem_device_init_by_name(emc->dev, np, "derated");
+ if (err < 0 && err != -ENODEV) {
+ dev_err(emc->dev, "failed to get derated EMC table: %d\n", err);
+ goto release;
+ }
+
+ /* validate the tables */
+ if (emc->nominal) {
+ err = tegra210_emc_validate_timings(emc, emc->nominal,
+ emc->num_timings);
+ if (err < 0)
+ goto release;
+ }
+
+ if (emc->derated) {
+ err = tegra210_emc_validate_timings(emc, emc->derated,
+ emc->num_timings);
+ if (err < 0)
+ goto release;
+ }
+
+ /* default to the nominal table */
+ emc->timings = emc->nominal;
+
+ /* pick the current timing based on the current EMC clock rate */
+ current_rate = clk_get_rate(emc->clk) / 1000;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate == current_rate) {
+ emc->last = &emc->timings[i];
+ break;
+ }
+ }
+
+ if (i == emc->num_timings) {
+ dev_err(emc->dev, "no EMC table entry found for %lu kHz\n",
+ current_rate);
+ err = -ENOENT;
+ goto release;
+ }
+
+ /* pick a compatible clock change sequence for the EMC table */
+ for (i = 0; i < ARRAY_SIZE(tegra210_emc_sequences); i++) {
+ const struct tegra210_emc_sequence *sequence =
+ tegra210_emc_sequences[i];
+
+ if (emc->timings[0].revision == sequence->revision) {
+ emc->sequence = sequence;
+ break;
+ }
+ }
+
+ if (!emc->sequence) {
+ dev_err(&pdev->dev, "sequence %u not supported\n",
+ emc->timings[0].revision);
+ err = -ENOTSUPP;
+ goto release;
+ }
+
+ emc->offsets = &tegra210_emc_table_register_offsets;
+ emc->refresh = TEGRA210_EMC_REFRESH_NOMINAL;
+
+ emc->provider.owner = THIS_MODULE;
+ emc->provider.dev = &pdev->dev;
+ emc->provider.set_rate = tegra210_emc_set_rate;
+
+ emc->provider.configs = devm_kcalloc(&pdev->dev, emc->num_timings,
+ sizeof(*emc->provider.configs),
+ GFP_KERNEL);
+ if (!emc->provider.configs) {
+ err = -ENOMEM;
+ goto release;
+ }
+
+ emc->provider.num_configs = emc->num_timings;
+
+ for (i = 0; i < emc->provider.num_configs; i++) {
+ struct tegra210_emc_timing *timing = &emc->timings[i];
+ struct tegra210_clk_emc_config *config =
+ &emc->provider.configs[i];
+ u32 value;
+
+ config->rate = timing->rate * 1000UL;
+ config->value = timing->clk_src_emc;
+
+ value = timing->burst_mc_regs[MC_EMEM_ARB_MISC0_INDEX];
+
+ if ((value & MC_EMEM_ARB_MISC0_EMC_SAME_FREQ) == 0)
+ config->same_freq = false;
+ else
+ config->same_freq = true;
+ }
+
+ err = tegra210_clk_emc_attach(emc->clk, &emc->provider);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to attach to EMC clock: %d\n", err);
+ goto release;
+ }
+
+ emc->clkchange_delay = 100;
+ emc->training_interval = 100;
+ dev_set_drvdata(emc->dev, emc);
+
+ timer_setup(&emc->refresh_timer, tegra210_emc_poll_refresh,
+ TIMER_DEFERRABLE);
+ atomic_set(&emc->refresh_poll, 0);
+ emc->refresh_poll_interval = 1000;
+
+ timer_setup(&emc->training, tegra210_emc_train, 0);
+
+ tegra210_emc_debugfs_init(emc);
+
+ cd = devm_thermal_of_cooling_device_register(emc->dev, np, "emc", emc,
+ &tegra210_emc_cd_ops);
+ if (IS_ERR(cd)) {
+ err = PTR_ERR(cd);
+ dev_err(emc->dev, "failed to register cooling device: %d\n",
+ err);
+ goto detach;
+ }
+
+ return 0;
+
+detach:
+ debugfs_remove_recursive(emc->debugfs.root);
+ tegra210_clk_emc_detach(emc->clk);
+release:
+ of_reserved_mem_device_release(emc->dev);
+
+ return err;
+}
+
+static int tegra210_emc_remove(struct platform_device *pdev)
+{
+ struct tegra210_emc *emc = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(emc->debugfs.root);
+ tegra210_clk_emc_detach(emc->clk);
+ of_reserved_mem_device_release(emc->dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra210_emc_suspend(struct device *dev)
+{
+ struct tegra210_emc *emc = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_rate_exclusive_get(emc->clk);
+ if (err < 0) {
+ dev_err(emc->dev, "failed to acquire clock: %d\n", err);
+ return err;
+ }
+
+ emc->resume_rate = clk_get_rate(emc->clk);
+
+ clk_set_rate(emc->clk, 204000000);
+ tegra210_clk_emc_detach(emc->clk);
+
+ dev_dbg(dev, "suspending at %lu Hz\n", clk_get_rate(emc->clk));
+
+ return 0;
+}
+
+static int __maybe_unused tegra210_emc_resume(struct device *dev)
+{
+ struct tegra210_emc *emc = dev_get_drvdata(dev);
+ int err;
+
+ err = tegra210_clk_emc_attach(emc->clk, &emc->provider);
+ if (err < 0) {
+ dev_err(dev, "failed to attach to EMC clock: %d\n", err);
+ return err;
+ }
+
+ clk_set_rate(emc->clk, emc->resume_rate);
+ clk_rate_exclusive_put(emc->clk);
+
+ dev_dbg(dev, "resuming at %lu Hz\n", clk_get_rate(emc->clk));
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra210_emc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra210_emc_suspend, tegra210_emc_resume)
+};
+
+static const struct of_device_id tegra210_emc_of_match[] = {
+ { .compatible = "nvidia,tegra210-emc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra210_emc_of_match);
+
+static struct platform_driver tegra210_emc_driver = {
+ .driver = {
+ .name = "tegra210-emc",
+ .of_match_table = tegra210_emc_of_match,
+ .pm = &tegra210_emc_pm_ops,
+ },
+ .probe = tegra210_emc_probe,
+ .remove = tegra210_emc_remove,
+};
+
+module_platform_driver(tegra210_emc_driver);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_AUTHOR("Joseph Lo <josephl@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra210 EMC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra210-emc-table.c b/drivers/memory/tegra/tegra210-emc-table.c
new file mode 100644
index 000000000..34a8785d2
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-emc-table.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/of_reserved_mem.h>
+
+#include "tegra210-emc.h"
+
+#define TEGRA_EMC_MAX_FREQS 16
+
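+/*
+ * Map a reserved-memory EMC table, count its valid entries (revision != 0)
+ * and register it as either the nominal or the derated table. The derated
+ * table must contain the same number of entries as the nominal one.
+ */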
+static int tegra210_emc_table_device_init(struct reserved_mem *rmem,
+ struct device *dev)
+{
+ struct tegra210_emc *emc = dev_get_drvdata(dev);
+ struct tegra210_emc_timing *timings;
+ unsigned int i, count = 0;
+
+ timings = memremap(rmem->base, rmem->size, MEMREMAP_WB);
+ if (!timings) {
+ dev_err(dev, "failed to map EMC table\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < TEGRA_EMC_MAX_FREQS; i++) {
+ if (timings[i].revision == 0)
+ break;
+
+ count++;
+ }
+
+ /* only the nominal and derated tables are expected */
+ if (emc->derated) {
+ dev_warn(dev, "excess EMC table '%s'\n", rmem->name);
+ goto out;
+ }
+
+ if (emc->nominal) {
+ if (count != emc->num_timings) {
+ dev_warn(dev, "%u derated vs. %u nominal entries\n",
+ count, emc->num_timings);
+ memunmap(timings);
+ return -EINVAL;
+ }
+
+ emc->derated = timings;
+ } else {
+ emc->num_timings = count;
+ emc->nominal = timings;
+ }
+
+out:
+ /* keep track of which table this is */
+ rmem->priv = timings;
+
+ return 0;
+}
+
+static void tegra210_emc_table_device_release(struct reserved_mem *rmem,
+ struct device *dev)
+{
+ struct tegra210_emc_timing *timings = rmem->priv;
+ struct tegra210_emc *emc = dev_get_drvdata(dev);
+
+ if ((emc->nominal && timings != emc->nominal) &&
+ (emc->derated && timings != emc->derated))
+ dev_warn(dev, "trying to release unassigned EMC table '%s'\n",
+ rmem->name);
+
+ memunmap(timings);
+}
+
+static const struct reserved_mem_ops tegra210_emc_table_ops = {
+ .device_init = tegra210_emc_table_device_init,
+ .device_release = tegra210_emc_table_device_release,
+};
+
+static int tegra210_emc_table_init(struct reserved_mem *rmem)
+{
+ pr_debug("Tegra210 EMC table at %pa, size %lu bytes\n", &rmem->base,
+ (unsigned long)rmem->size);
+
+ rmem->ops = &tegra210_emc_table_ops;
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(tegra210_emc_table, "nvidia,tegra210-emc-table",
+ tegra210_emc_table_init);
diff --git a/drivers/memory/tegra/tegra210-emc.h b/drivers/memory/tegra/tegra210-emc.h
new file mode 100644
index 000000000..8988bcf15
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-emc.h
@@ -0,0 +1,1016 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef TEGRA210_EMC_H
+#define TEGRA210_EMC_H
+
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#define DVFS_FGCG_HIGH_SPEED_THRESHOLD 1000
+#define IOBRICK_DCC_THRESHOLD 2400
+#define DVFS_FGCG_MID_SPEED_THRESHOLD 600
+
+#define EMC_STATUS_UPDATE_TIMEOUT 1000
+
+/* register definitions */
+#define EMC_INTSTATUS 0x0
+#define EMC_INTSTATUS_CLKCHANGE_COMPLETE BIT(4)
+#define EMC_DBG 0x8
+#define EMC_DBG_WRITE_MUX_ACTIVE BIT(1)
+#define EMC_DBG_WRITE_ACTIVE_ONLY BIT(30)
+#define EMC_CFG 0xc
+#define EMC_CFG_DRAM_CLKSTOP_PD BIT(31)
+#define EMC_CFG_DRAM_CLKSTOP_SR BIT(30)
+#define EMC_CFG_DRAM_ACPD BIT(29)
+#define EMC_CFG_DYN_SELF_REF BIT(28)
+#define EMC_PIN 0x24
+#define EMC_PIN_PIN_CKE BIT(0)
+#define EMC_PIN_PIN_CKEB BIT(1)
+#define EMC_PIN_PIN_CKE_PER_DEV BIT(2)
+#define EMC_TIMING_CONTROL 0x28
+#define EMC_RC 0x2c
+#define EMC_RFC 0x30
+#define EMC_RAS 0x34
+#define EMC_RP 0x38
+#define EMC_R2W 0x3c
+#define EMC_W2R 0x40
+#define EMC_R2P 0x44
+#define EMC_W2P 0x48
+#define EMC_RD_RCD 0x4c
+#define EMC_WR_RCD 0x50
+#define EMC_RRD 0x54
+#define EMC_REXT 0x58
+#define EMC_WDV 0x5c
+#define EMC_QUSE 0x60
+#define EMC_QRST 0x64
+#define EMC_QSAFE 0x68
+#define EMC_RDV 0x6c
+#define EMC_REFRESH 0x70
+#define EMC_BURST_REFRESH_NUM 0x74
+#define EMC_PDEX2WR 0x78
+#define EMC_PDEX2RD 0x7c
+#define EMC_PCHG2PDEN 0x80
+#define EMC_ACT2PDEN 0x84
+#define EMC_AR2PDEN 0x88
+#define EMC_RW2PDEN 0x8c
+#define EMC_TXSR 0x90
+#define EMC_TCKE 0x94
+#define EMC_TFAW 0x98
+#define EMC_TRPAB 0x9c
+#define EMC_TCLKSTABLE 0xa0
+#define EMC_TCLKSTOP 0xa4
+#define EMC_TREFBW 0xa8
+#define EMC_TPPD 0xac
+#define EMC_ODT_WRITE 0xb0
+#define EMC_PDEX2MRR 0xb4
+#define EMC_WEXT 0xb8
+#define EMC_RFC_SLR 0xc0
+#define EMC_MRS_WAIT_CNT2 0xc4
+#define EMC_MRS_WAIT_CNT2_MRS_EXT2_WAIT_CNT_SHIFT 16
+#define EMC_MRS_WAIT_CNT2_MRS_EXT1_WAIT_CNT_SHIFT 0
+#define EMC_MRS_WAIT_CNT 0xc8
+#define EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT 0
+#define EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK \
+ (0x3FF << EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT)
+
+#define EMC_MRS 0xcc
+#define EMC_EMRS 0xd0
+#define EMC_EMRS_USE_EMRS_LONG_CNT BIT(26)
+#define EMC_REF 0xd4
+#define EMC_REF_REF_CMD BIT(0)
+#define EMC_SELF_REF 0xe0
+#define EMC_MRW 0xe8
+#define EMC_MRW_MRW_OP_SHIFT 0
+#define EMC_MRW_MRW_OP_MASK \
+ (0xff << EMC_MRW_MRW_OP_SHIFT)
+#define EMC_MRW_MRW_MA_SHIFT 16
+#define EMC_MRW_USE_MRW_EXT_CNT 27
+#define EMC_MRW_MRW_DEV_SELECTN_SHIFT 30
+
+#define EMC_MRR 0xec
+#define EMC_MRR_DEV_SEL_SHIFT 30
+#define EMC_MRR_DEV_SEL_MASK 0x3
+#define EMC_MRR_MA_SHIFT 16
+#define EMC_MRR_MA_MASK 0xff
+#define EMC_MRR_DATA_SHIFT 0
+#define EMC_MRR_DATA_MASK 0xffff
+
+#define EMC_FBIO_SPARE 0x100
+#define EMC_FBIO_CFG5 0x104
+#define EMC_FBIO_CFG5_DRAM_TYPE_SHIFT 0
+#define EMC_FBIO_CFG5_DRAM_TYPE_MASK \
+ (0x3 << EMC_FBIO_CFG5_DRAM_TYPE_SHIFT)
+#define EMC_FBIO_CFG5_CMD_TX_DIS BIT(8)
+
+#define EMC_PDEX2CKE 0x118
+#define EMC_CKE2PDEN 0x11c
+#define EMC_MPC 0x128
+#define EMC_EMRS2 0x12c
+#define EMC_EMRS2_USE_EMRS2_LONG_CNT BIT(26)
+#define EMC_MRW2 0x134
+#define EMC_MRW3 0x138
+#define EMC_MRW4 0x13c
+#define EMC_R2R 0x144
+#define EMC_EINPUT 0x14c
+#define EMC_EINPUT_DURATION 0x150
+#define EMC_PUTERM_EXTRA 0x154
+#define EMC_TCKESR 0x158
+#define EMC_TPD 0x15c
+#define EMC_AUTO_CAL_CONFIG 0x2a4
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_COMPUTE_START BIT(0)
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_MEASURE_STALL BIT(9)
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_UPDATE_STALL BIT(10)
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE BIT(29)
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_START BIT(31)
+#define EMC_EMC_STATUS 0x2b4
+#define EMC_EMC_STATUS_MRR_DIVLD BIT(20)
+#define EMC_EMC_STATUS_TIMING_UPDATE_STALLED BIT(23)
+#define EMC_EMC_STATUS_DRAM_IN_POWERDOWN_SHIFT 4
+#define EMC_EMC_STATUS_DRAM_IN_POWERDOWN_MASK \
+ (0x3 << EMC_EMC_STATUS_DRAM_IN_POWERDOWN_SHIFT)
+#define EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_SHIFT 8
+#define EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK \
+ (0x3 << EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_SHIFT)
+
+#define EMC_CFG_2 0x2b8
+#define EMC_CFG_DIG_DLL 0x2bc
+#define EMC_CFG_DIG_DLL_CFG_DLL_EN BIT(0)
+#define EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK BIT(1)
+#define EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC BIT(3)
+#define EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK BIT(4)
+#define EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT 6
+#define EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK \
+ (0x3 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT)
+#define EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_SHIFT 8
+#define EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_MASK \
+ (0x7 << EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_SHIFT)
+
+#define EMC_CFG_DIG_DLL_PERIOD 0x2c0
+#define EMC_DIG_DLL_STATUS 0x2c4
+#define EMC_DIG_DLL_STATUS_DLL_LOCK BIT(15)
+#define EMC_DIG_DLL_STATUS_DLL_PRIV_UPDATED BIT(17)
+#define EMC_DIG_DLL_STATUS_DLL_OUT_SHIFT 0
+#define EMC_DIG_DLL_STATUS_DLL_OUT_MASK \
+ (0x7ff << EMC_DIG_DLL_STATUS_DLL_OUT_SHIFT)
+
+#define EMC_CFG_DIG_DLL_1 0x2c8
+#define EMC_RDV_MASK 0x2cc
+#define EMC_WDV_MASK 0x2d0
+#define EMC_RDV_EARLY_MASK 0x2d4
+#define EMC_RDV_EARLY 0x2d8
+#define EMC_AUTO_CAL_CONFIG8 0x2dc
+#define EMC_ZCAL_INTERVAL 0x2e0
+#define EMC_ZCAL_WAIT_CNT 0x2e4
+#define EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK 0x7ff
+#define EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_SHIFT 0
+
+#define EMC_ZQ_CAL 0x2ec
+#define EMC_ZQ_CAL_DEV_SEL_SHIFT 30
+#define EMC_ZQ_CAL_LONG BIT(4)
+#define EMC_ZQ_CAL_ZQ_LATCH_CMD BIT(1)
+#define EMC_ZQ_CAL_ZQ_CAL_CMD BIT(0)
+#define EMC_FDPD_CTRL_DQ 0x310
+#define EMC_FDPD_CTRL_CMD 0x314
+#define EMC_PMACRO_CMD_BRICK_CTRL_FDPD 0x318
+#define EMC_PMACRO_DATA_BRICK_CTRL_FDPD 0x31c
+#define EMC_PMACRO_BRICK_CTRL_RFU1 0x330
+#define EMC_PMACRO_BRICK_CTRL_RFU2 0x334
+#define EMC_TR_TIMING_0 0x3b4
+#define EMC_TR_CTRL_1 0x3bc
+#define EMC_TR_RDV 0x3c4
+#define EMC_STALL_THEN_EXE_AFTER_CLKCHANGE 0x3cc
+#define EMC_SEL_DPD_CTRL 0x3d8
+#define EMC_SEL_DPD_CTRL_DATA_SEL_DPD_EN BIT(8)
+#define EMC_SEL_DPD_CTRL_ODT_SEL_DPD_EN BIT(5)
+#define EMC_SEL_DPD_CTRL_RESET_SEL_DPD_EN BIT(4)
+#define EMC_SEL_DPD_CTRL_CA_SEL_DPD_EN BIT(3)
+#define EMC_SEL_DPD_CTRL_CLK_SEL_DPD_EN BIT(2)
+#define EMC_PRE_REFRESH_REQ_CNT 0x3dc
+#define EMC_DYN_SELF_REF_CONTROL 0x3e0
+#define EMC_TXSRDLL 0x3e4
+#define EMC_CCFIFO_ADDR 0x3e8
+#define EMC_CCFIFO_ADDR_STALL_BY_1 (1 << 31)
+#define EMC_CCFIFO_ADDR_STALL(x) (((x) & 0x7fff) << 16)
+#define EMC_CCFIFO_ADDR_OFFSET(x) ((x) & 0xffff)
+#define EMC_CCFIFO_DATA 0x3ec
+#define EMC_TR_QPOP 0x3f4
+#define EMC_TR_RDV_MASK 0x3f8
+#define EMC_TR_QSAFE 0x3fc
+#define EMC_TR_QRST 0x400
+#define EMC_ISSUE_QRST 0x428
+#define EMC_AUTO_CAL_CONFIG2 0x458
+#define EMC_AUTO_CAL_CONFIG3 0x45c
+#define EMC_TR_DVFS 0x460
+#define EMC_AUTO_CAL_CHANNEL 0x464
+#define EMC_IBDLY 0x468
+#define EMC_OBDLY 0x46c
+#define EMC_TXDSRVTTGEN 0x480
+#define EMC_WE_DURATION 0x48c
+#define EMC_WS_DURATION 0x490
+#define EMC_WEV 0x494
+#define EMC_WSV 0x498
+#define EMC_CFG_3 0x49c
+#define EMC_MRW6 0x4a4
+#define EMC_MRW7 0x4a8
+#define EMC_MRW8 0x4ac
+#define EMC_MRW9 0x4b0
+#define EMC_MRW10 0x4b4
+#define EMC_MRW11 0x4b8
+#define EMC_MRW12 0x4bc
+#define EMC_MRW13 0x4c0
+#define EMC_MRW14 0x4c4
+#define EMC_MRW15 0x4d0
+#define EMC_CFG_SYNC 0x4d4
+#define EMC_FDPD_CTRL_CMD_NO_RAMP 0x4d8
+#define EMC_FDPD_CTRL_CMD_NO_RAMP_CMD_DPD_NO_RAMP_ENABLE BIT(0)
+#define EMC_WDV_CHK 0x4e0
+#define EMC_CFG_PIPE_2 0x554
+#define EMC_CFG_PIPE_CLK 0x558
+#define EMC_CFG_PIPE_CLK_CLK_ALWAYS_ON BIT(0)
+#define EMC_CFG_PIPE_1 0x55c
+#define EMC_CFG_PIPE 0x560
+#define EMC_QPOP 0x564
+#define EMC_QUSE_WIDTH 0x568
+#define EMC_PUTERM_WIDTH 0x56c
+#define EMC_AUTO_CAL_CONFIG7 0x574
+#define EMC_REFCTRL2 0x580
+#define EMC_FBIO_CFG7 0x584
+#define EMC_FBIO_CFG7_CH0_ENABLE BIT(1)
+#define EMC_FBIO_CFG7_CH1_ENABLE BIT(2)
+#define EMC_DATA_BRLSHFT_0 0x588
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_SHIFT 21
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_SHIFT 18
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_SHIFT 15
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_SHIFT 12
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_SHIFT 9
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_SHIFT 6
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_SHIFT 3
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_SHIFT 0
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_SHIFT)
+
+#define EMC_DATA_BRLSHFT_1 0x58c
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_SHIFT 21
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_SHIFT 18
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_SHIFT 15
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_SHIFT 12
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_SHIFT 9
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_SHIFT 6
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_SHIFT 3
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_SHIFT 0
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_SHIFT)
+
+#define EMC_RFCPB 0x590
+#define EMC_DQS_BRLSHFT_0 0x594
+#define EMC_DQS_BRLSHFT_1 0x598
+#define EMC_CMD_BRLSHFT_0 0x59c
+#define EMC_CMD_BRLSHFT_1 0x5a0
+#define EMC_CMD_BRLSHFT_2 0x5a4
+#define EMC_CMD_BRLSHFT_3 0x5a8
+#define EMC_QUSE_BRLSHFT_0 0x5ac
+#define EMC_AUTO_CAL_CONFIG4 0x5b0
+#define EMC_AUTO_CAL_CONFIG5 0x5b4
+#define EMC_QUSE_BRLSHFT_1 0x5b8
+#define EMC_QUSE_BRLSHFT_2 0x5bc
+#define EMC_CCDMW 0x5c0
+#define EMC_QUSE_BRLSHFT_3 0x5c4
+#define EMC_AUTO_CAL_CONFIG6 0x5cc
+#define EMC_DLL_CFG_0 0x5e4
+#define EMC_DLL_CFG_1 0x5e8
+#define EMC_DLL_CFG_1_DDLLCAL_CTRL_START_TRIM_SHIFT 10
+#define EMC_DLL_CFG_1_DDLLCAL_CTRL_START_TRIM_MASK \
+ (0x7ff << EMC_DLL_CFG_1_DDLLCAL_CTRL_START_TRIM_SHIFT)
+
+#define EMC_CONFIG_SAMPLE_DELAY 0x5f0
+#define EMC_CFG_UPDATE 0x5f4
+#define EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_SHIFT 9
+#define EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_MASK \
+ (0x3 << EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_SHIFT)
+
+#define EMC_PMACRO_QUSE_DDLL_RANK0_0 0x600
+#define EMC_PMACRO_QUSE_DDLL_RANK0_1 0x604
+#define EMC_PMACRO_QUSE_DDLL_RANK0_2 0x608
+#define EMC_PMACRO_QUSE_DDLL_RANK0_3 0x60c
+#define EMC_PMACRO_QUSE_DDLL_RANK0_4 0x610
+#define EMC_PMACRO_QUSE_DDLL_RANK0_5 0x614
+#define EMC_PMACRO_QUSE_DDLL_RANK1_0 0x620
+#define EMC_PMACRO_QUSE_DDLL_RANK1_1 0x624
+#define EMC_PMACRO_QUSE_DDLL_RANK1_2 0x628
+#define EMC_PMACRO_QUSE_DDLL_RANK1_3 0x62c
+#define EMC_PMACRO_QUSE_DDLL_RANK1_4 0x630
+#define EMC_PMACRO_QUSE_DDLL_RANK1_5 0x634
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0 0x640
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE1_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE1_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE1_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE0_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE0_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE0_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1 0x644
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE3_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE3_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE3_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE2_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE2_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE2_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2 0x648
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE5_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE5_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE5_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE4_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE4_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE4_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3 0x64c
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE7_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE7_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE7_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE6_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE6_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE6_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_4 0x650
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_5 0x654
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0 0x660
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE1_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE1_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE1_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE0_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE0_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE0_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1 0x664
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE3_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE3_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE3_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE2_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE2_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE2_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2 0x668
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE5_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE5_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE5_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE4_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE4_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE4_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3 0x66c
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE7_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE7_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE7_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE6_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE6_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE6_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_4 0x670
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_5 0x674
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_0 0x680
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_1 0x684
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_2 0x688
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_3 0x68c
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_4 0x690
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_5 0x694
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_0 0x6a0
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_1 0x6a4
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_2 0x6a8
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_3 0x6ac
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_4 0x6b0
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_5 0x6b4
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_0 0x6c0
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_1 0x6c4
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_2 0x6c8
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_3 0x6cc
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_0 0x6e0
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_1 0x6e4
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_2 0x6e8
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_3 0x6ec
+#define EMC_PMACRO_TX_PWRD_0 0x720
+#define EMC_PMACRO_TX_PWRD_1 0x724
+#define EMC_PMACRO_TX_PWRD_2 0x728
+#define EMC_PMACRO_TX_PWRD_3 0x72c
+#define EMC_PMACRO_TX_PWRD_4 0x730
+#define EMC_PMACRO_TX_PWRD_5 0x734
+#define EMC_PMACRO_TX_SEL_CLK_SRC_0 0x740
+#define EMC_PMACRO_TX_SEL_CLK_SRC_1 0x744
+#define EMC_PMACRO_TX_SEL_CLK_SRC_2 0x748
+#define EMC_PMACRO_TX_SEL_CLK_SRC_3 0x74c
+#define EMC_PMACRO_TX_SEL_CLK_SRC_4 0x750
+#define EMC_PMACRO_TX_SEL_CLK_SRC_5 0x754
+#define EMC_PMACRO_DDLL_BYPASS 0x760
+#define EMC_PMACRO_DDLL_PWRD_0 0x770
+#define EMC_PMACRO_DDLL_PWRD_1 0x774
+#define EMC_PMACRO_DDLL_PWRD_2 0x778
+#define EMC_PMACRO_CMD_CTRL_0 0x780
+#define EMC_PMACRO_CMD_CTRL_1 0x784
+#define EMC_PMACRO_CMD_CTRL_2 0x788
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_0 0x800
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_1 0x804
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_2 0x808
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_3 0x80c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_0 0x810
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_1 0x814
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_2 0x818
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_3 0x81c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_0 0x820
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_1 0x824
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_2 0x828
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_3 0x82c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_0 0x830
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_1 0x834
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_2 0x838
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_3 0x83c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_0 0x840
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_1 0x844
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_2 0x848
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_3 0x84c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_0 0x850
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_1 0x854
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_2 0x858
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_3 0x85c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_0 0x860
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_1 0x864
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_2 0x868
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_3 0x86c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_0 0x870
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_1 0x874
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_2 0x878
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_3 0x87c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_0 0x880
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_1 0x884
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_2 0x888
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_3 0x88c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_0 0x890
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_1 0x894
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_2 0x898
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_3 0x89c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_0 0x8a0
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_1 0x8a4
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_2 0x8a8
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_3 0x8ac
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_0 0x8b0
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_1 0x8b4
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_2 0x8b8
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_3 0x8bc
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_0 0x900
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_1 0x904
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_2 0x908
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_3 0x90c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_0 0x910
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_1 0x914
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_2 0x918
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_3 0x91c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_0 0x920
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_1 0x924
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_2 0x928
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_3 0x92c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_0 0x930
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_1 0x934
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_2 0x938
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_3 0x93c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_0 0x940
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_1 0x944
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_2 0x948
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_3 0x94c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_0 0x950
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_1 0x954
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_2 0x958
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_3 0x95c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_0 0x960
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_1 0x964
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_2 0x968
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_3 0x96c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_0 0x970
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_1 0x974
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_2 0x978
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_3 0x97c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_0 0x980
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_1 0x984
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_2 0x988
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_3 0x98c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_0 0x990
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_1 0x994
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_2 0x998
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_3 0x99c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_0 0x9a0
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_1 0x9a4
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_2 0x9a8
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_3 0x9ac
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_0 0x9b0
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_1 0x9b4
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_2 0x9b8
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_3 0x9bc
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_0 0xa00
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_1 0xa04
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_2 0xa08
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_0 0xa10
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_1 0xa14
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_2 0xa18
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_0 0xa20
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_1 0xa24
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_2 0xa28
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_0 0xa30
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_1 0xa34
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_2 0xa38
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_0 0xa40
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_1 0xa44
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_2 0xa48
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_0 0xa50
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_1 0xa54
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_2 0xa58
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_0 0xa60
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_1 0xa64
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_2 0xa68
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_0 0xa70
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_1 0xa74
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_2 0xa78
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_0 0xb00
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_1 0xb04
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_2 0xb08
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_0 0xb10
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_1 0xb14
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_2 0xb18
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_0 0xb20
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_1 0xb24
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_2 0xb28
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_0 0xb30
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_1 0xb34
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_2 0xb38
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_0 0xb40
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_1 0xb44
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_2 0xb48
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_0 0xb50
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_1 0xb54
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_2 0xb58
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_0 0xb60
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_1 0xb64
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_2 0xb68
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_0 0xb70
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_1 0xb74
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_2 0xb78
+#define EMC_PMACRO_IB_VREF_DQ_0 0xbe0
+#define EMC_PMACRO_IB_VREF_DQ_1 0xbe4
+#define EMC_PMACRO_IB_VREF_DQS_0 0xbf0
+#define EMC_PMACRO_IB_VREF_DQS_1 0xbf4
+#define EMC_PMACRO_DDLL_LONG_CMD_0 0xc00
+#define EMC_PMACRO_DDLL_LONG_CMD_1 0xc04
+#define EMC_PMACRO_DDLL_LONG_CMD_2 0xc08
+#define EMC_PMACRO_DDLL_LONG_CMD_3 0xc0c
+#define EMC_PMACRO_DDLL_LONG_CMD_4 0xc10
+#define EMC_PMACRO_DDLL_LONG_CMD_5 0xc14
+#define EMC_PMACRO_DDLL_SHORT_CMD_0 0xc20
+#define EMC_PMACRO_DDLL_SHORT_CMD_1 0xc24
+#define EMC_PMACRO_DDLL_SHORT_CMD_2 0xc28
+#define EMC_PMACRO_CFG_PM_GLOBAL_0 0xc30
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE0 BIT(16)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE1 BIT(17)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE2 BIT(18)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE3 BIT(19)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE4 BIT(20)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE5 BIT(21)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE6 BIT(22)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE7 BIT(23)
+#define EMC_PMACRO_VTTGEN_CTRL_0 0xc34
+#define EMC_PMACRO_VTTGEN_CTRL_1 0xc38
+#define EMC_PMACRO_BG_BIAS_CTRL_0 0xc3c
+#define EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD BIT(0)
+#define EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD BIT(2)
+#define EMC_PMACRO_PAD_CFG_CTRL 0xc40
+#define EMC_PMACRO_ZCTRL 0xc44
+#define EMC_PMACRO_CMD_PAD_RX_CTRL 0xc50
+#define EMC_PMACRO_DATA_PAD_RX_CTRL 0xc54
+#define EMC_PMACRO_CMD_RX_TERM_MODE 0xc58
+#define EMC_PMACRO_DATA_RX_TERM_MODE 0xc5c
+#define EMC_PMACRO_CMD_PAD_TX_CTRL 0xc60
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC BIT(1)
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC BIT(9)
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC BIT(16)
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC BIT(24)
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON BIT(26)
+
+#define EMC_PMACRO_DATA_PAD_TX_CTRL 0xc64
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF BIT(0)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC BIT(1)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF BIT(8)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC BIT(9)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC BIT(16)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC BIT(24)
+
+#define EMC_PMACRO_COMMON_PAD_TX_CTRL 0xc68
+#define EMC_PMACRO_AUTOCAL_CFG_COMMON 0xc78
+#define EMC_PMACRO_AUTOCAL_CFG_COMMON_E_CAL_BYPASS_DVFS BIT(16)
+#define EMC_PMACRO_VTTGEN_CTRL_2 0xcf0
+#define EMC_PMACRO_IB_RXRT 0xcf4
+#define EMC_PMACRO_TRAINING_CTRL_0 0xcf8
+#define EMC_PMACRO_TRAINING_CTRL_0_CH0_TRAINING_E_WRPTR BIT(3)
+#define EMC_PMACRO_TRAINING_CTRL_1 0xcfc
+#define EMC_PMACRO_TRAINING_CTRL_1_CH1_TRAINING_E_WRPTR BIT(3)
+#define EMC_TRAINING_CTRL 0xe04
+#define EMC_TRAINING_QUSE_CORS_CTRL 0xe0c
+#define EMC_TRAINING_QUSE_FINE_CTRL 0xe10
+#define EMC_TRAINING_QUSE_CTRL_MISC 0xe14
+#define EMC_TRAINING_WRITE_FINE_CTRL 0xe18
+#define EMC_TRAINING_WRITE_CTRL_MISC 0xe1c
+#define EMC_TRAINING_WRITE_VREF_CTRL 0xe20
+#define EMC_TRAINING_READ_FINE_CTRL 0xe24
+#define EMC_TRAINING_READ_CTRL_MISC 0xe28
+#define EMC_TRAINING_READ_VREF_CTRL 0xe2c
+#define EMC_TRAINING_CA_FINE_CTRL 0xe30
+#define EMC_TRAINING_CA_CTRL_MISC 0xe34
+#define EMC_TRAINING_CA_CTRL_MISC1 0xe38
+#define EMC_TRAINING_CA_VREF_CTRL 0xe3c
+#define EMC_TRAINING_SETTLE 0xe44
+#define EMC_TRAINING_MPC 0xe5c
+#define EMC_TRAINING_VREF_SETTLE 0xe6c
+#define EMC_TRAINING_QUSE_VREF_CTRL 0xed0
+#define EMC_TRAINING_OPT_DQS_IB_VREF_RANK0 0xed4
+#define EMC_TRAINING_OPT_DQS_IB_VREF_RANK1 0xed8
+
+#define EMC_COPY_TABLE_PARAM_PERIODIC_FIELDS BIT(0)
+#define EMC_COPY_TABLE_PARAM_TRIM_REGS BIT(1)
+
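+/*
+ * Indices of selected registers within a timing table's burst_regs[],
+ * trim_regs[] and burst_mc_regs[] arrays; the clock-change sequence uses
+ * these to read or patch individual entries.
+ */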
+enum burst_regs_list {
+ EMC_RP_INDEX = 6,
+ EMC_R2P_INDEX = 9,
+ EMC_W2P_INDEX,
+ EMC_MRW6_INDEX = 31,
+ EMC_REFRESH_INDEX = 41,
+ EMC_PRE_REFRESH_REQ_CNT_INDEX = 43,
+ EMC_TRPAB_INDEX = 59,
+ EMC_MRW7_INDEX = 62,
+ EMC_FBIO_CFG5_INDEX = 65,
+ EMC_FBIO_CFG7_INDEX,
+ EMC_CFG_DIG_DLL_INDEX,
+ EMC_ZCAL_INTERVAL_INDEX = 139,
+ EMC_ZCAL_WAIT_CNT_INDEX,
+ EMC_MRS_WAIT_CNT_INDEX = 141,
+ EMC_DLL_CFG_0_INDEX = 144,
+ EMC_PMACRO_AUTOCAL_CFG_COMMON_INDEX = 146,
+ EMC_CFG_INDEX = 148,
+ EMC_DYN_SELF_REF_CONTROL_INDEX = 150,
+ EMC_PMACRO_CMD_PAD_TX_CTRL_INDEX = 161,
+ EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL_INDEX,
+ EMC_PMACRO_BRICK_CTRL_RFU1_INDEX = 167,
+ EMC_PMACRO_BG_BIAS_CTRL_0_INDEX = 171,
+ EMC_MRW14_INDEX = 199,
+ EMC_MRW15_INDEX = 220,
+};
+
+enum trim_regs_list {
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_INDEX = 60,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_4_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_5_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_INDEX,
+};
+
+enum burst_mc_regs_list {
+ MC_EMEM_ARB_MISC0_INDEX = 20,
+};
+
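+/* indices into a timing's dram_timings[] array */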
+enum {
+ T_RP,
+ T_FC_LPDDR4,
+ T_RFC,
+ T_PDEX,
+ RL,
+};
+
+enum {
+ AUTO_PD = 0,
+ MAN_SR = 2,
+};
+
+enum {
+ ASSEMBLY = 0,
+ ACTIVE,
+};
+
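+/* per-channel/per-device slots in the DRAM clock tree arrays */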
+enum {
+ C0D0U0,
+ C0D0U1,
+ C0D1U0,
+ C0D1U1,
+ C1D0U0,
+ C1D0U1,
+ C1D1U0,
+ C1D1U1,
+ DRAM_CLKTREE_NUM,
+};
+
+#define VREF_REGS_PER_CHANNEL_SIZE 4
+#define DRAM_TIMINGS_NUM 5
+#define BURST_REGS_PER_CHANNEL_SIZE 8
+#define TRIM_REGS_PER_CHANNEL_SIZE 10
+#define PTFV_ARRAY_SIZE 12
+#define SAVE_RESTORE_MOD_REGS_SIZE 12
+#define TRAINING_MOD_REGS_SIZE 20
+#define BURST_UP_DOWN_REGS_SIZE 24
+#define BURST_MC_REGS_SIZE 33
+#define TRIM_REGS_SIZE 138
+#define BURST_REGS_SIZE 221
+
+struct tegra210_emc_per_channel_regs {
+ u16 bank;
+ u16 offset;
+};
+
+struct tegra210_emc_table_register_offsets {
+ u16 burst[BURST_REGS_SIZE];
+ u16 trim[TRIM_REGS_SIZE];
+ u16 burst_mc[BURST_MC_REGS_SIZE];
+ u16 la_scale[BURST_UP_DOWN_REGS_SIZE];
+ struct tegra210_emc_per_channel_regs burst_per_channel[BURST_REGS_PER_CHANNEL_SIZE];
+ struct tegra210_emc_per_channel_regs trim_per_channel[TRIM_REGS_PER_CHANNEL_SIZE];
+ struct tegra210_emc_per_channel_regs vref_per_channel[VREF_REGS_PER_CHANNEL_SIZE];
+};
+
+struct tegra210_emc_timing {
+ u32 revision;
+ const char dvfs_ver[60];
+ u32 rate;
+ u32 min_volt;
+ u32 gpu_min_volt;
+ const char clock_src[32];
+ u32 clk_src_emc;
+ u32 needs_training;
+ u32 training_pattern;
+ u32 trained;
+
+ u32 periodic_training;
+ u32 trained_dram_clktree[DRAM_CLKTREE_NUM];
+ u32 current_dram_clktree[DRAM_CLKTREE_NUM];
+ u32 run_clocks;
+ u32 tree_margin;
+
+ u32 num_burst;
+ u32 num_burst_per_ch;
+ u32 num_trim;
+ u32 num_trim_per_ch;
+ u32 num_mc_regs;
+ u32 num_up_down;
+ u32 vref_num;
+ u32 training_mod_num;
+ u32 dram_timing_num;
+
+ u32 ptfv_list[PTFV_ARRAY_SIZE];
+
+ u32 burst_regs[BURST_REGS_SIZE];
+ u32 burst_reg_per_ch[BURST_REGS_PER_CHANNEL_SIZE];
+ u32 shadow_regs_ca_train[BURST_REGS_SIZE];
+ u32 shadow_regs_quse_train[BURST_REGS_SIZE];
+ u32 shadow_regs_rdwr_train[BURST_REGS_SIZE];
+
+ u32 trim_regs[TRIM_REGS_SIZE];
+ u32 trim_perch_regs[TRIM_REGS_PER_CHANNEL_SIZE];
+
+ u32 vref_perch_regs[VREF_REGS_PER_CHANNEL_SIZE];
+
+ u32 dram_timings[DRAM_TIMINGS_NUM];
+ u32 training_mod_regs[TRAINING_MOD_REGS_SIZE];
+ u32 save_restore_mod_regs[SAVE_RESTORE_MOD_REGS_SIZE];
+ u32 burst_mc_regs[BURST_MC_REGS_SIZE];
+ u32 la_scale_regs[BURST_UP_DOWN_REGS_SIZE];
+
+ u32 min_mrs_wait;
+ u32 emc_mrw;
+ u32 emc_mrw2;
+ u32 emc_mrw3;
+ u32 emc_mrw4;
+ u32 emc_mrw9;
+ u32 emc_mrs;
+ u32 emc_emrs;
+ u32 emc_emrs2;
+ u32 emc_auto_cal_config;
+ u32 emc_auto_cal_config2;
+ u32 emc_auto_cal_config3;
+ u32 emc_auto_cal_config4;
+ u32 emc_auto_cal_config5;
+ u32 emc_auto_cal_config6;
+ u32 emc_auto_cal_config7;
+ u32 emc_auto_cal_config8;
+ u32 emc_cfg_2;
+ u32 emc_sel_dpd_ctrl;
+ u32 emc_fdpd_ctrl_cmd_no_ramp;
+ u32 dll_clk_src;
+ u32 clk_out_enb_x_0_clk_enb_emc_dll;
+ u32 latency;
+};
+
+enum tegra210_emc_refresh {
+ TEGRA210_EMC_REFRESH_NOMINAL = 0,
+ TEGRA210_EMC_REFRESH_2X,
+ TEGRA210_EMC_REFRESH_4X,
+ TEGRA210_EMC_REFRESH_THROTTLE, /* 4x Refresh + derating. */
+};
+
+#define DRAM_TYPE_DDR3 0
+#define DRAM_TYPE_LPDDR4 1
+#define DRAM_TYPE_LPDDR2 2
+#define DRAM_TYPE_DDR2 3
+
+struct tegra210_emc {
+ struct tegra_mc *mc;
+ struct device *dev;
+ struct clk *clk;
+
+ /* nominal EMC frequency table */
+ struct tegra210_emc_timing *nominal;
+ /* derated EMC frequency table */
+ struct tegra210_emc_timing *derated;
+
+ /* currently selected table (nominal or derated) */
+ struct tegra210_emc_timing *timings;
+ unsigned int num_timings;
+
+ const struct tegra210_emc_table_register_offsets *offsets;
+
+ const struct tegra210_emc_sequence *sequence;
+ spinlock_t lock;
+
+ void __iomem *regs, *channel[2];
+ unsigned int num_channels;
+ unsigned int num_devices;
+ unsigned int dram_type;
+
+ struct tegra210_emc_timing *last;
+ struct tegra210_emc_timing *next;
+
+ unsigned int training_interval;
+ struct timer_list training;
+
+ enum tegra210_emc_refresh refresh;
+ unsigned int refresh_poll_interval;
+ struct timer_list refresh_timer;
+ unsigned int temperature;
+ atomic_t refresh_poll;
+
+ ktime_t clkchange_time;
+ int clkchange_delay;
+
+ unsigned long resume_rate;
+
+ struct {
+ struct dentry *root;
+ unsigned long min_rate;
+ unsigned long max_rate;
+ unsigned int temperature;
+ } debugfs;
+
+ struct tegra210_clk_emc_provider provider;
+};
+
+struct tegra210_emc_sequence {
+ u8 revision;
+ void (*set_clock)(struct tegra210_emc *emc, u32 clksrc);
+ u32 (*periodic_compensation)(struct tegra210_emc *emc);
+};
+
+static inline void emc_writel(struct tegra210_emc *emc, u32 value,
+ unsigned int offset)
+{
+ writel_relaxed(value, emc->regs + offset);
+}
+
+static inline u32 emc_readl(struct tegra210_emc *emc, unsigned int offset)
+{
+ return readl_relaxed(emc->regs + offset);
+}
+
+static inline void emc_channel_writel(struct tegra210_emc *emc,
+ unsigned int channel,
+ u32 value, unsigned int offset)
+{
+ writel_relaxed(value, emc->channel[channel] + offset);
+}
+
+static inline u32 emc_channel_readl(struct tegra210_emc *emc,
+ unsigned int channel, unsigned int offset)
+{
+ return readl_relaxed(emc->channel[channel] + offset);
+}
+
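+/*
+ * Queue a register write in the CCFIFO; the EMC applies the value to the
+ * given register offset as part of the clock-change sequence, after the
+ * programmed stall delay. A minimal, illustrative call:
+ *
+ *   ccfifo_writel(emc, EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL, 0);
+ */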
+static inline void ccfifo_writel(struct tegra210_emc *emc, u32 value,
+ unsigned int offset, u32 delay)
+{
+ writel_relaxed(value, emc->regs + EMC_CCFIFO_DATA);
+
+ value = EMC_CCFIFO_ADDR_STALL_BY_1 | EMC_CCFIFO_ADDR_STALL(delay) |
+ EMC_CCFIFO_ADDR_OFFSET(offset);
+ writel_relaxed(value, emc->regs + EMC_CCFIFO_ADDR);
+}
+
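+/* integer division of @a by @b, rounding up (ceiling division) */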
+static inline u32 div_o3(u32 a, u32 b)
+{
+ u32 result = a / b;
+
+ if ((b * result) < a)
+ return result + 1;
+
+ return result;
+}
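+
+/*
+ * Register fields follow the usual shift/mask pattern; a minimal sketch of
+ * reading one such field (illustrative only):
+ *
+ *   u32 status = emc_readl(emc, EMC_DIG_DLL_STATUS);
+ *   u32 dll_out = (status & EMC_DIG_DLL_STATUS_DLL_OUT_MASK) >>
+ *                 EMC_DIG_DLL_STATUS_DLL_OUT_SHIFT;
+ */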
+
+/* from tegra210-emc-cc-r21021.c */
+extern const struct tegra210_emc_sequence tegra210_emc_r21021;
+
+int tegra210_emc_set_refresh(struct tegra210_emc *emc,
+ enum tegra210_emc_refresh refresh);
+u32 tegra210_emc_mrr_read(struct tegra210_emc *emc, unsigned int chip,
+ unsigned int address);
+void tegra210_emc_do_clock_change(struct tegra210_emc *emc, u32 clksrc);
+void tegra210_emc_set_shadow_bypass(struct tegra210_emc *emc, int set);
+void tegra210_emc_timing_update(struct tegra210_emc *emc);
+u32 tegra210_emc_get_dll_state(struct tegra210_emc_timing *next);
+struct tegra210_emc_timing *tegra210_emc_find_timing(struct tegra210_emc *emc,
+ unsigned long rate);
+void tegra210_emc_adjust_timing(struct tegra210_emc *emc,
+ struct tegra210_emc_timing *timing);
+int tegra210_emc_wait_for_update(struct tegra210_emc *emc, unsigned int channel,
+ unsigned int offset, u32 bit_mask, bool state);
+unsigned long tegra210_emc_actual_osc_clocks(u32 in);
+u32 tegra210_emc_compensate(struct tegra210_emc_timing *next, u32 offset);
+void tegra210_emc_dll_disable(struct tegra210_emc *emc);
+void tegra210_emc_dll_enable(struct tegra210_emc *emc);
+u32 tegra210_emc_dll_prelock(struct tegra210_emc *emc, u32 clksrc);
+u32 tegra210_emc_dvfs_power_ramp_down(struct tegra210_emc *emc, u32 clk,
+ bool flip_backward);
+u32 tegra210_emc_dvfs_power_ramp_up(struct tegra210_emc *emc, u32 clk,
+ bool flip_backward);
+void tegra210_emc_reset_dram_clktree_values(struct tegra210_emc_timing *timing);
+void tegra210_emc_start_periodic_compensation(struct tegra210_emc *emc);
+
+#endif
diff --git a/drivers/memory/tegra/tegra210-mc.h b/drivers/memory/tegra/tegra210-mc.h
new file mode 100644
index 000000000..b9b91ceb4
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-mc.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef TEGRA210_MC_H
+#define TEGRA210_MC_H
+
+#include "mc.h"
+
+/* register definitions */
+#define MC_LATENCY_ALLOWANCE_AVPC_0 0x2e4
+#define MC_LATENCY_ALLOWANCE_HC_0 0x310
+#define MC_LATENCY_ALLOWANCE_HC_1 0x314
+#define MC_LATENCY_ALLOWANCE_MPCORE_0 0x320
+#define MC_LATENCY_ALLOWANCE_NVENC_0 0x328
+#define MC_LATENCY_ALLOWANCE_PPCS_0 0x344
+#define MC_LATENCY_ALLOWANCE_PPCS_1 0x348
+#define MC_LATENCY_ALLOWANCE_ISP2_0 0x370
+#define MC_LATENCY_ALLOWANCE_ISP2_1 0x374
+#define MC_LATENCY_ALLOWANCE_XUSB_0 0x37c
+#define MC_LATENCY_ALLOWANCE_XUSB_1 0x380
+#define MC_LATENCY_ALLOWANCE_TSEC_0 0x390
+#define MC_LATENCY_ALLOWANCE_VIC_0 0x394
+#define MC_LATENCY_ALLOWANCE_VI2_0 0x398
+#define MC_LATENCY_ALLOWANCE_GPU_0 0x3ac
+#define MC_LATENCY_ALLOWANCE_SDMMCA_0 0x3b8
+#define MC_LATENCY_ALLOWANCE_SDMMCAA_0 0x3bc
+#define MC_LATENCY_ALLOWANCE_SDMMC_0 0x3c0
+#define MC_LATENCY_ALLOWANCE_SDMMCAB_0 0x3c4
+#define MC_LATENCY_ALLOWANCE_GPU2_0 0x3e8
+#define MC_LATENCY_ALLOWANCE_NVDEC_0 0x3d8
+#define MC_MLL_MPCORER_PTSA_RATE 0x44c
+#define MC_FTOP_PTSA_RATE 0x50c
+#define MC_EMEM_ARB_TIMING_RFCPB 0x6c0
+#define MC_EMEM_ARB_TIMING_CCDMW 0x6c4
+#define MC_EMEM_ARB_REFPB_HP_CTRL 0x6f0
+#define MC_EMEM_ARB_REFPB_BANK_CTRL 0x6f4
+#define MC_PTSA_GRANT_DECREMENT 0x960
+#define MC_EMEM_ARB_DHYST_CTRL 0xbcc
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_0 0xbd0
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_1 0xbd4
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_2 0xbd8
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_3 0xbdc
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_4 0xbe0
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_5 0xbe4
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_6 0xbe8
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_7 0xbec
+
+#endif
diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c
new file mode 100644
index 000000000..8ab6498db
--- /dev/null
+++ b/drivers/memory/tegra/tegra210.c
@@ -0,0 +1,1290 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <dt-bindings/memory/tegra210-mc.h>
+
+#include "mc.h"
+
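+/*
+ * Memory controller clients: each entry maps a client ID to its SMMU
+ * enable bit and its latency allowance register/field.
+ */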
+static const struct tegra_mc_client tegra210_mc_clients[] = {
+ {
+ .id = 0x00,
+ .name = "ptcr",
+ .swgroup = TEGRA_SWGROUP_PTC,
+ }, {
+ .id = 0x01,
+ .name = "display0a",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x02,
+ .name = "display0ab",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x03,
+ .name = "display0b",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x04,
+ .name = "display0bb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x05,
+ .name = "display0c",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x2ec,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x06,
+ .name = "display0cb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x2f8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x0e,
+ .name = "afir",
+ .swgroup = TEGRA_SWGROUP_AFI,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x2e0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x2e,
+ },
+ },
+ }, {
+ .id = 0x0f,
+ .name = "avpcarm7r",
+ .swgroup = TEGRA_SWGROUP_AVPC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x10,
+ .name = "displayhc",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x2f0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x11,
+ .name = "displayhcb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2fc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x15,
+ .name = "hdar",
+ .swgroup = TEGRA_SWGROUP_HDA,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x24,
+ },
+ },
+ }, {
+ .id = 0x16,
+ .name = "host1xdmar",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x17,
+ .name = "host1xr",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x1c,
+ .name = "nvencsrd",
+ .swgroup = TEGRA_SWGROUP_NVENC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x23,
+ },
+ },
+ }, {
+ .id = 0x1d,
+ .name = "ppcsahbdmar",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
+ },
+ }, {
+ .id = 0x1e,
+ .name = "ppcsahbslvr",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
+ },
+ }, {
+ .id = 0x1f,
+ .name = "satar",
+ .swgroup = TEGRA_SWGROUP_SATA,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x350,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x65,
+ },
+ },
+ }, {
+ .id = 0x27,
+ .name = "mpcorer",
+ .swgroup = TEGRA_SWGROUP_MPCORE,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x2b,
+ .name = "nvencswr",
+ .swgroup = TEGRA_SWGROUP_NVENC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x31,
+ .name = "afiw",
+ .swgroup = TEGRA_SWGROUP_AFI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2e0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x32,
+ .name = "avpcarm7w",
+ .swgroup = TEGRA_SWGROUP_AVPC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x35,
+ .name = "hdaw",
+ .swgroup = TEGRA_SWGROUP_HDA,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x36,
+ .name = "host1xw",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x314,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x39,
+ .name = "mpcorew",
+ .swgroup = TEGRA_SWGROUP_MPCORE,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x3b,
+ .name = "ppcsahbdmaw",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 27,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x3c,
+ .name = "ppcsahbslvw",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x3d,
+ .name = "sataw",
+ .swgroup = TEGRA_SWGROUP_SATA,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x350,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x44,
+ .name = "ispra",
+ .swgroup = TEGRA_SWGROUP_ISP2,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x370,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x18,
+ },
+ },
+ }, {
+ .id = 0x46,
+ .name = "ispwa",
+ .swgroup = TEGRA_SWGROUP_ISP2,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x374,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x47,
+ .name = "ispwb",
+ .swgroup = TEGRA_SWGROUP_ISP2,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 7,
+ },
+ .la = {
+ .reg = 0x374,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x4a,
+ .name = "xusb_hostr",
+ .swgroup = TEGRA_SWGROUP_XUSB_HOST,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x37c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x7a,
+ },
+ },
+ }, {
+ .id = 0x4b,
+ .name = "xusb_hostw",
+ .swgroup = TEGRA_SWGROUP_XUSB_HOST,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x37c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x4c,
+ .name = "xusb_devr",
+ .swgroup = TEGRA_SWGROUP_XUSB_DEV,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x380,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x39,
+ },
+ },
+ }, {
+ .id = 0x4d,
+ .name = "xusb_devw",
+ .swgroup = TEGRA_SWGROUP_XUSB_DEV,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x380,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x4e,
+ .name = "isprab",
+ .swgroup = TEGRA_SWGROUP_ISP2B,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x384,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x18,
+ },
+ },
+ }, {
+ .id = 0x50,
+ .name = "ispwab",
+ .swgroup = TEGRA_SWGROUP_ISP2B,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x388,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x51,
+ .name = "ispwbb",
+ .swgroup = TEGRA_SWGROUP_ISP2B,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x388,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x54,
+ .name = "tsecsrd",
+ .swgroup = TEGRA_SWGROUP_TSEC,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x390,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x9b,
+ },
+ },
+ }, {
+ .id = 0x55,
+ .name = "tsecswr",
+ .swgroup = TEGRA_SWGROUP_TSEC,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x390,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x56,
+ .name = "a9avpscr",
+ .swgroup = TEGRA_SWGROUP_A9AVP,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x3a4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x57,
+ .name = "a9avpscw",
+ .swgroup = TEGRA_SWGROUP_A9AVP,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x3a4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x58,
+ .name = "gpusrd",
+ .swgroup = TEGRA_SWGROUP_GPU,
+ .regs = {
+ .smmu = {
+ /* read-only */
+ .reg = 0x230,
+ .bit = 24,
+ },
+ .la = {
+ .reg = 0x3c8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
+ },
+ }, {
+ .id = 0x59,
+ .name = "gpuswr",
+ .swgroup = TEGRA_SWGROUP_GPU,
+ .regs = {
+ .smmu = {
+ /* read-only */
+ .reg = 0x230,
+ .bit = 25,
+ },
+ .la = {
+ .reg = 0x3c8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x5a,
+ .name = "displayt",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 26,
+ },
+ .la = {
+ .reg = 0x2f0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x60,
+ .name = "sdmmcra",
+ .swgroup = TEGRA_SWGROUP_SDMMC1A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x3b8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
+ },
+ }, {
+ .id = 0x61,
+ .name = "sdmmcraa",
+ .swgroup = TEGRA_SWGROUP_SDMMC2A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x3bc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x5a,
+ },
+ },
+ }, {
+ .id = 0x62,
+ .name = "sdmmcr",
+ .swgroup = TEGRA_SWGROUP_SDMMC3A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x3c0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
+ },
+ }, {
+ .id = 0x63,
+ .name = "sdmmcrab",
+ .swgroup = TEGRA_SWGROUP_SDMMC4A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x3c4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x5a,
+ },
+ },
+ }, {
+ .id = 0x64,
+ .name = "sdmmcwa",
+ .swgroup = TEGRA_SWGROUP_SDMMC1A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x3b8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x65,
+ .name = "sdmmcwaa",
+ .swgroup = TEGRA_SWGROUP_SDMMC2A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x3bc,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x66,
+ .name = "sdmmcw",
+ .swgroup = TEGRA_SWGROUP_SDMMC3A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x3c0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x67,
+ .name = "sdmmcwab",
+ .swgroup = TEGRA_SWGROUP_SDMMC4A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 7,
+ },
+ .la = {
+ .reg = 0x3c4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x6c,
+ .name = "vicsrd",
+ .swgroup = TEGRA_SWGROUP_VIC,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x394,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
+ },
+ }, {
+ .id = 0x6d,
+ .name = "vicswr",
+ .swgroup = TEGRA_SWGROUP_VIC,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x394,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x72,
+ .name = "viw",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x398,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x73,
+ .name = "displayd",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 19,
+ },
+ .la = {
+ .reg = 0x3c8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x78,
+ .name = "nvdecsrd",
+ .swgroup = TEGRA_SWGROUP_NVDEC,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 24,
+ },
+ .la = {
+ .reg = 0x3d8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x23,
+ },
+ },
+ }, {
+ .id = 0x79,
+ .name = "nvdecswr",
+ .swgroup = TEGRA_SWGROUP_NVDEC,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 25,
+ },
+ .la = {
+ .reg = 0x3d8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x7a,
+ .name = "aper",
+ .swgroup = TEGRA_SWGROUP_APE,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 26,
+ },
+ .la = {
+ .reg = 0x3dc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x7b,
+ .name = "apew",
+ .swgroup = TEGRA_SWGROUP_APE,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 27,
+ },
+ .la = {
+ .reg = 0x3dc,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x7e,
+ .name = "nvjpgsrd",
+ .swgroup = TEGRA_SWGROUP_NVJPG,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x3e4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x23,
+ },
+ },
+ }, {
+ .id = 0x7f,
+ .name = "nvjpgswr",
+ .swgroup = TEGRA_SWGROUP_NVJPG,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x3e4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x80,
+ .name = "sesrd",
+ .swgroup = TEGRA_SWGROUP_SE,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x3e0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x2e,
+ },
+ },
+ }, {
+ .id = 0x81,
+ .name = "seswr",
+ .swgroup = TEGRA_SWGROUP_SE,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x3e0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x82,
+ .name = "axiapr",
+ .swgroup = TEGRA_SWGROUP_AXIAP,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x3a0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x83,
+ .name = "axiapw",
+ .swgroup = TEGRA_SWGROUP_AXIAP,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x3a0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x84,
+ .name = "etrr",
+ .swgroup = TEGRA_SWGROUP_ETR,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x3ec,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x85,
+ .name = "etrw",
+ .swgroup = TEGRA_SWGROUP_ETR,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x3ec,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x86,
+ .name = "tsecsrdb",
+ .swgroup = TEGRA_SWGROUP_TSECB,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x3f0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x9b,
+ },
+ },
+ }, {
+ .id = 0x87,
+ .name = "tsecswrb",
+ .swgroup = TEGRA_SWGROUP_TSECB,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 7,
+ },
+ .la = {
+ .reg = 0x3f0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x88,
+ .name = "gpusrd2",
+ .swgroup = TEGRA_SWGROUP_GPU,
+ .regs = {
+ .smmu = {
+ /* read-only */
+ .reg = 0xb98,
+ .bit = 8,
+ },
+ .la = {
+ .reg = 0x3e8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
+ },
+ }, {
+ .id = 0x89,
+ .name = "gpuswr2",
+ .swgroup = TEGRA_SWGROUP_GPU,
+ .regs = {
+ .smmu = {
+ /* read-only */
+ .reg = 0xb98,
+ .bit = 9,
+ },
+ .la = {
+ .reg = 0x3e8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ },
+};
+
+static const struct tegra_smmu_swgroup tegra210_swgroups[] = {
+ { .name = "afi", .swgroup = TEGRA_SWGROUP_AFI, .reg = 0x238 },
+ { .name = "avpc", .swgroup = TEGRA_SWGROUP_AVPC, .reg = 0x23c },
+ { .name = "dc", .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 },
+ { .name = "dcb", .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 },
+ { .name = "hc", .swgroup = TEGRA_SWGROUP_HC, .reg = 0x250 },
+ { .name = "hda", .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 },
+ { .name = "isp2", .swgroup = TEGRA_SWGROUP_ISP2, .reg = 0x258 },
+ { .name = "nvenc", .swgroup = TEGRA_SWGROUP_NVENC, .reg = 0x264 },
+ { .name = "nv", .swgroup = TEGRA_SWGROUP_NV, .reg = 0x268 },
+ { .name = "nv2", .swgroup = TEGRA_SWGROUP_NV2, .reg = 0x26c },
+ { .name = "ppcs", .swgroup = TEGRA_SWGROUP_PPCS, .reg = 0x270 },
+ { .name = "sata", .swgroup = TEGRA_SWGROUP_SATA, .reg = 0x274 },
+ { .name = "vi", .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 },
+ { .name = "vic", .swgroup = TEGRA_SWGROUP_VIC, .reg = 0x284 },
+ { .name = "xusb_host", .swgroup = TEGRA_SWGROUP_XUSB_HOST, .reg = 0x288 },
+ { .name = "xusb_dev", .swgroup = TEGRA_SWGROUP_XUSB_DEV, .reg = 0x28c },
+ { .name = "a9avp", .swgroup = TEGRA_SWGROUP_A9AVP, .reg = 0x290 },
+ { .name = "tsec", .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 },
+ { .name = "ppcs1", .swgroup = TEGRA_SWGROUP_PPCS1, .reg = 0x298 },
+ { .name = "dc1", .swgroup = TEGRA_SWGROUP_DC1, .reg = 0xa88 },
+ { .name = "sdmmc1a", .swgroup = TEGRA_SWGROUP_SDMMC1A, .reg = 0xa94 },
+ { .name = "sdmmc2a", .swgroup = TEGRA_SWGROUP_SDMMC2A, .reg = 0xa98 },
+ { .name = "sdmmc3a", .swgroup = TEGRA_SWGROUP_SDMMC3A, .reg = 0xa9c },
+ { .name = "sdmmc4a", .swgroup = TEGRA_SWGROUP_SDMMC4A, .reg = 0xaa0 },
+ { .name = "isp2b", .swgroup = TEGRA_SWGROUP_ISP2B, .reg = 0xaa4 },
+ { .name = "gpu", .swgroup = TEGRA_SWGROUP_GPU, .reg = 0xaac },
+ { .name = "ppcs2", .swgroup = TEGRA_SWGROUP_PPCS2, .reg = 0xab0 },
+ { .name = "nvdec", .swgroup = TEGRA_SWGROUP_NVDEC, .reg = 0xab4 },
+ { .name = "ape", .swgroup = TEGRA_SWGROUP_APE, .reg = 0xab8 },
+ { .name = "se", .swgroup = TEGRA_SWGROUP_SE, .reg = 0xabc },
+ { .name = "nvjpg", .swgroup = TEGRA_SWGROUP_NVJPG, .reg = 0xac0 },
+ { .name = "hc1", .swgroup = TEGRA_SWGROUP_HC1, .reg = 0xac4 },
+ { .name = "se1", .swgroup = TEGRA_SWGROUP_SE1, .reg = 0xac8 },
+ { .name = "axiap", .swgroup = TEGRA_SWGROUP_AXIAP, .reg = 0xacc },
+ { .name = "etr", .swgroup = TEGRA_SWGROUP_ETR, .reg = 0xad0 },
+ { .name = "tsecb", .swgroup = TEGRA_SWGROUP_TSECB, .reg = 0xad4 },
+ { .name = "tsec1", .swgroup = TEGRA_SWGROUP_TSEC1, .reg = 0xad8 },
+ { .name = "tsecb1", .swgroup = TEGRA_SWGROUP_TSECB1, .reg = 0xadc },
+ { .name = "nvdec1", .swgroup = TEGRA_SWGROUP_NVDEC1, .reg = 0xae0 },
+};
+
+static const unsigned int tegra210_group_display[] = {
+ TEGRA_SWGROUP_DC,
+ TEGRA_SWGROUP_DCB,
+};
+
+static const struct tegra_smmu_group_soc tegra210_groups[] = {
+ {
+ .name = "display",
+ .swgroups = tegra210_group_display,
+ .num_swgroups = ARRAY_SIZE(tegra210_group_display),
+ },
+};
+
+static const struct tegra_smmu_soc tegra210_smmu_soc = {
+ .clients = tegra210_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra210_mc_clients),
+ .swgroups = tegra210_swgroups,
+ .num_swgroups = ARRAY_SIZE(tegra210_swgroups),
+ .groups = tegra210_groups,
+ .num_groups = ARRAY_SIZE(tegra210_groups),
+ .supports_round_robin_arbitration = true,
+ .supports_request_limit = true,
+ .num_tlb_lines = 48,
+ .num_asids = 128,
+};
+
+#define TEGRA210_MC_RESET(_name, _control, _status, _bit) \
+ { \
+ .name = #_name, \
+ .id = TEGRA210_MC_RESET_##_name, \
+ .control = _control, \
+ .status = _status, \
+ .bit = _bit, \
+ }
+
+static const struct tegra_mc_reset tegra210_mc_resets[] = {
+ TEGRA210_MC_RESET(AFI, 0x200, 0x204, 0),
+ TEGRA210_MC_RESET(AVPC, 0x200, 0x204, 1),
+ TEGRA210_MC_RESET(DC, 0x200, 0x204, 2),
+ TEGRA210_MC_RESET(DCB, 0x200, 0x204, 3),
+ TEGRA210_MC_RESET(HC, 0x200, 0x204, 6),
+ TEGRA210_MC_RESET(HDA, 0x200, 0x204, 7),
+ TEGRA210_MC_RESET(ISP2, 0x200, 0x204, 8),
+ TEGRA210_MC_RESET(MPCORE, 0x200, 0x204, 9),
+ TEGRA210_MC_RESET(NVENC, 0x200, 0x204, 11),
+ TEGRA210_MC_RESET(PPCS, 0x200, 0x204, 14),
+ TEGRA210_MC_RESET(SATA, 0x200, 0x204, 15),
+ TEGRA210_MC_RESET(VI, 0x200, 0x204, 17),
+ TEGRA210_MC_RESET(VIC, 0x200, 0x204, 18),
+ TEGRA210_MC_RESET(XUSB_HOST, 0x200, 0x204, 19),
+ TEGRA210_MC_RESET(XUSB_DEV, 0x200, 0x204, 20),
+ TEGRA210_MC_RESET(A9AVP, 0x200, 0x204, 21),
+ TEGRA210_MC_RESET(TSEC, 0x200, 0x204, 22),
+ TEGRA210_MC_RESET(SDMMC1, 0x200, 0x204, 29),
+ TEGRA210_MC_RESET(SDMMC2, 0x200, 0x204, 30),
+ TEGRA210_MC_RESET(SDMMC3, 0x200, 0x204, 31),
+ TEGRA210_MC_RESET(SDMMC4, 0x970, 0x974, 0),
+ TEGRA210_MC_RESET(ISP2B, 0x970, 0x974, 1),
+ TEGRA210_MC_RESET(GPU, 0x970, 0x974, 2),
+ TEGRA210_MC_RESET(NVDEC, 0x970, 0x974, 5),
+ TEGRA210_MC_RESET(APE, 0x970, 0x974, 6),
+ TEGRA210_MC_RESET(SE, 0x970, 0x974, 7),
+ TEGRA210_MC_RESET(NVJPG, 0x970, 0x974, 8),
+ TEGRA210_MC_RESET(AXIAP, 0x970, 0x974, 11),
+ TEGRA210_MC_RESET(ETR, 0x970, 0x974, 12),
+ TEGRA210_MC_RESET(TSECB, 0x970, 0x974, 13),
+};
+
+const struct tegra_mc_soc tegra210_mc_soc = {
+ .clients = tegra210_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra210_mc_clients),
+ .num_address_bits = 34,
+ .atom_size = 64,
+ .client_id_mask = 0xff,
+ .smmu = &tegra210_smmu_soc,
+ .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
+ MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
+ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
+ .reset_ops = &tegra_mc_reset_ops_common,
+ .resets = tegra210_mc_resets,
+ .num_resets = ARRAY_SIZE(tegra210_mc_resets),
+ .ops = &tegra30_mc_ops,
+};
diff --git a/drivers/memory/tegra/tegra234.c b/drivers/memory/tegra/tegra234.c
new file mode 100644
index 000000000..2845041f3
--- /dev/null
+++ b/drivers/memory/tegra/tegra234.c
@@ -0,0 +1,1065 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022-2023, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <soc/tegra/mc.h>
+
+#include <dt-bindings/memory/tegra234-mc.h>
+#include <linux/interconnect.h>
+#include <linux/tegra-icc.h>
+
+#include <soc/tegra/bpmp.h>
+#include "mc.h"
+
+/*
+ * MC client entries are sorted in increasing order of their
+ * override and security register offsets.
+ */
+static const struct tegra_mc_client tegra234_mc_clients[] = {
+ {
+ .id = TEGRA234_MEMORY_CLIENT_HDAR,
+ .name = "hdar",
+ .bpmp_id = TEGRA_ICC_BPMP_HDA,
+ .type = TEGRA_ICC_ISO_AUDIO,
+ .sid = TEGRA234_SID_HDA,
+ .regs = {
+ .sid = {
+ .override = 0xa8,
+ .security = 0xac,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVENCSRD,
+ .name = "nvencsrd",
+ .bpmp_id = TEGRA_ICC_BPMP_NVENC,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_NVENC,
+ .regs = {
+ .sid = {
+ .override = 0xe0,
+ .security = 0xe4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE6AR,
+ .name = "pcie6ar",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_6,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE6,
+ .regs = {
+ .sid = {
+ .override = 0x140,
+ .security = 0x144,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE6AW,
+ .name = "pcie6aw",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_6,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE6,
+ .regs = {
+ .sid = {
+ .override = 0x148,
+ .security = 0x14c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE7AR,
+ .name = "pcie7ar",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_7,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE7,
+ .regs = {
+ .sid = {
+ .override = 0x150,
+ .security = 0x154,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVENCSWR,
+ .name = "nvencswr",
+ .bpmp_id = TEGRA_ICC_BPMP_NVENC,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_NVENC,
+ .regs = {
+ .sid = {
+ .override = 0x158,
+ .security = 0x15c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA0RDB,
+ .name = "dla0rdb",
+ .sid = TEGRA234_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x160,
+ .security = 0x164,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA0RDB1,
+ .name = "dla0rdb1",
+ .sid = TEGRA234_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x168,
+ .security = 0x16c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA0WRB,
+ .name = "dla0wrb",
+ .sid = TEGRA234_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x170,
+ .security = 0x174,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1RDB,
+ .name = "dla1rdb",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x178,
+ .security = 0x17c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE7AW,
+ .name = "pcie7aw",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_7,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE7,
+ .regs = {
+ .sid = {
+ .override = 0x180,
+ .security = 0x184,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE8AR,
+ .name = "pcie8ar",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_8,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE8,
+ .regs = {
+ .sid = {
+ .override = 0x190,
+ .security = 0x194,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_HDAW,
+ .name = "hdaw",
+ .bpmp_id = TEGRA_ICC_BPMP_HDA,
+ .type = TEGRA_ICC_ISO_AUDIO,
+ .sid = TEGRA234_SID_HDA,
+ .regs = {
+ .sid = {
+ .override = 0x1a8,
+ .security = 0x1ac,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE8AW,
+ .name = "pcie8aw",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_8,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE8,
+ .regs = {
+ .sid = {
+ .override = 0x1d8,
+ .security = 0x1dc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE9AR,
+ .name = "pcie9ar",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_9,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE9,
+ .regs = {
+ .sid = {
+ .override = 0x1e0,
+ .security = 0x1e4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE6AR1,
+ .name = "pcie6ar1",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_6,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE6,
+ .regs = {
+ .sid = {
+ .override = 0x1e8,
+ .security = 0x1ec,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE9AW,
+ .name = "pcie9aw",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_9,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE9,
+ .regs = {
+ .sid = {
+ .override = 0x1f0,
+ .security = 0x1f4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE10AR,
+ .name = "pcie10ar",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_10,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE10,
+ .regs = {
+ .sid = {
+ .override = 0x1f8,
+ .security = 0x1fc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE10AW,
+ .name = "pcie10aw",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_10,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE10,
+ .regs = {
+ .sid = {
+ .override = 0x200,
+ .security = 0x204,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE10AR1,
+ .name = "pcie10ar1",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_10,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE10,
+ .regs = {
+ .sid = {
+ .override = 0x240,
+ .security = 0x244,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE7AR1,
+ .name = "pcie7ar1",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_7,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE7,
+ .regs = {
+ .sid = {
+ .override = 0x248,
+ .security = 0x24c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEARD,
+ .name = "mgbeard",
+ .bpmp_id = TEGRA_ICC_BPMP_EQOS,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_MGBE,
+ .regs = {
+ .sid = {
+ .override = 0x2c0,
+ .security = 0x2c4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEBRD,
+ .name = "mgbebrd",
+ .bpmp_id = TEGRA_ICC_BPMP_EQOS,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_MGBE_VF1,
+ .regs = {
+ .sid = {
+ .override = 0x2c8,
+ .security = 0x2cc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBECRD,
+ .name = "mgbecrd",
+ .bpmp_id = TEGRA_ICC_BPMP_EQOS,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_MGBE_VF2,
+ .regs = {
+ .sid = {
+ .override = 0x2d0,
+ .security = 0x2d4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEDRD,
+ .name = "mgbedrd",
+ .bpmp_id = TEGRA_ICC_BPMP_EQOS,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_MGBE_VF3,
+ .regs = {
+ .sid = {
+ .override = 0x2d8,
+ .security = 0x2dc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEAWR,
+ .bpmp_id = TEGRA_ICC_BPMP_EQOS,
+ .type = TEGRA_ICC_NISO,
+ .name = "mgbeawr",
+ .sid = TEGRA234_SID_MGBE,
+ .regs = {
+ .sid = {
+ .override = 0x2e0,
+ .security = 0x2e4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEBWR,
+ .name = "mgbebwr",
+ .bpmp_id = TEGRA_ICC_BPMP_EQOS,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_MGBE_VF1,
+ .regs = {
+ .sid = {
+ .override = 0x2f8,
+ .security = 0x2fc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBECWR,
+ .name = "mgbecwr",
+ .bpmp_id = TEGRA_ICC_BPMP_EQOS,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_MGBE_VF2,
+ .regs = {
+ .sid = {
+ .override = 0x308,
+ .security = 0x30c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_SDMMCRAB,
+ .name = "sdmmcrab",
+ .bpmp_id = TEGRA_ICC_BPMP_SDMMC_4,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_SDMMC4,
+ .regs = {
+ .sid = {
+ .override = 0x318,
+ .security = 0x31c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEDWR,
+ .name = "mgbedwr",
+ .bpmp_id = TEGRA_ICC_BPMP_EQOS,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_MGBE_VF3,
+ .regs = {
+ .sid = {
+ .override = 0x328,
+ .security = 0x32c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_SDMMCWAB,
+ .name = "sdmmcwab",
+ .bpmp_id = TEGRA_ICC_BPMP_SDMMC_4,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_SDMMC4,
+ .regs = {
+ .sid = {
+ .override = 0x338,
+ .security = 0x33c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_VICSRD,
+ .name = "vicsrd",
+ .bpmp_id = TEGRA_ICC_BPMP_VIC,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_VIC,
+ .regs = {
+ .sid = {
+ .override = 0x360,
+ .security = 0x364,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_VICSWR,
+ .name = "vicswr",
+ .bpmp_id = TEGRA_ICC_BPMP_VIC,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_VIC,
+ .regs = {
+ .sid = {
+ .override = 0x368,
+ .security = 0x36c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1RDB1,
+ .name = "dla1rdb1",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x370,
+ .security = 0x374,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1WRB,
+ .name = "dla1wrb",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x378,
+ .security = 0x37c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_VI2W,
+ .name = "vi2w",
+ .bpmp_id = TEGRA_ICC_BPMP_VI2,
+ .type = TEGRA_ICC_ISO_VI,
+ .sid = TEGRA234_SID_ISO_VI2,
+ .regs = {
+ .sid = {
+ .override = 0x380,
+ .security = 0x384,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_VI2FALR,
+ .name = "vi2falr",
+ .bpmp_id = TEGRA_ICC_BPMP_VI2FAL,
+ .type = TEGRA_ICC_ISO_VIFAL,
+ .sid = TEGRA234_SID_ISO_VI2FALC,
+ .regs = {
+ .sid = {
+ .override = 0x388,
+ .security = 0x38c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVDECSRD,
+ .name = "nvdecsrd",
+ .bpmp_id = TEGRA_ICC_BPMP_NVDEC,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_NVDEC,
+ .regs = {
+ .sid = {
+ .override = 0x3c0,
+ .security = 0x3c4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVDECSWR,
+ .name = "nvdecswr",
+ .bpmp_id = TEGRA_ICC_BPMP_NVDEC,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_NVDEC,
+ .regs = {
+ .sid = {
+ .override = 0x3c8,
+ .security = 0x3cc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_APER,
+ .name = "aper",
+ .bpmp_id = TEGRA_ICC_BPMP_APE,
+ .type = TEGRA_ICC_ISO_AUDIO,
+ .sid = TEGRA234_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x3d0,
+ .security = 0x3d4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_APEW,
+ .name = "apew",
+ .bpmp_id = TEGRA_ICC_BPMP_APE,
+ .type = TEGRA_ICC_ISO_AUDIO,
+ .sid = TEGRA234_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x3d8,
+ .security = 0x3dc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_VI2FALW,
+ .name = "vi2falw",
+ .bpmp_id = TEGRA_ICC_BPMP_VI2FAL,
+ .type = TEGRA_ICC_ISO_VIFAL,
+ .sid = TEGRA234_SID_ISO_VI2FALC,
+ .regs = {
+ .sid = {
+ .override = 0x3e0,
+ .security = 0x3e4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVJPGSRD,
+ .name = "nvjpgsrd",
+ .bpmp_id = TEGRA_ICC_BPMP_NVJPG_0,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_NVJPG,
+ .regs = {
+ .sid = {
+ .override = 0x3f0,
+ .security = 0x3f4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVJPGSWR,
+ .name = "nvjpgswr",
+ .bpmp_id = TEGRA_ICC_BPMP_NVJPG_0,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_NVJPG,
+ .regs = {
+ .sid = {
+ .override = 0x3f8,
+ .security = 0x3fc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVDISPLAYR,
+ .name = "nvdisplayr",
+ .bpmp_id = TEGRA_ICC_BPMP_DISPLAY,
+ .type = TEGRA_ICC_ISO_DISPLAY,
+ .sid = TEGRA234_SID_ISO_NVDISPLAY,
+ .regs = {
+ .sid = {
+ .override = 0x490,
+ .security = 0x494,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_BPMPR,
+ .name = "bpmpr",
+ .sid = TEGRA234_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x498,
+ .security = 0x49c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_BPMPW,
+ .name = "bpmpw",
+ .sid = TEGRA234_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4a0,
+ .security = 0x4a4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_BPMPDMAR,
+ .name = "bpmpdmar",
+ .sid = TEGRA234_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4a8,
+ .security = 0x4ac,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_BPMPDMAW,
+ .name = "bpmpdmaw",
+ .sid = TEGRA234_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4b0,
+ .security = 0x4b4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_APEDMAR,
+ .name = "apedmar",
+ .bpmp_id = TEGRA_ICC_BPMP_APEDMA,
+ .type = TEGRA_ICC_ISO_AUDIO,
+ .sid = TEGRA234_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x4f8,
+ .security = 0x4fc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_APEDMAW,
+ .name = "apedmaw",
+ .bpmp_id = TEGRA_ICC_BPMP_APEDMA,
+ .type = TEGRA_ICC_ISO_AUDIO,
+ .sid = TEGRA234_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x500,
+ .security = 0x504,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVDISPLAYR1,
+ .name = "nvdisplayr1",
+ .bpmp_id = TEGRA_ICC_BPMP_DISPLAY,
+ .type = TEGRA_ICC_ISO_DISPLAY,
+ .sid = TEGRA234_SID_ISO_NVDISPLAY,
+ .regs = {
+ .sid = {
+ .override = 0x508,
+ .security = 0x50c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA0RDA,
+ .name = "dla0rda",
+ .sid = TEGRA234_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x5f0,
+ .security = 0x5f4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA0FALRDB,
+ .name = "dla0falrdb",
+ .sid = TEGRA234_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x5f8,
+ .security = 0x5fc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA0WRA,
+ .name = "dla0wra",
+ .sid = TEGRA234_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x600,
+ .security = 0x604,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA0FALWRB,
+ .name = "dla0falwrb",
+ .sid = TEGRA234_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x608,
+ .security = 0x60c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1RDA,
+ .name = "dla1rda",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x610,
+ .security = 0x614,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1FALRDB,
+ .name = "dla1falrdb",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x618,
+ .security = 0x61c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1WRA,
+ .name = "dla1wra",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x620,
+ .security = 0x624,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1FALWRB,
+ .name = "dla1falwrb",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x628,
+ .security = 0x62c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE0R,
+ .name = "pcie0r",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_0,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE0,
+ .regs = {
+ .sid = {
+ .override = 0x6c0,
+ .security = 0x6c4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE0W,
+ .name = "pcie0w",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_0,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE0,
+ .regs = {
+ .sid = {
+ .override = 0x6c8,
+ .security = 0x6cc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE1R,
+ .name = "pcie1r",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_1,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE1,
+ .regs = {
+ .sid = {
+ .override = 0x6d0,
+ .security = 0x6d4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE1W,
+ .name = "pcie1w",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_1,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE1,
+ .regs = {
+ .sid = {
+ .override = 0x6d8,
+ .security = 0x6dc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE2AR,
+ .name = "pcie2ar",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_2,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE2,
+ .regs = {
+ .sid = {
+ .override = 0x6e0,
+ .security = 0x6e4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE2AW,
+ .name = "pcie2aw",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_2,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE2,
+ .regs = {
+ .sid = {
+ .override = 0x6e8,
+ .security = 0x6ec,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE3R,
+ .name = "pcie3r",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_3,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE3,
+ .regs = {
+ .sid = {
+ .override = 0x6f0,
+ .security = 0x6f4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE3W,
+ .name = "pcie3w",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_3,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE3,
+ .regs = {
+ .sid = {
+ .override = 0x6f8,
+ .security = 0x6fc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE4R,
+ .name = "pcie4r",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_4,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE4,
+ .regs = {
+ .sid = {
+ .override = 0x700,
+ .security = 0x704,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE4W,
+ .name = "pcie4w",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_4,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE4,
+ .regs = {
+ .sid = {
+ .override = 0x708,
+ .security = 0x70c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE5R,
+ .name = "pcie5r",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_5,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE5,
+ .regs = {
+ .sid = {
+ .override = 0x710,
+ .security = 0x714,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE5W,
+ .name = "pcie5w",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_5,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE5,
+ .regs = {
+ .sid = {
+ .override = 0x718,
+ .security = 0x71c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA0RDA1,
+ .name = "dla0rda1",
+ .sid = TEGRA234_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x748,
+ .security = 0x74c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1RDA1,
+ .name = "dla1rda1",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x750,
+ .security = 0x754,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE5R1,
+ .name = "pcie5r1",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_5,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE5,
+ .regs = {
+ .sid = {
+ .override = 0x778,
+ .security = 0x77c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVJPG1SRD,
+ .name = "nvjpg1srd",
+ .bpmp_id = TEGRA_ICC_BPMP_NVJPG_1,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_NVJPG1,
+ .regs = {
+ .sid = {
+ .override = 0x918,
+ .security = 0x91c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVJPG1SWR,
+ .name = "nvjpg1swr",
+ .bpmp_id = TEGRA_ICC_BPMP_NVJPG_1,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_NVJPG1,
+ .regs = {
+ .sid = {
+ .override = 0x920,
+ .security = 0x924,
+ },
+ },
+ }, {
+ .id = TEGRA_ICC_MC_CPU_CLUSTER0,
+ .name = "sw_cluster0",
+ .bpmp_id = TEGRA_ICC_BPMP_CPU_CLUSTER0,
+ .type = TEGRA_ICC_NISO,
+ }, {
+ .id = TEGRA_ICC_MC_CPU_CLUSTER1,
+ .name = "sw_cluster1",
+ .bpmp_id = TEGRA_ICC_BPMP_CPU_CLUSTER1,
+ .type = TEGRA_ICC_NISO,
+ }, {
+ .id = TEGRA_ICC_MC_CPU_CLUSTER2,
+ .name = "sw_cluster2",
+ .bpmp_id = TEGRA_ICC_BPMP_CPU_CLUSTER2,
+ .type = TEGRA_ICC_NISO,
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVL1R,
+ .name = "nvl1r",
+ .bpmp_id = TEGRA_ICC_BPMP_GPU,
+ .type = TEGRA_ICC_NISO,
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVL1W,
+ .name = "nvl1w",
+ .bpmp_id = TEGRA_ICC_BPMP_GPU,
+ .type = TEGRA_ICC_NISO,
+ },
+};
+
+/*
+ * tegra234_mc_icc_set() - Pass MC client info to the BPMP-FW
+ * @src: ICC node for Memory Controller's (MC) Client
+ * @dst: ICC node for Memory Controller (MC)
+ *
+ * Pass the current request info from the MC to the BPMP-FW, where the
+ * LA and PTSA registers are accessed and the final EMC frequency is set
+ * based on client_id, type, latency and bandwidth.
+ * icc_set_bw() makes set_bw calls for both the MC and EMC providers in
+ * sequence. Both calls are protected by 'mutex_lock(&icc_lock)', so the
+ * data passed won't be updated by concurrent set calls from other
+ * clients.
+ */
+static int tegra234_mc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(dst->provider);
+ struct mrq_bwmgr_int_request bwmgr_req = { 0 };
+ struct mrq_bwmgr_int_response bwmgr_resp = { 0 };
+ const struct tegra_mc_client *pclient = src->data;
+ struct tegra_bpmp_message msg;
+ int ret;
+
+ /*
+ * The same src and dst node is passed during boot from icc_node_add().
+ * This could be used to pre-initialize and set bandwidth for all clients
+ * before their drivers are loaded. We skip this case because, for us, the
+ * pre-initialization has already been done by the bootloader (MB2) and
+ * the BPMP-FW.
+ */
+ if (src->id == dst->id)
+ return 0;
+
+ if (!mc->bwmgr_mrq_supported)
+ return 0;
+
+ if (!mc->bpmp) {
+ dev_err(mc->dev, "BPMP reference NULL\n");
+ return -ENOENT;
+ }
+
+ if (pclient->type == TEGRA_ICC_NISO)
+ bwmgr_req.bwmgr_calc_set_req.niso_bw = src->avg_bw;
+ else
+ bwmgr_req.bwmgr_calc_set_req.iso_bw = src->avg_bw;
+
+ bwmgr_req.bwmgr_calc_set_req.client_id = pclient->bpmp_id;
+
+ bwmgr_req.cmd = CMD_BWMGR_INT_CALC_AND_SET;
+ bwmgr_req.bwmgr_calc_set_req.mc_floor = src->peak_bw;
+ bwmgr_req.bwmgr_calc_set_req.floor_unit = BWMGR_INT_UNIT_KBPS;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_BWMGR_INT;
+ msg.tx.data = &bwmgr_req;
+ msg.tx.size = sizeof(bwmgr_req);
+ msg.rx.data = &bwmgr_resp;
+ msg.rx.size = sizeof(bwmgr_resp);
+
+ if (pclient->bpmp_id >= TEGRA_ICC_BPMP_CPU_CLUSTER0 &&
+ pclient->bpmp_id <= TEGRA_ICC_BPMP_CPU_CLUSTER2)
+ msg.flags = TEGRA_BPMP_MESSAGE_RESET;
+
+ ret = tegra_bpmp_transfer(mc->bpmp, &msg);
+ if (ret < 0) {
+ dev_err(mc->dev, "BPMP transfer failed: %d\n", ret);
+ goto error;
+ }
+ if (msg.rx.ret < 0) {
+ pr_err("failed to set bandwidth for %u: %d\n",
+ bwmgr_req.bwmgr_calc_set_req.client_id, msg.rx.ret);
+ ret = -EINVAL;
+ }
+
+error:
+ return ret;
+}
+
+static int tegra234_mc_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ struct icc_provider *p = node->provider;
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(p);
+
+ if (!mc->bwmgr_mrq_supported)
+ return 0;
+
+ if (node->id == TEGRA_ICC_MC_CPU_CLUSTER0 ||
+ node->id == TEGRA_ICC_MC_CPU_CLUSTER1 ||
+ node->id == TEGRA_ICC_MC_CPU_CLUSTER2) {
+ if (mc)
+ peak_bw = peak_bw * mc->num_channels;
+ }
+
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
+
+static int tegra234_mc_icc_get_init_bw(struct icc_node *node, u32 *avg, u32 *peak)
+{
+ *avg = 0;
+ *peak = 0;
+
+ return 0;
+}
+
+static const struct tegra_mc_icc_ops tegra234_mc_icc_ops = {
+ .xlate = tegra_mc_icc_xlate,
+ .aggregate = tegra234_mc_icc_aggregate,
+ .get_bw = tegra234_mc_icc_get_init_bw,
+ .set = tegra234_mc_icc_set,
+};
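+
+/*
+ * Illustrative consumer-side sketch (not part of this driver): a client
+ * driver that wants to request memory bandwidth would typically do
+ * something along the lines of
+ *
+ *	path = devm_of_icc_get(dev, NULL);
+ *	err = icc_set_bw(path, kBps_to_icc(avg_kbps), kBps_to_icc(peak_kbps));
+ *
+ * which in turn ends up in tegra234_mc_icc_aggregate() and
+ * tegra234_mc_icc_set() above. The variable names here are hypothetical.
+ */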
+
+const struct tegra_mc_soc tegra234_mc_soc = {
+ .num_clients = ARRAY_SIZE(tegra234_mc_clients),
+ .clients = tegra234_mc_clients,
+ .num_address_bits = 40,
+ .num_channels = 16,
+ .client_id_mask = 0x1ff,
+ .intmask = MC_INT_DECERR_ROUTE_SANITY |
+ MC_INT_DECERR_GENERALIZED_CARVEOUT | MC_INT_DECERR_MTS |
+ MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
+ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
+ .has_addr_hi_reg = true,
+ .ops = &tegra186_mc_ops,
+ .icc_ops = &tegra234_mc_icc_ops,
+ .ch_intmask = 0x0000ff00,
+ .global_intstatus_channel_shift = 8,
+ /*
+ * Additionally, there are lite carveouts but those are not currently
+ * supported.
+ */
+ .num_carveouts = 32,
+};
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
new file mode 100644
index 000000000..9eae25c57
--- /dev/null
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -0,0 +1,1751 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Tegra30 External Memory Controller driver
+ *
+ * Based on downstream driver from NVIDIA and tegra124-emc.c
+ * Copyright (C) 2011-2014 NVIDIA Corporation
+ *
+ * Author: Dmitry Osipenko <digetx@gmail.com>
+ * Copyright (C) 2019 GRATE-DRIVER project
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interconnect-provider.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/types.h>
+
+#include <soc/tegra/common.h>
+#include <soc/tegra/fuse.h>
+
+#include "../jedec_ddr.h"
+#include "../of_memory.h"
+
+#include "mc.h"
+
+#define EMC_INTSTATUS 0x000
+#define EMC_INTMASK 0x004
+#define EMC_DBG 0x008
+#define EMC_ADR_CFG 0x010
+#define EMC_CFG 0x00c
+#define EMC_REFCTRL 0x020
+#define EMC_TIMING_CONTROL 0x028
+#define EMC_RC 0x02c
+#define EMC_RFC 0x030
+#define EMC_RAS 0x034
+#define EMC_RP 0x038
+#define EMC_R2W 0x03c
+#define EMC_W2R 0x040
+#define EMC_R2P 0x044
+#define EMC_W2P 0x048
+#define EMC_RD_RCD 0x04c
+#define EMC_WR_RCD 0x050
+#define EMC_RRD 0x054
+#define EMC_REXT 0x058
+#define EMC_WDV 0x05c
+#define EMC_QUSE 0x060
+#define EMC_QRST 0x064
+#define EMC_QSAFE 0x068
+#define EMC_RDV 0x06c
+#define EMC_REFRESH 0x070
+#define EMC_BURST_REFRESH_NUM 0x074
+#define EMC_PDEX2WR 0x078
+#define EMC_PDEX2RD 0x07c
+#define EMC_PCHG2PDEN 0x080
+#define EMC_ACT2PDEN 0x084
+#define EMC_AR2PDEN 0x088
+#define EMC_RW2PDEN 0x08c
+#define EMC_TXSR 0x090
+#define EMC_TCKE 0x094
+#define EMC_TFAW 0x098
+#define EMC_TRPAB 0x09c
+#define EMC_TCLKSTABLE 0x0a0
+#define EMC_TCLKSTOP 0x0a4
+#define EMC_TREFBW 0x0a8
+#define EMC_QUSE_EXTRA 0x0ac
+#define EMC_ODT_WRITE 0x0b0
+#define EMC_ODT_READ 0x0b4
+#define EMC_WEXT 0x0b8
+#define EMC_CTT 0x0bc
+#define EMC_MRS_WAIT_CNT 0x0c8
+#define EMC_MRS 0x0cc
+#define EMC_EMRS 0x0d0
+#define EMC_SELF_REF 0x0e0
+#define EMC_MRW 0x0e8
+#define EMC_MRR 0x0ec
+#define EMC_XM2DQSPADCTRL3 0x0f8
+#define EMC_FBIO_SPARE 0x100
+#define EMC_FBIO_CFG5 0x104
+#define EMC_FBIO_CFG6 0x114
+#define EMC_CFG_RSV 0x120
+#define EMC_AUTO_CAL_CONFIG 0x2a4
+#define EMC_AUTO_CAL_INTERVAL 0x2a8
+#define EMC_AUTO_CAL_STATUS 0x2ac
+#define EMC_STATUS 0x2b4
+#define EMC_CFG_2 0x2b8
+#define EMC_CFG_DIG_DLL 0x2bc
+#define EMC_CFG_DIG_DLL_PERIOD 0x2c0
+#define EMC_CTT_DURATION 0x2d8
+#define EMC_CTT_TERM_CTRL 0x2dc
+#define EMC_ZCAL_INTERVAL 0x2e0
+#define EMC_ZCAL_WAIT_CNT 0x2e4
+#define EMC_ZQ_CAL 0x2ec
+#define EMC_XM2CMDPADCTRL 0x2f0
+#define EMC_XM2DQSPADCTRL2 0x2fc
+#define EMC_XM2DQPADCTRL2 0x304
+#define EMC_XM2CLKPADCTRL 0x308
+#define EMC_XM2COMPPADCTRL 0x30c
+#define EMC_XM2VTTGENPADCTRL 0x310
+#define EMC_XM2VTTGENPADCTRL2 0x314
+#define EMC_XM2QUSEPADCTRL 0x318
+#define EMC_DLL_XFORM_DQS0 0x328
+#define EMC_DLL_XFORM_DQS1 0x32c
+#define EMC_DLL_XFORM_DQS2 0x330
+#define EMC_DLL_XFORM_DQS3 0x334
+#define EMC_DLL_XFORM_DQS4 0x338
+#define EMC_DLL_XFORM_DQS5 0x33c
+#define EMC_DLL_XFORM_DQS6 0x340
+#define EMC_DLL_XFORM_DQS7 0x344
+#define EMC_DLL_XFORM_QUSE0 0x348
+#define EMC_DLL_XFORM_QUSE1 0x34c
+#define EMC_DLL_XFORM_QUSE2 0x350
+#define EMC_DLL_XFORM_QUSE3 0x354
+#define EMC_DLL_XFORM_QUSE4 0x358
+#define EMC_DLL_XFORM_QUSE5 0x35c
+#define EMC_DLL_XFORM_QUSE6 0x360
+#define EMC_DLL_XFORM_QUSE7 0x364
+#define EMC_DLL_XFORM_DQ0 0x368
+#define EMC_DLL_XFORM_DQ1 0x36c
+#define EMC_DLL_XFORM_DQ2 0x370
+#define EMC_DLL_XFORM_DQ3 0x374
+#define EMC_DLI_TRIM_TXDQS0 0x3a8
+#define EMC_DLI_TRIM_TXDQS1 0x3ac
+#define EMC_DLI_TRIM_TXDQS2 0x3b0
+#define EMC_DLI_TRIM_TXDQS3 0x3b4
+#define EMC_DLI_TRIM_TXDQS4 0x3b8
+#define EMC_DLI_TRIM_TXDQS5 0x3bc
+#define EMC_DLI_TRIM_TXDQS6 0x3c0
+#define EMC_DLI_TRIM_TXDQS7 0x3c4
+#define EMC_STALL_THEN_EXE_BEFORE_CLKCHANGE 0x3c8
+#define EMC_STALL_THEN_EXE_AFTER_CLKCHANGE 0x3cc
+#define EMC_UNSTALL_RW_AFTER_CLKCHANGE 0x3d0
+#define EMC_SEL_DPD_CTRL 0x3d8
+#define EMC_PRE_REFRESH_REQ_CNT 0x3dc
+#define EMC_DYN_SELF_REF_CONTROL 0x3e0
+#define EMC_TXSRDLL 0x3e4
+
+#define EMC_STATUS_TIMING_UPDATE_STALLED BIT(23)
+
+#define EMC_MODE_SET_DLL_RESET BIT(8)
+#define EMC_MODE_SET_LONG_CNT BIT(26)
+
+#define EMC_SELF_REF_CMD_ENABLED BIT(0)
+
+#define DRAM_DEV_SEL_ALL (0 << 30)
+#define DRAM_DEV_SEL_0 BIT(31)
+#define DRAM_DEV_SEL_1 BIT(30)
+#define DRAM_BROADCAST(num) \
+ ((num) > 1 ? DRAM_DEV_SEL_ALL : DRAM_DEV_SEL_0)
+
+#define EMC_ZQ_CAL_CMD BIT(0)
+#define EMC_ZQ_CAL_LONG BIT(4)
+#define EMC_ZQ_CAL_LONG_CMD_DEV0 \
+ (DRAM_DEV_SEL_0 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD)
+#define EMC_ZQ_CAL_LONG_CMD_DEV1 \
+ (DRAM_DEV_SEL_1 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD)
+
+#define EMC_DBG_READ_MUX_ASSEMBLY BIT(0)
+#define EMC_DBG_WRITE_MUX_ACTIVE BIT(1)
+#define EMC_DBG_FORCE_UPDATE BIT(2)
+#define EMC_DBG_CFG_PRIORITY BIT(24)
+
+#define EMC_CFG5_QUSE_MODE_SHIFT 13
+#define EMC_CFG5_QUSE_MODE_MASK (7 << EMC_CFG5_QUSE_MODE_SHIFT)
+
+#define EMC_CFG5_QUSE_MODE_INTERNAL_LPBK 2
+#define EMC_CFG5_QUSE_MODE_PULSE_INTERN 3
+
+#define EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE BIT(9)
+
+#define EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE BIT(10)
+
+#define EMC_XM2QUSEPADCTRL_IVREF_ENABLE BIT(4)
+
+#define EMC_XM2DQSPADCTRL2_VREF_ENABLE BIT(5)
+#define EMC_XM2DQSPADCTRL3_VREF_ENABLE BIT(5)
+
+#define EMC_AUTO_CAL_STATUS_ACTIVE BIT(31)
+
+#define EMC_FBIO_CFG5_DRAM_TYPE_MASK 0x3
+
+#define EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK 0x3ff
+#define EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT 16
+#define EMC_MRS_WAIT_CNT_LONG_WAIT_MASK \
+ (0x3ff << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT)
+
+#define EMC_REFCTRL_DEV_SEL_MASK 0x3
+#define EMC_REFCTRL_ENABLE BIT(31)
+#define EMC_REFCTRL_ENABLE_ALL(num) \
+ (((num) > 1 ? 0 : 2) | EMC_REFCTRL_ENABLE)
+#define EMC_REFCTRL_DISABLE_ALL(num) ((num) > 1 ? 0 : 2)
+
+#define EMC_CFG_PERIODIC_QRST BIT(21)
+#define EMC_CFG_DYN_SREF_ENABLE BIT(28)
+
+#define EMC_CLKCHANGE_REQ_ENABLE BIT(0)
+#define EMC_CLKCHANGE_PD_ENABLE BIT(1)
+#define EMC_CLKCHANGE_SR_ENABLE BIT(2)
+
+#define EMC_TIMING_UPDATE BIT(0)
+
+#define EMC_REFRESH_OVERFLOW_INT BIT(3)
+#define EMC_CLKCHANGE_COMPLETE_INT BIT(4)
+#define EMC_MRR_DIVLD_INT BIT(5)
+
+#define EMC_MRR_DEV_SELECTN GENMASK(31, 30)
+#define EMC_MRR_MRR_MA GENMASK(23, 16)
+#define EMC_MRR_MRR_DATA GENMASK(15, 0)
+
+#define EMC_ADR_CFG_EMEM_NUMDEV BIT(0)
+
+enum emc_dram_type {
+ DRAM_TYPE_DDR3,
+ DRAM_TYPE_DDR1,
+ DRAM_TYPE_LPDDR2,
+ DRAM_TYPE_DDR2,
+};
+
+enum emc_dll_change {
+ DLL_CHANGE_NONE,
+ DLL_CHANGE_ON,
+ DLL_CHANGE_OFF
+};
+
+static const u16 emc_timing_registers[] = {
+ [0] = EMC_RC,
+ [1] = EMC_RFC,
+ [2] = EMC_RAS,
+ [3] = EMC_RP,
+ [4] = EMC_R2W,
+ [5] = EMC_W2R,
+ [6] = EMC_R2P,
+ [7] = EMC_W2P,
+ [8] = EMC_RD_RCD,
+ [9] = EMC_WR_RCD,
+ [10] = EMC_RRD,
+ [11] = EMC_REXT,
+ [12] = EMC_WEXT,
+ [13] = EMC_WDV,
+ [14] = EMC_QUSE,
+ [15] = EMC_QRST,
+ [16] = EMC_QSAFE,
+ [17] = EMC_RDV,
+ [18] = EMC_REFRESH,
+ [19] = EMC_BURST_REFRESH_NUM,
+ [20] = EMC_PRE_REFRESH_REQ_CNT,
+ [21] = EMC_PDEX2WR,
+ [22] = EMC_PDEX2RD,
+ [23] = EMC_PCHG2PDEN,
+ [24] = EMC_ACT2PDEN,
+ [25] = EMC_AR2PDEN,
+ [26] = EMC_RW2PDEN,
+ [27] = EMC_TXSR,
+ [28] = EMC_TXSRDLL,
+ [29] = EMC_TCKE,
+ [30] = EMC_TFAW,
+ [31] = EMC_TRPAB,
+ [32] = EMC_TCLKSTABLE,
+ [33] = EMC_TCLKSTOP,
+ [34] = EMC_TREFBW,
+ [35] = EMC_QUSE_EXTRA,
+ [36] = EMC_FBIO_CFG6,
+ [37] = EMC_ODT_WRITE,
+ [38] = EMC_ODT_READ,
+ [39] = EMC_FBIO_CFG5,
+ [40] = EMC_CFG_DIG_DLL,
+ [41] = EMC_CFG_DIG_DLL_PERIOD,
+ [42] = EMC_DLL_XFORM_DQS0,
+ [43] = EMC_DLL_XFORM_DQS1,
+ [44] = EMC_DLL_XFORM_DQS2,
+ [45] = EMC_DLL_XFORM_DQS3,
+ [46] = EMC_DLL_XFORM_DQS4,
+ [47] = EMC_DLL_XFORM_DQS5,
+ [48] = EMC_DLL_XFORM_DQS6,
+ [49] = EMC_DLL_XFORM_DQS7,
+ [50] = EMC_DLL_XFORM_QUSE0,
+ [51] = EMC_DLL_XFORM_QUSE1,
+ [52] = EMC_DLL_XFORM_QUSE2,
+ [53] = EMC_DLL_XFORM_QUSE3,
+ [54] = EMC_DLL_XFORM_QUSE4,
+ [55] = EMC_DLL_XFORM_QUSE5,
+ [56] = EMC_DLL_XFORM_QUSE6,
+ [57] = EMC_DLL_XFORM_QUSE7,
+ [58] = EMC_DLI_TRIM_TXDQS0,
+ [59] = EMC_DLI_TRIM_TXDQS1,
+ [60] = EMC_DLI_TRIM_TXDQS2,
+ [61] = EMC_DLI_TRIM_TXDQS3,
+ [62] = EMC_DLI_TRIM_TXDQS4,
+ [63] = EMC_DLI_TRIM_TXDQS5,
+ [64] = EMC_DLI_TRIM_TXDQS6,
+ [65] = EMC_DLI_TRIM_TXDQS7,
+ [66] = EMC_DLL_XFORM_DQ0,
+ [67] = EMC_DLL_XFORM_DQ1,
+ [68] = EMC_DLL_XFORM_DQ2,
+ [69] = EMC_DLL_XFORM_DQ3,
+ [70] = EMC_XM2CMDPADCTRL,
+ [71] = EMC_XM2DQSPADCTRL2,
+ [72] = EMC_XM2DQPADCTRL2,
+ [73] = EMC_XM2CLKPADCTRL,
+ [74] = EMC_XM2COMPPADCTRL,
+ [75] = EMC_XM2VTTGENPADCTRL,
+ [76] = EMC_XM2VTTGENPADCTRL2,
+ [77] = EMC_XM2QUSEPADCTRL,
+ [78] = EMC_XM2DQSPADCTRL3,
+ [79] = EMC_CTT_TERM_CTRL,
+ [80] = EMC_ZCAL_INTERVAL,
+ [81] = EMC_ZCAL_WAIT_CNT,
+ [82] = EMC_MRS_WAIT_CNT,
+ [83] = EMC_AUTO_CAL_CONFIG,
+ [84] = EMC_CTT,
+ [85] = EMC_CTT_DURATION,
+ [86] = EMC_DYN_SELF_REF_CONTROL,
+ [87] = EMC_FBIO_SPARE,
+ [88] = EMC_CFG_RSV,
+};
+
+struct emc_timing {
+ unsigned long rate;
+
+ u32 data[ARRAY_SIZE(emc_timing_registers)];
+
+ u32 emc_auto_cal_interval;
+ u32 emc_mode_1;
+ u32 emc_mode_2;
+ u32 emc_mode_reset;
+ u32 emc_zcal_cnt_long;
+ bool emc_cfg_periodic_qrst;
+ bool emc_cfg_dyn_self_ref;
+};
+
+enum emc_rate_request_type {
+ EMC_RATE_DEBUG,
+ EMC_RATE_ICC,
+ EMC_RATE_TYPE_MAX,
+};
+
+struct emc_rate_request {
+ unsigned long min_rate;
+ unsigned long max_rate;
+};
+
+struct tegra_emc {
+ struct device *dev;
+ struct tegra_mc *mc;
+ struct icc_provider provider;
+ struct notifier_block clk_nb;
+ struct clk *clk;
+ void __iomem *regs;
+ unsigned int irq;
+ bool bad_state;
+
+ struct emc_timing *new_timing;
+ struct emc_timing *timings;
+ unsigned int num_timings;
+
+ u32 mc_override;
+ u32 emc_cfg;
+
+ u32 emc_mode_1;
+ u32 emc_mode_2;
+ u32 emc_mode_reset;
+
+ bool vref_cal_toggle : 1;
+ bool zcal_long : 1;
+ bool dll_on : 1;
+
+ struct {
+ struct dentry *root;
+ unsigned long min_rate;
+ unsigned long max_rate;
+ } debugfs;
+
+ /*
+ * There are multiple sources in the EMC driver which could request
+ * a min/max clock rate, these rates are contained in this array.
+ */
+ struct emc_rate_request requested_rate[EMC_RATE_TYPE_MAX];
+
+ /* protect shared rate-change code path */
+ struct mutex rate_lock;
+
+ bool mrr_error;
+};
+
+static int emc_seq_update_timing(struct tegra_emc *emc)
+{
+ u32 val;
+ int err;
+
+ writel_relaxed(EMC_TIMING_UPDATE, emc->regs + EMC_TIMING_CONTROL);
+
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_STATUS, val,
+ !(val & EMC_STATUS_TIMING_UPDATE_STALLED),
+ 1, 200);
+ if (err) {
+ dev_err(emc->dev, "failed to update timing: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static irqreturn_t tegra_emc_isr(int irq, void *data)
+{
+ struct tegra_emc *emc = data;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT;
+ u32 status;
+
+ status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask;
+ if (!status)
+ return IRQ_NONE;
+
+ /* notify about HW problem */
+ if (status & EMC_REFRESH_OVERFLOW_INT)
+ dev_err_ratelimited(emc->dev,
+ "refresh request overflow timeout\n");
+
+ /* clear interrupts */
+ writel_relaxed(status, emc->regs + EMC_INTSTATUS);
+
+ return IRQ_HANDLED;
+}
+
+static struct emc_timing *emc_find_timing(struct tegra_emc *emc,
+ unsigned long rate)
+{
+ struct emc_timing *timing = NULL;
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate >= rate) {
+ timing = &emc->timings[i];
+ break;
+ }
+ }
+
+ if (!timing) {
+ dev_err(emc->dev, "no timing for rate %lu\n", rate);
+ return NULL;
+ }
+
+ return timing;
+}
+
+static bool emc_dqs_preset(struct tegra_emc *emc, struct emc_timing *timing,
+ bool *schmitt_to_vref)
+{
+ bool preset = false;
+ u32 val;
+
+ if (timing->data[71] & EMC_XM2DQSPADCTRL2_VREF_ENABLE) {
+ val = readl_relaxed(emc->regs + EMC_XM2DQSPADCTRL2);
+
+ if (!(val & EMC_XM2DQSPADCTRL2_VREF_ENABLE)) {
+ val |= EMC_XM2DQSPADCTRL2_VREF_ENABLE;
+ writel_relaxed(val, emc->regs + EMC_XM2DQSPADCTRL2);
+
+ preset = true;
+ }
+ }
+
+ if (timing->data[78] & EMC_XM2DQSPADCTRL3_VREF_ENABLE) {
+ val = readl_relaxed(emc->regs + EMC_XM2DQSPADCTRL3);
+
+ if (!(val & EMC_XM2DQSPADCTRL3_VREF_ENABLE)) {
+ val |= EMC_XM2DQSPADCTRL3_VREF_ENABLE;
+ writel_relaxed(val, emc->regs + EMC_XM2DQSPADCTRL3);
+
+ preset = true;
+ }
+ }
+
+ if (timing->data[77] & EMC_XM2QUSEPADCTRL_IVREF_ENABLE) {
+ val = readl_relaxed(emc->regs + EMC_XM2QUSEPADCTRL);
+
+ if (!(val & EMC_XM2QUSEPADCTRL_IVREF_ENABLE)) {
+ val |= EMC_XM2QUSEPADCTRL_IVREF_ENABLE;
+ writel_relaxed(val, emc->regs + EMC_XM2QUSEPADCTRL);
+
+ *schmitt_to_vref = true;
+ preset = true;
+ }
+ }
+
+ return preset;
+}
+
+static int emc_prepare_mc_clk_cfg(struct tegra_emc *emc, unsigned long rate)
+{
+ struct tegra_mc *mc = emc->mc;
+ unsigned int misc0_index = 16;
+ unsigned int i;
+ bool same;
+
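+ /*
+ * Index 16 of the emem_data[] timing array is expected to hold the
+ * MC_EMEM_ARB_MISC0 value; bit 27 of that register (the EMC-same-frequency
+ * flag) tells the clock driver whether the MC and EMC are meant to run at
+ * the same frequency for this timing.
+ */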
+ for (i = 0; i < mc->num_timings; i++) {
+ if (mc->timings[i].rate != rate)
+ continue;
+
+ if (mc->timings[i].emem_data[misc0_index] & BIT(27))
+ same = true;
+ else
+ same = false;
+
+ return tegra20_clk_prepare_emc_mc_same_freq(emc->clk, same);
+ }
+
+ return -EINVAL;
+}
+
+static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
+{
+ struct emc_timing *timing = emc_find_timing(emc, rate);
+ enum emc_dll_change dll_change;
+ enum emc_dram_type dram_type;
+ bool schmitt_to_vref = false;
+ unsigned int pre_wait = 0;
+ bool qrst_used = false;
+ unsigned int dram_num;
+ unsigned int i;
+ u32 fbio_cfg5;
+ u32 emc_dbg;
+ u32 val;
+ int err;
+
+ if (!timing || emc->bad_state)
+ return -EINVAL;
+
+ dev_dbg(emc->dev, "%s: using timing rate %lu for requested rate %lu\n",
+ __func__, timing->rate, rate);
+
+ emc->bad_state = true;
+
+ err = emc_prepare_mc_clk_cfg(emc, rate);
+ if (err) {
+ dev_err(emc->dev, "mc clock preparation failed: %d\n", err);
+ return err;
+ }
+
+ emc->vref_cal_toggle = false;
+ emc->mc_override = mc_readl(emc->mc, MC_EMEM_ARB_OVERRIDE);
+ emc->emc_cfg = readl_relaxed(emc->regs + EMC_CFG);
+ emc_dbg = readl_relaxed(emc->regs + EMC_DBG);
+
+ if (emc->dll_on == !!(timing->emc_mode_1 & 0x1))
+ dll_change = DLL_CHANGE_NONE;
+ else if (timing->emc_mode_1 & 0x1)
+ dll_change = DLL_CHANGE_ON;
+ else
+ dll_change = DLL_CHANGE_OFF;
+
+ emc->dll_on = !!(timing->emc_mode_1 & 0x1);
+
+ if (timing->data[80] && !readl_relaxed(emc->regs + EMC_ZCAL_INTERVAL))
+ emc->zcal_long = true;
+ else
+ emc->zcal_long = false;
+
+ fbio_cfg5 = readl_relaxed(emc->regs + EMC_FBIO_CFG5);
+ dram_type = fbio_cfg5 & EMC_FBIO_CFG5_DRAM_TYPE_MASK;
+
+ dram_num = tegra_mc_get_emem_device_count(emc->mc);
+
+ /* disable dynamic self-refresh */
+ if (emc->emc_cfg & EMC_CFG_DYN_SREF_ENABLE) {
+ emc->emc_cfg &= ~EMC_CFG_DYN_SREF_ENABLE;
+ writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
+
+ pre_wait = 5;
+ }
+
+ /* update MC arbiter settings */
+ val = mc_readl(emc->mc, MC_EMEM_ARB_OUTSTANDING_REQ);
+ if (!(val & MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE) ||
+ ((val & MC_EMEM_ARB_OUTSTANDING_REQ_MAX_MASK) > 0x50)) {
+
+ val = MC_EMEM_ARB_OUTSTANDING_REQ_LIMIT_ENABLE |
+ MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE | 0x50;
+ mc_writel(emc->mc, val, MC_EMEM_ARB_OUTSTANDING_REQ);
+ mc_writel(emc->mc, MC_TIMING_UPDATE, MC_TIMING_CONTROL);
+ }
+
+ if (emc->mc_override & MC_EMEM_ARB_OVERRIDE_EACK_MASK)
+ mc_writel(emc->mc,
+ emc->mc_override & ~MC_EMEM_ARB_OVERRIDE_EACK_MASK,
+ MC_EMEM_ARB_OVERRIDE);
+
+ /* check DQ/DQS VREF delay */
+ if (emc_dqs_preset(emc, timing, &schmitt_to_vref)) {
+ if (pre_wait < 3)
+ pre_wait = 3;
+ }
+
+ if (pre_wait) {
+ err = emc_seq_update_timing(emc);
+ if (err)
+ return err;
+
+ udelay(pre_wait);
+ }
+
+ /* disable auto-calibration if VREF mode is switching */
+ if (timing->emc_auto_cal_interval) {
+ val = readl_relaxed(emc->regs + EMC_XM2COMPPADCTRL);
+ val ^= timing->data[74];
+
+ if (val & EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE) {
+ writel_relaxed(0, emc->regs + EMC_AUTO_CAL_INTERVAL);
+
+ err = readl_relaxed_poll_timeout_atomic(
+ emc->regs + EMC_AUTO_CAL_STATUS, val,
+ !(val & EMC_AUTO_CAL_STATUS_ACTIVE), 1, 300);
+ if (err) {
+ dev_err(emc->dev,
+ "auto-cal finish timeout: %d\n", err);
+ return err;
+ }
+
+ emc->vref_cal_toggle = true;
+ }
+ }
+
+ /* program shadow registers */
+ for (i = 0; i < ARRAY_SIZE(timing->data); i++) {
+ /* EMC_XM2CLKPADCTRL should be programmed separately */
+ if (i != 73)
+ writel_relaxed(timing->data[i],
+ emc->regs + emc_timing_registers[i]);
+ }
+
+ err = tegra_mc_write_emem_configuration(emc->mc, timing->rate);
+ if (err)
+ return err;
+
+ /* DDR3: predict MRS long wait count */
+ if (dram_type == DRAM_TYPE_DDR3 && dll_change == DLL_CHANGE_ON) {
+ u32 cnt = 512;
+
+ if (emc->zcal_long)
+ cnt -= dram_num * 256;
+
+ val = timing->data[82] & EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK;
+ if (cnt < val)
+ cnt = val;
+
+ val = timing->data[82] & ~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
+ val |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) &
+ EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
+
+ writel_relaxed(val, emc->regs + EMC_MRS_WAIT_CNT);
+ }
+
+ /* this read also completes the writes */
+ val = readl_relaxed(emc->regs + EMC_SEL_DPD_CTRL);
+
+ if (!(val & EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE) && schmitt_to_vref) {
+ u32 cur_mode, new_mode;
+
+ cur_mode = fbio_cfg5 & EMC_CFG5_QUSE_MODE_MASK;
+ cur_mode >>= EMC_CFG5_QUSE_MODE_SHIFT;
+
+ new_mode = timing->data[39] & EMC_CFG5_QUSE_MODE_MASK;
+ new_mode >>= EMC_CFG5_QUSE_MODE_SHIFT;
+
+ if ((cur_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN &&
+ cur_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK) ||
+ (new_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN &&
+ new_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK))
+ qrst_used = true;
+ }
+
+ /* flow control marker 1 */
+ writel_relaxed(0x1, emc->regs + EMC_STALL_THEN_EXE_BEFORE_CLKCHANGE);
+
+ /* enable periodic reset */
+ if (qrst_used) {
+ writel_relaxed(emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE,
+ emc->regs + EMC_DBG);
+ writel_relaxed(emc->emc_cfg | EMC_CFG_PERIODIC_QRST,
+ emc->regs + EMC_CFG);
+ writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
+ }
+
+ /* disable auto-refresh to save time after clock change */
+ writel_relaxed(EMC_REFCTRL_DISABLE_ALL(dram_num),
+ emc->regs + EMC_REFCTRL);
+
+ /* turn off DLL and enter self-refresh on DDR3 */
+ if (dram_type == DRAM_TYPE_DDR3) {
+ if (dll_change == DLL_CHANGE_OFF)
+ writel_relaxed(timing->emc_mode_1,
+ emc->regs + EMC_EMRS);
+
+ writel_relaxed(DRAM_BROADCAST(dram_num) |
+ EMC_SELF_REF_CMD_ENABLED,
+ emc->regs + EMC_SELF_REF);
+ }
+
+ /* flow control marker 2 */
+ writel_relaxed(0x1, emc->regs + EMC_STALL_THEN_EXE_AFTER_CLKCHANGE);
+
+ /* enable write-active MUX, update unshadowed pad control */
+ writel_relaxed(emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE, emc->regs + EMC_DBG);
+ writel_relaxed(timing->data[73], emc->regs + EMC_XM2CLKPADCTRL);
+
+ /* restore periodic QRST and disable write-active MUX */
+ val = !!(emc->emc_cfg & EMC_CFG_PERIODIC_QRST);
+ if (qrst_used || timing->emc_cfg_periodic_qrst != val) {
+ if (timing->emc_cfg_periodic_qrst)
+ emc->emc_cfg |= EMC_CFG_PERIODIC_QRST;
+ else
+ emc->emc_cfg &= ~EMC_CFG_PERIODIC_QRST;
+
+ writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
+ }
+ writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
+
+ /* exit self-refresh on DDR3 */
+ if (dram_type == DRAM_TYPE_DDR3)
+ writel_relaxed(DRAM_BROADCAST(dram_num),
+ emc->regs + EMC_SELF_REF);
+
+ /* set DRAM-mode registers */
+ if (dram_type == DRAM_TYPE_DDR3) {
+ if (timing->emc_mode_1 != emc->emc_mode_1)
+ writel_relaxed(timing->emc_mode_1,
+ emc->regs + EMC_EMRS);
+
+ if (timing->emc_mode_2 != emc->emc_mode_2)
+ writel_relaxed(timing->emc_mode_2,
+ emc->regs + EMC_EMRS);
+
+ if (timing->emc_mode_reset != emc->emc_mode_reset ||
+ dll_change == DLL_CHANGE_ON) {
+ val = timing->emc_mode_reset;
+ if (dll_change == DLL_CHANGE_ON) {
+ val |= EMC_MODE_SET_DLL_RESET;
+ val |= EMC_MODE_SET_LONG_CNT;
+ } else {
+ val &= ~EMC_MODE_SET_DLL_RESET;
+ }
+ writel_relaxed(val, emc->regs + EMC_MRS);
+ }
+ } else {
+ if (timing->emc_mode_2 != emc->emc_mode_2)
+ writel_relaxed(timing->emc_mode_2,
+ emc->regs + EMC_MRW);
+
+ if (timing->emc_mode_1 != emc->emc_mode_1)
+ writel_relaxed(timing->emc_mode_1,
+ emc->regs + EMC_MRW);
+ }
+
+ emc->emc_mode_1 = timing->emc_mode_1;
+ emc->emc_mode_2 = timing->emc_mode_2;
+ emc->emc_mode_reset = timing->emc_mode_reset;
+
+ /* issue ZCAL command if turning ZCAL on */
+ if (emc->zcal_long) {
+ writel_relaxed(EMC_ZQ_CAL_LONG_CMD_DEV0,
+ emc->regs + EMC_ZQ_CAL);
+
+ if (dram_num > 1)
+ writel_relaxed(EMC_ZQ_CAL_LONG_CMD_DEV1,
+ emc->regs + EMC_ZQ_CAL);
+ }
+
+ /* flow control marker 3 */
+ writel_relaxed(0x1, emc->regs + EMC_UNSTALL_RW_AFTER_CLKCHANGE);
+
+ /*
+ * Read and discard an arbitrary MC register (Note: EMC registers
+ * can't be used) to ensure the register writes are completed.
+ */
+ mc_readl(emc->mc, MC_EMEM_ARB_OVERRIDE);
+
+ return 0;
+}
+
+static int emc_complete_timing_change(struct tegra_emc *emc,
+ unsigned long rate)
+{
+ struct emc_timing *timing = emc_find_timing(emc, rate);
+ unsigned int dram_num;
+ int err;
+ u32 v;
+
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, v,
+ v & EMC_CLKCHANGE_COMPLETE_INT,
+ 1, 100);
+ if (err) {
+ dev_err(emc->dev, "emc-car handshake timeout: %d\n", err);
+ return err;
+ }
+
+ /* re-enable auto-refresh */
+ dram_num = tegra_mc_get_emem_device_count(emc->mc);
+ writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num),
+ emc->regs + EMC_REFCTRL);
+
+ /* restore auto-calibration */
+ if (emc->vref_cal_toggle)
+ writel_relaxed(timing->emc_auto_cal_interval,
+ emc->regs + EMC_AUTO_CAL_INTERVAL);
+
+ /* restore dynamic self-refresh */
+ if (timing->emc_cfg_dyn_self_ref) {
+ emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE;
+ writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
+ }
+
+ /* set number of clocks to wait after each ZQ command */
+ if (emc->zcal_long)
+ writel_relaxed(timing->emc_zcal_cnt_long,
+ emc->regs + EMC_ZCAL_WAIT_CNT);
+
+ /* wait for writes to settle */
+ udelay(2);
+
+ /* update restored timing */
+ err = emc_seq_update_timing(emc);
+ if (!err)
+ emc->bad_state = false;
+
+ /* restore early ACK */
+ mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE);
+
+ return err;
+}
+
+static int emc_unprepare_timing_change(struct tegra_emc *emc,
+ unsigned long rate)
+{
+ if (!emc->bad_state) {
+ /* shouldn't ever happen in practice */
+ dev_err(emc->dev, "timing configuration can't be reverted\n");
+ emc->bad_state = true;
+ }
+
+ return 0;
+}
+
+static int emc_clk_change_notify(struct notifier_block *nb,
+ unsigned long msg, void *data)
+{
+ struct tegra_emc *emc = container_of(nb, struct tegra_emc, clk_nb);
+ struct clk_notifier_data *cnd = data;
+ int err;
+
+ switch (msg) {
+ case PRE_RATE_CHANGE:
+ /*
+ * Disable interrupt since read accesses are prohibited after
+ * stalling.
+ */
+ disable_irq(emc->irq);
+ err = emc_prepare_timing_change(emc, cnd->new_rate);
+ enable_irq(emc->irq);
+ break;
+
+ case ABORT_RATE_CHANGE:
+ err = emc_unprepare_timing_change(emc, cnd->old_rate);
+ break;
+
+ case POST_RATE_CHANGE:
+ err = emc_complete_timing_change(emc, cnd->new_rate);
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return notifier_from_errno(err);
+}
+
+static int load_one_timing_from_dt(struct tegra_emc *emc,
+ struct emc_timing *timing,
+ struct device_node *node)
+{
+ u32 value;
+ int err;
+
+ err = of_property_read_u32(node, "clock-frequency", &value);
+ if (err) {
+ dev_err(emc->dev, "timing %pOF: failed to read rate: %d\n",
+ node, err);
+ return err;
+ }
+
+ timing->rate = value;
+
+ err = of_property_read_u32_array(node, "nvidia,emc-configuration",
+ timing->data,
+ ARRAY_SIZE(emc_timing_registers));
+ if (err) {
+ dev_err(emc->dev,
+ "timing %pOF: failed to read emc timing data: %d\n",
+ node, err);
+ return err;
+ }
+
+#define EMC_READ_BOOL(prop, dtprop) \
+ timing->prop = of_property_read_bool(node, dtprop);
+
+#define EMC_READ_U32(prop, dtprop) \
+ err = of_property_read_u32(node, dtprop, &timing->prop); \
+ if (err) { \
+ dev_err(emc->dev, \
+ "timing %pOFn: failed to read " #prop ": %d\n", \
+ node, err); \
+ return err; \
+ }
+
+ EMC_READ_U32(emc_auto_cal_interval, "nvidia,emc-auto-cal-interval")
+ EMC_READ_U32(emc_mode_1, "nvidia,emc-mode-1")
+ EMC_READ_U32(emc_mode_2, "nvidia,emc-mode-2")
+ EMC_READ_U32(emc_mode_reset, "nvidia,emc-mode-reset")
+ EMC_READ_U32(emc_zcal_cnt_long, "nvidia,emc-zcal-cnt-long")
+ EMC_READ_BOOL(emc_cfg_dyn_self_ref, "nvidia,emc-cfg-dyn-self-ref")
+ EMC_READ_BOOL(emc_cfg_periodic_qrst, "nvidia,emc-cfg-periodic-qrst")
+
+#undef EMC_READ_U32
+#undef EMC_READ_BOOL
+
+ dev_dbg(emc->dev, "%s: %pOF: rate %lu\n", __func__, node, timing->rate);
+
+ return 0;
+}
+
+static int cmp_timings(const void *_a, const void *_b)
+{
+ const struct emc_timing *a = _a;
+ const struct emc_timing *b = _b;
+
+ if (a->rate < b->rate)
+ return -1;
+
+ if (a->rate > b->rate)
+ return 1;
+
+ return 0;
+}
+
+static int emc_check_mc_timings(struct tegra_emc *emc)
+{
+ struct tegra_mc *mc = emc->mc;
+ unsigned int i;
+
+ if (emc->num_timings != mc->num_timings) {
+ dev_err(emc->dev, "emc/mc timings number mismatch: %u %u\n",
+ emc->num_timings, mc->num_timings);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mc->num_timings; i++) {
+ if (emc->timings[i].rate != mc->timings[i].rate) {
+ dev_err(emc->dev,
+ "emc/mc timing rate mismatch: %lu %lu\n",
+ emc->timings[i].rate, mc->timings[i].rate);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int emc_load_timings_from_dt(struct tegra_emc *emc,
+ struct device_node *node)
+{
+ struct device_node *child;
+ struct emc_timing *timing;
+ int child_count;
+ int err;
+
+ child_count = of_get_child_count(node);
+ if (!child_count) {
+ dev_err(emc->dev, "no memory timings in: %pOF\n", node);
+ return -EINVAL;
+ }
+
+ emc->timings = devm_kcalloc(emc->dev, child_count, sizeof(*timing),
+ GFP_KERNEL);
+ if (!emc->timings)
+ return -ENOMEM;
+
+ emc->num_timings = child_count;
+ timing = emc->timings;
+
+ for_each_child_of_node(node, child) {
+ err = load_one_timing_from_dt(emc, timing++, child);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
+ }
+
+ sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
+ NULL);
+
+ err = emc_check_mc_timings(emc);
+ if (err)
+ return err;
+
+ dev_info_once(emc->dev,
+ "got %u timings for RAM code %u (min %luMHz max %luMHz)\n",
+ emc->num_timings,
+ tegra_read_ram_code(),
+ emc->timings[0].rate / 1000000,
+ emc->timings[emc->num_timings - 1].rate / 1000000);
+
+ return 0;
+}
+
+static struct device_node *emc_find_node_by_ram_code(struct tegra_emc *emc)
+{
+ struct device *dev = emc->dev;
+ struct device_node *np;
+ u32 value, ram_code;
+ int err;
+
+ if (emc->mrr_error) {
+ dev_warn(dev, "memory timings skipped due to MRR error\n");
+ return NULL;
+ }
+
+ if (of_get_child_count(dev->of_node) == 0) {
+ dev_info_once(dev, "device-tree doesn't have memory timings\n");
+ return NULL;
+ }
+
+ ram_code = tegra_read_ram_code();
+
+ for_each_child_of_node(dev->of_node, np) {
+ err = of_property_read_u32(np, "nvidia,ram-code", &value);
+ if (err || value != ram_code)
+ continue;
+
+ return np;
+ }
+
+ dev_err(dev, "no memory timings for RAM code %u found in device-tree\n",
+ ram_code);
+
+ return NULL;
+}
+
+static int emc_read_lpddr_mode_register(struct tegra_emc *emc,
+ unsigned int emem_dev,
+ unsigned int register_addr,
+ unsigned int *register_data)
+{
+ u32 memory_dev = emem_dev ? 1 : 2;
+ u32 val, mr_mask = 0xff;
+ int err;
+
+ /* clear data-valid interrupt status */
+ writel_relaxed(EMC_MRR_DIVLD_INT, emc->regs + EMC_INTSTATUS);
+
+ /* issue mode register read request */
+ val = FIELD_PREP(EMC_MRR_DEV_SELECTN, memory_dev);
+ val |= FIELD_PREP(EMC_MRR_MRR_MA, register_addr);
+
+ writel_relaxed(val, emc->regs + EMC_MRR);
+
+ /* wait for the LPDDR2 data-valid interrupt */
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, val,
+ val & EMC_MRR_DIVLD_INT,
+ 1, 100);
+ if (err) {
+ dev_err(emc->dev, "mode register %u read failed: %d\n",
+ register_addr, err);
+ emc->mrr_error = true;
+ return err;
+ }
+
+ /* read out mode register data */
+ val = readl_relaxed(emc->regs + EMC_MRR);
+ *register_data = FIELD_GET(EMC_MRR_MRR_DATA, val) & mr_mask;
+
+ return 0;
+}
+
+static void emc_read_lpddr_sdram_info(struct tegra_emc *emc,
+ unsigned int emem_dev)
+{
+ union lpddr2_basic_config4 basic_conf4;
+ unsigned int manufacturer_id;
+ unsigned int revision_id1;
+ unsigned int revision_id2;
+
+ /* these registers are standard for all LPDDR JEDEC memory chips */
+ emc_read_lpddr_mode_register(emc, emem_dev, 5, &manufacturer_id);
+ emc_read_lpddr_mode_register(emc, emem_dev, 6, &revision_id1);
+ emc_read_lpddr_mode_register(emc, emem_dev, 7, &revision_id2);
+ emc_read_lpddr_mode_register(emc, emem_dev, 8, &basic_conf4.value);
+
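+ /*
+ * Decode MR8 (Basic Configuration 4) per the JEDEC LPDDR2 spec: the
+ * prefetch architecture is S4/S2 (4 >> arch_type), the density is
+ * (64 << density) Mbit and the I/O width is (32 >> io_width) bit.
+ */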
+ dev_info(emc->dev, "SDRAM[dev%u]: manufacturer: 0x%x (%s) rev1: 0x%x rev2: 0x%x prefetch: S%u density: %uMbit iowidth: %ubit\n",
+ emem_dev, manufacturer_id,
+ lpddr2_jedec_manufacturer(manufacturer_id),
+ revision_id1, revision_id2,
+ 4 >> basic_conf4.arch_type,
+ 64 << basic_conf4.density,
+ 32 >> basic_conf4.io_width);
+}
+
+static int emc_setup_hw(struct tegra_emc *emc)
+{
+ u32 fbio_cfg5, emc_cfg, emc_dbg, emc_adr_cfg;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT;
+ static bool print_sdram_info_once;
+ enum emc_dram_type dram_type;
+ const char *dram_type_str;
+ unsigned int emem_numdev;
+
+ fbio_cfg5 = readl_relaxed(emc->regs + EMC_FBIO_CFG5);
+ dram_type = fbio_cfg5 & EMC_FBIO_CFG5_DRAM_TYPE_MASK;
+
+ emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);
+
+ /* enable EMC and CAR to handshake on PLL divider/source changes */
+ emc_cfg |= EMC_CLKCHANGE_REQ_ENABLE;
+
+ /* configure clock change mode accordingly to DRAM type */
+ switch (dram_type) {
+ case DRAM_TYPE_LPDDR2:
+ emc_cfg |= EMC_CLKCHANGE_PD_ENABLE;
+ emc_cfg &= ~EMC_CLKCHANGE_SR_ENABLE;
+ break;
+
+ default:
+ emc_cfg &= ~EMC_CLKCHANGE_SR_ENABLE;
+ emc_cfg &= ~EMC_CLKCHANGE_PD_ENABLE;
+ break;
+ }
+
+ writel_relaxed(emc_cfg, emc->regs + EMC_CFG_2);
+
+ /* initialize interrupt */
+ writel_relaxed(intmask, emc->regs + EMC_INTMASK);
+ writel_relaxed(0xffffffff, emc->regs + EMC_INTSTATUS);
+
+ /* ensure that unwanted debug features are disabled */
+ emc_dbg = readl_relaxed(emc->regs + EMC_DBG);
+ emc_dbg |= EMC_DBG_CFG_PRIORITY;
+ emc_dbg &= ~EMC_DBG_READ_MUX_ASSEMBLY;
+ emc_dbg &= ~EMC_DBG_WRITE_MUX_ACTIVE;
+ emc_dbg &= ~EMC_DBG_FORCE_UPDATE;
+ writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
+
+ switch (dram_type) {
+ case DRAM_TYPE_DDR1:
+ dram_type_str = "DDR1";
+ break;
+ case DRAM_TYPE_LPDDR2:
+ dram_type_str = "LPDDR2";
+ break;
+ case DRAM_TYPE_DDR2:
+ dram_type_str = "DDR2";
+ break;
+ case DRAM_TYPE_DDR3:
+ dram_type_str = "DDR3";
+ break;
+ }
+
+ emc_adr_cfg = readl_relaxed(emc->regs + EMC_ADR_CFG);
+ emem_numdev = FIELD_GET(EMC_ADR_CFG_EMEM_NUMDEV, emc_adr_cfg) + 1;
+
+ dev_info_once(emc->dev, "%u %s %s attached\n", emem_numdev,
+ dram_type_str, emem_numdev == 2 ? "devices" : "device");
+
+ if (dram_type == DRAM_TYPE_LPDDR2 && !print_sdram_info_once) {
+ while (emem_numdev--)
+ emc_read_lpddr_sdram_info(emc, emem_numdev);
+
+ print_sdram_info_once = true;
+ }
+
+ return 0;
+}
+
+static long emc_round_rate(unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
+ void *arg)
+{
+ struct emc_timing *timing = NULL;
+ struct tegra_emc *emc = arg;
+ unsigned int i;
+
+ if (!emc->num_timings)
+ return clk_get_rate(emc->clk);
+
+ min_rate = min(min_rate, emc->timings[emc->num_timings - 1].rate);
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate < rate && i != emc->num_timings - 1)
+ continue;
+
+ if (emc->timings[i].rate > max_rate) {
+ i = max(i, 1u) - 1;
+
+ if (emc->timings[i].rate < min_rate)
+ break;
+ }
+
+ if (emc->timings[i].rate < min_rate)
+ continue;
+
+ timing = &emc->timings[i];
+ break;
+ }
+
+ if (!timing) {
+ dev_err(emc->dev, "no timing for rate %lu min %lu max %lu\n",
+ rate, min_rate, max_rate);
+ return -EINVAL;
+ }
+
+ return timing->rate;
+}
+
+static void tegra_emc_rate_requests_init(struct tegra_emc *emc)
+{
+ unsigned int i;
+
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++) {
+ emc->requested_rate[i].min_rate = 0;
+ emc->requested_rate[i].max_rate = ULONG_MAX;
+ }
+}
+
+static int emc_request_rate(struct tegra_emc *emc,
+ unsigned long new_min_rate,
+ unsigned long new_max_rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = emc->requested_rate;
+ unsigned long min_rate = 0, max_rate = ULONG_MAX;
+ unsigned int i;
+ int err;
+
+ /* select minimum and maximum rates among the requested rates */
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++, req++) {
+ if (i == type) {
+ min_rate = max(new_min_rate, min_rate);
+ max_rate = min(new_max_rate, max_rate);
+ } else {
+ min_rate = max(req->min_rate, min_rate);
+ max_rate = min(req->max_rate, max_rate);
+ }
+ }
+
+ if (min_rate > max_rate) {
+ dev_err_ratelimited(emc->dev, "%s: type %u: out of range: %lu %lu\n",
+ __func__, type, min_rate, max_rate);
+ return -ERANGE;
+ }
+
+ /*
+ * EMC rate-changes should go via OPP API because it manages voltage
+ * changes.
+ */
+ err = dev_pm_opp_set_rate(emc->dev, min_rate);
+ if (err)
+ return err;
+
+ emc->requested_rate[type].min_rate = new_min_rate;
+ emc->requested_rate[type].max_rate = new_max_rate;
+
+ return 0;
+}
+
+static int emc_set_min_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, rate, req->max_rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
+static int emc_set_max_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, req->min_rate, rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
+/*
+ * debugfs interface
+ *
+ * The memory controller driver exposes some files in debugfs that can be used
+ * to control the EMC frequency. The top-level directory can be found here:
+ *
+ * /sys/kernel/debug/emc
+ *
+ * It contains the following files:
+ *
+ * - available_rates: This file contains a list of valid, space-separated
+ * EMC frequencies.
+ *
+ * - min_rate: Writing a value to this file sets the given frequency as the
+ * floor of the permitted range. If this is higher than the currently
+ * configured EMC frequency, this will cause the frequency to be
+ * increased so that it stays within the valid range.
+ *
+ * - max_rate: Similarly to the min_rate file, writing a value to this file
+ * sets the given frequency as the ceiling of the permitted range. If
+ * the value is lower than the currently configured EMC frequency, this
+ * will cause the frequency to be decreased so that it stays within the
+ * valid range.
+ */
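+
+/*
+ * Illustrative usage only (the 400 MHz value below is hypothetical and must
+ * be one of the frequencies reported by available_rates):
+ *
+ *	# cat /sys/kernel/debug/emc/available_rates
+ *	# echo 400000000 > /sys/kernel/debug/emc/min_rate
+ *	# echo 400000000 > /sys/kernel/debug/emc/max_rate
+ */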
+
+static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++)
+ if (rate == emc->timings[i].rate)
+ return true;
+
+ return false;
+}
+
+static int tegra_emc_debug_available_rates_show(struct seq_file *s, void *data)
+{
+ struct tegra_emc *emc = s->private;
+ const char *prefix = "";
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ seq_printf(s, "%s%lu", prefix, emc->timings[i].rate);
+ prefix = " ";
+ }
+
+ seq_puts(s, "\n");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(tegra_emc_debug_available_rates);
+
+static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
+{
+ struct tegra_emc *emc = data;
+
+ *rate = emc->debugfs.min_rate;
+
+ return 0;
+}
+
+static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
+{
+ struct tegra_emc *emc = data;
+ int err;
+
+ if (!tegra_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.min_rate = rate;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
+ tegra_emc_debug_min_rate_get,
+ tegra_emc_debug_min_rate_set, "%llu\n");
+
+static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
+{
+ struct tegra_emc *emc = data;
+
+ *rate = emc->debugfs.max_rate;
+
+ return 0;
+}
+
+static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
+{
+ struct tegra_emc *emc = data;
+ int err;
+
+ if (!tegra_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.max_rate = rate;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
+ tegra_emc_debug_max_rate_get,
+ tegra_emc_debug_max_rate_set, "%llu\n");
+
+static void tegra_emc_debugfs_init(struct tegra_emc *emc)
+{
+ struct device *dev = emc->dev;
+ unsigned int i;
+ int err;
+
+ emc->debugfs.min_rate = ULONG_MAX;
+ emc->debugfs.max_rate = 0;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate < emc->debugfs.min_rate)
+ emc->debugfs.min_rate = emc->timings[i].rate;
+
+ if (emc->timings[i].rate > emc->debugfs.max_rate)
+ emc->debugfs.max_rate = emc->timings[i].rate;
+ }
+
+ if (!emc->num_timings) {
+ emc->debugfs.min_rate = clk_get_rate(emc->clk);
+ emc->debugfs.max_rate = emc->debugfs.min_rate;
+ }
+
+ err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
+ emc->debugfs.max_rate);
+ if (err < 0) {
+ dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
+ emc->debugfs.min_rate, emc->debugfs.max_rate,
+ emc->clk);
+ }
+
+ emc->debugfs.root = debugfs_create_dir("emc", NULL);
+
+ debugfs_create_file("available_rates", 0444, emc->debugfs.root,
+ emc, &tegra_emc_debug_available_rates_fops);
+ debugfs_create_file("min_rate", 0644, emc->debugfs.root,
+ emc, &tegra_emc_debug_min_rate_fops);
+ debugfs_create_file("max_rate", 0644, emc->debugfs.root,
+ emc, &tegra_emc_debug_max_rate_fops);
+}
+
+static inline struct tegra_emc *
+to_tegra_emc_provider(struct icc_provider *provider)
+{
+ return container_of(provider, struct tegra_emc, provider);
+}
+
+static struct icc_node_data *
+emc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct icc_provider *provider = data;
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ /* External Memory is the only possible ICC route */
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ if (node->id != TEGRA_ICC_EMEM)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * SRC and DST nodes should have matching TAG in order to have
+ * it set by default for a requested path.
+ */
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ ndata->node = node;
+
+ return ndata;
+ }
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+static int emc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct tegra_emc *emc = to_tegra_emc_provider(dst->provider);
+ unsigned long long peak_bw = icc_units_to_bps(dst->peak_bw);
+ unsigned long long avg_bw = icc_units_to_bps(dst->avg_bw);
+ unsigned long long rate = max(avg_bw, peak_bw);
+ const unsigned int dram_data_bus_width_bytes = 4;
+ const unsigned int ddr = 2;
+ int err;
+
+ /*
+	 * The Tegra30 EMC runs at the SDRAM bus clock rate, which means the
+	 * EMC clock rate is half the peak data rate because data is sampled
+	 * on both EMC clock edges.
+ */
+ do_div(rate, ddr * dram_data_bus_width_bytes);
+ rate = min_t(u64, rate, U32_MAX);
+
+ err = emc_set_min_rate(emc, rate, EMC_RATE_ICC);
+ if (err)
+ return err;
+
+ return 0;
+}
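+
+/*
+ * Worked example for the conversion above (illustrative numbers only): a
+ * peak bandwidth request of 3200000000 bytes/sec on the 4-byte-wide DDR bus
+ * gives rate = 3200000000 / (2 * 4) = 400000000, i.e. a 400 MHz EMC clock
+ * request.
+ */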
+
+static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+{
+ const struct tegra_mc_soc *soc = emc->mc->soc;
+ struct icc_node *node;
+ int err;
+
+ emc->provider.dev = emc->dev;
+ emc->provider.set = emc_icc_set;
+ emc->provider.data = &emc->provider;
+ emc->provider.aggregate = soc->icc_ops->aggregate;
+ emc->provider.xlate_extended = emc_of_icc_xlate_extended;
+
+ icc_provider_init(&emc->provider);
+
+ /* create External Memory Controller node */
+ node = icc_node_create(TEGRA_ICC_EMC);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto err_msg;
+ }
+
+ node->name = "External Memory Controller";
+ icc_node_add(node, &emc->provider);
+
+ /* link External Memory Controller to External Memory (DRAM) */
+ err = icc_link_create(node, TEGRA_ICC_EMEM);
+ if (err)
+ goto remove_nodes;
+
+ /* create External Memory node */
+ node = icc_node_create(TEGRA_ICC_EMEM);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto remove_nodes;
+ }
+
+ node->name = "External Memory (DRAM)";
+ icc_node_add(node, &emc->provider);
+
+ err = icc_provider_register(&emc->provider);
+ if (err)
+ goto remove_nodes;
+
+ return 0;
+
+remove_nodes:
+ icc_nodes_remove(&emc->provider);
+err_msg:
+ dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
+
+ return err;
+}
+
+static void devm_tegra_emc_unset_callback(void *data)
+{
+ tegra20_clk_set_emc_round_callback(NULL, NULL);
+}
+
+static void devm_tegra_emc_unreg_clk_notifier(void *data)
+{
+ struct tegra_emc *emc = data;
+
+ clk_notifier_unregister(emc->clk, &emc->clk_nb);
+}
+
+static int tegra_emc_init_clk(struct tegra_emc *emc)
+{
+ int err;
+
+ tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
+
+ err = devm_add_action_or_reset(emc->dev, devm_tegra_emc_unset_callback,
+ NULL);
+ if (err)
+ return err;
+
+ emc->clk = devm_clk_get(emc->dev, NULL);
+ if (IS_ERR(emc->clk)) {
+ dev_err(emc->dev, "failed to get EMC clock: %pe\n", emc->clk);
+ return PTR_ERR(emc->clk);
+ }
+
+ err = clk_notifier_register(emc->clk, &emc->clk_nb);
+ if (err) {
+ dev_err(emc->dev, "failed to register clk notifier: %d\n", err);
+ return err;
+ }
+
+ err = devm_add_action_or_reset(emc->dev,
+ devm_tegra_emc_unreg_clk_notifier, emc);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra_emc_probe(struct platform_device *pdev)
+{
+ struct tegra_core_opp_params opp_params = {};
+ struct device_node *np;
+ struct tegra_emc *emc;
+ int err;
+
+ emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
+ if (!emc)
+ return -ENOMEM;
+
+ emc->mc = devm_tegra_memory_controller_get(&pdev->dev);
+ if (IS_ERR(emc->mc))
+ return PTR_ERR(emc->mc);
+
+ mutex_init(&emc->rate_lock);
+ emc->clk_nb.notifier_call = emc_clk_change_notify;
+ emc->dev = &pdev->dev;
+
+ emc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(emc->regs))
+ return PTR_ERR(emc->regs);
+
+ err = emc_setup_hw(emc);
+ if (err)
+ return err;
+
+ np = emc_find_node_by_ram_code(emc);
+ if (np) {
+ err = emc_load_timings_from_dt(emc, np);
+ of_node_put(np);
+ if (err)
+ return err;
+ }
+
+ err = platform_get_irq(pdev, 0);
+ if (err < 0)
+ return err;
+
+ emc->irq = err;
+
+ err = devm_request_irq(&pdev->dev, emc->irq, tegra_emc_isr, 0,
+ dev_name(&pdev->dev), emc);
+ if (err) {
+ dev_err(&pdev->dev, "failed to request irq: %d\n", err);
+ return err;
+ }
+
+ err = tegra_emc_init_clk(emc);
+ if (err)
+ return err;
+
+ opp_params.init_state = true;
+
+ err = devm_tegra_core_dev_init_opp_table(&pdev->dev, &opp_params);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, emc);
+ tegra_emc_rate_requests_init(emc);
+ tegra_emc_debugfs_init(emc);
+ tegra_emc_interconnect_init(emc);
+
+ /*
+	 * Don't allow the kernel module to be unloaded. Unloading adds extra
+	 * complexity that isn't really worth the effort for this driver.
+ */
+ try_module_get(THIS_MODULE);
+
+ return 0;
+}
+
+static int tegra_emc_suspend(struct device *dev)
+{
+ struct tegra_emc *emc = dev_get_drvdata(dev);
+ int err;
+
+ /* take exclusive control over the clock's rate */
+ err = clk_rate_exclusive_get(emc->clk);
+ if (err) {
+ dev_err(emc->dev, "failed to acquire clk: %d\n", err);
+ return err;
+ }
+
+	/* suspending in a bad state will hang the machine */
+ if (WARN(emc->bad_state, "hardware in a bad state\n"))
+ return -EINVAL;
+
+ emc->bad_state = true;
+
+ return 0;
+}
+
+static int tegra_emc_resume(struct device *dev)
+{
+ struct tegra_emc *emc = dev_get_drvdata(dev);
+
+ emc_setup_hw(emc);
+ emc->bad_state = false;
+
+ clk_rate_exclusive_put(emc->clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_emc_pm_ops = {
+ .suspend = tegra_emc_suspend,
+ .resume = tegra_emc_resume,
+};
+
+static const struct of_device_id tegra_emc_of_match[] = {
+ { .compatible = "nvidia,tegra30-emc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tegra_emc_of_match);
+
+static struct platform_driver tegra_emc_driver = {
+ .probe = tegra_emc_probe,
+ .driver = {
+ .name = "tegra30-emc",
+ .of_match_table = tegra_emc_of_match,
+ .pm = &tegra_emc_pm_ops,
+ .suppress_bind_attrs = true,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(tegra_emc_driver);
+
+MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra30 EMC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
new file mode 100644
index 000000000..06f8b35e0
--- /dev/null
+++ b/drivers/memory/tegra/tegra30.c
@@ -0,0 +1,1403 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/memory/tegra30-mc.h>
+
+#include "mc.h"
+
+static const unsigned long tegra30_mc_emem_regs[] = {
+ MC_EMEM_ARB_CFG,
+ MC_EMEM_ARB_OUTSTANDING_REQ,
+ MC_EMEM_ARB_TIMING_RCD,
+ MC_EMEM_ARB_TIMING_RP,
+ MC_EMEM_ARB_TIMING_RC,
+ MC_EMEM_ARB_TIMING_RAS,
+ MC_EMEM_ARB_TIMING_FAW,
+ MC_EMEM_ARB_TIMING_RRD,
+ MC_EMEM_ARB_TIMING_RAP2PRE,
+ MC_EMEM_ARB_TIMING_WAP2PRE,
+ MC_EMEM_ARB_TIMING_R2R,
+ MC_EMEM_ARB_TIMING_W2W,
+ MC_EMEM_ARB_TIMING_R2W,
+ MC_EMEM_ARB_TIMING_W2R,
+ MC_EMEM_ARB_DA_TURNS,
+ MC_EMEM_ARB_DA_COVERS,
+ MC_EMEM_ARB_MISC0,
+ MC_EMEM_ARB_RING1_THROTTLE,
+};
+
+static const struct tegra_mc_client tegra30_mc_clients[] = {
+ {
+ .id = 0x00,
+ .name = "ptcr",
+ .swgroup = TEGRA_SWGROUP_PTC,
+ .regs = {
+ .la = {
+ .reg = 0x34c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0,
+ },
+ },
+ .fifo_size = 16 * 2,
+ }, {
+ .id = 0x01,
+ .name = "display0a",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ .fifo_size = 16 * 128,
+ }, {
+ .id = 0x02,
+ .name = "display0ab",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ .fifo_size = 16 * 128,
+ }, {
+ .id = 0x03,
+ .name = "display0b",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x04,
+ .name = "display0bb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x05,
+ .name = "display0c",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x2ec,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ .fifo_size = 16 * 128,
+ }, {
+ .id = 0x06,
+ .name = "display0cb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x2f8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ .fifo_size = 16 * 128,
+ }, {
+ .id = 0x07,
+ .name = "display1b",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 7,
+ },
+ .la = {
+ .reg = 0x2ec,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x08,
+ .name = "display1bb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 8,
+ },
+ .la = {
+ .reg = 0x2f8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x09,
+ .name = "eppup",
+ .swgroup = TEGRA_SWGROUP_EPP,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 9,
+ },
+ .la = {
+ .reg = 0x300,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x17,
+ },
+ },
+ .fifo_size = 16 * 8,
+ }, {
+ .id = 0x0a,
+ .name = "g2pr",
+ .swgroup = TEGRA_SWGROUP_G2,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x308,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x09,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x0b,
+ .name = "g2sr",
+ .swgroup = TEGRA_SWGROUP_G2,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x308,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x09,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x0c,
+ .name = "mpeunifbr",
+ .swgroup = TEGRA_SWGROUP_MPE,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ .fifo_size = 16 * 8,
+ }, {
+ .id = 0x0d,
+ .name = "viruv",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x364,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x2c,
+ },
+ },
+ .fifo_size = 16 * 8,
+ }, {
+ .id = 0x0e,
+ .name = "afir",
+ .swgroup = TEGRA_SWGROUP_AFI,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x2e0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x10,
+ },
+ },
+ .fifo_size = 16 * 32,
+ }, {
+ .id = 0x0f,
+ .name = "avpcarm7r",
+ .swgroup = TEGRA_SWGROUP_AVPC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ .fifo_size = 16 * 2,
+ }, {
+ .id = 0x10,
+ .name = "displayhc",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x2f0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ .fifo_size = 16 * 2,
+ }, {
+ .id = 0x11,
+ .name = "displayhcb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2fc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ .fifo_size = 16 * 2,
+ }, {
+ .id = 0x12,
+ .name = "fdcdrd",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x334,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0a,
+ },
+ },
+ .fifo_size = 16 * 48,
+ }, {
+ .id = 0x13,
+ .name = "fdcdrd2",
+ .swgroup = TEGRA_SWGROUP_NV2,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 19,
+ },
+ .la = {
+ .reg = 0x33c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0a,
+ },
+ },
+ .fifo_size = 16 * 48,
+ }, {
+ .id = 0x14,
+ .name = "g2dr",
+ .swgroup = TEGRA_SWGROUP_G2,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x30c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0a,
+ },
+ },
+ .fifo_size = 16 * 48,
+ }, {
+ .id = 0x15,
+ .name = "hdar",
+ .swgroup = TEGRA_SWGROUP_HDA,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ .fifo_size = 16 * 16,
+ }, {
+ .id = 0x16,
+ .name = "host1xdmar",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x05,
+ },
+ },
+ .fifo_size = 16 * 16,
+ }, {
+ .id = 0x17,
+ .name = "host1xr",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ .fifo_size = 16 * 8,
+ }, {
+ .id = 0x18,
+ .name = "idxsrd",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 24,
+ },
+ .la = {
+ .reg = 0x334,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x13,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x19,
+ .name = "idxsrd2",
+ .swgroup = TEGRA_SWGROUP_NV2,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 25,
+ },
+ .la = {
+ .reg = 0x33c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x13,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x1a,
+ .name = "mpe_ipred",
+ .swgroup = TEGRA_SWGROUP_MPE,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 26,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ .fifo_size = 16 * 2,
+ }, {
+ .id = 0x1b,
+ .name = "mpeamemrd",
+ .swgroup = TEGRA_SWGROUP_MPE,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 27,
+ },
+ .la = {
+ .reg = 0x32c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x42,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x1c,
+ .name = "mpecsrd",
+ .swgroup = TEGRA_SWGROUP_MPE,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x32c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ .fifo_size = 16 * 8,
+ }, {
+ .id = 0x1d,
+ .name = "ppcsahbdmar",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x10,
+ },
+ },
+ .fifo_size = 16 * 2,
+ }, {
+ .id = 0x1e,
+ .name = "ppcsahbslvr",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x12,
+ },
+ },
+ .fifo_size = 16 * 8,
+ }, {
+ .id = 0x1f,
+ .name = "satar",
+ .swgroup = TEGRA_SWGROUP_SATA,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x350,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x33,
+ },
+ },
+ .fifo_size = 16 * 32,
+ }, {
+ .id = 0x20,
+ .name = "texsrd",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x338,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x13,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x21,
+ .name = "texsrd2",
+ .swgroup = TEGRA_SWGROUP_NV2,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x340,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x13,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x22,
+ .name = "vdebsevr",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x354,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ .fifo_size = 16 * 8,
+ }, {
+ .id = 0x23,
+ .name = "vdember",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x354,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xd0,
+ },
+ },
+ .fifo_size = 16 * 4,
+ }, {
+ .id = 0x24,
+ .name = "vdemcer",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x358,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x2a,
+ },
+ },
+ .fifo_size = 16 * 16,
+ }, {
+ .id = 0x25,
+ .name = "vdetper",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x358,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x74,
+ },
+ },
+ .fifo_size = 16 * 16,
+ }, {
+ .id = 0x26,
+ .name = "mpcorelpr",
+ .swgroup = TEGRA_SWGROUP_MPCORELP,
+ .regs = {
+ .la = {
+ .reg = 0x324,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ .fifo_size = 16 * 14,
+ }, {
+ .id = 0x27,
+ .name = "mpcorer",
+ .swgroup = TEGRA_SWGROUP_MPCORE,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ .fifo_size = 16 * 14,
+ }, {
+ .id = 0x28,
+ .name = "eppu",
+ .swgroup = TEGRA_SWGROUP_EPP,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 8,
+ },
+ .la = {
+ .reg = 0x300,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x6c,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x29,
+ .name = "eppv",
+ .swgroup = TEGRA_SWGROUP_EPP,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 9,
+ },
+ .la = {
+ .reg = 0x304,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x6c,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x2a,
+ .name = "eppy",
+ .swgroup = TEGRA_SWGROUP_EPP,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x304,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x6c,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x2b,
+ .name = "mpeunifbw",
+ .swgroup = TEGRA_SWGROUP_MPE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x330,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x13,
+ },
+ },
+ .fifo_size = 16 * 8,
+ }, {
+ .id = 0x2c,
+ .name = "viwsb",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x364,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x12,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x2d,
+ .name = "viwu",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x368,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xb2,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x2e,
+ .name = "viwv",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x368,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xb2,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x2f,
+ .name = "viwy",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x36c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x12,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x30,
+ .name = "g2dw",
+ .swgroup = TEGRA_SWGROUP_G2,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x30c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x9,
+ },
+ },
+ .fifo_size = 16 * 128,
+ }, {
+ .id = 0x31,
+ .name = "afiw",
+ .swgroup = TEGRA_SWGROUP_AFI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2e0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0c,
+ },
+ },
+ .fifo_size = 16 * 32,
+ }, {
+ .id = 0x32,
+ .name = "avpcarm7w",
+ .swgroup = TEGRA_SWGROUP_AVPC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0e,
+ },
+ },
+ .fifo_size = 16 * 2,
+ }, {
+ .id = 0x33,
+ .name = "fdcdwr",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 19,
+ },
+ .la = {
+ .reg = 0x338,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0a,
+ },
+ },
+ .fifo_size = 16 * 48,
+ }, {
+ .id = 0x34,
+ .name = "fdcdwr2",
+ .swgroup = TEGRA_SWGROUP_NV2,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x340,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0a,
+ },
+ },
+ .fifo_size = 16 * 48,
+ }, {
+ .id = 0x35,
+ .name = "hdaw",
+ .swgroup = TEGRA_SWGROUP_HDA,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ .fifo_size = 16 * 16,
+ }, {
+ .id = 0x36,
+ .name = "host1xw",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x314,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x10,
+ },
+ },
+ .fifo_size = 16 * 32,
+ }, {
+ .id = 0x37,
+ .name = "ispw",
+ .swgroup = TEGRA_SWGROUP_ISP,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x31c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x38,
+ .name = "mpcorelpw",
+ .swgroup = TEGRA_SWGROUP_MPCORELP,
+ .regs = {
+ .la = {
+ .reg = 0x324,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0e,
+ },
+ },
+ .fifo_size = 16 * 24,
+ }, {
+ .id = 0x39,
+ .name = "mpcorew",
+ .swgroup = TEGRA_SWGROUP_MPCORE,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0e,
+ },
+ },
+ .fifo_size = 16 * 24,
+ }, {
+ .id = 0x3a,
+ .name = "mpecswr",
+ .swgroup = TEGRA_SWGROUP_MPE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 26,
+ },
+ .la = {
+ .reg = 0x330,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ .fifo_size = 16 * 8,
+ }, {
+ .id = 0x3b,
+ .name = "ppcsahbdmaw",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 27,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x10,
+ },
+ },
+ .fifo_size = 16 * 2,
+ }, {
+ .id = 0x3c,
+ .name = "ppcsahbslvw",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x06,
+ },
+ },
+ .fifo_size = 16 * 4,
+ }, {
+ .id = 0x3d,
+ .name = "sataw",
+ .swgroup = TEGRA_SWGROUP_SATA,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x350,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x33,
+ },
+ },
+ .fifo_size = 16 * 32,
+ }, {
+ .id = 0x3e,
+ .name = "vdebsevw",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x35c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ .fifo_size = 16 * 4,
+ }, {
+ .id = 0x3f,
+ .name = "vdedbgw",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x35c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ .fifo_size = 16 * 16,
+ }, {
+ .id = 0x40,
+ .name = "vdembew",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x360,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x42,
+ },
+ },
+ .fifo_size = 16 * 2,
+ }, {
+ .id = 0x41,
+ .name = "vdetpmw",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x360,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x2a,
+ },
+ },
+ .fifo_size = 16 * 16,
+ },
+};
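+
+/*
+ * Note: the fifo_size values above appear to be byte counts (each is a
+ * multiple of the 16-byte atom size); the latency-allowance calculation in
+ * tegra30_mc_tune_client_latency() below treats them as bytes when converting
+ * a bandwidth in MB/s into a FIFO drain time in nanoseconds.
+ */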
+
+static const struct tegra_smmu_swgroup tegra30_swgroups[] = {
+ { .name = "dc", .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 },
+ { .name = "dcb", .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 },
+ { .name = "epp", .swgroup = TEGRA_SWGROUP_EPP, .reg = 0x248 },
+ { .name = "g2", .swgroup = TEGRA_SWGROUP_G2, .reg = 0x24c },
+ { .name = "mpe", .swgroup = TEGRA_SWGROUP_MPE, .reg = 0x264 },
+ { .name = "vi", .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 },
+ { .name = "afi", .swgroup = TEGRA_SWGROUP_AFI, .reg = 0x238 },
+ { .name = "avpc", .swgroup = TEGRA_SWGROUP_AVPC, .reg = 0x23c },
+ { .name = "nv", .swgroup = TEGRA_SWGROUP_NV, .reg = 0x268 },
+ { .name = "nv2", .swgroup = TEGRA_SWGROUP_NV2, .reg = 0x26c },
+ { .name = "hda", .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 },
+ { .name = "hc", .swgroup = TEGRA_SWGROUP_HC, .reg = 0x250 },
+ { .name = "ppcs", .swgroup = TEGRA_SWGROUP_PPCS, .reg = 0x270 },
+ { .name = "sata", .swgroup = TEGRA_SWGROUP_SATA, .reg = 0x278 },
+ { .name = "vde", .swgroup = TEGRA_SWGROUP_VDE, .reg = 0x27c },
+ { .name = "isp", .swgroup = TEGRA_SWGROUP_ISP, .reg = 0x258 },
+};
+
+static const unsigned int tegra30_group_drm[] = {
+ TEGRA_SWGROUP_DC,
+ TEGRA_SWGROUP_DCB,
+ TEGRA_SWGROUP_G2,
+ TEGRA_SWGROUP_NV,
+ TEGRA_SWGROUP_NV2,
+};
+
+static const struct tegra_smmu_group_soc tegra30_groups[] = {
+ {
+ .name = "drm",
+ .swgroups = tegra30_group_drm,
+ .num_swgroups = ARRAY_SIZE(tegra30_group_drm),
+ },
+};
+
+static const struct tegra_smmu_soc tegra30_smmu_soc = {
+ .clients = tegra30_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra30_mc_clients),
+ .swgroups = tegra30_swgroups,
+ .num_swgroups = ARRAY_SIZE(tegra30_swgroups),
+ .groups = tegra30_groups,
+ .num_groups = ARRAY_SIZE(tegra30_groups),
+ .supports_round_robin_arbitration = false,
+ .supports_request_limit = false,
+ .num_tlb_lines = 16,
+ .num_asids = 4,
+};
+
+#define TEGRA30_MC_RESET(_name, _control, _status, _bit) \
+ { \
+ .name = #_name, \
+ .id = TEGRA30_MC_RESET_##_name, \
+ .control = _control, \
+ .status = _status, \
+ .bit = _bit, \
+ }
+
+static const struct tegra_mc_reset tegra30_mc_resets[] = {
+ TEGRA30_MC_RESET(AFI, 0x200, 0x204, 0),
+ TEGRA30_MC_RESET(AVPC, 0x200, 0x204, 1),
+ TEGRA30_MC_RESET(DC, 0x200, 0x204, 2),
+ TEGRA30_MC_RESET(DCB, 0x200, 0x204, 3),
+ TEGRA30_MC_RESET(EPP, 0x200, 0x204, 4),
+ TEGRA30_MC_RESET(2D, 0x200, 0x204, 5),
+ TEGRA30_MC_RESET(HC, 0x200, 0x204, 6),
+ TEGRA30_MC_RESET(HDA, 0x200, 0x204, 7),
+ TEGRA30_MC_RESET(ISP, 0x200, 0x204, 8),
+ TEGRA30_MC_RESET(MPCORE, 0x200, 0x204, 9),
+ TEGRA30_MC_RESET(MPCORELP, 0x200, 0x204, 10),
+ TEGRA30_MC_RESET(MPE, 0x200, 0x204, 11),
+ TEGRA30_MC_RESET(3D, 0x200, 0x204, 12),
+ TEGRA30_MC_RESET(3D2, 0x200, 0x204, 13),
+ TEGRA30_MC_RESET(PPCS, 0x200, 0x204, 14),
+ TEGRA30_MC_RESET(SATA, 0x200, 0x204, 15),
+ TEGRA30_MC_RESET(VDE, 0x200, 0x204, 16),
+ TEGRA30_MC_RESET(VI, 0x200, 0x204, 17),
+};
+
+static void tegra30_mc_tune_client_latency(struct tegra_mc *mc,
+ const struct tegra_mc_client *client,
+ unsigned int bandwidth_mbytes_sec)
+{
+ u32 arb_tolerance_compensation_nsec, arb_tolerance_compensation_div;
+ unsigned int fifo_size = client->fifo_size;
+ u32 arb_nsec, la_ticks, value;
+
+ /* see 18.4.1 Client Configuration in Tegra3 TRM v03p */
+ if (bandwidth_mbytes_sec)
+ arb_nsec = fifo_size * NSEC_PER_USEC / bandwidth_mbytes_sec;
+ else
+ arb_nsec = U32_MAX;
+
+ /*
+	 * The latency allowance should be set with consideration for the
+	 * module's latency tolerance and internal buffering capabilities.
+	 *
+	 * Display memory clients use isochronous transfers and have very low
+	 * tolerance to belated transfers. Hence we need to compensate for
+	 * memory arbitration imperfections for them in order to prevent a FIFO
+	 * underflow condition when the memory bus is busy.
+ *
+ * VI clients also need a stronger compensation.
+ */
+ switch (client->swgroup) {
+ case TEGRA_SWGROUP_MPCORE:
+ case TEGRA_SWGROUP_PTC:
+ /*
+ * We always want lower latency for these clients, hence
+ * don't touch them.
+ */
+ return;
+
+ case TEGRA_SWGROUP_DC:
+ case TEGRA_SWGROUP_DCB:
+ arb_tolerance_compensation_nsec = 1050;
+ arb_tolerance_compensation_div = 2;
+ break;
+
+ case TEGRA_SWGROUP_VI:
+ arb_tolerance_compensation_nsec = 1050;
+ arb_tolerance_compensation_div = 1;
+ break;
+
+ default:
+ arb_tolerance_compensation_nsec = 150;
+ arb_tolerance_compensation_div = 1;
+ break;
+ }
+
+ if (arb_nsec > arb_tolerance_compensation_nsec)
+ arb_nsec -= arb_tolerance_compensation_nsec;
+ else
+ arb_nsec = 0;
+
+ arb_nsec /= arb_tolerance_compensation_div;
+
+ /*
+	 * Latency allowance is the number of ticks a request from a particular
+ * client may wait in the EMEM arbiter before it becomes a high-priority
+ * request.
+ */
+ la_ticks = arb_nsec / mc->tick;
+ la_ticks = min(la_ticks, client->regs.la.mask);
+
+ value = mc_readl(mc, client->regs.la.reg);
+ value &= ~(client->regs.la.mask << client->regs.la.shift);
+ value |= la_ticks << client->regs.la.shift;
+ mc_writel(mc, value, client->regs.la.reg);
+}
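+
+/*
+ * Worked example for the calculation above (illustrative numbers only): a
+ * display0a request of 2048 MB/s with its 16 * 128 = 2048 byte FIFO gives
+ * arb_nsec = 2048 * 1000 / 2048 = 1000 ns; the DC compensation (1050 ns,
+ * divisor 2) then clamps this to 0 ns, i.e. the most aggressive latency
+ * allowance the hardware can express.
+ */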
+
+static int tegra30_mc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(src->provider);
+ const struct tegra_mc_client *client = &mc->soc->clients[src->id];
+ u64 peak_bandwidth = icc_units_to_bps(src->peak_bw);
+
+ /*
+ * Skip pre-initialization that is done by icc_node_add(), which sets
+ * bandwidth to maximum for all clients before drivers are loaded.
+ *
+	 * This doesn't make sense for us because we don't have drivers for all
+	 * clients and it's okay to keep the configuration left by the bootloader
+	 * during boot, at least for now.
+ */
+ if (src == dst)
+ return 0;
+
+ /* convert bytes/sec to megabytes/sec */
+ do_div(peak_bandwidth, 1000000);
+
+ tegra30_mc_tune_client_latency(mc, client, peak_bandwidth);
+
+ return 0;
+}
+
+static int tegra30_mc_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ /*
+	 * ISO clients need to reserve extra bandwidth up-front because
+	 * there could be high bandwidth pressure during the initial filling
+	 * of the client's FIFO buffers. In addition, we need to take into
+	 * account imperfections of the memory subsystem.
+ */
+ if (tag & TEGRA_MC_ICC_TAG_ISO)
+ peak_bw = tegra_mc_scale_percents(peak_bw, 400);
+
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
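+
+/*
+ * Illustrative example for the aggregation above: assuming
+ * tegra_mc_scale_percents() scales its argument by the given percentage, an
+ * ISO-tagged client requesting a peak of 1000 units contributes 4000 units
+ * to the aggregated peak, while its average bandwidth is summed unchanged.
+ */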
+
+static struct icc_node_data *
+tegra30_mc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
+ const struct tegra_mc_client *client;
+ unsigned int i, idx = spec->args[0];
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ list_for_each_entry(node, &mc->provider.nodes, node_list) {
+ if (node->id != idx)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ client = &mc->soc->clients[idx];
+ ndata->node = node;
+
+ switch (client->swgroup) {
+ case TEGRA_SWGROUP_DC:
+ case TEGRA_SWGROUP_DCB:
+ case TEGRA_SWGROUP_PTC:
+ case TEGRA_SWGROUP_VI:
+ /* these clients are isochronous by default */
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ break;
+
+ default:
+ ndata->tag = TEGRA_MC_ICC_TAG_DEFAULT;
+ break;
+ }
+
+ return ndata;
+ }
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ if (mc->soc->clients[i].id == idx)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ dev_err(mc->dev, "invalid ICC client ID %u\n", idx);
+
+ return ERR_PTR(-EINVAL);
+}
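+
+/*
+ * A hypothetical consumer binding handled by the translation above (the
+ * client define name is assumed here for illustration and must match the
+ * client IDs from dt-bindings/memory/tegra30-mc.h):
+ *
+ *   interconnects = <&mc TEGRA30_MC_DISPLAY0A &emc TEGRA_ICC_EMEM>;
+ *   interconnect-names = "dma-mem";
+ */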
+
+static const struct tegra_mc_icc_ops tegra30_mc_icc_ops = {
+ .xlate_extended = tegra30_mc_of_icc_xlate_extended,
+	.aggregate = tegra30_mc_icc_aggregate,
+ .set = tegra30_mc_icc_set,
+};
+
+const struct tegra_mc_soc tegra30_mc_soc = {
+ .clients = tegra30_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra30_mc_clients),
+ .num_address_bits = 32,
+ .atom_size = 16,
+ .client_id_mask = 0x7f,
+ .smmu = &tegra30_smmu_soc,
+ .emem_regs = tegra30_mc_emem_regs,
+ .num_emem_regs = ARRAY_SIZE(tegra30_mc_emem_regs),
+ .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION |
+ MC_INT_DECERR_EMEM,
+ .reset_ops = &tegra_mc_reset_ops_common,
+ .resets = tegra30_mc_resets,
+ .num_resets = ARRAY_SIZE(tegra30_mc_resets),
+ .icc_ops = &tegra30_mc_icc_ops,
+ .ops = &tegra30_mc_ops,
+};