Diffstat (limited to 'drivers/memory')
-rw-r--r--  drivers/memory/.gitignore | 2
-rw-r--r--  drivers/memory/Kconfig | 231
-rw-r--r--  drivers/memory/Makefile | 41
-rw-r--r--  drivers/memory/atmel-ebi.c | 648
-rw-r--r--  drivers/memory/brcmstb_dpfe.c | 947
-rw-r--r--  drivers/memory/brcmstb_memc.c | 301
-rw-r--r--  drivers/memory/bt1-l2-ctl.c | 323
-rw-r--r--  drivers/memory/da8xx-ddrctl.c | 165
-rw-r--r--  drivers/memory/dfl-emif.c | 259
-rw-r--r--  drivers/memory/emif-asm-offsets.c | 14
-rw-r--r--  drivers/memory/emif.c | 1201
-rw-r--r--  drivers/memory/emif.h | 607
-rw-r--r--  drivers/memory/fsl-corenet-cf.c | 268
-rw-r--r--  drivers/memory/fsl_ifc.c | 331
-rw-r--r--  drivers/memory/jedec_ddr.h | 280
-rw-r--r--  drivers/memory/jedec_ddr_data.c | 174
-rw-r--r--  drivers/memory/jz4780-nemc.c | 422
-rw-r--r--  drivers/memory/mtk-smi.c | 871
-rw-r--r--  drivers/memory/mvebu-devbus.c | 345
-rw-r--r--  drivers/memory/of_memory.c | 398
-rw-r--r--  drivers/memory/of_memory.h | 60
-rw-r--r--  drivers/memory/omap-gpmc.c | 2761
-rw-r--r--  drivers/memory/pl172.c | 316
-rw-r--r--  drivers/memory/pl353-smc.c | 170
-rw-r--r--  drivers/memory/renesas-rpc-if.c | 809
-rw-r--r--  drivers/memory/samsung/Kconfig | 35
-rw-r--r--  drivers/memory/samsung/Makefile | 3
-rw-r--r--  drivers/memory/samsung/exynos-srom.c | 212
-rw-r--r--  drivers/memory/samsung/exynos-srom.h | 48
-rw-r--r--  drivers/memory/samsung/exynos5422-dmc.c | 1593
-rw-r--r--  drivers/memory/stm32-fmc2-ebi.c | 1212
-rw-r--r--  drivers/memory/tegra/Kconfig | 64
-rw-r--r--  drivers/memory/tegra/Makefile | 25
-rw-r--r--  drivers/memory/tegra/mc.c | 1046
-rw-r--r--  drivers/memory/tegra/mc.h | 212
-rw-r--r--  drivers/memory/tegra/tegra114.c | 1117
-rw-r--r--  drivers/memory/tegra/tegra124-emc.c | 1535
-rw-r--r--  drivers/memory/tegra/tegra124.c | 1311
-rw-r--r--  drivers/memory/tegra/tegra186-emc.c | 421
-rw-r--r--  drivers/memory/tegra/tegra186.c | 884
-rw-r--r--  drivers/memory/tegra/tegra194.c | 1361
-rw-r--r--  drivers/memory/tegra/tegra20-emc.c | 1279
-rw-r--r--  drivers/memory/tegra/tegra20.c | 809
-rw-r--r--  drivers/memory/tegra/tegra210-emc-cc-r21021.c | 1774
-rw-r--r--  drivers/memory/tegra/tegra210-emc-core.c | 2064
-rw-r--r--  drivers/memory/tegra/tegra210-emc-table.c | 88
-rw-r--r--  drivers/memory/tegra/tegra210-emc.h | 1016
-rw-r--r--  drivers/memory/tegra/tegra210-mc.h | 50
-rw-r--r--  drivers/memory/tegra/tegra210.c | 1290
-rw-r--r--  drivers/memory/tegra/tegra234.c | 1065
-rw-r--r--  drivers/memory/tegra/tegra30-emc.c | 1751
-rw-r--r--  drivers/memory/tegra/tegra30.c | 1403
-rw-r--r--  drivers/memory/ti-aemif.c | 453
-rw-r--r--  drivers/memory/ti-emif-pm.c | 346
-rw-r--r--  drivers/memory/ti-emif-sram-pm.S | 368
55 files changed, 36779 insertions, 0 deletions
diff --git a/drivers/memory/.gitignore b/drivers/memory/.gitignore
new file mode 100644
index 0000000000..5e84bee05e
--- /dev/null
+++ b/drivers/memory/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/ti-emif-asm-offsets.h
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
new file mode 100644
index 0000000000..8efdd1f971
--- /dev/null
+++ b/drivers/memory/Kconfig
@@ -0,0 +1,231 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Memory devices
+#
+
+menuconfig MEMORY
+ bool "Memory Controller drivers"
+ help
+ This option allows you to enable specific memory controller drivers,
+ useful mostly on embedded systems. These could be controllers
+ for DRAM (SDR, DDR), ROM, SRAM and others. The drivers' features
+ vary from memory tuning and frequency scaling to enabling
+ access to attached peripherals through the memory bus.
+
+if MEMORY
+
+config DDR
+ bool
+ help
+ Data from JEDEC specs for DDR SDRAM memories,
+ particularly the AC timing parameters and addressing
+ information. This data is useful for drivers handling
+ DDR SDRAM controllers.
+
+config ARM_PL172_MPMC
+ tristate "ARM PL172 MPMC driver"
+ depends on ARM_AMBA && OF
+ help
+ This selects the ARM PrimeCell PL172 MultiPort Memory Controller.
+ If you have an embedded system with an AMBA bus and a PL172
+ controller, say Y or M here.
+
+config ATMEL_EBI
+ bool "Atmel EBI driver"
+ default y if ARCH_AT91
+ depends on ARCH_AT91 || COMPILE_TEST
+ depends on OF
+ select MFD_SYSCON
+ select MFD_ATMEL_SMC
+ help
+ Driver for the Atmel EBI controller.
+ Used to configure the EBI (External Bus Interface) when a device
+ tree is used. This bus supports NANDs, external Ethernet controllers,
+ SRAMs, ATA devices, etc.
+
+config BRCMSTB_DPFE
+ tristate "Broadcom STB DPFE driver"
+ default ARCH_BRCMSTB
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ help
+ This driver provides access to the DPFE interface of Broadcom
+ STB SoCs. The firmware running on the DCPU inside the DDR PHY can
+ provide current information about the system's RAM, for instance
+ the DRAM refresh rate. This can be used as an indirect indicator
+ for the DRAM's temperature. Slower refresh rate means cooler RAM,
+ higher refresh rate means hotter RAM.
+
+config BRCMSTB_MEMC
+ tristate "Broadcom STB MEMC driver"
+ default ARCH_BRCMSTB
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ help
+ This driver provides a way to configure the Broadcom STB memory
+ controller and specifically control the Self Refresh Power Down
+ (SRPD) inactivity timeout.
+
+config BT1_L2_CTL
+ bool "Baikal-T1 CM2 L2-RAM Cache Control Block"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ The Baikal-T1 CPU is based on the MIPS P5600 Warrior IP-core. The CPU
+ contains a Coherency Manager v2 with an embedded 1MB L2-cache. It's
+ possible to tune the L2 cache performance by setting the data,
+ tags and way-select latencies of RAM access. This driver provides a
+ device tree properties-based and sysfs interface for it.
+
+config TI_AEMIF
+ tristate "Texas Instruments AEMIF driver"
+ depends on ARCH_DAVINCI || ARCH_KEYSTONE || COMPILE_TEST
+ depends on OF
+ help
+ This driver is for the AEMIF module available in Texas Instruments
+ SoCs. AEMIF stands for Asynchronous External Memory Interface and
+ is intended to provide a glue-less interface to a variety of
+ asynchronous memory devices like ASRAM, NOR and NAND memory. A total
+ of 256M bytes of any of these memories can be accessed at a given
+ time via four chip selects with 64M byte access per chip select.
+
+config TI_EMIF
+ tristate "Texas Instruments EMIF driver"
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ select DDR
+ help
+ This driver is for the EMIF module available in Texas Instruments
+ SoCs. EMIF is an SDRAM controller that, based on its revision,
+ supports one or more of DDR2, DDR3, and LPDDR2 SDRAM protocols.
+ This driver presently takes care of only LPDDR2 memories. The
+ functions of the driver include re-configuring AC timing
+ parameters and other settings during frequency, voltage and
+ temperature changes.
+
+config OMAP_GPMC
+ tristate "Texas Instruments OMAP SoC GPMC driver"
+ depends on OF_ADDRESS
+ depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+ select GPIOLIB
+ help
+ This driver is for the General Purpose Memory Controller (GPMC)
+ present on Texas Instruments SoCs (e.g. OMAP2+). GPMC allows
+ interfacing to a variety of asynchronous as well as synchronous
+ memory devices like NOR, NAND, OneNAND, SRAM.
+
+config OMAP_GPMC_DEBUG
+ bool "Enable GPMC debug output and skip reset of GPMC during init"
+ depends on OMAP_GPMC
+ help
+ Enables verbose debugging mostly to decode the bootloader provided
+ timings. To preserve the bootloader provided timings, the reset
+ of GPMC is skipped during init. Enable this during development to
+ configure devices connected to the GPMC bus.
+
+ NOTE: In addition to matching the register setup with the bootloader
+ you also need to match the GPMC FCLK frequency used by the
+ bootloader or else the GPMC timings won't be identical with the
+ bootloader timings.
+
+config TI_EMIF_SRAM
+ tristate "Texas Instruments EMIF SRAM driver"
+ depends on SOC_AM33XX || SOC_AM43XX || (ARM && CPU_V7 && COMPILE_TEST)
+ depends on SRAM
+ help
+ This driver is for the EMIF module available on Texas Instruments
+ AM33XX and AM43XX SoCs and is required for PM. Certain parts of
+ the EMIF PM code must run from on-chip SRAM late in the suspend
+ sequence so this driver provides several relocatable PM functions
+ for the SoC PM code to use.
+
+config FPGA_DFL_EMIF
+ tristate "FPGA DFL EMIF Driver"
+ depends on FPGA_DFL && HAS_IOMEM
+ help
+ This driver is for the EMIF private feature implemented under
+ FPGA Device Feature List (DFL) framework. It is used to expose
+ memory interface status information as well as memory clearing
+ control.
+
+config MVEBU_DEVBUS
+ bool "Marvell EBU Device Bus Controller"
+ default y if PLAT_ORION
+ depends on PLAT_ORION || COMPILE_TEST
+ depends on OF
+ help
+ This driver is for the Device Bus controller available in some
+ Marvell EBU SoCs such as Discovery (mv78xx0), Orion (88f5xxx) and
+ Armada 370 and Armada XP. This controller allows to handle flash
+ devices such as NOR, NAND, SRAM, and FPGA.
+
+config FSL_CORENET_CF
+ tristate "Freescale CoreNet Error Reporting"
+ depends on FSL_SOC_BOOKE || COMPILE_TEST
+ help
+ Say Y for reporting of errors from the Freescale CoreNet
+ Coherency Fabric. Errors reported include accesses to
+ physical addresses that are mapped by no local access window
+ (LAW) or by an invalid LAW, as well as bad cache state that
+ represents a coherency violation.
+
+config FSL_IFC
+ bool "Freescale IFC driver" if COMPILE_TEST
+ depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST
+ depends on HAS_IOMEM
+
+config JZ4780_NEMC
+ bool "Ingenic JZ4780 SoC NEMC driver"
+ depends on MIPS || COMPILE_TEST
+ depends on HAS_IOMEM && OF
+ help
+ This driver is for the NAND/External Memory Controller (NEMC) in
+ the Ingenic JZ4780. This controller is used to handle external
+ memory devices such as NAND and SRAM.
+
+config MTK_SMI
+ tristate "MediaTek SoC Memory Controller driver" if COMPILE_TEST
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ This driver is for the Memory Controller module in MediaTek SoCs;
+ it mainly helps enable/disable the IOMMU and controls the power
+ domain and clocks for each local arbiter.
+
+config DA8XX_DDRCTL
+ bool "Texas Instruments da8xx DDR2/mDDR driver"
+ depends on ARCH_DAVINCI_DA8XX || COMPILE_TEST
+ help
+ This driver is for the DDR2/mDDR Memory Controller present on
+ Texas Instruments da8xx SoCs. It's used to tweak various memory
+ controller configuration options.
+
+config PL353_SMC
+ tristate "ARM PL35X Static Memory Controller(SMC) driver"
+ default y if ARM
+ depends on ARM || COMPILE_TEST
+ depends on ARM_AMBA
+ help
+ This driver is for the ARM PL351/PL353 Static Memory
+ Controller (SMC) module.
+
+config RENESAS_RPCIF
+ tristate "Renesas RPC-IF driver"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select REGMAP_MMIO
+ select RESET_CONTROLLER
+ help
+ This supports the Renesas R-Car Gen3 or RZ/G2 RPC-IF, which provides
+ either a SPI host or HyperFlash interface. You'll have to select the
+ individual components under the corresponding menu.
+
+config STM32_FMC2_EBI
+ tristate "Support for FMC2 External Bus Interface on STM32MP SoCs"
+ depends on ARCH_STM32 || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ Select this option to enable the STM32 FMC2 External Bus Interface
+ controller. This driver configures the transactions with external
+ devices (like SRAM, Ethernet adapters, FPGAs, LCD displays, ...) on
+ SoCs containing the FMC2 External Bus Interface.
+
+source "drivers/memory/samsung/Kconfig"
+source "drivers/memory/tegra/Kconfig"
+
+endif
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
new file mode 100644
index 0000000000..d2e6ca9abb
--- /dev/null
+++ b/drivers/memory/Makefile
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for memory devices
+#
+
+obj-$(CONFIG_DDR) += jedec_ddr_data.o
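+# of_memory parses JEDEC DDR timings from the device tree; it is only useful
+# together with the JEDEC data tables above, hence the guard below.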
+ifeq ($(CONFIG_DDR),y)
+obj-$(CONFIG_OF) += of_memory.o
+endif
+obj-$(CONFIG_ARM_PL172_MPMC) += pl172.o
+obj-$(CONFIG_ATMEL_EBI) += atmel-ebi.o
+obj-$(CONFIG_BRCMSTB_DPFE) += brcmstb_dpfe.o
+obj-$(CONFIG_BRCMSTB_MEMC) += brcmstb_memc.o
+obj-$(CONFIG_BT1_L2_CTL) += bt1-l2-ctl.o
+obj-$(CONFIG_TI_AEMIF) += ti-aemif.o
+obj-$(CONFIG_TI_EMIF) += emif.o
+obj-$(CONFIG_OMAP_GPMC) += omap-gpmc.o
+obj-$(CONFIG_FSL_CORENET_CF) += fsl-corenet-cf.o
+obj-$(CONFIG_FSL_IFC) += fsl_ifc.o
+obj-$(CONFIG_MVEBU_DEVBUS) += mvebu-devbus.o
+obj-$(CONFIG_JZ4780_NEMC) += jz4780-nemc.o
+obj-$(CONFIG_MTK_SMI) += mtk-smi.o
+obj-$(CONFIG_DA8XX_DDRCTL) += da8xx-ddrctl.o
+obj-$(CONFIG_PL353_SMC) += pl353-smc.o
+obj-$(CONFIG_RENESAS_RPCIF) += renesas-rpc-if.o
+obj-$(CONFIG_STM32_FMC2_EBI) += stm32-fmc2-ebi.o
+
+obj-$(CONFIG_SAMSUNG_MC) += samsung/
+obj-$(CONFIG_TEGRA_MC) += tegra/
+obj-$(CONFIG_TI_EMIF_SRAM) += ti-emif-sram.o
+obj-$(CONFIG_FPGA_DFL_EMIF) += dfl-emif.o
+
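+# ti-emif-sram is a composite object: the C PM code in ti-emif-pm.o plus the
+# relocatable low-level suspend code in ti-emif-sram-pm.o that runs from SRAM.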
+ti-emif-sram-objs := ti-emif-pm.o ti-emif-sram-pm.o
+
+$(obj)/ti-emif-sram-pm.o: $(obj)/ti-emif-asm-offsets.h
+
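+# ti-emif-asm-offsets.h is generated at build time from emif-asm-offsets.c so
+# the assembly in ti-emif-sram-pm.S can refer to C structure offsets by name.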
+$(obj)/ti-emif-asm-offsets.h: $(obj)/emif-asm-offsets.s FORCE
+ $(call filechk,offsets,__TI_EMIF_ASM_OFFSETS_H__)
+
+targets += emif-asm-offsets.s
+clean-files += ti-emif-asm-offsets.h
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
new file mode 100644
index 0000000000..635966d705
--- /dev/null
+++ b/drivers/memory/atmel-ebi.c
@@ -0,0 +1,648 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * EBI driver for Atmel chips
+ * inspired by the fsl weim bus driver
+ *
+ * Copyright (C) 2013 Jean-Jacques Hiblot <jjhiblot@traphandler.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/atmel-matrix.h>
+#include <linux/mfd/syscon/atmel-smc.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <soc/at91/atmel-sfr.h>
+
+#define AT91_EBI_NUM_CS 8
+
+struct atmel_ebi_dev_config {
+ int cs;
+ struct atmel_smc_cs_conf smcconf;
+};
+
+struct atmel_ebi;
+
+struct atmel_ebi_dev {
+ struct list_head node;
+ struct atmel_ebi *ebi;
+ u32 mode;
+ int numcs;
+ struct atmel_ebi_dev_config configs[];
+};
+
+struct atmel_ebi_caps {
+ unsigned int available_cs;
+ unsigned int ebi_csa_offs;
+ const char *regmap_name;
+ void (*get_config)(struct atmel_ebi_dev *ebid,
+ struct atmel_ebi_dev_config *conf);
+ int (*xlate_config)(struct atmel_ebi_dev *ebid,
+ struct device_node *configs_np,
+ struct atmel_ebi_dev_config *conf);
+ void (*apply_config)(struct atmel_ebi_dev *ebid,
+ struct atmel_ebi_dev_config *conf);
+};
+
+struct atmel_ebi {
+ struct clk *clk;
+ struct regmap *regmap;
+ struct {
+ struct regmap *regmap;
+ struct clk *clk;
+ const struct atmel_hsmc_reg_layout *layout;
+ } smc;
+
+ struct device *dev;
+ const struct atmel_ebi_caps *caps;
+ struct list_head devs;
+};
+
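+/*
+ * Maps one "atmel,smc-*-ns" device tree property to the SMC register field
+ * (setup, pulse or cycle at the given shift) that it programs.
+ */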
+struct atmel_smc_timing_xlate {
+ const char *name;
+ int (*converter)(struct atmel_smc_cs_conf *conf,
+ unsigned int shift, unsigned int ncycles);
+ unsigned int shift;
+};
+
+#define ATMEL_SMC_SETUP_XLATE(nm, pos) \
+ { .name = nm, .converter = atmel_smc_cs_conf_set_setup, .shift = pos}
+
+#define ATMEL_SMC_PULSE_XLATE(nm, pos) \
+ { .name = nm, .converter = atmel_smc_cs_conf_set_pulse, .shift = pos}
+
+#define ATMEL_SMC_CYCLE_XLATE(nm, pos) \
+ { .name = nm, .converter = atmel_smc_cs_conf_set_cycle, .shift = pos}
+
+static void at91sam9_ebi_get_config(struct atmel_ebi_dev *ebid,
+ struct atmel_ebi_dev_config *conf)
+{
+ atmel_smc_cs_conf_get(ebid->ebi->smc.regmap, conf->cs,
+ &conf->smcconf);
+}
+
+static void sama5_ebi_get_config(struct atmel_ebi_dev *ebid,
+ struct atmel_ebi_dev_config *conf)
+{
+ atmel_hsmc_cs_conf_get(ebid->ebi->smc.regmap, ebid->ebi->smc.layout,
+ conf->cs, &conf->smcconf);
+}
+
+static const struct atmel_smc_timing_xlate timings_xlate_table[] = {
+ ATMEL_SMC_SETUP_XLATE("atmel,smc-ncs-rd-setup-ns",
+ ATMEL_SMC_NCS_RD_SHIFT),
+ ATMEL_SMC_SETUP_XLATE("atmel,smc-ncs-wr-setup-ns",
+ ATMEL_SMC_NCS_WR_SHIFT),
+ ATMEL_SMC_SETUP_XLATE("atmel,smc-nrd-setup-ns", ATMEL_SMC_NRD_SHIFT),
+ ATMEL_SMC_SETUP_XLATE("atmel,smc-nwe-setup-ns", ATMEL_SMC_NWE_SHIFT),
+ ATMEL_SMC_PULSE_XLATE("atmel,smc-ncs-rd-pulse-ns",
+ ATMEL_SMC_NCS_RD_SHIFT),
+ ATMEL_SMC_PULSE_XLATE("atmel,smc-ncs-wr-pulse-ns",
+ ATMEL_SMC_NCS_WR_SHIFT),
+ ATMEL_SMC_PULSE_XLATE("atmel,smc-nrd-pulse-ns", ATMEL_SMC_NRD_SHIFT),
+ ATMEL_SMC_PULSE_XLATE("atmel,smc-nwe-pulse-ns", ATMEL_SMC_NWE_SHIFT),
+ ATMEL_SMC_CYCLE_XLATE("atmel,smc-nrd-cycle-ns", ATMEL_SMC_NRD_SHIFT),
+ ATMEL_SMC_CYCLE_XLATE("atmel,smc-nwe-cycle-ns", ATMEL_SMC_NWE_SHIFT),
+};
+
+static int atmel_ebi_xslate_smc_timings(struct atmel_ebi_dev *ebid,
+ struct device_node *np,
+ struct atmel_smc_cs_conf *smcconf)
+{
+ unsigned int clk_rate = clk_get_rate(ebid->ebi->clk);
+ unsigned int clk_period_ns = NSEC_PER_SEC / clk_rate;
+ bool required = false;
+ unsigned int ncycles;
+ int ret, i;
+ u32 val;
+
+ ret = of_property_read_u32(np, "atmel,smc-tdf-ns", &val);
+ if (!ret) {
+ required = true;
+ ncycles = DIV_ROUND_UP(val, clk_period_ns);
+ if (ncycles > ATMEL_SMC_MODE_TDF_MAX) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (ncycles < ATMEL_SMC_MODE_TDF_MIN)
+ ncycles = ATMEL_SMC_MODE_TDF_MIN;
+
+ smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles);
+ }
+
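+ /*
+ * Timings are all-or-nothing: once any timing property (including the
+ * tdf value above) is present, every entry in the xlate table becomes
+ * mandatory and a partial set is rejected.
+ */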
+ for (i = 0; i < ARRAY_SIZE(timings_xlate_table); i++) {
+ const struct atmel_smc_timing_xlate *xlate;
+
+ xlate = &timings_xlate_table[i];
+
+ ret = of_property_read_u32(np, xlate->name, &val);
+ if (ret) {
+ if (!required)
+ continue;
+ else
+ break;
+ }
+
+ if (!required) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ncycles = DIV_ROUND_UP(val, clk_period_ns);
+ ret = xlate->converter(smcconf, xlate->shift, ncycles);
+ if (ret)
+ goto out;
+ }
+
+out:
+ if (ret) {
+ dev_err(ebid->ebi->dev,
+ "missing or invalid timings definition in %pOF",
+ np);
+ return ret;
+ }
+
+ return required;
+}
+
+static int atmel_ebi_xslate_smc_config(struct atmel_ebi_dev *ebid,
+ struct device_node *np,
+ struct atmel_ebi_dev_config *conf)
+{
+ struct atmel_smc_cs_conf *smcconf = &conf->smcconf;
+ bool required = false;
+ const char *tmp_str;
+ u32 tmp;
+ int ret;
+
+ ret = of_property_read_u32(np, "atmel,smc-bus-width", &tmp);
+ if (!ret) {
+ switch (tmp) {
+ case 8:
+ smcconf->mode |= ATMEL_SMC_MODE_DBW_8;
+ break;
+
+ case 16:
+ smcconf->mode |= ATMEL_SMC_MODE_DBW_16;
+ break;
+
+ case 32:
+ smcconf->mode |= ATMEL_SMC_MODE_DBW_32;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ required = true;
+ }
+
+ if (of_property_read_bool(np, "atmel,smc-tdf-optimized")) {
+ smcconf->mode |= ATMEL_SMC_MODE_TDFMODE_OPTIMIZED;
+ required = true;
+ }
+
+ tmp_str = NULL;
+ of_property_read_string(np, "atmel,smc-byte-access-type", &tmp_str);
+ if (tmp_str && !strcmp(tmp_str, "write")) {
+ smcconf->mode |= ATMEL_SMC_MODE_BAT_WRITE;
+ required = true;
+ }
+
+ tmp_str = NULL;
+ of_property_read_string(np, "atmel,smc-read-mode", &tmp_str);
+ if (tmp_str && !strcmp(tmp_str, "nrd")) {
+ smcconf->mode |= ATMEL_SMC_MODE_READMODE_NRD;
+ required = true;
+ }
+
+ tmp_str = NULL;
+ of_property_read_string(np, "atmel,smc-write-mode", &tmp_str);
+ if (tmp_str && !strcmp(tmp_str, "nwe")) {
+ smcconf->mode |= ATMEL_SMC_MODE_WRITEMODE_NWE;
+ required = true;
+ }
+
+ tmp_str = NULL;
+ of_property_read_string(np, "atmel,smc-exnw-mode", &tmp_str);
+ if (tmp_str) {
+ if (!strcmp(tmp_str, "frozen"))
+ smcconf->mode |= ATMEL_SMC_MODE_EXNWMODE_FROZEN;
+ else if (!strcmp(tmp_str, "ready"))
+ smcconf->mode |= ATMEL_SMC_MODE_EXNWMODE_READY;
+ else if (strcmp(tmp_str, "disabled"))
+ return -EINVAL;
+
+ required = true;
+ }
+
+ ret = of_property_read_u32(np, "atmel,smc-page-mode", &tmp);
+ if (!ret) {
+ switch (tmp) {
+ case 4:
+ smcconf->mode |= ATMEL_SMC_MODE_PS_4;
+ break;
+
+ case 8:
+ smcconf->mode |= ATMEL_SMC_MODE_PS_8;
+ break;
+
+ case 16:
+ smcconf->mode |= ATMEL_SMC_MODE_PS_16;
+ break;
+
+ case 32:
+ smcconf->mode |= ATMEL_SMC_MODE_PS_32;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ smcconf->mode |= ATMEL_SMC_MODE_PMEN;
+ required = true;
+ }
+
+ ret = atmel_ebi_xslate_smc_timings(ebid, np, &conf->smcconf);
+ if (ret < 0)
+ return -EINVAL;
+
+ if ((ret > 0 && !required) || (!ret && required)) {
+ dev_err(ebid->ebi->dev, "missing atmel,smc- properties in %pOF",
+ np);
+ return -EINVAL;
+ }
+
+ return required;
+}
+
+static void at91sam9_ebi_apply_config(struct atmel_ebi_dev *ebid,
+ struct atmel_ebi_dev_config *conf)
+{
+ atmel_smc_cs_conf_apply(ebid->ebi->smc.regmap, conf->cs,
+ &conf->smcconf);
+}
+
+static void sama5_ebi_apply_config(struct atmel_ebi_dev *ebid,
+ struct atmel_ebi_dev_config *conf)
+{
+ atmel_hsmc_cs_conf_apply(ebid->ebi->smc.regmap, ebid->ebi->smc.layout,
+ conf->cs, &conf->smcconf);
+}
+
+static int atmel_ebi_dev_setup(struct atmel_ebi *ebi, struct device_node *np,
+ int reg_cells)
+{
+ const struct atmel_ebi_caps *caps = ebi->caps;
+ struct atmel_ebi_dev_config conf = { };
+ struct device *dev = ebi->dev;
+ struct atmel_ebi_dev *ebid;
+ unsigned long cslines = 0;
+ int ret, numcs = 0, nentries, i;
+ bool apply = false;
+ u32 cs;
+
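+ /*
+ * Each "reg" entry spans reg_cells u32 cells (#address-cells plus
+ * #size-cells of this node); the first cell of every entry encodes
+ * the chip select number.
+ */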
+ nentries = of_property_count_elems_of_size(np, "reg",
+ reg_cells * sizeof(u32));
+ for (i = 0; i < nentries; i++) {
+ ret = of_property_read_u32_index(np, "reg", i * reg_cells,
+ &cs);
+ if (ret)
+ return ret;
+
+ if (cs >= AT91_EBI_NUM_CS ||
+ !(ebi->caps->available_cs & BIT(cs))) {
+ dev_err(dev, "invalid reg property in %pOF\n", np);
+ return -EINVAL;
+ }
+
+ if (!test_and_set_bit(cs, &cslines))
+ numcs++;
+ }
+
+ if (!numcs) {
+ dev_err(dev, "invalid reg property in %pOF\n", np);
+ return -EINVAL;
+ }
+
+ ebid = devm_kzalloc(ebi->dev, struct_size(ebid, configs, numcs),
+ GFP_KERNEL);
+ if (!ebid)
+ return -ENOMEM;
+
+ ebid->ebi = ebi;
+ ebid->numcs = numcs;
+
+ ret = caps->xlate_config(ebid, np, &conf);
+ if (ret < 0)
+ return ret;
+ else if (ret)
+ apply = true;
+
+ i = 0;
+ for_each_set_bit(cs, &cslines, AT91_EBI_NUM_CS) {
+ ebid->configs[i].cs = cs;
+
+ if (apply) {
+ conf.cs = cs;
+ caps->apply_config(ebid, &conf);
+ }
+
+ caps->get_config(ebid, &ebid->configs[i]);
+
+ /*
+ * Attach the EBI device to the generic SMC logic if at least
+ * one "atmel,smc-" property is present.
+ */
+ if (ebi->caps->ebi_csa_offs && apply)
+ regmap_update_bits(ebi->regmap,
+ ebi->caps->ebi_csa_offs,
+ BIT(cs), 0);
+
+ i++;
+ }
+
+ list_add_tail(&ebid->node, &ebi->devs);
+
+ return 0;
+}
+
+static const struct atmel_ebi_caps at91sam9260_ebi_caps = {
+ .available_cs = 0xff,
+ .ebi_csa_offs = AT91SAM9260_MATRIX_EBICSA,
+ .regmap_name = "atmel,matrix",
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = atmel_ebi_xslate_smc_config,
+ .apply_config = at91sam9_ebi_apply_config,
+};
+
+static const struct atmel_ebi_caps at91sam9261_ebi_caps = {
+ .available_cs = 0xff,
+ .ebi_csa_offs = AT91SAM9261_MATRIX_EBICSA,
+ .regmap_name = "atmel,matrix",
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = atmel_ebi_xslate_smc_config,
+ .apply_config = at91sam9_ebi_apply_config,
+};
+
+static const struct atmel_ebi_caps at91sam9263_ebi0_caps = {
+ .available_cs = 0x3f,
+ .ebi_csa_offs = AT91SAM9263_MATRIX_EBI0CSA,
+ .regmap_name = "atmel,matrix",
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = atmel_ebi_xslate_smc_config,
+ .apply_config = at91sam9_ebi_apply_config,
+};
+
+static const struct atmel_ebi_caps at91sam9263_ebi1_caps = {
+ .available_cs = 0x7,
+ .ebi_csa_offs = AT91SAM9263_MATRIX_EBI1CSA,
+ .regmap_name = "atmel,matrix",
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = atmel_ebi_xslate_smc_config,
+ .apply_config = at91sam9_ebi_apply_config,
+};
+
+static const struct atmel_ebi_caps at91sam9rl_ebi_caps = {
+ .available_cs = 0x3f,
+ .ebi_csa_offs = AT91SAM9RL_MATRIX_EBICSA,
+ .regmap_name = "atmel,matrix",
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = atmel_ebi_xslate_smc_config,
+ .apply_config = at91sam9_ebi_apply_config,
+};
+
+static const struct atmel_ebi_caps at91sam9g45_ebi_caps = {
+ .available_cs = 0x3f,
+ .ebi_csa_offs = AT91SAM9G45_MATRIX_EBICSA,
+ .regmap_name = "atmel,matrix",
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = atmel_ebi_xslate_smc_config,
+ .apply_config = at91sam9_ebi_apply_config,
+};
+
+static const struct atmel_ebi_caps at91sam9x5_ebi_caps = {
+ .available_cs = 0x3f,
+ .ebi_csa_offs = AT91SAM9X5_MATRIX_EBICSA,
+ .regmap_name = "atmel,matrix",
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = atmel_ebi_xslate_smc_config,
+ .apply_config = at91sam9_ebi_apply_config,
+};
+
+static const struct atmel_ebi_caps sama5d3_ebi_caps = {
+ .available_cs = 0xf,
+ .get_config = sama5_ebi_get_config,
+ .xlate_config = atmel_ebi_xslate_smc_config,
+ .apply_config = sama5_ebi_apply_config,
+};
+
+static const struct atmel_ebi_caps sam9x60_ebi_caps = {
+ .available_cs = 0x3f,
+ .ebi_csa_offs = AT91_SFR_CCFG_EBICSA,
+ .regmap_name = "microchip,sfr",
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = atmel_ebi_xslate_smc_config,
+ .apply_config = at91sam9_ebi_apply_config,
+};
+
+static const struct of_device_id atmel_ebi_id_table[] = {
+ {
+ .compatible = "atmel,at91sam9260-ebi",
+ .data = &at91sam9260_ebi_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9261-ebi",
+ .data = &at91sam9261_ebi_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9263-ebi0",
+ .data = &at91sam9263_ebi0_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9263-ebi1",
+ .data = &at91sam9263_ebi1_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9rl-ebi",
+ .data = &at91sam9rl_ebi_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9g45-ebi",
+ .data = &at91sam9g45_ebi_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-ebi",
+ .data = &at91sam9x5_ebi_caps,
+ },
+ {
+ .compatible = "atmel,sama5d3-ebi",
+ .data = &sama5d3_ebi_caps,
+ },
+ {
+ .compatible = "microchip,sam9x60-ebi",
+ .data = &sam9x60_ebi_caps,
+ },
+ { /* sentinel */ }
+};
+
+static int atmel_ebi_dev_disable(struct atmel_ebi *ebi, struct device_node *np)
+{
+ struct device *dev = ebi->dev;
+ struct property *newprop;
+
+ newprop = devm_kzalloc(dev, sizeof(*newprop), GFP_KERNEL);
+ if (!newprop)
+ return -ENOMEM;
+
+ newprop->name = devm_kstrdup(dev, "status", GFP_KERNEL);
+ if (!newprop->name)
+ return -ENOMEM;
+
+ newprop->value = devm_kstrdup(dev, "disabled", GFP_KERNEL);
+ if (!newprop->value)
+ return -ENOMEM;
+
+ newprop->length = sizeof("disabled");
+
+ return of_update_property(np, newprop);
+}
+
+static int atmel_ebi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *child, *np = dev->of_node, *smc_np;
+ const struct of_device_id *match;
+ struct atmel_ebi *ebi;
+ int ret, reg_cells;
+ struct clk *clk;
+ u32 val;
+
+ match = of_match_device(atmel_ebi_id_table, dev);
+ if (!match || !match->data)
+ return -EINVAL;
+
+ ebi = devm_kzalloc(dev, sizeof(*ebi), GFP_KERNEL);
+ if (!ebi)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ebi);
+
+ INIT_LIST_HEAD(&ebi->devs);
+ ebi->caps = match->data;
+ ebi->dev = dev;
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ebi->clk = clk;
+
+ smc_np = of_parse_phandle(dev->of_node, "atmel,smc", 0);
+
+ ebi->smc.regmap = syscon_node_to_regmap(smc_np);
+ if (IS_ERR(ebi->smc.regmap)) {
+ ret = PTR_ERR(ebi->smc.regmap);
+ goto put_node;
+ }
+
+ ebi->smc.layout = atmel_hsmc_get_reg_layout(smc_np);
+ if (IS_ERR(ebi->smc.layout)) {
+ ret = PTR_ERR(ebi->smc.layout);
+ goto put_node;
+ }
+
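+ /* The SMC clock is optional: -ENOENT just means none is given in DT. */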
+ ebi->smc.clk = of_clk_get(smc_np, 0);
+ if (IS_ERR(ebi->smc.clk)) {
+ if (PTR_ERR(ebi->smc.clk) != -ENOENT) {
+ ret = PTR_ERR(ebi->smc.clk);
+ goto put_node;
+ }
+
+ ebi->smc.clk = NULL;
+ }
+ of_node_put(smc_np);
+ ret = clk_prepare_enable(ebi->smc.clk);
+ if (ret)
+ return ret;
+
+ /*
+ * The sama5d3 does not provide an EBICSA register and thus does not
+ * need to access it.
+ */
+ if (ebi->caps->ebi_csa_offs) {
+ ebi->regmap =
+ syscon_regmap_lookup_by_phandle(np,
+ ebi->caps->regmap_name);
+ if (IS_ERR(ebi->regmap))
+ return PTR_ERR(ebi->regmap);
+ }
+
+ ret = of_property_read_u32(np, "#address-cells", &val);
+ if (ret) {
+ dev_err(dev, "missing #address-cells property\n");
+ return ret;
+ }
+
+ reg_cells = val;
+
+ ret = of_property_read_u32(np, "#size-cells", &val);
+ if (ret) {
+ dev_err(dev, "missing #address-cells property\n");
+ return ret;
+ }
+
+ reg_cells += val;
+
+ for_each_available_child_of_node(np, child) {
+ if (!of_property_present(child, "reg"))
+ continue;
+
+ ret = atmel_ebi_dev_setup(ebi, child, reg_cells);
+ if (ret) {
+ dev_err(dev, "failed to configure EBI bus for %pOF, disabling the device",
+ child);
+
+ ret = atmel_ebi_dev_disable(ebi, child);
+ if (ret) {
+ of_node_put(child);
+ return ret;
+ }
+ }
+ }
+
+ return of_platform_populate(np, NULL, NULL, dev);
+
+put_node:
+ of_node_put(smc_np);
+ return ret;
+}
+
+static __maybe_unused int atmel_ebi_resume(struct device *dev)
+{
+ struct atmel_ebi *ebi = dev_get_drvdata(dev);
+ struct atmel_ebi_dev *ebid;
+
+ list_for_each_entry(ebid, &ebi->devs, node) {
+ int i;
+
+ for (i = 0; i < ebid->numcs; i++)
+ ebid->ebi->caps->apply_config(ebid, &ebid->configs[i]);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(atmel_ebi_pm_ops, NULL, atmel_ebi_resume);
+
+static struct platform_driver atmel_ebi_driver = {
+ .driver = {
+ .name = "atmel-ebi",
+ .of_match_table = atmel_ebi_id_table,
+ .pm = &atmel_ebi_pm_ops,
+ },
+};
+builtin_platform_driver_probe(atmel_ebi_driver, atmel_ebi_probe);
diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
new file mode 100644
index 0000000000..a7ab3d3772
--- /dev/null
+++ b/drivers/memory/brcmstb_dpfe.c
@@ -0,0 +1,947 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * DDR PHY Front End (DPFE) driver for Broadcom set top box SoCs
+ *
+ * Copyright (c) 2017 Broadcom
+ */
+
+/*
+ * This driver provides access to the DPFE interface of Broadcom STB SoCs.
+ * The firmware running on the DCPU inside the DDR PHY can provide current
+ * information about the system's RAM, for instance the DRAM refresh rate.
+ * This can be used as an indirect indicator for the DRAM's temperature.
+ * Slower refresh rate means cooler RAM, higher refresh rate means hotter
+ * RAM.
+ *
+ * Throughout the driver, we use readl_relaxed() and writel_relaxed(), which
+ * already contain the appropriate le32_to_cpu()/cpu_to_le32() calls.
+ *
+ * Note regarding the loading of the firmware image: we use be32_to_cpu()
+ * and le32_to_cpu(), so we can support the following four cases:
+ * - LE kernel + LE firmware image (the most common case)
+ * - LE kernel + BE firmware image
+ * - BE kernel + LE firmware image
+ * - BE kernel + BE firmware image
+ *
+ * The DCPU always runs in big endian mode. The firmware image, however, can
+ * be in either format. Also, communication between host CPU and DCPU is
+ * always in little endian.
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define DRVNAME "brcmstb-dpfe"
+
+/* DCPU register offsets */
+#define REG_DCPU_RESET 0x0
+#define REG_TO_DCPU_MBOX 0x10
+#define REG_TO_HOST_MBOX 0x14
+
+/* Macros to process offsets returned by the DCPU */
+#define DRAM_MSG_ADDR_OFFSET 0x0
+#define DRAM_MSG_TYPE_OFFSET 0x1c
+#define DRAM_MSG_ADDR_MASK ((1UL << DRAM_MSG_TYPE_OFFSET) - 1)
+#define DRAM_MSG_TYPE_MASK ((1UL << \
+ (BITS_PER_LONG - DRAM_MSG_TYPE_OFFSET)) - 1)
+
+/* Message RAM */
+#define DCPU_MSG_RAM_START 0x100
+#define DCPU_MSG_RAM(x) (DCPU_MSG_RAM_START + (x) * sizeof(u32))
+
+/* DRAM Info Offsets & Masks */
+#define DRAM_INFO_INTERVAL 0x0
+#define DRAM_INFO_MR4 0x4
+#define DRAM_INFO_ERROR 0x8
+#define DRAM_INFO_MR4_MASK 0xff
+#define DRAM_INFO_MR4_SHIFT 24 /* We need to look at byte 3 */
+
+/* DRAM MR4 Offsets & Masks */
+#define DRAM_MR4_REFRESH 0x0 /* Refresh rate */
+#define DRAM_MR4_SR_ABORT 0x3 /* Self Refresh Abort */
+#define DRAM_MR4_PPRE 0x4 /* Post-package repair entry/exit */
+#define DRAM_MR4_TH_OFFS 0x5 /* Thermal Offset; vendor specific */
+#define DRAM_MR4_TUF 0x7 /* Temperature Update Flag */
+
+#define DRAM_MR4_REFRESH_MASK 0x7
+#define DRAM_MR4_SR_ABORT_MASK 0x1
+#define DRAM_MR4_PPRE_MASK 0x1
+#define DRAM_MR4_TH_OFFS_MASK 0x3
+#define DRAM_MR4_TUF_MASK 0x1
+
+/* DRAM Vendor Offsets & Masks (API v2) */
+#define DRAM_VENDOR_MR5 0x0
+#define DRAM_VENDOR_MR6 0x4
+#define DRAM_VENDOR_MR7 0x8
+#define DRAM_VENDOR_MR8 0xc
+#define DRAM_VENDOR_ERROR 0x10
+#define DRAM_VENDOR_MASK 0xff
+#define DRAM_VENDOR_SHIFT 24 /* We need to look at byte 3 */
+
+/* DRAM Information Offsets & Masks (API v3) */
+#define DRAM_DDR_INFO_MR4 0x0
+#define DRAM_DDR_INFO_MR5 0x4
+#define DRAM_DDR_INFO_MR6 0x8
+#define DRAM_DDR_INFO_MR7 0xc
+#define DRAM_DDR_INFO_MR8 0x10
+#define DRAM_DDR_INFO_ERROR 0x14
+#define DRAM_DDR_INFO_MASK 0xff
+
+/* Reset register bits & masks */
+#define DCPU_RESET_SHIFT 0x0
+#define DCPU_RESET_MASK 0x1
+#define DCPU_CLK_DISABLE_SHIFT 0x2
+
+/* DCPU return codes */
+#define DCPU_RET_ERROR_BIT BIT(31)
+#define DCPU_RET_SUCCESS 0x1
+#define DCPU_RET_ERR_HEADER (DCPU_RET_ERROR_BIT | BIT(0))
+#define DCPU_RET_ERR_INVAL (DCPU_RET_ERROR_BIT | BIT(1))
+#define DCPU_RET_ERR_CHKSUM (DCPU_RET_ERROR_BIT | BIT(2))
+#define DCPU_RET_ERR_COMMAND (DCPU_RET_ERROR_BIT | BIT(3))
+/* This error code is not firmware defined and only used in the driver. */
+#define DCPU_RET_ERR_TIMEDOUT (DCPU_RET_ERROR_BIT | BIT(4))
+
+/* Firmware magic */
+#define DPFE_BE_MAGIC 0xfe1010fe
+#define DPFE_LE_MAGIC 0xfe0101fe
+
+/* Error codes */
+#define ERR_INVALID_MAGIC -1
+#define ERR_INVALID_SIZE -2
+#define ERR_INVALID_CHKSUM -3
+
+/* Message types */
+#define DPFE_MSG_TYPE_COMMAND 1
+#define DPFE_MSG_TYPE_RESPONSE 2
+
+#define DELAY_LOOP_MAX 1000
+
+enum dpfe_msg_fields {
+ MSG_HEADER,
+ MSG_COMMAND,
+ MSG_ARG_COUNT,
+ MSG_ARG0,
+ MSG_FIELD_MAX = 16 /* Total number of message fields */
+};
+
+enum dpfe_commands {
+ DPFE_CMD_GET_INFO,
+ DPFE_CMD_GET_REFRESH,
+ DPFE_CMD_GET_VENDOR,
+ DPFE_CMD_MAX /* Last entry */
+};
+
+/*
+ * Format of the binary firmware file:
+ *
+ * entry
+ * 0 header
+ * value: 0xfe0101fe <== little endian
+ * 0xfe1010fe <== big endian
+ * 1 sequence:
+ * [31:16] total segments on this build
+ * [15:0] this segment sequence.
+ * 2 FW version
+ * 3 IMEM byte size
+ * 4 DMEM byte size
+ * IMEM
+ * DMEM
+ * last checksum ==> sum of everything
+ */
+struct dpfe_firmware_header {
+ u32 magic;
+ u32 sequence;
+ u32 version;
+ u32 imem_size;
+ u32 dmem_size;
+};
+
+/* Things we only need during initialization. */
+struct init_data {
+ unsigned int dmem_len;
+ unsigned int imem_len;
+ unsigned int chksum;
+ bool is_big_endian;
+};
+
+/* API version and corresponding commands */
+struct dpfe_api {
+ int version;
+ const char *fw_name;
+ const struct attribute_group **sysfs_attrs;
+ u32 command[DPFE_CMD_MAX][MSG_FIELD_MAX];
+};
+
+/* Things we need for as long as we are active. */
+struct brcmstb_dpfe_priv {
+ void __iomem *regs;
+ void __iomem *dmem;
+ void __iomem *imem;
+ struct device *dev;
+ const struct dpfe_api *dpfe_api;
+ struct mutex lock;
+};
+
+/*
+ * Forward declaration of our sysfs attribute functions, so we can declare the
+ * attribute data structures early.
+ */
+static ssize_t show_info(struct device *, struct device_attribute *, char *);
+static ssize_t show_refresh(struct device *, struct device_attribute *, char *);
+static ssize_t store_refresh(struct device *, struct device_attribute *,
+ const char *, size_t);
+static ssize_t show_vendor(struct device *, struct device_attribute *, char *);
+static ssize_t show_dram(struct device *, struct device_attribute *, char *);
+
+/*
+ * Declare our attributes early, so they can be referenced in the API data
+ * structure. We need to do this, because the attributes depend on the API
+ * version.
+ */
+static DEVICE_ATTR(dpfe_info, 0444, show_info, NULL);
+static DEVICE_ATTR(dpfe_refresh, 0644, show_refresh, store_refresh);
+static DEVICE_ATTR(dpfe_vendor, 0444, show_vendor, NULL);
+static DEVICE_ATTR(dpfe_dram, 0444, show_dram, NULL);
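+
+/*
+ * These attributes show up in the platform device's sysfs directory (the
+ * exact path depends on the device tree); e.g. reading dpfe_refresh prints
+ * the refresh interval, the decoded MR4 fields and the error word.
+ */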
+
+/* API v2 sysfs attributes */
+static struct attribute *dpfe_v2_attrs[] = {
+ &dev_attr_dpfe_info.attr,
+ &dev_attr_dpfe_refresh.attr,
+ &dev_attr_dpfe_vendor.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(dpfe_v2);
+
+/* API v3 sysfs attributes */
+static struct attribute *dpfe_v3_attrs[] = {
+ &dev_attr_dpfe_info.attr,
+ &dev_attr_dpfe_dram.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(dpfe_v3);
+
+/*
+ * Old API v2 firmware commands, as defined in the rev 0.61 specification. We
+ * use a version of 1 to denote that it is not compatible with the newer API
+ * v2 and onwards.
+ */
+static const struct dpfe_api dpfe_api_old_v2 = {
+ .version = 1,
+ .fw_name = "dpfe.bin",
+ .sysfs_attrs = dpfe_v2_groups,
+ .command = {
+ [DPFE_CMD_GET_INFO] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 1,
+ [MSG_ARG_COUNT] = 1,
+ [MSG_ARG0] = 1,
+ },
+ [DPFE_CMD_GET_REFRESH] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 2,
+ [MSG_ARG_COUNT] = 1,
+ [MSG_ARG0] = 1,
+ },
+ [DPFE_CMD_GET_VENDOR] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 2,
+ [MSG_ARG_COUNT] = 1,
+ [MSG_ARG0] = 2,
+ },
+ }
+};
+
+/*
+ * API v2 firmware commands, as defined in the rev 0.8 specification; named
+ * "new v2" here.
+ */
+static const struct dpfe_api dpfe_api_new_v2 = {
+ .version = 2,
+ .fw_name = NULL, /* We expect the firmware to have been downloaded! */
+ .sysfs_attrs = dpfe_v2_groups,
+ .command = {
+ [DPFE_CMD_GET_INFO] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 0x101,
+ },
+ [DPFE_CMD_GET_REFRESH] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 0x201,
+ },
+ [DPFE_CMD_GET_VENDOR] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 0x202,
+ },
+ }
+};
+
+/* API v3 firmware commands */
+static const struct dpfe_api dpfe_api_v3 = {
+ .version = 3,
+ .fw_name = NULL, /* We expect the firmware to have been downloaded! */
+ .sysfs_attrs = dpfe_v3_groups,
+ .command = {
+ [DPFE_CMD_GET_INFO] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 0x0101,
+ [MSG_ARG_COUNT] = 1,
+ [MSG_ARG0] = 1,
+ },
+ [DPFE_CMD_GET_REFRESH] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 0x0202,
+ [MSG_ARG_COUNT] = 0,
+ },
+ /* There's no GET_VENDOR command in API v3. */
+ },
+};
+
+static const char *get_error_text(unsigned int i)
+{
+ static const char * const error_text[] = {
+ "Success", "Header code incorrect",
+ "Unknown command or argument", "Incorrect checksum",
+ "Malformed command", "Timed out", "Unknown error",
+ };
+
+ if (unlikely(i >= ARRAY_SIZE(error_text)))
+ i = ARRAY_SIZE(error_text) - 1;
+
+ return error_text[i];
+}
+
+static bool is_dcpu_enabled(struct brcmstb_dpfe_priv *priv)
+{
+ u32 val;
+
+ mutex_lock(&priv->lock);
+ val = readl_relaxed(priv->regs + REG_DCPU_RESET);
+ mutex_unlock(&priv->lock);
+
+ return !(val & DCPU_RESET_MASK);
+}
+
+static void __disable_dcpu(struct brcmstb_dpfe_priv *priv)
+{
+ u32 val;
+
+ if (!is_dcpu_enabled(priv))
+ return;
+
+ mutex_lock(&priv->lock);
+
+ /* Put DCPU in reset if it's running. */
+ val = readl_relaxed(priv->regs + REG_DCPU_RESET);
+ val |= (1 << DCPU_RESET_SHIFT);
+ writel_relaxed(val, priv->regs + REG_DCPU_RESET);
+
+ mutex_unlock(&priv->lock);
+}
+
+static void __enable_dcpu(struct brcmstb_dpfe_priv *priv)
+{
+ void __iomem *regs = priv->regs;
+ u32 val;
+
+ mutex_lock(&priv->lock);
+
+ /* Clear mailbox registers. */
+ writel_relaxed(0, regs + REG_TO_DCPU_MBOX);
+ writel_relaxed(0, regs + REG_TO_HOST_MBOX);
+
+ /* Disable DCPU clock gating */
+ val = readl_relaxed(regs + REG_DCPU_RESET);
+ val &= ~(1 << DCPU_CLK_DISABLE_SHIFT);
+ writel_relaxed(val, regs + REG_DCPU_RESET);
+
+ /* Take DCPU out of reset */
+ val = readl_relaxed(regs + REG_DCPU_RESET);
+ val &= ~(1 << DCPU_RESET_SHIFT);
+ writel_relaxed(val, regs + REG_DCPU_RESET);
+
+ mutex_unlock(&priv->lock);
+}
+
+static unsigned int get_msg_chksum(const u32 msg[], unsigned int max)
+{
+ unsigned int sum = 0;
+ unsigned int i;
+
+ /* Don't include the last field in the checksum. */
+ for (i = 0; i < max; i++)
+ sum += msg[i];
+
+ return sum;
+}
+
+static void __iomem *get_msg_ptr(struct brcmstb_dpfe_priv *priv, u32 response,
+ char *buf, ssize_t *size)
+{
+ unsigned int msg_type;
+ unsigned int offset;
+ void __iomem *ptr = NULL;
+
+ /* There is no need to use this function for API v3 or later. */
+ if (unlikely(priv->dpfe_api->version >= 3))
+ return NULL;
+
+ msg_type = (response >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK;
+ offset = (response >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK;
+
+ /*
+ * msg_type == 1: the offset is relative to the message RAM
+ * msg_type == 0: the offset is relative to the data RAM (this is the
+ * previous way of passing data)
+ * msg_type is anything else: there's a critical hardware problem
+ */
+ switch (msg_type) {
+ case 1:
+ ptr = priv->regs + DCPU_MSG_RAM_START + offset;
+ break;
+ case 0:
+ ptr = priv->dmem + offset;
+ break;
+ default:
+ dev_emerg(priv->dev, "invalid message reply from DCPU: %#x\n",
+ response);
+ if (buf && size)
+ *size = sprintf(buf,
+ "FATAL: communication error with DCPU\n");
+ }
+
+ return ptr;
+}
+
+static void __finalize_command(struct brcmstb_dpfe_priv *priv)
+{
+ unsigned int release_mbox;
+
+ /*
+ * It depends on the API version which MBOX register we have to write to
+ * signal we are done.
+ */
+ release_mbox = (priv->dpfe_api->version < 2)
+ ? REG_TO_HOST_MBOX : REG_TO_DCPU_MBOX;
+ writel_relaxed(0, priv->regs + release_mbox);
+}
+
+static int __send_command(struct brcmstb_dpfe_priv *priv, unsigned int cmd,
+ u32 result[])
+{
+ void __iomem *regs = priv->regs;
+ unsigned int i, chksum, chksum_idx;
+ const u32 *msg;
+ int ret = 0;
+ u32 resp;
+
+ if (cmd >= DPFE_CMD_MAX)
+ return -1;
+
+ msg = priv->dpfe_api->command[cmd];
+
+ mutex_lock(&priv->lock);
+
+ /* Wait for DCPU to become ready */
+ for (i = 0; i < DELAY_LOOP_MAX; i++) {
+ resp = readl_relaxed(regs + REG_TO_HOST_MBOX);
+ if (resp == 0)
+ break;
+ msleep(1);
+ }
+ if (resp != 0) {
+ mutex_unlock(&priv->lock);
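+ /*
+ * Map the error's bit position to the small index expected by
+ * get_error_text().
+ */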
+ return -ffs(DCPU_RET_ERR_TIMEDOUT);
+ }
+
+ /* Compute checksum over the message */
+ chksum_idx = msg[MSG_ARG_COUNT] + MSG_ARG_COUNT + 1;
+ chksum = get_msg_chksum(msg, chksum_idx);
+
+ /* Write command and arguments to message area */
+ for (i = 0; i < MSG_FIELD_MAX; i++) {
+ if (i == chksum_idx)
+ writel_relaxed(chksum, regs + DCPU_MSG_RAM(i));
+ else
+ writel_relaxed(msg[i], regs + DCPU_MSG_RAM(i));
+ }
+
+ /* Tell DCPU there is a command waiting */
+ writel_relaxed(1, regs + REG_TO_DCPU_MBOX);
+
+ /* Wait for DCPU to process the command */
+ for (i = 0; i < DELAY_LOOP_MAX; i++) {
+ /* Read response code */
+ resp = readl_relaxed(regs + REG_TO_HOST_MBOX);
+ if (resp > 0)
+ break;
+ msleep(1);
+ }
+
+ if (i == DELAY_LOOP_MAX) {
+ resp = (DCPU_RET_ERR_TIMEDOUT & ~DCPU_RET_ERROR_BIT);
+ ret = -ffs(resp);
+ } else {
+ /* Read response data */
+ for (i = 0; i < MSG_FIELD_MAX; i++)
+ result[i] = readl_relaxed(regs + DCPU_MSG_RAM(i));
+ chksum_idx = result[MSG_ARG_COUNT] + MSG_ARG_COUNT + 1;
+ }
+
+ /* Tell DCPU we are done */
+ __finalize_command(priv);
+
+ mutex_unlock(&priv->lock);
+
+ if (ret)
+ return ret;
+
+ /* Verify response */
+ chksum = get_msg_chksum(result, chksum_idx);
+ if (chksum != result[chksum_idx])
+ resp = DCPU_RET_ERR_CHKSUM;
+
+ if (resp != DCPU_RET_SUCCESS) {
+ resp &= ~DCPU_RET_ERROR_BIT;
+ ret = -ffs(resp);
+ }
+
+ return ret;
+}
+
+/* Ensure that the firmware file loaded meets all the requirements. */
+static int __verify_firmware(struct init_data *init,
+ const struct firmware *fw)
+{
+ const struct dpfe_firmware_header *header = (void *)fw->data;
+ unsigned int dmem_size, imem_size, total_size;
+ bool is_big_endian = false;
+ const u32 *chksum_ptr;
+
+ if (header->magic == DPFE_BE_MAGIC)
+ is_big_endian = true;
+ else if (header->magic != DPFE_LE_MAGIC)
+ return ERR_INVALID_MAGIC;
+
+ if (is_big_endian) {
+ dmem_size = be32_to_cpu(header->dmem_size);
+ imem_size = be32_to_cpu(header->imem_size);
+ } else {
+ dmem_size = le32_to_cpu(header->dmem_size);
+ imem_size = le32_to_cpu(header->imem_size);
+ }
+
+ /* Data and instruction sections are 32 bit words. */
+ if ((dmem_size % sizeof(u32)) != 0 || (imem_size % sizeof(u32)) != 0)
+ return ERR_INVALID_SIZE;
+
+ /*
+ * The header + the data section + the instruction section + the
+ * checksum must be equal to the total firmware size.
+ */
+ total_size = dmem_size + imem_size + sizeof(*header) +
+ sizeof(*chksum_ptr);
+ if (total_size != fw->size)
+ return ERR_INVALID_SIZE;
+
+ /* The checksum comes at the very end. */
+ chksum_ptr = (void *)fw->data + sizeof(*header) + dmem_size + imem_size;
+
+ init->is_big_endian = is_big_endian;
+ init->dmem_len = dmem_size;
+ init->imem_len = imem_size;
+ init->chksum = (is_big_endian)
+ ? be32_to_cpu(*chksum_ptr) : le32_to_cpu(*chksum_ptr);
+
+ return 0;
+}
+
+/* Verify checksum by reading back the firmware from co-processor RAM. */
+static int __verify_fw_checksum(struct init_data *init,
+ struct brcmstb_dpfe_priv *priv,
+ const struct dpfe_firmware_header *header,
+ u32 checksum)
+{
+ u32 magic, sequence, version, sum;
+ u32 __iomem *dmem = priv->dmem;
+ u32 __iomem *imem = priv->imem;
+ unsigned int i;
+
+ if (init->is_big_endian) {
+ magic = be32_to_cpu(header->magic);
+ sequence = be32_to_cpu(header->sequence);
+ version = be32_to_cpu(header->version);
+ } else {
+ magic = le32_to_cpu(header->magic);
+ sequence = le32_to_cpu(header->sequence);
+ version = le32_to_cpu(header->version);
+ }
+
+ sum = magic + sequence + version + init->dmem_len + init->imem_len;
+
+ for (i = 0; i < init->dmem_len / sizeof(u32); i++)
+ sum += readl_relaxed(dmem + i);
+
+ for (i = 0; i < init->imem_len / sizeof(u32); i++)
+ sum += readl_relaxed(imem + i);
+
+ return (sum == checksum) ? 0 : -1;
+}
+
+static int __write_firmware(u32 __iomem *mem, const u32 *fw,
+ unsigned int size, bool is_big_endian)
+{
+ unsigned int i;
+
+ /* Convert size to 32-bit words. */
+ size /= sizeof(u32);
+
+ /* It is recommended to clear the firmware area first. */
+ for (i = 0; i < size; i++)
+ writel_relaxed(0, mem + i);
+
+ /* Now copy it. */
+ if (is_big_endian) {
+ for (i = 0; i < size; i++)
+ writel_relaxed(be32_to_cpu(fw[i]), mem + i);
+ } else {
+ for (i = 0; i < size; i++)
+ writel_relaxed(le32_to_cpu(fw[i]), mem + i);
+ }
+
+ return 0;
+}
+
+static int brcmstb_dpfe_download_firmware(struct brcmstb_dpfe_priv *priv)
+{
+ const struct dpfe_firmware_header *header;
+ unsigned int dmem_size, imem_size;
+ struct device *dev = priv->dev;
+ bool is_big_endian = false;
+ const struct firmware *fw;
+ const u32 *dmem, *imem;
+ struct init_data init;
+ const void *fw_blob;
+ int ret;
+
+ /*
+ * Skip downloading the firmware if the DCPU is already running and
+ * responding to commands.
+ */
+ if (is_dcpu_enabled(priv)) {
+ u32 response[MSG_FIELD_MAX];
+
+ ret = __send_command(priv, DPFE_CMD_GET_INFO, response);
+ if (!ret)
+ return 0;
+ }
+
+ /*
+ * If the firmware filename is NULL it means the boot firmware has to
+ * download the DCPU firmware for us. If that didn't work, we have to
+ * bail, since downloading it ourselves wouldn't work either.
+ */
+ if (!priv->dpfe_api->fw_name)
+ return -ENODEV;
+
+ ret = firmware_request_nowarn(&fw, priv->dpfe_api->fw_name, dev);
+ /*
+ * Defer the firmware download if the firmware file couldn't be found.
+ * The root file system may not be available yet.
+ */
+ if (ret)
+ return (ret == -ENOENT) ? -EPROBE_DEFER : ret;
+
+ ret = __verify_firmware(&init, fw);
+ if (ret) {
+ ret = -EFAULT;
+ goto release_fw;
+ }
+
+ __disable_dcpu(priv);
+
+ is_big_endian = init.is_big_endian;
+ dmem_size = init.dmem_len;
+ imem_size = init.imem_len;
+
+ /* At the beginning of the firmware blob is a header. */
+ header = (struct dpfe_firmware_header *)fw->data;
+ /* Void pointer to the beginning of the actual firmware. */
+ fw_blob = fw->data + sizeof(*header);
+ /* IMEM comes right after the header. */
+ imem = fw_blob;
+ /* DMEM follows after IMEM. */
+ dmem = fw_blob + imem_size;
+
+ ret = __write_firmware(priv->dmem, dmem, dmem_size, is_big_endian);
+ if (ret)
+ goto release_fw;
+ ret = __write_firmware(priv->imem, imem, imem_size, is_big_endian);
+ if (ret)
+ goto release_fw;
+
+ ret = __verify_fw_checksum(&init, priv, header, init.chksum);
+ if (ret)
+ goto release_fw;
+
+ __enable_dcpu(priv);
+
+release_fw:
+ release_firmware(fw);
+ return ret;
+}
+
+static ssize_t generic_show(unsigned int command, u32 response[],
+ struct brcmstb_dpfe_priv *priv, char *buf)
+{
+ int ret;
+
+ if (!priv)
+ return sprintf(buf, "ERROR: driver private data not set\n");
+
+ ret = __send_command(priv, command, response);
+ if (ret < 0)
+ return sprintf(buf, "ERROR: %s\n", get_error_text(-ret));
+
+ return 0;
+}
+
+static ssize_t show_info(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ u32 response[MSG_FIELD_MAX];
+ struct brcmstb_dpfe_priv *priv;
+ unsigned int info;
+ ssize_t ret;
+
+ priv = dev_get_drvdata(dev);
+ ret = generic_show(DPFE_CMD_GET_INFO, response, priv, buf);
+ if (ret)
+ return ret;
+
+ info = response[MSG_ARG0];
+
+ return sprintf(buf, "%u.%u.%u.%u\n",
+ (info >> 24) & 0xff,
+ (info >> 16) & 0xff,
+ (info >> 8) & 0xff,
+ info & 0xff);
+}
+
+static ssize_t show_refresh(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ u32 response[MSG_FIELD_MAX];
+ void __iomem *info;
+ struct brcmstb_dpfe_priv *priv;
+ u8 refresh, sr_abort, ppre, thermal_offs, tuf;
+ u32 mr4;
+ ssize_t ret;
+
+ priv = dev_get_drvdata(dev);
+ ret = generic_show(DPFE_CMD_GET_REFRESH, response, priv, buf);
+ if (ret)
+ return ret;
+
+ info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
+ if (!info)
+ return ret;
+
+ mr4 = (readl_relaxed(info + DRAM_INFO_MR4) >> DRAM_INFO_MR4_SHIFT) &
+ DRAM_INFO_MR4_MASK;
+
+ refresh = (mr4 >> DRAM_MR4_REFRESH) & DRAM_MR4_REFRESH_MASK;
+ sr_abort = (mr4 >> DRAM_MR4_SR_ABORT) & DRAM_MR4_SR_ABORT_MASK;
+ ppre = (mr4 >> DRAM_MR4_PPRE) & DRAM_MR4_PPRE_MASK;
+ thermal_offs = (mr4 >> DRAM_MR4_TH_OFFS) & DRAM_MR4_TH_OFFS_MASK;
+ tuf = (mr4 >> DRAM_MR4_TUF) & DRAM_MR4_TUF_MASK;
+
+ return sprintf(buf, "%#x %#x %#x %#x %#x %#x %#x\n",
+ readl_relaxed(info + DRAM_INFO_INTERVAL),
+ refresh, sr_abort, ppre, thermal_offs, tuf,
+ readl_relaxed(info + DRAM_INFO_ERROR));
+}
+
+static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 response[MSG_FIELD_MAX];
+ struct brcmstb_dpfe_priv *priv;
+ void __iomem *info;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ priv = dev_get_drvdata(dev);
+ ret = __send_command(priv, DPFE_CMD_GET_REFRESH, response);
+ if (ret)
+ return ret;
+
+ info = get_msg_ptr(priv, response[MSG_ARG0], NULL, NULL);
+ if (!info)
+ return -EIO;
+
+ writel_relaxed(val, info + DRAM_INFO_INTERVAL);
+
+ return count;
+}
+
+static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ u32 response[MSG_FIELD_MAX];
+ struct brcmstb_dpfe_priv *priv;
+ void __iomem *info;
+ ssize_t ret;
+ u32 mr5, mr6, mr7, mr8, err;
+
+ priv = dev_get_drvdata(dev);
+ ret = generic_show(DPFE_CMD_GET_VENDOR, response, priv, buf);
+ if (ret)
+ return ret;
+
+ info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
+ if (!info)
+ return ret;
+
+ mr5 = (readl_relaxed(info + DRAM_VENDOR_MR5) >> DRAM_VENDOR_SHIFT) &
+ DRAM_VENDOR_MASK;
+ mr6 = (readl_relaxed(info + DRAM_VENDOR_MR6) >> DRAM_VENDOR_SHIFT) &
+ DRAM_VENDOR_MASK;
+ mr7 = (readl_relaxed(info + DRAM_VENDOR_MR7) >> DRAM_VENDOR_SHIFT) &
+ DRAM_VENDOR_MASK;
+ mr8 = (readl_relaxed(info + DRAM_VENDOR_MR8) >> DRAM_VENDOR_SHIFT) &
+ DRAM_VENDOR_MASK;
+ err = readl_relaxed(info + DRAM_VENDOR_ERROR) & DRAM_VENDOR_MASK;
+
+ return sprintf(buf, "%#x %#x %#x %#x %#x\n", mr5, mr6, mr7, mr8, err);
+}
+
+static ssize_t show_dram(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ u32 response[MSG_FIELD_MAX];
+ struct brcmstb_dpfe_priv *priv;
+ ssize_t ret;
+ u32 mr4, mr5, mr6, mr7, mr8, err;
+
+ priv = dev_get_drvdata(dev);
+ ret = generic_show(DPFE_CMD_GET_REFRESH, response, priv, buf);
+ if (ret)
+ return ret;
+
+ mr4 = response[MSG_ARG0 + 0] & DRAM_INFO_MR4_MASK;
+ mr5 = response[MSG_ARG0 + 1] & DRAM_DDR_INFO_MASK;
+ mr6 = response[MSG_ARG0 + 2] & DRAM_DDR_INFO_MASK;
+ mr7 = response[MSG_ARG0 + 3] & DRAM_DDR_INFO_MASK;
+ mr8 = response[MSG_ARG0 + 4] & DRAM_DDR_INFO_MASK;
+ err = response[MSG_ARG0 + 5] & DRAM_DDR_INFO_MASK;
+
+ return sprintf(buf, "%#x %#x %#x %#x %#x %#x\n", mr4, mr5, mr6, mr7,
+ mr8, err);
+}
+
+static int brcmstb_dpfe_resume(struct platform_device *pdev)
+{
+ struct brcmstb_dpfe_priv *priv = platform_get_drvdata(pdev);
+
+ return brcmstb_dpfe_download_firmware(priv);
+}
+
+static int brcmstb_dpfe_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct brcmstb_dpfe_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ mutex_init(&priv->lock);
+ platform_set_drvdata(pdev, priv);
+
+ priv->regs = devm_platform_ioremap_resource_byname(pdev, "dpfe-cpu");
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "couldn't map DCPU registers\n");
+ return -ENODEV;
+ }
+
+ priv->dmem = devm_platform_ioremap_resource_byname(pdev, "dpfe-dmem");
+ if (IS_ERR(priv->dmem)) {
+ dev_err(dev, "Couldn't map DCPU data memory\n");
+ return -ENOENT;
+ }
+
+ priv->imem = devm_platform_ioremap_resource_byname(pdev, "dpfe-imem");
+ if (IS_ERR(priv->imem)) {
+ dev_err(dev, "Couldn't map DCPU instruction memory\n");
+ return -ENOENT;
+ }
+
+ priv->dpfe_api = of_device_get_match_data(dev);
+ if (unlikely(!priv->dpfe_api)) {
+ /*
+ * It should be impossible to end up here, but to be safe we
+ * check anyway.
+ */
+ dev_err(dev, "Couldn't determine API\n");
+ return -ENOENT;
+ }
+
+ ret = brcmstb_dpfe_download_firmware(priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "Couldn't download firmware\n");
+
+ ret = sysfs_create_groups(&pdev->dev.kobj, priv->dpfe_api->sysfs_attrs);
+ if (!ret)
+ dev_info(dev, "registered with API v%d.\n",
+ priv->dpfe_api->version);
+
+ return ret;
+}
+
+static int brcmstb_dpfe_remove(struct platform_device *pdev)
+{
+ struct brcmstb_dpfe_priv *priv = dev_get_drvdata(&pdev->dev);
+
+ sysfs_remove_groups(&pdev->dev.kobj, priv->dpfe_api->sysfs_attrs);
+
+ return 0;
+}
+
+static const struct of_device_id brcmstb_dpfe_of_match[] = {
+ /* Use legacy API v2 for a select number of chips */
+ { .compatible = "brcm,bcm7268-dpfe-cpu", .data = &dpfe_api_old_v2 },
+ { .compatible = "brcm,bcm7271-dpfe-cpu", .data = &dpfe_api_old_v2 },
+ { .compatible = "brcm,bcm7278-dpfe-cpu", .data = &dpfe_api_old_v2 },
+ { .compatible = "brcm,bcm7211-dpfe-cpu", .data = &dpfe_api_new_v2 },
+ /* API v3 is the default going forward */
+ { .compatible = "brcm,dpfe-cpu", .data = &dpfe_api_v3 },
+ {}
+};
+MODULE_DEVICE_TABLE(of, brcmstb_dpfe_of_match);
+
+static struct platform_driver brcmstb_dpfe_driver = {
+ .driver = {
+ .name = DRVNAME,
+ .of_match_table = brcmstb_dpfe_of_match,
+ },
+ .probe = brcmstb_dpfe_probe,
+ .remove = brcmstb_dpfe_remove,
+ .resume = brcmstb_dpfe_resume,
+};
+
+module_platform_driver(brcmstb_dpfe_driver);
+
+MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
+MODULE_DESCRIPTION("BRCMSTB DDR PHY Front End Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/memory/brcmstb_memc.c b/drivers/memory/brcmstb_memc.c
new file mode 100644
index 0000000000..233a53f5bc
--- /dev/null
+++ b/drivers/memory/brcmstb_memc.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * DDR Self-Refresh Power Down (SRPD) support for Broadcom STB SoCs
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define REG_MEMC_CNTRLR_CONFIG 0x00
+#define CNTRLR_CONFIG_LPDDR4_SHIFT 5
+#define CNTRLR_CONFIG_MASK 0xf
+#define REG_MEMC_SRPD_CFG_21 0x20
+#define REG_MEMC_SRPD_CFG_20 0x34
+#define REG_MEMC_SRPD_CFG_1x 0x3c
+#define INACT_COUNT_SHIFT 0
+#define INACT_COUNT_MASK 0xffff
+#define SRPD_EN_SHIFT 16
+
+struct brcmstb_memc_data {
+ u32 srpd_offset;
+};
+
+struct brcmstb_memc {
+ struct device *dev;
+ void __iomem *ddr_ctrl;
+ unsigned int timeout_cycles;
+ u32 frequency;
+ u32 srpd_offset;
+};
+
+static int brcmstb_memc_uses_lpddr4(struct brcmstb_memc *memc)
+{
+ void __iomem *config = memc->ddr_ctrl + REG_MEMC_CNTRLR_CONFIG;
+ u32 reg;
+
+ reg = readl_relaxed(config) & CNTRLR_CONFIG_MASK;
+
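+ /*
+ * The 4-bit DRAM-type field holds 5 for LPDDR4; despite its _SHIFT
+ * name, CNTRLR_CONFIG_LPDDR4_SHIFT is used here as that value.
+ */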
+ return reg == CNTRLR_CONFIG_LPDDR4_SHIFT;
+}
+
+static int brcmstb_memc_srpd_config(struct brcmstb_memc *memc,
+ unsigned int cycles)
+{
+ void __iomem *cfg = memc->ddr_ctrl + memc->srpd_offset;
+ u32 val;
+
+ /* Max timeout supported in HW */
+ if (cycles > INACT_COUNT_MASK)
+ return -EINVAL;
+
+ memc->timeout_cycles = cycles;
+
+ val = (cycles << INACT_COUNT_SHIFT) & INACT_COUNT_MASK;
+ if (cycles)
+ val |= BIT(SRPD_EN_SHIFT);
+
+ writel_relaxed(val, cfg);
+ /* Ensure the write is committed to the controller */
+ (void)readl_relaxed(cfg);
+
+ return 0;
+}
+
+static ssize_t frequency_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct brcmstb_memc *memc = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", memc->frequency);
+}
+
+static ssize_t srpd_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct brcmstb_memc *memc = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", memc->timeout_cycles);
+}
+
+static ssize_t srpd_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct brcmstb_memc *memc = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ /*
+ * Cannot change the inactivity timeout on LPDDR4 chips because the
+ * inactivity timeout would also affect the dynamic tuning process,
+ * rendering it non-functional.
+ */
+ if (brcmstb_memc_uses_lpddr4(memc))
+ return -EOPNOTSUPP;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ ret = brcmstb_memc_srpd_config(memc, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RO(frequency);
+static DEVICE_ATTR_RW(srpd);
+
+static struct attribute *dev_attrs[] = {
+ &dev_attr_frequency.attr,
+ &dev_attr_srpd.attr,
+ NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+ .attrs = dev_attrs,
+};
+
+static const struct of_device_id brcmstb_memc_of_match[];
+
+static int brcmstb_memc_probe(struct platform_device *pdev)
+{
+ const struct brcmstb_memc_data *memc_data;
+ const struct of_device_id *of_id;
+ struct device *dev = &pdev->dev;
+ struct brcmstb_memc *memc;
+ int ret;
+
+ memc = devm_kzalloc(dev, sizeof(*memc), GFP_KERNEL);
+ if (!memc)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, memc);
+
+ of_id = of_match_device(brcmstb_memc_of_match, dev);
+ memc_data = of_id->data;
+ memc->srpd_offset = memc_data->srpd_offset;
+
+ memc->ddr_ctrl = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(memc->ddr_ctrl))
+ return PTR_ERR(memc->ddr_ctrl);
+
+ of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &memc->frequency);
+
+ ret = sysfs_create_group(&dev->kobj, &dev_attr_group);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int brcmstb_memc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ sysfs_remove_group(&dev->kobj, &dev_attr_group);
+
+ return 0;
+}
+
+enum brcmstb_memc_hwtype {
+ BRCMSTB_MEMC_V21,
+ BRCMSTB_MEMC_V20,
+ BRCMSTB_MEMC_V1X,
+};
+
+static const struct brcmstb_memc_data brcmstb_memc_versions[] = {
+ { .srpd_offset = REG_MEMC_SRPD_CFG_21 },
+ { .srpd_offset = REG_MEMC_SRPD_CFG_20 },
+ { .srpd_offset = REG_MEMC_SRPD_CFG_1x },
+};
+
+static const struct of_device_id brcmstb_memc_of_match[] = {
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.1.x",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V1X]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.0",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V20]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.1",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.2",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.3",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.5",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.6",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.7",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.8",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.3.0",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.3.1",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-c.1.0",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-c.1.1",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-c.1.2",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-c.1.3",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-c.1.4",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ /* default to the original offset */
+ {
+ .compatible = "brcm,brcmstb-memc-ddr",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V1X]
+ },
+ {}
+};
+
+static int brcmstb_memc_suspend(struct device *dev)
+{
+ struct brcmstb_memc *memc = dev_get_drvdata(dev);
+ void __iomem *cfg = memc->ddr_ctrl + memc->srpd_offset;
+ u32 val;
+
+ if (memc->timeout_cycles == 0)
+ return 0;
+
+ /*
+ * Disable SRPD prior to suspending the system, since it can
+ * prevent other memory clients managed by the ARM trusted
+ * firmware from accessing memory.
+ */
+ val = readl_relaxed(cfg);
+ val &= ~BIT(SRPD_EN_SHIFT);
+ writel_relaxed(val, cfg);
+ /* Ensure the write is committed to the controller */
+ (void)readl_relaxed(cfg);
+
+ return 0;
+}
+
+static int brcmstb_memc_resume(struct device *dev)
+{
+ struct brcmstb_memc *memc = dev_get_drvdata(dev);
+
+ if (memc->timeout_cycles == 0)
+ return 0;
+
+ return brcmstb_memc_srpd_config(memc, memc->timeout_cycles);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(brcmstb_memc_pm_ops, brcmstb_memc_suspend,
+ brcmstb_memc_resume);
+
+static struct platform_driver brcmstb_memc_driver = {
+ .probe = brcmstb_memc_probe,
+ .remove = brcmstb_memc_remove,
+ .driver = {
+ .name = "brcmstb_memc",
+ .of_match_table = brcmstb_memc_of_match,
+ .pm = pm_ptr(&brcmstb_memc_pm_ops),
+ },
+};
+module_platform_driver(brcmstb_memc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("DDR SRPD driver for Broadcom STB chips");
diff --git a/drivers/memory/bt1-l2-ctl.c b/drivers/memory/bt1-l2-ctl.c
new file mode 100644
index 0000000000..78bd71b203
--- /dev/null
+++ b/drivers/memory/bt1-l2-ctl.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 CM2 L2-cache Control Block driver.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bitfield.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/sysfs.h>
+#include <linux/of.h>
+
+#define L2_CTL_REG 0x028
+#define L2_CTL_DATA_STALL_FLD 0
+#define L2_CTL_DATA_STALL_MASK GENMASK(1, L2_CTL_DATA_STALL_FLD)
+#define L2_CTL_TAG_STALL_FLD 2
+#define L2_CTL_TAG_STALL_MASK GENMASK(3, L2_CTL_TAG_STALL_FLD)
+#define L2_CTL_WS_STALL_FLD 4
+#define L2_CTL_WS_STALL_MASK GENMASK(5, L2_CTL_WS_STALL_FLD)
+#define L2_CTL_SET_CLKRATIO BIT(13)
+#define L2_CTL_CLKRATIO_LOCK BIT(31)
+
+#define L2_CTL_STALL_MIN 0
+#define L2_CTL_STALL_MAX 3
+#define L2_CTL_STALL_SET_DELAY_US 1
+#define L2_CTL_STALL_SET_TOUT_US 1000
+
+/*
+ * struct l2_ctl - Baikal-T1 L2 Control block private data.
+ * @dev: Pointer to the device structure.
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ */
+struct l2_ctl {
+ struct device *dev;
+
+ struct regmap *sys_regs;
+};
+
+/*
+ * enum l2_ctl_stall - Baikal-T1 L2-cache-RAM stall identifier.
+ * @L2_WS_STALL: Way-select latency.
+ * @L2_TAG_STALL: Tag latency.
+ * @L2_DATA_STALL: Data latency.
+ */
+enum l2_ctl_stall {
+ L2_WS_STALL,
+ L2_TAG_STALL,
+ L2_DATA_STALL
+};
+
+/*
+ * struct l2_ctl_device_attribute - Baikal-T1 L2-cache device attribute.
+ * @dev_attr: Actual sysfs device attribute.
+ * @id: L2-cache stall field identifier.
+ */
+struct l2_ctl_device_attribute {
+ struct device_attribute dev_attr;
+ enum l2_ctl_stall id;
+};
+
+#define to_l2_ctl_dev_attr(_dev_attr) \
+ container_of(_dev_attr, struct l2_ctl_device_attribute, dev_attr)
+
+#define L2_CTL_ATTR_RW(_name, _prefix, _id) \
+ struct l2_ctl_device_attribute l2_ctl_attr_##_name = \
+ { __ATTR(_name, 0644, _prefix##_show, _prefix##_store), _id }
+
+static int l2_ctl_get_latency(struct l2_ctl *l2, enum l2_ctl_stall id, u32 *val)
+{
+ u32 data = 0;
+ int ret;
+
+ ret = regmap_read(l2->sys_regs, L2_CTL_REG, &data);
+ if (ret)
+ return ret;
+
+ switch (id) {
+ case L2_WS_STALL:
+ *val = FIELD_GET(L2_CTL_WS_STALL_MASK, data);
+ break;
+ case L2_TAG_STALL:
+ *val = FIELD_GET(L2_CTL_TAG_STALL_MASK, data);
+ break;
+ case L2_DATA_STALL:
+ *val = FIELD_GET(L2_CTL_DATA_STALL_MASK, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int l2_ctl_set_latency(struct l2_ctl *l2, enum l2_ctl_stall id, u32 val)
+{
+ u32 mask = 0, data = 0;
+ int ret;
+
+ val = clamp_val(val, L2_CTL_STALL_MIN, L2_CTL_STALL_MAX);
+
+ switch (id) {
+ case L2_WS_STALL:
+ data = FIELD_PREP(L2_CTL_WS_STALL_MASK, val);
+ mask = L2_CTL_WS_STALL_MASK;
+ break;
+ case L2_TAG_STALL:
+ data = FIELD_PREP(L2_CTL_TAG_STALL_MASK, val);
+ mask = L2_CTL_TAG_STALL_MASK;
+ break;
+ case L2_DATA_STALL:
+ data = FIELD_PREP(L2_CTL_DATA_STALL_MASK, val);
+ mask = L2_CTL_DATA_STALL_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ data |= L2_CTL_SET_CLKRATIO;
+ mask |= L2_CTL_SET_CLKRATIO;
+
+ ret = regmap_update_bits(l2->sys_regs, L2_CTL_REG, mask, data);
+ if (ret)
+ return ret;
+
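+ /* Wait for the controller to acknowledge the new latency via the lock bit */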
+ return regmap_read_poll_timeout(l2->sys_regs, L2_CTL_REG, data,
+ data & L2_CTL_CLKRATIO_LOCK,
+ L2_CTL_STALL_SET_DELAY_US,
+ L2_CTL_STALL_SET_TOUT_US);
+}
+
+static void l2_ctl_clear_data(void *data)
+{
+ struct l2_ctl *l2 = data;
+ struct platform_device *pdev = to_platform_device(l2->dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static struct l2_ctl *l2_ctl_create_data(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct l2_ctl *l2;
+ int ret;
+
+ l2 = devm_kzalloc(dev, sizeof(*l2), GFP_KERNEL);
+ if (!l2)
+ return ERR_PTR(-ENOMEM);
+
+ ret = devm_add_action(dev, l2_ctl_clear_data, l2);
+ if (ret) {
+ dev_err(dev, "Can't add L2 CTL data clear action\n");
+ return ERR_PTR(ret);
+ }
+
+ l2->dev = dev;
+ platform_set_drvdata(pdev, l2);
+
+ return l2;
+}
+
+static int l2_ctl_find_sys_regs(struct l2_ctl *l2)
+{
+ l2->sys_regs = syscon_node_to_regmap(l2->dev->of_node->parent);
+ if (IS_ERR(l2->sys_regs)) {
+ dev_err(l2->dev, "Couldn't get L2 CTL register map\n");
+ return PTR_ERR(l2->sys_regs);
+ }
+
+ return 0;
+}
+
+static int l2_ctl_of_parse_property(struct l2_ctl *l2, enum l2_ctl_stall id,
+ const char *propname)
+{
+ int ret = 0;
+ u32 data;
+
+ if (!of_property_read_u32(l2->dev->of_node, propname, &data)) {
+ ret = l2_ctl_set_latency(l2, id, data);
+ if (ret)
+ dev_err(l2->dev, "Invalid value of '%s'\n", propname);
+ }
+
+ return ret;
+}
+
+static int l2_ctl_of_parse(struct l2_ctl *l2)
+{
+ int ret;
+
+ ret = l2_ctl_of_parse_property(l2, L2_WS_STALL, "baikal,l2-ws-latency");
+ if (ret)
+ return ret;
+
+ ret = l2_ctl_of_parse_property(l2, L2_TAG_STALL, "baikal,l2-tag-latency");
+ if (ret)
+ return ret;
+
+ return l2_ctl_of_parse_property(l2, L2_DATA_STALL,
+ "baikal,l2-data-latency");
+}
+
+static ssize_t l2_ctl_latency_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct l2_ctl_device_attribute *devattr = to_l2_ctl_dev_attr(attr);
+ struct l2_ctl *l2 = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ ret = l2_ctl_get_latency(l2, devattr->id, &data);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", data);
+}
+
+static ssize_t l2_ctl_latency_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct l2_ctl_device_attribute *devattr = to_l2_ctl_dev_attr(attr);
+ struct l2_ctl *l2 = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ if (kstrtouint(buf, 0, &data) < 0)
+ return -EINVAL;
+
+ ret = l2_ctl_set_latency(l2, devattr->id, data);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static L2_CTL_ATTR_RW(l2_ws_latency, l2_ctl_latency, L2_WS_STALL);
+static L2_CTL_ATTR_RW(l2_tag_latency, l2_ctl_latency, L2_TAG_STALL);
+static L2_CTL_ATTR_RW(l2_data_latency, l2_ctl_latency, L2_DATA_STALL);
+
+static struct attribute *l2_ctl_sysfs_attrs[] = {
+ &l2_ctl_attr_l2_ws_latency.dev_attr.attr,
+ &l2_ctl_attr_l2_tag_latency.dev_attr.attr,
+ &l2_ctl_attr_l2_data_latency.dev_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(l2_ctl_sysfs);
+
+static void l2_ctl_remove_sysfs(void *data)
+{
+ struct l2_ctl *l2 = data;
+
+ device_remove_groups(l2->dev, l2_ctl_sysfs_groups);
+}
+
+static int l2_ctl_init_sysfs(struct l2_ctl *l2)
+{
+ int ret;
+
+ ret = device_add_groups(l2->dev, l2_ctl_sysfs_groups);
+ if (ret) {
+ dev_err(l2->dev, "Failed to create L2 CTL sysfs nodes\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(l2->dev, l2_ctl_remove_sysfs, l2);
+ if (ret)
+ dev_err(l2->dev, "Can't add L2 CTL sysfs remove action\n");
+
+ return ret;
+}
+
+static int l2_ctl_probe(struct platform_device *pdev)
+{
+ struct l2_ctl *l2;
+ int ret;
+
+ l2 = l2_ctl_create_data(pdev);
+ if (IS_ERR(l2))
+ return PTR_ERR(l2);
+
+ ret = l2_ctl_find_sys_regs(l2);
+ if (ret)
+ return ret;
+
+ ret = l2_ctl_of_parse(l2);
+ if (ret)
+ return ret;
+
+ ret = l2_ctl_init_sysfs(l2);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id l2_ctl_of_match[] = {
+ { .compatible = "baikal,bt1-l2-ctl" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, l2_ctl_of_match);
+
+static struct platform_driver l2_ctl_driver = {
+ .probe = l2_ctl_probe,
+ .driver = {
+ .name = "bt1-l2-ctl",
+ .of_match_table = l2_ctl_of_match
+ }
+};
+module_platform_driver(l2_ctl_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 L2-cache driver");
diff --git a/drivers/memory/da8xx-ddrctl.c b/drivers/memory/da8xx-ddrctl.c
new file mode 100644
index 0000000000..2bf34da85d
--- /dev/null
+++ b/drivers/memory/da8xx-ddrctl.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI da8xx DDR2/mDDR controller driver
+ *
+ * Copyright (C) 2016 BayLibre SAS
+ *
+ * Author:
+ * Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+/*
+ * REVISIT: Linux doesn't have a good framework for the kind of performance
+ * knobs this driver controls. We can't use device tree properties, as this
+ * is hardware configuration rather than hardware description. We also
+ * don't want to commit to maintaining some random sysfs attributes.
+ *
+ * For now we just hardcode the register values for the boards that need
+ * some changes (as is the case for the LCD controller on da850-lcdk - the
+ * first board we support here). When Linux gets an appropriate framework,
+ * we'll easily convert the driver to it.
+ */
+
+struct da8xx_ddrctl_config_knob {
+ const char *name;
+ u32 reg;
+ u32 mask;
+ u32 shift;
+};
+
+static const struct da8xx_ddrctl_config_knob da8xx_ddrctl_knobs[] = {
+ {
+ .name = "da850-pbbpr",
+ .reg = 0x20,
+ .mask = 0xffffff00,
+ .shift = 0,
+ },
+};
+
+struct da8xx_ddrctl_setting {
+ const char *name;
+ u32 val;
+};
+
+struct da8xx_ddrctl_board_settings {
+ const char *board;
+ const struct da8xx_ddrctl_setting *settings;
+};
+
+static const struct da8xx_ddrctl_setting da850_lcdk_ddrctl_settings[] = {
+ {
+ .name = "da850-pbbpr",
+ .val = 0x20,
+ },
+ { }
+};
+
+static const struct da8xx_ddrctl_board_settings da8xx_ddrctl_board_confs[] = {
+ {
+ .board = "ti,da850-lcdk",
+ .settings = da850_lcdk_ddrctl_settings,
+ },
+};
+
+static const struct da8xx_ddrctl_config_knob *
+da8xx_ddrctl_match_knob(const struct da8xx_ddrctl_setting *setting)
+{
+ const struct da8xx_ddrctl_config_knob *knob;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(da8xx_ddrctl_knobs); i++) {
+ knob = &da8xx_ddrctl_knobs[i];
+
+ if (strcmp(knob->name, setting->name) == 0)
+ return knob;
+ }
+
+ return NULL;
+}
+
+static const struct da8xx_ddrctl_setting *da8xx_ddrctl_get_board_settings(void)
+{
+ const struct da8xx_ddrctl_board_settings *board_settings;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(da8xx_ddrctl_board_confs); i++) {
+ board_settings = &da8xx_ddrctl_board_confs[i];
+
+ if (of_machine_is_compatible(board_settings->board))
+ return board_settings->settings;
+ }
+
+ return NULL;
+}
+
+static int da8xx_ddrctl_probe(struct platform_device *pdev)
+{
+ const struct da8xx_ddrctl_config_knob *knob;
+ const struct da8xx_ddrctl_setting *setting;
+ struct resource *res;
+ void __iomem *ddrctl;
+ struct device *dev;
+ u32 reg;
+
+ dev = &pdev->dev;
+
+ setting = da8xx_ddrctl_get_board_settings();
+ if (!setting) {
+ dev_err(dev, "no settings defined for this board\n");
+ return -EINVAL;
+ }
+
+ ddrctl = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(ddrctl)) {
+ dev_err(dev, "unable to map memory controller registers\n");
+ return PTR_ERR(ddrctl);
+ }
+
+ for (; setting->name; setting++) {
+ knob = da8xx_ddrctl_match_knob(setting);
+ if (!knob) {
+ dev_warn(dev,
+ "no such config option: %s\n", setting->name);
+ continue;
+ }
+
+ if (knob->reg + sizeof(u32) > resource_size(res)) {
+ dev_warn(dev,
+ "register offset of '%s' exceeds mapped memory size\n",
+ knob->name);
+ continue;
+ }
+
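+ /* Read-modify-write: the mask preserves the bits outside the knob's field */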
+ reg = readl(ddrctl + knob->reg);
+ reg &= knob->mask;
+ reg |= setting->val << knob->shift;
+
+ dev_dbg(dev, "writing 0x%08x to %s\n", reg, setting->name);
+
+ writel(reg, ddrctl + knob->reg);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id da8xx_ddrctl_of_match[] = {
+ { .compatible = "ti,da850-ddr-controller", },
+ { },
+};
+
+static struct platform_driver da8xx_ddrctl_driver = {
+ .probe = da8xx_ddrctl_probe,
+ .driver = {
+ .name = "da850-ddr-controller",
+ .of_match_table = da8xx_ddrctl_of_match,
+ },
+};
+module_platform_driver(da8xx_ddrctl_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_DESCRIPTION("TI da8xx DDR2/mDDR controller driver");
diff --git a/drivers/memory/dfl-emif.c b/drivers/memory/dfl-emif.c
new file mode 100644
index 0000000000..da06cd30a0
--- /dev/null
+++ b/drivers/memory/dfl-emif.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DFL device driver for EMIF private feature
+ *
+ * Copyright (C) 2020 Intel Corporation, Inc.
+ *
+ */
+#include <linux/bitfield.h>
+#include <linux/dfl.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define FME_FEATURE_ID_EMIF 0x9
+
+#define EMIF_STAT 0x8
+#define EMIF_STAT_INIT_DONE_SFT 0
+#define EMIF_STAT_CALC_FAIL_SFT 8
+#define EMIF_STAT_CLEAR_BUSY_SFT 16
+#define EMIF_CTRL 0x10
+#define EMIF_CTRL_CLEAR_EN_SFT 0
+#define EMIF_CTRL_CLEAR_EN_MSK GENMASK_ULL(7, 0)
+
+#define EMIF_POLL_INVL 10000 /* us */
+#define EMIF_POLL_TIMEOUT 5000000 /* us */
+
+/*
+ * The Capability Register replaces the Control Register (at the same
+ * offset) for EMIF feature revisions > 0. The bitmask that indicates
+ * the presence of memory channels exists in both the Capability Register
+ * and Control Register definitions. These can be thought of as a C union.
+ * The Capability Register definitions are used to check for the existence
+ * of a memory channel, and the Control Register definitions are used for
+ * managing the memory-clear functionality in revision 0.
+ */
+#define EMIF_CAPABILITY_BASE 0x10
+#define EMIF_CAPABILITY_CHN_MSK_V0 GENMASK_ULL(3, 0)
+#define EMIF_CAPABILITY_CHN_MSK GENMASK_ULL(7, 0)
+
+struct dfl_emif {
+ struct device *dev;
+ void __iomem *base;
+ spinlock_t lock; /* Serialises access to EMIF_CTRL reg */
+};
+
+struct emif_attr {
+ struct device_attribute attr;
+ u32 shift;
+ u32 index;
+};
+
+#define to_emif_attr(dev_attr) \
+ container_of(dev_attr, struct emif_attr, attr)
+
+static ssize_t emif_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct emif_attr *eattr = to_emif_attr(attr);
+ struct dfl_emif *de = dev_get_drvdata(dev);
+ u64 val;
+
+ val = readq(de->base + EMIF_STAT);
+
+ return sysfs_emit(buf, "%u\n",
+ !!(val & BIT_ULL(eattr->shift + eattr->index)));
+}
+
+static ssize_t emif_clear_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct emif_attr *eattr = to_emif_attr(attr);
+ struct dfl_emif *de = dev_get_drvdata(dev);
+ u64 clear_busy_msk, clear_en_msk, val;
+ void __iomem *base = de->base;
+
+ if (!sysfs_streq(buf, "1"))
+ return -EINVAL;
+
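+ /* Each channel has its own clear-enable and clear-busy bit, offset by index */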
+ clear_busy_msk = BIT_ULL(EMIF_STAT_CLEAR_BUSY_SFT + eattr->index);
+ clear_en_msk = BIT_ULL(EMIF_CTRL_CLEAR_EN_SFT + eattr->index);
+
+ spin_lock(&de->lock);
+ /* The CLEAR_EN field is WO, but other fields are RW */
+ val = readq(base + EMIF_CTRL);
+ val &= ~EMIF_CTRL_CLEAR_EN_MSK;
+ val |= clear_en_msk;
+ writeq(val, base + EMIF_CTRL);
+ spin_unlock(&de->lock);
+
+ if (readq_poll_timeout(base + EMIF_STAT, val,
+ !(val & clear_busy_msk),
+ EMIF_POLL_INVL, EMIF_POLL_TIMEOUT)) {
+ dev_err(de->dev, "timeout, failed to clear\n");
+ return -ETIMEDOUT;
+ }
+
+ return count;
+}
+
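+/*
+ * The literal 'inf' token is pasted with the interface index to form
+ * attribute names such as inf0_init_done, which become the sysfs nodes.
+ */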
+#define emif_state_attr(_name, _shift, _index) \
+ static struct emif_attr emif_attr_##inf##_index##_##_name = \
+ { .attr = __ATTR(inf##_index##_##_name, 0444, \
+ emif_state_show, NULL), \
+ .shift = (_shift), .index = (_index) }
+
+#define emif_clear_attr(_index) \
+ static struct emif_attr emif_attr_##inf##_index##_clear = \
+ { .attr = __ATTR(inf##_index##_clear, 0200, \
+ NULL, emif_clear_store), \
+ .index = (_index) }
+
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 0);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 1);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 2);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 3);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 4);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 5);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 6);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 7);
+
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 0);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 1);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 2);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 3);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 4);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 5);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 6);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 7);
+
+emif_clear_attr(0);
+emif_clear_attr(1);
+emif_clear_attr(2);
+emif_clear_attr(3);
+emif_clear_attr(4);
+emif_clear_attr(5);
+emif_clear_attr(6);
+emif_clear_attr(7);
+
+static struct attribute *dfl_emif_attrs[] = {
+ &emif_attr_inf0_init_done.attr.attr,
+ &emif_attr_inf0_cal_fail.attr.attr,
+ &emif_attr_inf0_clear.attr.attr,
+
+ &emif_attr_inf1_init_done.attr.attr,
+ &emif_attr_inf1_cal_fail.attr.attr,
+ &emif_attr_inf1_clear.attr.attr,
+
+ &emif_attr_inf2_init_done.attr.attr,
+ &emif_attr_inf2_cal_fail.attr.attr,
+ &emif_attr_inf2_clear.attr.attr,
+
+ &emif_attr_inf3_init_done.attr.attr,
+ &emif_attr_inf3_cal_fail.attr.attr,
+ &emif_attr_inf3_clear.attr.attr,
+
+ &emif_attr_inf4_init_done.attr.attr,
+ &emif_attr_inf4_cal_fail.attr.attr,
+ &emif_attr_inf4_clear.attr.attr,
+
+ &emif_attr_inf5_init_done.attr.attr,
+ &emif_attr_inf5_cal_fail.attr.attr,
+ &emif_attr_inf5_clear.attr.attr,
+
+ &emif_attr_inf6_init_done.attr.attr,
+ &emif_attr_inf6_cal_fail.attr.attr,
+ &emif_attr_inf6_clear.attr.attr,
+
+ &emif_attr_inf7_init_done.attr.attr,
+ &emif_attr_inf7_cal_fail.attr.attr,
+ &emif_attr_inf7_clear.attr.attr,
+
+ NULL,
+};
+
+static umode_t dfl_emif_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct dfl_emif *de = dev_get_drvdata(kobj_to_dev(kobj));
+ struct emif_attr *eattr = container_of(attr, struct emif_attr,
+ attr.attr);
+ struct dfl_device *ddev = to_dfl_dev(de->dev);
+ u64 val;
+
+ /*
+ * This device supports up to 8 memory interfaces, but not every
+ * interface is used on a given platform. The value read from the
+ * CAPABILITY_CHN_MSK field (which is a bitmap) indicates which
+ * interfaces are available.
+ */
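+ /* Memory-clear is only implemented by feature revision 0; hide it otherwise */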
+ if (ddev->revision > 0 && strstr(attr->name, "_clear"))
+ return 0;
+
+ if (ddev->revision == 0)
+ val = FIELD_GET(EMIF_CAPABILITY_CHN_MSK_V0,
+ readq(de->base + EMIF_CAPABILITY_BASE));
+ else
+ val = FIELD_GET(EMIF_CAPABILITY_CHN_MSK,
+ readq(de->base + EMIF_CAPABILITY_BASE));
+
+ return (val & BIT_ULL(eattr->index)) ? attr->mode : 0;
+}
+
+static const struct attribute_group dfl_emif_group = {
+ .is_visible = dfl_emif_visible,
+ .attrs = dfl_emif_attrs,
+};
+
+static const struct attribute_group *dfl_emif_groups[] = {
+ &dfl_emif_group,
+ NULL,
+};
+
+static int dfl_emif_probe(struct dfl_device *ddev)
+{
+ struct device *dev = &ddev->dev;
+ struct dfl_emif *de;
+
+ de = devm_kzalloc(dev, sizeof(*de), GFP_KERNEL);
+ if (!de)
+ return -ENOMEM;
+
+ de->base = devm_ioremap_resource(dev, &ddev->mmio_res);
+ if (IS_ERR(de->base))
+ return PTR_ERR(de->base);
+
+ de->dev = dev;
+ spin_lock_init(&de->lock);
+ dev_set_drvdata(dev, de);
+
+ return 0;
+}
+
+static const struct dfl_device_id dfl_emif_ids[] = {
+ { FME_ID, FME_FEATURE_ID_EMIF },
+ { }
+};
+MODULE_DEVICE_TABLE(dfl, dfl_emif_ids);
+
+static struct dfl_driver dfl_emif_driver = {
+ .drv = {
+ .name = "dfl-emif",
+ .dev_groups = dfl_emif_groups,
+ },
+ .id_table = dfl_emif_ids,
+ .probe = dfl_emif_probe,
+};
+module_dfl_driver(dfl_emif_driver);
+
+MODULE_DESCRIPTION("DFL EMIF driver");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/emif-asm-offsets.c b/drivers/memory/emif-asm-offsets.c
new file mode 100644
index 0000000000..4b98d1854c
--- /dev/null
+++ b/drivers/memory/emif-asm-offsets.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI AM33XX EMIF PM Assembly Offsets
+ *
+ * Copyright (C) 2016-2017 Texas Instruments Inc.
+ */
+#include <linux/ti-emif-sram.h>
+
+int main(void)
+{
+ ti_emif_asm_offsets();
+
+ return 0;
+}
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
new file mode 100644
index 0000000000..f305643209
--- /dev/null
+++ b/drivers/memory/emif.c
@@ -0,0 +1,1201 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * EMIF driver
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Aneesh V <aneesh@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ */
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/platform_data/emif_plat.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/pm.h>
+
+#include "emif.h"
+#include "jedec_ddr.h"
+#include "of_memory.h"
+
+/**
+ * struct emif_data - Per device static data for driver's use
+ * @duplicate: Whether the DDR devices attached to this EMIF
+ * instance are exactly the same as those on EMIF1. In
+ * this case we can save some memory and processing
+ * @temperature_level: Maximum temperature of LPDDR2 devices attached
+ * to this EMIF - read from MR4 register. If there
+ * are two devices attached to this EMIF, this
+ * value is the maximum of the two temperature
+ * levels.
+ * @lpmode: Low power mode (REG_LP_MODE) currently in use
+ * @node: node in the device list
+ * @irq_state: saved IRQ flags, used with the emif_lock spinlock
+ * @base: base address of memory-mapped IO registers.
+ * @dev: device pointer.
+ * @regs_cache: An array of 'struct emif_regs' that stores
+ * calculated register values for different
+ * frequencies, to avoid re-calculating them on
+ * each DVFS transition.
+ * @curr_regs: The set of register values used in the last
+ * frequency change (i.e. corresponding to the
+ * frequency in effect at the moment)
+ * @plat_data: Pointer to saved platform data.
+ * @debugfs_root: dentry to the root folder for EMIF in debugfs
+ * @np_ddr: Pointer to ddr device tree node
+ */
+struct emif_data {
+ u8 duplicate;
+ u8 temperature_level;
+ u8 lpmode;
+ struct list_head node;
+ unsigned long irq_state;
+ void __iomem *base;
+ struct device *dev;
+ struct emif_regs *regs_cache[EMIF_MAX_NUM_FREQUENCIES];
+ struct emif_regs *curr_regs;
+ struct emif_platform_data *plat_data;
+ struct dentry *debugfs_root;
+ struct device_node *np_ddr;
+};
+
+static struct emif_data *emif1;
+static DEFINE_SPINLOCK(emif_lock);
+static unsigned long irq_state;
+static LIST_HEAD(device_list);
+
+#ifdef CONFIG_DEBUG_FS
+static void do_emif_regdump_show(struct seq_file *s, struct emif_data *emif,
+ struct emif_regs *regs)
+{
+ u32 type = emif->plat_data->device_info->type;
+ u32 ip_rev = emif->plat_data->ip_rev;
+
+ seq_printf(s, "EMIF register cache dump for %dMHz\n",
+ regs->freq/1000000);
+
+ seq_printf(s, "ref_ctrl_shdw\t: 0x%08x\n", regs->ref_ctrl_shdw);
+ seq_printf(s, "sdram_tim1_shdw\t: 0x%08x\n", regs->sdram_tim1_shdw);
+ seq_printf(s, "sdram_tim2_shdw\t: 0x%08x\n", regs->sdram_tim2_shdw);
+ seq_printf(s, "sdram_tim3_shdw\t: 0x%08x\n", regs->sdram_tim3_shdw);
+
+ if (ip_rev == EMIF_4D) {
+ seq_printf(s, "read_idle_ctrl_shdw_normal\t: 0x%08x\n",
+ regs->read_idle_ctrl_shdw_normal);
+ seq_printf(s, "read_idle_ctrl_shdw_volt_ramp\t: 0x%08x\n",
+ regs->read_idle_ctrl_shdw_volt_ramp);
+ } else if (ip_rev == EMIF_4D5) {
+ seq_printf(s, "dll_calib_ctrl_shdw_normal\t: 0x%08x\n",
+ regs->dll_calib_ctrl_shdw_normal);
+ seq_printf(s, "dll_calib_ctrl_shdw_volt_ramp\t: 0x%08x\n",
+ regs->dll_calib_ctrl_shdw_volt_ramp);
+ }
+
+ if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4) {
+ seq_printf(s, "ref_ctrl_shdw_derated\t: 0x%08x\n",
+ regs->ref_ctrl_shdw_derated);
+ seq_printf(s, "sdram_tim1_shdw_derated\t: 0x%08x\n",
+ regs->sdram_tim1_shdw_derated);
+ seq_printf(s, "sdram_tim3_shdw_derated\t: 0x%08x\n",
+ regs->sdram_tim3_shdw_derated);
+ }
+}
+
+static int emif_regdump_show(struct seq_file *s, void *unused)
+{
+ struct emif_data *emif = s->private;
+ struct emif_regs **regs_cache;
+ int i;
+
+ if (emif->duplicate)
+ regs_cache = emif1->regs_cache;
+ else
+ regs_cache = emif->regs_cache;
+
+ for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) {
+ do_emif_regdump_show(s, emif, regs_cache[i]);
+ seq_putc(s, '\n');
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(emif_regdump);
+
+static int emif_mr4_show(struct seq_file *s, void *unused)
+{
+ struct emif_data *emif = s->private;
+
+ seq_printf(s, "MR4=%d\n", emif->temperature_level);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(emif_mr4);
+
+static int __init_or_module emif_debugfs_init(struct emif_data *emif)
+{
+ emif->debugfs_root = debugfs_create_dir(dev_name(emif->dev), NULL);
+ debugfs_create_file("regcache_dump", S_IRUGO, emif->debugfs_root, emif,
+ &emif_regdump_fops);
+ debugfs_create_file("mr4", S_IRUGO, emif->debugfs_root, emif,
+ &emif_mr4_fops);
+ return 0;
+}
+
+static void __exit emif_debugfs_exit(struct emif_data *emif)
+{
+ debugfs_remove_recursive(emif->debugfs_root);
+ emif->debugfs_root = NULL;
+}
+#else
+static inline int __init_or_module emif_debugfs_init(struct emif_data *emif)
+{
+ return 0;
+}
+
+static inline void __exit emif_debugfs_exit(struct emif_data *emif)
+{
+}
+#endif
+
+/*
+ * Get bus width used by EMIF. Note that this may be different from the
+ * bus width of the DDR devices used. For instance two 16-bit DDR devices
+ * may be connected to a given CS of EMIF. In this case bus width as far
+ * as EMIF is concerned is 32, where as the DDR bus width is 16 bits.
+ */
+static u32 get_emif_bus_width(struct emif_data *emif)
+{
+ u32 width;
+ void __iomem *base = emif->base;
+
+ width = (readl(base + EMIF_SDRAM_CONFIG) & NARROW_MODE_MASK)
+ >> NARROW_MODE_SHIFT;
+ width = width == 0 ? 32 : 16;
+
+ return width;
+}
+
+static void set_lpmode(struct emif_data *emif, u8 lpmode)
+{
+ u32 temp;
+ void __iomem *base = emif->base;
+
+ /*
+ * Workaround for errata i743 - LPDDR2 Power-Down State is Not
+ * Efficient
+ *
+ * i743 DESCRIPTION:
+ * The EMIF supports power-down state for low power. The EMIF
+ * automatically puts the SDRAM into power-down after the memory is
+ * not accessed for a defined number of cycles and the
+ * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set to 0x4.
+ * As the EMIF supports automatic output impedance calibration, a ZQ
+ * calibration long command is issued every time it exits active
+ * power-down and precharge power-down modes. The EMIF waits and
+ * blocks any other command during this calibration.
+ * The EMIF does not allow selective disabling of ZQ calibration upon
+ * exit of power-down mode. Due to very short periods of power-down
+ * cycles, ZQ calibration overhead creates bandwidth issues and
+ * increases overall system power consumption. On the other hand,
+ * issuing ZQ calibration long commands when exiting self-refresh is
+ * still required.
+ *
+ * WORKAROUND
+ * Because there is no power consumption benefit of the power-down due
+ * to the calibration and there is a performance risk, the guideline
+ * is to not allow power-down state and, therefore, to not have set
+ * the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field to 0x4.
+ */
+ if ((emif->plat_data->ip_rev == EMIF_4D) &&
+ (lpmode == EMIF_LP_MODE_PWR_DN)) {
+ WARN_ONCE(1,
+ "REG_LP_MODE = LP_MODE_PWR_DN(4) is prohibited by erratum i743 switch to LP_MODE_SELF_REFRESH(2)\n");
+ /* rollback LP_MODE to Self-refresh mode */
+ lpmode = EMIF_LP_MODE_SELF_REFRESH;
+ }
+
+ temp = readl(base + EMIF_POWER_MANAGEMENT_CONTROL);
+ temp &= ~LP_MODE_MASK;
+ temp |= (lpmode << LP_MODE_SHIFT);
+ writel(temp, base + EMIF_POWER_MANAGEMENT_CONTROL);
+}
+
+static void do_freq_update(void)
+{
+ struct emif_data *emif;
+
+ /*
+ * Workaround for errata i728: Disable LPMODE during FREQ_UPDATE
+ *
+ * i728 DESCRIPTION:
+ * The EMIF automatically puts the SDRAM into self-refresh mode
+ * after the EMIF has not performed accesses during
+ * EMIF_PWR_MGMT_CTRL[7:4] REG_SR_TIM number of DDR clock cycles
+ * and the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set
+ * to 0x2. If during a small window the following three events
+ * occur:
+ * - The SR_TIMING counter expires
+ * - And frequency change is requested
+ * - And OCP access is requested
+ * Then it causes an unstable clock on the DDR interface.
+ *
+ * WORKAROUND
+ * To avoid the occurrence of the three events, the workaround
+ * is to disable the self-refresh when requesting a frequency
+ * change. Before requesting a frequency change the software must
+ * program EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x0. When the
+ * frequency change has been done, the software can reprogram
+ * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x2
+ */
+ list_for_each_entry(emif, &device_list, node) {
+ if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
+ set_lpmode(emif, EMIF_LP_MODE_DISABLE);
+ }
+
+ /*
+ * TODO: Do FREQ_UPDATE here when an API
+ * is available for this as part of the new
+ * clock framework
+ */
+
+ list_for_each_entry(emif, &device_list, node) {
+ if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
+ set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH);
+ }
+}
+
+/* Find addressing table entry based on the device's type and density */
+static const struct lpddr2_addressing *get_addressing_table(
+ const struct ddr_device_info *device_info)
+{
+ u32 index, type, density;
+
+ type = device_info->type;
+ density = device_info->density;
+
+ switch (type) {
+ case DDR_TYPE_LPDDR2_S4:
+ index = density - 1;
+ break;
+ case DDR_TYPE_LPDDR2_S2:
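+ /*
+ * 1Gb and 2Gb S2 parts have dedicated addressing entries that
+ * follow the common ones in the table, hence the +3 offset.
+ */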
+ switch (density) {
+ case DDR_DENSITY_1Gb:
+ case DDR_DENSITY_2Gb:
+ index = density + 3;
+ break;
+ default:
+ index = density - 1;
+ }
+ break;
+ default:
+ return NULL;
+ }
+
+ return &lpddr2_jedec_addressing_table[index];
+}
+
+static u32 get_zq_config_reg(const struct lpddr2_addressing *addressing,
+ bool cs1_used, bool cal_resistors_per_cs)
+{
+ u32 zq = 0, val = 0;
+
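+ /* The ZQCS interval is programmed in refresh periods: us -> ns, then / tREFI */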
+ val = EMIF_ZQCS_INTERVAL_US * 1000 / addressing->tREFI_ns;
+ zq |= val << ZQ_REFINTERVAL_SHIFT;
+
+ val = DIV_ROUND_UP(T_ZQCL_DEFAULT_NS, T_ZQCS_DEFAULT_NS) - 1;
+ zq |= val << ZQ_ZQCL_MULT_SHIFT;
+
+ val = DIV_ROUND_UP(T_ZQINIT_DEFAULT_NS, T_ZQCL_DEFAULT_NS) - 1;
+ zq |= val << ZQ_ZQINIT_MULT_SHIFT;
+
+ zq |= ZQ_SFEXITEN_ENABLE << ZQ_SFEXITEN_SHIFT;
+
+ if (cal_resistors_per_cs)
+ zq |= ZQ_DUALCALEN_ENABLE << ZQ_DUALCALEN_SHIFT;
+ else
+ zq |= ZQ_DUALCALEN_DISABLE << ZQ_DUALCALEN_SHIFT;
+
+ zq |= ZQ_CS0EN_MASK; /* CS0 is used for sure */
+
+ val = cs1_used ? 1 : 0;
+ zq |= val << ZQ_CS1EN_SHIFT;
+
+ return zq;
+}
+
+static u32 get_temp_alert_config(const struct lpddr2_addressing *addressing,
+ const struct emif_custom_configs *custom_configs, bool cs1_used,
+ u32 sdram_io_width, u32 emif_bus_width)
+{
+ u32 alert = 0, interval, devcnt;
+
+ if (custom_configs && (custom_configs->mask &
+ EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL))
+ interval = custom_configs->temp_alert_poll_interval_ms;
+ else
+ interval = TEMP_ALERT_POLL_INTERVAL_DEFAULT_MS;
+
+ interval *= 1000000; /* Convert to ns */
+ interval /= addressing->tREFI_ns; /* Convert to refresh cycles */
+ alert |= (interval << TA_REFINTERVAL_SHIFT);
+
+ /*
+ * sdram_io_width is in 'log2(x) - 1' form. Convert emif_bus_width
+ * also to this form and subtract to get TA_DEVCNT, which is
+ * in log2(x) form.
+ */
+ emif_bus_width = __fls(emif_bus_width) - 1;
+ devcnt = emif_bus_width - sdram_io_width;
+ alert |= devcnt << TA_DEVCNT_SHIFT;
+
+ /* DEVWDT is in 'log2(x) - 3' form */
+ alert |= (sdram_io_width - 2) << TA_DEVWDT_SHIFT;
+
+ alert |= 1 << TA_SFEXITEN_SHIFT;
+ alert |= 1 << TA_CS0EN_SHIFT;
+ alert |= (cs1_used ? 1 : 0) << TA_CS1EN_SHIFT;
+
+ return alert;
+}
+
+static u32 get_pwr_mgmt_ctrl(u32 freq, struct emif_data *emif, u32 ip_rev)
+{
+ u32 pwr_mgmt_ctrl = 0, timeout;
+ u32 lpmode = EMIF_LP_MODE_SELF_REFRESH;
+ u32 timeout_perf = EMIF_LP_MODE_TIMEOUT_PERFORMANCE;
+ u32 timeout_pwr = EMIF_LP_MODE_TIMEOUT_POWER;
+ u32 freq_threshold = EMIF_LP_MODE_FREQ_THRESHOLD;
+ u32 mask;
+ u8 shift;
+
+ struct emif_custom_configs *cust_cfgs = emif->plat_data->custom_configs;
+
+ if (cust_cfgs && (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE)) {
+ lpmode = cust_cfgs->lpmode;
+ timeout_perf = cust_cfgs->lpmode_timeout_performance;
+ timeout_pwr = cust_cfgs->lpmode_timeout_power;
+ freq_threshold = cust_cfgs->lpmode_freq_threshold;
+ }
+
+ /* Timeout based on DDR frequency */
+ timeout = freq >= freq_threshold ? timeout_perf : timeout_pwr;
+
+ /*
+ * The value to be set in register is "log2(timeout) - 3"
+ * if timeout < 16 load 0 in register
+ * if timeout is not a power of 2, round to next highest power of 2
+ */
+ if (timeout < 16) {
+ timeout = 0;
+ } else {
+ if (timeout & (timeout - 1))
+ timeout <<= 1;
+ timeout = __fls(timeout) - 3;
+ }
+
+ switch (lpmode) {
+ case EMIF_LP_MODE_CLOCK_STOP:
+ shift = CS_TIM_SHIFT;
+ mask = CS_TIM_MASK;
+ break;
+ case EMIF_LP_MODE_SELF_REFRESH:
+ /* Workaround for errata i735 */
+ if (timeout < 6)
+ timeout = 6;
+
+ shift = SR_TIM_SHIFT;
+ mask = SR_TIM_MASK;
+ break;
+ case EMIF_LP_MODE_PWR_DN:
+ shift = PD_TIM_SHIFT;
+ mask = PD_TIM_MASK;
+ break;
+ case EMIF_LP_MODE_DISABLE:
+ default:
+ mask = 0;
+ shift = 0;
+ break;
+ }
+ /* Round to maximum in case of overflow, BUT warn! */
+ if (lpmode != EMIF_LP_MODE_DISABLE && timeout > mask >> shift) {
+ pr_err("TIMEOUT Overflow - lpmode=%d perf=%d pwr=%d freq=%d\n",
+ lpmode,
+ timeout_perf,
+ timeout_pwr,
+ freq_threshold);
+ WARN(1, "timeout=0x%02x greater than 0x%02x. Using max\n",
+ timeout, mask >> shift);
+ timeout = mask >> shift;
+ }
+
+ /* Setup required timing */
+ pwr_mgmt_ctrl = (timeout << shift) & mask;
+ /* setup a default mask for rest of the modes */
+ pwr_mgmt_ctrl |= (SR_TIM_MASK | CS_TIM_MASK | PD_TIM_MASK) &
+ ~mask;
+
+ /* No CS_TIM in EMIF_4D5 */
+ if (ip_rev == EMIF_4D5)
+ pwr_mgmt_ctrl &= ~CS_TIM_MASK;
+
+ pwr_mgmt_ctrl |= lpmode << LP_MODE_SHIFT;
+
+ return pwr_mgmt_ctrl;
+}
+
+/*
+ * Get the temperature level of the EMIF instance:
+ * Reads the MR4 register of attached SDRAM parts to find out the temperature
+ * level. If there are two parts attached (one on each CS), then the temperature
+ * level for the EMIF instance is the higher of the two temperatures.
+ */
+static void get_temperature_level(struct emif_data *emif)
+{
+ u32 temp, temperature_level;
+ void __iomem *base;
+
+ base = emif->base;
+
+ /* Read mode register 4 */
+ writel(DDR_MR4, base + EMIF_LPDDR2_MODE_REG_CONFIG);
+ temperature_level = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
+ temperature_level = (temperature_level & MR4_SDRAM_REF_RATE_MASK) >>
+ MR4_SDRAM_REF_RATE_SHIFT;
+
+ if (emif->plat_data->device_info->cs1_used) {
+ writel(DDR_MR4 | CS_MASK, base + EMIF_LPDDR2_MODE_REG_CONFIG);
+ temp = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
+ temp = (temp & MR4_SDRAM_REF_RATE_MASK)
+ >> MR4_SDRAM_REF_RATE_SHIFT;
+ temperature_level = max(temp, temperature_level);
+ }
+
+ /* treat everything less than nominal (3) in MR4 as nominal */
+ if (unlikely(temperature_level < SDRAM_TEMP_NOMINAL))
+ temperature_level = SDRAM_TEMP_NOMINAL;
+
+ /* if we get a reserved value in MR4, persist with the existing value */
+ if (likely(temperature_level != SDRAM_TEMP_RESERVED_4))
+ emif->temperature_level = temperature_level;
+}
+
+/*
+ * setup_temperature_sensitive_regs() - set the timings for temperature
+ * sensitive registers. This happens once at initialisation time based
+ * on the temperature at boot time and subsequently based on the temperature
+ * alert interrupt. Temperature alert can happen when the temperature
+ * increases or drops. So this function can have the effect of either
+ * derating the timings or going back to nominal values.
+ */
+static void setup_temperature_sensitive_regs(struct emif_data *emif,
+ struct emif_regs *regs)
+{
+ u32 tim1, tim3, ref_ctrl, type;
+ void __iomem *base = emif->base;
+ u32 temperature;
+
+ type = emif->plat_data->device_info->type;
+
+ tim1 = regs->sdram_tim1_shdw;
+ tim3 = regs->sdram_tim3_shdw;
+ ref_ctrl = regs->ref_ctrl_shdw;
+
+ /* No de-rating for non-LPDDR2 devices */
+ if (type != DDR_TYPE_LPDDR2_S2 && type != DDR_TYPE_LPDDR2_S4)
+ goto out;
+
+ temperature = emif->temperature_level;
+ if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH) {
+ ref_ctrl = regs->ref_ctrl_shdw_derated;
+ } else if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH_AND_TIMINGS) {
+ tim1 = regs->sdram_tim1_shdw_derated;
+ tim3 = regs->sdram_tim3_shdw_derated;
+ ref_ctrl = regs->ref_ctrl_shdw_derated;
+ }
+
+out:
+ writel(tim1, base + EMIF_SDRAM_TIMING_1_SHDW);
+ writel(tim3, base + EMIF_SDRAM_TIMING_3_SHDW);
+ writel(ref_ctrl, base + EMIF_SDRAM_REFRESH_CTRL_SHDW);
+}
+
+static irqreturn_t handle_temp_alert(void __iomem *base, struct emif_data *emif)
+{
+ u32 old_temp_level;
+ irqreturn_t ret = IRQ_HANDLED;
+ struct emif_custom_configs *custom_configs;
+
+ spin_lock_irqsave(&emif_lock, irq_state);
+ old_temp_level = emif->temperature_level;
+ get_temperature_level(emif);
+
+ if (unlikely(emif->temperature_level == old_temp_level)) {
+ goto out;
+ } else if (!emif->curr_regs) {
+ dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
+ goto out;
+ }
+
+ custom_configs = emif->plat_data->custom_configs;
+
+ /*
+ * IF we detect higher than "nominal rating" from DDR sensor
+ * on an unsupported DDR part, shutdown system
+ */
+ if (custom_configs && !(custom_configs->mask &
+ EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART)) {
+ if (emif->temperature_level >= SDRAM_TEMP_HIGH_DERATE_REFRESH) {
+ dev_err(emif->dev,
+ "%s:NOT Extended temperature capable memory. Converting MR4=0x%02x as shutdown event\n",
+ __func__, emif->temperature_level);
+ /*
+ * Temperature far too high - do kernel_power_off()
+ * from thread context
+ */
+ emif->temperature_level = SDRAM_TEMP_VERY_HIGH_SHUTDOWN;
+ ret = IRQ_WAKE_THREAD;
+ goto out;
+ }
+ }
+
+ if (emif->temperature_level < old_temp_level ||
+ emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
+ /*
+ * Temperature coming down - defer handling to thread OR
+ * Temperature far too high - do kernel_power_off() from
+ * thread context
+ */
+ ret = IRQ_WAKE_THREAD;
+ } else {
+ /* Temperature is going up - handle immediately */
+ setup_temperature_sensitive_regs(emif, emif->curr_regs);
+ do_freq_update();
+ }
+
+out:
+ spin_unlock_irqrestore(&emif_lock, irq_state);
+ return ret;
+}
+
+static irqreturn_t emif_interrupt_handler(int irq, void *dev_id)
+{
+ u32 interrupts;
+ struct emif_data *emif = dev_id;
+ void __iomem *base = emif->base;
+ struct device *dev = emif->dev;
+ irqreturn_t ret = IRQ_HANDLED;
+
+ /* Save the status and clear it */
+ interrupts = readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
+ writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
+
+ /*
+ * Handle temperature alert
+ * Temperature alert should be the same for all ports,
+ * so it's enough to process it for only one of the ports.
+ */
+ if (interrupts & TA_SYS_MASK)
+ ret = handle_temp_alert(base, emif);
+
+ if (interrupts & ERR_SYS_MASK)
+ dev_err(dev, "Access error from SYS port - %x\n", interrupts);
+
+ if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
+ /* Save the status and clear it */
+ interrupts = readl(base + EMIF_LL_OCP_INTERRUPT_STATUS);
+ writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_STATUS);
+
+ if (interrupts & ERR_LL_MASK)
+ dev_err(dev, "Access error from LL port - %x\n",
+ interrupts);
+ }
+
+ return ret;
+}
+
+static irqreturn_t emif_threaded_isr(int irq, void *dev_id)
+{
+ struct emif_data *emif = dev_id;
+
+ if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
+ dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n");
+
+ /* If we have Power OFF ability, use it, else try restarting */
+ if (kernel_can_power_off()) {
+ kernel_power_off();
+ } else {
+ WARN(1, "FIXME: NO pm_power_off!!! trying restart\n");
+ kernel_restart("SDRAM Over-temp Emergency restart");
+ }
+ return IRQ_HANDLED;
+ }
+
+ spin_lock_irqsave(&emif_lock, irq_state);
+
+ if (emif->curr_regs) {
+ setup_temperature_sensitive_regs(emif, emif->curr_regs);
+ do_freq_update();
+ } else {
+ dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
+ }
+
+ spin_unlock_irqrestore(&emif_lock, irq_state);
+
+ return IRQ_HANDLED;
+}
+
+static void clear_all_interrupts(struct emif_data *emif)
+{
+ void __iomem *base = emif->base;
+
+ writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS),
+ base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
+ if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
+ writel(readl(base + EMIF_LL_OCP_INTERRUPT_STATUS),
+ base + EMIF_LL_OCP_INTERRUPT_STATUS);
+}
+
+static void disable_and_clear_all_interrupts(struct emif_data *emif)
+{
+ void __iomem *base = emif->base;
+
+ /* Disable all interrupts */
+ writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET),
+ base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_CLEAR);
+ if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
+ writel(readl(base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET),
+ base + EMIF_LL_OCP_INTERRUPT_ENABLE_CLEAR);
+
+ /* Clear all interrupts */
+ clear_all_interrupts(emif);
+}
+
+static int __init_or_module setup_interrupts(struct emif_data *emif, u32 irq)
+{
+ u32 interrupts, type;
+ void __iomem *base = emif->base;
+
+ type = emif->plat_data->device_info->type;
+
+ clear_all_interrupts(emif);
+
+ /* Enable interrupts for SYS interface */
+ interrupts = EN_ERR_SYS_MASK;
+ if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4)
+ interrupts |= EN_TA_SYS_MASK;
+ writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET);
+
+ /* Enable interrupts for LL interface */
+ if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
+ /* TA need not be enabled for LL */
+ interrupts = EN_ERR_LL_MASK;
+ writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET);
+ }
+
+ /* setup IRQ handlers */
+ return devm_request_threaded_irq(emif->dev, irq,
+ emif_interrupt_handler,
+ emif_threaded_isr,
+ 0, dev_name(emif->dev),
+ emif);
+}
+
+static void __init_or_module emif_onetime_settings(struct emif_data *emif)
+{
+ u32 pwr_mgmt_ctrl, zq, temp_alert_cfg;
+ void __iomem *base = emif->base;
+ const struct lpddr2_addressing *addressing;
+ const struct ddr_device_info *device_info;
+
+ device_info = emif->plat_data->device_info;
+ addressing = get_addressing_table(device_info);
+
+ /*
+ * Init power management settings
+ * We don't know the frequency yet. Use a high frequency
+ * value for a conservative timeout setting
+ */
+ pwr_mgmt_ctrl = get_pwr_mgmt_ctrl(1000000000, emif,
+ emif->plat_data->ip_rev);
+ emif->lpmode = (pwr_mgmt_ctrl & LP_MODE_MASK) >> LP_MODE_SHIFT;
+ writel(pwr_mgmt_ctrl, base + EMIF_POWER_MANAGEMENT_CONTROL);
+
+ /* Init ZQ calibration settings */
+ zq = get_zq_config_reg(addressing, device_info->cs1_used,
+ device_info->cal_resistors_per_cs);
+ writel(zq, base + EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG);
+
+ /* Check temperature level */
+ get_temperature_level(emif);
+ if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN)
+ dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n");
+
+ /* Init temperature polling */
+ temp_alert_cfg = get_temp_alert_config(addressing,
+ emif->plat_data->custom_configs, device_info->cs1_used,
+ device_info->io_width, get_emif_bus_width(emif));
+ writel(temp_alert_cfg, base + EMIF_TEMPERATURE_ALERT_CONFIG);
+
+ /*
+ * Program external PHY control registers that are not frequency
+ * dependent
+ */
+ if (emif->plat_data->phy_type != EMIF_PHY_TYPE_INTELLIPHY)
+ return;
+ writel(EMIF_EXT_PHY_CTRL_1_VAL, base + EMIF_EXT_PHY_CTRL_1_SHDW);
+ writel(EMIF_EXT_PHY_CTRL_5_VAL, base + EMIF_EXT_PHY_CTRL_5_SHDW);
+ writel(EMIF_EXT_PHY_CTRL_6_VAL, base + EMIF_EXT_PHY_CTRL_6_SHDW);
+ writel(EMIF_EXT_PHY_CTRL_7_VAL, base + EMIF_EXT_PHY_CTRL_7_SHDW);
+ writel(EMIF_EXT_PHY_CTRL_8_VAL, base + EMIF_EXT_PHY_CTRL_8_SHDW);
+ writel(EMIF_EXT_PHY_CTRL_9_VAL, base + EMIF_EXT_PHY_CTRL_9_SHDW);
+ writel(EMIF_EXT_PHY_CTRL_10_VAL, base + EMIF_EXT_PHY_CTRL_10_SHDW);
+ writel(EMIF_EXT_PHY_CTRL_11_VAL, base + EMIF_EXT_PHY_CTRL_11_SHDW);
+ writel(EMIF_EXT_PHY_CTRL_12_VAL, base + EMIF_EXT_PHY_CTRL_12_SHDW);
+ writel(EMIF_EXT_PHY_CTRL_13_VAL, base + EMIF_EXT_PHY_CTRL_13_SHDW);
+ writel(EMIF_EXT_PHY_CTRL_14_VAL, base + EMIF_EXT_PHY_CTRL_14_SHDW);
+ writel(EMIF_EXT_PHY_CTRL_15_VAL, base + EMIF_EXT_PHY_CTRL_15_SHDW);
+ writel(EMIF_EXT_PHY_CTRL_16_VAL, base + EMIF_EXT_PHY_CTRL_16_SHDW);
+ writel(EMIF_EXT_PHY_CTRL_17_VAL, base + EMIF_EXT_PHY_CTRL_17_SHDW);
+ writel(EMIF_EXT_PHY_CTRL_18_VAL, base + EMIF_EXT_PHY_CTRL_18_SHDW);
+ writel(EMIF_EXT_PHY_CTRL_19_VAL, base + EMIF_EXT_PHY_CTRL_19_SHDW);
+ writel(EMIF_EXT_PHY_CTRL_20_VAL, base + EMIF_EXT_PHY_CTRL_20_SHDW);
+ writel(EMIF_EXT_PHY_CTRL_21_VAL, base + EMIF_EXT_PHY_CTRL_21_SHDW);
+ writel(EMIF_EXT_PHY_CTRL_22_VAL, base + EMIF_EXT_PHY_CTRL_22_SHDW);
+ writel(EMIF_EXT_PHY_CTRL_23_VAL, base + EMIF_EXT_PHY_CTRL_23_SHDW);
+ writel(EMIF_EXT_PHY_CTRL_24_VAL, base + EMIF_EXT_PHY_CTRL_24_SHDW);
+}
+
+static void get_default_timings(struct emif_data *emif)
+{
+ struct emif_platform_data *pd = emif->plat_data;
+
+ pd->timings = lpddr2_jedec_timings;
+ pd->timings_arr_size = ARRAY_SIZE(lpddr2_jedec_timings);
+
+ dev_warn(emif->dev, "%s: using default timings\n", __func__);
+}
+
+static int is_dev_data_valid(u32 type, u32 density, u32 io_width, u32 phy_type,
+ u32 ip_rev, struct device *dev)
+{
+ int valid;
+
+ valid = (type == DDR_TYPE_LPDDR2_S4 ||
+ type == DDR_TYPE_LPDDR2_S2)
+ && (density >= DDR_DENSITY_64Mb
+ && density <= DDR_DENSITY_8Gb)
+ && (io_width >= DDR_IO_WIDTH_8
+ && io_width <= DDR_IO_WIDTH_32);
+
+ /* Combinations of EMIF and PHY revisions that we support today */
+ switch (ip_rev) {
+ case EMIF_4D:
+ valid = valid && (phy_type == EMIF_PHY_TYPE_ATTILAPHY);
+ break;
+ case EMIF_4D5:
+ valid = valid && (phy_type == EMIF_PHY_TYPE_INTELLIPHY);
+ break;
+ default:
+ valid = 0;
+ }
+
+ if (!valid)
+ dev_err(dev, "%s: invalid DDR details\n", __func__);
+ return valid;
+}
+
+static int is_custom_config_valid(struct emif_custom_configs *cust_cfgs,
+ struct device *dev)
+{
+ int valid = 1;
+
+ if ((cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE) &&
+ (cust_cfgs->lpmode != EMIF_LP_MODE_DISABLE))
+ valid = cust_cfgs->lpmode_freq_threshold &&
+ cust_cfgs->lpmode_timeout_performance &&
+ cust_cfgs->lpmode_timeout_power;
+
+ if (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL)
+ valid = valid && cust_cfgs->temp_alert_poll_interval_ms;
+
+ if (!valid)
+ dev_warn(dev, "%s: invalid custom configs\n", __func__);
+
+ return valid;
+}
+
+#if defined(CONFIG_OF)
+static void __init_or_module of_get_custom_configs(struct device_node *np_emif,
+ struct emif_data *emif)
+{
+ struct emif_custom_configs *cust_cfgs = NULL;
+ int len;
+ const __be32 *lpmode, *poll_intvl;
+
+ lpmode = of_get_property(np_emif, "low-power-mode", &len);
+ poll_intvl = of_get_property(np_emif, "temp-alert-poll-interval", &len);
+
+ if (lpmode || poll_intvl)
+ cust_cfgs = devm_kzalloc(emif->dev, sizeof(*cust_cfgs),
+ GFP_KERNEL);
+
+ if (!cust_cfgs)
+ return;
+
+ if (lpmode) {
+ cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_LPMODE;
+ cust_cfgs->lpmode = be32_to_cpup(lpmode);
+ of_property_read_u32(np_emif,
+ "low-power-mode-timeout-performance",
+ &cust_cfgs->lpmode_timeout_performance);
+ of_property_read_u32(np_emif,
+ "low-power-mode-timeout-power",
+ &cust_cfgs->lpmode_timeout_power);
+ of_property_read_u32(np_emif,
+ "low-power-mode-freq-threshold",
+ &cust_cfgs->lpmode_freq_threshold);
+ }
+
+ if (poll_intvl) {
+ cust_cfgs->mask |=
+ EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL;
+ cust_cfgs->temp_alert_poll_interval_ms =
+ be32_to_cpup(poll_intvl);
+ }
+
+ if (of_find_property(np_emif, "extended-temp-part", &len))
+ cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART;
+
+ if (!is_custom_config_valid(cust_cfgs, emif->dev)) {
+ devm_kfree(emif->dev, cust_cfgs);
+ return;
+ }
+
+ emif->plat_data->custom_configs = cust_cfgs;
+}
+
+static void __init_or_module of_get_ddr_info(struct device_node *np_emif,
+ struct device_node *np_ddr,
+ struct ddr_device_info *dev_info)
+{
+ u32 density = 0, io_width = 0;
+ int len;
+
+ if (of_find_property(np_emif, "cs1-used", &len))
+ dev_info->cs1_used = true;
+
+ if (of_find_property(np_emif, "cal-resistor-per-cs", &len))
+ dev_info->cal_resistors_per_cs = true;
+
+ if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s4"))
+ dev_info->type = DDR_TYPE_LPDDR2_S4;
+ else if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s2"))
+ dev_info->type = DDR_TYPE_LPDDR2_S2;
+
+ of_property_read_u32(np_ddr, "density", &density);
+ of_property_read_u32(np_ddr, "io-width", &io_width);
+
+ /* Convert from density in Mb to the density encoding in jedec_ddr.h */
+ if (density & (density - 1))
+ dev_info->density = 0;
+ else
+ dev_info->density = __fls(density) - 5;
+
+ /* Convert from io_width in bits to io_width encoding in jedec_ddr.h */
+ if (io_width & (io_width - 1))
+ dev_info->io_width = 0;
+ else
+ dev_info->io_width = __fls(io_width) - 1;
+}
+
+static struct emif_data * __init_or_module of_get_memory_device_details(
+ struct device_node *np_emif, struct device *dev)
+{
+ struct emif_data *emif = NULL;
+ struct ddr_device_info *dev_info = NULL;
+ struct emif_platform_data *pd = NULL;
+ struct device_node *np_ddr;
+ int len;
+
+ np_ddr = of_parse_phandle(np_emif, "device-handle", 0);
+ if (!np_ddr)
+ goto error;
+ emif = devm_kzalloc(dev, sizeof(struct emif_data), GFP_KERNEL);
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);
+
+ if (!emif || !pd || !dev_info) {
+ dev_err(dev, "%s: Out of memory!!\n", __func__);
+ goto error;
+ }
+
+ emif->plat_data = pd;
+ pd->device_info = dev_info;
+ emif->dev = dev;
+ emif->np_ddr = np_ddr;
+ emif->temperature_level = SDRAM_TEMP_NOMINAL;
+
+ if (of_device_is_compatible(np_emif, "ti,emif-4d"))
+ emif->plat_data->ip_rev = EMIF_4D;
+ else if (of_device_is_compatible(np_emif, "ti,emif-4d5"))
+ emif->plat_data->ip_rev = EMIF_4D5;
+
+ of_property_read_u32(np_emif, "phy-type", &pd->phy_type);
+
+ if (of_find_property(np_emif, "hw-caps-ll-interface", &len))
+ pd->hw_caps |= EMIF_HW_CAPS_LL_INTERFACE;
+
+ of_get_ddr_info(np_emif, np_ddr, dev_info);
+ if (!is_dev_data_valid(pd->device_info->type, pd->device_info->density,
+ pd->device_info->io_width, pd->phy_type, pd->ip_rev,
+ emif->dev)) {
+ dev_err(dev, "%s: invalid device data!!\n", __func__);
+ goto error;
+ }
+ /*
+ * For EMIF instances other than EMIF1, see if the devices connected
+ * are exactly the same as on EMIF1 (which is typically the case). If so,
+ * mark it as a duplicate of EMIF1. This will save some memory and
+ * computation.
+ */
+ if (emif1 && emif1->np_ddr == np_ddr) {
+ emif->duplicate = true;
+ goto out;
+ } else if (emif1) {
+ dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n",
+ __func__);
+ }
+
+ of_get_custom_configs(np_emif, emif);
+ emif->plat_data->timings = of_get_ddr_timings(np_ddr, emif->dev,
+ emif->plat_data->device_info->type,
+ &emif->plat_data->timings_arr_size);
+
+ emif->plat_data->min_tck = of_get_min_tck(np_ddr, emif->dev);
+ goto out;
+
+error:
+ return NULL;
+out:
+ return emif;
+}
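+
+/*
+ * Sketch of the device tree layout this parser consumes (the property and
+ * compatible names are the ones handled above; the labels and values are
+ * hypothetical):
+ *
+ *	emif1: emif@4c000000 {
+ *		compatible = "ti,emif-4d5";
+ *		device-handle = <&lpddr2>;
+ *		cs1-used;
+ *		hw-caps-ll-interface;
+ *	};
+ *
+ *	lpddr2: lpddr2 {
+ *		compatible = "jedec,lpddr2-s4";
+ *		density = <2048>;
+ *		io-width = <32>;
+ *	};
+ */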
+
+#else
+
+static struct emif_data * __init_or_module of_get_memory_device_details(
+ struct device_node *np_emif, struct device *dev)
+{
+ return NULL;
+}
+#endif
+
+static struct emif_data *__init_or_module get_device_details(
+ struct platform_device *pdev)
+{
+ u32 size;
+ struct emif_data *emif = NULL;
+ struct ddr_device_info *dev_info;
+ struct emif_custom_configs *cust_cfgs;
+ struct emif_platform_data *pd;
+ struct device *dev;
+ void *temp;
+
+ pd = pdev->dev.platform_data;
+ dev = &pdev->dev;
+
+ if (!(pd && pd->device_info && is_dev_data_valid(pd->device_info->type,
+ pd->device_info->density, pd->device_info->io_width,
+ pd->phy_type, pd->ip_rev, dev))) {
+ dev_err(dev, "%s: invalid device data\n", __func__);
+ goto error;
+ }
+
+ emif = devm_kzalloc(dev, sizeof(*emif), GFP_KERNEL);
+ temp = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);
+
+ if (!emif || !temp || !dev_info)
+ goto error;
+
+ memcpy(temp, pd, sizeof(*pd));
+ pd = temp;
+ memcpy(dev_info, pd->device_info, sizeof(*dev_info));
+
+ pd->device_info = dev_info;
+ emif->plat_data = pd;
+ emif->dev = dev;
+ emif->temperature_level = SDRAM_TEMP_NOMINAL;
+
+	/*
+	 * For EMIF instances other than EMIF1, see if the devices connected
+	 * are exactly the same as on EMIF1 (which is typically the case). If
+	 * so, mark it as a duplicate of EMIF1 and skip copying the timings
+	 * data. This saves some memory and some computation later.
+	 */
+ emif->duplicate = emif1 && (memcmp(dev_info,
+ emif1->plat_data->device_info,
+ sizeof(struct ddr_device_info)) == 0);
+
+ if (emif->duplicate) {
+ pd->timings = NULL;
+ pd->min_tck = NULL;
+ goto out;
+ } else if (emif1) {
+ dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n",
+ __func__);
+ }
+
+	/*
+	 * Copy the custom configs; ignore any allocation error since
+	 * custom_configs is not critical.
+	 */
+ cust_cfgs = pd->custom_configs;
+ if (cust_cfgs && is_custom_config_valid(cust_cfgs, dev)) {
+ temp = devm_kzalloc(dev, sizeof(*cust_cfgs), GFP_KERNEL);
+ if (temp)
+ memcpy(temp, cust_cfgs, sizeof(*cust_cfgs));
+ pd->custom_configs = temp;
+ }
+
+	/*
+	 * Copy the timings and min-tck values from the platform data. If they
+	 * are not available, or if memory allocation fails, use the JEDEC
+	 * defaults.
+	 */
+ size = sizeof(struct lpddr2_timings) * pd->timings_arr_size;
+ if (pd->timings) {
+ temp = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (temp) {
+ memcpy(temp, pd->timings, size);
+ pd->timings = temp;
+ } else {
+ get_default_timings(emif);
+ }
+ } else {
+ get_default_timings(emif);
+ }
+
+ if (pd->min_tck) {
+ temp = devm_kzalloc(dev, sizeof(*pd->min_tck), GFP_KERNEL);
+ if (temp) {
+ memcpy(temp, pd->min_tck, sizeof(*pd->min_tck));
+ pd->min_tck = temp;
+ } else {
+ pd->min_tck = &lpddr2_jedec_min_tck;
+ }
+ } else {
+ pd->min_tck = &lpddr2_jedec_min_tck;
+ }
+
+out:
+ return emif;
+
+error:
+ return NULL;
+}
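+
+/*
+ * Sketch of board code feeding the platform-data path above (the names and
+ * values are hypothetical; only a few of the fields checked by
+ * is_dev_data_valid() are shown):
+ *
+ *	static struct ddr_device_info my_ddr_info = {
+ *		.type		= DDR_TYPE_LPDDR2_S4,
+ *		.density	= DDR_DENSITY_2Gb,
+ *		.io_width	= DDR_IO_WIDTH_32,
+ *	};
+ *
+ *	static struct emif_platform_data my_emif_pdata = {
+ *		.device_info	= &my_ddr_info,
+ *		.ip_rev		= EMIF_4D5,
+ *	};
+ */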
+
+static int __init_or_module emif_probe(struct platform_device *pdev)
+{
+ struct emif_data *emif;
+ int irq, ret;
+
+ if (pdev->dev.of_node)
+ emif = of_get_memory_device_details(pdev->dev.of_node, &pdev->dev);
+ else
+ emif = get_device_details(pdev);
+
+ if (!emif) {
+ pr_err("%s: error getting device data\n", __func__);
+ goto error;
+ }
+
+ list_add(&emif->node, &device_list);
+
+ /* Save pointers to each other in emif and device structures */
+ emif->dev = &pdev->dev;
+ platform_set_drvdata(pdev, emif);
+
+ emif->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(emif->base))
+ goto error;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ goto error;
+
+ emif_onetime_settings(emif);
+ emif_debugfs_init(emif);
+ disable_and_clear_all_interrupts(emif);
+ ret = setup_interrupts(emif, irq);
+ if (ret)
+ goto error;
+
+ /* One-time actions taken on probing the first device */
+ if (!emif1) {
+ emif1 = emif;
+
+ /*
+ * TODO: register notifiers for frequency and voltage
+ * change here once the respective frameworks are
+ * available
+ */
+ }
+
+ dev_info(&pdev->dev, "%s: device configured with addr = %p and IRQ%d\n",
+ __func__, emif->base, irq);
+
+ return 0;
+error:
+ return -ENODEV;
+}
+
+static int __exit emif_remove(struct platform_device *pdev)
+{
+ struct emif_data *emif = platform_get_drvdata(pdev);
+
+ emif_debugfs_exit(emif);
+
+ return 0;
+}
+
+static void emif_shutdown(struct platform_device *pdev)
+{
+ struct emif_data *emif = platform_get_drvdata(pdev);
+
+ disable_and_clear_all_interrupts(emif);
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id emif_of_match[] = {
+ { .compatible = "ti,emif-4d" },
+ { .compatible = "ti,emif-4d5" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, emif_of_match);
+#endif
+
+static struct platform_driver emif_driver = {
+ .remove = __exit_p(emif_remove),
+ .shutdown = emif_shutdown,
+ .driver = {
+ .name = "emif",
+ .of_match_table = of_match_ptr(emif_of_match),
+ },
+};
+
+module_platform_driver_probe(emif_driver, emif_probe);
+
+MODULE_DESCRIPTION("TI EMIF SDRAM Controller Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:emif");
+MODULE_AUTHOR("Texas Instruments Inc");
diff --git a/drivers/memory/emif.h b/drivers/memory/emif.h
new file mode 100644
index 0000000000..55aeb36a5b
--- /dev/null
+++ b/drivers/memory/emif.h
@@ -0,0 +1,607 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Defines for the EMIF driver
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Benoit Cousson (b-cousson@ti.com)
+ */
+#ifndef __EMIF_H
+#define __EMIF_H
+
+/*
+ * Maximum number of different frequencies supported by the EMIF driver.
+ * This determines the number of entries in the pointer array for the
+ * register cache.
+ */
+#define EMIF_MAX_NUM_FREQUENCIES 6
+
+/* State of the core voltage */
+#define DDR_VOLTAGE_STABLE 0
+#define DDR_VOLTAGE_RAMPING 1
+
+/* Defines for timing De-rating */
+#define EMIF_NORMAL_TIMINGS 0
+#define EMIF_DERATED_TIMINGS 1
+
+/* Length of the forced read idle period in terms of cycles */
+#define EMIF_READ_IDLE_LEN_VAL 5
+
+/*
+ * Forced read idle interval to be used when the voltage
+ * is changed as part of DVFS/DPS - 1 ms
+ */
+#define READ_IDLE_INTERVAL_DVFS (1*1000000)
+
+/*
+ * Forced read idle interval to be used when the voltage is stable -
+ * 50 us (or the maximum value) will do
+ */
+#define READ_IDLE_INTERVAL_NORMAL (50*1000000)
+
+/* DLL calibration interval when voltage is NOT stable - 1us */
+#define DLL_CALIB_INTERVAL_DVFS (1*1000000)
+
+#define DLL_CALIB_ACK_WAIT_VAL 5
+
+/* Interval between ZQCS commands - hw team recommended value */
+#define EMIF_ZQCS_INTERVAL_US (50*1000)
+/* Enable ZQ Calibration on exiting Self-refresh */
+#define ZQ_SFEXITEN_ENABLE 1
+/*
+ * ZQ Calibration simultaneously on both chip-selects:
+ * Needs one calibration resistor per CS
+ */
+#define ZQ_DUALCALEN_DISABLE 0
+#define ZQ_DUALCALEN_ENABLE 1
+
+#define T_ZQCS_DEFAULT_NS 90
+#define T_ZQCL_DEFAULT_NS 360
+#define T_ZQINIT_DEFAULT_NS 1000
+
+/* DPD_EN */
+#define DPD_DISABLE 0
+#define DPD_ENABLE 1
+
+/*
+ * Default values for the low-power entry to be used if not provided by the
+ * user. OMAP4/5 has a hw bug (i735) due to which this value cannot be less
+ * than 512. Timeout values are in DDR clock 'cycles' and the frequency
+ * threshold is in Hz.
+ */
+#define EMIF_LP_MODE_TIMEOUT_PERFORMANCE 2048
+#define EMIF_LP_MODE_TIMEOUT_POWER 512
+#define EMIF_LP_MODE_FREQ_THRESHOLD 400000000
+
+/* DDR_PHY_CTRL_1 values for EMIF4D - ATTILA PHY combination */
+#define EMIF_DDR_PHY_CTRL_1_BASE_VAL_ATTILAPHY 0x049FF000
+#define EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ_ATTILAPHY 0x41
+#define EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ_ATTILAPHY 0x80
+#define EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS_ATTILAPHY 0xFF
+
+/* DDR_PHY_CTRL_1 values for EMIF4D5 INTELLIPHY combination */
+#define EMIF_DDR_PHY_CTRL_1_BASE_VAL_INTELLIPHY 0x0E084200
+#define EMIF_PHY_TOTAL_READ_LATENCY_INTELLIPHY_PS 10000
+
+/* TEMP_ALERT_CONFIG - corresponding to temp gradient 5 C/s */
+#define TEMP_ALERT_POLL_INTERVAL_DEFAULT_MS 360
+
+#define EMIF_T_CSTA 3
+#define EMIF_T_PDLL_UL 128
+
+/* External PHY control registers magic values */
+#define EMIF_EXT_PHY_CTRL_1_VAL 0x04020080
+#define EMIF_EXT_PHY_CTRL_5_VAL 0x04010040
+#define EMIF_EXT_PHY_CTRL_6_VAL 0x01004010
+#define EMIF_EXT_PHY_CTRL_7_VAL 0x00001004
+#define EMIF_EXT_PHY_CTRL_8_VAL 0x04010040
+#define EMIF_EXT_PHY_CTRL_9_VAL 0x01004010
+#define EMIF_EXT_PHY_CTRL_10_VAL 0x00001004
+#define EMIF_EXT_PHY_CTRL_11_VAL 0x00000000
+#define EMIF_EXT_PHY_CTRL_12_VAL 0x00000000
+#define EMIF_EXT_PHY_CTRL_13_VAL 0x00000000
+#define EMIF_EXT_PHY_CTRL_14_VAL 0x80080080
+#define EMIF_EXT_PHY_CTRL_15_VAL 0x00800800
+#define EMIF_EXT_PHY_CTRL_16_VAL 0x08102040
+#define EMIF_EXT_PHY_CTRL_17_VAL 0x00000001
+#define EMIF_EXT_PHY_CTRL_18_VAL 0x540A8150
+#define EMIF_EXT_PHY_CTRL_19_VAL 0xA81502A0
+#define EMIF_EXT_PHY_CTRL_20_VAL 0x002A0540
+#define EMIF_EXT_PHY_CTRL_21_VAL 0x00000000
+#define EMIF_EXT_PHY_CTRL_22_VAL 0x00000000
+#define EMIF_EXT_PHY_CTRL_23_VAL 0x00000000
+#define EMIF_EXT_PHY_CTRL_24_VAL 0x00000077
+
+#define EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS 1200
+
+/* Registers offset */
+#define EMIF_MODULE_ID_AND_REVISION 0x0000
+#define EMIF_STATUS 0x0004
+#define EMIF_SDRAM_CONFIG 0x0008
+#define EMIF_SDRAM_CONFIG_2 0x000c
+#define EMIF_SDRAM_REFRESH_CONTROL 0x0010
+#define EMIF_SDRAM_REFRESH_CTRL_SHDW 0x0014
+#define EMIF_SDRAM_TIMING_1 0x0018
+#define EMIF_SDRAM_TIMING_1_SHDW 0x001c
+#define EMIF_SDRAM_TIMING_2 0x0020
+#define EMIF_SDRAM_TIMING_2_SHDW 0x0024
+#define EMIF_SDRAM_TIMING_3 0x0028
+#define EMIF_SDRAM_TIMING_3_SHDW 0x002c
+#define EMIF_LPDDR2_NVM_TIMING 0x0030
+#define EMIF_LPDDR2_NVM_TIMING_SHDW 0x0034
+#define EMIF_POWER_MANAGEMENT_CONTROL 0x0038
+#define EMIF_POWER_MANAGEMENT_CTRL_SHDW 0x003c
+#define EMIF_LPDDR2_MODE_REG_DATA 0x0040
+#define EMIF_LPDDR2_MODE_REG_CONFIG 0x0050
+#define EMIF_OCP_CONFIG 0x0054
+#define EMIF_OCP_CONFIG_VALUE_1 0x0058
+#define EMIF_OCP_CONFIG_VALUE_2 0x005c
+#define EMIF_IODFT_TEST_LOGIC_GLOBAL_CONTROL 0x0060
+#define EMIF_IODFT_TEST_LOGIC_CTRL_MISR_RESULT 0x0064
+#define EMIF_IODFT_TEST_LOGIC_ADDRESS_MISR_RESULT 0x0068
+#define EMIF_IODFT_TEST_LOGIC_DATA_MISR_RESULT_1 0x006c
+#define EMIF_IODFT_TEST_LOGIC_DATA_MISR_RESULT_2 0x0070
+#define EMIF_IODFT_TEST_LOGIC_DATA_MISR_RESULT_3 0x0074
+#define EMIF_PERFORMANCE_COUNTER_1 0x0080
+#define EMIF_PERFORMANCE_COUNTER_2 0x0084
+#define EMIF_PERFORMANCE_COUNTER_CONFIG 0x0088
+#define EMIF_PERFORMANCE_COUNTER_MASTER_REGION_SELECT 0x008c
+#define EMIF_PERFORMANCE_COUNTER_TIME 0x0090
+#define EMIF_MISC_REG 0x0094
+#define EMIF_DLL_CALIB_CTRL 0x0098
+#define EMIF_DLL_CALIB_CTRL_SHDW 0x009c
+#define EMIF_END_OF_INTERRUPT 0x00a0
+#define EMIF_SYSTEM_OCP_INTERRUPT_RAW_STATUS 0x00a4
+#define EMIF_LL_OCP_INTERRUPT_RAW_STATUS 0x00a8
+#define EMIF_SYSTEM_OCP_INTERRUPT_STATUS 0x00ac
+#define EMIF_LL_OCP_INTERRUPT_STATUS 0x00b0
+#define EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET 0x00b4
+#define EMIF_LL_OCP_INTERRUPT_ENABLE_SET 0x00b8
+#define EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_CLEAR 0x00bc
+#define EMIF_LL_OCP_INTERRUPT_ENABLE_CLEAR 0x00c0
+#define EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG 0x00c8
+#define EMIF_TEMPERATURE_ALERT_CONFIG 0x00cc
+#define EMIF_OCP_ERROR_LOG 0x00d0
+#define EMIF_READ_WRITE_LEVELING_RAMP_WINDOW 0x00d4
+#define EMIF_READ_WRITE_LEVELING_RAMP_CONTROL 0x00d8
+#define EMIF_READ_WRITE_LEVELING_CONTROL 0x00dc
+#define EMIF_DDR_PHY_CTRL_1 0x00e4
+#define EMIF_DDR_PHY_CTRL_1_SHDW 0x00e8
+#define EMIF_DDR_PHY_CTRL_2 0x00ec
+#define EMIF_PRIORITY_TO_CLASS_OF_SERVICE_MAPPING 0x0100
+#define EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_1_MAPPING 0x0104
+#define EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_2_MAPPING 0x0108
+#define EMIF_READ_WRITE_EXECUTION_THRESHOLD 0x0120
+#define EMIF_COS_CONFIG 0x0124
+#define EMIF_PHY_STATUS_1 0x0140
+#define EMIF_PHY_STATUS_2 0x0144
+#define EMIF_PHY_STATUS_3 0x0148
+#define EMIF_PHY_STATUS_4 0x014c
+#define EMIF_PHY_STATUS_5 0x0150
+#define EMIF_PHY_STATUS_6 0x0154
+#define EMIF_PHY_STATUS_7 0x0158
+#define EMIF_PHY_STATUS_8 0x015c
+#define EMIF_PHY_STATUS_9 0x0160
+#define EMIF_PHY_STATUS_10 0x0164
+#define EMIF_PHY_STATUS_11 0x0168
+#define EMIF_PHY_STATUS_12 0x016c
+#define EMIF_PHY_STATUS_13 0x0170
+#define EMIF_PHY_STATUS_14 0x0174
+#define EMIF_PHY_STATUS_15 0x0178
+#define EMIF_PHY_STATUS_16 0x017c
+#define EMIF_PHY_STATUS_17 0x0180
+#define EMIF_PHY_STATUS_18 0x0184
+#define EMIF_PHY_STATUS_19 0x0188
+#define EMIF_PHY_STATUS_20 0x018c
+#define EMIF_PHY_STATUS_21 0x0190
+#define EMIF_EXT_PHY_CTRL_1 0x0200
+#define EMIF_EXT_PHY_CTRL_1_SHDW 0x0204
+#define EMIF_EXT_PHY_CTRL_2 0x0208
+#define EMIF_EXT_PHY_CTRL_2_SHDW 0x020c
+#define EMIF_EXT_PHY_CTRL_3 0x0210
+#define EMIF_EXT_PHY_CTRL_3_SHDW 0x0214
+#define EMIF_EXT_PHY_CTRL_4 0x0218
+#define EMIF_EXT_PHY_CTRL_4_SHDW 0x021c
+#define EMIF_EXT_PHY_CTRL_5 0x0220
+#define EMIF_EXT_PHY_CTRL_5_SHDW 0x0224
+#define EMIF_EXT_PHY_CTRL_6 0x0228
+#define EMIF_EXT_PHY_CTRL_6_SHDW 0x022c
+#define EMIF_EXT_PHY_CTRL_7 0x0230
+#define EMIF_EXT_PHY_CTRL_7_SHDW 0x0234
+#define EMIF_EXT_PHY_CTRL_8 0x0238
+#define EMIF_EXT_PHY_CTRL_8_SHDW 0x023c
+#define EMIF_EXT_PHY_CTRL_9 0x0240
+#define EMIF_EXT_PHY_CTRL_9_SHDW 0x0244
+#define EMIF_EXT_PHY_CTRL_10 0x0248
+#define EMIF_EXT_PHY_CTRL_10_SHDW 0x024c
+#define EMIF_EXT_PHY_CTRL_11 0x0250
+#define EMIF_EXT_PHY_CTRL_11_SHDW 0x0254
+#define EMIF_EXT_PHY_CTRL_12 0x0258
+#define EMIF_EXT_PHY_CTRL_12_SHDW 0x025c
+#define EMIF_EXT_PHY_CTRL_13 0x0260
+#define EMIF_EXT_PHY_CTRL_13_SHDW 0x0264
+#define EMIF_EXT_PHY_CTRL_14 0x0268
+#define EMIF_EXT_PHY_CTRL_14_SHDW 0x026c
+#define EMIF_EXT_PHY_CTRL_15 0x0270
+#define EMIF_EXT_PHY_CTRL_15_SHDW 0x0274
+#define EMIF_EXT_PHY_CTRL_16 0x0278
+#define EMIF_EXT_PHY_CTRL_16_SHDW 0x027c
+#define EMIF_EXT_PHY_CTRL_17 0x0280
+#define EMIF_EXT_PHY_CTRL_17_SHDW 0x0284
+#define EMIF_EXT_PHY_CTRL_18 0x0288
+#define EMIF_EXT_PHY_CTRL_18_SHDW 0x028c
+#define EMIF_EXT_PHY_CTRL_19 0x0290
+#define EMIF_EXT_PHY_CTRL_19_SHDW 0x0294
+#define EMIF_EXT_PHY_CTRL_20 0x0298
+#define EMIF_EXT_PHY_CTRL_20_SHDW 0x029c
+#define EMIF_EXT_PHY_CTRL_21 0x02a0
+#define EMIF_EXT_PHY_CTRL_21_SHDW 0x02a4
+#define EMIF_EXT_PHY_CTRL_22 0x02a8
+#define EMIF_EXT_PHY_CTRL_22_SHDW 0x02ac
+#define EMIF_EXT_PHY_CTRL_23 0x02b0
+#define EMIF_EXT_PHY_CTRL_23_SHDW 0x02b4
+#define EMIF_EXT_PHY_CTRL_24 0x02b8
+#define EMIF_EXT_PHY_CTRL_24_SHDW 0x02bc
+#define EMIF_EXT_PHY_CTRL_25 0x02c0
+#define EMIF_EXT_PHY_CTRL_25_SHDW 0x02c4
+#define EMIF_EXT_PHY_CTRL_26 0x02c8
+#define EMIF_EXT_PHY_CTRL_26_SHDW 0x02cc
+#define EMIF_EXT_PHY_CTRL_27 0x02d0
+#define EMIF_EXT_PHY_CTRL_27_SHDW 0x02d4
+#define EMIF_EXT_PHY_CTRL_28 0x02d8
+#define EMIF_EXT_PHY_CTRL_28_SHDW 0x02dc
+#define EMIF_EXT_PHY_CTRL_29 0x02e0
+#define EMIF_EXT_PHY_CTRL_29_SHDW 0x02e4
+#define EMIF_EXT_PHY_CTRL_30 0x02e8
+#define EMIF_EXT_PHY_CTRL_30_SHDW 0x02ec
+
+/* Registers shifts and masks */
+
+/* EMIF_MODULE_ID_AND_REVISION */
+#define SCHEME_SHIFT 30
+#define SCHEME_MASK (0x3 << 30)
+#define MODULE_ID_SHIFT 16
+#define MODULE_ID_MASK (0xfff << 16)
+#define RTL_VERSION_SHIFT 11
+#define RTL_VERSION_MASK (0x1f << 11)
+#define MAJOR_REVISION_SHIFT 8
+#define MAJOR_REVISION_MASK (0x7 << 8)
+#define MINOR_REVISION_SHIFT 0
+#define MINOR_REVISION_MASK (0x3f << 0)
+
+/* STATUS */
+#define BE_SHIFT 31
+#define BE_MASK (1 << 31)
+#define DUAL_CLK_MODE_SHIFT 30
+#define DUAL_CLK_MODE_MASK (1 << 30)
+#define FAST_INIT_SHIFT 29
+#define FAST_INIT_MASK (1 << 29)
+#define RDLVLGATETO_SHIFT 6
+#define RDLVLGATETO_MASK (1 << 6)
+#define RDLVLTO_SHIFT 5
+#define RDLVLTO_MASK (1 << 5)
+#define WRLVLTO_SHIFT 4
+#define WRLVLTO_MASK (1 << 4)
+#define PHY_DLL_READY_SHIFT 2
+#define PHY_DLL_READY_MASK (1 << 2)
+
+/* SDRAM_CONFIG */
+#define SDRAM_TYPE_SHIFT 29
+#define SDRAM_TYPE_MASK (0x7 << 29)
+#define IBANK_POS_SHIFT 27
+#define IBANK_POS_MASK (0x3 << 27)
+#define DDR_TERM_SHIFT 24
+#define DDR_TERM_MASK (0x7 << 24)
+#define DDR2_DDQS_SHIFT 23
+#define DDR2_DDQS_MASK (1 << 23)
+#define DYN_ODT_SHIFT 21
+#define DYN_ODT_MASK (0x3 << 21)
+#define DDR_DISABLE_DLL_SHIFT 20
+#define DDR_DISABLE_DLL_MASK (1 << 20)
+#define SDRAM_DRIVE_SHIFT 18
+#define SDRAM_DRIVE_MASK (0x3 << 18)
+#define CWL_SHIFT 16
+#define CWL_MASK (0x3 << 16)
+#define NARROW_MODE_SHIFT 14
+#define NARROW_MODE_MASK (0x3 << 14)
+#define CL_SHIFT 10
+#define CL_MASK (0xf << 10)
+#define ROWSIZE_SHIFT 7
+#define ROWSIZE_MASK (0x7 << 7)
+#define IBANK_SHIFT 4
+#define IBANK_MASK (0x7 << 4)
+#define EBANK_SHIFT 3
+#define EBANK_MASK (1 << 3)
+#define PAGESIZE_SHIFT 0
+#define PAGESIZE_MASK (0x7 << 0)
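+
+/*
+ * The SHIFT/MASK pairs above are used together to extract or update a
+ * field; for illustration (a sketch, not driver code):
+ *
+ *	cl = (readl(base + EMIF_SDRAM_CONFIG) & CL_MASK) >> CL_SHIFT;
+ */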
+
+/* SDRAM_CONFIG_2 */
+#define CS1NVMEN_SHIFT 30
+#define CS1NVMEN_MASK (1 << 30)
+#define EBANK_POS_SHIFT 27
+#define EBANK_POS_MASK (1 << 27)
+#define RDBNUM_SHIFT 4
+#define RDBNUM_MASK (0x3 << 4)
+#define RDBSIZE_SHIFT 0
+#define RDBSIZE_MASK (0x7 << 0)
+
+/* SDRAM_REFRESH_CONTROL */
+#define INITREF_DIS_SHIFT 31
+#define INITREF_DIS_MASK (1 << 31)
+#define SRT_SHIFT 29
+#define SRT_MASK (1 << 29)
+#define ASR_SHIFT 28
+#define ASR_MASK (1 << 28)
+#define PASR_SHIFT 24
+#define PASR_MASK (0x7 << 24)
+#define REFRESH_RATE_SHIFT 0
+#define REFRESH_RATE_MASK (0xffff << 0)
+
+/* SDRAM_TIMING_1 */
+#define T_RTW_SHIFT 29
+#define T_RTW_MASK (0x7 << 29)
+#define T_RP_SHIFT 25
+#define T_RP_MASK (0xf << 25)
+#define T_RCD_SHIFT 21
+#define T_RCD_MASK (0xf << 21)
+#define T_WR_SHIFT 17
+#define T_WR_MASK (0xf << 17)
+#define T_RAS_SHIFT 12
+#define T_RAS_MASK (0x1f << 12)
+#define T_RC_SHIFT 6
+#define T_RC_MASK (0x3f << 6)
+#define T_RRD_SHIFT 3
+#define T_RRD_MASK (0x7 << 3)
+#define T_WTR_SHIFT 0
+#define T_WTR_MASK (0x7 << 0)
+
+/* SDRAM_TIMING_2 */
+#define T_XP_SHIFT 28
+#define T_XP_MASK (0x7 << 28)
+#define T_ODT_SHIFT 25
+#define T_ODT_MASK (0x7 << 25)
+#define T_XSNR_SHIFT 16
+#define T_XSNR_MASK (0x1ff << 16)
+#define T_XSRD_SHIFT 6
+#define T_XSRD_MASK (0x3ff << 6)
+#define T_RTP_SHIFT 3
+#define T_RTP_MASK (0x7 << 3)
+#define T_CKE_SHIFT 0
+#define T_CKE_MASK (0x7 << 0)
+
+/* SDRAM_TIMING_3 */
+#define T_PDLL_UL_SHIFT 28
+#define T_PDLL_UL_MASK (0xf << 28)
+#define T_CSTA_SHIFT 24
+#define T_CSTA_MASK (0xf << 24)
+#define T_CKESR_SHIFT 21
+#define T_CKESR_MASK (0x7 << 21)
+#define ZQ_ZQCS_SHIFT 15
+#define ZQ_ZQCS_MASK (0x3f << 15)
+#define T_TDQSCKMAX_SHIFT 13
+#define T_TDQSCKMAX_MASK (0x3 << 13)
+#define T_RFC_SHIFT 4
+#define T_RFC_MASK (0x1ff << 4)
+#define T_RAS_MAX_SHIFT 0
+#define T_RAS_MAX_MASK (0xf << 0)
+
+/* POWER_MANAGEMENT_CONTROL */
+#define PD_TIM_SHIFT 12
+#define PD_TIM_MASK (0xf << 12)
+#define DPD_EN_SHIFT 11
+#define DPD_EN_MASK (1 << 11)
+#define LP_MODE_SHIFT 8
+#define LP_MODE_MASK (0x7 << 8)
+#define SR_TIM_SHIFT 4
+#define SR_TIM_MASK (0xf << 4)
+#define CS_TIM_SHIFT 0
+#define CS_TIM_MASK (0xf << 0)
+
+/* LPDDR2_MODE_REG_DATA */
+#define VALUE_0_SHIFT 0
+#define VALUE_0_MASK (0x7f << 0)
+
+/* LPDDR2_MODE_REG_CONFIG */
+#define CS_SHIFT 31
+#define CS_MASK (1 << 31)
+#define REFRESH_EN_SHIFT 30
+#define REFRESH_EN_MASK (1 << 30)
+#define ADDRESS_SHIFT 0
+#define ADDRESS_MASK (0xff << 0)
+
+/* OCP_CONFIG */
+#define SYS_THRESH_MAX_SHIFT 24
+#define SYS_THRESH_MAX_MASK (0xf << 24)
+#define MPU_THRESH_MAX_SHIFT 20
+#define MPU_THRESH_MAX_MASK (0xf << 20)
+#define LL_THRESH_MAX_SHIFT 16
+#define LL_THRESH_MAX_MASK (0xf << 16)
+
+/* PERFORMANCE_COUNTER_1 */
+#define COUNTER1_SHIFT 0
+#define COUNTER1_MASK (0xffffffff << 0)
+
+/* PERFORMANCE_COUNTER_2 */
+#define COUNTER2_SHIFT 0
+#define COUNTER2_MASK (0xffffffff << 0)
+
+/* PERFORMANCE_COUNTER_CONFIG */
+#define CNTR2_MCONNID_EN_SHIFT 31
+#define CNTR2_MCONNID_EN_MASK (1 << 31)
+#define CNTR2_REGION_EN_SHIFT 30
+#define CNTR2_REGION_EN_MASK (1 << 30)
+#define CNTR2_CFG_SHIFT 16
+#define CNTR2_CFG_MASK (0xf << 16)
+#define CNTR1_MCONNID_EN_SHIFT 15
+#define CNTR1_MCONNID_EN_MASK (1 << 15)
+#define CNTR1_REGION_EN_SHIFT 14
+#define CNTR1_REGION_EN_MASK (1 << 14)
+#define CNTR1_CFG_SHIFT 0
+#define CNTR1_CFG_MASK (0xf << 0)
+
+/* PERFORMANCE_COUNTER_MASTER_REGION_SELECT */
+#define MCONNID2_SHIFT 24
+#define MCONNID2_MASK (0xff << 24)
+#define REGION_SEL2_SHIFT 16
+#define REGION_SEL2_MASK (0x3 << 16)
+#define MCONNID1_SHIFT 8
+#define MCONNID1_MASK (0xff << 8)
+#define REGION_SEL1_SHIFT 0
+#define REGION_SEL1_MASK (0x3 << 0)
+
+/* PERFORMANCE_COUNTER_TIME */
+#define TOTAL_TIME_SHIFT 0
+#define TOTAL_TIME_MASK (0xffffffff << 0)
+
+/* DLL_CALIB_CTRL */
+#define ACK_WAIT_SHIFT 16
+#define ACK_WAIT_MASK (0xf << 16)
+#define DLL_CALIB_INTERVAL_SHIFT 0
+#define DLL_CALIB_INTERVAL_MASK (0x1ff << 0)
+
+/* END_OF_INTERRUPT */
+#define EOI_SHIFT 0
+#define EOI_MASK (1 << 0)
+
+/* SYSTEM_OCP_INTERRUPT_RAW_STATUS */
+#define DNV_SYS_SHIFT 2
+#define DNV_SYS_MASK (1 << 2)
+#define TA_SYS_SHIFT 1
+#define TA_SYS_MASK (1 << 1)
+#define ERR_SYS_SHIFT 0
+#define ERR_SYS_MASK (1 << 0)
+
+/* LOW_LATENCY_OCP_INTERRUPT_RAW_STATUS */
+#define DNV_LL_SHIFT 2
+#define DNV_LL_MASK (1 << 2)
+#define TA_LL_SHIFT 1
+#define TA_LL_MASK (1 << 1)
+#define ERR_LL_SHIFT 0
+#define ERR_LL_MASK (1 << 0)
+
+/* SYSTEM_OCP_INTERRUPT_ENABLE_SET */
+#define EN_DNV_SYS_SHIFT 2
+#define EN_DNV_SYS_MASK (1 << 2)
+#define EN_TA_SYS_SHIFT 1
+#define EN_TA_SYS_MASK (1 << 1)
+#define EN_ERR_SYS_SHIFT 0
+#define EN_ERR_SYS_MASK (1 << 0)
+
+/* LOW_LATENCY_OCP_INTERRUPT_ENABLE_SET */
+#define EN_DNV_LL_SHIFT 2
+#define EN_DNV_LL_MASK (1 << 2)
+#define EN_TA_LL_SHIFT 1
+#define EN_TA_LL_MASK (1 << 1)
+#define EN_ERR_LL_SHIFT 0
+#define EN_ERR_LL_MASK (1 << 0)
+
+/* SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG */
+#define ZQ_CS1EN_SHIFT 31
+#define ZQ_CS1EN_MASK (1 << 31)
+#define ZQ_CS0EN_SHIFT 30
+#define ZQ_CS0EN_MASK (1 << 30)
+#define ZQ_DUALCALEN_SHIFT 29
+#define ZQ_DUALCALEN_MASK (1 << 29)
+#define ZQ_SFEXITEN_SHIFT 28
+#define ZQ_SFEXITEN_MASK (1 << 28)
+#define ZQ_ZQINIT_MULT_SHIFT 18
+#define ZQ_ZQINIT_MULT_MASK (0x3 << 18)
+#define ZQ_ZQCL_MULT_SHIFT 16
+#define ZQ_ZQCL_MULT_MASK (0x3 << 16)
+#define ZQ_REFINTERVAL_SHIFT 0
+#define ZQ_REFINTERVAL_MASK (0xffff << 0)
+
+/* TEMPERATURE_ALERT_CONFIG */
+#define TA_CS1EN_SHIFT 31
+#define TA_CS1EN_MASK (1 << 31)
+#define TA_CS0EN_SHIFT 30
+#define TA_CS0EN_MASK (1 << 30)
+#define TA_SFEXITEN_SHIFT 28
+#define TA_SFEXITEN_MASK (1 << 28)
+#define TA_DEVWDT_SHIFT 26
+#define TA_DEVWDT_MASK (0x3 << 26)
+#define TA_DEVCNT_SHIFT 24
+#define TA_DEVCNT_MASK (0x3 << 24)
+#define TA_REFINTERVAL_SHIFT 0
+#define TA_REFINTERVAL_MASK (0x3fffff << 0)
+
+/* OCP_ERROR_LOG */
+#define MADDRSPACE_SHIFT 14
+#define MADDRSPACE_MASK (0x3 << 14)
+#define MBURSTSEQ_SHIFT 11
+#define MBURSTSEQ_MASK (0x7 << 11)
+#define MCMD_SHIFT 8
+#define MCMD_MASK (0x7 << 8)
+#define MCONNID_SHIFT 0
+#define MCONNID_MASK (0xff << 0)
+
+/* READ_WRITE_LEVELING_CONTROL */
+#define RDWRLVLFULL_START 0x80000000
+
+/* DDR_PHY_CTRL_1 - EMIF4D */
+#define DLL_SLAVE_DLY_CTRL_SHIFT_4D 4
+#define DLL_SLAVE_DLY_CTRL_MASK_4D (0xFF << 4)
+#define READ_LATENCY_SHIFT_4D 0
+#define READ_LATENCY_MASK_4D (0xf << 0)
+
+/* DDR_PHY_CTRL_1 - EMIF4D5 */
+#define DLL_HALF_DELAY_SHIFT_4D5 21
+#define DLL_HALF_DELAY_MASK_4D5 (1 << 21)
+#define READ_LATENCY_SHIFT_4D5 0
+#define READ_LATENCY_MASK_4D5 (0x1f << 0)
+
+/* DDR_PHY_CTRL_1_SHDW */
+#define DDR_PHY_CTRL_1_SHDW_SHIFT 5
+#define DDR_PHY_CTRL_1_SHDW_MASK (0x7ffffff << 5)
+#define READ_LATENCY_SHDW_SHIFT 0
+#define READ_LATENCY_SHDW_MASK (0x1f << 0)
+
+#define EMIF_SRAM_AM33_REG_LAYOUT 0x00000000
+#define EMIF_SRAM_AM43_REG_LAYOUT 0x00000001
+
+#ifndef __ASSEMBLY__
+/*
+ * Structure containing shadows of important EMIF registers. The
+ * calculation function fills in this structure, which is later used for
+ * initialisation and DVFS.
+ */
+struct emif_regs {
+ u32 freq;
+ u32 ref_ctrl_shdw;
+ u32 ref_ctrl_shdw_derated;
+ u32 sdram_tim1_shdw;
+ u32 sdram_tim1_shdw_derated;
+ u32 sdram_tim2_shdw;
+ u32 sdram_tim3_shdw;
+ u32 sdram_tim3_shdw_derated;
+ u32 pwr_mgmt_ctrl_shdw;
+ union {
+ u32 read_idle_ctrl_shdw_normal;
+ u32 dll_calib_ctrl_shdw_normal;
+ };
+ union {
+ u32 read_idle_ctrl_shdw_volt_ramp;
+ u32 dll_calib_ctrl_shdw_volt_ramp;
+ };
+
+ u32 phy_ctrl_1_shdw;
+ u32 ext_phy_ctrl_2_shdw;
+ u32 ext_phy_ctrl_3_shdw;
+ u32 ext_phy_ctrl_4_shdw;
+};
+
+struct ti_emif_pm_functions;
+
+extern unsigned int ti_emif_sram;
+extern unsigned int ti_emif_sram_sz;
+extern struct ti_emif_pm_data ti_emif_pm_sram_data;
+extern struct emif_regs_amx3 ti_emif_regs_amx3;
+
+void ti_emif_save_context(void);
+void ti_emif_restore_context(void);
+void ti_emif_run_hw_leveling(void);
+void ti_emif_enter_sr(void);
+void ti_emif_exit_sr(void);
+void ti_emif_abort_sr(void);
+
+#endif /* __ASSEMBLY__ */
+#endif /* __EMIF_H */
diff --git a/drivers/memory/fsl-corenet-cf.c b/drivers/memory/fsl-corenet-cf.c
new file mode 100644
index 0000000000..7fc9f57ae2
--- /dev/null
+++ b/drivers/memory/fsl-corenet-cf.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * CoreNet Coherency Fabric error reporting
+ *
+ * Copyright 2014 Freescale Semiconductor Inc.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+enum ccf_version {
+ CCF1,
+ CCF2,
+};
+
+struct ccf_info {
+ enum ccf_version version;
+ int err_reg_offs;
+ bool has_brr;
+};
+
+static const struct ccf_info ccf1_info = {
+ .version = CCF1,
+ .err_reg_offs = 0xa00,
+ .has_brr = false,
+};
+
+static const struct ccf_info ccf2_info = {
+ .version = CCF2,
+ .err_reg_offs = 0xe40,
+ .has_brr = true,
+};
+
+/*
+ * This register is present but not documented, with different IP_ID
+ * values, on other chips with fsl,corenet2-cf such as t4240 and b4860.
+ */
+#define CCF_BRR 0xbf8
+#define CCF_BRR_IPID 0xffff0000
+#define CCF_BRR_IPID_T1040 0x09310000
+
+static const struct of_device_id ccf_matches[] = {
+ {
+ .compatible = "fsl,corenet1-cf",
+ .data = &ccf1_info,
+ },
+ {
+ .compatible = "fsl,corenet2-cf",
+ .data = &ccf2_info,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ccf_matches);
+
+struct ccf_err_regs {
+ u32 errdet; /* 0x00 Error Detect Register */
+ /* 0x04 Error Enable (ccf1)/Disable (ccf2) Register */
+ u32 errdis;
+ /* 0x08 Error Interrupt Enable Register (ccf2 only) */
+ u32 errinten;
+ u32 cecar; /* 0x0c Error Capture Attribute Register */
+ u32 cecaddrh; /* 0x10 Error Capture Address High */
+ u32 cecaddrl; /* 0x14 Error Capture Address Low */
+ u32 cecar2; /* 0x18 Error Capture Attribute Register 2 */
+};
+
+/* LAE/CV also valid for errdis and errinten */
+#define ERRDET_LAE (1 << 0) /* Local Access Error */
+#define ERRDET_CV (1 << 1) /* Coherency Violation */
+#define ERRDET_UTID (1 << 2) /* Unavailable Target ID (t1040) */
+#define ERRDET_MCST (1 << 3) /* Multicast Stash (t1040) */
+#define ERRDET_CTYPE_SHIFT 26 /* Capture Type (ccf2 only) */
+#define ERRDET_CTYPE_MASK (0x1f << ERRDET_CTYPE_SHIFT)
+#define ERRDET_CAP (1 << 31) /* Capture Valid (ccf2 only) */
+
+#define CECAR_VAL (1 << 0) /* Valid (ccf1 only) */
+#define CECAR_UVT (1 << 15) /* Unavailable target ID (ccf1) */
+#define CECAR_SRCID_SHIFT_CCF1 24
+#define CECAR_SRCID_MASK_CCF1 (0xff << CECAR_SRCID_SHIFT_CCF1)
+#define CECAR_SRCID_SHIFT_CCF2 18
+#define CECAR_SRCID_MASK_CCF2 (0xff << CECAR_SRCID_SHIFT_CCF2)
+
+#define CECADDRH_ADDRH 0xff
+
+struct ccf_private {
+ const struct ccf_info *info;
+ struct device *dev;
+ void __iomem *regs;
+ struct ccf_err_regs __iomem *err_regs;
+ bool t1040;
+};
+
+static irqreturn_t ccf_irq(int irq, void *dev_id)
+{
+ struct ccf_private *ccf = dev_id;
+ static DEFINE_RATELIMIT_STATE(ratelimit, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ u32 errdet, cecar, cecar2;
+ u64 addr;
+ u32 src_id;
+ bool uvt = false;
+ bool cap_valid = false;
+
+ errdet = ioread32be(&ccf->err_regs->errdet);
+ cecar = ioread32be(&ccf->err_regs->cecar);
+ cecar2 = ioread32be(&ccf->err_regs->cecar2);
+ addr = ioread32be(&ccf->err_regs->cecaddrl);
+ addr |= ((u64)(ioread32be(&ccf->err_regs->cecaddrh) &
+ CECADDRH_ADDRH)) << 32;
+
+ if (!__ratelimit(&ratelimit))
+ goto out;
+
+ switch (ccf->info->version) {
+ case CCF1:
+ if (cecar & CECAR_VAL) {
+ if (cecar & CECAR_UVT)
+ uvt = true;
+
+ src_id = (cecar & CECAR_SRCID_MASK_CCF1) >>
+ CECAR_SRCID_SHIFT_CCF1;
+ cap_valid = true;
+ }
+
+ break;
+ case CCF2:
+ if (errdet & ERRDET_CAP) {
+ src_id = (cecar & CECAR_SRCID_MASK_CCF2) >>
+ CECAR_SRCID_SHIFT_CCF2;
+ cap_valid = true;
+ }
+
+ break;
+ }
+
+ dev_crit(ccf->dev, "errdet 0x%08x cecar 0x%08x cecar2 0x%08x\n",
+ errdet, cecar, cecar2);
+
+ if (errdet & ERRDET_LAE) {
+ if (uvt)
+ dev_crit(ccf->dev, "LAW Unavailable Target ID\n");
+ else
+ dev_crit(ccf->dev, "Local Access Window Error\n");
+ }
+
+ if (errdet & ERRDET_CV)
+ dev_crit(ccf->dev, "Coherency Violation\n");
+
+ if (errdet & ERRDET_UTID)
+ dev_crit(ccf->dev, "Unavailable Target ID\n");
+
+ if (errdet & ERRDET_MCST)
+ dev_crit(ccf->dev, "Multicast Stash\n");
+
+ if (cap_valid) {
+ dev_crit(ccf->dev, "address 0x%09llx, src id 0x%x\n",
+ addr, src_id);
+ }
+
+out:
+ iowrite32be(errdet, &ccf->err_regs->errdet);
+ return errdet ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int ccf_probe(struct platform_device *pdev)
+{
+ struct ccf_private *ccf;
+ const struct of_device_id *match;
+ u32 errinten;
+ int ret, irq;
+
+ match = of_match_device(ccf_matches, &pdev->dev);
+ if (WARN_ON(!match))
+ return -ENODEV;
+
+ ccf = devm_kzalloc(&pdev->dev, sizeof(*ccf), GFP_KERNEL);
+ if (!ccf)
+ return -ENOMEM;
+
+ ccf->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ccf->regs))
+ return PTR_ERR(ccf->regs);
+
+ ccf->dev = &pdev->dev;
+ ccf->info = match->data;
+ ccf->err_regs = ccf->regs + ccf->info->err_reg_offs;
+
+ if (ccf->info->has_brr) {
+ u32 brr = ioread32be(ccf->regs + CCF_BRR);
+
+ if ((brr & CCF_BRR_IPID) == CCF_BRR_IPID_T1040)
+ ccf->t1040 = true;
+ }
+
+ dev_set_drvdata(&pdev->dev, ccf);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, ccf_irq, 0, pdev->name, ccf);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: can't request irq\n", __func__);
+ return ret;
+ }
+
+ errinten = ERRDET_LAE | ERRDET_CV;
+ if (ccf->t1040)
+ errinten |= ERRDET_UTID | ERRDET_MCST;
+
+ switch (ccf->info->version) {
+ case CCF1:
+ /* On CCF1 this register enables rather than disables. */
+ iowrite32be(errinten, &ccf->err_regs->errdis);
+ break;
+
+ case CCF2:
+ iowrite32be(0, &ccf->err_regs->errdis);
+ iowrite32be(errinten, &ccf->err_regs->errinten);
+ break;
+ }
+
+ return 0;
+}
+
+static int ccf_remove(struct platform_device *pdev)
+{
+ struct ccf_private *ccf = dev_get_drvdata(&pdev->dev);
+
+ switch (ccf->info->version) {
+ case CCF1:
+ iowrite32be(0, &ccf->err_regs->errdis);
+ break;
+
+ case CCF2:
+ /*
+ * We clear errdis on ccf1 because that's the only way to
+ * disable interrupts, but on ccf2 there's no need to disable
+ * detection.
+ */
+ iowrite32be(0, &ccf->err_regs->errinten);
+ break;
+ }
+
+ return 0;
+}
+
+static struct platform_driver ccf_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = ccf_matches,
+ },
+ .probe = ccf_probe,
+ .remove = ccf_remove,
+};
+
+module_platform_driver(ccf_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale Semiconductor");
+MODULE_DESCRIPTION("Freescale CoreNet Coherency Fabric error reporting");
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
new file mode 100644
index 0000000000..2509e51520
--- /dev/null
+++ b/drivers/memory/fsl_ifc.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc
+ *
+ * Freescale Integrated Flash Controller
+ *
+ * Author: Dipen Dudhat <Dipen.Dudhat@freescale.com>
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/fsl_ifc.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
+EXPORT_SYMBOL(fsl_ifc_ctrl_dev);
+
+/*
+ * convert_ifc_address - convert the base address
+ * @addr_base: base address of the memory bank
+ */
+unsigned int convert_ifc_address(phys_addr_t addr_base)
+{
+ return addr_base & CSPR_BA;
+}
+EXPORT_SYMBOL(convert_ifc_address);
+
+/*
+ * fsl_ifc_find - find IFC bank
+ * @addr_base: base address of the memory bank
+ *
+ * This function walks the IFC banks comparing the "Base address" field of
+ * the CSPR registers with the supplied addr_base argument. When the bases
+ * match, this function returns the bank number (starting from 0);
+ * otherwise it returns an appropriate errno value.
+ */
+int fsl_ifc_find(phys_addr_t addr_base)
+{
+ int i = 0;
+
+ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->gregs)
+ return -ENODEV;
+
+ for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) {
+ u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->gregs->cspr_cs[i].cspr);
+
+ if (cspr & CSPR_V && (cspr & CSPR_BA) ==
+ convert_ifc_address(addr_base))
+ return i;
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(fsl_ifc_find);
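+
+/*
+ * Example use (the address is hypothetical): a driver that knows the
+ * physical base of its chip select can look up the bank index with
+ *
+ *	int bank = fsl_ifc_find(0xe8000000);
+ *	if (bank < 0)
+ *		return bank;
+ */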
+
+static int fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl)
+{
+ struct fsl_ifc_global __iomem *ifc = ctrl->gregs;
+
+ /*
+ * Clear all the common status and event registers
+ */
+ if (ifc_in32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER)
+ ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat);
+
+ /* enable all error and events */
+ ifc_out32(IFC_CM_EVTER_EN_CSEREN, &ifc->cm_evter_en);
+
+ /* enable all error and event interrupts */
+ ifc_out32(IFC_CM_EVTER_INTR_EN_CSERIREN, &ifc->cm_evter_intr_en);
+ ifc_out32(0x0, &ifc->cm_erattr0);
+ ifc_out32(0x0, &ifc->cm_erattr1);
+
+ return 0;
+}
+
+static int fsl_ifc_ctrl_remove(struct platform_device *dev)
+{
+ struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(&dev->dev);
+
+ of_platform_depopulate(&dev->dev);
+ free_irq(ctrl->nand_irq, ctrl);
+ free_irq(ctrl->irq, ctrl);
+
+ irq_dispose_mapping(ctrl->nand_irq);
+ irq_dispose_mapping(ctrl->irq);
+
+ iounmap(ctrl->gregs);
+
+ dev_set_drvdata(&dev->dev, NULL);
+
+ return 0;
+}
+
+/*
+ * NAND events are split between an operational interrupt which only
+ * receives OPC, and an error interrupt that receives everything else,
+ * including non-NAND errors. Whichever interrupt gets to it first
+ * records the status and wakes the wait queue.
+ */
+static DEFINE_SPINLOCK(nand_irq_lock);
+
+static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl)
+{
+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+ unsigned long flags;
+ u32 stat;
+
+ spin_lock_irqsave(&nand_irq_lock, flags);
+
+ stat = ifc_in32(&ifc->ifc_nand.nand_evter_stat);
+ if (stat) {
+ ifc_out32(stat, &ifc->ifc_nand.nand_evter_stat);
+ ctrl->nand_stat = stat;
+ wake_up(&ctrl->nand_wait);
+ }
+
+ spin_unlock_irqrestore(&nand_irq_lock, flags);
+
+ return stat;
+}
+
+static irqreturn_t fsl_ifc_nand_irq(int irqno, void *data)
+{
+ struct fsl_ifc_ctrl *ctrl = data;
+
+ if (check_nand_stat(ctrl))
+ return IRQ_HANDLED;
+
+ return IRQ_NONE;
+}
+
+/*
+ * NOTE: This interrupt is used to report IFC events of various kinds,
+ * such as transaction errors on the chipselects.
+ */
+static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
+{
+ struct fsl_ifc_ctrl *ctrl = data;
+ struct fsl_ifc_global __iomem *ifc = ctrl->gregs;
+ u32 err_axiid, err_srcid, status, cs_err, err_addr;
+ irqreturn_t ret = IRQ_NONE;
+
+ /* read for chip select error */
+ cs_err = ifc_in32(&ifc->cm_evter_stat);
+ if (cs_err) {
+ dev_err(ctrl->dev, "transaction sent to IFC is not mapped to any memory bank 0x%08X\n",
+ cs_err);
+ /* clear the chip select error */
+ ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat);
+
+		/* read the error attribute registers and print the error info */
+ status = ifc_in32(&ifc->cm_erattr0);
+ err_addr = ifc_in32(&ifc->cm_erattr1);
+
+ if (status & IFC_CM_ERATTR0_ERTYP_READ)
+ dev_err(ctrl->dev, "Read transaction error CM_ERATTR0 0x%08X\n",
+ status);
+ else
+ dev_err(ctrl->dev, "Write transaction error CM_ERATTR0 0x%08X\n",
+ status);
+
+ err_axiid = (status & IFC_CM_ERATTR0_ERAID) >>
+ IFC_CM_ERATTR0_ERAID_SHIFT;
+ dev_err(ctrl->dev, "AXI ID of the error transaction 0x%08X\n",
+ err_axiid);
+
+ err_srcid = (status & IFC_CM_ERATTR0_ESRCID) >>
+ IFC_CM_ERATTR0_ESRCID_SHIFT;
+ dev_err(ctrl->dev, "SRC ID of the error transaction 0x%08X\n",
+ err_srcid);
+
+ dev_err(ctrl->dev, "Transaction Address corresponding to error ERADDR 0x%08X\n",
+ err_addr);
+
+ ret = IRQ_HANDLED;
+ }
+
+ if (check_nand_stat(ctrl))
+ ret = IRQ_HANDLED;
+
+ return ret;
+}
+
+/*
+ * fsl_ifc_ctrl_probe
+ *
+ * Called by the device layer when it finds a device matching one this
+ * driver can handle. This code allocates only the resources needed for
+ * the controller itself. The resources for the NAND banks themselves are
+ * allocated in the chip probe function.
+ */
+static int fsl_ifc_ctrl_probe(struct platform_device *dev)
+{
+ int ret = 0;
+ int version, banks;
+ void __iomem *addr;
+
+ dev_info(&dev->dev, "Freescale Integrated Flash Controller\n");
+
+ fsl_ifc_ctrl_dev = devm_kzalloc(&dev->dev, sizeof(*fsl_ifc_ctrl_dev),
+ GFP_KERNEL);
+ if (!fsl_ifc_ctrl_dev)
+ return -ENOMEM;
+
+ dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev);
+
+ /* IOMAP the entire IFC region */
+ fsl_ifc_ctrl_dev->gregs = of_iomap(dev->dev.of_node, 0);
+ if (!fsl_ifc_ctrl_dev->gregs) {
+ dev_err(&dev->dev, "failed to get memory region\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_bool(dev->dev.of_node, "little-endian")) {
+ fsl_ifc_ctrl_dev->little_endian = true;
+ dev_dbg(&dev->dev, "IFC REGISTERS are LITTLE endian\n");
+ } else {
+ fsl_ifc_ctrl_dev->little_endian = false;
+ dev_dbg(&dev->dev, "IFC REGISTERS are BIG endian\n");
+ }
+
+ version = ifc_in32(&fsl_ifc_ctrl_dev->gregs->ifc_rev) &
+ FSL_IFC_VERSION_MASK;
+
+ banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8;
+ dev_info(&dev->dev, "IFC version %d.%d, %d banks\n",
+ version >> 24, (version >> 16) & 0xf, banks);
+
+ fsl_ifc_ctrl_dev->version = version;
+ fsl_ifc_ctrl_dev->banks = banks;
+
+ addr = fsl_ifc_ctrl_dev->gregs;
+ if (version >= FSL_IFC_VERSION_2_0_0)
+ addr += PGOFFSET_64K;
+ else
+ addr += PGOFFSET_4K;
+ fsl_ifc_ctrl_dev->rregs = addr;
+
+ /* get the Controller level irq */
+ fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
+ if (fsl_ifc_ctrl_dev->irq == 0) {
+ dev_err(&dev->dev, "failed to get irq resource for IFC\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* get the nand machine irq */
+ fsl_ifc_ctrl_dev->nand_irq =
+ irq_of_parse_and_map(dev->dev.of_node, 1);
+
+ fsl_ifc_ctrl_dev->dev = &dev->dev;
+
+ ret = fsl_ifc_ctrl_init(fsl_ifc_ctrl_dev);
+ if (ret < 0)
+ goto err_unmap_nandirq;
+
+ init_waitqueue_head(&fsl_ifc_ctrl_dev->nand_wait);
+
+ ret = request_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_irq, IRQF_SHARED,
+ "fsl-ifc", fsl_ifc_ctrl_dev);
+ if (ret != 0) {
+ dev_err(&dev->dev, "failed to install irq (%d)\n",
+ fsl_ifc_ctrl_dev->irq);
+ goto err_unmap_nandirq;
+ }
+
+ if (fsl_ifc_ctrl_dev->nand_irq) {
+ ret = request_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_nand_irq,
+ 0, "fsl-ifc-nand", fsl_ifc_ctrl_dev);
+ if (ret != 0) {
+ dev_err(&dev->dev, "failed to install irq (%d)\n",
+ fsl_ifc_ctrl_dev->nand_irq);
+ goto err_free_irq;
+ }
+ }
+
+ /* legacy dts may still use "simple-bus" compatible */
+ ret = of_platform_default_populate(dev->dev.of_node, NULL, &dev->dev);
+ if (ret)
+ goto err_free_nandirq;
+
+ return 0;
+
+err_free_nandirq:
+ free_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_ctrl_dev);
+err_free_irq:
+ free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev);
+err_unmap_nandirq:
+ irq_dispose_mapping(fsl_ifc_ctrl_dev->nand_irq);
+ irq_dispose_mapping(fsl_ifc_ctrl_dev->irq);
+err:
+ iounmap(fsl_ifc_ctrl_dev->gregs);
+ return ret;
+}
+
+static const struct of_device_id fsl_ifc_match[] = {
+ {
+ .compatible = "fsl,ifc",
+ },
+ {},
+};
+
+static struct platform_driver fsl_ifc_ctrl_driver = {
+ .driver = {
+ .name = "fsl-ifc",
+ .of_match_table = fsl_ifc_match,
+ },
+ .probe = fsl_ifc_ctrl_probe,
+ .remove = fsl_ifc_ctrl_remove,
+};
+
+static int __init fsl_ifc_init(void)
+{
+ return platform_driver_register(&fsl_ifc_ctrl_driver);
+}
+subsys_initcall(fsl_ifc_init);
+
+MODULE_AUTHOR("Freescale Semiconductor");
+MODULE_DESCRIPTION("Freescale Integrated Flash Controller driver");
diff --git a/drivers/memory/jedec_ddr.h b/drivers/memory/jedec_ddr.h
new file mode 100644
index 0000000000..6cd508478b
--- /dev/null
+++ b/drivers/memory/jedec_ddr.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Definitions for DDR memories based on JEDEC specs
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Aneesh V <aneesh@ti.com>
+ */
+#ifndef __JEDEC_DDR_H
+#define __JEDEC_DDR_H
+
+#include <linux/types.h>
+
+/* DDR Densities */
+#define DDR_DENSITY_64Mb 1
+#define DDR_DENSITY_128Mb 2
+#define DDR_DENSITY_256Mb 3
+#define DDR_DENSITY_512Mb 4
+#define DDR_DENSITY_1Gb 5
+#define DDR_DENSITY_2Gb 6
+#define DDR_DENSITY_4Gb 7
+#define DDR_DENSITY_8Gb 8
+#define DDR_DENSITY_16Gb 9
+#define DDR_DENSITY_32Gb 10
+
+/* DDR type */
+#define DDR_TYPE_DDR2 1
+#define DDR_TYPE_DDR3 2
+#define DDR_TYPE_LPDDR2_S4 3
+#define DDR_TYPE_LPDDR2_S2 4
+#define DDR_TYPE_LPDDR2_NVM 5
+#define DDR_TYPE_LPDDR3 6
+
+/* DDR IO width */
+#define DDR_IO_WIDTH_4 1
+#define DDR_IO_WIDTH_8 2
+#define DDR_IO_WIDTH_16 3
+#define DDR_IO_WIDTH_32 4
+
+/* Number of Row bits */
+#define R9 9
+#define R10 10
+#define R11 11
+#define R12 12
+#define R13 13
+#define R14 14
+#define R15 15
+#define R16 16
+
+/* Number of Column bits */
+#define C7 7
+#define C8 8
+#define C9 9
+#define C10 10
+#define C11 11
+#define C12 12
+
+/* Number of Banks */
+#define B1 0
+#define B2 1
+#define B4 2
+#define B8 3
+
+/* Refresh rate in nanoseconds */
+#define T_REFI_15_6 15600
+#define T_REFI_7_8 7800
+#define T_REFI_3_9 3900
+
+/* tRFC values */
+#define T_RFC_90 90000
+#define T_RFC_110 110000
+#define T_RFC_130 130000
+#define T_RFC_160 160000
+#define T_RFC_210 210000
+#define T_RFC_300 300000
+#define T_RFC_350 350000
+
+/* Mode register numbers */
+#define DDR_MR0 0
+#define DDR_MR1 1
+#define DDR_MR2 2
+#define DDR_MR3 3
+#define DDR_MR4 4
+#define DDR_MR5 5
+#define DDR_MR6 6
+#define DDR_MR7 7
+#define DDR_MR8 8
+#define DDR_MR9 9
+#define DDR_MR10 10
+#define DDR_MR11 11
+#define DDR_MR16 16
+#define DDR_MR17 17
+#define DDR_MR18 18
+
+/*
+ * LPDDR2 related defines
+ */
+
+/* MR4 register fields */
+#define MR4_SDRAM_REF_RATE_SHIFT 0
+#define MR4_SDRAM_REF_RATE_MASK 7
+#define MR4_TUF_SHIFT 7
+#define MR4_TUF_MASK (1 << 7)
+
+/* MR4 SDRAM Refresh Rate field values */
+#define SDRAM_TEMP_NOMINAL 0x3
+#define SDRAM_TEMP_RESERVED_4 0x4
+#define SDRAM_TEMP_HIGH_DERATE_REFRESH 0x5
+#define SDRAM_TEMP_HIGH_DERATE_REFRESH_AND_TIMINGS 0x6
+#define SDRAM_TEMP_VERY_HIGH_SHUTDOWN 0x7
+
+#define NUM_DDR_ADDR_TABLE_ENTRIES 11
+#define NUM_DDR_TIMING_TABLE_ENTRIES 4
+
+#define LPDDR2_MANID_SAMSUNG 1
+#define LPDDR2_MANID_QIMONDA 2
+#define LPDDR2_MANID_ELPIDA 3
+#define LPDDR2_MANID_ETRON 4
+#define LPDDR2_MANID_NANYA 5
+#define LPDDR2_MANID_HYNIX 6
+#define LPDDR2_MANID_MOSEL 7
+#define LPDDR2_MANID_WINBOND 8
+#define LPDDR2_MANID_ESMT 9
+#define LPDDR2_MANID_SPANSION 11
+#define LPDDR2_MANID_SST 12
+#define LPDDR2_MANID_ZMOS 13
+#define LPDDR2_MANID_INTEL 14
+#define LPDDR2_MANID_NUMONYX 254
+#define LPDDR2_MANID_MICRON 255
+
+#define LPDDR2_TYPE_S4 0
+#define LPDDR2_TYPE_S2 1
+#define LPDDR2_TYPE_NVM 2
+
+/* Structure for DDR addressing info from the JEDEC spec */
+struct lpddr2_addressing {
+ u32 num_banks;
+ u32 tREFI_ns;
+ u32 tRFCab_ps;
+};
+
+/*
+ * Structure for timings from the LPDDR2 datasheet.
+ * All parameters are in picoseconds (ps) unless explicitly indicated
+ * with a suffix like tRAS_max_ns below.
+ */
+struct lpddr2_timings {
+ u32 max_freq;
+ u32 min_freq;
+ u32 tRPab;
+ u32 tRCD;
+ u32 tWR;
+ u32 tRAS_min;
+ u32 tRRD;
+ u32 tWTR;
+ u32 tXP;
+ u32 tRTP;
+ u32 tCKESR;
+ u32 tDQSCK_max;
+ u32 tDQSCK_max_derated;
+ u32 tFAW;
+ u32 tZQCS;
+ u32 tZQCL;
+ u32 tZQinit;
+ u32 tRAS_max_ns;
+};
+
+/*
+ * Minimum values for some parameters in terms of the number of tCK cycles
+ * (nCK). Set parameters that are not valid for a given memory type to
+ * zero.
+ */
+struct lpddr2_min_tck {
+ u32 tRPab;
+ u32 tRCD;
+ u32 tWR;
+ u32 tRASmin;
+ u32 tRRD;
+ u32 tWTR;
+ u32 tXP;
+ u32 tRTP;
+ u32 tCKE;
+ u32 tCKESR;
+ u32 tFAW;
+};
+
+extern const struct lpddr2_addressing
+ lpddr2_jedec_addressing_table[NUM_DDR_ADDR_TABLE_ENTRIES];
+extern const struct lpddr2_timings
+ lpddr2_jedec_timings[NUM_DDR_TIMING_TABLE_ENTRIES];
+extern const struct lpddr2_min_tck lpddr2_jedec_min_tck;
+
+/* Structure of MR8 */
+union lpddr2_basic_config4 {
+ u32 value;
+
+ struct {
+ unsigned int arch_type : 2;
+ unsigned int density : 4;
+ unsigned int io_width : 2;
+ } __packed;
+};
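+
+/*
+ * Sketch of decoding a raw MR8 value through the union above (the mr8
+ * variable is hypothetical):
+ *
+ *	union lpddr2_basic_config4 cfg = { .value = mr8 };
+ *
+ *	if (cfg.arch_type == LPDDR2_TYPE_S4)
+ *		pr_info("LPDDR2-S4, density code %u\n", cfg.density);
+ */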
+
+/*
+ * Structure for information about an LPDDR2 chip. All parameters match
+ * the raw values of the standard mode register bitfields, or are set to
+ * -ENOENT if the info is unavailable.
+ */
+struct lpddr2_info {
+ int arch_type;
+ int density;
+ int io_width;
+ int manufacturer_id;
+ int revision_id1;
+ int revision_id2;
+};
+
+const char *lpddr2_jedec_manufacturer(unsigned int manufacturer_id);
+
+/*
+ * Structure for LPDDR3 timings, based on LPDDR2 plus additional fields.
+ * All parameters are in picoseconds (ps), except max_freq and min_freq,
+ * which are in Hz.
+ */
+struct lpddr3_timings {
+ u32 max_freq;
+ u32 min_freq;
+ u32 tRFC;
+ u32 tRRD;
+ u32 tRPab;
+ u32 tRPpb;
+ u32 tRCD;
+ u32 tRC;
+ u32 tRAS;
+ u32 tWTR;
+ u32 tWR;
+ u32 tRTP;
+ u32 tW2W_C2C;
+ u32 tR2R_C2C;
+ u32 tWL;
+ u32 tDQSCK;
+ u32 tRL;
+ u32 tFAW;
+ u32 tXSR;
+ u32 tXP;
+ u32 tCKE;
+ u32 tCKESR;
+ u32 tMRD;
+};
+
+/*
+ * Minimum values for some parameters in terms of the number of tCK cycles
+ * (nCK). Set parameters that are not valid for a given memory type to
+ * zero.
+ */
+struct lpddr3_min_tck {
+ u32 tRFC;
+ u32 tRRD;
+ u32 tRPab;
+ u32 tRPpb;
+ u32 tRCD;
+ u32 tRC;
+ u32 tRAS;
+ u32 tWTR;
+ u32 tWR;
+ u32 tRTP;
+ u32 tW2W_C2C;
+ u32 tR2R_C2C;
+ u32 tWL;
+ u32 tDQSCK;
+ u32 tRL;
+ u32 tFAW;
+ u32 tXSR;
+ u32 tXP;
+ u32 tCKE;
+ u32 tCKESR;
+ u32 tMRD;
+};
+
+#endif /* __JEDEC_DDR_H */
diff --git a/drivers/memory/jedec_ddr_data.c b/drivers/memory/jedec_ddr_data.c
new file mode 100644
index 0000000000..2cca4fa188
--- /dev/null
+++ b/drivers/memory/jedec_ddr_data.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * DDR addressing details and AC timing parameters from JEDEC specs
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Aneesh V <aneesh@ti.com>
+ */
+
+#include <linux/export.h>
+
+#include "jedec_ddr.h"
+
+/* LPDDR2 addressing details from JESD209-2 section 2.4 */
+const struct lpddr2_addressing
+ lpddr2_jedec_addressing_table[NUM_DDR_ADDR_TABLE_ENTRIES] = {
+ {B4, T_REFI_15_6, T_RFC_90}, /* 64M */
+ {B4, T_REFI_15_6, T_RFC_90}, /* 128M */
+ {B4, T_REFI_7_8, T_RFC_90}, /* 256M */
+ {B4, T_REFI_7_8, T_RFC_90}, /* 512M */
+ {B8, T_REFI_7_8, T_RFC_130}, /* 1GS4 */
+ {B8, T_REFI_3_9, T_RFC_130}, /* 2GS4 */
+ {B8, T_REFI_3_9, T_RFC_130}, /* 4G */
+ {B8, T_REFI_3_9, T_RFC_210}, /* 8G */
+ {B4, T_REFI_7_8, T_RFC_130}, /* 1GS2 */
+ {B4, T_REFI_3_9, T_RFC_130}, /* 2GS2 */
+};
+EXPORT_SYMBOL_GPL(lpddr2_jedec_addressing_table);
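+
+/*
+ * Reading the table above: the "2GS4" entry, for instance, describes a
+ * 2 Gb S4 device with 8 banks (B8), a 3.9 us refresh interval
+ * (T_REFI_3_9) and a tRFCab of 130 ns (T_RFC_130).
+ */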
+
+/* LPDDR2 AC timing parameters from JESD209-2 section 12 */
+const struct lpddr2_timings
+ lpddr2_jedec_timings[NUM_DDR_TIMING_TABLE_ENTRIES] = {
+ /* Speed bin 400(200 MHz) */
+ [0] = {
+ .max_freq = 200000000,
+ .min_freq = 10000000,
+ .tRPab = 21000,
+ .tRCD = 18000,
+ .tWR = 15000,
+ .tRAS_min = 42000,
+ .tRRD = 10000,
+ .tWTR = 10000,
+ .tXP = 7500,
+ .tRTP = 7500,
+ .tCKESR = 15000,
+ .tDQSCK_max = 5500,
+ .tFAW = 50000,
+ .tZQCS = 90000,
+ .tZQCL = 360000,
+ .tZQinit = 1000000,
+ .tRAS_max_ns = 70000,
+ .tDQSCK_max_derated = 6000,
+ },
+ /* Speed bin 533(266 MHz) */
+ [1] = {
+ .max_freq = 266666666,
+ .min_freq = 10000000,
+ .tRPab = 21000,
+ .tRCD = 18000,
+ .tWR = 15000,
+ .tRAS_min = 42000,
+ .tRRD = 10000,
+ .tWTR = 7500,
+ .tXP = 7500,
+ .tRTP = 7500,
+ .tCKESR = 15000,
+ .tDQSCK_max = 5500,
+ .tFAW = 50000,
+ .tZQCS = 90000,
+ .tZQCL = 360000,
+ .tZQinit = 1000000,
+ .tRAS_max_ns = 70000,
+ .tDQSCK_max_derated = 6000,
+ },
+ /* Speed bin 800(400 MHz) */
+ [2] = {
+ .max_freq = 400000000,
+ .min_freq = 10000000,
+ .tRPab = 21000,
+ .tRCD = 18000,
+ .tWR = 15000,
+ .tRAS_min = 42000,
+ .tRRD = 10000,
+ .tWTR = 7500,
+ .tXP = 7500,
+ .tRTP = 7500,
+ .tCKESR = 15000,
+ .tDQSCK_max = 5500,
+ .tFAW = 50000,
+ .tZQCS = 90000,
+ .tZQCL = 360000,
+ .tZQinit = 1000000,
+ .tRAS_max_ns = 70000,
+ .tDQSCK_max_derated = 6000,
+ },
+ /* Speed bin 1066(533 MHz) */
+ [3] = {
+ .max_freq = 533333333,
+ .min_freq = 10000000,
+ .tRPab = 21000,
+ .tRCD = 18000,
+ .tWR = 15000,
+ .tRAS_min = 42000,
+ .tRRD = 10000,
+ .tWTR = 7500,
+ .tXP = 7500,
+ .tRTP = 7500,
+ .tCKESR = 15000,
+ .tDQSCK_max = 5500,
+ .tFAW = 50000,
+ .tZQCS = 90000,
+ .tZQCL = 360000,
+ .tZQinit = 1000000,
+ .tRAS_max_ns = 70000,
+ .tDQSCK_max_derated = 5620,
+ },
+};
+EXPORT_SYMBOL_GPL(lpddr2_jedec_timings);
+
+const struct lpddr2_min_tck lpddr2_jedec_min_tck = {
+ .tRPab = 3,
+ .tRCD = 3,
+ .tWR = 3,
+ .tRASmin = 3,
+ .tRRD = 2,
+ .tWTR = 2,
+ .tXP = 2,
+ .tRTP = 2,
+ .tCKE = 3,
+ .tCKESR = 3,
+ .tFAW = 8
+};
+EXPORT_SYMBOL_GPL(lpddr2_jedec_min_tck);
+
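+/*
+ * Map a JEDEC manufacturer ID to a printable name, e.g.
+ * lpddr2_jedec_manufacturer(LPDDR2_MANID_MICRON) returns "Micron";
+ * unrecognised IDs return "invalid".
+ */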
+const char *lpddr2_jedec_manufacturer(unsigned int manufacturer_id)
+{
+ switch (manufacturer_id) {
+ case LPDDR2_MANID_SAMSUNG:
+ return "Samsung";
+ case LPDDR2_MANID_QIMONDA:
+ return "Qimonda";
+ case LPDDR2_MANID_ELPIDA:
+ return "Elpida";
+ case LPDDR2_MANID_ETRON:
+ return "Etron";
+ case LPDDR2_MANID_NANYA:
+ return "Nanya";
+ case LPDDR2_MANID_HYNIX:
+ return "Hynix";
+ case LPDDR2_MANID_MOSEL:
+ return "Mosel";
+ case LPDDR2_MANID_WINBOND:
+ return "Winbond";
+ case LPDDR2_MANID_ESMT:
+ return "ESMT";
+ case LPDDR2_MANID_SPANSION:
+ return "Spansion";
+ case LPDDR2_MANID_SST:
+ return "SST";
+ case LPDDR2_MANID_ZMOS:
+ return "ZMOS";
+ case LPDDR2_MANID_INTEL:
+ return "Intel";
+ case LPDDR2_MANID_NUMONYX:
+ return "Numonyx";
+ case LPDDR2_MANID_MICRON:
+ return "Micron";
+ default:
+ break;
+ }
+
+ return "invalid";
+}
+EXPORT_SYMBOL_GPL(lpddr2_jedec_manufacturer);
diff --git a/drivers/memory/jz4780-nemc.c b/drivers/memory/jz4780-nemc.c
new file mode 100644
index 0000000000..e5a93e7da1
--- /dev/null
+++ b/drivers/memory/jz4780-nemc.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * JZ4780 NAND/external memory controller (NEMC)
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ * Author: Alex Smith <alex@alex-smith.me.uk>
+ */
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/math64.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <linux/jz4780-nemc.h>
+
+#define NEMC_SMCRn(n) (0x14 + (((n) - 1) * 4))
+#define NEMC_NFCSR 0x50
+
+#define NEMC_REG_LEN 0x54
+
+#define NEMC_SMCR_SMT BIT(0)
+#define NEMC_SMCR_BW_SHIFT 6
+#define NEMC_SMCR_BW_MASK (0x3 << NEMC_SMCR_BW_SHIFT)
+#define NEMC_SMCR_BW_8 (0 << 6)
+#define NEMC_SMCR_TAS_SHIFT 8
+#define NEMC_SMCR_TAS_MASK (0xf << NEMC_SMCR_TAS_SHIFT)
+#define NEMC_SMCR_TAH_SHIFT 12
+#define NEMC_SMCR_TAH_MASK (0xf << NEMC_SMCR_TAH_SHIFT)
+#define NEMC_SMCR_TBP_SHIFT 16
+#define NEMC_SMCR_TBP_MASK (0xf << NEMC_SMCR_TBP_SHIFT)
+#define NEMC_SMCR_TAW_SHIFT 20
+#define NEMC_SMCR_TAW_MASK (0xf << NEMC_SMCR_TAW_SHIFT)
+#define NEMC_SMCR_TSTRV_SHIFT 24
+#define NEMC_SMCR_TSTRV_MASK (0x3f << NEMC_SMCR_TSTRV_SHIFT)
+
+#define NEMC_NFCSR_NFEn(n) BIT(((n) - 1) << 1)
+#define NEMC_NFCSR_NFCEn(n) BIT((((n) - 1) << 1) + 1)
+#define NEMC_NFCSR_TNFEn(n) BIT(16 + (n) - 1)
+
+struct jz_soc_info {
+ u8 tas_tah_cycles_max;
+};
+
+struct jz4780_nemc {
+ spinlock_t lock;
+ struct device *dev;
+ const struct jz_soc_info *soc_info;
+ void __iomem *base;
+ struct clk *clk;
+ uint32_t clk_period;
+ unsigned long banks_present;
+};
+
+/**
+ * jz4780_nemc_num_banks() - count the number of banks referenced by a device
+ * @dev: device to count banks for, must be a child of the NEMC.
+ *
+ * Return: The number of unique NEMC banks referred to by the specified NEMC
+ * child device. Unique here means that a device that references the same bank
+ * multiple times in its "reg" property will only count once.
+ */
+unsigned int jz4780_nemc_num_banks(struct device *dev)
+{
+ const __be32 *prop;
+ unsigned int bank, count = 0;
+ unsigned long referenced = 0;
+ int i = 0;
+
+ while ((prop = of_get_address(dev->of_node, i++, NULL, NULL))) {
+ bank = of_read_number(prop, 1);
+ if (!(referenced & BIT(bank))) {
+ referenced |= BIT(bank);
+ count++;
+ }
+ }
+
+ return count;
+}
+EXPORT_SYMBOL(jz4780_nemc_num_banks);
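+
+/*
+ * For example (hypothetical node): a child whose "reg" property names
+ * bank 1 in two entries and bank 2 in a third makes
+ * jz4780_nemc_num_banks() return 2, since bank 1 is only counted once.
+ */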
+
+/**
+ * jz4780_nemc_set_type() - set the type of device connected to a bank
+ * @dev: child device of the NEMC.
+ * @bank: bank number to configure.
+ * @type: type of device connected to the bank.
+ */
+void jz4780_nemc_set_type(struct device *dev, unsigned int bank,
+ enum jz4780_nemc_bank_type type)
+{
+ struct jz4780_nemc *nemc = dev_get_drvdata(dev->parent);
+ uint32_t nfcsr;
+
+ nfcsr = readl(nemc->base + NEMC_NFCSR);
+
+ /* TODO: Support toggle NAND devices. */
+ switch (type) {
+ case JZ4780_NEMC_BANK_SRAM:
+ nfcsr &= ~(NEMC_NFCSR_TNFEn(bank) | NEMC_NFCSR_NFEn(bank));
+ break;
+ case JZ4780_NEMC_BANK_NAND:
+ nfcsr &= ~NEMC_NFCSR_TNFEn(bank);
+ nfcsr |= NEMC_NFCSR_NFEn(bank);
+ break;
+ }
+
+ writel(nfcsr, nemc->base + NEMC_NFCSR);
+}
+EXPORT_SYMBOL(jz4780_nemc_set_type);
+
+/**
+ * jz4780_nemc_assert() - (de-)assert a NAND device's chip enable pin
+ * @dev: child device of the NEMC.
+ * @bank: bank number of device.
+ * @assert: whether the chip enable pin should be asserted.
+ *
+ * (De-)asserts the chip enable pin for the NAND device connected to the
+ * specified bank.
+ */
+void jz4780_nemc_assert(struct device *dev, unsigned int bank, bool assert)
+{
+ struct jz4780_nemc *nemc = dev_get_drvdata(dev->parent);
+ uint32_t nfcsr;
+
+ nfcsr = readl(nemc->base + NEMC_NFCSR);
+
+ if (assert)
+ nfcsr |= NEMC_NFCSR_NFCEn(bank);
+ else
+ nfcsr &= ~NEMC_NFCSR_NFCEn(bank);
+
+ writel(nfcsr, nemc->base + NEMC_NFCSR);
+}
+EXPORT_SYMBOL(jz4780_nemc_assert);
+
+static uint32_t jz4780_nemc_clk_period(struct jz4780_nemc *nemc)
+{
+ unsigned long rate;
+
+ rate = clk_get_rate(nemc->clk);
+ if (!rate)
+ return 0;
+
+ /* Return in picoseconds. */
+ return div64_ul(1000000000000ull, rate);
+}
+
+static uint32_t jz4780_nemc_ns_to_cycles(struct jz4780_nemc *nemc, uint32_t ns)
+{
+ return ((ns * 1000) + nemc->clk_period - 1) / nemc->clk_period;
+}
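+
+/*
+ * For example, with a hypothetical 200 MHz NEMC clock the period is
+ * 5000 ps, so a 25 ns timing rounds up to (25 * 1000 + 4999) / 5000 = 5
+ * cycles.
+ */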
+
+static bool jz4780_nemc_configure_bank(struct jz4780_nemc *nemc,
+ unsigned int bank,
+ struct device_node *node)
+{
+ uint32_t smcr, val, cycles;
+
+ /*
+ * Conversion of tBP and tAW cycle counts to values supported by the
+ * hardware (round up to the next supported value).
+ */
+ static const u8 convert_tBP_tAW[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+
+ /* 11 - 12 -> 12 cycles */
+ 11, 11,
+
+ /* 13 - 15 -> 15 cycles */
+ 12, 12, 12,
+
+ /* 16 - 20 -> 20 cycles */
+ 13, 13, 13, 13, 13,
+
+ /* 21 - 25 -> 25 cycles */
+ 14, 14, 14, 14, 14,
+
+ /* 26 - 31 -> 31 cycles */
+ 15, 15, 15, 15, 15, 15
+ };
+
+ smcr = readl(nemc->base + NEMC_SMCRn(bank));
+ smcr &= ~NEMC_SMCR_SMT;
+
+ if (!of_property_read_u32(node, "ingenic,nemc-bus-width", &val)) {
+ smcr &= ~NEMC_SMCR_BW_MASK;
+ switch (val) {
+ case 8:
+ smcr |= NEMC_SMCR_BW_8;
+ break;
+ default:
+			/*
+			 * Earlier SoCs support a 16-bit bus width (the 4780
+			 * does not); until those are properly supported,
+			 * return an error.
+			 */
+ dev_err(nemc->dev, "unsupported bus width: %u\n", val);
+ return false;
+ }
+ }
+
+ if (of_property_read_u32(node, "ingenic,nemc-tAS", &val) == 0) {
+ smcr &= ~NEMC_SMCR_TAS_MASK;
+ cycles = jz4780_nemc_ns_to_cycles(nemc, val);
+ if (cycles > nemc->soc_info->tas_tah_cycles_max) {
+ dev_err(nemc->dev, "tAS %u is too high (%u cycles)\n",
+ val, cycles);
+ return false;
+ }
+
+ smcr |= cycles << NEMC_SMCR_TAS_SHIFT;
+ }
+
+ if (of_property_read_u32(node, "ingenic,nemc-tAH", &val) == 0) {
+ smcr &= ~NEMC_SMCR_TAH_MASK;
+ cycles = jz4780_nemc_ns_to_cycles(nemc, val);
+ if (cycles > nemc->soc_info->tas_tah_cycles_max) {
+ dev_err(nemc->dev, "tAH %u is too high (%u cycles)\n",
+ val, cycles);
+ return false;
+ }
+
+ smcr |= cycles << NEMC_SMCR_TAH_SHIFT;
+ }
+
+ if (of_property_read_u32(node, "ingenic,nemc-tBP", &val) == 0) {
+ smcr &= ~NEMC_SMCR_TBP_MASK;
+ cycles = jz4780_nemc_ns_to_cycles(nemc, val);
+ if (cycles > 31) {
+ dev_err(nemc->dev, "tBP %u is too high (%u cycles)\n",
+ val, cycles);
+ return false;
+ }
+
+ smcr |= convert_tBP_tAW[cycles] << NEMC_SMCR_TBP_SHIFT;
+ }
+
+ if (of_property_read_u32(node, "ingenic,nemc-tAW", &val) == 0) {
+ smcr &= ~NEMC_SMCR_TAW_MASK;
+ cycles = jz4780_nemc_ns_to_cycles(nemc, val);
+ if (cycles > 31) {
+ dev_err(nemc->dev, "tAW %u is too high (%u cycles)\n",
+ val, cycles);
+ return false;
+ }
+
+ smcr |= convert_tBP_tAW[cycles] << NEMC_SMCR_TAW_SHIFT;
+ }
+
+ if (of_property_read_u32(node, "ingenic,nemc-tSTRV", &val) == 0) {
+ smcr &= ~NEMC_SMCR_TSTRV_MASK;
+ cycles = jz4780_nemc_ns_to_cycles(nemc, val);
+ if (cycles > 63) {
+ dev_err(nemc->dev, "tSTRV %u is too high (%u cycles)\n",
+ val, cycles);
+ return false;
+ }
+
+ smcr |= cycles << NEMC_SMCR_TSTRV_SHIFT;
+ }
+
+ writel(smcr, nemc->base + NEMC_SMCRn(bank));
+ return true;
+}
+
+static int jz4780_nemc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct jz4780_nemc *nemc;
+ struct resource *res;
+ struct device_node *child;
+ const __be32 *prop;
+ unsigned int bank;
+ unsigned long referenced;
+ int i, ret;
+
+ nemc = devm_kzalloc(dev, sizeof(*nemc), GFP_KERNEL);
+ if (!nemc)
+ return -ENOMEM;
+
+ nemc->soc_info = device_get_match_data(dev);
+ if (!nemc->soc_info)
+ return -EINVAL;
+
+ spin_lock_init(&nemc->lock);
+ nemc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ /*
+ * The driver currently only uses the registers up to offset
+ * NEMC_REG_LEN. Since the EFUSE registers are in the middle of the
+ * NEMC registers, we only request the registers we will use for now;
+ * that way the EFUSE driver can probe too.
+ */
+ if (!devm_request_mem_region(dev, res->start, NEMC_REG_LEN, dev_name(dev))) {
+ dev_err(dev, "unable to request I/O memory region\n");
+ return -EBUSY;
+ }
+
+ nemc->base = devm_ioremap(dev, res->start, NEMC_REG_LEN);
+ if (!nemc->base) {
+ dev_err(dev, "failed to get I/O memory\n");
+ return -ENOMEM;
+ }
+
+ writel(0, nemc->base + NEMC_NFCSR);
+
+ nemc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(nemc->clk)) {
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(nemc->clk);
+ }
+
+ ret = clk_prepare_enable(nemc->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clock: %d\n", ret);
+ return ret;
+ }
+
+ nemc->clk_period = jz4780_nemc_clk_period(nemc);
+ if (!nemc->clk_period) {
+ dev_err(dev, "failed to calculate clock period\n");
+ clk_disable_unprepare(nemc->clk);
+ return -EINVAL;
+ }
+
+ /*
+ * Iterate over child devices, check that they do not conflict with
+ * each other, and register child devices for them. If a child device
+ * has invalid properties, it is ignored and no platform device is
+ * registered for it.
+ */
+ for_each_child_of_node(nemc->dev->of_node, child) {
+ referenced = 0;
+ i = 0;
+ while ((prop = of_get_address(child, i++, NULL, NULL))) {
+ bank = of_read_number(prop, 1);
+ if (bank < 1 || bank >= JZ4780_NEMC_NUM_BANKS) {
+ dev_err(nemc->dev,
+ "%pOF requests invalid bank %u\n",
+ child, bank);
+
+ /* Will continue the outer loop below. */
+ referenced = 0;
+ break;
+ }
+
+ referenced |= BIT(bank);
+ }
+
+ if (!referenced) {
+ dev_err(nemc->dev, "%pOF has no addresses\n",
+ child);
+ continue;
+ } else if (nemc->banks_present & referenced) {
+ dev_err(nemc->dev, "%pOF conflicts with another node\n",
+ child);
+ continue;
+ }
+
+ /* Configure bank parameters. */
+ for_each_set_bit(bank, &referenced, JZ4780_NEMC_NUM_BANKS) {
+ if (!jz4780_nemc_configure_bank(nemc, bank, child)) {
+ referenced = 0;
+ break;
+ }
+ }
+
+ if (referenced) {
+ if (of_platform_device_create(child, NULL, nemc->dev))
+ nemc->banks_present |= referenced;
+ }
+ }
+
+ platform_set_drvdata(pdev, nemc);
+ dev_info(dev, "JZ4780 NEMC initialised\n");
+ return 0;
+}
+
+static int jz4780_nemc_remove(struct platform_device *pdev)
+{
+ struct jz4780_nemc *nemc = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(nemc->clk);
+ return 0;
+}
+
+static const struct jz_soc_info jz4740_soc_info = {
+ .tas_tah_cycles_max = 7,
+};
+
+static const struct jz_soc_info jz4780_soc_info = {
+ .tas_tah_cycles_max = 15,
+};
+
+static const struct of_device_id jz4780_nemc_dt_match[] = {
+ { .compatible = "ingenic,jz4740-nemc", .data = &jz4740_soc_info, },
+ { .compatible = "ingenic,jz4780-nemc", .data = &jz4780_soc_info, },
+ {},
+};
+
+static struct platform_driver jz4780_nemc_driver = {
+ .probe = jz4780_nemc_probe,
+ .remove = jz4780_nemc_remove,
+ .driver = {
+ .name = "jz4780-nemc",
+ .of_match_table = of_match_ptr(jz4780_nemc_dt_match),
+ },
+};
+
+static int __init jz4780_nemc_init(void)
+{
+ return platform_driver_register(&jz4780_nemc_driver);
+}
+subsys_initcall(jz4780_nemc_init);
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
new file mode 100644
index 0000000000..6523cb5105
--- /dev/null
+++ b/drivers/memory/mtk-smi.c
@@ -0,0 +1,871 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#include <linux/arm-smccc.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/soc/mediatek/mtk_sip_svc.h>
+#include <soc/mediatek/smi.h>
+#include <dt-bindings/memory/mt2701-larb-port.h>
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/* SMI COMMON */
+#define SMI_L1LEN 0x100
+
+#define SMI_L1_ARB 0x200
+#define SMI_BUS_SEL 0x220
+#define SMI_BUS_LARB_SHIFT(larbid) ((larbid) << 1)
+/* All ports default to MMU0; only the MMU1 assignments are specified here. */
+#define F_MMU1_LARB(larbid) (0x1 << SMI_BUS_LARB_SHIFT(larbid))
+
+#define SMI_READ_FIFO_TH 0x230
+#define SMI_M4U_TH 0x234
+#define SMI_FIFO_TH1 0x238
+#define SMI_FIFO_TH2 0x23c
+#define SMI_DCM 0x300
+#define SMI_DUMMY 0x444
+
+/* SMI LARB */
+#define SMI_LARB_SLP_CON 0xc
+#define SLP_PROT_EN BIT(0)
+#define SLP_PROT_RDY BIT(16)
+
+#define SMI_LARB_CMD_THRT_CON 0x24
+#define SMI_LARB_THRT_RD_NU_LMT_MSK GENMASK(7, 4)
+#define SMI_LARB_THRT_RD_NU_LMT (5 << 4)
+
+#define SMI_LARB_SW_FLAG 0x40
+#define SMI_LARB_SW_FLAG_1 0x1
+
+#define SMI_LARB_OSTDL_PORT 0x200
+#define SMI_LARB_OSTDL_PORTx(id) (SMI_LARB_OSTDL_PORT + (((id) & 0x1f) << 2))
+
+/* Below are about mmu enable registers, they are different in SoCs */
+/* gen1: mt2701 */
+#define REG_SMI_SECUR_CON_BASE 0x5c0
+
+/* every register controls 8 ports; the register offset step is 0x4 */
+#define REG_SMI_SECUR_CON_OFFSET(id) (((id) >> 3) << 2)
+#define REG_SMI_SECUR_CON_ADDR(id) \
+ (REG_SMI_SECUR_CON_BASE + REG_SMI_SECUR_CON_OFFSET(id))
+
+/*
+ * Every port has 4 control bits: bit[port + 3] selects virtual or physical,
+ * bits[port + 2 : port + 1] select the domain, and bit[port] selects
+ * secure or non-secure.
+ */
+#define SMI_SECUR_CON_VAL_MSK(id) (~(0xf << (((id) & 0x7) << 2)))
+#define SMI_SECUR_CON_VAL_VIRT(id) BIT((((id) & 0x7) << 2) + 3)
+/* mt2701 domain should be set to 3 */
+#define SMI_SECUR_CON_VAL_DOMAIN(id) (0x3 << ((((id) & 0x7) << 2) + 1))
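+/*
+ * Worked example (illustrative): for m4u port 10, the register is
+ * REG_SMI_SECUR_CON_ADDR(10) = 0x5c0 + 0x4 = 0x5c4 and the port's 4-bit
+ * field occupies bits [11:8]; VAL_MSK clears those bits, VAL_VIRT sets
+ * bit 11 and VAL_DOMAIN writes domain 3 into bits [10:9].
+ */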
+
+/* gen2: */
+/* mt8167 */
+#define MT8167_SMI_LARB_MMU_EN 0xfc0
+
+/* mt8173 */
+#define MT8173_SMI_LARB_MMU_EN 0xf00
+
+/* general */
+#define SMI_LARB_NONSEC_CON(id) (0x380 + ((id) * 4))
+#define F_MMU_EN BIT(0)
+#define BANK_SEL(id) ({ \
+ u32 _id = (id) & 0x3; \
+ (_id << 8 | _id << 10 | _id << 12 | _id << 14); \
+})
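+/* e.g. BANK_SEL(2) = (2 << 8) | (2 << 10) | (2 << 12) | (2 << 14) = 0xaa00 */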
+
+#define SMI_COMMON_INIT_REGS_NR 6
+#define SMI_LARB_PORT_NR_MAX 32
+
+#define MTK_SMI_FLAG_THRT_UPDATE BIT(0)
+#define MTK_SMI_FLAG_SW_FLAG BIT(1)
+#define MTK_SMI_FLAG_SLEEP_CTL BIT(2)
+#define MTK_SMI_FLAG_CFG_PORT_SEC_CTL BIT(3)
+#define MTK_SMI_CAPS(flags, _x) (!!((flags) & (_x)))
+
+struct mtk_smi_reg_pair {
+ unsigned int offset;
+ u32 value;
+};
+
+enum mtk_smi_type {
+ MTK_SMI_GEN1,
+ MTK_SMI_GEN2, /* gen2 smi common */
+ MTK_SMI_GEN2_SUB_COMM, /* gen2 smi sub common */
+};
+
+/* larbs: the apb/smi clocks are required, while gals is optional. */
+static const char * const mtk_smi_larb_clks[] = {"apb", "smi", "gals"};
+#define MTK_SMI_LARB_REQ_CLK_NR 2
+#define MTK_SMI_LARB_OPT_CLK_NR 1
+
+/*
+ * common: requires all four clocks in the has_gals case; otherwise only apb/smi are required.
+ * sub common: requires the apb/smi/gals0 clocks in the has_gals case; otherwise only apb/smi are required.
+ */
+static const char * const mtk_smi_common_clks[] = {"apb", "smi", "gals0", "gals1"};
+#define MTK_SMI_CLK_NR_MAX ARRAY_SIZE(mtk_smi_common_clks)
+#define MTK_SMI_COM_REQ_CLK_NR 2
+#define MTK_SMI_COM_GALS_REQ_CLK_NR MTK_SMI_CLK_NR_MAX
+#define MTK_SMI_SUB_COM_GALS_REQ_CLK_NR 3
+
+struct mtk_smi_common_plat {
+ enum mtk_smi_type type;
+ bool has_gals;
+ u32 bus_sel; /* Balance some larbs to enter mmu0 or mmu1 */
+
+ const struct mtk_smi_reg_pair *init;
+};
+
+struct mtk_smi_larb_gen {
+ int port_in_larb[MTK_LARB_NR_MAX + 1];
+ int (*config_port)(struct device *dev);
+ unsigned int larb_direct_to_common_mask;
+ unsigned int flags_general;
+ const u8 (*ostd)[SMI_LARB_PORT_NR_MAX];
+};
+
+struct mtk_smi {
+ struct device *dev;
+ unsigned int clk_num;
+ struct clk_bulk_data clks[MTK_SMI_CLK_NR_MAX];
+	struct clk *clk_async; /* only needed by mt2701 */
+ union {
+ void __iomem *smi_ao_base; /* only for gen1 */
+ void __iomem *base; /* only for gen2 */
+ };
+ struct device *smi_common_dev; /* for sub common */
+ const struct mtk_smi_common_plat *plat;
+};
+
+struct mtk_smi_larb { /* larb: local arbiter */
+ struct mtk_smi smi;
+ void __iomem *base;
+ struct device *smi_common_dev; /* common or sub-common dev */
+ const struct mtk_smi_larb_gen *larb_gen;
+ int larbid;
+ u32 *mmu;
+ unsigned char *bank;
+};
+
+static int
+mtk_smi_larb_bind(struct device *dev, struct device *master, void *data)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+ struct mtk_smi_larb_iommu *larb_mmu = data;
+ unsigned int i;
+
+ for (i = 0; i < MTK_LARB_NR_MAX; i++) {
+ if (dev == larb_mmu[i].dev) {
+ larb->larbid = i;
+ larb->mmu = &larb_mmu[i].mmu;
+ larb->bank = larb_mmu[i].bank;
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+static void
+mtk_smi_larb_unbind(struct device *dev, struct device *master, void *data)
+{
+ /* Do nothing as the iommu is always enabled. */
+}
+
+static const struct component_ops mtk_smi_larb_component_ops = {
+ .bind = mtk_smi_larb_bind,
+ .unbind = mtk_smi_larb_unbind,
+};
+
+static int mtk_smi_larb_config_port_gen1(struct device *dev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+ const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen;
+ struct mtk_smi *common = dev_get_drvdata(larb->smi_common_dev);
+ int i, m4u_port_id, larb_port_num;
+ u32 sec_con_val, reg_val;
+
+ m4u_port_id = larb_gen->port_in_larb[larb->larbid];
+ larb_port_num = larb_gen->port_in_larb[larb->larbid + 1]
+ - larb_gen->port_in_larb[larb->larbid];
+
+ for (i = 0; i < larb_port_num; i++, m4u_port_id++) {
+ if (*larb->mmu & BIT(i)) {
+ /* bit[port + 3] controls the virtual or physical */
+ sec_con_val = SMI_SECUR_CON_VAL_VIRT(m4u_port_id);
+ } else {
+ /* do not need to enable m4u for this port */
+ continue;
+ }
+ reg_val = readl(common->smi_ao_base
+ + REG_SMI_SECUR_CON_ADDR(m4u_port_id));
+ reg_val &= SMI_SECUR_CON_VAL_MSK(m4u_port_id);
+ reg_val |= sec_con_val;
+ reg_val |= SMI_SECUR_CON_VAL_DOMAIN(m4u_port_id);
+ writel(reg_val,
+ common->smi_ao_base
+ + REG_SMI_SECUR_CON_ADDR(m4u_port_id));
+ }
+ return 0;
+}
+
+static int mtk_smi_larb_config_port_mt8167(struct device *dev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+
+ writel(*larb->mmu, larb->base + MT8167_SMI_LARB_MMU_EN);
+ return 0;
+}
+
+static int mtk_smi_larb_config_port_mt8173(struct device *dev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+
+ writel(*larb->mmu, larb->base + MT8173_SMI_LARB_MMU_EN);
+ return 0;
+}
+
+static int mtk_smi_larb_config_port_gen2_general(struct device *dev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+ u32 reg, flags_general = larb->larb_gen->flags_general;
+ const u8 *larbostd = larb->larb_gen->ostd ? larb->larb_gen->ostd[larb->larbid] : NULL;
+ struct arm_smccc_res res;
+ int i;
+
+ if (BIT(larb->larbid) & larb->larb_gen->larb_direct_to_common_mask)
+ return 0;
+
+ if (MTK_SMI_CAPS(flags_general, MTK_SMI_FLAG_THRT_UPDATE)) {
+ reg = readl_relaxed(larb->base + SMI_LARB_CMD_THRT_CON);
+ reg &= ~SMI_LARB_THRT_RD_NU_LMT_MSK;
+ reg |= SMI_LARB_THRT_RD_NU_LMT;
+ writel_relaxed(reg, larb->base + SMI_LARB_CMD_THRT_CON);
+ }
+
+ if (MTK_SMI_CAPS(flags_general, MTK_SMI_FLAG_SW_FLAG))
+ writel_relaxed(SMI_LARB_SW_FLAG_1, larb->base + SMI_LARB_SW_FLAG);
+
+ for (i = 0; i < SMI_LARB_PORT_NR_MAX && larbostd && !!larbostd[i]; i++)
+ writel_relaxed(larbostd[i], larb->base + SMI_LARB_OSTDL_PORTx(i));
+
+ /*
+	 * When the mmu_en bits are controlled by the secure world, bank_sel
+	 * is still programmed via LARB_NONSEC_CON below, while the mmu_en
+	 * bits of LARB_NONSEC_CON have no effect in that case.
+ */
+ if (MTK_SMI_CAPS(flags_general, MTK_SMI_FLAG_CFG_PORT_SEC_CTL)) {
+ arm_smccc_smc(MTK_SIP_KERNEL_IOMMU_CONTROL, IOMMU_ATF_CMD_CONFIG_SMI_LARB,
+ larb->larbid, *larb->mmu, 0, 0, 0, 0, &res);
+ if (res.a0 != 0) {
+ dev_err(dev, "Enable iommu fail, ret %ld\n", res.a0);
+ return -EINVAL;
+ }
+ }
+
+ for_each_set_bit(i, (unsigned long *)larb->mmu, 32) {
+ reg = readl_relaxed(larb->base + SMI_LARB_NONSEC_CON(i));
+ reg |= F_MMU_EN;
+ reg |= BANK_SEL(larb->bank[i]);
+ writel(reg, larb->base + SMI_LARB_NONSEC_CON(i));
+ }
+ return 0;
+}
+
+static const u8 mtk_smi_larb_mt8188_ostd[][SMI_LARB_PORT_NR_MAX] = {
+ [0] = {0x02, 0x18, 0x22, 0x22, 0x01, 0x02, 0x0a,},
+ [1] = {0x12, 0x02, 0x14, 0x14, 0x01, 0x18, 0x0a,},
+ [2] = {0x12, 0x12, 0x12, 0x12, 0x0a,},
+ [3] = {0x12, 0x12, 0x12, 0x12, 0x28, 0x28, 0x0a,},
+ [4] = {0x06, 0x01, 0x17, 0x06, 0x0a, 0x07, 0x07,},
+ [5] = {0x02, 0x01, 0x04, 0x02, 0x06, 0x01, 0x06, 0x0a,},
+ [6] = {0x06, 0x01, 0x06, 0x0a,},
+ [7] = {0x0c, 0x0c, 0x12,},
+ [8] = {0x0c, 0x01, 0x0a, 0x05, 0x02, 0x03, 0x01, 0x01, 0x14, 0x14,
+ 0x0a, 0x14, 0x1e, 0x01, 0x0c, 0x0a, 0x05, 0x02, 0x02, 0x05,
+ 0x03, 0x01, 0x1e, 0x01, 0x05,},
+ [9] = {0x1e, 0x01, 0x0a, 0x0a, 0x01, 0x01, 0x03, 0x1e, 0x1e, 0x10,
+ 0x07, 0x01, 0x0a, 0x06, 0x03, 0x03, 0x0e, 0x01, 0x04, 0x28,},
+ [10] = {0x03, 0x20, 0x01, 0x20, 0x01, 0x01, 0x14, 0x0a, 0x0a, 0x0c,
+ 0x0a, 0x05, 0x02, 0x03, 0x02, 0x14, 0x0a, 0x0a, 0x14, 0x14,
+ 0x14, 0x01, 0x01, 0x14, 0x1e, 0x01, 0x05, 0x03, 0x02, 0x28,},
+ [11] = {0x03, 0x20, 0x01, 0x20, 0x01, 0x01, 0x14, 0x0a, 0x0a, 0x0c,
+ 0x0a, 0x05, 0x02, 0x03, 0x02, 0x14, 0x0a, 0x0a, 0x14, 0x14,
+ 0x14, 0x01, 0x01, 0x14, 0x1e, 0x01, 0x05, 0x03, 0x02, 0x28,},
+ [12] = {0x03, 0x20, 0x01, 0x20, 0x01, 0x01, 0x14, 0x0a, 0x0a, 0x0c,
+ 0x0a, 0x05, 0x02, 0x03, 0x02, 0x14, 0x0a, 0x0a, 0x14, 0x14,
+ 0x14, 0x01, 0x01, 0x14, 0x1e, 0x01, 0x05, 0x03, 0x02, 0x28,},
+ [13] = {0x07, 0x02, 0x04, 0x02, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
+ 0x07, 0x02, 0x04, 0x02, 0x05, 0x05,},
+ [14] = {0x02, 0x02, 0x0c, 0x0c, 0x0c, 0x0c, 0x01, 0x01, 0x02, 0x02,
+ 0x02, 0x02, 0x0c, 0x0c, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x01, 0x01,},
+ [15] = {0x0c, 0x0c, 0x02, 0x02, 0x02, 0x02, 0x01, 0x01, 0x0c, 0x0c,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x01, 0x02,
+ 0x0c, 0x01, 0x01,},
+ [16] = {0x28, 0x28, 0x03, 0x01, 0x01, 0x03, 0x14, 0x14, 0x0a, 0x0d,
+ 0x03, 0x05, 0x0e, 0x01, 0x01, 0x05, 0x06, 0x0d, 0x01,},
+ [17] = {0x28, 0x02, 0x02, 0x12, 0x02, 0x12, 0x10, 0x02, 0x02, 0x0a,
+ 0x12, 0x02, 0x02, 0x0a, 0x16, 0x02, 0x04,},
+ [18] = {0x28, 0x02, 0x02, 0x12, 0x02, 0x12, 0x10, 0x02, 0x02, 0x0a,
+ 0x12, 0x02, 0x02, 0x0a, 0x16, 0x02, 0x04,},
+ [19] = {0x1a, 0x0e, 0x0a, 0x0a, 0x0c, 0x0e, 0x10,},
+ [20] = {0x1a, 0x0e, 0x0a, 0x0a, 0x0c, 0x0e, 0x10,},
+ [21] = {0x01, 0x04, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04, 0x04, 0x01,
+ 0x01, 0x01, 0x04, 0x0a, 0x06, 0x01, 0x01, 0x01, 0x0a, 0x06,
+ 0x01, 0x01, 0x05, 0x03, 0x03, 0x04, 0x01,},
+ [22] = {0x28, 0x19, 0x0c, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04,
+ 0x01,},
+ [23] = {0x01, 0x01, 0x04, 0x01, 0x01, 0x01, 0x18, 0x01, 0x01,},
+ [24] = {0x12, 0x06, 0x12, 0x06,},
+ [25] = {0x01},
+};
+
+static const u8 mtk_smi_larb_mt8195_ostd[][SMI_LARB_PORT_NR_MAX] = {
+ [0] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb0 */
+ [1] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb1 */
+ [2] = {0x12, 0x12, 0x12, 0x12, 0x0a,}, /* ... */
+ [3] = {0x12, 0x12, 0x12, 0x12, 0x28, 0x28, 0x0a,},
+ [4] = {0x06, 0x01, 0x17, 0x06, 0x0a,},
+ [5] = {0x06, 0x01, 0x17, 0x06, 0x06, 0x01, 0x06, 0x0a,},
+ [6] = {0x06, 0x01, 0x06, 0x0a,},
+ [7] = {0x0c, 0x0c, 0x12,},
+ [8] = {0x0c, 0x0c, 0x12,},
+ [9] = {0x0a, 0x08, 0x04, 0x06, 0x01, 0x01, 0x10, 0x18, 0x11, 0x0a,
+ 0x08, 0x04, 0x11, 0x06, 0x02, 0x06, 0x01, 0x11, 0x11, 0x06,},
+ [10] = {0x18, 0x08, 0x01, 0x01, 0x20, 0x12, 0x18, 0x06, 0x05, 0x10,
+ 0x08, 0x08, 0x10, 0x08, 0x08, 0x18, 0x0c, 0x09, 0x0b, 0x0d,
+ 0x0d, 0x06, 0x10, 0x10,},
+ [11] = {0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x01, 0x01, 0x01, 0x01,},
+ [12] = {0x09, 0x09, 0x05, 0x05, 0x0c, 0x18, 0x02, 0x02, 0x04, 0x02,},
+ [13] = {0x02, 0x02, 0x12, 0x12, 0x02, 0x02, 0x02, 0x02, 0x08, 0x01,},
+ [14] = {0x12, 0x12, 0x02, 0x02, 0x02, 0x02, 0x16, 0x01, 0x16, 0x01,
+ 0x01, 0x02, 0x02, 0x08, 0x02,},
+ [15] = {},
+ [16] = {0x28, 0x02, 0x02, 0x12, 0x02, 0x12, 0x10, 0x02, 0x02, 0x0a,
+ 0x12, 0x02, 0x0a, 0x16, 0x02, 0x04,},
+ [17] = {0x1a, 0x0e, 0x0a, 0x0a, 0x0c, 0x0e, 0x10,},
+ [18] = {0x12, 0x06, 0x12, 0x06,},
+ [19] = {0x01, 0x04, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04, 0x04, 0x01,
+ 0x01, 0x01, 0x04, 0x0a, 0x06, 0x01, 0x01, 0x01, 0x0a, 0x06,
+ 0x01, 0x01, 0x05, 0x03, 0x03, 0x04, 0x01,},
+ [20] = {0x01, 0x04, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04, 0x04, 0x01,
+ 0x01, 0x01, 0x04, 0x0a, 0x06, 0x01, 0x01, 0x01, 0x0a, 0x06,
+ 0x01, 0x01, 0x05, 0x03, 0x03, 0x04, 0x01,},
+ [21] = {0x28, 0x19, 0x0c, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04,},
+ [22] = {0x28, 0x19, 0x0c, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04,},
+ [23] = {0x18, 0x01,},
+ [24] = {0x01, 0x01, 0x04, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04, 0x01,
+ 0x01, 0x01,},
+ [25] = {0x02, 0x02, 0x02, 0x28, 0x16, 0x02, 0x02, 0x02, 0x12, 0x16,
+ 0x02, 0x01,},
+ [26] = {0x02, 0x02, 0x02, 0x28, 0x16, 0x02, 0x02, 0x02, 0x12, 0x16,
+ 0x02, 0x01,},
+ [27] = {0x02, 0x02, 0x02, 0x28, 0x16, 0x02, 0x02, 0x02, 0x12, 0x16,
+ 0x02, 0x01,},
+ [28] = {0x1a, 0x0e, 0x0a, 0x0a, 0x0c, 0x0e, 0x10,},
+};
+
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = {
+ .port_in_larb = {
+ LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
+ LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
+ },
+ .config_port = mtk_smi_larb_config_port_gen1,
+};
+
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt2712 = {
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+ .larb_direct_to_common_mask = BIT(8) | BIT(9), /* bdpsys */
+};
+
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt6779 = {
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+ .larb_direct_to_common_mask =
+ BIT(4) | BIT(6) | BIT(11) | BIT(12) | BIT(13),
+ /* DUMMY | IPU0 | IPU1 | CCU | MDLA */
+};
+
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8167 = {
+	/* mt8167 does not need the port in larb */
+ .config_port = mtk_smi_larb_config_port_mt8167,
+};
+
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8173 = {
+	/* mt8173 does not need the port in larb */
+ .config_port = mtk_smi_larb_config_port_mt8173,
+};
+
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8183 = {
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+ .larb_direct_to_common_mask = BIT(2) | BIT(3) | BIT(7),
+ /* IPU0 | IPU1 | CCU */
+};
+
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8186 = {
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+ .flags_general = MTK_SMI_FLAG_SLEEP_CTL,
+};
+
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8188 = {
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+ .flags_general = MTK_SMI_FLAG_THRT_UPDATE | MTK_SMI_FLAG_SW_FLAG |
+ MTK_SMI_FLAG_SLEEP_CTL | MTK_SMI_FLAG_CFG_PORT_SEC_CTL,
+ .ostd = mtk_smi_larb_mt8188_ostd,
+};
+
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8192 = {
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+};
+
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8195 = {
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+ .flags_general = MTK_SMI_FLAG_THRT_UPDATE | MTK_SMI_FLAG_SW_FLAG |
+ MTK_SMI_FLAG_SLEEP_CTL,
+ .ostd = mtk_smi_larb_mt8195_ostd,
+};
+
+static const struct of_device_id mtk_smi_larb_of_ids[] = {
+ {.compatible = "mediatek,mt2701-smi-larb", .data = &mtk_smi_larb_mt2701},
+ {.compatible = "mediatek,mt2712-smi-larb", .data = &mtk_smi_larb_mt2712},
+ {.compatible = "mediatek,mt6779-smi-larb", .data = &mtk_smi_larb_mt6779},
+ {.compatible = "mediatek,mt6795-smi-larb", .data = &mtk_smi_larb_mt8173},
+ {.compatible = "mediatek,mt8167-smi-larb", .data = &mtk_smi_larb_mt8167},
+ {.compatible = "mediatek,mt8173-smi-larb", .data = &mtk_smi_larb_mt8173},
+ {.compatible = "mediatek,mt8183-smi-larb", .data = &mtk_smi_larb_mt8183},
+ {.compatible = "mediatek,mt8186-smi-larb", .data = &mtk_smi_larb_mt8186},
+ {.compatible = "mediatek,mt8188-smi-larb", .data = &mtk_smi_larb_mt8188},
+ {.compatible = "mediatek,mt8192-smi-larb", .data = &mtk_smi_larb_mt8192},
+ {.compatible = "mediatek,mt8195-smi-larb", .data = &mtk_smi_larb_mt8195},
+ {}
+};
+
+static int mtk_smi_larb_sleep_ctrl_enable(struct mtk_smi_larb *larb)
+{
+ int ret;
+ u32 tmp;
+
+ writel_relaxed(SLP_PROT_EN, larb->base + SMI_LARB_SLP_CON);
+ ret = readl_poll_timeout_atomic(larb->base + SMI_LARB_SLP_CON,
+ tmp, !!(tmp & SLP_PROT_RDY), 10, 1000);
+ if (ret) {
+ /* TODO: Reset this larb if it fails here. */
+ dev_err(larb->smi.dev, "sleep ctrl is not ready(0x%x).\n", tmp);
+ }
+ return ret;
+}
+
+static void mtk_smi_larb_sleep_ctrl_disable(struct mtk_smi_larb *larb)
+{
+ writel_relaxed(0, larb->base + SMI_LARB_SLP_CON);
+}
+
+static int mtk_smi_device_link_common(struct device *dev, struct device **com_dev)
+{
+ struct platform_device *smi_com_pdev;
+ struct device_node *smi_com_node;
+ struct device *smi_com_dev;
+ struct device_link *link;
+
+ smi_com_node = of_parse_phandle(dev->of_node, "mediatek,smi", 0);
+ if (!smi_com_node)
+ return -EINVAL;
+
+ smi_com_pdev = of_find_device_by_node(smi_com_node);
+ of_node_put(smi_com_node);
+ if (smi_com_pdev) {
+		/* smi-common is the supplier; make sure it is ready before linking */
+ if (!platform_get_drvdata(smi_com_pdev)) {
+ put_device(&smi_com_pdev->dev);
+ return -EPROBE_DEFER;
+ }
+ smi_com_dev = &smi_com_pdev->dev;
+ link = device_link_add(dev, smi_com_dev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
+ if (!link) {
+ dev_err(dev, "Unable to link smi-common dev\n");
+ put_device(&smi_com_pdev->dev);
+ return -ENODEV;
+ }
+ *com_dev = smi_com_dev;
+ } else {
+ dev_err(dev, "Failed to get the smi_common device\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int mtk_smi_dts_clk_init(struct device *dev, struct mtk_smi *smi,
+ const char * const clks[],
+ unsigned int clk_nr_required,
+ unsigned int clk_nr_optional)
+{
+ int i, ret;
+
+ for (i = 0; i < clk_nr_required; i++)
+ smi->clks[i].id = clks[i];
+ ret = devm_clk_bulk_get(dev, clk_nr_required, smi->clks);
+ if (ret)
+ return ret;
+
+ for (i = clk_nr_required; i < clk_nr_required + clk_nr_optional; i++)
+ smi->clks[i].id = clks[i];
+ ret = devm_clk_bulk_get_optional(dev, clk_nr_optional,
+ smi->clks + clk_nr_required);
+ smi->clk_num = clk_nr_required + clk_nr_optional;
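+	/*
+	 * e.g. for a larb this is 2 required (apb, smi) + 1 optional (gals),
+	 * so clk_num = 3; optional clocks absent from the DT remain NULL
+	 * handles, which the clk bulk helpers tolerate.
+	 */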
+ return ret;
+}
+
+static int mtk_smi_larb_probe(struct platform_device *pdev)
+{
+ struct mtk_smi_larb *larb;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL);
+ if (!larb)
+ return -ENOMEM;
+
+ larb->larb_gen = of_device_get_match_data(dev);
+ larb->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(larb->base))
+ return PTR_ERR(larb->base);
+
+ ret = mtk_smi_dts_clk_init(dev, &larb->smi, mtk_smi_larb_clks,
+ MTK_SMI_LARB_REQ_CLK_NR, MTK_SMI_LARB_OPT_CLK_NR);
+ if (ret)
+ return ret;
+
+ larb->smi.dev = dev;
+
+ ret = mtk_smi_device_link_common(dev, &larb->smi_common_dev);
+ if (ret < 0)
+ return ret;
+
+ pm_runtime_enable(dev);
+ platform_set_drvdata(pdev, larb);
+ ret = component_add(dev, &mtk_smi_larb_component_ops);
+ if (ret)
+ goto err_pm_disable;
+ return 0;
+
+err_pm_disable:
+ pm_runtime_disable(dev);
+ device_link_remove(dev, larb->smi_common_dev);
+ return ret;
+}
+
+static int mtk_smi_larb_remove(struct platform_device *pdev)
+{
+ struct mtk_smi_larb *larb = platform_get_drvdata(pdev);
+
+ device_link_remove(&pdev->dev, larb->smi_common_dev);
+ pm_runtime_disable(&pdev->dev);
+ component_del(&pdev->dev, &mtk_smi_larb_component_ops);
+ return 0;
+}
+
+static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+ const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(larb->smi.clk_num, larb->smi.clks);
+ if (ret)
+ return ret;
+
+ if (MTK_SMI_CAPS(larb->larb_gen->flags_general, MTK_SMI_FLAG_SLEEP_CTL))
+ mtk_smi_larb_sleep_ctrl_disable(larb);
+
+ /* Configure the basic setting for this larb */
+ return larb_gen->config_port(dev);
+}
+
+static int __maybe_unused mtk_smi_larb_suspend(struct device *dev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+ int ret;
+
+ if (MTK_SMI_CAPS(larb->larb_gen->flags_general, MTK_SMI_FLAG_SLEEP_CTL)) {
+ ret = mtk_smi_larb_sleep_ctrl_enable(larb);
+ if (ret)
+ return ret;
+ }
+
+ clk_bulk_disable_unprepare(larb->smi.clk_num, larb->smi.clks);
+ return 0;
+}
+
+static const struct dev_pm_ops smi_larb_pm_ops = {
+ SET_RUNTIME_PM_OPS(mtk_smi_larb_suspend, mtk_smi_larb_resume, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static struct platform_driver mtk_smi_larb_driver = {
+ .probe = mtk_smi_larb_probe,
+ .remove = mtk_smi_larb_remove,
+ .driver = {
+ .name = "mtk-smi-larb",
+ .of_match_table = mtk_smi_larb_of_ids,
+ .pm = &smi_larb_pm_ops,
+ }
+};
+
+static const struct mtk_smi_reg_pair mtk_smi_common_mt6795_init[SMI_COMMON_INIT_REGS_NR] = {
+ {SMI_L1_ARB, 0x1b},
+ {SMI_M4U_TH, 0xce810c85},
+ {SMI_FIFO_TH1, 0x43214c8},
+ {SMI_READ_FIFO_TH, 0x191f},
+};
+
+static const struct mtk_smi_reg_pair mtk_smi_common_mt8195_init[SMI_COMMON_INIT_REGS_NR] = {
+ {SMI_L1LEN, 0xb},
+ {SMI_M4U_TH, 0xe100e10},
+ {SMI_FIFO_TH1, 0x506090a},
+ {SMI_FIFO_TH2, 0x506090a},
+ {SMI_DCM, 0x4f1},
+ {SMI_DUMMY, 0x1},
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_gen1 = {
+ .type = MTK_SMI_GEN1,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_gen2 = {
+ .type = MTK_SMI_GEN2,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_mt6779 = {
+ .type = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(4) |
+ F_MMU1_LARB(5) | F_MMU1_LARB(6) | F_MMU1_LARB(7),
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_mt6795 = {
+ .type = MTK_SMI_GEN2,
+ .bus_sel = F_MMU1_LARB(0),
+ .init = mtk_smi_common_mt6795_init,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = {
+ .type = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(5) |
+ F_MMU1_LARB(7),
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_mt8186 = {
+ .type = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(4) | F_MMU1_LARB(7),
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_mt8188_vdo = {
+ .type = MTK_SMI_GEN2,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(5) | F_MMU1_LARB(7),
+ .init = mtk_smi_common_mt8195_init,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_mt8188_vpp = {
+ .type = MTK_SMI_GEN2,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(7),
+ .init = mtk_smi_common_mt8195_init,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_mt8192 = {
+ .type = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(5) |
+ F_MMU1_LARB(6),
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_mt8195_vdo = {
+ .type = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(3) | F_MMU1_LARB(5) |
+ F_MMU1_LARB(7),
+ .init = mtk_smi_common_mt8195_init,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_mt8195_vpp = {
+ .type = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(7),
+ .init = mtk_smi_common_mt8195_init,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_sub_common_mt8195 = {
+ .type = MTK_SMI_GEN2_SUB_COMM,
+ .has_gals = true,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_mt8365 = {
+ .type = MTK_SMI_GEN2,
+ .bus_sel = F_MMU1_LARB(2) | F_MMU1_LARB(4),
+};
+
+static const struct of_device_id mtk_smi_common_of_ids[] = {
+ {.compatible = "mediatek,mt2701-smi-common", .data = &mtk_smi_common_gen1},
+ {.compatible = "mediatek,mt2712-smi-common", .data = &mtk_smi_common_gen2},
+ {.compatible = "mediatek,mt6779-smi-common", .data = &mtk_smi_common_mt6779},
+ {.compatible = "mediatek,mt6795-smi-common", .data = &mtk_smi_common_mt6795},
+ {.compatible = "mediatek,mt8167-smi-common", .data = &mtk_smi_common_gen2},
+ {.compatible = "mediatek,mt8173-smi-common", .data = &mtk_smi_common_gen2},
+ {.compatible = "mediatek,mt8183-smi-common", .data = &mtk_smi_common_mt8183},
+ {.compatible = "mediatek,mt8186-smi-common", .data = &mtk_smi_common_mt8186},
+ {.compatible = "mediatek,mt8188-smi-common-vdo", .data = &mtk_smi_common_mt8188_vdo},
+ {.compatible = "mediatek,mt8188-smi-common-vpp", .data = &mtk_smi_common_mt8188_vpp},
+ {.compatible = "mediatek,mt8192-smi-common", .data = &mtk_smi_common_mt8192},
+ {.compatible = "mediatek,mt8195-smi-common-vdo", .data = &mtk_smi_common_mt8195_vdo},
+ {.compatible = "mediatek,mt8195-smi-common-vpp", .data = &mtk_smi_common_mt8195_vpp},
+ {.compatible = "mediatek,mt8195-smi-sub-common", .data = &mtk_smi_sub_common_mt8195},
+ {.compatible = "mediatek,mt8365-smi-common", .data = &mtk_smi_common_mt8365},
+ {}
+};
+
+static int mtk_smi_common_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_smi *common;
+ int ret, clk_required = MTK_SMI_COM_REQ_CLK_NR;
+
+ common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
+ if (!common)
+ return -ENOMEM;
+ common->dev = dev;
+ common->plat = of_device_get_match_data(dev);
+
+ if (common->plat->has_gals) {
+ if (common->plat->type == MTK_SMI_GEN2)
+ clk_required = MTK_SMI_COM_GALS_REQ_CLK_NR;
+ else if (common->plat->type == MTK_SMI_GEN2_SUB_COMM)
+ clk_required = MTK_SMI_SUB_COM_GALS_REQ_CLK_NR;
+ }
+ ret = mtk_smi_dts_clk_init(dev, common, mtk_smi_common_clks, clk_required, 0);
+ if (ret)
+ return ret;
+
+ /*
+	 * For MTK SMI gen1, we need to get the AO (always-on) base to
+	 * configure the m4u ports, and we need to enable the async clock
+	 * that bridges the smi clock into the emi clock domain. For MTK SMI
+	 * gen2, there is no smi ao base.
+ */
+ if (common->plat->type == MTK_SMI_GEN1) {
+ common->smi_ao_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(common->smi_ao_base))
+ return PTR_ERR(common->smi_ao_base);
+
+ common->clk_async = devm_clk_get(dev, "async");
+ if (IS_ERR(common->clk_async))
+ return PTR_ERR(common->clk_async);
+
+ ret = clk_prepare_enable(common->clk_async);
+ if (ret)
+ return ret;
+ } else {
+ common->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(common->base))
+ return PTR_ERR(common->base);
+ }
+
+ /* link its smi-common if this is smi-sub-common */
+ if (common->plat->type == MTK_SMI_GEN2_SUB_COMM) {
+ ret = mtk_smi_device_link_common(dev, &common->smi_common_dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+ platform_set_drvdata(pdev, common);
+ return 0;
+}
+
+static int mtk_smi_common_remove(struct platform_device *pdev)
+{
+ struct mtk_smi *common = dev_get_drvdata(&pdev->dev);
+
+ if (common->plat->type == MTK_SMI_GEN2_SUB_COMM)
+ device_link_remove(&pdev->dev, common->smi_common_dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static int __maybe_unused mtk_smi_common_resume(struct device *dev)
+{
+ struct mtk_smi *common = dev_get_drvdata(dev);
+ const struct mtk_smi_reg_pair *init = common->plat->init;
+ u32 bus_sel = common->plat->bus_sel; /* default is 0 */
+ int ret, i;
+
+ ret = clk_bulk_prepare_enable(common->clk_num, common->clks);
+ if (ret)
+ return ret;
+
+ if (common->plat->type != MTK_SMI_GEN2)
+ return 0;
+
+ for (i = 0; i < SMI_COMMON_INIT_REGS_NR && init && init[i].offset; i++)
+ writel_relaxed(init[i].value, common->base + init[i].offset);
+
+ writel(bus_sel, common->base + SMI_BUS_SEL);
+ return 0;
+}
+
+static int __maybe_unused mtk_smi_common_suspend(struct device *dev)
+{
+ struct mtk_smi *common = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(common->clk_num, common->clks);
+ return 0;
+}
+
+static const struct dev_pm_ops smi_common_pm_ops = {
+ SET_RUNTIME_PM_OPS(mtk_smi_common_suspend, mtk_smi_common_resume, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static struct platform_driver mtk_smi_common_driver = {
+ .probe = mtk_smi_common_probe,
+ .remove = mtk_smi_common_remove,
+ .driver = {
+ .name = "mtk-smi-common",
+ .of_match_table = mtk_smi_common_of_ids,
+ .pm = &smi_common_pm_ops,
+ }
+};
+
+static struct platform_driver * const smidrivers[] = {
+ &mtk_smi_common_driver,
+ &mtk_smi_larb_driver,
+};
+
+static int __init mtk_smi_init(void)
+{
+ return platform_register_drivers(smidrivers, ARRAY_SIZE(smidrivers));
+}
+module_init(mtk_smi_init);
+
+static void __exit mtk_smi_exit(void)
+{
+ platform_unregister_drivers(smidrivers, ARRAY_SIZE(smidrivers));
+}
+module_exit(mtk_smi_exit);
+
+MODULE_DESCRIPTION("MediaTek SMI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c
new file mode 100644
index 0000000000..406fddcdba
--- /dev/null
+++ b/drivers/memory/mvebu-devbus.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Marvell EBU SoC Device Bus Controller
+ * (memory controller for NOR/NAND/SRAM/FPGA devices)
+ *
+ * Copyright (C) 2013-2014 Marvell
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/mbus.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+/* Register definitions */
+#define ARMADA_DEV_WIDTH_SHIFT 30
+#define ARMADA_BADR_SKEW_SHIFT 28
+#define ARMADA_RD_HOLD_SHIFT 23
+#define ARMADA_ACC_NEXT_SHIFT 17
+#define ARMADA_RD_SETUP_SHIFT 12
+#define ARMADA_ACC_FIRST_SHIFT 6
+
+#define ARMADA_SYNC_ENABLE_SHIFT 24
+#define ARMADA_WR_HIGH_SHIFT 16
+#define ARMADA_WR_LOW_SHIFT 8
+
+#define ARMADA_READ_PARAM_OFFSET 0x0
+#define ARMADA_WRITE_PARAM_OFFSET 0x4
+
+#define ORION_RESERVED (0x2 << 30)
+#define ORION_BADR_SKEW_SHIFT 28
+#define ORION_WR_HIGH_EXT_BIT BIT(27)
+#define ORION_WR_HIGH_EXT_MASK 0x8
+#define ORION_WR_LOW_EXT_BIT BIT(26)
+#define ORION_WR_LOW_EXT_MASK 0x8
+#define ORION_ALE_WR_EXT_BIT BIT(25)
+#define ORION_ALE_WR_EXT_MASK 0x8
+#define ORION_ACC_NEXT_EXT_BIT BIT(24)
+#define ORION_ACC_NEXT_EXT_MASK 0x10
+#define ORION_ACC_FIRST_EXT_BIT BIT(23)
+#define ORION_ACC_FIRST_EXT_MASK 0x10
+#define ORION_TURN_OFF_EXT_BIT BIT(22)
+#define ORION_TURN_OFF_EXT_MASK 0x8
+#define ORION_DEV_WIDTH_SHIFT 20
+#define ORION_WR_HIGH_SHIFT 17
+#define ORION_WR_HIGH_MASK 0x7
+#define ORION_WR_LOW_SHIFT 14
+#define ORION_WR_LOW_MASK 0x7
+#define ORION_ALE_WR_SHIFT 11
+#define ORION_ALE_WR_MASK 0x7
+#define ORION_ACC_NEXT_SHIFT 7
+#define ORION_ACC_NEXT_MASK 0xF
+#define ORION_ACC_FIRST_SHIFT 3
+#define ORION_ACC_FIRST_MASK 0xF
+#define ORION_TURN_OFF_SHIFT 0
+#define ORION_TURN_OFF_MASK 0x7
+
+struct devbus_read_params {
+ u32 bus_width;
+ u32 badr_skew;
+ u32 turn_off;
+ u32 acc_first;
+ u32 acc_next;
+ u32 rd_setup;
+ u32 rd_hold;
+};
+
+struct devbus_write_params {
+ u32 sync_enable;
+ u32 wr_high;
+ u32 wr_low;
+ u32 ale_wr;
+};
+
+struct devbus {
+ struct device *dev;
+ void __iomem *base;
+ unsigned long tick_ps;
+};
+
+static int get_timing_param_ps(struct devbus *devbus,
+ struct device_node *node,
+ const char *name,
+ u32 *ticks)
+{
+ u32 time_ps;
+ int err;
+
+ err = of_property_read_u32(node, name, &time_ps);
+ if (err < 0) {
+ dev_err(devbus->dev, "%pOF has no '%s' property\n",
+ node, name);
+ return err;
+ }
+
+ *ticks = (time_ps + devbus->tick_ps - 1) / devbus->tick_ps;
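+	/* Round up: e.g. with tick_ps = 4000 (250 MHz), 9000 ps becomes 3 ticks. */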
+
+ dev_dbg(devbus->dev, "%s: %u ps -> 0x%x\n",
+ name, time_ps, *ticks);
+ return 0;
+}
+
+static int devbus_get_timing_params(struct devbus *devbus,
+ struct device_node *node,
+ struct devbus_read_params *r,
+ struct devbus_write_params *w)
+{
+ int err;
+
+ err = of_property_read_u32(node, "devbus,bus-width", &r->bus_width);
+ if (err < 0) {
+ dev_err(devbus->dev,
+ "%pOF has no 'devbus,bus-width' property\n",
+ node);
+ return err;
+ }
+
+ /*
+ * The bus width is encoded into the register as 0 for 8 bits,
+ * and 1 for 16 bits, so we do the necessary conversion here.
+ */
+ if (r->bus_width == 8) {
+ r->bus_width = 0;
+ } else if (r->bus_width == 16) {
+ r->bus_width = 1;
+ } else {
+ dev_err(devbus->dev, "invalid bus width %d\n", r->bus_width);
+ return -EINVAL;
+ }
+
+ err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps",
+ &r->badr_skew);
+ if (err < 0)
+ return err;
+
+ err = get_timing_param_ps(devbus, node, "devbus,turn-off-ps",
+ &r->turn_off);
+ if (err < 0)
+ return err;
+
+ err = get_timing_param_ps(devbus, node, "devbus,acc-first-ps",
+ &r->acc_first);
+ if (err < 0)
+ return err;
+
+ err = get_timing_param_ps(devbus, node, "devbus,acc-next-ps",
+ &r->acc_next);
+ if (err < 0)
+ return err;
+
+ if (of_device_is_compatible(devbus->dev->of_node, "marvell,mvebu-devbus")) {
+ err = get_timing_param_ps(devbus, node, "devbus,rd-setup-ps",
+ &r->rd_setup);
+ if (err < 0)
+ return err;
+
+ err = get_timing_param_ps(devbus, node, "devbus,rd-hold-ps",
+ &r->rd_hold);
+ if (err < 0)
+ return err;
+
+ err = of_property_read_u32(node, "devbus,sync-enable",
+ &w->sync_enable);
+ if (err < 0) {
+ dev_err(devbus->dev,
+ "%pOF has no 'devbus,sync-enable' property\n",
+ node);
+ return err;
+ }
+ }
+
+ err = get_timing_param_ps(devbus, node, "devbus,ale-wr-ps",
+ &w->ale_wr);
+ if (err < 0)
+ return err;
+
+ err = get_timing_param_ps(devbus, node, "devbus,wr-low-ps",
+ &w->wr_low);
+ if (err < 0)
+ return err;
+
+ err = get_timing_param_ps(devbus, node, "devbus,wr-high-ps",
+ &w->wr_high);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void devbus_orion_set_timing_params(struct devbus *devbus,
+ struct device_node *node,
+ struct devbus_read_params *r,
+ struct devbus_write_params *w)
+{
+ u32 value;
+
+ /*
+ * The hardware designers found it would be a good idea to
+ * split most of the values in the register into two fields:
+ * one containing all the low-order bits, and another one
+ * containing just the high-order bit. For all of those
+ * fields, we have to split the value into these two parts.
+ */
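+	/*
+	 * e.g. acc_first = 19 (0x13): the low bits (0x3) go into the 4-bit
+	 * ACC_FIRST field and the 0x10 bit sets ORION_ACC_FIRST_EXT_BIT.
+	 */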
+ value = (r->turn_off & ORION_TURN_OFF_MASK) << ORION_TURN_OFF_SHIFT |
+ (r->acc_first & ORION_ACC_FIRST_MASK) << ORION_ACC_FIRST_SHIFT |
+ (r->acc_next & ORION_ACC_NEXT_MASK) << ORION_ACC_NEXT_SHIFT |
+ (w->ale_wr & ORION_ALE_WR_MASK) << ORION_ALE_WR_SHIFT |
+ (w->wr_low & ORION_WR_LOW_MASK) << ORION_WR_LOW_SHIFT |
+ (w->wr_high & ORION_WR_HIGH_MASK) << ORION_WR_HIGH_SHIFT |
+ r->bus_width << ORION_DEV_WIDTH_SHIFT |
+ ((r->turn_off & ORION_TURN_OFF_EXT_MASK) ? ORION_TURN_OFF_EXT_BIT : 0) |
+ ((r->acc_first & ORION_ACC_FIRST_EXT_MASK) ? ORION_ACC_FIRST_EXT_BIT : 0) |
+ ((r->acc_next & ORION_ACC_NEXT_EXT_MASK) ? ORION_ACC_NEXT_EXT_BIT : 0) |
+ ((w->ale_wr & ORION_ALE_WR_EXT_MASK) ? ORION_ALE_WR_EXT_BIT : 0) |
+ ((w->wr_low & ORION_WR_LOW_EXT_MASK) ? ORION_WR_LOW_EXT_BIT : 0) |
+ ((w->wr_high & ORION_WR_HIGH_EXT_MASK) ? ORION_WR_HIGH_EXT_BIT : 0) |
+ (r->badr_skew << ORION_BADR_SKEW_SHIFT) |
+ ORION_RESERVED;
+
+ writel(value, devbus->base);
+}
+
+static void devbus_armada_set_timing_params(struct devbus *devbus,
+ struct device_node *node,
+ struct devbus_read_params *r,
+ struct devbus_write_params *w)
+{
+ u32 value;
+
+ /* Set read timings */
+ value = r->bus_width << ARMADA_DEV_WIDTH_SHIFT |
+ r->badr_skew << ARMADA_BADR_SKEW_SHIFT |
+ r->rd_hold << ARMADA_RD_HOLD_SHIFT |
+ r->acc_next << ARMADA_ACC_NEXT_SHIFT |
+ r->rd_setup << ARMADA_RD_SETUP_SHIFT |
+ r->acc_first << ARMADA_ACC_FIRST_SHIFT |
+ r->turn_off;
+
+ dev_dbg(devbus->dev, "read parameters register 0x%p = 0x%x\n",
+ devbus->base + ARMADA_READ_PARAM_OFFSET,
+ value);
+
+ writel(value, devbus->base + ARMADA_READ_PARAM_OFFSET);
+
+ /* Set write timings */
+ value = w->sync_enable << ARMADA_SYNC_ENABLE_SHIFT |
+ w->wr_low << ARMADA_WR_LOW_SHIFT |
+ w->wr_high << ARMADA_WR_HIGH_SHIFT |
+ w->ale_wr;
+
+ dev_dbg(devbus->dev, "write parameters register: 0x%p = 0x%x\n",
+ devbus->base + ARMADA_WRITE_PARAM_OFFSET,
+ value);
+
+ writel(value, devbus->base + ARMADA_WRITE_PARAM_OFFSET);
+}
+
+static int mvebu_devbus_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct devbus_read_params r;
+ struct devbus_write_params w;
+ struct devbus *devbus;
+ struct clk *clk;
+ unsigned long rate;
+ int err;
+
+ devbus = devm_kzalloc(&pdev->dev, sizeof(struct devbus), GFP_KERNEL);
+ if (!devbus)
+ return -ENOMEM;
+
+ devbus->dev = dev;
+ devbus->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(devbus->base))
+ return PTR_ERR(devbus->base);
+
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ /*
+	 * Obtain the clock period in picoseconds; we need it
+	 * in order to convert the timing parameters from
+	 * picoseconds to cycles.
+ */
+ rate = clk_get_rate(clk) / 1000;
+ devbus->tick_ps = 1000000000 / rate;
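+	/* e.g. a 250 MHz clock gives rate = 250000 (kHz) and tick_ps = 4000. */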
+
+ dev_dbg(devbus->dev, "Setting timing parameter, tick is %lu ps\n",
+ devbus->tick_ps);
+
+ if (!of_property_read_bool(node, "devbus,keep-config")) {
+ /* Read the Device Tree node */
+ err = devbus_get_timing_params(devbus, node, &r, &w);
+ if (err < 0)
+ return err;
+
+ /* Set the new timing parameters */
+ if (of_device_is_compatible(node, "marvell,orion-devbus"))
+ devbus_orion_set_timing_params(devbus, node, &r, &w);
+ else
+ devbus_armada_set_timing_params(devbus, node, &r, &w);
+ }
+
+ /*
+ * We need to create a child device explicitly from here to
+ * guarantee that the child will be probed after the timing
+ * parameters for the bus are written.
+ */
+ err = of_platform_populate(node, NULL, NULL, dev);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static const struct of_device_id mvebu_devbus_of_match[] = {
+ { .compatible = "marvell,mvebu-devbus" },
+ { .compatible = "marvell,orion-devbus" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mvebu_devbus_of_match);
+
+static struct platform_driver mvebu_devbus_driver = {
+ .probe = mvebu_devbus_probe,
+ .driver = {
+ .name = "mvebu-devbus",
+ .of_match_table = mvebu_devbus_of_match,
+ },
+};
+
+static int __init mvebu_devbus_init(void)
+{
+ return platform_driver_register(&mvebu_devbus_driver);
+}
+module_init(mvebu_devbus_init);
+
+MODULE_AUTHOR("Ezequiel Garcia <ezequiel.garcia@free-electrons.com>");
+MODULE_DESCRIPTION("Marvell EBU SoC Device Bus controller");
diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c
new file mode 100644
index 0000000000..fcd20d85d3
--- /dev/null
+++ b/drivers/memory/of_memory.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * OpenFirmware helpers for memory drivers
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ * Copyright (C) 2019 Samsung Electronics Co., Ltd.
+ * Copyright (C) 2020 Krzysztof Kozlowski <krzk@kernel.org>
+ */
+
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/gfp.h>
+#include <linux/export.h>
+
+#include "jedec_ddr.h"
+#include "of_memory.h"
+
+/**
+ * of_get_min_tck() - extract min timing values for ddr
+ * @np: pointer to ddr device tree node
+ * @dev: device requesting the min timing values
+ *
+ * Populates the lpddr2_min_tck structure by extracting data
+ * from the device tree node. Returns a pointer to the populated
+ * structure. If an error occurs while populating the structure, the
+ * default min timings provided by JEDEC are returned.
+ */
+const struct lpddr2_min_tck *of_get_min_tck(struct device_node *np,
+ struct device *dev)
+{
+ int ret = 0;
+ struct lpddr2_min_tck *min;
+
+ min = devm_kzalloc(dev, sizeof(*min), GFP_KERNEL);
+ if (!min)
+ goto default_min_tck;
+
+ ret |= of_property_read_u32(np, "tRPab-min-tck", &min->tRPab);
+ ret |= of_property_read_u32(np, "tRCD-min-tck", &min->tRCD);
+ ret |= of_property_read_u32(np, "tWR-min-tck", &min->tWR);
+ ret |= of_property_read_u32(np, "tRASmin-min-tck", &min->tRASmin);
+ ret |= of_property_read_u32(np, "tRRD-min-tck", &min->tRRD);
+ ret |= of_property_read_u32(np, "tWTR-min-tck", &min->tWTR);
+ ret |= of_property_read_u32(np, "tXP-min-tck", &min->tXP);
+ ret |= of_property_read_u32(np, "tRTP-min-tck", &min->tRTP);
+ ret |= of_property_read_u32(np, "tCKE-min-tck", &min->tCKE);
+ ret |= of_property_read_u32(np, "tCKESR-min-tck", &min->tCKESR);
+ ret |= of_property_read_u32(np, "tFAW-min-tck", &min->tFAW);
+
+ if (ret) {
+ devm_kfree(dev, min);
+ goto default_min_tck;
+ }
+
+ return min;
+
+default_min_tck:
+ dev_warn(dev, "Using default min-tck values\n");
+ return &lpddr2_jedec_min_tck;
+}
+EXPORT_SYMBOL(of_get_min_tck);
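+/*
+ * Typical use (illustrative): a memory-controller driver calls
+ * of_get_min_tck(np_ddr, &pdev->dev) from probe; the result is either
+ * devm-allocated or points at the static JEDEC defaults, so callers
+ * must not free it themselves.
+ */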
+
+static int of_do_get_timings(struct device_node *np,
+ struct lpddr2_timings *tim)
+{
+ int ret;
+
+ ret = of_property_read_u32(np, "max-freq", &tim->max_freq);
+ ret |= of_property_read_u32(np, "min-freq", &tim->min_freq);
+ ret |= of_property_read_u32(np, "tRPab", &tim->tRPab);
+ ret |= of_property_read_u32(np, "tRCD", &tim->tRCD);
+ ret |= of_property_read_u32(np, "tWR", &tim->tWR);
+ ret |= of_property_read_u32(np, "tRAS-min", &tim->tRAS_min);
+ ret |= of_property_read_u32(np, "tRRD", &tim->tRRD);
+ ret |= of_property_read_u32(np, "tWTR", &tim->tWTR);
+ ret |= of_property_read_u32(np, "tXP", &tim->tXP);
+ ret |= of_property_read_u32(np, "tRTP", &tim->tRTP);
+ ret |= of_property_read_u32(np, "tCKESR", &tim->tCKESR);
+ ret |= of_property_read_u32(np, "tDQSCK-max", &tim->tDQSCK_max);
+ ret |= of_property_read_u32(np, "tFAW", &tim->tFAW);
+ ret |= of_property_read_u32(np, "tZQCS", &tim->tZQCS);
+ ret |= of_property_read_u32(np, "tZQCL", &tim->tZQCL);
+ ret |= of_property_read_u32(np, "tZQinit", &tim->tZQinit);
+ ret |= of_property_read_u32(np, "tRAS-max-ns", &tim->tRAS_max_ns);
+ ret |= of_property_read_u32(np, "tDQSCK-max-derated",
+ &tim->tDQSCK_max_derated);
+
+ return ret;
+}
+
+/**
+ * of_get_ddr_timings() - extracts the ddr timings and updates the number
+ * of available frequencies.
+ * @np_ddr: Pointer to ddr device tree node
+ * @dev: Device requesting the ddr timings
+ * @device_type: Type of ddr (LPDDR2 S2/S4)
+ * @nr_frequencies: Number of frequencies available for the ddr
+ *		(updated by this function)
+ *
+ * Populates the lpddr2_timings structure by extracting data from the
+ * device tree node. Returns a pointer to the populated structure. If an
+ * error occurs while populating, the default timings provided by JEDEC
+ * are returned.
+ */
+const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr,
+ struct device *dev,
+ u32 device_type,
+ u32 *nr_frequencies)
+{
+ struct lpddr2_timings *timings = NULL;
+ u32 arr_sz = 0, i = 0;
+ struct device_node *np_tim;
+ char *tim_compat = NULL;
+
+ switch (device_type) {
+ case DDR_TYPE_LPDDR2_S2:
+ case DDR_TYPE_LPDDR2_S4:
+ tim_compat = "jedec,lpddr2-timings";
+ break;
+ default:
+ dev_warn(dev, "Unsupported memory type\n");
+ }
+
+ for_each_child_of_node(np_ddr, np_tim)
+ if (of_device_is_compatible(np_tim, tim_compat))
+ arr_sz++;
+
+ if (arr_sz)
+ timings = devm_kcalloc(dev, arr_sz, sizeof(*timings),
+ GFP_KERNEL);
+
+ if (!timings)
+ goto default_timings;
+
+ for_each_child_of_node(np_ddr, np_tim) {
+ if (of_device_is_compatible(np_tim, tim_compat)) {
+ if (of_do_get_timings(np_tim, &timings[i])) {
+ of_node_put(np_tim);
+ devm_kfree(dev, timings);
+ goto default_timings;
+ }
+ i++;
+ }
+ }
+
+ *nr_frequencies = arr_sz;
+
+ return timings;
+
+default_timings:
+ dev_warn(dev, "Using default memory timings\n");
+ *nr_frequencies = ARRAY_SIZE(lpddr2_jedec_timings);
+ return lpddr2_jedec_timings;
+}
+EXPORT_SYMBOL(of_get_ddr_timings);
+
+/**
+ * of_lpddr3_get_min_tck() - extract min timing values for lpddr3
+ * @np: pointer to ddr device tree node
+ * @dev: device requesting the min timing values
+ *
+ * Populates the lpddr3_min_tck structure by extracting data
+ * from the device tree node. Returns a pointer to the populated
+ * structure. If an error occurs while populating the structure, NULL
+ * is returned.
+ */
+const struct lpddr3_min_tck *of_lpddr3_get_min_tck(struct device_node *np,
+ struct device *dev)
+{
+ int ret = 0;
+ struct lpddr3_min_tck *min;
+
+ min = devm_kzalloc(dev, sizeof(*min), GFP_KERNEL);
+ if (!min)
+ goto default_min_tck;
+
+ ret |= of_property_read_u32(np, "tRFC-min-tck", &min->tRFC);
+ ret |= of_property_read_u32(np, "tRRD-min-tck", &min->tRRD);
+ ret |= of_property_read_u32(np, "tRPab-min-tck", &min->tRPab);
+ ret |= of_property_read_u32(np, "tRPpb-min-tck", &min->tRPpb);
+ ret |= of_property_read_u32(np, "tRCD-min-tck", &min->tRCD);
+ ret |= of_property_read_u32(np, "tRC-min-tck", &min->tRC);
+ ret |= of_property_read_u32(np, "tRAS-min-tck", &min->tRAS);
+ ret |= of_property_read_u32(np, "tWTR-min-tck", &min->tWTR);
+ ret |= of_property_read_u32(np, "tWR-min-tck", &min->tWR);
+ ret |= of_property_read_u32(np, "tRTP-min-tck", &min->tRTP);
+ ret |= of_property_read_u32(np, "tW2W-C2C-min-tck", &min->tW2W_C2C);
+ ret |= of_property_read_u32(np, "tR2R-C2C-min-tck", &min->tR2R_C2C);
+ ret |= of_property_read_u32(np, "tWL-min-tck", &min->tWL);
+ ret |= of_property_read_u32(np, "tDQSCK-min-tck", &min->tDQSCK);
+ ret |= of_property_read_u32(np, "tRL-min-tck", &min->tRL);
+ ret |= of_property_read_u32(np, "tFAW-min-tck", &min->tFAW);
+ ret |= of_property_read_u32(np, "tXSR-min-tck", &min->tXSR);
+ ret |= of_property_read_u32(np, "tXP-min-tck", &min->tXP);
+ ret |= of_property_read_u32(np, "tCKE-min-tck", &min->tCKE);
+ ret |= of_property_read_u32(np, "tCKESR-min-tck", &min->tCKESR);
+ ret |= of_property_read_u32(np, "tMRD-min-tck", &min->tMRD);
+
+ if (ret) {
+ dev_warn(dev, "Errors while parsing min-tck values\n");
+ devm_kfree(dev, min);
+ goto default_min_tck;
+ }
+
+ return min;
+
+default_min_tck:
+ dev_warn(dev, "Using default min-tck values\n");
+ return NULL;
+}
+EXPORT_SYMBOL(of_lpddr3_get_min_tck);
+
+static int of_lpddr3_do_get_timings(struct device_node *np,
+ struct lpddr3_timings *tim)
+{
+ int ret;
+
+ ret = of_property_read_u32(np, "max-freq", &tim->max_freq);
+ if (ret)
+ /* Deprecated way of passing max-freq as 'reg' */
+ ret = of_property_read_u32(np, "reg", &tim->max_freq);
+ ret |= of_property_read_u32(np, "min-freq", &tim->min_freq);
+ ret |= of_property_read_u32(np, "tRFC", &tim->tRFC);
+ ret |= of_property_read_u32(np, "tRRD", &tim->tRRD);
+ ret |= of_property_read_u32(np, "tRPab", &tim->tRPab);
+ ret |= of_property_read_u32(np, "tRPpb", &tim->tRPpb);
+ ret |= of_property_read_u32(np, "tRCD", &tim->tRCD);
+ ret |= of_property_read_u32(np, "tRC", &tim->tRC);
+ ret |= of_property_read_u32(np, "tRAS", &tim->tRAS);
+ ret |= of_property_read_u32(np, "tWTR", &tim->tWTR);
+ ret |= of_property_read_u32(np, "tWR", &tim->tWR);
+ ret |= of_property_read_u32(np, "tRTP", &tim->tRTP);
+ ret |= of_property_read_u32(np, "tW2W-C2C", &tim->tW2W_C2C);
+ ret |= of_property_read_u32(np, "tR2R-C2C", &tim->tR2R_C2C);
+ ret |= of_property_read_u32(np, "tFAW", &tim->tFAW);
+ ret |= of_property_read_u32(np, "tXSR", &tim->tXSR);
+ ret |= of_property_read_u32(np, "tXP", &tim->tXP);
+ ret |= of_property_read_u32(np, "tCKE", &tim->tCKE);
+ ret |= of_property_read_u32(np, "tCKESR", &tim->tCKESR);
+ ret |= of_property_read_u32(np, "tMRD", &tim->tMRD);
+
+ return ret;
+}
+
+/**
+ * of_lpddr3_get_ddr_timings() - extracts the lpddr3 timings and updates
+ * the number of available frequencies.
+ * @np_ddr: Pointer to ddr device tree node
+ * @dev: Device requesting the ddr timings
+ * @device_type: Type of ddr
+ * @nr_frequencies: Number of frequencies available for the ddr
+ *		(updated by this function)
+ *
+ * Populates the lpddr3_timings structure by extracting data from the
+ * device tree node. Returns a pointer to the populated structure. If an
+ * error occurs while populating, NULL is returned.
+ */
+const struct lpddr3_timings
+*of_lpddr3_get_ddr_timings(struct device_node *np_ddr, struct device *dev,
+ u32 device_type, u32 *nr_frequencies)
+{
+ struct lpddr3_timings *timings = NULL;
+ u32 arr_sz = 0, i = 0;
+ struct device_node *np_tim;
+ char *tim_compat = NULL;
+
+ switch (device_type) {
+ case DDR_TYPE_LPDDR3:
+ tim_compat = "jedec,lpddr3-timings";
+ break;
+ default:
+ dev_warn(dev, "Unsupported memory type\n");
+ }
+
+ for_each_child_of_node(np_ddr, np_tim)
+ if (of_device_is_compatible(np_tim, tim_compat))
+ arr_sz++;
+
+ if (arr_sz)
+ timings = devm_kcalloc(dev, arr_sz, sizeof(*timings),
+ GFP_KERNEL);
+
+ if (!timings)
+ goto default_timings;
+
+ for_each_child_of_node(np_ddr, np_tim) {
+ if (of_device_is_compatible(np_tim, tim_compat)) {
+ if (of_lpddr3_do_get_timings(np_tim, &timings[i])) {
+ devm_kfree(dev, timings);
+ of_node_put(np_tim);
+ goto default_timings;
+ }
+ i++;
+ }
+ }
+
+ *nr_frequencies = arr_sz;
+
+ return timings;
+
+default_timings:
+ dev_warn(dev, "Failed to get timings\n");
+ *nr_frequencies = 0;
+ return NULL;
+}
+EXPORT_SYMBOL(of_lpddr3_get_ddr_timings);
+
+/**
+ * of_lpddr2_get_info() - extracts information about the lpddr2 chip.
+ * @np: Pointer to device tree node containing lpddr2 info
+ * @dev: Device requesting info
+ *
+ * Populates the lpddr2_info structure by extracting data from the device
+ * tree node. Returns a pointer to the populated structure. If an error
+ * occurs while populating, NULL is returned. If a property is missing
+ * from the device tree, the corresponding value is set to -ENOENT.
+ */
+const struct lpddr2_info
+*of_lpddr2_get_info(struct device_node *np, struct device *dev)
+{
+ struct lpddr2_info *ret_info, info = {};
+ struct property *prop;
+ const char *cp;
+ int err;
+ u32 revision_id[2];
+
+ err = of_property_read_u32_array(np, "revision-id", revision_id, 2);
+ if (!err) {
+ info.revision_id1 = revision_id[0];
+ info.revision_id2 = revision_id[1];
+ } else {
+ err = of_property_read_u32(np, "revision-id1", &info.revision_id1);
+ if (err)
+ info.revision_id1 = -ENOENT;
+
+ err = of_property_read_u32(np, "revision-id2", &info.revision_id2);
+ if (err)
+ info.revision_id2 = -ENOENT;
+ }
+
+ err = of_property_read_u32(np, "io-width", &info.io_width);
+ if (err)
+ return NULL;
+
+ info.io_width = 32 / info.io_width - 1;
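+	/* e.g. io-width 32 -> 0, 16 -> 1, 8 -> 3 */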
+
+ err = of_property_read_u32(np, "density", &info.density);
+ if (err)
+ return NULL;
+
+ info.density = ffs(info.density) - 7;
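+	/* e.g. density 64 (Mb) -> 0, 1024 -> 4, 2048 -> 5 */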
+
+ if (of_device_is_compatible(np, "jedec,lpddr2-s4"))
+ info.arch_type = LPDDR2_TYPE_S4;
+ else if (of_device_is_compatible(np, "jedec,lpddr2-s2"))
+ info.arch_type = LPDDR2_TYPE_S2;
+ else if (of_device_is_compatible(np, "jedec,lpddr2-nvm"))
+ info.arch_type = LPDDR2_TYPE_NVM;
+ else
+ return NULL;
+
+ prop = of_find_property(np, "compatible", NULL);
+ for (cp = of_prop_next_string(prop, NULL); cp;
+ cp = of_prop_next_string(prop, cp)) {
+
+#define OF_LPDDR2_VENDOR_CMP(compat, ID) \
+ if (!of_compat_cmp(cp, compat ",", strlen(compat ","))) { \
+ info.manufacturer_id = LPDDR2_MANID_##ID; \
+ break; \
+ }
+
+ OF_LPDDR2_VENDOR_CMP("samsung", SAMSUNG)
+ OF_LPDDR2_VENDOR_CMP("qimonda", QIMONDA)
+ OF_LPDDR2_VENDOR_CMP("elpida", ELPIDA)
+ OF_LPDDR2_VENDOR_CMP("etron", ETRON)
+ OF_LPDDR2_VENDOR_CMP("nanya", NANYA)
+ OF_LPDDR2_VENDOR_CMP("hynix", HYNIX)
+ OF_LPDDR2_VENDOR_CMP("mosel", MOSEL)
+ OF_LPDDR2_VENDOR_CMP("winbond", WINBOND)
+ OF_LPDDR2_VENDOR_CMP("esmt", ESMT)
+ OF_LPDDR2_VENDOR_CMP("spansion", SPANSION)
+ OF_LPDDR2_VENDOR_CMP("sst", SST)
+ OF_LPDDR2_VENDOR_CMP("zmos", ZMOS)
+ OF_LPDDR2_VENDOR_CMP("intel", INTEL)
+ OF_LPDDR2_VENDOR_CMP("numonyx", NUMONYX)
+ OF_LPDDR2_VENDOR_CMP("micron", MICRON)
+
+#undef OF_LPDDR2_VENDOR_CMP
+ }
+
+ if (!info.manufacturer_id)
+ info.manufacturer_id = -ENOENT;
+
+ ret_info = devm_kzalloc(dev, sizeof(*ret_info), GFP_KERNEL);
+ if (ret_info)
+ *ret_info = info;
+
+ return ret_info;
+}
+EXPORT_SYMBOL(of_lpddr2_get_info);
diff --git a/drivers/memory/of_memory.h b/drivers/memory/of_memory.h
new file mode 100644
index 0000000000..1c4e47fede
--- /dev/null
+++ b/drivers/memory/of_memory.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * OpenFirmware helpers for memory drivers
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ * Copyright (C) 2020 Krzysztof Kozlowski <krzk@kernel.org>
+ */
+
+#ifndef __LINUX_MEMORY_OF_REG_H
+#define __LINUX_MEMORY_OF_REG_H
+
+#if defined(CONFIG_OF) && defined(CONFIG_DDR)
+const struct lpddr2_min_tck *of_get_min_tck(struct device_node *np,
+ struct device *dev);
+const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr,
+ struct device *dev,
+ u32 device_type, u32 *nr_frequencies);
+const struct lpddr3_min_tck *of_lpddr3_get_min_tck(struct device_node *np,
+ struct device *dev);
+const struct lpddr3_timings *
+of_lpddr3_get_ddr_timings(struct device_node *np_ddr,
+ struct device *dev, u32 device_type, u32 *nr_frequencies);
+
+const struct lpddr2_info *of_lpddr2_get_info(struct device_node *np,
+ struct device *dev);
+#else
+static inline const struct lpddr2_min_tck
+ *of_get_min_tck(struct device_node *np, struct device *dev)
+{
+ return NULL;
+}
+
+static inline const struct lpddr2_timings
+ *of_get_ddr_timings(struct device_node *np_ddr, struct device *dev,
+ u32 device_type, u32 *nr_frequencies)
+{
+ return NULL;
+}
+
+static inline const struct lpddr3_min_tck
+ *of_lpddr3_get_min_tck(struct device_node *np, struct device *dev)
+{
+ return NULL;
+}
+
+static inline const struct lpddr3_timings
+ *of_lpddr3_get_ddr_timings(struct device_node *np_ddr,
+ struct device *dev, u32 device_type, u32 *nr_frequencies)
+{
+ return NULL;
+}
+
+static inline const struct lpddr2_info
+ *of_lpddr2_get_info(struct device_node *np, struct device *dev)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF && CONFIG_DDR */
+
+#endif /* __LINUX_MEMORY_OF_REG_H */
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
new file mode 100644
index 0000000000..d78f73db37
--- /dev/null
+++ b/drivers/memory/omap-gpmc.c
@@ -0,0 +1,2761 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * GPMC support functions
+ *
+ * Copyright (C) 2005-2006 Nokia Corporation
+ *
+ * Author: Juha Yrjola
+ *
+ * Copyright (C) 2009 Texas Instruments
+ * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
+ */
+#include <linux/cpu_pm.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/ioport.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/consumer.h> /* GPIO descriptor enum */
+#include <linux/gpio/machine.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/omap-gpmc.h>
+#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+
+#include <linux/platform_data/mtd-nand-omap2.h>
+
+#define DEVICE_NAME "omap-gpmc"
+
+/* GPMC register offsets */
+#define GPMC_REVISION 0x00
+#define GPMC_SYSCONFIG 0x10
+#define GPMC_SYSSTATUS 0x14
+#define GPMC_IRQSTATUS 0x18
+#define GPMC_IRQENABLE 0x1c
+#define GPMC_TIMEOUT_CONTROL 0x40
+#define GPMC_ERR_ADDRESS 0x44
+#define GPMC_ERR_TYPE 0x48
+#define GPMC_CONFIG 0x50
+#define GPMC_STATUS 0x54
+#define GPMC_PREFETCH_CONFIG1 0x1e0
+#define GPMC_PREFETCH_CONFIG2 0x1e4
+#define GPMC_PREFETCH_CONTROL 0x1ec
+#define GPMC_PREFETCH_STATUS 0x1f0
+#define GPMC_ECC_CONFIG 0x1f4
+#define GPMC_ECC_CONTROL 0x1f8
+#define GPMC_ECC_SIZE_CONFIG 0x1fc
+#define GPMC_ECC1_RESULT 0x200
+#define GPMC_ECC_BCH_RESULT_0 0x240 /* not available on OMAP2 */
+#define GPMC_ECC_BCH_RESULT_1 0x244 /* not available on OMAP2 */
+#define GPMC_ECC_BCH_RESULT_2 0x248 /* not available on OMAP2 */
+#define GPMC_ECC_BCH_RESULT_3 0x24c /* not available on OMAP2 */
+#define GPMC_ECC_BCH_RESULT_4 0x300 /* not available on OMAP2 */
+#define GPMC_ECC_BCH_RESULT_5 0x304 /* not available on OMAP2 */
+#define GPMC_ECC_BCH_RESULT_6 0x308 /* not available on OMAP2 */
+
+/* GPMC ECC control settings */
+#define GPMC_ECC_CTRL_ECCCLEAR 0x100
+#define GPMC_ECC_CTRL_ECCDISABLE 0x000
+#define GPMC_ECC_CTRL_ECCREG1 0x001
+#define GPMC_ECC_CTRL_ECCREG2 0x002
+#define GPMC_ECC_CTRL_ECCREG3 0x003
+#define GPMC_ECC_CTRL_ECCREG4 0x004
+#define GPMC_ECC_CTRL_ECCREG5 0x005
+#define GPMC_ECC_CTRL_ECCREG6 0x006
+#define GPMC_ECC_CTRL_ECCREG7 0x007
+#define GPMC_ECC_CTRL_ECCREG8 0x008
+#define GPMC_ECC_CTRL_ECCREG9 0x009
+
+#define GPMC_CONFIG_LIMITEDADDRESS BIT(1)
+
+#define GPMC_STATUS_EMPTYWRITEBUFFERSTATUS BIT(0)
+
+#define GPMC_CONFIG2_CSEXTRADELAY BIT(7)
+#define GPMC_CONFIG3_ADVEXTRADELAY BIT(7)
+#define GPMC_CONFIG4_OEEXTRADELAY BIT(7)
+#define GPMC_CONFIG4_WEEXTRADELAY BIT(23)
+#define GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN BIT(6)
+#define GPMC_CONFIG6_CYCLE2CYCLESAMECSEN BIT(7)
+
+#define GPMC_CS0_OFFSET 0x60
+#define GPMC_CS_SIZE 0x30
+#define GPMC_BCH_SIZE 0x10
+
+/*
+ * The first 1MB of GPMC address space is typically mapped to
+ * the internal ROM. Never allocate the first page, to
+ * facilitate bug detection, even if we didn't boot from ROM.
+ * As the GPMC minimum partition size is 16MB, we can only start
+ * from there.
+ */
+#define GPMC_MEM_START 0x1000000
+#define GPMC_MEM_END 0x3FFFFFFF
+
+#define GPMC_CHUNK_SHIFT 24 /* 16 MB */
+#define GPMC_SECTION_SHIFT 28 /* 128 MB */
+
+#define CS_NUM_SHIFT 24
+#define ENABLE_PREFETCH (0x1 << 7)
+#define DMA_MPU_MODE 2
+
+#define GPMC_REVISION_MAJOR(l) (((l) >> 4) & 0xf)
+#define GPMC_REVISION_MINOR(l) ((l) & 0xf)
+
+#define GPMC_HAS_WR_ACCESS 0x1
+#define GPMC_HAS_WR_DATA_MUX_BUS 0x2
+#define GPMC_HAS_MUX_AAD 0x4
+
+#define GPMC_NR_WAITPINS 4
+
+#define GPMC_CS_CONFIG1 0x00
+#define GPMC_CS_CONFIG2 0x04
+#define GPMC_CS_CONFIG3 0x08
+#define GPMC_CS_CONFIG4 0x0c
+#define GPMC_CS_CONFIG5 0x10
+#define GPMC_CS_CONFIG6 0x14
+#define GPMC_CS_CONFIG7 0x18
+#define GPMC_CS_NAND_COMMAND 0x1c
+#define GPMC_CS_NAND_ADDRESS 0x20
+#define GPMC_CS_NAND_DATA 0x24
+
+/* Control Commands */
+#define GPMC_CONFIG_RDY_BSY 0x00000001
+#define GPMC_CONFIG_DEV_SIZE 0x00000002
+#define GPMC_CONFIG_DEV_TYPE 0x00000003
+
+#define GPMC_CONFIG_WAITPINPOLARITY(pin) (BIT(pin) << 8)
+#define GPMC_CONFIG1_WRAPBURST_SUPP (1 << 31)
+#define GPMC_CONFIG1_READMULTIPLE_SUPP (1 << 30)
+#define GPMC_CONFIG1_READTYPE_ASYNC (0 << 29)
+#define GPMC_CONFIG1_READTYPE_SYNC (1 << 29)
+#define GPMC_CONFIG1_WRITEMULTIPLE_SUPP (1 << 28)
+#define GPMC_CONFIG1_WRITETYPE_ASYNC (0 << 27)
+#define GPMC_CONFIG1_WRITETYPE_SYNC (1 << 27)
+#define GPMC_CONFIG1_CLKACTIVATIONTIME(val) (((val) & 3) << 25)
+/** CLKACTIVATIONTIME Max Ticks */
+#define GPMC_CONFIG1_CLKACTIVATIONTIME_MAX 2
+#define GPMC_CONFIG1_PAGE_LEN(val) (((val) & 3) << 23)
+/** ATTACHEDDEVICEPAGELENGTH Max Value */
+#define GPMC_CONFIG1_ATTACHEDDEVICEPAGELENGTH_MAX 2
+#define GPMC_CONFIG1_WAIT_READ_MON (1 << 22)
+#define GPMC_CONFIG1_WAIT_WRITE_MON (1 << 21)
+#define GPMC_CONFIG1_WAIT_MON_TIME(val) (((val) & 3) << 18)
+/** WAITMONITORINGTIME Max Ticks */
+#define GPMC_CONFIG1_WAITMONITORINGTIME_MAX 2
+#define GPMC_CONFIG1_WAIT_PIN_SEL(val) (((val) & 3) << 16)
+#define GPMC_CONFIG1_DEVICESIZE(val) (((val) & 3) << 12)
+#define GPMC_CONFIG1_DEVICESIZE_16 GPMC_CONFIG1_DEVICESIZE(1)
+/** DEVICESIZE Max Value */
+#define GPMC_CONFIG1_DEVICESIZE_MAX 1
+#define GPMC_CONFIG1_DEVICETYPE(val) (((val) & 3) << 10)
+#define GPMC_CONFIG1_DEVICETYPE_NOR GPMC_CONFIG1_DEVICETYPE(0)
+#define GPMC_CONFIG1_MUXTYPE(val) (((val) & 3) << 8)
+#define GPMC_CONFIG1_TIME_PARA_GRAN (1 << 4)
+#define GPMC_CONFIG1_FCLK_DIV(val) ((val) & 3)
+#define GPMC_CONFIG1_FCLK_DIV2 (GPMC_CONFIG1_FCLK_DIV(1))
+#define GPMC_CONFIG1_FCLK_DIV3 (GPMC_CONFIG1_FCLK_DIV(2))
+#define GPMC_CONFIG1_FCLK_DIV4 (GPMC_CONFIG1_FCLK_DIV(3))
+#define GPMC_CONFIG7_CSVALID (1 << 6)
+
+#define GPMC_CONFIG7_BASEADDRESS_MASK 0x3f
+#define GPMC_CONFIG7_CSVALID_MASK BIT(6)
+#define GPMC_CONFIG7_MASKADDRESS_OFFSET 8
+#define GPMC_CONFIG7_MASKADDRESS_MASK (0xf << GPMC_CONFIG7_MASKADDRESS_OFFSET)
+/* All CONFIG7 bits except reserved bits */
+#define GPMC_CONFIG7_MASK (GPMC_CONFIG7_BASEADDRESS_MASK | \
+ GPMC_CONFIG7_CSVALID_MASK | \
+ GPMC_CONFIG7_MASKADDRESS_MASK)
+
+#define GPMC_DEVICETYPE_NOR 0
+#define GPMC_DEVICETYPE_NAND 2
+#define GPMC_CONFIG_WRITEPROTECT 0x00000010
+#define WR_RD_PIN_MONITORING 0x00600000
+
+/* ECC commands */
+#define GPMC_ECC_READ 0 /* Reset Hardware ECC for read */
+#define GPMC_ECC_WRITE 1 /* Reset Hardware ECC for write */
+#define GPMC_ECC_READSYN 2 /* Reset before syndrom is read back */
+
+#define GPMC_NR_NAND_IRQS 2 /* number of NAND specific IRQs */
+
+enum gpmc_clk_domain {
+ GPMC_CD_FCLK,
+ GPMC_CD_CLK
+};
+
+struct gpmc_cs_data {
+ const char *name;
+
+#define GPMC_CS_RESERVED (1 << 0)
+ u32 flags;
+
+ struct resource mem;
+};
+
+/* Structure to save gpmc cs context */
+struct gpmc_cs_config {
+ u32 config1;
+ u32 config2;
+ u32 config3;
+ u32 config4;
+ u32 config5;
+ u32 config6;
+ u32 config7;
+ int is_valid;
+};
+
+/*
+ * Structure to save/restore gpmc context
+ * to support core off on OMAP3
+ */
+struct omap3_gpmc_regs {
+ u32 sysconfig;
+ u32 irqenable;
+ u32 timeout_ctrl;
+ u32 config;
+ u32 prefetch_config1;
+ u32 prefetch_config2;
+ u32 prefetch_control;
+ struct gpmc_cs_config cs_context[GPMC_CS_NUM];
+};
+
+struct gpmc_waitpin {
+ u32 pin;
+ u32 polarity;
+ struct gpio_desc *desc;
+};
+
+struct gpmc_device {
+ struct device *dev;
+ int irq;
+ struct irq_chip irq_chip;
+ struct gpio_chip gpio_chip;
+ struct notifier_block nb;
+ struct omap3_gpmc_regs context;
+ struct gpmc_waitpin *waitpins;
+ int nirqs;
+ unsigned int is_suspended:1;
+ struct resource *data;
+};
+
+static struct irq_domain *gpmc_irq_domain;
+
+static struct resource gpmc_mem_root;
+static struct gpmc_cs_data gpmc_cs[GPMC_CS_NUM];
+static DEFINE_SPINLOCK(gpmc_mem_lock);
+/* Define chip-selects as reserved by default until probe completes */
+static unsigned int gpmc_cs_num = GPMC_CS_NUM;
+static unsigned int gpmc_nr_waitpins;
+static unsigned int gpmc_capability;
+static void __iomem *gpmc_base;
+
+static struct clk *gpmc_l3_clk;
+
+static irqreturn_t gpmc_handle_irq(int irq, void *dev);
+
+static void gpmc_write_reg(int idx, u32 val)
+{
+ writel_relaxed(val, gpmc_base + idx);
+}
+
+static u32 gpmc_read_reg(int idx)
+{
+ return readl_relaxed(gpmc_base + idx);
+}
+
+void gpmc_cs_write_reg(int cs, int idx, u32 val)
+{
+ void __iomem *reg_addr;
+
+ reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
+ writel_relaxed(val, reg_addr);
+}
+
+static u32 gpmc_cs_read_reg(int cs, int idx)
+{
+ void __iomem *reg_addr;
+
+ reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
+ return readl_relaxed(reg_addr);
+}
+
+/* TODO: Add support for gpmc_fck to clock framework and use it */
+static unsigned long gpmc_get_fclk_period(void)
+{
+ unsigned long rate = clk_get_rate(gpmc_l3_clk);
+
+ rate /= 1000;
+ rate = 1000000000 / rate; /* In picoseconds */
+
+ return rate;
+}
+
+/**
+ * gpmc_get_clk_period - get period of selected clock domain in ps
+ * @cs: Chip Select Region.
+ * @cd: Clock Domain.
+ *
+ * GPMC_CS_CONFIG1 GPMCFCLKDIVIDER for cs has to be setup
+ * prior to calling this function with GPMC_CD_CLK.
+ */
+static unsigned long gpmc_get_clk_period(int cs, enum gpmc_clk_domain cd)
+{
+ unsigned long tick_ps = gpmc_get_fclk_period();
+ u32 l;
+ int div;
+
+ switch (cd) {
+ case GPMC_CD_CLK:
+ /* get current clk divider */
+ l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
+ div = (l & 0x03) + 1;
+ /* get GPMC_CLK period */
+ tick_ps *= div;
+ break;
+ case GPMC_CD_FCLK:
+ default:
+ break;
+ }
+
+ return tick_ps;
+}
+
+static unsigned int gpmc_ns_to_clk_ticks(unsigned int time_ns, int cs,
+ enum gpmc_clk_domain cd)
+{
+ unsigned long tick_ps;
+
+ /* Calculate in picosecs to yield more exact results */
+ tick_ps = gpmc_get_clk_period(cs, cd);
+
+ return (time_ns * 1000 + tick_ps - 1) / tick_ps;
+}
+
+static unsigned int gpmc_ns_to_ticks(unsigned int time_ns)
+{
+ return gpmc_ns_to_clk_ticks(time_ns, /* any CS */ 0, GPMC_CD_FCLK);
+}
+
+static unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
+{
+ unsigned long tick_ps;
+
+ /* Calculate in picosecs to yield more exact results */
+ tick_ps = gpmc_get_fclk_period();
+
+ return (time_ps + tick_ps - 1) / tick_ps;
+}
+
+static unsigned int gpmc_clk_ticks_to_ns(unsigned int ticks, int cs,
+ enum gpmc_clk_domain cd)
+{
+ return ticks * gpmc_get_clk_period(cs, cd) / 1000;
+}
+
+unsigned int gpmc_ticks_to_ns(unsigned int ticks)
+{
+ return gpmc_clk_ticks_to_ns(ticks, /* any CS */ 0, GPMC_CD_FCLK);
+}
+
+static unsigned int gpmc_ticks_to_ps(unsigned int ticks)
+{
+ return ticks * gpmc_get_fclk_period();
+}
+
+static unsigned int gpmc_round_ps_to_ticks(unsigned int time_ps)
+{
+ unsigned long ticks = gpmc_ps_to_ticks(time_ps);
+
+ return ticks * gpmc_get_fclk_period();
+}
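+
+/*
+ * Worked example for the conversion helpers above, assuming a 100 MHz
+ * L3 clock: gpmc_get_fclk_period() returns 10000 ps per tick, so
+ * gpmc_ps_to_ticks(25000) rounds up to 3 ticks and
+ * gpmc_round_ps_to_ticks(25000) returns 3 * 10000 = 30000 ps, i.e. the
+ * requested time rounded up to a whole number of GPMC_FCLK cycles.
+ */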
+
+static inline void gpmc_cs_modify_reg(int cs, int reg, u32 mask, bool value)
+{
+ u32 l;
+
+ l = gpmc_cs_read_reg(cs, reg);
+ if (value)
+ l |= mask;
+ else
+ l &= ~mask;
+ gpmc_cs_write_reg(cs, reg, l);
+}
+
+static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
+{
+ gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG1,
+ GPMC_CONFIG1_TIME_PARA_GRAN,
+ p->time_para_granularity);
+ gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG2,
+ GPMC_CONFIG2_CSEXTRADELAY, p->cs_extra_delay);
+ gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG3,
+ GPMC_CONFIG3_ADVEXTRADELAY, p->adv_extra_delay);
+ gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
+ GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
+ gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
+ GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay);
+ gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
+ GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
+ p->cycle2cyclesamecsen);
+ gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
+ GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN,
+ p->cycle2cyclediffcsen);
+}
+
+#ifdef CONFIG_OMAP_GPMC_DEBUG
+/**
+ * get_gpmc_timing_reg - read a timing parameter and print DTS settings for it.
+ * @cs: Chip Select Region
+ * @reg: GPMC_CS_CONFIGn register offset.
+ * @st_bit: Start Bit
+ * @end_bit: End Bit. Must be >= @st_bit.
+ * @max: Maximum parameter value (before optional @shift).
+ * If 0, maximum is as high as @st_bit and @end_bit allow.
+ * @name: DTS node name, w/o "gpmc,"
+ * @cd: Clock Domain of timing parameter.
+ * @shift: @shift is left-shifted by the parameter value; the shifted
+ * result is then printed and returned instead of the raw value.
+ * @raw: Raw Format Option.
+ * raw format: gpmc,name = <value>
+ * tick format: gpmc,name = <value> /&zwj;* x ns -- y ns; x ticks *&zwj;/
+ * Where x ns -- y ns result in the same tick value.
+ * When @max is exceeded, "invalid" is printed inside comment.
+ * @noval: Parameter values equal to 0 are not printed.
+ * @return: Specified timing parameter (after optional @shift).
+ *
+ */
+static int get_gpmc_timing_reg(
+ /* timing specifiers */
+ int cs, int reg, int st_bit, int end_bit, int max,
+ const char *name, const enum gpmc_clk_domain cd,
+ /* value transform */
+ int shift,
+ /* format specifiers */
+ bool raw, bool noval)
+{
+ u32 l;
+ int nr_bits;
+ int mask;
+ bool invalid;
+
+ l = gpmc_cs_read_reg(cs, reg);
+ nr_bits = end_bit - st_bit + 1;
+ mask = (1 << nr_bits) - 1;
+ l = (l >> st_bit) & mask;
+ if (!max)
+ max = mask;
+ invalid = l > max;
+ if (shift)
+ l = (shift << l);
+ if (noval && (l == 0))
+ return 0;
+ if (!raw) {
+ /* DTS tick format for timings in ns */
+ unsigned int time_ns;
+ unsigned int time_ns_min = 0;
+
+ if (l)
+ time_ns_min = gpmc_clk_ticks_to_ns(l - 1, cs, cd) + 1;
+ time_ns = gpmc_clk_ticks_to_ns(l, cs, cd);
+ pr_info("gpmc,%s = <%u>; /* %u ns - %u ns; %i ticks%s*/\n",
+ name, time_ns, time_ns_min, time_ns, l,
+ invalid ? "; invalid " : " ");
+ } else {
+ /* raw format */
+ pr_info("gpmc,%s = <%u>;%s\n", name, l,
+ invalid ? " /* invalid */" : "");
+ }
+
+ return l;
+}
+
+#define GPMC_PRINT_CONFIG(cs, config) \
+ pr_info("cs%i %s: 0x%08x\n", cs, #config, \
+ gpmc_cs_read_reg(cs, config))
+#define GPMC_GET_RAW(reg, st, end, field) \
+ get_gpmc_timing_reg(cs, (reg), (st), (end), 0, field, GPMC_CD_FCLK, 0, 1, 0)
+#define GPMC_GET_RAW_MAX(reg, st, end, max, field) \
+ get_gpmc_timing_reg(cs, (reg), (st), (end), (max), field, GPMC_CD_FCLK, 0, 1, 0)
+#define GPMC_GET_RAW_BOOL(reg, st, end, field) \
+ get_gpmc_timing_reg(cs, (reg), (st), (end), 0, field, GPMC_CD_FCLK, 0, 1, 1)
+#define GPMC_GET_RAW_SHIFT_MAX(reg, st, end, shift, max, field) \
+ get_gpmc_timing_reg(cs, (reg), (st), (end), (max), field, GPMC_CD_FCLK, (shift), 1, 1)
+#define GPMC_GET_TICKS(reg, st, end, field) \
+ get_gpmc_timing_reg(cs, (reg), (st), (end), 0, field, GPMC_CD_FCLK, 0, 0, 0)
+#define GPMC_GET_TICKS_CD(reg, st, end, field, cd) \
+ get_gpmc_timing_reg(cs, (reg), (st), (end), 0, field, (cd), 0, 0, 0)
+#define GPMC_GET_TICKS_CD_MAX(reg, st, end, max, field, cd) \
+ get_gpmc_timing_reg(cs, (reg), (st), (end), (max), field, (cd), 0, 0, 0)
+
+static void gpmc_show_regs(int cs, const char *desc)
+{
+ pr_info("gpmc cs%i %s:\n", cs, desc);
+ GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG1);
+ GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG2);
+ GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG3);
+ GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG4);
+ GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG5);
+ GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG6);
+}
+
+/*
+ * Note that gpmc,wait-pin handling wrongly assumes bit 8 is available,
+ * see commit c9fb809.
+ */
+static void gpmc_cs_show_timings(int cs, const char *desc)
+{
+ gpmc_show_regs(cs, desc);
+
+ pr_info("gpmc cs%i access configuration:\n", cs);
+ GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 4, 4, "time-para-granularity");
+ GPMC_GET_RAW(GPMC_CS_CONFIG1, 8, 9, "mux-add-data");
+ GPMC_GET_RAW_SHIFT_MAX(GPMC_CS_CONFIG1, 12, 13, 1,
+ GPMC_CONFIG1_DEVICESIZE_MAX, "device-width");
+ GPMC_GET_RAW(GPMC_CS_CONFIG1, 16, 17, "wait-pin");
+ GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 21, 21, "wait-on-write");
+ GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 22, 22, "wait-on-read");
+ GPMC_GET_RAW_SHIFT_MAX(GPMC_CS_CONFIG1, 23, 24, 4,
+ GPMC_CONFIG1_ATTACHEDDEVICEPAGELENGTH_MAX,
+ "burst-length");
+ GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 27, 27, "sync-write");
+ GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 28, 28, "burst-write");
+ GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 29, 29, "sync-read");
+ GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 30, 30, "burst-read");
+ GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 31, 31, "burst-wrap");
+
+ GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG2, 7, 7, "cs-extra-delay");
+
+ GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG3, 7, 7, "adv-extra-delay");
+
+ GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG4, 23, 23, "we-extra-delay");
+ GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG4, 7, 7, "oe-extra-delay");
+
+ GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG6, 7, 7, "cycle2cycle-samecsen");
+ GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG6, 6, 6, "cycle2cycle-diffcsen");
+
+ pr_info("gpmc cs%i timings configuration:\n", cs);
+ GPMC_GET_TICKS(GPMC_CS_CONFIG2, 0, 3, "cs-on-ns");
+ GPMC_GET_TICKS(GPMC_CS_CONFIG2, 8, 12, "cs-rd-off-ns");
+ GPMC_GET_TICKS(GPMC_CS_CONFIG2, 16, 20, "cs-wr-off-ns");
+
+ GPMC_GET_TICKS(GPMC_CS_CONFIG3, 0, 3, "adv-on-ns");
+ GPMC_GET_TICKS(GPMC_CS_CONFIG3, 8, 12, "adv-rd-off-ns");
+ GPMC_GET_TICKS(GPMC_CS_CONFIG3, 16, 20, "adv-wr-off-ns");
+ if (gpmc_capability & GPMC_HAS_MUX_AAD) {
+ GPMC_GET_TICKS(GPMC_CS_CONFIG3, 4, 6, "adv-aad-mux-on-ns");
+ GPMC_GET_TICKS(GPMC_CS_CONFIG3, 24, 26,
+ "adv-aad-mux-rd-off-ns");
+ GPMC_GET_TICKS(GPMC_CS_CONFIG3, 28, 30,
+ "adv-aad-mux-wr-off-ns");
+ }
+
+ GPMC_GET_TICKS(GPMC_CS_CONFIG4, 0, 3, "oe-on-ns");
+ GPMC_GET_TICKS(GPMC_CS_CONFIG4, 8, 12, "oe-off-ns");
+ if (gpmc_capability & GPMC_HAS_MUX_AAD) {
+ GPMC_GET_TICKS(GPMC_CS_CONFIG4, 4, 6, "oe-aad-mux-on-ns");
+ GPMC_GET_TICKS(GPMC_CS_CONFIG4, 13, 15, "oe-aad-mux-off-ns");
+ }
+ GPMC_GET_TICKS(GPMC_CS_CONFIG4, 16, 19, "we-on-ns");
+ GPMC_GET_TICKS(GPMC_CS_CONFIG4, 24, 28, "we-off-ns");
+
+ GPMC_GET_TICKS(GPMC_CS_CONFIG5, 0, 4, "rd-cycle-ns");
+ GPMC_GET_TICKS(GPMC_CS_CONFIG5, 8, 12, "wr-cycle-ns");
+ GPMC_GET_TICKS(GPMC_CS_CONFIG5, 16, 20, "access-ns");
+
+ GPMC_GET_TICKS(GPMC_CS_CONFIG5, 24, 27, "page-burst-access-ns");
+
+ GPMC_GET_TICKS(GPMC_CS_CONFIG6, 0, 3, "bus-turnaround-ns");
+ GPMC_GET_TICKS(GPMC_CS_CONFIG6, 8, 11, "cycle2cycle-delay-ns");
+
+ GPMC_GET_TICKS_CD_MAX(GPMC_CS_CONFIG1, 18, 19,
+ GPMC_CONFIG1_WAITMONITORINGTIME_MAX,
+ "wait-monitoring-ns", GPMC_CD_CLK);
+ GPMC_GET_TICKS_CD_MAX(GPMC_CS_CONFIG1, 25, 26,
+ GPMC_CONFIG1_CLKACTIVATIONTIME_MAX,
+ "clk-activation-ns", GPMC_CD_FCLK);
+
+ GPMC_GET_TICKS(GPMC_CS_CONFIG6, 16, 19, "wr-data-mux-bus-ns");
+ GPMC_GET_TICKS(GPMC_CS_CONFIG6, 24, 28, "wr-access-ns");
+}
+#else
+static inline void gpmc_cs_show_timings(int cs, const char *desc)
+{
+}
+#endif
+
+/**
+ * set_gpmc_timing_reg - set a single timing parameter for Chip Select Region.
+ * Caller is expected to have initialized CONFIG1 GPMCFCLKDIVIDER
+ * prior to calling this function with @cd equal to GPMC_CD_CLK.
+ *
+ * @cs: Chip Select Region.
+ * @reg: GPMC_CS_CONFIGn register offset.
+ * @st_bit: Start Bit
+ * @end_bit: End Bit. Must be >= @st_bit.
+ * @max: Maximum parameter value.
+ * If 0, maximum is as high as @st_bit and @end_bit allow.
+ * @time: Timing parameter in ns.
+ * @cd: Timing parameter clock domain.
+ * @name: Timing parameter name.
+ * @return: 0 on success, -1 on error.
+ */
+static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit, int max,
+ int time, enum gpmc_clk_domain cd, const char *name)
+{
+ u32 l;
+ int ticks, mask, nr_bits;
+
+ if (time == 0)
+ ticks = 0;
+ else
+ ticks = gpmc_ns_to_clk_ticks(time, cs, cd);
+ nr_bits = end_bit - st_bit + 1;
+ mask = (1 << nr_bits) - 1;
+
+ if (!max)
+ max = mask;
+
+ if (ticks > max) {
+ pr_err("%s: GPMC CS%d: %s %d ns, %d ticks > %d ticks\n",
+ __func__, cs, name, time, ticks, max);
+
+ return -1;
+ }
+
+ l = gpmc_cs_read_reg(cs, reg);
+#ifdef CONFIG_OMAP_GPMC_DEBUG
+ pr_info("GPMC CS%d: %-17s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
+ cs, name, ticks, gpmc_get_clk_period(cs, cd) * ticks / 1000,
+ (l >> st_bit) & mask, time);
+#endif
+ l &= ~(mask << st_bit);
+ l |= ticks << st_bit;
+ gpmc_cs_write_reg(cs, reg, l);
+
+ return 0;
+}
+
+/**
+ * gpmc_calc_waitmonitoring_divider - calculate proper GPMCFCLKDIVIDER based on WAITMONITORINGTIME
+ * WAITMONITORINGTIME will be _at least_ as long as desired, i.e.
+ * read --> don't sample bus too early
+ * write --> data is longer on bus
+ *
+ * Formula:
+ * gpmc_clk_div + 1 = ceil(ceil(waitmonitoringtime_ns / gpmc_fclk_ns)
+ * / waitmonitoring_ticks)
+ * WAITMONITORINGTIME resulting in 0 or 1 tick with div = 1 are caught by
+ * div <= 0 check.
+ *
+ * @wait_monitoring: WAITMONITORINGTIME in ns.
+ * @return: -1 on failure to scale, else proper divider > 0.
+ */
+static int gpmc_calc_waitmonitoring_divider(unsigned int wait_monitoring)
+{
+ int div = gpmc_ns_to_ticks(wait_monitoring);
+
+ div += GPMC_CONFIG1_WAITMONITORINGTIME_MAX - 1;
+ div /= GPMC_CONFIG1_WAITMONITORINGTIME_MAX;
+
+ if (div > 4)
+ return -1;
+ if (div <= 0)
+ div = 1;
+
+ return div;
+}
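+
+/*
+ * Worked example, assuming a 100 MHz GPMC_FCLK (10 ns per tick): a
+ * 50 ns WAITMONITORINGTIME needs gpmc_ns_to_ticks(50) = 5 fclk ticks.
+ * Since at most GPMC_CONFIG1_WAITMONITORINGTIME_MAX = 2 GPMC_CLK ticks
+ * are available, the function returns ceil(5 / 2) = 3, and two
+ * GPMC_CLK ticks at fclk/3 cover 60 ns >= 50 ns.
+ */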
+
+/**
+ * gpmc_calc_divider - calculate GPMC_FCLK divider for sync_clk GPMC_CLK period.
+ * @sync_clk: GPMC_CLK period in ps.
+ * @return: Returns at least 1 if GPMC_FCLK can be divided to GPMC_CLK.
+ * Else, returns -1.
+ */
+int gpmc_calc_divider(unsigned int sync_clk)
+{
+ int div = gpmc_ps_to_ticks(sync_clk);
+
+ if (div > 4)
+ return -1;
+ if (div <= 0)
+ div = 1;
+
+ return div;
+}
+
+/**
+ * gpmc_cs_set_timings - program timing parameters for Chip Select Region.
+ * @cs: Chip Select Region.
+ * @t: GPMC timing parameters.
+ * @s: GPMC timing settings.
+ * @return: 0 on success, -1 on error.
+ */
+int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
+ const struct gpmc_settings *s)
+{
+ int div, ret;
+ u32 l;
+
+ div = gpmc_calc_divider(t->sync_clk);
+ if (div < 0)
+ return -EINVAL;
+
+ /*
+ * See if we need to change the divider for waitmonitoringtime.
+ *
+ * Calculate GPMCFCLKDIVIDER independent of gpmc,sync-clk-ps in DT for
+ * pure asynchronous accesses, i.e. both read and write asynchronous.
+ * However, only do so if WAITMONITORINGTIME is actually used, i.e.
+ * either WAITREADMONITORING or WAITWRITEMONITORING is set.
+ *
+ * This statement must not change div to scale async WAITMONITORINGTIME
+ * to protect mixed synchronous and asynchronous accesses.
+ *
+ * We raise an error later if WAITMONITORINGTIME does not fit.
+ */
+ if (!s->sync_read && !s->sync_write &&
+ (s->wait_on_read || s->wait_on_write)
+ ) {
+ div = gpmc_calc_waitmonitoring_divider(t->wait_monitoring);
+ if (div < 0) {
+ pr_err("%s: waitmonitoringtime %3d ns too large for greatest gpmcfclkdivider.\n",
+ __func__,
+ t->wait_monitoring
+ );
+ return -ENXIO;
+ }
+ }
+
+ ret = 0;
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG2, 0, 3, 0, t->cs_on,
+ GPMC_CD_FCLK, "cs_on");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG2, 8, 12, 0, t->cs_rd_off,
+ GPMC_CD_FCLK, "cs_rd_off");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG2, 16, 20, 0, t->cs_wr_off,
+ GPMC_CD_FCLK, "cs_wr_off");
+ if (ret)
+ return -ENXIO;
+
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG3, 0, 3, 0, t->adv_on,
+ GPMC_CD_FCLK, "adv_on");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG3, 8, 12, 0, t->adv_rd_off,
+ GPMC_CD_FCLK, "adv_rd_off");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG3, 16, 20, 0, t->adv_wr_off,
+ GPMC_CD_FCLK, "adv_wr_off");
+ if (ret)
+ return -ENXIO;
+
+ if (gpmc_capability & GPMC_HAS_MUX_AAD) {
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG3, 4, 6, 0,
+ t->adv_aad_mux_on, GPMC_CD_FCLK,
+ "adv_aad_mux_on");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG3, 24, 26, 0,
+ t->adv_aad_mux_rd_off, GPMC_CD_FCLK,
+ "adv_aad_mux_rd_off");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG3, 28, 30, 0,
+ t->adv_aad_mux_wr_off, GPMC_CD_FCLK,
+ "adv_aad_mux_wr_off");
+ if (ret)
+ return -ENXIO;
+ }
+
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG4, 0, 3, 0, t->oe_on,
+ GPMC_CD_FCLK, "oe_on");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG4, 8, 12, 0, t->oe_off,
+ GPMC_CD_FCLK, "oe_off");
+ if (gpmc_capability & GPMC_HAS_MUX_AAD) {
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG4, 4, 6, 0,
+ t->oe_aad_mux_on, GPMC_CD_FCLK,
+ "oe_aad_mux_on");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG4, 13, 15, 0,
+ t->oe_aad_mux_off, GPMC_CD_FCLK,
+ "oe_aad_mux_off");
+ }
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG4, 16, 19, 0, t->we_on,
+ GPMC_CD_FCLK, "we_on");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG4, 24, 28, 0, t->we_off,
+ GPMC_CD_FCLK, "we_off");
+ if (ret)
+ return -ENXIO;
+
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG5, 0, 4, 0, t->rd_cycle,
+ GPMC_CD_FCLK, "rd_cycle");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG5, 8, 12, 0, t->wr_cycle,
+ GPMC_CD_FCLK, "wr_cycle");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG5, 16, 20, 0, t->access,
+ GPMC_CD_FCLK, "access");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG5, 24, 27, 0,
+ t->page_burst_access, GPMC_CD_FCLK,
+ "page_burst_access");
+ if (ret)
+ return -ENXIO;
+
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG6, 0, 3, 0,
+ t->bus_turnaround, GPMC_CD_FCLK,
+ "bus_turnaround");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG6, 8, 11, 0,
+ t->cycle2cycle_delay, GPMC_CD_FCLK,
+ "cycle2cycle_delay");
+ if (ret)
+ return -ENXIO;
+
+ if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS) {
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG6, 16, 19, 0,
+ t->wr_data_mux_bus, GPMC_CD_FCLK,
+ "wr_data_mux_bus");
+ if (ret)
+ return -ENXIO;
+ }
+ if (gpmc_capability & GPMC_HAS_WR_ACCESS) {
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG6, 24, 28, 0,
+ t->wr_access, GPMC_CD_FCLK,
+ "wr_access");
+ if (ret)
+ return -ENXIO;
+ }
+
+ l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
+ l &= ~0x03;
+ l |= (div - 1);
+ gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l);
+
+ ret = 0;
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG1, 18, 19,
+ GPMC_CONFIG1_WAITMONITORINGTIME_MAX,
+ t->wait_monitoring, GPMC_CD_CLK,
+ "wait_monitoring");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG1, 25, 26,
+ GPMC_CONFIG1_CLKACTIVATIONTIME_MAX,
+ t->clk_activation, GPMC_CD_FCLK,
+ "clk_activation");
+ if (ret)
+ return -ENXIO;
+
+#ifdef CONFIG_OMAP_GPMC_DEBUG
+ pr_info("GPMC CS%d CLK period is %lu ns (div %d)\n",
+ cs, (div * gpmc_get_fclk_period()) / 1000, div);
+#endif
+
+ gpmc_cs_bool_timings(cs, &t->bool_timings);
+ gpmc_cs_show_timings(cs, "after gpmc_cs_set_timings");
+
+ return 0;
+}
+
+static int gpmc_cs_set_memconf(int cs, u32 base, u32 size)
+{
+ u32 l;
+ u32 mask;
+
+ /*
+ * Ensure that base address is aligned on a
+ * boundary equal to or greater than size.
+ */
+ if (base & (size - 1))
+ return -EINVAL;
+
+ base >>= GPMC_CHUNK_SHIFT;
+ mask = (1 << GPMC_SECTION_SHIFT) - size;
+ mask >>= GPMC_CHUNK_SHIFT;
+ mask <<= GPMC_CONFIG7_MASKADDRESS_OFFSET;
+
+ l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
+ l &= ~GPMC_CONFIG7_MASK;
+ l |= base & GPMC_CONFIG7_BASEADDRESS_MASK;
+ l |= mask & GPMC_CONFIG7_MASKADDRESS_MASK;
+ l |= GPMC_CONFIG7_CSVALID;
+ gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
+
+ return 0;
+}
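+
+/*
+ * Encoding example: mapping 64 MB at 0x10000000 yields
+ * base = 0x10000000 >> GPMC_CHUNK_SHIFT = 0x10 for BASEADDRESS and
+ * mask = ((1 << GPMC_SECTION_SHIFT) - SZ_64M) >> GPMC_CHUNK_SHIFT = 0xc
+ * for MASKADDRESS, i.e. the upper address bits that must match for
+ * this chip select to be activated.
+ */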
+
+static void gpmc_cs_enable_mem(int cs)
+{
+ u32 l;
+
+ l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
+ l |= GPMC_CONFIG7_CSVALID;
+ gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
+}
+
+static void gpmc_cs_disable_mem(int cs)
+{
+ u32 l;
+
+ l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
+ l &= ~GPMC_CONFIG7_CSVALID;
+ gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
+}
+
+static void gpmc_cs_get_memconf(int cs, u32 *base, u32 *size)
+{
+ u32 l;
+ u32 mask;
+
+ l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
+ *base = (l & 0x3f) << GPMC_CHUNK_SHIFT;
+ mask = (l >> 8) & 0x0f;
+ *size = (1 << GPMC_SECTION_SHIFT) - (mask << GPMC_CHUNK_SHIFT);
+}
+
+static int gpmc_cs_mem_enabled(int cs)
+{
+ u32 l;
+
+ l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
+ return l & GPMC_CONFIG7_CSVALID;
+}
+
+static void gpmc_cs_set_reserved(int cs, int reserved)
+{
+ struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
+
+ if (reserved)
+ gpmc->flags |= GPMC_CS_RESERVED;
+ else
+ gpmc->flags &= ~GPMC_CS_RESERVED;
+}
+
+static bool gpmc_cs_reserved(int cs)
+{
+ struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
+
+ return gpmc->flags & GPMC_CS_RESERVED;
+}
+
+static unsigned long gpmc_mem_align(unsigned long size)
+{
+ int order;
+
+ size = (size - 1) >> (GPMC_CHUNK_SHIFT - 1);
+ order = GPMC_CHUNK_SHIFT - 1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ size = 1 << order;
+ return size;
+}
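+
+/*
+ * gpmc_mem_align() rounds a request up to the next power of two with a
+ * floor of one GPMC chunk: 1 KB and 16 MB both yield 16 MB
+ * (1 << GPMC_CHUNK_SHIFT), while 20 MB yields 32 MB.
+ */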
+
+static int gpmc_cs_insert_mem(int cs, unsigned long base, unsigned long size)
+{
+ struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
+ struct resource *res = &gpmc->mem;
+ int r;
+
+ size = gpmc_mem_align(size);
+ spin_lock(&gpmc_mem_lock);
+ res->start = base;
+ res->end = base + size - 1;
+ r = request_resource(&gpmc_mem_root, res);
+ spin_unlock(&gpmc_mem_lock);
+
+ return r;
+}
+
+static int gpmc_cs_delete_mem(int cs)
+{
+ struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
+ struct resource *res = &gpmc->mem;
+ int r;
+
+ spin_lock(&gpmc_mem_lock);
+ r = release_resource(res);
+ res->start = 0;
+ res->end = 0;
+ spin_unlock(&gpmc_mem_lock);
+
+ return r;
+}
+
+int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
+{
+ struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
+ struct resource *res = &gpmc->mem;
+ int r = -1;
+
+ if (cs >= gpmc_cs_num) {
+ pr_err("%s: requested chip-select is disabled\n", __func__);
+ return -ENODEV;
+ }
+ size = gpmc_mem_align(size);
+ if (size > (1 << GPMC_SECTION_SHIFT))
+ return -ENOMEM;
+
+ spin_lock(&gpmc_mem_lock);
+ if (gpmc_cs_reserved(cs)) {
+ r = -EBUSY;
+ goto out;
+ }
+ if (gpmc_cs_mem_enabled(cs))
+ r = adjust_resource(res, res->start & ~(size - 1), size);
+ if (r < 0)
+ r = allocate_resource(&gpmc_mem_root, res, size, 0, ~0,
+ size, NULL, NULL);
+ if (r < 0)
+ goto out;
+
+ /* Disable CS while changing base address and size mask */
+ gpmc_cs_disable_mem(cs);
+
+ r = gpmc_cs_set_memconf(cs, res->start, resource_size(res));
+ if (r < 0) {
+ release_resource(res);
+ goto out;
+ }
+
+ /* Enable CS */
+ gpmc_cs_enable_mem(cs);
+ *base = res->start;
+ gpmc_cs_set_reserved(cs, 1);
+out:
+ spin_unlock(&gpmc_mem_lock);
+ return r;
+}
+EXPORT_SYMBOL(gpmc_cs_request);
+
+void gpmc_cs_free(int cs)
+{
+ struct gpmc_cs_data *gpmc;
+ struct resource *res;
+
+ spin_lock(&gpmc_mem_lock);
+ if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) {
+ WARN(1, "Trying to free non-reserved GPMC CS%d\n", cs);
+ spin_unlock(&gpmc_mem_lock);
+ return;
+ }
+ gpmc = &gpmc_cs[cs];
+ res = &gpmc->mem;
+
+ gpmc_cs_disable_mem(cs);
+ if (res->flags)
+ release_resource(res);
+ gpmc_cs_set_reserved(cs, 0);
+ spin_unlock(&gpmc_mem_lock);
+}
+EXPORT_SYMBOL(gpmc_cs_free);
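+
+/*
+ * Typical claim/release sequence (a sketch; the chosen chip select and
+ * window size are hypothetical):
+ *
+ *	unsigned long base;
+ *	int ret;
+ *
+ *	ret = gpmc_cs_request(cs, SZ_16M, &base);
+ *	if (ret < 0)
+ *		return ret;
+ *	... access the device window at "base" ...
+ *	gpmc_cs_free(cs);
+ */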
+
+static bool gpmc_is_valid_waitpin(u32 waitpin)
+{
+ return waitpin < gpmc_nr_waitpins;
+}
+
+static int gpmc_alloc_waitpin(struct gpmc_device *gpmc,
+ struct gpmc_settings *p)
+{
+ int ret;
+ struct gpmc_waitpin *waitpin;
+ struct gpio_desc *waitpin_desc;
+
+ if (!gpmc_is_valid_waitpin(p->wait_pin))
+ return -EINVAL;
+
+ waitpin = &gpmc->waitpins[p->wait_pin];
+
+ if (!waitpin->desc) {
+ /* Reserve the GPIO for wait pin usage.
+ * GPIO polarity doesn't matter here. Wait pin polarity
+ * is set in GPMC_CONFIG register.
+ */
+ waitpin_desc = gpiochip_request_own_desc(&gpmc->gpio_chip,
+ p->wait_pin, "WAITPIN",
+ GPIO_ACTIVE_HIGH,
+ GPIOD_IN);
+
+ ret = PTR_ERR(waitpin_desc);
+ if (IS_ERR(waitpin_desc) && ret != -EBUSY)
+ return ret;
+
+ /* New wait pin */
+ waitpin->desc = waitpin_desc;
+ waitpin->pin = p->wait_pin;
+ waitpin->polarity = p->wait_pin_polarity;
+ } else {
+ /* Shared wait pin */
+ if (p->wait_pin_polarity != waitpin->polarity ||
+ p->wait_pin != waitpin->pin) {
+ dev_err(gpmc->dev,
+ "shared-wait-pin: invalid configuration\n");
+ return -EINVAL;
+ }
+ dev_info(gpmc->dev, "shared wait-pin: %d\n", waitpin->pin);
+ }
+
+ return 0;
+}
+
+static void gpmc_free_waitpin(struct gpmc_device *gpmc,
+ int wait_pin)
+{
+ if (gpmc_is_valid_waitpin(wait_pin))
+ gpiochip_free_own_desc(gpmc->waitpins[wait_pin].desc);
+}
+
+/**
+ * gpmc_configure - write request to configure gpmc
+ * @cmd: command type
+ * @wval: value to write
+ * @return status of the operation
+ */
+int gpmc_configure(int cmd, int wval)
+{
+ u32 regval;
+
+ switch (cmd) {
+ case GPMC_CONFIG_WP:
+ regval = gpmc_read_reg(GPMC_CONFIG);
+ if (wval)
+ regval &= ~GPMC_CONFIG_WRITEPROTECT; /* WP is ON */
+ else
+ regval |= GPMC_CONFIG_WRITEPROTECT; /* WP is OFF */
+ gpmc_write_reg(GPMC_CONFIG, regval);
+ break;
+
+ default:
+ pr_err("%s: command not supported\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(gpmc_configure);
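+
+/*
+ * Usage example: a flash driver asserts write protect with
+ * gpmc_configure(GPMC_CONFIG_WP, 1) and releases it with
+ * gpmc_configure(GPMC_CONFIG_WP, 0). The WP pin is active low, which
+ * is why wval == 1 clears GPMC_CONFIG_WRITEPROTECT above.
+ */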
+
+static bool gpmc_nand_writebuffer_empty(void)
+{
+ if (gpmc_read_reg(GPMC_STATUS) & GPMC_STATUS_EMPTYWRITEBUFFERSTATUS)
+ return true;
+
+ return false;
+}
+
+static struct gpmc_nand_ops nand_ops = {
+ .nand_writebuffer_empty = gpmc_nand_writebuffer_empty,
+};
+
+/**
+ * gpmc_omap_get_nand_ops - Get the GPMC NAND interface
+ * @reg: the GPMC NAND register map exclusive for NAND use.
+ * @cs: GPMC chip select number on which the NAND sits. The
+ * register map returned will be specific to this chip select.
+ *
+ * Returns NULL on error e.g. invalid cs.
+ */
+struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *reg, int cs)
+{
+ int i;
+
+ if (cs >= gpmc_cs_num)
+ return NULL;
+
+ reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET +
+ GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs;
+ reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET +
+ GPMC_CS_NAND_ADDRESS + GPMC_CS_SIZE * cs;
+ reg->gpmc_nand_data = gpmc_base + GPMC_CS0_OFFSET +
+ GPMC_CS_NAND_DATA + GPMC_CS_SIZE * cs;
+ reg->gpmc_prefetch_config1 = gpmc_base + GPMC_PREFETCH_CONFIG1;
+ reg->gpmc_prefetch_config2 = gpmc_base + GPMC_PREFETCH_CONFIG2;
+ reg->gpmc_prefetch_control = gpmc_base + GPMC_PREFETCH_CONTROL;
+ reg->gpmc_prefetch_status = gpmc_base + GPMC_PREFETCH_STATUS;
+ reg->gpmc_ecc_config = gpmc_base + GPMC_ECC_CONFIG;
+ reg->gpmc_ecc_control = gpmc_base + GPMC_ECC_CONTROL;
+ reg->gpmc_ecc_size_config = gpmc_base + GPMC_ECC_SIZE_CONFIG;
+ reg->gpmc_ecc1_result = gpmc_base + GPMC_ECC1_RESULT;
+
+ for (i = 0; i < GPMC_BCH_NUM_REMAINDER; i++) {
+ reg->gpmc_bch_result0[i] = gpmc_base + GPMC_ECC_BCH_RESULT_0 +
+ GPMC_BCH_SIZE * i;
+ reg->gpmc_bch_result1[i] = gpmc_base + GPMC_ECC_BCH_RESULT_1 +
+ GPMC_BCH_SIZE * i;
+ reg->gpmc_bch_result2[i] = gpmc_base + GPMC_ECC_BCH_RESULT_2 +
+ GPMC_BCH_SIZE * i;
+ reg->gpmc_bch_result3[i] = gpmc_base + GPMC_ECC_BCH_RESULT_3 +
+ GPMC_BCH_SIZE * i;
+ reg->gpmc_bch_result4[i] = gpmc_base + GPMC_ECC_BCH_RESULT_4 +
+ i * GPMC_BCH_SIZE;
+ reg->gpmc_bch_result5[i] = gpmc_base + GPMC_ECC_BCH_RESULT_5 +
+ i * GPMC_BCH_SIZE;
+ reg->gpmc_bch_result6[i] = gpmc_base + GPMC_ECC_BCH_RESULT_6 +
+ i * GPMC_BCH_SIZE;
+ }
+
+ return &nand_ops;
+}
+EXPORT_SYMBOL_GPL(gpmc_omap_get_nand_ops);
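+
+/*
+ * Sketch of the intended consumer, the OMAP NAND controller driver
+ * (the exact flow in that driver may differ):
+ *
+ *	struct gpmc_nand_regs regs;
+ *	struct gpmc_nand_ops *ops;
+ *
+ *	ops = gpmc_omap_get_nand_ops(&regs, cs);
+ *	if (!ops)
+ *		return -ENODEV;
+ *	while (!ops->nand_writebuffer_empty())
+ *		cpu_relax();
+ */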
+
+static void gpmc_omap_onenand_calc_sync_timings(struct gpmc_timings *t,
+ struct gpmc_settings *s,
+ int freq, int latency)
+{
+ struct gpmc_device_timings dev_t;
+ const int t_cer = 15;
+ const int t_avdp = 12;
+ const int t_cez = 20; /* max of t_cez, t_oez */
+ const int t_wpl = 40;
+ const int t_wph = 30;
+ int min_gpmc_clk_period, t_ces, t_avds, t_avdh, t_ach, t_aavdh, t_rdyo;
+
+ switch (freq) {
+ case 104:
+ min_gpmc_clk_period = 9600; /* 104 MHz */
+ t_ces = 3;
+ t_avds = 4;
+ t_avdh = 2;
+ t_ach = 3;
+ t_aavdh = 6;
+ t_rdyo = 6;
+ break;
+ case 83:
+ min_gpmc_clk_period = 12000; /* 83 MHz */
+ t_ces = 5;
+ t_avds = 4;
+ t_avdh = 2;
+ t_ach = 6;
+ t_aavdh = 6;
+ t_rdyo = 9;
+ break;
+ case 66:
+ min_gpmc_clk_period = 15000; /* 66 MHz */
+ t_ces = 6;
+ t_avds = 5;
+ t_avdh = 2;
+ t_ach = 6;
+ t_aavdh = 6;
+ t_rdyo = 11;
+ break;
+ default:
+ min_gpmc_clk_period = 18500; /* 54 MHz */
+ t_ces = 7;
+ t_avds = 7;
+ t_avdh = 7;
+ t_ach = 9;
+ t_aavdh = 7;
+ t_rdyo = 15;
+ break;
+ }
+
+ /* Set synchronous read timings */
+ memset(&dev_t, 0, sizeof(dev_t));
+
+ if (!s->sync_write) {
+ dev_t.t_avdp_w = max(t_avdp, t_cer) * 1000;
+ dev_t.t_wpl = t_wpl * 1000;
+ dev_t.t_wph = t_wph * 1000;
+ dev_t.t_aavdh = t_aavdh * 1000;
+ }
+ dev_t.ce_xdelay = true;
+ dev_t.avd_xdelay = true;
+ dev_t.oe_xdelay = true;
+ dev_t.we_xdelay = true;
+ dev_t.clk = min_gpmc_clk_period;
+ dev_t.t_bacc = dev_t.clk;
+ dev_t.t_ces = t_ces * 1000;
+ dev_t.t_avds = t_avds * 1000;
+ dev_t.t_avdh = t_avdh * 1000;
+ dev_t.t_ach = t_ach * 1000;
+ dev_t.cyc_iaa = (latency + 1);
+ dev_t.t_cez_r = t_cez * 1000;
+ dev_t.t_cez_w = dev_t.t_cez_r;
+ dev_t.cyc_aavdh_oe = 1;
+ dev_t.t_rdyo = t_rdyo * 1000 + min_gpmc_clk_period;
+
+ gpmc_calc_timings(t, s, &dev_t);
+}
+
+int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
+ int latency,
+ struct gpmc_onenand_info *info)
+{
+ int ret;
+ struct gpmc_timings gpmc_t;
+ struct gpmc_settings gpmc_s;
+
+ gpmc_read_settings_dt(dev->of_node, &gpmc_s);
+
+ info->sync_read = gpmc_s.sync_read;
+ info->sync_write = gpmc_s.sync_write;
+ info->burst_len = gpmc_s.burst_len;
+
+ if (!gpmc_s.sync_read && !gpmc_s.sync_write)
+ return 0;
+
+ gpmc_omap_onenand_calc_sync_timings(&gpmc_t, &gpmc_s, freq, latency);
+
+ ret = gpmc_cs_program_settings(cs, &gpmc_s);
+ if (ret < 0)
+ return ret;
+
+ return gpmc_cs_set_timings(cs, &gpmc_t, &gpmc_s);
+}
+EXPORT_SYMBOL_GPL(gpmc_omap_onenand_set_timings);
+
+int gpmc_get_client_irq(unsigned int irq_config)
+{
+ if (!gpmc_irq_domain) {
+ pr_warn("%s called before GPMC IRQ domain available\n",
+ __func__);
+ return 0;
+ }
+
+ /* we restrict this to NAND IRQs only */
+ if (irq_config >= GPMC_NR_NAND_IRQS)
+ return 0;
+
+ return irq_create_mapping(gpmc_irq_domain, irq_config);
+}
+
+static int gpmc_irq_endis(unsigned long hwirq, bool endis)
+{
+ u32 regval;
+
+ /* bits GPMC_NR_NAND_IRQS to 8 are reserved */
+ if (hwirq >= GPMC_NR_NAND_IRQS)
+ hwirq += 8 - GPMC_NR_NAND_IRQS;
+
+ regval = gpmc_read_reg(GPMC_IRQENABLE);
+ if (endis)
+ regval |= BIT(hwirq);
+ else
+ regval &= ~BIT(hwirq);
+ gpmc_write_reg(GPMC_IRQENABLE, regval);
+
+ return 0;
+}
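+
+/*
+ * Bit layout example: with GPMC_NR_NAND_IRQS == 2, the two NAND hwirqs
+ * 0 and 1 map directly to IRQENABLE bits 0 and 1, while wait-pin
+ * hwirqs 2..5 are shifted past the reserved bits to IRQENABLE bits
+ * 8..11.
+ */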
+
+static void gpmc_irq_disable(struct irq_data *p)
+{
+ gpmc_irq_endis(p->hwirq, false);
+}
+
+static void gpmc_irq_enable(struct irq_data *p)
+{
+ gpmc_irq_endis(p->hwirq, true);
+}
+
+static void gpmc_irq_mask(struct irq_data *d)
+{
+ gpmc_irq_endis(d->hwirq, false);
+}
+
+static void gpmc_irq_unmask(struct irq_data *d)
+{
+ gpmc_irq_endis(d->hwirq, true);
+}
+
+static void gpmc_irq_edge_config(unsigned long hwirq, bool rising_edge)
+{
+ u32 regval;
+
+ /* NAND IRQ polarity is not configurable */
+ if (hwirq < GPMC_NR_NAND_IRQS)
+ return;
+
+ /* WAITPIN starts at BIT 8 */
+ hwirq += 8 - GPMC_NR_NAND_IRQS;
+
+ regval = gpmc_read_reg(GPMC_CONFIG);
+ if (rising_edge)
+ regval &= ~BIT(hwirq);
+ else
+ regval |= BIT(hwirq);
+
+ gpmc_write_reg(GPMC_CONFIG, regval);
+}
+
+static void gpmc_irq_ack(struct irq_data *d)
+{
+ unsigned int hwirq = d->hwirq;
+
+ /* skip reserved bits */
+ if (hwirq >= GPMC_NR_NAND_IRQS)
+ hwirq += 8 - GPMC_NR_NAND_IRQS;
+
+ /* Setting bit to 1 clears (or Acks) the interrupt */
+ gpmc_write_reg(GPMC_IRQSTATUS, BIT(hwirq));
+}
+
+static int gpmc_irq_set_type(struct irq_data *d, unsigned int trigger)
+{
+ /* can't set type for NAND IRQs */
+ if (d->hwirq < GPMC_NR_NAND_IRQS)
+ return -EINVAL;
+
+ /* We can support either rising or falling edge at a time */
+ if (trigger == IRQ_TYPE_EDGE_FALLING)
+ gpmc_irq_edge_config(d->hwirq, false);
+ else if (trigger == IRQ_TYPE_EDGE_RISING)
+ gpmc_irq_edge_config(d->hwirq, true);
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int gpmc_irq_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct gpmc_device *gpmc = d->host_data;
+
+ irq_set_chip_data(virq, gpmc);
+ if (hw < GPMC_NR_NAND_IRQS) {
+ irq_modify_status(virq, IRQ_NOREQUEST, IRQ_NOAUTOEN);
+ irq_set_chip_and_handler(virq, &gpmc->irq_chip,
+ handle_simple_irq);
+ } else {
+ irq_set_chip_and_handler(virq, &gpmc->irq_chip,
+ handle_edge_irq);
+ }
+
+ return 0;
+}
+
+static const struct irq_domain_ops gpmc_irq_domain_ops = {
+ .map = gpmc_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static irqreturn_t gpmc_handle_irq(int irq, void *data)
+{
+ int hwirq, virq;
+ u32 regval, regvalx;
+ struct gpmc_device *gpmc = data;
+
+ regval = gpmc_read_reg(GPMC_IRQSTATUS);
+ regvalx = regval;
+
+ if (!regval)
+ return IRQ_NONE;
+
+ for (hwirq = 0; hwirq < gpmc->nirqs; hwirq++) {
+ /* skip reserved status bits */
+ if (hwirq == GPMC_NR_NAND_IRQS)
+ regvalx >>= 8 - GPMC_NR_NAND_IRQS;
+
+ if (regvalx & BIT(hwirq)) {
+ virq = irq_find_mapping(gpmc_irq_domain, hwirq);
+ if (!virq) {
+ dev_warn(gpmc->dev,
+ "spurious irq detected hwirq %d, virq %d\n",
+ hwirq, virq);
+ }
+
+ generic_handle_irq(virq);
+ }
+ }
+
+ gpmc_write_reg(GPMC_IRQSTATUS, regval);
+
+ return IRQ_HANDLED;
+}
+
+static int gpmc_setup_irq(struct gpmc_device *gpmc)
+{
+ u32 regval;
+ int rc;
+
+ /* Disable interrupts */
+ gpmc_write_reg(GPMC_IRQENABLE, 0);
+
+ /* clear interrupts */
+ regval = gpmc_read_reg(GPMC_IRQSTATUS);
+ gpmc_write_reg(GPMC_IRQSTATUS, regval);
+
+ gpmc->irq_chip.name = "gpmc";
+ gpmc->irq_chip.irq_enable = gpmc_irq_enable;
+ gpmc->irq_chip.irq_disable = gpmc_irq_disable;
+ gpmc->irq_chip.irq_ack = gpmc_irq_ack;
+ gpmc->irq_chip.irq_mask = gpmc_irq_mask;
+ gpmc->irq_chip.irq_unmask = gpmc_irq_unmask;
+ gpmc->irq_chip.irq_set_type = gpmc_irq_set_type;
+
+ gpmc_irq_domain = irq_domain_add_linear(gpmc->dev->of_node,
+ gpmc->nirqs,
+ &gpmc_irq_domain_ops,
+ gpmc);
+ if (!gpmc_irq_domain) {
+ dev_err(gpmc->dev, "IRQ domain add failed\n");
+ return -ENODEV;
+ }
+
+ rc = request_irq(gpmc->irq, gpmc_handle_irq, 0, "gpmc", gpmc);
+ if (rc) {
+ dev_err(gpmc->dev, "failed to request irq %d: %d\n",
+ gpmc->irq, rc);
+ irq_domain_remove(gpmc_irq_domain);
+ gpmc_irq_domain = NULL;
+ }
+
+ return rc;
+}
+
+static int gpmc_free_irq(struct gpmc_device *gpmc)
+{
+ int hwirq;
+
+ free_irq(gpmc->irq, gpmc);
+
+ for (hwirq = 0; hwirq < gpmc->nirqs; hwirq++)
+ irq_dispose_mapping(irq_find_mapping(gpmc_irq_domain, hwirq));
+
+ irq_domain_remove(gpmc_irq_domain);
+ gpmc_irq_domain = NULL;
+
+ return 0;
+}
+
+static void gpmc_mem_exit(void)
+{
+ int cs;
+
+ for (cs = 0; cs < gpmc_cs_num; cs++) {
+ if (!gpmc_cs_mem_enabled(cs))
+ continue;
+ gpmc_cs_delete_mem(cs);
+ }
+}
+
+static void gpmc_mem_init(struct gpmc_device *gpmc)
+{
+ int cs;
+
+ if (!gpmc->data) {
+ /* All legacy devices have the same data IO window */
+ gpmc_mem_root.start = GPMC_MEM_START;
+ gpmc_mem_root.end = GPMC_MEM_END;
+ } else {
+ gpmc_mem_root.start = gpmc->data->start;
+ gpmc_mem_root.end = gpmc->data->end;
+ }
+
+ /* Reserve all regions that have been set up by the bootloader */
+ for (cs = 0; cs < gpmc_cs_num; cs++) {
+ u32 base, size;
+
+ if (!gpmc_cs_mem_enabled(cs))
+ continue;
+ gpmc_cs_get_memconf(cs, &base, &size);
+ if (gpmc_cs_insert_mem(cs, base, size)) {
+ pr_warn("%s: disabling cs %d mapped at 0x%x-0x%x\n",
+ __func__, cs, base, base + size);
+ gpmc_cs_disable_mem(cs);
+ }
+ }
+}
+
+static u32 gpmc_round_ps_to_sync_clk(u32 time_ps, u32 sync_clk)
+{
+ u32 temp;
+ int div;
+
+ div = gpmc_calc_divider(sync_clk);
+ temp = gpmc_ps_to_ticks(time_ps);
+ temp = (temp + div - 1) / div;
+ return gpmc_ticks_to_ps(temp * div);
+}
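+
+/*
+ * Worked example, assuming a 10000 ps fclk: for sync_clk = 20000 ps
+ * the divider is 2, so gpmc_round_ps_to_sync_clk(25000, 20000) turns
+ * 25000 ps into 3 fclk ticks, rounds up to the next multiple of the
+ * divider (4 ticks) and returns 40000 ps, two full GPMC_CLK cycles.
+ */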
+
+/* XXX: can the cycles be avoided ? */
+static int gpmc_calc_sync_read_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t,
+ bool mux)
+{
+ u32 temp;
+
+ /* adv_rd_off */
+ temp = dev_t->t_avdp_r;
+ /* XXX: mux check required ? */
+ if (mux) {
+ /* XXX: t_avdp not to be required for sync, only added for tusb
+ * this indirectly necessitates requirement of t_avdp_r and
+ * t_avdp_w instead of having a single t_avdp
+ */
+ temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_avdh);
+ temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
+ }
+ gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp);
+
+ /* oe_on */
+ temp = dev_t->t_oeasu; /* XXX: remove this ? */
+ if (mux) {
+ temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_ach);
+ temp = max_t(u32, temp, gpmc_t->adv_rd_off +
+ gpmc_ticks_to_ps(dev_t->cyc_aavdh_oe));
+ }
+ gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);
+
+ /* access */
+ /* XXX: any scope for improvement ?, by combining oe_on
+ * and clk_activation, need to check whether
+ * access = clk_activation + round to sync clk ?
+ */
+ temp = max_t(u32, dev_t->t_iaa, dev_t->cyc_iaa * gpmc_t->sync_clk);
+ temp += gpmc_t->clk_activation;
+ if (dev_t->cyc_oe)
+ temp = max_t(u32, temp, gpmc_t->oe_on +
+ gpmc_ticks_to_ps(dev_t->cyc_oe));
+ gpmc_t->access = gpmc_round_ps_to_ticks(temp);
+
+ gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
+ gpmc_t->cs_rd_off = gpmc_t->oe_off;
+
+ /* rd_cycle */
+ temp = max_t(u32, dev_t->t_cez_r, dev_t->t_oez);
+ temp = gpmc_round_ps_to_sync_clk(temp, gpmc_t->sync_clk) +
+ gpmc_t->access;
+ /* XXX: barter t_ce_rdyz with t_cez_r ? */
+ if (dev_t->t_ce_rdyz)
+ temp = max_t(u32, temp, gpmc_t->cs_rd_off + dev_t->t_ce_rdyz);
+ gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);
+
+ return 0;
+}
+
+static int gpmc_calc_sync_write_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t,
+ bool mux)
+{
+ u32 temp;
+
+ /* adv_wr_off */
+ temp = dev_t->t_avdp_w;
+ if (mux) {
+ temp = max_t(u32, temp,
+ gpmc_t->clk_activation + dev_t->t_avdh);
+ temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
+ }
+ gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);
+
+ /* wr_data_mux_bus */
+ temp = max_t(u32, dev_t->t_weasu,
+ gpmc_t->clk_activation + dev_t->t_rdyo);
+ /* XXX: shouldn't mux be kept as a whole for wr_data_mux_bus ?,
+ * and in that case remember to handle we_on properly
+ */
+ if (mux) {
+ temp = max_t(u32, temp,
+ gpmc_t->adv_wr_off + dev_t->t_aavdh);
+ temp = max_t(u32, temp, gpmc_t->adv_wr_off +
+ gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
+ }
+ gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);
+
+ /* we_on */
+ if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
+ gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
+ else
+ gpmc_t->we_on = gpmc_t->wr_data_mux_bus;
+
+ /* wr_access */
+ /* XXX: gpmc_capability check reqd ? , even if not, will not harm */
+ gpmc_t->wr_access = gpmc_t->access;
+
+ /* we_off */
+ temp = gpmc_t->we_on + dev_t->t_wpl;
+ temp = max_t(u32, temp,
+ gpmc_t->wr_access + gpmc_ticks_to_ps(1));
+ temp = max_t(u32, temp,
+ gpmc_t->we_on + gpmc_ticks_to_ps(dev_t->cyc_wpl));
+ gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);
+
+ gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
+ dev_t->t_wph);
+
+ /* wr_cycle */
+ temp = gpmc_round_ps_to_sync_clk(dev_t->t_cez_w, gpmc_t->sync_clk);
+ temp += gpmc_t->wr_access;
+ /* XXX: barter t_ce_rdyz with t_cez_w ? */
+ if (dev_t->t_ce_rdyz)
+ temp = max_t(u32, temp,
+ gpmc_t->cs_wr_off + dev_t->t_ce_rdyz);
+ gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);
+
+ return 0;
+}
+
+static int gpmc_calc_async_read_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t,
+ bool mux)
+{
+ u32 temp;
+
+ /* adv_rd_off */
+ temp = dev_t->t_avdp_r;
+ if (mux)
+ temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
+ gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp);
+
+ /* oe_on */
+ temp = dev_t->t_oeasu;
+ if (mux)
+ temp = max_t(u32, temp, gpmc_t->adv_rd_off + dev_t->t_aavdh);
+ gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);
+
+ /* access */
+ temp = max_t(u32, dev_t->t_iaa, /* XXX: remove t_iaa in async ? */
+ gpmc_t->oe_on + dev_t->t_oe);
+ temp = max_t(u32, temp, gpmc_t->cs_on + dev_t->t_ce);
+ temp = max_t(u32, temp, gpmc_t->adv_on + dev_t->t_aa);
+ gpmc_t->access = gpmc_round_ps_to_ticks(temp);
+
+ gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
+ gpmc_t->cs_rd_off = gpmc_t->oe_off;
+
+ /* rd_cycle */
+ temp = max_t(u32, dev_t->t_rd_cycle,
+ gpmc_t->cs_rd_off + dev_t->t_cez_r);
+ temp = max_t(u32, temp, gpmc_t->oe_off + dev_t->t_oez);
+ gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);
+
+ return 0;
+}
+
+static int gpmc_calc_async_write_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t,
+ bool mux)
+{
+ u32 temp;
+
+ /* adv_wr_off */
+ temp = dev_t->t_avdp_w;
+ if (mux)
+ temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
+ gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);
+
+ /* wr_data_mux_bus */
+ temp = dev_t->t_weasu;
+ if (mux) {
+ temp = max_t(u32, temp, gpmc_t->adv_wr_off + dev_t->t_aavdh);
+ temp = max_t(u32, temp, gpmc_t->adv_wr_off +
+ gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
+ }
+ gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);
+
+ /* we_on */
+ if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
+ gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
+ else
+ gpmc_t->we_on = gpmc_t->wr_data_mux_bus;
+
+ /* we_off */
+ temp = gpmc_t->we_on + dev_t->t_wpl;
+ gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);
+
+ gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
+ dev_t->t_wph);
+
+ /* wr_cycle */
+ temp = max_t(u32, dev_t->t_wr_cycle,
+ gpmc_t->cs_wr_off + dev_t->t_cez_w);
+ gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);
+
+ return 0;
+}
+
+static int gpmc_calc_sync_common_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t)
+{
+ u32 temp;
+
+ gpmc_t->sync_clk = gpmc_calc_divider(dev_t->clk) *
+ gpmc_get_fclk_period();
+
+ gpmc_t->page_burst_access = gpmc_round_ps_to_sync_clk(
+ dev_t->t_bacc,
+ gpmc_t->sync_clk);
+
+ temp = max_t(u32, dev_t->t_ces, dev_t->t_avds);
+ gpmc_t->clk_activation = gpmc_round_ps_to_ticks(temp);
+
+ if (gpmc_calc_divider(gpmc_t->sync_clk) != 1)
+ return 0;
+
+ if (dev_t->ce_xdelay)
+ gpmc_t->bool_timings.cs_extra_delay = true;
+ if (dev_t->avd_xdelay)
+ gpmc_t->bool_timings.adv_extra_delay = true;
+ if (dev_t->oe_xdelay)
+ gpmc_t->bool_timings.oe_extra_delay = true;
+ if (dev_t->we_xdelay)
+ gpmc_t->bool_timings.we_extra_delay = true;
+
+ return 0;
+}
+
+static int gpmc_calc_common_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t,
+ bool sync)
+{
+ u32 temp;
+
+ /* cs_on */
+ gpmc_t->cs_on = gpmc_round_ps_to_ticks(dev_t->t_ceasu);
+
+ /* adv_on */
+ temp = dev_t->t_avdasu;
+ if (dev_t->t_ce_avd)
+ temp = max_t(u32, temp,
+ gpmc_t->cs_on + dev_t->t_ce_avd);
+ gpmc_t->adv_on = gpmc_round_ps_to_ticks(temp);
+
+ if (sync)
+ gpmc_calc_sync_common_timings(gpmc_t, dev_t);
+
+ return 0;
+}
+
+/*
+ * TODO: remove this function once all peripherals are confirmed to
+ * work with generic timing. Simultaneously gpmc_cs_set_timings()
+ * has to be modified to handle timings in ps instead of ns
+ */
+static void gpmc_convert_ps_to_ns(struct gpmc_timings *t)
+{
+ t->cs_on /= 1000;
+ t->cs_rd_off /= 1000;
+ t->cs_wr_off /= 1000;
+ t->adv_on /= 1000;
+ t->adv_rd_off /= 1000;
+ t->adv_wr_off /= 1000;
+ t->we_on /= 1000;
+ t->we_off /= 1000;
+ t->oe_on /= 1000;
+ t->oe_off /= 1000;
+ t->page_burst_access /= 1000;
+ t->access /= 1000;
+ t->rd_cycle /= 1000;
+ t->wr_cycle /= 1000;
+ t->bus_turnaround /= 1000;
+ t->cycle2cycle_delay /= 1000;
+ t->wait_monitoring /= 1000;
+ t->clk_activation /= 1000;
+ t->wr_access /= 1000;
+ t->wr_data_mux_bus /= 1000;
+}
+
+int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_settings *gpmc_s,
+ struct gpmc_device_timings *dev_t)
+{
+ bool mux = false, sync = false;
+
+ if (gpmc_s) {
+ mux = gpmc_s->mux_add_data ? true : false;
+ sync = (gpmc_s->sync_read || gpmc_s->sync_write);
+ }
+
+ memset(gpmc_t, 0, sizeof(*gpmc_t));
+
+ gpmc_calc_common_timings(gpmc_t, dev_t, sync);
+
+ if (gpmc_s && gpmc_s->sync_read)
+ gpmc_calc_sync_read_timings(gpmc_t, dev_t, mux);
+ else
+ gpmc_calc_async_read_timings(gpmc_t, dev_t, mux);
+
+ if (gpmc_s && gpmc_s->sync_write)
+ gpmc_calc_sync_write_timings(gpmc_t, dev_t, mux);
+ else
+ gpmc_calc_async_write_timings(gpmc_t, dev_t, mux);
+
+ /* TODO: remove, see function definition */
+ gpmc_convert_ps_to_ns(gpmc_t);
+
+ return 0;
+}
+
+/**
+ * gpmc_cs_program_settings - programs non-timing related settings
+ * @cs: GPMC chip-select to program
+ * @p: pointer to GPMC settings structure
+ *
+ * Programs non-timing related settings for a GPMC chip-select, such as
+ * bus-width, burst configuration, etc. Function should be called once
+ * for each chip-select that is being used and must be called before
+ * calling gpmc_cs_set_timings() as timing parameters in the CONFIG1
+ * register will be initialised to zero by this function. Returns 0 on
+ * success and appropriate negative error code on failure.
+ */
+int gpmc_cs_program_settings(int cs, struct gpmc_settings *p)
+{
+ u32 config1;
+
+ if ((!p->device_width) || (p->device_width > GPMC_DEVWIDTH_16BIT)) {
+ pr_err("%s: invalid width %d!", __func__, p->device_width);
+ return -EINVAL;
+ }
+
+ /* Address-data multiplexing not supported for NAND devices */
+ if (p->device_nand && p->mux_add_data) {
+ pr_err("%s: invalid configuration!\n", __func__);
+ return -EINVAL;
+ }
+
+ if ((p->mux_add_data > GPMC_MUX_AD) ||
+ ((p->mux_add_data == GPMC_MUX_AAD) &&
+ !(gpmc_capability & GPMC_HAS_MUX_AAD))) {
+ pr_err("%s: invalid multiplex configuration!\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Page/burst mode supports lengths of 4, 8 and 16 bytes */
+ if (p->burst_read || p->burst_write) {
+ switch (p->burst_len) {
+ case GPMC_BURST_4:
+ case GPMC_BURST_8:
+ case GPMC_BURST_16:
+ break;
+ default:
+ pr_err("%s: invalid page/burst-length (%d)\n",
+ __func__, p->burst_len);
+ return -EINVAL;
+ }
+ }
+
+ if (p->wait_pin != GPMC_WAITPIN_INVALID &&
+ !gpmc_is_valid_waitpin(p->wait_pin)) {
+ pr_err("%s: invalid wait-pin (%d)\n", __func__, p->wait_pin);
+ return -EINVAL;
+ }
+
+ config1 = GPMC_CONFIG1_DEVICESIZE((p->device_width - 1));
+
+ if (p->sync_read)
+ config1 |= GPMC_CONFIG1_READTYPE_SYNC;
+ if (p->sync_write)
+ config1 |= GPMC_CONFIG1_WRITETYPE_SYNC;
+ if (p->wait_on_read)
+ config1 |= GPMC_CONFIG1_WAIT_READ_MON;
+ if (p->wait_on_write)
+ config1 |= GPMC_CONFIG1_WAIT_WRITE_MON;
+ if (p->wait_on_read || p->wait_on_write)
+ config1 |= GPMC_CONFIG1_WAIT_PIN_SEL(p->wait_pin);
+ if (p->device_nand)
+ config1 |= GPMC_CONFIG1_DEVICETYPE(GPMC_DEVICETYPE_NAND);
+ if (p->mux_add_data)
+ config1 |= GPMC_CONFIG1_MUXTYPE(p->mux_add_data);
+ if (p->burst_read)
+ config1 |= GPMC_CONFIG1_READMULTIPLE_SUPP;
+ if (p->burst_write)
+ config1 |= GPMC_CONFIG1_WRITEMULTIPLE_SUPP;
+ if (p->burst_read || p->burst_write) {
+ config1 |= GPMC_CONFIG1_PAGE_LEN(p->burst_len >> 3);
+ config1 |= p->burst_wrap ? GPMC_CONFIG1_WRAPBURST_SUPP : 0;
+ }
+
+ gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, config1);
+
+ if (p->wait_pin_polarity != GPMC_WAITPINPOLARITY_INVALID) {
+ config1 = gpmc_read_reg(GPMC_CONFIG);
+
+ if (p->wait_pin_polarity == GPMC_WAITPINPOLARITY_ACTIVE_LOW)
+ config1 &= ~GPMC_CONFIG_WAITPINPOLARITY(p->wait_pin);
+ else if (p->wait_pin_polarity == GPMC_WAITPINPOLARITY_ACTIVE_HIGH)
+ config1 |= GPMC_CONFIG_WAITPINPOLARITY(p->wait_pin);
+
+ gpmc_write_reg(GPMC_CONFIG, config1);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static void gpmc_cs_set_name(int cs, const char *name)
+{
+ struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
+
+ gpmc->name = name;
+}
+
+static const char *gpmc_cs_get_name(int cs)
+{
+ struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
+
+ return gpmc->name;
+}
+
+/**
+ * gpmc_cs_remap - remaps a chip-select physical base address
+ * @cs: chip-select to remap
+ * @base: physical base address to re-map chip-select to
+ *
+ * Re-maps a chip-select to a new physical base address specified by
+ * "base". Returns 0 on success and appropriate negative error code
+ * on failure.
+ */
+static int gpmc_cs_remap(int cs, u32 base)
+{
+ int ret;
+ u32 old_base, size;
+
+ if (cs >= gpmc_cs_num) {
+ pr_err("%s: requested chip-select is disabled\n", __func__);
+ return -ENODEV;
+ }
+
+ /*
+ * Make sure we ignore any device offsets from the GPMC partition
+ * allocated for the chip select and that the new base conforms
+ * to the GPMC 16MB minimum granularity.
+ */
+ base &= ~(SZ_16M - 1);
+
+ gpmc_cs_get_memconf(cs, &old_base, &size);
+ if (base == old_base)
+ return 0;
+
+ ret = gpmc_cs_delete_mem(cs);
+ if (ret < 0)
+ return ret;
+
+ ret = gpmc_cs_insert_mem(cs, base, size);
+ if (ret < 0)
+ return ret;
+
+ ret = gpmc_cs_set_memconf(cs, base, size);
+
+ return ret;
+}
+
+/**
+ * gpmc_read_settings_dt - read gpmc settings from device-tree
+ * @np: pointer to device-tree node for a gpmc child device
+ * @p: pointer to gpmc settings structure
+ *
+ * Reads the GPMC settings for a GPMC child device from device-tree and
+ * stores them in the GPMC settings structure passed. The GPMC settings
+ * structure is initialised to zero by this function and so any
+ * previously stored settings will be cleared.
+ */
+void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p)
+{
+ memset(p, 0, sizeof(struct gpmc_settings));
+
+ p->sync_read = of_property_read_bool(np, "gpmc,sync-read");
+ p->sync_write = of_property_read_bool(np, "gpmc,sync-write");
+ of_property_read_u32(np, "gpmc,device-width", &p->device_width);
+ of_property_read_u32(np, "gpmc,mux-add-data", &p->mux_add_data);
+
+ if (!of_property_read_u32(np, "gpmc,burst-length", &p->burst_len)) {
+ p->burst_wrap = of_property_read_bool(np, "gpmc,burst-wrap");
+ p->burst_read = of_property_read_bool(np, "gpmc,burst-read");
+ p->burst_write = of_property_read_bool(np, "gpmc,burst-write");
+ if (!p->burst_read && !p->burst_write)
+ pr_warn("%s: page/burst-length set but not used!\n",
+ __func__);
+ }
+
+ p->wait_pin = GPMC_WAITPIN_INVALID;
+ p->wait_pin_polarity = GPMC_WAITPINPOLARITY_INVALID;
+
+ if (!of_property_read_u32(np, "gpmc,wait-pin", &p->wait_pin)) {
+ if (!gpmc_is_valid_waitpin(p->wait_pin)) {
+ pr_err("%s: Invalid wait-pin (%d)\n", __func__, p->wait_pin);
+ p->wait_pin = GPMC_WAITPIN_INVALID;
+ }
+
+ if (!of_property_read_u32(np, "ti,wait-pin-polarity",
+ &p->wait_pin_polarity)) {
+ if (p->wait_pin_polarity != GPMC_WAITPINPOLARITY_ACTIVE_HIGH &&
+ p->wait_pin_polarity != GPMC_WAITPINPOLARITY_ACTIVE_LOW) {
+ pr_err("%s: Invalid wait-pin-polarity (%d)\n",
+ __func__, p->wait_pin_polarity);
+ p->wait_pin_polarity = GPMC_WAITPINPOLARITY_INVALID;
+ }
+ }
+
+ p->wait_on_read = of_property_read_bool(np,
+ "gpmc,wait-on-read");
+ p->wait_on_write = of_property_read_bool(np,
+ "gpmc,wait-on-write");
+ if (!p->wait_on_read && !p->wait_on_write)
+ pr_debug("%s: rd/wr wait monitoring not enabled!\n",
+ __func__);
+ }
+}
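+
+/*
+ * Illustrative device-tree fragment (all values hypothetical) using the
+ * properties parsed above:
+ *
+ *	nor@0,0 {
+ *		gpmc,device-width = <2>;	// 16-bit device
+ *		gpmc,mux-add-data = <2>;	// address/data multiplexed
+ *		gpmc,sync-read;
+ *		gpmc,burst-read;
+ *		gpmc,burst-length = <16>;
+ *		gpmc,wait-pin = <0>;
+ *		gpmc,wait-on-read;
+ *	};
+ */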
+
+static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
+ struct gpmc_timings *gpmc_t)
+{
+ struct gpmc_bool_timings *p;
+
+ if (!np || !gpmc_t)
+ return;
+
+ memset(gpmc_t, 0, sizeof(*gpmc_t));
+
+ /* minimum clock period for synchronous mode */
+ of_property_read_u32(np, "gpmc,sync-clk-ps", &gpmc_t->sync_clk);
+
+ /* chip-select timings */
+ of_property_read_u32(np, "gpmc,cs-on-ns", &gpmc_t->cs_on);
+ of_property_read_u32(np, "gpmc,cs-rd-off-ns", &gpmc_t->cs_rd_off);
+ of_property_read_u32(np, "gpmc,cs-wr-off-ns", &gpmc_t->cs_wr_off);
+
+ /* ADV signal timings */
+ of_property_read_u32(np, "gpmc,adv-on-ns", &gpmc_t->adv_on);
+ of_property_read_u32(np, "gpmc,adv-rd-off-ns", &gpmc_t->adv_rd_off);
+ of_property_read_u32(np, "gpmc,adv-wr-off-ns", &gpmc_t->adv_wr_off);
+ of_property_read_u32(np, "gpmc,adv-aad-mux-on-ns",
+ &gpmc_t->adv_aad_mux_on);
+ of_property_read_u32(np, "gpmc,adv-aad-mux-rd-off-ns",
+ &gpmc_t->adv_aad_mux_rd_off);
+ of_property_read_u32(np, "gpmc,adv-aad-mux-wr-off-ns",
+ &gpmc_t->adv_aad_mux_wr_off);
+
+ /* WE signal timings */
+ of_property_read_u32(np, "gpmc,we-on-ns", &gpmc_t->we_on);
+ of_property_read_u32(np, "gpmc,we-off-ns", &gpmc_t->we_off);
+
+ /* OE signal timings */
+ of_property_read_u32(np, "gpmc,oe-on-ns", &gpmc_t->oe_on);
+ of_property_read_u32(np, "gpmc,oe-off-ns", &gpmc_t->oe_off);
+ of_property_read_u32(np, "gpmc,oe-aad-mux-on-ns",
+ &gpmc_t->oe_aad_mux_on);
+ of_property_read_u32(np, "gpmc,oe-aad-mux-off-ns",
+ &gpmc_t->oe_aad_mux_off);
+
+ /* access and cycle timings */
+ of_property_read_u32(np, "gpmc,page-burst-access-ns",
+ &gpmc_t->page_burst_access);
+ of_property_read_u32(np, "gpmc,access-ns", &gpmc_t->access);
+ of_property_read_u32(np, "gpmc,rd-cycle-ns", &gpmc_t->rd_cycle);
+ of_property_read_u32(np, "gpmc,wr-cycle-ns", &gpmc_t->wr_cycle);
+ of_property_read_u32(np, "gpmc,bus-turnaround-ns",
+ &gpmc_t->bus_turnaround);
+ of_property_read_u32(np, "gpmc,cycle2cycle-delay-ns",
+ &gpmc_t->cycle2cycle_delay);
+ of_property_read_u32(np, "gpmc,wait-monitoring-ns",
+ &gpmc_t->wait_monitoring);
+ of_property_read_u32(np, "gpmc,clk-activation-ns",
+ &gpmc_t->clk_activation);
+
+ /* only applicable to OMAP3+ */
+ of_property_read_u32(np, "gpmc,wr-access-ns", &gpmc_t->wr_access);
+ of_property_read_u32(np, "gpmc,wr-data-mux-bus-ns",
+ &gpmc_t->wr_data_mux_bus);
+
+ /* bool timing parameters */
+ p = &gpmc_t->bool_timings;
+
+ p->cycle2cyclediffcsen =
+ of_property_read_bool(np, "gpmc,cycle2cycle-diffcsen");
+ p->cycle2cyclesamecsen =
+ of_property_read_bool(np, "gpmc,cycle2cycle-samecsen");
+ p->we_extra_delay = of_property_read_bool(np, "gpmc,we-extra-delay");
+ p->oe_extra_delay = of_property_read_bool(np, "gpmc,oe-extra-delay");
+ p->adv_extra_delay = of_property_read_bool(np, "gpmc,adv-extra-delay");
+ p->cs_extra_delay = of_property_read_bool(np, "gpmc,cs-extra-delay");
+ p->time_para_granularity =
+ of_property_read_bool(np, "gpmc,time-para-granularity");
+}
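+
+/*
+ * Illustrative timing properties (hypothetical values) matching the
+ * parser above:
+ *
+ *	gpmc,sync-clk-ps = <20000>;
+ *	gpmc,cs-on-ns = <0>;
+ *	gpmc,cs-rd-off-ns = <80>;
+ *	gpmc,access-ns = <60>;
+ *	gpmc,rd-cycle-ns = <100>;
+ */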
+
+/**
+ * gpmc_probe_generic_child - configures the gpmc for a child device
+ * @pdev: pointer to gpmc platform device
+ * @child: pointer to device-tree node for child device
+ *
+ * Allocates and configures a GPMC chip-select for a child device.
+ * Returns 0 on success and appropriate negative error code on failure.
+ */
+static int gpmc_probe_generic_child(struct platform_device *pdev,
+ struct device_node *child)
+{
+ struct gpmc_settings gpmc_s;
+ struct gpmc_timings gpmc_t;
+ struct resource res;
+ unsigned long base;
+ const char *name;
+ int ret, cs;
+ u32 val;
+ struct gpmc_device *gpmc = platform_get_drvdata(pdev);
+
+ if (of_property_read_u32(child, "reg", &cs) < 0) {
+ dev_err(&pdev->dev, "%pOF has no 'reg' property\n",
+ child);
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(child, 0, &res) < 0) {
+ dev_err(&pdev->dev, "%pOF has malformed 'reg' property\n",
+ child);
+ return -ENODEV;
+ }
+
+ /*
+ * Check if we have multiple instances of the same device
+ * on a single chip select. If so, use the already initialized
+ * timings.
+ */
+ name = gpmc_cs_get_name(cs);
+ if (name && of_node_name_eq(child, name))
+ goto no_timings;
+
+ ret = gpmc_cs_request(cs, resource_size(&res), &base);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot request GPMC CS %d\n", cs);
+ return ret;
+ }
+ gpmc_cs_set_name(cs, child->full_name);
+
+ gpmc_read_settings_dt(child, &gpmc_s);
+ gpmc_read_timings_dt(child, &gpmc_t);
+
+ /*
+ * For some GPMC devices we still need to rely on the bootloader
+ * timings because the devices can be connected via FPGA.
+ * REVISIT: Add timing support from slls644g.pdf.
+ */
+ if (!gpmc_t.cs_rd_off) {
+ WARN(1, "enable GPMC debug to configure .dts timings for CS%i\n",
+ cs);
+ gpmc_cs_show_timings(cs,
+ "please add GPMC bootloader timings to .dts");
+ goto no_timings;
+ }
+
+ /* CS must be disabled while making changes to gpmc configuration */
+ gpmc_cs_disable_mem(cs);
+
+ /*
+ * FIXME: gpmc_cs_request() will map the CS to an arbitrary
+ * location in the gpmc address space. When booting with
+ * device-tree we want the NOR flash to be mapped to the
+ * location specified in the device-tree blob. So remap the
+ * CS to this location. Once DT migration is complete should
+ * just make gpmc_cs_request() map a specific address.
+ */
+ ret = gpmc_cs_remap(cs, res.start);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot remap GPMC CS %d to %pa\n",
+ cs, &res.start);
+ if (res.start < GPMC_MEM_START) {
+ dev_info(&pdev->dev,
+ "GPMC CS %d start cannot be lesser than 0x%x\n",
+ cs, GPMC_MEM_START);
+ } else if (res.end > GPMC_MEM_END) {
+ dev_info(&pdev->dev,
+ "GPMC CS %d end cannot be greater than 0x%x\n",
+ cs, GPMC_MEM_END);
+ }
+ goto err;
+ }
+
+ if (of_node_name_eq(child, "nand")) {
+ /* Warn about older DT blobs with no compatible property */
+ if (!of_property_read_bool(child, "compatible")) {
+ dev_warn(&pdev->dev,
+ "Incompatible NAND node: missing compatible");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ if (of_node_name_eq(child, "onenand")) {
+ /* Warn about older DT blobs with no compatible property */
+ if (!of_property_read_bool(child, "compatible")) {
+ dev_warn(&pdev->dev,
+ "Incompatible OneNAND node: missing compatible");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ if (of_match_node(omap_nand_ids, child)) {
+ /* NAND specific setup */
+ val = 8;
+ of_property_read_u32(child, "nand-bus-width", &val);
+ switch (val) {
+ case 8:
+ gpmc_s.device_width = GPMC_DEVWIDTH_8BIT;
+ break;
+ case 16:
+ gpmc_s.device_width = GPMC_DEVWIDTH_16BIT;
+ break;
+ default:
+ dev_err(&pdev->dev, "%pOFn: invalid 'nand-bus-width'\n",
+ child);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* disable write protect */
+ gpmc_configure(GPMC_CONFIG_WP, 0);
+ gpmc_s.device_nand = true;
+ } else {
+ ret = of_property_read_u32(child, "bank-width",
+ &gpmc_s.device_width);
+ if (ret < 0 && !gpmc_s.device_width) {
+ dev_err(&pdev->dev,
+ "%pOF has no 'gpmc,device-width' property\n",
+ child);
+ goto err;
+ }
+ }
+
+ /* Reserve wait pin if it is required and valid */
+ if (gpmc_s.wait_on_read || gpmc_s.wait_on_write) {
+ ret = gpmc_alloc_waitpin(gpmc, &gpmc_s);
+ if (ret < 0)
+ goto err;
+ }
+
+ gpmc_cs_show_timings(cs, "before gpmc_cs_program_settings");
+
+ ret = gpmc_cs_program_settings(cs, &gpmc_s);
+ if (ret < 0)
+ goto err_cs;
+
+ ret = gpmc_cs_set_timings(cs, &gpmc_t, &gpmc_s);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set gpmc timings for: %pOFn\n",
+ child);
+ goto err_cs;
+ }
+
+ /* Clear limited address i.e. enable A26-A11 */
+ val = gpmc_read_reg(GPMC_CONFIG);
+ val &= ~GPMC_CONFIG_LIMITEDADDRESS;
+ gpmc_write_reg(GPMC_CONFIG, val);
+
+ /* Enable CS region */
+ gpmc_cs_enable_mem(cs);
+
+no_timings:
+
+ /* create platform device, NULL on error or when disabled */
+ if (!of_platform_device_create(child, NULL, &pdev->dev))
+ goto err_child_fail;
+
+ /* create children and other common bus children */
+ if (of_platform_default_populate(child, NULL, &pdev->dev))
+ goto err_child_fail;
+
+ return 0;
+
+err_child_fail:
+
+ dev_err(&pdev->dev, "failed to create gpmc child %pOFn\n", child);
+ ret = -ENODEV;
+
+err_cs:
+ gpmc_free_waitpin(gpmc, gpmc_s.wait_pin);
+err:
+ gpmc_cs_free(cs);
+
+ return ret;
+}
+
+static const struct of_device_id gpmc_dt_ids[];
+
+static int gpmc_probe_dt(struct platform_device *pdev)
+{
+ int ret;
+ const struct of_device_id *of_id =
+ of_match_device(gpmc_dt_ids, &pdev->dev);
+
+ if (!of_id)
+ return 0;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "gpmc,num-cs",
+ &gpmc_cs_num);
+ if (ret < 0) {
+ pr_err("%s: number of chip-selects not defined\n", __func__);
+ return ret;
+ } else if (gpmc_cs_num < 1) {
+ pr_err("%s: all chip-selects are disabled\n", __func__);
+ return -EINVAL;
+ } else if (gpmc_cs_num > GPMC_CS_NUM) {
+ pr_err("%s: number of supported chip-selects cannot be > %d\n",
+ __func__, GPMC_CS_NUM);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node, "gpmc,num-waitpins",
+ &gpmc_nr_waitpins);
+ if (ret < 0) {
+ pr_err("%s: number of wait pins not found!\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void gpmc_probe_dt_children(struct platform_device *pdev)
+{
+ int ret;
+ struct device_node *child;
+
+ for_each_available_child_of_node(pdev->dev.of_node, child) {
+ ret = gpmc_probe_generic_child(pdev, child);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to probe DT child '%pOFn': %d\n",
+ child, ret);
+ }
+ }
+}
+#else
+void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p)
+{
+ memset(p, 0, sizeof(*p));
+}
+static int gpmc_probe_dt(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static void gpmc_probe_dt_children(struct platform_device *pdev)
+{
+}
+#endif /* CONFIG_OF */
+
+static int gpmc_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ return 1; /* we're input only */
+}
+
+static int gpmc_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ return 0; /* we're input only */
+}
+
+static int gpmc_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ return -EINVAL; /* we're input only */
+}
+
+static void gpmc_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+}
+
+static int gpmc_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ u32 reg;
+
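+ /* the WAITx pin status bits start at bit 8 of GPMC_STATUS */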
+ offset += 8;
+
+ reg = gpmc_read_reg(GPMC_STATUS) & BIT(offset);
+
+ return !!reg;
+}
+
+static int gpmc_gpio_init(struct gpmc_device *gpmc)
+{
+ int ret;
+
+ gpmc->gpio_chip.parent = gpmc->dev;
+ gpmc->gpio_chip.owner = THIS_MODULE;
+ gpmc->gpio_chip.label = DEVICE_NAME;
+ gpmc->gpio_chip.ngpio = gpmc_nr_waitpins;
+ gpmc->gpio_chip.get_direction = gpmc_gpio_get_direction;
+ gpmc->gpio_chip.direction_input = gpmc_gpio_direction_input;
+ gpmc->gpio_chip.direction_output = gpmc_gpio_direction_output;
+ gpmc->gpio_chip.set = gpmc_gpio_set;
+ gpmc->gpio_chip.get = gpmc_gpio_get;
+ gpmc->gpio_chip.base = -1;
+
+ ret = devm_gpiochip_add_data(gpmc->dev, &gpmc->gpio_chip, NULL);
+ if (ret < 0) {
+ dev_err(gpmc->dev, "could not register gpio chip: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void omap3_gpmc_save_context(struct gpmc_device *gpmc)
+{
+ struct omap3_gpmc_regs *gpmc_context;
+ int i;
+
+ if (!gpmc || !gpmc_base)
+ return;
+
+ gpmc_context = &gpmc->context;
+
+ gpmc_context->sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
+ gpmc_context->irqenable = gpmc_read_reg(GPMC_IRQENABLE);
+ gpmc_context->timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
+ gpmc_context->config = gpmc_read_reg(GPMC_CONFIG);
+ gpmc_context->prefetch_config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1);
+ gpmc_context->prefetch_config2 = gpmc_read_reg(GPMC_PREFETCH_CONFIG2);
+ gpmc_context->prefetch_control = gpmc_read_reg(GPMC_PREFETCH_CONTROL);
+ for (i = 0; i < gpmc_cs_num; i++) {
+ gpmc_context->cs_context[i].is_valid = gpmc_cs_mem_enabled(i);
+ if (gpmc_context->cs_context[i].is_valid) {
+ gpmc_context->cs_context[i].config1 =
+ gpmc_cs_read_reg(i, GPMC_CS_CONFIG1);
+ gpmc_context->cs_context[i].config2 =
+ gpmc_cs_read_reg(i, GPMC_CS_CONFIG2);
+ gpmc_context->cs_context[i].config3 =
+ gpmc_cs_read_reg(i, GPMC_CS_CONFIG3);
+ gpmc_context->cs_context[i].config4 =
+ gpmc_cs_read_reg(i, GPMC_CS_CONFIG4);
+ gpmc_context->cs_context[i].config5 =
+ gpmc_cs_read_reg(i, GPMC_CS_CONFIG5);
+ gpmc_context->cs_context[i].config6 =
+ gpmc_cs_read_reg(i, GPMC_CS_CONFIG6);
+ gpmc_context->cs_context[i].config7 =
+ gpmc_cs_read_reg(i, GPMC_CS_CONFIG7);
+ }
+ }
+}
+
+static void omap3_gpmc_restore_context(struct gpmc_device *gpmc)
+{
+ struct omap3_gpmc_regs *gpmc_context;
+ int i;
+
+ if (!gpmc || !gpmc_base)
+ return;
+
+ gpmc_context = &gpmc->context;
+
+ gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context->sysconfig);
+ gpmc_write_reg(GPMC_IRQENABLE, gpmc_context->irqenable);
+ gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context->timeout_ctrl);
+ gpmc_write_reg(GPMC_CONFIG, gpmc_context->config);
+ gpmc_write_reg(GPMC_PREFETCH_CONFIG1, gpmc_context->prefetch_config1);
+ gpmc_write_reg(GPMC_PREFETCH_CONFIG2, gpmc_context->prefetch_config2);
+ gpmc_write_reg(GPMC_PREFETCH_CONTROL, gpmc_context->prefetch_control);
+ for (i = 0; i < gpmc_cs_num; i++) {
+ if (gpmc_context->cs_context[i].is_valid) {
+ gpmc_cs_write_reg(i, GPMC_CS_CONFIG1,
+ gpmc_context->cs_context[i].config1);
+ gpmc_cs_write_reg(i, GPMC_CS_CONFIG2,
+ gpmc_context->cs_context[i].config2);
+ gpmc_cs_write_reg(i, GPMC_CS_CONFIG3,
+ gpmc_context->cs_context[i].config3);
+ gpmc_cs_write_reg(i, GPMC_CS_CONFIG4,
+ gpmc_context->cs_context[i].config4);
+ gpmc_cs_write_reg(i, GPMC_CS_CONFIG5,
+ gpmc_context->cs_context[i].config5);
+ gpmc_cs_write_reg(i, GPMC_CS_CONFIG6,
+ gpmc_context->cs_context[i].config6);
+ gpmc_cs_write_reg(i, GPMC_CS_CONFIG7,
+ gpmc_context->cs_context[i].config7);
+ } else {
+ gpmc_cs_write_reg(i, GPMC_CS_CONFIG7, 0);
+ }
+ }
+}
+
+static int omap_gpmc_context_notifier(struct notifier_block *nb,
+ unsigned long cmd, void *v)
+{
+ struct gpmc_device *gpmc;
+
+ gpmc = container_of(nb, struct gpmc_device, nb);
+ if (gpmc->is_suspended || pm_runtime_suspended(gpmc->dev))
+ return NOTIFY_OK;
+
+ switch (cmd) {
+ case CPU_CLUSTER_PM_ENTER:
+ omap3_gpmc_save_context(gpmc);
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */
+ break;
+ case CPU_CLUSTER_PM_EXIT:
+ omap3_gpmc_restore_context(gpmc);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int gpmc_probe(struct platform_device *pdev)
+{
+ int rc, i;
+ u32 l;
+ struct resource *res;
+ struct gpmc_device *gpmc;
+
+ gpmc = devm_kzalloc(&pdev->dev, sizeof(*gpmc), GFP_KERNEL);
+ if (!gpmc)
+ return -ENOMEM;
+
+ gpmc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, gpmc);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ if (!res) {
+ /* legacy DT */
+ gpmc_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(gpmc_base))
+ return PTR_ERR(gpmc_base);
+ } else {
+ gpmc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gpmc_base))
+ return PTR_ERR(gpmc_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "data");
+ if (!res) {
+ dev_err(&pdev->dev, "couldn't get data reg resource\n");
+ return -ENOENT;
+ }
+
+ gpmc->data = res;
+ }
+
+ gpmc->irq = platform_get_irq(pdev, 0);
+ if (gpmc->irq < 0)
+ return gpmc->irq;
+
+ gpmc_l3_clk = devm_clk_get(&pdev->dev, "fck");
+ if (IS_ERR(gpmc_l3_clk)) {
+ dev_err(&pdev->dev, "Failed to get GPMC fck\n");
+ return PTR_ERR(gpmc_l3_clk);
+ }
+
+ if (!clk_get_rate(gpmc_l3_clk)) {
+ dev_err(&pdev->dev, "Invalid GPMC fck clock rate\n");
+ return -EINVAL;
+ }
+
+ if (pdev->dev.of_node) {
+ rc = gpmc_probe_dt(pdev);
+ if (rc)
+ return rc;
+ } else {
+ gpmc_cs_num = GPMC_CS_NUM;
+ gpmc_nr_waitpins = GPMC_NR_WAITPINS;
+ }
+
+ gpmc->waitpins = devm_kzalloc(&pdev->dev,
+ gpmc_nr_waitpins * sizeof(struct gpmc_waitpin),
+ GFP_KERNEL);
+ if (!gpmc->waitpins)
+ return -ENOMEM;
+
+ for (i = 0; i < gpmc_nr_waitpins; i++)
+ gpmc->waitpins[i].pin = GPMC_WAITPIN_INVALID;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ l = gpmc_read_reg(GPMC_REVISION);
+
+ /*
+ * FIXME: Once device-tree migration is complete the below flags
+ * should be populated based upon the device-tree compatible
+ * string. For now just use the IP revision. OMAP3+ devices have
+ * the wr_access and wr_data_mux_bus register fields. OMAP4+
+ * devices support the addr-addr-data multiplex protocol.
+ *
+ * GPMC IP revisions:
+ * - OMAP24xx = 2.0
+ * - OMAP3xxx = 5.0
+ * - OMAP44xx/54xx/AM335x = 6.0
+ */
+ if (GPMC_REVISION_MAJOR(l) > 0x4)
+ gpmc_capability = GPMC_HAS_WR_ACCESS | GPMC_HAS_WR_DATA_MUX_BUS;
+ if (GPMC_REVISION_MAJOR(l) > 0x5)
+ gpmc_capability |= GPMC_HAS_MUX_AAD;
+ dev_info(gpmc->dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l),
+ GPMC_REVISION_MINOR(l));
+
+ gpmc_mem_init(gpmc);
+ rc = gpmc_gpio_init(gpmc);
+ if (rc)
+ goto gpio_init_failed;
+
+ gpmc->nirqs = GPMC_NR_NAND_IRQS + gpmc_nr_waitpins;
+ rc = gpmc_setup_irq(gpmc);
+ if (rc) {
+ dev_err(gpmc->dev, "gpmc_setup_irq failed\n");
+ goto gpio_init_failed;
+ }
+
+ gpmc_probe_dt_children(pdev);
+
+ gpmc->nb.notifier_call = omap_gpmc_context_notifier;
+ cpu_pm_register_notifier(&gpmc->nb);
+
+ return 0;
+
+gpio_init_failed:
+ gpmc_mem_exit();
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return rc;
+}
+
+static int gpmc_remove(struct platform_device *pdev)
+{
+ int i;
+ struct gpmc_device *gpmc = platform_get_drvdata(pdev);
+
+ cpu_pm_unregister_notifier(&gpmc->nb);
+ for (i = 0; i < gpmc_nr_waitpins; i++)
+ gpmc_free_waitpin(gpmc, i);
+ gpmc_free_irq(gpmc);
+ gpmc_mem_exit();
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gpmc_suspend(struct device *dev)
+{
+ struct gpmc_device *gpmc = dev_get_drvdata(dev);
+
+ omap3_gpmc_save_context(gpmc);
+ pm_runtime_put_sync(dev);
+ gpmc->is_suspended = 1;
+
+ return 0;
+}
+
+static int gpmc_resume(struct device *dev)
+{
+ struct gpmc_device *gpmc = dev_get_drvdata(dev);
+
+ pm_runtime_get_sync(dev);
+ omap3_gpmc_restore_context(gpmc);
+ gpmc->is_suspended = 0;
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(gpmc_pm_ops, gpmc_suspend, gpmc_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id gpmc_dt_ids[] = {
+ { .compatible = "ti,omap2420-gpmc" },
+ { .compatible = "ti,omap2430-gpmc" },
+ { .compatible = "ti,omap3430-gpmc" }, /* omap3430 & omap3630 */
+ { .compatible = "ti,omap4430-gpmc" }, /* omap4430 & omap4460 & omap543x */
+ { .compatible = "ti,am3352-gpmc" }, /* am335x devices */
+ { .compatible = "ti,am64-gpmc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpmc_dt_ids);
+#endif
+
+static struct platform_driver gpmc_driver = {
+ .probe = gpmc_probe,
+ .remove = gpmc_remove,
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = of_match_ptr(gpmc_dt_ids),
+ .pm = &gpmc_pm_ops,
+ },
+};
+
+module_platform_driver(gpmc_driver);
+
+MODULE_DESCRIPTION("Texas Instruments GPMC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/memory/pl172.c b/drivers/memory/pl172.c
new file mode 100644
index 0000000000..9eb8cc7de4
--- /dev/null
+++ b/drivers/memory/pl172.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Memory controller driver for ARM PrimeCell PL172
+ * PrimeCell MultiPort Memory Controller (PL172)
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * Based on:
+ * TI AEMIF driver, Copyright (C) 2010 - 2013 Texas Instruments Inc.
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/time.h>
+
+#define MPMC_STATIC_CFG(n) (0x200 + 0x20 * (n))
+#define MPMC_STATIC_CFG_MW_8BIT 0x0
+#define MPMC_STATIC_CFG_MW_16BIT 0x1
+#define MPMC_STATIC_CFG_MW_32BIT 0x2
+#define MPMC_STATIC_CFG_PM BIT(3)
+#define MPMC_STATIC_CFG_PC BIT(6)
+#define MPMC_STATIC_CFG_PB BIT(7)
+#define MPMC_STATIC_CFG_EW BIT(8)
+#define MPMC_STATIC_CFG_B BIT(19)
+#define MPMC_STATIC_CFG_P BIT(20)
+#define MPMC_STATIC_WAIT_WEN(n) (0x204 + 0x20 * (n))
+#define MPMC_STATIC_WAIT_WEN_MAX 0x0f
+#define MPMC_STATIC_WAIT_OEN(n) (0x208 + 0x20 * (n))
+#define MPMC_STATIC_WAIT_OEN_MAX 0x0f
+#define MPMC_STATIC_WAIT_RD(n) (0x20c + 0x20 * (n))
+#define MPMC_STATIC_WAIT_RD_MAX 0x1f
+#define MPMC_STATIC_WAIT_PAGE(n) (0x210 + 0x20 * (n))
+#define MPMC_STATIC_WAIT_PAGE_MAX 0x1f
+#define MPMC_STATIC_WAIT_WR(n) (0x214 + 0x20 * (n))
+#define MPMC_STATIC_WAIT_WR_MAX 0x1f
+#define MPMC_STATIC_WAIT_TURN(n) (0x218 + 0x20 * (n))
+#define MPMC_STATIC_WAIT_TURN_MAX 0x0f
+
+/* Maximum number of static chip selects */
+#define PL172_MAX_CS 4
+
+struct pl172_data {
+ void __iomem *base;
+ unsigned long rate;
+ struct clk *clk;
+};
+
+static int pl172_timing_prop(struct amba_device *adev,
+ const struct device_node *np, const char *name,
+ u32 reg_offset, u32 max, int start)
+{
+ struct pl172_data *pl172 = amba_get_drvdata(adev);
+ int cycles;
+ u32 val;
+
+ if (!of_property_read_u32(np, name, &val)) {
+ cycles = DIV_ROUND_UP(val * pl172->rate, NSEC_PER_MSEC) - start;
+ if (cycles < 0) {
+ cycles = 0;
+ } else if (cycles > max) {
+ dev_err(&adev->dev, "%s timing too tight\n", name);
+ return -EINVAL;
+ }
+
+ writel(cycles, pl172->base + reg_offset);
+ }
+
+ dev_dbg(&adev->dev, "%s: %u cycle(s)\n", name, start +
+ readl(pl172->base + reg_offset));
+
+ return 0;
+}
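+
+/*
+ * Worked example (hypothetical numbers) for the conversion above: with
+ * MPMCCLK at 120 MHz, pl172->rate is 120000 (kHz), so a 90 ns
+ * "mpmc,read-access-delay" yields DIV_ROUND_UP(90 * 120000, 10^6) = 11
+ * clock cycles before the per-register start offset is subtracted.
+ */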
+
+static int pl172_setup_static(struct amba_device *adev,
+ struct device_node *np, u32 cs)
+{
+ struct pl172_data *pl172 = amba_get_drvdata(adev);
+ u32 cfg;
+ int ret;
+
+ /* MPMC static memory configuration */
+ if (!of_property_read_u32(np, "mpmc,memory-width", &cfg)) {
+ if (cfg == 8) {
+ cfg = MPMC_STATIC_CFG_MW_8BIT;
+ } else if (cfg == 16) {
+ cfg = MPMC_STATIC_CFG_MW_16BIT;
+ } else if (cfg == 32) {
+ cfg = MPMC_STATIC_CFG_MW_32BIT;
+ } else {
+ dev_err(&adev->dev, "invalid memory width cs%u\n", cs);
+ return -EINVAL;
+ }
+ } else {
+ dev_err(&adev->dev, "memory-width property required\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(np, "mpmc,async-page-mode"))
+ cfg |= MPMC_STATIC_CFG_PM;
+
+ if (of_property_read_bool(np, "mpmc,cs-active-high"))
+ cfg |= MPMC_STATIC_CFG_PC;
+
+ if (of_property_read_bool(np, "mpmc,byte-lane-low"))
+ cfg |= MPMC_STATIC_CFG_PB;
+
+ if (of_property_read_bool(np, "mpmc,extended-wait"))
+ cfg |= MPMC_STATIC_CFG_EW;
+
+ if (amba_part(adev) == 0x172 &&
+ of_property_read_bool(np, "mpmc,buffer-enable"))
+ cfg |= MPMC_STATIC_CFG_B;
+
+ if (of_property_read_bool(np, "mpmc,write-protect"))
+ cfg |= MPMC_STATIC_CFG_P;
+
+ writel(cfg, pl172->base + MPMC_STATIC_CFG(cs));
+ dev_dbg(&adev->dev, "mpmc static config cs%u: 0x%08x\n", cs, cfg);
+
+ /* MPMC static memory timing */
+ ret = pl172_timing_prop(adev, np, "mpmc,write-enable-delay",
+ MPMC_STATIC_WAIT_WEN(cs),
+ MPMC_STATIC_WAIT_WEN_MAX, 1);
+ if (ret)
+ goto fail;
+
+ ret = pl172_timing_prop(adev, np, "mpmc,output-enable-delay",
+ MPMC_STATIC_WAIT_OEN(cs),
+ MPMC_STATIC_WAIT_OEN_MAX, 0);
+ if (ret)
+ goto fail;
+
+ ret = pl172_timing_prop(adev, np, "mpmc,read-access-delay",
+ MPMC_STATIC_WAIT_RD(cs),
+ MPMC_STATIC_WAIT_RD_MAX, 1);
+ if (ret)
+ goto fail;
+
+ ret = pl172_timing_prop(adev, np, "mpmc,page-mode-read-delay",
+ MPMC_STATIC_WAIT_PAGE(cs),
+ MPMC_STATIC_WAIT_PAGE_MAX, 1);
+ if (ret)
+ goto fail;
+
+ ret = pl172_timing_prop(adev, np, "mpmc,write-access-delay",
+ MPMC_STATIC_WAIT_WR(cs),
+ MPMC_STATIC_WAIT_WR_MAX, 2);
+ if (ret)
+ goto fail;
+
+ ret = pl172_timing_prop(adev, np, "mpmc,turn-round-delay",
+ MPMC_STATIC_WAIT_TURN(cs),
+ MPMC_STATIC_WAIT_TURN_MAX, 1);
+ if (ret)
+ goto fail;
+
+ return 0;
+fail:
+ dev_err(&adev->dev, "failed to configure cs%u\n", cs);
+ return ret;
+}
+
+static int pl172_parse_cs_config(struct amba_device *adev,
+ struct device_node *np)
+{
+ u32 cs;
+
+ if (!of_property_read_u32(np, "mpmc,cs", &cs)) {
+ if (cs >= PL172_MAX_CS) {
+ dev_err(&adev->dev, "cs%u invalid\n", cs);
+ return -EINVAL;
+ }
+
+ return pl172_setup_static(adev, np, cs);
+ }
+
+ dev_err(&adev->dev, "cs property required\n");
+
+ return -EINVAL;
+}
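+
+/*
+ * Illustrative child node (hypothetical values) for one static chip
+ * select, using the properties parsed above:
+ *
+ *	flash@0 {
+ *		mpmc,cs = <0>;
+ *		mpmc,memory-width = <16>;
+ *		mpmc,byte-lane-low;
+ *		mpmc,read-access-delay = <90>;
+ *		mpmc,write-access-delay = <90>;
+ *	};
+ */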
+
+static const char * const pl172_revisions[] = {"r1", "r2", "r2p3", "r2p4"};
+static const char * const pl175_revisions[] = {"r1"};
+static const char * const pl176_revisions[] = {"r0"};
+
+static int pl172_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ struct device_node *child_np, *np = adev->dev.of_node;
+ struct device *dev = &adev->dev;
+ static const char *rev = "?";
+ struct pl172_data *pl172;
+ int ret;
+
+ if (amba_part(adev) == 0x172) {
+ if (amba_rev(adev) < ARRAY_SIZE(pl172_revisions))
+ rev = pl172_revisions[amba_rev(adev)];
+ } else if (amba_part(adev) == 0x175) {
+ if (amba_rev(adev) < ARRAY_SIZE(pl175_revisions))
+ rev = pl175_revisions[amba_rev(adev)];
+ } else if (amba_part(adev) == 0x176) {
+ if (amba_rev(adev) < ARRAY_SIZE(pl176_revisions))
+ rev = pl176_revisions[amba_rev(adev)];
+ }
+
+ dev_info(dev, "ARM PL%x revision %s\n", amba_part(adev), rev);
+
+ pl172 = devm_kzalloc(dev, sizeof(*pl172), GFP_KERNEL);
+ if (!pl172)
+ return -ENOMEM;
+
+ pl172->clk = devm_clk_get(dev, "mpmcclk");
+ if (IS_ERR(pl172->clk)) {
+ dev_err(dev, "no mpmcclk provided clock\n");
+ return PTR_ERR(pl172->clk);
+ }
+
+ ret = clk_prepare_enable(pl172->clk);
+ if (ret) {
+ dev_err(dev, "unable to mpmcclk enable clock\n");
+ return ret;
+ }
+
+ pl172->rate = clk_get_rate(pl172->clk) / MSEC_PER_SEC;
+ if (!pl172->rate) {
+ dev_err(dev, "unable to get mpmcclk clock rate\n");
+ ret = -EINVAL;
+ goto err_clk_enable;
+ }
+
+ ret = amba_request_regions(adev, NULL);
+ if (ret) {
+ dev_err(dev, "unable to request AMBA regions\n");
+ goto err_clk_enable;
+ }
+
+ pl172->base = devm_ioremap(dev, adev->res.start,
+ resource_size(&adev->res));
+ if (!pl172->base) {
+ dev_err(dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_no_ioremap;
+ }
+
+ amba_set_drvdata(adev, pl172);
+
+ /*
+ * Loop through each child node, each of which represents a chip select,
+ * and configure its parameters and timing. If successful, populate the
+ * devices under that node.
+ */
+ for_each_available_child_of_node(np, child_np) {
+ ret = pl172_parse_cs_config(adev, child_np);
+ if (ret)
+ continue;
+
+ of_platform_populate(child_np, NULL, NULL, dev);
+ }
+
+ return 0;
+
+err_no_ioremap:
+ amba_release_regions(adev);
+err_clk_enable:
+ clk_disable_unprepare(pl172->clk);
+ return ret;
+}
+
+static void pl172_remove(struct amba_device *adev)
+{
+ struct pl172_data *pl172 = amba_get_drvdata(adev);
+
+ clk_disable_unprepare(pl172->clk);
+ amba_release_regions(adev);
+}
+
+static const struct amba_id pl172_ids[] = {
+ /* PrimeCell MPMC PL172, EMC found on NXP LPC18xx and LPC43xx */
+ {
+ .id = 0x07041172,
+ .mask = 0x3f0fffff,
+ },
+ /* PrimeCell MPMC PL175, EMC found on NXP LPC32xx */
+ {
+ .id = 0x07041175,
+ .mask = 0x3f0fffff,
+ },
+ /* PrimeCell MPMC PL176 */
+ {
+ .id = 0x89041176,
+ .mask = 0xff0fffff,
+ },
+ { 0, 0 },
+};
+MODULE_DEVICE_TABLE(amba, pl172_ids);
+
+static struct amba_driver pl172_driver = {
+ .drv = {
+ .name = "memory-pl172",
+ },
+ .probe = pl172_probe,
+ .remove = pl172_remove,
+ .id_table = pl172_ids,
+};
+module_amba_driver(pl172_driver);
+
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_DESCRIPTION("PL172 Memory Controller Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
new file mode 100644
index 0000000000..48540817e0
--- /dev/null
+++ b/drivers/memory/pl353-smc.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM PL353 SMC driver
+ *
+ * Copyright (C) 2012 - 2018 Xilinx, Inc
+ * Author: Punnaiah Choudary Kalluri <punnaiah@xilinx.com>
+ * Author: Naga Sureshkumar Relli <nagasure@xilinx.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+
+/**
+ * struct pl353_smc_data - Private smc driver structure
+ * @memclk: Pointer to the peripheral clock
+ * @aclk: Pointer to the AXI peripheral clock
+ */
+struct pl353_smc_data {
+ struct clk *memclk;
+ struct clk *aclk;
+};
+
+static int __maybe_unused pl353_smc_suspend(struct device *dev)
+{
+ struct pl353_smc_data *pl353_smc = dev_get_drvdata(dev);
+
+ clk_disable(pl353_smc->memclk);
+ clk_disable(pl353_smc->aclk);
+
+ return 0;
+}
+
+static int __maybe_unused pl353_smc_resume(struct device *dev)
+{
+ struct pl353_smc_data *pl353_smc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_enable(pl353_smc->aclk);
+ if (ret) {
+ dev_err(dev, "Cannot enable axi domain clock.\n");
+ return ret;
+ }
+
+ ret = clk_enable(pl353_smc->memclk);
+ if (ret) {
+ dev_err(dev, "Cannot enable memory clock.\n");
+ clk_disable(pl353_smc->aclk);
+ return ret;
+ }
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(pl353_smc_dev_pm_ops, pl353_smc_suspend,
+ pl353_smc_resume);
+
+static const struct of_device_id pl353_smc_supported_children[] = {
+ {
+ .compatible = "cfi-flash"
+ },
+ {
+ .compatible = "arm,pl353-nand-r2p1",
+ },
+ {}
+};
+
+static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ struct device_node *of_node = adev->dev.of_node;
+ const struct of_device_id *match = NULL;
+ struct pl353_smc_data *pl353_smc;
+ struct device_node *child;
+ int err;
+
+ pl353_smc = devm_kzalloc(&adev->dev, sizeof(*pl353_smc), GFP_KERNEL);
+ if (!pl353_smc)
+ return -ENOMEM;
+
+ pl353_smc->aclk = devm_clk_get(&adev->dev, "apb_pclk");
+ if (IS_ERR(pl353_smc->aclk)) {
+ dev_err(&adev->dev, "aclk clock not found.\n");
+ return PTR_ERR(pl353_smc->aclk);
+ }
+
+ pl353_smc->memclk = devm_clk_get(&adev->dev, "memclk");
+ if (IS_ERR(pl353_smc->memclk)) {
+ dev_err(&adev->dev, "memclk clock not found.\n");
+ return PTR_ERR(pl353_smc->memclk);
+ }
+
+ err = clk_prepare_enable(pl353_smc->aclk);
+ if (err) {
+ dev_err(&adev->dev, "Unable to enable AXI clock.\n");
+ return err;
+ }
+
+ err = clk_prepare_enable(pl353_smc->memclk);
+ if (err) {
+ dev_err(&adev->dev, "Unable to enable memory clock.\n");
+ goto disable_axi_clk;
+ }
+
+ amba_set_drvdata(adev, pl353_smc);
+
+ /* Find compatible children. Only a single child is supported */
+ for_each_available_child_of_node(of_node, child) {
+ match = of_match_node(pl353_smc_supported_children, child);
+ if (!match) {
+ dev_warn(&adev->dev, "unsupported child node\n");
+ continue;
+ }
+ break;
+ }
+ if (!match) {
+ err = -ENODEV;
+ dev_err(&adev->dev, "no matching children\n");
+ goto disable_mem_clk;
+ }
+
+ of_platform_device_create(child, NULL, &adev->dev);
+ of_node_put(child);
+
+ return 0;
+
+disable_mem_clk:
+ clk_disable_unprepare(pl353_smc->memclk);
+disable_axi_clk:
+ clk_disable_unprepare(pl353_smc->aclk);
+
+ return err;
+}
+
+static void pl353_smc_remove(struct amba_device *adev)
+{
+ struct pl353_smc_data *pl353_smc = amba_get_drvdata(adev);
+
+ clk_disable_unprepare(pl353_smc->memclk);
+ clk_disable_unprepare(pl353_smc->aclk);
+}
+
+static const struct amba_id pl353_ids[] = {
+ {
+ .id = 0x00041353,
+ .mask = 0x000fffff,
+ },
+ { 0, 0 },
+};
+MODULE_DEVICE_TABLE(amba, pl353_ids);
+
+static struct amba_driver pl353_smc_driver = {
+ .drv = {
+ .owner = THIS_MODULE,
+ .name = "pl353-smc",
+ .pm = &pl353_smc_dev_pm_ops,
+ },
+ .id_table = pl353_ids,
+ .probe = pl353_smc_probe,
+ .remove = pl353_smc_remove,
+};
+
+module_amba_driver(pl353_smc_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("ARM PL353 SMC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
new file mode 100644
index 0000000000..9695b2d3ae
--- /dev/null
+++ b/drivers/memory/renesas-rpc-if.c
@@ -0,0 +1,809 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RPC-IF core driver
+ *
+ * Copyright (C) 2018-2019 Renesas Solutions Corp.
+ * Copyright (C) 2019 Macronix International Co., Ltd.
+ * Copyright (C) 2019-2020 Cogent Embedded, Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <memory/renesas-rpc-if.h>
+
+#define RPCIF_CMNCR 0x0000 /* R/W */
+#define RPCIF_CMNCR_MD BIT(31)
+#define RPCIF_CMNCR_MOIIO3(val) (((val) & 0x3) << 22)
+#define RPCIF_CMNCR_MOIIO2(val) (((val) & 0x3) << 20)
+#define RPCIF_CMNCR_MOIIO1(val) (((val) & 0x3) << 18)
+#define RPCIF_CMNCR_MOIIO0(val) (((val) & 0x3) << 16)
+#define RPCIF_CMNCR_MOIIO(val) (RPCIF_CMNCR_MOIIO0(val) | RPCIF_CMNCR_MOIIO1(val) | \
+ RPCIF_CMNCR_MOIIO2(val) | RPCIF_CMNCR_MOIIO3(val))
+#define RPCIF_CMNCR_IO3FV(val) (((val) & 0x3) << 14) /* documented for RZ/G2L */
+#define RPCIF_CMNCR_IO2FV(val) (((val) & 0x3) << 12) /* documented for RZ/G2L */
+#define RPCIF_CMNCR_IO0FV(val) (((val) & 0x3) << 8)
+#define RPCIF_CMNCR_IOFV(val) (RPCIF_CMNCR_IO0FV(val) | RPCIF_CMNCR_IO2FV(val) | \
+ RPCIF_CMNCR_IO3FV(val))
+#define RPCIF_CMNCR_BSZ(val) (((val) & 0x3) << 0)
+
+#define RPCIF_SSLDR 0x0004 /* R/W */
+#define RPCIF_SSLDR_SPNDL(d) (((d) & 0x7) << 16)
+#define RPCIF_SSLDR_SLNDL(d) (((d) & 0x7) << 8)
+#define RPCIF_SSLDR_SCKDL(d) (((d) & 0x7) << 0)
+
+#define RPCIF_DRCR 0x000C /* R/W */
+#define RPCIF_DRCR_SSLN BIT(24)
+#define RPCIF_DRCR_RBURST(v) ((((v) - 1) & 0x1F) << 16)
+#define RPCIF_DRCR_RCF BIT(9)
+#define RPCIF_DRCR_RBE BIT(8)
+#define RPCIF_DRCR_SSLE BIT(0)
+
+#define RPCIF_DRCMR 0x0010 /* R/W */
+#define RPCIF_DRCMR_CMD(c) (((c) & 0xFF) << 16)
+#define RPCIF_DRCMR_OCMD(c) (((c) & 0xFF) << 0)
+
+#define RPCIF_DREAR 0x0014 /* R/W */
+#define RPCIF_DREAR_EAV(c) (((c) & 0xF) << 16)
+#define RPCIF_DREAR_EAC(c) (((c) & 0x7) << 0)
+
+#define RPCIF_DROPR 0x0018 /* R/W */
+
+#define RPCIF_DRENR 0x001C /* R/W */
+#define RPCIF_DRENR_CDB(o) (u32)((((o) & 0x3) << 30))
+#define RPCIF_DRENR_OCDB(o) (((o) & 0x3) << 28)
+#define RPCIF_DRENR_ADB(o) (((o) & 0x3) << 24)
+#define RPCIF_DRENR_OPDB(o) (((o) & 0x3) << 20)
+#define RPCIF_DRENR_DRDB(o) (((o) & 0x3) << 16)
+#define RPCIF_DRENR_DME BIT(15)
+#define RPCIF_DRENR_CDE BIT(14)
+#define RPCIF_DRENR_OCDE BIT(12)
+#define RPCIF_DRENR_ADE(v) (((v) & 0xF) << 8)
+#define RPCIF_DRENR_OPDE(v) (((v) & 0xF) << 4)
+
+#define RPCIF_SMCR 0x0020 /* R/W */
+#define RPCIF_SMCR_SSLKP BIT(8)
+#define RPCIF_SMCR_SPIRE BIT(2)
+#define RPCIF_SMCR_SPIWE BIT(1)
+#define RPCIF_SMCR_SPIE BIT(0)
+
+#define RPCIF_SMCMR 0x0024 /* R/W */
+#define RPCIF_SMCMR_CMD(c) (((c) & 0xFF) << 16)
+#define RPCIF_SMCMR_OCMD(c) (((c) & 0xFF) << 0)
+
+#define RPCIF_SMADR 0x0028 /* R/W */
+
+#define RPCIF_SMOPR 0x002C /* R/W */
+#define RPCIF_SMOPR_OPD3(o) (((o) & 0xFF) << 24)
+#define RPCIF_SMOPR_OPD2(o) (((o) & 0xFF) << 16)
+#define RPCIF_SMOPR_OPD1(o) (((o) & 0xFF) << 8)
+#define RPCIF_SMOPR_OPD0(o) (((o) & 0xFF) << 0)
+
+#define RPCIF_SMENR 0x0030 /* R/W */
+#define RPCIF_SMENR_CDB(o) (((o) & 0x3) << 30)
+#define RPCIF_SMENR_OCDB(o) (((o) & 0x3) << 28)
+#define RPCIF_SMENR_ADB(o) (((o) & 0x3) << 24)
+#define RPCIF_SMENR_OPDB(o) (((o) & 0x3) << 20)
+#define RPCIF_SMENR_SPIDB(o) (((o) & 0x3) << 16)
+#define RPCIF_SMENR_DME BIT(15)
+#define RPCIF_SMENR_CDE BIT(14)
+#define RPCIF_SMENR_OCDE BIT(12)
+#define RPCIF_SMENR_ADE(v) (((v) & 0xF) << 8)
+#define RPCIF_SMENR_OPDE(v) (((v) & 0xF) << 4)
+#define RPCIF_SMENR_SPIDE(v) (((v) & 0xF) << 0)
+
+#define RPCIF_SMRDR0 0x0038 /* R */
+#define RPCIF_SMRDR1 0x003C /* R */
+#define RPCIF_SMWDR0 0x0040 /* W */
+#define RPCIF_SMWDR1 0x0044 /* W */
+
+#define RPCIF_CMNSR 0x0048 /* R */
+#define RPCIF_CMNSR_SSLF BIT(1)
+#define RPCIF_CMNSR_TEND BIT(0)
+
+#define RPCIF_DRDMCR 0x0058 /* R/W */
+#define RPCIF_DRDMCR_DMCYC(v) ((((v) - 1) & 0x1F) << 0)
+
+#define RPCIF_DRDRENR 0x005C /* R/W */
+#define RPCIF_DRDRENR_HYPE(v) (((v) & 0x7) << 12)
+#define RPCIF_DRDRENR_ADDRE BIT(8)
+#define RPCIF_DRDRENR_OPDRE BIT(4)
+#define RPCIF_DRDRENR_DRDRE BIT(0)
+
+#define RPCIF_SMDMCR 0x0060 /* R/W */
+#define RPCIF_SMDMCR_DMCYC(v) ((((v) - 1) & 0x1F) << 0)
+
+#define RPCIF_SMDRENR 0x0064 /* R/W */
+#define RPCIF_SMDRENR_HYPE(v) (((v) & 0x7) << 12)
+#define RPCIF_SMDRENR_ADDRE BIT(8)
+#define RPCIF_SMDRENR_OPDRE BIT(4)
+#define RPCIF_SMDRENR_SPIDRE BIT(0)
+
+#define RPCIF_PHYADD 0x0070 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */
+#define RPCIF_PHYWR 0x0074 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */
+
+#define RPCIF_PHYCNT 0x007C /* R/W */
+#define RPCIF_PHYCNT_CAL BIT(31)
+#define RPCIF_PHYCNT_OCTA(v) (((v) & 0x3) << 22)
+#define RPCIF_PHYCNT_EXDS BIT(21)
+#define RPCIF_PHYCNT_OCT BIT(20)
+#define RPCIF_PHYCNT_DDRCAL BIT(19)
+#define RPCIF_PHYCNT_HS BIT(18)
+#define RPCIF_PHYCNT_CKSEL(v) (((v) & 0x3) << 16) /* valid only for RZ/G2L */
+#define RPCIF_PHYCNT_STRTIM(v) (((v) & 0x7) << 15 | ((v) & 0x8) << 24) /* valid for R-Car and RZ/G2{E,H,M,N} */
+
+#define RPCIF_PHYCNT_WBUF2 BIT(4)
+#define RPCIF_PHYCNT_WBUF BIT(2)
+#define RPCIF_PHYCNT_PHYMEM(v) (((v) & 0x3) << 0)
+#define RPCIF_PHYCNT_PHYMEM_MASK GENMASK(1, 0)
+
+#define RPCIF_PHYOFFSET1 0x0080 /* R/W */
+#define RPCIF_PHYOFFSET1_DDRTMG(v) (((v) & 0x3) << 28)
+
+#define RPCIF_PHYOFFSET2 0x0084 /* R/W */
+#define RPCIF_PHYOFFSET2_OCTTMG(v) (((v) & 0x7) << 8)
+
+#define RPCIF_PHYINT 0x0088 /* R/W */
+#define RPCIF_PHYINT_WPVAL BIT(1)
+
+static const struct regmap_range rpcif_volatile_ranges[] = {
+ regmap_reg_range(RPCIF_SMRDR0, RPCIF_SMRDR1),
+ regmap_reg_range(RPCIF_SMWDR0, RPCIF_SMWDR1),
+ regmap_reg_range(RPCIF_CMNSR, RPCIF_CMNSR),
+};
+
+static const struct regmap_access_table rpcif_volatile_table = {
+ .yes_ranges = rpcif_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(rpcif_volatile_ranges),
+};
+
+struct rpcif_info {
+ enum rpcif_type type;
+ u8 strtim;
+};
+
+struct rpcif_priv {
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *dirmap;
+ struct regmap *regmap;
+ struct reset_control *rstc;
+ struct platform_device *vdev;
+ size_t size;
+ const struct rpcif_info *info;
+ enum rpcif_data_dir dir;
+ u8 bus_size;
+ u8 xfer_size;
+ void *buffer;
+ u32 xferlen;
+ u32 smcr;
+ u32 smadr;
+ u32 command; /* DRCMR or SMCMR */
+ u32 option; /* DROPR or SMOPR */
+ u32 enable; /* DRENR or SMENR */
+ u32 dummy; /* DRDMCR or SMDMCR */
+ u32 ddr; /* DRDRENR or SMDRENR */
+};
+
+static const struct rpcif_info rpcif_info_r8a7796 = {
+ .type = RPCIF_RCAR_GEN3,
+ .strtim = 6,
+};
+
+static const struct rpcif_info rpcif_info_gen3 = {
+ .type = RPCIF_RCAR_GEN3,
+ .strtim = 7,
+};
+
+static const struct rpcif_info rpcif_info_rz_g2l = {
+ .type = RPCIF_RZ_G2L,
+ .strtim = 7,
+};
+
+static const struct rpcif_info rpcif_info_gen4 = {
+ .type = RPCIF_RCAR_GEN4,
+ .strtim = 15,
+};
+
+/*
+ * Custom accessor functions to ensure SM[RW]DR[01] are always accessed with
+ * proper width. Requires rpcif_priv.xfer_size to be correctly set before!
+ */
+static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct rpcif_priv *rpc = context;
+
+ switch (reg) {
+ case RPCIF_SMRDR0:
+ case RPCIF_SMWDR0:
+ switch (rpc->xfer_size) {
+ case 1:
+ *val = readb(rpc->base + reg);
+ return 0;
+
+ case 2:
+ *val = readw(rpc->base + reg);
+ return 0;
+
+ case 4:
+ case 8:
+ *val = readl(rpc->base + reg);
+ return 0;
+
+ default:
+ return -EILSEQ;
+ }
+
+ case RPCIF_SMRDR1:
+ case RPCIF_SMWDR1:
+ if (rpc->xfer_size != 8)
+ return -EILSEQ;
+ break;
+ }
+
+ *val = readl(rpc->base + reg);
+ return 0;
+}
+
+static int rpcif_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct rpcif_priv *rpc = context;
+
+ switch (reg) {
+ case RPCIF_SMWDR0:
+ switch (rpc->xfer_size) {
+ case 1:
+ writeb(val, rpc->base + reg);
+ return 0;
+
+ case 2:
+ writew(val, rpc->base + reg);
+ return 0;
+
+ case 4:
+ case 8:
+ writel(val, rpc->base + reg);
+ return 0;
+
+ default:
+ return -EILSEQ;
+ }
+
+ case RPCIF_SMWDR1:
+ if (rpc->xfer_size != 8)
+ return -EILSEQ;
+ break;
+
+ case RPCIF_SMRDR0:
+ case RPCIF_SMRDR1:
+ return -EPERM;
+ }
+
+ writel(val, rpc->base + reg);
+ return 0;
+}
+
+static const struct regmap_config rpcif_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .reg_read = rpcif_reg_read,
+ .reg_write = rpcif_reg_write,
+ .fast_io = true,
+ .max_register = RPCIF_PHYINT,
+ .volatile_table = &rpcif_volatile_table,
+};
+
+int rpcif_sw_init(struct rpcif *rpcif, struct device *dev)
+{
+ struct rpcif_priv *rpc = dev_get_drvdata(dev);
+
+ rpcif->dev = dev;
+ rpcif->dirmap = rpc->dirmap;
+ rpcif->size = rpc->size;
+ return 0;
+}
+EXPORT_SYMBOL(rpcif_sw_init);
+
+static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif_priv *rpc)
+{
+ regmap_write(rpc->regmap, RPCIF_PHYWR, 0xa5390000);
+ regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000000);
+ regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00008080);
+ regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000022);
+ regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00008080);
+ regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000024);
+ regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_CKSEL(3),
+ RPCIF_PHYCNT_CKSEL(3));
+ regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00000030);
+ regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000032);
+}
+
+int rpcif_hw_init(struct device *dev, bool hyperflash)
+{
+ struct rpcif_priv *rpc = dev_get_drvdata(dev);
+ u32 dummy;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ if (rpc->info->type == RPCIF_RZ_G2L) {
+ ret = reset_control_reset(rpc->rstc);
+ if (ret)
+ return ret;
+ usleep_range(200, 300);
+ rpcif_rzg2l_timing_adjust_sdr(rpc);
+ }
+
+ regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_PHYMEM_MASK,
+ RPCIF_PHYCNT_PHYMEM(hyperflash ? 3 : 0));
+
+ /* DMA Transfer is not supported */
+ regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_HS, 0);
+
+ regmap_update_bits(rpc->regmap, RPCIF_PHYCNT,
+ /* create mask with all affected bits set */
+ RPCIF_PHYCNT_STRTIM(BIT(fls(rpc->info->strtim)) - 1),
+ RPCIF_PHYCNT_STRTIM(rpc->info->strtim));
+
+ regmap_update_bits(rpc->regmap, RPCIF_PHYOFFSET1, RPCIF_PHYOFFSET1_DDRTMG(3),
+ RPCIF_PHYOFFSET1_DDRTMG(3));
+ regmap_update_bits(rpc->regmap, RPCIF_PHYOFFSET2, RPCIF_PHYOFFSET2_OCTTMG(7),
+ RPCIF_PHYOFFSET2_OCTTMG(4));
+
+ if (hyperflash)
+ regmap_update_bits(rpc->regmap, RPCIF_PHYINT,
+ RPCIF_PHYINT_WPVAL, 0);
+
+ if (rpc->info->type == RPCIF_RZ_G2L)
+ regmap_update_bits(rpc->regmap, RPCIF_CMNCR,
+ RPCIF_CMNCR_MOIIO(3) | RPCIF_CMNCR_IOFV(3) |
+ RPCIF_CMNCR_BSZ(3),
+ RPCIF_CMNCR_MOIIO(1) | RPCIF_CMNCR_IOFV(2) |
+ RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0));
+ else
+ regmap_update_bits(rpc->regmap, RPCIF_CMNCR,
+ RPCIF_CMNCR_MOIIO(3) | RPCIF_CMNCR_BSZ(3),
+ RPCIF_CMNCR_MOIIO(3) |
+ RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0));
+
+ /* Set RCF after BSZ update */
+ regmap_write(rpc->regmap, RPCIF_DRCR, RPCIF_DRCR_RCF);
+ /* Dummy read according to spec */
+ regmap_read(rpc->regmap, RPCIF_DRCR, &dummy);
+ regmap_write(rpc->regmap, RPCIF_SSLDR, RPCIF_SSLDR_SPNDL(7) |
+ RPCIF_SSLDR_SLNDL(7) | RPCIF_SSLDR_SCKDL(7));
+
+ pm_runtime_put(dev);
+
+ rpc->bus_size = hyperflash ? 2 : 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(rpcif_hw_init);
+
+static int wait_msg_xfer_end(struct rpcif_priv *rpc)
+{
+ u32 sts;
+
+ return regmap_read_poll_timeout(rpc->regmap, RPCIF_CMNSR, sts,
+ sts & RPCIF_CMNSR_TEND, 0,
+ USEC_PER_SEC);
+}
+
+static u8 rpcif_bits_set(struct rpcif_priv *rpc, u32 nbytes)
+{
+ if (rpc->bus_size == 2)
+ nbytes /= 2;
+ nbytes = clamp(nbytes, 1U, 4U);
+ return GENMASK(3, 4 - nbytes);
+}
+
+static u8 rpcif_bit_size(u8 buswidth)
+{
+ return buswidth > 4 ? 2 : ilog2(buswidth);
+}
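+
+/*
+ * Worked examples for the two helpers above (with bus_size == 1):
+ *   rpcif_bits_set(rpc, 1) == GENMASK(3, 3) == 0x8
+ *   rpcif_bits_set(rpc, 2) == GENMASK(3, 2) == 0xc
+ *   rpcif_bits_set(rpc, 4) == GENMASK(3, 0) == 0xf
+ *   rpcif_bit_size() maps bus widths 1/2/4/8 to 0/1/2/2
+ */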
+
+void rpcif_prepare(struct device *dev, const struct rpcif_op *op, u64 *offs,
+ size_t *len)
+{
+ struct rpcif_priv *rpc = dev_get_drvdata(dev);
+
+ rpc->smcr = 0;
+ rpc->smadr = 0;
+ rpc->enable = 0;
+ rpc->command = 0;
+ rpc->option = 0;
+ rpc->dummy = 0;
+ rpc->ddr = 0;
+ rpc->xferlen = 0;
+
+ if (op->cmd.buswidth) {
+ rpc->enable = RPCIF_SMENR_CDE |
+ RPCIF_SMENR_CDB(rpcif_bit_size(op->cmd.buswidth));
+ rpc->command = RPCIF_SMCMR_CMD(op->cmd.opcode);
+ if (op->cmd.ddr)
+ rpc->ddr = RPCIF_SMDRENR_HYPE(0x5);
+ }
+ if (op->ocmd.buswidth) {
+ rpc->enable |= RPCIF_SMENR_OCDE |
+ RPCIF_SMENR_OCDB(rpcif_bit_size(op->ocmd.buswidth));
+ rpc->command |= RPCIF_SMCMR_OCMD(op->ocmd.opcode);
+ }
+
+ if (op->addr.buswidth) {
+ rpc->enable |=
+ RPCIF_SMENR_ADB(rpcif_bit_size(op->addr.buswidth));
+ if (op->addr.nbytes == 4)
+ rpc->enable |= RPCIF_SMENR_ADE(0xF);
+ else
+ rpc->enable |= RPCIF_SMENR_ADE(GENMASK(
+ 2, 3 - op->addr.nbytes));
+ if (op->addr.ddr)
+ rpc->ddr |= RPCIF_SMDRENR_ADDRE;
+
+ if (offs && len)
+ rpc->smadr = *offs;
+ else
+ rpc->smadr = op->addr.val;
+ }
+
+ if (op->dummy.buswidth) {
+ rpc->enable |= RPCIF_SMENR_DME;
+ rpc->dummy = RPCIF_SMDMCR_DMCYC(op->dummy.ncycles);
+ }
+
+ if (op->option.buswidth) {
+ rpc->enable |= RPCIF_SMENR_OPDE(
+ rpcif_bits_set(rpc, op->option.nbytes)) |
+ RPCIF_SMENR_OPDB(rpcif_bit_size(op->option.buswidth));
+ if (op->option.ddr)
+ rpc->ddr |= RPCIF_SMDRENR_OPDRE;
+ rpc->option = op->option.val;
+ }
+
+ rpc->dir = op->data.dir;
+ if (op->data.buswidth) {
+ u32 nbytes;
+
+ rpc->buffer = op->data.buf.in;
+ switch (op->data.dir) {
+ case RPCIF_DATA_IN:
+ rpc->smcr = RPCIF_SMCR_SPIRE;
+ break;
+ case RPCIF_DATA_OUT:
+ rpc->smcr = RPCIF_SMCR_SPIWE;
+ break;
+ default:
+ break;
+ }
+ if (op->data.ddr)
+ rpc->ddr |= RPCIF_SMDRENR_SPIDRE;
+
+ if (offs && len)
+ nbytes = *len;
+ else
+ nbytes = op->data.nbytes;
+ rpc->xferlen = nbytes;
+
+ rpc->enable |= RPCIF_SMENR_SPIDB(rpcif_bit_size(op->data.buswidth));
+ }
+}
+EXPORT_SYMBOL(rpcif_prepare);
+
+int rpcif_manual_xfer(struct device *dev)
+{
+ struct rpcif_priv *rpc = dev_get_drvdata(dev);
+ u32 smenr, smcr, pos = 0, max = rpc->bus_size == 2 ? 8 : 4;
+ int ret = 0;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ regmap_update_bits(rpc->regmap, RPCIF_PHYCNT,
+ RPCIF_PHYCNT_CAL, RPCIF_PHYCNT_CAL);
+ regmap_update_bits(rpc->regmap, RPCIF_CMNCR,
+ RPCIF_CMNCR_MD, RPCIF_CMNCR_MD);
+ regmap_write(rpc->regmap, RPCIF_SMCMR, rpc->command);
+ regmap_write(rpc->regmap, RPCIF_SMOPR, rpc->option);
+ regmap_write(rpc->regmap, RPCIF_SMDMCR, rpc->dummy);
+ regmap_write(rpc->regmap, RPCIF_SMDRENR, rpc->ddr);
+ regmap_write(rpc->regmap, RPCIF_SMADR, rpc->smadr);
+ smenr = rpc->enable;
+
+ switch (rpc->dir) {
+ case RPCIF_DATA_OUT:
+ while (pos < rpc->xferlen) {
+ u32 bytes_left = rpc->xferlen - pos;
+ u32 nbytes, data[2], *p = data;
+
+ smcr = rpc->smcr | RPCIF_SMCR_SPIE;
+
+ /* nbytes may only be 1, 2, 4, or 8 */
+ nbytes = bytes_left >= max ? max : (1 << ilog2(bytes_left));
+ if (bytes_left > nbytes)
+ smcr |= RPCIF_SMCR_SSLKP;
+
+ smenr |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes));
+ regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
+ rpc->xfer_size = nbytes;
+
+ memcpy(data, rpc->buffer + pos, nbytes);
+ if (nbytes == 8)
+ regmap_write(rpc->regmap, RPCIF_SMWDR1, *p++);
+ regmap_write(rpc->regmap, RPCIF_SMWDR0, *p);
+
+ regmap_write(rpc->regmap, RPCIF_SMCR, smcr);
+ ret = wait_msg_xfer_end(rpc);
+ if (ret)
+ goto err_out;
+
+ pos += nbytes;
+ smenr = rpc->enable &
+ ~RPCIF_SMENR_CDE & ~RPCIF_SMENR_ADE(0xF);
+ }
+ break;
+ case RPCIF_DATA_IN:
+ /*
+ * The RPC-IF corrupts the data returned for commands without an
+ * address phase (like RDID) in manual mode, so work around this
+ * issue by using the external address space read mode instead.
+ */
+ if (!(smenr & RPCIF_SMENR_ADE(0xF)) && rpc->dirmap) {
+ u32 dummy;
+
+ regmap_update_bits(rpc->regmap, RPCIF_CMNCR,
+ RPCIF_CMNCR_MD, 0);
+ regmap_write(rpc->regmap, RPCIF_DRCR,
+ RPCIF_DRCR_RBURST(32) | RPCIF_DRCR_RBE);
+ regmap_write(rpc->regmap, RPCIF_DRCMR, rpc->command);
+ regmap_write(rpc->regmap, RPCIF_DREAR,
+ RPCIF_DREAR_EAC(1));
+ regmap_write(rpc->regmap, RPCIF_DROPR, rpc->option);
+ regmap_write(rpc->regmap, RPCIF_DRENR,
+ smenr & ~RPCIF_SMENR_SPIDE(0xF));
+ regmap_write(rpc->regmap, RPCIF_DRDMCR, rpc->dummy);
+ regmap_write(rpc->regmap, RPCIF_DRDRENR, rpc->ddr);
+ memcpy_fromio(rpc->buffer, rpc->dirmap, rpc->xferlen);
+ regmap_write(rpc->regmap, RPCIF_DRCR, RPCIF_DRCR_RCF);
+ /* Dummy read according to spec */
+ regmap_read(rpc->regmap, RPCIF_DRCR, &dummy);
+ break;
+ }
+ while (pos < rpc->xferlen) {
+ u32 bytes_left = rpc->xferlen - pos;
+ u32 nbytes, data[2], *p = data;
+
+ /* nbytes may only be 1, 2, 4, or 8 */
+ nbytes = bytes_left >= max ? max : (1 << ilog2(bytes_left));
+
+ regmap_write(rpc->regmap, RPCIF_SMADR,
+ rpc->smadr + pos);
+ smenr &= ~RPCIF_SMENR_SPIDE(0xF);
+ smenr |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes));
+ regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
+ regmap_write(rpc->regmap, RPCIF_SMCR,
+ rpc->smcr | RPCIF_SMCR_SPIE);
+ rpc->xfer_size = nbytes;
+ ret = wait_msg_xfer_end(rpc);
+ if (ret)
+ goto err_out;
+
+ if (nbytes == 8)
+ regmap_read(rpc->regmap, RPCIF_SMRDR1, p++);
+ regmap_read(rpc->regmap, RPCIF_SMRDR0, p);
+ memcpy(rpc->buffer + pos, data, nbytes);
+
+ pos += nbytes;
+ }
+ break;
+ default:
+ regmap_write(rpc->regmap, RPCIF_SMENR, rpc->enable);
+ regmap_write(rpc->regmap, RPCIF_SMCR,
+ rpc->smcr | RPCIF_SMCR_SPIE);
+ ret = wait_msg_xfer_end(rpc);
+ if (ret)
+ goto err_out;
+ }
+
+exit:
+ pm_runtime_put(dev);
+ return ret;
+
+err_out:
+ if (reset_control_reset(rpc->rstc))
+ dev_err(dev, "Failed to reset HW\n");
+ rpcif_hw_init(dev, rpc->bus_size == 2);
+ goto exit;
+}
+EXPORT_SYMBOL(rpcif_manual_xfer);
+
+static void memcpy_fromio_readw(void *to,
+ const void __iomem *from,
+ size_t count)
+{
+ const int maxw = (IS_ENABLED(CONFIG_64BIT)) ? 8 : 4;
+ u8 buf[2];
+
+ if (count && ((unsigned long)from & 1)) {
+ *(u16 *)buf = __raw_readw((void __iomem *)((unsigned long)from & ~1));
+ *(u8 *)to = buf[1];
+ from++;
+ to++;
+ count--;
+ }
+ while (count >= 2 && !IS_ALIGNED((unsigned long)from, maxw)) {
+ *(u16 *)to = __raw_readw(from);
+ from += 2;
+ to += 2;
+ count -= 2;
+ }
+ while (count >= maxw) {
+#ifdef CONFIG_64BIT
+ *(u64 *)to = __raw_readq(from);
+#else
+ *(u32 *)to = __raw_readl(from);
+#endif
+ from += maxw;
+ to += maxw;
+ count -= maxw;
+ }
+ while (count >= 2) {
+ *(u16 *)to = __raw_readw(from);
+ from += 2;
+ to += 2;
+ count -= 2;
+ }
+ if (count) {
+ *(u16 *)buf = __raw_readw(from);
+ *(u8 *)to = buf[0];
+ }
+}
+
+ssize_t rpcif_dirmap_read(struct device *dev, u64 offs, size_t len, void *buf)
+{
+ struct rpcif_priv *rpc = dev_get_drvdata(dev);
+ loff_t from = offs & (rpc->size - 1);
+ size_t size = rpc->size - from;
+ int ret;
+
+ if (len > size)
+ len = size;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ regmap_update_bits(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_MD, 0);
+ regmap_write(rpc->regmap, RPCIF_DRCR, 0);
+ regmap_write(rpc->regmap, RPCIF_DRCMR, rpc->command);
+ regmap_write(rpc->regmap, RPCIF_DREAR,
+ RPCIF_DREAR_EAV(offs >> 25) | RPCIF_DREAR_EAC(1));
+ regmap_write(rpc->regmap, RPCIF_DROPR, rpc->option);
+ regmap_write(rpc->regmap, RPCIF_DRENR,
+ rpc->enable & ~RPCIF_SMENR_SPIDE(0xF));
+ regmap_write(rpc->regmap, RPCIF_DRDMCR, rpc->dummy);
+ regmap_write(rpc->regmap, RPCIF_DRDRENR, rpc->ddr);
+
+ if (rpc->bus_size == 2)
+ memcpy_fromio_readw(buf, rpc->dirmap + from, len);
+ else
+ memcpy_fromio(buf, rpc->dirmap + from, len);
+
+ pm_runtime_put(dev);
+
+ return len;
+}
+EXPORT_SYMBOL(rpcif_dirmap_read);
+
+static int rpcif_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct platform_device *vdev;
+ struct device_node *flash;
+ struct rpcif_priv *rpc;
+ struct resource *res;
+ const char *name;
+ int ret;
+
+ flash = of_get_next_child(dev->of_node, NULL);
+ if (!flash) {
+ dev_warn(dev, "no flash node found\n");
+ return -ENODEV;
+ }
+
+ if (of_device_is_compatible(flash, "jedec,spi-nor")) {
+ name = "rpc-if-spi";
+ } else if (of_device_is_compatible(flash, "cfi-flash")) {
+ name = "rpc-if-hyperflash";
+ } else {
+ of_node_put(flash);
+ dev_warn(dev, "unknown flash type\n");
+ return -ENODEV;
+ }
+ of_node_put(flash);
+
+ rpc = devm_kzalloc(dev, sizeof(*rpc), GFP_KERNEL);
+ if (!rpc)
+ return -ENOMEM;
+
+ rpc->base = devm_platform_ioremap_resource_byname(pdev, "regs");
+ if (IS_ERR(rpc->base))
+ return PTR_ERR(rpc->base);
+
+ rpc->regmap = devm_regmap_init(dev, NULL, rpc, &rpcif_regmap_config);
+ if (IS_ERR(rpc->regmap)) {
+ dev_err(dev, "failed to init regmap for rpcif, error %ld\n",
+ PTR_ERR(rpc->regmap));
+ return PTR_ERR(rpc->regmap);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
+ rpc->dirmap = devm_ioremap_resource(dev, res);
+ if (IS_ERR(rpc->dirmap))
+ return PTR_ERR(rpc->dirmap);
+
+ rpc->size = resource_size(res);
+ rpc->info = of_device_get_match_data(dev);
+ rpc->rstc = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(rpc->rstc))
+ return PTR_ERR(rpc->rstc);
+
+ vdev = platform_device_alloc(name, pdev->id);
+ if (!vdev)
+ return -ENOMEM;
+ vdev->dev.parent = dev;
+
+ rpc->dev = dev;
+ rpc->vdev = vdev;
+ platform_set_drvdata(pdev, rpc);
+
+ ret = platform_device_add(vdev);
+ if (ret) {
+ platform_device_put(vdev);
+ return ret;
+ }
+
+ return 0;
+}
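+
+/*
+ * Illustrative (hypothetical) device-tree layout expected by the probe
+ * above; a single flash child node selects the bus type:
+ *
+ *	spi@ee200000 {
+ *		compatible = "renesas,rcar-gen3-rpc-if";
+ *		reg-names = "regs", "dirmap";
+ *		flash@0 {
+ *			compatible = "jedec,spi-nor";
+ *		};
+ *	};
+ */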
+
+static int rpcif_remove(struct platform_device *pdev)
+{
+ struct rpcif_priv *rpc = platform_get_drvdata(pdev);
+
+ platform_device_unregister(rpc->vdev);
+
+ return 0;
+}
+
+static const struct of_device_id rpcif_of_match[] = {
+ { .compatible = "renesas,r8a7796-rpc-if", .data = &rpcif_info_r8a7796 },
+ { .compatible = "renesas,rcar-gen3-rpc-if", .data = &rpcif_info_gen3 },
+ { .compatible = "renesas,rcar-gen4-rpc-if", .data = &rpcif_info_gen4 },
+ { .compatible = "renesas,rzg2l-rpc-if", .data = &rpcif_info_rz_g2l },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rpcif_of_match);
+
+static struct platform_driver rpcif_driver = {
+ .probe = rpcif_probe,
+ .remove = rpcif_remove,
+ .driver = {
+ .name = "rpc-if",
+ .of_match_table = rpcif_of_match,
+ },
+};
+module_platform_driver(rpcif_driver);
+
+MODULE_DESCRIPTION("Renesas RPC-IF core driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/samsung/Kconfig b/drivers/memory/samsung/Kconfig
new file mode 100644
index 0000000000..7fb70f5730
--- /dev/null
+++ b/drivers/memory/samsung/Kconfig
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0
+config SAMSUNG_MC
+ bool "Samsung Exynos Memory Controller support" if COMPILE_TEST
+ help
+ Support for the Memory Controller (MC) devices found on
+ Samsung Exynos SoCs.
+
+if SAMSUNG_MC
+
+config EXYNOS5422_DMC
+ tristate "Exynos5422 Dynamic Memory Controller driver"
+ depends on ARCH_EXYNOS || (COMPILE_TEST && HAS_IOMEM)
+ select DDR
+ depends on DEVFREQ_GOV_SIMPLE_ONDEMAND
+ depends on (PM_DEVFREQ && PM_DEVFREQ_EVENT)
+ help
+	  This adds a driver for the Samsung Exynos5422 SoC DMC (Dynamic
+	  Memory Controller). The driver provides support for Dynamic Voltage
+	  and Frequency Scaling of the DMC and DRAM. It also supports
+	  adjusting the DRAM timings when running at different frequencies.
+	  The timings are calculated based on memory information from the DT.
+	  If unsure, say Y on devices with Samsung Exynos SoCs.
+
+config EXYNOS_SROM
+ bool "Exynos SROM controller driver" if COMPILE_TEST
+ depends on (ARM && ARCH_EXYNOS) || (COMPILE_TEST && HAS_IOMEM)
+ help
+	  This adds the driver for the Samsung Exynos SoC SROM controller. In
+	  its basic operation mode, the driver only saves and restores the
+	  SROM registers during suspend. If, however, an appropriate device
+	  tree configuration is provided, the driver enables support for
+	  external memory or external devices.
+ If unsure, say Y on devices with Samsung Exynos SoCs.
+
+endif
diff --git a/drivers/memory/samsung/Makefile b/drivers/memory/samsung/Makefile
new file mode 100644
index 0000000000..ea071be21c
--- /dev/null
+++ b/drivers/memory/samsung/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_EXYNOS5422_DMC) += exynos5422-dmc.o
+obj-$(CONFIG_EXYNOS_SROM) += exynos-srom.o
diff --git a/drivers/memory/samsung/exynos-srom.c b/drivers/memory/samsung/exynos-srom.c
new file mode 100644
index 0000000000..e73dd330af
--- /dev/null
+++ b/drivers/memory/samsung/exynos-srom.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2015 Samsung Electronics Co., Ltd.
+// http://www.samsung.com/
+//
+// Exynos - SROM Controller support
+// Author: Pankaj Dubey <pankaj.dubey@samsung.com>
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "exynos-srom.h"
+
+static const unsigned long exynos_srom_offsets[] = {
+ /* SROM side */
+ EXYNOS_SROM_BW,
+ EXYNOS_SROM_BC0,
+ EXYNOS_SROM_BC1,
+ EXYNOS_SROM_BC2,
+ EXYNOS_SROM_BC3,
+};
+
+/**
+ * struct exynos_srom_reg_dump - register dump of SROM controller registers.
+ * @offset: SROM register offset from the controller base address.
+ * @value: the value of the register at the given offset.
+ */
+struct exynos_srom_reg_dump {
+ u32 offset;
+ u32 value;
+};
+
+/**
+ * struct exynos_srom - platform data for the Exynos SROM controller driver.
+ * @dev: platform device pointer
+ * @reg_base: SROM base address
+ * @reg_offset: pointer to the exynos_srom_reg_dump array holding register
+ *              offsets and their saved values.
+ */
+struct exynos_srom {
+ struct device *dev;
+ void __iomem *reg_base;
+ struct exynos_srom_reg_dump *reg_offset;
+};
+
+static struct exynos_srom_reg_dump *
+exynos_srom_alloc_reg_dump(const unsigned long *rdump,
+ unsigned long nr_rdump)
+{
+ struct exynos_srom_reg_dump *rd;
+ unsigned int i;
+
+ rd = kcalloc(nr_rdump, sizeof(*rd), GFP_KERNEL);
+ if (!rd)
+ return NULL;
+
+ for (i = 0; i < nr_rdump; ++i)
+ rd[i].offset = rdump[i];
+
+ return rd;
+}
+
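+/*
+ * A sketch of a bank child node carrying the properties parsed below
+ * (property names come from this driver; the values shown are illustrative,
+ * not taken from a real board):
+ *
+ *	ethernet@3,0 {
+ *		reg = <3 0 0x10000>;
+ *		reg-io-width = <2>;
+ *		samsung,srom-page-mode;
+ *		samsung,srom-timing = <1 9 12 1 9 1>;
+ *	};
+ */
+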
+static int exynos_srom_configure_bank(struct exynos_srom *srom,
+ struct device_node *np)
+{
+ u32 bank, width, pmc = 0;
+ u32 timing[6];
+ u32 cs, bw;
+
+ if (of_property_read_u32(np, "reg", &bank))
+ return -EINVAL;
+ if (of_property_read_u32(np, "reg-io-width", &width))
+ width = 1;
+ if (of_property_read_bool(np, "samsung,srom-page-mode"))
+ pmc = 1 << EXYNOS_SROM_BCX__PMC__SHIFT;
+ if (of_property_read_u32_array(np, "samsung,srom-timing", timing,
+ ARRAY_SIZE(timing)))
+ return -EINVAL;
+
+ bank *= 4; /* Convert bank into shift/offset */
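+	/* e.g. bank 2 -> shift 8, which matches EXYNOS_SROM_BW__NCS2__SHIFT */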
+
+ cs = 1 << EXYNOS_SROM_BW__BYTEENABLE__SHIFT;
+ if (width == 2)
+ cs |= 1 << EXYNOS_SROM_BW__DATAWIDTH__SHIFT;
+
+ bw = readl_relaxed(srom->reg_base + EXYNOS_SROM_BW);
+ bw = (bw & ~(EXYNOS_SROM_BW__CS_MASK << bank)) | (cs << bank);
+ writel_relaxed(bw, srom->reg_base + EXYNOS_SROM_BW);
+
+ writel_relaxed(pmc | (timing[0] << EXYNOS_SROM_BCX__TACP__SHIFT) |
+ (timing[1] << EXYNOS_SROM_BCX__TCAH__SHIFT) |
+ (timing[2] << EXYNOS_SROM_BCX__TCOH__SHIFT) |
+ (timing[3] << EXYNOS_SROM_BCX__TACC__SHIFT) |
+ (timing[4] << EXYNOS_SROM_BCX__TCOS__SHIFT) |
+ (timing[5] << EXYNOS_SROM_BCX__TACS__SHIFT),
+ srom->reg_base + EXYNOS_SROM_BC0 + bank);
+
+ return 0;
+}
+
+static int exynos_srom_probe(struct platform_device *pdev)
+{
+ struct device_node *np, *child;
+ struct exynos_srom *srom;
+ struct device *dev = &pdev->dev;
+ bool bad_bank_config = false;
+
+ np = dev->of_node;
+ if (!np) {
+ dev_err(&pdev->dev, "could not find device info\n");
+ return -EINVAL;
+ }
+
+ srom = devm_kzalloc(&pdev->dev,
+ sizeof(struct exynos_srom), GFP_KERNEL);
+ if (!srom)
+ return -ENOMEM;
+
+ srom->dev = dev;
+ srom->reg_base = of_iomap(np, 0);
+ if (!srom->reg_base) {
+ dev_err(&pdev->dev, "iomap of exynos srom controller failed\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, srom);
+
+ srom->reg_offset = exynos_srom_alloc_reg_dump(exynos_srom_offsets,
+ ARRAY_SIZE(exynos_srom_offsets));
+ if (!srom->reg_offset) {
+ iounmap(srom->reg_base);
+ return -ENOMEM;
+ }
+
+ for_each_child_of_node(np, child) {
+ if (exynos_srom_configure_bank(srom, child)) {
+ dev_err(dev,
+ "Could not decode bank configuration for %pOFn\n",
+ child);
+ bad_bank_config = true;
+ }
+ }
+
+ /*
+ * If any bank failed to configure, we still provide suspend/resume,
+ * but do not probe child devices
+ */
+ if (bad_bank_config)
+ return 0;
+
+ return of_platform_populate(np, NULL, NULL, dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void exynos_srom_save(void __iomem *base,
+ struct exynos_srom_reg_dump *rd,
+ unsigned int num_regs)
+{
+ for (; num_regs > 0; --num_regs, ++rd)
+ rd->value = readl(base + rd->offset);
+}
+
+static void exynos_srom_restore(void __iomem *base,
+ const struct exynos_srom_reg_dump *rd,
+ unsigned int num_regs)
+{
+ for (; num_regs > 0; --num_regs, ++rd)
+ writel(rd->value, base + rd->offset);
+}
+
+static int exynos_srom_suspend(struct device *dev)
+{
+ struct exynos_srom *srom = dev_get_drvdata(dev);
+
+ exynos_srom_save(srom->reg_base, srom->reg_offset,
+ ARRAY_SIZE(exynos_srom_offsets));
+ return 0;
+}
+
+static int exynos_srom_resume(struct device *dev)
+{
+ struct exynos_srom *srom = dev_get_drvdata(dev);
+
+ exynos_srom_restore(srom->reg_base, srom->reg_offset,
+ ARRAY_SIZE(exynos_srom_offsets));
+ return 0;
+}
+#endif
+
+static const struct of_device_id of_exynos_srom_ids[] = {
+ {
+ .compatible = "samsung,exynos4210-srom",
+ },
+ {},
+};
+
+static SIMPLE_DEV_PM_OPS(exynos_srom_pm_ops, exynos_srom_suspend, exynos_srom_resume);
+
+static struct platform_driver exynos_srom_driver = {
+ .probe = exynos_srom_probe,
+ .driver = {
+ .name = "exynos-srom",
+ .of_match_table = of_exynos_srom_ids,
+ .pm = &exynos_srom_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(exynos_srom_driver);
diff --git a/drivers/memory/samsung/exynos-srom.h b/drivers/memory/samsung/exynos-srom.h
new file mode 100644
index 0000000000..da612797f5
--- /dev/null
+++ b/drivers/memory/samsung/exynos-srom.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Exynos SROMC register definitions
+ */
+
+#ifndef __EXYNOS_SROM_H
+#define __EXYNOS_SROM_H __FILE__
+
+#define EXYNOS_SROMREG(x) (x)
+
+#define EXYNOS_SROM_BW EXYNOS_SROMREG(0x0)
+#define EXYNOS_SROM_BC0 EXYNOS_SROMREG(0x4)
+#define EXYNOS_SROM_BC1 EXYNOS_SROMREG(0x8)
+#define EXYNOS_SROM_BC2 EXYNOS_SROMREG(0xc)
+#define EXYNOS_SROM_BC3 EXYNOS_SROMREG(0x10)
+#define EXYNOS_SROM_BC4 EXYNOS_SROMREG(0x14)
+#define EXYNOS_SROM_BC5 EXYNOS_SROMREG(0x18)
+
+/* the single BW register holds 4-bit packed settings for each of NCS0 - NCS5 */
+
+#define EXYNOS_SROM_BW__DATAWIDTH__SHIFT 0
+#define EXYNOS_SROM_BW__ADDRMODE__SHIFT 1
+#define EXYNOS_SROM_BW__WAITENABLE__SHIFT 2
+#define EXYNOS_SROM_BW__BYTEENABLE__SHIFT 3
+
+#define EXYNOS_SROM_BW__CS_MASK 0xf
+
+#define EXYNOS_SROM_BW__NCS0__SHIFT 0
+#define EXYNOS_SROM_BW__NCS1__SHIFT 4
+#define EXYNOS_SROM_BW__NCS2__SHIFT 8
+#define EXYNOS_SROM_BW__NCS3__SHIFT 12
+#define EXYNOS_SROM_BW__NCS4__SHIFT 16
+#define EXYNOS_SROM_BW__NCS5__SHIFT 20
+
+/* the same field layout applies to each of the BC0 - BC5 registers */
+
+#define EXYNOS_SROM_BCX__PMC__SHIFT 0
+#define EXYNOS_SROM_BCX__TACP__SHIFT 4
+#define EXYNOS_SROM_BCX__TCAH__SHIFT 8
+#define EXYNOS_SROM_BCX__TCOH__SHIFT 12
+#define EXYNOS_SROM_BCX__TACC__SHIFT 16
+#define EXYNOS_SROM_BCX__TCOS__SHIFT 24
+#define EXYNOS_SROM_BCX__TACS__SHIFT 28
+
+#endif /* __EXYNOS_SROM_H */
diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
new file mode 100644
index 0000000000..6d019dbd72
--- /dev/null
+++ b/drivers/memory/samsung/exynos5422-dmc.c
@@ -0,0 +1,1593 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd.
+ * Author: Lukasz Luba <l.luba@partner.samsung.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/devfreq-event.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include "../jedec_ddr.h"
+#include "../of_memory.h"
+
+static int irqmode;
+module_param(irqmode, int, 0644);
+MODULE_PARM_DESC(irqmode, "Enable IRQ mode (0=off [default], 1=on)");
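+/*
+ * Example usage (assuming the default module name): boot with
+ * "exynos5422_dmc.irqmode=1" on the kernel command line, or load with
+ * "modprobe exynos5422-dmc irqmode=1".
+ */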
+
+#define EXYNOS5_DREXI_TIMINGAREF (0x0030)
+#define EXYNOS5_DREXI_TIMINGROW0 (0x0034)
+#define EXYNOS5_DREXI_TIMINGDATA0 (0x0038)
+#define EXYNOS5_DREXI_TIMINGPOWER0 (0x003C)
+#define EXYNOS5_DREXI_TIMINGROW1 (0x00E4)
+#define EXYNOS5_DREXI_TIMINGDATA1 (0x00E8)
+#define EXYNOS5_DREXI_TIMINGPOWER1 (0x00EC)
+#define CDREX_PAUSE (0x2091c)
+#define CDREX_LPDDR3PHY_CON3 (0x20a20)
+#define CDREX_LPDDR3PHY_CLKM_SRC (0x20700)
+#define EXYNOS5_TIMING_SET_SWI BIT(28)
+#define USE_MX_MSPLL_TIMINGS (1)
+#define USE_BPLL_TIMINGS (0)
+#define EXYNOS5_AREF_NORMAL (0x2e)
+
+#define DREX_PPCCLKCON (0x0130)
+#define DREX_PEREV2CONFIG (0x013c)
+#define DREX_PMNC_PPC (0xE000)
+#define DREX_CNTENS_PPC (0xE010)
+#define DREX_CNTENC_PPC (0xE020)
+#define DREX_INTENS_PPC (0xE030)
+#define DREX_INTENC_PPC (0xE040)
+#define DREX_FLAG_PPC (0xE050)
+#define DREX_PMCNT2_PPC (0xE130)
+
+/*
+ * A value for the DREX_PMNC_PPC register which should be written to reset
+ * the cycle counter CCNT (a reference wall clock). It resets the CCNT
+ * counter to zero.
+ */
+#define CC_RESET BIT(2)
+
+/*
+ * A value for the DREX_PMNC_PPC register which resets all performance
+ * counters to zero.
+ */
+#define PPC_COUNTER_RESET BIT(1)
+
+/*
+ * Enables all configured counters (including cycle counter). The value should
+ * be written to the register DREX_PMNC_PPC.
+ */
+#define PPC_ENABLE BIT(0)
+
+/*
+ * A value for the DREX_PPCCLKCON register which enables the performance
+ * events clock. It must be written before the first access to the
+ * performance counter register set, otherwise the system could crash.
+ */
+#define PEREV_CLK_EN BIT(0)
+
+/*
+ * Values which are used to enable counters, interrupts or configure flags of
+ * the performance counters. They configure counter 2 and cycle counter.
+ */
+#define PERF_CNT2 BIT(2)
+#define PERF_CCNT BIT(31)
+
+/*
+ * Performance event types which are used for setting the preferred event
+ * to track in the counters.
+ * There is a set of different types, the values are from range 0 to 0x6f.
+ * These settings should be written to the configuration register which manages
+ * the type of the event (register DREX_PEREV2CONFIG).
+ */
+#define READ_TRANSFER_CH0 (0x6d)
+#define READ_TRANSFER_CH1 (0x6f)
+
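+/*
+ * With the start value below, a counter overflows after
+ * 0xffffffff - 0xff000000 = 0x00ffffff (~16.7M) events; the up/down
+ * threshold is expressed in ns, i.e. 900000000 ns = 0.9 s between
+ * overflow interrupts.
+ */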
+#define PERF_COUNTER_START_VALUE 0xff000000
+#define PERF_EVENT_UP_DOWN_THRESHOLD 900000000ULL
+
+/**
+ * struct dmc_opp_table - Operating level description
+ * @freq_hz: target frequency in Hz
+ * @volt_uv: target voltage in uV
+ *
+ * Covers frequency and voltage settings of the DMC operating mode.
+ */
+struct dmc_opp_table {
+ u32 freq_hz;
+ u32 volt_uv;
+};
+
+/**
+ * struct exynos5_dmc - main structure describing DMC device
+ * @dev: DMC device
+ * @df: devfreq device structure returned by devfreq framework
+ * @gov_data: configuration of devfreq governor
+ * @base_drexi0: DREX0 registers mapping
+ * @base_drexi1: DREX1 registers mapping
+ * @clk_regmap: regmap for clock controller registers
+ * @lock: protects curr_rate and frequency/voltage setting section
+ * @curr_rate: current frequency
+ * @curr_volt: current voltage
+ * @opp: OPP table
+ * @opp_count: number of 'opp' elements
+ * @timings_arr_size: number of 'timings' elements
+ * @timing_row: values for timing row register, for each OPP
+ * @timing_data: values for timing data register, for each OPP
+ * @timing_power: values for timing power register, for each OPP
+ * @timings: DDR memory timings, from device tree
+ * @min_tck: DDR memory minimum timing values, from device tree
+ * @bypass_timing_row: value for timing row register for bypass timings
+ * @bypass_timing_data: value for timing data register for bypass timings
+ * @bypass_timing_power: value for timing power register for bypass
+ * timings
+ * @vdd_mif: Memory interface regulator
+ * @fout_spll: clock: SPLL
+ * @fout_bpll: clock: BPLL
+ * @mout_spll: clock: mux SPLL
+ * @mout_bpll: clock: mux BPLL
+ * @mout_mclk_cdrex: clock: mux mclk_cdrex
+ * @mout_mx_mspll_ccore: clock: mux mx_mspll_ccore
+ * @counter: devfreq events
+ * @num_counters: number of 'counter' elements
+ * @last_overflow_ts: time (in ns) of last overflow of each DREX
+ * @load: utilization in percent
+ * @total: total time between devfreq events
+ * @in_irq_mode: whether running in interrupt mode (true)
+ * or polling (false)
+ *
+ * The main structure for the Dynamic Memory Controller which covers clocks,
+ * memory regions, HW information, parameters and current operating mode.
+ */
+struct exynos5_dmc {
+ struct device *dev;
+ struct devfreq *df;
+ struct devfreq_simple_ondemand_data gov_data;
+ void __iomem *base_drexi0;
+ void __iomem *base_drexi1;
+ struct regmap *clk_regmap;
+ /* Protects curr_rate and frequency/voltage setting section */
+ struct mutex lock;
+ unsigned long curr_rate;
+ unsigned long curr_volt;
+ struct dmc_opp_table *opp;
+ int opp_count;
+ u32 timings_arr_size;
+ u32 *timing_row;
+ u32 *timing_data;
+ u32 *timing_power;
+ const struct lpddr3_timings *timings;
+ const struct lpddr3_min_tck *min_tck;
+ u32 bypass_timing_row;
+ u32 bypass_timing_data;
+ u32 bypass_timing_power;
+ struct regulator *vdd_mif;
+ struct clk *fout_spll;
+ struct clk *fout_bpll;
+ struct clk *mout_spll;
+ struct clk *mout_bpll;
+ struct clk *mout_mclk_cdrex;
+ struct clk *mout_mx_mspll_ccore;
+ struct devfreq_event_dev **counter;
+ int num_counters;
+ u64 last_overflow_ts[2];
+ unsigned long load;
+ unsigned long total;
+ bool in_irq_mode;
+};
+
+#define TIMING_FIELD(t_name, t_bit_beg, t_bit_end) \
+ { .name = t_name, .bit_beg = t_bit_beg, .bit_end = t_bit_end }
+
+#define TIMING_VAL2REG(timing, t_val) \
+({ \
+ u32 __val; \
+ __val = (t_val) << (timing)->bit_beg; \
+ __val; \
+})
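+
+/*
+ * Note that TIMING_VAL2REG only shifts the value into place and does not
+ * mask it against bit_end; callers rely on the computed values fitting
+ * their fields (e.g. tRAS occupies bits 0-5, so its value must be < 64).
+ */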
+
+struct timing_reg {
+ char *name;
+ int bit_beg;
+ int bit_end;
+ unsigned int val;
+};
+
+static const struct timing_reg timing_row_reg_fields[] = {
+ TIMING_FIELD("tRFC", 24, 31),
+ TIMING_FIELD("tRRD", 20, 23),
+ TIMING_FIELD("tRP", 16, 19),
+ TIMING_FIELD("tRCD", 12, 15),
+ TIMING_FIELD("tRC", 6, 11),
+ TIMING_FIELD("tRAS", 0, 5),
+};
+
+static const struct timing_reg timing_data_reg_fields[] = {
+ TIMING_FIELD("tWTR", 28, 31),
+ TIMING_FIELD("tWR", 24, 27),
+ TIMING_FIELD("tRTP", 20, 23),
+ TIMING_FIELD("tW2W-C2C", 14, 14),
+ TIMING_FIELD("tR2R-C2C", 12, 12),
+ TIMING_FIELD("WL", 8, 11),
+ TIMING_FIELD("tDQSCK", 4, 7),
+ TIMING_FIELD("RL", 0, 3),
+};
+
+static const struct timing_reg timing_power_reg_fields[] = {
+ TIMING_FIELD("tFAW", 26, 31),
+ TIMING_FIELD("tXSR", 16, 25),
+ TIMING_FIELD("tXP", 8, 15),
+ TIMING_FIELD("tCKE", 4, 7),
+ TIMING_FIELD("tMRD", 0, 3),
+};
+
+#define TIMING_COUNT (ARRAY_SIZE(timing_row_reg_fields) + \
+ ARRAY_SIZE(timing_data_reg_fields) + \
+ ARRAY_SIZE(timing_power_reg_fields))
+
+static int exynos5_counters_set_event(struct exynos5_dmc *dmc)
+{
+ int i, ret;
+
+ for (i = 0; i < dmc->num_counters; i++) {
+ if (!dmc->counter[i])
+ continue;
+ ret = devfreq_event_set_event(dmc->counter[i]);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int exynos5_counters_enable_edev(struct exynos5_dmc *dmc)
+{
+ int i, ret;
+
+ for (i = 0; i < dmc->num_counters; i++) {
+ if (!dmc->counter[i])
+ continue;
+ ret = devfreq_event_enable_edev(dmc->counter[i]);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int exynos5_counters_disable_edev(struct exynos5_dmc *dmc)
+{
+ int i, ret;
+
+ for (i = 0; i < dmc->num_counters; i++) {
+ if (!dmc->counter[i])
+ continue;
+ ret = devfreq_event_disable_edev(dmc->counter[i]);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * find_target_freq_idx() - Finds requested frequency in local DMC configuration
+ * @dmc: device for which the information is checked
+ * @target_rate: requested frequency in Hz
+ *
+ * Searches the local DMC driver structure for the requested frequency value
+ * and returns its index or an error value.
+ */
+static int find_target_freq_idx(struct exynos5_dmc *dmc,
+ unsigned long target_rate)
+{
+ int i;
+
+ for (i = dmc->opp_count - 1; i >= 0; i--)
+ if (dmc->opp[i].freq_hz <= target_rate)
+ return i;
+
+ return -EINVAL;
+}
+
+/**
+ * exynos5_switch_timing_regs() - Changes bank register set for DRAM timings
+ * @dmc: device for which the new settings are going to be applied
+ * @set: true to select timing register set 1, false for set 0
+ *
+ * Changes the register set which holds the timing parameters.
+ * There are two register sets: 0 and 1. Register set 0
+ * is used in normal operation, when the clock is provided by the main PLL.
+ * Register set 1 is used while the main PLL frequency is being
+ * changed and the clock is taken from an alternative, stable source.
+ * This function switches between these banks according to the
+ * clock source currently in use.
+ */
+static int exynos5_switch_timing_regs(struct exynos5_dmc *dmc, bool set)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(dmc->clk_regmap, CDREX_LPDDR3PHY_CON3, &reg);
+ if (ret)
+ return ret;
+
+ if (set)
+ reg |= EXYNOS5_TIMING_SET_SWI;
+ else
+ reg &= ~EXYNOS5_TIMING_SET_SWI;
+
+ regmap_write(dmc->clk_regmap, CDREX_LPDDR3PHY_CON3, reg);
+
+ return 0;
+}
+
+/**
+ * exynos5_init_freq_table() - Initializes the PM OPP framework
+ * @dmc: DMC device for which the frequencies are used for OPP init
+ * @profile: devfreq device's profile
+ *
+ * Populate the driver's local OPP table based on the OPP table added from DT.
+ */
+static int exynos5_init_freq_table(struct exynos5_dmc *dmc,
+ struct devfreq_dev_profile *profile)
+{
+ int i, ret;
+ int idx;
+ unsigned long freq;
+
+ ret = devm_pm_opp_of_add_table(dmc->dev);
+ if (ret < 0) {
+ dev_err(dmc->dev, "Failed to get OPP table\n");
+ return ret;
+ }
+
+ dmc->opp_count = dev_pm_opp_get_opp_count(dmc->dev);
+
+ dmc->opp = devm_kmalloc_array(dmc->dev, dmc->opp_count,
+ sizeof(struct dmc_opp_table), GFP_KERNEL);
+ if (!dmc->opp)
+ return -ENOMEM;
+
+ idx = dmc->opp_count - 1;
+ for (i = 0, freq = ULONG_MAX; i < dmc->opp_count; i++, freq--) {
+ struct dev_pm_opp *opp;
+
+ opp = dev_pm_opp_find_freq_floor(dmc->dev, &freq);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ dmc->opp[idx - i].freq_hz = freq;
+ dmc->opp[idx - i].volt_uv = dev_pm_opp_get_voltage(opp);
+
+ dev_pm_opp_put(opp);
+ }
+
+ return 0;
+}
+
+/**
+ * exynos5_set_bypass_dram_timings() - Low-level changes of the DRAM timings
+ * @dmc: device for which the new settings are going to be applied
+ *
+ * Low-level function for changing the DRAM timings when the memory is
+ * clocked from the 'bypass' clock source (fixed 400 MHz frequency).
+ * It uses timing bank register set 1.
+ */
+static void exynos5_set_bypass_dram_timings(struct exynos5_dmc *dmc)
+{
+ writel(EXYNOS5_AREF_NORMAL,
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGAREF);
+
+ writel(dmc->bypass_timing_row,
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGROW1);
+ writel(dmc->bypass_timing_row,
+ dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGROW1);
+ writel(dmc->bypass_timing_data,
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGDATA1);
+ writel(dmc->bypass_timing_data,
+ dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGDATA1);
+ writel(dmc->bypass_timing_power,
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGPOWER1);
+ writel(dmc->bypass_timing_power,
+ dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGPOWER1);
+}
+
+/**
+ * exynos5_dram_change_timings() - Low-level changes of the DRAM final timings
+ * @dmc: device for which the new settings is going to be applied
+ * @target_rate: target frequency of the DMC
+ *
+ * Low-level function for changing timings for DRAM memory operating from main
+ * clock source (BPLL), which can have different frequencies. Thus, each
+ * frequency must have corresponding timings register values in order to keep
+ * the needed delays.
+ * It uses timing bank registers set 0.
+ */
+static int exynos5_dram_change_timings(struct exynos5_dmc *dmc,
+ unsigned long target_rate)
+{
+ int idx;
+
+ for (idx = dmc->opp_count - 1; idx >= 0; idx--)
+ if (dmc->opp[idx].freq_hz <= target_rate)
+ break;
+
+ if (idx < 0)
+ return -EINVAL;
+
+ writel(EXYNOS5_AREF_NORMAL,
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGAREF);
+
+ writel(dmc->timing_row[idx],
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGROW0);
+ writel(dmc->timing_row[idx],
+ dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGROW0);
+ writel(dmc->timing_data[idx],
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGDATA0);
+ writel(dmc->timing_data[idx],
+ dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGDATA0);
+ writel(dmc->timing_power[idx],
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGPOWER0);
+ writel(dmc->timing_power[idx],
+ dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGPOWER0);
+
+ return 0;
+}
+
+/**
+ * exynos5_dmc_align_target_voltage() - Sets the final voltage for the DMC
+ * @dmc: device for which it is going to be set
+ * @target_volt: new voltage which is chosen to be final
+ *
+ * The function tries to align the voltage to a level safe for 'normal' mode.
+ * It checks whether a higher voltage is needed and changes the value
+ * accordingly. The target voltage might be lower than the currently set one
+ * and the system will still be stable.
+ */
+static int exynos5_dmc_align_target_voltage(struct exynos5_dmc *dmc,
+ unsigned long target_volt)
+{
+ int ret = 0;
+
+ if (dmc->curr_volt <= target_volt)
+ return 0;
+
+ ret = regulator_set_voltage(dmc->vdd_mif, target_volt,
+ target_volt);
+ if (!ret)
+ dmc->curr_volt = target_volt;
+
+ return ret;
+}
+
+/**
+ * exynos5_dmc_align_bypass_voltage() - Sets the voltage for the DMC
+ * @dmc: device for which it is going to be set
+ * @target_volt: new voltage which is chosen to be final
+ *
+ * The function tries to align the voltage to a level safe for the 'bypass'
+ * mode. It checks whether a higher voltage is needed and changes the value
+ * accordingly. The target voltage must not be less than the one currently
+ * needed, because the device might become unstable at the current frequency.
+ */
+static int exynos5_dmc_align_bypass_voltage(struct exynos5_dmc *dmc,
+ unsigned long target_volt)
+{
+ int ret = 0;
+
+ if (dmc->curr_volt >= target_volt)
+ return 0;
+
+ ret = regulator_set_voltage(dmc->vdd_mif, target_volt,
+ target_volt);
+ if (!ret)
+ dmc->curr_volt = target_volt;
+
+ return ret;
+}
+
+/**
+ * exynos5_dmc_align_bypass_dram_timings() - Chooses and sets DRAM timings
+ * @dmc: device for which it is going to be set
+ * @target_rate: new frequency which is chosen to be final
+ *
+ * Function changes the DRAM timings for the temporary 'bypass' mode.
+ */
+static int exynos5_dmc_align_bypass_dram_timings(struct exynos5_dmc *dmc,
+ unsigned long target_rate)
+{
+ int idx = find_target_freq_idx(dmc, target_rate);
+
+ if (idx < 0)
+ return -EINVAL;
+
+ exynos5_set_bypass_dram_timings(dmc);
+
+ return 0;
+}
+
+/**
+ * exynos5_dmc_switch_to_bypass_configuration() - Switching to temporary clock
+ * @dmc: DMC device for which the switching is going to happen
+ * @target_rate: new frequency which is going to be set as a final
+ * @target_volt: new voltage which is going to be set as a final
+ *
+ * The function configures the DMC and clocks for operating in the temporary
+ * 'bypass' mode. This mode is used only temporarily but, if required, it
+ * changes the voltage and timings for the DRAM chips. It switches the main
+ * clock to a stable clock source for the duration of the main PLL
+ * reconfiguration.
+ */
+static int
+exynos5_dmc_switch_to_bypass_configuration(struct exynos5_dmc *dmc,
+ unsigned long target_rate,
+ unsigned long target_volt)
+{
+ int ret;
+
+ /*
+	 * A higher voltage for a particular frequency does not harm
+	 * the chip. Use it for the temporary frequency change, so that one
+	 * voltage manipulation can be avoided.
+ */
+ ret = exynos5_dmc_align_bypass_voltage(dmc, target_volt);
+ if (ret)
+ return ret;
+
+ /*
+	 * Longer delays for the DRAM do not cause a crash, but too-short ones do.
+ */
+ ret = exynos5_dmc_align_bypass_dram_timings(dmc, target_rate);
+ if (ret)
+ return ret;
+
+ /*
+	 * The delays are long enough, so use them for the incoming clock.
+ */
+ ret = exynos5_switch_timing_regs(dmc, USE_MX_MSPLL_TIMINGS);
+
+ return ret;
+}
+
+/**
+ * exynos5_dmc_change_freq_and_volt() - Changes voltage and frequency of the DMC
+ * using safe procedure
+ * @dmc: device for which the frequency is going to be changed
+ * @target_rate: requested new frequency
+ * @target_volt: requested voltage which corresponds to the new frequency
+ *
+ * The DMC frequency change procedure requires a few steps.
+ * The main requirement is to change the clock source in the clock mux
+ * while the main clock PLL is locking. The assumption is that the
+ * alternative clock source set as the parent is stable.
+ * The second parent's clock frequency is fixed at 400 MHz and is named the
+ * 'bypass' clock. This requires aligning the DRAM timing parameters to the
+ * new clock period. There are two bank sets for keeping the DRAM
+ * timings: set 0 and set 1. Set 0 is used when the main clock source is
+ * chosen, while the second register set is used for the 'bypass' clock.
+ * Switching between the two bank sets is part of the process.
+ * The voltage must also be aligned to the minimum required level; this is
+ * done during the intermediate step of switching to the 'bypass' parent
+ * clock source: if the old voltage is lower, the voltage level must be
+ * increased first. The complexity of the voltage manipulation is hidden in
+ * low-level functions. The final alignment of the voltage level happens at
+ * the end of this function.
+ */
+static int
+exynos5_dmc_change_freq_and_volt(struct exynos5_dmc *dmc,
+ unsigned long target_rate,
+ unsigned long target_volt)
+{
+ int ret;
+
+ ret = exynos5_dmc_switch_to_bypass_configuration(dmc, target_rate,
+ target_volt);
+ if (ret)
+ return ret;
+
+ /*
+ * Voltage is set at least to a level needed for this frequency,
+ * so switching clock source is safe now.
+ */
+ clk_prepare_enable(dmc->fout_spll);
+ clk_prepare_enable(dmc->mout_spll);
+ clk_prepare_enable(dmc->mout_mx_mspll_ccore);
+
+ ret = clk_set_parent(dmc->mout_mclk_cdrex, dmc->mout_mx_mspll_ccore);
+ if (ret)
+ goto disable_clocks;
+
+ /*
+ * We are safe to increase the timings for current bypass frequency.
+ * Thanks to this the settings will be ready for the upcoming clock
+ * source change.
+ */
+ exynos5_dram_change_timings(dmc, target_rate);
+
+ clk_set_rate(dmc->fout_bpll, target_rate);
+
+ ret = exynos5_switch_timing_regs(dmc, USE_BPLL_TIMINGS);
+ if (ret)
+ goto disable_clocks;
+
+ ret = clk_set_parent(dmc->mout_mclk_cdrex, dmc->mout_bpll);
+ if (ret)
+ goto disable_clocks;
+
+ /*
+ * Make sure if the voltage is not from 'bypass' settings and align to
+ * the right level for power efficiency.
+ */
+ ret = exynos5_dmc_align_target_voltage(dmc, target_volt);
+
+disable_clocks:
+ clk_disable_unprepare(dmc->mout_mx_mspll_ccore);
+ clk_disable_unprepare(dmc->mout_spll);
+ clk_disable_unprepare(dmc->fout_spll);
+
+ return ret;
+}
+
+/**
+ * exynos5_dmc_get_volt_freq() - Gets the frequency and voltage from the OPP
+ * table.
+ * @dmc: device for which the frequency is going to be changed
+ * @freq: requested frequency in Hz
+ * @target_rate: returned frequency which is the same or lower than
+ * requested
+ * @target_volt: returned voltage which corresponds to the returned
+ * frequency
+ * @flags: devfreq flags provided for this frequency change request
+ *
+ * The function takes the requested frequency and queries the OPP framework
+ * for the matching frequency and voltage. It populates 'target_rate' and
+ * 'target_volt', or returns an error value when the OPP framework fails.
+ */
+static int exynos5_dmc_get_volt_freq(struct exynos5_dmc *dmc,
+ unsigned long *freq,
+ unsigned long *target_rate,
+ unsigned long *target_volt, u32 flags)
+{
+ struct dev_pm_opp *opp;
+
+ opp = devfreq_recommended_opp(dmc->dev, freq, flags);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ *target_rate = dev_pm_opp_get_freq(opp);
+ *target_volt = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+
+ return 0;
+}
+
+/**
+ * exynos5_dmc_target() - Function responsible for changing frequency of DMC
+ * @dev: device for which the frequency is going to be changed
+ * @freq: requested frequency in Hz
+ * @flags: flags provided for this frequency change request
+ *
+ * An entry point provided to the devfreq framework for changing the DMC
+ * frequency. The function gets the closest possible rate from the OPP table
+ * based on the requested frequency, then calls the function responsible for
+ * the actual frequency and voltage change. In case of failure, it does not
+ * set 'curr_rate' and returns an error value to the framework.
+ */
+static int exynos5_dmc_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct exynos5_dmc *dmc = dev_get_drvdata(dev);
+ unsigned long target_rate = 0;
+ unsigned long target_volt = 0;
+ int ret;
+
+ ret = exynos5_dmc_get_volt_freq(dmc, freq, &target_rate, &target_volt,
+ flags);
+
+ if (ret)
+ return ret;
+
+ if (target_rate == dmc->curr_rate)
+ return 0;
+
+ mutex_lock(&dmc->lock);
+
+ ret = exynos5_dmc_change_freq_and_volt(dmc, target_rate, target_volt);
+
+ if (ret) {
+ mutex_unlock(&dmc->lock);
+ return ret;
+ }
+
+ dmc->curr_rate = target_rate;
+
+ mutex_unlock(&dmc->lock);
+ return 0;
+}
+
+/**
+ * exynos5_counters_get() - Gets the performance counters values.
+ * @dmc: device for which the counters are going to be checked
+ * @load_count: variable which is populated with counter value
+ * @total_count: variable which is used as 'wall clock' reference
+ *
+ * A function which provides the performance counter values. It sums up the
+ * counters of the two DMC channels. 'total_count' is used as the reference
+ * and maximum value; the ratio 'load_count/total_count' shows the busy
+ * percentage [0%, 100%].
+ */
+static int exynos5_counters_get(struct exynos5_dmc *dmc,
+ unsigned long *load_count,
+ unsigned long *total_count)
+{
+ unsigned long total = 0;
+ struct devfreq_event_data event;
+ int ret, i;
+
+ *load_count = 0;
+
+ /* Take into account only read+write counters, but stop all */
+ for (i = 0; i < dmc->num_counters; i++) {
+ if (!dmc->counter[i])
+ continue;
+
+ ret = devfreq_event_get_event(dmc->counter[i], &event);
+ if (ret < 0)
+ return ret;
+
+ *load_count += event.load_count;
+
+ if (total < event.total_count)
+ total = event.total_count;
+ }
+
+ *total_count = total;
+
+ return 0;
+}
+
+/**
+ * exynos5_dmc_start_perf_events() - Setup and start performance event counters
+ * @dmc: device for which the counters are going to be checked
+ * @beg_value: initial value for the counter
+ *
+ * A function which enables the needed counters and interrupts, sets the
+ * initial values and then starts the counters.
+ */
+static void exynos5_dmc_start_perf_events(struct exynos5_dmc *dmc,
+ u32 beg_value)
+{
+ /* Enable interrupts for counter 2 */
+ writel(PERF_CNT2, dmc->base_drexi0 + DREX_INTENS_PPC);
+ writel(PERF_CNT2, dmc->base_drexi1 + DREX_INTENS_PPC);
+
+ /* Enable counter 2 and CCNT */
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_CNTENS_PPC);
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_CNTENS_PPC);
+
+ /* Clear overflow flag for all counters */
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_FLAG_PPC);
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_FLAG_PPC);
+
+ /* Reset all counters */
+ writel(CC_RESET | PPC_COUNTER_RESET, dmc->base_drexi0 + DREX_PMNC_PPC);
+ writel(CC_RESET | PPC_COUNTER_RESET, dmc->base_drexi1 + DREX_PMNC_PPC);
+
+ /*
+ * Set start value for the counters, the number of samples that
+ * will be gathered is calculated as: 0xffffffff - beg_value
+ */
+ writel(beg_value, dmc->base_drexi0 + DREX_PMCNT2_PPC);
+ writel(beg_value, dmc->base_drexi1 + DREX_PMCNT2_PPC);
+
+ /* Start all counters */
+ writel(PPC_ENABLE, dmc->base_drexi0 + DREX_PMNC_PPC);
+ writel(PPC_ENABLE, dmc->base_drexi1 + DREX_PMNC_PPC);
+}
+
+/**
+ * exynos5_dmc_perf_events_calc() - Calculate utilization
+ * @dmc: device for which the counters are going to be checked
+ * @diff_ts: time between last interrupt and current one
+ *
+ * Function which calculates needed utilization for the devfreq governor.
+ * It prepares values for 'busy_time' and 'total_time' based on elapsed time
+ * between interrupts, which approximates utilization.
+ */
+static void exynos5_dmc_perf_events_calc(struct exynos5_dmc *dmc, u64 diff_ts)
+{
+ /*
+ * This is a simple algorithm for managing traffic on DMC.
+ * When there is almost no load the counters overflow every 4s,
+	 * no matter the DMC frequency.
+	 * High load can be approximated using a linear function.
+ * Knowing that, simple calculation can provide 'busy_time' and
+ * 'total_time' to the devfreq governor which picks up target
+ * frequency.
+ * We want a fast ramp up and slow decay in frequency change function.
+ */
+ if (diff_ts < PERF_EVENT_UP_DOWN_THRESHOLD) {
+ /*
+ * Set higher utilization for the simple_ondemand governor.
+ * The governor should increase the frequency of the DMC.
+ */
+ dmc->load = 70;
+ dmc->total = 100;
+ } else {
+ /*
+ * Set low utilization for the simple_ondemand governor.
+ * The governor should decrease the frequency of the DMC.
+ */
+ dmc->load = 35;
+ dmc->total = 100;
+ }
+
+ dev_dbg(dmc->dev, "diff_ts=%llu\n", diff_ts);
+}
+
+/**
+ * exynos5_dmc_perf_events_check() - Checks the status of the counters
+ * @dmc: device for which the counters are going to be checked
+ *
+ * A function called from the threaded IRQ handler to check the counters'
+ * state and to invoke the approximation of the needed utilization.
+ */
+static void exynos5_dmc_perf_events_check(struct exynos5_dmc *dmc)
+{
+ u32 val;
+ u64 diff_ts, ts;
+
+ ts = ktime_get_ns();
+
+ /* Stop all counters */
+ writel(0, dmc->base_drexi0 + DREX_PMNC_PPC);
+ writel(0, dmc->base_drexi1 + DREX_PMNC_PPC);
+
+ /* Check the source in interrupt flag registers (which channel) */
+ val = readl(dmc->base_drexi0 + DREX_FLAG_PPC);
+ if (val) {
+ diff_ts = ts - dmc->last_overflow_ts[0];
+ dmc->last_overflow_ts[0] = ts;
+ dev_dbg(dmc->dev, "drex0 0xE050 val= 0x%08x\n", val);
+ } else {
+ val = readl(dmc->base_drexi1 + DREX_FLAG_PPC);
+ diff_ts = ts - dmc->last_overflow_ts[1];
+ dmc->last_overflow_ts[1] = ts;
+ dev_dbg(dmc->dev, "drex1 0xE050 val= 0x%08x\n", val);
+ }
+
+ exynos5_dmc_perf_events_calc(dmc, diff_ts);
+
+ exynos5_dmc_start_perf_events(dmc, PERF_COUNTER_START_VALUE);
+}
+
+/**
+ * exynos5_dmc_enable_perf_events() - Enable performance events
+ * @dmc: device for which the counters are going to be checked
+ *
+ * A function which sets up the needed environment and enables the counters.
+ */
+static void exynos5_dmc_enable_perf_events(struct exynos5_dmc *dmc)
+{
+ u64 ts;
+
+ /* Enable Performance Event Clock */
+ writel(PEREV_CLK_EN, dmc->base_drexi0 + DREX_PPCCLKCON);
+ writel(PEREV_CLK_EN, dmc->base_drexi1 + DREX_PPCCLKCON);
+
+ /* Select read transfers as performance event2 */
+ writel(READ_TRANSFER_CH0, dmc->base_drexi0 + DREX_PEREV2CONFIG);
+ writel(READ_TRANSFER_CH1, dmc->base_drexi1 + DREX_PEREV2CONFIG);
+
+ ts = ktime_get_ns();
+ dmc->last_overflow_ts[0] = ts;
+ dmc->last_overflow_ts[1] = ts;
+
+ /* Devfreq shouldn't be faster than initialization, play safe though. */
+ dmc->load = 99;
+ dmc->total = 100;
+}
+
+/**
+ * exynos5_dmc_disable_perf_events() - Disable performance events
+ * @dmc: device for which the counters are going to be checked
+ *
+ * A function which stops and disables the performance event counters and
+ * interrupts.
+ */
+static void exynos5_dmc_disable_perf_events(struct exynos5_dmc *dmc)
+{
+ /* Stop all counters */
+ writel(0, dmc->base_drexi0 + DREX_PMNC_PPC);
+ writel(0, dmc->base_drexi1 + DREX_PMNC_PPC);
+
+ /* Disable interrupts for counter 2 */
+ writel(PERF_CNT2, dmc->base_drexi0 + DREX_INTENC_PPC);
+ writel(PERF_CNT2, dmc->base_drexi1 + DREX_INTENC_PPC);
+
+ /* Disable counter 2 and CCNT */
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_CNTENC_PPC);
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_CNTENC_PPC);
+
+ /* Clear overflow flag for all counters */
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_FLAG_PPC);
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_FLAG_PPC);
+}
+
+/**
+ * exynos5_dmc_get_status() - Read current DMC performance statistics.
+ * @dev: device for which the statistics are requested
+ * @stat: structure which has statistic fields
+ *
+ * The function reads the DMC performance counters and calculates 'busy_time'
+ * and 'total_time'. To protect against overflow, the values are shifted
+ * right by 10. After the read-out, the counters are set up to count again.
+ */
+static int exynos5_dmc_get_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct exynos5_dmc *dmc = dev_get_drvdata(dev);
+ unsigned long load, total;
+ int ret;
+
+ if (dmc->in_irq_mode) {
+ mutex_lock(&dmc->lock);
+ stat->current_frequency = dmc->curr_rate;
+ mutex_unlock(&dmc->lock);
+
+ stat->busy_time = dmc->load;
+ stat->total_time = dmc->total;
+ } else {
+ ret = exynos5_counters_get(dmc, &load, &total);
+ if (ret < 0)
+ return -EINVAL;
+
+ /* To protect from overflow, divide by 1024 */
+ stat->busy_time = load >> 10;
+ stat->total_time = total >> 10;
+
+ ret = exynos5_counters_set_event(dmc);
+ if (ret < 0) {
+ dev_err(dev, "could not set event counter\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * exynos5_dmc_get_cur_freq() - Function returns current DMC frequency
+ * @dev: device for which the framework checks operating frequency
+ * @freq: returned frequency value
+ *
+ * It returns the currently used frequency of the DMC. The real operating
+ * frequency might be lower if the clock source rate could not be divided
+ * down to exactly the requested value.
+ */
+static int exynos5_dmc_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct exynos5_dmc *dmc = dev_get_drvdata(dev);
+
+ mutex_lock(&dmc->lock);
+ *freq = dmc->curr_rate;
+ mutex_unlock(&dmc->lock);
+
+ return 0;
+}
+
+/*
+ * exynos5_dmc_df_profile - Devfreq governor's profile structure
+ *
+ * It provides the devfreq framework with the needed callbacks and the
+ * polling period.
+ */
+static struct devfreq_dev_profile exynos5_dmc_df_profile = {
+ .timer = DEVFREQ_TIMER_DELAYED,
+ .target = exynos5_dmc_target,
+ .get_dev_status = exynos5_dmc_get_status,
+ .get_cur_freq = exynos5_dmc_get_cur_freq,
+};
+
+/**
+ * exynos5_dmc_align_init_freq() - Align initial frequency value
+ * @dmc: device for which the frequency is going to be set
+ * @bootloader_init_freq: initial frequency set by the bootloader in Hz
+ *
+ * The initial frequency set by the bootloader might be different from the
+ * frequency values supported by the driver, e.g. due to different PLL
+ * settings or a different PLL used as the source.
+ * This function provides the 'initial_freq' for the devfreq framework's
+ * statistics engine, which supports only registered values. Thus, some
+ * alignment must be made.
+ */
+static unsigned long
+exynos5_dmc_align_init_freq(struct exynos5_dmc *dmc,
+ unsigned long bootloader_init_freq)
+{
+ unsigned long aligned_freq;
+ int idx;
+
+ idx = find_target_freq_idx(dmc, bootloader_init_freq);
+ if (idx >= 0)
+ aligned_freq = dmc->opp[idx].freq_hz;
+ else
+ aligned_freq = dmc->opp[dmc->opp_count - 1].freq_hz;
+
+ return aligned_freq;
+}
+
+/**
+ * create_timings_aligned() - Create register values and align with standard
+ * @dmc: device for which the frequency is going to be set
+ * @reg_timing_row: array to fill with values for timing row register
+ * @reg_timing_data: array to fill with values for timing data register
+ * @reg_timing_power: array to fill with values for timing power register
+ * @clk_period_ps: the period of the clock, known as tCK
+ *
+ * The function calculates the timings and creates register values ready for
+ * a frequency transition. Each register contains a few timings, each shifted
+ * by a known offset. Every timing value is calculated from the memory
+ * specification: the minimum time required and the minimum number of cycles
+ * required.
+ */
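+/*
+ * A worked example of the ceiling division used throughout this function
+ * (illustrative LPDDR3 numbers, not from a specific part): at 800 MHz,
+ * tCK = 1250 ps; tRFC = 210000 ps gives 210000 / 1250 = 168 cycles exactly,
+ * while tRAS = 42000 ps gives 42000 / 1250 = 33.6, rounded up to 34 cycles.
+ */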
+static int create_timings_aligned(struct exynos5_dmc *dmc, u32 *reg_timing_row,
+ u32 *reg_timing_data, u32 *reg_timing_power,
+ u32 clk_period_ps)
+{
+ u32 val;
+ const struct timing_reg *reg;
+
+ if (clk_period_ps == 0)
+ return -EINVAL;
+
+ *reg_timing_row = 0;
+ *reg_timing_data = 0;
+ *reg_timing_power = 0;
+
+ val = dmc->timings->tRFC / clk_period_ps;
+ val += dmc->timings->tRFC % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRFC);
+ reg = &timing_row_reg_fields[0];
+ *reg_timing_row |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tRRD / clk_period_ps;
+ val += dmc->timings->tRRD % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRRD);
+ reg = &timing_row_reg_fields[1];
+ *reg_timing_row |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tRPab / clk_period_ps;
+ val += dmc->timings->tRPab % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRPab);
+ reg = &timing_row_reg_fields[2];
+ *reg_timing_row |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tRCD / clk_period_ps;
+ val += dmc->timings->tRCD % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRCD);
+ reg = &timing_row_reg_fields[3];
+ *reg_timing_row |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tRC / clk_period_ps;
+ val += dmc->timings->tRC % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRC);
+ reg = &timing_row_reg_fields[4];
+ *reg_timing_row |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tRAS / clk_period_ps;
+ val += dmc->timings->tRAS % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRAS);
+ reg = &timing_row_reg_fields[5];
+ *reg_timing_row |= TIMING_VAL2REG(reg, val);
+
+ /* data related timings */
+ val = dmc->timings->tWTR / clk_period_ps;
+ val += dmc->timings->tWTR % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tWTR);
+ reg = &timing_data_reg_fields[0];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tWR / clk_period_ps;
+ val += dmc->timings->tWR % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tWR);
+ reg = &timing_data_reg_fields[1];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tRTP / clk_period_ps;
+ val += dmc->timings->tRTP % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRTP);
+ reg = &timing_data_reg_fields[2];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tW2W_C2C / clk_period_ps;
+ val += dmc->timings->tW2W_C2C % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tW2W_C2C);
+ reg = &timing_data_reg_fields[3];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tR2R_C2C / clk_period_ps;
+ val += dmc->timings->tR2R_C2C % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tR2R_C2C);
+ reg = &timing_data_reg_fields[4];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tWL / clk_period_ps;
+ val += dmc->timings->tWL % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tWL);
+ reg = &timing_data_reg_fields[5];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tDQSCK / clk_period_ps;
+ val += dmc->timings->tDQSCK % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tDQSCK);
+ reg = &timing_data_reg_fields[6];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tRL / clk_period_ps;
+ val += dmc->timings->tRL % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRL);
+ reg = &timing_data_reg_fields[7];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ /* power related timings */
+ val = dmc->timings->tFAW / clk_period_ps;
+ val += dmc->timings->tFAW % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tFAW);
+ reg = &timing_power_reg_fields[0];
+ *reg_timing_power |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tXSR / clk_period_ps;
+ val += dmc->timings->tXSR % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tXSR);
+ reg = &timing_power_reg_fields[1];
+ *reg_timing_power |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tXP / clk_period_ps;
+ val += dmc->timings->tXP % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tXP);
+ reg = &timing_power_reg_fields[2];
+ *reg_timing_power |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tCKE / clk_period_ps;
+ val += dmc->timings->tCKE % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tCKE);
+ reg = &timing_power_reg_fields[3];
+ *reg_timing_power |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tMRD / clk_period_ps;
+ val += dmc->timings->tMRD % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tMRD);
+ reg = &timing_power_reg_fields[4];
+ *reg_timing_power |= TIMING_VAL2REG(reg, val);
+
+ return 0;
+}
+
+/**
+ * of_get_dram_timings() - helper function for parsing DT settings for DRAM
+ * @dmc: device for which the frequency is going to be set
+ *
+ * The function parses DT entries with DRAM information.
+ */
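+/*
+ * A sketch of the DT wiring assumed here: the DMC node carries a
+ * "device-handle" phandle to the LPDDR3 chip node (e.g. one compatible
+ * with "jedec,lpddr3") which provides the AC timings and minimum-tCK
+ * values consumed by of_lpddr3_get_ddr_timings() and
+ * of_lpddr3_get_min_tck().
+ */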
+static int of_get_dram_timings(struct exynos5_dmc *dmc)
+{
+ int ret = 0;
+ int idx;
+ struct device_node *np_ddr;
+ u32 freq_mhz, clk_period_ps;
+
+ np_ddr = of_parse_phandle(dmc->dev->of_node, "device-handle", 0);
+ if (!np_ddr) {
+ dev_warn(dmc->dev, "could not find 'device-handle' in DT\n");
+ return -EINVAL;
+ }
+
+ dmc->timing_row = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
+ sizeof(u32), GFP_KERNEL);
+ if (!dmc->timing_row) {
+ ret = -ENOMEM;
+ goto put_node;
+ }
+
+ dmc->timing_data = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
+ sizeof(u32), GFP_KERNEL);
+ if (!dmc->timing_data) {
+ ret = -ENOMEM;
+ goto put_node;
+ }
+
+ dmc->timing_power = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
+ sizeof(u32), GFP_KERNEL);
+ if (!dmc->timing_power) {
+ ret = -ENOMEM;
+ goto put_node;
+ }
+
+ dmc->timings = of_lpddr3_get_ddr_timings(np_ddr, dmc->dev,
+ DDR_TYPE_LPDDR3,
+ &dmc->timings_arr_size);
+ if (!dmc->timings) {
+ dev_warn(dmc->dev, "could not get timings from DT\n");
+ ret = -EINVAL;
+ goto put_node;
+ }
+
+ dmc->min_tck = of_lpddr3_get_min_tck(np_ddr, dmc->dev);
+ if (!dmc->min_tck) {
+ dev_warn(dmc->dev, "could not get tck from DT\n");
+ ret = -EINVAL;
+ goto put_node;
+ }
+
+ /* Sorted array of OPPs with frequency ascending */
+ for (idx = 0; idx < dmc->opp_count; idx++) {
+ freq_mhz = dmc->opp[idx].freq_hz / 1000000;
+ clk_period_ps = 1000000 / freq_mhz;
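+		/* e.g. 800 MHz -> 1000000 / 800 = 1250 ps (integer division) */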
+
+		ret = create_timings_aligned(dmc, &dmc->timing_row[idx],
+					     &dmc->timing_data[idx],
+					     &dmc->timing_power[idx],
+					     clk_period_ps);
+		if (ret)
+			goto put_node;
+ }
+
+ /* Take the highest frequency's timings as 'bypass' */
+ dmc->bypass_timing_row = dmc->timing_row[idx - 1];
+ dmc->bypass_timing_data = dmc->timing_data[idx - 1];
+ dmc->bypass_timing_power = dmc->timing_power[idx - 1];
+
+put_node:
+ of_node_put(np_ddr);
+ return ret;
+}
+
+/**
+ * exynos5_dmc_init_clks() - Initialize clocks needed for DMC operation.
+ * @dmc: DMC structure containing needed fields
+ *
+ * Get the needed clocks defined in the DT, enable them and set the right
+ * parents. Read the current frequency and initialize the initial rate for
+ * the governor.
+ */
+static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc)
+{
+ int ret;
+ unsigned long target_volt = 0;
+ unsigned long target_rate = 0;
+ unsigned int tmp;
+
+ dmc->fout_spll = devm_clk_get(dmc->dev, "fout_spll");
+ if (IS_ERR(dmc->fout_spll))
+ return PTR_ERR(dmc->fout_spll);
+
+ dmc->fout_bpll = devm_clk_get(dmc->dev, "fout_bpll");
+ if (IS_ERR(dmc->fout_bpll))
+ return PTR_ERR(dmc->fout_bpll);
+
+ dmc->mout_mclk_cdrex = devm_clk_get(dmc->dev, "mout_mclk_cdrex");
+ if (IS_ERR(dmc->mout_mclk_cdrex))
+ return PTR_ERR(dmc->mout_mclk_cdrex);
+
+ dmc->mout_bpll = devm_clk_get(dmc->dev, "mout_bpll");
+ if (IS_ERR(dmc->mout_bpll))
+ return PTR_ERR(dmc->mout_bpll);
+
+ dmc->mout_mx_mspll_ccore = devm_clk_get(dmc->dev,
+ "mout_mx_mspll_ccore");
+ if (IS_ERR(dmc->mout_mx_mspll_ccore))
+ return PTR_ERR(dmc->mout_mx_mspll_ccore);
+
+ dmc->mout_spll = devm_clk_get(dmc->dev, "ff_dout_spll2");
+ if (IS_ERR(dmc->mout_spll)) {
+ dmc->mout_spll = devm_clk_get(dmc->dev, "mout_sclk_spll");
+ if (IS_ERR(dmc->mout_spll))
+ return PTR_ERR(dmc->mout_spll);
+ }
+
+ /*
+	 * Read the current rate (in Hz), align it to a supported OPP and set
+	 * it as the initial frequency for the governor.
+ */
+ dmc->curr_rate = clk_get_rate(dmc->mout_mclk_cdrex);
+ dmc->curr_rate = exynos5_dmc_align_init_freq(dmc, dmc->curr_rate);
+ exynos5_dmc_df_profile.initial_freq = dmc->curr_rate;
+
+ ret = exynos5_dmc_get_volt_freq(dmc, &dmc->curr_rate, &target_rate,
+ &target_volt, 0);
+ if (ret)
+ return ret;
+
+ dmc->curr_volt = target_volt;
+
+ ret = clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
+ if (ret)
+ return ret;
+
+ clk_prepare_enable(dmc->fout_bpll);
+ clk_prepare_enable(dmc->mout_bpll);
+
+ /*
+ * Some bootloaders do not set clock routes correctly.
+ * Stop one path in clocks to PHY.
+ */
+ regmap_read(dmc->clk_regmap, CDREX_LPDDR3PHY_CLKM_SRC, &tmp);
+ tmp &= ~(BIT(1) | BIT(0));
+ regmap_write(dmc->clk_regmap, CDREX_LPDDR3PHY_CLKM_SRC, tmp);
+
+ return 0;
+}
+
+/**
+ * exynos5_performance_counters_init() - Initializes the DMC's performance counters
+ * @dmc: DMC for which it does the setup
+ *
+ * Initialization of the performance counters in the DMC for estimating usage.
+ * The counters' values are used to calculate the memory bandwidth, based on
+ * which the governor changes the frequency.
+ * The counters are not used when the governor is GOVERNOR_USERSPACE.
+ */
+static int exynos5_performance_counters_init(struct exynos5_dmc *dmc)
+{
+ int ret, i;
+
+ dmc->num_counters = devfreq_event_get_edev_count(dmc->dev,
+ "devfreq-events");
+ if (dmc->num_counters < 0) {
+ dev_err(dmc->dev, "could not get devfreq-event counters\n");
+ return dmc->num_counters;
+ }
+
+ dmc->counter = devm_kcalloc(dmc->dev, dmc->num_counters,
+ sizeof(*dmc->counter), GFP_KERNEL);
+ if (!dmc->counter)
+ return -ENOMEM;
+
+ for (i = 0; i < dmc->num_counters; i++) {
+ dmc->counter[i] =
+ devfreq_event_get_edev_by_phandle(dmc->dev,
+ "devfreq-events", i);
+ if (IS_ERR_OR_NULL(dmc->counter[i]))
+ return -EPROBE_DEFER;
+ }
+
+ ret = exynos5_counters_enable_edev(dmc);
+ if (ret < 0) {
+ dev_err(dmc->dev, "could not enable event counter\n");
+ return ret;
+ }
+
+ ret = exynos5_counters_set_event(dmc);
+ if (ret < 0) {
+ exynos5_counters_disable_edev(dmc);
+ dev_err(dmc->dev, "could not set event counter\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * exynos5_dmc_set_pause_on_switching() - Controls a pause feature in DMC
+ * @dmc: device which is used for changing this feature
+ *
+ * The DREX DMC needs to be paused when a divider or mux in the clock tree
+ * changes its configuration. In such a situation, access to the memory is
+ * blocked by the DMC automatically. This feature is used whenever a clock
+ * frequency change request touches the clock tree.
+ */
+static inline int exynos5_dmc_set_pause_on_switching(struct exynos5_dmc *dmc)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(dmc->clk_regmap, CDREX_PAUSE, &val);
+ if (ret)
+ return ret;
+
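+	/* bit 0 of CDREX_PAUSE enables the automatic pause feature */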
+ val |= 1UL;
+ regmap_write(dmc->clk_regmap, CDREX_PAUSE, val);
+
+ return 0;
+}
+
+static irqreturn_t dmc_irq_thread(int irq, void *priv)
+{
+ int res;
+ struct exynos5_dmc *dmc = priv;
+
+ mutex_lock(&dmc->df->lock);
+ exynos5_dmc_perf_events_check(dmc);
+ res = update_devfreq(dmc->df);
+ mutex_unlock(&dmc->df->lock);
+
+ if (res)
+ dev_warn(dmc->dev, "devfreq failed with %d\n", res);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * exynos5_dmc_probe() - Probe function for the DMC driver
+ * @pdev: platform device for which the driver is going to be initialized
+ *
+ * Initialize basic components: clocks, regulators, performance counters, etc.
+ * Read out product version and based on the information setup
+ * internal structures for the controller (frequency and voltage) and for DRAM
+ * memory parameters: timings for each operating frequency.
+ * Register new devfreq device for controlling DVFS of the DMC.
+ */
+static int exynos5_dmc_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct exynos5_dmc *dmc;
+ int irq[2];
+
+ dmc = devm_kzalloc(dev, sizeof(*dmc), GFP_KERNEL);
+ if (!dmc)
+ return -ENOMEM;
+
+ mutex_init(&dmc->lock);
+
+ dmc->dev = dev;
+ platform_set_drvdata(pdev, dmc);
+
+ dmc->base_drexi0 = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dmc->base_drexi0))
+ return PTR_ERR(dmc->base_drexi0);
+
+ dmc->base_drexi1 = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(dmc->base_drexi1))
+ return PTR_ERR(dmc->base_drexi1);
+
+ dmc->clk_regmap = syscon_regmap_lookup_by_phandle(np,
+ "samsung,syscon-clk");
+ if (IS_ERR(dmc->clk_regmap))
+ return PTR_ERR(dmc->clk_regmap);
+
+ ret = exynos5_init_freq_table(dmc, &exynos5_dmc_df_profile);
+ if (ret) {
+ dev_warn(dev, "couldn't initialize frequency settings\n");
+ return ret;
+ }
+
+ dmc->vdd_mif = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(dmc->vdd_mif)) {
+ ret = PTR_ERR(dmc->vdd_mif);
+ return ret;
+ }
+
+ ret = exynos5_dmc_init_clks(dmc);
+ if (ret)
+ return ret;
+
+ ret = of_get_dram_timings(dmc);
+ if (ret) {
+ dev_warn(dev, "couldn't initialize timings settings\n");
+ goto remove_clocks;
+ }
+
+ ret = exynos5_dmc_set_pause_on_switching(dmc);
+ if (ret) {
+ dev_warn(dev, "couldn't get access to PAUSE register\n");
+ goto remove_clocks;
+ }
+
+	/* There are two modes in which the driver works: polling or IRQ */
+ irq[0] = platform_get_irq_byname(pdev, "drex_0");
+ irq[1] = platform_get_irq_byname(pdev, "drex_1");
+ if (irq[0] > 0 && irq[1] > 0 && irqmode) {
+ ret = devm_request_threaded_irq(dev, irq[0], NULL,
+ dmc_irq_thread, IRQF_ONESHOT,
+ dev_name(dev), dmc);
+ if (ret) {
+ dev_err(dev, "couldn't grab IRQ\n");
+ goto remove_clocks;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq[1], NULL,
+ dmc_irq_thread, IRQF_ONESHOT,
+ dev_name(dev), dmc);
+ if (ret) {
+ dev_err(dev, "couldn't grab IRQ\n");
+ goto remove_clocks;
+ }
+
+ /*
+ * Setup default thresholds for the devfreq governor.
+ * The values are chosen based on experiments.
+ */
+ dmc->gov_data.upthreshold = 55;
+ dmc->gov_data.downdifferential = 5;
+
+ exynos5_dmc_enable_perf_events(dmc);
+
+		dmc->in_irq_mode = true;
+ } else {
+ ret = exynos5_performance_counters_init(dmc);
+ if (ret) {
+ dev_warn(dev, "couldn't probe performance counters\n");
+ goto remove_clocks;
+ }
+
+ /*
+ * Setup default thresholds for the devfreq governor.
+ * The values are chosen based on experiments.
+ */
+ dmc->gov_data.upthreshold = 10;
+ dmc->gov_data.downdifferential = 5;
+
+ exynos5_dmc_df_profile.polling_ms = 100;
+ }
+
+ dmc->df = devm_devfreq_add_device(dev, &exynos5_dmc_df_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ &dmc->gov_data);
+
+ if (IS_ERR(dmc->df)) {
+ ret = PTR_ERR(dmc->df);
+ goto err_devfreq_add;
+ }
+
+ if (dmc->in_irq_mode)
+ exynos5_dmc_start_perf_events(dmc, PERF_COUNTER_START_VALUE);
+
+ dev_info(dev, "DMC initialized, in irq mode: %d\n", dmc->in_irq_mode);
+
+ return 0;
+
+err_devfreq_add:
+ if (dmc->in_irq_mode)
+ exynos5_dmc_disable_perf_events(dmc);
+ else
+ exynos5_counters_disable_edev(dmc);
+remove_clocks:
+ clk_disable_unprepare(dmc->mout_bpll);
+ clk_disable_unprepare(dmc->fout_bpll);
+
+ return ret;
+}
+
+/**
+ * exynos5_dmc_remove() - Remove function for the platform device
+ * @pdev: platform device which is going to be removed
+ *
+ * The function relies on the 'devm' framework, which automatically cleans up
+ * the device's resources. It only explicitly calls the disable function for
+ * the performance counters.
+ */
+static int exynos5_dmc_remove(struct platform_device *pdev)
+{
+ struct exynos5_dmc *dmc = dev_get_drvdata(&pdev->dev);
+
+ if (dmc->in_irq_mode)
+ exynos5_dmc_disable_perf_events(dmc);
+ else
+ exynos5_counters_disable_edev(dmc);
+
+ clk_disable_unprepare(dmc->mout_bpll);
+ clk_disable_unprepare(dmc->fout_bpll);
+
+ return 0;
+}
+
+static const struct of_device_id exynos5_dmc_of_match[] = {
+ { .compatible = "samsung,exynos5422-dmc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, exynos5_dmc_of_match);
+
+static struct platform_driver exynos5_dmc_platdrv = {
+ .probe = exynos5_dmc_probe,
+ .remove = exynos5_dmc_remove,
+ .driver = {
+ .name = "exynos5-dmc",
+ .of_match_table = exynos5_dmc_of_match,
+ },
+};
+module_platform_driver(exynos5_dmc_platdrv);
+MODULE_DESCRIPTION("Driver for Exynos5422 Dynamic Memory Controller dynamic frequency and voltage change");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Lukasz Luba");
diff --git a/drivers/memory/stm32-fmc2-ebi.c b/drivers/memory/stm32-fmc2-ebi.c
new file mode 100644
index 0000000000..9015e8277d
--- /dev/null
+++ b/drivers/memory/stm32-fmc2-ebi.c
@@ -0,0 +1,1212 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2020
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+/* FMC2 Controller Registers */
+#define FMC2_BCR1 0x0
+#define FMC2_BTR1 0x4
+#define FMC2_BCR(x) ((x) * 0x8 + FMC2_BCR1)
+#define FMC2_BTR(x) ((x) * 0x8 + FMC2_BTR1)
+#define FMC2_PCSCNTR 0x20
+#define FMC2_BWTR1 0x104
+#define FMC2_BWTR(x) ((x) * 0x8 + FMC2_BWTR1)
+
+/* Register: FMC2_BCR1 */
+#define FMC2_BCR1_CCLKEN BIT(20)
+#define FMC2_BCR1_FMC2EN BIT(31)
+
+/* Register: FMC2_BCRx */
+#define FMC2_BCR_MBKEN BIT(0)
+#define FMC2_BCR_MUXEN BIT(1)
+#define FMC2_BCR_MTYP GENMASK(3, 2)
+#define FMC2_BCR_MWID GENMASK(5, 4)
+#define FMC2_BCR_FACCEN BIT(6)
+#define FMC2_BCR_BURSTEN BIT(8)
+#define FMC2_BCR_WAITPOL BIT(9)
+#define FMC2_BCR_WAITCFG BIT(11)
+#define FMC2_BCR_WREN BIT(12)
+#define FMC2_BCR_WAITEN BIT(13)
+#define FMC2_BCR_EXTMOD BIT(14)
+#define FMC2_BCR_ASYNCWAIT BIT(15)
+#define FMC2_BCR_CPSIZE GENMASK(18, 16)
+#define FMC2_BCR_CBURSTRW BIT(19)
+#define FMC2_BCR_NBLSET GENMASK(23, 22)
+
+/* Register: FMC2_BTRx/FMC2_BWTRx */
+#define FMC2_BXTR_ADDSET GENMASK(3, 0)
+#define FMC2_BXTR_ADDHLD GENMASK(7, 4)
+#define FMC2_BXTR_DATAST GENMASK(15, 8)
+#define FMC2_BXTR_BUSTURN GENMASK(19, 16)
+#define FMC2_BTR_CLKDIV GENMASK(23, 20)
+#define FMC2_BTR_DATLAT GENMASK(27, 24)
+#define FMC2_BXTR_ACCMOD GENMASK(29, 28)
+#define FMC2_BXTR_DATAHLD GENMASK(31, 30)
+
+/* Register: FMC2_PCSCNTR */
+#define FMC2_PCSCNTR_CSCOUNT GENMASK(15, 0)
+#define FMC2_PCSCNTR_CNTBEN(x) BIT((x) + 16)
+
+#define FMC2_MAX_EBI_CE 4
+#define FMC2_MAX_BANKS 5
+
+#define FMC2_BCR_CPSIZE_0 0x0
+#define FMC2_BCR_CPSIZE_128 0x1
+#define FMC2_BCR_CPSIZE_256 0x2
+#define FMC2_BCR_CPSIZE_512 0x3
+#define FMC2_BCR_CPSIZE_1024 0x4
+
+#define FMC2_BCR_MWID_8 0x0
+#define FMC2_BCR_MWID_16 0x1
+
+#define FMC2_BCR_MTYP_SRAM 0x0
+#define FMC2_BCR_MTYP_PSRAM 0x1
+#define FMC2_BCR_MTYP_NOR 0x2
+
+#define FMC2_BXTR_EXTMOD_A 0x0
+#define FMC2_BXTR_EXTMOD_B 0x1
+#define FMC2_BXTR_EXTMOD_C 0x2
+#define FMC2_BXTR_EXTMOD_D 0x3
+
+#define FMC2_BCR_NBLSET_MAX 0x3
+#define FMC2_BXTR_ADDSET_MAX 0xf
+#define FMC2_BXTR_ADDHLD_MAX 0xf
+#define FMC2_BXTR_DATAST_MAX 0xff
+#define FMC2_BXTR_BUSTURN_MAX 0xf
+#define FMC2_BXTR_DATAHLD_MAX 0x3
+#define FMC2_BTR_CLKDIV_MAX 0xf
+#define FMC2_BTR_DATLAT_MAX 0xf
+#define FMC2_PCSCNTR_CSCOUNT_MAX 0xff
+
+enum stm32_fmc2_ebi_bank {
+ FMC2_EBI1 = 0,
+ FMC2_EBI2,
+ FMC2_EBI3,
+ FMC2_EBI4,
+ FMC2_NAND
+};
+
+enum stm32_fmc2_ebi_register_type {
+ FMC2_REG_BCR = 1,
+ FMC2_REG_BTR,
+ FMC2_REG_BWTR,
+ FMC2_REG_PCSCNTR
+};
+
+enum stm32_fmc2_ebi_transaction_type {
+ FMC2_ASYNC_MODE_1_SRAM = 0,
+ FMC2_ASYNC_MODE_1_PSRAM,
+ FMC2_ASYNC_MODE_A_SRAM,
+ FMC2_ASYNC_MODE_A_PSRAM,
+ FMC2_ASYNC_MODE_2_NOR,
+ FMC2_ASYNC_MODE_B_NOR,
+ FMC2_ASYNC_MODE_C_NOR,
+ FMC2_ASYNC_MODE_D_NOR,
+ FMC2_SYNC_READ_SYNC_WRITE_PSRAM,
+ FMC2_SYNC_READ_ASYNC_WRITE_PSRAM,
+ FMC2_SYNC_READ_SYNC_WRITE_NOR,
+ FMC2_SYNC_READ_ASYNC_WRITE_NOR
+};
+
+enum stm32_fmc2_ebi_buswidth {
+ FMC2_BUSWIDTH_8 = 8,
+ FMC2_BUSWIDTH_16 = 16
+};
+
+enum stm32_fmc2_ebi_cpsize {
+ FMC2_CPSIZE_0 = 0,
+ FMC2_CPSIZE_128 = 128,
+ FMC2_CPSIZE_256 = 256,
+ FMC2_CPSIZE_512 = 512,
+ FMC2_CPSIZE_1024 = 1024
+};
+
+struct stm32_fmc2_ebi {
+ struct device *dev;
+ struct clk *clk;
+ struct regmap *regmap;
+ u8 bank_assigned;
+
+ u32 bcr[FMC2_MAX_EBI_CE];
+ u32 btr[FMC2_MAX_EBI_CE];
+ u32 bwtr[FMC2_MAX_EBI_CE];
+ u32 pcscntr;
+};
+
+/*
+ * struct stm32_fmc2_prop - STM32 FMC2 EBI property
+ * @name: the device tree binding name of the property
+ * @bprop: indicates that it is a boolean property
+ * @mprop: indicates that it is a mandatory property
+ * @reg_type: the register that has to be modified
+ * @reg_mask: the bit that has to be modified in the selected register
+ * in case it is a boolean property
+ * @reset_val: the default value that has to be set in case the property
+ * has not been defined in the device tree
+ * @check: this callback checks that the property is compliant with the
+ * selected transaction type
+ * @calculate: this callback converts, for example, a timing expressed in
+ * nanoseconds in the device tree into clock cycles or clock periods
+ * @set: this callback applies the values to the registers
+ */
+struct stm32_fmc2_prop {
+ const char *name;
+ bool bprop;
+ bool mprop;
+ int reg_type;
+ u32 reg_mask;
+ u32 reset_val;
+ int (*check)(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop, int cs);
+ u32 (*calculate)(struct stm32_fmc2_ebi *ebi, int cs, u32 setup);
+ int (*set)(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs, u32 setup);
+};
+
+static int stm32_fmc2_ebi_check_mux(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs)
+{
+ u32 bcr;
+
+ regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+
+ if (bcr & FMC2_BCR_MTYP)
+ return 0;
+
+ return -EINVAL;
+}
+
+static int stm32_fmc2_ebi_check_waitcfg(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs)
+{
+ u32 bcr, val = FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR);
+
+ regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+
+ if ((bcr & FMC2_BCR_MTYP) == val && bcr & FMC2_BCR_BURSTEN)
+ return 0;
+
+ return -EINVAL;
+}
+
+static int stm32_fmc2_ebi_check_sync_trans(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs)
+{
+ u32 bcr;
+
+ regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+
+ if (bcr & FMC2_BCR_BURSTEN)
+ return 0;
+
+ return -EINVAL;
+}
+
+static int stm32_fmc2_ebi_check_async_trans(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs)
+{
+ u32 bcr;
+
+ regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+
+ if (!(bcr & FMC2_BCR_BURSTEN) || !(bcr & FMC2_BCR_CBURSTRW))
+ return 0;
+
+ return -EINVAL;
+}
+
+static int stm32_fmc2_ebi_check_cpsize(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs)
+{
+ u32 bcr, val = FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_PSRAM);
+
+ regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+
+ if ((bcr & FMC2_BCR_MTYP) == val && bcr & FMC2_BCR_BURSTEN)
+ return 0;
+
+ return -EINVAL;
+}
+
+static int stm32_fmc2_ebi_check_address_hold(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs)
+{
+ u32 bcr, bxtr, val = FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_D);
+
+ regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (prop->reg_type == FMC2_REG_BWTR)
+ regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr);
+ else
+ regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr);
+
+ if ((!(bcr & FMC2_BCR_BURSTEN) || !(bcr & FMC2_BCR_CBURSTRW)) &&
+ ((bxtr & FMC2_BXTR_ACCMOD) == val || bcr & FMC2_BCR_MUXEN))
+ return 0;
+
+ return -EINVAL;
+}
+
+static int stm32_fmc2_ebi_check_clk_period(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs)
+{
+ u32 bcr, bcr1;
+
+ regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (cs)
+ regmap_read(ebi->regmap, FMC2_BCR1, &bcr1);
+ else
+ bcr1 = bcr;
+
+ if (bcr & FMC2_BCR_BURSTEN && (!cs || !(bcr1 & FMC2_BCR1_CCLKEN)))
+ return 0;
+
+ return -EINVAL;
+}
+
+static int stm32_fmc2_ebi_check_cclk(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs)
+{
+ if (cs)
+ return -EINVAL;
+
+ return stm32_fmc2_ebi_check_sync_trans(ebi, prop, cs);
+}
+
+static u32 stm32_fmc2_ebi_ns_to_clock_cycles(struct stm32_fmc2_ebi *ebi,
+ int cs, u32 setup)
+{
+ unsigned long hclk = clk_get_rate(ebi->clk);
+ unsigned long hclkp = NSEC_PER_SEC / (hclk / 1000);
+
+ return DIV_ROUND_UP(setup * 1000, hclkp);
+}
+
+static u32 stm32_fmc2_ebi_ns_to_clk_period(struct stm32_fmc2_ebi *ebi,
+ int cs, u32 setup)
+{
+ u32 nb_clk_cycles = stm32_fmc2_ebi_ns_to_clock_cycles(ebi, cs, setup);
+ u32 bcr, btr, clk_period;
+
+ regmap_read(ebi->regmap, FMC2_BCR1, &bcr);
+ if (bcr & FMC2_BCR1_CCLKEN || !cs)
+ regmap_read(ebi->regmap, FMC2_BTR1, &btr);
+ else
+ regmap_read(ebi->regmap, FMC2_BTR(cs), &btr);
+
+ clk_period = FIELD_GET(FMC2_BTR_CLKDIV, btr) + 1;
+
+ return DIV_ROUND_UP(nb_clk_cycles, clk_period);
+}
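
To make the unit handling above concrete: hclkp is the HCLK period in
picoseconds (NSEC_PER_SEC divided by the rate in kHz), so a timing given in
nanoseconds is multiplied by 1000 before the rounded-up division. A standalone
check, assuming a 133 MHz HCLK (example value only):

	#include <stdio.h>

	int main(void)
	{
		unsigned long hclk = 133000000UL;		/* Hz, assumed */
		unsigned long hclkp = 1000000000UL / (hclk / 1000); /* ps */
		unsigned long setup_ns = 60;
		/* DIV_ROUND_UP(setup * 1000, hclkp) */
		unsigned long cycles = (setup_ns * 1000 + hclkp - 1) / hclkp;

		printf("hclkp = %lu ps, %lu ns -> %lu cycles\n",
		       hclkp, setup_ns, cycles);	/* 7518 ps, 60 ns -> 8 */
		return 0;
	}
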
+
+static int stm32_fmc2_ebi_get_reg(int reg_type, int cs, u32 *reg)
+{
+ switch (reg_type) {
+ case FMC2_REG_BCR:
+ *reg = FMC2_BCR(cs);
+ break;
+ case FMC2_REG_BTR:
+ *reg = FMC2_BTR(cs);
+ break;
+ case FMC2_REG_BWTR:
+ *reg = FMC2_BWTR(cs);
+ break;
+ case FMC2_REG_PCSCNTR:
+ *reg = FMC2_PCSCNTR;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int stm32_fmc2_ebi_set_bit_field(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs, u32 setup)
+{
+ u32 reg;
+ int ret;
+
+ ret = stm32_fmc2_ebi_get_reg(prop->reg_type, cs, &reg);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(ebi->regmap, reg, prop->reg_mask,
+ setup ? prop->reg_mask : 0);
+
+ return 0;
+}
+
+static int stm32_fmc2_ebi_set_trans_type(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs, u32 setup)
+{
+ u32 bcr_mask, bcr = FMC2_BCR_WREN;
+ u32 btr_mask, btr = 0;
+ u32 bwtr_mask, bwtr = 0;
+
+ bwtr_mask = FMC2_BXTR_ACCMOD;
+ btr_mask = FMC2_BXTR_ACCMOD;
+ bcr_mask = FMC2_BCR_MUXEN | FMC2_BCR_MTYP | FMC2_BCR_FACCEN |
+ FMC2_BCR_WREN | FMC2_BCR_WAITEN | FMC2_BCR_BURSTEN |
+ FMC2_BCR_EXTMOD | FMC2_BCR_CBURSTRW;
+
+ switch (setup) {
+ case FMC2_ASYNC_MODE_1_SRAM:
+ bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_SRAM);
+ /*
+ * MUXEN = 0, MTYP = 0, FACCEN = 0, BURSTEN = 0, WAITEN = 0,
+ * WREN = 1, EXTMOD = 0, CBURSTRW = 0, ACCMOD = 0
+ */
+ break;
+ case FMC2_ASYNC_MODE_1_PSRAM:
+ /*
+ * MUXEN = 0, MTYP = 1, FACCEN = 0, BURSTEN = 0, WAITEN = 0,
+ * WREN = 1, EXTMOD = 0, CBURSTRW = 0, ACCMOD = 0
+ */
+ bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_PSRAM);
+ break;
+ case FMC2_ASYNC_MODE_A_SRAM:
+ /*
+ * MUXEN = 0, MTYP = 0, FACCEN = 0, BURSTEN = 0, WAITEN = 0,
+ * WREN = 1, EXTMOD = 1, CBURSTRW = 0, ACCMOD = 0
+ */
+ bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_SRAM);
+ bcr |= FMC2_BCR_EXTMOD;
+ btr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_A);
+ bwtr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_A);
+ break;
+ case FMC2_ASYNC_MODE_A_PSRAM:
+ /*
+ * MUXEN = 0, MTYP = 1, FACCEN = 0, BURSTEN = 0, WAITEN = 0,
+ * WREN = 1, EXTMOD = 1, CBURSTRW = 0, ACCMOD = 0
+ */
+ bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_PSRAM);
+ bcr |= FMC2_BCR_EXTMOD;
+ btr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_A);
+ bwtr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_A);
+ break;
+ case FMC2_ASYNC_MODE_2_NOR:
+ /*
+ * MUXEN = 0, MTYP = 2, FACCEN = 1, BURSTEN = 0, WAITEN = 0,
+ * WREN = 1, EXTMOD = 0, CBURSTRW = 0, ACCMOD = 0
+ */
+ bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR);
+ bcr |= FMC2_BCR_FACCEN;
+ break;
+ case FMC2_ASYNC_MODE_B_NOR:
+ /*
+ * MUXEN = 0, MTYP = 2, FACCEN = 1, BURSTEN = 0, WAITEN = 0,
+ * WREN = 1, EXTMOD = 1, CBURSTRW = 0, ACCMOD = 1
+ */
+ bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR);
+ bcr |= FMC2_BCR_FACCEN | FMC2_BCR_EXTMOD;
+ btr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_B);
+ bwtr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_B);
+ break;
+ case FMC2_ASYNC_MODE_C_NOR:
+ /*
+ * MUXEN = 0, MTYP = 2, FACCEN = 1, BURSTEN = 0, WAITEN = 0,
+ * WREN = 1, EXTMOD = 1, CBURSTRW = 0, ACCMOD = 2
+ */
+ bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR);
+ bcr |= FMC2_BCR_FACCEN | FMC2_BCR_EXTMOD;
+ btr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_C);
+ bwtr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_C);
+ break;
+ case FMC2_ASYNC_MODE_D_NOR:
+ /*
+ * MUXEN = 0, MTYP = 2, FACCEN = 1, BURSTEN = 0, WAITEN = 0,
+ * WREN = 1, EXTMOD = 1, CBURSTRW = 0, ACCMOD = 3
+ */
+ bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR);
+ bcr |= FMC2_BCR_FACCEN | FMC2_BCR_EXTMOD;
+ btr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_D);
+ bwtr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_D);
+ break;
+ case FMC2_SYNC_READ_SYNC_WRITE_PSRAM:
+ /*
+ * MUXEN = 0, MTYP = 1, FACCEN = 0, BURSTEN = 1, WAITEN = 0,
+ * WREN = 1, EXTMOD = 0, CBURSTRW = 1, ACCMOD = 0
+ */
+ bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_PSRAM);
+ bcr |= FMC2_BCR_BURSTEN | FMC2_BCR_CBURSTRW;
+ break;
+ case FMC2_SYNC_READ_ASYNC_WRITE_PSRAM:
+ /*
+ * MUXEN = 0, MTYP = 1, FACCEN = 0, BURSTEN = 1, WAITEN = 0,
+ * WREN = 1, EXTMOD = 0, CBURSTRW = 0, ACCMOD = 0
+ */
+ bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_PSRAM);
+ bcr |= FMC2_BCR_BURSTEN;
+ break;
+ case FMC2_SYNC_READ_SYNC_WRITE_NOR:
+ /*
+ * MUXEN = 0, MTYP = 2, FACCEN = 1, BURSTEN = 1, WAITEN = 0,
+ * WREN = 1, EXTMOD = 0, CBURSTRW = 1, ACCMOD = 0
+ */
+ bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR);
+ bcr |= FMC2_BCR_FACCEN | FMC2_BCR_BURSTEN | FMC2_BCR_CBURSTRW;
+ break;
+ case FMC2_SYNC_READ_ASYNC_WRITE_NOR:
+ /*
+ * MUXEN = 0, MTYP = 2, FACCEN = 1, BURSTEN = 1, WAITEN = 0,
+ * WREN = 1, EXTMOD = 0, CBURSTRW = 0, ACCMOD = 0
+ */
+ bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR);
+ bcr |= FMC2_BCR_FACCEN | FMC2_BCR_BURSTEN;
+ break;
+ default:
+ /* Type of transaction not supported */
+ return -EINVAL;
+ }
+
+ if (bcr & FMC2_BCR_EXTMOD)
+ regmap_update_bits(ebi->regmap, FMC2_BWTR(cs),
+ bwtr_mask, bwtr);
+ regmap_update_bits(ebi->regmap, FMC2_BTR(cs), btr_mask, btr);
+ regmap_update_bits(ebi->regmap, FMC2_BCR(cs), bcr_mask, bcr);
+
+ return 0;
+}
+
+static int stm32_fmc2_ebi_set_buswidth(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs, u32 setup)
+{
+ u32 val;
+
+ switch (setup) {
+ case FMC2_BUSWIDTH_8:
+ val = FIELD_PREP(FMC2_BCR_MWID, FMC2_BCR_MWID_8);
+ break;
+ case FMC2_BUSWIDTH_16:
+ val = FIELD_PREP(FMC2_BCR_MWID, FMC2_BCR_MWID_16);
+ break;
+ default:
+ /* Buswidth not supported */
+ return -EINVAL;
+ }
+
+ regmap_update_bits(ebi->regmap, FMC2_BCR(cs), FMC2_BCR_MWID, val);
+
+ return 0;
+}
+
+static int stm32_fmc2_ebi_set_cpsize(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs, u32 setup)
+{
+ u32 val;
+
+ switch (setup) {
+ case FMC2_CPSIZE_0:
+ val = FIELD_PREP(FMC2_BCR_CPSIZE, FMC2_BCR_CPSIZE_0);
+ break;
+ case FMC2_CPSIZE_128:
+ val = FIELD_PREP(FMC2_BCR_CPSIZE, FMC2_BCR_CPSIZE_128);
+ break;
+ case FMC2_CPSIZE_256:
+ val = FIELD_PREP(FMC2_BCR_CPSIZE, FMC2_BCR_CPSIZE_256);
+ break;
+ case FMC2_CPSIZE_512:
+ val = FIELD_PREP(FMC2_BCR_CPSIZE, FMC2_BCR_CPSIZE_512);
+ break;
+ case FMC2_CPSIZE_1024:
+ val = FIELD_PREP(FMC2_BCR_CPSIZE, FMC2_BCR_CPSIZE_1024);
+ break;
+ default:
+ /* Cpsize not supported */
+ return -EINVAL;
+ }
+
+ regmap_update_bits(ebi->regmap, FMC2_BCR(cs), FMC2_BCR_CPSIZE, val);
+
+ return 0;
+}
+
+static int stm32_fmc2_ebi_set_bl_setup(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs, u32 setup)
+{
+ u32 val;
+
+ val = min_t(u32, setup, FMC2_BCR_NBLSET_MAX);
+ val = FIELD_PREP(FMC2_BCR_NBLSET, val);
+ regmap_update_bits(ebi->regmap, FMC2_BCR(cs), FMC2_BCR_NBLSET, val);
+
+ return 0;
+}
+
+static int stm32_fmc2_ebi_set_address_setup(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs, u32 setup)
+{
+ u32 bcr, bxtr, reg;
+ u32 val = FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_D);
+ int ret;
+
+ ret = stm32_fmc2_ebi_get_reg(prop->reg_type, cs, &reg);
+ if (ret)
+ return ret;
+
+ regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (prop->reg_type == FMC2_REG_BWTR)
+ regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr);
+ else
+ regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr);
+
+ if ((bxtr & FMC2_BXTR_ACCMOD) == val || bcr & FMC2_BCR_MUXEN)
+ val = clamp_val(setup, 1, FMC2_BXTR_ADDSET_MAX);
+ else
+ val = min_t(u32, setup, FMC2_BXTR_ADDSET_MAX);
+ val = FIELD_PREP(FMC2_BXTR_ADDSET, val);
+ regmap_update_bits(ebi->regmap, reg, FMC2_BXTR_ADDSET, val);
+
+ return 0;
+}
+
+static int stm32_fmc2_ebi_set_address_hold(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs, u32 setup)
+{
+ u32 val, reg;
+ int ret;
+
+ ret = stm32_fmc2_ebi_get_reg(prop->reg_type, cs, &reg);
+ if (ret)
+ return ret;
+
+ val = clamp_val(setup, 1, FMC2_BXTR_ADDHLD_MAX);
+ val = FIELD_PREP(FMC2_BXTR_ADDHLD, val);
+ regmap_update_bits(ebi->regmap, reg, FMC2_BXTR_ADDHLD, val);
+
+ return 0;
+}
+
+static int stm32_fmc2_ebi_set_data_setup(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs, u32 setup)
+{
+ u32 val, reg;
+ int ret;
+
+ ret = stm32_fmc2_ebi_get_reg(prop->reg_type, cs, &reg);
+ if (ret)
+ return ret;
+
+ val = clamp_val(setup, 1, FMC2_BXTR_DATAST_MAX);
+ val = FIELD_PREP(FMC2_BXTR_DATAST, val);
+ regmap_update_bits(ebi->regmap, reg, FMC2_BXTR_DATAST, val);
+
+ return 0;
+}
+
+static int stm32_fmc2_ebi_set_bus_turnaround(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs, u32 setup)
+{
+ u32 val, reg;
+ int ret;
+
+ ret = stm32_fmc2_ebi_get_reg(prop->reg_type, cs, &reg);
+ if (ret)
+ return ret;
+
+ val = setup ? min_t(u32, setup - 1, FMC2_BXTR_BUSTURN_MAX) : 0;
+ val = FIELD_PREP(FMC2_BXTR_BUSTURN, val);
+ regmap_update_bits(ebi->regmap, reg, FMC2_BXTR_BUSTURN, val);
+
+ return 0;
+}
+
+static int stm32_fmc2_ebi_set_data_hold(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs, u32 setup)
+{
+ u32 val, reg;
+ int ret;
+
+ ret = stm32_fmc2_ebi_get_reg(prop->reg_type, cs, &reg);
+ if (ret)
+ return ret;
+
+ if (prop->reg_type == FMC2_REG_BWTR)
+ val = setup ? min_t(u32, setup - 1, FMC2_BXTR_DATAHLD_MAX) : 0;
+ else
+ val = min_t(u32, setup, FMC2_BXTR_DATAHLD_MAX);
+ val = FIELD_PREP(FMC2_BXTR_DATAHLD, val);
+ regmap_update_bits(ebi->regmap, reg, FMC2_BXTR_DATAHLD, val);
+
+ return 0;
+}
+
+static int stm32_fmc2_ebi_set_clk_period(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs, u32 setup)
+{
+ u32 val;
+
+ val = setup ? clamp_val(setup - 1, 1, FMC2_BTR_CLKDIV_MAX) : 1;
+ val = FIELD_PREP(FMC2_BTR_CLKDIV, val);
+ regmap_update_bits(ebi->regmap, FMC2_BTR(cs), FMC2_BTR_CLKDIV, val);
+
+ return 0;
+}
+
+static int stm32_fmc2_ebi_set_data_latency(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs, u32 setup)
+{
+ u32 val;
+
+ val = setup > 1 ? min_t(u32, setup - 2, FMC2_BTR_DATLAT_MAX) : 0;
+ val = FIELD_PREP(FMC2_BTR_DATLAT, val);
+ regmap_update_bits(ebi->regmap, FMC2_BTR(cs), FMC2_BTR_DATLAT, val);
+
+ return 0;
+}
+
+static int stm32_fmc2_ebi_set_max_low_pulse(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs, u32 setup)
+{
+ u32 old_val, new_val, pcscntr;
+
+ if (setup < 1)
+ return 0;
+
+ regmap_read(ebi->regmap, FMC2_PCSCNTR, &pcscntr);
+
+ /* Enable counter for the bank */
+ regmap_update_bits(ebi->regmap, FMC2_PCSCNTR,
+ FMC2_PCSCNTR_CNTBEN(cs),
+ FMC2_PCSCNTR_CNTBEN(cs));
+
+ new_val = min_t(u32, setup - 1, FMC2_PCSCNTR_CSCOUNT_MAX);
+ old_val = FIELD_GET(FMC2_PCSCNTR_CSCOUNT, pcscntr);
+ if (old_val && new_val > old_val)
+ /* Keep current counter value */
+ return 0;
+
+ new_val = FIELD_PREP(FMC2_PCSCNTR_CSCOUNT, new_val);
+ regmap_update_bits(ebi->regmap, FMC2_PCSCNTR,
+ FMC2_PCSCNTR_CSCOUNT, new_val);
+
+ return 0;
+}
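
Because FMC2_PCSCNTR is shared by all chip selects, the function above only
lowers the CSCOUNT field: once one bank has programmed a value, a larger
request from another bank is ignored. A small illustration with assumed
per-bank requests:

	/* Hypothetical per-bank max-low-pulse counts, in FMC2 clock cycles. */
	unsigned int requested[3] = { 200, 120, 180 };
	unsigned int cscount = 0;
	unsigned int i;

	for (i = 0; i < 3; i++)
		if (!cscount || requested[i] < cscount)
			cscount = requested[i]; /* keep the most constraining value */
	/* cscount == 120: the shared counter serves the strictest bank */
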
+
+static const struct stm32_fmc2_prop stm32_fmc2_child_props[] = {
+ /* st,fmc2-ebi-cs-transaction-type must be the first property */
+ {
+ .name = "st,fmc2-ebi-cs-transaction-type",
+ .mprop = true,
+ .set = stm32_fmc2_ebi_set_trans_type,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-cclk-enable",
+ .bprop = true,
+ .reg_type = FMC2_REG_BCR,
+ .reg_mask = FMC2_BCR1_CCLKEN,
+ .check = stm32_fmc2_ebi_check_cclk,
+ .set = stm32_fmc2_ebi_set_bit_field,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-mux-enable",
+ .bprop = true,
+ .reg_type = FMC2_REG_BCR,
+ .reg_mask = FMC2_BCR_MUXEN,
+ .check = stm32_fmc2_ebi_check_mux,
+ .set = stm32_fmc2_ebi_set_bit_field,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-buswidth",
+ .reset_val = FMC2_BUSWIDTH_16,
+ .set = stm32_fmc2_ebi_set_buswidth,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-waitpol-high",
+ .bprop = true,
+ .reg_type = FMC2_REG_BCR,
+ .reg_mask = FMC2_BCR_WAITPOL,
+ .set = stm32_fmc2_ebi_set_bit_field,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-waitcfg-enable",
+ .bprop = true,
+ .reg_type = FMC2_REG_BCR,
+ .reg_mask = FMC2_BCR_WAITCFG,
+ .check = stm32_fmc2_ebi_check_waitcfg,
+ .set = stm32_fmc2_ebi_set_bit_field,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-wait-enable",
+ .bprop = true,
+ .reg_type = FMC2_REG_BCR,
+ .reg_mask = FMC2_BCR_WAITEN,
+ .check = stm32_fmc2_ebi_check_sync_trans,
+ .set = stm32_fmc2_ebi_set_bit_field,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-asyncwait-enable",
+ .bprop = true,
+ .reg_type = FMC2_REG_BCR,
+ .reg_mask = FMC2_BCR_ASYNCWAIT,
+ .check = stm32_fmc2_ebi_check_async_trans,
+ .set = stm32_fmc2_ebi_set_bit_field,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-cpsize",
+ .check = stm32_fmc2_ebi_check_cpsize,
+ .set = stm32_fmc2_ebi_set_cpsize,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-byte-lane-setup-ns",
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_bl_setup,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-address-setup-ns",
+ .reg_type = FMC2_REG_BTR,
+ .reset_val = FMC2_BXTR_ADDSET_MAX,
+ .check = stm32_fmc2_ebi_check_async_trans,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_address_setup,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-address-hold-ns",
+ .reg_type = FMC2_REG_BTR,
+ .reset_val = FMC2_BXTR_ADDHLD_MAX,
+ .check = stm32_fmc2_ebi_check_address_hold,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_address_hold,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-data-setup-ns",
+ .reg_type = FMC2_REG_BTR,
+ .reset_val = FMC2_BXTR_DATAST_MAX,
+ .check = stm32_fmc2_ebi_check_async_trans,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_data_setup,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-bus-turnaround-ns",
+ .reg_type = FMC2_REG_BTR,
+ .reset_val = FMC2_BXTR_BUSTURN_MAX + 1,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_bus_turnaround,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-data-hold-ns",
+ .reg_type = FMC2_REG_BTR,
+ .check = stm32_fmc2_ebi_check_async_trans,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_data_hold,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-clk-period-ns",
+ .reset_val = FMC2_BTR_CLKDIV_MAX + 1,
+ .check = stm32_fmc2_ebi_check_clk_period,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_clk_period,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-data-latency-ns",
+ .check = stm32_fmc2_ebi_check_sync_trans,
+ .calculate = stm32_fmc2_ebi_ns_to_clk_period,
+ .set = stm32_fmc2_ebi_set_data_latency,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-write-address-setup-ns",
+ .reg_type = FMC2_REG_BWTR,
+ .reset_val = FMC2_BXTR_ADDSET_MAX,
+ .check = stm32_fmc2_ebi_check_async_trans,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_address_setup,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-write-address-hold-ns",
+ .reg_type = FMC2_REG_BWTR,
+ .reset_val = FMC2_BXTR_ADDHLD_MAX,
+ .check = stm32_fmc2_ebi_check_address_hold,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_address_hold,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-write-data-setup-ns",
+ .reg_type = FMC2_REG_BWTR,
+ .reset_val = FMC2_BXTR_DATAST_MAX,
+ .check = stm32_fmc2_ebi_check_async_trans,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_data_setup,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-write-bus-turnaround-ns",
+ .reg_type = FMC2_REG_BWTR,
+ .reset_val = FMC2_BXTR_BUSTURN_MAX + 1,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_bus_turnaround,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-write-data-hold-ns",
+ .reg_type = FMC2_REG_BWTR,
+ .check = stm32_fmc2_ebi_check_async_trans,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_data_hold,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-max-low-pulse-ns",
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_max_low_pulse,
+ },
+};
+
+static int stm32_fmc2_ebi_parse_prop(struct stm32_fmc2_ebi *ebi,
+ struct device_node *dev_node,
+ const struct stm32_fmc2_prop *prop,
+ int cs)
+{
+ struct device *dev = ebi->dev;
+ u32 setup = 0;
+
+ if (!prop->set) {
+ dev_err(dev, "property %s is not well defined\n", prop->name);
+ return -EINVAL;
+ }
+
+ if (prop->check && prop->check(ebi, prop, cs))
+ /* Skip this property */
+ return 0;
+
+ if (prop->bprop) {
+ bool bprop;
+
+ bprop = of_property_read_bool(dev_node, prop->name);
+ if (prop->mprop && !bprop) {
+ dev_err(dev, "mandatory property %s not defined in the device tree\n",
+ prop->name);
+ return -EINVAL;
+ }
+
+ if (bprop)
+ setup = 1;
+ } else {
+ u32 val;
+ int ret;
+
+ ret = of_property_read_u32(dev_node, prop->name, &val);
+ if (prop->mprop && ret) {
+ dev_err(dev, "mandatory property %s not defined in the device tree\n",
+ prop->name);
+ return ret;
+ }
+
+ if (ret)
+ setup = prop->reset_val;
+ else if (prop->calculate)
+ setup = prop->calculate(ebi, cs, val);
+ else
+ setup = val;
+ }
+
+ return prop->set(ebi, prop, cs, setup);
+}
+
+static void stm32_fmc2_ebi_enable_bank(struct stm32_fmc2_ebi *ebi, int cs)
+{
+ regmap_update_bits(ebi->regmap, FMC2_BCR(cs),
+ FMC2_BCR_MBKEN, FMC2_BCR_MBKEN);
+}
+
+static void stm32_fmc2_ebi_disable_bank(struct stm32_fmc2_ebi *ebi, int cs)
+{
+ regmap_update_bits(ebi->regmap, FMC2_BCR(cs), FMC2_BCR_MBKEN, 0);
+}
+
+static void stm32_fmc2_ebi_save_setup(struct stm32_fmc2_ebi *ebi)
+{
+ unsigned int cs;
+
+ for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) {
+ regmap_read(ebi->regmap, FMC2_BCR(cs), &ebi->bcr[cs]);
+ regmap_read(ebi->regmap, FMC2_BTR(cs), &ebi->btr[cs]);
+ regmap_read(ebi->regmap, FMC2_BWTR(cs), &ebi->bwtr[cs]);
+ }
+
+ regmap_read(ebi->regmap, FMC2_PCSCNTR, &ebi->pcscntr);
+}
+
+static void stm32_fmc2_ebi_set_setup(struct stm32_fmc2_ebi *ebi)
+{
+ unsigned int cs;
+
+ for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) {
+ regmap_write(ebi->regmap, FMC2_BCR(cs), ebi->bcr[cs]);
+ regmap_write(ebi->regmap, FMC2_BTR(cs), ebi->btr[cs]);
+ regmap_write(ebi->regmap, FMC2_BWTR(cs), ebi->bwtr[cs]);
+ }
+
+ regmap_write(ebi->regmap, FMC2_PCSCNTR, ebi->pcscntr);
+}
+
+static void stm32_fmc2_ebi_disable_banks(struct stm32_fmc2_ebi *ebi)
+{
+ unsigned int cs;
+
+ for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) {
+ if (!(ebi->bank_assigned & BIT(cs)))
+ continue;
+
+ stm32_fmc2_ebi_disable_bank(ebi, cs);
+ }
+}
+
+/* The NWAIT signal cannot be connected to both the EBI and NAND controllers */
+static bool stm32_fmc2_ebi_nwait_used_by_ctrls(struct stm32_fmc2_ebi *ebi)
+{
+ unsigned int cs;
+ u32 bcr;
+
+ for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) {
+ if (!(ebi->bank_assigned & BIT(cs)))
+ continue;
+
+ regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if ((bcr & FMC2_BCR_WAITEN || bcr & FMC2_BCR_ASYNCWAIT) &&
+ ebi->bank_assigned & BIT(FMC2_NAND))
+ return true;
+ }
+
+ return false;
+}
+
+static void stm32_fmc2_ebi_enable(struct stm32_fmc2_ebi *ebi)
+{
+ regmap_update_bits(ebi->regmap, FMC2_BCR1,
+ FMC2_BCR1_FMC2EN, FMC2_BCR1_FMC2EN);
+}
+
+static void stm32_fmc2_ebi_disable(struct stm32_fmc2_ebi *ebi)
+{
+ regmap_update_bits(ebi->regmap, FMC2_BCR1, FMC2_BCR1_FMC2EN, 0);
+}
+
+static int stm32_fmc2_ebi_setup_cs(struct stm32_fmc2_ebi *ebi,
+ struct device_node *dev_node,
+ u32 cs)
+{
+ unsigned int i;
+ int ret;
+
+ stm32_fmc2_ebi_disable_bank(ebi, cs);
+
+ for (i = 0; i < ARRAY_SIZE(stm32_fmc2_child_props); i++) {
+ const struct stm32_fmc2_prop *p = &stm32_fmc2_child_props[i];
+
+ ret = stm32_fmc2_ebi_parse_prop(ebi, dev_node, p, cs);
+ if (ret) {
+ dev_err(ebi->dev, "property %s could not be set: %d\n",
+ p->name, ret);
+ return ret;
+ }
+ }
+
+ stm32_fmc2_ebi_enable_bank(ebi, cs);
+
+ return 0;
+}
+
+static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi)
+{
+ struct device *dev = ebi->dev;
+ struct device_node *child;
+ bool child_found = false;
+ u32 bank;
+ int ret;
+
+ for_each_available_child_of_node(dev->of_node, child) {
+ ret = of_property_read_u32(child, "reg", &bank);
+ if (ret) {
+ dev_err(dev, "could not retrieve reg property: %d\n",
+ ret);
+ of_node_put(child);
+ return ret;
+ }
+
+ if (bank >= FMC2_MAX_BANKS) {
+ dev_err(dev, "invalid reg value: %d\n", bank);
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ if (ebi->bank_assigned & BIT(bank)) {
+ dev_err(dev, "bank already assigned: %d\n", bank);
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ if (bank < FMC2_MAX_EBI_CE) {
+ ret = stm32_fmc2_ebi_setup_cs(ebi, child, bank);
+ if (ret) {
+ dev_err(dev, "setup chip select %d failed: %d\n",
+ bank, ret);
+ of_node_put(child);
+ return ret;
+ }
+ }
+
+ ebi->bank_assigned |= BIT(bank);
+ child_found = true;
+ }
+
+ if (!child_found) {
+ dev_warn(dev, "no subnodes found, disable the driver.\n");
+ return -ENODEV;
+ }
+
+ if (stm32_fmc2_ebi_nwait_used_by_ctrls(ebi)) {
+ dev_err(dev, "NWAIT signal connected to EBI and NAND controllers\n");
+ return -EINVAL;
+ }
+
+ stm32_fmc2_ebi_enable(ebi);
+
+ return of_platform_populate(dev->of_node, NULL, NULL, dev);
+}
+
+static int stm32_fmc2_ebi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_fmc2_ebi *ebi;
+ struct reset_control *rstc;
+ int ret;
+
+ ebi = devm_kzalloc(&pdev->dev, sizeof(*ebi), GFP_KERNEL);
+ if (!ebi)
+ return -ENOMEM;
+
+ ebi->dev = dev;
+
+ ebi->regmap = device_node_to_regmap(dev->of_node);
+ if (IS_ERR(ebi->regmap))
+ return PTR_ERR(ebi->regmap);
+
+ ebi->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ebi->clk))
+ return PTR_ERR(ebi->clk);
+
+ rstc = devm_reset_control_get(dev, NULL);
+ if (PTR_ERR(rstc) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ ret = clk_prepare_enable(ebi->clk);
+ if (ret)
+ return ret;
+
+ if (!IS_ERR(rstc)) {
+ reset_control_assert(rstc);
+ reset_control_deassert(rstc);
+ }
+
+ ret = stm32_fmc2_ebi_parse_dt(ebi);
+ if (ret)
+ goto err_release;
+
+ stm32_fmc2_ebi_save_setup(ebi);
+ platform_set_drvdata(pdev, ebi);
+
+ return 0;
+
+err_release:
+ stm32_fmc2_ebi_disable_banks(ebi);
+ stm32_fmc2_ebi_disable(ebi);
+ clk_disable_unprepare(ebi->clk);
+
+ return ret;
+}
+
+static int stm32_fmc2_ebi_remove(struct platform_device *pdev)
+{
+ struct stm32_fmc2_ebi *ebi = platform_get_drvdata(pdev);
+
+ of_platform_depopulate(&pdev->dev);
+ stm32_fmc2_ebi_disable_banks(ebi);
+ stm32_fmc2_ebi_disable(ebi);
+ clk_disable_unprepare(ebi->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_fmc2_ebi_suspend(struct device *dev)
+{
+ struct stm32_fmc2_ebi *ebi = dev_get_drvdata(dev);
+
+ stm32_fmc2_ebi_disable(ebi);
+ clk_disable_unprepare(ebi->clk);
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_fmc2_ebi_resume(struct device *dev)
+{
+ struct stm32_fmc2_ebi *ebi = dev_get_drvdata(dev);
+ int ret;
+
+ pinctrl_pm_select_default_state(dev);
+
+ ret = clk_prepare_enable(ebi->clk);
+ if (ret)
+ return ret;
+
+ stm32_fmc2_ebi_set_setup(ebi);
+ stm32_fmc2_ebi_enable(ebi);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(stm32_fmc2_ebi_pm_ops, stm32_fmc2_ebi_suspend,
+ stm32_fmc2_ebi_resume);
+
+static const struct of_device_id stm32_fmc2_ebi_match[] = {
+ {.compatible = "st,stm32mp1-fmc2-ebi"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_fmc2_ebi_match);
+
+static struct platform_driver stm32_fmc2_ebi_driver = {
+ .probe = stm32_fmc2_ebi_probe,
+ .remove = stm32_fmc2_ebi_remove,
+ .driver = {
+ .name = "stm32_fmc2_ebi",
+ .of_match_table = stm32_fmc2_ebi_match,
+ .pm = &stm32_fmc2_ebi_pm_ops,
+ },
+};
+module_platform_driver(stm32_fmc2_ebi_driver);
+
+MODULE_ALIAS("platform:stm32_fmc2_ebi");
+MODULE_AUTHOR("Christophe Kerello <christophe.kerello@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 ebi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig
new file mode 100644
index 0000000000..3fe83d7c2b
--- /dev/null
+++ b/drivers/memory/tegra/Kconfig
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config TEGRA_MC
+ bool "NVIDIA Tegra Memory Controller support"
+ default y
+ depends on ARCH_TEGRA || (COMPILE_TEST && COMMON_CLK)
+ select INTERCONNECT
+ help
+ This driver supports the Memory Controller (MC) hardware found on
+ NVIDIA Tegra SoCs.
+
+if TEGRA_MC
+
+config TEGRA20_EMC
+ tristate "NVIDIA Tegra20 External Memory Controller driver"
+ default y
+ depends on ARCH_TEGRA_2x_SOC || COMPILE_TEST
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select PM_DEVFREQ
+ select DDR
+ help
+ This driver is for the External Memory Controller (EMC) found on
+ Tegra20 chips. The EMC controls the external DRAM on the board.
+ This driver is required to change memory timings / clock rate for
+ external memory.
+
+config TEGRA30_EMC
+ tristate "NVIDIA Tegra30 External Memory Controller driver"
+ default y
+ depends on ARCH_TEGRA_3x_SOC || COMPILE_TEST
+ select PM_OPP
+ select DDR
+ help
+ This driver is for the External Memory Controller (EMC) found on
+ Tegra30 chips. The EMC controls the external DRAM on the board.
+ This driver is required to change memory timings / clock rate for
+ external memory.
+
+config TEGRA124_EMC
+ tristate "NVIDIA Tegra124 External Memory Controller driver"
+ default y
+ depends on ARCH_TEGRA_124_SOC || COMPILE_TEST
+ select TEGRA124_CLK_EMC if ARCH_TEGRA
+ select PM_OPP
+ help
+ This driver is for the External Memory Controller (EMC) found on
+ Tegra124 chips. The EMC controls the external DRAM on the board.
+ This driver is required to change memory timings / clock rate for
+ external memory.
+
+config TEGRA210_EMC_TABLE
+ bool
+ depends on ARCH_TEGRA_210_SOC || COMPILE_TEST
+
+config TEGRA210_EMC
+ tristate "NVIDIA Tegra210 External Memory Controller driver"
+ depends on ARCH_TEGRA_210_SOC || COMPILE_TEST
+ select TEGRA210_EMC_TABLE
+ help
+ This driver is for the External Memory Controller (EMC) found on
+ Tegra210 chips. The EMC controls the external DRAM on the board.
+ This driver is required to change memory timings / clock rate for
+ external memory.
+
+endif
diff --git a/drivers/memory/tegra/Makefile b/drivers/memory/tegra/Makefile
new file mode 100644
index 0000000000..0750847dac
--- /dev/null
+++ b/drivers/memory/tegra/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+tegra-mc-y := mc.o
+
+tegra-mc-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra20.o
+tegra-mc-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra30.o
+tegra-mc-$(CONFIG_ARCH_TEGRA_114_SOC) += tegra114.o
+tegra-mc-$(CONFIG_ARCH_TEGRA_124_SOC) += tegra124.o
+tegra-mc-$(CONFIG_ARCH_TEGRA_132_SOC) += tegra124.o
+tegra-mc-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210.o
+tegra-mc-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186.o
+tegra-mc-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra186.o tegra194.o
+tegra-mc-$(CONFIG_ARCH_TEGRA_234_SOC) += tegra186.o tegra234.o
+
+obj-$(CONFIG_TEGRA_MC) += tegra-mc.o
+
+obj-$(CONFIG_TEGRA20_EMC) += tegra20-emc.o
+obj-$(CONFIG_TEGRA30_EMC) += tegra30-emc.o
+obj-$(CONFIG_TEGRA124_EMC) += tegra124-emc.o
+obj-$(CONFIG_TEGRA210_EMC_TABLE) += tegra210-emc-table.o
+obj-$(CONFIG_TEGRA210_EMC) += tegra210-emc.o
+obj-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186-emc.o
+obj-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra186-emc.o
+obj-$(CONFIG_ARCH_TEGRA_234_SOC) += tegra186-emc.o
+
+tegra210-emc-y := tegra210-emc-core.o tegra210-emc-cc-r21021.o
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
new file mode 100644
index 0000000000..67d6e70b4e
--- /dev/null
+++ b/drivers/memory/tegra/mc.c
@@ -0,0 +1,1046 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/tegra-icc.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "mc.h"
+
+static const struct of_device_id tegra_mc_of_match[] = {
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ { .compatible = "nvidia,tegra20-mc-gart", .data = &tegra20_mc_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+ { .compatible = "nvidia,tegra30-mc", .data = &tegra30_mc_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+ { .compatible = "nvidia,tegra114-mc", .data = &tegra114_mc_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+ { .compatible = "nvidia,tegra124-mc", .data = &tegra124_mc_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_132_SOC
+ { .compatible = "nvidia,tegra132-mc", .data = &tegra132_mc_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+ { .compatible = "nvidia,tegra210-mc", .data = &tegra210_mc_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_186_SOC
+ { .compatible = "nvidia,tegra186-mc", .data = &tegra186_mc_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_194_SOC
+ { .compatible = "nvidia,tegra194-mc", .data = &tegra194_mc_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_234_SOC
+ { .compatible = "nvidia,tegra234-mc", .data = &tegra234_mc_soc },
+#endif
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tegra_mc_of_match);
+
+static void tegra_mc_devm_action_put_device(void *data)
+{
+ struct tegra_mc *mc = data;
+
+ put_device(mc->dev);
+}
+
+/**
+ * devm_tegra_memory_controller_get() - get Tegra Memory Controller handle
+ * @dev: device pointer for the consumer device
+ *
+ * This function will search for the Memory Controller node in a device-tree
+ * and retrieve the Memory Controller handle.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a struct tegra_mc.
+ */
+struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct tegra_mc *mc;
+ int err;
+
+ np = of_parse_phandle(dev->of_node, "nvidia,memory-controller", 0);
+ if (!np)
+ return ERR_PTR(-ENOENT);
+
+ pdev = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ mc = platform_get_drvdata(pdev);
+ if (!mc) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ err = devm_add_action_or_reset(dev, tegra_mc_devm_action_put_device, mc);
+ if (err)
+ return ERR_PTR(err);
+
+ return mc;
+}
+EXPORT_SYMBOL_GPL(devm_tegra_memory_controller_get);
+
+int tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev)
+{
+ if (mc->soc->ops && mc->soc->ops->probe_device)
+ return mc->soc->ops->probe_device(mc, dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_mc_probe_device);
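
A consumer whose device-tree node carries an "nvidia,memory-controller"
phandle would typically pair these two helpers in its probe path; a hedged
sketch (the function name is illustrative):

	#include <soc/tegra/mc.h>

	static int example_client_probe(struct platform_device *pdev)
	{
		struct tegra_mc *mc;

		mc = devm_tegra_memory_controller_get(&pdev->dev);
		if (IS_ERR(mc))
			return PTR_ERR(mc); /* -ENOENT, -ENODEV or -EPROBE_DEFER */

		return tegra_mc_probe_device(mc, &pdev->dev);
	}
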
+
+int tegra_mc_get_carveout_info(struct tegra_mc *mc, unsigned int id,
+ phys_addr_t *base, u64 *size)
+{
+ u32 offset;
+
+ if (id < 1 || id >= mc->soc->num_carveouts)
+ return -EINVAL;
+
+ if (id < 6)
+ offset = 0xc0c + 0x50 * (id - 1);
+ else
+ offset = 0x2004 + 0x50 * (id - 6);
+
+ *base = mc_ch_readl(mc, MC_BROADCAST_CHANNEL, offset + 0x0);
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ *base |= (phys_addr_t)mc_ch_readl(mc, MC_BROADCAST_CHANNEL, offset + 0x4) << 32;
+#endif
+
+ if (size)
+ *size = mc_ch_readl(mc, MC_BROADCAST_CHANNEL, offset + 0x8) << 17;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_mc_get_carveout_info);
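
The offset arithmetic encodes two register banks: carveouts 1-5 live at a
0x50-byte stride from 0xc0c, carveouts 6 and up at the same stride from
0x2004. A standalone check of the formula:

	#include <stdio.h>

	static unsigned int carveout_offset(unsigned int id)
	{
		return id < 6 ? 0xc0c + 0x50 * (id - 1)
			      : 0x2004 + 0x50 * (id - 6);
	}

	int main(void)
	{
		printf("id=1 -> 0x%x\n", carveout_offset(1)); /* 0xc0c */
		printf("id=5 -> 0x%x\n", carveout_offset(5)); /* 0xd4c */
		printf("id=6 -> 0x%x\n", carveout_offset(6)); /* 0x2004 */
		return 0;
	}
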
+
+static int tegra_mc_block_dma_common(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst)
+{
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&mc->lock, flags);
+
+ value = mc_readl(mc, rst->control) | BIT(rst->bit);
+ mc_writel(mc, value, rst->control);
+
+ spin_unlock_irqrestore(&mc->lock, flags);
+
+ return 0;
+}
+
+static bool tegra_mc_dma_idling_common(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst)
+{
+ return (mc_readl(mc, rst->status) & BIT(rst->bit)) != 0;
+}
+
+static int tegra_mc_unblock_dma_common(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst)
+{
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&mc->lock, flags);
+
+ value = mc_readl(mc, rst->control) & ~BIT(rst->bit);
+ mc_writel(mc, value, rst->control);
+
+ spin_unlock_irqrestore(&mc->lock, flags);
+
+ return 0;
+}
+
+static int tegra_mc_reset_status_common(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst)
+{
+ return (mc_readl(mc, rst->control) & BIT(rst->bit)) != 0;
+}
+
+const struct tegra_mc_reset_ops tegra_mc_reset_ops_common = {
+ .block_dma = tegra_mc_block_dma_common,
+ .dma_idling = tegra_mc_dma_idling_common,
+ .unblock_dma = tegra_mc_unblock_dma_common,
+ .reset_status = tegra_mc_reset_status_common,
+};
+
+static inline struct tegra_mc *reset_to_mc(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct tegra_mc, reset);
+}
+
+static const struct tegra_mc_reset *tegra_mc_reset_find(struct tegra_mc *mc,
+ unsigned long id)
+{
+ unsigned int i;
+
+ for (i = 0; i < mc->soc->num_resets; i++)
+ if (mc->soc->resets[i].id == id)
+ return &mc->soc->resets[i];
+
+ return NULL;
+}
+
+static int tegra_mc_hotreset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct tegra_mc *mc = reset_to_mc(rcdev);
+ const struct tegra_mc_reset_ops *rst_ops;
+ const struct tegra_mc_reset *rst;
+ int retries = 500;
+ int err;
+
+ rst = tegra_mc_reset_find(mc, id);
+ if (!rst)
+ return -ENODEV;
+
+ rst_ops = mc->soc->reset_ops;
+ if (!rst_ops)
+ return -ENODEV;
+
+ /* DMA flushing will fail if reset is already asserted */
+ if (rst_ops->reset_status) {
+ /* check whether reset is asserted */
+ if (rst_ops->reset_status(mc, rst))
+ return 0;
+ }
+
+ if (rst_ops->block_dma) {
+ /* block clients DMA requests */
+ err = rst_ops->block_dma(mc, rst);
+ if (err) {
+ dev_err(mc->dev, "failed to block %s DMA: %d\n",
+ rst->name, err);
+ return err;
+ }
+ }
+
+ if (rst_ops->dma_idling) {
+ /* wait for completion of the outstanding DMA requests */
+ while (!rst_ops->dma_idling(mc, rst)) {
+ if (!retries--) {
+ dev_err(mc->dev, "failed to flush %s DMA\n",
+ rst->name);
+ return -EBUSY;
+ }
+
+ usleep_range(10, 100);
+ }
+ }
+
+ if (rst_ops->hotreset_assert) {
+ /* clear clients DMA requests sitting before arbitration */
+ err = rst_ops->hotreset_assert(mc, rst);
+ if (err) {
+ dev_err(mc->dev, "failed to hot reset %s: %d\n",
+ rst->name, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int tegra_mc_hotreset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct tegra_mc *mc = reset_to_mc(rcdev);
+ const struct tegra_mc_reset_ops *rst_ops;
+ const struct tegra_mc_reset *rst;
+ int err;
+
+ rst = tegra_mc_reset_find(mc, id);
+ if (!rst)
+ return -ENODEV;
+
+ rst_ops = mc->soc->reset_ops;
+ if (!rst_ops)
+ return -ENODEV;
+
+ if (rst_ops->hotreset_deassert) {
+ /* take out client from hot reset */
+ err = rst_ops->hotreset_deassert(mc, rst);
+ if (err) {
+ dev_err(mc->dev, "failed to deassert hot reset %s: %d\n",
+ rst->name, err);
+ return err;
+ }
+ }
+
+ if (rst_ops->unblock_dma) {
+ /* allow new DMA requests to proceed to arbitration */
+ err = rst_ops->unblock_dma(mc, rst);
+ if (err) {
+ dev_err(mc->dev, "failed to unblock %s DMA : %d\n",
+ rst->name, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int tegra_mc_hotreset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct tegra_mc *mc = reset_to_mc(rcdev);
+ const struct tegra_mc_reset_ops *rst_ops;
+ const struct tegra_mc_reset *rst;
+
+ rst = tegra_mc_reset_find(mc, id);
+ if (!rst)
+ return -ENODEV;
+
+ rst_ops = mc->soc->reset_ops;
+ if (!rst_ops)
+ return -ENODEV;
+
+ return rst_ops->reset_status(mc, rst);
+}
+
+static const struct reset_control_ops tegra_mc_reset_ops = {
+ .assert = tegra_mc_hotreset_assert,
+ .deassert = tegra_mc_hotreset_deassert,
+ .status = tegra_mc_hotreset_status,
+};
+
+static int tegra_mc_reset_setup(struct tegra_mc *mc)
+{
+ int err;
+
+ mc->reset.ops = &tegra_mc_reset_ops;
+ mc->reset.owner = THIS_MODULE;
+ mc->reset.of_node = mc->dev->of_node;
+ mc->reset.of_reset_n_cells = 1;
+ mc->reset.nr_resets = mc->soc->num_resets;
+
+ err = reset_controller_register(&mc->reset);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
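
Clients see this as an ordinary reset controller; asserting a reset walks the
block-DMA / idle-wait / hot-reset sequence implemented above. A hedged
consumer-side sketch using the generic reset API:

	#include <linux/reset.h>

	static int example_hot_reset(struct device *dev)
	{
		struct reset_control *rst;
		int err;

		rst = devm_reset_control_get_exclusive(dev, NULL);
		if (IS_ERR(rst))
			return PTR_ERR(rst);

		err = reset_control_assert(rst); /* blocks and flushes DMA */
		if (err)
			return err;

		return reset_control_deassert(rst); /* unblocks DMA again */
	}
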
+
+int tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate)
+{
+ unsigned int i;
+ struct tegra_mc_timing *timing = NULL;
+
+ for (i = 0; i < mc->num_timings; i++) {
+ if (mc->timings[i].rate == rate) {
+ timing = &mc->timings[i];
+ break;
+ }
+ }
+
+ if (!timing) {
+ dev_err(mc->dev, "no memory timing registered for rate %lu\n",
+ rate);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mc->soc->num_emem_regs; ++i)
+ mc_writel(mc, timing->emem_data[i], mc->soc->emem_regs[i]);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_mc_write_emem_configuration);
+
+unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc)
+{
+ u8 dram_count;
+
+ dram_count = mc_readl(mc, MC_EMEM_ADR_CFG);
+ dram_count &= MC_EMEM_ADR_CFG_EMEM_NUMDEV;
+ dram_count++;
+
+ return dram_count;
+}
+EXPORT_SYMBOL_GPL(tegra_mc_get_emem_device_count);
+
+#if defined(CONFIG_ARCH_TEGRA_3x_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_114_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_124_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_132_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_210_SOC)
+static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
+{
+ unsigned long long tick;
+ unsigned int i;
+ u32 value;
+
+ /* compute the number of MC clock cycles per tick */
+ tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk);
+ do_div(tick, NSEC_PER_SEC);
+
+ value = mc_readl(mc, MC_EMEM_ARB_CFG);
+ value &= ~MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK;
+ value |= MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(tick);
+ mc_writel(mc, value, MC_EMEM_ARB_CFG);
+
+ /* write latency allowance defaults */
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ const struct tegra_mc_client *client = &mc->soc->clients[i];
+ u32 value;
+
+ value = mc_readl(mc, client->regs.la.reg);
+ value &= ~(client->regs.la.mask << client->regs.la.shift);
+ value |= (client->regs.la.def & client->regs.la.mask) << client->regs.la.shift;
+ mc_writel(mc, value, client->regs.la.reg);
+ }
+
+ /* latch new values */
+ mc_writel(mc, MC_TIMING_UPDATE, MC_TIMING_CONTROL);
+
+ return 0;
+}
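
The tick conversion above turns the 30 ns MC tick into MC clock cycles via
tick_ns * rate / NSEC_PER_SEC. Worked through with an assumed 408 MHz MC
clock (example value only):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long tick = 30ULL * 408000000ULL; /* ns * Hz */

		tick /= 1000000000ULL;	/* do_div(tick, NSEC_PER_SEC) */
		printf("cycles per update tick = %llu\n", tick); /* 12 */
		return 0;
	}
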
+
+static int load_one_timing(struct tegra_mc *mc,
+ struct tegra_mc_timing *timing,
+ struct device_node *node)
+{
+ int err;
+ u32 tmp;
+
+ err = of_property_read_u32(node, "clock-frequency", &tmp);
+ if (err) {
+ dev_err(mc->dev,
+ "timing %pOFn: failed to read rate\n", node);
+ return err;
+ }
+
+ timing->rate = tmp;
+ timing->emem_data = devm_kcalloc(mc->dev, mc->soc->num_emem_regs,
+ sizeof(u32), GFP_KERNEL);
+ if (!timing->emem_data)
+ return -ENOMEM;
+
+ err = of_property_read_u32_array(node, "nvidia,emem-configuration",
+ timing->emem_data,
+ mc->soc->num_emem_regs);
+ if (err) {
+ dev_err(mc->dev,
+ "timing %pOFn: failed to read EMEM configuration\n",
+ node);
+ return err;
+ }
+
+ return 0;
+}
+
+static int load_timings(struct tegra_mc *mc, struct device_node *node)
+{
+ struct device_node *child;
+ struct tegra_mc_timing *timing;
+ int child_count = of_get_child_count(node);
+ int i = 0, err;
+
+ mc->timings = devm_kcalloc(mc->dev, child_count, sizeof(*timing),
+ GFP_KERNEL);
+ if (!mc->timings)
+ return -ENOMEM;
+
+ mc->num_timings = child_count;
+
+ for_each_child_of_node(node, child) {
+ timing = &mc->timings[i++];
+
+ err = load_one_timing(mc, timing, child);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int tegra_mc_setup_timings(struct tegra_mc *mc)
+{
+ struct device_node *node;
+ u32 ram_code, node_ram_code;
+ int err;
+
+ ram_code = tegra_read_ram_code();
+
+ mc->num_timings = 0;
+
+ for_each_child_of_node(mc->dev->of_node, node) {
+ err = of_property_read_u32(node, "nvidia,ram-code",
+ &node_ram_code);
+ if (err || (node_ram_code != ram_code))
+ continue;
+
+ err = load_timings(mc, node);
+ of_node_put(node);
+ if (err)
+ return err;
+ break;
+ }
+
+ if (mc->num_timings == 0)
+ dev_warn(mc->dev,
+ "no memory timings for RAM code %u registered\n",
+ ram_code);
+
+ return 0;
+}
+
+int tegra30_mc_probe(struct tegra_mc *mc)
+{
+ int err;
+
+ mc->clk = devm_clk_get_optional(mc->dev, "mc");
+ if (IS_ERR(mc->clk)) {
+ dev_err(mc->dev, "failed to get MC clock: %ld\n", PTR_ERR(mc->clk));
+ return PTR_ERR(mc->clk);
+ }
+
+ /* ensure that debug features are disabled */
+ mc_writel(mc, 0x00000000, MC_TIMING_CONTROL_DBG);
+
+ err = tegra_mc_setup_latency_allowance(mc);
+ if (err < 0) {
+ dev_err(mc->dev, "failed to setup latency allowance: %d\n", err);
+ return err;
+ }
+
+ err = tegra_mc_setup_timings(mc);
+ if (err < 0) {
+ dev_err(mc->dev, "failed to setup timings: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+const struct tegra_mc_ops tegra30_mc_ops = {
+ .probe = tegra30_mc_probe,
+ .handle_irq = tegra30_mc_handle_irq,
+};
+#endif
+
+static int mc_global_intstatus_to_channel(const struct tegra_mc *mc, u32 status,
+ unsigned int *mc_channel)
+{
+ if ((status & mc->soc->ch_intmask) == 0)
+ return -EINVAL;
+
+ *mc_channel = __ffs((status & mc->soc->ch_intmask) >>
+ mc->soc->global_intstatus_channel_shift);
+
+ return 0;
+}
+
+static u32 mc_channel_to_global_intstatus(const struct tegra_mc *mc,
+ unsigned int channel)
+{
+ return BIT(channel) << mc->soc->global_intstatus_channel_shift;
+}
+
+irqreturn_t tegra30_mc_handle_irq(int irq, void *data)
+{
+ struct tegra_mc *mc = data;
+ unsigned int bit, channel;
+ unsigned long status;
+
+ if (mc->soc->num_channels) {
+ u32 global_status;
+ int err;
+
+ global_status = mc_ch_readl(mc, MC_BROADCAST_CHANNEL, MC_GLOBAL_INTSTATUS);
+ err = mc_global_intstatus_to_channel(mc, global_status, &channel);
+ if (err < 0) {
+ dev_err_ratelimited(mc->dev, "unknown interrupt channel 0x%08x\n",
+ global_status);
+ return IRQ_NONE;
+ }
+
+ /* mask all interrupts to avoid flooding */
+ status = mc_ch_readl(mc, channel, MC_INTSTATUS) & mc->soc->intmask;
+ } else {
+ status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask;
+ }
+
+ if (!status)
+ return IRQ_NONE;
+
+ for_each_set_bit(bit, &status, 32) {
+ const char *error = tegra_mc_status_names[bit] ?: "unknown";
+ const char *client = "unknown", *desc;
+ const char *direction, *secure;
+ u32 status_reg, addr_reg;
+ u32 intmask = BIT(bit);
+ phys_addr_t addr = 0;
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ u32 addr_hi_reg = 0;
+#endif
+ unsigned int i;
+ char perm[7];
+ u8 id, type;
+ u32 value;
+
+ switch (intmask) {
+ case MC_INT_DECERR_VPR:
+ status_reg = MC_ERR_VPR_STATUS;
+ addr_reg = MC_ERR_VPR_ADR;
+ break;
+
+ case MC_INT_SECERR_SEC:
+ status_reg = MC_ERR_SEC_STATUS;
+ addr_reg = MC_ERR_SEC_ADR;
+ break;
+
+ case MC_INT_DECERR_MTS:
+ status_reg = MC_ERR_MTS_STATUS;
+ addr_reg = MC_ERR_MTS_ADR;
+ break;
+
+ case MC_INT_DECERR_GENERALIZED_CARVEOUT:
+ status_reg = MC_ERR_GENERALIZED_CARVEOUT_STATUS;
+ addr_reg = MC_ERR_GENERALIZED_CARVEOUT_ADR;
+ break;
+
+ case MC_INT_DECERR_ROUTE_SANITY:
+ status_reg = MC_ERR_ROUTE_SANITY_STATUS;
+ addr_reg = MC_ERR_ROUTE_SANITY_ADR;
+ break;
+
+ default:
+ status_reg = MC_ERR_STATUS;
+ addr_reg = MC_ERR_ADR;
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (mc->soc->has_addr_hi_reg)
+ addr_hi_reg = MC_ERR_ADR_HI;
+#endif
+ break;
+ }
+
+ if (mc->soc->num_channels)
+ value = mc_ch_readl(mc, channel, status_reg);
+ else
+ value = mc_readl(mc, status_reg);
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (mc->soc->num_address_bits > 32) {
+ if (addr_hi_reg) {
+ if (mc->soc->num_channels)
+ addr = mc_ch_readl(mc, channel, addr_hi_reg);
+ else
+ addr = mc_readl(mc, addr_hi_reg);
+ } else {
+ addr = ((value >> MC_ERR_STATUS_ADR_HI_SHIFT) &
+ MC_ERR_STATUS_ADR_HI_MASK);
+ }
+ addr <<= 32;
+ }
+#endif
+
+ if (value & MC_ERR_STATUS_RW)
+ direction = "write";
+ else
+ direction = "read";
+
+ if (value & MC_ERR_STATUS_SECURITY)
+ secure = "secure ";
+ else
+ secure = "";
+
+ id = value & mc->soc->client_id_mask;
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ if (mc->soc->clients[i].id == id) {
+ client = mc->soc->clients[i].name;
+ break;
+ }
+ }
+
+ type = (value & MC_ERR_STATUS_TYPE_MASK) >>
+ MC_ERR_STATUS_TYPE_SHIFT;
+ desc = tegra_mc_error_names[type];
+
+ switch (value & MC_ERR_STATUS_TYPE_MASK) {
+ case MC_ERR_STATUS_TYPE_INVALID_SMMU_PAGE:
+ perm[0] = ' ';
+ perm[1] = '[';
+
+ if (value & MC_ERR_STATUS_READABLE)
+ perm[2] = 'R';
+ else
+ perm[2] = '-';
+
+ if (value & MC_ERR_STATUS_WRITABLE)
+ perm[3] = 'W';
+ else
+ perm[3] = '-';
+
+ if (value & MC_ERR_STATUS_NONSECURE)
+ perm[4] = '-';
+ else
+ perm[4] = 'S';
+
+ perm[5] = ']';
+ perm[6] = '\0';
+ break;
+
+ default:
+ perm[0] = '\0';
+ break;
+ }
+
+ if (mc->soc->num_channels)
+ value = mc_ch_readl(mc, channel, addr_reg);
+ else
+ value = mc_readl(mc, addr_reg);
+ addr |= value;
+
+ dev_err_ratelimited(mc->dev, "%s: %s%s @%pa: %s (%s%s)\n",
+ client, secure, direction, &addr, error,
+ desc, perm);
+ }
+
+ /* clear interrupts */
+ if (mc->soc->num_channels) {
+ mc_ch_writel(mc, channel, status, MC_INTSTATUS);
+ mc_ch_writel(mc, MC_BROADCAST_CHANNEL,
+ mc_channel_to_global_intstatus(mc, channel),
+ MC_GLOBAL_INTSTATUS);
+ } else {
+ mc_writel(mc, status, MC_INTSTATUS);
+ }
+
+ return IRQ_HANDLED;
+}
+
+const char *const tegra_mc_status_names[32] = {
+ [ 1] = "External interrupt",
+ [ 6] = "EMEM address decode error",
+ [ 7] = "GART page fault",
+ [ 8] = "Security violation",
+ [ 9] = "EMEM arbitration error",
+ [10] = "Page fault",
+ [11] = "Invalid APB ASID update",
+ [12] = "VPR violation",
+ [13] = "Secure carveout violation",
+ [16] = "MTS carveout violation",
+ [17] = "Generalized carveout violation",
+ [20] = "Route Sanity error",
+};
+
+const char *const tegra_mc_error_names[8] = {
+ [2] = "EMEM decode error",
+ [3] = "TrustZone violation",
+ [4] = "Carveout violation",
+ [6] = "SMMU translation error",
+};
+
+struct icc_node *tegra_mc_icc_xlate(struct of_phandle_args *spec, void *data)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
+ struct icc_node *node;
+
+ list_for_each_entry(node, &mc->provider.nodes, node_list) {
+ if (node->id == spec->args[0])
+ return node;
+ }
+
+ /*
+ * If a client driver calls devm_of_icc_get() before the MC driver
+ * is probed, then return EPROBE_DEFER to the client driver.
+ */
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+static int tegra_mc_icc_get(struct icc_node *node, u32 *average, u32 *peak)
+{
+ *average = 0;
+ *peak = 0;
+
+ return 0;
+}
+
+static int tegra_mc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ return 0;
+}
+
+const struct tegra_mc_icc_ops tegra_mc_icc_ops = {
+ .xlate = tegra_mc_icc_xlate,
+ .aggregate = icc_std_aggregate,
+ .get_bw = tegra_mc_icc_get,
+ .set = tegra_mc_icc_set,
+};
+
+/*
+ * The Memory Controller (MC) has a number of Memory Clients that issue
+ * memory bandwidth allocation requests to the MC interconnect provider.
+ * The MC provider aggregates the requests and then sends the aggregated
+ * request up to the External Memory Controller (EMC) interconnect provider,
+ * which re-configures the hardware interface to the External Memory (EMEM)
+ * in accordance with the required bandwidth. Each MC interconnect node
+ * represents an individual Memory Client.
+ *
+ * Memory interconnect topology:
+ *
+ * +----+
+ * +--------+ | |
+ * | TEXSRD +--->+ |
+ * +--------+ | |
+ * | | +-----+ +------+
+ * ... | MC +--->+ EMC +--->+ EMEM |
+ * | | +-----+ +------+
+ * +--------+ | |
+ * | DISP.. +--->+ |
+ * +--------+ | |
+ * +----+
+ */
+static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
+{
+ struct icc_node *node;
+ unsigned int i;
+ int err;
+
+ /* older device-trees don't have interconnect properties */
+ if (!device_property_present(mc->dev, "#interconnect-cells") ||
+ !mc->soc->icc_ops)
+ return 0;
+
+ mc->provider.dev = mc->dev;
+ mc->provider.data = &mc->provider;
+ mc->provider.set = mc->soc->icc_ops->set;
+ mc->provider.aggregate = mc->soc->icc_ops->aggregate;
+ mc->provider.get_bw = mc->soc->icc_ops->get_bw;
+ mc->provider.xlate = mc->soc->icc_ops->xlate;
+ mc->provider.xlate_extended = mc->soc->icc_ops->xlate_extended;
+
+ icc_provider_init(&mc->provider);
+
+ /* create Memory Controller node */
+ node = icc_node_create(TEGRA_ICC_MC);
+ if (IS_ERR(node))
+ return PTR_ERR(node);
+
+ node->name = "Memory Controller";
+ icc_node_add(node, &mc->provider);
+
+ /* link Memory Controller to External Memory Controller */
+ err = icc_link_create(node, TEGRA_ICC_EMC);
+ if (err)
+ goto remove_nodes;
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ /* create MC client node */
+ node = icc_node_create(mc->soc->clients[i].id);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto remove_nodes;
+ }
+
+ node->name = mc->soc->clients[i].name;
+ icc_node_add(node, &mc->provider);
+
+ /* link Memory Client to Memory Controller */
+ err = icc_link_create(node, TEGRA_ICC_MC);
+ if (err)
+ goto remove_nodes;
+
+ node->data = (struct tegra_mc_client *)&(mc->soc->clients[i]);
+ }
+
+ err = icc_provider_register(&mc->provider);
+ if (err)
+ goto remove_nodes;
+
+ return 0;
+
+remove_nodes:
+ icc_nodes_remove(&mc->provider);
+
+ return err;
+}
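+
+/*
+ * Illustrative note (not part of this driver): icc_std_aggregate(), used
+ * by tegra_mc_icc_ops above, sums the average bandwidths of all requests
+ * and keeps the maximum peak, roughly:
+ *
+ *	*agg_avg += avg_bw;
+ *	*agg_peak = max(*agg_peak, peak_bw);
+ *
+ * So two clients each requesting 1 GB/s average yield a 2 GB/s average
+ * request towards the EMC, while peaks are not additive.
+ */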
+
+static void tegra_mc_num_channel_enabled(struct tegra_mc *mc)
+{
+ unsigned int i;
+ u32 value;
+
+ value = mc_ch_readl(mc, 0, MC_EMEM_ADR_CFG_CHANNEL_ENABLE);
+ if (value == 0) {
+ mc->num_channels = mc->soc->num_channels;
+ return;
+ }
+
+ for (i = 0; i < 32; i++) {
+ if (value & BIT(i))
+ mc->num_channels++;
+ }
+}
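+
+/*
+ * Illustrative note (not part of this driver): the loop above is a plain
+ * population count, so it could equivalently be written with the kernel
+ * helper from <linux/bitops.h>:
+ *
+ *	mc->num_channels = hweight32(value);
+ */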
+
+static int tegra_mc_probe(struct platform_device *pdev)
+{
+ struct tegra_mc *mc;
+ u64 mask;
+ int err;
+
+ mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
+ if (!mc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mc);
+ spin_lock_init(&mc->lock);
+ mc->soc = of_device_get_match_data(&pdev->dev);
+ mc->dev = &pdev->dev;
+
+ mask = DMA_BIT_MASK(mc->soc->num_address_bits);
+
+ err = dma_coerce_mask_and_coherent(&pdev->dev, mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
+ /* length of MC tick in nanoseconds */
+ mc->tick = 30;
+
+ mc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mc->regs))
+ return PTR_ERR(mc->regs);
+
+ mc->debugfs.root = debugfs_create_dir("mc", NULL);
+
+ if (mc->soc->ops && mc->soc->ops->probe) {
+ err = mc->soc->ops->probe(mc);
+ if (err < 0)
+ return err;
+ }
+
+ tegra_mc_num_channel_enabled(mc);
+
+ if (mc->soc->ops && mc->soc->ops->handle_irq) {
+ mc->irq = platform_get_irq(pdev, 0);
+ if (mc->irq < 0)
+ return mc->irq;
+
+ WARN(!mc->soc->client_id_mask, "missing client ID mask for this SoC\n");
+
+ if (mc->soc->num_channels)
+ mc_ch_writel(mc, MC_BROADCAST_CHANNEL, mc->soc->intmask,
+ MC_INTMASK);
+ else
+ mc_writel(mc, mc->soc->intmask, MC_INTMASK);
+
+ err = devm_request_irq(&pdev->dev, mc->irq, mc->soc->ops->handle_irq, 0,
+ dev_name(&pdev->dev), mc);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", mc->irq,
+ err);
+ return err;
+ }
+ }
+
+ if (mc->soc->reset_ops) {
+ err = tegra_mc_reset_setup(mc);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to register reset controller: %d\n", err);
+ }
+
+ err = tegra_mc_interconnect_setup(mc);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to initialize interconnect: %d\n",
+ err);
+
+ if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU) && mc->soc->smmu) {
+ mc->smmu = tegra_smmu_probe(&pdev->dev, mc->soc->smmu, mc);
+ if (IS_ERR(mc->smmu)) {
+ dev_err(&pdev->dev, "failed to probe SMMU: %ld\n",
+ PTR_ERR(mc->smmu));
+ mc->smmu = NULL;
+ }
+ }
+
+ if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && !mc->soc->smmu) {
+ mc->gart = tegra_gart_probe(&pdev->dev, mc);
+ if (IS_ERR(mc->gart)) {
+ dev_err(&pdev->dev, "failed to probe GART: %ld\n",
+ PTR_ERR(mc->gart));
+ mc->gart = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_mc_suspend(struct device *dev)
+{
+ struct tegra_mc *mc = dev_get_drvdata(dev);
+
+ if (mc->soc->ops && mc->soc->ops->suspend)
+ return mc->soc->ops->suspend(mc);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_mc_resume(struct device *dev)
+{
+ struct tegra_mc *mc = dev_get_drvdata(dev);
+
+ if (mc->soc->ops && mc->soc->ops->resume)
+ return mc->soc->ops->resume(mc);
+
+ return 0;
+}
+
+static void tegra_mc_sync_state(struct device *dev)
+{
+ struct tegra_mc *mc = dev_get_drvdata(dev);
+
+ /* check whether ICC provider is registered */
+ if (mc->provider.dev == dev)
+ icc_sync_state(dev);
+}
+
+static const struct dev_pm_ops tegra_mc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_mc_suspend, tegra_mc_resume)
+};
+
+static struct platform_driver tegra_mc_driver = {
+ .driver = {
+ .name = "tegra-mc",
+ .of_match_table = tegra_mc_of_match,
+ .pm = &tegra_mc_pm_ops,
+ .suppress_bind_attrs = true,
+ .sync_state = tegra_mc_sync_state,
+ },
+ .prevent_deferred_probe = true,
+ .probe = tegra_mc_probe,
+};
+
+static int tegra_mc_init(void)
+{
+ return platform_driver_register(&tegra_mc_driver);
+}
+arch_initcall(tegra_mc_init);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra Memory Controller driver");
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
new file mode 100644
index 0000000000..c3f6655bec
--- /dev/null
+++ b/drivers/memory/tegra/mc.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef MEMORY_TEGRA_MC_H
+#define MEMORY_TEGRA_MC_H
+
+#include <linux/bits.h>
+#include <linux/io.h>
+#include <linux/types.h>
+
+#include <soc/tegra/mc.h>
+
+#define MC_INTSTATUS 0x00
+#define MC_INTMASK 0x04
+#define MC_ERR_STATUS 0x08
+#define MC_ERR_ADR 0x0c
+#define MC_GART_ERROR_REQ 0x30
+#define MC_EMEM_ADR_CFG 0x54
+#define MC_DECERR_EMEM_OTHERS_STATUS 0x58
+#define MC_SECURITY_VIOLATION_STATUS 0x74
+#define MC_EMEM_ARB_CFG 0x90
+#define MC_EMEM_ARB_OUTSTANDING_REQ 0x94
+#define MC_EMEM_ARB_TIMING_RCD 0x98
+#define MC_EMEM_ARB_TIMING_RP 0x9c
+#define MC_EMEM_ARB_TIMING_RC 0xa0
+#define MC_EMEM_ARB_TIMING_RAS 0xa4
+#define MC_EMEM_ARB_TIMING_FAW 0xa8
+#define MC_EMEM_ARB_TIMING_RRD 0xac
+#define MC_EMEM_ARB_TIMING_RAP2PRE 0xb0
+#define MC_EMEM_ARB_TIMING_WAP2PRE 0xb4
+#define MC_EMEM_ARB_TIMING_R2R 0xb8
+#define MC_EMEM_ARB_TIMING_W2W 0xbc
+#define MC_EMEM_ARB_TIMING_R2W 0xc0
+#define MC_EMEM_ARB_TIMING_W2R 0xc4
+#define MC_EMEM_ARB_MISC2 0xc8
+#define MC_EMEM_ARB_DA_TURNS 0xd0
+#define MC_EMEM_ARB_DA_COVERS 0xd4
+#define MC_EMEM_ARB_MISC0 0xd8
+#define MC_EMEM_ARB_MISC1 0xdc
+#define MC_EMEM_ARB_RING1_THROTTLE 0xe0
+#define MC_EMEM_ARB_OVERRIDE 0xe8
+#define MC_TIMING_CONTROL_DBG 0xf8
+#define MC_TIMING_CONTROL 0xfc
+#define MC_ERR_VPR_STATUS 0x654
+#define MC_ERR_VPR_ADR 0x658
+#define MC_ERR_SEC_STATUS 0x67c
+#define MC_ERR_SEC_ADR 0x680
+#define MC_ERR_MTS_STATUS 0x9b0
+#define MC_ERR_MTS_ADR 0x9b4
+#define MC_ERR_ROUTE_SANITY_STATUS 0x9c0
+#define MC_ERR_ROUTE_SANITY_ADR 0x9c4
+#define MC_ERR_GENERALIZED_CARVEOUT_STATUS 0xc00
+#define MC_ERR_GENERALIZED_CARVEOUT_ADR 0xc04
+#define MC_EMEM_ADR_CFG_CHANNEL_ENABLE 0xdf8
+#define MC_GLOBAL_INTSTATUS 0xf24
+#define MC_ERR_ADR_HI 0x11fc
+
+#define MC_INT_DECERR_ROUTE_SANITY BIT(20)
+#define MC_INT_DECERR_GENERALIZED_CARVEOUT BIT(17)
+#define MC_INT_DECERR_MTS BIT(16)
+#define MC_INT_SECERR_SEC BIT(13)
+#define MC_INT_DECERR_VPR BIT(12)
+#define MC_INT_INVALID_APB_ASID_UPDATE BIT(11)
+#define MC_INT_INVALID_SMMU_PAGE BIT(10)
+#define MC_INT_ARBITRATION_EMEM BIT(9)
+#define MC_INT_SECURITY_VIOLATION BIT(8)
+#define MC_INT_INVALID_GART_PAGE BIT(7)
+#define MC_INT_DECERR_EMEM BIT(6)
+
+#define MC_ERR_STATUS_TYPE_SHIFT 28
+#define MC_ERR_STATUS_TYPE_INVALID_SMMU_PAGE (0x6 << 28)
+#define MC_ERR_STATUS_TYPE_MASK (0x7 << 28)
+#define MC_ERR_STATUS_READABLE BIT(27)
+#define MC_ERR_STATUS_WRITABLE BIT(26)
+#define MC_ERR_STATUS_NONSECURE BIT(25)
+#define MC_ERR_STATUS_ADR_HI_SHIFT 20
+#define MC_ERR_STATUS_ADR_HI_MASK 0x3
+#define MC_ERR_STATUS_SECURITY BIT(17)
+#define MC_ERR_STATUS_RW BIT(16)
+
+#define MC_EMEM_ADR_CFG_EMEM_NUMDEV BIT(0)
+
+#define MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(x) ((x) & 0x1ff)
+#define MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK 0x1ff
+
+#define MC_EMEM_ARB_OUTSTANDING_REQ_MAX_MASK 0x1ff
+#define MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE BIT(30)
+#define MC_EMEM_ARB_OUTSTANDING_REQ_LIMIT_ENABLE BIT(31)
+
+#define MC_EMEM_ARB_OVERRIDE_EACK_MASK 0x3
+
+#define MC_TIMING_UPDATE BIT(0)
+
+#define MC_BROADCAST_CHANNEL ~0
+
+static inline u32 tegra_mc_scale_percents(u64 val, unsigned int percents)
+{
+ val = val * percents;
+ do_div(val, 100);
+
+ return min_t(u64, val, U32_MAX);
+}
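+
+/*
+ * Worked example (illustrative): tegra_mc_scale_percents(4000000000ULL, 50)
+ * computes 4000000000 * 50 / 100 = 2000000000, which fits in u32 and is
+ * returned unchanged; any result above U32_MAX would be clamped to U32_MAX.
+ */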
+
+static inline struct tegra_mc *
+icc_provider_to_tegra_mc(struct icc_provider *provider)
+{
+ return container_of(provider, struct tegra_mc, provider);
+}
+
+static inline u32 mc_ch_readl(const struct tegra_mc *mc, int ch,
+ unsigned long offset)
+{
+ if (!mc->bcast_ch_regs)
+ return 0;
+
+ if (ch == MC_BROADCAST_CHANNEL)
+ return readl_relaxed(mc->bcast_ch_regs + offset);
+
+ return readl_relaxed(mc->ch_regs[ch] + offset);
+}
+
+static inline void mc_ch_writel(const struct tegra_mc *mc, int ch,
+ u32 value, unsigned long offset)
+{
+ if (!mc->bcast_ch_regs)
+ return;
+
+ if (ch == MC_BROADCAST_CHANNEL)
+ writel_relaxed(value, mc->bcast_ch_regs + offset);
+ else
+ writel_relaxed(value, mc->ch_regs[ch] + offset);
+}
+
+static inline u32 mc_readl(const struct tegra_mc *mc, unsigned long offset)
+{
+ return readl_relaxed(mc->regs + offset);
+}
+
+static inline void mc_writel(const struct tegra_mc *mc, u32 value,
+ unsigned long offset)
+{
+ writel_relaxed(value, mc->regs + offset);
+}
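+
+/*
+ * Illustrative usage (taken from tegra_mc_probe() in mc.c): on
+ * multi-channel SoCs a single broadcast write programs all channels at
+ * once, e.g.:
+ *
+ *	mc_ch_writel(mc, MC_BROADCAST_CHANNEL, mc->soc->intmask, MC_INTMASK);
+ */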
+
+extern const struct tegra_mc_reset_ops tegra_mc_reset_ops_common;
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+extern const struct tegra_mc_soc tegra20_mc_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+extern const struct tegra_mc_soc tegra30_mc_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+extern const struct tegra_mc_soc tegra114_mc_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+extern const struct tegra_mc_soc tegra124_mc_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_132_SOC
+extern const struct tegra_mc_soc tegra132_mc_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+extern const struct tegra_mc_soc tegra210_mc_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_186_SOC
+extern const struct tegra_mc_soc tegra186_mc_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_194_SOC
+extern const struct tegra_mc_soc tegra194_mc_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_234_SOC
+extern const struct tegra_mc_soc tegra234_mc_soc;
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_3x_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_114_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_124_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_132_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_210_SOC)
+int tegra30_mc_probe(struct tegra_mc *mc);
+extern const struct tegra_mc_ops tegra30_mc_ops;
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_186_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_194_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_234_SOC)
+extern const struct tegra_mc_ops tegra186_mc_ops;
+#endif
+
+irqreturn_t tegra30_mc_handle_irq(int irq, void *data);
+extern const char * const tegra_mc_status_names[32];
+extern const char * const tegra_mc_error_names[8];
+
+/*
+ * These IDs are for internal use of Tegra ICC drivers. The ID numbers are
+ * chosen such that they don't conflict with the device-tree ICC node IDs.
+ */
+#define TEGRA_ICC_MC 1000
+#define TEGRA_ICC_EMC 1001
+#define TEGRA_ICC_EMEM 1002
+
+#endif /* MEMORY_TEGRA_MC_H */
diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
new file mode 100644
index 0000000000..41350570c8
--- /dev/null
+++ b/drivers/memory/tegra/tegra114.c
@@ -0,0 +1,1117 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/mm.h>
+
+#include <dt-bindings/memory/tegra114-mc.h>
+
+#include "mc.h"
+
+static const struct tegra_mc_client tegra114_mc_clients[] = {
+ {
+ .id = 0x00,
+ .name = "ptcr",
+ .swgroup = TEGRA_SWGROUP_PTC,
+ .regs = {
+ .la = {
+ .reg = 0x34c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0,
+ },
+ },
+ }, {
+ .id = 0x01,
+ .name = "display0a",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ }, {
+ .id = 0x02,
+ .name = "display0ab",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ }, {
+ .id = 0x03,
+ .name = "display0b",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ }, {
+ .id = 0x04,
+ .name = "display0bb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ }, {
+ .id = 0x05,
+ .name = "display0c",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x2ec,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ }, {
+ .id = 0x06,
+ .name = "display0cb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x2f8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ }, {
+ .id = 0x09,
+ .name = "eppup",
+ .swgroup = TEGRA_SWGROUP_EPP,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 9,
+ },
+ .la = {
+ .reg = 0x300,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x33,
+ },
+ },
+ }, {
+ .id = 0x0a,
+ .name = "g2pr",
+ .swgroup = TEGRA_SWGROUP_G2,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x308,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x09,
+ },
+ },
+ }, {
+ .id = 0x0b,
+ .name = "g2sr",
+ .swgroup = TEGRA_SWGROUP_G2,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x308,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x09,
+ },
+ },
+ }, {
+ .id = 0x0f,
+ .name = "avpcarm7r",
+ .swgroup = TEGRA_SWGROUP_AVPC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x10,
+ .name = "displayhc",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x2f0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x68,
+ },
+ },
+ }, {
+ .id = 0x11,
+ .name = "displayhcb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2fc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x68,
+ },
+ },
+ }, {
+ .id = 0x12,
+ .name = "fdcdrd",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x334,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0c,
+ },
+ },
+ }, {
+ .id = 0x13,
+ .name = "fdcdrd2",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 19,
+ },
+ .la = {
+ .reg = 0x33c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0c,
+ },
+ },
+ }, {
+ .id = 0x14,
+ .name = "g2dr",
+ .swgroup = TEGRA_SWGROUP_G2,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x30c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0a,
+ },
+ },
+ }, {
+ .id = 0x15,
+ .name = "hdar",
+ .swgroup = TEGRA_SWGROUP_HDA,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x16,
+ .name = "host1xdmar",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x10,
+ },
+ },
+ }, {
+ .id = 0x17,
+ .name = "host1xr",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xa5,
+ },
+ },
+ }, {
+ .id = 0x18,
+ .name = "idxsrd",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 24,
+ },
+ .la = {
+ .reg = 0x334,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0b,
+ },
+ },
+ }, {
+ .id = 0x1c,
+ .name = "msencsrd",
+ .swgroup = TEGRA_SWGROUP_MSENC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x1d,
+ .name = "ppcsahbdmar",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x1e,
+ .name = "ppcsahbslvr",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xe8,
+ },
+ },
+ }, {
+ .id = 0x20,
+ .name = "texl2srd",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x338,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0c,
+ },
+ },
+ }, {
+ .id = 0x22,
+ .name = "vdebsevr",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x354,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x23,
+ .name = "vdember",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x354,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x24,
+ .name = "vdemcer",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x358,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xb8,
+ },
+ },
+ }, {
+ .id = 0x25,
+ .name = "vdetper",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x358,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xee,
+ },
+ },
+ }, {
+ .id = 0x26,
+ .name = "mpcorelpr",
+ .swgroup = TEGRA_SWGROUP_MPCORELP,
+ .regs = {
+ .la = {
+ .reg = 0x324,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x27,
+ .name = "mpcorer",
+ .swgroup = TEGRA_SWGROUP_MPCORE,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x28,
+ .name = "eppu",
+ .swgroup = TEGRA_SWGROUP_EPP,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 8,
+ },
+ .la = {
+ .reg = 0x300,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x33,
+ },
+ },
+ }, {
+ .id = 0x29,
+ .name = "eppv",
+ .swgroup = TEGRA_SWGROUP_EPP,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 9,
+ },
+ .la = {
+ .reg = 0x304,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x6c,
+ },
+ },
+ }, {
+ .id = 0x2a,
+ .name = "eppy",
+ .swgroup = TEGRA_SWGROUP_EPP,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x304,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x6c,
+ },
+ },
+ }, {
+ .id = 0x2b,
+ .name = "msencswr",
+ .swgroup = TEGRA_SWGROUP_MSENC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x2c,
+ .name = "viwsb",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x364,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x47,
+ },
+ },
+ }, {
+ .id = 0x2d,
+ .name = "viwu",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x368,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x2e,
+ .name = "viwv",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x368,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x2f,
+ .name = "viwy",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x36c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x47,
+ },
+ },
+ }, {
+ .id = 0x30,
+ .name = "g2dw",
+ .swgroup = TEGRA_SWGROUP_G2,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x30c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x09,
+ },
+ },
+ }, {
+ .id = 0x32,
+ .name = "avpcarm7w",
+ .swgroup = TEGRA_SWGROUP_AVPC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0e,
+ },
+ },
+ }, {
+ .id = 0x33,
+ .name = "fdcdwr",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 19,
+ },
+ .la = {
+ .reg = 0x338,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x10,
+ },
+ },
+ }, {
+ .id = 0x34,
+ .name = "fdcdwr2",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x340,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x10,
+ },
+ },
+ }, {
+ .id = 0x35,
+ .name = "hdaw",
+ .swgroup = TEGRA_SWGROUP_HDA,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x36,
+ .name = "host1xw",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x314,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x25,
+ },
+ },
+ }, {
+ .id = 0x37,
+ .name = "ispw",
+ .swgroup = TEGRA_SWGROUP_ISP,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x31c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x38,
+ .name = "mpcorelpw",
+ .swgroup = TEGRA_SWGROUP_MPCORELP,
+ .regs = {
+ .la = {
+ .reg = 0x324,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x39,
+ .name = "mpcorew",
+ .swgroup = TEGRA_SWGROUP_MPCORE,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0e,
+ },
+ },
+ }, {
+ .id = 0x3b,
+ .name = "ppcsahbdmaw",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 27,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xa5,
+ },
+ },
+ }, {
+ .id = 0x3c,
+ .name = "ppcsahbslvw",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xe8,
+ },
+ },
+ }, {
+ .id = 0x3e,
+ .name = "vdebsevw",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x35c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x3f,
+ .name = "vdedbgw",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x35c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x40,
+ .name = "vdembew",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x360,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x89,
+ },
+ },
+ }, {
+ .id = 0x41,
+ .name = "vdetpmw",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x360,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x59,
+ },
+ },
+ }, {
+ .id = 0x4a,
+ .name = "xusb_hostr",
+ .swgroup = TEGRA_SWGROUP_XUSB_HOST,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x37c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xa5,
+ },
+ },
+ }, {
+ .id = 0x4b,
+ .name = "xusb_hostw",
+ .swgroup = TEGRA_SWGROUP_XUSB_HOST,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x37c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xa5,
+ },
+ },
+ }, {
+ .id = 0x4c,
+ .name = "xusb_devr",
+ .swgroup = TEGRA_SWGROUP_XUSB_DEV,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x380,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xa5,
+ },
+ },
+ }, {
+ .id = 0x4d,
+ .name = "xusb_devw",
+ .swgroup = TEGRA_SWGROUP_XUSB_DEV,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x380,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xa5,
+ },
+ },
+ }, {
+ .id = 0x4e,
+ .name = "fdcdwr3",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x388,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x10,
+ },
+ },
+ }, {
+ .id = 0x4f,
+ .name = "fdcdrd3",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x384,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0c,
+ },
+ },
+ }, {
+ .id = 0x50,
+ .name = "fdcwr4",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x388,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x10,
+ },
+ },
+ }, {
+ .id = 0x51,
+ .name = "fdcrd4",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x384,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0c,
+ },
+ },
+ }, {
+ .id = 0x52,
+ .name = "emucifr",
+ .swgroup = TEGRA_SWGROUP_EMUCIF,
+ .regs = {
+ .la = {
+ .reg = 0x38c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x53,
+ .name = "emucifw",
+ .swgroup = TEGRA_SWGROUP_EMUCIF,
+ .regs = {
+ .la = {
+ .reg = 0x38c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0e,
+ },
+ },
+ }, {
+ .id = 0x54,
+ .name = "tsecsrd",
+ .swgroup = TEGRA_SWGROUP_TSEC,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x390,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x55,
+ .name = "tsecswr",
+ .swgroup = TEGRA_SWGROUP_TSEC,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x390,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ },
+};
+
+static const struct tegra_smmu_swgroup tegra114_swgroups[] = {
+ { .name = "dc", .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 },
+ { .name = "dcb", .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 },
+ { .name = "epp", .swgroup = TEGRA_SWGROUP_EPP, .reg = 0x248 },
+ { .name = "g2", .swgroup = TEGRA_SWGROUP_G2, .reg = 0x24c },
+ { .name = "avpc", .swgroup = TEGRA_SWGROUP_AVPC, .reg = 0x23c },
+ { .name = "nv", .swgroup = TEGRA_SWGROUP_NV, .reg = 0x268 },
+ { .name = "hda", .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 },
+ { .name = "hc", .swgroup = TEGRA_SWGROUP_HC, .reg = 0x250 },
+ { .name = "msenc", .swgroup = TEGRA_SWGROUP_MSENC, .reg = 0x264 },
+ { .name = "ppcs", .swgroup = TEGRA_SWGROUP_PPCS, .reg = 0x270 },
+ { .name = "vde", .swgroup = TEGRA_SWGROUP_VDE, .reg = 0x27c },
+ { .name = "vi", .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 },
+ { .name = "isp", .swgroup = TEGRA_SWGROUP_ISP, .reg = 0x258 },
+ { .name = "xusb_host", .swgroup = TEGRA_SWGROUP_XUSB_HOST, .reg = 0x288 },
+ { .name = "xusb_dev", .swgroup = TEGRA_SWGROUP_XUSB_DEV, .reg = 0x28c },
+ { .name = "tsec", .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 },
+};
+
+static const unsigned int tegra114_group_drm[] = {
+ TEGRA_SWGROUP_DC,
+ TEGRA_SWGROUP_DCB,
+ TEGRA_SWGROUP_G2,
+ TEGRA_SWGROUP_NV,
+};
+
+static const struct tegra_smmu_group_soc tegra114_groups[] = {
+ {
+ .name = "drm",
+ .swgroups = tegra114_group_drm,
+ .num_swgroups = ARRAY_SIZE(tegra114_group_drm),
+ },
+};
+
+static const struct tegra_smmu_soc tegra114_smmu_soc = {
+ .clients = tegra114_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra114_mc_clients),
+ .swgroups = tegra114_swgroups,
+ .num_swgroups = ARRAY_SIZE(tegra114_swgroups),
+ .groups = tegra114_groups,
+ .num_groups = ARRAY_SIZE(tegra114_groups),
+ .supports_round_robin_arbitration = false,
+ .supports_request_limit = false,
+ .num_tlb_lines = 32,
+ .num_asids = 4,
+};
+
+#define TEGRA114_MC_RESET(_name, _control, _status, _bit) \
+ { \
+ .name = #_name, \
+ .id = TEGRA114_MC_RESET_##_name, \
+ .control = _control, \
+ .status = _status, \
+ .bit = _bit, \
+ }
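+
+/*
+ * Illustrative expansion (not part of this driver): for example,
+ * TEGRA114_MC_RESET(AVPC, 0x200, 0x204, 1) expands to
+ *
+ *	{
+ *		.name = "AVPC",
+ *		.id = TEGRA114_MC_RESET_AVPC,
+ *		.control = 0x200,
+ *		.status = 0x204,
+ *		.bit = 1,
+ *	}
+ */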
+
+static const struct tegra_mc_reset tegra114_mc_resets[] = {
+ TEGRA114_MC_RESET(AVPC, 0x200, 0x204, 1),
+ TEGRA114_MC_RESET(DC, 0x200, 0x204, 2),
+ TEGRA114_MC_RESET(DCB, 0x200, 0x204, 3),
+ TEGRA114_MC_RESET(EPP, 0x200, 0x204, 4),
+ TEGRA114_MC_RESET(2D, 0x200, 0x204, 5),
+ TEGRA114_MC_RESET(HC, 0x200, 0x204, 6),
+ TEGRA114_MC_RESET(HDA, 0x200, 0x204, 7),
+ TEGRA114_MC_RESET(ISP, 0x200, 0x204, 8),
+ TEGRA114_MC_RESET(MPCORE, 0x200, 0x204, 9),
+ TEGRA114_MC_RESET(MPCORELP, 0x200, 0x204, 10),
+ TEGRA114_MC_RESET(MPE, 0x200, 0x204, 11),
+ TEGRA114_MC_RESET(3D, 0x200, 0x204, 12),
+ TEGRA114_MC_RESET(3D2, 0x200, 0x204, 13),
+ TEGRA114_MC_RESET(PPCS, 0x200, 0x204, 14),
+ TEGRA114_MC_RESET(VDE, 0x200, 0x204, 16),
+ TEGRA114_MC_RESET(VI, 0x200, 0x204, 17),
+};
+
+const struct tegra_mc_soc tegra114_mc_soc = {
+ .clients = tegra114_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra114_mc_clients),
+ .num_address_bits = 32,
+ .atom_size = 32,
+ .client_id_mask = 0x7f,
+ .smmu = &tegra114_smmu_soc,
+ .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION |
+ MC_INT_DECERR_EMEM,
+ .reset_ops = &tegra_mc_reset_ops_common,
+ .resets = tegra114_mc_resets,
+ .num_resets = ARRAY_SIZE(tegra114_mc_resets),
+ .ops = &tegra30_mc_ops,
+};
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
new file mode 100644
index 0000000000..00ed2b6a0d
--- /dev/null
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -0,0 +1,1535 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author:
+ * Mikko Perttunen <mperttunen@nvidia.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk/tegra.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/sort.h>
+#include <linux/string.h>
+
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/mc.h>
+
+#include "mc.h"
+
+#define EMC_FBIO_CFG5 0x104
+#define EMC_FBIO_CFG5_DRAM_TYPE_MASK 0x3
+#define EMC_FBIO_CFG5_DRAM_TYPE_SHIFT 0
+#define EMC_FBIO_CFG5_DRAM_WIDTH_X64 BIT(4)
+
+#define EMC_INTSTATUS 0x0
+#define EMC_INTSTATUS_CLKCHANGE_COMPLETE BIT(4)
+
+#define EMC_CFG 0xc
+#define EMC_CFG_DRAM_CLKSTOP_PD BIT(31)
+#define EMC_CFG_DRAM_CLKSTOP_SR BIT(30)
+#define EMC_CFG_DRAM_ACPD BIT(29)
+#define EMC_CFG_DYN_SREF BIT(28)
+#define EMC_CFG_PWR_MASK ((0xF << 28) | BIT(18))
+#define EMC_CFG_DSR_VTTGEN_DRV_EN BIT(18)
+
+#define EMC_REFCTRL 0x20
+#define EMC_REFCTRL_DEV_SEL_SHIFT 0
+#define EMC_REFCTRL_ENABLE BIT(31)
+
+#define EMC_TIMING_CONTROL 0x28
+#define EMC_RC 0x2c
+#define EMC_RFC 0x30
+#define EMC_RAS 0x34
+#define EMC_RP 0x38
+#define EMC_R2W 0x3c
+#define EMC_W2R 0x40
+#define EMC_R2P 0x44
+#define EMC_W2P 0x48
+#define EMC_RD_RCD 0x4c
+#define EMC_WR_RCD 0x50
+#define EMC_RRD 0x54
+#define EMC_REXT 0x58
+#define EMC_WDV 0x5c
+#define EMC_QUSE 0x60
+#define EMC_QRST 0x64
+#define EMC_QSAFE 0x68
+#define EMC_RDV 0x6c
+#define EMC_REFRESH 0x70
+#define EMC_BURST_REFRESH_NUM 0x74
+#define EMC_PDEX2WR 0x78
+#define EMC_PDEX2RD 0x7c
+#define EMC_PCHG2PDEN 0x80
+#define EMC_ACT2PDEN 0x84
+#define EMC_AR2PDEN 0x88
+#define EMC_RW2PDEN 0x8c
+#define EMC_TXSR 0x90
+#define EMC_TCKE 0x94
+#define EMC_TFAW 0x98
+#define EMC_TRPAB 0x9c
+#define EMC_TCLKSTABLE 0xa0
+#define EMC_TCLKSTOP 0xa4
+#define EMC_TREFBW 0xa8
+#define EMC_ODT_WRITE 0xb0
+#define EMC_ODT_READ 0xb4
+#define EMC_WEXT 0xb8
+#define EMC_CTT 0xbc
+#define EMC_RFC_SLR 0xc0
+#define EMC_MRS_WAIT_CNT2 0xc4
+
+#define EMC_MRS_WAIT_CNT 0xc8
+#define EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT 0
+#define EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK \
+ (0x3FF << EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT)
+#define EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT 16
+#define EMC_MRS_WAIT_CNT_LONG_WAIT_MASK \
+ (0x3FF << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT)
+
+#define EMC_MRS 0xcc
+#define EMC_MODE_SET_DLL_RESET BIT(8)
+#define EMC_MODE_SET_LONG_CNT BIT(26)
+#define EMC_EMRS 0xd0
+#define EMC_REF 0xd4
+#define EMC_PRE 0xd8
+
+#define EMC_SELF_REF 0xe0
+#define EMC_SELF_REF_CMD_ENABLED BIT(0)
+#define EMC_SELF_REF_DEV_SEL_SHIFT 30
+
+#define EMC_MRW 0xe8
+
+#define EMC_MRR 0xec
+#define EMC_MRR_MA_SHIFT 16
+#define LPDDR2_MR4_TEMP_SHIFT 0
+
+#define EMC_XM2DQSPADCTRL3 0xf8
+#define EMC_FBIO_SPARE 0x100
+
+#define EMC_FBIO_CFG6 0x114
+#define EMC_EMRS2 0x12c
+#define EMC_MRW2 0x134
+#define EMC_MRW4 0x13c
+#define EMC_EINPUT 0x14c
+#define EMC_EINPUT_DURATION 0x150
+#define EMC_PUTERM_EXTRA 0x154
+#define EMC_TCKESR 0x158
+#define EMC_TPD 0x15c
+
+#define EMC_AUTO_CAL_CONFIG 0x2a4
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_START BIT(31)
+#define EMC_AUTO_CAL_INTERVAL 0x2a8
+#define EMC_AUTO_CAL_STATUS 0x2ac
+#define EMC_AUTO_CAL_STATUS_ACTIVE BIT(31)
+#define EMC_STATUS 0x2b4
+#define EMC_STATUS_TIMING_UPDATE_STALLED BIT(23)
+
+#define EMC_CFG_2 0x2b8
+#define EMC_CFG_2_MODE_SHIFT 0
+#define EMC_CFG_2_DIS_STP_OB_CLK_DURING_NON_WR BIT(6)
+
+#define EMC_CFG_DIG_DLL 0x2bc
+#define EMC_CFG_DIG_DLL_PERIOD 0x2c0
+#define EMC_RDV_MASK 0x2cc
+#define EMC_WDV_MASK 0x2d0
+#define EMC_CTT_DURATION 0x2d8
+#define EMC_CTT_TERM_CTRL 0x2dc
+#define EMC_ZCAL_INTERVAL 0x2e0
+#define EMC_ZCAL_WAIT_CNT 0x2e4
+
+#define EMC_ZQ_CAL 0x2ec
+#define EMC_ZQ_CAL_CMD BIT(0)
+#define EMC_ZQ_CAL_LONG BIT(4)
+#define EMC_ZQ_CAL_LONG_CMD_DEV0 \
+ (DRAM_DEV_SEL_0 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD)
+#define EMC_ZQ_CAL_LONG_CMD_DEV1 \
+ (DRAM_DEV_SEL_1 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD)
+
+#define EMC_XM2CMDPADCTRL 0x2f0
+#define EMC_XM2DQSPADCTRL 0x2f8
+#define EMC_XM2DQSPADCTRL2 0x2fc
+#define EMC_XM2DQSPADCTRL2_RX_FT_REC_ENABLE BIT(0)
+#define EMC_XM2DQSPADCTRL2_VREF_ENABLE BIT(5)
+#define EMC_XM2DQPADCTRL 0x300
+#define EMC_XM2DQPADCTRL2 0x304
+#define EMC_XM2CLKPADCTRL 0x308
+#define EMC_XM2COMPPADCTRL 0x30c
+#define EMC_XM2VTTGENPADCTRL 0x310
+#define EMC_XM2VTTGENPADCTRL2 0x314
+#define EMC_XM2VTTGENPADCTRL3 0x318
+#define EMC_XM2DQSPADCTRL4 0x320
+#define EMC_DLL_XFORM_DQS0 0x328
+#define EMC_DLL_XFORM_DQS1 0x32c
+#define EMC_DLL_XFORM_DQS2 0x330
+#define EMC_DLL_XFORM_DQS3 0x334
+#define EMC_DLL_XFORM_DQS4 0x338
+#define EMC_DLL_XFORM_DQS5 0x33c
+#define EMC_DLL_XFORM_DQS6 0x340
+#define EMC_DLL_XFORM_DQS7 0x344
+#define EMC_DLL_XFORM_QUSE0 0x348
+#define EMC_DLL_XFORM_QUSE1 0x34c
+#define EMC_DLL_XFORM_QUSE2 0x350
+#define EMC_DLL_XFORM_QUSE3 0x354
+#define EMC_DLL_XFORM_QUSE4 0x358
+#define EMC_DLL_XFORM_QUSE5 0x35c
+#define EMC_DLL_XFORM_QUSE6 0x360
+#define EMC_DLL_XFORM_QUSE7 0x364
+#define EMC_DLL_XFORM_DQ0 0x368
+#define EMC_DLL_XFORM_DQ1 0x36c
+#define EMC_DLL_XFORM_DQ2 0x370
+#define EMC_DLL_XFORM_DQ3 0x374
+#define EMC_DLI_TRIM_TXDQS0 0x3a8
+#define EMC_DLI_TRIM_TXDQS1 0x3ac
+#define EMC_DLI_TRIM_TXDQS2 0x3b0
+#define EMC_DLI_TRIM_TXDQS3 0x3b4
+#define EMC_DLI_TRIM_TXDQS4 0x3b8
+#define EMC_DLI_TRIM_TXDQS5 0x3bc
+#define EMC_DLI_TRIM_TXDQS6 0x3c0
+#define EMC_DLI_TRIM_TXDQS7 0x3c4
+#define EMC_STALL_THEN_EXE_AFTER_CLKCHANGE 0x3cc
+#define EMC_SEL_DPD_CTRL 0x3d8
+#define EMC_SEL_DPD_CTRL_DATA_SEL_DPD BIT(8)
+#define EMC_SEL_DPD_CTRL_ODT_SEL_DPD BIT(5)
+#define EMC_SEL_DPD_CTRL_RESET_SEL_DPD BIT(4)
+#define EMC_SEL_DPD_CTRL_CA_SEL_DPD BIT(3)
+#define EMC_SEL_DPD_CTRL_CLK_SEL_DPD BIT(2)
+#define EMC_SEL_DPD_CTRL_DDR3_MASK \
+ ((0xf << 2) | BIT(8))
+#define EMC_SEL_DPD_CTRL_MASK \
+ ((0x3 << 2) | BIT(5) | BIT(8))
+#define EMC_PRE_REFRESH_REQ_CNT 0x3dc
+#define EMC_DYN_SELF_REF_CONTROL 0x3e0
+#define EMC_TXSRDLL 0x3e4
+#define EMC_CCFIFO_ADDR 0x3e8
+#define EMC_CCFIFO_DATA 0x3ec
+#define EMC_CCFIFO_STATUS 0x3f0
+#define EMC_CDB_CNTL_1 0x3f4
+#define EMC_CDB_CNTL_2 0x3f8
+#define EMC_XM2CLKPADCTRL2 0x3fc
+#define EMC_AUTO_CAL_CONFIG2 0x458
+#define EMC_AUTO_CAL_CONFIG3 0x45c
+#define EMC_IBDLY 0x468
+#define EMC_DLL_XFORM_ADDR0 0x46c
+#define EMC_DLL_XFORM_ADDR1 0x470
+#define EMC_DLL_XFORM_ADDR2 0x474
+#define EMC_DSR_VTTGEN_DRV 0x47c
+#define EMC_TXDSRVTTGEN 0x480
+#define EMC_XM2CMDPADCTRL4 0x484
+#define EMC_XM2CMDPADCTRL5 0x488
+#define EMC_DLL_XFORM_DQS8 0x4a0
+#define EMC_DLL_XFORM_DQS9 0x4a4
+#define EMC_DLL_XFORM_DQS10 0x4a8
+#define EMC_DLL_XFORM_DQS11 0x4ac
+#define EMC_DLL_XFORM_DQS12 0x4b0
+#define EMC_DLL_XFORM_DQS13 0x4b4
+#define EMC_DLL_XFORM_DQS14 0x4b8
+#define EMC_DLL_XFORM_DQS15 0x4bc
+#define EMC_DLL_XFORM_QUSE8 0x4c0
+#define EMC_DLL_XFORM_QUSE9 0x4c4
+#define EMC_DLL_XFORM_QUSE10 0x4c8
+#define EMC_DLL_XFORM_QUSE11 0x4cc
+#define EMC_DLL_XFORM_QUSE12 0x4d0
+#define EMC_DLL_XFORM_QUSE13 0x4d4
+#define EMC_DLL_XFORM_QUSE14 0x4d8
+#define EMC_DLL_XFORM_QUSE15 0x4dc
+#define EMC_DLL_XFORM_DQ4 0x4e0
+#define EMC_DLL_XFORM_DQ5 0x4e4
+#define EMC_DLL_XFORM_DQ6 0x4e8
+#define EMC_DLL_XFORM_DQ7 0x4ec
+#define EMC_DLI_TRIM_TXDQS8 0x520
+#define EMC_DLI_TRIM_TXDQS9 0x524
+#define EMC_DLI_TRIM_TXDQS10 0x528
+#define EMC_DLI_TRIM_TXDQS11 0x52c
+#define EMC_DLI_TRIM_TXDQS12 0x530
+#define EMC_DLI_TRIM_TXDQS13 0x534
+#define EMC_DLI_TRIM_TXDQS14 0x538
+#define EMC_DLI_TRIM_TXDQS15 0x53c
+#define EMC_CDB_CNTL_3 0x540
+#define EMC_XM2DQSPADCTRL5 0x544
+#define EMC_XM2DQSPADCTRL6 0x548
+#define EMC_XM2DQPADCTRL3 0x54c
+#define EMC_DLL_XFORM_ADDR3 0x550
+#define EMC_DLL_XFORM_ADDR4 0x554
+#define EMC_DLL_XFORM_ADDR5 0x558
+#define EMC_CFG_PIPE 0x560
+#define EMC_QPOP 0x564
+#define EMC_QUSE_WIDTH 0x568
+#define EMC_PUTERM_WIDTH 0x56c
+#define EMC_BGBIAS_CTL0 0x570
+#define EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_RX BIT(3)
+#define EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_VTTGEN BIT(2)
+#define EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD BIT(1)
+#define EMC_PUTERM_ADJ 0x574
+
+#define DRAM_DEV_SEL_ALL 0
+#define DRAM_DEV_SEL_0 BIT(31)
+#define DRAM_DEV_SEL_1 BIT(30)
+
+#define EMC_CFG_POWER_FEATURES_MASK \
+ (EMC_CFG_DYN_SREF | EMC_CFG_DRAM_ACPD | EMC_CFG_DRAM_CLKSTOP_SR | \
+ EMC_CFG_DRAM_CLKSTOP_PD | EMC_CFG_DSR_VTTGEN_DRV_EN)
+#define EMC_REFCTRL_DEV_SEL(n) (((n > 1) ? 0 : 2) << EMC_REFCTRL_DEV_SEL_SHIFT)
+#define EMC_DRAM_DEV_SEL(n) ((n > 1) ? DRAM_DEV_SEL_ALL : DRAM_DEV_SEL_0)
+
+/* Maximum time in microseconds to wait for changes to become effective */
+#define EMC_STATUS_UPDATE_TIMEOUT 1000
+
+enum emc_dram_type {
+ DRAM_TYPE_DDR3 = 0,
+ DRAM_TYPE_DDR1 = 1,
+ DRAM_TYPE_LPDDR3 = 2,
+ DRAM_TYPE_DDR2 = 3
+};
+
+enum emc_dll_change {
+ DLL_CHANGE_NONE,
+ DLL_CHANGE_ON,
+ DLL_CHANGE_OFF
+};
+
+static const unsigned long emc_burst_regs[] = {
+ EMC_RC,
+ EMC_RFC,
+ EMC_RFC_SLR,
+ EMC_RAS,
+ EMC_RP,
+ EMC_R2W,
+ EMC_W2R,
+ EMC_R2P,
+ EMC_W2P,
+ EMC_RD_RCD,
+ EMC_WR_RCD,
+ EMC_RRD,
+ EMC_REXT,
+ EMC_WEXT,
+ EMC_WDV,
+ EMC_WDV_MASK,
+ EMC_QUSE,
+ EMC_QUSE_WIDTH,
+ EMC_IBDLY,
+ EMC_EINPUT,
+ EMC_EINPUT_DURATION,
+ EMC_PUTERM_EXTRA,
+ EMC_PUTERM_WIDTH,
+ EMC_PUTERM_ADJ,
+ EMC_CDB_CNTL_1,
+ EMC_CDB_CNTL_2,
+ EMC_CDB_CNTL_3,
+ EMC_QRST,
+ EMC_QSAFE,
+ EMC_RDV,
+ EMC_RDV_MASK,
+ EMC_REFRESH,
+ EMC_BURST_REFRESH_NUM,
+ EMC_PRE_REFRESH_REQ_CNT,
+ EMC_PDEX2WR,
+ EMC_PDEX2RD,
+ EMC_PCHG2PDEN,
+ EMC_ACT2PDEN,
+ EMC_AR2PDEN,
+ EMC_RW2PDEN,
+ EMC_TXSR,
+ EMC_TXSRDLL,
+ EMC_TCKE,
+ EMC_TCKESR,
+ EMC_TPD,
+ EMC_TFAW,
+ EMC_TRPAB,
+ EMC_TCLKSTABLE,
+ EMC_TCLKSTOP,
+ EMC_TREFBW,
+ EMC_FBIO_CFG6,
+ EMC_ODT_WRITE,
+ EMC_ODT_READ,
+ EMC_FBIO_CFG5,
+ EMC_CFG_DIG_DLL,
+ EMC_CFG_DIG_DLL_PERIOD,
+ EMC_DLL_XFORM_DQS0,
+ EMC_DLL_XFORM_DQS1,
+ EMC_DLL_XFORM_DQS2,
+ EMC_DLL_XFORM_DQS3,
+ EMC_DLL_XFORM_DQS4,
+ EMC_DLL_XFORM_DQS5,
+ EMC_DLL_XFORM_DQS6,
+ EMC_DLL_XFORM_DQS7,
+ EMC_DLL_XFORM_DQS8,
+ EMC_DLL_XFORM_DQS9,
+ EMC_DLL_XFORM_DQS10,
+ EMC_DLL_XFORM_DQS11,
+ EMC_DLL_XFORM_DQS12,
+ EMC_DLL_XFORM_DQS13,
+ EMC_DLL_XFORM_DQS14,
+ EMC_DLL_XFORM_DQS15,
+ EMC_DLL_XFORM_QUSE0,
+ EMC_DLL_XFORM_QUSE1,
+ EMC_DLL_XFORM_QUSE2,
+ EMC_DLL_XFORM_QUSE3,
+ EMC_DLL_XFORM_QUSE4,
+ EMC_DLL_XFORM_QUSE5,
+ EMC_DLL_XFORM_QUSE6,
+ EMC_DLL_XFORM_QUSE7,
+ EMC_DLL_XFORM_ADDR0,
+ EMC_DLL_XFORM_ADDR1,
+ EMC_DLL_XFORM_ADDR2,
+ EMC_DLL_XFORM_ADDR3,
+ EMC_DLL_XFORM_ADDR4,
+ EMC_DLL_XFORM_ADDR5,
+ EMC_DLL_XFORM_QUSE8,
+ EMC_DLL_XFORM_QUSE9,
+ EMC_DLL_XFORM_QUSE10,
+ EMC_DLL_XFORM_QUSE11,
+ EMC_DLL_XFORM_QUSE12,
+ EMC_DLL_XFORM_QUSE13,
+ EMC_DLL_XFORM_QUSE14,
+ EMC_DLL_XFORM_QUSE15,
+ EMC_DLI_TRIM_TXDQS0,
+ EMC_DLI_TRIM_TXDQS1,
+ EMC_DLI_TRIM_TXDQS2,
+ EMC_DLI_TRIM_TXDQS3,
+ EMC_DLI_TRIM_TXDQS4,
+ EMC_DLI_TRIM_TXDQS5,
+ EMC_DLI_TRIM_TXDQS6,
+ EMC_DLI_TRIM_TXDQS7,
+ EMC_DLI_TRIM_TXDQS8,
+ EMC_DLI_TRIM_TXDQS9,
+ EMC_DLI_TRIM_TXDQS10,
+ EMC_DLI_TRIM_TXDQS11,
+ EMC_DLI_TRIM_TXDQS12,
+ EMC_DLI_TRIM_TXDQS13,
+ EMC_DLI_TRIM_TXDQS14,
+ EMC_DLI_TRIM_TXDQS15,
+ EMC_DLL_XFORM_DQ0,
+ EMC_DLL_XFORM_DQ1,
+ EMC_DLL_XFORM_DQ2,
+ EMC_DLL_XFORM_DQ3,
+ EMC_DLL_XFORM_DQ4,
+ EMC_DLL_XFORM_DQ5,
+ EMC_DLL_XFORM_DQ6,
+ EMC_DLL_XFORM_DQ7,
+ EMC_XM2CMDPADCTRL,
+ EMC_XM2CMDPADCTRL4,
+ EMC_XM2CMDPADCTRL5,
+ EMC_XM2DQPADCTRL2,
+ EMC_XM2DQPADCTRL3,
+ EMC_XM2CLKPADCTRL,
+ EMC_XM2CLKPADCTRL2,
+ EMC_XM2COMPPADCTRL,
+ EMC_XM2VTTGENPADCTRL,
+ EMC_XM2VTTGENPADCTRL2,
+ EMC_XM2VTTGENPADCTRL3,
+ EMC_XM2DQSPADCTRL3,
+ EMC_XM2DQSPADCTRL4,
+ EMC_XM2DQSPADCTRL5,
+ EMC_XM2DQSPADCTRL6,
+ EMC_DSR_VTTGEN_DRV,
+ EMC_TXDSRVTTGEN,
+ EMC_FBIO_SPARE,
+ EMC_ZCAL_WAIT_CNT,
+ EMC_MRS_WAIT_CNT2,
+ EMC_CTT,
+ EMC_CTT_DURATION,
+ EMC_CFG_PIPE,
+ EMC_DYN_SELF_REF_CONTROL,
+ EMC_QPOP
+};
+
+struct emc_timing {
+ unsigned long rate;
+
+ u32 emc_burst_data[ARRAY_SIZE(emc_burst_regs)];
+
+ u32 emc_auto_cal_config;
+ u32 emc_auto_cal_config2;
+ u32 emc_auto_cal_config3;
+ u32 emc_auto_cal_interval;
+ u32 emc_bgbias_ctl0;
+ u32 emc_cfg;
+ u32 emc_cfg_2;
+ u32 emc_ctt_term_ctrl;
+ u32 emc_mode_1;
+ u32 emc_mode_2;
+ u32 emc_mode_4;
+ u32 emc_mode_reset;
+ u32 emc_mrs_wait_cnt;
+ u32 emc_sel_dpd_ctrl;
+ u32 emc_xm2dqspadctrl2;
+ u32 emc_zcal_cnt_long;
+ u32 emc_zcal_interval;
+};
+
+enum emc_rate_request_type {
+ EMC_RATE_DEBUG,
+ EMC_RATE_ICC,
+ EMC_RATE_TYPE_MAX,
+};
+
+struct emc_rate_request {
+ unsigned long min_rate;
+ unsigned long max_rate;
+};
+
+struct tegra_emc {
+ struct device *dev;
+
+ struct tegra_mc *mc;
+
+ void __iomem *regs;
+
+ struct clk *clk;
+
+ enum emc_dram_type dram_type;
+ unsigned int dram_bus_width;
+ unsigned int dram_num;
+
+ struct emc_timing last_timing;
+ struct emc_timing *timings;
+ unsigned int num_timings;
+
+ struct {
+ struct dentry *root;
+ unsigned long min_rate;
+ unsigned long max_rate;
+ } debugfs;
+
+ struct icc_provider provider;
+
+ /*
+ * There are multiple sources in the EMC driver which could request
+ * a min/max clock rate; these rates are stored in this array.
+ */
+ struct emc_rate_request requested_rate[EMC_RATE_TYPE_MAX];
+
+ /* protect shared rate-change code path */
+ struct mutex rate_lock;
+};
+
+/* Timing change sequence functions */
+
+static void emc_ccfifo_writel(struct tegra_emc *emc, u32 value,
+ unsigned long offset)
+{
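+ /*
+ * The data word is written first: the subsequent write to
+ * EMC_CCFIFO_ADDR commits the address/data pair into the clock
+ * change FIFO, which the hardware replays during the rate change.
+ */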
+ writel(value, emc->regs + EMC_CCFIFO_DATA);
+ writel(offset, emc->regs + EMC_CCFIFO_ADDR);
+}
+
+static void emc_seq_update_timing(struct tegra_emc *emc)
+{
+ unsigned int i;
+ u32 value;
+
+ writel(1, emc->regs + EMC_TIMING_CONTROL);
+
+ for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; ++i) {
+ value = readl(emc->regs + EMC_STATUS);
+ if ((value & EMC_STATUS_TIMING_UPDATE_STALLED) == 0)
+ return;
+ udelay(1);
+ }
+
+ dev_err(emc->dev, "timing update timed out\n");
+}
+
+static void emc_seq_disable_auto_cal(struct tegra_emc *emc)
+{
+ unsigned int i;
+ u32 value;
+
+ writel(0, emc->regs + EMC_AUTO_CAL_INTERVAL);
+
+ for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; ++i) {
+ value = readl(emc->regs + EMC_AUTO_CAL_STATUS);
+ if ((value & EMC_AUTO_CAL_STATUS_ACTIVE) == 0)
+ return;
+ udelay(1);
+ }
+
+ dev_err(emc->dev, "auto cal disable timed out\n");
+}
+
+static void emc_seq_wait_clkchange(struct tegra_emc *emc)
+{
+ unsigned int i;
+ u32 value;
+
+ for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; ++i) {
+ value = readl(emc->regs + EMC_INTSTATUS);
+ if (value & EMC_INTSTATUS_CLKCHANGE_COMPLETE)
+ return;
+ udelay(1);
+ }
+
+ dev_err(emc->dev, "clock change timed out\n");
+}
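+
+/*
+ * Illustrative note (not part of this driver): the three emc_seq_*()
+ * helpers above share the same poll-with-timeout pattern, which could
+ * also be expressed with readl_poll_timeout() from <linux/iopoll.h>,
+ * e.g. (sketch):
+ *
+ *	err = readl_poll_timeout(emc->regs + EMC_STATUS, value,
+ *				 !(value & EMC_STATUS_TIMING_UPDATE_STALLED),
+ *				 1, EMC_STATUS_UPDATE_TIMEOUT);
+ */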
+
+static struct emc_timing *tegra_emc_find_timing(struct tegra_emc *emc,
+ unsigned long rate)
+{
+ struct emc_timing *timing = NULL;
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate == rate) {
+ timing = &emc->timings[i];
+ break;
+ }
+ }
+
+ if (!timing) {
+ dev_err(emc->dev, "no timing for rate %lu\n", rate);
+ return NULL;
+ }
+
+ return timing;
+}
+
+static int tegra_emc_prepare_timing_change(struct tegra_emc *emc,
+ unsigned long rate)
+{
+ struct emc_timing *timing = tegra_emc_find_timing(emc, rate);
+ struct emc_timing *last = &emc->last_timing;
+ enum emc_dll_change dll_change;
+ unsigned int pre_wait = 0;
+ u32 val, val2, mask;
+ bool update = false;
+ unsigned int i;
+
+ if (!timing)
+ return -ENOENT;
+
+ if ((last->emc_mode_1 & 0x1) == (timing->emc_mode_1 & 0x1))
+ dll_change = DLL_CHANGE_NONE;
+ else if (timing->emc_mode_1 & 0x1)
+ dll_change = DLL_CHANGE_ON;
+ else
+ dll_change = DLL_CHANGE_OFF;
+
+ /* Clear CLKCHANGE_COMPLETE interrupts */
+ writel(EMC_INTSTATUS_CLKCHANGE_COMPLETE, emc->regs + EMC_INTSTATUS);
+
+ /* Disable dynamic self-refresh */
+ val = readl(emc->regs + EMC_CFG);
+ if (val & EMC_CFG_PWR_MASK) {
+ val &= ~EMC_CFG_POWER_FEATURES_MASK;
+ writel(val, emc->regs + EMC_CFG);
+
+ pre_wait = 5;
+ }
+
+ /* Disable SEL_DPD_CTRL for clock change */
+ if (emc->dram_type == DRAM_TYPE_DDR3)
+ mask = EMC_SEL_DPD_CTRL_DDR3_MASK;
+ else
+ mask = EMC_SEL_DPD_CTRL_MASK;
+
+ val = readl(emc->regs + EMC_SEL_DPD_CTRL);
+ if (val & mask) {
+ val &= ~mask;
+ writel(val, emc->regs + EMC_SEL_DPD_CTRL);
+ }
+
+ /* Prepare DQ/DQS for clock change */
+ val = readl(emc->regs + EMC_BGBIAS_CTL0);
+ val2 = last->emc_bgbias_ctl0;
+ if (!(timing->emc_bgbias_ctl0 &
+ EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_RX) &&
+ (val & EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_RX)) {
+ val2 &= ~EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_RX;
+ update = true;
+ }
+
+ if ((val & EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD) ||
+ (val & EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_VTTGEN)) {
+ update = true;
+ }
+
+ if (update) {
+ writel(val2, emc->regs + EMC_BGBIAS_CTL0);
+ if (pre_wait < 5)
+ pre_wait = 5;
+ }
+
+ update = false;
+ val = readl(emc->regs + EMC_XM2DQSPADCTRL2);
+ if (timing->emc_xm2dqspadctrl2 & EMC_XM2DQSPADCTRL2_VREF_ENABLE &&
+ !(val & EMC_XM2DQSPADCTRL2_VREF_ENABLE)) {
+ val |= EMC_XM2DQSPADCTRL2_VREF_ENABLE;
+ update = true;
+ }
+
+ if (timing->emc_xm2dqspadctrl2 & EMC_XM2DQSPADCTRL2_RX_FT_REC_ENABLE &&
+ !(val & EMC_XM2DQSPADCTRL2_RX_FT_REC_ENABLE)) {
+ val |= EMC_XM2DQSPADCTRL2_RX_FT_REC_ENABLE;
+ update = true;
+ }
+
+ if (update) {
+ writel(val, emc->regs + EMC_XM2DQSPADCTRL2);
+ if (pre_wait < 30)
+ pre_wait = 30;
+ }
+
+ /* Wait to settle */
+ if (pre_wait) {
+ emc_seq_update_timing(emc);
+ udelay(pre_wait);
+ }
+
+ /* Program CTT_TERM control */
+ if (last->emc_ctt_term_ctrl != timing->emc_ctt_term_ctrl) {
+ emc_seq_disable_auto_cal(emc);
+ writel(timing->emc_ctt_term_ctrl,
+ emc->regs + EMC_CTT_TERM_CTRL);
+ emc_seq_update_timing(emc);
+ }
+
+ /* Program burst shadow registers */
+ for (i = 0; i < ARRAY_SIZE(timing->emc_burst_data); ++i)
+ writel(timing->emc_burst_data[i],
+ emc->regs + emc_burst_regs[i]);
+
+ writel(timing->emc_xm2dqspadctrl2, emc->regs + EMC_XM2DQSPADCTRL2);
+ writel(timing->emc_zcal_interval, emc->regs + EMC_ZCAL_INTERVAL);
+
+ tegra_mc_write_emem_configuration(emc->mc, timing->rate);
+
+ val = timing->emc_cfg & ~EMC_CFG_POWER_FEATURES_MASK;
+ emc_ccfifo_writel(emc, val, EMC_CFG);
+
+ /* Program AUTO_CAL_CONFIG */
+ if (timing->emc_auto_cal_config2 != last->emc_auto_cal_config2)
+ emc_ccfifo_writel(emc, timing->emc_auto_cal_config2,
+ EMC_AUTO_CAL_CONFIG2);
+
+ if (timing->emc_auto_cal_config3 != last->emc_auto_cal_config3)
+ emc_ccfifo_writel(emc, timing->emc_auto_cal_config3,
+ EMC_AUTO_CAL_CONFIG3);
+
+ if (timing->emc_auto_cal_config != last->emc_auto_cal_config) {
+ val = timing->emc_auto_cal_config;
+ val &= EMC_AUTO_CAL_CONFIG_AUTO_CAL_START;
+ emc_ccfifo_writel(emc, val, EMC_AUTO_CAL_CONFIG);
+ }
+
+ /* DDR3: predict MRS long wait count */
+ if (emc->dram_type == DRAM_TYPE_DDR3 &&
+ dll_change == DLL_CHANGE_ON) {
+ u32 cnt = 512;
+
+ if (timing->emc_zcal_interval != 0 &&
+ last->emc_zcal_interval == 0)
+ cnt -= emc->dram_num * 256;
+
+ val = (timing->emc_mrs_wait_cnt
+ & EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK)
+ >> EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT;
+ if (cnt < val)
+ cnt = val;
+
+ val = timing->emc_mrs_wait_cnt
+ & ~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
+ val |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT)
+ & EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
+
+ writel(val, emc->regs + EMC_MRS_WAIT_CNT);
+ }
+
+ val = timing->emc_cfg_2;
+ val &= ~EMC_CFG_2_DIS_STP_OB_CLK_DURING_NON_WR;
+ emc_ccfifo_writel(emc, val, EMC_CFG_2);
+
+ /* DDR3: Turn off DLL and enter self-refresh */
+ if (emc->dram_type == DRAM_TYPE_DDR3 && dll_change == DLL_CHANGE_OFF)
+ emc_ccfifo_writel(emc, timing->emc_mode_1, EMC_EMRS);
+
+ /* Disable refresh controller */
+ emc_ccfifo_writel(emc, EMC_REFCTRL_DEV_SEL(emc->dram_num),
+ EMC_REFCTRL);
+ if (emc->dram_type == DRAM_TYPE_DDR3)
+ emc_ccfifo_writel(emc, EMC_DRAM_DEV_SEL(emc->dram_num) |
+ EMC_SELF_REF_CMD_ENABLED,
+ EMC_SELF_REF);
+
+ /* Flow control marker */
+ emc_ccfifo_writel(emc, 1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE);
+
+ /* DDR3: Exit self-refresh */
+ if (emc->dram_type == DRAM_TYPE_DDR3)
+ emc_ccfifo_writel(emc, EMC_DRAM_DEV_SEL(emc->dram_num),
+ EMC_SELF_REF);
+ emc_ccfifo_writel(emc, EMC_REFCTRL_DEV_SEL(emc->dram_num) |
+ EMC_REFCTRL_ENABLE,
+ EMC_REFCTRL);
+
+ /* Set DRAM mode registers */
+ if (emc->dram_type == DRAM_TYPE_DDR3) {
+ if (timing->emc_mode_1 != last->emc_mode_1)
+ emc_ccfifo_writel(emc, timing->emc_mode_1, EMC_EMRS);
+ if (timing->emc_mode_2 != last->emc_mode_2)
+ emc_ccfifo_writel(emc, timing->emc_mode_2, EMC_EMRS2);
+
+ if ((timing->emc_mode_reset != last->emc_mode_reset) ||
+ dll_change == DLL_CHANGE_ON) {
+ val = timing->emc_mode_reset;
+ if (dll_change == DLL_CHANGE_ON) {
+ val |= EMC_MODE_SET_DLL_RESET;
+ val |= EMC_MODE_SET_LONG_CNT;
+ } else {
+ val &= ~EMC_MODE_SET_DLL_RESET;
+ }
+ emc_ccfifo_writel(emc, val, EMC_MRS);
+ }
+ } else {
+ if (timing->emc_mode_2 != last->emc_mode_2)
+ emc_ccfifo_writel(emc, timing->emc_mode_2, EMC_MRW2);
+ if (timing->emc_mode_1 != last->emc_mode_1)
+ emc_ccfifo_writel(emc, timing->emc_mode_1, EMC_MRW);
+ if (timing->emc_mode_4 != last->emc_mode_4)
+ emc_ccfifo_writel(emc, timing->emc_mode_4, EMC_MRW4);
+ }
+
+ /* Issue ZCAL command if turning ZCAL on */
+ if (timing->emc_zcal_interval != 0 && last->emc_zcal_interval == 0) {
+ emc_ccfifo_writel(emc, EMC_ZQ_CAL_LONG_CMD_DEV0, EMC_ZQ_CAL);
+ if (emc->dram_num > 1)
+ emc_ccfifo_writel(emc, EMC_ZQ_CAL_LONG_CMD_DEV1,
+ EMC_ZQ_CAL);
+ }
+
+ /* Write to RO register to remove stall after change */
+ emc_ccfifo_writel(emc, 0, EMC_CCFIFO_STATUS);
+
+ if (timing->emc_cfg_2 & EMC_CFG_2_DIS_STP_OB_CLK_DURING_NON_WR)
+ emc_ccfifo_writel(emc, timing->emc_cfg_2, EMC_CFG_2);
+
+ /* Disable AUTO_CAL for clock change */
+ emc_seq_disable_auto_cal(emc);
+
+ /* Read register to wait until programming has settled */
+ readl(emc->regs + EMC_INTSTATUS);
+
+ return 0;
+}
+
+static void tegra_emc_complete_timing_change(struct tegra_emc *emc,
+ unsigned long rate)
+{
+ struct emc_timing *timing = tegra_emc_find_timing(emc, rate);
+ struct emc_timing *last = &emc->last_timing;
+ u32 val;
+
+ if (!timing)
+ return;
+
+ /* Wait until the state machine has settled */
+ emc_seq_wait_clkchange(emc);
+
+ /* Restore AUTO_CAL */
+ if (timing->emc_ctt_term_ctrl != last->emc_ctt_term_ctrl)
+ writel(timing->emc_auto_cal_interval,
+ emc->regs + EMC_AUTO_CAL_INTERVAL);
+
+ /* Restore dynamic self-refresh */
+ if (timing->emc_cfg & EMC_CFG_PWR_MASK)
+ writel(timing->emc_cfg, emc->regs + EMC_CFG);
+
+ /* Set ZCAL wait count */
+ writel(timing->emc_zcal_cnt_long, emc->regs + EMC_ZCAL_WAIT_CNT);
+
+ /* LPDDR3: Turn off BGBIAS if low frequency */
+ if (emc->dram_type == DRAM_TYPE_LPDDR3 &&
+ timing->emc_bgbias_ctl0 &
+ EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_RX) {
+ val = timing->emc_bgbias_ctl0;
+ val |= EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD_IBIAS_VTTGEN;
+ val |= EMC_BGBIAS_CTL0_BIAS0_DSC_E_PWRD;
+ writel(val, emc->regs + EMC_BGBIAS_CTL0);
+ } else {
+ if (emc->dram_type == DRAM_TYPE_DDR3 &&
+ readl(emc->regs + EMC_BGBIAS_CTL0) !=
+ timing->emc_bgbias_ctl0) {
+ writel(timing->emc_bgbias_ctl0,
+ emc->regs + EMC_BGBIAS_CTL0);
+ }
+
+ writel(timing->emc_auto_cal_interval,
+ emc->regs + EMC_AUTO_CAL_INTERVAL);
+ }
+
+ /* Wait for timing to settle */
+ udelay(2);
+
+ /* Reprogram SEL_DPD_CTRL */
+ writel(timing->emc_sel_dpd_ctrl, emc->regs + EMC_SEL_DPD_CTRL);
+ emc_seq_update_timing(emc);
+
+ emc->last_timing = *timing;
+}
+
+/* Initialization and deinitialization */
+
+static void emc_read_current_timing(struct tegra_emc *emc,
+ struct emc_timing *timing)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(emc_burst_regs); ++i)
+ timing->emc_burst_data[i] =
+ readl(emc->regs + emc_burst_regs[i]);
+
+ timing->emc_cfg = readl(emc->regs + EMC_CFG);
+
+ timing->emc_auto_cal_interval = 0;
+ timing->emc_zcal_cnt_long = 0;
+ timing->emc_mode_1 = 0;
+ timing->emc_mode_2 = 0;
+ timing->emc_mode_4 = 0;
+ timing->emc_mode_reset = 0;
+}
+
+static int emc_init(struct tegra_emc *emc)
+{
+ emc->dram_type = readl(emc->regs + EMC_FBIO_CFG5);
+
+ if (emc->dram_type & EMC_FBIO_CFG5_DRAM_WIDTH_X64)
+ emc->dram_bus_width = 64;
+ else
+ emc->dram_bus_width = 32;
+
+ dev_info_once(emc->dev, "%ubit DRAM bus\n", emc->dram_bus_width);
+
+ emc->dram_type &= EMC_FBIO_CFG5_DRAM_TYPE_MASK;
+ emc->dram_type >>= EMC_FBIO_CFG5_DRAM_TYPE_SHIFT;
+
+ emc->dram_num = tegra_mc_get_emem_device_count(emc->mc);
+
+ emc_read_current_timing(emc, &emc->last_timing);
+
+ return 0;
+}
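+
+/*
+ * Worked example (illustrative, hypothetical register value): an
+ * EMC_FBIO_CFG5 value of 0x12 has bit 4 set and a DRAM_TYPE field of 0x2,
+ * which emc_init() above decodes as dram_bus_width = 64 and
+ * dram_type = DRAM_TYPE_LPDDR3.
+ */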
+
+static int load_one_timing_from_dt(struct tegra_emc *emc,
+ struct emc_timing *timing,
+ struct device_node *node)
+{
+ u32 value;
+ int err;
+
+ err = of_property_read_u32(node, "clock-frequency", &value);
+ if (err) {
+ dev_err(emc->dev, "timing %pOFn: failed to read rate: %d\n",
+ node, err);
+ return err;
+ }
+
+ timing->rate = value;
+
+ err = of_property_read_u32_array(node, "nvidia,emc-configuration",
+ timing->emc_burst_data,
+ ARRAY_SIZE(timing->emc_burst_data));
+ if (err) {
+ dev_err(emc->dev,
+ "timing %pOFn: failed to read emc burst data: %d\n",
+ node, err);
+ return err;
+ }
+
+#define EMC_READ_PROP(prop, dtprop) { \
+ err = of_property_read_u32(node, dtprop, &timing->prop); \
+ if (err) { \
+ dev_err(emc->dev, "timing %pOFn: failed to read " #prop ": %d\n", \
+ node, err); \
+ return err; \
+ } \
+}
+
+ EMC_READ_PROP(emc_auto_cal_config, "nvidia,emc-auto-cal-config")
+ EMC_READ_PROP(emc_auto_cal_config2, "nvidia,emc-auto-cal-config2")
+ EMC_READ_PROP(emc_auto_cal_config3, "nvidia,emc-auto-cal-config3")
+ EMC_READ_PROP(emc_auto_cal_interval, "nvidia,emc-auto-cal-interval")
+ EMC_READ_PROP(emc_bgbias_ctl0, "nvidia,emc-bgbias-ctl0")
+ EMC_READ_PROP(emc_cfg, "nvidia,emc-cfg")
+ EMC_READ_PROP(emc_cfg_2, "nvidia,emc-cfg-2")
+ EMC_READ_PROP(emc_ctt_term_ctrl, "nvidia,emc-ctt-term-ctrl")
+ EMC_READ_PROP(emc_mode_1, "nvidia,emc-mode-1")
+ EMC_READ_PROP(emc_mode_2, "nvidia,emc-mode-2")
+ EMC_READ_PROP(emc_mode_4, "nvidia,emc-mode-4")
+ EMC_READ_PROP(emc_mode_reset, "nvidia,emc-mode-reset")
+ EMC_READ_PROP(emc_mrs_wait_cnt, "nvidia,emc-mrs-wait-cnt")
+ EMC_READ_PROP(emc_sel_dpd_ctrl, "nvidia,emc-sel-dpd-ctrl")
+ EMC_READ_PROP(emc_xm2dqspadctrl2, "nvidia,emc-xm2dqspadctrl2")
+ EMC_READ_PROP(emc_zcal_cnt_long, "nvidia,emc-zcal-cnt-long")
+ EMC_READ_PROP(emc_zcal_interval, "nvidia,emc-zcal-interval")
+
+#undef EMC_READ_PROP
+
+ return 0;
+}
+
+static int cmp_timings(const void *_a, const void *_b)
+{
+ const struct emc_timing *a = _a;
+ const struct emc_timing *b = _b;
+
+ if (a->rate < b->rate)
+ return -1;
+ else if (a->rate == b->rate)
+ return 0;
+ else
+ return 1;
+}
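+
+/*
+ * Illustrative note (not part of this driver): cmp_timings() is a
+ * conventional three-way comparator, so the sort() call below orders the
+ * timing table by ascending rate. The kernel's sort() is a heapsort and
+ * therefore not stable, which is fine here since each rate is expected
+ * to appear only once.
+ */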
+
+static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
+ struct device_node *node)
+{
+ int child_count = of_get_child_count(node);
+ struct device_node *child;
+ struct emc_timing *timing;
+ unsigned int i = 0;
+ int err;
+
+ emc->timings = devm_kcalloc(emc->dev, child_count, sizeof(*timing),
+ GFP_KERNEL);
+ if (!emc->timings)
+ return -ENOMEM;
+
+ emc->num_timings = child_count;
+
+ for_each_child_of_node(node, child) {
+ timing = &emc->timings[i++];
+
+ err = load_one_timing_from_dt(emc, timing, child);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
+ }
+
+ sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
+ NULL);
+
+ return 0;
+}
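+
+/*
+ * Illustrative device-tree fragment (hypothetical values) showing the
+ * shape of a timing child node that load_one_timing_from_dt() expects:
+ *
+ *	timing-300000000 {
+ *		clock-frequency = <300000000>;
+ *		nvidia,emc-configuration = < ... one cell per burst register ... >;
+ *		nvidia,emc-mode-1 = <0x80100003>;
+ *		nvidia,emc-zcal-interval = <0x00020000>;
+ *		... and the remaining nvidia,emc-* properties read above ...
+ *	};
+ */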
+
+static const struct of_device_id tegra_emc_of_match[] = {
+ { .compatible = "nvidia,tegra124-emc" },
+ { .compatible = "nvidia,tegra132-emc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tegra_emc_of_match);
+
+static struct device_node *
+tegra_emc_find_node_by_ram_code(struct device_node *node, u32 ram_code)
+{
+ struct device_node *np;
+ int err;
+
+ for_each_child_of_node(node, np) {
+ u32 value;
+
+ err = of_property_read_u32(np, "nvidia,ram-code", &value);
+ if (err || (value != ram_code))
+ continue;
+
+ return np;
+ }
+
+ return NULL;
+}
+
+static void tegra_emc_rate_requests_init(struct tegra_emc *emc)
+{
+ unsigned int i;
+
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++) {
+ emc->requested_rate[i].min_rate = 0;
+ emc->requested_rate[i].max_rate = ULONG_MAX;
+ }
+}
+
+static int emc_request_rate(struct tegra_emc *emc,
+ unsigned long new_min_rate,
+ unsigned long new_max_rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = emc->requested_rate;
+ unsigned long min_rate = 0, max_rate = ULONG_MAX;
+ unsigned int i;
+ int err;
+
+ /* select minimum and maximum rates among the requested rates */
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++, req++) {
+ if (i == type) {
+ min_rate = max(new_min_rate, min_rate);
+ max_rate = min(new_max_rate, max_rate);
+ } else {
+ min_rate = max(req->min_rate, min_rate);
+ max_rate = min(req->max_rate, max_rate);
+ }
+ }
+
+ if (min_rate > max_rate) {
+ dev_err_ratelimited(emc->dev, "%s: type %u: out of range: %lu %lu\n",
+ __func__, type, min_rate, max_rate);
+ return -ERANGE;
+ }
+
+ /*
+ * EMC rate changes should go through the OPP API because it manages
+ * the accompanying voltage changes.
+ */
+ err = dev_pm_opp_set_rate(emc->dev, min_rate);
+ if (err)
+ return err;
+
+ emc->requested_rate[type].min_rate = new_min_rate;
+ emc->requested_rate[type].max_rate = new_max_rate;
+
+ return 0;
+}
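+
+/*
+ * Worked example (illustrative): if the debugfs requester asks for
+ * [204 MHz, ULONG_MAX] and the ICC requester for [300 MHz, 400 MHz], the
+ * loop above combines them into min_rate = 300 MHz and max_rate = 400 MHz,
+ * and dev_pm_opp_set_rate() is called with the 300 MHz floor.
+ */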
+
+static int emc_set_min_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, rate, req->max_rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
+static int emc_set_max_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, req->min_rate, rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
+/*
+ * debugfs interface
+ *
+ * The memory controller driver exposes some files in debugfs that can be used
+ * to control the EMC frequency. The top-level directory can be found here:
+ *
+ * /sys/kernel/debug/emc
+ *
+ * It contains the following files:
+ *
+ * - available_rates: This file contains a list of valid, space-separated
+ * EMC frequencies.
+ *
+ * - min_rate: Writing a value to this file sets the given frequency as the
+ * floor of the permitted range. If this is higher than the currently
+ * configured EMC frequency, this will cause the frequency to be
+ * increased so that it stays within the valid range.
+ *
+ * - max_rate: Similarly to the min_rate file, writing a value to this file
+ * sets the given frequency as the ceiling of the permitted range. If
+ * the value is lower than the currently configured EMC frequency, this
+ * will cause the frequency to be decreased so that it stays within the
+ * valid range.
+ */
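+
+/*
+ * Example usage from a shell with debugfs mounted (the rates shown are
+ * illustrative; valid values must be taken from available_rates on the
+ * running system):
+ *
+ *   # cat /sys/kernel/debug/emc/available_rates
+ *   204000000 300000000 396000000 528000000 792000000 924000000
+ *   # echo 204000000 > /sys/kernel/debug/emc/min_rate
+ *   # echo 792000000 > /sys/kernel/debug/emc/max_rate
+ *
+ * Writing a value that is not listed in available_rates fails with EINVAL.
+ */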
+
+static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++)
+ if (rate == emc->timings[i].rate)
+ return true;
+
+ return false;
+}
+
+static int tegra_emc_debug_available_rates_show(struct seq_file *s,
+ void *data)
+{
+ struct tegra_emc *emc = s->private;
+ const char *prefix = "";
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ seq_printf(s, "%s%lu", prefix, emc->timings[i].rate);
+ prefix = " ";
+ }
+
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(tegra_emc_debug_available_rates);
+
+static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
+{
+ struct tegra_emc *emc = data;
+
+ *rate = emc->debugfs.min_rate;
+
+ return 0;
+}
+
+static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
+{
+ struct tegra_emc *emc = data;
+ int err;
+
+ if (!tegra_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.min_rate = rate;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
+ tegra_emc_debug_min_rate_get,
+ tegra_emc_debug_min_rate_set, "%llu\n");
+
+static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
+{
+ struct tegra_emc *emc = data;
+
+ *rate = emc->debugfs.max_rate;
+
+ return 0;
+}
+
+static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
+{
+ struct tegra_emc *emc = data;
+ int err;
+
+ if (!tegra_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.max_rate = rate;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
+ tegra_emc_debug_max_rate_get,
+ tegra_emc_debug_max_rate_set, "%llu\n");
+
+static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc)
+{
+ unsigned int i;
+ int err;
+
+ emc->debugfs.min_rate = ULONG_MAX;
+ emc->debugfs.max_rate = 0;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate < emc->debugfs.min_rate)
+ emc->debugfs.min_rate = emc->timings[i].rate;
+
+ if (emc->timings[i].rate > emc->debugfs.max_rate)
+ emc->debugfs.max_rate = emc->timings[i].rate;
+ }
+
+ if (!emc->num_timings) {
+ emc->debugfs.min_rate = clk_get_rate(emc->clk);
+ emc->debugfs.max_rate = emc->debugfs.min_rate;
+ }
+
+ err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
+ emc->debugfs.max_rate);
+ if (err < 0) {
+ dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
+ emc->debugfs.min_rate, emc->debugfs.max_rate,
+ emc->clk);
+ return;
+ }
+
+ emc->debugfs.root = debugfs_create_dir("emc", NULL);
+
+ debugfs_create_file("available_rates", 0444, emc->debugfs.root, emc,
+ &tegra_emc_debug_available_rates_fops);
+ debugfs_create_file("min_rate", 0644, emc->debugfs.root,
+ emc, &tegra_emc_debug_min_rate_fops);
+ debugfs_create_file("max_rate", 0644, emc->debugfs.root,
+ emc, &tegra_emc_debug_max_rate_fops);
+}
+
+static inline struct tegra_emc *
+to_tegra_emc_provider(struct icc_provider *provider)
+{
+ return container_of(provider, struct tegra_emc, provider);
+}
+
+static struct icc_node_data *
+emc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct icc_provider *provider = data;
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ /* External Memory is the only possible ICC route */
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ if (node->id != TEGRA_ICC_EMEM)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * SRC and DST nodes should have matching TAG in order to have
+ * it set by default for a requested path.
+ */
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ ndata->node = node;
+
+ return ndata;
+ }
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+static int emc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct tegra_emc *emc = to_tegra_emc_provider(dst->provider);
+ unsigned long long peak_bw = icc_units_to_bps(dst->peak_bw);
+ unsigned long long avg_bw = icc_units_to_bps(dst->avg_bw);
+ unsigned long long rate = max(avg_bw, peak_bw);
+ unsigned int dram_data_bus_width_bytes;
+ const unsigned int ddr = 2;
+ int err;
+
+ /*
+ * Tegra124 EMC runs at the SDRAM bus clock rate. This means that
+ * the EMC clock rate is half the peak data rate because data is
+ * sampled on both EMC clock edges.
+ */
+ dram_data_bus_width_bytes = emc->dram_bus_width / 8;
+ do_div(rate, ddr * dram_data_bus_width_bytes);
+ rate = min_t(u64, rate, U32_MAX);
+
+ err = emc_set_min_rate(emc, rate, EMC_RATE_ICC);
+ if (err)
+ return err;
+
+ return 0;
+}
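+
+/*
+ * Worked example for the conversion above (illustrative numbers): with a
+ * 64-bit (8-byte) DRAM data bus and a requested bandwidth of 6400 MB/s,
+ * rate = 6400000000 / (2 * 8) = 400000000, i.e. a 400 MHz EMC clock is
+ * sufficient because two transfers happen per clock cycle.
+ */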
+
+static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+{
+ const struct tegra_mc_soc *soc = emc->mc->soc;
+ struct icc_node *node;
+ int err;
+
+ emc->provider.dev = emc->dev;
+ emc->provider.set = emc_icc_set;
+ emc->provider.data = &emc->provider;
+ emc->provider.aggregate = soc->icc_ops->aggregate;
+ emc->provider.xlate_extended = emc_of_icc_xlate_extended;
+
+ icc_provider_init(&emc->provider);
+
+ /* create External Memory Controller node */
+ node = icc_node_create(TEGRA_ICC_EMC);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto err_msg;
+ }
+
+ node->name = "External Memory Controller";
+ icc_node_add(node, &emc->provider);
+
+ /* link External Memory Controller to External Memory (DRAM) */
+ err = icc_link_create(node, TEGRA_ICC_EMEM);
+ if (err)
+ goto remove_nodes;
+
+ /* create External Memory node */
+ node = icc_node_create(TEGRA_ICC_EMEM);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto remove_nodes;
+ }
+
+ node->name = "External Memory (DRAM)";
+ icc_node_add(node, &emc->provider);
+
+ err = icc_provider_register(&emc->provider);
+ if (err)
+ goto remove_nodes;
+
+ return 0;
+
+remove_nodes:
+ icc_nodes_remove(&emc->provider);
+err_msg:
+ dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
+
+ return err;
+}
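+
+/*
+ * For reference, memory clients consume this provider through the generic
+ * interconnect API. A minimal, hypothetical consumer sketch (error handling
+ * trimmed) could look like:
+ *
+ *   struct icc_path *path = of_icc_get(dev, "dma-mem");
+ *
+ *   if (IS_ERR(path))
+ *           return PTR_ERR(path);
+ *
+ *   err = icc_set_bw(path, kBps_to_icc(avg), kBps_to_icc(peak));
+ *
+ * The request then reaches emc_icc_set() via the EMEM node, with the ISO
+ * tag assigned by emc_of_icc_xlate_extended() above.
+ */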
+
+static int tegra_emc_opp_table_init(struct tegra_emc *emc)
+{
+ u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
+ int opp_token, err;
+
+ err = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
+ if (err < 0) {
+ dev_err(emc->dev, "failed to set OPP supported HW: %d\n", err);
+ return err;
+ }
+ opp_token = err;
+
+ err = dev_pm_opp_of_add_table(emc->dev);
+ if (err) {
+ if (err == -ENODEV)
+ dev_err(emc->dev, "OPP table not found, please update your device tree\n");
+ else
+ dev_err(emc->dev, "failed to add OPP table: %d\n", err);
+
+ goto put_hw_table;
+ }
+
+ dev_info_once(emc->dev, "OPP HW ver. 0x%x, current clock rate %lu MHz\n",
+ hw_version, clk_get_rate(emc->clk) / 1000000);
+
+ /* first dummy rate-set initializes voltage state */
+ err = dev_pm_opp_set_rate(emc->dev, clk_get_rate(emc->clk));
+ if (err) {
+ dev_err(emc->dev, "failed to initialize OPP clock: %d\n", err);
+ goto remove_table;
+ }
+
+ return 0;
+
+remove_table:
+ dev_pm_opp_of_remove_table(emc->dev);
+put_hw_table:
+ dev_pm_opp_put_supported_hw(opp_token);
+
+ return err;
+}
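+
+/*
+ * The supported-hw mask above is matched against the "opp-supported-hw"
+ * property of each OPP in the device tree. An illustrative (hypothetical)
+ * entry that is only valid for SoC speedo ID 2, i.e. BIT(2) == 0x4:
+ *
+ *   opp-792000000 {
+ *           opp-hz = /bits/ 64 <792000000>;
+ *           opp-supported-hw = <0x0004>;
+ *   };
+ */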
+
+static void devm_tegra_emc_unset_callback(void *data)
+{
+ tegra124_clk_set_emc_callbacks(NULL, NULL);
+}
+
+static int tegra_emc_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct tegra_emc *emc;
+ u32 ram_code;
+ int err;
+
+ emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
+ if (!emc)
+ return -ENOMEM;
+
+ mutex_init(&emc->rate_lock);
+ emc->dev = &pdev->dev;
+
+ emc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(emc->regs))
+ return PTR_ERR(emc->regs);
+
+ emc->mc = devm_tegra_memory_controller_get(&pdev->dev);
+ if (IS_ERR(emc->mc))
+ return PTR_ERR(emc->mc);
+
+ ram_code = tegra_read_ram_code();
+
+ np = tegra_emc_find_node_by_ram_code(pdev->dev.of_node, ram_code);
+ if (np) {
+ err = tegra_emc_load_timings_from_dt(emc, np);
+ of_node_put(np);
+ if (err)
+ return err;
+ } else {
+ dev_info_once(&pdev->dev,
+ "no memory timings for RAM code %u found in DT\n",
+ ram_code);
+ }
+
+ err = emc_init(emc);
+ if (err) {
+ dev_err(&pdev->dev, "EMC initialization failed: %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, emc);
+
+ tegra124_clk_set_emc_callbacks(tegra_emc_prepare_timing_change,
+ tegra_emc_complete_timing_change);
+
+ err = devm_add_action_or_reset(&pdev->dev, devm_tegra_emc_unset_callback,
+ NULL);
+ if (err)
+ return err;
+
+ emc->clk = devm_clk_get(&pdev->dev, "emc");
+ if (IS_ERR(emc->clk)) {
+ err = PTR_ERR(emc->clk);
+ dev_err(&pdev->dev, "failed to get EMC clock: %d\n", err);
+ return err;
+ }
+
+ err = tegra_emc_opp_table_init(emc);
+ if (err)
+ return err;
+
+ tegra_emc_rate_requests_init(emc);
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ emc_debugfs_init(&pdev->dev, emc);
+
+ tegra_emc_interconnect_init(emc);
+
+ /*
+ * Don't allow the kernel module to be unloaded. Unloading adds some
+ * extra complexity which isn't really worth the effort in the case of
+ * this driver.
+ */
+ try_module_get(THIS_MODULE);
+
+ return 0;
+}
+
+static struct platform_driver tegra_emc_driver = {
+ .probe = tegra_emc_probe,
+ .driver = {
+ .name = "tegra-emc",
+ .of_match_table = tegra_emc_of_match,
+ .suppress_bind_attrs = true,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(tegra_emc_driver);
+
+MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra124 EMC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
new file mode 100644
index 0000000000..470b7dbab2
--- /dev/null
+++ b/drivers/memory/tegra/tegra124.c
@@ -0,0 +1,1311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/memory/tegra124-mc.h>
+
+#include "mc.h"
+
+static const struct tegra_mc_client tegra124_mc_clients[] = {
+ {
+ .id = 0x00,
+ .name = "ptcr",
+ .swgroup = TEGRA_SWGROUP_PTC,
+ .regs = {
+ .la = {
+ .reg = 0x34c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0,
+ },
+ },
+ }, {
+ .id = 0x01,
+ .name = "display0a",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xc2,
+ },
+ },
+ }, {
+ .id = 0x02,
+ .name = "display0ab",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xc6,
+ },
+ },
+ }, {
+ .id = 0x03,
+ .name = "display0b",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x04,
+ .name = "display0bb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x05,
+ .name = "display0c",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x2ec,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x06,
+ .name = "display0cb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x2f8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x0e,
+ .name = "afir",
+ .swgroup = TEGRA_SWGROUP_AFI,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x2e0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x13,
+ },
+ },
+ }, {
+ .id = 0x0f,
+ .name = "avpcarm7r",
+ .swgroup = TEGRA_SWGROUP_AVPC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x10,
+ .name = "displayhc",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x2f0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x11,
+ .name = "displayhcb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2fc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x15,
+ .name = "hdar",
+ .swgroup = TEGRA_SWGROUP_HDA,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x24,
+ },
+ },
+ }, {
+ .id = 0x16,
+ .name = "host1xdmar",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x17,
+ .name = "host1xr",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x1c,
+ .name = "msencsrd",
+ .swgroup = TEGRA_SWGROUP_MSENC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x23,
+ },
+ },
+ }, {
+ .id = 0x1d,
+ .name = "ppcsahbdmar",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
+ },
+ }, {
+ .id = 0x1e,
+ .name = "ppcsahbslvr",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
+ },
+ }, {
+ .id = 0x1f,
+ .name = "satar",
+ .swgroup = TEGRA_SWGROUP_SATA,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x350,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x65,
+ },
+ },
+ }, {
+ .id = 0x22,
+ .name = "vdebsevr",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x354,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4f,
+ },
+ },
+ }, {
+ .id = 0x23,
+ .name = "vdember",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x354,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x3d,
+ },
+ },
+ }, {
+ .id = 0x24,
+ .name = "vdemcer",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x358,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x66,
+ },
+ },
+ }, {
+ .id = 0x25,
+ .name = "vdetper",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x358,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xa5,
+ },
+ },
+ }, {
+ .id = 0x26,
+ .name = "mpcorelpr",
+ .swgroup = TEGRA_SWGROUP_MPCORELP,
+ .regs = {
+ .la = {
+ .reg = 0x324,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x27,
+ .name = "mpcorer",
+ .swgroup = TEGRA_SWGROUP_MPCORE,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x2b,
+ .name = "msencswr",
+ .swgroup = TEGRA_SWGROUP_MSENC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x31,
+ .name = "afiw",
+ .swgroup = TEGRA_SWGROUP_AFI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2e0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x32,
+ .name = "avpcarm7w",
+ .swgroup = TEGRA_SWGROUP_AVPC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x35,
+ .name = "hdaw",
+ .swgroup = TEGRA_SWGROUP_HDA,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x36,
+ .name = "host1xw",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x314,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x38,
+ .name = "mpcorelpw",
+ .swgroup = TEGRA_SWGROUP_MPCORELP,
+ .regs = {
+ .la = {
+ .reg = 0x324,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x39,
+ .name = "mpcorew",
+ .swgroup = TEGRA_SWGROUP_MPCORE,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x3b,
+ .name = "ppcsahbdmaw",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 27,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x3c,
+ .name = "ppcsahbslvw",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x3d,
+ .name = "sataw",
+ .swgroup = TEGRA_SWGROUP_SATA,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x350,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x65,
+ },
+ },
+ }, {
+ .id = 0x3e,
+ .name = "vdebsevw",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x35c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x3f,
+ .name = "vdedbgw",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x35c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x40,
+ .name = "vdembew",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x360,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x41,
+ .name = "vdetpmw",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x360,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x44,
+ .name = "ispra",
+ .swgroup = TEGRA_SWGROUP_ISP2,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x370,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x18,
+ },
+ },
+ }, {
+ .id = 0x46,
+ .name = "ispwa",
+ .swgroup = TEGRA_SWGROUP_ISP2,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x374,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x47,
+ .name = "ispwb",
+ .swgroup = TEGRA_SWGROUP_ISP2,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 7,
+ },
+ .la = {
+ .reg = 0x374,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x4a,
+ .name = "xusb_hostr",
+ .swgroup = TEGRA_SWGROUP_XUSB_HOST,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x37c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x39,
+ },
+ },
+ }, {
+ .id = 0x4b,
+ .name = "xusb_hostw",
+ .swgroup = TEGRA_SWGROUP_XUSB_HOST,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x37c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x4c,
+ .name = "xusb_devr",
+ .swgroup = TEGRA_SWGROUP_XUSB_DEV,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x380,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x39,
+ },
+ },
+ }, {
+ .id = 0x4d,
+ .name = "xusb_devw",
+ .swgroup = TEGRA_SWGROUP_XUSB_DEV,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x380,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x4e,
+ .name = "isprab",
+ .swgroup = TEGRA_SWGROUP_ISP2B,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x384,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x18,
+ },
+ },
+ }, {
+ .id = 0x50,
+ .name = "ispwab",
+ .swgroup = TEGRA_SWGROUP_ISP2B,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x388,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x51,
+ .name = "ispwbb",
+ .swgroup = TEGRA_SWGROUP_ISP2B,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x388,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x54,
+ .name = "tsecsrd",
+ .swgroup = TEGRA_SWGROUP_TSEC,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x390,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x9b,
+ },
+ },
+ }, {
+ .id = 0x55,
+ .name = "tsecswr",
+ .swgroup = TEGRA_SWGROUP_TSEC,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x390,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x56,
+ .name = "a9avpscr",
+ .swgroup = TEGRA_SWGROUP_A9AVP,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x3a4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x57,
+ .name = "a9avpscw",
+ .swgroup = TEGRA_SWGROUP_A9AVP,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x3a4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x58,
+ .name = "gpusrd",
+ .swgroup = TEGRA_SWGROUP_GPU,
+ .regs = {
+ .smmu = {
+ /* read-only */
+ .reg = 0x230,
+ .bit = 24,
+ },
+ .la = {
+ .reg = 0x3c8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
+ },
+ }, {
+ .id = 0x59,
+ .name = "gpuswr",
+ .swgroup = TEGRA_SWGROUP_GPU,
+ .regs = {
+ .smmu = {
+ /* read-only */
+ .reg = 0x230,
+ .bit = 25,
+ },
+ .la = {
+ .reg = 0x3c8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x5a,
+ .name = "displayt",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 26,
+ },
+ .la = {
+ .reg = 0x2f0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x60,
+ .name = "sdmmcra",
+ .swgroup = TEGRA_SWGROUP_SDMMC1A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x3b8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
+ },
+ }, {
+ .id = 0x61,
+ .name = "sdmmcraa",
+ .swgroup = TEGRA_SWGROUP_SDMMC2A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x3bc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
+ },
+ }, {
+ .id = 0x62,
+ .name = "sdmmcr",
+ .swgroup = TEGRA_SWGROUP_SDMMC3A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x3c0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
+ },
+ }, {
+ .id = 0x63,
+ .name = "sdmmcrab",
+ .swgroup = TEGRA_SWGROUP_SDMMC4A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x3c4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
+ },
+ }, {
+ .id = 0x64,
+ .name = "sdmmcwa",
+ .swgroup = TEGRA_SWGROUP_SDMMC1A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x3b8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x65,
+ .name = "sdmmcwaa",
+ .swgroup = TEGRA_SWGROUP_SDMMC2A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x3bc,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x66,
+ .name = "sdmmcw",
+ .swgroup = TEGRA_SWGROUP_SDMMC3A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x3c0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x67,
+ .name = "sdmmcwab",
+ .swgroup = TEGRA_SWGROUP_SDMMC4A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 7,
+ },
+ .la = {
+ .reg = 0x3c4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x6c,
+ .name = "vicsrd",
+ .swgroup = TEGRA_SWGROUP_VIC,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x394,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
+ },
+ }, {
+ .id = 0x6d,
+ .name = "vicswr",
+ .swgroup = TEGRA_SWGROUP_VIC,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x394,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x72,
+ .name = "viw",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x398,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x73,
+ .name = "displayd",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 19,
+ },
+ .la = {
+ .reg = 0x3c8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ },
+};
+
+static const struct tegra_smmu_swgroup tegra124_swgroups[] = {
+ { .name = "dc", .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 },
+ { .name = "dcb", .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 },
+ { .name = "afi", .swgroup = TEGRA_SWGROUP_AFI, .reg = 0x238 },
+ { .name = "avpc", .swgroup = TEGRA_SWGROUP_AVPC, .reg = 0x23c },
+ { .name = "hda", .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 },
+ { .name = "hc", .swgroup = TEGRA_SWGROUP_HC, .reg = 0x250 },
+ { .name = "msenc", .swgroup = TEGRA_SWGROUP_MSENC, .reg = 0x264 },
+ { .name = "ppcs", .swgroup = TEGRA_SWGROUP_PPCS, .reg = 0x270 },
+ { .name = "sata", .swgroup = TEGRA_SWGROUP_SATA, .reg = 0x274 },
+ { .name = "vde", .swgroup = TEGRA_SWGROUP_VDE, .reg = 0x27c },
+ { .name = "isp2", .swgroup = TEGRA_SWGROUP_ISP2, .reg = 0x258 },
+ { .name = "xusb_host", .swgroup = TEGRA_SWGROUP_XUSB_HOST, .reg = 0x288 },
+ { .name = "xusb_dev", .swgroup = TEGRA_SWGROUP_XUSB_DEV, .reg = 0x28c },
+ { .name = "isp2b", .swgroup = TEGRA_SWGROUP_ISP2B, .reg = 0xaa4 },
+ { .name = "tsec", .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 },
+ { .name = "a9avp", .swgroup = TEGRA_SWGROUP_A9AVP, .reg = 0x290 },
+ { .name = "gpu", .swgroup = TEGRA_SWGROUP_GPU, .reg = 0xaac },
+ { .name = "sdmmc1a", .swgroup = TEGRA_SWGROUP_SDMMC1A, .reg = 0xa94 },
+ { .name = "sdmmc2a", .swgroup = TEGRA_SWGROUP_SDMMC2A, .reg = 0xa98 },
+ { .name = "sdmmc3a", .swgroup = TEGRA_SWGROUP_SDMMC3A, .reg = 0xa9c },
+ { .name = "sdmmc4a", .swgroup = TEGRA_SWGROUP_SDMMC4A, .reg = 0xaa0 },
+ { .name = "vic", .swgroup = TEGRA_SWGROUP_VIC, .reg = 0x284 },
+ { .name = "vi", .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 },
+};
+
+static const unsigned int tegra124_group_drm[] = {
+ TEGRA_SWGROUP_DC,
+ TEGRA_SWGROUP_DCB,
+ TEGRA_SWGROUP_VIC,
+};
+
+static const struct tegra_smmu_group_soc tegra124_groups[] = {
+ {
+ .name = "drm",
+ .swgroups = tegra124_group_drm,
+ .num_swgroups = ARRAY_SIZE(tegra124_group_drm),
+ },
+};
+
+#define TEGRA124_MC_RESET(_name, _control, _status, _bit) \
+ { \
+ .name = #_name, \
+ .id = TEGRA124_MC_RESET_##_name, \
+ .control = _control, \
+ .status = _status, \
+ .bit = _bit, \
+ }
+
+static const struct tegra_mc_reset tegra124_mc_resets[] = {
+ TEGRA124_MC_RESET(AFI, 0x200, 0x204, 0),
+ TEGRA124_MC_RESET(AVPC, 0x200, 0x204, 1),
+ TEGRA124_MC_RESET(DC, 0x200, 0x204, 2),
+ TEGRA124_MC_RESET(DCB, 0x200, 0x204, 3),
+ TEGRA124_MC_RESET(HC, 0x200, 0x204, 6),
+ TEGRA124_MC_RESET(HDA, 0x200, 0x204, 7),
+ TEGRA124_MC_RESET(ISP2, 0x200, 0x204, 8),
+ TEGRA124_MC_RESET(MPCORE, 0x200, 0x204, 9),
+ TEGRA124_MC_RESET(MPCORELP, 0x200, 0x204, 10),
+ TEGRA124_MC_RESET(MSENC, 0x200, 0x204, 11),
+ TEGRA124_MC_RESET(PPCS, 0x200, 0x204, 14),
+ TEGRA124_MC_RESET(SATA, 0x200, 0x204, 15),
+ TEGRA124_MC_RESET(VDE, 0x200, 0x204, 16),
+ TEGRA124_MC_RESET(VI, 0x200, 0x204, 17),
+ TEGRA124_MC_RESET(VIC, 0x200, 0x204, 18),
+ TEGRA124_MC_RESET(XUSB_HOST, 0x200, 0x204, 19),
+ TEGRA124_MC_RESET(XUSB_DEV, 0x200, 0x204, 20),
+ TEGRA124_MC_RESET(TSEC, 0x200, 0x204, 21),
+ TEGRA124_MC_RESET(SDMMC1, 0x200, 0x204, 22),
+ TEGRA124_MC_RESET(SDMMC2, 0x200, 0x204, 23),
+ TEGRA124_MC_RESET(SDMMC3, 0x200, 0x204, 25),
+ TEGRA124_MC_RESET(SDMMC4, 0x970, 0x974, 0),
+ TEGRA124_MC_RESET(ISP2B, 0x970, 0x974, 1),
+ TEGRA124_MC_RESET(GPU, 0x970, 0x974, 2),
+};
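+
+/*
+ * Each TEGRA124_MC_RESET() entry above is shorthand for a full struct
+ * initializer; for example, TEGRA124_MC_RESET(AFI, 0x200, 0x204, 0)
+ * expands to:
+ *
+ *   {
+ *           .name = "AFI",
+ *           .id = TEGRA124_MC_RESET_AFI,
+ *           .control = 0x200,
+ *           .status = 0x204,
+ *           .bit = 0,
+ *   }
+ */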
+
+static int tegra124_mc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ /* TODO: program PTSA */
+ return 0;
+}
+
+static int tegra124_mc_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ /*
+ * ISO clients need to reserve extra bandwidth up-front because
+ * there could be high bandwidth pressure during initial filling
+ * of the client's FIFO buffers. Secondly, we need to account for
+ * the inefficiencies of the memory subsystem.
+ */
+ if (tag & TEGRA_MC_ICC_TAG_ISO)
+ peak_bw = tegra_mc_scale_percents(peak_bw, 400);
+
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
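+
+/*
+ * Worked example of the ISO headroom above (illustrative numbers, assuming
+ * tegra_mc_scale_percents() scales its argument by the given percentage):
+ * an ISO client with a peak request of 1000 units is aggregated as
+ * tegra_mc_scale_percents(1000, 400) = 4000 units, i.e. a 4x reservation,
+ * while non-ISO peak requests are aggregated unscaled.
+ */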
+
+static struct icc_node_data *
+tegra124_mc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
+ const struct tegra_mc_client *client;
+ unsigned int i, idx = spec->args[0];
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ list_for_each_entry(node, &mc->provider.nodes, node_list) {
+ if (node->id != idx)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ client = &mc->soc->clients[idx];
+ ndata->node = node;
+
+ switch (client->swgroup) {
+ case TEGRA_SWGROUP_DC:
+ case TEGRA_SWGROUP_DCB:
+ case TEGRA_SWGROUP_PTC:
+ case TEGRA_SWGROUP_VI:
+ /* these clients are isochronous by default */
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ break;
+
+ default:
+ ndata->tag = TEGRA_MC_ICC_TAG_DEFAULT;
+ break;
+ }
+
+ return ndata;
+ }
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ if (mc->soc->clients[i].id == idx)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ dev_err(mc->dev, "invalid ICC client ID %u\n", idx);
+
+ return ERR_PTR(-EINVAL);
+}
+
+static const struct tegra_mc_icc_ops tegra124_mc_icc_ops = {
+ .xlate_extended = tegra124_mc_of_icc_xlate_extended,
+ .aggregate = tegra124_mc_icc_aggregate,
+ .set = tegra124_mc_icc_set,
+};
+
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+static const unsigned long tegra124_mc_emem_regs[] = {
+ MC_EMEM_ARB_CFG,
+ MC_EMEM_ARB_OUTSTANDING_REQ,
+ MC_EMEM_ARB_TIMING_RCD,
+ MC_EMEM_ARB_TIMING_RP,
+ MC_EMEM_ARB_TIMING_RC,
+ MC_EMEM_ARB_TIMING_RAS,
+ MC_EMEM_ARB_TIMING_FAW,
+ MC_EMEM_ARB_TIMING_RRD,
+ MC_EMEM_ARB_TIMING_RAP2PRE,
+ MC_EMEM_ARB_TIMING_WAP2PRE,
+ MC_EMEM_ARB_TIMING_R2R,
+ MC_EMEM_ARB_TIMING_W2W,
+ MC_EMEM_ARB_TIMING_R2W,
+ MC_EMEM_ARB_TIMING_W2R,
+ MC_EMEM_ARB_DA_TURNS,
+ MC_EMEM_ARB_DA_COVERS,
+ MC_EMEM_ARB_MISC0,
+ MC_EMEM_ARB_MISC1,
+ MC_EMEM_ARB_RING1_THROTTLE
+};
+
+static const struct tegra_smmu_soc tegra124_smmu_soc = {
+ .clients = tegra124_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra124_mc_clients),
+ .swgroups = tegra124_swgroups,
+ .num_swgroups = ARRAY_SIZE(tegra124_swgroups),
+ .groups = tegra124_groups,
+ .num_groups = ARRAY_SIZE(tegra124_groups),
+ .supports_round_robin_arbitration = true,
+ .supports_request_limit = true,
+ .num_tlb_lines = 32,
+ .num_asids = 128,
+};
+
+const struct tegra_mc_soc tegra124_mc_soc = {
+ .clients = tegra124_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra124_mc_clients),
+ .num_address_bits = 34,
+ .atom_size = 32,
+ .client_id_mask = 0x7f,
+ .smmu = &tegra124_smmu_soc,
+ .emem_regs = tegra124_mc_emem_regs,
+ .num_emem_regs = ARRAY_SIZE(tegra124_mc_emem_regs),
+ .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
+ MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
+ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
+ .reset_ops = &tegra_mc_reset_ops_common,
+ .resets = tegra124_mc_resets,
+ .num_resets = ARRAY_SIZE(tegra124_mc_resets),
+ .icc_ops = &tegra124_mc_icc_ops,
+ .ops = &tegra30_mc_ops,
+};
+#endif /* CONFIG_ARCH_TEGRA_124_SOC */
+
+#ifdef CONFIG_ARCH_TEGRA_132_SOC
+static const struct tegra_smmu_soc tegra132_smmu_soc = {
+ .clients = tegra124_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra124_mc_clients),
+ .swgroups = tegra124_swgroups,
+ .num_swgroups = ARRAY_SIZE(tegra124_swgroups),
+ .groups = tegra124_groups,
+ .num_groups = ARRAY_SIZE(tegra124_groups),
+ .supports_round_robin_arbitration = true,
+ .supports_request_limit = true,
+ .num_tlb_lines = 32,
+ .num_asids = 128,
+};
+
+const struct tegra_mc_soc tegra132_mc_soc = {
+ .clients = tegra124_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra124_mc_clients),
+ .num_address_bits = 34,
+ .atom_size = 32,
+ .client_id_mask = 0x7f,
+ .smmu = &tegra132_smmu_soc,
+ .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
+ MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
+ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
+ .reset_ops = &tegra_mc_reset_ops_common,
+ .resets = tegra124_mc_resets,
+ .num_resets = ARRAY_SIZE(tegra124_mc_resets),
+ .icc_ops = &tegra124_mc_icc_ops,
+ .ops = &tegra30_mc_ops,
+};
+#endif /* CONFIG_ARCH_TEGRA_132_SOC */
diff --git a/drivers/memory/tegra/tegra186-emc.c b/drivers/memory/tegra/tegra186-emc.c
new file mode 100644
index 0000000000..4007f4e16d
--- /dev/null
+++ b/drivers/memory/tegra/tegra186-emc.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/bpmp.h>
+#include "mc.h"
+
+struct tegra186_emc_dvfs {
+ unsigned long latency;
+ unsigned long rate;
+};
+
+struct tegra186_emc {
+ struct tegra_bpmp *bpmp;
+ struct device *dev;
+ struct clk *clk;
+
+ struct tegra186_emc_dvfs *dvfs;
+ unsigned int num_dvfs;
+
+ struct {
+ struct dentry *root;
+ unsigned long min_rate;
+ unsigned long max_rate;
+ } debugfs;
+
+ struct icc_provider provider;
+};
+
+static inline struct tegra186_emc *to_tegra186_emc(struct icc_provider *provider)
+{
+ return container_of(provider, struct tegra186_emc, provider);
+}
+
+/*
+ * debugfs interface
+ *
+ * The memory controller driver exposes some files in debugfs that can be used
+ * to control the EMC frequency. The top-level directory can be found here:
+ *
+ * /sys/kernel/debug/emc
+ *
+ * It contains the following files:
+ *
+ * - available_rates: This file contains a list of valid, space-separated
+ * EMC frequencies.
+ *
+ * - min_rate: Writing a value to this file sets the given frequency as the
+ * floor of the permitted range. If this is higher than the currently
+ * configured EMC frequency, this will cause the frequency to be
+ * increased so that it stays within the valid range.
+ *
+ * - max_rate: Similarly to the min_rate file, writing a value to this file
+ * sets the given frequency as the ceiling of the permitted range. If
+ * the value is lower than the currently configured EMC frequency, this
+ * will cause the frequency to be decreased so that it stays within the
+ * valid range.
+ */
+
+static bool tegra186_emc_validate_rate(struct tegra186_emc *emc,
+ unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < emc->num_dvfs; i++)
+ if (rate == emc->dvfs[i].rate)
+ return true;
+
+ return false;
+}
+
+static int tegra186_emc_debug_available_rates_show(struct seq_file *s,
+ void *data)
+{
+ struct tegra186_emc *emc = s->private;
+ const char *prefix = "";
+ unsigned int i;
+
+ for (i = 0; i < emc->num_dvfs; i++) {
+ seq_printf(s, "%s%lu", prefix, emc->dvfs[i].rate);
+ prefix = " ";
+ }
+
+ seq_puts(s, "\n");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(tegra186_emc_debug_available_rates);
+
+static int tegra186_emc_debug_min_rate_get(void *data, u64 *rate)
+{
+ struct tegra186_emc *emc = data;
+
+ *rate = emc->debugfs.min_rate;
+
+ return 0;
+}
+
+static int tegra186_emc_debug_min_rate_set(void *data, u64 rate)
+{
+ struct tegra186_emc *emc = data;
+ int err;
+
+ if (!tegra186_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = clk_set_min_rate(emc->clk, rate);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.min_rate = rate;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(tegra186_emc_debug_min_rate_fops,
+ tegra186_emc_debug_min_rate_get,
+ tegra186_emc_debug_min_rate_set, "%llu\n");
+
+static int tegra186_emc_debug_max_rate_get(void *data, u64 *rate)
+{
+ struct tegra186_emc *emc = data;
+
+ *rate = emc->debugfs.max_rate;
+
+ return 0;
+}
+
+static int tegra186_emc_debug_max_rate_set(void *data, u64 rate)
+{
+ struct tegra186_emc *emc = data;
+ int err;
+
+ if (!tegra186_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = clk_set_max_rate(emc->clk, rate);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.max_rate = rate;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(tegra186_emc_debug_max_rate_fops,
+ tegra186_emc_debug_max_rate_get,
+ tegra186_emc_debug_max_rate_set, "%llu\n");
+
+static int tegra186_emc_get_emc_dvfs_latency(struct tegra186_emc *emc)
+{
+ struct mrq_emc_dvfs_latency_response response;
+ struct tegra_bpmp_message msg;
+ unsigned int i;
+ int err;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_EMC_DVFS_LATENCY;
+ msg.tx.data = NULL;
+ msg.tx.size = 0;
+ msg.rx.data = &response;
+ msg.rx.size = sizeof(response);
+
+ err = tegra_bpmp_transfer(emc->bpmp, &msg);
+ if (err < 0) {
+ dev_err(emc->dev, "failed to EMC DVFS pairs: %d\n", err);
+ return err;
+ }
+ if (msg.rx.ret < 0) {
+ dev_err(emc->dev, "EMC DVFS MRQ failed: %d (BPMP error code)\n", msg.rx.ret);
+ return -EINVAL;
+ }
+
+ emc->debugfs.min_rate = ULONG_MAX;
+ emc->debugfs.max_rate = 0;
+
+ emc->num_dvfs = response.num_pairs;
+
+ emc->dvfs = devm_kmalloc_array(emc->dev, emc->num_dvfs, sizeof(*emc->dvfs), GFP_KERNEL);
+ if (!emc->dvfs)
+ return -ENOMEM;
+
+ dev_dbg(emc->dev, "%u DVFS pairs:\n", emc->num_dvfs);
+
+ for (i = 0; i < emc->num_dvfs; i++) {
+ emc->dvfs[i].rate = response.pairs[i].freq * 1000;
+ emc->dvfs[i].latency = response.pairs[i].latency;
+
+ if (emc->dvfs[i].rate < emc->debugfs.min_rate)
+ emc->debugfs.min_rate = emc->dvfs[i].rate;
+
+ if (emc->dvfs[i].rate > emc->debugfs.max_rate)
+ emc->debugfs.max_rate = emc->dvfs[i].rate;
+
+ dev_dbg(emc->dev, " %2u: %lu Hz -> %lu us\n", i,
+ emc->dvfs[i].rate, emc->dvfs[i].latency);
+ }
+
+ err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate, emc->debugfs.max_rate);
+ if (err < 0) {
+ dev_err(emc->dev, "failed to set rate range [%lu-%lu] for %pC\n",
+ emc->debugfs.min_rate, emc->debugfs.max_rate, emc->clk);
+ return err;
+ }
+
+ emc->debugfs.root = debugfs_create_dir("emc", NULL);
+ debugfs_create_file("available_rates", 0444, emc->debugfs.root, emc,
+ &tegra186_emc_debug_available_rates_fops);
+ debugfs_create_file("min_rate", 0644, emc->debugfs.root, emc,
+ &tegra186_emc_debug_min_rate_fops);
+ debugfs_create_file("max_rate", 0644, emc->debugfs.root, emc,
+ &tegra186_emc_debug_max_rate_fops);
+
+ return 0;
+}
+
+/*
+ * tegra_emc_icc_set_bw() - Set BW API for EMC provider
+ * @src: ICC node for External Memory Controller (EMC)
+ * @dst: ICC node for External Memory (DRAM)
+ *
+ * Do nothing here, as the bandwidth info is now passed to the BPMP-FW in the
+ * BW set function of the MC driver. BPMP-FW sets the final frequency based
+ * on the passed values.
+ */
+static int tegra_emc_icc_set_bw(struct icc_node *src, struct icc_node *dst)
+{
+ return 0;
+}
+
+static struct icc_node *
+tegra_emc_of_icc_xlate(struct of_phandle_args *spec, void *data)
+{
+ struct icc_provider *provider = data;
+ struct icc_node *node;
+
+ /* External Memory is the only possible ICC route */
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ if (node->id != TEGRA_ICC_EMEM)
+ continue;
+
+ return node;
+ }
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+static int tegra_emc_icc_get_init_bw(struct icc_node *node, u32 *avg, u32 *peak)
+{
+ *avg = 0;
+ *peak = 0;
+
+ return 0;
+}
+
+static int tegra_emc_interconnect_init(struct tegra186_emc *emc)
+{
+ struct tegra_mc *mc = dev_get_drvdata(emc->dev->parent);
+ const struct tegra_mc_soc *soc = mc->soc;
+ struct icc_node *node;
+ int err;
+
+ emc->provider.dev = emc->dev;
+ emc->provider.set = tegra_emc_icc_set_bw;
+ emc->provider.data = &emc->provider;
+ emc->provider.aggregate = soc->icc_ops->aggregate;
+ emc->provider.xlate = tegra_emc_of_icc_xlate;
+ emc->provider.get_bw = tegra_emc_icc_get_init_bw;
+
+ icc_provider_init(&emc->provider);
+
+ /* create External Memory Controller node */
+ node = icc_node_create(TEGRA_ICC_EMC);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto err_msg;
+ }
+
+ node->name = "External Memory Controller";
+ icc_node_add(node, &emc->provider);
+
+ /* link External Memory Controller to External Memory (DRAM) */
+ err = icc_link_create(node, TEGRA_ICC_EMEM);
+ if (err)
+ goto remove_nodes;
+
+ /* create External Memory node */
+ node = icc_node_create(TEGRA_ICC_EMEM);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto remove_nodes;
+ }
+
+ node->name = "External Memory (DRAM)";
+ icc_node_add(node, &emc->provider);
+
+ err = icc_provider_register(&emc->provider);
+ if (err)
+ goto remove_nodes;
+
+ return 0;
+
+remove_nodes:
+ icc_nodes_remove(&emc->provider);
+err_msg:
+ dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
+
+ return err;
+}
+
+static int tegra186_emc_probe(struct platform_device *pdev)
+{
+ struct tegra_mc *mc = dev_get_drvdata(pdev->dev.parent);
+ struct tegra186_emc *emc;
+ int err;
+
+ emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
+ if (!emc)
+ return -ENOMEM;
+
+ emc->bpmp = tegra_bpmp_get(&pdev->dev);
+ if (IS_ERR(emc->bpmp))
+ return dev_err_probe(&pdev->dev, PTR_ERR(emc->bpmp), "failed to get BPMP\n");
+
+ emc->clk = devm_clk_get(&pdev->dev, "emc");
+ if (IS_ERR(emc->clk)) {
+ err = PTR_ERR(emc->clk);
+ dev_err(&pdev->dev, "failed to get EMC clock: %d\n", err);
+ goto put_bpmp;
+ }
+
+ platform_set_drvdata(pdev, emc);
+ emc->dev = &pdev->dev;
+
+ if (tegra_bpmp_mrq_is_supported(emc->bpmp, MRQ_EMC_DVFS_LATENCY)) {
+ err = tegra186_emc_get_emc_dvfs_latency(emc);
+ if (err)
+ goto put_bpmp;
+ }
+
+ if (mc && mc->soc->icc_ops) {
+ if (tegra_bpmp_mrq_is_supported(emc->bpmp, MRQ_BWMGR_INT)) {
+ mc->bwmgr_mrq_supported = true;
+
+ /*
+ * The MC driver's probe can't get a BPMP reference because it is
+ * probed earlier than BPMP. So, save the BPMP reference obtained
+ * from the EMC DT node in mc->bpmp and use it in the MC's icc_set
+ * hook.
+ */
+ mc->bpmp = emc->bpmp;
+ barrier();
+ }
+
+ /*
+ * Initialize the ICC even if BPMP-FW doesn't support 'MRQ_BWMGR_INT'.
+ * The MC driver then checks the 'mc->bwmgr_mrq_supported' flag and
+ * returns -EINVAL instead of passing the request on to BPMP-FW when
+ * a client later makes a BW request via an 'icc_set_bw()' call.
+ */
+ err = tegra_emc_interconnect_init(emc);
+ if (err) {
+ mc->bpmp = NULL;
+ goto put_bpmp;
+ }
+ }
+
+ return 0;
+
+put_bpmp:
+ tegra_bpmp_put(emc->bpmp);
+ return err;
+}
+
+static int tegra186_emc_remove(struct platform_device *pdev)
+{
+ struct tegra_mc *mc = dev_get_drvdata(pdev->dev.parent);
+ struct tegra186_emc *emc = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(emc->debugfs.root);
+
+ mc->bpmp = NULL;
+ tegra_bpmp_put(emc->bpmp);
+
+ return 0;
+}
+
+static const struct of_device_id tegra186_emc_of_match[] = {
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
+ { .compatible = "nvidia,tegra186-emc" },
+#endif
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
+ { .compatible = "nvidia,tegra194-emc" },
+#endif
+#if defined(CONFIG_ARCH_TEGRA_234_SOC)
+ { .compatible = "nvidia,tegra234-emc" },
+#endif
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tegra186_emc_of_match);
+
+static struct platform_driver tegra186_emc_driver = {
+ .driver = {
+ .name = "tegra186-emc",
+ .of_match_table = tegra186_emc_of_match,
+ .suppress_bind_attrs = true,
+ .sync_state = icc_sync_state,
+ },
+ .probe = tegra186_emc_probe,
+ .remove = tegra186_emc_remove,
+};
+module_platform_driver(tegra186_emc_driver);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra186 External Memory Controller driver");
diff --git a/drivers/memory/tegra/tegra186.c b/drivers/memory/tegra/tegra186.c
new file mode 100644
index 0000000000..533f85a4b2
--- /dev/null
+++ b/drivers/memory/tegra/tegra186.c
@@ -0,0 +1,884 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2021 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/mc.h>
+
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
+#include <dt-bindings/memory/tegra186-mc.h>
+#endif
+
+#include "mc.h"
+
+#define MC_SID_STREAMID_OVERRIDE_MASK GENMASK(7, 0)
+#define MC_SID_STREAMID_SECURITY_WRITE_ACCESS_DISABLED BIT(16)
+#define MC_SID_STREAMID_SECURITY_OVERRIDE BIT(8)
+
+static int tegra186_mc_probe(struct tegra_mc *mc)
+{
+ struct platform_device *pdev = to_platform_device(mc->dev);
+ unsigned int i;
+ char name[8];
+ int err;
+
+ mc->bcast_ch_regs = devm_platform_ioremap_resource_byname(pdev, "broadcast");
+ if (IS_ERR(mc->bcast_ch_regs)) {
+ if (PTR_ERR(mc->bcast_ch_regs) == -EINVAL) {
+ dev_warn(&pdev->dev,
+ "Broadcast channel is missing, please update your device-tree\n");
+ mc->bcast_ch_regs = NULL;
+ goto populate;
+ }
+
+ return PTR_ERR(mc->bcast_ch_regs);
+ }
+
+ mc->ch_regs = devm_kcalloc(mc->dev, mc->soc->num_channels, sizeof(*mc->ch_regs),
+ GFP_KERNEL);
+ if (!mc->ch_regs)
+ return -ENOMEM;
+
+ for (i = 0; i < mc->soc->num_channels; i++) {
+ snprintf(name, sizeof(name), "ch%u", i);
+
+ mc->ch_regs[i] = devm_platform_ioremap_resource_byname(pdev, name);
+ if (IS_ERR(mc->ch_regs[i]))
+ return PTR_ERR(mc->ch_regs[i]);
+ }
+
+populate:
+ err = of_platform_populate(mc->dev->of_node, NULL, NULL, mc->dev);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void tegra186_mc_remove(struct tegra_mc *mc)
+{
+ of_platform_depopulate(mc->dev);
+}
+
+#if IS_ENABLED(CONFIG_IOMMU_API)
+static void tegra186_mc_client_sid_override(struct tegra_mc *mc,
+ const struct tegra_mc_client *client,
+ unsigned int sid)
+{
+ u32 value, old;
+
+ value = readl(mc->regs + client->regs.sid.security);
+ if ((value & MC_SID_STREAMID_SECURITY_OVERRIDE) == 0) {
+ /*
+ * If the secure firmware has locked down the override
+ * for this memory client, there's nothing we can do here.
+ */
+ if (value & MC_SID_STREAMID_SECURITY_WRITE_ACCESS_DISABLED)
+ return;
+
+ /*
+ * Otherwise, try to set the override itself. Typically the
+ * secure firmware will never have set this configuration.
+ * Instead, it will either have disabled write access to
+ * this field, or it will already have set an explicit
+ * override itself.
+ */
+ value |= MC_SID_STREAMID_SECURITY_OVERRIDE;
+ writel(value, mc->regs + client->regs.sid.security);
+
+ /* check whether the write stuck or the field is locked down */
+ value = readl(mc->regs + client->regs.sid.security);
+ WARN_ON((value & MC_SID_STREAMID_SECURITY_OVERRIDE) == 0);
+ }
+
+ value = readl(mc->regs + client->regs.sid.override);
+ old = value & MC_SID_STREAMID_OVERRIDE_MASK;
+
+ if (old != sid) {
+ dev_dbg(mc->dev, "overriding SID %x for %s with %x\n", old,
+ client->name, sid);
+ writel(sid, mc->regs + client->regs.sid.override);
+ }
+}
+#endif
+
+static int tegra186_mc_probe_device(struct tegra_mc *mc, struct device *dev)
+{
+#if IS_ENABLED(CONFIG_IOMMU_API)
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct of_phandle_args args;
+ unsigned int i, index = 0;
+
+ while (!of_parse_phandle_with_args(dev->of_node, "interconnects", "#interconnect-cells",
+ index, &args)) {
+ if (args.np == mc->dev->of_node && args.args_count != 0) {
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ const struct tegra_mc_client *client = &mc->soc->clients[i];
+
+ if (client->id == args.args[0]) {
+ u32 sid = fwspec->ids[0] & MC_SID_STREAMID_OVERRIDE_MASK;
+
+ tegra186_mc_client_sid_override(mc, client, sid);
+ }
+ }
+ }
+
+ index++;
+ }
+#endif
+
+ return 0;
+}
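+
+/*
+ * The loop above walks the "interconnects" property of a client device. An
+ * illustrative (hypothetical) consumer node that would trigger the SID
+ * override for the SDMMC1 read and write clients:
+ *
+ *   mmc@3400000 {
+ *           interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCRA &emc>,
+ *                           <&mc TEGRA186_MEMORY_CLIENT_SDMMCWA &emc>;
+ *           interconnect-names = "dma-mem", "write";
+ *   };
+ */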
+
+const struct tegra_mc_ops tegra186_mc_ops = {
+ .probe = tegra186_mc_probe,
+ .remove = tegra186_mc_remove,
+ .probe_device = tegra186_mc_probe_device,
+ .handle_irq = tegra30_mc_handle_irq,
+};
+
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
+static const struct tegra_mc_client tegra186_mc_clients[] = {
+ {
+ .id = TEGRA186_MEMORY_CLIENT_PTCR,
+ .name = "ptcr",
+ .sid = TEGRA186_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x000,
+ .security = 0x004,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_AFIR,
+ .name = "afir",
+ .sid = TEGRA186_SID_AFI,
+ .regs = {
+ .sid = {
+ .override = 0x070,
+ .security = 0x074,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_HDAR,
+ .name = "hdar",
+ .sid = TEGRA186_SID_HDA,
+ .regs = {
+ .sid = {
+ .override = 0x0a8,
+ .security = 0x0ac,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_HOST1XDMAR,
+ .name = "host1xdmar",
+ .sid = TEGRA186_SID_HOST1X,
+ .regs = {
+ .sid = {
+ .override = 0x0b0,
+ .security = 0x0b4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_NVENCSRD,
+ .name = "nvencsrd",
+ .sid = TEGRA186_SID_NVENC,
+ .regs = {
+ .sid = {
+ .override = 0x0e0,
+ .security = 0x0e4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SATAR,
+ .name = "satar",
+ .sid = TEGRA186_SID_SATA,
+ .regs = {
+ .sid = {
+ .override = 0x0f8,
+ .security = 0x0fc,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_MPCORER,
+ .name = "mpcorer",
+ .sid = TEGRA186_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x138,
+ .security = 0x13c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_NVENCSWR,
+ .name = "nvencswr",
+ .sid = TEGRA186_SID_NVENC,
+ .regs = {
+ .sid = {
+ .override = 0x158,
+ .security = 0x15c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_AFIW,
+ .name = "afiw",
+ .sid = TEGRA186_SID_AFI,
+ .regs = {
+ .sid = {
+ .override = 0x188,
+ .security = 0x18c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_HDAW,
+ .name = "hdaw",
+ .sid = TEGRA186_SID_HDA,
+ .regs = {
+ .sid = {
+ .override = 0x1a8,
+ .security = 0x1ac,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_MPCOREW,
+ .name = "mpcorew",
+ .sid = TEGRA186_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x1c8,
+ .security = 0x1cc,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SATAW,
+ .name = "sataw",
+ .sid = TEGRA186_SID_SATA,
+ .regs = {
+ .sid = {
+ .override = 0x1e8,
+ .security = 0x1ec,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_ISPRA,
+ .name = "ispra",
+ .sid = TEGRA186_SID_ISP,
+ .regs = {
+ .sid = {
+ .override = 0x220,
+ .security = 0x224,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_ISPWA,
+ .name = "ispwa",
+ .sid = TEGRA186_SID_ISP,
+ .regs = {
+ .sid = {
+ .override = 0x230,
+ .security = 0x234,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_ISPWB,
+ .name = "ispwb",
+ .sid = TEGRA186_SID_ISP,
+ .regs = {
+ .sid = {
+ .override = 0x238,
+ .security = 0x23c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_XUSB_HOSTR,
+ .name = "xusb_hostr",
+ .sid = TEGRA186_SID_XUSB_HOST,
+ .regs = {
+ .sid = {
+ .override = 0x250,
+ .security = 0x254,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_XUSB_HOSTW,
+ .name = "xusb_hostw",
+ .sid = TEGRA186_SID_XUSB_HOST,
+ .regs = {
+ .sid = {
+ .override = 0x258,
+ .security = 0x25c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_XUSB_DEVR,
+ .name = "xusb_devr",
+ .sid = TEGRA186_SID_XUSB_DEV,
+ .regs = {
+ .sid = {
+ .override = 0x260,
+ .security = 0x264,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_XUSB_DEVW,
+ .name = "xusb_devw",
+ .sid = TEGRA186_SID_XUSB_DEV,
+ .regs = {
+ .sid = {
+ .override = 0x268,
+ .security = 0x26c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_TSECSRD,
+ .name = "tsecsrd",
+ .sid = TEGRA186_SID_TSEC,
+ .regs = {
+ .sid = {
+ .override = 0x2a0,
+ .security = 0x2a4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_TSECSWR,
+ .name = "tsecswr",
+ .sid = TEGRA186_SID_TSEC,
+ .regs = {
+ .sid = {
+ .override = 0x2a8,
+ .security = 0x2ac,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_GPUSRD,
+ .name = "gpusrd",
+ .sid = TEGRA186_SID_GPU,
+ .regs = {
+ .sid = {
+ .override = 0x2c0,
+ .security = 0x2c4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_GPUSWR,
+ .name = "gpuswr",
+ .sid = TEGRA186_SID_GPU,
+ .regs = {
+ .sid = {
+ .override = 0x2c8,
+ .security = 0x2cc,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCRA,
+ .name = "sdmmcra",
+ .sid = TEGRA186_SID_SDMMC1,
+ .regs = {
+ .sid = {
+ .override = 0x300,
+ .security = 0x304,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCRAA,
+ .name = "sdmmcraa",
+ .sid = TEGRA186_SID_SDMMC2,
+ .regs = {
+ .sid = {
+ .override = 0x308,
+ .security = 0x30c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCR,
+ .name = "sdmmcr",
+ .sid = TEGRA186_SID_SDMMC3,
+ .regs = {
+ .sid = {
+ .override = 0x310,
+ .security = 0x314,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCRAB,
+ .name = "sdmmcrab",
+ .sid = TEGRA186_SID_SDMMC4,
+ .regs = {
+ .sid = {
+ .override = 0x318,
+ .security = 0x31c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCWA,
+ .name = "sdmmcwa",
+ .sid = TEGRA186_SID_SDMMC1,
+ .regs = {
+ .sid = {
+ .override = 0x320,
+ .security = 0x324,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCWAA,
+ .name = "sdmmcwaa",
+ .sid = TEGRA186_SID_SDMMC2,
+ .regs = {
+ .sid = {
+ .override = 0x328,
+ .security = 0x32c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCW,
+ .name = "sdmmcw",
+ .sid = TEGRA186_SID_SDMMC3,
+ .regs = {
+ .sid = {
+ .override = 0x330,
+ .security = 0x334,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SDMMCWAB,
+ .name = "sdmmcwab",
+ .sid = TEGRA186_SID_SDMMC4,
+ .regs = {
+ .sid = {
+ .override = 0x338,
+ .security = 0x33c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_VICSRD,
+ .name = "vicsrd",
+ .sid = TEGRA186_SID_VIC,
+ .regs = {
+ .sid = {
+ .override = 0x360,
+ .security = 0x364,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_VICSWR,
+ .name = "vicswr",
+ .sid = TEGRA186_SID_VIC,
+ .regs = {
+ .sid = {
+ .override = 0x368,
+ .security = 0x36c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_VIW,
+ .name = "viw",
+ .sid = TEGRA186_SID_VI,
+ .regs = {
+ .sid = {
+ .override = 0x390,
+ .security = 0x394,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_NVDECSRD,
+ .name = "nvdecsrd",
+ .sid = TEGRA186_SID_NVDEC,
+ .regs = {
+ .sid = {
+ .override = 0x3c0,
+ .security = 0x3c4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_NVDECSWR,
+ .name = "nvdecswr",
+ .sid = TEGRA186_SID_NVDEC,
+ .regs = {
+ .sid = {
+ .override = 0x3c8,
+ .security = 0x3cc,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_APER,
+ .name = "aper",
+ .sid = TEGRA186_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x3d0,
+ .security = 0x3d4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_APEW,
+ .name = "apew",
+ .sid = TEGRA186_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x3d8,
+ .security = 0x3dc,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_NVJPGSRD,
+ .name = "nvjpgsrd",
+ .sid = TEGRA186_SID_NVJPG,
+ .regs = {
+ .sid = {
+ .override = 0x3f0,
+ .security = 0x3f4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_NVJPGSWR,
+ .name = "nvjpgswr",
+ .sid = TEGRA186_SID_NVJPG,
+ .regs = {
+ .sid = {
+ .override = 0x3f8,
+ .security = 0x3fc,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SESRD,
+ .name = "sesrd",
+ .sid = TEGRA186_SID_SE,
+ .regs = {
+ .sid = {
+ .override = 0x400,
+ .security = 0x404,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SESWR,
+ .name = "seswr",
+ .sid = TEGRA186_SID_SE,
+ .regs = {
+ .sid = {
+ .override = 0x408,
+ .security = 0x40c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_ETRR,
+ .name = "etrr",
+ .sid = TEGRA186_SID_ETR,
+ .regs = {
+ .sid = {
+ .override = 0x420,
+ .security = 0x424,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_ETRW,
+ .name = "etrw",
+ .sid = TEGRA186_SID_ETR,
+ .regs = {
+ .sid = {
+ .override = 0x428,
+ .security = 0x42c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_TSECSRDB,
+ .name = "tsecsrdb",
+ .sid = TEGRA186_SID_TSECB,
+ .regs = {
+ .sid = {
+ .override = 0x430,
+ .security = 0x434,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_TSECSWRB,
+ .name = "tsecswrb",
+ .sid = TEGRA186_SID_TSECB,
+ .regs = {
+ .sid = {
+ .override = 0x438,
+ .security = 0x43c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_GPUSRD2,
+ .name = "gpusrd2",
+ .sid = TEGRA186_SID_GPU,
+ .regs = {
+ .sid = {
+ .override = 0x440,
+ .security = 0x444,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_GPUSWR2,
+ .name = "gpuswr2",
+ .sid = TEGRA186_SID_GPU,
+ .regs = {
+ .sid = {
+ .override = 0x448,
+ .security = 0x44c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_AXISR,
+ .name = "axisr",
+ .sid = TEGRA186_SID_GPCDMA_0,
+ .regs = {
+ .sid = {
+ .override = 0x460,
+ .security = 0x464,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_AXISW,
+ .name = "axisw",
+ .sid = TEGRA186_SID_GPCDMA_0,
+ .regs = {
+ .sid = {
+ .override = 0x468,
+ .security = 0x46c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_EQOSR,
+ .name = "eqosr",
+ .sid = TEGRA186_SID_EQOS,
+ .regs = {
+ .sid = {
+ .override = 0x470,
+ .security = 0x474,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_EQOSW,
+ .name = "eqosw",
+ .sid = TEGRA186_SID_EQOS,
+ .regs = {
+ .sid = {
+ .override = 0x478,
+ .security = 0x47c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_UFSHCR,
+ .name = "ufshcr",
+ .sid = TEGRA186_SID_UFSHC,
+ .regs = {
+ .sid = {
+ .override = 0x480,
+ .security = 0x484,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_UFSHCW,
+ .name = "ufshcw",
+ .sid = TEGRA186_SID_UFSHC,
+ .regs = {
+ .sid = {
+ .override = 0x488,
+ .security = 0x48c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_NVDISPLAYR,
+ .name = "nvdisplayr",
+ .sid = TEGRA186_SID_NVDISPLAY,
+ .regs = {
+ .sid = {
+ .override = 0x490,
+ .security = 0x494,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_BPMPR,
+ .name = "bpmpr",
+ .sid = TEGRA186_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x498,
+ .security = 0x49c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_BPMPW,
+ .name = "bpmpw",
+ .sid = TEGRA186_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4a0,
+ .security = 0x4a4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_BPMPDMAR,
+ .name = "bpmpdmar",
+ .sid = TEGRA186_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4a8,
+ .security = 0x4ac,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_BPMPDMAW,
+ .name = "bpmpdmaw",
+ .sid = TEGRA186_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4b0,
+ .security = 0x4b4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_AONR,
+ .name = "aonr",
+ .sid = TEGRA186_SID_AON,
+ .regs = {
+ .sid = {
+ .override = 0x4b8,
+ .security = 0x4bc,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_AONW,
+ .name = "aonw",
+ .sid = TEGRA186_SID_AON,
+ .regs = {
+ .sid = {
+ .override = 0x4c0,
+ .security = 0x4c4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_AONDMAR,
+ .name = "aondmar",
+ .sid = TEGRA186_SID_AON,
+ .regs = {
+ .sid = {
+ .override = 0x4c8,
+ .security = 0x4cc,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_AONDMAW,
+ .name = "aondmaw",
+ .sid = TEGRA186_SID_AON,
+ .regs = {
+ .sid = {
+ .override = 0x4d0,
+ .security = 0x4d4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SCER,
+ .name = "scer",
+ .sid = TEGRA186_SID_SCE,
+ .regs = {
+ .sid = {
+ .override = 0x4d8,
+ .security = 0x4dc,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SCEW,
+ .name = "scew",
+ .sid = TEGRA186_SID_SCE,
+ .regs = {
+ .sid = {
+ .override = 0x4e0,
+ .security = 0x4e4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SCEDMAR,
+ .name = "scedmar",
+ .sid = TEGRA186_SID_SCE,
+ .regs = {
+ .sid = {
+ .override = 0x4e8,
+ .security = 0x4ec,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_SCEDMAW,
+ .name = "scedmaw",
+ .sid = TEGRA186_SID_SCE,
+ .regs = {
+ .sid = {
+ .override = 0x4f0,
+ .security = 0x4f4,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_APEDMAR,
+ .name = "apedmar",
+ .sid = TEGRA186_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x4f8,
+ .security = 0x4fc,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_APEDMAW,
+ .name = "apedmaw",
+ .sid = TEGRA186_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x500,
+ .security = 0x504,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_NVDISPLAYR1,
+ .name = "nvdisplayr1",
+ .sid = TEGRA186_SID_NVDISPLAY,
+ .regs = {
+ .sid = {
+ .override = 0x508,
+ .security = 0x50c,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_VICSRD1,
+ .name = "vicsrd1",
+ .sid = TEGRA186_SID_VIC,
+ .regs = {
+ .sid = {
+ .override = 0x510,
+ .security = 0x514,
+ },
+ },
+ }, {
+ .id = TEGRA186_MEMORY_CLIENT_NVDECSRD1,
+ .name = "nvdecsrd1",
+ .sid = TEGRA186_SID_NVDEC,
+ .regs = {
+ .sid = {
+ .override = 0x518,
+ .security = 0x51c,
+ },
+ },
+ },
+};
+
+const struct tegra_mc_soc tegra186_mc_soc = {
+ .num_clients = ARRAY_SIZE(tegra186_mc_clients),
+ .clients = tegra186_mc_clients,
+ .num_address_bits = 40,
+ .num_channels = 4,
+ .client_id_mask = 0xff,
+ .intmask = MC_INT_DECERR_GENERALIZED_CARVEOUT | MC_INT_DECERR_MTS |
+ MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
+ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
+ .ops = &tegra186_mc_ops,
+ .ch_intmask = 0x0000000f,
+ .global_intstatus_channel_shift = 0,
+};
+#endif
diff --git a/drivers/memory/tegra/tegra194.c b/drivers/memory/tegra/tegra194.c
new file mode 100644
index 0000000000..26035ac3a1
--- /dev/null
+++ b/drivers/memory/tegra/tegra194.c
@@ -0,0 +1,1361 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2021 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <soc/tegra/mc.h>
+
+#include <dt-bindings/memory/tegra194-mc.h>
+
+#include "mc.h"
+
+static const struct tegra_mc_client tegra194_mc_clients[] = {
+ {
+ .id = TEGRA194_MEMORY_CLIENT_PTCR,
+ .name = "ptcr",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x000,
+ .security = 0x004,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU7R,
+ .name = "miu7r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x008,
+ .security = 0x00c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU7W,
+ .name = "miu7w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x010,
+ .security = 0x014,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_HDAR,
+ .name = "hdar",
+ .sid = TEGRA194_SID_HDA,
+ .regs = {
+ .sid = {
+ .override = 0x0a8,
+ .security = 0x0ac,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_HOST1XDMAR,
+ .name = "host1xdmar",
+ .sid = TEGRA194_SID_HOST1X,
+ .regs = {
+ .sid = {
+ .override = 0x0b0,
+ .security = 0x0b4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVENCSRD,
+ .name = "nvencsrd",
+ .sid = TEGRA194_SID_NVENC,
+ .regs = {
+ .sid = {
+ .override = 0x0e0,
+ .security = 0x0e4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SATAR,
+ .name = "satar",
+ .sid = TEGRA194_SID_SATA,
+ .regs = {
+ .sid = {
+ .override = 0x0f8,
+ .security = 0x0fc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MPCORER,
+ .name = "mpcorer",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x138,
+ .security = 0x13c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVENCSWR,
+ .name = "nvencswr",
+ .sid = TEGRA194_SID_NVENC,
+ .regs = {
+ .sid = {
+ .override = 0x158,
+ .security = 0x15c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_HDAW,
+ .name = "hdaw",
+ .sid = TEGRA194_SID_HDA,
+ .regs = {
+ .sid = {
+ .override = 0x1a8,
+ .security = 0x1ac,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MPCOREW,
+ .name = "mpcorew",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x1c8,
+ .security = 0x1cc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SATAW,
+ .name = "sataw",
+ .sid = TEGRA194_SID_SATA,
+ .regs = {
+ .sid = {
+ .override = 0x1e8,
+ .security = 0x1ec,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ISPRA,
+ .name = "ispra",
+ .sid = TEGRA194_SID_ISP,
+ .regs = {
+ .sid = {
+ .override = 0x220,
+ .security = 0x224,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ISPFALR,
+ .name = "ispfalr",
+ .sid = TEGRA194_SID_ISP_FALCON,
+ .regs = {
+ .sid = {
+ .override = 0x228,
+ .security = 0x22c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ISPWA,
+ .name = "ispwa",
+ .sid = TEGRA194_SID_ISP,
+ .regs = {
+ .sid = {
+ .override = 0x230,
+ .security = 0x234,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ISPWB,
+ .name = "ispwb",
+ .sid = TEGRA194_SID_ISP,
+ .regs = {
+ .sid = {
+ .override = 0x238,
+ .security = 0x23c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_XUSB_HOSTR,
+ .name = "xusb_hostr",
+ .sid = TEGRA194_SID_XUSB_HOST,
+ .regs = {
+ .sid = {
+ .override = 0x250,
+ .security = 0x254,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_XUSB_HOSTW,
+ .name = "xusb_hostw",
+ .sid = TEGRA194_SID_XUSB_HOST,
+ .regs = {
+ .sid = {
+ .override = 0x258,
+ .security = 0x25c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_XUSB_DEVR,
+ .name = "xusb_devr",
+ .sid = TEGRA194_SID_XUSB_DEV,
+ .regs = {
+ .sid = {
+ .override = 0x260,
+ .security = 0x264,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_XUSB_DEVW,
+ .name = "xusb_devw",
+ .sid = TEGRA194_SID_XUSB_DEV,
+ .regs = {
+ .sid = {
+ .override = 0x268,
+ .security = 0x26c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SDMMCRA,
+ .name = "sdmmcra",
+ .sid = TEGRA194_SID_SDMMC1,
+ .regs = {
+ .sid = {
+ .override = 0x300,
+ .security = 0x304,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SDMMCR,
+ .name = "sdmmcr",
+ .sid = TEGRA194_SID_SDMMC3,
+ .regs = {
+ .sid = {
+ .override = 0x310,
+ .security = 0x314,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SDMMCRAB,
+ .name = "sdmmcrab",
+ .sid = TEGRA194_SID_SDMMC4,
+ .regs = {
+ .sid = {
+ .override = 0x318,
+ .security = 0x31c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SDMMCWA,
+ .name = "sdmmcwa",
+ .sid = TEGRA194_SID_SDMMC1,
+ .regs = {
+ .sid = {
+ .override = 0x320,
+ .security = 0x324,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SDMMCW,
+ .name = "sdmmcw",
+ .sid = TEGRA194_SID_SDMMC3,
+ .regs = {
+ .sid = {
+ .override = 0x330,
+ .security = 0x334,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SDMMCWAB,
+ .name = "sdmmcwab",
+ .sid = TEGRA194_SID_SDMMC4,
+ .regs = {
+ .sid = {
+ .override = 0x338,
+ .security = 0x33c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_VICSRD,
+ .name = "vicsrd",
+ .sid = TEGRA194_SID_VIC,
+ .regs = {
+ .sid = {
+ .override = 0x360,
+ .security = 0x364,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_VICSWR,
+ .name = "vicswr",
+ .sid = TEGRA194_SID_VIC,
+ .regs = {
+ .sid = {
+ .override = 0x368,
+ .security = 0x36c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_VIW,
+ .name = "viw",
+ .sid = TEGRA194_SID_VI,
+ .regs = {
+ .sid = {
+ .override = 0x390,
+ .security = 0x394,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDECSRD,
+ .name = "nvdecsrd",
+ .sid = TEGRA194_SID_NVDEC,
+ .regs = {
+ .sid = {
+ .override = 0x3c0,
+ .security = 0x3c4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDECSWR,
+ .name = "nvdecswr",
+ .sid = TEGRA194_SID_NVDEC,
+ .regs = {
+ .sid = {
+ .override = 0x3c8,
+ .security = 0x3cc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_APER,
+ .name = "aper",
+ .sid = TEGRA194_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x3d0,
+ .security = 0x3d4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_APEW,
+ .name = "apew",
+ .sid = TEGRA194_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x3d8,
+ .security = 0x3dc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVJPGSRD,
+ .name = "nvjpgsrd",
+ .sid = TEGRA194_SID_NVJPG,
+ .regs = {
+ .sid = {
+ .override = 0x3f0,
+ .security = 0x3f4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVJPGSWR,
+ .name = "nvjpgswr",
+ .sid = TEGRA194_SID_NVJPG,
+ .regs = {
+ .sid = {
+ .override = 0x3f8,
+ .security = 0x3fc,
+ },
+ },
+ }, {
+ .name = "axiapr",
+ .id = TEGRA194_MEMORY_CLIENT_AXIAPR,
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x410,
+ .security = 0x414,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_AXIAPW,
+ .name = "axiapw",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x418,
+ .security = 0x41c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ETRR,
+ .name = "etrr",
+ .sid = TEGRA194_SID_ETR,
+ .regs = {
+ .sid = {
+ .override = 0x420,
+ .security = 0x424,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ETRW,
+ .name = "etrw",
+ .sid = TEGRA194_SID_ETR,
+ .regs = {
+ .sid = {
+ .override = 0x428,
+ .security = 0x42c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_AXISR,
+ .name = "axisr",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x460,
+ .security = 0x464,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_AXISW,
+ .name = "axisw",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x468,
+ .security = 0x46c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_EQOSR,
+ .name = "eqosr",
+ .sid = TEGRA194_SID_EQOS,
+ .regs = {
+ .sid = {
+ .override = 0x470,
+ .security = 0x474,
+ },
+ },
+ }, {
+ .name = "eqosw",
+ .id = TEGRA194_MEMORY_CLIENT_EQOSW,
+ .sid = TEGRA194_SID_EQOS,
+ .regs = {
+ .sid = {
+ .override = 0x478,
+ .security = 0x47c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_UFSHCR,
+ .name = "ufshcr",
+ .sid = TEGRA194_SID_UFSHC,
+ .regs = {
+ .sid = {
+ .override = 0x480,
+ .security = 0x484,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_UFSHCW,
+ .name = "ufshcw",
+ .sid = TEGRA194_SID_UFSHC,
+ .regs = {
+ .sid = {
+ .override = 0x488,
+ .security = 0x48c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDISPLAYR,
+ .name = "nvdisplayr",
+ .sid = TEGRA194_SID_NVDISPLAY,
+ .regs = {
+ .sid = {
+ .override = 0x490,
+ .security = 0x494,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_BPMPR,
+ .name = "bpmpr",
+ .sid = TEGRA194_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x498,
+ .security = 0x49c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_BPMPW,
+ .name = "bpmpw",
+ .sid = TEGRA194_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4a0,
+ .security = 0x4a4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_BPMPDMAR,
+ .name = "bpmpdmar",
+ .sid = TEGRA194_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4a8,
+ .security = 0x4ac,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_BPMPDMAW,
+ .name = "bpmpdmaw",
+ .sid = TEGRA194_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4b0,
+ .security = 0x4b4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_AONR,
+ .name = "aonr",
+ .sid = TEGRA194_SID_AON,
+ .regs = {
+ .sid = {
+ .override = 0x4b8,
+ .security = 0x4bc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_AONW,
+ .name = "aonw",
+ .sid = TEGRA194_SID_AON,
+ .regs = {
+ .sid = {
+ .override = 0x4c0,
+ .security = 0x4c4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_AONDMAR,
+ .name = "aondmar",
+ .sid = TEGRA194_SID_AON,
+ .regs = {
+ .sid = {
+ .override = 0x4c8,
+ .security = 0x4cc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_AONDMAW,
+ .name = "aondmaw",
+ .sid = TEGRA194_SID_AON,
+ .regs = {
+ .sid = {
+ .override = 0x4d0,
+ .security = 0x4d4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SCER,
+ .name = "scer",
+ .sid = TEGRA194_SID_SCE,
+ .regs = {
+ .sid = {
+ .override = 0x4d8,
+ .security = 0x4dc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SCEW,
+ .name = "scew",
+ .sid = TEGRA194_SID_SCE,
+ .regs = {
+ .sid = {
+ .override = 0x4e0,
+ .security = 0x4e4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SCEDMAR,
+ .name = "scedmar",
+ .sid = TEGRA194_SID_SCE,
+ .regs = {
+ .sid = {
+ .override = 0x4e8,
+ .security = 0x4ec,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_SCEDMAW,
+ .name = "scedmaw",
+ .sid = TEGRA194_SID_SCE,
+ .regs = {
+ .sid = {
+ .override = 0x4f0,
+ .security = 0x4f4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_APEDMAR,
+ .name = "apedmar",
+ .sid = TEGRA194_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x4f8,
+ .security = 0x4fc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_APEDMAW,
+ .name = "apedmaw",
+ .sid = TEGRA194_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x500,
+ .security = 0x504,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDISPLAYR1,
+ .name = "nvdisplayr1",
+ .sid = TEGRA194_SID_NVDISPLAY,
+ .regs = {
+ .sid = {
+ .override = 0x508,
+ .security = 0x50c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_VICSRD1,
+ .name = "vicsrd1",
+ .sid = TEGRA194_SID_VIC,
+ .regs = {
+ .sid = {
+ .override = 0x510,
+ .security = 0x514,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDECSRD1,
+ .name = "nvdecsrd1",
+ .sid = TEGRA194_SID_NVDEC,
+ .regs = {
+ .sid = {
+ .override = 0x518,
+ .security = 0x51c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU0R,
+ .name = "miu0r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x530,
+ .security = 0x534,
+ },
+ },
+ }, {
+ .name = "miu0w",
+ .id = TEGRA194_MEMORY_CLIENT_MIU0W,
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x538,
+ .security = 0x53c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU1R,
+ .name = "miu1r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x540,
+ .security = 0x544,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU1W,
+ .name = "miu1w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x548,
+ .security = 0x54c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU2R,
+ .name = "miu2r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x570,
+ .security = 0x574,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU2W,
+ .name = "miu2w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x578,
+ .security = 0x57c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU3R,
+ .name = "miu3r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x580,
+ .security = 0x584,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU3W,
+ .name = "miu3w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x588,
+ .security = 0x58c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU4R,
+ .name = "miu4r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x590,
+ .security = 0x594,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU4W,
+ .name = "miu4w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x598,
+ .security = 0x59c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DPMUR,
+ .name = "dpmur",
+ .sid = TEGRA194_SID_PASSTHROUGH,
+ .regs = {
+ .sid = {
+ .override = 0x5a0,
+ .security = 0x5a4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_VIFALR,
+ .name = "vifalr",
+ .sid = TEGRA194_SID_VI_FALCON,
+ .regs = {
+ .sid = {
+ .override = 0x5e0,
+ .security = 0x5e4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_VIFALW,
+ .name = "vifalw",
+ .sid = TEGRA194_SID_VI_FALCON,
+ .regs = {
+ .sid = {
+ .override = 0x5e8,
+ .security = 0x5ec,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA0RDA,
+ .name = "dla0rda",
+ .sid = TEGRA194_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x5f0,
+ .security = 0x5f4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA0FALRDB,
+ .name = "dla0falrdb",
+ .sid = TEGRA194_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x5f8,
+ .security = 0x5fc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA0WRA,
+ .name = "dla0wra",
+ .sid = TEGRA194_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x600,
+ .security = 0x604,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA0FALWRB,
+ .name = "dla0falwrb",
+ .sid = TEGRA194_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x608,
+ .security = 0x60c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA1RDA,
+ .name = "dla1rda",
+ .sid = TEGRA194_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x610,
+ .security = 0x614,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA1FALRDB,
+ .name = "dla1falrdb",
+ .sid = TEGRA194_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x618,
+ .security = 0x61c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA1WRA,
+ .name = "dla1wra",
+ .sid = TEGRA194_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x620,
+ .security = 0x624,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA1FALWRB,
+ .name = "dla1falwrb",
+ .sid = TEGRA194_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x628,
+ .security = 0x62c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0RDA,
+ .name = "pva0rda",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x630,
+ .security = 0x634,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0RDB,
+ .name = "pva0rdb",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x638,
+ .security = 0x63c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0RDC,
+ .name = "pva0rdc",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x640,
+ .security = 0x644,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0WRA,
+ .name = "pva0wra",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x648,
+ .security = 0x64c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0WRB,
+ .name = "pva0wrb",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x650,
+ .security = 0x654,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0WRC,
+ .name = "pva0wrc",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x658,
+ .security = 0x65c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1RDA,
+ .name = "pva1rda",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x660,
+ .security = 0x664,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1RDB,
+ .name = "pva1rdb",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x668,
+ .security = 0x66c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1RDC,
+ .name = "pva1rdc",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x670,
+ .security = 0x674,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1WRA,
+ .name = "pva1wra",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x678,
+ .security = 0x67c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1WRB,
+ .name = "pva1wrb",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x680,
+ .security = 0x684,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1WRC,
+ .name = "pva1wrc",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x688,
+ .security = 0x68c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_RCER,
+ .name = "rcer",
+ .sid = TEGRA194_SID_RCE,
+ .regs = {
+ .sid = {
+ .override = 0x690,
+ .security = 0x694,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_RCEW,
+ .name = "rcew",
+ .sid = TEGRA194_SID_RCE,
+ .regs = {
+ .sid = {
+ .override = 0x698,
+ .security = 0x69c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_RCEDMAR,
+ .name = "rcedmar",
+ .sid = TEGRA194_SID_RCE,
+ .regs = {
+ .sid = {
+ .override = 0x6a0,
+ .security = 0x6a4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_RCEDMAW,
+ .name = "rcedmaw",
+ .sid = TEGRA194_SID_RCE,
+ .regs = {
+ .sid = {
+ .override = 0x6a8,
+ .security = 0x6ac,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVENC1SRD,
+ .name = "nvenc1srd",
+ .sid = TEGRA194_SID_NVENC1,
+ .regs = {
+ .sid = {
+ .override = 0x6b0,
+ .security = 0x6b4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVENC1SWR,
+ .name = "nvenc1swr",
+ .sid = TEGRA194_SID_NVENC1,
+ .regs = {
+ .sid = {
+ .override = 0x6b8,
+ .security = 0x6bc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE0R,
+ .name = "pcie0r",
+ .sid = TEGRA194_SID_PCIE0,
+ .regs = {
+ .sid = {
+ .override = 0x6c0,
+ .security = 0x6c4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE0W,
+ .name = "pcie0w",
+ .sid = TEGRA194_SID_PCIE0,
+ .regs = {
+ .sid = {
+ .override = 0x6c8,
+ .security = 0x6cc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE1R,
+ .name = "pcie1r",
+ .sid = TEGRA194_SID_PCIE1,
+ .regs = {
+ .sid = {
+ .override = 0x6d0,
+ .security = 0x6d4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE1W,
+ .name = "pcie1w",
+ .sid = TEGRA194_SID_PCIE1,
+ .regs = {
+ .sid = {
+ .override = 0x6d8,
+ .security = 0x6dc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE2AR,
+ .name = "pcie2ar",
+ .sid = TEGRA194_SID_PCIE2,
+ .regs = {
+ .sid = {
+ .override = 0x6e0,
+ .security = 0x6e4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE2AW,
+ .name = "pcie2aw",
+ .sid = TEGRA194_SID_PCIE2,
+ .regs = {
+ .sid = {
+ .override = 0x6e8,
+ .security = 0x6ec,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE3R,
+ .name = "pcie3r",
+ .sid = TEGRA194_SID_PCIE3,
+ .regs = {
+ .sid = {
+ .override = 0x6f0,
+ .security = 0x6f4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE3W,
+ .name = "pcie3w",
+ .sid = TEGRA194_SID_PCIE3,
+ .regs = {
+ .sid = {
+ .override = 0x6f8,
+ .security = 0x6fc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE4R,
+ .name = "pcie4r",
+ .sid = TEGRA194_SID_PCIE4,
+ .regs = {
+ .sid = {
+ .override = 0x700,
+ .security = 0x704,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE4W,
+ .name = "pcie4w",
+ .sid = TEGRA194_SID_PCIE4,
+ .regs = {
+ .sid = {
+ .override = 0x708,
+ .security = 0x70c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE5R,
+ .name = "pcie5r",
+ .sid = TEGRA194_SID_PCIE5,
+ .regs = {
+ .sid = {
+ .override = 0x710,
+ .security = 0x714,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE5W,
+ .name = "pcie5w",
+ .sid = TEGRA194_SID_PCIE5,
+ .regs = {
+ .sid = {
+ .override = 0x718,
+ .security = 0x71c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ISPFALW,
+ .name = "ispfalw",
+ .sid = TEGRA194_SID_ISP_FALCON,
+ .regs = {
+ .sid = {
+ .override = 0x720,
+ .security = 0x724,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA0RDA1,
+ .name = "dla0rda1",
+ .sid = TEGRA194_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x748,
+ .security = 0x74c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_DLA1RDA1,
+ .name = "dla1rda1",
+ .sid = TEGRA194_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x750,
+ .security = 0x754,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0RDA1,
+ .name = "pva0rda1",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x758,
+ .security = 0x75c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA0RDB1,
+ .name = "pva0rdb1",
+ .sid = TEGRA194_SID_PVA0,
+ .regs = {
+ .sid = {
+ .override = 0x760,
+ .security = 0x764,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1RDA1,
+ .name = "pva1rda1",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x768,
+ .security = 0x76c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PVA1RDB1,
+ .name = "pva1rdb1",
+ .sid = TEGRA194_SID_PVA1,
+ .regs = {
+ .sid = {
+ .override = 0x770,
+ .security = 0x774,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE5R1,
+ .name = "pcie5r1",
+ .sid = TEGRA194_SID_PCIE5,
+ .regs = {
+ .sid = {
+ .override = 0x778,
+ .security = 0x77c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVENCSRD1,
+ .name = "nvencsrd1",
+ .sid = TEGRA194_SID_NVENC,
+ .regs = {
+ .sid = {
+ .override = 0x780,
+ .security = 0x784,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVENC1SRD1,
+ .name = "nvenc1srd1",
+ .sid = TEGRA194_SID_NVENC1,
+ .regs = {
+ .sid = {
+ .override = 0x788,
+ .security = 0x78c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_ISPRA1,
+ .name = "ispra1",
+ .sid = TEGRA194_SID_ISP,
+ .regs = {
+ .sid = {
+ .override = 0x790,
+ .security = 0x794,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_PCIE0R1,
+ .name = "pcie0r1",
+ .sid = TEGRA194_SID_PCIE0,
+ .regs = {
+ .sid = {
+ .override = 0x798,
+ .security = 0x79c,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDEC1SRD,
+ .name = "nvdec1srd",
+ .sid = TEGRA194_SID_NVDEC1,
+ .regs = {
+ .sid = {
+ .override = 0x7c8,
+ .security = 0x7cc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDEC1SRD1,
+ .name = "nvdec1srd1",
+ .sid = TEGRA194_SID_NVDEC1,
+ .regs = {
+ .sid = {
+ .override = 0x7d0,
+ .security = 0x7d4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_NVDEC1SWR,
+ .name = "nvdec1swr",
+ .sid = TEGRA194_SID_NVDEC1,
+ .regs = {
+ .sid = {
+ .override = 0x7d8,
+ .security = 0x7dc,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU5R,
+ .name = "miu5r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x7e0,
+ .security = 0x7e4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU5W,
+ .name = "miu5w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x7e8,
+ .security = 0x7ec,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU6R,
+ .name = "miu6r",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x7f0,
+ .security = 0x7f4,
+ },
+ },
+ }, {
+ .id = TEGRA194_MEMORY_CLIENT_MIU6W,
+ .name = "miu6w",
+ .sid = TEGRA194_SID_MIU,
+ .regs = {
+ .sid = {
+ .override = 0x7f8,
+ .security = 0x7fc,
+ },
+ },
+ },
+};
+
+const struct tegra_mc_soc tegra194_mc_soc = {
+ .num_clients = ARRAY_SIZE(tegra194_mc_clients),
+ .clients = tegra194_mc_clients,
+ .num_address_bits = 40,
+ .num_channels = 16,
+ .client_id_mask = 0xff,
+ .intmask = MC_INT_DECERR_ROUTE_SANITY |
+ MC_INT_DECERR_GENERALIZED_CARVEOUT | MC_INT_DECERR_MTS |
+ MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
+ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
+ .has_addr_hi_reg = true,
+ .ops = &tegra186_mc_ops,
+ .icc_ops = &tegra_mc_icc_ops,
+ .ch_intmask = 0x00000f00,
+ .global_intstatus_channel_shift = 8,
+};
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
new file mode 100644
index 0000000000..fd595c851a
--- /dev/null
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -0,0 +1,1279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Tegra20 External Memory Controller driver
+ *
+ * Author: Dmitry Osipenko <digetx@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/debugfs.h>
+#include <linux/devfreq.h>
+#include <linux/err.h>
+#include <linux/interconnect-provider.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/types.h>
+
+#include <soc/tegra/common.h>
+#include <soc/tegra/fuse.h>
+
+#include "../jedec_ddr.h"
+#include "../of_memory.h"
+
+#include "mc.h"
+
+#define EMC_INTSTATUS 0x000
+#define EMC_INTMASK 0x004
+#define EMC_DBG 0x008
+#define EMC_ADR_CFG_0 0x010
+#define EMC_TIMING_CONTROL 0x028
+#define EMC_RC 0x02c
+#define EMC_RFC 0x030
+#define EMC_RAS 0x034
+#define EMC_RP 0x038
+#define EMC_R2W 0x03c
+#define EMC_W2R 0x040
+#define EMC_R2P 0x044
+#define EMC_W2P 0x048
+#define EMC_RD_RCD 0x04c
+#define EMC_WR_RCD 0x050
+#define EMC_RRD 0x054
+#define EMC_REXT 0x058
+#define EMC_WDV 0x05c
+#define EMC_QUSE 0x060
+#define EMC_QRST 0x064
+#define EMC_QSAFE 0x068
+#define EMC_RDV 0x06c
+#define EMC_REFRESH 0x070
+#define EMC_BURST_REFRESH_NUM 0x074
+#define EMC_PDEX2WR 0x078
+#define EMC_PDEX2RD 0x07c
+#define EMC_PCHG2PDEN 0x080
+#define EMC_ACT2PDEN 0x084
+#define EMC_AR2PDEN 0x088
+#define EMC_RW2PDEN 0x08c
+#define EMC_TXSR 0x090
+#define EMC_TCKE 0x094
+#define EMC_TFAW 0x098
+#define EMC_TRPAB 0x09c
+#define EMC_TCLKSTABLE 0x0a0
+#define EMC_TCLKSTOP 0x0a4
+#define EMC_TREFBW 0x0a8
+#define EMC_QUSE_EXTRA 0x0ac
+#define EMC_ODT_WRITE 0x0b0
+#define EMC_ODT_READ 0x0b4
+#define EMC_MRR 0x0ec
+#define EMC_FBIO_CFG5 0x104
+#define EMC_FBIO_CFG6 0x114
+#define EMC_STAT_CONTROL 0x160
+#define EMC_STAT_LLMC_CONTROL 0x178
+#define EMC_STAT_PWR_CLOCK_LIMIT 0x198
+#define EMC_STAT_PWR_CLOCKS 0x19c
+#define EMC_STAT_PWR_COUNT 0x1a0
+#define EMC_AUTO_CAL_INTERVAL 0x2a8
+#define EMC_CFG_2 0x2b8
+#define EMC_CFG_DIG_DLL 0x2bc
+#define EMC_DLL_XFORM_DQS 0x2c0
+#define EMC_DLL_XFORM_QUSE 0x2c4
+#define EMC_ZCAL_REF_CNT 0x2e0
+#define EMC_ZCAL_WAIT_CNT 0x2e4
+#define EMC_CFG_CLKTRIM_0 0x2d0
+#define EMC_CFG_CLKTRIM_1 0x2d4
+#define EMC_CFG_CLKTRIM_2 0x2d8
+
+#define EMC_CLKCHANGE_REQ_ENABLE BIT(0)
+#define EMC_CLKCHANGE_PD_ENABLE BIT(1)
+#define EMC_CLKCHANGE_SR_ENABLE BIT(2)
+
+#define EMC_TIMING_UPDATE BIT(0)
+
+#define EMC_REFRESH_OVERFLOW_INT BIT(3)
+#define EMC_CLKCHANGE_COMPLETE_INT BIT(4)
+#define EMC_MRR_DIVLD_INT BIT(5)
+
+#define EMC_DBG_READ_MUX_ASSEMBLY BIT(0)
+#define EMC_DBG_WRITE_MUX_ACTIVE BIT(1)
+#define EMC_DBG_FORCE_UPDATE BIT(2)
+#define EMC_DBG_READ_DQM_CTRL BIT(9)
+#define EMC_DBG_CFG_PRIORITY BIT(24)
+
+#define EMC_FBIO_CFG5_DRAM_WIDTH_X16 BIT(4)
+#define EMC_FBIO_CFG5_DRAM_TYPE GENMASK(1, 0)
+
+#define EMC_MRR_DEV_SELECTN GENMASK(31, 30)
+#define EMC_MRR_MRR_MA GENMASK(23, 16)
+#define EMC_MRR_MRR_DATA GENMASK(15, 0)
+
+#define EMC_ADR_CFG_0_EMEM_NUMDEV GENMASK(25, 24)
+
+#define EMC_PWR_GATHER_CLEAR (1 << 8)
+#define EMC_PWR_GATHER_DISABLE (2 << 8)
+#define EMC_PWR_GATHER_ENABLE (3 << 8)
+
+enum emc_dram_type {
+ DRAM_TYPE_RESERVED,
+ DRAM_TYPE_DDR1,
+ DRAM_TYPE_LPDDR2,
+ DRAM_TYPE_DDR2,
+};
+
+static const u16 emc_timing_registers[] = {
+ EMC_RC,
+ EMC_RFC,
+ EMC_RAS,
+ EMC_RP,
+ EMC_R2W,
+ EMC_W2R,
+ EMC_R2P,
+ EMC_W2P,
+ EMC_RD_RCD,
+ EMC_WR_RCD,
+ EMC_RRD,
+ EMC_REXT,
+ EMC_WDV,
+ EMC_QUSE,
+ EMC_QRST,
+ EMC_QSAFE,
+ EMC_RDV,
+ EMC_REFRESH,
+ EMC_BURST_REFRESH_NUM,
+ EMC_PDEX2WR,
+ EMC_PDEX2RD,
+ EMC_PCHG2PDEN,
+ EMC_ACT2PDEN,
+ EMC_AR2PDEN,
+ EMC_RW2PDEN,
+ EMC_TXSR,
+ EMC_TCKE,
+ EMC_TFAW,
+ EMC_TRPAB,
+ EMC_TCLKSTABLE,
+ EMC_TCLKSTOP,
+ EMC_TREFBW,
+ EMC_QUSE_EXTRA,
+ EMC_FBIO_CFG6,
+ EMC_ODT_WRITE,
+ EMC_ODT_READ,
+ EMC_FBIO_CFG5,
+ EMC_CFG_DIG_DLL,
+ EMC_DLL_XFORM_DQS,
+ EMC_DLL_XFORM_QUSE,
+ EMC_ZCAL_REF_CNT,
+ EMC_ZCAL_WAIT_CNT,
+ EMC_AUTO_CAL_INTERVAL,
+ EMC_CFG_CLKTRIM_0,
+ EMC_CFG_CLKTRIM_1,
+ EMC_CFG_CLKTRIM_2,
+};
+
+struct emc_timing {
+ unsigned long rate;
+ u32 data[ARRAY_SIZE(emc_timing_registers)];
+};
+
+enum emc_rate_request_type {
+ EMC_RATE_DEVFREQ,
+ EMC_RATE_DEBUG,
+ EMC_RATE_ICC,
+ EMC_RATE_TYPE_MAX,
+};
+
+struct emc_rate_request {
+ unsigned long min_rate;
+ unsigned long max_rate;
+};
+
+struct tegra_emc {
+ struct device *dev;
+ struct tegra_mc *mc;
+ struct icc_provider provider;
+ struct notifier_block clk_nb;
+ struct clk *clk;
+ void __iomem *regs;
+ unsigned int dram_bus_width;
+
+ struct emc_timing *timings;
+ unsigned int num_timings;
+
+ struct {
+ struct dentry *root;
+ unsigned long min_rate;
+ unsigned long max_rate;
+ } debugfs;
+
+ /*
+ * Multiple sources within the EMC driver can request a min/max
+ * clock rate; those requests are collected in this array.
+ */
+ struct emc_rate_request requested_rate[EMC_RATE_TYPE_MAX];
+
+ /* protect shared rate-change code path */
+ struct mutex rate_lock;
+
+ struct devfreq_simple_ondemand_data ondemand_data;
+
+ /* memory chip identity information */
+ union lpddr2_basic_config4 basic_conf4;
+ unsigned int manufacturer_id;
+ unsigned int revision_id1;
+ unsigned int revision_id2;
+
+ bool mrr_error;
+};
+
+static irqreturn_t tegra_emc_isr(int irq, void *data)
+{
+ struct tegra_emc *emc = data;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT;
+ u32 status;
+
+ status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask;
+ if (!status)
+ return IRQ_NONE;
+
+ /* notify about HW problem */
+ if (status & EMC_REFRESH_OVERFLOW_INT)
+ dev_err_ratelimited(emc->dev,
+ "refresh request overflow timeout\n");
+
+ /* clear interrupts */
+ writel_relaxed(status, emc->regs + EMC_INTSTATUS);
+
+ return IRQ_HANDLED;
+}
+
+static struct emc_timing *tegra_emc_find_timing(struct tegra_emc *emc,
+ unsigned long rate)
+{
+ struct emc_timing *timing = NULL;
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate >= rate) {
+ timing = &emc->timings[i];
+ break;
+ }
+ }
+
+ if (!timing) {
+ dev_err(emc->dev, "no timing for rate %lu\n", rate);
+ return NULL;
+ }
+
+ return timing;
+}
+
+static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
+{
+ struct emc_timing *timing = tegra_emc_find_timing(emc, rate);
+ unsigned int i;
+
+ if (!timing)
+ return -EINVAL;
+
+ dev_dbg(emc->dev, "%s: using timing rate %lu for requested rate %lu\n",
+ __func__, timing->rate, rate);
+
+ /* program shadow registers */
+ for (i = 0; i < ARRAY_SIZE(timing->data); i++)
+ writel_relaxed(timing->data[i],
+ emc->regs + emc_timing_registers[i]);
+
+ /* wait until programming has settled */
+ readl_relaxed(emc->regs + emc_timing_registers[i - 1]);
+
+ return 0;
+}
+
+static int emc_complete_timing_change(struct tegra_emc *emc, bool flush)
+{
+ int err;
+ u32 v;
+
+ dev_dbg(emc->dev, "%s: flush %d\n", __func__, flush);
+
+ if (flush) {
+ /* manually initiate memory timing update */
+ writel_relaxed(EMC_TIMING_UPDATE,
+ emc->regs + EMC_TIMING_CONTROL);
+ return 0;
+ }
+
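+ /*
+ * The CAR hardware handshakes with the EMC on the clock change;
+ * completion is signalled by the CLKCHANGE_COMPLETE interrupt
+ * polled here.
+ */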
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, v,
+ v & EMC_CLKCHANGE_COMPLETE_INT,
+ 1, 100);
+ if (err) {
+ dev_err(emc->dev, "emc-car handshake timeout: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_emc_clk_change_notify(struct notifier_block *nb,
+ unsigned long msg, void *data)
+{
+ struct tegra_emc *emc = container_of(nb, struct tegra_emc, clk_nb);
+ struct clk_notifier_data *cnd = data;
+ int err;
+
+ switch (msg) {
+ case PRE_RATE_CHANGE:
+ err = emc_prepare_timing_change(emc, cnd->new_rate);
+ break;
+
+ case ABORT_RATE_CHANGE:
+ err = emc_prepare_timing_change(emc, cnd->old_rate);
+ if (err)
+ break;
+
+ err = emc_complete_timing_change(emc, true);
+ break;
+
+ case POST_RATE_CHANGE:
+ err = emc_complete_timing_change(emc, false);
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return notifier_from_errno(err);
+}
+
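+/*
+ * Illustrative shape of a timing node consumed below; the node name and
+ * values are placeholders, and nvidia,emc-registers must carry one value
+ * per entry of emc_timing_registers[] (46 values):
+ *
+ *   emc-table@166500 {
+ *           compatible = "nvidia,tegra20-emc-table";
+ *           clock-frequency = <166500>;
+ *           nvidia,emc-registers = < /* 46 register values */ >;
+ *   };
+ */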
+static int load_one_timing_from_dt(struct tegra_emc *emc,
+ struct emc_timing *timing,
+ struct device_node *node)
+{
+ u32 rate;
+ int err;
+
+ if (!of_device_is_compatible(node, "nvidia,tegra20-emc-table")) {
+ dev_err(emc->dev, "incompatible DT node: %pOF\n", node);
+ return -EINVAL;
+ }
+
+ err = of_property_read_u32(node, "clock-frequency", &rate);
+ if (err) {
+ dev_err(emc->dev, "timing %pOF: failed to read rate: %d\n",
+ node, err);
+ return err;
+ }
+
+ err = of_property_read_u32_array(node, "nvidia,emc-registers",
+ timing->data,
+ ARRAY_SIZE(emc_timing_registers));
+ if (err) {
+ dev_err(emc->dev,
+ "timing %pOF: failed to read emc timing data: %d\n",
+ node, err);
+ return err;
+ }
+
+ /*
+ * The EMC clock rate is twice the bus rate, and the bus rate is
+ * measured in kHz.
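+ * For example, a 333000 kHz table entry yields an EMC rate of
+ * 666000000 Hz (666 MHz).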
+ */
+ timing->rate = rate * 2 * 1000;
+
+ dev_dbg(emc->dev, "%s: %pOF: EMC rate %lu\n",
+ __func__, node, timing->rate);
+
+ return 0;
+}
+
+static int cmp_timings(const void *_a, const void *_b)
+{
+ const struct emc_timing *a = _a;
+ const struct emc_timing *b = _b;
+
+ if (a->rate < b->rate)
+ return -1;
+
+ if (a->rate > b->rate)
+ return 1;
+
+ return 0;
+}
+
+static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
+ struct device_node *node)
+{
+ struct device_node *child;
+ struct emc_timing *timing;
+ int child_count;
+ int err;
+
+ child_count = of_get_child_count(node);
+ if (!child_count) {
+ dev_err(emc->dev, "no memory timings in DT node: %pOF\n", node);
+ return -EINVAL;
+ }
+
+ emc->timings = devm_kcalloc(emc->dev, child_count, sizeof(*timing),
+ GFP_KERNEL);
+ if (!emc->timings)
+ return -ENOMEM;
+
+ timing = emc->timings;
+
+ for_each_child_of_node(node, child) {
+ if (of_node_name_eq(child, "lpddr2"))
+ continue;
+
+ err = load_one_timing_from_dt(emc, timing++, child);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
+
+ emc->num_timings++;
+ }
+
+ sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
+ NULL);
+
+ dev_info_once(emc->dev,
+ "got %u timings for RAM code %u (min %luMHz max %luMHz)\n",
+ emc->num_timings,
+ tegra_read_ram_code(),
+ emc->timings[0].rate / 1000000,
+ emc->timings[emc->num_timings - 1].rate / 1000000);
+
+ return 0;
+}
+
+static struct device_node *
+tegra_emc_find_node_by_ram_code(struct tegra_emc *emc)
+{
+ struct device *dev = emc->dev;
+ struct device_node *np;
+ u32 value, ram_code;
+ int err;
+
+ if (emc->mrr_error) {
+ dev_warn(dev, "memory timings skipped due to MRR error\n");
+ return NULL;
+ }
+
+ if (of_get_child_count(dev->of_node) == 0) {
+ dev_info_once(dev, "device-tree doesn't have memory timings\n");
+ return NULL;
+ }
+
+ if (!of_property_read_bool(dev->of_node, "nvidia,use-ram-code"))
+ return of_node_get(dev->of_node);
+
+ ram_code = tegra_read_ram_code();
+
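+ /*
+ * Select the emc-tables node that either matches the strapped RAM
+ * code or whose lpddr2 subnode matches the chip-identity data read
+ * from the SDRAM mode registers.
+ */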
+ for (np = of_find_node_by_name(dev->of_node, "emc-tables"); np;
+ np = of_find_node_by_name(np, "emc-tables")) {
+ err = of_property_read_u32(np, "nvidia,ram-code", &value);
+ if (err || value != ram_code) {
+ struct device_node *lpddr2_np;
+ bool cfg_mismatches = false;
+
+ lpddr2_np = of_find_node_by_name(np, "lpddr2");
+ if (lpddr2_np) {
+ const struct lpddr2_info *info;
+
+ info = of_lpddr2_get_info(lpddr2_np, dev);
+ if (info) {
+ if (info->manufacturer_id >= 0 &&
+ info->manufacturer_id != emc->manufacturer_id)
+ cfg_mismatches = true;
+
+ if (info->revision_id1 >= 0 &&
+ info->revision_id1 != emc->revision_id1)
+ cfg_mismatches = true;
+
+ if (info->revision_id2 >= 0 &&
+ info->revision_id2 != emc->revision_id2)
+ cfg_mismatches = true;
+
+ if (info->density != emc->basic_conf4.density)
+ cfg_mismatches = true;
+
+ if (info->io_width != emc->basic_conf4.io_width)
+ cfg_mismatches = true;
+
+ if (info->arch_type != emc->basic_conf4.arch_type)
+ cfg_mismatches = true;
+ } else {
+ dev_err(dev, "failed to parse %pOF\n", lpddr2_np);
+ cfg_mismatches = true;
+ }
+
+ of_node_put(lpddr2_np);
+ } else {
+ cfg_mismatches = true;
+ }
+
+ if (cfg_mismatches) {
+ of_node_put(np);
+ continue;
+ }
+ }
+
+ return np;
+ }
+
+ dev_err(dev, "no memory timings for RAM code %u found in device tree\n",
+ ram_code);
+
+ return NULL;
+}
+
+static int emc_read_lpddr_mode_register(struct tegra_emc *emc,
+ unsigned int emem_dev,
+ unsigned int register_addr,
+ unsigned int *register_data)
+{
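+ /*
+ * Note: the MRR DEV_SELECTN field encoding appears to be inverted
+ * with respect to the device index: writing 2 selects device 0 and
+ * writing 1 selects device 1, hence the mapping below.
+ */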
+ u32 memory_dev = emem_dev ? 1 : 2;
+ u32 val, mr_mask = 0xff;
+ int err;
+
+ /* clear data-valid interrupt status */
+ writel_relaxed(EMC_MRR_DIVLD_INT, emc->regs + EMC_INTSTATUS);
+
+ /* issue mode register read request */
+ val = FIELD_PREP(EMC_MRR_DEV_SELECTN, memory_dev);
+ val |= FIELD_PREP(EMC_MRR_MRR_MA, register_addr);
+
+ writel_relaxed(val, emc->regs + EMC_MRR);
+
+ /* wait for the LPDDR2 data-valid interrupt */
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, val,
+ val & EMC_MRR_DIVLD_INT,
+ 1, 100);
+ if (err) {
+ dev_err(emc->dev, "mode register %u read failed: %d\n",
+ register_addr, err);
+ emc->mrr_error = true;
+ return err;
+ }
+
+ /* read out mode register data */
+ val = readl_relaxed(emc->regs + EMC_MRR);
+ *register_data = FIELD_GET(EMC_MRR_MRR_DATA, val) & mr_mask;
+
+ return 0;
+}
+
+static void emc_read_lpddr_sdram_info(struct tegra_emc *emc,
+ unsigned int emem_dev,
+ bool print_out)
+{
+ /* these registers are standard for all LPDDR JEDEC memory chips */
+ emc_read_lpddr_mode_register(emc, emem_dev, 5, &emc->manufacturer_id);
+ emc_read_lpddr_mode_register(emc, emem_dev, 6, &emc->revision_id1);
+ emc_read_lpddr_mode_register(emc, emem_dev, 7, &emc->revision_id2);
+ emc_read_lpddr_mode_register(emc, emem_dev, 8, &emc->basic_conf4.value);
+
+ if (!print_out)
+ return;
+
+ dev_info(emc->dev, "SDRAM[dev%u]: manufacturer: 0x%x (%s) rev1: 0x%x rev2: 0x%x prefetch: S%u density: %uMbit iowidth: %ubit\n",
+ emem_dev, emc->manufacturer_id,
+ lpddr2_jedec_manufacturer(emc->manufacturer_id),
+ emc->revision_id1, emc->revision_id2,
+ 4 >> emc->basic_conf4.arch_type,
+ 64 << emc->basic_conf4.density,
+ 32 >> emc->basic_conf4.io_width);
+}
+
+static int emc_setup_hw(struct tegra_emc *emc)
+{
+ u32 emc_cfg, emc_dbg, emc_fbio, emc_adr_cfg;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT;
+ static bool print_sdram_info_once;
+ enum emc_dram_type dram_type;
+ const char *dram_type_str;
+ unsigned int emem_numdev;
+
+ emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);
+
+ /*
+ * Depending on the memory type, DRAM should enter either self-refresh
+ * or power-down state on EMC clock change.
+ */
+ if (!(emc_cfg & EMC_CLKCHANGE_PD_ENABLE) &&
+ !(emc_cfg & EMC_CLKCHANGE_SR_ENABLE)) {
+ dev_err(emc->dev,
+ "bootloader didn't specify DRAM auto-suspend mode\n");
+ return -EINVAL;
+ }
+
+ /* enable EMC and CAR to handshake on PLL divider/source changes */
+ emc_cfg |= EMC_CLKCHANGE_REQ_ENABLE;
+ writel_relaxed(emc_cfg, emc->regs + EMC_CFG_2);
+
+ /* initialize interrupt */
+ writel_relaxed(intmask, emc->regs + EMC_INTMASK);
+ writel_relaxed(intmask, emc->regs + EMC_INTSTATUS);
+
+ /* ensure that unwanted debug features are disabled */
+ emc_dbg = readl_relaxed(emc->regs + EMC_DBG);
+ emc_dbg |= EMC_DBG_CFG_PRIORITY;
+ emc_dbg &= ~EMC_DBG_READ_MUX_ASSEMBLY;
+ emc_dbg &= ~EMC_DBG_WRITE_MUX_ACTIVE;
+ emc_dbg &= ~EMC_DBG_FORCE_UPDATE;
+ writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
+
+ emc_fbio = readl_relaxed(emc->regs + EMC_FBIO_CFG5);
+
+ if (emc_fbio & EMC_FBIO_CFG5_DRAM_WIDTH_X16)
+ emc->dram_bus_width = 16;
+ else
+ emc->dram_bus_width = 32;
+
+ dram_type = FIELD_GET(EMC_FBIO_CFG5_DRAM_TYPE, emc_fbio);
+
+ switch (dram_type) {
+ case DRAM_TYPE_RESERVED:
+ dram_type_str = "INVALID";
+ break;
+ case DRAM_TYPE_DDR1:
+ dram_type_str = "DDR1";
+ break;
+ case DRAM_TYPE_LPDDR2:
+ dram_type_str = "LPDDR2";
+ break;
+ case DRAM_TYPE_DDR2:
+ dram_type_str = "DDR2";
+ break;
+ }
+
+ emc_adr_cfg = readl_relaxed(emc->regs + EMC_ADR_CFG_0);
+ emem_numdev = FIELD_GET(EMC_ADR_CFG_0_EMEM_NUMDEV, emc_adr_cfg) + 1;
+
+ dev_info_once(emc->dev, "%ubit DRAM bus, %u %s %s attached\n",
+ emc->dram_bus_width, emem_numdev, dram_type_str,
+ emem_numdev == 2 ? "devices" : "device");
+
+ if (dram_type == DRAM_TYPE_LPDDR2) {
+ while (emem_numdev--)
+ emc_read_lpddr_sdram_info(emc, emem_numdev,
+ !print_sdram_info_once);
+ print_sdram_info_once = true;
+ }
+
+ return 0;
+}
+
+static long emc_round_rate(unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
+ void *arg)
+{
+ struct emc_timing *timing = NULL;
+ struct tegra_emc *emc = arg;
+ unsigned int i;
+
+ if (!emc->num_timings)
+ return clk_get_rate(emc->clk);
+
+ min_rate = min(min_rate, emc->timings[emc->num_timings - 1].rate);
+
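+ /*
+ * Pick the first timing that satisfies the requested rate; if that
+ * candidate exceeds max_rate, step back to the previous timing as
+ * long as it still meets min_rate.
+ */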
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate < rate && i != emc->num_timings - 1)
+ continue;
+
+ if (emc->timings[i].rate > max_rate) {
+ i = max(i, 1u) - 1;
+
+ if (emc->timings[i].rate < min_rate)
+ break;
+ }
+
+ if (emc->timings[i].rate < min_rate)
+ continue;
+
+ timing = &emc->timings[i];
+ break;
+ }
+
+ if (!timing) {
+ dev_err(emc->dev, "no timing for rate %lu min %lu max %lu\n",
+ rate, min_rate, max_rate);
+ return -EINVAL;
+ }
+
+ return timing->rate;
+}
+
+static void tegra_emc_rate_requests_init(struct tegra_emc *emc)
+{
+ unsigned int i;
+
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++) {
+ emc->requested_rate[i].min_rate = 0;
+ emc->requested_rate[i].max_rate = ULONG_MAX;
+ }
+}
+
+static int emc_request_rate(struct tegra_emc *emc,
+ unsigned long new_min_rate,
+ unsigned long new_max_rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = emc->requested_rate;
+ unsigned long min_rate = 0, max_rate = ULONG_MAX;
+ unsigned int i;
+ int err;
+
+ /* select minimum and maximum rates among the requested rates */
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++, req++) {
+ if (i == type) {
+ min_rate = max(new_min_rate, min_rate);
+ max_rate = min(new_max_rate, max_rate);
+ } else {
+ min_rate = max(req->min_rate, min_rate);
+ max_rate = min(req->max_rate, max_rate);
+ }
+ }
+
+ if (min_rate > max_rate) {
+ dev_err_ratelimited(emc->dev, "%s: type %u: out of range: %lu %lu\n",
+ __func__, type, min_rate, max_rate);
+ return -ERANGE;
+ }
+
+ /*
+ * EMC rate changes should go through the OPP API because it manages
+ * the accompanying voltage changes.
+ */
+ err = dev_pm_opp_set_rate(emc->dev, min_rate);
+ if (err)
+ return err;
+
+ emc->requested_rate[type].min_rate = new_min_rate;
+ emc->requested_rate[type].max_rate = new_max_rate;
+
+ return 0;
+}
+
+static int emc_set_min_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, rate, req->max_rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
+static int emc_set_max_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, req->min_rate, rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
+/*
+ * debugfs interface
+ *
+ * The memory controller driver exposes some files in debugfs that can be used
+ * to control the EMC frequency. The top-level directory can be found here:
+ *
+ * /sys/kernel/debug/emc
+ *
+ * It contains the following files:
+ *
+ * - available_rates: This file contains a list of valid, space-separated
+ * EMC frequencies.
+ *
+ * - min_rate: Writing a value to this file sets the given frequency as the
+ * floor of the permitted range. If this is higher than the currently
+ * configured EMC frequency, this will cause the frequency to be
+ * increased so that it stays within the valid range.
+ *
+ * - max_rate: Similarly to the min_rate file, writing a value to this file
+ * sets the given frequency as the ceiling of the permitted range. If
+ * the value is lower than the currently configured EMC frequency, this
+ * will cause the frequency to be decreased so that it stays within the
+ * valid range.
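+ *
+ * For example, to pin the EMC to 300 MHz (assuming that rate appears
+ * in available_rates), one could write:
+ *
+ *   echo 300000000 > /sys/kernel/debug/emc/min_rate
+ *   echo 300000000 > /sys/kernel/debug/emc/max_rate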
+ */
+
+static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++)
+ if (rate == emc->timings[i].rate)
+ return true;
+
+ return false;
+}
+
+static int tegra_emc_debug_available_rates_show(struct seq_file *s, void *data)
+{
+ struct tegra_emc *emc = s->private;
+ const char *prefix = "";
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ seq_printf(s, "%s%lu", prefix, emc->timings[i].rate);
+ prefix = " ";
+ }
+
+ seq_puts(s, "\n");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(tegra_emc_debug_available_rates);
+
+static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
+{
+ struct tegra_emc *emc = data;
+
+ *rate = emc->debugfs.min_rate;
+
+ return 0;
+}
+
+static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
+{
+ struct tegra_emc *emc = data;
+ int err;
+
+ if (!tegra_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.min_rate = rate;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
+ tegra_emc_debug_min_rate_get,
+ tegra_emc_debug_min_rate_set, "%llu\n");
+
+static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
+{
+ struct tegra_emc *emc = data;
+
+ *rate = emc->debugfs.max_rate;
+
+ return 0;
+}
+
+static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
+{
+ struct tegra_emc *emc = data;
+ int err;
+
+ if (!tegra_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.max_rate = rate;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
+ tegra_emc_debug_max_rate_get,
+ tegra_emc_debug_max_rate_set, "%llu\n");
+
+static void tegra_emc_debugfs_init(struct tegra_emc *emc)
+{
+ struct device *dev = emc->dev;
+ unsigned int i;
+ int err;
+
+ emc->debugfs.min_rate = ULONG_MAX;
+ emc->debugfs.max_rate = 0;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate < emc->debugfs.min_rate)
+ emc->debugfs.min_rate = emc->timings[i].rate;
+
+ if (emc->timings[i].rate > emc->debugfs.max_rate)
+ emc->debugfs.max_rate = emc->timings[i].rate;
+ }
+
+ if (!emc->num_timings) {
+ emc->debugfs.min_rate = clk_get_rate(emc->clk);
+ emc->debugfs.max_rate = emc->debugfs.min_rate;
+ }
+
+ err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
+ emc->debugfs.max_rate);
+ if (err < 0) {
+ dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
+ emc->debugfs.min_rate, emc->debugfs.max_rate,
+ emc->clk);
+ }
+
+ emc->debugfs.root = debugfs_create_dir("emc", NULL);
+
+ debugfs_create_file("available_rates", 0444, emc->debugfs.root,
+ emc, &tegra_emc_debug_available_rates_fops);
+ debugfs_create_file("min_rate", 0644, emc->debugfs.root,
+ emc, &tegra_emc_debug_min_rate_fops);
+ debugfs_create_file("max_rate", 0644, emc->debugfs.root,
+ emc, &tegra_emc_debug_max_rate_fops);
+}
+
+static inline struct tegra_emc *
+to_tegra_emc_provider(struct icc_provider *provider)
+{
+ return container_of(provider, struct tegra_emc, provider);
+}
+
+static struct icc_node_data *
+emc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct icc_provider *provider = data;
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ /* External Memory is the only possible ICC route */
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ if (node->id != TEGRA_ICC_EMEM)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * SRC and DST nodes should have matching TAG in order to have
+ * it set by default for a requested path.
+ */
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ ndata->node = node;
+
+ return ndata;
+ }
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+static int emc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct tegra_emc *emc = to_tegra_emc_provider(dst->provider);
+ unsigned long long peak_bw = icc_units_to_bps(dst->peak_bw);
+ unsigned long long avg_bw = icc_units_to_bps(dst->avg_bw);
+ unsigned long long rate = max(avg_bw, peak_bw);
+ unsigned int dram_data_bus_width_bytes;
+ int err;
+
+ /*
+ * The Tegra20 EMC runs at twice the SDRAM bus clock rate because DDR
+ * data is sampled on both clock edges. This means the EMC clock rate
+ * equals the peak data rate.
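+ * For example, a peak request of 1600000 KBps on a 32-bit (4-byte)
+ * bus translates into a 400 MHz EMC clock rate.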
+ */
+ dram_data_bus_width_bytes = emc->dram_bus_width / 8;
+ do_div(rate, dram_data_bus_width_bytes);
+ rate = min_t(u64, rate, U32_MAX);
+
+ err = emc_set_min_rate(emc, rate, EMC_RATE_ICC);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+{
+ const struct tegra_mc_soc *soc;
+ struct icc_node *node;
+ int err;
+
+ emc->mc = devm_tegra_memory_controller_get(emc->dev);
+ if (IS_ERR(emc->mc))
+ return PTR_ERR(emc->mc);
+
+ soc = emc->mc->soc;
+
+ emc->provider.dev = emc->dev;
+ emc->provider.set = emc_icc_set;
+ emc->provider.data = &emc->provider;
+ emc->provider.aggregate = soc->icc_ops->aggregate;
+ emc->provider.xlate_extended = emc_of_icc_xlate_extended;
+
+ icc_provider_init(&emc->provider);
+
+ /* create External Memory Controller node */
+ node = icc_node_create(TEGRA_ICC_EMC);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto err_msg;
+ }
+
+ node->name = "External Memory Controller";
+ icc_node_add(node, &emc->provider);
+
+ /* link External Memory Controller to External Memory (DRAM) */
+ err = icc_link_create(node, TEGRA_ICC_EMEM);
+ if (err)
+ goto remove_nodes;
+
+ /* create External Memory node */
+ node = icc_node_create(TEGRA_ICC_EMEM);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto remove_nodes;
+ }
+
+ node->name = "External Memory (DRAM)";
+ icc_node_add(node, &emc->provider);
+
+ err = icc_provider_register(&emc->provider);
+ if (err)
+ goto remove_nodes;
+
+ return 0;
+
+remove_nodes:
+ icc_nodes_remove(&emc->provider);
+err_msg:
+ dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
+
+ return err;
+}
+
+static void devm_tegra_emc_unset_callback(void *data)
+{
+ tegra20_clk_set_emc_round_callback(NULL, NULL);
+}
+
+static void devm_tegra_emc_unreg_clk_notifier(void *data)
+{
+ struct tegra_emc *emc = data;
+
+ clk_notifier_unregister(emc->clk, &emc->clk_nb);
+}
+
+static int tegra_emc_init_clk(struct tegra_emc *emc)
+{
+ int err;
+
+ tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
+
+ err = devm_add_action_or_reset(emc->dev, devm_tegra_emc_unset_callback,
+ NULL);
+ if (err)
+ return err;
+
+ emc->clk = devm_clk_get(emc->dev, NULL);
+ if (IS_ERR(emc->clk)) {
+ dev_err(emc->dev, "failed to get EMC clock: %pe\n", emc->clk);
+ return PTR_ERR(emc->clk);
+ }
+
+ err = clk_notifier_register(emc->clk, &emc->clk_nb);
+ if (err) {
+ dev_err(emc->dev, "failed to register clk notifier: %d\n", err);
+ return err;
+ }
+
+ err = devm_add_action_or_reset(emc->dev,
+ devm_tegra_emc_unreg_clk_notifier, emc);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra_emc_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct tegra_emc *emc = dev_get_drvdata(dev);
+ struct dev_pm_opp *opp;
+ unsigned long rate;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "failed to find opp for %lu Hz\n", *freq);
+ return PTR_ERR(opp);
+ }
+
+ rate = dev_pm_opp_get_freq(opp);
+ dev_pm_opp_put(opp);
+
+ return emc_set_min_rate(emc, rate, EMC_RATE_DEVFREQ);
+}
+
+static int tegra_emc_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct tegra_emc *emc = dev_get_drvdata(dev);
+
+ /* freeze counters */
+ writel_relaxed(EMC_PWR_GATHER_DISABLE, emc->regs + EMC_STAT_CONTROL);
+
+ /*
+ * busy_time: number of clocks EMC request was accepted
+ * total_time: number of clocks PWR_GATHER control was set to ENABLE
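+ * The busy_time/total_time ratio is what the simple_ondemand governor
+ * compares against the upthreshold value.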
+ */
+ stat->busy_time = readl_relaxed(emc->regs + EMC_STAT_PWR_COUNT);
+ stat->total_time = readl_relaxed(emc->regs + EMC_STAT_PWR_CLOCKS);
+ stat->current_frequency = clk_get_rate(emc->clk);
+
+ /* clear counters and restart */
+ writel_relaxed(EMC_PWR_GATHER_CLEAR, emc->regs + EMC_STAT_CONTROL);
+ writel_relaxed(EMC_PWR_GATHER_ENABLE, emc->regs + EMC_STAT_CONTROL);
+
+ return 0;
+}
+
+static struct devfreq_dev_profile tegra_emc_devfreq_profile = {
+ .polling_ms = 30,
+ .target = tegra_emc_devfreq_target,
+ .get_dev_status = tegra_emc_devfreq_get_dev_status,
+};
+
+static int tegra_emc_devfreq_init(struct tegra_emc *emc)
+{
+ struct devfreq *devfreq;
+
+ /*
+ * PWR_COUNT is 1/2 of PWR_CLOCKS at max, and thus, the up-threshold
+ * should be less than 50. Secondly, multiple active memory clients
+ * may cause over 20% of lost clock cycles due to stalls caused by
+	 * competing memory accesses. This means that the threshold should be
+	 * set to less than 30 in order to have a properly working governor.
+ */
+ emc->ondemand_data.upthreshold = 20;
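+
+	/*
+	 * Putting numbers on the rationale above: a fully saturated EMC can
+	 * only ever report ~50% load because PWR_COUNT tops out at half of
+	 * PWR_CLOCKS, and with ~20% of cycles lost to arbitration stalls the
+	 * practical ceiling is closer to 30%. An upthreshold of 20 leaves the
+	 * governor headroom to raise the rate before the bus saturates.
+	 */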
+
+ /*
+	 * Reset the statistics-gathering state, select global bandwidth as
+	 * the statistics collection mode and set the clock counter saturation
+	 * limit to maximum.
+ */
+ writel_relaxed(0x00000000, emc->regs + EMC_STAT_CONTROL);
+ writel_relaxed(0x00000000, emc->regs + EMC_STAT_LLMC_CONTROL);
+ writel_relaxed(0xffffffff, emc->regs + EMC_STAT_PWR_CLOCK_LIMIT);
+
+ devfreq = devm_devfreq_add_device(emc->dev, &tegra_emc_devfreq_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ &emc->ondemand_data);
+ if (IS_ERR(devfreq)) {
+ dev_err(emc->dev, "failed to initialize devfreq: %pe", devfreq);
+ return PTR_ERR(devfreq);
+ }
+
+ return 0;
+}
+
+static int tegra_emc_probe(struct platform_device *pdev)
+{
+ struct tegra_core_opp_params opp_params = {};
+ struct device_node *np;
+ struct tegra_emc *emc;
+ int irq, err;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "please update your device tree\n");
+ return irq;
+ }
+
+ emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
+ if (!emc)
+ return -ENOMEM;
+
+ mutex_init(&emc->rate_lock);
+ emc->clk_nb.notifier_call = tegra_emc_clk_change_notify;
+ emc->dev = &pdev->dev;
+
+ emc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(emc->regs))
+ return PTR_ERR(emc->regs);
+
+ err = emc_setup_hw(emc);
+ if (err)
+ return err;
+
+ np = tegra_emc_find_node_by_ram_code(emc);
+ if (np) {
+ err = tegra_emc_load_timings_from_dt(emc, np);
+ of_node_put(np);
+ if (err)
+ return err;
+ }
+
+ err = devm_request_irq(&pdev->dev, irq, tegra_emc_isr, 0,
+ dev_name(&pdev->dev), emc);
+ if (err) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ return err;
+ }
+
+ err = tegra_emc_init_clk(emc);
+ if (err)
+ return err;
+
+ opp_params.init_state = true;
+
+ err = devm_tegra_core_dev_init_opp_table(&pdev->dev, &opp_params);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, emc);
+ tegra_emc_rate_requests_init(emc);
+ tegra_emc_debugfs_init(emc);
+ tegra_emc_interconnect_init(emc);
+ tegra_emc_devfreq_init(emc);
+
+ /*
+	 * Don't allow the kernel module to be unloaded. Unloading adds some
+	 * extra complexity which isn't really worth the effort in the case of
+	 * this driver.
+ */
+ try_module_get(THIS_MODULE);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_emc_of_match[] = {
+ { .compatible = "nvidia,tegra20-emc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tegra_emc_of_match);
+
+static struct platform_driver tegra_emc_driver = {
+ .probe = tegra_emc_probe,
+ .driver = {
+ .name = "tegra20-emc",
+ .of_match_table = tegra_emc_of_match,
+ .suppress_bind_attrs = true,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(tegra_emc_driver);
+
+MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra20 EMC driver");
+MODULE_SOFTDEP("pre: governor_simpleondemand");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra20.c b/drivers/memory/tegra/tegra20.c
new file mode 100644
index 0000000000..544bfd216a
--- /dev/null
+++ b/drivers/memory/tegra/tegra20.c
@@ -0,0 +1,809 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <dt-bindings/memory/tegra20-mc.h>
+
+#include "mc.h"
+
+#define MC_STAT_CONTROL 0x90
+#define MC_STAT_EMC_CLOCK_LIMIT 0xa0
+#define MC_STAT_EMC_CLOCKS 0xa4
+#define MC_STAT_EMC_CONTROL_0 0xa8
+#define MC_STAT_EMC_CONTROL_1 0xac
+#define MC_STAT_EMC_COUNT_0 0xb8
+#define MC_STAT_EMC_COUNT_1 0xbc
+
+#define MC_STAT_CONTROL_CLIENT_ID GENMASK(13, 8)
+#define MC_STAT_CONTROL_EVENT GENMASK(23, 16)
+#define MC_STAT_CONTROL_PRI_EVENT GENMASK(25, 24)
+#define MC_STAT_CONTROL_FILTER_CLIENT_ENABLE GENMASK(26, 26)
+#define MC_STAT_CONTROL_FILTER_PRI GENMASK(29, 28)
+
+#define MC_STAT_CONTROL_PRI_EVENT_HP 0
+#define MC_STAT_CONTROL_PRI_EVENT_TM 1
+#define MC_STAT_CONTROL_PRI_EVENT_BW 2
+
+#define MC_STAT_CONTROL_FILTER_PRI_DISABLE 0
+#define MC_STAT_CONTROL_FILTER_PRI_NO 1
+#define MC_STAT_CONTROL_FILTER_PRI_YES 2
+
+#define MC_STAT_CONTROL_EVENT_QUALIFIED 0
+#define MC_STAT_CONTROL_EVENT_ANY_READ 1
+#define MC_STAT_CONTROL_EVENT_ANY_WRITE 2
+#define MC_STAT_CONTROL_EVENT_RD_WR_CHANGE 3
+#define MC_STAT_CONTROL_EVENT_SUCCESSIVE 4
+#define MC_STAT_CONTROL_EVENT_ARB_BANK_AA 5
+#define MC_STAT_CONTROL_EVENT_ARB_BANK_BB 6
+#define MC_STAT_CONTROL_EVENT_PAGE_MISS 7
+#define MC_STAT_CONTROL_EVENT_AUTO_PRECHARGE 8
+
+#define EMC_GATHER_RST (0 << 8)
+#define EMC_GATHER_CLEAR (1 << 8)
+#define EMC_GATHER_DISABLE (2 << 8)
+#define EMC_GATHER_ENABLE (3 << 8)
+
+#define MC_STAT_SAMPLE_TIME_USEC 16000
+
+/* we store collected statistics as fixed-point values */
+#define MC_FX_FRAC_SCALE 100
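+
+/*
+ * For example, a utilization of 12.34% is stored as the integer 1234 and is
+ * split back into its whole and fractional parts with / and % when printed.
+ */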
+
+static DEFINE_MUTEX(tegra20_mc_stat_lock);
+
+struct tegra20_mc_stat_gather {
+ unsigned int pri_filter;
+ unsigned int pri_event;
+ unsigned int result;
+ unsigned int client;
+ unsigned int event;
+ bool client_enb;
+};
+
+struct tegra20_mc_stat {
+ struct tegra20_mc_stat_gather gather0;
+ struct tegra20_mc_stat_gather gather1;
+ unsigned int sample_time_usec;
+ const struct tegra_mc *mc;
+};
+
+struct tegra20_mc_client_stat {
+ unsigned int events;
+ unsigned int arb_high_prio;
+ unsigned int arb_timeout;
+ unsigned int arb_bandwidth;
+ unsigned int rd_wr_change;
+ unsigned int successive;
+ unsigned int page_miss;
+ unsigned int auto_precharge;
+ unsigned int arb_bank_aa;
+ unsigned int arb_bank_bb;
+};
+
+static const struct tegra_mc_client tegra20_mc_clients[] = {
+ {
+ .id = 0x00,
+ .name = "display0a",
+ }, {
+ .id = 0x01,
+ .name = "display0ab",
+ }, {
+ .id = 0x02,
+ .name = "display0b",
+ }, {
+ .id = 0x03,
+ .name = "display0bb",
+ }, {
+ .id = 0x04,
+ .name = "display0c",
+ }, {
+ .id = 0x05,
+ .name = "display0cb",
+ }, {
+ .id = 0x06,
+ .name = "display1b",
+ }, {
+ .id = 0x07,
+ .name = "display1bb",
+ }, {
+ .id = 0x08,
+ .name = "eppup",
+ }, {
+ .id = 0x09,
+ .name = "g2pr",
+ }, {
+ .id = 0x0a,
+ .name = "g2sr",
+ }, {
+ .id = 0x0b,
+ .name = "mpeunifbr",
+ }, {
+ .id = 0x0c,
+ .name = "viruv",
+ }, {
+ .id = 0x0d,
+ .name = "avpcarm7r",
+ }, {
+ .id = 0x0e,
+ .name = "displayhc",
+ }, {
+ .id = 0x0f,
+ .name = "displayhcb",
+ }, {
+ .id = 0x10,
+ .name = "fdcdrd",
+ }, {
+ .id = 0x11,
+ .name = "g2dr",
+ }, {
+ .id = 0x12,
+ .name = "host1xdmar",
+ }, {
+ .id = 0x13,
+ .name = "host1xr",
+ }, {
+ .id = 0x14,
+ .name = "idxsrd",
+ }, {
+ .id = 0x15,
+ .name = "mpcorer",
+ }, {
+ .id = 0x16,
+ .name = "mpe_ipred",
+ }, {
+ .id = 0x17,
+ .name = "mpeamemrd",
+ }, {
+ .id = 0x18,
+ .name = "mpecsrd",
+ }, {
+ .id = 0x19,
+ .name = "ppcsahbdmar",
+ }, {
+ .id = 0x1a,
+ .name = "ppcsahbslvr",
+ }, {
+ .id = 0x1b,
+ .name = "texsrd",
+ }, {
+ .id = 0x1c,
+ .name = "vdebsevr",
+ }, {
+ .id = 0x1d,
+ .name = "vdember",
+ }, {
+ .id = 0x1e,
+ .name = "vdemcer",
+ }, {
+ .id = 0x1f,
+ .name = "vdetper",
+ }, {
+ .id = 0x20,
+ .name = "eppu",
+ }, {
+ .id = 0x21,
+ .name = "eppv",
+ }, {
+ .id = 0x22,
+ .name = "eppy",
+ }, {
+ .id = 0x23,
+ .name = "mpeunifbw",
+ }, {
+ .id = 0x24,
+ .name = "viwsb",
+ }, {
+ .id = 0x25,
+ .name = "viwu",
+ }, {
+ .id = 0x26,
+ .name = "viwv",
+ }, {
+ .id = 0x27,
+ .name = "viwy",
+ }, {
+ .id = 0x28,
+ .name = "g2dw",
+ }, {
+ .id = 0x29,
+ .name = "avpcarm7w",
+ }, {
+ .id = 0x2a,
+ .name = "fdcdwr",
+ }, {
+ .id = 0x2b,
+ .name = "host1xw",
+ }, {
+ .id = 0x2c,
+ .name = "ispw",
+ }, {
+ .id = 0x2d,
+ .name = "mpcorew",
+ }, {
+ .id = 0x2e,
+ .name = "mpecswr",
+ }, {
+ .id = 0x2f,
+ .name = "ppcsahbdmaw",
+ }, {
+ .id = 0x30,
+ .name = "ppcsahbslvw",
+ }, {
+ .id = 0x31,
+ .name = "vdebsevw",
+ }, {
+ .id = 0x32,
+ .name = "vdembew",
+ }, {
+ .id = 0x33,
+ .name = "vdetpmw",
+ },
+};
+
+#define TEGRA20_MC_RESET(_name, _control, _status, _reset, _bit) \
+ { \
+ .name = #_name, \
+ .id = TEGRA20_MC_RESET_##_name, \
+ .control = _control, \
+ .status = _status, \
+ .reset = _reset, \
+ .bit = _bit, \
+ }
+
+static const struct tegra_mc_reset tegra20_mc_resets[] = {
+ TEGRA20_MC_RESET(AVPC, 0x100, 0x140, 0x104, 0),
+ TEGRA20_MC_RESET(DC, 0x100, 0x144, 0x104, 1),
+ TEGRA20_MC_RESET(DCB, 0x100, 0x148, 0x104, 2),
+ TEGRA20_MC_RESET(EPP, 0x100, 0x14c, 0x104, 3),
+ TEGRA20_MC_RESET(2D, 0x100, 0x150, 0x104, 4),
+ TEGRA20_MC_RESET(HC, 0x100, 0x154, 0x104, 5),
+ TEGRA20_MC_RESET(ISP, 0x100, 0x158, 0x104, 6),
+ TEGRA20_MC_RESET(MPCORE, 0x100, 0x15c, 0x104, 7),
+ TEGRA20_MC_RESET(MPEA, 0x100, 0x160, 0x104, 8),
+ TEGRA20_MC_RESET(MPEB, 0x100, 0x164, 0x104, 9),
+ TEGRA20_MC_RESET(MPEC, 0x100, 0x168, 0x104, 10),
+ TEGRA20_MC_RESET(3D, 0x100, 0x16c, 0x104, 11),
+ TEGRA20_MC_RESET(PPCS, 0x100, 0x170, 0x104, 12),
+ TEGRA20_MC_RESET(VDE, 0x100, 0x174, 0x104, 13),
+ TEGRA20_MC_RESET(VI, 0x100, 0x178, 0x104, 14),
+};
+
+static int tegra20_mc_hotreset_assert(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst)
+{
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&mc->lock, flags);
+
+ value = mc_readl(mc, rst->reset);
+ mc_writel(mc, value & ~BIT(rst->bit), rst->reset);
+
+ spin_unlock_irqrestore(&mc->lock, flags);
+
+ return 0;
+}
+
+static int tegra20_mc_hotreset_deassert(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst)
+{
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&mc->lock, flags);
+
+ value = mc_readl(mc, rst->reset);
+ mc_writel(mc, value | BIT(rst->bit), rst->reset);
+
+ spin_unlock_irqrestore(&mc->lock, flags);
+
+ return 0;
+}
+
+static int tegra20_mc_block_dma(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst)
+{
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&mc->lock, flags);
+
+ value = mc_readl(mc, rst->control) & ~BIT(rst->bit);
+ mc_writel(mc, value, rst->control);
+
+ spin_unlock_irqrestore(&mc->lock, flags);
+
+ return 0;
+}
+
+static bool tegra20_mc_dma_idling(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst)
+{
+ return mc_readl(mc, rst->status) == 0;
+}
+
+static int tegra20_mc_reset_status(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst)
+{
+ return (mc_readl(mc, rst->reset) & BIT(rst->bit)) == 0;
+}
+
+static int tegra20_mc_unblock_dma(struct tegra_mc *mc,
+ const struct tegra_mc_reset *rst)
+{
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&mc->lock, flags);
+
+ value = mc_readl(mc, rst->control) | BIT(rst->bit);
+ mc_writel(mc, value, rst->control);
+
+ spin_unlock_irqrestore(&mc->lock, flags);
+
+ return 0;
+}
+
+static const struct tegra_mc_reset_ops tegra20_mc_reset_ops = {
+ .hotreset_assert = tegra20_mc_hotreset_assert,
+ .hotreset_deassert = tegra20_mc_hotreset_deassert,
+ .block_dma = tegra20_mc_block_dma,
+ .dma_idling = tegra20_mc_dma_idling,
+ .unblock_dma = tegra20_mc_unblock_dma,
+ .reset_status = tegra20_mc_reset_status,
+};
+
+static int tegra20_mc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ /*
+ * It should be possible to tune arbitration knobs here, but the
+ * default values are known to work well on all devices. Hence
+ * nothing to do here so far.
+ */
+ return 0;
+}
+
+static int tegra20_mc_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ /*
+ * ISO clients need to reserve extra bandwidth up-front because
+ * there could be high bandwidth pressure during initial filling
+	 * of the client's FIFO buffers. Secondly, we need to take into
+	 * account inefficiencies of the memory subsystem.
+ */
+ if (tag & TEGRA_MC_ICC_TAG_ISO)
+ peak_bw = tegra_mc_scale_percents(peak_bw, 300);
+
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
+
+static struct icc_node_data *
+tegra20_mc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
+ unsigned int i, idx = spec->args[0];
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ list_for_each_entry(node, &mc->provider.nodes, node_list) {
+ if (node->id != idx)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ ndata->node = node;
+
+ /* these clients are isochronous by default */
+ if (strstarts(node->name, "display") ||
+ strstarts(node->name, "vi"))
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ else
+ ndata->tag = TEGRA_MC_ICC_TAG_DEFAULT;
+
+ return ndata;
+ }
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ if (mc->soc->clients[i].id == idx)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ dev_err(mc->dev, "invalid ICC client ID %u\n", idx);
+
+ return ERR_PTR(-EINVAL);
+}
+
+static const struct tegra_mc_icc_ops tegra20_mc_icc_ops = {
+ .xlate_extended = tegra20_mc_of_icc_xlate_extended,
+	.aggregate = tegra20_mc_icc_aggregate,
+ .set = tegra20_mc_icc_set,
+};
+
+static u32 tegra20_mc_stat_gather_control(const struct tegra20_mc_stat_gather *g)
+{
+ u32 control;
+
+ control = FIELD_PREP(MC_STAT_CONTROL_EVENT, g->event);
+ control |= FIELD_PREP(MC_STAT_CONTROL_CLIENT_ID, g->client);
+ control |= FIELD_PREP(MC_STAT_CONTROL_PRI_EVENT, g->pri_event);
+ control |= FIELD_PREP(MC_STAT_CONTROL_FILTER_PRI, g->pri_filter);
+ control |= FIELD_PREP(MC_STAT_CONTROL_FILTER_CLIENT_ENABLE, g->client_enb);
+
+ return control;
+}
+
+static void tegra20_mc_stat_gather(struct tegra20_mc_stat *stat)
+{
+ u32 clocks, count0, count1, control_0, control_1;
+ const struct tegra_mc *mc = stat->mc;
+
+ control_0 = tegra20_mc_stat_gather_control(&stat->gather0);
+ control_1 = tegra20_mc_stat_gather_control(&stat->gather1);
+
+ /*
+	 * Reset the statistics-gathering state, select the statistics
+	 * collection mode and set the clock counter saturation limit to maximum.
+ */
+ mc_writel(mc, 0x00000000, MC_STAT_CONTROL);
+ mc_writel(mc, control_0, MC_STAT_EMC_CONTROL_0);
+ mc_writel(mc, control_1, MC_STAT_EMC_CONTROL_1);
+ mc_writel(mc, 0xffffffff, MC_STAT_EMC_CLOCK_LIMIT);
+
+ mc_writel(mc, EMC_GATHER_ENABLE, MC_STAT_CONTROL);
+ fsleep(stat->sample_time_usec);
+ mc_writel(mc, EMC_GATHER_DISABLE, MC_STAT_CONTROL);
+
+ count0 = mc_readl(mc, MC_STAT_EMC_COUNT_0);
+ count1 = mc_readl(mc, MC_STAT_EMC_COUNT_1);
+ clocks = mc_readl(mc, MC_STAT_EMC_CLOCKS);
+ clocks = max(clocks / 100 / MC_FX_FRAC_SCALE, 1u);
+
+ stat->gather0.result = DIV_ROUND_UP(count0, clocks);
+ stat->gather1.result = DIV_ROUND_UP(count1, clocks);
+}
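+
+/*
+ * The normalization above folds the percent conversion and the fixed-point
+ * scale into a single division: dividing the raw clock count by
+ * 100 * MC_FX_FRAC_SCALE (10000) makes each result unit equal to 0.01% of
+ * the sampling window. For example (hypothetical numbers), count0 = 5000
+ * events over clocks = 40000 raw cycles yields DIV_ROUND_UP(5000, 4) = 1250,
+ * i.e. 12.50%.
+ */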
+
+static void tegra20_mc_stat_events(const struct tegra_mc *mc,
+ const struct tegra_mc_client *client0,
+ const struct tegra_mc_client *client1,
+ unsigned int pri_filter,
+ unsigned int pri_event,
+ unsigned int event,
+ unsigned int *result0,
+ unsigned int *result1)
+{
+ struct tegra20_mc_stat stat = {};
+
+ stat.gather0.client = client0 ? client0->id : 0;
+ stat.gather0.pri_filter = pri_filter;
+ stat.gather0.client_enb = !!client0;
+ stat.gather0.pri_event = pri_event;
+ stat.gather0.event = event;
+
+ stat.gather1.client = client1 ? client1->id : 0;
+ stat.gather1.pri_filter = pri_filter;
+ stat.gather1.client_enb = !!client1;
+ stat.gather1.pri_event = pri_event;
+ stat.gather1.event = event;
+
+ stat.sample_time_usec = MC_STAT_SAMPLE_TIME_USEC;
+ stat.mc = mc;
+
+ tegra20_mc_stat_gather(&stat);
+
+ *result0 = stat.gather0.result;
+ *result1 = stat.gather1.result;
+}
+
+static void tegra20_mc_collect_stats(const struct tegra_mc *mc,
+ struct tegra20_mc_client_stat *stats)
+{
+ const struct tegra_mc_client *client0, *client1;
+ unsigned int i;
+
+ /* collect memory controller utilization percent for each client */
+ for (i = 0; i < mc->soc->num_clients; i += 2) {
+ client0 = &mc->soc->clients[i];
+ client1 = &mc->soc->clients[i + 1];
+
+ if (i + 1 == mc->soc->num_clients)
+ client1 = NULL;
+
+ tegra20_mc_stat_events(mc, client0, client1,
+ MC_STAT_CONTROL_FILTER_PRI_DISABLE,
+ MC_STAT_CONTROL_PRI_EVENT_HP,
+ MC_STAT_CONTROL_EVENT_QUALIFIED,
+ &stats[i + 0].events,
+ &stats[i + 1].events);
+ }
+
+ /* collect more info from active clients */
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ unsigned int clienta, clientb = mc->soc->num_clients;
+
+ for (client0 = NULL; i < mc->soc->num_clients; i++) {
+ if (stats[i].events) {
+ client0 = &mc->soc->clients[i];
+ clienta = i++;
+ break;
+ }
+ }
+
+ for (client1 = NULL; i < mc->soc->num_clients; i++) {
+ if (stats[i].events) {
+ client1 = &mc->soc->clients[i];
+ clientb = i;
+ break;
+ }
+ }
+
+ if (!client0 && !client1)
+ break;
+
+ tegra20_mc_stat_events(mc, client0, client1,
+ MC_STAT_CONTROL_FILTER_PRI_YES,
+ MC_STAT_CONTROL_PRI_EVENT_HP,
+ MC_STAT_CONTROL_EVENT_QUALIFIED,
+ &stats[clienta].arb_high_prio,
+ &stats[clientb].arb_high_prio);
+
+ tegra20_mc_stat_events(mc, client0, client1,
+ MC_STAT_CONTROL_FILTER_PRI_YES,
+ MC_STAT_CONTROL_PRI_EVENT_TM,
+ MC_STAT_CONTROL_EVENT_QUALIFIED,
+ &stats[clienta].arb_timeout,
+ &stats[clientb].arb_timeout);
+
+ tegra20_mc_stat_events(mc, client0, client1,
+ MC_STAT_CONTROL_FILTER_PRI_YES,
+ MC_STAT_CONTROL_PRI_EVENT_BW,
+ MC_STAT_CONTROL_EVENT_QUALIFIED,
+ &stats[clienta].arb_bandwidth,
+ &stats[clientb].arb_bandwidth);
+
+ tegra20_mc_stat_events(mc, client0, client1,
+ MC_STAT_CONTROL_FILTER_PRI_DISABLE,
+ MC_STAT_CONTROL_PRI_EVENT_HP,
+ MC_STAT_CONTROL_EVENT_RD_WR_CHANGE,
+ &stats[clienta].rd_wr_change,
+ &stats[clientb].rd_wr_change);
+
+ tegra20_mc_stat_events(mc, client0, client1,
+ MC_STAT_CONTROL_FILTER_PRI_DISABLE,
+ MC_STAT_CONTROL_PRI_EVENT_HP,
+ MC_STAT_CONTROL_EVENT_SUCCESSIVE,
+ &stats[clienta].successive,
+ &stats[clientb].successive);
+
+ tegra20_mc_stat_events(mc, client0, client1,
+ MC_STAT_CONTROL_FILTER_PRI_DISABLE,
+ MC_STAT_CONTROL_PRI_EVENT_HP,
+ MC_STAT_CONTROL_EVENT_PAGE_MISS,
+ &stats[clienta].page_miss,
+ &stats[clientb].page_miss);
+ }
+}
+
+static void tegra20_mc_printf_percents(struct seq_file *s,
+ const char *fmt,
+ unsigned int percents_fx)
+{
+ char percents_str[8];
+
+ snprintf(percents_str, ARRAY_SIZE(percents_str), "%3u.%02u%%",
+ percents_fx / MC_FX_FRAC_SCALE, percents_fx % MC_FX_FRAC_SCALE);
+
+ seq_printf(s, fmt, percents_str);
+}
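+
+/*
+ * With the "%3u.%02u%%" format above, percents_fx = 1234 prints as " 12.34%"
+ * and percents_fx = 7 prints as "  0.07%".
+ */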
+
+static int tegra20_mc_stats_show(struct seq_file *s, void *unused)
+{
+ const struct tegra_mc *mc = dev_get_drvdata(s->private);
+ struct tegra20_mc_client_stat *stats;
+ unsigned int i;
+
+ stats = kcalloc(mc->soc->num_clients + 1, sizeof(*stats), GFP_KERNEL);
+ if (!stats)
+ return -ENOMEM;
+
+ mutex_lock(&tegra20_mc_stat_lock);
+
+ tegra20_mc_collect_stats(mc, stats);
+
+ mutex_unlock(&tegra20_mc_stat_lock);
+
+ seq_puts(s, "Memory client Events Timeout High priority Bandwidth ARB RW change Successive Page miss\n");
+ seq_puts(s, "-----------------------------------------------------------------------------------------------------\n");
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ seq_printf(s, "%-14s ", mc->soc->clients[i].name);
+
+		/* An event is generated when a client performs an R/W request. */
+ tegra20_mc_printf_percents(s, "%-9s", stats[i].events);
+
+ /*
+ * An event is generated based on the timeout (TM) signal
+ * accompanying a request for arbitration.
+ */
+ tegra20_mc_printf_percents(s, "%-10s", stats[i].arb_timeout);
+
+ /*
+ * An event is generated based on the high-priority (HP) signal
+ * accompanying a request for arbitration.
+ */
+ tegra20_mc_printf_percents(s, "%-16s", stats[i].arb_high_prio);
+
+ /*
+ * An event is generated based on the bandwidth (BW) signal
+ * accompanying a request for arbitration.
+ */
+ tegra20_mc_printf_percents(s, "%-16s", stats[i].arb_bandwidth);
+
+ /*
+ * An event is generated when the memory controller switches
+ * between making a read request to making a write request.
+ */
+ tegra20_mc_printf_percents(s, "%-12s", stats[i].rd_wr_change);
+
+ /*
+		 * An event is generated when the chosen client wins arbitration
+		 * and was also the winner of the previous request. If a
+ * client makes N requests in a row that are honored, SUCCESSIVE
+ * will be counted (N-1) times. Large values for this event
+ * imply that if we were patient enough, all of those requests
+ * could have been coalesced.
+ */
+ tegra20_mc_printf_percents(s, "%-13s", stats[i].successive);
+
+ /*
+ * An event is generated when the memory controller detects a
+ * page miss for the current request.
+ */
+ tegra20_mc_printf_percents(s, "%-12s\n", stats[i].page_miss);
+ }
+
+ kfree(stats);
+
+ return 0;
+}
+
+static int tegra20_mc_probe(struct tegra_mc *mc)
+{
+ debugfs_create_devm_seqfile(mc->dev, "stats", mc->debugfs.root,
+ tegra20_mc_stats_show);
+
+ return 0;
+}
+
+static int tegra20_mc_suspend(struct tegra_mc *mc)
+{
+ int err;
+
+ if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart) {
+ err = tegra_gart_suspend(mc->gart);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra20_mc_resume(struct tegra_mc *mc)
+{
+ int err;
+
+ if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart) {
+ err = tegra_gart_resume(mc->gart);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static irqreturn_t tegra20_mc_handle_irq(int irq, void *data)
+{
+ struct tegra_mc *mc = data;
+ unsigned long status;
+ unsigned int bit;
+
+	/* consider only the interrupt bits that this driver handles */
+ status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask;
+ if (!status)
+ return IRQ_NONE;
+
+ for_each_set_bit(bit, &status, 32) {
+ const char *error = tegra_mc_status_names[bit];
+ const char *direction = "read", *secure = "";
+ const char *client, *desc;
+ phys_addr_t addr;
+ u32 value, reg;
+ u8 id, type;
+
+ switch (BIT(bit)) {
+ case MC_INT_DECERR_EMEM:
+ reg = MC_DECERR_EMEM_OTHERS_STATUS;
+ value = mc_readl(mc, reg);
+
+ id = value & mc->soc->client_id_mask;
+ desc = tegra_mc_error_names[2];
+
+ if (value & BIT(31))
+ direction = "write";
+ break;
+
+ case MC_INT_INVALID_GART_PAGE:
+ reg = MC_GART_ERROR_REQ;
+ value = mc_readl(mc, reg);
+
+ id = (value >> 1) & mc->soc->client_id_mask;
+ desc = tegra_mc_error_names[2];
+
+ if (value & BIT(0))
+ direction = "write";
+ break;
+
+ case MC_INT_SECURITY_VIOLATION:
+ reg = MC_SECURITY_VIOLATION_STATUS;
+ value = mc_readl(mc, reg);
+
+ id = value & mc->soc->client_id_mask;
+ type = (value & BIT(30)) ? 4 : 3;
+ desc = tegra_mc_error_names[type];
+ secure = "secure ";
+
+ if (value & BIT(31))
+ direction = "write";
+ break;
+
+ default:
+ continue;
+ }
+
+ client = mc->soc->clients[id].name;
+ addr = mc_readl(mc, reg + sizeof(u32));
+
+ dev_err_ratelimited(mc->dev, "%s: %s%s @%pa: %s (%s)\n",
+ client, secure, direction, &addr, error,
+ desc);
+ }
+
+ /* clear interrupts */
+ mc_writel(mc, status, MC_INTSTATUS);
+
+ return IRQ_HANDLED;
+}
+
+static const struct tegra_mc_ops tegra20_mc_ops = {
+ .probe = tegra20_mc_probe,
+ .suspend = tegra20_mc_suspend,
+ .resume = tegra20_mc_resume,
+ .handle_irq = tegra20_mc_handle_irq,
+};
+
+const struct tegra_mc_soc tegra20_mc_soc = {
+ .clients = tegra20_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra20_mc_clients),
+ .num_address_bits = 32,
+ .client_id_mask = 0x3f,
+ .intmask = MC_INT_SECURITY_VIOLATION | MC_INT_INVALID_GART_PAGE |
+ MC_INT_DECERR_EMEM,
+ .reset_ops = &tegra20_mc_reset_ops,
+ .resets = tegra20_mc_resets,
+ .num_resets = ARRAY_SIZE(tegra20_mc_resets),
+ .icc_ops = &tegra20_mc_icc_ops,
+ .ops = &tegra20_mc_ops,
+};
diff --git a/drivers/memory/tegra/tegra210-emc-cc-r21021.c b/drivers/memory/tegra/tegra210-emc-cc-r21021.c
new file mode 100644
index 0000000000..4cb608c71e
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-emc-cc-r21021.c
@@ -0,0 +1,1774 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+
+#include <soc/tegra/mc.h>
+
+#include "tegra210-emc.h"
+#include "tegra210-mc.h"
+
+/*
+ * Enable flags for specifying verbosity.
+ */
+#define INFO (1 << 0)
+#define STEPS (1 << 1)
+#define SUB_STEPS (1 << 2)
+#define PRELOCK (1 << 3)
+#define PRELOCK_STEPS (1 << 4)
+#define ACTIVE_EN (1 << 5)
+#define PRAMP_UP (1 << 6)
+#define PRAMP_DN (1 << 7)
+#define EMA_WRITES (1 << 10)
+#define EMA_UPDATES (1 << 11)
+#define PER_TRAIN (1 << 16)
+#define CC_PRINT (1 << 17)
+#define CCFIFO (1 << 29)
+#define REGS (1 << 30)
+#define REG_LISTS (1 << 31)
+
+#define emc_dbg(emc, flags, ...) dev_dbg(emc->dev, __VA_ARGS__)
+
+#define DVFS_CLOCK_CHANGE_VERSION 21021
+#define EMC_PRELOCK_VERSION 2101
+
+enum {
+ DVFS_SEQUENCE = 1,
+ WRITE_TRAINING_SEQUENCE = 2,
+ PERIODIC_TRAINING_SEQUENCE = 3,
+ DVFS_PT1 = 10,
+ DVFS_UPDATE = 11,
+ TRAINING_PT1 = 12,
+ TRAINING_UPDATE = 13,
+ PERIODIC_TRAINING_UPDATE = 14
+};
+
+/*
+ * PTFV defines - basically just indexes into the per table PTFV array.
+ */
+#define PTFV_DQSOSC_MOVAVG_C0D0U0_INDEX 0
+#define PTFV_DQSOSC_MOVAVG_C0D0U1_INDEX 1
+#define PTFV_DQSOSC_MOVAVG_C0D1U0_INDEX 2
+#define PTFV_DQSOSC_MOVAVG_C0D1U1_INDEX 3
+#define PTFV_DQSOSC_MOVAVG_C1D0U0_INDEX 4
+#define PTFV_DQSOSC_MOVAVG_C1D0U1_INDEX 5
+#define PTFV_DQSOSC_MOVAVG_C1D1U0_INDEX 6
+#define PTFV_DQSOSC_MOVAVG_C1D1U1_INDEX 7
+#define PTFV_DVFS_SAMPLES_INDEX 9
+#define PTFV_MOVAVG_WEIGHT_INDEX 10
+#define PTFV_CONFIG_CTRL_INDEX 11
+
+#define PTFV_CONFIG_CTRL_USE_PREVIOUS_EMA (1 << 0)
+
+/*
+ * Do arithmetic in fixed point.
+ */
+#define MOVAVG_PRECISION_FACTOR 100
+
+/*
+ * The division portion of the average operation.
+ */
+#define __AVERAGE_PTFV(dev) \
+ ({ next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] = \
+ next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] / \
+ next->ptfv_list[PTFV_DVFS_SAMPLES_INDEX]; })
+
+/*
+ * Convert val to fixed point and add it to the temporary average.
+ */
+#define __INCREMENT_PTFV(dev, val) \
+ ({ next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] += \
+ ((val) * MOVAVG_PRECISION_FACTOR); })
+
+/*
+ * Convert a moving average back to integral form and return the value.
+ */
+#define __MOVAVG_AC(timing, dev) \
+ ((timing)->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] / \
+ MOVAVG_PRECISION_FACTOR)
+
+/* Weighted update. */
+#define __WEIGHTED_UPDATE_PTFV(dev, nval) \
+ do { \
+ int w = PTFV_MOVAVG_WEIGHT_INDEX; \
+ int dqs = PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX; \
+ \
+ next->ptfv_list[dqs] = \
+ ((nval * MOVAVG_PRECISION_FACTOR) + \
+ (next->ptfv_list[dqs] * \
+ next->ptfv_list[w])) / \
+ (next->ptfv_list[w] + 1); \
+ \
+ emc_dbg(emc, EMA_UPDATES, "%s: (s=%lu) EMA: %u\n", \
+ __stringify(dev), nval, next->ptfv_list[dqs]); \
+ } while (0)
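+
+/*
+ * The weighted update above is an exponential moving average kept in fixed
+ * point:
+ *
+ *   EMA' = (sample * MOVAVG_PRECISION_FACTOR + EMA * weight) / (weight + 1)
+ *
+ * where the weight is stored at PTFV_MOVAVG_WEIGHT_INDEX. A larger weight
+ * makes the average respond more slowly to new periodic-training samples.
+ */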
+
+/* Access a particular average. */
+#define __MOVAVG(timing, dev) \
+ ((timing)->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX])
+
+static u32 update_clock_tree_delay(struct tegra210_emc *emc, int type)
+{
+ bool periodic_training_update = type == PERIODIC_TRAINING_UPDATE;
+ struct tegra210_emc_timing *last = emc->last;
+ struct tegra210_emc_timing *next = emc->next;
+ u32 last_timing_rate_mhz = last->rate / 1000;
+ u32 next_timing_rate_mhz = next->rate / 1000;
+ bool dvfs_update = type == DVFS_UPDATE;
+ s32 tdel = 0, tmdel = 0, adel = 0;
+ bool dvfs_pt1 = type == DVFS_PT1;
+ unsigned long cval = 0;
+ u32 temp[2][2], value;
+ unsigned int i;
+
+ /*
+ * Dev0 MSB.
+ */
+ if (dvfs_pt1 || periodic_training_update) {
+ value = tegra210_emc_mrr_read(emc, 2, 19);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ temp[i][0] = (value & 0x00ff) << 8;
+ temp[i][1] = (value & 0xff00) << 0;
+ value >>= 16;
+ }
+
+ /*
+ * Dev0 LSB.
+ */
+ value = tegra210_emc_mrr_read(emc, 2, 18);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ temp[i][0] |= (value & 0x00ff) >> 0;
+ temp[i][1] |= (value & 0xff00) >> 8;
+ value >>= 16;
+ }
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[0][0];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C0D0U0, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C0D0U0);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C0D0U0, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C0D0U0] -
+ __MOVAVG_AC(next, C0D0U0);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C0D0U0] =
+ __MOVAVG_AC(next, C0D0U0);
+ }
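+
+	/*
+	 * Note on units: cval and hence tmdel are delays in picoseconds, so
+	 * tmdel * next_timing_rate_mhz / 10^6 is the delta expressed in clock
+	 * periods and the factor of 128 scales it to 1/128ths of a period,
+	 * which appears to be the unit in which tree_margin is given. The
+	 * same test is repeated for every C*D*U* combination below.
+	 */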
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[0][1];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C0D0U1, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C0D0U1);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C0D0U1, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C0D0U1] -
+ __MOVAVG_AC(next, C0D0U1);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C0D0U1] =
+ __MOVAVG_AC(next, C0D0U1);
+ }
+
+ if (emc->num_channels > 1) {
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[1][0];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C1D0U0, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C1D0U0);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C1D0U0, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C1D0U0] -
+ __MOVAVG_AC(next, C1D0U0);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C1D0U0] =
+ __MOVAVG_AC(next, C1D0U0);
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[1][1];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C1D0U1, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C1D0U1);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C1D0U1, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C1D0U1] -
+ __MOVAVG_AC(next, C1D0U1);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C1D0U1] =
+ __MOVAVG_AC(next, C1D0U1);
+ }
+ }
+
+ if (emc->num_devices < 2)
+ goto done;
+
+ /*
+ * Dev1 MSB.
+ */
+ if (dvfs_pt1 || periodic_training_update) {
+ value = tegra210_emc_mrr_read(emc, 1, 19);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ temp[i][0] = (value & 0x00ff) << 8;
+ temp[i][1] = (value & 0xff00) << 0;
+ value >>= 16;
+ }
+
+ /*
+ * Dev1 LSB.
+ */
+ value = tegra210_emc_mrr_read(emc, 1, 18);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ temp[i][0] |= (value & 0x00ff) >> 0;
+ temp[i][1] |= (value & 0xff00) >> 8;
+ value >>= 16;
+ }
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[0][0];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C0D1U0, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C0D1U0);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C0D1U0, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C0D1U0] -
+ __MOVAVG_AC(next, C0D1U0);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C0D1U0] =
+ __MOVAVG_AC(next, C0D1U0);
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[0][1];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C0D1U1, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C0D1U1);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C0D1U1, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C0D1U1] -
+ __MOVAVG_AC(next, C0D1U1);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C0D1U1] =
+ __MOVAVG_AC(next, C0D1U1);
+ }
+
+ if (emc->num_channels > 1) {
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[1][0];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C1D1U0, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C1D1U0);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C1D1U0, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C1D1U0] -
+ __MOVAVG_AC(next, C1D1U0);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C1D1U0] =
+ __MOVAVG_AC(next, C1D1U0);
+ }
+
+ if (dvfs_pt1 || periodic_training_update) {
+ cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ cval *= 1000000;
+ cval /= last_timing_rate_mhz * 2 * temp[1][1];
+ }
+
+ if (dvfs_pt1)
+ __INCREMENT_PTFV(C1D1U1, cval);
+ else if (dvfs_update)
+ __AVERAGE_PTFV(C1D1U1);
+ else if (periodic_training_update)
+ __WEIGHTED_UPDATE_PTFV(C1D1U1, cval);
+
+ if (dvfs_update || periodic_training_update) {
+ tdel = next->current_dram_clktree[C1D1U1] -
+ __MOVAVG_AC(next, C1D1U1);
+ tmdel = (tdel < 0) ? -1 * tdel : tdel;
+
+ if (tmdel > adel)
+ adel = tmdel;
+
+ if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
+ next->tree_margin)
+ next->current_dram_clktree[C1D1U1] =
+ __MOVAVG_AC(next, C1D1U1);
+ }
+ }
+
+done:
+ return adel;
+}
+
+static u32 periodic_compensation_handler(struct tegra210_emc *emc, u32 type,
+ struct tegra210_emc_timing *last,
+ struct tegra210_emc_timing *next)
+{
+#define __COPY_EMA(nt, lt, dev) \
+ ({ __MOVAVG(nt, dev) = __MOVAVG(lt, dev) * \
+ (nt)->ptfv_list[PTFV_DVFS_SAMPLES_INDEX]; })
+
+ u32 i, adel = 0, samples = next->ptfv_list[PTFV_DVFS_SAMPLES_INDEX];
+ u32 delay;
+
+ delay = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ delay *= 1000;
+ delay = 2 + (delay / last->rate);
+
+ if (!next->periodic_training)
+ return 0;
+
+ if (type == DVFS_SEQUENCE) {
+ if (last->periodic_training &&
+ (next->ptfv_list[PTFV_CONFIG_CTRL_INDEX] &
+ PTFV_CONFIG_CTRL_USE_PREVIOUS_EMA)) {
+ /*
+ * If the previous frequency was using periodic
+ * calibration then we can reuse the previous
+			 * frequency's EMA data.
+ */
+ __COPY_EMA(next, last, C0D0U0);
+ __COPY_EMA(next, last, C0D0U1);
+ __COPY_EMA(next, last, C1D0U0);
+ __COPY_EMA(next, last, C1D0U1);
+ __COPY_EMA(next, last, C0D1U0);
+ __COPY_EMA(next, last, C0D1U1);
+ __COPY_EMA(next, last, C1D1U0);
+ __COPY_EMA(next, last, C1D1U1);
+ } else {
+			/* Reset the EMA. */
+ __MOVAVG(next, C0D0U0) = 0;
+ __MOVAVG(next, C0D0U1) = 0;
+ __MOVAVG(next, C1D0U0) = 0;
+ __MOVAVG(next, C1D0U1) = 0;
+ __MOVAVG(next, C0D1U0) = 0;
+ __MOVAVG(next, C0D1U1) = 0;
+ __MOVAVG(next, C1D1U0) = 0;
+ __MOVAVG(next, C1D1U1) = 0;
+
+ for (i = 0; i < samples; i++) {
+ tegra210_emc_start_periodic_compensation(emc);
+ udelay(delay);
+
+ /*
+ * Generate next sample of data.
+ */
+ adel = update_clock_tree_delay(emc, DVFS_PT1);
+ }
+ }
+
+ /*
+ * Seems like it should be part of the
+		 * 'if (last->periodic_training)' conditional
+		 * since it is already done for the else clause.
+ */
+ adel = update_clock_tree_delay(emc, DVFS_UPDATE);
+ }
+
+ if (type == PERIODIC_TRAINING_SEQUENCE) {
+ tegra210_emc_start_periodic_compensation(emc);
+ udelay(delay);
+
+ adel = update_clock_tree_delay(emc, PERIODIC_TRAINING_UPDATE);
+ }
+
+ return adel;
+}
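+
+/*
+ * The delay computed at the top of periodic_compensation_handler() is the
+ * oscillator run time in microseconds (run clocks * 1000 / rate in kHz),
+ * padded by 2 us. With hypothetical numbers: 1024 oscillator clocks at a
+ * rate of 204000 kHz gives 2 + (1024 * 1000) / 204000 = 7, so udelay(7)
+ * comfortably covers the oscillator run.
+ */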
+
+static u32 tegra210_emc_r21021_periodic_compensation(struct tegra210_emc *emc)
+{
+ u32 emc_cfg, emc_cfg_o, emc_cfg_update, del, value;
+ static const u32 list[] = {
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3,
+ EMC_DATA_BRLSHFT_0,
+ EMC_DATA_BRLSHFT_1
+ };
+ struct tegra210_emc_timing *last = emc->last;
+ unsigned int items = ARRAY_SIZE(list), i;
+ unsigned long delay;
+
+ if (last->periodic_training) {
+ emc_dbg(emc, PER_TRAIN, "Periodic training starting\n");
+
+ value = emc_readl(emc, EMC_DBG);
+ emc_cfg_o = emc_readl(emc, EMC_CFG);
+ emc_cfg = emc_cfg_o & ~(EMC_CFG_DYN_SELF_REF |
+ EMC_CFG_DRAM_ACPD |
+ EMC_CFG_DRAM_CLKSTOP_PD);
+
+ /*
+ * 1. Power optimizations should be off.
+ */
+ emc_writel(emc, emc_cfg, EMC_CFG);
+
+ /* Does emc_timing_update() for above changes. */
+ tegra210_emc_dll_disable(emc);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_DRAM_IN_POWERDOWN_MASK,
+ 0);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK,
+ 0);
+
+ emc_cfg_update = value = emc_readl(emc, EMC_CFG_UPDATE);
+ value &= ~EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_MASK;
+ value |= (2 << EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_SHIFT);
+ emc_writel(emc, value, EMC_CFG_UPDATE);
+
+ /*
+ * 2. osc kick off - this assumes training and dvfs have set
+ * correct MR23.
+ */
+ tegra210_emc_start_periodic_compensation(emc);
+
+ /*
+ * 3. Let dram capture its clock tree delays.
+ */
+ delay = tegra210_emc_actual_osc_clocks(last->run_clocks);
+ delay *= 1000;
+ delay /= last->rate + 1;
+ udelay(delay);
+
+ /*
+ * 4. Check delta wrt previous values (save value if margin
+ * exceeds what is set in table).
+ */
+ del = periodic_compensation_handler(emc,
+ PERIODIC_TRAINING_SEQUENCE,
+ last, last);
+
+ /*
+ * 5. Apply compensation w.r.t. trained values (if clock tree
+ * has drifted more than the set margin).
+ */
+ if (last->tree_margin < ((del * 128 * (last->rate / 1000)) / 1000000)) {
+ for (i = 0; i < items; i++) {
+ value = tegra210_emc_compensate(last, list[i]);
+ emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n",
+ list[i], value);
+ emc_writel(emc, value, list[i]);
+ }
+ }
+
+ emc_writel(emc, emc_cfg_o, EMC_CFG);
+
+ /*
+		 * 6. Timing update actually applies the new trimmers.
+ */
+ tegra210_emc_timing_update(emc);
+
+ /* 6.1. Restore the UPDATE_DLL_IN_UPDATE field. */
+ emc_writel(emc, emc_cfg_update, EMC_CFG_UPDATE);
+
+ /* 6.2. Restore the DLL. */
+ tegra210_emc_dll_enable(emc);
+ }
+
+ return 0;
+}
+
+/*
+ * Do the clock change sequence.
+ */
+static void tegra210_emc_r21021_set_clock(struct tegra210_emc *emc, u32 clksrc)
+{
+ /* state variables */
+ static bool fsp_for_next_freq;
+ /* constant configuration parameters */
+ const bool save_restore_clkstop_pd = true;
+ const u32 zqcal_before_cc_cutoff = 2400;
+ const bool cya_allow_ref_cc = false;
+ const bool cya_issue_pc_ref = false;
+ const bool opt_cc_short_zcal = true;
+ const bool ref_b4_sref_en = false;
+ const u32 tZQCAL_lpddr4 = 1000000;
+ const bool opt_short_zcal = true;
+ const bool opt_do_sw_qrst = true;
+ const u32 opt_dvfs_mode = MAN_SR;
+ /*
+ * This is the timing table for the source frequency. It does _not_
+ * necessarily correspond to the actual timing values in the EMC at the
+	 * moment. This can happen if the boot BCT differs from the table.
+ * However, we need it for accessing the dram_timings (which are not
+ * really registers) array for the current frequency.
+ */
+ struct tegra210_emc_timing *fake, *last = emc->last, *next = emc->next;
+ u32 tRTM, RP_war, R2P_war, TRPab_war, deltaTWATM, W2P_war, tRPST;
+ u32 mr13_flip_fspwr, mr13_flip_fspop, ramp_up_wait, ramp_down_wait;
+ u32 zq_wait_long, zq_latch_dvfs_wait_time, tZQCAL_lpddr4_fc_adj;
+ u32 emc_auto_cal_config, auto_cal_en, emc_cfg, emc_sel_dpd_ctrl;
+ u32 tFC_lpddr4 = 1000 * next->dram_timings[T_FC_LPDDR4];
+ u32 bg_reg_mode_change, enable_bglp_reg, enable_bg_reg;
+ bool opt_zcal_en_cc = false, is_lpddr3 = false;
+ bool compensate_trimmer_applicable = false;
+ u32 emc_dbg, emc_cfg_pipe_clk, emc_pin;
+ u32 src_clk_period, dst_clk_period; /* in picoseconds */
+ bool shared_zq_resistor = false;
+ u32 value, dram_type;
+ u32 opt_dll_mode = 0;
+ unsigned long delay;
+ unsigned int i;
+
+ emc_dbg(emc, INFO, "Running clock change.\n");
+
+ /* XXX fake == last */
+ fake = tegra210_emc_find_timing(emc, last->rate * 1000UL);
+ fsp_for_next_freq = !fsp_for_next_freq;
+
+ value = emc_readl(emc, EMC_FBIO_CFG5) & EMC_FBIO_CFG5_DRAM_TYPE_MASK;
+ dram_type = value >> EMC_FBIO_CFG5_DRAM_TYPE_SHIFT;
+
+ if (last->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX] & BIT(31))
+ shared_zq_resistor = true;
+
+ if ((next->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0 &&
+ last->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0) ||
+ dram_type == DRAM_TYPE_LPDDR4)
+ opt_zcal_en_cc = true;
+
+ if (dram_type == DRAM_TYPE_DDR3)
+ opt_dll_mode = tegra210_emc_get_dll_state(next);
+
+ if ((next->burst_regs[EMC_FBIO_CFG5_INDEX] & BIT(25)) &&
+ (dram_type == DRAM_TYPE_LPDDR2))
+ is_lpddr3 = true;
+
+ emc_readl(emc, EMC_CFG);
+ emc_readl(emc, EMC_AUTO_CAL_CONFIG);
+
+ src_clk_period = 1000000000 / last->rate;
+ dst_clk_period = 1000000000 / next->rate;
+
+ if (dst_clk_period <= zqcal_before_cc_cutoff)
+ tZQCAL_lpddr4_fc_adj = tZQCAL_lpddr4 - tFC_lpddr4;
+ else
+ tZQCAL_lpddr4_fc_adj = tZQCAL_lpddr4;
+
+ tZQCAL_lpddr4_fc_adj /= dst_clk_period;
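+
+	/*
+	 * Units: rates are in kHz, so 1000000000 / rate is a clock period in
+	 * picoseconds. tZQCAL_lpddr4 (1000000 ps, i.e. 1 us) is credited with
+	 * the frequency-change time tFC only when the destination period is
+	 * at most zqcal_before_cc_cutoff (2400 ps), and the division above
+	 * then converts the remaining ZQ calibration time into
+	 * destination-clock cycles.
+	 */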
+
+ emc_dbg = emc_readl(emc, EMC_DBG);
+ emc_pin = emc_readl(emc, EMC_PIN);
+ emc_cfg_pipe_clk = emc_readl(emc, EMC_CFG_PIPE_CLK);
+
+ emc_cfg = next->burst_regs[EMC_CFG_INDEX];
+ emc_cfg &= ~(EMC_CFG_DYN_SELF_REF | EMC_CFG_DRAM_ACPD |
+ EMC_CFG_DRAM_CLKSTOP_SR | EMC_CFG_DRAM_CLKSTOP_PD);
+ emc_sel_dpd_ctrl = next->emc_sel_dpd_ctrl;
+ emc_sel_dpd_ctrl &= ~(EMC_SEL_DPD_CTRL_CLK_SEL_DPD_EN |
+ EMC_SEL_DPD_CTRL_CA_SEL_DPD_EN |
+ EMC_SEL_DPD_CTRL_RESET_SEL_DPD_EN |
+ EMC_SEL_DPD_CTRL_ODT_SEL_DPD_EN |
+ EMC_SEL_DPD_CTRL_DATA_SEL_DPD_EN);
+
+ emc_dbg(emc, INFO, "Clock change version: %d\n",
+ DVFS_CLOCK_CHANGE_VERSION);
+ emc_dbg(emc, INFO, "DRAM type = %d\n", dram_type);
+ emc_dbg(emc, INFO, "DRAM dev #: %u\n", emc->num_devices);
+ emc_dbg(emc, INFO, "Next EMC clksrc: 0x%08x\n", clksrc);
+ emc_dbg(emc, INFO, "DLL clksrc: 0x%08x\n", next->dll_clk_src);
+ emc_dbg(emc, INFO, "last rate: %u, next rate %u\n", last->rate,
+ next->rate);
+ emc_dbg(emc, INFO, "last period: %u, next period: %u\n",
+ src_clk_period, dst_clk_period);
+ emc_dbg(emc, INFO, " shared_zq_resistor: %d\n", !!shared_zq_resistor);
+ emc_dbg(emc, INFO, " num_channels: %u\n", emc->num_channels);
+ emc_dbg(emc, INFO, " opt_dll_mode: %d\n", opt_dll_mode);
+
+ /*
+ * Step 1:
+ * Pre DVFS SW sequence.
+ */
+ emc_dbg(emc, STEPS, "Step 1\n");
+ emc_dbg(emc, STEPS, "Step 1.1: Disable DLL temporarily.\n");
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+
+ tegra210_emc_timing_update(emc);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_CFG_DIG_DLL,
+ EMC_CFG_DIG_DLL_CFG_DLL_EN, 0);
+
+ emc_dbg(emc, STEPS, "Step 1.2: Disable AUTOCAL temporarily.\n");
+
+ emc_auto_cal_config = next->emc_auto_cal_config;
+ auto_cal_en = emc_auto_cal_config & EMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
+ emc_auto_cal_config &= ~EMC_AUTO_CAL_CONFIG_AUTO_CAL_START;
+ emc_auto_cal_config |= EMC_AUTO_CAL_CONFIG_AUTO_CAL_MEASURE_STALL;
+ emc_auto_cal_config |= EMC_AUTO_CAL_CONFIG_AUTO_CAL_UPDATE_STALL;
+ emc_auto_cal_config |= auto_cal_en;
+ emc_writel(emc, emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
+ emc_readl(emc, EMC_AUTO_CAL_CONFIG); /* Flush write. */
+
+ emc_dbg(emc, STEPS, "Step 1.3: Disable other power features.\n");
+
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc, emc_cfg, EMC_CFG);
+ emc_writel(emc, emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+
+ if (next->periodic_training) {
+ tegra210_emc_reset_dram_clktree_values(next);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_DRAM_IN_POWERDOWN_MASK,
+ 0);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK,
+ 0);
+
+ tegra210_emc_start_periodic_compensation(emc);
+
+ delay = 1000 * tegra210_emc_actual_osc_clocks(last->run_clocks);
+ udelay((delay / last->rate) + 2);
+
+ value = periodic_compensation_handler(emc, DVFS_SEQUENCE, fake,
+ next);
+ value = (value * 128 * next->rate / 1000) / 1000000;
+
+ if (next->periodic_training && value > next->tree_margin)
+ compensate_trimmer_applicable = true;
+ }
+
+ emc_writel(emc, EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc, emc_cfg, EMC_CFG);
+ emc_writel(emc, emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
+ emc_writel(emc, emc_cfg_pipe_clk | EMC_CFG_PIPE_CLK_CLK_ALWAYS_ON,
+ EMC_CFG_PIPE_CLK);
+ emc_writel(emc, next->emc_fdpd_ctrl_cmd_no_ramp &
+ ~EMC_FDPD_CTRL_CMD_NO_RAMP_CMD_DPD_NO_RAMP_ENABLE,
+ EMC_FDPD_CTRL_CMD_NO_RAMP);
+
+ bg_reg_mode_change =
+ ((next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD) ^
+ (last->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD)) ||
+ ((next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD) ^
+ (last->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD));
+ enable_bglp_reg =
+ (next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD) == 0;
+ enable_bg_reg =
+ (next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD) == 0;
+
+ if (bg_reg_mode_change) {
+ if (enable_bg_reg)
+ emc_writel(emc, last->burst_regs
+ [EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ ~EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD,
+ EMC_PMACRO_BG_BIAS_CTRL_0);
+
+ if (enable_bglp_reg)
+ emc_writel(emc, last->burst_regs
+ [EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ ~EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD,
+ EMC_PMACRO_BG_BIAS_CTRL_0);
+ }
+
+ /* Check if we need to turn on VREF generator. */
+ if ((((last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF) == 0) &&
+ ((next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
+	      EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF) != 0)) ||
+ (((last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) == 0) &&
+ ((next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) != 0))) {
+ u32 pad_tx_ctrl =
+ next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
+ u32 last_pad_tx_ctrl =
+ last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
+ u32 next_dq_e_ivref, next_dqs_e_ivref;
+
+ next_dqs_e_ivref = pad_tx_ctrl &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF;
+ next_dq_e_ivref = pad_tx_ctrl &
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF;
+ value = (last_pad_tx_ctrl &
+ ~EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF &
+ ~EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) |
+ next_dq_e_ivref | next_dqs_e_ivref;
+ emc_writel(emc, value, EMC_PMACRO_DATA_PAD_TX_CTRL);
+ udelay(1);
+ } else if (bg_reg_mode_change) {
+ udelay(1);
+ }
+
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+
+ /*
+ * Step 2:
+ * Prelock the DLL.
+ */
+ emc_dbg(emc, STEPS, "Step 2\n");
+
+ if (next->burst_regs[EMC_CFG_DIG_DLL_INDEX] &
+ EMC_CFG_DIG_DLL_CFG_DLL_EN) {
+ emc_dbg(emc, INFO, "Prelock enabled for target frequency.\n");
+ value = tegra210_emc_dll_prelock(emc, clksrc);
+ emc_dbg(emc, INFO, "DLL out: 0x%03x\n", value);
+ } else {
+ emc_dbg(emc, INFO, "Disabling DLL for target frequency.\n");
+ tegra210_emc_dll_disable(emc);
+ }
+
+ /*
+ * Step 3:
+ * Prepare autocal for the clock change.
+ */
+ emc_dbg(emc, STEPS, "Step 3\n");
+
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc, next->emc_auto_cal_config2, EMC_AUTO_CAL_CONFIG2);
+ emc_writel(emc, next->emc_auto_cal_config3, EMC_AUTO_CAL_CONFIG3);
+ emc_writel(emc, next->emc_auto_cal_config4, EMC_AUTO_CAL_CONFIG4);
+ emc_writel(emc, next->emc_auto_cal_config5, EMC_AUTO_CAL_CONFIG5);
+ emc_writel(emc, next->emc_auto_cal_config6, EMC_AUTO_CAL_CONFIG6);
+ emc_writel(emc, next->emc_auto_cal_config7, EMC_AUTO_CAL_CONFIG7);
+ emc_writel(emc, next->emc_auto_cal_config8, EMC_AUTO_CAL_CONFIG8);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+
+ emc_auto_cal_config |= (EMC_AUTO_CAL_CONFIG_AUTO_CAL_COMPUTE_START |
+ auto_cal_en);
+ emc_writel(emc, emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
+
+ /*
+ * Step 4:
+ * Update EMC_CFG. (??)
+ */
+ emc_dbg(emc, STEPS, "Step 4\n");
+
+ if (src_clk_period > 50000 && dram_type == DRAM_TYPE_LPDDR4)
+ ccfifo_writel(emc, 1, EMC_SELF_REF, 0);
+ else
+ emc_writel(emc, next->emc_cfg_2, EMC_CFG_2);
+
+ /*
+ * Step 5:
+ * Prepare reference variables for ZQCAL regs.
+ */
+ emc_dbg(emc, STEPS, "Step 5\n");
+
+ if (dram_type == DRAM_TYPE_LPDDR4)
+ zq_wait_long = max((u32)1, div_o3(1000000, dst_clk_period));
+ else if (dram_type == DRAM_TYPE_LPDDR2 || is_lpddr3)
+ zq_wait_long = max(next->min_mrs_wait,
+ div_o3(360000, dst_clk_period)) + 4;
+ else if (dram_type == DRAM_TYPE_DDR3)
+ zq_wait_long = max((u32)256,
+ div_o3(320000, dst_clk_period) + 2);
+ else
+ zq_wait_long = 0;
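+
+	/*
+	 * div_o3() rounds the division up, so zq_wait_long is the ZQ
+	 * calibration time expressed in destination-clock cycles: 1 us for
+	 * LPDDR4, 360 ns (floored at min_mrs_wait, plus 4 cycles) for
+	 * LPDDR2/LPDDR3, and 320 ns plus 2 cycles (floored at 256) for DDR3.
+	 */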
+
+ /*
+ * Step 6:
+ * Training code - removed.
+ */
+ emc_dbg(emc, STEPS, "Step 6\n");
+
+ /*
+ * Step 7:
+ * Program FSP reference registers and send MRWs to new FSPWR.
+ */
+ emc_dbg(emc, STEPS, "Step 7\n");
+ emc_dbg(emc, SUB_STEPS, "Step 7.1: Bug 200024907 - Patch RP R2P");
+
+ /* WAR 200024907 */
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ u32 nRTP = 16;
+
+ if (src_clk_period >= 1000000 / 1866) /* 535.91 ps */
+ nRTP = 14;
+
+ if (src_clk_period >= 1000000 / 1600) /* 625.00 ps */
+ nRTP = 12;
+
+ if (src_clk_period >= 1000000 / 1333) /* 750.19 ps */
+ nRTP = 10;
+
+ if (src_clk_period >= 1000000 / 1066) /* 938.09 ps */
+ nRTP = 8;
+
+ deltaTWATM = max_t(u32, div_o3(7500, src_clk_period), 8);
+
+ /*
+ * Originally there was a + .5 in the tRPST calculation.
+		 * However, since we can't do FP in the kernel and the tRTM
+		 * computation was in a floating-point ceiling function, adding
+		 * one to tRTP should be OK. There is no other source of
+		 * non-integer values, so the result was always going to be
+		 * something of the form: f_ceil(N + .5) = N + 1.
+ */
+ tRPST = (last->emc_mrw & 0x80) >> 7;
+ tRTM = fake->dram_timings[RL] + div_o3(3600, src_clk_period) +
+ max_t(u32, div_o3(7500, src_clk_period), 8) + tRPST +
+ 1 + nRTP;
+
+ emc_dbg(emc, INFO, "tRTM = %u, EMC_RP = %u\n", tRTM,
+ next->burst_regs[EMC_RP_INDEX]);
+
+ if (last->burst_regs[EMC_RP_INDEX] < tRTM) {
+ if (tRTM > (last->burst_regs[EMC_R2P_INDEX] +
+ last->burst_regs[EMC_RP_INDEX])) {
+ R2P_war = tRTM - last->burst_regs[EMC_RP_INDEX];
+ RP_war = last->burst_regs[EMC_RP_INDEX];
+ TRPab_war = last->burst_regs[EMC_TRPAB_INDEX];
+
+ if (R2P_war > 63) {
+ RP_war = R2P_war +
+ last->burst_regs[EMC_RP_INDEX] - 63;
+
+ if (TRPab_war < RP_war)
+ TRPab_war = RP_war;
+
+ R2P_war = 63;
+ }
+ } else {
+ R2P_war = last->burst_regs[EMC_R2P_INDEX];
+ RP_war = last->burst_regs[EMC_RP_INDEX];
+ TRPab_war = last->burst_regs[EMC_TRPAB_INDEX];
+ }
+
+ if (RP_war < deltaTWATM) {
+ W2P_war = last->burst_regs[EMC_W2P_INDEX]
+ + deltaTWATM - RP_war;
+ if (W2P_war > 63) {
+ RP_war = RP_war + W2P_war - 63;
+ if (TRPab_war < RP_war)
+ TRPab_war = RP_war;
+ W2P_war = 63;
+ }
+ } else {
+				W2P_war = last->burst_regs[EMC_W2P_INDEX];
+ }
+
+ if ((last->burst_regs[EMC_W2P_INDEX] ^ W2P_war) ||
+ (last->burst_regs[EMC_R2P_INDEX] ^ R2P_war) ||
+ (last->burst_regs[EMC_RP_INDEX] ^ RP_war) ||
+ (last->burst_regs[EMC_TRPAB_INDEX] ^ TRPab_war)) {
+ emc_writel(emc, RP_war, EMC_RP);
+ emc_writel(emc, R2P_war, EMC_R2P);
+ emc_writel(emc, W2P_war, EMC_W2P);
+ emc_writel(emc, TRPab_war, EMC_TRPAB);
+ }
+
+ tegra210_emc_timing_update(emc);
+ } else {
+ emc_dbg(emc, INFO, "Skipped WAR\n");
+ }
+ }
+
+ if (!fsp_for_next_freq) {
+ mr13_flip_fspwr = (next->emc_mrw3 & 0xffffff3f) | 0x80;
+ mr13_flip_fspop = (next->emc_mrw3 & 0xffffff3f) | 0x00;
+ } else {
+ mr13_flip_fspwr = (next->emc_mrw3 & 0xffffff3f) | 0x40;
+ mr13_flip_fspop = (next->emc_mrw3 & 0xffffff3f) | 0xc0;
+ }
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ emc_writel(emc, mr13_flip_fspwr, EMC_MRW3);
+ emc_writel(emc, next->emc_mrw, EMC_MRW);
+ emc_writel(emc, next->emc_mrw2, EMC_MRW2);
+ }
+
+ /*
+ * Step 8:
+ * Program the shadow registers.
+ */
+ emc_dbg(emc, STEPS, "Step 8\n");
+ emc_dbg(emc, SUB_STEPS, "Writing burst_regs\n");
+
+ for (i = 0; i < next->num_burst; i++) {
+ const u16 *offsets = emc->offsets->burst;
+ u16 offset;
+
+ if (!offsets[i])
+ continue;
+
+ value = next->burst_regs[i];
+ offset = offsets[i];
+
+ if (dram_type != DRAM_TYPE_LPDDR4 &&
+ (offset == EMC_MRW6 || offset == EMC_MRW7 ||
+ offset == EMC_MRW8 || offset == EMC_MRW9 ||
+ offset == EMC_MRW10 || offset == EMC_MRW11 ||
+ offset == EMC_MRW12 || offset == EMC_MRW13 ||
+ offset == EMC_MRW14 || offset == EMC_MRW15 ||
+ offset == EMC_TRAINING_CTRL))
+ continue;
+
+ /* Pain... And suffering. */
+ if (offset == EMC_CFG) {
+ value &= ~EMC_CFG_DRAM_ACPD;
+ value &= ~EMC_CFG_DYN_SELF_REF;
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ value &= ~EMC_CFG_DRAM_CLKSTOP_SR;
+ value &= ~EMC_CFG_DRAM_CLKSTOP_PD;
+ }
+ } else if (offset == EMC_MRS_WAIT_CNT &&
+ dram_type == DRAM_TYPE_LPDDR2 &&
+ opt_zcal_en_cc && !opt_cc_short_zcal &&
+ opt_short_zcal) {
+ value = (value & ~(EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK <<
+ EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT)) |
+ ((zq_wait_long & EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) <<
+ EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT);
+ } else if (offset == EMC_ZCAL_WAIT_CNT &&
+ dram_type == DRAM_TYPE_DDR3 && opt_zcal_en_cc &&
+ !opt_cc_short_zcal && opt_short_zcal) {
+ value = (value & ~(EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK <<
+ EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_SHIFT)) |
+ ((zq_wait_long & EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK) <<
+				 EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_SHIFT);
+ } else if (offset == EMC_ZCAL_INTERVAL && opt_zcal_en_cc) {
+ value = 0; /* EMC_ZCAL_INTERVAL reset value. */
+ } else if (offset == EMC_PMACRO_AUTOCAL_CFG_COMMON) {
+ value |= EMC_PMACRO_AUTOCAL_CFG_COMMON_E_CAL_BYPASS_DVFS;
+ } else if (offset == EMC_PMACRO_DATA_PAD_TX_CTRL) {
+ value &= ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC);
+ } else if (offset == EMC_PMACRO_CMD_PAD_TX_CTRL) {
+ value |= EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
+ value &= ~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC);
+ } else if (offset == EMC_PMACRO_BRICK_CTRL_RFU1) {
+ value &= 0xf800f800;
+ } else if (offset == EMC_PMACRO_COMMON_PAD_TX_CTRL) {
+ value &= 0xfffffff0;
+ }
+
+ emc_writel(emc, value, offset);
+ }
+
+ /* SW addition: do EMC refresh adjustment here. */
+ tegra210_emc_adjust_timing(emc, next);
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ value = (23 << EMC_MRW_MRW_MA_SHIFT) |
+ (next->run_clocks & EMC_MRW_MRW_OP_MASK);
+ emc_writel(emc, value, EMC_MRW);
+ }
+
+ /* Per channel burst registers. */
+ emc_dbg(emc, SUB_STEPS, "Writing burst_regs_per_ch\n");
+
+ for (i = 0; i < next->num_burst_per_ch; i++) {
+ const struct tegra210_emc_per_channel_regs *burst =
+ emc->offsets->burst_per_channel;
+
+ if (!burst[i].offset)
+ continue;
+
+ if (dram_type != DRAM_TYPE_LPDDR4 &&
+ (burst[i].offset == EMC_MRW6 ||
+ burst[i].offset == EMC_MRW7 ||
+ burst[i].offset == EMC_MRW8 ||
+ burst[i].offset == EMC_MRW9 ||
+ burst[i].offset == EMC_MRW10 ||
+ burst[i].offset == EMC_MRW11 ||
+ burst[i].offset == EMC_MRW12 ||
+ burst[i].offset == EMC_MRW13 ||
+ burst[i].offset == EMC_MRW14 ||
+ burst[i].offset == EMC_MRW15))
+ continue;
+
+ /* Filter out second channel if not in DUAL_CHANNEL mode. */
+ if (emc->num_channels < 2 && burst[i].bank >= 1)
+ continue;
+
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ next->burst_reg_per_ch[i], burst[i].offset);
+ emc_channel_writel(emc, burst[i].bank,
+ next->burst_reg_per_ch[i],
+ burst[i].offset);
+ }
+
+ /* Vref regs. */
+ emc_dbg(emc, SUB_STEPS, "Writing vref_regs\n");
+
+ for (i = 0; i < next->vref_num; i++) {
+ const struct tegra210_emc_per_channel_regs *vref =
+ emc->offsets->vref_per_channel;
+
+ if (!vref[i].offset)
+ continue;
+
+ if (emc->num_channels < 2 && vref[i].bank >= 1)
+ continue;
+
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ next->vref_perch_regs[i], vref[i].offset);
+ emc_channel_writel(emc, vref[i].bank, next->vref_perch_regs[i],
+ vref[i].offset);
+ }
+
+ /* Trimmers. */
+ emc_dbg(emc, SUB_STEPS, "Writing trim_regs\n");
+
+ for (i = 0; i < next->num_trim; i++) {
+ const u16 *offsets = emc->offsets->trim;
+
+ if (!offsets[i])
+ continue;
+
+ if (compensate_trimmer_applicable &&
+ (offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2 ||
+ offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3 ||
+ offsets[i] == EMC_DATA_BRLSHFT_0 ||
+ offsets[i] == EMC_DATA_BRLSHFT_1)) {
+ value = tegra210_emc_compensate(next, offsets[i]);
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ value, offsets[i]);
+ emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n",
+ (u32)(u64)offsets[i], value);
+ emc_writel(emc, value, offsets[i]);
+ } else {
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ next->trim_regs[i], offsets[i]);
+ emc_writel(emc, next->trim_regs[i], offsets[i]);
+ }
+ }
+
+ /* Per channel trimmers. */
+ emc_dbg(emc, SUB_STEPS, "Writing trim_regs_per_ch\n");
+
+ for (i = 0; i < next->num_trim_per_ch; i++) {
+ const struct tegra210_emc_per_channel_regs *trim =
+ &emc->offsets->trim_per_channel[0];
+ unsigned int offset;
+
+ if (!trim[i].offset)
+ continue;
+
+ if (emc->num_channels < 2 && trim[i].bank >= 1)
+ continue;
+
+ offset = trim[i].offset;
+
+ if (compensate_trimmer_applicable &&
+ (offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2 ||
+ offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3 ||
+ offset == EMC_DATA_BRLSHFT_0 ||
+ offset == EMC_DATA_BRLSHFT_1)) {
+ value = tegra210_emc_compensate(next, offset);
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ value, offset);
+ emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n", offset,
+ value);
+ emc_channel_writel(emc, trim[i].bank, value, offset);
+ } else {
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ next->trim_perch_regs[i], offset);
+ emc_channel_writel(emc, trim[i].bank,
+ next->trim_perch_regs[i], offset);
+ }
+ }
+
+ emc_dbg(emc, SUB_STEPS, "Writing burst_mc_regs\n");
+
+ for (i = 0; i < next->num_mc_regs; i++) {
+ const u16 *offsets = emc->offsets->burst_mc;
+ u32 *values = next->burst_mc_regs;
+
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ values[i], offsets[i]);
+ mc_writel(emc->mc, values[i], offsets[i]);
+ }
+
+	/*
+	 * Registers to be programmed on the faster clock: when scaling
+	 * down, the latency allowance scaling registers must be written
+	 * before the clock change, while still running at the higher
+	 * rate; the scaling-up case is handled after the change, in
+	 * step 25.
+	 */
+ if (next->rate < last->rate) {
+ const u16 *la = emc->offsets->la_scale;
+
+ emc_dbg(emc, SUB_STEPS, "Writing la_scale_regs\n");
+
+ for (i = 0; i < next->num_up_down; i++) {
+ emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
+ next->la_scale_regs[i], la[i]);
+ mc_writel(emc->mc, next->la_scale_regs[i], la[i]);
+ }
+ }
+
+ /* Flush all the burst register writes. */
+ mc_readl(emc->mc, MC_EMEM_ADR_CFG);
+
+ /*
+ * Step 9:
+ * LPDDR4 section A.
+ */
+ emc_dbg(emc, STEPS, "Step 9\n");
+
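+	/*
+	 * For LPDDR4, disable periodic ZQ calibration across the clock
+	 * change: clear EMC_ZCAL_INTERVAL once normally and once more
+	 * through the write-active-only window so that both the assembly
+	 * and the active register sets agree.
+	 */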
+ value = next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX];
+ value &= ~EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK;
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ emc_writel(emc, 0, EMC_ZCAL_INTERVAL);
+ emc_writel(emc, value, EMC_ZCAL_WAIT_CNT);
+
+ value = emc_dbg | (EMC_DBG_WRITE_MUX_ACTIVE |
+ EMC_DBG_WRITE_ACTIVE_ONLY);
+
+ emc_writel(emc, value, EMC_DBG);
+ emc_writel(emc, 0, EMC_ZCAL_INTERVAL);
+ emc_writel(emc, emc_dbg, EMC_DBG);
+ }
+
+ /*
+ * Step 10:
+ * LPDDR4 and DDR3 common section.
+ */
+ emc_dbg(emc, STEPS, "Step 10\n");
+
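+	/*
+	 * Queue the self-refresh entry through the CCFIFO so that it
+	 * executes as part of the clock-change sequence. For LPDDR4 with
+	 * a short destination clock period, the FSP-WR flip (MR13), the
+	 * MRW6/7 and MRW14/15 updates and the ZQ calibration start are
+	 * queued ahead of the clock change as well.
+	 */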
+ if (opt_dvfs_mode == MAN_SR || dram_type == DRAM_TYPE_LPDDR4) {
+ if (dram_type == DRAM_TYPE_LPDDR4)
+ ccfifo_writel(emc, 0x101, EMC_SELF_REF, 0);
+ else
+ ccfifo_writel(emc, 0x1, EMC_SELF_REF, 0);
+
+ if (dram_type == DRAM_TYPE_LPDDR4 &&
+ dst_clk_period <= zqcal_before_cc_cutoff) {
+ ccfifo_writel(emc, mr13_flip_fspwr ^ 0x40, EMC_MRW3, 0);
+ ccfifo_writel(emc, (next->burst_regs[EMC_MRW6_INDEX] &
+ 0xFFFF3F3F) |
+ (last->burst_regs[EMC_MRW6_INDEX] &
+ 0x0000C0C0), EMC_MRW6, 0);
+ ccfifo_writel(emc, (next->burst_regs[EMC_MRW14_INDEX] &
+ 0xFFFF0707) |
+ (last->burst_regs[EMC_MRW14_INDEX] &
+ 0x00003838), EMC_MRW14, 0);
+
+ if (emc->num_devices > 1) {
+ ccfifo_writel(emc,
+ (next->burst_regs[EMC_MRW7_INDEX] &
+ 0xFFFF3F3F) |
+ (last->burst_regs[EMC_MRW7_INDEX] &
+ 0x0000C0C0), EMC_MRW7, 0);
+ ccfifo_writel(emc,
+ (next->burst_regs[EMC_MRW15_INDEX] &
+ 0xFFFF0707) |
+ (last->burst_regs[EMC_MRW15_INDEX] &
+ 0x00003838), EMC_MRW15, 0);
+ }
+
+ if (opt_zcal_en_cc) {
+ if (emc->num_devices < 2)
+ ccfifo_writel(emc,
+ 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT
+ | EMC_ZQ_CAL_ZQ_CAL_CMD,
+ EMC_ZQ_CAL, 0);
+ else if (shared_zq_resistor)
+ ccfifo_writel(emc,
+ 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT
+ | EMC_ZQ_CAL_ZQ_CAL_CMD,
+ EMC_ZQ_CAL, 0);
+ else
+ ccfifo_writel(emc,
+ EMC_ZQ_CAL_ZQ_CAL_CMD,
+ EMC_ZQ_CAL, 0);
+ }
+ }
+ }
+
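+	/*
+	 * For LPDDR4, flip to the new frequency set point (MR13) after a
+	 * tRP wait, then allow tFC for the switch to complete before CKE
+	 * is deasserted below.
+	 */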
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ value = (1000 * fake->dram_timings[T_RP]) / src_clk_period;
+ ccfifo_writel(emc, mr13_flip_fspop | 0x8, EMC_MRW3, value);
+ ccfifo_writel(emc, 0, 0, tFC_lpddr4 / src_clk_period);
+ }
+
+ if (dram_type == DRAM_TYPE_LPDDR4 || opt_dvfs_mode != MAN_SR) {
+ delay = 30;
+
+ if (cya_allow_ref_cc) {
+ delay += (1000 * fake->dram_timings[T_RP]) /
+ src_clk_period;
+ delay += 4000 * fake->dram_timings[T_RFC];
+ }
+
+ ccfifo_writel(emc, emc_pin & ~(EMC_PIN_PIN_CKE_PER_DEV |
+ EMC_PIN_PIN_CKEB |
+ EMC_PIN_PIN_CKE),
+ EMC_PIN, delay);
+ }
+
+	/* Calculate the reference delay multiplier. */
+ value = 1;
+
+ if (ref_b4_sref_en)
+ value++;
+
+ if (cya_allow_ref_cc)
+ value++;
+
+ if (cya_issue_pc_ref)
+ value++;
+
+ if (dram_type != DRAM_TYPE_LPDDR4) {
+ delay = ((1000 * fake->dram_timings[T_RP] / src_clk_period) +
+ (1000 * fake->dram_timings[T_RFC] / src_clk_period));
+ delay = value * delay + 20;
+ } else {
+ delay = 0;
+ }
+
+ /*
+ * Step 11:
+ * Ramp down.
+ */
+ emc_dbg(emc, STEPS, "Step 11\n");
+
+ ccfifo_writel(emc, 0x0, EMC_CFG_SYNC, delay);
+
+ value = emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE | EMC_DBG_WRITE_ACTIVE_ONLY;
+ ccfifo_writel(emc, value, EMC_DBG, 0);
+
+ ramp_down_wait = tegra210_emc_dvfs_power_ramp_down(emc, src_clk_period,
+ 0);
+
+ /*
+ * Step 12:
+ * And finally - trigger the clock change.
+ */
+ emc_dbg(emc, STEPS, "Step 12\n");
+
+ ccfifo_writel(emc, 1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE, 0);
+ value &= ~EMC_DBG_WRITE_ACTIVE_ONLY;
+ ccfifo_writel(emc, value, EMC_DBG, 0);
+
+ /*
+ * Step 13:
+ * Ramp up.
+ */
+ emc_dbg(emc, STEPS, "Step 13\n");
+
+ ramp_up_wait = tegra210_emc_dvfs_power_ramp_up(emc, dst_clk_period, 0);
+ ccfifo_writel(emc, emc_dbg, EMC_DBG, 0);
+
+ /*
+ * Step 14:
+	 * Bring up the CKE pins.
+ */
+ emc_dbg(emc, STEPS, "Step 14\n");
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ value = emc_pin | EMC_PIN_PIN_CKE;
+
+ if (emc->num_devices <= 1)
+ value &= ~(EMC_PIN_PIN_CKEB | EMC_PIN_PIN_CKE_PER_DEV);
+ else
+ value |= EMC_PIN_PIN_CKEB | EMC_PIN_PIN_CKE_PER_DEV;
+
+ ccfifo_writel(emc, value, EMC_PIN, 0);
+ }
+
+ /*
+	 * Step 15:
+ * Calculate zqlatch wait time; has dependency on ramping times.
+ */
+ emc_dbg(emc, STEPS, "Step 15\n");
+
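+	/*
+	 * If ZQ calibration was started before the clock change (short
+	 * destination periods), the power ramp times already count
+	 * towards tZQCAL, so subtract them; otherwise only the power-down
+	 * exit time (tPDEX) is subtracted from the adjusted tZQCAL.
+	 */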
+ if (dst_clk_period <= zqcal_before_cc_cutoff) {
+ s32 t = (s32)(ramp_up_wait + ramp_down_wait) /
+ (s32)dst_clk_period;
+ zq_latch_dvfs_wait_time = (s32)tZQCAL_lpddr4_fc_adj - t;
+ } else {
+ zq_latch_dvfs_wait_time = tZQCAL_lpddr4_fc_adj -
+ div_o3(1000 * next->dram_timings[T_PDEX],
+ dst_clk_period);
+ }
+
+ emc_dbg(emc, INFO, "tZQCAL_lpddr4_fc_adj = %u\n", tZQCAL_lpddr4_fc_adj);
+ emc_dbg(emc, INFO, "dst_clk_period = %u\n",
+ dst_clk_period);
+ emc_dbg(emc, INFO, "next->dram_timings[T_PDEX] = %u\n",
+ next->dram_timings[T_PDEX]);
+ emc_dbg(emc, INFO, "zq_latch_dvfs_wait_time = %d\n",
+ max_t(s32, 0, zq_latch_dvfs_wait_time));
+
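+	/*
+	 * Queue the ZQ calibration start (unless it was issued before the
+	 * clock change) and latch commands, covering the three supported
+	 * topologies: a single device, two devices sharing one ZQ
+	 * resistor, and two devices with separate resistors.
+	 */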
+ if (dram_type == DRAM_TYPE_LPDDR4 && opt_zcal_en_cc) {
+ delay = div_o3(1000 * next->dram_timings[T_PDEX],
+ dst_clk_period);
+
+ if (emc->num_devices < 2) {
+ if (dst_clk_period > zqcal_before_cc_cutoff)
+ ccfifo_writel(emc,
+ 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
+ delay);
+
+ value = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;
+ ccfifo_writel(emc, value, EMC_MRW3, delay);
+ ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
+ ccfifo_writel(emc, 0, EMC_REF, 0);
+ ccfifo_writel(emc, 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_LATCH_CMD,
+ EMC_ZQ_CAL,
+ max_t(s32, 0, zq_latch_dvfs_wait_time));
+ } else if (shared_zq_resistor) {
+ if (dst_clk_period > zqcal_before_cc_cutoff)
+ ccfifo_writel(emc,
+ 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
+ delay);
+
+ ccfifo_writel(emc, 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
+ max_t(s32, 0, zq_latch_dvfs_wait_time) +
+ delay);
+ ccfifo_writel(emc, 1UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_LATCH_CMD,
+ EMC_ZQ_CAL, 0);
+
+ value = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;
+ ccfifo_writel(emc, value, EMC_MRW3, 0);
+ ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
+ ccfifo_writel(emc, 0, EMC_REF, 0);
+
+ ccfifo_writel(emc, 1UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
+ tZQCAL_lpddr4 / dst_clk_period);
+ } else {
+ if (dst_clk_period > zqcal_before_cc_cutoff)
+ ccfifo_writel(emc, EMC_ZQ_CAL_ZQ_CAL_CMD,
+ EMC_ZQ_CAL, delay);
+
+ value = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;
+ ccfifo_writel(emc, value, EMC_MRW3, delay);
+ ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
+ ccfifo_writel(emc, 0, EMC_REF, 0);
+
+ ccfifo_writel(emc, EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
+ max_t(s32, 0, zq_latch_dvfs_wait_time));
+ }
+ }
+
+ /* WAR: delay for zqlatch */
+ ccfifo_writel(emc, 0, 0, 10);
+
+ /*
+ * Step 16:
+ * LPDDR4 Conditional Training Kickoff. Removed.
+ */
+
+ /*
+ * Step 17:
+	 * Exit self-refresh in MAN_SR mode.
+ */
+ emc_dbg(emc, STEPS, "Step 17\n");
+
+ if (opt_dvfs_mode == MAN_SR && dram_type != DRAM_TYPE_LPDDR4)
+ ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
+
+ /*
+ * Step 18:
+	 * Send MRWs to LPDDR2/LPDDR3 and MRS commands to DDR3.
+ */
+ emc_dbg(emc, STEPS, "Step 18\n");
+
+ if (dram_type == DRAM_TYPE_LPDDR2) {
+ ccfifo_writel(emc, next->emc_mrw2, EMC_MRW2, 0);
+ ccfifo_writel(emc, next->emc_mrw, EMC_MRW, 0);
+ if (is_lpddr3)
+ ccfifo_writel(emc, next->emc_mrw4, EMC_MRW4, 0);
+ } else if (dram_type == DRAM_TYPE_DDR3) {
+ if (opt_dll_mode)
+ ccfifo_writel(emc, next->emc_emrs &
+ ~EMC_EMRS_USE_EMRS_LONG_CNT, EMC_EMRS, 0);
+ ccfifo_writel(emc, next->emc_emrs2 &
+ ~EMC_EMRS2_USE_EMRS2_LONG_CNT, EMC_EMRS2, 0);
+ ccfifo_writel(emc, next->emc_mrs |
+ EMC_EMRS_USE_EMRS_LONG_CNT, EMC_MRS, 0);
+ }
+
+ /*
+ * Step 19:
+ * ZQCAL for LPDDR3/DDR3
+ */
+ emc_dbg(emc, STEPS, "Step 19\n");
+
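+	/*
+	 * For LPDDR2/3, request ZQ calibration through MR10 (0x56 is the
+	 * short calibration code, 0xab the long one) after programming
+	 * the extended MRS wait counts to cover the calibration time; for
+	 * DDR3, issue EMC_ZQ_CAL commands per device instead.
+	 */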
+ if (opt_zcal_en_cc) {
+ if (dram_type == DRAM_TYPE_LPDDR2) {
+ value = opt_cc_short_zcal ? 90000 : 360000;
+ value = div_o3(value, dst_clk_period);
+ value = value <<
+ EMC_MRS_WAIT_CNT2_MRS_EXT2_WAIT_CNT_SHIFT |
+ value <<
+ EMC_MRS_WAIT_CNT2_MRS_EXT1_WAIT_CNT_SHIFT;
+ ccfifo_writel(emc, value, EMC_MRS_WAIT_CNT2, 0);
+
+ value = opt_cc_short_zcal ? 0x56 : 0xab;
+ ccfifo_writel(emc, 2 << EMC_MRW_MRW_DEV_SELECTN_SHIFT |
+ EMC_MRW_USE_MRW_EXT_CNT |
+ 10 << EMC_MRW_MRW_MA_SHIFT |
+ value << EMC_MRW_MRW_OP_SHIFT,
+ EMC_MRW, 0);
+
+ if (emc->num_devices > 1) {
+ value = 1 << EMC_MRW_MRW_DEV_SELECTN_SHIFT |
+ EMC_MRW_USE_MRW_EXT_CNT |
+ 10 << EMC_MRW_MRW_MA_SHIFT |
+ value << EMC_MRW_MRW_OP_SHIFT;
+ ccfifo_writel(emc, value, EMC_MRW, 0);
+ }
+ } else if (dram_type == DRAM_TYPE_DDR3) {
+ value = opt_cc_short_zcal ? 0 : EMC_ZQ_CAL_LONG;
+
+ ccfifo_writel(emc, value |
+ 2 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
+ 0);
+
+ if (emc->num_devices > 1) {
+ value = value | 1 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
+ EMC_ZQ_CAL_ZQ_CAL_CMD;
+ ccfifo_writel(emc, value, EMC_ZQ_CAL, 0);
+ }
+ }
+ }
+
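+	/*
+	 * If the BG regulator mode changes, queue the new
+	 * EMC_PMACRO_BG_BIAS_CTRL_0 value through the shadow bypass,
+	 * delayed by whatever portion of the fixed settling window the
+	 * power ramp-up has not already covered.
+	 */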
+ if (bg_reg_mode_change) {
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+
+ if (ramp_up_wait <= 1250000)
+ delay = (1250000 - ramp_up_wait) / dst_clk_period;
+ else
+ delay = 0;
+
+ ccfifo_writel(emc,
+ next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX],
+ EMC_PMACRO_BG_BIAS_CTRL_0, delay);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+ }
+
+ /*
+ * Step 20:
+ * Issue ref and optional QRST.
+ */
+ emc_dbg(emc, STEPS, "Step 20\n");
+
+ if (dram_type != DRAM_TYPE_LPDDR4)
+ ccfifo_writel(emc, 0, EMC_REF, 0);
+
+ if (opt_do_sw_qrst) {
+ ccfifo_writel(emc, 1, EMC_ISSUE_QRST, 0);
+ ccfifo_writel(emc, 0, EMC_ISSUE_QRST, 2);
+ }
+
+ /*
+ * Step 21:
+ * Restore ZCAL and ZCAL interval.
+ */
+ emc_dbg(emc, STEPS, "Step 21\n");
+
+ if (save_restore_clkstop_pd || opt_zcal_en_cc) {
+ ccfifo_writel(emc, emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE,
+ EMC_DBG, 0);
+ if (opt_zcal_en_cc && dram_type != DRAM_TYPE_LPDDR4)
+ ccfifo_writel(emc, next->burst_regs[EMC_ZCAL_INTERVAL_INDEX],
+ EMC_ZCAL_INTERVAL, 0);
+
+ if (save_restore_clkstop_pd)
+ ccfifo_writel(emc, next->burst_regs[EMC_CFG_INDEX] &
+ ~EMC_CFG_DYN_SELF_REF,
+ EMC_CFG, 0);
+ ccfifo_writel(emc, emc_dbg, EMC_DBG, 0);
+ }
+
+ /*
+ * Step 22:
+ * Restore EMC_CFG_PIPE_CLK.
+ */
+ emc_dbg(emc, STEPS, "Step 22\n");
+
+ ccfifo_writel(emc, emc_cfg_pipe_clk, EMC_CFG_PIPE_CLK, 0);
+
+ if (bg_reg_mode_change) {
+ if (enable_bg_reg)
+ emc_writel(emc,
+ next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ ~EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD,
+ EMC_PMACRO_BG_BIAS_CTRL_0);
+ else
+ emc_writel(emc,
+ next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
+ ~EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD,
+ EMC_PMACRO_BG_BIAS_CTRL_0);
+ }
+
+ /*
+	 * Step 23:
+	 * Disable the digital DLL, stalling all traffic, and trigger the
+	 * clock change.
+	 */
+ emc_dbg(emc, STEPS, "Step 23\n");
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
+ value = (value & ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK) |
+ (2 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+
+ tegra210_emc_do_clock_change(emc, clksrc);
+
+ /*
+ * Step 24:
+ * Save training results. Removed.
+ */
+
+ /*
+ * Step 25:
+ * Program MC updown registers.
+ */
+ emc_dbg(emc, STEPS, "Step 25\n");
+
+ if (next->rate > last->rate) {
+ for (i = 0; i < next->num_up_down; i++)
+ mc_writel(emc->mc, next->la_scale_regs[i],
+ emc->offsets->la_scale[i]);
+
+ tegra210_emc_timing_update(emc);
+ }
+
+ /*
+ * Step 26:
+ * Restore ZCAL registers.
+ */
+ emc_dbg(emc, STEPS, "Step 26\n");
+
+ if (dram_type == DRAM_TYPE_LPDDR4) {
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc, next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX],
+ EMC_ZCAL_WAIT_CNT);
+ emc_writel(emc, next->burst_regs[EMC_ZCAL_INTERVAL_INDEX],
+ EMC_ZCAL_INTERVAL);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+ }
+
+ if (dram_type != DRAM_TYPE_LPDDR4 && opt_zcal_en_cc &&
+ !opt_short_zcal && opt_cc_short_zcal) {
+ udelay(2);
+
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ if (dram_type == DRAM_TYPE_LPDDR2)
+ emc_writel(emc, next->burst_regs[EMC_MRS_WAIT_CNT_INDEX],
+ EMC_MRS_WAIT_CNT);
+ else if (dram_type == DRAM_TYPE_DDR3)
+ emc_writel(emc, next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX],
+ EMC_ZCAL_WAIT_CNT);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+ }
+
+ /*
+ * Step 27:
+ * Restore EMC_CFG, FDPD registers.
+ */
+ emc_dbg(emc, STEPS, "Step 27\n");
+
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc, next->burst_regs[EMC_CFG_INDEX], EMC_CFG);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+ emc_writel(emc, next->emc_fdpd_ctrl_cmd_no_ramp,
+ EMC_FDPD_CTRL_CMD_NO_RAMP);
+ emc_writel(emc, next->emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
+
+ /*
+ * Step 28:
+ * Training recover. Removed.
+ */
+ emc_dbg(emc, STEPS, "Step 28\n");
+
+ tegra210_emc_set_shadow_bypass(emc, ACTIVE);
+ emc_writel(emc,
+ next->burst_regs[EMC_PMACRO_AUTOCAL_CFG_COMMON_INDEX],
+ EMC_PMACRO_AUTOCAL_CFG_COMMON);
+ tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
+
+ /*
+ * Step 29:
+ * Power fix WAR.
+ */
+ emc_dbg(emc, STEPS, "Step 29\n");
+
+ emc_writel(emc, EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE0 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE1 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE2 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE3 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE4 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE5 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE6 |
+ EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE7,
+ EMC_PMACRO_CFG_PM_GLOBAL_0);
+ emc_writel(emc, EMC_PMACRO_TRAINING_CTRL_0_CH0_TRAINING_E_WRPTR,
+ EMC_PMACRO_TRAINING_CTRL_0);
+ emc_writel(emc, EMC_PMACRO_TRAINING_CTRL_1_CH1_TRAINING_E_WRPTR,
+ EMC_PMACRO_TRAINING_CTRL_1);
+ emc_writel(emc, 0, EMC_PMACRO_CFG_PM_GLOBAL_0);
+
+ /*
+ * Step 30:
+ * Re-enable autocal.
+ */
+ emc_dbg(emc, STEPS, "Step 30: Re-enable DLL and AUTOCAL\n");
+
+ if (next->burst_regs[EMC_CFG_DIG_DLL_INDEX] & EMC_CFG_DIG_DLL_CFG_DLL_EN) {
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_EN;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;
+ value = (value & ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK) |
+ (2 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+ tegra210_emc_timing_update(emc);
+ }
+
+ emc_writel(emc, next->emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
+
+ /* Done! Yay. */
+}
+
+const struct tegra210_emc_sequence tegra210_emc_r21021 = {
+ .revision = 0x7,
+ .set_clock = tegra210_emc_r21021_set_clock,
+ .periodic_compensation = tegra210_emc_r21021_periodic_compensation,
+};
diff --git a/drivers/memory/tegra/tegra210-emc-core.c b/drivers/memory/tegra/tegra210-emc-core.c
new file mode 100644
index 0000000000..3300bde47c
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-emc-core.c
@@ -0,0 +1,2064 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/mc.h>
+
+#include "tegra210-emc.h"
+#include "tegra210-mc.h"
+
+/* CLK_RST_CONTROLLER_CLK_SOURCE_EMC */
+#define EMC_CLK_EMC_2X_CLK_SRC_SHIFT 29
+#define EMC_CLK_EMC_2X_CLK_SRC_MASK \
+ (0x7 << EMC_CLK_EMC_2X_CLK_SRC_SHIFT)
+#define EMC_CLK_SOURCE_PLLM_LJ 0x4
+#define EMC_CLK_SOURCE_PLLMB_LJ 0x5
+#define EMC_CLK_FORCE_CC_TRIGGER BIT(27)
+#define EMC_CLK_MC_EMC_SAME_FREQ BIT(16)
+#define EMC_CLK_EMC_2X_CLK_DIVISOR_SHIFT 0
+#define EMC_CLK_EMC_2X_CLK_DIVISOR_MASK \
+ (0xff << EMC_CLK_EMC_2X_CLK_DIVISOR_SHIFT)
+
+/* CLK_RST_CONTROLLER_CLK_SOURCE_EMC_DLL */
+#define DLL_CLK_EMC_DLL_CLK_SRC_SHIFT 29
+#define DLL_CLK_EMC_DLL_CLK_SRC_MASK \
+ (0x7 << DLL_CLK_EMC_DLL_CLK_SRC_SHIFT)
+#define DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT 10
+#define DLL_CLK_EMC_DLL_DDLL_CLK_SEL_MASK \
+ (0x3 << DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT)
+#define PLLM_VCOA 0
+#define PLLM_VCOB 1
+#define EMC_DLL_SWITCH_OUT 2
+#define DLL_CLK_EMC_DLL_CLK_DIVISOR_SHIFT 0
+#define DLL_CLK_EMC_DLL_CLK_DIVISOR_MASK \
+ (0xff << DLL_CLK_EMC_DLL_CLK_DIVISOR_SHIFT)
+
+/* MC_EMEM_ARB_MISC0 */
+#define MC_EMEM_ARB_MISC0_EMC_SAME_FREQ BIT(27)
+
+/* EMC_DATA_BRLSHFT_X */
+#define EMC0_EMC_DATA_BRLSHFT_0_INDEX 2
+#define EMC1_EMC_DATA_BRLSHFT_0_INDEX 3
+#define EMC0_EMC_DATA_BRLSHFT_1_INDEX 4
+#define EMC1_EMC_DATA_BRLSHFT_1_INDEX 5
+
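+/*
+ * TRIM_REG() computes the effective delay of one byte lane: the fine
+ * trim from the per-rank OB DDLL long DQ register plus 64 taps for each
+ * coarse step in the corresponding EMC_DATA_BRLSHFT_* field.
+ */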
+#define TRIM_REG(chan, rank, reg, byte) \
+ (((EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## reg ## \
+ _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte ## _MASK & \
+ next->trim_regs[EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## \
+ rank ## _ ## reg ## _INDEX]) >> \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## reg ## \
+ _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte ## _SHIFT) \
+ + \
+ (((EMC_DATA_BRLSHFT_ ## rank ## _RANK ## rank ## _BYTE ## \
+ byte ## _DATA_BRLSHFT_MASK & \
+ next->trim_perch_regs[EMC ## chan ## \
+ _EMC_DATA_BRLSHFT_ ## rank ## _INDEX]) >> \
+ EMC_DATA_BRLSHFT_ ## rank ## _RANK ## rank ## _BYTE ## \
+ byte ## _DATA_BRLSHFT_SHIFT) * 64))
+
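+/*
+ * CALC_TEMP() packs two adjusted byte-lane values from new[] back into
+ * the byte fields of an EMC_PMACRO_OB_DDLL_LONG_DQ_RANK* register.
+ */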
+#define CALC_TEMP(rank, reg, byte1, byte2, n) \
+ (((new[n] << EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## \
+ reg ## _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte1 ## _SHIFT) & \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## reg ## \
+ _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte1 ## _MASK) \
+ | \
+ ((new[n + 1] << EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ##\
+ reg ## _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte2 ## _SHIFT) & \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK ## rank ## _ ## reg ## \
+ _OB_DDLL_LONG_DQ_RANK ## rank ## _BYTE ## byte2 ## _MASK))
+
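+/*
+ * REFRESH_SPEEDUP() scales the refresh interval in the low half-word of
+ * a register by the given factor while leaving the upper bits intact.
+ */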
+#define REFRESH_SPEEDUP(value, speedup) \
+ (((value) & 0xffff0000) | ((value) & 0xffff) * (speedup))
+
+#define LPDDR2_MR4_SRR GENMASK(2, 0)
+
+static const struct tegra210_emc_sequence *tegra210_emc_sequences[] = {
+ &tegra210_emc_r21021,
+};
+
+static const struct tegra210_emc_table_register_offsets
+tegra210_emc_table_register_offsets = {
+ .burst = {
+ EMC_RC,
+ EMC_RFC,
+ EMC_RFCPB,
+ EMC_REFCTRL2,
+ EMC_RFC_SLR,
+ EMC_RAS,
+ EMC_RP,
+ EMC_R2W,
+ EMC_W2R,
+ EMC_R2P,
+ EMC_W2P,
+ EMC_R2R,
+ EMC_TPPD,
+ EMC_CCDMW,
+ EMC_RD_RCD,
+ EMC_WR_RCD,
+ EMC_RRD,
+ EMC_REXT,
+ EMC_WEXT,
+ EMC_WDV_CHK,
+ EMC_WDV,
+ EMC_WSV,
+ EMC_WEV,
+ EMC_WDV_MASK,
+ EMC_WS_DURATION,
+ EMC_WE_DURATION,
+ EMC_QUSE,
+ EMC_QUSE_WIDTH,
+ EMC_IBDLY,
+ EMC_OBDLY,
+ EMC_EINPUT,
+ EMC_MRW6,
+ EMC_EINPUT_DURATION,
+ EMC_PUTERM_EXTRA,
+ EMC_PUTERM_WIDTH,
+ EMC_QRST,
+ EMC_QSAFE,
+ EMC_RDV,
+ EMC_RDV_MASK,
+ EMC_RDV_EARLY,
+ EMC_RDV_EARLY_MASK,
+ EMC_REFRESH,
+ EMC_BURST_REFRESH_NUM,
+ EMC_PRE_REFRESH_REQ_CNT,
+ EMC_PDEX2WR,
+ EMC_PDEX2RD,
+ EMC_PCHG2PDEN,
+ EMC_ACT2PDEN,
+ EMC_AR2PDEN,
+ EMC_RW2PDEN,
+ EMC_CKE2PDEN,
+ EMC_PDEX2CKE,
+ EMC_PDEX2MRR,
+ EMC_TXSR,
+ EMC_TXSRDLL,
+ EMC_TCKE,
+ EMC_TCKESR,
+ EMC_TPD,
+ EMC_TFAW,
+ EMC_TRPAB,
+ EMC_TCLKSTABLE,
+ EMC_TCLKSTOP,
+ EMC_MRW7,
+ EMC_TREFBW,
+ EMC_ODT_WRITE,
+ EMC_FBIO_CFG5,
+ EMC_FBIO_CFG7,
+ EMC_CFG_DIG_DLL,
+ EMC_CFG_DIG_DLL_PERIOD,
+ EMC_PMACRO_IB_RXRT,
+ EMC_CFG_PIPE_1,
+ EMC_CFG_PIPE_2,
+ EMC_PMACRO_QUSE_DDLL_RANK0_4,
+ EMC_PMACRO_QUSE_DDLL_RANK0_5,
+ EMC_PMACRO_QUSE_DDLL_RANK1_4,
+ EMC_PMACRO_QUSE_DDLL_RANK1_5,
+ EMC_MRW8,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_4,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_5,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_3,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_4,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_5,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_3,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_4,
+ EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_5,
+ EMC_PMACRO_DDLL_LONG_CMD_0,
+ EMC_PMACRO_DDLL_LONG_CMD_1,
+ EMC_PMACRO_DDLL_LONG_CMD_2,
+ EMC_PMACRO_DDLL_LONG_CMD_3,
+ EMC_PMACRO_DDLL_LONG_CMD_4,
+ EMC_PMACRO_DDLL_SHORT_CMD_0,
+ EMC_PMACRO_DDLL_SHORT_CMD_1,
+ EMC_PMACRO_DDLL_SHORT_CMD_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_3,
+ EMC_TXDSRVTTGEN,
+ EMC_FDPD_CTRL_DQ,
+ EMC_FDPD_CTRL_CMD,
+ EMC_FBIO_SPARE,
+ EMC_ZCAL_INTERVAL,
+ EMC_ZCAL_WAIT_CNT,
+ EMC_MRS_WAIT_CNT,
+ EMC_MRS_WAIT_CNT2,
+ EMC_AUTO_CAL_CHANNEL,
+ EMC_DLL_CFG_0,
+ EMC_DLL_CFG_1,
+ EMC_PMACRO_AUTOCAL_CFG_COMMON,
+ EMC_PMACRO_ZCTRL,
+ EMC_CFG,
+ EMC_CFG_PIPE,
+ EMC_DYN_SELF_REF_CONTROL,
+ EMC_QPOP,
+ EMC_DQS_BRLSHFT_0,
+ EMC_DQS_BRLSHFT_1,
+ EMC_CMD_BRLSHFT_2,
+ EMC_CMD_BRLSHFT_3,
+ EMC_PMACRO_PAD_CFG_CTRL,
+ EMC_PMACRO_DATA_PAD_RX_CTRL,
+ EMC_PMACRO_CMD_PAD_RX_CTRL,
+ EMC_PMACRO_DATA_RX_TERM_MODE,
+ EMC_PMACRO_CMD_RX_TERM_MODE,
+ EMC_PMACRO_CMD_PAD_TX_CTRL,
+ EMC_PMACRO_DATA_PAD_TX_CTRL,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL,
+ EMC_PMACRO_VTTGEN_CTRL_0,
+ EMC_PMACRO_VTTGEN_CTRL_1,
+ EMC_PMACRO_VTTGEN_CTRL_2,
+ EMC_PMACRO_BRICK_CTRL_RFU1,
+ EMC_PMACRO_CMD_BRICK_CTRL_FDPD,
+ EMC_PMACRO_BRICK_CTRL_RFU2,
+ EMC_PMACRO_DATA_BRICK_CTRL_FDPD,
+ EMC_PMACRO_BG_BIAS_CTRL_0,
+ EMC_CFG_3,
+ EMC_PMACRO_TX_PWRD_0,
+ EMC_PMACRO_TX_PWRD_1,
+ EMC_PMACRO_TX_PWRD_2,
+ EMC_PMACRO_TX_PWRD_3,
+ EMC_PMACRO_TX_PWRD_4,
+ EMC_PMACRO_TX_PWRD_5,
+ EMC_CONFIG_SAMPLE_DELAY,
+ EMC_PMACRO_TX_SEL_CLK_SRC_0,
+ EMC_PMACRO_TX_SEL_CLK_SRC_1,
+ EMC_PMACRO_TX_SEL_CLK_SRC_2,
+ EMC_PMACRO_TX_SEL_CLK_SRC_3,
+ EMC_PMACRO_TX_SEL_CLK_SRC_4,
+ EMC_PMACRO_TX_SEL_CLK_SRC_5,
+ EMC_PMACRO_DDLL_BYPASS,
+ EMC_PMACRO_DDLL_PWRD_0,
+ EMC_PMACRO_DDLL_PWRD_1,
+ EMC_PMACRO_DDLL_PWRD_2,
+ EMC_PMACRO_CMD_CTRL_0,
+ EMC_PMACRO_CMD_CTRL_1,
+ EMC_PMACRO_CMD_CTRL_2,
+ EMC_TR_TIMING_0,
+ EMC_TR_DVFS,
+ EMC_TR_CTRL_1,
+ EMC_TR_RDV,
+ EMC_TR_QPOP,
+ EMC_TR_RDV_MASK,
+ EMC_MRW14,
+ EMC_TR_QSAFE,
+ EMC_TR_QRST,
+ EMC_TRAINING_CTRL,
+ EMC_TRAINING_SETTLE,
+ EMC_TRAINING_VREF_SETTLE,
+ EMC_TRAINING_CA_FINE_CTRL,
+ EMC_TRAINING_CA_CTRL_MISC,
+ EMC_TRAINING_CA_CTRL_MISC1,
+ EMC_TRAINING_CA_VREF_CTRL,
+ EMC_TRAINING_QUSE_CORS_CTRL,
+ EMC_TRAINING_QUSE_FINE_CTRL,
+ EMC_TRAINING_QUSE_CTRL_MISC,
+ EMC_TRAINING_QUSE_VREF_CTRL,
+ EMC_TRAINING_READ_FINE_CTRL,
+ EMC_TRAINING_READ_CTRL_MISC,
+ EMC_TRAINING_READ_VREF_CTRL,
+ EMC_TRAINING_WRITE_FINE_CTRL,
+ EMC_TRAINING_WRITE_CTRL_MISC,
+ EMC_TRAINING_WRITE_VREF_CTRL,
+ EMC_TRAINING_MPC,
+ EMC_MRW15,
+ },
+ .trim = {
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_0,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_1,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_2,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_3,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_0,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_1,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_2,
+ EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_3,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_2,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_0,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_1,
+ EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_2,
+ EMC_PMACRO_IB_VREF_DQS_0,
+ EMC_PMACRO_IB_VREF_DQS_1,
+ EMC_PMACRO_IB_VREF_DQ_0,
+ EMC_PMACRO_IB_VREF_DQ_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_4,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_5,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_2,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_0,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_1,
+ EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_2,
+ EMC_PMACRO_QUSE_DDLL_RANK0_0,
+ EMC_PMACRO_QUSE_DDLL_RANK0_1,
+ EMC_PMACRO_QUSE_DDLL_RANK0_2,
+ EMC_PMACRO_QUSE_DDLL_RANK0_3,
+ EMC_PMACRO_QUSE_DDLL_RANK1_0,
+ EMC_PMACRO_QUSE_DDLL_RANK1_1,
+ EMC_PMACRO_QUSE_DDLL_RANK1_2,
+ EMC_PMACRO_QUSE_DDLL_RANK1_3
+ },
+ .burst_mc = {
+ MC_EMEM_ARB_CFG,
+ MC_EMEM_ARB_OUTSTANDING_REQ,
+ MC_EMEM_ARB_REFPB_HP_CTRL,
+ MC_EMEM_ARB_REFPB_BANK_CTRL,
+ MC_EMEM_ARB_TIMING_RCD,
+ MC_EMEM_ARB_TIMING_RP,
+ MC_EMEM_ARB_TIMING_RC,
+ MC_EMEM_ARB_TIMING_RAS,
+ MC_EMEM_ARB_TIMING_FAW,
+ MC_EMEM_ARB_TIMING_RRD,
+ MC_EMEM_ARB_TIMING_RAP2PRE,
+ MC_EMEM_ARB_TIMING_WAP2PRE,
+ MC_EMEM_ARB_TIMING_R2R,
+ MC_EMEM_ARB_TIMING_W2W,
+ MC_EMEM_ARB_TIMING_R2W,
+ MC_EMEM_ARB_TIMING_CCDMW,
+ MC_EMEM_ARB_TIMING_W2R,
+ MC_EMEM_ARB_TIMING_RFCPB,
+ MC_EMEM_ARB_DA_TURNS,
+ MC_EMEM_ARB_DA_COVERS,
+ MC_EMEM_ARB_MISC0,
+ MC_EMEM_ARB_MISC1,
+ MC_EMEM_ARB_MISC2,
+ MC_EMEM_ARB_RING1_THROTTLE,
+ MC_EMEM_ARB_DHYST_CTRL,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_0,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_1,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_2,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_3,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_4,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_5,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_6,
+ MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_7,
+ },
+ .la_scale = {
+ MC_MLL_MPCORER_PTSA_RATE,
+ MC_FTOP_PTSA_RATE,
+ MC_PTSA_GRANT_DECREMENT,
+ MC_LATENCY_ALLOWANCE_XUSB_0,
+ MC_LATENCY_ALLOWANCE_XUSB_1,
+ MC_LATENCY_ALLOWANCE_TSEC_0,
+ MC_LATENCY_ALLOWANCE_SDMMCA_0,
+ MC_LATENCY_ALLOWANCE_SDMMCAA_0,
+ MC_LATENCY_ALLOWANCE_SDMMC_0,
+ MC_LATENCY_ALLOWANCE_SDMMCAB_0,
+ MC_LATENCY_ALLOWANCE_PPCS_0,
+ MC_LATENCY_ALLOWANCE_PPCS_1,
+ MC_LATENCY_ALLOWANCE_MPCORE_0,
+ MC_LATENCY_ALLOWANCE_HC_0,
+ MC_LATENCY_ALLOWANCE_HC_1,
+ MC_LATENCY_ALLOWANCE_AVPC_0,
+ MC_LATENCY_ALLOWANCE_GPU_0,
+ MC_LATENCY_ALLOWANCE_GPU2_0,
+ MC_LATENCY_ALLOWANCE_NVENC_0,
+ MC_LATENCY_ALLOWANCE_NVDEC_0,
+ MC_LATENCY_ALLOWANCE_VIC_0,
+ MC_LATENCY_ALLOWANCE_VI2_0,
+ MC_LATENCY_ALLOWANCE_ISP2_0,
+ MC_LATENCY_ALLOWANCE_ISP2_1,
+ },
+ .burst_per_channel = {
+ { .bank = 0, .offset = EMC_MRW10, },
+ { .bank = 1, .offset = EMC_MRW10, },
+ { .bank = 0, .offset = EMC_MRW11, },
+ { .bank = 1, .offset = EMC_MRW11, },
+ { .bank = 0, .offset = EMC_MRW12, },
+ { .bank = 1, .offset = EMC_MRW12, },
+ { .bank = 0, .offset = EMC_MRW13, },
+ { .bank = 1, .offset = EMC_MRW13, },
+ },
+ .trim_per_channel = {
+ { .bank = 0, .offset = EMC_CMD_BRLSHFT_0, },
+ { .bank = 1, .offset = EMC_CMD_BRLSHFT_1, },
+ { .bank = 0, .offset = EMC_DATA_BRLSHFT_0, },
+ { .bank = 1, .offset = EMC_DATA_BRLSHFT_0, },
+ { .bank = 0, .offset = EMC_DATA_BRLSHFT_1, },
+ { .bank = 1, .offset = EMC_DATA_BRLSHFT_1, },
+ { .bank = 0, .offset = EMC_QUSE_BRLSHFT_0, },
+ { .bank = 1, .offset = EMC_QUSE_BRLSHFT_1, },
+ { .bank = 0, .offset = EMC_QUSE_BRLSHFT_2, },
+ { .bank = 1, .offset = EMC_QUSE_BRLSHFT_3, },
+ },
+ .vref_per_channel = {
+ {
+ .bank = 0,
+ .offset = EMC_TRAINING_OPT_DQS_IB_VREF_RANK0,
+ }, {
+ .bank = 1,
+ .offset = EMC_TRAINING_OPT_DQS_IB_VREF_RANK0,
+ }, {
+ .bank = 0,
+ .offset = EMC_TRAINING_OPT_DQS_IB_VREF_RANK1,
+ }, {
+ .bank = 1,
+ .offset = EMC_TRAINING_OPT_DQS_IB_VREF_RANK1,
+ },
+ },
+};
+
+static void tegra210_emc_train(struct timer_list *timer)
+{
+ struct tegra210_emc *emc = from_timer(emc, timer, training);
+ unsigned long flags;
+
+ if (!emc->last)
+ return;
+
+ spin_lock_irqsave(&emc->lock, flags);
+
+ if (emc->sequence->periodic_compensation)
+ emc->sequence->periodic_compensation(emc);
+
+ spin_unlock_irqrestore(&emc->lock, flags);
+
+ mod_timer(&emc->training,
+ jiffies + msecs_to_jiffies(emc->training_interval));
+}
+
+static void tegra210_emc_training_start(struct tegra210_emc *emc)
+{
+ mod_timer(&emc->training,
+ jiffies + msecs_to_jiffies(emc->training_interval));
+}
+
+static void tegra210_emc_training_stop(struct tegra210_emc *emc)
+{
+ del_timer(&emc->training);
+}
+
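+/*
+ * Read the MR4 temperature sensor on every DRAM device and return the
+ * highest reported refresh-rate state (0-7).
+ */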
+static unsigned int tegra210_emc_get_temperature(struct tegra210_emc *emc)
+{
+ unsigned long flags;
+ u32 value, max = 0;
+ unsigned int i;
+
+ spin_lock_irqsave(&emc->lock, flags);
+
+ for (i = 0; i < emc->num_devices; i++) {
+ value = tegra210_emc_mrr_read(emc, i, 4);
+
+ if (value & BIT(7))
+ dev_dbg(emc->dev,
+ "sensor reading changed for device %u: %08x\n",
+ i, value);
+
+ value = FIELD_GET(LPDDR2_MR4_SRR, value);
+ if (value > max)
+ max = value;
+ }
+
+ spin_unlock_irqrestore(&emc->lock, flags);
+
+ return max;
+}
+
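+/*
+ * Timer callback that maps the sampled (or debugfs-forced) temperature
+ * state to a refresh setting: states 0-3 use nominal refresh, 4 doubles
+ * it, 5 quadruples it and 6-7 throttle to the derated timings.
+ */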
+static void tegra210_emc_poll_refresh(struct timer_list *timer)
+{
+ struct tegra210_emc *emc = from_timer(emc, timer, refresh_timer);
+ unsigned int temperature;
+
+ if (!emc->debugfs.temperature)
+ temperature = tegra210_emc_get_temperature(emc);
+ else
+ temperature = emc->debugfs.temperature;
+
+ if (temperature == emc->temperature)
+ goto reset;
+
+ switch (temperature) {
+ case 0 ... 3:
+ /* temperature is fine, using regular refresh */
+ dev_dbg(emc->dev, "switching to nominal refresh...\n");
+ tegra210_emc_set_refresh(emc, TEGRA210_EMC_REFRESH_NOMINAL);
+ break;
+
+ case 4:
+ dev_dbg(emc->dev, "switching to 2x refresh...\n");
+ tegra210_emc_set_refresh(emc, TEGRA210_EMC_REFRESH_2X);
+ break;
+
+ case 5:
+ dev_dbg(emc->dev, "switching to 4x refresh...\n");
+ tegra210_emc_set_refresh(emc, TEGRA210_EMC_REFRESH_4X);
+ break;
+
+ case 6 ... 7:
+ dev_dbg(emc->dev, "switching to throttle refresh...\n");
+ tegra210_emc_set_refresh(emc, TEGRA210_EMC_REFRESH_THROTTLE);
+ break;
+
+ default:
+ WARN(1, "invalid DRAM temperature state %u\n", temperature);
+ return;
+ }
+
+ emc->temperature = temperature;
+
+reset:
+ if (atomic_read(&emc->refresh_poll) > 0) {
+ unsigned int interval = emc->refresh_poll_interval;
+ unsigned int timeout = msecs_to_jiffies(interval);
+
+ mod_timer(&emc->refresh_timer, jiffies + timeout);
+ }
+}
+
+static void tegra210_emc_poll_refresh_stop(struct tegra210_emc *emc)
+{
+ atomic_set(&emc->refresh_poll, 0);
+ del_timer_sync(&emc->refresh_timer);
+}
+
+static void tegra210_emc_poll_refresh_start(struct tegra210_emc *emc)
+{
+ atomic_set(&emc->refresh_poll, 1);
+
+ mod_timer(&emc->refresh_timer,
+ jiffies + msecs_to_jiffies(emc->refresh_poll_interval));
+}
+
+static int tegra210_emc_cd_max_state(struct thermal_cooling_device *cd,
+ unsigned long *state)
+{
+ *state = 1;
+
+ return 0;
+}
+
+static int tegra210_emc_cd_get_state(struct thermal_cooling_device *cd,
+ unsigned long *state)
+{
+ struct tegra210_emc *emc = cd->devdata;
+
+ *state = atomic_read(&emc->refresh_poll);
+
+ return 0;
+}
+
+static int tegra210_emc_cd_set_state(struct thermal_cooling_device *cd,
+ unsigned long state)
+{
+ struct tegra210_emc *emc = cd->devdata;
+
+ if (state == atomic_read(&emc->refresh_poll))
+ return 0;
+
+ if (state)
+ tegra210_emc_poll_refresh_start(emc);
+ else
+ tegra210_emc_poll_refresh_stop(emc);
+
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops tegra210_emc_cd_ops = {
+ .get_max_state = tegra210_emc_cd_max_state,
+ .get_cur_state = tegra210_emc_cd_get_state,
+ .set_cur_state = tegra210_emc_cd_set_state,
+};
+
+static void tegra210_emc_set_clock(struct tegra210_emc *emc, u32 clksrc)
+{
+ emc->sequence->set_clock(emc, clksrc);
+
+ if (emc->next->periodic_training)
+ tegra210_emc_training_start(emc);
+ else
+ tegra210_emc_training_stop(emc);
+}
+
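+/*
+ * Mirror the EMC clock source and divider into the EMC DLL clock
+ * register, select the matching DDLL clock input (PLLM VCO A or B, or
+ * the switch-out path) and enable or disable the DLL clock as the
+ * target timing requires.
+ */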
+static void tegra210_change_dll_src(struct tegra210_emc *emc,
+ u32 clksrc)
+{
+ u32 dll_setting = emc->next->dll_clk_src;
+ u32 emc_clk_src;
+ u32 emc_clk_div;
+
+ emc_clk_src = (clksrc & EMC_CLK_EMC_2X_CLK_SRC_MASK) >>
+ EMC_CLK_EMC_2X_CLK_SRC_SHIFT;
+ emc_clk_div = (clksrc & EMC_CLK_EMC_2X_CLK_DIVISOR_MASK) >>
+ EMC_CLK_EMC_2X_CLK_DIVISOR_SHIFT;
+
+ dll_setting &= ~(DLL_CLK_EMC_DLL_CLK_SRC_MASK |
+ DLL_CLK_EMC_DLL_CLK_DIVISOR_MASK);
+ dll_setting |= emc_clk_src << DLL_CLK_EMC_DLL_CLK_SRC_SHIFT;
+ dll_setting |= emc_clk_div << DLL_CLK_EMC_DLL_CLK_DIVISOR_SHIFT;
+
+ dll_setting &= ~DLL_CLK_EMC_DLL_DDLL_CLK_SEL_MASK;
+ if (emc_clk_src == EMC_CLK_SOURCE_PLLMB_LJ)
+ dll_setting |= (PLLM_VCOB <<
+ DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT);
+ else if (emc_clk_src == EMC_CLK_SOURCE_PLLM_LJ)
+ dll_setting |= (PLLM_VCOA <<
+ DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT);
+ else
+ dll_setting |= (EMC_DLL_SWITCH_OUT <<
+ DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT);
+
+ tegra210_clk_emc_dll_update_setting(dll_setting);
+
+ if (emc->next->clk_out_enb_x_0_clk_enb_emc_dll)
+ tegra210_clk_emc_dll_enable(true);
+ else
+ tegra210_clk_emc_dll_enable(false);
+}
+
+int tegra210_emc_set_refresh(struct tegra210_emc *emc,
+ enum tegra210_emc_refresh refresh)
+{
+ struct tegra210_emc_timing *timings;
+ unsigned long flags;
+
+ if ((emc->dram_type != DRAM_TYPE_LPDDR2 &&
+ emc->dram_type != DRAM_TYPE_LPDDR4) ||
+ !emc->last)
+ return -ENODEV;
+
+ if (refresh > TEGRA210_EMC_REFRESH_THROTTLE)
+ return -EINVAL;
+
+ if (refresh == emc->refresh)
+ return 0;
+
+ spin_lock_irqsave(&emc->lock, flags);
+
+ if (refresh == TEGRA210_EMC_REFRESH_THROTTLE && emc->derated)
+ timings = emc->derated;
+ else
+ timings = emc->nominal;
+
+ if (timings != emc->timings) {
+ unsigned int index = emc->last - emc->timings;
+ u32 clksrc;
+
+ clksrc = emc->provider.configs[index].value |
+ EMC_CLK_FORCE_CC_TRIGGER;
+
+ emc->next = &timings[index];
+ emc->timings = timings;
+
+ tegra210_emc_set_clock(emc, clksrc);
+ } else {
+ tegra210_emc_adjust_timing(emc, emc->last);
+ tegra210_emc_timing_update(emc);
+
+ if (refresh != TEGRA210_EMC_REFRESH_NOMINAL)
+ emc_writel(emc, EMC_REF_REF_CMD, EMC_REF);
+ }
+
+ spin_unlock_irqrestore(&emc->lock, flags);
+
+ return 0;
+}
+
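+/*
+ * Issue a mode register read (MRR) to the given chip and concatenate
+ * the 16-bit results from all channels into a single value, with
+ * channel 0 ending up in the upper half on dual-channel configurations.
+ */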
+u32 tegra210_emc_mrr_read(struct tegra210_emc *emc, unsigned int chip,
+ unsigned int address)
+{
+ u32 value, ret = 0;
+ unsigned int i;
+
+ value = (chip & EMC_MRR_DEV_SEL_MASK) << EMC_MRR_DEV_SEL_SHIFT |
+ (address & EMC_MRR_MA_MASK) << EMC_MRR_MA_SHIFT;
+ emc_writel(emc, value, EMC_MRR);
+
+ for (i = 0; i < emc->num_channels; i++)
+ WARN(tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_MRR_DIVLD, 1),
+ "Timed out waiting for MRR %u (ch=%u)\n", address, i);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ value = emc_channel_readl(emc, i, EMC_MRR);
+ value &= EMC_MRR_DATA_MASK;
+
+ ret = (ret << 16) | value;
+ }
+
+ return ret;
+}
+
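+/*
+ * Trigger the clock source switch in the CAR, during which the queued
+ * CCFIFO sequence is executed, and wait for the EMC to report
+ * completion. The initial MC and EMC reads flush any posted writes.
+ */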
+void tegra210_emc_do_clock_change(struct tegra210_emc *emc, u32 clksrc)
+{
+ int err;
+
+ mc_readl(emc->mc, MC_EMEM_ADR_CFG);
+ emc_readl(emc, EMC_INTSTATUS);
+
+ tegra210_clk_emc_update_setting(clksrc);
+
+ err = tegra210_emc_wait_for_update(emc, 0, EMC_INTSTATUS,
+ EMC_INTSTATUS_CLKCHANGE_COMPLETE,
+ true);
+ if (err)
+ dev_warn(emc->dev, "clock change completion error: %d\n", err);
+}
+
+struct tegra210_emc_timing *tegra210_emc_find_timing(struct tegra210_emc *emc,
+ unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++)
+ if (emc->timings[i].rate * 1000UL == rate)
+ return &emc->timings[i];
+
+ return NULL;
+}
+
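+/* Poll a channel status bit until it matches @state, or time out. */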
+int tegra210_emc_wait_for_update(struct tegra210_emc *emc, unsigned int channel,
+ unsigned int offset, u32 bit_mask, bool state)
+{
+ unsigned int i;
+ u32 value;
+
+ for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; i++) {
+ value = emc_channel_readl(emc, channel, offset);
+ if (!!(value & bit_mask) == state)
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
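+/* Set or clear EMC_DBG write-mux-active, i.e. shadow register bypass. */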
+void tegra210_emc_set_shadow_bypass(struct tegra210_emc *emc, int set)
+{
+ u32 emc_dbg = emc_readl(emc, EMC_DBG);
+
+ if (set)
+ emc_writel(emc, emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
+ else
+ emc_writel(emc, emc_dbg & ~EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
+}
+
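+/* The DLL is considered enabled unless EMRS bit 0 requests it disabled. */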
+u32 tegra210_emc_get_dll_state(struct tegra210_emc_timing *next)
+{
+ if (next->emc_emrs & 0x1)
+ return 0;
+
+ return 1;
+}
+
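+/*
+ * Latch the shadow (assembly) registers into the active set and wait
+ * for the update to complete on all channels.
+ */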
+void tegra210_emc_timing_update(struct tegra210_emc *emc)
+{
+ unsigned int i;
+ int err = 0;
+
+ emc_writel(emc, 0x1, EMC_TIMING_CONTROL);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ err |= tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_TIMING_UPDATE_STALLED,
+ false);
+ }
+
+ if (err)
+ dev_warn(emc->dev, "timing update error: %d\n", err);
+}
+
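+/*
+ * Decode the oscillator count encoding: values below 0x40 count in
+ * units of 16 clocks; higher ranges map to 2048, 4096 or 8192 clocks.
+ */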
+unsigned long tegra210_emc_actual_osc_clocks(u32 in)
+{
+ if (in < 0x40)
+ return in * 16;
+ else if (in < 0x80)
+ return 2048;
+ else if (in < 0xc0)
+ return 4096;
+ else
+ return 8192;
+}
+
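+/*
+ * Kick off the DRAM oscillators with an MPC command; the read-back
+ * flushes the write.
+ */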
+void tegra210_emc_start_periodic_compensation(struct tegra210_emc *emc)
+{
+ u32 mpc_req = 0x4b;
+
+ emc_writel(emc, mpc_req, EMC_MPC);
+ mpc_req = emc_readl(emc, EMC_MPC);
+}
+
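+/*
+ * Recompute a trim or barrel-shift register from the trained values:
+ * each byte lane is shifted by the measured clock-tree drift (converted
+ * to taps) when it exceeds the timing's tree margin. DATA_BRLSHFT_*
+ * takes the coarse divide-by-64 part, the LONG_DQ registers the fine
+ * remainder.
+ */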
+u32 tegra210_emc_compensate(struct tegra210_emc_timing *next, u32 offset)
+{
+ u32 temp = 0, rate = next->rate / 1000;
+ s32 delta[4], delta_taps[4];
+ s32 new[] = {
+ TRIM_REG(0, 0, 0, 0),
+ TRIM_REG(0, 0, 0, 1),
+ TRIM_REG(0, 0, 1, 2),
+ TRIM_REG(0, 0, 1, 3),
+
+ TRIM_REG(1, 0, 2, 4),
+ TRIM_REG(1, 0, 2, 5),
+ TRIM_REG(1, 0, 3, 6),
+ TRIM_REG(1, 0, 3, 7),
+
+ TRIM_REG(0, 1, 0, 0),
+ TRIM_REG(0, 1, 0, 1),
+ TRIM_REG(0, 1, 1, 2),
+ TRIM_REG(0, 1, 1, 3),
+
+ TRIM_REG(1, 1, 2, 4),
+ TRIM_REG(1, 1, 2, 5),
+ TRIM_REG(1, 1, 3, 6),
+ TRIM_REG(1, 1, 3, 7)
+ };
+	unsigned int i;
+
+ switch (offset) {
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3:
+ case EMC_DATA_BRLSHFT_0:
+ delta[0] = 128 * (next->current_dram_clktree[C0D0U0] -
+ next->trained_dram_clktree[C0D0U0]);
+ delta[1] = 128 * (next->current_dram_clktree[C0D0U1] -
+ next->trained_dram_clktree[C0D0U1]);
+ delta[2] = 128 * (next->current_dram_clktree[C1D0U0] -
+ next->trained_dram_clktree[C1D0U0]);
+ delta[3] = 128 * (next->current_dram_clktree[C1D0U1] -
+ next->trained_dram_clktree[C1D0U1]);
+
+ delta_taps[0] = (delta[0] * (s32)rate) / 1000000;
+ delta_taps[1] = (delta[1] * (s32)rate) / 1000000;
+ delta_taps[2] = (delta[2] * (s32)rate) / 1000000;
+ delta_taps[3] = (delta[3] * (s32)rate) / 1000000;
+
+ for (i = 0; i < 4; i++) {
+ if ((delta_taps[i] > next->tree_margin) ||
+ (delta_taps[i] < (-1 * next->tree_margin))) {
+ new[i * 2] = new[i * 2] + delta_taps[i];
+ new[i * 2 + 1] = new[i * 2 + 1] +
+ delta_taps[i];
+ }
+ }
+
+ if (offset == EMC_DATA_BRLSHFT_0) {
+ for (i = 0; i < 8; i++)
+ new[i] = new[i] / 64;
+ } else {
+ for (i = 0; i < 8; i++)
+ new[i] = new[i] % 64;
+ }
+
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2:
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3:
+ case EMC_DATA_BRLSHFT_1:
+ delta[0] = 128 * (next->current_dram_clktree[C0D1U0] -
+ next->trained_dram_clktree[C0D1U0]);
+ delta[1] = 128 * (next->current_dram_clktree[C0D1U1] -
+ next->trained_dram_clktree[C0D1U1]);
+ delta[2] = 128 * (next->current_dram_clktree[C1D1U0] -
+ next->trained_dram_clktree[C1D1U0]);
+ delta[3] = 128 * (next->current_dram_clktree[C1D1U1] -
+ next->trained_dram_clktree[C1D1U1]);
+
+ delta_taps[0] = (delta[0] * (s32)rate) / 1000000;
+ delta_taps[1] = (delta[1] * (s32)rate) / 1000000;
+ delta_taps[2] = (delta[2] * (s32)rate) / 1000000;
+ delta_taps[3] = (delta[3] * (s32)rate) / 1000000;
+
+ for (i = 0; i < 4; i++) {
+ if ((delta_taps[i] > next->tree_margin) ||
+ (delta_taps[i] < (-1 * next->tree_margin))) {
+ new[8 + i * 2] = new[8 + i * 2] +
+ delta_taps[i];
+ new[8 + i * 2 + 1] = new[8 + i * 2 + 1] +
+ delta_taps[i];
+ }
+ }
+
+ if (offset == EMC_DATA_BRLSHFT_1) {
+ for (i = 0; i < 8; i++)
+ new[i + 8] = new[i + 8] / 64;
+ } else {
+ for (i = 0; i < 8; i++)
+ new[i + 8] = new[i + 8] % 64;
+ }
+
+ break;
+ }
+
+ switch (offset) {
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0:
+ temp = CALC_TEMP(0, 0, 0, 1, 0);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1:
+ temp = CALC_TEMP(0, 1, 2, 3, 2);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2:
+ temp = CALC_TEMP(0, 2, 4, 5, 4);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3:
+ temp = CALC_TEMP(0, 3, 6, 7, 6);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0:
+ temp = CALC_TEMP(1, 0, 0, 1, 8);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1:
+ temp = CALC_TEMP(1, 1, 2, 3, 10);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2:
+ temp = CALC_TEMP(1, 2, 4, 5, 12);
+ break;
+
+ case EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3:
+ temp = CALC_TEMP(1, 3, 6, 7, 14);
+ break;
+
+ case EMC_DATA_BRLSHFT_0:
+ temp = ((new[0] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_MASK) |
+ ((new[1] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_MASK) |
+ ((new[2] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_MASK) |
+ ((new[3] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_MASK) |
+ ((new[4] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_MASK) |
+ ((new[5] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_MASK) |
+ ((new[6] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_MASK) |
+ ((new[7] <<
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_MASK);
+ break;
+
+ case EMC_DATA_BRLSHFT_1:
+ temp = ((new[8] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_MASK) |
+ ((new[9] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_MASK) |
+ ((new[10] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_MASK) |
+ ((new[11] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_MASK) |
+ ((new[12] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_MASK) |
+ ((new[13] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_MASK) |
+ ((new[14] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_MASK) |
+ ((new[15] <<
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_SHIFT) &
+ EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_MASK);
+ break;
+
+ default:
+ break;
+ }
+
+ return temp;
+}
+
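+/*
+ * Pre-lock the digital DLL for the target rate: disable it, program the
+ * new DLL configuration and calibration start trim, switch the DLL
+ * clock source, then re-enable it and spin until it reports both a
+ * private update and a lock. Returns the locked DLL output setting.
+ */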
+u32 tegra210_emc_dll_prelock(struct tegra210_emc *emc, u32 clksrc)
+{
+ unsigned int i;
+ u32 value;
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_MASK;
+ value |= (3 << EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_SHIFT);
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK;
+ value |= (3 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+ emc_writel(emc, 1, EMC_TIMING_CONTROL);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
+ EMC_EMC_STATUS_TIMING_UPDATE_STALLED,
+ 0);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ while (true) {
+ value = emc_channel_readl(emc, i, EMC_CFG_DIG_DLL);
+ if ((value & EMC_CFG_DIG_DLL_CFG_DLL_EN) == 0)
+ break;
+ }
+ }
+
+ value = emc->next->burst_regs[EMC_DLL_CFG_0_INDEX];
+ emc_writel(emc, value, EMC_DLL_CFG_0);
+
+ value = emc_readl(emc, EMC_DLL_CFG_1);
+ value &= EMC_DLL_CFG_1_DDLLCAL_CTRL_START_TRIM_MASK;
+
+ if (emc->next->rate >= 400000 && emc->next->rate < 600000)
+ value |= 150;
+ else if (emc->next->rate >= 600000 && emc->next->rate < 800000)
+ value |= 100;
+ else if (emc->next->rate >= 800000 && emc->next->rate < 1000000)
+ value |= 70;
+ else if (emc->next->rate >= 1000000 && emc->next->rate < 1200000)
+ value |= 30;
+ else
+ value |= 20;
+
+ emc_writel(emc, value, EMC_DLL_CFG_1);
+
+ tegra210_change_dll_src(emc, clksrc);
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_EN;
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+
+ tegra210_emc_timing_update(emc);
+
+ for (i = 0; i < emc->num_channels; i++) {
+ while (true) {
+			value = emc_channel_readl(emc, i, EMC_CFG_DIG_DLL);
+ if (value & EMC_CFG_DIG_DLL_CFG_DLL_EN)
+ break;
+ }
+ }
+
+ while (true) {
+ value = emc_readl(emc, EMC_DIG_DLL_STATUS);
+
+ if ((value & EMC_DIG_DLL_STATUS_DLL_PRIV_UPDATED) == 0)
+ continue;
+
+ if ((value & EMC_DIG_DLL_STATUS_DLL_LOCK) == 0)
+ continue;
+
+ break;
+ }
+
+ value = emc_readl(emc, EMC_DIG_DLL_STATUS);
+
+ return value & EMC_DIG_DLL_STATUS_DLL_OUT_MASK;
+}
+
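+/*
+ * Queue the CCFIFO writes that ramp the pads back up after the clock
+ * change, progressively re-enabling the IO bricks, DCC and command
+ * transmitters; slower target clocks need more intermediate steps and
+ * settling time. Returns the accumulated wait time.
+ */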
+u32 tegra210_emc_dvfs_power_ramp_up(struct tegra210_emc *emc, u32 clk,
+ bool flip_backward)
+{
+ u32 cmd_pad, dq_pad, rfu1, cfg5, common_tx, ramp_up_wait = 0;
+ const struct tegra210_emc_timing *timing;
+
+ if (flip_backward)
+ timing = emc->last;
+ else
+ timing = emc->next;
+
+ cmd_pad = timing->burst_regs[EMC_PMACRO_CMD_PAD_TX_CTRL_INDEX];
+ dq_pad = timing->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
+ rfu1 = timing->burst_regs[EMC_PMACRO_BRICK_CTRL_RFU1_INDEX];
+ cfg5 = timing->burst_regs[EMC_FBIO_CFG5_INDEX];
+ common_tx = timing->burst_regs[EMC_PMACRO_COMMON_PAD_TX_CTRL_INDEX];
+
+ cmd_pad |= EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
+
+ if (clk < 1000000 / DVFS_FGCG_MID_SPEED_THRESHOLD) {
+ ccfifo_writel(emc, common_tx & 0xa,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, common_tx & 0xf,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL,
+ (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+ } else {
+ ccfifo_writel(emc, common_tx | 0x8,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL, 0);
+ }
+
+ if (clk < 1000000 / DVFS_FGCG_HIGH_SPEED_THRESHOLD) {
+ if (clk < 1000000 / IOBRICK_DCC_THRESHOLD) {
+ cmd_pad |=
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC;
+ cmd_pad &=
+ ~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC);
+ ccfifo_writel(emc, cmd_pad,
+ EMC_PMACRO_CMD_PAD_TX_CTRL,
+ (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+
+ dq_pad |=
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC;
+ dq_pad &=
+ ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC);
+ ccfifo_writel(emc, dq_pad,
+ EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, rfu1 & 0xfe40fe40,
+ EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+ } else {
+ ccfifo_writel(emc, rfu1 & 0xfe40fe40,
+ EMC_PMACRO_BRICK_CTRL_RFU1,
+ (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+ }
+
+ ccfifo_writel(emc, rfu1 & 0xfeedfeed,
+ EMC_PMACRO_BRICK_CTRL_RFU1, (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+
+ if (clk < 1000000 / IOBRICK_DCC_THRESHOLD) {
+ cmd_pad |=
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC;
+ ccfifo_writel(emc, cmd_pad,
+ EMC_PMACRO_CMD_PAD_TX_CTRL,
+ (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+
+ dq_pad |=
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC;
+ ccfifo_writel(emc, dq_pad,
+ EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, rfu1,
+ EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+ } else {
+ ccfifo_writel(emc, rfu1,
+ EMC_PMACRO_BRICK_CTRL_RFU1,
+ (100000 / clk) + 1);
+ ramp_up_wait += 100000;
+ }
+
+ ccfifo_writel(emc, cfg5 & ~EMC_FBIO_CFG5_CMD_TX_DIS,
+ EMC_FBIO_CFG5, (100000 / clk) + 10);
+ ramp_up_wait += 100000 + (10 * clk);
+ } else if (clk < 1000000 / DVFS_FGCG_MID_SPEED_THRESHOLD) {
+ ccfifo_writel(emc, rfu1 | 0x06000600,
+ EMC_PMACRO_BRICK_CTRL_RFU1, (100000 / clk) + 1);
+ ccfifo_writel(emc, cfg5 & ~EMC_FBIO_CFG5_CMD_TX_DIS,
+ EMC_FBIO_CFG5, (100000 / clk) + 10);
+ ramp_up_wait += 100000 + 10 * clk;
+ } else {
+ ccfifo_writel(emc, rfu1 | 0x00000600,
+ EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+ ccfifo_writel(emc, cfg5 & ~EMC_FBIO_CFG5_CMD_TX_DIS,
+ EMC_FBIO_CFG5, 12);
+ ramp_up_wait += 12 * clk;
+ }
+
+ cmd_pad &= ~EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
+ ccfifo_writel(emc, cmd_pad, EMC_PMACRO_CMD_PAD_TX_CTRL, 5);
+
+ return ramp_up_wait;
+}
+
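+/*
+ * Mirror image of tegra210_emc_dvfs_power_ramp_up(): queue the ramp-down
+ * writes based on the outgoing timing (or the incoming one when
+ * flip_backward is set) and return the accumulated wait time.
+ */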
+u32 tegra210_emc_dvfs_power_ramp_down(struct tegra210_emc *emc, u32 clk,
+ bool flip_backward)
+{
+ u32 ramp_down_wait = 0, cmd_pad, dq_pad, rfu1, cfg5, common_tx;
+ const struct tegra210_emc_timing *entry;
+ u32 seq_wait;
+
+ if (flip_backward)
+ entry = emc->next;
+ else
+ entry = emc->last;
+
+ cmd_pad = entry->burst_regs[EMC_PMACRO_CMD_PAD_TX_CTRL_INDEX];
+ dq_pad = entry->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
+ rfu1 = entry->burst_regs[EMC_PMACRO_BRICK_CTRL_RFU1_INDEX];
+ cfg5 = entry->burst_regs[EMC_FBIO_CFG5_INDEX];
+ common_tx = entry->burst_regs[EMC_PMACRO_COMMON_PAD_TX_CTRL_INDEX];
+
+ cmd_pad |= EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
+
+ ccfifo_writel(emc, cmd_pad, EMC_PMACRO_CMD_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, cfg5 | EMC_FBIO_CFG5_CMD_TX_DIS,
+ EMC_FBIO_CFG5, 12);
+ ramp_down_wait = 12 * clk;
+
+ seq_wait = (100000 / clk) + 1;
+
+ if (clk < (1000000 / DVFS_FGCG_HIGH_SPEED_THRESHOLD)) {
+ if (clk < (1000000 / IOBRICK_DCC_THRESHOLD)) {
+ cmd_pad &=
+ ~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC);
+ cmd_pad |=
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC;
+ ccfifo_writel(emc, cmd_pad,
+ EMC_PMACRO_CMD_PAD_TX_CTRL, seq_wait);
+ ramp_down_wait += 100000;
+
+ dq_pad &=
+ ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC);
+ dq_pad |=
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC;
+ ccfifo_writel(emc, dq_pad,
+ EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, rfu1 & ~0x01120112,
+ EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+ } else {
+ ccfifo_writel(emc, rfu1 & ~0x01120112,
+ EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait);
+ ramp_down_wait += 100000;
+ }
+
+ ccfifo_writel(emc, rfu1 & ~0x01bf01bf,
+ EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait);
+ ramp_down_wait += 100000;
+
+ if (clk < (1000000 / IOBRICK_DCC_THRESHOLD)) {
+ cmd_pad &=
+ ~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
+ EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC);
+ ccfifo_writel(emc, cmd_pad,
+ EMC_PMACRO_CMD_PAD_TX_CTRL, seq_wait);
+ ramp_down_wait += 100000;
+
+ dq_pad &=
+ ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
+ EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC);
+ ccfifo_writel(emc, dq_pad,
+ EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
+ ccfifo_writel(emc, rfu1 & ~0x07ff07ff,
+ EMC_PMACRO_BRICK_CTRL_RFU1, 0);
+ } else {
+ ccfifo_writel(emc, rfu1 & ~0x07ff07ff,
+ EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait);
+ ramp_down_wait += 100000;
+ }
+ } else {
+ ccfifo_writel(emc, rfu1 & ~0xffff07ff,
+ EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait + 19);
+ ramp_down_wait += 100000 + (20 * clk);
+ }
+
+ if (clk < (1000000 / DVFS_FGCG_MID_SPEED_THRESHOLD)) {
+ ramp_down_wait += 100000;
+ ccfifo_writel(emc, common_tx & ~0x5,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL, seq_wait);
+ ramp_down_wait += 100000;
+ ccfifo_writel(emc, common_tx & ~0xf,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL, seq_wait);
+ ramp_down_wait += 100000;
+ ccfifo_writel(emc, 0, 0, seq_wait);
+ ramp_down_wait += 100000;
+ } else {
+ ccfifo_writel(emc, common_tx & ~0xf,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL, seq_wait);
+ }
+
+ return ramp_down_wait;
+}
+
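+/*
+ * Reset the current DRAM clock tree values to the values obtained during
+ * training. Note that the C0D1Ux entries are not copied here.
+ */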
+void tegra210_emc_reset_dram_clktree_values(struct tegra210_emc_timing *timing)
+{
+ timing->current_dram_clktree[C0D0U0] =
+ timing->trained_dram_clktree[C0D0U0];
+ timing->current_dram_clktree[C0D0U1] =
+ timing->trained_dram_clktree[C0D0U1];
+ timing->current_dram_clktree[C1D0U0] =
+ timing->trained_dram_clktree[C1D0U0];
+ timing->current_dram_clktree[C1D0U1] =
+ timing->trained_dram_clktree[C1D0U1];
+ timing->current_dram_clktree[C1D1U0] =
+ timing->trained_dram_clktree[C1D1U0];
+ timing->current_dram_clktree[C1D1U1] =
+ timing->trained_dram_clktree[C1D1U1];
+}
+
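+/*
+ * Write the new EMC_CFG_DIG_DLL value, latch it with a timing update and
+ * wait until every channel reflects the requested DLL enable state.
+ */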
+static void update_dll_control(struct tegra210_emc *emc, u32 value, bool state)
+{
+ unsigned int i;
+
+ emc_writel(emc, value, EMC_CFG_DIG_DLL);
+ tegra210_emc_timing_update(emc);
+
+ for (i = 0; i < emc->num_channels; i++)
+ tegra210_emc_wait_for_update(emc, i, EMC_CFG_DIG_DLL,
+ EMC_CFG_DIG_DLL_CFG_DLL_EN,
+ state);
+}
+
+void tegra210_emc_dll_disable(struct tegra210_emc *emc)
+{
+ u32 value;
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
+
+ update_dll_control(emc, value, false);
+}
+
+void tegra210_emc_dll_enable(struct tegra210_emc *emc)
+{
+ u32 value;
+
+ value = emc_readl(emc, EMC_CFG_DIG_DLL);
+ value |= EMC_CFG_DIG_DLL_CFG_DLL_EN;
+
+ update_dll_control(emc, value, true);
+}
+
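+/*
+ * Apply the currently selected refresh mode to a timing: the 2X and 4X modes
+ * scale the refresh-related registers via REFRESH_SPEEDUP() so that the DRAM
+ * is refreshed more often, e.g. at high temperatures.
+ */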
+void tegra210_emc_adjust_timing(struct tegra210_emc *emc,
+ struct tegra210_emc_timing *timing)
+{
+ u32 dsr_cntrl = timing->burst_regs[EMC_DYN_SELF_REF_CONTROL_INDEX];
+ u32 pre_ref = timing->burst_regs[EMC_PRE_REFRESH_REQ_CNT_INDEX];
+ u32 ref = timing->burst_regs[EMC_REFRESH_INDEX];
+
+ switch (emc->refresh) {
+ case TEGRA210_EMC_REFRESH_NOMINAL:
+ case TEGRA210_EMC_REFRESH_THROTTLE:
+ break;
+
+ case TEGRA210_EMC_REFRESH_2X:
+ ref = REFRESH_SPEEDUP(ref, 2);
+ pre_ref = REFRESH_SPEEDUP(pre_ref, 2);
+ dsr_cntrl = REFRESH_SPEEDUP(dsr_cntrl, 2);
+ break;
+
+ case TEGRA210_EMC_REFRESH_4X:
+ ref = REFRESH_SPEEDUP(ref, 4);
+ pre_ref = REFRESH_SPEEDUP(pre_ref, 4);
+ dsr_cntrl = REFRESH_SPEEDUP(dsr_cntrl, 4);
+ break;
+
+ default:
+ dev_warn(emc->dev, "failed to set refresh: %d\n", emc->refresh);
+ return;
+ }
+
+ emc_writel(emc, ref, emc->offsets->burst[EMC_REFRESH_INDEX]);
+ emc_writel(emc, pre_ref,
+ emc->offsets->burst[EMC_PRE_REFRESH_REQ_CNT_INDEX]);
+ emc_writel(emc, dsr_cntrl,
+ emc->offsets->burst[EMC_DYN_SELF_REF_CONTROL_INDEX]);
+}
+
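+/*
+ * Note that the rates in the timing tables are given in kHz while the clock
+ * framework passes rates in Hz, hence the multiplications by 1000 below.
+ */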
+static int tegra210_emc_set_rate(struct device *dev,
+ const struct tegra210_clk_emc_config *config)
+{
+ struct tegra210_emc *emc = dev_get_drvdata(dev);
+ struct tegra210_emc_timing *timing = NULL;
+ unsigned long rate = config->rate;
+ s64 last_change_delay;
+ unsigned long flags;
+ unsigned int i;
+
+ if (rate == emc->last->rate * 1000UL)
+ return 0;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate * 1000UL == rate) {
+ timing = &emc->timings[i];
+ break;
+ }
+ }
+
+ if (!timing)
+ return -EINVAL;
+
+ if (rate > 204000000 && !timing->trained)
+ return -EINVAL;
+
+ emc->next = timing;
+ last_change_delay = ktime_us_delta(ktime_get(), emc->clkchange_time);
+
+ /* XXX use non-busy-looping sleep? */
+ if ((last_change_delay >= 0) &&
+ (last_change_delay < emc->clkchange_delay))
+ udelay(emc->clkchange_delay - (int)last_change_delay);
+
+ spin_lock_irqsave(&emc->lock, flags);
+ tegra210_emc_set_clock(emc, config->value);
+ emc->clkchange_time = ktime_get();
+ emc->last = timing;
+ spin_unlock_irqrestore(&emc->lock, flags);
+
+ return 0;
+}
+
+/*
+ * debugfs interface
+ *
+ * The memory controller driver exposes some files in debugfs that can be used
+ * to control the EMC frequency. The top-level directory can be found here:
+ *
+ * /sys/kernel/debug/emc
+ *
+ * It contains the following files:
+ *
+ * - available_rates: This file contains a list of valid, space-separated
+ * EMC frequencies.
+ *
+ * - min_rate: Writing a value to this file sets the given frequency as the
+ * floor of the permitted range. If this is higher than the currently
+ * configured EMC frequency, this will cause the frequency to be
+ * increased so that it stays within the valid range.
+ *
+ * - max_rate: Similarly to the min_rate file, writing a value to this file
+ * sets the given frequency as the ceiling of the permitted range. If
+ * the value is lower than the currently configured EMC frequency, this
+ * will cause the frequency to be decreased so that it stays within the
+ * valid range.
+ */
+
+static bool tegra210_emc_validate_rate(struct tegra210_emc *emc,
+ unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++)
+ if (rate == emc->timings[i].rate * 1000UL)
+ return true;
+
+ return false;
+}
+
+static int tegra210_emc_debug_available_rates_show(struct seq_file *s,
+ void *data)
+{
+ struct tegra210_emc *emc = s->private;
+ const char *prefix = "";
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ seq_printf(s, "%s%u", prefix, emc->timings[i].rate * 1000);
+ prefix = " ";
+ }
+
+ seq_puts(s, "\n");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(tegra210_emc_debug_available_rates);
+
+static int tegra210_emc_debug_min_rate_get(void *data, u64 *rate)
+{
+ struct tegra210_emc *emc = data;
+
+ *rate = emc->debugfs.min_rate;
+
+ return 0;
+}
+
+static int tegra210_emc_debug_min_rate_set(void *data, u64 rate)
+{
+ struct tegra210_emc *emc = data;
+ int err;
+
+ if (!tegra210_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = clk_set_min_rate(emc->clk, rate);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.min_rate = rate;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(tegra210_emc_debug_min_rate_fops,
+ tegra210_emc_debug_min_rate_get,
+ tegra210_emc_debug_min_rate_set, "%llu\n");
+
+static int tegra210_emc_debug_max_rate_get(void *data, u64 *rate)
+{
+ struct tegra210_emc *emc = data;
+
+ *rate = emc->debugfs.max_rate;
+
+ return 0;
+}
+
+static int tegra210_emc_debug_max_rate_set(void *data, u64 rate)
+{
+ struct tegra210_emc *emc = data;
+ int err;
+
+ if (!tegra210_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = clk_set_max_rate(emc->clk, rate);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.max_rate = rate;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(tegra210_emc_debug_max_rate_fops,
+ tegra210_emc_debug_max_rate_get,
+ tegra210_emc_debug_max_rate_set, "%llu\n");
+
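+/*
+ * Valid override values are 0-7, which seems to mirror the 3-bit refresh
+ * rate field that tegra210_emc_get_temperature() reads back from the DRAM;
+ * an override of 0 keeps live polling enabled.
+ */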
+static int tegra210_emc_debug_temperature_get(void *data, u64 *temperature)
+{
+ struct tegra210_emc *emc = data;
+ unsigned int value;
+
+ if (!emc->debugfs.temperature)
+ value = tegra210_emc_get_temperature(emc);
+ else
+ value = emc->debugfs.temperature;
+
+ *temperature = value;
+
+ return 0;
+}
+
+static int tegra210_emc_debug_temperature_set(void *data, u64 temperature)
+{
+ struct tegra210_emc *emc = data;
+
+ if (temperature > 7)
+ return -EINVAL;
+
+ emc->debugfs.temperature = temperature;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(tegra210_emc_debug_temperature_fops,
+ tegra210_emc_debug_temperature_get,
+ tegra210_emc_debug_temperature_set, "%llu\n");
+
+static void tegra210_emc_debugfs_init(struct tegra210_emc *emc)
+{
+ struct device *dev = emc->dev;
+ unsigned int i;
+ int err;
+
+ emc->debugfs.min_rate = ULONG_MAX;
+ emc->debugfs.max_rate = 0;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate * 1000UL < emc->debugfs.min_rate)
+ emc->debugfs.min_rate = emc->timings[i].rate * 1000UL;
+
+ if (emc->timings[i].rate * 1000UL > emc->debugfs.max_rate)
+ emc->debugfs.max_rate = emc->timings[i].rate * 1000UL;
+ }
+
+ if (!emc->num_timings) {
+ emc->debugfs.min_rate = clk_get_rate(emc->clk);
+ emc->debugfs.max_rate = emc->debugfs.min_rate;
+ }
+
+ err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
+ emc->debugfs.max_rate);
+ if (err < 0) {
+ dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
+ emc->debugfs.min_rate, emc->debugfs.max_rate,
+ emc->clk);
+ return;
+ }
+
+ emc->debugfs.root = debugfs_create_dir("emc", NULL);
+
+ debugfs_create_file("available_rates", 0444, emc->debugfs.root, emc,
+ &tegra210_emc_debug_available_rates_fops);
+ debugfs_create_file("min_rate", 0644, emc->debugfs.root, emc,
+ &tegra210_emc_debug_min_rate_fops);
+ debugfs_create_file("max_rate", 0644, emc->debugfs.root, emc,
+ &tegra210_emc_debug_max_rate_fops);
+ debugfs_create_file("temperature", 0644, emc->debugfs.root, emc,
+ &tegra210_emc_debug_temperature_fops);
+}
+
+static void tegra210_emc_detect(struct tegra210_emc *emc)
+{
+ u32 value;
+
+ /* probe the number of connected DRAM devices */
+ value = mc_readl(emc->mc, MC_EMEM_ADR_CFG);
+
+ if (value & MC_EMEM_ADR_CFG_EMEM_NUMDEV)
+ emc->num_devices = 2;
+ else
+ emc->num_devices = 1;
+
+ /* probe the type of DRAM */
+ value = emc_readl(emc, EMC_FBIO_CFG5);
+	emc->dram_type = value & EMC_FBIO_CFG5_DRAM_TYPE_MASK;
+
+ /* probe the number of channels */
+ value = emc_readl(emc, EMC_FBIO_CFG7);
+
+ if ((value & EMC_FBIO_CFG7_CH1_ENABLE) &&
+ (value & EMC_FBIO_CFG7_CH0_ENABLE))
+ emc->num_channels = 2;
+ else
+ emc->num_channels = 1;
+}
+
+static int tegra210_emc_validate_timings(struct tegra210_emc *emc,
+ struct tegra210_emc_timing *timings,
+ unsigned int num_timings)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_timings; i++) {
+ u32 min_volt = timings[i].min_volt;
+ u32 rate = timings[i].rate;
+
+ if (!rate)
+ return -EINVAL;
+
+ if ((i > 0) && ((rate <= timings[i - 1].rate) ||
+ (min_volt < timings[i - 1].min_volt)))
+ return -EINVAL;
+
+		if (timings[i].revision != timings[0].revision)
+			dev_warn(emc->dev,
+				 "inconsistent revision @%u: %u vs. %u\n",
+				 i, timings[i].revision,
+				 timings[0].revision);
+ }
+
+ return 0;
+}
+
+static int tegra210_emc_probe(struct platform_device *pdev)
+{
+ struct thermal_cooling_device *cd;
+ unsigned long current_rate;
+ struct tegra210_emc *emc;
+ struct device_node *np;
+ unsigned int i;
+ int err;
+
+ emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
+ if (!emc)
+ return -ENOMEM;
+
+ emc->clk = devm_clk_get(&pdev->dev, "emc");
+ if (IS_ERR(emc->clk))
+ return PTR_ERR(emc->clk);
+
+ platform_set_drvdata(pdev, emc);
+ spin_lock_init(&emc->lock);
+ emc->dev = &pdev->dev;
+
+ emc->mc = devm_tegra_memory_controller_get(&pdev->dev);
+ if (IS_ERR(emc->mc))
+ return PTR_ERR(emc->mc);
+
+ emc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(emc->regs))
+ return PTR_ERR(emc->regs);
+
+ for (i = 0; i < 2; i++) {
+ emc->channel[i] = devm_platform_ioremap_resource(pdev, 1 + i);
+ if (IS_ERR(emc->channel[i]))
+ return PTR_ERR(emc->channel[i]);
+	}
+
+ tegra210_emc_detect(emc);
+ np = pdev->dev.of_node;
+
+ /* attach to the nominal and (optional) derated tables */
+ err = of_reserved_mem_device_init_by_name(emc->dev, np, "nominal");
+ if (err < 0) {
+ dev_err(emc->dev, "failed to get nominal EMC table: %d\n", err);
+ return err;
+ }
+
+ err = of_reserved_mem_device_init_by_name(emc->dev, np, "derated");
+ if (err < 0 && err != -ENODEV) {
+ dev_err(emc->dev, "failed to get derated EMC table: %d\n", err);
+ goto release;
+ }
+
+ /* validate the tables */
+ if (emc->nominal) {
+ err = tegra210_emc_validate_timings(emc, emc->nominal,
+ emc->num_timings);
+ if (err < 0)
+ goto release;
+ }
+
+ if (emc->derated) {
+ err = tegra210_emc_validate_timings(emc, emc->derated,
+ emc->num_timings);
+ if (err < 0)
+ goto release;
+ }
+
+ /* default to the nominal table */
+ emc->timings = emc->nominal;
+
+ /* pick the current timing based on the current EMC clock rate */
+ current_rate = clk_get_rate(emc->clk) / 1000;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate == current_rate) {
+ emc->last = &emc->timings[i];
+ break;
+ }
+ }
+
+ if (i == emc->num_timings) {
+ dev_err(emc->dev, "no EMC table entry found for %lu kHz\n",
+ current_rate);
+ err = -ENOENT;
+ goto release;
+ }
+
+ /* pick a compatible clock change sequence for the EMC table */
+ for (i = 0; i < ARRAY_SIZE(tegra210_emc_sequences); i++) {
+ const struct tegra210_emc_sequence *sequence =
+ tegra210_emc_sequences[i];
+
+ if (emc->timings[0].revision == sequence->revision) {
+ emc->sequence = sequence;
+ break;
+ }
+ }
+
+ if (!emc->sequence) {
+ dev_err(&pdev->dev, "sequence %u not supported\n",
+ emc->timings[0].revision);
+ err = -ENOTSUPP;
+ goto release;
+ }
+
+ emc->offsets = &tegra210_emc_table_register_offsets;
+ emc->refresh = TEGRA210_EMC_REFRESH_NOMINAL;
+
+ emc->provider.owner = THIS_MODULE;
+ emc->provider.dev = &pdev->dev;
+ emc->provider.set_rate = tegra210_emc_set_rate;
+
+ emc->provider.configs = devm_kcalloc(&pdev->dev, emc->num_timings,
+ sizeof(*emc->provider.configs),
+ GFP_KERNEL);
+ if (!emc->provider.configs) {
+ err = -ENOMEM;
+ goto release;
+ }
+
+ emc->provider.num_configs = emc->num_timings;
+
+ for (i = 0; i < emc->provider.num_configs; i++) {
+ struct tegra210_emc_timing *timing = &emc->timings[i];
+ struct tegra210_clk_emc_config *config =
+ &emc->provider.configs[i];
+ u32 value;
+
+ config->rate = timing->rate * 1000UL;
+ config->value = timing->clk_src_emc;
+
+ value = timing->burst_mc_regs[MC_EMEM_ARB_MISC0_INDEX];
+
+ if ((value & MC_EMEM_ARB_MISC0_EMC_SAME_FREQ) == 0)
+ config->same_freq = false;
+ else
+ config->same_freq = true;
+ }
+
+ err = tegra210_clk_emc_attach(emc->clk, &emc->provider);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to attach to EMC clock: %d\n", err);
+ goto release;
+ }
+
+ emc->clkchange_delay = 100;
+ emc->training_interval = 100;
+ dev_set_drvdata(emc->dev, emc);
+
+ timer_setup(&emc->refresh_timer, tegra210_emc_poll_refresh,
+ TIMER_DEFERRABLE);
+ atomic_set(&emc->refresh_poll, 0);
+ emc->refresh_poll_interval = 1000;
+
+ timer_setup(&emc->training, tegra210_emc_train, 0);
+
+ tegra210_emc_debugfs_init(emc);
+
+ cd = devm_thermal_of_cooling_device_register(emc->dev, np, "emc", emc,
+ &tegra210_emc_cd_ops);
+ if (IS_ERR(cd)) {
+ err = PTR_ERR(cd);
+ dev_err(emc->dev, "failed to register cooling device: %d\n",
+ err);
+ goto detach;
+ }
+
+ return 0;
+
+detach:
+ debugfs_remove_recursive(emc->debugfs.root);
+ tegra210_clk_emc_detach(emc->clk);
+release:
+ of_reserved_mem_device_release(emc->dev);
+
+ return err;
+}
+
+static int tegra210_emc_remove(struct platform_device *pdev)
+{
+ struct tegra210_emc *emc = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(emc->debugfs.root);
+ tegra210_clk_emc_detach(emc->clk);
+ of_reserved_mem_device_release(emc->dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra210_emc_suspend(struct device *dev)
+{
+ struct tegra210_emc *emc = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_rate_exclusive_get(emc->clk);
+ if (err < 0) {
+ dev_err(emc->dev, "failed to acquire clock: %d\n", err);
+ return err;
+ }
+
+ emc->resume_rate = clk_get_rate(emc->clk);
+
+ clk_set_rate(emc->clk, 204000000);
+ tegra210_clk_emc_detach(emc->clk);
+
+ dev_dbg(dev, "suspending at %lu Hz\n", clk_get_rate(emc->clk));
+
+ return 0;
+}
+
+static int __maybe_unused tegra210_emc_resume(struct device *dev)
+{
+ struct tegra210_emc *emc = dev_get_drvdata(dev);
+ int err;
+
+ err = tegra210_clk_emc_attach(emc->clk, &emc->provider);
+ if (err < 0) {
+ dev_err(dev, "failed to attach to EMC clock: %d\n", err);
+ return err;
+ }
+
+ clk_set_rate(emc->clk, emc->resume_rate);
+ clk_rate_exclusive_put(emc->clk);
+
+ dev_dbg(dev, "resuming at %lu Hz\n", clk_get_rate(emc->clk));
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra210_emc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra210_emc_suspend, tegra210_emc_resume)
+};
+
+static const struct of_device_id tegra210_emc_of_match[] = {
+ { .compatible = "nvidia,tegra210-emc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra210_emc_of_match);
+
+static struct platform_driver tegra210_emc_driver = {
+ .driver = {
+ .name = "tegra210-emc",
+ .of_match_table = tegra210_emc_of_match,
+ .pm = &tegra210_emc_pm_ops,
+ },
+ .probe = tegra210_emc_probe,
+ .remove = tegra210_emc_remove,
+};
+
+module_platform_driver(tegra210_emc_driver);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_AUTHOR("Joseph Lo <josephl@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra210 EMC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra210-emc-table.c b/drivers/memory/tegra/tegra210-emc-table.c
new file mode 100644
index 0000000000..34a8785d28
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-emc-table.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/of_reserved_mem.h>
+
+#include "tegra210-emc.h"
+
+#define TEGRA_EMC_MAX_FREQS 16
+
+static int tegra210_emc_table_device_init(struct reserved_mem *rmem,
+ struct device *dev)
+{
+ struct tegra210_emc *emc = dev_get_drvdata(dev);
+ struct tegra210_emc_timing *timings;
+ unsigned int i, count = 0;
+
+ timings = memremap(rmem->base, rmem->size, MEMREMAP_WB);
+ if (!timings) {
+ dev_err(dev, "failed to map EMC table\n");
+ return -ENOMEM;
+ }
+
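+	/*
+	 * Tables are terminated by an entry with a zero revision; count at
+	 * most TEGRA_EMC_MAX_FREQS entries.
+	 */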
+ for (i = 0; i < TEGRA_EMC_MAX_FREQS; i++) {
+ if (timings[i].revision == 0)
+ break;
+
+ count++;
+ }
+
+ /* only the nominal and derated tables are expected */
+ if (emc->derated) {
+ dev_warn(dev, "excess EMC table '%s'\n", rmem->name);
+ goto out;
+ }
+
+ if (emc->nominal) {
+ if (count != emc->num_timings) {
+ dev_warn(dev, "%u derated vs. %u nominal entries\n",
+ count, emc->num_timings);
+ memunmap(timings);
+ return -EINVAL;
+ }
+
+ emc->derated = timings;
+ } else {
+ emc->num_timings = count;
+ emc->nominal = timings;
+ }
+
+out:
+ /* keep track of which table this is */
+ rmem->priv = timings;
+
+ return 0;
+}
+
+static void tegra210_emc_table_device_release(struct reserved_mem *rmem,
+ struct device *dev)
+{
+ struct tegra210_emc_timing *timings = rmem->priv;
+ struct tegra210_emc *emc = dev_get_drvdata(dev);
+
+ if ((emc->nominal && timings != emc->nominal) &&
+ (emc->derated && timings != emc->derated))
+ dev_warn(dev, "trying to release unassigned EMC table '%s'\n",
+ rmem->name);
+
+ memunmap(timings);
+}
+
+static const struct reserved_mem_ops tegra210_emc_table_ops = {
+ .device_init = tegra210_emc_table_device_init,
+ .device_release = tegra210_emc_table_device_release,
+};
+
+static int tegra210_emc_table_init(struct reserved_mem *rmem)
+{
+ pr_debug("Tegra210 EMC table at %pa, size %lu bytes\n", &rmem->base,
+ (unsigned long)rmem->size);
+
+ rmem->ops = &tegra210_emc_table_ops;
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(tegra210_emc_table, "nvidia,tegra210-emc-table",
+ tegra210_emc_table_init);
diff --git a/drivers/memory/tegra/tegra210-emc.h b/drivers/memory/tegra/tegra210-emc.h
new file mode 100644
index 0000000000..8988bcf152
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-emc.h
@@ -0,0 +1,1016 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef TEGRA210_EMC_H
+#define TEGRA210_EMC_H
+
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#define DVFS_FGCG_HIGH_SPEED_THRESHOLD 1000
+#define IOBRICK_DCC_THRESHOLD 2400
+#define DVFS_FGCG_MID_SPEED_THRESHOLD 600
+
+#define EMC_STATUS_UPDATE_TIMEOUT 1000
+
+/* register definitions */
+#define EMC_INTSTATUS 0x0
+#define EMC_INTSTATUS_CLKCHANGE_COMPLETE BIT(4)
+#define EMC_DBG 0x8
+#define EMC_DBG_WRITE_MUX_ACTIVE BIT(1)
+#define EMC_DBG_WRITE_ACTIVE_ONLY BIT(30)
+#define EMC_CFG 0xc
+#define EMC_CFG_DRAM_CLKSTOP_PD BIT(31)
+#define EMC_CFG_DRAM_CLKSTOP_SR BIT(30)
+#define EMC_CFG_DRAM_ACPD BIT(29)
+#define EMC_CFG_DYN_SELF_REF BIT(28)
+#define EMC_PIN 0x24
+#define EMC_PIN_PIN_CKE BIT(0)
+#define EMC_PIN_PIN_CKEB BIT(1)
+#define EMC_PIN_PIN_CKE_PER_DEV BIT(2)
+#define EMC_TIMING_CONTROL 0x28
+#define EMC_RC 0x2c
+#define EMC_RFC 0x30
+#define EMC_RAS 0x34
+#define EMC_RP 0x38
+#define EMC_R2W 0x3c
+#define EMC_W2R 0x40
+#define EMC_R2P 0x44
+#define EMC_W2P 0x48
+#define EMC_RD_RCD 0x4c
+#define EMC_WR_RCD 0x50
+#define EMC_RRD 0x54
+#define EMC_REXT 0x58
+#define EMC_WDV 0x5c
+#define EMC_QUSE 0x60
+#define EMC_QRST 0x64
+#define EMC_QSAFE 0x68
+#define EMC_RDV 0x6c
+#define EMC_REFRESH 0x70
+#define EMC_BURST_REFRESH_NUM 0x74
+#define EMC_PDEX2WR 0x78
+#define EMC_PDEX2RD 0x7c
+#define EMC_PCHG2PDEN 0x80
+#define EMC_ACT2PDEN 0x84
+#define EMC_AR2PDEN 0x88
+#define EMC_RW2PDEN 0x8c
+#define EMC_TXSR 0x90
+#define EMC_TCKE 0x94
+#define EMC_TFAW 0x98
+#define EMC_TRPAB 0x9c
+#define EMC_TCLKSTABLE 0xa0
+#define EMC_TCLKSTOP 0xa4
+#define EMC_TREFBW 0xa8
+#define EMC_TPPD 0xac
+#define EMC_ODT_WRITE 0xb0
+#define EMC_PDEX2MRR 0xb4
+#define EMC_WEXT 0xb8
+#define EMC_RFC_SLR 0xc0
+#define EMC_MRS_WAIT_CNT2 0xc4
+#define EMC_MRS_WAIT_CNT2_MRS_EXT2_WAIT_CNT_SHIFT 16
+#define EMC_MRS_WAIT_CNT2_MRS_EXT1_WAIT_CNT_SHIFT 0
+#define EMC_MRS_WAIT_CNT 0xc8
+#define EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT 0
+#define EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK \
+ (0x3FF << EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT)
+
+#define EMC_MRS 0xcc
+#define EMC_EMRS 0xd0
+#define EMC_EMRS_USE_EMRS_LONG_CNT BIT(26)
+#define EMC_REF 0xd4
+#define EMC_REF_REF_CMD BIT(0)
+#define EMC_SELF_REF 0xe0
+#define EMC_MRW 0xe8
+#define EMC_MRW_MRW_OP_SHIFT 0
+#define EMC_MRW_MRW_OP_MASK \
+ (0xff << EMC_MRW_MRW_OP_SHIFT)
+#define EMC_MRW_MRW_MA_SHIFT 16
+#define EMC_MRW_USE_MRW_EXT_CNT 27
+#define EMC_MRW_MRW_DEV_SELECTN_SHIFT 30
+
+#define EMC_MRR 0xec
+#define EMC_MRR_DEV_SEL_SHIFT 30
+#define EMC_MRR_DEV_SEL_MASK 0x3
+#define EMC_MRR_MA_SHIFT 16
+#define EMC_MRR_MA_MASK 0xff
+#define EMC_MRR_DATA_SHIFT 0
+#define EMC_MRR_DATA_MASK 0xffff
+
+#define EMC_FBIO_SPARE 0x100
+#define EMC_FBIO_CFG5 0x104
+#define EMC_FBIO_CFG5_DRAM_TYPE_SHIFT 0
+#define EMC_FBIO_CFG5_DRAM_TYPE_MASK \
+ (0x3 << EMC_FBIO_CFG5_DRAM_TYPE_SHIFT)
+#define EMC_FBIO_CFG5_CMD_TX_DIS BIT(8)
+
+#define EMC_PDEX2CKE 0x118
+#define EMC_CKE2PDEN 0x11c
+#define EMC_MPC 0x128
+#define EMC_EMRS2 0x12c
+#define EMC_EMRS2_USE_EMRS2_LONG_CNT BIT(26)
+#define EMC_MRW2 0x134
+#define EMC_MRW3 0x138
+#define EMC_MRW4 0x13c
+#define EMC_R2R 0x144
+#define EMC_EINPUT 0x14c
+#define EMC_EINPUT_DURATION 0x150
+#define EMC_PUTERM_EXTRA 0x154
+#define EMC_TCKESR 0x158
+#define EMC_TPD 0x15c
+#define EMC_AUTO_CAL_CONFIG 0x2a4
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_COMPUTE_START BIT(0)
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_MEASURE_STALL BIT(9)
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_UPDATE_STALL BIT(10)
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE BIT(29)
+#define EMC_AUTO_CAL_CONFIG_AUTO_CAL_START BIT(31)
+#define EMC_EMC_STATUS 0x2b4
+#define EMC_EMC_STATUS_MRR_DIVLD BIT(20)
+#define EMC_EMC_STATUS_TIMING_UPDATE_STALLED BIT(23)
+#define EMC_EMC_STATUS_DRAM_IN_POWERDOWN_SHIFT 4
+#define EMC_EMC_STATUS_DRAM_IN_POWERDOWN_MASK \
+ (0x3 << EMC_EMC_STATUS_DRAM_IN_POWERDOWN_SHIFT)
+#define EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_SHIFT 8
+#define EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK \
+ (0x3 << EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_SHIFT)
+
+#define EMC_CFG_2 0x2b8
+#define EMC_CFG_DIG_DLL 0x2bc
+#define EMC_CFG_DIG_DLL_CFG_DLL_EN BIT(0)
+#define EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK BIT(1)
+#define EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC BIT(3)
+#define EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK BIT(4)
+#define EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT 6
+#define EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK \
+ (0x3 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT)
+#define EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_SHIFT 8
+#define EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_MASK \
+ (0x7 << EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_SHIFT)
+
+#define EMC_CFG_DIG_DLL_PERIOD 0x2c0
+#define EMC_DIG_DLL_STATUS 0x2c4
+#define EMC_DIG_DLL_STATUS_DLL_LOCK BIT(15)
+#define EMC_DIG_DLL_STATUS_DLL_PRIV_UPDATED BIT(17)
+#define EMC_DIG_DLL_STATUS_DLL_OUT_SHIFT 0
+#define EMC_DIG_DLL_STATUS_DLL_OUT_MASK \
+ (0x7ff << EMC_DIG_DLL_STATUS_DLL_OUT_SHIFT)
+
+#define EMC_CFG_DIG_DLL_1 0x2c8
+#define EMC_RDV_MASK 0x2cc
+#define EMC_WDV_MASK 0x2d0
+#define EMC_RDV_EARLY_MASK 0x2d4
+#define EMC_RDV_EARLY 0x2d8
+#define EMC_AUTO_CAL_CONFIG8 0x2dc
+#define EMC_ZCAL_INTERVAL 0x2e0
+#define EMC_ZCAL_WAIT_CNT 0x2e4
+#define EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK 0x7ff
+#define EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_SHIFT 0
+
+#define EMC_ZQ_CAL 0x2ec
+#define EMC_ZQ_CAL_DEV_SEL_SHIFT 30
+#define EMC_ZQ_CAL_LONG BIT(4)
+#define EMC_ZQ_CAL_ZQ_LATCH_CMD BIT(1)
+#define EMC_ZQ_CAL_ZQ_CAL_CMD BIT(0)
+#define EMC_FDPD_CTRL_DQ 0x310
+#define EMC_FDPD_CTRL_CMD 0x314
+#define EMC_PMACRO_CMD_BRICK_CTRL_FDPD 0x318
+#define EMC_PMACRO_DATA_BRICK_CTRL_FDPD 0x31c
+#define EMC_PMACRO_BRICK_CTRL_RFU1 0x330
+#define EMC_PMACRO_BRICK_CTRL_RFU2 0x334
+#define EMC_TR_TIMING_0 0x3b4
+#define EMC_TR_CTRL_1 0x3bc
+#define EMC_TR_RDV 0x3c4
+#define EMC_STALL_THEN_EXE_AFTER_CLKCHANGE 0x3cc
+#define EMC_SEL_DPD_CTRL 0x3d8
+#define EMC_SEL_DPD_CTRL_DATA_SEL_DPD_EN BIT(8)
+#define EMC_SEL_DPD_CTRL_ODT_SEL_DPD_EN BIT(5)
+#define EMC_SEL_DPD_CTRL_RESET_SEL_DPD_EN BIT(4)
+#define EMC_SEL_DPD_CTRL_CA_SEL_DPD_EN BIT(3)
+#define EMC_SEL_DPD_CTRL_CLK_SEL_DPD_EN BIT(2)
+#define EMC_PRE_REFRESH_REQ_CNT 0x3dc
+#define EMC_DYN_SELF_REF_CONTROL 0x3e0
+#define EMC_TXSRDLL 0x3e4
+#define EMC_CCFIFO_ADDR 0x3e8
+#define EMC_CCFIFO_ADDR_STALL_BY_1 BIT(31)
+#define EMC_CCFIFO_ADDR_STALL(x) (((x) & 0x7fff) << 16)
+#define EMC_CCFIFO_ADDR_OFFSET(x) ((x) & 0xffff)
+#define EMC_CCFIFO_DATA 0x3ec
+#define EMC_TR_QPOP 0x3f4
+#define EMC_TR_RDV_MASK 0x3f8
+#define EMC_TR_QSAFE 0x3fc
+#define EMC_TR_QRST 0x400
+#define EMC_ISSUE_QRST 0x428
+#define EMC_AUTO_CAL_CONFIG2 0x458
+#define EMC_AUTO_CAL_CONFIG3 0x45c
+#define EMC_TR_DVFS 0x460
+#define EMC_AUTO_CAL_CHANNEL 0x464
+#define EMC_IBDLY 0x468
+#define EMC_OBDLY 0x46c
+#define EMC_TXDSRVTTGEN 0x480
+#define EMC_WE_DURATION 0x48c
+#define EMC_WS_DURATION 0x490
+#define EMC_WEV 0x494
+#define EMC_WSV 0x498
+#define EMC_CFG_3 0x49c
+#define EMC_MRW6 0x4a4
+#define EMC_MRW7 0x4a8
+#define EMC_MRW8 0x4ac
+#define EMC_MRW9 0x4b0
+#define EMC_MRW10 0x4b4
+#define EMC_MRW11 0x4b8
+#define EMC_MRW12 0x4bc
+#define EMC_MRW13 0x4c0
+#define EMC_MRW14 0x4c4
+#define EMC_MRW15 0x4d0
+#define EMC_CFG_SYNC 0x4d4
+#define EMC_FDPD_CTRL_CMD_NO_RAMP 0x4d8
+#define EMC_FDPD_CTRL_CMD_NO_RAMP_CMD_DPD_NO_RAMP_ENABLE BIT(0)
+#define EMC_WDV_CHK 0x4e0
+#define EMC_CFG_PIPE_2 0x554
+#define EMC_CFG_PIPE_CLK 0x558
+#define EMC_CFG_PIPE_CLK_CLK_ALWAYS_ON BIT(0)
+#define EMC_CFG_PIPE_1 0x55c
+#define EMC_CFG_PIPE 0x560
+#define EMC_QPOP 0x564
+#define EMC_QUSE_WIDTH 0x568
+#define EMC_PUTERM_WIDTH 0x56c
+#define EMC_AUTO_CAL_CONFIG7 0x574
+#define EMC_REFCTRL2 0x580
+#define EMC_FBIO_CFG7 0x584
+#define EMC_FBIO_CFG7_CH0_ENABLE BIT(1)
+#define EMC_FBIO_CFG7_CH1_ENABLE BIT(2)
+#define EMC_DATA_BRLSHFT_0 0x588
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_SHIFT 21
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE7_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_SHIFT 18
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE6_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_SHIFT 15
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE5_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_SHIFT 12
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE4_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_SHIFT 9
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE3_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_SHIFT 6
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE2_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_SHIFT 3
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE1_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_SHIFT 0
+#define EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_0_RANK0_BYTE0_DATA_BRLSHFT_SHIFT)
+
+#define EMC_DATA_BRLSHFT_1 0x58c
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_SHIFT 21
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE7_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_SHIFT 18
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE6_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_SHIFT 15
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE5_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_SHIFT 12
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE4_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_SHIFT 9
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE3_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_SHIFT 6
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE2_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_SHIFT 3
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE1_DATA_BRLSHFT_SHIFT)
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_SHIFT 0
+#define EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_MASK \
+ (0x7 << EMC_DATA_BRLSHFT_1_RANK1_BYTE0_DATA_BRLSHFT_SHIFT)
+
+#define EMC_RFCPB 0x590
+#define EMC_DQS_BRLSHFT_0 0x594
+#define EMC_DQS_BRLSHFT_1 0x598
+#define EMC_CMD_BRLSHFT_0 0x59c
+#define EMC_CMD_BRLSHFT_1 0x5a0
+#define EMC_CMD_BRLSHFT_2 0x5a4
+#define EMC_CMD_BRLSHFT_3 0x5a8
+#define EMC_QUSE_BRLSHFT_0 0x5ac
+#define EMC_AUTO_CAL_CONFIG4 0x5b0
+#define EMC_AUTO_CAL_CONFIG5 0x5b4
+#define EMC_QUSE_BRLSHFT_1 0x5b8
+#define EMC_QUSE_BRLSHFT_2 0x5bc
+#define EMC_CCDMW 0x5c0
+#define EMC_QUSE_BRLSHFT_3 0x5c4
+#define EMC_AUTO_CAL_CONFIG6 0x5cc
+#define EMC_DLL_CFG_0 0x5e4
+#define EMC_DLL_CFG_1 0x5e8
+#define EMC_DLL_CFG_1_DDLLCAL_CTRL_START_TRIM_SHIFT 10
+#define EMC_DLL_CFG_1_DDLLCAL_CTRL_START_TRIM_MASK \
+ (0x7ff << EMC_DLL_CFG_1_DDLLCAL_CTRL_START_TRIM_SHIFT)
+
+#define EMC_CONFIG_SAMPLE_DELAY 0x5f0
+#define EMC_CFG_UPDATE 0x5f4
+#define EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_SHIFT 9
+#define EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_MASK \
+ (0x3 << EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_SHIFT)
+
+#define EMC_PMACRO_QUSE_DDLL_RANK0_0 0x600
+#define EMC_PMACRO_QUSE_DDLL_RANK0_1 0x604
+#define EMC_PMACRO_QUSE_DDLL_RANK0_2 0x608
+#define EMC_PMACRO_QUSE_DDLL_RANK0_3 0x60c
+#define EMC_PMACRO_QUSE_DDLL_RANK0_4 0x610
+#define EMC_PMACRO_QUSE_DDLL_RANK0_5 0x614
+#define EMC_PMACRO_QUSE_DDLL_RANK1_0 0x620
+#define EMC_PMACRO_QUSE_DDLL_RANK1_1 0x624
+#define EMC_PMACRO_QUSE_DDLL_RANK1_2 0x628
+#define EMC_PMACRO_QUSE_DDLL_RANK1_3 0x62c
+#define EMC_PMACRO_QUSE_DDLL_RANK1_4 0x630
+#define EMC_PMACRO_QUSE_DDLL_RANK1_5 0x634
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0 0x640
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE1_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE1_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE1_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE0_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE0_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_OB_DDLL_LONG_DQ_RANK0_BYTE0_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1 0x644
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE3_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE3_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE3_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE2_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE2_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_OB_DDLL_LONG_DQ_RANK0_BYTE2_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2 0x648
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE5_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE5_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE5_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE4_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE4_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_OB_DDLL_LONG_DQ_RANK0_BYTE4_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3 0x64c
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE7_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE7_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE7_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE6_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE6_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_OB_DDLL_LONG_DQ_RANK0_BYTE6_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_4 0x650
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_5 0x654
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0 0x660
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE1_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE1_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE1_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE0_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE0_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_OB_DDLL_LONG_DQ_RANK1_BYTE0_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1 0x664
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE3_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE3_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE3_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE2_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE2_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_OB_DDLL_LONG_DQ_RANK1_BYTE2_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2 0x668
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE5_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE5_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE5_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE4_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE4_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_OB_DDLL_LONG_DQ_RANK1_BYTE4_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3 0x66c
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE7_SHIFT \
+ 16
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE7_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE7_SHIFT)
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE6_SHIFT \
+ 0
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE6_MASK \
+ (0x3ff << \
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_OB_DDLL_LONG_DQ_RANK1_BYTE6_SHIFT)
+
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_4 0x670
+#define EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_5 0x674
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_0 0x680
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_1 0x684
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_2 0x688
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_3 0x68c
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_4 0x690
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_5 0x694
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_0 0x6a0
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_1 0x6a4
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_2 0x6a8
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_3 0x6ac
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_4 0x6b0
+#define EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_5 0x6b4
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_0 0x6c0
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_1 0x6c4
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_2 0x6c8
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_3 0x6cc
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_0 0x6e0
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_1 0x6e4
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_2 0x6e8
+#define EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_3 0x6ec
+#define EMC_PMACRO_TX_PWRD_0 0x720
+#define EMC_PMACRO_TX_PWRD_1 0x724
+#define EMC_PMACRO_TX_PWRD_2 0x728
+#define EMC_PMACRO_TX_PWRD_3 0x72c
+#define EMC_PMACRO_TX_PWRD_4 0x730
+#define EMC_PMACRO_TX_PWRD_5 0x734
+#define EMC_PMACRO_TX_SEL_CLK_SRC_0 0x740
+#define EMC_PMACRO_TX_SEL_CLK_SRC_1 0x744
+#define EMC_PMACRO_TX_SEL_CLK_SRC_3 0x74c
+#define EMC_PMACRO_TX_SEL_CLK_SRC_2 0x748
+#define EMC_PMACRO_TX_SEL_CLK_SRC_4 0x750
+#define EMC_PMACRO_TX_SEL_CLK_SRC_5 0x754
+#define EMC_PMACRO_DDLL_BYPASS 0x760
+#define EMC_PMACRO_DDLL_PWRD_0 0x770
+#define EMC_PMACRO_DDLL_PWRD_1 0x774
+#define EMC_PMACRO_DDLL_PWRD_2 0x778
+#define EMC_PMACRO_CMD_CTRL_0 0x780
+#define EMC_PMACRO_CMD_CTRL_1 0x784
+#define EMC_PMACRO_CMD_CTRL_2 0x788
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_0 0x800
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_1 0x804
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_2 0x808
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_3 0x80c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_0 0x810
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_1 0x814
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_2 0x818
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_3 0x81c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_0 0x820
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_1 0x824
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_2 0x828
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_3 0x82c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_0 0x830
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_1 0x834
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_2 0x838
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_3 0x83c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_0 0x840
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_1 0x844
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_2 0x848
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_3 0x84c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_0 0x850
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_1 0x854
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_2 0x858
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_3 0x85c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_0 0x860
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_1 0x864
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_2 0x868
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_3 0x86c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_0 0x870
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_1 0x874
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_2 0x878
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_3 0x87c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_0 0x880
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_1 0x884
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_2 0x888
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_3 0x88c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_0 0x890
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_1 0x894
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_2 0x898
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_3 0x89c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_0 0x8a0
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_1 0x8a4
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_2 0x8a8
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_3 0x8ac
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_0 0x8b0
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_1 0x8b4
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_2 0x8b8
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_3 0x8bc
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_0 0x900
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_1 0x904
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_2 0x908
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_3 0x90c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_0 0x910
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_1 0x914
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_2 0x918
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_3 0x91c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_0 0x920
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_1 0x924
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_2 0x928
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_3 0x92c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_0 0x930
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_1 0x934
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_2 0x938
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_3 0x93c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_0 0x940
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_1 0x944
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_2 0x948
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_3 0x94c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_0 0x950
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_1 0x954
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_2 0x958
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_3 0x95c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_0 0x960
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_1 0x964
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_2 0x968
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_3 0x96c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_0 0x970
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_1 0x974
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_2 0x978
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_3 0x97c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_0 0x980
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_1 0x984
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_2 0x988
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_3 0x98c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_0 0x990
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_1 0x994
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_2 0x998
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_3 0x99c
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_0 0x9a0
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_1 0x9a4
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_2 0x9a8
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_3 0x9ac
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_0 0x9b0
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_1 0x9b4
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_2 0x9b8
+#define EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_3 0x9bc
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_0 0xa00
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_1 0xa04
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_2 0xa08
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_0 0xa10
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_1 0xa14
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_2 0xa18
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_0 0xa20
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_1 0xa24
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_2 0xa28
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_0 0xa30
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_1 0xa34
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_2 0xa38
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_0 0xa40
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_1 0xa44
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_2 0xa48
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_0 0xa50
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_1 0xa54
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_2 0xa58
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_0 0xa60
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_1 0xa64
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_2 0xa68
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_0 0xa70
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_1 0xa74
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_2 0xa78
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_0 0xb00
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_1 0xb04
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_2 0xb08
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_0 0xb10
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_1 0xb14
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_2 0xb18
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_0 0xb20
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_1 0xb24
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_2 0xb28
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_0 0xb30
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_1 0xb34
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_2 0xb38
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_0 0xb40
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_1 0xb44
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_2 0xb48
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_0 0xb50
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_1 0xb54
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_2 0xb58
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_0 0xb60
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_1 0xb64
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_2 0xb68
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_0 0xb70
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_1 0xb74
+#define EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_2 0xb78
+#define EMC_PMACRO_IB_VREF_DQ_0 0xbe0
+#define EMC_PMACRO_IB_VREF_DQ_1 0xbe4
+#define EMC_PMACRO_IB_VREF_DQS_0 0xbf0
+#define EMC_PMACRO_IB_VREF_DQS_1 0xbf4
+#define EMC_PMACRO_DDLL_LONG_CMD_0 0xc00
+#define EMC_PMACRO_DDLL_LONG_CMD_1 0xc04
+#define EMC_PMACRO_DDLL_LONG_CMD_2 0xc08
+#define EMC_PMACRO_DDLL_LONG_CMD_3 0xc0c
+#define EMC_PMACRO_DDLL_LONG_CMD_4 0xc10
+#define EMC_PMACRO_DDLL_LONG_CMD_5 0xc14
+#define EMC_PMACRO_DDLL_SHORT_CMD_0 0xc20
+#define EMC_PMACRO_DDLL_SHORT_CMD_1 0xc24
+#define EMC_PMACRO_DDLL_SHORT_CMD_2 0xc28
+#define EMC_PMACRO_CFG_PM_GLOBAL_0 0xc30
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE0 BIT(16)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE1 BIT(17)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE2 BIT(18)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE3 BIT(19)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE4 BIT(20)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE5 BIT(21)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE6 BIT(22)
+#define EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE7 BIT(23)
+#define EMC_PMACRO_VTTGEN_CTRL_0 0xc34
+#define EMC_PMACRO_VTTGEN_CTRL_1 0xc38
+#define EMC_PMACRO_BG_BIAS_CTRL_0 0xc3c
+#define EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD BIT(0)
+#define EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD BIT(2)
+#define EMC_PMACRO_PAD_CFG_CTRL 0xc40
+#define EMC_PMACRO_ZCTRL 0xc44
+#define EMC_PMACRO_CMD_PAD_RX_CTRL 0xc50
+#define EMC_PMACRO_DATA_PAD_RX_CTRL 0xc54
+#define EMC_PMACRO_CMD_RX_TERM_MODE 0xc58
+#define EMC_PMACRO_DATA_RX_TERM_MODE 0xc5c
+#define EMC_PMACRO_CMD_PAD_TX_CTRL 0xc60
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC BIT(1)
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC BIT(9)
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC BIT(16)
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC BIT(24)
+#define EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON BIT(26)
+
+#define EMC_PMACRO_DATA_PAD_TX_CTRL 0xc64
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF BIT(0)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC BIT(1)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF BIT(8)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC BIT(9)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC BIT(16)
+#define EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC BIT(24)
+
+#define EMC_PMACRO_COMMON_PAD_TX_CTRL 0xc68
+#define EMC_PMACRO_AUTOCAL_CFG_COMMON 0xc78
+#define EMC_PMACRO_AUTOCAL_CFG_COMMON_E_CAL_BYPASS_DVFS BIT(16)
+#define EMC_PMACRO_VTTGEN_CTRL_2 0xcf0
+#define EMC_PMACRO_IB_RXRT 0xcf4
+#define EMC_PMACRO_TRAINING_CTRL_0 0xcf8
+#define EMC_PMACRO_TRAINING_CTRL_0_CH0_TRAINING_E_WRPTR BIT(3)
+#define EMC_PMACRO_TRAINING_CTRL_1 0xcfc
+#define EMC_PMACRO_TRAINING_CTRL_1_CH1_TRAINING_E_WRPTR BIT(3)
+#define EMC_TRAINING_CTRL 0xe04
+#define EMC_TRAINING_QUSE_CORS_CTRL 0xe0c
+#define EMC_TRAINING_QUSE_FINE_CTRL 0xe10
+#define EMC_TRAINING_QUSE_CTRL_MISC 0xe14
+#define EMC_TRAINING_WRITE_FINE_CTRL 0xe18
+#define EMC_TRAINING_WRITE_CTRL_MISC 0xe1c
+#define EMC_TRAINING_WRITE_VREF_CTRL 0xe20
+#define EMC_TRAINING_READ_FINE_CTRL 0xe24
+#define EMC_TRAINING_READ_CTRL_MISC 0xe28
+#define EMC_TRAINING_READ_VREF_CTRL 0xe2c
+#define EMC_TRAINING_CA_FINE_CTRL 0xe30
+#define EMC_TRAINING_CA_CTRL_MISC 0xe34
+#define EMC_TRAINING_CA_CTRL_MISC1 0xe38
+#define EMC_TRAINING_CA_VREF_CTRL 0xe3c
+#define EMC_TRAINING_SETTLE 0xe44
+#define EMC_TRAINING_MPC 0xe5c
+#define EMC_TRAINING_VREF_SETTLE 0xe6c
+#define EMC_TRAINING_QUSE_VREF_CTRL 0xed0
+#define EMC_TRAINING_OPT_DQS_IB_VREF_RANK0 0xed4
+#define EMC_TRAINING_OPT_DQS_IB_VREF_RANK1 0xed8
+
+#define EMC_COPY_TABLE_PARAM_PERIODIC_FIELDS BIT(0)
+#define EMC_COPY_TABLE_PARAM_TRIM_REGS BIT(1)
+
+enum burst_regs_list {
+ EMC_RP_INDEX = 6,
+ EMC_R2P_INDEX = 9,
+ EMC_W2P_INDEX,
+ EMC_MRW6_INDEX = 31,
+ EMC_REFRESH_INDEX = 41,
+ EMC_PRE_REFRESH_REQ_CNT_INDEX = 43,
+ EMC_TRPAB_INDEX = 59,
+ EMC_MRW7_INDEX = 62,
+ EMC_FBIO_CFG5_INDEX = 65,
+ EMC_FBIO_CFG7_INDEX,
+ EMC_CFG_DIG_DLL_INDEX,
+ EMC_ZCAL_INTERVAL_INDEX = 139,
+ EMC_ZCAL_WAIT_CNT_INDEX,
+ EMC_MRS_WAIT_CNT_INDEX = 141,
+ EMC_DLL_CFG_0_INDEX = 144,
+ EMC_PMACRO_AUTOCAL_CFG_COMMON_INDEX = 146,
+ EMC_CFG_INDEX = 148,
+ EMC_DYN_SELF_REF_CONTROL_INDEX = 150,
+ EMC_PMACRO_CMD_PAD_TX_CTRL_INDEX = 161,
+ EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX,
+ EMC_PMACRO_COMMON_PAD_TX_CTRL_INDEX,
+ EMC_PMACRO_BRICK_CTRL_RFU1_INDEX = 167,
+ EMC_PMACRO_BG_BIAS_CTRL_0_INDEX = 171,
+ EMC_MRW14_INDEX = 199,
+ EMC_MRW15_INDEX = 220,
+};
+
+enum trim_regs_list {
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0_INDEX = 60,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_4_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_5_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2_INDEX,
+ EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3_INDEX,
+};
+
+enum burst_mc_regs_list {
+ MC_EMEM_ARB_MISC0_INDEX = 20,
+};
+
+enum {
+ T_RP,
+ T_FC_LPDDR4,
+ T_RFC,
+ T_PDEX,
+ RL,
+};
+
+enum {
+ AUTO_PD = 0,
+ MAN_SR = 2,
+};
+
+enum {
+ ASSEMBLY = 0,
+ ACTIVE,
+};
+
+enum {
+ C0D0U0,
+ C0D0U1,
+ C0D1U0,
+ C0D1U1,
+ C1D0U0,
+ C1D0U1,
+ C1D1U0,
+ C1D1U1,
+ DRAM_CLKTREE_NUM,
+};
+
+#define VREF_REGS_PER_CHANNEL_SIZE 4
+#define DRAM_TIMINGS_NUM 5
+#define BURST_REGS_PER_CHANNEL_SIZE 8
+#define TRIM_REGS_PER_CHANNEL_SIZE 10
+#define PTFV_ARRAY_SIZE 12
+#define SAVE_RESTORE_MOD_REGS_SIZE 12
+#define TRAINING_MOD_REGS_SIZE 20
+#define BURST_UP_DOWN_REGS_SIZE 24
+#define BURST_MC_REGS_SIZE 33
+#define TRIM_REGS_SIZE 138
+#define BURST_REGS_SIZE 221
+
+struct tegra210_emc_per_channel_regs {
+ u16 bank;
+ u16 offset;
+};
+
+struct tegra210_emc_table_register_offsets {
+ u16 burst[BURST_REGS_SIZE];
+ u16 trim[TRIM_REGS_SIZE];
+ u16 burst_mc[BURST_MC_REGS_SIZE];
+ u16 la_scale[BURST_UP_DOWN_REGS_SIZE];
+ struct tegra210_emc_per_channel_regs burst_per_channel[BURST_REGS_PER_CHANNEL_SIZE];
+ struct tegra210_emc_per_channel_regs trim_per_channel[TRIM_REGS_PER_CHANNEL_SIZE];
+ struct tegra210_emc_per_channel_regs vref_per_channel[VREF_REGS_PER_CHANNEL_SIZE];
+};
+
+struct tegra210_emc_timing {
+ u32 revision;
+ const char dvfs_ver[60];
+ u32 rate;
+ u32 min_volt;
+ u32 gpu_min_volt;
+ const char clock_src[32];
+ u32 clk_src_emc;
+ u32 needs_training;
+ u32 training_pattern;
+ u32 trained;
+
+ u32 periodic_training;
+ u32 trained_dram_clktree[DRAM_CLKTREE_NUM];
+ u32 current_dram_clktree[DRAM_CLKTREE_NUM];
+ u32 run_clocks;
+ u32 tree_margin;
+
+ u32 num_burst;
+ u32 num_burst_per_ch;
+ u32 num_trim;
+ u32 num_trim_per_ch;
+ u32 num_mc_regs;
+ u32 num_up_down;
+ u32 vref_num;
+ u32 training_mod_num;
+ u32 dram_timing_num;
+
+ u32 ptfv_list[PTFV_ARRAY_SIZE];
+
+ u32 burst_regs[BURST_REGS_SIZE];
+ u32 burst_reg_per_ch[BURST_REGS_PER_CHANNEL_SIZE];
+ u32 shadow_regs_ca_train[BURST_REGS_SIZE];
+ u32 shadow_regs_quse_train[BURST_REGS_SIZE];
+ u32 shadow_regs_rdwr_train[BURST_REGS_SIZE];
+
+ u32 trim_regs[TRIM_REGS_SIZE];
+ u32 trim_perch_regs[TRIM_REGS_PER_CHANNEL_SIZE];
+
+ u32 vref_perch_regs[VREF_REGS_PER_CHANNEL_SIZE];
+
+ u32 dram_timings[DRAM_TIMINGS_NUM];
+ u32 training_mod_regs[TRAINING_MOD_REGS_SIZE];
+ u32 save_restore_mod_regs[SAVE_RESTORE_MOD_REGS_SIZE];
+ u32 burst_mc_regs[BURST_MC_REGS_SIZE];
+ u32 la_scale_regs[BURST_UP_DOWN_REGS_SIZE];
+
+ u32 min_mrs_wait;
+ u32 emc_mrw;
+ u32 emc_mrw2;
+ u32 emc_mrw3;
+ u32 emc_mrw4;
+ u32 emc_mrw9;
+ u32 emc_mrs;
+ u32 emc_emrs;
+ u32 emc_emrs2;
+ u32 emc_auto_cal_config;
+ u32 emc_auto_cal_config2;
+ u32 emc_auto_cal_config3;
+ u32 emc_auto_cal_config4;
+ u32 emc_auto_cal_config5;
+ u32 emc_auto_cal_config6;
+ u32 emc_auto_cal_config7;
+ u32 emc_auto_cal_config8;
+ u32 emc_cfg_2;
+ u32 emc_sel_dpd_ctrl;
+ u32 emc_fdpd_ctrl_cmd_no_ramp;
+ u32 dll_clk_src;
+ u32 clk_out_enb_x_0_clk_enb_emc_dll;
+ u32 latency;
+};
+
+enum tegra210_emc_refresh {
+ TEGRA210_EMC_REFRESH_NOMINAL = 0,
+ TEGRA210_EMC_REFRESH_2X,
+ TEGRA210_EMC_REFRESH_4X,
+ TEGRA210_EMC_REFRESH_THROTTLE, /* 4x Refresh + derating. */
+};
+
+#define DRAM_TYPE_DDR3 0
+#define DRAM_TYPE_LPDDR4 1
+#define DRAM_TYPE_LPDDR2 2
+#define DRAM_TYPE_DDR2 3
+
+struct tegra210_emc {
+ struct tegra_mc *mc;
+ struct device *dev;
+ struct clk *clk;
+
+ /* nominal EMC frequency table */
+ struct tegra210_emc_timing *nominal;
+ /* derated EMC frequency table */
+ struct tegra210_emc_timing *derated;
+
+ /* currently selected table (nominal or derated) */
+ struct tegra210_emc_timing *timings;
+ unsigned int num_timings;
+
+ const struct tegra210_emc_table_register_offsets *offsets;
+
+ const struct tegra210_emc_sequence *sequence;
+ spinlock_t lock;
+
+ void __iomem *regs, *channel[2];
+ unsigned int num_channels;
+ unsigned int num_devices;
+ unsigned int dram_type;
+
+ struct tegra210_emc_timing *last;
+ struct tegra210_emc_timing *next;
+
+ unsigned int training_interval;
+ struct timer_list training;
+
+ enum tegra210_emc_refresh refresh;
+ unsigned int refresh_poll_interval;
+ struct timer_list refresh_timer;
+ unsigned int temperature;
+ atomic_t refresh_poll;
+
+ ktime_t clkchange_time;
+ int clkchange_delay;
+
+ unsigned long resume_rate;
+
+ struct {
+ struct dentry *root;
+ unsigned long min_rate;
+ unsigned long max_rate;
+ unsigned int temperature;
+ } debugfs;
+
+ struct tegra210_clk_emc_provider provider;
+};
+
+struct tegra210_emc_sequence {
+ u8 revision;
+ void (*set_clock)(struct tegra210_emc *emc, u32 clksrc);
+ u32 (*periodic_compensation)(struct tegra210_emc *emc);
+};
+
+static inline void emc_writel(struct tegra210_emc *emc, u32 value,
+ unsigned int offset)
+{
+ writel_relaxed(value, emc->regs + offset);
+}
+
+static inline u32 emc_readl(struct tegra210_emc *emc, unsigned int offset)
+{
+ return readl_relaxed(emc->regs + offset);
+}
+
+static inline void emc_channel_writel(struct tegra210_emc *emc,
+ unsigned int channel,
+ u32 value, unsigned int offset)
+{
+ writel_relaxed(value, emc->channel[channel] + offset);
+}
+
+static inline u32 emc_channel_readl(struct tegra210_emc *emc,
+ unsigned int channel, unsigned int offset)
+{
+ return readl_relaxed(emc->channel[channel] + offset);
+}
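+
+/*
+ * A minimal usage sketch (illustrative only): the accessors above wrap
+ * readl_relaxed()/writel_relaxed(), so a read-modify-write of an EMC
+ * register at a hypothetical "offset" looks like:
+ *
+ *	u32 value = emc_readl(emc, offset);
+ *	value |= BIT(0);
+ *	emc_writel(emc, value, offset);
+ *
+ * The _relaxed accessors add no memory barriers; accesses to the same
+ * device remain ordered with respect to each other.
+ */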
+
+static inline void ccfifo_writel(struct tegra210_emc *emc, u32 value,
+ unsigned int offset, u32 delay)
+{
+ writel_relaxed(value, emc->regs + EMC_CCFIFO_DATA);
+
+ value = EMC_CCFIFO_ADDR_STALL_BY_1 | EMC_CCFIFO_ADDR_STALL(delay) |
+ EMC_CCFIFO_ADDR_OFFSET(offset);
+ writel_relaxed(value, emc->regs + EMC_CCFIFO_ADDR);
+}
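+
+/*
+ * Illustrative note: ccfifo_writel() does not write @offset directly. It
+ * stages @value in EMC_CCFIFO_DATA and encodes the target register offset
+ * plus an optional stall delay into EMC_CCFIFO_ADDR, queueing the write so
+ * the hardware replays it as part of the clock-change sequence. A queued
+ * self-refresh entry might look like this (sketch only):
+ *
+ *	ccfifo_writel(emc, EMC_SELF_REF_CMD_ENABLED, EMC_SELF_REF, 0);
+ */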
+
+static inline u32 div_o3(u32 a, u32 b)
+{
+ u32 result = a / b;
+
+ if ((b * result) < a)
+ return result + 1;
+
+ return result;
+}
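+
+/*
+ * Note: div_o3() is a round-up (ceiling) division, equivalent to
+ * DIV_ROUND_UP(a, b) for b > 0, e.g. div_o3(10, 4) == 3 while 10 / 4 == 2.
+ */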
+
+/* from tegra210-emc-cc-r21021.c */
+extern const struct tegra210_emc_sequence tegra210_emc_r21021;
+
+int tegra210_emc_set_refresh(struct tegra210_emc *emc,
+ enum tegra210_emc_refresh refresh);
+u32 tegra210_emc_mrr_read(struct tegra210_emc *emc, unsigned int chip,
+ unsigned int address);
+void tegra210_emc_do_clock_change(struct tegra210_emc *emc, u32 clksrc);
+void tegra210_emc_set_shadow_bypass(struct tegra210_emc *emc, int set);
+void tegra210_emc_timing_update(struct tegra210_emc *emc);
+u32 tegra210_emc_get_dll_state(struct tegra210_emc_timing *next);
+struct tegra210_emc_timing *tegra210_emc_find_timing(struct tegra210_emc *emc,
+ unsigned long rate);
+void tegra210_emc_adjust_timing(struct tegra210_emc *emc,
+ struct tegra210_emc_timing *timing);
+int tegra210_emc_wait_for_update(struct tegra210_emc *emc, unsigned int channel,
+ unsigned int offset, u32 bit_mask, bool state);
+unsigned long tegra210_emc_actual_osc_clocks(u32 in);
+u32 tegra210_emc_compensate(struct tegra210_emc_timing *next, u32 offset);
+void tegra210_emc_dll_disable(struct tegra210_emc *emc);
+void tegra210_emc_dll_enable(struct tegra210_emc *emc);
+u32 tegra210_emc_dll_prelock(struct tegra210_emc *emc, u32 clksrc);
+u32 tegra210_emc_dvfs_power_ramp_down(struct tegra210_emc *emc, u32 clk,
+ bool flip_backward);
+u32 tegra210_emc_dvfs_power_ramp_up(struct tegra210_emc *emc, u32 clk,
+ bool flip_backward);
+void tegra210_emc_reset_dram_clktree_values(struct tegra210_emc_timing *timing);
+void tegra210_emc_start_periodic_compensation(struct tegra210_emc *emc);
+
+#endif
diff --git a/drivers/memory/tegra/tegra210-mc.h b/drivers/memory/tegra/tegra210-mc.h
new file mode 100644
index 0000000000..b9b91ceb47
--- /dev/null
+++ b/drivers/memory/tegra/tegra210-mc.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef TEGRA210_MC_H
+#define TEGRA210_MC_H
+
+#include "mc.h"
+
+/* register definitions */
+#define MC_LATENCY_ALLOWANCE_AVPC_0 0x2e4
+#define MC_LATENCY_ALLOWANCE_HC_0 0x310
+#define MC_LATENCY_ALLOWANCE_HC_1 0x314
+#define MC_LATENCY_ALLOWANCE_MPCORE_0 0x320
+#define MC_LATENCY_ALLOWANCE_NVENC_0 0x328
+#define MC_LATENCY_ALLOWANCE_PPCS_0 0x344
+#define MC_LATENCY_ALLOWANCE_PPCS_1 0x348
+#define MC_LATENCY_ALLOWANCE_ISP2_0 0x370
+#define MC_LATENCY_ALLOWANCE_ISP2_1 0x374
+#define MC_LATENCY_ALLOWANCE_XUSB_0 0x37c
+#define MC_LATENCY_ALLOWANCE_XUSB_1 0x380
+#define MC_LATENCY_ALLOWANCE_TSEC_0 0x390
+#define MC_LATENCY_ALLOWANCE_VIC_0 0x394
+#define MC_LATENCY_ALLOWANCE_VI2_0 0x398
+#define MC_LATENCY_ALLOWANCE_GPU_0 0x3ac
+#define MC_LATENCY_ALLOWANCE_SDMMCA_0 0x3b8
+#define MC_LATENCY_ALLOWANCE_SDMMCAA_0 0x3bc
+#define MC_LATENCY_ALLOWANCE_SDMMC_0 0x3c0
+#define MC_LATENCY_ALLOWANCE_SDMMCAB_0 0x3c4
+#define MC_LATENCY_ALLOWANCE_GPU2_0 0x3e8
+#define MC_LATENCY_ALLOWANCE_NVDEC_0 0x3d8
+#define MC_MLL_MPCORER_PTSA_RATE 0x44c
+#define MC_FTOP_PTSA_RATE 0x50c
+#define MC_EMEM_ARB_TIMING_RFCPB 0x6c0
+#define MC_EMEM_ARB_TIMING_CCDMW 0x6c4
+#define MC_EMEM_ARB_REFPB_HP_CTRL 0x6f0
+#define MC_EMEM_ARB_REFPB_BANK_CTRL 0x6f4
+#define MC_PTSA_GRANT_DECREMENT 0x960
+#define MC_EMEM_ARB_DHYST_CTRL 0xbcc
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_0 0xbd0
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_1 0xbd4
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_2 0xbd8
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_3 0xbdc
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_4 0xbe0
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_5 0xbe4
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_6 0xbe8
+#define MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_7 0xbec
+
+#endif
diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c
new file mode 100644
index 0000000000..8ab6498dbe
--- /dev/null
+++ b/drivers/memory/tegra/tegra210.c
@@ -0,0 +1,1290 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <dt-bindings/memory/tegra210-mc.h>
+
+#include "mc.h"
+
+static const struct tegra_mc_client tegra210_mc_clients[] = {
+ {
+ .id = 0x00,
+ .name = "ptcr",
+ .swgroup = TEGRA_SWGROUP_PTC,
+ }, {
+ .id = 0x01,
+ .name = "display0a",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x02,
+ .name = "display0ab",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x03,
+ .name = "display0b",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x04,
+ .name = "display0bb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x05,
+ .name = "display0c",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x2ec,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x06,
+ .name = "display0cb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x2f8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x0e,
+ .name = "afir",
+ .swgroup = TEGRA_SWGROUP_AFI,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x2e0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x2e,
+ },
+ },
+ }, {
+ .id = 0x0f,
+ .name = "avpcarm7r",
+ .swgroup = TEGRA_SWGROUP_AVPC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x10,
+ .name = "displayhc",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x2f0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x11,
+ .name = "displayhcb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2fc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x15,
+ .name = "hdar",
+ .swgroup = TEGRA_SWGROUP_HDA,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x24,
+ },
+ },
+ }, {
+ .id = 0x16,
+ .name = "host1xdmar",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x17,
+ .name = "host1xr",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x1c,
+ .name = "nvencsrd",
+ .swgroup = TEGRA_SWGROUP_NVENC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x23,
+ },
+ },
+ }, {
+ .id = 0x1d,
+ .name = "ppcsahbdmar",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
+ },
+ }, {
+ .id = 0x1e,
+ .name = "ppcsahbslvr",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
+ },
+ }, {
+ .id = 0x1f,
+ .name = "satar",
+ .swgroup = TEGRA_SWGROUP_SATA,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x350,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x65,
+ },
+ },
+ }, {
+ .id = 0x27,
+ .name = "mpcorer",
+ .swgroup = TEGRA_SWGROUP_MPCORE,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x2b,
+ .name = "nvencswr",
+ .swgroup = TEGRA_SWGROUP_NVENC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x31,
+ .name = "afiw",
+ .swgroup = TEGRA_SWGROUP_AFI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2e0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x32,
+ .name = "avpcarm7w",
+ .swgroup = TEGRA_SWGROUP_AVPC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x35,
+ .name = "hdaw",
+ .swgroup = TEGRA_SWGROUP_HDA,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x36,
+ .name = "host1xw",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x314,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x39,
+ .name = "mpcorew",
+ .swgroup = TEGRA_SWGROUP_MPCORE,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x3b,
+ .name = "ppcsahbdmaw",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 27,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x3c,
+ .name = "ppcsahbslvw",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x3d,
+ .name = "sataw",
+ .swgroup = TEGRA_SWGROUP_SATA,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x350,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x44,
+ .name = "ispra",
+ .swgroup = TEGRA_SWGROUP_ISP2,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x370,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x18,
+ },
+ },
+ }, {
+ .id = 0x46,
+ .name = "ispwa",
+ .swgroup = TEGRA_SWGROUP_ISP2,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x374,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x47,
+ .name = "ispwb",
+ .swgroup = TEGRA_SWGROUP_ISP2,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 7,
+ },
+ .la = {
+ .reg = 0x374,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x4a,
+ .name = "xusb_hostr",
+ .swgroup = TEGRA_SWGROUP_XUSB_HOST,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x37c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x7a,
+ },
+ },
+ }, {
+ .id = 0x4b,
+ .name = "xusb_hostw",
+ .swgroup = TEGRA_SWGROUP_XUSB_HOST,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x37c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x4c,
+ .name = "xusb_devr",
+ .swgroup = TEGRA_SWGROUP_XUSB_DEV,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x380,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x39,
+ },
+ },
+ }, {
+ .id = 0x4d,
+ .name = "xusb_devw",
+ .swgroup = TEGRA_SWGROUP_XUSB_DEV,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x380,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x4e,
+ .name = "isprab",
+ .swgroup = TEGRA_SWGROUP_ISP2B,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x384,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x18,
+ },
+ },
+ }, {
+ .id = 0x50,
+ .name = "ispwab",
+ .swgroup = TEGRA_SWGROUP_ISP2B,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x388,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x51,
+ .name = "ispwbb",
+ .swgroup = TEGRA_SWGROUP_ISP2B,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x388,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x54,
+ .name = "tsecsrd",
+ .swgroup = TEGRA_SWGROUP_TSEC,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x390,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x9b,
+ },
+ },
+ }, {
+ .id = 0x55,
+ .name = "tsecswr",
+ .swgroup = TEGRA_SWGROUP_TSEC,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x390,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x56,
+ .name = "a9avpscr",
+ .swgroup = TEGRA_SWGROUP_A9AVP,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x3a4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ }, {
+ .id = 0x57,
+ .name = "a9avpscw",
+ .swgroup = TEGRA_SWGROUP_A9AVP,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x3a4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x58,
+ .name = "gpusrd",
+ .swgroup = TEGRA_SWGROUP_GPU,
+ .regs = {
+ .smmu = {
+ /* read-only */
+ .reg = 0x230,
+ .bit = 24,
+ },
+ .la = {
+ .reg = 0x3c8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
+ },
+ }, {
+ .id = 0x59,
+ .name = "gpuswr",
+ .swgroup = TEGRA_SWGROUP_GPU,
+ .regs = {
+ .smmu = {
+ /* read-only */
+ .reg = 0x230,
+ .bit = 25,
+ },
+ .la = {
+ .reg = 0x3c8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x5a,
+ .name = "displayt",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 26,
+ },
+ .la = {
+ .reg = 0x2f0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x1e,
+ },
+ },
+ }, {
+ .id = 0x60,
+ .name = "sdmmcra",
+ .swgroup = TEGRA_SWGROUP_SDMMC1A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x3b8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
+ },
+ }, {
+ .id = 0x61,
+ .name = "sdmmcraa",
+ .swgroup = TEGRA_SWGROUP_SDMMC2A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x3bc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x5a,
+ },
+ },
+ }, {
+ .id = 0x62,
+ .name = "sdmmcr",
+ .swgroup = TEGRA_SWGROUP_SDMMC3A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x3c0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x49,
+ },
+ },
+ }, {
+ .id = 0x63,
+		.name = "sdmmcrab",
+		.swgroup = TEGRA_SWGROUP_SDMMC4A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x3c4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x5a,
+ },
+ },
+ }, {
+ .id = 0x64,
+ .name = "sdmmcwa",
+ .swgroup = TEGRA_SWGROUP_SDMMC1A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x3b8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x65,
+ .name = "sdmmcwaa",
+ .swgroup = TEGRA_SWGROUP_SDMMC2A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x3bc,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x66,
+ .name = "sdmmcw",
+ .swgroup = TEGRA_SWGROUP_SDMMC3A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x3c0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x67,
+ .name = "sdmmcwab",
+ .swgroup = TEGRA_SWGROUP_SDMMC4A,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 7,
+ },
+ .la = {
+ .reg = 0x3c4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x6c,
+ .name = "vicsrd",
+ .swgroup = TEGRA_SWGROUP_VIC,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x394,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
+ },
+ }, {
+ .id = 0x6d,
+ .name = "vicswr",
+ .swgroup = TEGRA_SWGROUP_VIC,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x394,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x72,
+ .name = "viw",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x398,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x73,
+ .name = "displayd",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 19,
+ },
+ .la = {
+ .reg = 0x3c8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ }, {
+ .id = 0x78,
+ .name = "nvdecsrd",
+ .swgroup = TEGRA_SWGROUP_NVDEC,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 24,
+ },
+ .la = {
+ .reg = 0x3d8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x23,
+ },
+ },
+ }, {
+ .id = 0x79,
+ .name = "nvdecswr",
+ .swgroup = TEGRA_SWGROUP_NVDEC,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 25,
+ },
+ .la = {
+ .reg = 0x3d8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x7a,
+ .name = "aper",
+ .swgroup = TEGRA_SWGROUP_APE,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 26,
+ },
+ .la = {
+ .reg = 0x3dc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x7b,
+ .name = "apew",
+ .swgroup = TEGRA_SWGROUP_APE,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 27,
+ },
+ .la = {
+ .reg = 0x3dc,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x7e,
+ .name = "nvjpgsrd",
+ .swgroup = TEGRA_SWGROUP_NVJPG,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x3e4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x23,
+ },
+ },
+ }, {
+ .id = 0x7f,
+ .name = "nvjpgswr",
+ .swgroup = TEGRA_SWGROUP_NVJPG,
+ .regs = {
+ .smmu = {
+ .reg = 0x234,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x3e4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x80,
+ .name = "sesrd",
+ .swgroup = TEGRA_SWGROUP_SE,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x3e0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x2e,
+ },
+ },
+ }, {
+ .id = 0x81,
+ .name = "seswr",
+ .swgroup = TEGRA_SWGROUP_SE,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x3e0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x82,
+ .name = "axiapr",
+ .swgroup = TEGRA_SWGROUP_AXIAP,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x3a0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x83,
+ .name = "axiapw",
+ .swgroup = TEGRA_SWGROUP_AXIAP,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x3a0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x84,
+ .name = "etrr",
+ .swgroup = TEGRA_SWGROUP_ETR,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x3ec,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ }, {
+ .id = 0x85,
+ .name = "etrw",
+ .swgroup = TEGRA_SWGROUP_ETR,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x3ec,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x86,
+ .name = "tsecsrdb",
+ .swgroup = TEGRA_SWGROUP_TSECB,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x3f0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x9b,
+ },
+ },
+ }, {
+ .id = 0x87,
+ .name = "tsecswrb",
+ .swgroup = TEGRA_SWGROUP_TSECB,
+ .regs = {
+ .smmu = {
+ .reg = 0xb98,
+ .bit = 7,
+ },
+ .la = {
+ .reg = 0x3f0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ }, {
+ .id = 0x88,
+ .name = "gpusrd2",
+ .swgroup = TEGRA_SWGROUP_GPU,
+ .regs = {
+ .smmu = {
+ /* read-only */
+ .reg = 0xb98,
+ .bit = 8,
+ },
+ .la = {
+ .reg = 0x3e8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x1a,
+ },
+ },
+ }, {
+ .id = 0x89,
+ .name = "gpuswr2",
+ .swgroup = TEGRA_SWGROUP_GPU,
+ .regs = {
+ .smmu = {
+ /* read-only */
+ .reg = 0xb98,
+ .bit = 9,
+ },
+ .la = {
+ .reg = 0x3e8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ },
+};
+
+static const struct tegra_smmu_swgroup tegra210_swgroups[] = {
+ { .name = "afi", .swgroup = TEGRA_SWGROUP_AFI, .reg = 0x238 },
+ { .name = "avpc", .swgroup = TEGRA_SWGROUP_AVPC, .reg = 0x23c },
+ { .name = "dc", .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 },
+ { .name = "dcb", .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 },
+ { .name = "hc", .swgroup = TEGRA_SWGROUP_HC, .reg = 0x250 },
+ { .name = "hda", .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 },
+ { .name = "isp2", .swgroup = TEGRA_SWGROUP_ISP2, .reg = 0x258 },
+ { .name = "nvenc", .swgroup = TEGRA_SWGROUP_NVENC, .reg = 0x264 },
+ { .name = "nv", .swgroup = TEGRA_SWGROUP_NV, .reg = 0x268 },
+ { .name = "nv2", .swgroup = TEGRA_SWGROUP_NV2, .reg = 0x26c },
+ { .name = "ppcs", .swgroup = TEGRA_SWGROUP_PPCS, .reg = 0x270 },
+ { .name = "sata", .swgroup = TEGRA_SWGROUP_SATA, .reg = 0x274 },
+ { .name = "vi", .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 },
+ { .name = "vic", .swgroup = TEGRA_SWGROUP_VIC, .reg = 0x284 },
+ { .name = "xusb_host", .swgroup = TEGRA_SWGROUP_XUSB_HOST, .reg = 0x288 },
+ { .name = "xusb_dev", .swgroup = TEGRA_SWGROUP_XUSB_DEV, .reg = 0x28c },
+ { .name = "a9avp", .swgroup = TEGRA_SWGROUP_A9AVP, .reg = 0x290 },
+ { .name = "tsec", .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 },
+ { .name = "ppcs1", .swgroup = TEGRA_SWGROUP_PPCS1, .reg = 0x298 },
+ { .name = "dc1", .swgroup = TEGRA_SWGROUP_DC1, .reg = 0xa88 },
+ { .name = "sdmmc1a", .swgroup = TEGRA_SWGROUP_SDMMC1A, .reg = 0xa94 },
+ { .name = "sdmmc2a", .swgroup = TEGRA_SWGROUP_SDMMC2A, .reg = 0xa98 },
+ { .name = "sdmmc3a", .swgroup = TEGRA_SWGROUP_SDMMC3A, .reg = 0xa9c },
+ { .name = "sdmmc4a", .swgroup = TEGRA_SWGROUP_SDMMC4A, .reg = 0xaa0 },
+ { .name = "isp2b", .swgroup = TEGRA_SWGROUP_ISP2B, .reg = 0xaa4 },
+ { .name = "gpu", .swgroup = TEGRA_SWGROUP_GPU, .reg = 0xaac },
+ { .name = "ppcs2", .swgroup = TEGRA_SWGROUP_PPCS2, .reg = 0xab0 },
+ { .name = "nvdec", .swgroup = TEGRA_SWGROUP_NVDEC, .reg = 0xab4 },
+ { .name = "ape", .swgroup = TEGRA_SWGROUP_APE, .reg = 0xab8 },
+ { .name = "se", .swgroup = TEGRA_SWGROUP_SE, .reg = 0xabc },
+ { .name = "nvjpg", .swgroup = TEGRA_SWGROUP_NVJPG, .reg = 0xac0 },
+ { .name = "hc1", .swgroup = TEGRA_SWGROUP_HC1, .reg = 0xac4 },
+ { .name = "se1", .swgroup = TEGRA_SWGROUP_SE1, .reg = 0xac8 },
+ { .name = "axiap", .swgroup = TEGRA_SWGROUP_AXIAP, .reg = 0xacc },
+ { .name = "etr", .swgroup = TEGRA_SWGROUP_ETR, .reg = 0xad0 },
+ { .name = "tsecb", .swgroup = TEGRA_SWGROUP_TSECB, .reg = 0xad4 },
+ { .name = "tsec1", .swgroup = TEGRA_SWGROUP_TSEC1, .reg = 0xad8 },
+ { .name = "tsecb1", .swgroup = TEGRA_SWGROUP_TSECB1, .reg = 0xadc },
+ { .name = "nvdec1", .swgroup = TEGRA_SWGROUP_NVDEC1, .reg = 0xae0 },
+};
+
+static const unsigned int tegra210_group_display[] = {
+ TEGRA_SWGROUP_DC,
+ TEGRA_SWGROUP_DCB,
+};
+
+static const struct tegra_smmu_group_soc tegra210_groups[] = {
+ {
+ .name = "display",
+ .swgroups = tegra210_group_display,
+ .num_swgroups = ARRAY_SIZE(tegra210_group_display),
+ },
+};
+
+static const struct tegra_smmu_soc tegra210_smmu_soc = {
+ .clients = tegra210_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra210_mc_clients),
+ .swgroups = tegra210_swgroups,
+ .num_swgroups = ARRAY_SIZE(tegra210_swgroups),
+ .groups = tegra210_groups,
+ .num_groups = ARRAY_SIZE(tegra210_groups),
+ .supports_round_robin_arbitration = true,
+ .supports_request_limit = true,
+ .num_tlb_lines = 48,
+ .num_asids = 128,
+};
+
+#define TEGRA210_MC_RESET(_name, _control, _status, _bit) \
+ { \
+ .name = #_name, \
+ .id = TEGRA210_MC_RESET_##_name, \
+ .control = _control, \
+ .status = _status, \
+ .bit = _bit, \
+ }
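+
+/*
+ * For illustration, TEGRA210_MC_RESET(AFI, 0x200, 0x204, 0) expands to:
+ *
+ *	{
+ *		.name = "AFI",
+ *		.id = TEGRA210_MC_RESET_AFI,
+ *		.control = 0x200,
+ *		.status = 0x204,
+ *		.bit = 0,
+ *	}
+ */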
+
+static const struct tegra_mc_reset tegra210_mc_resets[] = {
+ TEGRA210_MC_RESET(AFI, 0x200, 0x204, 0),
+ TEGRA210_MC_RESET(AVPC, 0x200, 0x204, 1),
+ TEGRA210_MC_RESET(DC, 0x200, 0x204, 2),
+ TEGRA210_MC_RESET(DCB, 0x200, 0x204, 3),
+ TEGRA210_MC_RESET(HC, 0x200, 0x204, 6),
+ TEGRA210_MC_RESET(HDA, 0x200, 0x204, 7),
+ TEGRA210_MC_RESET(ISP2, 0x200, 0x204, 8),
+ TEGRA210_MC_RESET(MPCORE, 0x200, 0x204, 9),
+ TEGRA210_MC_RESET(NVENC, 0x200, 0x204, 11),
+ TEGRA210_MC_RESET(PPCS, 0x200, 0x204, 14),
+ TEGRA210_MC_RESET(SATA, 0x200, 0x204, 15),
+ TEGRA210_MC_RESET(VI, 0x200, 0x204, 17),
+ TEGRA210_MC_RESET(VIC, 0x200, 0x204, 18),
+ TEGRA210_MC_RESET(XUSB_HOST, 0x200, 0x204, 19),
+ TEGRA210_MC_RESET(XUSB_DEV, 0x200, 0x204, 20),
+ TEGRA210_MC_RESET(A9AVP, 0x200, 0x204, 21),
+ TEGRA210_MC_RESET(TSEC, 0x200, 0x204, 22),
+ TEGRA210_MC_RESET(SDMMC1, 0x200, 0x204, 29),
+ TEGRA210_MC_RESET(SDMMC2, 0x200, 0x204, 30),
+ TEGRA210_MC_RESET(SDMMC3, 0x200, 0x204, 31),
+ TEGRA210_MC_RESET(SDMMC4, 0x970, 0x974, 0),
+ TEGRA210_MC_RESET(ISP2B, 0x970, 0x974, 1),
+ TEGRA210_MC_RESET(GPU, 0x970, 0x974, 2),
+ TEGRA210_MC_RESET(NVDEC, 0x970, 0x974, 5),
+ TEGRA210_MC_RESET(APE, 0x970, 0x974, 6),
+ TEGRA210_MC_RESET(SE, 0x970, 0x974, 7),
+ TEGRA210_MC_RESET(NVJPG, 0x970, 0x974, 8),
+ TEGRA210_MC_RESET(AXIAP, 0x970, 0x974, 11),
+ TEGRA210_MC_RESET(ETR, 0x970, 0x974, 12),
+ TEGRA210_MC_RESET(TSECB, 0x970, 0x974, 13),
+};
+
+const struct tegra_mc_soc tegra210_mc_soc = {
+ .clients = tegra210_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra210_mc_clients),
+ .num_address_bits = 34,
+ .atom_size = 64,
+ .client_id_mask = 0xff,
+ .smmu = &tegra210_smmu_soc,
+ .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
+ MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
+ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
+ .reset_ops = &tegra_mc_reset_ops_common,
+ .resets = tegra210_mc_resets,
+ .num_resets = ARRAY_SIZE(tegra210_mc_resets),
+ .ops = &tegra30_mc_ops,
+};
diff --git a/drivers/memory/tegra/tegra234.c b/drivers/memory/tegra/tegra234.c
new file mode 100644
index 0000000000..2845041f32
--- /dev/null
+++ b/drivers/memory/tegra/tegra234.c
@@ -0,0 +1,1065 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022-2023, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <soc/tegra/mc.h>
+
+#include <dt-bindings/memory/tegra234-mc.h>
+#include <linux/interconnect.h>
+#include <linux/tegra-icc.h>
+
+#include <soc/tegra/bpmp.h>
+#include "mc.h"
+
+/*
+ * MC client entries are sorted in increasing order of their
+ * override and security register offsets.
+ */
+static const struct tegra_mc_client tegra234_mc_clients[] = {
+ {
+ .id = TEGRA234_MEMORY_CLIENT_HDAR,
+ .name = "hdar",
+ .bpmp_id = TEGRA_ICC_BPMP_HDA,
+ .type = TEGRA_ICC_ISO_AUDIO,
+ .sid = TEGRA234_SID_HDA,
+ .regs = {
+ .sid = {
+ .override = 0xa8,
+ .security = 0xac,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVENCSRD,
+ .name = "nvencsrd",
+ .bpmp_id = TEGRA_ICC_BPMP_NVENC,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_NVENC,
+ .regs = {
+ .sid = {
+ .override = 0xe0,
+ .security = 0xe4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE6AR,
+ .name = "pcie6ar",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_6,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE6,
+ .regs = {
+ .sid = {
+ .override = 0x140,
+ .security = 0x144,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE6AW,
+ .name = "pcie6aw",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_6,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE6,
+ .regs = {
+ .sid = {
+ .override = 0x148,
+ .security = 0x14c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE7AR,
+ .name = "pcie7ar",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_7,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE7,
+ .regs = {
+ .sid = {
+ .override = 0x150,
+ .security = 0x154,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVENCSWR,
+ .name = "nvencswr",
+ .bpmp_id = TEGRA_ICC_BPMP_NVENC,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_NVENC,
+ .regs = {
+ .sid = {
+ .override = 0x158,
+ .security = 0x15c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA0RDB,
+ .name = "dla0rdb",
+ .sid = TEGRA234_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x160,
+ .security = 0x164,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA0RDB1,
+ .name = "dla0rdb1",
+ .sid = TEGRA234_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x168,
+ .security = 0x16c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA0WRB,
+ .name = "dla0wrb",
+ .sid = TEGRA234_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x170,
+ .security = 0x174,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1RDB,
+ .name = "dla0rdb",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x178,
+ .security = 0x17c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE7AW,
+ .name = "pcie7aw",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_7,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE7,
+ .regs = {
+ .sid = {
+ .override = 0x180,
+ .security = 0x184,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE8AR,
+ .name = "pcie8ar",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_8,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE8,
+ .regs = {
+ .sid = {
+ .override = 0x190,
+ .security = 0x194,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_HDAW,
+ .name = "hdaw",
+ .bpmp_id = TEGRA_ICC_BPMP_HDA,
+ .type = TEGRA_ICC_ISO_AUDIO,
+ .sid = TEGRA234_SID_HDA,
+ .regs = {
+ .sid = {
+ .override = 0x1a8,
+ .security = 0x1ac,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE8AW,
+ .name = "pcie8aw",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_8,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE8,
+ .regs = {
+ .sid = {
+ .override = 0x1d8,
+ .security = 0x1dc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE9AR,
+ .name = "pcie9ar",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_9,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE9,
+ .regs = {
+ .sid = {
+ .override = 0x1e0,
+ .security = 0x1e4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE6AR1,
+ .name = "pcie6ar1",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_6,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE6,
+ .regs = {
+ .sid = {
+ .override = 0x1e8,
+ .security = 0x1ec,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE9AW,
+ .name = "pcie9aw",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_9,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE9,
+ .regs = {
+ .sid = {
+ .override = 0x1f0,
+ .security = 0x1f4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE10AR,
+ .name = "pcie10ar",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_10,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE10,
+ .regs = {
+ .sid = {
+ .override = 0x1f8,
+ .security = 0x1fc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE10AW,
+ .name = "pcie10aw",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_10,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE10,
+ .regs = {
+ .sid = {
+ .override = 0x200,
+ .security = 0x204,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE10AR1,
+ .name = "pcie10ar1",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_10,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE10,
+ .regs = {
+ .sid = {
+ .override = 0x240,
+ .security = 0x244,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE7AR1,
+ .name = "pcie7ar1",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_7,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE7,
+ .regs = {
+ .sid = {
+ .override = 0x248,
+ .security = 0x24c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEARD,
+ .name = "mgbeard",
+ .bpmp_id = TEGRA_ICC_BPMP_EQOS,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_MGBE,
+ .regs = {
+ .sid = {
+ .override = 0x2c0,
+ .security = 0x2c4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEBRD,
+ .name = "mgbebrd",
+ .bpmp_id = TEGRA_ICC_BPMP_EQOS,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_MGBE_VF1,
+ .regs = {
+ .sid = {
+ .override = 0x2c8,
+ .security = 0x2cc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBECRD,
+ .name = "mgbecrd",
+ .bpmp_id = TEGRA_ICC_BPMP_EQOS,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_MGBE_VF2,
+ .regs = {
+ .sid = {
+ .override = 0x2d0,
+ .security = 0x2d4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEDRD,
+ .name = "mgbedrd",
+ .bpmp_id = TEGRA_ICC_BPMP_EQOS,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_MGBE_VF3,
+ .regs = {
+ .sid = {
+ .override = 0x2d8,
+ .security = 0x2dc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEAWR,
+		.name = "mgbeawr",
+		.bpmp_id = TEGRA_ICC_BPMP_EQOS,
+		.type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_MGBE,
+ .regs = {
+ .sid = {
+ .override = 0x2e0,
+ .security = 0x2e4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEBWR,
+ .name = "mgbebwr",
+ .bpmp_id = TEGRA_ICC_BPMP_EQOS,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_MGBE_VF1,
+ .regs = {
+ .sid = {
+ .override = 0x2f8,
+ .security = 0x2fc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBECWR,
+ .name = "mgbecwr",
+ .bpmp_id = TEGRA_ICC_BPMP_EQOS,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_MGBE_VF2,
+ .regs = {
+ .sid = {
+ .override = 0x308,
+ .security = 0x30c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_SDMMCRAB,
+ .name = "sdmmcrab",
+ .bpmp_id = TEGRA_ICC_BPMP_SDMMC_4,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_SDMMC4,
+ .regs = {
+ .sid = {
+ .override = 0x318,
+ .security = 0x31c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_MGBEDWR,
+ .name = "mgbedwr",
+ .bpmp_id = TEGRA_ICC_BPMP_EQOS,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_MGBE_VF3,
+ .regs = {
+ .sid = {
+ .override = 0x328,
+ .security = 0x32c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_SDMMCWAB,
+ .name = "sdmmcwab",
+ .bpmp_id = TEGRA_ICC_BPMP_SDMMC_4,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_SDMMC4,
+ .regs = {
+ .sid = {
+ .override = 0x338,
+ .security = 0x33c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_VICSRD,
+ .name = "vicsrd",
+ .bpmp_id = TEGRA_ICC_BPMP_VIC,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_VIC,
+ .regs = {
+ .sid = {
+ .override = 0x360,
+ .security = 0x364,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_VICSWR,
+ .name = "vicswr",
+ .bpmp_id = TEGRA_ICC_BPMP_VIC,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_VIC,
+ .regs = {
+ .sid = {
+ .override = 0x368,
+ .security = 0x36c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1RDB1,
+ .name = "dla0rdb1",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x370,
+ .security = 0x374,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1WRB,
+ .name = "dla0wrb",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x378,
+ .security = 0x37c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_VI2W,
+ .name = "vi2w",
+ .bpmp_id = TEGRA_ICC_BPMP_VI2,
+ .type = TEGRA_ICC_ISO_VI,
+ .sid = TEGRA234_SID_ISO_VI2,
+ .regs = {
+ .sid = {
+ .override = 0x380,
+ .security = 0x384,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_VI2FALR,
+ .name = "vi2falr",
+ .bpmp_id = TEGRA_ICC_BPMP_VI2FAL,
+ .type = TEGRA_ICC_ISO_VIFAL,
+ .sid = TEGRA234_SID_ISO_VI2FALC,
+ .regs = {
+ .sid = {
+ .override = 0x388,
+ .security = 0x38c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVDECSRD,
+ .name = "nvdecsrd",
+ .bpmp_id = TEGRA_ICC_BPMP_NVDEC,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_NVDEC,
+ .regs = {
+ .sid = {
+ .override = 0x3c0,
+ .security = 0x3c4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVDECSWR,
+ .name = "nvdecswr",
+ .bpmp_id = TEGRA_ICC_BPMP_NVDEC,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_NVDEC,
+ .regs = {
+ .sid = {
+ .override = 0x3c8,
+ .security = 0x3cc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_APER,
+ .name = "aper",
+ .bpmp_id = TEGRA_ICC_BPMP_APE,
+ .type = TEGRA_ICC_ISO_AUDIO,
+ .sid = TEGRA234_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x3d0,
+ .security = 0x3d4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_APEW,
+ .name = "apew",
+ .bpmp_id = TEGRA_ICC_BPMP_APE,
+ .type = TEGRA_ICC_ISO_AUDIO,
+ .sid = TEGRA234_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x3d8,
+ .security = 0x3dc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_VI2FALW,
+ .name = "vi2falw",
+ .bpmp_id = TEGRA_ICC_BPMP_VI2FAL,
+ .type = TEGRA_ICC_ISO_VIFAL,
+ .sid = TEGRA234_SID_ISO_VI2FALC,
+ .regs = {
+ .sid = {
+ .override = 0x3e0,
+ .security = 0x3e4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVJPGSRD,
+ .name = "nvjpgsrd",
+ .bpmp_id = TEGRA_ICC_BPMP_NVJPG_0,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_NVJPG,
+ .regs = {
+ .sid = {
+ .override = 0x3f0,
+ .security = 0x3f4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVJPGSWR,
+ .name = "nvjpgswr",
+ .bpmp_id = TEGRA_ICC_BPMP_NVJPG_0,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_NVJPG,
+ .regs = {
+ .sid = {
+ .override = 0x3f8,
+ .security = 0x3fc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVDISPLAYR,
+ .name = "nvdisplayr",
+ .bpmp_id = TEGRA_ICC_BPMP_DISPLAY,
+ .type = TEGRA_ICC_ISO_DISPLAY,
+ .sid = TEGRA234_SID_ISO_NVDISPLAY,
+ .regs = {
+ .sid = {
+ .override = 0x490,
+ .security = 0x494,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_BPMPR,
+ .name = "bpmpr",
+ .sid = TEGRA234_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x498,
+ .security = 0x49c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_BPMPW,
+ .name = "bpmpw",
+ .sid = TEGRA234_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4a0,
+ .security = 0x4a4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_BPMPDMAR,
+ .name = "bpmpdmar",
+ .sid = TEGRA234_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4a8,
+ .security = 0x4ac,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_BPMPDMAW,
+ .name = "bpmpdmaw",
+ .sid = TEGRA234_SID_BPMP,
+ .regs = {
+ .sid = {
+ .override = 0x4b0,
+ .security = 0x4b4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_APEDMAR,
+ .name = "apedmar",
+ .bpmp_id = TEGRA_ICC_BPMP_APEDMA,
+ .type = TEGRA_ICC_ISO_AUDIO,
+ .sid = TEGRA234_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x4f8,
+ .security = 0x4fc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_APEDMAW,
+ .name = "apedmaw",
+ .bpmp_id = TEGRA_ICC_BPMP_APEDMA,
+ .type = TEGRA_ICC_ISO_AUDIO,
+ .sid = TEGRA234_SID_APE,
+ .regs = {
+ .sid = {
+ .override = 0x500,
+ .security = 0x504,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVDISPLAYR1,
+ .name = "nvdisplayr1",
+ .bpmp_id = TEGRA_ICC_BPMP_DISPLAY,
+ .type = TEGRA_ICC_ISO_DISPLAY,
+ .sid = TEGRA234_SID_ISO_NVDISPLAY,
+ .regs = {
+ .sid = {
+ .override = 0x508,
+ .security = 0x50c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA0RDA,
+ .name = "dla0rda",
+ .sid = TEGRA234_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x5f0,
+ .security = 0x5f4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA0FALRDB,
+ .name = "dla0falrdb",
+ .sid = TEGRA234_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x5f8,
+ .security = 0x5fc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA0WRA,
+ .name = "dla0wra",
+ .sid = TEGRA234_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x600,
+ .security = 0x604,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA0FALWRB,
+ .name = "dla0falwrb",
+ .sid = TEGRA234_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x608,
+ .security = 0x60c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1RDA,
+ .name = "dla0rda",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x610,
+ .security = 0x614,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1FALRDB,
+ .name = "dla0falrdb",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x618,
+ .security = 0x61c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1WRA,
+ .name = "dla0wra",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x620,
+ .security = 0x624,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1FALWRB,
+ .name = "dla0falwrb",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x628,
+ .security = 0x62c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE0R,
+ .name = "pcie0r",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_0,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE0,
+ .regs = {
+ .sid = {
+ .override = 0x6c0,
+ .security = 0x6c4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE0W,
+ .name = "pcie0w",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_0,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE0,
+ .regs = {
+ .sid = {
+ .override = 0x6c8,
+ .security = 0x6cc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE1R,
+ .name = "pcie1r",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_1,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE1,
+ .regs = {
+ .sid = {
+ .override = 0x6d0,
+ .security = 0x6d4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE1W,
+ .name = "pcie1w",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_1,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE1,
+ .regs = {
+ .sid = {
+ .override = 0x6d8,
+ .security = 0x6dc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE2AR,
+ .name = "pcie2ar",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_2,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE2,
+ .regs = {
+ .sid = {
+ .override = 0x6e0,
+ .security = 0x6e4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE2AW,
+ .name = "pcie2aw",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_2,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE2,
+ .regs = {
+ .sid = {
+ .override = 0x6e8,
+ .security = 0x6ec,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE3R,
+ .name = "pcie3r",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_3,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE3,
+ .regs = {
+ .sid = {
+ .override = 0x6f0,
+ .security = 0x6f4,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE3W,
+ .name = "pcie3w",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_3,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE3,
+ .regs = {
+ .sid = {
+ .override = 0x6f8,
+ .security = 0x6fc,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE4R,
+ .name = "pcie4r",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_4,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE4,
+ .regs = {
+ .sid = {
+ .override = 0x700,
+ .security = 0x704,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE4W,
+ .name = "pcie4w",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_4,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE4,
+ .regs = {
+ .sid = {
+ .override = 0x708,
+ .security = 0x70c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE5R,
+ .name = "pcie5r",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_5,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE5,
+ .regs = {
+ .sid = {
+ .override = 0x710,
+ .security = 0x714,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE5W,
+ .name = "pcie5w",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_5,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE5,
+ .regs = {
+ .sid = {
+ .override = 0x718,
+ .security = 0x71c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA0RDA1,
+ .name = "dla0rda1",
+ .sid = TEGRA234_SID_NVDLA0,
+ .regs = {
+ .sid = {
+ .override = 0x748,
+ .security = 0x74c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_DLA1RDA1,
+ .name = "dla0rda1",
+ .sid = TEGRA234_SID_NVDLA1,
+ .regs = {
+ .sid = {
+ .override = 0x750,
+ .security = 0x754,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_PCIE5R1,
+ .name = "pcie5r1",
+ .bpmp_id = TEGRA_ICC_BPMP_PCIE_5,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_PCIE5,
+ .regs = {
+ .sid = {
+ .override = 0x778,
+ .security = 0x77c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVJPG1SRD,
+ .name = "nvjpg1srd",
+ .bpmp_id = TEGRA_ICC_BPMP_NVJPG_1,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_NVJPG1,
+ .regs = {
+ .sid = {
+ .override = 0x918,
+ .security = 0x91c,
+ },
+ },
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVJPG1SWR,
+ .name = "nvjpg1swr",
+ .bpmp_id = TEGRA_ICC_BPMP_NVJPG_1,
+ .type = TEGRA_ICC_NISO,
+ .sid = TEGRA234_SID_NVJPG1,
+ .regs = {
+ .sid = {
+ .override = 0x920,
+ .security = 0x924,
+ },
+ },
+ }, {
+ .id = TEGRA_ICC_MC_CPU_CLUSTER0,
+ .name = "sw_cluster0",
+ .bpmp_id = TEGRA_ICC_BPMP_CPU_CLUSTER0,
+ .type = TEGRA_ICC_NISO,
+ }, {
+ .id = TEGRA_ICC_MC_CPU_CLUSTER1,
+ .name = "sw_cluster1",
+ .bpmp_id = TEGRA_ICC_BPMP_CPU_CLUSTER1,
+ .type = TEGRA_ICC_NISO,
+ }, {
+ .id = TEGRA_ICC_MC_CPU_CLUSTER2,
+ .name = "sw_cluster2",
+ .bpmp_id = TEGRA_ICC_BPMP_CPU_CLUSTER2,
+ .type = TEGRA_ICC_NISO,
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVL1R,
+ .name = "nvl1r",
+ .bpmp_id = TEGRA_ICC_BPMP_GPU,
+ .type = TEGRA_ICC_NISO,
+ }, {
+ .id = TEGRA234_MEMORY_CLIENT_NVL1W,
+ .name = "nvl1w",
+ .bpmp_id = TEGRA_ICC_BPMP_GPU,
+ .type = TEGRA_ICC_NISO,
+ },
+};
+
+/*
+ * tegra234_mc_icc_set() - Pass MC client info to the BPMP-FW
+ * @src: ICC node for Memory Controller's (MC) Client
+ * @dst: ICC node for Memory Controller (MC)
+ *
+ * Passes the current request information from the MC to the BPMP-FW, where
+ * the LA and PTSA registers are programmed and the final EMC frequency is
+ * set based on client_id, type, latency and bandwidth.
+ * icc_set_bw() issues set_bw calls for both the MC and EMC providers in
+ * sequence. Both calls are protected by 'mutex_lock(&icc_lock)', so the
+ * data passed here won't be updated by concurrent set calls from other
+ * clients.
+ */
+static int tegra234_mc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(dst->provider);
+ struct mrq_bwmgr_int_request bwmgr_req = { 0 };
+ struct mrq_bwmgr_int_response bwmgr_resp = { 0 };
+ const struct tegra_mc_client *pclient = src->data;
+ struct tegra_bpmp_message msg;
+ int ret;
+
+ /*
+	 * The same src and dst node is passed during boot from icc_node_add().
+	 * This could be used to pre-initialize and set bandwidth for all
+	 * clients before their drivers are loaded. We skip this case because
+	 * the pre-initialization has already been done by the bootloader (MB2)
+	 * and the BPMP-FW.
+ */
+ if (src->id == dst->id)
+ return 0;
+
+ if (!mc->bwmgr_mrq_supported)
+ return 0;
+
+ if (!mc->bpmp) {
+ dev_err(mc->dev, "BPMP reference NULL\n");
+ return -ENOENT;
+ }
+
+ if (pclient->type == TEGRA_ICC_NISO)
+ bwmgr_req.bwmgr_calc_set_req.niso_bw = src->avg_bw;
+ else
+ bwmgr_req.bwmgr_calc_set_req.iso_bw = src->avg_bw;
+
+ bwmgr_req.bwmgr_calc_set_req.client_id = pclient->bpmp_id;
+
+ bwmgr_req.cmd = CMD_BWMGR_INT_CALC_AND_SET;
+ bwmgr_req.bwmgr_calc_set_req.mc_floor = src->peak_bw;
+ bwmgr_req.bwmgr_calc_set_req.floor_unit = BWMGR_INT_UNIT_KBPS;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_BWMGR_INT;
+ msg.tx.data = &bwmgr_req;
+ msg.tx.size = sizeof(bwmgr_req);
+ msg.rx.data = &bwmgr_resp;
+ msg.rx.size = sizeof(bwmgr_resp);
+
+ if (pclient->bpmp_id >= TEGRA_ICC_BPMP_CPU_CLUSTER0 &&
+ pclient->bpmp_id <= TEGRA_ICC_BPMP_CPU_CLUSTER2)
+ msg.flags = TEGRA_BPMP_MESSAGE_RESET;
+
+ ret = tegra_bpmp_transfer(mc->bpmp, &msg);
+ if (ret < 0) {
+ dev_err(mc->dev, "BPMP transfer failed: %d\n", ret);
+ goto error;
+ }
+ if (msg.rx.ret < 0) {
+		dev_err(mc->dev, "failed to set bandwidth for %u: %d\n",
+			bwmgr_req.bwmgr_calc_set_req.client_id, msg.rx.ret);
+ ret = -EINVAL;
+ }
+
+error:
+ return ret;
+}
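+
+/*
+ * A minimal client-side sketch (illustrative, not part of this driver):
+ * a device driver obtains an interconnect path and requests bandwidth,
+ * which eventually reaches tegra234_mc_icc_set() above. The path name
+ * "write" and the *_kbps variables are hypothetical:
+ *
+ *	struct icc_path *path = devm_of_icc_get(dev, "write");
+ *
+ *	if (IS_ERR(path))
+ *		return PTR_ERR(path);
+ *
+ *	err = icc_set_bw(path, kBps_to_icc(avg_kbps), kBps_to_icc(peak_kbps));
+ */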
+
+static int tegra234_mc_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ struct icc_provider *p = node->provider;
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(p);
+
+ if (!mc->bwmgr_mrq_supported)
+ return 0;
+
+	if (node->id == TEGRA_ICC_MC_CPU_CLUSTER0 ||
+	    node->id == TEGRA_ICC_MC_CPU_CLUSTER1 ||
+	    node->id == TEGRA_ICC_MC_CPU_CLUSTER2)
+		peak_bw = peak_bw * mc->num_channels;
+
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
+
+static int tegra234_mc_icc_get_init_bw(struct icc_node *node, u32 *avg, u32 *peak)
+{
+ *avg = 0;
+ *peak = 0;
+
+ return 0;
+}
+
+static const struct tegra_mc_icc_ops tegra234_mc_icc_ops = {
+ .xlate = tegra_mc_icc_xlate,
+ .aggregate = tegra234_mc_icc_aggregate,
+ .get_bw = tegra234_mc_icc_get_init_bw,
+ .set = tegra234_mc_icc_set,
+};
+
+const struct tegra_mc_soc tegra234_mc_soc = {
+ .num_clients = ARRAY_SIZE(tegra234_mc_clients),
+ .clients = tegra234_mc_clients,
+ .num_address_bits = 40,
+ .num_channels = 16,
+ .client_id_mask = 0x1ff,
+ .intmask = MC_INT_DECERR_ROUTE_SANITY |
+ MC_INT_DECERR_GENERALIZED_CARVEOUT | MC_INT_DECERR_MTS |
+ MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
+ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
+ .has_addr_hi_reg = true,
+ .ops = &tegra186_mc_ops,
+ .icc_ops = &tegra234_mc_icc_ops,
+ .ch_intmask = 0x0000ff00,
+ .global_intstatus_channel_shift = 8,
+ /*
+ * Additionally, there are lite carveouts but those are not currently
+ * supported.
+ */
+ .num_carveouts = 32,
+};
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
new file mode 100644
index 0000000000..9eae25c57e
--- /dev/null
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -0,0 +1,1751 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Tegra30 External Memory Controller driver
+ *
+ * Based on downstream driver from NVIDIA and tegra124-emc.c
+ * Copyright (C) 2011-2014 NVIDIA Corporation
+ *
+ * Author: Dmitry Osipenko <digetx@gmail.com>
+ * Copyright (C) 2019 GRATE-DRIVER project
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interconnect-provider.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/types.h>
+
+#include <soc/tegra/common.h>
+#include <soc/tegra/fuse.h>
+
+#include "../jedec_ddr.h"
+#include "../of_memory.h"
+
+#include "mc.h"
+
+#define EMC_INTSTATUS 0x000
+#define EMC_INTMASK 0x004
+#define EMC_DBG 0x008
+#define EMC_CFG 0x00c
+#define EMC_ADR_CFG 0x010
+#define EMC_REFCTRL 0x020
+#define EMC_TIMING_CONTROL 0x028
+#define EMC_RC 0x02c
+#define EMC_RFC 0x030
+#define EMC_RAS 0x034
+#define EMC_RP 0x038
+#define EMC_R2W 0x03c
+#define EMC_W2R 0x040
+#define EMC_R2P 0x044
+#define EMC_W2P 0x048
+#define EMC_RD_RCD 0x04c
+#define EMC_WR_RCD 0x050
+#define EMC_RRD 0x054
+#define EMC_REXT 0x058
+#define EMC_WDV 0x05c
+#define EMC_QUSE 0x060
+#define EMC_QRST 0x064
+#define EMC_QSAFE 0x068
+#define EMC_RDV 0x06c
+#define EMC_REFRESH 0x070
+#define EMC_BURST_REFRESH_NUM 0x074
+#define EMC_PDEX2WR 0x078
+#define EMC_PDEX2RD 0x07c
+#define EMC_PCHG2PDEN 0x080
+#define EMC_ACT2PDEN 0x084
+#define EMC_AR2PDEN 0x088
+#define EMC_RW2PDEN 0x08c
+#define EMC_TXSR 0x090
+#define EMC_TCKE 0x094
+#define EMC_TFAW 0x098
+#define EMC_TRPAB 0x09c
+#define EMC_TCLKSTABLE 0x0a0
+#define EMC_TCLKSTOP 0x0a4
+#define EMC_TREFBW 0x0a8
+#define EMC_QUSE_EXTRA 0x0ac
+#define EMC_ODT_WRITE 0x0b0
+#define EMC_ODT_READ 0x0b4
+#define EMC_WEXT 0x0b8
+#define EMC_CTT 0x0bc
+#define EMC_MRS_WAIT_CNT 0x0c8
+#define EMC_MRS 0x0cc
+#define EMC_EMRS 0x0d0
+#define EMC_SELF_REF 0x0e0
+#define EMC_MRW 0x0e8
+#define EMC_MRR 0x0ec
+#define EMC_XM2DQSPADCTRL3 0x0f8
+#define EMC_FBIO_SPARE 0x100
+#define EMC_FBIO_CFG5 0x104
+#define EMC_FBIO_CFG6 0x114
+#define EMC_CFG_RSV 0x120
+#define EMC_AUTO_CAL_CONFIG 0x2a4
+#define EMC_AUTO_CAL_INTERVAL 0x2a8
+#define EMC_AUTO_CAL_STATUS 0x2ac
+#define EMC_STATUS 0x2b4
+#define EMC_CFG_2 0x2b8
+#define EMC_CFG_DIG_DLL 0x2bc
+#define EMC_CFG_DIG_DLL_PERIOD 0x2c0
+#define EMC_CTT_DURATION 0x2d8
+#define EMC_CTT_TERM_CTRL 0x2dc
+#define EMC_ZCAL_INTERVAL 0x2e0
+#define EMC_ZCAL_WAIT_CNT 0x2e4
+#define EMC_ZQ_CAL 0x2ec
+#define EMC_XM2CMDPADCTRL 0x2f0
+#define EMC_XM2DQSPADCTRL2 0x2fc
+#define EMC_XM2DQPADCTRL2 0x304
+#define EMC_XM2CLKPADCTRL 0x308
+#define EMC_XM2COMPPADCTRL 0x30c
+#define EMC_XM2VTTGENPADCTRL 0x310
+#define EMC_XM2VTTGENPADCTRL2 0x314
+#define EMC_XM2QUSEPADCTRL 0x318
+#define EMC_DLL_XFORM_DQS0 0x328
+#define EMC_DLL_XFORM_DQS1 0x32c
+#define EMC_DLL_XFORM_DQS2 0x330
+#define EMC_DLL_XFORM_DQS3 0x334
+#define EMC_DLL_XFORM_DQS4 0x338
+#define EMC_DLL_XFORM_DQS5 0x33c
+#define EMC_DLL_XFORM_DQS6 0x340
+#define EMC_DLL_XFORM_DQS7 0x344
+#define EMC_DLL_XFORM_QUSE0 0x348
+#define EMC_DLL_XFORM_QUSE1 0x34c
+#define EMC_DLL_XFORM_QUSE2 0x350
+#define EMC_DLL_XFORM_QUSE3 0x354
+#define EMC_DLL_XFORM_QUSE4 0x358
+#define EMC_DLL_XFORM_QUSE5 0x35c
+#define EMC_DLL_XFORM_QUSE6 0x360
+#define EMC_DLL_XFORM_QUSE7 0x364
+#define EMC_DLL_XFORM_DQ0 0x368
+#define EMC_DLL_XFORM_DQ1 0x36c
+#define EMC_DLL_XFORM_DQ2 0x370
+#define EMC_DLL_XFORM_DQ3 0x374
+#define EMC_DLI_TRIM_TXDQS0 0x3a8
+#define EMC_DLI_TRIM_TXDQS1 0x3ac
+#define EMC_DLI_TRIM_TXDQS2 0x3b0
+#define EMC_DLI_TRIM_TXDQS3 0x3b4
+#define EMC_DLI_TRIM_TXDQS4 0x3b8
+#define EMC_DLI_TRIM_TXDQS5 0x3bc
+#define EMC_DLI_TRIM_TXDQS6 0x3c0
+#define EMC_DLI_TRIM_TXDQS7 0x3c4
+#define EMC_STALL_THEN_EXE_BEFORE_CLKCHANGE 0x3c8
+#define EMC_STALL_THEN_EXE_AFTER_CLKCHANGE 0x3cc
+#define EMC_UNSTALL_RW_AFTER_CLKCHANGE 0x3d0
+#define EMC_SEL_DPD_CTRL 0x3d8
+#define EMC_PRE_REFRESH_REQ_CNT 0x3dc
+#define EMC_DYN_SELF_REF_CONTROL 0x3e0
+#define EMC_TXSRDLL 0x3e4
+
+#define EMC_STATUS_TIMING_UPDATE_STALLED BIT(23)
+
+#define EMC_MODE_SET_DLL_RESET BIT(8)
+#define EMC_MODE_SET_LONG_CNT BIT(26)
+
+#define EMC_SELF_REF_CMD_ENABLED BIT(0)
+
+#define DRAM_DEV_SEL_ALL (0 << 30)
+#define DRAM_DEV_SEL_0 BIT(31)
+#define DRAM_DEV_SEL_1 BIT(30)
+#define DRAM_BROADCAST(num) \
+ ((num) > 1 ? DRAM_DEV_SEL_ALL : DRAM_DEV_SEL_0)
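+
+/*
+ * For example, DRAM_BROADCAST(2) selects all devices, while
+ * DRAM_BROADCAST(1) addresses device 0 only.
+ */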
+
+#define EMC_ZQ_CAL_CMD BIT(0)
+#define EMC_ZQ_CAL_LONG BIT(4)
+#define EMC_ZQ_CAL_LONG_CMD_DEV0 \
+ (DRAM_DEV_SEL_0 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD)
+#define EMC_ZQ_CAL_LONG_CMD_DEV1 \
+ (DRAM_DEV_SEL_1 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD)
+
+#define EMC_DBG_READ_MUX_ASSEMBLY BIT(0)
+#define EMC_DBG_WRITE_MUX_ACTIVE BIT(1)
+#define EMC_DBG_FORCE_UPDATE BIT(2)
+#define EMC_DBG_CFG_PRIORITY BIT(24)
+
+#define EMC_CFG5_QUSE_MODE_SHIFT 13
+#define EMC_CFG5_QUSE_MODE_MASK (7 << EMC_CFG5_QUSE_MODE_SHIFT)
+
+#define EMC_CFG5_QUSE_MODE_INTERNAL_LPBK 2
+#define EMC_CFG5_QUSE_MODE_PULSE_INTERN 3
+
+#define EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE BIT(9)
+
+#define EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE BIT(10)
+
+#define EMC_XM2QUSEPADCTRL_IVREF_ENABLE BIT(4)
+
+#define EMC_XM2DQSPADCTRL2_VREF_ENABLE BIT(5)
+#define EMC_XM2DQSPADCTRL3_VREF_ENABLE BIT(5)
+
+#define EMC_AUTO_CAL_STATUS_ACTIVE BIT(31)
+
+#define EMC_FBIO_CFG5_DRAM_TYPE_MASK 0x3
+
+#define EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK 0x3ff
+#define EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT 16
+#define EMC_MRS_WAIT_CNT_LONG_WAIT_MASK \
+ (0x3ff << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT)
+
+#define EMC_REFCTRL_DEV_SEL_MASK 0x3
+#define EMC_REFCTRL_ENABLE BIT(31)
+#define EMC_REFCTRL_ENABLE_ALL(num) \
+ (((num) > 1 ? 0 : 2) | EMC_REFCTRL_ENABLE)
+#define EMC_REFCTRL_DISABLE_ALL(num) ((num) > 1 ? 0 : 2)
+
+#define EMC_CFG_PERIODIC_QRST BIT(21)
+#define EMC_CFG_DYN_SREF_ENABLE BIT(28)
+
+#define EMC_CLKCHANGE_REQ_ENABLE BIT(0)
+#define EMC_CLKCHANGE_PD_ENABLE BIT(1)
+#define EMC_CLKCHANGE_SR_ENABLE BIT(2)
+
+#define EMC_TIMING_UPDATE BIT(0)
+
+#define EMC_REFRESH_OVERFLOW_INT BIT(3)
+#define EMC_CLKCHANGE_COMPLETE_INT BIT(4)
+#define EMC_MRR_DIVLD_INT BIT(5)
+
+#define EMC_MRR_DEV_SELECTN GENMASK(31, 30)
+#define EMC_MRR_MRR_MA GENMASK(23, 16)
+#define EMC_MRR_MRR_DATA GENMASK(15, 0)
+
+#define EMC_ADR_CFG_EMEM_NUMDEV BIT(0)
+
+enum emc_dram_type {
+ DRAM_TYPE_DDR3,
+ DRAM_TYPE_DDR1,
+ DRAM_TYPE_LPDDR2,
+ DRAM_TYPE_DDR2,
+};
+
+enum emc_dll_change {
+ DLL_CHANGE_NONE,
+ DLL_CHANGE_ON,
+ DLL_CHANGE_OFF
+};
+
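+/*
+ * The array index corresponds to the cell position within the
+ * "nvidia,emc-configuration" device-tree property; the timing-change
+ * sequence refers to entries by these fixed indices (e.g. data[73] is
+ * EMC_XM2CLKPADCTRL, which has to be programmed separately from the other
+ * shadow registers).
+ */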
+static const u16 emc_timing_registers[] = {
+ [0] = EMC_RC,
+ [1] = EMC_RFC,
+ [2] = EMC_RAS,
+ [3] = EMC_RP,
+ [4] = EMC_R2W,
+ [5] = EMC_W2R,
+ [6] = EMC_R2P,
+ [7] = EMC_W2P,
+ [8] = EMC_RD_RCD,
+ [9] = EMC_WR_RCD,
+ [10] = EMC_RRD,
+ [11] = EMC_REXT,
+ [12] = EMC_WEXT,
+ [13] = EMC_WDV,
+ [14] = EMC_QUSE,
+ [15] = EMC_QRST,
+ [16] = EMC_QSAFE,
+ [17] = EMC_RDV,
+ [18] = EMC_REFRESH,
+ [19] = EMC_BURST_REFRESH_NUM,
+ [20] = EMC_PRE_REFRESH_REQ_CNT,
+ [21] = EMC_PDEX2WR,
+ [22] = EMC_PDEX2RD,
+ [23] = EMC_PCHG2PDEN,
+ [24] = EMC_ACT2PDEN,
+ [25] = EMC_AR2PDEN,
+ [26] = EMC_RW2PDEN,
+ [27] = EMC_TXSR,
+ [28] = EMC_TXSRDLL,
+ [29] = EMC_TCKE,
+ [30] = EMC_TFAW,
+ [31] = EMC_TRPAB,
+ [32] = EMC_TCLKSTABLE,
+ [33] = EMC_TCLKSTOP,
+ [34] = EMC_TREFBW,
+ [35] = EMC_QUSE_EXTRA,
+ [36] = EMC_FBIO_CFG6,
+ [37] = EMC_ODT_WRITE,
+ [38] = EMC_ODT_READ,
+ [39] = EMC_FBIO_CFG5,
+ [40] = EMC_CFG_DIG_DLL,
+ [41] = EMC_CFG_DIG_DLL_PERIOD,
+ [42] = EMC_DLL_XFORM_DQS0,
+ [43] = EMC_DLL_XFORM_DQS1,
+ [44] = EMC_DLL_XFORM_DQS2,
+ [45] = EMC_DLL_XFORM_DQS3,
+ [46] = EMC_DLL_XFORM_DQS4,
+ [47] = EMC_DLL_XFORM_DQS5,
+ [48] = EMC_DLL_XFORM_DQS6,
+ [49] = EMC_DLL_XFORM_DQS7,
+ [50] = EMC_DLL_XFORM_QUSE0,
+ [51] = EMC_DLL_XFORM_QUSE1,
+ [52] = EMC_DLL_XFORM_QUSE2,
+ [53] = EMC_DLL_XFORM_QUSE3,
+ [54] = EMC_DLL_XFORM_QUSE4,
+ [55] = EMC_DLL_XFORM_QUSE5,
+ [56] = EMC_DLL_XFORM_QUSE6,
+ [57] = EMC_DLL_XFORM_QUSE7,
+ [58] = EMC_DLI_TRIM_TXDQS0,
+ [59] = EMC_DLI_TRIM_TXDQS1,
+ [60] = EMC_DLI_TRIM_TXDQS2,
+ [61] = EMC_DLI_TRIM_TXDQS3,
+ [62] = EMC_DLI_TRIM_TXDQS4,
+ [63] = EMC_DLI_TRIM_TXDQS5,
+ [64] = EMC_DLI_TRIM_TXDQS6,
+ [65] = EMC_DLI_TRIM_TXDQS7,
+ [66] = EMC_DLL_XFORM_DQ0,
+ [67] = EMC_DLL_XFORM_DQ1,
+ [68] = EMC_DLL_XFORM_DQ2,
+ [69] = EMC_DLL_XFORM_DQ3,
+ [70] = EMC_XM2CMDPADCTRL,
+ [71] = EMC_XM2DQSPADCTRL2,
+ [72] = EMC_XM2DQPADCTRL2,
+ [73] = EMC_XM2CLKPADCTRL,
+ [74] = EMC_XM2COMPPADCTRL,
+ [75] = EMC_XM2VTTGENPADCTRL,
+ [76] = EMC_XM2VTTGENPADCTRL2,
+ [77] = EMC_XM2QUSEPADCTRL,
+ [78] = EMC_XM2DQSPADCTRL3,
+ [79] = EMC_CTT_TERM_CTRL,
+ [80] = EMC_ZCAL_INTERVAL,
+ [81] = EMC_ZCAL_WAIT_CNT,
+ [82] = EMC_MRS_WAIT_CNT,
+ [83] = EMC_AUTO_CAL_CONFIG,
+ [84] = EMC_CTT,
+ [85] = EMC_CTT_DURATION,
+ [86] = EMC_DYN_SELF_REF_CONTROL,
+ [87] = EMC_FBIO_SPARE,
+ [88] = EMC_CFG_RSV,
+};
+
+struct emc_timing {
+ unsigned long rate;
+
+ u32 data[ARRAY_SIZE(emc_timing_registers)];
+
+ u32 emc_auto_cal_interval;
+ u32 emc_mode_1;
+ u32 emc_mode_2;
+ u32 emc_mode_reset;
+ u32 emc_zcal_cnt_long;
+ bool emc_cfg_periodic_qrst;
+ bool emc_cfg_dyn_self_ref;
+};
+
+enum emc_rate_request_type {
+ EMC_RATE_DEBUG,
+ EMC_RATE_ICC,
+ EMC_RATE_TYPE_MAX,
+};
+
+struct emc_rate_request {
+ unsigned long min_rate;
+ unsigned long max_rate;
+};
+
+struct tegra_emc {
+ struct device *dev;
+ struct tegra_mc *mc;
+ struct icc_provider provider;
+ struct notifier_block clk_nb;
+ struct clk *clk;
+ void __iomem *regs;
+ unsigned int irq;
+ bool bad_state;
+
+ struct emc_timing *new_timing;
+ struct emc_timing *timings;
+ unsigned int num_timings;
+
+ u32 mc_override;
+ u32 emc_cfg;
+
+ u32 emc_mode_1;
+ u32 emc_mode_2;
+ u32 emc_mode_reset;
+
+ bool vref_cal_toggle : 1;
+ bool zcal_long : 1;
+ bool dll_on : 1;
+
+ struct {
+ struct dentry *root;
+ unsigned long min_rate;
+ unsigned long max_rate;
+ } debugfs;
+
+ /*
+	 * There are multiple sources in the EMC driver which could request
+	 * a min/max clock rate; these rates are stored in this array.
+ */
+ struct emc_rate_request requested_rate[EMC_RATE_TYPE_MAX];
+
+ /* protect shared rate-change code path */
+ struct mutex rate_lock;
+
+ bool mrr_error;
+};
+
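+/*
+ * Latch the shadowed timing registers into the active configuration and
+ * wait until the update completes.
+ */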
+static int emc_seq_update_timing(struct tegra_emc *emc)
+{
+ u32 val;
+ int err;
+
+ writel_relaxed(EMC_TIMING_UPDATE, emc->regs + EMC_TIMING_CONTROL);
+
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_STATUS, val,
+ !(val & EMC_STATUS_TIMING_UPDATE_STALLED),
+ 1, 200);
+ if (err) {
+ dev_err(emc->dev, "failed to update timing: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static irqreturn_t tegra_emc_isr(int irq, void *data)
+{
+ struct tegra_emc *emc = data;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT;
+ u32 status;
+
+ status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask;
+ if (!status)
+ return IRQ_NONE;
+
+ /* notify about HW problem */
+ if (status & EMC_REFRESH_OVERFLOW_INT)
+ dev_err_ratelimited(emc->dev,
+ "refresh request overflow timeout\n");
+
+ /* clear interrupts */
+ writel_relaxed(status, emc->regs + EMC_INTSTATUS);
+
+ return IRQ_HANDLED;
+}
+
+static struct emc_timing *emc_find_timing(struct tegra_emc *emc,
+ unsigned long rate)
+{
+ struct emc_timing *timing = NULL;
+ unsigned int i;
+
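+	/* timings are sorted by rate; pick the first one that is high enough */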
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate >= rate) {
+ timing = &emc->timings[i];
+ break;
+ }
+ }
+
+ if (!timing) {
+ dev_err(emc->dev, "no timing for rate %lu\n", rate);
+ return NULL;
+ }
+
+ return timing;
+}
+
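+/*
+ * Pre-enable the DQS/QUSE pad VREF circuits when the new timing wants them
+ * enabled, giving the references time to settle before the rate switch (the
+ * caller inserts a settling delay whenever this function returns true).
+ */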
+static bool emc_dqs_preset(struct tegra_emc *emc, struct emc_timing *timing,
+ bool *schmitt_to_vref)
+{
+ bool preset = false;
+ u32 val;
+
+ if (timing->data[71] & EMC_XM2DQSPADCTRL2_VREF_ENABLE) {
+ val = readl_relaxed(emc->regs + EMC_XM2DQSPADCTRL2);
+
+ if (!(val & EMC_XM2DQSPADCTRL2_VREF_ENABLE)) {
+ val |= EMC_XM2DQSPADCTRL2_VREF_ENABLE;
+ writel_relaxed(val, emc->regs + EMC_XM2DQSPADCTRL2);
+
+ preset = true;
+ }
+ }
+
+ if (timing->data[78] & EMC_XM2DQSPADCTRL3_VREF_ENABLE) {
+ val = readl_relaxed(emc->regs + EMC_XM2DQSPADCTRL3);
+
+ if (!(val & EMC_XM2DQSPADCTRL3_VREF_ENABLE)) {
+ val |= EMC_XM2DQSPADCTRL3_VREF_ENABLE;
+ writel_relaxed(val, emc->regs + EMC_XM2DQSPADCTRL3);
+
+ preset = true;
+ }
+ }
+
+ if (timing->data[77] & EMC_XM2QUSEPADCTRL_IVREF_ENABLE) {
+ val = readl_relaxed(emc->regs + EMC_XM2QUSEPADCTRL);
+
+ if (!(val & EMC_XM2QUSEPADCTRL_IVREF_ENABLE)) {
+ val |= EMC_XM2QUSEPADCTRL_IVREF_ENABLE;
+ writel_relaxed(val, emc->regs + EMC_XM2QUSEPADCTRL);
+
+ *schmitt_to_vref = true;
+ preset = true;
+ }
+ }
+
+ return preset;
+}
+
+static int emc_prepare_mc_clk_cfg(struct tegra_emc *emc, unsigned long rate)
+{
+ struct tegra_mc *mc = emc->mc;
+ unsigned int misc0_index = 16;
+ unsigned int i;
+ bool same;
+
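+	/*
+	 * MC_EMEM_ARB_MISC0 is the 17th cell (index 16) of the MC timing
+	 * data; its bit 27 flags whether EMC and MC run at the same
+	 * frequency for this rate.
+	 */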
+ for (i = 0; i < mc->num_timings; i++) {
+ if (mc->timings[i].rate != rate)
+ continue;
+
+ if (mc->timings[i].emem_data[misc0_index] & BIT(27))
+ same = true;
+ else
+ same = false;
+
+ return tegra20_clk_prepare_emc_mc_same_freq(emc->clk, same);
+ }
+
+ return -EINVAL;
+}
+
+static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
+{
+ struct emc_timing *timing = emc_find_timing(emc, rate);
+ enum emc_dll_change dll_change;
+ enum emc_dram_type dram_type;
+ bool schmitt_to_vref = false;
+ unsigned int pre_wait = 0;
+ bool qrst_used = false;
+ unsigned int dram_num;
+ unsigned int i;
+ u32 fbio_cfg5;
+ u32 emc_dbg;
+ u32 val;
+ int err;
+
+ if (!timing || emc->bad_state)
+ return -EINVAL;
+
+ dev_dbg(emc->dev, "%s: using timing rate %lu for requested rate %lu\n",
+ __func__, timing->rate, rate);
+
+ emc->bad_state = true;
+
+ err = emc_prepare_mc_clk_cfg(emc, rate);
+ if (err) {
+ dev_err(emc->dev, "mc clock preparation failed: %d\n", err);
+ return err;
+ }
+
+ emc->vref_cal_toggle = false;
+ emc->mc_override = mc_readl(emc->mc, MC_EMEM_ARB_OVERRIDE);
+ emc->emc_cfg = readl_relaxed(emc->regs + EMC_CFG);
+ emc_dbg = readl_relaxed(emc->regs + EMC_DBG);
+
+ if (emc->dll_on == !!(timing->emc_mode_1 & 0x1))
+ dll_change = DLL_CHANGE_NONE;
+ else if (timing->emc_mode_1 & 0x1)
+ dll_change = DLL_CHANGE_ON;
+ else
+ dll_change = DLL_CHANGE_OFF;
+
+ emc->dll_on = !!(timing->emc_mode_1 & 0x1);
+
+ if (timing->data[80] && !readl_relaxed(emc->regs + EMC_ZCAL_INTERVAL))
+ emc->zcal_long = true;
+ else
+ emc->zcal_long = false;
+
+ fbio_cfg5 = readl_relaxed(emc->regs + EMC_FBIO_CFG5);
+ dram_type = fbio_cfg5 & EMC_FBIO_CFG5_DRAM_TYPE_MASK;
+
+ dram_num = tegra_mc_get_emem_device_count(emc->mc);
+
+ /* disable dynamic self-refresh */
+ if (emc->emc_cfg & EMC_CFG_DYN_SREF_ENABLE) {
+ emc->emc_cfg &= ~EMC_CFG_DYN_SREF_ENABLE;
+ writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
+
+ pre_wait = 5;
+ }
+
+ /* update MC arbiter settings */
+ val = mc_readl(emc->mc, MC_EMEM_ARB_OUTSTANDING_REQ);
+ if (!(val & MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE) ||
+ ((val & MC_EMEM_ARB_OUTSTANDING_REQ_MAX_MASK) > 0x50)) {
+
+ val = MC_EMEM_ARB_OUTSTANDING_REQ_LIMIT_ENABLE |
+ MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE | 0x50;
+ mc_writel(emc->mc, val, MC_EMEM_ARB_OUTSTANDING_REQ);
+ mc_writel(emc->mc, MC_TIMING_UPDATE, MC_TIMING_CONTROL);
+ }
+
+ if (emc->mc_override & MC_EMEM_ARB_OVERRIDE_EACK_MASK)
+ mc_writel(emc->mc,
+ emc->mc_override & ~MC_EMEM_ARB_OVERRIDE_EACK_MASK,
+ MC_EMEM_ARB_OVERRIDE);
+
+ /* check DQ/DQS VREF delay */
+ if (emc_dqs_preset(emc, timing, &schmitt_to_vref)) {
+ if (pre_wait < 3)
+ pre_wait = 3;
+ }
+
+ if (pre_wait) {
+ err = emc_seq_update_timing(emc);
+ if (err)
+ return err;
+
+ udelay(pre_wait);
+ }
+
+ /* disable auto-calibration if VREF mode is switching */
+ if (timing->emc_auto_cal_interval) {
+ val = readl_relaxed(emc->regs + EMC_XM2COMPPADCTRL);
+ val ^= timing->data[74];
+
+ if (val & EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE) {
+ writel_relaxed(0, emc->regs + EMC_AUTO_CAL_INTERVAL);
+
+ err = readl_relaxed_poll_timeout_atomic(
+ emc->regs + EMC_AUTO_CAL_STATUS, val,
+ !(val & EMC_AUTO_CAL_STATUS_ACTIVE), 1, 300);
+ if (err) {
+ dev_err(emc->dev,
+ "auto-cal finish timeout: %d\n", err);
+ return err;
+ }
+
+ emc->vref_cal_toggle = true;
+ }
+ }
+
+ /* program shadow registers */
+ for (i = 0; i < ARRAY_SIZE(timing->data); i++) {
+ /* EMC_XM2CLKPADCTRL should be programmed separately */
+ if (i != 73)
+ writel_relaxed(timing->data[i],
+ emc->regs + emc_timing_registers[i]);
+ }
+
+ err = tegra_mc_write_emem_configuration(emc->mc, timing->rate);
+ if (err)
+ return err;
+
+ /* DDR3: predict MRS long wait count */
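+	/*
+	 * DDR3 needs 512 cycles (tDLLK) after a DLL reset before the DLL is
+	 * locked again; the long ZQ calibration issued afterwards appears to
+	 * cover 256 of those cycles per device, hence the subtraction below.
+	 */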
+ if (dram_type == DRAM_TYPE_DDR3 && dll_change == DLL_CHANGE_ON) {
+ u32 cnt = 512;
+
+ if (emc->zcal_long)
+ cnt -= dram_num * 256;
+
+ val = timing->data[82] & EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK;
+ if (cnt < val)
+ cnt = val;
+
+ val = timing->data[82] & ~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
+ val |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) &
+ EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
+
+ writel_relaxed(val, emc->regs + EMC_MRS_WAIT_CNT);
+ }
+
+ /* this read also completes the writes */
+ val = readl_relaxed(emc->regs + EMC_SEL_DPD_CTRL);
+
+ if (!(val & EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE) && schmitt_to_vref) {
+ u32 cur_mode, new_mode;
+
+ cur_mode = fbio_cfg5 & EMC_CFG5_QUSE_MODE_MASK;
+ cur_mode >>= EMC_CFG5_QUSE_MODE_SHIFT;
+
+ new_mode = timing->data[39] & EMC_CFG5_QUSE_MODE_MASK;
+ new_mode >>= EMC_CFG5_QUSE_MODE_SHIFT;
+
+ if ((cur_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN &&
+ cur_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK) ||
+ (new_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN &&
+ new_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK))
+ qrst_used = true;
+ }
+
+ /* flow control marker 1 */
+ writel_relaxed(0x1, emc->regs + EMC_STALL_THEN_EXE_BEFORE_CLKCHANGE);
+
+ /* enable periodic reset */
+ if (qrst_used) {
+ writel_relaxed(emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE,
+ emc->regs + EMC_DBG);
+ writel_relaxed(emc->emc_cfg | EMC_CFG_PERIODIC_QRST,
+ emc->regs + EMC_CFG);
+ writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
+ }
+
+ /* disable auto-refresh to save time after clock change */
+ writel_relaxed(EMC_REFCTRL_DISABLE_ALL(dram_num),
+ emc->regs + EMC_REFCTRL);
+
+ /* turn off DLL and enter self-refresh on DDR3 */
+ if (dram_type == DRAM_TYPE_DDR3) {
+ if (dll_change == DLL_CHANGE_OFF)
+ writel_relaxed(timing->emc_mode_1,
+ emc->regs + EMC_EMRS);
+
+ writel_relaxed(DRAM_BROADCAST(dram_num) |
+ EMC_SELF_REF_CMD_ENABLED,
+ emc->regs + EMC_SELF_REF);
+ }
+
+ /* flow control marker 2 */
+ writel_relaxed(0x1, emc->regs + EMC_STALL_THEN_EXE_AFTER_CLKCHANGE);
+
+ /* enable write-active MUX, update unshadowed pad control */
+ writel_relaxed(emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE, emc->regs + EMC_DBG);
+ writel_relaxed(timing->data[73], emc->regs + EMC_XM2CLKPADCTRL);
+
+ /* restore periodic QRST and disable write-active MUX */
+ val = !!(emc->emc_cfg & EMC_CFG_PERIODIC_QRST);
+ if (qrst_used || timing->emc_cfg_periodic_qrst != val) {
+ if (timing->emc_cfg_periodic_qrst)
+ emc->emc_cfg |= EMC_CFG_PERIODIC_QRST;
+ else
+ emc->emc_cfg &= ~EMC_CFG_PERIODIC_QRST;
+
+ writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
+ }
+ writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
+
+ /* exit self-refresh on DDR3 */
+ if (dram_type == DRAM_TYPE_DDR3)
+ writel_relaxed(DRAM_BROADCAST(dram_num),
+ emc->regs + EMC_SELF_REF);
+
+ /* set DRAM-mode registers */
+ if (dram_type == DRAM_TYPE_DDR3) {
+ if (timing->emc_mode_1 != emc->emc_mode_1)
+ writel_relaxed(timing->emc_mode_1,
+ emc->regs + EMC_EMRS);
+
+ if (timing->emc_mode_2 != emc->emc_mode_2)
+ writel_relaxed(timing->emc_mode_2,
+ emc->regs + EMC_EMRS);
+
+ if (timing->emc_mode_reset != emc->emc_mode_reset ||
+ dll_change == DLL_CHANGE_ON) {
+ val = timing->emc_mode_reset;
+ if (dll_change == DLL_CHANGE_ON) {
+ val |= EMC_MODE_SET_DLL_RESET;
+ val |= EMC_MODE_SET_LONG_CNT;
+ } else {
+ val &= ~EMC_MODE_SET_DLL_RESET;
+ }
+ writel_relaxed(val, emc->regs + EMC_MRS);
+ }
+ } else {
+ if (timing->emc_mode_2 != emc->emc_mode_2)
+ writel_relaxed(timing->emc_mode_2,
+ emc->regs + EMC_MRW);
+
+ if (timing->emc_mode_1 != emc->emc_mode_1)
+ writel_relaxed(timing->emc_mode_1,
+ emc->regs + EMC_MRW);
+ }
+
+ emc->emc_mode_1 = timing->emc_mode_1;
+ emc->emc_mode_2 = timing->emc_mode_2;
+ emc->emc_mode_reset = timing->emc_mode_reset;
+
+ /* issue ZCAL command if turning ZCAL on */
+ if (emc->zcal_long) {
+ writel_relaxed(EMC_ZQ_CAL_LONG_CMD_DEV0,
+ emc->regs + EMC_ZQ_CAL);
+
+ if (dram_num > 1)
+ writel_relaxed(EMC_ZQ_CAL_LONG_CMD_DEV1,
+ emc->regs + EMC_ZQ_CAL);
+ }
+
+ /* flow control marker 3 */
+ writel_relaxed(0x1, emc->regs + EMC_UNSTALL_RW_AFTER_CLKCHANGE);
+
+ /*
+ * Read and discard an arbitrary MC register (Note: EMC registers
+ * can't be used) to ensure the register writes are completed.
+ */
+ mc_readl(emc->mc, MC_EMEM_ARB_OVERRIDE);
+
+ return 0;
+}
+
+static int emc_complete_timing_change(struct tegra_emc *emc,
+ unsigned long rate)
+{
+ struct emc_timing *timing = emc_find_timing(emc, rate);
+ unsigned int dram_num;
+ int err;
+ u32 v;
+
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, v,
+ v & EMC_CLKCHANGE_COMPLETE_INT,
+ 1, 100);
+ if (err) {
+ dev_err(emc->dev, "emc-car handshake timeout: %d\n", err);
+ return err;
+ }
+
+ /* re-enable auto-refresh */
+ dram_num = tegra_mc_get_emem_device_count(emc->mc);
+ writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num),
+ emc->regs + EMC_REFCTRL);
+
+ /* restore auto-calibration */
+ if (emc->vref_cal_toggle)
+ writel_relaxed(timing->emc_auto_cal_interval,
+ emc->regs + EMC_AUTO_CAL_INTERVAL);
+
+ /* restore dynamic self-refresh */
+ if (timing->emc_cfg_dyn_self_ref) {
+ emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE;
+ writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
+ }
+
+ /* set number of clocks to wait after each ZQ command */
+ if (emc->zcal_long)
+ writel_relaxed(timing->emc_zcal_cnt_long,
+ emc->regs + EMC_ZCAL_WAIT_CNT);
+
+ /* wait for writes to settle */
+ udelay(2);
+
+ /* update restored timing */
+ err = emc_seq_update_timing(emc);
+ if (!err)
+ emc->bad_state = false;
+
+ /* restore early ACK */
+ mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE);
+
+ return err;
+}
+
+static int emc_unprepare_timing_change(struct tegra_emc *emc,
+ unsigned long rate)
+{
+ if (!emc->bad_state) {
+ /* shouldn't ever happen in practice */
+ dev_err(emc->dev, "timing configuration can't be reverted\n");
+ emc->bad_state = true;
+ }
+
+ return 0;
+}
+
+static int emc_clk_change_notify(struct notifier_block *nb,
+ unsigned long msg, void *data)
+{
+ struct tegra_emc *emc = container_of(nb, struct tegra_emc, clk_nb);
+ struct clk_notifier_data *cnd = data;
+ int err;
+
+ switch (msg) {
+ case PRE_RATE_CHANGE:
+ /*
+ * Disable interrupt since read accesses are prohibited after
+ * stalling.
+ */
+ disable_irq(emc->irq);
+ err = emc_prepare_timing_change(emc, cnd->new_rate);
+ enable_irq(emc->irq);
+ break;
+
+ case ABORT_RATE_CHANGE:
+ err = emc_unprepare_timing_change(emc, cnd->old_rate);
+ break;
+
+ case POST_RATE_CHANGE:
+ err = emc_complete_timing_change(emc, cnd->new_rate);
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return notifier_from_errno(err);
+}
+
+static int load_one_timing_from_dt(struct tegra_emc *emc,
+ struct emc_timing *timing,
+ struct device_node *node)
+{
+ u32 value;
+ int err;
+
+ err = of_property_read_u32(node, "clock-frequency", &value);
+ if (err) {
+ dev_err(emc->dev, "timing %pOF: failed to read rate: %d\n",
+ node, err);
+ return err;
+ }
+
+ timing->rate = value;
+
+ err = of_property_read_u32_array(node, "nvidia,emc-configuration",
+ timing->data,
+ ARRAY_SIZE(emc_timing_registers));
+ if (err) {
+ dev_err(emc->dev,
+ "timing %pOF: failed to read emc timing data: %d\n",
+ node, err);
+ return err;
+ }
+
+#define EMC_READ_BOOL(prop, dtprop) \
+ timing->prop = of_property_read_bool(node, dtprop);
+
+#define EMC_READ_U32(prop, dtprop) \
+ err = of_property_read_u32(node, dtprop, &timing->prop); \
+ if (err) { \
+ dev_err(emc->dev, \
+ "timing %pOFn: failed to read " #prop ": %d\n", \
+ node, err); \
+ return err; \
+ }
+
+ EMC_READ_U32(emc_auto_cal_interval, "nvidia,emc-auto-cal-interval")
+ EMC_READ_U32(emc_mode_1, "nvidia,emc-mode-1")
+ EMC_READ_U32(emc_mode_2, "nvidia,emc-mode-2")
+ EMC_READ_U32(emc_mode_reset, "nvidia,emc-mode-reset")
+ EMC_READ_U32(emc_zcal_cnt_long, "nvidia,emc-zcal-cnt-long")
+ EMC_READ_BOOL(emc_cfg_dyn_self_ref, "nvidia,emc-cfg-dyn-self-ref")
+ EMC_READ_BOOL(emc_cfg_periodic_qrst, "nvidia,emc-cfg-periodic-qrst")
+
+#undef EMC_READ_U32
+#undef EMC_READ_BOOL
+
+ dev_dbg(emc->dev, "%s: %pOF: rate %lu\n", __func__, node, timing->rate);
+
+ return 0;
+}
+
+static int cmp_timings(const void *_a, const void *_b)
+{
+ const struct emc_timing *a = _a;
+ const struct emc_timing *b = _b;
+
+ if (a->rate < b->rate)
+ return -1;
+
+ if (a->rate > b->rate)
+ return 1;
+
+ return 0;
+}
+
+static int emc_check_mc_timings(struct tegra_emc *emc)
+{
+ struct tegra_mc *mc = emc->mc;
+ unsigned int i;
+
+ if (emc->num_timings != mc->num_timings) {
+ dev_err(emc->dev, "emc/mc timings number mismatch: %u %u\n",
+ emc->num_timings, mc->num_timings);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mc->num_timings; i++) {
+ if (emc->timings[i].rate != mc->timings[i].rate) {
+ dev_err(emc->dev,
+ "emc/mc timing rate mismatch: %lu %lu\n",
+ emc->timings[i].rate, mc->timings[i].rate);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int emc_load_timings_from_dt(struct tegra_emc *emc,
+ struct device_node *node)
+{
+ struct device_node *child;
+ struct emc_timing *timing;
+ int child_count;
+ int err;
+
+ child_count = of_get_child_count(node);
+ if (!child_count) {
+ dev_err(emc->dev, "no memory timings in: %pOF\n", node);
+ return -EINVAL;
+ }
+
+ emc->timings = devm_kcalloc(emc->dev, child_count, sizeof(*timing),
+ GFP_KERNEL);
+ if (!emc->timings)
+ return -ENOMEM;
+
+ emc->num_timings = child_count;
+ timing = emc->timings;
+
+ for_each_child_of_node(node, child) {
+ err = load_one_timing_from_dt(emc, timing++, child);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
+ }
+
+ sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
+ NULL);
+
+ err = emc_check_mc_timings(emc);
+ if (err)
+ return err;
+
+ dev_info_once(emc->dev,
+ "got %u timings for RAM code %u (min %luMHz max %luMHz)\n",
+ emc->num_timings,
+ tegra_read_ram_code(),
+ emc->timings[0].rate / 1000000,
+ emc->timings[emc->num_timings - 1].rate / 1000000);
+
+ return 0;
+}
+
+static struct device_node *emc_find_node_by_ram_code(struct tegra_emc *emc)
+{
+ struct device *dev = emc->dev;
+ struct device_node *np;
+ u32 value, ram_code;
+ int err;
+
+ if (emc->mrr_error) {
+ dev_warn(dev, "memory timings skipped due to MRR error\n");
+ return NULL;
+ }
+
+ if (of_get_child_count(dev->of_node) == 0) {
+ dev_info_once(dev, "device-tree doesn't have memory timings\n");
+ return NULL;
+ }
+
+ ram_code = tegra_read_ram_code();
+
+ for_each_child_of_node(dev->of_node, np) {
+ err = of_property_read_u32(np, "nvidia,ram-code", &value);
+ if (err || value != ram_code)
+ continue;
+
+ return np;
+ }
+
+ dev_err(dev, "no memory timings for RAM code %u found in device-tree\n",
+ ram_code);
+
+ return NULL;
+}
+
+static int emc_read_lpddr_mode_register(struct tegra_emc *emc,
+ unsigned int emem_dev,
+ unsigned int register_addr,
+ unsigned int *register_data)
+{
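+	/* DEV_SELECTN is active-low: 0b10 selects device 0, 0b01 device 1 */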
+ u32 memory_dev = emem_dev ? 1 : 2;
+ u32 val, mr_mask = 0xff;
+ int err;
+
+ /* clear data-valid interrupt status */
+ writel_relaxed(EMC_MRR_DIVLD_INT, emc->regs + EMC_INTSTATUS);
+
+ /* issue mode register read request */
+ val = FIELD_PREP(EMC_MRR_DEV_SELECTN, memory_dev);
+ val |= FIELD_PREP(EMC_MRR_MRR_MA, register_addr);
+
+ writel_relaxed(val, emc->regs + EMC_MRR);
+
+ /* wait for the LPDDR2 data-valid interrupt */
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, val,
+ val & EMC_MRR_DIVLD_INT,
+ 1, 100);
+ if (err) {
+ dev_err(emc->dev, "mode register %u read failed: %d\n",
+ register_addr, err);
+ emc->mrr_error = true;
+ return err;
+ }
+
+ /* read out mode register data */
+ val = readl_relaxed(emc->regs + EMC_MRR);
+ *register_data = FIELD_GET(EMC_MRR_MRR_DATA, val) & mr_mask;
+
+ return 0;
+}
+
+static void emc_read_lpddr_sdram_info(struct tegra_emc *emc,
+ unsigned int emem_dev)
+{
+ union lpddr2_basic_config4 basic_conf4;
+ unsigned int manufacturer_id;
+ unsigned int revision_id1;
+ unsigned int revision_id2;
+
+	/* these registers are standard for all JEDEC LPDDR memory chips */
+ emc_read_lpddr_mode_register(emc, emem_dev, 5, &manufacturer_id);
+ emc_read_lpddr_mode_register(emc, emem_dev, 6, &revision_id1);
+ emc_read_lpddr_mode_register(emc, emem_dev, 7, &revision_id2);
+ emc_read_lpddr_mode_register(emc, emem_dev, 8, &basic_conf4.value);
+
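+	/*
+	 * Decode JEDEC LPDDR2 MR8 (Basic Configuration 4): the prefetch size
+	 * is 4 >> arch_type (S4 or S2), the density starts at 64 Mbit and
+	 * doubles with each step, and the I/O width starts at 32 bit and
+	 * halves with each step.
+	 */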
+ dev_info(emc->dev, "SDRAM[dev%u]: manufacturer: 0x%x (%s) rev1: 0x%x rev2: 0x%x prefetch: S%u density: %uMbit iowidth: %ubit\n",
+ emem_dev, manufacturer_id,
+ lpddr2_jedec_manufacturer(manufacturer_id),
+ revision_id1, revision_id2,
+ 4 >> basic_conf4.arch_type,
+ 64 << basic_conf4.density,
+ 32 >> basic_conf4.io_width);
+}
+
+static int emc_setup_hw(struct tegra_emc *emc)
+{
+ u32 fbio_cfg5, emc_cfg, emc_dbg, emc_adr_cfg;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT;
+ static bool print_sdram_info_once;
+ enum emc_dram_type dram_type;
+ const char *dram_type_str;
+ unsigned int emem_numdev;
+
+ fbio_cfg5 = readl_relaxed(emc->regs + EMC_FBIO_CFG5);
+ dram_type = fbio_cfg5 & EMC_FBIO_CFG5_DRAM_TYPE_MASK;
+
+ emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);
+
+ /* enable EMC and CAR to handshake on PLL divider/source changes */
+ emc_cfg |= EMC_CLKCHANGE_REQ_ENABLE;
+
+	/* configure the clock-change mode according to the DRAM type */
+ switch (dram_type) {
+ case DRAM_TYPE_LPDDR2:
+ emc_cfg |= EMC_CLKCHANGE_PD_ENABLE;
+ emc_cfg &= ~EMC_CLKCHANGE_SR_ENABLE;
+ break;
+
+ default:
+ emc_cfg &= ~EMC_CLKCHANGE_SR_ENABLE;
+ emc_cfg &= ~EMC_CLKCHANGE_PD_ENABLE;
+ break;
+ }
+
+ writel_relaxed(emc_cfg, emc->regs + EMC_CFG_2);
+
+ /* initialize interrupt */
+ writel_relaxed(intmask, emc->regs + EMC_INTMASK);
+ writel_relaxed(0xffffffff, emc->regs + EMC_INTSTATUS);
+
+ /* ensure that unwanted debug features are disabled */
+ emc_dbg = readl_relaxed(emc->regs + EMC_DBG);
+ emc_dbg |= EMC_DBG_CFG_PRIORITY;
+ emc_dbg &= ~EMC_DBG_READ_MUX_ASSEMBLY;
+ emc_dbg &= ~EMC_DBG_WRITE_MUX_ACTIVE;
+ emc_dbg &= ~EMC_DBG_FORCE_UPDATE;
+ writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
+
+ switch (dram_type) {
+ case DRAM_TYPE_DDR1:
+ dram_type_str = "DDR1";
+ break;
+ case DRAM_TYPE_LPDDR2:
+ dram_type_str = "LPDDR2";
+ break;
+ case DRAM_TYPE_DDR2:
+ dram_type_str = "DDR2";
+ break;
+ case DRAM_TYPE_DDR3:
+ dram_type_str = "DDR3";
+ break;
+ }
+
+ emc_adr_cfg = readl_relaxed(emc->regs + EMC_ADR_CFG);
+ emem_numdev = FIELD_GET(EMC_ADR_CFG_EMEM_NUMDEV, emc_adr_cfg) + 1;
+
+ dev_info_once(emc->dev, "%u %s %s attached\n", emem_numdev,
+ dram_type_str, emem_numdev == 2 ? "devices" : "device");
+
+ if (dram_type == DRAM_TYPE_LPDDR2 && !print_sdram_info_once) {
+ while (emem_numdev--)
+ emc_read_lpddr_sdram_info(emc, emem_numdev);
+
+ print_sdram_info_once = true;
+ }
+
+ return 0;
+}
+
+static long emc_round_rate(unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
+ void *arg)
+{
+ struct emc_timing *timing = NULL;
+ struct tegra_emc *emc = arg;
+ unsigned int i;
+
+ if (!emc->num_timings)
+ return clk_get_rate(emc->clk);
+
+ min_rate = min(min_rate, emc->timings[emc->num_timings - 1].rate);
+
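+	/*
+	 * Pick the first timing that satisfies the requested rate, clamping
+	 * the result to the [min_rate, max_rate] range where possible.
+	 */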
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate < rate && i != emc->num_timings - 1)
+ continue;
+
+ if (emc->timings[i].rate > max_rate) {
+ i = max(i, 1u) - 1;
+
+ if (emc->timings[i].rate < min_rate)
+ break;
+ }
+
+ if (emc->timings[i].rate < min_rate)
+ continue;
+
+ timing = &emc->timings[i];
+ break;
+ }
+
+ if (!timing) {
+ dev_err(emc->dev, "no timing for rate %lu min %lu max %lu\n",
+ rate, min_rate, max_rate);
+ return -EINVAL;
+ }
+
+ return timing->rate;
+}
+
+static void tegra_emc_rate_requests_init(struct tegra_emc *emc)
+{
+ unsigned int i;
+
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++) {
+ emc->requested_rate[i].min_rate = 0;
+ emc->requested_rate[i].max_rate = ULONG_MAX;
+ }
+}
+
+static int emc_request_rate(struct tegra_emc *emc,
+ unsigned long new_min_rate,
+ unsigned long new_max_rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = emc->requested_rate;
+ unsigned long min_rate = 0, max_rate = ULONG_MAX;
+ unsigned int i;
+ int err;
+
+ /* select minimum and maximum rates among the requested rates */
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++, req++) {
+ if (i == type) {
+ min_rate = max(new_min_rate, min_rate);
+ max_rate = min(new_max_rate, max_rate);
+ } else {
+ min_rate = max(req->min_rate, min_rate);
+ max_rate = min(req->max_rate, max_rate);
+ }
+ }
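+
+	/*
+	 * Example (illustrative): with a debugfs floor of 204 MHz and an ICC
+	 * floor of 400 MHz, the aggregated minimum becomes 400 MHz, which is
+	 * the rate handed to the OPP layer below.
+	 */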
+
+ if (min_rate > max_rate) {
+ dev_err_ratelimited(emc->dev, "%s: type %u: out of range: %lu %lu\n",
+ __func__, type, min_rate, max_rate);
+ return -ERANGE;
+ }
+
+ /*
+	 * EMC rate changes should go via the OPP API because it manages
+	 * voltage changes.
+ */
+ err = dev_pm_opp_set_rate(emc->dev, min_rate);
+ if (err)
+ return err;
+
+ emc->requested_rate[type].min_rate = new_min_rate;
+ emc->requested_rate[type].max_rate = new_max_rate;
+
+ return 0;
+}
+
+static int emc_set_min_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, rate, req->max_rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
+static int emc_set_max_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, req->min_rate, rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
+/*
+ * debugfs interface
+ *
+ * The memory controller driver exposes some files in debugfs that can be used
+ * to control the EMC frequency. The top-level directory can be found here:
+ *
+ * /sys/kernel/debug/emc
+ *
+ * It contains the following files:
+ *
+ * - available_rates: This file contains a list of valid, space-separated
+ * EMC frequencies.
+ *
+ * - min_rate: Writing a value to this file sets the given frequency as the
+ * floor of the permitted range. If this is higher than the currently
+ * configured EMC frequency, this will cause the frequency to be
+ * increased so that it stays within the valid range.
+ *
+ * - max_rate: Similarly to the min_rate file, writing a value to this file
+ * sets the given frequency as the ceiling of the permitted range. If
+ * the value is lower than the currently configured EMC frequency, this
+ * will cause the frequency to be decreased so that it stays within the
+ * valid range.
+ */
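+/*
+ * Usage sketch (illustrative; the available rates depend on the memory
+ * timings provided in the board's device-tree):
+ *
+ *   # cat /sys/kernel/debug/emc/available_rates
+ *   25500000 51000000 102000000 204000000 400000000 800000000
+ *   # echo 204000000 > /sys/kernel/debug/emc/min_rate
+ *   # echo 800000000 > /sys/kernel/debug/emc/max_rate
+ */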
+
+static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++)
+ if (rate == emc->timings[i].rate)
+ return true;
+
+ return false;
+}
+
+static int tegra_emc_debug_available_rates_show(struct seq_file *s, void *data)
+{
+ struct tegra_emc *emc = s->private;
+ const char *prefix = "";
+ unsigned int i;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ seq_printf(s, "%s%lu", prefix, emc->timings[i].rate);
+ prefix = " ";
+ }
+
+ seq_puts(s, "\n");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(tegra_emc_debug_available_rates);
+
+static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
+{
+ struct tegra_emc *emc = data;
+
+ *rate = emc->debugfs.min_rate;
+
+ return 0;
+}
+
+static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
+{
+ struct tegra_emc *emc = data;
+ int err;
+
+ if (!tegra_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.min_rate = rate;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
+ tegra_emc_debug_min_rate_get,
+ tegra_emc_debug_min_rate_set, "%llu\n");
+
+static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
+{
+ struct tegra_emc *emc = data;
+
+ *rate = emc->debugfs.max_rate;
+
+ return 0;
+}
+
+static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
+{
+ struct tegra_emc *emc = data;
+ int err;
+
+ if (!tegra_emc_validate_rate(emc, rate))
+ return -EINVAL;
+
+ err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG);
+ if (err < 0)
+ return err;
+
+ emc->debugfs.max_rate = rate;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
+ tegra_emc_debug_max_rate_get,
+ tegra_emc_debug_max_rate_set, "%llu\n");
+
+static void tegra_emc_debugfs_init(struct tegra_emc *emc)
+{
+ struct device *dev = emc->dev;
+ unsigned int i;
+ int err;
+
+ emc->debugfs.min_rate = ULONG_MAX;
+ emc->debugfs.max_rate = 0;
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate < emc->debugfs.min_rate)
+ emc->debugfs.min_rate = emc->timings[i].rate;
+
+ if (emc->timings[i].rate > emc->debugfs.max_rate)
+ emc->debugfs.max_rate = emc->timings[i].rate;
+ }
+
+ if (!emc->num_timings) {
+ emc->debugfs.min_rate = clk_get_rate(emc->clk);
+ emc->debugfs.max_rate = emc->debugfs.min_rate;
+ }
+
+ err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
+ emc->debugfs.max_rate);
+ if (err < 0) {
+ dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
+ emc->debugfs.min_rate, emc->debugfs.max_rate,
+ emc->clk);
+ }
+
+ emc->debugfs.root = debugfs_create_dir("emc", NULL);
+
+ debugfs_create_file("available_rates", 0444, emc->debugfs.root,
+ emc, &tegra_emc_debug_available_rates_fops);
+ debugfs_create_file("min_rate", 0644, emc->debugfs.root,
+ emc, &tegra_emc_debug_min_rate_fops);
+ debugfs_create_file("max_rate", 0644, emc->debugfs.root,
+ emc, &tegra_emc_debug_max_rate_fops);
+}
+
+static inline struct tegra_emc *
+to_tegra_emc_provider(struct icc_provider *provider)
+{
+ return container_of(provider, struct tegra_emc, provider);
+}
+
+static struct icc_node_data *
+emc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct icc_provider *provider = data;
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ /* External Memory is the only possible ICC route */
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ if (node->id != TEGRA_ICC_EMEM)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+		 * SRC and DST nodes should have a matching TAG in order to
+		 * have it set by default for a requested path.
+ */
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ ndata->node = node;
+
+ return ndata;
+ }
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+static int emc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct tegra_emc *emc = to_tegra_emc_provider(dst->provider);
+ unsigned long long peak_bw = icc_units_to_bps(dst->peak_bw);
+ unsigned long long avg_bw = icc_units_to_bps(dst->avg_bw);
+ unsigned long long rate = max(avg_bw, peak_bw);
+ const unsigned int dram_data_bus_width_bytes = 4;
+ const unsigned int ddr = 2;
+ int err;
+
+ /*
+	 * The Tegra30 EMC runs at the SDRAM bus clock rate. This means that
+	 * the EMC clock rate is half the peak data rate because data is
+	 * sampled on both EMC clock edges.
+ */
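+	/*
+	 * Worked example (illustrative): a 3.2 GB/s peak bandwidth request
+	 * on the 32-bit (4-byte wide) DDR bus translates to an EMC clock
+	 * rate of 3.2e9 / (2 * 4) = 400 MHz.
+	 */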
+ do_div(rate, ddr * dram_data_bus_width_bytes);
+ rate = min_t(u64, rate, U32_MAX);
+
+ err = emc_set_min_rate(emc, rate, EMC_RATE_ICC);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+{
+ const struct tegra_mc_soc *soc = emc->mc->soc;
+ struct icc_node *node;
+ int err;
+
+ emc->provider.dev = emc->dev;
+ emc->provider.set = emc_icc_set;
+ emc->provider.data = &emc->provider;
+ emc->provider.aggregate = soc->icc_ops->aggregate;
+ emc->provider.xlate_extended = emc_of_icc_xlate_extended;
+
+ icc_provider_init(&emc->provider);
+
+ /* create External Memory Controller node */
+ node = icc_node_create(TEGRA_ICC_EMC);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto err_msg;
+ }
+
+ node->name = "External Memory Controller";
+ icc_node_add(node, &emc->provider);
+
+ /* link External Memory Controller to External Memory (DRAM) */
+ err = icc_link_create(node, TEGRA_ICC_EMEM);
+ if (err)
+ goto remove_nodes;
+
+ /* create External Memory node */
+ node = icc_node_create(TEGRA_ICC_EMEM);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto remove_nodes;
+ }
+
+ node->name = "External Memory (DRAM)";
+ icc_node_add(node, &emc->provider);
+
+ err = icc_provider_register(&emc->provider);
+ if (err)
+ goto remove_nodes;
+
+ return 0;
+
+remove_nodes:
+ icc_nodes_remove(&emc->provider);
+err_msg:
+ dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
+
+ return err;
+}
+
+static void devm_tegra_emc_unset_callback(void *data)
+{
+ tegra20_clk_set_emc_round_callback(NULL, NULL);
+}
+
+static void devm_tegra_emc_unreg_clk_notifier(void *data)
+{
+ struct tegra_emc *emc = data;
+
+ clk_notifier_unregister(emc->clk, &emc->clk_nb);
+}
+
+static int tegra_emc_init_clk(struct tegra_emc *emc)
+{
+ int err;
+
+ tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
+
+ err = devm_add_action_or_reset(emc->dev, devm_tegra_emc_unset_callback,
+ NULL);
+ if (err)
+ return err;
+
+ emc->clk = devm_clk_get(emc->dev, NULL);
+ if (IS_ERR(emc->clk)) {
+ dev_err(emc->dev, "failed to get EMC clock: %pe\n", emc->clk);
+ return PTR_ERR(emc->clk);
+ }
+
+ err = clk_notifier_register(emc->clk, &emc->clk_nb);
+ if (err) {
+ dev_err(emc->dev, "failed to register clk notifier: %d\n", err);
+ return err;
+ }
+
+ err = devm_add_action_or_reset(emc->dev,
+ devm_tegra_emc_unreg_clk_notifier, emc);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra_emc_probe(struct platform_device *pdev)
+{
+ struct tegra_core_opp_params opp_params = {};
+ struct device_node *np;
+ struct tegra_emc *emc;
+ int err;
+
+ emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
+ if (!emc)
+ return -ENOMEM;
+
+ emc->mc = devm_tegra_memory_controller_get(&pdev->dev);
+ if (IS_ERR(emc->mc))
+ return PTR_ERR(emc->mc);
+
+ mutex_init(&emc->rate_lock);
+ emc->clk_nb.notifier_call = emc_clk_change_notify;
+ emc->dev = &pdev->dev;
+
+ emc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(emc->regs))
+ return PTR_ERR(emc->regs);
+
+ err = emc_setup_hw(emc);
+ if (err)
+ return err;
+
+ np = emc_find_node_by_ram_code(emc);
+ if (np) {
+ err = emc_load_timings_from_dt(emc, np);
+ of_node_put(np);
+ if (err)
+ return err;
+ }
+
+ err = platform_get_irq(pdev, 0);
+ if (err < 0)
+ return err;
+
+ emc->irq = err;
+
+ err = devm_request_irq(&pdev->dev, emc->irq, tegra_emc_isr, 0,
+ dev_name(&pdev->dev), emc);
+ if (err) {
+ dev_err(&pdev->dev, "failed to request irq: %d\n", err);
+ return err;
+ }
+
+ err = tegra_emc_init_clk(emc);
+ if (err)
+ return err;
+
+ opp_params.init_state = true;
+
+ err = devm_tegra_core_dev_init_opp_table(&pdev->dev, &opp_params);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, emc);
+ tegra_emc_rate_requests_init(emc);
+ tegra_emc_debugfs_init(emc);
+ tegra_emc_interconnect_init(emc);
+
+ /*
+	 * Don't allow the kernel module to be unloaded. Unloading adds extra
+	 * complexity that isn't really worth the effort in the case of this
+	 * driver.
+ */
+ try_module_get(THIS_MODULE);
+
+ return 0;
+}
+
+static int tegra_emc_suspend(struct device *dev)
+{
+ struct tegra_emc *emc = dev_get_drvdata(dev);
+ int err;
+
+ /* take exclusive control over the clock's rate */
+ err = clk_rate_exclusive_get(emc->clk);
+ if (err) {
+ dev_err(emc->dev, "failed to acquire clk: %d\n", err);
+ return err;
+ }
+
+	/* suspending in a bad state will hang the machine */
+ if (WARN(emc->bad_state, "hardware in a bad state\n"))
+ return -EINVAL;
+
+ emc->bad_state = true;
+
+ return 0;
+}
+
+static int tegra_emc_resume(struct device *dev)
+{
+ struct tegra_emc *emc = dev_get_drvdata(dev);
+
+ emc_setup_hw(emc);
+ emc->bad_state = false;
+
+ clk_rate_exclusive_put(emc->clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_emc_pm_ops = {
+ .suspend = tegra_emc_suspend,
+ .resume = tegra_emc_resume,
+};
+
+static const struct of_device_id tegra_emc_of_match[] = {
+ { .compatible = "nvidia,tegra30-emc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tegra_emc_of_match);
+
+static struct platform_driver tegra_emc_driver = {
+ .probe = tegra_emc_probe,
+ .driver = {
+ .name = "tegra30-emc",
+ .of_match_table = tegra_emc_of_match,
+ .pm = &tegra_emc_pm_ops,
+ .suppress_bind_attrs = true,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(tegra_emc_driver);
+
+MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra30 EMC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
new file mode 100644
index 0000000000..06f8b35e0a
--- /dev/null
+++ b/drivers/memory/tegra/tegra30.c
@@ -0,0 +1,1403 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/memory/tegra30-mc.h>
+
+#include "mc.h"
+
+static const unsigned long tegra30_mc_emem_regs[] = {
+ MC_EMEM_ARB_CFG,
+ MC_EMEM_ARB_OUTSTANDING_REQ,
+ MC_EMEM_ARB_TIMING_RCD,
+ MC_EMEM_ARB_TIMING_RP,
+ MC_EMEM_ARB_TIMING_RC,
+ MC_EMEM_ARB_TIMING_RAS,
+ MC_EMEM_ARB_TIMING_FAW,
+ MC_EMEM_ARB_TIMING_RRD,
+ MC_EMEM_ARB_TIMING_RAP2PRE,
+ MC_EMEM_ARB_TIMING_WAP2PRE,
+ MC_EMEM_ARB_TIMING_R2R,
+ MC_EMEM_ARB_TIMING_W2W,
+ MC_EMEM_ARB_TIMING_R2W,
+ MC_EMEM_ARB_TIMING_W2R,
+ MC_EMEM_ARB_DA_TURNS,
+ MC_EMEM_ARB_DA_COVERS,
+ MC_EMEM_ARB_MISC0,
+ MC_EMEM_ARB_RING1_THROTTLE,
+};
+
+static const struct tegra_mc_client tegra30_mc_clients[] = {
+ {
+ .id = 0x00,
+ .name = "ptcr",
+ .swgroup = TEGRA_SWGROUP_PTC,
+ .regs = {
+ .la = {
+ .reg = 0x34c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0,
+ },
+ },
+ .fifo_size = 16 * 2,
+ }, {
+ .id = 0x01,
+ .name = "display0a",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ .fifo_size = 16 * 128,
+ }, {
+ .id = 0x02,
+ .name = "display0ab",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ .fifo_size = 16 * 128,
+ }, {
+ .id = 0x03,
+ .name = "display0b",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x2e8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x04,
+ .name = "display0bb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x2f4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x05,
+ .name = "display0c",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x2ec,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ .fifo_size = 16 * 128,
+ }, {
+ .id = 0x06,
+ .name = "display0cb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 6,
+ },
+ .la = {
+ .reg = 0x2f8,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ .fifo_size = 16 * 128,
+ }, {
+ .id = 0x07,
+ .name = "display1b",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 7,
+ },
+ .la = {
+ .reg = 0x2ec,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x08,
+ .name = "display1bb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 8,
+ },
+ .la = {
+ .reg = 0x2f8,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x4e,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x09,
+ .name = "eppup",
+ .swgroup = TEGRA_SWGROUP_EPP,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 9,
+ },
+ .la = {
+ .reg = 0x300,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x17,
+ },
+ },
+ .fifo_size = 16 * 8,
+ }, {
+ .id = 0x0a,
+ .name = "g2pr",
+ .swgroup = TEGRA_SWGROUP_G2,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x308,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x09,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x0b,
+ .name = "g2sr",
+ .swgroup = TEGRA_SWGROUP_G2,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x308,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x09,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x0c,
+ .name = "mpeunifbr",
+ .swgroup = TEGRA_SWGROUP_MPE,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ .fifo_size = 16 * 8,
+ }, {
+ .id = 0x0d,
+ .name = "viruv",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x364,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x2c,
+ },
+ },
+ .fifo_size = 16 * 8,
+ }, {
+ .id = 0x0e,
+ .name = "afir",
+ .swgroup = TEGRA_SWGROUP_AFI,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x2e0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x10,
+ },
+ },
+ .fifo_size = 16 * 32,
+ }, {
+ .id = 0x0f,
+ .name = "avpcarm7r",
+ .swgroup = TEGRA_SWGROUP_AVPC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ .fifo_size = 16 * 2,
+ }, {
+ .id = 0x10,
+ .name = "displayhc",
+ .swgroup = TEGRA_SWGROUP_DC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x2f0,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ .fifo_size = 16 * 2,
+ }, {
+ .id = 0x11,
+ .name = "displayhcb",
+ .swgroup = TEGRA_SWGROUP_DCB,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2fc,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ .fifo_size = 16 * 2,
+ }, {
+ .id = 0x12,
+ .name = "fdcdrd",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x334,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0a,
+ },
+ },
+ .fifo_size = 16 * 48,
+ }, {
+ .id = 0x13,
+ .name = "fdcdrd2",
+ .swgroup = TEGRA_SWGROUP_NV2,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 19,
+ },
+ .la = {
+ .reg = 0x33c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0a,
+ },
+ },
+ .fifo_size = 16 * 48,
+ }, {
+ .id = 0x14,
+ .name = "g2dr",
+ .swgroup = TEGRA_SWGROUP_G2,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x30c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0a,
+ },
+ },
+ .fifo_size = 16 * 48,
+ }, {
+ .id = 0x15,
+ .name = "hdar",
+ .swgroup = TEGRA_SWGROUP_HDA,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ .fifo_size = 16 * 16,
+ }, {
+ .id = 0x16,
+ .name = "host1xdmar",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x05,
+ },
+ },
+ .fifo_size = 16 * 16,
+ }, {
+ .id = 0x17,
+ .name = "host1xr",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x310,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x50,
+ },
+ },
+ .fifo_size = 16 * 8,
+ }, {
+ .id = 0x18,
+ .name = "idxsrd",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 24,
+ },
+ .la = {
+ .reg = 0x334,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x13,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x19,
+ .name = "idxsrd2",
+ .swgroup = TEGRA_SWGROUP_NV2,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 25,
+ },
+ .la = {
+ .reg = 0x33c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x13,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x1a,
+ .name = "mpe_ipred",
+ .swgroup = TEGRA_SWGROUP_MPE,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 26,
+ },
+ .la = {
+ .reg = 0x328,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x80,
+ },
+ },
+ .fifo_size = 16 * 2,
+ }, {
+ .id = 0x1b,
+ .name = "mpeamemrd",
+ .swgroup = TEGRA_SWGROUP_MPE,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 27,
+ },
+ .la = {
+ .reg = 0x32c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x42,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x1c,
+ .name = "mpecsrd",
+ .swgroup = TEGRA_SWGROUP_MPE,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x32c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ .fifo_size = 16 * 8,
+ }, {
+ .id = 0x1d,
+ .name = "ppcsahbdmar",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x10,
+ },
+ },
+ .fifo_size = 16 * 2,
+ }, {
+ .id = 0x1e,
+ .name = "ppcsahbslvr",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x344,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x12,
+ },
+ },
+ .fifo_size = 16 * 8,
+ }, {
+ .id = 0x1f,
+ .name = "satar",
+ .swgroup = TEGRA_SWGROUP_SATA,
+ .regs = {
+ .smmu = {
+ .reg = 0x228,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x350,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x33,
+ },
+ },
+ .fifo_size = 16 * 32,
+ }, {
+ .id = 0x20,
+ .name = "texsrd",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x338,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x13,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x21,
+ .name = "texsrd2",
+ .swgroup = TEGRA_SWGROUP_NV2,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x340,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x13,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x22,
+ .name = "vdebsevr",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 2,
+ },
+ .la = {
+ .reg = 0x354,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ .fifo_size = 16 * 8,
+ }, {
+ .id = 0x23,
+ .name = "vdember",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 3,
+ },
+ .la = {
+ .reg = 0x354,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xd0,
+ },
+ },
+ .fifo_size = 16 * 4,
+ }, {
+ .id = 0x24,
+ .name = "vdemcer",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 4,
+ },
+ .la = {
+ .reg = 0x358,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x2a,
+ },
+ },
+ .fifo_size = 16 * 16,
+ }, {
+ .id = 0x25,
+ .name = "vdetper",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 5,
+ },
+ .la = {
+ .reg = 0x358,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x74,
+ },
+ },
+ .fifo_size = 16 * 16,
+ }, {
+ .id = 0x26,
+ .name = "mpcorelpr",
+ .swgroup = TEGRA_SWGROUP_MPCORELP,
+ .regs = {
+ .la = {
+ .reg = 0x324,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ .fifo_size = 16 * 14,
+ }, {
+ .id = 0x27,
+ .name = "mpcorer",
+ .swgroup = TEGRA_SWGROUP_MPCORE,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x04,
+ },
+ },
+ .fifo_size = 16 * 14,
+ }, {
+ .id = 0x28,
+ .name = "eppu",
+ .swgroup = TEGRA_SWGROUP_EPP,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 8,
+ },
+ .la = {
+ .reg = 0x300,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x6c,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x29,
+ .name = "eppv",
+ .swgroup = TEGRA_SWGROUP_EPP,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 9,
+ },
+ .la = {
+ .reg = 0x304,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x6c,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x2a,
+ .name = "eppy",
+ .swgroup = TEGRA_SWGROUP_EPP,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 10,
+ },
+ .la = {
+ .reg = 0x304,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x6c,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x2b,
+ .name = "mpeunifbw",
+ .swgroup = TEGRA_SWGROUP_MPE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 11,
+ },
+ .la = {
+ .reg = 0x330,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x13,
+ },
+ },
+ .fifo_size = 16 * 8,
+ }, {
+ .id = 0x2c,
+ .name = "viwsb",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 12,
+ },
+ .la = {
+ .reg = 0x364,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x12,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x2d,
+ .name = "viwu",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 13,
+ },
+ .la = {
+ .reg = 0x368,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xb2,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x2e,
+ .name = "viwv",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 14,
+ },
+ .la = {
+ .reg = 0x368,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xb2,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x2f,
+ .name = "viwy",
+ .swgroup = TEGRA_SWGROUP_VI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 15,
+ },
+ .la = {
+ .reg = 0x36c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x12,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x30,
+ .name = "g2dw",
+ .swgroup = TEGRA_SWGROUP_G2,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 16,
+ },
+ .la = {
+ .reg = 0x30c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x9,
+ },
+ },
+ .fifo_size = 16 * 128,
+ }, {
+ .id = 0x31,
+ .name = "afiw",
+ .swgroup = TEGRA_SWGROUP_AFI,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 17,
+ },
+ .la = {
+ .reg = 0x2e0,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0c,
+ },
+ },
+ .fifo_size = 16 * 32,
+ }, {
+ .id = 0x32,
+ .name = "avpcarm7w",
+ .swgroup = TEGRA_SWGROUP_AVPC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 18,
+ },
+ .la = {
+ .reg = 0x2e4,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0e,
+ },
+ },
+ .fifo_size = 16 * 2,
+ }, {
+ .id = 0x33,
+ .name = "fdcdwr",
+ .swgroup = TEGRA_SWGROUP_NV,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 19,
+ },
+ .la = {
+ .reg = 0x338,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0a,
+ },
+ },
+ .fifo_size = 16 * 48,
+ }, {
+ .id = 0x34,
+ .name = "fdcdwr2",
+ .swgroup = TEGRA_SWGROUP_NV2,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 20,
+ },
+ .la = {
+ .reg = 0x340,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0a,
+ },
+ },
+ .fifo_size = 16 * 48,
+ }, {
+ .id = 0x35,
+ .name = "hdaw",
+ .swgroup = TEGRA_SWGROUP_HDA,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 21,
+ },
+ .la = {
+ .reg = 0x318,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ .fifo_size = 16 * 16,
+ }, {
+ .id = 0x36,
+ .name = "host1xw",
+ .swgroup = TEGRA_SWGROUP_HC,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 22,
+ },
+ .la = {
+ .reg = 0x314,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x10,
+ },
+ },
+ .fifo_size = 16 * 32,
+ }, {
+ .id = 0x37,
+ .name = "ispw",
+ .swgroup = TEGRA_SWGROUP_ISP,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 23,
+ },
+ .la = {
+ .reg = 0x31c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ .fifo_size = 16 * 64,
+ }, {
+ .id = 0x38,
+ .name = "mpcorelpw",
+ .swgroup = TEGRA_SWGROUP_MPCORELP,
+ .regs = {
+ .la = {
+ .reg = 0x324,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0e,
+ },
+ },
+ .fifo_size = 16 * 24,
+ }, {
+ .id = 0x39,
+ .name = "mpcorew",
+ .swgroup = TEGRA_SWGROUP_MPCORE,
+ .regs = {
+ .la = {
+ .reg = 0x320,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x0e,
+ },
+ },
+ .fifo_size = 16 * 24,
+ }, {
+ .id = 0x3a,
+ .name = "mpecswr",
+ .swgroup = TEGRA_SWGROUP_MPE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 26,
+ },
+ .la = {
+ .reg = 0x330,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ .fifo_size = 16 * 8,
+ }, {
+ .id = 0x3b,
+ .name = "ppcsahbdmaw",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 27,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x10,
+ },
+ },
+ .fifo_size = 16 * 2,
+ }, {
+ .id = 0x3c,
+ .name = "ppcsahbslvw",
+ .swgroup = TEGRA_SWGROUP_PPCS,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 28,
+ },
+ .la = {
+ .reg = 0x348,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x06,
+ },
+ },
+ .fifo_size = 16 * 4,
+ }, {
+ .id = 0x3d,
+ .name = "sataw",
+ .swgroup = TEGRA_SWGROUP_SATA,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 29,
+ },
+ .la = {
+ .reg = 0x350,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x33,
+ },
+ },
+ .fifo_size = 16 * 32,
+ }, {
+ .id = 0x3e,
+ .name = "vdebsevw",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 30,
+ },
+ .la = {
+ .reg = 0x35c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ .fifo_size = 16 * 4,
+ }, {
+ .id = 0x3f,
+ .name = "vdedbgw",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x22c,
+ .bit = 31,
+ },
+ .la = {
+ .reg = 0x35c,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0xff,
+ },
+ },
+ .fifo_size = 16 * 16,
+ }, {
+ .id = 0x40,
+ .name = "vdembew",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 0,
+ },
+ .la = {
+ .reg = 0x360,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x42,
+ },
+ },
+ .fifo_size = 16 * 2,
+ }, {
+ .id = 0x41,
+ .name = "vdetpmw",
+ .swgroup = TEGRA_SWGROUP_VDE,
+ .regs = {
+ .smmu = {
+ .reg = 0x230,
+ .bit = 1,
+ },
+ .la = {
+ .reg = 0x360,
+ .shift = 16,
+ .mask = 0xff,
+ .def = 0x2a,
+ },
+ },
+ .fifo_size = 16 * 16,
+ },
+};
+
+static const struct tegra_smmu_swgroup tegra30_swgroups[] = {
+ { .name = "dc", .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 },
+ { .name = "dcb", .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 },
+ { .name = "epp", .swgroup = TEGRA_SWGROUP_EPP, .reg = 0x248 },
+ { .name = "g2", .swgroup = TEGRA_SWGROUP_G2, .reg = 0x24c },
+ { .name = "mpe", .swgroup = TEGRA_SWGROUP_MPE, .reg = 0x264 },
+ { .name = "vi", .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 },
+ { .name = "afi", .swgroup = TEGRA_SWGROUP_AFI, .reg = 0x238 },
+ { .name = "avpc", .swgroup = TEGRA_SWGROUP_AVPC, .reg = 0x23c },
+ { .name = "nv", .swgroup = TEGRA_SWGROUP_NV, .reg = 0x268 },
+ { .name = "nv2", .swgroup = TEGRA_SWGROUP_NV2, .reg = 0x26c },
+ { .name = "hda", .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 },
+ { .name = "hc", .swgroup = TEGRA_SWGROUP_HC, .reg = 0x250 },
+ { .name = "ppcs", .swgroup = TEGRA_SWGROUP_PPCS, .reg = 0x270 },
+ { .name = "sata", .swgroup = TEGRA_SWGROUP_SATA, .reg = 0x278 },
+ { .name = "vde", .swgroup = TEGRA_SWGROUP_VDE, .reg = 0x27c },
+ { .name = "isp", .swgroup = TEGRA_SWGROUP_ISP, .reg = 0x258 },
+};
+
+static const unsigned int tegra30_group_drm[] = {
+ TEGRA_SWGROUP_DC,
+ TEGRA_SWGROUP_DCB,
+ TEGRA_SWGROUP_G2,
+ TEGRA_SWGROUP_NV,
+ TEGRA_SWGROUP_NV2,
+};
+
+static const struct tegra_smmu_group_soc tegra30_groups[] = {
+ {
+ .name = "drm",
+ .swgroups = tegra30_group_drm,
+ .num_swgroups = ARRAY_SIZE(tegra30_group_drm),
+ },
+};
+
+static const struct tegra_smmu_soc tegra30_smmu_soc = {
+ .clients = tegra30_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra30_mc_clients),
+ .swgroups = tegra30_swgroups,
+ .num_swgroups = ARRAY_SIZE(tegra30_swgroups),
+ .groups = tegra30_groups,
+ .num_groups = ARRAY_SIZE(tegra30_groups),
+ .supports_round_robin_arbitration = false,
+ .supports_request_limit = false,
+ .num_tlb_lines = 16,
+ .num_asids = 4,
+};
+
+#define TEGRA30_MC_RESET(_name, _control, _status, _bit) \
+ { \
+ .name = #_name, \
+ .id = TEGRA30_MC_RESET_##_name, \
+ .control = _control, \
+ .status = _status, \
+ .bit = _bit, \
+ }
+
+static const struct tegra_mc_reset tegra30_mc_resets[] = {
+ TEGRA30_MC_RESET(AFI, 0x200, 0x204, 0),
+ TEGRA30_MC_RESET(AVPC, 0x200, 0x204, 1),
+ TEGRA30_MC_RESET(DC, 0x200, 0x204, 2),
+ TEGRA30_MC_RESET(DCB, 0x200, 0x204, 3),
+ TEGRA30_MC_RESET(EPP, 0x200, 0x204, 4),
+ TEGRA30_MC_RESET(2D, 0x200, 0x204, 5),
+ TEGRA30_MC_RESET(HC, 0x200, 0x204, 6),
+ TEGRA30_MC_RESET(HDA, 0x200, 0x204, 7),
+ TEGRA30_MC_RESET(ISP, 0x200, 0x204, 8),
+ TEGRA30_MC_RESET(MPCORE, 0x200, 0x204, 9),
+ TEGRA30_MC_RESET(MPCORELP, 0x200, 0x204, 10),
+ TEGRA30_MC_RESET(MPE, 0x200, 0x204, 11),
+ TEGRA30_MC_RESET(3D, 0x200, 0x204, 12),
+ TEGRA30_MC_RESET(3D2, 0x200, 0x204, 13),
+ TEGRA30_MC_RESET(PPCS, 0x200, 0x204, 14),
+ TEGRA30_MC_RESET(SATA, 0x200, 0x204, 15),
+ TEGRA30_MC_RESET(VDE, 0x200, 0x204, 16),
+ TEGRA30_MC_RESET(VI, 0x200, 0x204, 17),
+};
+
+static void tegra30_mc_tune_client_latency(struct tegra_mc *mc,
+ const struct tegra_mc_client *client,
+ unsigned int bandwidth_mbytes_sec)
+{
+ u32 arb_tolerance_compensation_nsec, arb_tolerance_compensation_div;
+ unsigned int fifo_size = client->fifo_size;
+ u32 arb_nsec, la_ticks, value;
+
+ /* see 18.4.1 Client Configuration in Tegra3 TRM v03p */
+ if (bandwidth_mbytes_sec)
+ arb_nsec = fifo_size * NSEC_PER_USEC / bandwidth_mbytes_sec;
+ else
+ arb_nsec = U32_MAX;
+
+	/*
+	 * Latency allowance should be set with consideration for the module's
+	 * latency tolerance and internal buffering capabilities.
+	 *
+	 * Display memory clients use isochronous transfers and have very low
+	 * tolerance to belated transfers. Hence we need to compensate for the
+	 * memory arbitration imperfection to prevent a FIFO underflow
+	 * condition when the memory bus is busy.
+	 *
+	 * VI clients also need stronger compensation.
+	 */
+ switch (client->swgroup) {
+ case TEGRA_SWGROUP_MPCORE:
+ case TEGRA_SWGROUP_PTC:
+ /*
+ * We always want lower latency for these clients, hence
+ * don't touch them.
+ */
+ return;
+
+ case TEGRA_SWGROUP_DC:
+ case TEGRA_SWGROUP_DCB:
+ arb_tolerance_compensation_nsec = 1050;
+ arb_tolerance_compensation_div = 2;
+ break;
+
+ case TEGRA_SWGROUP_VI:
+ arb_tolerance_compensation_nsec = 1050;
+ arb_tolerance_compensation_div = 1;
+ break;
+
+ default:
+ arb_tolerance_compensation_nsec = 150;
+ arb_tolerance_compensation_div = 1;
+ break;
+ }
+
+ if (arb_nsec > arb_tolerance_compensation_nsec)
+ arb_nsec -= arb_tolerance_compensation_nsec;
+ else
+ arb_nsec = 0;
+
+ arb_nsec /= arb_tolerance_compensation_div;
+
+ /*
+ * Latency allowance is a number of ticks a request from a particular
+ * client may wait in the EMEM arbiter before it becomes a high-priority
+ * request.
+ */
+ la_ticks = arb_nsec / mc->tick;
+ la_ticks = min(la_ticks, client->regs.la.mask);
+
+ value = mc_readl(mc, client->regs.la.reg);
+ value &= ~(client->regs.la.mask << client->regs.la.shift);
+ value |= la_ticks << client->regs.la.shift;
+ mc_writel(mc, value, client->regs.la.reg);
+}
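+
+/*
+ * Worked example (illustrative numbers only): a display client with
+ * fifo_size = 16 * 128 = 2048 bytes requesting 2048 MB/s yields
+ * arb_nsec = 2048 * 1000 / 2048 = 1000 ns. The display compensation then
+ * gives ((1000 - 1050 -> clamped to 0) / 2) = 0 ns, i.e. such a heavily
+ * loaded isochronous client is promoted to high priority immediately.
+ */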
+
+static int tegra30_mc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(src->provider);
+ const struct tegra_mc_client *client = &mc->soc->clients[src->id];
+ u64 peak_bandwidth = icc_units_to_bps(src->peak_bw);
+
+	/*
+	 * Skip pre-initialization that is done by icc_node_add(), which sets
+	 * bandwidth to maximum for all clients before drivers are loaded.
+	 *
+	 * This doesn't make sense for us because we don't have drivers for all
+	 * clients and it's okay to keep the configuration left by the
+	 * bootloader during boot, at least for today.
+	 */
+ if (src == dst)
+ return 0;
+
+ /* convert bytes/sec to megabytes/sec */
+ do_div(peak_bandwidth, 1000000);
+
+ tegra30_mc_tune_client_latency(mc, client, peak_bandwidth);
+
+ return 0;
+}
+
+static int tegra30_mc_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ /*
+ * ISO clients need to reserve extra bandwidth up-front because
+	 * there could be high bandwidth pressure during the initial filling
+	 * of the client's FIFO buffers. Secondly, we need to account for
+	 * imperfections of the memory subsystem.
+ */
+ if (tag & TEGRA_MC_ICC_TAG_ISO)
+ peak_bw = tegra_mc_scale_percents(peak_bw, 400);
+
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
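+
+/*
+ * Example: assuming tegra_mc_scale_percents(x, 400) returns x scaled to
+ * 400%, an ISO request with peak_bw = 100 MB/s is aggregated as
+ * 400 MB/s, while a non-ISO request contributes its raw peak value.
+ */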
+
+static struct icc_node_data *
+tegra30_mc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
+ const struct tegra_mc_client *client;
+ unsigned int i, idx = spec->args[0];
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ list_for_each_entry(node, &mc->provider.nodes, node_list) {
+ if (node->id != idx)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ client = &mc->soc->clients[idx];
+ ndata->node = node;
+
+ switch (client->swgroup) {
+ case TEGRA_SWGROUP_DC:
+ case TEGRA_SWGROUP_DCB:
+ case TEGRA_SWGROUP_PTC:
+ case TEGRA_SWGROUP_VI:
+ /* these clients are isochronous by default */
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ break;
+
+ default:
+ ndata->tag = TEGRA_MC_ICC_TAG_DEFAULT;
+ break;
+ }
+
+ return ndata;
+ }
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ if (mc->soc->clients[i].id == idx)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ dev_err(mc->dev, "invalid ICC client ID %u\n", idx);
+
+ return ERR_PTR(-EINVAL);
+}
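+
+/*
+ * A consumer references a client by its ID in the first xlate cell; a
+ * hypothetical snippet such as "interconnects = <&mc 0x3d &emc 0>;" would
+ * resolve to the "sataw" client above and get the default (non-ISO) tag.
+ */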
+
+static const struct tegra_mc_icc_ops tegra30_mc_icc_ops = {
+ .xlate_extended = tegra30_mc_of_icc_xlate_extended,
+	.aggregate = tegra30_mc_icc_aggregate,
+ .set = tegra30_mc_icc_set,
+};
+
+const struct tegra_mc_soc tegra30_mc_soc = {
+ .clients = tegra30_mc_clients,
+ .num_clients = ARRAY_SIZE(tegra30_mc_clients),
+ .num_address_bits = 32,
+ .atom_size = 16,
+ .client_id_mask = 0x7f,
+ .smmu = &tegra30_smmu_soc,
+ .emem_regs = tegra30_mc_emem_regs,
+ .num_emem_regs = ARRAY_SIZE(tegra30_mc_emem_regs),
+ .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION |
+ MC_INT_DECERR_EMEM,
+ .reset_ops = &tegra_mc_reset_ops_common,
+ .resets = tegra30_mc_resets,
+ .num_resets = ARRAY_SIZE(tegra30_mc_resets),
+ .icc_ops = &tegra30_mc_icc_ops,
+ .ops = &tegra30_mc_ops,
+};
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
new file mode 100644
index 0000000000..f81e7df879
--- /dev/null
+++ b/drivers/memory/ti-aemif.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI AEMIF driver
+ *
+ * Copyright (C) 2010 - 2013 Texas Instruments Incorporated. http://www.ti.com/
+ *
+ * Authors:
+ * Murali Karicheri <m-karicheri2@ti.com>
+ * Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/ti-aemif.h>
+
+#define TA_SHIFT 2
+#define RHOLD_SHIFT 4
+#define RSTROBE_SHIFT 7
+#define RSETUP_SHIFT 13
+#define WHOLD_SHIFT 17
+#define WSTROBE_SHIFT 20
+#define WSETUP_SHIFT 26
+#define EW_SHIFT 30
+#define SSTROBE_SHIFT 31
+
+#define TA(x) ((x) << TA_SHIFT)
+#define RHOLD(x) ((x) << RHOLD_SHIFT)
+#define RSTROBE(x) ((x) << RSTROBE_SHIFT)
+#define RSETUP(x) ((x) << RSETUP_SHIFT)
+#define WHOLD(x) ((x) << WHOLD_SHIFT)
+#define WSTROBE(x) ((x) << WSTROBE_SHIFT)
+#define WSETUP(x) ((x) << WSETUP_SHIFT)
+#define EW(x) ((x) << EW_SHIFT)
+#define SSTROBE(x) ((x) << SSTROBE_SHIFT)
+
+#define ASIZE_MAX 0x1
+#define TA_MAX 0x3
+#define RHOLD_MAX 0x7
+#define RSTROBE_MAX 0x3f
+#define RSETUP_MAX 0xf
+#define WHOLD_MAX 0x7
+#define WSTROBE_MAX 0x3f
+#define WSETUP_MAX 0xf
+#define EW_MAX 0x1
+#define SSTROBE_MAX 0x1
+#define NUM_CS 4
+
+#define TA_VAL(x) (((x) & TA(TA_MAX)) >> TA_SHIFT)
+#define RHOLD_VAL(x) (((x) & RHOLD(RHOLD_MAX)) >> RHOLD_SHIFT)
+#define RSTROBE_VAL(x) (((x) & RSTROBE(RSTROBE_MAX)) >> RSTROBE_SHIFT)
+#define RSETUP_VAL(x) (((x) & RSETUP(RSETUP_MAX)) >> RSETUP_SHIFT)
+#define WHOLD_VAL(x) (((x) & WHOLD(WHOLD_MAX)) >> WHOLD_SHIFT)
+#define WSTROBE_VAL(x) (((x) & WSTROBE(WSTROBE_MAX)) >> WSTROBE_SHIFT)
+#define WSETUP_VAL(x) (((x) & WSETUP(WSETUP_MAX)) >> WSETUP_SHIFT)
+#define EW_VAL(x) (((x) & EW(EW_MAX)) >> EW_SHIFT)
+#define SSTROBE_VAL(x) (((x) & SSTROBE(SSTROBE_MAX)) >> SSTROBE_SHIFT)
+
+#define NRCSR_OFFSET 0x00
+#define AWCCR_OFFSET 0x04
+#define A1CR_OFFSET 0x10
+
+#define ACR_ASIZE_MASK 0x3
+#define ACR_EW_MASK BIT(30)
+#define ACR_SSTROBE_MASK BIT(31)
+#define ASIZE_16BIT 1
+
+#define CONFIG_MASK (TA(TA_MAX) | \
+ RHOLD(RHOLD_MAX) | \
+ RSTROBE(RSTROBE_MAX) | \
+ RSETUP(RSETUP_MAX) | \
+ WHOLD(WHOLD_MAX) | \
+ WSTROBE(WSTROBE_MAX) | \
+ WSETUP(WSETUP_MAX) | \
+ EW(EW_MAX) | SSTROBE(SSTROBE_MAX) | \
+ ASIZE_MAX)
+
+/**
+ * struct aemif_cs_data - structure to hold cs parameters
+ * @cs: chip-select number
+ * @wstrobe: write strobe width, ns
+ * @rstrobe: read strobe width, ns
+ * @wsetup: write setup width, ns
+ * @whold: write hold width, ns
+ * @rsetup: read setup width, ns
+ * @rhold: read hold width, ns
+ * @ta: minimum turn around time, ns
+ * @enable_ss: enable/disable select strobe mode
+ * @enable_ew: enable/disable extended wait mode
+ * @asize: width of the asynchronous device's data bus
+ */
+struct aemif_cs_data {
+ u8 cs;
+ u16 wstrobe;
+ u16 rstrobe;
+ u8 wsetup;
+ u8 whold;
+ u8 rsetup;
+ u8 rhold;
+ u8 ta;
+ u8 enable_ss;
+ u8 enable_ew;
+ u8 asize;
+};
+
+/**
+ * struct aemif_device - structure to hold device data
+ * @base: base address of AEMIF registers
+ * @clk: source clock
+ * @clk_rate: clock's rate in kHz
+ * @num_cs: number of assigned chip-selects
+ * @cs_offset: start number of cs nodes
+ * @cs_data: array of chip-select settings
+ */
+struct aemif_device {
+ void __iomem *base;
+ struct clk *clk;
+ unsigned long clk_rate;
+ u8 num_cs;
+ int cs_offset;
+ struct aemif_cs_data cs_data[NUM_CS];
+};
+
+/**
+ * aemif_calc_rate - calculate timing data.
+ * @pdev: platform device to calculate for
+ * @wanted: The cycle time needed in nanoseconds.
+ * @clk: The input clock rate in kHz.
+ * @max: The maximum divider value that can be programmed.
+ *
+ * On success, returns the calculated timing value minus 1 for easy
+ * programming into AEMIF timing registers, else negative errno.
+ */
+static int aemif_calc_rate(struct platform_device *pdev, int wanted,
+ unsigned long clk, int max)
+{
+ int result;
+
+ result = DIV_ROUND_UP((wanted * clk), NSEC_PER_MSEC) - 1;
+
+ dev_dbg(&pdev->dev, "%s: result %d from %ld, %d\n", __func__, result,
+ clk, wanted);
+
+ /* It is generally OK to have a more relaxed timing than requested... */
+ if (result < 0)
+ result = 0;
+
+ /* ... But configuring tighter timings is not an option. */
+ else if (result > max)
+ result = -EINVAL;
+
+ return result;
+}
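+
+/*
+ * Worked example: with clk = 100000 kHz (100 MHz, 10 ns/cycle) and
+ * wanted = 45 ns, result = DIV_ROUND_UP(45 * 100000, 1000000) - 1 =
+ * 5 - 1 = 4, i.e. 5 cycles (50 ns, the closest timing not tighter than
+ * requested) are programmed as the register value 4.
+ */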
+
+/**
+ * aemif_config_abus - configure async bus parameters
+ * @pdev: platform device to configure for
+ * @csnum: aemif chip select number
+ *
+ * This function programs the given timing values (in real clock) into the
+ * AEMIF registers taking the AEMIF clock into account.
+ *
+ * This function does not use any locking while programming the AEMIF
+ * because it is expected that there is only one user of a given
+ * chip-select.
+ *
+ * Returns 0 on success, else negative errno.
+ */
+static int aemif_config_abus(struct platform_device *pdev, int csnum)
+{
+ struct aemif_device *aemif = platform_get_drvdata(pdev);
+ struct aemif_cs_data *data = &aemif->cs_data[csnum];
+ int ta, rhold, rstrobe, rsetup, whold, wstrobe, wsetup;
+ unsigned long clk_rate = aemif->clk_rate;
+	unsigned int offset;
+ u32 set, val;
+
+ offset = A1CR_OFFSET + (data->cs - aemif->cs_offset) * 4;
+
+ ta = aemif_calc_rate(pdev, data->ta, clk_rate, TA_MAX);
+ rhold = aemif_calc_rate(pdev, data->rhold, clk_rate, RHOLD_MAX);
+ rstrobe = aemif_calc_rate(pdev, data->rstrobe, clk_rate, RSTROBE_MAX);
+ rsetup = aemif_calc_rate(pdev, data->rsetup, clk_rate, RSETUP_MAX);
+ whold = aemif_calc_rate(pdev, data->whold, clk_rate, WHOLD_MAX);
+ wstrobe = aemif_calc_rate(pdev, data->wstrobe, clk_rate, WSTROBE_MAX);
+ wsetup = aemif_calc_rate(pdev, data->wsetup, clk_rate, WSETUP_MAX);
+
+ if (ta < 0 || rhold < 0 || rstrobe < 0 || rsetup < 0 ||
+ whold < 0 || wstrobe < 0 || wsetup < 0) {
+ dev_err(&pdev->dev, "%s: cannot get suitable timings\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ set = TA(ta) | RHOLD(rhold) | RSTROBE(rstrobe) | RSETUP(rsetup) |
+ WHOLD(whold) | WSTROBE(wstrobe) | WSETUP(wsetup);
+
+ set |= (data->asize & ACR_ASIZE_MASK);
+ if (data->enable_ew)
+ set |= ACR_EW_MASK;
+ if (data->enable_ss)
+ set |= ACR_SSTROBE_MASK;
+
+ val = readl(aemif->base + offset);
+ val &= ~CONFIG_MASK;
+ val |= set;
+ writel(val, aemif->base + offset);
+
+ return 0;
+}
+
+static inline int aemif_cycles_to_nsec(int val, unsigned long clk_rate)
+{
+ return ((val + 1) * NSEC_PER_MSEC) / clk_rate;
+}
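+
+/*
+ * This is the inverse of aemif_calc_rate(): a register value of 4 at
+ * clk_rate = 100000 kHz reads back as (4 + 1) * 1000000 / 100000 = 50 ns.
+ */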
+
+/**
+ * aemif_get_hw_params - function to read hw register values
+ * @pdev: platform device to read for
+ * @csnum: aemif chip select number
+ *
+ * This function reads the current values from the registers and updates
+ * the timing data. Required for get/set commands and also for the case
+ * when the driver needs to use the defaults programmed in hardware.
+ */
+static void aemif_get_hw_params(struct platform_device *pdev, int csnum)
+{
+ struct aemif_device *aemif = platform_get_drvdata(pdev);
+ struct aemif_cs_data *data = &aemif->cs_data[csnum];
+ unsigned long clk_rate = aemif->clk_rate;
+ u32 val, offset;
+
+ offset = A1CR_OFFSET + (data->cs - aemif->cs_offset) * 4;
+ val = readl(aemif->base + offset);
+
+ data->ta = aemif_cycles_to_nsec(TA_VAL(val), clk_rate);
+ data->rhold = aemif_cycles_to_nsec(RHOLD_VAL(val), clk_rate);
+ data->rstrobe = aemif_cycles_to_nsec(RSTROBE_VAL(val), clk_rate);
+ data->rsetup = aemif_cycles_to_nsec(RSETUP_VAL(val), clk_rate);
+ data->whold = aemif_cycles_to_nsec(WHOLD_VAL(val), clk_rate);
+ data->wstrobe = aemif_cycles_to_nsec(WSTROBE_VAL(val), clk_rate);
+ data->wsetup = aemif_cycles_to_nsec(WSETUP_VAL(val), clk_rate);
+ data->enable_ew = EW_VAL(val);
+ data->enable_ss = SSTROBE_VAL(val);
+ data->asize = val & ASIZE_MAX;
+}
+
+/**
+ * of_aemif_parse_abus_config - parse CS configuration from DT
+ * @pdev: platform device to parse for
+ * @np: device node ptr
+ *
+ * This function updates the EMIF async bus configuration based on the values
+ * configured in a chip-select device tree node.
+ *
+ * Returns 0 on success, else negative errno.
+ */
+static int of_aemif_parse_abus_config(struct platform_device *pdev,
+ struct device_node *np)
+{
+ struct aemif_device *aemif = platform_get_drvdata(pdev);
+ struct aemif_cs_data *data;
+ u32 cs;
+ u32 val;
+
+ if (of_property_read_u32(np, "ti,cs-chipselect", &cs)) {
+		dev_dbg(&pdev->dev, "cs property is required\n");
+ return -EINVAL;
+ }
+
+ if (cs - aemif->cs_offset >= NUM_CS || cs < aemif->cs_offset) {
+		dev_dbg(&pdev->dev, "cs number is incorrect %d\n", cs);
+ return -EINVAL;
+ }
+
+ if (aemif->num_cs >= NUM_CS) {
+		dev_dbg(&pdev->dev, "cs count is more than %d\n", NUM_CS);
+ return -EINVAL;
+ }
+
+ data = &aemif->cs_data[aemif->num_cs];
+ data->cs = cs;
+
+ /* read the current value in the hw register */
+ aemif_get_hw_params(pdev, aemif->num_cs++);
+
+ /* override the values from device node */
+ if (!of_property_read_u32(np, "ti,cs-min-turnaround-ns", &val))
+ data->ta = val;
+
+ if (!of_property_read_u32(np, "ti,cs-read-hold-ns", &val))
+ data->rhold = val;
+
+ if (!of_property_read_u32(np, "ti,cs-read-strobe-ns", &val))
+ data->rstrobe = val;
+
+ if (!of_property_read_u32(np, "ti,cs-read-setup-ns", &val))
+ data->rsetup = val;
+
+ if (!of_property_read_u32(np, "ti,cs-write-hold-ns", &val))
+ data->whold = val;
+
+ if (!of_property_read_u32(np, "ti,cs-write-strobe-ns", &val))
+ data->wstrobe = val;
+
+ if (!of_property_read_u32(np, "ti,cs-write-setup-ns", &val))
+ data->wsetup = val;
+
+	if (!of_property_read_u32(np, "ti,cs-bus-width", &val) && val == 16)
+		data->asize = 1;
+
+ data->enable_ew = of_property_read_bool(np, "ti,cs-extended-wait-mode");
+ data->enable_ss = of_property_read_bool(np, "ti,cs-select-strobe-mode");
+ return 0;
+}
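+
+/*
+ * A chip-select child node consumed above might look like this
+ * (hypothetical node name and values):
+ *
+ *	cs2 {
+ *		ti,cs-chipselect = <2>;
+ *		ti,cs-read-strobe-ns = <120>;
+ *		ti,cs-write-strobe-ns = <120>;
+ *		ti,cs-bus-width = <16>;
+ *		ti,cs-select-strobe-mode;
+ *	};
+ */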
+
+static const struct of_device_id aemif_of_match[] = {
+ { .compatible = "ti,davinci-aemif", },
+ { .compatible = "ti,da850-aemif", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, aemif_of_match);
+
+static int aemif_probe(struct platform_device *pdev)
+{
+ int i;
+ int ret = -ENODEV;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *child_np;
+ struct aemif_device *aemif;
+ struct aemif_platform_data *pdata;
+ struct of_dev_auxdata *dev_lookup;
+
+ aemif = devm_kzalloc(dev, sizeof(*aemif), GFP_KERNEL);
+ if (!aemif)
+ return -ENOMEM;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ dev_lookup = pdata ? pdata->dev_lookup : NULL;
+
+ platform_set_drvdata(pdev, aemif);
+
+ aemif->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(aemif->clk)) {
+ dev_err(dev, "cannot get clock 'aemif'\n");
+ return PTR_ERR(aemif->clk);
+ }
+
+ ret = clk_prepare_enable(aemif->clk);
+ if (ret)
+ return ret;
+
+ aemif->clk_rate = clk_get_rate(aemif->clk) / MSEC_PER_SEC;
+
+ if (np && of_device_is_compatible(np, "ti,da850-aemif"))
+ aemif->cs_offset = 2;
+ else if (pdata)
+ aemif->cs_offset = pdata->cs_offset;
+
+ aemif->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(aemif->base)) {
+ ret = PTR_ERR(aemif->base);
+ goto error;
+ }
+
+ if (np) {
+		/*
+		 * For every chip select under the controller node there is
+		 * a cs device node that describes the bus configuration
+		 * parameters. This loop iterates over these nodes and
+		 * updates the cs data array.
+		 */
+ for_each_available_child_of_node(np, child_np) {
+ ret = of_aemif_parse_abus_config(pdev, child_np);
+ if (ret < 0) {
+ of_node_put(child_np);
+ goto error;
+ }
+ }
+ } else if (pdata && pdata->num_abus_data > 0) {
+ for (i = 0; i < pdata->num_abus_data; i++, aemif->num_cs++) {
+ aemif->cs_data[i].cs = pdata->abus_data[i].cs;
+ aemif_get_hw_params(pdev, i);
+ }
+ }
+
+ for (i = 0; i < aemif->num_cs; i++) {
+ ret = aemif_config_abus(pdev, i);
+ if (ret < 0) {
+ dev_err(dev, "Error configuring chip select %d\n",
+ aemif->cs_data[i].cs);
+ goto error;
+ }
+ }
+
+	/*
+	 * Create the child devices explicitly here to guarantee that they
+	 * are probed only after the AEMIF timing parameters have been set.
+	 */
+ if (np) {
+ for_each_available_child_of_node(np, child_np) {
+ ret = of_platform_populate(child_np, NULL,
+ dev_lookup, dev);
+ if (ret < 0) {
+ of_node_put(child_np);
+ goto error;
+ }
+ }
+ } else if (pdata) {
+ for (i = 0; i < pdata->num_sub_devices; i++) {
+ pdata->sub_devices[i].dev.parent = dev;
+ ret = platform_device_register(&pdata->sub_devices[i]);
+ if (ret) {
+				dev_warn(dev, "Error registering sub device %s\n",
+ pdata->sub_devices[i].name);
+ }
+ }
+ }
+
+ return 0;
+error:
+ clk_disable_unprepare(aemif->clk);
+ return ret;
+}
+
+static int aemif_remove(struct platform_device *pdev)
+{
+ struct aemif_device *aemif = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(aemif->clk);
+ return 0;
+}
+
+static struct platform_driver aemif_driver = {
+ .probe = aemif_probe,
+ .remove = aemif_remove,
+ .driver = {
+ .name = "ti-aemif",
+ .of_match_table = of_match_ptr(aemif_of_match),
+ },
+};
+
+module_platform_driver(aemif_driver);
+
+MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>");
+MODULE_AUTHOR("Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>");
+MODULE_DESCRIPTION("Texas Instruments AEMIF driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
diff --git a/drivers/memory/ti-emif-pm.c b/drivers/memory/ti-emif-pm.c
new file mode 100644
index 0000000000..cef0d3beb6
--- /dev/null
+++ b/drivers/memory/ti-emif-pm.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI AM33XX SRAM EMIF Driver
+ *
+ * Copyright (C) 2016-2017 Texas Instruments Inc.
+ * Dave Gerlach
+ */
+
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/sram.h>
+#include <linux/ti-emif-sram.h>
+
+#include "emif.h"
+
+#define TI_EMIF_SRAM_SYMBOL_OFFSET(sym) ((unsigned long)(sym) - \
+ (unsigned long)&ti_emif_sram)
+
+#define EMIF_POWER_MGMT_WAIT_SELF_REFRESH_8192_CYCLES 0x00a0
+
+struct ti_emif_data {
+ phys_addr_t ti_emif_sram_phys;
+ phys_addr_t ti_emif_sram_data_phys;
+ unsigned long ti_emif_sram_virt;
+ unsigned long ti_emif_sram_data_virt;
+ struct gen_pool *sram_pool_code;
+ struct gen_pool *sram_pool_data;
+ struct ti_emif_pm_data pm_data;
+ struct ti_emif_pm_functions pm_functions;
+};
+
+static struct ti_emif_data *emif_instance;
+
+static u32 sram_suspend_address(struct ti_emif_data *emif_data,
+ unsigned long addr)
+{
+ return (emif_data->ti_emif_sram_virt +
+ TI_EMIF_SRAM_SYMBOL_OFFSET(addr));
+}
+
+static phys_addr_t sram_resume_address(struct ti_emif_data *emif_data,
+ unsigned long addr)
+{
+ return ((unsigned long)emif_data->ti_emif_sram_phys +
+ TI_EMIF_SRAM_SYMBOL_OFFSET(addr));
+}
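+
+/*
+ * Both helpers translate a symbol inside the copied ti_emif_sram blob to
+ * its run-time location: e.g. if ti_emif_enter_sr sits 0x80 bytes past
+ * ti_emif_sram (hypothetical offset), its suspend-path address is
+ * ti_emif_sram_virt + 0x80 and its resume-path address is
+ * ti_emif_sram_phys + 0x80.
+ */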
+
+static void ti_emif_free_sram(struct ti_emif_data *emif_data)
+{
+ gen_pool_free(emif_data->sram_pool_code, emif_data->ti_emif_sram_virt,
+ ti_emif_sram_sz);
+ gen_pool_free(emif_data->sram_pool_data,
+ emif_data->ti_emif_sram_data_virt,
+ sizeof(struct emif_regs_amx3));
+}
+
+static int ti_emif_alloc_sram(struct device *dev,
+ struct ti_emif_data *emif_data)
+{
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ emif_data->sram_pool_code = of_gen_pool_get(np, "sram", 0);
+ if (!emif_data->sram_pool_code) {
+ dev_err(dev, "Unable to get sram pool for ocmcram code\n");
+ return -ENODEV;
+ }
+
+ emif_data->ti_emif_sram_virt =
+ gen_pool_alloc(emif_data->sram_pool_code,
+ ti_emif_sram_sz);
+ if (!emif_data->ti_emif_sram_virt) {
+ dev_err(dev, "Unable to allocate code memory from ocmcram\n");
+ return -ENOMEM;
+ }
+
+ /* Save physical address to calculate resume offset during pm init */
+ emif_data->ti_emif_sram_phys =
+ gen_pool_virt_to_phys(emif_data->sram_pool_code,
+ emif_data->ti_emif_sram_virt);
+
+ /* Get sram pool for data section and allocate space */
+ emif_data->sram_pool_data = of_gen_pool_get(np, "sram", 1);
+ if (!emif_data->sram_pool_data) {
+ dev_err(dev, "Unable to get sram pool for ocmcram data\n");
+ ret = -ENODEV;
+ goto err_free_sram_code;
+ }
+
+ emif_data->ti_emif_sram_data_virt =
+ gen_pool_alloc(emif_data->sram_pool_data,
+ sizeof(struct emif_regs_amx3));
+ if (!emif_data->ti_emif_sram_data_virt) {
+ dev_err(dev, "Unable to allocate data memory from ocmcram\n");
+ ret = -ENOMEM;
+ goto err_free_sram_code;
+ }
+
+ /* Save physical address to calculate resume offset during pm init */
+ emif_data->ti_emif_sram_data_phys =
+ gen_pool_virt_to_phys(emif_data->sram_pool_data,
+ emif_data->ti_emif_sram_data_virt);
+	/*
+	 * These functions are called during the suspend path while the MMU
+	 * is still on, so add the virtual base to the offset to form an
+	 * absolute address.
+	 */
+ emif_data->pm_functions.save_context =
+ sram_suspend_address(emif_data,
+ (unsigned long)ti_emif_save_context);
+ emif_data->pm_functions.enter_sr =
+ sram_suspend_address(emif_data,
+ (unsigned long)ti_emif_enter_sr);
+ emif_data->pm_functions.abort_sr =
+ sram_suspend_address(emif_data,
+ (unsigned long)ti_emif_abort_sr);
+
+	/*
+	 * These are called during the resume path when the MMU is not yet
+	 * enabled, so physical addresses are used instead.
+	 */
+ emif_data->pm_functions.restore_context =
+ sram_resume_address(emif_data,
+ (unsigned long)ti_emif_restore_context);
+ emif_data->pm_functions.exit_sr =
+ sram_resume_address(emif_data,
+ (unsigned long)ti_emif_exit_sr);
+ emif_data->pm_functions.run_hw_leveling =
+ sram_resume_address(emif_data,
+ (unsigned long)ti_emif_run_hw_leveling);
+
+ emif_data->pm_data.regs_virt =
+ (struct emif_regs_amx3 *)emif_data->ti_emif_sram_data_virt;
+ emif_data->pm_data.regs_phys = emif_data->ti_emif_sram_data_phys;
+
+ return 0;
+
+err_free_sram_code:
+ gen_pool_free(emif_data->sram_pool_code, emif_data->ti_emif_sram_virt,
+ ti_emif_sram_sz);
+ return ret;
+}
+
+static int ti_emif_push_sram(struct device *dev, struct ti_emif_data *emif_data)
+{
+ void *copy_addr;
+ u32 data_addr;
+
+ copy_addr = sram_exec_copy(emif_data->sram_pool_code,
+ (void *)emif_data->ti_emif_sram_virt,
+ &ti_emif_sram, ti_emif_sram_sz);
+ if (!copy_addr) {
+ dev_err(dev, "Cannot copy emif code to sram\n");
+ return -ENODEV;
+ }
+
+ data_addr = sram_suspend_address(emif_data,
+ (unsigned long)&ti_emif_pm_sram_data);
+ copy_addr = sram_exec_copy(emif_data->sram_pool_code,
+ (void *)data_addr,
+ &emif_data->pm_data,
+ sizeof(emif_data->pm_data));
+ if (!copy_addr) {
+ dev_err(dev, "Cannot copy emif data to code sram\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/*
+ * Due to Usage Note 3.1.2 "DDR3: JEDEC Compliance for Maximum
+ * Self-Refresh Command Limit" found in AM335x Silicon Errata
+ * (Document SPRZ360F Revised November 2013) we must configure
+ * the self refresh delay timer to 0xA (8192 cycles) to avoid
+ * generating too many refresh commands from the EMIF.
+ */
+static void ti_emif_configure_sr_delay(struct ti_emif_data *emif_data)
+{
+ writel(EMIF_POWER_MGMT_WAIT_SELF_REFRESH_8192_CYCLES,
+ (emif_data->pm_data.ti_emif_base_addr_virt +
+ EMIF_POWER_MANAGEMENT_CONTROL));
+
+ writel(EMIF_POWER_MGMT_WAIT_SELF_REFRESH_8192_CYCLES,
+ (emif_data->pm_data.ti_emif_base_addr_virt +
+ EMIF_POWER_MANAGEMENT_CTRL_SHDW));
+}
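+
+/*
+ * The value 0x00a0 places 0xA in the self-refresh timer field (bits 7:4,
+ * see EMIF_POWER_MGMT_SR_TIMER_MASK in ti-emif-sram-pm.S); per the macro
+ * name this corresponds to 8192 cycles of idle time before the EMIF puts
+ * the SDRAM into self-refresh.
+ */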
+
+/**
+ * ti_emif_copy_pm_function_table - copy the table of PM function addresses to sram
+ * @sram_pool: pointer to struct gen_pool where dst resides
+ * @dst: address to which the table should be copied
+ *
+ * Returns 0 on success, or an error code if the table is not available
+ */
+int ti_emif_copy_pm_function_table(struct gen_pool *sram_pool, void *dst)
+{
+ void *copy_addr;
+
+ if (!emif_instance)
+ return -ENODEV;
+
+ copy_addr = sram_exec_copy(sram_pool, dst,
+ &emif_instance->pm_functions,
+ sizeof(emif_instance->pm_functions));
+ if (!copy_addr)
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ti_emif_copy_pm_function_table);
+
+/**
+ * ti_emif_get_mem_type - return the memory type in use
+ *
+ * Returns the memory type value read from the EMIF, or an error code on failure
+ */
+int ti_emif_get_mem_type(void)
+{
+ unsigned long temp;
+
+ if (!emif_instance)
+ return -ENODEV;
+
+ temp = readl(emif_instance->pm_data.ti_emif_base_addr_virt +
+ EMIF_SDRAM_CONFIG);
+
+ temp = (temp & SDRAM_TYPE_MASK) >> SDRAM_TYPE_SHIFT;
+ return temp;
+}
+EXPORT_SYMBOL_GPL(ti_emif_get_mem_type);
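+
+/*
+ * Example use: a platform PM driver can compare the returned type against
+ * the SDRAM type encodings used elsewhere in this driver (e.g. 0x2 for
+ * DDR2, 0x3 for DDR3, see ti-emif-sram-pm.S) before deciding whether
+ * hardware leveling has to be re-run on resume.
+ */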
+
+static const struct of_device_id ti_emif_of_match[] = {
+ { .compatible = "ti,emif-am3352", .data =
+ (void *)EMIF_SRAM_AM33_REG_LAYOUT, },
+ { .compatible = "ti,emif-am4372", .data =
+ (void *)EMIF_SRAM_AM43_REG_LAYOUT, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ti_emif_of_match);
+
+#ifdef CONFIG_PM_SLEEP
+static int ti_emif_resume(struct device *dev)
+{
+ unsigned long tmp =
+ __raw_readl((void __iomem *)emif_instance->ti_emif_sram_virt);
+
+	/*
+	 * Check whether the first word we are copying is already present
+	 * at the destination, and copy only if it is not; a mismatch
+	 * indicates we have lost context and sram no longer contains
+	 * the PM code.
+	 */
+ if (tmp != ti_emif_sram)
+ ti_emif_push_sram(dev, emif_instance);
+
+ return 0;
+}
+
+static int ti_emif_suspend(struct device *dev)
+{
+	/*
+	 * The contents are preserved in DDR, hence there is no need to
+	 * save them explicitly.
+	 */
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int ti_emif_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct ti_emif_data *emif_data;
+
+ emif_data = devm_kzalloc(dev, sizeof(*emif_data), GFP_KERNEL);
+ if (!emif_data)
+ return -ENOMEM;
+
+	emif_data->pm_data.ti_emif_sram_config =
+		(unsigned long)device_get_match_data(dev);
+
+	emif_data->pm_data.ti_emif_base_addr_virt =
+		devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+	if (IS_ERR(emif_data->pm_data.ti_emif_base_addr_virt))
+		return PTR_ERR(emif_data->pm_data.ti_emif_base_addr_virt);
+
+ emif_data->pm_data.ti_emif_base_addr_phys = res->start;
+
+ ti_emif_configure_sr_delay(emif_data);
+
+ ret = ti_emif_alloc_sram(dev, emif_data);
+ if (ret)
+ return ret;
+
+ ret = ti_emif_push_sram(dev, emif_data);
+ if (ret)
+ goto fail_free_sram;
+
+ emif_instance = emif_data;
+
+ return 0;
+
+fail_free_sram:
+ ti_emif_free_sram(emif_data);
+
+ return ret;
+}
+
+static int ti_emif_remove(struct platform_device *pdev)
+{
+ struct ti_emif_data *emif_data = emif_instance;
+
+ emif_instance = NULL;
+
+ ti_emif_free_sram(emif_data);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ti_emif_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ti_emif_suspend, ti_emif_resume)
+};
+
+static struct platform_driver ti_emif_driver = {
+ .probe = ti_emif_probe,
+ .remove = ti_emif_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = ti_emif_of_match,
+ .pm = &ti_emif_pm_ops,
+ },
+};
+module_platform_driver(ti_emif_driver);
+
+MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");
+MODULE_DESCRIPTION("Texas Instruments SRAM EMIF driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/ti-emif-sram-pm.S b/drivers/memory/ti-emif-sram-pm.S
new file mode 100644
index 0000000000..7756b39712
--- /dev/null
+++ b/drivers/memory/ti-emif-sram-pm.S
@@ -0,0 +1,368 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Low level PM code for TI EMIF
+ *
+ * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Dave Gerlach
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/page.h>
+
+#include "emif.h"
+#include "ti-emif-asm-offsets.h"
+
+#define EMIF_POWER_MGMT_WAIT_SELF_REFRESH_8192_CYCLES 0x00a0
+#define EMIF_POWER_MGMT_SR_TIMER_MASK 0x00f0
+#define EMIF_POWER_MGMT_SELF_REFRESH_MODE 0x0200
+#define EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK 0x0700
+
+#define EMIF_SDCFG_TYPE_DDR2			(0x2 << SDRAM_TYPE_SHIFT)
+#define EMIF_SDCFG_TYPE_DDR3			(0x3 << SDRAM_TYPE_SHIFT)
+#define EMIF_STATUS_READY 0x4
+
+#define AM43XX_EMIF_PHY_CTRL_REG_COUNT 0x120
+
+#define EMIF_AM437X_REGISTERS 0x1
+
+ .arm
+ .align 3
+ .arch armv7-a
+
+ENTRY(ti_emif_sram)
+
+/*
+ * void ti_emif_save_context(void)
+ *
+ * Used during suspend to save the context of all required EMIF registers
+ * to local memory if the EMIF is going to lose context during the sleep
+ * transition. Operates on the VIRTUAL address of the EMIF.
+ */
+ENTRY(ti_emif_save_context)
+ stmfd sp!, {r4 - r11, lr} @ save registers on stack
+
+ adr r4, ti_emif_pm_sram_data
+ ldr r0, [r4, #EMIF_PM_BASE_ADDR_VIRT_OFFSET]
+ ldr r2, [r4, #EMIF_PM_REGS_VIRT_OFFSET]
+
+ /* Save EMIF configuration */
+ ldr r1, [r0, #EMIF_SDRAM_CONFIG]
+ str r1, [r2, #EMIF_SDCFG_VAL_OFFSET]
+
+ ldr r1, [r0, #EMIF_SDRAM_REFRESH_CONTROL]
+ str r1, [r2, #EMIF_REF_CTRL_VAL_OFFSET]
+
+ ldr r1, [r0, #EMIF_SDRAM_TIMING_1]
+ str r1, [r2, #EMIF_TIMING1_VAL_OFFSET]
+
+ ldr r1, [r0, #EMIF_SDRAM_TIMING_2]
+ str r1, [r2, #EMIF_TIMING2_VAL_OFFSET]
+
+ ldr r1, [r0, #EMIF_SDRAM_TIMING_3]
+ str r1, [r2, #EMIF_TIMING3_VAL_OFFSET]
+
+ ldr r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
+ str r1, [r2, #EMIF_PMCR_VAL_OFFSET]
+
+ ldr r1, [r0, #EMIF_POWER_MANAGEMENT_CTRL_SHDW]
+ str r1, [r2, #EMIF_PMCR_SHDW_VAL_OFFSET]
+
+ ldr r1, [r0, #EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG]
+ str r1, [r2, #EMIF_ZQCFG_VAL_OFFSET]
+
+ ldr r1, [r0, #EMIF_DDR_PHY_CTRL_1]
+ str r1, [r2, #EMIF_DDR_PHY_CTLR_1_OFFSET]
+
+ ldr r1, [r0, #EMIF_COS_CONFIG]
+ str r1, [r2, #EMIF_COS_CONFIG_OFFSET]
+
+ ldr r1, [r0, #EMIF_PRIORITY_TO_CLASS_OF_SERVICE_MAPPING]
+ str r1, [r2, #EMIF_PRIORITY_TO_COS_MAPPING_OFFSET]
+
+ ldr r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_1_MAPPING]
+ str r1, [r2, #EMIF_CONNECT_ID_SERV_1_MAP_OFFSET]
+
+ ldr r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_2_MAPPING]
+ str r1, [r2, #EMIF_CONNECT_ID_SERV_2_MAP_OFFSET]
+
+ ldr r1, [r0, #EMIF_OCP_CONFIG]
+ str r1, [r2, #EMIF_OCP_CONFIG_VAL_OFFSET]
+
+ ldr r5, [r4, #EMIF_PM_CONFIG_OFFSET]
+ cmp r5, #EMIF_SRAM_AM43_REG_LAYOUT
+ bne emif_skip_save_extra_regs
+
+ ldr r1, [r0, #EMIF_READ_WRITE_LEVELING_RAMP_CONTROL]
+ str r1, [r2, #EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET]
+
+ ldr r1, [r0, #EMIF_READ_WRITE_EXECUTION_THRESHOLD]
+ str r1, [r2, #EMIF_RD_WR_EXEC_THRESH_OFFSET]
+
+ ldr r1, [r0, #EMIF_LPDDR2_NVM_TIMING]
+ str r1, [r2, #EMIF_LPDDR2_NVM_TIM_OFFSET]
+
+ ldr r1, [r0, #EMIF_LPDDR2_NVM_TIMING_SHDW]
+ str r1, [r2, #EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET]
+
+ ldr r1, [r0, #EMIF_DLL_CALIB_CTRL]
+ str r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_OFFSET]
+
+ ldr r1, [r0, #EMIF_DLL_CALIB_CTRL_SHDW]
+ str r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET]
+
+ /* Loop and save entire block of emif phy regs */
+ mov r5, #0x0
+ add r4, r2, #EMIF_EXT_PHY_CTRL_VALS_OFFSET
+ add r3, r0, #EMIF_EXT_PHY_CTRL_1
+ddr_phy_ctrl_save:
+ ldr r1, [r3, r5]
+ str r1, [r4, r5]
+ add r5, r5, #0x4
+ cmp r5, #AM43XX_EMIF_PHY_CTRL_REG_COUNT
+ bne ddr_phy_ctrl_save
+
+emif_skip_save_extra_regs:
+ ldmfd sp!, {r4 - r11, pc} @ restore regs and return
+ENDPROC(ti_emif_save_context)
+
+/*
+ * void ti_emif_restore_context(void)
+ *
+ * Used during resume to restore the context of all required EMIF registers
+ * from local memory after the EMIF has lost context during a sleep transition.
+ * Operates on the PHYSICAL address of the EMIF.
+ */
+ENTRY(ti_emif_restore_context)
+ adr r4, ti_emif_pm_sram_data
+ ldr r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET]
+ ldr r2, [r4, #EMIF_PM_REGS_PHYS_OFFSET]
+
+ /* Config EMIF Timings */
+ ldr r1, [r2, #EMIF_DDR_PHY_CTLR_1_OFFSET]
+ str r1, [r0, #EMIF_DDR_PHY_CTRL_1]
+ str r1, [r0, #EMIF_DDR_PHY_CTRL_1_SHDW]
+
+ ldr r1, [r2, #EMIF_TIMING1_VAL_OFFSET]
+ str r1, [r0, #EMIF_SDRAM_TIMING_1]
+ str r1, [r0, #EMIF_SDRAM_TIMING_1_SHDW]
+
+ ldr r1, [r2, #EMIF_TIMING2_VAL_OFFSET]
+ str r1, [r0, #EMIF_SDRAM_TIMING_2]
+ str r1, [r0, #EMIF_SDRAM_TIMING_2_SHDW]
+
+ ldr r1, [r2, #EMIF_TIMING3_VAL_OFFSET]
+ str r1, [r0, #EMIF_SDRAM_TIMING_3]
+ str r1, [r0, #EMIF_SDRAM_TIMING_3_SHDW]
+
+ ldr r1, [r2, #EMIF_REF_CTRL_VAL_OFFSET]
+ str r1, [r0, #EMIF_SDRAM_REFRESH_CONTROL]
+ str r1, [r0, #EMIF_SDRAM_REFRESH_CTRL_SHDW]
+
+ ldr r1, [r2, #EMIF_PMCR_VAL_OFFSET]
+ str r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
+
+ ldr r1, [r2, #EMIF_PMCR_SHDW_VAL_OFFSET]
+ str r1, [r0, #EMIF_POWER_MANAGEMENT_CTRL_SHDW]
+
+ ldr r1, [r2, #EMIF_COS_CONFIG_OFFSET]
+ str r1, [r0, #EMIF_COS_CONFIG]
+
+ ldr r1, [r2, #EMIF_PRIORITY_TO_COS_MAPPING_OFFSET]
+ str r1, [r0, #EMIF_PRIORITY_TO_CLASS_OF_SERVICE_MAPPING]
+
+ ldr r1, [r2, #EMIF_CONNECT_ID_SERV_1_MAP_OFFSET]
+ str r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_1_MAPPING]
+
+ ldr r1, [r2, #EMIF_CONNECT_ID_SERV_2_MAP_OFFSET]
+ str r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_2_MAPPING]
+
+ ldr r1, [r2, #EMIF_OCP_CONFIG_VAL_OFFSET]
+ str r1, [r0, #EMIF_OCP_CONFIG]
+
+ ldr r5, [r4, #EMIF_PM_CONFIG_OFFSET]
+ cmp r5, #EMIF_SRAM_AM43_REG_LAYOUT
+ bne emif_skip_restore_extra_regs
+
+ ldr r1, [r2, #EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET]
+ str r1, [r0, #EMIF_READ_WRITE_LEVELING_RAMP_CONTROL]
+
+ ldr r1, [r2, #EMIF_RD_WR_EXEC_THRESH_OFFSET]
+ str r1, [r0, #EMIF_READ_WRITE_EXECUTION_THRESHOLD]
+
+ ldr r1, [r2, #EMIF_LPDDR2_NVM_TIM_OFFSET]
+ str r1, [r0, #EMIF_LPDDR2_NVM_TIMING]
+
+ ldr r1, [r2, #EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET]
+ str r1, [r0, #EMIF_LPDDR2_NVM_TIMING_SHDW]
+
+ ldr r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_OFFSET]
+ str r1, [r0, #EMIF_DLL_CALIB_CTRL]
+
+ ldr r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET]
+ str r1, [r0, #EMIF_DLL_CALIB_CTRL_SHDW]
+
+ ldr r1, [r2, #EMIF_ZQCFG_VAL_OFFSET]
+ str r1, [r0, #EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG]
+
+ /* Loop and restore entire block of emif phy regs */
+ mov r5, #0x0
+	/*
+	 * Load ti_emif_regs_amx3 + EMIF_EXT_PHY_CTRL_VALS_OFFSET for the
+	 * address of the phy register save space
+	 */
+ add r3, r2, #EMIF_EXT_PHY_CTRL_VALS_OFFSET
+ add r4, r0, #EMIF_EXT_PHY_CTRL_1
+ddr_phy_ctrl_restore:
+ ldr r1, [r3, r5]
+ str r1, [r4, r5]
+ add r5, r5, #0x4
+ cmp r5, #AM43XX_EMIF_PHY_CTRL_REG_COUNT
+ bne ddr_phy_ctrl_restore
+
+emif_skip_restore_extra_regs:
+	/*
+	 * Output impedance calibration is needed only for DDR3,
+	 * but since its initial state is disabled for DDR2 there
+	 * is no harm in restoring the old configuration.
+	 */
+ ldr r1, [r2, #EMIF_ZQCFG_VAL_OFFSET]
+ str r1, [r0, #EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG]
+
+ /* Write to sdcfg last for DDR2 only */
+ ldr r1, [r2, #EMIF_SDCFG_VAL_OFFSET]
+ and r2, r1, #SDRAM_TYPE_MASK
+ cmp r2, #EMIF_SDCFG_TYPE_DDR2
+ streq r1, [r0, #EMIF_SDRAM_CONFIG]
+
+ mov pc, lr
+ENDPROC(ti_emif_restore_context)
+
+/*
+ * void ti_emif_run_hw_leveling(void)
+ *
+ * Used during resume to run hardware leveling again and restore the
+ * configuration of the EMIF PHY, only for DDR3.
+ */
+ENTRY(ti_emif_run_hw_leveling)
+ adr r4, ti_emif_pm_sram_data
+ ldr r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET]
+
+ ldr r3, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]
+ orr r3, r3, #RDWRLVLFULL_START
+ ldr r2, [r0, #EMIF_SDRAM_CONFIG]
+ and r2, r2, #SDRAM_TYPE_MASK
+ cmp r2, #EMIF_SDCFG_TYPE_DDR3
+ bne skip_hwlvl
+
+ str r3, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]
+
+	/*
+	 * If EMIF registers are touched during the initial stage of the HW
+	 * leveling sequence, an L3 NOC timeout error is issued because the
+	 * EMIF does not respond. This is not fatal, but it is avoidable.
+	 * This small wait loop gives the condition enough time to clear,
+	 * even in the worst case of the CPU running at its max speed of 1 GHz.
+	 */
+ mov r2, #0x2000
+1:
+ subs r2, r2, #0x1
+ bne 1b
+
+ /* Bit clears when operation is complete */
+2: ldr r1, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]
+ tst r1, #RDWRLVLFULL_START
+ bne 2b
+
+skip_hwlvl:
+ mov pc, lr
+ENDPROC(ti_emif_run_hw_leveling)
+
+/*
+ * void ti_emif_enter_sr(void)
+ *
+ * Programs the EMIF to tell the SDRAM to enter into self-refresh
+ * mode during a sleep transition. Operates on the VIRTUAL address
+ * of the EMIF.
+ */
+ENTRY(ti_emif_enter_sr)
+ stmfd sp!, {r4 - r11, lr} @ save registers on stack
+
+ adr r4, ti_emif_pm_sram_data
+ ldr r0, [r4, #EMIF_PM_BASE_ADDR_VIRT_OFFSET]
+ ldr r2, [r4, #EMIF_PM_REGS_VIRT_OFFSET]
+
+ ldr r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
+ bic r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
+ orr r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE
+ str r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
+
+ ldmfd sp!, {r4 - r11, pc} @ restore regs and return
+ENDPROC(ti_emif_enter_sr)
+
+/*
+ * void ti_emif_exit_sr(void)
+ *
+ * Programs the EMIF to tell the SDRAM to exit self-refresh mode
+ * after a sleep transition. Operates on the PHYSICAL address of
+ * the EMIF.
+ */
+ENTRY(ti_emif_exit_sr)
+ adr r4, ti_emif_pm_sram_data
+ ldr r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET]
+ ldr r2, [r4, #EMIF_PM_REGS_PHYS_OFFSET]
+
+	/*
+	 * Toggle the EMIF to exit self-refresh mode:
+	 * if the EMIF lost context, PWR_MGT_CTRL is currently 0, and writing
+	 * disable (0x0) alone won't do anything, so toggle from SR (0x2) to
+	 * disable (0x0) here.
+	 * If the EMIF did not lose context, nothing breaks, as we write the
+	 * same value (0x2) to the register before writing disable (0x0).
+	 */
+ ldr r1, [r2, #EMIF_PMCR_VAL_OFFSET]
+ bic r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
+ orr r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE
+ str r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
+ bic r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
+ str r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
+
+ /* Wait for EMIF to become ready */
+1: ldr r1, [r0, #EMIF_STATUS]
+ tst r1, #EMIF_STATUS_READY
+ beq 1b
+
+ mov pc, lr
+ENDPROC(ti_emif_exit_sr)
+
+/*
+ * void ti_emif_abort_sr(void)
+ *
+ * Disables self-refresh after a failed transition to a low-power
+ * state so the kernel can jump back to DDR and follow abort path.
+ * Operates on the VIRTUAL address of the EMIF.
+ */
+ENTRY(ti_emif_abort_sr)
+ stmfd sp!, {r4 - r11, lr} @ save registers on stack
+
+ adr r4, ti_emif_pm_sram_data
+ ldr r0, [r4, #EMIF_PM_BASE_ADDR_VIRT_OFFSET]
+ ldr r2, [r4, #EMIF_PM_REGS_VIRT_OFFSET]
+
+ ldr r1, [r2, #EMIF_PMCR_VAL_OFFSET]
+ bic r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
+ str r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
+
+ /* Wait for EMIF to become ready */
+1: ldr r1, [r0, #EMIF_STATUS]
+ tst r1, #EMIF_STATUS_READY
+ beq 1b
+
+ ldmfd sp!, {r4 - r11, pc} @ restore regs and return
+ENDPROC(ti_emif_abort_sr)
+
+ .align 3
+ENTRY(ti_emif_pm_sram_data)
+ .space EMIF_PM_DATA_SIZE
+ENTRY(ti_emif_sram_sz)
+ .word . - ti_emif_save_context