author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit    5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree      a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/bus
parent    Initial commit. (diff)
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/bus')
-rw-r--r--  drivers/bus/Kconfig | 245
-rw-r--r--  drivers/bus/Makefile | 41
-rw-r--r--  drivers/bus/arm-cci.c | 587
-rw-r--r--  drivers/bus/arm-integrator-lm.c | 128
-rw-r--r--  drivers/bus/brcmstb_gisb.c | 547
-rw-r--r--  drivers/bus/bt1-apb.c | 421
-rw-r--r--  drivers/bus/bt1-axi.c | 314
-rw-r--r--  drivers/bus/da8xx-mstpri.c | 264
-rw-r--r--  drivers/bus/fsl-mc/Kconfig | 16
-rw-r--r--  drivers/bus/fsl-mc/Makefile | 18
-rw-r--r--  drivers/bus/fsl-mc/dpbp.c | 185
-rw-r--r--  drivers/bus/fsl-mc/dpcon.c | 221
-rw-r--r--  drivers/bus/fsl-mc/dpmcp.c | 99
-rw-r--r--  drivers/bus/fsl-mc/dprc-driver.c | 861
-rw-r--r--  drivers/bus/fsl-mc/dprc.c | 702
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-allocator.c | 669
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-bus.c | 1157
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-msi.c | 301
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-private.h | 614
-rw-r--r--  drivers/bus/fsl-mc/mc-io.c | 285
-rw-r--r--  drivers/bus/fsl-mc/mc-sys.c | 296
-rw-r--r--  drivers/bus/hisi_lpc.c | 708
-rw-r--r--  drivers/bus/imx-weim.c | 278
-rw-r--r--  drivers/bus/mhi/Kconfig | 8
-rw-r--r--  drivers/bus/mhi/Makefile | 2
-rw-r--r--  drivers/bus/mhi/host/Kconfig | 31
-rw-r--r--  drivers/bus/mhi/host/Makefile | 6
-rw-r--r--  drivers/bus/mhi/host/boot.c | 525
-rw-r--r--  drivers/bus/mhi/host/debugfs.c | 411
-rw-r--r--  drivers/bus/mhi/host/init.c | 1387
-rw-r--r--  drivers/bus/mhi/host/internal.h | 722
-rw-r--r--  drivers/bus/mhi/host/main.c | 1630
-rw-r--r--  drivers/bus/mhi/host/pci_generic.c | 345
-rw-r--r--  drivers/bus/mhi/host/pm.c | 1162
-rw-r--r--  drivers/bus/mips_cdmm.c | 698
-rw-r--r--  drivers/bus/moxtet.c | 884
-rw-r--r--  drivers/bus/mvebu-mbus.c | 1378
-rw-r--r--  drivers/bus/omap-ocp2scp.c | 118
-rw-r--r--  drivers/bus/omap_l3_noc.c | 382
-rw-r--r--  drivers/bus/omap_l3_noc.h | 501
-rw-r--r--  drivers/bus/omap_l3_smx.c | 303
-rw-r--r--  drivers/bus/omap_l3_smx.h | 324
-rw-r--r--  drivers/bus/qcom-ebi2.c | 406
-rw-r--r--  drivers/bus/simple-pm-bus.c | 58
-rw-r--r--  drivers/bus/sun50i-de2.c | 48
-rw-r--r--  drivers/bus/sunxi-rsb.c | 803
-rw-r--r--  drivers/bus/tegra-aconnect.c | 120
-rw-r--r--  drivers/bus/tegra-gmi.c | 284
-rw-r--r--  drivers/bus/ti-pwmss.c | 55
-rw-r--r--  drivers/bus/ti-sysc.c | 3394
-rw-r--r--  drivers/bus/ts-nbus.c | 369
-rw-r--r--  drivers/bus/uniphier-system-bus.c | 283
-rw-r--r--  drivers/bus/vexpress-config.c | 419
53 files changed, 26013 insertions, 0 deletions
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
new file mode 100644
index 000000000..0c262c2ae
--- /dev/null
+++ b/drivers/bus/Kconfig
@@ -0,0 +1,245 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Bus Devices
+#
+
+menu "Bus devices"
+
+config ARM_CCI
+ bool
+
+config ARM_CCI400_COMMON
+ bool
+ select ARM_CCI
+
+config ARM_CCI400_PORT_CTRL
+ bool
+ depends on ARM && OF && CPU_V7
+ select ARM_CCI400_COMMON
+ help
+ Low level power management driver for CCI400 cache coherent
+ interconnect for ARM platforms.
+
+config ARM_INTEGRATOR_LM
+ bool "ARM Integrator Logic Module bus"
+ depends on HAS_IOMEM
+ depends on ARCH_INTEGRATOR || COMPILE_TEST
+ default ARCH_INTEGRATOR
+ help
+ Say y here to enable support for the ARM Logic Module bus
+ found on the ARM Integrator AP (Application Platform).
+
+config BRCMSTB_GISB_ARB
+ bool "Broadcom STB GISB bus arbiter"
+ depends on ARM || ARM64 || MIPS
+ default ARCH_BRCMSTB || BMIPS_GENERIC
+ help
+ Driver for the Broadcom Set Top Box System-on-a-chip internal bus
+ arbiter. This driver provides timeout and target abort error handling
+ and internal bus master decoding.
+
+config BT1_APB
+ bool "Baikal-T1 APB-bus driver"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ Baikal-T1 AXI-APB bridge is used to access the SoC subsystem CSRs.
+ IO requests are routed to this bus by means of the DW AMBA 3 AXI
+ Interconnect. In case of an APB protocol collision, or if a slave
+ device fails to respond within the configured timeout, an IRQ is
+ raised and the erroneous address is reported to the APB terminator
+ (APB Errors Handler Block). This driver provides an interrupt
+ handler that detects the erroneous address, prints an error message
+ about the address fault and updates an error counter. The counter
+ and the APB-bus operation timeout can be accessed via the
+ corresponding sysfs nodes.
+
+config BT1_AXI
+ bool "Baikal-T1 AXI-bus driver"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ AXI3-bus is the main communication bus connecting all high-speed
+ peripheral IP-cores with RAM controller and with MIPS P5600 cores on
+ Baikal-T1 SoC. Traffic arbitration is done by means of DW AMBA 3 AXI
+ Interconnect (so called AXI Main Interconnect) routing IO requests
+ from one SoC block to another. This driver provides a way to detect
+ any bus protocol errors and device not responding situations by
+ means of an embedded on top of the interconnect errors handler
+ block (EHB). AXI Interconnect QoS arbitration tuning is currently
+ unsupported.
+
+config MOXTET
+ tristate "CZ.NIC Turris Mox module configuration bus"
+ depends on SPI_MASTER && OF
+ help
+ Say yes here to add support for the module configuration bus found
+ on CZ.NIC's Turris Mox. This is needed for the ability to discover
+ the order in which the modules are connected and to get/set some of
+ their settings. For example the GPIOs on Mox SFP module are
+ configured through this bus.
+
+config HISILICON_LPC
+ bool "Support for ISA I/O space on HiSilicon Hip06/7"
+ depends on (ARM64 && ARCH_HISI) || (COMPILE_TEST && !ALPHA && !HEXAGON && !PARISC && !C6X)
+ depends on HAS_IOMEM
+ select INDIRECT_PIO if ARM64
+ help
+ Driver to enable I/O access to devices attached to the Low Pin
+ Count bus on the HiSilicon Hip06/7 SoC.
+
+config IMX_WEIM
+ bool "Freescale EIM DRIVER"
+ depends on ARCH_MXC
+ help
+ Driver for i.MX WEIM controller.
+ The WEIM(Wireless External Interface Module) works like a bus.
+ You can attach many different devices on it, such as NOR, onenand.
+
+config MIPS_CDMM
+ bool "MIPS Common Device Memory Map (CDMM) Driver"
+ depends on CPU_MIPSR2 || CPU_MIPSR5
+ help
+ Driver needed for the MIPS Common Device Memory Map bus in MIPS
+ cores. This bus is for per-CPU tightly coupled devices such as the
+ Fast Debug Channel (FDC).
+
+ For this to work, either your bootloader needs to enable the CDMM
+ region at an unused physical address on the boot CPU, or else your
+ platform code needs to implement mips_cdmm_phys_base() (see
+ asm/cdmm.h).
+
+config MVEBU_MBUS
+ bool
+ depends on PLAT_ORION
+ help
+ Driver needed for the MBus configuration on Marvell EBU SoCs
+ (Kirkwood, Dove, Orion5x, MV78XX0 and Armada 370/XP).
+
+config OMAP_INTERCONNECT
+ tristate "OMAP INTERCONNECT DRIVER"
+ depends on ARCH_OMAP2PLUS
+ help
+ Driver to enable OMAP interconnect error handling.
+
+config OMAP_OCP2SCP
+ tristate "OMAP OCP2SCP DRIVER"
+ depends on ARCH_OMAP2PLUS
+ help
+ Driver to enable the ocp2scp module, which transforms the OCP
+ interface protocol to the SCP protocol. In OMAP4 the USB PHY is
+ connected via OCP2SCP; in OMAP5 both the USB PHY and the SATA PHY
+ are connected via OCP2SCP.
+
+config QCOM_EBI2
+ bool "Qualcomm External Bus Interface 2 (EBI2)"
+ depends on HAS_IOMEM
+ depends on ARCH_QCOM || COMPILE_TEST
+ default ARCH_QCOM
+ help
+ Say y here to enable support for the Qualcomm External Bus
+ Interface 2, which can be used to connect things like NAND flash,
+ SRAM, Ethernet adapters, FPGAs and LCD displays.
+
+config SIMPLE_PM_BUS
+ tristate "Simple Power-Managed Bus Driver"
+ depends on OF && PM
+ help
+ Driver for transparent buses that don't need a real driver, but
+ where the bus controller is part of a PM domain, or under the control
+ of a functional clock, and thus relies on runtime PM for managing
+ this PM domain and/or clock.
+ An example of such a bus controller is the Renesas Bus State
+ Controller (BSC, sometimes called "LBSC within Bus Bridge", or
+ "External Bus Interface") as found on several Renesas ARM SoCs.
+
+config SUN50I_DE2_BUS
+ bool "Allwinner A64 DE2 Bus Driver"
+ default ARM64
+ depends on ARCH_SUNXI
+ select SUNXI_SRAM
+ help
+ Say y here to enable support for the Allwinner A64 DE2 bus driver.
+ It's mostly transparent, but an SRAM region needs to be claimed in
+ the SRAM controller to make all the blocks in the DE2 part
+ accessible.
+
+config SUNXI_RSB
+ tristate "Allwinner sunXi Reduced Serial Bus Driver"
+ default MACH_SUN8I || MACH_SUN9I || ARM64
+ depends on ARCH_SUNXI
+ select REGMAP
+ help
+ Say y here to enable support for Allwinner's Reduced Serial Bus
+ (RSB). This controller is responsible for communicating with
+ various RSB-based devices, such as AXP223 and AXP8XX PMICs, and
+ AC100/AC200 ICs.
+
+config TEGRA_ACONNECT
+ tristate "Tegra ACONNECT Bus Driver"
+ depends on ARCH_TEGRA_210_SOC
+ depends on OF && PM
+ help
+ Driver for the Tegra ACONNECT bus which is used to interface with
+ the devices inside the Audio Processing Engine (APE) for Tegra210.
+
+config TEGRA_GMI
+ tristate "Tegra Generic Memory Interface bus driver"
+ depends on ARCH_TEGRA
+ help
+ Driver for the Tegra Generic Memory Interface bus which can be used
+ to attach devices such as NOR, UART, FPGA and more.
+
+config TI_PWMSS
+ bool
+ default y if (ARCH_OMAP2PLUS) && (PWM_TIECAP || PWM_TIEHRPWM || TI_EQEP)
+ help
+ PWM Subsystem driver support for AM33xx SoC.
+
+ PWM submodules require PWM config space access from submodule
+ drivers and require common parent driver support.
+
+config TI_SYSC
+ bool "TI sysc interconnect target module driver"
+ depends on ARCH_OMAP2PLUS
+ help
+ Generic driver for Texas Instruments interconnect target module
+ found on many TI SoCs.
+
+config TS_NBUS
+ tristate "Technologic Systems NBUS Driver"
+ depends on SOC_IMX28
+ depends on OF_GPIO && PWM
+ help
+ Driver for the Technologic Systems NBUS which is used to interface
+ with the peripherals in the FPGA of the TS-4600 SoM.
+
+config UNIPHIER_SYSTEM_BUS
+ tristate "UniPhier System Bus driver"
+ depends on ARCH_UNIPHIER && OF
+ default y
+ help
+ Support for UniPhier System Bus, a simple external bus. This is
+ needed to use on-board devices connected to UniPhier SoCs.
+
+config VEXPRESS_CONFIG
+ tristate "Versatile Express configuration bus"
+ default y if ARCH_VEXPRESS
+ depends on ARM || ARM64
+ depends on OF
+ select REGMAP
+ help
+ Platform configuration infrastructure for the ARM Ltd.
+ Versatile Express.
+
+config DA8XX_MSTPRI
+ bool "TI da8xx master peripheral priority driver"
+ depends on ARCH_DAVINCI_DA8XX
+ help
+ Driver for Texas Instruments da8xx master peripheral priority
+ configuration. Allows adjusting the priorities of all master
+ peripherals.
+
+source "drivers/bus/fsl-mc/Kconfig"
+source "drivers/bus/mhi/Kconfig"
+
+endmenu
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
new file mode 100644
index 000000000..16c47a061
--- /dev/null
+++ b/drivers/bus/Makefile
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the bus drivers.
+#
+
+# Interconnect bus drivers for ARM platforms
+obj-$(CONFIG_ARM_CCI) += arm-cci.o
+obj-$(CONFIG_ARM_INTEGRATOR_LM) += arm-integrator-lm.o
+obj-$(CONFIG_HISILICON_LPC) += hisi_lpc.o
+obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o
+obj-$(CONFIG_MOXTET) += moxtet.o
+
+# DPAA2 fsl-mc bus
+obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
+
+obj-$(CONFIG_BT1_APB) += bt1-apb.o
+obj-$(CONFIG_BT1_AXI) += bt1-axi.o
+obj-$(CONFIG_IMX_WEIM) += imx-weim.o
+obj-$(CONFIG_MIPS_CDMM) += mips_cdmm.o
+obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o
+
+# Interconnect bus driver for OMAP SoCs.
+obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o
+
+obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
+obj-$(CONFIG_QCOM_EBI2) += qcom-ebi2.o
+obj-$(CONFIG_SUN50I_DE2_BUS) += sun50i-de2.o
+obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
+obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o
+obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o
+obj-$(CONFIG_TEGRA_GMI) += tegra-gmi.o
+obj-$(CONFIG_TI_PWMSS) += ti-pwmss.o
+obj-$(CONFIG_TI_SYSC) += ti-sysc.o
+obj-$(CONFIG_TS_NBUS) += ts-nbus.o
+obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o
+obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o
+
+obj-$(CONFIG_DA8XX_MSTPRI) += da8xx-mstpri.o
+
+# MHI
+obj-y += mhi/
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
new file mode 100644
index 000000000..b8184a903
--- /dev/null
+++ b/drivers/bus/arm-cci.c
@@ -0,0 +1,587 @@
+/*
+ * CCI cache coherent interconnect driver
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/arm-cci.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
+
+static void __iomem *cci_ctrl_base __ro_after_init;
+static unsigned long cci_ctrl_phys __ro_after_init;
+
+#ifdef CONFIG_ARM_CCI400_PORT_CTRL
+struct cci_nb_ports {
+ unsigned int nb_ace;
+ unsigned int nb_ace_lite;
+};
+
+static const struct cci_nb_ports cci400_ports = {
+ .nb_ace = 2,
+ .nb_ace_lite = 3
+};
+
+#define CCI400_PORTS_DATA (&cci400_ports)
+#else
+#define CCI400_PORTS_DATA (NULL)
+#endif
+
+static const struct of_device_id arm_cci_matches[] = {
+#ifdef CONFIG_ARM_CCI400_COMMON
+ {.compatible = "arm,cci-400", .data = CCI400_PORTS_DATA },
+#endif
+#ifdef CONFIG_ARM_CCI5xx_PMU
+ { .compatible = "arm,cci-500", },
+ { .compatible = "arm,cci-550", },
+#endif
+ {},
+};
+
+static const struct of_dev_auxdata arm_cci_auxdata[] = {
+ OF_DEV_AUXDATA("arm,cci-400-pmu", 0, NULL, &cci_ctrl_base),
+ OF_DEV_AUXDATA("arm,cci-400-pmu,r0", 0, NULL, &cci_ctrl_base),
+ OF_DEV_AUXDATA("arm,cci-400-pmu,r1", 0, NULL, &cci_ctrl_base),
+ OF_DEV_AUXDATA("arm,cci-500-pmu,r0", 0, NULL, &cci_ctrl_base),
+ OF_DEV_AUXDATA("arm,cci-550-pmu,r0", 0, NULL, &cci_ctrl_base),
+ {}
+};
+
+#define DRIVER_NAME "ARM-CCI"
+
+static int cci_platform_probe(struct platform_device *pdev)
+{
+ if (!cci_probed())
+ return -ENODEV;
+
+ return of_platform_populate(pdev->dev.of_node, NULL,
+ arm_cci_auxdata, &pdev->dev);
+}
+
+static struct platform_driver cci_platform_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = arm_cci_matches,
+ },
+ .probe = cci_platform_probe,
+};
+
+static int __init cci_platform_init(void)
+{
+ return platform_driver_register(&cci_platform_driver);
+}
+
+#ifdef CONFIG_ARM_CCI400_PORT_CTRL
+
+#define CCI_PORT_CTRL 0x0
+#define CCI_CTRL_STATUS 0xc
+
+#define CCI_ENABLE_SNOOP_REQ 0x1
+#define CCI_ENABLE_DVM_REQ 0x2
+#define CCI_ENABLE_REQ (CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)
+
+enum cci_ace_port_type {
+ ACE_INVALID_PORT = 0x0,
+ ACE_PORT,
+ ACE_LITE_PORT,
+};
+
+struct cci_ace_port {
+ void __iomem *base;
+ unsigned long phys;
+ enum cci_ace_port_type type;
+ struct device_node *dn;
+};
+
+static struct cci_ace_port *ports;
+static unsigned int nb_cci_ports;
+
+struct cpu_port {
+ u64 mpidr;
+ u32 port;
+};
+
+/*
+ * Use the port MSB as valid flag, shift can be made dynamic
+ * by computing number of bits required for port indexes.
+ * Code disabling CCI cpu ports runs with D-cache invalidated
+ * and SCTLR bit clear so data accesses must be kept to a minimum
+ * to improve performance; for now shift is left static to
+ * avoid one more data access while disabling the CCI port.
+ */
+#define PORT_VALID_SHIFT 31
+#define PORT_VALID (0x1 << PORT_VALID_SHIFT)
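+
+/*
+ * For illustration: a CPU wired to CCI port index 2 is stashed as
+ * PORT_VALID | 2, i.e. 0x80000002. A zeroed cpu_port entry thus reads
+ * back as invalid until init_cpu_port() fills it in.
+ */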
+
+static inline void init_cpu_port(struct cpu_port *port, u32 index, u64 mpidr)
+{
+ port->port = PORT_VALID | index;
+ port->mpidr = mpidr;
+}
+
+static inline bool cpu_port_is_valid(struct cpu_port *port)
+{
+ return !!(port->port & PORT_VALID);
+}
+
+static inline bool cpu_port_match(struct cpu_port *port, u64 mpidr)
+{
+ return port->mpidr == (mpidr & MPIDR_HWID_BITMASK);
+}
+
+static struct cpu_port cpu_port[NR_CPUS];
+
+/**
+ * __cci_ace_get_port - Function to retrieve the port index connected to
+ * a cpu or device.
+ *
+ * @dn: device node of the device to look-up
+ * @type: port type
+ *
+ * Return value:
+ * - CCI port index if success
+ * - -ENODEV if failure
+ */
+static int __cci_ace_get_port(struct device_node *dn, int type)
+{
+ int i;
+ bool ace_match;
+ struct device_node *cci_portn;
+
+ cci_portn = of_parse_phandle(dn, "cci-control-port", 0);
+ for (i = 0; i < nb_cci_ports; i++) {
+ ace_match = ports[i].type == type;
+ if (ace_match && cci_portn == ports[i].dn)
+ return i;
+ }
+ return -ENODEV;
+}
+
+int cci_ace_get_port(struct device_node *dn)
+{
+ return __cci_ace_get_port(dn, ACE_LITE_PORT);
+}
+EXPORT_SYMBOL_GPL(cci_ace_get_port);
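+
+/*
+ * Illustrative wiring only (node names here are hypothetical, not taken
+ * from any particular board): a bus master with an ACE-lite slave
+ * interface references it through a "cci-control-port" phandle,
+ *
+ *	dma@12000000 {
+ *		...
+ *		cci-control-port = <&cci_control2>;
+ *	};
+ *
+ * after which platform code can resolve the port index with
+ * cci_ace_get_port(dev->of_node) and drive it via
+ * __cci_control_port_by_index().
+ */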
+
+static void cci_ace_init_ports(void)
+{
+ int port, cpu;
+ struct device_node *cpun;
+
+ /*
+ * Port index look-up speeds up the function disabling ports by CPU,
+ * since the logical to port index mapping is done once and does
+ * not change after system boot.
+ * The stashed index array is initialized for all possible CPUs
+ * at probe time.
+ */
+ for_each_possible_cpu(cpu) {
+ /* too early to use cpu->of_node */
+ cpun = of_get_cpu_node(cpu, NULL);
+
+ if (WARN(!cpun, "Missing cpu device node\n"))
+ continue;
+
+ port = __cci_ace_get_port(cpun, ACE_PORT);
+ if (port < 0)
+ continue;
+
+ init_cpu_port(&cpu_port[cpu], port, cpu_logical_map(cpu));
+ }
+
+ for_each_possible_cpu(cpu) {
+ WARN(!cpu_port_is_valid(&cpu_port[cpu]),
+ "CPU %u does not have an associated CCI port\n",
+ cpu);
+ }
+}
+/*
+ * Functions to enable/disable a CCI interconnect slave port
+ *
+ * They are called by low-level power management code to disable slave
+ * interface snoops and DVM broadcasts.
+ * Since they may execute with cache data allocation disabled, after the
+ * caches have been cleaned and invalidated, the functions provide no
+ * explicit locking: normal cacheable kernel locks based on ldrex/strex
+ * may not work while the D-cache is disabled.
+ * Locking has to be provided by BSP implementations to ensure proper
+ * operations.
+ */
+
+/**
+ * cci_port_control() - function to control a CCI port
+ *
+ * @port: index of the port to setup
+ * @enable: if true enables the port, if false disables it
+ */
+static void notrace cci_port_control(unsigned int port, bool enable)
+{
+ void __iomem *base = ports[port].base;
+
+ writel_relaxed(enable ? CCI_ENABLE_REQ : 0, base + CCI_PORT_CTRL);
+ /*
+ * This function is called from power down procedures
+ * and must not execute any instruction that might
+ * cause the processor to be put in a quiescent state
+ * (eg wfi). Hence, cpu_relax() can not be added to this
+ * read loop to optimize power, since it might hide possibly
+ * disruptive operations.
+ */
+ while (readl_relaxed(cci_ctrl_base + CCI_CTRL_STATUS) & 0x1)
+ ;
+}
+
+/**
+ * cci_disable_port_by_cpu() - function to disable a CCI port by CPU
+ * reference
+ *
+ * @mpidr: mpidr of the CPU whose CCI port should be disabled
+ *
+ * Disabling a CCI port for a CPU implies disabling the CCI port
+ * controlling that CPU cluster. Code disabling CPU CCI ports
+ * must make sure that the CPU running the code is the last active CPU
+ * in the cluster ie all other CPUs are quiescent in a low power state.
+ *
+ * Return:
+ * 0 on success
+ * -ENODEV on port look-up failure
+ */
+int notrace cci_disable_port_by_cpu(u64 mpidr)
+{
+ int cpu;
+ bool is_valid;
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ is_valid = cpu_port_is_valid(&cpu_port[cpu]);
+ if (is_valid && cpu_port_match(&cpu_port[cpu], mpidr)) {
+ cci_port_control(cpu_port[cpu].port, false);
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(cci_disable_port_by_cpu);
+
+/**
+ * cci_enable_port_for_self() - enable a CCI port for calling CPU
+ *
+ * Enabling a CCI port for the calling CPU implies enabling the CCI
+ * port controlling that CPU's cluster. Caller must make sure that the
+ * CPU running the code is the first active CPU in the cluster and all
+ * other CPUs are quiescent in a low power state or waiting for this CPU
+ * to complete the CCI initialization.
+ *
+ * Because this is called when the MMU is still off and with no stack,
+ * the code must be position independent and ideally rely on callee
+ * clobbered registers only. To achieve this we must code this function
+ * entirely in assembler.
+ *
+ * On success this returns with the proper CCI port enabled. In case of
+ * any failure this never returns as the inability to enable the CCI is
+ * fatal and there is no possible recovery at this stage.
+ */
+asmlinkage void __naked cci_enable_port_for_self(void)
+{
+ asm volatile ("\n"
+" .arch armv7-a\n"
+" mrc p15, 0, r0, c0, c0, 5 @ get MPIDR value \n"
+" and r0, r0, #"__stringify(MPIDR_HWID_BITMASK)" \n"
+" adr r1, 5f \n"
+" ldr r2, [r1] \n"
+" add r1, r1, r2 @ &cpu_port \n"
+" add ip, r1, %[sizeof_cpu_port] \n"
+
+ /* Loop over the cpu_port array looking for a matching MPIDR */
+"1: ldr r2, [r1, %[offsetof_cpu_port_mpidr_lsb]] \n"
+" cmp r2, r0 @ compare MPIDR \n"
+" bne 2f \n"
+
+ /* Found a match, now test port validity */
+" ldr r3, [r1, %[offsetof_cpu_port_port]] \n"
+" tst r3, #"__stringify(PORT_VALID)" \n"
+" bne 3f \n"
+
+ /* no match, loop with the next cpu_port entry */
+"2: add r1, r1, %[sizeof_struct_cpu_port] \n"
+" cmp r1, ip @ done? \n"
+" blo 1b \n"
+
+ /* CCI port not found -- cheaply try to stall this CPU */
+"cci_port_not_found: \n"
+" wfi \n"
+" wfe \n"
+" b cci_port_not_found \n"
+
+ /* Use matched port index to look up the corresponding ports entry */
+"3: bic r3, r3, #"__stringify(PORT_VALID)" \n"
+" adr r0, 6f \n"
+" ldmia r0, {r1, r2} \n"
+" sub r1, r1, r0 @ virt - phys \n"
+" ldr r0, [r0, r2] @ *(&ports) \n"
+" mov r2, %[sizeof_struct_ace_port] \n"
+" mla r0, r2, r3, r0 @ &ports[index] \n"
+" sub r0, r0, r1 @ virt_to_phys() \n"
+
+ /* Enable the CCI port */
+" ldr r0, [r0, %[offsetof_port_phys]] \n"
+" mov r3, %[cci_enable_req]\n"
+" str r3, [r0, #"__stringify(CCI_PORT_CTRL)"] \n"
+
+ /* poll the status reg for completion */
+" adr r1, 7f \n"
+" ldr r0, [r1] \n"
+" ldr r0, [r0, r1] @ cci_ctrl_base \n"
+"4: ldr r1, [r0, #"__stringify(CCI_CTRL_STATUS)"] \n"
+" tst r1, %[cci_control_status_bits] \n"
+" bne 4b \n"
+
+" mov r0, #0 \n"
+" bx lr \n"
+
+" .align 2 \n"
+"5: .word cpu_port - . \n"
+"6: .word . \n"
+" .word ports - 6b \n"
+"7: .word cci_ctrl_phys - . \n"
+ : :
+ [sizeof_cpu_port] "i" (sizeof(cpu_port)),
+ [cci_enable_req] "i" cpu_to_le32(CCI_ENABLE_REQ),
+ [cci_control_status_bits] "i" cpu_to_le32(1),
+#ifndef __ARMEB__
+ [offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)),
+#else
+ [offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)+4),
+#endif
+ [offsetof_cpu_port_port] "i" (offsetof(struct cpu_port, port)),
+ [sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)),
+ [sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)),
+ [offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) );
+}
+
+/**
+ * __cci_control_port_by_device() - function to control a CCI port by device
+ * reference
+ *
+ * @dn: device node pointer of the device whose CCI port should be
+ * controlled
+ * @enable: if true enables the port, if false disables it
+ *
+ * Return:
+ * 0 on success
+ * -ENODEV on port look-up failure
+ */
+int notrace __cci_control_port_by_device(struct device_node *dn, bool enable)
+{
+ int port;
+
+ if (!dn)
+ return -ENODEV;
+
+ port = __cci_ace_get_port(dn, ACE_LITE_PORT);
+ if (WARN_ONCE(port < 0, "node %pOF ACE lite port look-up failure\n",
+ dn))
+ return -ENODEV;
+ cci_port_control(port, enable);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__cci_control_port_by_device);
+
+/**
+ * __cci_control_port_by_index() - function to control a CCI port by port index
+ *
+ * @port: port index previously retrieved with cci_ace_get_port()
+ * @enable: if true enables the port, if false disables it
+ *
+ * Return:
+ * 0 on success
+ * -ENODEV on port index out of range
+ * -EPERM if operation carried out on an ACE PORT
+ */
+int notrace __cci_control_port_by_index(u32 port, bool enable)
+{
+ if (port >= nb_cci_ports || ports[port].type == ACE_INVALID_PORT)
+ return -ENODEV;
+ /*
+ * CCI control for ports connected to CPUs is extremely fragile
+ * and must be made to go through a specific and controlled
+ * interface (ie cci_disable_port_by_cpu()); control by general purpose
+ * indexing is therefore disabled for ACE ports.
+ */
+ if (ports[port].type == ACE_PORT)
+ return -EPERM;
+
+ cci_port_control(port, enable);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__cci_control_port_by_index);
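+
+/*
+ * Sketch of the intended power management sequence, assuming an
+ * MCPM-style cluster power-down path (platform specifics vary): the
+ * last CPU going down calls cci_disable_port_by_cpu() with its own
+ * MPIDR after cleaning and invalidating its caches, and the first CPU
+ * coming back up calls cci_enable_port_for_self() from early assembly,
+ * before the MMU and caches are enabled.
+ */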
+
+static const struct of_device_id arm_cci_ctrl_if_matches[] = {
+ {.compatible = "arm,cci-400-ctrl-if", },
+ {},
+};
+
+static int cci_probe_ports(struct device_node *np)
+{
+ struct cci_nb_ports const *cci_config;
+ int ret, i, nb_ace = 0, nb_ace_lite = 0;
+ struct device_node *cp;
+ struct resource res;
+ const char *match_str;
+ bool is_ace;
+
+ cci_config = of_match_node(arm_cci_matches, np)->data;
+ if (!cci_config)
+ return -ENODEV;
+
+ nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;
+
+ ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
+ if (!ports)
+ return -ENOMEM;
+
+ for_each_available_child_of_node(np, cp) {
+ if (!of_match_node(arm_cci_ctrl_if_matches, cp))
+ continue;
+
+ i = nb_ace + nb_ace_lite;
+
+ if (i >= nb_cci_ports)
+ break;
+
+ if (of_property_read_string(cp, "interface-type",
+ &match_str)) {
+ WARN(1, "node %pOF missing interface-type property\n",
+ cp);
+ continue;
+ }
+ is_ace = strcmp(match_str, "ace") == 0;
+ if (!is_ace && strcmp(match_str, "ace-lite")) {
+ WARN(1, "node %pOF containing invalid interface-type property, skipping it\n",
+ cp);
+ continue;
+ }
+
+ ret = of_address_to_resource(cp, 0, &res);
+ if (!ret) {
+ ports[i].base = ioremap(res.start, resource_size(&res));
+ ports[i].phys = res.start;
+ }
+ if (ret || !ports[i].base) {
+ WARN(1, "unable to ioremap CCI port %d\n", i);
+ continue;
+ }
+
+ if (is_ace) {
+ if (WARN_ON(nb_ace >= cci_config->nb_ace))
+ continue;
+ ports[i].type = ACE_PORT;
+ ++nb_ace;
+ } else {
+ if (WARN_ON(nb_ace_lite >= cci_config->nb_ace_lite))
+ continue;
+ ports[i].type = ACE_LITE_PORT;
+ ++nb_ace_lite;
+ }
+ ports[i].dn = cp;
+ }
+
+ /*
+ * If there is no CCI port that is under kernel control
+ * return early and report probe status.
+ */
+ if (!nb_ace && !nb_ace_lite)
+ return -ENODEV;
+
+ /* initialize a stashed array of ACE ports to speed-up look-up */
+ cci_ace_init_ports();
+
+ /*
+ * Multi-cluster systems may need this data when non-coherent, during
+ * cluster power-up/power-down. Make sure it reaches main memory.
+ */
+ sync_cache_w(&cci_ctrl_base);
+ sync_cache_w(&cci_ctrl_phys);
+ sync_cache_w(&ports);
+ sync_cache_w(&cpu_port);
+ __sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports);
+ pr_info("ARM CCI driver probed\n");
+
+ return 0;
+}
+#else /* !CONFIG_ARM_CCI400_PORT_CTRL */
+static inline int cci_probe_ports(struct device_node *np)
+{
+ return 0;
+}
+#endif /* CONFIG_ARM_CCI400_PORT_CTRL */
+
+static int cci_probe(void)
+{
+ int ret;
+ struct device_node *np;
+ struct resource res;
+
+ np = of_find_matching_node(NULL, arm_cci_matches);
+ if (!of_device_is_available(np))
+ return -ENODEV;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (!ret) {
+ cci_ctrl_base = ioremap(res.start, resource_size(&res));
+ cci_ctrl_phys = res.start;
+ }
+ if (ret || !cci_ctrl_base) {
+ WARN(1, "unable to ioremap CCI ctrl\n");
+ return -ENXIO;
+ }
+
+ return cci_probe_ports(np);
+}
+
+static int cci_init_status = -EAGAIN;
+static DEFINE_MUTEX(cci_probing);
+
+static int cci_init(void)
+{
+ if (cci_init_status != -EAGAIN)
+ return cci_init_status;
+
+ mutex_lock(&cci_probing);
+ if (cci_init_status == -EAGAIN)
+ cci_init_status = cci_probe();
+ mutex_unlock(&cci_probing);
+ return cci_init_status;
+}
+
+/*
+ * To sort out early init call ordering, a helper function is provided to
+ * check whether the CCI driver has been initialized. If it has not been,
+ * the helper calls the init function that probes the driver and updates
+ * the return value.
+ */
+bool cci_probed(void)
+{
+ return cci_init() == 0;
+}
+EXPORT_SYMBOL_GPL(cci_probed);
+
+early_initcall(cci_init);
+core_initcall(cci_platform_init);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ARM CCI support");
diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c
new file mode 100644
index 000000000..845b6c43f
--- /dev/null
+++ b/drivers/bus/arm-integrator-lm.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM Integrator Logical Module bus driver
+ * Copyright (C) 2020 Linaro Ltd.
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * See the device tree bindings for this block for more details on the
+ * hardware.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+/* All information about the connected logic modules is in here */
+#define INTEGRATOR_SC_DEC_OFFSET 0x10
+
+/* Base address for the expansion modules */
+#define INTEGRATOR_AP_EXP_BASE 0xc0000000
+#define INTEGRATOR_AP_EXP_STRIDE 0x10000000
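+
+/*
+ * Worked example of the decode below: the logic module in slot n is
+ * mapped at INTEGRATOR_AP_EXP_BASE + n * INTEGRATOR_AP_EXP_STRIDE, so
+ * slot 1 sits at 0xc0000000 + 0x10000000 = 0xd0000000.
+ */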
+
+static int integrator_lm_populate(int num, struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ u32 base;
+ int ret;
+
+ base = INTEGRATOR_AP_EXP_BASE + (num * INTEGRATOR_AP_EXP_STRIDE);
+
+ /* Walk over the child nodes and see what chipselects we use */
+ for_each_available_child_of_node(np, child) {
+ struct resource res;
+
+ ret = of_address_to_resource(child, 0, &res);
+ if (ret) {
+ dev_info(dev, "no valid address on child\n");
+ continue;
+ }
+
+ /* First populate the syscon then any devices */
+ if (res.start == base) {
+ dev_info(dev, "populate module @0x%08x from DT\n",
+ base);
+ ret = of_platform_default_populate(child, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to populate module\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id integrator_ap_syscon_match[] = {
+ { .compatible = "arm,integrator-ap-syscon"},
+ { },
+};
+
+static int integrator_ap_lm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *syscon;
+ static struct regmap *map;
+ u32 val;
+ int ret;
+ int i;
+
+ /* Look up the system controller */
+ syscon = of_find_matching_node(NULL, integrator_ap_syscon_match);
+ if (!syscon) {
+ dev_err(dev,
+ "could not find Integrator/AP system controller\n");
+ return -ENODEV;
+ }
+ map = syscon_node_to_regmap(syscon);
+ if (IS_ERR(map)) {
+ dev_err(dev,
+ "could not find Integrator/AP system controller\n");
+ return PTR_ERR(map);
+ }
+
+ ret = regmap_read(map, INTEGRATOR_SC_DEC_OFFSET, &val);
+ if (ret) {
+ dev_err(dev, "could not read from Integrator/AP syscon\n");
+ return ret;
+ }
+
+ /* Loop over the connected modules */
+ for (i = 0; i < 4; i++) {
+ if (!(val & BIT(4 + i)))
+ continue;
+
+ dev_info(dev, "detected module in slot %d\n", i);
+ ret = integrator_lm_populate(i, dev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id integrator_ap_lm_match[] = {
+ { .compatible = "arm,integrator-ap-lm"},
+ { },
+};
+
+static struct platform_driver integrator_ap_lm_driver = {
+ .probe = integrator_ap_lm_probe,
+ .driver = {
+ .name = "integratorap-lm",
+ .of_match_table = integrator_ap_lm_match,
+ },
+};
+module_platform_driver(integrator_ap_lm_driver);
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Integrator AP Logical Module driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
new file mode 100644
index 000000000..7355fa2cb
--- /dev/null
+++ b/drivers/bus/brcmstb_gisb.c
@@ -0,0 +1,547 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-2017 Broadcom
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/sysfs.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/pm.h>
+#include <linux/kernel.h>
+#include <linux/kdebug.h>
+#include <linux/notifier.h>
+
+#ifdef CONFIG_MIPS
+#include <asm/traps.h>
+#endif
+
+#define ARB_ERR_CAP_CLEAR (1 << 0)
+#define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12)
+#define ARB_ERR_CAP_STATUS_TEA (1 << 11)
+#define ARB_ERR_CAP_STATUS_WRITE (1 << 1)
+#define ARB_ERR_CAP_STATUS_VALID (1 << 0)
+
+#define ARB_BP_CAP_CLEAR (1 << 0)
+#define ARB_BP_CAP_STATUS_PROT_SHIFT 14
+#define ARB_BP_CAP_STATUS_TYPE (1 << 13)
+#define ARB_BP_CAP_STATUS_RSP_SHIFT 10
+#define ARB_BP_CAP_STATUS_MASK GENMASK(1, 0)
+#define ARB_BP_CAP_STATUS_BS_SHIFT 2
+#define ARB_BP_CAP_STATUS_WRITE (1 << 1)
+#define ARB_BP_CAP_STATUS_VALID (1 << 0)
+
+enum {
+ ARB_TIMER,
+ ARB_BP_CAP_CLR,
+ ARB_BP_CAP_HI_ADDR,
+ ARB_BP_CAP_ADDR,
+ ARB_BP_CAP_STATUS,
+ ARB_BP_CAP_MASTER,
+ ARB_ERR_CAP_CLR,
+ ARB_ERR_CAP_HI_ADDR,
+ ARB_ERR_CAP_ADDR,
+ ARB_ERR_CAP_STATUS,
+ ARB_ERR_CAP_MASTER,
+};
+
+static const int gisb_offsets_bcm7038[] = {
+ [ARB_TIMER] = 0x00c,
+ [ARB_BP_CAP_CLR] = 0x014,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x0b8,
+ [ARB_BP_CAP_STATUS] = 0x0c0,
+ [ARB_BP_CAP_MASTER] = -1,
+ [ARB_ERR_CAP_CLR] = 0x0c4,
+ [ARB_ERR_CAP_HI_ADDR] = -1,
+ [ARB_ERR_CAP_ADDR] = 0x0c8,
+ [ARB_ERR_CAP_STATUS] = 0x0d0,
+ [ARB_ERR_CAP_MASTER] = -1,
+};
+
+static const int gisb_offsets_bcm7278[] = {
+ [ARB_TIMER] = 0x008,
+ [ARB_BP_CAP_CLR] = 0x01c,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x220,
+ [ARB_BP_CAP_STATUS] = 0x230,
+ [ARB_BP_CAP_MASTER] = 0x234,
+ [ARB_ERR_CAP_CLR] = 0x7f8,
+ [ARB_ERR_CAP_HI_ADDR] = -1,
+ [ARB_ERR_CAP_ADDR] = 0x7e0,
+ [ARB_ERR_CAP_STATUS] = 0x7f0,
+ [ARB_ERR_CAP_MASTER] = 0x7f4,
+};
+
+static const int gisb_offsets_bcm7400[] = {
+ [ARB_TIMER] = 0x00c,
+ [ARB_BP_CAP_CLR] = 0x014,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x0b8,
+ [ARB_BP_CAP_STATUS] = 0x0c0,
+ [ARB_BP_CAP_MASTER] = 0x0c4,
+ [ARB_ERR_CAP_CLR] = 0x0c8,
+ [ARB_ERR_CAP_HI_ADDR] = -1,
+ [ARB_ERR_CAP_ADDR] = 0x0cc,
+ [ARB_ERR_CAP_STATUS] = 0x0d4,
+ [ARB_ERR_CAP_MASTER] = 0x0d8,
+};
+
+static const int gisb_offsets_bcm7435[] = {
+ [ARB_TIMER] = 0x00c,
+ [ARB_BP_CAP_CLR] = 0x014,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x158,
+ [ARB_BP_CAP_STATUS] = 0x160,
+ [ARB_BP_CAP_MASTER] = 0x164,
+ [ARB_ERR_CAP_CLR] = 0x168,
+ [ARB_ERR_CAP_HI_ADDR] = -1,
+ [ARB_ERR_CAP_ADDR] = 0x16c,
+ [ARB_ERR_CAP_STATUS] = 0x174,
+ [ARB_ERR_CAP_MASTER] = 0x178,
+};
+
+static const int gisb_offsets_bcm7445[] = {
+ [ARB_TIMER] = 0x008,
+ [ARB_BP_CAP_CLR] = 0x010,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x1d8,
+ [ARB_BP_CAP_STATUS] = 0x1e0,
+ [ARB_BP_CAP_MASTER] = 0x1e4,
+ [ARB_ERR_CAP_CLR] = 0x7e4,
+ [ARB_ERR_CAP_HI_ADDR] = 0x7e8,
+ [ARB_ERR_CAP_ADDR] = 0x7ec,
+ [ARB_ERR_CAP_STATUS] = 0x7f4,
+ [ARB_ERR_CAP_MASTER] = 0x7f8,
+};
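+
+/*
+ * Each per-SoC table above maps a logical register index (ARB_TIMER,
+ * ARB_ERR_CAP_ADDR, ...) to an MMIO offset, with -1 marking registers
+ * the chip lacks. On BCM7445, for instance, gisb_read(gdev, ARB_TIMER)
+ * (defined below) accesses base + 0x008, while the missing
+ * ARB_BP_CAP_HI_ADDR simply reads back as 0.
+ */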
+
+struct brcmstb_gisb_arb_device {
+ void __iomem *base;
+ const int *gisb_offsets;
+ bool big_endian;
+ struct mutex lock;
+ struct list_head next;
+ u32 valid_mask;
+ const char *master_names[sizeof(u32) * BITS_PER_BYTE];
+ u32 saved_timeout;
+};
+
+static LIST_HEAD(brcmstb_gisb_arb_device_list);
+
+static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg)
+{
+ int offset = gdev->gisb_offsets[reg];
+
+ if (offset < 0) {
+ /* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
+ if (reg == ARB_ERR_CAP_MASTER)
+ return 1;
+ else
+ return 0;
+ }
+
+ if (gdev->big_endian)
+ return ioread32be(gdev->base + offset);
+ else
+ return ioread32(gdev->base + offset);
+}
+
+static u64 gisb_read_address(struct brcmstb_gisb_arb_device *gdev)
+{
+ u64 value;
+
+ value = gisb_read(gdev, ARB_ERR_CAP_ADDR);
+ value |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
+
+ return value;
+}
+
+static u64 gisb_read_bp_address(struct brcmstb_gisb_arb_device *gdev)
+{
+ u64 value;
+
+ value = gisb_read(gdev, ARB_BP_CAP_ADDR);
+ value |= (u64)gisb_read(gdev, ARB_BP_CAP_HI_ADDR) << 32;
+
+ return value;
+}
+
+static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
+{
+ int offset = gdev->gisb_offsets[reg];
+
+ if (offset == -1)
+ return;
+
+ if (gdev->big_endian)
+ iowrite32be(val, gdev->base + offset);
+ else
+ iowrite32(val, gdev->base + offset);
+}
+
+static ssize_t gisb_arb_get_timeout(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct brcmstb_gisb_arb_device *gdev = dev_get_drvdata(dev);
+ u32 timeout;
+
+ mutex_lock(&gdev->lock);
+ timeout = gisb_read(gdev, ARB_TIMER);
+ mutex_unlock(&gdev->lock);
+
+ return sprintf(buf, "%u", timeout);
+}
+
+static ssize_t gisb_arb_set_timeout(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct brcmstb_gisb_arb_device *gdev = dev_get_drvdata(dev);
+ int val, ret;
+
+ ret = kstrtoint(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val == 0 || val >= 0xffffffff)
+ return -EINVAL;
+
+ mutex_lock(&gdev->lock);
+ gisb_write(gdev, val, ARB_TIMER);
+ mutex_unlock(&gdev->lock);
+
+ return count;
+}
+
+static const char *
+brcmstb_gisb_master_to_str(struct brcmstb_gisb_arb_device *gdev,
+ u32 masters)
+{
+ u32 mask = gdev->valid_mask & masters;
+
+ if (hweight_long(mask) != 1)
+ return NULL;
+
+ return gdev->master_names[ffs(mask) - 1];
+}
+
+static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
+ const char *reason)
+{
+ u32 cap_status;
+ u64 arb_addr;
+ u32 master;
+ const char *m_name;
+ char m_fmt[11];
+
+ cap_status = gisb_read(gdev, ARB_ERR_CAP_STATUS);
+
+ /* Invalid captured address, bail out */
+ if (!(cap_status & ARB_ERR_CAP_STATUS_VALID))
+ return 1;
+
+ /* Read the address and master */
+ arb_addr = gisb_read_address(gdev);
+ master = gisb_read(gdev, ARB_ERR_CAP_MASTER);
+
+ m_name = brcmstb_gisb_master_to_str(gdev, master);
+ if (!m_name) {
+ snprintf(m_fmt, sizeof(m_fmt), "0x%08x", master);
+ m_name = m_fmt;
+ }
+
+ pr_crit("GISB: %s at 0x%llx [%c %s], core: %s\n",
+ reason, arb_addr,
+ cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R',
+ cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "",
+ m_name);
+
+ /* clear the GISB error */
+ gisb_write(gdev, ARB_ERR_CAP_CLEAR, ARB_ERR_CAP_CLR);
+
+ return 0;
+}
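+
+/*
+ * Example of the resulting log line for a captured write timeout (the
+ * address and master name here are made up):
+ *
+ *	GISB: timeout at 0x104c0400 [W timeout], core: bsp_0
+ */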
+
+#ifdef CONFIG_MIPS
+static int brcmstb_bus_error_handler(struct pt_regs *regs, int is_fixup)
+{
+ int ret = 0;
+ struct brcmstb_gisb_arb_device *gdev;
+ u32 cap_status;
+
+ list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next) {
+ cap_status = gisb_read(gdev, ARB_ERR_CAP_STATUS);
+
+ /* Invalid captured address, bail out */
+ if (!(cap_status & ARB_ERR_CAP_STATUS_VALID)) {
+ is_fixup = 1;
+ goto out;
+ }
+
+ ret |= brcmstb_gisb_arb_decode_addr(gdev, "bus error");
+ }
+out:
+ return is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
+}
+#endif
+
+static irqreturn_t brcmstb_gisb_timeout_handler(int irq, void *dev_id)
+{
+ brcmstb_gisb_arb_decode_addr(dev_id, "timeout");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t brcmstb_gisb_tea_handler(int irq, void *dev_id)
+{
+ brcmstb_gisb_arb_decode_addr(dev_id, "target abort");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t brcmstb_gisb_bp_handler(int irq, void *dev_id)
+{
+ struct brcmstb_gisb_arb_device *gdev = dev_id;
+ const char *m_name;
+ u32 bp_status;
+ u64 arb_addr;
+ u32 master;
+ char m_fmt[11];
+
+ bp_status = gisb_read(gdev, ARB_BP_CAP_STATUS);
+
+ /* Invalid captured address, bail out */
+ if (!(bp_status & ARB_BP_CAP_STATUS_VALID))
+ return IRQ_HANDLED;
+
+ /* Read the address and master */
+ arb_addr = gisb_read_bp_address(gdev);
+ master = gisb_read(gdev, ARB_BP_CAP_MASTER);
+
+ m_name = brcmstb_gisb_master_to_str(gdev, master);
+ if (!m_name) {
+ snprintf(m_fmt, sizeof(m_fmt), "0x%08x", master);
+ m_name = m_fmt;
+ }
+
+ pr_crit("GISB: breakpoint at 0x%llx [%c], core: %s\n",
+ arb_addr, bp_status & ARB_BP_CAP_STATUS_WRITE ? 'W' : 'R',
+ m_name);
+
+ /* clear the GISB error */
+ gisb_write(gdev, ARB_ERR_CAP_CLEAR, ARB_ERR_CAP_CLR);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Dump out gisb errors on die or panic.
+ */
+static int dump_gisb_error(struct notifier_block *self, unsigned long v,
+ void *p);
+
+static struct notifier_block gisb_die_notifier = {
+ .notifier_call = dump_gisb_error,
+};
+
+static struct notifier_block gisb_panic_notifier = {
+ .notifier_call = dump_gisb_error,
+};
+
+static int dump_gisb_error(struct notifier_block *self, unsigned long v,
+ void *p)
+{
+ struct brcmstb_gisb_arb_device *gdev;
+ const char *reason = "panic";
+
+ if (self == &gisb_die_notifier)
+ reason = "die";
+
+ /* iterate over each GISB arb registered handlers */
+ list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next)
+ brcmstb_gisb_arb_decode_addr(gdev, reason);
+
+ return NOTIFY_DONE;
+}
+
+static DEVICE_ATTR(gisb_arb_timeout, S_IWUSR | S_IRUGO,
+ gisb_arb_get_timeout, gisb_arb_set_timeout);
+
+static struct attribute *gisb_arb_sysfs_attrs[] = {
+ &dev_attr_gisb_arb_timeout.attr,
+ NULL,
+};
+
+static struct attribute_group gisb_arb_sysfs_attr_group = {
+ .attrs = gisb_arb_sysfs_attrs,
+};
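+
+/*
+ * Usage sketch for the attribute above (the device path is hypothetical
+ * and depends on how the node is instantiated in the device tree):
+ *
+ *	# echo 65536 > /sys/devices/platform/f04106c0.gisb-arb/gisb_arb_timeout
+ */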
+
+static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
+ { .compatible = "brcm,gisb-arb", .data = gisb_offsets_bcm7445 },
+ { .compatible = "brcm,bcm7445-gisb-arb", .data = gisb_offsets_bcm7445 },
+ { .compatible = "brcm,bcm7435-gisb-arb", .data = gisb_offsets_bcm7435 },
+ { .compatible = "brcm,bcm7400-gisb-arb", .data = gisb_offsets_bcm7400 },
+ { .compatible = "brcm,bcm7278-gisb-arb", .data = gisb_offsets_bcm7278 },
+ { .compatible = "brcm,bcm7038-gisb-arb", .data = gisb_offsets_bcm7038 },
+ { },
+};
+
+static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ struct brcmstb_gisb_arb_device *gdev;
+ const struct of_device_id *of_id;
+ struct resource *r;
+ int err, timeout_irq, tea_irq, bp_irq;
+ unsigned int num_masters, j = 0;
+ int i, first, last;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ timeout_irq = platform_get_irq(pdev, 0);
+ tea_irq = platform_get_irq(pdev, 1);
+ bp_irq = platform_get_irq(pdev, 2);
+
+ gdev = devm_kzalloc(&pdev->dev, sizeof(*gdev), GFP_KERNEL);
+ if (!gdev)
+ return -ENOMEM;
+
+ mutex_init(&gdev->lock);
+ INIT_LIST_HEAD(&gdev->next);
+
+ gdev->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(gdev->base))
+ return PTR_ERR(gdev->base);
+
+ of_id = of_match_node(brcmstb_gisb_arb_of_match, dn);
+ if (!of_id) {
+ pr_err("failed to look up compatible string\n");
+ return -EINVAL;
+ }
+ gdev->gisb_offsets = of_id->data;
+ gdev->big_endian = of_device_is_big_endian(dn);
+
+ err = devm_request_irq(&pdev->dev, timeout_irq,
+ brcmstb_gisb_timeout_handler, 0, pdev->name,
+ gdev);
+ if (err < 0)
+ return err;
+
+ err = devm_request_irq(&pdev->dev, tea_irq,
+ brcmstb_gisb_tea_handler, 0, pdev->name,
+ gdev);
+ if (err < 0)
+ return err;
+
+ /* Interrupt is optional */
+ if (bp_irq > 0) {
+ err = devm_request_irq(&pdev->dev, bp_irq,
+ brcmstb_gisb_bp_handler, 0, pdev->name,
+ gdev);
+ if (err < 0)
+ return err;
+ }
+
+ /* If we do not have a valid mask, assume all masters are enabled */
+ if (of_property_read_u32(dn, "brcm,gisb-arb-master-mask",
+ &gdev->valid_mask))
+ gdev->valid_mask = 0xffffffff;
+
+ /* Proceed with reading the literal names if we agree on the
+ * number of masters
+ */
+ num_masters = of_property_count_strings(dn,
+ "brcm,gisb-arb-master-names");
+ if (hweight_long(gdev->valid_mask) == num_masters) {
+ first = ffs(gdev->valid_mask) - 1;
+ last = fls(gdev->valid_mask) - 1;
+
+ for (i = first; i <= last; i++) {
+ if (!(gdev->valid_mask & BIT(i)))
+ continue;
+
+ of_property_read_string_index(dn,
+ "brcm,gisb-arb-master-names", j,
+ &gdev->master_names[i]);
+ j++;
+ }
+ }
+
+ err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, gdev);
+
+ list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
+
+#ifdef CONFIG_MIPS
+ board_be_handler = brcmstb_bus_error_handler;
+#endif
+
+ if (list_is_singular(&brcmstb_gisb_arb_device_list)) {
+ register_die_notifier(&gisb_die_notifier);
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &gisb_panic_notifier);
+ }
+
+ dev_info(&pdev->dev, "registered irqs: %d, %d\n",
+ timeout_irq, tea_irq);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int brcmstb_gisb_arb_suspend(struct device *dev)
+{
+ struct brcmstb_gisb_arb_device *gdev = dev_get_drvdata(dev);
+
+ gdev->saved_timeout = gisb_read(gdev, ARB_TIMER);
+
+ return 0;
+}
+
+/* Make sure we provide the same timeout value that was configured before, and
+ * do this before the GISB timeout interrupt handler has any chance to run.
+ */
+static int brcmstb_gisb_arb_resume_noirq(struct device *dev)
+{
+ struct brcmstb_gisb_arb_device *gdev = dev_get_drvdata(dev);
+
+ gisb_write(gdev, gdev->saved_timeout, ARB_TIMER);
+
+ return 0;
+}
+#else
+#define brcmstb_gisb_arb_suspend NULL
+#define brcmstb_gisb_arb_resume_noirq NULL
+#endif
+
+static const struct dev_pm_ops brcmstb_gisb_arb_pm_ops = {
+ .suspend = brcmstb_gisb_arb_suspend,
+ .resume_noirq = brcmstb_gisb_arb_resume_noirq,
+};
+
+static struct platform_driver brcmstb_gisb_arb_driver = {
+ .driver = {
+ .name = "brcm-gisb-arb",
+ .of_match_table = brcmstb_gisb_arb_of_match,
+ .pm = &brcmstb_gisb_arb_pm_ops,
+ },
+};
+
+static int __init brcm_gisb_driver_init(void)
+{
+ return platform_driver_probe(&brcmstb_gisb_arb_driver,
+ brcmstb_gisb_arb_probe);
+}
+
+module_init(brcm_gisb_driver_init);
diff --git a/drivers/bus/bt1-apb.c b/drivers/bus/bt1-apb.c
new file mode 100644
index 000000000..b25ff941e
--- /dev/null
+++ b/drivers/bus/bt1-apb.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 APB-bus driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/atomic.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/nmi.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/time64.h>
+#include <linux/sysfs.h>
+
+#define APB_EHB_ISR 0x00
+#define APB_EHB_ISR_PENDING BIT(0)
+#define APB_EHB_ISR_MASK BIT(1)
+#define APB_EHB_ADDR 0x04
+#define APB_EHB_TIMEOUT 0x08
+
+#define APB_EHB_TIMEOUT_MIN 0x000003FFU
+#define APB_EHB_TIMEOUT_MAX 0xFFFFFFFFU
+
+/*
+ * struct bt1_apb - Baikal-T1 APB EHB private data
+ * @dev: Pointer to the device structure.
+ * @regs: APB EHB registers map.
+ * @res: No-device error injection memory region.
+ * @irq: Errors IRQ number.
+ * @rate: APB-bus reference clock rate.
+ * @pclk: APB-reference clock.
+ * @prst: APB domain reset line.
+ * @count: Number of errors detected.
+ */
+struct bt1_apb {
+ struct device *dev;
+
+ struct regmap *regs;
+ void __iomem *res;
+ int irq;
+
+ unsigned long rate;
+ struct clk *pclk;
+
+ struct reset_control *prst;
+
+ atomic_t count;
+};
+
+static const struct regmap_config bt1_apb_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = APB_EHB_TIMEOUT,
+ .fast_io = true
+};
+
+static inline unsigned long bt1_apb_n_to_timeout_us(struct bt1_apb *apb, u32 n)
+{
+ u64 timeout = (u64)n * USEC_PER_SEC;
+
+ do_div(timeout, apb->rate);
+
+ return timeout;
+}
+
+static inline unsigned long bt1_apb_timeout_to_n_us(struct bt1_apb *apb,
+ unsigned long timeout)
+{
+ u64 n = (u64)timeout * apb->rate;
+
+ do_div(n, USEC_PER_SEC);
+
+ return n;
+}
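+
+/*
+ * Worked example of the two conversions above, assuming a hypothetical
+ * 125 MHz APB reference clock: n = 0x3FF corresponds to roughly
+ * 1023 * 1000000 / 125000000 = 8 us, while a requested timeout of
+ * 100 us maps back to n = 100 * 125000000 / 1000000 = 12500.
+ */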
+
+static irqreturn_t bt1_apb_isr(int irq, void *data)
+{
+ struct bt1_apb *apb = data;
+ u32 addr = 0;
+
+ regmap_read(apb->regs, APB_EHB_ADDR, &addr);
+
+ dev_crit_ratelimited(apb->dev,
+ "APB-bus fault %d: Slave access timeout at 0x%08x\n",
+ atomic_inc_return(&apb->count),
+ addr);
+
+ /*
+ * Print backtrace on each CPU. This might be pointless if the fault
+ * has happened on the same CPU as the IRQ handler is executed or
+ * the other core proceeded further execution despite the error.
+ * But if it's not, by looking at the trace we would get straight to
+ * the cause of the problem.
+ */
+ trigger_all_cpu_backtrace();
+
+ regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING, 0);
+
+ return IRQ_HANDLED;
+}
+
+static void bt1_apb_clear_data(void *data)
+{
+ struct bt1_apb *apb = data;
+ struct platform_device *pdev = to_platform_device(apb->dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static struct bt1_apb *bt1_apb_create_data(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bt1_apb *apb;
+ int ret;
+
+ apb = devm_kzalloc(dev, sizeof(*apb), GFP_KERNEL);
+ if (!apb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = devm_add_action(dev, bt1_apb_clear_data, apb);
+ if (ret) {
+ dev_err(dev, "Can't add APB EHB data clear action\n");
+ return ERR_PTR(ret);
+ }
+
+ apb->dev = dev;
+ atomic_set(&apb->count, 0);
+ platform_set_drvdata(pdev, apb);
+
+ return apb;
+}
+
+static int bt1_apb_request_regs(struct bt1_apb *apb)
+{
+ struct platform_device *pdev = to_platform_device(apb->dev);
+ void __iomem *regs;
+
+ regs = devm_platform_ioremap_resource_byname(pdev, "ehb");
+ if (IS_ERR(regs)) {
+ dev_err(apb->dev, "Couldn't map APB EHB registers\n");
+ return PTR_ERR(regs);
+ }
+
+ apb->regs = devm_regmap_init_mmio(apb->dev, regs, &bt1_apb_regmap_cfg);
+ if (IS_ERR(apb->regs)) {
+ dev_err(apb->dev, "Couldn't create APB EHB regmap\n");
+ return PTR_ERR(apb->regs);
+ }
+
+ apb->res = devm_platform_ioremap_resource_byname(pdev, "nodev");
+ if (IS_ERR(apb->res))
+ dev_err(apb->dev, "Couldn't map reserved region\n");
+
+ return PTR_ERR_OR_ZERO(apb->res);
+}
+
+static int bt1_apb_request_rst(struct bt1_apb *apb)
+{
+ int ret;
+
+ apb->prst = devm_reset_control_get_optional_exclusive(apb->dev, "prst");
+ if (IS_ERR(apb->prst)) {
+ dev_warn(apb->dev, "Couldn't get reset control line\n");
+ return PTR_ERR(apb->prst);
+ }
+
+ ret = reset_control_deassert(apb->prst);
+ if (ret)
+ dev_err(apb->dev, "Failed to deassert the reset line\n");
+
+ return ret;
+}
+
+static void bt1_apb_disable_clk(void *data)
+{
+ struct bt1_apb *apb = data;
+
+ clk_disable_unprepare(apb->pclk);
+}
+
+static int bt1_apb_request_clk(struct bt1_apb *apb)
+{
+ int ret;
+
+ apb->pclk = devm_clk_get(apb->dev, "pclk");
+ if (IS_ERR(apb->pclk)) {
+ dev_err(apb->dev, "Couldn't get APB clock descriptor\n");
+ return PTR_ERR(apb->pclk);
+ }
+
+ ret = clk_prepare_enable(apb->pclk);
+ if (ret) {
+ dev_err(apb->dev, "Couldn't enable the APB clock\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(apb->dev, bt1_apb_disable_clk, apb);
+ if (ret) {
+ dev_err(apb->dev, "Can't add APB EHB clocks disable action\n");
+ return ret;
+ }
+
+ apb->rate = clk_get_rate(apb->pclk);
+ if (!apb->rate) {
+ dev_err(apb->dev, "Invalid clock rate\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void bt1_apb_clear_irq(void *data)
+{
+ struct bt1_apb *apb = data;
+
+ regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_MASK, 0);
+}
+
+static int bt1_apb_request_irq(struct bt1_apb *apb)
+{
+ struct platform_device *pdev = to_platform_device(apb->dev);
+ int ret;
+
+ apb->irq = platform_get_irq(pdev, 0);
+ if (apb->irq < 0)
+ return apb->irq;
+
+ ret = devm_request_irq(apb->dev, apb->irq, bt1_apb_isr, IRQF_SHARED,
+ "bt1-apb", apb);
+ if (ret) {
+ dev_err(apb->dev, "Couldn't request APB EHB IRQ\n");
+ return ret;
+ }
+
+ ret = devm_add_action(apb->dev, bt1_apb_clear_irq, apb);
+ if (ret) {
+ dev_err(apb->dev, "Can't add APB EHB IRQs clear action\n");
+ return ret;
+ }
+
+ /* Unmask the IRQ and clear its pending flag. */
+ regmap_update_bits(apb->regs, APB_EHB_ISR,
+ APB_EHB_ISR_PENDING | APB_EHB_ISR_MASK,
+ APB_EHB_ISR_MASK);
+
+ return 0;
+}
+
+static ssize_t count_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&apb->count));
+}
+static DEVICE_ATTR_RO(count);
+
+static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+ unsigned long timeout;
+ int ret;
+ u32 n;
+
+ ret = regmap_read(apb->regs, APB_EHB_TIMEOUT, &n);
+ if (ret)
+ return ret;
+
+ timeout = bt1_apb_n_to_timeout_us(apb, n);
+
+ return scnprintf(buf, PAGE_SIZE, "%lu\n", timeout);
+}
+
+static ssize_t timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+ unsigned long timeout;
+ int ret;
+ u32 n;
+
+ if (kstrtoul(buf, 0, &timeout) < 0)
+ return -EINVAL;
+
+ n = bt1_apb_timeout_to_n_us(apb, timeout);
+ n = clamp(n, APB_EHB_TIMEOUT_MIN, APB_EHB_TIMEOUT_MAX);
+
+ ret = regmap_write(apb->regs, APB_EHB_TIMEOUT, n);
+
+ return ret ?: count;
+}
+static DEVICE_ATTR_RW(timeout);
+
+static ssize_t inject_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "Error injection: nodev irq\n");
+}
+
+static ssize_t inject_error_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *data, size_t count)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+
+ /*
+ * Either dummy read from the unmapped address in the APB IO area
+ * or manually set the IRQ status.
+ */
+ if (sysfs_streq(data, "nodev"))
+ readl(apb->res);
+ else if (sysfs_streq(data, "irq"))
+ regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING,
+ APB_EHB_ISR_PENDING);
+ else
+ return -EINVAL;
+
+ return count;
+}
+static DEVICE_ATTR_RW(inject_error);
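+
+/*
+ * Usage sketch for the three nodes above (the sysfs path is
+ * hypothetical; it depends on the DT instantiation):
+ *
+ *	# cat /sys/devices/platform/1f059000.apb/count
+ *	# echo 100 > /sys/devices/platform/1f059000.apb/timeout
+ *	# echo nodev > /sys/devices/platform/1f059000.apb/inject_error
+ */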
+
+static struct attribute *bt1_apb_sysfs_attrs[] = {
+ &dev_attr_count.attr,
+ &dev_attr_timeout.attr,
+ &dev_attr_inject_error.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(bt1_apb_sysfs);
+
+static void bt1_apb_remove_sysfs(void *data)
+{
+ struct bt1_apb *apb = data;
+
+ device_remove_groups(apb->dev, bt1_apb_sysfs_groups);
+}
+
+static int bt1_apb_init_sysfs(struct bt1_apb *apb)
+{
+ int ret;
+
+ ret = device_add_groups(apb->dev, bt1_apb_sysfs_groups);
+ if (ret) {
+ dev_err(apb->dev, "Failed to create EHB APB sysfs nodes\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(apb->dev, bt1_apb_remove_sysfs, apb);
+ if (ret)
+ dev_err(apb->dev, "Can't add APB EHB sysfs remove action\n");
+
+ return ret;
+}
+
+static int bt1_apb_probe(struct platform_device *pdev)
+{
+ struct bt1_apb *apb;
+ int ret;
+
+ apb = bt1_apb_create_data(pdev);
+ if (IS_ERR(apb))
+ return PTR_ERR(apb);
+
+ ret = bt1_apb_request_regs(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_request_rst(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_request_clk(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_request_irq(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_init_sysfs(apb);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id bt1_apb_of_match[] = {
+ { .compatible = "baikal,bt1-apb" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bt1_apb_of_match);
+
+static struct platform_driver bt1_apb_driver = {
+ .probe = bt1_apb_probe,
+ .driver = {
+ .name = "bt1-apb",
+ .of_match_table = bt1_apb_of_match
+ }
+};
+module_platform_driver(bt1_apb_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 APB-bus driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/bt1-axi.c b/drivers/bus/bt1-axi.c
new file mode 100644
index 000000000..e7a6744ac
--- /dev/null
+++ b/drivers/bus/bt1-axi.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 AXI-bus driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/atomic.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/nmi.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/sysfs.h>
+
+#define BT1_AXI_WERRL 0x110
+#define BT1_AXI_WERRH 0x114
+#define BT1_AXI_WERRH_TYPE BIT(23)
+#define BT1_AXI_WERRH_ADDR_FLD 24
+#define BT1_AXI_WERRH_ADDR_MASK GENMASK(31, BT1_AXI_WERRH_ADDR_FLD)
+
+/*
+ * struct bt1_axi - Baikal-T1 AXI-bus private data
+ * @dev: Pointer to the device structure.
+ * @qos_regs: AXI Interconnect QoS tuning registers.
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ * @irq: Error IRQ number.
+ * @aclk: AXI reference clock.
+ * @arst: AXI Interconnect reset line.
+ * @count: Number of errors detected.
+ */
+struct bt1_axi {
+ struct device *dev;
+
+ void __iomem *qos_regs;
+ struct regmap *sys_regs;
+ int irq;
+
+ struct clk *aclk;
+
+ struct reset_control *arst;
+
+ atomic_t count;
+};
+
+static irqreturn_t bt1_axi_isr(int irq, void *data)
+{
+ struct bt1_axi *axi = data;
+ u32 low = 0, high = 0;
+
+ regmap_read(axi->sys_regs, BT1_AXI_WERRL, &low);
+ regmap_read(axi->sys_regs, BT1_AXI_WERRH, &high);
+
+ dev_crit_ratelimited(axi->dev,
+ "AXI-bus fault %d: %s at 0x%x%08x\n",
+ atomic_inc_return(&axi->count),
+ high & BT1_AXI_WERRH_TYPE ? "no slave" : "slave protocol error",
+ high, low);
+
+ /*
+ * Print a backtrace on each CPU. This might be pointless if the fault
+ * happened on the same CPU the IRQ handler runs on, or if the other
+ * core carried on executing despite the error. But if not, the trace
+ * leads straight to the cause of the problem.
+ */
+ trigger_all_cpu_backtrace();
+
+ return IRQ_HANDLED;
+}
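+
+/*
+ * A minimal decoding sketch (illustrative only, not used by the handler
+ * above): per the WERRH layout declared at the top of this file, the
+ * topmost bits of the faulting address can be recovered from the raw
+ * register value.
+ */
+static inline u32 bt1_axi_werr_addr_high(u32 high)
+{
+ /* Bits 31:24 of WERRH carry the upper bits of the fault address */
+ return (high & BT1_AXI_WERRH_ADDR_MASK) >> BT1_AXI_WERRH_ADDR_FLD;
+}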
+
+static void bt1_axi_clear_data(void *data)
+{
+ struct bt1_axi *axi = data;
+ struct platform_device *pdev = to_platform_device(axi->dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static struct bt1_axi *bt1_axi_create_data(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bt1_axi *axi;
+ int ret;
+
+ axi = devm_kzalloc(dev, sizeof(*axi), GFP_KERNEL);
+ if (!axi)
+ return ERR_PTR(-ENOMEM);
+
+ ret = devm_add_action(dev, bt1_axi_clear_data, axi);
+ if (ret) {
+ dev_err(dev, "Can't add AXI EHB data clear action\n");
+ return ERR_PTR(ret);
+ }
+
+ axi->dev = dev;
+ atomic_set(&axi->count, 0);
+ platform_set_drvdata(pdev, axi);
+
+ return axi;
+}
+
+static int bt1_axi_request_regs(struct bt1_axi *axi)
+{
+ struct platform_device *pdev = to_platform_device(axi->dev);
+ struct device *dev = axi->dev;
+
+ axi->sys_regs = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+ if (IS_ERR(axi->sys_regs)) {
+ dev_err(dev, "Couldn't find syscon registers\n");
+ return PTR_ERR(axi->sys_regs);
+ }
+
+ axi->qos_regs = devm_platform_ioremap_resource_byname(pdev, "qos");
+ if (IS_ERR(axi->qos_regs))
+ dev_err(dev, "Couldn't map AXI-bus QoS registers\n");
+
+ return PTR_ERR_OR_ZERO(axi->qos_regs);
+}
+
+static int bt1_axi_request_rst(struct bt1_axi *axi)
+{
+ int ret;
+
+ axi->arst = devm_reset_control_get_optional_exclusive(axi->dev, "arst");
+ if (IS_ERR(axi->arst)) {
+ dev_warn(axi->dev, "Couldn't get reset control line\n");
+ return PTR_ERR(axi->arst);
+ }
+
+ ret = reset_control_deassert(axi->arst);
+ if (ret)
+ dev_err(axi->dev, "Failed to deassert the reset line\n");
+
+ return ret;
+}
+
+static void bt1_axi_disable_clk(void *data)
+{
+ struct bt1_axi *axi = data;
+
+ clk_disable_unprepare(axi->aclk);
+}
+
+static int bt1_axi_request_clk(struct bt1_axi *axi)
+{
+ int ret;
+
+ axi->aclk = devm_clk_get(axi->dev, "aclk");
+ if (IS_ERR(axi->aclk)) {
+ dev_err(axi->dev, "Couldn't get AXI Interconnect clock\n");
+ return PTR_ERR(axi->aclk);
+ }
+
+ ret = clk_prepare_enable(axi->aclk);
+ if (ret) {
+ dev_err(axi->dev, "Couldn't enable the AXI clock\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(axi->dev, bt1_axi_disable_clk, axi);
+ if (ret)
+ dev_err(axi->dev, "Can't add AXI clock disable action\n");
+
+ return ret;
+}
+
+static int bt1_axi_request_irq(struct bt1_axi *axi)
+{
+ struct platform_device *pdev = to_platform_device(axi->dev);
+ int ret;
+
+ axi->irq = platform_get_irq(pdev, 0);
+ if (axi->irq < 0)
+ return axi->irq;
+
+ ret = devm_request_irq(axi->dev, axi->irq, bt1_axi_isr, IRQF_SHARED,
+ "bt1-axi", axi);
+ if (ret)
+ dev_err(axi->dev, "Couldn't request AXI EHB IRQ\n");
+
+ return ret;
+}
+
+static ssize_t count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bt1_axi *axi = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&axi->count));
+}
+static DEVICE_ATTR_RO(count);
+
+static ssize_t inject_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "Error injection: bus unaligned\n");
+}
+
+static ssize_t inject_error_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *data, size_t count)
+{
+ struct bt1_axi *axi = dev_get_drvdata(dev);
+
+ /*
+ * An unaligned (sub-word) read from the QoS registers raises a CM2
+ * bus error, while an unaligned write raises the AXI-bus write error
+ * handled by this driver.
+ */
+ if (sysfs_streq(data, "bus"))
+ readb(axi->qos_regs);
+ else if (sysfs_streq(data, "unaligned"))
+ writeb(0, axi->qos_regs);
+ else
+ return -EINVAL;
+
+ return count;
+}
+static DEVICE_ATTR_RW(inject_error);
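+
+/*
+ * Illustrative usage (not part of the driver): from the device's sysfs
+ * directory, "echo bus > inject_error" raises the sub-word read (CM2 bus)
+ * error, while "echo unaligned > inject_error" raises the AXI-bus write
+ * error handled by this driver.
+ */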
+
+static struct attribute *bt1_axi_sysfs_attrs[] = {
+ &dev_attr_count.attr,
+ &dev_attr_inject_error.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(bt1_axi_sysfs);
+
+static void bt1_axi_remove_sysfs(void *data)
+{
+ struct bt1_axi *axi = data;
+
+ device_remove_groups(axi->dev, bt1_axi_sysfs_groups);
+}
+
+static int bt1_axi_init_sysfs(struct bt1_axi *axi)
+{
+ int ret;
+
+ ret = device_add_groups(axi->dev, bt1_axi_sysfs_groups);
+ if (ret) {
+ dev_err(axi->dev, "Failed to add sysfs files group\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(axi->dev, bt1_axi_remove_sysfs, axi);
+ if (ret)
+ dev_err(axi->dev, "Can't add AXI EHB sysfs remove action\n");
+
+ return ret;
+}
+
+static int bt1_axi_probe(struct platform_device *pdev)
+{
+ struct bt1_axi *axi;
+ int ret;
+
+ axi = bt1_axi_create_data(pdev);
+ if (IS_ERR(axi))
+ return PTR_ERR(axi);
+
+ ret = bt1_axi_request_regs(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_request_rst(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_request_clk(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_request_irq(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_init_sysfs(axi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id bt1_axi_of_match[] = {
+ { .compatible = "baikal,bt1-axi" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bt1_axi_of_match);
+
+static struct platform_driver bt1_axi_driver = {
+ .probe = bt1_axi_probe,
+ .driver = {
+ .name = "bt1-axi",
+ .of_match_table = bt1_axi_of_match
+ }
+};
+module_platform_driver(bt1_axi_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 AXI-bus driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/da8xx-mstpri.c b/drivers/bus/da8xx-mstpri.c
new file mode 100644
index 000000000..ee4c02335
--- /dev/null
+++ b/drivers/bus/da8xx-mstpri.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI da8xx master peripheral priority driver
+ *
+ * Copyright (C) 2016 BayLibre SAS
+ *
+ * Author:
+ * Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/regmap.h>
+
+/*
+ * REVISIT: Linux doesn't have a good framework for the kind of performance
+ * knobs this driver controls. We can't use device tree properties, as they
+ * are meant to describe hardware rather than configure it. We also don't
+ * want to commit to maintaining some random sysfs attributes.
+ *
+ * For now we just hardcode the register values for the boards that need
+ * some changes (as is the case for the LCD controller on da850-lcdk - the
+ * first board we support here). When Linux gets an appropriate framework,
+ * we'll easily convert the driver to it.
+ */
+
+#define DA8XX_MSTPRI0_OFFSET 0
+#define DA8XX_MSTPRI1_OFFSET 4
+#define DA8XX_MSTPRI2_OFFSET 8
+
+enum {
+ DA8XX_MSTPRI_ARM_I = 0,
+ DA8XX_MSTPRI_ARM_D,
+ DA8XX_MSTPRI_UPP,
+ DA8XX_MSTPRI_SATA,
+ DA8XX_MSTPRI_PRU0,
+ DA8XX_MSTPRI_PRU1,
+ DA8XX_MSTPRI_EDMA30TC0,
+ DA8XX_MSTPRI_EDMA30TC1,
+ DA8XX_MSTPRI_EDMA31TC0,
+ DA8XX_MSTPRI_VPIF_DMA_0,
+ DA8XX_MSTPRI_VPIF_DMA_1,
+ DA8XX_MSTPRI_EMAC,
+ DA8XX_MSTPRI_USB0CFG,
+ DA8XX_MSTPRI_USB0CDMA,
+ DA8XX_MSTPRI_UHPI,
+ DA8XX_MSTPRI_USB1,
+ DA8XX_MSTPRI_LCDC,
+};
+
+struct da8xx_mstpri_descr {
+ int reg;
+ int shift;
+ int mask;
+};
+
+static const struct da8xx_mstpri_descr da8xx_mstpri_priority_list[] = {
+ [DA8XX_MSTPRI_ARM_I] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 0,
+ .mask = 0x0000000f,
+ },
+ [DA8XX_MSTPRI_ARM_D] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 4,
+ .mask = 0x000000f0,
+ },
+ [DA8XX_MSTPRI_UPP] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 16,
+ .mask = 0x000f0000,
+ },
+ [DA8XX_MSTPRI_SATA] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 20,
+ .mask = 0x00f00000,
+ },
+ [DA8XX_MSTPRI_PRU0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 0,
+ .mask = 0x0000000f,
+ },
+ [DA8XX_MSTPRI_PRU1] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 4,
+ .mask = 0x000000f0,
+ },
+ [DA8XX_MSTPRI_EDMA30TC0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 8,
+ .mask = 0x00000f00,
+ },
+ [DA8XX_MSTPRI_EDMA30TC1] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 12,
+ .mask = 0x0000f000,
+ },
+ [DA8XX_MSTPRI_EDMA31TC0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 16,
+ .mask = 0x000f0000,
+ },
+ [DA8XX_MSTPRI_VPIF_DMA_0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 24,
+ .mask = 0x0f000000,
+ },
+ [DA8XX_MSTPRI_VPIF_DMA_1] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 28,
+ .mask = 0xf0000000,
+ },
+ [DA8XX_MSTPRI_EMAC] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 0,
+ .mask = 0x0000000f,
+ },
+ [DA8XX_MSTPRI_USB0CFG] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 8,
+ .mask = 0x00000f00,
+ },
+ [DA8XX_MSTPRI_USB0CDMA] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 12,
+ .mask = 0x0000f000,
+ },
+ [DA8XX_MSTPRI_UHPI] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 20,
+ .mask = 0x00f00000,
+ },
+ [DA8XX_MSTPRI_USB1] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 24,
+ .mask = 0x0f000000,
+ },
+ [DA8XX_MSTPRI_LCDC] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 28,
+ .mask = 0xf0000000,
+ },
+};
+
+struct da8xx_mstpri_priority {
+ int which;
+ u32 val;
+};
+
+struct da8xx_mstpri_board_priorities {
+ const char *board;
+ const struct da8xx_mstpri_priority *priorities;
+ size_t numprio;
+};
+
+/*
+ * Default memory settings of da850 do not meet the throughput/latency
+ * requirements of tilcdc. This results in the image displayed being
+ * incorrect and the following warning being displayed by the LCDC
+ * drm driver:
+ *
+ * tilcdc da8xx_lcdc.0: tilcdc_crtc_irq(0x00000020): FIFO underfow
+ */
+static const struct da8xx_mstpri_priority da850_lcdk_priorities[] = {
+ {
+ .which = DA8XX_MSTPRI_LCDC,
+ .val = 0,
+ },
+ {
+ .which = DA8XX_MSTPRI_EDMA30TC1,
+ .val = 0,
+ },
+ {
+ .which = DA8XX_MSTPRI_EDMA30TC0,
+ .val = 1,
+ },
+};
+
+static const struct da8xx_mstpri_board_priorities da8xx_mstpri_board_confs[] = {
+ {
+ .board = "ti,da850-lcdk",
+ .priorities = da850_lcdk_priorities,
+ .numprio = ARRAY_SIZE(da850_lcdk_priorities),
+ },
+};
+
+static const struct da8xx_mstpri_board_priorities *
+da8xx_mstpri_get_board_prio(void)
+{
+ const struct da8xx_mstpri_board_priorities *board_prio;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(da8xx_mstpri_board_confs); i++) {
+ board_prio = &da8xx_mstpri_board_confs[i];
+
+ if (of_machine_is_compatible(board_prio->board))
+ return board_prio;
+ }
+
+ return NULL;
+}
+
+static int da8xx_mstpri_probe(struct platform_device *pdev)
+{
+ const struct da8xx_mstpri_board_priorities *prio_list;
+ const struct da8xx_mstpri_descr *prio_descr;
+ const struct da8xx_mstpri_priority *prio;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *mstpri;
+ u32 reg;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mstpri = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mstpri)) {
+ dev_err(dev, "unable to map MSTPRI registers\n");
+ return PTR_ERR(mstpri);
+ }
+
+ prio_list = da8xx_mstpri_get_board_prio();
+ if (!prio_list) {
+ dev_err(dev, "no master priorities defined for this board\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < prio_list->numprio; i++) {
+ prio = &prio_list->priorities[i];
+ prio_descr = &da8xx_mstpri_priority_list[prio->which];
+
+ if (prio_descr->reg + sizeof(u32) > resource_size(res)) {
+ dev_warn(dev, "register offset out of range\n");
+ continue;
+ }
+
+ reg = readl(mstpri + prio_descr->reg);
+ reg &= ~prio_descr->mask;
+ reg |= prio->val << prio_descr->shift;
+
+ writel(reg, mstpri + prio_descr->reg);
+ }
+
+ return 0;
+}
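+
+/*
+ * Worked example (illustrative only): for the da850-lcdk LCDC entry above,
+ * .reg = DA8XX_MSTPRI2_OFFSET, .shift = 28, .mask = 0xf0000000 and
+ * .val = 0, so the loop in probe clears bits 31:28 of the MSTPRI2
+ * register, giving the LCD controller the highest (0) master priority.
+ */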
+
+static const struct of_device_id da8xx_mstpri_of_match[] = {
+ { .compatible = "ti,da850-mstpri", },
+ { },
+};
+
+static struct platform_driver da8xx_mstpri_driver = {
+ .probe = da8xx_mstpri_probe,
+ .driver = {
+ .name = "da8xx-mstpri",
+ .of_match_table = da8xx_mstpri_of_match,
+ },
+};
+module_platform_driver(da8xx_mstpri_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_DESCRIPTION("TI da8xx master peripheral priority driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/fsl-mc/Kconfig b/drivers/bus/fsl-mc/Kconfig
new file mode 100644
index 000000000..c23c77c9b
--- /dev/null
+++ b/drivers/bus/fsl-mc/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# DPAA2 fsl-mc bus
+#
+# Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+#
+
+config FSL_MC_BUS
+ bool "QorIQ DPAA2 fsl-mc bus driver"
+ depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86_LOCAL_APIC || PPC)))
+ select GENERIC_MSI_IRQ_DOMAIN
+ help
+ Driver to enable the bus infrastructure for the QorIQ DPAA2
+ architecture. The fsl-mc bus driver handles discovery of
+ DPAA2 objects (which are represented as Linux devices) and
+ binding objects to drivers.
diff --git a/drivers/bus/fsl-mc/Makefile b/drivers/bus/fsl-mc/Makefile
new file mode 100644
index 000000000..3c518c7e8
--- /dev/null
+++ b/drivers/bus/fsl-mc/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Freescale Management Complex (MC) bus drivers
+#
+# Copyright (C) 2014 Freescale Semiconductor, Inc.
+#
+obj-$(CONFIG_FSL_MC_BUS) += mc-bus-driver.o
+
+mc-bus-driver-objs := fsl-mc-bus.o \
+ mc-sys.o \
+ mc-io.o \
+ dpbp.o \
+ dpcon.o \
+ dprc.o \
+ dprc-driver.o \
+ fsl-mc-allocator.o \
+ fsl-mc-msi.o \
+ dpmcp.o
diff --git a/drivers/bus/fsl-mc/dpbp.c b/drivers/bus/fsl-mc/dpbp.c
new file mode 100644
index 000000000..9003cd369
--- /dev/null
+++ b/drivers/bus/fsl-mc/dpbp.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+/**
+ * dpbp_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpbp_id: DPBP unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpbp_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpbp_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpbp_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN,
+ cmd_flags, 0);
+ cmd_params = (struct dpbp_cmd_open *)cmd.params;
+ cmd_params->dpbp_id = cpu_to_le32(dpbp_id);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpbp_open);
+
+/**
+ * dpbp_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, cmd_flags,
+ token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpbp_close);
+
+/**
+ * dpbp_enable() - Enable the DPBP.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, cmd_flags,
+ token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpbp_enable);
+
+/**
+ * dpbp_disable() - Disable the DPBP.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE,
+ cmd_flags, token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpbp_disable);
+
+/**
+ * dpbp_reset() - Reset the DPBP, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET,
+ cmd_flags, token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpbp_reset);
+
+/**
+ * dpbp_get_attributes - Retrieve DPBP attributes.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpbp_rsp_get_attributes *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR,
+ cmd_flags, token);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params;
+ attr->bpid = le16_to_cpu(rsp_params->bpid);
+ attr->id = le32_to_cpu(rsp_params->id);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpbp_get_attributes);
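+
+/*
+ * Usage sketch (illustrative only, with error handling trimmed): a typical
+ * consumer opens the DPBP by ID, enables it, queries its attributes and
+ * closes the control session when done. The 'mc_io' portal is assumed to
+ * have been obtained elsewhere (e.g. via fsl_mc_portal_allocate()).
+ */
+static int __maybe_unused dpbp_usage_sketch(struct fsl_mc_io *mc_io, int id)
+{
+ struct dpbp_attr attr;
+ u16 token;
+ int err;
+
+ err = dpbp_open(mc_io, 0, id, &token);
+ if (err)
+ return err;
+
+ err = dpbp_enable(mc_io, 0, token);
+ if (!err)
+ err = dpbp_get_attributes(mc_io, 0, token, &attr);
+
+ dpbp_close(mc_io, 0, token);
+ return err;
+}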
diff --git a/drivers/bus/fsl-mc/dpcon.c b/drivers/bus/fsl-mc/dpcon.c
new file mode 100644
index 000000000..97b6fa605
--- /dev/null
+++ b/drivers/bus/fsl-mc/dpcon.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+/**
+ * dpcon_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpcon_id: DPCON unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpcon_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpcon_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpcon_cmd_open *dpcon_cmd;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ dpcon_cmd = (struct dpcon_cmd_open *)cmd.params;
+ dpcon_cmd->dpcon_id = cpu_to_le32(dpcon_id);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpcon_open);
+
+/**
+ * dpcon_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpcon_close);
+
+/**
+ * dpcon_enable() - Enable the DPCON
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpcon_enable);
+
+/**
+ * dpcon_disable() - Disable the DPCON
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpcon_disable);
+
+/**
+ * dpcon_reset() - Reset the DPCON, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET,
+ cmd_flags, token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpcon_reset);
+
+/**
+ * dpcon_get_attributes() - Retrieve DPCON attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ * @attr: Object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpcon_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpcon_rsp_get_attr *dpcon_rsp;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ dpcon_rsp = (struct dpcon_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(dpcon_rsp->id);
+ attr->qbman_ch_id = le16_to_cpu(dpcon_rsp->qbman_ch_id);
+ attr->num_priorities = dpcon_rsp->num_priorities;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpcon_get_attributes);
+
+/**
+ * dpcon_set_notification() - Set DPCON notification destination
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ * @cfg: Notification parameters
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_set_notification(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpcon_notification_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpcon_cmd_set_notification *dpcon_cmd;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION,
+ cmd_flags,
+ token);
+ dpcon_cmd = (struct dpcon_cmd_set_notification *)cmd.params;
+ dpcon_cmd->dpio_id = cpu_to_le32(cfg->dpio_id);
+ dpcon_cmd->priority = cfg->priority;
+ dpcon_cmd->user_ctx = cpu_to_le64(cfg->user_ctx);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpcon_set_notification);
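+
+/*
+ * Usage sketch (illustrative only): routing DPCON notifications to a DPIO.
+ * The field values below are assumptions of the example; they mirror the
+ * struct dpcon_notification_cfg members used above.
+ */
+static int __maybe_unused dpcon_notify_sketch(struct fsl_mc_io *mc_io,
+ u16 token, int dpio_id)
+{
+ struct dpcon_notification_cfg cfg = {
+ .dpio_id = dpio_id, /* DPIO object receiving the notifications */
+ .priority = 0, /* QBMan channel priority to use */
+ .user_ctx = 0, /* opaque value returned with each event */
+ };
+
+ return dpcon_set_notification(mc_io, 0, token, &cfg);
+}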
diff --git a/drivers/bus/fsl-mc/dpmcp.c b/drivers/bus/fsl-mc/dpmcp.c
new file mode 100644
index 000000000..5fbd0dbde
--- /dev/null
+++ b/drivers/bus/fsl-mc/dpmcp.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+/**
+ * dpmcp_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpmcp_id: DPMCP unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpmcp_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmcp_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpmcp_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpmcp_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_OPEN,
+ cmd_flags, 0);
+ cmd_params = (struct dpmcp_cmd_open *)cmd.params;
+ cmd_params->dpmcp_id = cpu_to_le32(dpmcp_id);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpmcp_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMCP object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmcp_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLOSE,
+ cmd_flags, token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpmcp_reset() - Reset the DPMCP, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMCP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmcp_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET,
+ cmd_flags, token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
new file mode 100644
index 000000000..91dc01596
--- /dev/null
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -0,0 +1,861 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale data path resource container (DPRC) driver
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2019-2020 NXP
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+#define FSL_MC_DPRC_DRIVER_NAME "fsl_mc_dprc"
+
+struct fsl_mc_child_objs {
+ int child_count;
+ struct fsl_mc_obj_desc *child_array;
+};
+
+static bool fsl_mc_device_match(struct fsl_mc_device *mc_dev,
+ struct fsl_mc_obj_desc *obj_desc)
+{
+ return mc_dev->obj_desc.id == obj_desc->id &&
+ strcmp(mc_dev->obj_desc.type, obj_desc->type) == 0;
+}
+
+static bool fsl_mc_obj_desc_is_allocatable(struct fsl_mc_obj_desc *obj)
+{
+ if (strcmp(obj->type, "dpmcp") == 0 ||
+ strcmp(obj->type, "dpcon") == 0 ||
+ strcmp(obj->type, "dpbp") == 0)
+ return true;
+ else
+ return false;
+}
+
+static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
+{
+ int i;
+ struct fsl_mc_child_objs *objs;
+ struct fsl_mc_device *mc_dev;
+
+ mc_dev = to_fsl_mc_device(dev);
+ objs = data;
+
+ for (i = 0; i < objs->child_count; i++) {
+ struct fsl_mc_obj_desc *obj_desc = &objs->child_array[i];
+
+ if (strlen(obj_desc->type) != 0 &&
+ fsl_mc_device_match(mc_dev, obj_desc))
+ break;
+ }
+
+ if (i == objs->child_count)
+ fsl_mc_device_remove(mc_dev);
+
+ return 0;
+}
+
+static int __fsl_mc_device_remove(struct device *dev, void *data)
+{
+ fsl_mc_device_remove(to_fsl_mc_device(dev));
+ return 0;
+}
+
+/**
+ * dprc_remove_devices - Removes devices for objects removed from a DPRC
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+ * @obj_desc_array: array of object descriptors for child objects currently
+ * present in the DPRC in the MC.
+ * @num_child_objects_in_mc: number of entries in obj_desc_array
+ *
+ * Synchronizes the state of the Linux bus driver with the actual state of
+ * the MC by removing devices that represent MC objects that have
+ * been dynamically removed in the physical DPRC.
+ */
+void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_obj_desc *obj_desc_array,
+ int num_child_objects_in_mc)
+{
+ if (num_child_objects_in_mc != 0) {
+ /*
+ * Remove child objects that are in the DPRC in Linux,
+ * but not in the MC:
+ */
+ struct fsl_mc_child_objs objs;
+
+ objs.child_count = num_child_objects_in_mc;
+ objs.child_array = obj_desc_array;
+ device_for_each_child(&mc_bus_dev->dev, &objs,
+ __fsl_mc_device_remove_if_not_in_mc);
+ } else {
+ /*
+ * There are no child objects for this DPRC in the MC.
+ * So, remove all the child devices from Linux:
+ */
+ device_for_each_child(&mc_bus_dev->dev, NULL,
+ __fsl_mc_device_remove);
+ }
+}
+EXPORT_SYMBOL_GPL(dprc_remove_devices);
+
+static int __fsl_mc_device_match(struct device *dev, void *data)
+{
+ struct fsl_mc_obj_desc *obj_desc = data;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ return fsl_mc_device_match(mc_dev, obj_desc);
+}
+
+struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc *obj_desc,
+ struct fsl_mc_device *mc_bus_dev)
+{
+ struct device *dev;
+
+ dev = device_find_child(&mc_bus_dev->dev, obj_desc,
+ __fsl_mc_device_match);
+
+ return dev ? to_fsl_mc_device(dev) : NULL;
+}
+
+/**
+ * check_plugged_state_change - Check change in an MC object's plugged state
+ *
+ * @mc_dev: pointer to the fsl-mc device for a given MC object
+ * @obj_desc: pointer to the MC object's descriptor in the MC
+ *
+ * If the plugged state has changed from unplugged to plugged, the fsl-mc
+ * device is bound to the corresponding device driver.
+ * If the plugged state has changed from plugged to unplugged, the fsl-mc
+ * device is unbound from the corresponding device driver.
+ */
+static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
+ struct fsl_mc_obj_desc *obj_desc)
+{
+ int error;
+ u32 plugged_flag_at_mc =
+ obj_desc->state & FSL_MC_OBJ_STATE_PLUGGED;
+
+ if (plugged_flag_at_mc !=
+ (mc_dev->obj_desc.state & FSL_MC_OBJ_STATE_PLUGGED)) {
+ if (plugged_flag_at_mc) {
+ mc_dev->obj_desc.state |= FSL_MC_OBJ_STATE_PLUGGED;
+ error = device_attach(&mc_dev->dev);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "device_attach() failed: %d\n",
+ error);
+ }
+ } else {
+ mc_dev->obj_desc.state &= ~FSL_MC_OBJ_STATE_PLUGGED;
+ device_release_driver(&mc_dev->dev);
+ }
+ }
+}
+
+static void fsl_mc_obj_device_add(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_obj_desc *obj_desc)
+{
+ int error;
+ struct fsl_mc_device *child_dev;
+
+ /*
+ * Check if device is already known to Linux:
+ */
+ child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
+ if (child_dev) {
+ check_plugged_state_change(child_dev, obj_desc);
+ put_device(&child_dev->dev);
+ } else {
+ error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
+ &child_dev);
+ if (error < 0)
+ return;
+ }
+}
+
+/**
+ * dprc_add_new_devices - Adds devices to the logical bus for a DPRC
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+ * @obj_desc_array: array of device descriptors for child devices currently
+ * present in the physical DPRC.
+ * @num_child_objects_in_mc: number of entries in obj_desc_array
+ *
+ * Synchronizes the state of the Linux bus driver with the actual
+ * state of the MC by adding objects that have been newly discovered
+ * in the physical DPRC.
+ */
+static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_obj_desc *obj_desc_array,
+ int num_child_objects_in_mc)
+{
+ int i;
+
+ /* probe the allocatable objects first */
+ for (i = 0; i < num_child_objects_in_mc; i++) {
+ struct fsl_mc_obj_desc *obj_desc = &obj_desc_array[i];
+
+ if (strlen(obj_desc->type) > 0 &&
+ fsl_mc_obj_desc_is_allocatable(obj_desc))
+ fsl_mc_obj_device_add(mc_bus_dev, obj_desc);
+ }
+
+ for (i = 0; i < num_child_objects_in_mc; i++) {
+ struct fsl_mc_obj_desc *obj_desc = &obj_desc_array[i];
+
+ if (strlen(obj_desc->type) > 0 &&
+ !fsl_mc_obj_desc_is_allocatable(obj_desc))
+ fsl_mc_obj_device_add(mc_bus_dev, obj_desc);
+ }
+}
+
+/**
+ * dprc_scan_objects - Discover objects in a DPRC
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+ * @alloc_interrupts: if true the function allocates the interrupt pool,
+ * otherwise the interrupt allocation is delayed
+ *
+ * Detects objects added and removed from a DPRC and synchronizes the
+ * state of the Linux bus driver with that of the MC by adding and
+ * removing devices accordingly.
+ * Two types of devices can be found in a DPRC: allocatable objects (e.g.,
+ * dpbp, dpmcp) and non-allocatable devices (e.g., dprc, dpni).
+ * All allocatable devices need to be probed before all non-allocatable
+ * devices, to ensure that device drivers for non-allocatable
+ * devices can allocate any type of allocatable devices.
+ * That is, we need to ensure that the corresponding resource pools are
+ * populated before they can get allocation requests from probe callbacks
+ * of the device drivers for the non-allocatable devices.
+ */
+static int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+ bool alloc_interrupts)
+{
+ int num_child_objects;
+ int dprc_get_obj_failures;
+ int error;
+ unsigned int irq_count = mc_bus_dev->obj_desc.irq_count;
+ struct fsl_mc_obj_desc *child_obj_desc_array = NULL;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+
+ error = dprc_get_obj_count(mc_bus_dev->mc_io,
+ 0,
+ mc_bus_dev->mc_handle,
+ &num_child_objects);
+ if (error < 0) {
+ dev_err(&mc_bus_dev->dev, "dprc_get_obj_count() failed: %d\n",
+ error);
+ return error;
+ }
+
+ if (num_child_objects != 0) {
+ int i;
+
+ child_obj_desc_array =
+ devm_kmalloc_array(&mc_bus_dev->dev, num_child_objects,
+ sizeof(*child_obj_desc_array),
+ GFP_KERNEL);
+ if (!child_obj_desc_array)
+ return -ENOMEM;
+
+ /*
+ * Discover objects currently present in the physical DPRC:
+ */
+ dprc_get_obj_failures = 0;
+ for (i = 0; i < num_child_objects; i++) {
+ struct fsl_mc_obj_desc *obj_desc =
+ &child_obj_desc_array[i];
+
+ error = dprc_get_obj(mc_bus_dev->mc_io,
+ 0,
+ mc_bus_dev->mc_handle,
+ i, obj_desc);
+ if (error < 0) {
+ dev_err(&mc_bus_dev->dev,
+ "dprc_get_obj(i=%d) failed: %d\n",
+ i, error);
+ /*
+ * Mark the obj entry as "invalid", by using the
+ * empty string as obj type:
+ */
+ obj_desc->type[0] = '\0';
+ obj_desc->id = error;
+ dprc_get_obj_failures++;
+ continue;
+ }
+
+ /*
+ * Add a quirk for all versions of dpseci < 4.0: none
+ * are coherent regardless of what the MC reports.
+ */
+ if ((strcmp(obj_desc->type, "dpseci") == 0) &&
+ (obj_desc->ver_major < 4))
+ obj_desc->flags |=
+ FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY;
+
+ irq_count += obj_desc->irq_count;
+ dev_dbg(&mc_bus_dev->dev,
+ "Discovered object: type %s, id %d\n",
+ obj_desc->type, obj_desc->id);
+ }
+
+ if (dprc_get_obj_failures != 0) {
+ dev_err(&mc_bus_dev->dev,
+ "%d out of %d devices could not be retrieved\n",
+ dprc_get_obj_failures, num_child_objects);
+ }
+ }
+
+ /*
+ * Allocate IRQs before binding the scanned devices to their
+ * respective drivers.
+ */
+ if (dev_get_msi_domain(&mc_bus_dev->dev)) {
+ if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
+ dev_warn(&mc_bus_dev->dev,
+ "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
+ irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+ }
+
+ if (alloc_interrupts && !mc_bus->irq_resources) {
+ error = fsl_mc_populate_irq_pool(mc_bus_dev,
+ FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+ if (error < 0)
+ return error;
+ }
+ }
+
+ dprc_remove_devices(mc_bus_dev, child_obj_desc_array,
+ num_child_objects);
+
+ dprc_add_new_devices(mc_bus_dev, child_obj_desc_array,
+ num_child_objects);
+
+ if (child_obj_desc_array)
+ devm_kfree(&mc_bus_dev->dev, child_obj_desc_array);
+
+ return 0;
+}
+
+/**
+ * dprc_scan_container - Scans a physical DPRC and synchronizes Linux bus state
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+ * @alloc_interrupts: if true the function allocates the interrupt pool,
+ * otherwise the interrupt allocation is delayed
+ *
+ * Scans the physical DPRC and synchronizes the state of the Linux
+ * bus driver with the actual state of the MC by adding and removing
+ * devices as appropriate.
+ */
+int dprc_scan_container(struct fsl_mc_device *mc_bus_dev,
+ bool alloc_interrupts)
+{
+ int error = 0;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+
+ fsl_mc_init_all_resource_pools(mc_bus_dev);
+
+ /*
+ * Discover objects in the DPRC:
+ */
+ mutex_lock(&mc_bus->scan_mutex);
+ error = dprc_scan_objects(mc_bus_dev, alloc_interrupts);
+ mutex_unlock(&mc_bus->scan_mutex);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(dprc_scan_container);
+
+/**
+ * dprc_irq0_handler - Regular ISR for DPRC interrupt 0
+ *
+ * @irq_num: IRQ number of the interrupt being handled
+ * @arg: Pointer to device structure
+ */
+static irqreturn_t dprc_irq0_handler(int irq_num, void *arg)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * dprc_irq0_handler_thread - Handler thread function for DPRC interrupt 0
+ *
+ * @irq_num: IRQ number of the interrupt being handled
+ * @arg: Pointer to device structure
+ */
+static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
+{
+ int error;
+ u32 status;
+ struct device *dev = arg;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ struct fsl_mc_io *mc_io = mc_dev->mc_io;
+ struct msi_desc *msi_desc = mc_dev->irqs[0]->msi_desc;
+
+ dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n",
+ irq_num, smp_processor_id());
+
+ if (!(mc_dev->flags & FSL_MC_IS_DPRC))
+ return IRQ_HANDLED;
+
+ mutex_lock(&mc_bus->scan_mutex);
+ if (!msi_desc || msi_desc->irq != (u32)irq_num)
+ goto out;
+
+ status = 0;
+ error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
+ &status);
+ if (error < 0) {
+ dev_err(dev,
+ "dprc_get_irq_status() failed: %d\n", error);
+ goto out;
+ }
+
+ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
+ status);
+ if (error < 0) {
+ dev_err(dev,
+ "dprc_clear_irq_status() failed: %d\n", error);
+ goto out;
+ }
+
+ if (status & (DPRC_IRQ_EVENT_OBJ_ADDED |
+ DPRC_IRQ_EVENT_OBJ_REMOVED |
+ DPRC_IRQ_EVENT_CONTAINER_DESTROYED |
+ DPRC_IRQ_EVENT_OBJ_DESTROYED |
+ DPRC_IRQ_EVENT_OBJ_CREATED)) {
+
+ error = dprc_scan_objects(mc_dev, true);
+ if (error < 0) {
+ /*
+ * If the error is -ENXIO, we ignore it, as it indicates
+ * that the object scan was aborted, as we detected that
+ * an object was removed from the DPRC in the MC, while
+ * we were scanning the DPRC.
+ */
+ if (error != -ENXIO) {
+ dev_err(dev, "dprc_scan_objects() failed: %d\n",
+ error);
+ }
+
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&mc_bus->scan_mutex);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Disable and clear interrupt for a given DPRC object
+ */
+static int disable_dprc_irq(struct fsl_mc_device *mc_dev)
+{
+ int error;
+ struct fsl_mc_io *mc_io = mc_dev->mc_io;
+
+ /*
+ * Disable generation of interrupt, while we configure it:
+ */
+ error = dprc_set_irq_enable(mc_io, 0, mc_dev->mc_handle, 0, 0);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Disabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
+ error);
+ return error;
+ }
+
+ /*
+ * Disable all interrupt causes for the interrupt:
+ */
+ error = dprc_set_irq_mask(mc_io, 0, mc_dev->mc_handle, 0, 0x0);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Disabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
+ error);
+ return error;
+ }
+
+ /*
+ * Clear any leftover interrupts:
+ */
+ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0, ~0x0U);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Disabling DPRC IRQ failed: dprc_clear_irq_status() failed: %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
+{
+ int error;
+ struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
+
+ /*
+ * NOTE: devm_request_threaded_irq() invokes the device-specific
+ * function that programs the MSI physically in the device
+ */
+ error = devm_request_threaded_irq(&mc_dev->dev,
+ irq->msi_desc->irq,
+ dprc_irq0_handler,
+ dprc_irq0_handler_thread,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(&mc_dev->dev),
+ &mc_dev->dev);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "devm_request_threaded_irq() failed: %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int enable_dprc_irq(struct fsl_mc_device *mc_dev)
+{
+ int error;
+
+ /*
+ * Enable all interrupt causes for the interrupt:
+ */
+ error = dprc_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, 0,
+ ~0x0u);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Enabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
+ error);
+
+ return error;
+ }
+
+ /*
+ * Enable generation of the interrupt:
+ */
+ error = dprc_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, 0, 1);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Enabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
+ error);
+
+ return error;
+ }
+
+ return 0;
+}
+
+/*
+ * Setup interrupt for a given DPRC device
+ */
+static int dprc_setup_irq(struct fsl_mc_device *mc_dev)
+{
+ int error;
+
+ error = fsl_mc_allocate_irqs(mc_dev);
+ if (error < 0)
+ return error;
+
+ error = disable_dprc_irq(mc_dev);
+ if (error < 0)
+ goto error_free_irqs;
+
+ error = register_dprc_irq_handler(mc_dev);
+ if (error < 0)
+ goto error_free_irqs;
+
+ error = enable_dprc_irq(mc_dev);
+ if (error < 0)
+ goto error_free_irqs;
+
+ return 0;
+
+error_free_irqs:
+ fsl_mc_free_irqs(mc_dev);
+ return error;
+}
+
+/**
+ * dprc_setup - opens the DPRC and creates an mc_io for it
+ *
+ * @mc_dev: Pointer to fsl-mc device representing a DPRC
+ *
+ * It opens the physical DPRC in the MC.
+ * It configures the DPRC portal used to communicate with the MC.
+ */
+int dprc_setup(struct fsl_mc_device *mc_dev)
+{
+ struct device *parent_dev = mc_dev->dev.parent;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ struct irq_domain *mc_msi_domain;
+ bool mc_io_created = false;
+ bool msi_domain_set = false;
+ u16 major_ver, minor_ver;
+ size_t region_size;
+ int error;
+
+ if (!is_fsl_mc_bus_dprc(mc_dev))
+ return -EINVAL;
+
+ if (dev_get_msi_domain(&mc_dev->dev))
+ return -EINVAL;
+
+ if (!mc_dev->mc_io) {
+ /*
+ * This is a child DPRC:
+ */
+ if (!dev_is_fsl_mc(parent_dev))
+ return -EINVAL;
+
+ if (mc_dev->obj_desc.region_count == 0)
+ return -EINVAL;
+
+ region_size = resource_size(mc_dev->regions);
+
+ error = fsl_create_mc_io(&mc_dev->dev,
+ mc_dev->regions[0].start,
+ region_size,
+ NULL,
+ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
+ &mc_dev->mc_io);
+ if (error < 0)
+ return error;
+
+ mc_io_created = true;
+ }
+
+ mc_msi_domain = fsl_mc_find_msi_domain(&mc_dev->dev);
+ if (!mc_msi_domain) {
+ dev_warn(&mc_dev->dev,
+ "WARNING: MC bus without interrupt support\n");
+ } else {
+ dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
+ msi_domain_set = true;
+ }
+
+ error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
+ &mc_dev->mc_handle);
+ if (error < 0) {
+ dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", error);
+ goto error_cleanup_msi_domain;
+ }
+
+ error = dprc_get_attributes(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ &mc_bus->dprc_attr);
+ if (error < 0) {
+ dev_err(&mc_dev->dev, "dprc_get_attributes() failed: %d\n",
+ error);
+ goto error_cleanup_open;
+ }
+
+ error = dprc_get_api_version(mc_dev->mc_io, 0,
+ &major_ver,
+ &minor_ver);
+ if (error < 0) {
+ dev_err(&mc_dev->dev, "dprc_get_api_version() failed: %d\n",
+ error);
+ goto error_cleanup_open;
+ }
+
+ if (major_ver < DPRC_MIN_VER_MAJOR ||
+ (major_ver == DPRC_MIN_VER_MAJOR &&
+ minor_ver < DPRC_MIN_VER_MINOR)) {
+ dev_err(&mc_dev->dev,
+ "ERROR: DPRC version %d.%d not supported\n",
+ major_ver, minor_ver);
+ error = -ENOTSUPP;
+ goto error_cleanup_open;
+ }
+
+ return 0;
+
+error_cleanup_open:
+ (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+
+error_cleanup_msi_domain:
+ if (msi_domain_set)
+ dev_set_msi_domain(&mc_dev->dev, NULL);
+
+ if (mc_io_created) {
+ fsl_destroy_mc_io(mc_dev->mc_io);
+ mc_dev->mc_io = NULL;
+ }
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(dprc_setup);
+
+/**
+ * dprc_probe - callback invoked when a DPRC is being bound to this driver
+ *
+ * @mc_dev: Pointer to fsl-mc device representing a DPRC
+ *
+ * It opens the physical DPRC in the MC.
+ * It scans the DPRC to discover the MC objects contained in it.
+ * It creates the interrupt pool for the MC bus associated with the DPRC.
+ * It configures the interrupts for the DPRC device itself.
+ */
+static int dprc_probe(struct fsl_mc_device *mc_dev)
+{
+ int error;
+
+ error = dprc_setup(mc_dev);
+ if (error < 0)
+ return error;
+
+ /*
+ * Discover MC objects in DPRC object:
+ */
+ error = dprc_scan_container(mc_dev, true);
+ if (error < 0)
+ goto dprc_cleanup;
+
+ /*
+ * Configure interrupt for the DPRC object associated with this MC bus:
+ */
+ error = dprc_setup_irq(mc_dev);
+ if (error < 0)
+ goto scan_cleanup;
+
+ dev_info(&mc_dev->dev, "DPRC device bound to driver");
+ return 0;
+
+scan_cleanup:
+ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
+dprc_cleanup:
+ dprc_cleanup(mc_dev);
+ return error;
+}
+
+/*
+ * Tear down interrupt for a given DPRC object
+ */
+static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
+
+ (void)disable_dprc_irq(mc_dev);
+
+ devm_free_irq(&mc_dev->dev, irq->msi_desc->irq, &mc_dev->dev);
+
+ fsl_mc_free_irqs(mc_dev);
+}
+
+/**
+ * dprc_cleanup - function that cleans up a DPRC
+ *
+ * @mc_dev: Pointer to fsl-mc device representing the DPRC
+ *
+ * It closes the DPRC device in the MC.
+ * It destroys the interrupt pool associated with this MC bus.
+ */
+int dprc_cleanup(struct fsl_mc_device *mc_dev)
+{
+ int error;
+
+ /*
+ * This function should be called only for DPRCs; it is an
+ * error to call it for regular objects.
+ */
+ if (!is_fsl_mc_bus_dprc(mc_dev))
+ return -EINVAL;
+
+ if (dev_get_msi_domain(&mc_dev->dev)) {
+ fsl_mc_cleanup_irq_pool(mc_dev);
+ dev_set_msi_domain(&mc_dev->dev, NULL);
+ }
+
+ fsl_mc_cleanup_all_resource_pools(mc_dev);
+
+ /*
+ * If this step fails we cannot go further with the cleanup, as there
+ * is no way of communicating with the firmware.
+ */
+ if (!mc_dev->mc_io) {
+ dev_err(&mc_dev->dev, "mc_io is NULL, cannot tear down the DPRC in the firmware\n");
+ return -EINVAL;
+ }
+
+ error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+ if (error < 0)
+ dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error);
+
+ if (!fsl_mc_is_root_dprc(&mc_dev->dev)) {
+ fsl_destroy_mc_io(mc_dev->mc_io);
+ mc_dev->mc_io = NULL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dprc_cleanup);
+
+/**
+ * dprc_remove - callback invoked when a DPRC is being unbound from this driver
+ *
+ * @mc_dev: Pointer to fsl-mc device representing the DPRC
+ *
+ * It removes the DPRC's child objects from Linux (not from the MC) and
+ * closes the DPRC device in the MC.
+ * It tears down the interrupts that were configured for the DPRC device.
+ * It destroys the interrupt pool associated with this MC bus.
+ */
+static int dprc_remove(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+
+ if (!is_fsl_mc_bus_dprc(mc_dev))
+ return -EINVAL;
+
+ if (!mc_bus->irq_resources)
+ return -EINVAL;
+
+ if (dev_get_msi_domain(&mc_dev->dev))
+ dprc_teardown_irq(mc_dev);
+
+ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
+
+ dprc_cleanup(mc_dev);
+
+ dev_info(&mc_dev->dev, "DPRC device unbound from driver");
+ return 0;
+}
+
+static const struct fsl_mc_device_id match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dprc"},
+ {.vendor = 0x0},
+};
+
+static struct fsl_mc_driver dprc_driver = {
+ .driver = {
+ .name = FSL_MC_DPRC_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = NULL,
+ },
+ .match_id_table = match_id_table,
+ .probe = dprc_probe,
+ .remove = dprc_remove,
+};
+
+int __init dprc_driver_init(void)
+{
+ return fsl_mc_driver_register(&dprc_driver);
+}
+
+void dprc_driver_exit(void)
+{
+ fsl_mc_driver_unregister(&dprc_driver);
+}
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
new file mode 100644
index 000000000..57b097caf
--- /dev/null
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -0,0 +1,702 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+/*
+ * cache the DPRC version to reduce the number of commands
+ * sent to the MC firmware
+ */
+static u16 dprc_major_ver;
+static u16 dprc_minor_ver;
+
+/**
+ * dprc_open() - Open DPRC object for use
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @container_id: Container ID to open
+ * @token: Returned token of DPRC object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Required before any operation on the object.
+ */
+int dprc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int container_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags,
+ 0);
+ cmd_params = (struct dprc_cmd_open *)cmd.params;
+ cmd_params->container_id = cpu_to_le32(container_id);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dprc_open);
+
+/**
+ * dprc_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags,
+ token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dprc_close);
+
+/**
+ * dprc_reset_container - Reset child container.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @child_container_id: ID of the container to reset
+ * @options: 32 bit options:
+ * - 0 (no bits set) - all the objects inside the container are
+ * reset. The child containers are entered recursively and the
+ * objects reset. All the objects (including the child containers)
+ * are closed.
+ * - bit 0 set - all the objects inside the container are reset.
+ * However the child containers are not entered recursively.
+ * This option is supported for API versions >= 6.5
+ * In case a software context crashes or becomes non-responsive, the parent
+ * may wish to reset its resource container before the software context is
+ * restarted.
+ *
+ * This routine informs all objects assigned to the child container that the
+ * container is being reset, so they may perform any cleanup operations that are
+ * needed. All objects handles that were owned by the child container shall be
+ * closed.
+ *
+ * Note that such request may be submitted even if the child software context
+ * has not crashed, but the resulting object cleanup operations will not be
+ * aware of that.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_reset_container(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int child_container_id,
+ u32 options)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_reset_container *cmd_params;
+ u32 cmdid = DPRC_CMDID_RESET_CONT;
+ int err;
+
+ /*
+ * If the DPRC object version was not yet cached, cache it now.
+ * Otherwise use the already cached value.
+ */
+ if (!dprc_major_ver && !dprc_minor_ver) {
+ err = dprc_get_api_version(mc_io, 0,
+ &dprc_major_ver,
+ &dprc_minor_ver);
+ if (err)
+ return err;
+ }
+
+ /*
+ * MC API 6.5 introduced a new field in the command used to pass
+ * some flags.
+ * Bit 0 indicates that the child containers are not recursively reset.
+ */
+ if (dprc_major_ver > 6 || (dprc_major_ver == 6 && dprc_minor_ver >= 5))
+ cmdid = DPRC_CMDID_RESET_CONT_V2;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(cmdid, cmd_flags, token);
+ cmd_params = (struct dprc_cmd_reset_container *)cmd.params;
+ cmd_params->child_container_id = cpu_to_le32(child_container_id);
+ cmd_params->options = cpu_to_le32(options);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dprc_reset_container);
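+
+/*
+ * Usage sketch (illustrative only): resetting a child container without
+ * recursing into its own children, relying on the bit-0 semantics
+ * documented above (honoured by MC API >= 6.5; older firmware does not
+ * interpret the options word).
+ */
+static int __maybe_unused dprc_reset_child_sketch(struct fsl_mc_io *mc_io,
+ u16 token, int child_id)
+{
+ /* options bit 0 set: do not enter child containers recursively */
+ return dprc_reset_container(mc_io, 0, token, child_id, BIT(0));
+}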
+
+/**
+ * dprc_set_irq() - Set IRQ information for the DPRC to trigger an interrupt.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @irq_index: Identifies the interrupt index to configure
+ * @irq_cfg: IRQ configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_set_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_set_irq *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprc_cmd_set_irq *)cmd.params;
+ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+ cmd_params->irq_index = irq_index;
+ cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprc_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable setting
+ * controls the overall interrupt state: if the interrupt is disabled, no
+ * cause will trigger an interrupt.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_set_irq_enable *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_ENABLE,
+ cmd_flags, token);
+ cmd_params = (struct dprc_cmd_set_irq_enable *)cmd.params;
+ cmd_params->enable = en & DPRC_ENABLE;
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprc_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @irq_index: The interrupt index to configure
+ * @mask: event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting irq
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_set_irq_mask *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_MASK,
+ cmd_flags, token);
+ cmd_params = (struct dprc_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
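+
+/*
+ * Illustrative sketch (not part of the driver): a typical sequence is to
+ * select the causes of interest with dprc_set_irq_mask() and only then
+ * enable the interrupt as a whole with dprc_set_irq_enable(). Here all 32
+ * causes of interrupt index 0 are unmasked; 'mc_io' and 'token' are
+ * assumed to be held by the caller:
+ *
+ *     err = dprc_set_irq_mask(mc_io, 0, token, 0, ~0U);
+ *     if (!err)
+ *         err = dprc_set_irq_enable(mc_io, 0, token, 0, 1);
+ */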
+
+/**
+ * dprc_get_irq_status() - Get the current status of any pending interrupts.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @irq_index: The interrupt index to configure
+ * @status: On entry: mask of causes to query; on return: interrupts
+ * status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_get_irq_status *cmd_params;
+ struct dprc_rsp_get_irq_status *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS,
+ cmd_flags, token);
+ cmd_params = (struct dprc_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
+ return 0;
+}
+
+/**
+ * dprc_clear_irq_status() - Clear a pending interrupt's status
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @irq_index: The interrupt index to configure
+ * @status: bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_clear_irq_status *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags, token);
+ cmd_params = (struct dprc_cmd_clear_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
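+
+/*
+ * Illustrative sketch (not part of the driver): since @status of
+ * dprc_clear_irq_status() is write-1-to-clear, a handler would typically
+ * read the pending causes and write the same value back, acknowledging
+ * exactly what it has handled:
+ *
+ *     u32 status = ~0U;
+ *
+ *     err = dprc_get_irq_status(mc_io, 0, token, 0, &status);
+ *     if (!err && status)
+ *         err = dprc_clear_irq_status(mc_io, 0, token, 0, status);
+ */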
+
+/**
+ * dprc_get_attributes() - Obtains container attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @attr: Returned container attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dprc_attributes *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_rsp_get_attributes *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_attributes *)cmd.params;
+ attr->container_id = le32_to_cpu(rsp_params->container_id);
+ attr->icid = le32_to_cpu(rsp_params->icid);
+ attr->options = le32_to_cpu(rsp_params->options);
+ attr->portal_id = le32_to_cpu(rsp_params->portal_id);
+
+ return 0;
+}
+
+/**
+ * dprc_get_obj_count() - Obtains the number of objects in the DPRC
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @obj_count: Number of objects assigned to the DPRC
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_obj_count(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *obj_count)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_rsp_get_obj_count *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_COUNT,
+ cmd_flags, token);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_obj_count *)cmd.params;
+ *obj_count = le32_to_cpu(rsp_params->obj_count);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dprc_get_obj_count);
+
+/**
+ * dprc_get_obj() - Get general information on an object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @obj_index: Index of the object to be queried (< obj_count)
+ * @obj_desc: Returns the requested object descriptor
+ *
+ * The object descriptors are retrieved one by one by incrementing
+ * obj_index up to (not including) the value of obj_count returned
+ * from dprc_get_obj_count(). dprc_get_obj_count() must
+ * be called prior to dprc_get_obj().
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_obj(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int obj_index,
+ struct fsl_mc_obj_desc *obj_desc)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_get_obj *cmd_params;
+ struct dprc_rsp_get_obj *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprc_cmd_get_obj *)cmd.params;
+ cmd_params->obj_index = cpu_to_le32(obj_index);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_obj *)cmd.params;
+ obj_desc->id = le32_to_cpu(rsp_params->id);
+ obj_desc->vendor = le16_to_cpu(rsp_params->vendor);
+ obj_desc->irq_count = rsp_params->irq_count;
+ obj_desc->region_count = rsp_params->region_count;
+ obj_desc->state = le32_to_cpu(rsp_params->state);
+ obj_desc->ver_major = le16_to_cpu(rsp_params->version_major);
+ obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor);
+ obj_desc->flags = le16_to_cpu(rsp_params->flags);
+ strncpy(obj_desc->type, rsp_params->type, 16);
+ obj_desc->type[15] = '\0';
+ strncpy(obj_desc->label, rsp_params->label, 16);
+ obj_desc->label[15] = '\0';
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dprc_get_obj);
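+
+/*
+ * Illustrative sketch (not part of the driver): the enumeration described
+ * above - dprc_get_obj_count() first, then dprc_get_obj() once per index:
+ *
+ *     int i, obj_count;
+ *     struct fsl_mc_obj_desc obj_desc;
+ *
+ *     err = dprc_get_obj_count(mc_io, 0, token, &obj_count);
+ *     for (i = 0; !err && i < obj_count; i++) {
+ *         err = dprc_get_obj(mc_io, 0, token, i, &obj_desc);
+ *         if (!err)
+ *             pr_info("found %s.%d\n", obj_desc.type, obj_desc.id);
+ *     }
+ */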
+
+/**
+ * dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @obj_type: Type of the object to set its IRQ
+ * @obj_id: ID of the object to set its IRQ
+ * @irq_index: The interrupt index to configure
+ * @irq_cfg: IRQ configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_set_obj_irq *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprc_cmd_set_obj_irq *)cmd.params;
+ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+ cmd_params->irq_index = irq_index;
+ cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+ strncpy(cmd_params->obj_type, obj_type, 16);
+ cmd_params->obj_type[15] = '\0';
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dprc_set_obj_irq);
+
+/**
+ * dprc_get_obj_region() - Get region information for a specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @obj_type: Object type as returned in dprc_get_obj()
+ * @obj_id: Unique object instance as returned in dprc_get_obj()
+ * @region_index: The specific region to query
+ * @region_desc: Returns the requested region descriptor
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 region_index,
+ struct dprc_region_desc *region_desc)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_get_obj_region *cmd_params;
+ struct dprc_rsp_get_obj_region *rsp_params;
+ int err;
+
+ /*
+ * If the DPRC object version was not yet cached, cache it now.
+ * Otherwise use the already cached value.
+ */
+ if (!dprc_major_ver && !dprc_minor_ver) {
+ err = dprc_get_api_version(mc_io, 0,
+ &dprc_major_ver,
+ &dprc_minor_ver);
+ if (err)
+ return err;
+ }
+
+ if (dprc_major_ver > 6 || (dprc_major_ver == 6 && dprc_minor_ver >= 6)) {
+ /*
+ * MC API version 6.6 changed the size of the MC portals and software
+ * portals to 64K (as implemented by hardware). If an older API is in
+ * use, the reported size is smaller (64 bytes for MC portals and 4K for
+ * software portals).
+ */
+
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG_V3,
+ cmd_flags, token);
+
+ } else if (dprc_major_ver == 6 && dprc_minor_ver >= 3) {
+ /*
+ * MC API version 6.3 introduced a new field to the region
+ * descriptor: base_address. If an older API is in use, the base
+ * address is set to zero to indicate that it must be obtained elsewhere
+ * (typically from the device tree).
+ */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG_V2,
+ cmd_flags, token);
+ } else {
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
+ cmd_flags, token);
+ }
+
+ cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+ cmd_params->region_index = region_index;
+ strncpy(cmd_params->obj_type, obj_type, 16);
+ cmd_params->obj_type[15] = '\0';
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
+ region_desc->base_offset = le64_to_cpu(rsp_params->base_offset);
+ region_desc->size = le32_to_cpu(rsp_params->size);
+ if (dprc_major_ver > 6 || (dprc_major_ver == 6 && dprc_minor_ver >= 3))
+ region_desc->base_address = le64_to_cpu(rsp_params->base_addr);
+ else
+ region_desc->base_address = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dprc_get_obj_region);
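+
+/*
+ * Illustrative sketch (not part of the driver): querying region 0 of a
+ * DPMCP and handling the pre-6.3 case in which base_address comes back as
+ * zero:
+ *
+ *     struct dprc_region_desc region;
+ *
+ *     err = dprc_get_obj_region(mc_io, 0, token, "dpmcp", dpmcp_id, 0,
+ *                               &region);
+ *     if (!err && !region.base_address)
+ *         region.base_address = legacy_base;
+ *
+ * where 'legacy_base' stands for a base address recovered elsewhere
+ * (typically from the device tree), since older firmware cannot report it.
+ */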
+
+/**
+ * dprc_get_api_version - Get Data Path Resource Container API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of Data Path Resource Container API
+ * @minor_ver: Minor version of Data Path Resource Container API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+ return 0;
+}
+
+/**
+ * dprc_get_container_id - Get container ID associated with a given portal.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @container_id: Requested container id
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_container_id(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int *container_id)
+{
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID,
+ cmd_flags,
+ 0);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *container_id = (int)mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
+
+/**
+ * dprc_get_connection() - Get connected endpoint and link status if connection
+ * exists.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @endpoint1: Endpoint 1 configuration parameters
+ * @endpoint2: Returned endpoint 2 configuration parameters
+ * @state: Returned link state:
+ * 1 - link is up;
+ * 0 - link is down;
+ * -1 - no connection (endpoint2 information is irrelevant)
+ *
+ * Return: '0' on Success; -ENOTCONN if connection does not exist.
+ */
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state)
+{
+ struct dprc_cmd_get_connection *cmd_params;
+ struct dprc_rsp_get_connection *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err, i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprc_cmd_get_connection *)cmd.params;
+ cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+ cmd_params->ep1_interface_id = cpu_to_le16(endpoint1->if_id);
+ for (i = 0; i < 16; i++)
+ cmd_params->ep1_type[i] = endpoint1->type[i];
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return -ENOTCONN;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_connection *)cmd.params;
+ endpoint2->id = le32_to_cpu(rsp_params->ep2_id);
+ endpoint2->if_id = le16_to_cpu(rsp_params->ep2_interface_id);
+ *state = le32_to_cpu(rsp_params->state);
+ for (i = 0; i < 16; i++)
+ endpoint2->type[i] = rsp_params->ep2_type[i];
+
+ return 0;
+}
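+
+/*
+ * Illustrative sketch (not part of the driver): probing the peer of a DPNI
+ * endpoint. Only endpoint1 is filled in by the caller; endpoint2 and the
+ * link state come back from the firmware ('dpni_id' is assumed known):
+ *
+ *     struct dprc_endpoint ep1 = { .type = "dpni", .id = dpni_id };
+ *     struct dprc_endpoint ep2;
+ *     int state;
+ *
+ *     err = dprc_get_connection(mc_io, 0, token, &ep1, &ep2, &state);
+ *     if (!err && state == 1)
+ *         pr_info("link up to %s.%d\n", ep2.type, ep2.id);
+ */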
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
new file mode 100644
index 000000000..2d7c764bb
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -0,0 +1,669 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fsl-mc object allocator driver
+ *
+ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+static bool __must_check fsl_mc_is_allocatable(struct fsl_mc_device *mc_dev)
+{
+ return is_fsl_mc_bus_dpbp(mc_dev) ||
+ is_fsl_mc_bus_dpmcp(mc_dev) ||
+ is_fsl_mc_bus_dpcon(mc_dev);
+}
+
+/**
+ * fsl_mc_resource_pool_add_device - add allocatable object to a resource
+ * pool of a given fsl-mc bus
+ *
+ * @mc_bus: pointer to the fsl-mc bus
+ * @pool_type: pool type
+ * @mc_dev: pointer to allocatable fsl-mc device
+ */
+static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
+ *mc_bus,
+ enum fsl_mc_pool_type
+ pool_type,
+ struct fsl_mc_device
+ *mc_dev)
+{
+ struct fsl_mc_resource_pool *res_pool;
+ struct fsl_mc_resource *resource;
+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+ int error = -EINVAL;
+
+ if (pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)
+ goto out;
+ if (!fsl_mc_is_allocatable(mc_dev))
+ goto out;
+ if (mc_dev->resource)
+ goto out;
+
+ res_pool = &mc_bus->resource_pools[pool_type];
+ if (res_pool->type != pool_type)
+ goto out;
+ if (res_pool->mc_bus != mc_bus)
+ goto out;
+
+ mutex_lock(&res_pool->mutex);
+
+ if (res_pool->max_count < 0)
+ goto out_unlock;
+ if (res_pool->free_count < 0 ||
+ res_pool->free_count > res_pool->max_count)
+ goto out_unlock;
+
+ resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource),
+ GFP_KERNEL);
+ if (!resource) {
+ error = -ENOMEM;
+ dev_err(&mc_bus_dev->dev,
+ "Failed to allocate memory for fsl_mc_resource\n");
+ goto out_unlock;
+ }
+
+ resource->type = pool_type;
+ resource->id = mc_dev->obj_desc.id;
+ resource->data = mc_dev;
+ resource->parent_pool = res_pool;
+ INIT_LIST_HEAD(&resource->node);
+ list_add_tail(&resource->node, &res_pool->free_list);
+ mc_dev->resource = resource;
+ res_pool->free_count++;
+ res_pool->max_count++;
+ error = 0;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
+out:
+ return error;
+}
+
+/**
+ * fsl_mc_resource_pool_remove_device - remove an allocatable device from a
+ * resource pool
+ *
+ * @mc_dev: pointer to allocatable fsl-mc device
+ *
+ * It permanently removes an allocatable fsl-mc device from the resource
+ * pool. It's an error if the device is in use.
+ */
+static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
+ *mc_dev)
+{
+ struct fsl_mc_device *mc_bus_dev;
+ struct fsl_mc_bus *mc_bus;
+ struct fsl_mc_resource_pool *res_pool;
+ struct fsl_mc_resource *resource;
+ int error = -EINVAL;
+
+ if (!fsl_mc_is_allocatable(mc_dev))
+ goto out;
+
+ resource = mc_dev->resource;
+ if (!resource || resource->data != mc_dev)
+ goto out;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ res_pool = resource->parent_pool;
+ if (res_pool != &mc_bus->resource_pools[resource->type])
+ goto out;
+
+ mutex_lock(&res_pool->mutex);
+
+ if (res_pool->max_count <= 0)
+ goto out_unlock;
+ if (res_pool->free_count <= 0 ||
+ res_pool->free_count > res_pool->max_count)
+ goto out_unlock;
+
+ /*
+ * If the device is currently allocated, its resource is not
+ * in the free list and thus, the device cannot be removed.
+ */
+ if (list_empty(&resource->node)) {
+ error = -EBUSY;
+ dev_err(&mc_bus_dev->dev,
+ "Device %s cannot be removed from resource pool\n",
+ dev_name(&mc_dev->dev));
+ goto out_unlock;
+ }
+
+ list_del_init(&resource->node);
+ res_pool->free_count--;
+ res_pool->max_count--;
+
+ devm_kfree(&mc_bus_dev->dev, resource);
+ mc_dev->resource = NULL;
+ error = 0;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
+out:
+ return error;
+}
+
+static const char *const fsl_mc_pool_type_strings[] = {
+ [FSL_MC_POOL_DPMCP] = "dpmcp",
+ [FSL_MC_POOL_DPBP] = "dpbp",
+ [FSL_MC_POOL_DPCON] = "dpcon",
+ [FSL_MC_POOL_IRQ] = "irq",
+};
+
+static int __must_check object_type_to_pool_type(const char *object_type,
+ enum fsl_mc_pool_type
+ *pool_type)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fsl_mc_pool_type_strings); i++) {
+ if (strcmp(object_type, fsl_mc_pool_type_strings[i]) == 0) {
+ *pool_type = i;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
+ enum fsl_mc_pool_type pool_type,
+ struct fsl_mc_resource **new_resource)
+{
+ struct fsl_mc_resource_pool *res_pool;
+ struct fsl_mc_resource *resource;
+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+ int error = -EINVAL;
+
+ BUILD_BUG_ON(ARRAY_SIZE(fsl_mc_pool_type_strings) !=
+ FSL_MC_NUM_POOL_TYPES);
+
+ *new_resource = NULL;
+ if (pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)
+ goto out;
+
+ res_pool = &mc_bus->resource_pools[pool_type];
+ if (res_pool->mc_bus != mc_bus)
+ goto out;
+
+ mutex_lock(&res_pool->mutex);
+ resource = list_first_entry_or_null(&res_pool->free_list,
+ struct fsl_mc_resource, node);
+
+ if (!resource) {
+ error = -ENXIO;
+ dev_err(&mc_bus_dev->dev,
+ "No more resources of type %s left\n",
+ fsl_mc_pool_type_strings[pool_type]);
+ goto out_unlock;
+ }
+
+ if (resource->type != pool_type)
+ goto out_unlock;
+ if (resource->parent_pool != res_pool)
+ goto out_unlock;
+ if (res_pool->free_count <= 0 ||
+ res_pool->free_count > res_pool->max_count)
+ goto out_unlock;
+
+ list_del_init(&resource->node);
+
+ res_pool->free_count--;
+ error = 0;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
+ *new_resource = resource;
+out:
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate);
+
+void fsl_mc_resource_free(struct fsl_mc_resource *resource)
+{
+ struct fsl_mc_resource_pool *res_pool;
+
+ res_pool = resource->parent_pool;
+ if (resource->type != res_pool->type)
+ return;
+
+ mutex_lock(&res_pool->mutex);
+ if (res_pool->free_count < 0 ||
+ res_pool->free_count >= res_pool->max_count)
+ goto out_unlock;
+
+ if (!list_empty(&resource->node))
+ goto out_unlock;
+
+ list_add_tail(&resource->node, &res_pool->free_list);
+ res_pool->free_count++;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
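+
+/*
+ * Illustrative sketch (not part of the driver): the allocate/free pair
+ * above is symmetric - a resource taken from a pool must be returned to
+ * the same pool once its user is done with it ('use_resource' is a
+ * hypothetical consumer):
+ *
+ *     struct fsl_mc_resource *res;
+ *
+ *     err = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_DPBP, &res);
+ *     if (!err) {
+ *         use_resource(res);
+ *         fsl_mc_resource_free(res);
+ *     }
+ */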
+
+/**
+ * fsl_mc_object_allocate - Allocates an fsl-mc object of the given
+ * pool type from a given fsl-mc bus instance
+ *
+ * @mc_dev: fsl-mc device which is used in conjunction with the
+ * allocated object
+ * @pool_type: pool type
+ * @new_mc_adev: pointer to area where the pointer to the allocated device
+ * is to be returned
+ *
+ * Allocatable objects are always used in conjunction with some functional
+ * device. This function allocates an object of the specified type from
+ * the DPRC containing the functional device.
+ *
+ * NOTE: pool_type must be different from FSL_MC_POOL_DPMCP, since MC
+ * portals are allocated using fsl_mc_portal_allocate(), instead of
+ * this function.
+ */
+int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
+ enum fsl_mc_pool_type pool_type,
+ struct fsl_mc_device **new_mc_adev)
+{
+ struct fsl_mc_device *mc_bus_dev;
+ struct fsl_mc_bus *mc_bus;
+ struct fsl_mc_device *mc_adev;
+ int error = -EINVAL;
+ struct fsl_mc_resource *resource = NULL;
+
+ *new_mc_adev = NULL;
+ if (mc_dev->flags & FSL_MC_IS_DPRC)
+ goto error;
+
+ if (!dev_is_fsl_mc(mc_dev->dev.parent))
+ goto error;
+
+ if (pool_type == FSL_MC_POOL_DPMCP)
+ goto error;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ error = fsl_mc_resource_allocate(mc_bus, pool_type, &resource);
+ if (error < 0)
+ goto error;
+
+ mc_adev = resource->data;
+ if (!mc_adev) {
+ error = -EINVAL;
+ goto error;
+ }
+
+ mc_adev->consumer_link = device_link_add(&mc_dev->dev,
+ &mc_adev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!mc_adev->consumer_link) {
+ error = -EINVAL;
+ goto error;
+ }
+
+ *new_mc_adev = mc_adev;
+ return 0;
+error:
+ if (resource)
+ fsl_mc_resource_free(resource);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_object_allocate);
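+
+/*
+ * Illustrative sketch (not part of the driver): a functional driver (a
+ * DPNI network driver, say) allocating a DPBP from its parent container;
+ * 'ls_dev' stands for the driver's own fsl_mc_device:
+ *
+ *     struct fsl_mc_device *dpbp_dev;
+ *
+ *     err = fsl_mc_object_allocate(ls_dev, FSL_MC_POOL_DPBP, &dpbp_dev);
+ *     if (err)
+ *         return err;
+ *     ...
+ *     fsl_mc_object_free(dpbp_dev);
+ */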
+
+/**
+ * fsl_mc_object_free - Returns an fsl-mc object to the resource
+ * pool where it came from.
+ * @mc_adev: Pointer to the fsl-mc device
+ */
+void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
+{
+ struct fsl_mc_resource *resource;
+
+ resource = mc_adev->resource;
+ if (resource->type == FSL_MC_POOL_DPMCP)
+ return;
+ if (resource->data != mc_adev)
+ return;
+
+ fsl_mc_resource_free(resource);
+
+ mc_adev->consumer_link = NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_object_free);
+
+/*
+ * A DPRC and the devices in the DPRC all share the same GIC-ITS device
+ * ID. A block of IRQs is pre-allocated and maintained in a pool
+ * from which devices can allocate them when needed.
+ */
+
+/*
+ * Initialize the interrupt pool associated with an fsl-mc bus.
+ * It allocates a block of IRQs from the GIC-ITS.
+ */
+int fsl_mc_populate_irq_pool(struct fsl_mc_device *mc_bus_dev,
+ unsigned int irq_count)
+{
+ unsigned int i;
+ struct msi_desc *msi_desc;
+ struct fsl_mc_device_irq *irq_resources;
+ struct fsl_mc_device_irq *mc_dev_irq;
+ int error;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ struct fsl_mc_resource_pool *res_pool =
+ &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+
+ /* do nothing if the IRQ pool is already populated */
+ if (mc_bus->irq_resources)
+ return 0;
+
+ if (irq_count == 0 ||
+ irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS)
+ return -EINVAL;
+
+ error = fsl_mc_msi_domain_alloc_irqs(&mc_bus_dev->dev, irq_count);
+ if (error < 0)
+ return error;
+
+ irq_resources = devm_kcalloc(&mc_bus_dev->dev,
+ irq_count, sizeof(*irq_resources),
+ GFP_KERNEL);
+ if (!irq_resources) {
+ error = -ENOMEM;
+ goto cleanup_msi_irqs;
+ }
+
+ for (i = 0; i < irq_count; i++) {
+ mc_dev_irq = &irq_resources[i];
+
+ /*
+ * NOTE: This mc_dev_irq's MSI addr/value pair will be set
+ * by the fsl_mc_msi_write_msg() callback
+ */
+ mc_dev_irq->resource.type = res_pool->type;
+ mc_dev_irq->resource.data = mc_dev_irq;
+ mc_dev_irq->resource.parent_pool = res_pool;
+ INIT_LIST_HEAD(&mc_dev_irq->resource.node);
+ list_add_tail(&mc_dev_irq->resource.node, &res_pool->free_list);
+ }
+
+ for_each_msi_entry(msi_desc, &mc_bus_dev->dev) {
+ mc_dev_irq = &irq_resources[msi_desc->fsl_mc.msi_index];
+ mc_dev_irq->msi_desc = msi_desc;
+ mc_dev_irq->resource.id = msi_desc->irq;
+ }
+
+ res_pool->max_count = irq_count;
+ res_pool->free_count = irq_count;
+ mc_bus->irq_resources = irq_resources;
+ return 0;
+
+cleanup_msi_irqs:
+ fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
+
+/**
+ * Tear down the interrupt pool associated with an fsl-mc bus.
+ * It frees the IRQs that were allocated to the pool back to the GIC-ITS.
+ */
+void fsl_mc_cleanup_irq_pool(struct fsl_mc_device *mc_bus_dev)
+{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ struct fsl_mc_resource_pool *res_pool =
+ &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+
+ if (!mc_bus->irq_resources)
+ return;
+
+ if (res_pool->max_count == 0)
+ return;
+
+ if (res_pool->free_count != res_pool->max_count)
+ return;
+
+ INIT_LIST_HEAD(&res_pool->free_list);
+ res_pool->max_count = 0;
+ res_pool->free_count = 0;
+ mc_bus->irq_resources = NULL;
+ fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool);
+
+/**
+ * Allocate the IRQs required by a given fsl-mc device.
+ */
+int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
+{
+ int i;
+ int irq_count;
+ int res_allocated_count = 0;
+ int error = -EINVAL;
+ struct fsl_mc_device_irq **irqs = NULL;
+ struct fsl_mc_bus *mc_bus;
+ struct fsl_mc_resource_pool *res_pool;
+
+ if (mc_dev->irqs)
+ return -EINVAL;
+
+ irq_count = mc_dev->obj_desc.irq_count;
+ if (irq_count == 0)
+ return -EINVAL;
+
+ if (is_fsl_mc_bus_dprc(mc_dev))
+ mc_bus = to_fsl_mc_bus(mc_dev);
+ else
+ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
+
+ if (!mc_bus->irq_resources)
+ return -EINVAL;
+
+ res_pool = &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+ if (res_pool->free_count < irq_count) {
+ dev_err(&mc_dev->dev,
+ "Not able to allocate %u irqs for device\n", irq_count);
+ return -ENOSPC;
+ }
+
+ irqs = devm_kcalloc(&mc_dev->dev, irq_count, sizeof(irqs[0]),
+ GFP_KERNEL);
+ if (!irqs)
+ return -ENOMEM;
+
+ for (i = 0; i < irq_count; i++) {
+ struct fsl_mc_resource *resource;
+
+ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_IRQ,
+ &resource);
+ if (error < 0)
+ goto error_resource_alloc;
+
+ irqs[i] = to_fsl_mc_irq(resource);
+ res_allocated_count++;
+
+ irqs[i]->mc_dev = mc_dev;
+ irqs[i]->dev_irq_index = i;
+ }
+
+ mc_dev->irqs = irqs;
+ return 0;
+
+error_resource_alloc:
+ for (i = 0; i < res_allocated_count; i++) {
+ irqs[i]->mc_dev = NULL;
+ fsl_mc_resource_free(&irqs[i]->resource);
+ }
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs);
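+
+/*
+ * Illustrative sketch (not part of the driver): once fsl_mc_allocate_irqs()
+ * succeeds, each entry of mc_dev->irqs[] carries an MSI descriptor whose
+ * Linux IRQ number can be handed to devm_request_irq(); 'handler' is a
+ * hypothetical IRQ handler:
+ *
+ *     err = fsl_mc_allocate_irqs(mc_dev);
+ *     if (err)
+ *         return err;
+ *
+ *     err = devm_request_irq(&mc_dev->dev, mc_dev->irqs[0]->msi_desc->irq,
+ *                            handler, IRQF_NO_SUSPEND, "example", mc_dev);
+ */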
+
+/*
+ * Frees the IRQs that were allocated for an fsl-mc device.
+ */
+void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev)
+{
+ int i;
+ int irq_count;
+ struct fsl_mc_bus *mc_bus;
+ struct fsl_mc_device_irq **irqs = mc_dev->irqs;
+
+ if (!irqs)
+ return;
+
+ irq_count = mc_dev->obj_desc.irq_count;
+
+ if (is_fsl_mc_bus_dprc(mc_dev))
+ mc_bus = to_fsl_mc_bus(mc_dev);
+ else
+ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
+
+ if (!mc_bus->irq_resources)
+ return;
+
+ for (i = 0; i < irq_count; i++) {
+ irqs[i]->mc_dev = NULL;
+ fsl_mc_resource_free(&irqs[i]->resource);
+ }
+
+ mc_dev->irqs = NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_free_irqs);
+
+void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
+{
+ int pool_type;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+
+ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++) {
+ struct fsl_mc_resource_pool *res_pool =
+ &mc_bus->resource_pools[pool_type];
+
+ res_pool->type = pool_type;
+ res_pool->max_count = 0;
+ res_pool->free_count = 0;
+ res_pool->mc_bus = mc_bus;
+ INIT_LIST_HEAD(&res_pool->free_list);
+ mutex_init(&res_pool->mutex);
+ }
+}
+
+static void fsl_mc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev,
+ enum fsl_mc_pool_type pool_type)
+{
+ struct fsl_mc_resource *resource;
+ struct fsl_mc_resource *next;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ struct fsl_mc_resource_pool *res_pool =
+ &mc_bus->resource_pools[pool_type];
+
+ list_for_each_entry_safe(resource, next, &res_pool->free_list, node)
+ devm_kfree(&mc_bus_dev->dev, resource);
+}
+
+void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
+{
+ int pool_type;
+
+ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++)
+ fsl_mc_cleanup_resource_pool(mc_bus_dev, pool_type);
+}
+
+/**
+ * fsl_mc_allocator_probe - callback invoked when an allocatable device is
+ * being added to the system
+ */
+static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
+{
+ enum fsl_mc_pool_type pool_type;
+ struct fsl_mc_device *mc_bus_dev;
+ struct fsl_mc_bus *mc_bus;
+ int error;
+
+ if (!fsl_mc_is_allocatable(mc_dev))
+ return -EINVAL;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ if (!dev_is_fsl_mc(&mc_bus_dev->dev))
+ return -EINVAL;
+
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ error = object_type_to_pool_type(mc_dev->obj_desc.type, &pool_type);
+ if (error < 0)
+ return error;
+
+ error = fsl_mc_resource_pool_add_device(mc_bus, pool_type, mc_dev);
+ if (error < 0)
+ return error;
+
+ dev_dbg(&mc_dev->dev,
+ "Allocatable fsl-mc device bound to fsl_mc_allocator driver");
+ return 0;
+}
+
+/**
+ * fsl_mc_allocator_remove - callback invoked when an allocatable device is
+ * being removed from the system
+ */
+static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
+{
+ int error;
+
+ if (!fsl_mc_is_allocatable(mc_dev))
+ return -EINVAL;
+
+ if (mc_dev->resource) {
+ error = fsl_mc_resource_pool_remove_device(mc_dev);
+ if (error < 0)
+ return error;
+ }
+
+ dev_dbg(&mc_dev->dev,
+ "Allocatable fsl-mc device unbound from fsl_mc_allocator driver");
+ return 0;
+}
+
+static const struct fsl_mc_device_id match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpbp",
+ },
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpmcp",
+ },
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpcon",
+ },
+ {.vendor = 0x0},
+};
+
+static struct fsl_mc_driver fsl_mc_allocator_driver = {
+ .driver = {
+ .name = "fsl_mc_allocator",
+ .pm = NULL,
+ },
+ .match_id_table = match_id_table,
+ .probe = fsl_mc_allocator_probe,
+ .remove = fsl_mc_allocator_remove,
+};
+
+int __init fsl_mc_allocator_driver_init(void)
+{
+ return fsl_mc_driver_register(&fsl_mc_allocator_driver);
+}
+
+void fsl_mc_allocator_driver_exit(void)
+{
+ fsl_mc_driver_unregister(&fsl_mc_allocator_driver);
+}
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
new file mode 100644
index 000000000..e329cdd71
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -0,0 +1,1157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale Management Complex (MC) bus driver
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2019-2020 NXP
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ */
+
+#define pr_fmt(fmt) "fsl-mc: " fmt
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/limits.h>
+#include <linux/bitops.h>
+#include <linux/msi.h>
+#include <linux/dma-mapping.h>
+#include <linux/acpi.h>
+#include <linux/iommu.h>
+
+#include "fsl-mc-private.h"
+
+/**
+ * Default DMA mask for devices on an fsl-mc bus
+ */
+#define FSL_MC_DEFAULT_DMA_MASK (~0ULL)
+
+static struct fsl_mc_version mc_version;
+
+/**
+ * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
+ * @root_mc_bus_dev: fsl-mc device representing the root DPRC
+ * @num_translation_ranges: number of entries in addr_translation_ranges
+ * @translation_ranges: array of bus to system address translation ranges
+ */
+struct fsl_mc {
+ struct fsl_mc_device *root_mc_bus_dev;
+ u8 num_translation_ranges;
+ struct fsl_mc_addr_translation_range *translation_ranges;
+ void *fsl_mc_regs;
+};
+
+/**
+ * struct fsl_mc_addr_translation_range - bus to system address translation
+ * range
+ * @mc_region_type: Type of MC region for the range being translated
+ * @start_mc_offset: Start MC offset of the range being translated
+ * @end_mc_offset: MC offset of the first byte after the range (last MC
+ * offset of the range is end_mc_offset - 1)
+ * @start_phys_addr: system physical address corresponding to start_mc_offset
+ */
+struct fsl_mc_addr_translation_range {
+ enum dprc_region_type mc_region_type;
+ u64 start_mc_offset;
+ u64 end_mc_offset;
+ phys_addr_t start_phys_addr;
+};
+
+#define FSL_MC_FAPR 0x28
+#define MC_FAPR_PL BIT(18)
+#define MC_FAPR_BMT BIT(17)
+
+static phys_addr_t mc_portal_base_phys_addr;
+
+/**
+ * fsl_mc_bus_match - device to driver matching callback
+ * @dev: the fsl-mc device to match against
+ * @drv: the device driver to search for matching fsl-mc object type
+ * structures
+ *
+ * Returns 1 on success, 0 otherwise.
+ */
+static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
+{
+ const struct fsl_mc_device_id *id;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
+ bool found = false;
+
+ /* When driver_override is set, only bind to the matching driver */
+ if (mc_dev->driver_override) {
+ found = !strcmp(mc_dev->driver_override, mc_drv->driver.name);
+ goto out;
+ }
+
+ if (!mc_drv->match_id_table)
+ goto out;
+
+ /*
+ * If the object is not 'plugged' don't match.
+ * Only exception is the root DPRC, which is a special case.
+ */
+ if ((mc_dev->obj_desc.state & FSL_MC_OBJ_STATE_PLUGGED) == 0 &&
+ !fsl_mc_is_root_dprc(&mc_dev->dev))
+ goto out;
+
+ /*
+ * Traverse the match_id table of the given driver, trying to find
+ * a matching for the given device.
+ */
+ for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
+ if (id->vendor == mc_dev->obj_desc.vendor &&
+ strcmp(id->obj_type, mc_dev->obj_desc.type) == 0) {
+ found = true;
+
+ break;
+ }
+ }
+
+out:
+ dev_dbg(dev, "%smatched\n", found ? "" : "not ");
+ return found;
+}
+
+/**
+ * fsl_mc_bus_uevent - callback invoked when a device is added
+ */
+static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ if (add_uevent_var(env, "MODALIAS=fsl-mc:v%08Xd%s",
+ mc_dev->obj_desc.vendor,
+ mc_dev->obj_desc.type))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int fsl_mc_dma_configure(struct device *dev)
+{
+ struct device *dma_dev = dev;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ u32 input_id = mc_dev->icid;
+
+ while (dev_is_fsl_mc(dma_dev))
+ dma_dev = dma_dev->parent;
+
+ if (dev_of_node(dma_dev))
+ return of_dma_configure_id(dev, dma_dev->of_node, 0, &input_id);
+
+ return acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
+}
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
+ mc_dev->obj_desc.type);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ char *driver_override, *old = mc_dev->driver_override;
+ char *cp;
+
+ if (WARN_ON(dev->bus != &fsl_mc_bus_type))
+ return -EINVAL;
+
+ if (count >= (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ driver_override = kstrndup(buf, count, GFP_KERNEL);
+ if (!driver_override)
+ return -ENOMEM;
+
+ cp = strchr(driver_override, '\n');
+ if (cp)
+ *cp = '\0';
+
+ if (strlen(driver_override)) {
+ mc_dev->driver_override = driver_override;
+ } else {
+ kfree(driver_override);
+ mc_dev->driver_override = NULL;
+ }
+
+ kfree(old);
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", mc_dev->driver_override);
+}
+static DEVICE_ATTR_RW(driver_override);
+
+static struct attribute *fsl_mc_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ &dev_attr_driver_override.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(fsl_mc_dev);
+
+struct bus_type fsl_mc_bus_type = {
+ .name = "fsl-mc",
+ .match = fsl_mc_bus_match,
+ .uevent = fsl_mc_bus_uevent,
+ .dma_configure = fsl_mc_dma_configure,
+ .dev_groups = fsl_mc_dev_groups,
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
+
+struct device_type fsl_mc_bus_dprc_type = {
+ .name = "fsl_mc_bus_dprc"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dprc_type);
+
+struct device_type fsl_mc_bus_dpni_type = {
+ .name = "fsl_mc_bus_dpni"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpni_type);
+
+struct device_type fsl_mc_bus_dpio_type = {
+ .name = "fsl_mc_bus_dpio"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpio_type);
+
+struct device_type fsl_mc_bus_dpsw_type = {
+ .name = "fsl_mc_bus_dpsw"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpsw_type);
+
+struct device_type fsl_mc_bus_dpbp_type = {
+ .name = "fsl_mc_bus_dpbp"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpbp_type);
+
+struct device_type fsl_mc_bus_dpcon_type = {
+ .name = "fsl_mc_bus_dpcon"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpcon_type);
+
+struct device_type fsl_mc_bus_dpmcp_type = {
+ .name = "fsl_mc_bus_dpmcp"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmcp_type);
+
+struct device_type fsl_mc_bus_dpmac_type = {
+ .name = "fsl_mc_bus_dpmac"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmac_type);
+
+struct device_type fsl_mc_bus_dprtc_type = {
+ .name = "fsl_mc_bus_dprtc"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dprtc_type);
+
+struct device_type fsl_mc_bus_dpseci_type = {
+ .name = "fsl_mc_bus_dpseci"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpseci_type);
+
+struct device_type fsl_mc_bus_dpdmux_type = {
+ .name = "fsl_mc_bus_dpdmux"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmux_type);
+
+struct device_type fsl_mc_bus_dpdcei_type = {
+ .name = "fsl_mc_bus_dpdcei"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdcei_type);
+
+struct device_type fsl_mc_bus_dpaiop_type = {
+ .name = "fsl_mc_bus_dpaiop"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpaiop_type);
+
+struct device_type fsl_mc_bus_dpci_type = {
+ .name = "fsl_mc_bus_dpci"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpci_type);
+
+struct device_type fsl_mc_bus_dpdmai_type = {
+ .name = "fsl_mc_bus_dpdmai"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmai_type);
+
+static struct device_type *fsl_mc_get_device_type(const char *type)
+{
+ static const struct {
+ struct device_type *dev_type;
+ const char *type;
+ } dev_types[] = {
+ { &fsl_mc_bus_dprc_type, "dprc" },
+ { &fsl_mc_bus_dpni_type, "dpni" },
+ { &fsl_mc_bus_dpio_type, "dpio" },
+ { &fsl_mc_bus_dpsw_type, "dpsw" },
+ { &fsl_mc_bus_dpbp_type, "dpbp" },
+ { &fsl_mc_bus_dpcon_type, "dpcon" },
+ { &fsl_mc_bus_dpmcp_type, "dpmcp" },
+ { &fsl_mc_bus_dpmac_type, "dpmac" },
+ { &fsl_mc_bus_dprtc_type, "dprtc" },
+ { &fsl_mc_bus_dpseci_type, "dpseci" },
+ { &fsl_mc_bus_dpdmux_type, "dpdmux" },
+ { &fsl_mc_bus_dpdcei_type, "dpdcei" },
+ { &fsl_mc_bus_dpaiop_type, "dpaiop" },
+ { &fsl_mc_bus_dpci_type, "dpci" },
+ { &fsl_mc_bus_dpdmai_type, "dpdmai" },
+ { NULL, NULL }
+ };
+ int i;
+
+ for (i = 0; dev_types[i].dev_type; i++)
+ if (!strcmp(dev_types[i].type, type))
+ return dev_types[i].dev_type;
+
+ return NULL;
+}
+
+static int fsl_mc_driver_probe(struct device *dev)
+{
+ struct fsl_mc_driver *mc_drv;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ int error;
+
+ mc_drv = to_fsl_mc_driver(dev->driver);
+
+ error = mc_drv->probe(mc_dev);
+ if (error < 0) {
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "%s failed: %d\n", __func__, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int fsl_mc_driver_remove(struct device *dev)
+{
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ int error;
+
+ error = mc_drv->remove(mc_dev);
+ if (error < 0) {
+ dev_err(dev, "%s failed: %d\n", __func__, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static void fsl_mc_driver_shutdown(struct device *dev)
+{
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ mc_drv->shutdown(mc_dev);
+}
+
+/**
+ * __fsl_mc_driver_register - registers a child device driver with the
+ * MC bus
+ *
+ * This function is implicitly invoked from the registration function of
+ * fsl_mc device drivers, which is generated by the
+ * module_fsl_mc_driver() macro.
+ */
+int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver,
+ struct module *owner)
+{
+ int error;
+
+ mc_driver->driver.owner = owner;
+ mc_driver->driver.bus = &fsl_mc_bus_type;
+
+ if (mc_driver->probe)
+ mc_driver->driver.probe = fsl_mc_driver_probe;
+
+ if (mc_driver->remove)
+ mc_driver->driver.remove = fsl_mc_driver_remove;
+
+ if (mc_driver->shutdown)
+ mc_driver->driver.shutdown = fsl_mc_driver_shutdown;
+
+ error = driver_register(&mc_driver->driver);
+ if (error < 0) {
+ pr_err("driver_register() failed for %s: %d\n",
+ mc_driver->driver.name, error);
+ return error;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__fsl_mc_driver_register);
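+
+/*
+ * Illustrative sketch (not part of the driver): a minimal fsl-mc driver
+ * bound to DPNI objects. module_fsl_mc_driver() expands to the
+ * register/unregister boilerplate that ends up here; example_probe() and
+ * example_remove() are hypothetical callbacks:
+ *
+ *     static const struct fsl_mc_device_id example_match[] = {
+ *         { .vendor = FSL_MC_VENDOR_FREESCALE, .obj_type = "dpni" },
+ *         { .vendor = 0x0 },
+ *     };
+ *
+ *     static struct fsl_mc_driver example_driver = {
+ *         .driver = { .name = "example" },
+ *         .match_id_table = example_match,
+ *         .probe = example_probe,
+ *         .remove = example_remove,
+ *     };
+ *     module_fsl_mc_driver(example_driver);
+ */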
+
+/**
+ * fsl_mc_driver_unregister - unregisters a device driver from the
+ * MC bus
+ */
+void fsl_mc_driver_unregister(struct fsl_mc_driver *mc_driver)
+{
+ driver_unregister(&mc_driver->driver);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister);
+
+/**
+ * mc_get_version() - Retrieves the Management Complex firmware
+ * version information
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @mc_ver_info: Returned version information structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+static int mc_get_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ struct fsl_mc_version *mc_ver_info)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpmng_rsp_get_version *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION,
+ cmd_flags,
+ 0);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpmng_rsp_get_version *)cmd.params;
+ mc_ver_info->revision = le32_to_cpu(rsp_params->revision);
+ mc_ver_info->major = le32_to_cpu(rsp_params->version_major);
+ mc_ver_info->minor = le32_to_cpu(rsp_params->version_minor);
+
+ return 0;
+}
+
+/**
+ * fsl_mc_get_version - function to retrieve the MC f/w version information
+ *
+ * Return: mc version when called after fsl-mc-bus probe; NULL otherwise.
+ */
+struct fsl_mc_version *fsl_mc_get_version(void)
+{
+ if (mc_version.major)
+ return &mc_version;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_get_version);
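+
+/*
+ * Illustrative sketch (not part of the driver): gating a feature on the
+ * firmware version reported above; the cutoff numbers are made up for the
+ * example and 'enable_new_feature' is hypothetical:
+ *
+ *     struct fsl_mc_version *mc_ver = fsl_mc_get_version();
+ *
+ *     if (mc_ver && (mc_ver->major > 10 ||
+ *                    (mc_ver->major == 10 && mc_ver->minor >= 18)))
+ *         enable_new_feature();
+ */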
+
+/**
+ * fsl_mc_get_root_dprc - function to traverse to the root dprc
+ */
+void fsl_mc_get_root_dprc(struct device *dev,
+ struct device **root_dprc_dev)
+{
+ if (!dev) {
+ *root_dprc_dev = NULL;
+ } else if (!dev_is_fsl_mc(dev)) {
+ *root_dprc_dev = NULL;
+ } else {
+ *root_dprc_dev = dev;
+ while (dev_is_fsl_mc((*root_dprc_dev)->parent))
+ *root_dprc_dev = (*root_dprc_dev)->parent;
+ }
+}
+
+static int get_dprc_attr(struct fsl_mc_io *mc_io,
+ int container_id, struct dprc_attributes *attr)
+{
+ u16 dprc_handle;
+ int error;
+
+ error = dprc_open(mc_io, 0, container_id, &dprc_handle);
+ if (error < 0) {
+ dev_err(mc_io->dev, "dprc_open() failed: %d\n", error);
+ return error;
+ }
+
+ memset(attr, 0, sizeof(struct dprc_attributes));
+ error = dprc_get_attributes(mc_io, 0, dprc_handle, attr);
+ if (error < 0) {
+ dev_err(mc_io->dev, "dprc_get_attributes() failed: %d\n",
+ error);
+ goto common_cleanup;
+ }
+
+ error = 0;
+
+common_cleanup:
+ (void)dprc_close(mc_io, 0, dprc_handle);
+ return error;
+}
+
+static int get_dprc_icid(struct fsl_mc_io *mc_io,
+ int container_id, u32 *icid)
+{
+ struct dprc_attributes attr;
+ int error;
+
+ error = get_dprc_attr(mc_io, container_id, &attr);
+ if (error == 0)
+ *icid = attr.icid;
+
+ return error;
+}
+
+static int translate_mc_addr(struct fsl_mc_device *mc_dev,
+ enum dprc_region_type mc_region_type,
+ u64 mc_offset, phys_addr_t *phys_addr)
+{
+ int i;
+ struct device *root_dprc_dev;
+ struct fsl_mc *mc;
+
+ fsl_mc_get_root_dprc(&mc_dev->dev, &root_dprc_dev);
+ mc = dev_get_drvdata(root_dprc_dev->parent);
+
+ if (mc->num_translation_ranges == 0) {
+ /*
+ * Do identity mapping:
+ */
+ *phys_addr = mc_offset;
+ return 0;
+ }
+
+ for (i = 0; i < mc->num_translation_ranges; i++) {
+ struct fsl_mc_addr_translation_range *range =
+ &mc->translation_ranges[i];
+
+ if (mc_region_type == range->mc_region_type &&
+ mc_offset >= range->start_mc_offset &&
+ mc_offset < range->end_mc_offset) {
+ *phys_addr = range->start_phys_addr +
+ (mc_offset - range->start_mc_offset);
+ return 0;
+ }
+ }
+
+ return -EFAULT;
+}
+
+static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
+ struct fsl_mc_device *mc_bus_dev)
+{
+ int i;
+ int error;
+ struct resource *regions;
+ struct fsl_mc_obj_desc *obj_desc = &mc_dev->obj_desc;
+ struct device *parent_dev = mc_dev->dev.parent;
+ enum dprc_region_type mc_region_type;
+
+ if (is_fsl_mc_bus_dprc(mc_dev) ||
+ is_fsl_mc_bus_dpmcp(mc_dev)) {
+ mc_region_type = DPRC_REGION_TYPE_MC_PORTAL;
+ } else if (is_fsl_mc_bus_dpio(mc_dev)) {
+ mc_region_type = DPRC_REGION_TYPE_QBMAN_PORTAL;
+ } else {
+ /*
+ * This function should not have been called for this MC object
+ * type, as this object type is not supposed to have MMIO
+ * regions
+ */
+ return -EINVAL;
+ }
+
+ regions = kmalloc_array(obj_desc->region_count,
+ sizeof(regions[0]), GFP_KERNEL);
+ if (!regions)
+ return -ENOMEM;
+
+ for (i = 0; i < obj_desc->region_count; i++) {
+ struct dprc_region_desc region_desc;
+
+ error = dprc_get_obj_region(mc_bus_dev->mc_io,
+ 0,
+ mc_bus_dev->mc_handle,
+ obj_desc->type,
+ obj_desc->id, i, &region_desc);
+ if (error < 0) {
+ dev_err(parent_dev,
+ "dprc_get_obj_region() failed: %d\n", error);
+ goto error_cleanup_regions;
+ }
+ /*
+ * Older MC firmware returned only the region offset and no base
+ * address. If a base address is present in region_desc, use it;
+ * otherwise fall back to the old mechanism.
+ */
+ if (region_desc.base_address) {
+ regions[i].start = region_desc.base_address +
+ region_desc.base_offset;
+ } else {
+ error = translate_mc_addr(mc_dev, mc_region_type,
+ region_desc.base_offset,
+ &regions[i].start);
+
+ /*
+ * Some versions of the MC firmware wrongly report
+ * 0 for register base address of the DPMCP associated
+ * with child DPRC objects thus rendering them unusable.
+ * This is particularly troublesome in ACPI boot
+ * scenarios where the legacy way of extracting this
+ * base address from the device tree does not apply.
+ * Given that DPMCPs share the same base address,
+ * workaround this by using the base address extracted
+ * from the root DPRC container.
+ */
+ if (is_fsl_mc_bus_dprc(mc_dev) &&
+ regions[i].start == region_desc.base_offset)
+ regions[i].start += mc_portal_base_phys_addr;
+ }
+
+ if (error < 0) {
+ dev_err(parent_dev,
+ "Invalid MC offset: %#x (for %s.%d\'s region %d)\n",
+ region_desc.base_offset,
+ obj_desc->type, obj_desc->id, i);
+ goto error_cleanup_regions;
+ }
+
+ regions[i].end = regions[i].start + region_desc.size - 1;
+ regions[i].name = "fsl-mc object MMIO region";
+ regions[i].flags = region_desc.flags & IORESOURCE_BITS;
+ regions[i].flags |= IORESOURCE_MEM;
+ }
+
+ mc_dev->regions = regions;
+ return 0;
+
+error_cleanup_regions:
+ kfree(regions);
+ return error;
+}
+
+/**
+ * fsl_mc_is_root_dprc - function to check if a given device is a root dprc
+ */
+bool fsl_mc_is_root_dprc(struct device *dev)
+{
+ struct device *root_dprc_dev;
+
+ fsl_mc_get_root_dprc(dev, &root_dprc_dev);
+ if (!root_dprc_dev)
+ return false;
+ return dev == root_dprc_dev;
+}
+
+static void fsl_mc_device_release(struct device *dev)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ kfree(mc_dev->regions);
+
+ if (is_fsl_mc_bus_dprc(mc_dev))
+ kfree(to_fsl_mc_bus(mc_dev));
+ else
+ kfree(mc_dev);
+}
+
+/**
+ * Add a newly discovered fsl-mc device and make it visible to Linux
+ */
+int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
+ struct fsl_mc_io *mc_io,
+ struct device *parent_dev,
+ struct fsl_mc_device **new_mc_dev)
+{
+ int error;
+ struct fsl_mc_device *mc_dev = NULL;
+ struct fsl_mc_bus *mc_bus = NULL;
+ struct fsl_mc_device *parent_mc_dev;
+
+ if (dev_is_fsl_mc(parent_dev))
+ parent_mc_dev = to_fsl_mc_device(parent_dev);
+ else
+ parent_mc_dev = NULL;
+
+ if (strcmp(obj_desc->type, "dprc") == 0) {
+ /*
+ * Allocate an MC bus device object:
+ */
+ mc_bus = kzalloc(sizeof(*mc_bus), GFP_KERNEL);
+ if (!mc_bus)
+ return -ENOMEM;
+
+ mutex_init(&mc_bus->scan_mutex);
+ mc_dev = &mc_bus->mc_dev;
+ } else {
+ /*
+ * Allocate a regular fsl_mc_device object:
+ */
+ mc_dev = kzalloc(sizeof(*mc_dev), GFP_KERNEL);
+ if (!mc_dev)
+ return -ENOMEM;
+ }
+
+ mc_dev->obj_desc = *obj_desc;
+ mc_dev->mc_io = mc_io;
+ device_initialize(&mc_dev->dev);
+ mc_dev->dev.parent = parent_dev;
+ mc_dev->dev.bus = &fsl_mc_bus_type;
+ mc_dev->dev.release = fsl_mc_device_release;
+ mc_dev->dev.type = fsl_mc_get_device_type(obj_desc->type);
+ if (!mc_dev->dev.type) {
+ error = -ENODEV;
+ dev_err(parent_dev, "unknown device type %s\n", obj_desc->type);
+ goto error_cleanup_dev;
+ }
+ dev_set_name(&mc_dev->dev, "%s.%d", obj_desc->type, obj_desc->id);
+
+ if (strcmp(obj_desc->type, "dprc") == 0) {
+ struct fsl_mc_io *mc_io2;
+
+ mc_dev->flags |= FSL_MC_IS_DPRC;
+
+ /*
+ * To get the DPRC's ICID, we need to open the DPRC
+ * in get_dprc_icid(). For child DPRCs, we do so using the
+ * parent DPRC's MC portal instead of the child DPRC's MC
+ * portal, in case the child DPRC is already opened with
+ * its own portal (e.g., the DPRC used by AIOP).
+ *
+ * NOTE: There cannot be more than one active open for a
+ * given MC object, using the same MC portal.
+ */
+ if (parent_mc_dev) {
+ /*
+ * device being added is a child DPRC device
+ */
+ mc_io2 = parent_mc_dev->mc_io;
+ } else {
+ /*
+ * device being added is the root DPRC device
+ */
+ if (!mc_io) {
+ error = -EINVAL;
+ goto error_cleanup_dev;
+ }
+
+ mc_io2 = mc_io;
+ }
+
+ error = get_dprc_icid(mc_io2, obj_desc->id, &mc_dev->icid);
+ if (error < 0)
+ goto error_cleanup_dev;
+ } else {
+ /*
+ * A non-DPRC object has to be a child of a DPRC, use the
+ * parent's ICID and interrupt domain.
+ */
+ mc_dev->icid = parent_mc_dev->icid;
+ mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
+ mc_dev->dev.dma_mask = &mc_dev->dma_mask;
+ mc_dev->dev.coherent_dma_mask = mc_dev->dma_mask;
+ dev_set_msi_domain(&mc_dev->dev,
+ dev_get_msi_domain(&parent_mc_dev->dev));
+ }
+
+ /*
+ * Get MMIO regions for the device from the MC:
+ *
+ * NOTE: the root DPRC is a special case as its MMIO region is
+ * obtained from the device tree
+ */
+ if (parent_mc_dev && obj_desc->region_count != 0) {
+ error = fsl_mc_device_get_mmio_regions(mc_dev,
+ parent_mc_dev);
+ if (error < 0)
+ goto error_cleanup_dev;
+ }
+
+ /*
+ * The device-specific probe callback will get invoked by device_add()
+ */
+ error = device_add(&mc_dev->dev);
+ if (error < 0) {
+ dev_err(parent_dev,
+ "device_add() failed for device %s: %d\n",
+ dev_name(&mc_dev->dev), error);
+ goto error_cleanup_dev;
+ }
+
+ dev_dbg(parent_dev, "added %s\n", dev_name(&mc_dev->dev));
+
+ *new_mc_dev = mc_dev;
+ return 0;
+
+error_cleanup_dev:
+ kfree(mc_dev->regions);
+ kfree(mc_bus);
+ kfree(mc_dev);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_device_add);
+
+/**
+ * fsl_mc_device_remove - Remove an fsl-mc device from being visible to
+ * Linux
+ *
+ * @mc_dev: Pointer to an fsl-mc device
+ */
+void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
+{
+ kfree(mc_dev->driver_override);
+ mc_dev->driver_override = NULL;
+
+ /*
+ * The device-specific remove callback will get invoked by device_del()
+ */
+ device_del(&mc_dev->dev);
+ put_device(&mc_dev->dev);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
+
+struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_device *mc_bus_dev, *endpoint;
+ struct fsl_mc_obj_desc endpoint_desc = {{ 0 }};
+ struct dprc_endpoint endpoint1 = {{ 0 }};
+ struct dprc_endpoint endpoint2 = {{ 0 }};
+ int state, err;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ strcpy(endpoint1.type, mc_dev->obj_desc.type);
+ endpoint1.id = mc_dev->obj_desc.id;
+
+ err = dprc_get_connection(mc_bus_dev->mc_io, 0,
+ mc_bus_dev->mc_handle,
+ &endpoint1, &endpoint2,
+ &state);
+
+ if (err == -ENOTCONN || state == -1)
+ return ERR_PTR(-ENOTCONN);
+
+ if (err < 0) {
+ dev_err(&mc_bus_dev->dev, "dprc_get_connection() = %d\n", err);
+ return ERR_PTR(err);
+ }
+
+ strcpy(endpoint_desc.type, endpoint2.type);
+ endpoint_desc.id = endpoint2.id;
+ endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+
+ return endpoint;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_get_endpoint);
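+
+/*
+ * Illustrative sketch (not part of the driver): a DPNI driver locating the
+ * object at the other end of its link (a DPMAC, say) via the helper above;
+ * the -EPROBE_DEFER handling is hypothetical:
+ *
+ *     struct fsl_mc_device *peer = fsl_mc_get_endpoint(mc_dev);
+ *
+ *     if (IS_ERR(peer) || !peer)
+ *         return -EPROBE_DEFER;
+ */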
+
+static int parse_mc_ranges(struct device *dev,
+ int *paddr_cells,
+ int *mc_addr_cells,
+ int *mc_size_cells,
+ const __be32 **ranges_start)
+{
+ const __be32 *prop;
+ int range_tuple_cell_count;
+ int ranges_len;
+ int tuple_len;
+ struct device_node *mc_node = dev->of_node;
+
+ *ranges_start = of_get_property(mc_node, "ranges", &ranges_len);
+ if (!(*ranges_start) || !ranges_len) {
+ dev_warn(dev,
+ "missing or empty ranges property for device tree node '%pOFn'\n",
+ mc_node);
+ return 0;
+ }
+
+ *paddr_cells = of_n_addr_cells(mc_node);
+
+ prop = of_get_property(mc_node, "#address-cells", NULL);
+ if (prop)
+ *mc_addr_cells = be32_to_cpup(prop);
+ else
+ *mc_addr_cells = *paddr_cells;
+
+ prop = of_get_property(mc_node, "#size-cells", NULL);
+ if (prop)
+ *mc_size_cells = be32_to_cpup(prop);
+ else
+ *mc_size_cells = of_n_size_cells(mc_node);
+
+ range_tuple_cell_count = *paddr_cells + *mc_addr_cells +
+ *mc_size_cells;
+
+ tuple_len = range_tuple_cell_count * sizeof(__be32);
+ if (ranges_len % tuple_len != 0) {
+ dev_err(dev, "malformed ranges property '%pOFn'\n", mc_node);
+ return -EINVAL;
+ }
+
+ return ranges_len / tuple_len;
+}
+
+static int get_mc_addr_translation_ranges(struct device *dev,
+ struct fsl_mc_addr_translation_range
+ **ranges,
+ u8 *num_ranges)
+{
+ int ret;
+ int paddr_cells;
+ int mc_addr_cells;
+ int mc_size_cells;
+ int i;
+ const __be32 *ranges_start;
+ const __be32 *cell;
+
+ ret = parse_mc_ranges(dev,
+ &paddr_cells,
+ &mc_addr_cells,
+ &mc_size_cells,
+ &ranges_start);
+ if (ret < 0)
+ return ret;
+
+ *num_ranges = ret;
+ if (!ret) {
+ /*
+ * Missing or empty ranges property ("ranges;") for the
+ * 'fsl,qoriq-mc' node. In this case, identity mapping
+ * will be used.
+ */
+ *ranges = NULL;
+ return 0;
+ }
+
+ *ranges = devm_kcalloc(dev, *num_ranges,
+ sizeof(struct fsl_mc_addr_translation_range),
+ GFP_KERNEL);
+ if (!(*ranges))
+ return -ENOMEM;
+
+ cell = ranges_start;
+ for (i = 0; i < *num_ranges; ++i) {
+ struct fsl_mc_addr_translation_range *range = &(*ranges)[i];
+
+ range->mc_region_type = of_read_number(cell, 1);
+ range->start_mc_offset = of_read_number(cell + 1,
+ mc_addr_cells - 1);
+ cell += mc_addr_cells;
+ range->start_phys_addr = of_read_number(cell, paddr_cells);
+ cell += paddr_cells;
+ range->end_mc_offset = range->start_mc_offset +
+ of_read_number(cell, mc_size_cells);
+
+ cell += mc_size_cells;
+ }
+
+ return 0;
+}
+
+/**
+ * fsl_mc_bus_probe - callback invoked when the root MC bus is being
+ * added
+ */
+static int fsl_mc_bus_probe(struct platform_device *pdev)
+{
+ struct fsl_mc_obj_desc obj_desc;
+ int error;
+ struct fsl_mc *mc;
+ struct fsl_mc_device *mc_bus_dev = NULL;
+ struct fsl_mc_io *mc_io = NULL;
+ int container_id;
+ phys_addr_t mc_portal_phys_addr;
+ u32 mc_portal_size, mc_stream_id;
+ struct resource *plat_res;
+
+ mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
+ if (!mc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mc);
+
+ plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (plat_res) {
+ mc->fsl_mc_regs = devm_ioremap_resource(&pdev->dev, plat_res);
+ if (IS_ERR(mc->fsl_mc_regs))
+ return PTR_ERR(mc->fsl_mc_regs);
+ }
+
+ if (mc->fsl_mc_regs && IS_ENABLED(CONFIG_ACPI) &&
+ !dev_of_node(&pdev->dev)) {
+ mc_stream_id = readl(mc->fsl_mc_regs + FSL_MC_FAPR);
+ /*
+ * HW ORs the PL and BMT bit, places the result in bit 15 of
+ * the StreamID and ORs in the ICID. Calculate it accordingly.
+ */
+ mc_stream_id = (mc_stream_id & 0xffff) |
+ ((mc_stream_id & (MC_FAPR_PL | MC_FAPR_BMT)) ?
+ 0x4000 : 0);
+ error = acpi_dma_configure_id(&pdev->dev, DEV_DMA_COHERENT,
+ &mc_stream_id);
+ if (error)
+ dev_warn(&pdev->dev, "failed to configure dma: %d.\n",
+ error);
+ }
+
+ /*
+ * Get physical address of MC portal for the root DPRC:
+ */
+ plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mc_portal_phys_addr = plat_res->start;
+ mc_portal_size = resource_size(plat_res);
+ mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff;
+
+ error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr,
+ mc_portal_size, NULL,
+ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io);
+ if (error < 0)
+ return error;
+
+ error = mc_get_version(mc_io, 0, &mc_version);
+ if (error != 0) {
+ dev_err(&pdev->dev,
+ "mc_get_version() failed with error %d\n", error);
+ goto error_cleanup_mc_io;
+ }
+
+ dev_info(&pdev->dev, "MC firmware version: %u.%u.%u\n",
+ mc_version.major, mc_version.minor, mc_version.revision);
+
+ if (dev_of_node(&pdev->dev)) {
+ error = get_mc_addr_translation_ranges(&pdev->dev,
+ &mc->translation_ranges,
+ &mc->num_translation_ranges);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+ }
+
+ error = dprc_get_container_id(mc_io, 0, &container_id);
+ if (error < 0) {
+ dev_err(&pdev->dev,
+ "dprc_get_container_id() failed: %d\n", error);
+ goto error_cleanup_mc_io;
+ }
+
+ memset(&obj_desc, 0, sizeof(struct fsl_mc_obj_desc));
+ error = dprc_get_api_version(mc_io, 0,
+ &obj_desc.ver_major,
+ &obj_desc.ver_minor);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+
+ obj_desc.vendor = FSL_MC_VENDOR_FREESCALE;
+ strcpy(obj_desc.type, "dprc");
+ obj_desc.id = container_id;
+ obj_desc.irq_count = 1;
+ obj_desc.region_count = 0;
+
+ error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, &mc_bus_dev);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+
+ mc->root_mc_bus_dev = mc_bus_dev;
+ mc_bus_dev->dev.fwnode = pdev->dev.fwnode;
+ return 0;
+
+error_cleanup_mc_io:
+ fsl_destroy_mc_io(mc_io);
+ return error;
+}
+
+/**
+ * fsl_mc_bus_remove - callback invoked when the root MC bus is being
+ * removed
+ */
+static int fsl_mc_bus_remove(struct platform_device *pdev)
+{
+ struct fsl_mc *mc = platform_get_drvdata(pdev);
+
+ if (!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev))
+ return -EINVAL;
+
+ fsl_mc_device_remove(mc->root_mc_bus_dev);
+
+ fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io);
+ mc->root_mc_bus_dev->mc_io = NULL;
+
+ return 0;
+}
+
+static const struct of_device_id fsl_mc_bus_match_table[] = {
+ {.compatible = "fsl,qoriq-mc",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, fsl_mc_bus_match_table);
+
+static const struct acpi_device_id fsl_mc_bus_acpi_match_table[] = {
+ {"NXP0008", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, fsl_mc_bus_acpi_match_table);
+
+static struct platform_driver fsl_mc_bus_driver = {
+ .driver = {
+ .name = "fsl_mc_bus",
+ .pm = NULL,
+ .of_match_table = fsl_mc_bus_match_table,
+ .acpi_match_table = fsl_mc_bus_acpi_match_table,
+ },
+ .probe = fsl_mc_bus_probe,
+ .remove = fsl_mc_bus_remove,
+};
+
+static int __init fsl_mc_bus_driver_init(void)
+{
+ int error;
+
+ error = bus_register(&fsl_mc_bus_type);
+ if (error < 0) {
+ pr_err("bus type registration failed: %d\n", error);
+ goto error_cleanup_cache;
+ }
+
+ error = platform_driver_register(&fsl_mc_bus_driver);
+ if (error < 0) {
+ pr_err("platform_driver_register() failed: %d\n", error);
+ goto error_cleanup_bus;
+ }
+
+ error = dprc_driver_init();
+ if (error < 0)
+ goto error_cleanup_driver;
+
+ error = fsl_mc_allocator_driver_init();
+ if (error < 0)
+ goto error_cleanup_dprc_driver;
+
+ return 0;
+
+error_cleanup_dprc_driver:
+ dprc_driver_exit();
+
+error_cleanup_driver:
+ platform_driver_unregister(&fsl_mc_bus_driver);
+
+error_cleanup_bus:
+ bus_unregister(&fsl_mc_bus_type);
+
+error_cleanup_cache:
+ return error;
+}
+postcore_initcall(fsl_mc_bus_driver_init);
diff --git a/drivers/bus/fsl-mc/fsl-mc-msi.c b/drivers/bus/fsl-mc/fsl-mc-msi.c
new file mode 100644
index 000000000..8edadf05c
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-msi.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale Management Complex (MC) bus driver MSI support
+ *
+ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ */
+
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/acpi_iort.h>
+
+#include "fsl-mc-private.h"
+
+#ifdef GENERIC_MSI_DOMAIN_OPS
+/*
+ * Generate a unique ID identifying the interrupt (only used within the MSI
+ * irqdomain); combine the icid with the interrupt index.
+ */
+static irq_hw_number_t fsl_mc_domain_calc_hwirq(struct fsl_mc_device *dev,
+ struct msi_desc *desc)
+{
+ /*
+	 * Make the base hwirq value ICID * 10000 so it is readable
+	 * as a decimal value in /proc/interrupts.
+ */
+ return (irq_hw_number_t)(desc->fsl_mc.msi_index + (dev->icid * 10000));
+}
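+
+/*
+ * Worked example (assumed values): icid 5 and msi_index 2 yield hwirq
+ * 5 * 10000 + 2 = 50002.
+ */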
+
+static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg,
+ struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = fsl_mc_domain_calc_hwirq(to_fsl_mc_device(desc->dev),
+ desc);
+}
+#else
+#define fsl_mc_msi_set_desc NULL
+#endif
+
+static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
+{
+ struct msi_domain_ops *ops = info->ops;
+
+ if (!ops)
+ return;
+
+ /*
+ * set_desc should not be set by the caller
+ */
+ if (!ops->set_desc)
+ ops->set_desc = fsl_mc_msi_set_desc;
+}
+
+static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_device_irq *mc_dev_irq)
+{
+ int error;
+ struct fsl_mc_device *owner_mc_dev = mc_dev_irq->mc_dev;
+ struct msi_desc *msi_desc = mc_dev_irq->msi_desc;
+ struct dprc_irq_cfg irq_cfg;
+
+ /*
+ * msi_desc->msg.address is 0x0 when this function is invoked in
+ * the free_irq() code path. In this case, for the MC, we don't
+ * really need to "unprogram" the MSI, so we just return.
+ */
+ if (msi_desc->msg.address_lo == 0x0 && msi_desc->msg.address_hi == 0x0)
+ return;
+
+ if (!owner_mc_dev)
+ return;
+
+ irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) |
+ msi_desc->msg.address_lo;
+ irq_cfg.val = msi_desc->msg.data;
+ irq_cfg.irq_num = msi_desc->irq;
+
+ if (owner_mc_dev == mc_bus_dev) {
+ /*
+ * IRQ is for the mc_bus_dev's DPRC itself
+ */
+ error = dprc_set_irq(mc_bus_dev->mc_io,
+ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
+ mc_bus_dev->mc_handle,
+ mc_dev_irq->dev_irq_index,
+ &irq_cfg);
+ if (error < 0) {
+ dev_err(&owner_mc_dev->dev,
+ "dprc_set_irq() failed: %d\n", error);
+ }
+ } else {
+ /*
+		 * IRQ is for a child device of mc_bus_dev
+ */
+ error = dprc_set_obj_irq(mc_bus_dev->mc_io,
+ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
+ mc_bus_dev->mc_handle,
+ owner_mc_dev->obj_desc.type,
+ owner_mc_dev->obj_desc.id,
+ mc_dev_irq->dev_irq_index,
+ &irq_cfg);
+ if (error < 0) {
+ dev_err(&owner_mc_dev->dev,
+ "dprc_obj_set_irq() failed: %d\n", error);
+ }
+ }
+}
+
+/*
+ * NOTE: This function is invoked with interrupts disabled
+ */
+static void fsl_mc_msi_write_msg(struct irq_data *irq_data,
+ struct msi_msg *msg)
+{
+ struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data);
+ struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_desc->dev);
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ struct fsl_mc_device_irq *mc_dev_irq =
+ &mc_bus->irq_resources[msi_desc->fsl_mc.msi_index];
+
+ msi_desc->msg = *msg;
+
+ /*
+ * Program the MSI (paddr, value) pair in the device:
+ */
+ __fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq);
+}
+
+static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+ if (!chip)
+ return;
+
+ /*
+ * irq_write_msi_msg should not be set by the caller
+ */
+ if (!chip->irq_write_msi_msg)
+ chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
+}
+
+/**
+ * fsl_mc_msi_create_irq_domain - Create a fsl-mc MSI interrupt domain
+ * @fwnode: Optional fwnode of the interrupt controller
+ * @info: MSI domain info
+ * @parent: Parent irq domain
+ *
+ * Updates the domain and chip ops and creates a fsl-mc MSI
+ * interrupt domain.
+ *
+ * Returns:
+ * A domain pointer or NULL in case of failure.
+ */
+struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
+ struct msi_domain_info *info,
+ struct irq_domain *parent)
+{
+ struct irq_domain *domain;
+
+ if (WARN_ON((info->flags & MSI_FLAG_LEVEL_CAPABLE)))
+ info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
+ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
+ fsl_mc_msi_update_dom_ops(info);
+ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+ fsl_mc_msi_update_chip_ops(info);
+
+ domain = msi_create_irq_domain(fwnode, info, parent);
+ if (domain)
+ irq_domain_update_bus_token(domain, DOMAIN_BUS_FSL_MC_MSI);
+
+ return domain;
+}
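+
+/*
+ * Sketch of how an MSI controller driver would use the helper above;
+ * "info" and "its_parent" stand in for the caller's own domain info and
+ * parent irq domain:
+ *
+ *	struct irq_domain *d;
+ *
+ *	d = fsl_mc_msi_create_irq_domain(of_node_to_fwnode(np), &info,
+ *					 its_parent);
+ *	if (!d)
+ *		return -ENOMEM;
+ */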
+
+struct irq_domain *fsl_mc_find_msi_domain(struct device *dev)
+{
+ struct device *root_dprc_dev;
+ struct device *bus_dev;
+ struct irq_domain *msi_domain;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ fsl_mc_get_root_dprc(dev, &root_dprc_dev);
+ bus_dev = root_dprc_dev->parent;
+
+ if (bus_dev->of_node) {
+ msi_domain = of_msi_map_get_device_domain(dev,
+ mc_dev->icid,
+ DOMAIN_BUS_FSL_MC_MSI);
+
+ /*
+ * if the msi-map property is missing assume that all the
+ * child containers inherit the domain from the parent
+ */
+ if (!msi_domain)
+ msi_domain = of_msi_get_domain(bus_dev,
+ bus_dev->of_node,
+ DOMAIN_BUS_FSL_MC_MSI);
+ } else {
+ msi_domain = iort_get_device_domain(dev, mc_dev->icid,
+ DOMAIN_BUS_FSL_MC_MSI);
+ }
+
+ return msi_domain;
+}
+
+static void fsl_mc_msi_free_descs(struct device *dev)
+{
+ struct msi_desc *desc, *tmp;
+
+ list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
+ list_del(&desc->list);
+ free_msi_entry(desc);
+ }
+}
+
+static int fsl_mc_msi_alloc_descs(struct device *dev, unsigned int irq_count)
+{
+ unsigned int i;
+ int error;
+ struct msi_desc *msi_desc;
+
+ for (i = 0; i < irq_count; i++) {
+ msi_desc = alloc_msi_entry(dev, 1, NULL);
+ if (!msi_desc) {
+ dev_err(dev, "Failed to allocate msi entry\n");
+ error = -ENOMEM;
+ goto cleanup_msi_descs;
+ }
+
+ msi_desc->fsl_mc.msi_index = i;
+ INIT_LIST_HEAD(&msi_desc->list);
+ list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
+ }
+
+ return 0;
+
+cleanup_msi_descs:
+ fsl_mc_msi_free_descs(dev);
+ return error;
+}
+
+int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
+ unsigned int irq_count)
+{
+ struct irq_domain *msi_domain;
+ int error;
+
+ if (!list_empty(dev_to_msi_list(dev)))
+ return -EINVAL;
+
+ error = fsl_mc_msi_alloc_descs(dev, irq_count);
+ if (error < 0)
+ return error;
+
+ msi_domain = dev_get_msi_domain(dev);
+ if (!msi_domain) {
+ error = -EINVAL;
+ goto cleanup_msi_descs;
+ }
+
+ /*
+ * NOTE: Calling this function will trigger the invocation of the
+ * its_fsl_mc_msi_prepare() callback
+ */
+ error = msi_domain_alloc_irqs(msi_domain, dev, irq_count);
+ if (error) {
+ dev_err(dev, "Failed to allocate IRQs\n");
+ goto cleanup_msi_descs;
+ }
+
+ return 0;
+
+cleanup_msi_descs:
+ fsl_mc_msi_free_descs(dev);
+ return error;
+}
+
+void fsl_mc_msi_domain_free_irqs(struct device *dev)
+{
+ struct irq_domain *msi_domain;
+
+ msi_domain = dev_get_msi_domain(dev);
+ if (!msi_domain)
+ return;
+
+ msi_domain_free_irqs(msi_domain, dev);
+
+ if (list_empty(dev_to_msi_list(dev)))
+ return;
+
+ fsl_mc_msi_free_descs(dev);
+}
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
new file mode 100644
index 000000000..85ca5fdee
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -0,0 +1,614 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Freescale Management Complex (MC) bus private declarations
+ *
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ *
+ */
+#ifndef _FSL_MC_PRIVATE_H_
+#define _FSL_MC_PRIVATE_H_
+
+#include <linux/fsl/mc.h>
+#include <linux/mutex.h>
+
+/*
+ * Data Path Management Complex (DPMNG) General API
+ */
+
+/* DPMNG command versioning */
+#define DPMNG_CMD_BASE_VERSION 1
+#define DPMNG_CMD_ID_OFFSET 4
+
+#define DPMNG_CMD(id) (((id) << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)
+
+/* DPMNG command IDs */
+#define DPMNG_CMDID_GET_VERSION DPMNG_CMD(0x831)
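+
+/*
+ * For example, DPMNG_CMDID_GET_VERSION expands to (0x831 << 4) | 1 =
+ * 0x8311: the low 4 bits carry the command version, the remaining bits
+ * the command ID.
+ */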
+
+struct dpmng_rsp_get_version {
+ __le32 revision;
+ __le32 version_major;
+ __le32 version_minor;
+};
+
+/*
+ * Data Path Management Command Portal (DPMCP) API
+ */
+
+/* Minimal supported DPMCP Version */
+#define DPMCP_MIN_VER_MAJOR 3
+#define DPMCP_MIN_VER_MINOR 0
+
+/* DPMCP command versioning */
+#define DPMCP_CMD_BASE_VERSION 1
+#define DPMCP_CMD_ID_OFFSET 4
+
+#define DPMCP_CMD(id) (((id) << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION)
+
+/* DPMCP command IDs */
+#define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800)
+#define DPMCP_CMDID_OPEN DPMCP_CMD(0x80b)
+#define DPMCP_CMDID_RESET DPMCP_CMD(0x005)
+
+struct dpmcp_cmd_open {
+ __le32 dpmcp_id;
+};
+
+/*
+ * Initialization and runtime control APIs for DPMCP
+ */
+int dpmcp_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpmcp_id,
+ u16 *token);
+
+int dpmcp_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpmcp_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/*
+ * Data Path Resource Container (DPRC) API
+ */
+
+/* Minimal supported DPRC Version */
+#define DPRC_MIN_VER_MAJOR 6
+#define DPRC_MIN_VER_MINOR 0
+
+/* DPRC command versioning */
+#define DPRC_CMD_BASE_VERSION 1
+#define DPRC_CMD_2ND_VERSION 2
+#define DPRC_CMD_3RD_VERSION 3
+#define DPRC_CMD_ID_OFFSET 4
+
+#define DPRC_CMD(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
+#define DPRC_CMD_V2(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_2ND_VERSION)
+#define DPRC_CMD_V3(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_3RD_VERSION)
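+
+/*
+ * E.g. DPRC_CMD_V2(0x005) is (0x005 << 4) | 2 = 0x52; the low 4 bits
+ * let the MC firmware distinguish revisions of the same command.
+ */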
+
+/* DPRC command IDs */
+#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
+#define DPRC_CMDID_OPEN DPRC_CMD(0x805)
+#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05)
+
+#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004)
+#define DPRC_CMDID_RESET_CONT DPRC_CMD(0x005)
+#define DPRC_CMDID_RESET_CONT_V2 DPRC_CMD_V2(0x005)
+
+#define DPRC_CMDID_SET_IRQ DPRC_CMD(0x010)
+#define DPRC_CMDID_SET_IRQ_ENABLE DPRC_CMD(0x012)
+#define DPRC_CMDID_SET_IRQ_MASK DPRC_CMD(0x014)
+#define DPRC_CMDID_GET_IRQ_STATUS DPRC_CMD(0x016)
+#define DPRC_CMDID_CLEAR_IRQ_STATUS DPRC_CMD(0x017)
+
+#define DPRC_CMDID_GET_CONT_ID DPRC_CMD(0x830)
+#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159)
+#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
+#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E)
+#define DPRC_CMDID_GET_OBJ_REG_V2 DPRC_CMD_V2(0x15E)
+#define DPRC_CMDID_GET_OBJ_REG_V3 DPRC_CMD_V3(0x15E)
+#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
+
+#define DPRC_CMDID_GET_CONNECTION DPRC_CMD(0x16C)
+
+struct dprc_cmd_open {
+ __le32 container_id;
+};
+
+struct dprc_cmd_reset_container {
+ __le32 child_container_id;
+ __le32 options;
+};
+
+struct dprc_cmd_set_irq {
+ /* cmd word 0 */
+ __le32 irq_val;
+ u8 irq_index;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le64 irq_addr;
+ /* cmd word 2 */
+ __le32 irq_num;
+};
+
+#define DPRC_ENABLE 0x1
+
+struct dprc_cmd_set_irq_enable {
+ u8 enable;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dprc_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dprc_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dprc_cmd_clear_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_attributes {
+ /* response word 0 */
+ __le32 container_id;
+ __le32 icid;
+ /* response word 1 */
+ __le32 options;
+ __le32 portal_id;
+};
+
+struct dprc_rsp_get_obj_count {
+ __le32 pad;
+ __le32 obj_count;
+};
+
+struct dprc_cmd_get_obj {
+ __le32 obj_index;
+};
+
+struct dprc_rsp_get_obj {
+ /* response word 0 */
+ __le32 pad0;
+ __le32 id;
+ /* response word 1 */
+ __le16 vendor;
+ u8 irq_count;
+ u8 region_count;
+ __le32 state;
+ /* response word 2 */
+ __le16 version_major;
+ __le16 version_minor;
+ __le16 flags;
+ __le16 pad1;
+ /* response word 3-4 */
+ u8 type[16];
+ /* response word 5-6 */
+ u8 label[16];
+};
+
+struct dprc_cmd_get_obj_region {
+ /* cmd word 0 */
+ __le32 obj_id;
+ __le16 pad0;
+ u8 region_index;
+ u8 pad1;
+ /* cmd word 1-2 */
+ __le64 pad2[2];
+ /* cmd word 3-4 */
+ u8 obj_type[16];
+};
+
+struct dprc_rsp_get_obj_region {
+ /* response word 0 */
+ __le64 pad;
+ /* response word 1 */
+ __le64 base_offset;
+ /* response word 2 */
+ __le32 size;
+ __le32 pad2;
+ /* response word 3 */
+ __le32 flags;
+ __le32 pad3;
+ /* response word 4 */
+ /* base_addr may be zero if older MC firmware is used */
+ __le64 base_addr;
+};
+
+struct dprc_cmd_set_obj_irq {
+ /* cmd word 0 */
+ __le32 irq_val;
+ u8 irq_index;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le64 irq_addr;
+ /* cmd word 2 */
+ __le32 irq_num;
+ __le32 obj_id;
+ /* cmd word 3-4 */
+ u8 obj_type[16];
+};
+
+struct dprc_cmd_get_connection {
+ __le32 ep1_id;
+ __le16 ep1_interface_id;
+ u8 pad[2];
+ u8 ep1_type[16];
+};
+
+struct dprc_rsp_get_connection {
+ __le64 pad[3];
+ __le32 ep2_id;
+ __le16 ep2_interface_id;
+ __le16 pad1;
+ u8 ep2_type[16];
+ __le32 state;
+};
+
+/*
+ * DPRC API for managing and querying DPAA resources
+ */
+int dprc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int container_id,
+ u16 *token);
+
+int dprc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/* DPRC IRQ events */
+
+/* IRQ event - Indicates that a new object was added to the container */
+#define DPRC_IRQ_EVENT_OBJ_ADDED 0x00000001
+/* IRQ event - Indicates that an object was removed from the container */
+#define DPRC_IRQ_EVENT_OBJ_REMOVED 0x00000002
+/*
+ * IRQ event - Indicates that one of the descendant containers opened by
+ * this container was destroyed
+ */
+#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010
+
+/*
+ * IRQ event - Indicates that one of the objects opened by this container
+ * was destroyed
+ */
+#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020
+
+/* IRQ event - Indicates that an object was created in the container */
+#define DPRC_IRQ_EVENT_OBJ_CREATED 0x00000040
+
+/**
+ * struct dprc_irq_cfg - IRQ configuration
+ * @paddr: Address that must be written to signal a message-based interrupt
+ * @val: Value to write into irq_addr address
+ * @irq_num: A user defined number associated with this IRQ
+ */
+struct dprc_irq_cfg {
+ phys_addr_t paddr;
+ u32 val;
+ int irq_num;
+};
+
+int dprc_set_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg);
+
+int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
+int dprc_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
+
+int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status);
+
+/**
+ * struct dprc_attributes - Container attributes
+ * @container_id: Container's ID
+ * @icid: Container's ICID
+ * @portal_id: Container's portal ID
+ * @options: Container's options as set at container's creation
+ */
+struct dprc_attributes {
+ int container_id;
+ u32 icid;
+ int portal_id;
+ u64 options;
+};
+
+int dprc_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dprc_attributes *attributes);
+
+int dprc_get_obj_count(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *obj_count);
+
+int dprc_get_obj(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int obj_index,
+ struct fsl_mc_obj_desc *obj_desc);
+
+int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg);
+/**
+ * enum dprc_region_type - Region type
+ * @DPRC_REGION_TYPE_MC_PORTAL: MC portal region
+ * @DPRC_REGION_TYPE_QBMAN_PORTAL: Qbman portal region
+ * @DPRC_REGION_TYPE_QBMAN_MEM_BACKED_PORTAL: Qbman memory-backed portal region
+ */
+enum dprc_region_type {
+ DPRC_REGION_TYPE_MC_PORTAL,
+ DPRC_REGION_TYPE_QBMAN_PORTAL,
+ DPRC_REGION_TYPE_QBMAN_MEM_BACKED_PORTAL
+};
+
+/**
+ * struct dprc_region_desc - Mappable region descriptor
+ * @base_offset: Region offset from region's base address.
+ * For DPMCP and DPRC objects, region base is offset from SoC MC portals
+ * base address; For DPIO, region base is offset from SoC QMan portals
+ * base address
+ * @size: Region size (in bytes)
+ * @flags: Region attributes
+ * @type: Portal region type
+ * @base_address: Region base address; may be zero if older MC firmware is
+ * used (see base_addr in struct dprc_rsp_get_obj_region)
+ */
+struct dprc_region_desc {
+ u32 base_offset;
+ u32 size;
+ u32 flags;
+ enum dprc_region_type type;
+ u64 base_address;
+};
+
+int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 region_index,
+ struct dprc_region_desc *region_desc);
+
+int dprc_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
+
+int dprc_get_container_id(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int *container_id);
+
+/**
+ * struct dprc_endpoint - Endpoint description for link connect/disconnect
+ * operations
+ * @type: Endpoint object type: NULL terminated string
+ * @id: Endpoint object ID
+ * @if_id: Interface ID; should be set for endpoints with multiple
+ * interfaces ("dpsw", "dpdmux"); for others, always set to 0
+ */
+struct dprc_endpoint {
+ char type[16];
+ int id;
+ u16 if_id;
+};
+
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state);
+
+/*
+ * Data Path Buffer Pool (DPBP) API
+ */
+
+/* DPBP Version */
+#define DPBP_VER_MAJOR 3
+#define DPBP_VER_MINOR 2
+
+/* Command versioning */
+#define DPBP_CMD_BASE_VERSION 1
+#define DPBP_CMD_ID_OFFSET 4
+
+#define DPBP_CMD(id) (((id) << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPBP_CMDID_CLOSE DPBP_CMD(0x800)
+#define DPBP_CMDID_OPEN DPBP_CMD(0x804)
+
+#define DPBP_CMDID_ENABLE DPBP_CMD(0x002)
+#define DPBP_CMDID_DISABLE DPBP_CMD(0x003)
+#define DPBP_CMDID_GET_ATTR DPBP_CMD(0x004)
+#define DPBP_CMDID_RESET DPBP_CMD(0x005)
+
+struct dpbp_cmd_open {
+ __le32 dpbp_id;
+};
+
+#define DPBP_ENABLE 0x1
+
+struct dpbp_rsp_get_attributes {
+ /* response word 0 */
+ __le16 pad;
+ __le16 bpid;
+ __le32 id;
+ /* response word 1 */
+ __le16 version_major;
+ __le16 version_minor;
+};
+
+/*
+ * Data Path Concentrator (DPCON) API
+ */
+
+/* DPCON Version */
+#define DPCON_VER_MAJOR 3
+#define DPCON_VER_MINOR 2
+
+/* Command versioning */
+#define DPCON_CMD_BASE_VERSION 1
+#define DPCON_CMD_ID_OFFSET 4
+
+#define DPCON_CMD(id) (((id) << DPCON_CMD_ID_OFFSET) | DPCON_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPCON_CMDID_CLOSE DPCON_CMD(0x800)
+#define DPCON_CMDID_OPEN DPCON_CMD(0x808)
+
+#define DPCON_CMDID_ENABLE DPCON_CMD(0x002)
+#define DPCON_CMDID_DISABLE DPCON_CMD(0x003)
+#define DPCON_CMDID_GET_ATTR DPCON_CMD(0x004)
+#define DPCON_CMDID_RESET DPCON_CMD(0x005)
+
+#define DPCON_CMDID_SET_NOTIFICATION DPCON_CMD(0x100)
+
+struct dpcon_cmd_open {
+ __le32 dpcon_id;
+};
+
+#define DPCON_ENABLE 1
+
+struct dpcon_rsp_get_attr {
+ /* response word 0 */
+ __le32 id;
+ __le16 qbman_ch_id;
+ u8 num_priorities;
+ u8 pad;
+};
+
+struct dpcon_cmd_set_notification {
+ /* cmd word 0 */
+ __le32 dpio_id;
+ u8 priority;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le64 user_ctx;
+};
+
+/**
+ * struct fsl_mc_resource_pool - Pool of MC resources of a given
+ * type
+ * @type: type of resources in the pool
+ * @max_count: maximum number of resources in the pool
+ * @free_count: number of free resources in the pool
+ * @mutex: mutex to serialize access to the pool's free list
+ * @free_list: anchor node of list of free resources in the pool
+ * @mc_bus: pointer to the MC bus that owns this resource pool
+ */
+struct fsl_mc_resource_pool {
+ enum fsl_mc_pool_type type;
+ int max_count;
+ int free_count;
+ struct mutex mutex; /* serializes access to free_list */
+ struct list_head free_list;
+ struct fsl_mc_bus *mc_bus;
+};
+
+/**
+ * struct fsl_mc_bus - logical bus that corresponds to a physical DPRC
+ * @mc_dev: fsl-mc device for the bus device itself.
+ * @resource_pools: array of resource pools (one pool per resource type)
+ * for this MC bus. These resources represent allocatable entities
+ * from the physical DPRC.
+ * @irq_resources: Pointer to array of IRQ objects for the IRQ pool
+ * @scan_mutex: Serializes bus scanning
+ * @dprc_attr: DPRC attributes
+ */
+struct fsl_mc_bus {
+ struct fsl_mc_device mc_dev;
+ struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES];
+ struct fsl_mc_device_irq *irq_resources;
+ struct mutex scan_mutex; /* serializes bus scanning */
+ struct dprc_attributes dprc_attr;
+};
+
+#define to_fsl_mc_bus(_mc_dev) \
+ container_of(_mc_dev, struct fsl_mc_bus, mc_dev)
+
+int __must_check fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
+ struct fsl_mc_io *mc_io,
+ struct device *parent_dev,
+ struct fsl_mc_device **new_mc_dev);
+
+void fsl_mc_device_remove(struct fsl_mc_device *mc_dev);
+
+int __init dprc_driver_init(void);
+
+void dprc_driver_exit(void);
+
+int __init fsl_mc_allocator_driver_init(void);
+
+void fsl_mc_allocator_driver_exit(void);
+
+void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
+
+void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
+
+int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
+ enum fsl_mc_pool_type pool_type,
+ struct fsl_mc_resource
+ **new_resource);
+
+void fsl_mc_resource_free(struct fsl_mc_resource *resource);
+
+int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
+ unsigned int irq_count);
+
+void fsl_mc_msi_domain_free_irqs(struct device *dev);
+
+struct irq_domain *fsl_mc_find_msi_domain(struct device *dev);
+
+int __must_check fsl_create_mc_io(struct device *dev,
+ phys_addr_t mc_portal_phys_addr,
+ u32 mc_portal_size,
+ struct fsl_mc_device *dpmcp_dev,
+ u32 flags, struct fsl_mc_io **new_mc_io);
+
+void fsl_destroy_mc_io(struct fsl_mc_io *mc_io);
+
+bool fsl_mc_is_root_dprc(struct device *dev);
+
+void fsl_mc_get_root_dprc(struct device *dev,
+ struct device **root_dprc_dev);
+
+struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc *obj_desc,
+ struct fsl_mc_device *mc_bus_dev);
+
+#endif /* _FSL_MC_PRIVATE_H_ */
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
new file mode 100644
index 000000000..305015486
--- /dev/null
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+static int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io,
+ struct fsl_mc_device *dpmcp_dev)
+{
+ int error;
+
+ if (mc_io->dpmcp_dev)
+ return -EINVAL;
+
+ if (dpmcp_dev->mc_io)
+ return -EINVAL;
+
+ error = dpmcp_open(mc_io,
+ 0,
+ dpmcp_dev->obj_desc.id,
+ &dpmcp_dev->mc_handle);
+ if (error < 0)
+ return error;
+
+ mc_io->dpmcp_dev = dpmcp_dev;
+ dpmcp_dev->mc_io = mc_io;
+ return 0;
+}
+
+static void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io)
+{
+ int error;
+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
+
+ error = dpmcp_close(mc_io,
+ 0,
+ dpmcp_dev->mc_handle);
+ if (error < 0) {
+ dev_err(&dpmcp_dev->dev, "dpmcp_close() failed: %d\n",
+ error);
+ }
+
+ mc_io->dpmcp_dev = NULL;
+ dpmcp_dev->mc_io = NULL;
+}
+
+/**
+ * fsl_create_mc_io - Creates an MC I/O object
+ *
+ * @dev: device to be associated with the MC I/O object
+ * @mc_portal_phys_addr: physical address of the MC portal to use
+ * @mc_portal_size: size in bytes of the MC portal
+ * @dpmcp_dev: Pointer to the DPMCP object associated with this MC I/O
+ * object or NULL if none.
+ * @flags: flags for the new MC I/O object
+ * @new_mc_io: Area to return pointer to newly created MC I/O object
+ *
+ * Returns '0' on Success; Error code otherwise.
+ */
+int __must_check fsl_create_mc_io(struct device *dev,
+ phys_addr_t mc_portal_phys_addr,
+ u32 mc_portal_size,
+ struct fsl_mc_device *dpmcp_dev,
+ u32 flags, struct fsl_mc_io **new_mc_io)
+{
+ int error;
+ struct fsl_mc_io *mc_io;
+ void __iomem *mc_portal_virt_addr;
+ struct resource *res;
+
+ mc_io = devm_kzalloc(dev, sizeof(*mc_io), GFP_KERNEL);
+ if (!mc_io)
+ return -ENOMEM;
+
+ mc_io->dev = dev;
+ mc_io->flags = flags;
+ mc_io->portal_phys_addr = mc_portal_phys_addr;
+ mc_io->portal_size = mc_portal_size;
+ if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
+ raw_spin_lock_init(&mc_io->spinlock);
+ else
+ mutex_init(&mc_io->mutex);
+
+ res = devm_request_mem_region(dev,
+ mc_portal_phys_addr,
+ mc_portal_size,
+ "mc_portal");
+ if (!res) {
+ dev_err(dev,
+ "devm_request_mem_region failed for MC portal %pa\n",
+ &mc_portal_phys_addr);
+ return -EBUSY;
+ }
+
+ mc_portal_virt_addr = devm_ioremap(dev,
+ mc_portal_phys_addr,
+ mc_portal_size);
+ if (!mc_portal_virt_addr) {
+ dev_err(dev,
+ "devm_ioremap failed for MC portal %pa\n",
+ &mc_portal_phys_addr);
+ return -ENXIO;
+ }
+
+ mc_io->portal_virt_addr = mc_portal_virt_addr;
+ if (dpmcp_dev) {
+ error = fsl_mc_io_set_dpmcp(mc_io, dpmcp_dev);
+ if (error < 0)
+ goto error_destroy_mc_io;
+ }
+
+ *new_mc_io = mc_io;
+ return 0;
+
+error_destroy_mc_io:
+ fsl_destroy_mc_io(mc_io);
+ return error;
+}
+
+/**
+ * fsl_destroy_mc_io - Destroys an MC I/O object
+ *
+ * @mc_io: MC I/O object to destroy
+ */
+void fsl_destroy_mc_io(struct fsl_mc_io *mc_io)
+{
+ struct fsl_mc_device *dpmcp_dev;
+
+ if (!mc_io)
+ return;
+
+ dpmcp_dev = mc_io->dpmcp_dev;
+
+ if (dpmcp_dev)
+ fsl_mc_io_unset_dpmcp(mc_io);
+
+ devm_iounmap(mc_io->dev, mc_io->portal_virt_addr);
+ devm_release_mem_region(mc_io->dev,
+ mc_io->portal_phys_addr,
+ mc_io->portal_size);
+
+ mc_io->portal_virt_addr = NULL;
+ devm_kfree(mc_io->dev, mc_io);
+}
+
+/**
+ * fsl_mc_portal_allocate - Allocates an MC portal
+ *
+ * @mc_dev: MC device for which the MC portal is to be allocated
+ * @mc_io_flags: Flags for the fsl_mc_io object that wraps the allocated
+ * MC portal.
+ * @new_mc_io: Pointer to area where the pointer to the fsl_mc_io object
+ * that wraps the allocated MC portal is to be returned
+ *
+ * This function allocates an MC portal from the device's parent DPRC,
+ * from the corresponding MC bus' pool of MC portals and wraps
+ * it in a new fsl_mc_io object. If 'mc_dev' is a DPRC itself, the
+ * portal is allocated from its own MC bus.
+ */
+int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
+ u16 mc_io_flags,
+ struct fsl_mc_io **new_mc_io)
+{
+ struct fsl_mc_device *mc_bus_dev;
+ struct fsl_mc_bus *mc_bus;
+ phys_addr_t mc_portal_phys_addr;
+ size_t mc_portal_size;
+ struct fsl_mc_device *dpmcp_dev;
+ int error = -EINVAL;
+ struct fsl_mc_resource *resource = NULL;
+ struct fsl_mc_io *mc_io = NULL;
+
+ if (mc_dev->flags & FSL_MC_IS_DPRC) {
+ mc_bus_dev = mc_dev;
+ } else {
+ if (!dev_is_fsl_mc(mc_dev->dev.parent))
+ return error;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ }
+
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ *new_mc_io = NULL;
+ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_DPMCP, &resource);
+ if (error < 0)
+ return error;
+
+ error = -EINVAL;
+ dpmcp_dev = resource->data;
+
+ if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR ||
+ (dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR &&
+ dpmcp_dev->obj_desc.ver_minor < DPMCP_MIN_VER_MINOR)) {
+ dev_err(&dpmcp_dev->dev,
+ "ERROR: Version %d.%d of DPMCP not supported.\n",
+ dpmcp_dev->obj_desc.ver_major,
+ dpmcp_dev->obj_desc.ver_minor);
+ error = -ENOTSUPP;
+ goto error_cleanup_resource;
+ }
+
+ mc_portal_phys_addr = dpmcp_dev->regions[0].start;
+ mc_portal_size = resource_size(dpmcp_dev->regions);
+
+ error = fsl_create_mc_io(&mc_bus_dev->dev,
+ mc_portal_phys_addr,
+ mc_portal_size, dpmcp_dev,
+ mc_io_flags, &mc_io);
+ if (error < 0)
+ goto error_cleanup_resource;
+
+ dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
+ &dpmcp_dev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!dpmcp_dev->consumer_link) {
+ error = -EINVAL;
+ goto error_cleanup_mc_io;
+ }
+
+ *new_mc_io = mc_io;
+ return 0;
+
+error_cleanup_mc_io:
+ fsl_destroy_mc_io(mc_io);
+error_cleanup_resource:
+ fsl_mc_resource_free(resource);
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_portal_allocate);
+
+/**
+ * fsl_mc_portal_free - Returns an MC portal to the pool of free MC portals
+ * of a given MC bus
+ *
+ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
+ */
+void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
+{
+ struct fsl_mc_device *dpmcp_dev;
+ struct fsl_mc_resource *resource;
+
+ /*
+ * Every mc_io obtained by calling fsl_mc_portal_allocate() is supposed
+	 * to have a DPMCP object associated with it.
+ */
+ dpmcp_dev = mc_io->dpmcp_dev;
+
+ resource = dpmcp_dev->resource;
+ if (!resource || resource->type != FSL_MC_POOL_DPMCP)
+ return;
+
+ if (resource->data != dpmcp_dev)
+ return;
+
+ fsl_destroy_mc_io(mc_io);
+ fsl_mc_resource_free(resource);
+
+ dpmcp_dev->consumer_link = NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
+
+/**
+ * fsl_mc_portal_reset - Resets the dpmcp object for a given fsl_mc_io object
+ *
+ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to reset
+ */
+int fsl_mc_portal_reset(struct fsl_mc_io *mc_io)
+{
+ int error;
+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
+
+ error = dpmcp_reset(mc_io, 0, dpmcp_dev->mc_handle);
+ if (error < 0) {
+ dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_portal_reset);
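+
+/*
+ * Typical portal lifecycle from a child driver, as a sketch (mc_dev is
+ * the driver's own fsl_mc_device; error handling is elided):
+ *
+ *	struct fsl_mc_io *mc_io;
+ *	int err;
+ *
+ *	err = fsl_mc_portal_allocate(mc_dev, 0, &mc_io);	// in probe()
+ *	if (!err)
+ *		err = fsl_mc_portal_reset(mc_io);	// start clean
+ *	...
+ *	fsl_mc_portal_free(mc_io);			// in remove()
+ */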
diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
new file mode 100644
index 000000000..85a0225db
--- /dev/null
+++ b/drivers/bus/fsl-mc/mc-sys.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * I/O services to send MC commands to the MC hardware
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+/**
+ * Timeout in milliseconds to wait for the completion of an MC command
+ */
+#define MC_CMD_COMPLETION_TIMEOUT_MS 500
+
+/*
+ * usleep_range() min and max values used to throttle down polling
+ * iterations while waiting for MC command completion
+ */
+#define MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS 10
+#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500
+
+static enum mc_cmd_status mc_cmd_hdr_read_status(struct fsl_mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+
+ return (enum mc_cmd_status)hdr->status;
+}
+
+static u16 mc_cmd_hdr_read_cmdid(struct fsl_mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+ u16 cmd_id = le16_to_cpu(hdr->cmd_id);
+
+ return cmd_id;
+}
+
+static int mc_status_to_error(enum mc_cmd_status status)
+{
+ static const int mc_status_to_error_map[] = {
+ [MC_CMD_STATUS_OK] = 0,
+ [MC_CMD_STATUS_AUTH_ERR] = -EACCES,
+ [MC_CMD_STATUS_NO_PRIVILEGE] = -EPERM,
+ [MC_CMD_STATUS_DMA_ERR] = -EIO,
+ [MC_CMD_STATUS_CONFIG_ERR] = -ENXIO,
+ [MC_CMD_STATUS_TIMEOUT] = -ETIMEDOUT,
+ [MC_CMD_STATUS_NO_RESOURCE] = -ENAVAIL,
+ [MC_CMD_STATUS_NO_MEMORY] = -ENOMEM,
+ [MC_CMD_STATUS_BUSY] = -EBUSY,
+ [MC_CMD_STATUS_UNSUPPORTED_OP] = -ENOTSUPP,
+ [MC_CMD_STATUS_INVALID_STATE] = -ENODEV,
+ };
+
+ if ((u32)status >= ARRAY_SIZE(mc_status_to_error_map))
+ return -EINVAL;
+
+ return mc_status_to_error_map[status];
+}
+
+static const char *mc_status_to_string(enum mc_cmd_status status)
+{
+ static const char *const status_strings[] = {
+ [MC_CMD_STATUS_OK] = "Command completed successfully",
+ [MC_CMD_STATUS_READY] = "Command ready to be processed",
+ [MC_CMD_STATUS_AUTH_ERR] = "Authentication error",
+ [MC_CMD_STATUS_NO_PRIVILEGE] = "No privilege",
+ [MC_CMD_STATUS_DMA_ERR] = "DMA or I/O error",
+ [MC_CMD_STATUS_CONFIG_ERR] = "Configuration error",
+ [MC_CMD_STATUS_TIMEOUT] = "Operation timed out",
+ [MC_CMD_STATUS_NO_RESOURCE] = "No resources",
+ [MC_CMD_STATUS_NO_MEMORY] = "No memory available",
+ [MC_CMD_STATUS_BUSY] = "Device is busy",
+ [MC_CMD_STATUS_UNSUPPORTED_OP] = "Unsupported operation",
+ [MC_CMD_STATUS_INVALID_STATE] = "Invalid state"
+ };
+
+ if ((unsigned int)status >= ARRAY_SIZE(status_strings))
+ return "Unknown MC error";
+
+ return status_strings[status];
+}
+
+/**
+ * mc_write_command - writes a command to a Management Complex (MC) portal
+ *
+ * @portal: pointer to an MC portal
+ * @cmd: pointer to a filled command
+ */
+static inline void mc_write_command(struct fsl_mc_command __iomem *portal,
+ struct fsl_mc_command *cmd)
+{
+ int i;
+
+ /* copy command parameters into the portal */
+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
+ /*
+ * Data is already in the expected LE byte-order. Do an
+ * extra LE -> CPU conversion so that the CPU -> LE done in
+ * the device io write api puts it back in the right order.
+ */
+ writeq_relaxed(le64_to_cpu(cmd->params[i]), &portal->params[i]);
+
+ /* submit the command by writing the header */
+ writeq(le64_to_cpu(cmd->header), &portal->header);
+}
+
+/**
+ * mc_read_response - reads the response for the last MC command from a
+ * Management Complex (MC) portal
+ *
+ * @portal: pointer to an MC portal
+ * @resp: pointer to command response buffer
+ *
+ * Returns MC_CMD_STATUS_OK on Success; Error code otherwise.
+ */
+static inline enum mc_cmd_status mc_read_response(struct fsl_mc_command __iomem
+ *portal,
+ struct fsl_mc_command *resp)
+{
+ int i;
+ enum mc_cmd_status status;
+
+ /* Copy command response header from MC portal: */
+ resp->header = cpu_to_le64(readq_relaxed(&portal->header));
+ status = mc_cmd_hdr_read_status(resp);
+ if (status != MC_CMD_STATUS_OK)
+ return status;
+
+ /* Copy command response data from MC portal: */
+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
+ /*
+ * Data is expected to be in LE byte-order. Do an
+ * extra CPU -> LE to revert the LE -> CPU done in
+ * the device io read api.
+ */
+ resp->params[i] =
+ cpu_to_le64(readq_relaxed(&portal->params[i]));
+
+ return status;
+}
+
+/**
+ * Waits for the completion of an MC command doing preemptible polling.
+ * usleep_range() is called between polling iterations.
+ *
+ * @mc_io: MC I/O object to be used
+ * @cmd: command buffer to receive MC response
+ * @mc_status: MC command completion status
+ */
+static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
+ struct fsl_mc_command *cmd,
+ enum mc_cmd_status *mc_status)
+{
+ enum mc_cmd_status status;
+ unsigned long jiffies_until_timeout =
+ jiffies + msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS);
+
+ /*
+ * Wait for response from the MC hardware:
+ */
+ for (;;) {
+ status = mc_read_response(mc_io->portal_virt_addr, cmd);
+ if (status != MC_CMD_STATUS_READY)
+ break;
+
+ /*
+ * TODO: When MC command completion interrupts are supported
+ * call wait function here instead of usleep_range()
+ */
+ usleep_range(MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS,
+ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
+
+ if (time_after_eq(jiffies, jiffies_until_timeout)) {
+ dev_dbg(mc_io->dev,
+ "MC command timed out (portal: %pa, dprc handle: %#x, command: %#x)\n",
+ &mc_io->portal_phys_addr,
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
+
+ return -ETIMEDOUT;
+ }
+ }
+
+ *mc_status = status;
+ return 0;
+}
+
+/**
+ * Waits for the completion of an MC command doing atomic polling.
+ * udelay() is called between polling iterations.
+ *
+ * @mc_io: MC I/O object to be used
+ * @cmd: command buffer to receive MC response
+ * @mc_status: MC command completion status
+ */
+static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
+ struct fsl_mc_command *cmd,
+ enum mc_cmd_status *mc_status)
+{
+ enum mc_cmd_status status;
+ unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;
+
+ BUILD_BUG_ON((MC_CMD_COMPLETION_TIMEOUT_MS * 1000) %
+ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS != 0);
+
+ for (;;) {
+ status = mc_read_response(mc_io->portal_virt_addr, cmd);
+ if (status != MC_CMD_STATUS_READY)
+ break;
+
+ udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
+ timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
+ if (timeout_usecs == 0) {
+ dev_dbg(mc_io->dev,
+ "MC command timed out (portal: %pa, dprc handle: %#x, command: %#x)\n",
+ &mc_io->portal_phys_addr,
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
+
+ return -ETIMEDOUT;
+ }
+ }
+
+ *mc_status = status;
+ return 0;
+}
+
+/**
+ * Sends a command to the MC device using the given MC I/O object
+ *
+ * @mc_io: MC I/O object to be used
+ * @cmd: command to be sent
+ *
+ * Returns '0' on Success; Error code otherwise.
+ */
+int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd)
+{
+ int error;
+ enum mc_cmd_status status;
+ unsigned long irq_flags = 0;
+
+ if (in_irq() && !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
+ return -EINVAL;
+
+ if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
+ raw_spin_lock_irqsave(&mc_io->spinlock, irq_flags);
+ else
+ mutex_lock(&mc_io->mutex);
+
+ /*
+ * Send command to the MC hardware:
+ */
+ mc_write_command(mc_io->portal_virt_addr, cmd);
+
+ /*
+ * Wait for response from the MC hardware:
+ */
+ if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
+ error = mc_polling_wait_preemptible(mc_io, cmd, &status);
+ else
+ error = mc_polling_wait_atomic(mc_io, cmd, &status);
+
+ if (error < 0)
+ goto common_exit;
+
+ if (status != MC_CMD_STATUS_OK) {
+ dev_dbg(mc_io->dev,
+ "MC command failed: portal: %pa, dprc handle: %#x, command: %#x, status: %s (%#x)\n",
+ &mc_io->portal_phys_addr,
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd),
+ mc_status_to_string(status),
+ (unsigned int)status);
+
+ error = mc_status_to_error(status);
+ goto common_exit;
+ }
+
+ error = 0;
+common_exit:
+ if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
+ raw_spin_unlock_irqrestore(&mc_io->spinlock, irq_flags);
+ else
+ mutex_unlock(&mc_io->mutex);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(mc_send_command);
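+
+/*
+ * Sketch of the calling convention, as used by the dpmng/dprc wrappers
+ * elsewhere in this directory:
+ *
+ *	struct fsl_mc_command cmd = { 0 };
+ *	int err;
+ *
+ *	cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION,
+ *					  cmd_flags, 0);
+ *	err = mc_send_command(mc_io, &cmd);
+ *	// on success, the response is returned in cmd.header/cmd.params[]
+ */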
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
new file mode 100644
index 000000000..e7eaa8784
--- /dev/null
+++ b/drivers/bus/hisi_lpc.c
@@ -0,0 +1,708 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Hisilicon Limited, All Rights Reserved.
+ * Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
+ * Author: Zou Rongrong <zourongrong@huawei.com>
+ * Author: John Garry <john.garry@huawei.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/logic_pio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/serial_8250.h>
+#include <linux/slab.h>
+
+#define DRV_NAME "hisi-lpc"
+
+/*
+ * Setting this bit means each IO operation will target a different port
+ * address; 0 means repeated IO operations will use the same port,
+ * as required by, e.g., the IPMI BT (Block Transfer) interface.
+ */
+#define FG_INCRADDR_LPC 0x02
+
+struct lpc_cycle_para {
+ unsigned int opflags;
+ unsigned int csize; /* data length of each operation */
+};
+
+struct hisi_lpc_dev {
+ spinlock_t cycle_lock;
+ void __iomem *membase;
+ struct logic_pio_hwaddr *io_host;
+};
+
+/* The maximum IO cycle count supported per operation is four */
+#define LPC_MAX_DWIDTH 4
+
+#define LPC_REG_STARTUP_SIGNAL 0x00
+#define LPC_REG_STARTUP_SIGNAL_START BIT(0)
+#define LPC_REG_OP_STATUS 0x04
+#define LPC_REG_OP_STATUS_IDLE BIT(0)
+#define LPC_REG_OP_STATUS_FINISHED BIT(1)
+#define LPC_REG_OP_LEN 0x10 /* LPC cycles count per start */
+#define LPC_REG_CMD 0x14
+#define LPC_REG_CMD_OP BIT(0) /* 0: read, 1: write */
+#define LPC_REG_CMD_SAMEADDR BIT(3)
+#define LPC_REG_ADDR 0x20 /* target address */
+#define LPC_REG_WDATA 0x24 /* write FIFO */
+#define LPC_REG_RDATA 0x28 /* read FIFO */
+
+/* The minimal nanosecond interval for each query on LPC cycle status */
+#define LPC_NSEC_PERWAIT 100
+
+/*
+ * The maximum waiting time, specific to stream I/O such as ins, is about
+ * 128us:
+ *
+ * The fastest IO cycle time is about 390ns, but the worst case waits for
+ * an extra 256 LPC clocks, i.e. (256 + 13) * 30ns = 8us per cycle. The
+ * maximum burst is 16 cycles, so the worst-case waiting time is
+ * 16 * 8us = 128us.
+ *
+ * Choose 1300 waits (1300 * LPC_NSEC_PERWAIT = 130us) as the maximum.
+ */
+#define LPC_MAX_WAITCNT 1300
+
+/* About 10us. This is specific for single IO operations, such as inb */
+#define LPC_PEROP_WAITCNT 100
+
+static int wait_lpc_idle(void __iomem *mbase, unsigned int waitcnt)
+{
+ u32 status;
+
+ do {
+ status = readl(mbase + LPC_REG_OP_STATUS);
+ if (status & LPC_REG_OP_STATUS_IDLE)
+ return (status & LPC_REG_OP_STATUS_FINISHED) ? 0 : -EIO;
+ ndelay(LPC_NSEC_PERWAIT);
+ } while (--waitcnt);
+
+ return -ETIME;
+}
+
+/*
+ * hisi_lpc_target_in - trigger a series of LPC cycles for read operation
+ * @lpcdev: pointer to hisi lpc device
+ * @para: some parameters used to control the lpc I/O operations
+ * @addr: the lpc I/O target port address
+ * @buf: where the read back data is stored
+ * @opcnt: how many I/O operations required, i.e. data width
+ *
+ * Returns 0 on success, non-zero on fail.
+ */
+static int hisi_lpc_target_in(struct hisi_lpc_dev *lpcdev,
+ struct lpc_cycle_para *para, unsigned long addr,
+ unsigned char *buf, unsigned long opcnt)
+{
+ unsigned int cmd_word;
+ unsigned int waitcnt;
+ unsigned long flags;
+ int ret;
+
+ if (!buf || !opcnt || !para || !para->csize || !lpcdev)
+ return -EINVAL;
+
+ cmd_word = 0; /* IO mode, Read */
+ waitcnt = LPC_PEROP_WAITCNT;
+ if (!(para->opflags & FG_INCRADDR_LPC)) {
+ cmd_word |= LPC_REG_CMD_SAMEADDR;
+ waitcnt = LPC_MAX_WAITCNT;
+ }
+
+ /* whole operation must be atomic */
+ spin_lock_irqsave(&lpcdev->cycle_lock, flags);
+
+ writel_relaxed(opcnt, lpcdev->membase + LPC_REG_OP_LEN);
+ writel_relaxed(cmd_word, lpcdev->membase + LPC_REG_CMD);
+ writel_relaxed(addr, lpcdev->membase + LPC_REG_ADDR);
+
+ writel(LPC_REG_STARTUP_SIGNAL_START,
+ lpcdev->membase + LPC_REG_STARTUP_SIGNAL);
+
+ /* whether the operation is finished */
+ ret = wait_lpc_idle(lpcdev->membase, waitcnt);
+ if (ret) {
+ spin_unlock_irqrestore(&lpcdev->cycle_lock, flags);
+ return ret;
+ }
+
+ readsb(lpcdev->membase + LPC_REG_RDATA, buf, opcnt);
+
+ spin_unlock_irqrestore(&lpcdev->cycle_lock, flags);
+
+ return 0;
+}
+
+/*
+ * hisi_lpc_target_out - trigger a series of LPC cycles for write operation
+ * @lpcdev: pointer to hisi lpc device
+ * @para: some parameters used to control the lpc I/O operations
+ * @addr: the lpc I/O target port address
+ * @buf: where the data to be written is stored
+ * @opcnt: how many I/O operations required, i.e. data width
+ *
+ * Returns 0 on success, non-zero on fail.
+ */
+static int hisi_lpc_target_out(struct hisi_lpc_dev *lpcdev,
+ struct lpc_cycle_para *para, unsigned long addr,
+ const unsigned char *buf, unsigned long opcnt)
+{
+ unsigned int waitcnt;
+ unsigned long flags;
+ u32 cmd_word;
+ int ret;
+
+ if (!buf || !opcnt || !para || !lpcdev)
+ return -EINVAL;
+
+ /* default is increasing address */
+ cmd_word = LPC_REG_CMD_OP; /* IO mode, write */
+ waitcnt = LPC_PEROP_WAITCNT;
+ if (!(para->opflags & FG_INCRADDR_LPC)) {
+ cmd_word |= LPC_REG_CMD_SAMEADDR;
+ waitcnt = LPC_MAX_WAITCNT;
+ }
+
+ spin_lock_irqsave(&lpcdev->cycle_lock, flags);
+
+ writel_relaxed(opcnt, lpcdev->membase + LPC_REG_OP_LEN);
+ writel_relaxed(cmd_word, lpcdev->membase + LPC_REG_CMD);
+ writel_relaxed(addr, lpcdev->membase + LPC_REG_ADDR);
+
+ writesb(lpcdev->membase + LPC_REG_WDATA, buf, opcnt);
+
+ writel(LPC_REG_STARTUP_SIGNAL_START,
+ lpcdev->membase + LPC_REG_STARTUP_SIGNAL);
+
+ /* whether the operation is finished */
+ ret = wait_lpc_idle(lpcdev->membase, waitcnt);
+
+ spin_unlock_irqrestore(&lpcdev->cycle_lock, flags);
+
+ return ret;
+}
+
+static unsigned long hisi_lpc_pio_to_addr(struct hisi_lpc_dev *lpcdev,
+ unsigned long pio)
+{
+ return pio - lpcdev->io_host->io_start + lpcdev->io_host->hw_start;
+}
+
+/*
+ * hisi_lpc_comm_in - input the data in a single operation
+ * @hostdata: pointer to the device information relevant to LPC controller
+ * @pio: the target I/O port address
+ * @dwidth: the data length required to read from the target I/O port
+ *
+ * On success the data read is returned; otherwise ~0 is returned.
+ */
+static u32 hisi_lpc_comm_in(void *hostdata, unsigned long pio, size_t dwidth)
+{
+ struct hisi_lpc_dev *lpcdev = hostdata;
+ struct lpc_cycle_para iopara;
+ unsigned long addr;
+ __le32 rd_data = 0;
+ int ret;
+
+ if (!lpcdev || !dwidth || dwidth > LPC_MAX_DWIDTH)
+ return ~0;
+
+ addr = hisi_lpc_pio_to_addr(lpcdev, pio);
+
+ iopara.opflags = FG_INCRADDR_LPC;
+ iopara.csize = dwidth;
+
+ ret = hisi_lpc_target_in(lpcdev, &iopara, addr,
+ (unsigned char *)&rd_data, dwidth);
+ if (ret)
+ return ~0;
+
+ return le32_to_cpu(rd_data);
+}
+
+/*
+ * hisi_lpc_comm_out - output the data in a single operation
+ * @hostdata: pointer to the device information relevant to LPC controller
+ * @pio: the target I/O port address
+ * @val: a value to be output from caller, maximum is four bytes
+ * @dwidth: the data width required writing to the target I/O port
+ *
+ * This function corresponds to out(b,w,l) only.
+ */
+static void hisi_lpc_comm_out(void *hostdata, unsigned long pio,
+ u32 val, size_t dwidth)
+{
+ struct hisi_lpc_dev *lpcdev = hostdata;
+ struct lpc_cycle_para iopara;
+ const unsigned char *buf;
+ unsigned long addr;
+ __le32 _val = cpu_to_le32(val);
+
+ if (!lpcdev || !dwidth || dwidth > LPC_MAX_DWIDTH)
+ return;
+
+ buf = (const unsigned char *)&_val;
+ addr = hisi_lpc_pio_to_addr(lpcdev, pio);
+
+ iopara.opflags = FG_INCRADDR_LPC;
+ iopara.csize = dwidth;
+
+ hisi_lpc_target_out(lpcdev, &iopara, addr, buf, dwidth);
+}
+
+/*
+ * hisi_lpc_comm_ins - input the data in the buffer in multiple operations
+ * @hostdata: pointer to the device information relevant to LPC controller
+ * @pio: the target I/O port address
+ * @buffer: a buffer where read/input data bytes are stored
+ * @dwidth: the data width required reading from the target I/O port
+ * @count: how many data units whose length is dwidth will be read
+ *
+ * On success, the data read back is stored in the buffer pointed to by
+ * @buffer.
+ * Returns 0 on success, -errno otherwise.
+ */
+static u32 hisi_lpc_comm_ins(void *hostdata, unsigned long pio, void *buffer,
+ size_t dwidth, unsigned int count)
+{
+ struct hisi_lpc_dev *lpcdev = hostdata;
+ unsigned char *buf = buffer;
+ struct lpc_cycle_para iopara;
+ unsigned long addr;
+
+ if (!lpcdev || !buf || !count || !dwidth || dwidth > LPC_MAX_DWIDTH)
+ return -EINVAL;
+
+ iopara.opflags = 0;
+ if (dwidth > 1)
+ iopara.opflags |= FG_INCRADDR_LPC;
+ iopara.csize = dwidth;
+
+ addr = hisi_lpc_pio_to_addr(lpcdev, pio);
+
+ do {
+ int ret;
+
+ ret = hisi_lpc_target_in(lpcdev, &iopara, addr, buf, dwidth);
+ if (ret)
+ return ret;
+ buf += dwidth;
+ } while (--count);
+
+ return 0;
+}
+
+/*
+ * hisi_lpc_comm_outs - output the data in the buffer in multiple operations
+ * @hostdata: pointer to the device information relevant to LPC controller
+ * @pio: the target I/O port address
+ * @buffer: a buffer where write/output data bytes are stored
+ * @dwidth: the data width required writing to the target I/O port
+ * @count: how many data units whose length is dwidth will be written
+ */
+static void hisi_lpc_comm_outs(void *hostdata, unsigned long pio,
+ const void *buffer, size_t dwidth,
+ unsigned int count)
+{
+ struct hisi_lpc_dev *lpcdev = hostdata;
+ struct lpc_cycle_para iopara;
+ const unsigned char *buf = buffer;
+ unsigned long addr;
+
+ if (!lpcdev || !buf || !count || !dwidth || dwidth > LPC_MAX_DWIDTH)
+ return;
+
+ iopara.opflags = 0;
+ if (dwidth > 1)
+ iopara.opflags |= FG_INCRADDR_LPC;
+ iopara.csize = dwidth;
+
+ addr = hisi_lpc_pio_to_addr(lpcdev, pio);
+ do {
+ if (hisi_lpc_target_out(lpcdev, &iopara, addr, buf, dwidth))
+ break;
+ buf += dwidth;
+ } while (--count);
+}
+
+static const struct logic_pio_host_ops hisi_lpc_ops = {
+ .in = hisi_lpc_comm_in,
+ .out = hisi_lpc_comm_out,
+ .ins = hisi_lpc_comm_ins,
+ .outs = hisi_lpc_comm_outs,
+};
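+
+/*
+ * These callbacks are not called directly: hisi_lpc_probe() registers
+ * them with the logical PIO layer (logic_pio_register_range()), after
+ * which generic inb()/outb()/insb()/outsb() accesses to the registered
+ * port range are routed here.
+ */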
+
+#ifdef CONFIG_ACPI
+static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev,
+ struct acpi_device *host,
+ struct resource *res)
+{
+ unsigned long sys_port;
+ resource_size_t len = resource_size(res);
+
+ sys_port = logic_pio_trans_hwaddr(&host->fwnode, res->start, len);
+ if (sys_port == ~0UL)
+ return -EFAULT;
+
+ res->start = sys_port;
+ res->end = sys_port + len;
+
+ return 0;
+}
+
+/*
+ * Released firmware describes the IO port max address as 0x3fff, which is
+ * the max host bus address. Fixup to a proper range. This will probably
+ * never be fixed in firmware.
+ */
+static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev,
+ struct resource *r)
+{
+ if (r->end != 0x3fff)
+ return;
+
+ if (r->start == 0xe4)
+ r->end = 0xe4 + 0x04 - 1;
+ else if (r->start == 0x2f8)
+ r->end = 0x2f8 + 0x08 - 1;
+ else
+ dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n",
+ r);
+}
+
+/*
+ * hisi_lpc_acpi_set_io_res - set the resources for a child
+ * @child: the device node whose I/O resource is to be updated
+ * @hostdev: the device node associated with host controller
+ * @res: double pointer to be set to the address of translated resources
+ * @num_res: pointer to variable to hold the number of translated resources
+ *
+ * Returns 0 when successful, and a negative value for failure.
+ *
+ * For a given host controller, each child device will have an associated
+ * host-relative address resource. This function will return the translated
+ * logical PIO addresses for each child device's resources.
+ */
+static int hisi_lpc_acpi_set_io_res(struct device *child,
+ struct device *hostdev,
+ const struct resource **res, int *num_res)
+{
+ struct acpi_device *adev;
+ struct acpi_device *host;
+ struct resource_entry *rentry;
+ LIST_HEAD(resource_list);
+ struct resource *resources;
+ int count;
+ int i;
+
+ if (!child || !hostdev)
+ return -EINVAL;
+
+ host = to_acpi_device(hostdev);
+ adev = to_acpi_device(child);
+
+ if (!adev->status.present) {
+ dev_dbg(child, "device is not present\n");
+ return -EIO;
+ }
+
+ if (acpi_device_enumerated(adev)) {
+ dev_dbg(child, "has been enumerated\n");
+ return -EIO;
+ }
+
+ /*
+ * The following code segment to retrieve the resources is common to
+ * acpi_create_platform_device(), so consider a common helper function
+ * in future.
+ */
+ count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
+ if (count <= 0) {
+ dev_dbg(child, "failed to get resources\n");
+ return count ? count : -EIO;
+ }
+
+ resources = devm_kcalloc(hostdev, count, sizeof(*resources),
+ GFP_KERNEL);
+ if (!resources) {
+ dev_warn(hostdev, "could not allocate memory for %d resources\n",
+ count);
+ acpi_dev_free_resource_list(&resource_list);
+ return -ENOMEM;
+ }
+ count = 0;
+ list_for_each_entry(rentry, &resource_list, node) {
+ resources[count] = *rentry->res;
+ hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]);
+ count++;
+ }
+
+ acpi_dev_free_resource_list(&resource_list);
+
+ /* translate the I/O resources */
+ for (i = 0; i < count; i++) {
+ int ret;
+
+ if (!(resources[i].flags & IORESOURCE_IO))
+ continue;
+ ret = hisi_lpc_acpi_xlat_io_res(adev, host, &resources[i]);
+ if (ret) {
+ dev_err(child, "translate IO range %pR failed (%d)\n",
+ &resources[i], ret);
+ return ret;
+ }
+ }
+ *res = resources;
+ *num_res = count;
+
+ return 0;
+}
+
+static int hisi_lpc_acpi_remove_subdev(struct device *dev, void *unused)
+{
+ platform_device_unregister(to_platform_device(dev));
+ return 0;
+}
+
+struct hisi_lpc_acpi_cell {
+ const char *hid;
+ const char *name;
+ void *pdata;
+ size_t pdata_size;
+};
+
+static void hisi_lpc_acpi_remove(struct device *hostdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(hostdev);
+ struct acpi_device *child;
+
+ device_for_each_child(hostdev, NULL, hisi_lpc_acpi_remove_subdev);
+
+ list_for_each_entry(child, &adev->children, node)
+ acpi_device_clear_enumerated(child);
+}
+
+/*
+ * hisi_lpc_acpi_probe - probe children for ACPI FW
+ * @hostdev: LPC host device pointer
+ *
+ * Returns 0 on success, and a negative value on failure.
+ *
+ * Create a platform device per child, fixing up the resources
+ * from bus addresses to logical PIO addresses.
+ */
+static int hisi_lpc_acpi_probe(struct device *hostdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(hostdev);
+ struct acpi_device *child;
+ struct platform_device *pdev;
+ int ret;
+
+ /* Only consider the children of the host */
+ list_for_each_entry(child, &adev->children, node) {
+ const char *hid = acpi_device_hid(child);
+ const struct hisi_lpc_acpi_cell *cell;
+ const struct resource *res;
+ bool found = false;
+ int num_res;
+
+ ret = hisi_lpc_acpi_set_io_res(&child->dev, &adev->dev, &res,
+ &num_res);
+ if (ret) {
+ dev_warn(hostdev, "set resource fail (%d)\n", ret);
+ goto fail;
+ }
+
+ cell = (struct hisi_lpc_acpi_cell []){
+ /* ipmi */
+ {
+ .hid = "IPI0001",
+ .name = "hisi-lpc-ipmi",
+ },
+ /* 8250-compatible uart */
+ {
+ .hid = "HISI1031",
+ .name = "serial8250",
+ .pdata = (struct plat_serial8250_port []) {
+ {
+ .iobase = res->start,
+ .uartclk = 1843200,
+ .iotype = UPIO_PORT,
+ .flags = UPF_BOOT_AUTOCONF,
+ },
+ {}
+ },
+ .pdata_size = 2 *
+ sizeof(struct plat_serial8250_port),
+ },
+ {}
+ };
+
+ for (; cell && cell->name; cell++) {
+ if (!strcmp(cell->hid, hid)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_warn(hostdev,
+ "could not find cell for child device (%s), discarding\n",
+ hid);
+ continue;
+ }
+
+ pdev = platform_device_alloc(cell->name, PLATFORM_DEVID_AUTO);
+ if (!pdev) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ pdev->dev.parent = hostdev;
+ ACPI_COMPANION_SET(&pdev->dev, child);
+
+ ret = platform_device_add_resources(pdev, res, num_res);
+ if (ret)
+ goto fail_put_device;
+
+ ret = platform_device_add_data(pdev, cell->pdata,
+ cell->pdata_size);
+ if (ret)
+ goto fail_put_device;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto fail_put_device;
+
+ acpi_device_set_enumerated(child);
+ }
+
+ return 0;
+
+fail_put_device:
+ platform_device_put(pdev);
+fail:
+ hisi_lpc_acpi_remove(hostdev);
+ return ret;
+}
+
+static const struct acpi_device_id hisi_lpc_acpi_match[] = {
+ {"HISI0191"},
+ {}
+};
+#else
+static int hisi_lpc_acpi_probe(struct device *dev)
+{
+ return -ENODEV;
+}
+
+static void hisi_lpc_acpi_remove(struct device *hostdev)
+{
+}
+#endif /* CONFIG_ACPI */
+
+/*
+ * hisi_lpc_probe - probe callback for the hisi-lpc host; performs
+ * all of the initialization.
+ * @pdev: the platform device corresponding to the hisi-lpc host
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+static int hisi_lpc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct acpi_device *acpi_device = ACPI_COMPANION(dev);
+ struct logic_pio_hwaddr *range;
+ struct hisi_lpc_dev *lpcdev;
+ resource_size_t io_end;
+ struct resource *res;
+ int ret;
+
+ lpcdev = devm_kzalloc(dev, sizeof(*lpcdev), GFP_KERNEL);
+ if (!lpcdev)
+ return -ENOMEM;
+
+ spin_lock_init(&lpcdev->cycle_lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lpcdev->membase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(lpcdev->membase))
+ return PTR_ERR(lpcdev->membase);
+
+ range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
+ if (!range)
+ return -ENOMEM;
+
+ range->fwnode = dev->fwnode;
+ range->flags = LOGIC_PIO_INDIRECT;
+ range->size = PIO_INDIRECT_SIZE;
+ range->hostdata = lpcdev;
+ range->ops = &hisi_lpc_ops;
+ lpcdev->io_host = range;
+
+ ret = logic_pio_register_range(range);
+ if (ret) {
+ dev_err(dev, "register IO range failed (%d)!\n", ret);
+ return ret;
+ }
+
+ /* register the LPC host PIO resources */
+ if (acpi_device)
+ ret = hisi_lpc_acpi_probe(dev);
+ else
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret) {
+ logic_pio_unregister_range(range);
+ return ret;
+ }
+
+ dev_set_drvdata(dev, lpcdev);
+
+ io_end = lpcdev->io_host->io_start + lpcdev->io_host->size;
+ dev_info(dev, "registered range [%pa - %pa]\n",
+ &lpcdev->io_host->io_start, &io_end);
+
+ return ret;
+}
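+
+/*
+ * Illustrative sketch, not part of the driver: once the indirect PIO
+ * range above is registered, port accessors within it are expected to be
+ * routed through hisi_lpc_ops by the logic_pio layer (assuming
+ * CONFIG_INDIRECT_PIO); the 0x2f8 offset is a hypothetical UART base.
+ */
+#if 0 /* example only */
+static u8 hisi_lpc_example_inb(struct hisi_lpc_dev *lpcdev)
+{
+ unsigned long port = lpcdev->io_host->io_start + 0x2f8;
+
+ return inb(port); /* dispatched via the registered hisi_lpc_ops */
+}
+#endif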
+
+static int hisi_lpc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct acpi_device *acpi_device = ACPI_COMPANION(dev);
+ struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev);
+ struct logic_pio_hwaddr *range = lpcdev->io_host;
+
+ if (acpi_device)
+ hisi_lpc_acpi_remove(dev);
+ else
+ of_platform_depopulate(dev);
+
+ logic_pio_unregister_range(range);
+
+ return 0;
+}
+
+static const struct of_device_id hisi_lpc_of_match[] = {
+ { .compatible = "hisilicon,hip06-lpc", },
+ { .compatible = "hisilicon,hip07-lpc", },
+ {}
+};
+
+static struct platform_driver hisi_lpc_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = hisi_lpc_of_match,
+ .acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match),
+ },
+ .probe = hisi_lpc_probe,
+ .remove = hisi_lpc_remove,
+};
+builtin_platform_driver(hisi_lpc_driver);
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
new file mode 100644
index 000000000..201767823
--- /dev/null
+++ b/drivers/bus/imx-weim.c
@@ -0,0 +1,278 @@
+/*
+ * EIM driver for Freescale's i.MX chips
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/regmap.h>
+
+struct imx_weim_devtype {
+ unsigned int cs_count;
+ unsigned int cs_regs_count;
+ unsigned int cs_stride;
+ unsigned int wcr_offset;
+ unsigned int wcr_bcm;
+};
+
+static const struct imx_weim_devtype imx1_weim_devtype = {
+ .cs_count = 6,
+ .cs_regs_count = 2,
+ .cs_stride = 0x08,
+};
+
+static const struct imx_weim_devtype imx27_weim_devtype = {
+ .cs_count = 6,
+ .cs_regs_count = 3,
+ .cs_stride = 0x10,
+};
+
+static const struct imx_weim_devtype imx50_weim_devtype = {
+ .cs_count = 4,
+ .cs_regs_count = 6,
+ .cs_stride = 0x18,
+ .wcr_offset = 0x90,
+ .wcr_bcm = BIT(0),
+};
+
+static const struct imx_weim_devtype imx51_weim_devtype = {
+ .cs_count = 6,
+ .cs_regs_count = 6,
+ .cs_stride = 0x18,
+};
+
+#define MAX_CS_REGS_COUNT 6
+#define MAX_CS_COUNT 6
+#define OF_REG_SIZE 3
+
+struct cs_timing {
+ bool is_applied;
+ u32 regs[MAX_CS_REGS_COUNT];
+};
+
+struct cs_timing_state {
+ struct cs_timing cs[MAX_CS_COUNT];
+};
+
+static const struct of_device_id weim_id_table[] = {
+ /* i.MX1/21 */
+ { .compatible = "fsl,imx1-weim", .data = &imx1_weim_devtype, },
+ /* i.MX25/27/31/35 */
+ { .compatible = "fsl,imx27-weim", .data = &imx27_weim_devtype, },
+ /* i.MX50/53/6Q */
+ { .compatible = "fsl,imx50-weim", .data = &imx50_weim_devtype, },
+ { .compatible = "fsl,imx6q-weim", .data = &imx50_weim_devtype, },
+ /* i.MX51 */
+ { .compatible = "fsl,imx51-weim", .data = &imx51_weim_devtype, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, weim_id_table);
+
+static int imx_weim_gpr_setup(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct property *prop;
+ const __be32 *p;
+ struct regmap *gpr;
+ u32 gprvals[4] = {
+ 05, /* CS0(128M) CS1(0M) CS2(0M) CS3(0M) */
+ 033, /* CS0(64M) CS1(64M) CS2(0M) CS3(0M) */
+ 0113, /* CS0(64M) CS1(32M) CS2(32M) CS3(0M) */
+ 01111, /* CS0(32M) CS1(32M) CS2(32M) CS3(32M) */
+ };
+ u32 gprval = 0;
+ u32 val;
+ int cs = 0;
+ int i = 0;
+
+ gpr = syscon_regmap_lookup_by_phandle(np, "fsl,weim-cs-gpr");
+ if (IS_ERR(gpr)) {
+ dev_dbg(&pdev->dev, "failed to find weim-cs-gpr\n");
+ return 0;
+ }
+
+ of_property_for_each_u32(np, "ranges", prop, p, val) {
+ if (i % 4 == 0) {
+ cs = val;
+ } else if (i % 4 == 3 && val) {
+ val = (val / SZ_32M) | 1;
+ gprval |= val << cs * 3;
+ }
+ i++;
+ }
+
+ if (i == 0 || i % 4)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(gprvals); i++) {
+ if (gprval == gprvals[i]) {
+ /* Found it. Set up IOMUXC_GPR1[11:0] with it. */
+ regmap_update_bits(gpr, IOMUXC_GPR1, 0xfff, gprval);
+ return 0;
+ }
+ }
+
+err:
+ dev_err(&pdev->dev, "Invalid 'ranges' configuration\n");
+ return -EINVAL;
+}
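+
+/*
+ * Worked example (illustrative, not part of the driver): each 4-cell
+ * "ranges" entry contributes its size cell above, where a 64M window
+ * gives val = (SZ_64M / SZ_32M) | 1 = 3. So CS0 and CS1 at 64M each
+ * yield gprval = (3 << 0 * 3) | (3 << 1 * 3) = 033 (octal), matching the
+ * second entry of gprvals[].
+ */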
+
+/* Parse and set the timing for this device. */
+static int weim_timing_setup(struct device *dev,
+ struct device_node *np, void __iomem *base,
+ const struct imx_weim_devtype *devtype,
+ struct cs_timing_state *ts)
+{
+ u32 cs_idx, value[MAX_CS_REGS_COUNT];
+ int i, ret;
+ int reg_idx, num_regs;
+ struct cs_timing *cst;
+
+ if (WARN_ON(devtype->cs_regs_count > MAX_CS_REGS_COUNT))
+ return -EINVAL;
+ if (WARN_ON(devtype->cs_count > MAX_CS_COUNT))
+ return -EINVAL;
+
+ ret = of_property_read_u32_array(np, "fsl,weim-cs-timing",
+ value, devtype->cs_regs_count);
+ if (ret)
+ return ret;
+
+ /*
+ * The child node's "reg" property may contain multiple address ranges;
+ * extract the chip select for each.
+ */
+ num_regs = of_property_count_elems_of_size(np, "reg", OF_REG_SIZE);
+ if (num_regs < 0)
+ return num_regs;
+ if (!num_regs)
+ return -EINVAL;
+ for (reg_idx = 0; reg_idx < num_regs; reg_idx++) {
+ /* get the CS index from this child node's "reg" property. */
+ ret = of_property_read_u32_index(np, "reg",
+ reg_idx * OF_REG_SIZE, &cs_idx);
+ if (ret)
+ break;
+
+ if (cs_idx >= devtype->cs_count)
+ return -EINVAL;
+
+ /* prevent re-configuring a CS that's already been configured */
+ cst = &ts->cs[cs_idx];
+ if (cst->is_applied && memcmp(value, cst->regs,
+ devtype->cs_regs_count * sizeof(u32))) {
+ dev_err(dev, "fsl,weim-cs-timing conflict on %pOF", np);
+ return -EINVAL;
+ }
+
+ /* set the timing for WEIM */
+ for (i = 0; i < devtype->cs_regs_count; i++)
+ writel(value[i],
+ base + cs_idx * devtype->cs_stride + i * 4);
+ if (!cst->is_applied) {
+ cst->is_applied = true;
+ memcpy(cst->regs, value,
+ devtype->cs_regs_count * sizeof(u32));
+ }
+ }
+
+ return 0;
+}
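+
+/*
+ * Worked example (illustrative): for the imx50 devtype above
+ * (cs_stride = 0x18, cs_regs_count = 6), the six timing words for CS1
+ * are written to base + 0x18 through base + 0x2c in 4-byte steps.
+ */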
+
+static int weim_parse_dt(struct platform_device *pdev, void __iomem *base)
+{
+ const struct of_device_id *of_id = of_match_device(weim_id_table,
+ &pdev->dev);
+ const struct imx_weim_devtype *devtype = of_id->data;
+ int ret = 0, have_child = 0;
+ struct device_node *child;
+ struct cs_timing_state ts = {};
+ u32 reg;
+
+ if (devtype == &imx50_weim_devtype) {
+ ret = imx_weim_gpr_setup(pdev);
+ if (ret)
+ return ret;
+ }
+
+ if (of_property_read_bool(pdev->dev.of_node, "fsl,burst-clk-enable")) {
+ if (devtype->wcr_bcm) {
+ reg = readl(base + devtype->wcr_offset);
+ writel(reg | devtype->wcr_bcm,
+ base + devtype->wcr_offset);
+ } else {
+ dev_err(&pdev->dev, "burst clk mode not supported.\n");
+ return -EINVAL;
+ }
+ }
+
+ for_each_available_child_of_node(pdev->dev.of_node, child) {
+ ret = weim_timing_setup(&pdev->dev, child, base, devtype, &ts);
+ if (ret)
+ dev_warn(&pdev->dev, "%pOF set timing failed.\n",
+ child);
+ else
+ have_child = 1;
+ }
+
+ if (have_child)
+ ret = of_platform_default_populate(pdev->dev.of_node,
+ NULL, &pdev->dev);
+ if (ret)
+ dev_err(&pdev->dev, "%pOF fail to create devices.\n",
+ pdev->dev.of_node);
+ return ret;
+}
+
+static int weim_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct clk *clk;
+ void __iomem *base;
+ int ret;
+
+ /* get the resource */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* get the clock */
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ /* parse the device node */
+ ret = weim_parse_dt(pdev, base);
+ if (ret)
+ clk_disable_unprepare(clk);
+ else
+ dev_info(&pdev->dev, "Driver registered.\n");
+
+ return ret;
+}
+
+static struct platform_driver weim_driver = {
+ .driver = {
+ .name = "imx-weim",
+ .of_match_table = weim_id_table,
+ },
+ .probe = weim_probe,
+};
+module_platform_driver(weim_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor Inc.");
+MODULE_DESCRIPTION("i.MX EIM Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig
new file mode 100644
index 000000000..4748df7f9
--- /dev/null
+++ b/drivers/bus/mhi/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# MHI bus
+#
+# Copyright (c) 2021, Linaro Ltd.
+#
+
+source "drivers/bus/mhi/host/Kconfig"
diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile
new file mode 100644
index 000000000..5f5708a24
--- /dev/null
+++ b/drivers/bus/mhi/Makefile
@@ -0,0 +1,2 @@
+# Host MHI stack
+obj-y += host/
diff --git a/drivers/bus/mhi/host/Kconfig b/drivers/bus/mhi/host/Kconfig
new file mode 100644
index 000000000..da5cd0c9f
--- /dev/null
+++ b/drivers/bus/mhi/host/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# MHI bus
+#
+# Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+#
+
+config MHI_BUS
+ tristate "Modem Host Interface (MHI) bus"
+ help
+ Bus driver for MHI protocol. Modem Host Interface (MHI) is a
+ communication protocol used by the host processors to control
+ and communicate with modem devices over a high speed peripheral
+ bus or shared memory.
+
+config MHI_BUS_DEBUG
+ bool "Debugfs support for the MHI bus"
+ depends on MHI_BUS && DEBUG_FS
+ help
+ Enable debugfs support for use with the MHI transport. Allows
+ reading and/or modifying some values within the MHI controller
+ for debug and test purposes.
+
+config MHI_BUS_PCI_GENERIC
+ tristate "MHI PCI controller driver"
+ depends on MHI_BUS
+ depends on PCI
+ help
+ This driver provides an MHI PCI controller driver for devices such
+ as Qualcomm SDX55 based PCIe modems.
+
diff --git a/drivers/bus/mhi/host/Makefile b/drivers/bus/mhi/host/Makefile
new file mode 100644
index 000000000..859c2f384
--- /dev/null
+++ b/drivers/bus/mhi/host/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_MHI_BUS) += mhi.o
+mhi-y := init.o main.o pm.o boot.o
+mhi-$(CONFIG_MHI_BUS_DEBUG) += debugfs.o
+
+obj-$(CONFIG_MHI_BUS_PCI_GENERIC) += mhi_pci_generic.o
+mhi_pci_generic-y += pci_generic.o
diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
new file mode 100644
index 000000000..24422f5c3
--- /dev/null
+++ b/drivers/bus/mhi/host/boot.c
@@ -0,0 +1,525 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include "internal.h"
+
+/* Setup RDDM vector table for RDDM transfer and program RXVEC */
+void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+ struct image_info *img_info)
+{
+ struct mhi_buf *mhi_buf = img_info->mhi_buf;
+ struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
+ void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 sequence_id;
+ unsigned int i;
+
+ for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
+ bhi_vec->dma_addr = mhi_buf->dma_addr;
+ bhi_vec->size = mhi_buf->len;
+ }
+
+ dev_dbg(dev, "BHIe programming for RDDM\n");
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
+ upper_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
+ lower_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
+ sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK);
+
+ mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
+ BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
+ sequence_id);
+
+ dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n",
+ &mhi_buf->dma_addr, mhi_buf->len, sequence_id);
+}
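+
+/*
+ * Illustrative note on mhi_rddm_prepare() above: the loop fills one
+ * bhi_vec_entry per data segment, leaving mhi_buf pointing at the final
+ * entry (the vector table itself), whose DMA address and length are what
+ * get programmed into the BHIE RXVEC registers.
+ */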
+
+/* Collect RDDM buffer during kernel panic */
+static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
+{
+ int ret;
+ u32 rx_status;
+ enum mhi_ee_type ee;
+ const u32 delayus = 2000;
+ u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
+ const u32 rddm_timeout_us = 200000;
+ int rddm_retry = rddm_timeout_us / delayus;
+ void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ dev_dbg(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+ /*
+ * This should only be executing during a kernel panic; we expect all
+ * other cores to shut down while we're collecting the RDDM buffer.
+ * After returning from this function, we expect the device to reset.
+ *
+ * Normally, we would read/write pm_state only after grabbing the
+ * pm_lock; since we're in a panic, we skip it. There is also no
+ * guarantee that this state change will take effect, since we're
+ * setting it without holding pm_lock.
+ */
+ mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+ /* the update should take effect immediately */
+ smp_wmb();
+
+ /*
+ * Make sure device is not already in RDDM. In case the device asserts
+ * and a kernel panic follows, device will already be in RDDM.
+ * Do not trigger SYS ERR again and proceed with waiting for
+ * image download completion.
+ */
+ ee = mhi_get_exec_env(mhi_cntrl);
+ if (ee != MHI_EE_RDDM) {
+ dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+
+ dev_dbg(dev, "Waiting for device to enter RDDM\n");
+ while (rddm_retry--) {
+ ee = mhi_get_exec_env(mhi_cntrl);
+ if (ee == MHI_EE_RDDM)
+ break;
+
+ udelay(delayus);
+ }
+
+ if (rddm_retry <= 0) {
+ /* Hardware reset so force device to enter RDDM */
+ dev_dbg(dev,
+ "Did not enter RDDM, do a host req reset\n");
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
+ MHI_SOC_RESET_REQ_OFFSET,
+ MHI_SOC_RESET_REQ);
+ udelay(delayus);
+ }
+
+ ee = mhi_get_exec_env(mhi_cntrl);
+ }
+
+ dev_dbg(dev,
+ "Waiting for RDDM image download via BHIe, current EE:%s\n",
+ TO_MHI_EXEC_STR(ee));
+
+ while (retry--) {
+ ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
+ BHIE_RXVECSTATUS_STATUS_BMSK,
+ BHIE_RXVECSTATUS_STATUS_SHFT,
+ &rx_status);
+ if (ret)
+ return -EIO;
+
+ if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL)
+ return 0;
+
+ udelay(delayus);
+ }
+
+ ee = mhi_get_exec_env(mhi_cntrl);
+ ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
+
+ dev_err(dev, "Did not complete RDDM transfer\n");
+ dev_err(dev, "Current EE: %s\n", TO_MHI_EXEC_STR(ee));
+ dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status);
+
+ return -EIO;
+}
+
+/* Download RDDM image from device */
+int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
+{
+ void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 rx_status;
+
+ if (in_panic)
+ return __mhi_download_rddm_in_panic(mhi_cntrl);
+
+ dev_dbg(dev, "Waiting for RDDM image download via BHIe\n");
+
+ /* Wait for the image download to complete */
+ wait_event_timeout(mhi_cntrl->state_event,
+ mhi_read_reg_field(mhi_cntrl, base,
+ BHIE_RXVECSTATUS_OFFS,
+ BHIE_RXVECSTATUS_STATUS_BMSK,
+ BHIE_RXVECSTATUS_STATUS_SHFT,
+ &rx_status) || rx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
+}
+EXPORT_SYMBOL_GPL(mhi_download_rddm_img);
+
+static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
+ const struct mhi_buf *mhi_buf)
+{
+ void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ u32 tx_status, sequence_id;
+ int ret;
+
+ read_lock_bh(pm_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ read_unlock_bh(pm_lock);
+ return -EIO;
+ }
+
+ sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK);
+ dev_dbg(dev, "Starting AMSS download via BHIe. Sequence ID:%u\n",
+ sequence_id);
+ mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
+ upper_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
+ lower_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
+
+ mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
+ BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
+ sequence_id);
+ read_unlock_bh(pm_lock);
+
+ /* Wait for the image download to complete */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl, base,
+ BHIE_TXVECSTATUS_OFFS,
+ BHIE_TXVECSTATUS_STATUS_BMSK,
+ BHIE_TXVECSTATUS_STATUS_SHFT,
+ &tx_status) || tx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ tx_status != BHIE_TXVECSTATUS_STATUS_XFER_COMPL)
+ return -EIO;
+
+ return (!ret) ? -ETIMEDOUT : 0;
+}
+
+static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
+ dma_addr_t dma_addr,
+ size_t size)
+{
+ u32 tx_status, val, session_id;
+ int i, ret;
+ void __iomem *base = mhi_cntrl->bhi;
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct {
+ char *name;
+ u32 offset;
+ } error_reg[] = {
+ { "ERROR_CODE", BHI_ERRCODE },
+ { "ERROR_DBG1", BHI_ERRDBG1 },
+ { "ERROR_DBG2", BHI_ERRDBG2 },
+ { "ERROR_DBG3", BHI_ERRDBG3 },
+ { NULL },
+ };
+
+ read_lock_bh(pm_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ read_unlock_bh(pm_lock);
+ goto invalid_pm_state;
+ }
+
+ session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK);
+ dev_dbg(dev, "Starting SBL download via BHI. Session ID:%u\n",
+ session_id);
+ mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
+ upper_32_bits(dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
+ lower_32_bits(dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
+ read_unlock_bh(pm_lock);
+
+ /* Wait for the image download to complete */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS,
+ BHI_STATUS_MASK, BHI_STATUS_SHIFT,
+ &tx_status) || tx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ goto invalid_pm_state;
+
+ if (tx_status == BHI_STATUS_ERROR) {
+ dev_err(dev, "Image transfer failed\n");
+ read_lock_bh(pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ for (i = 0; error_reg[i].name; i++) {
+ ret = mhi_read_reg(mhi_cntrl, base,
+ error_reg[i].offset, &val);
+ if (ret)
+ break;
+ dev_err(dev, "Reg: %s value: 0x%x\n",
+ error_reg[i].name, val);
+ }
+ }
+ read_unlock_bh(pm_lock);
+ goto invalid_pm_state;
+ }
+
+ return (!ret) ? -ETIMEDOUT : 0;
+
+invalid_pm_state:
+
+ return -EIO;
+}
+
+void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info *image_info)
+{
+ int i;
+ struct mhi_buf *mhi_buf = image_info->mhi_buf;
+
+ for (i = 0; i < image_info->entries; i++, mhi_buf++)
+ mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
+ mhi_buf->dma_addr);
+
+ kfree(image_info->mhi_buf);
+ kfree(image_info);
+}
+
+int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info **image_info,
+ size_t alloc_size)
+{
+ size_t seg_size = mhi_cntrl->seg_len;
+ int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
+ int i;
+ struct image_info *img_info;
+ struct mhi_buf *mhi_buf;
+
+ img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
+ if (!img_info)
+ return -ENOMEM;
+
+ /* Allocate memory for entries */
+ img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
+ GFP_KERNEL);
+ if (!img_info->mhi_buf)
+ goto error_alloc_mhi_buf;
+
+ /* Allocate and populate vector table */
+ mhi_buf = img_info->mhi_buf;
+ for (i = 0; i < segments; i++, mhi_buf++) {
+ size_t vec_size = seg_size;
+
+ /* Vector table is the last entry */
+ if (i == segments - 1)
+ vec_size = sizeof(struct bhi_vec_entry) * i;
+
+ mhi_buf->len = vec_size;
+ mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size,
+ &mhi_buf->dma_addr,
+ GFP_KERNEL);
+ if (!mhi_buf->buf)
+ goto error_alloc_segment;
+ }
+
+ img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
+ img_info->entries = segments;
+ *image_info = img_info;
+
+ return 0;
+
+error_alloc_segment:
+ for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
+ mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
+ mhi_buf->dma_addr);
+
+error_alloc_mhi_buf:
+ kfree(img_info);
+
+ return -ENOMEM;
+}
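+
+/*
+ * Worked example (illustrative): with seg_len = 512 KiB and
+ * alloc_size = 1 MiB, segments = DIV_ROUND_UP(1M, 512K) + 1 = 3. The loop
+ * allocates two 512 KiB data buffers, and the last iteration (i == 2)
+ * allocates vec_size = 2 * sizeof(struct bhi_vec_entry) to hold the
+ * vector table that describes the two data buffers.
+ */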
+
+static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
+ const struct firmware *firmware,
+ struct image_info *img_info)
+{
+ size_t remainder = firmware->size;
+ size_t to_cpy;
+ const u8 *buf = firmware->data;
+ int i = 0;
+ struct mhi_buf *mhi_buf = img_info->mhi_buf;
+ struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
+
+ while (remainder) {
+ to_cpy = min(remainder, mhi_buf->len);
+ memcpy(mhi_buf->buf, buf, to_cpy);
+ bhi_vec->dma_addr = mhi_buf->dma_addr;
+ bhi_vec->size = to_cpy;
+
+ buf += to_cpy;
+ remainder -= to_cpy;
+ i++;
+ bhi_vec++;
+ mhi_buf++;
+ }
+}
+
+void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
+{
+ const struct firmware *firmware = NULL;
+ struct image_info *image_info;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ const char *fw_name;
+ void *buf;
+ dma_addr_t dma_addr;
+ size_t size;
+ int i, ret;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev, "Device MHI is not in valid state\n");
+ return;
+ }
+
+ /* save hardware info from BHI */
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_SERIALNU,
+ &mhi_cntrl->serial_number);
+ if (ret)
+ dev_err(dev, "Could not capture serial number via BHI\n");
+
+ for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) {
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i),
+ &mhi_cntrl->oem_pk_hash[i]);
+ if (ret) {
+ dev_err(dev, "Could not capture OEM PK HASH via BHI\n");
+ break;
+ }
+ }
+
+ /* If the device is in pass-through, do the RESET to READY state transition */
+ if (mhi_cntrl->ee == MHI_EE_PTHRU)
+ goto fw_load_ee_pthru;
+
+ fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
+ mhi_cntrl->edl_image : mhi_cntrl->fw_image;
+
+ if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
+ !mhi_cntrl->seg_len))) {
+ dev_err(dev,
+ "No firmware image defined or !sbl_size || !seg_len\n");
+ return;
+ }
+
+ ret = request_firmware(&firmware, fw_name, dev);
+ if (ret) {
+ dev_err(dev, "Error loading firmware: %d\n", ret);
+ return;
+ }
+
+ size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;
+
+ /* SBL size provided is maximum size, not necessarily the image size */
+ if (size > firmware->size)
+ size = firmware->size;
+
+ buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
+ if (!buf) {
+ release_firmware(firmware);
+ return;
+ }
+
+ /* Download SBL image */
+ memcpy(buf, firmware->data, size);
+ ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
+ mhi_free_coherent(mhi_cntrl, size, buf, dma_addr);
+
+ if (!mhi_cntrl->fbc_download || ret || mhi_cntrl->ee == MHI_EE_EDL)
+ release_firmware(firmware);
+
+ /* Error or in EDL mode, we're done */
+ if (ret) {
+ dev_err(dev, "MHI did not load SBL, ret:%d\n", ret);
+ return;
+ }
+
+ if (mhi_cntrl->ee == MHI_EE_EDL)
+ return;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->dev_state = MHI_STATE_RESET;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ /*
+ * If we're doing fbc, populate the vector tables while the
+ * device is transitioning into the MHI READY state
+ */
+ if (mhi_cntrl->fbc_download) {
+ ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image,
+ firmware->size);
+ if (ret)
+ goto error_alloc_fw_table;
+
+ /* Load the firmware into BHIE vec table */
+ mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
+ }
+
+fw_load_ee_pthru:
+ /* Transitioning into MHI RESET->READY state */
+ ret = mhi_ready_state_transition(mhi_cntrl);
+
+ if (!mhi_cntrl->fbc_download)
+ return;
+
+ if (ret) {
+ dev_err(dev, "MHI did not enter READY state\n");
+ goto error_read;
+ }
+
+ /* Wait for the SBL event */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->ee == MHI_EE_SBL ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev, "MHI did not enter SBL\n");
+ goto error_read;
+ }
+
+ /* Start full firmware image download */
+ image_info = mhi_cntrl->fbc_image;
+ ret = mhi_fw_load_amss(mhi_cntrl,
+ /* Vector table is the last entry */
+ &image_info->mhi_buf[image_info->entries - 1]);
+ if (ret)
+ dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
+
+ release_firmware(firmware);
+
+ return;
+
+error_read:
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+ mhi_cntrl->fbc_image = NULL;
+
+error_alloc_fw_table:
+ release_firmware(firmware);
+}
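+
+/*
+ * Boot flow summary (illustrative): mhi_fw_load_handler() pushes the SBL
+ * over BHI from a single coherent buffer, then, when fbc_download is set,
+ * stages the full image in a BHIe vector table and hands the table's last
+ * entry to mhi_fw_load_amss() once the device reports the SBL execution
+ * environment.
+ */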
diff --git a/drivers/bus/mhi/host/debugfs.c b/drivers/bus/mhi/host/debugfs.c
new file mode 100644
index 000000000..3a48801e0
--- /dev/null
+++ b/drivers/bus/mhi/host/debugfs.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include "internal.h"
+
+static int mhi_debugfs_states_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+
+ /* states */
+ seq_printf(m, "PM state: %s Device: %s MHI state: %s EE: %s wake: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ mhi_is_active(mhi_cntrl) ? "Active" : "Inactive",
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee),
+ mhi_cntrl->wake_set ? "true" : "false");
+
+ /* counters */
+ seq_printf(m, "M0: %u M2: %u M3: %u", mhi_cntrl->M0, mhi_cntrl->M2,
+ mhi_cntrl->M3);
+
+ seq_printf(m, " device wake: %u pending packets: %u\n",
+ atomic_read(&mhi_cntrl->dev_wake),
+ atomic_read(&mhi_cntrl->pending_pkts));
+
+ return 0;
+}
+
+static int mhi_debugfs_events_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+ struct mhi_event *mhi_event;
+ struct mhi_event_ctxt *er_ctxt;
+ int i;
+
+ if (!mhi_is_active(mhi_cntrl)) {
+ seq_puts(m, "Device not ready\n");
+ return -ENODEV;
+ }
+
+ er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings;
+ i++, er_ctxt++, mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ if (mhi_event->offload_ev) {
+ seq_printf(m, "Index: %d is an offload event ring\n",
+ i);
+ continue;
+ }
+
+ seq_printf(m, "Index: %d intmod count: %lu time: %lu",
+ i, (er_ctxt->intmod & EV_CTX_INTMODC_MASK) >>
+ EV_CTX_INTMODC_SHIFT,
+ (er_ctxt->intmod & EV_CTX_INTMODT_MASK) >>
+ EV_CTX_INTMODT_SHIFT);
+
+ seq_printf(m, " base: 0x%0llx len: 0x%llx", er_ctxt->rbase,
+ er_ctxt->rlen);
+
+ seq_printf(m, " rp: 0x%llx wp: 0x%llx", er_ctxt->rp,
+ er_ctxt->wp);
+
+ seq_printf(m, " local rp: 0x%pK db: 0x%pad\n", ring->rp,
+ &mhi_event->db_cfg.db_val);
+ }
+
+ return 0;
+}
+
+static int mhi_debugfs_channels_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+ struct mhi_chan *mhi_chan;
+ struct mhi_chan_ctxt *chan_ctxt;
+ int i;
+
+ if (!mhi_is_active(mhi_cntrl)) {
+ seq_puts(m, "Device not ready\n");
+ return -ENODEV;
+ }
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ chan_ctxt = mhi_cntrl->mhi_ctxt->chan_ctxt;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
+ struct mhi_ring *ring = &mhi_chan->tre_ring;
+
+ if (mhi_chan->offload_ch) {
+ seq_printf(m, "%s(%u) is an offload channel\n",
+ mhi_chan->name, mhi_chan->chan);
+ continue;
+ }
+
+ if (!mhi_chan->mhi_dev)
+ continue;
+
+ seq_printf(m,
+ "%s(%u) state: 0x%lx brstmode: 0x%lx pollcfg: 0x%lx",
+ mhi_chan->name, mhi_chan->chan, (chan_ctxt->chcfg &
+ CHAN_CTX_CHSTATE_MASK) >> CHAN_CTX_CHSTATE_SHIFT,
+ (chan_ctxt->chcfg & CHAN_CTX_BRSTMODE_MASK) >>
+ CHAN_CTX_BRSTMODE_SHIFT, (chan_ctxt->chcfg &
+ CHAN_CTX_POLLCFG_MASK) >> CHAN_CTX_POLLCFG_SHIFT);
+
+ seq_printf(m, " type: 0x%x event ring: %u", chan_ctxt->chtype,
+ chan_ctxt->erindex);
+
+ seq_printf(m, " base: 0x%llx len: 0x%llx rp: 0x%llx wp: 0x%llx",
+ chan_ctxt->rbase, chan_ctxt->rlen, chan_ctxt->rp,
+ chan_ctxt->wp);
+
+ seq_printf(m, " local rp: 0x%pK local wp: 0x%pK db: 0x%pad\n",
+ ring->rp, ring->wp,
+ &mhi_chan->db_cfg.db_val);
+ }
+
+ return 0;
+}
+
+static int mhi_device_info_show(struct device *dev, void *data)
+{
+ struct mhi_device *mhi_dev;
+
+ if (dev->bus != &mhi_bus_type)
+ return 0;
+
+ mhi_dev = to_mhi_device(dev);
+
+ seq_printf((struct seq_file *)data, "%s: type: %s dev_wake: %u",
+ mhi_dev->name, mhi_dev->dev_type ? "Controller" : "Transfer",
+ mhi_dev->dev_wake);
+
+ /* for transfer device types only */
+ if (mhi_dev->dev_type == MHI_DEVICE_XFER)
+ seq_printf((struct seq_file *)data, " channels: %u(UL)/%u(DL)",
+ mhi_dev->ul_chan_id, mhi_dev->dl_chan_id);
+
+ seq_puts((struct seq_file *)data, "\n");
+
+ return 0;
+}
+
+static int mhi_debugfs_devices_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+
+ if (!mhi_is_active(mhi_cntrl)) {
+ seq_puts(m, "Device not ready\n");
+ return -ENODEV;
+ }
+
+ device_for_each_child(mhi_cntrl->cntrl_dev, m, mhi_device_info_show);
+
+ return 0;
+}
+
+static int mhi_debugfs_regdump_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+ enum mhi_state state;
+ enum mhi_ee_type ee;
+ int i, ret = -EIO;
+ u32 val;
+ void __iomem *mhi_base = mhi_cntrl->regs;
+ void __iomem *bhi_base = mhi_cntrl->bhi;
+ void __iomem *bhie_base = mhi_cntrl->bhie;
+ void __iomem *wake_db = mhi_cntrl->wake_db;
+ struct {
+ const char *name;
+ int offset;
+ void __iomem *base;
+ } regs[] = {
+ { "MHI_REGLEN", MHIREGLEN, mhi_base},
+ { "MHI_VER", MHIVER, mhi_base},
+ { "MHI_CFG", MHICFG, mhi_base},
+ { "MHI_CTRL", MHICTRL, mhi_base},
+ { "MHI_STATUS", MHISTATUS, mhi_base},
+ { "MHI_WAKE_DB", 0, wake_db},
+ { "BHI_EXECENV", BHI_EXECENV, bhi_base},
+ { "BHI_STATUS", BHI_STATUS, bhi_base},
+ { "BHI_ERRCODE", BHI_ERRCODE, bhi_base},
+ { "BHI_ERRDBG1", BHI_ERRDBG1, bhi_base},
+ { "BHI_ERRDBG2", BHI_ERRDBG2, bhi_base},
+ { "BHI_ERRDBG3", BHI_ERRDBG3, bhi_base},
+ { "BHIE_TXVEC_DB", BHIE_TXVECDB_OFFS, bhie_base},
+ { "BHIE_TXVEC_STATUS", BHIE_TXVECSTATUS_OFFS, bhie_base},
+ { "BHIE_RXVEC_DB", BHIE_RXVECDB_OFFS, bhie_base},
+ { "BHIE_RXVEC_STATUS", BHIE_RXVECSTATUS_OFFS, bhie_base},
+ { NULL },
+ };
+
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+ return ret;
+
+ seq_printf(m, "Host PM state: %s Device state: %s EE: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+ state = mhi_get_mhi_state(mhi_cntrl);
+ ee = mhi_get_exec_env(mhi_cntrl);
+ seq_printf(m, "Device EE: %s state: %s\n", TO_MHI_EXEC_STR(ee),
+ TO_MHI_STATE_STR(state));
+
+ for (i = 0; regs[i].name; i++) {
+ if (!regs[i].base)
+ continue;
+ ret = mhi_read_reg(mhi_cntrl, regs[i].base, regs[i].offset,
+ &val);
+ if (ret)
+ continue;
+
+ seq_printf(m, "%s: 0x%x\n", regs[i].name, val);
+ }
+
+ return 0;
+}
+
+static int mhi_debugfs_device_wake_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+ struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
+
+ if (!mhi_is_active(mhi_cntrl)) {
+ seq_puts(m, "Device not ready\n");
+ return -ENODEV;
+ }
+
+ seq_printf(m,
+ "Wake count: %d\n%s\n", mhi_dev->dev_wake,
+ "Usage: echo get/put > device_wake to vote/unvote for M0");
+
+ return 0;
+}
+
+static ssize_t mhi_debugfs_device_wake_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct mhi_controller *mhi_cntrl = m->private;
+ struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
+ char buf[16];
+ int ret = -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (!strncmp(buf, "get", 3)) {
+ ret = mhi_device_get_sync(mhi_dev);
+ } else if (!strncmp(buf, "put", 3)) {
+ mhi_device_put(mhi_dev);
+ ret = 0;
+ }
+
+ return ret ? ret : count;
+}
+
+static int mhi_debugfs_timeout_ms_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+
+ seq_printf(m, "%u ms\n", mhi_cntrl->timeout_ms);
+
+ return 0;
+}
+
+static ssize_t mhi_debugfs_timeout_ms_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct mhi_controller *mhi_cntrl = m->private;
+ u32 timeout_ms;
+
+ if (kstrtou32_from_user(ubuf, count, 0, &timeout_ms))
+ return -EINVAL;
+
+ mhi_cntrl->timeout_ms = timeout_ms;
+
+ return count;
+}
+
+static int mhi_debugfs_states_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_states_show, inode->i_private);
+}
+
+static int mhi_debugfs_events_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_events_show, inode->i_private);
+}
+
+static int mhi_debugfs_channels_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_channels_show, inode->i_private);
+}
+
+static int mhi_debugfs_devices_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_devices_show, inode->i_private);
+}
+
+static int mhi_debugfs_regdump_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_regdump_show, inode->i_private);
+}
+
+static int mhi_debugfs_device_wake_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_device_wake_show, inode->i_private);
+}
+
+static int mhi_debugfs_timeout_ms_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_timeout_ms_show, inode->i_private);
+}
+
+static const struct file_operations debugfs_states_fops = {
+ .open = mhi_debugfs_states_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_events_fops = {
+ .open = mhi_debugfs_events_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_channels_fops = {
+ .open = mhi_debugfs_channels_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_devices_fops = {
+ .open = mhi_debugfs_devices_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_regdump_fops = {
+ .open = mhi_debugfs_regdump_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_device_wake_fops = {
+ .open = mhi_debugfs_device_wake_open,
+ .write = mhi_debugfs_device_wake_write,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_timeout_ms_fops = {
+ .open = mhi_debugfs_timeout_ms_open,
+ .write = mhi_debugfs_timeout_ms_write,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static struct dentry *mhi_debugfs_root;
+
+void mhi_create_debugfs(struct mhi_controller *mhi_cntrl)
+{
+ mhi_cntrl->debugfs_dentry =
+ debugfs_create_dir(dev_name(mhi_cntrl->cntrl_dev),
+ mhi_debugfs_root);
+
+ debugfs_create_file("states", 0444, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_states_fops);
+ debugfs_create_file("events", 0444, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_events_fops);
+ debugfs_create_file("channels", 0444, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_channels_fops);
+ debugfs_create_file("devices", 0444, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_devices_fops);
+ debugfs_create_file("regdump", 0444, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_regdump_fops);
+ debugfs_create_file("device_wake", 0644, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_device_wake_fops);
+ debugfs_create_file("timeout_ms", 0644, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_timeout_ms_fops);
+}
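+
+/*
+ * Illustrative note: assuming debugfs is mounted at /sys/kernel/debug and
+ * the bus name is "mhi", the files above appear under
+ * /sys/kernel/debug/mhi/<controller>/ as states, events, channels,
+ * devices, regdump, device_wake and timeout_ms.
+ */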
+
+void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl)
+{
+ debugfs_remove_recursive(mhi_cntrl->debugfs_dentry);
+ mhi_cntrl->debugfs_dentry = NULL;
+}
+
+void mhi_debugfs_init(void)
+{
+ mhi_debugfs_root = debugfs_create_dir(mhi_bus_type.name, NULL);
+}
+
+void mhi_debugfs_exit(void)
+{
+ debugfs_remove_recursive(mhi_debugfs_root);
+}
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
new file mode 100644
index 000000000..2cc48f96a
--- /dev/null
+++ b/drivers/bus/mhi/host/init.c
@@ -0,0 +1,1387 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include "internal.h"
+
+const char * const mhi_ee_str[MHI_EE_MAX] = {
+ [MHI_EE_PBL] = "PBL",
+ [MHI_EE_SBL] = "SBL",
+ [MHI_EE_AMSS] = "AMSS",
+ [MHI_EE_RDDM] = "RDDM",
+ [MHI_EE_WFW] = "WFW",
+ [MHI_EE_PTHRU] = "PASS THRU",
+ [MHI_EE_EDL] = "EDL",
+ [MHI_EE_DISABLE_TRANSITION] = "DISABLE",
+ [MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
+};
+
+const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
+ [DEV_ST_TRANSITION_PBL] = "PBL",
+ [DEV_ST_TRANSITION_READY] = "READY",
+ [DEV_ST_TRANSITION_SBL] = "SBL",
+ [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE",
+ [DEV_ST_TRANSITION_SYS_ERR] = "SYS_ERR",
+ [DEV_ST_TRANSITION_DISABLE] = "DISABLE",
+};
+
+const char * const mhi_state_str[MHI_STATE_MAX] = {
+ [MHI_STATE_RESET] = "RESET",
+ [MHI_STATE_READY] = "READY",
+ [MHI_STATE_M0] = "M0",
+ [MHI_STATE_M1] = "M1",
+ [MHI_STATE_M2] = "M2",
+ [MHI_STATE_M3] = "M3",
+ [MHI_STATE_M3_FAST] = "M3_FAST",
+ [MHI_STATE_BHI] = "BHI",
+ [MHI_STATE_SYS_ERR] = "SYS_ERR",
+};
+
+static const char * const mhi_pm_state_str[] = {
+ [MHI_PM_STATE_DISABLE] = "DISABLE",
+ [MHI_PM_STATE_POR] = "POR",
+ [MHI_PM_STATE_M0] = "M0",
+ [MHI_PM_STATE_M2] = "M2",
+ [MHI_PM_STATE_M3_ENTER] = "M?->M3",
+ [MHI_PM_STATE_M3] = "M3",
+ [MHI_PM_STATE_M3_EXIT] = "M3->M0",
+ [MHI_PM_STATE_FW_DL_ERR] = "FW DL Error",
+ [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS_ERR Detect",
+ [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS_ERR Process",
+ [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
+ [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect",
+};
+
+const char *to_mhi_pm_state_str(enum mhi_pm_state state)
+{
+ int index = find_last_bit((unsigned long *)&state, 32);
+
+ if (index >= ARRAY_SIZE(mhi_pm_state_str))
+ return "Invalid State";
+
+ return mhi_pm_state_str[index];
+}
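+
+/*
+ * Illustrative note: MHI PM states are single-bit flags, so
+ * find_last_bit() maps a state of BIT(n) to index n in mhi_pm_state_str[];
+ * anything beyond the table is reported as "Invalid State".
+ */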
+
+static ssize_t serial_number_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ return snprintf(buf, PAGE_SIZE, "Serial Number: %u\n",
+ mhi_cntrl->serial_number);
+}
+static DEVICE_ATTR_RO(serial_number);
+
+static ssize_t oem_pk_hash_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ int i, cnt = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+ "OEMPKHASH[%d]: 0x%x\n", i,
+ mhi_cntrl->oem_pk_hash[i]);
+
+ return cnt;
+}
+static DEVICE_ATTR_RO(oem_pk_hash);
+
+static struct attribute *mhi_dev_attrs[] = {
+ &dev_attr_serial_number.attr,
+ &dev_attr_oem_pk_hash.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mhi_dev);
+
+/* MHI protocol requires the transfer ring to be aligned to the ring length */
+static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring,
+ u64 len)
+{
+ ring->alloc_size = len + (len - 1);
+ ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size,
+ &ring->dma_handle, GFP_KERNEL);
+ if (!ring->pre_aligned)
+ return -ENOMEM;
+
+ ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
+ ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);
+
+ return 0;
+}
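+
+/*
+ * Worked example (illustrative): for len = 0x1000 and a hypothetical
+ * dma_handle of 0x80000a00, alloc_size = 0x1fff guarantees an aligned
+ * block fits; iommu_base = (0x80000a00 + 0xfff) & ~0xfff = 0x80001000,
+ * and base advances by the same 0x600 offset into pre_aligned.
+ */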
+
+void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
+{
+ int i;
+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
+ }
+
+ free_irq(mhi_cntrl->irq[0], mhi_cntrl);
+}
+
+int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int i, ret;
+
+ /* Setup BHI_INTVEC IRQ */
+ ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
+ mhi_intvec_threaded_handler,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ "bhi", mhi_cntrl);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
+ dev_err(dev, "irq %d not available for event ring\n",
+ mhi_event->irq);
+ ret = -EINVAL;
+ goto error_request;
+ }
+
+ ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
+ mhi_irq_handler,
+ IRQF_SHARED | IRQF_NO_SUSPEND,
+ "mhi", mhi_event);
+ if (ret) {
+ dev_err(dev, "Error requesting irq:%d for ev:%d\n",
+ mhi_cntrl->irq[mhi_event->irq], i);
+ goto error_request;
+ }
+ }
+
+ return 0;
+
+error_request:
+ for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
+ }
+ free_irq(mhi_cntrl->irq[0], mhi_cntrl);
+
+ return ret;
+}
+
+void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
+{
+ int i;
+ struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_event *mhi_event;
+ struct mhi_ring *ring;
+
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
+ ring = &mhi_cmd->ring;
+ mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ ring->base = NULL;
+ ring->iommu_base = 0;
+ }
+
+ mhi_free_coherent(mhi_cntrl,
+ sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
+ mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
+
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ ring = &mhi_event->ring;
+ mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ ring->base = NULL;
+ ring->iommu_base = 0;
+ }
+
+ mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
+ mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
+ mhi_ctxt->er_ctxt_addr);
+
+ mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
+ mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
+ mhi_ctxt->chan_ctxt_addr);
+
+ kfree(mhi_ctxt);
+ mhi_cntrl->mhi_ctxt = NULL;
+}
+
+int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_ctxt *mhi_ctxt;
+ struct mhi_chan_ctxt *chan_ctxt;
+ struct mhi_event_ctxt *er_ctxt;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ struct mhi_chan *mhi_chan;
+ struct mhi_event *mhi_event;
+ struct mhi_cmd *mhi_cmd;
+ u32 tmp;
+ int ret = -ENOMEM, i;
+
+ atomic_set(&mhi_cntrl->dev_wake, 0);
+ atomic_set(&mhi_cntrl->pending_pkts, 0);
+
+ mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
+ if (!mhi_ctxt)
+ return -ENOMEM;
+
+ /* Setup channel ctxt */
+ mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl,
+ sizeof(*mhi_ctxt->chan_ctxt) *
+ mhi_cntrl->max_chan,
+ &mhi_ctxt->chan_ctxt_addr,
+ GFP_KERNEL);
+ if (!mhi_ctxt->chan_ctxt)
+ goto error_alloc_chan_ctxt;
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ chan_ctxt = mhi_ctxt->chan_ctxt;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
+ /* Skip if it is an offload channel */
+ if (mhi_chan->offload_ch)
+ continue;
+
+ tmp = chan_ctxt->chcfg;
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
+ tmp &= ~CHAN_CTX_BRSTMODE_MASK;
+ tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT);
+ tmp &= ~CHAN_CTX_POLLCFG_MASK;
+ tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT);
+ chan_ctxt->chcfg = tmp;
+
+ chan_ctxt->chtype = mhi_chan->type;
+ chan_ctxt->erindex = mhi_chan->er_index;
+
+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+ mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
+ }
+
+ /* Setup event context */
+ mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl,
+ sizeof(*mhi_ctxt->er_ctxt) *
+ mhi_cntrl->total_ev_rings,
+ &mhi_ctxt->er_ctxt_addr,
+ GFP_KERNEL);
+ if (!mhi_ctxt->er_ctxt)
+ goto error_alloc_er_ctxt;
+
+ er_ctxt = mhi_ctxt->er_ctxt;
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
+ mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ /* Skip if it is an offload event */
+ if (mhi_event->offload_ev)
+ continue;
+
+ tmp = er_ctxt->intmod;
+ tmp &= ~EV_CTX_INTMODC_MASK;
+ tmp &= ~EV_CTX_INTMODT_MASK;
+ tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT);
+ er_ctxt->intmod = tmp;
+
+ er_ctxt->ertype = MHI_ER_TYPE_VALID;
+ er_ctxt->msivec = mhi_event->irq;
+ mhi_event->db_cfg.db_mode = true;
+
+ ring->el_size = sizeof(struct mhi_tre);
+ ring->len = ring->el_size * ring->elements;
+ ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
+ if (ret)
+ goto error_alloc_er;
+
+ /*
+ * If the read pointer equals the write pointer, then the
+ * ring is empty
+ */
+ ring->rp = ring->wp = ring->base;
+ er_ctxt->rbase = ring->iommu_base;
+ er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
+ er_ctxt->rlen = ring->len;
+ ring->ctxt_wp = &er_ctxt->wp;
+ }
+
+ /* Setup cmd context */
+ ret = -ENOMEM;
+ mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl,
+ sizeof(*mhi_ctxt->cmd_ctxt) *
+ NR_OF_CMD_RINGS,
+ &mhi_ctxt->cmd_ctxt_addr,
+ GFP_KERNEL);
+ if (!mhi_ctxt->cmd_ctxt)
+ goto error_alloc_er;
+
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ cmd_ctxt = mhi_ctxt->cmd_ctxt;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ ring->el_size = sizeof(struct mhi_tre);
+ ring->elements = CMD_EL_PER_RING;
+ ring->len = ring->el_size * ring->elements;
+ ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
+ if (ret)
+ goto error_alloc_cmd;
+
+ ring->rp = ring->wp = ring->base;
+ cmd_ctxt->rbase = ring->iommu_base;
+ cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
+ cmd_ctxt->rlen = ring->len;
+ ring->ctxt_wp = &cmd_ctxt->wp;
+ }
+
+ mhi_cntrl->mhi_ctxt = mhi_ctxt;
+
+ return 0;
+
+error_alloc_cmd:
+ for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ }
+ mhi_free_coherent(mhi_cntrl,
+ sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
+ mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
+ i = mhi_cntrl->total_ev_rings;
+ mhi_event = mhi_cntrl->mhi_event + i;
+
+error_alloc_er:
+ for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ if (mhi_event->offload_ev)
+ continue;
+
+ mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ }
+ mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
+ mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
+ mhi_ctxt->er_ctxt_addr);
+
+error_alloc_er_ctxt:
+ mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
+ mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
+ mhi_ctxt->chan_ctxt_addr);
+
+error_alloc_chan_ctxt:
+ kfree(mhi_ctxt);
+
+ return ret;
+}
+
+int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
+{
+ u32 val;
+ int i, ret;
+ struct mhi_chan *mhi_chan;
+ struct mhi_event *mhi_event;
+ void __iomem *base = mhi_cntrl->regs;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct {
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 val;
+ } reg_info[] = {
+ {
+ CCABAP_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
+ },
+ {
+ CCABAP_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
+ },
+ {
+ ECABAP_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
+ },
+ {
+ ECABAP_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
+ },
+ {
+ CRCBAP_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
+ },
+ {
+ CRCBAP_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
+ },
+ {
+ MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT,
+ mhi_cntrl->total_ev_rings,
+ },
+ {
+ MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT,
+ mhi_cntrl->hw_ev_rings,
+ },
+ {
+ MHICTRLBASE_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHICTRLBASE_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHIDATABASE_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHIDATABASE_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHICTRLLIMIT_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->iova_stop),
+ },
+ {
+ MHICTRLLIMIT_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->iova_stop),
+ },
+ {
+ MHIDATALIMIT_HIGHER, U32_MAX, 0,
+ upper_32_bits(mhi_cntrl->iova_stop),
+ },
+ {
+ MHIDATALIMIT_LOWER, U32_MAX, 0,
+ lower_32_bits(mhi_cntrl->iova_stop),
+ },
+ { 0, 0, 0 }
+ };
+
+ dev_dbg(dev, "Initializing MHI registers\n");
+
+ /* Read channel db offset */
+ ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK,
+ CHDBOFF_CHDBOFF_SHIFT, &val);
+ if (ret) {
+ dev_err(dev, "Unable to read CHDBOFF register\n");
+ return -EIO;
+ }
+
+ if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) {
+ dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n",
+ val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB));
+ return -ERANGE;
+ }
+
+ /* Setup wake db */
+ mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
+ mhi_cntrl->wake_set = false;
+
+ /* Setup channel db address for each channel in tre_ring */
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
+ mhi_chan->tre_ring.db_addr = base + val;
+
+ /* Read event ring db offset */
+ ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK,
+ ERDBOFF_ERDBOFF_SHIFT, &val);
+ if (ret) {
+ dev_err(dev, "Unable to read ERDBOFF register\n");
+ return -EIO;
+ }
+
+ if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) {
+ dev_err(dev, "ERDB offset: 0x%x is out of range: 0x%zx\n",
+ val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings));
+ return -ERANGE;
+ }
+
+ /* Setup event db address for each ev_ring */
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ mhi_event->ring.db_addr = base + val;
+ }
+
+ /* Setup DB register for primary CMD rings */
+ mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;
+
+ /* Write to MMIO registers */
+ for (i = 0; reg_info[i].offset; i++)
+ mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
+ reg_info[i].mask, reg_info[i].shift,
+ reg_info[i].val);
+
+ return 0;
+}
+
+void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring;
+ struct mhi_ring *tre_ring;
+ struct mhi_chan_ctxt *chan_ctxt;
+ u32 tmp;
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+ chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
+
+ mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
+ tre_ring->pre_aligned, tre_ring->dma_handle);
+ vfree(buf_ring->base);
+
+ buf_ring->base = tre_ring->base = NULL;
+ tre_ring->ctxt_wp = NULL;
+ chan_ctxt->rbase = 0;
+ chan_ctxt->rlen = 0;
+ chan_ctxt->rp = 0;
+ chan_ctxt->wp = 0;
+
+ tmp = chan_ctxt->chcfg;
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
+ chan_ctxt->chcfg = tmp;
+
+ /* Update to all cores */
+ smp_wmb();
+}
+
+int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring;
+ struct mhi_ring *tre_ring;
+ struct mhi_chan_ctxt *chan_ctxt;
+ u32 tmp;
+ int ret;
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+ tre_ring->el_size = sizeof(struct mhi_tre);
+ tre_ring->len = tre_ring->el_size * tre_ring->elements;
+ chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
+ ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
+ if (ret)
+ return -ENOMEM;
+
+ buf_ring->el_size = sizeof(struct mhi_buf_info);
+ buf_ring->len = buf_ring->el_size * buf_ring->elements;
+ buf_ring->base = vzalloc(buf_ring->len);
+
+ if (!buf_ring->base) {
+ mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
+ tre_ring->pre_aligned, tre_ring->dma_handle);
+ return -ENOMEM;
+ }
+
+ tmp = chan_ctxt->chcfg;
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT);
+ chan_ctxt->chcfg = tmp;
+
+ chan_ctxt->rbase = tre_ring->iommu_base;
+ chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
+ chan_ctxt->rlen = tre_ring->len;
+ tre_ring->ctxt_wp = &chan_ctxt->wp;
+
+ tre_ring->rp = tre_ring->wp = tre_ring->base;
+ buf_ring->rp = buf_ring->wp = buf_ring->base;
+ mhi_chan->db_cfg.db_mode = 1;
+
+ /* Update to all cores */
+ smp_wmb();
+
+ return 0;
+}
+
+static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
+ const struct mhi_controller_config *config)
+{
+ struct mhi_event *mhi_event;
+ const struct mhi_event_config *event_cfg;
+ struct device *dev = mhi_cntrl->cntrl_dev;
+ int i, num;
+
+ num = config->num_events;
+ mhi_cntrl->total_ev_rings = num;
+ mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
+ GFP_KERNEL);
+ if (!mhi_cntrl->mhi_event)
+ return -ENOMEM;
+
+ /* Populate event ring */
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < num; i++) {
+ event_cfg = &config->event_cfg[i];
+
+ mhi_event->er_index = i;
+ mhi_event->ring.elements = event_cfg->num_elements;
+ mhi_event->intmod = event_cfg->irq_moderation_ms;
+ mhi_event->irq = event_cfg->irq;
+
+ if (event_cfg->channel != U32_MAX) {
+ /* This event ring has a dedicated channel */
+ mhi_event->chan = event_cfg->channel;
+ if (mhi_event->chan >= mhi_cntrl->max_chan) {
+ dev_err(dev,
+ "Event Ring channel not available\n");
+ goto error_ev_cfg;
+ }
+
+ mhi_event->mhi_chan =
+ &mhi_cntrl->mhi_chan[mhi_event->chan];
+ }
+
+ /* Priority is fixed to 1 for now */
+ mhi_event->priority = 1;
+
+ mhi_event->db_cfg.brstmode = event_cfg->mode;
+ if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
+ goto error_ev_cfg;
+
+ if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
+ mhi_event->db_cfg.process_db = mhi_db_brstmode;
+ else
+ mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;
+
+ mhi_event->data_type = event_cfg->data_type;
+
+ switch (mhi_event->data_type) {
+ case MHI_ER_DATA:
+ mhi_event->process_event = mhi_process_data_event_ring;
+ break;
+ case MHI_ER_CTRL:
+ mhi_event->process_event = mhi_process_ctrl_ev_ring;
+ break;
+ default:
+ dev_err(dev, "Event Ring type not supported\n");
+ goto error_ev_cfg;
+ }
+
+ mhi_event->hw_ring = event_cfg->hardware_event;
+ if (mhi_event->hw_ring)
+ mhi_cntrl->hw_ev_rings++;
+ else
+ mhi_cntrl->sw_ev_rings++;
+
+ mhi_event->cl_manage = event_cfg->client_managed;
+ mhi_event->offload_ev = event_cfg->offload_channel;
+ mhi_event++;
+ }
+
+ return 0;
+
+error_ev_cfg:
+
+ kfree(mhi_cntrl->mhi_event);
+ return -EINVAL;
+}
+
+static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
+ const struct mhi_controller_config *config)
+{
+ const struct mhi_channel_config *ch_cfg;
+ struct device *dev = mhi_cntrl->cntrl_dev;
+ int i;
+ u32 chan;
+
+ mhi_cntrl->max_chan = config->max_channels;
+
+ /*
+ * The allocation of MHI channels can exceed 32KB in some scenarios,
+ * so vzalloc is used here to avoid possible memory allocation
+ * failures.
+ */
+ mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
+ sizeof(*mhi_cntrl->mhi_chan));
+ if (!mhi_cntrl->mhi_chan)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);
+
+ /* Populate channel configurations */
+ for (i = 0; i < config->num_channels; i++) {
+ struct mhi_chan *mhi_chan;
+
+ ch_cfg = &config->ch_cfg[i];
+
+ chan = ch_cfg->num;
+ if (chan >= mhi_cntrl->max_chan) {
+ dev_err(dev, "Channel %d not available\n", chan);
+ goto error_chan_cfg;
+ }
+
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ mhi_chan->name = ch_cfg->name;
+ mhi_chan->chan = chan;
+
+ mhi_chan->tre_ring.elements = ch_cfg->num_elements;
+ if (!mhi_chan->tre_ring.elements)
+ goto error_chan_cfg;
+
+ /*
+ * For some channels, the local ring length should be bigger than
+ * the transfer ring length due to internal logical channels in the
+ * device. This lets the host queue more buffers than the transfer
+ * ring can hold. For example, RSC channels should have a larger
+ * local ring than transfer ring.
+ */
+ mhi_chan->buf_ring.elements = ch_cfg->local_elements;
+ if (!mhi_chan->buf_ring.elements)
+ mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
+ mhi_chan->er_index = ch_cfg->event_ring;
+ mhi_chan->dir = ch_cfg->dir;
+
+ /*
+ * For most channels, chtype is identical to the channel direction.
+ * So, if it is not defined, assign the channel direction to
+ * chtype.
+ */
+ mhi_chan->type = ch_cfg->type;
+ if (!mhi_chan->type)
+ mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;
+
+ mhi_chan->ee_mask = ch_cfg->ee_mask;
+ mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
+ mhi_chan->lpm_notify = ch_cfg->lpm_notify;
+ mhi_chan->offload_ch = ch_cfg->offload_channel;
+ mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
+ mhi_chan->pre_alloc = ch_cfg->auto_queue;
+ mhi_chan->auto_start = ch_cfg->auto_start;
+
+ /*
+ * If MHI host allocates buffers, then the channel direction
+ * should be DMA_FROM_DEVICE
+ */
+ if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
+ dev_err(dev, "Invalid channel configuration\n");
+ goto error_chan_cfg;
+ }
+
+ /*
+ * Bi-directional and directionless channels must be offload
+ * channels.
+ */
+ if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
+ mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
+ dev_err(dev, "Invalid channel configuration\n");
+ goto error_chan_cfg;
+ }
+
+ if (!mhi_chan->offload_ch) {
+ mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
+ if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
+ dev_err(dev, "Invalid Door bell mode\n");
+ goto error_chan_cfg;
+ }
+ }
+
+ if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
+ mhi_chan->db_cfg.process_db = mhi_db_brstmode;
+ else
+ mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;
+
+ mhi_chan->configured = true;
+
+ if (mhi_chan->lpm_notify)
+ list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
+ }
+
+ return 0;
+
+error_chan_cfg:
+ vfree(mhi_cntrl->mhi_chan);
+
+ return -EINVAL;
+}
+
+static int parse_config(struct mhi_controller *mhi_cntrl,
+ const struct mhi_controller_config *config)
+{
+ int ret;
+
+ /* Parse MHI channel configuration */
+ ret = parse_ch_cfg(mhi_cntrl, config);
+ if (ret)
+ return ret;
+
+ /* Parse MHI event configuration */
+ ret = parse_ev_cfg(mhi_cntrl, config);
+ if (ret)
+ goto error_ev_cfg;
+
+ mhi_cntrl->timeout_ms = config->timeout_ms;
+ if (!mhi_cntrl->timeout_ms)
+ mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;
+
+ mhi_cntrl->bounce_buf = config->use_bounce_buf;
+ mhi_cntrl->buffer_len = config->buf_len;
+ if (!mhi_cntrl->buffer_len)
+ mhi_cntrl->buffer_len = MHI_MAX_MTU;
+
+ /* By default, host is allowed to ring DB in both M0 and M2 states */
+ mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
+ if (config->m2_no_db)
+ mhi_cntrl->db_access &= ~MHI_PM_M2;
+
+ return 0;
+
+error_ev_cfg:
+ vfree(mhi_cntrl->mhi_chan);
+
+ return ret;
+}
+
+int mhi_register_controller(struct mhi_controller *mhi_cntrl,
+ const struct mhi_controller_config *config)
+{
+ struct mhi_event *mhi_event;
+ struct mhi_chan *mhi_chan;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_device *mhi_dev;
+ u32 soc_info;
+ int ret, i;
+
+ if (!mhi_cntrl)
+ return -EINVAL;
+
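+ /* These controller-provided callbacks are mandatory for the core */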
+ if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
+ !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
+ !mhi_cntrl->write_reg)
+ return -EINVAL;
+
+ ret = parse_config(mhi_cntrl, config);
+ if (ret)
+ return -EINVAL;
+
+ mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
+ sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
+ if (!mhi_cntrl->mhi_cmd) {
+ ret = -ENOMEM;
+ goto error_alloc_cmd;
+ }
+
+ INIT_LIST_HEAD(&mhi_cntrl->transition_list);
+ mutex_init(&mhi_cntrl->pm_mutex);
+ rwlock_init(&mhi_cntrl->pm_lock);
+ spin_lock_init(&mhi_cntrl->transition_lock);
+ spin_lock_init(&mhi_cntrl->wlock);
+ INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
+ init_waitqueue_head(&mhi_cntrl->state_event);
+
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
+ spin_lock_init(&mhi_cmd->lock);
+
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ /* Skip for offload events */
+ if (mhi_event->offload_ev)
+ continue;
+
+ mhi_event->mhi_cntrl = mhi_cntrl;
+ spin_lock_init(&mhi_event->lock);
+ if (mhi_event->data_type == MHI_ER_CTRL)
+ tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
+ (ulong)mhi_event);
+ else
+ tasklet_init(&mhi_event->task, mhi_ev_task,
+ (ulong)mhi_event);
+ }
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ mutex_init(&mhi_chan->mutex);
+ init_completion(&mhi_chan->completion);
+ rwlock_init(&mhi_chan->lock);
+
+ /* Used when setting the BEI field of a TRE */
+ mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+ mhi_chan->intmod = mhi_event->intmod;
+ }
+
+ if (mhi_cntrl->bounce_buf) {
+ mhi_cntrl->map_single = mhi_map_single_use_bb;
+ mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
+ } else {
+ mhi_cntrl->map_single = mhi_map_single_no_bb;
+ mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
+ }
+
+ /* Read the MHI device info */
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
+ SOC_HW_VERSION_OFFS, &soc_info);
+ if (ret)
+ goto error_alloc_dev;
+
+ mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >>
+ SOC_HW_VERSION_FAM_NUM_SHFT;
+ mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >>
+ SOC_HW_VERSION_DEV_NUM_SHFT;
+ mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >>
+ SOC_HW_VERSION_MAJOR_VER_SHFT;
+ mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >>
+ SOC_HW_VERSION_MINOR_VER_SHFT;
+
+ /* Register controller with MHI bus */
+ mhi_dev = mhi_alloc_device(mhi_cntrl);
+ if (IS_ERR(mhi_dev)) {
+ dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
+ ret = PTR_ERR(mhi_dev);
+ goto error_alloc_dev;
+ }
+
+ mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
+ mhi_dev->mhi_cntrl = mhi_cntrl;
+ dev_set_name(&mhi_dev->dev, "%s", dev_name(mhi_cntrl->cntrl_dev));
+ mhi_dev->name = dev_name(mhi_cntrl->cntrl_dev);
+
+ /* Init wakeup source */
+ device_init_wakeup(&mhi_dev->dev, true);
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ goto error_add_dev;
+
+ mhi_cntrl->mhi_dev = mhi_dev;
+
+ mhi_create_debugfs(mhi_cntrl);
+
+ return 0;
+
+error_add_dev:
+ put_device(&mhi_dev->dev);
+
+error_alloc_dev:
+ kfree(mhi_cntrl->mhi_cmd);
+
+error_alloc_cmd:
+ vfree(mhi_cntrl->mhi_chan);
+ kfree(mhi_cntrl->mhi_event);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_register_controller);
+
+void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
+ struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
+ unsigned int i;
+
+ mhi_destroy_debugfs(mhi_cntrl);
+
+ kfree(mhi_cntrl->mhi_cmd);
+ kfree(mhi_cntrl->mhi_event);
+
+ /* Drop the references to MHI devices created for channels */
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ if (!mhi_chan->mhi_dev)
+ continue;
+
+ put_device(&mhi_chan->mhi_dev->dev);
+ }
+ vfree(mhi_cntrl->mhi_chan);
+
+ device_del(&mhi_dev->dev);
+ put_device(&mhi_dev->dev);
+}
+EXPORT_SYMBOL_GPL(mhi_unregister_controller);
+
+struct mhi_controller *mhi_alloc_controller(void)
+{
+ struct mhi_controller *mhi_cntrl;
+
+ mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);
+
+ return mhi_cntrl;
+}
+EXPORT_SYMBOL_GPL(mhi_alloc_controller);
+
+void mhi_free_controller(struct mhi_controller *mhi_cntrl)
+{
+ kfree(mhi_cntrl);
+}
+EXPORT_SYMBOL_GPL(mhi_free_controller);
+
+int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 bhie_off;
+ int ret;
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+
+ ret = mhi_init_dev_ctxt(mhi_cntrl);
+ if (ret)
+ goto error_dev_ctxt;
+
+ /*
+ * Allocate the RDDM table if specified; it is used for debugging.
+ */
+ if (mhi_cntrl->rddm_size) {
+ mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
+ mhi_cntrl->rddm_size);
+
+ /*
+ * This controller supports RDDM, so we need to manually clear
+ * BHIE RX registers since POR values are undefined.
+ */
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
+ &bhie_off);
+ if (ret) {
+ dev_err(dev, "Error getting BHIE offset\n");
+ goto bhie_error;
+ }
+
+ mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
+ memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
+ 0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
+ 4);
+
+ if (mhi_cntrl->rddm_image)
+ mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
+ }
+
+ mhi_cntrl->pre_init = true;
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ return 0;
+
+bhie_error:
+ if (mhi_cntrl->rddm_image) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
+ mhi_cntrl->rddm_image = NULL;
+ }
+
+error_dev_ctxt:
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);
+
+void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
+{
+ if (mhi_cntrl->fbc_image) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+ mhi_cntrl->fbc_image = NULL;
+ }
+
+ if (mhi_cntrl->rddm_image) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
+ mhi_cntrl->rddm_image = NULL;
+ }
+
+ mhi_deinit_dev_ctxt(mhi_cntrl);
+ mhi_cntrl->pre_init = false;
+}
+EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);
+
+static void mhi_release_device(struct device *dev)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+
+ /*
+ * We need to set mhi_chan->mhi_dev to NULL here since the MHI
+ * devices for the channels will only get created if the mhi_dev
+ * associated with them is NULL. This scenario occurs during
+ * controller suspend and resume.
+ */
+ if (mhi_dev->ul_chan)
+ mhi_dev->ul_chan->mhi_dev = NULL;
+
+ if (mhi_dev->dl_chan)
+ mhi_dev->dl_chan->mhi_dev = NULL;
+
+ kfree(mhi_dev);
+}
+
+struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_device *mhi_dev;
+ struct device *dev;
+
+ mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
+ if (!mhi_dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev = &mhi_dev->dev;
+ device_initialize(dev);
+ dev->bus = &mhi_bus_type;
+ dev->release = mhi_release_device;
+ dev->parent = mhi_cntrl->cntrl_dev;
+ mhi_dev->mhi_cntrl = mhi_cntrl;
+ mhi_dev->dev_wake = 0;
+
+ return mhi_dev;
+}
+
+static int mhi_driver_probe(struct device *dev)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct device_driver *drv = dev->driver;
+ struct mhi_driver *mhi_drv = to_mhi_driver(drv);
+ struct mhi_event *mhi_event;
+ struct mhi_chan *ul_chan = mhi_dev->ul_chan;
+ struct mhi_chan *dl_chan = mhi_dev->dl_chan;
+ int ret;
+
+ /* Bring device out of LPM */
+ ret = mhi_device_get_sync(mhi_dev);
+ if (ret)
+ return ret;
+
+ ret = -EINVAL;
+
+ if (ul_chan) {
+ /*
+ * If channel supports LPM notifications then status_cb should
+ * be provided
+ */
+ if (ul_chan->lpm_notify && !mhi_drv->status_cb)
+ goto exit_probe;
+
+ /* For non-offload channels, xfer_cb should be provided */
+ if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
+ goto exit_probe;
+
+ ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
+ if (ul_chan->auto_start) {
+ ret = mhi_prepare_channel(mhi_cntrl, ul_chan);
+ if (ret)
+ goto exit_probe;
+ }
+ }
+
+ ret = -EINVAL;
+ if (dl_chan) {
+ /*
+ * If channel supports LPM notifications then status_cb should
+ * be provided
+ */
+ if (dl_chan->lpm_notify && !mhi_drv->status_cb)
+ goto exit_probe;
+
+ /* For non-offload channels, xfer_cb should be provided */
+ if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
+ goto exit_probe;
+
+ mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];
+
+ /*
+ * If the channel event ring is managed by client, then
+ * status_cb must be provided so that the framework can
+ * notify pending data
+ */
+ if (mhi_event->cl_manage && !mhi_drv->status_cb)
+ goto exit_probe;
+
+ dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
+ }
+
+ /* Call the user provided probe function */
+ ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
+ if (ret)
+ goto exit_probe;
+
+ if (dl_chan && dl_chan->auto_start)
+ mhi_prepare_channel(mhi_cntrl, dl_chan);
+
+ mhi_device_put(mhi_dev);
+
+ return ret;
+
+exit_probe:
+ mhi_unprepare_from_transfer(mhi_dev);
+
+ mhi_device_put(mhi_dev);
+
+ return ret;
+}
+
+static int mhi_driver_remove(struct device *dev)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan;
+ enum mhi_ch_state ch_state[] = {
+ MHI_CH_STATE_DISABLED,
+ MHI_CH_STATE_DISABLED
+ };
+ int dir;
+
+ /* Skip if it is a controller device */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ /* Reset both channels */
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+ if (!mhi_chan)
+ continue;
+
+ /* Wake all threads waiting for completion */
+ write_lock_irq(&mhi_chan->lock);
+ mhi_chan->ccs = MHI_EV_CC_INVALID;
+ complete_all(&mhi_chan->completion);
+ write_unlock_irq(&mhi_chan->lock);
+
+ /* Save the current state and move the channel to suspended */
+ mutex_lock(&mhi_chan->mutex);
+ write_lock_irq(&mhi_chan->lock);
+ ch_state[dir] = mhi_chan->ch_state;
+ mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
+ write_unlock_irq(&mhi_chan->lock);
+
+ /* Reset the non-offload channel */
+ if (!mhi_chan->offload_ch)
+ mhi_reset_chan(mhi_cntrl, mhi_chan);
+
+ mutex_unlock(&mhi_chan->mutex);
+ }
+
+ mhi_drv->remove(mhi_dev);
+
+ /* De-init channel if it was enabled */
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+ if (!mhi_chan)
+ continue;
+
+ mutex_lock(&mhi_chan->mutex);
+
+ if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
+ ch_state[dir] == MHI_CH_STATE_STOP) &&
+ !mhi_chan->offload_ch)
+ mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+
+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+
+ mutex_unlock(&mhi_chan->mutex);
+ }
+
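+ /* Release any device wake votes the client driver left behind */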
+ while (mhi_dev->dev_wake)
+ mhi_device_put(mhi_dev);
+
+ return 0;
+}
+
+int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
+{
+ struct device_driver *driver = &mhi_drv->driver;
+
+ if (!mhi_drv->probe || !mhi_drv->remove)
+ return -EINVAL;
+
+ driver->bus = &mhi_bus_type;
+ driver->owner = owner;
+ driver->probe = mhi_driver_probe;
+ driver->remove = mhi_driver_remove;
+
+ return driver_register(driver);
+}
+EXPORT_SYMBOL_GPL(__mhi_driver_register);
+
+void mhi_driver_unregister(struct mhi_driver *mhi_drv)
+{
+ driver_unregister(&mhi_drv->driver);
+}
+EXPORT_SYMBOL_GPL(mhi_driver_unregister);
+
+static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+
+ return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
+ mhi_dev->name);
+}
+
+static int mhi_match(struct device *dev, struct device_driver *drv)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_driver *mhi_drv = to_mhi_driver(drv);
+ const struct mhi_device_id *id;
+
+ /*
+ * If the device is a controller type, there is no client driver
+ * associated with it.
+ */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ for (id = mhi_drv->id_table; id->chan[0]; id++)
+ if (!strcmp(mhi_dev->name, id->chan)) {
+ mhi_dev->id = id;
+ return 1;
+ }
+
+ return 0;
+}
+
+struct bus_type mhi_bus_type = {
+ .name = "mhi",
+ .dev_name = "mhi",
+ .match = mhi_match,
+ .uevent = mhi_uevent,
+ .dev_groups = mhi_dev_groups,
+};
+
+static int __init mhi_init(void)
+{
+ mhi_debugfs_init();
+ return bus_register(&mhi_bus_type);
+}
+
+static void __exit mhi_exit(void)
+{
+ mhi_debugfs_exit();
+ bus_unregister(&mhi_bus_type);
+}
+
+postcore_initcall(mhi_init);
+module_exit(mhi_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHI Host Interface");
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
new file mode 100644
index 000000000..7989269dd
--- /dev/null
+++ b/drivers/bus/mhi/host/internal.h
@@ -0,0 +1,722 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#ifndef _MHI_INT_H
+#define _MHI_INT_H
+
+#include <linux/mhi.h>
+
+extern struct bus_type mhi_bus_type;
+
+#define MHIREGLEN (0x0)
+#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF)
+#define MHIREGLEN_MHIREGLEN_SHIFT (0)
+
+#define MHIVER (0x8)
+#define MHIVER_MHIVER_MASK (0xFFFFFFFF)
+#define MHIVER_MHIVER_SHIFT (0)
+
+#define MHICFG (0x10)
+#define MHICFG_NHWER_MASK (0xFF000000)
+#define MHICFG_NHWER_SHIFT (24)
+#define MHICFG_NER_MASK (0xFF0000)
+#define MHICFG_NER_SHIFT (16)
+#define MHICFG_NHWCH_MASK (0xFF00)
+#define MHICFG_NHWCH_SHIFT (8)
+#define MHICFG_NCH_MASK (0xFF)
+#define MHICFG_NCH_SHIFT (0)
+
+#define CHDBOFF (0x18)
+#define CHDBOFF_CHDBOFF_MASK (0xFFFFFFFF)
+#define CHDBOFF_CHDBOFF_SHIFT (0)
+
+#define ERDBOFF (0x20)
+#define ERDBOFF_ERDBOFF_MASK (0xFFFFFFFF)
+#define ERDBOFF_ERDBOFF_SHIFT (0)
+
+#define BHIOFF (0x28)
+#define BHIOFF_BHIOFF_MASK (0xFFFFFFFF)
+#define BHIOFF_BHIOFF_SHIFT (0)
+
+#define BHIEOFF (0x2C)
+#define BHIEOFF_BHIEOFF_MASK (0xFFFFFFFF)
+#define BHIEOFF_BHIEOFF_SHIFT (0)
+
+#define DEBUGOFF (0x30)
+#define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF)
+#define DEBUGOFF_DEBUGOFF_SHIFT (0)
+
+#define MHICTRL (0x38)
+#define MHICTRL_MHISTATE_MASK (0x0000FF00)
+#define MHICTRL_MHISTATE_SHIFT (8)
+#define MHICTRL_RESET_MASK (0x2)
+#define MHICTRL_RESET_SHIFT (1)
+
+#define MHISTATUS (0x48)
+#define MHISTATUS_MHISTATE_MASK (0x0000FF00)
+#define MHISTATUS_MHISTATE_SHIFT (8)
+#define MHISTATUS_SYSERR_MASK (0x4)
+#define MHISTATUS_SYSERR_SHIFT (2)
+#define MHISTATUS_READY_MASK (0x1)
+#define MHISTATUS_READY_SHIFT (0)
+
+#define CCABAP_LOWER (0x58)
+#define CCABAP_LOWER_CCABAP_LOWER_MASK (0xFFFFFFFF)
+#define CCABAP_LOWER_CCABAP_LOWER_SHIFT (0)
+
+#define CCABAP_HIGHER (0x5C)
+#define CCABAP_HIGHER_CCABAP_HIGHER_MASK (0xFFFFFFFF)
+#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT (0)
+
+#define ECABAP_LOWER (0x60)
+#define ECABAP_LOWER_ECABAP_LOWER_MASK (0xFFFFFFFF)
+#define ECABAP_LOWER_ECABAP_LOWER_SHIFT (0)
+
+#define ECABAP_HIGHER (0x64)
+#define ECABAP_HIGHER_ECABAP_HIGHER_MASK (0xFFFFFFFF)
+#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT (0)
+
+#define CRCBAP_LOWER (0x68)
+#define CRCBAP_LOWER_CRCBAP_LOWER_MASK (0xFFFFFFFF)
+#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT (0)
+
+#define CRCBAP_HIGHER (0x6C)
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK (0xFFFFFFFF)
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT (0)
+
+#define CRDB_LOWER (0x70)
+#define CRDB_LOWER_CRDB_LOWER_MASK (0xFFFFFFFF)
+#define CRDB_LOWER_CRDB_LOWER_SHIFT (0)
+
+#define CRDB_HIGHER (0x74)
+#define CRDB_HIGHER_CRDB_HIGHER_MASK (0xFFFFFFFF)
+#define CRDB_HIGHER_CRDB_HIGHER_SHIFT (0)
+
+#define MHICTRLBASE_LOWER (0x80)
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK (0xFFFFFFFF)
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT (0)
+
+#define MHICTRLBASE_HIGHER (0x84)
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK (0xFFFFFFFF)
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT (0)
+
+#define MHICTRLLIMIT_LOWER (0x88)
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK (0xFFFFFFFF)
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT (0)
+
+#define MHICTRLLIMIT_HIGHER (0x8C)
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK (0xFFFFFFFF)
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT (0)
+
+#define MHIDATABASE_LOWER (0x98)
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK (0xFFFFFFFF)
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT (0)
+
+#define MHIDATABASE_HIGHER (0x9C)
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK (0xFFFFFFFF)
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT (0)
+
+#define MHIDATALIMIT_LOWER (0xA0)
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK (0xFFFFFFFF)
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT (0)
+
+#define MHIDATALIMIT_HIGHER (0xA4)
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF)
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0)
+
+/* Host request register */
+#define MHI_SOC_RESET_REQ_OFFSET (0xB0)
+#define MHI_SOC_RESET_REQ BIT(0)
+
+/* MHI BHI offsets */
+#define BHI_BHIVERSION_MINOR (0x00)
+#define BHI_BHIVERSION_MAJOR (0x04)
+#define BHI_IMGADDR_LOW (0x08)
+#define BHI_IMGADDR_HIGH (0x0C)
+#define BHI_IMGSIZE (0x10)
+#define BHI_RSVD1 (0x14)
+#define BHI_IMGTXDB (0x18)
+#define BHI_TXDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHI_TXDB_SEQNUM_SHFT (0)
+#define BHI_RSVD2 (0x1C)
+#define BHI_INTVEC (0x20)
+#define BHI_RSVD3 (0x24)
+#define BHI_EXECENV (0x28)
+#define BHI_STATUS (0x2C)
+#define BHI_ERRCODE (0x30)
+#define BHI_ERRDBG1 (0x34)
+#define BHI_ERRDBG2 (0x38)
+#define BHI_ERRDBG3 (0x3C)
+#define BHI_SERIALNU (0x40)
+#define BHI_SBLANTIROLLVER (0x44)
+#define BHI_NUMSEG (0x48)
+#define BHI_MSMHWID(n) (0x4C + (0x4 * n))
+#define BHI_OEMPKHASH(n) (0x64 + (0x4 * n))
+#define BHI_RSVD5 (0xC4)
+#define BHI_STATUS_MASK (0xC0000000)
+#define BHI_STATUS_SHIFT (30)
+#define BHI_STATUS_ERROR (3)
+#define BHI_STATUS_SUCCESS (2)
+#define BHI_STATUS_RESET (0)
+
+/* MHI BHIE offsets */
+#define BHIE_MSMSOCID_OFFS (0x0000)
+#define BHIE_TXVECADDR_LOW_OFFS (0x002C)
+#define BHIE_TXVECADDR_HIGH_OFFS (0x0030)
+#define BHIE_TXVECSIZE_OFFS (0x0034)
+#define BHIE_TXVECDB_OFFS (0x003C)
+#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_TXVECDB_SEQNUM_SHFT (0)
+#define BHIE_TXVECSTATUS_OFFS (0x0044)
+#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0)
+#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000)
+#define BHIE_TXVECSTATUS_STATUS_SHFT (30)
+#define BHIE_TXVECSTATUS_STATUS_RESET (0x00)
+#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02)
+#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03)
+#define BHIE_RXVECADDR_LOW_OFFS (0x0060)
+#define BHIE_RXVECADDR_HIGH_OFFS (0x0064)
+#define BHIE_RXVECSIZE_OFFS (0x0068)
+#define BHIE_RXVECDB_OFFS (0x0070)
+#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_RXVECDB_SEQNUM_SHFT (0)
+#define BHIE_RXVECSTATUS_OFFS (0x0078)
+#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0)
+#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000)
+#define BHIE_RXVECSTATUS_STATUS_SHFT (30)
+#define BHIE_RXVECSTATUS_STATUS_RESET (0x00)
+#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02)
+#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03)
+
+#define SOC_HW_VERSION_OFFS (0x224)
+#define SOC_HW_VERSION_FAM_NUM_BMSK (0xF0000000)
+#define SOC_HW_VERSION_FAM_NUM_SHFT (28)
+#define SOC_HW_VERSION_DEV_NUM_BMSK (0x0FFF0000)
+#define SOC_HW_VERSION_DEV_NUM_SHFT (16)
+#define SOC_HW_VERSION_MAJOR_VER_BMSK (0x0000FF00)
+#define SOC_HW_VERSION_MAJOR_VER_SHFT (8)
+#define SOC_HW_VERSION_MINOR_VER_BMSK (0x000000FF)
+#define SOC_HW_VERSION_MINOR_VER_SHFT (0)
+
+#define EV_CTX_RESERVED_MASK GENMASK(7, 0)
+#define EV_CTX_INTMODC_MASK GENMASK(15, 8)
+#define EV_CTX_INTMODC_SHIFT 8
+#define EV_CTX_INTMODT_MASK GENMASK(31, 16)
+#define EV_CTX_INTMODT_SHIFT 16
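+/*
+ * These context structures are shared with the device, so the 64-bit
+ * ring fields are packed and 4-byte aligned to match the layout the
+ * device expects rather than the host's natural alignment.
+ */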
+struct mhi_event_ctxt {
+ __u32 intmod;
+ __u32 ertype;
+ __u32 msivec;
+
+ __u64 rbase __packed __aligned(4);
+ __u64 rlen __packed __aligned(4);
+ __u64 rp __packed __aligned(4);
+ __u64 wp __packed __aligned(4);
+};
+
+#define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0)
+#define CHAN_CTX_CHSTATE_SHIFT 0
+#define CHAN_CTX_BRSTMODE_MASK GENMASK(9, 8)
+#define CHAN_CTX_BRSTMODE_SHIFT 8
+#define CHAN_CTX_POLLCFG_MASK GENMASK(15, 10)
+#define CHAN_CTX_POLLCFG_SHIFT 10
+#define CHAN_CTX_RESERVED_MASK GENMASK(31, 16)
+struct mhi_chan_ctxt {
+ __u32 chcfg;
+ __u32 chtype;
+ __u32 erindex;
+
+ __u64 rbase __packed __aligned(4);
+ __u64 rlen __packed __aligned(4);
+ __u64 rp __packed __aligned(4);
+ __u64 wp __packed __aligned(4);
+};
+
+struct mhi_cmd_ctxt {
+ __u32 reserved0;
+ __u32 reserved1;
+ __u32 reserved2;
+
+ __u64 rbase __packed __aligned(4);
+ __u64 rlen __packed __aligned(4);
+ __u64 rp __packed __aligned(4);
+ __u64 wp __packed __aligned(4);
+};
+
+struct mhi_ctxt {
+ struct mhi_event_ctxt *er_ctxt;
+ struct mhi_chan_ctxt *chan_ctxt;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ dma_addr_t er_ctxt_addr;
+ dma_addr_t chan_ctxt_addr;
+ dma_addr_t cmd_ctxt_addr;
+};
+
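+/* A ring element (TRE): a 64-bit pointer/payload plus two control words */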
+struct mhi_tre {
+ u64 ptr;
+ u32 dword[2];
+};
+
+struct bhi_vec_entry {
+ u64 dma_addr;
+ u64 size;
+};
+
+enum mhi_cmd_type {
+ MHI_CMD_NOP = 1,
+ MHI_CMD_RESET_CHAN = 16,
+ MHI_CMD_STOP_CHAN = 17,
+ MHI_CMD_START_CHAN = 18,
+};
+
+/* No operation command */
+#define MHI_TRE_CMD_NOOP_PTR (0)
+#define MHI_TRE_CMD_NOOP_DWORD0 (0)
+#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_NOP << 16)
+
+/* Channel reset command */
+#define MHI_TRE_CMD_RESET_PTR (0)
+#define MHI_TRE_CMD_RESET_DWORD0 (0)
+#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \
+ (MHI_CMD_RESET_CHAN << 16))
+
+/* Channel stop command */
+#define MHI_TRE_CMD_STOP_PTR (0)
+#define MHI_TRE_CMD_STOP_DWORD0 (0)
+#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | \
+ (MHI_CMD_STOP_CHAN << 16))
+
+/* Channel start command */
+#define MHI_TRE_CMD_START_PTR (0)
+#define MHI_TRE_CMD_START_DWORD0 (0)
+#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \
+ (MHI_CMD_START_CHAN << 16))
+
+#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF)
+#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)
+
+/* Event descriptor macros */
+#define MHI_TRE_EV_PTR(ptr) (ptr)
+#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len)
+#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16))
+#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr)
+#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF)
+#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)
+#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0])
+#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr)
+#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr)
+#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF)
+#define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xFF)
+
+/* Transfer descriptor macros */
+#define MHI_TRE_DATA_PTR(ptr) (ptr)
+#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU)
+#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \
+ | (ieot << 9) | (ieob << 8) | chain)
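+/* The (2 << 16) above encodes MHI_PKT_TYPE_TRANSFER in the TRE type field */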
+
+/* RSC transfer descriptor macros */
+#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr)
+#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie)
+#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16)
+
+enum mhi_pkt_type {
+ MHI_PKT_TYPE_INVALID = 0x0,
+ MHI_PKT_TYPE_NOOP_CMD = 0x1,
+ MHI_PKT_TYPE_TRANSFER = 0x2,
+ MHI_PKT_TYPE_COALESCING = 0x8,
+ MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10,
+ MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11,
+ MHI_PKT_TYPE_START_CHAN_CMD = 0x12,
+ MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20,
+ MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21,
+ MHI_PKT_TYPE_TX_EVENT = 0x22,
+ MHI_PKT_TYPE_RSC_TX_EVENT = 0x28,
+ MHI_PKT_TYPE_EE_EVENT = 0x40,
+ MHI_PKT_TYPE_TSYNC_EVENT = 0x48,
+ MHI_PKT_TYPE_BW_REQ_EVENT = 0x50,
+ MHI_PKT_TYPE_STALE_EVENT, /* internal event */
+};
+
+/* MHI transfer completion events */
+enum mhi_ev_ccs {
+ MHI_EV_CC_INVALID = 0x0,
+ MHI_EV_CC_SUCCESS = 0x1,
+ MHI_EV_CC_EOT = 0x2, /* End of transfer event */
+ MHI_EV_CC_OVERFLOW = 0x3,
+ MHI_EV_CC_EOB = 0x4, /* End of block event */
+ MHI_EV_CC_OOB = 0x5, /* Out of block event */
+ MHI_EV_CC_DB_MODE = 0x6,
+ MHI_EV_CC_UNDEFINED_ERR = 0x10,
+ MHI_EV_CC_BAD_TRE = 0x11,
+};
+
+enum mhi_ch_state {
+ MHI_CH_STATE_DISABLED = 0x0,
+ MHI_CH_STATE_ENABLED = 0x1,
+ MHI_CH_STATE_RUNNING = 0x2,
+ MHI_CH_STATE_SUSPENDED = 0x3,
+ MHI_CH_STATE_STOP = 0x4,
+ MHI_CH_STATE_ERROR = 0x5,
+};
+
+#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \
+ mode != MHI_DB_BRST_ENABLE)
+
+extern const char * const mhi_ee_str[MHI_EE_MAX];
+#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
+ "INVALID_EE" : mhi_ee_str[ee])
+
+#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \
+ ee == MHI_EE_EDL)
+
+#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW)
+
+enum dev_st_transition {
+ DEV_ST_TRANSITION_PBL,
+ DEV_ST_TRANSITION_READY,
+ DEV_ST_TRANSITION_SBL,
+ DEV_ST_TRANSITION_MISSION_MODE,
+ DEV_ST_TRANSITION_SYS_ERR,
+ DEV_ST_TRANSITION_DISABLE,
+ DEV_ST_TRANSITION_MAX,
+};
+
+extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX];
+#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \
+ "INVALID_STATE" : dev_state_tran_str[state])
+
+extern const char * const mhi_state_str[MHI_STATE_MAX];
+#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \
+ !mhi_state_str[state]) ? \
+ "INVALID_STATE" : mhi_state_str[state])
+
+/* internal power states */
+enum mhi_pm_state {
+ MHI_PM_STATE_DISABLE,
+ MHI_PM_STATE_POR,
+ MHI_PM_STATE_M0,
+ MHI_PM_STATE_M2,
+ MHI_PM_STATE_M3_ENTER,
+ MHI_PM_STATE_M3,
+ MHI_PM_STATE_M3_EXIT,
+ MHI_PM_STATE_FW_DL_ERR,
+ MHI_PM_STATE_SYS_ERR_DETECT,
+ MHI_PM_STATE_SYS_ERR_PROCESS,
+ MHI_PM_STATE_SHUTDOWN_PROCESS,
+ MHI_PM_STATE_LD_ERR_FATAL_DETECT,
+ MHI_PM_STATE_MAX
+};
+
+#define MHI_PM_DISABLE BIT(0)
+#define MHI_PM_POR BIT(1)
+#define MHI_PM_M0 BIT(2)
+#define MHI_PM_M2 BIT(3)
+#define MHI_PM_M3_ENTER BIT(4)
+#define MHI_PM_M3 BIT(5)
+#define MHI_PM_M3_EXIT BIT(6)
+/* firmware download failure state */
+#define MHI_PM_FW_DL_ERR BIT(7)
+#define MHI_PM_SYS_ERR_DETECT BIT(8)
+#define MHI_PM_SYS_ERR_PROCESS BIT(9)
+#define MHI_PM_SHUTDOWN_PROCESS BIT(10)
+/* link not accessible */
+#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11)
+
+#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
+ MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
+#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
+#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
+#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & \
+ mhi_cntrl->db_access)
+#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
+ MHI_PM_M2 | MHI_PM_M3_EXIT))
+#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2)
+#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state)
+#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \
+ MHI_PM_IN_ERROR_STATE(pm_state))
+#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \
+ (MHI_PM_M3_ENTER | MHI_PM_M3))
+
+#define NR_OF_CMD_RINGS 1
+#define CMD_EL_PER_RING 128
+#define PRIMARY_CMD_RING 0
+#define MHI_DEV_WAKE_DB 127
+#define MHI_MAX_MTU 0xffff
+#define MHI_RANDOM_U32_NONZERO(bmsk) (prandom_u32_max(bmsk) + 1)
+
+enum mhi_er_type {
+ MHI_ER_TYPE_INVALID = 0x0,
+ MHI_ER_TYPE_VALID = 0x1,
+};
+
+struct db_cfg {
+ bool reset_req;
+ bool db_mode;
+ u32 pollcfg;
+ enum mhi_db_brst_mode brstmode;
+ dma_addr_t db_val;
+ void (*process_db)(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_cfg, void __iomem *io_addr,
+ dma_addr_t db_val);
+};
+
+struct mhi_pm_transitions {
+ enum mhi_pm_state from_state;
+ u32 to_states;
+};
+
+struct state_transition {
+ struct list_head node;
+ enum dev_st_transition state;
+};
+
+struct mhi_ring {
+ dma_addr_t dma_handle;
+ dma_addr_t iommu_base;
+ u64 *ctxt_wp; /* point to ctxt wp */
+ void *pre_aligned;
+ void *base;
+ void *rp;
+ void *wp;
+ size_t el_size;
+ size_t len;
+ size_t elements;
+ size_t alloc_size;
+ void __iomem *db_addr;
+};
+
+struct mhi_cmd {
+ struct mhi_ring ring;
+ spinlock_t lock;
+};
+
+struct mhi_buf_info {
+ void *v_addr;
+ void *bb_addr;
+ void *wp;
+ void *cb_buf;
+ dma_addr_t p_addr;
+ size_t len;
+ enum dma_data_direction dir;
+ bool used; /* Indicates whether the buffer is used or not */
+ bool pre_mapped; /* Already pre-mapped by client */
+};
+
+struct mhi_event {
+ struct mhi_controller *mhi_cntrl;
+ struct mhi_chan *mhi_chan; /* dedicated to channel */
+ u32 er_index;
+ u32 intmod;
+ u32 irq;
+ int chan; /* this event ring is dedicated to a channel (optional) */
+ u32 priority;
+ enum mhi_er_data_type data_type;
+ struct mhi_ring ring;
+ struct db_cfg db_cfg;
+ struct tasklet_struct task;
+ spinlock_t lock;
+ int (*process_event)(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ u32 event_quota);
+ bool hw_ring;
+ bool cl_manage;
+ bool offload_ev; /* managed by a device driver */
+};
+
+struct mhi_chan {
+ const char *name;
+ /*
+ * Important: When consuming, increment tre_ring first and when
+ * releasing, decrement buf_ring first. If tre_ring has space, buf_ring
+ * is guaranteed to have space, so we do not need to check both rings.
+ */
+ struct mhi_ring buf_ring;
+ struct mhi_ring tre_ring;
+ u32 chan;
+ u32 er_index;
+ u32 intmod;
+ enum mhi_ch_type type;
+ enum dma_data_direction dir;
+ struct db_cfg db_cfg;
+ enum mhi_ch_ee_mask ee_mask;
+ enum mhi_ch_state ch_state;
+ enum mhi_ev_ccs ccs;
+ struct mhi_device *mhi_dev;
+ void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result);
+ struct mutex mutex;
+ struct completion completion;
+ rwlock_t lock;
+ struct list_head node;
+ bool lpm_notify;
+ bool configured;
+ bool offload_ch;
+ bool pre_alloc;
+ bool auto_start;
+ bool wake_capable;
+};
+
+/* Default MHI timeout */
+#define MHI_TIMEOUT_MS (1000)
+
+/* debugfs related functions */
+#ifdef CONFIG_MHI_BUS_DEBUG
+void mhi_create_debugfs(struct mhi_controller *mhi_cntrl);
+void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl);
+void mhi_debugfs_init(void);
+void mhi_debugfs_exit(void);
+#else
+static inline void mhi_create_debugfs(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static inline void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static inline void mhi_debugfs_init(void)
+{
+}
+
+static inline void mhi_debugfs_exit(void)
+{
+}
+#endif
+
+struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl);
+
+int mhi_destroy_device(struct device *dev, void *data);
+void mhi_create_devices(struct mhi_controller *mhi_cntrl);
+
+int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info **image_info, size_t alloc_size);
+void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info *image_info);
+
+/* Power management APIs */
+enum mhi_pm_state __must_check mhi_tryset_pm_state(
+ struct mhi_controller *mhi_cntrl,
+ enum mhi_pm_state state);
+const char *to_mhi_pm_state_str(enum mhi_pm_state state);
+enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
+int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
+ enum dev_st_transition state);
+void mhi_pm_st_worker(struct work_struct *work);
+void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl);
+void mhi_fw_load_worker(struct work_struct *work);
+int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
+int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
+void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl);
+int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
+int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
+int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ enum mhi_cmd_type cmd);
+static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl)
+{
+ return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
+ mhi_cntrl->dev_state <= MHI_STATE_M3_FAST);
+}
+
+static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl)
+{
+ pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+}
+
+/* Register access methods */
+void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg,
+ void __iomem *db_addr, dma_addr_t db_val);
+void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_mode, void __iomem *db_addr,
+ dma_addr_t db_val);
+int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 *out);
+int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 mask,
+ u32 shift, u32 *out);
+void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
+ u32 offset, u32 val);
+void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
+ u32 offset, u32 mask, u32 shift, u32 val);
+void mhi_ring_er_db(struct mhi_event *mhi_event);
+void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
+ dma_addr_t db_val);
+void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd);
+void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+
+/* Initialization methods */
+int mhi_init_mmio(struct mhi_controller *mhi_cntrl);
+int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
+void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
+int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
+void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
+void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+ struct image_info *img_info);
+void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
+int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+void mhi_reset_chan(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+
+/* Memory allocation methods */
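+/*
+ * Thin wrappers that allocate and free coherent memory against the
+ * underlying controller device (cntrl_dev), which is the device the
+ * DMA mappings are created for.
+ */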
+static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl,
+ size_t size,
+ dma_addr_t *dma_handle,
+ gfp_t gfp)
+{
+ void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, dma_handle,
+ gfp);
+
+ return buf;
+}
+
+static inline void mhi_free_coherent(struct mhi_controller *mhi_cntrl,
+ size_t size,
+ void *vaddr,
+ dma_addr_t dma_handle)
+{
+ dma_free_coherent(mhi_cntrl->cntrl_dev, size, vaddr, dma_handle);
+}
+
+/* Event processing methods */
+void mhi_ctrl_ev_task(unsigned long data);
+void mhi_ev_task(unsigned long data);
+int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event, u32 event_quota);
+int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event, u32 event_quota);
+
+/* ISR handlers */
+irqreturn_t mhi_irq_handler(int irq_number, void *dev);
+irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev);
+irqreturn_t mhi_intvec_handler(int irq_number, void *dev);
+
+int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ struct mhi_buf_info *info, enum mhi_flags flags);
+int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+
+#endif /* _MHI_INT_H */
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
new file mode 100644
index 000000000..614dd287c
--- /dev/null
+++ b/drivers/bus/mhi/host/main.c
@@ -0,0 +1,1630 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 *out)
+{
+ return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
+}
+
+int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset,
+ u32 mask, u32 shift, u32 *out)
+{
+ u32 tmp;
+ int ret;
+
+ ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
+ if (ret)
+ return ret;
+
+ *out = (tmp & mask) >> shift;
+
+ return 0;
+}
+
+void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
+ u32 offset, u32 val)
+{
+ mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
+}
+
+void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
+ u32 offset, u32 mask, u32 shift, u32 val)
+{
+ int ret;
+ u32 tmp;
+
+ ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
+ if (ret)
+ return;
+
+ tmp &= ~mask;
+ tmp |= (val << shift);
+ mhi_write_reg(mhi_cntrl, base, offset, tmp);
+}
+
+void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
+ dma_addr_t db_val)
+{
+ mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
+ mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
+}
+
+void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_cfg,
+ void __iomem *db_addr,
+ dma_addr_t db_val)
+{
+ if (db_cfg->db_mode) {
+ db_cfg->db_val = db_val;
+ mhi_write_db(mhi_cntrl, db_addr, db_val);
+ db_cfg->db_mode = 0;
+ }
+}
+
+void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_cfg,
+ void __iomem *db_addr,
+ dma_addr_t db_val)
+{
+ db_cfg->db_val = db_val;
+ mhi_write_db(mhi_cntrl, db_addr, db_val);
+}
+
+void mhi_ring_er_db(struct mhi_event *mhi_event)
+{
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
+ ring->db_addr, *ring->ctxt_wp);
+}
+
+void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
+{
+ dma_addr_t db;
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ db = ring->iommu_base + (ring->wp - ring->base);
+ *ring->ctxt_wp = db;
+ mhi_write_db(mhi_cntrl, ring->db_addr, db);
+}
+
+void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *ring = &mhi_chan->tre_ring;
+ dma_addr_t db;
+
+ db = ring->iommu_base + (ring->wp - ring->base);
+ *ring->ctxt_wp = db;
+ mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
+ ring->db_addr, db);
+}
+
+enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
+{
+ u32 exec;
+ int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
+
+ return (ret) ? MHI_EE_MAX : exec;
+}
+
+enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
+{
+ u32 state;
+ int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
+ MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT, &state);
+ return ret ? MHI_STATE_MAX : state;
+}
+
+int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
+ buf_info->v_addr, buf_info->len,
+ buf_info->dir);
+ if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
+ return -ENOMEM;
+
+ return 0;
+}
+
+int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len,
+ &buf_info->p_addr, GFP_ATOMIC);
+
+ if (!buf)
+ return -ENOMEM;
+
+ if (buf_info->dir == DMA_TO_DEVICE)
+ memcpy(buf, buf_info->v_addr, buf_info->len);
+
+ buf_info->bb_addr = buf;
+
+ return 0;
+}
+
+void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
+ buf_info->dir);
+}
+
+void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ if (buf_info->dir == DMA_FROM_DEVICE)
+ memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
+
+ mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr,
+ buf_info->p_addr);
+}
+
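+/*
+ * One ring element is always left unused so that a full ring (wp just
+ * behind rp) can be distinguished from an empty ring (wp == rp); hence
+ * the -1 in both branches below.
+ */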
+static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ int nr_el;
+
+ if (ring->wp < ring->rp) {
+ nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
+ } else {
+ nr_el = (ring->rp - ring->base) / ring->el_size;
+ nr_el += ((ring->base + ring->len - ring->wp) /
+ ring->el_size) - 1;
+ }
+
+ return nr_el;
+}
+
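+/* Translate a device-visible ring address into its host virtual address */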
+static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
+{
+ return (addr - ring->iommu_base) + ring->base;
+}
+
+static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ ring->wp += ring->el_size;
+ if (ring->wp >= (ring->base + ring->len))
+ ring->wp = ring->base;
+ /* Make the update visible to all cores */
+ smp_wmb();
+}
+
+static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ ring->rp += ring->el_size;
+ if (ring->rp >= (ring->base + ring->len))
+ ring->rp = ring->base;
+ /* Make the update visible to all cores */
+ smp_wmb();
+}
+
+static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
+{
+ return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
+}
+
+int mhi_destroy_device(struct device *dev, void *data)
+{
+ struct mhi_chan *ul_chan, *dl_chan;
+ struct mhi_device *mhi_dev;
+ struct mhi_controller *mhi_cntrl;
+ enum mhi_ee_type ee = MHI_EE_MAX;
+
+ if (dev->bus != &mhi_bus_type)
+ return 0;
+
+ mhi_dev = to_mhi_device(dev);
+ mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ /* Only destroy virtual devices that are attached to the bus */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ ul_chan = mhi_dev->ul_chan;
+ dl_chan = mhi_dev->dl_chan;
+
+ /*
+ * If an execution environment is specified, remove only those devices
+ * that started in it, based on the ee_mask of the channels, as we
+ * move on to a different execution environment.
+ */
+ if (data)
+ ee = *(enum mhi_ee_type *)data;
+
+ /*
+ * For the suspend and resume case, this function will get called
+ * without mhi_unregister_controller(). Hence, we need to drop the
+ * references to mhi_dev created for ul and dl channels. We can
+ * be sure that there will be no instances of mhi_dev left after
+ * this.
+ */
+ if (ul_chan) {
+ if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
+ return 0;
+
+ put_device(&ul_chan->mhi_dev->dev);
+ }
+
+ if (dl_chan) {
+ if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
+ return 0;
+
+ put_device(&dl_chan->mhi_dev->dev);
+ }
+
+ dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
+ mhi_dev->name);
+
+ /* Notify the client and remove the device from MHI bus */
+ device_del(dev);
+ put_device(dev);
+
+ return 0;
+}
+
+void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
+{
+ struct mhi_driver *mhi_drv;
+
+ if (!mhi_dev->dev.driver)
+ return;
+
+ mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
+
+ if (mhi_drv->status_cb)
+ mhi_drv->status_cb(mhi_dev, cb_reason);
+}
+EXPORT_SYMBOL_GPL(mhi_notify);
+
+/* Bind MHI channels to MHI devices */
+void mhi_create_devices(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_chan *mhi_chan;
+ struct mhi_device *mhi_dev;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int i, ret;
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ if (!mhi_chan->configured || mhi_chan->mhi_dev ||
+ !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
+ continue;
+ mhi_dev = mhi_alloc_device(mhi_cntrl);
+ if (IS_ERR(mhi_dev))
+ return;
+
+ mhi_dev->dev_type = MHI_DEVICE_XFER;
+ switch (mhi_chan->dir) {
+ case DMA_TO_DEVICE:
+ mhi_dev->ul_chan = mhi_chan;
+ mhi_dev->ul_chan_id = mhi_chan->chan;
+ break;
+ case DMA_FROM_DEVICE:
+ /* We use dl_chan as offload channels */
+ mhi_dev->dl_chan = mhi_chan;
+ mhi_dev->dl_chan_id = mhi_chan->chan;
+ break;
+ default:
+ dev_err(dev, "Direction not supported\n");
+ put_device(&mhi_dev->dev);
+ return;
+ }
+
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+
+ /* If the next channel has the same name, bind it to the same device */
+ if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
+ if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
+ i++;
+ mhi_chan++;
+ if (mhi_chan->dir == DMA_TO_DEVICE) {
+ mhi_dev->ul_chan = mhi_chan;
+ mhi_dev->ul_chan_id = mhi_chan->chan;
+ } else {
+ mhi_dev->dl_chan = mhi_chan;
+ mhi_dev->dl_chan_id = mhi_chan->chan;
+ }
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+ }
+ }
+
+ /* Channel name is the same for both UL and DL */
+ mhi_dev->name = mhi_chan->name;
+ dev_set_name(&mhi_dev->dev, "%s_%s",
+ dev_name(mhi_cntrl->cntrl_dev),
+ mhi_dev->name);
+
+ /* Init wakeup source if available */
+ if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
+ device_init_wakeup(&mhi_dev->dev, true);
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ put_device(&mhi_dev->dev);
+ }
+}
+
+irqreturn_t mhi_irq_handler(int irq_number, void *dev)
+{
+ struct mhi_event *mhi_event = dev;
+ struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+ struct mhi_event_ctxt *er_ctxt =
+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ struct mhi_ring *ev_ring = &mhi_event->ring;
+ dma_addr_t ptr = er_ctxt->rp;
+ void *dev_rp;
+
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ return IRQ_HANDLED;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
+
+ /* Only proceed if event ring has pending events */
+ if (ev_ring->rp == dev_rp)
+ return IRQ_HANDLED;
+
+ /* For a client-managed event ring, notify the client of pending data */
+ if (mhi_event->cl_manage) {
+ struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
+ struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
+
+ if (mhi_dev)
+ mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
+ } else {
+ tasklet_schedule(&mhi_event->task);
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
+{
+ struct mhi_controller *mhi_cntrl = priv;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state state = MHI_STATE_MAX;
+ enum mhi_pm_state pm_state = 0;
+ enum mhi_ee_type ee = 0;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ goto exit_intvec;
+ }
+
+ state = mhi_get_mhi_state(mhi_cntrl);
+ ee = mhi_cntrl->ee;
+ mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
+ TO_MHI_STATE_STR(state));
+
+ if (state == MHI_STATE_SYS_ERR) {
+ dev_dbg(dev, "System error detected\n");
+ pm_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_DETECT);
+ }
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ /* If the device supports RDDM, don't bother processing the SYS error */
+ if (mhi_cntrl->rddm_image) {
+ if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
+ wake_up_all(&mhi_cntrl->state_event);
+ }
+ goto exit_intvec;
+ }
+
+ if (pm_state == MHI_PM_SYS_ERR_DETECT) {
+ wake_up_all(&mhi_cntrl->state_event);
+
+ /* For fatal errors, we let the controller decide the next step */
+ if (MHI_IN_PBL(ee))
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
+ else
+ mhi_pm_sys_err_handler(mhi_cntrl);
+ }
+
+exit_intvec:
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
+{
+ struct mhi_controller *mhi_cntrl = dev;
+
+ /* Wake up events waiting for state change */
+ wake_up_all(&mhi_cntrl->state_event);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ dma_addr_t ctxt_wp;
+
+ /* Update the WP */
+ ring->wp += ring->el_size;
+ ctxt_wp = *ring->ctxt_wp + ring->el_size;
+
+ if (ring->wp >= (ring->base + ring->len)) {
+ ring->wp = ring->base;
+ ctxt_wp = ring->iommu_base;
+ }
+
+ *ring->ctxt_wp = ctxt_wp;
+
+ /* Update the RP */
+ ring->rp += ring->el_size;
+ if (ring->rp >= (ring->base + ring->len))
+ ring->rp = ring->base;
+
+ /* Update to all cores */
+ smp_wmb();
+}
+
+static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
+ struct mhi_tre *event,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring, *tre_ring;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_result result;
+ unsigned long flags = 0;
+ u32 ev_code;
+
+ ev_code = MHI_TRE_GET_EV_CODE(event);
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+
+ result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
+ -EOVERFLOW : 0;
+
+ /*
+ * If it's a DB event, we need to grab the lock with preemption
+ * disabled and as a writer, because we have to update the db
+ * register and another thread could be doing the same.
+ */
+ if (ev_code >= MHI_EV_CC_OOB)
+ write_lock_irqsave(&mhi_chan->lock, flags);
+ else
+ read_lock_bh(&mhi_chan->lock);
+
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
+ goto end_process_tx_event;
+
+ switch (ev_code) {
+ case MHI_EV_CC_OVERFLOW:
+ case MHI_EV_CC_EOB:
+ case MHI_EV_CC_EOT:
+ {
+ dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
+ struct mhi_tre *local_rp, *ev_tre;
+ void *dev_rp;
+ struct mhi_buf_info *buf_info;
+ u16 xfer_len;
+
+ if (!is_valid_ring_ptr(tre_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event element points outside of the tre ring\n");
+ break;
+ }
+ /* Get the TRB this event points to */
+ ev_tre = mhi_to_virtual(tre_ring, ptr);
+
+ dev_rp = ev_tre + 1;
+ if (dev_rp >= (tre_ring->base + tre_ring->len))
+ dev_rp = tre_ring->base;
+
+ result.dir = mhi_chan->dir;
+
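+ /*
+ * Complete every buffer between the host read pointer and the
+ * device read pointer; only the last TRE carries the byte count
+ * reported by the event.
+ */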
+ local_rp = tre_ring->rp;
+ while (local_rp != dev_rp) {
+ buf_info = buf_ring->rp;
+ /* If it's the last TRE, get length from the event */
+ if (local_rp == ev_tre)
+ xfer_len = MHI_TRE_GET_EV_LEN(event);
+ else
+ xfer_len = buf_info->len;
+
+ /* Unmap if it's not pre-mapped by client */
+ if (likely(!buf_info->pre_mapped))
+ mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
+
+ result.buf_addr = buf_info->cb_buf;
+
+ /* truncate to buf len if xfer_len is larger */
+ result.bytes_xferd =
+ min_t(u16, xfer_len, buf_info->len);
+ mhi_del_ring_element(mhi_cntrl, buf_ring);
+ mhi_del_ring_element(mhi_cntrl, tre_ring);
+ local_rp = tre_ring->rp;
+
+ /* notify client */
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+ if (mhi_chan->dir == DMA_TO_DEVICE)
+ atomic_dec(&mhi_cntrl->pending_pkts);
+
+ /*
+ * Recycle the buffer if it is pre-allocated. If there
+ * is an error, there is not much we can do apart from
+ * dropping the packet.
+ */
+ if (mhi_chan->pre_alloc) {
+ if (mhi_queue_buf(mhi_chan->mhi_dev,
+ mhi_chan->dir,
+ buf_info->cb_buf,
+ buf_info->len, MHI_EOT)) {
+ dev_err(dev,
+ "Error recycling buffer for chan:%d\n",
+ mhi_chan->chan);
+ kfree(buf_info->cb_buf);
+ }
+ }
+ }
+ break;
+ } /* CC_EOT */
+ case MHI_EV_CC_OOB:
+ case MHI_EV_CC_DB_MODE:
+ {
+ unsigned long flags;
+
+ mhi_chan->db_cfg.db_mode = 1;
+ read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+ if (tre_ring->wp != tre_ring->rp &&
+ MHI_DB_ACCESS_VALID(mhi_cntrl)) {
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ }
+ read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+ break;
+ }
+ case MHI_EV_CC_BAD_TRE:
+ default:
+ dev_err(dev, "Unknown event 0x%x\n", ev_code);
+ break;
+ } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */
+
+end_process_tx_event:
+ if (ev_code >= MHI_EV_CC_OOB)
+ write_unlock_irqrestore(&mhi_chan->lock, flags);
+ else
+ read_unlock_bh(&mhi_chan->lock);
+
+ return 0;
+}
+
+static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
+ struct mhi_tre *event,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring, *tre_ring;
+ struct mhi_buf_info *buf_info;
+ struct mhi_result result;
+ int ev_code;
+ u32 cookie; /* offset to local descriptor */
+ u16 xfer_len;
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+
+ ev_code = MHI_TRE_GET_EV_CODE(event);
+ cookie = MHI_TRE_GET_EV_COOKIE(event);
+ xfer_len = MHI_TRE_GET_EV_LEN(event);
+
+ /* Warn if we received an out-of-bounds cookie */
+ WARN_ON(cookie >= buf_ring->len);
+
+ buf_info = buf_ring->base + cookie;
+
+ result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
+ -EOVERFLOW : 0;
+
+ /* truncate to buf len if xfer_len is larger */
+ result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
+ result.buf_addr = buf_info->cb_buf;
+ result.dir = mhi_chan->dir;
+
+ read_lock_bh(&mhi_chan->lock);
+
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
+ goto end_process_rsc_event;
+
+ WARN_ON(!buf_info->used);
+
+ /* notify the client */
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+ /*
+ * Note: We're arbitrarily incrementing RP even though the completion
+ * packet we just processed might not correspond to it. We can do this
+ * because the device is guaranteed to cache descriptors in the order
+ * it receives them, so even though this completion event is for a
+ * different descriptor we can reuse all the descriptors in between.
+ * Example:
+ * Transfer Ring has descriptors: A, B, C, D
+ * The last descriptor the host queued is D (WP) and the first
+ * descriptor the host queued is A (RP).
+ * The completion event we just serviced is for descriptor C.
+ * Then we can safely queue descriptors to replace A, B, and C
+ * even though the host did not receive completions for A and B.
+ */
+ mhi_del_ring_element(mhi_cntrl, tre_ring);
+ buf_info->used = false;
+
+end_process_rsc_event:
+ read_unlock_bh(&mhi_chan->lock);
+
+ return 0;
+}
+
+static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
+ struct mhi_tre *tre)
+{
+ dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
+ struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+ struct mhi_ring *mhi_ring = &cmd_ring->ring;
+ struct mhi_tre *cmd_pkt;
+ struct mhi_chan *mhi_chan;
+ u32 chan;
+
+ if (!is_valid_ring_ptr(mhi_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event element points outside of the cmd ring\n");
+ return;
+ }
+
+ cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
+
+ chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
+
+ if (chan < mhi_cntrl->max_chan &&
+ mhi_cntrl->mhi_chan[chan].configured) {
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ write_lock_bh(&mhi_chan->lock);
+ mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
+ complete(&mhi_chan->completion);
+ write_unlock_bh(&mhi_chan->lock);
+ } else {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Completion packet for invalid channel ID: %d\n", chan);
+ }
+
+ mhi_del_ring_element(mhi_cntrl, mhi_ring);
+}
+
+int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ u32 event_quota)
+{
+ struct mhi_tre *dev_rp, *local_rp;
+ struct mhi_ring *ev_ring = &mhi_event->ring;
+ struct mhi_event_ctxt *er_ctxt =
+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ struct mhi_chan *mhi_chan;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 chan;
+ int count = 0;
+ dma_addr_t ptr = er_ctxt->rp;
+
+ /*
+ * This is a quick check to avoid unnecessary event processing
+ * in case MHI is already in error state, but it's still possible
+ * to transition to error state while processing events
+ */
+ if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
+ return -EIO;
+
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ return -EIO;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
+ local_rp = ev_ring->rp;
+
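+ /* Process events until the local read pointer catches up with the device's */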
+ while (dev_rp != local_rp) {
+ enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
+
+ switch (type) {
+ case MHI_PKT_TYPE_BW_REQ_EVENT:
+ {
+ struct mhi_link_info *link_info;
+
+ link_info = &mhi_cntrl->mhi_link_info;
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ link_info->target_link_speed =
+ MHI_TRE_GET_EV_LINKSPEED(local_rp);
+ link_info->target_link_width =
+ MHI_TRE_GET_EV_LINKWIDTH(local_rp);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_dbg(dev, "Received BW_REQ event\n");
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
+ break;
+ }
+ case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
+ {
+ enum mhi_state new_state;
+
+ new_state = MHI_TRE_GET_EV_STATE(local_rp);
+
+ dev_dbg(dev, "State change event to state: %s\n",
+ TO_MHI_STATE_STR(new_state));
+
+ switch (new_state) {
+ case MHI_STATE_M0:
+ mhi_pm_m0_transition(mhi_cntrl);
+ break;
+ case MHI_STATE_M1:
+ mhi_pm_m1_transition(mhi_cntrl);
+ break;
+ case MHI_STATE_M3:
+ mhi_pm_m3_transition(mhi_cntrl);
+ break;
+ case MHI_STATE_SYS_ERR:
+ {
+ enum mhi_pm_state pm_state;
+
+ /* skip SYS_ERROR handling if RDDM supported */
+ if (mhi_cntrl->ee == MHI_EE_RDDM ||
+ mhi_cntrl->rddm_image)
+ break;
+
+ dev_dbg(dev, "System error detected\n");
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ pm_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_DETECT);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (pm_state == MHI_PM_SYS_ERR_DETECT)
+ mhi_pm_sys_err_handler(mhi_cntrl);
+ break;
+ }
+ default:
+ dev_err(dev, "Invalid state: %s\n",
+ TO_MHI_STATE_STR(new_state));
+ }
+
+ break;
+ }
+ case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
+ mhi_process_cmd_completion(mhi_cntrl, local_rp);
+ break;
+ case MHI_PKT_TYPE_EE_EVENT:
+ {
+ enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
+ enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
+
+ dev_dbg(dev, "Received EE event: %s\n",
+ TO_MHI_EXEC_STR(event));
+ switch (event) {
+ case MHI_EE_SBL:
+ st = DEV_ST_TRANSITION_SBL;
+ break;
+ case MHI_EE_WFW:
+ case MHI_EE_AMSS:
+ st = DEV_ST_TRANSITION_MISSION_MODE;
+ break;
+ case MHI_EE_RDDM:
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->ee = event;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ wake_up_all(&mhi_cntrl->state_event);
+ break;
+ default:
+ dev_err(dev,
+ "Unhandled EE event: 0x%x\n", event);
+ }
+ if (st != DEV_ST_TRANSITION_MAX)
+ mhi_queue_state_transition(mhi_cntrl, st);
+
+ break;
+ }
+ case MHI_PKT_TYPE_TX_EVENT:
+ chan = MHI_TRE_GET_EV_CHID(local_rp);
+
+ WARN_ON(chan >= mhi_cntrl->max_chan);
+
+ /*
+ * Only process the event ring elements whose channel
+ * ID is within the maximum supported range.
+ */
+ if (chan < mhi_cntrl->max_chan) {
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ if (!mhi_chan->configured)
+ break;
+ parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ }
+ break;
+ default:
+ dev_err(dev, "Unhandled event type: %d\n", type);
+ break;
+ }
+
+ mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+ local_rp = ev_ring->rp;
+
+ ptr = er_ctxt->rp;
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ return -EIO;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
+ count++;
+ }
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+ mhi_ring_er_db(mhi_event);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return count;
+}
+
+int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ u32 event_quota)
+{
+ struct mhi_tre *dev_rp, *local_rp;
+ struct mhi_ring *ev_ring = &mhi_event->ring;
+ struct mhi_event_ctxt *er_ctxt =
+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ int count = 0;
+ u32 chan;
+ struct mhi_chan *mhi_chan;
+ dma_addr_t ptr = er_ctxt->rp;
+
+ if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
+ return -EIO;
+
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ return -EIO;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
+ local_rp = ev_ring->rp;
+
+ while (dev_rp != local_rp && event_quota > 0) {
+ enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
+
+ chan = MHI_TRE_GET_EV_CHID(local_rp);
+
+ WARN_ON(chan >= mhi_cntrl->max_chan);
+
+ /*
+ * Only process the event ring elements whose channel
+ * ID is within the maximum supported range.
+ */
+ if (chan < mhi_cntrl->max_chan &&
+ mhi_cntrl->mhi_chan[chan].configured) {
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+
+ if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
+ parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
+ parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ }
+ }
+
+ mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+ local_rp = ev_ring->rp;
+
+ ptr = er_ctxt->rp;
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ return -EIO;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
+ count++;
+ }
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+ mhi_ring_er_db(mhi_event);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return count;
+}
+
+void mhi_ev_task(unsigned long data)
+{
+ struct mhi_event *mhi_event = (struct mhi_event *)data;
+ struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+
+ /* process all pending events */
+ spin_lock_bh(&mhi_event->lock);
+ mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+ spin_unlock_bh(&mhi_event->lock);
+}
+
+void mhi_ctrl_ev_task(unsigned long data)
+{
+ struct mhi_event *mhi_event = (struct mhi_event *)data;
+ struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state state;
+ enum mhi_pm_state pm_state = 0;
+ int ret;
+
+ /*
+ * We can check the PM state w/o a lock here because there is no way
+ * the PM state can change from reg access valid to no access while
+ * this thread is executing.
+ */
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ /*
+ * We may have a pending event but are not allowed to
+ * process it since we are probably in a suspended state,
+ * so trigger a resume.
+ */
+ mhi_trigger_resume(mhi_cntrl);
+
+ return;
+ }
+
+ /* Process ctrl events */
+ ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+
+ /*
+ * We received an IRQ but no events to process, maybe device went to
+ * SYS_ERR state? Check the state to confirm.
+ */
+ if (!ret) {
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ state = mhi_get_mhi_state(mhi_cntrl);
+ if (state == MHI_STATE_SYS_ERR) {
+ dev_dbg(dev, "System error detected\n");
+ pm_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_DETECT);
+ }
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (pm_state == MHI_PM_SYS_ERR_DETECT)
+ mhi_pm_sys_err_handler(mhi_cntrl);
+ }
+}
+
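+/*
+ * One ring element is always left unused so that a full ring (WP + 1 == RP)
+ * can be distinguished from an empty one (WP == RP).
+ */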
+static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ void *tmp = ring->wp + ring->el_size;
+
+ if (tmp >= (ring->base + ring->len))
+ tmp = ring->base;
+
+ return (tmp == ring->rp);
+}
+
+int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct sk_buff *skb, size_t len, enum mhi_flags mflags)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
+ mhi_dev->dl_chan;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+ struct mhi_buf_info buf_info = { };
+ int ret;
+
+ /* If MHI host pre-allocates buffers then client drivers cannot queue */
+ if (mhi_chan->pre_alloc)
+ return -EINVAL;
+
+ if (mhi_is_ring_full(mhi_cntrl, tre_ring))
+ return -ENOMEM;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ return -EIO;
+ }
+
+ /* we're in M3 or transitioning to M3 */
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+ mhi_trigger_resume(mhi_cntrl);
+
+ /* Toggle wake to exit out of M2 */
+ mhi_cntrl->wake_toggle(mhi_cntrl);
+
+ buf_info.v_addr = skb->data;
+ buf_info.cb_buf = skb;
+ buf_info.len = len;
+
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
+ if (unlikely(ret)) {
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ return ret;
+ }
+
+ if (mhi_chan->dir == DMA_TO_DEVICE)
+ atomic_inc(&mhi_cntrl->pending_pkts);
+
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+ read_lock_bh(&mhi_chan->lock);
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ read_unlock_bh(&mhi_chan->lock);
+ }
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_queue_skb);
+
+int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
+ mhi_dev->dl_chan;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+ struct mhi_buf_info buf_info = { };
+ int ret;
+
+ /* If MHI host pre-allocates buffers then client drivers cannot queue */
+ if (mhi_chan->pre_alloc)
+ return -EINVAL;
+
+ if (mhi_is_ring_full(mhi_cntrl, tre_ring))
+ return -ENOMEM;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+ dev_err(dev, "MHI is not in active state, PM state: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return -EIO;
+ }
+
+ /* we're in M3 or transitioning to M3 */
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+ mhi_trigger_resume(mhi_cntrl);
+
+ /* Toggle wake to exit out of M2 */
+ mhi_cntrl->wake_toggle(mhi_cntrl);
+
+ buf_info.p_addr = mhi_buf->dma_addr;
+ buf_info.cb_buf = mhi_buf;
+ buf_info.pre_mapped = true;
+ buf_info.len = len;
+
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
+ if (unlikely(ret)) {
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ return ret;
+ }
+
+ if (mhi_chan->dir == DMA_TO_DEVICE)
+ atomic_inc(&mhi_cntrl->pending_pkts);
+
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+ read_lock_bh(&mhi_chan->lock);
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ read_unlock_bh(&mhi_chan->lock);
+ }
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_queue_dma);
+
+int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ struct mhi_buf_info *info, enum mhi_flags flags)
+{
+ struct mhi_ring *buf_ring, *tre_ring;
+ struct mhi_tre *mhi_tre;
+ struct mhi_buf_info *buf_info;
+ int eot, eob, chain, bei;
+ int ret;
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+
+ buf_info = buf_ring->wp;
+ WARN_ON(buf_info->used);
+ buf_info->pre_mapped = info->pre_mapped;
+ if (info->pre_mapped)
+ buf_info->p_addr = info->p_addr;
+ else
+ buf_info->v_addr = info->v_addr;
+ buf_info->cb_buf = info->cb_buf;
+ buf_info->wp = tre_ring->wp;
+ buf_info->dir = mhi_chan->dir;
+ buf_info->len = info->len;
+
+ if (!info->pre_mapped) {
+ ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+ if (ret)
+ return ret;
+ }
+
+ eob = !!(flags & MHI_EOB);
+ eot = !!(flags & MHI_EOT);
+ chain = !!(flags & MHI_CHAIN);
+ bei = !!(mhi_chan->intmod);
+
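+ /* Pack the TRE: buffer DMA address, transfer length, and BEI/EOT/EOB/chain flags */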
+ mhi_tre = tre_ring->wp;
+ mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
+ mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
+ mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
+
+ /* increment WP */
+ mhi_add_ring_element(mhi_cntrl, tre_ring);
+ mhi_add_ring_element(mhi_cntrl, buf_ring);
+
+ return 0;
+}
+
+int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ void *buf, size_t len, enum mhi_flags mflags)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
+ mhi_dev->dl_chan;
+ struct mhi_ring *tre_ring;
+ struct mhi_buf_info buf_info = { };
+ unsigned long flags;
+ int ret;
+
+ /*
+ * This check is only a guard; it's always possible for MHI to enter
+ * an error state while the rest of the function executes. That is
+ * not fatal, so we do not need to hold pm_lock here.
+ */
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
+ return -EIO;
+
+ tre_ring = &mhi_chan->tre_ring;
+ if (mhi_is_ring_full(mhi_cntrl, tre_ring))
+ return -ENOMEM;
+
+ buf_info.v_addr = buf;
+ buf_info.cb_buf = buf;
+ buf_info.len = len;
+
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
+ if (unlikely(ret))
+ return ret;
+
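+ /*
+ * Unlike mhi_queue_skb/dma, pm_lock is taken with interrupts
+ * disabled here since callers may invoke mhi_queue_buf() from
+ * interrupt context (it is also used to recycle pre-allocated
+ * buffers in parse_xfer_event()).
+ */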
+ read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+
+ /* we're in M3 or transitioning to M3 */
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+ mhi_trigger_resume(mhi_cntrl);
+
+ /* Toggle wake to exit out of M2 */
+ mhi_cntrl->wake_toggle(mhi_cntrl);
+
+ if (mhi_chan->dir == DMA_TO_DEVICE)
+ atomic_inc(&mhi_cntrl->pending_pkts);
+
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+ unsigned long flags;
+
+ read_lock_irqsave(&mhi_chan->lock, flags);
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ read_unlock_irqrestore(&mhi_chan->lock, flags);
+ }
+
+ read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_queue_buf);
+
+int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan,
+ enum mhi_cmd_type cmd)
+{
+ struct mhi_tre *cmd_tre = NULL;
+ struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+ struct mhi_ring *ring = &mhi_cmd->ring;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int chan = 0;
+
+ if (mhi_chan)
+ chan = mhi_chan->chan;
+
+ spin_lock_bh(&mhi_cmd->lock);
+ if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
+ spin_unlock_bh(&mhi_cmd->lock);
+ return -ENOMEM;
+ }
+
+ /* prepare the cmd tre */
+ cmd_tre = ring->wp;
+ switch (cmd) {
+ case MHI_CMD_RESET_CHAN:
+ cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
+ cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
+ cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
+ break;
+ case MHI_CMD_START_CHAN:
+ cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
+ cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
+ cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
+ break;
+ default:
+ dev_err(dev, "Command not supported\n");
+ break;
+ }
+
+ /* queue to hardware */
+ mhi_add_ring_element(mhi_cntrl, ring);
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+ mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ spin_unlock_bh(&mhi_cmd->lock);
+
+ return 0;
+}
+
+static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ int ret;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ dev_dbg(dev, "Entered: unprepare channel:%d\n", mhi_chan->chan);
+
+ /* no more event processing for this channel */
+ mutex_lock(&mhi_chan->mutex);
+ write_lock_irq(&mhi_chan->lock);
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
+ write_unlock_irq(&mhi_chan->lock);
+ mutex_unlock(&mhi_chan->mutex);
+ return;
+ }
+
+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+ write_unlock_irq(&mhi_chan->lock);
+
+ reinit_completion(&mhi_chan->completion);
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ goto error_invalid_state;
+ }
+
+ mhi_cntrl->wake_toggle(mhi_cntrl);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
+ if (ret)
+ goto error_invalid_state;
+
+ /* even if it fails we will still reset */
+ ret = wait_for_completion_timeout(&mhi_chan->completion,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
+ dev_err(dev,
+ "Failed to receive cmd completion, still resetting\n");
+
+error_invalid_state:
+ if (!mhi_chan->offload_ch) {
+ mhi_reset_chan(mhi_cntrl, mhi_chan);
+ mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+ }
+ dev_dbg(dev, "chan:%d successfully reset\n", mhi_chan->chan);
+ mutex_unlock(&mhi_chan->mutex);
+}
+
+int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ int ret = 0;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ dev_dbg(dev, "Preparing channel: %d\n", mhi_chan->chan);
+
+ if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
+ dev_err(dev,
+ "Current EE: %s Required EE Mask: 0x%x for chan: %s\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask,
+ mhi_chan->name);
+ return -ENOTCONN;
+ }
+
+ mutex_lock(&mhi_chan->mutex);
+
+ /* If the channel is not in disabled state, do not allow it to start */
+ if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) {
+ ret = -EIO;
+ dev_dbg(dev, "channel: %d is not in disabled state\n",
+ mhi_chan->chan);
+ goto error_init_chan;
+ }
+
+ /* The client manages the channel context for offload channels */
+ if (!mhi_chan->offload_ch) {
+ ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
+ if (ret)
+ goto error_init_chan;
+ }
+
+ reinit_completion(&mhi_chan->completion);
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ ret = -EIO;
+ goto error_pm_state;
+ }
+
+ mhi_cntrl->wake_toggle(mhi_cntrl);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+
+ ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
+ if (ret)
+ goto error_pm_state;
+
+ ret = wait_for_completion_timeout(&mhi_chan->completion,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
+ ret = -EIO;
+ goto error_pm_state;
+ }
+
+ write_lock_irq(&mhi_chan->lock);
+ mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
+ write_unlock_irq(&mhi_chan->lock);
+
+ /* Pre-allocate buffer for xfer ring */
+ if (mhi_chan->pre_alloc) {
+ int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
+ &mhi_chan->tre_ring);
+ size_t len = mhi_cntrl->buffer_len;
+
+ while (nr_el--) {
+ void *buf;
+ struct mhi_buf_info info = { };
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto error_pre_alloc;
+ }
+
+ /* Prepare transfer descriptors */
+ info.v_addr = buf;
+ info.cb_buf = buf;
+ info.len = len;
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
+ if (ret) {
+ kfree(buf);
+ goto error_pre_alloc;
+ }
+ }
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
+ read_lock_irq(&mhi_chan->lock);
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ read_unlock_irq(&mhi_chan->lock);
+ }
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ }
+
+ mutex_unlock(&mhi_chan->mutex);
+
+ dev_dbg(dev, "Chan: %d successfully moved to start state\n",
+ mhi_chan->chan);
+
+ return 0;
+
+error_pm_state:
+ if (!mhi_chan->offload_ch)
+ mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+
+error_init_chan:
+ mutex_unlock(&mhi_chan->mutex);
+
+ return ret;
+
+error_pre_alloc:
+ mutex_unlock(&mhi_chan->mutex);
+ __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+
+ return ret;
+}
+
+static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ struct mhi_event_ctxt *er_ctxt,
+ int chan)
+
+{
+ struct mhi_tre *dev_rp, *local_rp;
+ struct mhi_ring *ev_ring;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ unsigned long flags;
+ dma_addr_t ptr;
+
+ dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
+
+ ev_ring = &mhi_event->ring;
+
+ /* mark all pending TX events related to the channel as stale */
+ spin_lock_irqsave(&mhi_event->lock, flags);
+
+ ptr = er_ctxt->rp;
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ dev_rp = ev_ring->rp;
+ } else {
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
+ }
+
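+ /*
+ * Rewrite the packet type of each pending TX event for this channel
+ * so that later event processing skips it instead of completing a
+ * buffer that is about to be freed.
+ */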
+ local_rp = ev_ring->rp;
+ while (dev_rp != local_rp) {
+ if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
+ chan == MHI_TRE_GET_EV_CHID(local_rp))
+ local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
+ MHI_PKT_TYPE_STALE_EVENT);
+ local_rp++;
+ if (local_rp == (ev_ring->base + ev_ring->len))
+ local_rp = ev_ring->base;
+ }
+
+ dev_dbg(dev, "Finished marking events as stale events\n");
+ spin_unlock_irqrestore(&mhi_event->lock, flags);
+}
+
+static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring, *tre_ring;
+ struct mhi_result result;
+
+ /* Reset any pending buffers */
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ while (tre_ring->rp != tre_ring->wp) {
+ struct mhi_buf_info *buf_info = buf_ring->rp;
+
+ if (mhi_chan->dir == DMA_TO_DEVICE)
+ atomic_dec(&mhi_cntrl->pending_pkts);
+
+ if (!buf_info->pre_mapped)
+ mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
+
+ mhi_del_ring_element(mhi_cntrl, buf_ring);
+ mhi_del_ring_element(mhi_cntrl, tre_ring);
+
+ if (mhi_chan->pre_alloc) {
+ kfree(buf_info->cb_buf);
+ } else {
+ result.buf_addr = buf_info->cb_buf;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ }
+ }
+}
+
+void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
+{
+ struct mhi_event *mhi_event;
+ struct mhi_event_ctxt *er_ctxt;
+ int chan = mhi_chan->chan;
+
+ /* Nothing to reset, client doesn't queue buffers */
+ if (mhi_chan->offload_ch)
+ return;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+ er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
+
+ mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
+
+ mhi_reset_data_chan(mhi_cntrl, mhi_chan);
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+}
+
+/* Move channel to start state */
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
+{
+ int ret, dir;
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan;
+
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
+ if (!mhi_chan)
+ continue;
+
+ ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
+ if (ret)
+ goto error_open_chan;
+ }
+
+ return 0;
+
+error_open_chan:
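+ /* Unwind any channels that were already prepared, in reverse order */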
+ for (--dir; dir >= 0; dir--) {
+ mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
+ if (!mhi_chan)
+ continue;
+
+ __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
+
+void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan;
+ int dir;
+
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+ if (!mhi_chan)
+ continue;
+
+ __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+ }
+}
+EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
+
+int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
+ struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+ int ret;
+
+ spin_lock_bh(&mhi_event->lock);
+ ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
+ spin_unlock_bh(&mhi_event->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_poll);
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
new file mode 100644
index 000000000..f5bee76ea
--- /dev/null
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MHI PCI driver - MHI over PCI controller driver
+ *
+ * This module is a generic driver for registering MHI-over-PCI devices,
+ * such as PCIe QCOM modems.
+ *
+ * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
+ */
+
+#include <linux/device.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#define MHI_PCI_DEFAULT_BAR_NUM 0
+
+/**
+ * struct mhi_pci_dev_info - MHI PCI device specific information
+ * @config: MHI controller configuration
+ * @name: name of the PCI module
+ * @fw: firmware path (if any)
+ * @edl: emergency download mode firmware path (if any)
+ * @bar_num: PCI base address register to use for MHI MMIO register space
+ * @dma_data_width: DMA transfer word size (32 or 64 bits)
+ */
+struct mhi_pci_dev_info {
+ const struct mhi_controller_config *config;
+ const char *name;
+ const char *fw;
+ const char *edl;
+ unsigned int bar_num;
+ unsigned int dma_data_width;
+};
+
+#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_TO_DEVICE, \
+ .ee_mask = BIT(MHI_EE_AMSS), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+ } \
+
+#define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_FROM_DEVICE, \
+ .ee_mask = BIT(MHI_EE_AMSS), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+ }
+
+#define MHI_EVENT_CONFIG_CTRL(ev_ring) \
+ { \
+ .num_elements = 64, \
+ .irq_moderation_ms = 0, \
+ .irq = (ev_ring) + 1, \
+ .priority = 1, \
+ .mode = MHI_DB_BRST_DISABLE, \
+ .data_type = MHI_ER_CTRL, \
+ .hardware_event = false, \
+ .client_managed = false, \
+ .offload_channel = false, \
+ }
+
+#define MHI_EVENT_CONFIG_DATA(ev_ring) \
+ { \
+ .num_elements = 128, \
+ .irq_moderation_ms = 5, \
+ .irq = (ev_ring) + 1, \
+ .priority = 1, \
+ .mode = MHI_DB_BRST_DISABLE, \
+ .data_type = MHI_ER_DATA, \
+ .hardware_event = false, \
+ .client_managed = false, \
+ .offload_channel = false, \
+ }
+
+#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, ch_num) \
+ { \
+ .num_elements = 128, \
+ .irq_moderation_ms = 5, \
+ .irq = (ev_ring) + 1, \
+ .priority = 1, \
+ .mode = MHI_DB_BRST_DISABLE, \
+ .data_type = MHI_ER_DATA, \
+ .hardware_event = true, \
+ .client_managed = false, \
+ .offload_channel = false, \
+ .channel = ch_num, \
+ }
+
+static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0),
+ MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
+ MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
+ MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
+ MHI_CHANNEL_CONFIG_DL(21, "IPCR", 8, 0),
+ MHI_CHANNEL_CONFIG_UL(100, "IP_HW0", 128, 1),
+ MHI_CHANNEL_CONFIG_DL(101, "IP_HW0", 128, 2),
+};
+
+static const struct mhi_event_config modem_qcom_v1_mhi_events[] = {
+ /* first ring is control+data ring */
+ MHI_EVENT_CONFIG_CTRL(0),
+ /* Hardware channels request dedicated hardware event rings */
+ MHI_EVENT_CONFIG_HW_DATA(1, 100),
+ MHI_EVENT_CONFIG_HW_DATA(2, 101)
+};
+
+static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
+ .max_channels = 128,
+ .timeout_ms = 5000,
+ .num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
+ .ch_cfg = modem_qcom_v1_mhi_channels,
+ .num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
+ .event_cfg = modem_qcom_v1_mhi_events,
+};
+
+static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
+ .name = "qcom-sdx55m",
+ .fw = "qcom/sdx55m/sbl1.mbn",
+ .edl = "qcom/sdx55m/edl.mbn",
+ .config = &modem_qcom_v1_mhiv_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32
+};
+
+static const struct pci_device_id mhi_pci_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
+
+static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *addr, u32 *out)
+{
+ *out = readl(addr);
+ return 0;
+}
+
+static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *addr, u32 val)
+{
+ writel(val, addr);
+}
+
+static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
+ enum mhi_callback cb)
+{
+ /* Nothing to do for now */
+}
+
+static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
+ unsigned int bar_num, u64 dma_mask)
+{
+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+ int err;
+
+ err = pci_assign_resource(pdev, bar_num);
+ if (err)
+ return err;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable pci device: %d\n", err);
+ return err;
+ }
+
+ err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev));
+ if (err) {
+ dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
+ return err;
+ }
+ mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
+
+ err = pci_set_dma_mask(pdev, dma_mask);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot set proper DMA mask\n");
+ return err;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, dma_mask);
+ if (err) {
+ dev_err(&pdev->dev, "set consistent dma mask failed\n");
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+
+static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
+ const struct mhi_controller_config *mhi_cntrl_config)
+{
+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+ int nr_vectors, i;
+ int *irq;
+
+ /*
+ * Ideally, allocate one MSI vector for BHI plus one vector per event
+ * ring. No explicit pci_free_irq_vectors() is needed; cleanup is
+ * handled by pcim_release.
+ */
+ mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;
+
+ nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
+ if (nr_vectors < 0) {
+ dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
+ nr_vectors);
+ return nr_vectors;
+ }
+
+ if (nr_vectors < mhi_cntrl->nr_irqs) {
+ dev_warn(&pdev->dev, "Not enough MSI vectors (%d/%d), using shared MSI\n",
+ nr_vectors, mhi_cntrl->nr_irqs);
+ }
+
+ irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL);
+ if (!irq)
+ return -ENOMEM;
+
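+ /*
+ * If fewer vectors were granted than requested, the remaining event
+ * rings share the last allocated vector.
+ */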
+ for (i = 0; i < mhi_cntrl->nr_irqs; i++) {
+ int vector = i >= nr_vectors ? (nr_vectors - 1) : i;
+
+ irq[i] = pci_irq_vector(pdev, vector);
+ }
+
+ mhi_cntrl->irq = irq;
+
+ return 0;
+}
+
+static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl)
+{
+ /* no PM for now */
+ return 0;
+}
+
+static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
+{
+ /* no PM for now */
+}
+
+static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
+ const struct mhi_controller_config *mhi_cntrl_config;
+ struct mhi_controller *mhi_cntrl;
+ int err;
+
+ dev_dbg(&pdev->dev, "MHI PCI device found: %s\n", info->name);
+
+ mhi_cntrl = mhi_alloc_controller();
+ if (!mhi_cntrl)
+ return -ENOMEM;
+
+ mhi_cntrl_config = info->config;
+ mhi_cntrl->cntrl_dev = &pdev->dev;
+ mhi_cntrl->iova_start = 0;
+ mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
+ mhi_cntrl->fw_image = info->fw;
+ mhi_cntrl->edl_image = info->edl;
+
+ mhi_cntrl->read_reg = mhi_pci_read_reg;
+ mhi_cntrl->write_reg = mhi_pci_write_reg;
+ mhi_cntrl->status_cb = mhi_pci_status_cb;
+ mhi_cntrl->runtime_get = mhi_pci_runtime_get;
+ mhi_cntrl->runtime_put = mhi_pci_runtime_put;
+
+ err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
+ if (err)
+ goto err_release;
+
+ err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config);
+ if (err)
+ goto err_release;
+
+ pci_set_drvdata(pdev, mhi_cntrl);
+
+ err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
+ if (err)
+ goto err_release;
+
+ /* MHI bus does not power up the controller by default */
+ err = mhi_prepare_for_power_up(mhi_cntrl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to prepare MHI controller\n");
+ goto err_unregister;
+ }
+
+ err = mhi_sync_power_up(mhi_cntrl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to power up MHI controller\n");
+ goto err_unprepare;
+ }
+
+ return 0;
+
+err_unprepare:
+ mhi_unprepare_after_power_down(mhi_cntrl);
+err_unregister:
+ mhi_unregister_controller(mhi_cntrl);
+err_release:
+ mhi_free_controller(mhi_cntrl);
+
+ return err;
+}
+
+static void mhi_pci_remove(struct pci_dev *pdev)
+{
+ struct mhi_controller *mhi_cntrl = pci_get_drvdata(pdev);
+
+ mhi_power_down(mhi_cntrl, true);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ mhi_unregister_controller(mhi_cntrl);
+ mhi_free_controller(mhi_cntrl);
+}
+
+static struct pci_driver mhi_pci_driver = {
+ .name = "mhi-pci-generic",
+ .id_table = mhi_pci_id_table,
+ .probe = mhi_pci_probe,
+ .remove = mhi_pci_remove
+};
+module_pci_driver(mhi_pci_driver);
+
+MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
+MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
new file mode 100644
index 000000000..fe8ecd6ea
--- /dev/null
+++ b/drivers/bus/mhi/host/pm.c
@@ -0,0 +1,1162 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include "internal.h"
+
+/*
+ * Not all MHI state transitions are synchronous. Transitions like Linkdown,
+ * SYS_ERR, and shutdown can happen anytime asynchronously. This function will
+ * transition to a new state only if we're allowed to.
+ *
+ * Priority increases as we go down. For instance, from any state in L0, the
+ * transition can be made to states in L1, L2 and L3. A notable exception to
+ * this rule is state DISABLE. From DISABLE state we can only transition to
+ * POR state. Also, while in L2 state, user cannot jump back to previous
+ * L1 or L0 states.
+ *
+ * Valid transitions:
+ * L0: DISABLE <--> POR
+ * POR <--> POR
+ * POR -> M0 -> M2 --> M0
+ * POR -> FW_DL_ERR
+ * FW_DL_ERR <--> FW_DL_ERR
+ * M0 <--> M0
+ * M0 -> FW_DL_ERR
+ * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
+ * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
+ * L2: SHUTDOWN_PROCESS -> DISABLE
+ * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
+ * LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
+ */
+static struct mhi_pm_transitions const dev_state_transitions[] = {
+ /* L0 States */
+ {
+ MHI_PM_DISABLE,
+ MHI_PM_POR
+ },
+ {
+ MHI_PM_POR,
+ MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+ },
+ {
+ MHI_PM_M0,
+ MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+ },
+ {
+ MHI_PM_M2,
+ MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M3_ENTER,
+ MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M3,
+ MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M3_EXIT,
+ MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_FW_DL_ERR,
+ MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L1 States */
+ {
+ MHI_PM_SYS_ERR_DETECT,
+ MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_SYS_ERR_PROCESS,
+ MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L2 States */
+ {
+ MHI_PM_SHUTDOWN_PROCESS,
+ MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L3 States */
+ {
+ MHI_PM_LD_ERR_FATAL_DETECT,
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS
+ },
+};
+
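+/*
+ * Each PM state is a single bit, so find_last_bit() on the current state
+ * yields its index into the dev_state_transitions table above.
+ */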
+enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
+ enum mhi_pm_state state)
+{
+ unsigned long cur_state = mhi_cntrl->pm_state;
+ int index = find_last_bit(&cur_state, 32);
+
+ if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
+ return cur_state;
+
+ if (unlikely(dev_state_transitions[index].from_state != cur_state))
+ return cur_state;
+
+ if (unlikely(!(dev_state_transitions[index].to_states & state)))
+ return cur_state;
+
+ mhi_cntrl->pm_state = state;
+ return mhi_cntrl->pm_state;
+}
+
+void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
+{
+ if (state == MHI_STATE_RESET) {
+ mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1);
+ } else {
+ mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_MHISTATE_MASK,
+ MHICTRL_MHISTATE_SHIFT, state);
+ }
+}
+
+/* NOP for backward compatibility, host allowed to ring DB in M2 state */
+static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
+{
+ mhi_cntrl->wake_get(mhi_cntrl, false);
+ mhi_cntrl->wake_put(mhi_cntrl, true);
+}
+
+/* Handle device ready state transition */
+int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
+{
+ void __iomem *base = mhi_cntrl->regs;
+ struct mhi_event *mhi_event;
+ enum mhi_pm_state cur_state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 reset = 1, ready = 0;
+ int ret, i;
+
+ /* Wait for RESET to be cleared and READY bit to be set by the device */
+ wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl, base, MHICTRL,
+ MHICTRL_RESET_MASK,
+ MHICTRL_RESET_SHIFT, &reset) ||
+ mhi_read_reg_field(mhi_cntrl, base, MHISTATUS,
+ MHISTATUS_READY_MASK,
+ MHISTATUS_READY_SHIFT, &ready) ||
+ (!reset && ready),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ /* Check if device entered error state */
+ if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev, "Device link is not accessible\n");
+ return -EIO;
+ }
+
+ /* Timeout if device did not transition to ready state */
+ if (reset || !ready) {
+ dev_err(dev, "Device Ready timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(dev, "Device in READY State\n");
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
+ mhi_cntrl->dev_state = MHI_STATE_READY;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ if (cur_state != MHI_PM_POR) {
+ dev_err(dev, "Error moving to state %s from %s\n",
+ to_mhi_pm_state_str(MHI_PM_POR),
+ to_mhi_pm_state_str(cur_state));
+ return -EIO;
+ }
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ dev_err(dev, "Device registers not accessible\n");
+ goto error_mmio;
+ }
+
+ /* Configure MMIO registers */
+ ret = mhi_init_mmio(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Error configuring MMIO registers\n");
+ goto error_mmio;
+ }
+
+ /* Add elements to all SW event rings */
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ /* Skip if this is an offload or HW event */
+ if (mhi_event->offload_ev || mhi_event->hw_ring)
+ continue;
+
+ ring->wp = ring->base + ring->len - ring->el_size;
+ *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
+ /* Update all cores */
+ smp_wmb();
+
+ /* Ring the event ring db */
+ spin_lock_irq(&mhi_event->lock);
+ mhi_ring_er_db(mhi_event);
+ spin_unlock_irq(&mhi_event->lock);
+ }
+
+ /* Set MHI to M0 state */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return 0;
+
+error_mmio:
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return -EIO;
+}
+
+int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_pm_state cur_state;
+ struct mhi_chan *mhi_chan;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int i;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->dev_state = MHI_STATE_M0;
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (unlikely(cur_state != MHI_PM_M0)) {
+ dev_err(dev, "Unable to transition to M0 state\n");
+ return -EIO;
+ }
+ mhi_cntrl->M0++;
+
+ /* Wake up the device */
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+
+ /* Ring all event rings and CMD ring only if we're in mission mode */
+ if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+ struct mhi_cmd *mhi_cmd =
+ &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ spin_lock_irq(&mhi_event->lock);
+ mhi_ring_er_db(mhi_event);
+ spin_unlock_irq(&mhi_event->lock);
+ }
+
+ /* Only ring primary cmd ring if ring is not empty */
+ spin_lock_irq(&mhi_cmd->lock);
+ if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
+ mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
+ spin_unlock_irq(&mhi_cmd->lock);
+ }
+
+ /* Ring channel DB registers */
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+ if (mhi_chan->db_cfg.reset_req) {
+ write_lock_irq(&mhi_chan->lock);
+ mhi_chan->db_cfg.db_mode = true;
+ write_unlock_irq(&mhi_chan->lock);
+ }
+
+ read_lock_irq(&mhi_chan->lock);
+
+ /* Only ring DB if ring is not empty */
+ if (tre_ring->base && tre_ring->wp != tre_ring->rp &&
+ mhi_chan->ch_state == MHI_CH_STATE_ENABLED)
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ read_unlock_irq(&mhi_chan->lock);
+ }
+
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ wake_up_all(&mhi_cntrl->state_event);
+
+ return 0;
+}
+
+/*
+ * After receiving the MHI state change event from the device indicating the
+ * transition to M1 state, the host can transition the device to M2 state
+ * to keep it in a low power state.
+ */
+void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_pm_state state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
+ if (state == MHI_PM_M2) {
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
+ mhi_cntrl->dev_state = MHI_STATE_M2;
+
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ mhi_cntrl->M2++;
+ wake_up_all(&mhi_cntrl->state_event);
+
+ /* If there are any pending resources, exit M2 immediately */
+ if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
+ atomic_read(&mhi_cntrl->dev_wake))) {
+ dev_dbg(dev,
+ "Exiting M2, pending_pkts: %d dev_wake: %d\n",
+ atomic_read(&mhi_cntrl->pending_pkts),
+ atomic_read(&mhi_cntrl->dev_wake));
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+ mhi_cntrl->wake_put(mhi_cntrl, true);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ } else {
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
+ }
+ } else {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ }
+}
+
+/* MHI M3 completion handler */
+int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_pm_state state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->dev_state = MHI_STATE_M3;
+ state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (state != MHI_PM_M3) {
+ dev_err(dev, "Unable to transition to M3 state\n");
+ return -EIO;
+ }
+
+ mhi_cntrl->M3++;
+ wake_up_all(&mhi_cntrl->state_event);
+
+ return 0;
+}
+
+/* Handle device Mission Mode transition */
+static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_event *mhi_event;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_ee_type current_ee = mhi_cntrl->ee;
+ int i, ret;
+
+ dev_dbg(dev, "Processing Mission Mode transition\n");
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+ mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
+ return -EIO;
+
+ wake_up_all(&mhi_cntrl->state_event);
+
+ device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
+ mhi_destroy_device);
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
+
+ /* Force MHI to be in M0 state before continuing */
+ ret = __mhi_device_get_sync(mhi_cntrl);
+ if (ret)
+ return ret;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ ret = -EIO;
+ goto error_mission_mode;
+ }
+
+ /* Add elements to all HW event rings */
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ if (mhi_event->offload_ev || !mhi_event->hw_ring)
+ continue;
+
+ ring->wp = ring->base + ring->len - ring->el_size;
+ *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
+ /* Update to all cores */
+ smp_wmb();
+
+ spin_lock_irq(&mhi_event->lock);
+ if (MHI_DB_ACCESS_VALID(mhi_cntrl))
+ mhi_ring_er_db(mhi_event);
+ spin_unlock_irq(&mhi_event->lock);
+ }
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ /*
+ * The MHI devices are only created when the client device switches its
+ * Execution Environment (EE) to either SBL or AMSS states
+ */
+ mhi_create_devices(mhi_cntrl);
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+
+error_mission_mode:
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return ret;
+}
+
+/* Handle SYS_ERR and Shutdown transitions */
+static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
+ enum mhi_pm_state transition_state)
+{
+ enum mhi_pm_state cur_state, prev_state;
+ struct mhi_event *mhi_event;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_event_ctxt *er_ctxt;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret, i;
+
+ dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ to_mhi_pm_state_str(transition_state));
+
+ /* We must notify MHI control driver so it can clean up first */
+ if (transition_state == MHI_PM_SYS_ERR_PROCESS)
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ prev_state = mhi_cntrl->pm_state;
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
+ if (cur_state == transition_state) {
+ mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
+ mhi_cntrl->dev_state = MHI_STATE_RESET;
+ }
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ /* Wake up threads waiting for state transition */
+ wake_up_all(&mhi_cntrl->state_event);
+
+ if (cur_state != transition_state) {
+ dev_err(dev, "Failed to transition to state: %s from: %s\n",
+ to_mhi_pm_state_str(transition_state),
+ to_mhi_pm_state_str(cur_state));
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ return;
+ }
+
+ /* Trigger MHI RESET so that the device will not access host memory */
+ if (MHI_REG_ACCESS_VALID(prev_state)) {
+ u32 in_reset = -1;
+ unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
+
+ /* Skip MHI RESET if in RDDM state */
+ if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM)
+ goto skip_mhi_reset;
+
+ dev_dbg(dev, "Triggering MHI Reset in device\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+
+ /* Wait for the reset bit to be cleared by the device */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_read_reg_field(mhi_cntrl,
+ mhi_cntrl->regs,
+ MHICTRL,
+ MHICTRL_RESET_MASK,
+ MHICTRL_RESET_SHIFT,
+ &in_reset) ||
+ !in_reset, timeout);
+ if ((!ret || in_reset) && cur_state == MHI_PM_SYS_ERR_PROCESS) {
+ dev_err(dev, "Device failed to exit MHI Reset state\n");
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ return;
+ }
+
+ /*
+ * Device will clear BHI_INTVEC as a part of RESET processing,
+ * hence re-program it
+ */
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+ }
+
+skip_mhi_reset:
+ dev_dbg(dev,
+ "Waiting for all pending event ring processing to complete\n");
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+ tasklet_kill(&mhi_event->task);
+ }
+
+ /* Release lock and wait for all pending threads to complete */
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ dev_dbg(dev, "Waiting for all pending threads to complete\n");
+ wake_up_all(&mhi_cntrl->state_event);
+
+ dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
+ device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device);
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+
+ WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
+ WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));
+
+ /* Reset the ev rings and cmd rings */
+ dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ ring->rp = ring->base;
+ ring->wp = ring->base;
+ cmd_ctxt->rp = cmd_ctxt->rbase;
+ cmd_ctxt->wp = cmd_ctxt->rbase;
+ }
+
+ mhi_event = mhi_cntrl->mhi_event;
+ er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
+ mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ /* Skip offload events */
+ if (mhi_event->offload_ev)
+ continue;
+
+ ring->rp = ring->base;
+ ring->wp = ring->base;
+ er_ctxt->rp = er_ctxt->rbase;
+ er_ctxt->wp = er_ctxt->rbase;
+ }
+
+ if (cur_state == MHI_PM_SYS_ERR_PROCESS) {
+ mhi_ready_state_transition(mhi_cntrl);
+ } else {
+ /* Move to disable state */
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (unlikely(cur_state != MHI_PM_DISABLE))
+ dev_err(dev, "Error moving from PM state: %s to: %s\n",
+ to_mhi_pm_state_str(cur_state),
+ to_mhi_pm_state_str(MHI_PM_DISABLE));
+ }
+
+ dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+}
+
+/* Queue a new work item and schedule work */
+int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
+ enum dev_st_transition state)
+{
+ struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
+ unsigned long flags;
+
+ if (!item)
+ return -ENOMEM;
+
+ item->state = state;
+ spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
+ list_add_tail(&item->node, &mhi_cntrl->transition_list);
+ spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);
+
+ schedule_work(&mhi_cntrl->st_worker);
+
+ return 0;
+}
+
+/* SYS_ERR worker */
+void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ /* skip if controller supports RDDM */
+ if (mhi_cntrl->rddm_image) {
+ dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
+ return;
+ }
+
+ mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
+}
+
+/* Device State Transition worker */
+void mhi_pm_st_worker(struct work_struct *work)
+{
+ struct state_transition *itr, *tmp;
+ LIST_HEAD(head);
+ struct mhi_controller *mhi_cntrl = container_of(work,
+ struct mhi_controller,
+ st_worker);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ spin_lock_irq(&mhi_cntrl->transition_lock);
+ list_splice_tail_init(&mhi_cntrl->transition_list, &head);
+ spin_unlock_irq(&mhi_cntrl->transition_lock);
+
+ list_for_each_entry_safe(itr, tmp, &head, node) {
+ list_del(&itr->node);
+ dev_dbg(dev, "Handling state transition: %s\n",
+ TO_DEV_STATE_TRANS_STR(itr->state));
+
+ switch (itr->state) {
+ case DEV_ST_TRANSITION_PBL:
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+ mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (MHI_IN_PBL(mhi_cntrl->ee))
+ mhi_fw_load_handler(mhi_cntrl);
+ break;
+ case DEV_ST_TRANSITION_SBL:
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->ee = MHI_EE_SBL;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+			/*
+			 * The MHI devices are only created when the client
+			 * device switches its Execution Environment (EE) to
+			 * either the SBL or AMSS state
+			 */
+ mhi_create_devices(mhi_cntrl);
+ break;
+ case DEV_ST_TRANSITION_MISSION_MODE:
+ mhi_pm_mission_mode_transition(mhi_cntrl);
+ break;
+ case DEV_ST_TRANSITION_READY:
+ mhi_ready_state_transition(mhi_cntrl);
+ break;
+ case DEV_ST_TRANSITION_SYS_ERR:
+ mhi_pm_disable_transition
+ (mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
+ break;
+ case DEV_ST_TRANSITION_DISABLE:
+ mhi_pm_disable_transition
+ (mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
+ break;
+ default:
+ break;
+ }
+ kfree(itr);
+ }
+}
+
+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_chan *itr, *tmp;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state new_state;
+ int ret;
+
+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+ return -EINVAL;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ /* Return busy if there are any pending resources */
+ if (atomic_read(&mhi_cntrl->dev_wake) ||
+ atomic_read(&mhi_cntrl->pending_pkts))
+ return -EBUSY;
+
+ /* Take MHI out of M2 state */
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M0 ||
+ mhi_cntrl->dev_state == MHI_STATE_M1 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		dev_err(dev, "Could not enter M0/M1 state\n");
+ return -EIO;
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+
+ if (atomic_read(&mhi_cntrl->dev_wake) ||
+ atomic_read(&mhi_cntrl->pending_pkts)) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ return -EBUSY;
+ }
+
+ dev_info(dev, "Allowing M3 transition\n");
+ new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
+ if (new_state != MHI_PM_M3_ENTER) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_err(dev,
+ "Error setting to PM state: %s from: %s\n",
+ to_mhi_pm_state_str(MHI_PM_M3_ENTER),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Set MHI to M3 and wait for completion */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_info(dev, "Wait for M3 completion\n");
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M3 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev,
+ "Did not enter M3 state, MHI state: %s, PM state: %s\n",
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Notify clients about entering LPM */
+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+ mutex_lock(&itr->mutex);
+ if (itr->mhi_dev)
+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
+ mutex_unlock(&itr->mutex);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_pm_suspend);
+
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_chan *itr, *tmp;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state cur_state;
+ int ret;
+
+ dev_info(dev, "Entered with PM state: %s, MHI state: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+ return 0;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ /* Notify clients about exiting LPM */
+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+ mutex_lock(&itr->mutex);
+ if (itr->mhi_dev)
+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
+ mutex_unlock(&itr->mutex);
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
+ if (cur_state != MHI_PM_M3_EXIT) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_info(dev,
+ "Error setting to PM state: %s from: %s\n",
+ to_mhi_pm_state_str(MHI_PM_M3_EXIT),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Set MHI to M0 and wait for completion */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M0 ||
+ mhi_cntrl->dev_state == MHI_STATE_M2 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev,
+ "Did not enter M0 state, MHI state: %s, PM state: %s\n",
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_pm_resume);
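+
+/*
+ * Illustrative sketch, not part of this file: a controller glue driver
+ * (e.g. a PCI transport layer) would typically wire the two helpers
+ * above into its dev_pm_ops. The function names below are hypothetical:
+ *
+ *	static int __maybe_unused mhi_glue_suspend(struct device *dev)
+ *	{
+ *		struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
+ *
+ *		return mhi_pm_suspend(mhi_cntrl);
+ *	}
+ *
+ *	static int __maybe_unused mhi_glue_resume(struct device *dev)
+ *	{
+ *		struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
+ *
+ *		return mhi_pm_resume(mhi_cntrl);
+ *	}
+ *
+ *	static SIMPLE_DEV_PM_OPS(mhi_glue_pm_ops, mhi_glue_suspend,
+ *				 mhi_glue_resume);
+ */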
+
+int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
+{
+ int ret;
+
+ /* Wake up the device */
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+ mhi_trigger_resume(mhi_cntrl);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->pm_state == MHI_PM_M0 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Assert device wake db */
+static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
+{
+ unsigned long flags;
+
+ /*
+ * If force flag is set, then increment the wake count value and
+ * ring wake db
+ */
+ if (unlikely(force)) {
+ spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+ atomic_inc(&mhi_cntrl->dev_wake);
+ if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
+ !mhi_cntrl->wake_set) {
+ mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
+ mhi_cntrl->wake_set = true;
+ }
+ spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+ } else {
+ /*
+ * If resources are already requested, then just increment
+ * the wake count value and return
+ */
+ if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
+ return;
+
+ spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+ if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
+ MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
+ !mhi_cntrl->wake_set) {
+ mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
+ mhi_cntrl->wake_set = true;
+ }
+ spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+ }
+}
+
+/* De-assert device wake db */
+static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
+ bool override)
+{
+ unsigned long flags;
+
+ /*
+ * Only continue if there is a single resource, else just decrement
+ * and return
+ */
+ if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
+ return;
+
+ spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+ if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
+ MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
+ mhi_cntrl->wake_set) {
+ mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
+ mhi_cntrl->wake_set = false;
+ }
+ spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+}
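+
+/*
+ * Usage note (a sketch based on the callers in this file): wake votes
+ * are reference counted and must stay balanced. Device access is
+ * typically bracketed with a get/put pair under pm_lock:
+ *
+ *	read_lock_bh(&mhi_cntrl->pm_lock);
+ *	mhi_cntrl->wake_get(mhi_cntrl, false);
+ *	... access the device ...
+ *	mhi_cntrl->wake_put(mhi_cntrl, false);
+ *	read_unlock_bh(&mhi_cntrl->pm_lock);
+ */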
+
+int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_state state;
+ enum mhi_ee_type current_ee;
+ enum dev_st_transition next_state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 val;
+ int ret;
+
+ dev_info(dev, "Requested to power ON\n");
+
+ if (mhi_cntrl->nr_irqs < 1)
+ return -EINVAL;
+
+ /* Supply default wake routines if not provided by controller driver */
+ if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
+ !mhi_cntrl->wake_toggle) {
+ mhi_cntrl->wake_get = mhi_assert_dev_wake;
+ mhi_cntrl->wake_put = mhi_deassert_dev_wake;
+ mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
+ mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
+ }
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+ mhi_cntrl->pm_state = MHI_PM_DISABLE;
+
+ if (!mhi_cntrl->pre_init) {
+ /* Setup device context */
+ ret = mhi_init_dev_ctxt(mhi_cntrl);
+ if (ret)
+ goto error_dev_ctxt;
+ }
+
+ ret = mhi_init_irq_setup(mhi_cntrl);
+ if (ret)
+ goto error_setup_irq;
+
+ /* Setup BHI offset & INTVEC */
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val);
+ if (ret) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ goto error_bhi_offset;
+ }
+
+ mhi_cntrl->bhi = mhi_cntrl->regs + val;
+
+ /* Setup BHIE offset */
+ if (mhi_cntrl->fbc_download) {
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val);
+ if (ret) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_err(dev, "Error reading BHIE offset\n");
+ goto error_bhi_offset;
+ }
+
+ mhi_cntrl->bhie = mhi_cntrl->regs + val;
+ }
+
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+ mhi_cntrl->pm_state = MHI_PM_POR;
+ mhi_cntrl->ee = MHI_EE_MAX;
+ current_ee = mhi_get_exec_env(mhi_cntrl);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	/* Confirm that the device is in a valid exec env */
+ if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
+ dev_err(dev, "Not a valid EE for power on\n");
+ ret = -EIO;
+ goto error_bhi_offset;
+ }
+
+ state = mhi_get_mhi_state(mhi_cntrl);
+ if (state == MHI_STATE_SYS_ERR) {
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl,
+ mhi_cntrl->regs,
+ MHICTRL,
+ MHICTRL_RESET_MASK,
+ MHICTRL_RESET_SHIFT,
+ &val) ||
+ !val,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (!ret) {
+ ret = -EIO;
+ dev_info(dev, "Failed to reset MHI due to syserr state\n");
+ goto error_bhi_offset;
+ }
+
+		/*
+		 * Device clears BHI_INTVEC as part of RESET processing,
+		 * so re-program it
+		 */
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+ }
+
+ /* Transition to next state */
+ next_state = MHI_IN_PBL(current_ee) ?
+ DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;
+
+ mhi_queue_state_transition(mhi_cntrl, next_state);
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ dev_info(dev, "Power on setup success\n");
+
+ return 0;
+
+error_bhi_offset:
+ mhi_deinit_free_irq(mhi_cntrl);
+
+error_setup_irq:
+ if (!mhi_cntrl->pre_init)
+ mhi_deinit_dev_ctxt(mhi_cntrl);
+
+error_dev_ctxt:
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_async_power_up);
+
+void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
+{
+ enum mhi_pm_state cur_state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ /* If it's not a graceful shutdown, force MHI to linkdown state */
+ if (!graceful) {
+ mutex_lock(&mhi_cntrl->pm_mutex);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_LD_ERR_FATAL_DETECT);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT)
+ dev_dbg(dev, "Failed to move to state: %s from: %s\n",
+ to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ }
+
+ mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);
+
+ /* Wait for shutdown to complete */
+ flush_work(&mhi_cntrl->st_worker);
+
+ mhi_deinit_free_irq(mhi_cntrl);
+
+ if (!mhi_cntrl->pre_init) {
+ /* Free all allocated resources */
+ if (mhi_cntrl->fbc_image) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+ mhi_cntrl->fbc_image = NULL;
+ }
+ mhi_deinit_dev_ctxt(mhi_cntrl);
+ }
+}
+EXPORT_SYMBOL_GPL(mhi_power_down);
+
+int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
+{
+ int ret = mhi_async_power_up(mhi_cntrl);
+
+ if (ret)
+ return ret;
+
+ wait_event_timeout(mhi_cntrl->state_event,
+ MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
+ if (ret)
+ mhi_power_down(mhi_cntrl, false);
+
+ return ret;
+}
+EXPORT_SYMBOL(mhi_sync_power_up);
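+
+/*
+ * Bring-up sketch (controller glue code; the config variable is
+ * hypothetical and error handling is abbreviated):
+ *
+ *	err = mhi_register_controller(mhi_cntrl, &modem_config);
+ *	if (err)
+ *		return err;
+ *
+ *	err = mhi_sync_power_up(mhi_cntrl);
+ *	if (err)
+ *		mhi_unregister_controller(mhi_cntrl);
+ */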
+
+int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ /* Check if device is already in RDDM */
+ if (mhi_cntrl->ee == MHI_EE_RDDM)
+ return 0;
+
+ dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+
+ /* Wait for RDDM event */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->ee == MHI_EE_RDDM,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ ret = ret ? 0 : -EIO;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);
+
+void mhi_device_get(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ mhi_dev->dev_wake++;
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+ mhi_trigger_resume(mhi_cntrl);
+
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+}
+EXPORT_SYMBOL_GPL(mhi_device_get);
+
+int mhi_device_get_sync(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ int ret;
+
+ ret = __mhi_device_get_sync(mhi_cntrl);
+ if (!ret)
+ mhi_dev->dev_wake++;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_device_get_sync);
+
+void mhi_device_put(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ mhi_dev->dev_wake--;
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+ mhi_trigger_resume(mhi_cntrl);
+
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+}
+EXPORT_SYMBOL_GPL(mhi_device_put);
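+
+/*
+ * Client usage sketch: device wake votes taken via the accessors above
+ * must be balanced, e.g.
+ *
+ *	ret = mhi_device_get_sync(mhi_dev);
+ *	if (ret)
+ *		return ret;
+ *	... perform transfers ...
+ *	mhi_device_put(mhi_dev);
+ */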
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
new file mode 100644
index 000000000..fca0d0669
--- /dev/null
+++ b/drivers/bus/mips_cdmm.c
@@ -0,0 +1,698 @@
+/*
+ * Bus driver for MIPS Common Device Memory Map (CDMM).
+ *
+ * Copyright (C) 2014-2015 Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/atomic.h>
+#include <linux/err.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <asm/cdmm.h>
+#include <asm/hazards.h>
+#include <asm/mipsregs.h>
+
+/* Access control and status register fields */
+#define CDMM_ACSR_DEVTYPE_SHIFT 24
+#define CDMM_ACSR_DEVTYPE (255ul << CDMM_ACSR_DEVTYPE_SHIFT)
+#define CDMM_ACSR_DEVSIZE_SHIFT 16
+#define CDMM_ACSR_DEVSIZE (31ul << CDMM_ACSR_DEVSIZE_SHIFT)
+#define CDMM_ACSR_DEVREV_SHIFT 12
+#define CDMM_ACSR_DEVREV (15ul << CDMM_ACSR_DEVREV_SHIFT)
+#define CDMM_ACSR_UW (1ul << 3)
+#define CDMM_ACSR_UR (1ul << 2)
+#define CDMM_ACSR_SW (1ul << 1)
+#define CDMM_ACSR_SR (1ul << 0)
+
+/* Each block of device registers is 64 bytes */
+#define CDMM_DRB_SIZE 64
+
+#define to_mips_cdmm_driver(d) container_of(d, struct mips_cdmm_driver, drv)
+
+/* Default physical base address */
+static phys_addr_t mips_cdmm_default_base;
+
+/* Bus operations */
+
+static const struct mips_cdmm_device_id *
+mips_cdmm_lookup(const struct mips_cdmm_device_id *table,
+ struct mips_cdmm_device *dev)
+{
+ int ret = 0;
+
+ for (; table->type; ++table) {
+ ret = (dev->type == table->type);
+ if (ret)
+ break;
+ }
+
+ return ret ? table : NULL;
+}
+
+static int mips_cdmm_match(struct device *dev, struct device_driver *drv)
+{
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
+ struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(drv);
+
+ return mips_cdmm_lookup(cdrv->id_table, cdev) != NULL;
+}
+
+static int mips_cdmm_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
+ int retval = 0;
+
+ retval = add_uevent_var(env, "CDMM_CPU=%u", cdev->cpu);
+ if (retval)
+ return retval;
+
+ retval = add_uevent_var(env, "CDMM_TYPE=0x%02x", cdev->type);
+ if (retval)
+ return retval;
+
+ retval = add_uevent_var(env, "CDMM_REV=%u", cdev->rev);
+ if (retval)
+ return retval;
+
+ retval = add_uevent_var(env, "MODALIAS=mipscdmm:t%02X", cdev->type);
+ return retval;
+}
+
+/* Device attributes */
+
+#define CDMM_ATTR(name, fmt, arg...) \
+static ssize_t name##_show(struct device *_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct mips_cdmm_device *dev = to_mips_cdmm_device(_dev); \
+ return sprintf(buf, fmt, arg); \
+} \
+static DEVICE_ATTR_RO(name);
+
+CDMM_ATTR(cpu, "%u\n", dev->cpu);
+CDMM_ATTR(type, "0x%02x\n", dev->type);
+CDMM_ATTR(revision, "%u\n", dev->rev);
+CDMM_ATTR(modalias, "mipscdmm:t%02X\n", dev->type);
+CDMM_ATTR(resource, "\t%016llx\t%016llx\t%016lx\n",
+ (unsigned long long)dev->res.start,
+ (unsigned long long)dev->res.end,
+ dev->res.flags);
+
+static struct attribute *mips_cdmm_dev_attrs[] = {
+ &dev_attr_cpu.attr,
+ &dev_attr_type.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_resource.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mips_cdmm_dev);
+
+struct bus_type mips_cdmm_bustype = {
+ .name = "cdmm",
+ .dev_groups = mips_cdmm_dev_groups,
+ .match = mips_cdmm_match,
+ .uevent = mips_cdmm_uevent,
+};
+EXPORT_SYMBOL_GPL(mips_cdmm_bustype);
+
+/*
+ * Standard driver callback helpers.
+ *
+ * All the CDMM driver callbacks need to be executed on the appropriate CPU from
+ * workqueues. For the standard driver callbacks we need a work function
+ * (mips_cdmm_{void,int}_work()) to do the actual call from the right CPU, and a
+ * wrapper function (generated with BUILD_PERCPU_HELPER) to arrange for the work
+ * function to be called on that CPU.
+ */
+
+/**
+ * struct mips_cdmm_work_dev - Data for per-device call work.
+ * @fn: CDMM driver callback function to call for the device.
+ * @dev: CDMM device to pass to @fn.
+ */
+struct mips_cdmm_work_dev {
+ void *fn;
+ struct mips_cdmm_device *dev;
+};
+
+/**
+ * mips_cdmm_void_work() - Call a void returning CDMM driver callback.
+ * @data: struct mips_cdmm_work_dev pointer.
+ *
+ * A work_on_cpu() callback function to call an arbitrary CDMM driver callback
+ * function which doesn't return a value.
+ */
+static long mips_cdmm_void_work(void *data)
+{
+ struct mips_cdmm_work_dev *work = data;
+ void (*fn)(struct mips_cdmm_device *) = work->fn;
+
+ fn(work->dev);
+ return 0;
+}
+
+/**
+ * mips_cdmm_int_work() - Call an int returning CDMM driver callback.
+ * @data: struct mips_cdmm_work_dev pointer.
+ *
+ * A work_on_cpu() callback function to call an arbitrary CDMM driver callback
+ * function which returns an int.
+ */
+static long mips_cdmm_int_work(void *data)
+{
+ struct mips_cdmm_work_dev *work = data;
+ int (*fn)(struct mips_cdmm_device *) = work->fn;
+
+ return fn(work->dev);
+}
+
+#define _BUILD_RET_void
+#define _BUILD_RET_int return
+
+/**
+ * BUILD_PERCPU_HELPER() - Helper to call a CDMM driver callback on right CPU.
+ * @_ret: Return type (void or int).
+ * @_name: Name of CDMM driver callback function.
+ *
+ * Generates a specific device callback function to call a CDMM driver callback
+ * function on the appropriate CPU for the device, and if applicable return the
+ * result.
+ */
+#define BUILD_PERCPU_HELPER(_ret, _name) \
+static _ret mips_cdmm_##_name(struct device *dev) \
+{ \
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev); \
+ struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(dev->driver); \
+ struct mips_cdmm_work_dev work = { \
+ .fn = cdrv->_name, \
+ .dev = cdev, \
+ }; \
+ \
+ _BUILD_RET_##_ret work_on_cpu(cdev->cpu, \
+ mips_cdmm_##_ret##_work, &work); \
+}
+
+/* Driver callback functions */
+BUILD_PERCPU_HELPER(int, probe) /* int mips_cdmm_probe(struct device) */
+BUILD_PERCPU_HELPER(int, remove) /* int mips_cdmm_remove(struct device) */
+BUILD_PERCPU_HELPER(void, shutdown) /* void mips_cdmm_shutdown(struct device) */
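+
+/*
+ * For reference, BUILD_PERCPU_HELPER(int, probe) above expands to
+ * roughly the following (a sketch; the token pasting selects
+ * mips_cdmm_int_work and a "return" via _BUILD_RET_int):
+ *
+ *	static int mips_cdmm_probe(struct device *dev)
+ *	{
+ *		...
+ *		return work_on_cpu(cdev->cpu, mips_cdmm_int_work, &work);
+ *	}
+ */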
+
+
+/* Driver registration */
+
+/**
+ * mips_cdmm_driver_register() - Register a CDMM driver.
+ * @drv: CDMM driver information.
+ *
+ * Register a CDMM driver with the CDMM subsystem. The driver will be informed
+ * of matching devices which are discovered.
+ *
+ * Returns: 0 on success.
+ */
+int mips_cdmm_driver_register(struct mips_cdmm_driver *drv)
+{
+ drv->drv.bus = &mips_cdmm_bustype;
+
+ if (drv->probe)
+ drv->drv.probe = mips_cdmm_probe;
+ if (drv->remove)
+ drv->drv.remove = mips_cdmm_remove;
+ if (drv->shutdown)
+ drv->drv.shutdown = mips_cdmm_shutdown;
+
+ return driver_register(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(mips_cdmm_driver_register);
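+
+/*
+ * Example (hypothetical driver, for illustration only): a minimal CDMM
+ * driver matching a made-up device type 0xfd could look like:
+ *
+ *	static const struct mips_cdmm_device_id my_cdmm_ids[] = {
+ *		{ .type = 0xfd },
+ *		{ }
+ *	};
+ *
+ *	static struct mips_cdmm_driver my_cdmm_driver = {
+ *		.drv		= { .name = "my-cdmm" },
+ *		.probe		= my_cdmm_probe,
+ *		.id_table	= my_cdmm_ids,
+ *	};
+ *
+ *	mips_cdmm_driver_register(&my_cdmm_driver);
+ */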
+
+/**
+ * mips_cdmm_driver_unregister() - Unregister a CDMM driver.
+ * @drv: CDMM driver information.
+ *
+ * Unregister a CDMM driver from the CDMM subsystem.
+ */
+void mips_cdmm_driver_unregister(struct mips_cdmm_driver *drv)
+{
+ driver_unregister(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(mips_cdmm_driver_unregister);
+
+
+/* CDMM initialisation and bus discovery */
+
+/**
+ * struct mips_cdmm_bus - Info about CDMM bus.
+ * @phys: Physical address at which it is mapped.
+ * @regs: Virtual address where registers can be accessed.
+ * @drbs: Total number of DRBs.
+ * @drbs_reserved: Number of DRBs reserved.
+ * @discovered: Whether the devices on the bus have been discovered yet.
+ * @offline: Whether the CDMM bus is going offline (or very early
+ * coming back online), in which case it should be
+ * reconfigured each time.
+ */
+struct mips_cdmm_bus {
+ phys_addr_t phys;
+ void __iomem *regs;
+ unsigned int drbs;
+ unsigned int drbs_reserved;
+ bool discovered;
+ bool offline;
+};
+
+static struct mips_cdmm_bus mips_cdmm_boot_bus;
+static DEFINE_PER_CPU(struct mips_cdmm_bus *, mips_cdmm_buses);
+static atomic_t mips_cdmm_next_id = ATOMIC_INIT(-1);
+
+/**
+ * mips_cdmm_get_bus() - Get the per-CPU CDMM bus information.
+ *
+ * Get information about the per-CPU CDMM bus, if the bus is present.
+ *
+ * The caller must prevent migration to another CPU, either by disabling
+ * pre-emption or by running from a pinned kernel thread.
+ *
+ * Returns: Pointer to CDMM bus information for the current CPU.
+ * May return ERR_PTR(-errno) in case of error, so check with
+ * IS_ERR().
+ */
+static struct mips_cdmm_bus *mips_cdmm_get_bus(void)
+{
+ struct mips_cdmm_bus *bus, **bus_p;
+ unsigned long flags;
+ unsigned int cpu;
+
+ if (!cpu_has_cdmm)
+ return ERR_PTR(-ENODEV);
+
+ cpu = smp_processor_id();
+	/* Avoid early use of per-CPU primitives before they are initialised */
+ if (cpu == 0)
+ return &mips_cdmm_boot_bus;
+
+ /* Get bus pointer */
+ bus_p = per_cpu_ptr(&mips_cdmm_buses, cpu);
+ local_irq_save(flags);
+ bus = *bus_p;
+ /* Attempt allocation if NULL */
+ if (unlikely(!bus)) {
+ bus = kzalloc(sizeof(*bus), GFP_ATOMIC);
+ if (unlikely(!bus))
+ bus = ERR_PTR(-ENOMEM);
+ else
+ *bus_p = bus;
+ }
+ local_irq_restore(flags);
+ return bus;
+}
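+
+/*
+ * Caller sketch: since the bus information is per-CPU, callers must stay
+ * on one CPU for the duration, e.g.:
+ *
+ *	preempt_disable();
+ *	bus = mips_cdmm_get_bus();
+ *	if (!IS_ERR(bus))
+ *		... use bus ...
+ *	preempt_enable();
+ */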
+
+/**
+ * mips_cdmm_cur_base() - Find current physical base address of CDMM region.
+ *
+ * Returns: Physical base address of CDMM region according to cdmmbase CP0
+ * register, or 0 if the CDMM region is disabled.
+ */
+static phys_addr_t mips_cdmm_cur_base(void)
+{
+ unsigned long cdmmbase = read_c0_cdmmbase();
+
+ if (!(cdmmbase & MIPS_CDMMBASE_EN))
+ return 0;
+
+ return (cdmmbase >> MIPS_CDMMBASE_ADDR_SHIFT)
+ << MIPS_CDMMBASE_ADDR_START;
+}
+
+/**
+ * mips_cdmm_phys_base() - Choose a physical base address for CDMM region.
+ *
+ * Picking a suitable physical address at which to map the CDMM region is
+ * platform specific, so this weak function can be overridden by platform
+ * code to pick a suitable value if none is configured by the bootloader.
+ * By default this method tries to find a CDMM-specific node in the system
+ * dtb. Note that this won't work for an early serial console.
+ */
+phys_addr_t __weak mips_cdmm_phys_base(void)
+{
+ struct device_node *np;
+ struct resource res;
+ int err;
+
+ np = of_find_compatible_node(NULL, NULL, "mti,mips-cdmm");
+ if (np) {
+ err = of_address_to_resource(np, 0, &res);
+ of_node_put(np);
+ if (!err)
+ return res.start;
+ }
+
+ return 0;
+}
+
+/**
+ * mips_cdmm_setup() - Ensure the CDMM bus is initialised and usable.
+ * @bus: Pointer to bus information for current CPU.
+ * IS_ERR(bus) is checked, so no need for caller to check.
+ *
+ * The caller must prevent migration to another CPU, either by disabling
+ * pre-emption or by running from a pinned kernel thread.
+ *
+ * Returns: 0 on success, -errno on failure.
+ */
+static int mips_cdmm_setup(struct mips_cdmm_bus *bus)
+{
+ unsigned long cdmmbase, flags;
+ int ret = 0;
+
+ if (IS_ERR(bus))
+ return PTR_ERR(bus);
+
+ local_irq_save(flags);
+	/* Don't set up the bus a second time unless it is marked offline */
+ if (bus->offline) {
+ /* If CDMM region is still set up, nothing to do */
+ if (bus->phys == mips_cdmm_cur_base())
+ goto out;
+ /*
+ * The CDMM region isn't set up as expected, so it needs
+ * reconfiguring, but then we can stop checking it.
+ */
+ bus->offline = false;
+ } else if (bus->phys > 1) {
+ goto out;
+ }
+
+ /* If the CDMM region is already configured, inherit that setup */
+ if (!bus->phys)
+ bus->phys = mips_cdmm_cur_base();
+ /* Otherwise, ask platform code for suggestions */
+ if (!bus->phys)
+ bus->phys = mips_cdmm_phys_base();
+ /* Otherwise, copy what other CPUs have done */
+ if (!bus->phys)
+ bus->phys = mips_cdmm_default_base;
+ /* Otherwise, complain once */
+ if (!bus->phys) {
+ bus->phys = 1;
+ /*
+ * If you hit this, either your bootloader needs to set up the
+ * CDMM on the boot CPU, or else you need to implement
+ * mips_cdmm_phys_base() for your platform (see asm/cdmm.h).
+ */
+ pr_err("cdmm%u: Failed to choose a physical base\n",
+ smp_processor_id());
+ }
+ /* Already complained? */
+ if (bus->phys == 1) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ /* Record our success for other CPUs to copy */
+ mips_cdmm_default_base = bus->phys;
+
+ pr_debug("cdmm%u: Enabling CDMM region at %pa\n",
+ smp_processor_id(), &bus->phys);
+
+ /* Enable CDMM */
+ cdmmbase = read_c0_cdmmbase();
+ cdmmbase &= (1ul << MIPS_CDMMBASE_ADDR_SHIFT) - 1;
+ cdmmbase |= (bus->phys >> MIPS_CDMMBASE_ADDR_START)
+ << MIPS_CDMMBASE_ADDR_SHIFT;
+ cdmmbase |= MIPS_CDMMBASE_EN;
+ write_c0_cdmmbase(cdmmbase);
+ tlbw_use_hazard();
+
+ bus->regs = (void __iomem *)CKSEG1ADDR(bus->phys);
+ bus->drbs = 1 + ((cdmmbase & MIPS_CDMMBASE_SIZE) >>
+ MIPS_CDMMBASE_SIZE_SHIFT);
+ bus->drbs_reserved = !!(cdmmbase & MIPS_CDMMBASE_CI);
+
+out:
+ local_irq_restore(flags);
+ return ret;
+}
+
+/**
+ * mips_cdmm_early_probe() - Minimally probe for a specific device on CDMM.
+ * @dev_type: CDMM type code to look for.
+ *
+ * Minimally configure the in-CPU Common Device Memory Map (CDMM) and look for a
+ * specific device. This can be used to find a device very early in boot,
+ * for example to configure an early FDC console device.
+ *
+ * The caller must prevent migration to another CPU, either by disabling
+ * pre-emption or by running from a pinned kernel thread.
+ *
+ * Returns: MMIO pointer to device memory. The caller can read the ACSR
+ * register to find more information about the device (such as the
+ * version number or the number of blocks).
+ * May return IOMEM_ERR_PTR(-errno) in case of error, so check with
+ * IS_ERR().
+ */
+void __iomem *mips_cdmm_early_probe(unsigned int dev_type)
+{
+ struct mips_cdmm_bus *bus;
+ void __iomem *cdmm;
+ u32 acsr;
+ unsigned int drb, type, size;
+ int err;
+
+ if (WARN_ON(!dev_type))
+ return IOMEM_ERR_PTR(-ENODEV);
+
+ bus = mips_cdmm_get_bus();
+ err = mips_cdmm_setup(bus);
+ if (err)
+ return IOMEM_ERR_PTR(err);
+
+ /* Skip the first block if it's reserved for more registers */
+ drb = bus->drbs_reserved;
+ cdmm = bus->regs;
+
+ /* Look for a specific device type */
+ for (; drb < bus->drbs; drb += size + 1) {
+ acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE);
+ type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
+ if (type == dev_type)
+ return cdmm + drb * CDMM_DRB_SIZE;
+ size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT;
+ }
+
+ return IOMEM_ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(mips_cdmm_early_probe);
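+
+/*
+ * Example (sketch; the device type 0xfd is made up): an early driver
+ * could locate its device and read the ACSR before the driver model is
+ * available:
+ *
+ *	void __iomem *regs = mips_cdmm_early_probe(0xfd);
+ *	u32 acsr;
+ *
+ *	if (IS_ERR(regs))
+ *		return PTR_ERR(regs);
+ *	acsr = __raw_readl(regs);
+ */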
+
+/**
+ * mips_cdmm_release() - Release a removed CDMM device.
+ * @dev: Device object
+ *
+ * Clean up the struct mips_cdmm_device for an unused CDMM device. This is
+ * called automatically by the driver core when a device is removed.
+ */
+static void mips_cdmm_release(struct device *dev)
+{
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
+
+ kfree(cdev);
+}
+
+/**
+ * mips_cdmm_bus_discover() - Discover the devices on the CDMM bus.
+ * @bus: CDMM bus information, must already be set up.
+ */
+static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus)
+{
+ void __iomem *cdmm;
+ u32 acsr;
+ unsigned int drb, type, size, rev;
+ struct mips_cdmm_device *dev;
+ unsigned int cpu = smp_processor_id();
+ int ret = 0;
+ int id = 0;
+
+ /* Skip the first block if it's reserved for more registers */
+ drb = bus->drbs_reserved;
+ cdmm = bus->regs;
+
+ /* Discover devices */
+ bus->discovered = true;
+ pr_info("cdmm%u discovery (%u blocks)\n", cpu, bus->drbs);
+ for (; drb < bus->drbs; drb += size + 1) {
+ acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE);
+ type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
+ size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT;
+ rev = (acsr & CDMM_ACSR_DEVREV) >> CDMM_ACSR_DEVREV_SHIFT;
+
+ if (!type)
+ continue;
+
+ pr_info("cdmm%u-%u: @%u (%#x..%#x), type 0x%02x, rev %u\n",
+ cpu, id, drb, drb * CDMM_DRB_SIZE,
+ (drb + size + 1) * CDMM_DRB_SIZE - 1,
+ type, rev);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ break;
+
+ dev->cpu = cpu;
+ dev->res.start = bus->phys + drb * CDMM_DRB_SIZE;
+ dev->res.end = bus->phys +
+ (drb + size + 1) * CDMM_DRB_SIZE - 1;
+ dev->res.flags = IORESOURCE_MEM;
+ dev->type = type;
+ dev->rev = rev;
+ dev->dev.parent = get_cpu_device(cpu);
+ dev->dev.bus = &mips_cdmm_bustype;
+ dev->dev.id = atomic_inc_return(&mips_cdmm_next_id);
+ dev->dev.release = mips_cdmm_release;
+
+ dev_set_name(&dev->dev, "cdmm%u-%u", cpu, id);
+ ++id;
+ ret = device_register(&dev->dev);
+ if (ret)
+ put_device(&dev->dev);
+ }
+}
+
+
+/*
+ * CPU hotplug and initialisation
+ *
+ * All the CDMM driver callbacks need to be executed on the appropriate CPU from
+ * workqueues. For the CPU callbacks, they need to be called for all devices on
+ * that CPU, so the work function calls bus_for_each_dev, using a helper
+ * (generated with BUILD_PERDEV_HELPER) to call the driver callback if the
+ * device's CPU matches.
+ */
+
+/**
+ * BUILD_PERDEV_HELPER() - Helper to call a CDMM driver callback if CPU matches.
+ * @_name: Name of CDMM driver callback function.
+ *
+ * Generates a bus_for_each_dev callback function to call a specific CDMM driver
+ * callback function for the device if the device's CPU matches that pointed to
+ * by the data argument.
+ *
+ * This is used for informing drivers for all devices on a given CPU of some
+ * event (such as the CPU going online/offline).
+ *
+ * It is expected to already be called from the appropriate CPU.
+ */
+#define BUILD_PERDEV_HELPER(_name) \
+static int mips_cdmm_##_name##_helper(struct device *dev, void *data) \
+{ \
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev); \
+ struct mips_cdmm_driver *cdrv; \
+ unsigned int cpu = *(unsigned int *)data; \
+ \
+ if (cdev->cpu != cpu || !dev->driver) \
+ return 0; \
+ \
+ cdrv = to_mips_cdmm_driver(dev->driver); \
+ if (!cdrv->_name) \
+ return 0; \
+ return cdrv->_name(cdev); \
+}
+
+/* bus_for_each_dev callback helper functions */
+BUILD_PERDEV_HELPER(cpu_down) /* int mips_cdmm_cpu_down_helper(...) */
+BUILD_PERDEV_HELPER(cpu_up) /* int mips_cdmm_cpu_up_helper(...) */
+
+/**
+ * mips_cdmm_cpu_down_prep() - Callback for CPUHP DOWN_PREP:
+ * Tear down the CDMM bus.
+ * @cpu: unsigned int CPU number.
+ *
+ * This function is executed on the hotplugged CPU and calls the CDMM
+ * driver cpu_down callback for all devices on that CPU.
+ */
+static int mips_cdmm_cpu_down_prep(unsigned int cpu)
+{
+ struct mips_cdmm_bus *bus;
+ long ret;
+
+ /* Inform all the devices on the bus */
+ ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu,
+ mips_cdmm_cpu_down_helper);
+
+	/*
+	 * While the bus is offline, each use of it should reconfigure it, in
+	 * case that use is the first one after the bus comes back online.
+	 */
+ bus = mips_cdmm_get_bus();
+ if (!IS_ERR(bus))
+ bus->offline = true;
+
+ return ret;
+}
+
+/**
+ * mips_cdmm_cpu_online() - Callback for CPUHP ONLINE: Bring up the CDMM bus.
+ * @cpu: unsigned int CPU number.
+ *
+ * This work_on_cpu callback function is executed on a given CPU to discover
+ * CDMM devices on that CPU, or to call the CDMM driver cpu_up callback for all
+ * devices already discovered on that CPU.
+ *
+ * It is used as a work_on_cpu() callback function during
+ * initialisation; when CPUs are brought online, the function is
+ * invoked directly on the hotplugged CPU.
+ */
+static int mips_cdmm_cpu_online(unsigned int cpu)
+{
+ struct mips_cdmm_bus *bus;
+ long ret;
+
+ bus = mips_cdmm_get_bus();
+ ret = mips_cdmm_setup(bus);
+ if (ret)
+ return ret;
+
+ /* Bus now set up, so we can drop the offline flag if still set */
+ bus->offline = false;
+
+ if (!bus->discovered)
+ mips_cdmm_bus_discover(bus);
+ else
+ /* Inform all the devices on the bus */
+ ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu,
+ mips_cdmm_cpu_up_helper);
+
+ return ret;
+}
+
+/**
+ * mips_cdmm_init() - Initialise CDMM bus.
+ *
+ * Initialise CDMM bus, discover CDMM devices for online CPUs, and arrange for
+ * hotplug notifications so the CDMM drivers can be kept up to date.
+ */
+static int __init mips_cdmm_init(void)
+{
+ int ret;
+
+ /* Register the bus */
+ ret = bus_register(&mips_cdmm_bustype);
+ if (ret)
+ return ret;
+
+ /* We want to be notified about new CPUs */
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "bus/cdmm:online",
+ mips_cdmm_cpu_online, mips_cdmm_cpu_down_prep);
+ if (ret < 0)
+ pr_warn("cdmm: Failed to register CPU notifier\n");
+
+ return ret;
+}
+subsys_initcall(mips_cdmm_init);
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
new file mode 100644
index 000000000..b20fdcbd0
--- /dev/null
+++ b/drivers/bus/moxtet.c
@@ -0,0 +1,884 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Turris Mox module configuration bus driver
+ *
+ * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ */
+
+#include <dt-bindings/bus/moxtet.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moxtet.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/spi/spi.h>
+
+/*
+ * @name: module name for sysfs
+ * @hwirq_base: base index of the IRQs for this module (-1 if it has no IRQs)
+ * @nirqs: number of interrupts this module provides via the shift register
+ * @desc: module description for kernel log
+ */
+static const struct {
+ const char *name;
+ int hwirq_base;
+ int nirqs;
+ const char *desc;
+} mox_module_table[] = {
+ /* do not change order of this array! */
+ { NULL, 0, 0, NULL },
+ { "sfp", -1, 0, "MOX D (SFP cage)" },
+ { "pci", MOXTET_IRQ_PCI, 1, "MOX B (Mini-PCIe)" },
+ { "topaz", MOXTET_IRQ_TOPAZ, 1, "MOX C (4 port switch)" },
+ { "peridot", MOXTET_IRQ_PERIDOT(0), 1, "MOX E (8 port switch)" },
+ { "usb3", MOXTET_IRQ_USB3, 2, "MOX F (USB 3.0)" },
+ { "pci-bridge", -1, 0, "MOX G (Mini-PCIe bridge)" },
+};
+
+static inline bool mox_module_known(unsigned int id)
+{
+ return id >= TURRIS_MOX_MODULE_FIRST && id <= TURRIS_MOX_MODULE_LAST;
+}
+
+static inline const char *mox_module_name(unsigned int id)
+{
+ if (mox_module_known(id))
+ return mox_module_table[id].name;
+ else
+ return "unknown";
+}
+
+#define DEF_MODULE_ATTR(name, fmt, ...) \
+static ssize_t \
+module_##name##_show(struct device *dev, struct device_attribute *a, \
+ char *buf) \
+{ \
+ struct moxtet_device *mdev = to_moxtet_device(dev); \
+ return sprintf(buf, (fmt), __VA_ARGS__); \
+} \
+static DEVICE_ATTR_RO(module_##name)
+
+DEF_MODULE_ATTR(id, "0x%x\n", mdev->id);
+DEF_MODULE_ATTR(name, "%s\n", mox_module_name(mdev->id));
+DEF_MODULE_ATTR(description, "%s\n",
+ mox_module_known(mdev->id) ? mox_module_table[mdev->id].desc
+ : "");
+
+static struct attribute *moxtet_dev_attrs[] = {
+ &dev_attr_module_id.attr,
+ &dev_attr_module_name.attr,
+ &dev_attr_module_description.attr,
+ NULL,
+};
+
+static const struct attribute_group moxtet_dev_group = {
+ .attrs = moxtet_dev_attrs,
+};
+
+static const struct attribute_group *moxtet_dev_groups[] = {
+ &moxtet_dev_group,
+ NULL,
+};
+
+static int moxtet_match(struct device *dev, struct device_driver *drv)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+ struct moxtet_driver *tdrv = to_moxtet_driver(drv);
+ const enum turris_mox_module_id *t;
+
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ if (!tdrv->id_table)
+ return 0;
+
+ for (t = tdrv->id_table; *t; ++t)
+ if (*t == mdev->id)
+ return 1;
+
+ return 0;
+}
+
+static struct bus_type moxtet_bus_type = {
+ .name = "moxtet",
+ .dev_groups = moxtet_dev_groups,
+ .match = moxtet_match,
+};
+
+int __moxtet_register_driver(struct module *owner,
+ struct moxtet_driver *mdrv)
+{
+ mdrv->driver.owner = owner;
+ mdrv->driver.bus = &moxtet_bus_type;
+ return driver_register(&mdrv->driver);
+}
+EXPORT_SYMBOL_GPL(__moxtet_register_driver);
+
+static int moxtet_dev_check(struct device *dev, void *data)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+ struct moxtet_device *new_dev = data;
+
+ if (mdev->moxtet == new_dev->moxtet && mdev->id == new_dev->id &&
+ mdev->idx == new_dev->idx)
+ return -EBUSY;
+ return 0;
+}
+
+static void moxtet_dev_release(struct device *dev)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+
+ put_device(mdev->moxtet->dev);
+ kfree(mdev);
+}
+
+static struct moxtet_device *
+moxtet_alloc_device(struct moxtet *moxtet)
+{
+ struct moxtet_device *dev;
+
+ if (!get_device(moxtet->dev))
+ return NULL;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ put_device(moxtet->dev);
+ return NULL;
+ }
+
+ dev->moxtet = moxtet;
+ dev->dev.parent = moxtet->dev;
+ dev->dev.bus = &moxtet_bus_type;
+ dev->dev.release = moxtet_dev_release;
+
+ device_initialize(&dev->dev);
+
+ return dev;
+}
+
+static int moxtet_add_device(struct moxtet_device *dev)
+{
+ static DEFINE_MUTEX(add_mutex);
+ int ret;
+
+ if (dev->idx >= TURRIS_MOX_MAX_MODULES || dev->id > 0xf)
+ return -EINVAL;
+
+ dev_set_name(&dev->dev, "moxtet-%s.%u", mox_module_name(dev->id),
+ dev->idx);
+
+ mutex_lock(&add_mutex);
+
+ ret = bus_for_each_dev(&moxtet_bus_type, NULL, dev,
+ moxtet_dev_check);
+ if (ret)
+ goto done;
+
+ ret = device_add(&dev->dev);
+ if (ret < 0)
+ dev_err(dev->moxtet->dev, "can't add %s, status %d\n",
+ dev_name(dev->moxtet->dev), ret);
+
+done:
+ mutex_unlock(&add_mutex);
+ return ret;
+}
+
+static int __unregister(struct device *dev, void *null)
+{
+ if (dev->of_node) {
+ of_node_clear_flag(dev->of_node, OF_POPULATED);
+ of_node_put(dev->of_node);
+ }
+
+ device_unregister(dev);
+
+ return 0;
+}
+
+static struct moxtet_device *
+of_register_moxtet_device(struct moxtet *moxtet, struct device_node *nc)
+{
+ struct moxtet_device *dev;
+ u32 val;
+ int ret;
+
+ dev = moxtet_alloc_device(moxtet);
+ if (!dev) {
+ dev_err(moxtet->dev,
+ "Moxtet device alloc error for %pOF\n", nc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = of_property_read_u32(nc, "reg", &val);
+ if (ret) {
+ dev_err(moxtet->dev, "%pOF has no valid 'reg' property (%d)\n",
+ nc, ret);
+ goto err_put;
+ }
+
+ dev->idx = val;
+
+ if (dev->idx >= TURRIS_MOX_MAX_MODULES) {
+ dev_err(moxtet->dev, "%pOF Moxtet address 0x%x out of range\n",
+ nc, dev->idx);
+ ret = -EINVAL;
+ goto err_put;
+ }
+
+ dev->id = moxtet->modules[dev->idx];
+
+ if (!dev->id) {
+ dev_err(moxtet->dev, "%pOF Moxtet address 0x%x is empty\n", nc,
+ dev->idx);
+ ret = -ENODEV;
+ goto err_put;
+ }
+
+ of_node_get(nc);
+ dev->dev.of_node = nc;
+
+ ret = moxtet_add_device(dev);
+ if (ret) {
+ dev_err(moxtet->dev,
+ "Moxtet device register error for %pOF\n", nc);
+ of_node_put(nc);
+ goto err_put;
+ }
+
+ return dev;
+
+err_put:
+ put_device(&dev->dev);
+ return ERR_PTR(ret);
+}
+
+static void of_register_moxtet_devices(struct moxtet *moxtet)
+{
+ struct moxtet_device *dev;
+ struct device_node *nc;
+
+ if (!moxtet->dev->of_node)
+ return;
+
+ for_each_available_child_of_node(moxtet->dev->of_node, nc) {
+ if (of_node_test_and_set_flag(nc, OF_POPULATED))
+ continue;
+ dev = of_register_moxtet_device(moxtet, nc);
+ if (IS_ERR(dev)) {
+ dev_warn(moxtet->dev,
+ "Failed to create Moxtet device for %pOF\n",
+ nc);
+ of_node_clear_flag(nc, OF_POPULATED);
+ }
+ }
+}
+
+static void
+moxtet_register_devices_from_topology(struct moxtet *moxtet)
+{
+ struct moxtet_device *dev;
+ int i, ret;
+
+ for (i = 0; i < moxtet->count; ++i) {
+ dev = moxtet_alloc_device(moxtet);
+ if (!dev) {
+ dev_err(moxtet->dev, "Moxtet device %u alloc error\n",
+ i);
+ continue;
+ }
+
+ dev->idx = i;
+ dev->id = moxtet->modules[i];
+
+ ret = moxtet_add_device(dev);
+ if (ret && ret != -EBUSY) {
+ put_device(&dev->dev);
+ dev_err(moxtet->dev,
+ "Moxtet device %u register error: %i\n", i,
+ ret);
+ }
+ }
+}
+
+/*
+ * @nsame: how many modules with the same id are already in moxtet->modules
+ */
+static int moxtet_set_irq(struct moxtet *moxtet, int idx, int id, int nsame)
+{
+ int i, first;
+ struct moxtet_irqpos *pos;
+
+ first = mox_module_table[id].hwirq_base +
+ nsame * mox_module_table[id].nirqs;
+
+ if (first + mox_module_table[id].nirqs > MOXTET_NIRQS)
+ return -EINVAL;
+
+ for (i = 0; i < mox_module_table[id].nirqs; ++i) {
+ pos = &moxtet->irq.position[first + i];
+ pos->idx = idx;
+ pos->bit = i;
+ moxtet->irq.exists |= BIT(first + i);
+ }
+
+ return 0;
+}
+
+static int moxtet_find_topology(struct moxtet *moxtet)
+{
+ u8 buf[TURRIS_MOX_MAX_MODULES];
+ int cnts[TURRIS_MOX_MODULE_LAST];
+ int i, ret;
+
+ memset(cnts, 0, sizeof(cnts));
+
+ ret = spi_read(to_spi_device(moxtet->dev), buf, TURRIS_MOX_MAX_MODULES);
+ if (ret < 0)
+ return ret;
+
+ if (buf[0] == TURRIS_MOX_CPU_ID_EMMC) {
+ dev_info(moxtet->dev, "Found MOX A (eMMC CPU) module\n");
+ } else if (buf[0] == TURRIS_MOX_CPU_ID_SD) {
+ dev_info(moxtet->dev, "Found MOX A (CPU) module\n");
+ } else {
+ dev_err(moxtet->dev, "Invalid Turris MOX A CPU module 0x%02x\n",
+ buf[0]);
+ return -ENODEV;
+ }
+
+ moxtet->count = 0;
+
+ for (i = 1; i < TURRIS_MOX_MAX_MODULES; ++i) {
+ int id;
+
+ if (buf[i] == 0xff)
+ break;
+
+ id = buf[i] & 0xf;
+
+ moxtet->modules[i-1] = id;
+ ++moxtet->count;
+
+ if (mox_module_known(id)) {
+ dev_info(moxtet->dev, "Found %s module\n",
+ mox_module_table[id].desc);
+
+ if (moxtet_set_irq(moxtet, i-1, id, cnts[id]++) < 0)
+ dev_err(moxtet->dev,
+ " Cannot set IRQ for module %s\n",
+ mox_module_table[id].desc);
+ } else {
+ dev_warn(moxtet->dev,
+ "Unknown Moxtet module found (ID 0x%02x)\n",
+ id);
+ }
+ }
+
+ return 0;
+}
+
+static int moxtet_spi_read(struct moxtet *moxtet, u8 *buf)
+{
+ struct spi_transfer xfer = {
+ .rx_buf = buf,
+ .tx_buf = moxtet->tx,
+ .len = moxtet->count + 1
+ };
+ int ret;
+
+ mutex_lock(&moxtet->lock);
+
+ ret = spi_sync_transfer(to_spi_device(moxtet->dev), &xfer, 1);
+
+ mutex_unlock(&moxtet->lock);
+
+ return ret;
+}
+
+int moxtet_device_read(struct device *dev)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+ struct moxtet *moxtet = mdev->moxtet;
+ u8 buf[TURRIS_MOX_MAX_MODULES];
+ int ret;
+
+ if (mdev->idx >= moxtet->count)
+ return -EINVAL;
+
+ ret = moxtet_spi_read(moxtet, buf);
+ if (ret < 0)
+ return ret;
+
+ return buf[mdev->idx + 1] >> 4;
+}
+EXPORT_SYMBOL_GPL(moxtet_device_read);
+
+int moxtet_device_write(struct device *dev, u8 val)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+ struct moxtet *moxtet = mdev->moxtet;
+ int ret;
+
+ if (mdev->idx >= moxtet->count)
+ return -EINVAL;
+
+ mutex_lock(&moxtet->lock);
+
+ moxtet->tx[moxtet->count - mdev->idx] = val;
+
+ ret = spi_write(to_spi_device(moxtet->dev), moxtet->tx,
+ moxtet->count + 1);
+
+ mutex_unlock(&moxtet->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(moxtet_device_write);
+
+int moxtet_device_written(struct device *dev)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+ struct moxtet *moxtet = mdev->moxtet;
+
+ if (mdev->idx >= moxtet->count)
+ return -EINVAL;
+
+ return moxtet->tx[moxtet->count - mdev->idx];
+}
+EXPORT_SYMBOL_GPL(moxtet_device_written);
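+
+/*
+ * Wire format note (summarising the accessors above): one SPI transfer
+ * shifts count + 1 bytes through the module chain. On read, buf[0] is
+ * the CPU module ID and the upper nibble of buf[idx + 1] carries the
+ * input lines of the module at position idx; on write, the byte for
+ * position idx lives at tx[count - idx], i.e. the chain is addressed in
+ * reverse. Reading one module's input nibble therefore boils down to:
+ *
+ *	ret = moxtet_spi_read(moxtet, buf);
+ *	if (ret < 0)
+ *		return ret;
+ *	val = buf[idx + 1] >> 4;
+ */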
+
+#ifdef CONFIG_DEBUG_FS
+static int moxtet_debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t input_read(struct file *file, char __user *buf, size_t len,
+ loff_t *ppos)
+{
+ struct moxtet *moxtet = file->private_data;
+ u8 bin[TURRIS_MOX_MAX_MODULES];
+ u8 hex[sizeof(bin) * 2 + 1];
+ int ret, n;
+
+ ret = moxtet_spi_read(moxtet, bin);
+ if (ret < 0)
+ return ret;
+
+ n = moxtet->count + 1;
+ bin2hex(hex, bin, n);
+
+ hex[2*n] = '\n';
+
+ return simple_read_from_buffer(buf, len, ppos, hex, 2*n + 1);
+}
+
+static const struct file_operations input_fops = {
+ .owner = THIS_MODULE,
+ .open = moxtet_debug_open,
+ .read = input_read,
+ .llseek = no_llseek,
+};
+
+static ssize_t output_read(struct file *file, char __user *buf, size_t len,
+ loff_t *ppos)
+{
+ struct moxtet *moxtet = file->private_data;
+ u8 hex[TURRIS_MOX_MAX_MODULES * 2 + 1];
+ u8 *p = hex;
+ int i;
+
+ mutex_lock(&moxtet->lock);
+
+ for (i = 0; i < moxtet->count; ++i)
+ p = hex_byte_pack(p, moxtet->tx[moxtet->count - i]);
+
+ mutex_unlock(&moxtet->lock);
+
+ *p++ = '\n';
+
+ return simple_read_from_buffer(buf, len, ppos, hex, p - hex);
+}
+
+static ssize_t output_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct moxtet *moxtet = file->private_data;
+ u8 bin[TURRIS_MOX_MAX_MODULES];
+ u8 hex[sizeof(bin) * 2 + 1];
+ ssize_t res;
+ loff_t dummy = 0;
+ int err, i;
+
+ if (len > 2 * moxtet->count + 1 || len < 2 * moxtet->count)
+ return -EINVAL;
+
+ res = simple_write_to_buffer(hex, sizeof(hex), &dummy, buf, len);
+ if (res < 0)
+ return res;
+
+ if (len % 2 == 1 && hex[len - 1] != '\n')
+ return -EINVAL;
+
+ err = hex2bin(bin, hex, moxtet->count);
+ if (err < 0)
+ return -EINVAL;
+
+ mutex_lock(&moxtet->lock);
+
+ for (i = 0; i < moxtet->count; ++i)
+ moxtet->tx[moxtet->count - i] = bin[i];
+
+ err = spi_write(to_spi_device(moxtet->dev), moxtet->tx,
+ moxtet->count + 1);
+
+ mutex_unlock(&moxtet->lock);
+
+ return err < 0 ? err : len;
+}
+
+static const struct file_operations output_fops = {
+ .owner = THIS_MODULE,
+ .open = moxtet_debug_open,
+ .read = output_read,
+ .write = output_write,
+ .llseek = no_llseek,
+};
+
+static int moxtet_register_debugfs(struct moxtet *moxtet)
+{
+ struct dentry *root, *entry;
+
+ root = debugfs_create_dir("moxtet", NULL);
+
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ entry = debugfs_create_file_unsafe("input", 0444, root, moxtet,
+ &input_fops);
+ if (IS_ERR(entry))
+ goto err_remove;
+
+ entry = debugfs_create_file_unsafe("output", 0644, root, moxtet,
+ &output_fops);
+ if (IS_ERR(entry))
+ goto err_remove;
+
+ moxtet->debugfs_root = root;
+
+ return 0;
+err_remove:
+ debugfs_remove_recursive(root);
+ return PTR_ERR(entry);
+}
+
+static void moxtet_unregister_debugfs(struct moxtet *moxtet)
+{
+ debugfs_remove_recursive(moxtet->debugfs_root);
+}
+#else
+static inline int moxtet_register_debugfs(struct moxtet *moxtet)
+{
+ return 0;
+}
+
+static inline void moxtet_unregister_debugfs(struct moxtet *moxtet)
+{
+}
+#endif
+
+static int moxtet_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct moxtet *moxtet = d->host_data;
+
+ if (hw >= MOXTET_NIRQS || !(moxtet->irq.exists & BIT(hw))) {
+ dev_err(moxtet->dev, "Invalid hw irq number\n");
+ return -EINVAL;
+ }
+
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &moxtet->irq.chip, handle_level_irq);
+
+ return 0;
+}
+
+static int moxtet_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ struct moxtet *moxtet = d->host_data;
+ int irq;
+
+ if (WARN_ON(intsize < 1))
+ return -EINVAL;
+
+ irq = intspec[0];
+
+ if (irq >= MOXTET_NIRQS || !(moxtet->irq.exists & BIT(irq)))
+ return -EINVAL;
+
+ *out_hwirq = irq;
+ *out_type = IRQ_TYPE_NONE;
+ return 0;
+}
+
+static const struct irq_domain_ops moxtet_irq_domain = {
+ .map = moxtet_irq_domain_map,
+ .xlate = moxtet_irq_domain_xlate,
+};
+
+static void moxtet_irq_mask(struct irq_data *d)
+{
+ struct moxtet *moxtet = irq_data_get_irq_chip_data(d);
+
+ moxtet->irq.masked |= BIT(d->hwirq);
+}
+
+static void moxtet_irq_unmask(struct irq_data *d)
+{
+ struct moxtet *moxtet = irq_data_get_irq_chip_data(d);
+
+ moxtet->irq.masked &= ~BIT(d->hwirq);
+}
+
+static void moxtet_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct moxtet *moxtet = irq_data_get_irq_chip_data(d);
+ struct moxtet_irqpos *pos = &moxtet->irq.position[d->hwirq];
+ int id;
+
+ id = moxtet->modules[pos->idx];
+
+ seq_printf(p, " moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
+ pos->bit);
+}
+
+static const struct irq_chip moxtet_irq_chip = {
+ .name = "moxtet",
+ .irq_mask = moxtet_irq_mask,
+ .irq_unmask = moxtet_irq_unmask,
+ .irq_print_chip = moxtet_irq_print_chip,
+};
+
+static int moxtet_irq_read(struct moxtet *moxtet, unsigned long *map)
+{
+ struct moxtet_irqpos *pos = moxtet->irq.position;
+ u8 buf[TURRIS_MOX_MAX_MODULES];
+ int i, ret;
+
+ ret = moxtet_spi_read(moxtet, buf);
+ if (ret < 0)
+ return ret;
+
+ *map = 0;
+
+ for_each_set_bit(i, &moxtet->irq.exists, MOXTET_NIRQS) {
+ if (!(buf[pos[i].idx + 1] & BIT(4 + pos[i].bit)))
+ set_bit(i, map);
+ }
+
+ return 0;
+}
+
+static irqreturn_t moxtet_irq_thread_fn(int irq, void *data)
+{
+ struct moxtet *moxtet = data;
+ unsigned long set;
+ int nhandled = 0, i, sub_irq, ret;
+
+ ret = moxtet_irq_read(moxtet, &set);
+ if (ret < 0)
+ goto out;
+
+ set &= ~moxtet->irq.masked;
+
+ do {
+ for_each_set_bit(i, &set, MOXTET_NIRQS) {
+ sub_irq = irq_find_mapping(moxtet->irq.domain, i);
+ handle_nested_irq(sub_irq);
+ dev_dbg(moxtet->dev, "%i irq\n", i);
+ ++nhandled;
+ }
+
+ ret = moxtet_irq_read(moxtet, &set);
+ if (ret < 0)
+ goto out;
+
+ set &= ~moxtet->irq.masked;
+ } while (set);
+
+out:
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
+static void moxtet_irq_free(struct moxtet *moxtet)
+{
+ int i, irq;
+
+ for (i = 0; i < MOXTET_NIRQS; ++i) {
+ if (moxtet->irq.exists & BIT(i)) {
+ irq = irq_find_mapping(moxtet->irq.domain, i);
+ irq_dispose_mapping(irq);
+ }
+ }
+
+ irq_domain_remove(moxtet->irq.domain);
+}
+
+static int moxtet_irq_setup(struct moxtet *moxtet)
+{
+ int i, ret;
+
+ moxtet->irq.domain = irq_domain_add_simple(moxtet->dev->of_node,
+ MOXTET_NIRQS, 0,
+ &moxtet_irq_domain, moxtet);
+ if (moxtet->irq.domain == NULL) {
+ dev_err(moxtet->dev, "Could not add IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < MOXTET_NIRQS; ++i)
+ if (moxtet->irq.exists & BIT(i))
+ irq_create_mapping(moxtet->irq.domain, i);
+
+ moxtet->irq.chip = moxtet_irq_chip;
+ moxtet->irq.masked = ~0;
+
+ ret = request_threaded_irq(moxtet->dev_irq, NULL, moxtet_irq_thread_fn,
+ IRQF_ONESHOT, "moxtet", moxtet);
+ if (ret < 0)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ moxtet_irq_free(moxtet);
+ return ret;
+}
+
+static int moxtet_probe(struct spi_device *spi)
+{
+ struct moxtet *moxtet;
+ int ret;
+
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return ret;
+
+ moxtet = devm_kzalloc(&spi->dev, sizeof(struct moxtet),
+ GFP_KERNEL);
+ if (!moxtet)
+ return -ENOMEM;
+
+ moxtet->dev = &spi->dev;
+ spi_set_drvdata(spi, moxtet);
+
+ mutex_init(&moxtet->lock);
+
+ moxtet->dev_irq = of_irq_get(moxtet->dev->of_node, 0);
+ if (moxtet->dev_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (moxtet->dev_irq <= 0) {
+ dev_err(moxtet->dev, "No IRQ resource found\n");
+ return -ENXIO;
+ }
+
+ ret = moxtet_find_topology(moxtet);
+ if (ret < 0)
+ return ret;
+
+ if (moxtet->irq.exists) {
+ ret = moxtet_irq_setup(moxtet);
+ if (ret < 0)
+ return ret;
+ }
+
+ of_register_moxtet_devices(moxtet);
+ moxtet_register_devices_from_topology(moxtet);
+
+ ret = moxtet_register_debugfs(moxtet);
+ if (ret < 0)
+ dev_warn(moxtet->dev, "Failed creating debugfs entries: %i\n",
+ ret);
+
+ return 0;
+}
+
+static int moxtet_remove(struct spi_device *spi)
+{
+ struct moxtet *moxtet = spi_get_drvdata(spi);
+
+ free_irq(moxtet->dev_irq, moxtet);
+
+ moxtet_irq_free(moxtet);
+
+ moxtet_unregister_debugfs(moxtet);
+
+ device_for_each_child(moxtet->dev, NULL, __unregister);
+
+ mutex_destroy(&moxtet->lock);
+
+ return 0;
+}
+
+static const struct of_device_id moxtet_dt_ids[] = {
+ { .compatible = "cznic,moxtet" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, moxtet_dt_ids);
+
+static struct spi_driver moxtet_spi_driver = {
+ .driver = {
+ .name = "moxtet",
+ .of_match_table = moxtet_dt_ids,
+ },
+ .probe = moxtet_probe,
+ .remove = moxtet_remove,
+};
+
+static int __init moxtet_init(void)
+{
+ int ret;
+
+ ret = bus_register(&moxtet_bus_type);
+ if (ret < 0) {
+ pr_err("moxtet bus registration failed: %d\n", ret);
+ goto error;
+ }
+
+ ret = spi_register_driver(&moxtet_spi_driver);
+ if (ret < 0) {
+ pr_err("moxtet spi driver registration failed: %d\n", ret);
+ goto error_bus;
+ }
+
+ return 0;
+
+error_bus:
+ bus_unregister(&moxtet_bus_type);
+error:
+ return ret;
+}
+postcore_initcall_sync(moxtet_init);
+
+static void __exit moxtet_exit(void)
+{
+ spi_unregister_driver(&moxtet_spi_driver);
+ bus_unregister(&moxtet_bus_type);
+}
+module_exit(moxtet_exit);
+
+MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_DESCRIPTION("CZ.NIC's Turris Mox module configuration bus");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
new file mode 100644
index 000000000..2519ceede
--- /dev/null
+++ b/drivers/bus/mvebu-mbus.c
@@ -0,0 +1,1378 @@
+/*
+ * Address map functions for Marvell EBU SoCs (Kirkwood, Armada
+ * 370/XP, Dove, Orion5x and MV78xx0)
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * The Marvell EBU SoCs have a configurable physical address space:
+ * the physical address at which certain devices (PCIe, NOR, NAND,
+ * etc.) sit can be configured. The configuration takes place through
+ * two sets of registers:
+ *
+ * - One to configure the access of the CPU to the devices. Depending
+ *   on the families, there are between 8 and 20 configurable windows,
+ *   each of which can be used to create a physical memory window that
+ *   maps to a specific device. Devices are identified by a tuple
+ *   (target, attribute).
+ *
+ * - One to configure the access of the CPU to the SDRAM. There are
+ * either 2 (for Dove) or 4 (for other families) windows to map the
+ * SDRAM into the physical address space.
+ *
+ * This driver:
+ *
+ * - Reads out the SDRAM address decoding windows at initialization
+ *   time, and fills the mvebu_mbus_dram_info structure with this
+ *   information. The exported function mv_mbus_dram_info() allows
+ *   device drivers to get the information related to the SDRAM
+ *   address decoding windows. This is because devices also have their
+ *   own windows (configured through registers that are part of each
+ *   device's register space), and therefore the drivers for Marvell
+ *   devices have to configure those device -> SDRAM windows to ensure
+ *   that DMA works properly.
+ *
+ * - Provides an API for platform code or device drivers to
+ * dynamically add or remove address decoding windows for the CPU ->
+ * device accesses. This API is mvebu_mbus_add_window_by_id(),
+ * mvebu_mbus_add_window_remap_by_id() and
+ * mvebu_mbus_del_window().
+ *
+ * - Provides a debugfs interface in /sys/kernel/debug/mvebu-mbus/ to
+ * see the list of CPU -> SDRAM windows and their configuration
+ * (file 'sdram') and the list of CPU -> devices windows and their
+ * configuration (file 'devices').
+ */
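+
+/*
+ * For illustration only (not part of the driver logic): a platform
+ * caller could create and later remove a CPU -> device window with the
+ * public API roughly as below; the target/attribute values and the
+ * base address are made up for the example:
+ *
+ *	ret = mvebu_mbus_add_window_remap_by_id(0x01, 0x2f, 0xe0000000,
+ *						SZ_64M, MVEBU_MBUS_NO_REMAP);
+ *	if (ret)
+ *		pr_err("cannot add NOR window\n");
+ *	...
+ *	mvebu_mbus_del_window(0xe0000000, SZ_64M);
+ */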
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mbus.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/debugfs.h>
+#include <linux/log2.h>
+#include <linux/memblock.h>
+#include <linux/syscore_ops.h>
+
+/*
+ * DDR target is the same on all platforms.
+ */
+#define TARGET_DDR 0
+
+/*
+ * CPU Address Decode Windows registers
+ */
+#define WIN_CTRL_OFF 0x0000
+#define WIN_CTRL_ENABLE BIT(0)
+/* Only on HW I/O coherency capable platforms */
+#define WIN_CTRL_SYNCBARRIER BIT(1)
+#define WIN_CTRL_TGT_MASK 0xf0
+#define WIN_CTRL_TGT_SHIFT 4
+#define WIN_CTRL_ATTR_MASK 0xff00
+#define WIN_CTRL_ATTR_SHIFT 8
+#define WIN_CTRL_SIZE_MASK 0xffff0000
+#define WIN_CTRL_SIZE_SHIFT 16
+#define WIN_BASE_OFF 0x0004
+#define WIN_BASE_LOW 0xffff0000
+#define WIN_BASE_HIGH 0xf
+#define WIN_REMAP_LO_OFF 0x0008
+#define WIN_REMAP_LOW 0xffff0000
+#define WIN_REMAP_HI_OFF 0x000c
+
+#define UNIT_SYNC_BARRIER_OFF 0x84
+#define UNIT_SYNC_BARRIER_ALL 0xFFFF
+
+#define ATTR_HW_COHERENCY (0x1 << 4)
+
+#define DDR_BASE_CS_OFF(n) (0x0000 + ((n) << 3))
+#define DDR_BASE_CS_HIGH_MASK 0xf
+#define DDR_BASE_CS_LOW_MASK 0xff000000
+#define DDR_SIZE_CS_OFF(n) (0x0004 + ((n) << 3))
+#define DDR_SIZE_ENABLED BIT(0)
+#define DDR_SIZE_CS_MASK 0x1c
+#define DDR_SIZE_CS_SHIFT 2
+#define DDR_SIZE_MASK 0xff000000
+
+#define DOVE_DDR_BASE_CS_OFF(n) ((n) << 4)
+
+/* Relative to mbusbridge_base */
+#define MBUS_BRIDGE_CTRL_OFF 0x0
+#define MBUS_BRIDGE_BASE_OFF 0x4
+
+/* Maximum number of windows, for all known platforms */
+#define MBUS_WINS_MAX 20
+
+struct mvebu_mbus_state;
+
+struct mvebu_mbus_soc_data {
+ unsigned int num_wins;
+ bool has_mbus_bridge;
+ unsigned int (*win_cfg_offset)(const int win);
+ unsigned int (*win_remap_offset)(const int win);
+ void (*setup_cpu_target)(struct mvebu_mbus_state *s);
+ int (*save_cpu_target)(struct mvebu_mbus_state *s,
+ u32 __iomem *store_addr);
+ int (*show_cpu_target)(struct mvebu_mbus_state *s,
+ struct seq_file *seq, void *v);
+};
+
+/*
+ * Used to store the state of one MBus window across suspend/resume.
+ */
+struct mvebu_mbus_win_data {
+ u32 ctrl;
+ u32 base;
+ u32 remap_lo;
+ u32 remap_hi;
+};
+
+struct mvebu_mbus_state {
+ void __iomem *mbuswins_base;
+ void __iomem *sdramwins_base;
+ void __iomem *mbusbridge_base;
+ phys_addr_t sdramwins_phys_base;
+ struct dentry *debugfs_root;
+ struct dentry *debugfs_sdram;
+ struct dentry *debugfs_devs;
+ struct resource pcie_mem_aperture;
+ struct resource pcie_io_aperture;
+ const struct mvebu_mbus_soc_data *soc;
+ int hw_io_coherency;
+
+ /* Used during suspend/resume */
+ u32 mbus_bridge_ctrl;
+ u32 mbus_bridge_base;
+ struct mvebu_mbus_win_data wins[MBUS_WINS_MAX];
+};
+
+static struct mvebu_mbus_state mbus_state;
+
+/*
+ * We provide two variants of the mv_mbus_dram_info() function:
+ *
+ * - The normal one, where the described DRAM ranges may overlap with
+ * the I/O windows, but for which the DRAM ranges are guaranteed to
+ * have a power of two size. Such ranges are suitable for the DMA
+ * masters that only DMA between the RAM and the device, which is
+ * actually all devices except the crypto engines.
+ *
+ * - The 'nooverlap' one, where the described DRAM ranges are
+ * guaranteed to not overlap with the I/O windows, but for which the
+ * DRAM ranges will not have power of two sizes. They will only be
+ * aligned on a 64 KB boundary, and have a size multiple of 64
+ * KB. Such ranges are suitable for the DMA masters that DMA between
+ * the crypto SRAM (which is mapped through an I/O window) and a
+ * device. This is the case for the crypto engines.
+ */
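+
+/*
+ * For illustration only: a typical Marvell peripheral driver consumes
+ * this information roughly as sketched below, programming one
+ * device -> SDRAM window per chip select. The WIN_BASE()/WIN_CTRL()
+ * register offsets are hypothetical and differ per device:
+ *
+ *	const struct mbus_dram_target_info *dram = mv_mbus_dram_info();
+ *	int i;
+ *
+ *	for (i = 0; i < dram->num_cs; i++) {
+ *		const struct mbus_dram_window *cs = dram->cs + i;
+ *
+ *		writel(cs->base, base + WIN_BASE(i));
+ *		writel(((cs->size - 1) & 0xffff0000) |
+ *		       (cs->mbus_attr << 8) |
+ *		       (dram->mbus_dram_target_id << 4) | 1,
+ *		       base + WIN_CTRL(i));
+ *	}
+ */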
+
+static struct mbus_dram_target_info mvebu_mbus_dram_info;
+static struct mbus_dram_target_info mvebu_mbus_dram_info_nooverlap;
+
+const struct mbus_dram_target_info *mv_mbus_dram_info(void)
+{
+ return &mvebu_mbus_dram_info;
+}
+EXPORT_SYMBOL_GPL(mv_mbus_dram_info);
+
+const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void)
+{
+ return &mvebu_mbus_dram_info_nooverlap;
+}
+EXPORT_SYMBOL_GPL(mv_mbus_dram_info_nooverlap);
+
+/* Checks whether the given window has remap capability */
+static bool mvebu_mbus_window_is_remappable(struct mvebu_mbus_state *mbus,
+ const int win)
+{
+ return mbus->soc->win_remap_offset(win) != MVEBU_MBUS_NO_REMAP;
+}
+
+/*
+ * Functions to manipulate the address decoding windows
+ */
+
+static void mvebu_mbus_read_window(struct mvebu_mbus_state *mbus,
+ int win, int *enabled, u64 *base,
+ u32 *size, u8 *target, u8 *attr,
+ u64 *remap)
+{
+ void __iomem *addr = mbus->mbuswins_base +
+ mbus->soc->win_cfg_offset(win);
+ u32 basereg = readl(addr + WIN_BASE_OFF);
+ u32 ctrlreg = readl(addr + WIN_CTRL_OFF);
+
+ if (!(ctrlreg & WIN_CTRL_ENABLE)) {
+ *enabled = 0;
+ return;
+ }
+
+ *enabled = 1;
+ *base = ((u64)basereg & WIN_BASE_HIGH) << 32;
+ *base |= (basereg & WIN_BASE_LOW);
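+	/*
+	 * The size field holds (size - 1) in its upper 16 bits: e.g. a
+	 * ctrl value of 0x000fxxxx decodes to 0x000fffff + 1 = 1 MiB.
+	 */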
+ *size = (ctrlreg | ~WIN_CTRL_SIZE_MASK) + 1;
+
+ if (target)
+ *target = (ctrlreg & WIN_CTRL_TGT_MASK) >> WIN_CTRL_TGT_SHIFT;
+
+ if (attr)
+ *attr = (ctrlreg & WIN_CTRL_ATTR_MASK) >> WIN_CTRL_ATTR_SHIFT;
+
+ if (remap) {
+ if (mvebu_mbus_window_is_remappable(mbus, win)) {
+ u32 remap_low, remap_hi;
+ void __iomem *addr_rmp = mbus->mbuswins_base +
+ mbus->soc->win_remap_offset(win);
+ remap_low = readl(addr_rmp + WIN_REMAP_LO_OFF);
+ remap_hi = readl(addr_rmp + WIN_REMAP_HI_OFF);
+ *remap = ((u64)remap_hi << 32) | remap_low;
+ } else
+ *remap = 0;
+ }
+}
+
+static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus,
+ int win)
+{
+ void __iomem *addr;
+
+ addr = mbus->mbuswins_base + mbus->soc->win_cfg_offset(win);
+ writel(0, addr + WIN_BASE_OFF);
+ writel(0, addr + WIN_CTRL_OFF);
+
+ if (mvebu_mbus_window_is_remappable(mbus, win)) {
+ addr = mbus->mbuswins_base + mbus->soc->win_remap_offset(win);
+ writel(0, addr + WIN_REMAP_LO_OFF);
+ writel(0, addr + WIN_REMAP_HI_OFF);
+ }
+}
+
+/* Checks whether the given window number is available */
+static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus,
+ const int win)
+{
+ void __iomem *addr = mbus->mbuswins_base +
+ mbus->soc->win_cfg_offset(win);
+ u32 ctrl = readl(addr + WIN_CTRL_OFF);
+
+ return !(ctrl & WIN_CTRL_ENABLE);
+}
+
+/*
+ * Checks whether the given (base, base+size) area doesn't overlap an
+ * existing region
+ */
+static int mvebu_mbus_window_conflicts(struct mvebu_mbus_state *mbus,
+ phys_addr_t base, size_t size,
+ u8 target, u8 attr)
+{
+ u64 end = (u64)base + size;
+ int win;
+
+ for (win = 0; win < mbus->soc->num_wins; win++) {
+ u64 wbase, wend;
+ u32 wsize;
+ u8 wtarget, wattr;
+ int enabled;
+
+ mvebu_mbus_read_window(mbus, win,
+ &enabled, &wbase, &wsize,
+ &wtarget, &wattr, NULL);
+
+ if (!enabled)
+ continue;
+
+ wend = wbase + wsize;
+
+ /*
+ * Check if the current window overlaps with the
+ * proposed physical range
+ */
+ if ((u64)base < wend && end > wbase)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int mvebu_mbus_find_window(struct mvebu_mbus_state *mbus,
+ phys_addr_t base, size_t size)
+{
+ int win;
+
+ for (win = 0; win < mbus->soc->num_wins; win++) {
+ u64 wbase;
+ u32 wsize;
+ int enabled;
+
+ mvebu_mbus_read_window(mbus, win,
+ &enabled, &wbase, &wsize,
+ NULL, NULL, NULL);
+
+ if (!enabled)
+ continue;
+
+ if (base == wbase && size == wsize)
+ return win;
+ }
+
+ return -ENODEV;
+}
+
+static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
+ int win, phys_addr_t base, size_t size,
+ phys_addr_t remap, u8 target,
+ u8 attr)
+{
+ void __iomem *addr = mbus->mbuswins_base +
+ mbus->soc->win_cfg_offset(win);
+ u32 ctrl, remap_addr;
+
+ if (!is_power_of_2(size)) {
+ WARN(true, "Invalid MBus window size: 0x%zx\n", size);
+ return -EINVAL;
+ }
+
+ if ((base & (phys_addr_t)(size - 1)) != 0) {
+ WARN(true, "Invalid MBus base/size: %pa len 0x%zx\n", &base,
+ size);
+ return -EINVAL;
+ }
+
+ ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
+ (attr << WIN_CTRL_ATTR_SHIFT) |
+ (target << WIN_CTRL_TGT_SHIFT) |
+ WIN_CTRL_ENABLE;
+ if (mbus->hw_io_coherency)
+ ctrl |= WIN_CTRL_SYNCBARRIER;
+
+ writel(base & WIN_BASE_LOW, addr + WIN_BASE_OFF);
+ writel(ctrl, addr + WIN_CTRL_OFF);
+
+ if (mvebu_mbus_window_is_remappable(mbus, win)) {
+ void __iomem *addr_rmp = mbus->mbuswins_base +
+ mbus->soc->win_remap_offset(win);
+
+ if (remap == MVEBU_MBUS_NO_REMAP)
+ remap_addr = base;
+ else
+ remap_addr = remap;
+ writel(remap_addr & WIN_REMAP_LOW, addr_rmp + WIN_REMAP_LO_OFF);
+ writel(0, addr_rmp + WIN_REMAP_HI_OFF);
+ }
+
+ return 0;
+}
+
+static int mvebu_mbus_alloc_window(struct mvebu_mbus_state *mbus,
+ phys_addr_t base, size_t size,
+ phys_addr_t remap, u8 target,
+ u8 attr)
+{
+ int win;
+
+ if (remap == MVEBU_MBUS_NO_REMAP) {
+ for (win = 0; win < mbus->soc->num_wins; win++) {
+ if (mvebu_mbus_window_is_remappable(mbus, win))
+ continue;
+
+ if (mvebu_mbus_window_is_free(mbus, win))
+ return mvebu_mbus_setup_window(mbus, win, base,
+ size, remap,
+ target, attr);
+ }
+ }
+
+ for (win = 0; win < mbus->soc->num_wins; win++) {
+		/* Skip this window if remapping is needed but not supported */
+ if ((remap != MVEBU_MBUS_NO_REMAP) &&
+ !mvebu_mbus_window_is_remappable(mbus, win))
+ continue;
+
+ if (mvebu_mbus_window_is_free(mbus, win))
+ return mvebu_mbus_setup_window(mbus, win, base, size,
+ remap, target, attr);
+ }
+
+ return -ENOMEM;
+}
+
+/*
+ * Debugfs support
+ */
+
+/* Common function used for Dove, Kirkwood, Armada 370/XP and Orion 5x */
+static int mvebu_sdram_debug_show_orion(struct mvebu_mbus_state *mbus,
+ struct seq_file *seq, void *v)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ u32 basereg = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+ u32 sizereg = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+ u64 base;
+ u32 size;
+
+ if (!(sizereg & DDR_SIZE_ENABLED)) {
+ seq_printf(seq, "[%d] disabled\n", i);
+ continue;
+ }
+
+ base = ((u64)basereg & DDR_BASE_CS_HIGH_MASK) << 32;
+ base |= basereg & DDR_BASE_CS_LOW_MASK;
+ size = (sizereg | ~DDR_SIZE_MASK);
+
+ seq_printf(seq, "[%d] %016llx - %016llx : cs%d\n",
+ i, (unsigned long long)base,
+ (unsigned long long)base + size + 1,
+ (sizereg & DDR_SIZE_CS_MASK) >> DDR_SIZE_CS_SHIFT);
+ }
+
+ return 0;
+}
+
+/* Special function for Dove */
+static int mvebu_sdram_debug_show_dove(struct mvebu_mbus_state *mbus,
+ struct seq_file *seq, void *v)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
+ u64 base;
+ u32 size;
+
+ if (!(map & 1)) {
+ seq_printf(seq, "[%d] disabled\n", i);
+ continue;
+ }
+
+ base = map & 0xff800000;
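+		/*
+		 * The size field encodes log2(size) - 16: e.g. a field
+		 * value of 7 yields 0x100000 << 3 = 8 MiB.
+		 */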
+ size = 0x100000 << (((map & 0x000f0000) >> 16) - 4);
+
+ seq_printf(seq, "[%d] %016llx - %016llx : cs%d\n",
+ i, (unsigned long long)base,
+ (unsigned long long)base + size, i);
+ }
+
+ return 0;
+}
+
+static int mvebu_sdram_debug_show(struct seq_file *seq, void *v)
+{
+ struct mvebu_mbus_state *mbus = &mbus_state;
+ return mbus->soc->show_cpu_target(mbus, seq, v);
+}
+
+static int mvebu_sdram_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mvebu_sdram_debug_show, inode->i_private);
+}
+
+static const struct file_operations mvebu_sdram_debug_fops = {
+ .open = mvebu_sdram_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int mvebu_devs_debug_show(struct seq_file *seq, void *v)
+{
+ struct mvebu_mbus_state *mbus = &mbus_state;
+ int win;
+
+ for (win = 0; win < mbus->soc->num_wins; win++) {
+ u64 wbase, wremap;
+ u32 wsize;
+ u8 wtarget, wattr;
+ int enabled;
+
+ mvebu_mbus_read_window(mbus, win,
+ &enabled, &wbase, &wsize,
+ &wtarget, &wattr, &wremap);
+
+ if (!enabled) {
+ seq_printf(seq, "[%02d] disabled\n", win);
+ continue;
+ }
+
+ seq_printf(seq, "[%02d] %016llx - %016llx : %04x:%04x",
+ win, (unsigned long long)wbase,
+ (unsigned long long)(wbase + wsize), wtarget, wattr);
+
+ if (!is_power_of_2(wsize) ||
+ ((wbase & (u64)(wsize - 1)) != 0))
+ seq_puts(seq, " (Invalid base/size!!)");
+
+ if (mvebu_mbus_window_is_remappable(mbus, win)) {
+ seq_printf(seq, " (remap %016llx)\n",
+ (unsigned long long)wremap);
+ } else
+ seq_printf(seq, "\n");
+ }
+
+ return 0;
+}
+
+static int mvebu_devs_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mvebu_devs_debug_show, inode->i_private);
+}
+
+static const struct file_operations mvebu_devs_debug_fops = {
+ .open = mvebu_devs_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * SoC-specific functions and definitions
+ */
+
+static unsigned int generic_mbus_win_cfg_offset(int win)
+{
+ return win << 4;
+}
+
+static unsigned int armada_370_xp_mbus_win_cfg_offset(int win)
+{
+ /* The register layout is a bit annoying and the below code
+ * tries to cope with it.
+ * - At offset 0x0, there are the registers for the first 8
+ * windows, with 4 registers of 32 bits per window (ctrl,
+ * base, remap low, remap high)
+ * - Then at offset 0x80, there is a hole of 0x10 bytes for
+ * the internal registers base address and internal units
+ * sync barrier register.
+	 * - Then at offset 0x90, there are the registers for the
+	 *   remaining 12 windows, with only 2 registers of 32 bits
+	 *   per window (ctrl, base).
+ */
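+	/* E.g. win 3 -> 0x30, win 10 -> 0x90 + (2 << 3) = 0xa0. */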
+ if (win < 8)
+ return win << 4;
+ else
+ return 0x90 + ((win - 8) << 3);
+}
+
+static unsigned int mv78xx0_mbus_win_cfg_offset(int win)
+{
+ if (win < 8)
+ return win << 4;
+ else
+ return 0x900 + ((win - 8) << 4);
+}
+
+static unsigned int generic_mbus_win_remap_2_offset(int win)
+{
+ if (win < 2)
+ return generic_mbus_win_cfg_offset(win);
+ else
+ return MVEBU_MBUS_NO_REMAP;
+}
+
+static unsigned int generic_mbus_win_remap_4_offset(int win)
+{
+ if (win < 4)
+ return generic_mbus_win_cfg_offset(win);
+ else
+ return MVEBU_MBUS_NO_REMAP;
+}
+
+static unsigned int generic_mbus_win_remap_8_offset(int win)
+{
+ if (win < 8)
+ return generic_mbus_win_cfg_offset(win);
+ else
+ return MVEBU_MBUS_NO_REMAP;
+}
+
+static unsigned int armada_xp_mbus_win_remap_offset(int win)
+{
+ if (win < 8)
+ return generic_mbus_win_cfg_offset(win);
+ else if (win == 13)
+ return 0xF0 - WIN_REMAP_LO_OFF;
+ else
+ return MVEBU_MBUS_NO_REMAP;
+}
+
+/*
+ * Use the memblock information to find the MBus bridge hole in the
+ * physical address space.
+ */
+static void __init
+mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end)
+{
+ phys_addr_t reg_start, reg_end;
+ uint64_t i, s = 0;
+
+ for_each_mem_range(i, &reg_start, &reg_end) {
+ /*
+		 * This part of the memory is above 4 GB, so it is
+		 * irrelevant to the MBus bridge hole.
+ */
+ if (reg_start >= 0x100000000ULL)
+ continue;
+
+ /*
+ * The MBus bridge hole is at the end of the RAM under
+ * the 4 GB limit.
+ */
+ if (reg_end > s)
+ s = reg_end;
+ }
+
+ *start = s;
+ *end = 0x100000000ULL;
+}
+
+/*
+ * This function fills in the mvebu_mbus_dram_info_nooverlap data
+ * structure, by looking at the mvebu_mbus_dram_info data, and
+ * removing the parts of it that overlap with I/O windows.
+ */
+static void __init
+mvebu_mbus_setup_cpu_target_nooverlap(struct mvebu_mbus_state *mbus)
+{
+ uint64_t mbus_bridge_base, mbus_bridge_end;
+ int cs_nooverlap = 0;
+ int i;
+
+ mvebu_mbus_find_bridge_hole(&mbus_bridge_base, &mbus_bridge_end);
+
+ for (i = 0; i < mvebu_mbus_dram_info.num_cs; i++) {
+ struct mbus_dram_window *w;
+ u64 base, size, end;
+
+ w = &mvebu_mbus_dram_info.cs[i];
+ base = w->base;
+ size = w->size;
+ end = base + size;
+
+ /*
+ * The CS is fully enclosed inside the MBus bridge
+ * area, so ignore it.
+ */
+ if (base >= mbus_bridge_base && end <= mbus_bridge_end)
+ continue;
+
+ /*
+ * Beginning of CS overlaps with end of MBus, raise CS
+ * base address, and shrink its size.
+ */
+ if (base >= mbus_bridge_base && end > mbus_bridge_end) {
+ size -= mbus_bridge_end - base;
+ base = mbus_bridge_end;
+ }
+
+ /*
+ * End of CS overlaps with beginning of MBus, shrink
+ * CS size.
+ */
+ if (base < mbus_bridge_base && end > mbus_bridge_base)
+ size -= end - mbus_bridge_base;
+
+ w = &mvebu_mbus_dram_info_nooverlap.cs[cs_nooverlap++];
+ w->cs_index = i;
+ w->mbus_attr = 0xf & ~(1 << i);
+ if (mbus->hw_io_coherency)
+ w->mbus_attr |= ATTR_HW_COHERENCY;
+ w->base = base;
+ w->size = size;
+ }
+
+ mvebu_mbus_dram_info_nooverlap.mbus_dram_target_id = TARGET_DDR;
+ mvebu_mbus_dram_info_nooverlap.num_cs = cs_nooverlap;
+}
+
+static void __init
+mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
+{
+ int i;
+ int cs;
+
+ mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
+
+ for (i = 0, cs = 0; i < 4; i++) {
+ u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+ u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+
+ /*
+ * We only take care of entries for which the chip
+ * select is enabled, and that don't have high base
+		 * address bits set (devices can only access the first
+		 * 4 GB of memory, i.e. 32-bit addresses).
+ */
+ if ((size & DDR_SIZE_ENABLED) &&
+ !(base & DDR_BASE_CS_HIGH_MASK)) {
+ struct mbus_dram_window *w;
+
+ w = &mvebu_mbus_dram_info.cs[cs++];
+ w->cs_index = i;
+ w->mbus_attr = 0xf & ~(1 << i);
+ if (mbus->hw_io_coherency)
+ w->mbus_attr |= ATTR_HW_COHERENCY;
+ w->base = base & DDR_BASE_CS_LOW_MASK;
+ w->size = (u64)(size | ~DDR_SIZE_MASK) + 1;
+ }
+ }
+ mvebu_mbus_dram_info.num_cs = cs;
+}
+
+static int
+mvebu_mbus_default_save_cpu_target(struct mvebu_mbus_state *mbus,
+ u32 __iomem *store_addr)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+ u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+
+ writel(mbus->sdramwins_phys_base + DDR_BASE_CS_OFF(i),
+ store_addr++);
+ writel(base, store_addr++);
+ writel(mbus->sdramwins_phys_base + DDR_SIZE_CS_OFF(i),
+ store_addr++);
+ writel(size, store_addr++);
+ }
+
+ /* We've written 16 words to the store address */
+ return 16;
+}
+
+static void __init
+mvebu_mbus_dove_setup_cpu_target(struct mvebu_mbus_state *mbus)
+{
+ int i;
+ int cs;
+
+ mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
+
+ for (i = 0, cs = 0; i < 2; i++) {
+ u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
+
+ /*
+ * Chip select enabled?
+ */
+ if (map & 1) {
+ struct mbus_dram_window *w;
+
+ w = &mvebu_mbus_dram_info.cs[cs++];
+ w->cs_index = i;
+			/*
+			 * CS address decoding is done inside the DDR
+			 * controller, so there is no need to provide
+			 * attributes.
+			 */
+			w->mbus_attr = 0;
+ w->base = map & 0xff800000;
+ w->size = 0x100000 << (((map & 0x000f0000) >> 16) - 4);
+ }
+ }
+
+ mvebu_mbus_dram_info.num_cs = cs;
+}
+
+static int
+mvebu_mbus_dove_save_cpu_target(struct mvebu_mbus_state *mbus,
+ u32 __iomem *store_addr)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
+
+ writel(mbus->sdramwins_phys_base + DOVE_DDR_BASE_CS_OFF(i),
+ store_addr++);
+ writel(map, store_addr++);
+ }
+
+ /* We've written 4 words to the store address */
+ return 4;
+}
+
+int mvebu_mbus_save_cpu_target(u32 __iomem *store_addr)
+{
+ return mbus_state.soc->save_cpu_target(&mbus_state, store_addr);
+}
+
+static const struct mvebu_mbus_soc_data armada_370_mbus_data = {
+ .num_wins = 20,
+ .has_mbus_bridge = true,
+ .win_cfg_offset = armada_370_xp_mbus_win_cfg_offset,
+ .win_remap_offset = generic_mbus_win_remap_8_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+ .save_cpu_target = mvebu_mbus_default_save_cpu_target,
+};
+
+static const struct mvebu_mbus_soc_data armada_xp_mbus_data = {
+ .num_wins = 20,
+ .has_mbus_bridge = true,
+ .win_cfg_offset = armada_370_xp_mbus_win_cfg_offset,
+ .win_remap_offset = armada_xp_mbus_win_remap_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+ .save_cpu_target = mvebu_mbus_default_save_cpu_target,
+};
+
+static const struct mvebu_mbus_soc_data kirkwood_mbus_data = {
+ .num_wins = 8,
+ .win_cfg_offset = generic_mbus_win_cfg_offset,
+ .save_cpu_target = mvebu_mbus_default_save_cpu_target,
+ .win_remap_offset = generic_mbus_win_remap_4_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+};
+
+static const struct mvebu_mbus_soc_data dove_mbus_data = {
+ .num_wins = 8,
+ .win_cfg_offset = generic_mbus_win_cfg_offset,
+ .save_cpu_target = mvebu_mbus_dove_save_cpu_target,
+ .win_remap_offset = generic_mbus_win_remap_4_offset,
+ .setup_cpu_target = mvebu_mbus_dove_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_dove,
+};
+
+/*
+ * Some variants of Orion5x have 4 remappable windows, while others
+ * have only two of them.
+ */
+static const struct mvebu_mbus_soc_data orion5x_4win_mbus_data = {
+ .num_wins = 8,
+ .win_cfg_offset = generic_mbus_win_cfg_offset,
+ .save_cpu_target = mvebu_mbus_default_save_cpu_target,
+ .win_remap_offset = generic_mbus_win_remap_4_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+};
+
+static const struct mvebu_mbus_soc_data orion5x_2win_mbus_data = {
+ .num_wins = 8,
+ .win_cfg_offset = generic_mbus_win_cfg_offset,
+ .save_cpu_target = mvebu_mbus_default_save_cpu_target,
+ .win_remap_offset = generic_mbus_win_remap_2_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+};
+
+static const struct mvebu_mbus_soc_data mv78xx0_mbus_data = {
+ .num_wins = 14,
+ .win_cfg_offset = mv78xx0_mbus_win_cfg_offset,
+ .save_cpu_target = mvebu_mbus_default_save_cpu_target,
+ .win_remap_offset = generic_mbus_win_remap_8_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+};
+
+static const struct of_device_id of_mvebu_mbus_ids[] = {
+ { .compatible = "marvell,armada370-mbus",
+ .data = &armada_370_mbus_data, },
+ { .compatible = "marvell,armada375-mbus",
+ .data = &armada_xp_mbus_data, },
+ { .compatible = "marvell,armada380-mbus",
+ .data = &armada_xp_mbus_data, },
+ { .compatible = "marvell,armadaxp-mbus",
+ .data = &armada_xp_mbus_data, },
+ { .compatible = "marvell,kirkwood-mbus",
+ .data = &kirkwood_mbus_data, },
+ { .compatible = "marvell,dove-mbus",
+ .data = &dove_mbus_data, },
+ { .compatible = "marvell,orion5x-88f5281-mbus",
+ .data = &orion5x_4win_mbus_data, },
+ { .compatible = "marvell,orion5x-88f5182-mbus",
+ .data = &orion5x_2win_mbus_data, },
+ { .compatible = "marvell,orion5x-88f5181-mbus",
+ .data = &orion5x_2win_mbus_data, },
+ { .compatible = "marvell,orion5x-88f6183-mbus",
+ .data = &orion5x_4win_mbus_data, },
+ { .compatible = "marvell,mv78xx0-mbus",
+ .data = &mv78xx0_mbus_data, },
+ { },
+};
+
+/*
+ * Public API of the driver
+ */
+int mvebu_mbus_add_window_remap_by_id(unsigned int target,
+ unsigned int attribute,
+ phys_addr_t base, size_t size,
+ phys_addr_t remap)
+{
+ struct mvebu_mbus_state *s = &mbus_state;
+
+ if (!mvebu_mbus_window_conflicts(s, base, size, target, attribute)) {
+ pr_err("cannot add window '%x:%x', conflicts with another window\n",
+ target, attribute);
+ return -EINVAL;
+ }
+
+ return mvebu_mbus_alloc_window(s, base, size, remap, target, attribute);
+}
+
+int mvebu_mbus_add_window_by_id(unsigned int target, unsigned int attribute,
+ phys_addr_t base, size_t size)
+{
+ return mvebu_mbus_add_window_remap_by_id(target, attribute, base,
+ size, MVEBU_MBUS_NO_REMAP);
+}
+
+int mvebu_mbus_del_window(phys_addr_t base, size_t size)
+{
+ int win;
+
+ win = mvebu_mbus_find_window(&mbus_state, base, size);
+ if (win < 0)
+ return win;
+
+ mvebu_mbus_disable_window(&mbus_state, win);
+ return 0;
+}
+
+void mvebu_mbus_get_pcie_mem_aperture(struct resource *res)
+{
+ if (!res)
+ return;
+ *res = mbus_state.pcie_mem_aperture;
+}
+
+void mvebu_mbus_get_pcie_io_aperture(struct resource *res)
+{
+ if (!res)
+ return;
+ *res = mbus_state.pcie_io_aperture;
+}
+
+int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr)
+{
+ const struct mbus_dram_target_info *dram;
+ int i;
+
+ /* Get dram info */
+ dram = mv_mbus_dram_info();
+ if (!dram) {
+ pr_err("missing DRAM information\n");
+ return -ENODEV;
+ }
+
+ /* Try to find matching DRAM window for phyaddr */
+ for (i = 0; i < dram->num_cs; i++) {
+ const struct mbus_dram_window *cs = dram->cs + i;
+
+ if (cs->base <= phyaddr &&
+ phyaddr <= (cs->base + cs->size - 1)) {
+ *target = dram->mbus_dram_target_id;
+ *attr = cs->mbus_attr;
+ return 0;
+ }
+ }
+
+ pr_err("invalid dram address %pa\n", &phyaddr);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mvebu_mbus_get_dram_win_info);
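+
+/*
+ * For illustration only: a DMA client that needs the (target, attribute)
+ * pair describing a buffer could use the helper above roughly as follows
+ * (program_device_window() is a hypothetical per-device helper):
+ *
+ *	u8 target, attr;
+ *
+ *	if (!mvebu_mbus_get_dram_win_info(buf_phys, &target, &attr))
+ *		program_device_window(buf_phys, target, attr);
+ */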
+
+int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target,
+ u8 *attr)
+{
+ int win;
+
+ for (win = 0; win < mbus_state.soc->num_wins; win++) {
+ u64 wbase;
+ int enabled;
+
+ mvebu_mbus_read_window(&mbus_state, win, &enabled, &wbase,
+ size, target, attr, NULL);
+
+ if (!enabled)
+ continue;
+
+ if (wbase <= phyaddr && phyaddr <= wbase + *size)
+ return win;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mvebu_mbus_get_io_win_info);
+
+static __init int mvebu_mbus_debugfs_init(void)
+{
+ struct mvebu_mbus_state *s = &mbus_state;
+
+ /*
+	 * If no base has been initialized, it doesn't make sense to
+ * register the debugfs entries. We may be on a multiplatform
+ * kernel that isn't running a Marvell EBU SoC.
+ */
+ if (!s->mbuswins_base)
+ return 0;
+
+ s->debugfs_root = debugfs_create_dir("mvebu-mbus", NULL);
+ if (s->debugfs_root) {
+ s->debugfs_sdram = debugfs_create_file("sdram", S_IRUGO,
+ s->debugfs_root, NULL,
+ &mvebu_sdram_debug_fops);
+ s->debugfs_devs = debugfs_create_file("devices", S_IRUGO,
+ s->debugfs_root, NULL,
+ &mvebu_devs_debug_fops);
+ }
+
+ return 0;
+}
+fs_initcall(mvebu_mbus_debugfs_init);
+
+static int mvebu_mbus_suspend(void)
+{
+ struct mvebu_mbus_state *s = &mbus_state;
+ int win;
+
+ if (!s->mbusbridge_base)
+ return -ENODEV;
+
+ for (win = 0; win < s->soc->num_wins; win++) {
+ void __iomem *addr = s->mbuswins_base +
+ s->soc->win_cfg_offset(win);
+ void __iomem *addr_rmp;
+
+ s->wins[win].base = readl(addr + WIN_BASE_OFF);
+ s->wins[win].ctrl = readl(addr + WIN_CTRL_OFF);
+
+ if (!mvebu_mbus_window_is_remappable(s, win))
+ continue;
+
+ addr_rmp = s->mbuswins_base +
+ s->soc->win_remap_offset(win);
+
+ s->wins[win].remap_lo = readl(addr_rmp + WIN_REMAP_LO_OFF);
+ s->wins[win].remap_hi = readl(addr_rmp + WIN_REMAP_HI_OFF);
+ }
+
+ s->mbus_bridge_ctrl = readl(s->mbusbridge_base +
+ MBUS_BRIDGE_CTRL_OFF);
+ s->mbus_bridge_base = readl(s->mbusbridge_base +
+ MBUS_BRIDGE_BASE_OFF);
+
+ return 0;
+}
+
+static void mvebu_mbus_resume(void)
+{
+ struct mvebu_mbus_state *s = &mbus_state;
+ int win;
+
+ writel(s->mbus_bridge_ctrl,
+ s->mbusbridge_base + MBUS_BRIDGE_CTRL_OFF);
+ writel(s->mbus_bridge_base,
+ s->mbusbridge_base + MBUS_BRIDGE_BASE_OFF);
+
+ for (win = 0; win < s->soc->num_wins; win++) {
+ void __iomem *addr = s->mbuswins_base +
+ s->soc->win_cfg_offset(win);
+ void __iomem *addr_rmp;
+
+ writel(s->wins[win].base, addr + WIN_BASE_OFF);
+ writel(s->wins[win].ctrl, addr + WIN_CTRL_OFF);
+
+ if (!mvebu_mbus_window_is_remappable(s, win))
+ continue;
+
+ addr_rmp = s->mbuswins_base +
+ s->soc->win_remap_offset(win);
+
+ writel(s->wins[win].remap_lo, addr_rmp + WIN_REMAP_LO_OFF);
+ writel(s->wins[win].remap_hi, addr_rmp + WIN_REMAP_HI_OFF);
+ }
+}
+
+static struct syscore_ops mvebu_mbus_syscore_ops = {
+ .suspend = mvebu_mbus_suspend,
+ .resume = mvebu_mbus_resume,
+};
+
+static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
+ phys_addr_t mbuswins_phys_base,
+ size_t mbuswins_size,
+ phys_addr_t sdramwins_phys_base,
+ size_t sdramwins_size,
+ phys_addr_t mbusbridge_phys_base,
+ size_t mbusbridge_size,
+ bool is_coherent)
+{
+ int win;
+
+ mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size);
+ if (!mbus->mbuswins_base)
+ return -ENOMEM;
+
+ mbus->sdramwins_base = ioremap(sdramwins_phys_base, sdramwins_size);
+ if (!mbus->sdramwins_base) {
+ iounmap(mbus_state.mbuswins_base);
+ return -ENOMEM;
+ }
+
+ mbus->sdramwins_phys_base = sdramwins_phys_base;
+
+ if (mbusbridge_phys_base) {
+ mbus->mbusbridge_base = ioremap(mbusbridge_phys_base,
+ mbusbridge_size);
+ if (!mbus->mbusbridge_base) {
+ iounmap(mbus->sdramwins_base);
+ iounmap(mbus->mbuswins_base);
+ return -ENOMEM;
+ }
+ } else
+ mbus->mbusbridge_base = NULL;
+
+ for (win = 0; win < mbus->soc->num_wins; win++)
+ mvebu_mbus_disable_window(mbus, win);
+
+ mbus->soc->setup_cpu_target(mbus);
+ mvebu_mbus_setup_cpu_target_nooverlap(mbus);
+
+ if (is_coherent)
+ writel(UNIT_SYNC_BARRIER_ALL,
+ mbus->mbuswins_base + UNIT_SYNC_BARRIER_OFF);
+
+ register_syscore_ops(&mvebu_mbus_syscore_ops);
+
+ return 0;
+}
+
+int __init mvebu_mbus_init(const char *soc, phys_addr_t mbuswins_phys_base,
+ size_t mbuswins_size,
+ phys_addr_t sdramwins_phys_base,
+ size_t sdramwins_size)
+{
+ const struct of_device_id *of_id;
+
+ for (of_id = of_mvebu_mbus_ids; of_id->compatible[0]; of_id++)
+ if (!strcmp(of_id->compatible, soc))
+ break;
+
+ if (!of_id->compatible[0]) {
+ pr_err("could not find a matching SoC family\n");
+ return -ENODEV;
+ }
+
+ mbus_state.soc = of_id->data;
+
+ return mvebu_mbus_common_init(&mbus_state,
+ mbuswins_phys_base,
+ mbuswins_size,
+ sdramwins_phys_base,
+ sdramwins_size, 0, 0, false);
+}
+
+#ifdef CONFIG_OF
+/*
+ * The window IDs in the ranges DT property have the following format:
+ * - bits 28 to 31: MBus custom field
+ * - bits 24 to 27: window target ID
+ * - bits 16 to 23: window attribute ID
+ * - bits 0 to 15: unused
+ */
+#define CUSTOM(id) (((id) & 0xF0000000) >> 24)
+#define TARGET(id) (((id) & 0x0F000000) >> 24)
+#define ATTR(id) (((id) & 0x00FF0000) >> 16)
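+
+/*
+ * For example, a (made-up) window ID of 0x040f0000 decodes to
+ * CUSTOM = 0, TARGET = 0x04 and ATTR = 0x0f, i.e. a static window
+ * for target 4 with attribute 0xf.
+ */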
+
+static int __init mbus_dt_setup_win(struct mvebu_mbus_state *mbus,
+ u32 base, u32 size,
+ u8 target, u8 attr)
+{
+ if (!mvebu_mbus_window_conflicts(mbus, base, size, target, attr)) {
+ pr_err("cannot add window '%04x:%04x', conflicts with another window\n",
+ target, attr);
+ return -EBUSY;
+ }
+
+ if (mvebu_mbus_alloc_window(mbus, base, size, MVEBU_MBUS_NO_REMAP,
+ target, attr)) {
+ pr_err("cannot add window '%04x:%04x', too many windows\n",
+ target, attr);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int __init
+mbus_parse_ranges(struct device_node *node,
+ int *addr_cells, int *c_addr_cells, int *c_size_cells,
+ int *cell_count, const __be32 **ranges_start,
+ const __be32 **ranges_end)
+{
+ const __be32 *prop;
+ int ranges_len, tuple_len;
+
+ /* Allow a node with no 'ranges' property */
+ *ranges_start = of_get_property(node, "ranges", &ranges_len);
+ if (*ranges_start == NULL) {
+ *addr_cells = *c_addr_cells = *c_size_cells = *cell_count = 0;
+ *ranges_start = *ranges_end = NULL;
+ return 0;
+ }
+ *ranges_end = *ranges_start + ranges_len / sizeof(__be32);
+
+ *addr_cells = of_n_addr_cells(node);
+
+ prop = of_get_property(node, "#address-cells", NULL);
+ *c_addr_cells = be32_to_cpup(prop);
+
+ prop = of_get_property(node, "#size-cells", NULL);
+ *c_size_cells = be32_to_cpup(prop);
+
+ *cell_count = *addr_cells + *c_addr_cells + *c_size_cells;
+ tuple_len = (*cell_count) * sizeof(__be32);
+
+ if (ranges_len % tuple_len) {
+ pr_warn("malformed ranges entry '%pOFn'\n", node);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int __init mbus_dt_setup(struct mvebu_mbus_state *mbus,
+ struct device_node *np)
+{
+ int addr_cells, c_addr_cells, c_size_cells;
+ int i, ret, cell_count;
+ const __be32 *r, *ranges_start, *ranges_end;
+
+ ret = mbus_parse_ranges(np, &addr_cells, &c_addr_cells,
+ &c_size_cells, &cell_count,
+ &ranges_start, &ranges_end);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0, r = ranges_start; r < ranges_end; r += cell_count, i++) {
+ u32 windowid, base, size;
+ u8 target, attr;
+
+ /*
+		 * An entry with a non-zero custom field does not
+		 * correspond to a static window, so skip it.
+ */
+ windowid = of_read_number(r, 1);
+ if (CUSTOM(windowid))
+ continue;
+
+ target = TARGET(windowid);
+ attr = ATTR(windowid);
+
+ base = of_read_number(r + c_addr_cells, addr_cells);
+ size = of_read_number(r + c_addr_cells + addr_cells,
+ c_size_cells);
+ ret = mbus_dt_setup_win(mbus, base, size, target, attr);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static void __init mvebu_mbus_get_pcie_resources(struct device_node *np,
+ struct resource *mem,
+ struct resource *io)
+{
+ u32 reg[2];
+ int ret;
+
+ /*
+	 * These are optional, so initialize the resources in a way that
+	 * makes resource_size(x) return 0 when they are absent.
+ */
+ memset(mem, 0, sizeof(struct resource));
+ mem->end = -1;
+ memset(io, 0, sizeof(struct resource));
+ io->end = -1;
+
+ ret = of_property_read_u32_array(np, "pcie-mem-aperture", reg, ARRAY_SIZE(reg));
+ if (!ret) {
+ mem->start = reg[0];
+ mem->end = mem->start + reg[1] - 1;
+ mem->flags = IORESOURCE_MEM;
+ }
+
+ ret = of_property_read_u32_array(np, "pcie-io-aperture", reg, ARRAY_SIZE(reg));
+ if (!ret) {
+ io->start = reg[0];
+ io->end = io->start + reg[1] - 1;
+ io->flags = IORESOURCE_IO;
+ }
+}
+
+int __init mvebu_mbus_dt_init(bool is_coherent)
+{
+ struct resource mbuswins_res, sdramwins_res, mbusbridge_res;
+ struct device_node *np, *controller;
+ const struct of_device_id *of_id;
+ const __be32 *prop;
+ int ret;
+
+ np = of_find_matching_node_and_match(NULL, of_mvebu_mbus_ids, &of_id);
+ if (!np) {
+ pr_err("could not find a matching SoC family\n");
+ return -ENODEV;
+ }
+
+ mbus_state.soc = of_id->data;
+
+ prop = of_get_property(np, "controller", NULL);
+ if (!prop) {
+ pr_err("required 'controller' property missing\n");
+ return -EINVAL;
+ }
+
+ controller = of_find_node_by_phandle(be32_to_cpup(prop));
+ if (!controller) {
+ pr_err("could not find an 'mbus-controller' node\n");
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(controller, 0, &mbuswins_res)) {
+ pr_err("cannot get MBUS register address\n");
+ return -EINVAL;
+ }
+
+ if (of_address_to_resource(controller, 1, &sdramwins_res)) {
+ pr_err("cannot get SDRAM register address\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Set the resource to 0 so that it can be left unmapped by
+ * mvebu_mbus_common_init() if the DT doesn't carry the
+ * necessary information. This is needed to preserve backward
+ * compatibility.
+ */
+ memset(&mbusbridge_res, 0, sizeof(mbusbridge_res));
+
+ if (mbus_state.soc->has_mbus_bridge) {
+ if (of_address_to_resource(controller, 2, &mbusbridge_res))
+ pr_warn(FW_WARN "deprecated mbus-mvebu Device Tree, suspend/resume will not work\n");
+ }
+
+ mbus_state.hw_io_coherency = is_coherent;
+
+ /* Get optional pcie-{mem,io}-aperture properties */
+ mvebu_mbus_get_pcie_resources(np, &mbus_state.pcie_mem_aperture,
+ &mbus_state.pcie_io_aperture);
+
+ ret = mvebu_mbus_common_init(&mbus_state,
+ mbuswins_res.start,
+ resource_size(&mbuswins_res),
+ sdramwins_res.start,
+ resource_size(&sdramwins_res),
+ mbusbridge_res.start,
+ resource_size(&mbusbridge_res),
+ is_coherent);
+ if (ret)
+ return ret;
+
+ /* Setup statically declared windows in the DT */
+ return mbus_dt_setup(&mbus_state, np);
+}
+#endif
diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
new file mode 100644
index 000000000..e02d06562
--- /dev/null
+++ b/drivers/bus/omap-ocp2scp.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * omap-ocp2scp.c - transform OCP interface protocol to SCP protocol
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#define OCP2SCP_TIMING 0x18
+#define SYNC2_MASK 0xf
+
+static int ocp2scp_remove_devices(struct device *dev, void *c)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+
+ return 0;
+}
+
+static int omap_ocp2scp_probe(struct platform_device *pdev)
+{
+ int ret;
+ u32 reg;
+ void __iomem *regs;
+ struct resource *res;
+ struct device_node *np = pdev->dev.of_node;
+
+ if (np) {
+ ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to add resources for ocp2scp child\n");
+ goto err0;
+ }
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ /*
+ * As per AM572x TRM: http://www.ti.com/lit/ug/spruhz6/spruhz6.pdf
+ * under section 26.3.2.2, table 26-26 OCP2SCP TIMING Caution;
+ * As per OMAP4430 TRM: http://www.ti.com/lit/ug/swpu231ap/swpu231ap.pdf
+ * under section 23.12.6.2.2 , Table 23-1213 OCP2SCP TIMING Caution;
+ * As per OMAP4460 TRM: http://www.ti.com/lit/ug/swpu235ab/swpu235ab.pdf
+ * under section 23.12.6.2.2, Table 23-1213 OCP2SCP TIMING Caution;
+ * As per OMAP543x TRM http://www.ti.com/lit/pdf/swpu249
+ * under section 27.3.2.2, Table 27-27 OCP2SCP TIMING Caution;
+ *
+ * The read path of OCP2SCP does not work properly due to the low reset
+ * value of the SYNC2 parameter in OCP2SCP. The suggested value is 0x6
+ * or more.
+ */
+ if (!of_device_is_compatible(np, "ti,am437x-ocp2scp")) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs)) {
+ ret = PTR_ERR(regs);
+ goto err1;
+ }
+
+ pm_runtime_get_sync(&pdev->dev);
+ reg = readl_relaxed(regs + OCP2SCP_TIMING);
+ reg &= ~(SYNC2_MASK);
+ reg |= 0x6;
+ writel_relaxed(reg, regs + OCP2SCP_TIMING);
+ pm_runtime_put_sync(&pdev->dev);
+ }
+
+ return 0;
+
+err1:
+ pm_runtime_disable(&pdev->dev);
+
+err0:
+ device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
+
+ return ret;
+}
+
+static int omap_ocp2scp_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id omap_ocp2scp_id_table[] = {
+ { .compatible = "ti,omap-ocp2scp" },
+ { .compatible = "ti,am437x-ocp2scp" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, omap_ocp2scp_id_table);
+#endif
+
+static struct platform_driver omap_ocp2scp_driver = {
+ .probe = omap_ocp2scp_probe,
+ .remove = omap_ocp2scp_remove,
+ .driver = {
+ .name = "omap-ocp2scp",
+ .of_match_table = of_match_ptr(omap_ocp2scp_id_table),
+ },
+};
+
+module_platform_driver(omap_ocp2scp_driver);
+
+MODULE_ALIAS("platform:omap-ocp2scp");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_DESCRIPTION("OMAP OCP2SCP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
new file mode 100644
index 000000000..dcfb32ee5
--- /dev/null
+++ b/drivers/bus/omap_l3_noc.c
@@ -0,0 +1,382 @@
+/*
+ * OMAP L3 Interconnect error handling driver
+ *
+ * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Sricharan <r.sricharan@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "omap_l3_noc.h"
+
+/**
+ * l3_handle_target() - Handle Target specific parse and reporting
+ * @l3: pointer to l3 struct
+ * @base: base address of clkdm
+ * @flag_mux: flagmux corresponding to the event
+ * @err_src: error source index of the slave (target)
+ *
+ * This does the second part of the error interrupt handling:
+ * 3) Parse the slave information.
+ * 4) Print the logged information.
+ * 5) Dump the stack to provide a kernel trace.
+ * 6) Clear the source if known.
+ *
+ * This handles two types of errors:
+ * 1) Custom errors in L3 :
+ * Target like DMM/FW/EMIF generates SRESP=ERR error
+ * 2) Standard L3 error:
+ * - Unsupported CMD.
+ * L3 tries to access target while it is idle
+ * - OCP disconnect.
+ * - Address hole error:
+ * If DSS/ISS/FDIF/USBHOSTFS access a target where they
+ * do not have connectivity, the error is logged in
+ * their default target which is DMM2.
+ *
+ * On High Secure devices, firewall errors are possible and those
+ * can be trapped as well. But the trapping is implemented as part
+ * of the secure software and hence need not be implemented here.
+ */
+static int l3_handle_target(struct omap_l3 *l3, void __iomem *base,
+ struct l3_flagmux_data *flag_mux, int err_src)
+{
+ int k;
+ u32 std_err_main, clear, masterid;
+ u8 op_code, m_req_info;
+ void __iomem *l3_targ_base;
+ void __iomem *l3_targ_stderr, *l3_targ_slvofslsb, *l3_targ_mstaddr;
+ void __iomem *l3_targ_hdr, *l3_targ_info;
+ struct l3_target_data *l3_targ_inst;
+ struct l3_masters_data *master;
+	char *target_name, *master_name = "UNIDENTIFIED";
+ char *err_description;
+ char err_string[30] = { 0 };
+ char info_string[60] = { 0 };
+
+	/* We DO NOT expect err_src to go out of bounds */
+ BUG_ON(err_src > MAX_CLKDM_TARGETS);
+
+ if (err_src < flag_mux->num_targ_data) {
+ l3_targ_inst = &flag_mux->l3_targ[err_src];
+ target_name = l3_targ_inst->name;
+ l3_targ_base = base + l3_targ_inst->offset;
+ } else {
+ target_name = L3_TARGET_NOT_SUPPORTED;
+ }
+
+ if (target_name == L3_TARGET_NOT_SUPPORTED)
+ return -ENODEV;
+
+ /* Read the stderrlog_main_source from clk domain */
+ l3_targ_stderr = l3_targ_base + L3_TARG_STDERRLOG_MAIN;
+ l3_targ_slvofslsb = l3_targ_base + L3_TARG_STDERRLOG_SLVOFSLSB;
+
+ std_err_main = readl_relaxed(l3_targ_stderr);
+
+ switch (std_err_main & CUSTOM_ERROR) {
+ case STANDARD_ERROR:
+ err_description = "Standard";
+ snprintf(err_string, sizeof(err_string),
+ ": At Address: 0x%08X ",
+ readl_relaxed(l3_targ_slvofslsb));
+
+ l3_targ_mstaddr = l3_targ_base + L3_TARG_STDERRLOG_MSTADDR;
+ l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_HDR;
+ l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_INFO;
+ break;
+
+ case CUSTOM_ERROR:
+ err_description = "Custom";
+
+ l3_targ_mstaddr = l3_targ_base +
+ L3_TARG_STDERRLOG_CINFO_MSTADDR;
+ l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_CINFO_OPCODE;
+ l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_CINFO_INFO;
+ break;
+
+ default:
+ /* Nothing to be handled here as of now */
+ return 0;
+ }
+
+ /* STDERRLOG_MSTADDR Stores the NTTP master address. */
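+	/*
+	 * E.g. with OMAP4's mst_addr_mask of 0xFC this extracts bits
+	 * 7:2 of the register as the master ID.
+	 */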
+ masterid = (readl_relaxed(l3_targ_mstaddr) &
+ l3->mst_addr_mask) >> __ffs(l3->mst_addr_mask);
+
+ for (k = 0, master = l3->l3_masters; k < l3->num_masters;
+ k++, master++) {
+ if (masterid == master->id) {
+ master_name = master->name;
+ break;
+ }
+ }
+
+ op_code = readl_relaxed(l3_targ_hdr) & 0x7;
+
+ m_req_info = readl_relaxed(l3_targ_info) & 0xF;
+ snprintf(info_string, sizeof(info_string),
+ ": %s in %s mode during %s access",
+ (m_req_info & BIT(0)) ? "Opcode Fetch" : "Data Access",
+ (m_req_info & BIT(1)) ? "Supervisor" : "User",
+ (m_req_info & BIT(3)) ? "Debug" : "Functional");
+
+ WARN(true,
+ "%s:L3 %s Error: MASTER %s TARGET %s (%s)%s%s\n",
+ dev_name(l3->dev),
+ err_description,
+ master_name, target_name,
+ l3_transaction_type[op_code],
+ err_string, info_string);
+
+	/* Clear the std error log */
+ clear = std_err_main | CLEAR_STDERR_LOG;
+ writel_relaxed(clear, l3_targ_stderr);
+
+ return 0;
+}
+
+/**
+ * l3_interrupt_handler() - interrupt handler for l3 events
+ * @irq: irq number
+ * @_l3: pointer to l3 structure
+ *
+ * Interrupt Handler for L3 error detection.
+ *	1) Identify the L3 clockdomain partition to which the error belongs.
+ *	2) Identify the slave where the error information is logged.
+ *	... handle the slave event ...
+ *	7) If the slave is unknown, mask out the slave.
+ */
+static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
+{
+ struct omap_l3 *l3 = _l3;
+ int inttype, i, ret;
+ int err_src = 0;
+ u32 err_reg, mask_val;
+ void __iomem *base, *mask_reg;
+ struct l3_flagmux_data *flag_mux;
+
+ /* Get the Type of interrupt */
+ inttype = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR;
+
+ for (i = 0; i < l3->num_modules; i++) {
+ /*
+ * Read the regerr register of the clock domain
+ * to determine the source
+ */
+ base = l3->l3_base[i];
+ flag_mux = l3->l3_flagmux[i];
+ err_reg = readl_relaxed(base + flag_mux->offset +
+ L3_FLAGMUX_REGERR0 + (inttype << 3));
+
+ err_reg &= ~(inttype ? flag_mux->mask_app_bits :
+ flag_mux->mask_dbg_bits);
+
+ /* Get the corresponding error and analyse */
+ if (err_reg) {
+ /* Identify the source from control status register */
+ err_src = __ffs(err_reg);
+
+ ret = l3_handle_target(l3, base, flag_mux, err_src);
+
+ /*
+			 * Certain platforms may have "undocumented" status
+			 * pending on boot. So don't generate a severe warning
+			 * here. Just mask it off to prevent the error from
+			 * recurring and locking up the system.
+ */
+ if (ret) {
+ dev_err(l3->dev,
+ "L3 %s error: target %d mod:%d %s\n",
+ inttype ? "debug" : "application",
+ err_src, i, "(unclearable)");
+
+ mask_reg = base + flag_mux->offset +
+ L3_FLAGMUX_MASK0 + (inttype << 3);
+ mask_val = readl_relaxed(mask_reg);
+ mask_val &= ~(1 << err_src);
+ writel_relaxed(mask_val, mask_reg);
+
+ /* Mark these bits as to be ignored */
+ if (inttype)
+ flag_mux->mask_app_bits |= 1 << err_src;
+ else
+ flag_mux->mask_dbg_bits |= 1 << err_src;
+ }
+
+			/* Error found and handled, so stop scanning */
+ return IRQ_HANDLED;
+ }
+ }
+
+ dev_err(l3->dev, "L3 %s IRQ not handled!!\n",
+ inttype ? "debug" : "application");
+
+ return IRQ_NONE;
+}
+
+static const struct of_device_id l3_noc_match[] = {
+ {.compatible = "ti,omap4-l3-noc", .data = &omap4_l3_data},
+ {.compatible = "ti,omap5-l3-noc", .data = &omap5_l3_data},
+ {.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data},
+ {.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data},
+ {},
+};
+MODULE_DEVICE_TABLE(of, l3_noc_match);
+
+static int omap_l3_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id;
+ static struct omap_l3 *l3;
+ int ret, i, res_idx;
+
+ of_id = of_match_device(l3_noc_match, &pdev->dev);
+ if (!of_id) {
+ dev_err(&pdev->dev, "OF data missing\n");
+ return -EINVAL;
+ }
+
+ l3 = devm_kzalloc(&pdev->dev, sizeof(*l3), GFP_KERNEL);
+ if (!l3)
+ return -ENOMEM;
+
+ memcpy(l3, of_id->data, sizeof(*l3));
+ l3->dev = &pdev->dev;
+ platform_set_drvdata(pdev, l3);
+
+ /* Get mem resources */
+ for (i = 0, res_idx = 0; i < l3->num_modules; i++) {
+ struct resource *res;
+
+ if (l3->l3_base[i] == L3_BASE_IS_SUBMODULE) {
+ /* First entry cannot be submodule */
+ BUG_ON(i == 0);
+ l3->l3_base[i] = l3->l3_base[i - 1];
+ continue;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx);
+ l3->l3_base[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(l3->l3_base[i])) {
+ dev_err(l3->dev, "ioremap %d failed\n", i);
+ return PTR_ERR(l3->l3_base[i]);
+ }
+ res_idx++;
+ }
+
+ /*
+	 * Set up interrupt handlers
+ */
+ l3->debug_irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
+ IRQF_NO_THREAD, "l3-dbg-irq", l3);
+ if (ret) {
+ dev_err(l3->dev, "request_irq failed for %d\n",
+ l3->debug_irq);
+ return ret;
+ }
+
+ l3->app_irq = platform_get_irq(pdev, 1);
+ ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
+ IRQF_NO_THREAD, "l3-app-irq", l3);
+ if (ret)
+ dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+/**
+ * l3_resume_noirq() - resume function for l3_noc
+ * @dev: pointer to l3_noc device structure
+ *
+ * We only have a resume handler, since the delta register
+ * configuration is already maintained as part of configuring
+ * the system.
+ */
+static int l3_resume_noirq(struct device *dev)
+{
+ struct omap_l3 *l3 = dev_get_drvdata(dev);
+ int i;
+ struct l3_flagmux_data *flag_mux;
+ void __iomem *base, *mask_regx = NULL;
+ u32 mask_val;
+
+ for (i = 0; i < l3->num_modules; i++) {
+ base = l3->l3_base[i];
+ flag_mux = l3->l3_flagmux[i];
+ if (!flag_mux->mask_app_bits && !flag_mux->mask_dbg_bits)
+ continue;
+
+ mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
+ (L3_APPLICATION_ERROR << 3);
+ mask_val = readl_relaxed(mask_regx);
+ mask_val &= ~(flag_mux->mask_app_bits);
+
+ writel_relaxed(mask_val, mask_regx);
+ mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
+ (L3_DEBUG_ERROR << 3);
+ mask_val = readl_relaxed(mask_regx);
+ mask_val &= ~(flag_mux->mask_dbg_bits);
+
+ writel_relaxed(mask_val, mask_regx);
+ }
+
+ /* Dummy read to force OCP barrier */
+ if (mask_regx)
+ (void)readl(mask_regx);
+
+ return 0;
+}
+
+static const struct dev_pm_ops l3_dev_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, l3_resume_noirq)
+};
+
+#define L3_DEV_PM_OPS (&l3_dev_pm_ops)
+#else
+#define L3_DEV_PM_OPS NULL
+#endif
+
+static struct platform_driver omap_l3_driver = {
+ .probe = omap_l3_probe,
+ .driver = {
+ .name = "omap_l3_noc",
+ .pm = L3_DEV_PM_OPS,
+ .of_match_table = of_match_ptr(l3_noc_match),
+ },
+};
+
+static int __init omap_l3_init(void)
+{
+ return platform_driver_register(&omap_l3_driver);
+}
+postcore_initcall_sync(omap_l3_init);
+
+static void __exit omap_l3_exit(void)
+{
+ platform_driver_unregister(&omap_l3_driver);
+}
+module_exit(omap_l3_exit);
+
+MODULE_AUTHOR("Santosh Shilimkar");
+MODULE_AUTHOR("Sricharan R");
+MODULE_DESCRIPTION("OMAP L3 Interconnect error handling driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/omap_l3_noc.h b/drivers/bus/omap_l3_noc.h
new file mode 100644
index 000000000..73431f81d
--- /dev/null
+++ b/drivers/bus/omap_l3_noc.h
@@ -0,0 +1,501 @@
+/*
+ * OMAP L3 Interconnect error handling driver header
+ *
+ * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * sricharan <r.sricharan@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __OMAP_L3_NOC_H
+#define __OMAP_L3_NOC_H
+
+#define MAX_L3_MODULES 3
+#define MAX_CLKDM_TARGETS 31
+
+#define CLEAR_STDERR_LOG (1 << 31)
+#define CUSTOM_ERROR 0x2
+#define STANDARD_ERROR 0x0
+#define INBAND_ERROR 0x0
+#define L3_APPLICATION_ERROR 0x0
+#define L3_DEBUG_ERROR 0x1
+
+/* L3 TARG register offsets */
+#define L3_TARG_STDERRLOG_MAIN 0x48
+#define L3_TARG_STDERRLOG_HDR 0x4c
+#define L3_TARG_STDERRLOG_MSTADDR 0x50
+#define L3_TARG_STDERRLOG_INFO 0x58
+#define L3_TARG_STDERRLOG_SLVOFSLSB 0x5c
+#define L3_TARG_STDERRLOG_CINFO_INFO 0x64
+#define L3_TARG_STDERRLOG_CINFO_MSTADDR 0x68
+#define L3_TARG_STDERRLOG_CINFO_OPCODE 0x6c
+#define L3_FLAGMUX_REGERR0 0xc
+#define L3_FLAGMUX_MASK0 0x8
+
+#define L3_TARGET_NOT_SUPPORTED NULL
+
+#define L3_BASE_IS_SUBMODULE ((void __iomem *)(1 << 0))
+
+static const char * const l3_transaction_type[] = {
+ /* 0 0 0 */ "Idle",
+ /* 0 0 1 */ "Write",
+ /* 0 1 0 */ "Read",
+ /* 0 1 1 */ "ReadEx",
+ /* 1 0 0 */ "Read Link",
+ /* 1 0 1 */ "Write Non-Posted",
+ /* 1 1 0 */ "Write Conditional",
+ /* 1 1 1 */ "Write Broadcast",
+};
+
+/**
+ * struct l3_masters_data - L3 Master information
+ * @id: ID of the L3 Master
+ * @name: master name
+ */
+struct l3_masters_data {
+ u32 id;
+ char *name;
+};
+
+/**
+ * struct l3_target_data - L3 Target information
+ * @offset: Offset from base for L3 Target
+ * @name: Target name
+ *
+ * Target information is organized and indexed by bit field definitions.
+ */
+struct l3_target_data {
+ u32 offset;
+ char *name;
+};
+
+/**
+ * struct l3_flagmux_data - Flag Mux information
+ * @offset: offset from base for flagmux register
+ * @l3_targ: array indexed by flagmux index (bit offset) pointing to the
+ *		target data. Unsupported ones are marked with
+ * L3_TARGET_NOT_SUPPORTED
+ * @num_targ_data: number of entries in target data
+ * @mask_app_bits: ignore these from raw application irq status
+ * @mask_dbg_bits: ignore these from raw debug irq status
+ */
+struct l3_flagmux_data {
+ u32 offset;
+ struct l3_target_data *l3_targ;
+ u8 num_targ_data;
+ u32 mask_app_bits;
+ u32 mask_dbg_bits;
+};
+
+
+/**
+ * struct omap_l3 - Description of data relevant for L3 bus.
+ * @dev: device representing the bus (populated runtime)
+ * @l3_base: base addresses of modules (populated runtime if 0)
+ * if set to L3_BASE_IS_SUBMODULE, then uses previous
+ * module index as the base address
+ * @l3_flagmux:	array containing flag mux data per module,
+ *		offset from the corresponding module base, indexed per
+ *		module.
+ * @num_modules: number of clock domains / modules.
+ * @l3_masters: array pointing to master data containing name and register
+ * offset for the master.
+ * @num_masters: number of masters
+ * @mst_addr_mask: Mask representing MSTADDR information of NTTP packet
+ * @debug_irq: irq number of the debug interrupt (populated runtime)
+ * @app_irq: irq number of the application interrupt (populated runtime)
+ */
+struct omap_l3 {
+ struct device *dev;
+
+ void __iomem *l3_base[MAX_L3_MODULES];
+ struct l3_flagmux_data **l3_flagmux;
+ int num_modules;
+
+ struct l3_masters_data *l3_masters;
+ int num_masters;
+ u32 mst_addr_mask;
+
+ int debug_irq;
+ int app_irq;
+};
+
+static struct l3_target_data omap_l3_target_data_clk1[] = {
+ {0x100, "DMM1",},
+ {0x200, "DMM2",},
+ {0x300, "ABE",},
+ {0x400, "L4CFG",},
+ {0x600, "CLK2PWRDISC",},
+ {0x0, "HOSTCLK1",},
+ {0x900, "L4WAKEUP",},
+};
+
+static struct l3_flagmux_data omap_l3_flagmux_clk1 = {
+ .offset = 0x500,
+ .l3_targ = omap_l3_target_data_clk1,
+ .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk1),
+};
+
+
+static struct l3_target_data omap_l3_target_data_clk2[] = {
+ {0x500, "CORTEXM3",},
+ {0x300, "DSS",},
+ {0x100, "GPMC",},
+ {0x400, "ISS",},
+ {0x700, "IVAHD",},
+ {0xD00, "AES1",},
+ {0x900, "L4PER0",},
+ {0x200, "OCMRAM",},
+ {0x100, "GPMCsERROR",},
+ {0x600, "SGX",},
+ {0x800, "SL2",},
+ {0x1600, "C2C",},
+ {0x1100, "PWRDISCCLK1",},
+ {0xF00, "SHA1",},
+ {0xE00, "AES2",},
+ {0xC00, "L4PER3",},
+ {0xA00, "L4PER1",},
+ {0xB00, "L4PER2",},
+ {0x0, "HOSTCLK2",},
+ {0x1800, "CAL",},
+ {0x1700, "LLI",},
+};
+
+static struct l3_flagmux_data omap_l3_flagmux_clk2 = {
+ .offset = 0x1000,
+ .l3_targ = omap_l3_target_data_clk2,
+ .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk2),
+};
+
+
+static struct l3_target_data omap4_l3_target_data_clk3[] = {
+ {0x0100, "DEBUGSS",},
+};
+
+static struct l3_flagmux_data omap4_l3_flagmux_clk3 = {
+ .offset = 0x0200,
+ .l3_targ = omap4_l3_target_data_clk3,
+ .num_targ_data = ARRAY_SIZE(omap4_l3_target_data_clk3),
+};
+
+static struct l3_masters_data omap_l3_masters[] = {
+ { 0x00, "MPU"},
+ { 0x04, "CS_ADP"},
+ { 0x05, "xxx"},
+ { 0x08, "DSP"},
+ { 0x0C, "IVAHD"},
+ { 0x10, "ISS"},
+ { 0x11, "DucatiM3"},
+ { 0x12, "FaceDetect"},
+ { 0x14, "SDMA_Rd"},
+ { 0x15, "SDMA_Wr"},
+ { 0x16, "xxx"},
+ { 0x17, "xxx"},
+ { 0x18, "SGX"},
+ { 0x1C, "DSS"},
+ { 0x20, "C2C"},
+ { 0x22, "xxx"},
+ { 0x23, "xxx"},
+ { 0x24, "HSI"},
+ { 0x28, "MMC1"},
+ { 0x29, "MMC2"},
+ { 0x2A, "MMC6"},
+ { 0x2C, "UNIPRO1"},
+ { 0x30, "USBHOSTHS"},
+ { 0x31, "USBOTGHS"},
+ { 0x32, "USBHOSTFS"}
+};
+
+static struct l3_flagmux_data *omap4_l3_flagmux[] = {
+ &omap_l3_flagmux_clk1,
+ &omap_l3_flagmux_clk2,
+ &omap4_l3_flagmux_clk3,
+};
+
+static const struct omap_l3 omap4_l3_data = {
+ .l3_flagmux = omap4_l3_flagmux,
+ .num_modules = ARRAY_SIZE(omap4_l3_flagmux),
+ .l3_masters = omap_l3_masters,
+ .num_masters = ARRAY_SIZE(omap_l3_masters),
+	/* The 6 MSBs of the register field are used to distinguish the initiator */
+ .mst_addr_mask = 0xFC,
+};
+
+/* OMAP5 data */
+static struct l3_target_data omap5_l3_target_data_clk3[] = {
+ {0x0100, "L3INSTR",},
+ {0x0300, "DEBUGSS",},
+ {0x0, "HOSTCLK3",},
+};
+
+static struct l3_flagmux_data omap5_l3_flagmux_clk3 = {
+ .offset = 0x0200,
+ .l3_targ = omap5_l3_target_data_clk3,
+ .num_targ_data = ARRAY_SIZE(omap5_l3_target_data_clk3),
+};
+
+static struct l3_flagmux_data *omap5_l3_flagmux[] = {
+ &omap_l3_flagmux_clk1,
+ &omap_l3_flagmux_clk2,
+ &omap5_l3_flagmux_clk3,
+};
+
+static const struct omap_l3 omap5_l3_data = {
+ .l3_flagmux = omap5_l3_flagmux,
+ .num_modules = ARRAY_SIZE(omap5_l3_flagmux),
+ .l3_masters = omap_l3_masters,
+ .num_masters = ARRAY_SIZE(omap_l3_masters),
+ /* The 6 MSBs of register field used to distinguish initiator */
+ .mst_addr_mask = 0x7E0,
+};
+
+/* DRA7 data */
+static struct l3_target_data dra_l3_target_data_clk1[] = {
+ {0x2a00, "AES1",},
+ {0x0200, "DMM_P1",},
+ {0x0600, "DSP2_SDMA",},
+ {0x0b00, "EVE2",},
+ {0x1300, "DMM_P2",},
+ {0x2c00, "AES2",},
+ {0x0300, "DSP1_SDMA",},
+ {0x0a00, "EVE1",},
+ {0x0c00, "EVE3",},
+ {0x0d00, "EVE4",},
+ {0x2900, "DSS",},
+ {0x0100, "GPMC",},
+ {0x3700, "PCIE1",},
+ {0x1600, "IVA_CONFIG",},
+ {0x1800, "IVA_SL2IF",},
+ {0x0500, "L4_CFG",},
+ {0x1d00, "L4_WKUP",},
+ {0x3800, "PCIE2",},
+ {0x3300, "SHA2_1",},
+ {0x1200, "GPU",},
+ {0x1000, "IPU1",},
+ {0x1100, "IPU2",},
+ {0x2000, "TPCC_EDMA",},
+ {0x2e00, "TPTC1_EDMA",},
+ {0x2b00, "TPTC2_EDMA",},
+ {0x0700, "VCP1",},
+ {0x2500, "L4_PER2_P3",},
+ {0x0e00, "L4_PER3_P3",},
+ {0x2200, "MMU1",},
+ {0x1400, "PRUSS1",},
+ {0x1500, "PRUSS2"},
+ {0x0800, "VCP1",},
+};
+
+static struct l3_flagmux_data dra_l3_flagmux_clk1 = {
+ .offset = 0x803500,
+ .l3_targ = dra_l3_target_data_clk1,
+ .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk1),
+};
+
+static struct l3_target_data dra_l3_target_data_clk2[] = {
+ {0x0, "HOST CLK1",},
+ {0x800000, "HOST CLK2",},
+ {0xdead, L3_TARGET_NOT_SUPPORTED,},
+ {0x3400, "SHA2_2",},
+ {0x0900, "BB2D",},
+ {0xdead, L3_TARGET_NOT_SUPPORTED,},
+ {0x2100, "L4_PER1_P3",},
+ {0x1c00, "L4_PER1_P1",},
+ {0x1f00, "L4_PER1_P2",},
+ {0x2300, "L4_PER2_P1",},
+ {0x2400, "L4_PER2_P2",},
+ {0x2600, "L4_PER3_P1",},
+ {0x2700, "L4_PER3_P2",},
+ {0x2f00, "MCASP1",},
+ {0x3000, "MCASP2",},
+ {0x3100, "MCASP3",},
+ {0x2800, "MMU2",},
+ {0x0f00, "OCMC_RAM1",},
+ {0x1700, "OCMC_RAM2",},
+ {0x1900, "OCMC_RAM3",},
+ {0x1e00, "OCMC_ROM",},
+ {0x3900, "QSPI",},
+};
+
+static struct l3_flagmux_data dra_l3_flagmux_clk2 = {
+ .offset = 0x803600,
+ .l3_targ = dra_l3_target_data_clk2,
+ .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk2),
+};
+
+static struct l3_target_data dra_l3_target_data_clk3[] = {
+ {0x0100, "L3_INSTR"},
+ {0x0300, "DEBUGSS_CT_TBR"},
+ {0x0, "HOST CLK3"},
+};
+
+static struct l3_flagmux_data dra_l3_flagmux_clk3 = {
+ .offset = 0x200,
+ .l3_targ = dra_l3_target_data_clk3,
+ .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk3),
+};
+
+static struct l3_masters_data dra_l3_masters[] = {
+ { 0x0, "MPU" },
+ { 0x4, "CS_DAP" },
+ { 0x5, "IEEE1500_2_OCP" },
+ { 0x8, "DSP1_MDMA" },
+ { 0x9, "DSP1_CFG" },
+ { 0xA, "DSP1_DMA" },
+ { 0xB, "DSP2_MDMA" },
+ { 0xC, "DSP2_CFG" },
+ { 0xD, "DSP2_DMA" },
+ { 0xE, "IVA" },
+ { 0x10, "EVE1_P1" },
+ { 0x11, "EVE2_P1" },
+ { 0x12, "EVE3_P1" },
+ { 0x13, "EVE4_P1" },
+ { 0x14, "PRUSS1 PRU1" },
+ { 0x15, "PRUSS1 PRU2" },
+ { 0x16, "PRUSS2 PRU1" },
+ { 0x17, "PRUSS2 PRU2" },
+ { 0x18, "IPU1" },
+ { 0x19, "IPU2" },
+ { 0x1A, "SDMA" },
+ { 0x1B, "CDMA" },
+ { 0x1C, "TC1_EDMA" },
+ { 0x1D, "TC2_EDMA" },
+ { 0x20, "DSS" },
+ { 0x21, "MMU1" },
+ { 0x22, "PCIE1" },
+ { 0x23, "MMU2" },
+ { 0x24, "VIP1" },
+ { 0x25, "VIP2" },
+ { 0x26, "VIP3" },
+ { 0x27, "VPE" },
+ { 0x28, "GPU_P1" },
+ { 0x29, "BB2D" },
+ { 0x29, "GPU_P2" },
+ { 0x2B, "GMAC_SW" },
+ { 0x2C, "USB3" },
+ { 0x2D, "USB2_SS" },
+ { 0x2E, "USB2_ULPI_SS1" },
+ { 0x2F, "USB2_ULPI_SS2" },
+ { 0x30, "CSI2_1" },
+ { 0x31, "CSI2_2" },
+ { 0x33, "SATA" },
+ { 0x34, "EVE1_P2" },
+ { 0x35, "EVE2_P2" },
+ { 0x36, "EVE3_P2" },
+ { 0x37, "EVE4_P2" }
+};
+
+static struct l3_flagmux_data *dra_l3_flagmux[] = {
+ &dra_l3_flagmux_clk1,
+ &dra_l3_flagmux_clk2,
+ &dra_l3_flagmux_clk3,
+};
+
+static const struct omap_l3 dra_l3_data = {
+ .l3_base = { [1] = L3_BASE_IS_SUBMODULE },
+ .l3_flagmux = dra_l3_flagmux,
+ .num_modules = ARRAY_SIZE(dra_l3_flagmux),
+ .l3_masters = dra_l3_masters,
+ .num_masters = ARRAY_SIZE(dra_l3_masters),
+ /* The 6 MSBs of register field used to distinguish initiator */
+ .mst_addr_mask = 0xFC,
+};
+
+/* AM4372 data */
+static struct l3_target_data am4372_l3_target_data_200f[] = {
+ {0xf00, "EMIF",},
+ {0x1200, "DES",},
+ {0x400, "OCMCRAM",},
+ {0x700, "TPTC0",},
+ {0x800, "TPTC1",},
+ {0x900, "TPTC2"},
+ {0xb00, "TPCC",},
+ {0xd00, "DEBUGSS",},
+ {0xdead, L3_TARGET_NOT_SUPPORTED,},
+ {0x200, "SHA",},
+ {0xc00, "SGX530",},
+ {0x500, "AES0",},
+ {0xa00, "L4_FAST",},
+ {0x300, "MPUSS_L2_RAM",},
+ {0x100, "ICSS",},
+};
+
+static struct l3_flagmux_data am4372_l3_flagmux_200f = {
+ .offset = 0x1000,
+ .l3_targ = am4372_l3_target_data_200f,
+ .num_targ_data = ARRAY_SIZE(am4372_l3_target_data_200f),
+};
+
+static struct l3_target_data am4372_l3_target_data_100s[] = {
+ {0x100, "L4_PER_0",},
+ {0x200, "L4_PER_1",},
+ {0x300, "L4_PER_2",},
+ {0x400, "L4_PER_3",},
+ {0x800, "McASP0",},
+ {0x900, "McASP1",},
+ {0xC00, "MMCHS2",},
+ {0x700, "GPMC",},
+ {0xD00, "L4_FW",},
+ {0xdead, L3_TARGET_NOT_SUPPORTED,},
+ {0x500, "ADCTSC",},
+ {0xE00, "L4_WKUP",},
+ {0xA00, "MAG_CARD",},
+};
+
+static struct l3_flagmux_data am4372_l3_flagmux_100s = {
+ .offset = 0x600,
+ .l3_targ = am4372_l3_target_data_100s,
+ .num_targ_data = ARRAY_SIZE(am4372_l3_target_data_100s),
+};
+
+static struct l3_masters_data am4372_l3_masters[] = {
+ { 0x0, "M1 (128-bit)"},
+ { 0x1, "M2 (64-bit)"},
+ { 0x4, "DAP"},
+ { 0x5, "P1500"},
+ { 0xC, "ICSS0"},
+ { 0xD, "ICSS1"},
+ { 0x14, "Wakeup Processor"},
+ { 0x18, "TPTC0 Read"},
+ { 0x19, "TPTC0 Write"},
+ { 0x1A, "TPTC1 Read"},
+ { 0x1B, "TPTC1 Write"},
+ { 0x1C, "TPTC2 Read"},
+ { 0x1D, "TPTC2 Write"},
+ { 0x20, "SGX530"},
+ { 0x21, "OCP WP Traffic Probe"},
+ { 0x22, "OCP WP DMA Profiling"},
+ { 0x23, "OCP WP Event Trace"},
+ { 0x25, "DSS"},
+ { 0x28, "Crypto DMA RD"},
+ { 0x29, "Crypto DMA WR"},
+ { 0x2C, "VPFE0"},
+ { 0x2D, "VPFE1"},
+ { 0x30, "GEMAC"},
+ { 0x34, "USB0 RD"},
+ { 0x35, "USB0 WR"},
+ { 0x36, "USB1 RD"},
+ { 0x37, "USB1 WR"},
+};
+
+static struct l3_flagmux_data *am4372_l3_flagmux[] = {
+ &am4372_l3_flagmux_200f,
+ &am4372_l3_flagmux_100s,
+};
+
+static const struct omap_l3 am4372_l3_data = {
+ .l3_flagmux = am4372_l3_flagmux,
+ .num_modules = ARRAY_SIZE(am4372_l3_flagmux),
+ .l3_masters = am4372_l3_masters,
+ .num_masters = ARRAY_SIZE(am4372_l3_masters),
+ /* All 6 bits of register field used to distinguish initiator */
+ .mst_addr_mask = 0x3F,
+};
+
+#endif /* __OMAP_L3_NOC_H */
diff --git a/drivers/bus/omap_l3_smx.c b/drivers/bus/omap_l3_smx.c
new file mode 100644
index 000000000..bb1606f5c
--- /dev/null
+++ b/drivers/bus/omap_l3_smx.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * OMAP3XXX L3 Interconnect Driver
+ *
+ * Copyright (C) 2011 Texas Corporation
+ * Felipe Balbi <balbi@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Sricharan <r.sricharan@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "omap_l3_smx.h"
+
+static inline u64 omap3_l3_readll(void __iomem *base, u16 reg)
+{
+ return __raw_readll(base + reg);
+}
+
+static inline void omap3_l3_writell(void __iomem *base, u16 reg, u64 value)
+{
+ __raw_writell(value, base + reg);
+}
+
+static inline enum omap3_l3_code omap3_l3_decode_error_code(u64 error)
+{
+ return (error & 0x0f000000) >> L3_ERROR_LOG_CODE;
+}
+
+static inline u32 omap3_l3_decode_addr(u64 error_addr)
+{
+ return error_addr & 0xffffffff;
+}
+
+static inline unsigned omap3_l3_decode_cmd(u64 error)
+{
+ return (error & 0x07) >> L3_ERROR_LOG_CMD;
+}
+
+static inline enum omap3_l3_initiator_id omap3_l3_decode_initid(u64 error)
+{
+ return (error & 0xff00) >> L3_ERROR_LOG_INITID;
+}
+
+static inline unsigned omap3_l3_decode_req_info(u64 error)
+{
+ return (error >> 32) & 0xffff;
+}
+
+static char *omap3_l3_code_string(u8 code)
+{
+ switch (code) {
+ case OMAP_L3_CODE_NOERROR:
+ return "No Error";
+ case OMAP_L3_CODE_UNSUP_CMD:
+ return "Unsupported Command";
+ case OMAP_L3_CODE_ADDR_HOLE:
+ return "Address Hole";
+ case OMAP_L3_CODE_PROTECT_VIOLATION:
+ return "Protection Violation";
+ case OMAP_L3_CODE_IN_BAND_ERR:
+ return "In-band Error";
+ case OMAP_L3_CODE_REQ_TOUT_NOT_ACCEPT:
+ return "Request Timeout Not Accepted";
+ case OMAP_L3_CODE_REQ_TOUT_NO_RESP:
+ return "Request Timeout, no response";
+ default:
+ return "UNKNOWN error";
+ }
+}
+
+static char *omap3_l3_initiator_string(u8 initid)
+{
+ switch (initid) {
+ case OMAP_L3_LCD:
+ return "LCD";
+ case OMAP_L3_SAD2D:
+ return "SAD2D";
+ case OMAP_L3_IA_MPU_SS_1:
+ case OMAP_L3_IA_MPU_SS_2:
+ case OMAP_L3_IA_MPU_SS_3:
+ case OMAP_L3_IA_MPU_SS_4:
+ case OMAP_L3_IA_MPU_SS_5:
+ return "MPU";
+ case OMAP_L3_IA_IVA_SS_1:
+ case OMAP_L3_IA_IVA_SS_2:
+ case OMAP_L3_IA_IVA_SS_3:
+ return "IVA_SS";
+ case OMAP_L3_IA_IVA_SS_DMA_1:
+ case OMAP_L3_IA_IVA_SS_DMA_2:
+ case OMAP_L3_IA_IVA_SS_DMA_3:
+ case OMAP_L3_IA_IVA_SS_DMA_4:
+ case OMAP_L3_IA_IVA_SS_DMA_5:
+ case OMAP_L3_IA_IVA_SS_DMA_6:
+ return "IVA_SS_DMA";
+ case OMAP_L3_IA_SGX:
+ return "SGX";
+ case OMAP_L3_IA_CAM_1:
+ case OMAP_L3_IA_CAM_2:
+ case OMAP_L3_IA_CAM_3:
+ return "CAM";
+ case OMAP_L3_IA_DAP:
+ return "DAP";
+ case OMAP_L3_SDMA_WR_1:
+ case OMAP_L3_SDMA_WR_2:
+ return "SDMA_WR";
+ case OMAP_L3_SDMA_RD_1:
+ case OMAP_L3_SDMA_RD_2:
+ case OMAP_L3_SDMA_RD_3:
+ case OMAP_L3_SDMA_RD_4:
+ return "SDMA_RD";
+ case OMAP_L3_USBOTG:
+ return "USB_OTG";
+ case OMAP_L3_USBHOST:
+ return "USB_HOST";
+ default:
+ return "UNKNOWN Initiator";
+ }
+}
+
+/*
+ * omap3_l3_block_irq - handles a register block's irq
+ * @l3: struct omap3_l3 *
+ * @base: register block base address
+ * @error: L3_ERROR_LOG register of our block
+ *
+ * Called in hard-irq context. Caller should take care of locking
+ *
+ * OMAP36xx TRM gives, on page 2001, Figure 9-10, the Typical Error
+ * Analysis Sequence, we are following that sequence here, please
+ * refer to that Figure for more information on the subject.
+ */
+static irqreturn_t omap3_l3_block_irq(struct omap3_l3 *l3,
+ u64 error, int error_addr)
+{
+ u8 code = omap3_l3_decode_error_code(error);
+ u8 initid = omap3_l3_decode_initid(error);
+ u8 multi = error & L3_ERROR_LOG_MULTI;
+ u32 address = omap3_l3_decode_addr(error_addr);
+
+ pr_err("%s seen by %s %s at address %x\n",
+ omap3_l3_code_string(code),
+ omap3_l3_initiator_string(initid),
+ multi ? "Multiple Errors" : "", address);
+ WARN_ON(1);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t omap3_l3_app_irq(int irq, void *_l3)
+{
+ struct omap3_l3 *l3 = _l3;
+ u64 status, clear;
+ u64 error;
+ u64 error_addr;
+ u64 err_source = 0;
+ void __iomem *base;
+ int int_type;
+ irqreturn_t ret = IRQ_NONE;
+
+ int_type = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR;
+ if (!int_type) {
+ status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_0);
+ /*
+ * if we have a timeout error, there's nothing we can
+ * do besides rebooting the board. So let's BUG on any
+ * of such errors and handle the others. timeout error
+ * is severe and not expected to occur.
+ */
+ BUG_ON(status & L3_STATUS_0_TIMEOUT_MASK);
+ } else {
+ status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_1);
+ /* No timeout error for debug sources */
+ }
+
+ /* identify the error source */
+ err_source = __ffs(status);
+
+ base = l3->rt + omap3_l3_bases[int_type][err_source];
+ error = omap3_l3_readll(base, L3_ERROR_LOG);
+ if (error) {
+ error_addr = omap3_l3_readll(base, L3_ERROR_LOG_ADDR);
+ ret |= omap3_l3_block_irq(l3, error, error_addr);
+ }
+
+ /* Clear the status register */
+ clear = (L3_AGENT_STATUS_CLEAR_IA << int_type) |
+ L3_AGENT_STATUS_CLEAR_TA;
+ omap3_l3_writell(base, L3_AGENT_STATUS, clear);
+
+ /* clear the error log register */
+ omap3_l3_writell(base, L3_ERROR_LOG, error);
+
+ return ret;
+}
+
+#if IS_BUILTIN(CONFIG_OF)
+static const struct of_device_id omap3_l3_match[] = {
+ {
+ .compatible = "ti,omap3-l3-smx",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, omap3_l3_match);
+#endif
+
+static int omap3_l3_probe(struct platform_device *pdev)
+{
+ struct omap3_l3 *l3;
+ struct resource *res;
+ int ret;
+
+ l3 = kzalloc(sizeof(*l3), GFP_KERNEL);
+ if (!l3)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, l3);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "couldn't find resource\n");
+ ret = -ENODEV;
+ goto err0;
+ }
+ l3->rt = ioremap(res->start, resource_size(res));
+ if (!l3->rt) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ l3->debug_irq = platform_get_irq(pdev, 0);
+ ret = request_irq(l3->debug_irq, omap3_l3_app_irq, IRQF_TRIGGER_RISING,
+ "l3-debug-irq", l3);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't request debug irq\n");
+ goto err1;
+ }
+
+ l3->app_irq = platform_get_irq(pdev, 1);
+ ret = request_irq(l3->app_irq, omap3_l3_app_irq, IRQF_TRIGGER_RISING,
+ "l3-app-irq", l3);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't request app irq\n");
+ goto err2;
+ }
+
+ return 0;
+
+err2:
+ free_irq(l3->debug_irq, l3);
+err1:
+ iounmap(l3->rt);
+err0:
+ kfree(l3);
+ return ret;
+}
+
+static int omap3_l3_remove(struct platform_device *pdev)
+{
+ struct omap3_l3 *l3 = platform_get_drvdata(pdev);
+
+ free_irq(l3->app_irq, l3);
+ free_irq(l3->debug_irq, l3);
+ iounmap(l3->rt);
+ kfree(l3);
+
+ return 0;
+}
+
+static struct platform_driver omap3_l3_driver = {
+ .probe = omap3_l3_probe,
+ .remove = omap3_l3_remove,
+ .driver = {
+ .name = "omap_l3_smx",
+ .of_match_table = of_match_ptr(omap3_l3_match),
+ },
+};
+
+static int __init omap3_l3_init(void)
+{
+ return platform_driver_register(&omap3_l3_driver);
+}
+postcore_initcall_sync(omap3_l3_init);
+
+static void __exit omap3_l3_exit(void)
+{
+ platform_driver_unregister(&omap3_l3_driver);
+}
+module_exit(omap3_l3_exit);
+
+MODULE_AUTHOR("Felipe Balbi");
+MODULE_AUTHOR("Santosh Shilimkar");
+MODULE_AUTHOR("Sricharan R");
+MODULE_DESCRIPTION("OMAP3XXX L3 Interconnect Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/bus/omap_l3_smx.h b/drivers/bus/omap_l3_smx.h
new file mode 100644
index 000000000..1d841d10b
--- /dev/null
+++ b/drivers/bus/omap_l3_smx.h
@@ -0,0 +1,324 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * OMAP3XXX L3 Interconnect Driver header
+ *
+ * Copyright (C) 2011 Texas Corporation
+ * Felipe Balbi <balbi@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * sricharan <r.sricharan@ti.com>
+ */
+#ifndef __ARCH_ARM_MACH_OMAP2_L3_INTERCONNECT_3XXX_H
+#define __ARCH_ARM_MACH_OMAP2_L3_INTERCONNECT_3XXX_H
+
+/* Register definitions. All 64-bit wide */
+#define L3_COMPONENT 0x000
+#define L3_CORE 0x018
+#define L3_AGENT_CONTROL 0x020
+#define L3_AGENT_STATUS 0x028
+#define L3_ERROR_LOG 0x058
+
+#define L3_ERROR_LOG_MULTI (1 << 31)
+#define L3_ERROR_LOG_SECONDARY (1 << 30)
+
+#define L3_ERROR_LOG_ADDR 0x060
+
+/* Register definitions for Sideband Interconnect */
+#define L3_SI_CONTROL 0x020
+#define L3_SI_FLAG_STATUS_0 0x510
+
+static const u64 shift = 1;
+
+#define L3_STATUS_0_MPUIA_BRST (shift << 0)
+#define L3_STATUS_0_MPUIA_RSP (shift << 1)
+#define L3_STATUS_0_MPUIA_INBAND (shift << 2)
+#define L3_STATUS_0_IVAIA_BRST (shift << 6)
+#define L3_STATUS_0_IVAIA_RSP (shift << 7)
+#define L3_STATUS_0_IVAIA_INBAND (shift << 8)
+#define L3_STATUS_0_SGXIA_BRST (shift << 9)
+#define L3_STATUS_0_SGXIA_RSP (shift << 10)
+#define L3_STATUS_0_SGXIA_MERROR (shift << 11)
+#define L3_STATUS_0_CAMIA_BRST (shift << 12)
+#define L3_STATUS_0_CAMIA_RSP (shift << 13)
+#define L3_STATUS_0_CAMIA_INBAND (shift << 14)
+#define L3_STATUS_0_DISPIA_BRST (shift << 15)
+#define L3_STATUS_0_DISPIA_RSP (shift << 16)
+#define L3_STATUS_0_DMARDIA_BRST (shift << 18)
+#define L3_STATUS_0_DMARDIA_RSP (shift << 19)
+#define L3_STATUS_0_DMAWRIA_BRST (shift << 21)
+#define L3_STATUS_0_DMAWRIA_RSP (shift << 22)
+#define L3_STATUS_0_USBOTGIA_BRST (shift << 24)
+#define L3_STATUS_0_USBOTGIA_RSP (shift << 25)
+#define L3_STATUS_0_USBOTGIA_INBAND (shift << 26)
+#define L3_STATUS_0_USBHOSTIA_BRST (shift << 27)
+#define L3_STATUS_0_USBHOSTIA_INBAND (shift << 28)
+#define L3_STATUS_0_SMSTA_REQ (shift << 48)
+#define L3_STATUS_0_GPMCTA_REQ (shift << 49)
+#define L3_STATUS_0_OCMRAMTA_REQ (shift << 50)
+#define L3_STATUS_0_OCMROMTA_REQ (shift << 51)
+#define L3_STATUS_0_IVATA_REQ (shift << 54)
+#define L3_STATUS_0_SGXTA_REQ (shift << 55)
+#define L3_STATUS_0_SGXTA_SERROR (shift << 56)
+#define L3_STATUS_0_GPMCTA_SERROR (shift << 57)
+#define L3_STATUS_0_L4CORETA_REQ (shift << 58)
+#define L3_STATUS_0_L4PERTA_REQ (shift << 59)
+#define L3_STATUS_0_L4EMUTA_REQ (shift << 60)
+#define L3_STATUS_0_MAD2DTA_REQ (shift << 61)
+
+#define L3_STATUS_0_TIMEOUT_MASK (L3_STATUS_0_MPUIA_BRST \
+ | L3_STATUS_0_MPUIA_RSP \
+ | L3_STATUS_0_IVAIA_BRST \
+ | L3_STATUS_0_IVAIA_RSP \
+ | L3_STATUS_0_SGXIA_BRST \
+ | L3_STATUS_0_SGXIA_RSP \
+ | L3_STATUS_0_CAMIA_BRST \
+ | L3_STATUS_0_CAMIA_RSP \
+ | L3_STATUS_0_DISPIA_BRST \
+ | L3_STATUS_0_DISPIA_RSP \
+ | L3_STATUS_0_DMARDIA_BRST \
+ | L3_STATUS_0_DMARDIA_RSP \
+ | L3_STATUS_0_DMAWRIA_BRST \
+ | L3_STATUS_0_DMAWRIA_RSP \
+ | L3_STATUS_0_USBOTGIA_BRST \
+ | L3_STATUS_0_USBOTGIA_RSP \
+ | L3_STATUS_0_USBHOSTIA_BRST \
+ | L3_STATUS_0_SMSTA_REQ \
+ | L3_STATUS_0_GPMCTA_REQ \
+ | L3_STATUS_0_OCMRAMTA_REQ \
+ | L3_STATUS_0_OCMROMTA_REQ \
+ | L3_STATUS_0_IVATA_REQ \
+ | L3_STATUS_0_SGXTA_REQ \
+ | L3_STATUS_0_L4CORETA_REQ \
+ | L3_STATUS_0_L4PERTA_REQ \
+ | L3_STATUS_0_L4EMUTA_REQ \
+ | L3_STATUS_0_MAD2DTA_REQ)
+
+#define L3_SI_FLAG_STATUS_1 0x530
+
+#define L3_STATUS_1_MPU_DATAIA (1 << 0)
+#define L3_STATUS_1_DAPIA0 (1 << 3)
+#define L3_STATUS_1_DAPIA1 (1 << 4)
+#define L3_STATUS_1_IVAIA (1 << 6)
+
+#define L3_PM_ERROR_LOG 0x020
+#define L3_PM_CONTROL 0x028
+#define L3_PM_ERROR_CLEAR_SINGLE 0x030
+#define L3_PM_ERROR_CLEAR_MULTI 0x038
+#define L3_PM_REQ_INFO_PERMISSION(n) (0x048 + (0x020 * n))
+#define L3_PM_READ_PERMISSION(n) (0x050 + (0x020 * n))
+#define L3_PM_WRITE_PERMISSION(n) (0x058 + (0x020 * n))
+#define L3_PM_ADDR_MATCH(n) (0x060 + (0x020 * n))
+
+/* L3 error log bit fields. Common for IA and TA */
+#define L3_ERROR_LOG_CODE 24
+#define L3_ERROR_LOG_INITID 8
+#define L3_ERROR_LOG_CMD 0
+
+/* L3 agent status bit fields. */
+#define L3_AGENT_STATUS_CLEAR_IA 0x10000000
+#define L3_AGENT_STATUS_CLEAR_TA 0x01000000
+
+#define OMAP34xx_IRQ_L3_APP 10
+#define L3_APPLICATION_ERROR 0x0
+#define L3_DEBUG_ERROR 0x1
+
+enum omap3_l3_initiator_id {
+ /* LCD has 1 ID */
+ OMAP_L3_LCD = 29,
+ /* SAD2D has 1 ID */
+ OMAP_L3_SAD2D = 28,
+ /* MPU has 5 IDs */
+ OMAP_L3_IA_MPU_SS_1 = 27,
+ OMAP_L3_IA_MPU_SS_2 = 26,
+ OMAP_L3_IA_MPU_SS_3 = 25,
+ OMAP_L3_IA_MPU_SS_4 = 24,
+ OMAP_L3_IA_MPU_SS_5 = 23,
+ /* IVA2.2 SS has 3 IDs*/
+ OMAP_L3_IA_IVA_SS_1 = 22,
+ OMAP_L3_IA_IVA_SS_2 = 21,
+ OMAP_L3_IA_IVA_SS_3 = 20,
+ /* IVA 2.2 SS DMA has 6 IDS */
+ OMAP_L3_IA_IVA_SS_DMA_1 = 19,
+ OMAP_L3_IA_IVA_SS_DMA_2 = 18,
+ OMAP_L3_IA_IVA_SS_DMA_3 = 17,
+ OMAP_L3_IA_IVA_SS_DMA_4 = 16,
+ OMAP_L3_IA_IVA_SS_DMA_5 = 15,
+ OMAP_L3_IA_IVA_SS_DMA_6 = 14,
+ /* SGX has 1 ID */
+ OMAP_L3_IA_SGX = 13,
+ /* CAM has 3 ID */
+ OMAP_L3_IA_CAM_1 = 12,
+ OMAP_L3_IA_CAM_2 = 11,
+ OMAP_L3_IA_CAM_3 = 10,
+ /* DAP has 1 ID */
+ OMAP_L3_IA_DAP = 9,
+ /* SDMA WR has 2 IDs */
+ OMAP_L3_SDMA_WR_1 = 8,
+ OMAP_L3_SDMA_WR_2 = 7,
+ /* SDMA RD has 4 IDs */
+ OMAP_L3_SDMA_RD_1 = 6,
+ OMAP_L3_SDMA_RD_2 = 5,
+ OMAP_L3_SDMA_RD_3 = 4,
+ OMAP_L3_SDMA_RD_4 = 3,
+ /* HSUSB OTG has 1 ID */
+ OMAP_L3_USBOTG = 2,
+ /* HSUSB HOST has 1 ID */
+ OMAP_L3_USBHOST = 1,
+};
+
+enum omap3_l3_code {
+ OMAP_L3_CODE_NOERROR = 0,
+ OMAP_L3_CODE_UNSUP_CMD = 1,
+ OMAP_L3_CODE_ADDR_HOLE = 2,
+ OMAP_L3_CODE_PROTECT_VIOLATION = 3,
+ OMAP_L3_CODE_IN_BAND_ERR = 4,
+ /* codes 5 and 6 are reserved */
+ OMAP_L3_CODE_REQ_TOUT_NOT_ACCEPT = 7,
+ OMAP_L3_CODE_REQ_TOUT_NO_RESP = 8,
+ /* codes 9 - 15 are also reserved */
+};
+
+struct omap3_l3 {
+ struct device *dev;
+ struct clk *ick;
+
+ /* memory base*/
+ void __iomem *rt;
+
+ int debug_irq;
+ int app_irq;
+
+ /* true when and inband functional error occurs */
+ unsigned inband:1;
+};
+
+/* offsets for l3 agents in order with the Flag status register */
+static unsigned int omap3_l3_app_bases[] = {
+ /* MPU IA */
+ 0x1400,
+ 0x1400,
+ 0x1400,
+ /* RESERVED */
+ 0,
+ 0,
+ 0,
+ /* IVA 2.2 IA */
+ 0x1800,
+ 0x1800,
+ 0x1800,
+ /* SGX IA */
+ 0x1c00,
+ 0x1c00,
+ /* RESERVED */
+ 0,
+ /* CAMERA IA */
+ 0x5800,
+ 0x5800,
+ 0x5800,
+ /* DISPLAY IA */
+ 0x5400,
+ 0x5400,
+ /* RESERVED */
+ 0,
+ /*SDMA RD IA */
+ 0x4c00,
+ 0x4c00,
+ /* RESERVED */
+ 0,
+ /* SDMA WR IA */
+ 0x5000,
+ 0x5000,
+ /* RESERVED */
+ 0,
+ /* USB OTG IA */
+ 0x4400,
+ 0x4400,
+ 0x4400,
+ /* USB HOST IA */
+ 0x4000,
+ 0x4000,
+ /* RESERVED */
+ 0,
+ 0,
+ 0,
+ 0,
+ /* SAD2D IA */
+ 0x3000,
+ 0x3000,
+ 0x3000,
+ /* RESERVED */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ /* SMA TA */
+ 0x2000,
+ /* GPMC TA */
+ 0x2400,
+ /* OCM RAM TA */
+ 0x2800,
+ /* OCM ROM TA */
+ 0x2C00,
+ /* L4 CORE TA */
+ 0x6800,
+ /* L4 PER TA */
+ 0x6c00,
+ /* IVA 2.2 TA */
+ 0x6000,
+ /* SGX TA */
+ 0x6400,
+ /* L4 EMU TA */
+ 0x7000,
+ /* GPMC TA */
+ 0x2400,
+ /* L4 CORE TA */
+ 0x6800,
+ /* L4 PER TA */
+ 0x6c00,
+ /* L4 EMU TA */
+ 0x7000,
+ /* MAD2D TA */
+ 0x3400,
+ /* RESERVED */
+ 0,
+ 0,
+};
+
+static unsigned int omap3_l3_debug_bases[] = {
+ /* MPU DATA IA */
+ 0x1400,
+ /* RESERVED */
+ 0,
+ 0,
+ /* DAP IA */
+ 0x5c00,
+ 0x5c00,
+ /* RESERVED */
+ 0,
+ /* IVA 2.2 IA */
+ 0x1800,
+ /* REST RESERVED */
+};
+
+static u32 *omap3_l3_bases[] = {
+ omap3_l3_app_bases,
+ omap3_l3_debug_bases,
+};
+
+/*
+ * REVISIT define __raw_readll/__raw_writell here, but move them to
+ * <asm/io.h> at some point
+ */
+#define __raw_writell(v, a) (__chk_io_ptr(a), \
+ *(volatile u64 __force *)(a) = (v))
+#define __raw_readll(a) (__chk_io_ptr(a), \
+ *(volatile u64 __force *)(a))
+
+#endif
diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c
new file mode 100644
index 000000000..0b8f53a68
--- /dev/null
+++ b/drivers/bus/qcom-ebi2.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Qualcomm External Bus Interface 2 (EBI2) driver
+ * an older version of the Qualcomm Parallel Interface Controller (QPIC)
+ *
+ * Copyright (C) 2016 Linaro Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * See the device tree bindings for this block for more details on the
+ * hardware.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+
+/*
+ * CS0, CS1, CS4 and CS5 are two bits wide, CS2 and CS3 are one bit.
+ */
+#define EBI2_CS0_ENABLE_MASK BIT(0)|BIT(1)
+#define EBI2_CS1_ENABLE_MASK BIT(2)|BIT(3)
+#define EBI2_CS2_ENABLE_MASK BIT(4)
+#define EBI2_CS3_ENABLE_MASK BIT(5)
+#define EBI2_CS4_ENABLE_MASK BIT(6)|BIT(7)
+#define EBI2_CS5_ENABLE_MASK BIT(8)|BIT(9)
+#define EBI2_CSN_MASK GENMASK(9, 0)
+
+#define EBI2_XMEM_CFG 0x0000 /* Power management etc */
+
+/*
+ * SLOW CSn CFG
+ *
+ * Bits 31-28: RECOVERY recovery cycles (0 = 1, 1 = 2 etc) this is the time the
+ * memory continues to drive the data bus after OE is de-asserted.
+ * Inserted when reading one CS and switching to another CS or read
+ * followed by write on the same CS. Valid values 0 thru 15.
+ * Bits 27-24: WR_HOLD write hold cycles, these are extra cycles inserted after
+ * every write minimum 1. The data out is driven from the time WE is
+ * asserted until CS is asserted. With a hold of 1, the CS stays
+ * active for 1 extra cycle etc. Valid values 0 thru 15.
+ * Bits 23-16: WR_DELTA initial latency for write cycles inserted for the first
+ * write to a page or burst memory
+ * Bits 15-8: RD_DELTA initial latency for read cycles inserted for the first
+ * read to a page or burst memory
+ * Bits 7-4: WR_WAIT number of wait cycles for every write access, 0=1 cycle
+ * so 1 thru 16 cycles.
+ * Bits 3-0: RD_WAIT number of wait cycles for every read access, 0=1 cycle
+ * so 1 thru 16 cycles.
+ */
+#define EBI2_XMEM_CS0_SLOW_CFG 0x0008
+#define EBI2_XMEM_CS1_SLOW_CFG 0x000C
+#define EBI2_XMEM_CS2_SLOW_CFG 0x0010
+#define EBI2_XMEM_CS3_SLOW_CFG 0x0014
+#define EBI2_XMEM_CS4_SLOW_CFG 0x0018
+#define EBI2_XMEM_CS5_SLOW_CFG 0x001C
+
+#define EBI2_XMEM_RECOVERY_SHIFT 28
+#define EBI2_XMEM_WR_HOLD_SHIFT 24
+#define EBI2_XMEM_WR_DELTA_SHIFT 16
+#define EBI2_XMEM_RD_DELTA_SHIFT 8
+#define EBI2_XMEM_WR_WAIT_SHIFT 4
+#define EBI2_XMEM_RD_WAIT_SHIFT 0
+
+/*
+ * FAST CSn CFG
+ * Bits 31-28: ?
+ * Bits 27-24: RD_HOLD: the length in cycles of the first segment of a read
+ * transfer. For a single read trandfer this will be the time
+ * from CS assertion to OE assertion.
+ * Bits 18-24: ?
+ * Bits 17-16: ADV_OE_RECOVERY, the number of cycles elapsed before an OE
+ * assertion, with respect to the cycle where ADV is asserted.
+ * 2 means 2 cycles between ADV and OE. Values 0, 1, 2 or 3.
+ * Bits 5: ADDR_HOLD_ENA, The address is held for an extra cycle to meet
+ * hold time requirements with ADV assertion.
+ *
+ * The manual mentions "write precharge cycles" and "precharge cycles".
+ * We have not been able to figure out which bit fields these correspond to
+ * in the hardware, or what valid values exist. The current hypothesis is that
+ * this is something just used on the FAST chip selects. There is also a "byte
+ * device enable" flag somewhere for 8bit memories.
+ */
+#define EBI2_XMEM_CS0_FAST_CFG 0x0028
+#define EBI2_XMEM_CS1_FAST_CFG 0x002C
+#define EBI2_XMEM_CS2_FAST_CFG 0x0030
+#define EBI2_XMEM_CS3_FAST_CFG 0x0034
+#define EBI2_XMEM_CS4_FAST_CFG 0x0038
+#define EBI2_XMEM_CS5_FAST_CFG 0x003C
+
+#define EBI2_XMEM_RD_HOLD_SHIFT 24
+#define EBI2_XMEM_ADV_OE_RECOVERY_SHIFT 16
+#define EBI2_XMEM_ADDR_HOLD_ENA_SHIFT 5
+
+/**
+ * struct cs_data - struct with info on a chipselect setting
+ * @enable_mask: mask to enable the chipselect in the EBI2 config
+ * @slow_cfg0: offset to XMEMC slow CS config
+ * @fast_cfg1: offset to XMEMC fast CS config
+ */
+struct cs_data {
+ u32 enable_mask;
+ u16 slow_cfg;
+ u16 fast_cfg;
+};
+
+static const struct cs_data cs_info[] = {
+ {
+ /* CS0 */
+ .enable_mask = EBI2_CS0_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS0_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS0_FAST_CFG,
+ },
+ {
+ /* CS1 */
+ .enable_mask = EBI2_CS1_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS1_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS1_FAST_CFG,
+ },
+ {
+ /* CS2 */
+ .enable_mask = EBI2_CS2_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS2_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS2_FAST_CFG,
+ },
+ {
+ /* CS3 */
+ .enable_mask = EBI2_CS3_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS3_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS3_FAST_CFG,
+ },
+ {
+ /* CS4 */
+ .enable_mask = EBI2_CS4_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS4_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS4_FAST_CFG,
+ },
+ {
+ /* CS5 */
+ .enable_mask = EBI2_CS5_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS5_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS5_FAST_CFG,
+ },
+};
+
+/**
+ * struct ebi2_xmem_prop - describes an XMEM config property
+ * @prop: the device tree binding name
+ * @max: maximum value for the property
+ * @slowreg: true if this property is in the SLOW CS config register
+ * else it is assumed to be in the FAST config register
+ * @shift: the bit field start in the SLOW or FAST register for this
+ * property
+ */
+struct ebi2_xmem_prop {
+ const char *prop;
+ u32 max;
+ bool slowreg;
+ u16 shift;
+};
+
+static const struct ebi2_xmem_prop xmem_props[] = {
+ {
+ .prop = "qcom,xmem-recovery-cycles",
+ .max = 15,
+ .slowreg = true,
+ .shift = EBI2_XMEM_RECOVERY_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-write-hold-cycles",
+ .max = 15,
+ .slowreg = true,
+ .shift = EBI2_XMEM_WR_HOLD_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-write-delta-cycles",
+ .max = 255,
+ .slowreg = true,
+ .shift = EBI2_XMEM_WR_DELTA_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-read-delta-cycles",
+ .max = 255,
+ .slowreg = true,
+ .shift = EBI2_XMEM_RD_DELTA_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-write-wait-cycles",
+ .max = 15,
+ .slowreg = true,
+ .shift = EBI2_XMEM_WR_WAIT_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-read-wait-cycles",
+ .max = 15,
+ .slowreg = true,
+ .shift = EBI2_XMEM_RD_WAIT_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-address-hold-enable",
+ .max = 1, /* boolean prop */
+ .slowreg = false,
+ .shift = EBI2_XMEM_ADDR_HOLD_ENA_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-adv-to-oe-recovery-cycles",
+ .max = 3,
+ .slowreg = false,
+ .shift = EBI2_XMEM_ADV_OE_RECOVERY_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-read-hold-cycles",
+ .max = 15,
+ .slowreg = false,
+ .shift = EBI2_XMEM_RD_HOLD_SHIFT,
+ },
+};
+
+static void qcom_ebi2_setup_chipselect(struct device_node *np,
+ struct device *dev,
+ void __iomem *ebi2_base,
+ void __iomem *ebi2_xmem,
+ u32 csindex)
+{
+ const struct cs_data *csd;
+ u32 slowcfg, fastcfg;
+ u32 val;
+ int ret;
+ int i;
+
+ csd = &cs_info[csindex];
+ val = readl(ebi2_base);
+ val |= csd->enable_mask;
+ writel(val, ebi2_base);
+ dev_dbg(dev, "enabled CS%u\n", csindex);
+
+ /* Next set up the XMEMC */
+ slowcfg = 0;
+ fastcfg = 0;
+
+ for (i = 0; i < ARRAY_SIZE(xmem_props); i++) {
+ const struct ebi2_xmem_prop *xp = &xmem_props[i];
+
+ /* All are regular u32 values */
+ ret = of_property_read_u32(np, xp->prop, &val);
+ if (ret) {
+ dev_dbg(dev, "could not read %s for CS%d\n",
+ xp->prop, csindex);
+ continue;
+ }
+
+ /* First check boolean props */
+ if (xp->max == 1 && val) {
+ if (xp->slowreg)
+ slowcfg |= BIT(xp->shift);
+ else
+ fastcfg |= BIT(xp->shift);
+ dev_dbg(dev, "set %s flag\n", xp->prop);
+ continue;
+ }
+
+ /* We're dealing with an u32 */
+ if (val > xp->max) {
+ dev_err(dev,
+ "too high value for %s: %u, capped at %u\n",
+ xp->prop, val, xp->max);
+ val = xp->max;
+ }
+ if (xp->slowreg)
+ slowcfg |= (val << xp->shift);
+ else
+ fastcfg |= (val << xp->shift);
+ dev_dbg(dev, "set %s to %u\n", xp->prop, val);
+ }
+
+ dev_info(dev, "CS%u: SLOW CFG 0x%08x, FAST CFG 0x%08x\n",
+ csindex, slowcfg, fastcfg);
+
+ if (slowcfg)
+ writel(slowcfg, ebi2_xmem + csd->slow_cfg);
+ if (fastcfg)
+ writel(fastcfg, ebi2_xmem + csd->fast_cfg);
+}
+
+static int qcom_ebi2_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *child;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *ebi2_base;
+ void __iomem *ebi2_xmem;
+ struct clk *ebi2xclk;
+ struct clk *ebi2clk;
+ bool have_children = false;
+ u32 val;
+ int ret;
+
+ ebi2xclk = devm_clk_get(dev, "ebi2x");
+ if (IS_ERR(ebi2xclk))
+ return PTR_ERR(ebi2xclk);
+
+ ret = clk_prepare_enable(ebi2xclk);
+ if (ret) {
+ dev_err(dev, "could not enable EBI2X clk (%d)\n", ret);
+ return ret;
+ }
+
+ ebi2clk = devm_clk_get(dev, "ebi2");
+ if (IS_ERR(ebi2clk)) {
+ ret = PTR_ERR(ebi2clk);
+ goto err_disable_2x_clk;
+ }
+
+ ret = clk_prepare_enable(ebi2clk);
+ if (ret) {
+ dev_err(dev, "could not enable EBI2 clk\n");
+ goto err_disable_2x_clk;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ebi2_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ebi2_base)) {
+ ret = PTR_ERR(ebi2_base);
+ goto err_disable_clk;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ ebi2_xmem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ebi2_xmem)) {
+ ret = PTR_ERR(ebi2_xmem);
+ goto err_disable_clk;
+ }
+
+ /* Allegedly this turns the power save mode off */
+ writel(0UL, ebi2_xmem + EBI2_XMEM_CFG);
+
+ /* Disable all chipselects */
+ val = readl(ebi2_base);
+ val &= ~EBI2_CSN_MASK;
+ writel(val, ebi2_base);
+
+ /* Walk over the child nodes and see what chipselects we use */
+ for_each_available_child_of_node(np, child) {
+ u32 csindex;
+
+ /* Figure out the chipselect */
+ ret = of_property_read_u32(child, "reg", &csindex);
+ if (ret) {
+ of_node_put(child);
+ return ret;
+ }
+
+ if (csindex > 5) {
+ dev_err(dev,
+ "invalid chipselect %u, we only support 0-5\n",
+ csindex);
+ continue;
+ }
+
+ qcom_ebi2_setup_chipselect(child,
+ dev,
+ ebi2_base,
+ ebi2_xmem,
+ csindex);
+
+ /* We have at least one child */
+ have_children = true;
+ }
+
+ if (have_children)
+ return of_platform_default_populate(np, NULL, dev);
+ return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(ebi2clk);
+err_disable_2x_clk:
+ clk_disable_unprepare(ebi2xclk);
+
+ return ret;
+}
+
+static const struct of_device_id qcom_ebi2_of_match[] = {
+ { .compatible = "qcom,msm8660-ebi2", },
+ { .compatible = "qcom,apq8060-ebi2", },
+ { }
+};
+
+static struct platform_driver qcom_ebi2_driver = {
+ .probe = qcom_ebi2_probe,
+ .driver = {
+ .name = "qcom-ebi2",
+ .of_match_table = qcom_ebi2_of_match,
+ },
+};
+module_platform_driver(qcom_ebi2_driver);
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm EBI2 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
new file mode 100644
index 000000000..c5eb46cbf
--- /dev/null
+++ b/drivers/bus/simple-pm-bus.c
@@ -0,0 +1,58 @@
+/*
+ * Simple Power-Managed Bus Driver
+ *
+ * Copyright (C) 2014-2015 Glider bvba
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+
+static int simple_pm_bus_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ pm_runtime_enable(&pdev->dev);
+
+ if (np)
+ of_platform_populate(np, NULL, NULL, &pdev->dev);
+
+ return 0;
+}
+
+static int simple_pm_bus_remove(struct platform_device *pdev)
+{
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id simple_pm_bus_of_match[] = {
+ { .compatible = "simple-pm-bus", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match);
+
+static struct platform_driver simple_pm_bus_driver = {
+ .probe = simple_pm_bus_probe,
+ .remove = simple_pm_bus_remove,
+ .driver = {
+ .name = "simple-pm-bus",
+ .of_match_table = simple_pm_bus_of_match,
+ },
+};
+
+module_platform_driver(simple_pm_bus_driver);
+
+MODULE_DESCRIPTION("Simple Power-Managed Bus Driver");
+MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/sun50i-de2.c b/drivers/bus/sun50i-de2.c
new file mode 100644
index 000000000..672518741
--- /dev/null
+++ b/drivers/bus/sun50i-de2.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner A64 Display Engine 2.0 Bus Driver
+ *
+ * Copyright (C) 2018 Icenowy Zheng <icenowy@aosc.io>
+ */
+
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/soc/sunxi/sunxi_sram.h>
+
+static int sun50i_de2_bus_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ ret = sunxi_sram_claim(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Error couldn't map SRAM to device\n");
+ return ret;
+ }
+
+ of_platform_populate(np, NULL, NULL, &pdev->dev);
+
+ return 0;
+}
+
+static int sun50i_de2_bus_remove(struct platform_device *pdev)
+{
+ sunxi_sram_release(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id sun50i_de2_bus_of_match[] = {
+ { .compatible = "allwinner,sun50i-a64-de2", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver sun50i_de2_bus_driver = {
+ .probe = sun50i_de2_bus_probe,
+ .remove = sun50i_de2_bus_remove,
+ .driver = {
+ .name = "sun50i-de2-bus",
+ .of_match_table = sun50i_de2_bus_of_match,
+ },
+};
+
+builtin_platform_driver(sun50i_de2_bus_driver);
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
new file mode 100644
index 000000000..98cbb18f1
--- /dev/null
+++ b/drivers/bus/sunxi-rsb.c
@@ -0,0 +1,803 @@
+/*
+ * RSB (Reduced Serial Bus) driver.
+ *
+ * Author: Chen-Yu Tsai <wens@csie.org>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * The RSB controller looks like an SMBus controller which only supports
+ * byte and word data transfers. But, it differs from standard SMBus
+ * protocol on several aspects:
+ * - it uses addresses set at runtime to address slaves. Runtime addresses
+ * are sent to slaves using their 12bit hardware addresses. Up to 15
+ * runtime addresses are available.
+ * - it adds a parity bit every 8bits of data and address for read and
+ * write accesses; this replaces the ack bit
+ * - only one read access is required to read a byte (instead of a write
+ * followed by a read access in standard SMBus protocol)
+ * - there's no Ack bit after each read access
+ *
+ * This means this bus cannot be used to interface with standard SMBus
+ * devices. Devices known to support this interface include the AXP223,
+ * AXP809, and AXP806 PMICs, and the AC100 audio codec, all from X-Powers.
+ *
+ * A description of the operation and wire protocol can be found in the
+ * RSB section of Allwinner's A80 user manual, which can be found at
+ *
+ * https://github.com/allwinner-zh/documents/tree/master/A80
+ *
+ * This document is officially released by Allwinner.
+ *
+ * This driver is based on i2c-sun6i-p2wi.c, the P2WI bus driver.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/sunxi-rsb.h>
+#include <linux/types.h>
+
+/* RSB registers */
+#define RSB_CTRL 0x0 /* Global control */
+#define RSB_CCR 0x4 /* Clock control */
+#define RSB_INTE 0x8 /* Interrupt controls */
+#define RSB_INTS 0xc /* Interrupt status */
+#define RSB_ADDR 0x10 /* Address to send with read/write command */
+#define RSB_DATA 0x1c /* Data to read/write */
+#define RSB_LCR 0x24 /* Line control */
+#define RSB_DMCR 0x28 /* Device mode (init) control */
+#define RSB_CMD 0x2c /* RSB Command */
+#define RSB_DAR 0x30 /* Device address / runtime address */
+
+/* CTRL fields */
+#define RSB_CTRL_START_TRANS BIT(7)
+#define RSB_CTRL_ABORT_TRANS BIT(6)
+#define RSB_CTRL_GLOBAL_INT_ENB BIT(1)
+#define RSB_CTRL_SOFT_RST BIT(0)
+
+/* CLK CTRL fields */
+#define RSB_CCR_SDA_OUT_DELAY(v) (((v) & 0x7) << 8)
+#define RSB_CCR_MAX_CLK_DIV 0xff
+#define RSB_CCR_CLK_DIV(v) ((v) & RSB_CCR_MAX_CLK_DIV)
+
+/* STATUS fields */
+#define RSB_INTS_TRANS_ERR_ACK BIT(16)
+#define RSB_INTS_TRANS_ERR_DATA_BIT(v) (((v) >> 8) & 0xf)
+#define RSB_INTS_TRANS_ERR_DATA GENMASK(11, 8)
+#define RSB_INTS_LOAD_BSY BIT(2)
+#define RSB_INTS_TRANS_ERR BIT(1)
+#define RSB_INTS_TRANS_OVER BIT(0)
+
+/* LINE CTRL fields*/
+#define RSB_LCR_SCL_STATE BIT(5)
+#define RSB_LCR_SDA_STATE BIT(4)
+#define RSB_LCR_SCL_CTL BIT(3)
+#define RSB_LCR_SCL_CTL_EN BIT(2)
+#define RSB_LCR_SDA_CTL BIT(1)
+#define RSB_LCR_SDA_CTL_EN BIT(0)
+
+/* DEVICE MODE CTRL field values */
+#define RSB_DMCR_DEVICE_START BIT(31)
+#define RSB_DMCR_MODE_DATA (0x7c << 16)
+#define RSB_DMCR_MODE_REG (0x3e << 8)
+#define RSB_DMCR_DEV_ADDR 0x00
+
+/* CMD values */
+#define RSB_CMD_RD8 0x8b
+#define RSB_CMD_RD16 0x9c
+#define RSB_CMD_RD32 0xa6
+#define RSB_CMD_WR8 0x4e
+#define RSB_CMD_WR16 0x59
+#define RSB_CMD_WR32 0x63
+#define RSB_CMD_STRA 0xe8
+
+/* DAR fields */
+#define RSB_DAR_RTA(v) (((v) & 0xff) << 16)
+#define RSB_DAR_DA(v) ((v) & 0xffff)
+
+#define RSB_MAX_FREQ 20000000
+
+#define RSB_CTRL_NAME "sunxi-rsb"
+
+struct sunxi_rsb_addr_map {
+ u16 hwaddr;
+ u8 rtaddr;
+};
+
+struct sunxi_rsb {
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+ struct reset_control *rstc;
+ struct completion complete;
+ struct mutex lock;
+ unsigned int status;
+};
+
+/* bus / slave device related functions */
+static struct bus_type sunxi_rsb_bus;
+
+static int sunxi_rsb_device_match(struct device *dev, struct device_driver *drv)
+{
+ return of_driver_match_device(dev, drv);
+}
+
+static int sunxi_rsb_device_probe(struct device *dev)
+{
+ const struct sunxi_rsb_driver *drv = to_sunxi_rsb_driver(dev->driver);
+ struct sunxi_rsb_device *rdev = to_sunxi_rsb_device(dev);
+ int ret;
+
+ if (!drv->probe)
+ return -ENODEV;
+
+ if (!rdev->irq) {
+ int irq = -ENOENT;
+
+ if (dev->of_node)
+ irq = of_irq_get(dev->of_node, 0);
+
+ if (irq == -EPROBE_DEFER)
+ return irq;
+ if (irq < 0)
+ irq = 0;
+
+ rdev->irq = irq;
+ }
+
+ ret = of_clk_set_defaults(dev->of_node, false);
+ if (ret < 0)
+ return ret;
+
+ return drv->probe(rdev);
+}
+
+static int sunxi_rsb_device_remove(struct device *dev)
+{
+ const struct sunxi_rsb_driver *drv = to_sunxi_rsb_driver(dev->driver);
+
+ return drv->remove(to_sunxi_rsb_device(dev));
+}
+
+static struct bus_type sunxi_rsb_bus = {
+ .name = RSB_CTRL_NAME,
+ .match = sunxi_rsb_device_match,
+ .probe = sunxi_rsb_device_probe,
+ .remove = sunxi_rsb_device_remove,
+ .uevent = of_device_uevent_modalias,
+};
+
+static void sunxi_rsb_dev_release(struct device *dev)
+{
+ struct sunxi_rsb_device *rdev = to_sunxi_rsb_device(dev);
+
+ kfree(rdev);
+}
+
+/**
+ * sunxi_rsb_device_create() - allocate and add an RSB device
+ * @rsb: RSB controller
+ * @node: RSB slave device node
+ * @hwaddr: RSB slave hardware address
+ * @rtaddr: RSB slave runtime address
+ */
+static struct sunxi_rsb_device *sunxi_rsb_device_create(struct sunxi_rsb *rsb,
+ struct device_node *node, u16 hwaddr, u8 rtaddr)
+{
+ int err;
+ struct sunxi_rsb_device *rdev;
+
+ rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
+ if (!rdev)
+ return ERR_PTR(-ENOMEM);
+
+ rdev->rsb = rsb;
+ rdev->hwaddr = hwaddr;
+ rdev->rtaddr = rtaddr;
+ rdev->dev.bus = &sunxi_rsb_bus;
+ rdev->dev.parent = rsb->dev;
+ rdev->dev.of_node = node;
+ rdev->dev.release = sunxi_rsb_dev_release;
+
+ dev_set_name(&rdev->dev, "%s-%x", RSB_CTRL_NAME, hwaddr);
+
+ err = device_register(&rdev->dev);
+ if (err < 0) {
+ dev_err(&rdev->dev, "Can't add %s, status %d\n",
+ dev_name(&rdev->dev), err);
+ goto err_device_add;
+ }
+
+ dev_dbg(&rdev->dev, "device %s registered\n", dev_name(&rdev->dev));
+
+ return rdev;
+
+err_device_add:
+ put_device(&rdev->dev);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * sunxi_rsb_device_unregister(): unregister an RSB device
+ * @rdev: rsb_device to be removed
+ */
+static void sunxi_rsb_device_unregister(struct sunxi_rsb_device *rdev)
+{
+ device_unregister(&rdev->dev);
+}
+
+static int sunxi_rsb_remove_devices(struct device *dev, void *data)
+{
+ struct sunxi_rsb_device *rdev = to_sunxi_rsb_device(dev);
+
+ if (dev->bus == &sunxi_rsb_bus)
+ sunxi_rsb_device_unregister(rdev);
+
+ return 0;
+}
+
+/**
+ * sunxi_rsb_driver_register() - Register device driver with RSB core
+ * @rdrv: device driver to be associated with slave-device.
+ *
+ * This API will register the client driver with the RSB framework.
+ * It is typically called from the driver's module-init function.
+ */
+int sunxi_rsb_driver_register(struct sunxi_rsb_driver *rdrv)
+{
+ rdrv->driver.bus = &sunxi_rsb_bus;
+ return driver_register(&rdrv->driver);
+}
+EXPORT_SYMBOL_GPL(sunxi_rsb_driver_register);
+
+/* common code that starts a transfer */
+static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb)
+{
+ u32 int_mask, status;
+ bool timeout;
+
+ if (readl(rsb->regs + RSB_CTRL) & RSB_CTRL_START_TRANS) {
+ dev_dbg(rsb->dev, "RSB transfer still in progress\n");
+ return -EBUSY;
+ }
+
+ reinit_completion(&rsb->complete);
+
+ int_mask = RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER;
+ writel(int_mask, rsb->regs + RSB_INTE);
+ writel(RSB_CTRL_START_TRANS | RSB_CTRL_GLOBAL_INT_ENB,
+ rsb->regs + RSB_CTRL);
+
+ if (irqs_disabled()) {
+ timeout = readl_poll_timeout_atomic(rsb->regs + RSB_INTS,
+ status, (status & int_mask),
+ 10, 100000);
+ writel(status, rsb->regs + RSB_INTS);
+ } else {
+ timeout = !wait_for_completion_io_timeout(&rsb->complete,
+ msecs_to_jiffies(100));
+ status = rsb->status;
+ }
+
+ if (timeout) {
+ dev_dbg(rsb->dev, "RSB timeout\n");
+
+ /* abort the transfer */
+ writel(RSB_CTRL_ABORT_TRANS, rsb->regs + RSB_CTRL);
+
+ /* clear any interrupt flags */
+ writel(readl(rsb->regs + RSB_INTS), rsb->regs + RSB_INTS);
+
+ return -ETIMEDOUT;
+ }
+
+ if (status & RSB_INTS_LOAD_BSY) {
+ dev_dbg(rsb->dev, "RSB busy\n");
+ return -EBUSY;
+ }
+
+ if (status & RSB_INTS_TRANS_ERR) {
+ if (status & RSB_INTS_TRANS_ERR_ACK) {
+ dev_dbg(rsb->dev, "RSB slave nack\n");
+ return -EINVAL;
+ }
+
+ if (status & RSB_INTS_TRANS_ERR_DATA) {
+ dev_dbg(rsb->dev, "RSB transfer data error\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
+ u32 *buf, size_t len)
+{
+ u32 cmd;
+ int ret;
+
+ if (!buf)
+ return -EINVAL;
+
+ switch (len) {
+ case 1:
+ cmd = RSB_CMD_RD8;
+ break;
+ case 2:
+ cmd = RSB_CMD_RD16;
+ break;
+ case 4:
+ cmd = RSB_CMD_RD32;
+ break;
+ default:
+ dev_err(rsb->dev, "Invalid access width: %zd\n", len);
+ return -EINVAL;
+ }
+
+ mutex_lock(&rsb->lock);
+
+ writel(addr, rsb->regs + RSB_ADDR);
+ writel(RSB_DAR_RTA(rtaddr), rsb->regs + RSB_DAR);
+ writel(cmd, rsb->regs + RSB_CMD);
+
+ ret = _sunxi_rsb_run_xfer(rsb);
+ if (ret)
+ goto unlock;
+
+ *buf = readl(rsb->regs + RSB_DATA) & GENMASK(len * 8 - 1, 0);
+
+unlock:
+ mutex_unlock(&rsb->lock);
+
+ return ret;
+}
+
+static int sunxi_rsb_write(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
+ const u32 *buf, size_t len)
+{
+ u32 cmd;
+ int ret;
+
+ if (!buf)
+ return -EINVAL;
+
+ switch (len) {
+ case 1:
+ cmd = RSB_CMD_WR8;
+ break;
+ case 2:
+ cmd = RSB_CMD_WR16;
+ break;
+ case 4:
+ cmd = RSB_CMD_WR32;
+ break;
+ default:
+ dev_err(rsb->dev, "Invalid access width: %zd\n", len);
+ return -EINVAL;
+ }
+
+ mutex_lock(&rsb->lock);
+
+ writel(addr, rsb->regs + RSB_ADDR);
+ writel(RSB_DAR_RTA(rtaddr), rsb->regs + RSB_DAR);
+ writel(*buf, rsb->regs + RSB_DATA);
+ writel(cmd, rsb->regs + RSB_CMD);
+ ret = _sunxi_rsb_run_xfer(rsb);
+
+ mutex_unlock(&rsb->lock);
+
+ return ret;
+}
+
+/* RSB regmap functions */
+struct sunxi_rsb_ctx {
+ struct sunxi_rsb_device *rdev;
+ int size;
+};
+
+static int regmap_sunxi_rsb_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct sunxi_rsb_ctx *ctx = context;
+ struct sunxi_rsb_device *rdev = ctx->rdev;
+
+ if (reg > 0xff)
+ return -EINVAL;
+
+ return sunxi_rsb_read(rdev->rsb, rdev->rtaddr, reg, val, ctx->size);
+}
+
+static int regmap_sunxi_rsb_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct sunxi_rsb_ctx *ctx = context;
+ struct sunxi_rsb_device *rdev = ctx->rdev;
+
+ return sunxi_rsb_write(rdev->rsb, rdev->rtaddr, reg, &val, ctx->size);
+}
+
+static void regmap_sunxi_rsb_free_ctx(void *context)
+{
+ struct sunxi_rsb_ctx *ctx = context;
+
+ kfree(ctx);
+}
+
+static struct regmap_bus regmap_sunxi_rsb = {
+ .reg_write = regmap_sunxi_rsb_reg_write,
+ .reg_read = regmap_sunxi_rsb_reg_read,
+ .free_context = regmap_sunxi_rsb_free_ctx,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+static struct sunxi_rsb_ctx *regmap_sunxi_rsb_init_ctx(struct sunxi_rsb_device *rdev,
+ const struct regmap_config *config)
+{
+ struct sunxi_rsb_ctx *ctx;
+
+ switch (config->val_bits) {
+ case 8:
+ case 16:
+ case 32:
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->rdev = rdev;
+ ctx->size = config->val_bits / 8;
+
+ return ctx;
+}
+
+struct regmap *__devm_regmap_init_sunxi_rsb(struct sunxi_rsb_device *rdev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ struct sunxi_rsb_ctx *ctx = regmap_sunxi_rsb_init_ctx(rdev, config);
+
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return __devm_regmap_init(&rdev->dev, &regmap_sunxi_rsb, ctx, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_sunxi_rsb);
+
+/* RSB controller driver functions */
+static irqreturn_t sunxi_rsb_irq(int irq, void *dev_id)
+{
+ struct sunxi_rsb *rsb = dev_id;
+ u32 status;
+
+ status = readl(rsb->regs + RSB_INTS);
+ rsb->status = status;
+
+ /* Clear interrupts */
+ status &= (RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR |
+ RSB_INTS_TRANS_OVER);
+ writel(status, rsb->regs + RSB_INTS);
+
+ complete(&rsb->complete);
+
+ return IRQ_HANDLED;
+}
+
+static int sunxi_rsb_init_device_mode(struct sunxi_rsb *rsb)
+{
+ int ret = 0;
+ u32 reg;
+
+ /* send init sequence */
+ writel(RSB_DMCR_DEVICE_START | RSB_DMCR_MODE_DATA |
+ RSB_DMCR_MODE_REG | RSB_DMCR_DEV_ADDR, rsb->regs + RSB_DMCR);
+
+ readl_poll_timeout(rsb->regs + RSB_DMCR, reg,
+ !(reg & RSB_DMCR_DEVICE_START), 100, 250000);
+ if (reg & RSB_DMCR_DEVICE_START)
+ ret = -ETIMEDOUT;
+
+ /* clear interrupt status bits */
+ writel(readl(rsb->regs + RSB_INTS), rsb->regs + RSB_INTS);
+
+ return ret;
+}
+
+/*
+ * There are 15 valid runtime addresses, though Allwinner typically
+ * skips the first, for unknown reasons, and uses the following three.
+ *
+ * 0x17, 0x2d, 0x3a, 0x4e, 0x59, 0x63, 0x74, 0x8b,
+ * 0x9c, 0xa6, 0xb1, 0xc5, 0xd2, 0xe8, 0xff
+ *
+ * No designs with 2 RSB slave devices sharing identical hardware
+ * addresses on the same bus have been seen in the wild. All designs
+ * use 0x2d for the primary PMIC, 0x3a for the secondary PMIC if
+ * there is one, and 0x45 for peripheral ICs.
+ *
+ * The hardware does not seem to support re-setting runtime addresses.
+ * Attempts to do so result in the slave devices returning a NACK.
+ * Hence we just hardcode the mapping here, like Allwinner does.
+ */
+
+static const struct sunxi_rsb_addr_map sunxi_rsb_addr_maps[] = {
+ { 0x3a3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */
+ { 0x745, 0x3a }, /* Secondary PMIC: AXP806, ... */
+ { 0xe89, 0x4e }, /* Peripheral IC: AC100, ... */
+};
+
+static u8 sunxi_rsb_get_rtaddr(u16 hwaddr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sunxi_rsb_addr_maps); i++)
+ if (hwaddr == sunxi_rsb_addr_maps[i].hwaddr)
+ return sunxi_rsb_addr_maps[i].rtaddr;
+
+ return 0; /* 0 is an invalid runtime address */
+}
+
+static int of_rsb_register_devices(struct sunxi_rsb *rsb)
+{
+ struct device *dev = rsb->dev;
+ struct device_node *child, *np = dev->of_node;
+ u32 hwaddr;
+ u8 rtaddr;
+ int ret;
+
+ if (!np)
+ return -EINVAL;
+
+ /* Runtime addresses for all slaves should be set first */
+ for_each_available_child_of_node(np, child) {
+ dev_dbg(dev, "setting child %pOF runtime address\n",
+ child);
+
+ ret = of_property_read_u32(child, "reg", &hwaddr);
+ if (ret) {
+ dev_err(dev, "%pOF: invalid 'reg' property: %d\n",
+ child, ret);
+ continue;
+ }
+
+ rtaddr = sunxi_rsb_get_rtaddr(hwaddr);
+ if (!rtaddr) {
+ dev_err(dev, "%pOF: unknown hardware device address\n",
+ child);
+ continue;
+ }
+
+ /*
+ * Since no devices have been registered yet, we are the
+ * only ones using the bus, we can skip locking the bus.
+ */
+
+ /* setup command parameters */
+ writel(RSB_CMD_STRA, rsb->regs + RSB_CMD);
+ writel(RSB_DAR_RTA(rtaddr) | RSB_DAR_DA(hwaddr),
+ rsb->regs + RSB_DAR);
+
+ /* send command */
+ ret = _sunxi_rsb_run_xfer(rsb);
+ if (ret)
+ dev_warn(dev, "%pOF: set runtime address failed: %d\n",
+ child, ret);
+ }
+
+ /* Then we start adding devices and probing them */
+ for_each_available_child_of_node(np, child) {
+ struct sunxi_rsb_device *rdev;
+
+ dev_dbg(dev, "adding child %pOF\n", child);
+
+ ret = of_property_read_u32(child, "reg", &hwaddr);
+ if (ret)
+ continue;
+
+ rtaddr = sunxi_rsb_get_rtaddr(hwaddr);
+ if (!rtaddr)
+ continue;
+
+ rdev = sunxi_rsb_device_create(rsb, child, hwaddr, rtaddr);
+ if (IS_ERR(rdev))
+ dev_err(dev, "failed to add child device %pOF: %ld\n",
+ child, PTR_ERR(rdev));
+ }
+
+ return 0;
+}
+
+static const struct of_device_id sunxi_rsb_of_match_table[] = {
+ { .compatible = "allwinner,sun8i-a23-rsb" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table);
+
+static int sunxi_rsb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource *r;
+ struct sunxi_rsb *rsb;
+ unsigned long p_clk_freq;
+ u32 clk_delay, clk_freq = 3000000;
+ int clk_div, irq, ret;
+ u32 reg;
+
+ of_property_read_u32(np, "clock-frequency", &clk_freq);
+ if (clk_freq > RSB_MAX_FREQ) {
+ dev_err(dev,
+ "clock-frequency (%u Hz) is too high (max = 20MHz)\n",
+ clk_freq);
+ return -EINVAL;
+ }
+
+ rsb = devm_kzalloc(dev, sizeof(*rsb), GFP_KERNEL);
+ if (!rsb)
+ return -ENOMEM;
+
+ rsb->dev = dev;
+ platform_set_drvdata(pdev, rsb);
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rsb->regs = devm_ioremap_resource(dev, r);
+ if (IS_ERR(rsb->regs))
+ return PTR_ERR(rsb->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ rsb->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(rsb->clk)) {
+ ret = PTR_ERR(rsb->clk);
+ dev_err(dev, "failed to retrieve clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(rsb->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk: %d\n", ret);
+ return ret;
+ }
+
+ p_clk_freq = clk_get_rate(rsb->clk);
+
+ rsb->rstc = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(rsb->rstc)) {
+ ret = PTR_ERR(rsb->rstc);
+ dev_err(dev, "failed to retrieve reset controller: %d\n", ret);
+ goto err_clk_disable;
+ }
+
+ ret = reset_control_deassert(rsb->rstc);
+ if (ret) {
+ dev_err(dev, "failed to deassert reset line: %d\n", ret);
+ goto err_clk_disable;
+ }
+
+ init_completion(&rsb->complete);
+ mutex_init(&rsb->lock);
+
+ /* reset the controller */
+ writel(RSB_CTRL_SOFT_RST, rsb->regs + RSB_CTRL);
+ readl_poll_timeout(rsb->regs + RSB_CTRL, reg,
+ !(reg & RSB_CTRL_SOFT_RST), 1000, 100000);
+
+ /*
+ * Clock frequency and delay calculation code is from
+ * Allwinner U-boot sources.
+ *
+ * From A83 user manual:
+ * bus clock frequency = parent clock frequency / (2 * (divider + 1))
+ */
+ clk_div = p_clk_freq / clk_freq / 2;
+ if (!clk_div)
+ clk_div = 1;
+ else if (clk_div > RSB_CCR_MAX_CLK_DIV + 1)
+ clk_div = RSB_CCR_MAX_CLK_DIV + 1;
+
+ clk_delay = clk_div >> 1;
+ if (!clk_delay)
+ clk_delay = 1;
+
+ dev_info(dev, "RSB running at %lu Hz\n", p_clk_freq / clk_div / 2);
+ writel(RSB_CCR_SDA_OUT_DELAY(clk_delay) | RSB_CCR_CLK_DIV(clk_div - 1),
+ rsb->regs + RSB_CCR);
+
+ ret = devm_request_irq(dev, irq, sunxi_rsb_irq, 0, RSB_CTRL_NAME, rsb);
+ if (ret) {
+ dev_err(dev, "can't register interrupt handler irq %d: %d\n",
+ irq, ret);
+ goto err_reset_assert;
+ }
+
+ /* initialize all devices on the bus into RSB mode */
+ ret = sunxi_rsb_init_device_mode(rsb);
+ if (ret)
+ dev_warn(dev, "Initialize device mode failed: %d\n", ret);
+
+ of_rsb_register_devices(rsb);
+
+ return 0;
+
+err_reset_assert:
+ reset_control_assert(rsb->rstc);
+
+err_clk_disable:
+ clk_disable_unprepare(rsb->clk);
+
+ return ret;
+}
+
+static int sunxi_rsb_remove(struct platform_device *pdev)
+{
+ struct sunxi_rsb *rsb = platform_get_drvdata(pdev);
+
+ device_for_each_child(rsb->dev, NULL, sunxi_rsb_remove_devices);
+ reset_control_assert(rsb->rstc);
+ clk_disable_unprepare(rsb->clk);
+
+ return 0;
+}
+
+static struct platform_driver sunxi_rsb_driver = {
+ .probe = sunxi_rsb_probe,
+ .remove = sunxi_rsb_remove,
+ .driver = {
+ .name = RSB_CTRL_NAME,
+ .of_match_table = sunxi_rsb_of_match_table,
+ },
+};
+
+static int __init sunxi_rsb_init(void)
+{
+ int ret;
+
+ ret = bus_register(&sunxi_rsb_bus);
+ if (ret) {
+ pr_err("failed to register sunxi sunxi_rsb bus: %d\n", ret);
+ return ret;
+ }
+
+ ret = platform_driver_register(&sunxi_rsb_driver);
+ if (ret) {
+ bus_unregister(&sunxi_rsb_bus);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(sunxi_rsb_init);
+
+static void __exit sunxi_rsb_exit(void)
+{
+ platform_driver_unregister(&sunxi_rsb_driver);
+ bus_unregister(&sunxi_rsb_bus);
+}
+module_exit(sunxi_rsb_exit);
+
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_DESCRIPTION("Allwinner sunXi Reduced Serial Bus controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/tegra-aconnect.c b/drivers/bus/tegra-aconnect.c
new file mode 100644
index 000000000..ac5814230
--- /dev/null
+++ b/drivers/bus/tegra-aconnect.c
@@ -0,0 +1,120 @@
+/*
+ * Tegra ACONNECT Bus Driver
+ *
+ * Copyright (C) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+struct tegra_aconnect {
+ struct clk *ape_clk;
+ struct clk *apb2ape_clk;
+};
+
+static int tegra_aconnect_probe(struct platform_device *pdev)
+{
+ struct tegra_aconnect *aconnect;
+
+ if (!pdev->dev.of_node)
+ return -EINVAL;
+
+ aconnect = devm_kzalloc(&pdev->dev, sizeof(struct tegra_aconnect),
+ GFP_KERNEL);
+ if (!aconnect)
+ return -ENOMEM;
+
+ aconnect->ape_clk = devm_clk_get(&pdev->dev, "ape");
+ if (IS_ERR(aconnect->ape_clk)) {
+ dev_err(&pdev->dev, "Can't retrieve ape clock\n");
+ return PTR_ERR(aconnect->ape_clk);
+ }
+
+ aconnect->apb2ape_clk = devm_clk_get(&pdev->dev, "apb2ape");
+ if (IS_ERR(aconnect->apb2ape_clk)) {
+ dev_err(&pdev->dev, "Can't retrieve apb2ape clock\n");
+ return PTR_ERR(aconnect->apb2ape_clk);
+ }
+
+ dev_set_drvdata(&pdev->dev, aconnect);
+ pm_runtime_enable(&pdev->dev);
+
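+	/*
+	 * The children populated below reach the clocks through runtime PM:
+	 * a pm_runtime_get() on a child resumes this parent device first,
+	 * which lands in tegra_aconnect_runtime_resume() and enables the
+	 * ape and apb2ape clocks.
+	 */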
+ of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+ dev_info(&pdev->dev, "Tegra ACONNECT bus registered\n");
+
+ return 0;
+}
+
+static int tegra_aconnect_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int tegra_aconnect_runtime_resume(struct device *dev)
+{
+ struct tegra_aconnect *aconnect = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(aconnect->ape_clk);
+ if (ret) {
+ dev_err(dev, "ape clk_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(aconnect->apb2ape_clk);
+ if (ret) {
+ clk_disable_unprepare(aconnect->ape_clk);
+ dev_err(dev, "apb2ape clk_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tegra_aconnect_runtime_suspend(struct device *dev)
+{
+ struct tegra_aconnect *aconnect = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(aconnect->ape_clk);
+ clk_disable_unprepare(aconnect->apb2ape_clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_aconnect_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_aconnect_runtime_suspend,
+ tegra_aconnect_runtime_resume, NULL)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static const struct of_device_id tegra_aconnect_of_match[] = {
+ { .compatible = "nvidia,tegra210-aconnect", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_aconnect_of_match);
+
+static struct platform_driver tegra_aconnect_driver = {
+ .probe = tegra_aconnect_probe,
+ .remove = tegra_aconnect_remove,
+ .driver = {
+ .name = "tegra-aconnect",
+ .of_match_table = tegra_aconnect_of_match,
+ .pm = &tegra_aconnect_pm_ops,
+ },
+};
+module_platform_driver(tegra_aconnect_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra ACONNECT Bus Driver");
+MODULE_AUTHOR("Jon Hunter <jonathanh@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/tegra-gmi.c b/drivers/bus/tegra-gmi.c
new file mode 100644
index 000000000..a6570789f
--- /dev/null
+++ b/drivers/bus/tegra-gmi.c
@@ -0,0 +1,284 @@
+/*
+ * Driver for NVIDIA Generic Memory Interface
+ *
+ * Copyright (C) 2016 Host Mobility AB. All rights reserved.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+
+#define TEGRA_GMI_CONFIG 0x00
+#define TEGRA_GMI_CONFIG_GO BIT(31)
+#define TEGRA_GMI_BUS_WIDTH_32BIT BIT(30)
+#define TEGRA_GMI_MUX_MODE BIT(28)
+#define TEGRA_GMI_RDY_BEFORE_DATA BIT(24)
+#define TEGRA_GMI_RDY_ACTIVE_HIGH BIT(23)
+#define TEGRA_GMI_ADV_ACTIVE_HIGH BIT(22)
+#define TEGRA_GMI_OE_ACTIVE_HIGH BIT(21)
+#define TEGRA_GMI_CS_ACTIVE_HIGH BIT(20)
+#define TEGRA_GMI_CS_SELECT(x) ((x & 0x7) << 4)
+
+#define TEGRA_GMI_TIMING0 0x10
+#define TEGRA_GMI_MUXED_WIDTH(x) ((x & 0xf) << 12)
+#define TEGRA_GMI_HOLD_WIDTH(x) ((x & 0xf) << 8)
+#define TEGRA_GMI_ADV_WIDTH(x) ((x & 0xf) << 4)
+#define TEGRA_GMI_CE_WIDTH(x) (x & 0xf)
+
+#define TEGRA_GMI_TIMING1 0x14
+#define TEGRA_GMI_WE_WIDTH(x) ((x & 0xff) << 16)
+#define TEGRA_GMI_OE_WIDTH(x) ((x & 0xff) << 8)
+#define TEGRA_GMI_WAIT_WIDTH(x) (x & 0xff)
+
+#define TEGRA_GMI_MAX_CHIP_SELECT 8
+
+struct tegra_gmi {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+ struct reset_control *rst;
+
+ u32 snor_config;
+ u32 snor_timing0;
+ u32 snor_timing1;
+};
+
+static int tegra_gmi_enable(struct tegra_gmi *gmi)
+{
+ int err;
+
+ err = clk_prepare_enable(gmi->clk);
+ if (err < 0) {
+ dev_err(gmi->dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ reset_control_assert(gmi->rst);
+ usleep_range(2000, 4000);
+ reset_control_deassert(gmi->rst);
+
+ writel(gmi->snor_timing0, gmi->base + TEGRA_GMI_TIMING0);
+ writel(gmi->snor_timing1, gmi->base + TEGRA_GMI_TIMING1);
+
+ gmi->snor_config |= TEGRA_GMI_CONFIG_GO;
+ writel(gmi->snor_config, gmi->base + TEGRA_GMI_CONFIG);
+
+ return 0;
+}
+
+static void tegra_gmi_disable(struct tegra_gmi *gmi)
+{
+ u32 config;
+
+ /* stop GMI operation */
+ config = readl(gmi->base + TEGRA_GMI_CONFIG);
+ config &= ~TEGRA_GMI_CONFIG_GO;
+ writel(config, gmi->base + TEGRA_GMI_CONFIG);
+
+ reset_control_assert(gmi->rst);
+ clk_disable_unprepare(gmi->clk);
+}
+
+static int tegra_gmi_parse_dt(struct tegra_gmi *gmi)
+{
+ struct device_node *child;
+ u32 property, ranges[4];
+ int err;
+
+ child = of_get_next_available_child(gmi->dev->of_node, NULL);
+ if (!child) {
+ dev_err(gmi->dev, "no child nodes found\n");
+ return -ENODEV;
+ }
+
+ /*
+	 * We currently only support one child device due to the lack of
+	 * chip-select address decoding, which means that we only drive a
+	 * single chip-select line from the GMI controller.
+ */
+ if (of_get_child_count(gmi->dev->of_node) > 1)
+		dev_warn(gmi->dev, "only one child device is supported\n");
+
+ if (of_property_read_bool(child, "nvidia,snor-data-width-32bit"))
+ gmi->snor_config |= TEGRA_GMI_BUS_WIDTH_32BIT;
+
+ if (of_property_read_bool(child, "nvidia,snor-mux-mode"))
+ gmi->snor_config |= TEGRA_GMI_MUX_MODE;
+
+ if (of_property_read_bool(child, "nvidia,snor-rdy-active-before-data"))
+ gmi->snor_config |= TEGRA_GMI_RDY_BEFORE_DATA;
+
+ if (of_property_read_bool(child, "nvidia,snor-rdy-active-high"))
+ gmi->snor_config |= TEGRA_GMI_RDY_ACTIVE_HIGH;
+
+ if (of_property_read_bool(child, "nvidia,snor-adv-active-high"))
+ gmi->snor_config |= TEGRA_GMI_ADV_ACTIVE_HIGH;
+
+ if (of_property_read_bool(child, "nvidia,snor-oe-active-high"))
+ gmi->snor_config |= TEGRA_GMI_OE_ACTIVE_HIGH;
+
+ if (of_property_read_bool(child, "nvidia,snor-cs-active-high"))
+ gmi->snor_config |= TEGRA_GMI_CS_ACTIVE_HIGH;
+
+ /* Decode the CS# */
+ err = of_property_read_u32_array(child, "ranges", ranges, 4);
+ if (err < 0) {
+ /* Invalid binding */
+ if (err == -EOVERFLOW) {
+ dev_err(gmi->dev,
+ "failed to decode CS: invalid ranges length\n");
+ goto error_cs;
+ }
+
+ /*
+		 * If we reach here, the child node either has an empty
+		 * ranges property or lacks one entirely. Attempt to decode
+		 * the CS# from the reg property instead.
+ */
+ err = of_property_read_u32(child, "reg", &property);
+ if (err < 0) {
+ dev_err(gmi->dev,
+ "failed to decode CS: no reg property found\n");
+ goto error_cs;
+ }
+ } else {
+ property = ranges[1];
+ }
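+	/*
+	 * For illustration (values hypothetical): on the reg fallback path
+	 * a child whose reg property starts with <4> ends up on CS4, while
+	 * on the ranges path the chip select comes from the second cell.
+	 */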
+
+ /* Valid chip selects are CS0-CS7 */
+ if (property >= TEGRA_GMI_MAX_CHIP_SELECT) {
+		dev_err(gmi->dev, "invalid chip select: %d\n", property);
+ err = -EINVAL;
+ goto error_cs;
+ }
+
+ gmi->snor_config |= TEGRA_GMI_CS_SELECT(property);
+
+ /* The default values that are provided below are reset values */
+ if (!of_property_read_u32(child, "nvidia,snor-muxed-width", &property))
+ gmi->snor_timing0 |= TEGRA_GMI_MUXED_WIDTH(property);
+ else
+ gmi->snor_timing0 |= TEGRA_GMI_MUXED_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-hold-width", &property))
+ gmi->snor_timing0 |= TEGRA_GMI_HOLD_WIDTH(property);
+ else
+ gmi->snor_timing0 |= TEGRA_GMI_HOLD_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-adv-width", &property))
+ gmi->snor_timing0 |= TEGRA_GMI_ADV_WIDTH(property);
+ else
+ gmi->snor_timing0 |= TEGRA_GMI_ADV_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-ce-width", &property))
+ gmi->snor_timing0 |= TEGRA_GMI_CE_WIDTH(property);
+ else
+ gmi->snor_timing0 |= TEGRA_GMI_CE_WIDTH(4);
+
+ if (!of_property_read_u32(child, "nvidia,snor-we-width", &property))
+ gmi->snor_timing1 |= TEGRA_GMI_WE_WIDTH(property);
+ else
+ gmi->snor_timing1 |= TEGRA_GMI_WE_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-oe-width", &property))
+ gmi->snor_timing1 |= TEGRA_GMI_OE_WIDTH(property);
+ else
+ gmi->snor_timing1 |= TEGRA_GMI_OE_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-wait-width", &property))
+ gmi->snor_timing1 |= TEGRA_GMI_WAIT_WIDTH(property);
+ else
+ gmi->snor_timing1 |= TEGRA_GMI_WAIT_WIDTH(3);
+
+error_cs:
+ of_node_put(child);
+ return err;
+}
+
+static int tegra_gmi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra_gmi *gmi;
+ struct resource *res;
+ int err;
+
+ gmi = devm_kzalloc(dev, sizeof(*gmi), GFP_KERNEL);
+ if (!gmi)
+ return -ENOMEM;
+
+ gmi->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ gmi->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(gmi->base))
+ return PTR_ERR(gmi->base);
+
+ gmi->clk = devm_clk_get(dev, "gmi");
+ if (IS_ERR(gmi->clk)) {
+ dev_err(dev, "can not get clock\n");
+ return PTR_ERR(gmi->clk);
+ }
+
+ gmi->rst = devm_reset_control_get(dev, "gmi");
+ if (IS_ERR(gmi->rst)) {
+ dev_err(dev, "can not get reset\n");
+ return PTR_ERR(gmi->rst);
+ }
+
+ err = tegra_gmi_parse_dt(gmi);
+ if (err)
+ return err;
+
+ err = tegra_gmi_enable(gmi);
+ if (err < 0)
+ return err;
+
+ err = of_platform_default_populate(dev->of_node, NULL, dev);
+ if (err < 0) {
+		dev_err(dev, "failed to create devices\n");
+ tegra_gmi_disable(gmi);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, gmi);
+
+ return 0;
+}
+
+static int tegra_gmi_remove(struct platform_device *pdev)
+{
+ struct tegra_gmi *gmi = platform_get_drvdata(pdev);
+
+ of_platform_depopulate(gmi->dev);
+ tegra_gmi_disable(gmi);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_gmi_id_table[] = {
+ { .compatible = "nvidia,tegra20-gmi", },
+ { .compatible = "nvidia,tegra30-gmi", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_gmi_id_table);
+
+static struct platform_driver tegra_gmi_driver = {
+ .probe = tegra_gmi_probe,
+ .remove = tegra_gmi_remove,
+ .driver = {
+ .name = "tegra-gmi",
+ .of_match_table = tegra_gmi_id_table,
+ },
+};
+module_platform_driver(tegra_gmi_driver);
+
+MODULE_AUTHOR("Mirza Krak <mirza.krak@gmail.com");
+MODULE_DESCRIPTION("NVIDIA Tegra GMI Bus Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/ti-pwmss.c b/drivers/bus/ti-pwmss.c
new file mode 100644
index 000000000..e9c26c942
--- /dev/null
+++ b/drivers/bus/ti-pwmss.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * TI PWM Subsystem driver
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_device.h>
+
+static const struct of_device_id pwmss_of_match[] = {
+ { .compatible = "ti,am33xx-pwmss" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pwmss_of_match);
+
+static int pwmss_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct device_node *node = pdev->dev.of_node;
+
+ pm_runtime_enable(&pdev->dev);
+
+ /* Populate all the child nodes here... */
+ ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ if (ret)
+ dev_err(&pdev->dev, "no child node found\n");
+
+ return ret;
+}
+
+static int pwmss_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver pwmss_driver = {
+ .driver = {
+ .name = "pwmss",
+ .of_match_table = pwmss_of_match,
+ },
+ .probe = pwmss_probe,
+ .remove = pwmss_remove,
+};
+
+module_platform_driver(pwmss_driver);
+
+MODULE_DESCRIPTION("PWM Subsystem driver");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_LICENSE("GPL");
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
new file mode 100644
index 000000000..b1aa793b9
--- /dev/null
+++ b/drivers/bus/ti-sysc.c
@@ -0,0 +1,3394 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ti-sysc.c - Texas Instruments sysc interconnect target driver
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/cpu_pm.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/timekeeping.h>
+#include <linux/iopoll.h>
+
+#include <linux/platform_data/ti-sysc.h>
+
+#include <dt-bindings/bus/ti-sysc.h>
+
+#define DIS_ISP BIT(2)
+#define DIS_IVA BIT(1)
+#define DIS_SGX BIT(0)
+
+#define SOC_FLAG(match, flag) { .machine = match, .data = (void *)(flag), }
+
+#define MAX_MODULE_SOFTRESET_WAIT 10000
+
+enum sysc_soc {
+ SOC_UNKNOWN,
+ SOC_2420,
+ SOC_2430,
+ SOC_3430,
+ SOC_AM35,
+ SOC_3630,
+ SOC_4430,
+ SOC_4460,
+ SOC_4470,
+ SOC_5430,
+ SOC_AM3,
+ SOC_AM4,
+ SOC_DRA7,
+};
+
+struct sysc_address {
+ unsigned long base;
+ struct list_head node;
+};
+
+struct sysc_module {
+ struct sysc *ddata;
+ struct list_head node;
+};
+
+struct sysc_soc_info {
+ unsigned long general_purpose:1;
+ enum sysc_soc soc;
+ struct mutex list_lock; /* disabled and restored modules list lock */
+ struct list_head disabled_modules;
+ struct list_head restored_modules;
+ struct notifier_block nb;
+};
+
+enum sysc_clocks {
+ SYSC_FCK,
+ SYSC_ICK,
+ SYSC_OPTFCK0,
+ SYSC_OPTFCK1,
+ SYSC_OPTFCK2,
+ SYSC_OPTFCK3,
+ SYSC_OPTFCK4,
+ SYSC_OPTFCK5,
+ SYSC_OPTFCK6,
+ SYSC_OPTFCK7,
+ SYSC_MAX_CLOCKS,
+};
+
+static struct sysc_soc_info *sysc_soc;
+static const char * const reg_names[] = { "rev", "sysc", "syss", };
+static const char * const clock_names[SYSC_MAX_CLOCKS] = {
+ "fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4",
+ "opt5", "opt6", "opt7",
+};
+
+#define SYSC_IDLEMODE_MASK 3
+#define SYSC_CLOCKACTIVITY_MASK 3
+
+/**
+ * struct sysc - TI sysc interconnect target module registers and capabilities
+ * @dev: struct device pointer
+ * @module_pa: physical address of the interconnect target module
+ * @module_size: size of the interconnect target module
+ * @module_va: virtual address of the interconnect target module
+ * @offsets: register offsets from module base
+ * @mdata: ti-sysc to hwmod translation data for a module
+ * @clocks: clocks used by the interconnect target module
+ * @clock_roles: clock role names for the found clocks
+ * @nr_clocks: number of clocks used by the interconnect target module
+ * @rsts: resets used by the interconnect target module
+ * @legacy_mode: configured for legacy mode if set
+ * @cap: interconnect target module capabilities
+ * @cfg: interconnect target module configuration
+ * @cookie: data used by legacy platform callbacks
+ * @name: name if available
+ * @revision: interconnect target module revision
+ * @reserved: target module is reserved and already in use
+ * @enabled: sysc runtime enabled status
+ * @needs_resume: runtime resume needed on resume from suspend
+ * @child_needs_resume: runtime resume needed for child on resume from suspend
+ * @idle_work: work structure used to perform delayed idle on a module
+ * @pre_reset_quirk: module specific pre-reset quirk
+ * @post_reset_quirk: module specific post-reset quirk
+ * @reset_done_quirk: module specific reset done quirk
+ * @module_enable_quirk: module specific enable quirk
+ * @module_disable_quirk: module specific disable quirk
+ * @module_unlock_quirk: module specific sysconfig unlock quirk
+ * @module_lock_quirk: module specific sysconfig lock quirk
+ */
+struct sysc {
+ struct device *dev;
+ u64 module_pa;
+ u32 module_size;
+ void __iomem *module_va;
+ int offsets[SYSC_MAX_REGS];
+ struct ti_sysc_module_data *mdata;
+ struct clk **clocks;
+ const char **clock_roles;
+ int nr_clocks;
+ struct reset_control *rsts;
+ const char *legacy_mode;
+ const struct sysc_capabilities *cap;
+ struct sysc_config cfg;
+ struct ti_sysc_cookie cookie;
+ const char *name;
+ u32 revision;
+ unsigned int reserved:1;
+ unsigned int enabled:1;
+ unsigned int needs_resume:1;
+ unsigned int child_needs_resume:1;
+ struct delayed_work idle_work;
+ void (*pre_reset_quirk)(struct sysc *sysc);
+ void (*post_reset_quirk)(struct sysc *sysc);
+ void (*reset_done_quirk)(struct sysc *sysc);
+ void (*module_enable_quirk)(struct sysc *sysc);
+ void (*module_disable_quirk)(struct sysc *sysc);
+ void (*module_unlock_quirk)(struct sysc *sysc);
+ void (*module_lock_quirk)(struct sysc *sysc);
+};
+
+static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
+ bool is_child);
+
+static void sysc_write(struct sysc *ddata, int offset, u32 value)
+{
+ if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) {
+ writew_relaxed(value & 0xffff, ddata->module_va + offset);
+
+ /* Only i2c revision has LO and HI register with stride of 4 */
+ if (ddata->offsets[SYSC_REVISION] >= 0 &&
+ offset == ddata->offsets[SYSC_REVISION]) {
+ u16 hi = value >> 16;
+
+ writew_relaxed(hi, ddata->module_va + offset + 4);
+ }
+
+ return;
+ }
+
+ writel_relaxed(value, ddata->module_va + offset);
+}
+
+static u32 sysc_read(struct sysc *ddata, int offset)
+{
+ if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) {
+ u32 val;
+
+ val = readw_relaxed(ddata->module_va + offset);
+
+ /* Only i2c revision has LO and HI register with stride of 4 */
+ if (ddata->offsets[SYSC_REVISION] >= 0 &&
+ offset == ddata->offsets[SYSC_REVISION]) {
+ u16 tmp = readw_relaxed(ddata->module_va + offset + 4);
+
+ val |= tmp << 16;
+ }
+
+ return val;
+ }
+
+ return readl_relaxed(ddata->module_va + offset);
+}
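+
+/*
+ * Example of the 16-bit access above, using the i2c revision value from
+ * the quirk table later in this file: a revision of 0x5040000a is read
+ * as lo = 0x000a at the revision offset and hi = 0x5040 at offset + 4,
+ * and sysc_read() recombines them into 0x5040000a.
+ */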
+
+static bool sysc_opt_clks_needed(struct sysc *ddata)
+{
+ return !!(ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_NEEDED);
+}
+
+static u32 sysc_read_revision(struct sysc *ddata)
+{
+ int offset = ddata->offsets[SYSC_REVISION];
+
+ if (offset < 0)
+ return 0;
+
+ return sysc_read(ddata, offset);
+}
+
+static u32 sysc_read_sysconfig(struct sysc *ddata)
+{
+ int offset = ddata->offsets[SYSC_SYSCONFIG];
+
+ if (offset < 0)
+ return 0;
+
+ return sysc_read(ddata, offset);
+}
+
+static u32 sysc_read_sysstatus(struct sysc *ddata)
+{
+ int offset = ddata->offsets[SYSC_SYSSTATUS];
+
+ if (offset < 0)
+ return 0;
+
+ return sysc_read(ddata, offset);
+}
+
+static int sysc_poll_reset_sysstatus(struct sysc *ddata)
+{
+ int error, retries;
+ u32 syss_done, rstval;
+
+ if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
+ syss_done = 0;
+ else
+ syss_done = ddata->cfg.syss_mask;
+
+ if (likely(!timekeeping_suspended)) {
+ error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata,
+ rstval, (rstval & ddata->cfg.syss_mask) ==
+ syss_done, 100, MAX_MODULE_SOFTRESET_WAIT);
+ } else {
+ retries = MAX_MODULE_SOFTRESET_WAIT;
+ while (retries--) {
+ rstval = sysc_read_sysstatus(ddata);
+ if ((rstval & ddata->cfg.syss_mask) == syss_done)
+ return 0;
+			udelay(2); /* Account for udelay flakiness */
+ }
+ error = -ETIMEDOUT;
+ }
+
+ return error;
+}
+
+static int sysc_poll_reset_sysconfig(struct sysc *ddata)
+{
+ int error, retries;
+ u32 sysc_mask, rstval;
+
+ sysc_mask = BIT(ddata->cap->regbits->srst_shift);
+
+ if (likely(!timekeeping_suspended)) {
+ error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata,
+ rstval, !(rstval & sysc_mask),
+ 100, MAX_MODULE_SOFTRESET_WAIT);
+ } else {
+ retries = MAX_MODULE_SOFTRESET_WAIT;
+ while (retries--) {
+ rstval = sysc_read_sysconfig(ddata);
+ if (!(rstval & sysc_mask))
+ return 0;
+			udelay(2); /* Account for udelay flakiness */
+ }
+ error = -ETIMEDOUT;
+ }
+
+ return error;
+}
+
+/* Poll on reset status */
+static int sysc_wait_softreset(struct sysc *ddata)
+{
+ int syss_offset, error = 0;
+
+ if (ddata->cap->regbits->srst_shift < 0)
+ return 0;
+
+ syss_offset = ddata->offsets[SYSC_SYSSTATUS];
+
+ if (syss_offset >= 0)
+ error = sysc_poll_reset_sysstatus(ddata);
+ else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS)
+ error = sysc_poll_reset_sysconfig(ddata);
+
+ return error;
+}
+
+static int sysc_add_named_clock_from_child(struct sysc *ddata,
+ const char *name,
+ const char *optfck_name)
+{
+ struct device_node *np = ddata->dev->of_node;
+ struct device_node *child;
+ struct clk_lookup *cl;
+ struct clk *clock;
+ const char *n;
+
+ if (name)
+ n = name;
+ else
+ n = optfck_name;
+
+ /* Does the clock alias already exist? */
+ clock = of_clk_get_by_name(np, n);
+ if (!IS_ERR(clock)) {
+ clk_put(clock);
+
+ return 0;
+ }
+
+ child = of_get_next_available_child(np, NULL);
+ if (!child)
+ return -ENODEV;
+
+ clock = devm_get_clk_from_child(ddata->dev, child, name);
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ /*
+ * Use clkdev_add() instead of clkdev_alloc() to avoid the MAX_DEV_ID
+ * limit for clk_get(). If cl ever needs to be freed, it should be done
+ * with clkdev_drop().
+ */
+	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return -ENOMEM;
+
+ cl->con_id = n;
+ cl->dev_id = dev_name(ddata->dev);
+ cl->clk = clock;
+ clkdev_add(cl);
+
+ clk_put(clock);
+
+ return 0;
+}
+
+static int sysc_init_ext_opt_clock(struct sysc *ddata, const char *name)
+{
+ const char *optfck_name;
+ int error, index;
+
+ if (ddata->nr_clocks < SYSC_OPTFCK0)
+ index = SYSC_OPTFCK0;
+ else
+ index = ddata->nr_clocks;
+
+ if (name)
+ optfck_name = name;
+ else
+ optfck_name = clock_names[index];
+
+ error = sysc_add_named_clock_from_child(ddata, name, optfck_name);
+ if (error)
+ return error;
+
+ ddata->clock_roles[index] = optfck_name;
+ ddata->nr_clocks++;
+
+ return 0;
+}
+
+static int sysc_get_one_clock(struct sysc *ddata, const char *name)
+{
+ int error, i, index = -ENODEV;
+
+ if (!strncmp(clock_names[SYSC_FCK], name, 3))
+ index = SYSC_FCK;
+ else if (!strncmp(clock_names[SYSC_ICK], name, 3))
+ index = SYSC_ICK;
+
+ if (index < 0) {
+ for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
+ if (!ddata->clocks[i]) {
+ index = i;
+ break;
+ }
+ }
+ }
+
+ if (index < 0) {
+ dev_err(ddata->dev, "clock %s not added\n", name);
+ return index;
+ }
+
+ ddata->clocks[index] = devm_clk_get(ddata->dev, name);
+ if (IS_ERR(ddata->clocks[index])) {
+ dev_err(ddata->dev, "clock get error for %s: %li\n",
+ name, PTR_ERR(ddata->clocks[index]));
+
+ return PTR_ERR(ddata->clocks[index]);
+ }
+
+ error = clk_prepare(ddata->clocks[index]);
+ if (error) {
+ dev_err(ddata->dev, "clock prepare error for %s: %i\n",
+ name, error);
+
+ return error;
+ }
+
+ return 0;
+}
+
+static int sysc_get_clocks(struct sysc *ddata)
+{
+ struct device_node *np = ddata->dev->of_node;
+ struct property *prop;
+ const char *name;
+ int nr_fck = 0, nr_ick = 0, i, error = 0;
+
+ ddata->clock_roles = devm_kcalloc(ddata->dev,
+ SYSC_MAX_CLOCKS,
+ sizeof(*ddata->clock_roles),
+ GFP_KERNEL);
+ if (!ddata->clock_roles)
+ return -ENOMEM;
+
+ of_property_for_each_string(np, "clock-names", prop, name) {
+ if (!strncmp(clock_names[SYSC_FCK], name, 3))
+ nr_fck++;
+ if (!strncmp(clock_names[SYSC_ICK], name, 3))
+ nr_ick++;
+ ddata->clock_roles[ddata->nr_clocks] = name;
+ ddata->nr_clocks++;
+ }
+
+ if (ddata->nr_clocks < 1)
+ return 0;
+
+ if ((ddata->cfg.quirks & SYSC_QUIRK_EXT_OPT_CLOCK)) {
+ error = sysc_init_ext_opt_clock(ddata, NULL);
+ if (error)
+ return error;
+ }
+
+ if (ddata->nr_clocks > SYSC_MAX_CLOCKS) {
+ dev_err(ddata->dev, "too many clocks for %pOF\n", np);
+
+ return -EINVAL;
+ }
+
+ if (nr_fck > 1 || nr_ick > 1) {
+ dev_err(ddata->dev, "max one fck and ick for %pOF\n", np);
+
+ return -EINVAL;
+ }
+
+ /* Always add a slot for main clocks fck and ick even if unused */
+ if (!nr_fck)
+ ddata->nr_clocks++;
+ if (!nr_ick)
+ ddata->nr_clocks++;
+
+ ddata->clocks = devm_kcalloc(ddata->dev,
+ ddata->nr_clocks, sizeof(*ddata->clocks),
+ GFP_KERNEL);
+ if (!ddata->clocks)
+ return -ENOMEM;
+
+ for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
+ const char *name = ddata->clock_roles[i];
+
+ if (!name)
+ continue;
+
+ error = sysc_get_one_clock(ddata, name);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+static int sysc_enable_main_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i, error;
+
+ if (!ddata->clocks)
+ return 0;
+
+ for (i = 0; i < SYSC_OPTFCK0; i++) {
+ clock = ddata->clocks[i];
+
+ /* Main clocks may not have ick */
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ error = clk_enable(clock);
+ if (error)
+ goto err_disable;
+ }
+
+ return 0;
+
+err_disable:
+ for (i--; i >= 0; i--) {
+ clock = ddata->clocks[i];
+
+ /* Main clocks may not have ick */
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ clk_disable(clock);
+ }
+
+ return error;
+}
+
+static void sysc_disable_main_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i;
+
+ if (!ddata->clocks)
+ return;
+
+ for (i = 0; i < SYSC_OPTFCK0; i++) {
+ clock = ddata->clocks[i];
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ clk_disable(clock);
+ }
+}
+
+static int sysc_enable_opt_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i, error;
+
+ if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
+ return 0;
+
+ for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
+ clock = ddata->clocks[i];
+
+ /* Assume no holes for opt clocks */
+ if (IS_ERR_OR_NULL(clock))
+ return 0;
+
+ error = clk_enable(clock);
+ if (error)
+ goto err_disable;
+ }
+
+ return 0;
+
+err_disable:
+ for (i--; i >= 0; i--) {
+ clock = ddata->clocks[i];
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ clk_disable(clock);
+ }
+
+ return error;
+}
+
+static void sysc_disable_opt_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i;
+
+ if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
+ return;
+
+ for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
+ clock = ddata->clocks[i];
+
+ /* Assume no holes for opt clocks */
+ if (IS_ERR_OR_NULL(clock))
+ return;
+
+ clk_disable(clock);
+ }
+}
+
+static void sysc_clkdm_deny_idle(struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata;
+
+ if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
+ return;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (pdata && pdata->clkdm_deny_idle)
+ pdata->clkdm_deny_idle(ddata->dev, &ddata->cookie);
+}
+
+static void sysc_clkdm_allow_idle(struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata;
+
+ if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
+ return;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (pdata && pdata->clkdm_allow_idle)
+ pdata->clkdm_allow_idle(ddata->dev, &ddata->cookie);
+}
+
+/**
+ * sysc_init_resets - init rstctrl reset line if configured
+ * @ddata: device driver data
+ *
+ * See sysc_rstctrl_reset_deassert().
+ */
+static int sysc_init_resets(struct sysc *ddata)
+{
+ ddata->rsts =
+ devm_reset_control_get_optional_shared(ddata->dev, "rstctrl");
+
+ return PTR_ERR_OR_ZERO(ddata->rsts);
+}
+
+/**
+ * sysc_parse_and_check_child_range - parses module IO region from ranges
+ * @ddata: device driver data
+ *
+ * In general we only need rev, syss, and sysc registers and not the whole
+ * module range. But we do want the offsets for these registers from the
+ * module base. This allows us to check them against the legacy hwmod
+ * platform data. Let's also check the ranges are configured properly.
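+ *
+ * For example, an illustrative target module node such as
+ *
+ *	target-module@48030000 {
+ *		compatible = "ti,sysc-omap4", "ti,sysc";
+ *		#address-cells = <1>;
+ *		#size-cells = <1>;
+ *		ranges = <0x0 0x48030000 0x1000>;
+ *	};
+ *
+ * parses to module_pa = 0x48030000 and module_size = 0x1000.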
+ */
+static int sysc_parse_and_check_child_range(struct sysc *ddata)
+{
+ struct device_node *np = ddata->dev->of_node;
+ const __be32 *ranges;
+ u32 nr_addr, nr_size;
+ int len, error;
+
+ ranges = of_get_property(np, "ranges", &len);
+ if (!ranges) {
+ dev_err(ddata->dev, "missing ranges for %pOF\n", np);
+
+ return -ENOENT;
+ }
+
+ len /= sizeof(*ranges);
+
+ if (len < 3) {
+ dev_err(ddata->dev, "incomplete ranges for %pOF\n", np);
+
+ return -EINVAL;
+ }
+
+ error = of_property_read_u32(np, "#address-cells", &nr_addr);
+ if (error)
+ return -ENOENT;
+
+ error = of_property_read_u32(np, "#size-cells", &nr_size);
+ if (error)
+ return -ENOENT;
+
+ if (nr_addr != 1 || nr_size != 1) {
+ dev_err(ddata->dev, "invalid ranges for %pOF\n", np);
+
+ return -EINVAL;
+ }
+
+ ranges++;
+ ddata->module_pa = of_translate_address(np, ranges++);
+ ddata->module_size = be32_to_cpup(ranges);
+
+ return 0;
+}
+
+/* Interconnect instances to probe before l4_per instances */
+static struct resource early_bus_ranges[] = {
+ /* am3/4 l4_wkup */
+ { .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
+ /* omap4/5 and dra7 l4_cfg */
+ { .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
+ /* omap4 l4_wkup */
+ { .start = 0x4a300000, .end = 0x4a300000 + 0x30000, },
+ /* omap5 and dra7 l4_wkup without dra7 dcan segment */
+ { .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, },
+};
+
+static atomic_t sysc_defer = ATOMIC_INIT(10);
+
+/**
+ * sysc_defer_non_critical - defer non-critical interconnect probing
+ * @ddata: device driver data
+ *
+ * We want to probe l4_cfg and l4_wkup interconnect instances before any
+ * l4_per instances as l4_per instances depend on resources on l4_cfg and
+ * l4_wkup interconnects.
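+ *
+ * Non-critical instances are only deferred for a limited number of
+ * probe attempts: the sysc_defer budget above starts at 10, each
+ * deferral decrements it, and probing any instance within
+ * early_bus_ranges clears it so that later probes proceed normally.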
+ */
+static int sysc_defer_non_critical(struct sysc *ddata)
+{
+ struct resource *res;
+ int i;
+
+ if (!atomic_read(&sysc_defer))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
+ res = &early_bus_ranges[i];
+ if (ddata->module_pa >= res->start &&
+ ddata->module_pa <= res->end) {
+ atomic_set(&sysc_defer, 0);
+
+ return 0;
+ }
+ }
+
+ atomic_dec_if_positive(&sysc_defer);
+
+ return -EPROBE_DEFER;
+}
+
+static struct device_node *stdout_path;
+
+static void sysc_init_stdout_path(struct sysc *ddata)
+{
+ struct device_node *np = NULL;
+ const char *uart;
+
+ if (IS_ERR(stdout_path))
+ return;
+
+ if (stdout_path)
+ return;
+
+ np = of_find_node_by_path("/chosen");
+ if (!np)
+ goto err;
+
+ uart = of_get_property(np, "stdout-path", NULL);
+ if (!uart)
+ goto err;
+
+ np = of_find_node_by_path(uart);
+ if (!np)
+ goto err;
+
+ stdout_path = np;
+
+ return;
+
+err:
+ stdout_path = ERR_PTR(-ENODEV);
+}
+
+static void sysc_check_quirk_stdout(struct sysc *ddata,
+ struct device_node *np)
+{
+ sysc_init_stdout_path(ddata);
+ if (np != stdout_path)
+ return;
+
+ ddata->cfg.quirks |= SYSC_QUIRK_NO_IDLE_ON_INIT |
+ SYSC_QUIRK_NO_RESET_ON_INIT;
+}
+
+/**
+ * sysc_check_one_child - check child configuration
+ * @ddata: device driver data
+ * @np: child device node
+ *
+ * Let's avoid messy situations where we have a new interconnect target
+ * node but children have "ti,hwmods". These belong to the interconnect
+ * target node and are managed by this driver.
+ */
+static void sysc_check_one_child(struct sysc *ddata,
+ struct device_node *np)
+{
+ const char *name;
+
+ name = of_get_property(np, "ti,hwmods", NULL);
+ if (name && !of_device_is_compatible(np, "ti,sysc"))
+ dev_warn(ddata->dev, "really a child ti,hwmods property?");
+
+ sysc_check_quirk_stdout(ddata, np);
+ sysc_parse_dts_quirks(ddata, np, true);
+}
+
+static void sysc_check_children(struct sysc *ddata)
+{
+ struct device_node *child;
+
+ for_each_child_of_node(ddata->dev->of_node, child)
+ sysc_check_one_child(ddata, child);
+}
+
+/*
+ * So far only I2C uses 16-bit read access with clockactivity with revision
+ * in two registers with stride of 4. We can detect this based on the rev
+ * register size to configure things far enough to be able to properly read
+ * the revision register.
+ */
+static void sysc_check_quirk_16bit(struct sysc *ddata, struct resource *res)
+{
+ if (resource_size(res) == 8)
+ ddata->cfg.quirks |= SYSC_QUIRK_16BIT | SYSC_QUIRK_USE_CLOCKACT;
+}
+
+/**
+ * sysc_parse_one - parses the interconnect target module registers
+ * @ddata: device driver data
+ * @reg: register to parse
+ */
+static int sysc_parse_one(struct sysc *ddata, enum sysc_registers reg)
+{
+ struct resource *res;
+ const char *name;
+
+ switch (reg) {
+ case SYSC_REVISION:
+ case SYSC_SYSCONFIG:
+ case SYSC_SYSSTATUS:
+ name = reg_names[reg];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ res = platform_get_resource_byname(to_platform_device(ddata->dev),
+ IORESOURCE_MEM, name);
+ if (!res) {
+ ddata->offsets[reg] = -ENODEV;
+
+ return 0;
+ }
+
+ ddata->offsets[reg] = res->start - ddata->module_pa;
+ if (reg == SYSC_REVISION)
+ sysc_check_quirk_16bit(ddata, res);
+
+ return 0;
+}
+
+static int sysc_parse_registers(struct sysc *ddata)
+{
+ int i, error;
+
+ for (i = 0; i < SYSC_MAX_REGS; i++) {
+ error = sysc_parse_one(ddata, i);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+/**
+ * sysc_check_registers - check for misconfigured register overlaps
+ * @ddata: device driver data
+ */
+static int sysc_check_registers(struct sysc *ddata)
+{
+ int i, j, nr_regs = 0, nr_matches = 0;
+
+ for (i = 0; i < SYSC_MAX_REGS; i++) {
+ if (ddata->offsets[i] < 0)
+ continue;
+
+ if (ddata->offsets[i] > (ddata->module_size - 4)) {
+ dev_err(ddata->dev, "register outside module range");
+
+ return -EINVAL;
+ }
+
+ for (j = 0; j < SYSC_MAX_REGS; j++) {
+ if (ddata->offsets[j] < 0)
+ continue;
+
+ if (ddata->offsets[i] == ddata->offsets[j])
+ nr_matches++;
+ }
+ nr_regs++;
+ }
+
+ if (nr_matches > nr_regs) {
+ dev_err(ddata->dev, "overlapping registers: (%i/%i)",
+ nr_regs, nr_matches);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * sysc_ioremap - ioremap register space for the interconnect target module
+ * @ddata: device driver data
+ *
+ * Note that the interconnect target module registers can be anywhere
+ * within the interconnect target module range. For example, SGX has
+ * them at offset 0x1fc00 in the 32MB module address space. And cpsw
+ * has them at offset 0x1200 in the CPSW_WR child. Usually the
+ * the interconnect target module registers are at the beginning of
+ * the module range though.
+ */
+static int sysc_ioremap(struct sysc *ddata)
+{
+ int size;
+
+ if (ddata->offsets[SYSC_REVISION] < 0 &&
+ ddata->offsets[SYSC_SYSCONFIG] < 0 &&
+ ddata->offsets[SYSC_SYSSTATUS] < 0) {
+ size = ddata->module_size;
+ } else {
+ size = max3(ddata->offsets[SYSC_REVISION],
+ ddata->offsets[SYSC_SYSCONFIG],
+ ddata->offsets[SYSC_SYSSTATUS]);
+
+ if (size < SZ_1K)
+ size = SZ_1K;
+
+ if ((size + sizeof(u32)) > ddata->module_size)
+ size = ddata->module_size;
+ }
+
+ ddata->module_va = devm_ioremap(ddata->dev,
+ ddata->module_pa,
+ size + sizeof(u32));
+ if (!ddata->module_va)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * sysc_map_and_check_registers - ioremap and check device registers
+ * @ddata: device driver data
+ */
+static int sysc_map_and_check_registers(struct sysc *ddata)
+{
+ int error;
+
+ error = sysc_parse_and_check_child_range(ddata);
+ if (error)
+ return error;
+
+ error = sysc_defer_non_critical(ddata);
+ if (error)
+ return error;
+
+ sysc_check_children(ddata);
+
+ error = sysc_parse_registers(ddata);
+ if (error)
+ return error;
+
+ error = sysc_ioremap(ddata);
+ if (error)
+ return error;
+
+ error = sysc_check_registers(ddata);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+/**
+ * sysc_show_rev - read and show interconnect target module revision
+ * @bufp: buffer to print the information to
+ * @ddata: device driver data
+ */
+static int sysc_show_rev(char *bufp, struct sysc *ddata)
+{
+ int len;
+
+ if (ddata->offsets[SYSC_REVISION] < 0)
+ return sprintf(bufp, ":NA");
+
+ len = sprintf(bufp, ":%08x", ddata->revision);
+
+ return len;
+}
+
+static int sysc_show_reg(struct sysc *ddata,
+ char *bufp, enum sysc_registers reg)
+{
+ if (ddata->offsets[reg] < 0)
+ return sprintf(bufp, ":NA");
+
+ return sprintf(bufp, ":%x", ddata->offsets[reg]);
+}
+
+static int sysc_show_name(char *bufp, struct sysc *ddata)
+{
+ if (!ddata->name)
+ return 0;
+
+ return sprintf(bufp, ":%s", ddata->name);
+}
+
+/**
+ * sysc_show_registers - show information about interconnect target module
+ * @ddata: device driver data
+ */
+static void sysc_show_registers(struct sysc *ddata)
+{
+ char buf[128];
+ char *bufp = buf;
+ int i;
+
+ for (i = 0; i < SYSC_MAX_REGS; i++)
+ bufp += sysc_show_reg(ddata, bufp, i);
+
+ bufp += sysc_show_rev(bufp, ddata);
+ bufp += sysc_show_name(bufp, ddata);
+
+ dev_dbg(ddata->dev, "%llx:%x%s\n",
+ ddata->module_pa, ddata->module_size,
+ buf);
+}
+
+/**
+ * sysc_write_sysconfig - handle sysconfig quirks for register write
+ * @ddata: device driver data
+ * @value: register value
+ */
+static void sysc_write_sysconfig(struct sysc *ddata, u32 value)
+{
+ if (ddata->module_unlock_quirk)
+ ddata->module_unlock_quirk(ddata);
+
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], value);
+
+ if (ddata->module_lock_quirk)
+ ddata->module_lock_quirk(ddata);
+}
+
+#define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1)
+#define SYSC_CLOCACT_ICK 2
+
+/* Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle() */
+static int sysc_enable_module(struct device *dev)
+{
+ struct sysc *ddata;
+ const struct sysc_regbits *regbits;
+ u32 reg, idlemodes, best_mode;
+ int error;
+
+ ddata = dev_get_drvdata(dev);
+
+ /*
+ * Some modules like DSS reset automatically on idle. Enable optional
+ * reset clocks and wait for OCP softreset to complete.
+ */
+ if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) {
+ error = sysc_enable_opt_clocks(ddata);
+ if (error) {
+ dev_err(ddata->dev,
+				"failed to enable optional clocks: %i\n",
+ error);
+ return error;
+ }
+ }
+ /*
+ * Some modules like i2c and hdq1w have unusable reset status unless
+ * the module reset quirk is enabled. Skip status check on enable.
+ */
+ if (!(ddata->cfg.quirks & SYSC_MODULE_QUIRK_ENA_RESETDONE)) {
+ error = sysc_wait_softreset(ddata);
+ if (error)
+ dev_warn(ddata->dev, "OCP softreset timed out\n");
+ }
+ if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET)
+ sysc_disable_opt_clocks(ddata);
+
+ /*
+ * Some subsystem private interconnects, like DSS top level module,
+ * need only the automatic OCP softreset handling with no sysconfig
+ * register bits to configure.
+ */
+ if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
+ return 0;
+
+ regbits = ddata->cap->regbits;
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ /*
+	 * Set CLOCKACTIVITY; we only use it for ick, and we only configure it
+ * based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware
+ * capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag.
+ */
+ if (regbits->clkact_shift >= 0 &&
+ (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT))
+ reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift;
+
+ /* Set SIDLE mode */
+ idlemodes = ddata->cfg.sidlemodes;
+ if (!idlemodes || regbits->sidle_shift < 0)
+ goto set_midle;
+
+ if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE |
+ SYSC_QUIRK_SWSUP_SIDLE_ACT)) {
+ best_mode = SYSC_IDLE_NO;
+
+ /* Clear WAKEUP */
+ if (regbits->enwkup_shift >= 0 &&
+ ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
+ reg &= ~BIT(regbits->enwkup_shift);
+ } else {
+ best_mode = fls(ddata->cfg.sidlemodes) - 1;
+ if (best_mode > SYSC_IDLE_MASK) {
+ dev_err(dev, "%s: invalid sidlemode\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Set WAKEUP */
+ if (regbits->enwkup_shift >= 0 &&
+ ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
+ reg |= BIT(regbits->enwkup_shift);
+ }
+
+ reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
+ reg |= best_mode << regbits->sidle_shift;
+ sysc_write_sysconfig(ddata, reg);
+
+set_midle:
+ /* Set MIDLE mode */
+ idlemodes = ddata->cfg.midlemodes;
+ if (!idlemodes || regbits->midle_shift < 0)
+ goto set_autoidle;
+
+ best_mode = fls(ddata->cfg.midlemodes) - 1;
+ if (best_mode > SYSC_IDLE_MASK) {
+ dev_err(dev, "%s: invalid midlemode\n", __func__);
+ return -EINVAL;
+ }
+
+ if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
+ best_mode = SYSC_IDLE_NO;
+
+ reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
+ reg |= best_mode << regbits->midle_shift;
+ sysc_write_sysconfig(ddata, reg);
+
+set_autoidle:
+	/* Autoidle bit must be enabled separately if available */
+ if (regbits->autoidle_shift >= 0 &&
+ ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) {
+ reg |= 1 << regbits->autoidle_shift;
+ sysc_write_sysconfig(ddata, reg);
+ }
+
+ /* Flush posted write */
+ sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ if (ddata->module_enable_quirk)
+ ddata->module_enable_quirk(ddata);
+
+ return 0;
+}
+
+static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
+{
+ if (idlemodes & BIT(SYSC_IDLE_SMART_WKUP))
+ *best_mode = SYSC_IDLE_SMART_WKUP;
+ else if (idlemodes & BIT(SYSC_IDLE_SMART))
+ *best_mode = SYSC_IDLE_SMART;
+ else if (idlemodes & BIT(SYSC_IDLE_FORCE))
+ *best_mode = SYSC_IDLE_FORCE;
+ else
+ return -EINVAL;
+
+ return 0;
+}
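+
+/*
+ * For example, idlemodes = BIT(SYSC_IDLE_SMART) | BIT(SYSC_IDLE_FORCE)
+ * selects SYSC_IDLE_SMART above: smart-wakeup idle is preferred first,
+ * then smart idle, with force idle only as a last resort.
+ */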
+
+/* Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle() */
+static int sysc_disable_module(struct device *dev)
+{
+ struct sysc *ddata;
+ const struct sysc_regbits *regbits;
+ u32 reg, idlemodes, best_mode;
+ int ret;
+
+ ddata = dev_get_drvdata(dev);
+ if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
+ return 0;
+
+ if (ddata->module_disable_quirk)
+ ddata->module_disable_quirk(ddata);
+
+ regbits = ddata->cap->regbits;
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ /* Set MIDLE mode */
+ idlemodes = ddata->cfg.midlemodes;
+ if (!idlemodes || regbits->midle_shift < 0)
+ goto set_sidle;
+
+ ret = sysc_best_idle_mode(idlemodes, &best_mode);
+ if (ret) {
+ dev_err(dev, "%s: invalid midlemode\n", __func__);
+ return ret;
+ }
+
+	if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_MSTANDBY |
+				 SYSC_QUIRK_FORCE_MSTANDBY))
+ best_mode = SYSC_IDLE_FORCE;
+
+ reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
+ reg |= best_mode << regbits->midle_shift;
+ sysc_write_sysconfig(ddata, reg);
+
+set_sidle:
+ /* Set SIDLE mode */
+ idlemodes = ddata->cfg.sidlemodes;
+ if (!idlemodes || regbits->sidle_shift < 0)
+ return 0;
+
+ if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE) {
+ best_mode = SYSC_IDLE_FORCE;
+ } else {
+ ret = sysc_best_idle_mode(idlemodes, &best_mode);
+ if (ret) {
+ dev_err(dev, "%s: invalid sidlemode\n", __func__);
+ return ret;
+ }
+ }
+
+ if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE_ACT) {
+ /* Set WAKEUP */
+ if (regbits->enwkup_shift >= 0 &&
+ ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
+ reg |= BIT(regbits->enwkup_shift);
+ }
+
+ reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
+ reg |= best_mode << regbits->sidle_shift;
+ if (regbits->autoidle_shift >= 0 &&
+ ddata->cfg.sysc_val & BIT(regbits->autoidle_shift))
+ reg |= 1 << regbits->autoidle_shift;
+ sysc_write_sysconfig(ddata, reg);
+
+ /* Flush posted write */
+ sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ return 0;
+}
+
+static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev,
+ struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata;
+ int error;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (!pdata)
+ return 0;
+
+ if (!pdata->idle_module)
+ return -ENODEV;
+
+ error = pdata->idle_module(dev, &ddata->cookie);
+ if (error)
+ dev_err(dev, "%s: could not idle: %i\n",
+ __func__, error);
+
+ reset_control_assert(ddata->rsts);
+
+ return 0;
+}
+
+static int __maybe_unused sysc_runtime_resume_legacy(struct device *dev,
+ struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata;
+ int error;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (!pdata)
+ return 0;
+
+ if (!pdata->enable_module)
+ return -ENODEV;
+
+ error = pdata->enable_module(dev, &ddata->cookie);
+ if (error)
+ dev_err(dev, "%s: could not enable: %i\n",
+ __func__, error);
+
+ reset_control_deassert(ddata->rsts);
+
+ return 0;
+}
+
+static int __maybe_unused sysc_runtime_suspend(struct device *dev)
+{
+ struct sysc *ddata;
+ int error = 0;
+
+ ddata = dev_get_drvdata(dev);
+
+ if (!ddata->enabled)
+ return 0;
+
+ sysc_clkdm_deny_idle(ddata);
+
+ if (ddata->legacy_mode) {
+ error = sysc_runtime_suspend_legacy(dev, ddata);
+ if (error)
+ goto err_allow_idle;
+ } else {
+ error = sysc_disable_module(dev);
+ if (error)
+ goto err_allow_idle;
+ }
+
+ sysc_disable_main_clocks(ddata);
+
+ if (sysc_opt_clks_needed(ddata))
+ sysc_disable_opt_clocks(ddata);
+
+ ddata->enabled = false;
+
+err_allow_idle:
+ reset_control_assert(ddata->rsts);
+
+ sysc_clkdm_allow_idle(ddata);
+
+ return error;
+}
+
+static int __maybe_unused sysc_runtime_resume(struct device *dev)
+{
+ struct sysc *ddata;
+ int error = 0;
+
+ ddata = dev_get_drvdata(dev);
+
+ if (ddata->enabled)
+ return 0;
+
+ sysc_clkdm_deny_idle(ddata);
+
+ if (sysc_opt_clks_needed(ddata)) {
+ error = sysc_enable_opt_clocks(ddata);
+ if (error)
+ goto err_allow_idle;
+ }
+
+ error = sysc_enable_main_clocks(ddata);
+ if (error)
+ goto err_opt_clocks;
+
+ reset_control_deassert(ddata->rsts);
+
+ if (ddata->legacy_mode) {
+ error = sysc_runtime_resume_legacy(dev, ddata);
+ if (error)
+ goto err_main_clocks;
+ } else {
+ error = sysc_enable_module(dev);
+ if (error)
+ goto err_main_clocks;
+ }
+
+ ddata->enabled = true;
+
+ sysc_clkdm_allow_idle(ddata);
+
+ return 0;
+
+err_main_clocks:
+ sysc_disable_main_clocks(ddata);
+err_opt_clocks:
+ if (sysc_opt_clks_needed(ddata))
+ sysc_disable_opt_clocks(ddata);
+err_allow_idle:
+ sysc_clkdm_allow_idle(ddata);
+
+ return error;
+}
+
+static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
+{
+ struct device *dev = ddata->dev;
+ int error;
+
+ /* Disable target module if it is enabled */
+ if (ddata->enabled) {
+ error = sysc_runtime_suspend(dev);
+ if (error)
+ dev_warn(dev, "reinit suspend failed: %i\n", error);
+ }
+
+ /* Enable target module */
+ error = sysc_runtime_resume(dev);
+ if (error)
+ dev_warn(dev, "reinit resume failed: %i\n", error);
+
+ if (leave_enabled)
+ return error;
+
+	/* Disable target module if leave_enabled was not set */
+ error = sysc_runtime_suspend(dev);
+ if (error)
+ dev_warn(dev, "reinit suspend failed: %i\n", error);
+
+ return error;
+}
+
+static int __maybe_unused sysc_noirq_suspend(struct device *dev)
+{
+ struct sysc *ddata;
+
+ ddata = dev_get_drvdata(dev);
+
+ if (ddata->cfg.quirks &
+ (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
+ return 0;
+
+ if (!ddata->enabled)
+ return 0;
+
+ ddata->needs_resume = 1;
+
+ return sysc_runtime_suspend(dev);
+}
+
+static int __maybe_unused sysc_noirq_resume(struct device *dev)
+{
+ struct sysc *ddata;
+ int error = 0;
+
+ ddata = dev_get_drvdata(dev);
+
+ if (ddata->cfg.quirks &
+ (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
+ return 0;
+
+ if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) {
+ error = sysc_reinit_module(ddata, ddata->needs_resume);
+ if (error)
+ dev_warn(dev, "noirq_resume failed: %i\n", error);
+ } else if (ddata->needs_resume) {
+ error = sysc_runtime_resume(dev);
+ if (error)
+ dev_warn(dev, "noirq_resume failed: %i\n", error);
+ }
+
+ ddata->needs_resume = 0;
+
+ return error;
+}
+
+static const struct dev_pm_ops sysc_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_noirq_suspend, sysc_noirq_resume)
+ SET_RUNTIME_PM_OPS(sysc_runtime_suspend,
+ sysc_runtime_resume,
+ NULL)
+};
+
+/* Module revision register based quirks */
+struct sysc_revision_quirk {
+ const char *name;
+ u32 base;
+ int rev_offset;
+ int sysc_offset;
+ int syss_offset;
+ u32 revision;
+ u32 revision_mask;
+ u32 quirks;
+};
+
+#define SYSC_QUIRK(optname, optbase, optrev, optsysc, optsyss, \
+ optrev_val, optrevmask, optquirkmask) \
+ { \
+ .name = (optname), \
+ .base = (optbase), \
+ .rev_offset = (optrev), \
+ .sysc_offset = (optsysc), \
+ .syss_offset = (optsyss), \
+ .revision = (optrev_val), \
+ .revision_mask = (optrevmask), \
+ .quirks = (optquirkmask), \
+ }
+
+static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+ /* These drivers need to be fixed to not use pm_runtime_irq_safe() */
+ SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffff00ff,
+ SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_OPT_CLKS_IN_RESET),
+ SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff,
+ SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff,
+ SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff,
+ SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ /* Uarts on omap4 and later */
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+
+ /* Quirks that need to be set based on the module address */
+ SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
+ SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
+ SYSC_QUIRK_SWSUP_SIDLE),
+
+ /* Quirks that need to be set based on detected module */
+ SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff,
+ SYSC_MODULE_QUIRK_AESS),
+ /* Errata i893 handling for dra7 dcan1 and 2 */
+ SYSC_QUIRK("dcan", 0x4ae3c000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
+ SYSC_QUIRK_CLKDM_NOAUTO),
+ SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
+ SYSC_QUIRK_CLKDM_NOAUTO),
+ SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff,
+ SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
+ SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000040, 0xffffffff,
+ SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
+ SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000061, 0xffffffff,
+ SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
+ SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
+ SYSC_QUIRK_CLKDM_NOAUTO),
+ SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
+ SYSC_QUIRK_CLKDM_NOAUTO),
+ SYSC_QUIRK("gpmc", 0, 0, 0x10, 0x14, 0x00000060, 0xffffffff,
+ SYSC_QUIRK_GPMC_DEBUG),
+ SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
+ SYSC_QUIRK_OPT_CLKS_NEEDED),
+ SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
+ SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
+ SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
+ SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
+ SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000036, 0x000000ff,
+ SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
+ SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x0000003c, 0x000000ff,
+ SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
+ SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000040, 0x000000ff,
+ SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
+ SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0,
+ SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
+ SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0),
+	SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000, 0xffffffff,
+ SYSC_MODULE_QUIRK_SGX),
+ SYSC_QUIRK("lcdc", 0, 0, 0x54, -ENODEV, 0x4f201000, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0,
+ SYSC_MODULE_QUIRK_RTC_UNLOCK),
+ SYSC_QUIRK("tptc", 0, 0, 0x10, -ENODEV, 0x40006c00, 0xffffefff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
+ 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
+ SYSC_QUIRK_REINIT_ON_CTX_LOST),
+ SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
+ SYSC_MODULE_QUIRK_WDT),
+ /* PRUSS on am3, am4 and am5 */
+ SYSC_QUIRK("pruss", 0, 0x26000, 0x26004, -ENODEV, 0x47000000, 0xff000000,
+ SYSC_MODULE_QUIRK_PRUSS),
+ /* Watchdog on am3 and am4 */
+ SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
+ SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE),
+
+#ifdef DEBUG
+ SYSC_QUIRK("adc", 0, 0, 0x10, -ENODEV, 0x47300001, 0xffffffff, 0),
+ SYSC_QUIRK("atl", 0, 0, -ENODEV, -ENODEV, 0x0a070100, 0xffffffff, 0),
+ SYSC_QUIRK("cm", 0, 0, -ENODEV, -ENODEV, 0x40000301, 0xffffffff, 0),
+ SYSC_QUIRK("control", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
+ SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
+ 0xffff00f0, 0),
+ SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 0),
+ SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0x4edb1902, 0xffffffff, 0),
+ SYSC_QUIRK("dispc", 0x4832a400, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
+ SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000051, 0xffffffff, 0),
+ SYSC_QUIRK("dmic", 0, 0, 0x10, -ENODEV, 0x50010000, 0xffffffff, 0),
+ SYSC_QUIRK("dsi", 0x58004000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
+ SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
+ SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("dsi", 0x58009000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("dwc3", 0, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 0),
+ SYSC_QUIRK("d2d", 0x4a0b6000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
+ SYSC_QUIRK("d2d", 0x4a0cd000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
+ SYSC_QUIRK("epwmss", 0, 0, 0x4, -ENODEV, 0x47400001, 0xffffffff, 0),
+ SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -ENODEV, 0, 0, 0),
+	SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -ENODEV, 0x40000000, 0xffffffff, 0),
+ SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50031d00, 0xffffffff, 0),
+ SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0),
+ SYSC_QUIRK("iss", 0, 0, 0x10, -ENODEV, 0x40000101, 0xffffffff, 0),
+ SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff, 0),
+ SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44307b02, 0xffffffff, 0),
+ SYSC_QUIRK("mcbsp", 0, -ENODEV, 0x8c, -ENODEV, 0, 0, 0),
+ SYSC_QUIRK("mcspi", 0, 0, 0x10, -ENODEV, 0x40300a0b, 0xffff00ff, 0),
+ SYSC_QUIRK("mcspi", 0, 0, 0x110, 0x114, 0x40300a0b, 0xffffffff, 0),
+ SYSC_QUIRK("mailbox", 0, 0, 0x10, -ENODEV, 0x00000400, 0xffffffff, 0),
+ SYSC_QUIRK("m3", 0, 0, -ENODEV, -ENODEV, 0x5f580105, 0x0fff0f00, 0),
+ SYSC_QUIRK("ocp2scp", 0, 0, 0x10, 0x14, 0x50060005, 0xfffffff0, 0),
+ SYSC_QUIRK("ocp2scp", 0, 0, -ENODEV, -ENODEV, 0x50060007, 0xffffffff, 0),
+ SYSC_QUIRK("padconf", 0, 0, 0x10, -ENODEV, 0x4fff0800, 0xffffffff, 0),
+ SYSC_QUIRK("padconf", 0, 0, -ENODEV, -ENODEV, 0x40001100, 0xffffffff, 0),
+ SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000100, 0xffffffff, 0),
+ SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x00004102, 0xffffffff, 0),
+ SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0),
+ SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
+ SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
+ SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
+ SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0),
+ SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0),
+ SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x40000900, 0xffffffff, 0),
+ SYSC_QUIRK("scrm", 0, 0, -ENODEV, -ENODEV, 0x00000010, 0xffffffff, 0),
+ SYSC_QUIRK("sdio", 0, 0, 0x10, -ENODEV, 0x40202301, 0xffff0ff0, 0),
+ SYSC_QUIRK("sdio", 0, 0x2fc, 0x110, 0x114, 0x31010000, 0xffffffff, 0),
+ SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0),
+ SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0),
+ SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0),
+ SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0),
+ SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0),
+ SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000013, 0xffffffff, 0),
+ SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 0),
+ /* Some timers on omap4 and later */
+ SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff, 0),
+ SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff, 0),
+ SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000011, 0xffffffff, 0),
+ SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0),
+ SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0),
+ SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0),
+ SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0),
+ SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0),
+ SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0),
+#endif
+};
+
+/*
+ * Early quirks based only on the module base and register offsets. These
+ * are needed before the module revision can be read.
+ */
+static void sysc_init_early_quirks(struct sysc *ddata)
+{
+ const struct sysc_revision_quirk *q;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
+ q = &sysc_revision_quirks[i];
+
+ if (!q->base)
+ continue;
+
+ if (q->base != ddata->module_pa)
+ continue;
+
+ if (q->rev_offset != ddata->offsets[SYSC_REVISION])
+ continue;
+
+ if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
+ continue;
+
+ if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
+ continue;
+
+ ddata->name = q->name;
+ ddata->cfg.quirks |= q->quirks;
+ }
+}
+
+/* Quirks that also consider the revision register value */
+static void sysc_init_revision_quirks(struct sysc *ddata)
+{
+ const struct sysc_revision_quirk *q;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
+ q = &sysc_revision_quirks[i];
+
+ if (q->base && q->base != ddata->module_pa)
+ continue;
+
+ if (q->rev_offset != ddata->offsets[SYSC_REVISION])
+ continue;
+
+ if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
+ continue;
+
+ if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
+ continue;
+
+ if (q->revision == ddata->revision ||
+ (q->revision & q->revision_mask) ==
+ (ddata->revision & q->revision_mask)) {
+ ddata->name = q->name;
+ ddata->cfg.quirks |= q->quirks;
+ }
+ }
+}
+
+/*
+ * DSS needs dispc outputs disabled to reset modules. Returns mask of
+ * enabled DSS interrupts. Eventually we may be able to do this on
+ * dispc init rather than top-level DSS init.
+ */
+static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
+ bool disable)
+{
+ bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false;
+ const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1);
+ int manager_count;
+ bool framedonetv_irq = true;
+ u32 val, irq_mask = 0;
+
+ switch (sysc_soc->soc) {
+ case SOC_2420 ... SOC_3630:
+ manager_count = 2;
+ framedonetv_irq = false;
+ break;
+ case SOC_4430 ... SOC_4470:
+ manager_count = 3;
+ break;
+ case SOC_5430:
+ case SOC_DRA7:
+ manager_count = 4;
+ break;
+ case SOC_AM4:
+ manager_count = 1;
+ framedonetv_irq = false;
+ break;
+ case SOC_UNKNOWN:
+ default:
+ return 0;
+ }
+
+ /* Remap the whole module range to be able to reset dispc outputs */
+ devm_iounmap(ddata->dev, ddata->module_va);
+ ddata->module_va = devm_ioremap(ddata->dev,
+ ddata->module_pa,
+ ddata->module_size);
+ if (!ddata->module_va)
+ return -EIO;
+
+ /* DISPC_CONTROL, shut down lcd and digit on disable if enabled */
+ val = sysc_read(ddata, dispc_offset + 0x40);
+ lcd_en = val & lcd_en_mask;
+ digit_en = val & digit_en_mask;
+ if (lcd_en)
+ irq_mask |= BIT(0); /* FRAMEDONE */
+ if (digit_en) {
+ if (framedonetv_irq)
+ irq_mask |= BIT(24); /* FRAMEDONETV */
+ else
+ irq_mask |= BIT(2) | BIT(3); /* EVSYNC bits */
+ }
+ if (disable && (lcd_en || digit_en))
+ sysc_write(ddata, dispc_offset + 0x40,
+ val & ~(lcd_en_mask | digit_en_mask));
+
+ if (manager_count <= 2)
+ return irq_mask;
+
+ /* DISPC_CONTROL2 */
+ val = sysc_read(ddata, dispc_offset + 0x238);
+ lcd2_en = val & lcd_en_mask;
+ if (lcd2_en)
+ irq_mask |= BIT(22); /* FRAMEDONE2 */
+ if (disable && lcd2_en)
+ sysc_write(ddata, dispc_offset + 0x238,
+ val & ~lcd_en_mask);
+
+ if (manager_count <= 3)
+ return irq_mask;
+
+ /* DISPC_CONTROL3 */
+ val = sysc_read(ddata, dispc_offset + 0x848);
+ lcd3_en = val & lcd_en_mask;
+ if (lcd3_en)
+ irq_mask |= BIT(30); /* FRAMEDONE3 */
+ if (disable && lcd3_en)
+ sysc_write(ddata, dispc_offset + 0x848,
+ val & ~lcd_en_mask);
+
+ return irq_mask;
+}
+
+/* DSS needs child outputs disabled and SDI registers cleared for reset */
+static void sysc_pre_reset_quirk_dss(struct sysc *ddata)
+{
+ const int dispc_offset = 0x1000;
+ int error;
+ u32 irq_mask, val;
+
+ /* Get enabled outputs */
+ irq_mask = sysc_quirk_dispc(ddata, dispc_offset, false);
+ if (!irq_mask)
+ return;
+
+ /* Clear IRQSTATUS */
+ sysc_write(ddata, dispc_offset + 0x18, irq_mask);
+
+ /* Disable outputs */
+ val = sysc_quirk_dispc(ddata, dispc_offset, true);
+
+ /* Poll IRQSTATUS */
+ error = readl_poll_timeout(ddata->module_va + dispc_offset + 0x18,
+ val, val != irq_mask, 100, 50);
+ if (error)
+ dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n",
+ __func__, val, irq_mask);
+
+ if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35) {
+ /* Clear DSS_SDI_CONTROL */
+ sysc_write(ddata, 0x44, 0);
+
+ /* Clear DSS_PLL_CONTROL */
+ sysc_write(ddata, 0x48, 0);
+ }
+
+ /* Clear DSS_CONTROL to switch DSS clock sources to PRCM if not done */
+ sysc_write(ddata, 0x40, 0);
+}
+
+/* 1-wire needs module's internal clocks enabled for reset */
+static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata)
+{
+ int offset = 0x0c; /* HDQ_CTRL_STATUS */
+ u16 val;
+
+ val = sysc_read(ddata, offset);
+ val |= BIT(5);
+ sysc_write(ddata, offset, val);
+}
+
+/* AESS (Audio Engine SubSystem) needs autogating set after enable */
+static void sysc_module_enable_quirk_aess(struct sysc *ddata)
+{
+ int offset = 0x7c; /* AESS_AUTO_GATING_ENABLE */
+
+ sysc_write(ddata, offset, 1);
+}
+
+/* I2C needs to be disabled for reset */
+static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable)
+{
+ int offset;
+ u16 val;
+
+ /* I2C_CON, omap2/3 is different from omap4 and later */
+ if ((ddata->revision & 0xffffff00) == 0x001f0000)
+ offset = 0x24;
+ else
+ offset = 0xa4;
+
+ /* I2C_EN */
+ val = sysc_read(ddata, offset);
+ if (enable)
+ val |= BIT(15);
+ else
+ val &= ~BIT(15);
+ sysc_write(ddata, offset, val);
+}
+
+static void sysc_pre_reset_quirk_i2c(struct sysc *ddata)
+{
+ sysc_clk_quirk_i2c(ddata, false);
+}
+
+static void sysc_post_reset_quirk_i2c(struct sysc *ddata)
+{
+ sysc_clk_quirk_i2c(ddata, true);
+}
+
+/* RTC on am3 and 4 needs to be unlocked and locked for sysconfig */
+static void sysc_quirk_rtc(struct sysc *ddata, bool lock)
+{
+ u32 val, kick0_val = 0, kick1_val = 0;
+ unsigned long flags;
+ int error;
+
+ if (!lock) {
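+ /*
+ * Unlock keys for the RTC KICK0R/KICK1R registers; these
+ * magic values are documented in the am335x TRM.
+ */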
+ kick0_val = 0x83e70b13;
+ kick1_val = 0x95a4f1e0;
+ }
+
+ local_irq_save(flags);
+ /* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */
+ error = readl_poll_timeout_atomic(ddata->module_va + 0x44, val,
+ !(val & BIT(0)), 100, 50);
+ if (error)
+ dev_warn(ddata->dev, "rtc busy timeout\n");
+ /* Now we have ~15 microseconds to read/write various registers */
+ sysc_write(ddata, 0x6c, kick0_val);
+ sysc_write(ddata, 0x70, kick1_val);
+ local_irq_restore(flags);
+}
+
+static void sysc_module_unlock_quirk_rtc(struct sysc *ddata)
+{
+ sysc_quirk_rtc(ddata, false);
+}
+
+static void sysc_module_lock_quirk_rtc(struct sysc *ddata)
+{
+ sysc_quirk_rtc(ddata, true);
+}
+
+/* 36xx SGX needs a quirk to bypass OCP IPG interrupt logic */
+static void sysc_module_enable_quirk_sgx(struct sysc *ddata)
+{
+ int offset = 0xff08; /* OCP_DEBUG_CONFIG */
+ u32 val = BIT(31); /* THALIA_INT_BYPASS */
+
+ sysc_write(ddata, offset, val);
+}
+
+/* Watchdog timer needs a disable sequence after reset */
+static void sysc_reset_done_quirk_wdt(struct sysc *ddata)
+{
+ int wps, spr, error;
+ u32 val;
+
+ wps = 0x34;
+ spr = 0x48;
+
+ sysc_write(ddata, spr, 0xaaaa);
+ error = readl_poll_timeout(ddata->module_va + wps, val,
+ !(val & 0x10), 100,
+ MAX_MODULE_SOFTRESET_WAIT);
+ if (error)
+ dev_warn(ddata->dev, "wdt disable step1 failed\n");
+
+ sysc_write(ddata, spr, 0x5555);
+ error = readl_poll_timeout(ddata->module_va + wps, val,
+ !(val & 0x10), 100,
+ MAX_MODULE_SOFTRESET_WAIT);
+ if (error)
+ dev_warn(ddata->dev, "wdt disable step2 failed\n");
+}
+
+/* PRUSS needs to set MSTANDBY_INIT in order to idle properly */
+static void sysc_module_disable_quirk_pruss(struct sysc *ddata)
+{
+ u32 reg;
+
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+ reg |= SYSC_PRUSS_STANDBY_INIT;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+}
+
+static void sysc_init_module_quirks(struct sysc *ddata)
+{
+ if (ddata->legacy_mode || !ddata->name)
+ return;
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) {
+ ddata->pre_reset_quirk = sysc_pre_reset_quirk_hdq1w;
+
+ return;
+ }
+
+#ifdef CONFIG_OMAP_GPMC_DEBUG
+ if (ddata->cfg.quirks & SYSC_QUIRK_GPMC_DEBUG) {
+ ddata->cfg.quirks |= SYSC_QUIRK_NO_RESET_ON_INIT;
+
+ return;
+ }
+#endif
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) {
+ ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c;
+ ddata->post_reset_quirk = sysc_post_reset_quirk_i2c;
+
+ return;
+ }
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS)
+ ddata->module_enable_quirk = sysc_module_enable_quirk_aess;
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_DSS_RESET)
+ ddata->pre_reset_quirk = sysc_pre_reset_quirk_dss;
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_RTC_UNLOCK) {
+ ddata->module_unlock_quirk = sysc_module_unlock_quirk_rtc;
+ ddata->module_lock_quirk = sysc_module_lock_quirk_rtc;
+
+ return;
+ }
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
+ ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT) {
+ ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
+ ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
+ }
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS)
+ ddata->module_disable_quirk = sysc_module_disable_quirk_pruss;
+}
+
+static int sysc_clockdomain_init(struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
+ struct clk *fck = NULL, *ick = NULL;
+ int error;
+
+ if (!pdata || !pdata->init_clockdomain)
+ return 0;
+
+ switch (ddata->nr_clocks) {
+ case 2:
+ ick = ddata->clocks[SYSC_ICK];
+ fallthrough;
+ case 1:
+ fck = ddata->clocks[SYSC_FCK];
+ break;
+ case 0:
+ return 0;
+ }
+
+ error = pdata->init_clockdomain(ddata->dev, fck, ick, &ddata->cookie);
+ if (!error || error == -ENODEV)
+ return 0;
+
+ return error;
+}
+
+/*
+ * Note that pdata->init_module() typically does a reset first. After
+ * pdata->init_module() is done, PM runtime can be used for the interconnect
+ * target module.
+ */
+static int sysc_legacy_init(struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
+ int error;
+
+ if (!pdata || !pdata->init_module)
+ return 0;
+
+ error = pdata->init_module(ddata->dev, ddata->mdata, &ddata->cookie);
+ if (error == -EEXIST)
+ error = 0;
+
+ return error;
+}
+
+/*
+ * Note that the caller must ensure the interconnect target module is enabled
+ * before calling reset. Otherwise reset will not complete.
+ */
+static int sysc_reset(struct sysc *ddata)
+{
+ int sysc_offset, sysc_val, error;
+ u32 sysc_mask;
+
+ sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
+
+ if (ddata->legacy_mode ||
+ ddata->cap->regbits->srst_shift < 0 ||
+ ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
+ return 0;
+
+ sysc_mask = BIT(ddata->cap->regbits->srst_shift);
+
+ if (ddata->pre_reset_quirk)
+ ddata->pre_reset_quirk(ddata);
+
+ if (sysc_offset >= 0) {
+ sysc_val = sysc_read_sysconfig(ddata);
+ sysc_val |= sysc_mask;
+ sysc_write(ddata, sysc_offset, sysc_val);
+
+ /*
+ * Some devices need a delay before reading registers
+ * after reset. Presumably a srst_udelay is not needed
+ * for devices that use a rstctrl register reset.
+ */
+ if (ddata->cfg.srst_udelay)
+ fsleep(ddata->cfg.srst_udelay);
+
+ /*
+ * Flush posted write. For devices needing srst_udelay
+ * this should trigger an interconnect error if the
+ * srst_udelay value is needed but not configured.
+ */
+ sysc_val = sysc_read_sysconfig(ddata);
+ }
+
+ if (ddata->post_reset_quirk)
+ ddata->post_reset_quirk(ddata);
+
+ error = sysc_wait_softreset(ddata);
+ if (error)
+ dev_warn(ddata->dev, "OCP softreset timed out\n");
+
+ if (ddata->reset_done_quirk)
+ ddata->reset_done_quirk(ddata);
+
+ return error;
+}
+
+/*
+ * At this point the module is configured enough to read the revision, but
+ * may not yet be completely configured to use PM runtime. Enable
+ * all clocks directly during init to configure the quirks needed for PM
+ * runtime based on the revision register.
+ */
+static int sysc_init_module(struct sysc *ddata)
+{
+ int error = 0;
+
+ error = sysc_clockdomain_init(ddata);
+ if (error)
+ return error;
+
+ sysc_clkdm_deny_idle(ddata);
+
+ /*
+ * Always enable clocks. The bootloader may or may not have enabled
+ * the related clocks.
+ */
+ error = sysc_enable_opt_clocks(ddata);
+ if (error)
+ return error;
+
+ error = sysc_enable_main_clocks(ddata);
+ if (error)
+ goto err_opt_clocks;
+
+ if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) {
+ error = reset_control_deassert(ddata->rsts);
+ if (error)
+ goto err_main_clocks;
+ }
+
+ ddata->revision = sysc_read_revision(ddata);
+ sysc_init_revision_quirks(ddata);
+ sysc_init_module_quirks(ddata);
+
+ if (ddata->legacy_mode) {
+ error = sysc_legacy_init(ddata);
+ if (error)
+ goto err_reset;
+ }
+
+ if (!ddata->legacy_mode) {
+ error = sysc_enable_module(ddata->dev);
+ if (error)
+ goto err_reset;
+ }
+
+ error = sysc_reset(ddata);
+ if (error)
+ dev_err(ddata->dev, "Reset failed with %d\n", error);
+
+ if (error && !ddata->legacy_mode)
+ sysc_disable_module(ddata->dev);
+
+err_reset:
+ if (error && !(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
+ reset_control_assert(ddata->rsts);
+
+err_main_clocks:
+ if (error)
+ sysc_disable_main_clocks(ddata);
+err_opt_clocks:
+ /* No re-enable of clockdomain autoidle to prevent module autoidle */
+ if (error) {
+ sysc_disable_opt_clocks(ddata);
+ sysc_clkdm_allow_idle(ddata);
+ }
+
+ return error;
+}
+
+static int sysc_init_sysc_mask(struct sysc *ddata)
+{
+ struct device_node *np = ddata->dev->of_node;
+ int error;
+ u32 val;
+
+ error = of_property_read_u32(np, "ti,sysc-mask", &val);
+ if (error)
+ return 0;
+
+ ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
+
+ return 0;
+}
+
+static int sysc_init_idlemode(struct sysc *ddata, u8 *idlemodes,
+ const char *name)
+{
+ struct device_node *np = ddata->dev->of_node;
+ struct property *prop;
+ const __be32 *p;
+ u32 val;
+
+ of_property_for_each_u32(np, name, prop, p, val) {
+ if (val >= SYSC_NR_IDLEMODES) {
+ dev_err(ddata->dev, "invalid idlemode: %i\n", val);
+ return -EINVAL;
+ }
+ *idlemodes |= (1 << val);
+ }
+
+ return 0;
+}
+
+static int sysc_init_idlemodes(struct sysc *ddata)
+{
+ int error;
+
+ error = sysc_init_idlemode(ddata, &ddata->cfg.midlemodes,
+ "ti,sysc-midle");
+ if (error)
+ return error;
+
+ error = sysc_init_idlemode(ddata, &ddata->cfg.sidlemodes,
+ "ti,sysc-sidle");
+ if (error)
+ return error;
+
+ return 0;
+}
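+
+/*
+ * The mask and idlemodes above come from device tree properties. An
+ * illustrative sketch of the expected dts usage (the values below are
+ * examples only, using the macros from dt-bindings/bus/ti-sysc.h):
+ *
+ * ti,sysc-mask = <(SYSC_OMAP2_AUTOIDLE | SYSC_OMAP2_SOFTRESET)>;
+ * ti,sysc-midle = <SYSC_IDLE_FORCE>, <SYSC_IDLE_NO>, <SYSC_IDLE_SMART>;
+ * ti,sysc-sidle = <SYSC_IDLE_FORCE>, <SYSC_IDLE_NO>, <SYSC_IDLE_SMART>;
+ */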
+
+/*
+ * Only some devices on omap4 and later have a SYSCONFIG reset done
+ * bit. We can detect this if there is no SYSSTATUS register at all, or
+ * if SYSSTATUS bit 0 is not used. Note that some SYSSTATUS registers
+ * have multiple bits for the child devices like OHCI and EHCI.
+ * This depends on SYSC being parsed first.
+ */
+static int sysc_init_syss_mask(struct sysc *ddata)
+{
+ struct device_node *np = ddata->dev->of_node;
+ int error;
+ u32 val;
+
+ error = of_property_read_u32(np, "ti,syss-mask", &val);
+ if (error) {
+ if ((ddata->cap->type == TI_SYSC_OMAP4 ||
+ ddata->cap->type == TI_SYSC_OMAP4_TIMER) &&
+ (ddata->cfg.sysc_val & SYSC_OMAP4_SOFTRESET))
+ ddata->cfg.quirks |= SYSC_QUIRK_RESET_STATUS;
+
+ return 0;
+ }
+
+ if (!(val & 1) && (ddata->cfg.sysc_val & SYSC_OMAP4_SOFTRESET))
+ ddata->cfg.quirks |= SYSC_QUIRK_RESET_STATUS;
+
+ ddata->cfg.syss_mask = val;
+
+ return 0;
+}
+
+/*
+ * Many child device drivers need to have fck and opt clocks available
+ * to get the clock rate for device internal configuration etc.
+ */
+static int sysc_child_add_named_clock(struct sysc *ddata,
+ struct device *child,
+ const char *name)
+{
+ struct clk *clk;
+ struct clk_lookup *l;
+ int error = 0;
+
+ if (!name)
+ return 0;
+
+ clk = clk_get(child, name);
+ if (!IS_ERR(clk)) {
+ error = -EEXIST;
+ goto put_clk;
+ }
+
+ clk = clk_get(ddata->dev, name);
+ if (IS_ERR(clk))
+ return -ENODEV;
+
+ l = clkdev_create(clk, name, dev_name(child));
+ if (!l)
+ error = -ENOMEM;
+put_clk:
+ clk_put(clk);
+
+ return error;
+}
+
+static int sysc_child_add_clocks(struct sysc *ddata,
+ struct device *child)
+{
+ int i, error;
+
+ for (i = 0; i < ddata->nr_clocks; i++) {
+ error = sysc_child_add_named_clock(ddata,
+ child,
+ ddata->clock_roles[i]);
+ if (error && error != -EEXIST) {
+ dev_err(ddata->dev, "could not add child clock %s: %i\n",
+ ddata->clock_roles[i], error);
+
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static struct device_type sysc_device_type = {
+};
+
+static struct sysc *sysc_child_to_parent(struct device *dev)
+{
+ struct device *parent = dev->parent;
+
+ if (!parent || parent->type != &sysc_device_type)
+ return NULL;
+
+ return dev_get_drvdata(parent);
+}
+
+static int __maybe_unused sysc_child_runtime_suspend(struct device *dev)
+{
+ struct sysc *ddata;
+ int error;
+
+ ddata = sysc_child_to_parent(dev);
+
+ error = pm_generic_runtime_suspend(dev);
+ if (error)
+ return error;
+
+ if (!ddata->enabled)
+ return 0;
+
+ return sysc_runtime_suspend(ddata->dev);
+}
+
+static int __maybe_unused sysc_child_runtime_resume(struct device *dev)
+{
+ struct sysc *ddata;
+ int error;
+
+ ddata = sysc_child_to_parent(dev);
+
+ if (!ddata->enabled) {
+ error = sysc_runtime_resume(ddata->dev);
+ if (error < 0)
+ dev_err(ddata->dev,
+ "%s error: %i\n", __func__, error);
+ }
+
+ return pm_generic_runtime_resume(dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sysc_child_suspend_noirq(struct device *dev)
+{
+ struct sysc *ddata;
+ int error;
+
+ ddata = sysc_child_to_parent(dev);
+
+ dev_dbg(ddata->dev, "%s %s\n", __func__,
+ ddata->name ? ddata->name : "");
+
+ error = pm_generic_suspend_noirq(dev);
+ if (error) {
+ dev_err(dev, "%s error at %i: %i\n",
+ __func__, __LINE__, error);
+
+ return error;
+ }
+
+ if (!pm_runtime_status_suspended(dev)) {
+ error = pm_generic_runtime_suspend(dev);
+ if (error) {
+ dev_dbg(dev, "%s busy at %i: %i\n",
+ __func__, __LINE__, error);
+
+ return 0;
+ }
+
+ error = sysc_runtime_suspend(ddata->dev);
+ if (error) {
+ dev_err(dev, "%s error at %i: %i\n",
+ __func__, __LINE__, error);
+
+ return error;
+ }
+
+ ddata->child_needs_resume = true;
+ }
+
+ return 0;
+}
+
+static int sysc_child_resume_noirq(struct device *dev)
+{
+ struct sysc *ddata;
+ int error;
+
+ ddata = sysc_child_to_parent(dev);
+
+ dev_dbg(ddata->dev, "%s %s\n", __func__,
+ ddata->name ? ddata->name : "");
+
+ if (ddata->child_needs_resume) {
+ ddata->child_needs_resume = false;
+
+ error = sysc_runtime_resume(ddata->dev);
+ if (error)
+ dev_err(ddata->dev,
+ "%s runtime resume error: %i\n",
+ __func__, error);
+
+ error = pm_generic_runtime_resume(dev);
+ if (error)
+ dev_err(ddata->dev,
+ "%s generic runtime resume: %i\n",
+ __func__, error);
+ }
+
+ return pm_generic_resume_noirq(dev);
+}
+#endif
+
+static struct dev_pm_domain sysc_child_pm_domain = {
+ .ops = {
+ SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend,
+ sysc_child_runtime_resume,
+ NULL)
+ USE_PLATFORM_PM_SLEEP_OPS
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_child_suspend_noirq,
+ sysc_child_resume_noirq)
+ }
+};
+
+/* Caller needs to take list_lock if ever used outside of cpu_pm */
+static void sysc_reinit_modules(struct sysc_soc_info *soc)
+{
+ struct sysc_module *module;
+ struct list_head *pos;
+ struct sysc *ddata;
+
+ list_for_each(pos, &sysc_soc->restored_modules) {
+ module = list_entry(pos, struct sysc_module, node);
+ ddata = module->ddata;
+ sysc_reinit_module(ddata, ddata->enabled);
+ }
+}
+
+/**
+ * sysc_context_notifier - optionally reset and restore module after idle
+ * @nb: notifier block
+ * @cmd: CPU PM event
+ * @v: unused
+ *
+ * Some interconnect target modules need to be restored, or reset and restored,
+ * on the CPU_PM CPU_CLUSTER_PM_EXIT notifier. This is needed at least for
+ * am335x OTG and GPMC target modules even if the modules are unused.
+ */
+static int sysc_context_notifier(struct notifier_block *nb, unsigned long cmd,
+ void *v)
+{
+ struct sysc_soc_info *soc;
+
+ soc = container_of(nb, struct sysc_soc_info, nb);
+
+ switch (cmd) {
+ case CPU_CLUSTER_PM_ENTER:
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */
+ break;
+ case CPU_CLUSTER_PM_EXIT:
+ sysc_reinit_modules(soc);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+/**
+ * sysc_add_restored - optionally add reset and restore quirk handling
+ * @ddata: device data
+ */
+static void sysc_add_restored(struct sysc *ddata)
+{
+ struct sysc_module *restored_module;
+
+ restored_module = kzalloc(sizeof(*restored_module), GFP_KERNEL);
+ if (!restored_module)
+ return;
+
+ restored_module->ddata = ddata;
+
+ mutex_lock(&sysc_soc->list_lock);
+
+ list_add(&restored_module->node, &sysc_soc->restored_modules);
+
+ if (sysc_soc->nb.notifier_call)
+ goto out_unlock;
+
+ sysc_soc->nb.notifier_call = sysc_context_notifier;
+ cpu_pm_register_notifier(&sysc_soc->nb);
+
+out_unlock:
+ mutex_unlock(&sysc_soc->list_lock);
+}
+
+/**
+ * sysc_legacy_idle_quirk - handle children in omap_device compatible way
+ * @ddata: device driver data
+ * @child: child device
+ *
+ * Allow idle for child devices as done with _od_runtime_suspend().
+ * Otherwise many child devices will not idle because of the permanent
+ * parent usecount set in pm_runtime_irq_safe().
+ *
+ * Note that the long term solution is to just modify the child device
+ * drivers to not set pm_runtime_irq_safe() and then this can be just
+ * dropped.
+ */
+static void sysc_legacy_idle_quirk(struct sysc *ddata, struct device *child)
+{
+ if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
+ dev_pm_domain_set(child, &sysc_child_pm_domain);
+}
+
+static int sysc_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *device)
+{
+ struct device *dev = device;
+ struct sysc *ddata;
+ int error;
+
+ ddata = sysc_child_to_parent(dev);
+ if (!ddata)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ error = sysc_child_add_clocks(ddata, dev);
+ if (error)
+ return error;
+ sysc_legacy_idle_quirk(ddata, dev);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block sysc_nb = {
+ .notifier_call = sysc_notifier_call,
+};
+
+/* Device tree configured quirks */
+struct sysc_dts_quirk {
+ const char *name;
+ u32 mask;
+};
+
+static const struct sysc_dts_quirk sysc_dts_quirks[] = {
+ { .name = "ti,no-idle-on-init",
+ .mask = SYSC_QUIRK_NO_IDLE_ON_INIT, },
+ { .name = "ti,no-reset-on-init",
+ .mask = SYSC_QUIRK_NO_RESET_ON_INIT, },
+ { .name = "ti,no-idle",
+ .mask = SYSC_QUIRK_NO_IDLE, },
+};
+
+static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
+ bool is_child)
+{
+ const struct property *prop;
+ int i, len;
+
+ for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) {
+ const char *name = sysc_dts_quirks[i].name;
+
+ prop = of_get_property(np, name, &len);
+ if (!prop)
+ continue;
+
+ ddata->cfg.quirks |= sysc_dts_quirks[i].mask;
+ if (is_child) {
+ dev_warn(ddata->dev,
+ "dts flag should be at module level for %s\n",
+ name);
+ }
+ }
+}
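+
+/*
+ * These quirk flags are set in the board or SoC dts on the interconnect
+ * target module node itself, not on a child node. A hypothetical example
+ * (the node label below is made up):
+ *
+ * &timer12_target {
+ * ti,no-reset-on-init;
+ * ti,no-idle;
+ * };
+ */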
+
+static int sysc_init_dts_quirks(struct sysc *ddata)
+{
+ struct device_node *np = ddata->dev->of_node;
+ int error;
+ u32 val;
+
+ ddata->legacy_mode = of_get_property(np, "ti,hwmods", NULL);
+
+ sysc_parse_dts_quirks(ddata, np, false);
+ error = of_property_read_u32(np, "ti,sysc-delay-us", &val);
+ if (!error) {
+ if (val > 255) {
+ dev_warn(ddata->dev, "bad ti,sysc-delay-us: %i\n",
+ val);
+ }
+
+ ddata->cfg.srst_udelay = (u8)val;
+ }
+
+ return 0;
+}
+
+static void sysc_unprepare(struct sysc *ddata)
+{
+ int i;
+
+ if (!ddata->clocks)
+ return;
+
+ for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
+ if (!IS_ERR_OR_NULL(ddata->clocks[i]))
+ clk_unprepare(ddata->clocks[i]);
+ }
+}
+
+/*
+ * Common sysc register bits found on omap2, also known as type1
+ */
+static const struct sysc_regbits sysc_regbits_omap2 = {
+ .dmadisable_shift = -ENODEV,
+ .midle_shift = 12,
+ .sidle_shift = 3,
+ .clkact_shift = 8,
+ .emufree_shift = 5,
+ .enwkup_shift = 2,
+ .srst_shift = 1,
+ .autoidle_shift = 0,
+};
+
+static const struct sysc_capabilities sysc_omap2 = {
+ .type = TI_SYSC_OMAP2,
+ .sysc_mask = SYSC_OMAP2_CLOCKACTIVITY | SYSC_OMAP2_EMUFREE |
+ SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE,
+ .regbits = &sysc_regbits_omap2,
+};
+
+/* All omap2 and 3 timers, and timers 1, 2 & 10 on omap 4 and 5 */
+static const struct sysc_capabilities sysc_omap2_timer = {
+ .type = TI_SYSC_OMAP2_TIMER,
+ .sysc_mask = SYSC_OMAP2_CLOCKACTIVITY | SYSC_OMAP2_EMUFREE |
+ SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE,
+ .regbits = &sysc_regbits_omap2,
+ .mod_quirks = SYSC_QUIRK_USE_CLOCKACT,
+};
+
+/*
+ * SHAM2 (SHA1/MD5) sysc found on omap3, a variant of sysc_regbits_omap2
+ * with different sidle position
+ */
+static const struct sysc_regbits sysc_regbits_omap3_sham = {
+ .dmadisable_shift = -ENODEV,
+ .midle_shift = -ENODEV,
+ .sidle_shift = 4,
+ .clkact_shift = -ENODEV,
+ .enwkup_shift = -ENODEV,
+ .srst_shift = 1,
+ .autoidle_shift = 0,
+ .emufree_shift = -ENODEV,
+};
+
+static const struct sysc_capabilities sysc_omap3_sham = {
+ .type = TI_SYSC_OMAP3_SHAM,
+ .sysc_mask = SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE,
+ .regbits = &sysc_regbits_omap3_sham,
+};
+
+/*
+ * AES register bits found on omap3 and later, a variant of
+ * sysc_regbits_omap2 with different sidle position
+ */
+static const struct sysc_regbits sysc_regbits_omap3_aes = {
+ .dmadisable_shift = -ENODEV,
+ .midle_shift = -ENODEV,
+ .sidle_shift = 6,
+ .clkact_shift = -ENODEV,
+ .enwkup_shift = -ENODEV,
+ .srst_shift = 1,
+ .autoidle_shift = 0,
+ .emufree_shift = -ENODEV,
+};
+
+static const struct sysc_capabilities sysc_omap3_aes = {
+ .type = TI_SYSC_OMAP3_AES,
+ .sysc_mask = SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE,
+ .regbits = &sysc_regbits_omap3_aes,
+};
+
+/*
+ * Common sysc register bits found on omap4, also known as type2
+ */
+static const struct sysc_regbits sysc_regbits_omap4 = {
+ .dmadisable_shift = 16,
+ .midle_shift = 4,
+ .sidle_shift = 2,
+ .clkact_shift = -ENODEV,
+ .enwkup_shift = -ENODEV,
+ .emufree_shift = 1,
+ .srst_shift = 0,
+ .autoidle_shift = -ENODEV,
+};
+
+static const struct sysc_capabilities sysc_omap4 = {
+ .type = TI_SYSC_OMAP4,
+ .sysc_mask = SYSC_OMAP4_DMADISABLE | SYSC_OMAP4_FREEEMU |
+ SYSC_OMAP4_SOFTRESET,
+ .regbits = &sysc_regbits_omap4,
+};
+
+static const struct sysc_capabilities sysc_omap4_timer = {
+ .type = TI_SYSC_OMAP4_TIMER,
+ .sysc_mask = SYSC_OMAP4_DMADISABLE | SYSC_OMAP4_FREEEMU |
+ SYSC_OMAP4_SOFTRESET,
+ .regbits = &sysc_regbits_omap4,
+};
+
+/*
+ * Common sysc register bits found on omap4, also known as type3
+ */
+static const struct sysc_regbits sysc_regbits_omap4_simple = {
+ .dmadisable_shift = -ENODEV,
+ .midle_shift = 2,
+ .sidle_shift = 0,
+ .clkact_shift = -ENODEV,
+ .enwkup_shift = -ENODEV,
+ .srst_shift = -ENODEV,
+ .emufree_shift = -ENODEV,
+ .autoidle_shift = -ENODEV,
+};
+
+static const struct sysc_capabilities sysc_omap4_simple = {
+ .type = TI_SYSC_OMAP4_SIMPLE,
+ .regbits = &sysc_regbits_omap4_simple,
+};
+
+/*
+ * SmartReflex sysc found on omap34xx
+ */
+static const struct sysc_regbits sysc_regbits_omap34xx_sr = {
+ .dmadisable_shift = -ENODEV,
+ .midle_shift = -ENODEV,
+ .sidle_shift = -ENODEV,
+ .clkact_shift = 20,
+ .enwkup_shift = -ENODEV,
+ .srst_shift = -ENODEV,
+ .emufree_shift = -ENODEV,
+ .autoidle_shift = -ENODEV,
+};
+
+static const struct sysc_capabilities sysc_34xx_sr = {
+ .type = TI_SYSC_OMAP34XX_SR,
+ .sysc_mask = SYSC_OMAP2_CLOCKACTIVITY,
+ .regbits = &sysc_regbits_omap34xx_sr,
+ .mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED |
+ SYSC_QUIRK_LEGACY_IDLE,
+};
+
+/*
+ * SmartReflex sysc found on omap36xx and later
+ */
+static const struct sysc_regbits sysc_regbits_omap36xx_sr = {
+ .dmadisable_shift = -ENODEV,
+ .midle_shift = -ENODEV,
+ .sidle_shift = 24,
+ .clkact_shift = -ENODEV,
+ .enwkup_shift = 26,
+ .srst_shift = -ENODEV,
+ .emufree_shift = -ENODEV,
+ .autoidle_shift = -ENODEV,
+};
+
+static const struct sysc_capabilities sysc_36xx_sr = {
+ .type = TI_SYSC_OMAP36XX_SR,
+ .sysc_mask = SYSC_OMAP3_SR_ENAWAKEUP,
+ .regbits = &sysc_regbits_omap36xx_sr,
+ .mod_quirks = SYSC_QUIRK_UNCACHED | SYSC_QUIRK_LEGACY_IDLE,
+};
+
+static const struct sysc_capabilities sysc_omap4_sr = {
+ .type = TI_SYSC_OMAP4_SR,
+ .regbits = &sysc_regbits_omap36xx_sr,
+ .mod_quirks = SYSC_QUIRK_LEGACY_IDLE,
+};
+
+/*
+ * McASP register bits found on omap4 and later
+ */
+static const struct sysc_regbits sysc_regbits_omap4_mcasp = {
+ .dmadisable_shift = -ENODEV,
+ .midle_shift = -ENODEV,
+ .sidle_shift = 0,
+ .clkact_shift = -ENODEV,
+ .enwkup_shift = -ENODEV,
+ .srst_shift = -ENODEV,
+ .emufree_shift = -ENODEV,
+ .autoidle_shift = -ENODEV,
+};
+
+static const struct sysc_capabilities sysc_omap4_mcasp = {
+ .type = TI_SYSC_OMAP4_MCASP,
+ .regbits = &sysc_regbits_omap4_mcasp,
+ .mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
+};
+
+/*
+ * McASP found on dra7 and later
+ */
+static const struct sysc_capabilities sysc_dra7_mcasp = {
+ .type = TI_SYSC_OMAP4_SIMPLE,
+ .regbits = &sysc_regbits_omap4_simple,
+ .mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
+};
+
+/*
+ * FS USB host found on omap4 and later
+ */
+static const struct sysc_regbits sysc_regbits_omap4_usb_host_fs = {
+ .dmadisable_shift = -ENODEV,
+ .midle_shift = -ENODEV,
+ .sidle_shift = 24,
+ .clkact_shift = -ENODEV,
+ .enwkup_shift = 26,
+ .srst_shift = -ENODEV,
+ .emufree_shift = -ENODEV,
+ .autoidle_shift = -ENODEV,
+};
+
+static const struct sysc_capabilities sysc_omap4_usb_host_fs = {
+ .type = TI_SYSC_OMAP4_USB_HOST_FS,
+ .sysc_mask = SYSC_OMAP2_ENAWAKEUP,
+ .regbits = &sysc_regbits_omap4_usb_host_fs,
+};
+
+static const struct sysc_regbits sysc_regbits_dra7_mcan = {
+ .dmadisable_shift = -ENODEV,
+ .midle_shift = -ENODEV,
+ .sidle_shift = -ENODEV,
+ .clkact_shift = -ENODEV,
+ .enwkup_shift = 4,
+ .srst_shift = 0,
+ .emufree_shift = -ENODEV,
+ .autoidle_shift = -ENODEV,
+};
+
+static const struct sysc_capabilities sysc_dra7_mcan = {
+ .type = TI_SYSC_DRA7_MCAN,
+ .sysc_mask = SYSC_DRA7_MCAN_ENAWAKEUP | SYSC_OMAP4_SOFTRESET,
+ .regbits = &sysc_regbits_dra7_mcan,
+ .mod_quirks = SYSS_QUIRK_RESETDONE_INVERTED,
+};
+
+/*
+ * PRUSS found on some AM33xx, AM437x and AM57xx SoCs
+ */
+static const struct sysc_capabilities sysc_pruss = {
+ .type = TI_SYSC_PRUSS,
+ .sysc_mask = SYSC_PRUSS_STANDBY_INIT | SYSC_PRUSS_SUB_MWAIT,
+ .regbits = &sysc_regbits_omap4_simple,
+ .mod_quirks = SYSC_MODULE_QUIRK_PRUSS,
+};
+
+static int sysc_init_pdata(struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
+ struct ti_sysc_module_data *mdata;
+
+ if (!pdata)
+ return 0;
+
+ mdata = devm_kzalloc(ddata->dev, sizeof(*mdata), GFP_KERNEL);
+ if (!mdata)
+ return -ENOMEM;
+
+ if (ddata->legacy_mode) {
+ mdata->name = ddata->legacy_mode;
+ mdata->module_pa = ddata->module_pa;
+ mdata->module_size = ddata->module_size;
+ mdata->offsets = ddata->offsets;
+ mdata->nr_offsets = SYSC_MAX_REGS;
+ mdata->cap = ddata->cap;
+ mdata->cfg = &ddata->cfg;
+ }
+
+ ddata->mdata = mdata;
+
+ return 0;
+}
+
+static int sysc_init_match(struct sysc *ddata)
+{
+ const struct sysc_capabilities *cap;
+
+ cap = of_device_get_match_data(ddata->dev);
+ if (!cap)
+ return -EINVAL;
+
+ ddata->cap = cap;
+ if (ddata->cap)
+ ddata->cfg.quirks |= ddata->cap->mod_quirks;
+
+ return 0;
+}
+
+static void ti_sysc_idle(struct work_struct *work)
+{
+ struct sysc *ddata;
+
+ ddata = container_of(work, struct sysc, idle_work.work);
+
+ /*
+ * One-time decrement of clock usage counts if left on from init.
+ * Note that we disable opt clocks unconditionally in this case
+ * as they are enabled unconditionally during init without
+ * considering sysc_opt_clks_needed() at that point.
+ */
+ if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
+ SYSC_QUIRK_NO_IDLE_ON_INIT)) {
+ sysc_disable_main_clocks(ddata);
+ sysc_disable_opt_clocks(ddata);
+ sysc_clkdm_allow_idle(ddata);
+ }
+
+ /* Keep permanent PM runtime usage count for SYSC_QUIRK_NO_IDLE */
+ if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)
+ return;
+
+ /*
+ * Decrement PM runtime usage count for SYSC_QUIRK_NO_IDLE_ON_INIT
+ * and SYSC_QUIRK_NO_RESET_ON_INIT
+ */
+ if (pm_runtime_active(ddata->dev))
+ pm_runtime_put_sync(ddata->dev);
+}
+
+/*
+ * SoC model and features detection. Only needed for SoCs that need
+ * special handling for quirks; there is no need to list others.
+ */
+static const struct soc_device_attribute sysc_soc_match[] = {
+ SOC_FLAG("OMAP242*", SOC_2420),
+ SOC_FLAG("OMAP243*", SOC_2430),
+ SOC_FLAG("AM35*", SOC_AM35),
+ SOC_FLAG("OMAP3[45]*", SOC_3430),
+ SOC_FLAG("OMAP3[67]*", SOC_3630),
+ SOC_FLAG("OMAP443*", SOC_4430),
+ SOC_FLAG("OMAP446*", SOC_4460),
+ SOC_FLAG("OMAP447*", SOC_4470),
+ SOC_FLAG("OMAP54*", SOC_5430),
+ SOC_FLAG("AM433", SOC_AM3),
+ SOC_FLAG("AM43*", SOC_AM4),
+ SOC_FLAG("DRA7*", SOC_DRA7),
+
+ { /* sentinel */ },
+};
+
+/*
+ * List of SoCs variants with disabled features. By default we assume all
+ * devices in the device tree are available so no need to list those SoCs.
+ */
+static const struct soc_device_attribute sysc_soc_feat_match[] = {
+ /* OMAP3430/3530 and AM3517 variants with some accelerators disabled */
+ SOC_FLAG("AM3505", DIS_SGX),
+ SOC_FLAG("OMAP3525", DIS_SGX),
+ SOC_FLAG("OMAP3515", DIS_IVA | DIS_SGX),
+ SOC_FLAG("OMAP3503", DIS_ISP | DIS_IVA | DIS_SGX),
+
+ /* OMAP3630/DM3730 variants with some accelerators disabled */
+ SOC_FLAG("AM3703", DIS_IVA | DIS_SGX),
+ SOC_FLAG("DM3725", DIS_SGX),
+ SOC_FLAG("OMAP3611", DIS_ISP | DIS_IVA | DIS_SGX),
+ SOC_FLAG("OMAP3615/AM3715", DIS_IVA),
+ SOC_FLAG("OMAP3621", DIS_ISP),
+
+ { /* sentinel */ },
+};
+
+static int sysc_add_disabled(unsigned long base)
+{
+ struct sysc_address *disabled_module;
+
+ disabled_module = kzalloc(sizeof(*disabled_module), GFP_KERNEL);
+ if (!disabled_module)
+ return -ENOMEM;
+
+ disabled_module->base = base;
+
+ mutex_lock(&sysc_soc->list_lock);
+ list_add(&disabled_module->node, &sysc_soc->disabled_modules);
+ mutex_unlock(&sysc_soc->list_lock);
+
+ return 0;
+}
+
+/*
+ * One-time init to detect the booted SoC, disable unavailable features
+ * and initialize list for optional cpu_pm notifier.
+ *
+ * Note that we initialize static data shared across all ti-sysc instances
+ * so ddata is only used for SoC type. This can be called from module_init
+ * once we no longer need to rely on platform data.
+ */
+static int sysc_init_static_data(struct sysc *ddata)
+{
+ const struct soc_device_attribute *match;
+ struct ti_sysc_platform_data *pdata;
+ unsigned long features = 0;
+
+ if (sysc_soc)
+ return 0;
+
+ sysc_soc = kzalloc(sizeof(*sysc_soc), GFP_KERNEL);
+ if (!sysc_soc)
+ return -ENOMEM;
+
+ mutex_init(&sysc_soc->list_lock);
+ INIT_LIST_HEAD(&sysc_soc->disabled_modules);
+ INIT_LIST_HEAD(&sysc_soc->restored_modules);
+ sysc_soc->general_purpose = true;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (pdata && pdata->soc_type_gp)
+ sysc_soc->general_purpose = pdata->soc_type_gp();
+
+ match = soc_device_match(sysc_soc_match);
+ if (match && match->data)
+ sysc_soc->soc = (enum sysc_soc)(uintptr_t)match->data;
+
+ /* Ignore devices that are not available on HS and EMU SoCs */
+ if (!sysc_soc->general_purpose) {
+ switch (sysc_soc->soc) {
+ case SOC_3430 ... SOC_3630:
+ sysc_add_disabled(0x48304000); /* timer12 */
+ break;
+ case SOC_AM3:
+ sysc_add_disabled(0x48310000); /* rng */
+ break;
+ default:
+ break;
+ }
+ }
+
+ match = soc_device_match(sysc_soc_feat_match);
+ if (!match)
+ return 0;
+
+ if (match->data)
+ features = (unsigned long)match->data;
+
+ /*
+ * Add disabled devices to the list based on the module base.
+ * Note that this must be done before we attempt to access the
+ * device and have module revision checks working.
+ */
+ if (features & DIS_ISP)
+ sysc_add_disabled(0x480bd400);
+ if (features & DIS_IVA)
+ sysc_add_disabled(0x5d000000);
+ if (features & DIS_SGX)
+ sysc_add_disabled(0x50000000);
+
+ return 0;
+}
+
+static void sysc_cleanup_static_data(void)
+{
+ struct sysc_module *restored_module;
+ struct sysc_address *disabled_module;
+ struct list_head *pos, *tmp;
+
+ if (!sysc_soc)
+ return;
+
+ if (sysc_soc->nb.notifier_call)
+ cpu_pm_unregister_notifier(&sysc_soc->nb);
+
+ mutex_lock(&sysc_soc->list_lock);
+ list_for_each_safe(pos, tmp, &sysc_soc->restored_modules) {
+ restored_module = list_entry(pos, struct sysc_module, node);
+ list_del(pos);
+ kfree(restored_module);
+ }
+ list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) {
+ disabled_module = list_entry(pos, struct sysc_address, node);
+ list_del(pos);
+ kfree(disabled_module);
+ }
+ mutex_unlock(&sysc_soc->list_lock);
+}
+
+static int sysc_check_disabled_devices(struct sysc *ddata)
+{
+ struct sysc_address *disabled_module;
+ struct list_head *pos;
+ int error = 0;
+
+ mutex_lock(&sysc_soc->list_lock);
+ list_for_each(pos, &sysc_soc->disabled_modules) {
+ disabled_module = list_entry(pos, struct sysc_address, node);
+ if (ddata->module_pa == disabled_module->base) {
+ dev_dbg(ddata->dev, "module disabled for this SoC\n");
+ error = -ENODEV;
+ break;
+ }
+ }
+ mutex_unlock(&sysc_soc->list_lock);
+
+ return error;
+}
+
+/*
+ * Ignore timers tagged with no-reset and no-idle. These are likely in use,
+ * for example by drivers/clocksource/timer-ti-dm-systimer.c. If more checks
+ * are needed, we could also look at the timer register configuration.
+ */
+static int sysc_check_active_timer(struct sysc *ddata)
+{
+ int error;
+
+ if (ddata->cap->type != TI_SYSC_OMAP2_TIMER &&
+ ddata->cap->type != TI_SYSC_OMAP4_TIMER)
+ return 0;
+
+ /*
+ * Quirk for omap3 beagleboard revision A to B4 to use gpt12.
+ * Revision C and later are fixed with commit 23885389dbbb ("ARM:
+ * dts: Fix timer regression for beagleboard revision c"). This all
+ * can be dropped if we stop supporting old beagleboard revisions
+ * A to B4 at some point.
+ */
+ if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35)
+ error = -ENXIO;
+ else
+ error = -EBUSY;
+
+ if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
+ (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
+ return error;
+
+ return 0;
+}
+
+static const struct of_device_id sysc_match_table[] = {
+ { .compatible = "simple-bus", },
+ { /* sentinel */ },
+};
+
+static int sysc_probe(struct platform_device *pdev)
+{
+ struct ti_sysc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct sysc *ddata;
+ int error;
+
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ ddata->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ddata);
+
+ error = sysc_init_static_data(ddata);
+ if (error)
+ return error;
+
+ error = sysc_init_match(ddata);
+ if (error)
+ return error;
+
+ error = sysc_init_dts_quirks(ddata);
+ if (error)
+ return error;
+
+ error = sysc_map_and_check_registers(ddata);
+ if (error)
+ return error;
+
+ error = sysc_init_sysc_mask(ddata);
+ if (error)
+ return error;
+
+ error = sysc_init_idlemodes(ddata);
+ if (error)
+ return error;
+
+ error = sysc_init_syss_mask(ddata);
+ if (error)
+ return error;
+
+ error = sysc_init_pdata(ddata);
+ if (error)
+ return error;
+
+ sysc_init_early_quirks(ddata);
+
+ error = sysc_check_disabled_devices(ddata);
+ if (error)
+ return error;
+
+ error = sysc_check_active_timer(ddata);
+ if (error == -ENXIO)
+ ddata->reserved = true;
+ else if (error)
+ return error;
+
+ error = sysc_get_clocks(ddata);
+ if (error)
+ return error;
+
+ error = sysc_init_resets(ddata);
+ if (error)
+ goto unprepare;
+
+ error = sysc_init_module(ddata);
+ if (error)
+ goto unprepare;
+
+ pm_runtime_enable(ddata->dev);
+ error = pm_runtime_get_sync(ddata->dev);
+ if (error < 0) {
+ pm_runtime_put_noidle(ddata->dev);
+ pm_runtime_disable(ddata->dev);
+ goto unprepare;
+ }
+
+ /* Balance use counts as PM runtime should have enabled all of these */
+ if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
+ reset_control_assert(ddata->rsts);
+
+ if (!(ddata->cfg.quirks &
+ (SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))) {
+ sysc_disable_main_clocks(ddata);
+ sysc_disable_opt_clocks(ddata);
+ sysc_clkdm_allow_idle(ddata);
+ }
+
+ sysc_show_registers(ddata);
+
+ ddata->dev->type = &sysc_device_type;
+
+ if (!ddata->reserved) {
+ error = of_platform_populate(ddata->dev->of_node,
+ sysc_match_table,
+ pdata ? pdata->auxdata : NULL,
+ ddata->dev);
+ if (error)
+ goto err;
+ }
+
+ INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle);
+
+ /* At least earlycon won't survive without deferred idle */
+ if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
+ SYSC_QUIRK_NO_IDLE_ON_INIT |
+ SYSC_QUIRK_NO_RESET_ON_INIT)) {
+ schedule_delayed_work(&ddata->idle_work, 3000);
+ } else {
+ pm_runtime_put(&pdev->dev);
+ }
+
+ if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_CTX_LOST)
+ sysc_add_restored(ddata);
+
+ return 0;
+
+err:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+unprepare:
+ sysc_unprepare(ddata);
+
+ return error;
+}
+
+static int sysc_remove(struct platform_device *pdev)
+{
+ struct sysc *ddata = platform_get_drvdata(pdev);
+ int error;
+
+ /* Device can still be enabled, see deferred idle quirk in probe */
+ if (cancel_delayed_work_sync(&ddata->idle_work))
+ ti_sysc_idle(&ddata->idle_work.work);
+
+ error = pm_runtime_get_sync(ddata->dev);
+ if (error < 0) {
+ pm_runtime_put_noidle(ddata->dev);
+ pm_runtime_disable(ddata->dev);
+ goto unprepare;
+ }
+
+ of_platform_depopulate(&pdev->dev);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ if (!reset_control_status(ddata->rsts))
+ reset_control_assert(ddata->rsts);
+
+unprepare:
+ sysc_unprepare(ddata);
+
+ return 0;
+}
+
+static const struct of_device_id sysc_match[] = {
+ { .compatible = "ti,sysc-omap2", .data = &sysc_omap2, },
+ { .compatible = "ti,sysc-omap2-timer", .data = &sysc_omap2_timer, },
+ { .compatible = "ti,sysc-omap4", .data = &sysc_omap4, },
+ { .compatible = "ti,sysc-omap4-timer", .data = &sysc_omap4_timer, },
+ { .compatible = "ti,sysc-omap4-simple", .data = &sysc_omap4_simple, },
+ { .compatible = "ti,sysc-omap3430-sr", .data = &sysc_34xx_sr, },
+ { .compatible = "ti,sysc-omap3630-sr", .data = &sysc_36xx_sr, },
+ { .compatible = "ti,sysc-omap4-sr", .data = &sysc_omap4_sr, },
+ { .compatible = "ti,sysc-omap3-sham", .data = &sysc_omap3_sham, },
+ { .compatible = "ti,sysc-omap-aes", .data = &sysc_omap3_aes, },
+ { .compatible = "ti,sysc-mcasp", .data = &sysc_omap4_mcasp, },
+ { .compatible = "ti,sysc-dra7-mcasp", .data = &sysc_dra7_mcasp, },
+ { .compatible = "ti,sysc-usb-host-fs",
+ .data = &sysc_omap4_usb_host_fs, },
+ { .compatible = "ti,sysc-dra7-mcan", .data = &sysc_dra7_mcan, },
+ { .compatible = "ti,sysc-pruss", .data = &sysc_pruss, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sysc_match);
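+
+/*
+ * An illustrative sketch of a matching device tree node; the address,
+ * register offsets and ranges below are made up for the example:
+ *
+ * target-module@48060000 {
+ * compatible = "ti,sysc-omap2", "ti,sysc";
+ * reg = <0x48060000 0x4>,
+ * <0x48060010 0x4>,
+ * <0x48060014 0x4>;
+ * reg-names = "rev", "sysc", "syss";
+ * #address-cells = <1>;
+ * #size-cells = <1>;
+ * ranges = <0x0 0x48060000 0x1000>;
+ * };
+ */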
+
+static struct platform_driver sysc_driver = {
+ .probe = sysc_probe,
+ .remove = sysc_remove,
+ .driver = {
+ .name = "ti-sysc",
+ .of_match_table = sysc_match,
+ .pm = &sysc_pm_ops,
+ },
+};
+
+static int __init sysc_init(void)
+{
+ bus_register_notifier(&platform_bus_type, &sysc_nb);
+
+ return platform_driver_register(&sysc_driver);
+}
+module_init(sysc_init);
+
+static void __exit sysc_exit(void)
+{
+ bus_unregister_notifier(&platform_bus_type, &sysc_nb);
+ platform_driver_unregister(&sysc_driver);
+ sysc_cleanup_static_data();
+}
+module_exit(sysc_exit);
+
+MODULE_DESCRIPTION("TI sysc interconnect target driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/ts-nbus.c b/drivers/bus/ts-nbus.c
new file mode 100644
index 000000000..9989ce904
--- /dev/null
+++ b/drivers/bus/ts-nbus.c
@@ -0,0 +1,369 @@
+/*
+ * NBUS driver for TS-4600 based boards
+ *
+ * Copyright (c) 2016 - Savoir-faire Linux
+ * Author: Sebastien Bourdelin <sebastien.bourdelin@savoirfairelinux.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * This driver implements a GPIO bit-banged bus, called the NBUS by Technologic
+ * Systems. It is used to communicate with the peripherals in the FPGA on the
+ * TS-4600 SoM.
+ */
+
+#include <linux/bitops.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/ts-nbus.h>
+
+#define TS_NBUS_DIRECTION_IN 0
+#define TS_NBUS_DIRECTION_OUT 1
+#define TS_NBUS_WRITE_ADR 0
+#define TS_NBUS_WRITE_VAL 1
+
+struct ts_nbus {
+ struct pwm_device *pwm;
+ struct gpio_descs *data;
+ struct gpio_desc *csn;
+ struct gpio_desc *txrx;
+ struct gpio_desc *strobe;
+ struct gpio_desc *ale;
+ struct gpio_desc *rdy;
+ struct mutex lock;
+};
+
+/*
+ * request all gpios required by the bus.
+ */
+static int ts_nbus_init_pdata(struct platform_device *pdev,
+ struct ts_nbus *ts_nbus)
+{
+ ts_nbus->data = devm_gpiod_get_array(&pdev->dev, "ts,data",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ts_nbus->data)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,data-gpio from dts\n");
+ return PTR_ERR(ts_nbus->data);
+ }
+
+ ts_nbus->csn = devm_gpiod_get(&pdev->dev, "ts,csn", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts_nbus->csn)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,csn-gpio from dts\n");
+ return PTR_ERR(ts_nbus->csn);
+ }
+
+ ts_nbus->txrx = devm_gpiod_get(&pdev->dev, "ts,txrx", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts_nbus->txrx)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,txrx-gpio from dts\n");
+ return PTR_ERR(ts_nbus->txrx);
+ }
+
+ ts_nbus->strobe = devm_gpiod_get(&pdev->dev, "ts,strobe", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts_nbus->strobe)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,strobe-gpio from dts\n");
+ return PTR_ERR(ts_nbus->strobe);
+ }
+
+ ts_nbus->ale = devm_gpiod_get(&pdev->dev, "ts,ale", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts_nbus->ale)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,ale-gpio from dts\n");
+ return PTR_ERR(ts_nbus->ale);
+ }
+
+ ts_nbus->rdy = devm_gpiod_get(&pdev->dev, "ts,rdy", GPIOD_IN);
+ if (IS_ERR(ts_nbus->rdy)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,rdy-gpio from dts\n");
+ return PTR_ERR(ts_nbus->rdy);
+ }
+
+ return 0;
+}
+
+/*
+ * the data gpios are used for both reading and writing values, so their
+ * direction must be adjusted accordingly.
+ */
+static void ts_nbus_set_direction(struct ts_nbus *ts_nbus, int direction)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ if (direction == TS_NBUS_DIRECTION_IN)
+ gpiod_direction_input(ts_nbus->data->desc[i]);
+ else
+ /* when used as output, the default state of the
+ * data lines is high */
+ gpiod_direction_output(ts_nbus->data->desc[i], 1);
+ }
+}
+
+/*
+ * reset the bus to its initial state.
+ * The data, csn, strobe and ale lines must be zeroed to let the FPGA know
+ * a new transaction can be processed.
+ */
+static void ts_nbus_reset_bus(struct ts_nbus *ts_nbus)
+{
+ DECLARE_BITMAP(values, 8);
+
+ values[0] = 0;
+
+ gpiod_set_array_value_cansleep(8, ts_nbus->data->desc,
+ ts_nbus->data->info, values);
+ gpiod_set_value_cansleep(ts_nbus->csn, 0);
+ gpiod_set_value_cansleep(ts_nbus->strobe, 0);
+ gpiod_set_value_cansleep(ts_nbus->ale, 0);
+}
+
+/*
+ * let the FPGA know it can process the transaction.
+ */
+static void ts_nbus_start_transaction(struct ts_nbus *ts_nbus)
+{
+ gpiod_set_value_cansleep(ts_nbus->strobe, 1);
+}
+
+/*
+ * read a byte value from the data gpios.
+ * return 0 on success or negative errno on failure.
+ */
+static int ts_nbus_read_byte(struct ts_nbus *ts_nbus, u8 *val)
+{
+ struct gpio_descs *gpios = ts_nbus->data;
+ int ret, i;
+
+ *val = 0;
+ for (i = 0; i < 8; i++) {
+ ret = gpiod_get_value_cansleep(gpios->desc[i]);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ *val |= BIT(i);
+ }
+
+ return 0;
+}
+
+/*
+ * set the data gpios according to the byte value.
+ */
+static void ts_nbus_write_byte(struct ts_nbus *ts_nbus, u8 byte)
+{
+ struct gpio_descs *gpios = ts_nbus->data;
+ DECLARE_BITMAP(values, 8);
+
+ values[0] = byte;
+
+ gpiod_set_array_value_cansleep(8, gpios->desc, gpios->info, values);
+}
+
+/*
+ * reading the bus consists of resetting the bus, then notifying the FPGA to
+ * send the data on the data gpios, and reading the returned value.
+ * return 0 on success or negative errno on failure.
+ */
+static int ts_nbus_read_bus(struct ts_nbus *ts_nbus, u8 *val)
+{
+ ts_nbus_reset_bus(ts_nbus);
+ ts_nbus_start_transaction(ts_nbus);
+
+ return ts_nbus_read_byte(ts_nbus, val);
+}
+
+/*
+ * writing to the bus consists of resetting the bus, defining the type of
+ * command (address/value), writing the data and notifying the FPGA to
+ * retrieve the value from the data gpios.
+ */
+static void ts_nbus_write_bus(struct ts_nbus *ts_nbus, int cmd, u8 val)
+{
+ ts_nbus_reset_bus(ts_nbus);
+
+ if (cmd == TS_NBUS_WRITE_ADR)
+ gpiod_set_value_cansleep(ts_nbus->ale, 1);
+
+ ts_nbus_write_byte(ts_nbus, val);
+ ts_nbus_start_transaction(ts_nbus);
+}
+
+/*
+ * read the value in the FPGA register at the given address.
+ * return 0 on success or negative errno on failure.
+ */
+int ts_nbus_read(struct ts_nbus *ts_nbus, u8 adr, u16 *val)
+{
+ int ret, i;
+ u8 byte;
+
+ /* bus access must be atomic */
+ mutex_lock(&ts_nbus->lock);
+
+ /* set the bus in read mode */
+ gpiod_set_value_cansleep(ts_nbus->txrx, 0);
+
+ /* write address */
+ ts_nbus_write_bus(ts_nbus, TS_NBUS_WRITE_ADR, adr);
+
+ /* set the data gpios direction as input before reading */
+ ts_nbus_set_direction(ts_nbus, TS_NBUS_DIRECTION_IN);
+
+ /* reading value MSB first */
+ do {
+ *val = 0;
+ byte = 0;
+ for (i = 1; i >= 0; i--) {
+ /* read a byte from the bus, leave on error */
+ ret = ts_nbus_read_bus(ts_nbus, &byte);
+ if (ret < 0)
+ goto err;
+
+ /* append the byte read to the final value */
+ *val |= byte << (i * 8);
+ }
+ gpiod_set_value_cansleep(ts_nbus->csn, 1);
+ ret = gpiod_get_value_cansleep(ts_nbus->rdy);
+ } while (ret);
+
+err:
+ /* restore the data gpios direction as output after reading */
+ ts_nbus_set_direction(ts_nbus, TS_NBUS_DIRECTION_OUT);
+
+ mutex_unlock(&ts_nbus->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ts_nbus_read);
+
+/*
+ * write the desired value in the FPGA register at the given address.
+ */
+int ts_nbus_write(struct ts_nbus *ts_nbus, u8 adr, u16 val)
+{
+ int i;
+
+ /* bus access must be atomic */
+ mutex_lock(&ts_nbus->lock);
+
+ /* set the bus in write mode */
+ gpiod_set_value_cansleep(ts_nbus->txrx, 1);
+
+ /* write address */
+ ts_nbus_write_bus(ts_nbus, TS_NBUS_WRITE_ADR, adr);
+
+ /* writing value MSB first */
+ for (i = 1; i >= 0; i--)
+ ts_nbus_write_bus(ts_nbus, TS_NBUS_WRITE_VAL, (u8)(val >> (i * 8)));
+
+ /* wait for completion */
+ gpiod_set_value_cansleep(ts_nbus->csn, 1);
+ while (gpiod_get_value_cansleep(ts_nbus->rdy) != 0) {
+ gpiod_set_value_cansleep(ts_nbus->csn, 0);
+ gpiod_set_value_cansleep(ts_nbus->csn, 1);
+ }
+
+ mutex_unlock(&ts_nbus->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ts_nbus_write);
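+
+/*
+ * Illustrative sketch only: a child driver sitting on the NBUS would get
+ * the bus handle from its parent and use the two exported helpers above.
+ * The register address 0x12 and the helper name are made up for the
+ * example.
+ */
+static int __maybe_unused ts_nbus_example_update(struct device *dev)
+{
+ struct ts_nbus *ts_nbus = dev_get_drvdata(dev->parent);
+ u16 val;
+ int ret;
+
+ /* read-modify-write a 16-bit FPGA register at address 0x12 */
+ ret = ts_nbus_read(ts_nbus, 0x12, &val);
+ if (ret < 0)
+ return ret;
+
+ return ts_nbus_write(ts_nbus, 0x12, val | BIT(0));
+}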
+
+static int ts_nbus_probe(struct platform_device *pdev)
+{
+ struct pwm_device *pwm;
+ struct pwm_args pargs;
+ struct device *dev = &pdev->dev;
+ struct ts_nbus *ts_nbus;
+ int ret;
+
+ ts_nbus = devm_kzalloc(dev, sizeof(*ts_nbus), GFP_KERNEL);
+ if (!ts_nbus)
+ return -ENOMEM;
+
+ mutex_init(&ts_nbus->lock);
+
+ ret = ts_nbus_init_pdata(pdev, ts_nbus);
+ if (ret < 0)
+ return ret;
+
+ pwm = devm_pwm_get(dev, NULL);
+ if (IS_ERR(pwm)) {
+ ret = PTR_ERR(pwm);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "unable to request PWM\n");
+ return ret;
+ }
+
+ pwm_get_args(pwm, &pargs);
+ if (!pargs.period) {
+ dev_err(&pdev->dev, "invalid PWM period\n");
+ return -EINVAL;
+ }
+
+ /*
+ * FIXME: pwm_apply_args() should be removed when switching to
+ * the atomic PWM API.
+ */
+ pwm_apply_args(pwm);
+ ret = pwm_config(pwm, pargs.period, pargs.period);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * we can now start the FPGA and populate the peripherals.
+ */
+ pwm_enable(pwm);
+ ts_nbus->pwm = pwm;
+
+ /*
+ * let the child nodes retrieve this instance of the ts-nbus.
+ */
+ dev_set_drvdata(dev, ts_nbus);
+
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret < 0)
+ return ret;
+
+ dev_info(dev, "initialized\n");
+
+ return 0;
+}
+
+static int ts_nbus_remove(struct platform_device *pdev)
+{
+ struct ts_nbus *ts_nbus = dev_get_drvdata(&pdev->dev);
+
+ /* shutdown the FPGA */
+ mutex_lock(&ts_nbus->lock);
+ pwm_disable(ts_nbus->pwm);
+ mutex_unlock(&ts_nbus->lock);
+
+ return 0;
+}
+
+static const struct of_device_id ts_nbus_of_match[] = {
+ { .compatible = "technologic,ts-nbus", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ts_nbus_of_match);
+
+static struct platform_driver ts_nbus_driver = {
+ .probe = ts_nbus_probe,
+ .remove = ts_nbus_remove,
+ .driver = {
+ .name = "ts_nbus",
+ .of_match_table = ts_nbus_of_match,
+ },
+};
+
+module_platform_driver(ts_nbus_driver);
+
+MODULE_ALIAS("platform:ts_nbus");
+MODULE_AUTHOR("Sebastien Bourdelin <sebastien.bourdelin@savoirfairelinux.com>");
+MODULE_DESCRIPTION("Technologic Systems NBUS");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
new file mode 100644
index 000000000..f70dedace
--- /dev/null
+++ b/drivers/bus/uniphier-system-bus.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ */
+
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+/* System Bus Controller registers */
+#define UNIPHIER_SBC_BASE 0x100 /* base address of bank0 space */
+#define UNIPHIER_SBC_BASE_BE BIT(0) /* bank_enable */
+#define UNIPHIER_SBC_CTRL0 0x200 /* timing parameter 0 of bank0 */
+#define UNIPHIER_SBC_CTRL1 0x204 /* timing parameter 1 of bank0 */
+#define UNIPHIER_SBC_CTRL2 0x208 /* timing parameter 2 of bank0 */
+#define UNIPHIER_SBC_CTRL3 0x20c /* timing parameter 3 of bank0 */
+#define UNIPHIER_SBC_CTRL4 0x300 /* timing parameter 4 of bank0 */
+
+#define UNIPHIER_SBC_STRIDE 0x10 /* register stride to next bank */
+#define UNIPHIER_SBC_NR_BANKS 8 /* number of banks (chip select) */
+#define UNIPHIER_SBC_BASE_DUMMY 0xffffffff /* data to squash bank 0, 1 */
+
+struct uniphier_system_bus_bank {
+ u32 base;
+ u32 end;
+};
+
+struct uniphier_system_bus_priv {
+ struct device *dev;
+ void __iomem *membase;
+ struct uniphier_system_bus_bank bank[UNIPHIER_SBC_NR_BANKS];
+};
+
+static int uniphier_system_bus_add_bank(struct uniphier_system_bus_priv *priv,
+ int bank, u32 addr, u64 paddr, u32 size)
+{
+ u64 end, mask;
+
+ dev_dbg(priv->dev,
+ "range found: bank = %d, addr = %08x, paddr = %08llx, size = %08x\n",
+ bank, addr, paddr, size);
+
+ if (bank >= ARRAY_SIZE(priv->bank)) {
+ dev_err(priv->dev, "unsupported bank number %d\n", bank);
+ return -EINVAL;
+ }
+
+ if (priv->bank[bank].base || priv->bank[bank].end) {
+ dev_err(priv->dev,
+ "range for bank %d has already been specified\n", bank);
+ return -EINVAL;
+ }
+
+ if (paddr > U32_MAX) {
+ dev_err(priv->dev, "base address %llx is too high\n", paddr);
+ return -EINVAL;
+ }
+
+ end = paddr + size;
+
+ if (addr > paddr) {
+ dev_err(priv->dev,
+ "base %08x cannot be mapped to %08llx of parent\n",
+ addr, paddr);
+ return -EINVAL;
+ }
+ paddr -= addr;
+
+ paddr = round_down(paddr, 0x00020000);
+ end = round_up(end, 0x00020000);
+
+ if (end > U32_MAX) {
+ dev_err(priv->dev, "end address %08llx is too high\n", end);
+ return -EINVAL;
+ }
+ mask = paddr ^ (end - 1);
+ mask = roundup_pow_of_two(mask);
+
+ paddr = round_down(paddr, mask);
+ end = round_up(end, mask);
+
+ priv->bank[bank].base = paddr;
+ priv->bank[bank].end = end;
+
+ dev_dbg(priv->dev, "range added: bank = %d, addr = %08x, end = %08x\n",
+ bank, priv->bank[bank].base, priv->bank[bank].end);
+
+ return 0;
+}
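+
+/*
+ * Worked example (illustrative values): for a bank mapped at parent
+ * address 0x42000000 with addr = 0 and size = 0x01000000, end is
+ * 0x43000000 and mask = 0x42000000 ^ 0x42ffffff = 0x00ffffff, rounded
+ * up to 0x01000000. The bank then stays aligned to the power-of-two
+ * window [0x42000000, 0x43000000).
+ */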
+
+static int uniphier_system_bus_check_overlap(
+ const struct uniphier_system_bus_priv *priv)
+{
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
+ for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) {
+ if (priv->bank[i].end > priv->bank[j].base &&
+ priv->bank[i].base < priv->bank[j].end) {
+ dev_err(priv->dev,
+ "region overlap between bank%d and bank%d\n",
+ i, j);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void uniphier_system_bus_check_boot_swap(
+ struct uniphier_system_bus_priv *priv)
+{
+ void __iomem *base_reg = priv->membase + UNIPHIER_SBC_BASE;
+ int is_swapped;
+
+ is_swapped = !(readl(base_reg) & UNIPHIER_SBC_BASE_BE);
+
+ dev_dbg(priv->dev, "Boot Swap: %s\n", is_swapped ? "on" : "off");
+
+ /*
+ * If BOOT_SWAP was asserted on power-on-reset, CS0 and CS1 are
+ * swapped. In this case, bank0 and bank1 should be swapped as well.
+ */
+ if (is_swapped)
+ swap(priv->bank[0], priv->bank[1]);
+}
+
+static void uniphier_system_bus_set_reg(
+ const struct uniphier_system_bus_priv *priv)
+{
+ void __iomem *base_reg = priv->membase + UNIPHIER_SBC_BASE;
+ u32 base, end, mask, val;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
+ base = priv->bank[i].base;
+ end = priv->bank[i].end;
+
+ if (base == end) {
+ /*
+ * If SBC_BASE0 or SBC_BASE1 is set to zero, the access
+ * to anywhere in the system bus space is routed to
+ * bank 0 (if boot swap is off) or bank 1 (if boot swap
+ * is on). It means that CPUs cannot get access to
+ * bank 2 or later. In other words, bank 0/1 cannot
+ * be disabled even if its bank_enable bit is cleared.
+ * This seems odd, but it is how this hardware works.
+ * As a workaround, dummy data (0xffffffff) should be
+ * set when bank 0/1 is unused. As for bank 2 and
+ * later, they can simply be disabled by clearing the
+ * bank_enable bit.
+ */
+ if (i < 2)
+ val = UNIPHIER_SBC_BASE_DUMMY;
+ else
+ val = 0;
+ } else {
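+			/*
+			 * SBC_BASE layout, per the field operations below:
+			 * bits 31..17 hold the base address, bits 15..1 the
+			 * inverted mask, plus the bank_enable flag.  E.g. an
+			 * illustrative bank [0x42000000, 0x43000000) encodes
+			 * as 0x4200ff00 | UNIPHIER_SBC_BASE_BE.
+			 */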
+ mask = base ^ (end - 1);
+
+ val = base & 0xfffe0000;
+ val |= (~mask >> 16) & 0xfffe;
+ val |= UNIPHIER_SBC_BASE_BE;
+ }
+ dev_dbg(priv->dev, "SBC_BASE[%d] = 0x%08x\n", i, val);
+
+ writel(val, base_reg + UNIPHIER_SBC_STRIDE * i);
+ }
+}
+
+static int uniphier_system_bus_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_system_bus_priv *priv;
+ const __be32 *ranges;
+ u32 cells, addr, size;
+ u64 paddr;
+ int pna, bank, rlen, rone, ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->membase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->membase))
+ return PTR_ERR(priv->membase);
+
+ priv->dev = dev;
+
+ pna = of_n_addr_cells(dev->of_node);
+
+ ret = of_property_read_u32(dev->of_node, "#address-cells", &cells);
+ if (ret) {
+ dev_err(dev, "failed to get #address-cells\n");
+ return ret;
+ }
+ if (cells != 2) {
+ dev_err(dev, "#address-cells must be 2\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "#size-cells", &cells);
+ if (ret) {
+ dev_err(dev, "failed to get #size-cells\n");
+ return ret;
+ }
+ if (cells != 1) {
+ dev_err(dev, "#size-cells must be 1\n");
+ return -EINVAL;
+ }
+
+ ranges = of_get_property(dev->of_node, "ranges", &rlen);
+ if (!ranges) {
+ dev_err(dev, "failed to get ranges property\n");
+ return -ENOENT;
+ }
+
+ rlen /= sizeof(*ranges);
+ rone = pna + 2;
+
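+	/*
+	 * Each "ranges" entry is <bank bank-offset parent-address size>:
+	 * two address cells, pna parent address cells, one size cell.
+	 * Illustrative entry: <1 0x00000000 0x42000000 0x02000000> maps
+	 * 32 MiB of bank 1 at parent address 0x42000000.
+	 */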
+ for (; rlen >= rone; rlen -= rone) {
+ bank = be32_to_cpup(ranges++);
+ addr = be32_to_cpup(ranges++);
+ paddr = of_translate_address(dev->of_node, ranges);
+ if (paddr == OF_BAD_ADDR)
+ return -EINVAL;
+ ranges += pna;
+ size = be32_to_cpup(ranges++);
+
+ ret = uniphier_system_bus_add_bank(priv, bank, addr,
+ paddr, size);
+ if (ret)
+ return ret;
+ }
+
+ ret = uniphier_system_bus_check_overlap(priv);
+ if (ret)
+ return ret;
+
+ uniphier_system_bus_check_boot_swap(priv);
+
+ uniphier_system_bus_set_reg(priv);
+
+ platform_set_drvdata(pdev, priv);
+
+ /* Now, the bus is configured. Populate platform_devices below it */
+ return of_platform_default_populate(dev->of_node, NULL, dev);
+}
+
+static int __maybe_unused uniphier_system_bus_resume(struct device *dev)
+{
+ uniphier_system_bus_set_reg(dev_get_drvdata(dev));
+
+ return 0;
+}
+
+static const struct dev_pm_ops uniphier_system_bus_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(NULL, uniphier_system_bus_resume)
+};
+
+static const struct of_device_id uniphier_system_bus_match[] = {
+ { .compatible = "socionext,uniphier-system-bus" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_system_bus_match);
+
+static struct platform_driver uniphier_system_bus_driver = {
+ .probe = uniphier_system_bus_probe,
+ .driver = {
+ .name = "uniphier-system-bus",
+ .of_match_table = uniphier_system_bus_match,
+ .pm = &uniphier_system_bus_pm_ops,
+ },
+};
+module_platform_driver(uniphier_system_bus_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier System Bus driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
new file mode 100644
index 000000000..a58ac0c8e
--- /dev/null
+++ b/drivers/bus/vexpress-config.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 ARM Limited
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/vexpress.h>
+
+#define SYS_MISC 0x0
+#define SYS_MISC_MASTERSITE (1 << 14)
+
+#define SYS_PROCID0 0x24
+#define SYS_PROCID1 0x28
+#define SYS_HBI_MASK 0xfff
+#define SYS_PROCIDx_HBI_SHIFT 0
+
+#define SYS_CFGDATA 0x40
+
+#define SYS_CFGCTRL 0x44
+#define SYS_CFGCTRL_START (1 << 31)
+#define SYS_CFGCTRL_WRITE (1 << 30)
+#define SYS_CFGCTRL_DCC(n) (((n) & 0xf) << 26)
+#define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20)
+#define SYS_CFGCTRL_SITE(n) (((n) & 0x3) << 16)
+#define SYS_CFGCTRL_POSITION(n) (((n) & 0xf) << 12)
+#define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0)
+
+#define SYS_CFGSTAT 0x48
+#define SYS_CFGSTAT_ERR (1 << 1)
+#define SYS_CFGSTAT_COMPLETE (1 << 0)
+
+#define VEXPRESS_SITE_MB 0
+#define VEXPRESS_SITE_DB1 1
+#define VEXPRESS_SITE_DB2 2
+#define VEXPRESS_SITE_MASTER 0xf
+
+struct vexpress_syscfg {
+ struct device *dev;
+ void __iomem *base;
+ struct list_head funcs;
+};
+
+struct vexpress_syscfg_func {
+ struct list_head list;
+ struct vexpress_syscfg *syscfg;
+ struct regmap *regmap;
+ int num_templates;
+ u32 template[]; /* Keep it last! */
+};
+
+struct vexpress_config_bridge_ops {
+ struct regmap * (*regmap_init)(struct device *dev, void *context);
+ void (*regmap_exit)(struct regmap *regmap, void *context);
+};
+
+struct vexpress_config_bridge {
+ struct vexpress_config_bridge_ops *ops;
+ void *context;
+};
+
+
+static DEFINE_MUTEX(vexpress_config_mutex);
+static u32 vexpress_config_site_master = VEXPRESS_SITE_MASTER;
+
+
+static void vexpress_config_set_master(u32 site)
+{
+ vexpress_config_site_master = site;
+}
+
+static void vexpress_config_lock(void *arg)
+{
+ mutex_lock(&vexpress_config_mutex);
+}
+
+static void vexpress_config_unlock(void *arg)
+{
+ mutex_unlock(&vexpress_config_mutex);
+}
+
+
+static void vexpress_config_find_prop(struct device_node *node,
+ const char *name, u32 *val)
+{
+ /* Default value */
+ *val = 0;
+
+ of_node_get(node);
+ while (node) {
+ if (of_property_read_u32(node, name, val) == 0) {
+ of_node_put(node);
+ return;
+ }
+ node = of_get_next_parent(node);
+ }
+}
+
+static int vexpress_config_get_topo(struct device_node *node, u32 *site,
+ u32 *position, u32 *dcc)
+{
+ vexpress_config_find_prop(node, "arm,vexpress,site", site);
+ if (*site == VEXPRESS_SITE_MASTER)
+ *site = vexpress_config_site_master;
+ if (WARN_ON(vexpress_config_site_master == VEXPRESS_SITE_MASTER))
+ return -EINVAL;
+ vexpress_config_find_prop(node, "arm,vexpress,position", position);
+ vexpress_config_find_prop(node, "arm,vexpress,dcc", dcc);
+
+ return 0;
+}
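+
+/*
+ * These properties are inherited: vexpress_config_find_prop() walks up
+ * the parents until a node carries them.  An illustrative layout
+ * (hypothetical node names):
+ *
+ *	bus {
+ *		arm,vexpress,site = <1>;
+ *
+ *		energy {
+ *			arm,vexpress-sysreg,func = <13 0>; // inherits site 1
+ *		};
+ *	};
+ */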
+
+
+static void vexpress_config_devres_release(struct device *dev, void *res)
+{
+ struct vexpress_config_bridge *bridge = dev_get_drvdata(dev->parent);
+ struct regmap *regmap = res;
+
+ bridge->ops->regmap_exit(regmap, bridge->context);
+}
+
+struct regmap *devm_regmap_init_vexpress_config(struct device *dev)
+{
+ struct vexpress_config_bridge *bridge;
+ struct regmap *regmap;
+ struct regmap **res;
+
+ bridge = dev_get_drvdata(dev->parent);
+ if (WARN_ON(!bridge))
+ return ERR_PTR(-EINVAL);
+
+ res = devres_alloc(vexpress_config_devres_release, sizeof(*res),
+ GFP_KERNEL);
+ if (!res)
+ return ERR_PTR(-ENOMEM);
+
+ regmap = (bridge->ops->regmap_init)(dev, bridge->context);
+ if (IS_ERR(regmap)) {
+ devres_free(res);
+ return regmap;
+ }
+
+ *res = regmap;
+ devres_add(dev, res);
+
+ return regmap;
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_vexpress_config);
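+
+/*
+ * Sketch of typical consumer usage (variable names and the index are
+ * hypothetical):
+ *
+ *	struct regmap *map = devm_regmap_init_vexpress_config(dev);
+ *
+ *	if (IS_ERR(map))
+ *		return PTR_ERR(map);
+ *	regmap_read(map, 0, &val);	// index 0 = first template entry
+ *
+ * The regmap is released through devres when "dev" is unbound.
+ */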
+
+static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
+ int index, bool write, u32 *data)
+{
+ struct vexpress_syscfg *syscfg = func->syscfg;
+ u32 command, status;
+ int tries;
+ long timeout;
+
+ if (WARN_ON(index >= func->num_templates))
+ return -EINVAL;
+
+ command = readl(syscfg->base + SYS_CFGCTRL);
+ if (WARN_ON(command & SYS_CFGCTRL_START))
+ return -EBUSY;
+
+ command = func->template[index];
+ command |= SYS_CFGCTRL_START;
+ command |= write ? SYS_CFGCTRL_WRITE : 0;
+
+ /* Use a canary for reads */
+ if (!write)
+ *data = 0xdeadbeef;
+
+ dev_dbg(syscfg->dev, "func %p, command %x, data %x\n",
+ func, command, *data);
+ writel(*data, syscfg->base + SYS_CFGDATA);
+ writel(0, syscfg->base + SYS_CFGSTAT);
+ writel(command, syscfg->base + SYS_CFGCTRL);
+ mb();
+
+ /* The operation can take ages... Go to sleep, 100us initially */
+ tries = 100;
+ timeout = 100;
+ do {
+ if (!irqs_disabled()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(timeout));
+ if (signal_pending(current))
+ return -EINTR;
+ } else {
+ udelay(timeout);
+ }
+
+ status = readl(syscfg->base + SYS_CFGSTAT);
+ if (status & SYS_CFGSTAT_ERR)
+ return -EFAULT;
+
+ if (timeout > 20)
+ timeout -= 20;
+ } while (--tries && !(status & SYS_CFGSTAT_COMPLETE));
+ if (WARN_ON_ONCE(!tries))
+ return -ETIMEDOUT;
+
+ if (!write) {
+ *data = readl(syscfg->base + SYS_CFGDATA);
+ dev_dbg(syscfg->dev, "func %p, read data %x\n", func, *data);
+ }
+
+ return 0;
+}
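+
+/*
+ * The command word is the per-index template (DCC/site/position/
+ * function/device, built in vexpress_syscfg_regmap_init() below) with
+ * SYS_CFGCTRL_START and, for writes, SYS_CFGCTRL_WRITE ORed in.
+ * Illustrative read of function 13, device 0 on site 1:
+ * START | SITE(1) | FUNC(13) | DEVICE(0) = 0x80d10000.
+ */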
+
+static int vexpress_syscfg_read(void *context, unsigned int index,
+ unsigned int *val)
+{
+ struct vexpress_syscfg_func *func = context;
+
+ return vexpress_syscfg_exec(func, index, false, val);
+}
+
+static int vexpress_syscfg_write(void *context, unsigned int index,
+ unsigned int val)
+{
+ struct vexpress_syscfg_func *func = context;
+
+ return vexpress_syscfg_exec(func, index, true, &val);
+}
+
+static struct regmap_config vexpress_syscfg_regmap_config = {
+ .lock = vexpress_config_lock,
+ .unlock = vexpress_config_unlock,
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_read = vexpress_syscfg_read,
+ .reg_write = vexpress_syscfg_write,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
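+
+/*
+ * With reg_read/reg_write callbacks the regmap core does not format
+ * addresses itself; the "register" is simply the index into the func's
+ * template array, so reg_bits/val_bits above are nominal.
+ */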
+
+
+static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
+ void *context)
+{
+ int err;
+ struct vexpress_syscfg *syscfg = context;
+ struct vexpress_syscfg_func *func;
+ struct property *prop;
+ const __be32 *val = NULL;
+ __be32 energy_quirk[4];
+ int num;
+ u32 site, position, dcc;
+ int i;
+
+ err = vexpress_config_get_topo(dev->of_node, &site,
+ &position, &dcc);
+ if (err)
+ return ERR_PTR(err);
+
+ prop = of_find_property(dev->of_node,
+ "arm,vexpress-sysreg,func", NULL);
+ if (!prop)
+ return ERR_PTR(-EINVAL);
+
+ num = prop->length / sizeof(u32) / 2;
+ val = prop->value;
+
+ /*
+ * "arm,vexpress-energy" function used to be described
+ * by its first device only, now it requires both
+ */
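+	/* Illustratively, a single pair <13 0> expands to <13 0>, <13 1>. */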
+ if (num == 1 && of_device_is_compatible(dev->of_node,
+ "arm,vexpress-energy")) {
+ num = 2;
+ energy_quirk[0] = *val;
+ energy_quirk[2] = *val++;
+ energy_quirk[1] = *val;
+ energy_quirk[3] = cpu_to_be32(be32_to_cpup(val) + 1);
+ val = energy_quirk;
+ }
+
+ func = kzalloc(struct_size(func, template, num), GFP_KERNEL);
+ if (!func)
+ return ERR_PTR(-ENOMEM);
+
+ func->syscfg = syscfg;
+ func->num_templates = num;
+
+ for (i = 0; i < num; i++) {
+ u32 function, device;
+
+ function = be32_to_cpup(val++);
+ device = be32_to_cpup(val++);
+
+ dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n",
+ func, site, position, dcc,
+ function, device);
+
+ func->template[i] = SYS_CFGCTRL_DCC(dcc);
+ func->template[i] |= SYS_CFGCTRL_SITE(site);
+ func->template[i] |= SYS_CFGCTRL_POSITION(position);
+ func->template[i] |= SYS_CFGCTRL_FUNC(function);
+ func->template[i] |= SYS_CFGCTRL_DEVICE(device);
+ }
+
+ vexpress_syscfg_regmap_config.max_register = num - 1;
+
+ func->regmap = regmap_init(dev, NULL, func,
+ &vexpress_syscfg_regmap_config);
+
+ if (IS_ERR(func->regmap)) {
+ void *err = func->regmap;
+
+ kfree(func);
+ return err;
+ }
+
+ list_add(&func->list, &syscfg->funcs);
+
+ return func->regmap;
+}
+
+static void vexpress_syscfg_regmap_exit(struct regmap *regmap, void *context)
+{
+ struct vexpress_syscfg *syscfg = context;
+ struct vexpress_syscfg_func *func, *tmp;
+
+ regmap_exit(regmap);
+
+ list_for_each_entry_safe(func, tmp, &syscfg->funcs, list) {
+ if (func->regmap == regmap) {
+			list_del(&func->list);
+ kfree(func);
+ break;
+ }
+ }
+}
+
+static struct vexpress_config_bridge_ops vexpress_syscfg_bridge_ops = {
+ .regmap_init = vexpress_syscfg_regmap_init,
+ .regmap_exit = vexpress_syscfg_regmap_exit,
+};
+
+
+static int vexpress_syscfg_probe(struct platform_device *pdev)
+{
+ struct vexpress_syscfg *syscfg;
+ struct resource *res;
+ struct vexpress_config_bridge *bridge;
+ struct device_node *node;
+ int master;
+ u32 dt_hbi;
+
+ syscfg = devm_kzalloc(&pdev->dev, sizeof(*syscfg), GFP_KERNEL);
+ if (!syscfg)
+ return -ENOMEM;
+ syscfg->dev = &pdev->dev;
+ INIT_LIST_HEAD(&syscfg->funcs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ syscfg->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(syscfg->base))
+ return PTR_ERR(syscfg->base);
+
+ bridge = devm_kmalloc(&pdev->dev, sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->ops = &vexpress_syscfg_bridge_ops;
+ bridge->context = syscfg;
+
+ dev_set_drvdata(&pdev->dev, bridge);
+
+ master = readl(syscfg->base + SYS_MISC) & SYS_MISC_MASTERSITE ?
+ VEXPRESS_SITE_DB2 : VEXPRESS_SITE_DB1;
+ vexpress_config_set_master(master);
+
+ /* Confirm board type against DT property, if available */
+ if (of_property_read_u32(of_root, "arm,hbi", &dt_hbi) == 0) {
+ u32 id = readl(syscfg->base + (master == VEXPRESS_SITE_DB1 ?
+ SYS_PROCID0 : SYS_PROCID1));
+ u32 hbi = (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK;
+
+ if (WARN_ON(dt_hbi != hbi))
+			dev_warn(&pdev->dev, "DT HBI (%x) does not match hardware (%x)!\n",
+				 dt_hbi, hbi);
+ }
+
+	for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") {
+		struct device_node *bridge_np;
+
+		bridge_np = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
+		/* Only the pointer value is compared; drop the reference now */
+		of_node_put(bridge_np);
+		if (bridge_np != pdev->dev.parent->of_node)
+			continue;
+
+		of_platform_populate(node, NULL, NULL, &pdev->dev);
+	}
+
+ return 0;
+}
+
+static const struct platform_device_id vexpress_syscfg_id_table[] = {
+ { "vexpress-syscfg", },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, vexpress_syscfg_id_table);
+
+static struct platform_driver vexpress_syscfg_driver = {
+ .driver.name = "vexpress-syscfg",
+ .id_table = vexpress_syscfg_id_table,
+ .probe = vexpress_syscfg_probe,
+};
+module_platform_driver(vexpress_syscfg_driver);
+MODULE_LICENSE("GPL v2");