path: root/drivers/bus
author  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit  ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree  b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/bus
parent  Initial commit. (diff)
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/bus')
-rw-r--r--  drivers/bus/Kconfig  256
-rw-r--r--  drivers/bus/Makefile  43
-rw-r--r--  drivers/bus/arm-cci.c  587
-rw-r--r--  drivers/bus/arm-integrator-lm.c  128
-rw-r--r--  drivers/bus/brcmstb_gisb.c  550
-rw-r--r--  drivers/bus/bt1-apb.c  418
-rw-r--r--  drivers/bus/bt1-axi.c  311
-rw-r--r--  drivers/bus/da8xx-mstpri.c  264
-rw-r--r--  drivers/bus/fsl-mc/Kconfig  23
-rw-r--r--  drivers/bus/fsl-mc/Makefile  22
-rw-r--r--  drivers/bus/fsl-mc/dpbp.c  185
-rw-r--r--  drivers/bus/fsl-mc/dpcon.c  221
-rw-r--r--  drivers/bus/fsl-mc/dpmcp.c  99
-rw-r--r--  drivers/bus/fsl-mc/dprc-driver.c  889
-rw-r--r--  drivers/bus/fsl-mc/dprc.c  704
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-allocator.c  663
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-bus.c  1303
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-msi.c  233
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-private.h  695
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-uapi.c  597
-rw-r--r--  drivers/bus/fsl-mc/mc-io.c  285
-rw-r--r--  drivers/bus/fsl-mc/mc-sys.c  297
-rw-r--r--  drivers/bus/fsl-mc/obj-api.c  103
-rw-r--r--  drivers/bus/hisi_lpc.c  696
-rw-r--r--  drivers/bus/imx-weim.c  413
-rw-r--r--  drivers/bus/intel-ixp4xx-eb.c  425
-rw-r--r--  drivers/bus/mhi/Kconfig  9
-rw-r--r--  drivers/bus/mhi/Makefile  5
-rw-r--r--  drivers/bus/mhi/common.h  326
-rw-r--r--  drivers/bus/mhi/ep/Kconfig  10
-rw-r--r--  drivers/bus/mhi/ep/Makefile  2
-rw-r--r--  drivers/bus/mhi/ep/internal.h  218
-rw-r--r--  drivers/bus/mhi/ep/main.c  1670
-rw-r--r--  drivers/bus/mhi/ep/mmio.c  273
-rw-r--r--  drivers/bus/mhi/ep/ring.c  212
-rw-r--r--  drivers/bus/mhi/ep/sm.c  154
-rw-r--r--  drivers/bus/mhi/host/Kconfig  31
-rw-r--r--  drivers/bus/mhi/host/Makefile  6
-rw-r--r--  drivers/bus/mhi/host/boot.c  558
-rw-r--r--  drivers/bus/mhi/host/debugfs.c  413
-rw-r--r--  drivers/bus/mhi/host/init.c  1464
-rw-r--r--  drivers/bus/mhi/host/internal.h  383
-rw-r--r--  drivers/bus/mhi/host/main.c  1693
-rw-r--r--  drivers/bus/mhi/host/pci_generic.c  1292
-rw-r--r--  drivers/bus/mhi/host/pm.c  1283
-rw-r--r--  drivers/bus/mips_cdmm.c  698
-rw-r--r--  drivers/bus/moxtet.c  889
-rw-r--r--  drivers/bus/mvebu-mbus.c  1316
-rw-r--r--  drivers/bus/omap-ocp2scp.c  118
-rw-r--r--  drivers/bus/omap_l3_noc.c  374
-rw-r--r--  drivers/bus/omap_l3_noc.h  493
-rw-r--r--  drivers/bus/omap_l3_smx.c  301
-rw-r--r--  drivers/bus/omap_l3_smx.h  324
-rw-r--r--  drivers/bus/qcom-ebi2.c  405
-rw-r--r--  drivers/bus/qcom-ssc-block-bus.c  388
-rw-r--r--  drivers/bus/simple-pm-bus.c  143
-rw-r--r--  drivers/bus/sun50i-de2.c  47
-rw-r--r--  drivers/bus/sunxi-rsb.c  882
-rw-r--r--  drivers/bus/tegra-aconnect.c  120
-rw-r--r--  drivers/bus/tegra-gmi.c  319
-rw-r--r--  drivers/bus/ti-pwmss.c  55
-rw-r--r--  drivers/bus/ti-sysc.c  3477
-rw-r--r--  drivers/bus/ts-nbus.c  366
-rw-r--r--  drivers/bus/uniphier-system-bus.c  251
-rw-r--r--  drivers/bus/vexpress-config.c  417
65 files changed, 31795 insertions, 0 deletions
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
new file mode 100644
index 0000000000..c98dd6ca26
--- /dev/null
+++ b/drivers/bus/Kconfig
@@ -0,0 +1,256 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Bus Devices
+#
+
+menu "Bus devices"
+
+config ARM_CCI
+ bool
+
+config ARM_CCI400_COMMON
+ bool
+ select ARM_CCI
+
+config ARM_CCI400_PORT_CTRL
+ bool
+ depends on ARM && OF && CPU_V7
+ select ARM_CCI400_COMMON
+ help
+ Low level power management driver for CCI400 cache coherent
+ interconnect for ARM platforms.
+
+config ARM_INTEGRATOR_LM
+ bool "ARM Integrator Logic Module bus"
+ depends on HAS_IOMEM
+ depends on ARCH_INTEGRATOR || COMPILE_TEST
+ default ARCH_INTEGRATOR
+ help
+ Say y here to enable support for the ARM Logic Module bus
+ found on the ARM Integrator AP (Application Platform).
+
+config BRCMSTB_GISB_ARB
+ tristate "Broadcom STB GISB bus arbiter"
+ depends on ARM || ARM64 || MIPS
+ default ARCH_BRCMSTB || BMIPS_GENERIC
+ help
+ Driver for the Broadcom Set Top Box System-on-a-chip internal bus
+ arbiter. This driver provides timeout and target abort error handling
+ and internal bus master decoding.
+
+config BT1_APB
+ bool "Baikal-T1 APB-bus driver"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ The Baikal-T1 AXI-APB bridge is used to access the SoC subsystem CSRs.
+ IO requests are routed to this bus by means of the DW AMBA 3 AXI
+ Interconnect. In case of an APB protocol collision or a slave device
+ not responding within the timeout, an IRQ is raised and the erroneous
+ address is reported by the APB terminator (APB Errors Handler Block).
+ This driver provides the interrupt handler that detects the erroneous
+ address, prints an error message about the address fault and updates
+ an error counter. The counter and the APB-bus operation timeout can
+ be accessed via the corresponding sysfs nodes.
+
+config BT1_AXI
+ bool "Baikal-T1 AXI-bus driver"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ The AXI3 bus is the main communication bus connecting all high-speed
+ peripheral IP-cores with the RAM controller and with the MIPS P5600
+ cores on the Baikal-T1 SoC. Traffic arbitration is done by means of
+ the DW AMBA 3 AXI Interconnect (the so-called AXI Main Interconnect),
+ which routes IO requests from one SoC block to another. This driver
+ provides a way to detect bus protocol errors and non-responding
+ devices by means of an errors handler block (EHB) embedded on top of
+ the interconnect. AXI Interconnect QoS arbitration tuning is
+ currently unsupported.
+
+config MOXTET
+ tristate "CZ.NIC Turris Mox module configuration bus"
+ depends on SPI_MASTER && OF
+ help
+ Say yes here to add support for the module configuration bus found
+ on CZ.NIC's Turris Mox. This is needed for the ability to discover
+ the order in which the modules are connected and to get/set some of
+ their settings. For example, the GPIOs on the Mox SFP module are
+ configured through this bus.
+
+config HISILICON_LPC
+ bool "Support for ISA I/O space on HiSilicon Hip06/7"
+ depends on (ARM64 && ARCH_HISI) || (COMPILE_TEST && !ALPHA && !HEXAGON && !PARISC)
+ depends on HAS_IOPORT
+ select INDIRECT_PIO if ARM64
+ help
+ Driver to enable I/O access to devices attached to the Low Pin
+ Count bus on the HiSilicon Hip06/7 SoC.
+
+config IMX_WEIM
+ bool "Freescale EIM DRIVER"
+ depends on ARCH_MXC
+ help
+ Driver for i.MX WEIM controller.
+ The WEIM (Wireless External Interface Module) works like a bus.
+ You can attach many different devices to it, such as NOR flash or
+ OneNAND.
+
+config INTEL_IXP4XX_EB
+ bool "Intel IXP4xx expansion bus interface driver"
+ depends on HAS_IOMEM
+ depends on ARCH_IXP4XX || COMPILE_TEST
+ default ARCH_IXP4XX
+ select MFD_SYSCON
+ help
+ Driver for the Intel IXP4xx expansion bus interface. The driver is
+ needed to set up various chip select configuration parameters before
+ devices on the expansion bus can be discovered.
+
+config MIPS_CDMM
+ bool "MIPS Common Device Memory Map (CDMM) Driver"
+ depends on CPU_MIPSR2 || CPU_MIPSR5
+ help
+ Driver needed for the MIPS Common Device Memory Map bus in MIPS
+ cores. This bus is for per-CPU tightly coupled devices such as the
+ Fast Debug Channel (FDC).
+
+ For this to work, either your bootloader needs to enable the CDMM
+ region at an unused physical address on the boot CPU, or else your
+ platform code needs to implement mips_cdmm_phys_base() (see
+ asm/cdmm.h).
+
+config MVEBU_MBUS
+ bool
+ depends on PLAT_ORION
+ help
+ Driver needed for the MBus configuration on Marvell EBU SoCs
+ (Kirkwood, Dove, Orion5x, MV78XX0 and Armada 370/XP).
+
+config OMAP_INTERCONNECT
+ tristate "OMAP INTERCONNECT DRIVER"
+ depends on ARCH_OMAP2PLUS
+
+ help
+ Driver to enable OMAP interconnect error handling.
+
+config OMAP_OCP2SCP
+ tristate "OMAP OCP2SCP DRIVER"
+ depends on ARCH_OMAP2PLUS
+ help
+ Driver to enable the ocp2scp module, which transforms the OCP
+ interface protocol to the SCP protocol. In OMAP4, the USB PHY is
+ connected via OCP2SCP, and in OMAP5 both the USB PHY and the SATA
+ PHY are connected via OCP2SCP.
+
+config QCOM_EBI2
+ bool "Qualcomm External Bus Interface 2 (EBI2)"
+ depends on HAS_IOMEM
+ depends on ARCH_QCOM || COMPILE_TEST
+ default ARCH_QCOM
+ help
+ Say y here to enable support for the Qualcomm External Bus
+ Interface 2, which can be used to connect things like NAND Flash,
+ SRAM, ethernet adapters, FPGAs and LCD displays.
+
+config QCOM_SSC_BLOCK_BUS
+ bool "Qualcomm SSC Block Bus Init Driver"
+ depends on ARCH_QCOM
+ help
+ Say y here to enable support for initializing the bus that connects
+ the SSC block's internal bus to the cNoC (configuration NoC) on
+ (some) Qualcomm SoCs.
+ The SSC (Snapdragon Sensor Core) block contains a gpio controller,
+ i2c/spi/uart controllers, a hexagon core, and a clock controller
+ which provides clocks for the above.
+
+config SUN50I_DE2_BUS
+ bool "Allwinner A64 DE2 Bus Driver"
+ default ARM64
+ depends on ARCH_SUNXI
+ select SUNXI_SRAM
+ help
+ Say y here to enable support for the Allwinner A64 DE2 bus driver.
+ It's mostly transparent, but an SRAM region needs to be claimed in
+ the SRAM controller to make all the blocks in the DE2 part
+ accessible.
+
+config SUNXI_RSB
+ tristate "Allwinner sunXi Reduced Serial Bus Driver"
+ default MACH_SUN8I || MACH_SUN9I || ARM64
+ depends on ARCH_SUNXI
+ select REGMAP
+ help
+ Say y here to enable support for Allwinner's Reduced Serial Bus
+ (RSB). This controller is responsible for communicating with
+ various RSB-based devices, such as AXP223 and AXP8XX PMICs, and
+ AC100/AC200 ICs.
+
+config TEGRA_ACONNECT
+ tristate "Tegra ACONNECT Bus Driver"
+ depends on ARCH_TEGRA_210_SOC
+ depends on OF && PM
+ help
+ Driver for the Tegra ACONNECT bus which is used to interface with
+ the devices inside the Audio Processing Engine (APE) for Tegra210.
+
+config TEGRA_GMI
+ tristate "Tegra Generic Memory Interface bus driver"
+ depends on ARCH_TEGRA
+ help
+ Driver for the Tegra Generic Memory Interface bus which can be used
+ to attach devices such as NOR, UART, FPGA and more.
+
+config TI_PWMSS
+ bool
+ default y if (ARCH_OMAP2PLUS) && (PWM_TIECAP || PWM_TIEHRPWM || TI_EQEP)
+ help
+ PWM Subsystem driver support for AM33xx SOC.
+
+ PWM submodules require PWM config space access from submodule
+ drivers and require common parent driver support.
+
+config TI_SYSC
+ bool "TI sysc interconnect target module driver"
+ depends on ARCH_OMAP2PLUS || ARCH_K3
+ default y
+ help
+ Generic driver for Texas Instruments interconnect target module
+ found on many TI SoCs.
+
+config TS_NBUS
+ tristate "Technologic Systems NBUS Driver"
+ depends on SOC_IMX28
+ depends on OF_GPIO && PWM
+ help
+ Driver for the Technologic Systems NBUS which is used to interface
+ with the peripherals in the FPGA of the TS-4600 SoM.
+
+config UNIPHIER_SYSTEM_BUS
+ tristate "UniPhier System Bus driver"
+ depends on ARCH_UNIPHIER && OF
+ default y
+ help
+ Support for UniPhier System Bus, a simple external bus. This is
+ needed to use on-board devices connected to UniPhier SoCs.
+
+config VEXPRESS_CONFIG
+ tristate "Versatile Express configuration bus"
+ default y if ARCH_VEXPRESS
+ depends on ARM || ARM64
+ depends on OF
+ select REGMAP
+ help
+ Platform configuration infrastructure for the ARM Ltd.
+ Versatile Express.
+
+config DA8XX_MSTPRI
+ bool "TI da8xx master peripheral priority driver"
+ depends on ARCH_DAVINCI_DA8XX
+ help
+ Driver for Texas Instruments da8xx master peripheral priority
+ configuration. Allows adjusting the priorities of all master
+ peripherals.
+
+source "drivers/bus/fsl-mc/Kconfig"
+source "drivers/bus/mhi/Kconfig"
+
+endmenu
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
new file mode 100644
index 0000000000..d90eed189a
--- /dev/null
+++ b/drivers/bus/Makefile
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the bus drivers.
+#
+
+# Interconnect bus drivers for ARM platforms
+obj-$(CONFIG_ARM_CCI) += arm-cci.o
+obj-$(CONFIG_ARM_INTEGRATOR_LM) += arm-integrator-lm.o
+obj-$(CONFIG_HISILICON_LPC) += hisi_lpc.o
+obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o
+obj-$(CONFIG_MOXTET) += moxtet.o
+
+# DPAA2 fsl-mc bus
+obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
+
+obj-$(CONFIG_BT1_APB) += bt1-apb.o
+obj-$(CONFIG_BT1_AXI) += bt1-axi.o
+obj-$(CONFIG_IMX_WEIM) += imx-weim.o
+obj-$(CONFIG_INTEL_IXP4XX_EB) += intel-ixp4xx-eb.o
+obj-$(CONFIG_MIPS_CDMM) += mips_cdmm.o
+obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o
+
+# Interconnect bus driver for OMAP SoCs.
+obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o
+
+obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
+obj-$(CONFIG_QCOM_EBI2) += qcom-ebi2.o
+obj-$(CONFIG_QCOM_SSC_BLOCK_BUS) += qcom-ssc-block-bus.o
+obj-$(CONFIG_SUN50I_DE2_BUS) += sun50i-de2.o
+obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
+obj-$(CONFIG_OF) += simple-pm-bus.o
+obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o
+obj-$(CONFIG_TEGRA_GMI) += tegra-gmi.o
+obj-$(CONFIG_TI_PWMSS) += ti-pwmss.o
+obj-$(CONFIG_TI_SYSC) += ti-sysc.o
+obj-$(CONFIG_TS_NBUS) += ts-nbus.o
+obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o
+obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o
+
+obj-$(CONFIG_DA8XX_MSTPRI) += da8xx-mstpri.o
+
+# MHI
+obj-y += mhi/
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
new file mode 100644
index 0000000000..b8184a9035
--- /dev/null
+++ b/drivers/bus/arm-cci.c
@@ -0,0 +1,587 @@
+/*
+ * CCI cache coherent interconnect driver
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/arm-cci.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
+
+static void __iomem *cci_ctrl_base __ro_after_init;
+static unsigned long cci_ctrl_phys __ro_after_init;
+
+#ifdef CONFIG_ARM_CCI400_PORT_CTRL
+struct cci_nb_ports {
+ unsigned int nb_ace;
+ unsigned int nb_ace_lite;
+};
+
+static const struct cci_nb_ports cci400_ports = {
+ .nb_ace = 2,
+ .nb_ace_lite = 3
+};
+
+#define CCI400_PORTS_DATA (&cci400_ports)
+#else
+#define CCI400_PORTS_DATA (NULL)
+#endif
+
+static const struct of_device_id arm_cci_matches[] = {
+#ifdef CONFIG_ARM_CCI400_COMMON
+ {.compatible = "arm,cci-400", .data = CCI400_PORTS_DATA },
+#endif
+#ifdef CONFIG_ARM_CCI5xx_PMU
+ { .compatible = "arm,cci-500", },
+ { .compatible = "arm,cci-550", },
+#endif
+ {},
+};
+
+static const struct of_dev_auxdata arm_cci_auxdata[] = {
+ OF_DEV_AUXDATA("arm,cci-400-pmu", 0, NULL, &cci_ctrl_base),
+ OF_DEV_AUXDATA("arm,cci-400-pmu,r0", 0, NULL, &cci_ctrl_base),
+ OF_DEV_AUXDATA("arm,cci-400-pmu,r1", 0, NULL, &cci_ctrl_base),
+ OF_DEV_AUXDATA("arm,cci-500-pmu,r0", 0, NULL, &cci_ctrl_base),
+ OF_DEV_AUXDATA("arm,cci-550-pmu,r0", 0, NULL, &cci_ctrl_base),
+ {}
+};
+
+#define DRIVER_NAME "ARM-CCI"
+
+static int cci_platform_probe(struct platform_device *pdev)
+{
+ if (!cci_probed())
+ return -ENODEV;
+
+ return of_platform_populate(pdev->dev.of_node, NULL,
+ arm_cci_auxdata, &pdev->dev);
+}
+
+static struct platform_driver cci_platform_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = arm_cci_matches,
+ },
+ .probe = cci_platform_probe,
+};
+
+static int __init cci_platform_init(void)
+{
+ return platform_driver_register(&cci_platform_driver);
+}
+
+#ifdef CONFIG_ARM_CCI400_PORT_CTRL
+
+#define CCI_PORT_CTRL 0x0
+#define CCI_CTRL_STATUS 0xc
+
+#define CCI_ENABLE_SNOOP_REQ 0x1
+#define CCI_ENABLE_DVM_REQ 0x2
+#define CCI_ENABLE_REQ (CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)
+
+enum cci_ace_port_type {
+ ACE_INVALID_PORT = 0x0,
+ ACE_PORT,
+ ACE_LITE_PORT,
+};
+
+struct cci_ace_port {
+ void __iomem *base;
+ unsigned long phys;
+ enum cci_ace_port_type type;
+ struct device_node *dn;
+};
+
+static struct cci_ace_port *ports;
+static unsigned int nb_cci_ports;
+
+struct cpu_port {
+ u64 mpidr;
+ u32 port;
+};
+
+/*
+ * Use the port MSB as valid flag, shift can be made dynamic
+ * by computing number of bits required for port indexes.
+ * Code disabling CCI cpu ports runs with D-cache invalidated
+ * and SCTLR bit clear so data accesses must be kept to a minimum
+ * to improve performance; for now shift is left static to
+ * avoid one more data access while disabling the CCI port.
+ */
+#define PORT_VALID_SHIFT 31
+#define PORT_VALID (0x1 << PORT_VALID_SHIFT)
+
+static inline void init_cpu_port(struct cpu_port *port, u32 index, u64 mpidr)
+{
+ port->port = PORT_VALID | index;
+ port->mpidr = mpidr;
+}
+
+static inline bool cpu_port_is_valid(struct cpu_port *port)
+{
+ return !!(port->port & PORT_VALID);
+}
+
+static inline bool cpu_port_match(struct cpu_port *port, u64 mpidr)
+{
+ return port->mpidr == (mpidr & MPIDR_HWID_BITMASK);
+}
+
+static struct cpu_port cpu_port[NR_CPUS];
+
+/**
+ * __cci_ace_get_port - Function to retrieve the port index connected to
+ * a cpu or device.
+ *
+ * @dn: device node of the device to look-up
+ * @type: port type
+ *
+ * Return value:
+ * - CCI port index if success
+ * - -ENODEV if failure
+ */
+static int __cci_ace_get_port(struct device_node *dn, int type)
+{
+ int i;
+ bool ace_match;
+ struct device_node *cci_portn;
+
+ cci_portn = of_parse_phandle(dn, "cci-control-port", 0);
+ for (i = 0; i < nb_cci_ports; i++) {
+ ace_match = ports[i].type == type;
+ if (ace_match && cci_portn == ports[i].dn)
+ return i;
+ }
+ return -ENODEV;
+}
+
+int cci_ace_get_port(struct device_node *dn)
+{
+ return __cci_ace_get_port(dn, ACE_LITE_PORT);
+}
+EXPORT_SYMBOL_GPL(cci_ace_get_port);
+
+static void cci_ace_init_ports(void)
+{
+ int port, cpu;
+ struct device_node *cpun;
+
+ /*
+ * Port index look-up speeds up the function disabling ports by CPU,
+ * since the logical to port index mapping is done once and does
+ * not change after system boot.
+ * The stashed index array is initialized for all possible CPUs
+ * at probe time.
+ */
+ for_each_possible_cpu(cpu) {
+ /* too early to use cpu->of_node */
+ cpun = of_get_cpu_node(cpu, NULL);
+
+ if (WARN(!cpun, "Missing cpu device node\n"))
+ continue;
+
+ port = __cci_ace_get_port(cpun, ACE_PORT);
+ if (port < 0)
+ continue;
+
+ init_cpu_port(&cpu_port[cpu], port, cpu_logical_map(cpu));
+ }
+
+ for_each_possible_cpu(cpu) {
+ WARN(!cpu_port_is_valid(&cpu_port[cpu]),
+ "CPU %u does not have an associated CCI port\n",
+ cpu);
+ }
+}
+/*
+ * Functions to enable/disable a CCI interconnect slave port
+ *
+ * They are called by low-level power management code to disable slave
+ * interface snoops and DVM broadcasts.
+ * Since they may execute with cache data allocation disabled and after
+ * the caches have been cleaned and invalidated, the functions provide no
+ * explicit locking: they may run with the D-cache disabled, so normal
+ * cacheable kernel locks based on ldrex/strex may not work.
+ * Locking has to be provided by BSP implementations to ensure proper
+ * operations.
+ */
+
+/**
+ * cci_port_control() - function to control a CCI port
+ *
+ * @port: index of the port to setup
+ * @enable: if true enables the port, if false disables it
+ */
+static void notrace cci_port_control(unsigned int port, bool enable)
+{
+ void __iomem *base = ports[port].base;
+
+ writel_relaxed(enable ? CCI_ENABLE_REQ : 0, base + CCI_PORT_CTRL);
+ /*
+ * This function is called from power down procedures
+ * and must not execute any instruction that might
+ * cause the processor to be put in a quiescent state
+ * (eg wfi). Hence, cpu_relax() can not be added to this
+ * read loop to optimize power, since it might hide possibly
+ * disruptive operations.
+ */
+ while (readl_relaxed(cci_ctrl_base + CCI_CTRL_STATUS) & 0x1)
+ ;
+}
+
+/**
+ * cci_disable_port_by_cpu() - function to disable a CCI port by CPU
+ * reference
+ *
+ * @mpidr: mpidr of the CPU whose CCI port should be disabled
+ *
+ * Disabling a CCI port for a CPU implies disabling the CCI port
+ * controlling that CPU cluster. Code disabling CPU CCI ports
+ * must make sure that the CPU running the code is the last active CPU
+ * in the cluster ie all other CPUs are quiescent in a low power state.
+ *
+ * Return:
+ * 0 on success
+ * -ENODEV on port look-up failure
+ */
+int notrace cci_disable_port_by_cpu(u64 mpidr)
+{
+ int cpu;
+ bool is_valid;
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ is_valid = cpu_port_is_valid(&cpu_port[cpu]);
+ if (is_valid && cpu_port_match(&cpu_port[cpu], mpidr)) {
+ cci_port_control(cpu_port[cpu].port, false);
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(cci_disable_port_by_cpu);
+
+/**
+ * cci_enable_port_for_self() - enable a CCI port for calling CPU
+ *
+ * Enabling a CCI port for the calling CPU implies enabling the CCI
+ * port controlling that CPU's cluster. Caller must make sure that the
+ * CPU running the code is the first active CPU in the cluster and all
+ * other CPUs are quiescent in a low power state or waiting for this CPU
+ * to complete the CCI initialization.
+ *
+ * Because this is called when the MMU is still off and with no stack,
+ * the code must be position independent and ideally rely on callee
+ * clobbered registers only. To achieve this we must code this function
+ * entirely in assembler.
+ *
+ * On success this returns with the proper CCI port enabled. In case of
+ * any failure this never returns as the inability to enable the CCI is
+ * fatal and there is no possible recovery at this stage.
+ */
+asmlinkage void __naked cci_enable_port_for_self(void)
+{
+ asm volatile ("\n"
+" .arch armv7-a\n"
+" mrc p15, 0, r0, c0, c0, 5 @ get MPIDR value \n"
+" and r0, r0, #"__stringify(MPIDR_HWID_BITMASK)" \n"
+" adr r1, 5f \n"
+" ldr r2, [r1] \n"
+" add r1, r1, r2 @ &cpu_port \n"
+" add ip, r1, %[sizeof_cpu_port] \n"
+
+ /* Loop over the cpu_port array looking for a matching MPIDR */
+"1: ldr r2, [r1, %[offsetof_cpu_port_mpidr_lsb]] \n"
+" cmp r2, r0 @ compare MPIDR \n"
+" bne 2f \n"
+
+ /* Found a match, now test port validity */
+" ldr r3, [r1, %[offsetof_cpu_port_port]] \n"
+" tst r3, #"__stringify(PORT_VALID)" \n"
+" bne 3f \n"
+
+ /* no match, loop with the next cpu_port entry */
+"2: add r1, r1, %[sizeof_struct_cpu_port] \n"
+" cmp r1, ip @ done? \n"
+" blo 1b \n"
+
+ /* CCI port not found -- cheaply try to stall this CPU */
+"cci_port_not_found: \n"
+" wfi \n"
+" wfe \n"
+" b cci_port_not_found \n"
+
+ /* Use matched port index to look up the corresponding ports entry */
+"3: bic r3, r3, #"__stringify(PORT_VALID)" \n"
+" adr r0, 6f \n"
+" ldmia r0, {r1, r2} \n"
+" sub r1, r1, r0 @ virt - phys \n"
+" ldr r0, [r0, r2] @ *(&ports) \n"
+" mov r2, %[sizeof_struct_ace_port] \n"
+" mla r0, r2, r3, r0 @ &ports[index] \n"
+" sub r0, r0, r1 @ virt_to_phys() \n"
+
+ /* Enable the CCI port */
+" ldr r0, [r0, %[offsetof_port_phys]] \n"
+" mov r3, %[cci_enable_req]\n"
+" str r3, [r0, #"__stringify(CCI_PORT_CTRL)"] \n"
+
+ /* poll the status reg for completion */
+" adr r1, 7f \n"
+" ldr r0, [r1] \n"
+" ldr r0, [r0, r1] @ cci_ctrl_base \n"
+"4: ldr r1, [r0, #"__stringify(CCI_CTRL_STATUS)"] \n"
+" tst r1, %[cci_control_status_bits] \n"
+" bne 4b \n"
+
+" mov r0, #0 \n"
+" bx lr \n"
+
+" .align 2 \n"
+"5: .word cpu_port - . \n"
+"6: .word . \n"
+" .word ports - 6b \n"
+"7: .word cci_ctrl_phys - . \n"
+ : :
+ [sizeof_cpu_port] "i" (sizeof(cpu_port)),
+ [cci_enable_req] "i" cpu_to_le32(CCI_ENABLE_REQ),
+ [cci_control_status_bits] "i" cpu_to_le32(1),
+#ifndef __ARMEB__
+ [offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)),
+#else
+ [offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)+4),
+#endif
+ [offsetof_cpu_port_port] "i" (offsetof(struct cpu_port, port)),
+ [sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)),
+ [sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)),
+ [offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) );
+}
+
+/**
+ * __cci_control_port_by_device() - function to control a CCI port by device
+ * reference
+ *
+ * @dn: device node pointer of the device whose CCI port should be
+ * controlled
+ * @enable: if true enables the port, if false disables it
+ *
+ * Return:
+ * 0 on success
+ * -ENODEV on port look-up failure
+ */
+int notrace __cci_control_port_by_device(struct device_node *dn, bool enable)
+{
+ int port;
+
+ if (!dn)
+ return -ENODEV;
+
+ port = __cci_ace_get_port(dn, ACE_LITE_PORT);
+ if (WARN_ONCE(port < 0, "node %pOF ACE lite port look-up failure\n",
+ dn))
+ return -ENODEV;
+ cci_port_control(port, enable);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__cci_control_port_by_device);
+
+/**
+ * __cci_control_port_by_index() - function to control a CCI port by port index
+ *
+ * @port: port index previously retrieved with cci_ace_get_port()
+ * @enable: if true enables the port, if false disables it
+ *
+ * Return:
+ * 0 on success
+ * -ENODEV on port index out of range
+ * -EPERM if operation carried out on an ACE PORT
+ */
+int notrace __cci_control_port_by_index(u32 port, bool enable)
+{
+ if (port >= nb_cci_ports || ports[port].type == ACE_INVALID_PORT)
+ return -ENODEV;
+ /*
+ * CCI control for ports connected to CPUs is extremely fragile
+ * and must be made to go through a specific and controlled
+ * interface (ie cci_disable_port_by_cpu()); control by general-purpose
+ * indexing is therefore disabled for ACE ports.
+ */
+ if (ports[port].type == ACE_PORT)
+ return -EPERM;
+
+ cci_port_control(port, enable);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__cci_control_port_by_index);
+
+static const struct of_device_id arm_cci_ctrl_if_matches[] = {
+ {.compatible = "arm,cci-400-ctrl-if", },
+ {},
+};
+
+static int cci_probe_ports(struct device_node *np)
+{
+ struct cci_nb_ports const *cci_config;
+ int ret, i, nb_ace = 0, nb_ace_lite = 0;
+ struct device_node *cp;
+ struct resource res;
+ const char *match_str;
+ bool is_ace;
+
+
+ cci_config = of_match_node(arm_cci_matches, np)->data;
+ if (!cci_config)
+ return -ENODEV;
+
+ nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;
+
+ ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
+ if (!ports)
+ return -ENOMEM;
+
+ for_each_available_child_of_node(np, cp) {
+ if (!of_match_node(arm_cci_ctrl_if_matches, cp))
+ continue;
+
+ i = nb_ace + nb_ace_lite;
+
+ if (i >= nb_cci_ports)
+ break;
+
+ if (of_property_read_string(cp, "interface-type",
+ &match_str)) {
+ WARN(1, "node %pOF missing interface-type property\n",
+ cp);
+ continue;
+ }
+ is_ace = strcmp(match_str, "ace") == 0;
+ if (!is_ace && strcmp(match_str, "ace-lite")) {
+ WARN(1, "node %pOF containing invalid interface-type property, skipping it\n",
+ cp);
+ continue;
+ }
+
+ ret = of_address_to_resource(cp, 0, &res);
+ if (!ret) {
+ ports[i].base = ioremap(res.start, resource_size(&res));
+ ports[i].phys = res.start;
+ }
+ if (ret || !ports[i].base) {
+ WARN(1, "unable to ioremap CCI port %d\n", i);
+ continue;
+ }
+
+ if (is_ace) {
+ if (WARN_ON(nb_ace >= cci_config->nb_ace))
+ continue;
+ ports[i].type = ACE_PORT;
+ ++nb_ace;
+ } else {
+ if (WARN_ON(nb_ace_lite >= cci_config->nb_ace_lite))
+ continue;
+ ports[i].type = ACE_LITE_PORT;
+ ++nb_ace_lite;
+ }
+ ports[i].dn = cp;
+ }
+
+ /*
+ * If there is no CCI port that is under kernel control
+ * return early and report probe status.
+ */
+ if (!nb_ace && !nb_ace_lite)
+ return -ENODEV;
+
+ /* initialize a stashed array of ACE ports to speed-up look-up */
+ cci_ace_init_ports();
+
+ /*
+ * Multi-cluster systems may need this data when non-coherent, during
+ * cluster power-up/power-down. Make sure it reaches main memory.
+ */
+ sync_cache_w(&cci_ctrl_base);
+ sync_cache_w(&cci_ctrl_phys);
+ sync_cache_w(&ports);
+ sync_cache_w(&cpu_port);
+ __sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports);
+ pr_info("ARM CCI driver probed\n");
+
+ return 0;
+}
+#else /* !CONFIG_ARM_CCI400_PORT_CTRL */
+static inline int cci_probe_ports(struct device_node *np)
+{
+ return 0;
+}
+#endif /* CONFIG_ARM_CCI400_PORT_CTRL */
+
+static int cci_probe(void)
+{
+ int ret;
+ struct device_node *np;
+ struct resource res;
+
+ np = of_find_matching_node(NULL, arm_cci_matches);
+ if (!of_device_is_available(np))
+ return -ENODEV;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (!ret) {
+ cci_ctrl_base = ioremap(res.start, resource_size(&res));
+ cci_ctrl_phys = res.start;
+ }
+ if (ret || !cci_ctrl_base) {
+ WARN(1, "unable to ioremap CCI ctrl\n");
+ return -ENXIO;
+ }
+
+ return cci_probe_ports(np);
+}
+
+static int cci_init_status = -EAGAIN;
+static DEFINE_MUTEX(cci_probing);
+
+static int cci_init(void)
+{
+ if (cci_init_status != -EAGAIN)
+ return cci_init_status;
+
+ mutex_lock(&cci_probing);
+ if (cci_init_status == -EAGAIN)
+ cci_init_status = cci_probe();
+ mutex_unlock(&cci_probing);
+ return cci_init_status;
+}
+
+/*
+ * To sort out early init call ordering, a helper function is provided to
+ * check if the CCI driver has been initialized. The function checks whether
+ * the driver has been initialized and, if not, calls the init function that
+ * probes the driver and updates the return value.
+ */
+bool cci_probed(void)
+{
+ return cci_init() == 0;
+}
+EXPORT_SYMBOL_GPL(cci_probed);
+
+early_initcall(cci_init);
+core_initcall(cci_platform_init);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ARM CCI support");
diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c
new file mode 100644
index 0000000000..b715c8ab36
--- /dev/null
+++ b/drivers/bus/arm-integrator-lm.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM Integrator Logical Module bus driver
+ * Copyright (C) 2020 Linaro Ltd.
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * See the device tree bindings for this block for more details on the
+ * hardware.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+/* All information about the connected logic modules is in here */
+#define INTEGRATOR_SC_DEC_OFFSET 0x10
+
+/* Base address for the expansion modules */
+#define INTEGRATOR_AP_EXP_BASE 0xc0000000
+#define INTEGRATOR_AP_EXP_STRIDE 0x10000000
+
+static int integrator_lm_populate(int num, struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ u32 base;
+ int ret;
+
+ base = INTEGRATOR_AP_EXP_BASE + (num * INTEGRATOR_AP_EXP_STRIDE);
+
+ /* Walk over the child nodes and see what chipselects we use */
+ for_each_available_child_of_node(np, child) {
+ struct resource res;
+
+ ret = of_address_to_resource(child, 0, &res);
+ if (ret) {
+ dev_info(dev, "no valid address on child\n");
+ continue;
+ }
+
+ /* First populate the syscon then any devices */
+ if (res.start == base) {
+ dev_info(dev, "populate module @0x%08x from DT\n",
+ base);
+ ret = of_platform_default_populate(child, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to populate module\n");
+ of_node_put(child);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id integrator_ap_syscon_match[] = {
+ { .compatible = "arm,integrator-ap-syscon"},
+ { },
+};
+
+static int integrator_ap_lm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *syscon;
+ static struct regmap *map;
+ u32 val;
+ int ret;
+ int i;
+
+ /* Look up the system controller */
+ syscon = of_find_matching_node(NULL, integrator_ap_syscon_match);
+ if (!syscon) {
+ dev_err(dev,
+ "could not find Integrator/AP system controller\n");
+ return -ENODEV;
+ }
+ map = syscon_node_to_regmap(syscon);
+ if (IS_ERR(map)) {
+ dev_err(dev,
+ "could not find Integrator/AP system controller\n");
+ return PTR_ERR(map);
+ }
+
+ ret = regmap_read(map, INTEGRATOR_SC_DEC_OFFSET, &val);
+ if (ret) {
+ dev_err(dev, "could not read from Integrator/AP syscon\n");
+ return ret;
+ }
+
+ /* Loop over the connected modules */
+ for (i = 0; i < 4; i++) {
+ if (!(val & BIT(4 + i)))
+ continue;
+
+ dev_info(dev, "detected module in slot %d\n", i);
+ ret = integrator_lm_populate(i, dev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id integrator_ap_lm_match[] = {
+ { .compatible = "arm,integrator-ap-lm"},
+ { },
+};
+
+static struct platform_driver integrator_ap_lm_driver = {
+ .probe = integrator_ap_lm_probe,
+ .driver = {
+ .name = "integratorap-lm",
+ .of_match_table = integrator_ap_lm_match,
+ },
+};
+module_platform_driver(integrator_ap_lm_driver);
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Integrator AP Logical Module driver");
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
new file mode 100644
index 0000000000..b6dfe4340d
--- /dev/null
+++ b/drivers/bus/brcmstb_gisb.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-2021 Broadcom
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/panic_notifier.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/sysfs.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/pm.h>
+#include <linux/kernel.h>
+#include <linux/kdebug.h>
+#include <linux/notifier.h>
+
+#ifdef CONFIG_MIPS
+#include <asm/traps.h>
+#endif
+
+#define ARB_ERR_CAP_CLEAR (1 << 0)
+#define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12)
+#define ARB_ERR_CAP_STATUS_TEA (1 << 11)
+#define ARB_ERR_CAP_STATUS_WRITE (1 << 1)
+#define ARB_ERR_CAP_STATUS_VALID (1 << 0)
+
+#define ARB_BP_CAP_CLEAR (1 << 0)
+#define ARB_BP_CAP_STATUS_PROT_SHIFT 14
+#define ARB_BP_CAP_STATUS_TYPE (1 << 13)
+#define ARB_BP_CAP_STATUS_RSP_SHIFT 10
+#define ARB_BP_CAP_STATUS_MASK GENMASK(1, 0)
+#define ARB_BP_CAP_STATUS_BS_SHIFT 2
+#define ARB_BP_CAP_STATUS_WRITE (1 << 1)
+#define ARB_BP_CAP_STATUS_VALID (1 << 0)
+
+enum {
+ ARB_TIMER,
+ ARB_BP_CAP_CLR,
+ ARB_BP_CAP_HI_ADDR,
+ ARB_BP_CAP_ADDR,
+ ARB_BP_CAP_STATUS,
+ ARB_BP_CAP_MASTER,
+ ARB_ERR_CAP_CLR,
+ ARB_ERR_CAP_HI_ADDR,
+ ARB_ERR_CAP_ADDR,
+ ARB_ERR_CAP_STATUS,
+ ARB_ERR_CAP_MASTER,
+};
+
+static const int gisb_offsets_bcm7038[] = {
+ [ARB_TIMER] = 0x00c,
+ [ARB_BP_CAP_CLR] = 0x014,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x0b8,
+ [ARB_BP_CAP_STATUS] = 0x0c0,
+ [ARB_BP_CAP_MASTER] = -1,
+ [ARB_ERR_CAP_CLR] = 0x0c4,
+ [ARB_ERR_CAP_HI_ADDR] = -1,
+ [ARB_ERR_CAP_ADDR] = 0x0c8,
+ [ARB_ERR_CAP_STATUS] = 0x0d0,
+ [ARB_ERR_CAP_MASTER] = -1,
+};
+
+static const int gisb_offsets_bcm7278[] = {
+ [ARB_TIMER] = 0x008,
+ [ARB_BP_CAP_CLR] = 0x01c,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x220,
+ [ARB_BP_CAP_STATUS] = 0x230,
+ [ARB_BP_CAP_MASTER] = 0x234,
+ [ARB_ERR_CAP_CLR] = 0x7f8,
+ [ARB_ERR_CAP_HI_ADDR] = -1,
+ [ARB_ERR_CAP_ADDR] = 0x7e0,
+ [ARB_ERR_CAP_STATUS] = 0x7f0,
+ [ARB_ERR_CAP_MASTER] = 0x7f4,
+};
+
+static const int gisb_offsets_bcm7400[] = {
+ [ARB_TIMER] = 0x00c,
+ [ARB_BP_CAP_CLR] = 0x014,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x0b8,
+ [ARB_BP_CAP_STATUS] = 0x0c0,
+ [ARB_BP_CAP_MASTER] = 0x0c4,
+ [ARB_ERR_CAP_CLR] = 0x0c8,
+ [ARB_ERR_CAP_HI_ADDR] = -1,
+ [ARB_ERR_CAP_ADDR] = 0x0cc,
+ [ARB_ERR_CAP_STATUS] = 0x0d4,
+ [ARB_ERR_CAP_MASTER] = 0x0d8,
+};
+
+static const int gisb_offsets_bcm7435[] = {
+ [ARB_TIMER] = 0x00c,
+ [ARB_BP_CAP_CLR] = 0x014,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x158,
+ [ARB_BP_CAP_STATUS] = 0x160,
+ [ARB_BP_CAP_MASTER] = 0x164,
+ [ARB_ERR_CAP_CLR] = 0x168,
+ [ARB_ERR_CAP_HI_ADDR] = -1,
+ [ARB_ERR_CAP_ADDR] = 0x16c,
+ [ARB_ERR_CAP_STATUS] = 0x174,
+ [ARB_ERR_CAP_MASTER] = 0x178,
+};
+
+static const int gisb_offsets_bcm7445[] = {
+ [ARB_TIMER] = 0x008,
+ [ARB_BP_CAP_CLR] = 0x010,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x1d8,
+ [ARB_BP_CAP_STATUS] = 0x1e0,
+ [ARB_BP_CAP_MASTER] = 0x1e4,
+ [ARB_ERR_CAP_CLR] = 0x7e4,
+ [ARB_ERR_CAP_HI_ADDR] = 0x7e8,
+ [ARB_ERR_CAP_ADDR] = 0x7ec,
+ [ARB_ERR_CAP_STATUS] = 0x7f4,
+ [ARB_ERR_CAP_MASTER] = 0x7f8,
+};
+
+struct brcmstb_gisb_arb_device {
+ void __iomem *base;
+ const int *gisb_offsets;
+ bool big_endian;
+ struct mutex lock;
+ struct list_head next;
+ u32 valid_mask;
+ const char *master_names[sizeof(u32) * BITS_PER_BYTE];
+ u32 saved_timeout;
+};
+
+static LIST_HEAD(brcmstb_gisb_arb_device_list);
+
+static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg)
+{
+ int offset = gdev->gisb_offsets[reg];
+
+ if (offset < 0) {
+ /* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
+ if (reg == ARB_ERR_CAP_MASTER)
+ return 1;
+ else
+ return 0;
+ }
+
+ if (gdev->big_endian)
+ return ioread32be(gdev->base + offset);
+ else
+ return ioread32(gdev->base + offset);
+}
+
+static u64 gisb_read_address(struct brcmstb_gisb_arb_device *gdev)
+{
+ u64 value;
+
+ value = gisb_read(gdev, ARB_ERR_CAP_ADDR);
+ value |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
+
+ return value;
+}
+
+static u64 gisb_read_bp_address(struct brcmstb_gisb_arb_device *gdev)
+{
+ u64 value;
+
+ value = gisb_read(gdev, ARB_BP_CAP_ADDR);
+ value |= (u64)gisb_read(gdev, ARB_BP_CAP_HI_ADDR) << 32;
+
+ return value;
+}
+
+static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
+{
+ int offset = gdev->gisb_offsets[reg];
+
+ if (offset == -1)
+ return;
+
+ if (gdev->big_endian)
+ iowrite32be(val, gdev->base + offset);
+ else
+ iowrite32(val, gdev->base + offset);
+}
+
+static ssize_t gisb_arb_get_timeout(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct brcmstb_gisb_arb_device *gdev = dev_get_drvdata(dev);
+ u32 timeout;
+
+ mutex_lock(&gdev->lock);
+ timeout = gisb_read(gdev, ARB_TIMER);
+ mutex_unlock(&gdev->lock);
+
+ return sprintf(buf, "%d", timeout);
+}
+
+static ssize_t gisb_arb_set_timeout(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct brcmstb_gisb_arb_device *gdev = dev_get_drvdata(dev);
+ int val, ret;
+
+ ret = kstrtoint(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val == 0 || val >= 0xffffffff)
+ return -EINVAL;
+
+ mutex_lock(&gdev->lock);
+ gisb_write(gdev, val, ARB_TIMER);
+ mutex_unlock(&gdev->lock);
+
+ return count;
+}
+
+static const char *
+brcmstb_gisb_master_to_str(struct brcmstb_gisb_arb_device *gdev,
+ u32 masters)
+{
+ u32 mask = gdev->valid_mask & masters;
+
+ if (hweight_long(mask) != 1)
+ return NULL;
+
+ return gdev->master_names[ffs(mask) - 1];
+}
+
+static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
+ const char *reason)
+{
+ u32 cap_status;
+ u64 arb_addr;
+ u32 master;
+ const char *m_name;
+ char m_fmt[11];
+
+ cap_status = gisb_read(gdev, ARB_ERR_CAP_STATUS);
+
+ /* Invalid captured address, bail out */
+ if (!(cap_status & ARB_ERR_CAP_STATUS_VALID))
+ return 1;
+
+ /* Read the address and master */
+ arb_addr = gisb_read_address(gdev);
+ master = gisb_read(gdev, ARB_ERR_CAP_MASTER);
+
+ m_name = brcmstb_gisb_master_to_str(gdev, master);
+ if (!m_name) {
+ snprintf(m_fmt, sizeof(m_fmt), "0x%08x", master);
+ m_name = m_fmt;
+ }
+
+ pr_crit("GISB: %s at 0x%llx [%c %s], core: %s\n",
+ reason, arb_addr,
+ cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R',
+ cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "",
+ m_name);
+
+ /* clear the GISB error */
+ gisb_write(gdev, ARB_ERR_CAP_CLEAR, ARB_ERR_CAP_CLR);
+
+ return 0;
+}
+
+#ifdef CONFIG_MIPS
+static int brcmstb_bus_error_handler(struct pt_regs *regs, int is_fixup)
+{
+ int ret = 0;
+ struct brcmstb_gisb_arb_device *gdev;
+ u32 cap_status;
+
+ list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next) {
+ cap_status = gisb_read(gdev, ARB_ERR_CAP_STATUS);
+
+ /* Invalid captured address, bail out */
+ if (!(cap_status & ARB_ERR_CAP_STATUS_VALID)) {
+ is_fixup = 1;
+ goto out;
+ }
+
+ ret |= brcmstb_gisb_arb_decode_addr(gdev, "bus error");
+ }
+out:
+ return is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
+}
+#endif
+
+static irqreturn_t brcmstb_gisb_timeout_handler(int irq, void *dev_id)
+{
+ brcmstb_gisb_arb_decode_addr(dev_id, "timeout");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t brcmstb_gisb_tea_handler(int irq, void *dev_id)
+{
+ brcmstb_gisb_arb_decode_addr(dev_id, "target abort");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t brcmstb_gisb_bp_handler(int irq, void *dev_id)
+{
+ struct brcmstb_gisb_arb_device *gdev = dev_id;
+ const char *m_name;
+ u32 bp_status;
+ u64 arb_addr;
+ u32 master;
+ char m_fmt[11];
+
+ bp_status = gisb_read(gdev, ARB_BP_CAP_STATUS);
+
+ /* Invalid captured address, bail out */
+ if (!(bp_status & ARB_BP_CAP_STATUS_VALID))
+ return IRQ_HANDLED;
+
+ /* Read the address and master */
+ arb_addr = gisb_read_bp_address(gdev);
+ master = gisb_read(gdev, ARB_BP_CAP_MASTER);
+
+ m_name = brcmstb_gisb_master_to_str(gdev, master);
+ if (!m_name) {
+ snprintf(m_fmt, sizeof(m_fmt), "0x%08x", master);
+ m_name = m_fmt;
+ }
+
+ pr_crit("GISB: breakpoint at 0x%llx [%c], core: %s\n",
+ arb_addr, bp_status & ARB_BP_CAP_STATUS_WRITE ? 'W' : 'R',
+ m_name);
+
+ /* clear the GISB error */
+ gisb_write(gdev, ARB_ERR_CAP_CLEAR, ARB_ERR_CAP_CLR);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Dump out gisb errors on die or panic.
+ */
+static int dump_gisb_error(struct notifier_block *self, unsigned long v,
+ void *p);
+
+static struct notifier_block gisb_die_notifier = {
+ .notifier_call = dump_gisb_error,
+};
+
+static struct notifier_block gisb_panic_notifier = {
+ .notifier_call = dump_gisb_error,
+};
+
+static int dump_gisb_error(struct notifier_block *self, unsigned long v,
+ void *p)
+{
+ struct brcmstb_gisb_arb_device *gdev;
+ const char *reason = "panic";
+
+ if (self == &gisb_die_notifier)
+ reason = "die";
+
+ /* iterate over each GISB arb registered handlers */
+ list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next)
+ brcmstb_gisb_arb_decode_addr(gdev, reason);
+
+ return NOTIFY_DONE;
+}
+
+static DEVICE_ATTR(gisb_arb_timeout, S_IWUSR | S_IRUGO,
+ gisb_arb_get_timeout, gisb_arb_set_timeout);
+
+static struct attribute *gisb_arb_sysfs_attrs[] = {
+ &dev_attr_gisb_arb_timeout.attr,
+ NULL,
+};
+
+static struct attribute_group gisb_arb_sysfs_attr_group = {
+ .attrs = gisb_arb_sysfs_attrs,
+};
+
+static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
+ { .compatible = "brcm,gisb-arb", .data = gisb_offsets_bcm7445 },
+ { .compatible = "brcm,bcm7445-gisb-arb", .data = gisb_offsets_bcm7445 },
+ { .compatible = "brcm,bcm7435-gisb-arb", .data = gisb_offsets_bcm7435 },
+ { .compatible = "brcm,bcm7400-gisb-arb", .data = gisb_offsets_bcm7400 },
+ { .compatible = "brcm,bcm7278-gisb-arb", .data = gisb_offsets_bcm7278 },
+ { .compatible = "brcm,bcm7038-gisb-arb", .data = gisb_offsets_bcm7038 },
+ { },
+};
+
+static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ struct brcmstb_gisb_arb_device *gdev;
+ const struct of_device_id *of_id;
+ int err, timeout_irq, tea_irq, bp_irq;
+ unsigned int num_masters, j = 0;
+ int i, first, last;
+
+ timeout_irq = platform_get_irq(pdev, 0);
+ tea_irq = platform_get_irq(pdev, 1);
+ bp_irq = platform_get_irq(pdev, 2);
+
+ gdev = devm_kzalloc(&pdev->dev, sizeof(*gdev), GFP_KERNEL);
+ if (!gdev)
+ return -ENOMEM;
+
+ mutex_init(&gdev->lock);
+ INIT_LIST_HEAD(&gdev->next);
+
+ gdev->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(gdev->base))
+ return PTR_ERR(gdev->base);
+
+ of_id = of_match_node(brcmstb_gisb_arb_of_match, dn);
+ if (!of_id) {
+ pr_err("failed to look up compatible string\n");
+ return -EINVAL;
+ }
+ gdev->gisb_offsets = of_id->data;
+ gdev->big_endian = of_device_is_big_endian(dn);
+
+ err = devm_request_irq(&pdev->dev, timeout_irq,
+ brcmstb_gisb_timeout_handler, 0, pdev->name,
+ gdev);
+ if (err < 0)
+ return err;
+
+ err = devm_request_irq(&pdev->dev, tea_irq,
+ brcmstb_gisb_tea_handler, 0, pdev->name,
+ gdev);
+ if (err < 0)
+ return err;
+
+ /* Interrupt is optional */
+ if (bp_irq > 0) {
+ err = devm_request_irq(&pdev->dev, bp_irq,
+ brcmstb_gisb_bp_handler, 0, pdev->name,
+ gdev);
+ if (err < 0)
+ return err;
+ }
+
+ /* If we do not have a valid mask, assume all masters are enabled */
+ if (of_property_read_u32(dn, "brcm,gisb-arb-master-mask",
+ &gdev->valid_mask))
+ gdev->valid_mask = 0xffffffff;
+
+ /* Proceed with reading the literal names if we agree on the
+ * number of masters
+ */
+ num_masters = of_property_count_strings(dn,
+ "brcm,gisb-arb-master-names");
+ if (hweight_long(gdev->valid_mask) == num_masters) {
+ first = ffs(gdev->valid_mask) - 1;
+ last = fls(gdev->valid_mask) - 1;
+
+ for (i = first; i < last; i++) {
+ if (!(gdev->valid_mask & BIT(i)))
+ continue;
+
+ of_property_read_string_index(dn,
+ "brcm,gisb-arb-master-names", j,
+ &gdev->master_names[i]);
+ j++;
+ }
+ }
+
+ err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, gdev);
+
+ list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
+
+#ifdef CONFIG_MIPS
+ mips_set_be_handler(brcmstb_bus_error_handler);
+#endif
+
+ if (list_is_singular(&brcmstb_gisb_arb_device_list)) {
+ register_die_notifier(&gisb_die_notifier);
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &gisb_panic_notifier);
+ }
+
+ dev_info(&pdev->dev, "registered irqs: %d, %d\n",
+ timeout_irq, tea_irq);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int brcmstb_gisb_arb_suspend(struct device *dev)
+{
+ struct brcmstb_gisb_arb_device *gdev = dev_get_drvdata(dev);
+
+ gdev->saved_timeout = gisb_read(gdev, ARB_TIMER);
+
+ return 0;
+}
+
+/* Make sure we provide the same timeout value that was configured before, and
+ * do this before the GISB timeout interrupt handler has any chance to run.
+ */
+static int brcmstb_gisb_arb_resume_noirq(struct device *dev)
+{
+ struct brcmstb_gisb_arb_device *gdev = dev_get_drvdata(dev);
+
+ gisb_write(gdev, gdev->saved_timeout, ARB_TIMER);
+
+ return 0;
+}
+#else
+#define brcmstb_gisb_arb_suspend NULL
+#define brcmstb_gisb_arb_resume_noirq NULL
+#endif
+
+static const struct dev_pm_ops brcmstb_gisb_arb_pm_ops = {
+ .suspend = brcmstb_gisb_arb_suspend,
+ .resume_noirq = brcmstb_gisb_arb_resume_noirq,
+};
+
+static struct platform_driver brcmstb_gisb_arb_driver = {
+ .driver = {
+ .name = "brcm-gisb-arb",
+ .of_match_table = brcmstb_gisb_arb_of_match,
+ .pm = &brcmstb_gisb_arb_pm_ops,
+ },
+};
+
+static int __init brcm_gisb_driver_init(void)
+{
+ return platform_driver_probe(&brcmstb_gisb_arb_driver,
+ brcmstb_gisb_arb_probe);
+}
+
+module_init(brcm_gisb_driver_init);
+
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("Broadcom STB GISB arbiter driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/bt1-apb.c b/drivers/bus/bt1-apb.c
new file mode 100644
index 0000000000..e97c1d1c75
--- /dev/null
+++ b/drivers/bus/bt1-apb.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 APB-bus driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/atomic.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/nmi.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/time64.h>
+#include <linux/sysfs.h>
+
+#define APB_EHB_ISR 0x00
+#define APB_EHB_ISR_PENDING BIT(0)
+#define APB_EHB_ISR_MASK BIT(1)
+#define APB_EHB_ADDR 0x04
+#define APB_EHB_TIMEOUT 0x08
+
+#define APB_EHB_TIMEOUT_MIN 0x000003FFU
+#define APB_EHB_TIMEOUT_MAX 0xFFFFFFFFU
+
+/*
+ * struct bt1_apb - Baikal-T1 APB EHB private data
+ * @dev: Pointer to the device structure.
+ * @regs: APB EHB registers map.
+ * @res: No-device error injection memory region.
+ * @irq: Errors IRQ number.
+ * @rate: APB-bus reference clock rate.
+ * @pclk: APB-reference clock.
+ * @prst: APB domain reset line.
+ * @count: Number of errors detected.
+ */
+struct bt1_apb {
+ struct device *dev;
+
+ struct regmap *regs;
+ void __iomem *res;
+ int irq;
+
+ unsigned long rate;
+ struct clk *pclk;
+
+ struct reset_control *prst;
+
+ atomic_t count;
+};
+
+static const struct regmap_config bt1_apb_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = APB_EHB_TIMEOUT,
+ .fast_io = true
+};
+
+static inline unsigned long bt1_apb_n_to_timeout_us(struct bt1_apb *apb, u32 n)
+{
+ u64 timeout = (u64)n * USEC_PER_SEC;
+
+ do_div(timeout, apb->rate);
+
+ return timeout;
+
+}
+
+static inline unsigned long bt1_apb_timeout_to_n_us(struct bt1_apb *apb,
+ unsigned long timeout)
+{
+ u64 n = (u64)timeout * apb->rate;
+
+ do_div(n, USEC_PER_SEC);
+
+ return n;
+
+}
+
+static irqreturn_t bt1_apb_isr(int irq, void *data)
+{
+ struct bt1_apb *apb = data;
+ u32 addr = 0;
+
+ regmap_read(apb->regs, APB_EHB_ADDR, &addr);
+
+ dev_crit_ratelimited(apb->dev,
+ "APB-bus fault %d: Slave access timeout at 0x%08x\n",
+ atomic_inc_return(&apb->count),
+ addr);
+
+ /*
+ * Print backtrace on each CPU. This might be pointless if the fault
+ * has happened on the same CPU as the IRQ handler is executed or
+ * the other core proceeded further execution despite the error.
+ * But if it's not, by looking at the trace we would get straight to
+ * the cause of the problem.
+ */
+ trigger_all_cpu_backtrace();
+
+ regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING, 0);
+
+ return IRQ_HANDLED;
+}
+
+static void bt1_apb_clear_data(void *data)
+{
+ struct bt1_apb *apb = data;
+ struct platform_device *pdev = to_platform_device(apb->dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static struct bt1_apb *bt1_apb_create_data(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bt1_apb *apb;
+ int ret;
+
+ apb = devm_kzalloc(dev, sizeof(*apb), GFP_KERNEL);
+ if (!apb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = devm_add_action(dev, bt1_apb_clear_data, apb);
+ if (ret) {
+ dev_err(dev, "Can't add APB EHB data clear action\n");
+ return ERR_PTR(ret);
+ }
+
+ apb->dev = dev;
+ atomic_set(&apb->count, 0);
+ platform_set_drvdata(pdev, apb);
+
+ return apb;
+}
+
+static int bt1_apb_request_regs(struct bt1_apb *apb)
+{
+ struct platform_device *pdev = to_platform_device(apb->dev);
+ void __iomem *regs;
+
+ regs = devm_platform_ioremap_resource_byname(pdev, "ehb");
+ if (IS_ERR(regs)) {
+ dev_err(apb->dev, "Couldn't map APB EHB registers\n");
+ return PTR_ERR(regs);
+ }
+
+ apb->regs = devm_regmap_init_mmio(apb->dev, regs, &bt1_apb_regmap_cfg);
+ if (IS_ERR(apb->regs)) {
+ dev_err(apb->dev, "Couldn't create APB EHB regmap\n");
+ return PTR_ERR(apb->regs);
+ }
+
+ apb->res = devm_platform_ioremap_resource_byname(pdev, "nodev");
+ if (IS_ERR(apb->res))
+ dev_err(apb->dev, "Couldn't map reserved region\n");
+
+ return PTR_ERR_OR_ZERO(apb->res);
+}
+
+static int bt1_apb_request_rst(struct bt1_apb *apb)
+{
+ int ret;
+
+ apb->prst = devm_reset_control_get_optional_exclusive(apb->dev, "prst");
+ if (IS_ERR(apb->prst))
+ return dev_err_probe(apb->dev, PTR_ERR(apb->prst),
+ "Couldn't get reset control line\n");
+
+ ret = reset_control_deassert(apb->prst);
+ if (ret)
+ dev_err(apb->dev, "Failed to deassert the reset line\n");
+
+ return ret;
+}
+
+static void bt1_apb_disable_clk(void *data)
+{
+ struct bt1_apb *apb = data;
+
+ clk_disable_unprepare(apb->pclk);
+}
+
+static int bt1_apb_request_clk(struct bt1_apb *apb)
+{
+ int ret;
+
+ apb->pclk = devm_clk_get(apb->dev, "pclk");
+ if (IS_ERR(apb->pclk))
+ return dev_err_probe(apb->dev, PTR_ERR(apb->pclk),
+ "Couldn't get APB clock descriptor\n");
+
+ ret = clk_prepare_enable(apb->pclk);
+ if (ret) {
+ dev_err(apb->dev, "Couldn't enable the APB clock\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(apb->dev, bt1_apb_disable_clk, apb);
+ if (ret) {
+ dev_err(apb->dev, "Can't add APB EHB clocks disable action\n");
+ return ret;
+ }
+
+ apb->rate = clk_get_rate(apb->pclk);
+ if (!apb->rate) {
+ dev_err(apb->dev, "Invalid clock rate\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void bt1_apb_clear_irq(void *data)
+{
+ struct bt1_apb *apb = data;
+
+ regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_MASK, 0);
+}
+
+static int bt1_apb_request_irq(struct bt1_apb *apb)
+{
+ struct platform_device *pdev = to_platform_device(apb->dev);
+ int ret;
+
+ apb->irq = platform_get_irq(pdev, 0);
+ if (apb->irq < 0)
+ return apb->irq;
+
+ ret = devm_request_irq(apb->dev, apb->irq, bt1_apb_isr, IRQF_SHARED,
+ "bt1-apb", apb);
+ if (ret) {
+ dev_err(apb->dev, "Couldn't request APB EHB IRQ\n");
+ return ret;
+ }
+
+ ret = devm_add_action(apb->dev, bt1_apb_clear_irq, apb);
+ if (ret) {
+ dev_err(apb->dev, "Can't add APB EHB IRQs clear action\n");
+ return ret;
+ }
+
+ /* Unmask the IRQ and clear its pending flag. */
+ regmap_update_bits(apb->regs, APB_EHB_ISR,
+ APB_EHB_ISR_PENDING | APB_EHB_ISR_MASK,
+ APB_EHB_ISR_MASK);
+
+ return 0;
+}
+
+static ssize_t count_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&apb->count));
+}
+static DEVICE_ATTR_RO(count);
+
+static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+ unsigned long timeout;
+ int ret;
+ u32 n;
+
+ ret = regmap_read(apb->regs, APB_EHB_TIMEOUT, &n);
+ if (ret)
+ return ret;
+
+ timeout = bt1_apb_n_to_timeout_us(apb, n);
+
+ return scnprintf(buf, PAGE_SIZE, "%lu\n", timeout);
+}
+
+static ssize_t timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+ unsigned long timeout;
+ int ret;
+ u32 n;
+
+ if (kstrtoul(buf, 0, &timeout) < 0)
+ return -EINVAL;
+
+ n = bt1_apb_timeout_to_n_us(apb, timeout);
+ n = clamp(n, APB_EHB_TIMEOUT_MIN, APB_EHB_TIMEOUT_MAX);
+
+ ret = regmap_write(apb->regs, APB_EHB_TIMEOUT, n);
+
+ return ret ?: count;
+}
+static DEVICE_ATTR_RW(timeout);
+
+static ssize_t inject_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "Error injection: nodev irq\n");
+}
+
+static ssize_t inject_error_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *data, size_t count)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+
+ /*
+ * Either dummy read from the unmapped address in the APB IO area
+ * or manually set the IRQ status.
+ */
+ if (sysfs_streq(data, "nodev"))
+ readl(apb->res);
+ else if (sysfs_streq(data, "irq"))
+ regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING,
+ APB_EHB_ISR_PENDING);
+ else
+ return -EINVAL;
+
+ return count;
+}
+static DEVICE_ATTR_RW(inject_error);
+
+static struct attribute *bt1_apb_sysfs_attrs[] = {
+ &dev_attr_count.attr,
+ &dev_attr_timeout.attr,
+ &dev_attr_inject_error.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(bt1_apb_sysfs);
+
+static void bt1_apb_remove_sysfs(void *data)
+{
+ struct bt1_apb *apb = data;
+
+ device_remove_groups(apb->dev, bt1_apb_sysfs_groups);
+}
+
+static int bt1_apb_init_sysfs(struct bt1_apb *apb)
+{
+ int ret;
+
+ ret = device_add_groups(apb->dev, bt1_apb_sysfs_groups);
+ if (ret) {
+ dev_err(apb->dev, "Failed to create EHB APB sysfs nodes\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(apb->dev, bt1_apb_remove_sysfs, apb);
+ if (ret)
+ dev_err(apb->dev, "Can't add APB EHB sysfs remove action\n");
+
+ return ret;
+}
+
+static int bt1_apb_probe(struct platform_device *pdev)
+{
+ struct bt1_apb *apb;
+ int ret;
+
+ apb = bt1_apb_create_data(pdev);
+ if (IS_ERR(apb))
+ return PTR_ERR(apb);
+
+ ret = bt1_apb_request_regs(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_request_rst(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_request_clk(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_request_irq(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_init_sysfs(apb);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id bt1_apb_of_match[] = {
+ { .compatible = "baikal,bt1-apb" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bt1_apb_of_match);
+
+static struct platform_driver bt1_apb_driver = {
+ .probe = bt1_apb_probe,
+ .driver = {
+ .name = "bt1-apb",
+ .of_match_table = bt1_apb_of_match
+ }
+};
+module_platform_driver(bt1_apb_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 APB-bus driver");
diff --git a/drivers/bus/bt1-axi.c b/drivers/bus/bt1-axi.c
new file mode 100644
index 0000000000..4007e7322c
--- /dev/null
+++ b/drivers/bus/bt1-axi.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 AXI-bus driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/atomic.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/nmi.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/sysfs.h>
+
+#define BT1_AXI_WERRL 0x110
+#define BT1_AXI_WERRH 0x114
+#define BT1_AXI_WERRH_TYPE BIT(23)
+#define BT1_AXI_WERRH_ADDR_FLD 24
+#define BT1_AXI_WERRH_ADDR_MASK GENMASK(31, BT1_AXI_WERRH_ADDR_FLD)
+
+/*
+ * struct bt1_axi - Baikal-T1 AXI-bus private data
+ * @dev: Pointer to the device structure.
+ * @qos_regs: AXI Interconnect QoS tuning registers.
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ * @irq: Errors IRQ number.
+ * @aclk: AXI reference clock.
+ * @arst: AXI Interconnect reset line.
+ * @count: Number of errors detected.
+ */
+struct bt1_axi {
+ struct device *dev;
+
+ void __iomem *qos_regs;
+ struct regmap *sys_regs;
+ int irq;
+
+ struct clk *aclk;
+
+ struct reset_control *arst;
+
+ atomic_t count;
+};
+
+static irqreturn_t bt1_axi_isr(int irq, void *data)
+{
+ struct bt1_axi *axi = data;
+ u32 low = 0, high = 0;
+
+ regmap_read(axi->sys_regs, BT1_AXI_WERRL, &low);
+ regmap_read(axi->sys_regs, BT1_AXI_WERRH, &high);
+
+ dev_crit_ratelimited(axi->dev,
+ "AXI-bus fault %d: %s at 0x%x%08x\n",
+ atomic_inc_return(&axi->count),
+ high & BT1_AXI_WERRH_TYPE ? "no slave" : "slave protocol error",
+ high, low);
+
+ /*
+	 * Print a backtrace on each CPU. This might be pointless if the
+	 * fault happened on the CPU executing this IRQ handler, or if the
+	 * other core has already moved past the faulting access despite
+	 * the error. Otherwise the trace points straight at the cause of
+	 * the problem.
+ */
+ trigger_all_cpu_backtrace();
+
+ return IRQ_HANDLED;
+}
+
+static void bt1_axi_clear_data(void *data)
+{
+ struct bt1_axi *axi = data;
+ struct platform_device *pdev = to_platform_device(axi->dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static struct bt1_axi *bt1_axi_create_data(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bt1_axi *axi;
+ int ret;
+
+ axi = devm_kzalloc(dev, sizeof(*axi), GFP_KERNEL);
+ if (!axi)
+ return ERR_PTR(-ENOMEM);
+
+ ret = devm_add_action(dev, bt1_axi_clear_data, axi);
+ if (ret) {
+ dev_err(dev, "Can't add AXI EHB data clear action\n");
+ return ERR_PTR(ret);
+ }
+
+ axi->dev = dev;
+ atomic_set(&axi->count, 0);
+ platform_set_drvdata(pdev, axi);
+
+ return axi;
+}
+
+static int bt1_axi_request_regs(struct bt1_axi *axi)
+{
+ struct platform_device *pdev = to_platform_device(axi->dev);
+ struct device *dev = axi->dev;
+
+ axi->sys_regs = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+ if (IS_ERR(axi->sys_regs)) {
+ dev_err(dev, "Couldn't find syscon registers\n");
+ return PTR_ERR(axi->sys_regs);
+ }
+
+ axi->qos_regs = devm_platform_ioremap_resource_byname(pdev, "qos");
+ if (IS_ERR(axi->qos_regs))
+ dev_err(dev, "Couldn't map AXI-bus QoS registers\n");
+
+ return PTR_ERR_OR_ZERO(axi->qos_regs);
+}
+
+static int bt1_axi_request_rst(struct bt1_axi *axi)
+{
+ int ret;
+
+ axi->arst = devm_reset_control_get_optional_exclusive(axi->dev, "arst");
+ if (IS_ERR(axi->arst))
+ return dev_err_probe(axi->dev, PTR_ERR(axi->arst),
+ "Couldn't get reset control line\n");
+
+ ret = reset_control_deassert(axi->arst);
+ if (ret)
+ dev_err(axi->dev, "Failed to deassert the reset line\n");
+
+ return ret;
+}
+
+static void bt1_axi_disable_clk(void *data)
+{
+ struct bt1_axi *axi = data;
+
+ clk_disable_unprepare(axi->aclk);
+}
+
+static int bt1_axi_request_clk(struct bt1_axi *axi)
+{
+ int ret;
+
+ axi->aclk = devm_clk_get(axi->dev, "aclk");
+ if (IS_ERR(axi->aclk))
+ return dev_err_probe(axi->dev, PTR_ERR(axi->aclk),
+ "Couldn't get AXI Interconnect clock\n");
+
+ ret = clk_prepare_enable(axi->aclk);
+ if (ret) {
+ dev_err(axi->dev, "Couldn't enable the AXI clock\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(axi->dev, bt1_axi_disable_clk, axi);
+ if (ret)
+ dev_err(axi->dev, "Can't add AXI clock disable action\n");
+
+ return ret;
+}
+
+static int bt1_axi_request_irq(struct bt1_axi *axi)
+{
+ struct platform_device *pdev = to_platform_device(axi->dev);
+ int ret;
+
+ axi->irq = platform_get_irq(pdev, 0);
+ if (axi->irq < 0)
+ return axi->irq;
+
+ ret = devm_request_irq(axi->dev, axi->irq, bt1_axi_isr, IRQF_SHARED,
+ "bt1-axi", axi);
+ if (ret)
+ dev_err(axi->dev, "Couldn't request AXI EHB IRQ\n");
+
+ return ret;
+}
+
+static ssize_t count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bt1_axi *axi = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&axi->count));
+}
+static DEVICE_ATTR_RO(count);
+
+static ssize_t inject_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "Error injection: bus unaligned\n");
+}
+
+static ssize_t inject_error_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *data, size_t count)
+{
+ struct bt1_axi *axi = dev_get_drvdata(dev);
+
+ /*
+	 * An unaligned read from this memory causes a CM2 bus error, while
+	 * an unaligned write causes the AXI-bus write error that is handled
+	 * by this driver.
+ */
+ if (sysfs_streq(data, "bus"))
+ readb(axi->qos_regs);
+ else if (sysfs_streq(data, "unaligned"))
+ writeb(0, axi->qos_regs);
+ else
+ return -EINVAL;
+
+ return count;
+}
+static DEVICE_ATTR_RW(inject_error);
+
+static struct attribute *bt1_axi_sysfs_attrs[] = {
+ &dev_attr_count.attr,
+ &dev_attr_inject_error.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(bt1_axi_sysfs);
+
+static void bt1_axi_remove_sysfs(void *data)
+{
+ struct bt1_axi *axi = data;
+
+ device_remove_groups(axi->dev, bt1_axi_sysfs_groups);
+}
+
+static int bt1_axi_init_sysfs(struct bt1_axi *axi)
+{
+ int ret;
+
+ ret = device_add_groups(axi->dev, bt1_axi_sysfs_groups);
+ if (ret) {
+ dev_err(axi->dev, "Failed to add sysfs files group\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(axi->dev, bt1_axi_remove_sysfs, axi);
+ if (ret)
+ dev_err(axi->dev, "Can't add AXI EHB sysfs remove action\n");
+
+ return ret;
+}
+
+static int bt1_axi_probe(struct platform_device *pdev)
+{
+ struct bt1_axi *axi;
+ int ret;
+
+ axi = bt1_axi_create_data(pdev);
+ if (IS_ERR(axi))
+ return PTR_ERR(axi);
+
+ ret = bt1_axi_request_regs(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_request_rst(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_request_clk(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_request_irq(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_init_sysfs(axi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id bt1_axi_of_match[] = {
+ { .compatible = "baikal,bt1-axi" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bt1_axi_of_match);
+
+static struct platform_driver bt1_axi_driver = {
+ .probe = bt1_axi_probe,
+ .driver = {
+ .name = "bt1-axi",
+ .of_match_table = bt1_axi_of_match
+ }
+};
+module_platform_driver(bt1_axi_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 AXI-bus driver");
diff --git a/drivers/bus/da8xx-mstpri.c b/drivers/bus/da8xx-mstpri.c
new file mode 100644
index 0000000000..ee4c023351
--- /dev/null
+++ b/drivers/bus/da8xx-mstpri.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI da8xx master peripheral priority driver
+ *
+ * Copyright (C) 2016 BayLibre SAS
+ *
+ * Author:
+ * Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/regmap.h>
+
+/*
+ * REVISIT: Linux doesn't have a good framework for the kind of performance
+ * knobs this driver controls. We can't use device tree properties as it deals
+ * with hardware configuration rather than description. We also don't want to
+ * commit to maintaining some random sysfs attributes.
+ *
+ * For now we just hardcode the register values for the boards that need
+ * some changes (as is the case for the LCD controller on da850-lcdk - the
+ * first board we support here). When linux gets an appropriate framework,
+ * we'll easily convert the driver to it.
+ */
+
+#define DA8XX_MSTPRI0_OFFSET 0
+#define DA8XX_MSTPRI1_OFFSET 4
+#define DA8XX_MSTPRI2_OFFSET 8
+
+enum {
+ DA8XX_MSTPRI_ARM_I = 0,
+ DA8XX_MSTPRI_ARM_D,
+ DA8XX_MSTPRI_UPP,
+ DA8XX_MSTPRI_SATA,
+ DA8XX_MSTPRI_PRU0,
+ DA8XX_MSTPRI_PRU1,
+ DA8XX_MSTPRI_EDMA30TC0,
+ DA8XX_MSTPRI_EDMA30TC1,
+ DA8XX_MSTPRI_EDMA31TC0,
+ DA8XX_MSTPRI_VPIF_DMA_0,
+ DA8XX_MSTPRI_VPIF_DMA_1,
+ DA8XX_MSTPRI_EMAC,
+ DA8XX_MSTPRI_USB0CFG,
+ DA8XX_MSTPRI_USB0CDMA,
+ DA8XX_MSTPRI_UHPI,
+ DA8XX_MSTPRI_USB1,
+ DA8XX_MSTPRI_LCDC,
+};
+
+struct da8xx_mstpri_descr {
+ int reg;
+ int shift;
+ int mask;
+};
+
+static const struct da8xx_mstpri_descr da8xx_mstpri_priority_list[] = {
+ [DA8XX_MSTPRI_ARM_I] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 0,
+ .mask = 0x0000000f,
+ },
+ [DA8XX_MSTPRI_ARM_D] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 4,
+ .mask = 0x000000f0,
+ },
+ [DA8XX_MSTPRI_UPP] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 16,
+ .mask = 0x000f0000,
+ },
+ [DA8XX_MSTPRI_SATA] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 20,
+ .mask = 0x00f00000,
+ },
+ [DA8XX_MSTPRI_PRU0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 0,
+ .mask = 0x0000000f,
+ },
+ [DA8XX_MSTPRI_PRU1] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 4,
+ .mask = 0x000000f0,
+ },
+ [DA8XX_MSTPRI_EDMA30TC0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 8,
+ .mask = 0x00000f00,
+ },
+ [DA8XX_MSTPRI_EDMA30TC1] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 12,
+ .mask = 0x0000f000,
+ },
+ [DA8XX_MSTPRI_EDMA31TC0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 16,
+ .mask = 0x000f0000,
+ },
+ [DA8XX_MSTPRI_VPIF_DMA_0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 24,
+ .mask = 0x0f000000,
+ },
+ [DA8XX_MSTPRI_VPIF_DMA_1] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 28,
+ .mask = 0xf0000000,
+ },
+ [DA8XX_MSTPRI_EMAC] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 0,
+ .mask = 0x0000000f,
+ },
+ [DA8XX_MSTPRI_USB0CFG] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 8,
+ .mask = 0x00000f00,
+ },
+ [DA8XX_MSTPRI_USB0CDMA] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 12,
+ .mask = 0x0000f000,
+ },
+ [DA8XX_MSTPRI_UHPI] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 20,
+ .mask = 0x00f00000,
+ },
+ [DA8XX_MSTPRI_USB1] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 24,
+ .mask = 0x0f000000,
+ },
+ [DA8XX_MSTPRI_LCDC] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 28,
+ .mask = 0xf0000000,
+ },
+};
+
+struct da8xx_mstpri_priority {
+ int which;
+ u32 val;
+};
+
+struct da8xx_mstpri_board_priorities {
+ const char *board;
+ const struct da8xx_mstpri_priority *priorities;
+ size_t numprio;
+};
+
+/*
+ * Default memory settings of da850 do not meet the throughput/latency
+ * requirements of tilcdc. This results in the image displayed being
+ * incorrect and the following warning being displayed by the LCDC
+ * drm driver:
+ *
+ * tilcdc da8xx_lcdc.0: tilcdc_crtc_irq(0x00000020): FIFO underfow
+ */
+static const struct da8xx_mstpri_priority da850_lcdk_priorities[] = {
+ {
+ .which = DA8XX_MSTPRI_LCDC,
+ .val = 0,
+ },
+ {
+ .which = DA8XX_MSTPRI_EDMA30TC1,
+ .val = 0,
+ },
+ {
+ .which = DA8XX_MSTPRI_EDMA30TC0,
+ .val = 1,
+ },
+};
+
+static const struct da8xx_mstpri_board_priorities da8xx_mstpri_board_confs[] = {
+ {
+ .board = "ti,da850-lcdk",
+ .priorities = da850_lcdk_priorities,
+ .numprio = ARRAY_SIZE(da850_lcdk_priorities),
+ },
+};
+
+static const struct da8xx_mstpri_board_priorities *
+da8xx_mstpri_get_board_prio(void)
+{
+ const struct da8xx_mstpri_board_priorities *board_prio;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(da8xx_mstpri_board_confs); i++) {
+ board_prio = &da8xx_mstpri_board_confs[i];
+
+ if (of_machine_is_compatible(board_prio->board))
+ return board_prio;
+ }
+
+ return NULL;
+}
+
+static int da8xx_mstpri_probe(struct platform_device *pdev)
+{
+ const struct da8xx_mstpri_board_priorities *prio_list;
+ const struct da8xx_mstpri_descr *prio_descr;
+ const struct da8xx_mstpri_priority *prio;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *mstpri;
+ u32 reg;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mstpri = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mstpri)) {
+ dev_err(dev, "unable to map MSTPRI registers\n");
+ return PTR_ERR(mstpri);
+ }
+
+ prio_list = da8xx_mstpri_get_board_prio();
+ if (!prio_list) {
+ dev_err(dev, "no master priorities defined for this board\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < prio_list->numprio; i++) {
+ prio = &prio_list->priorities[i];
+ prio_descr = &da8xx_mstpri_priority_list[prio->which];
+
+ if (prio_descr->reg + sizeof(u32) > resource_size(res)) {
+ dev_warn(dev, "register offset out of range\n");
+ continue;
+ }
+
+ reg = readl(mstpri + prio_descr->reg);
+ reg &= ~prio_descr->mask;
+ reg |= prio->val << prio_descr->shift;
+
+ writel(reg, mstpri + prio_descr->reg);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id da8xx_mstpri_of_match[] = {
+ { .compatible = "ti,da850-mstpri", },
+ { },
+};
+
+static struct platform_driver da8xx_mstpri_driver = {
+ .probe = da8xx_mstpri_probe,
+ .driver = {
+ .name = "da8xx-mstpri",
+ .of_match_table = da8xx_mstpri_of_match,
+ },
+};
+module_platform_driver(da8xx_mstpri_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_DESCRIPTION("TI da8xx master peripheral priority driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/fsl-mc/Kconfig b/drivers/bus/fsl-mc/Kconfig
new file mode 100644
index 0000000000..9492342e7d
--- /dev/null
+++ b/drivers/bus/fsl-mc/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# DPAA2 fsl-mc bus
+#
+# Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+#
+
+config FSL_MC_BUS
+ bool "QorIQ DPAA2 fsl-mc bus driver"
+ depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86_LOCAL_APIC || PPC)))
+ select GENERIC_MSI_IRQ
+ help
+ Driver to enable the bus infrastructure for the QorIQ DPAA2
+ architecture. The fsl-mc bus driver handles discovery of
+ DPAA2 objects (which are represented as Linux devices) and
+ binding objects to drivers.
+
+config FSL_MC_UAPI_SUPPORT
+ bool "Management Complex (MC) userspace support"
+ depends on FSL_MC_BUS
+ help
+ Provides userspace support for interrogating, creating, destroying or
+ configuring DPAA2 objects exported by the Management Complex.
diff --git a/drivers/bus/fsl-mc/Makefile b/drivers/bus/fsl-mc/Makefile
new file mode 100644
index 0000000000..8929462455
--- /dev/null
+++ b/drivers/bus/fsl-mc/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Freescale Management Complex (MC) bus drivers
+#
+# Copyright (C) 2014 Freescale Semiconductor, Inc.
+#
+obj-$(CONFIG_FSL_MC_BUS) += mc-bus-driver.o
+
+mc-bus-driver-objs := fsl-mc-bus.o \
+ mc-sys.o \
+ mc-io.o \
+ dpbp.o \
+ dpcon.o \
+ dprc.o \
+ dprc-driver.o \
+ fsl-mc-allocator.o \
+ fsl-mc-msi.o \
+ dpmcp.o \
+ obj-api.o
+
+# MC userspace support
+obj-$(CONFIG_FSL_MC_UAPI_SUPPORT) += fsl-mc-uapi.o
diff --git a/drivers/bus/fsl-mc/dpbp.c b/drivers/bus/fsl-mc/dpbp.c
new file mode 100644
index 0000000000..9003cd3698
--- /dev/null
+++ b/drivers/bus/fsl-mc/dpbp.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+/**
+ * dpbp_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpbp_id: DPBP unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpbp_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpbp_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpbp_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN,
+ cmd_flags, 0);
+ cmd_params = (struct dpbp_cmd_open *)cmd.params;
+ cmd_params->dpbp_id = cpu_to_le32(dpbp_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dpbp_open);
+
+/**
+ * dpbp_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpbp_close);
+
+/**
+ * dpbp_enable() - Enable the DPBP.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpbp_enable);
+
+/**
+ * dpbp_disable() - Disable the DPBP.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpbp_disable);
+
+/**
+ * dpbp_reset() - Reset the DPBP, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpbp_reset);
+
+/**
+ * dpbp_get_attributes() - Retrieve DPBP attributes.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpbp_rsp_get_attributes *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params;
+ attr->bpid = le16_to_cpu(rsp_params->bpid);
+ attr->id = le32_to_cpu(rsp_params->id);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpbp_get_attributes);
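
The DPBP calls above follow the usual open/configure/close model of MC objects. A
condensed sketch of how a consumer driver might chain them is shown below; obtaining
mc_io and the object id from an already probed fsl_mc_device is assumed, and error
handling is abbreviated.

/* Sketch of typical DPBP usage from a consumer driver (assumed context). */
static int example_dpbp_setup(struct fsl_mc_device *dpbp_dev)
{
	struct dpbp_attr attr;
	u16 token;
	int err;

	err = dpbp_open(dpbp_dev->mc_io, 0, dpbp_dev->obj_desc.id, &token);
	if (err)
		return err;

	err = dpbp_enable(dpbp_dev->mc_io, 0, token);
	if (err)
		goto close;

	err = dpbp_get_attributes(dpbp_dev->mc_io, 0, token, &attr);
	if (err)
		goto disable;

	/* attr.id and attr.bpid are now available to the caller */
	dev_info(&dpbp_dev->dev, "DPBP %d: bpid %u\n", attr.id, attr.bpid);
	return 0;

disable:
	dpbp_disable(dpbp_dev->mc_io, 0, token);
close:
	dpbp_close(dpbp_dev->mc_io, 0, token);
	return err;
}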
diff --git a/drivers/bus/fsl-mc/dpcon.c b/drivers/bus/fsl-mc/dpcon.c
new file mode 100644
index 0000000000..97b6fa605e
--- /dev/null
+++ b/drivers/bus/fsl-mc/dpcon.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+/**
+ * dpcon_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpcon_id: DPCON unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpcon_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpcon_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpcon_cmd_open *dpcon_cmd;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ dpcon_cmd = (struct dpcon_cmd_open *)cmd.params;
+ dpcon_cmd->dpcon_id = cpu_to_le32(dpcon_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpcon_open);
+
+/**
+ * dpcon_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpcon_close);
+
+/**
+ * dpcon_enable() - Enable the DPCON
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpcon_enable);
+
+/**
+ * dpcon_disable() - Disable the DPCON
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpcon_disable);
+
+/**
+ * dpcon_reset() - Reset the DPCON, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpcon_reset);
+
+/**
+ * dpcon_get_attributes() - Retrieve DPCON attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ * @attr: Object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpcon_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpcon_rsp_get_attr *dpcon_rsp;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ dpcon_rsp = (struct dpcon_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(dpcon_rsp->id);
+ attr->qbman_ch_id = le16_to_cpu(dpcon_rsp->qbman_ch_id);
+ attr->num_priorities = dpcon_rsp->num_priorities;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpcon_get_attributes);
+
+/**
+ * dpcon_set_notification() - Set DPCON notification destination
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ * @cfg: Notification parameters
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_set_notification(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpcon_notification_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpcon_cmd_set_notification *dpcon_cmd;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION,
+ cmd_flags,
+ token);
+ dpcon_cmd = (struct dpcon_cmd_set_notification *)cmd.params;
+ dpcon_cmd->dpio_id = cpu_to_le32(cfg->dpio_id);
+ dpcon_cmd->priority = cfg->priority;
+ dpcon_cmd->user_ctx = cpu_to_le64(cfg->user_ctx);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpcon_set_notification);
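
dpcon_set_notification() is the call that points a DPCON at a DPIO channel. A sketch of
the typical call order follows; the dpio_id and user context are assumed to come from
the calling driver, and whether the control session is kept open afterwards is the
consumer's choice.

/* Sketch: route DPCON notifications to a DPIO channel; values are placeholders. */
static int example_dpcon_route(struct fsl_mc_io *mc_io, int dpcon_id,
			       int dpio_id, u64 user_ctx)
{
	struct dpcon_notification_cfg cfg = { 0 };
	struct dpcon_attr attr;
	u16 token;
	int err;

	err = dpcon_open(mc_io, 0, dpcon_id, &token);
	if (err)
		return err;

	err = dpcon_get_attributes(mc_io, 0, token, &attr);
	if (err)
		goto close;

	cfg.dpio_id = dpio_id;
	cfg.priority = 0;		/* valid range: 0 .. attr.num_priorities - 1 */
	cfg.user_ctx = user_ctx;

	err = dpcon_set_notification(mc_io, 0, token, &cfg);
	if (err)
		goto close;

	err = dpcon_enable(mc_io, 0, token);
close:
	dpcon_close(mc_io, 0, token);	/* ends the control session only */
	return err;
}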
diff --git a/drivers/bus/fsl-mc/dpmcp.c b/drivers/bus/fsl-mc/dpmcp.c
new file mode 100644
index 0000000000..5fbd0dbde2
--- /dev/null
+++ b/drivers/bus/fsl-mc/dpmcp.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+/**
+ * dpmcp_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpmcp_id: DPMCP unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpmcp_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmcp_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpmcp_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpmcp_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_OPEN,
+ cmd_flags, 0);
+ cmd_params = (struct dpmcp_cmd_open *)cmd.params;
+ cmd_params->dpmcp_id = cpu_to_le32(dpmcp_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return err;
+}
+
+/**
+ * dpmcp_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMCP object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmcp_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLOSE,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpmcp_reset() - Reset the DPMCP, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMCP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmcp_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
new file mode 100644
index 0000000000..4b68c84ef4
--- /dev/null
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -0,0 +1,889 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale data path resource container (DPRC) driver
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2019-2020 NXP
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+#define FSL_MC_DPRC_DRIVER_NAME "fsl_mc_dprc"
+
+struct fsl_mc_child_objs {
+ int child_count;
+ struct fsl_mc_obj_desc *child_array;
+};
+
+static bool fsl_mc_device_match(struct fsl_mc_device *mc_dev,
+ struct fsl_mc_obj_desc *obj_desc)
+{
+ return mc_dev->obj_desc.id == obj_desc->id &&
+ strcmp(mc_dev->obj_desc.type, obj_desc->type) == 0;
+}
+
+static bool fsl_mc_obj_desc_is_allocatable(struct fsl_mc_obj_desc *obj)
+{
+ if (strcmp(obj->type, "dpmcp") == 0 ||
+ strcmp(obj->type, "dpcon") == 0 ||
+ strcmp(obj->type, "dpbp") == 0)
+ return true;
+ else
+ return false;
+}
+
+static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
+{
+ int i;
+ struct fsl_mc_child_objs *objs;
+ struct fsl_mc_device *mc_dev;
+
+ if (!dev_is_fsl_mc(dev))
+ return 0;
+
+ mc_dev = to_fsl_mc_device(dev);
+ objs = data;
+
+ for (i = 0; i < objs->child_count; i++) {
+ struct fsl_mc_obj_desc *obj_desc = &objs->child_array[i];
+
+ if (strlen(obj_desc->type) != 0 &&
+ fsl_mc_device_match(mc_dev, obj_desc))
+ break;
+ }
+
+ if (i == objs->child_count)
+ fsl_mc_device_remove(mc_dev);
+
+ return 0;
+}
+
+static int __fsl_mc_device_remove(struct device *dev, void *data)
+{
+ if (!dev_is_fsl_mc(dev))
+ return 0;
+
+ fsl_mc_device_remove(to_fsl_mc_device(dev));
+ return 0;
+}
+
+/**
+ * dprc_remove_devices - Removes devices for objects removed from a DPRC
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+ * @obj_desc_array: array of object descriptors for child objects currently
+ * present in the DPRC in the MC.
+ * @num_child_objects_in_mc: number of entries in obj_desc_array
+ *
+ * Synchronizes the state of the Linux bus driver with the actual state of
+ * the MC by removing devices that represent MC objects that have
+ * been dynamically removed in the physical DPRC.
+ */
+void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_obj_desc *obj_desc_array,
+ int num_child_objects_in_mc)
+{
+ if (num_child_objects_in_mc != 0) {
+ /*
+ * Remove child objects that are in the DPRC in Linux,
+ * but not in the MC:
+ */
+ struct fsl_mc_child_objs objs;
+
+ objs.child_count = num_child_objects_in_mc;
+ objs.child_array = obj_desc_array;
+ device_for_each_child(&mc_bus_dev->dev, &objs,
+ __fsl_mc_device_remove_if_not_in_mc);
+ } else {
+ /*
+ * There are no child objects for this DPRC in the MC.
+ * So, remove all the child devices from Linux:
+ */
+ device_for_each_child(&mc_bus_dev->dev, NULL,
+ __fsl_mc_device_remove);
+ }
+}
+EXPORT_SYMBOL_GPL(dprc_remove_devices);
+
+static int __fsl_mc_device_match(struct device *dev, void *data)
+{
+ struct fsl_mc_obj_desc *obj_desc = data;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ return fsl_mc_device_match(mc_dev, obj_desc);
+}
+
+struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc *obj_desc,
+ struct fsl_mc_device *mc_bus_dev)
+{
+ struct device *dev;
+
+ dev = device_find_child(&mc_bus_dev->dev, obj_desc,
+ __fsl_mc_device_match);
+
+ return dev ? to_fsl_mc_device(dev) : NULL;
+}
+
+/**
+ * check_plugged_state_change - Check change in an MC object's plugged state
+ *
+ * @mc_dev: pointer to the fsl-mc device for a given MC object
+ * @obj_desc: pointer to the MC object's descriptor in the MC
+ *
+ * If the plugged state has changed from unplugged to plugged, the fsl-mc
+ * device is bound to the corresponding device driver.
+ * If the plugged state has changed from plugged to unplugged, the fsl-mc
+ * device is unbound from the corresponding device driver.
+ */
+static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
+ struct fsl_mc_obj_desc *obj_desc)
+{
+ int error;
+ u32 plugged_flag_at_mc =
+ obj_desc->state & FSL_MC_OBJ_STATE_PLUGGED;
+
+ if (plugged_flag_at_mc !=
+ (mc_dev->obj_desc.state & FSL_MC_OBJ_STATE_PLUGGED)) {
+ if (plugged_flag_at_mc) {
+ mc_dev->obj_desc.state |= FSL_MC_OBJ_STATE_PLUGGED;
+ error = device_attach(&mc_dev->dev);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "device_attach() failed: %d\n",
+ error);
+ }
+ } else {
+ mc_dev->obj_desc.state &= ~FSL_MC_OBJ_STATE_PLUGGED;
+ device_release_driver(&mc_dev->dev);
+ }
+ }
+}
+
+static void fsl_mc_obj_device_add(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_obj_desc *obj_desc)
+{
+ int error;
+ struct fsl_mc_device *child_dev;
+
+ /*
+ * Check if device is already known to Linux:
+ */
+ child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
+ if (child_dev) {
+ check_plugged_state_change(child_dev, obj_desc);
+ put_device(&child_dev->dev);
+ } else {
+ error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
+ &child_dev);
+ if (error < 0)
+ return;
+ }
+}
+
+/**
+ * dprc_add_new_devices - Adds devices to the logical bus for a DPRC
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+ * @obj_desc_array: array of device descriptors for child devices currently
+ * present in the physical DPRC.
+ * @num_child_objects_in_mc: number of entries in obj_desc_array
+ *
+ * Synchronizes the state of the Linux bus driver with the actual
+ * state of the MC by adding objects that have been newly discovered
+ * in the physical DPRC.
+ */
+static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_obj_desc *obj_desc_array,
+ int num_child_objects_in_mc)
+{
+ int i;
+
+ /* probe the allocable objects first */
+ for (i = 0; i < num_child_objects_in_mc; i++) {
+ struct fsl_mc_obj_desc *obj_desc = &obj_desc_array[i];
+
+ if (strlen(obj_desc->type) > 0 &&
+ fsl_mc_obj_desc_is_allocatable(obj_desc))
+ fsl_mc_obj_device_add(mc_bus_dev, obj_desc);
+ }
+
+ for (i = 0; i < num_child_objects_in_mc; i++) {
+ struct fsl_mc_obj_desc *obj_desc = &obj_desc_array[i];
+
+ if (strlen(obj_desc->type) > 0 &&
+ !fsl_mc_obj_desc_is_allocatable(obj_desc))
+ fsl_mc_obj_device_add(mc_bus_dev, obj_desc);
+ }
+}
+
+/**
+ * dprc_scan_objects - Discover objects in a DPRC
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+ * @alloc_interrupts: if true the function allocates the interrupt pool,
+ * otherwise the interrupt allocation is delayed
+ *
+ * Detects objects added to and removed from a DPRC and synchronizes the
+ * state of the Linux bus driver with the MC by adding and removing
+ * devices accordingly.
+ * Two types of devices can be found in a DPRC: allocatable objects (e.g.,
+ * dpbp, dpmcp) and non-allocatable devices (e.g., dprc, dpni).
+ * All allocatable devices need to be probed before the non-allocatable
+ * ones, to ensure that device drivers for non-allocatable devices can
+ * allocate any type of allocatable device.
+ * That is, we need to ensure that the corresponding resource pools are
+ * populated before they can get allocation requests from probe callbacks
+ * of the device drivers for the non-allocatable devices.
+ */
+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+ bool alloc_interrupts)
+{
+ int num_child_objects;
+ int dprc_get_obj_failures;
+ int error;
+ unsigned int irq_count = mc_bus_dev->obj_desc.irq_count;
+ struct fsl_mc_obj_desc *child_obj_desc_array = NULL;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+
+ error = dprc_get_obj_count(mc_bus_dev->mc_io,
+ 0,
+ mc_bus_dev->mc_handle,
+ &num_child_objects);
+ if (error < 0) {
+ dev_err(&mc_bus_dev->dev, "dprc_get_obj_count() failed: %d\n",
+ error);
+ return error;
+ }
+
+ if (num_child_objects != 0) {
+ int i;
+
+ child_obj_desc_array =
+ devm_kmalloc_array(&mc_bus_dev->dev, num_child_objects,
+ sizeof(*child_obj_desc_array),
+ GFP_KERNEL);
+ if (!child_obj_desc_array)
+ return -ENOMEM;
+
+ /*
+ * Discover objects currently present in the physical DPRC:
+ */
+ dprc_get_obj_failures = 0;
+ for (i = 0; i < num_child_objects; i++) {
+ struct fsl_mc_obj_desc *obj_desc =
+ &child_obj_desc_array[i];
+
+ error = dprc_get_obj(mc_bus_dev->mc_io,
+ 0,
+ mc_bus_dev->mc_handle,
+ i, obj_desc);
+ if (error < 0) {
+ dev_err(&mc_bus_dev->dev,
+ "dprc_get_obj(i=%d) failed: %d\n",
+ i, error);
+ /*
+ * Mark the obj entry as "invalid", by using the
+ * empty string as obj type:
+ */
+ obj_desc->type[0] = '\0';
+ obj_desc->id = error;
+ dprc_get_obj_failures++;
+ continue;
+ }
+
+ /*
+			 * Add a quirk for all versions of dpseci < 4.0: none
+			 * are coherent regardless of what the MC reports.
+ */
+ if ((strcmp(obj_desc->type, "dpseci") == 0) &&
+ (obj_desc->ver_major < 4))
+ obj_desc->flags |=
+ FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY;
+
+ irq_count += obj_desc->irq_count;
+ dev_dbg(&mc_bus_dev->dev,
+ "Discovered object: type %s, id %d\n",
+ obj_desc->type, obj_desc->id);
+ }
+
+ if (dprc_get_obj_failures != 0) {
+ dev_err(&mc_bus_dev->dev,
+ "%d out of %d devices could not be retrieved\n",
+ dprc_get_obj_failures, num_child_objects);
+ }
+ }
+
+ /*
+ * Allocate IRQ's before binding the scanned devices with their
+ * respective drivers.
+ */
+ if (dev_get_msi_domain(&mc_bus_dev->dev)) {
+ if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
+ dev_warn(&mc_bus_dev->dev,
+ "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
+ irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+ }
+
+ if (alloc_interrupts && !mc_bus->irq_resources) {
+ error = fsl_mc_populate_irq_pool(mc_bus_dev,
+ FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+ if (error < 0)
+ return error;
+ }
+ }
+
+ dprc_remove_devices(mc_bus_dev, child_obj_desc_array,
+ num_child_objects);
+
+ dprc_add_new_devices(mc_bus_dev, child_obj_desc_array,
+ num_child_objects);
+
+ if (child_obj_desc_array)
+ devm_kfree(&mc_bus_dev->dev, child_obj_desc_array);
+
+ return 0;
+}
+
+/**
+ * dprc_scan_container - Scans a physical DPRC and synchronizes Linux bus state
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+ * @alloc_interrupts: if true the function allocates the interrupt pool,
+ * otherwise the interrupt allocation is delayed
+ * Scans the physical DPRC and synchronizes the state of the Linux
+ * bus driver with the actual state of the MC by adding and removing
+ * devices as appropriate.
+ */
+int dprc_scan_container(struct fsl_mc_device *mc_bus_dev,
+ bool alloc_interrupts)
+{
+ int error = 0;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+
+ fsl_mc_init_all_resource_pools(mc_bus_dev);
+
+ /*
+ * Discover objects in the DPRC:
+ */
+ mutex_lock(&mc_bus->scan_mutex);
+ error = dprc_scan_objects(mc_bus_dev, alloc_interrupts);
+ mutex_unlock(&mc_bus->scan_mutex);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(dprc_scan_container);
+
+/**
+ * dprc_irq0_handler - Regular ISR for DPRC interrupt 0
+ *
+ * @irq_num: IRQ number of the interrupt being handled
+ * @arg: Pointer to device structure
+ */
+static irqreturn_t dprc_irq0_handler(int irq_num, void *arg)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * dprc_irq0_handler_thread - Handler thread function for DPRC interrupt 0
+ *
+ * @irq_num: IRQ number of the interrupt being handled
+ * @arg: Pointer to device structure
+ */
+static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
+{
+ int error;
+ u32 status;
+ struct device *dev = arg;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ struct fsl_mc_io *mc_io = mc_dev->mc_io;
+ int irq = mc_dev->irqs[0]->virq;
+
+ dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n",
+ irq_num, smp_processor_id());
+
+ if (!(mc_dev->flags & FSL_MC_IS_DPRC))
+ return IRQ_HANDLED;
+
+ mutex_lock(&mc_bus->scan_mutex);
+ if (irq != (u32)irq_num)
+ goto out;
+
+ status = 0;
+ error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
+ &status);
+ if (error < 0) {
+ dev_err(dev,
+ "dprc_get_irq_status() failed: %d\n", error);
+ goto out;
+ }
+
+ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
+ status);
+ if (error < 0) {
+ dev_err(dev,
+ "dprc_clear_irq_status() failed: %d\n", error);
+ goto out;
+ }
+
+ if (status & (DPRC_IRQ_EVENT_OBJ_ADDED |
+ DPRC_IRQ_EVENT_OBJ_REMOVED |
+ DPRC_IRQ_EVENT_CONTAINER_DESTROYED |
+ DPRC_IRQ_EVENT_OBJ_DESTROYED |
+ DPRC_IRQ_EVENT_OBJ_CREATED)) {
+
+ error = dprc_scan_objects(mc_dev, true);
+ if (error < 0) {
+ /*
+ * If the error is -ENXIO, we ignore it, as it indicates
+ * that the object scan was aborted, as we detected that
+ * an object was removed from the DPRC in the MC, while
+ * we were scanning the DPRC.
+ */
+ if (error != -ENXIO) {
+ dev_err(dev, "dprc_scan_objects() failed: %d\n",
+ error);
+ }
+
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&mc_bus->scan_mutex);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Disable and clear interrupt for a given DPRC object
+ */
+int disable_dprc_irq(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ int error;
+ struct fsl_mc_io *mc_io = mc_dev->mc_io;
+
+ /*
+ * Disable generation of interrupt, while we configure it:
+ */
+ error = dprc_set_irq_enable(mc_io, 0, mc_dev->mc_handle, 0, 0);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Disabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
+ error);
+ return error;
+ }
+
+ /*
+ * Disable all interrupt causes for the interrupt:
+ */
+ error = dprc_set_irq_mask(mc_io, 0, mc_dev->mc_handle, 0, 0x0);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Disabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
+ error);
+ return error;
+ }
+
+ /*
+ * Clear any leftover interrupts:
+ */
+ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0, ~0x0U);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Disabling DPRC IRQ failed: dprc_clear_irq_status() failed: %d\n",
+ error);
+ return error;
+ }
+
+ mc_bus->irq_enabled = 0;
+
+ return 0;
+}
+
+int get_dprc_irq_state(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+
+ return mc_bus->irq_enabled;
+}
+
+static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
+{
+ int error;
+ struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
+
+ /*
+ * NOTE: devm_request_threaded_irq() invokes the device-specific
+ * function that programs the MSI physically in the device
+ */
+ error = devm_request_threaded_irq(&mc_dev->dev,
+ irq->virq,
+ dprc_irq0_handler,
+ dprc_irq0_handler_thread,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(&mc_dev->dev),
+ &mc_dev->dev);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "devm_request_threaded_irq() failed: %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+int enable_dprc_irq(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ int error;
+
+ /*
+ * Enable all interrupt causes for the interrupt:
+ */
+ error = dprc_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, 0,
+ ~0x0u);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Enabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
+ error);
+
+ return error;
+ }
+
+ /*
+ * Enable generation of the interrupt:
+ */
+ error = dprc_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, 0, 1);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Enabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
+ error);
+
+ return error;
+ }
+
+ mc_bus->irq_enabled = 1;
+
+ return 0;
+}
+
+/*
+ * Setup interrupt for a given DPRC device
+ */
+static int dprc_setup_irq(struct fsl_mc_device *mc_dev)
+{
+ int error;
+
+ error = fsl_mc_allocate_irqs(mc_dev);
+ if (error < 0)
+ return error;
+
+ error = disable_dprc_irq(mc_dev);
+ if (error < 0)
+ goto error_free_irqs;
+
+ error = register_dprc_irq_handler(mc_dev);
+ if (error < 0)
+ goto error_free_irqs;
+
+ error = enable_dprc_irq(mc_dev);
+ if (error < 0)
+ goto error_free_irqs;
+
+ return 0;
+
+error_free_irqs:
+ fsl_mc_free_irqs(mc_dev);
+ return error;
+}
+
+/**
+ * dprc_setup - opens a DPRC and creates an mc_io for it
+ *
+ * @mc_dev: Pointer to fsl-mc device representing a DPRC
+ *
+ * It opens the physical DPRC in the MC.
+ * It configures the DPRC portal used to communicate with the MC.
+ */
+
+int dprc_setup(struct fsl_mc_device *mc_dev)
+{
+ struct device *parent_dev = mc_dev->dev.parent;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ struct irq_domain *mc_msi_domain;
+ bool mc_io_created = false;
+ bool msi_domain_set = false;
+ bool uapi_created = false;
+ u16 major_ver, minor_ver;
+ size_t region_size;
+ int error;
+
+ if (!is_fsl_mc_bus_dprc(mc_dev))
+ return -EINVAL;
+
+ if (dev_get_msi_domain(&mc_dev->dev))
+ return -EINVAL;
+
+ if (!mc_dev->mc_io) {
+ /*
+ * This is a child DPRC:
+ */
+ if (!dev_is_fsl_mc(parent_dev))
+ return -EINVAL;
+
+ if (mc_dev->obj_desc.region_count == 0)
+ return -EINVAL;
+
+ region_size = resource_size(mc_dev->regions);
+
+ error = fsl_create_mc_io(&mc_dev->dev,
+ mc_dev->regions[0].start,
+ region_size,
+ NULL,
+ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
+ &mc_dev->mc_io);
+ if (error < 0)
+ return error;
+
+ mc_io_created = true;
+ } else {
+ error = fsl_mc_uapi_create_device_file(mc_bus);
+ if (error < 0)
+ return -EPROBE_DEFER;
+ uapi_created = true;
+ }
+
+ mc_msi_domain = fsl_mc_find_msi_domain(&mc_dev->dev);
+ if (!mc_msi_domain) {
+ dev_warn(&mc_dev->dev,
+ "WARNING: MC bus without interrupt support\n");
+ } else {
+ dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
+ msi_domain_set = true;
+ }
+
+ error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
+ &mc_dev->mc_handle);
+ if (error < 0) {
+ dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", error);
+ goto error_cleanup_msi_domain;
+ }
+
+ error = dprc_get_attributes(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ &mc_bus->dprc_attr);
+ if (error < 0) {
+ dev_err(&mc_dev->dev, "dprc_get_attributes() failed: %d\n",
+ error);
+ goto error_cleanup_open;
+ }
+
+ error = dprc_get_api_version(mc_dev->mc_io, 0,
+ &major_ver,
+ &minor_ver);
+ if (error < 0) {
+ dev_err(&mc_dev->dev, "dprc_get_api_version() failed: %d\n",
+ error);
+ goto error_cleanup_open;
+ }
+
+ if (major_ver < DPRC_MIN_VER_MAJOR) {
+ dev_err(&mc_dev->dev,
+ "ERROR: DPRC version %d.%d not supported\n",
+ major_ver, minor_ver);
+ error = -ENOTSUPP;
+ goto error_cleanup_open;
+ }
+
+ return 0;
+
+error_cleanup_open:
+ (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+
+error_cleanup_msi_domain:
+ if (msi_domain_set)
+ dev_set_msi_domain(&mc_dev->dev, NULL);
+
+ if (mc_io_created) {
+ fsl_destroy_mc_io(mc_dev->mc_io);
+ mc_dev->mc_io = NULL;
+ }
+
+ if (uapi_created)
+ fsl_mc_uapi_remove_device_file(mc_bus);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(dprc_setup);
+
+/**
+ * dprc_probe - callback invoked when a DPRC is being bound to this driver
+ *
+ * @mc_dev: Pointer to fsl-mc device representing a DPRC
+ *
+ * It opens the physical DPRC in the MC.
+ * It scans the DPRC to discover the MC objects contained in it.
+ * It creates the interrupt pool for the MC bus associated with the DPRC.
+ * It configures the interrupts for the DPRC device itself.
+ */
+static int dprc_probe(struct fsl_mc_device *mc_dev)
+{
+ int error;
+
+ error = dprc_setup(mc_dev);
+ if (error < 0)
+ return error;
+
+ /*
+ * Discover MC objects in DPRC object:
+ */
+ error = dprc_scan_container(mc_dev, true);
+ if (error < 0)
+ goto dprc_cleanup;
+
+ /*
+ * Configure interrupt for the DPRC object associated with this MC bus:
+ */
+ error = dprc_setup_irq(mc_dev);
+ if (error < 0)
+ goto scan_cleanup;
+
+	dev_info(&mc_dev->dev, "DPRC device bound to driver\n");
+ return 0;
+
+scan_cleanup:
+ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
+dprc_cleanup:
+ dprc_cleanup(mc_dev);
+ return error;
+}
+
+/*
+ * Tear down interrupt for a given DPRC object
+ */
+static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
+
+ (void)disable_dprc_irq(mc_dev);
+
+ devm_free_irq(&mc_dev->dev, irq->virq, &mc_dev->dev);
+
+ fsl_mc_free_irqs(mc_dev);
+}
+
+/**
+ * dprc_cleanup - function that cleans up a DPRC
+ *
+ * @mc_dev: Pointer to fsl-mc device representing the DPRC
+ *
+ * It closes the DPRC device in the MC.
+ * It destroys the interrupt pool associated with this MC bus.
+ */
+
+int dprc_cleanup(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ int error;
+
+	/* this function should be called only for DPRCs; it
+	 * is an error to call it for regular objects
+ */
+ if (!is_fsl_mc_bus_dprc(mc_dev))
+ return -EINVAL;
+
+ if (dev_get_msi_domain(&mc_dev->dev)) {
+ fsl_mc_cleanup_irq_pool(mc_dev);
+ dev_set_msi_domain(&mc_dev->dev, NULL);
+ }
+
+ fsl_mc_cleanup_all_resource_pools(mc_dev);
+
+ /* if this step fails we cannot go further with cleanup as there is no way of
+ * communicating with the firmware
+ */
+ if (!mc_dev->mc_io) {
+ dev_err(&mc_dev->dev, "mc_io is NULL, tear down cannot be performed in firmware\n");
+ return -EINVAL;
+ }
+
+ error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+ if (error < 0)
+ dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error);
+
+ if (!fsl_mc_is_root_dprc(&mc_dev->dev)) {
+ fsl_destroy_mc_io(mc_dev->mc_io);
+ mc_dev->mc_io = NULL;
+ } else {
+ fsl_mc_uapi_remove_device_file(mc_bus);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dprc_cleanup);
+
+/**
+ * dprc_remove - callback invoked when a DPRC is being unbound from this driver
+ *
+ * @mc_dev: Pointer to fsl-mc device representing the DPRC
+ *
+ * It removes the DPRC's child objects from Linux (not from the MC) and
+ * closes the DPRC device in the MC.
+ * It tears down the interrupts that were configured for the DPRC device.
+ * It destroys the interrupt pool associated with this MC bus.
+ */
+static void dprc_remove(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+
+ if (!mc_bus->irq_resources) {
+ dev_err(&mc_dev->dev, "No irq resources, so unbinding the device failed\n");
+ return;
+ }
+
+ if (dev_get_msi_domain(&mc_dev->dev))
+ dprc_teardown_irq(mc_dev);
+
+ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
+
+ dprc_cleanup(mc_dev);
+
+	dev_info(&mc_dev->dev, "DPRC device unbound from driver\n");
+}
+
+static const struct fsl_mc_device_id match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dprc"},
+ {.vendor = 0x0},
+};
+
+static struct fsl_mc_driver dprc_driver = {
+ .driver = {
+ .name = FSL_MC_DPRC_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = NULL,
+ },
+ .match_id_table = match_id_table,
+ .probe = dprc_probe,
+ .remove = dprc_remove,
+};
+
+int __init dprc_driver_init(void)
+{
+ return fsl_mc_driver_register(&dprc_driver);
+}
+
+void dprc_driver_exit(void)
+{
+ fsl_mc_driver_unregister(&dprc_driver);
+}
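
The dprc_driver registration above is the template that every fsl-mc object driver
follows. A skeleton for a hypothetical driver binding to another object type is sketched
below; all of the "example" names and the chosen object type are placeholders.

/* Hypothetical fsl-mc object driver skeleton; names are placeholders. */
static int example_probe(struct fsl_mc_device *mc_dev)
{
	dev_info(&mc_dev->dev, "bound to %s.%d\n",
		 mc_dev->obj_desc.type, mc_dev->obj_desc.id);
	return 0;
}

static void example_remove(struct fsl_mc_device *mc_dev)
{
	dev_info(&mc_dev->dev, "unbound\n");
}

static const struct fsl_mc_device_id example_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpni",	/* example object type */
	},
	{ .vendor = 0x0 },
};

static struct fsl_mc_driver example_driver = {
	.driver = {
		.name = "example-fsl-mc",
	},
	.match_id_table = example_match_id_table,
	.probe = example_probe,
	.remove = example_remove,
};

/* registered/unregistered the same way as dprc_driver above: */
/* fsl_mc_driver_register(&example_driver); */
/* fsl_mc_driver_unregister(&example_driver); */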
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
new file mode 100644
index 0000000000..d129338b8b
--- /dev/null
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -0,0 +1,704 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+/*
+ * cache the DPRC version to reduce the number of commands
+ * towards the mc firmware
+ */
+static u16 dprc_major_ver;
+static u16 dprc_minor_ver;
+
+/**
+ * dprc_open() - Open DPRC object for use
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @container_id: Container ID to open
+ * @token: Returned token of DPRC object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Required before any operation on the object.
+ */
+int dprc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int container_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags,
+ 0);
+ cmd_params = (struct dprc_cmd_open *)cmd.params;
+ cmd_params->container_id = cpu_to_le32(container_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dprc_open);
+
+/**
+ * dprc_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dprc_close);
+
+/**
+ * dprc_reset_container - Reset child container.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @child_container_id: ID of the container to reset
+ * @options: 32 bit options:
+ * - 0 (no bits set) - all the objects inside the container are
+ * reset. The child containers are entered recursively and the
+ * objects reset. All the objects (including the child containers)
+ * are closed.
+ * - bit 0 set - all the objects inside the container are reset.
+ * However the child containers are not entered recursively.
+ * This option is supported for API versions >= 6.5
+ * In case a software context crashes or becomes non-responsive, the parent
+ * may wish to reset its resources container before the software context is
+ * restarted.
+ *
+ * This routine informs all objects assigned to the child container that the
+ * container is being reset, so they may perform any cleanup operations that are
+ * needed. All objects handles that were owned by the child container shall be
+ * closed.
+ *
+ * Note that such request may be submitted even if the child software context
+ * has not crashed, but the resulting object cleanup operations will not be
+ * aware of that.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_reset_container(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int child_container_id,
+ u32 options)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_reset_container *cmd_params;
+ u32 cmdid = DPRC_CMDID_RESET_CONT;
+ int err;
+
+ /*
+ * If the DPRC object version was not yet cached, cache it now.
+ * Otherwise use the already cached value.
+ */
+ if (!dprc_major_ver && !dprc_minor_ver) {
+ err = dprc_get_api_version(mc_io, 0,
+ &dprc_major_ver,
+ &dprc_minor_ver);
+ if (err)
+ return err;
+ }
+
+ /*
+ * MC API 6.5 introduced a new field in the command used to pass
+ * some flags.
+ * Bit 0 indicates that the child containers are not recursively reset.
+ */
+ if (dprc_major_ver > 6 || (dprc_major_ver == 6 && dprc_minor_ver >= 5))
+ cmdid = DPRC_CMDID_RESET_CONT_V2;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(cmdid, cmd_flags, token);
+ cmd_params = (struct dprc_cmd_reset_container *)cmd.params;
+ cmd_params->child_container_id = cpu_to_le32(child_container_id);
+ cmd_params->options = cpu_to_le32(options);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dprc_reset_container);
+
+/**
+ * dprc_set_irq() - Set IRQ information for the DPRC to trigger an interrupt.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @irq_index: Identifies the interrupt index to configure
+ * @irq_cfg: IRQ configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_set_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_set_irq *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprc_cmd_set_irq *)cmd.params;
+ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+ cmd_params->irq_index = irq_index;
+ cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprc_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable controls the
+ * overall interrupt state. If the interrupt is disabled, no causes will cause
+ * an interrupt.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_set_irq_enable *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_ENABLE,
+ cmd_flags, token);
+ cmd_params = (struct dprc_cmd_set_irq_enable *)cmd.params;
+ cmd_params->enable = en & DPRC_ENABLE;
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprc_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @irq_index: The interrupt index to configure
+ * @mask: event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting irq
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_set_irq_mask *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_MASK,
+ cmd_flags, token);
+ cmd_params = (struct dprc_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprc_get_irq_status() - Get the current status of any pending interrupts.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @irq_index: The interrupt index to configure
+ * @status: Returned interrupts status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_get_irq_status *cmd_params;
+ struct dprc_rsp_get_irq_status *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS,
+ cmd_flags, token);
+ cmd_params = (struct dprc_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
+ return 0;
+}
+
+/**
+ * dprc_clear_irq_status() - Clear a pending interrupt's status
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @irq_index: The interrupt index to configure
+ * @status: bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_clear_irq_status *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags, token);
+ cmd_params = (struct dprc_cmd_clear_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprc_get_attributes() - Obtains container attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @attr: Returned container attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dprc_attributes *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_rsp_get_attributes *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_attributes *)cmd.params;
+ attr->container_id = le32_to_cpu(rsp_params->container_id);
+ attr->icid = le32_to_cpu(rsp_params->icid);
+ attr->options = le32_to_cpu(rsp_params->options);
+ attr->portal_id = le32_to_cpu(rsp_params->portal_id);
+
+ return 0;
+}
+
+/**
+ * dprc_get_obj_count() - Obtains the number of objects in the DPRC
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @obj_count: Number of objects assigned to the DPRC
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_obj_count(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *obj_count)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_rsp_get_obj_count *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_COUNT,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_obj_count *)cmd.params;
+ *obj_count = le32_to_cpu(rsp_params->obj_count);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dprc_get_obj_count);
+
+/**
+ * dprc_get_obj() - Get general information on an object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @obj_index: Index of the object to be queried (< obj_count)
+ * @obj_desc: Returns the requested object descriptor
+ *
+ * The object descriptors are retrieved one by one by incrementing
+ * obj_index up to (not including) the value of obj_count returned
+ * from dprc_get_obj_count(). dprc_get_obj_count() must
+ * be called prior to dprc_get_obj().
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_obj(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int obj_index,
+ struct fsl_mc_obj_desc *obj_desc)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_get_obj *cmd_params;
+ struct dprc_rsp_get_obj *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprc_cmd_get_obj *)cmd.params;
+ cmd_params->obj_index = cpu_to_le32(obj_index);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_obj *)cmd.params;
+ obj_desc->id = le32_to_cpu(rsp_params->id);
+ obj_desc->vendor = le16_to_cpu(rsp_params->vendor);
+ obj_desc->irq_count = rsp_params->irq_count;
+ obj_desc->region_count = rsp_params->region_count;
+ obj_desc->state = le32_to_cpu(rsp_params->state);
+ obj_desc->ver_major = le16_to_cpu(rsp_params->version_major);
+ obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor);
+ obj_desc->flags = le16_to_cpu(rsp_params->flags);
+ strncpy(obj_desc->type, rsp_params->type, 16);
+ obj_desc->type[15] = '\0';
+ strncpy(obj_desc->label, rsp_params->label, 16);
+ obj_desc->label[15] = '\0';
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dprc_get_obj);
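+
+/*
+ * Usage sketch (illustrative only, not part of the driver): the enumeration
+ * pattern described in the dprc_get_obj() documentation, assuming 'mc_io'
+ * and 'token' came from a prior dprc_open() call, would look roughly like:
+ *
+ *	int i, obj_count;
+ *	struct fsl_mc_obj_desc obj_desc;
+ *
+ *	if (!dprc_get_obj_count(mc_io, 0, token, &obj_count))
+ *		for (i = 0; i < obj_count; i++)
+ *			if (!dprc_get_obj(mc_io, 0, token, i, &obj_desc))
+ *				pr_debug("%s.%d\n", obj_desc.type, obj_desc.id);
+ */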
+
+/**
+ * dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @obj_type: Type of the object to set its IRQ
+ * @obj_id: ID of the object to set its IRQ
+ * @irq_index: The interrupt index to configure
+ * @irq_cfg: IRQ configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_set_obj_irq *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprc_cmd_set_obj_irq *)cmd.params;
+ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+ cmd_params->irq_index = irq_index;
+ cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+ strncpy(cmd_params->obj_type, obj_type, 16);
+ cmd_params->obj_type[15] = '\0';
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dprc_set_obj_irq);
+
+/**
+ * dprc_get_obj_region() - Get region information for a specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @obj_type: Object type as returned in dprc_get_obj()
+ * @obj_id: Unique object instance as returned in dprc_get_obj()
+ * @region_index: The specific region to query
+ * @region_desc: Returns the requested region descriptor
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 region_index,
+ struct dprc_region_desc *region_desc)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_get_obj_region *cmd_params;
+ struct dprc_rsp_get_obj_region *rsp_params;
+ int err;
+
+ /*
+ * If the DPRC object version was not yet cached, cache it now.
+ * Otherwise use the already cached value.
+ */
+ if (!dprc_major_ver && !dprc_minor_ver) {
+ err = dprc_get_api_version(mc_io, 0,
+ &dprc_major_ver,
+ &dprc_minor_ver);
+ if (err)
+ return err;
+ }
+
+ if (dprc_major_ver > 6 || (dprc_major_ver == 6 && dprc_minor_ver >= 6)) {
+ /*
+ * MC API version 6.6 changed the size of the MC portals and software
+		 * portals to 64K (as implemented by hardware). If an older API is in
+		 * use, the size reported is smaller (64 bytes for MC portals and 4K
+		 * for software portals).
+ */
+
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG_V3,
+ cmd_flags, token);
+
+ } else if (dprc_major_ver == 6 && dprc_minor_ver >= 3) {
+ /*
+ * MC API version 6.3 introduced a new field to the region
+ * descriptor: base_address. If the older API is in use then the base
+ * address is set to zero to indicate it needs to be obtained elsewhere
+ * (typically the device tree).
+ */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG_V2,
+ cmd_flags, token);
+ } else {
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
+ cmd_flags, token);
+ }
+
+ cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+ cmd_params->region_index = region_index;
+ strncpy(cmd_params->obj_type, obj_type, 16);
+ cmd_params->obj_type[15] = '\0';
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
+ region_desc->base_offset = le64_to_cpu(rsp_params->base_offset);
+ region_desc->size = le32_to_cpu(rsp_params->size);
+ region_desc->type = rsp_params->type;
+ region_desc->flags = le32_to_cpu(rsp_params->flags);
+ if (dprc_major_ver > 6 || (dprc_major_ver == 6 && dprc_minor_ver >= 3))
+ region_desc->base_address = le64_to_cpu(rsp_params->base_addr);
+ else
+ region_desc->base_address = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dprc_get_obj_region);
+
+/**
+ * dprc_get_api_version - Get Data Path Resource Container API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of Data Path Resource Container API
+ * @minor_ver: Minor version of Data Path Resource Container API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+ return 0;
+}
+
+/**
+ * dprc_get_container_id - Get container ID associated with a given portal.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @container_id: Returned container ID associated with the portal
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_container_id(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int *container_id)
+{
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID,
+ cmd_flags,
+ 0);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *container_id = (int)mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
+
+/**
+ * dprc_get_connection() - Get connected endpoint and link status if connection
+ * exists.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @endpoint1: Endpoint 1 configuration parameters
+ * @endpoint2: Returned endpoint 2 configuration parameters
+ * @state: Returned link state:
+ * 1 - link is up;
+ * 0 - link is down;
+ * -1 - no connection (endpoint2 information is irrelevant)
+ *
+ * Return: '0' on Success; -ENOTCONN if connection does not exist.
+ */
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state)
+{
+ struct dprc_cmd_get_connection *cmd_params;
+ struct dprc_rsp_get_connection *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err, i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprc_cmd_get_connection *)cmd.params;
+ cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+ cmd_params->ep1_interface_id = cpu_to_le16(endpoint1->if_id);
+ for (i = 0; i < 16; i++)
+ cmd_params->ep1_type[i] = endpoint1->type[i];
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return -ENOTCONN;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_connection *)cmd.params;
+ endpoint2->id = le32_to_cpu(rsp_params->ep2_id);
+ endpoint2->if_id = le16_to_cpu(rsp_params->ep2_interface_id);
+ *state = le32_to_cpu(rsp_params->state);
+ for (i = 0; i < 16; i++)
+ endpoint2->type[i] = rsp_params->ep2_type[i];
+
+ return 0;
+}
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
new file mode 100644
index 0000000000..b5e8c021fa
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fsl-mc object allocator driver
+ *
+ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+static bool __must_check fsl_mc_is_allocatable(struct fsl_mc_device *mc_dev)
+{
+ return is_fsl_mc_bus_dpbp(mc_dev) ||
+ is_fsl_mc_bus_dpmcp(mc_dev) ||
+ is_fsl_mc_bus_dpcon(mc_dev);
+}
+
+/**
+ * fsl_mc_resource_pool_add_device - add allocatable object to a resource
+ * pool of a given fsl-mc bus
+ *
+ * @mc_bus: pointer to the fsl-mc bus
+ * @pool_type: pool type
+ * @mc_dev: pointer to allocatable fsl-mc device
+ */
+static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
+ *mc_bus,
+ enum fsl_mc_pool_type
+ pool_type,
+ struct fsl_mc_device
+ *mc_dev)
+{
+ struct fsl_mc_resource_pool *res_pool;
+ struct fsl_mc_resource *resource;
+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+ int error = -EINVAL;
+
+ if (pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)
+ goto out;
+ if (!fsl_mc_is_allocatable(mc_dev))
+ goto out;
+ if (mc_dev->resource)
+ goto out;
+
+ res_pool = &mc_bus->resource_pools[pool_type];
+ if (res_pool->type != pool_type)
+ goto out;
+ if (res_pool->mc_bus != mc_bus)
+ goto out;
+
+ mutex_lock(&res_pool->mutex);
+
+ if (res_pool->max_count < 0)
+ goto out_unlock;
+ if (res_pool->free_count < 0 ||
+ res_pool->free_count > res_pool->max_count)
+ goto out_unlock;
+
+ resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource),
+ GFP_KERNEL);
+ if (!resource) {
+ error = -ENOMEM;
+ dev_err(&mc_bus_dev->dev,
+ "Failed to allocate memory for fsl_mc_resource\n");
+ goto out_unlock;
+ }
+
+ resource->type = pool_type;
+ resource->id = mc_dev->obj_desc.id;
+ resource->data = mc_dev;
+ resource->parent_pool = res_pool;
+ INIT_LIST_HEAD(&resource->node);
+ list_add_tail(&resource->node, &res_pool->free_list);
+ mc_dev->resource = resource;
+ res_pool->free_count++;
+ res_pool->max_count++;
+ error = 0;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
+out:
+ return error;
+}
+
+/**
+ * fsl_mc_resource_pool_remove_device - remove an allocatable device from a
+ * resource pool
+ *
+ * @mc_dev: pointer to allocatable fsl-mc device
+ *
+ * It permanently removes an allocatable fsl-mc device from the resource
+ * pool. It's an error if the device is in use.
+ */
+static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
+ *mc_dev)
+{
+ struct fsl_mc_device *mc_bus_dev;
+ struct fsl_mc_bus *mc_bus;
+ struct fsl_mc_resource_pool *res_pool;
+ struct fsl_mc_resource *resource;
+ int error = -EINVAL;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+
+ resource = mc_dev->resource;
+ if (!resource || resource->data != mc_dev) {
+ dev_err(&mc_bus_dev->dev, "resource mismatch\n");
+ goto out;
+ }
+
+ res_pool = resource->parent_pool;
+ if (res_pool != &mc_bus->resource_pools[resource->type]) {
+ dev_err(&mc_bus_dev->dev, "pool mismatch\n");
+ goto out;
+ }
+
+ mutex_lock(&res_pool->mutex);
+
+ if (res_pool->max_count <= 0) {
+ dev_err(&mc_bus_dev->dev, "max_count underflow\n");
+ goto out_unlock;
+ }
+ if (res_pool->free_count <= 0 ||
+ res_pool->free_count > res_pool->max_count) {
+ dev_err(&mc_bus_dev->dev, "free_count mismatch\n");
+ goto out_unlock;
+ }
+
+ /*
+ * If the device is currently allocated, its resource is not
+ * in the free list and thus, the device cannot be removed.
+ */
+ if (list_empty(&resource->node)) {
+ error = -EBUSY;
+ dev_err(&mc_bus_dev->dev,
+ "Device %s cannot be removed from resource pool\n",
+ dev_name(&mc_dev->dev));
+ goto out_unlock;
+ }
+
+ list_del_init(&resource->node);
+ res_pool->free_count--;
+ res_pool->max_count--;
+
+ devm_kfree(&mc_bus_dev->dev, resource);
+ mc_dev->resource = NULL;
+ error = 0;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
+out:
+ return error;
+}
+
+static const char *const fsl_mc_pool_type_strings[] = {
+ [FSL_MC_POOL_DPMCP] = "dpmcp",
+ [FSL_MC_POOL_DPBP] = "dpbp",
+ [FSL_MC_POOL_DPCON] = "dpcon",
+ [FSL_MC_POOL_IRQ] = "irq",
+};
+
+static int __must_check object_type_to_pool_type(const char *object_type,
+ enum fsl_mc_pool_type
+ *pool_type)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fsl_mc_pool_type_strings); i++) {
+ if (strcmp(object_type, fsl_mc_pool_type_strings[i]) == 0) {
+ *pool_type = i;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
+ enum fsl_mc_pool_type pool_type,
+ struct fsl_mc_resource **new_resource)
+{
+ struct fsl_mc_resource_pool *res_pool;
+ struct fsl_mc_resource *resource;
+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+ int error = -EINVAL;
+
+ BUILD_BUG_ON(ARRAY_SIZE(fsl_mc_pool_type_strings) !=
+ FSL_MC_NUM_POOL_TYPES);
+
+ *new_resource = NULL;
+ if (pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)
+ goto out;
+
+ res_pool = &mc_bus->resource_pools[pool_type];
+ if (res_pool->mc_bus != mc_bus)
+ goto out;
+
+ mutex_lock(&res_pool->mutex);
+ resource = list_first_entry_or_null(&res_pool->free_list,
+ struct fsl_mc_resource, node);
+
+ if (!resource) {
+ error = -ENXIO;
+ dev_err(&mc_bus_dev->dev,
+ "No more resources of type %s left\n",
+ fsl_mc_pool_type_strings[pool_type]);
+ goto out_unlock;
+ }
+
+ if (resource->type != pool_type)
+ goto out_unlock;
+ if (resource->parent_pool != res_pool)
+ goto out_unlock;
+ if (res_pool->free_count <= 0 ||
+ res_pool->free_count > res_pool->max_count)
+ goto out_unlock;
+
+ list_del_init(&resource->node);
+
+ res_pool->free_count--;
+ error = 0;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
+ *new_resource = resource;
+out:
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate);
+
+void fsl_mc_resource_free(struct fsl_mc_resource *resource)
+{
+ struct fsl_mc_resource_pool *res_pool;
+
+ res_pool = resource->parent_pool;
+ if (resource->type != res_pool->type)
+ return;
+
+ mutex_lock(&res_pool->mutex);
+ if (res_pool->free_count < 0 ||
+ res_pool->free_count >= res_pool->max_count)
+ goto out_unlock;
+
+ if (!list_empty(&resource->node))
+ goto out_unlock;
+
+ list_add_tail(&resource->node, &res_pool->free_list);
+ res_pool->free_count++;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
+
+/**
+ * fsl_mc_object_allocate - Allocates an fsl-mc object of the given
+ * pool type from a given fsl-mc bus instance
+ *
+ * @mc_dev: fsl-mc device which is used in conjunction with the
+ * allocated object
+ * @pool_type: pool type
+ * @new_mc_adev: pointer to area where the pointer to the allocated device
+ * is to be returned
+ *
+ * Allocatable objects are always used in conjunction with some functional
+ * device. This function allocates an object of the specified type from
+ * the DPRC containing the functional device.
+ *
+ * NOTE: pool_type must be different from FSL_MC_POOL_DPMCP, since MC
+ * portals are allocated using fsl_mc_portal_allocate(), instead of
+ * this function.
+ */
+int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
+ enum fsl_mc_pool_type pool_type,
+ struct fsl_mc_device **new_mc_adev)
+{
+ struct fsl_mc_device *mc_bus_dev;
+ struct fsl_mc_bus *mc_bus;
+ struct fsl_mc_device *mc_adev;
+ int error = -EINVAL;
+ struct fsl_mc_resource *resource = NULL;
+
+ *new_mc_adev = NULL;
+ if (mc_dev->flags & FSL_MC_IS_DPRC)
+ goto error;
+
+ if (!dev_is_fsl_mc(mc_dev->dev.parent))
+ goto error;
+
+ if (pool_type == FSL_MC_POOL_DPMCP)
+ goto error;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ error = fsl_mc_resource_allocate(mc_bus, pool_type, &resource);
+ if (error < 0)
+ goto error;
+
+ mc_adev = resource->data;
+ if (!mc_adev) {
+ error = -EINVAL;
+ goto error;
+ }
+
+ mc_adev->consumer_link = device_link_add(&mc_dev->dev,
+ &mc_adev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!mc_adev->consumer_link) {
+ error = -EINVAL;
+ goto error;
+ }
+
+ *new_mc_adev = mc_adev;
+ return 0;
+error:
+ if (resource)
+ fsl_mc_resource_free(resource);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_object_allocate);
+
+/**
+ * fsl_mc_object_free - Returns an fsl-mc object to the resource
+ * pool where it came from.
+ * @mc_adev: Pointer to the fsl-mc device
+ */
+void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
+{
+ struct fsl_mc_resource *resource;
+
+ resource = mc_adev->resource;
+ if (resource->type == FSL_MC_POOL_DPMCP)
+ return;
+ if (resource->data != mc_adev)
+ return;
+
+ fsl_mc_resource_free(resource);
+
+ mc_adev->consumer_link = NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_object_free);
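+
+/*
+ * Usage sketch (illustrative only, not part of the driver): a functional
+ * driver would typically allocate a helper object such as a DPBP from the
+ * DPRC containing it during probe() and return it on remove(), roughly:
+ *
+ *	struct fsl_mc_device *dpbp_dev;
+ *	int err;
+ *
+ *	err = fsl_mc_object_allocate(mc_dev, FSL_MC_POOL_DPBP, &dpbp_dev);
+ *	if (err)
+ *		return err;
+ *	...
+ *	fsl_mc_object_free(dpbp_dev);
+ */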
+
+/*
+ * A DPRC and the devices in the DPRC all share the same GIC-ITS device
+ * ID. A block of IRQs is pre-allocated and maintained in a pool
+ * from which devices can allocate them when needed.
+ */
+
+/*
+ * Initialize the interrupt pool associated with an fsl-mc bus.
+ * It allocates a block of IRQs from the GIC-ITS.
+ */
+int fsl_mc_populate_irq_pool(struct fsl_mc_device *mc_bus_dev,
+ unsigned int irq_count)
+{
+ unsigned int i;
+ struct fsl_mc_device_irq *irq_resources;
+ struct fsl_mc_device_irq *mc_dev_irq;
+ int error;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ struct fsl_mc_resource_pool *res_pool =
+ &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+
+ /* do nothing if the IRQ pool is already populated */
+ if (mc_bus->irq_resources)
+ return 0;
+
+ if (irq_count == 0 ||
+ irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS)
+ return -EINVAL;
+
+ error = fsl_mc_msi_domain_alloc_irqs(&mc_bus_dev->dev, irq_count);
+ if (error < 0)
+ return error;
+
+ irq_resources = devm_kcalloc(&mc_bus_dev->dev,
+ irq_count, sizeof(*irq_resources),
+ GFP_KERNEL);
+ if (!irq_resources) {
+ error = -ENOMEM;
+ goto cleanup_msi_irqs;
+ }
+
+ for (i = 0; i < irq_count; i++) {
+ mc_dev_irq = &irq_resources[i];
+
+ /*
+ * NOTE: This mc_dev_irq's MSI addr/value pair will be set
+ * by the fsl_mc_msi_write_msg() callback
+ */
+ mc_dev_irq->resource.type = res_pool->type;
+ mc_dev_irq->resource.data = mc_dev_irq;
+ mc_dev_irq->resource.parent_pool = res_pool;
+ mc_dev_irq->virq = msi_get_virq(&mc_bus_dev->dev, i);
+ mc_dev_irq->resource.id = mc_dev_irq->virq;
+ INIT_LIST_HEAD(&mc_dev_irq->resource.node);
+ list_add_tail(&mc_dev_irq->resource.node, &res_pool->free_list);
+ }
+
+ res_pool->max_count = irq_count;
+ res_pool->free_count = irq_count;
+ mc_bus->irq_resources = irq_resources;
+ return 0;
+
+cleanup_msi_irqs:
+ fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
+
+/*
+ * Teardown the interrupt pool associated with an fsl-mc bus.
+ * It frees the IRQs that were allocated to the pool, back to the GIC-ITS.
+ */
+void fsl_mc_cleanup_irq_pool(struct fsl_mc_device *mc_bus_dev)
+{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ struct fsl_mc_resource_pool *res_pool =
+ &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+
+ if (!mc_bus->irq_resources)
+ return;
+
+ if (res_pool->max_count == 0)
+ return;
+
+ if (res_pool->free_count != res_pool->max_count)
+ return;
+
+ INIT_LIST_HEAD(&res_pool->free_list);
+ res_pool->max_count = 0;
+ res_pool->free_count = 0;
+ mc_bus->irq_resources = NULL;
+ fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool);
+
+/*
+ * Allocate the IRQs required by a given fsl-mc device.
+ */
+int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
+{
+ int i;
+ int irq_count;
+ int res_allocated_count = 0;
+ int error = -EINVAL;
+ struct fsl_mc_device_irq **irqs = NULL;
+ struct fsl_mc_bus *mc_bus;
+ struct fsl_mc_resource_pool *res_pool;
+
+ if (mc_dev->irqs)
+ return -EINVAL;
+
+ irq_count = mc_dev->obj_desc.irq_count;
+ if (irq_count == 0)
+ return -EINVAL;
+
+ if (is_fsl_mc_bus_dprc(mc_dev))
+ mc_bus = to_fsl_mc_bus(mc_dev);
+ else
+ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
+
+ if (!mc_bus->irq_resources)
+ return -EINVAL;
+
+ res_pool = &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+ if (res_pool->free_count < irq_count) {
+ dev_err(&mc_dev->dev,
+ "Not able to allocate %u irqs for device\n", irq_count);
+ return -ENOSPC;
+ }
+
+ irqs = devm_kcalloc(&mc_dev->dev, irq_count, sizeof(irqs[0]),
+ GFP_KERNEL);
+ if (!irqs)
+ return -ENOMEM;
+
+ for (i = 0; i < irq_count; i++) {
+ struct fsl_mc_resource *resource;
+
+ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_IRQ,
+ &resource);
+ if (error < 0)
+ goto error_resource_alloc;
+
+ irqs[i] = to_fsl_mc_irq(resource);
+ res_allocated_count++;
+
+ irqs[i]->mc_dev = mc_dev;
+ irqs[i]->dev_irq_index = i;
+ }
+
+ mc_dev->irqs = irqs;
+ return 0;
+
+error_resource_alloc:
+ for (i = 0; i < res_allocated_count; i++) {
+ irqs[i]->mc_dev = NULL;
+ fsl_mc_resource_free(&irqs[i]->resource);
+ }
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs);
+
+/*
+ * Frees the IRQs that were allocated for an fsl-mc device.
+ */
+void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev)
+{
+ int i;
+ int irq_count;
+ struct fsl_mc_bus *mc_bus;
+ struct fsl_mc_device_irq **irqs = mc_dev->irqs;
+
+ if (!irqs)
+ return;
+
+ irq_count = mc_dev->obj_desc.irq_count;
+
+ if (is_fsl_mc_bus_dprc(mc_dev))
+ mc_bus = to_fsl_mc_bus(mc_dev);
+ else
+ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
+
+ if (!mc_bus->irq_resources)
+ return;
+
+ for (i = 0; i < irq_count; i++) {
+ irqs[i]->mc_dev = NULL;
+ fsl_mc_resource_free(&irqs[i]->resource);
+ }
+
+ mc_dev->irqs = NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_free_irqs);
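+
+/*
+ * Usage sketch (illustrative only, not part of the driver): the IRQ pool
+ * model described above is used by populating the pool on the bus (DPRC)
+ * device and drawing from it in child device drivers, roughly:
+ *
+ *	err = fsl_mc_populate_irq_pool(mc_bus_dev,
+ *				       FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+ *	...
+ *	err = fsl_mc_allocate_irqs(mc_dev);
+ *	...
+ *	fsl_mc_free_irqs(mc_dev);
+ *	fsl_mc_cleanup_irq_pool(mc_bus_dev);
+ */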
+
+void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
+{
+ int pool_type;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+
+ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++) {
+ struct fsl_mc_resource_pool *res_pool =
+ &mc_bus->resource_pools[pool_type];
+
+ res_pool->type = pool_type;
+ res_pool->max_count = 0;
+ res_pool->free_count = 0;
+ res_pool->mc_bus = mc_bus;
+ INIT_LIST_HEAD(&res_pool->free_list);
+ mutex_init(&res_pool->mutex);
+ }
+}
+
+static void fsl_mc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev,
+ enum fsl_mc_pool_type pool_type)
+{
+ struct fsl_mc_resource *resource;
+ struct fsl_mc_resource *next;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ struct fsl_mc_resource_pool *res_pool =
+ &mc_bus->resource_pools[pool_type];
+
+ list_for_each_entry_safe(resource, next, &res_pool->free_list, node)
+ devm_kfree(&mc_bus_dev->dev, resource);
+}
+
+void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
+{
+ int pool_type;
+
+ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++)
+ fsl_mc_cleanup_resource_pool(mc_bus_dev, pool_type);
+}
+
+/*
+ * fsl_mc_allocator_probe - callback invoked when an allocatable device is
+ * being added to the system
+ */
+static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
+{
+ enum fsl_mc_pool_type pool_type;
+ struct fsl_mc_device *mc_bus_dev;
+ struct fsl_mc_bus *mc_bus;
+ int error;
+
+ if (!fsl_mc_is_allocatable(mc_dev))
+ return -EINVAL;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ if (!dev_is_fsl_mc(&mc_bus_dev->dev))
+ return -EINVAL;
+
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ error = object_type_to_pool_type(mc_dev->obj_desc.type, &pool_type);
+ if (error < 0)
+ return error;
+
+ error = fsl_mc_resource_pool_add_device(mc_bus, pool_type, mc_dev);
+ if (error < 0)
+ return error;
+
+ dev_dbg(&mc_dev->dev,
+ "Allocatable fsl-mc device bound to fsl_mc_allocator driver");
+ return 0;
+}
+
+/*
+ * fsl_mc_allocator_remove - callback invoked when an allocatable device is
+ * being removed from the system
+ */
+static void fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
+{
+ int error;
+
+ if (mc_dev->resource) {
+ error = fsl_mc_resource_pool_remove_device(mc_dev);
+ if (error < 0)
+ return;
+ }
+
+ dev_dbg(&mc_dev->dev,
+ "Allocatable fsl-mc device unbound from fsl_mc_allocator driver");
+}
+
+static const struct fsl_mc_device_id match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpbp",
+ },
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpmcp",
+ },
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpcon",
+ },
+ {.vendor = 0x0},
+};
+
+static struct fsl_mc_driver fsl_mc_allocator_driver = {
+ .driver = {
+ .name = "fsl_mc_allocator",
+ .pm = NULL,
+ },
+ .match_id_table = match_id_table,
+ .probe = fsl_mc_allocator_probe,
+ .remove = fsl_mc_allocator_remove,
+};
+
+int __init fsl_mc_allocator_driver_init(void)
+{
+ return fsl_mc_driver_register(&fsl_mc_allocator_driver);
+}
+
+void fsl_mc_allocator_driver_exit(void)
+{
+ fsl_mc_driver_unregister(&fsl_mc_allocator_driver);
+}
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
new file mode 100644
index 0000000000..2f6d5002e4
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -0,0 +1,1303 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale Management Complex (MC) bus driver
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2019-2020 NXP
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ */
+
+#define pr_fmt(fmt) "fsl-mc: " fmt
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/limits.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/acpi.h>
+#include <linux/iommu.h>
+#include <linux/dma-map-ops.h>
+
+#include "fsl-mc-private.h"
+
+/*
+ * Default DMA mask for devices on a fsl-mc bus
+ */
+#define FSL_MC_DEFAULT_DMA_MASK (~0ULL)
+
+static struct fsl_mc_version mc_version;
+
+/**
+ * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
+ * @root_mc_bus_dev: fsl-mc device representing the root DPRC
+ * @num_translation_ranges: number of entries in addr_translation_ranges
+ * @translation_ranges: array of bus to system address translation ranges
+ * @fsl_mc_regs: base address of register bank
+ */
+struct fsl_mc {
+ struct fsl_mc_device *root_mc_bus_dev;
+ u8 num_translation_ranges;
+ struct fsl_mc_addr_translation_range *translation_ranges;
+ void __iomem *fsl_mc_regs;
+};
+
+/**
+ * struct fsl_mc_addr_translation_range - bus to system address translation
+ * range
+ * @mc_region_type: Type of MC region for the range being translated
+ * @start_mc_offset: Start MC offset of the range being translated
+ * @end_mc_offset: MC offset of the first byte after the range (last MC
+ * offset of the range is end_mc_offset - 1)
+ * @start_phys_addr: system physical address corresponding to start_mc_offset
+ */
+struct fsl_mc_addr_translation_range {
+ enum dprc_region_type mc_region_type;
+ u64 start_mc_offset;
+ u64 end_mc_offset;
+ phys_addr_t start_phys_addr;
+};
+
+#define FSL_MC_GCR1 0x0
+#define GCR1_P1_STOP BIT(31)
+#define GCR1_P2_STOP BIT(30)
+
+#define FSL_MC_FAPR 0x28
+#define MC_FAPR_PL BIT(18)
+#define MC_FAPR_BMT BIT(17)
+
+static phys_addr_t mc_portal_base_phys_addr;
+
+/**
+ * fsl_mc_bus_match - device to driver matching callback
+ * @dev: the fsl-mc device to match against
+ * @drv: the device driver to search for matching fsl-mc object type
+ * structures
+ *
+ * Returns 1 on success, 0 otherwise.
+ */
+static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
+{
+ const struct fsl_mc_device_id *id;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
+ bool found = false;
+
+ /* When driver_override is set, only bind to the matching driver */
+ if (mc_dev->driver_override) {
+ found = !strcmp(mc_dev->driver_override, mc_drv->driver.name);
+ goto out;
+ }
+
+ if (!mc_drv->match_id_table)
+ goto out;
+
+ /*
+	 * If the object is not 'plugged', don't match.
+	 * The only exception is the root DPRC, which is a special case.
+ */
+ if ((mc_dev->obj_desc.state & FSL_MC_OBJ_STATE_PLUGGED) == 0 &&
+ !fsl_mc_is_root_dprc(&mc_dev->dev))
+ goto out;
+
+ /*
+ * Traverse the match_id table of the given driver, trying to find
+	 * a match for the given device.
+ */
+ for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
+ if (id->vendor == mc_dev->obj_desc.vendor &&
+ strcmp(id->obj_type, mc_dev->obj_desc.type) == 0) {
+ found = true;
+
+ break;
+ }
+ }
+
+out:
+ dev_dbg(dev, "%smatched\n", found ? "" : "not ");
+ return found;
+}
+
+/*
+ * fsl_mc_bus_uevent - callback invoked when a device is added
+ */
+static int fsl_mc_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ if (add_uevent_var(env, "MODALIAS=fsl-mc:v%08Xd%s",
+ mc_dev->obj_desc.vendor,
+ mc_dev->obj_desc.type))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int fsl_mc_dma_configure(struct device *dev)
+{
+ struct device *dma_dev = dev;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
+ u32 input_id = mc_dev->icid;
+ int ret;
+
+ while (dev_is_fsl_mc(dma_dev))
+ dma_dev = dma_dev->parent;
+
+ if (dev_of_node(dma_dev))
+ ret = of_dma_configure_id(dev, dma_dev->of_node, 0, &input_id);
+ else
+ ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
+
+ if (!ret && !mc_drv->driver_managed_dma) {
+ ret = iommu_device_use_default_domain(dev);
+ if (ret)
+ arch_teardown_dma_ops(dev);
+ }
+
+ return ret;
+}
+
+static void fsl_mc_dma_cleanup(struct device *dev)
+{
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
+
+ if (!mc_drv->driver_managed_dma)
+ iommu_device_unuse_default_domain(dev);
+}
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
+ mc_dev->obj_desc.type);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ int ret;
+
+ if (WARN_ON(dev->bus != &fsl_mc_bus_type))
+ return -EINVAL;
+
+ ret = driver_set_override(dev, &mc_dev->driver_override, buf, count);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", mc_dev->driver_override);
+}
+static DEVICE_ATTR_RW(driver_override);
+
+static struct attribute *fsl_mc_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ &dev_attr_driver_override.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(fsl_mc_dev);
+
+static int scan_fsl_mc_bus(struct device *dev, void *data)
+{
+ struct fsl_mc_device *root_mc_dev;
+ struct fsl_mc_bus *root_mc_bus;
+
+ if (!fsl_mc_is_root_dprc(dev))
+ goto exit;
+
+ root_mc_dev = to_fsl_mc_device(dev);
+ root_mc_bus = to_fsl_mc_bus(root_mc_dev);
+ mutex_lock(&root_mc_bus->scan_mutex);
+ dprc_scan_objects(root_mc_dev, false);
+ mutex_unlock(&root_mc_bus->scan_mutex);
+
+exit:
+ return 0;
+}
+
+static ssize_t rescan_store(const struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val)
+ bus_for_each_dev(bus, NULL, NULL, scan_fsl_mc_bus);
+
+ return count;
+}
+static BUS_ATTR_WO(rescan);
+
+static int fsl_mc_bus_set_autorescan(struct device *dev, void *data)
+{
+ struct fsl_mc_device *root_mc_dev;
+ unsigned long val;
+ char *buf = data;
+
+ if (!fsl_mc_is_root_dprc(dev))
+ goto exit;
+
+ root_mc_dev = to_fsl_mc_device(dev);
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val)
+ enable_dprc_irq(root_mc_dev);
+ else
+ disable_dprc_irq(root_mc_dev);
+
+exit:
+ return 0;
+}
+
+static int fsl_mc_bus_get_autorescan(struct device *dev, void *data)
+{
+ struct fsl_mc_device *root_mc_dev;
+ char *buf = data;
+
+ if (!fsl_mc_is_root_dprc(dev))
+ goto exit;
+
+ root_mc_dev = to_fsl_mc_device(dev);
+
+ sprintf(buf, "%d\n", get_dprc_irq_state(root_mc_dev));
+exit:
+ return 0;
+}
+
+static ssize_t autorescan_store(const struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ bus_for_each_dev(bus, NULL, (void *)buf, fsl_mc_bus_set_autorescan);
+
+ return count;
+}
+
+static ssize_t autorescan_show(const struct bus_type *bus, char *buf)
+{
+ bus_for_each_dev(bus, NULL, (void *)buf, fsl_mc_bus_get_autorescan);
+ return strlen(buf);
+}
+
+static BUS_ATTR_RW(autorescan);
+
+static struct attribute *fsl_mc_bus_attrs[] = {
+ &bus_attr_rescan.attr,
+ &bus_attr_autorescan.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(fsl_mc_bus);
+
+struct bus_type fsl_mc_bus_type = {
+ .name = "fsl-mc",
+ .match = fsl_mc_bus_match,
+ .uevent = fsl_mc_bus_uevent,
+ .dma_configure = fsl_mc_dma_configure,
+ .dma_cleanup = fsl_mc_dma_cleanup,
+ .dev_groups = fsl_mc_dev_groups,
+ .bus_groups = fsl_mc_bus_groups,
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
+
+struct device_type fsl_mc_bus_dprc_type = {
+ .name = "fsl_mc_bus_dprc"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dprc_type);
+
+struct device_type fsl_mc_bus_dpni_type = {
+ .name = "fsl_mc_bus_dpni"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpni_type);
+
+struct device_type fsl_mc_bus_dpio_type = {
+ .name = "fsl_mc_bus_dpio"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpio_type);
+
+struct device_type fsl_mc_bus_dpsw_type = {
+ .name = "fsl_mc_bus_dpsw"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpsw_type);
+
+struct device_type fsl_mc_bus_dpbp_type = {
+ .name = "fsl_mc_bus_dpbp"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpbp_type);
+
+struct device_type fsl_mc_bus_dpcon_type = {
+ .name = "fsl_mc_bus_dpcon"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpcon_type);
+
+struct device_type fsl_mc_bus_dpmcp_type = {
+ .name = "fsl_mc_bus_dpmcp"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmcp_type);
+
+struct device_type fsl_mc_bus_dpmac_type = {
+ .name = "fsl_mc_bus_dpmac"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmac_type);
+
+struct device_type fsl_mc_bus_dprtc_type = {
+ .name = "fsl_mc_bus_dprtc"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dprtc_type);
+
+struct device_type fsl_mc_bus_dpseci_type = {
+ .name = "fsl_mc_bus_dpseci"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpseci_type);
+
+struct device_type fsl_mc_bus_dpdmux_type = {
+ .name = "fsl_mc_bus_dpdmux"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmux_type);
+
+struct device_type fsl_mc_bus_dpdcei_type = {
+ .name = "fsl_mc_bus_dpdcei"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdcei_type);
+
+struct device_type fsl_mc_bus_dpaiop_type = {
+ .name = "fsl_mc_bus_dpaiop"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpaiop_type);
+
+struct device_type fsl_mc_bus_dpci_type = {
+ .name = "fsl_mc_bus_dpci"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpci_type);
+
+struct device_type fsl_mc_bus_dpdmai_type = {
+ .name = "fsl_mc_bus_dpdmai"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmai_type);
+
+struct device_type fsl_mc_bus_dpdbg_type = {
+ .name = "fsl_mc_bus_dpdbg"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdbg_type);
+
+static struct device_type *fsl_mc_get_device_type(const char *type)
+{
+ static const struct {
+ struct device_type *dev_type;
+ const char *type;
+ } dev_types[] = {
+ { &fsl_mc_bus_dprc_type, "dprc" },
+ { &fsl_mc_bus_dpni_type, "dpni" },
+ { &fsl_mc_bus_dpio_type, "dpio" },
+ { &fsl_mc_bus_dpsw_type, "dpsw" },
+ { &fsl_mc_bus_dpbp_type, "dpbp" },
+ { &fsl_mc_bus_dpcon_type, "dpcon" },
+ { &fsl_mc_bus_dpmcp_type, "dpmcp" },
+ { &fsl_mc_bus_dpmac_type, "dpmac" },
+ { &fsl_mc_bus_dprtc_type, "dprtc" },
+ { &fsl_mc_bus_dpseci_type, "dpseci" },
+ { &fsl_mc_bus_dpdmux_type, "dpdmux" },
+ { &fsl_mc_bus_dpdcei_type, "dpdcei" },
+ { &fsl_mc_bus_dpaiop_type, "dpaiop" },
+ { &fsl_mc_bus_dpci_type, "dpci" },
+ { &fsl_mc_bus_dpdmai_type, "dpdmai" },
+ { &fsl_mc_bus_dpdbg_type, "dpdbg" },
+ { NULL, NULL }
+ };
+ int i;
+
+ for (i = 0; dev_types[i].dev_type; i++)
+ if (!strcmp(dev_types[i].type, type))
+ return dev_types[i].dev_type;
+
+ return NULL;
+}
+
+static int fsl_mc_driver_probe(struct device *dev)
+{
+ struct fsl_mc_driver *mc_drv;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ int error;
+
+ mc_drv = to_fsl_mc_driver(dev->driver);
+
+ error = mc_drv->probe(mc_dev);
+ if (error < 0) {
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "%s failed: %d\n", __func__, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int fsl_mc_driver_remove(struct device *dev)
+{
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ mc_drv->remove(mc_dev);
+
+ return 0;
+}
+
+static void fsl_mc_driver_shutdown(struct device *dev)
+{
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ mc_drv->shutdown(mc_dev);
+}
+
+/*
+ * __fsl_mc_driver_register - registers a child device driver with the
+ * MC bus
+ *
+ * This function is implicitly invoked from the registration function of
+ * fsl_mc device drivers, which is generated by the
+ * module_fsl_mc_driver() macro.
+ */
+int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver,
+ struct module *owner)
+{
+ int error;
+
+ mc_driver->driver.owner = owner;
+ mc_driver->driver.bus = &fsl_mc_bus_type;
+
+ if (mc_driver->probe)
+ mc_driver->driver.probe = fsl_mc_driver_probe;
+
+ if (mc_driver->remove)
+ mc_driver->driver.remove = fsl_mc_driver_remove;
+
+ if (mc_driver->shutdown)
+ mc_driver->driver.shutdown = fsl_mc_driver_shutdown;
+
+ error = driver_register(&mc_driver->driver);
+ if (error < 0) {
+ pr_err("driver_register() failed for %s: %d\n",
+ mc_driver->driver.name, error);
+ return error;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__fsl_mc_driver_register);
+
+/*
+ * fsl_mc_driver_unregister - unregisters a device driver from the
+ * MC bus
+ */
+void fsl_mc_driver_unregister(struct fsl_mc_driver *mc_driver)
+{
+ driver_unregister(&mc_driver->driver);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister);
+
+/**
+ * mc_get_version() - Retrieves the Management Complex firmware
+ * version information
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @mc_ver_info: Returned version information structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+static int mc_get_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ struct fsl_mc_version *mc_ver_info)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpmng_rsp_get_version *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION,
+ cmd_flags,
+ 0);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpmng_rsp_get_version *)cmd.params;
+ mc_ver_info->revision = le32_to_cpu(rsp_params->revision);
+ mc_ver_info->major = le32_to_cpu(rsp_params->version_major);
+ mc_ver_info->minor = le32_to_cpu(rsp_params->version_minor);
+
+ return 0;
+}
+
+/**
+ * fsl_mc_get_version - function to retrieve the MC f/w version information
+ *
+ * Return: mc version when called after fsl-mc-bus probe; NULL otherwise.
+ */
+struct fsl_mc_version *fsl_mc_get_version(void)
+{
+ if (mc_version.major)
+ return &mc_version;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_get_version);
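+
+/*
+ * Usage sketch (illustrative only, not part of the driver): callers must be
+ * prepared for a NULL return when the bus has not probed yet, e.g.:
+ *
+ *	struct fsl_mc_version *mc_ver = fsl_mc_get_version();
+ *
+ *	if (mc_ver)
+ *		pr_info("MC firmware %u.%u\n", mc_ver->major, mc_ver->minor);
+ */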
+
+/*
+ * fsl_mc_get_root_dprc - function to traverse to the root dprc
+ */
+void fsl_mc_get_root_dprc(struct device *dev,
+ struct device **root_dprc_dev)
+{
+ if (!dev) {
+ *root_dprc_dev = NULL;
+ } else if (!dev_is_fsl_mc(dev)) {
+ *root_dprc_dev = NULL;
+ } else {
+ *root_dprc_dev = dev;
+ while (dev_is_fsl_mc((*root_dprc_dev)->parent))
+ *root_dprc_dev = (*root_dprc_dev)->parent;
+ }
+}
+
+static int get_dprc_attr(struct fsl_mc_io *mc_io,
+ int container_id, struct dprc_attributes *attr)
+{
+ u16 dprc_handle;
+ int error;
+
+ error = dprc_open(mc_io, 0, container_id, &dprc_handle);
+ if (error < 0) {
+ dev_err(mc_io->dev, "dprc_open() failed: %d\n", error);
+ return error;
+ }
+
+ memset(attr, 0, sizeof(struct dprc_attributes));
+ error = dprc_get_attributes(mc_io, 0, dprc_handle, attr);
+ if (error < 0) {
+ dev_err(mc_io->dev, "dprc_get_attributes() failed: %d\n",
+ error);
+ goto common_cleanup;
+ }
+
+ error = 0;
+
+common_cleanup:
+ (void)dprc_close(mc_io, 0, dprc_handle);
+ return error;
+}
+
+static int get_dprc_icid(struct fsl_mc_io *mc_io,
+ int container_id, u32 *icid)
+{
+ struct dprc_attributes attr;
+ int error;
+
+ error = get_dprc_attr(mc_io, container_id, &attr);
+ if (error == 0)
+ *icid = attr.icid;
+
+ return error;
+}
+
+static int translate_mc_addr(struct fsl_mc_device *mc_dev,
+ enum dprc_region_type mc_region_type,
+ u64 mc_offset, phys_addr_t *phys_addr)
+{
+ int i;
+ struct device *root_dprc_dev;
+ struct fsl_mc *mc;
+
+ fsl_mc_get_root_dprc(&mc_dev->dev, &root_dprc_dev);
+ mc = dev_get_drvdata(root_dprc_dev->parent);
+
+ if (mc->num_translation_ranges == 0) {
+ /*
+ * Do identity mapping:
+ */
+ *phys_addr = mc_offset;
+ return 0;
+ }
+
+ for (i = 0; i < mc->num_translation_ranges; i++) {
+ struct fsl_mc_addr_translation_range *range =
+ &mc->translation_ranges[i];
+
+ if (mc_region_type == range->mc_region_type &&
+ mc_offset >= range->start_mc_offset &&
+ mc_offset < range->end_mc_offset) {
+ *phys_addr = range->start_phys_addr +
+ (mc_offset - range->start_mc_offset);
+ return 0;
+ }
+ }
+
+ return -EFAULT;
+}
+
+static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
+ struct fsl_mc_device *mc_bus_dev)
+{
+ int i;
+ int error;
+ struct resource *regions;
+ struct fsl_mc_obj_desc *obj_desc = &mc_dev->obj_desc;
+ struct device *parent_dev = mc_dev->dev.parent;
+ enum dprc_region_type mc_region_type;
+
+ if (is_fsl_mc_bus_dprc(mc_dev) ||
+ is_fsl_mc_bus_dpmcp(mc_dev)) {
+ mc_region_type = DPRC_REGION_TYPE_MC_PORTAL;
+ } else if (is_fsl_mc_bus_dpio(mc_dev)) {
+ mc_region_type = DPRC_REGION_TYPE_QBMAN_PORTAL;
+ } else {
+ /*
+ * This function should not have been called for this MC object
+ * type, as this object type is not supposed to have MMIO
+ * regions
+ */
+ return -EINVAL;
+ }
+
+ regions = kmalloc_array(obj_desc->region_count,
+ sizeof(regions[0]), GFP_KERNEL);
+ if (!regions)
+ return -ENOMEM;
+
+ for (i = 0; i < obj_desc->region_count; i++) {
+ struct dprc_region_desc region_desc;
+
+ error = dprc_get_obj_region(mc_bus_dev->mc_io,
+ 0,
+ mc_bus_dev->mc_handle,
+ obj_desc->type,
+ obj_desc->id, i, &region_desc);
+ if (error < 0) {
+ dev_err(parent_dev,
+ "dprc_get_obj_region() failed: %d\n", error);
+ goto error_cleanup_regions;
+ }
+ /*
+		 * Older MC firmware only returned the region offset and no base
+		 * address. If a base address is present in region_desc, use it;
+		 * otherwise revert to the old mechanism.
+ */
+ if (region_desc.base_address) {
+ regions[i].start = region_desc.base_address +
+ region_desc.base_offset;
+ } else {
+ error = translate_mc_addr(mc_dev, mc_region_type,
+ region_desc.base_offset,
+ &regions[i].start);
+
+ /*
+ * Some versions of the MC firmware wrongly report
+ * 0 for register base address of the DPMCP associated
+ * with child DPRC objects thus rendering them unusable.
+ * This is particularly troublesome in ACPI boot
+ * scenarios where the legacy way of extracting this
+ * base address from the device tree does not apply.
+ * Given that DPMCPs share the same base address,
+ * workaround this by using the base address extracted
+ * from the root DPRC container.
+ */
+ if (is_fsl_mc_bus_dprc(mc_dev) &&
+ regions[i].start == region_desc.base_offset)
+ regions[i].start += mc_portal_base_phys_addr;
+ }
+
+ if (error < 0) {
+ dev_err(parent_dev,
+ "Invalid MC offset: %#x (for %s.%d\'s region %d)\n",
+ region_desc.base_offset,
+ obj_desc->type, obj_desc->id, i);
+ goto error_cleanup_regions;
+ }
+
+ regions[i].end = regions[i].start + region_desc.size - 1;
+ regions[i].name = "fsl-mc object MMIO region";
+ regions[i].flags = region_desc.flags & IORESOURCE_BITS;
+ regions[i].flags |= IORESOURCE_MEM;
+ }
+
+ mc_dev->regions = regions;
+ return 0;
+
+error_cleanup_regions:
+ kfree(regions);
+ return error;
+}
+
+/*
+ * fsl_mc_is_root_dprc - function to check if a given device is a root dprc
+ */
+bool fsl_mc_is_root_dprc(struct device *dev)
+{
+ struct device *root_dprc_dev;
+
+ fsl_mc_get_root_dprc(dev, &root_dprc_dev);
+ if (!root_dprc_dev)
+ return false;
+ return dev == root_dprc_dev;
+}
+
+static void fsl_mc_device_release(struct device *dev)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ kfree(mc_dev->regions);
+
+ if (is_fsl_mc_bus_dprc(mc_dev))
+ kfree(to_fsl_mc_bus(mc_dev));
+ else
+ kfree(mc_dev);
+}
+
+/*
+ * Add a newly discovered fsl-mc device to be visible in Linux
+ */
+int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
+ struct fsl_mc_io *mc_io,
+ struct device *parent_dev,
+ struct fsl_mc_device **new_mc_dev)
+{
+ int error;
+ struct fsl_mc_device *mc_dev = NULL;
+ struct fsl_mc_bus *mc_bus = NULL;
+ struct fsl_mc_device *parent_mc_dev;
+
+ if (dev_is_fsl_mc(parent_dev))
+ parent_mc_dev = to_fsl_mc_device(parent_dev);
+ else
+ parent_mc_dev = NULL;
+
+ if (strcmp(obj_desc->type, "dprc") == 0) {
+ /*
+ * Allocate an MC bus device object:
+ */
+ mc_bus = kzalloc(sizeof(*mc_bus), GFP_KERNEL);
+ if (!mc_bus)
+ return -ENOMEM;
+
+ mutex_init(&mc_bus->scan_mutex);
+ mc_dev = &mc_bus->mc_dev;
+ } else {
+ /*
+ * Allocate a regular fsl_mc_device object:
+ */
+ mc_dev = kzalloc(sizeof(*mc_dev), GFP_KERNEL);
+ if (!mc_dev)
+ return -ENOMEM;
+ }
+
+ mc_dev->obj_desc = *obj_desc;
+ mc_dev->mc_io = mc_io;
+ device_initialize(&mc_dev->dev);
+ mc_dev->dev.parent = parent_dev;
+ mc_dev->dev.bus = &fsl_mc_bus_type;
+ mc_dev->dev.release = fsl_mc_device_release;
+ mc_dev->dev.type = fsl_mc_get_device_type(obj_desc->type);
+ if (!mc_dev->dev.type) {
+ error = -ENODEV;
+ dev_err(parent_dev, "unknown device type %s\n", obj_desc->type);
+ goto error_cleanup_dev;
+ }
+ dev_set_name(&mc_dev->dev, "%s.%d", obj_desc->type, obj_desc->id);
+
+ if (strcmp(obj_desc->type, "dprc") == 0) {
+ struct fsl_mc_io *mc_io2;
+
+ mc_dev->flags |= FSL_MC_IS_DPRC;
+
+ /*
+ * To get the DPRC's ICID, we need to open the DPRC
+ * in get_dprc_icid(). For child DPRCs, we do so using the
+ * parent DPRC's MC portal instead of the child DPRC's MC
+ * portal, in case the child DPRC is already opened with
+ * its own portal (e.g., the DPRC used by AIOP).
+ *
+ * NOTE: There cannot be more than one active open for a
+ * given MC object, using the same MC portal.
+ */
+ if (parent_mc_dev) {
+ /*
+ * device being added is a child DPRC device
+ */
+ mc_io2 = parent_mc_dev->mc_io;
+ } else {
+ /*
+ * device being added is the root DPRC device
+ */
+ if (!mc_io) {
+ error = -EINVAL;
+ goto error_cleanup_dev;
+ }
+
+ mc_io2 = mc_io;
+ }
+
+ error = get_dprc_icid(mc_io2, obj_desc->id, &mc_dev->icid);
+ if (error < 0)
+ goto error_cleanup_dev;
+ } else {
+ /*
+ * A non-DPRC object has to be a child of a DPRC, use the
+ * parent's ICID and interrupt domain.
+ */
+ mc_dev->icid = parent_mc_dev->icid;
+ mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
+ mc_dev->dev.dma_mask = &mc_dev->dma_mask;
+ mc_dev->dev.coherent_dma_mask = mc_dev->dma_mask;
+ dev_set_msi_domain(&mc_dev->dev,
+ dev_get_msi_domain(&parent_mc_dev->dev));
+ }
+
+ /*
+ * Get MMIO regions for the device from the MC:
+ *
+ * NOTE: the root DPRC is a special case as its MMIO region is
+ * obtained from the device tree
+ */
+ if (parent_mc_dev && obj_desc->region_count != 0) {
+ error = fsl_mc_device_get_mmio_regions(mc_dev,
+ parent_mc_dev);
+ if (error < 0)
+ goto error_cleanup_dev;
+ }
+
+ /*
+ * The device-specific probe callback will get invoked by device_add()
+ */
+ error = device_add(&mc_dev->dev);
+ if (error < 0) {
+ dev_err(parent_dev,
+ "device_add() failed for device %s: %d\n",
+ dev_name(&mc_dev->dev), error);
+ goto error_cleanup_dev;
+ }
+
+ dev_dbg(parent_dev, "added %s\n", dev_name(&mc_dev->dev));
+
+ *new_mc_dev = mc_dev;
+ return 0;
+
+error_cleanup_dev:
+ kfree(mc_dev->regions);
+ kfree(mc_bus);
+ kfree(mc_dev);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_device_add);
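+
+/*
+ * Usage sketch (illustrative only, not taken from this driver): a bus scan
+ * that discovered a child object could register it roughly like this,
+ * assuming 'obj_desc' was filled in by dprc_get_obj() and 'mc_bus_dev' is
+ * the parent DPRC device:
+ *
+ *   struct fsl_mc_device *child_dev;
+ *   int err;
+ *
+ *   err = fsl_mc_device_add(&obj_desc, NULL, &mc_bus_dev->dev, &child_dev);
+ *   if (err < 0)
+ *           return err;
+ */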
+
+static struct notifier_block fsl_mc_nb;
+
+/**
+ * fsl_mc_device_remove - Remove an fsl-mc device from being visible to
+ * Linux
+ *
+ * @mc_dev: Pointer to an fsl-mc device
+ */
+void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
+{
+ kfree(mc_dev->driver_override);
+ mc_dev->driver_override = NULL;
+
+ /*
+ * The device-specific remove callback will get invoked by device_del()
+ */
+ device_del(&mc_dev->dev);
+ put_device(&mc_dev->dev);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
+
+struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
+ u16 if_id)
+{
+ struct fsl_mc_device *mc_bus_dev, *endpoint;
+ struct fsl_mc_obj_desc endpoint_desc = {{ 0 }};
+ struct dprc_endpoint endpoint1 = {{ 0 }};
+ struct dprc_endpoint endpoint2 = {{ 0 }};
+ int state, err;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ strcpy(endpoint1.type, mc_dev->obj_desc.type);
+ endpoint1.id = mc_dev->obj_desc.id;
+ endpoint1.if_id = if_id;
+
+ err = dprc_get_connection(mc_bus_dev->mc_io, 0,
+ mc_bus_dev->mc_handle,
+ &endpoint1, &endpoint2,
+ &state);
+
+ if (err == -ENOTCONN || state == -1)
+ return ERR_PTR(-ENOTCONN);
+
+ if (err < 0) {
+ dev_err(&mc_bus_dev->dev, "dprc_get_connection() = %d\n", err);
+ return ERR_PTR(err);
+ }
+
+ strcpy(endpoint_desc.type, endpoint2.type);
+ endpoint_desc.id = endpoint2.id;
+ endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+
+ /*
+ * We know from interrogating the firmware that the device has an
+ * endpoint. If the lookup returned NULL, the endpoint simply has not
+ * been discovered by the fsl-mc bus yet, so force a rescan of the
+ * devices in this container and retry the lookup.
+ */
+ if (!endpoint) {
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+
+ if (mutex_trylock(&mc_bus->scan_mutex)) {
+ err = dprc_scan_objects(mc_bus_dev, true);
+ mutex_unlock(&mc_bus->scan_mutex);
+ }
+
+ if (err < 0)
+ return ERR_PTR(err);
+ }
+
+ endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+ /*
+ * This means that the endpoint might reside in a different isolation
+ * context (DPRC/container). Not much to do, so return a permission
+ * error.
+ */
+ if (!endpoint)
+ return ERR_PTR(-EPERM);
+
+ return endpoint;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_get_endpoint);
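+
+/*
+ * Usage sketch (illustrative only): an object driver can look up the object
+ * connected to one of its interfaces, e.g. the peer of a DPNI, which has a
+ * single interface and therefore always passes if_id 0:
+ *
+ *   struct fsl_mc_device *peer;
+ *
+ *   peer = fsl_mc_get_endpoint(mc_dev, 0);
+ *   if (IS_ERR(peer))
+ *           return PTR_ERR(peer);
+ */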
+
+static int get_mc_addr_translation_ranges(struct device *dev,
+ struct fsl_mc_addr_translation_range
+ **ranges,
+ u8 *num_ranges)
+{
+ struct fsl_mc_addr_translation_range *r;
+ struct of_range_parser parser;
+ struct of_range range;
+
+ of_range_parser_init(&parser, dev->of_node);
+ *num_ranges = of_range_count(&parser);
+ if (!*num_ranges) {
+ /*
+ * Missing or empty ranges property ("ranges;") for the
+ * 'fsl,qoriq-mc' node. In this case, identity mapping
+ * will be used.
+ */
+ *ranges = NULL;
+ return 0;
+ }
+
+ *ranges = devm_kcalloc(dev, *num_ranges,
+ sizeof(struct fsl_mc_addr_translation_range),
+ GFP_KERNEL);
+ if (!(*ranges))
+ return -ENOMEM;
+
+ r = *ranges;
+ for_each_of_range(&parser, &range) {
+ r->mc_region_type = range.flags;
+ r->start_mc_offset = range.bus_addr;
+ r->end_mc_offset = range.bus_addr + range.size;
+ r->start_phys_addr = range.cpu_addr;
+ r++;
+ }
+
+ return 0;
+}
+
+/*
+ * fsl_mc_bus_probe - callback invoked when the root MC bus is being
+ * added
+ */
+static int fsl_mc_bus_probe(struct platform_device *pdev)
+{
+ struct fsl_mc_obj_desc obj_desc;
+ int error;
+ struct fsl_mc *mc;
+ struct fsl_mc_device *mc_bus_dev = NULL;
+ struct fsl_mc_io *mc_io = NULL;
+ int container_id;
+ phys_addr_t mc_portal_phys_addr;
+ u32 mc_portal_size, mc_stream_id;
+ struct resource *plat_res;
+
+ mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
+ if (!mc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mc);
+
+ plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (plat_res) {
+ mc->fsl_mc_regs = devm_ioremap_resource(&pdev->dev, plat_res);
+ if (IS_ERR(mc->fsl_mc_regs))
+ return PTR_ERR(mc->fsl_mc_regs);
+ }
+
+ if (mc->fsl_mc_regs) {
+ if (IS_ENABLED(CONFIG_ACPI) && !dev_of_node(&pdev->dev)) {
+ mc_stream_id = readl(mc->fsl_mc_regs + FSL_MC_FAPR);
+ /*
+ * HW ORs the PL and BMT bits, places the result in bit
+ * 14 of the StreamID and ORs in the ICID. Calculate it
+ * accordingly.
+ */
+ mc_stream_id = (mc_stream_id & 0xffff) |
+ ((mc_stream_id & (MC_FAPR_PL | MC_FAPR_BMT)) ?
+ BIT(14) : 0);
+ error = acpi_dma_configure_id(&pdev->dev,
+ DEV_DMA_COHERENT,
+ &mc_stream_id);
+ if (error == -EPROBE_DEFER)
+ return error;
+ if (error)
+ dev_warn(&pdev->dev,
+ "failed to configure dma: %d.\n",
+ error);
+ }
+
+ /*
+ * Some bootloaders pause the MC firmware before booting the
+ * kernel so that the MC does not cause faults as soon as the
+ * SMMU probes, because no configuration is in place for the MC
+ * yet.
+ * At this point the MC should have all its SMMU setup done, so
+ * make sure it is resumed.
+ */
+ writel(readl(mc->fsl_mc_regs + FSL_MC_GCR1) &
+ (~(GCR1_P1_STOP | GCR1_P2_STOP)),
+ mc->fsl_mc_regs + FSL_MC_GCR1);
+ }
+
+ /*
+ * Get physical address of MC portal for the root DPRC:
+ */
+ plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mc_portal_phys_addr = plat_res->start;
+ mc_portal_size = resource_size(plat_res);
+ mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff;
+
+ error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr,
+ mc_portal_size, NULL,
+ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io);
+ if (error < 0)
+ return error;
+
+ error = mc_get_version(mc_io, 0, &mc_version);
+ if (error != 0) {
+ dev_err(&pdev->dev,
+ "mc_get_version() failed with error %d\n", error);
+ goto error_cleanup_mc_io;
+ }
+
+ dev_info(&pdev->dev, "MC firmware version: %u.%u.%u\n",
+ mc_version.major, mc_version.minor, mc_version.revision);
+
+ if (dev_of_node(&pdev->dev)) {
+ error = get_mc_addr_translation_ranges(&pdev->dev,
+ &mc->translation_ranges,
+ &mc->num_translation_ranges);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+ }
+
+ error = dprc_get_container_id(mc_io, 0, &container_id);
+ if (error < 0) {
+ dev_err(&pdev->dev,
+ "dprc_get_container_id() failed: %d\n", error);
+ goto error_cleanup_mc_io;
+ }
+
+ memset(&obj_desc, 0, sizeof(struct fsl_mc_obj_desc));
+ error = dprc_get_api_version(mc_io, 0,
+ &obj_desc.ver_major,
+ &obj_desc.ver_minor);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+
+ obj_desc.vendor = FSL_MC_VENDOR_FREESCALE;
+ strcpy(obj_desc.type, "dprc");
+ obj_desc.id = container_id;
+ obj_desc.irq_count = 1;
+ obj_desc.region_count = 0;
+
+ error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, &mc_bus_dev);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+
+ mc->root_mc_bus_dev = mc_bus_dev;
+ mc_bus_dev->dev.fwnode = pdev->dev.fwnode;
+ return 0;
+
+error_cleanup_mc_io:
+ fsl_destroy_mc_io(mc_io);
+ return error;
+}
+
+/*
+ * fsl_mc_bus_remove - callback invoked when the root MC bus is being
+ * removed
+ */
+static int fsl_mc_bus_remove(struct platform_device *pdev)
+{
+ struct fsl_mc *mc = platform_get_drvdata(pdev);
+ struct fsl_mc_io *mc_io;
+
+ if (!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev))
+ return -EINVAL;
+
+ mc_io = mc->root_mc_bus_dev->mc_io;
+ fsl_mc_device_remove(mc->root_mc_bus_dev);
+ fsl_destroy_mc_io(mc_io);
+
+ bus_unregister_notifier(&fsl_mc_bus_type, &fsl_mc_nb);
+
+ if (mc->fsl_mc_regs) {
+ /*
+ * Pause the MC firmware so that it doesn't crash in certain
+ * scenarios, such as kexec.
+ */
+ writel(readl(mc->fsl_mc_regs + FSL_MC_GCR1) |
+ (GCR1_P1_STOP | GCR1_P2_STOP),
+ mc->fsl_mc_regs + FSL_MC_GCR1);
+ }
+
+ return 0;
+}
+
+static void fsl_mc_bus_shutdown(struct platform_device *pdev)
+{
+ fsl_mc_bus_remove(pdev);
+}
+
+static const struct of_device_id fsl_mc_bus_match_table[] = {
+ {.compatible = "fsl,qoriq-mc",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, fsl_mc_bus_match_table);
+
+static const struct acpi_device_id fsl_mc_bus_acpi_match_table[] = {
+ {"NXP0008", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, fsl_mc_bus_acpi_match_table);
+
+static struct platform_driver fsl_mc_bus_driver = {
+ .driver = {
+ .name = "fsl_mc_bus",
+ .pm = NULL,
+ .of_match_table = fsl_mc_bus_match_table,
+ .acpi_match_table = fsl_mc_bus_acpi_match_table,
+ },
+ .probe = fsl_mc_bus_probe,
+ .remove = fsl_mc_bus_remove,
+ .shutdown = fsl_mc_bus_shutdown,
+};
+
+static int fsl_mc_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct resource *res;
+ void __iomem *fsl_mc_regs;
+
+ if (action != BUS_NOTIFY_ADD_DEVICE)
+ return 0;
+
+ if (!of_match_device(fsl_mc_bus_match_table, dev) &&
+ !acpi_match_device(fsl_mc_bus_acpi_match_table, dev))
+ return 0;
+
+ res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 1);
+ if (!res)
+ return 0;
+
+ fsl_mc_regs = ioremap(res->start, resource_size(res));
+ if (!fsl_mc_regs)
+ return 0;
+
+ /*
+ * Make sure that the MC firmware is paused before the IOMMU setup for
+ * it is done or otherwise the firmware will crash right after the SMMU
+ * gets probed and enabled.
+ */
+ writel(readl(fsl_mc_regs + FSL_MC_GCR1) | (GCR1_P1_STOP | GCR1_P2_STOP),
+ fsl_mc_regs + FSL_MC_GCR1);
+ iounmap(fsl_mc_regs);
+
+ return 0;
+}
+
+static struct notifier_block fsl_mc_nb = {
+ .notifier_call = fsl_mc_bus_notifier,
+};
+
+static int __init fsl_mc_bus_driver_init(void)
+{
+ int error;
+
+ error = bus_register(&fsl_mc_bus_type);
+ if (error < 0) {
+ pr_err("bus type registration failed: %d\n", error);
+ goto error_cleanup_cache;
+ }
+
+ error = platform_driver_register(&fsl_mc_bus_driver);
+ if (error < 0) {
+ pr_err("platform_driver_register() failed: %d\n", error);
+ goto error_cleanup_bus;
+ }
+
+ error = dprc_driver_init();
+ if (error < 0)
+ goto error_cleanup_driver;
+
+ error = fsl_mc_allocator_driver_init();
+ if (error < 0)
+ goto error_cleanup_dprc_driver;
+
+ return bus_register_notifier(&platform_bus_type, &fsl_mc_nb);
+
+error_cleanup_dprc_driver:
+ dprc_driver_exit();
+
+error_cleanup_driver:
+ platform_driver_unregister(&fsl_mc_bus_driver);
+
+error_cleanup_bus:
+ bus_unregister(&fsl_mc_bus_type);
+
+error_cleanup_cache:
+ return error;
+}
+postcore_initcall(fsl_mc_bus_driver_init);
diff --git a/drivers/bus/fsl-mc/fsl-mc-msi.c b/drivers/bus/fsl-mc/fsl-mc-msi.c
new file mode 100644
index 0000000000..82cd69f788
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-msi.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale Management Complex (MC) bus driver MSI support
+ *
+ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ */
+
+#include <linux/of_irq.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/acpi_iort.h>
+
+#include "fsl-mc-private.h"
+
+#ifdef GENERIC_MSI_DOMAIN_OPS
+/*
+ * Generate a unique ID identifying the interrupt (only used within the MSI
+ * irqdomain). Combine the ICID with the interrupt index.
+ */
+static irq_hw_number_t fsl_mc_domain_calc_hwirq(struct fsl_mc_device *dev,
+ struct msi_desc *desc)
+{
+ /*
+ * Use ICID * 10000 as the base hwirq value so that it is readable
+ * as a decimal value in /proc/interrupts.
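+ * For example, ICID 25 with MSI index 3 yields hwirq 250003.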
+ */
+ return (irq_hw_number_t)(desc->msi_index + (dev->icid * 10000));
+}
+
+static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg,
+ struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = fsl_mc_domain_calc_hwirq(to_fsl_mc_device(desc->dev),
+ desc);
+}
+#else
+#define fsl_mc_msi_set_desc NULL
+#endif
+
+static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
+{
+ struct msi_domain_ops *ops = info->ops;
+
+ if (!ops)
+ return;
+
+ /*
+ * set_desc should not be set by the caller
+ */
+ if (!ops->set_desc)
+ ops->set_desc = fsl_mc_msi_set_desc;
+}
+
+static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_device_irq *mc_dev_irq,
+ struct msi_desc *msi_desc)
+{
+ int error;
+ struct fsl_mc_device *owner_mc_dev = mc_dev_irq->mc_dev;
+ struct dprc_irq_cfg irq_cfg;
+
+ /*
+ * msi_desc->msg.address is 0x0 when this function is invoked in
+ * the free_irq() code path. In this case, for the MC, we don't
+ * really need to "unprogram" the MSI, so we just return.
+ */
+ if (msi_desc->msg.address_lo == 0x0 && msi_desc->msg.address_hi == 0x0)
+ return;
+
+ if (!owner_mc_dev)
+ return;
+
+ irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) |
+ msi_desc->msg.address_lo;
+ irq_cfg.val = msi_desc->msg.data;
+ irq_cfg.irq_num = msi_desc->irq;
+
+ if (owner_mc_dev == mc_bus_dev) {
+ /*
+ * IRQ is for the mc_bus_dev's DPRC itself
+ */
+ error = dprc_set_irq(mc_bus_dev->mc_io,
+ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
+ mc_bus_dev->mc_handle,
+ mc_dev_irq->dev_irq_index,
+ &irq_cfg);
+ if (error < 0) {
+ dev_err(&owner_mc_dev->dev,
+ "dprc_set_irq() failed: %d\n", error);
+ }
+ } else {
+ /*
+ * IRQ is for a child device of mc_bus_dev
+ */
+ error = dprc_set_obj_irq(mc_bus_dev->mc_io,
+ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
+ mc_bus_dev->mc_handle,
+ owner_mc_dev->obj_desc.type,
+ owner_mc_dev->obj_desc.id,
+ mc_dev_irq->dev_irq_index,
+ &irq_cfg);
+ if (error < 0) {
+ dev_err(&owner_mc_dev->dev,
+ "dprc_obj_set_irq() failed: %d\n", error);
+ }
+ }
+}
+
+/*
+ * NOTE: This function is invoked with interrupts disabled
+ */
+static void fsl_mc_msi_write_msg(struct irq_data *irq_data,
+ struct msi_msg *msg)
+{
+ struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data);
+ struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_desc->dev);
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ struct fsl_mc_device_irq *mc_dev_irq =
+ &mc_bus->irq_resources[msi_desc->msi_index];
+
+ msi_desc->msg = *msg;
+
+ /*
+ * Program the MSI (paddr, value) pair in the device:
+ */
+ __fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq, msi_desc);
+}
+
+static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+ if (!chip)
+ return;
+
+ /*
+ * irq_write_msi_msg should not be set by the caller
+ */
+ if (!chip->irq_write_msi_msg)
+ chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
+}
+
+/**
+ * fsl_mc_msi_create_irq_domain - Create an fsl-mc MSI interrupt domain
+ * @fwnode: Optional firmware node of the interrupt controller
+ * @info: MSI domain info
+ * @parent: Parent irq domain
+ *
+ * Updates the domain and chip ops and creates an fsl-mc MSI
+ * interrupt domain.
+ *
+ * Returns:
+ * A domain pointer or NULL in case of failure.
+ */
+struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
+ struct msi_domain_info *info,
+ struct irq_domain *parent)
+{
+ struct irq_domain *domain;
+
+ if (WARN_ON((info->flags & MSI_FLAG_LEVEL_CAPABLE)))
+ info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
+ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
+ fsl_mc_msi_update_dom_ops(info);
+ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+ fsl_mc_msi_update_chip_ops(info);
+ info->flags |= MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS | MSI_FLAG_FREE_MSI_DESCS;
+
+ domain = msi_create_irq_domain(fwnode, info, parent);
+ if (domain)
+ irq_domain_update_bus_token(domain, DOMAIN_BUS_FSL_MC_MSI);
+
+ return domain;
+}
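+
+/*
+ * Caller sketch (illustrative only; 'my_msi_ops', 'my_msi_chip' and the
+ * domain pointers are placeholders, not defined in this file): an MSI
+ * controller driver fills in a msi_domain_info and lets this helper hook
+ * up the fsl-mc defaults:
+ *
+ *   static struct msi_domain_info info = {
+ *           .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
+ *           .ops = &my_msi_ops,
+ *           .chip = &my_msi_chip,
+ *   };
+ *
+ *   domain = fsl_mc_msi_create_irq_domain(fwnode, &info, parent_domain);
+ */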
+
+struct irq_domain *fsl_mc_find_msi_domain(struct device *dev)
+{
+ struct device *root_dprc_dev;
+ struct device *bus_dev;
+ struct irq_domain *msi_domain;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ fsl_mc_get_root_dprc(dev, &root_dprc_dev);
+ bus_dev = root_dprc_dev->parent;
+
+ if (bus_dev->of_node) {
+ msi_domain = of_msi_map_get_device_domain(dev,
+ mc_dev->icid,
+ DOMAIN_BUS_FSL_MC_MSI);
+
+ /*
+ * If the msi-map property is missing, assume that all the
+ * child containers inherit the domain from the parent.
+ */
+ if (!msi_domain)
+ msi_domain = of_msi_get_domain(bus_dev,
+ bus_dev->of_node,
+ DOMAIN_BUS_FSL_MC_MSI);
+ } else {
+ msi_domain = iort_get_device_domain(dev, mc_dev->icid,
+ DOMAIN_BUS_FSL_MC_MSI);
+ }
+
+ return msi_domain;
+}
+
+int fsl_mc_msi_domain_alloc_irqs(struct device *dev, unsigned int irq_count)
+{
+ int error = msi_setup_device_data(dev);
+
+ if (error)
+ return error;
+
+ /*
+ * NOTE: Calling this function will trigger the invocation of the
+ * its_fsl_mc_msi_prepare() callback
+ */
+ error = msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, irq_count - 1);
+
+ if (error)
+ dev_err(dev, "Failed to allocate IRQs\n");
+ return error;
+}
+
+void fsl_mc_msi_domain_free_irqs(struct device *dev)
+{
+ msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
+}
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
new file mode 100644
index 0000000000..b3520ea1b9
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -0,0 +1,695 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Freescale Management Complex (MC) bus private declarations
+ *
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ *
+ */
+#ifndef _FSL_MC_PRIVATE_H_
+#define _FSL_MC_PRIVATE_H_
+
+#include <linux/fsl/mc.h>
+#include <linux/mutex.h>
+#include <linux/ioctl.h>
+#include <linux/miscdevice.h>
+
+/*
+ * Data Path Management Complex (DPMNG) General API
+ */
+
+/* DPMNG command versioning */
+#define DPMNG_CMD_BASE_VERSION 1
+#define DPMNG_CMD_ID_OFFSET 4
+
+#define DPMNG_CMD(id) (((id) << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)
+
+/* DPMNG command IDs */
+#define DPMNG_CMDID_GET_VERSION DPMNG_CMD(0x831)
+
+struct dpmng_rsp_get_version {
+ __le32 revision;
+ __le32 version_major;
+ __le32 version_minor;
+};
+
+/*
+ * Data Path Management Command Portal (DPMCP) API
+ */
+
+/* Minimal supported DPMCP Version */
+#define DPMCP_MIN_VER_MAJOR 3
+#define DPMCP_MIN_VER_MINOR 0
+
+/* DPMCP command versioning */
+#define DPMCP_CMD_BASE_VERSION 1
+#define DPMCP_CMD_ID_OFFSET 4
+
+#define DPMCP_CMD(id) (((id) << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION)
+
+/* DPMCP command IDs */
+#define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800)
+#define DPMCP_CMDID_RESET DPMCP_CMD(0x005)
+
+struct dpmcp_cmd_open {
+ __le32 dpmcp_id;
+};
+
+/*
+ * Initialization and runtime control APIs for DPMCP
+ */
+int dpmcp_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpmcp_id,
+ u16 *token);
+
+int dpmcp_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpmcp_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/*
+ * Data Path Resource Container (DPRC) API
+ */
+
+/* Minimal supported DPRC Version */
+#define DPRC_MIN_VER_MAJOR 6
+#define DPRC_MIN_VER_MINOR 0
+
+/* DPRC command versioning */
+#define DPRC_CMD_BASE_VERSION 1
+#define DPRC_CMD_2ND_VERSION 2
+#define DPRC_CMD_3RD_VERSION 3
+#define DPRC_CMD_ID_OFFSET 4
+
+#define DPRC_CMD(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
+#define DPRC_CMD_V2(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_2ND_VERSION)
+#define DPRC_CMD_V3(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_3RD_VERSION)
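+
+/*
+ * For example, DPRC_CMD(0x004) expands to (0x004 << 4) | 1 = 0x041, i.e. the
+ * command versioning is carried in the low four bits of the command ID.
+ */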
+
+/* DPRC command IDs */
+#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
+#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05)
+
+#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004)
+#define DPRC_CMDID_RESET_CONT DPRC_CMD(0x005)
+#define DPRC_CMDID_RESET_CONT_V2 DPRC_CMD_V2(0x005)
+
+#define DPRC_CMDID_SET_IRQ DPRC_CMD(0x010)
+#define DPRC_CMDID_SET_IRQ_ENABLE DPRC_CMD(0x012)
+#define DPRC_CMDID_SET_IRQ_MASK DPRC_CMD(0x014)
+#define DPRC_CMDID_GET_IRQ_STATUS DPRC_CMD(0x016)
+#define DPRC_CMDID_CLEAR_IRQ_STATUS DPRC_CMD(0x017)
+
+#define DPRC_CMDID_GET_CONT_ID DPRC_CMD(0x830)
+#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159)
+#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
+#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E)
+#define DPRC_CMDID_GET_OBJ_REG_V2 DPRC_CMD_V2(0x15E)
+#define DPRC_CMDID_GET_OBJ_REG_V3 DPRC_CMD_V3(0x15E)
+#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
+
+#define DPRC_CMDID_GET_CONNECTION DPRC_CMD(0x16C)
+
+struct dprc_cmd_open {
+ __le32 container_id;
+};
+
+struct dprc_cmd_reset_container {
+ __le32 child_container_id;
+ __le32 options;
+};
+
+struct dprc_cmd_set_irq {
+ /* cmd word 0 */
+ __le32 irq_val;
+ u8 irq_index;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le64 irq_addr;
+ /* cmd word 2 */
+ __le32 irq_num;
+};
+
+#define DPRC_ENABLE 0x1
+
+struct dprc_cmd_set_irq_enable {
+ u8 enable;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dprc_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dprc_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dprc_cmd_clear_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_attributes {
+ /* response word 0 */
+ __le32 container_id;
+ __le32 icid;
+ /* response word 1 */
+ __le32 options;
+ __le32 portal_id;
+};
+
+struct dprc_rsp_get_obj_count {
+ __le32 pad;
+ __le32 obj_count;
+};
+
+struct dprc_cmd_get_obj {
+ __le32 obj_index;
+};
+
+struct dprc_rsp_get_obj {
+ /* response word 0 */
+ __le32 pad0;
+ __le32 id;
+ /* response word 1 */
+ __le16 vendor;
+ u8 irq_count;
+ u8 region_count;
+ __le32 state;
+ /* response word 2 */
+ __le16 version_major;
+ __le16 version_minor;
+ __le16 flags;
+ __le16 pad1;
+ /* response word 3-4 */
+ u8 type[16];
+ /* response word 5-6 */
+ u8 label[16];
+};
+
+struct dprc_cmd_get_obj_region {
+ /* cmd word 0 */
+ __le32 obj_id;
+ __le16 pad0;
+ u8 region_index;
+ u8 pad1;
+ /* cmd word 1-2 */
+ __le64 pad2[2];
+ /* cmd word 3-4 */
+ u8 obj_type[16];
+};
+
+struct dprc_rsp_get_obj_region {
+ /* response word 0 */
+ __le64 pad0;
+ /* response word 1 */
+ __le64 base_offset;
+ /* response word 2 */
+ __le32 size;
+ u8 type;
+ u8 pad2[3];
+ /* response word 3 */
+ __le32 flags;
+ __le32 pad3;
+ /* response word 4 */
+ /* base_addr may be zero if older MC firmware is used */
+ __le64 base_addr;
+};
+
+struct dprc_cmd_set_obj_irq {
+ /* cmd word 0 */
+ __le32 irq_val;
+ u8 irq_index;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le64 irq_addr;
+ /* cmd word 2 */
+ __le32 irq_num;
+ __le32 obj_id;
+ /* cmd word 3-4 */
+ u8 obj_type[16];
+};
+
+struct dprc_cmd_get_connection {
+ __le32 ep1_id;
+ __le16 ep1_interface_id;
+ u8 pad[2];
+ u8 ep1_type[16];
+};
+
+struct dprc_rsp_get_connection {
+ __le64 pad[3];
+ __le32 ep2_id;
+ __le16 ep2_interface_id;
+ __le16 pad1;
+ u8 ep2_type[16];
+ __le32 state;
+};
+
+/*
+ * DPRC API for managing and querying DPAA resources
+ */
+int dprc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int container_id,
+ u16 *token);
+
+int dprc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/* DPRC IRQ events */
+
+/* IRQ event - Indicates that a new object added to the container */
+#define DPRC_IRQ_EVENT_OBJ_ADDED 0x00000001
+/* IRQ event - Indicates that an object was removed from the container */
+#define DPRC_IRQ_EVENT_OBJ_REMOVED 0x00000002
+/*
+ * IRQ event - Indicates that one of the descendant containers opened by
+ * this container was destroyed
+ */
+#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010
+
+/*
+ * IRQ event - Indicates that one of the objects opened by this container
+ * was destroyed
+ */
+#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020
+
+/* IRQ event - Indicates that an object was created in the container */
+#define DPRC_IRQ_EVENT_OBJ_CREATED 0x00000040
+
+/**
+ * struct dprc_irq_cfg - IRQ configuration
+ * @paddr: Address that must be written to signal a message-based interrupt
+ * @val: Value to write into irq_addr address
+ * @irq_num: A user defined number associated with this IRQ
+ */
+struct dprc_irq_cfg {
+ phys_addr_t paddr;
+ u32 val;
+ int irq_num;
+};
+
+int dprc_set_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg);
+
+int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
+int dprc_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
+
+int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status);
+
+/**
+ * struct dprc_attributes - Container attributes
+ * @container_id: Container's ID
+ * @icid: Container's ICID
+ * @portal_id: Container's portal ID
+ * @options: Container's options as set at container's creation
+ */
+struct dprc_attributes {
+ int container_id;
+ u32 icid;
+ int portal_id;
+ u64 options;
+};
+
+int dprc_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dprc_attributes *attributes);
+
+int dprc_get_obj_count(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *obj_count);
+
+int dprc_get_obj(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int obj_index,
+ struct fsl_mc_obj_desc *obj_desc);
+
+int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg);
+/**
+ * enum dprc_region_type - Region type
+ * @DPRC_REGION_TYPE_MC_PORTAL: MC portal region
+ * @DPRC_REGION_TYPE_QBMAN_PORTAL: Qbman portal region
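+ * @DPRC_REGION_TYPE_QBMAN_MEM_BACKED_PORTAL: Qbman memory-backed portal region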
+ */
+enum dprc_region_type {
+ DPRC_REGION_TYPE_MC_PORTAL,
+ DPRC_REGION_TYPE_QBMAN_PORTAL,
+ DPRC_REGION_TYPE_QBMAN_MEM_BACKED_PORTAL
+};
+
+/**
+ * struct dprc_region_desc - Mappable region descriptor
+ * @base_offset: Region offset from region's base address.
+ * For DPMCP and DPRC objects, region base is offset from SoC MC portals
+ * base address; For DPIO, region base is offset from SoC QMan portals
+ * base address
+ * @size: Region size (in bytes)
+ * @flags: Region attributes
+ * @type: Portal region type
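+ * @base_address: Region base address; may be zero when reported by older
+ * MC firmware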
+ */
+struct dprc_region_desc {
+ u32 base_offset;
+ u32 size;
+ u32 flags;
+ enum dprc_region_type type;
+ u64 base_address;
+};
+
+int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 region_index,
+ struct dprc_region_desc *region_desc);
+
+int dprc_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
+
+int dprc_get_container_id(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int *container_id);
+
+/**
+ * struct dprc_endpoint - Endpoint description for link connect/disconnect
+ * operations
+ * @type: Endpoint object type: NULL terminated string
+ * @id: Endpoint object ID
+ * @if_id: Interface ID; should be set for endpoints with multiple
+ * interfaces ("dpsw", "dpdmux"); for others, always set to 0
+ */
+struct dprc_endpoint {
+ char type[16];
+ int id;
+ u16 if_id;
+};
+
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state);
+
+/*
+ * Data Path Buffer Pool (DPBP) API
+ */
+
+/* DPBP Version */
+#define DPBP_VER_MAJOR 3
+#define DPBP_VER_MINOR 2
+
+/* Command versioning */
+#define DPBP_CMD_BASE_VERSION 1
+#define DPBP_CMD_ID_OFFSET 4
+
+#define DPBP_CMD(id) (((id) << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPBP_CMDID_CLOSE DPBP_CMD(0x800)
+
+#define DPBP_CMDID_ENABLE DPBP_CMD(0x002)
+#define DPBP_CMDID_DISABLE DPBP_CMD(0x003)
+#define DPBP_CMDID_GET_ATTR DPBP_CMD(0x004)
+#define DPBP_CMDID_RESET DPBP_CMD(0x005)
+
+struct dpbp_cmd_open {
+ __le32 dpbp_id;
+};
+
+#define DPBP_ENABLE 0x1
+
+struct dpbp_rsp_get_attributes {
+ /* response word 0 */
+ __le16 pad;
+ __le16 bpid;
+ __le32 id;
+ /* response word 1 */
+ __le16 version_major;
+ __le16 version_minor;
+};
+
+/*
+ * Data Path Concentrator (DPCON) API
+ */
+
+/* DPCON Version */
+#define DPCON_VER_MAJOR 3
+#define DPCON_VER_MINOR 2
+
+/* Command versioning */
+#define DPCON_CMD_BASE_VERSION 1
+#define DPCON_CMD_ID_OFFSET 4
+
+#define DPCON_CMD(id) (((id) << DPCON_CMD_ID_OFFSET) | DPCON_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPCON_CMDID_CLOSE DPCON_CMD(0x800)
+
+#define DPCON_CMDID_ENABLE DPCON_CMD(0x002)
+#define DPCON_CMDID_DISABLE DPCON_CMD(0x003)
+#define DPCON_CMDID_GET_ATTR DPCON_CMD(0x004)
+#define DPCON_CMDID_RESET DPCON_CMD(0x005)
+
+#define DPCON_CMDID_SET_NOTIFICATION DPCON_CMD(0x100)
+
+struct dpcon_cmd_open {
+ __le32 dpcon_id;
+};
+
+#define DPCON_ENABLE 1
+
+struct dpcon_rsp_get_attr {
+ /* response word 0 */
+ __le32 id;
+ __le16 qbman_ch_id;
+ u8 num_priorities;
+ u8 pad;
+};
+
+struct dpcon_cmd_set_notification {
+ /* cmd word 0 */
+ __le32 dpio_id;
+ u8 priority;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le64 user_ctx;
+};
+
+/*
+ * Generic FSL MC API
+ */
+
+/* generic command versioning */
+#define OBJ_CMD_BASE_VERSION 1
+#define OBJ_CMD_ID_OFFSET 4
+
+#define OBJ_CMD(id) (((id) << OBJ_CMD_ID_OFFSET) | OBJ_CMD_BASE_VERSION)
+
+/* open command codes */
+#define DPRTC_CMDID_OPEN OBJ_CMD(0x810)
+#define DPNI_CMDID_OPEN OBJ_CMD(0x801)
+#define DPSW_CMDID_OPEN OBJ_CMD(0x802)
+#define DPIO_CMDID_OPEN OBJ_CMD(0x803)
+#define DPBP_CMDID_OPEN OBJ_CMD(0x804)
+#define DPRC_CMDID_OPEN OBJ_CMD(0x805)
+#define DPDMUX_CMDID_OPEN OBJ_CMD(0x806)
+#define DPCI_CMDID_OPEN OBJ_CMD(0x807)
+#define DPCON_CMDID_OPEN OBJ_CMD(0x808)
+#define DPSECI_CMDID_OPEN OBJ_CMD(0x809)
+#define DPAIOP_CMDID_OPEN OBJ_CMD(0x80a)
+#define DPMCP_CMDID_OPEN OBJ_CMD(0x80b)
+#define DPMAC_CMDID_OPEN OBJ_CMD(0x80c)
+#define DPDCEI_CMDID_OPEN OBJ_CMD(0x80d)
+#define DPDMAI_CMDID_OPEN OBJ_CMD(0x80e)
+#define DPDBG_CMDID_OPEN OBJ_CMD(0x80f)
+
+/* Generic object command IDs */
+#define OBJ_CMDID_CLOSE OBJ_CMD(0x800)
+#define OBJ_CMDID_RESET OBJ_CMD(0x005)
+
+struct fsl_mc_obj_cmd_open {
+ __le32 obj_id;
+};
+
+/**
+ * struct fsl_mc_resource_pool - Pool of MC resources of a given
+ * type
+ * @type: type of resources in the pool
+ * @max_count: maximum number of resources in the pool
+ * @free_count: number of free resources in the pool
+ * @mutex: mutex to serialize access to the pool's free list
+ * @free_list: anchor node of list of free resources in the pool
+ * @mc_bus: pointer to the MC bus that owns this resource pool
+ */
+struct fsl_mc_resource_pool {
+ enum fsl_mc_pool_type type;
+ int max_count;
+ int free_count;
+ struct mutex mutex; /* serializes access to free_list */
+ struct list_head free_list;
+ struct fsl_mc_bus *mc_bus;
+};
+
+/**
+ * struct fsl_mc_uapi - information associated with a device file
+ * @misc: struct miscdevice linked to the root dprc
+ * @device: newly created device in /dev
+ * @mutex: mutex lock to serialize the open/release operations
+ * @local_instance_in_use: local MC I/O instance in use or not
+ * @static_mc_io: pointer to the static MC I/O object
+ */
+struct fsl_mc_uapi {
+ struct miscdevice misc;
+ struct device *device;
+ struct mutex mutex; /* serialize open/release operations */
+ u32 local_instance_in_use;
+ struct fsl_mc_io *static_mc_io;
+};
+
+/**
+ * struct fsl_mc_bus - logical bus that corresponds to a physical DPRC
+ * @mc_dev: fsl-mc device for the bus device itself.
+ * @resource_pools: array of resource pools (one pool per resource type)
+ * for this MC bus. These resources represent allocatable entities
+ * from the physical DPRC.
+ * @irq_resources: Pointer to array of IRQ objects for the IRQ pool
+ * @scan_mutex: Serializes bus scanning
+ * @dprc_attr: DPRC attributes
+ * @uapi_misc: struct that abstracts the interaction with userspace
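+ * @irq_enabled: flag that keeps track of whether the DPRC IRQ is enabled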
+ */
+struct fsl_mc_bus {
+ struct fsl_mc_device mc_dev;
+ struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES];
+ struct fsl_mc_device_irq *irq_resources;
+ struct mutex scan_mutex; /* serializes bus scanning */
+ struct dprc_attributes dprc_attr;
+ struct fsl_mc_uapi uapi_misc;
+ int irq_enabled;
+};
+
+#define to_fsl_mc_bus(_mc_dev) \
+ container_of(_mc_dev, struct fsl_mc_bus, mc_dev)
+
+int __must_check fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
+ struct fsl_mc_io *mc_io,
+ struct device *parent_dev,
+ struct fsl_mc_device **new_mc_dev);
+
+void fsl_mc_device_remove(struct fsl_mc_device *mc_dev);
+
+int __init dprc_driver_init(void);
+
+void dprc_driver_exit(void);
+
+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+ bool alloc_interrupts);
+
+int __init fsl_mc_allocator_driver_init(void);
+
+void fsl_mc_allocator_driver_exit(void);
+
+void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
+
+void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
+
+int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
+ enum fsl_mc_pool_type pool_type,
+ struct fsl_mc_resource
+ **new_resource);
+
+void fsl_mc_resource_free(struct fsl_mc_resource *resource);
+
+int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
+ unsigned int irq_count);
+
+void fsl_mc_msi_domain_free_irqs(struct device *dev);
+
+struct irq_domain *fsl_mc_find_msi_domain(struct device *dev);
+
+int __must_check fsl_create_mc_io(struct device *dev,
+ phys_addr_t mc_portal_phys_addr,
+ u32 mc_portal_size,
+ struct fsl_mc_device *dpmcp_dev,
+ u32 flags, struct fsl_mc_io **new_mc_io);
+
+void fsl_destroy_mc_io(struct fsl_mc_io *mc_io);
+
+bool fsl_mc_is_root_dprc(struct device *dev);
+
+void fsl_mc_get_root_dprc(struct device *dev,
+ struct device **root_dprc_dev);
+
+struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc *obj_desc,
+ struct fsl_mc_device *mc_bus_dev);
+
+u16 mc_cmd_hdr_read_cmdid(struct fsl_mc_command *cmd);
+
+#ifdef CONFIG_FSL_MC_UAPI_SUPPORT
+
+int fsl_mc_uapi_create_device_file(struct fsl_mc_bus *mc_bus);
+
+void fsl_mc_uapi_remove_device_file(struct fsl_mc_bus *mc_bus);
+
+#else
+
+static inline int fsl_mc_uapi_create_device_file(struct fsl_mc_bus *mc_bus)
+{
+ return 0;
+}
+
+static inline void fsl_mc_uapi_remove_device_file(struct fsl_mc_bus *mc_bus)
+{
+}
+
+#endif
+
+int disable_dprc_irq(struct fsl_mc_device *mc_dev);
+int enable_dprc_irq(struct fsl_mc_device *mc_dev);
+int get_dprc_irq_state(struct fsl_mc_device *mc_dev);
+
+#endif /* _FSL_MC_PRIVATE_H_ */
diff --git a/drivers/bus/fsl-mc/fsl-mc-uapi.c b/drivers/bus/fsl-mc/fsl-mc-uapi.c
new file mode 100644
index 0000000000..9c4c1395fc
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-uapi.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Management Complex (MC) userspace support
+ *
+ * Copyright 2021 NXP
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+
+#include "fsl-mc-private.h"
+
+struct uapi_priv_data {
+ struct fsl_mc_uapi *uapi;
+ struct fsl_mc_io *mc_io;
+};
+
+struct fsl_mc_cmd_desc {
+ u16 cmdid_value;
+ u16 cmdid_mask;
+ int size;
+ bool token;
+ int flags;
+};
+
+#define FSL_MC_CHECK_MODULE_ID BIT(0)
+#define FSL_MC_CAP_NET_ADMIN_NEEDED BIT(1)
+
+enum fsl_mc_cmd_index {
+ DPDBG_DUMP = 0,
+ DPDBG_SET,
+ DPRC_GET_CONTAINER_ID,
+ DPRC_CREATE_CONT,
+ DPRC_DESTROY_CONT,
+ DPRC_ASSIGN,
+ DPRC_UNASSIGN,
+ DPRC_GET_OBJ_COUNT,
+ DPRC_GET_OBJ,
+ DPRC_GET_RES_COUNT,
+ DPRC_GET_RES_IDS,
+ DPRC_SET_OBJ_LABEL,
+ DPRC_SET_LOCKED,
+ DPRC_CONNECT,
+ DPRC_DISCONNECT,
+ DPRC_GET_POOL,
+ DPRC_GET_POOL_COUNT,
+ DPRC_GET_CONNECTION,
+ DPCI_GET_LINK_STATE,
+ DPCI_GET_PEER_ATTR,
+ DPAIOP_GET_SL_VERSION,
+ DPAIOP_GET_STATE,
+ DPMNG_GET_VERSION,
+ DPSECI_GET_TX_QUEUE,
+ DPMAC_GET_COUNTER,
+ DPMAC_GET_MAC_ADDR,
+ DPNI_SET_PRIM_MAC,
+ DPNI_GET_PRIM_MAC,
+ DPNI_GET_STATISTICS,
+ DPNI_GET_LINK_STATE,
+ DPNI_GET_MAX_FRAME_LENGTH,
+ DPSW_GET_TAILDROP,
+ DPSW_SET_TAILDROP,
+ DPSW_IF_GET_COUNTER,
+ DPSW_IF_GET_MAX_FRAME_LENGTH,
+ DPDMUX_GET_COUNTER,
+ DPDMUX_IF_GET_MAX_FRAME_LENGTH,
+ GET_ATTR,
+ GET_IRQ_MASK,
+ GET_IRQ_STATUS,
+ CLOSE,
+ OPEN,
+ GET_API_VERSION,
+ DESTROY,
+ CREATE,
+};
+
+static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
+ [DPDBG_DUMP] = {
+ .cmdid_value = 0x1300,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 28,
+ },
+ [DPDBG_SET] = {
+ .cmdid_value = 0x1400,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 28,
+ },
+ [DPRC_GET_CONTAINER_ID] = {
+ .cmdid_value = 0x8300,
+ .cmdid_mask = 0xFFF0,
+ .token = false,
+ .size = 8,
+ },
+ [DPRC_CREATE_CONT] = {
+ .cmdid_value = 0x1510,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 40,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_DESTROY_CONT] = {
+ .cmdid_value = 0x1520,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_ASSIGN] = {
+ .cmdid_value = 0x1570,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 40,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_UNASSIGN] = {
+ .cmdid_value = 0x1580,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 40,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_GET_OBJ_COUNT] = {
+ .cmdid_value = 0x1590,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 16,
+ },
+ [DPRC_GET_OBJ] = {
+ .cmdid_value = 0x15A0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ },
+ [DPRC_GET_RES_COUNT] = {
+ .cmdid_value = 0x15B0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 32,
+ },
+ [DPRC_GET_RES_IDS] = {
+ .cmdid_value = 0x15C0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 40,
+ },
+ [DPRC_SET_OBJ_LABEL] = {
+ .cmdid_value = 0x1610,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 48,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_SET_LOCKED] = {
+ .cmdid_value = 0x16B0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 16,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_CONNECT] = {
+ .cmdid_value = 0x1670,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 56,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_DISCONNECT] = {
+ .cmdid_value = 0x1680,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 32,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPRC_GET_POOL] = {
+ .cmdid_value = 0x1690,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ },
+ [DPRC_GET_POOL_COUNT] = {
+ .cmdid_value = 0x16A0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPRC_GET_CONNECTION] = {
+ .cmdid_value = 0x16C0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 32,
+ },
+
+ [DPCI_GET_LINK_STATE] = {
+ .cmdid_value = 0x0E10,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPCI_GET_PEER_ATTR] = {
+ .cmdid_value = 0x0E20,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPAIOP_GET_SL_VERSION] = {
+ .cmdid_value = 0x2820,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPAIOP_GET_STATE] = {
+ .cmdid_value = 0x2830,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPMNG_GET_VERSION] = {
+ .cmdid_value = 0x8310,
+ .cmdid_mask = 0xFFF0,
+ .token = false,
+ .size = 8,
+ },
+ [DPSECI_GET_TX_QUEUE] = {
+ .cmdid_value = 0x1970,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 14,
+ },
+ [DPMAC_GET_COUNTER] = {
+ .cmdid_value = 0x0c40,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 9,
+ },
+ [DPMAC_GET_MAC_ADDR] = {
+ .cmdid_value = 0x0c50,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPNI_SET_PRIM_MAC] = {
+ .cmdid_value = 0x2240,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 16,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPNI_GET_PRIM_MAC] = {
+ .cmdid_value = 0x2250,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPNI_GET_STATISTICS] = {
+ .cmdid_value = 0x25D0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 10,
+ },
+ [DPNI_GET_LINK_STATE] = {
+ .cmdid_value = 0x2150,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPNI_GET_MAX_FRAME_LENGTH] = {
+ .cmdid_value = 0x2170,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [DPSW_GET_TAILDROP] = {
+ .cmdid_value = 0x0A80,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 14,
+ },
+ [DPSW_SET_TAILDROP] = {
+ .cmdid_value = 0x0A90,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 24,
+ .flags = FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [DPSW_IF_GET_COUNTER] = {
+ .cmdid_value = 0x0340,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 11,
+ },
+ [DPSW_IF_GET_MAX_FRAME_LENGTH] = {
+ .cmdid_value = 0x0450,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 10,
+ },
+ [DPDMUX_GET_COUNTER] = {
+ .cmdid_value = 0x0b20,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 11,
+ },
+ [DPDMUX_IF_GET_MAX_FRAME_LENGTH] = {
+ .cmdid_value = 0x0a20,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 10,
+ },
+ [GET_ATTR] = {
+ .cmdid_value = 0x0040,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+ [GET_IRQ_MASK] = {
+ .cmdid_value = 0x0150,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 13,
+ },
+ [GET_IRQ_STATUS] = {
+ .cmdid_value = 0x0160,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 13,
+ },
+ [CLOSE] = {
+ .cmdid_value = 0x8000,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 8,
+ },
+
+ /* Common commands amongst all types of objects. Must be checked last. */
+ [OPEN] = {
+ .cmdid_value = 0x8000,
+ .cmdid_mask = 0xFC00,
+ .token = false,
+ .size = 12,
+ .flags = FSL_MC_CHECK_MODULE_ID,
+ },
+ [GET_API_VERSION] = {
+ .cmdid_value = 0xA000,
+ .cmdid_mask = 0xFC00,
+ .token = false,
+ .size = 8,
+ .flags = FSL_MC_CHECK_MODULE_ID,
+ },
+ [DESTROY] = {
+ .cmdid_value = 0x9800,
+ .cmdid_mask = 0xFC00,
+ .token = true,
+ .size = 12,
+ .flags = FSL_MC_CHECK_MODULE_ID | FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+ [CREATE] = {
+ .cmdid_value = 0x9000,
+ .cmdid_mask = 0xFC00,
+ .token = true,
+ .size = 64,
+ .flags = FSL_MC_CHECK_MODULE_ID | FSL_MC_CAP_NET_ADMIN_NEEDED,
+ },
+};
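+
+/*
+ * A command is accepted when (cmdid & cmdid_mask) == cmdid_value. The 0xFFF0
+ * masks ignore the low four versioning bits of the command ID, so e.g.
+ * DPRC_CMDID_GET_ATTR (0x0041) matches the GET_ATTR entry above.
+ */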
+
+#define FSL_MC_NUM_ACCEPTED_CMDS ARRAY_SIZE(fsl_mc_accepted_cmds)
+
+#define FSL_MC_MAX_MODULE_ID 0x10
+
+static int fsl_mc_command_check(struct fsl_mc_device *mc_dev,
+ struct fsl_mc_command *mc_cmd)
+{
+ struct fsl_mc_cmd_desc *desc = NULL;
+ int mc_cmd_max_size, i;
+ bool token_provided;
+ u16 cmdid, module_id;
+ char *mc_cmd_end;
+ char sum = 0;
+
+ /* Check if this is an accepted MC command */
+ cmdid = mc_cmd_hdr_read_cmdid(mc_cmd);
+ for (i = 0; i < FSL_MC_NUM_ACCEPTED_CMDS; i++) {
+ desc = &fsl_mc_accepted_cmds[i];
+ if ((cmdid & desc->cmdid_mask) == desc->cmdid_value)
+ break;
+ }
+ if (i == FSL_MC_NUM_ACCEPTED_CMDS) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: cmdid not accepted\n", cmdid);
+ return -EACCES;
+ }
+
+ /* Check if the size of the command is honored. Anything beyond the
+ * last valid byte of the command should be zeroed.
+ */
+ mc_cmd_max_size = sizeof(*mc_cmd);
+ mc_cmd_end = ((char *)mc_cmd) + desc->size;
+ for (i = desc->size; i < mc_cmd_max_size; i++)
+ sum |= *mc_cmd_end++;
+ if (sum) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: garbage beyond max size of %d bytes!\n",
+ cmdid, desc->size);
+ return -EACCES;
+ }
+
+ /* Some MC commands request a token to be passed so that object
+ * identification is possible. Check if the token passed in the command
+ * is as expected.
+ */
+ token_provided = mc_cmd_hdr_read_token(mc_cmd) ? true : false;
+ if (token_provided != desc->token) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: token 0x%04x is invalid!\n",
+ cmdid, mc_cmd_hdr_read_token(mc_cmd));
+ return -EACCES;
+ }
+
+ /* If needed, check if the module ID passed is valid */
+ if (desc->flags & FSL_MC_CHECK_MODULE_ID) {
+ /* The module ID is represented by bits [4:9] from the cmdid */
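+ /* e.g. DPRC_CMDID_OPEN = OBJ_CMD(0x805) = 0x8051, which yields module ID 5 */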
+ module_id = (cmdid & GENMASK(9, 4)) >> 4;
+ if (module_id == 0 || module_id > FSL_MC_MAX_MODULE_ID) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: unknown module ID 0x%x\n",
+ cmdid, module_id);
+ return -EACCES;
+ }
+ }
+
+ /* Some commands alter how hardware resources are managed. For these
+ * commands, check for CAP_NET_ADMIN.
+ */
+ if (desc->flags & FSL_MC_CAP_NET_ADMIN_NEEDED) {
+ if (!capable(CAP_NET_ADMIN)) {
+ dev_err(&mc_dev->dev, "MC command 0x%04x: needs CAP_NET_ADMIN!\n",
+ cmdid);
+ return -EPERM;
+ }
+ }
+
+ return 0;
+}
+
+static int fsl_mc_uapi_send_command(struct fsl_mc_device *mc_dev, unsigned long arg,
+ struct fsl_mc_io *mc_io)
+{
+ struct fsl_mc_command mc_cmd;
+ int error;
+
+ error = copy_from_user(&mc_cmd, (void __user *)arg, sizeof(mc_cmd));
+ if (error)
+ return -EFAULT;
+
+ error = fsl_mc_command_check(mc_dev, &mc_cmd);
+ if (error)
+ return error;
+
+ error = mc_send_command(mc_io, &mc_cmd);
+ if (error)
+ return error;
+
+ error = copy_to_user((void __user *)arg, &mc_cmd, sizeof(mc_cmd));
+ if (error)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int fsl_mc_uapi_dev_open(struct inode *inode, struct file *filep)
+{
+ struct fsl_mc_device *root_mc_device;
+ struct uapi_priv_data *priv_data;
+ struct fsl_mc_io *dynamic_mc_io;
+ struct fsl_mc_uapi *mc_uapi;
+ struct fsl_mc_bus *mc_bus;
+ int error;
+
+ priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
+ if (!priv_data)
+ return -ENOMEM;
+
+ mc_uapi = container_of(filep->private_data, struct fsl_mc_uapi, misc);
+ mc_bus = container_of(mc_uapi, struct fsl_mc_bus, uapi_misc);
+ root_mc_device = &mc_bus->mc_dev;
+
+ mutex_lock(&mc_uapi->mutex);
+
+ if (!mc_uapi->local_instance_in_use) {
+ priv_data->mc_io = mc_uapi->static_mc_io;
+ mc_uapi->local_instance_in_use = 1;
+ } else {
+ error = fsl_mc_portal_allocate(root_mc_device, 0,
+ &dynamic_mc_io);
+ if (error) {
+ dev_dbg(&root_mc_device->dev,
+ "Could not allocate MC portal\n");
+ goto error_portal_allocate;
+ }
+
+ priv_data->mc_io = dynamic_mc_io;
+ }
+ priv_data->uapi = mc_uapi;
+ filep->private_data = priv_data;
+
+ mutex_unlock(&mc_uapi->mutex);
+
+ return 0;
+
+error_portal_allocate:
+ mutex_unlock(&mc_uapi->mutex);
+ kfree(priv_data);
+
+ return error;
+}
+
+static int fsl_mc_uapi_dev_release(struct inode *inode, struct file *filep)
+{
+ struct uapi_priv_data *priv_data;
+ struct fsl_mc_uapi *mc_uapi;
+ struct fsl_mc_io *mc_io;
+
+ priv_data = filep->private_data;
+ mc_uapi = priv_data->uapi;
+ mc_io = priv_data->mc_io;
+
+ mutex_lock(&mc_uapi->mutex);
+
+ if (mc_io == mc_uapi->static_mc_io)
+ mc_uapi->local_instance_in_use = 0;
+ else
+ fsl_mc_portal_free(mc_io);
+
+ kfree(filep->private_data);
+ filep->private_data = NULL;
+
+ mutex_unlock(&mc_uapi->mutex);
+
+ return 0;
+}
+
+static long fsl_mc_uapi_dev_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ struct uapi_priv_data *priv_data = file->private_data;
+ struct fsl_mc_device *root_mc_device;
+ struct fsl_mc_bus *mc_bus;
+ int error;
+
+ mc_bus = container_of(priv_data->uapi, struct fsl_mc_bus, uapi_misc);
+ root_mc_device = &mc_bus->mc_dev;
+
+ switch (cmd) {
+ case FSL_MC_SEND_MC_COMMAND:
+ error = fsl_mc_uapi_send_command(root_mc_device, arg, priv_data->mc_io);
+ break;
+ default:
+ dev_dbg(&root_mc_device->dev, "unexpected ioctl call number\n");
+ error = -EINVAL;
+ }
+
+ return error;
+}
+
+static const struct file_operations fsl_mc_uapi_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = fsl_mc_uapi_dev_open,
+ .release = fsl_mc_uapi_dev_release,
+ .unlocked_ioctl = fsl_mc_uapi_dev_ioctl,
+};
+
+int fsl_mc_uapi_create_device_file(struct fsl_mc_bus *mc_bus)
+{
+ struct fsl_mc_device *mc_dev = &mc_bus->mc_dev;
+ struct fsl_mc_uapi *mc_uapi = &mc_bus->uapi_misc;
+ int error;
+
+ mc_uapi->misc.minor = MISC_DYNAMIC_MINOR;
+ mc_uapi->misc.name = dev_name(&mc_dev->dev);
+ mc_uapi->misc.fops = &fsl_mc_uapi_dev_fops;
+
+ error = misc_register(&mc_uapi->misc);
+ if (error)
+ return error;
+
+ mc_uapi->static_mc_io = mc_bus->mc_dev.mc_io;
+
+ mutex_init(&mc_uapi->mutex);
+
+ return 0;
+}
+
+void fsl_mc_uapi_remove_device_file(struct fsl_mc_bus *mc_bus)
+{
+ misc_deregister(&mc_bus->uapi_misc.misc);
+}
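+
+/*
+ * Userspace usage sketch (illustrative only; the misc device is named after
+ * the root DPRC, e.g. /dev/dprc.1): build a struct fsl_mc_command and submit
+ * it through the FSL_MC_SEND_MC_COMMAND ioctl; on success the same structure
+ * holds the MC response:
+ *
+ *   struct fsl_mc_command cmd = { 0 };
+ *   int fd = open("/dev/dprc.1", O_RDWR);
+ *   int err;
+ *
+ *   // fill in cmd.header and cmd.params[] before submitting
+ *   err = ioctl(fd, FSL_MC_SEND_MC_COMMAND, &cmd);
+ *   // on success, cmd now holds the MC response
+ */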
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
new file mode 100644
index 0000000000..95b10a6cf3
--- /dev/null
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+static int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io,
+ struct fsl_mc_device *dpmcp_dev)
+{
+ int error;
+
+ if (mc_io->dpmcp_dev)
+ return -EINVAL;
+
+ if (dpmcp_dev->mc_io)
+ return -EINVAL;
+
+ error = dpmcp_open(mc_io,
+ 0,
+ dpmcp_dev->obj_desc.id,
+ &dpmcp_dev->mc_handle);
+ if (error < 0)
+ return error;
+
+ mc_io->dpmcp_dev = dpmcp_dev;
+ dpmcp_dev->mc_io = mc_io;
+ return 0;
+}
+
+static void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io)
+{
+ int error;
+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
+
+ error = dpmcp_close(mc_io,
+ 0,
+ dpmcp_dev->mc_handle);
+ if (error < 0) {
+ dev_err(&dpmcp_dev->dev, "dpmcp_close() failed: %d\n",
+ error);
+ }
+
+ mc_io->dpmcp_dev = NULL;
+ dpmcp_dev->mc_io = NULL;
+}
+
+/**
+ * fsl_create_mc_io() - Creates an MC I/O object
+ *
+ * @dev: device to be associated with the MC I/O object
+ * @mc_portal_phys_addr: physical address of the MC portal to use
+ * @mc_portal_size: size in bytes of the MC portal
+ * @dpmcp_dev: Pointer to the DPMCP object associated with this MC I/O
+ * object or NULL if none.
+ * @flags: flags for the new MC I/O object
+ * @new_mc_io: Area to return pointer to newly created MC I/O object
+ *
+ * Returns '0' on Success; Error code otherwise.
+ */
+int __must_check fsl_create_mc_io(struct device *dev,
+ phys_addr_t mc_portal_phys_addr,
+ u32 mc_portal_size,
+ struct fsl_mc_device *dpmcp_dev,
+ u32 flags, struct fsl_mc_io **new_mc_io)
+{
+ int error;
+ struct fsl_mc_io *mc_io;
+ void __iomem *mc_portal_virt_addr;
+ struct resource *res;
+
+ mc_io = devm_kzalloc(dev, sizeof(*mc_io), GFP_KERNEL);
+ if (!mc_io)
+ return -ENOMEM;
+
+ mc_io->dev = dev;
+ mc_io->flags = flags;
+ mc_io->portal_phys_addr = mc_portal_phys_addr;
+ mc_io->portal_size = mc_portal_size;
+ if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
+ raw_spin_lock_init(&mc_io->spinlock);
+ else
+ mutex_init(&mc_io->mutex);
+
+ res = devm_request_mem_region(dev,
+ mc_portal_phys_addr,
+ mc_portal_size,
+ "mc_portal");
+ if (!res) {
+ dev_err(dev,
+ "devm_request_mem_region failed for MC portal %pa\n",
+ &mc_portal_phys_addr);
+ return -EBUSY;
+ }
+
+ mc_portal_virt_addr = devm_ioremap(dev,
+ mc_portal_phys_addr,
+ mc_portal_size);
+ if (!mc_portal_virt_addr) {
+ dev_err(dev,
+ "devm_ioremap failed for MC portal %pa\n",
+ &mc_portal_phys_addr);
+ return -ENXIO;
+ }
+
+ mc_io->portal_virt_addr = mc_portal_virt_addr;
+ if (dpmcp_dev) {
+ error = fsl_mc_io_set_dpmcp(mc_io, dpmcp_dev);
+ if (error < 0)
+ goto error_destroy_mc_io;
+ }
+
+ *new_mc_io = mc_io;
+ return 0;
+
+error_destroy_mc_io:
+ fsl_destroy_mc_io(mc_io);
+ return error;
+}
+
+/**
+ * fsl_destroy_mc_io() - Destroys an MC I/O object
+ *
+ * @mc_io: MC I/O object to destroy
+ */
+void fsl_destroy_mc_io(struct fsl_mc_io *mc_io)
+{
+ struct fsl_mc_device *dpmcp_dev;
+
+ if (!mc_io)
+ return;
+
+ dpmcp_dev = mc_io->dpmcp_dev;
+
+ if (dpmcp_dev)
+ fsl_mc_io_unset_dpmcp(mc_io);
+
+ devm_iounmap(mc_io->dev, mc_io->portal_virt_addr);
+ devm_release_mem_region(mc_io->dev,
+ mc_io->portal_phys_addr,
+ mc_io->portal_size);
+
+ mc_io->portal_virt_addr = NULL;
+ devm_kfree(mc_io->dev, mc_io);
+}
+
+/**
+ * fsl_mc_portal_allocate - Allocates an MC portal
+ *
+ * @mc_dev: MC device for which the MC portal is to be allocated
+ * @mc_io_flags: Flags for the fsl_mc_io object that wraps the allocated
+ * MC portal.
+ * @new_mc_io: Pointer to area where the pointer to the fsl_mc_io object
+ * that wraps the allocated MC portal is to be returned
+ *
+ * This function allocates an MC portal from the device's parent DPRC,
+ * from the corresponding MC bus' pool of MC portals and wraps
+ * it in a new fsl_mc_io object. If 'mc_dev' is a DPRC itself, the
+ * portal is allocated from its own MC bus.
+ */
+int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
+ u16 mc_io_flags,
+ struct fsl_mc_io **new_mc_io)
+{
+ struct fsl_mc_device *mc_bus_dev;
+ struct fsl_mc_bus *mc_bus;
+ phys_addr_t mc_portal_phys_addr;
+ size_t mc_portal_size;
+ struct fsl_mc_device *dpmcp_dev;
+ int error = -EINVAL;
+ struct fsl_mc_resource *resource = NULL;
+ struct fsl_mc_io *mc_io = NULL;
+
+ if (mc_dev->flags & FSL_MC_IS_DPRC) {
+ mc_bus_dev = mc_dev;
+ } else {
+ if (!dev_is_fsl_mc(mc_dev->dev.parent))
+ return error;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ }
+
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ *new_mc_io = NULL;
+ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_DPMCP, &resource);
+ if (error < 0)
+ return error;
+
+ error = -EINVAL;
+ dpmcp_dev = resource->data;
+
+ if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR ||
+ (dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR &&
+ dpmcp_dev->obj_desc.ver_minor < DPMCP_MIN_VER_MINOR)) {
+ dev_err(&dpmcp_dev->dev,
+ "ERROR: Version %d.%d of DPMCP not supported.\n",
+ dpmcp_dev->obj_desc.ver_major,
+ dpmcp_dev->obj_desc.ver_minor);
+ error = -ENOTSUPP;
+ goto error_cleanup_resource;
+ }
+
+ mc_portal_phys_addr = dpmcp_dev->regions[0].start;
+ mc_portal_size = resource_size(dpmcp_dev->regions);
+
+ error = fsl_create_mc_io(&mc_bus_dev->dev,
+ mc_portal_phys_addr,
+ mc_portal_size, dpmcp_dev,
+ mc_io_flags, &mc_io);
+ if (error < 0)
+ goto error_cleanup_resource;
+
+ dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
+ &dpmcp_dev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!dpmcp_dev->consumer_link) {
+ error = -EINVAL;
+ goto error_cleanup_mc_io;
+ }
+
+ *new_mc_io = mc_io;
+ return 0;
+
+error_cleanup_mc_io:
+ fsl_destroy_mc_io(mc_io);
+error_cleanup_resource:
+ fsl_mc_resource_free(resource);
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_portal_allocate);
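+
+/*
+ * Usage sketch (illustrative only): an object driver typically allocates a
+ * portal in its probe routine, resets it before use and frees it again on
+ * the error/remove path:
+ *
+ *   err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
+ *   if (err)
+ *           return err;
+ *   err = fsl_mc_portal_reset(mc_dev->mc_io);
+ *   if (err) {
+ *           fsl_mc_portal_free(mc_dev->mc_io);
+ *           return err;
+ *   }
+ */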
+
+/**
+ * fsl_mc_portal_free - Returns an MC portal to the pool of free MC portals
+ * of a given MC bus
+ *
+ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
+ */
+void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
+{
+ struct fsl_mc_device *dpmcp_dev;
+ struct fsl_mc_resource *resource;
+
+ /*
+ * Every mc_io obtained by calling fsl_mc_portal_allocate() is supposed
+ * to have a DPMCP object associated with it.
+ */
+ dpmcp_dev = mc_io->dpmcp_dev;
+
+ resource = dpmcp_dev->resource;
+ if (!resource || resource->type != FSL_MC_POOL_DPMCP)
+ return;
+
+ if (resource->data != dpmcp_dev)
+ return;
+
+ fsl_destroy_mc_io(mc_io);
+ fsl_mc_resource_free(resource);
+
+ dpmcp_dev->consumer_link = NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
+
+/**
+ * fsl_mc_portal_reset - Resets the dpmcp object for a given fsl_mc_io object
+ *
+ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to reset
+ */
+int fsl_mc_portal_reset(struct fsl_mc_io *mc_io)
+{
+ int error;
+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
+
+ error = dpmcp_reset(mc_io, 0, dpmcp_dev->mc_handle);
+ if (error < 0) {
+ dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_portal_reset);
diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
new file mode 100644
index 0000000000..f2052cd0a0
--- /dev/null
+++ b/drivers/bus/fsl-mc/mc-sys.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * I/O services to send MC commands to the MC hardware
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+/*
+ * Timeout in milliseconds to wait for the completion of an MC command
+ */
+#define MC_CMD_COMPLETION_TIMEOUT_MS 500
+
+/*
+ * usleep_range() min and max values used to throttle down polling
+ * iterations while waiting for MC command completion
+ */
+#define MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS 10
+#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500
+
+static enum mc_cmd_status mc_cmd_hdr_read_status(struct fsl_mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+
+ return (enum mc_cmd_status)hdr->status;
+}
+
+u16 mc_cmd_hdr_read_cmdid(struct fsl_mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+ u16 cmd_id = le16_to_cpu(hdr->cmd_id);
+
+ return cmd_id;
+}
+
+static int mc_status_to_error(enum mc_cmd_status status)
+{
+ static const int mc_status_to_error_map[] = {
+ [MC_CMD_STATUS_OK] = 0,
+ [MC_CMD_STATUS_AUTH_ERR] = -EACCES,
+ [MC_CMD_STATUS_NO_PRIVILEGE] = -EPERM,
+ [MC_CMD_STATUS_DMA_ERR] = -EIO,
+ [MC_CMD_STATUS_CONFIG_ERR] = -ENXIO,
+ [MC_CMD_STATUS_TIMEOUT] = -ETIMEDOUT,
+ [MC_CMD_STATUS_NO_RESOURCE] = -ENAVAIL,
+ [MC_CMD_STATUS_NO_MEMORY] = -ENOMEM,
+ [MC_CMD_STATUS_BUSY] = -EBUSY,
+ [MC_CMD_STATUS_UNSUPPORTED_OP] = -ENOTSUPP,
+ [MC_CMD_STATUS_INVALID_STATE] = -ENODEV,
+ };
+
+ if ((u32)status >= ARRAY_SIZE(mc_status_to_error_map))
+ return -EINVAL;
+
+ return mc_status_to_error_map[status];
+}
+
+static const char *mc_status_to_string(enum mc_cmd_status status)
+{
+ static const char *const status_strings[] = {
+ [MC_CMD_STATUS_OK] = "Command completed successfully",
+ [MC_CMD_STATUS_READY] = "Command ready to be processed",
+ [MC_CMD_STATUS_AUTH_ERR] = "Authentication error",
+ [MC_CMD_STATUS_NO_PRIVILEGE] = "No privilege",
+ [MC_CMD_STATUS_DMA_ERR] = "DMA or I/O error",
+ [MC_CMD_STATUS_CONFIG_ERR] = "Configuration error",
+ [MC_CMD_STATUS_TIMEOUT] = "Operation timed out",
+ [MC_CMD_STATUS_NO_RESOURCE] = "No resources",
+ [MC_CMD_STATUS_NO_MEMORY] = "No memory available",
+ [MC_CMD_STATUS_BUSY] = "Device is busy",
+ [MC_CMD_STATUS_UNSUPPORTED_OP] = "Unsupported operation",
+ [MC_CMD_STATUS_INVALID_STATE] = "Invalid state"
+ };
+
+ if ((unsigned int)status >= ARRAY_SIZE(status_strings))
+ return "Unknown MC error";
+
+ return status_strings[status];
+}
+
+/**
+ * mc_write_command - writes a command to a Management Complex (MC) portal
+ *
+ * @portal: pointer to an MC portal
+ * @cmd: pointer to a filled command
+ */
+static inline void mc_write_command(struct fsl_mc_command __iomem *portal,
+ struct fsl_mc_command *cmd)
+{
+ int i;
+
+ /* copy command parameters into the portal */
+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
+ /*
+ * Data is already in the expected LE byte-order. Do an
+ * extra LE -> CPU conversion so that the CPU -> LE done in
+ * the device io write api puts it back in the right order.
+ */
+ writeq_relaxed(le64_to_cpu(cmd->params[i]), &portal->params[i]);
+
+ /* submit the command by writing the header */
+ writeq(le64_to_cpu(cmd->header), &portal->header);
+}
+
+/**
+ * mc_read_response - reads the response for the last MC command from a
+ * Management Complex (MC) portal
+ *
+ * @portal: pointer to an MC portal
+ * @resp: pointer to command response buffer
+ *
+ * Returns MC_CMD_STATUS_OK on Success; Error code otherwise.
+ */
+static inline enum mc_cmd_status mc_read_response(struct fsl_mc_command __iomem
+ *portal,
+ struct fsl_mc_command *resp)
+{
+ int i;
+ enum mc_cmd_status status;
+
+ /* Copy command response header from MC portal: */
+ resp->header = cpu_to_le64(readq_relaxed(&portal->header));
+ status = mc_cmd_hdr_read_status(resp);
+ if (status != MC_CMD_STATUS_OK)
+ return status;
+
+ /* Copy command response data from MC portal: */
+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
+ /*
+ * Data is expected to be in LE byte-order. Do an
+ * extra CPU -> LE to revert the LE -> CPU done in
+ * the device io read api.
+ */
+ resp->params[i] =
+ cpu_to_le64(readq_relaxed(&portal->params[i]));
+
+ return status;
+}
+
+/**
+ * mc_polling_wait_preemptible() - Waits for the completion of an MC
+ * command doing preemptible polling.
+ * usleep_range() is called between
+ * polling iterations.
+ * @mc_io: MC I/O object to be used
+ * @cmd: command buffer to receive MC response
+ * @mc_status: MC command completion status
+ */
+static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
+ struct fsl_mc_command *cmd,
+ enum mc_cmd_status *mc_status)
+{
+ enum mc_cmd_status status;
+ unsigned long jiffies_until_timeout =
+ jiffies + msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS);
+
+ /*
+ * Wait for response from the MC hardware:
+ */
+ for (;;) {
+ status = mc_read_response(mc_io->portal_virt_addr, cmd);
+ if (status != MC_CMD_STATUS_READY)
+ break;
+
+ /*
+ * TODO: When MC command completion interrupts are supported
+ * call wait function here instead of usleep_range()
+ */
+ usleep_range(MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS,
+ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
+
+ if (time_after_eq(jiffies, jiffies_until_timeout)) {
+ dev_dbg(mc_io->dev,
+ "MC command timed out (portal: %pa, dprc handle: %#x, command: %#x)\n",
+ &mc_io->portal_phys_addr,
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
+
+ return -ETIMEDOUT;
+ }
+ }
+
+ *mc_status = status;
+ return 0;
+}
+
+/**
+ * mc_polling_wait_atomic() - Waits for the completion of an MC command
+ * doing atomic polling. udelay() is called
+ * between polling iterations.
+ * @mc_io: MC I/O object to be used
+ * @cmd: command buffer to receive MC response
+ * @mc_status: MC command completion status
+ */
+static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
+ struct fsl_mc_command *cmd,
+ enum mc_cmd_status *mc_status)
+{
+ enum mc_cmd_status status;
+ unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;
+
+ BUILD_BUG_ON((MC_CMD_COMPLETION_TIMEOUT_MS * 1000) %
+ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS != 0);
+
+ for (;;) {
+ status = mc_read_response(mc_io->portal_virt_addr, cmd);
+ if (status != MC_CMD_STATUS_READY)
+ break;
+
+ udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
+ timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
+ if (timeout_usecs == 0) {
+ dev_dbg(mc_io->dev,
+ "MC command timed out (portal: %pa, dprc handle: %#x, command: %#x)\n",
+ &mc_io->portal_phys_addr,
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
+
+ return -ETIMEDOUT;
+ }
+ }
+
+ *mc_status = status;
+ return 0;
+}
+
+/**
+ * mc_send_command() - Sends a command to the MC device using the given
+ * MC I/O object
+ * @mc_io: MC I/O object to be used
+ * @cmd: command to be sent
+ *
+ * Returns '0' on Success; Error code otherwise.
+ */
+int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd)
+{
+ int error;
+ enum mc_cmd_status status;
+ unsigned long irq_flags = 0;
+
+ if (in_irq() && !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
+ return -EINVAL;
+
+ if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
+ raw_spin_lock_irqsave(&mc_io->spinlock, irq_flags);
+ else
+ mutex_lock(&mc_io->mutex);
+
+ /*
+ * Send command to the MC hardware:
+ */
+ mc_write_command(mc_io->portal_virt_addr, cmd);
+
+ /*
+ * Wait for response from the MC hardware:
+ */
+ if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
+ error = mc_polling_wait_preemptible(mc_io, cmd, &status);
+ else
+ error = mc_polling_wait_atomic(mc_io, cmd, &status);
+
+ if (error < 0)
+ goto common_exit;
+
+ if (status != MC_CMD_STATUS_OK) {
+ dev_dbg(mc_io->dev,
+ "MC command failed: portal: %pa, dprc handle: %#x, command: %#x, status: %s (%#x)\n",
+ &mc_io->portal_phys_addr,
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd),
+ mc_status_to_string(status),
+ (unsigned int)status);
+
+ error = mc_status_to_error(status);
+ goto common_exit;
+ }
+
+ error = 0;
+common_exit:
+ if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
+ raw_spin_unlock_irqrestore(&mc_io->spinlock, irq_flags);
+ else
+ mutex_unlock(&mc_io->mutex);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(mc_send_command);
diff --git a/drivers/bus/fsl-mc/obj-api.c b/drivers/bus/fsl-mc/obj-api.c
new file mode 100644
index 0000000000..06c1dd84e3
--- /dev/null
+++ b/drivers/bus/fsl-mc/obj-api.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2021 NXP
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
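+/* Look up the object-type-specific OPEN command ID; returns -1 if the type is unknown */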
+static int fsl_mc_get_open_cmd_id(const char *type)
+{
+ static const struct {
+ int cmd_id;
+ const char *type;
+ } dev_ids[] = {
+ { DPRTC_CMDID_OPEN, "dprtc" },
+ { DPRC_CMDID_OPEN, "dprc" },
+ { DPNI_CMDID_OPEN, "dpni" },
+ { DPIO_CMDID_OPEN, "dpio" },
+ { DPSW_CMDID_OPEN, "dpsw" },
+ { DPBP_CMDID_OPEN, "dpbp" },
+ { DPCON_CMDID_OPEN, "dpcon" },
+ { DPMCP_CMDID_OPEN, "dpmcp" },
+ { DPMAC_CMDID_OPEN, "dpmac" },
+ { DPSECI_CMDID_OPEN, "dpseci" },
+ { DPDMUX_CMDID_OPEN, "dpdmux" },
+ { DPDCEI_CMDID_OPEN, "dpdcei" },
+ { DPAIOP_CMDID_OPEN, "dpaiop" },
+ { DPCI_CMDID_OPEN, "dpci" },
+ { DPDMAI_CMDID_OPEN, "dpdmai" },
+ { DPDBG_CMDID_OPEN, "dpdbg" },
+ { 0, NULL }
+ };
+ int i;
+
+ for (i = 0; dev_ids[i].type; i++)
+ if (!strcmp(dev_ids[i].type, type))
+ return dev_ids[i].cmd_id;
+
+ return -1;
+}
+
+int fsl_mc_obj_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int obj_id,
+ char *obj_type,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct fsl_mc_obj_cmd_open *cmd_params;
+ int err = 0;
+ int cmd_id = fsl_mc_get_open_cmd_id(obj_type);
+
+ if (cmd_id == -1)
+ return -ENODEV;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(cmd_id, cmd_flags, 0);
+ cmd_params = (struct fsl_mc_obj_cmd_open *)cmd.params;
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_obj_open);
+
+int fsl_mc_obj_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(OBJ_CMDID_CLOSE, cmd_flags,
+ token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_obj_close);
+
+int fsl_mc_obj_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(OBJ_CMDID_RESET, cmd_flags,
+ token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_obj_reset);
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
new file mode 100644
index 0000000000..cdc4e38c11
--- /dev/null
+++ b/drivers/bus/hisi_lpc.c
@@ -0,0 +1,696 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Hisilicon Limited, All Rights Reserved.
+ * Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
+ * Author: Zou Rongrong <zourongrong@huawei.com>
+ * Author: John Garry <john.garry@huawei.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/logic_pio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/serial_8250.h>
+#include <linux/slab.h>
+
+#define DRV_NAME "hisi-lpc"
+
+/*
+ * Setting this bit means each IO operation will target a different port
+ * address; 0 means repeated IO operations will use the same port,
+ * such as BT.
+ */
+#define FG_INCRADDR_LPC 0x02
+
+struct lpc_cycle_para {
+ unsigned int opflags;
+ unsigned int csize; /* data length of each operation */
+};
+
+struct hisi_lpc_dev {
+ spinlock_t cycle_lock;
+ void __iomem *membase;
+ struct logic_pio_hwaddr *io_host;
+};
+
+/* The maximum number of IO cycles supported per operation is four */
+#define LPC_MAX_DWIDTH 4
+
+#define LPC_REG_STARTUP_SIGNAL 0x00
+#define LPC_REG_STARTUP_SIGNAL_START BIT(0)
+#define LPC_REG_OP_STATUS 0x04
+#define LPC_REG_OP_STATUS_IDLE BIT(0)
+#define LPC_REG_OP_STATUS_FINISHED BIT(1)
+#define LPC_REG_OP_LEN 0x10 /* LPC cycles count per start */
+#define LPC_REG_CMD 0x14
+#define LPC_REG_CMD_OP BIT(0) /* 0: read, 1: write */
+#define LPC_REG_CMD_SAMEADDR BIT(3)
+#define LPC_REG_ADDR 0x20 /* target address */
+#define LPC_REG_WDATA 0x24 /* write FIFO */
+#define LPC_REG_RDATA 0x28 /* read FIFO */
+
+/* The minimal nanosecond interval for each query on LPC cycle status */
+#define LPC_NSEC_PERWAIT 100
+
+/*
+ * The maximum waiting time is about 128us. It is specific to stream I/O,
+ * such as ins.
+ *
+ * The fastest IO cycle time is about 390ns, but the worst case will wait
+ * for an extra 256 LPC clocks, so (256 + 13) * 30ns = 8 us. The maximum
+ * number of burst cycles is 16, so the maximum waiting time is about
+ * 128us in the worst case.
+ *
+ * Choose 1300 as the maximum.
+ */
+#define LPC_MAX_WAITCNT 1300
+
+/* About 10us. This is specific to single IO operations, such as inb */
+#define LPC_PEROP_WAITCNT 100
+
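+/*
+ * Poll the LPC operation status register until the controller reports idle,
+ * waiting LPC_NSEC_PERWAIT ns between reads for at most @waitcnt iterations.
+ * Returns 0 when the cycle finished successfully, -EIO on error and
+ * -ETIMEDOUT on timeout.
+ */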
+static int wait_lpc_idle(void __iomem *mbase, unsigned int waitcnt)
+{
+ u32 status;
+
+ do {
+ status = readl(mbase + LPC_REG_OP_STATUS);
+ if (status & LPC_REG_OP_STATUS_IDLE)
+ return (status & LPC_REG_OP_STATUS_FINISHED) ? 0 : -EIO;
+ ndelay(LPC_NSEC_PERWAIT);
+ } while (--waitcnt);
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * hisi_lpc_target_in - trigger a series of LPC cycles for read operation
+ * @lpcdev: pointer to hisi lpc device
+ * @para: some parameters used to control the lpc I/O operations
+ * @addr: the lpc I/O target port address
+ * @buf: where the read back data is stored
+ * @opcnt: how many I/O operations required, i.e. data width
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+static int hisi_lpc_target_in(struct hisi_lpc_dev *lpcdev,
+ struct lpc_cycle_para *para, unsigned long addr,
+ unsigned char *buf, unsigned long opcnt)
+{
+ unsigned int cmd_word;
+ unsigned int waitcnt;
+ unsigned long flags;
+ int ret;
+
+ if (!buf || !opcnt || !para || !para->csize || !lpcdev)
+ return -EINVAL;
+
+ cmd_word = 0; /* IO mode, Read */
+ waitcnt = LPC_PEROP_WAITCNT;
+ if (!(para->opflags & FG_INCRADDR_LPC)) {
+ cmd_word |= LPC_REG_CMD_SAMEADDR;
+ waitcnt = LPC_MAX_WAITCNT;
+ }
+
+ /* whole operation must be atomic */
+ spin_lock_irqsave(&lpcdev->cycle_lock, flags);
+
+ writel_relaxed(opcnt, lpcdev->membase + LPC_REG_OP_LEN);
+ writel_relaxed(cmd_word, lpcdev->membase + LPC_REG_CMD);
+ writel_relaxed(addr, lpcdev->membase + LPC_REG_ADDR);
+
+ writel(LPC_REG_STARTUP_SIGNAL_START,
+ lpcdev->membase + LPC_REG_STARTUP_SIGNAL);
+
+ /* wait until the operation has finished */
+ ret = wait_lpc_idle(lpcdev->membase, waitcnt);
+ if (ret) {
+ spin_unlock_irqrestore(&lpcdev->cycle_lock, flags);
+ return ret;
+ }
+
+ readsb(lpcdev->membase + LPC_REG_RDATA, buf, opcnt);
+
+ spin_unlock_irqrestore(&lpcdev->cycle_lock, flags);
+
+ return 0;
+}
+
+/*
+ * hisi_lpc_target_out - trigger a series of LPC cycles for write operation
+ * @lpcdev: pointer to hisi lpc device
+ * @para: some parameters used to control the lpc I/O operations
+ * @addr: the lpc I/O target port address
+ * @buf: where the data to be written is stored
+ * @opcnt: how many I/O operations required, i.e. data width
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+static int hisi_lpc_target_out(struct hisi_lpc_dev *lpcdev,
+ struct lpc_cycle_para *para, unsigned long addr,
+ const unsigned char *buf, unsigned long opcnt)
+{
+ unsigned int waitcnt;
+ unsigned long flags;
+ u32 cmd_word;
+ int ret;
+
+ if (!buf || !opcnt || !para || !lpcdev)
+ return -EINVAL;
+
+ /* default is increasing address */
+ cmd_word = LPC_REG_CMD_OP; /* IO mode, write */
+ waitcnt = LPC_PEROP_WAITCNT;
+ if (!(para->opflags & FG_INCRADDR_LPC)) {
+ cmd_word |= LPC_REG_CMD_SAMEADDR;
+ waitcnt = LPC_MAX_WAITCNT;
+ }
+
+ spin_lock_irqsave(&lpcdev->cycle_lock, flags);
+
+ writel_relaxed(opcnt, lpcdev->membase + LPC_REG_OP_LEN);
+ writel_relaxed(cmd_word, lpcdev->membase + LPC_REG_CMD);
+ writel_relaxed(addr, lpcdev->membase + LPC_REG_ADDR);
+
+ writesb(lpcdev->membase + LPC_REG_WDATA, buf, opcnt);
+
+ writel(LPC_REG_STARTUP_SIGNAL_START,
+ lpcdev->membase + LPC_REG_STARTUP_SIGNAL);
+
+ /* wait until the operation has finished */
+ ret = wait_lpc_idle(lpcdev->membase, waitcnt);
+
+ spin_unlock_irqrestore(&lpcdev->cycle_lock, flags);
+
+ return ret;
+}
+
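+/* Translate a logical PIO address into the LPC bus target address */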
+static unsigned long hisi_lpc_pio_to_addr(struct hisi_lpc_dev *lpcdev,
+ unsigned long pio)
+{
+ return pio - lpcdev->io_host->io_start + lpcdev->io_host->hw_start;
+}
+
+/*
+ * hisi_lpc_comm_in - input the data in a single operation
+ * @hostdata: pointer to the device information relevant to LPC controller
+ * @pio: the target I/O port address
+ * @dwidth: the data length required to read from the target I/O port
+ *
+ * On success, the data read is returned. Otherwise, ~0 is returned.
+ */
+static u32 hisi_lpc_comm_in(void *hostdata, unsigned long pio, size_t dwidth)
+{
+ struct hisi_lpc_dev *lpcdev = hostdata;
+ struct lpc_cycle_para iopara;
+ unsigned long addr;
+ __le32 rd_data = 0;
+ int ret;
+
+ if (!lpcdev || !dwidth || dwidth > LPC_MAX_DWIDTH)
+ return ~0;
+
+ addr = hisi_lpc_pio_to_addr(lpcdev, pio);
+
+ iopara.opflags = FG_INCRADDR_LPC;
+ iopara.csize = dwidth;
+
+ ret = hisi_lpc_target_in(lpcdev, &iopara, addr,
+ (unsigned char *)&rd_data, dwidth);
+ if (ret)
+ return ~0;
+
+ return le32_to_cpu(rd_data);
+}
+
+/*
+ * hisi_lpc_comm_out - output the data in a single operation
+ * @hostdata: pointer to the device information relevant to LPC controller
+ * @pio: the target I/O port address
+ * @val: a value to be output from caller, maximum is four bytes
+ * @dwidth: the data width required writing to the target I/O port
+ *
+ * This function corresponds to out(b,w,l) only.
+ */
+static void hisi_lpc_comm_out(void *hostdata, unsigned long pio,
+ u32 val, size_t dwidth)
+{
+ struct hisi_lpc_dev *lpcdev = hostdata;
+ struct lpc_cycle_para iopara;
+ const unsigned char *buf;
+ unsigned long addr;
+ __le32 _val = cpu_to_le32(val);
+
+ if (!lpcdev || !dwidth || dwidth > LPC_MAX_DWIDTH)
+ return;
+
+ buf = (const unsigned char *)&_val;
+ addr = hisi_lpc_pio_to_addr(lpcdev, pio);
+
+ iopara.opflags = FG_INCRADDR_LPC;
+ iopara.csize = dwidth;
+
+ hisi_lpc_target_out(lpcdev, &iopara, addr, buf, dwidth);
+}
+
+/*
+ * hisi_lpc_comm_ins - input the data in the buffer in multiple operations
+ * @hostdata: pointer to the device information relevant to LPC controller
+ * @pio: the target I/O port address
+ * @buffer: a buffer where read/input data bytes are stored
+ * @dwidth: the data width required to read from the target I/O port
+ * @count: how many data units whose length is dwidth will be read
+ *
+ * On success, the data read back is stored in the buffer pointed to by @buffer.
+ * Returns 0 on success, -errno otherwise.
+ */
+static u32 hisi_lpc_comm_ins(void *hostdata, unsigned long pio, void *buffer,
+ size_t dwidth, unsigned int count)
+{
+ struct hisi_lpc_dev *lpcdev = hostdata;
+ unsigned char *buf = buffer;
+ struct lpc_cycle_para iopara;
+ unsigned long addr;
+
+ if (!lpcdev || !buf || !count || !dwidth || dwidth > LPC_MAX_DWIDTH)
+ return -EINVAL;
+
+ iopara.opflags = 0;
+ if (dwidth > 1)
+ iopara.opflags |= FG_INCRADDR_LPC;
+ iopara.csize = dwidth;
+
+ addr = hisi_lpc_pio_to_addr(lpcdev, pio);
+
+ do {
+ int ret;
+
+ ret = hisi_lpc_target_in(lpcdev, &iopara, addr, buf, dwidth);
+ if (ret)
+ return ret;
+ buf += dwidth;
+ } while (--count);
+
+ return 0;
+}
+
+/*
+ * hisi_lpc_comm_outs - output the data in the buffer in multiple operations
+ * @hostdata: pointer to the device information relevant to LPC controller
+ * @pio: the target I/O port address
+ * @buffer: a buffer where write/output data bytes are stored
+ * @dwidth: the data width required writing to the target I/O port
+ * @count: how many data units whose length is dwidth will be written
+ */
+static void hisi_lpc_comm_outs(void *hostdata, unsigned long pio,
+ const void *buffer, size_t dwidth,
+ unsigned int count)
+{
+ struct hisi_lpc_dev *lpcdev = hostdata;
+ struct lpc_cycle_para iopara;
+ const unsigned char *buf = buffer;
+ unsigned long addr;
+
+ if (!lpcdev || !buf || !count || !dwidth || dwidth > LPC_MAX_DWIDTH)
+ return;
+
+ iopara.opflags = 0;
+ if (dwidth > 1)
+ iopara.opflags |= FG_INCRADDR_LPC;
+ iopara.csize = dwidth;
+
+ addr = hisi_lpc_pio_to_addr(lpcdev, pio);
+ do {
+ if (hisi_lpc_target_out(lpcdev, &iopara, addr, buf, dwidth))
+ break;
+ buf += dwidth;
+ } while (--count);
+}
+
+static const struct logic_pio_host_ops hisi_lpc_ops = {
+ .in = hisi_lpc_comm_in,
+ .out = hisi_lpc_comm_out,
+ .ins = hisi_lpc_comm_ins,
+ .outs = hisi_lpc_comm_outs,
+};
+
+#ifdef CONFIG_ACPI
+static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev,
+ struct acpi_device *host,
+ struct resource *res)
+{
+ unsigned long sys_port;
+ resource_size_t len = resource_size(res);
+
+ sys_port = logic_pio_trans_hwaddr(acpi_fwnode_handle(host), res->start, len);
+ if (sys_port == ~0UL)
+ return -EFAULT;
+
+ res->start = sys_port;
+ res->end = sys_port + len;
+
+ return 0;
+}
+
+/*
+ * Released firmware describes the IO port max address as 0x3fff, which is
+ * the max host bus address. Fixup to a proper range. This will probably
+ * never be fixed in firmware.
+ */
+static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev,
+ struct resource *r)
+{
+ if (r->end != 0x3fff)
+ return;
+
+ if (r->start == 0xe4)
+ r->end = 0xe4 + 0x04 - 1;
+ else if (r->start == 0x2f8)
+ r->end = 0x2f8 + 0x08 - 1;
+ else
+ dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n",
+ r);
+}
+
+/*
+ * hisi_lpc_acpi_set_io_res - set the resources for a child
+ * @adev: ACPI companion of the device node whose I/O resources are to be updated
+ * @hostdev: the device node associated with host controller
+ * @res: double pointer to be set to the address of translated resources
+ * @num_res: pointer to variable to hold the number of translated resources
+ *
+ * Returns 0 when successful, and a negative value for failure.
+ *
+ * For a given host controller, each child device will have an associated
+ * host-relative address resource. This function will return the translated
+ * logical PIO addresses for each child device's resources.
+ */
+static int hisi_lpc_acpi_set_io_res(struct acpi_device *adev,
+ struct device *hostdev,
+ const struct resource **res, int *num_res)
+{
+ struct acpi_device *host = to_acpi_device(adev->dev.parent);
+ struct resource_entry *rentry;
+ LIST_HEAD(resource_list);
+ struct resource *resources;
+ int count;
+ int i;
+
+ if (!adev->status.present) {
+ dev_dbg(&adev->dev, "device is not present\n");
+ return -EIO;
+ }
+
+ if (acpi_device_enumerated(adev)) {
+ dev_dbg(&adev->dev, "has been enumerated\n");
+ return -EIO;
+ }
+
+ /*
+ * The following code segment to retrieve the resources is common to
+ * acpi_create_platform_device(), so consider a common helper function
+ * in the future.
+ */
+ count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
+ if (count <= 0) {
+ dev_dbg(&adev->dev, "failed to get resources\n");
+ return count ? count : -EIO;
+ }
+
+ resources = devm_kcalloc(hostdev, count, sizeof(*resources),
+ GFP_KERNEL);
+ if (!resources) {
+ dev_warn(hostdev, "could not allocate memory for %d resources\n",
+ count);
+ acpi_dev_free_resource_list(&resource_list);
+ return -ENOMEM;
+ }
+ count = 0;
+ list_for_each_entry(rentry, &resource_list, node) {
+ resources[count] = *rentry->res;
+ hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]);
+ count++;
+ }
+
+ acpi_dev_free_resource_list(&resource_list);
+
+ /* translate the I/O resources */
+ for (i = 0; i < count; i++) {
+ int ret;
+
+ if (!(resources[i].flags & IORESOURCE_IO))
+ continue;
+ ret = hisi_lpc_acpi_xlat_io_res(adev, host, &resources[i]);
+ if (ret) {
+ dev_err(&adev->dev, "translate IO range %pR failed (%d)\n",
+ &resources[i], ret);
+ return ret;
+ }
+ }
+ *res = resources;
+ *num_res = count;
+
+ return 0;
+}
+
+static int hisi_lpc_acpi_remove_subdev(struct device *dev, void *unused)
+{
+ platform_device_unregister(to_platform_device(dev));
+ return 0;
+}
+
+static int hisi_lpc_acpi_clear_enumerated(struct acpi_device *adev, void *not_used)
+{
+ acpi_device_clear_enumerated(adev);
+ return 0;
+}
+
+struct hisi_lpc_acpi_cell {
+ const char *hid;
+ const struct platform_device_info *pdevinfo;
+};
+
+static void hisi_lpc_acpi_remove(struct device *hostdev)
+{
+ device_for_each_child(hostdev, NULL, hisi_lpc_acpi_remove_subdev);
+ acpi_dev_for_each_child(ACPI_COMPANION(hostdev),
+ hisi_lpc_acpi_clear_enumerated, NULL);
+}
+
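+/*
+ * Create a platform device for a supported ACPI child (IPMI or 8250-compatible
+ * UART), using the translated logical PIO resources, and mark the child as
+ * enumerated. Unsupported children are skipped with a warning.
+ */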
+static int hisi_lpc_acpi_add_child(struct acpi_device *child, void *data)
+{
+ const char *hid = acpi_device_hid(child);
+ struct device *hostdev = data;
+ const struct hisi_lpc_acpi_cell *cell;
+ struct platform_device *pdev;
+ const struct resource *res;
+ bool found = false;
+ int num_res;
+ int ret;
+
+ ret = hisi_lpc_acpi_set_io_res(child, hostdev, &res, &num_res);
+ if (ret) {
+ dev_warn(hostdev, "set resource fail (%d)\n", ret);
+ return ret;
+ }
+
+ cell = (struct hisi_lpc_acpi_cell []){
+ /* ipmi */
+ {
+ .hid = "IPI0001",
+ .pdevinfo = (struct platform_device_info []) {
+ {
+ .parent = hostdev,
+ .fwnode = acpi_fwnode_handle(child),
+ .name = "hisi-lpc-ipmi",
+ .id = PLATFORM_DEVID_AUTO,
+ .res = res,
+ .num_res = num_res,
+ },
+ },
+ },
+ /* 8250-compatible uart */
+ {
+ .hid = "HISI1031",
+ .pdevinfo = (struct platform_device_info []) {
+ {
+ .parent = hostdev,
+ .fwnode = acpi_fwnode_handle(child),
+ .name = "serial8250",
+ .id = PLATFORM_DEVID_AUTO,
+ .res = res,
+ .num_res = num_res,
+ .data = (struct plat_serial8250_port []) {
+ {
+ .iobase = res->start,
+ .uartclk = 1843200,
+ .iotype = UPIO_PORT,
+ .flags = UPF_BOOT_AUTOCONF,
+ },
+ {}
+ },
+ .size_data = 2 * sizeof(struct plat_serial8250_port),
+ },
+ },
+ },
+ {}
+ };
+
+ for (; cell && cell->hid; cell++) {
+ if (!strcmp(cell->hid, hid)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_warn(hostdev,
+ "could not find cell for child device (%s), discarding\n",
+ hid);
+ return 0;
+ }
+
+ pdev = platform_device_register_full(cell->pdevinfo);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ acpi_device_set_enumerated(child);
+ return 0;
+}
+
+/*
+ * hisi_lpc_acpi_probe - probe children for ACPI FW
+ * @hostdev: LPC host device pointer
+ *
+ * Returns 0 when successful, and a negative value for failure.
+ *
+ * Create a platform device per child, fixing up the resources
+ * from bus addresses to Logical PIO addresses.
+ *
+ */
+static int hisi_lpc_acpi_probe(struct device *hostdev)
+{
+ int ret;
+
+ /* Only consider the children of the host */
+ ret = acpi_dev_for_each_child(ACPI_COMPANION(hostdev),
+ hisi_lpc_acpi_add_child, hostdev);
+ if (ret)
+ hisi_lpc_acpi_remove(hostdev);
+
+ return ret;
+}
+#else
+static int hisi_lpc_acpi_probe(struct device *dev)
+{
+ return -ENODEV;
+}
+
+static void hisi_lpc_acpi_remove(struct device *hostdev)
+{
+}
+#endif // CONFIG_ACPI
+
+/*
+ * hisi_lpc_probe - the probe callback for the hisi lpc host;
+ * performs all of the initialization
+ * @pdev: the platform device corresponding to the hisi lpc host
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+static int hisi_lpc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct logic_pio_hwaddr *range;
+ struct hisi_lpc_dev *lpcdev;
+ resource_size_t io_end;
+ int ret;
+
+ lpcdev = devm_kzalloc(dev, sizeof(*lpcdev), GFP_KERNEL);
+ if (!lpcdev)
+ return -ENOMEM;
+
+ spin_lock_init(&lpcdev->cycle_lock);
+
+ lpcdev->membase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(lpcdev->membase))
+ return PTR_ERR(lpcdev->membase);
+
+ range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
+ if (!range)
+ return -ENOMEM;
+
+ range->fwnode = dev_fwnode(dev);
+ range->flags = LOGIC_PIO_INDIRECT;
+ range->size = PIO_INDIRECT_SIZE;
+ range->hostdata = lpcdev;
+ range->ops = &hisi_lpc_ops;
+ lpcdev->io_host = range;
+
+ ret = logic_pio_register_range(range);
+ if (ret) {
+ dev_err(dev, "register IO range failed (%d)!\n", ret);
+ return ret;
+ }
+
+ /* register the LPC host PIO resources */
+ if (is_acpi_device_node(range->fwnode))
+ ret = hisi_lpc_acpi_probe(dev);
+ else
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret) {
+ logic_pio_unregister_range(range);
+ return ret;
+ }
+
+ dev_set_drvdata(dev, lpcdev);
+
+ io_end = lpcdev->io_host->io_start + lpcdev->io_host->size;
+ dev_info(dev, "registered range [%pa - %pa]\n",
+ &lpcdev->io_host->io_start, &io_end);
+
+ return ret;
+}
+
+static int hisi_lpc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev);
+ struct logic_pio_hwaddr *range = lpcdev->io_host;
+
+ if (is_acpi_device_node(range->fwnode))
+ hisi_lpc_acpi_remove(dev);
+ else
+ of_platform_depopulate(dev);
+
+ logic_pio_unregister_range(range);
+
+ return 0;
+}
+
+static const struct of_device_id hisi_lpc_of_match[] = {
+ { .compatible = "hisilicon,hip06-lpc", },
+ { .compatible = "hisilicon,hip07-lpc", },
+ {}
+};
+
+static const struct acpi_device_id hisi_lpc_acpi_match[] = {
+ {"HISI0191"},
+ {}
+};
+
+static struct platform_driver hisi_lpc_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = hisi_lpc_of_match,
+ .acpi_match_table = hisi_lpc_acpi_match,
+ },
+ .probe = hisi_lpc_probe,
+ .remove = hisi_lpc_remove,
+};
+builtin_platform_driver(hisi_lpc_driver);
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
new file mode 100644
index 0000000000..42c9386a7b
--- /dev/null
+++ b/drivers/bus/imx-weim.c
@@ -0,0 +1,413 @@
+/*
+ * EIM driver for Freescale's i.MX chips
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/regmap.h>
+
+struct imx_weim_devtype {
+ unsigned int cs_count;
+ unsigned int cs_regs_count;
+ unsigned int cs_stride;
+ unsigned int wcr_offset;
+ unsigned int wcr_bcm;
+ unsigned int wcr_cont_bclk;
+};
+
+static const struct imx_weim_devtype imx1_weim_devtype = {
+ .cs_count = 6,
+ .cs_regs_count = 2,
+ .cs_stride = 0x08,
+};
+
+static const struct imx_weim_devtype imx27_weim_devtype = {
+ .cs_count = 6,
+ .cs_regs_count = 3,
+ .cs_stride = 0x10,
+};
+
+static const struct imx_weim_devtype imx50_weim_devtype = {
+ .cs_count = 4,
+ .cs_regs_count = 6,
+ .cs_stride = 0x18,
+ .wcr_offset = 0x90,
+ .wcr_bcm = BIT(0),
+ .wcr_cont_bclk = BIT(3),
+};
+
+static const struct imx_weim_devtype imx51_weim_devtype = {
+ .cs_count = 6,
+ .cs_regs_count = 6,
+ .cs_stride = 0x18,
+};
+
+#define MAX_CS_REGS_COUNT 6
+#define MAX_CS_COUNT 6
+#define OF_REG_SIZE 3
+
+struct cs_timing {
+ bool is_applied;
+ u32 regs[MAX_CS_REGS_COUNT];
+};
+
+struct cs_timing_state {
+ struct cs_timing cs[MAX_CS_COUNT];
+};
+
+struct weim_priv {
+ void __iomem *base;
+ struct cs_timing_state timing_state;
+};
+
+static const struct of_device_id weim_id_table[] = {
+ /* i.MX1/21 */
+ { .compatible = "fsl,imx1-weim", .data = &imx1_weim_devtype, },
+ /* i.MX25/27/31/35 */
+ { .compatible = "fsl,imx27-weim", .data = &imx27_weim_devtype, },
+ /* i.MX50/53/6Q */
+ { .compatible = "fsl,imx50-weim", .data = &imx50_weim_devtype, },
+ { .compatible = "fsl,imx6q-weim", .data = &imx50_weim_devtype, },
+ /* i.MX51 */
+ { .compatible = "fsl,imx51-weim", .data = &imx51_weim_devtype, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, weim_id_table);
+
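+/*
+ * Parse the WEIM "ranges" property and program IOMUXC_GPR1[11:0] with the
+ * matching chip-select size configuration, if an "fsl,weim-cs-gpr" syscon
+ * phandle is present.
+ */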
+static int imx_weim_gpr_setup(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct of_range_parser parser;
+ struct of_range range;
+ struct regmap *gpr;
+ u32 gprvals[4] = {
+ 05, /* CS0(128M) CS1(0M) CS2(0M) CS3(0M) */
+ 033, /* CS0(64M) CS1(64M) CS2(0M) CS3(0M) */
+ 0113, /* CS0(64M) CS1(32M) CS2(32M) CS3(0M) */
+ 01111, /* CS0(32M) CS1(32M) CS2(32M) CS3(32M) */
+ };
+ u32 gprval = 0;
+ u32 val;
+ int cs = 0;
+ int i = 0;
+
+ gpr = syscon_regmap_lookup_by_phandle(np, "fsl,weim-cs-gpr");
+ if (IS_ERR(gpr)) {
+ dev_dbg(&pdev->dev, "failed to find weim-cs-gpr\n");
+ return 0;
+ }
+
+ if (of_range_parser_init(&parser, np))
+ goto err;
+
+ for_each_of_range(&parser, &range) {
+ cs = range.bus_addr >> 32;
+ val = (range.size / SZ_32M) | 1;
+ gprval |= val << cs * 3;
+ i++;
+ }
+
+ if (i == 0 || i % 4)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(gprvals); i++) {
+ if (gprval == gprvals[i]) {
+ /* Found it. Set up IOMUXC_GPR1[11:0] with it. */
+ regmap_update_bits(gpr, IOMUXC_GPR1, 0xfff, gprval);
+ return 0;
+ }
+ }
+
+err:
+ dev_err(&pdev->dev, "Invalid 'ranges' configuration\n");
+ return -EINVAL;
+}
+
+/* Parse and set the timing for this device. */
+static int weim_timing_setup(struct device *dev, struct device_node *np,
+ const struct imx_weim_devtype *devtype)
+{
+ u32 cs_idx, value[MAX_CS_REGS_COUNT];
+ int i, ret;
+ int reg_idx, num_regs;
+ struct cs_timing *cst;
+ struct weim_priv *priv;
+ struct cs_timing_state *ts;
+ void __iomem *base;
+
+ if (WARN_ON(devtype->cs_regs_count > MAX_CS_REGS_COUNT))
+ return -EINVAL;
+ if (WARN_ON(devtype->cs_count > MAX_CS_COUNT))
+ return -EINVAL;
+
+ priv = dev_get_drvdata(dev);
+ base = priv->base;
+ ts = &priv->timing_state;
+
+ ret = of_property_read_u32_array(np, "fsl,weim-cs-timing",
+ value, devtype->cs_regs_count);
+ if (ret)
+ return ret;
+
+ /*
+ * The child node's "reg" property may contain multiple address ranges;
+ * extract the chip select for each.
+ */
+ num_regs = of_property_count_elems_of_size(np, "reg", OF_REG_SIZE);
+ if (num_regs < 0)
+ return num_regs;
+ if (!num_regs)
+ return -EINVAL;
+ for (reg_idx = 0; reg_idx < num_regs; reg_idx++) {
+ /* get the CS index from this child node's "reg" property. */
+ ret = of_property_read_u32_index(np, "reg",
+ reg_idx * OF_REG_SIZE, &cs_idx);
+ if (ret)
+ break;
+
+ if (cs_idx >= devtype->cs_count)
+ return -EINVAL;
+
+ /* prevent re-configuring a CS that's already been configured */
+ cst = &ts->cs[cs_idx];
+ if (cst->is_applied && memcmp(value, cst->regs,
+ devtype->cs_regs_count * sizeof(u32))) {
+ dev_err(dev, "fsl,weim-cs-timing conflict on %pOF", np);
+ return -EINVAL;
+ }
+
+ /* set the timing for WEIM */
+ for (i = 0; i < devtype->cs_regs_count; i++)
+ writel(value[i],
+ base + cs_idx * devtype->cs_stride + i * 4);
+ if (!cst->is_applied) {
+ cst->is_applied = true;
+ memcpy(cst->regs, value,
+ devtype->cs_regs_count * sizeof(u32));
+ }
+ }
+
+ return 0;
+}
+
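+/*
+ * Parse the WEIM device tree node: set up the CS GPR mapping where
+ * applicable, optionally enable (continuous) burst clock mode, apply the
+ * per-chip-select timings for each child node and populate the child
+ * platform devices.
+ */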
+static int weim_parse_dt(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id = of_match_device(weim_id_table,
+ &pdev->dev);
+ const struct imx_weim_devtype *devtype = of_id->data;
+ int ret = 0, have_child = 0;
+ struct device_node *child;
+ struct weim_priv *priv;
+ void __iomem *base;
+ u32 reg;
+
+ if (devtype == &imx50_weim_devtype) {
+ ret = imx_weim_gpr_setup(pdev);
+ if (ret)
+ return ret;
+ }
+
+ priv = dev_get_drvdata(&pdev->dev);
+ base = priv->base;
+
+ if (of_property_read_bool(pdev->dev.of_node, "fsl,burst-clk-enable")) {
+ if (devtype->wcr_bcm) {
+ reg = readl(base + devtype->wcr_offset);
+ reg |= devtype->wcr_bcm;
+
+ if (of_property_read_bool(pdev->dev.of_node,
+ "fsl,continuous-burst-clk")) {
+ if (devtype->wcr_cont_bclk) {
+ reg |= devtype->wcr_cont_bclk;
+ } else {
+ dev_err(&pdev->dev,
+ "continuous burst clk not supported.\n");
+ return -EINVAL;
+ }
+ }
+
+ writel(reg, base + devtype->wcr_offset);
+ } else {
+ dev_err(&pdev->dev, "burst clk mode not supported.\n");
+ return -EINVAL;
+ }
+ }
+
+ for_each_available_child_of_node(pdev->dev.of_node, child) {
+ ret = weim_timing_setup(&pdev->dev, child, devtype);
+ if (ret)
+ dev_warn(&pdev->dev, "%pOF set timing failed.\n",
+ child);
+ else
+ have_child = 1;
+ }
+
+ if (have_child)
+ ret = of_platform_default_populate(pdev->dev.of_node,
+ NULL, &pdev->dev);
+ if (ret)
+ dev_err(&pdev->dev, "%pOF fail to create devices.\n",
+ pdev->dev.of_node);
+ return ret;
+}
+
+static int weim_probe(struct platform_device *pdev)
+{
+ struct weim_priv *priv;
+ struct clk *clk;
+ void __iomem *base;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* get the resource */
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->base = base;
+ dev_set_drvdata(&pdev->dev, priv);
+
+ /* get the clock */
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ /* parse the device node */
+ ret = weim_parse_dt(pdev);
+ if (ret)
+ clk_disable_unprepare(clk);
+ else
+ dev_info(&pdev->dev, "Driver registered.\n");
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
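+/*
+ * Handle dynamic device tree changes: apply the WEIM timing for newly added
+ * child nodes and create or destroy the corresponding platform devices.
+ */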
+static int of_weim_notify(struct notifier_block *nb, unsigned long action,
+ void *arg)
+{
+ const struct imx_weim_devtype *devtype;
+ struct of_reconfig_data *rd = arg;
+ const struct of_device_id *of_id;
+ struct platform_device *pdev;
+ int ret = NOTIFY_OK;
+
+ switch (of_reconfig_get_state_change(action, rd)) {
+ case OF_RECONFIG_CHANGE_ADD:
+ of_id = of_match_node(weim_id_table, rd->dn->parent);
+ if (!of_id)
+ return NOTIFY_OK; /* not for us */
+
+ devtype = of_id->data;
+
+ pdev = of_find_device_by_node(rd->dn->parent);
+ if (!pdev) {
+ pr_err("%s: could not find platform device for '%pOF'\n",
+ __func__, rd->dn->parent);
+
+ return notifier_from_errno(-EINVAL);
+ }
+
+ if (weim_timing_setup(&pdev->dev, rd->dn, devtype))
+ dev_warn(&pdev->dev,
+ "Failed to setup timing for '%pOF'\n", rd->dn);
+
+ if (!of_node_check_flag(rd->dn, OF_POPULATED)) {
+ /*
+ * Clear the flag before adding the device so that
+ * fw_devlink doesn't skip adding consumers to this
+ * device.
+ */
+ rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
+ if (!of_platform_device_create(rd->dn, NULL, &pdev->dev)) {
+ dev_err(&pdev->dev,
+ "Failed to create child device '%pOF'\n",
+ rd->dn);
+ ret = notifier_from_errno(-EINVAL);
+ }
+ }
+
+ platform_device_put(pdev);
+
+ break;
+ case OF_RECONFIG_CHANGE_REMOVE:
+ if (!of_node_check_flag(rd->dn, OF_POPULATED))
+ return NOTIFY_OK; /* device already destroyed */
+
+ of_id = of_match_node(weim_id_table, rd->dn->parent);
+ if (!of_id)
+ return NOTIFY_OK; /* not for us */
+
+ pdev = of_find_device_by_node(rd->dn);
+ if (!pdev) {
+ pr_err("Could not find platform device for '%pOF'\n",
+ rd->dn);
+
+ ret = notifier_from_errno(-EINVAL);
+ } else {
+ of_platform_device_destroy(&pdev->dev, NULL);
+ platform_device_put(pdev);
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static struct notifier_block weim_of_notifier = {
+ .notifier_call = of_weim_notify,
+};
+#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
+
+static struct platform_driver weim_driver = {
+ .driver = {
+ .name = "imx-weim",
+ .of_match_table = weim_id_table,
+ },
+ .probe = weim_probe,
+};
+
+static int __init weim_init(void)
+{
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+ WARN_ON(of_reconfig_notifier_register(&weim_of_notifier));
+#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
+
+ return platform_driver_register(&weim_driver);
+}
+module_init(weim_init);
+
+static void __exit weim_exit(void)
+{
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+ of_reconfig_notifier_unregister(&weim_of_notifier);
+#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
+
+ return platform_driver_unregister(&weim_driver);
+
+}
+module_exit(weim_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor Inc.");
+MODULE_DESCRIPTION("i.MX EIM Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/bus/intel-ixp4xx-eb.c b/drivers/bus/intel-ixp4xx-eb.c
new file mode 100644
index 0000000000..320cf307db
--- /dev/null
+++ b/drivers/bus/intel-ixp4xx-eb.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel IXP4xx Expansion Bus Controller
+ * Copyright (C) 2021 Linaro Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/log2.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define IXP4XX_EXP_NUM_CS 8
+
+#define IXP4XX_EXP_TIMING_CS0 0x00
+#define IXP4XX_EXP_TIMING_CS1 0x04
+#define IXP4XX_EXP_TIMING_CS2 0x08
+#define IXP4XX_EXP_TIMING_CS3 0x0c
+#define IXP4XX_EXP_TIMING_CS4 0x10
+#define IXP4XX_EXP_TIMING_CS5 0x14
+#define IXP4XX_EXP_TIMING_CS6 0x18
+#define IXP4XX_EXP_TIMING_CS7 0x1c
+
+/* Bits inside each CS timing register */
+#define IXP4XX_EXP_TIMING_STRIDE 0x04
+#define IXP4XX_EXP_CS_EN BIT(31)
+#define IXP456_EXP_PAR_EN BIT(30) /* Only on IXP45x and IXP46x */
+#define IXP4XX_EXP_T1_MASK GENMASK(29, 28)
+#define IXP4XX_EXP_T1_SHIFT 28
+#define IXP4XX_EXP_T2_MASK GENMASK(27, 26)
+#define IXP4XX_EXP_T2_SHIFT 26
+#define IXP4XX_EXP_T3_MASK GENMASK(25, 22)
+#define IXP4XX_EXP_T3_SHIFT 22
+#define IXP4XX_EXP_T4_MASK GENMASK(21, 20)
+#define IXP4XX_EXP_T4_SHIFT 20
+#define IXP4XX_EXP_T5_MASK GENMASK(19, 16)
+#define IXP4XX_EXP_T5_SHIFT 16
+#define IXP4XX_EXP_CYC_TYPE_MASK GENMASK(15, 14)
+#define IXP4XX_EXP_CYC_TYPE_SHIFT 14
+#define IXP4XX_EXP_SIZE_MASK GENMASK(13, 10)
+#define IXP4XX_EXP_SIZE_SHIFT 10
+#define IXP4XX_EXP_CNFG_0 BIT(9) /* Always zero */
+#define IXP43X_EXP_SYNC_INTEL BIT(8) /* Only on IXP43x */
+#define IXP43X_EXP_EXP_CHIP BIT(7) /* Only on IXP43x, dangerous to touch on IXP42x */
+#define IXP4XX_EXP_BYTE_RD16 BIT(6)
+#define IXP4XX_EXP_HRDY_POL BIT(5) /* Only on IXP42x */
+#define IXP4XX_EXP_MUX_EN BIT(4)
+#define IXP4XX_EXP_SPLT_EN BIT(3)
+#define IXP4XX_EXP_WORD BIT(2) /* Always zero */
+#define IXP4XX_EXP_WR_EN BIT(1)
+#define IXP4XX_EXP_BYTE_EN BIT(0)
+
+#define IXP4XX_EXP_CNFG0 0x20
+#define IXP4XX_EXP_CNFG0_MEM_MAP BIT(31)
+#define IXP4XX_EXP_CNFG1 0x24
+
+#define IXP4XX_EXP_BOOT_BASE 0x00000000
+#define IXP4XX_EXP_NORMAL_BASE 0x50000000
+#define IXP4XX_EXP_STRIDE 0x01000000
+
+/* Fuses on the IXP43x */
+#define IXP43X_EXP_UNIT_FUSE_RESET 0x28
+#define IXP43x_EXP_FUSE_SPEED_MASK GENMASK(23, 22)
+
+/* Number of device tree values in "reg" */
+#define IXP4XX_OF_REG_SIZE 3
+
+struct ixp4xx_eb {
+ struct device *dev;
+ struct regmap *rmap;
+ u32 bus_base;
+ bool is_42x;
+ bool is_43x;
+};
+
+struct ixp4xx_exp_tim_prop {
+ const char *prop;
+ u32 max;
+ u32 mask;
+ u16 shift;
+};
+
+static const struct ixp4xx_exp_tim_prop ixp4xx_exp_tim_props[] = {
+ {
+ .prop = "intel,ixp4xx-eb-t1",
+ .max = 3,
+ .mask = IXP4XX_EXP_T1_MASK,
+ .shift = IXP4XX_EXP_T1_SHIFT,
+ },
+ {
+ .prop = "intel,ixp4xx-eb-t2",
+ .max = 3,
+ .mask = IXP4XX_EXP_T2_MASK,
+ .shift = IXP4XX_EXP_T2_SHIFT,
+ },
+ {
+ .prop = "intel,ixp4xx-eb-t3",
+ .max = 15,
+ .mask = IXP4XX_EXP_T3_MASK,
+ .shift = IXP4XX_EXP_T3_SHIFT,
+ },
+ {
+ .prop = "intel,ixp4xx-eb-t4",
+ .max = 3,
+ .mask = IXP4XX_EXP_T4_MASK,
+ .shift = IXP4XX_EXP_T4_SHIFT,
+ },
+ {
+ .prop = "intel,ixp4xx-eb-t5",
+ .max = 15,
+ .mask = IXP4XX_EXP_T5_MASK,
+ .shift = IXP4XX_EXP_T5_SHIFT,
+ },
+ {
+ .prop = "intel,ixp4xx-eb-byte-access-on-halfword",
+ .max = 1,
+ .mask = IXP4XX_EXP_BYTE_RD16,
+ },
+ {
+ .prop = "intel,ixp4xx-eb-hpi-hrdy-pol-high",
+ .max = 1,
+ .mask = IXP4XX_EXP_HRDY_POL,
+ },
+ {
+ .prop = "intel,ixp4xx-eb-mux-address-and-data",
+ .max = 1,
+ .mask = IXP4XX_EXP_MUX_EN,
+ },
+ {
+ .prop = "intel,ixp4xx-eb-ahb-split-transfers",
+ .max = 1,
+ .mask = IXP4XX_EXP_SPLT_EN,
+ },
+ {
+ .prop = "intel,ixp4xx-eb-write-enable",
+ .max = 1,
+ .mask = IXP4XX_EXP_WR_EN,
+ },
+ {
+ .prop = "intel,ixp4xx-eb-byte-access",
+ .max = 1,
+ .mask = IXP4XX_EXP_BYTE_EN,
+ },
+};
+
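+/*
+ * Configure the timing register for one chip select from the child node
+ * properties, keeping the boot-time defaults for anything not explicitly
+ * specified, and enable the chip select. Recurses for devices spanning
+ * several chip selects.
+ */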
+static void ixp4xx_exp_setup_chipselect(struct ixp4xx_eb *eb,
+ struct device_node *np,
+ u32 cs_index,
+ u32 cs_size)
+{
+ u32 cs_cfg;
+ u32 val;
+ u32 cur_cssize;
+ u32 cs_order;
+ int ret;
+ int i;
+
+ if (eb->is_42x && (cs_index > 7)) {
+ dev_err(eb->dev,
+ "invalid chipselect %u, we only support 0-7\n",
+ cs_index);
+ return;
+ }
+ if (eb->is_43x && (cs_index > 3)) {
+ dev_err(eb->dev,
+ "invalid chipselect %u, we only support 0-3\n",
+ cs_index);
+ return;
+ }
+
+ /* Several chip selects can be joined into one device */
+ if (cs_size > IXP4XX_EXP_STRIDE)
+ cur_cssize = IXP4XX_EXP_STRIDE;
+ else
+ cur_cssize = cs_size;
+
+
+ /*
+ * The following will read/modify/write the configuration for one
+ * chipselect, attempting to leave the boot defaults in place unless
+ * something is explicitly defined.
+ */
+ regmap_read(eb->rmap, IXP4XX_EXP_TIMING_CS0 +
+ IXP4XX_EXP_TIMING_STRIDE * cs_index, &cs_cfg);
+ dev_info(eb->dev, "CS%d at %#08x, size %#08x, config before: %#08x\n",
+ cs_index, eb->bus_base + IXP4XX_EXP_STRIDE * cs_index,
+ cur_cssize, cs_cfg);
+
+ /* Set up the size first, aligned to the range 2^9 .. 2^24 */
+ cur_cssize = roundup_pow_of_two(cur_cssize);
+ if (cur_cssize < 512)
+ cur_cssize = 512;
+ cs_order = ilog2(cur_cssize);
+ if (cs_order < 9 || cs_order > 24) {
+ dev_err(eb->dev, "illegal size order %d\n", cs_order);
+ return;
+ }
+ dev_dbg(eb->dev, "CS%d size order: %d\n", cs_index, cs_order);
+ cs_cfg &= ~(IXP4XX_EXP_SIZE_MASK);
+ cs_cfg |= ((cs_order - 9) << IXP4XX_EXP_SIZE_SHIFT);
+
+ for (i = 0; i < ARRAY_SIZE(ixp4xx_exp_tim_props); i++) {
+ const struct ixp4xx_exp_tim_prop *ip = &ixp4xx_exp_tim_props[i];
+
+ /* All are regular u32 values */
+ ret = of_property_read_u32(np, ip->prop, &val);
+ if (ret)
+ continue;
+
+ /* Handle bools (single bits) first */
+ if (ip->max == 1) {
+ if (val)
+ cs_cfg |= ip->mask;
+ else
+ cs_cfg &= ~ip->mask;
+ dev_info(eb->dev, "CS%d %s %s\n", cs_index,
+ val ? "enabled" : "disabled",
+ ip->prop);
+ continue;
+ }
+
+ if (val > ip->max) {
+ dev_err(eb->dev,
+ "CS%d too high value for %s: %u, capped at %u\n",
+ cs_index, ip->prop, val, ip->max);
+ val = ip->max;
+ }
+ /* This assumes max value fills all the assigned bits (and it does) */
+ cs_cfg &= ~ip->mask;
+ cs_cfg |= (val << ip->shift);
+ dev_info(eb->dev, "CS%d set %s to %u\n", cs_index, ip->prop, val);
+ }
+
+ ret = of_property_read_u32(np, "intel,ixp4xx-eb-cycle-type", &val);
+ if (!ret) {
+ if (val > 3) {
+ dev_err(eb->dev, "illegal cycle type %d\n", val);
+ return;
+ }
+ dev_info(eb->dev, "CS%d set cycle type %d\n", cs_index, val);
+ cs_cfg &= ~IXP4XX_EXP_CYC_TYPE_MASK;
+ cs_cfg |= val << IXP4XX_EXP_CYC_TYPE_SHIFT;
+ }
+
+ if (eb->is_43x) {
+ /* Should always be zero */
+ cs_cfg &= ~IXP4XX_EXP_WORD;
+ /*
+ * This bit for Intel strata flash is currently unused, but let's
+ * report it if we find one.
+ */
+ if (cs_cfg & IXP43X_EXP_SYNC_INTEL)
+ dev_info(eb->dev, "claims to be Intel strata flash\n");
+ }
+ cs_cfg |= IXP4XX_EXP_CS_EN;
+
+ regmap_write(eb->rmap,
+ IXP4XX_EXP_TIMING_CS0 + IXP4XX_EXP_TIMING_STRIDE * cs_index,
+ cs_cfg);
+ dev_info(eb->dev, "CS%d wrote %#08x into CS config\n", cs_index, cs_cfg);
+
+ /*
+ * If several chip selects are joined together into one big
+ * device area, we call ourselves recursively for each successive
+ * chip select. For a 32MB flash chip this results in two calls
+ * for example.
+ */
+ if (cs_size > IXP4XX_EXP_STRIDE)
+ ixp4xx_exp_setup_chipselect(eb, np,
+ cs_index + 1,
+ cs_size - IXP4XX_EXP_STRIDE);
+}
+
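+/*
+ * Work out the total window size needed by each chip select referenced in
+ * the child node's "reg" property and set up those chip selects.
+ */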
+static void ixp4xx_exp_setup_child(struct ixp4xx_eb *eb,
+ struct device_node *np)
+{
+ u32 cs_sizes[IXP4XX_EXP_NUM_CS];
+ int num_regs;
+ u32 csindex;
+ u32 cssize;
+ int ret;
+ int i;
+
+ num_regs = of_property_count_elems_of_size(np, "reg", IXP4XX_OF_REG_SIZE);
+ if (num_regs <= 0)
+ return;
+ dev_dbg(eb->dev, "child %s has %d register sets\n",
+ of_node_full_name(np), num_regs);
+
+ for (csindex = 0; csindex < IXP4XX_EXP_NUM_CS; csindex++)
+ cs_sizes[csindex] = 0;
+
+ for (i = 0; i < num_regs; i++) {
+ u32 rbase, rsize;
+
+ ret = of_property_read_u32_index(np, "reg",
+ i * IXP4XX_OF_REG_SIZE, &csindex);
+ if (ret)
+ break;
+ ret = of_property_read_u32_index(np, "reg",
+ i * IXP4XX_OF_REG_SIZE + 1, &rbase);
+ if (ret)
+ break;
+ ret = of_property_read_u32_index(np, "reg",
+ i * IXP4XX_OF_REG_SIZE + 2, &rsize);
+ if (ret)
+ break;
+
+ if (csindex >= IXP4XX_EXP_NUM_CS) {
+ dev_err(eb->dev, "illegal CS %d\n", csindex);
+ continue;
+ }
+ /*
+ * The memory window always starts from the CS base, so we need to add
+ * the reg start and size to get the size from the start of the CS
+ * base. For example, if CS0 is at 0x50000000 and the reg is
+ * <0 0xe40000 0x40000>, the size is 0xe80000.
+ *
+ * Keep the largest such size if several regs set the same CS.
+ */
+ cssize = rbase + rsize;
+ dev_dbg(eb->dev, "CS%d size %#08x\n", csindex, cssize);
+ if (cs_sizes[csindex] < cssize)
+ cs_sizes[csindex] = cssize;
+ }
+
+ for (csindex = 0; csindex < IXP4XX_EXP_NUM_CS; csindex++) {
+ cssize = cs_sizes[csindex];
+ if (!cssize)
+ continue;
+ /* This chip select is in use, so set it up */
+ ixp4xx_exp_setup_chipselect(eb, np, csindex, cssize);
+ }
+}
+
+static int ixp4xx_exp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct ixp4xx_eb *eb;
+ struct device_node *child;
+ bool have_children = false;
+ u32 val;
+ int ret;
+
+ eb = devm_kzalloc(dev, sizeof(*eb), GFP_KERNEL);
+ if (!eb)
+ return -ENOMEM;
+
+ eb->dev = dev;
+ eb->is_42x = of_device_is_compatible(np, "intel,ixp42x-expansion-bus-controller");
+ eb->is_43x = of_device_is_compatible(np, "intel,ixp43x-expansion-bus-controller");
+
+ eb->rmap = syscon_node_to_regmap(np);
+ if (IS_ERR(eb->rmap))
+ return dev_err_probe(dev, PTR_ERR(eb->rmap), "no regmap\n");
+
+ /* We only check that the regmap works on the first read */
+ ret = regmap_read(eb->rmap, IXP4XX_EXP_CNFG0, &val);
+ if (ret)
+ return dev_err_probe(dev, ret, "cannot read regmap\n");
+ if (val & IXP4XX_EXP_CNFG0_MEM_MAP)
+ eb->bus_base = IXP4XX_EXP_BOOT_BASE;
+ else
+ eb->bus_base = IXP4XX_EXP_NORMAL_BASE;
+ dev_info(dev, "expansion bus at %08x\n", eb->bus_base);
+
+ if (eb->is_43x) {
+ /* Check some fuses */
+ regmap_read(eb->rmap, IXP43X_EXP_UNIT_FUSE_RESET, &val);
+ switch (FIELD_GET(IXP43x_EXP_FUSE_SPEED_MASK, val)) {
+ case 0:
+ dev_info(dev, "IXP43x at 533 MHz\n");
+ break;
+ case 1:
+ dev_info(dev, "IXP43x at 400 MHz\n");
+ break;
+ case 2:
+ dev_info(dev, "IXP43x at 667 MHz\n");
+ break;
+ default:
+ dev_info(dev, "IXP43x unknown speed\n");
+ break;
+ }
+ }
+
+ /* Walk over the child nodes and see what chipselects we use */
+ for_each_available_child_of_node(np, child) {
+ ixp4xx_exp_setup_child(eb, child);
+ /* We have at least one child */
+ have_children = true;
+ }
+
+ if (have_children)
+ return of_platform_default_populate(np, NULL, dev);
+
+ return 0;
+}
+
+static const struct of_device_id ixp4xx_exp_of_match[] = {
+ { .compatible = "intel,ixp42x-expansion-bus-controller", },
+ { .compatible = "intel,ixp43x-expansion-bus-controller", },
+ { .compatible = "intel,ixp45x-expansion-bus-controller", },
+ { .compatible = "intel,ixp46x-expansion-bus-controller", },
+ { }
+};
+
+static struct platform_driver ixp4xx_exp_driver = {
+ .probe = ixp4xx_exp_probe,
+ .driver = {
+ .name = "intel-extbus",
+ .of_match_table = ixp4xx_exp_of_match,
+ },
+};
+module_platform_driver(ixp4xx_exp_driver);
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Intel IXP4xx external bus driver");
diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig
new file mode 100644
index 0000000000..b39a11e6c6
--- /dev/null
+++ b/drivers/bus/mhi/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# MHI bus
+#
+# Copyright (c) 2021, Linaro Ltd.
+#
+
+source "drivers/bus/mhi/host/Kconfig"
+source "drivers/bus/mhi/ep/Kconfig"
diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile
new file mode 100644
index 0000000000..354204b0ef
--- /dev/null
+++ b/drivers/bus/mhi/Makefile
@@ -0,0 +1,5 @@
+# Host MHI stack
+obj-$(CONFIG_MHI_BUS) += host/
+
+# Endpoint MHI stack
+obj-$(CONFIG_MHI_BUS_EP) += ep/
diff --git a/drivers/bus/mhi/common.h b/drivers/bus/mhi/common.h
new file mode 100644
index 0000000000..f794b9c804
--- /dev/null
+++ b/drivers/bus/mhi/common.h
@@ -0,0 +1,326 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Linaro Ltd.
+ *
+ */
+
+#ifndef _MHI_COMMON_H
+#define _MHI_COMMON_H
+
+#include <linux/bitfield.h>
+#include <linux/mhi.h>
+
+/* MHI registers */
+#define MHIREGLEN 0x00
+#define MHIVER 0x08
+#define MHICFG 0x10
+#define CHDBOFF 0x18
+#define ERDBOFF 0x20
+#define BHIOFF 0x28
+#define BHIEOFF 0x2c
+#define DEBUGOFF 0x30
+#define MHICTRL 0x38
+#define MHISTATUS 0x48
+#define CCABAP_LOWER 0x58
+#define CCABAP_HIGHER 0x5c
+#define ECABAP_LOWER 0x60
+#define ECABAP_HIGHER 0x64
+#define CRCBAP_LOWER 0x68
+#define CRCBAP_HIGHER 0x6c
+#define CRDB_LOWER 0x70
+#define CRDB_HIGHER 0x74
+#define MHICTRLBASE_LOWER 0x80
+#define MHICTRLBASE_HIGHER 0x84
+#define MHICTRLLIMIT_LOWER 0x88
+#define MHICTRLLIMIT_HIGHER 0x8c
+#define MHIDATABASE_LOWER 0x98
+#define MHIDATABASE_HIGHER 0x9c
+#define MHIDATALIMIT_LOWER 0xa0
+#define MHIDATALIMIT_HIGHER 0xa4
+
+/* MHI BHI registers */
+#define BHI_BHIVERSION_MINOR 0x00
+#define BHI_BHIVERSION_MAJOR 0x04
+#define BHI_IMGADDR_LOW 0x08
+#define BHI_IMGADDR_HIGH 0x0c
+#define BHI_IMGSIZE 0x10
+#define BHI_RSVD1 0x14
+#define BHI_IMGTXDB 0x18
+#define BHI_RSVD2 0x1c
+#define BHI_INTVEC 0x20
+#define BHI_RSVD3 0x24
+#define BHI_EXECENV 0x28
+#define BHI_STATUS 0x2c
+#define BHI_ERRCODE 0x30
+#define BHI_ERRDBG1 0x34
+#define BHI_ERRDBG2 0x38
+#define BHI_ERRDBG3 0x3c
+#define BHI_SERIALNU 0x40
+#define BHI_SBLANTIROLLVER 0x44
+#define BHI_NUMSEG 0x48
+#define BHI_MSMHWID(n) (0x4c + (0x4 * (n)))
+#define BHI_OEMPKHASH(n) (0x64 + (0x4 * (n)))
+#define BHI_RSVD5 0xc4
+
+/* BHI register bits */
+#define BHI_TXDB_SEQNUM_BMSK GENMASK(29, 0)
+#define BHI_TXDB_SEQNUM_SHFT 0
+#define BHI_STATUS_MASK GENMASK(31, 30)
+#define BHI_STATUS_ERROR 0x03
+#define BHI_STATUS_SUCCESS 0x02
+#define BHI_STATUS_RESET 0x00
+
+/* MHI BHIE registers */
+#define BHIE_MSMSOCID_OFFS 0x00
+#define BHIE_TXVECADDR_LOW_OFFS 0x2c
+#define BHIE_TXVECADDR_HIGH_OFFS 0x30
+#define BHIE_TXVECSIZE_OFFS 0x34
+#define BHIE_TXVECDB_OFFS 0x3c
+#define BHIE_TXVECSTATUS_OFFS 0x44
+#define BHIE_RXVECADDR_LOW_OFFS 0x60
+#define BHIE_RXVECADDR_HIGH_OFFS 0x64
+#define BHIE_RXVECSIZE_OFFS 0x68
+#define BHIE_RXVECDB_OFFS 0x70
+#define BHIE_RXVECSTATUS_OFFS 0x78
+
+/* BHIE register bits */
+#define BHIE_TXVECDB_SEQNUM_BMSK GENMASK(29, 0)
+#define BHIE_TXVECDB_SEQNUM_SHFT 0
+#define BHIE_TXVECSTATUS_SEQNUM_BMSK GENMASK(29, 0)
+#define BHIE_TXVECSTATUS_SEQNUM_SHFT 0
+#define BHIE_TXVECSTATUS_STATUS_BMSK GENMASK(31, 30)
+#define BHIE_TXVECSTATUS_STATUS_SHFT 30
+#define BHIE_TXVECSTATUS_STATUS_RESET 0x00
+#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL 0x02
+#define BHIE_TXVECSTATUS_STATUS_ERROR 0x03
+#define BHIE_RXVECDB_SEQNUM_BMSK GENMASK(29, 0)
+#define BHIE_RXVECDB_SEQNUM_SHFT 0
+#define BHIE_RXVECSTATUS_SEQNUM_BMSK GENMASK(29, 0)
+#define BHIE_RXVECSTATUS_SEQNUM_SHFT 0
+#define BHIE_RXVECSTATUS_STATUS_BMSK GENMASK(31, 30)
+#define BHIE_RXVECSTATUS_STATUS_SHFT 30
+#define BHIE_RXVECSTATUS_STATUS_RESET 0x00
+#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL 0x02
+#define BHIE_RXVECSTATUS_STATUS_ERROR 0x03
+
+/* MHI register bits */
+#define MHICFG_NHWER_MASK GENMASK(31, 24)
+#define MHICFG_NER_MASK GENMASK(23, 16)
+#define MHICFG_NHWCH_MASK GENMASK(15, 8)
+#define MHICFG_NCH_MASK GENMASK(7, 0)
+#define MHICTRL_MHISTATE_MASK GENMASK(15, 8)
+#define MHICTRL_RESET_MASK BIT(1)
+#define MHISTATUS_MHISTATE_MASK GENMASK(15, 8)
+#define MHISTATUS_SYSERR_MASK BIT(2)
+#define MHISTATUS_READY_MASK BIT(0)
+
+/* Command Ring Element macros */
+/* No operation command */
+#define MHI_TRE_CMD_NOOP_PTR 0
+#define MHI_TRE_CMD_NOOP_DWORD0 0
+#define MHI_TRE_CMD_NOOP_DWORD1 cpu_to_le32(FIELD_PREP(GENMASK(23, 16), MHI_CMD_NOP))
+
+/* Channel reset command */
+#define MHI_TRE_CMD_RESET_PTR 0
+#define MHI_TRE_CMD_RESET_DWORD0 0
+#define MHI_TRE_CMD_RESET_DWORD1(chid) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), chid) | \
+ FIELD_PREP(GENMASK(23, 16), \
+ MHI_CMD_RESET_CHAN))
+
+/* Channel stop command */
+#define MHI_TRE_CMD_STOP_PTR 0
+#define MHI_TRE_CMD_STOP_DWORD0 0
+#define MHI_TRE_CMD_STOP_DWORD1(chid) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), chid) | \
+ FIELD_PREP(GENMASK(23, 16), \
+ MHI_CMD_STOP_CHAN))
+
+/* Channel start command */
+#define MHI_TRE_CMD_START_PTR 0
+#define MHI_TRE_CMD_START_DWORD0 0
+#define MHI_TRE_CMD_START_DWORD1(chid) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), chid) | \
+ FIELD_PREP(GENMASK(23, 16), \
+ MHI_CMD_START_CHAN))
+
+#define MHI_TRE_GET_DWORD(tre, word) le32_to_cpu((tre)->dword[(word)])
+#define MHI_TRE_GET_CMD_CHID(tre) FIELD_GET(GENMASK(31, 24), MHI_TRE_GET_DWORD(tre, 1))
+#define MHI_TRE_GET_CMD_TYPE(tre) FIELD_GET(GENMASK(23, 16), MHI_TRE_GET_DWORD(tre, 1))
+
+/* Event descriptor macros */
+#define MHI_TRE_EV_PTR(ptr) cpu_to_le64(ptr)
+#define MHI_TRE_EV_DWORD0(code, len) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), code) | \
+ FIELD_PREP(GENMASK(15, 0), len))
+#define MHI_TRE_EV_DWORD1(chid, type) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), chid) | \
+ FIELD_PREP(GENMASK(23, 16), type))
+#define MHI_TRE_GET_EV_PTR(tre) le64_to_cpu((tre)->ptr)
+#define MHI_TRE_GET_EV_CODE(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 0)))
+#define MHI_TRE_GET_EV_LEN(tre) FIELD_GET(GENMASK(15, 0), (MHI_TRE_GET_DWORD(tre, 0)))
+#define MHI_TRE_GET_EV_CHID(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 1)))
+#define MHI_TRE_GET_EV_TYPE(tre) FIELD_GET(GENMASK(23, 16), (MHI_TRE_GET_DWORD(tre, 1)))
+#define MHI_TRE_GET_EV_STATE(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 0)))
+#define MHI_TRE_GET_EV_EXECENV(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 0)))
+#define MHI_TRE_GET_EV_SEQ(tre) MHI_TRE_GET_DWORD(tre, 0)
+#define MHI_TRE_GET_EV_TIME(tre) MHI_TRE_GET_EV_PTR(tre)
+#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits(MHI_TRE_GET_EV_PTR(tre))
+#define MHI_TRE_GET_EV_VEID(tre) FIELD_GET(GENMASK(23, 16), (MHI_TRE_GET_DWORD(tre, 0)))
+#define MHI_TRE_GET_EV_LINKSPEED(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 1)))
+#define MHI_TRE_GET_EV_LINKWIDTH(tre) FIELD_GET(GENMASK(7, 0), (MHI_TRE_GET_DWORD(tre, 0)))
+
+/* State change event */
+#define MHI_SC_EV_PTR 0
+#define MHI_SC_EV_DWORD0(state) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), state))
+#define MHI_SC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type))
+
+/* EE event */
+#define MHI_EE_EV_PTR 0
+#define MHI_EE_EV_DWORD0(ee) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), ee))
+#define MHI_EE_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type))
+
+/* Command Completion event */
+#define MHI_CC_EV_PTR(ptr) cpu_to_le64(ptr)
+#define MHI_CC_EV_DWORD0(code) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), code))
+#define MHI_CC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type))
+
+/* Transfer descriptor macros */
+#define MHI_TRE_DATA_PTR(ptr) cpu_to_le64(ptr)
+#define MHI_TRE_DATA_DWORD0(len) cpu_to_le32(FIELD_PREP(GENMASK(15, 0), len))
+#define MHI_TRE_TYPE_TRANSFER 2
+#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), \
+ MHI_TRE_TYPE_TRANSFER) | \
+ FIELD_PREP(BIT(10), bei) | \
+ FIELD_PREP(BIT(9), ieot) | \
+ FIELD_PREP(BIT(8), ieob) | \
+ FIELD_PREP(BIT(0), chain))
+#define MHI_TRE_DATA_GET_PTR(tre) le64_to_cpu((tre)->ptr)
+#define MHI_TRE_DATA_GET_LEN(tre) FIELD_GET(GENMASK(15, 0), MHI_TRE_GET_DWORD(tre, 0))
+#define MHI_TRE_DATA_GET_CHAIN(tre) (!!(FIELD_GET(BIT(0), MHI_TRE_GET_DWORD(tre, 1))))
+#define MHI_TRE_DATA_GET_IEOB(tre) (!!(FIELD_GET(BIT(8), MHI_TRE_GET_DWORD(tre, 1))))
+#define MHI_TRE_DATA_GET_IEOT(tre) (!!(FIELD_GET(BIT(9), MHI_TRE_GET_DWORD(tre, 1))))
+#define MHI_TRE_DATA_GET_BEI(tre) (!!(FIELD_GET(BIT(10), MHI_TRE_GET_DWORD(tre, 1))))
+
+/* RSC transfer descriptor macros */
+#define MHI_RSCTRE_DATA_PTR(ptr, len) cpu_to_le64(FIELD_PREP(GENMASK_ULL(63, 48), len) | ptr)
+#define MHI_RSCTRE_DATA_DWORD0(cookie) cpu_to_le32(cookie)
+#define MHI_RSCTRE_DATA_DWORD1 cpu_to_le32(FIELD_PREP(GENMASK(23, 16), \
+ MHI_PKT_TYPE_COALESCING))
+
+enum mhi_pkt_type {
+ MHI_PKT_TYPE_INVALID = 0x0,
+ MHI_PKT_TYPE_NOOP_CMD = 0x1,
+ MHI_PKT_TYPE_TRANSFER = 0x2,
+ MHI_PKT_TYPE_COALESCING = 0x8,
+ MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10,
+ MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11,
+ MHI_PKT_TYPE_START_CHAN_CMD = 0x12,
+ MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20,
+ MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21,
+ MHI_PKT_TYPE_TX_EVENT = 0x22,
+ MHI_PKT_TYPE_RSC_TX_EVENT = 0x28,
+ MHI_PKT_TYPE_EE_EVENT = 0x40,
+ MHI_PKT_TYPE_TSYNC_EVENT = 0x48,
+ MHI_PKT_TYPE_BW_REQ_EVENT = 0x50,
+ MHI_PKT_TYPE_STALE_EVENT, /* internal event */
+};
+
+/* MHI transfer completion events */
+enum mhi_ev_ccs {
+ MHI_EV_CC_INVALID = 0x0,
+ MHI_EV_CC_SUCCESS = 0x1,
+ MHI_EV_CC_EOT = 0x2, /* End of transfer event */
+ MHI_EV_CC_OVERFLOW = 0x3,
+ MHI_EV_CC_EOB = 0x4, /* End of block event */
+ MHI_EV_CC_OOB = 0x5, /* Out of block event */
+ MHI_EV_CC_DB_MODE = 0x6,
+ MHI_EV_CC_UNDEFINED_ERR = 0x10,
+ MHI_EV_CC_BAD_TRE = 0x11,
+};
+
+/* Channel state */
+enum mhi_ch_state {
+ MHI_CH_STATE_DISABLED,
+ MHI_CH_STATE_ENABLED,
+ MHI_CH_STATE_RUNNING,
+ MHI_CH_STATE_SUSPENDED,
+ MHI_CH_STATE_STOP,
+ MHI_CH_STATE_ERROR,
+};
+
+enum mhi_cmd_type {
+ MHI_CMD_NOP = 1,
+ MHI_CMD_RESET_CHAN = 16,
+ MHI_CMD_STOP_CHAN = 17,
+ MHI_CMD_START_CHAN = 18,
+};
+
+#define EV_CTX_RESERVED_MASK GENMASK(7, 0)
+#define EV_CTX_INTMODC_MASK GENMASK(15, 8)
+#define EV_CTX_INTMODT_MASK GENMASK(31, 16)
+struct mhi_event_ctxt {
+ __le32 intmod;
+ __le32 ertype;
+ __le32 msivec;
+
+ __le64 rbase __packed __aligned(4);
+ __le64 rlen __packed __aligned(4);
+ __le64 rp __packed __aligned(4);
+ __le64 wp __packed __aligned(4);
+};
+
+#define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0)
+#define CHAN_CTX_BRSTMODE_MASK GENMASK(9, 8)
+#define CHAN_CTX_POLLCFG_MASK GENMASK(15, 10)
+#define CHAN_CTX_RESERVED_MASK GENMASK(31, 16)
+struct mhi_chan_ctxt {
+ __le32 chcfg;
+ __le32 chtype;
+ __le32 erindex;
+
+ __le64 rbase __packed __aligned(4);
+ __le64 rlen __packed __aligned(4);
+ __le64 rp __packed __aligned(4);
+ __le64 wp __packed __aligned(4);
+};
+
+struct mhi_cmd_ctxt {
+ __le32 reserved0;
+ __le32 reserved1;
+ __le32 reserved2;
+
+ __le64 rbase __packed __aligned(4);
+ __le64 rlen __packed __aligned(4);
+ __le64 rp __packed __aligned(4);
+ __le64 wp __packed __aligned(4);
+};
+
+struct mhi_ring_element {
+ __le64 ptr;
+ __le32 dword[2];
+};
+
+static inline const char *mhi_state_str(enum mhi_state state)
+{
+ switch (state) {
+ case MHI_STATE_RESET:
+ return "RESET";
+ case MHI_STATE_READY:
+ return "READY";
+ case MHI_STATE_M0:
+ return "M0";
+ case MHI_STATE_M1:
+ return "M1";
+ case MHI_STATE_M2:
+ return "M2";
+ case MHI_STATE_M3:
+ return "M3";
+ case MHI_STATE_M3_FAST:
+ return "M3 FAST";
+ case MHI_STATE_BHI:
+ return "BHI";
+ case MHI_STATE_SYS_ERR:
+ return "SYS ERROR";
+ default:
+ return "Unknown state";
+ }
+}
+
+#endif /* _MHI_COMMON_H */
diff --git a/drivers/bus/mhi/ep/Kconfig b/drivers/bus/mhi/ep/Kconfig
new file mode 100644
index 0000000000..90ab3b0406
--- /dev/null
+++ b/drivers/bus/mhi/ep/Kconfig
@@ -0,0 +1,10 @@
+config MHI_BUS_EP
+ tristate "Modem Host Interface (MHI) bus Endpoint implementation"
+ help
+ Bus driver for MHI protocol. Modem Host Interface (MHI) is a
+ communication protocol used by a host processor to control
+ and communicate with a modem device over a high-speed
+ peripheral bus or shared memory.
+
+ MHI_BUS_EP implements the MHI protocol for endpoint devices,
+ such as the SDX55 modem connected to the host machine over PCIe.
diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile
new file mode 100644
index 0000000000..aad85f180b
--- /dev/null
+++ b/drivers/bus/mhi/ep/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o
+mhi_ep-y := main.o mmio.o ring.o sm.o
diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h
new file mode 100644
index 0000000000..a2125fa5fe
--- /dev/null
+++ b/drivers/bus/mhi/ep/internal.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Linaro Ltd.
+ *
+ */
+
+#ifndef _MHI_EP_INTERNAL_
+#define _MHI_EP_INTERNAL_
+
+#include <linux/bitfield.h>
+
+#include "../common.h"
+
+extern struct bus_type mhi_ep_bus_type;
+
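+/* Offsets of the MHI and BHI register spaces within the endpoint MMIO region */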
+#define MHI_REG_OFFSET 0x100
+#define BHI_REG_OFFSET 0x200
+
+/* MHI registers */
+#define EP_MHIREGLEN (MHI_REG_OFFSET + MHIREGLEN)
+#define EP_MHIVER (MHI_REG_OFFSET + MHIVER)
+#define EP_MHICFG (MHI_REG_OFFSET + MHICFG)
+#define EP_CHDBOFF (MHI_REG_OFFSET + CHDBOFF)
+#define EP_ERDBOFF (MHI_REG_OFFSET + ERDBOFF)
+#define EP_BHIOFF (MHI_REG_OFFSET + BHIOFF)
+#define EP_BHIEOFF (MHI_REG_OFFSET + BHIEOFF)
+#define EP_DEBUGOFF (MHI_REG_OFFSET + DEBUGOFF)
+#define EP_MHICTRL (MHI_REG_OFFSET + MHICTRL)
+#define EP_MHISTATUS (MHI_REG_OFFSET + MHISTATUS)
+#define EP_CCABAP_LOWER (MHI_REG_OFFSET + CCABAP_LOWER)
+#define EP_CCABAP_HIGHER (MHI_REG_OFFSET + CCABAP_HIGHER)
+#define EP_ECABAP_LOWER (MHI_REG_OFFSET + ECABAP_LOWER)
+#define EP_ECABAP_HIGHER (MHI_REG_OFFSET + ECABAP_HIGHER)
+#define EP_CRCBAP_LOWER (MHI_REG_OFFSET + CRCBAP_LOWER)
+#define EP_CRCBAP_HIGHER (MHI_REG_OFFSET + CRCBAP_HIGHER)
+#define EP_CRDB_LOWER (MHI_REG_OFFSET + CRDB_LOWER)
+#define EP_CRDB_HIGHER (MHI_REG_OFFSET + CRDB_HIGHER)
+#define EP_MHICTRLBASE_LOWER (MHI_REG_OFFSET + MHICTRLBASE_LOWER)
+#define EP_MHICTRLBASE_HIGHER (MHI_REG_OFFSET + MHICTRLBASE_HIGHER)
+#define EP_MHICTRLLIMIT_LOWER (MHI_REG_OFFSET + MHICTRLLIMIT_LOWER)
+#define EP_MHICTRLLIMIT_HIGHER (MHI_REG_OFFSET + MHICTRLLIMIT_HIGHER)
+#define EP_MHIDATABASE_LOWER (MHI_REG_OFFSET + MHIDATABASE_LOWER)
+#define EP_MHIDATABASE_HIGHER (MHI_REG_OFFSET + MHIDATABASE_HIGHER)
+#define EP_MHIDATALIMIT_LOWER (MHI_REG_OFFSET + MHIDATALIMIT_LOWER)
+#define EP_MHIDATALIMIT_HIGHER (MHI_REG_OFFSET + MHIDATALIMIT_HIGHER)
+
+/* MHI BHI registers */
+#define EP_BHI_INTVEC (BHI_REG_OFFSET + BHI_INTVEC)
+#define EP_BHI_EXECENV (BHI_REG_OFFSET + BHI_EXECENV)
+
+/* MHI Doorbell registers */
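+/* Each doorbell is a 64-bit value split across lower/higher 32-bit registers, one pair per ring */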
+#define CHDB_LOWER_n(n) (0x400 + 0x8 * (n))
+#define CHDB_HIGHER_n(n) (0x404 + 0x8 * (n))
+#define ERDB_LOWER_n(n) (0x800 + 0x8 * (n))
+#define ERDB_HIGHER_n(n) (0x804 + 0x8 * (n))
+
+#define MHI_CTRL_INT_STATUS 0x4
+#define MHI_CTRL_INT_STATUS_MSK BIT(0)
+#define MHI_CTRL_INT_STATUS_CRDB_MSK BIT(1)
+#define MHI_CHDB_INT_STATUS_n(n) (0x28 + 0x4 * (n))
+#define MHI_ERDB_INT_STATUS_n(n) (0x38 + 0x4 * (n))
+
+#define MHI_CTRL_INT_CLEAR 0x4c
+#define MHI_CTRL_INT_MMIO_WR_CLEAR BIT(2)
+#define MHI_CTRL_INT_CRDB_CLEAR BIT(1)
+#define MHI_CTRL_INT_CRDB_MHICTRL_CLEAR BIT(0)
+
+#define MHI_CHDB_INT_CLEAR_n(n) (0x70 + 0x4 * (n))
+#define MHI_CHDB_INT_CLEAR_n_CLEAR_ALL GENMASK(31, 0)
+#define MHI_ERDB_INT_CLEAR_n(n) (0x80 + 0x4 * (n))
+#define MHI_ERDB_INT_CLEAR_n_CLEAR_ALL GENMASK(31, 0)
+
+/*
+ * Unlike the usual "masking" convention, writing "1" to a bit in this register
+ * enables the interrupt and writing "0" will disable it.
+ */
+#define MHI_CTRL_INT_MASK 0x94
+#define MHI_CTRL_INT_MASK_MASK GENMASK(1, 0)
+#define MHI_CTRL_MHICTRL_MASK BIT(0)
+#define MHI_CTRL_CRDB_MASK BIT(1)
+
+#define MHI_CHDB_INT_MASK_n(n) (0xb8 + 0x4 * (n))
+#define MHI_CHDB_INT_MASK_n_EN_ALL GENMASK(31, 0)
+#define MHI_ERDB_INT_MASK_n(n) (0xc8 + 0x4 * (n))
+#define MHI_ERDB_INT_MASK_n_EN_ALL GENMASK(31, 0)
+
+#define NR_OF_CMD_RINGS 1
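+/* Channel and event ring doorbell interrupt state is tracked in 4 rows of 32 bits each (up to 128 rings of each type) */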
+#define MHI_MASK_ROWS_CH_DB 4
+#define MHI_MASK_ROWS_EV_DB 4
+#define MHI_MASK_CH_LEN 32
+#define MHI_MASK_EV_LEN 32
+
+/* Generic context */
+struct mhi_generic_ctx {
+ __le32 reserved0;
+ __le32 reserved1;
+ __le32 reserved2;
+
+ __le64 rbase __packed __aligned(4);
+ __le64 rlen __packed __aligned(4);
+ __le64 rp __packed __aligned(4);
+ __le64 wp __packed __aligned(4);
+};
+
+enum mhi_ep_ring_type {
+ RING_TYPE_CMD,
+ RING_TYPE_ER,
+ RING_TYPE_CH,
+};
+
+/* Ring element */
+union mhi_ep_ring_ctx {
+ struct mhi_cmd_ctxt cmd;
+ struct mhi_event_ctxt ev;
+ struct mhi_chan_ctxt ch;
+ struct mhi_generic_ctx generic;
+};
+
+struct mhi_ep_ring_item {
+ struct list_head node;
+ struct mhi_ep_ring *ring;
+};
+
+struct mhi_ep_ring {
+ struct mhi_ep_cntrl *mhi_cntrl;
+ union mhi_ep_ring_ctx *ring_ctx;
+ struct mhi_ring_element *ring_cache;
+ enum mhi_ep_ring_type type;
+ u64 rbase;
+ size_t rd_offset;
+ size_t wr_offset;
+ size_t ring_size;
+ u32 db_offset_h;
+ u32 db_offset_l;
+ u32 ch_id;
+ u32 er_index;
+ u32 irq_vector;
+ bool started;
+};
+
+struct mhi_ep_cmd {
+ struct mhi_ep_ring ring;
+};
+
+struct mhi_ep_event {
+ struct mhi_ep_ring ring;
+};
+
+struct mhi_ep_state_transition {
+ struct list_head node;
+ enum mhi_state state;
+};
+
+struct mhi_ep_chan {
+ char *name;
+ struct mhi_ep_device *mhi_dev;
+ struct mhi_ep_ring ring;
+ struct mutex lock;
+ void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result);
+ enum mhi_ch_state state;
+ enum dma_data_direction dir;
+ u64 tre_loc;
+ u32 tre_size;
+ u32 tre_bytes_left;
+ u32 chan;
+ bool skip_td;
+};
+
+/* MHI Ring related functions */
+void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id);
+void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring);
+int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+ union mhi_ep_ring_ctx *ctx);
+size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr);
+int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *element);
+void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring);
+int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring);
+
+/* MMIO related functions */
+u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset);
+void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val);
+void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val);
+u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask);
+void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
+void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
+void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl);
+u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring);
+void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value);
+void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state,
+ bool *mhi_reset);
+void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl);
+
+/* MHI EP core functions */
+int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state);
+int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env);
+bool mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state cur_mhi_state,
+ enum mhi_state mhi_state);
+int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state);
+int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl);
+int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl);
+int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl);
+
+#endif
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
new file mode 100644
index 0000000000..582d5c166a
--- /dev/null
+++ b/drivers/bus/mhi/ep/main.c
@@ -0,0 +1,1670 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MHI Endpoint bus stack
+ *
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/dma-direction.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/mhi_ep.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include "internal.h"
+
+#define M0_WAIT_DELAY_MS 100
+#define M0_WAIT_COUNT 100
+
+static DEFINE_IDA(mhi_ep_cntrl_ida);
+
+static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
+static int mhi_ep_destroy_device(struct device *dev, void *data);
+
+static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
+ struct mhi_ring_element *el, bool bei)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ union mhi_ep_ring_ctx *ctx;
+ struct mhi_ep_ring *ring;
+ int ret;
+
+ mutex_lock(&mhi_cntrl->event_lock);
+ ring = &mhi_cntrl->mhi_event[ring_idx].ring;
+ ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx];
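+ /* Start the event ring lazily on first use, using the event ring context cached from the host */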
+ if (!ring->started) {
+ ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx);
+ if (ret) {
+ dev_err(dev, "Error starting event ring (%u)\n", ring_idx);
+ goto err_unlock;
+ }
+ }
+
+ /* Add element to the event ring */
+ ret = mhi_ep_ring_add_element(ring, el);
+ if (ret) {
+ dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx);
+ goto err_unlock;
+ }
+
+ mutex_unlock(&mhi_cntrl->event_lock);
+
+ /*
+ * Raise IRQ to host only if the BEI flag is not set in TRE. Host might
+ * set this flag for interrupt moderation as per MHI protocol.
+ */
+ if (!bei)
+ mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&mhi_cntrl->event_lock);
+
+ return ret;
+}
+
+static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+ struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
+{
+ struct mhi_ring_element *event;
+ int ret;
+
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ if (!event)
+ return -ENOMEM;
+
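+ /* Point the completion event at the TRE being completed in the host's transfer ring */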
+ event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
+ event->dword[0] = MHI_TRE_EV_DWORD0(code, len);
+ event->dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);
+
+ ret = mhi_ep_send_event(mhi_cntrl, ring->er_index, event, MHI_TRE_DATA_GET_BEI(tre));
+ kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
+
+ return ret;
+}
+
+int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
+{
+ struct mhi_ring_element *event;
+ int ret;
+
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ if (!event)
+ return -ENOMEM;
+
+ event->dword[0] = MHI_SC_EV_DWORD0(state);
+ event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
+
+ ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
+ kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
+
+ return ret;
+}
+
+int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
+{
+ struct mhi_ring_element *event;
+ int ret;
+
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ if (!event)
+ return -ENOMEM;
+
+ event->dword[0] = MHI_EE_EV_DWORD0(exec_env);
+ event->dword[1] = MHI_EE_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
+
+ ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
+ kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
+
+ return ret;
+}
+
+static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
+{
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
+ struct mhi_ring_element *event;
+ int ret;
+
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ if (!event)
+ return -ENOMEM;
+
+ event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
+ event->dword[0] = MHI_CC_EV_DWORD0(code);
+ event->dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
+
+ ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
+ kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
+
+ return ret;
+}
+
+static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_result result = {};
+ struct mhi_ep_chan *mhi_chan;
+ struct mhi_ep_ring *ch_ring;
+ u32 tmp, ch_id;
+ int ret;
+
+ ch_id = MHI_TRE_GET_CMD_CHID(el);
+
+ /* Check if the channel is supported by the controller */
+ if ((ch_id >= mhi_cntrl->max_chan) || !mhi_cntrl->mhi_chan[ch_id].name) {
+ dev_dbg(dev, "Channel (%u) not supported!\n", ch_id);
+ return -ENODEV;
+ }
+
+ mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
+ ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;
+
+ switch (MHI_TRE_GET_CMD_TYPE(el)) {
+ case MHI_PKT_TYPE_START_CHAN_CMD:
+ dev_dbg(dev, "Received START command for channel (%u)\n", ch_id);
+
+ mutex_lock(&mhi_chan->lock);
+ /* Initialize and configure the corresponding channel ring */
+ if (!ch_ring->started) {
+ ret = mhi_ep_ring_start(mhi_cntrl, ch_ring,
+ (union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]);
+ if (ret) {
+ dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id);
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl,
+ MHI_EV_CC_UNDEFINED_ERR);
+ if (ret)
+ dev_err(dev, "Error sending completion event: %d\n", ret);
+
+ goto err_unlock;
+ }
+ }
+
+ /* Set channel state to RUNNING */
+ mhi_chan->state = MHI_CH_STATE_RUNNING;
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
+ mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
+
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
+ if (ret) {
+ dev_err(dev, "Error sending command completion event (%u)\n",
+ MHI_EV_CC_SUCCESS);
+ goto err_unlock;
+ }
+
+ mutex_unlock(&mhi_chan->lock);
+
+ /*
+ * Create the MHI device only during UL channel start. Since MHI
+ * channels operate in pairs, we'll associate both the UL and DL
+ * channels with the same device.
+ *
+ * We also need to check for mhi_dev != NULL because the host
+ * will issue the START_CHAN command during resume and we don't
+ * destroy the device during suspend.
+ */
+ if (!(ch_id % 2) && !mhi_chan->mhi_dev) {
+ ret = mhi_ep_create_device(mhi_cntrl, ch_id);
+ if (ret) {
+ dev_err(dev, "Error creating device for channel (%u)\n", ch_id);
+ mhi_ep_handle_syserr(mhi_cntrl);
+ return ret;
+ }
+ }
+
+ /* Finally, enable DB for the channel */
+ mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id);
+
+ break;
+ case MHI_PKT_TYPE_STOP_CHAN_CMD:
+ dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
+ if (!ch_ring->started) {
+ dev_err(dev, "Channel (%u) not opened\n", ch_id);
+ return -ENODEV;
+ }
+
+ mutex_lock(&mhi_chan->lock);
+ /* Disable DB for the channel */
+ mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);
+
+ /* Send channel disconnect status to client drivers */
+ if (mhi_chan->xfer_cb) {
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ }
+
+ /* Set channel state to STOP */
+ mhi_chan->state = MHI_CH_STATE_STOP;
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP);
+ mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
+
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
+ if (ret) {
+ dev_err(dev, "Error sending command completion event (%u)\n",
+ MHI_EV_CC_SUCCESS);
+ goto err_unlock;
+ }
+
+ mutex_unlock(&mhi_chan->lock);
+ break;
+ case MHI_PKT_TYPE_RESET_CHAN_CMD:
+ dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id);
+ if (!ch_ring->started) {
+ dev_err(dev, "Channel (%u) not opened\n", ch_id);
+ return -ENODEV;
+ }
+
+ mutex_lock(&mhi_chan->lock);
+ /* Stop and reset the transfer ring */
+ mhi_ep_ring_reset(mhi_cntrl, ch_ring);
+
+ /* Send channel disconnect status to client driver */
+ if (mhi_chan->xfer_cb) {
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ }
+
+ /* Set channel state to DISABLED */
+ mhi_chan->state = MHI_CH_STATE_DISABLED;
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
+ mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
+
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
+ if (ret) {
+ dev_err(dev, "Error sending command completion event (%u)\n",
+ MHI_EV_CC_SUCCESS);
+ goto err_unlock;
+ }
+
+ mutex_unlock(&mhi_chan->lock);
+ break;
+ default:
+ dev_err(dev, "Invalid command received: %lu for channel (%u)\n",
+ MHI_TRE_GET_CMD_TYPE(el), ch_id);
+ return -EINVAL;
+ }
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&mhi_chan->lock);
+
+ return ret;
+}
+
+bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir)
+{
+ struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan :
+ mhi_dev->ul_chan;
+ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+
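+ /* The ring is empty when the local read offset has caught up with the host's write offset */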
+ return !!(ring->rd_offset == ring->wr_offset);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
+
+static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_ring *ring,
+ struct mhi_result *result,
+ u32 len)
+{
+ struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ size_t tr_len, read_offset, write_offset;
+ struct mhi_ep_buf_info buf_info = {};
+ struct mhi_ring_element *el;
+ bool tr_done = false;
+ u32 buf_left;
+ int ret;
+
+ buf_left = len;
+
+ do {
+ /* Don't process the transfer ring if the channel is not in RUNNING state */
+ if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
+ dev_err(dev, "Channel not available\n");
+ return -ENODEV;
+ }
+
+ el = &ring->ring_cache[ring->rd_offset];
+
+ /* Check if there is data pending to be read from previous read operation */
+ if (mhi_chan->tre_bytes_left) {
+ dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
+ tr_len = min(buf_left, mhi_chan->tre_bytes_left);
+ } else {
+ mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
+ mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
+ mhi_chan->tre_bytes_left = mhi_chan->tre_size;
+
+ tr_len = min(buf_left, mhi_chan->tre_size);
+ }
+
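+ /* read_offset: bytes already consumed from the current TRE; write_offset: position within the destination buffer */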
+ read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
+ write_offset = len - buf_left;
+
+ buf_info.host_addr = mhi_chan->tre_loc + read_offset;
+ buf_info.dev_addr = result->buf_addr + write_offset;
+ buf_info.size = tr_len;
+
+ dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
+ return ret;
+ }
+
+ buf_left -= tr_len;
+ mhi_chan->tre_bytes_left -= tr_len;
+
+ /*
+ * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
+ * read completely:
+ *
+ * 1. Send completion event to the host based on the flags set in TRE.
+ * 2. Increment the local read offset of the transfer ring.
+ */
+ if (!mhi_chan->tre_bytes_left) {
+ /*
+ * The host will split the data packet into multiple TREs if it can't fit
+ * the packet in a single TRE. In that case, CHAIN flag will be set by the
+ * host for all TREs except the last one.
+ */
+ if (MHI_TRE_DATA_GET_CHAIN(el)) {
+ /*
+ * IEOB (Interrupt on End of Block) flag will be set by the host if
+ * it expects the completion event for all TREs of a TD.
+ */
+ if (MHI_TRE_DATA_GET_IEOB(el)) {
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+ MHI_TRE_DATA_GET_LEN(el),
+ MHI_EV_CC_EOB);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev,
+ "Error sending transfer compl. event\n");
+ return ret;
+ }
+ }
+ } else {
+ /*
+ * IEOT (Interrupt on End of Transfer) flag will be set by the host
+ * for the last TRE of the TD and expects the completion event for
+ * the same.
+ */
+ if (MHI_TRE_DATA_GET_IEOT(el)) {
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+ MHI_TRE_DATA_GET_LEN(el),
+ MHI_EV_CC_EOT);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev,
+ "Error sending transfer compl. event\n");
+ return ret;
+ }
+ }
+
+ tr_done = true;
+ }
+
+ mhi_ep_ring_inc_index(ring);
+ }
+
+ result->bytes_xferd += tr_len;
+ } while (buf_left && !tr_done);
+
+ return 0;
+}
+
+static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct mhi_result result = {};
+ u32 len = MHI_EP_DEFAULT_MTU;
+ struct mhi_ep_chan *mhi_chan;
+ int ret;
+
+ mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+
+ /*
+ * Bail out if transfer callback is not registered for the channel.
+ * This is most likely due to the client driver not being loaded at this point.
+ */
+ if (!mhi_chan->xfer_cb) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n");
+ return -ENODEV;
+ }
+
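+ /* Even numbered channels are UL (host to device), odd numbered channels are DL (device to host) */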
+ if (ring->ch_id % 2) {
+ /* DL channel */
+ result.dir = mhi_chan->dir;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ } else {
+ /* UL channel */
+ result.buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
+ if (!result.buf_addr)
+ return -ENOMEM;
+
+ do {
+ ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
+ kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
+ return ret;
+ }
+
+ result.dir = mhi_chan->dir;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ result.bytes_xferd = 0;
+ memset(result.buf_addr, 0, len);
+
+ /* Read until the ring becomes empty */
+ } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
+
+ kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
+ }
+
+ return 0;
+}
+
+/* TODO: Handle partially formed TDs */
+int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
+ struct device *dev = &mhi_chan->mhi_dev->dev;
+ struct mhi_ep_buf_info buf_info = {};
+ struct mhi_ring_element *el;
+ u32 buf_left, read_offset;
+ struct mhi_ep_ring *ring;
+ enum mhi_ev_ccs code;
+ size_t tr_len;
+ u32 tre_len;
+ int ret;
+
+ buf_left = skb->len;
+ ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+
+ mutex_lock(&mhi_chan->lock);
+
+ do {
+ /* Don't process the transfer ring if the channel is not in RUNNING state */
+ if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
+ dev_err(dev, "Channel not available\n");
+ ret = -ENODEV;
+ goto err_exit;
+ }
+
+ if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) {
+ dev_err(dev, "TRE not available!\n");
+ ret = -ENOSPC;
+ goto err_exit;
+ }
+
+ el = &ring->ring_cache[ring->rd_offset];
+ tre_len = MHI_TRE_DATA_GET_LEN(el);
+
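+ /* Copy only as much of the SKB as fits in the current TRE */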
+ tr_len = min(buf_left, tre_len);
+ read_offset = skb->len - buf_left;
+
+ buf_info.dev_addr = skb->data + read_offset;
+ buf_info.host_addr = MHI_TRE_DATA_GET_PTR(el);
+ buf_info.size = tr_len;
+
+ dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
+ ret = mhi_cntrl->write_to_host(mhi_cntrl, &buf_info);
+ if (ret < 0) {
+ dev_err(dev, "Error writing to the channel\n");
+ goto err_exit;
+ }
+
+ buf_left -= tr_len;
+ /*
+ * For all TREs queued by the host for the DL channel, only the EOT flag will be set.
+ * If the packet doesn't fit into a single TRE, send the OVERFLOW event to
+ * the host so that the host can adjust the packet boundary to the next TREs. Otherwise,
+ * send the EOT event to the host indicating the packet boundary.
+ */
+ if (buf_left)
+ code = MHI_EV_CC_OVERFLOW;
+ else
+ code = MHI_EV_CC_EOT;
+
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
+ if (ret) {
+ dev_err(dev, "Error sending transfer completion event\n");
+ goto err_exit;
+ }
+
+ mhi_ep_ring_inc_index(ring);
+ } while (buf_left);
+
+ mutex_unlock(&mhi_chan->lock);
+
+ return 0;
+
+err_exit:
+ mutex_unlock(&mhi_chan->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_queue_skb);
+
+static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ /* Update the number of event rings (NER) programmed by the host */
+ mhi_ep_mmio_update_ner(mhi_cntrl);
+
+ dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n",
+ mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings);
+
+ ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
+ ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
+ cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;
+
+ /* Get the channel context base pointer from host */
+ mhi_ep_mmio_get_chc_base(mhi_cntrl);
+
+ /* Allocate and map memory for caching host channel context */
+ ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa,
+ &mhi_cntrl->ch_ctx_cache_phys,
+ (void __iomem **) &mhi_cntrl->ch_ctx_cache,
+ ch_ctx_host_size);
+ if (ret) {
+ dev_err(dev, "Failed to allocate and map ch_ctx_cache\n");
+ return ret;
+ }
+
+ /* Get the event context base pointer from host */
+ mhi_ep_mmio_get_erc_base(mhi_cntrl);
+
+ /* Allocate and map memory for caching host event context */
+ ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa,
+ &mhi_cntrl->ev_ctx_cache_phys,
+ (void __iomem **) &mhi_cntrl->ev_ctx_cache,
+ ev_ctx_host_size);
+ if (ret) {
+ dev_err(dev, "Failed to allocate and map ev_ctx_cache\n");
+ goto err_ch_ctx;
+ }
+
+ /* Get the command context base pointer from host */
+ mhi_ep_mmio_get_crc_base(mhi_cntrl);
+
+ /* Allocate and map memory for caching host command context */
+ ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa,
+ &mhi_cntrl->cmd_ctx_cache_phys,
+ (void __iomem **) &mhi_cntrl->cmd_ctx_cache,
+ cmd_ctx_host_size);
+ if (ret) {
+ dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n");
+ goto err_ev_ctx;
+ }
+
+ /* Initialize command ring */
+ ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring,
+ (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache);
+ if (ret) {
+ dev_err(dev, "Failed to start the command ring\n");
+ goto err_cmd_ctx;
+ }
+
+ return ret;
+
+err_cmd_ctx:
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);
+
+err_ev_ctx:
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);
+
+err_ch_ctx:
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
+
+ return ret;
+}
+
+static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
+
+ ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
+ ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
+ cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;
+
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);
+
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);
+
+ mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
+ (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
+}
+
+static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ /*
+ * Doorbell interrupts are enabled when the corresponding channel gets started.
+ * Enabling all interrupts here triggers spurious irqs as some of the interrupts
+ * associated with hw channels always get triggered.
+ */
+ mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl);
+ mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl);
+}
+
+static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state state;
+ bool mhi_reset;
+ u32 count = 0;
+ int ret;
+
+ /* Wait for Host to set the M0 state */
+ do {
+ msleep(M0_WAIT_DELAY_MS);
+ mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
+ if (mhi_reset) {
+ /* Clear the MHI reset if host is in reset state */
+ mhi_ep_mmio_clear_reset(mhi_cntrl);
+ dev_info(dev, "Detected Host reset while waiting for M0\n");
+ }
+ count++;
+ } while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT);
+
+ if (state != MHI_STATE_M0) {
+ dev_err(dev, "Host failed to enter M0\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = mhi_ep_cache_host_cfg(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Failed to cache host config\n");
+ return ret;
+ }
+
+ mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+
+ /* Enable all interrupts now */
+ mhi_ep_enable_int(mhi_cntrl);
+
+ return 0;
+}
+
+static void mhi_ep_cmd_ring_worker(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work);
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ring_element *el;
+ int ret;
+
+ /* Update the write offset for the ring */
+ ret = mhi_ep_update_wr_offset(ring);
+ if (ret) {
+ dev_err(dev, "Error updating write offset for ring\n");
+ return;
+ }
+
+ /* Sanity check to make sure there are elements in the ring */
+ if (ring->rd_offset == ring->wr_offset)
+ return;
+
+ /*
+ * Process command ring elements up to the write offset. In case of an error, just
+ * try to process the next element.
+ */
+ while (ring->rd_offset != ring->wr_offset) {
+ el = &ring->ring_cache[ring->rd_offset];
+
+ ret = mhi_ep_process_cmd_ring(ring, el);
+ if (ret && ret != -ENODEV)
+ dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset);
+
+ mhi_ep_ring_inc_index(ring);
+ }
+}
+
+static void mhi_ep_ch_ring_worker(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_ring_item *itr, *tmp;
+ struct mhi_ring_element *el;
+ struct mhi_ep_ring *ring;
+ struct mhi_ep_chan *chan;
+ unsigned long flags;
+ LIST_HEAD(head);
+ int ret;
+
+ spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
+ list_splice_tail_init(&mhi_cntrl->ch_db_list, &head);
+ spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);
+
+ /* Process each queued channel ring. In case of an error, just process next element. */
+ list_for_each_entry_safe(itr, tmp, &head, node) {
+ list_del(&itr->node);
+ ring = itr->ring;
+
+ chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+ mutex_lock(&chan->lock);
+
+ /*
+ * The ring could've stopped while we waited to grab chan->lock, so do
+ * a sanity check before going further.
+ */
+ if (!ring->started) {
+ mutex_unlock(&chan->lock);
+ kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
+ continue;
+ }
+
+ /* Update the write offset for the ring */
+ ret = mhi_ep_update_wr_offset(ring);
+ if (ret) {
+ dev_err(dev, "Error updating write offset for ring\n");
+ mutex_unlock(&chan->lock);
+ kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
+ continue;
+ }
+
+ /* Sanity check to make sure there are elements in the ring */
+ if (ring->rd_offset == ring->wr_offset) {
+ mutex_unlock(&chan->lock);
+ kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
+ continue;
+ }
+
+ el = &ring->ring_cache[ring->rd_offset];
+
+ dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
+ ret = mhi_ep_process_ch_ring(ring, el);
+ if (ret) {
+ dev_err(dev, "Error processing ring for channel (%u): %d\n",
+ ring->ch_id, ret);
+ mutex_unlock(&chan->lock);
+ kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
+ continue;
+ }
+
+ mutex_unlock(&chan->lock);
+ kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
+ }
+}
+
+static void mhi_ep_state_worker(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_state_transition *itr, *tmp;
+ unsigned long flags;
+ LIST_HEAD(head);
+ int ret;
+
+ spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
+ list_splice_tail_init(&mhi_cntrl->st_transition_list, &head);
+ spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);
+
+ list_for_each_entry_safe(itr, tmp, &head, node) {
+ list_del(&itr->node);
+ dev_dbg(dev, "Handling MHI state transition to %s\n",
+ mhi_state_str(itr->state));
+
+ switch (itr->state) {
+ case MHI_STATE_M0:
+ ret = mhi_ep_set_m0_state(mhi_cntrl);
+ if (ret)
+ dev_err(dev, "Failed to transition to M0 state\n");
+ break;
+ case MHI_STATE_M3:
+ ret = mhi_ep_set_m3_state(mhi_cntrl);
+ if (ret)
+ dev_err(dev, "Failed to transition to M3 state\n");
+ break;
+ default:
+ dev_err(dev, "Invalid MHI state transition: %d\n", itr->state);
+ break;
+ }
+ kfree(itr);
+ }
+}
+
+static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int,
+ u32 ch_idx)
+{
+ struct mhi_ep_ring_item *item;
+ struct mhi_ep_ring *ring;
+ bool work = !!ch_int;
+ LIST_HEAD(head);
+ u32 i;
+
+ /* First add the ring items to a local list */
+ for_each_set_bit(i, &ch_int, 32) {
+ /* Channel index varies for each register: 0, 32, 64, 96 */
+ u32 ch_id = ch_idx + i;
+
+ ring = &mhi_cntrl->mhi_chan[ch_id].ring;
+ item = kmem_cache_zalloc(mhi_cntrl->ring_item_cache, GFP_ATOMIC);
+ if (!item)
+ return;
+
+ item->ring = ring;
+ list_add_tail(&item->node, &head);
+ }
+
+ /* Now, splice the local list into ch_db_list and queue the work item */
+ if (work) {
+ spin_lock(&mhi_cntrl->list_lock);
+ list_splice_tail_init(&head, &mhi_cntrl->ch_db_list);
+ spin_unlock(&mhi_cntrl->list_lock);
+
+ queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work);
+ }
+}
+
+/*
+ * Channel interrupt statuses are contained in 4 registers, each 32 bits wide.
+ * To check all interrupts, we need to loop through each register and then
+ * check for the bits set.
+ */
+static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 ch_int, ch_idx, i;
+
+ /* Bail out if there is no channel doorbell interrupt */
+ if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl))
+ return;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
+ ch_idx = i * MHI_MASK_CH_LEN;
+
+ /* Only process channel interrupt if the mask is enabled */
+ ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask;
+ if (ch_int) {
+ mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx);
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
+ mhi_cntrl->chdb[i].status);
+ }
+ }
+}
+
+static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl,
+ enum mhi_state state)
+{
+ struct mhi_ep_state_transition *item;
+
+ item = kzalloc(sizeof(*item), GFP_ATOMIC);
+ if (!item)
+ return;
+
+ item->state = state;
+ spin_lock(&mhi_cntrl->list_lock);
+ list_add_tail(&item->node, &mhi_cntrl->st_transition_list);
+ spin_unlock(&mhi_cntrl->list_lock);
+
+ queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work);
+}
+
+/*
+ * Interrupt handler that services interrupts raised by the host writing to
+ * MHICTRL and Command ring doorbell (CRDB) registers for state change and
+ * channel interrupts.
+ */
+static irqreturn_t mhi_ep_irq(int irq, void *data)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = data;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state state;
+ u32 int_value;
+ bool mhi_reset;
+
+ /* Acknowledge the ctrl interrupt */
+ int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS);
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value);
+
+ /* Check for ctrl interrupt */
+ if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) {
+ dev_dbg(dev, "Processing ctrl interrupt\n");
+ mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
+ if (mhi_reset) {
+ dev_info(dev, "Host triggered MHI reset!\n");
+ disable_irq_nosync(mhi_cntrl->irq);
+ schedule_work(&mhi_cntrl->reset_work);
+ return IRQ_HANDLED;
+ }
+
+ mhi_ep_process_ctrl_interrupt(mhi_cntrl, state);
+ }
+
+ /* Check for command doorbell interrupt */
+ if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) {
+ dev_dbg(dev, "Processing command doorbell interrupt\n");
+ queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work);
+ }
+
+ /* Check for channel interrupts */
+ mhi_ep_check_channel_interrupt(mhi_cntrl);
+
+ return IRQ_HANDLED;
+}
+
+static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_ring *ch_ring, *ev_ring;
+ struct mhi_result result = {};
+ struct mhi_ep_chan *mhi_chan;
+ int i;
+
+ /* Stop all the channels */
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+ if (!mhi_chan->ring.started)
+ continue;
+
+ mutex_lock(&mhi_chan->lock);
+ /* Send channel disconnect status to client drivers */
+ if (mhi_chan->xfer_cb) {
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ }
+
+ mhi_chan->state = MHI_CH_STATE_DISABLED;
+ mutex_unlock(&mhi_chan->lock);
+ }
+
+ flush_workqueue(mhi_cntrl->wq);
+
+ /* Destroy devices associated with all channels */
+ device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device);
+
+ /* Stop and reset the transfer rings */
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+ if (!mhi_chan->ring.started)
+ continue;
+
+ ch_ring = &mhi_cntrl->mhi_chan[i].ring;
+ mutex_lock(&mhi_chan->lock);
+ mhi_ep_ring_reset(mhi_cntrl, ch_ring);
+ mutex_unlock(&mhi_chan->lock);
+ }
+
+ /* Stop and reset the event rings */
+ for (i = 0; i < mhi_cntrl->event_rings; i++) {
+ ev_ring = &mhi_cntrl->mhi_event[i].ring;
+ if (!ev_ring->started)
+ continue;
+
+ mutex_lock(&mhi_cntrl->event_lock);
+ mhi_ep_ring_reset(mhi_cntrl, ev_ring);
+ mutex_unlock(&mhi_cntrl->event_lock);
+ }
+
+ /* Stop and reset the command ring */
+ mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring);
+
+ mhi_ep_free_host_cfg(mhi_cntrl);
+ mhi_ep_mmio_mask_interrupts(mhi_cntrl);
+
+ mhi_cntrl->enabled = false;
+}
+
+static void mhi_ep_reset_worker(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
+ enum mhi_state cur_state;
+
+ mhi_ep_power_down(mhi_cntrl);
+
+ mutex_lock(&mhi_cntrl->state_lock);
+
+ /* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
+ mhi_ep_mmio_reset(mhi_cntrl);
+ cur_state = mhi_cntrl->mhi_state;
+
+ /*
+ * Only proceed further if the reset is due to SYS_ERR. The host will
+ * also issue a reset during shutdown and we don't need to re-init in
+ * that case.
+ */
+ if (cur_state == MHI_STATE_SYS_ERR)
+ mhi_ep_power_up(mhi_cntrl);
+
+ mutex_unlock(&mhi_cntrl->state_lock);
+}
+
+/*
+ * We don't need to do anything special other than setting the MHI SYS_ERR
+ * state. The host will reset all contexts and issue MHI RESET so that we
+ * can also recover from the error state.
+ */
+void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+ if (ret)
+ return;
+
+ /* Signal host that the device went to SYS_ERR state */
+ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR);
+ if (ret)
+ dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret);
+}
+
+int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret, i;
+
+ /*
+ * Mask all interrupts until the state machine is ready. Interrupts will
+ * be enabled later with mhi_ep_enable().
+ */
+ mhi_ep_mmio_mask_interrupts(mhi_cntrl);
+ mhi_ep_mmio_init(mhi_cntrl);
+
+ mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)),
+ GFP_KERNEL);
+ if (!mhi_cntrl->mhi_event)
+ return -ENOMEM;
+
+ /* Initialize command, channel and event rings */
+ mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0);
+ for (i = 0; i < mhi_cntrl->max_chan; i++)
+ mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i);
+ for (i = 0; i < mhi_cntrl->event_rings; i++)
+ mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i);
+
+ mhi_cntrl->mhi_state = MHI_STATE_RESET;
+
+ /* Set AMSS EE before signaling ready state */
+ mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+
+ /* All set, notify the host that we are ready */
+ ret = mhi_ep_set_ready_state(mhi_cntrl);
+ if (ret)
+ goto err_free_event;
+
+ dev_dbg(dev, "READY state notification sent to the host\n");
+
+ ret = mhi_ep_enable(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Failed to enable MHI endpoint\n");
+ goto err_free_event;
+ }
+
+ enable_irq(mhi_cntrl->irq);
+ mhi_cntrl->enabled = true;
+
+ return 0;
+
+err_free_event:
+ kfree(mhi_cntrl->mhi_event);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_power_up);
+
+void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ if (mhi_cntrl->enabled) {
+ mhi_ep_abort_transfer(mhi_cntrl);
+ kfree(mhi_cntrl->mhi_event);
+ disable_irq(mhi_cntrl->irq);
+ }
+}
+EXPORT_SYMBOL_GPL(mhi_ep_power_down);
+
+void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_chan *mhi_chan;
+ u32 tmp;
+ int i;
+
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+
+ if (!mhi_chan->mhi_dev)
+ continue;
+
+ mutex_lock(&mhi_chan->lock);
+ /* Skip if the channel is not currently running */
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
+ if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) {
+ mutex_unlock(&mhi_chan->lock);
+ continue;
+ }
+
+ dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
+ /* Set channel state to SUSPENDED */
+ mhi_chan->state = MHI_CH_STATE_SUSPENDED;
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
+ mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
+ mutex_unlock(&mhi_chan->lock);
+ }
+}
+
+void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_chan *mhi_chan;
+ u32 tmp;
+ int i;
+
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+
+ if (!mhi_chan->mhi_dev)
+ continue;
+
+ mutex_lock(&mhi_chan->lock);
+ /* Skip if the channel is not currently suspended */
+ tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
+ if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) {
+ mutex_unlock(&mhi_chan->lock);
+ continue;
+ }
+
+ dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
+ /* Set channel state to RUNNING */
+ mhi_chan->state = MHI_CH_STATE_RUNNING;
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
+ mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
+ mutex_unlock(&mhi_chan->lock);
+ }
+}
+
+static void mhi_ep_release_device(struct device *dev)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ mhi_dev->mhi_cntrl->mhi_dev = NULL;
+
+ /*
+ * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
+ * devices for the channels will only get created in mhi_ep_create_device()
+ * if the mhi_dev associated with it is NULL.
+ */
+ if (mhi_dev->ul_chan)
+ mhi_dev->ul_chan->mhi_dev = NULL;
+
+ if (mhi_dev->dl_chan)
+ mhi_dev->dl_chan->mhi_dev = NULL;
+
+ kfree(mhi_dev);
+}
+
+static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl,
+ enum mhi_device_type dev_type)
+{
+ struct mhi_ep_device *mhi_dev;
+ struct device *dev;
+
+ mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
+ if (!mhi_dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev = &mhi_dev->dev;
+ device_initialize(dev);
+ dev->bus = &mhi_ep_bus_type;
+ dev->release = mhi_ep_release_device;
+
+ /* Controller device is always allocated first */
+ if (dev_type == MHI_DEVICE_CONTROLLER)
+ /* for MHI controller device, parent is the bus device (e.g. PCI EPF) */
+ dev->parent = mhi_cntrl->cntrl_dev;
+ else
+ /* for MHI client devices, parent is the MHI controller device */
+ dev->parent = &mhi_cntrl->mhi_dev->dev;
+
+ mhi_dev->mhi_cntrl = mhi_cntrl;
+ mhi_dev->dev_type = dev_type;
+
+ return mhi_dev;
+}
+
+/*
+ * MHI channels are always defined in pairs, with UL as the even numbered
+ * channel and DL as the odd numbered one. This function receives the UL channel
+ * (primary) as ch_id and always looks at the next entry in the channel list
+ * for the corresponding DL channel (secondary).
+ */
+static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
+{
+ struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
+ struct device *dev = mhi_cntrl->cntrl_dev;
+ struct mhi_ep_device *mhi_dev;
+ int ret;
+
+ /* Check if the channel name is the same for both UL and DL */
+ if (strcmp(mhi_chan->name, mhi_chan[1].name)) {
+ dev_err(dev, "UL and DL channel names are not the same: (%s) != (%s)\n",
+ mhi_chan->name, mhi_chan[1].name);
+ return -EINVAL;
+ }
+
+ mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER);
+ if (IS_ERR(mhi_dev))
+ return PTR_ERR(mhi_dev);
+
+ /* Configure primary channel */
+ mhi_dev->ul_chan = mhi_chan;
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+
+ /* Configure secondary channel as well */
+ mhi_chan++;
+ mhi_dev->dl_chan = mhi_chan;
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+
+ /* Channel name is same for both UL and DL */
+ mhi_dev->name = mhi_chan->name;
+ ret = dev_set_name(&mhi_dev->dev, "%s_%s",
+ dev_name(&mhi_cntrl->mhi_dev->dev),
+ mhi_dev->name);
+ if (ret) {
+ put_device(&mhi_dev->dev);
+ return ret;
+ }
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ put_device(&mhi_dev->dev);
+
+ return ret;
+}
+
+static int mhi_ep_destroy_device(struct device *dev, void *data)
+{
+ struct mhi_ep_device *mhi_dev;
+ struct mhi_ep_cntrl *mhi_cntrl;
+ struct mhi_ep_chan *ul_chan, *dl_chan;
+
+ if (dev->bus != &mhi_ep_bus_type)
+ return 0;
+
+ mhi_dev = to_mhi_ep_device(dev);
+ mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ /* Only destroy devices created for channels */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ ul_chan = mhi_dev->ul_chan;
+ dl_chan = mhi_dev->dl_chan;
+
+ if (ul_chan)
+ put_device(&ul_chan->mhi_dev->dev);
+
+ if (dl_chan)
+ put_device(&dl_chan->mhi_dev->dev);
+
+ dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n",
+ mhi_dev->name);
+
+ /* Notify the client and remove the device from MHI bus */
+ device_del(dev);
+ put_device(dev);
+
+ return 0;
+}
+
+static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl,
+ const struct mhi_ep_cntrl_config *config)
+{
+ const struct mhi_ep_channel_config *ch_cfg;
+ struct device *dev = mhi_cntrl->cntrl_dev;
+ u32 chan, i;
+ int ret = -EINVAL;
+
+ mhi_cntrl->max_chan = config->max_channels;
+
+ /*
+ * Allocate max_channels supported by the MHI endpoint and populate
+ * only the defined channels
+ */
+ mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan),
+ GFP_KERNEL);
+ if (!mhi_cntrl->mhi_chan)
+ return -ENOMEM;
+
+ for (i = 0; i < config->num_channels; i++) {
+ struct mhi_ep_chan *mhi_chan;
+
+ ch_cfg = &config->ch_cfg[i];
+
+ chan = ch_cfg->num;
+ if (chan >= mhi_cntrl->max_chan) {
+ dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n",
+ chan, mhi_cntrl->max_chan);
+ goto error_chan_cfg;
+ }
+
+ /* Bi-directional and direction less channels are not supported */
+ if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) {
+ dev_err(dev, "Invalid direction (%u) for channel (%u)\n",
+ ch_cfg->dir, chan);
+ goto error_chan_cfg;
+ }
+
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ mhi_chan->name = ch_cfg->name;
+ mhi_chan->chan = chan;
+ mhi_chan->dir = ch_cfg->dir;
+ mutex_init(&mhi_chan->lock);
+ }
+
+ return 0;
+
+error_chan_cfg:
+ kfree(mhi_cntrl->mhi_chan);
+
+ return ret;
+}
+
+/*
+ * Allocate channel and command rings here. Event rings will be allocated
+ * in mhi_ep_power_up() as the config comes from the host.
+ */
+int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+ const struct mhi_ep_cntrl_config *config)
+{
+ struct mhi_ep_device *mhi_dev;
+ int ret;
+
+ if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
+ return -EINVAL;
+
+ ret = mhi_ep_chan_init(mhi_cntrl, config);
+ if (ret)
+ return ret;
+
+ mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
+ if (!mhi_cntrl->mhi_cmd) {
+ ret = -ENOMEM;
+ goto err_free_ch;
+ }
+
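+ /* DMA-capable slab caches for event ring elements and TRE data buffers exchanged with the host */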
+ mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
+ sizeof(struct mhi_ring_element), 0,
+ SLAB_CACHE_DMA, NULL);
+ if (!mhi_cntrl->ev_ring_el_cache) {
+ ret = -ENOMEM;
+ goto err_free_cmd;
+ }
+
+ mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0,
+ SLAB_CACHE_DMA, NULL);
+ if (!mhi_cntrl->tre_buf_cache) {
+ ret = -ENOMEM;
+ goto err_destroy_ev_ring_el_cache;
+ }
+
+ mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item",
+ sizeof(struct mhi_ep_ring_item), 0,
+ 0, NULL);
+ if (!mhi_cntrl->ring_item_cache) {
+ ret = -ENOMEM;
+ goto err_destroy_tre_buf_cache;
+ }
+ INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
+ INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
+ INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
+ INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker);
+
+ mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
+ if (!mhi_cntrl->wq) {
+ ret = -ENOMEM;
+ goto err_destroy_ring_item_cache;
+ }
+
+ INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
+ INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
+ spin_lock_init(&mhi_cntrl->list_lock);
+ mutex_init(&mhi_cntrl->state_lock);
+ mutex_init(&mhi_cntrl->event_lock);
+
+ /* Set MHI version and AMSS EE before enumeration */
+ mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version);
+ mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+
+ /* Set controller index */
+ ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL);
+ if (ret < 0)
+ goto err_destroy_wq;
+
+ mhi_cntrl->index = ret;
+
+ irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN);
+ ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH,
+ "doorbell_irq", mhi_cntrl);
+ if (ret) {
+ dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n");
+ goto err_ida_free;
+ }
+
+ /* Allocate the controller device */
+ mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER);
+ if (IS_ERR(mhi_dev)) {
+ dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n");
+ ret = PTR_ERR(mhi_dev);
+ goto err_free_irq;
+ }
+
+ ret = dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index);
+ if (ret)
+ goto err_put_dev;
+
+ mhi_dev->name = dev_name(&mhi_dev->dev);
+ mhi_cntrl->mhi_dev = mhi_dev;
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ goto err_put_dev;
+
+ dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n");
+
+ return 0;
+
+err_put_dev:
+ put_device(&mhi_dev->dev);
+err_free_irq:
+ free_irq(mhi_cntrl->irq, mhi_cntrl);
+err_ida_free:
+ ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
+err_destroy_wq:
+ destroy_workqueue(mhi_cntrl->wq);
+err_destroy_ring_item_cache:
+ kmem_cache_destroy(mhi_cntrl->ring_item_cache);
+err_destroy_ev_ring_el_cache:
+ kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
+err_destroy_tre_buf_cache:
+ kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
+err_free_cmd:
+ kfree(mhi_cntrl->mhi_cmd);
+err_free_ch:
+ kfree(mhi_cntrl->mhi_chan);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_register_controller);
+
+/*
+ * It is expected that the controller drivers will power down the MHI EP stack
+ * using "mhi_ep_power_down()" before calling this function to unregister themselves.
+ */
+void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev;
+
+ destroy_workqueue(mhi_cntrl->wq);
+
+ free_irq(mhi_cntrl->irq, mhi_cntrl);
+
+ kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
+ kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
+ kmem_cache_destroy(mhi_cntrl->ring_item_cache);
+ kfree(mhi_cntrl->mhi_cmd);
+ kfree(mhi_cntrl->mhi_chan);
+
+ device_del(&mhi_dev->dev);
+ put_device(&mhi_dev->dev);
+
+ ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller);
+
+static int mhi_ep_driver_probe(struct device *dev)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+ struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
+ struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan;
+ struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan;
+
+ ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
+ dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
+
+ return mhi_drv->probe(mhi_dev, mhi_dev->id);
+}
+
+static int mhi_ep_driver_remove(struct device *dev)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+ struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
+ struct mhi_result result = {};
+ struct mhi_ep_chan *mhi_chan;
+ int dir;
+
+ /* Skip if it is a controller device */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ /* Disconnect the channels associated with the driver */
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+ if (!mhi_chan)
+ continue;
+
+ mutex_lock(&mhi_chan->lock);
+ /* Send channel disconnect status to the client driver */
+ if (mhi_chan->xfer_cb) {
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ }
+
+ mhi_chan->state = MHI_CH_STATE_DISABLED;
+ mhi_chan->xfer_cb = NULL;
+ mutex_unlock(&mhi_chan->lock);
+ }
+
+ /* Remove the client driver now */
+ mhi_drv->remove(mhi_dev);
+
+ return 0;
+}
+
+int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner)
+{
+ struct device_driver *driver = &mhi_drv->driver;
+
+ if (!mhi_drv->probe || !mhi_drv->remove)
+ return -EINVAL;
+
+ /* Client drivers should have callbacks defined for both channels */
+ if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb)
+ return -EINVAL;
+
+ driver->bus = &mhi_ep_bus_type;
+ driver->owner = owner;
+ driver->probe = mhi_ep_driver_probe;
+ driver->remove = mhi_ep_driver_remove;
+
+ return driver_register(driver);
+}
+EXPORT_SYMBOL_GPL(__mhi_ep_driver_register);
+
+void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv)
+{
+ driver_unregister(&mhi_drv->driver);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister);
+
+static int mhi_ep_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+
+ return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT,
+ mhi_dev->name);
+}
+
+static int mhi_ep_match(struct device *dev, struct device_driver *drv)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+ struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
+ const struct mhi_device_id *id;
+
+ /*
+ * If the device is a controller type then there is no client driver
+ * associated with it
+ */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ for (id = mhi_drv->id_table; id->chan[0]; id++)
+ if (!strcmp(mhi_dev->name, id->chan)) {
+ mhi_dev->id = id;
+ return 1;
+ }
+
+ return 0;
+}
+
+struct bus_type mhi_ep_bus_type = {
+ .name = "mhi_ep",
+ .dev_name = "mhi_ep",
+ .match = mhi_ep_match,
+ .uevent = mhi_ep_uevent,
+};
+
+static int __init mhi_ep_init(void)
+{
+ return bus_register(&mhi_ep_bus_type);
+}
+
+static void __exit mhi_ep_exit(void)
+{
+ bus_unregister(&mhi_ep_bus_type);
+}
+
+postcore_initcall(mhi_ep_init);
+module_exit(mhi_ep_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHI Bus Endpoint stack");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
diff --git a/drivers/bus/mhi/ep/mmio.c b/drivers/bus/mhi/ep/mmio.c
new file mode 100644
index 0000000000..b5bfd22f2c
--- /dev/null
+++ b/drivers/bus/mhi/ep/mmio.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/mhi_ep.h>
+
+#include "internal.h"
+
+u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset)
+{
+ return readl(mhi_cntrl->mmio + offset);
+}
+
+void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val)
+{
+ writel(val, mhi_cntrl->mmio + offset);
+}
+
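+/*
+ * Read-modify-write helper: only the bits covered by the mask are updated,
+ * with the value shifted into the field position derived from the mask.
+ */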
+void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, offset);
+ regval &= ~mask;
+ regval |= (val << __ffs(mask)) & mask;
+ mhi_ep_mmio_write(mhi_cntrl, offset, regval);
+}
+
+u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(dev, offset);
+ regval &= mask;
+ regval >>= __ffs(mask);
+
+ return regval;
+}
+
+void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state,
+ bool *mhi_reset)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICTRL);
+ *state = FIELD_GET(MHICTRL_MHISTATE_MASK, regval);
+ *mhi_reset = !!FIELD_GET(MHICTRL_RESET_MASK, regval);
+}
+
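+/*
+ * Channel doorbell interrupts are banked 32 per CHDB_INT_MASK register, so
+ * the channel ID selects both the register index (ch_id / 32) and the bit
+ * position (ch_id % 32) within it.
+ */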
+static void mhi_ep_mmio_set_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id, bool enable)
+{
+ u32 chid_mask, chid_shift, chdb_idx, val;
+
+ chid_shift = ch_id % 32;
+ chid_mask = BIT(chid_shift);
+ chdb_idx = ch_id / 32;
+
+ val = enable ? 1 : 0;
+
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(chdb_idx), chid_mask, val);
+
+ /* Update the local copy of the channel mask */
+ mhi_cntrl->chdb[chdb_idx].mask &= ~chid_mask;
+ mhi_cntrl->chdb[chdb_idx].mask |= val << chid_shift;
+}
+
+void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
+{
+ mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, true);
+}
+
+void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
+{
+ mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, false);
+}
+
+static void mhi_ep_mmio_set_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable)
+{
+ u32 val, i;
+
+ val = enable ? MHI_CHDB_INT_MASK_n_EN_ALL : 0;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(i), val);
+ mhi_cntrl->chdb[i].mask = val;
+ }
+}
+
+void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, true);
+}
+
+static void mhi_ep_mmio_mask_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, false);
+}
+
+bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ bool chdb = false;
+ u32 i;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
+ mhi_cntrl->chdb[i].status = mhi_ep_mmio_read(mhi_cntrl, MHI_CHDB_INT_STATUS_n(i));
+ if (mhi_cntrl->chdb[i].status)
+ chdb = true;
+ }
+
+ /* Return whether a channel doorbell interrupt occurred or not */
+ return chdb;
+}
+
+static void mhi_ep_mmio_set_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable)
+{
+ u32 val, i;
+
+ val = enable ? MHI_ERDB_INT_MASK_n_EN_ALL : 0;
+
+ for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++)
+ mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_MASK_n(i), val);
+}
+
+static void mhi_ep_mmio_mask_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_set_erdb_interrupts(mhi_cntrl, false);
+}
+
+void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
+ MHI_CTRL_MHICTRL_MASK, 1);
+}
+
+void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
+ MHI_CTRL_MHICTRL_MASK, 0);
+}
+
+void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
+ MHI_CTRL_CRDB_MASK, 1);
+}
+
+void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK,
+ MHI_CTRL_CRDB_MASK, 0);
+}
+
+void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_disable_ctrl_interrupt(mhi_cntrl);
+ mhi_ep_mmio_disable_cmdb_interrupt(mhi_cntrl);
+ mhi_ep_mmio_mask_chdb_interrupts(mhi_cntrl);
+ mhi_ep_mmio_mask_erdb_interrupts(mhi_cntrl);
+}
+
+static void mhi_ep_mmio_clear_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 i;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++)
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
+ MHI_CHDB_INT_CLEAR_n_CLEAR_ALL);
+
+ for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++)
+ mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_CLEAR_n(i),
+ MHI_ERDB_INT_CLEAR_n_CLEAR_ALL);
+
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR,
+ MHI_CTRL_INT_MMIO_WR_CLEAR |
+ MHI_CTRL_INT_CRDB_CLEAR |
+ MHI_CTRL_INT_CRDB_MHICTRL_CLEAR);
+}
+
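+/*
+ * The host programs the context array base addresses as split 32-bit
+ * HIGHER/LOWER registers; the helpers below combine them into 64-bit host
+ * physical addresses.
+ */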
+void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_HIGHER);
+ mhi_cntrl->ch_ctx_host_pa = regval;
+ mhi_cntrl->ch_ctx_host_pa <<= 32;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_LOWER);
+ mhi_cntrl->ch_ctx_host_pa |= regval;
+}
+
+void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_HIGHER);
+ mhi_cntrl->ev_ctx_host_pa = regval;
+ mhi_cntrl->ev_ctx_host_pa <<= 32;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_LOWER);
+ mhi_cntrl->ev_ctx_host_pa |= regval;
+}
+
+void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_HIGHER);
+ mhi_cntrl->cmd_ctx_host_pa = regval;
+ mhi_cntrl->cmd_ctx_host_pa <<= 32;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_LOWER);
+ mhi_cntrl->cmd_ctx_host_pa |= regval;
+}
+
+u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ u64 db_offset;
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_h);
+ db_offset = regval;
+ db_offset <<= 32;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_l);
+ db_offset |= regval;
+
+ return db_offset;
+}
+
+void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value)
+{
+ mhi_ep_mmio_write(mhi_cntrl, EP_BHI_EXECENV, value);
+}
+
+void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHICTRL, MHICTRL_RESET_MASK, 0);
+}
+
+void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_write(mhi_cntrl, EP_MHICTRL, 0);
+ mhi_ep_mmio_write(mhi_cntrl, EP_MHISTATUS, 0);
+ mhi_ep_mmio_clear_interrupts(mhi_cntrl);
+}
+
+void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 regval;
+
+ mhi_cntrl->chdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_CHDBOFF);
+ mhi_cntrl->erdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_ERDBOFF);
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG);
+ mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval);
+ mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval);
+
+ mhi_ep_mmio_reset(mhi_cntrl);
+}
+
+void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 regval;
+
+ regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG);
+ mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval);
+ mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval);
+}
diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c
new file mode 100644
index 0000000000..c673d7200b
--- /dev/null
+++ b/drivers/bus/mhi/ep/ring.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/mhi_ep.h>
+#include "internal.h"
+
+size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr)
+{
+ return (ptr - ring->rbase) / sizeof(struct mhi_ring_element);
+}
+
+static u32 mhi_ep_ring_num_elems(struct mhi_ep_ring *ring)
+{
+ __le64 rlen;
+
+ memcpy_fromio(&rlen, (void __iomem *) &ring->ring_ctx->generic.rlen, sizeof(u64));
+
+ return le64_to_cpu(rlen) / sizeof(struct mhi_ring_element);
+}
+
+void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring)
+{
+ ring->rd_offset = (ring->rd_offset + 1) % ring->ring_size;
+}
+
+static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_buf_info buf_info = {};
+ size_t start;
+ int ret;
+
+	/* Don't proceed in the case of an event ring. This happens during mhi_ep_ring_start(). */
+ if (ring->type == RING_TYPE_ER)
+ return 0;
+
+ /* No need to cache the ring if write pointer is unmodified */
+ if (ring->wr_offset == end)
+ return 0;
+
+ start = ring->wr_offset;
+ if (start < end) {
+ buf_info.size = (end - start) * sizeof(struct mhi_ring_element);
+ buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
+ buf_info.dev_addr = &ring->ring_cache[start];
+
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
+ if (ret < 0)
+ return ret;
+ } else {
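+		/*
+		 * The write pointer has wrapped around: copy the tail of the
+		 * host ring first, then (if needed) the head up to the new
+		 * write offset.
+		 */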
+ buf_info.size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
+ buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
+ buf_info.dev_addr = &ring->ring_cache[start];
+
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
+ if (ret < 0)
+ return ret;
+
+ if (end) {
+ buf_info.host_addr = ring->rbase;
+ buf_info.dev_addr = &ring->ring_cache[0];
+ buf_info.size = end * sizeof(struct mhi_ring_element);
+
+ ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, buf_info.size);
+
+ return 0;
+}
+
+static int mhi_ep_cache_ring(struct mhi_ep_ring *ring, u64 wr_ptr)
+{
+ size_t wr_offset;
+ int ret;
+
+ wr_offset = mhi_ep_ring_addr2offset(ring, wr_ptr);
+
+ /* Cache the host ring till write offset */
+	/* Cache the host ring up to the write offset */
+ if (ret)
+ return ret;
+
+ ring->wr_offset = wr_offset;
+
+ return 0;
+}
+
+int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring)
+{
+ u64 wr_ptr;
+
+ wr_ptr = mhi_ep_mmio_get_db(ring);
+
+ return mhi_ep_cache_ring(ring, wr_ptr);
+}
+
+/* TODO: Support for adding multiple ring elements to the ring */
+int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_buf_info buf_info = {};
+ size_t old_offset = 0;
+ u32 num_free_elem;
+ __le64 rp;
+ int ret;
+
+ ret = mhi_ep_update_wr_offset(ring);
+ if (ret) {
+ dev_err(dev, "Error updating write pointer\n");
+ return ret;
+ }
+
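+	/*
+	 * Compute the free space in the ring, keeping one element unused so
+	 * that a completely full ring is never mistaken for an empty one.
+	 */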
+ if (ring->rd_offset < ring->wr_offset)
+ num_free_elem = (ring->wr_offset - ring->rd_offset) - 1;
+ else
+ num_free_elem = ((ring->ring_size - ring->rd_offset) + ring->wr_offset) - 1;
+
+	/* Check if there is space in the ring for adding at least one element */
+ if (!num_free_elem) {
+ dev_err(dev, "No space left in the ring\n");
+ return -ENOSPC;
+ }
+
+ old_offset = ring->rd_offset;
+ mhi_ep_ring_inc_index(ring);
+
+ dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset);
+
+ /* Update rp in ring context */
+ rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
+ memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));
+
+ buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
+ buf_info.dev_addr = el;
+ buf_info.size = sizeof(*el);
+
+ return mhi_cntrl->write_to_host(mhi_cntrl, &buf_info);
+}
+
+void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
+{
+ ring->type = type;
+ if (ring->type == RING_TYPE_CMD) {
+ ring->db_offset_h = EP_CRDB_HIGHER;
+ ring->db_offset_l = EP_CRDB_LOWER;
+ } else if (ring->type == RING_TYPE_CH) {
+ ring->db_offset_h = CHDB_HIGHER_n(id);
+ ring->db_offset_l = CHDB_LOWER_n(id);
+ ring->ch_id = id;
+ } else {
+ ring->db_offset_h = ERDB_HIGHER_n(id);
+ ring->db_offset_l = ERDB_LOWER_n(id);
+ }
+}
+
+int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+ union mhi_ep_ring_ctx *ctx)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ __le64 val;
+ int ret;
+
+ ring->mhi_cntrl = mhi_cntrl;
+ ring->ring_ctx = ctx;
+ ring->ring_size = mhi_ep_ring_num_elems(ring);
+ memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rbase, sizeof(u64));
+ ring->rbase = le64_to_cpu(val);
+
+ if (ring->type == RING_TYPE_CH)
+ ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex);
+
+ if (ring->type == RING_TYPE_ER)
+ ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec);
+
+ /* During ring init, both rp and wp are equal */
+ memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64));
+ ring->rd_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
+ ring->wr_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
+
+ /* Allocate ring cache memory for holding the copy of host ring */
+ ring->ring_cache = kcalloc(ring->ring_size, sizeof(struct mhi_ring_element), GFP_KERNEL);
+ if (!ring->ring_cache)
+ return -ENOMEM;
+
+ memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.wp, sizeof(u64));
+ ret = mhi_ep_cache_ring(ring, le64_to_cpu(val));
+ if (ret) {
+ dev_err(dev, "Failed to cache ring\n");
+ kfree(ring->ring_cache);
+ return ret;
+ }
+
+ ring->started = true;
+
+ return 0;
+}
+
+void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring)
+{
+ ring->started = false;
+ kfree(ring->ring_cache);
+ ring->ring_cache = NULL;
+}
diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c
new file mode 100644
index 0000000000..fd200b2ac0
--- /dev/null
+++ b/drivers/bus/mhi/ep/sm.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/errno.h>
+#include <linux/mhi_ep.h>
+#include "internal.h"
+
+bool __must_check mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl,
+ enum mhi_state cur_mhi_state,
+ enum mhi_state mhi_state)
+{
+ if (mhi_state == MHI_STATE_SYS_ERR)
+ return true; /* Allowed in any state */
+
+ if (mhi_state == MHI_STATE_READY)
+ return cur_mhi_state == MHI_STATE_RESET;
+
+ if (mhi_state == MHI_STATE_M0)
+ return cur_mhi_state == MHI_STATE_M3 || cur_mhi_state == MHI_STATE_READY;
+
+ if (mhi_state == MHI_STATE_M3)
+ return cur_mhi_state == MHI_STATE_M0;
+
+ return false;
+}
+
+int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ if (!mhi_ep_check_mhi_state(mhi_cntrl, mhi_cntrl->mhi_state, mhi_state)) {
+ dev_err(dev, "MHI state change to %s from %s is not allowed!\n",
+ mhi_state_str(mhi_state),
+ mhi_state_str(mhi_cntrl->mhi_state));
+ return -EACCES;
+ }
+
+ /* TODO: Add support for M1 and M2 states */
+ if (mhi_state == MHI_STATE_M1 || mhi_state == MHI_STATE_M2) {
+ dev_err(dev, "MHI state (%s) not supported\n", mhi_state_str(mhi_state));
+ return -EOPNOTSUPP;
+ }
+
+ mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK, mhi_state);
+ mhi_cntrl->mhi_state = mhi_state;
+
+ if (mhi_state == MHI_STATE_READY)
+ mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK, 1);
+
+ if (mhi_state == MHI_STATE_SYS_ERR)
+ mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_SYSERR_MASK, 1);
+
+ return 0;
+}
+
+int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state old_state;
+ int ret;
+
+ /* If MHI is in M3, resume suspended channels */
+ mutex_lock(&mhi_cntrl->state_lock);
+
+ old_state = mhi_cntrl->mhi_state;
+ if (old_state == MHI_STATE_M3)
+ mhi_ep_resume_channels(mhi_cntrl);
+
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+ if (ret) {
+ mhi_ep_handle_syserr(mhi_cntrl);
+ goto err_unlock;
+ }
+
+ /* Signal host that the device moved to M0 */
+ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0);
+ if (ret) {
+ dev_err(dev, "Failed sending M0 state change event\n");
+ goto err_unlock;
+ }
+
+ if (old_state == MHI_STATE_READY) {
+ /* Send AMSS EE event to host */
+ ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS);
+ if (ret) {
+ dev_err(dev, "Failed sending AMSS EE event\n");
+ goto err_unlock;
+ }
+ }
+
+err_unlock:
+ mutex_unlock(&mhi_cntrl->state_lock);
+
+ return ret;
+}
+
+int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ mutex_lock(&mhi_cntrl->state_lock);
+
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+ if (ret) {
+ mhi_ep_handle_syserr(mhi_cntrl);
+ goto err_unlock;
+ }
+
+ mhi_ep_suspend_channels(mhi_cntrl);
+
+ /* Signal host that the device moved to M3 */
+ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3);
+ if (ret) {
+ dev_err(dev, "Failed sending M3 state change event\n");
+ goto err_unlock;
+ }
+
+err_unlock:
+ mutex_unlock(&mhi_cntrl->state_lock);
+
+ return ret;
+}
+
+int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state mhi_state;
+ int ret, is_ready;
+
+ mutex_lock(&mhi_cntrl->state_lock);
+
+ /* Ensure that the MHISTATUS is set to RESET by host */
+ mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK);
+ is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK);
+
+ if (mhi_state != MHI_STATE_RESET || is_ready) {
+ dev_err(dev, "READY state transition failed. MHI host not in RESET state\n");
+ ret = -EIO;
+ goto err_unlock;
+ }
+
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY);
+ if (ret)
+ mhi_ep_handle_syserr(mhi_cntrl);
+
+err_unlock:
+ mutex_unlock(&mhi_cntrl->state_lock);
+
+ return ret;
+}
diff --git a/drivers/bus/mhi/host/Kconfig b/drivers/bus/mhi/host/Kconfig
new file mode 100644
index 0000000000..da5cd0c9fc
--- /dev/null
+++ b/drivers/bus/mhi/host/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# MHI bus
+#
+# Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+#
+
+config MHI_BUS
+ tristate "Modem Host Interface (MHI) bus"
+ help
+	  Bus driver for the MHI protocol. Modem Host Interface (MHI) is a
+	  communication protocol used by host processors to control
+	  and communicate with modem devices over a high-speed peripheral
+ bus or shared memory.
+
+config MHI_BUS_DEBUG
+ bool "Debugfs support for the MHI bus"
+ depends on MHI_BUS && DEBUG_FS
+ help
+ Enable debugfs support for use with the MHI transport. Allows
+ reading and/or modifying some values within the MHI controller
+ for debug and test purposes.
+
+config MHI_BUS_PCI_GENERIC
+ tristate "MHI PCI controller driver"
+ depends on MHI_BUS
+ depends on PCI
+ help
+	  This driver provides an MHI PCI controller driver for devices such
+	  as Qualcomm SDX55 based PCIe modems.
+
diff --git a/drivers/bus/mhi/host/Makefile b/drivers/bus/mhi/host/Makefile
new file mode 100644
index 0000000000..859c2f3845
--- /dev/null
+++ b/drivers/bus/mhi/host/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_MHI_BUS) += mhi.o
+mhi-y := init.o main.o pm.o boot.o
+mhi-$(CONFIG_MHI_BUS_DEBUG) += debugfs.o
+
+obj-$(CONFIG_MHI_BUS_PCI_GENERIC) += mhi_pci_generic.o
+mhi_pci_generic-y += pci_generic.o
diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
new file mode 100644
index 0000000000..edc0ec5a09
--- /dev/null
+++ b/drivers/bus/mhi/host/boot.c
@@ -0,0 +1,558 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include "internal.h"
+
+/* Setup RDDM vector table for RDDM transfer and program RXVEC */
+int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+ struct image_info *img_info)
+{
+ struct mhi_buf *mhi_buf = img_info->mhi_buf;
+ struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
+ void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 sequence_id;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
+ bhi_vec->dma_addr = mhi_buf->dma_addr;
+ bhi_vec->size = mhi_buf->len;
+ }
+
+ dev_dbg(dev, "BHIe programming for RDDM\n");
+
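+	/* The last mhi_buf entry holds the RDDM vector table itself; program its address into RXVEC */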
+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
+ upper_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
+ lower_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
+ sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK);
+
+ ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
+ BHIE_RXVECDB_SEQNUM_BMSK, sequence_id);
+ if (ret) {
+ dev_err(dev, "Failed to write sequence ID for BHIE_RXVECDB\n");
+ return ret;
+ }
+
+ dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n",
+ &mhi_buf->dma_addr, mhi_buf->len, sequence_id);
+
+ return 0;
+}
+
+/* Collect RDDM buffer during kernel panic */
+static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
+{
+ int ret;
+ u32 rx_status;
+ enum mhi_ee_type ee;
+ const u32 delayus = 2000;
+ u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
+ const u32 rddm_timeout_us = 200000;
+ int rddm_retry = rddm_timeout_us / delayus;
+ void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ dev_dbg(dev, "Entered with pm_state:%s dev_state:%s ee:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ mhi_state_str(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+ /*
+	 * This should only execute during a kernel panic; we expect all
+	 * other cores to shut down while we're collecting the RDDM buffer.
+	 * After returning from this function, we expect the device to reset.
+	 *
+	 * Normally, we read/write pm_state only after grabbing the
+	 * pm_lock; since we're in a panic, we skip it. Also, there is no
+	 * guarantee that this state change will take effect since
+	 * we're setting it without grabbing the pm_lock.
+ */
+ mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+	/* The update should take effect immediately */
+ smp_wmb();
+
+ /*
+ * Make sure device is not already in RDDM. In case the device asserts
+ * and a kernel panic follows, device will already be in RDDM.
+ * Do not trigger SYS ERR again and proceed with waiting for
+ * image download completion.
+ */
+ ee = mhi_get_exec_env(mhi_cntrl);
+ if (ee == MHI_EE_MAX)
+ goto error_exit_rddm;
+
+ if (ee != MHI_EE_RDDM) {
+ dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+
+ dev_dbg(dev, "Waiting for device to enter RDDM\n");
+ while (rddm_retry--) {
+ ee = mhi_get_exec_env(mhi_cntrl);
+ if (ee == MHI_EE_RDDM)
+ break;
+
+ udelay(delayus);
+ }
+
+ if (rddm_retry <= 0) {
+ /* Hardware reset so force device to enter RDDM */
+ dev_dbg(dev,
+ "Did not enter RDDM, do a host req reset\n");
+ mhi_soc_reset(mhi_cntrl);
+ udelay(delayus);
+ }
+
+ ee = mhi_get_exec_env(mhi_cntrl);
+ }
+
+ dev_dbg(dev,
+ "Waiting for RDDM image download via BHIe, current EE:%s\n",
+ TO_MHI_EXEC_STR(ee));
+
+ while (retry--) {
+ ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
+ BHIE_RXVECSTATUS_STATUS_BMSK, &rx_status);
+ if (ret)
+ return -EIO;
+
+ if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL)
+ return 0;
+
+ udelay(delayus);
+ }
+
+ ee = mhi_get_exec_env(mhi_cntrl);
+ ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
+
+ dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status);
+
+error_exit_rddm:
+ dev_err(dev, "RDDM transfer failed. Current EE: %s\n",
+ TO_MHI_EXEC_STR(ee));
+
+ return -EIO;
+}
+
+/* Download RDDM image from device */
+int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic)
+{
+ void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 rx_status;
+
+ if (in_panic)
+ return __mhi_download_rddm_in_panic(mhi_cntrl);
+
+ dev_dbg(dev, "Waiting for RDDM image download via BHIe\n");
+
+ /* Wait for the image download to complete */
+ wait_event_timeout(mhi_cntrl->state_event,
+ mhi_read_reg_field(mhi_cntrl, base,
+ BHIE_RXVECSTATUS_OFFS,
+ BHIE_RXVECSTATUS_STATUS_BMSK,
+ &rx_status) || rx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
+}
+EXPORT_SYMBOL_GPL(mhi_download_rddm_image);
+
+static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
+ const struct mhi_buf *mhi_buf)
+{
+ void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ u32 tx_status, sequence_id;
+ int ret;
+
+ read_lock_bh(pm_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ read_unlock_bh(pm_lock);
+ return -EIO;
+ }
+
+ sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK);
+ dev_dbg(dev, "Starting image download via BHIe. Sequence ID: %u\n",
+ sequence_id);
+ mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
+ upper_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
+ lower_32_bits(mhi_buf->dma_addr));
+
+ mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
+
+ ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
+ BHIE_TXVECDB_SEQNUM_BMSK, sequence_id);
+ read_unlock_bh(pm_lock);
+
+ if (ret)
+ return ret;
+
+ /* Wait for the image download to complete */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl, base,
+ BHIE_TXVECSTATUS_OFFS,
+ BHIE_TXVECSTATUS_STATUS_BMSK,
+ &tx_status) || tx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ tx_status != BHIE_TXVECSTATUS_STATUS_XFER_COMPL)
+ return -EIO;
+
+ return (!ret) ? -ETIMEDOUT : 0;
+}
+
+static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
+ dma_addr_t dma_addr,
+ size_t size)
+{
+ u32 tx_status, val, session_id;
+ int i, ret;
+ void __iomem *base = mhi_cntrl->bhi;
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct {
+ char *name;
+ u32 offset;
+ } error_reg[] = {
+ { "ERROR_CODE", BHI_ERRCODE },
+ { "ERROR_DBG1", BHI_ERRDBG1 },
+ { "ERROR_DBG2", BHI_ERRDBG2 },
+ { "ERROR_DBG3", BHI_ERRDBG3 },
+ { NULL },
+ };
+
+ read_lock_bh(pm_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ read_unlock_bh(pm_lock);
+ goto invalid_pm_state;
+ }
+
+ session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK);
+ dev_dbg(dev, "Starting image download via BHI. Session ID: %u\n",
+ session_id);
+ mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
+ upper_32_bits(dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
+ lower_32_bits(dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
+ read_unlock_bh(pm_lock);
+
+ /* Wait for the image download to complete */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS,
+ BHI_STATUS_MASK, &tx_status) || tx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ goto invalid_pm_state;
+
+ if (tx_status == BHI_STATUS_ERROR) {
+ dev_err(dev, "Image transfer failed\n");
+ read_lock_bh(pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ for (i = 0; error_reg[i].name; i++) {
+ ret = mhi_read_reg(mhi_cntrl, base,
+ error_reg[i].offset, &val);
+ if (ret)
+ break;
+ dev_err(dev, "Reg: %s value: 0x%x\n",
+ error_reg[i].name, val);
+ }
+ }
+ read_unlock_bh(pm_lock);
+ goto invalid_pm_state;
+ }
+
+ return (!ret) ? -ETIMEDOUT : 0;
+
+invalid_pm_state:
+
+ return -EIO;
+}
+
+void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info *image_info)
+{
+ int i;
+ struct mhi_buf *mhi_buf = image_info->mhi_buf;
+
+ for (i = 0; i < image_info->entries; i++, mhi_buf++)
+ dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
+ mhi_buf->buf, mhi_buf->dma_addr);
+
+ kfree(image_info->mhi_buf);
+ kfree(image_info);
+}
+
+int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info **image_info,
+ size_t alloc_size)
+{
+ size_t seg_size = mhi_cntrl->seg_len;
+ int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
+ int i;
+ struct image_info *img_info;
+ struct mhi_buf *mhi_buf;
+
+ img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
+ if (!img_info)
+ return -ENOMEM;
+
+ /* Allocate memory for entries */
+ img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
+ GFP_KERNEL);
+ if (!img_info->mhi_buf)
+ goto error_alloc_mhi_buf;
+
+ /* Allocate and populate vector table */
+ mhi_buf = img_info->mhi_buf;
+ for (i = 0; i < segments; i++, mhi_buf++) {
+ size_t vec_size = seg_size;
+
+ /* Vector table is the last entry */
+ if (i == segments - 1)
+ vec_size = sizeof(struct bhi_vec_entry) * i;
+
+ mhi_buf->len = vec_size;
+ mhi_buf->buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
+ vec_size, &mhi_buf->dma_addr,
+ GFP_KERNEL);
+ if (!mhi_buf->buf)
+ goto error_alloc_segment;
+ }
+
+ img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
+ img_info->entries = segments;
+ *image_info = img_info;
+
+ return 0;
+
+error_alloc_segment:
+ for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
+ dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
+ mhi_buf->buf, mhi_buf->dma_addr);
+
+error_alloc_mhi_buf:
+ kfree(img_info);
+
+ return -ENOMEM;
+}
+
+static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
+ const u8 *buf, size_t remainder,
+ struct image_info *img_info)
+{
+ size_t to_cpy;
+ struct mhi_buf *mhi_buf = img_info->mhi_buf;
+ struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
+
+ while (remainder) {
+ to_cpy = min(remainder, mhi_buf->len);
+ memcpy(mhi_buf->buf, buf, to_cpy);
+ bhi_vec->dma_addr = mhi_buf->dma_addr;
+ bhi_vec->size = to_cpy;
+
+ buf += to_cpy;
+ remainder -= to_cpy;
+ bhi_vec++;
+ mhi_buf++;
+ }
+}
+
+void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
+{
+ const struct firmware *firmware = NULL;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state new_state;
+ const char *fw_name;
+ const u8 *fw_data;
+ void *buf;
+ dma_addr_t dma_addr;
+ size_t size, fw_sz;
+ int i, ret;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev, "Device MHI is not in valid state\n");
+ return;
+ }
+
+ /* save hardware info from BHI */
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_SERIALNU,
+ &mhi_cntrl->serial_number);
+ if (ret)
+ dev_err(dev, "Could not capture serial number via BHI\n");
+
+ for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) {
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i),
+ &mhi_cntrl->oem_pk_hash[i]);
+ if (ret) {
+ dev_err(dev, "Could not capture OEM PK HASH via BHI\n");
+ break;
+ }
+ }
+
+ /* wait for ready on pass through or any other execution environment */
+ if (!MHI_FW_LOAD_CAPABLE(mhi_cntrl->ee))
+ goto fw_load_ready_state;
+
+ fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
+ mhi_cntrl->edl_image : mhi_cntrl->fw_image;
+
+ /* check if the driver has already provided the firmware data */
+ if (!fw_name && mhi_cntrl->fbc_download &&
+ mhi_cntrl->fw_data && mhi_cntrl->fw_sz) {
+ if (!mhi_cntrl->sbl_size) {
+ dev_err(dev, "fw_data provided but no sbl_size\n");
+ goto error_fw_load;
+ }
+
+ size = mhi_cntrl->sbl_size;
+ fw_data = mhi_cntrl->fw_data;
+ fw_sz = mhi_cntrl->fw_sz;
+ goto skip_req_fw;
+ }
+
+ if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
+ !mhi_cntrl->seg_len))) {
+ dev_err(dev,
+ "No firmware image defined or !sbl_size || !seg_len\n");
+ goto error_fw_load;
+ }
+
+ ret = request_firmware(&firmware, fw_name, dev);
+ if (ret) {
+ dev_err(dev, "Error loading firmware: %d\n", ret);
+ goto error_fw_load;
+ }
+
+ size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;
+
+ /* SBL size provided is maximum size, not necessarily the image size */
+ if (size > firmware->size)
+ size = firmware->size;
+
+ fw_data = firmware->data;
+ fw_sz = firmware->size;
+
+skip_req_fw:
+ buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr,
+ GFP_KERNEL);
+ if (!buf) {
+ release_firmware(firmware);
+ goto error_fw_load;
+ }
+
+ /* Download image using BHI */
+ memcpy(buf, fw_data, size);
+ ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size);
+ dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr);
+
+ /* Error or in EDL mode, we're done */
+ if (ret) {
+ dev_err(dev, "MHI did not load image over BHI, ret: %d\n", ret);
+ release_firmware(firmware);
+ goto error_fw_load;
+ }
+
+ /* Wait for ready since EDL image was loaded */
+ if (fw_name && fw_name == mhi_cntrl->edl_image) {
+ release_firmware(firmware);
+ goto fw_load_ready_state;
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->dev_state = MHI_STATE_RESET;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ /*
+	 * If we're doing fbc, populate the vector tables while the
+	 * device is transitioning into the MHI READY state.
+ */
+ if (mhi_cntrl->fbc_download) {
+ ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, fw_sz);
+ if (ret) {
+ release_firmware(firmware);
+ goto error_fw_load;
+ }
+
+ /* Load the firmware into BHIE vec table */
+ mhi_firmware_copy(mhi_cntrl, fw_data, fw_sz, mhi_cntrl->fbc_image);
+ }
+
+ release_firmware(firmware);
+
+fw_load_ready_state:
+ /* Transitioning into MHI RESET->READY state */
+ ret = mhi_ready_state_transition(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "MHI did not enter READY state\n");
+ goto error_ready_state;
+ }
+
+ dev_info(dev, "Wait for device to enter SBL or Mission mode\n");
+ return;
+
+error_ready_state:
+ if (mhi_cntrl->fbc_download) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+ mhi_cntrl->fbc_image = NULL;
+ }
+
+error_fw_load:
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (new_state == MHI_PM_FW_DL_ERR)
+ wake_up_all(&mhi_cntrl->state_event);
+}
+
+int mhi_download_amss_image(struct mhi_controller *mhi_cntrl)
+{
+ struct image_info *image_info = mhi_cntrl->fbc_image;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state new_state;
+ int ret;
+
+ if (!image_info)
+ return -EIO;
+
+ ret = mhi_fw_load_bhie(mhi_cntrl,
+ /* Vector table is the last entry */
+ &image_info->mhi_buf[image_info->entries - 1]);
+ if (ret) {
+ dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (new_state == MHI_PM_FW_DL_ERR)
+ wake_up_all(&mhi_cntrl->state_event);
+ }
+
+ return ret;
+}
diff --git a/drivers/bus/mhi/host/debugfs.c b/drivers/bus/mhi/host/debugfs.c
new file mode 100644
index 0000000000..cfec7811df
--- /dev/null
+++ b/drivers/bus/mhi/host/debugfs.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include "internal.h"
+
+static int mhi_debugfs_states_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+
+ /* states */
+ seq_printf(m, "PM state: %s Device: %s MHI state: %s EE: %s wake: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ mhi_is_active(mhi_cntrl) ? "Active" : "Inactive",
+ mhi_state_str(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee),
+ mhi_cntrl->wake_set ? "true" : "false");
+
+ /* counters */
+ seq_printf(m, "M0: %u M2: %u M3: %u", mhi_cntrl->M0, mhi_cntrl->M2,
+ mhi_cntrl->M3);
+
+ seq_printf(m, " device wake: %u pending packets: %u\n",
+ atomic_read(&mhi_cntrl->dev_wake),
+ atomic_read(&mhi_cntrl->pending_pkts));
+
+ return 0;
+}
+
+static int mhi_debugfs_events_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+ struct mhi_event *mhi_event;
+ struct mhi_event_ctxt *er_ctxt;
+ int i;
+
+ if (!mhi_is_active(mhi_cntrl)) {
+ seq_puts(m, "Device not ready\n");
+ return -ENODEV;
+ }
+
+ er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings;
+ i++, er_ctxt++, mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ if (mhi_event->offload_ev) {
+ seq_printf(m, "Index: %d is an offload event ring\n",
+ i);
+ continue;
+ }
+
+ seq_printf(m, "Index: %d intmod count: %lu time: %lu",
+ i, (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODC_MASK) >>
+ __ffs(EV_CTX_INTMODC_MASK),
+ (le32_to_cpu(er_ctxt->intmod) & EV_CTX_INTMODT_MASK) >>
+ __ffs(EV_CTX_INTMODT_MASK));
+
+ seq_printf(m, " base: 0x%0llx len: 0x%llx", le64_to_cpu(er_ctxt->rbase),
+ le64_to_cpu(er_ctxt->rlen));
+
+ seq_printf(m, " rp: 0x%llx wp: 0x%llx", le64_to_cpu(er_ctxt->rp),
+ le64_to_cpu(er_ctxt->wp));
+
+ seq_printf(m, " local rp: 0x%pK db: 0x%pad\n", ring->rp,
+ &mhi_event->db_cfg.db_val);
+ }
+
+ return 0;
+}
+
+static int mhi_debugfs_channels_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+ struct mhi_chan *mhi_chan;
+ struct mhi_chan_ctxt *chan_ctxt;
+ int i;
+
+ if (!mhi_is_active(mhi_cntrl)) {
+ seq_puts(m, "Device not ready\n");
+ return -ENODEV;
+ }
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ chan_ctxt = mhi_cntrl->mhi_ctxt->chan_ctxt;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
+ struct mhi_ring *ring = &mhi_chan->tre_ring;
+
+ if (mhi_chan->offload_ch) {
+ seq_printf(m, "%s(%u) is an offload channel\n",
+ mhi_chan->name, mhi_chan->chan);
+ continue;
+ }
+
+ if (!mhi_chan->mhi_dev)
+ continue;
+
+ seq_printf(m,
+ "%s(%u) state: 0x%lx brstmode: 0x%lx pollcfg: 0x%lx",
+ mhi_chan->name, mhi_chan->chan, (le32_to_cpu(chan_ctxt->chcfg) &
+ CHAN_CTX_CHSTATE_MASK) >> __ffs(CHAN_CTX_CHSTATE_MASK),
+ (le32_to_cpu(chan_ctxt->chcfg) & CHAN_CTX_BRSTMODE_MASK) >>
+ __ffs(CHAN_CTX_BRSTMODE_MASK), (le32_to_cpu(chan_ctxt->chcfg) &
+ CHAN_CTX_POLLCFG_MASK) >> __ffs(CHAN_CTX_POLLCFG_MASK));
+
+ seq_printf(m, " type: 0x%x event ring: %u", le32_to_cpu(chan_ctxt->chtype),
+ le32_to_cpu(chan_ctxt->erindex));
+
+ seq_printf(m, " base: 0x%llx len: 0x%llx rp: 0x%llx wp: 0x%llx",
+ le64_to_cpu(chan_ctxt->rbase), le64_to_cpu(chan_ctxt->rlen),
+ le64_to_cpu(chan_ctxt->rp), le64_to_cpu(chan_ctxt->wp));
+
+ seq_printf(m, " local rp: 0x%pK local wp: 0x%pK db: 0x%pad\n",
+ ring->rp, ring->wp,
+ &mhi_chan->db_cfg.db_val);
+ }
+
+ return 0;
+}
+
+static int mhi_device_info_show(struct device *dev, void *data)
+{
+ struct mhi_device *mhi_dev;
+
+ if (dev->bus != &mhi_bus_type)
+ return 0;
+
+ mhi_dev = to_mhi_device(dev);
+
+ seq_printf((struct seq_file *)data, "%s: type: %s dev_wake: %u",
+ mhi_dev->name, mhi_dev->dev_type ? "Controller" : "Transfer",
+ mhi_dev->dev_wake);
+
+ /* for transfer device types only */
+ if (mhi_dev->dev_type == MHI_DEVICE_XFER)
+ seq_printf((struct seq_file *)data, " channels: %u(UL)/%u(DL)",
+ mhi_dev->ul_chan_id, mhi_dev->dl_chan_id);
+
+ seq_puts((struct seq_file *)data, "\n");
+
+ return 0;
+}
+
+static int mhi_debugfs_devices_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+
+ if (!mhi_is_active(mhi_cntrl)) {
+ seq_puts(m, "Device not ready\n");
+ return -ENODEV;
+ }
+
+ /* Show controller and client(s) info */
+ mhi_device_info_show(&mhi_cntrl->mhi_dev->dev, m);
+ device_for_each_child(&mhi_cntrl->mhi_dev->dev, m, mhi_device_info_show);
+
+ return 0;
+}
+
+static int mhi_debugfs_regdump_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+ enum mhi_state state;
+ enum mhi_ee_type ee;
+ int i, ret = -EIO;
+ u32 val;
+ void __iomem *mhi_base = mhi_cntrl->regs;
+ void __iomem *bhi_base = mhi_cntrl->bhi;
+ void __iomem *bhie_base = mhi_cntrl->bhie;
+ void __iomem *wake_db = mhi_cntrl->wake_db;
+ struct {
+ const char *name;
+ int offset;
+ void __iomem *base;
+ } regs[] = {
+ { "MHI_REGLEN", MHIREGLEN, mhi_base},
+ { "MHI_VER", MHIVER, mhi_base},
+ { "MHI_CFG", MHICFG, mhi_base},
+ { "MHI_CTRL", MHICTRL, mhi_base},
+ { "MHI_STATUS", MHISTATUS, mhi_base},
+ { "MHI_WAKE_DB", 0, wake_db},
+ { "BHI_EXECENV", BHI_EXECENV, bhi_base},
+ { "BHI_STATUS", BHI_STATUS, bhi_base},
+ { "BHI_ERRCODE", BHI_ERRCODE, bhi_base},
+ { "BHI_ERRDBG1", BHI_ERRDBG1, bhi_base},
+ { "BHI_ERRDBG2", BHI_ERRDBG2, bhi_base},
+ { "BHI_ERRDBG3", BHI_ERRDBG3, bhi_base},
+ { "BHIE_TXVEC_DB", BHIE_TXVECDB_OFFS, bhie_base},
+ { "BHIE_TXVEC_STATUS", BHIE_TXVECSTATUS_OFFS, bhie_base},
+ { "BHIE_RXVEC_DB", BHIE_RXVECDB_OFFS, bhie_base},
+ { "BHIE_RXVEC_STATUS", BHIE_RXVECSTATUS_OFFS, bhie_base},
+ { NULL },
+ };
+
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+ return ret;
+
+ seq_printf(m, "Host PM state: %s Device state: %s EE: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ mhi_state_str(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+ state = mhi_get_mhi_state(mhi_cntrl);
+ ee = mhi_get_exec_env(mhi_cntrl);
+ seq_printf(m, "Device EE: %s state: %s\n", TO_MHI_EXEC_STR(ee),
+ mhi_state_str(state));
+
+ for (i = 0; regs[i].name; i++) {
+ if (!regs[i].base)
+ continue;
+ ret = mhi_read_reg(mhi_cntrl, regs[i].base, regs[i].offset,
+ &val);
+ if (ret)
+ continue;
+
+ seq_printf(m, "%s: 0x%x\n", regs[i].name, val);
+ }
+
+ return 0;
+}
+
+static int mhi_debugfs_device_wake_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+ struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
+
+ if (!mhi_is_active(mhi_cntrl)) {
+ seq_puts(m, "Device not ready\n");
+ return -ENODEV;
+ }
+
+ seq_printf(m,
+ "Wake count: %d\n%s\n", mhi_dev->dev_wake,
+ "Usage: echo get/put > device_wake to vote/unvote for M0");
+
+ return 0;
+}
+
+static ssize_t mhi_debugfs_device_wake_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct mhi_controller *mhi_cntrl = m->private;
+ struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
+ char buf[16];
+ int ret = -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (!strncmp(buf, "get", 3)) {
+ ret = mhi_device_get_sync(mhi_dev);
+ } else if (!strncmp(buf, "put", 3)) {
+ mhi_device_put(mhi_dev);
+ ret = 0;
+ }
+
+ return ret ? ret : count;
+}
+
+static int mhi_debugfs_timeout_ms_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+
+ seq_printf(m, "%u ms\n", mhi_cntrl->timeout_ms);
+
+ return 0;
+}
+
+static ssize_t mhi_debugfs_timeout_ms_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct mhi_controller *mhi_cntrl = m->private;
+ u32 timeout_ms;
+
+ if (kstrtou32_from_user(ubuf, count, 0, &timeout_ms))
+ return -EINVAL;
+
+ mhi_cntrl->timeout_ms = timeout_ms;
+
+ return count;
+}
+
+static int mhi_debugfs_states_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_states_show, inode->i_private);
+}
+
+static int mhi_debugfs_events_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_events_show, inode->i_private);
+}
+
+static int mhi_debugfs_channels_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_channels_show, inode->i_private);
+}
+
+static int mhi_debugfs_devices_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_devices_show, inode->i_private);
+}
+
+static int mhi_debugfs_regdump_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_regdump_show, inode->i_private);
+}
+
+static int mhi_debugfs_device_wake_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_device_wake_show, inode->i_private);
+}
+
+static int mhi_debugfs_timeout_ms_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_timeout_ms_show, inode->i_private);
+}
+
+static const struct file_operations debugfs_states_fops = {
+ .open = mhi_debugfs_states_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_events_fops = {
+ .open = mhi_debugfs_events_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_channels_fops = {
+ .open = mhi_debugfs_channels_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_devices_fops = {
+ .open = mhi_debugfs_devices_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_regdump_fops = {
+ .open = mhi_debugfs_regdump_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_device_wake_fops = {
+ .open = mhi_debugfs_device_wake_open,
+ .write = mhi_debugfs_device_wake_write,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_timeout_ms_fops = {
+ .open = mhi_debugfs_timeout_ms_open,
+ .write = mhi_debugfs_timeout_ms_write,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static struct dentry *mhi_debugfs_root;
+
+void mhi_create_debugfs(struct mhi_controller *mhi_cntrl)
+{
+ mhi_cntrl->debugfs_dentry =
+ debugfs_create_dir(dev_name(&mhi_cntrl->mhi_dev->dev),
+ mhi_debugfs_root);
+
+ debugfs_create_file("states", 0444, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_states_fops);
+ debugfs_create_file("events", 0444, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_events_fops);
+ debugfs_create_file("channels", 0444, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_channels_fops);
+ debugfs_create_file("devices", 0444, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_devices_fops);
+ debugfs_create_file("regdump", 0444, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_regdump_fops);
+ debugfs_create_file("device_wake", 0644, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_device_wake_fops);
+ debugfs_create_file("timeout_ms", 0644, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_timeout_ms_fops);
+}
+
+void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl)
+{
+ debugfs_remove_recursive(mhi_cntrl->debugfs_dentry);
+ mhi_cntrl->debugfs_dentry = NULL;
+}
+
+void mhi_debugfs_init(void)
+{
+ mhi_debugfs_root = debugfs_create_dir(mhi_bus_type.name, NULL);
+}
+
+void mhi_debugfs_exit(void)
+{
+ debugfs_remove_recursive(mhi_debugfs_root);
+}
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
new file mode 100644
index 0000000000..f78aefd2d7
--- /dev/null
+++ b/drivers/bus/mhi/host/init.c
@@ -0,0 +1,1464 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include "internal.h"
+
+static DEFINE_IDA(mhi_controller_ida);
+
+const char * const mhi_ee_str[MHI_EE_MAX] = {
+ [MHI_EE_PBL] = "PRIMARY BOOTLOADER",
+ [MHI_EE_SBL] = "SECONDARY BOOTLOADER",
+ [MHI_EE_AMSS] = "MISSION MODE",
+ [MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE",
+ [MHI_EE_WFW] = "WLAN FIRMWARE",
+ [MHI_EE_PTHRU] = "PASS THROUGH",
+ [MHI_EE_EDL] = "EMERGENCY DOWNLOAD",
+ [MHI_EE_FP] = "FLASH PROGRAMMER",
+ [MHI_EE_DISABLE_TRANSITION] = "DISABLE",
+ [MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
+};
+
+const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
+ [DEV_ST_TRANSITION_PBL] = "PBL",
+ [DEV_ST_TRANSITION_READY] = "READY",
+ [DEV_ST_TRANSITION_SBL] = "SBL",
+ [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
+ [DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER",
+ [DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR",
+ [DEV_ST_TRANSITION_DISABLE] = "DISABLE",
+};
+
+const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
+ [MHI_CH_STATE_TYPE_RESET] = "RESET",
+ [MHI_CH_STATE_TYPE_STOP] = "STOP",
+ [MHI_CH_STATE_TYPE_START] = "START",
+};
+
+static const char * const mhi_pm_state_str[] = {
+ [MHI_PM_STATE_DISABLE] = "DISABLE",
+ [MHI_PM_STATE_POR] = "POWER ON RESET",
+ [MHI_PM_STATE_M0] = "M0",
+ [MHI_PM_STATE_M2] = "M2",
+ [MHI_PM_STATE_M3_ENTER] = "M?->M3",
+ [MHI_PM_STATE_M3] = "M3",
+ [MHI_PM_STATE_M3_EXIT] = "M3->M0",
+ [MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error",
+ [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect",
+ [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process",
+ [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
+ [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
+};
+
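+/* Each MHI PM state is a single bit, so __fls() maps the bitmask to its table index */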
+const char *to_mhi_pm_state_str(u32 state)
+{
+ int index;
+
+ if (state)
+ index = __fls(state);
+
+ if (!state || index >= ARRAY_SIZE(mhi_pm_state_str))
+ return "Invalid State";
+
+ return mhi_pm_state_str[index];
+}
+
+static ssize_t serial_number_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ return sysfs_emit(buf, "Serial Number: %u\n",
+ mhi_cntrl->serial_number);
+}
+static DEVICE_ATTR_RO(serial_number);
+
+static ssize_t oem_pk_hash_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ int i, cnt = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
+ cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n",
+ i, mhi_cntrl->oem_pk_hash[i]);
+
+ return cnt;
+}
+static DEVICE_ATTR_RO(oem_pk_hash);
+
+static ssize_t soc_reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ mhi_soc_reset(mhi_cntrl);
+ return count;
+}
+static DEVICE_ATTR_WO(soc_reset);
+
+static struct attribute *mhi_dev_attrs[] = {
+ &dev_attr_serial_number.attr,
+ &dev_attr_oem_pk_hash.attr,
+ &dev_attr_soc_reset.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mhi_dev);
+
+/* MHI protocol requires the transfer ring to be aligned with ring length */
+static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring,
+ u64 len)
+{
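+	/*
+	 * Over-allocate by (len - 1) bytes so that a block of 'len' bytes
+	 * aligned to 'len' is guaranteed to fit within the allocation.
+	 */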
+ ring->alloc_size = len + (len - 1);
+ ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
+ &ring->dma_handle, GFP_KERNEL);
+ if (!ring->pre_aligned)
+ return -ENOMEM;
+
+ ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
+ ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);
+
+ return 0;
+}
+
+void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
+{
+ int i;
+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
+ }
+
+ free_irq(mhi_cntrl->irq[0], mhi_cntrl);
+}
+
+int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
+ int i, ret;
+
+ /* if controller driver has set irq_flags, use it */
+ if (mhi_cntrl->irq_flags)
+ irq_flags = mhi_cntrl->irq_flags;
+
+ /* Setup BHI_INTVEC IRQ */
+ ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
+ mhi_intvec_threaded_handler,
+ irq_flags,
+ "bhi", mhi_cntrl);
+ if (ret)
+ return ret;
+ /*
+ * IRQs should be enabled during mhi_async_power_up(), so disable them explicitly here.
+ * Due to the use of IRQF_SHARED flag as default while requesting IRQs, we assume that
+ * IRQ_NOAUTOEN is not applicable.
+ */
+ disable_irq(mhi_cntrl->irq[0]);
+
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
+ dev_err(dev, "irq %d not available for event ring\n",
+ mhi_event->irq);
+ ret = -EINVAL;
+ goto error_request;
+ }
+
+ ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
+ mhi_irq_handler,
+ irq_flags,
+ "mhi", mhi_event);
+ if (ret) {
+ dev_err(dev, "Error requesting irq:%d for ev:%d\n",
+ mhi_cntrl->irq[mhi_event->irq], i);
+ goto error_request;
+ }
+
+ disable_irq(mhi_cntrl->irq[mhi_event->irq]);
+ }
+
+ return 0;
+
+error_request:
+ for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
+ }
+ free_irq(mhi_cntrl->irq[0], mhi_cntrl);
+
+ return ret;
+}
+
+void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
+{
+ int i;
+ struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_event *mhi_event;
+ struct mhi_ring *ring;
+
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
+ ring = &mhi_cmd->ring;
+ dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ ring->base = NULL;
+ ring->iommu_base = 0;
+ }
+
+ dma_free_coherent(mhi_cntrl->cntrl_dev,
+ sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
+ mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
+
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ ring = &mhi_event->ring;
+ dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ ring->base = NULL;
+ ring->iommu_base = 0;
+ }
+
+ dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
+ mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
+ mhi_ctxt->er_ctxt_addr);
+
+ dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
+ mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
+ mhi_ctxt->chan_ctxt_addr);
+
+ kfree(mhi_ctxt);
+ mhi_cntrl->mhi_ctxt = NULL;
+}
+
+int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_ctxt *mhi_ctxt;
+ struct mhi_chan_ctxt *chan_ctxt;
+ struct mhi_event_ctxt *er_ctxt;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ struct mhi_chan *mhi_chan;
+ struct mhi_event *mhi_event;
+ struct mhi_cmd *mhi_cmd;
+ u32 tmp;
+ int ret = -ENOMEM, i;
+
+ atomic_set(&mhi_cntrl->dev_wake, 0);
+ atomic_set(&mhi_cntrl->pending_pkts, 0);
+
+ mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
+ if (!mhi_ctxt)
+ return -ENOMEM;
+
+ /* Setup channel ctxt */
+ mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
+ sizeof(*mhi_ctxt->chan_ctxt) *
+ mhi_cntrl->max_chan,
+ &mhi_ctxt->chan_ctxt_addr,
+ GFP_KERNEL);
+ if (!mhi_ctxt->chan_ctxt)
+ goto error_alloc_chan_ctxt;
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ chan_ctxt = mhi_ctxt->chan_ctxt;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
+ /* Skip if it is an offload channel */
+ if (mhi_chan->offload_ch)
+ continue;
+
+ tmp = le32_to_cpu(chan_ctxt->chcfg);
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
+ tmp &= ~CHAN_CTX_BRSTMODE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_BRSTMODE_MASK, mhi_chan->db_cfg.brstmode);
+ tmp &= ~CHAN_CTX_POLLCFG_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_POLLCFG_MASK, mhi_chan->db_cfg.pollcfg);
+ chan_ctxt->chcfg = cpu_to_le32(tmp);
+
+ chan_ctxt->chtype = cpu_to_le32(mhi_chan->type);
+ chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index);
+
+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+ mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
+ }
+
+ /* Setup event context */
+ mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
+ sizeof(*mhi_ctxt->er_ctxt) *
+ mhi_cntrl->total_ev_rings,
+ &mhi_ctxt->er_ctxt_addr,
+ GFP_KERNEL);
+ if (!mhi_ctxt->er_ctxt)
+ goto error_alloc_er_ctxt;
+
+ er_ctxt = mhi_ctxt->er_ctxt;
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
+ mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ /* Skip if it is an offload event */
+ if (mhi_event->offload_ev)
+ continue;
+
+ tmp = le32_to_cpu(er_ctxt->intmod);
+ tmp &= ~EV_CTX_INTMODC_MASK;
+ tmp &= ~EV_CTX_INTMODT_MASK;
+ tmp |= FIELD_PREP(EV_CTX_INTMODT_MASK, mhi_event->intmod);
+ er_ctxt->intmod = cpu_to_le32(tmp);
+
+ er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID);
+ er_ctxt->msivec = cpu_to_le32(mhi_event->irq);
+ mhi_event->db_cfg.db_mode = true;
+
+ ring->el_size = sizeof(struct mhi_ring_element);
+ ring->len = ring->el_size * ring->elements;
+ ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
+ if (ret)
+ goto error_alloc_er;
+
+ /*
+		 * If the read pointer equals the write pointer, the ring is
+		 * empty
+ */
+ ring->rp = ring->wp = ring->base;
+ er_ctxt->rbase = cpu_to_le64(ring->iommu_base);
+ er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
+ er_ctxt->rlen = cpu_to_le64(ring->len);
+ ring->ctxt_wp = &er_ctxt->wp;
+ }
+
+ /* Setup cmd context */
+ ret = -ENOMEM;
+ mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
+ sizeof(*mhi_ctxt->cmd_ctxt) *
+ NR_OF_CMD_RINGS,
+ &mhi_ctxt->cmd_ctxt_addr,
+ GFP_KERNEL);
+ if (!mhi_ctxt->cmd_ctxt)
+ goto error_alloc_er;
+
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ cmd_ctxt = mhi_ctxt->cmd_ctxt;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ ring->el_size = sizeof(struct mhi_ring_element);
+ ring->elements = CMD_EL_PER_RING;
+ ring->len = ring->el_size * ring->elements;
+ ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
+ if (ret)
+ goto error_alloc_cmd;
+
+ ring->rp = ring->wp = ring->base;
+ cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base);
+ cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
+ cmd_ctxt->rlen = cpu_to_le64(ring->len);
+ ring->ctxt_wp = &cmd_ctxt->wp;
+ }
+
+ mhi_cntrl->mhi_ctxt = mhi_ctxt;
+
+ return 0;
+
+error_alloc_cmd:
+ for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ }
+ dma_free_coherent(mhi_cntrl->cntrl_dev,
+ sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
+ mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
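+	/*
+	 * All event rings were set up before the command context allocation
+	 * failed, so rewind the cursor past the last ring and fall through to
+	 * error_alloc_er below to free every one of them.
+	 */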
+ i = mhi_cntrl->total_ev_rings;
+ mhi_event = mhi_cntrl->mhi_event + i;
+
+error_alloc_er:
+ for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ if (mhi_event->offload_ev)
+ continue;
+
+ dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ }
+ dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
+ mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
+ mhi_ctxt->er_ctxt_addr);
+
+error_alloc_er_ctxt:
+ dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
+ mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
+ mhi_ctxt->chan_ctxt_addr);
+
+error_alloc_chan_ctxt:
+ kfree(mhi_ctxt);
+
+ return ret;
+}
+
+int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
+{
+ u32 val;
+ int i, ret;
+ struct mhi_chan *mhi_chan;
+ struct mhi_event *mhi_event;
+ void __iomem *base = mhi_cntrl->regs;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct {
+ u32 offset;
+ u32 val;
+ } reg_info[] = {
+ {
+ CCABAP_HIGHER,
+ upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
+ },
+ {
+ CCABAP_LOWER,
+ lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
+ },
+ {
+ ECABAP_HIGHER,
+ upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
+ },
+ {
+ ECABAP_LOWER,
+ lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
+ },
+ {
+ CRCBAP_HIGHER,
+ upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
+ },
+ {
+ CRCBAP_LOWER,
+ lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
+ },
+ {
+ MHICTRLBASE_HIGHER,
+ upper_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHICTRLBASE_LOWER,
+ lower_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHIDATABASE_HIGHER,
+ upper_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHIDATABASE_LOWER,
+ lower_32_bits(mhi_cntrl->iova_start),
+ },
+ {
+ MHICTRLLIMIT_HIGHER,
+ upper_32_bits(mhi_cntrl->iova_stop),
+ },
+ {
+ MHICTRLLIMIT_LOWER,
+ lower_32_bits(mhi_cntrl->iova_stop),
+ },
+ {
+ MHIDATALIMIT_HIGHER,
+ upper_32_bits(mhi_cntrl->iova_stop),
+ },
+ {
+ MHIDATALIMIT_LOWER,
+ lower_32_bits(mhi_cntrl->iova_stop),
+ },
+ {0, 0}
+ };
+
+ dev_dbg(dev, "Initializing MHI registers\n");
+
+ /* Read channel db offset */
+ ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val);
+ if (ret) {
+ dev_err(dev, "Unable to read CHDBOFF register\n");
+ return -EIO;
+ }
+
+ if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) {
+ dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n",
+ val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB));
+ return -ERANGE;
+ }
+
+ /* Setup wake db */
+ mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
+ mhi_cntrl->wake_set = false;
+
+ /* Setup channel db address for each channel in tre_ring */
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
+ mhi_chan->tre_ring.db_addr = base + val;
+
+ /* Read event ring db offset */
+ ret = mhi_read_reg(mhi_cntrl, base, ERDBOFF, &val);
+ if (ret) {
+ dev_err(dev, "Unable to read ERDBOFF register\n");
+ return -EIO;
+ }
+
+ if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) {
+ dev_err(dev, "ERDB offset: 0x%x is out of range: 0x%zx\n",
+ val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings));
+ return -ERANGE;
+ }
+
+ /* Setup event db address for each ev_ring */
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ mhi_event->ring.db_addr = base + val;
+ }
+
+ /* Setup DB register for primary CMD rings */
+ mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;
+
+ /* Write to MMIO registers */
+ for (i = 0; reg_info[i].offset; i++)
+ mhi_write_reg(mhi_cntrl, base, reg_info[i].offset,
+ reg_info[i].val);
+
+ ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NER_MASK,
+ mhi_cntrl->total_ev_rings);
+ if (ret) {
+ dev_err(dev, "Unable to write MHICFG register\n");
+ return ret;
+ }
+
+ ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NHWER_MASK,
+ mhi_cntrl->hw_ev_rings);
+ if (ret) {
+ dev_err(dev, "Unable to write MHICFG register\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring;
+ struct mhi_ring *tre_ring;
+ struct mhi_chan_ctxt *chan_ctxt;
+ u32 tmp;
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+ chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
+
+ if (!chan_ctxt->rbase) /* Already uninitialized */
+ return;
+
+ dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
+ tre_ring->pre_aligned, tre_ring->dma_handle);
+ vfree(buf_ring->base);
+
+ buf_ring->base = tre_ring->base = NULL;
+ tre_ring->ctxt_wp = NULL;
+ chan_ctxt->rbase = 0;
+ chan_ctxt->rlen = 0;
+ chan_ctxt->rp = 0;
+ chan_ctxt->wp = 0;
+
+ tmp = le32_to_cpu(chan_ctxt->chcfg);
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
+ chan_ctxt->chcfg = cpu_to_le32(tmp);
+
+ /* Update to all cores */
+ smp_wmb();
+}
+
+int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring;
+ struct mhi_ring *tre_ring;
+ struct mhi_chan_ctxt *chan_ctxt;
+ u32 tmp;
+ int ret;
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+ tre_ring->el_size = sizeof(struct mhi_ring_element);
+ tre_ring->len = tre_ring->el_size * tre_ring->elements;
+ chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
+ ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
+ if (ret)
+ return -ENOMEM;
+
+ buf_ring->el_size = sizeof(struct mhi_buf_info);
+ buf_ring->len = buf_ring->el_size * buf_ring->elements;
+ buf_ring->base = vzalloc(buf_ring->len);
+
+ if (!buf_ring->base) {
+ dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
+ tre_ring->pre_aligned, tre_ring->dma_handle);
+ return -ENOMEM;
+ }
+
+ tmp = le32_to_cpu(chan_ctxt->chcfg);
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_ENABLED);
+ chan_ctxt->chcfg = cpu_to_le32(tmp);
+
+ chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base);
+ chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
+ chan_ctxt->rlen = cpu_to_le64(tre_ring->len);
+ tre_ring->ctxt_wp = &chan_ctxt->wp;
+
+ tre_ring->rp = tre_ring->wp = tre_ring->base;
+ buf_ring->rp = buf_ring->wp = buf_ring->base;
+ mhi_chan->db_cfg.db_mode = 1;
+
+ /* Update to all cores */
+ smp_wmb();
+
+ return 0;
+}
+
+static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
+ const struct mhi_controller_config *config)
+{
+ struct mhi_event *mhi_event;
+ const struct mhi_event_config *event_cfg;
+ struct device *dev = mhi_cntrl->cntrl_dev;
+ int i, num;
+
+ num = config->num_events;
+ mhi_cntrl->total_ev_rings = num;
+ mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
+ GFP_KERNEL);
+ if (!mhi_cntrl->mhi_event)
+ return -ENOMEM;
+
+ /* Populate event ring */
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < num; i++) {
+ event_cfg = &config->event_cfg[i];
+
+ mhi_event->er_index = i;
+ mhi_event->ring.elements = event_cfg->num_elements;
+ mhi_event->intmod = event_cfg->irq_moderation_ms;
+ mhi_event->irq = event_cfg->irq;
+
+ if (event_cfg->channel != U32_MAX) {
+ /* This event ring has a dedicated channel */
+ mhi_event->chan = event_cfg->channel;
+ if (mhi_event->chan >= mhi_cntrl->max_chan) {
+ dev_err(dev,
+ "Event Ring channel not available\n");
+ goto error_ev_cfg;
+ }
+
+ mhi_event->mhi_chan =
+ &mhi_cntrl->mhi_chan[mhi_event->chan];
+ }
+
+ /* Priority is fixed to 1 for now */
+ mhi_event->priority = 1;
+
+ mhi_event->db_cfg.brstmode = event_cfg->mode;
+ if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
+ goto error_ev_cfg;
+
+ if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
+ mhi_event->db_cfg.process_db = mhi_db_brstmode;
+ else
+ mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;
+
+ mhi_event->data_type = event_cfg->data_type;
+
+ switch (mhi_event->data_type) {
+ case MHI_ER_DATA:
+ mhi_event->process_event = mhi_process_data_event_ring;
+ break;
+ case MHI_ER_CTRL:
+ mhi_event->process_event = mhi_process_ctrl_ev_ring;
+ break;
+ default:
+ dev_err(dev, "Event Ring type not supported\n");
+ goto error_ev_cfg;
+ }
+
+ mhi_event->hw_ring = event_cfg->hardware_event;
+ if (mhi_event->hw_ring)
+ mhi_cntrl->hw_ev_rings++;
+ else
+ mhi_cntrl->sw_ev_rings++;
+
+ mhi_event->cl_manage = event_cfg->client_managed;
+ mhi_event->offload_ev = event_cfg->offload_channel;
+ mhi_event++;
+ }
+
+ return 0;
+
+error_ev_cfg:
+
+ kfree(mhi_cntrl->mhi_event);
+ return -EINVAL;
+}
+
+static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
+ const struct mhi_controller_config *config)
+{
+ const struct mhi_channel_config *ch_cfg;
+ struct device *dev = mhi_cntrl->cntrl_dev;
+ int i;
+ u32 chan;
+
+ mhi_cntrl->max_chan = config->max_channels;
+
+ /*
+ * The allocation of MHI channels can exceed 32KB in some scenarios,
+	 * so vzalloc is used here to avoid possible memory allocation
+	 * failures
+ */
+ mhi_cntrl->mhi_chan = vcalloc(mhi_cntrl->max_chan,
+ sizeof(*mhi_cntrl->mhi_chan));
+ if (!mhi_cntrl->mhi_chan)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);
+
+ /* Populate channel configurations */
+ for (i = 0; i < config->num_channels; i++) {
+ struct mhi_chan *mhi_chan;
+
+ ch_cfg = &config->ch_cfg[i];
+
+ chan = ch_cfg->num;
+ if (chan >= mhi_cntrl->max_chan) {
+ dev_err(dev, "Channel %d not available\n", chan);
+ goto error_chan_cfg;
+ }
+
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ mhi_chan->name = ch_cfg->name;
+ mhi_chan->chan = chan;
+
+ mhi_chan->tre_ring.elements = ch_cfg->num_elements;
+ if (!mhi_chan->tre_ring.elements)
+ goto error_chan_cfg;
+
+ /*
+		 * For some channels, the local (buffer) ring should be larger
+		 * than the transfer ring because the device maintains internal
+		 * logical channels, allowing the host to queue more buffers than
+		 * the transfer ring holds. RSC channels, for example, need a
+		 * larger local ring than their transfer ring.
+ */
+ mhi_chan->buf_ring.elements = ch_cfg->local_elements;
+ if (!mhi_chan->buf_ring.elements)
+ mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
+ mhi_chan->er_index = ch_cfg->event_ring;
+ mhi_chan->dir = ch_cfg->dir;
+
+ /*
+		 * For most channels, chtype is identical to the channel
+		 * direction, so if it is not defined, use the channel direction
+		 * as chtype
+ */
+ mhi_chan->type = ch_cfg->type;
+ if (!mhi_chan->type)
+ mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;
+
+ mhi_chan->ee_mask = ch_cfg->ee_mask;
+ mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
+ mhi_chan->lpm_notify = ch_cfg->lpm_notify;
+ mhi_chan->offload_ch = ch_cfg->offload_channel;
+ mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
+ mhi_chan->pre_alloc = ch_cfg->auto_queue;
+ mhi_chan->wake_capable = ch_cfg->wake_capable;
+
+ /*
+ * If MHI host allocates buffers, then the channel direction
+ * should be DMA_FROM_DEVICE
+ */
+ if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
+ dev_err(dev, "Invalid channel configuration\n");
+ goto error_chan_cfg;
+ }
+
+ /*
+ * Bi-directional and direction less channel must be an
+ * offload channel
+ */
+ if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
+ mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
+ dev_err(dev, "Invalid channel configuration\n");
+ goto error_chan_cfg;
+ }
+
+ if (!mhi_chan->offload_ch) {
+ mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
+ if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
+ dev_err(dev, "Invalid Door bell mode\n");
+ goto error_chan_cfg;
+ }
+ }
+
+ if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
+ mhi_chan->db_cfg.process_db = mhi_db_brstmode;
+ else
+ mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;
+
+ mhi_chan->configured = true;
+
+ if (mhi_chan->lpm_notify)
+ list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
+ }
+
+ return 0;
+
+error_chan_cfg:
+ vfree(mhi_cntrl->mhi_chan);
+
+ return -EINVAL;
+}
+
+static int parse_config(struct mhi_controller *mhi_cntrl,
+ const struct mhi_controller_config *config)
+{
+ int ret;
+
+ /* Parse MHI channel configuration */
+ ret = parse_ch_cfg(mhi_cntrl, config);
+ if (ret)
+ return ret;
+
+ /* Parse MHI event configuration */
+ ret = parse_ev_cfg(mhi_cntrl, config);
+ if (ret)
+ goto error_ev_cfg;
+
+ mhi_cntrl->timeout_ms = config->timeout_ms;
+ if (!mhi_cntrl->timeout_ms)
+ mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;
+
+ mhi_cntrl->bounce_buf = config->use_bounce_buf;
+ mhi_cntrl->buffer_len = config->buf_len;
+ if (!mhi_cntrl->buffer_len)
+ mhi_cntrl->buffer_len = MHI_MAX_MTU;
+
+ /* By default, host is allowed to ring DB in both M0 and M2 states */
+ mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
+ if (config->m2_no_db)
+ mhi_cntrl->db_access &= ~MHI_PM_M2;
+
+ return 0;
+
+error_ev_cfg:
+ vfree(mhi_cntrl->mhi_chan);
+
+ return ret;
+}
+
+int mhi_register_controller(struct mhi_controller *mhi_cntrl,
+ const struct mhi_controller_config *config)
+{
+ struct mhi_event *mhi_event;
+ struct mhi_chan *mhi_chan;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_device *mhi_dev;
+ u32 soc_info;
+ int ret, i;
+
+ if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
+ !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
+ !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
+ !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs ||
+ !mhi_cntrl->irq || !mhi_cntrl->reg_len)
+ return -EINVAL;
+
+ ret = parse_config(mhi_cntrl, config);
+ if (ret)
+ return -EINVAL;
+
+ mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
+ sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
+ if (!mhi_cntrl->mhi_cmd) {
+ ret = -ENOMEM;
+ goto err_free_event;
+ }
+
+ INIT_LIST_HEAD(&mhi_cntrl->transition_list);
+ mutex_init(&mhi_cntrl->pm_mutex);
+ rwlock_init(&mhi_cntrl->pm_lock);
+ spin_lock_init(&mhi_cntrl->transition_lock);
+ spin_lock_init(&mhi_cntrl->wlock);
+ INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
+ init_waitqueue_head(&mhi_cntrl->state_event);
+
+ mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
+ if (!mhi_cntrl->hiprio_wq) {
+ dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
+ ret = -ENOMEM;
+ goto err_free_cmd;
+ }
+
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
+ spin_lock_init(&mhi_cmd->lock);
+
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ /* Skip for offload events */
+ if (mhi_event->offload_ev)
+ continue;
+
+ mhi_event->mhi_cntrl = mhi_cntrl;
+ spin_lock_init(&mhi_event->lock);
+ if (mhi_event->data_type == MHI_ER_CTRL)
+ tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
+ (ulong)mhi_event);
+ else
+ tasklet_init(&mhi_event->task, mhi_ev_task,
+ (ulong)mhi_event);
+ }
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ mutex_init(&mhi_chan->mutex);
+ init_completion(&mhi_chan->completion);
+ rwlock_init(&mhi_chan->lock);
+
+		/* Used when setting the BEI field of a TRE */
+ mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+ mhi_chan->intmod = mhi_event->intmod;
+ }
+
+ if (mhi_cntrl->bounce_buf) {
+ mhi_cntrl->map_single = mhi_map_single_use_bb;
+ mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
+ } else {
+ mhi_cntrl->map_single = mhi_map_single_no_bb;
+ mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
+ }
+
+ /* Read the MHI device info */
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
+ SOC_HW_VERSION_OFFS, &soc_info);
+ if (ret)
+ goto err_destroy_wq;
+
+ mhi_cntrl->family_number = FIELD_GET(SOC_HW_VERSION_FAM_NUM_BMSK, soc_info);
+ mhi_cntrl->device_number = FIELD_GET(SOC_HW_VERSION_DEV_NUM_BMSK, soc_info);
+ mhi_cntrl->major_version = FIELD_GET(SOC_HW_VERSION_MAJOR_VER_BMSK, soc_info);
+ mhi_cntrl->minor_version = FIELD_GET(SOC_HW_VERSION_MINOR_VER_BMSK, soc_info);
+
+ mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
+ if (mhi_cntrl->index < 0) {
+ ret = mhi_cntrl->index;
+ goto err_destroy_wq;
+ }
+
+ ret = mhi_init_irq_setup(mhi_cntrl);
+ if (ret)
+ goto err_ida_free;
+
+ /* Register controller with MHI bus */
+ mhi_dev = mhi_alloc_device(mhi_cntrl);
+ if (IS_ERR(mhi_dev)) {
+ dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
+ ret = PTR_ERR(mhi_dev);
+ goto error_setup_irq;
+ }
+
+ mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
+ mhi_dev->mhi_cntrl = mhi_cntrl;
+ dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index);
+ mhi_dev->name = dev_name(&mhi_dev->dev);
+
+ /* Init wakeup source */
+ device_init_wakeup(&mhi_dev->dev, true);
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ goto err_release_dev;
+
+ mhi_cntrl->mhi_dev = mhi_dev;
+
+ mhi_create_debugfs(mhi_cntrl);
+
+ return 0;
+
+err_release_dev:
+ put_device(&mhi_dev->dev);
+error_setup_irq:
+ mhi_deinit_free_irq(mhi_cntrl);
+err_ida_free:
+ ida_free(&mhi_controller_ida, mhi_cntrl->index);
+err_destroy_wq:
+ destroy_workqueue(mhi_cntrl->hiprio_wq);
+err_free_cmd:
+ kfree(mhi_cntrl->mhi_cmd);
+err_free_event:
+ kfree(mhi_cntrl->mhi_event);
+ vfree(mhi_cntrl->mhi_chan);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_register_controller);
+
+void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
+ struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
+ unsigned int i;
+
+ mhi_deinit_free_irq(mhi_cntrl);
+ mhi_destroy_debugfs(mhi_cntrl);
+
+ destroy_workqueue(mhi_cntrl->hiprio_wq);
+ kfree(mhi_cntrl->mhi_cmd);
+ kfree(mhi_cntrl->mhi_event);
+
+ /* Drop the references to MHI devices created for channels */
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ if (!mhi_chan->mhi_dev)
+ continue;
+
+ put_device(&mhi_chan->mhi_dev->dev);
+ }
+ vfree(mhi_cntrl->mhi_chan);
+
+ device_del(&mhi_dev->dev);
+ put_device(&mhi_dev->dev);
+
+ ida_free(&mhi_controller_ida, mhi_cntrl->index);
+}
+EXPORT_SYMBOL_GPL(mhi_unregister_controller);
+
+struct mhi_controller *mhi_alloc_controller(void)
+{
+ struct mhi_controller *mhi_cntrl;
+
+ mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);
+
+ return mhi_cntrl;
+}
+EXPORT_SYMBOL_GPL(mhi_alloc_controller);
+
+void mhi_free_controller(struct mhi_controller *mhi_cntrl)
+{
+ kfree(mhi_cntrl);
+}
+EXPORT_SYMBOL_GPL(mhi_free_controller);
+
+int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 bhi_off, bhie_off;
+ int ret;
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+
+ ret = mhi_init_dev_ctxt(mhi_cntrl);
+ if (ret)
+ goto error_dev_ctxt;
+
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off);
+ if (ret) {
+ dev_err(dev, "Error getting BHI offset\n");
+ goto error_reg_offset;
+ }
+
+ if (bhi_off >= mhi_cntrl->reg_len) {
+ dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n",
+ bhi_off, mhi_cntrl->reg_len);
+ ret = -ERANGE;
+ goto error_reg_offset;
+ }
+ mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;
+
+ if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) {
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
+ &bhie_off);
+ if (ret) {
+ dev_err(dev, "Error getting BHIE offset\n");
+ goto error_reg_offset;
+ }
+
+ if (bhie_off >= mhi_cntrl->reg_len) {
+ dev_err(dev,
+ "BHIe offset: 0x%x is out of range: 0x%zx\n",
+ bhie_off, mhi_cntrl->reg_len);
+ ret = -ERANGE;
+ goto error_reg_offset;
+ }
+ mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
+ }
+
+ if (mhi_cntrl->rddm_size) {
+ /*
+ * This controller supports RDDM, so we need to manually clear
+ * BHIE RX registers since POR values are undefined.
+ */
+ memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
+ 0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
+ 4);
+ /*
+		 * Allocate the RDDM table for debugging purposes, if specified
+ */
+ mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
+ mhi_cntrl->rddm_size);
+ if (mhi_cntrl->rddm_image) {
+ ret = mhi_rddm_prepare(mhi_cntrl,
+ mhi_cntrl->rddm_image);
+ if (ret) {
+ mhi_free_bhie_table(mhi_cntrl,
+ mhi_cntrl->rddm_image);
+ goto error_reg_offset;
+ }
+ }
+ }
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ return 0;
+
+error_reg_offset:
+ mhi_deinit_dev_ctxt(mhi_cntrl);
+
+error_dev_ctxt:
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);
+
+void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
+{
+ if (mhi_cntrl->fbc_image) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+ mhi_cntrl->fbc_image = NULL;
+ }
+
+ if (mhi_cntrl->rddm_image) {
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
+ mhi_cntrl->rddm_image = NULL;
+ }
+
+ mhi_cntrl->bhi = NULL;
+ mhi_cntrl->bhie = NULL;
+
+ mhi_deinit_dev_ctxt(mhi_cntrl);
+}
+EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);
+
+static void mhi_release_device(struct device *dev)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+
+ /*
+ * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
+	 * devices for the channels will only get created if the mhi_dev
+	 * associated with them is NULL. This scenario will happen during the
+ * controller suspend and resume.
+ */
+ if (mhi_dev->ul_chan)
+ mhi_dev->ul_chan->mhi_dev = NULL;
+
+ if (mhi_dev->dl_chan)
+ mhi_dev->dl_chan->mhi_dev = NULL;
+
+ kfree(mhi_dev);
+}
+
+struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_device *mhi_dev;
+ struct device *dev;
+
+ mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
+ if (!mhi_dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev = &mhi_dev->dev;
+ device_initialize(dev);
+ dev->bus = &mhi_bus_type;
+ dev->release = mhi_release_device;
+
+ if (mhi_cntrl->mhi_dev) {
+ /* for MHI client devices, parent is the MHI controller device */
+ dev->parent = &mhi_cntrl->mhi_dev->dev;
+ } else {
+ /* for MHI controller device, parent is the bus device (e.g. pci device) */
+ dev->parent = mhi_cntrl->cntrl_dev;
+ }
+
+ mhi_dev->mhi_cntrl = mhi_cntrl;
+ mhi_dev->dev_wake = 0;
+
+ return mhi_dev;
+}
+
+static int mhi_driver_probe(struct device *dev)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct device_driver *drv = dev->driver;
+ struct mhi_driver *mhi_drv = to_mhi_driver(drv);
+ struct mhi_event *mhi_event;
+ struct mhi_chan *ul_chan = mhi_dev->ul_chan;
+ struct mhi_chan *dl_chan = mhi_dev->dl_chan;
+ int ret;
+
+ /* Bring device out of LPM */
+ ret = mhi_device_get_sync(mhi_dev);
+ if (ret)
+ return ret;
+
+ ret = -EINVAL;
+
+ if (ul_chan) {
+ /*
+		 * If the channel supports LPM notifications, then status_cb
+		 * should be provided
+ */
+ if (ul_chan->lpm_notify && !mhi_drv->status_cb)
+ goto exit_probe;
+
+		/* For non-offload channels, xfer_cb should be provided */
+ if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
+ goto exit_probe;
+
+ ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
+ }
+
+ ret = -EINVAL;
+ if (dl_chan) {
+ /*
+		 * If the channel supports LPM notifications, then status_cb
+		 * should be provided
+ */
+ if (dl_chan->lpm_notify && !mhi_drv->status_cb)
+ goto exit_probe;
+
+		/* For non-offload channels, xfer_cb should be provided */
+ if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
+ goto exit_probe;
+
+ mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];
+
+ /*
+		 * If the channel's event ring is managed by the client, then
+		 * status_cb must be provided so that the framework can notify
+		 * the client of pending data
+ */
+ if (mhi_event->cl_manage && !mhi_drv->status_cb)
+ goto exit_probe;
+
+ dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
+ }
+
+ /* Call the user provided probe function */
+ ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
+ if (ret)
+ goto exit_probe;
+
+ mhi_device_put(mhi_dev);
+
+ return ret;
+
+exit_probe:
+ mhi_unprepare_from_transfer(mhi_dev);
+
+ mhi_device_put(mhi_dev);
+
+ return ret;
+}
+
+static int mhi_driver_remove(struct device *dev)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan;
+ enum mhi_ch_state ch_state[] = {
+ MHI_CH_STATE_DISABLED,
+ MHI_CH_STATE_DISABLED
+ };
+ int dir;
+
+ /* Skip if it is a controller device */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ /* Reset both channels */
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+ if (!mhi_chan)
+ continue;
+
+ /* Wake all threads waiting for completion */
+ write_lock_irq(&mhi_chan->lock);
+ mhi_chan->ccs = MHI_EV_CC_INVALID;
+ complete_all(&mhi_chan->completion);
+ write_unlock_irq(&mhi_chan->lock);
+
+		/* Save the current state and mark the channel suspended */
+ mutex_lock(&mhi_chan->mutex);
+ write_lock_irq(&mhi_chan->lock);
+ ch_state[dir] = mhi_chan->ch_state;
+ mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
+ write_unlock_irq(&mhi_chan->lock);
+
+ /* Reset the non-offload channel */
+ if (!mhi_chan->offload_ch)
+ mhi_reset_chan(mhi_cntrl, mhi_chan);
+
+ mutex_unlock(&mhi_chan->mutex);
+ }
+
+ mhi_drv->remove(mhi_dev);
+
+ /* De-init channel if it was enabled */
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+ if (!mhi_chan)
+ continue;
+
+ mutex_lock(&mhi_chan->mutex);
+
+ if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
+ ch_state[dir] == MHI_CH_STATE_STOP) &&
+ !mhi_chan->offload_ch)
+ mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+
+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+
+ mutex_unlock(&mhi_chan->mutex);
+ }
+
+ while (mhi_dev->dev_wake)
+ mhi_device_put(mhi_dev);
+
+ return 0;
+}
+
+int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
+{
+ struct device_driver *driver = &mhi_drv->driver;
+
+ if (!mhi_drv->probe || !mhi_drv->remove)
+ return -EINVAL;
+
+ driver->bus = &mhi_bus_type;
+ driver->owner = owner;
+ driver->probe = mhi_driver_probe;
+ driver->remove = mhi_driver_remove;
+
+ return driver_register(driver);
+}
+EXPORT_SYMBOL_GPL(__mhi_driver_register);
+
+void mhi_driver_unregister(struct mhi_driver *mhi_drv)
+{
+ driver_unregister(&mhi_drv->driver);
+}
+EXPORT_SYMBOL_GPL(mhi_driver_unregister);
+
+static int mhi_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct mhi_device *mhi_dev = to_mhi_device(dev);
+
+ return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
+ mhi_dev->name);
+}
+
+static int mhi_match(struct device *dev, struct device_driver *drv)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_driver *mhi_drv = to_mhi_driver(drv);
+ const struct mhi_device_id *id;
+
+ /*
+ * If the device is a controller type then there is no client driver
+ * associated with it
+ */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ for (id = mhi_drv->id_table; id->chan[0]; id++)
+ if (!strcmp(mhi_dev->name, id->chan)) {
+ mhi_dev->id = id;
+ return 1;
+ }
+
+ return 0;
+}
+
+struct bus_type mhi_bus_type = {
+ .name = "mhi",
+ .dev_name = "mhi",
+ .match = mhi_match,
+ .uevent = mhi_uevent,
+ .dev_groups = mhi_dev_groups,
+};
+
+static int __init mhi_init(void)
+{
+ mhi_debugfs_init();
+ return bus_register(&mhi_bus_type);
+}
+
+static void __exit mhi_exit(void)
+{
+ mhi_debugfs_exit();
+ bus_unregister(&mhi_bus_type);
+}
+
+postcore_initcall(mhi_init);
+module_exit(mhi_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Modem Host Interface");
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
new file mode 100644
index 0000000000..2e139e76de
--- /dev/null
+++ b/drivers/bus/mhi/host/internal.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#ifndef _MHI_INT_H
+#define _MHI_INT_H
+
+#include "../common.h"
+
+extern struct bus_type mhi_bus_type;
+
+/* Host request register */
+#define MHI_SOC_RESET_REQ_OFFSET 0xb0
+#define MHI_SOC_RESET_REQ BIT(0)
+
+#define SOC_HW_VERSION_OFFS 0x224
+#define SOC_HW_VERSION_FAM_NUM_BMSK GENMASK(31, 28)
+#define SOC_HW_VERSION_DEV_NUM_BMSK GENMASK(27, 16)
+#define SOC_HW_VERSION_MAJOR_VER_BMSK GENMASK(15, 8)
+#define SOC_HW_VERSION_MINOR_VER_BMSK GENMASK(7, 0)
+
+struct mhi_ctxt {
+ struct mhi_event_ctxt *er_ctxt;
+ struct mhi_chan_ctxt *chan_ctxt;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ dma_addr_t er_ctxt_addr;
+ dma_addr_t chan_ctxt_addr;
+ dma_addr_t cmd_ctxt_addr;
+};
+
+struct bhi_vec_entry {
+ u64 dma_addr;
+ u64 size;
+};
+
+enum mhi_ch_state_type {
+ MHI_CH_STATE_TYPE_RESET,
+ MHI_CH_STATE_TYPE_STOP,
+ MHI_CH_STATE_TYPE_START,
+ MHI_CH_STATE_TYPE_MAX,
+};
+
+extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX];
+#define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? \
+ "INVALID_STATE" : \
+ mhi_ch_state_type_str[(state)])
+
+#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \
+ mode != MHI_DB_BRST_ENABLE)
+
+extern const char * const mhi_ee_str[MHI_EE_MAX];
+#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
+ "INVALID_EE" : mhi_ee_str[ee])
+
+#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \
+ ee == MHI_EE_EDL)
+#define MHI_POWER_UP_CAPABLE(ee) (MHI_IN_PBL(ee) || ee == MHI_EE_AMSS)
+#define MHI_FW_LOAD_CAPABLE(ee) (ee == MHI_EE_PBL || ee == MHI_EE_EDL)
+#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW || \
+ ee == MHI_EE_FP)
+
+enum dev_st_transition {
+ DEV_ST_TRANSITION_PBL,
+ DEV_ST_TRANSITION_READY,
+ DEV_ST_TRANSITION_SBL,
+ DEV_ST_TRANSITION_MISSION_MODE,
+ DEV_ST_TRANSITION_FP,
+ DEV_ST_TRANSITION_SYS_ERR,
+ DEV_ST_TRANSITION_DISABLE,
+ DEV_ST_TRANSITION_MAX,
+};
+
+extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX];
+#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \
+ "INVALID_STATE" : dev_state_tran_str[state])
+
+/* internal power states */
+enum mhi_pm_state {
+ MHI_PM_STATE_DISABLE,
+ MHI_PM_STATE_POR,
+ MHI_PM_STATE_M0,
+ MHI_PM_STATE_M2,
+ MHI_PM_STATE_M3_ENTER,
+ MHI_PM_STATE_M3,
+ MHI_PM_STATE_M3_EXIT,
+ MHI_PM_STATE_FW_DL_ERR,
+ MHI_PM_STATE_SYS_ERR_DETECT,
+ MHI_PM_STATE_SYS_ERR_PROCESS,
+ MHI_PM_STATE_SHUTDOWN_PROCESS,
+ MHI_PM_STATE_LD_ERR_FATAL_DETECT,
+ MHI_PM_STATE_MAX
+};
+
+#define MHI_PM_DISABLE BIT(0)
+#define MHI_PM_POR BIT(1)
+#define MHI_PM_M0 BIT(2)
+#define MHI_PM_M2 BIT(3)
+#define MHI_PM_M3_ENTER BIT(4)
+#define MHI_PM_M3 BIT(5)
+#define MHI_PM_M3_EXIT BIT(6)
+/* firmware download failure state */
+#define MHI_PM_FW_DL_ERR BIT(7)
+#define MHI_PM_SYS_ERR_DETECT BIT(8)
+#define MHI_PM_SYS_ERR_PROCESS BIT(9)
+#define MHI_PM_SHUTDOWN_PROCESS BIT(10)
+/* link not accessible */
+#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11)
+
+#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
+ MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
+#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
+#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
+#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & mhi_cntrl->db_access)
+#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
+ MHI_PM_M2 | MHI_PM_M3_EXIT))
+#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2)
+#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state)
+#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \
+ MHI_PM_IN_ERROR_STATE(pm_state))
+#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \
+ (MHI_PM_M3_ENTER | MHI_PM_M3))
+
+#define NR_OF_CMD_RINGS 1
+#define CMD_EL_PER_RING 128
+#define PRIMARY_CMD_RING 0
+#define MHI_DEV_WAKE_DB 127
+#define MHI_MAX_MTU 0xffff
+#define MHI_RANDOM_U32_NONZERO(bmsk) (get_random_u32_inclusive(1, bmsk))
+
+enum mhi_er_type {
+ MHI_ER_TYPE_INVALID = 0x0,
+ MHI_ER_TYPE_VALID = 0x1,
+};
+
+struct db_cfg {
+ bool reset_req;
+ bool db_mode;
+ u32 pollcfg;
+ enum mhi_db_brst_mode brstmode;
+ dma_addr_t db_val;
+ void (*process_db)(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_cfg, void __iomem *io_addr,
+ dma_addr_t db_val);
+};
+
+struct mhi_pm_transitions {
+ enum mhi_pm_state from_state;
+ u32 to_states;
+};
+
+struct state_transition {
+ struct list_head node;
+ enum dev_st_transition state;
+};
+
+struct mhi_ring {
+ dma_addr_t dma_handle;
+ dma_addr_t iommu_base;
+ __le64 *ctxt_wp; /* point to ctxt wp */
+ void *pre_aligned;
+ void *base;
+ void *rp;
+ void *wp;
+ size_t el_size;
+ size_t len;
+ size_t elements;
+ size_t alloc_size;
+ void __iomem *db_addr;
+};
+
+struct mhi_cmd {
+ struct mhi_ring ring;
+ spinlock_t lock;
+};
+
+struct mhi_buf_info {
+ void *v_addr;
+ void *bb_addr;
+ void *wp;
+ void *cb_buf;
+ dma_addr_t p_addr;
+ size_t len;
+ enum dma_data_direction dir;
+ bool used; /* Indicates whether the buffer is used or not */
+ bool pre_mapped; /* Already pre-mapped by client */
+};
+
+struct mhi_event {
+ struct mhi_controller *mhi_cntrl;
+ struct mhi_chan *mhi_chan; /* dedicated to channel */
+ u32 er_index;
+ u32 intmod;
+ u32 irq;
+ int chan; /* this event ring is dedicated to a channel (optional) */
+ u32 priority;
+ enum mhi_er_data_type data_type;
+ struct mhi_ring ring;
+ struct db_cfg db_cfg;
+ struct tasklet_struct task;
+ spinlock_t lock;
+ int (*process_event)(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ u32 event_quota);
+ bool hw_ring;
+ bool cl_manage;
+ bool offload_ev; /* managed by a device driver */
+};
+
+struct mhi_chan {
+ const char *name;
+ /*
+ * Important: When consuming, increment tre_ring first and when
+ * releasing, decrement buf_ring first. If tre_ring has space, buf_ring
+	 * is guaranteed to have space so we do not need to check both rings.
+ */
+ struct mhi_ring buf_ring;
+ struct mhi_ring tre_ring;
+ u32 chan;
+ u32 er_index;
+ u32 intmod;
+ enum mhi_ch_type type;
+ enum dma_data_direction dir;
+ struct db_cfg db_cfg;
+ enum mhi_ch_ee_mask ee_mask;
+ enum mhi_ch_state ch_state;
+ enum mhi_ev_ccs ccs;
+ struct mhi_device *mhi_dev;
+ void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result);
+ struct mutex mutex;
+ struct completion completion;
+ rwlock_t lock;
+ struct list_head node;
+ bool lpm_notify;
+ bool configured;
+ bool offload_ch;
+ bool pre_alloc;
+ bool wake_capable;
+};
+
+/* Default MHI timeout */
+#define MHI_TIMEOUT_MS (1000)
+
+/* debugfs related functions */
+#ifdef CONFIG_MHI_BUS_DEBUG
+void mhi_create_debugfs(struct mhi_controller *mhi_cntrl);
+void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl);
+void mhi_debugfs_init(void);
+void mhi_debugfs_exit(void);
+#else
+static inline void mhi_create_debugfs(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static inline void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static inline void mhi_debugfs_init(void)
+{
+}
+
+static inline void mhi_debugfs_exit(void)
+{
+}
+#endif
+
+struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl);
+
+int mhi_destroy_device(struct device *dev, void *data);
+void mhi_create_devices(struct mhi_controller *mhi_cntrl);
+
+int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info **image_info, size_t alloc_size);
+void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
+ struct image_info *image_info);
+
+/* Power management APIs */
+enum mhi_pm_state __must_check mhi_tryset_pm_state(
+ struct mhi_controller *mhi_cntrl,
+ enum mhi_pm_state state);
+const char *to_mhi_pm_state_str(u32 state);
+int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
+ enum dev_st_transition state);
+void mhi_pm_st_worker(struct work_struct *work);
+void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl);
+int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
+int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
+void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl);
+int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
+int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
+int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ enum mhi_cmd_type cmd);
+int mhi_download_amss_image(struct mhi_controller *mhi_cntrl);
+static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl)
+{
+ return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
+ mhi_cntrl->dev_state <= MHI_STATE_M3_FAST);
+}
+
+static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl)
+{
+ pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+}
+
+/* Register access methods */
+void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg,
+ void __iomem *db_addr, dma_addr_t db_val);
+void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_mode, void __iomem *db_addr,
+ dma_addr_t db_val);
+int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 *out);
+int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 mask,
+ u32 *out);
+int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 mask,
+ u32 val, u32 delayus);
+void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
+ u32 offset, u32 val);
+int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 mask,
+ u32 val);
+void mhi_ring_er_db(struct mhi_event *mhi_event);
+void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
+ dma_addr_t db_val);
+void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd);
+void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+
+/* Initialization methods */
+int mhi_init_mmio(struct mhi_controller *mhi_cntrl);
+int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
+void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
+int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
+void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
+int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+ struct image_info *img_info);
+void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
+
+/* Automatically allocate and queue inbound buffers */
+#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0)
+int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan, unsigned int flags);
+
+int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+void mhi_reset_chan(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
+
+/* Event processing methods */
+void mhi_ctrl_ev_task(unsigned long data);
+void mhi_ev_task(unsigned long data);
+int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event, u32 event_quota);
+int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event, u32 event_quota);
+
+/* ISR handlers */
+irqreturn_t mhi_irq_handler(int irq_number, void *dev);
+irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev);
+irqreturn_t mhi_intvec_handler(int irq_number, void *dev);
+
+int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ struct mhi_buf_info *info, enum mhi_flags flags);
+int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info);
+
+#endif /* _MHI_INT_H */
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
new file mode 100644
index 0000000000..d6653cbcf9
--- /dev/null
+++ b/drivers/bus/mhi/host/main.c
@@ -0,0 +1,1693 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 *out)
+{
+ return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
+}
+
+int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset,
+ u32 mask, u32 *out)
+{
+ u32 tmp;
+ int ret;
+
+ ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
+ if (ret)
+ return ret;
+
+ *out = (tmp & mask) >> __ffs(mask);
+
+ return 0;
+}
+
+int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset,
+ u32 mask, u32 val, u32 delayus)
+{
+ int ret;
+ u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
+
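+	/* Poll every delayus microseconds until the field matches val or timeout_ms expires */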
+ while (retry--) {
+ ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out);
+ if (ret)
+ return ret;
+
+ if (out == val)
+ return 0;
+
+ fsleep(delayus);
+ }
+
+ return -ETIMEDOUT;
+}
+
+void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
+ u32 offset, u32 val)
+{
+ mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
+}
+
+int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 mask,
+ u32 val)
+{
+ int ret;
+ u32 tmp;
+
+ ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
+ if (ret)
+ return ret;
+
+ tmp &= ~mask;
+ tmp |= (val << __ffs(mask));
+ mhi_write_reg(mhi_cntrl, base, offset, tmp);
+
+ return 0;
+}
+
+void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
+ dma_addr_t db_val)
+{
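+	/* The 64-bit doorbell value is written as two 32-bit halves, upper word first */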
+ mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
+ mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
+}
+
+void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_cfg,
+ void __iomem *db_addr,
+ dma_addr_t db_val)
+{
+ if (db_cfg->db_mode) {
+ db_cfg->db_val = db_val;
+ mhi_write_db(mhi_cntrl, db_addr, db_val);
+ db_cfg->db_mode = 0;
+ }
+}
+
+void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
+ struct db_cfg *db_cfg,
+ void __iomem *db_addr,
+ dma_addr_t db_val)
+{
+ db_cfg->db_val = db_val;
+ mhi_write_db(mhi_cntrl, db_addr, db_val);
+}
+
+void mhi_ring_er_db(struct mhi_event *mhi_event)
+{
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
+ ring->db_addr, le64_to_cpu(*ring->ctxt_wp));
+}
+
+void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
+{
+ dma_addr_t db;
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ db = ring->iommu_base + (ring->wp - ring->base);
+ *ring->ctxt_wp = cpu_to_le64(db);
+ mhi_write_db(mhi_cntrl, ring->db_addr, db);
+}
+
+void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *ring = &mhi_chan->tre_ring;
+ dma_addr_t db;
+
+ db = ring->iommu_base + (ring->wp - ring->base);
+
+ /*
+ * Writes to the new ring element must be visible to the hardware
+	 * before letting the hardware know there is a new element to fetch.
+ */
+ dma_wmb();
+ *ring->ctxt_wp = cpu_to_le64(db);
+
+ mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
+ ring->db_addr, db);
+}
+
+enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
+{
+ u32 exec;
+ int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
+
+ return (ret) ? MHI_EE_MAX : exec;
+}
+EXPORT_SYMBOL_GPL(mhi_get_exec_env);
+
+enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
+{
+ u32 state;
+ int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
+ MHISTATUS_MHISTATE_MASK, &state);
+ return ret ? MHI_STATE_MAX : state;
+}
+EXPORT_SYMBOL_GPL(mhi_get_mhi_state);
+
+void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
+{
+ if (mhi_cntrl->reset) {
+ mhi_cntrl->reset(mhi_cntrl);
+ return;
+ }
+
+ /* Generic MHI SoC reset */
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
+ MHI_SOC_RESET_REQ);
+}
+EXPORT_SYMBOL_GPL(mhi_soc_reset);
+
+int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
+ buf_info->v_addr, buf_info->len,
+ buf_info->dir);
+ if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
+ return -ENOMEM;
+
+ return 0;
+}
+
+int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
+ &buf_info->p_addr, GFP_ATOMIC);
+
+ if (!buf)
+ return -ENOMEM;
+
+ if (buf_info->dir == DMA_TO_DEVICE)
+ memcpy(buf, buf_info->v_addr, buf_info->len);
+
+ buf_info->bb_addr = buf;
+
+ return 0;
+}
+
+void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
+ buf_info->dir);
+}
+
+void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ if (buf_info->dir == DMA_FROM_DEVICE)
+ memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
+
+ dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
+ buf_info->bb_addr, buf_info->p_addr);
+}
+
+static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ int nr_el;
+
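+	/*
+	 * One element is always left unused (the "- 1" below) so that a full
+	 * ring can be distinguished from an empty one (rp == wp means empty).
+	 */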
+ if (ring->wp < ring->rp) {
+ nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
+ } else {
+ nr_el = (ring->rp - ring->base) / ring->el_size;
+ nr_el += ((ring->base + ring->len - ring->wp) /
+ ring->el_size) - 1;
+ }
+
+ return nr_el;
+}
+
+static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
+{
+ return (addr - ring->iommu_base) + ring->base;
+}
+
+static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ ring->wp += ring->el_size;
+ if (ring->wp >= (ring->base + ring->len))
+ ring->wp = ring->base;
+ /* smp update */
+ smp_wmb();
+}
+
+static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ ring->rp += ring->el_size;
+ if (ring->rp >= (ring->base + ring->len))
+ ring->rp = ring->base;
+ /* smp update */
+ smp_wmb();
+}
+
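+/*
+ * A ring pointer supplied by the device is valid only if it lies within
+ * the ring's IOMMU address range and is aligned to the ring element size.
+ */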
+static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
+{
+ return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len &&
+ !(addr & (sizeof(struct mhi_ring_element) - 1));
+}
+
+int mhi_destroy_device(struct device *dev, void *data)
+{
+ struct mhi_chan *ul_chan, *dl_chan;
+ struct mhi_device *mhi_dev;
+ struct mhi_controller *mhi_cntrl;
+ enum mhi_ee_type ee = MHI_EE_MAX;
+
+ if (dev->bus != &mhi_bus_type)
+ return 0;
+
+ mhi_dev = to_mhi_device(dev);
+ mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	/* Only destroy virtual devices that are attached to the bus */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ ul_chan = mhi_dev->ul_chan;
+ dl_chan = mhi_dev->dl_chan;
+
+ /*
+	 * If an execution environment is specified, remove only the devices
+	 * whose channels started in it (based on their ee_mask), since we are
+	 * moving on to a different execution environment
+ */
+ if (data)
+ ee = *(enum mhi_ee_type *)data;
+
+ /*
+ * For the suspend and resume case, this function will get called
+ * without mhi_unregister_controller(). Hence, we need to drop the
+ * references to mhi_dev created for ul and dl channels. We can
+ * be sure that there will be no instances of mhi_dev left after
+ * this.
+ */
+ if (ul_chan) {
+ if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
+ return 0;
+
+ put_device(&ul_chan->mhi_dev->dev);
+ }
+
+ if (dl_chan) {
+ if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
+ return 0;
+
+ put_device(&dl_chan->mhi_dev->dev);
+ }
+
+ dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
+ mhi_dev->name);
+
+ /* Notify the client and remove the device from MHI bus */
+ device_del(dev);
+ put_device(dev);
+
+ return 0;
+}
+
+int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
+ enum dma_data_direction dir)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
+ mhi_dev->ul_chan : mhi_dev->dl_chan;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+ return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
+}
+EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);
+
+void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
+{
+ struct mhi_driver *mhi_drv;
+
+ if (!mhi_dev->dev.driver)
+ return;
+
+ mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
+
+ if (mhi_drv->status_cb)
+ mhi_drv->status_cb(mhi_dev, cb_reason);
+}
+EXPORT_SYMBOL_GPL(mhi_notify);
+
+/* Bind MHI channels to MHI devices */
+void mhi_create_devices(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_chan *mhi_chan;
+ struct mhi_device *mhi_dev;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int i, ret;
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ if (!mhi_chan->configured || mhi_chan->mhi_dev ||
+ !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
+ continue;
+ mhi_dev = mhi_alloc_device(mhi_cntrl);
+ if (IS_ERR(mhi_dev))
+ return;
+
+ mhi_dev->dev_type = MHI_DEVICE_XFER;
+ switch (mhi_chan->dir) {
+ case DMA_TO_DEVICE:
+ mhi_dev->ul_chan = mhi_chan;
+ mhi_dev->ul_chan_id = mhi_chan->chan;
+ break;
+ case DMA_FROM_DEVICE:
+ /* We use dl_chan as offload channels */
+ mhi_dev->dl_chan = mhi_chan;
+ mhi_dev->dl_chan_id = mhi_chan->chan;
+ break;
+ default:
+ dev_err(dev, "Direction not supported\n");
+ put_device(&mhi_dev->dev);
+ return;
+ }
+
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+
+		/* If the next channel has the same name, bind it to this device as the other direction */
+ if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
+ if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
+ i++;
+ mhi_chan++;
+ if (mhi_chan->dir == DMA_TO_DEVICE) {
+ mhi_dev->ul_chan = mhi_chan;
+ mhi_dev->ul_chan_id = mhi_chan->chan;
+ } else {
+ mhi_dev->dl_chan = mhi_chan;
+ mhi_dev->dl_chan_id = mhi_chan->chan;
+ }
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+ }
+ }
+
+		/* The channel name is the same for both UL and DL */
+ mhi_dev->name = mhi_chan->name;
+ dev_set_name(&mhi_dev->dev, "%s_%s",
+ dev_name(&mhi_cntrl->mhi_dev->dev),
+ mhi_dev->name);
+
+ /* Init wakeup source if available */
+ if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
+ device_init_wakeup(&mhi_dev->dev, true);
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ put_device(&mhi_dev->dev);
+ }
+}
+
+irqreturn_t mhi_irq_handler(int irq_number, void *dev)
+{
+ struct mhi_event *mhi_event = dev;
+ struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+ struct mhi_event_ctxt *er_ctxt;
+ struct mhi_ring *ev_ring = &mhi_event->ring;
+ dma_addr_t ptr;
+ void *dev_rp;
+
+ /*
+	 * If CONFIG_DEBUG_SHIRQ is set, the IRQ handler will get invoked during __free_irq()
+	 * and by that time mhi_ctxt would have been freed. So check for the existence of
+	 * mhi_ctxt before handling the IRQs.
+ */
+ if (!mhi_cntrl->mhi_ctxt) {
+ dev_dbg(&mhi_cntrl->mhi_dev->dev,
+ "mhi_ctxt has been freed\n");
+ return IRQ_HANDLED;
+ }
+
+ er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ ptr = le64_to_cpu(er_ctxt->rp);
+
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ return IRQ_HANDLED;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
+
+ /* Only proceed if event ring has pending events */
+ if (ev_ring->rp == dev_rp)
+ return IRQ_HANDLED;
+
+	/* For client-managed event rings, notify the client of pending data */
+ if (mhi_event->cl_manage) {
+ struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
+ struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
+
+ if (mhi_dev)
+ mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
+ } else {
+ tasklet_schedule(&mhi_event->task);
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
+{
+ struct mhi_controller *mhi_cntrl = priv;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state state;
+ enum mhi_pm_state pm_state = 0;
+ enum mhi_ee_type ee;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ goto exit_intvec;
+ }
+
+ state = mhi_get_mhi_state(mhi_cntrl);
+ ee = mhi_get_exec_env(mhi_cntrl);
+ dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee),
+ mhi_state_str(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(ee), mhi_state_str(state));
+
+ if (state == MHI_STATE_SYS_ERR) {
+ dev_dbg(dev, "System error detected\n");
+ pm_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_DETECT);
+ }
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ if (pm_state != MHI_PM_SYS_ERR_DETECT)
+ goto exit_intvec;
+
+ switch (ee) {
+ case MHI_EE_RDDM:
+ /* proceed if power down is not already in progress */
+ if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
+ mhi_cntrl->ee = ee;
+ wake_up_all(&mhi_cntrl->state_event);
+ }
+ break;
+ case MHI_EE_PBL:
+ case MHI_EE_EDL:
+ case MHI_EE_PTHRU:
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
+ mhi_cntrl->ee = ee;
+ wake_up_all(&mhi_cntrl->state_event);
+ mhi_pm_sys_err_handler(mhi_cntrl);
+ break;
+ default:
+ wake_up_all(&mhi_cntrl->state_event);
+ mhi_pm_sys_err_handler(mhi_cntrl);
+ break;
+ }
+
+exit_intvec:
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
+{
+ struct mhi_controller *mhi_cntrl = dev;
+
+ /* Wake up events waiting for state change */
+ wake_up_all(&mhi_cntrl->state_event);
+
+ return IRQ_WAKE_THREAD;
+}
+
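+/*
+ * Recycle a consumed event ring element: advance both the write pointer
+ * (handing the element back to the device through the context WP) and the
+ * local read pointer, then make the updates visible to all cores.
+ */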
+static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ /* Update the WP */
+ ring->wp += ring->el_size;
+
+ if (ring->wp >= (ring->base + ring->len))
+ ring->wp = ring->base;
+
+ *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base));
+
+ /* Update the RP */
+ ring->rp += ring->el_size;
+ if (ring->rp >= (ring->base + ring->len))
+ ring->rp = ring->base;
+
+ /* Update to all cores */
+ smp_wmb();
+}
+
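+/*
+ * Process a transfer completion event for a channel: unmap the completed
+ * buffers, hand them back to the client, recycle pre-allocated buffers and
+ * re-ring the channel doorbell for OOB/DB_MODE events.
+ */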
+static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring_element *event,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring, *tre_ring;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_result result;
+ unsigned long flags = 0;
+ u32 ev_code;
+
+ ev_code = MHI_TRE_GET_EV_CODE(event);
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+
+ result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
+ -EOVERFLOW : 0;
+
+	/*
+	 * If it's a DB event, we need to take the lock as a writer
+	 * with interrupts disabled, because we have to update the DB
+	 * register and another thread could be doing the same.
+	 */
+ if (ev_code >= MHI_EV_CC_OOB)
+ write_lock_irqsave(&mhi_chan->lock, flags);
+ else
+ read_lock_bh(&mhi_chan->lock);
+
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
+ goto end_process_tx_event;
+
+ switch (ev_code) {
+ case MHI_EV_CC_OVERFLOW:
+ case MHI_EV_CC_EOB:
+ case MHI_EV_CC_EOT:
+ {
+ dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
+ struct mhi_ring_element *local_rp, *ev_tre;
+ void *dev_rp;
+ struct mhi_buf_info *buf_info;
+ u16 xfer_len;
+
+ if (!is_valid_ring_ptr(tre_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event element points outside of the tre ring\n");
+ break;
+ }
+		/* Get the TRE this event points to */
+ ev_tre = mhi_to_virtual(tre_ring, ptr);
+
+ dev_rp = ev_tre + 1;
+ if (dev_rp >= (tre_ring->base + tre_ring->len))
+ dev_rp = tre_ring->base;
+
+ result.dir = mhi_chan->dir;
+
+ local_rp = tre_ring->rp;
+ while (local_rp != dev_rp) {
+ buf_info = buf_ring->rp;
+ /* If it's the last TRE, get length from the event */
+ if (local_rp == ev_tre)
+ xfer_len = MHI_TRE_GET_EV_LEN(event);
+ else
+ xfer_len = buf_info->len;
+
+ /* Unmap if it's not pre-mapped by client */
+ if (likely(!buf_info->pre_mapped))
+ mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
+
+ result.buf_addr = buf_info->cb_buf;
+
+ /* truncate to buf len if xfer_len is larger */
+ result.bytes_xferd =
+ min_t(u16, xfer_len, buf_info->len);
+ mhi_del_ring_element(mhi_cntrl, buf_ring);
+ mhi_del_ring_element(mhi_cntrl, tre_ring);
+ local_rp = tre_ring->rp;
+
+ read_unlock_bh(&mhi_chan->lock);
+
+ /* notify client */
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+ if (mhi_chan->dir == DMA_TO_DEVICE) {
+ atomic_dec(&mhi_cntrl->pending_pkts);
+				/* Release the reference taken in mhi_queue() */
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ }
+
+			/*
+			 * Recycle the buffer if it is pre-allocated. If there
+			 * is an error, not much we can do apart from dropping
+			 * the packet.
+			 */
+ if (mhi_chan->pre_alloc) {
+ if (mhi_queue_buf(mhi_chan->mhi_dev,
+ mhi_chan->dir,
+ buf_info->cb_buf,
+ buf_info->len, MHI_EOT)) {
+ dev_err(dev,
+ "Error recycling buffer for chan:%d\n",
+ mhi_chan->chan);
+ kfree(buf_info->cb_buf);
+ }
+ }
+
+ read_lock_bh(&mhi_chan->lock);
+ }
+ break;
+ } /* CC_EOT */
+ case MHI_EV_CC_OOB:
+ case MHI_EV_CC_DB_MODE:
+ {
+ unsigned long pm_lock_flags;
+
+ mhi_chan->db_cfg.db_mode = 1;
+ read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
+ if (tre_ring->wp != tre_ring->rp &&
+ MHI_DB_ACCESS_VALID(mhi_cntrl)) {
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ }
+ read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
+ break;
+ }
+ case MHI_EV_CC_BAD_TRE:
+ default:
+ dev_err(dev, "Unknown event 0x%x\n", ev_code);
+ break;
+ } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */
+
+end_process_tx_event:
+ if (ev_code >= MHI_EV_CC_OOB)
+ write_unlock_irqrestore(&mhi_chan->lock, flags);
+ else
+ read_unlock_bh(&mhi_chan->lock);
+
+ return 0;
+}
+
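+/*
+ * Process an RSC transfer completion event: the event carries a cookie that
+ * is the offset of the buffer descriptor within the buffer ring, so the
+ * completed buffer can be returned to the client directly.
+ */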
+static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring_element *event,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring, *tre_ring;
+ struct mhi_buf_info *buf_info;
+ struct mhi_result result;
+ int ev_code;
+ u32 cookie; /* offset to local descriptor */
+ u16 xfer_len;
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+
+ ev_code = MHI_TRE_GET_EV_CODE(event);
+ cookie = MHI_TRE_GET_EV_COOKIE(event);
+ xfer_len = MHI_TRE_GET_EV_LEN(event);
+
+	/* Received an out-of-bounds cookie */
+ WARN_ON(cookie >= buf_ring->len);
+
+ buf_info = buf_ring->base + cookie;
+
+ result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
+ -EOVERFLOW : 0;
+
+ /* truncate to buf len if xfer_len is larger */
+ result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
+ result.buf_addr = buf_info->cb_buf;
+ result.dir = mhi_chan->dir;
+
+ read_lock_bh(&mhi_chan->lock);
+
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
+ goto end_process_rsc_event;
+
+ WARN_ON(!buf_info->used);
+
+ /* notify the client */
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+	/*
+	 * Note: We're arbitrarily incrementing RP even though the completion
+	 * packet we processed might not be the same one. We can do this
+	 * because the device is guaranteed to cache descriptors in the order
+	 * it receives them, so even though the completion event is for a
+	 * different descriptor, we can reuse all descriptors in between.
+	 * Example:
+	 * The transfer ring has descriptors: A, B, C, D
+	 * The last descriptor the host queued is D (WP) and the first
+	 * descriptor the host queued is A (RP).
+	 * The completion event we just serviced is for descriptor C.
+	 * Then we can safely queue descriptors to replace A, B, and C
+	 * even though the host did not receive completions for them.
+	 */
+ mhi_del_ring_element(mhi_cntrl, tre_ring);
+ buf_info->used = false;
+
+end_process_rsc_event:
+ read_unlock_bh(&mhi_chan->lock);
+
+ return 0;
+}
+
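+/*
+ * Process a command completion event: locate the command packet it refers to,
+ * record the completion code for the targeted channel and wake up the waiter.
+ */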
+static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring_element *tre)
+{
+ dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
+ struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+ struct mhi_ring *mhi_ring = &cmd_ring->ring;
+ struct mhi_ring_element *cmd_pkt;
+ struct mhi_chan *mhi_chan;
+ u32 chan;
+
+ if (!is_valid_ring_ptr(mhi_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event element points outside of the cmd ring\n");
+ return;
+ }
+
+ cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
+
+ chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
+
+ if (chan < mhi_cntrl->max_chan &&
+ mhi_cntrl->mhi_chan[chan].configured) {
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ write_lock_bh(&mhi_chan->lock);
+ mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
+ complete(&mhi_chan->completion);
+ write_unlock_bh(&mhi_chan->lock);
+ } else {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Completion packet for invalid channel ID: %d\n", chan);
+ }
+
+ mhi_del_ring_element(mhi_cntrl, mhi_ring);
+}
+
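+/*
+ * Process events on a control event ring: state change, command completion,
+ * execution environment and bandwidth request events, plus any transfer
+ * completions routed to this ring.
+ */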
+int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ u32 event_quota)
+{
+ struct mhi_ring_element *dev_rp, *local_rp;
+ struct mhi_ring *ev_ring = &mhi_event->ring;
+ struct mhi_event_ctxt *er_ctxt =
+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ struct mhi_chan *mhi_chan;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 chan;
+ int count = 0;
+ dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
+
+ /*
+ * This is a quick check to avoid unnecessary event processing
+ * in case MHI is already in error state, but it's still possible
+ * to transition to error state while processing events
+ */
+ if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
+ return -EIO;
+
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ return -EIO;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
+ local_rp = ev_ring->rp;
+
+ while (dev_rp != local_rp) {
+ enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
+
+ switch (type) {
+ case MHI_PKT_TYPE_BW_REQ_EVENT:
+ {
+ struct mhi_link_info *link_info;
+
+ link_info = &mhi_cntrl->mhi_link_info;
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ link_info->target_link_speed =
+ MHI_TRE_GET_EV_LINKSPEED(local_rp);
+ link_info->target_link_width =
+ MHI_TRE_GET_EV_LINKWIDTH(local_rp);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_dbg(dev, "Received BW_REQ event\n");
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
+ break;
+ }
+ case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
+ {
+ enum mhi_state new_state;
+
+ new_state = MHI_TRE_GET_EV_STATE(local_rp);
+
+ dev_dbg(dev, "State change event to state: %s\n",
+ mhi_state_str(new_state));
+
+ switch (new_state) {
+ case MHI_STATE_M0:
+ mhi_pm_m0_transition(mhi_cntrl);
+ break;
+ case MHI_STATE_M1:
+ mhi_pm_m1_transition(mhi_cntrl);
+ break;
+ case MHI_STATE_M3:
+ mhi_pm_m3_transition(mhi_cntrl);
+ break;
+ case MHI_STATE_SYS_ERR:
+ {
+ enum mhi_pm_state pm_state;
+
+ dev_dbg(dev, "System error detected\n");
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ pm_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_DETECT);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (pm_state == MHI_PM_SYS_ERR_DETECT)
+ mhi_pm_sys_err_handler(mhi_cntrl);
+ break;
+ }
+ default:
+ dev_err(dev, "Invalid state: %s\n",
+ mhi_state_str(new_state));
+ }
+
+ break;
+ }
+ case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
+ mhi_process_cmd_completion(mhi_cntrl, local_rp);
+ break;
+ case MHI_PKT_TYPE_EE_EVENT:
+ {
+ enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
+ enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
+
+ dev_dbg(dev, "Received EE event: %s\n",
+ TO_MHI_EXEC_STR(event));
+ switch (event) {
+ case MHI_EE_SBL:
+ st = DEV_ST_TRANSITION_SBL;
+ break;
+ case MHI_EE_WFW:
+ case MHI_EE_AMSS:
+ st = DEV_ST_TRANSITION_MISSION_MODE;
+ break;
+ case MHI_EE_FP:
+ st = DEV_ST_TRANSITION_FP;
+ break;
+ case MHI_EE_RDDM:
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->ee = event;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ wake_up_all(&mhi_cntrl->state_event);
+ break;
+ default:
+ dev_err(dev,
+				"Unhandled EE event: 0x%x\n", event);
+ }
+ if (st != DEV_ST_TRANSITION_MAX)
+ mhi_queue_state_transition(mhi_cntrl, st);
+
+ break;
+ }
+ case MHI_PKT_TYPE_TX_EVENT:
+ chan = MHI_TRE_GET_EV_CHID(local_rp);
+
+ WARN_ON(chan >= mhi_cntrl->max_chan);
+
+ /*
+ * Only process the event ring elements whose channel
+ * ID is within the maximum supported range.
+ */
+ if (chan < mhi_cntrl->max_chan) {
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ if (!mhi_chan->configured)
+ break;
+ parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+ }
+ break;
+ default:
+ dev_err(dev, "Unhandled event type: %d\n", type);
+ break;
+ }
+
+ mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+ local_rp = ev_ring->rp;
+
+ ptr = le64_to_cpu(er_ctxt->rp);
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ return -EIO;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
+ count++;
+ }
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+
+ /* Ring EV DB only if there is any pending element to process */
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
+ mhi_ring_er_db(mhi_event);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return count;
+}
+
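+/*
+ * Process events on a data event ring: only transfer and RSC transfer
+ * completion events are handled here, bounded by the given event quota.
+ */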
+int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ u32 event_quota)
+{
+ struct mhi_ring_element *dev_rp, *local_rp;
+ struct mhi_ring *ev_ring = &mhi_event->ring;
+ struct mhi_event_ctxt *er_ctxt =
+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ int count = 0;
+ u32 chan;
+ struct mhi_chan *mhi_chan;
+ dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
+
+ if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
+ return -EIO;
+
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ return -EIO;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
+ local_rp = ev_ring->rp;
+
+ while (dev_rp != local_rp && event_quota > 0) {
+ enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
+
+ chan = MHI_TRE_GET_EV_CHID(local_rp);
+
+ WARN_ON(chan >= mhi_cntrl->max_chan);
+
+ /*
+ * Only process the event ring elements whose channel
+ * ID is within the maximum supported range.
+ */
+ if (chan < mhi_cntrl->max_chan &&
+ mhi_cntrl->mhi_chan[chan].configured) {
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+
+ if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
+ parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
+ parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ }
+ }
+
+ mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+ local_rp = ev_ring->rp;
+
+ ptr = le64_to_cpu(er_ctxt->rp);
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ return -EIO;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
+ count++;
+ }
+ read_lock_bh(&mhi_cntrl->pm_lock);
+
+ /* Ring EV DB only if there is any pending element to process */
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
+ mhi_ring_er_db(mhi_event);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return count;
+}
+
+void mhi_ev_task(unsigned long data)
+{
+ struct mhi_event *mhi_event = (struct mhi_event *)data;
+ struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+
+ /* process all pending events */
+ spin_lock_bh(&mhi_event->lock);
+ mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+ spin_unlock_bh(&mhi_event->lock);
+}
+
+void mhi_ctrl_ev_task(unsigned long data)
+{
+ struct mhi_event *mhi_event = (struct mhi_event *)data;
+ struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state state;
+ enum mhi_pm_state pm_state = 0;
+ int ret;
+
+	/*
+	 * We can check the PM state without a lock here because there is no
+	 * way the PM state can change from reg access valid to no access
+	 * while this thread is being executed.
+	 */
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+		/*
+		 * We may have a pending event but are not allowed to
+		 * process it since we are probably in a suspended state,
+		 * so trigger a resume.
+		 */
+ mhi_trigger_resume(mhi_cntrl);
+
+ return;
+ }
+
+ /* Process ctrl events */
+ ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+
+ /*
+ * We received an IRQ but no events to process, maybe device went to
+ * SYS_ERR state? Check the state to confirm.
+ */
+ if (!ret) {
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ state = mhi_get_mhi_state(mhi_cntrl);
+ if (state == MHI_STATE_SYS_ERR) {
+ dev_dbg(dev, "System error detected\n");
+ pm_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_DETECT);
+ }
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (pm_state == MHI_PM_SYS_ERR_DETECT)
+ mhi_pm_sys_err_handler(mhi_cntrl);
+ }
+}
+
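+/* The ring is full when advancing WP by one element would make it meet RP */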
+static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
+{
+ void *tmp = ring->wp + ring->el_size;
+
+ if (tmp >= (ring->base + ring->len))
+ tmp = ring->base;
+
+ return (tmp == ring->rp);
+}
+
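+/*
+ * Common queuing path for skb, DMA and raw buffers: generate a TRE for the
+ * buffer, take a runtime PM reference, assert dev_wake and ring the channel
+ * doorbell if register access is currently allowed.
+ */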
+static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
+ enum dma_data_direction dir, enum mhi_flags mflags)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
+ mhi_dev->dl_chan;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+ unsigned long flags;
+ int ret;
+
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
+ return -EIO;
+
+ ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
+ if (unlikely(ret))
+ return -EAGAIN;
+
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
+ if (unlikely(ret))
+ return ret;
+
+ read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+
+	/*
+	 * Packet is queued, take a usage ref to exit M3 if necessary.
+	 * For host->device buffers, the balancing put is done on buffer completion.
+	 * For device->host buffers, the balancing put is done after ringing the DB.
+	 */
+ mhi_cntrl->runtime_get(mhi_cntrl);
+
+	/* Assert dev_wake (to exit/prevent M1/M2) */
+ mhi_cntrl->wake_toggle(mhi_cntrl);
+
+ if (mhi_chan->dir == DMA_TO_DEVICE)
+ atomic_inc(&mhi_cntrl->pending_pkts);
+
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+
+ if (dir == DMA_FROM_DEVICE)
+ mhi_cntrl->runtime_put(mhi_cntrl);
+
+ read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+
+ return ret;
+}
+
+int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct sk_buff *skb, size_t len, enum mhi_flags mflags)
+{
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
+ mhi_dev->dl_chan;
+ struct mhi_buf_info buf_info = { };
+
+ buf_info.v_addr = skb->data;
+ buf_info.cb_buf = skb;
+ buf_info.len = len;
+
+ if (unlikely(mhi_chan->pre_alloc))
+ return -EINVAL;
+
+ return mhi_queue(mhi_dev, &buf_info, dir, mflags);
+}
+EXPORT_SYMBOL_GPL(mhi_queue_skb);
+
+int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
+{
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
+ mhi_dev->dl_chan;
+ struct mhi_buf_info buf_info = { };
+
+ buf_info.p_addr = mhi_buf->dma_addr;
+ buf_info.cb_buf = mhi_buf;
+ buf_info.pre_mapped = true;
+ buf_info.len = len;
+
+ if (unlikely(mhi_chan->pre_alloc))
+ return -EINVAL;
+
+ return mhi_queue(mhi_dev, &buf_info, dir, mflags);
+}
+EXPORT_SYMBOL_GPL(mhi_queue_dma);
+
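+/*
+ * Map the buffer (unless it is pre-mapped by the client), populate a transfer
+ * ring element describing it and advance the transfer and buffer ring write
+ * pointers.
+ */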
+int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ struct mhi_buf_info *info, enum mhi_flags flags)
+{
+ struct mhi_ring *buf_ring, *tre_ring;
+ struct mhi_ring_element *mhi_tre;
+ struct mhi_buf_info *buf_info;
+ int eot, eob, chain, bei;
+ int ret;
+
+ /* Protect accesses for reading and incrementing WP */
+ write_lock_bh(&mhi_chan->lock);
+
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+
+ buf_info = buf_ring->wp;
+ WARN_ON(buf_info->used);
+ buf_info->pre_mapped = info->pre_mapped;
+ if (info->pre_mapped)
+ buf_info->p_addr = info->p_addr;
+ else
+ buf_info->v_addr = info->v_addr;
+ buf_info->cb_buf = info->cb_buf;
+ buf_info->wp = tre_ring->wp;
+ buf_info->dir = mhi_chan->dir;
+ buf_info->len = info->len;
+
+ if (!info->pre_mapped) {
+ ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+ if (ret) {
+ write_unlock_bh(&mhi_chan->lock);
+ return ret;
+ }
+ }
+
+ eob = !!(flags & MHI_EOB);
+ eot = !!(flags & MHI_EOT);
+ chain = !!(flags & MHI_CHAIN);
+ bei = !!(mhi_chan->intmod);
+
+ mhi_tre = tre_ring->wp;
+ mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
+ mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
+ mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
+
+ /* increment WP */
+ mhi_add_ring_element(mhi_cntrl, tre_ring);
+ mhi_add_ring_element(mhi_cntrl, buf_ring);
+
+ write_unlock_bh(&mhi_chan->lock);
+
+ return 0;
+}
+
+int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ void *buf, size_t len, enum mhi_flags mflags)
+{
+ struct mhi_buf_info buf_info = { };
+
+ buf_info.v_addr = buf;
+ buf_info.cb_buf = buf;
+ buf_info.len = len;
+
+ return mhi_queue(mhi_dev, &buf_info, dir, mflags);
+}
+EXPORT_SYMBOL_GPL(mhi_queue_buf);
+
+bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
+ mhi_dev->ul_chan : mhi_dev->dl_chan;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+ return mhi_is_ring_full(mhi_cntrl, tre_ring);
+}
+EXPORT_SYMBOL_GPL(mhi_queue_is_full);
+
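+/*
+ * Queue a RESET/STOP/START command for a channel on the primary command ring
+ * and ring the command doorbell if register access is currently allowed.
+ */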
+int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan,
+ enum mhi_cmd_type cmd)
+{
+ struct mhi_ring_element *cmd_tre = NULL;
+ struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+ struct mhi_ring *ring = &mhi_cmd->ring;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int chan = 0;
+
+ if (mhi_chan)
+ chan = mhi_chan->chan;
+
+ spin_lock_bh(&mhi_cmd->lock);
+ if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
+ spin_unlock_bh(&mhi_cmd->lock);
+ return -ENOMEM;
+ }
+
+ /* prepare the cmd tre */
+ cmd_tre = ring->wp;
+ switch (cmd) {
+ case MHI_CMD_RESET_CHAN:
+ cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
+ cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
+ cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
+ break;
+ case MHI_CMD_STOP_CHAN:
+ cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR;
+ cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0;
+ cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan);
+ break;
+ case MHI_CMD_START_CHAN:
+ cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
+ cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
+ cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
+ break;
+ default:
+ dev_err(dev, "Command not supported\n");
+ break;
+ }
+
+ /* queue to hardware */
+ mhi_add_ring_element(mhi_cntrl, ring);
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+ mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ spin_unlock_bh(&mhi_cmd->lock);
+
+ return 0;
+}
+
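+/*
+ * Move a channel to the requested state by sending the corresponding command
+ * and waiting for its completion, with the host and device brought out of
+ * suspend for the duration of the operation.
+ */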
+static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan,
+ enum mhi_ch_state_type to_state)
+{
+ struct device *dev = &mhi_chan->mhi_dev->dev;
+ enum mhi_cmd_type cmd = MHI_CMD_NOP;
+ int ret;
+
+ dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan,
+ TO_CH_STATE_TYPE_STR(to_state));
+
+ switch (to_state) {
+ case MHI_CH_STATE_TYPE_RESET:
+ write_lock_irq(&mhi_chan->lock);
+ if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
+ mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
+ mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
+ write_unlock_irq(&mhi_chan->lock);
+ return -EINVAL;
+ }
+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+ write_unlock_irq(&mhi_chan->lock);
+
+ cmd = MHI_CMD_RESET_CHAN;
+ break;
+ case MHI_CH_STATE_TYPE_STOP:
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
+ return -EINVAL;
+
+ cmd = MHI_CMD_STOP_CHAN;
+ break;
+ case MHI_CH_STATE_TYPE_START:
+ if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
+ mhi_chan->ch_state != MHI_CH_STATE_DISABLED)
+ return -EINVAL;
+
+ cmd = MHI_CMD_START_CHAN;
+ break;
+ default:
+ dev_err(dev, "%d: Channel state update to %s not allowed\n",
+ mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
+ return -EINVAL;
+ }
+
+ /* bring host and device out of suspended states */
+ ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
+ if (ret)
+ return ret;
+ mhi_cntrl->runtime_get(mhi_cntrl);
+
+ reinit_completion(&mhi_chan->completion);
+ ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
+ if (ret) {
+ dev_err(dev, "%d: Failed to send %s channel command\n",
+ mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
+ goto exit_channel_update;
+ }
+
+ ret = wait_for_completion_timeout(&mhi_chan->completion,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
+ dev_err(dev,
+ "%d: Failed to receive %s channel command completion\n",
+ mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
+ ret = -EIO;
+ goto exit_channel_update;
+ }
+
+ ret = 0;
+
+ if (to_state != MHI_CH_STATE_TYPE_RESET) {
+ write_lock_irq(&mhi_chan->lock);
+ mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ?
+ MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP;
+ write_unlock_irq(&mhi_chan->lock);
+ }
+
+ dev_dbg(dev, "%d: Channel state change to %s successful\n",
+ mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
+
+exit_channel_update:
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ mhi_device_put(mhi_cntrl->mhi_dev);
+
+ return ret;
+}
+
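+/*
+ * Tear down a channel: reset it on the device if the current EE allows it,
+ * mark it disabled and release its rings and context unless it is an
+ * offload channel managed by the client.
+ */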
+static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ int ret;
+ struct device *dev = &mhi_chan->mhi_dev->dev;
+
+ mutex_lock(&mhi_chan->mutex);
+
+ if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
+ dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
+ goto exit_unprepare_channel;
+ }
+
+	/* No more event processing for this channel */
+ ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
+ MHI_CH_STATE_TYPE_RESET);
+ if (ret)
+ dev_err(dev, "%d: Failed to reset channel, still resetting\n",
+ mhi_chan->chan);
+
+exit_unprepare_channel:
+ write_lock_irq(&mhi_chan->lock);
+ mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+ write_unlock_irq(&mhi_chan->lock);
+
+ if (!mhi_chan->offload_ch) {
+ mhi_reset_chan(mhi_cntrl, mhi_chan);
+ mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+ }
+ dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan);
+
+ mutex_unlock(&mhi_chan->mutex);
+}
+
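+/*
+ * Bring a channel up: allocate its context (unless offloaded), start it on
+ * the device and, for inbound channels with MHI_CH_INBOUND_ALLOC_BUFS,
+ * pre-fill the transfer ring with buffers.
+ */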
+int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan, unsigned int flags)
+{
+ int ret = 0;
+ struct device *dev = &mhi_chan->mhi_dev->dev;
+
+ if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
+ dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
+ return -ENOTCONN;
+ }
+
+ mutex_lock(&mhi_chan->mutex);
+
+	/* Check if the client manages the channel context for offload channels */
+ if (!mhi_chan->offload_ch) {
+ ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
+ if (ret)
+ goto error_init_chan;
+ }
+
+ ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
+ MHI_CH_STATE_TYPE_START);
+ if (ret)
+ goto error_pm_state;
+
+ if (mhi_chan->dir == DMA_FROM_DEVICE)
+ mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
+
+ /* Pre-allocate buffer for xfer ring */
+ if (mhi_chan->pre_alloc) {
+ int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
+ &mhi_chan->tre_ring);
+ size_t len = mhi_cntrl->buffer_len;
+
+ while (nr_el--) {
+ void *buf;
+ struct mhi_buf_info info = { };
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto error_pre_alloc;
+ }
+
+ /* Prepare transfer descriptors */
+ info.v_addr = buf;
+ info.cb_buf = buf;
+ info.len = len;
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
+ if (ret) {
+ kfree(buf);
+ goto error_pre_alloc;
+ }
+ }
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
+ read_lock_irq(&mhi_chan->lock);
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ read_unlock_irq(&mhi_chan->lock);
+ }
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ }
+
+ mutex_unlock(&mhi_chan->mutex);
+
+ return 0;
+
+error_pm_state:
+ if (!mhi_chan->offload_ch)
+ mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+
+error_init_chan:
+ mutex_unlock(&mhi_chan->mutex);
+
+ return ret;
+
+error_pre_alloc:
+ mutex_unlock(&mhi_chan->mutex);
+ mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+
+ return ret;
+}
+
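+/*
+ * Walk the pending event ring elements and mark the transfer events belonging
+ * to the given channel as stale so they are no longer treated as completions
+ * once the channel is reset.
+ */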
+static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ struct mhi_event_ctxt *er_ctxt,
+ int chan)
+
+{
+ struct mhi_ring_element *dev_rp, *local_rp;
+ struct mhi_ring *ev_ring;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ unsigned long flags;
+ dma_addr_t ptr;
+
+ dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
+
+ ev_ring = &mhi_event->ring;
+
+	/* Mark all pending events related to this channel as stale */
+ spin_lock_irqsave(&mhi_event->lock, flags);
+
+ ptr = le64_to_cpu(er_ctxt->rp);
+ if (!is_valid_ring_ptr(ev_ring, ptr)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event ring rp points outside of the event ring\n");
+ dev_rp = ev_ring->rp;
+ } else {
+ dev_rp = mhi_to_virtual(ev_ring, ptr);
+ }
+
+ local_rp = ev_ring->rp;
+ while (dev_rp != local_rp) {
+ if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
+ chan == MHI_TRE_GET_EV_CHID(local_rp))
+ local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
+ MHI_PKT_TYPE_STALE_EVENT);
+ local_rp++;
+ if (local_rp == (ev_ring->base + ev_ring->len))
+ local_rp = ev_ring->base;
+ }
+
+ dev_dbg(dev, "Finished marking events as stale events\n");
+ spin_unlock_irqrestore(&mhi_event->lock, flags);
+}
+
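+/*
+ * Flush all buffers still queued on a channel: drop the runtime PM references
+ * taken at queue time for outbound buffers, unmap the buffers and either free
+ * pre-allocated ones or return them to the client with -ENOTCONN.
+ */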
+static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
+{
+ struct mhi_ring *buf_ring, *tre_ring;
+ struct mhi_result result;
+
+ /* Reset any pending buffers */
+ buf_ring = &mhi_chan->buf_ring;
+ tre_ring = &mhi_chan->tre_ring;
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ while (tre_ring->rp != tre_ring->wp) {
+ struct mhi_buf_info *buf_info = buf_ring->rp;
+
+ if (mhi_chan->dir == DMA_TO_DEVICE) {
+ atomic_dec(&mhi_cntrl->pending_pkts);
+			/* Release the reference taken in mhi_queue() */
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ }
+
+ if (!buf_info->pre_mapped)
+ mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
+
+ mhi_del_ring_element(mhi_cntrl, buf_ring);
+ mhi_del_ring_element(mhi_cntrl, tre_ring);
+
+ if (mhi_chan->pre_alloc) {
+ kfree(buf_info->cb_buf);
+ } else {
+ result.buf_addr = buf_info->cb_buf;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ }
+ }
+}
+
+void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
+{
+ struct mhi_event *mhi_event;
+ struct mhi_event_ctxt *er_ctxt;
+ int chan = mhi_chan->chan;
+
+ /* Nothing to reset, client doesn't queue buffers */
+ if (mhi_chan->offload_ch)
+ return;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+ er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
+
+ mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
+
+ mhi_reset_data_chan(mhi_cntrl, mhi_chan);
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+}
+
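+/*
+ * Prepare both the UL and DL channels of a device for transfer, unwinding the
+ * already prepared channel if the second one fails.
+ */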
+static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
+{
+ int ret, dir;
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan;
+
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
+ if (!mhi_chan)
+ continue;
+
+ ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
+ if (ret)
+ goto error_open_chan;
+ }
+
+ return 0;
+
+error_open_chan:
+ for (--dir; dir >= 0; dir--) {
+ mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
+ if (!mhi_chan)
+ continue;
+
+ mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+ }
+
+ return ret;
+}
+
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
+{
+ return __mhi_prepare_for_transfer(mhi_dev, 0);
+}
+EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
+
+int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev)
+{
+ return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
+}
+EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue);
+
+void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan;
+ int dir;
+
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+ if (!mhi_chan)
+ continue;
+
+ mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+ }
+}
+EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
new file mode 100644
index 0000000000..08f3f039db
--- /dev/null
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -0,0 +1,1292 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MHI PCI driver - MHI over PCI controller driver
+ *
+ * This module is a generic driver for registering MHI-over-PCI devices,
+ * such as PCIe QCOM modems.
+ *
+ * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
+#define MHI_PCI_DEFAULT_BAR_NUM 0
+
+#define MHI_POST_RESET_DELAY_MS 2000
+
+#define HEALTH_CHECK_PERIOD (HZ * 2)
+
+/* PCI VID definitions */
+#define PCI_VENDOR_ID_THALES 0x1269
+#define PCI_VENDOR_ID_QUECTEL 0x1eac
+
+/**
+ * struct mhi_pci_dev_info - MHI PCI device specific information
+ * @config: MHI controller configuration
+ * @name: name of the PCI module
+ * @fw: firmware path (if any)
+ * @edl: emergency download mode firmware path (if any)
+ * @bar_num: PCI base address register to use for MHI MMIO register space
+ * @dma_data_width: DMA transfer word size (32 or 64 bits)
+ * @mru_default: default MRU size for MBIM network packets
+ * @sideband_wake: Device uses a dedicated sideband GPIO for wakeup instead
+ * of inband wake support (such as sdx24)
+ */
+struct mhi_pci_dev_info {
+ const struct mhi_controller_config *config;
+ const char *name;
+ const char *fw;
+ const char *edl;
+ unsigned int bar_num;
+ unsigned int dma_data_width;
+ unsigned int mru_default;
+ bool sideband_wake;
+};
+
+#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_TO_DEVICE, \
+ .ee_mask = BIT(MHI_EE_AMSS), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+ } \
+
+#define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_FROM_DEVICE, \
+ .ee_mask = BIT(MHI_EE_AMSS), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+ }
+
+#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_FROM_DEVICE, \
+ .ee_mask = BIT(MHI_EE_AMSS), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+ .auto_queue = true, \
+ }
+
+#define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
+ { \
+ .num_elements = el_count, \
+ .irq_moderation_ms = 0, \
+ .irq = (ev_ring) + 1, \
+ .priority = 1, \
+ .mode = MHI_DB_BRST_DISABLE, \
+ .data_type = MHI_ER_CTRL, \
+ .hardware_event = false, \
+ .client_managed = false, \
+ .offload_channel = false, \
+ }
+
+#define MHI_CHANNEL_CONFIG_HW_UL(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_TO_DEVICE, \
+ .ee_mask = BIT(MHI_EE_AMSS), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_ENABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = true, \
+ } \
+
+#define MHI_CHANNEL_CONFIG_HW_DL(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_FROM_DEVICE, \
+ .ee_mask = BIT(MHI_EE_AMSS), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_ENABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = true, \
+ }
+
+#define MHI_CHANNEL_CONFIG_UL_SBL(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_TO_DEVICE, \
+ .ee_mask = BIT(MHI_EE_SBL), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+ } \
+
+#define MHI_CHANNEL_CONFIG_DL_SBL(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_FROM_DEVICE, \
+ .ee_mask = BIT(MHI_EE_SBL), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+ }
+
+#define MHI_CHANNEL_CONFIG_UL_FP(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_TO_DEVICE, \
+ .ee_mask = BIT(MHI_EE_FP), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+ } \
+
+#define MHI_CHANNEL_CONFIG_DL_FP(ch_num, ch_name, el_count, ev_ring) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .num_elements = el_count, \
+ .event_ring = ev_ring, \
+ .dir = DMA_FROM_DEVICE, \
+ .ee_mask = BIT(MHI_EE_FP), \
+ .pollcfg = 0, \
+ .doorbell = MHI_DB_BRST_DISABLE, \
+ .lpm_notify = false, \
+ .offload_channel = false, \
+ .doorbell_mode_switch = false, \
+ }
+
+#define MHI_EVENT_CONFIG_DATA(ev_ring, el_count) \
+ { \
+ .num_elements = el_count, \
+ .irq_moderation_ms = 5, \
+ .irq = (ev_ring) + 1, \
+ .priority = 1, \
+ .mode = MHI_DB_BRST_DISABLE, \
+ .data_type = MHI_ER_DATA, \
+ .hardware_event = false, \
+ .client_managed = false, \
+ .offload_channel = false, \
+ }
+
+#define MHI_EVENT_CONFIG_SW_DATA(ev_ring, el_count) \
+ { \
+ .num_elements = el_count, \
+ .irq_moderation_ms = 0, \
+ .irq = (ev_ring) + 1, \
+ .priority = 1, \
+ .mode = MHI_DB_BRST_DISABLE, \
+ .data_type = MHI_ER_DATA, \
+ .hardware_event = false, \
+ .client_managed = false, \
+ .offload_channel = false, \
+ }
+
+#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \
+ { \
+ .num_elements = el_count, \
+ .irq_moderation_ms = 1, \
+ .irq = (ev_ring) + 1, \
+ .priority = 1, \
+ .mode = MHI_DB_BRST_DISABLE, \
+ .data_type = MHI_ER_DATA, \
+ .hardware_event = true, \
+ .client_managed = false, \
+ .offload_channel = false, \
+ .channel = ch_num, \
+ }
+
+static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0),
+ MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
+ MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
+ MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
+ MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0),
+ MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 64, 2),
+ MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 64, 3),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 4),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 5),
+};
+
+static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
+ /* first ring is control+data ring */
+ MHI_EVENT_CONFIG_CTRL(0, 64),
+ /* DIAG dedicated event ring */
+ MHI_EVENT_CONFIG_DATA(1, 128),
+ /* Software channels dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(2, 64),
+ MHI_EVENT_CONFIG_SW_DATA(3, 64),
+ /* Hardware channels request dedicated hardware event rings */
+ MHI_EVENT_CONFIG_HW_DATA(4, 1024, 100),
+ MHI_EVENT_CONFIG_HW_DATA(5, 2048, 101)
+};
+
+static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
+ .max_channels = 128,
+ .timeout_ms = 8000,
+ .num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
+ .ch_cfg = modem_qcom_v1_mhi_channels,
+ .num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
+ .event_cfg = modem_qcom_v1_mhi_events,
+};
+
+static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
+ .name = "qcom-sdx65m",
+ .fw = "qcom/sdx65m/xbl.elf",
+ .edl = "qcom/sdx65m/edl.mbn",
+ .config = &modem_qcom_v1_mhiv_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
+ .name = "qcom-sdx55m",
+ .fw = "qcom/sdx55m/sbl1.mbn",
+ .edl = "qcom/sdx55m/edl.mbn",
+ .config = &modem_qcom_v1_mhiv_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
+ .name = "qcom-sdx24",
+ .edl = "qcom/prog_firehose_sdx24.mbn",
+ .config = &modem_qcom_v1_mhiv_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = true,
+};
+
+static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(0, "NMEA", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(1, "NMEA", 32, 0),
+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+	/* The EDL firmware is a flash programmer exposing the Firehose protocol */
+ MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
+};
+
+static struct mhi_event_config mhi_quectel_em1xx_events[] = {
+ MHI_EVENT_CONFIG_CTRL(0, 128),
+ MHI_EVENT_CONFIG_DATA(1, 128),
+ MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
+ MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
+};
+
+static const struct mhi_controller_config modem_quectel_em1xx_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .num_channels = ARRAY_SIZE(mhi_quectel_em1xx_channels),
+ .ch_cfg = mhi_quectel_em1xx_channels,
+ .num_events = ARRAY_SIZE(mhi_quectel_em1xx_events),
+ .event_cfg = mhi_quectel_em1xx_events,
+};
+
+static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
+ .name = "quectel-em1xx",
+ .edl = "qcom/prog_firehose_sdx24.mbn",
+ .config = &modem_quectel_em1xx_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = true,
+};
+
+static const struct mhi_pci_dev_info mhi_quectel_rm5xx_info = {
+ .name = "quectel-rm5xx",
+ .edl = "qcom/prog_firehose_sdx6x.elf",
+ .config = &modem_quectel_em1xx_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = true,
+};
+
+static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
+};
+
+static struct mhi_event_config mhi_foxconn_sdx55_events[] = {
+ MHI_EVENT_CONFIG_CTRL(0, 128),
+ MHI_EVENT_CONFIG_DATA(1, 128),
+ MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
+ MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
+};
+
+static const struct mhi_controller_config modem_foxconn_sdx55_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels),
+ .ch_cfg = mhi_foxconn_sdx55_channels,
+ .num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
+ .event_cfg = mhi_foxconn_sdx55_events,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_sdx24_info = {
+ .name = "foxconn-sdx24",
+ .config = &modem_foxconn_sdx55_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
+ .name = "foxconn-sdx55",
+ .fw = "qcom/sdx55m/sbl1.mbn",
+ .edl = "qcom/sdx55m/edl.mbn",
+ .config = &modem_foxconn_sdx55_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = {
+ .name = "foxconn-sdx65",
+ .config = &modem_foxconn_sdx55_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_channel_config mhi_mv3x_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
+ MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
+ /* MBIM Control Channel */
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 64, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 64, 0),
+ /* MBIM Data Channel */
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 512, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3),
+};
+
+static struct mhi_event_config mhi_mv3x_events[] = {
+ MHI_EVENT_CONFIG_CTRL(0, 256),
+ MHI_EVENT_CONFIG_DATA(1, 256),
+ MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
+ MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101),
+};
+
+static const struct mhi_controller_config modem_mv3x_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .num_channels = ARRAY_SIZE(mhi_mv3x_channels),
+ .ch_cfg = mhi_mv3x_channels,
+ .num_events = ARRAY_SIZE(mhi_mv3x_events),
+ .event_cfg = mhi_mv3x_events,
+};
+
+static const struct mhi_pci_dev_info mhi_mv31_info = {
+ .name = "cinterion-mv31",
+ .config = &modem_mv3x_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+};
+
+static const struct mhi_pci_dev_info mhi_mv32_info = {
+ .name = "cinterion-mv32",
+ .config = &modem_mv3x_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+};
+
+static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 256, 0),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 128, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 128, 0),
+ MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 512, 1),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 512, 2),
+};
+
+static struct mhi_event_config modem_sierra_em919x_mhi_events[] = {
+ /* first ring is control+data and DIAG ring */
+ MHI_EVENT_CONFIG_CTRL(0, 2048),
+ /* Hardware channels request dedicated hardware event rings */
+ MHI_EVENT_CONFIG_HW_DATA(1, 2048, 100),
+ MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
+};
+
+static const struct mhi_controller_config modem_sierra_em919x_config = {
+ .max_channels = 128,
+ .timeout_ms = 24000,
+ .num_channels = ARRAY_SIZE(mhi_sierra_em919x_channels),
+ .ch_cfg = mhi_sierra_em919x_channels,
+ .num_events = ARRAY_SIZE(modem_sierra_em919x_mhi_events),
+ .event_cfg = modem_sierra_em919x_mhi_events,
+};
+
+static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
+ .name = "sierra-em919x",
+ .config = &modem_sierra_em919x_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+};
+
+static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0),
+ MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2),
+};
+
+static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = {
+ MHI_EVENT_CONFIG_CTRL(0, 128),
+ MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100),
+ MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
+};
+
+static struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels),
+ .ch_cfg = mhi_telit_fn980_hw_v1_channels,
+ .num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events),
+ .event_cfg = mhi_telit_fn980_hw_v1_events,
+};
+
+static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = {
+ .name = "telit-fn980-hwv1",
+ .fw = "qcom/sdx55m/sbl1.mbn",
+ .edl = "qcom/sdx55m/edl.mbn",
+ .config = &modem_telit_fn980_hw_v1_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_channel_config mhi_telit_fn990_channels[] = {
+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1),
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
+ MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
+};
+
+static struct mhi_event_config mhi_telit_fn990_events[] = {
+ MHI_EVENT_CONFIG_CTRL(0, 128),
+ MHI_EVENT_CONFIG_DATA(1, 128),
+ MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
+ MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
+};
+
+static const struct mhi_controller_config modem_telit_fn990_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .num_channels = ARRAY_SIZE(mhi_telit_fn990_channels),
+ .ch_cfg = mhi_telit_fn990_channels,
+ .num_events = ARRAY_SIZE(mhi_telit_fn990_events),
+ .event_cfg = mhi_telit_fn990_events,
+};
+
+static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
+ .name = "telit-fn990",
+ .config = &modem_telit_fn990_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+ .mru_default = 32768,
+};
+
+/* Keep the list sorted by PID. A new VID should be added as the last entry */
+static const struct pci_device_id mhi_pci_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, PCI_VENDOR_ID_QCOM, 0x010c),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ /* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
+ .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
+ /* Telit FN980 hardware revision v1 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000),
+ .driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info },
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
+ /* Telit FN990 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
+ .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
+ /* Telit FE990 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015),
+ .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
+ { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
+ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
+ { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */
+ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
+ /* RM520N-GL (sdx6x), eSIM */
+ { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1004),
+ .driver_data = (kernel_ulong_t) &mhi_quectel_rm5xx_info },
+ /* RM520N-GL (sdx6x), Lenovo variant */
+ { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1007),
+ .driver_data = (kernel_ulong_t) &mhi_quectel_rm5xx_info },
+ { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x100d), /* EM160R-GL (sdx24) */
+ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
+ { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x2001), /* EM120R-GL for FCCL (sdx24) */
+ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
+ /* T99W175 (sdx55), Both for eSIM and Non-eSIM */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ /* DW5930e (sdx55), With eSIM, It's also T99W175 */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ /* DW5930e (sdx55), Non-eSIM, It's also T99W175 */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ /* T99W175 (sdx55), Based on Qualcomm new baseline */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ /* T99W175 (sdx55) */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0c3),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ /* T99W368 (sdx65) */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ /* T99W373 (sdx62) */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ /* T99W510 (sdx24), variant 1 */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f0),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
+ /* T99W510 (sdx24), variant 2 */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f1),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
+ /* T99W510 (sdx24), variant 3 */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f2),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
+ /* DW5932e-eSIM (sdx62), With eSIM */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f5),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ /* DW5932e (sdx62), Non-eSIM */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f9),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ /* MV31-W (Cinterion) */
+ { PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b3),
+ .driver_data = (kernel_ulong_t) &mhi_mv31_info },
+ /* MV31-W (Cinterion), based on new baseline */
+ { PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b4),
+ .driver_data = (kernel_ulong_t) &mhi_mv31_info },
+ /* MV32-WA (Cinterion) */
+ { PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00ba),
+ .driver_data = (kernel_ulong_t) &mhi_mv32_info },
+ /* MV32-WB (Cinterion) */
+ { PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00bb),
+ .driver_data = (kernel_ulong_t) &mhi_mv32_info },
+ /* T99W175 (sdx55), HP variant */
+ { PCI_DEVICE(0x03f0, 0x0a6c),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
+
+enum mhi_pci_device_status {
+ MHI_PCI_DEV_STARTED,
+ MHI_PCI_DEV_SUSPENDED,
+};
+
+struct mhi_pci_device {
+ struct mhi_controller mhi_cntrl;
+ struct pci_saved_state *pci_state;
+ struct work_struct recovery_work;
+ struct timer_list health_check_timer;
+ unsigned long status;
+};
+
+static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *addr, u32 *out)
+{
+ *out = readl(addr);
+ return 0;
+}
+
+static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *addr, u32 val)
+{
+ writel(val, addr);
+}
+
+static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
+ enum mhi_callback cb)
+{
+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+
+	/* Forbid runtime PM if the firmware crashes; allow it once mission mode is reached */
+ switch (cb) {
+ case MHI_CB_FATAL_ERROR:
+ case MHI_CB_SYS_ERROR:
+ dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb);
+ pm_runtime_forbid(&pdev->dev);
+ break;
+ case MHI_CB_EE_MISSION_MODE:
+ pm_runtime_allow(&pdev->dev);
+ break;
+ default:
+ break;
+ }
+}
+
+static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force)
+{
+ /* no-op */
+}
+
+static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override)
+{
+ /* no-op */
+}
+
+static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl)
+{
+ /* no-op */
+}
+
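+/* Check link/device accessibility by reading the vendor ID from config space */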
+static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
+{
+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+ u16 vendor = 0;
+
+ if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
+ return false;
+
+ if (vendor == (u16) ~0 || vendor == 0)
+ return false;
+
+ return true;
+}
+
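+/*
+ * Enable the PCI device, map the MHI register BAR and set the DMA mask.
+ * The pcim_* resources are device-managed and released automatically.
+ */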
+static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
+ unsigned int bar_num, u64 dma_mask)
+{
+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+ int err;
+
+ err = pci_assign_resource(pdev, bar_num);
+ if (err)
+ return err;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable pci device: %d\n", err);
+ return err;
+ }
+
+ err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev));
+ if (err) {
+ dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
+ return err;
+ }
+ mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
+ mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num);
+
+ err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot set proper DMA mask\n");
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+
+static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
+ const struct mhi_controller_config *mhi_cntrl_config)
+{
+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+ int nr_vectors, i;
+ int *irq;
+
+	/*
+	 * Ideally, allocate one MSI vector for BHI plus one vector per event
+	 * ring. No explicit pci_free_irq_vectors() is required; the vectors
+	 * are released by pcim_release.
+	 */
+ mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;
+
+ nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
+ if (nr_vectors < 0) {
+ dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
+ nr_vectors);
+ return nr_vectors;
+ }
+
+ if (nr_vectors < mhi_cntrl->nr_irqs) {
+ dev_warn(&pdev->dev, "using shared MSI\n");
+
+ /* Patch msi vectors, use only one (shared) */
+ for (i = 0; i < mhi_cntrl_config->num_events; i++)
+ mhi_cntrl_config->event_cfg[i].irq = 0;
+ mhi_cntrl->nr_irqs = 1;
+ }
+
+ irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL);
+ if (!irq)
+ return -ENOMEM;
+
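+	/*
+	 * Map each MHI IRQ index to an allocated vector; any extra entries
+	 * fall back to the last allocated vector.
+	 */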
+ for (i = 0; i < mhi_cntrl->nr_irqs; i++) {
+ int vector = i >= nr_vectors ? (nr_vectors - 1) : i;
+
+ irq[i] = pci_irq_vector(pdev, vector);
+ }
+
+ mhi_cntrl->irq = irq;
+
+ return 0;
+}
+
+static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl)
+{
+	/* The runtime_get() MHI callback means:
+	 * Do whatever is needed to leave M3.
+	 */
+ return pm_runtime_get(mhi_cntrl->cntrl_dev);
+}
+
+static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
+{
+	/* The runtime_put() MHI callback means:
+	 * The device can be moved into the M3 state.
+	 */
+ pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
+ pm_runtime_put(mhi_cntrl->cntrl_dev);
+}
+
+static void mhi_pci_recovery_work(struct work_struct *work)
+{
+ struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device,
+ recovery_work);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+ int err;
+
+ dev_warn(&pdev->dev, "device recovery started\n");
+
+ del_timer(&mhi_pdev->health_check_timer);
+ pm_runtime_forbid(&pdev->dev);
+
+ /* Clean up MHI state */
+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+ mhi_power_down(mhi_cntrl, false);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ }
+
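+	/* Bring the device back to D0 and restore the PCI state saved at probe */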
+ pci_set_power_state(pdev, PCI_D0);
+ pci_load_saved_state(pdev, mhi_pdev->pci_state);
+ pci_restore_state(pdev);
+
+ if (!mhi_pci_is_alive(mhi_cntrl))
+ goto err_try_reset;
+
+ err = mhi_prepare_for_power_up(mhi_cntrl);
+ if (err)
+ goto err_try_reset;
+
+ err = mhi_sync_power_up(mhi_cntrl);
+ if (err)
+ goto err_unprepare;
+
+ dev_dbg(&pdev->dev, "Recovery completed\n");
+
+ set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+ return;
+
+err_unprepare:
+ mhi_unprepare_after_power_down(mhi_cntrl);
+err_try_reset:
+ if (pci_reset_function(pdev))
+ dev_err(&pdev->dev, "Recovery failed\n");
+}
+
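+/* Periodic check that the device is still reachable; schedule recovery if not */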
+static void health_check(struct timer_list *t)
+{
+ struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
+ test_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
+ return;
+
+ if (!mhi_pci_is_alive(mhi_cntrl)) {
+ dev_err(mhi_cntrl->cntrl_dev, "Device died\n");
+ queue_work(system_long_wq, &mhi_pdev->recovery_work);
+ return;
+ }
+
+ /* reschedule in two seconds */
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+}
+
+static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
+ const struct mhi_controller_config *mhi_cntrl_config;
+ struct mhi_pci_device *mhi_pdev;
+ struct mhi_controller *mhi_cntrl;
+ int err;
+
+ dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name);
+
+ /* mhi_pdev.mhi_cntrl must be zero-initialized */
+ mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL);
+ if (!mhi_pdev)
+ return -ENOMEM;
+
+ INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
+ timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
+
+ mhi_cntrl_config = info->config;
+ mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ mhi_cntrl->cntrl_dev = &pdev->dev;
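+	/* Usable DMA (IOVA) window for the device, bounded by its DMA address width */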
+ mhi_cntrl->iova_start = 0;
+ mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
+ mhi_cntrl->fw_image = info->fw;
+ mhi_cntrl->edl_image = info->edl;
+
+ mhi_cntrl->read_reg = mhi_pci_read_reg;
+ mhi_cntrl->write_reg = mhi_pci_write_reg;
+ mhi_cntrl->status_cb = mhi_pci_status_cb;
+ mhi_cntrl->runtime_get = mhi_pci_runtime_get;
+ mhi_cntrl->runtime_put = mhi_pci_runtime_put;
+ mhi_cntrl->mru = info->mru_default;
+
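+	/*
+	 * Devices with a sideband wake line do not use the inband MHI wake
+	 * doorbell, so install no-op wake hooks.
+	 */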
+ if (info->sideband_wake) {
+ mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
+ mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
+ mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
+ }
+
+ err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
+ if (err)
+ return err;
+
+ err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config);
+ if (err)
+ return err;
+
+ pci_set_drvdata(pdev, mhi_pdev);
+
+	/* Keep the saved PCI config space at hand for restore after a sudden
+	 * PCI error: cache the state locally and discard the PCI core copy.
+	 */
+ pci_save_state(pdev);
+ mhi_pdev->pci_state = pci_store_saved_state(pdev);
+ pci_load_saved_state(pdev, NULL);
+
+ err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
+ if (err)
+ return err;
+
+ /* MHI bus does not power up the controller by default */
+ err = mhi_prepare_for_power_up(mhi_cntrl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to prepare MHI controller\n");
+ goto err_unregister;
+ }
+
+ err = mhi_sync_power_up(mhi_cntrl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to power up MHI controller\n");
+ goto err_unprepare;
+ }
+
+ set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
+
+ /* start health check */
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
+ /* Only allow runtime-suspend if PME capable (for wakeup) */
+ if (pci_pme_capable(pdev, PCI_D3hot)) {
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ }
+
+ return 0;
+
+err_unprepare:
+ mhi_unprepare_after_power_down(mhi_cntrl);
+err_unregister:
+ mhi_unregister_controller(mhi_cntrl);
+
+ return err;
+}
+
+static void mhi_pci_remove(struct pci_dev *pdev)
+{
+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ del_timer_sync(&mhi_pdev->health_check_timer);
+ cancel_work_sync(&mhi_pdev->recovery_work);
+
+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+ mhi_power_down(mhi_cntrl, true);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ }
+
+ /* balancing probe put_noidle */
+ if (pci_pme_capable(pdev, PCI_D3hot))
+ pm_runtime_get_noresume(&pdev->dev);
+
+ mhi_unregister_controller(mhi_cntrl);
+}
+
+static void mhi_pci_shutdown(struct pci_dev *pdev)
+{
+ mhi_pci_remove(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static void mhi_pci_reset_prepare(struct pci_dev *pdev)
+{
+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ dev_info(&pdev->dev, "reset\n");
+
+ del_timer(&mhi_pdev->health_check_timer);
+
+ /* Clean up MHI state */
+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+ mhi_power_down(mhi_cntrl, false);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ }
+
+ /* cause internal device reset */
+ mhi_soc_reset(mhi_cntrl);
+
+ /* Be sure device reset has been executed */
+ msleep(MHI_POST_RESET_DELAY_MS);
+}
+
+static void mhi_pci_reset_done(struct pci_dev *pdev)
+{
+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ int err;
+
+ /* Restore initial known working PCI state */
+ pci_load_saved_state(pdev, mhi_pdev->pci_state);
+ pci_restore_state(pdev);
+
+	/* Is device status available? */
+ if (!mhi_pci_is_alive(mhi_cntrl)) {
+ dev_err(&pdev->dev, "reset failed\n");
+ return;
+ }
+
+ err = mhi_prepare_for_power_up(mhi_cntrl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to prepare MHI controller\n");
+ return;
+ }
+
+ err = mhi_sync_power_up(mhi_cntrl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to power up MHI controller\n");
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ return;
+ }
+
+ set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+}
+
+static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+ dev_err(&pdev->dev, "PCI error detected, state = %u\n", state);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ /* Clean up MHI state */
+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+ mhi_power_down(mhi_cntrl, false);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ } else {
+ /* Nothing to do */
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t mhi_pci_slot_reset(struct pci_dev *pdev)
+{
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void mhi_pci_io_resume(struct pci_dev *pdev)
+{
+ struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+
+ dev_err(&pdev->dev, "PCI slot reset done\n");
+
+ queue_work(system_long_wq, &mhi_pdev->recovery_work);
+}
+
+static const struct pci_error_handlers mhi_pci_err_handler = {
+ .error_detected = mhi_pci_error_detected,
+ .slot_reset = mhi_pci_slot_reset,
+ .resume = mhi_pci_io_resume,
+ .reset_prepare = mhi_pci_reset_prepare,
+ .reset_done = mhi_pci_reset_done,
+};
+
+static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ int err;
+
+ if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
+ return 0;
+
+ del_timer(&mhi_pdev->health_check_timer);
+ cancel_work_sync(&mhi_pdev->recovery_work);
+
+ if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
+ mhi_cntrl->ee != MHI_EE_AMSS)
+ goto pci_suspend; /* Nothing to do at MHI level */
+
+ /* Transition to M3 state */
+ err = mhi_pm_suspend(mhi_cntrl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to suspend device: %d\n", err);
+ clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status);
+ return -EBUSY;
+ }
+
+pci_suspend:
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, true);
+
+ return 0;
+}
+
+static int __maybe_unused mhi_pci_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ int err;
+
+ if (!test_and_clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
+ return 0;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ goto err_recovery;
+
+ pci_set_master(pdev);
+ pci_wake_from_d3(pdev, false);
+
+ if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
+ mhi_cntrl->ee != MHI_EE_AMSS)
+ return 0; /* Nothing to do at MHI level */
+
+ /* Exit M3, transition to M0 state */
+ err = mhi_pm_resume(mhi_cntrl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to resume device: %d\n", err);
+ goto err_recovery;
+ }
+
+ /* Resume health check */
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
+ /* It can be a remote wakeup (no mhi runtime_get), update access time */
+ pm_runtime_mark_last_busy(dev);
+
+ return 0;
+
+err_recovery:
+	/* Do not return an error here, to avoid messing up the PCI device
+	 * state; the device has likely lost power (D3cold) and simply needs to
+	 * be reset from the recovery procedure. Trigger the recovery
+	 * asynchronously so it does not delay system suspend exit.
+	 */
+ queue_work(system_long_wq, &mhi_pdev->recovery_work);
+ pm_runtime_mark_last_busy(dev);
+
+ return 0;
+}
+
+static int __maybe_unused mhi_pci_suspend(struct device *dev)
+{
+ pm_runtime_disable(dev);
+ return mhi_pci_runtime_suspend(dev);
+}
+
+static int __maybe_unused mhi_pci_resume(struct device *dev)
+{
+ int ret;
+
+	/* Depending on the platform, the device may have lost power (D3cold);
+	 * resume it now to check its state and recover when necessary.
+	 */
+ ret = mhi_pci_runtime_resume(dev);
+ pm_runtime_enable(dev);
+
+ return ret;
+}
+
+static int __maybe_unused mhi_pci_freeze(struct device *dev)
+{
+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+ struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+	/* We want to stop all operations: hibernation does not guarantee that
+	 * the device will be in the same state as before freezing, especially
+	 * if the intermediate restore kernel reinitializes the MHI device with
+	 * a new context.
+	 */
+ flush_work(&mhi_pdev->recovery_work);
+ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+ mhi_power_down(mhi_cntrl, true);
+ mhi_unprepare_after_power_down(mhi_cntrl);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused mhi_pci_restore(struct device *dev)
+{
+ struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+
+ /* Reinitialize the device */
+ queue_work(system_long_wq, &mhi_pdev->recovery_work);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mhi_pci_pm_ops = {
+ SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
+#ifdef CONFIG_PM_SLEEP
+ .suspend = mhi_pci_suspend,
+ .resume = mhi_pci_resume,
+ .freeze = mhi_pci_freeze,
+ .thaw = mhi_pci_restore,
+ .poweroff = mhi_pci_freeze,
+ .restore = mhi_pci_restore,
+#endif
+};
+
+static struct pci_driver mhi_pci_driver = {
+ .name = "mhi-pci-generic",
+ .id_table = mhi_pci_id_table,
+ .probe = mhi_pci_probe,
+ .remove = mhi_pci_remove,
+ .shutdown = mhi_pci_shutdown,
+ .err_handler = &mhi_pci_err_handler,
+ .driver.pm = &mhi_pci_pm_ops
+};
+module_pci_driver(mhi_pci_driver);
+
+MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
+MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
new file mode 100644
index 0000000000..8a4362d75f
--- /dev/null
+++ b/drivers/bus/mhi/host/pm.c
@@ -0,0 +1,1283 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include "internal.h"
+
+/*
+ * Not all MHI state transitions are synchronous. Transitions like Linkdown,
+ * SYS_ERR, and shutdown can happen anytime asynchronously. The helper
+ * mhi_tryset_pm_state() below moves to a new state only if the transition
+ * is allowed.
+ *
+ * Priority increases as we go down. For instance, from any state in L0, the
+ * transition can be made to states in L1, L2 and L3. A notable exception to
+ * this rule is state DISABLE. From DISABLE state we can only transition to
+ * POR state. Also, while in L2 state, user cannot jump back to previous
+ * L1 or L0 states.
+ *
+ * Valid transitions:
+ * L0: DISABLE <--> POR
+ * POR <--> POR
+ * POR -> M0 -> M2 --> M0
+ * POR -> FW_DL_ERR
+ * FW_DL_ERR <--> FW_DL_ERR
+ * M0 <--> M0
+ * M0 -> FW_DL_ERR
+ * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
+ * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
+ * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
+ * SHUTDOWN_PROCESS -> DISABLE
+ * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
+ * LD_ERR_FATAL_DETECT -> DISABLE
+ */
+static const struct mhi_pm_transitions dev_state_transitions[] = {
+ /* L0 States */
+ {
+ MHI_PM_DISABLE,
+ MHI_PM_POR
+ },
+ {
+ MHI_PM_POR,
+ MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+ },
+ {
+ MHI_PM_M0,
+ MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+ },
+ {
+ MHI_PM_M2,
+ MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M3_ENTER,
+ MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M3,
+ MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M3_EXIT,
+ MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_FW_DL_ERR,
+ MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L1 States */
+ {
+ MHI_PM_SYS_ERR_DETECT,
+ MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_SYS_ERR_PROCESS,
+ MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L2 States */
+ {
+ MHI_PM_SHUTDOWN_PROCESS,
+ MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L3 States */
+ {
+ MHI_PM_LD_ERR_FATAL_DETECT,
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE
+ },
+};
+
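+/*
+ * The current pm_state is a single-bit value: its bit index selects the row
+ * in dev_state_transitions, and the requested state must appear in that
+ * row's to_states mask for the transition to be accepted.
+ */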
+enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
+ enum mhi_pm_state state)
+{
+ unsigned long cur_state = mhi_cntrl->pm_state;
+ int index = find_last_bit(&cur_state, 32);
+
+ if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
+ return cur_state;
+
+ if (unlikely(dev_state_transitions[index].from_state != cur_state))
+ return cur_state;
+
+ if (unlikely(!(dev_state_transitions[index].to_states & state)))
+ return cur_state;
+
+ mhi_cntrl->pm_state = state;
+ return mhi_cntrl->pm_state;
+}
+
+void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ if (state == MHI_STATE_RESET) {
+ ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_RESET_MASK, 1);
+ } else {
+ ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_MHISTATE_MASK, state);
+ }
+
+ if (ret)
+ dev_err(dev, "Failed to set MHI state to: %s\n",
+ mhi_state_str(state));
+}
+
+/* NOP for backward compatibility, host allowed to ring DB in M2 state */
+static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
+{
+ mhi_cntrl->wake_get(mhi_cntrl, false);
+ mhi_cntrl->wake_put(mhi_cntrl, true);
+}
+
+/* Handle device ready state transition */
+int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_event *mhi_event;
+ enum mhi_pm_state cur_state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 interval_us = 25000; /* poll register field every 25 milliseconds */
+ int ret, i;
+
+ /* Check if device entered error state */
+ if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev, "Device link is not accessible\n");
+ return -EIO;
+ }
+
+ /* Wait for RESET to be cleared and READY bit to be set by the device */
+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_RESET_MASK, 0, interval_us);
+ if (ret) {
+ dev_err(dev, "Device failed to clear MHI Reset\n");
+ return ret;
+ }
+
+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
+ MHISTATUS_READY_MASK, 1, interval_us);
+ if (ret) {
+ dev_err(dev, "Device failed to enter MHI Ready\n");
+ return ret;
+ }
+
+ dev_dbg(dev, "Device in READY State\n");
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
+ mhi_cntrl->dev_state = MHI_STATE_READY;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ if (cur_state != MHI_PM_POR) {
+ dev_err(dev, "Error moving to state %s from %s\n",
+ to_mhi_pm_state_str(MHI_PM_POR),
+ to_mhi_pm_state_str(cur_state));
+ return -EIO;
+ }
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ dev_err(dev, "Device registers not accessible\n");
+ goto error_mmio;
+ }
+
+ /* Configure MMIO registers */
+ ret = mhi_init_mmio(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Error configuring MMIO registers\n");
+ goto error_mmio;
+ }
+
+ /* Add elements to all SW event rings */
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ /* Skip if this is an offload or HW event */
+ if (mhi_event->offload_ev || mhi_event->hw_ring)
+ continue;
+
+ ring->wp = ring->base + ring->len - ring->el_size;
+ *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
+ /* Update all cores */
+ smp_wmb();
+
+ /* Ring the event ring db */
+ spin_lock_irq(&mhi_event->lock);
+ mhi_ring_er_db(mhi_event);
+ spin_unlock_irq(&mhi_event->lock);
+ }
+
+ /* Set MHI to M0 state */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return 0;
+
+error_mmio:
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return -EIO;
+}
+
+int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_pm_state cur_state;
+ struct mhi_chan *mhi_chan;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int i;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->dev_state = MHI_STATE_M0;
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (unlikely(cur_state != MHI_PM_M0)) {
+ dev_err(dev, "Unable to transition to M0 state\n");
+ return -EIO;
+ }
+ mhi_cntrl->M0++;
+
+ /* Wake up the device */
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+
+ /* Ring all event rings and CMD ring only if we're in mission mode */
+ if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+ struct mhi_cmd *mhi_cmd =
+ &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ spin_lock_irq(&mhi_event->lock);
+ mhi_ring_er_db(mhi_event);
+ spin_unlock_irq(&mhi_event->lock);
+ }
+
+ /* Only ring primary cmd ring if ring is not empty */
+ spin_lock_irq(&mhi_cmd->lock);
+ if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
+ mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
+ spin_unlock_irq(&mhi_cmd->lock);
+ }
+
+ /* Ring channel DB registers */
+ mhi_chan = mhi_cntrl->mhi_chan;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+ if (mhi_chan->db_cfg.reset_req) {
+ write_lock_irq(&mhi_chan->lock);
+ mhi_chan->db_cfg.db_mode = true;
+ write_unlock_irq(&mhi_chan->lock);
+ }
+
+ read_lock_irq(&mhi_chan->lock);
+
+ /* Only ring DB if ring is not empty */
+ if (tre_ring->base && tre_ring->wp != tre_ring->rp &&
+ mhi_chan->ch_state == MHI_CH_STATE_ENABLED)
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ read_unlock_irq(&mhi_chan->lock);
+ }
+
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ wake_up_all(&mhi_cntrl->state_event);
+
+ return 0;
+}
+
+/*
+ * After receiving the MHI state change event from the device indicating the
+ * transition to M1 state, the host can transition the device to M2 state
+ * for keeping it in low power state.
+ */
+void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_pm_state state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
+ if (state == MHI_PM_M2) {
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
+ mhi_cntrl->dev_state = MHI_STATE_M2;
+
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ mhi_cntrl->M2++;
+ wake_up_all(&mhi_cntrl->state_event);
+
+ /* If there are any pending resources, exit M2 immediately */
+ if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
+ atomic_read(&mhi_cntrl->dev_wake))) {
+ dev_dbg(dev,
+ "Exiting M2, pending_pkts: %d dev_wake: %d\n",
+ atomic_read(&mhi_cntrl->pending_pkts),
+ atomic_read(&mhi_cntrl->dev_wake));
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+ mhi_cntrl->wake_put(mhi_cntrl, true);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ } else {
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
+ }
+ } else {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ }
+}
+
+/* MHI M3 completion handler */
+int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_pm_state state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->dev_state = MHI_STATE_M3;
+ state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (state != MHI_PM_M3) {
+ dev_err(dev, "Unable to transition to M3 state\n");
+ return -EIO;
+ }
+
+ mhi_cntrl->M3++;
+ wake_up_all(&mhi_cntrl->state_event);
+
+ return 0;
+}
+
+/* Handle device Mission Mode transition */
+static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_event *mhi_event;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
+ int i, ret;
+
+ dev_dbg(dev, "Processing Mission Mode transition\n");
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+ ee = mhi_get_exec_env(mhi_cntrl);
+
+ if (!MHI_IN_MISSION_MODE(ee)) {
+ mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ wake_up_all(&mhi_cntrl->state_event);
+ return -EIO;
+ }
+ mhi_cntrl->ee = ee;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ wake_up_all(&mhi_cntrl->state_event);
+
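+	/* Tear down child devices that belong to the previous execution environment */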
+ device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
+ mhi_destroy_device);
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
+
+ /* Force MHI to be in M0 state before continuing */
+ ret = __mhi_device_get_sync(mhi_cntrl);
+ if (ret)
+ return ret;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ ret = -EIO;
+ goto error_mission_mode;
+ }
+
+ /* Add elements to all HW event rings */
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ if (mhi_event->offload_ev || !mhi_event->hw_ring)
+ continue;
+
+ ring->wp = ring->base + ring->len - ring->el_size;
+ *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
+ /* Update to all cores */
+ smp_wmb();
+
+ spin_lock_irq(&mhi_event->lock);
+ if (MHI_DB_ACCESS_VALID(mhi_cntrl))
+ mhi_ring_er_db(mhi_event);
+ spin_unlock_irq(&mhi_event->lock);
+ }
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ /*
+ * The MHI devices are only created when the client device switches its
+ * Execution Environment (EE) to either SBL or AMSS states
+ */
+ mhi_create_devices(mhi_cntrl);
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+
+error_mission_mode:
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return ret;
+}
+
+/* Handle shutdown transitions */
+static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_pm_state cur_state;
+ struct mhi_event *mhi_event;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_event_ctxt *er_ctxt;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret, i;
+
+ dev_dbg(dev, "Processing disable transition with PM state: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+
+ /* Trigger MHI RESET so that the device will not access host memory */
+ if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
+ /* Skip MHI RESET if in RDDM state */
+ if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM)
+ goto skip_mhi_reset;
+
+ dev_dbg(dev, "Triggering MHI Reset in device\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+
+ /* Wait for the reset bit to be cleared by the device */
+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_RESET_MASK, 0, 25000);
+ if (ret)
+ dev_err(dev, "Device failed to clear MHI Reset\n");
+
+ /*
+ * Device will clear BHI_INTVEC as a part of RESET processing,
+ * hence re-program it
+ */
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+
+ if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
+ /* wait for ready to be set */
+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs,
+ MHISTATUS,
+ MHISTATUS_READY_MASK, 1, 25000);
+ if (ret)
+ dev_err(dev, "Device failed to enter READY state\n");
+ }
+ }
+
+skip_mhi_reset:
+ dev_dbg(dev,
+ "Waiting for all pending event ring processing to complete\n");
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+ disable_irq(mhi_cntrl->irq[mhi_event->irq]);
+ tasklet_kill(&mhi_event->task);
+ }
+
+ /* Release lock and wait for all pending threads to complete */
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ dev_dbg(dev, "Waiting for all pending threads to complete\n");
+ wake_up_all(&mhi_cntrl->state_event);
+
+ dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
+ device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+
+ WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
+ WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));
+
+ /* Reset the ev rings and cmd rings */
+ dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ ring->rp = ring->base;
+ ring->wp = ring->base;
+ cmd_ctxt->rp = cmd_ctxt->rbase;
+ cmd_ctxt->wp = cmd_ctxt->rbase;
+ }
+
+ mhi_event = mhi_cntrl->mhi_event;
+ er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
+ mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ /* Skip offload events */
+ if (mhi_event->offload_ev)
+ continue;
+
+ ring->rp = ring->base;
+ ring->wp = ring->base;
+ er_ctxt->rp = er_ctxt->rbase;
+ er_ctxt->wp = er_ctxt->rbase;
+ }
+
+ /* Move to disable state */
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (unlikely(cur_state != MHI_PM_DISABLE))
+ dev_err(dev, "Error moving from PM state: %s to: %s\n",
+ to_mhi_pm_state_str(cur_state),
+ to_mhi_pm_state_str(MHI_PM_DISABLE));
+
+ dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ mhi_state_str(mhi_cntrl->dev_state));
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+}
+
+/* Handle system error transitions */
+static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_pm_state cur_state, prev_state;
+ enum dev_st_transition next_state;
+ struct mhi_event *mhi_event;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_event_ctxt *er_ctxt;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret, i;
+
+ dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
+
+	/* We must notify the MHI controller driver first so it can clean up */
+ mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ prev_state = mhi_cntrl->pm_state;
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
+ dev_err(dev, "Failed to transition from PM state: %s to: %s\n",
+ to_mhi_pm_state_str(cur_state),
+ to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
+ goto exit_sys_error_transition;
+ }
+
+ mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
+ mhi_cntrl->dev_state = MHI_STATE_RESET;
+
+ /* Wake up threads waiting for state transition */
+ wake_up_all(&mhi_cntrl->state_event);
+
+ /* Trigger MHI RESET so that the device will not access host memory */
+ if (MHI_REG_ACCESS_VALID(prev_state)) {
+ u32 in_reset = -1;
+ unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
+
+ dev_dbg(dev, "Triggering MHI Reset in device\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+
+ /* Wait for the reset bit to be cleared by the device */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_read_reg_field(mhi_cntrl,
+ mhi_cntrl->regs,
+ MHICTRL,
+ MHICTRL_RESET_MASK,
+ &in_reset) ||
+ !in_reset, timeout);
+ if (!ret || in_reset) {
+ dev_err(dev, "Device failed to exit MHI Reset state\n");
+ goto exit_sys_error_transition;
+ }
+
+ /*
+ * Device will clear BHI_INTVEC as a part of RESET processing,
+ * hence re-program it
+ */
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+ }
+
+ dev_dbg(dev,
+ "Waiting for all pending event ring processing to complete\n");
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+ tasklet_kill(&mhi_event->task);
+ }
+
+ /* Release lock and wait for all pending threads to complete */
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ dev_dbg(dev, "Waiting for all pending threads to complete\n");
+ wake_up_all(&mhi_cntrl->state_event);
+
+ dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
+ device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+
+ WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
+ WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));
+
+ /* Reset the ev rings and cmd rings */
+ dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
+ mhi_cmd = mhi_cntrl->mhi_cmd;
+ cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
+ struct mhi_ring *ring = &mhi_cmd->ring;
+
+ ring->rp = ring->base;
+ ring->wp = ring->base;
+ cmd_ctxt->rp = cmd_ctxt->rbase;
+ cmd_ctxt->wp = cmd_ctxt->rbase;
+ }
+
+ mhi_event = mhi_cntrl->mhi_event;
+ er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
+ mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ /* Skip offload events */
+ if (mhi_event->offload_ev)
+ continue;
+
+ ring->rp = ring->base;
+ ring->wp = ring->base;
+ er_ctxt->rp = er_ctxt->rbase;
+ er_ctxt->wp = er_ctxt->rbase;
+ }
+
+ /* Transition to next state */
+ if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (cur_state != MHI_PM_POR) {
+ dev_err(dev, "Error moving to state %s from %s\n",
+ to_mhi_pm_state_str(MHI_PM_POR),
+ to_mhi_pm_state_str(cur_state));
+ goto exit_sys_error_transition;
+ }
+ next_state = DEV_ST_TRANSITION_PBL;
+ } else {
+ next_state = DEV_ST_TRANSITION_READY;
+ }
+
+ mhi_queue_state_transition(mhi_cntrl, next_state);
+
+exit_sys_error_transition:
+ dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ mhi_state_str(mhi_cntrl->dev_state));
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+}
+
+/* Queue a new work item and schedule work */
+int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
+ enum dev_st_transition state)
+{
+ struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
+ unsigned long flags;
+
+ if (!item)
+ return -ENOMEM;
+
+ item->state = state;
+ spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
+ list_add_tail(&item->node, &mhi_cntrl->transition_list);
+ spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);
+
+ queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);
+
+ return 0;
+}
+
+/* SYS_ERR worker */
+void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ /* skip if controller supports RDDM */
+ if (mhi_cntrl->rddm_image) {
+ dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
+ return;
+ }
+
+ mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
+}
+
+/* Device State Transition worker */
+void mhi_pm_st_worker(struct work_struct *work)
+{
+ struct state_transition *itr, *tmp;
+ LIST_HEAD(head);
+ struct mhi_controller *mhi_cntrl = container_of(work,
+ struct mhi_controller,
+ st_worker);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ spin_lock_irq(&mhi_cntrl->transition_lock);
+ list_splice_tail_init(&mhi_cntrl->transition_list, &head);
+ spin_unlock_irq(&mhi_cntrl->transition_lock);
+
+ list_for_each_entry_safe(itr, tmp, &head, node) {
+ list_del(&itr->node);
+ dev_dbg(dev, "Handling state transition: %s\n",
+ TO_DEV_STATE_TRANS_STR(itr->state));
+
+ switch (itr->state) {
+ case DEV_ST_TRANSITION_PBL:
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+ mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ mhi_fw_load_handler(mhi_cntrl);
+ break;
+ case DEV_ST_TRANSITION_SBL:
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->ee = MHI_EE_SBL;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ /*
+ * The MHI devices are only created when the client
+ * device switches its Execution Environment (EE) to
+ * either SBL or AMSS states
+ */
+ mhi_create_devices(mhi_cntrl);
+ if (mhi_cntrl->fbc_download)
+ mhi_download_amss_image(mhi_cntrl);
+ break;
+ case DEV_ST_TRANSITION_MISSION_MODE:
+ mhi_pm_mission_mode_transition(mhi_cntrl);
+ break;
+ case DEV_ST_TRANSITION_FP:
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_cntrl->ee = MHI_EE_FP;
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ mhi_create_devices(mhi_cntrl);
+ break;
+ case DEV_ST_TRANSITION_READY:
+ mhi_ready_state_transition(mhi_cntrl);
+ break;
+ case DEV_ST_TRANSITION_SYS_ERR:
+ mhi_pm_sys_error_transition(mhi_cntrl);
+ break;
+ case DEV_ST_TRANSITION_DISABLE:
+ mhi_pm_disable_transition(mhi_cntrl);
+ break;
+ default:
+ break;
+ }
+ kfree(itr);
+ }
+}
+
+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_chan *itr, *tmp;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state new_state;
+ int ret;
+
+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+ return -EINVAL;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ /* Return busy if there are any pending resources */
+ if (atomic_read(&mhi_cntrl->dev_wake) ||
+ atomic_read(&mhi_cntrl->pending_pkts))
+ return -EBUSY;
+
+ /* Take MHI out of M2 state */
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M0 ||
+ mhi_cntrl->dev_state == MHI_STATE_M1 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		dev_err(dev, "Could not enter M0/M1 state\n");
+ return -EIO;
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+
+ if (atomic_read(&mhi_cntrl->dev_wake) ||
+ atomic_read(&mhi_cntrl->pending_pkts)) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ return -EBUSY;
+ }
+
+ dev_dbg(dev, "Allowing M3 transition\n");
+ new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
+ if (new_state != MHI_PM_M3_ENTER) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_err(dev,
+ "Error setting to PM state: %s from: %s\n",
+ to_mhi_pm_state_str(MHI_PM_M3_ENTER),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Set MHI to M3 and wait for completion */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_dbg(dev, "Waiting for M3 completion\n");
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M3 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev,
+ "Did not enter M3 state, MHI state: %s, PM state: %s\n",
+ mhi_state_str(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Notify clients about entering LPM */
+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+ mutex_lock(&itr->mutex);
+ if (itr->mhi_dev)
+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
+ mutex_unlock(&itr->mutex);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_pm_suspend);
+
+static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force)
+{
+ struct mhi_chan *itr, *tmp;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state cur_state;
+ int ret;
+
+ dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ mhi_state_str(mhi_cntrl->dev_state));
+
+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+ return 0;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) {
+ dev_warn(dev, "Resuming from non M3 state (%s)\n",
+ mhi_state_str(mhi_get_mhi_state(mhi_cntrl)));
+ if (!force)
+ return -EINVAL;
+ }
+
+ /* Notify clients about exiting LPM */
+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+ mutex_lock(&itr->mutex);
+ if (itr->mhi_dev)
+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
+ mutex_unlock(&itr->mutex);
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
+ if (cur_state != MHI_PM_M3_EXIT) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_info(dev,
+ "Error setting to PM state: %s from: %s\n",
+ to_mhi_pm_state_str(MHI_PM_M3_EXIT),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Set MHI to M0 and wait for completion */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M0 ||
+ mhi_cntrl->dev_state == MHI_STATE_M2 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev,
+ "Did not enter M0 state, MHI state: %s, PM state: %s\n",
+ mhi_state_str(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+{
+ return __mhi_pm_resume(mhi_cntrl, false);
+}
+EXPORT_SYMBOL_GPL(mhi_pm_resume);
+
+int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl)
+{
+ return __mhi_pm_resume(mhi_cntrl, true);
+}
+EXPORT_SYMBOL_GPL(mhi_pm_resume_force);
+
+int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
+{
+ int ret;
+
+ /* Wake up the device */
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ return -EIO;
+ }
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+ mhi_trigger_resume(mhi_cntrl);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->pm_state == MHI_PM_M0 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Assert device wake db */
+static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
+{
+ unsigned long flags;
+
+ /*
+ * If force flag is set, then increment the wake count value and
+ * ring wake db
+ */
+ if (unlikely(force)) {
+ spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+ atomic_inc(&mhi_cntrl->dev_wake);
+ if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
+ !mhi_cntrl->wake_set) {
+ mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
+ mhi_cntrl->wake_set = true;
+ }
+ spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+ } else {
+ /*
+ * If resources are already requested, then just increment
+ * the wake count value and return
+ */
+ if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
+ return;
+
+ spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+ if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
+ MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
+ !mhi_cntrl->wake_set) {
+ mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
+ mhi_cntrl->wake_set = true;
+ }
+ spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+ }
+}
+
+/* De-assert device wake db */
+static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
+ bool override)
+{
+ unsigned long flags;
+
+ /*
+ * Only continue if there is a single resource, else just decrement
+ * and return
+ */
+ if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
+ return;
+
+ spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+ if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
+ MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
+ mhi_cntrl->wake_set) {
+ mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
+ mhi_cntrl->wake_set = false;
+ }
+ spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+}
+
+int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+ enum mhi_state state;
+ enum mhi_ee_type current_ee;
+ enum dev_st_transition next_state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 interval_us = 25000; /* poll register field every 25 milliseconds */
+ int ret, i;
+
+ dev_info(dev, "Requested to power ON\n");
+
+ /* Supply default wake routines if not provided by controller driver */
+ if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
+ !mhi_cntrl->wake_toggle) {
+ mhi_cntrl->wake_get = mhi_assert_dev_wake;
+ mhi_cntrl->wake_put = mhi_deassert_dev_wake;
+ mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
+ mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
+ }
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+ mhi_cntrl->pm_state = MHI_PM_DISABLE;
+
+ /* Setup BHI INTVEC */
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+ mhi_cntrl->pm_state = MHI_PM_POR;
+ mhi_cntrl->ee = MHI_EE_MAX;
+ current_ee = mhi_get_exec_env(mhi_cntrl);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ /* Confirm that the device is in valid exec env */
+ if (!MHI_POWER_UP_CAPABLE(current_ee)) {
+ dev_err(dev, "%s is not a valid EE for power on\n",
+ TO_MHI_EXEC_STR(current_ee));
+ ret = -EIO;
+ goto error_exit;
+ }
+
+ state = mhi_get_mhi_state(mhi_cntrl);
+ dev_dbg(dev, "Attempting power on with EE: %s, state: %s\n",
+ TO_MHI_EXEC_STR(current_ee), mhi_state_str(state));
+
+ if (state == MHI_STATE_SYS_ERR) {
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_RESET_MASK, 0, interval_us);
+ if (ret) {
+ dev_info(dev, "Failed to reset MHI due to syserr state\n");
+ goto error_exit;
+ }
+
+		/*
+		 * The device clears BHI_INTVEC as part of RESET processing,
+		 * hence re-program it
+		 */
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+ }
+
+ /* IRQs have been requested during probe, so we just need to enable them. */
+ enable_irq(mhi_cntrl->irq[0]);
+
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->offload_ev)
+ continue;
+
+ enable_irq(mhi_cntrl->irq[mhi_event->irq]);
+ }
+
+ /* Transition to next state */
+ next_state = MHI_IN_PBL(current_ee) ?
+ DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;
+
+ mhi_queue_state_transition(mhi_cntrl, next_state);
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ dev_info(dev, "Power on setup success\n");
+
+ return 0;
+
+error_exit:
+ mhi_cntrl->pm_state = MHI_PM_DISABLE;
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_async_power_up);
+
+void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
+{
+ enum mhi_pm_state cur_state, transition_state;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_cntrl->pm_state;
+ if (cur_state == MHI_PM_DISABLE) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+ return; /* Already powered down */
+ }
+
+ /* If it's not a graceful shutdown, force MHI to linkdown state */
+ transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
+ MHI_PM_LD_ERR_FATAL_DETECT;
+
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
+ if (cur_state != transition_state) {
+ dev_err(dev, "Failed to move to state: %s from: %s\n",
+ to_mhi_pm_state_str(transition_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ /* Force link down or error fatal detected state */
+ mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+ }
+
+ /* mark device inactive to avoid any further host processing */
+ mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
+ mhi_cntrl->dev_state = MHI_STATE_RESET;
+
+ wake_up_all(&mhi_cntrl->state_event);
+
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);
+
+ /* Wait for shutdown to complete */
+ flush_work(&mhi_cntrl->st_worker);
+
+ disable_irq(mhi_cntrl->irq[0]);
+}
+EXPORT_SYMBOL_GPL(mhi_power_down);
+
+int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
+{
+ int ret = mhi_async_power_up(mhi_cntrl);
+
+ if (ret)
+ return ret;
+
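+	/* Wait for the device to reach mission mode (or an error state) */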
+ wait_event_timeout(mhi_cntrl->state_event,
+ MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
+ if (ret)
+ mhi_power_down(mhi_cntrl, false);
+
+ return ret;
+}
+EXPORT_SYMBOL(mhi_sync_power_up);
+
+int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ /* Check if device is already in RDDM */
+ if (mhi_cntrl->ee == MHI_EE_RDDM)
+ return 0;
+
+ dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+
+ /* Wait for RDDM event */
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->ee == MHI_EE_RDDM,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ ret = ret ? 0 : -EIO;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);
+
+void mhi_device_get(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ mhi_dev->dev_wake++;
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+ mhi_trigger_resume(mhi_cntrl);
+
+ mhi_cntrl->wake_get(mhi_cntrl, true);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+}
+EXPORT_SYMBOL_GPL(mhi_device_get);
+
+int mhi_device_get_sync(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ int ret;
+
+ ret = __mhi_device_get_sync(mhi_cntrl);
+ if (!ret)
+ mhi_dev->dev_wake++;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_device_get_sync);
+
+void mhi_device_put(struct mhi_device *mhi_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ mhi_dev->dev_wake--;
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+ mhi_trigger_resume(mhi_cntrl);
+
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+}
+EXPORT_SYMBOL_GPL(mhi_device_put);
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
new file mode 100644
index 0000000000..554e1992ed
--- /dev/null
+++ b/drivers/bus/mips_cdmm.c
@@ -0,0 +1,698 @@
+/*
+ * Bus driver for MIPS Common Device Memory Map (CDMM).
+ *
+ * Copyright (C) 2014-2015 Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/atomic.h>
+#include <linux/err.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <asm/cdmm.h>
+#include <asm/hazards.h>
+#include <asm/mipsregs.h>
+
+/* Access control and status register fields */
+#define CDMM_ACSR_DEVTYPE_SHIFT 24
+#define CDMM_ACSR_DEVTYPE (255ul << CDMM_ACSR_DEVTYPE_SHIFT)
+#define CDMM_ACSR_DEVSIZE_SHIFT 16
+#define CDMM_ACSR_DEVSIZE (31ul << CDMM_ACSR_DEVSIZE_SHIFT)
+#define CDMM_ACSR_DEVREV_SHIFT 12
+#define CDMM_ACSR_DEVREV (15ul << CDMM_ACSR_DEVREV_SHIFT)
+#define CDMM_ACSR_UW (1ul << 3)
+#define CDMM_ACSR_UR (1ul << 2)
+#define CDMM_ACSR_SW (1ul << 1)
+#define CDMM_ACSR_SR (1ul << 0)
+
+/* Each block of device registers is 64 bytes */
+#define CDMM_DRB_SIZE 64
+
+#define to_mips_cdmm_driver(d) container_of(d, struct mips_cdmm_driver, drv)
+
+/* Default physical base address */
+static phys_addr_t mips_cdmm_default_base;
+
+/* Bus operations */
+
+static const struct mips_cdmm_device_id *
+mips_cdmm_lookup(const struct mips_cdmm_device_id *table,
+ struct mips_cdmm_device *dev)
+{
+ int ret = 0;
+
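+	/* Scan the zero-terminated ID table for a matching device type */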
+ for (; table->type; ++table) {
+ ret = (dev->type == table->type);
+ if (ret)
+ break;
+ }
+
+ return ret ? table : NULL;
+}
+
+static int mips_cdmm_match(struct device *dev, struct device_driver *drv)
+{
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
+ struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(drv);
+
+ return mips_cdmm_lookup(cdrv->id_table, cdev) != NULL;
+}
+
+static int mips_cdmm_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
+ int retval = 0;
+
+ retval = add_uevent_var(env, "CDMM_CPU=%u", cdev->cpu);
+ if (retval)
+ return retval;
+
+ retval = add_uevent_var(env, "CDMM_TYPE=0x%02x", cdev->type);
+ if (retval)
+ return retval;
+
+ retval = add_uevent_var(env, "CDMM_REV=%u", cdev->rev);
+ if (retval)
+ return retval;
+
+ retval = add_uevent_var(env, "MODALIAS=mipscdmm:t%02X", cdev->type);
+ return retval;
+}
+
+/* Device attributes */
+
+#define CDMM_ATTR(name, fmt, arg...) \
+static ssize_t name##_show(struct device *_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct mips_cdmm_device *dev = to_mips_cdmm_device(_dev); \
+ return sprintf(buf, fmt, arg); \
+} \
+static DEVICE_ATTR_RO(name);
+
+CDMM_ATTR(cpu, "%u\n", dev->cpu);
+CDMM_ATTR(type, "0x%02x\n", dev->type);
+CDMM_ATTR(revision, "%u\n", dev->rev);
+CDMM_ATTR(modalias, "mipscdmm:t%02X\n", dev->type);
+CDMM_ATTR(resource, "\t%016llx\t%016llx\t%016lx\n",
+ (unsigned long long)dev->res.start,
+ (unsigned long long)dev->res.end,
+ dev->res.flags);
+
+static struct attribute *mips_cdmm_dev_attrs[] = {
+ &dev_attr_cpu.attr,
+ &dev_attr_type.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_resource.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mips_cdmm_dev);
+
+struct bus_type mips_cdmm_bustype = {
+ .name = "cdmm",
+ .dev_groups = mips_cdmm_dev_groups,
+ .match = mips_cdmm_match,
+ .uevent = mips_cdmm_uevent,
+};
+EXPORT_SYMBOL_GPL(mips_cdmm_bustype);
+
+/*
+ * Standard driver callback helpers.
+ *
+ * All the CDMM driver callbacks need to be executed on the appropriate CPU from
+ * workqueues. For the standard driver callbacks we need a work function
+ * (mips_cdmm_{void,int}_work()) to do the actual call from the right CPU, and a
+ * wrapper function (generated with BUILD_PERCPU_HELPER) to arrange for the work
+ * function to be called on that CPU.
+ */
+
+/**
+ * struct mips_cdmm_work_dev - Data for per-device call work.
+ * @fn: CDMM driver callback function to call for the device.
+ * @dev: CDMM device to pass to @fn.
+ */
+struct mips_cdmm_work_dev {
+ void *fn;
+ struct mips_cdmm_device *dev;
+};
+
+/**
+ * mips_cdmm_void_work() - Call a void returning CDMM driver callback.
+ * @data: struct mips_cdmm_work_dev pointer.
+ *
+ * A work_on_cpu() callback function to call an arbitrary CDMM driver callback
+ * function which doesn't return a value.
+ */
+static long mips_cdmm_void_work(void *data)
+{
+ struct mips_cdmm_work_dev *work = data;
+ void (*fn)(struct mips_cdmm_device *) = work->fn;
+
+ fn(work->dev);
+ return 0;
+}
+
+/**
+ * mips_cdmm_int_work() - Call an int returning CDMM driver callback.
+ * @data: struct mips_cdmm_work_dev pointer.
+ *
+ * A work_on_cpu() callback function to call an arbitrary CDMM driver callback
+ * function which returns an int.
+ */
+static long mips_cdmm_int_work(void *data)
+{
+ struct mips_cdmm_work_dev *work = data;
+ int (*fn)(struct mips_cdmm_device *) = work->fn;
+
+ return fn(work->dev);
+}
+
+#define _BUILD_RET_void
+#define _BUILD_RET_int return
+
+/**
+ * BUILD_PERCPU_HELPER() - Helper to call a CDMM driver callback on right CPU.
+ * @_ret: Return type (void or int).
+ * @_name: Name of CDMM driver callback function.
+ *
+ * Generates a specific device callback function to call a CDMM driver callback
+ * function on the appropriate CPU for the device, and if applicable return the
+ * result.
+ */
+#define BUILD_PERCPU_HELPER(_ret, _name) \
+static _ret mips_cdmm_##_name(struct device *dev) \
+{ \
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev); \
+ struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(dev->driver); \
+ struct mips_cdmm_work_dev work = { \
+ .fn = cdrv->_name, \
+ .dev = cdev, \
+ }; \
+ \
+ _BUILD_RET_##_ret work_on_cpu(cdev->cpu, \
+ mips_cdmm_##_ret##_work, &work); \
+}
+
+/* Driver callback functions */
+BUILD_PERCPU_HELPER(int, probe) /* int mips_cdmm_probe(struct device) */
+BUILD_PERCPU_HELPER(int, remove) /* int mips_cdmm_remove(struct device) */
+BUILD_PERCPU_HELPER(void, shutdown) /* void mips_cdmm_shutdown(struct device) */
+
+
+/* Driver registration */
+
+/**
+ * mips_cdmm_driver_register() - Register a CDMM driver.
+ * @drv: CDMM driver information.
+ *
+ * Register a CDMM driver with the CDMM subsystem. The driver will be informed
+ * of matching devices which are discovered.
+ *
+ * Returns: 0 on success.
+ */
+int mips_cdmm_driver_register(struct mips_cdmm_driver *drv)
+{
+ drv->drv.bus = &mips_cdmm_bustype;
+
+ if (drv->probe)
+ drv->drv.probe = mips_cdmm_probe;
+ if (drv->remove)
+ drv->drv.remove = mips_cdmm_remove;
+ if (drv->shutdown)
+ drv->drv.shutdown = mips_cdmm_shutdown;
+
+ return driver_register(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(mips_cdmm_driver_register);
+
+/**
+ * mips_cdmm_driver_unregister() - Unregister a CDMM driver.
+ * @drv: CDMM driver information.
+ *
+ * Unregister a CDMM driver from the CDMM subsystem.
+ */
+void mips_cdmm_driver_unregister(struct mips_cdmm_driver *drv)
+{
+ driver_unregister(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(mips_cdmm_driver_unregister);
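+
+/*
+ * A minimal, illustrative sketch (not part of this driver) of a CDMM client
+ * registering itself. The "my_cdmm" names and the 0xfd type code are made up
+ * for the example; a real driver matches on its hardware's type code from
+ * the ACSR register:
+ *
+ *	static int my_cdmm_probe(struct mips_cdmm_device *cdev)
+ *	{
+ *		dev_info(&cdev->dev, "probed, rev %u\n", cdev->rev);
+ *		return 0;
+ *	}
+ *
+ *	static const struct mips_cdmm_device_id my_cdmm_ids[] = {
+ *		{ .type = 0xfd },
+ *		{ }
+ *	};
+ *
+ *	static struct mips_cdmm_driver my_cdmm_driver = {
+ *		.drv		= { .name = "my_cdmm" },
+ *		.probe		= my_cdmm_probe,
+ *		.id_table	= my_cdmm_ids,
+ *	};
+ *
+ * The driver is then registered with mips_cdmm_driver_register(&my_cdmm_driver)
+ * from module init and removed with mips_cdmm_driver_unregister() on exit.
+ */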
+
+
+/* CDMM initialisation and bus discovery */
+
+/**
+ * struct mips_cdmm_bus - Info about CDMM bus.
+ * @phys: Physical address at which it is mapped.
+ * @regs: Virtual address where registers can be accessed.
+ * @drbs: Total number of DRBs.
+ * @drbs_reserved: Number of DRBs reserved.
+ * @discovered: Whether the devices on the bus have been discovered yet.
+ * @offline: Whether the CDMM bus is going offline (or very early
+ * coming back online), in which case it should be
+ * reconfigured each time.
+ */
+struct mips_cdmm_bus {
+ phys_addr_t phys;
+ void __iomem *regs;
+ unsigned int drbs;
+ unsigned int drbs_reserved;
+ bool discovered;
+ bool offline;
+};
+
+static struct mips_cdmm_bus mips_cdmm_boot_bus;
+static DEFINE_PER_CPU(struct mips_cdmm_bus *, mips_cdmm_buses);
+static atomic_t mips_cdmm_next_id = ATOMIC_INIT(-1);
+
+/**
+ * mips_cdmm_get_bus() - Get the per-CPU CDMM bus information.
+ *
+ * Get information about the per-CPU CDMM bus, if the bus is present.
+ *
+ * The caller must prevent migration to another CPU, either by disabling
+ * pre-emption or by running from a pinned kernel thread.
+ *
+ * Returns: Pointer to CDMM bus information for the current CPU.
+ * May return ERR_PTR(-errno) in case of error, so check with
+ * IS_ERR().
+ */
+static struct mips_cdmm_bus *mips_cdmm_get_bus(void)
+{
+ struct mips_cdmm_bus *bus, **bus_p;
+ unsigned long flags;
+ unsigned int cpu;
+
+ if (!cpu_has_cdmm)
+ return ERR_PTR(-ENODEV);
+
+ cpu = smp_processor_id();
+	/* Avoid early use of per-cpu primitives before they are initialised */
+ if (cpu == 0)
+ return &mips_cdmm_boot_bus;
+
+ /* Get bus pointer */
+ bus_p = per_cpu_ptr(&mips_cdmm_buses, cpu);
+ local_irq_save(flags);
+ bus = *bus_p;
+ /* Attempt allocation if NULL */
+ if (unlikely(!bus)) {
+ bus = kzalloc(sizeof(*bus), GFP_ATOMIC);
+ if (unlikely(!bus))
+ bus = ERR_PTR(-ENOMEM);
+ else
+ *bus_p = bus;
+ }
+ local_irq_restore(flags);
+ return bus;
+}
+
+/**
+ * mips_cdmm_cur_base() - Find current physical base address of CDMM region.
+ *
+ * Returns: Physical base address of CDMM region according to cdmmbase CP0
+ * register, or 0 if the CDMM region is disabled.
+ */
+static phys_addr_t mips_cdmm_cur_base(void)
+{
+ unsigned long cdmmbase = read_c0_cdmmbase();
+
+ if (!(cdmmbase & MIPS_CDMMBASE_EN))
+ return 0;
+
+ return (cdmmbase >> MIPS_CDMMBASE_ADDR_SHIFT)
+ << MIPS_CDMMBASE_ADDR_START;
+}
+
+/**
+ * mips_cdmm_phys_base() - Choose a physical base address for CDMM region.
+ *
+ * Picking a suitable physical address at which to map the CDMM region is
+ * platform specific, so this weak function can be overridden by platform
+ * code to pick a suitable value if none is configured by the bootloader.
+ * By default this method tries to find a CDMM-specific node in the system
+ * dtb. Note that this won't work for the early serial console.
+ */
+phys_addr_t __weak mips_cdmm_phys_base(void)
+{
+ struct device_node *np;
+ struct resource res;
+ int err;
+
+ np = of_find_compatible_node(NULL, NULL, "mti,mips-cdmm");
+ if (np) {
+ err = of_address_to_resource(np, 0, &res);
+ of_node_put(np);
+ if (!err)
+ return res.start;
+ }
+
+ return 0;
+}
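+
+/*
+ * A minimal sketch of a platform override, assuming a board where the CDMM
+ * region can live at a fixed, otherwise unused physical address (the
+ * 0x1fc10000 value below is purely illustrative):
+ *
+ *	phys_addr_t mips_cdmm_phys_base(void)
+ *	{
+ *		return 0x1fc10000;
+ *	}
+ *
+ * Being a strong definition, platform code like this overrides the weak
+ * device tree lookup above and also works for the early console case.
+ */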
+
+/**
+ * mips_cdmm_setup() - Ensure the CDMM bus is initialised and usable.
+ * @bus: Pointer to bus information for current CPU.
+ * IS_ERR(bus) is checked, so no need for caller to check.
+ *
+ * The caller must prevent migration to another CPU, either by disabling
+ * pre-emption or by running from a pinned kernel thread.
+ *
+ * Returns 0 on success, -errno on failure.
+ */
+static int mips_cdmm_setup(struct mips_cdmm_bus *bus)
+{
+ unsigned long cdmmbase, flags;
+ int ret = 0;
+
+ if (IS_ERR(bus))
+ return PTR_ERR(bus);
+
+ local_irq_save(flags);
+	/* Don't set up the bus a second time unless marked offline */
+ if (bus->offline) {
+ /* If CDMM region is still set up, nothing to do */
+ if (bus->phys == mips_cdmm_cur_base())
+ goto out;
+ /*
+ * The CDMM region isn't set up as expected, so it needs
+ * reconfiguring, but then we can stop checking it.
+ */
+ bus->offline = false;
+ } else if (bus->phys > 1) {
+ goto out;
+ }
+
+ /* If the CDMM region is already configured, inherit that setup */
+ if (!bus->phys)
+ bus->phys = mips_cdmm_cur_base();
+ /* Otherwise, ask platform code for suggestions */
+ if (!bus->phys)
+ bus->phys = mips_cdmm_phys_base();
+ /* Otherwise, copy what other CPUs have done */
+ if (!bus->phys)
+ bus->phys = mips_cdmm_default_base;
+ /* Otherwise, complain once */
+ if (!bus->phys) {
+ bus->phys = 1;
+ /*
+ * If you hit this, either your bootloader needs to set up the
+ * CDMM on the boot CPU, or else you need to implement
+ * mips_cdmm_phys_base() for your platform (see asm/cdmm.h).
+ */
+ pr_err("cdmm%u: Failed to choose a physical base\n",
+ smp_processor_id());
+ }
+ /* Already complained? */
+ if (bus->phys == 1) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ /* Record our success for other CPUs to copy */
+ mips_cdmm_default_base = bus->phys;
+
+ pr_debug("cdmm%u: Enabling CDMM region at %pa\n",
+ smp_processor_id(), &bus->phys);
+
+ /* Enable CDMM */
+ cdmmbase = read_c0_cdmmbase();
+ cdmmbase &= (1ul << MIPS_CDMMBASE_ADDR_SHIFT) - 1;
+ cdmmbase |= (bus->phys >> MIPS_CDMMBASE_ADDR_START)
+ << MIPS_CDMMBASE_ADDR_SHIFT;
+ cdmmbase |= MIPS_CDMMBASE_EN;
+ write_c0_cdmmbase(cdmmbase);
+ tlbw_use_hazard();
+
+ bus->regs = (void __iomem *)CKSEG1ADDR(bus->phys);
+ bus->drbs = 1 + ((cdmmbase & MIPS_CDMMBASE_SIZE) >>
+ MIPS_CDMMBASE_SIZE_SHIFT);
+ bus->drbs_reserved = !!(cdmmbase & MIPS_CDMMBASE_CI);
+
+out:
+ local_irq_restore(flags);
+ return ret;
+}
+
+/**
+ * mips_cdmm_early_probe() - Minimally probe for a specific device on CDMM.
+ * @dev_type: CDMM type code to look for.
+ *
+ * Minimally configure the in-CPU Common Device Memory Map (CDMM) and look for a
+ * specific device. This can be used to find a device very early in boot, for
+ * example to configure an early FDC console device.
+ *
+ * The caller must prevent migration to another CPU, either by disabling
+ * pre-emption or by running from a pinned kernel thread.
+ *
+ * Returns: MMIO pointer to device memory. The caller can read the ACSR
+ * register to find more information about the device (such as the
+ * version number or the number of blocks).
+ * May return IOMEM_ERR_PTR(-errno) in case of error, so check with
+ * IS_ERR().
+ */
+void __iomem *mips_cdmm_early_probe(unsigned int dev_type)
+{
+ struct mips_cdmm_bus *bus;
+ void __iomem *cdmm;
+ u32 acsr;
+ unsigned int drb, type, size;
+ int err;
+
+ if (WARN_ON(!dev_type))
+ return IOMEM_ERR_PTR(-ENODEV);
+
+ bus = mips_cdmm_get_bus();
+ err = mips_cdmm_setup(bus);
+ if (err)
+ return IOMEM_ERR_PTR(err);
+
+ /* Skip the first block if it's reserved for more registers */
+ drb = bus->drbs_reserved;
+ cdmm = bus->regs;
+
+ /* Look for a specific device type */
+ for (; drb < bus->drbs; drb += size + 1) {
+ acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE);
+ type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
+ if (type == dev_type)
+ return cdmm + drb * CDMM_DRB_SIZE;
+ size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT;
+ }
+
+ return IOMEM_ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(mips_cdmm_early_probe);
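+
+/*
+ * Usage sketch for the early probe path (the 0xfd type code is illustrative
+ * only, not a real device type):
+ *
+ *	void __iomem *regs = mips_cdmm_early_probe(0xfd);
+ *
+ *	if (IS_ERR(regs))
+ *		return PTR_ERR(regs);
+ *	acsr = __raw_readl(regs);
+ *
+ * The first word of the returned register block is the device's ACSR, from
+ * which fields such as CDMM_ACSR_DEVREV and CDMM_ACSR_DEVSIZE can be
+ * extracted before the driver model is available.
+ */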
+
+/**
+ * mips_cdmm_release() - Release a removed CDMM device.
+ * @dev: Device object
+ *
+ * Clean up the struct mips_cdmm_device for an unused CDMM device. This is
+ * called automatically by the driver core when a device is removed.
+ */
+static void mips_cdmm_release(struct device *dev)
+{
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
+
+ kfree(cdev);
+}
+
+/**
+ * mips_cdmm_bus_discover() - Discover the devices on the CDMM bus.
+ * @bus: CDMM bus information, must already be set up.
+ */
+static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus)
+{
+ void __iomem *cdmm;
+ u32 acsr;
+ unsigned int drb, type, size, rev;
+ struct mips_cdmm_device *dev;
+ unsigned int cpu = smp_processor_id();
+ int ret = 0;
+ int id = 0;
+
+ /* Skip the first block if it's reserved for more registers */
+ drb = bus->drbs_reserved;
+ cdmm = bus->regs;
+
+ /* Discover devices */
+ bus->discovered = true;
+ pr_info("cdmm%u discovery (%u blocks)\n", cpu, bus->drbs);
+ for (; drb < bus->drbs; drb += size + 1) {
+ acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE);
+ type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
+ size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT;
+ rev = (acsr & CDMM_ACSR_DEVREV) >> CDMM_ACSR_DEVREV_SHIFT;
+
+ if (!type)
+ continue;
+
+ pr_info("cdmm%u-%u: @%u (%#x..%#x), type 0x%02x, rev %u\n",
+ cpu, id, drb, drb * CDMM_DRB_SIZE,
+ (drb + size + 1) * CDMM_DRB_SIZE - 1,
+ type, rev);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ break;
+
+ dev->cpu = cpu;
+ dev->res.start = bus->phys + drb * CDMM_DRB_SIZE;
+ dev->res.end = bus->phys +
+ (drb + size + 1) * CDMM_DRB_SIZE - 1;
+ dev->res.flags = IORESOURCE_MEM;
+ dev->type = type;
+ dev->rev = rev;
+ dev->dev.parent = get_cpu_device(cpu);
+ dev->dev.bus = &mips_cdmm_bustype;
+ dev->dev.id = atomic_inc_return(&mips_cdmm_next_id);
+ dev->dev.release = mips_cdmm_release;
+
+ dev_set_name(&dev->dev, "cdmm%u-%u", cpu, id);
+ ++id;
+ ret = device_register(&dev->dev);
+ if (ret)
+ put_device(&dev->dev);
+ }
+}
+
+
+/*
+ * CPU hotplug and initialisation
+ *
+ * All the CDMM driver callbacks need to be executed on the appropriate CPU from
+ * workqueues. For the CPU callbacks, they need to be called for all devices on
+ * that CPU, so the work function calls bus_for_each_dev, using a helper
+ * (generated with BUILD_PERDEV_HELPER) to call the driver callback if the
+ * device's CPU matches.
+ */
+
+/**
+ * BUILD_PERDEV_HELPER() - Helper to call a CDMM driver callback if CPU matches.
+ * @_name: Name of CDMM driver callback function.
+ *
+ * Generates a bus_for_each_dev callback function to call a specific CDMM driver
+ * callback function for the device if the device's CPU matches that pointed to
+ * by the data argument.
+ *
+ * This is used for informing drivers for all devices on a given CPU of some
+ * event (such as the CPU going online/offline).
+ *
+ * It is expected to already be called from the appropriate CPU.
+ */
+#define BUILD_PERDEV_HELPER(_name) \
+static int mips_cdmm_##_name##_helper(struct device *dev, void *data) \
+{ \
+ struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev); \
+ struct mips_cdmm_driver *cdrv; \
+ unsigned int cpu = *(unsigned int *)data; \
+ \
+ if (cdev->cpu != cpu || !dev->driver) \
+ return 0; \
+ \
+ cdrv = to_mips_cdmm_driver(dev->driver); \
+ if (!cdrv->_name) \
+ return 0; \
+ return cdrv->_name(cdev); \
+}
+
+/* bus_for_each_dev callback helper functions */
+BUILD_PERDEV_HELPER(cpu_down) /* int mips_cdmm_cpu_down_helper(...) */
+BUILD_PERDEV_HELPER(cpu_up) /* int mips_cdmm_cpu_up_helper(...) */
+
+/**
+ * mips_cdmm_cpu_down_prep() - Callback for CPUHP DOWN_PREP:
+ * Tear down the CDMM bus.
+ * @cpu: unsigned int CPU number.
+ *
+ * This function is executed on the hotplugged CPU and calls the CDMM
+ * driver cpu_down callback for all devices on that CPU.
+ */
+static int mips_cdmm_cpu_down_prep(unsigned int cpu)
+{
+ struct mips_cdmm_bus *bus;
+ long ret;
+
+ /* Inform all the devices on the bus */
+ ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu,
+ mips_cdmm_cpu_down_helper);
+
+ /*
+	 * While the bus is offline, each use of it should reconfigure it, just
+	 * in case that use is the first one after the bus comes back online.
+ */
+ bus = mips_cdmm_get_bus();
+ if (!IS_ERR(bus))
+ bus->offline = true;
+
+ return ret;
+}
+
+/**
+ * mips_cdmm_cpu_online() - Callback for CPUHP ONLINE: Bring up the CDMM bus.
+ * @cpu: unsigned int CPU number.
+ *
+ * This work_on_cpu callback function is executed on a given CPU to discover
+ * CDMM devices on that CPU, or to call the CDMM driver cpu_up callback for all
+ * devices already discovered on that CPU.
+ *
+ * It is used as work_on_cpu callback function during
+ * initialisation. When CPUs are brought online the function is
+ * invoked directly on the hotplugged CPU.
+ */
+static int mips_cdmm_cpu_online(unsigned int cpu)
+{
+ struct mips_cdmm_bus *bus;
+ long ret;
+
+ bus = mips_cdmm_get_bus();
+ ret = mips_cdmm_setup(bus);
+ if (ret)
+ return ret;
+
+ /* Bus now set up, so we can drop the offline flag if still set */
+ bus->offline = false;
+
+ if (!bus->discovered)
+ mips_cdmm_bus_discover(bus);
+ else
+ /* Inform all the devices on the bus */
+ ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu,
+ mips_cdmm_cpu_up_helper);
+
+ return ret;
+}
+
+/**
+ * mips_cdmm_init() - Initialise CDMM bus.
+ *
+ * Initialise CDMM bus, discover CDMM devices for online CPUs, and arrange for
+ * hotplug notifications so the CDMM drivers can be kept up to date.
+ */
+static int __init mips_cdmm_init(void)
+{
+ int ret;
+
+ /* Register the bus */
+ ret = bus_register(&mips_cdmm_bustype);
+ if (ret)
+ return ret;
+
+ /* We want to be notified about new CPUs */
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "bus/cdmm:online",
+ mips_cdmm_cpu_online, mips_cdmm_cpu_down_prep);
+ if (ret < 0)
+ pr_warn("cdmm: Failed to register CPU notifier\n");
+
+ return ret;
+}
+subsys_initcall(mips_cdmm_init);
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
new file mode 100644
index 0000000000..e384fbc6c1
--- /dev/null
+++ b/drivers/bus/moxtet.c
@@ -0,0 +1,889 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Turris Mox module configuration bus driver
+ *
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
+ */
+
+#include <dt-bindings/bus/moxtet.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moxtet.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/spi/spi.h>
+
+/*
+ * @name: module name for sysfs
+ * @hwirq_base: base index for IRQ for this module (-1 if no IRQs)
+ * @nirqs: how many interrupts the shift register provides
+ * @desc: module description for kernel log
+ */
+static const struct {
+ const char *name;
+ int hwirq_base;
+ int nirqs;
+ const char *desc;
+} mox_module_table[] = {
+ /* do not change order of this array! */
+ { NULL, 0, 0, NULL },
+ { "sfp", -1, 0, "MOX D (SFP cage)" },
+ { "pci", MOXTET_IRQ_PCI, 1, "MOX B (Mini-PCIe)" },
+ { "topaz", MOXTET_IRQ_TOPAZ, 1, "MOX C (4 port switch)" },
+ { "peridot", MOXTET_IRQ_PERIDOT(0), 1, "MOX E (8 port switch)" },
+ { "usb3", MOXTET_IRQ_USB3, 2, "MOX F (USB 3.0)" },
+ { "pci-bridge", -1, 0, "MOX G (Mini-PCIe bridge)" },
+};
+
+static inline bool mox_module_known(unsigned int id)
+{
+ return id >= TURRIS_MOX_MODULE_FIRST && id <= TURRIS_MOX_MODULE_LAST;
+}
+
+static inline const char *mox_module_name(unsigned int id)
+{
+ if (mox_module_known(id))
+ return mox_module_table[id].name;
+ else
+ return "unknown";
+}
+
+#define DEF_MODULE_ATTR(name, fmt, ...) \
+static ssize_t \
+module_##name##_show(struct device *dev, struct device_attribute *a, \
+ char *buf) \
+{ \
+ struct moxtet_device *mdev = to_moxtet_device(dev); \
+ return sprintf(buf, (fmt), __VA_ARGS__); \
+} \
+static DEVICE_ATTR_RO(module_##name)
+
+DEF_MODULE_ATTR(id, "0x%x\n", mdev->id);
+DEF_MODULE_ATTR(name, "%s\n", mox_module_name(mdev->id));
+DEF_MODULE_ATTR(description, "%s\n",
+ mox_module_known(mdev->id) ? mox_module_table[mdev->id].desc
+ : "");
+
+static struct attribute *moxtet_dev_attrs[] = {
+ &dev_attr_module_id.attr,
+ &dev_attr_module_name.attr,
+ &dev_attr_module_description.attr,
+ NULL,
+};
+
+static const struct attribute_group moxtet_dev_group = {
+ .attrs = moxtet_dev_attrs,
+};
+
+static const struct attribute_group *moxtet_dev_groups[] = {
+ &moxtet_dev_group,
+ NULL,
+};
+
+static int moxtet_match(struct device *dev, struct device_driver *drv)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+ struct moxtet_driver *tdrv = to_moxtet_driver(drv);
+ const enum turris_mox_module_id *t;
+
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ if (!tdrv->id_table)
+ return 0;
+
+ for (t = tdrv->id_table; *t; ++t)
+ if (*t == mdev->id)
+ return 1;
+
+ return 0;
+}
+
+static struct bus_type moxtet_bus_type = {
+ .name = "moxtet",
+ .dev_groups = moxtet_dev_groups,
+ .match = moxtet_match,
+};
+
+int __moxtet_register_driver(struct module *owner,
+ struct moxtet_driver *mdrv)
+{
+ mdrv->driver.owner = owner;
+ mdrv->driver.bus = &moxtet_bus_type;
+ return driver_register(&mdrv->driver);
+}
+EXPORT_SYMBOL_GPL(__moxtet_register_driver);
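+
+/*
+ * A minimal sketch of a Moxtet client driver (the "example" names are made
+ * up here); the id_table is a zero-terminated list of turris_mox_module_id
+ * values and the probe callback lives in the embedded device_driver:
+ *
+ *	static const enum turris_mox_module_id example_ids[] = {
+ *		TURRIS_MOX_MODULE_SFP,
+ *		0,
+ *	};
+ *
+ *	static struct moxtet_driver example_driver = {
+ *		.driver = {
+ *			.name	= "moxtet-example",
+ *			.probe	= example_probe,
+ *		},
+ *		.id_table = example_ids,
+ *	};
+ *
+ * and is registered with __moxtet_register_driver(THIS_MODULE,
+ * &example_driver), normally through a wrapper macro.
+ */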
+
+static int moxtet_dev_check(struct device *dev, void *data)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+ struct moxtet_device *new_dev = data;
+
+ if (mdev->moxtet == new_dev->moxtet && mdev->id == new_dev->id &&
+ mdev->idx == new_dev->idx)
+ return -EBUSY;
+ return 0;
+}
+
+static void moxtet_dev_release(struct device *dev)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+
+ put_device(mdev->moxtet->dev);
+ kfree(mdev);
+}
+
+static struct moxtet_device *
+moxtet_alloc_device(struct moxtet *moxtet)
+{
+ struct moxtet_device *dev;
+
+ if (!get_device(moxtet->dev))
+ return NULL;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ put_device(moxtet->dev);
+ return NULL;
+ }
+
+ dev->moxtet = moxtet;
+ dev->dev.parent = moxtet->dev;
+ dev->dev.bus = &moxtet_bus_type;
+ dev->dev.release = moxtet_dev_release;
+
+ device_initialize(&dev->dev);
+
+ return dev;
+}
+
+static int moxtet_add_device(struct moxtet_device *dev)
+{
+ static DEFINE_MUTEX(add_mutex);
+ int ret;
+
+ if (dev->idx >= TURRIS_MOX_MAX_MODULES || dev->id > 0xf)
+ return -EINVAL;
+
+ dev_set_name(&dev->dev, "moxtet-%s.%u", mox_module_name(dev->id),
+ dev->idx);
+
+ mutex_lock(&add_mutex);
+
+ ret = bus_for_each_dev(&moxtet_bus_type, NULL, dev,
+ moxtet_dev_check);
+ if (ret)
+ goto done;
+
+ ret = device_add(&dev->dev);
+ if (ret < 0)
+ dev_err(dev->moxtet->dev, "can't add %s, status %d\n",
+ dev_name(dev->moxtet->dev), ret);
+
+done:
+ mutex_unlock(&add_mutex);
+ return ret;
+}
+
+static int __unregister(struct device *dev, void *null)
+{
+ if (dev->of_node) {
+ of_node_clear_flag(dev->of_node, OF_POPULATED);
+ of_node_put(dev->of_node);
+ }
+
+ device_unregister(dev);
+
+ return 0;
+}
+
+static struct moxtet_device *
+of_register_moxtet_device(struct moxtet *moxtet, struct device_node *nc)
+{
+ struct moxtet_device *dev;
+ u32 val;
+ int ret;
+
+ dev = moxtet_alloc_device(moxtet);
+ if (!dev) {
+ dev_err(moxtet->dev,
+ "Moxtet device alloc error for %pOF\n", nc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = of_property_read_u32(nc, "reg", &val);
+ if (ret) {
+ dev_err(moxtet->dev, "%pOF has no valid 'reg' property (%d)\n",
+ nc, ret);
+ goto err_put;
+ }
+
+ dev->idx = val;
+
+ if (dev->idx >= TURRIS_MOX_MAX_MODULES) {
+ dev_err(moxtet->dev, "%pOF Moxtet address 0x%x out of range\n",
+ nc, dev->idx);
+ ret = -EINVAL;
+ goto err_put;
+ }
+
+ dev->id = moxtet->modules[dev->idx];
+
+ if (!dev->id) {
+ dev_err(moxtet->dev, "%pOF Moxtet address 0x%x is empty\n", nc,
+ dev->idx);
+ ret = -ENODEV;
+ goto err_put;
+ }
+
+ of_node_get(nc);
+ dev->dev.of_node = nc;
+
+ ret = moxtet_add_device(dev);
+ if (ret) {
+ dev_err(moxtet->dev,
+ "Moxtet device register error for %pOF\n", nc);
+ of_node_put(nc);
+ goto err_put;
+ }
+
+ return dev;
+
+err_put:
+ put_device(&dev->dev);
+ return ERR_PTR(ret);
+}
+
+static void of_register_moxtet_devices(struct moxtet *moxtet)
+{
+ struct moxtet_device *dev;
+ struct device_node *nc;
+
+ if (!moxtet->dev->of_node)
+ return;
+
+ for_each_available_child_of_node(moxtet->dev->of_node, nc) {
+ if (of_node_test_and_set_flag(nc, OF_POPULATED))
+ continue;
+ dev = of_register_moxtet_device(moxtet, nc);
+ if (IS_ERR(dev)) {
+ dev_warn(moxtet->dev,
+ "Failed to create Moxtet device for %pOF\n",
+ nc);
+ of_node_clear_flag(nc, OF_POPULATED);
+ }
+ }
+}
+
+static void
+moxtet_register_devices_from_topology(struct moxtet *moxtet)
+{
+ struct moxtet_device *dev;
+ int i, ret;
+
+ for (i = 0; i < moxtet->count; ++i) {
+ dev = moxtet_alloc_device(moxtet);
+ if (!dev) {
+ dev_err(moxtet->dev, "Moxtet device %u alloc error\n",
+ i);
+ continue;
+ }
+
+ dev->idx = i;
+ dev->id = moxtet->modules[i];
+
+ ret = moxtet_add_device(dev);
+ if (ret && ret != -EBUSY) {
+ put_device(&dev->dev);
+ dev_err(moxtet->dev,
+ "Moxtet device %u register error: %i\n", i,
+ ret);
+ }
+ }
+}
+
+/*
+ * @nsame: how many modules with same id are already in moxtet->modules
+ */
+static int moxtet_set_irq(struct moxtet *moxtet, int idx, int id, int nsame)
+{
+ int i, first;
+ struct moxtet_irqpos *pos;
+
+ first = mox_module_table[id].hwirq_base +
+ nsame * mox_module_table[id].nirqs;
+
+ if (first + mox_module_table[id].nirqs > MOXTET_NIRQS)
+ return -EINVAL;
+
+ for (i = 0; i < mox_module_table[id].nirqs; ++i) {
+ pos = &moxtet->irq.position[first + i];
+ pos->idx = idx;
+ pos->bit = i;
+ moxtet->irq.exists |= BIT(first + i);
+ }
+
+ return 0;
+}
+
+static int moxtet_find_topology(struct moxtet *moxtet)
+{
+ u8 buf[TURRIS_MOX_MAX_MODULES];
+ int cnts[TURRIS_MOX_MODULE_LAST];
+ int i, ret;
+
+ memset(cnts, 0, sizeof(cnts));
+
+ ret = spi_read(to_spi_device(moxtet->dev), buf, TURRIS_MOX_MAX_MODULES);
+ if (ret < 0)
+ return ret;
+
+ if (buf[0] == TURRIS_MOX_CPU_ID_EMMC) {
+ dev_info(moxtet->dev, "Found MOX A (eMMC CPU) module\n");
+ } else if (buf[0] == TURRIS_MOX_CPU_ID_SD) {
+ dev_info(moxtet->dev, "Found MOX A (CPU) module\n");
+ } else {
+ dev_err(moxtet->dev, "Invalid Turris MOX A CPU module 0x%02x\n",
+ buf[0]);
+ return -ENODEV;
+ }
+
+ moxtet->count = 0;
+
+ for (i = 1; i < TURRIS_MOX_MAX_MODULES; ++i) {
+ int id;
+
+ if (buf[i] == 0xff)
+ break;
+
+ id = buf[i] & 0xf;
+
+ moxtet->modules[i-1] = id;
+ ++moxtet->count;
+
+ if (mox_module_known(id)) {
+ dev_info(moxtet->dev, "Found %s module\n",
+ mox_module_table[id].desc);
+
+ if (moxtet_set_irq(moxtet, i-1, id, cnts[id]++) < 0)
+ dev_err(moxtet->dev,
+ " Cannot set IRQ for module %s\n",
+ mox_module_table[id].desc);
+ } else {
+ dev_warn(moxtet->dev,
+ "Unknown Moxtet module found (ID 0x%02x)\n",
+ id);
+ }
+ }
+
+ return 0;
+}
+
+static int moxtet_spi_read(struct moxtet *moxtet, u8 *buf)
+{
+ struct spi_transfer xfer = {
+ .rx_buf = buf,
+ .tx_buf = moxtet->tx,
+ .len = moxtet->count + 1
+ };
+ int ret;
+
+ mutex_lock(&moxtet->lock);
+
+ ret = spi_sync_transfer(to_spi_device(moxtet->dev), &xfer, 1);
+
+ mutex_unlock(&moxtet->lock);
+
+ return ret;
+}
+
+int moxtet_device_read(struct device *dev)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+ struct moxtet *moxtet = mdev->moxtet;
+ u8 buf[TURRIS_MOX_MAX_MODULES];
+ int ret;
+
+ if (mdev->idx >= moxtet->count)
+ return -EINVAL;
+
+ ret = moxtet_spi_read(moxtet, buf);
+ if (ret < 0)
+ return ret;
+
+ return buf[mdev->idx + 1] >> 4;
+}
+EXPORT_SYMBOL_GPL(moxtet_device_read);
+
+int moxtet_device_write(struct device *dev, u8 val)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+ struct moxtet *moxtet = mdev->moxtet;
+ int ret;
+
+ if (mdev->idx >= moxtet->count)
+ return -EINVAL;
+
+ mutex_lock(&moxtet->lock);
+
+ moxtet->tx[moxtet->count - mdev->idx] = val;
+
+ ret = spi_write(to_spi_device(moxtet->dev), moxtet->tx,
+ moxtet->count + 1);
+
+ mutex_unlock(&moxtet->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(moxtet_device_write);
+
+int moxtet_device_written(struct device *dev)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+ struct moxtet *moxtet = mdev->moxtet;
+
+ if (mdev->idx >= moxtet->count)
+ return -EINVAL;
+
+ return moxtet->tx[moxtet->count - mdev->idx];
+}
+EXPORT_SYMBOL_GPL(moxtet_device_written);
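+
+/*
+ * Sketch of how a child module driver typically uses the accessors above
+ * (the 0x5 value and its bit meanings are purely illustrative; each module
+ * defines its own input and output nibble layout):
+ *
+ *	int in = moxtet_device_read(dev);
+ *	...
+ *	ret = moxtet_device_write(dev, 0x5);
+ *
+ * moxtet_device_written() returns the value last queued for the shift
+ * register, which allows read-modify-write of individual output bits.
+ */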
+
+#ifdef CONFIG_DEBUG_FS
+static int moxtet_debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t input_read(struct file *file, char __user *buf, size_t len,
+ loff_t *ppos)
+{
+ struct moxtet *moxtet = file->private_data;
+ u8 bin[TURRIS_MOX_MAX_MODULES];
+ u8 hex[sizeof(bin) * 2 + 1];
+ int ret, n;
+
+ ret = moxtet_spi_read(moxtet, bin);
+ if (ret < 0)
+ return ret;
+
+ n = moxtet->count + 1;
+ bin2hex(hex, bin, n);
+
+ hex[2*n] = '\n';
+
+ return simple_read_from_buffer(buf, len, ppos, hex, 2*n + 1);
+}
+
+static const struct file_operations input_fops = {
+ .owner = THIS_MODULE,
+ .open = moxtet_debug_open,
+ .read = input_read,
+ .llseek = no_llseek,
+};
+
+static ssize_t output_read(struct file *file, char __user *buf, size_t len,
+ loff_t *ppos)
+{
+ struct moxtet *moxtet = file->private_data;
+ u8 hex[TURRIS_MOX_MAX_MODULES * 2 + 1];
+ u8 *p = hex;
+ int i;
+
+ mutex_lock(&moxtet->lock);
+
+ for (i = 0; i < moxtet->count; ++i)
+ p = hex_byte_pack(p, moxtet->tx[moxtet->count - i]);
+
+ mutex_unlock(&moxtet->lock);
+
+ *p++ = '\n';
+
+ return simple_read_from_buffer(buf, len, ppos, hex, p - hex);
+}
+
+static ssize_t output_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct moxtet *moxtet = file->private_data;
+ u8 bin[TURRIS_MOX_MAX_MODULES];
+ u8 hex[sizeof(bin) * 2 + 1];
+ ssize_t res;
+ loff_t dummy = 0;
+ int err, i;
+
+ if (len > 2 * moxtet->count + 1 || len < 2 * moxtet->count)
+ return -EINVAL;
+
+ res = simple_write_to_buffer(hex, sizeof(hex), &dummy, buf, len);
+ if (res < 0)
+ return res;
+
+ if (len % 2 == 1 && hex[len - 1] != '\n')
+ return -EINVAL;
+
+ err = hex2bin(bin, hex, moxtet->count);
+ if (err < 0)
+ return -EINVAL;
+
+ mutex_lock(&moxtet->lock);
+
+ for (i = 0; i < moxtet->count; ++i)
+ moxtet->tx[moxtet->count - i] = bin[i];
+
+ err = spi_write(to_spi_device(moxtet->dev), moxtet->tx,
+ moxtet->count + 1);
+
+ mutex_unlock(&moxtet->lock);
+
+ return err < 0 ? err : len;
+}
+
+static const struct file_operations output_fops = {
+ .owner = THIS_MODULE,
+ .open = moxtet_debug_open,
+ .read = output_read,
+ .write = output_write,
+ .llseek = no_llseek,
+};
+
+static int moxtet_register_debugfs(struct moxtet *moxtet)
+{
+ struct dentry *root, *entry;
+
+ root = debugfs_create_dir("moxtet", NULL);
+
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ entry = debugfs_create_file_unsafe("input", 0444, root, moxtet,
+ &input_fops);
+ if (IS_ERR(entry))
+ goto err_remove;
+
+ entry = debugfs_create_file_unsafe("output", 0644, root, moxtet,
+ &output_fops);
+ if (IS_ERR(entry))
+ goto err_remove;
+
+ moxtet->debugfs_root = root;
+
+ return 0;
+err_remove:
+ debugfs_remove_recursive(root);
+ return PTR_ERR(entry);
+}
+
+static void moxtet_unregister_debugfs(struct moxtet *moxtet)
+{
+ debugfs_remove_recursive(moxtet->debugfs_root);
+}
+#else
+static inline int moxtet_register_debugfs(struct moxtet *moxtet)
+{
+ return 0;
+}
+
+static inline void moxtet_unregister_debugfs(struct moxtet *moxtet)
+{
+}
+#endif
+
+static int moxtet_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct moxtet *moxtet = d->host_data;
+
+ if (hw >= MOXTET_NIRQS || !(moxtet->irq.exists & BIT(hw))) {
+ dev_err(moxtet->dev, "Invalid hw irq number\n");
+ return -EINVAL;
+ }
+
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &moxtet->irq.chip, handle_level_irq);
+
+ return 0;
+}
+
+static int moxtet_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ struct moxtet *moxtet = d->host_data;
+ int irq;
+
+ if (WARN_ON(intsize < 1))
+ return -EINVAL;
+
+ irq = intspec[0];
+
+ if (irq >= MOXTET_NIRQS || !(moxtet->irq.exists & BIT(irq)))
+ return -EINVAL;
+
+ *out_hwirq = irq;
+ *out_type = IRQ_TYPE_NONE;
+ return 0;
+}
+
+static const struct irq_domain_ops moxtet_irq_domain = {
+ .map = moxtet_irq_domain_map,
+ .xlate = moxtet_irq_domain_xlate,
+};
+
+static void moxtet_irq_mask(struct irq_data *d)
+{
+ struct moxtet *moxtet = irq_data_get_irq_chip_data(d);
+
+ moxtet->irq.masked |= BIT(d->hwirq);
+}
+
+static void moxtet_irq_unmask(struct irq_data *d)
+{
+ struct moxtet *moxtet = irq_data_get_irq_chip_data(d);
+
+ moxtet->irq.masked &= ~BIT(d->hwirq);
+}
+
+static void moxtet_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct moxtet *moxtet = irq_data_get_irq_chip_data(d);
+ struct moxtet_irqpos *pos = &moxtet->irq.position[d->hwirq];
+ int id;
+
+ id = moxtet->modules[pos->idx];
+
+ seq_printf(p, " moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
+ pos->bit);
+}
+
+static const struct irq_chip moxtet_irq_chip = {
+ .name = "moxtet",
+ .irq_mask = moxtet_irq_mask,
+ .irq_unmask = moxtet_irq_unmask,
+ .irq_print_chip = moxtet_irq_print_chip,
+};
+
+static int moxtet_irq_read(struct moxtet *moxtet, unsigned long *map)
+{
+ struct moxtet_irqpos *pos = moxtet->irq.position;
+ u8 buf[TURRIS_MOX_MAX_MODULES];
+ int i, ret;
+
+ ret = moxtet_spi_read(moxtet, buf);
+ if (ret < 0)
+ return ret;
+
+ *map = 0;
+
+ for_each_set_bit(i, &moxtet->irq.exists, MOXTET_NIRQS) {
+ if (!(buf[pos[i].idx + 1] & BIT(4 + pos[i].bit)))
+ set_bit(i, map);
+ }
+
+ return 0;
+}
+
+static irqreturn_t moxtet_irq_thread_fn(int irq, void *data)
+{
+ struct moxtet *moxtet = data;
+ unsigned long set;
+ int nhandled = 0, i, sub_irq, ret;
+
+ ret = moxtet_irq_read(moxtet, &set);
+ if (ret < 0)
+ goto out;
+
+ set &= ~moxtet->irq.masked;
+
+ do {
+ for_each_set_bit(i, &set, MOXTET_NIRQS) {
+ sub_irq = irq_find_mapping(moxtet->irq.domain, i);
+ handle_nested_irq(sub_irq);
+ dev_dbg(moxtet->dev, "%i irq\n", i);
+ ++nhandled;
+ }
+
+ ret = moxtet_irq_read(moxtet, &set);
+ if (ret < 0)
+ goto out;
+
+ set &= ~moxtet->irq.masked;
+ } while (set);
+
+out:
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
+static void moxtet_irq_free(struct moxtet *moxtet)
+{
+ int i, irq;
+
+ for (i = 0; i < MOXTET_NIRQS; ++i) {
+ if (moxtet->irq.exists & BIT(i)) {
+ irq = irq_find_mapping(moxtet->irq.domain, i);
+ irq_dispose_mapping(irq);
+ }
+ }
+
+ irq_domain_remove(moxtet->irq.domain);
+}
+
+static int moxtet_irq_setup(struct moxtet *moxtet)
+{
+ int i, ret;
+
+ moxtet->irq.domain = irq_domain_add_simple(moxtet->dev->of_node,
+ MOXTET_NIRQS, 0,
+ &moxtet_irq_domain, moxtet);
+ if (moxtet->irq.domain == NULL) {
+ dev_err(moxtet->dev, "Could not add IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < MOXTET_NIRQS; ++i)
+ if (moxtet->irq.exists & BIT(i))
+ irq_create_mapping(moxtet->irq.domain, i);
+
+ moxtet->irq.chip = moxtet_irq_chip;
+ moxtet->irq.masked = ~0;
+
+ ret = request_threaded_irq(moxtet->dev_irq, NULL, moxtet_irq_thread_fn,
+ IRQF_SHARED | IRQF_ONESHOT, "moxtet", moxtet);
+ if (ret < 0)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ moxtet_irq_free(moxtet);
+ return ret;
+}
+
+static int moxtet_probe(struct spi_device *spi)
+{
+ struct moxtet *moxtet;
+ int ret;
+
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return ret;
+
+ moxtet = devm_kzalloc(&spi->dev, sizeof(struct moxtet),
+ GFP_KERNEL);
+ if (!moxtet)
+ return -ENOMEM;
+
+ moxtet->dev = &spi->dev;
+ spi_set_drvdata(spi, moxtet);
+
+ mutex_init(&moxtet->lock);
+
+ moxtet->dev_irq = of_irq_get(moxtet->dev->of_node, 0);
+ if (moxtet->dev_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (moxtet->dev_irq <= 0) {
+ dev_err(moxtet->dev, "No IRQ resource found\n");
+ return -ENXIO;
+ }
+
+ ret = moxtet_find_topology(moxtet);
+ if (ret < 0)
+ return ret;
+
+ if (moxtet->irq.exists) {
+ ret = moxtet_irq_setup(moxtet);
+ if (ret < 0)
+ return ret;
+ }
+
+ of_register_moxtet_devices(moxtet);
+ moxtet_register_devices_from_topology(moxtet);
+
+ ret = moxtet_register_debugfs(moxtet);
+ if (ret < 0)
+ dev_warn(moxtet->dev, "Failed creating debugfs entries: %i\n",
+ ret);
+
+ return 0;
+}
+
+static void moxtet_remove(struct spi_device *spi)
+{
+ struct moxtet *moxtet = spi_get_drvdata(spi);
+
+ free_irq(moxtet->dev_irq, moxtet);
+
+ moxtet_irq_free(moxtet);
+
+ moxtet_unregister_debugfs(moxtet);
+
+ device_for_each_child(moxtet->dev, NULL, __unregister);
+
+ mutex_destroy(&moxtet->lock);
+}
+
+static const struct spi_device_id moxtet_spi_ids[] = {
+ { "moxtet" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, moxtet_spi_ids);
+
+static const struct of_device_id moxtet_dt_ids[] = {
+ { .compatible = "cznic,moxtet" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, moxtet_dt_ids);
+
+static struct spi_driver moxtet_spi_driver = {
+ .driver = {
+ .name = "moxtet",
+ .of_match_table = moxtet_dt_ids,
+ },
+ .id_table = moxtet_spi_ids,
+ .probe = moxtet_probe,
+ .remove = moxtet_remove,
+};
+
+static int __init moxtet_init(void)
+{
+ int ret;
+
+ ret = bus_register(&moxtet_bus_type);
+ if (ret < 0) {
+ pr_err("moxtet bus registration failed: %d\n", ret);
+ goto error;
+ }
+
+ ret = spi_register_driver(&moxtet_spi_driver);
+ if (ret < 0) {
+ pr_err("moxtet spi driver registration failed: %d\n", ret);
+ goto error_bus;
+ }
+
+ return 0;
+
+error_bus:
+ bus_unregister(&moxtet_bus_type);
+error:
+ return ret;
+}
+postcore_initcall_sync(moxtet_init);
+
+static void __exit moxtet_exit(void)
+{
+ spi_unregister_driver(&moxtet_spi_driver);
+ bus_unregister(&moxtet_bus_type);
+}
+module_exit(moxtet_exit);
+
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
+MODULE_DESCRIPTION("CZ.NIC's Turris Mox module configuration bus");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
new file mode 100644
index 0000000000..00cb792bda
--- /dev/null
+++ b/drivers/bus/mvebu-mbus.c
@@ -0,0 +1,1316 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Address map functions for Marvell EBU SoCs (Kirkwood, Armada
+ * 370/XP, Dove, Orion5x and MV78xx0)
+ *
+ * The Marvell EBU SoCs have a configurable physical address space:
+ * the physical address at which certain devices (PCIe, NOR, NAND,
+ * etc.) sit can be configured. The configuration takes place through
+ * two sets of registers:
+ *
+ * - One to configure the access of the CPU to the devices. Depending
+ *   on the families, there are between 8 and 20 configurable windows,
+ *   each of which can be used to create a physical memory window that
+ *   maps to a specific device. Devices are identified by a tuple (target,
+ * attribute).
+ *
+ * - One to configure the access of the CPU to the SDRAM. There are
+ * either 2 (for Dove) or 4 (for other families) windows to map the
+ * SDRAM into the physical address space.
+ *
+ * This driver:
+ *
+ * - Reads out the SDRAM address decoding windows at initialization
+ *   time, and fills the mvebu_mbus_dram_info structure with this
+ *   information. The exported function mv_mbus_dram_info() allows
+ *   device drivers to get the information related to the SDRAM
+ * address decoding windows. This is because devices also have their
+ * own windows (configured through registers that are part of each
+ * device register space), and therefore the drivers for Marvell
+ * devices have to configure those device -> SDRAM windows to ensure
+ * that DMA works properly.
+ *
+ * - Provides an API for platform code or device drivers to
+ * dynamically add or remove address decoding windows for the CPU ->
+ * device accesses. This API is mvebu_mbus_add_window_by_id(),
+ * mvebu_mbus_add_window_remap_by_id() and
+ * mvebu_mbus_del_window().
+ *
+ * - Provides a debugfs interface in /sys/kernel/debug/mvebu-mbus/ to
+ * see the list of CPU -> SDRAM windows and their configuration
+ * (file 'sdram') and the list of CPU -> devices windows and their
+ * configuration (file 'devices').
+ */
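+
+/*
+ * A minimal usage sketch of the window API mentioned above (the target,
+ * attribute, base and size values are placeholders, not a real device):
+ *
+ *	ret = mvebu_mbus_add_window_by_id(0x01, 0x2f, 0xf0000000, SZ_128M);
+ *	...
+ *	mvebu_mbus_del_window(0xf0000000, SZ_128M);
+ *
+ * Platform code typically obtains the (target, attribute) tuple from the SoC
+ * datasheet or from the MBus 'ranges' translation in the device tree.
+ */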
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mbus.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/debugfs.h>
+#include <linux/log2.h>
+#include <linux/memblock.h>
+#include <linux/syscore_ops.h>
+
+/*
+ * DDR target is the same on all platforms.
+ */
+#define TARGET_DDR 0
+
+/*
+ * CPU Address Decode Windows registers
+ */
+#define WIN_CTRL_OFF 0x0000
+#define WIN_CTRL_ENABLE BIT(0)
+/* Only on HW I/O coherency capable platforms */
+#define WIN_CTRL_SYNCBARRIER BIT(1)
+#define WIN_CTRL_TGT_MASK 0xf0
+#define WIN_CTRL_TGT_SHIFT 4
+#define WIN_CTRL_ATTR_MASK 0xff00
+#define WIN_CTRL_ATTR_SHIFT 8
+#define WIN_CTRL_SIZE_MASK 0xffff0000
+#define WIN_CTRL_SIZE_SHIFT 16
+#define WIN_BASE_OFF 0x0004
+#define WIN_BASE_LOW 0xffff0000
+#define WIN_BASE_HIGH 0xf
+#define WIN_REMAP_LO_OFF 0x0008
+#define WIN_REMAP_LOW 0xffff0000
+#define WIN_REMAP_HI_OFF 0x000c
+
+#define UNIT_SYNC_BARRIER_OFF 0x84
+#define UNIT_SYNC_BARRIER_ALL 0xFFFF
+
+#define ATTR_HW_COHERENCY (0x1 << 4)
+
+#define DDR_BASE_CS_OFF(n) (0x0000 + ((n) << 3))
+#define DDR_BASE_CS_HIGH_MASK 0xf
+#define DDR_BASE_CS_LOW_MASK 0xff000000
+#define DDR_SIZE_CS_OFF(n) (0x0004 + ((n) << 3))
+#define DDR_SIZE_ENABLED BIT(0)
+#define DDR_SIZE_CS_MASK 0x1c
+#define DDR_SIZE_CS_SHIFT 2
+#define DDR_SIZE_MASK 0xff000000
+
+#define DOVE_DDR_BASE_CS_OFF(n) ((n) << 4)
+
+/* Relative to mbusbridge_base */
+#define MBUS_BRIDGE_CTRL_OFF 0x0
+#define MBUS_BRIDGE_BASE_OFF 0x4
+
+/* Maximum number of windows, for all known platforms */
+#define MBUS_WINS_MAX 20
+
+struct mvebu_mbus_state;
+
+struct mvebu_mbus_soc_data {
+ unsigned int num_wins;
+ bool has_mbus_bridge;
+ unsigned int (*win_cfg_offset)(const int win);
+ unsigned int (*win_remap_offset)(const int win);
+ void (*setup_cpu_target)(struct mvebu_mbus_state *s);
+ int (*save_cpu_target)(struct mvebu_mbus_state *s,
+ u32 __iomem *store_addr);
+ int (*show_cpu_target)(struct mvebu_mbus_state *s,
+ struct seq_file *seq, void *v);
+};
+
+/*
+ * Used to store the state of one MBus window across suspend/resume.
+ */
+struct mvebu_mbus_win_data {
+ u32 ctrl;
+ u32 base;
+ u32 remap_lo;
+ u32 remap_hi;
+};
+
+struct mvebu_mbus_state {
+ void __iomem *mbuswins_base;
+ void __iomem *sdramwins_base;
+ void __iomem *mbusbridge_base;
+ phys_addr_t sdramwins_phys_base;
+ struct dentry *debugfs_root;
+ struct dentry *debugfs_sdram;
+ struct dentry *debugfs_devs;
+ struct resource pcie_mem_aperture;
+ struct resource pcie_io_aperture;
+ const struct mvebu_mbus_soc_data *soc;
+ int hw_io_coherency;
+
+ /* Used during suspend/resume */
+ u32 mbus_bridge_ctrl;
+ u32 mbus_bridge_base;
+ struct mvebu_mbus_win_data wins[MBUS_WINS_MAX];
+};
+
+static struct mvebu_mbus_state mbus_state;
+
+/*
+ * We provide two variants of the mv_mbus_dram_info() function:
+ *
+ * - The normal one, where the described DRAM ranges may overlap with
+ * the I/O windows, but for which the DRAM ranges are guaranteed to
+ * have a power of two size. Such ranges are suitable for the DMA
+ * masters that only DMA between the RAM and the device, which is
+ * actually all devices except the crypto engines.
+ *
+ * - The 'nooverlap' one, where the described DRAM ranges are
+ * guaranteed to not overlap with the I/O windows, but for which the
+ * DRAM ranges will not have power of two sizes. They will only be
+ * aligned on a 64 KB boundary, and have a size multiple of 64
+ * KB. Such ranges are suitable for the DMA masters that DMA between
+ * the crypto SRAM (which is mapped through an I/O window) and a
+ * device. This is the case for the crypto engines.
+ */
+
+static struct mbus_dram_target_info mvebu_mbus_dram_info;
+static struct mbus_dram_target_info mvebu_mbus_dram_info_nooverlap;
+
+const struct mbus_dram_target_info *mv_mbus_dram_info(void)
+{
+ return &mvebu_mbus_dram_info;
+}
+EXPORT_SYMBOL_GPL(mv_mbus_dram_info);
+
+const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void)
+{
+ return &mvebu_mbus_dram_info_nooverlap;
+}
+EXPORT_SYMBOL_GPL(mv_mbus_dram_info_nooverlap);
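+
+/*
+ * Typical consumer pattern for the DRAM info, as found in Marvell DMA master
+ * drivers; my_set_window() is a stand-in for whatever device-specific helper
+ * programs the master's own address decoding registers:
+ *
+ *	const struct mbus_dram_target_info *dram = mv_mbus_dram_info();
+ *	int i;
+ *
+ *	for (i = 0; i < dram->num_cs; i++) {
+ *		const struct mbus_dram_window *cs = dram->cs + i;
+ *
+ *		my_set_window(i, cs->base, cs->size,
+ *			      dram->mbus_dram_target_id, cs->mbus_attr);
+ *	}
+ */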
+
+/* Checks whether the given window has remap capability */
+static bool mvebu_mbus_window_is_remappable(struct mvebu_mbus_state *mbus,
+ const int win)
+{
+ return mbus->soc->win_remap_offset(win) != MVEBU_MBUS_NO_REMAP;
+}
+
+/*
+ * Functions to manipulate the address decoding windows
+ */
+
+static void mvebu_mbus_read_window(struct mvebu_mbus_state *mbus,
+ int win, int *enabled, u64 *base,
+ u32 *size, u8 *target, u8 *attr,
+ u64 *remap)
+{
+ void __iomem *addr = mbus->mbuswins_base +
+ mbus->soc->win_cfg_offset(win);
+ u32 basereg = readl(addr + WIN_BASE_OFF);
+ u32 ctrlreg = readl(addr + WIN_CTRL_OFF);
+
+ if (!(ctrlreg & WIN_CTRL_ENABLE)) {
+ *enabled = 0;
+ return;
+ }
+
+ *enabled = 1;
+ *base = ((u64)basereg & WIN_BASE_HIGH) << 32;
+ *base |= (basereg & WIN_BASE_LOW);
+ *size = (ctrlreg | ~WIN_CTRL_SIZE_MASK) + 1;
+
+ if (target)
+ *target = (ctrlreg & WIN_CTRL_TGT_MASK) >> WIN_CTRL_TGT_SHIFT;
+
+ if (attr)
+ *attr = (ctrlreg & WIN_CTRL_ATTR_MASK) >> WIN_CTRL_ATTR_SHIFT;
+
+ if (remap) {
+ if (mvebu_mbus_window_is_remappable(mbus, win)) {
+ u32 remap_low, remap_hi;
+ void __iomem *addr_rmp = mbus->mbuswins_base +
+ mbus->soc->win_remap_offset(win);
+ remap_low = readl(addr_rmp + WIN_REMAP_LO_OFF);
+ remap_hi = readl(addr_rmp + WIN_REMAP_HI_OFF);
+ *remap = ((u64)remap_hi << 32) | remap_low;
+ } else
+ *remap = 0;
+ }
+}
+
+static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus,
+ int win)
+{
+ void __iomem *addr;
+
+ addr = mbus->mbuswins_base + mbus->soc->win_cfg_offset(win);
+ writel(0, addr + WIN_BASE_OFF);
+ writel(0, addr + WIN_CTRL_OFF);
+
+ if (mvebu_mbus_window_is_remappable(mbus, win)) {
+ addr = mbus->mbuswins_base + mbus->soc->win_remap_offset(win);
+ writel(0, addr + WIN_REMAP_LO_OFF);
+ writel(0, addr + WIN_REMAP_HI_OFF);
+ }
+}
+
+/* Checks whether the given window number is available */
+
+static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus,
+ const int win)
+{
+ void __iomem *addr = mbus->mbuswins_base +
+ mbus->soc->win_cfg_offset(win);
+ u32 ctrl = readl(addr + WIN_CTRL_OFF);
+
+ return !(ctrl & WIN_CTRL_ENABLE);
+}
+
+/*
+ * Checks whether the given (base, base+size) area doesn't overlap an
+ * existing region
+ */
+static int mvebu_mbus_window_conflicts(struct mvebu_mbus_state *mbus,
+ phys_addr_t base, size_t size,
+ u8 target, u8 attr)
+{
+ u64 end = (u64)base + size;
+ int win;
+
+ for (win = 0; win < mbus->soc->num_wins; win++) {
+ u64 wbase, wend;
+ u32 wsize;
+ u8 wtarget, wattr;
+ int enabled;
+
+ mvebu_mbus_read_window(mbus, win,
+ &enabled, &wbase, &wsize,
+ &wtarget, &wattr, NULL);
+
+ if (!enabled)
+ continue;
+
+ wend = wbase + wsize;
+
+ /*
+ * Check if the current window overlaps with the
+ * proposed physical range
+ */
+ if ((u64)base < wend && end > wbase)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int mvebu_mbus_find_window(struct mvebu_mbus_state *mbus,
+ phys_addr_t base, size_t size)
+{
+ int win;
+
+ for (win = 0; win < mbus->soc->num_wins; win++) {
+ u64 wbase;
+ u32 wsize;
+ int enabled;
+
+ mvebu_mbus_read_window(mbus, win,
+ &enabled, &wbase, &wsize,
+ NULL, NULL, NULL);
+
+ if (!enabled)
+ continue;
+
+ if (base == wbase && size == wsize)
+ return win;
+ }
+
+ return -ENODEV;
+}
+
+static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
+ int win, phys_addr_t base, size_t size,
+ phys_addr_t remap, u8 target,
+ u8 attr)
+{
+ void __iomem *addr = mbus->mbuswins_base +
+ mbus->soc->win_cfg_offset(win);
+ u32 ctrl, remap_addr;
+
+ if (!is_power_of_2(size)) {
+ WARN(true, "Invalid MBus window size: 0x%zx\n", size);
+ return -EINVAL;
+ }
+
+ if ((base & (phys_addr_t)(size - 1)) != 0) {
+ WARN(true, "Invalid MBus base/size: %pa len 0x%zx\n", &base,
+ size);
+ return -EINVAL;
+ }
+
+ ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
+ (attr << WIN_CTRL_ATTR_SHIFT) |
+ (target << WIN_CTRL_TGT_SHIFT) |
+ WIN_CTRL_ENABLE;
+ if (mbus->hw_io_coherency)
+ ctrl |= WIN_CTRL_SYNCBARRIER;
+
+ writel(base & WIN_BASE_LOW, addr + WIN_BASE_OFF);
+ writel(ctrl, addr + WIN_CTRL_OFF);
+
+ if (mvebu_mbus_window_is_remappable(mbus, win)) {
+ void __iomem *addr_rmp = mbus->mbuswins_base +
+ mbus->soc->win_remap_offset(win);
+
+ if (remap == MVEBU_MBUS_NO_REMAP)
+ remap_addr = base;
+ else
+ remap_addr = remap;
+ writel(remap_addr & WIN_REMAP_LOW, addr_rmp + WIN_REMAP_LO_OFF);
+ writel(0, addr_rmp + WIN_REMAP_HI_OFF);
+ }
+
+ return 0;
+}
+
+static int mvebu_mbus_alloc_window(struct mvebu_mbus_state *mbus,
+ phys_addr_t base, size_t size,
+ phys_addr_t remap, u8 target,
+ u8 attr)
+{
+ int win;
+
+ if (remap == MVEBU_MBUS_NO_REMAP) {
+ for (win = 0; win < mbus->soc->num_wins; win++) {
+ if (mvebu_mbus_window_is_remappable(mbus, win))
+ continue;
+
+ if (mvebu_mbus_window_is_free(mbus, win))
+ return mvebu_mbus_setup_window(mbus, win, base,
+ size, remap,
+ target, attr);
+ }
+ }
+
+ for (win = 0; win < mbus->soc->num_wins; win++) {
+ /* Skip window if need remap but is not supported */
+ if ((remap != MVEBU_MBUS_NO_REMAP) &&
+ !mvebu_mbus_window_is_remappable(mbus, win))
+ continue;
+
+ if (mvebu_mbus_window_is_free(mbus, win))
+ return mvebu_mbus_setup_window(mbus, win, base, size,
+ remap, target, attr);
+ }
+
+ return -ENOMEM;
+}
+
+/*
+ * Debugfs debugging
+ */
+
+/* Common function used for Dove, Kirkwood, Armada 370/XP and Orion 5x */
+static int mvebu_sdram_debug_show_orion(struct mvebu_mbus_state *mbus,
+ struct seq_file *seq, void *v)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ u32 basereg = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+ u32 sizereg = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+ u64 base;
+ u32 size;
+
+ if (!(sizereg & DDR_SIZE_ENABLED)) {
+ seq_printf(seq, "[%d] disabled\n", i);
+ continue;
+ }
+
+ base = ((u64)basereg & DDR_BASE_CS_HIGH_MASK) << 32;
+ base |= basereg & DDR_BASE_CS_LOW_MASK;
+ size = (sizereg | ~DDR_SIZE_MASK);
+
+ seq_printf(seq, "[%d] %016llx - %016llx : cs%d\n",
+ i, (unsigned long long)base,
+ (unsigned long long)base + size + 1,
+ (sizereg & DDR_SIZE_CS_MASK) >> DDR_SIZE_CS_SHIFT);
+ }
+
+ return 0;
+}
+
+/* Special function for Dove */
+static int mvebu_sdram_debug_show_dove(struct mvebu_mbus_state *mbus,
+ struct seq_file *seq, void *v)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
+ u64 base;
+ u32 size;
+
+ if (!(map & 1)) {
+ seq_printf(seq, "[%d] disabled\n", i);
+ continue;
+ }
+
+ base = map & 0xff800000;
+ size = 0x100000 << (((map & 0x000f0000) >> 16) - 4);
+
+ seq_printf(seq, "[%d] %016llx - %016llx : cs%d\n",
+ i, (unsigned long long)base,
+ (unsigned long long)base + size, i);
+ }
+
+ return 0;
+}
+
+static int mvebu_sdram_debug_show(struct seq_file *seq, void *v)
+{
+ struct mvebu_mbus_state *mbus = &mbus_state;
+ return mbus->soc->show_cpu_target(mbus, seq, v);
+}
+DEFINE_SHOW_ATTRIBUTE(mvebu_sdram_debug);
+
+static int mvebu_devs_debug_show(struct seq_file *seq, void *v)
+{
+ struct mvebu_mbus_state *mbus = &mbus_state;
+ int win;
+
+ for (win = 0; win < mbus->soc->num_wins; win++) {
+ u64 wbase, wremap;
+ u32 wsize;
+ u8 wtarget, wattr;
+ int enabled;
+
+ mvebu_mbus_read_window(mbus, win,
+ &enabled, &wbase, &wsize,
+ &wtarget, &wattr, &wremap);
+
+ if (!enabled) {
+ seq_printf(seq, "[%02d] disabled\n", win);
+ continue;
+ }
+
+ seq_printf(seq, "[%02d] %016llx - %016llx : %04x:%04x",
+ win, (unsigned long long)wbase,
+ (unsigned long long)(wbase + wsize), wtarget, wattr);
+
+ if (!is_power_of_2(wsize) ||
+ ((wbase & (u64)(wsize - 1)) != 0))
+ seq_puts(seq, " (Invalid base/size!!)");
+
+ if (mvebu_mbus_window_is_remappable(mbus, win)) {
+ seq_printf(seq, " (remap %016llx)\n",
+ (unsigned long long)wremap);
+ } else
+ seq_printf(seq, "\n");
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(mvebu_devs_debug);
+
+/*
+ * SoC-specific functions and definitions
+ */
+
+static unsigned int generic_mbus_win_cfg_offset(int win)
+{
+ return win << 4;
+}
+
+static unsigned int armada_370_xp_mbus_win_cfg_offset(int win)
+{
+ /* The register layout is a bit annoying and the below code
+ * tries to cope with it.
+ * - At offset 0x0, there are the registers for the first 8
+ * windows, with 4 registers of 32 bits per window (ctrl,
+ * base, remap low, remap high)
+ * - Then at offset 0x80, there is a hole of 0x10 bytes for
+ * the internal registers base address and internal units
+ * sync barrier register.
+	 * - Then at offset 0x90, there are the registers for the remaining 12
+ * windows, with only 2 registers of 32 bits per window
+ * (ctrl, base).
+ */
+ if (win < 8)
+ return win << 4;
+ else
+ return 0x90 + ((win - 8) << 3);
+}
+
+static unsigned int mv78xx0_mbus_win_cfg_offset(int win)
+{
+ if (win < 8)
+ return win << 4;
+ else
+ return 0x900 + ((win - 8) << 4);
+}
+
+static unsigned int generic_mbus_win_remap_2_offset(int win)
+{
+ if (win < 2)
+ return generic_mbus_win_cfg_offset(win);
+ else
+ return MVEBU_MBUS_NO_REMAP;
+}
+
+static unsigned int generic_mbus_win_remap_4_offset(int win)
+{
+ if (win < 4)
+ return generic_mbus_win_cfg_offset(win);
+ else
+ return MVEBU_MBUS_NO_REMAP;
+}
+
+static unsigned int generic_mbus_win_remap_8_offset(int win)
+{
+ if (win < 8)
+ return generic_mbus_win_cfg_offset(win);
+ else
+ return MVEBU_MBUS_NO_REMAP;
+}
+
+static unsigned int armada_xp_mbus_win_remap_offset(int win)
+{
+ if (win < 8)
+ return generic_mbus_win_cfg_offset(win);
+ else if (win == 13)
+ return 0xF0 - WIN_REMAP_LO_OFF;
+ else
+ return MVEBU_MBUS_NO_REMAP;
+}
+
+/*
+ * Use the memblock information to find the MBus bridge hole in the
+ * physical address space.
+ */
+static void __init
+mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end)
+{
+ phys_addr_t reg_start, reg_end;
+ uint64_t i, s = 0;
+
+ for_each_mem_range(i, &reg_start, &reg_end) {
+ /*
+		 * This part of the memory is above 4 GB, so it is not
+		 * relevant to the MBus bridge hole.
+ */
+ if ((u64)reg_start >= 0x100000000ULL)
+ continue;
+
+ /*
+ * The MBus bridge hole is at the end of the RAM under
+ * the 4 GB limit.
+ */
+ if (reg_end > s)
+ s = reg_end;
+ }
+
+ *start = s;
+ *end = 0x100000000ULL;
+}
+
+/*
+ * This function fills in the mvebu_mbus_dram_info_nooverlap data
+ * structure, by looking at the mvebu_mbus_dram_info data, and
+ * removing the parts of it that overlap with I/O windows.
+ */
+static void __init
+mvebu_mbus_setup_cpu_target_nooverlap(struct mvebu_mbus_state *mbus)
+{
+ uint64_t mbus_bridge_base, mbus_bridge_end;
+ int cs_nooverlap = 0;
+ int i;
+
+ mvebu_mbus_find_bridge_hole(&mbus_bridge_base, &mbus_bridge_end);
+
+ for (i = 0; i < mvebu_mbus_dram_info.num_cs; i++) {
+ struct mbus_dram_window *w;
+ u64 base, size, end;
+
+ w = &mvebu_mbus_dram_info.cs[i];
+ base = w->base;
+ size = w->size;
+ end = base + size;
+
+ /*
+ * The CS is fully enclosed inside the MBus bridge
+ * area, so ignore it.
+ */
+ if (base >= mbus_bridge_base && end <= mbus_bridge_end)
+ continue;
+
+ /*
+ * Beginning of CS overlaps with end of MBus, raise CS
+ * base address, and shrink its size.
+ */
+ if (base >= mbus_bridge_base && end > mbus_bridge_end) {
+ size -= mbus_bridge_end - base;
+ base = mbus_bridge_end;
+ }
+
+ /*
+ * End of CS overlaps with beginning of MBus, shrink
+ * CS size.
+ */
+ if (base < mbus_bridge_base && end > mbus_bridge_base)
+ size -= end - mbus_bridge_base;
+
+ w = &mvebu_mbus_dram_info_nooverlap.cs[cs_nooverlap++];
+ w->cs_index = i;
+ w->mbus_attr = 0xf & ~(1 << i);
+ if (mbus->hw_io_coherency)
+ w->mbus_attr |= ATTR_HW_COHERENCY;
+ w->base = base;
+ w->size = size;
+ }
+
+ mvebu_mbus_dram_info_nooverlap.mbus_dram_target_id = TARGET_DDR;
+ mvebu_mbus_dram_info_nooverlap.num_cs = cs_nooverlap;
+}
+
+static void __init
+mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
+{
+ int i;
+ int cs;
+
+ mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
+
+ for (i = 0, cs = 0; i < 4; i++) {
+ u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+ u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+
+ /*
+ * We only take care of entries for which the chip
+ * select is enabled, and that don't have high base
+ * address bits set (devices can only access the first
+ * 32 bits of the memory).
+ */
+ if ((size & DDR_SIZE_ENABLED) &&
+ !(base & DDR_BASE_CS_HIGH_MASK)) {
+ struct mbus_dram_window *w;
+
+ w = &mvebu_mbus_dram_info.cs[cs++];
+ w->cs_index = i;
+ w->mbus_attr = 0xf & ~(1 << i);
+ if (mbus->hw_io_coherency)
+ w->mbus_attr |= ATTR_HW_COHERENCY;
+ w->base = base & DDR_BASE_CS_LOW_MASK;
+ w->size = (u64)(size | ~DDR_SIZE_MASK) + 1;
+ }
+ }
+ mvebu_mbus_dram_info.num_cs = cs;
+}
+
+static int
+mvebu_mbus_default_save_cpu_target(struct mvebu_mbus_state *mbus,
+ u32 __iomem *store_addr)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+ u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+
+ writel(mbus->sdramwins_phys_base + DDR_BASE_CS_OFF(i),
+ store_addr++);
+ writel(base, store_addr++);
+ writel(mbus->sdramwins_phys_base + DDR_SIZE_CS_OFF(i),
+ store_addr++);
+ writel(size, store_addr++);
+ }
+
+ /* We've written 16 words to the store address */
+ return 16;
+}
+
+static void __init
+mvebu_mbus_dove_setup_cpu_target(struct mvebu_mbus_state *mbus)
+{
+ int i;
+ int cs;
+
+ mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
+
+ for (i = 0, cs = 0; i < 2; i++) {
+ u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
+
+ /*
+ * Chip select enabled?
+ */
+ if (map & 1) {
+ struct mbus_dram_window *w;
+
+ w = &mvebu_mbus_dram_info.cs[cs++];
+ w->cs_index = i;
+ w->mbus_attr = 0; /* CS address decoding done inside */
+ /* the DDR controller, no need to */
+ /* provide attributes */
+ w->base = map & 0xff800000;
+ w->size = 0x100000 << (((map & 0x000f0000) >> 16) - 4);
+ }
+ }
+
+ mvebu_mbus_dram_info.num_cs = cs;
+}
+
+static int
+mvebu_mbus_dove_save_cpu_target(struct mvebu_mbus_state *mbus,
+ u32 __iomem *store_addr)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i));
+
+ writel(mbus->sdramwins_phys_base + DOVE_DDR_BASE_CS_OFF(i),
+ store_addr++);
+ writel(map, store_addr++);
+ }
+
+ /* We've written 4 words to the store address */
+ return 4;
+}
+
+int mvebu_mbus_save_cpu_target(u32 __iomem *store_addr)
+{
+ return mbus_state.soc->save_cpu_target(&mbus_state, store_addr);
+}
+
+static const struct mvebu_mbus_soc_data armada_370_mbus_data = {
+ .num_wins = 20,
+ .has_mbus_bridge = true,
+ .win_cfg_offset = armada_370_xp_mbus_win_cfg_offset,
+ .win_remap_offset = generic_mbus_win_remap_8_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+ .save_cpu_target = mvebu_mbus_default_save_cpu_target,
+};
+
+static const struct mvebu_mbus_soc_data armada_xp_mbus_data = {
+ .num_wins = 20,
+ .has_mbus_bridge = true,
+ .win_cfg_offset = armada_370_xp_mbus_win_cfg_offset,
+ .win_remap_offset = armada_xp_mbus_win_remap_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+ .save_cpu_target = mvebu_mbus_default_save_cpu_target,
+};
+
+static const struct mvebu_mbus_soc_data kirkwood_mbus_data = {
+ .num_wins = 8,
+ .win_cfg_offset = generic_mbus_win_cfg_offset,
+ .save_cpu_target = mvebu_mbus_default_save_cpu_target,
+ .win_remap_offset = generic_mbus_win_remap_4_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+};
+
+static const struct mvebu_mbus_soc_data dove_mbus_data = {
+ .num_wins = 8,
+ .win_cfg_offset = generic_mbus_win_cfg_offset,
+ .save_cpu_target = mvebu_mbus_dove_save_cpu_target,
+ .win_remap_offset = generic_mbus_win_remap_4_offset,
+ .setup_cpu_target = mvebu_mbus_dove_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_dove,
+};
+
+/*
+ * Some variants of Orion5x have 4 remappable windows, some others have
+ * only two of them.
+ */
+static const struct mvebu_mbus_soc_data orion5x_4win_mbus_data = {
+ .num_wins = 8,
+ .win_cfg_offset = generic_mbus_win_cfg_offset,
+ .save_cpu_target = mvebu_mbus_default_save_cpu_target,
+ .win_remap_offset = generic_mbus_win_remap_4_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+};
+
+static const struct mvebu_mbus_soc_data orion5x_2win_mbus_data = {
+ .num_wins = 8,
+ .win_cfg_offset = generic_mbus_win_cfg_offset,
+ .save_cpu_target = mvebu_mbus_default_save_cpu_target,
+ .win_remap_offset = generic_mbus_win_remap_2_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+};
+
+static const struct mvebu_mbus_soc_data mv78xx0_mbus_data = {
+ .num_wins = 14,
+ .win_cfg_offset = mv78xx0_mbus_win_cfg_offset,
+ .save_cpu_target = mvebu_mbus_default_save_cpu_target,
+ .win_remap_offset = generic_mbus_win_remap_8_offset,
+ .setup_cpu_target = mvebu_mbus_default_setup_cpu_target,
+ .show_cpu_target = mvebu_sdram_debug_show_orion,
+};
+
+static const struct of_device_id of_mvebu_mbus_ids[] = {
+ { .compatible = "marvell,armada370-mbus",
+ .data = &armada_370_mbus_data, },
+ { .compatible = "marvell,armada375-mbus",
+ .data = &armada_xp_mbus_data, },
+ { .compatible = "marvell,armada380-mbus",
+ .data = &armada_xp_mbus_data, },
+ { .compatible = "marvell,armadaxp-mbus",
+ .data = &armada_xp_mbus_data, },
+ { .compatible = "marvell,kirkwood-mbus",
+ .data = &kirkwood_mbus_data, },
+ { .compatible = "marvell,dove-mbus",
+ .data = &dove_mbus_data, },
+ { .compatible = "marvell,orion5x-88f5281-mbus",
+ .data = &orion5x_4win_mbus_data, },
+ { .compatible = "marvell,orion5x-88f5182-mbus",
+ .data = &orion5x_2win_mbus_data, },
+ { .compatible = "marvell,orion5x-88f5181-mbus",
+ .data = &orion5x_2win_mbus_data, },
+ { .compatible = "marvell,orion5x-88f6183-mbus",
+ .data = &orion5x_4win_mbus_data, },
+ { .compatible = "marvell,mv78xx0-mbus",
+ .data = &mv78xx0_mbus_data, },
+ { },
+};
+
+/*
+ * Public API of the driver
+ */
+int mvebu_mbus_add_window_remap_by_id(unsigned int target,
+ unsigned int attribute,
+ phys_addr_t base, size_t size,
+ phys_addr_t remap)
+{
+ struct mvebu_mbus_state *s = &mbus_state;
+
+ if (!mvebu_mbus_window_conflicts(s, base, size, target, attribute)) {
+ pr_err("cannot add window '%x:%x', conflicts with another window\n",
+ target, attribute);
+ return -EINVAL;
+ }
+
+ return mvebu_mbus_alloc_window(s, base, size, remap, target, attribute);
+}
+EXPORT_SYMBOL_GPL(mvebu_mbus_add_window_remap_by_id);
+
+int mvebu_mbus_add_window_by_id(unsigned int target, unsigned int attribute,
+ phys_addr_t base, size_t size)
+{
+ return mvebu_mbus_add_window_remap_by_id(target, attribute, base,
+ size, MVEBU_MBUS_NO_REMAP);
+}
+EXPORT_SYMBOL_GPL(mvebu_mbus_add_window_by_id);
+
+int mvebu_mbus_del_window(phys_addr_t base, size_t size)
+{
+ int win;
+
+ win = mvebu_mbus_find_window(&mbus_state, base, size);
+ if (win < 0)
+ return win;
+
+ mvebu_mbus_disable_window(&mbus_state, win);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mvebu_mbus_del_window);
+
+void mvebu_mbus_get_pcie_mem_aperture(struct resource *res)
+{
+ if (!res)
+ return;
+ *res = mbus_state.pcie_mem_aperture;
+}
+EXPORT_SYMBOL_GPL(mvebu_mbus_get_pcie_mem_aperture);
+
+void mvebu_mbus_get_pcie_io_aperture(struct resource *res)
+{
+ if (!res)
+ return;
+ *res = mbus_state.pcie_io_aperture;
+}
+EXPORT_SYMBOL_GPL(mvebu_mbus_get_pcie_io_aperture);
+
+int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr)
+{
+ const struct mbus_dram_target_info *dram;
+ int i;
+
+ /* Get dram info */
+ dram = mv_mbus_dram_info();
+ if (!dram) {
+ pr_err("missing DRAM information\n");
+ return -ENODEV;
+ }
+
+ /* Try to find matching DRAM window for phyaddr */
+ for (i = 0; i < dram->num_cs; i++) {
+ const struct mbus_dram_window *cs = dram->cs + i;
+
+ if (cs->base <= phyaddr &&
+ phyaddr <= (cs->base + cs->size - 1)) {
+ *target = dram->mbus_dram_target_id;
+ *attr = cs->mbus_attr;
+ return 0;
+ }
+ }
+
+ pr_err("invalid dram address %pa\n", &phyaddr);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mvebu_mbus_get_dram_win_info);
+
+int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target,
+ u8 *attr)
+{
+ int win;
+
+ for (win = 0; win < mbus_state.soc->num_wins; win++) {
+ u64 wbase;
+ int enabled;
+
+ mvebu_mbus_read_window(&mbus_state, win, &enabled, &wbase,
+ size, target, attr, NULL);
+
+ if (!enabled)
+ continue;
+
+ if (wbase <= phyaddr && phyaddr <= wbase + *size)
+ return win;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mvebu_mbus_get_io_win_info);
+
+static __init int mvebu_mbus_debugfs_init(void)
+{
+ struct mvebu_mbus_state *s = &mbus_state;
+
+ /*
+ * If no base has been initialized, it doesn't make sense to
+ * register the debugfs entries. We may be on a multiplatform
+ * kernel that isn't running on a Marvell EBU SoC.
+ */
+ if (!s->mbuswins_base)
+ return 0;
+
+ s->debugfs_root = debugfs_create_dir("mvebu-mbus", NULL);
+ if (s->debugfs_root) {
+ s->debugfs_sdram = debugfs_create_file("sdram", S_IRUGO,
+ s->debugfs_root, NULL,
+ &mvebu_sdram_debug_fops);
+ s->debugfs_devs = debugfs_create_file("devices", S_IRUGO,
+ s->debugfs_root, NULL,
+ &mvebu_devs_debug_fops);
+ }
+
+ return 0;
+}
+fs_initcall(mvebu_mbus_debugfs_init);
+
+static int mvebu_mbus_suspend(void)
+{
+ struct mvebu_mbus_state *s = &mbus_state;
+ int win;
+
+ if (!s->mbusbridge_base)
+ return -ENODEV;
+
+ for (win = 0; win < s->soc->num_wins; win++) {
+ void __iomem *addr = s->mbuswins_base +
+ s->soc->win_cfg_offset(win);
+ void __iomem *addr_rmp;
+
+ s->wins[win].base = readl(addr + WIN_BASE_OFF);
+ s->wins[win].ctrl = readl(addr + WIN_CTRL_OFF);
+
+ if (!mvebu_mbus_window_is_remappable(s, win))
+ continue;
+
+ addr_rmp = s->mbuswins_base +
+ s->soc->win_remap_offset(win);
+
+ s->wins[win].remap_lo = readl(addr_rmp + WIN_REMAP_LO_OFF);
+ s->wins[win].remap_hi = readl(addr_rmp + WIN_REMAP_HI_OFF);
+ }
+
+ s->mbus_bridge_ctrl = readl(s->mbusbridge_base +
+ MBUS_BRIDGE_CTRL_OFF);
+ s->mbus_bridge_base = readl(s->mbusbridge_base +
+ MBUS_BRIDGE_BASE_OFF);
+
+ return 0;
+}
+
+static void mvebu_mbus_resume(void)
+{
+ struct mvebu_mbus_state *s = &mbus_state;
+ int win;
+
+ writel(s->mbus_bridge_ctrl,
+ s->mbusbridge_base + MBUS_BRIDGE_CTRL_OFF);
+ writel(s->mbus_bridge_base,
+ s->mbusbridge_base + MBUS_BRIDGE_BASE_OFF);
+
+ for (win = 0; win < s->soc->num_wins; win++) {
+ void __iomem *addr = s->mbuswins_base +
+ s->soc->win_cfg_offset(win);
+ void __iomem *addr_rmp;
+
+ writel(s->wins[win].base, addr + WIN_BASE_OFF);
+ writel(s->wins[win].ctrl, addr + WIN_CTRL_OFF);
+
+ if (!mvebu_mbus_window_is_remappable(s, win))
+ continue;
+
+ addr_rmp = s->mbuswins_base +
+ s->soc->win_remap_offset(win);
+
+ writel(s->wins[win].remap_lo, addr_rmp + WIN_REMAP_LO_OFF);
+ writel(s->wins[win].remap_hi, addr_rmp + WIN_REMAP_HI_OFF);
+ }
+}
+
+static struct syscore_ops mvebu_mbus_syscore_ops = {
+ .suspend = mvebu_mbus_suspend,
+ .resume = mvebu_mbus_resume,
+};
+
+static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
+ phys_addr_t mbuswins_phys_base,
+ size_t mbuswins_size,
+ phys_addr_t sdramwins_phys_base,
+ size_t sdramwins_size,
+ phys_addr_t mbusbridge_phys_base,
+ size_t mbusbridge_size,
+ bool is_coherent)
+{
+ int win;
+
+ mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size);
+ if (!mbus->mbuswins_base)
+ return -ENOMEM;
+
+ mbus->sdramwins_base = ioremap(sdramwins_phys_base, sdramwins_size);
+ if (!mbus->sdramwins_base) {
+ iounmap(mbus->mbuswins_base);
+ return -ENOMEM;
+ }
+
+ mbus->sdramwins_phys_base = sdramwins_phys_base;
+
+ if (mbusbridge_phys_base) {
+ mbus->mbusbridge_base = ioremap(mbusbridge_phys_base,
+ mbusbridge_size);
+ if (!mbus->mbusbridge_base) {
+ iounmap(mbus->sdramwins_base);
+ iounmap(mbus->mbuswins_base);
+ return -ENOMEM;
+ }
+ } else {
+ mbus->mbusbridge_base = NULL;
+ }
+
+ for (win = 0; win < mbus->soc->num_wins; win++)
+ mvebu_mbus_disable_window(mbus, win);
+
+ mbus->soc->setup_cpu_target(mbus);
+ mvebu_mbus_setup_cpu_target_nooverlap(mbus);
+
+ if (is_coherent)
+ writel(UNIT_SYNC_BARRIER_ALL,
+ mbus->mbuswins_base + UNIT_SYNC_BARRIER_OFF);
+
+ register_syscore_ops(&mvebu_mbus_syscore_ops);
+
+ return 0;
+}
+
+int __init mvebu_mbus_init(const char *soc, phys_addr_t mbuswins_phys_base,
+ size_t mbuswins_size,
+ phys_addr_t sdramwins_phys_base,
+ size_t sdramwins_size)
+{
+ const struct of_device_id *of_id;
+
+ for (of_id = of_mvebu_mbus_ids; of_id->compatible[0]; of_id++)
+ if (!strcmp(of_id->compatible, soc))
+ break;
+
+ if (!of_id->compatible[0]) {
+ pr_err("could not find a matching SoC family\n");
+ return -ENODEV;
+ }
+
+ mbus_state.soc = of_id->data;
+
+ return mvebu_mbus_common_init(&mbus_state,
+ mbuswins_phys_base,
+ mbuswins_size,
+ sdramwins_phys_base,
+ sdramwins_size, 0, 0, false);
+}
+
+#ifdef CONFIG_OF
+/*
+ * The window IDs in the ranges DT property have the following format:
+ * - bits 28 to 31: MBus custom field
+ * - bits 24 to 27: window target ID
+ * - bits 16 to 23: window attribute ID
+ * - bits 0 to 15: unused
+ */
+#define CUSTOM(id) (((id) & 0xF0000000) >> 24)
+#define TARGET(id) (((id) & 0x0F000000) >> 24)
+#define ATTR(id) (((id) & 0x00FF0000) >> 16)
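+
+/*
+ * Illustrative example (hypothetical window ID, shown only to document
+ * the macros above): a ranges entry whose upper 32 bits are 0x04e00000
+ * decodes to CUSTOM() == 0x0 (a static window), TARGET() == 0x4 and
+ * ATTR() == 0xe0.
+ */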
+
+static int __init mbus_dt_setup_win(struct mvebu_mbus_state *mbus,
+ u32 base, u32 size,
+ u8 target, u8 attr)
+{
+ if (!mvebu_mbus_window_conflicts(mbus, base, size, target, attr)) {
+ pr_err("cannot add window '%04x:%04x', conflicts with another window\n",
+ target, attr);
+ return -EBUSY;
+ }
+
+ if (mvebu_mbus_alloc_window(mbus, base, size, MVEBU_MBUS_NO_REMAP,
+ target, attr)) {
+ pr_err("cannot add window '%04x:%04x', too many windows\n",
+ target, attr);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int __init mbus_dt_setup(struct mvebu_mbus_state *mbus,
+ struct device_node *np)
+{
+ int ret;
+ struct of_range_parser parser;
+ struct of_range range;
+
+ ret = of_range_parser_init(&parser, np);
+ if (ret < 0)
+ return 0;
+
+ for_each_of_range(&parser, &range) {
+ u32 windowid = upper_32_bits(range.bus_addr);
+ u8 target, attr;
+
+ /*
+ * An entry with a non-zero custom field does not
+ * correspond to a static window, so skip it.
+ */
+ if (CUSTOM(windowid))
+ continue;
+
+ target = TARGET(windowid);
+ attr = ATTR(windowid);
+
+ ret = mbus_dt_setup_win(mbus, range.cpu_addr, range.size, target, attr);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static void __init mvebu_mbus_get_pcie_resources(struct device_node *np,
+ struct resource *mem,
+ struct resource *io)
+{
+ u32 reg[2];
+ int ret;
+
+ /*
+ * These are optional, so we make sure that resource_size(x) will
+ * return 0.
+ */
+ memset(mem, 0, sizeof(struct resource));
+ mem->end = -1;
+ memset(io, 0, sizeof(struct resource));
+ io->end = -1;
+
+ ret = of_property_read_u32_array(np, "pcie-mem-aperture", reg, ARRAY_SIZE(reg));
+ if (!ret) {
+ mem->start = reg[0];
+ mem->end = mem->start + reg[1] - 1;
+ mem->flags = IORESOURCE_MEM;
+ }
+
+ ret = of_property_read_u32_array(np, "pcie-io-aperture", reg, ARRAY_SIZE(reg));
+ if (!ret) {
+ io->start = reg[0];
+ io->end = io->start + reg[1] - 1;
+ io->flags = IORESOURCE_IO;
+ }
+}
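+
+/*
+ * Illustrative device tree snippet (hypothetical addresses, shown only
+ * to document the property layout parsed above); each property is a
+ * <base size> pair:
+ *
+ *     pcie-mem-aperture = <0xe0000000 0x8000000>;
+ *     pcie-io-aperture  = <0xe8000000 0x100000>;
+ */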
+
+int __init mvebu_mbus_dt_init(bool is_coherent)
+{
+ struct resource mbuswins_res, sdramwins_res, mbusbridge_res;
+ struct device_node *np, *controller;
+ const struct of_device_id *of_id;
+ const __be32 *prop;
+ int ret;
+
+ np = of_find_matching_node_and_match(NULL, of_mvebu_mbus_ids, &of_id);
+ if (!np) {
+ pr_err("could not find a matching SoC family\n");
+ return -ENODEV;
+ }
+
+ mbus_state.soc = of_id->data;
+
+ prop = of_get_property(np, "controller", NULL);
+ if (!prop) {
+ pr_err("required 'controller' property missing\n");
+ return -EINVAL;
+ }
+
+ controller = of_find_node_by_phandle(be32_to_cpup(prop));
+ if (!controller) {
+ pr_err("could not find an 'mbus-controller' node\n");
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(controller, 0, &mbuswins_res)) {
+ pr_err("cannot get MBUS register address\n");
+ return -EINVAL;
+ }
+
+ if (of_address_to_resource(controller, 1, &sdramwins_res)) {
+ pr_err("cannot get SDRAM register address\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Set the resource to 0 so that it can be left unmapped by
+ * mvebu_mbus_common_init() if the DT doesn't carry the
+ * necessary information. This is needed to preserve backward
+ * compatibility.
+ */
+ memset(&mbusbridge_res, 0, sizeof(mbusbridge_res));
+
+ if (mbus_state.soc->has_mbus_bridge) {
+ if (of_address_to_resource(controller, 2, &mbusbridge_res))
+ pr_warn(FW_WARN "deprecated mbus-mvebu Device Tree, suspend/resume will not work\n");
+ }
+
+ mbus_state.hw_io_coherency = is_coherent;
+
+ /* Get optional pcie-{mem,io}-aperture properties */
+ mvebu_mbus_get_pcie_resources(np, &mbus_state.pcie_mem_aperture,
+ &mbus_state.pcie_io_aperture);
+
+ ret = mvebu_mbus_common_init(&mbus_state,
+ mbuswins_res.start,
+ resource_size(&mbuswins_res),
+ sdramwins_res.start,
+ resource_size(&sdramwins_res),
+ mbusbridge_res.start,
+ resource_size(&mbusbridge_res),
+ is_coherent);
+ if (ret)
+ return ret;
+
+ /* Setup statically declared windows in the DT */
+ return mbus_dt_setup(&mbus_state, np);
+}
+#endif
diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
new file mode 100644
index 0000000000..e02d065624
--- /dev/null
+++ b/drivers/bus/omap-ocp2scp.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * omap-ocp2scp.c - transform ocp interface protocol to scp protocol
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#define OCP2SCP_TIMING 0x18
+#define SYNC2_MASK 0xf
+
+static int ocp2scp_remove_devices(struct device *dev, void *c)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+
+ return 0;
+}
+
+static int omap_ocp2scp_probe(struct platform_device *pdev)
+{
+ int ret;
+ u32 reg;
+ void __iomem *regs;
+ struct resource *res;
+ struct device_node *np = pdev->dev.of_node;
+
+ if (np) {
+ ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to add resources for ocp2scp child\n");
+ goto err0;
+ }
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ /*
+ * As per AM572x TRM: http://www.ti.com/lit/ug/spruhz6/spruhz6.pdf
+ * under section 26.3.2.2, table 26-26 OCP2SCP TIMING Caution;
+ * As per OMAP4430 TRM: http://www.ti.com/lit/ug/swpu231ap/swpu231ap.pdf
+ * under section 23.12.6.2.2, Table 23-1213 OCP2SCP TIMING Caution;
+ * As per OMAP4460 TRM: http://www.ti.com/lit/ug/swpu235ab/swpu235ab.pdf
+ * under section 23.12.6.2.2, Table 23-1213 OCP2SCP TIMING Caution;
+ * As per OMAP543x TRM http://www.ti.com/lit/pdf/swpu249
+ * under section 27.3.2.2, Table 27-27 OCP2SCP TIMING Caution;
+ *
+ * Read path of OCP2SCP is not working properly due to low reset value
+ * of SYNC2 parameter in OCP2SCP. Suggested reset value is 0x6 or more.
+ */
+ if (!of_device_is_compatible(np, "ti,am437x-ocp2scp")) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs)) {
+ ret = PTR_ERR(regs);
+ goto err1;
+ }
+
+ pm_runtime_get_sync(&pdev->dev);
+ reg = readl_relaxed(regs + OCP2SCP_TIMING);
+ reg &= ~(SYNC2_MASK);
+ reg |= 0x6;
+ writel_relaxed(reg, regs + OCP2SCP_TIMING);
+ pm_runtime_put_sync(&pdev->dev);
+ }
+
+ return 0;
+
+err1:
+ pm_runtime_disable(&pdev->dev);
+
+err0:
+ device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
+
+ return ret;
+}
+
+static int omap_ocp2scp_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id omap_ocp2scp_id_table[] = {
+ { .compatible = "ti,omap-ocp2scp" },
+ { .compatible = "ti,am437x-ocp2scp" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, omap_ocp2scp_id_table);
+#endif
+
+static struct platform_driver omap_ocp2scp_driver = {
+ .probe = omap_ocp2scp_probe,
+ .remove = omap_ocp2scp_remove,
+ .driver = {
+ .name = "omap-ocp2scp",
+ .of_match_table = of_match_ptr(omap_ocp2scp_id_table),
+ },
+};
+
+module_platform_driver(omap_ocp2scp_driver);
+
+MODULE_ALIAS("platform:omap-ocp2scp");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_DESCRIPTION("OMAP OCP2SCP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
new file mode 100644
index 0000000000..eb1ba6319f
--- /dev/null
+++ b/drivers/bus/omap_l3_noc.c
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * OMAP L3 Interconnect error handling driver
+ *
+ * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Sricharan <r.sricharan@ti.com>
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "omap_l3_noc.h"
+
+/**
+ * l3_handle_target() - Handle Target specific parse and reporting
+ * @l3: pointer to l3 struct
+ * @base: base address of clkdm
+ * @flag_mux: flagmux corresponding to the event
+ * @err_src: error source index of the slave (target)
+ *
+ * This does the second part of the error interrupt handling:
+ * 3) Parse in the slave information
+ * 4) Print the logged information.
+ * 5) Add dump stack to provide kernel trace.
+ * 6) Clear the source if known.
+ *
+ * This handles two types of errors:
+ * 1) Custom errors in L3 :
+ * Target like DMM/FW/EMIF generates SRESP=ERR error
+ * 2) Standard L3 error:
+ * - Unsupported CMD.
+ * L3 tries to access target while it is idle
+ * - OCP disconnect.
+ * - Address hole error:
+ * If DSS/ISS/FDIF/USBHOSTFS access a target where they
+ * do not have connectivity, the error is logged in
+ * their default target which is DMM2.
+ *
+ * On High Secure devices, firewall errors are possible and those
+ * can be trapped as well. But the trapping is implemented as part of
+ * the secure software and hence need not be implemented here.
+ */
+static int l3_handle_target(struct omap_l3 *l3, void __iomem *base,
+ struct l3_flagmux_data *flag_mux, int err_src)
+{
+ int k;
+ u32 std_err_main, clear, masterid;
+ u8 op_code, m_req_info;
+ void __iomem *l3_targ_base;
+ void __iomem *l3_targ_stderr, *l3_targ_slvofslsb, *l3_targ_mstaddr;
+ void __iomem *l3_targ_hdr, *l3_targ_info;
+ struct l3_target_data *l3_targ_inst;
+ struct l3_masters_data *master;
+ char *target_name, *master_name = "UN IDENTIFIED";
+ char *err_description;
+ char err_string[30] = { 0 };
+ char info_string[60] = { 0 };
+
+ /* We DO NOT expect err_src to go out of bounds */
+ BUG_ON(err_src > MAX_CLKDM_TARGETS);
+
+ if (err_src < flag_mux->num_targ_data) {
+ l3_targ_inst = &flag_mux->l3_targ[err_src];
+ target_name = l3_targ_inst->name;
+ l3_targ_base = base + l3_targ_inst->offset;
+ } else {
+ target_name = L3_TARGET_NOT_SUPPORTED;
+ }
+
+ if (target_name == L3_TARGET_NOT_SUPPORTED)
+ return -ENODEV;
+
+ /* Read the stderrlog_main_source from clk domain */
+ l3_targ_stderr = l3_targ_base + L3_TARG_STDERRLOG_MAIN;
+ l3_targ_slvofslsb = l3_targ_base + L3_TARG_STDERRLOG_SLVOFSLSB;
+
+ std_err_main = readl_relaxed(l3_targ_stderr);
+
+ switch (std_err_main & CUSTOM_ERROR) {
+ case STANDARD_ERROR:
+ err_description = "Standard";
+ snprintf(err_string, sizeof(err_string),
+ ": At Address: 0x%08X ",
+ readl_relaxed(l3_targ_slvofslsb));
+
+ l3_targ_mstaddr = l3_targ_base + L3_TARG_STDERRLOG_MSTADDR;
+ l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_HDR;
+ l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_INFO;
+ break;
+
+ case CUSTOM_ERROR:
+ err_description = "Custom";
+
+ l3_targ_mstaddr = l3_targ_base +
+ L3_TARG_STDERRLOG_CINFO_MSTADDR;
+ l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_CINFO_OPCODE;
+ l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_CINFO_INFO;
+ break;
+
+ default:
+ /* Nothing to be handled here as of now */
+ return 0;
+ }
+
+ /* STDERRLOG_MSTADDR Stores the NTTP master address. */
+ masterid = (readl_relaxed(l3_targ_mstaddr) &
+ l3->mst_addr_mask) >> __ffs(l3->mst_addr_mask);
+
+ for (k = 0, master = l3->l3_masters; k < l3->num_masters;
+ k++, master++) {
+ if (masterid == master->id) {
+ master_name = master->name;
+ break;
+ }
+ }
+
+ op_code = readl_relaxed(l3_targ_hdr) & 0x7;
+
+ m_req_info = readl_relaxed(l3_targ_info) & 0xF;
+ snprintf(info_string, sizeof(info_string),
+ ": %s in %s mode during %s access",
+ (m_req_info & BIT(0)) ? "Opcode Fetch" : "Data Access",
+ (m_req_info & BIT(1)) ? "Supervisor" : "User",
+ (m_req_info & BIT(3)) ? "Debug" : "Functional");
+
+ WARN(true,
+ "%s:L3 %s Error: MASTER %s TARGET %s (%s)%s%s\n",
+ dev_name(l3->dev),
+ err_description,
+ master_name, target_name,
+ l3_transaction_type[op_code],
+ err_string, info_string);
+
+ /* clear the std error log*/
+ clear = std_err_main | CLEAR_STDERR_LOG;
+ writel_relaxed(clear, l3_targ_stderr);
+
+ return 0;
+}
+
+/**
+ * l3_interrupt_handler() - interrupt handler for l3 events
+ * @irq: irq number
+ * @_l3: pointer to l3 structure
+ *
+ * Interrupt Handler for L3 error detection.
+ * 1) Identify the L3 clockdomain partition to which the error belongs.
+ * 2) Identify the slave where the error information is logged
+ * ... handle the slave event..
+ * 7) if the slave is unknown, mask out the slave.
+ */
+static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
+{
+ struct omap_l3 *l3 = _l3;
+ int inttype, i, ret;
+ int err_src = 0;
+ u32 err_reg, mask_val;
+ void __iomem *base, *mask_reg;
+ struct l3_flagmux_data *flag_mux;
+
+ /* Get the Type of interrupt */
+ inttype = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR;
+
+ for (i = 0; i < l3->num_modules; i++) {
+ /*
+ * Read the regerr register of the clock domain
+ * to determine the source
+ */
+ base = l3->l3_base[i];
+ flag_mux = l3->l3_flagmux[i];
+ err_reg = readl_relaxed(base + flag_mux->offset +
+ L3_FLAGMUX_REGERR0 + (inttype << 3));
+
+ err_reg &= ~(inttype ? flag_mux->mask_app_bits :
+ flag_mux->mask_dbg_bits);
+
+ /* Get the corresponding error and analyse */
+ if (err_reg) {
+ /* Identify the source from control status register */
+ err_src = __ffs(err_reg);
+
+ ret = l3_handle_target(l3, base, flag_mux, err_src);
+
+ /*
+ * Certain platforms may have "undocumented" status
+ * pending on boot. So don't generate a severe warning
+ * here. Just mask it off to prevent the error from
+ * reoccurring and locking up the system.
+ */
+ if (ret) {
+ dev_err(l3->dev,
+ "L3 %s error: target %d mod:%d %s\n",
+ inttype ? "debug" : "application",
+ err_src, i, "(unclearable)");
+
+ mask_reg = base + flag_mux->offset +
+ L3_FLAGMUX_MASK0 + (inttype << 3);
+ mask_val = readl_relaxed(mask_reg);
+ mask_val &= ~(1 << err_src);
+ writel_relaxed(mask_val, mask_reg);
+
+ /* Mark these bits as to be ignored */
+ if (inttype)
+ flag_mux->mask_app_bits |= 1 << err_src;
+ else
+ flag_mux->mask_dbg_bits |= 1 << err_src;
+ }
+
+ /* Error found so break the for loop */
+ return IRQ_HANDLED;
+ }
+ }
+
+ dev_err(l3->dev, "L3 %s IRQ not handled!!\n",
+ inttype ? "debug" : "application");
+
+ return IRQ_NONE;
+}
+
+static const struct of_device_id l3_noc_match[] = {
+ {.compatible = "ti,omap4-l3-noc", .data = &omap4_l3_data},
+ {.compatible = "ti,omap5-l3-noc", .data = &omap5_l3_data},
+ {.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data},
+ {.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data},
+ {},
+};
+MODULE_DEVICE_TABLE(of, l3_noc_match);
+
+static int omap_l3_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id;
+ static struct omap_l3 *l3;
+ int ret, i, res_idx;
+
+ of_id = of_match_device(l3_noc_match, &pdev->dev);
+ if (!of_id) {
+ dev_err(&pdev->dev, "OF data missing\n");
+ return -EINVAL;
+ }
+
+ l3 = devm_kzalloc(&pdev->dev, sizeof(*l3), GFP_KERNEL);
+ if (!l3)
+ return -ENOMEM;
+
+ memcpy(l3, of_id->data, sizeof(*l3));
+ l3->dev = &pdev->dev;
+ platform_set_drvdata(pdev, l3);
+
+ /* Get mem resources */
+ for (i = 0, res_idx = 0; i < l3->num_modules; i++) {
+ struct resource *res;
+
+ if (l3->l3_base[i] == L3_BASE_IS_SUBMODULE) {
+ /* First entry cannot be submodule */
+ BUG_ON(i == 0);
+ l3->l3_base[i] = l3->l3_base[i - 1];
+ continue;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx);
+ l3->l3_base[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(l3->l3_base[i])) {
+ dev_err(l3->dev, "ioremap %d failed\n", i);
+ return PTR_ERR(l3->l3_base[i]);
+ }
+ res_idx++;
+ }
+
+ /*
+ * Setup interrupt Handlers
+ */
+ l3->debug_irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
+ IRQF_NO_THREAD, "l3-dbg-irq", l3);
+ if (ret) {
+ dev_err(l3->dev, "request_irq failed for %d\n",
+ l3->debug_irq);
+ return ret;
+ }
+
+ l3->app_irq = platform_get_irq(pdev, 1);
+ ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
+ IRQF_NO_THREAD, "l3-app-irq", l3);
+ if (ret)
+ dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+/**
+ * l3_resume_noirq() - resume function for l3_noc
+ * @dev: pointer to l3_noc device structure
+ *
+ * We only have the resume handler since we have
+ * already maintained the delta register configuration
+ * as part of configuring the system.
+ */
+static int l3_resume_noirq(struct device *dev)
+{
+ struct omap_l3 *l3 = dev_get_drvdata(dev);
+ int i;
+ struct l3_flagmux_data *flag_mux;
+ void __iomem *base, *mask_regx = NULL;
+ u32 mask_val;
+
+ for (i = 0; i < l3->num_modules; i++) {
+ base = l3->l3_base[i];
+ flag_mux = l3->l3_flagmux[i];
+ if (!flag_mux->mask_app_bits && !flag_mux->mask_dbg_bits)
+ continue;
+
+ mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
+ (L3_APPLICATION_ERROR << 3);
+ mask_val = readl_relaxed(mask_regx);
+ mask_val &= ~(flag_mux->mask_app_bits);
+
+ writel_relaxed(mask_val, mask_regx);
+ mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
+ (L3_DEBUG_ERROR << 3);
+ mask_val = readl_relaxed(mask_regx);
+ mask_val &= ~(flag_mux->mask_dbg_bits);
+
+ writel_relaxed(mask_val, mask_regx);
+ }
+
+ /* Dummy read to force OCP barrier */
+ if (mask_regx)
+ (void)readl(mask_regx);
+
+ return 0;
+}
+
+static const struct dev_pm_ops l3_dev_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, l3_resume_noirq)
+};
+
+#define L3_DEV_PM_OPS (&l3_dev_pm_ops)
+#else
+#define L3_DEV_PM_OPS NULL
+#endif
+
+static struct platform_driver omap_l3_driver = {
+ .probe = omap_l3_probe,
+ .driver = {
+ .name = "omap_l3_noc",
+ .pm = L3_DEV_PM_OPS,
+ .of_match_table = of_match_ptr(l3_noc_match),
+ },
+};
+
+static int __init omap_l3_init(void)
+{
+ return platform_driver_register(&omap_l3_driver);
+}
+postcore_initcall_sync(omap_l3_init);
+
+static void __exit omap_l3_exit(void)
+{
+ platform_driver_unregister(&omap_l3_driver);
+}
+module_exit(omap_l3_exit);
+
+MODULE_AUTHOR("Santosh Shilimkar");
+MODULE_AUTHOR("Sricharan R");
+MODULE_DESCRIPTION("OMAP L3 Interconnect error handling driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/omap_l3_noc.h b/drivers/bus/omap_l3_noc.h
new file mode 100644
index 0000000000..bb3eebd346
--- /dev/null
+++ b/drivers/bus/omap_l3_noc.h
@@ -0,0 +1,493 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * OMAP L3 Interconnect error handling driver header
+ *
+ * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * sricharan <r.sricharan@ti.com>
+ */
+#ifndef __OMAP_L3_NOC_H
+#define __OMAP_L3_NOC_H
+
+#define MAX_L3_MODULES 3
+#define MAX_CLKDM_TARGETS 31
+
+#define CLEAR_STDERR_LOG (1 << 31)
+#define CUSTOM_ERROR 0x2
+#define STANDARD_ERROR 0x0
+#define INBAND_ERROR 0x0
+#define L3_APPLICATION_ERROR 0x0
+#define L3_DEBUG_ERROR 0x1
+
+/* L3 TARG register offsets */
+#define L3_TARG_STDERRLOG_MAIN 0x48
+#define L3_TARG_STDERRLOG_HDR 0x4c
+#define L3_TARG_STDERRLOG_MSTADDR 0x50
+#define L3_TARG_STDERRLOG_INFO 0x58
+#define L3_TARG_STDERRLOG_SLVOFSLSB 0x5c
+#define L3_TARG_STDERRLOG_CINFO_INFO 0x64
+#define L3_TARG_STDERRLOG_CINFO_MSTADDR 0x68
+#define L3_TARG_STDERRLOG_CINFO_OPCODE 0x6c
+#define L3_FLAGMUX_REGERR0 0xc
+#define L3_FLAGMUX_MASK0 0x8
+
+#define L3_TARGET_NOT_SUPPORTED NULL
+
+#define L3_BASE_IS_SUBMODULE ((void __iomem *)(1 << 0))
+
+static const char * const l3_transaction_type[] = {
+ /* 0 0 0 */ "Idle",
+ /* 0 0 1 */ "Write",
+ /* 0 1 0 */ "Read",
+ /* 0 1 1 */ "ReadEx",
+ /* 1 0 0 */ "Read Link",
+ /* 1 0 1 */ "Write Non-Posted",
+ /* 1 1 0 */ "Write Conditional",
+ /* 1 1 1 */ "Write Broadcast",
+};
+
+/**
+ * struct l3_masters_data - L3 Master information
+ * @id: ID of the L3 Master
+ * @name: master name
+ */
+struct l3_masters_data {
+ u32 id;
+ char *name;
+};
+
+/**
+ * struct l3_target_data - L3 Target information
+ * @offset: Offset from base for L3 Target
+ * @name: Target name
+ *
+ * Target information is organized indexed by bit field definitions.
+ */
+struct l3_target_data {
+ u32 offset;
+ char *name;
+};
+
+/**
+ * struct l3_flagmux_data - Flag Mux information
+ * @offset: offset from base for flagmux register
+ * @l3_targ: array indexed by flagmux index (bit offset) pointing to the
+ * target data. unsupported ones are marked with
+ * L3_TARGET_NOT_SUPPORTED
+ * @num_targ_data: number of entries in target data
+ * @mask_app_bits: ignore these from raw application irq status
+ * @mask_dbg_bits: ignore these from raw debug irq status
+ */
+struct l3_flagmux_data {
+ u32 offset;
+ struct l3_target_data *l3_targ;
+ u8 num_targ_data;
+ u32 mask_app_bits;
+ u32 mask_dbg_bits;
+};
+
+
+/**
+ * struct omap_l3 - Description of data relevant for L3 bus.
+ * @dev: device representing the bus (populated runtime)
+ * @l3_base: base addresses of modules (populated runtime if 0)
+ * if set to L3_BASE_IS_SUBMODULE, then uses previous
+ * module index as the base address
+ * @l3_flag_mux: array containing flag mux data per module
+ * offset from corresponding module base indexed per
+ * module.
+ * @num_modules: number of clock domains / modules.
+ * @l3_masters: array pointing to master data containing name and register
+ * offset for the master.
+ * @num_master: number of masters
+ * @mst_addr_mask: Mask representing MSTADDR information of NTTP packet
+ * @debug_irq: irq number of the debug interrupt (populated runtime)
+ * @app_irq: irq number of the application interrupt (populated runtime)
+ */
+struct omap_l3 {
+ struct device *dev;
+
+ void __iomem *l3_base[MAX_L3_MODULES];
+ struct l3_flagmux_data **l3_flagmux;
+ int num_modules;
+
+ struct l3_masters_data *l3_masters;
+ int num_masters;
+ u32 mst_addr_mask;
+
+ int debug_irq;
+ int app_irq;
+};
+
+static struct l3_target_data omap_l3_target_data_clk1[] = {
+ {0x100, "DMM1",},
+ {0x200, "DMM2",},
+ {0x300, "ABE",},
+ {0x400, "L4CFG",},
+ {0x600, "CLK2PWRDISC",},
+ {0x0, "HOSTCLK1",},
+ {0x900, "L4WAKEUP",},
+};
+
+static struct l3_flagmux_data omap_l3_flagmux_clk1 = {
+ .offset = 0x500,
+ .l3_targ = omap_l3_target_data_clk1,
+ .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk1),
+};
+
+
+static struct l3_target_data omap_l3_target_data_clk2[] = {
+ {0x500, "CORTEXM3",},
+ {0x300, "DSS",},
+ {0x100, "GPMC",},
+ {0x400, "ISS",},
+ {0x700, "IVAHD",},
+ {0xD00, "AES1",},
+ {0x900, "L4PER0",},
+ {0x200, "OCMRAM",},
+ {0x100, "GPMCsERROR",},
+ {0x600, "SGX",},
+ {0x800, "SL2",},
+ {0x1600, "C2C",},
+ {0x1100, "PWRDISCCLK1",},
+ {0xF00, "SHA1",},
+ {0xE00, "AES2",},
+ {0xC00, "L4PER3",},
+ {0xA00, "L4PER1",},
+ {0xB00, "L4PER2",},
+ {0x0, "HOSTCLK2",},
+ {0x1800, "CAL",},
+ {0x1700, "LLI",},
+};
+
+static struct l3_flagmux_data omap_l3_flagmux_clk2 = {
+ .offset = 0x1000,
+ .l3_targ = omap_l3_target_data_clk2,
+ .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk2),
+};
+
+
+static struct l3_target_data omap4_l3_target_data_clk3[] = {
+ {0x0100, "DEBUGSS",},
+};
+
+static struct l3_flagmux_data omap4_l3_flagmux_clk3 = {
+ .offset = 0x0200,
+ .l3_targ = omap4_l3_target_data_clk3,
+ .num_targ_data = ARRAY_SIZE(omap4_l3_target_data_clk3),
+};
+
+static struct l3_masters_data omap_l3_masters[] = {
+ { 0x00, "MPU"},
+ { 0x04, "CS_ADP"},
+ { 0x05, "xxx"},
+ { 0x08, "DSP"},
+ { 0x0C, "IVAHD"},
+ { 0x10, "ISS"},
+ { 0x11, "DucatiM3"},
+ { 0x12, "FaceDetect"},
+ { 0x14, "SDMA_Rd"},
+ { 0x15, "SDMA_Wr"},
+ { 0x16, "xxx"},
+ { 0x17, "xxx"},
+ { 0x18, "SGX"},
+ { 0x1C, "DSS"},
+ { 0x20, "C2C"},
+ { 0x22, "xxx"},
+ { 0x23, "xxx"},
+ { 0x24, "HSI"},
+ { 0x28, "MMC1"},
+ { 0x29, "MMC2"},
+ { 0x2A, "MMC6"},
+ { 0x2C, "UNIPRO1"},
+ { 0x30, "USBHOSTHS"},
+ { 0x31, "USBOTGHS"},
+ { 0x32, "USBHOSTFS"}
+};
+
+static struct l3_flagmux_data *omap4_l3_flagmux[] = {
+ &omap_l3_flagmux_clk1,
+ &omap_l3_flagmux_clk2,
+ &omap4_l3_flagmux_clk3,
+};
+
+static const struct omap_l3 omap4_l3_data = {
+ .l3_flagmux = omap4_l3_flagmux,
+ .num_modules = ARRAY_SIZE(omap4_l3_flagmux),
+ .l3_masters = omap_l3_masters,
+ .num_masters = ARRAY_SIZE(omap_l3_masters),
+ /* The 6 MSBs of the register field are used to distinguish the initiator */
+ .mst_addr_mask = 0xFC,
+};
+
+/* OMAP5 data */
+static struct l3_target_data omap5_l3_target_data_clk3[] = {
+ {0x0100, "L3INSTR",},
+ {0x0300, "DEBUGSS",},
+ {0x0, "HOSTCLK3",},
+};
+
+static struct l3_flagmux_data omap5_l3_flagmux_clk3 = {
+ .offset = 0x0200,
+ .l3_targ = omap5_l3_target_data_clk3,
+ .num_targ_data = ARRAY_SIZE(omap5_l3_target_data_clk3),
+};
+
+static struct l3_flagmux_data *omap5_l3_flagmux[] = {
+ &omap_l3_flagmux_clk1,
+ &omap_l3_flagmux_clk2,
+ &omap5_l3_flagmux_clk3,
+};
+
+static const struct omap_l3 omap5_l3_data = {
+ .l3_flagmux = omap5_l3_flagmux,
+ .num_modules = ARRAY_SIZE(omap5_l3_flagmux),
+ .l3_masters = omap_l3_masters,
+ .num_masters = ARRAY_SIZE(omap_l3_masters),
+ /* The 6 MSBs of the register field are used to distinguish the initiator */
+ .mst_addr_mask = 0x7E0,
+};
+
+/* DRA7 data */
+static struct l3_target_data dra_l3_target_data_clk1[] = {
+ {0x2a00, "AES1",},
+ {0x0200, "DMM_P1",},
+ {0x0600, "DSP2_SDMA",},
+ {0x0b00, "EVE2",},
+ {0x1300, "DMM_P2",},
+ {0x2c00, "AES2",},
+ {0x0300, "DSP1_SDMA",},
+ {0x0a00, "EVE1",},
+ {0x0c00, "EVE3",},
+ {0x0d00, "EVE4",},
+ {0x2900, "DSS",},
+ {0x0100, "GPMC",},
+ {0x3700, "PCIE1",},
+ {0x1600, "IVA_CONFIG",},
+ {0x1800, "IVA_SL2IF",},
+ {0x0500, "L4_CFG",},
+ {0x1d00, "L4_WKUP",},
+ {0x3800, "PCIE2",},
+ {0x3300, "SHA2_1",},
+ {0x1200, "GPU",},
+ {0x1000, "IPU1",},
+ {0x1100, "IPU2",},
+ {0x2000, "TPCC_EDMA",},
+ {0x2e00, "TPTC1_EDMA",},
+ {0x2b00, "TPTC2_EDMA",},
+ {0x0700, "VCP1",},
+ {0x2500, "L4_PER2_P3",},
+ {0x0e00, "L4_PER3_P3",},
+ {0x2200, "MMU1",},
+ {0x1400, "PRUSS1",},
+ {0x1500, "PRUSS2"},
+ {0x0800, "VCP1",},
+};
+
+static struct l3_flagmux_data dra_l3_flagmux_clk1 = {
+ .offset = 0x803500,
+ .l3_targ = dra_l3_target_data_clk1,
+ .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk1),
+};
+
+static struct l3_target_data dra_l3_target_data_clk2[] = {
+ {0x0, "HOST CLK1",},
+ {0x800000, "HOST CLK2",},
+ {0xdead, L3_TARGET_NOT_SUPPORTED,},
+ {0x3400, "SHA2_2",},
+ {0x0900, "BB2D",},
+ {0xdead, L3_TARGET_NOT_SUPPORTED,},
+ {0x2100, "L4_PER1_P3",},
+ {0x1c00, "L4_PER1_P1",},
+ {0x1f00, "L4_PER1_P2",},
+ {0x2300, "L4_PER2_P1",},
+ {0x2400, "L4_PER2_P2",},
+ {0x2600, "L4_PER3_P1",},
+ {0x2700, "L4_PER3_P2",},
+ {0x2f00, "MCASP1",},
+ {0x3000, "MCASP2",},
+ {0x3100, "MCASP3",},
+ {0x2800, "MMU2",},
+ {0x0f00, "OCMC_RAM1",},
+ {0x1700, "OCMC_RAM2",},
+ {0x1900, "OCMC_RAM3",},
+ {0x1e00, "OCMC_ROM",},
+ {0x3900, "QSPI",},
+};
+
+static struct l3_flagmux_data dra_l3_flagmux_clk2 = {
+ .offset = 0x803600,
+ .l3_targ = dra_l3_target_data_clk2,
+ .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk2),
+};
+
+static struct l3_target_data dra_l3_target_data_clk3[] = {
+ {0x0100, "L3_INSTR"},
+ {0x0300, "DEBUGSS_CT_TBR"},
+ {0x0, "HOST CLK3"},
+};
+
+static struct l3_flagmux_data dra_l3_flagmux_clk3 = {
+ .offset = 0x200,
+ .l3_targ = dra_l3_target_data_clk3,
+ .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk3),
+};
+
+static struct l3_masters_data dra_l3_masters[] = {
+ { 0x0, "MPU" },
+ { 0x4, "CS_DAP" },
+ { 0x5, "IEEE1500_2_OCP" },
+ { 0x8, "DSP1_MDMA" },
+ { 0x9, "DSP1_CFG" },
+ { 0xA, "DSP1_DMA" },
+ { 0xB, "DSP2_MDMA" },
+ { 0xC, "DSP2_CFG" },
+ { 0xD, "DSP2_DMA" },
+ { 0xE, "IVA" },
+ { 0x10, "EVE1_P1" },
+ { 0x11, "EVE2_P1" },
+ { 0x12, "EVE3_P1" },
+ { 0x13, "EVE4_P1" },
+ { 0x14, "PRUSS1 PRU1" },
+ { 0x15, "PRUSS1 PRU2" },
+ { 0x16, "PRUSS2 PRU1" },
+ { 0x17, "PRUSS2 PRU2" },
+ { 0x18, "IPU1" },
+ { 0x19, "IPU2" },
+ { 0x1A, "SDMA" },
+ { 0x1B, "CDMA" },
+ { 0x1C, "TC1_EDMA" },
+ { 0x1D, "TC2_EDMA" },
+ { 0x20, "DSS" },
+ { 0x21, "MMU1" },
+ { 0x22, "PCIE1" },
+ { 0x23, "MMU2" },
+ { 0x24, "VIP1" },
+ { 0x25, "VIP2" },
+ { 0x26, "VIP3" },
+ { 0x27, "VPE" },
+ { 0x28, "GPU_P1" },
+ { 0x29, "BB2D" },
+ { 0x29, "GPU_P2" },
+ { 0x2B, "GMAC_SW" },
+ { 0x2C, "USB3" },
+ { 0x2D, "USB2_SS" },
+ { 0x2E, "USB2_ULPI_SS1" },
+ { 0x2F, "USB2_ULPI_SS2" },
+ { 0x30, "CSI2_1" },
+ { 0x31, "CSI2_2" },
+ { 0x33, "SATA" },
+ { 0x34, "EVE1_P2" },
+ { 0x35, "EVE2_P2" },
+ { 0x36, "EVE3_P2" },
+ { 0x37, "EVE4_P2" }
+};
+
+static struct l3_flagmux_data *dra_l3_flagmux[] = {
+ &dra_l3_flagmux_clk1,
+ &dra_l3_flagmux_clk2,
+ &dra_l3_flagmux_clk3,
+};
+
+static const struct omap_l3 dra_l3_data = {
+ .l3_base = { [1] = L3_BASE_IS_SUBMODULE },
+ .l3_flagmux = dra_l3_flagmux,
+ .num_modules = ARRAY_SIZE(dra_l3_flagmux),
+ .l3_masters = dra_l3_masters,
+ .num_masters = ARRAY_SIZE(dra_l3_masters),
+ /* The 6 MSBs of the register field are used to distinguish the initiator */
+ .mst_addr_mask = 0xFC,
+};
+
+/* AM4372 data */
+static struct l3_target_data am4372_l3_target_data_200f[] = {
+ {0xf00, "EMIF",},
+ {0x1200, "DES",},
+ {0x400, "OCMCRAM",},
+ {0x700, "TPTC0",},
+ {0x800, "TPTC1",},
+ {0x900, "TPTC2"},
+ {0xb00, "TPCC",},
+ {0xd00, "DEBUGSS",},
+ {0xdead, L3_TARGET_NOT_SUPPORTED,},
+ {0x200, "SHA",},
+ {0xc00, "SGX530",},
+ {0x500, "AES0",},
+ {0xa00, "L4_FAST",},
+ {0x300, "MPUSS_L2_RAM",},
+ {0x100, "ICSS",},
+};
+
+static struct l3_flagmux_data am4372_l3_flagmux_200f = {
+ .offset = 0x1000,
+ .l3_targ = am4372_l3_target_data_200f,
+ .num_targ_data = ARRAY_SIZE(am4372_l3_target_data_200f),
+};
+
+static struct l3_target_data am4372_l3_target_data_100s[] = {
+ {0x100, "L4_PER_0",},
+ {0x200, "L4_PER_1",},
+ {0x300, "L4_PER_2",},
+ {0x400, "L4_PER_3",},
+ {0x800, "McASP0",},
+ {0x900, "McASP1",},
+ {0xC00, "MMCHS2",},
+ {0x700, "GPMC",},
+ {0xD00, "L4_FW",},
+ {0xdead, L3_TARGET_NOT_SUPPORTED,},
+ {0x500, "ADCTSC",},
+ {0xE00, "L4_WKUP",},
+ {0xA00, "MAG_CARD",},
+};
+
+static struct l3_flagmux_data am4372_l3_flagmux_100s = {
+ .offset = 0x600,
+ .l3_targ = am4372_l3_target_data_100s,
+ .num_targ_data = ARRAY_SIZE(am4372_l3_target_data_100s),
+};
+
+static struct l3_masters_data am4372_l3_masters[] = {
+ { 0x0, "M1 (128-bit)"},
+ { 0x1, "M2 (64-bit)"},
+ { 0x4, "DAP"},
+ { 0x5, "P1500"},
+ { 0xC, "ICSS0"},
+ { 0xD, "ICSS1"},
+ { 0x14, "Wakeup Processor"},
+ { 0x18, "TPTC0 Read"},
+ { 0x19, "TPTC0 Write"},
+ { 0x1A, "TPTC1 Read"},
+ { 0x1B, "TPTC1 Write"},
+ { 0x1C, "TPTC2 Read"},
+ { 0x1D, "TPTC2 Write"},
+ { 0x20, "SGX530"},
+ { 0x21, "OCP WP Traffic Probe"},
+ { 0x22, "OCP WP DMA Profiling"},
+ { 0x23, "OCP WP Event Trace"},
+ { 0x25, "DSS"},
+ { 0x28, "Crypto DMA RD"},
+ { 0x29, "Crypto DMA WR"},
+ { 0x2C, "VPFE0"},
+ { 0x2D, "VPFE1"},
+ { 0x30, "GEMAC"},
+ { 0x34, "USB0 RD"},
+ { 0x35, "USB0 WR"},
+ { 0x36, "USB1 RD"},
+ { 0x37, "USB1 WR"},
+};
+
+static struct l3_flagmux_data *am4372_l3_flagmux[] = {
+ &am4372_l3_flagmux_200f,
+ &am4372_l3_flagmux_100s,
+};
+
+static const struct omap_l3 am4372_l3_data = {
+ .l3_flagmux = am4372_l3_flagmux,
+ .num_modules = ARRAY_SIZE(am4372_l3_flagmux),
+ .l3_masters = am4372_l3_masters,
+ .num_masters = ARRAY_SIZE(am4372_l3_masters),
+ /* All 6 bits of the register field are used to distinguish the initiator */
+ .mst_addr_mask = 0x3F,
+};
+
+#endif /* __OMAP_L3_NOC_H */
diff --git a/drivers/bus/omap_l3_smx.c b/drivers/bus/omap_l3_smx.c
new file mode 100644
index 0000000000..31774648be
--- /dev/null
+++ b/drivers/bus/omap_l3_smx.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * OMAP3XXX L3 Interconnect Driver
+ *
+ * Copyright (C) 2011 Texas Corporation
+ * Felipe Balbi <balbi@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Sricharan <r.sricharan@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include "omap_l3_smx.h"
+
+static inline u64 omap3_l3_readll(void __iomem *base, u16 reg)
+{
+ return __raw_readll(base + reg);
+}
+
+static inline void omap3_l3_writell(void __iomem *base, u16 reg, u64 value)
+{
+ __raw_writell(value, base + reg);
+}
+
+static inline enum omap3_l3_code omap3_l3_decode_error_code(u64 error)
+{
+ return (error & 0x0f000000) >> L3_ERROR_LOG_CODE;
+}
+
+static inline u32 omap3_l3_decode_addr(u64 error_addr)
+{
+ return error_addr & 0xffffffff;
+}
+
+static inline unsigned omap3_l3_decode_cmd(u64 error)
+{
+ return (error & 0x07) >> L3_ERROR_LOG_CMD;
+}
+
+static inline enum omap3_l3_initiator_id omap3_l3_decode_initid(u64 error)
+{
+ return (error & 0xff00) >> L3_ERROR_LOG_INITID;
+}
+
+static inline unsigned omap3_l3_decode_req_info(u64 error)
+{
+ return (error >> 32) & 0xffff;
+}
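+
+/*
+ * Illustrative decoding (hypothetical register value): an L3_ERROR_LOG
+ * value of 0x02001b02 decodes with the helpers above to error code 0x2
+ * ("Address Hole"), initiator ID 27 (one of the MPU subsystem IDs) and
+ * command 0x2, with the MULTI bit clear.
+ */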
+
+static char *omap3_l3_code_string(u8 code)
+{
+ switch (code) {
+ case OMAP_L3_CODE_NOERROR:
+ return "No Error";
+ case OMAP_L3_CODE_UNSUP_CMD:
+ return "Unsupported Command";
+ case OMAP_L3_CODE_ADDR_HOLE:
+ return "Address Hole";
+ case OMAP_L3_CODE_PROTECT_VIOLATION:
+ return "Protection Violation";
+ case OMAP_L3_CODE_IN_BAND_ERR:
+ return "In-band Error";
+ case OMAP_L3_CODE_REQ_TOUT_NOT_ACCEPT:
+ return "Request Timeout Not Accepted";
+ case OMAP_L3_CODE_REQ_TOUT_NO_RESP:
+ return "Request Timeout, no response";
+ default:
+ return "UNKNOWN error";
+ }
+}
+
+static char *omap3_l3_initiator_string(u8 initid)
+{
+ switch (initid) {
+ case OMAP_L3_LCD:
+ return "LCD";
+ case OMAP_L3_SAD2D:
+ return "SAD2D";
+ case OMAP_L3_IA_MPU_SS_1:
+ case OMAP_L3_IA_MPU_SS_2:
+ case OMAP_L3_IA_MPU_SS_3:
+ case OMAP_L3_IA_MPU_SS_4:
+ case OMAP_L3_IA_MPU_SS_5:
+ return "MPU";
+ case OMAP_L3_IA_IVA_SS_1:
+ case OMAP_L3_IA_IVA_SS_2:
+ case OMAP_L3_IA_IVA_SS_3:
+ return "IVA_SS";
+ case OMAP_L3_IA_IVA_SS_DMA_1:
+ case OMAP_L3_IA_IVA_SS_DMA_2:
+ case OMAP_L3_IA_IVA_SS_DMA_3:
+ case OMAP_L3_IA_IVA_SS_DMA_4:
+ case OMAP_L3_IA_IVA_SS_DMA_5:
+ case OMAP_L3_IA_IVA_SS_DMA_6:
+ return "IVA_SS_DMA";
+ case OMAP_L3_IA_SGX:
+ return "SGX";
+ case OMAP_L3_IA_CAM_1:
+ case OMAP_L3_IA_CAM_2:
+ case OMAP_L3_IA_CAM_3:
+ return "CAM";
+ case OMAP_L3_IA_DAP:
+ return "DAP";
+ case OMAP_L3_SDMA_WR_1:
+ case OMAP_L3_SDMA_WR_2:
+ return "SDMA_WR";
+ case OMAP_L3_SDMA_RD_1:
+ case OMAP_L3_SDMA_RD_2:
+ case OMAP_L3_SDMA_RD_3:
+ case OMAP_L3_SDMA_RD_4:
+ return "SDMA_RD";
+ case OMAP_L3_USBOTG:
+ return "USB_OTG";
+ case OMAP_L3_USBHOST:
+ return "USB_HOST";
+ default:
+ return "UNKNOWN Initiator";
+ }
+}
+
+/*
+ * omap3_l3_block_irq - handles a register block's irq
+ * @l3: struct omap3_l3 *
+ * @error: L3_ERROR_LOG register value of our block
+ * @error_addr: L3_ERROR_LOG_ADDR register value of our block
+ *
+ * Called in hard-irq context. Caller should take care of locking
+ *
+ * The OMAP36xx TRM gives, on page 2001, Figure 9-10, the Typical Error
+ * Analysis Sequence. We follow that sequence here; please refer to
+ * that figure for more information on the subject.
+ */
+static irqreturn_t omap3_l3_block_irq(struct omap3_l3 *l3,
+ u64 error, int error_addr)
+{
+ u8 code = omap3_l3_decode_error_code(error);
+ u8 initid = omap3_l3_decode_initid(error);
+ u8 multi = error & L3_ERROR_LOG_MULTI;
+ u32 address = omap3_l3_decode_addr(error_addr);
+
+ pr_err("%s seen by %s %s at address %x\n",
+ omap3_l3_code_string(code),
+ omap3_l3_initiator_string(initid),
+ multi ? "Multiple Errors" : "", address);
+ WARN_ON(1);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t omap3_l3_app_irq(int irq, void *_l3)
+{
+ struct omap3_l3 *l3 = _l3;
+ u64 status, clear;
+ u64 error;
+ u64 error_addr;
+ u64 err_source = 0;
+ void __iomem *base;
+ int int_type;
+ irqreturn_t ret = IRQ_NONE;
+
+ int_type = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR;
+ if (!int_type)
+ status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_0);
+ else
+ status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_1);
+
+ /* identify the error source */
+ err_source = __ffs(status);
+
+ base = l3->rt + omap3_l3_bases[int_type][err_source];
+ error = omap3_l3_readll(base, L3_ERROR_LOG);
+ if (error) {
+ error_addr = omap3_l3_readll(base, L3_ERROR_LOG_ADDR);
+ ret |= omap3_l3_block_irq(l3, error, error_addr);
+ }
+
+ /*
+ * If we have a timeout error, there's nothing we can
+ * do besides rebooting the board. So let's BUG on any
+ * such error and handle the others. A timeout error
+ * is severe and not expected to occur.
+ */
+ BUG_ON(!int_type && status & L3_STATUS_0_TIMEOUT_MASK);
+
+ /* Clear the status register */
+ clear = (L3_AGENT_STATUS_CLEAR_IA << int_type) |
+ L3_AGENT_STATUS_CLEAR_TA;
+ omap3_l3_writell(base, L3_AGENT_STATUS, clear);
+
+ /* clear the error log register */
+ omap3_l3_writell(base, L3_ERROR_LOG, error);
+
+ return ret;
+}
+
+#if IS_BUILTIN(CONFIG_OF)
+static const struct of_device_id omap3_l3_match[] = {
+ {
+ .compatible = "ti,omap3-l3-smx",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, omap3_l3_match);
+#endif
+
+static int omap3_l3_probe(struct platform_device *pdev)
+{
+ struct omap3_l3 *l3;
+ struct resource *res;
+ int ret;
+
+ l3 = kzalloc(sizeof(*l3), GFP_KERNEL);
+ if (!l3)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, l3);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "couldn't find resource\n");
+ ret = -ENODEV;
+ goto err0;
+ }
+ l3->rt = ioremap(res->start, resource_size(res));
+ if (!l3->rt) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ l3->debug_irq = platform_get_irq(pdev, 0);
+ ret = request_irq(l3->debug_irq, omap3_l3_app_irq, IRQF_TRIGGER_RISING,
+ "l3-debug-irq", l3);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't request debug irq\n");
+ goto err1;
+ }
+
+ l3->app_irq = platform_get_irq(pdev, 1);
+ ret = request_irq(l3->app_irq, omap3_l3_app_irq, IRQF_TRIGGER_RISING,
+ "l3-app-irq", l3);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't request app irq\n");
+ goto err2;
+ }
+
+ return 0;
+
+err2:
+ free_irq(l3->debug_irq, l3);
+err1:
+ iounmap(l3->rt);
+err0:
+ kfree(l3);
+ return ret;
+}
+
+static int omap3_l3_remove(struct platform_device *pdev)
+{
+ struct omap3_l3 *l3 = platform_get_drvdata(pdev);
+
+ free_irq(l3->app_irq, l3);
+ free_irq(l3->debug_irq, l3);
+ iounmap(l3->rt);
+ kfree(l3);
+
+ return 0;
+}
+
+static struct platform_driver omap3_l3_driver = {
+ .probe = omap3_l3_probe,
+ .remove = omap3_l3_remove,
+ .driver = {
+ .name = "omap_l3_smx",
+ .of_match_table = of_match_ptr(omap3_l3_match),
+ },
+};
+
+static int __init omap3_l3_init(void)
+{
+ return platform_driver_register(&omap3_l3_driver);
+}
+postcore_initcall_sync(omap3_l3_init);
+
+static void __exit omap3_l3_exit(void)
+{
+ platform_driver_unregister(&omap3_l3_driver);
+}
+module_exit(omap3_l3_exit);
+
+MODULE_AUTHOR("Felipe Balbi");
+MODULE_AUTHOR("Santosh Shilimkar");
+MODULE_AUTHOR("Sricharan R");
+MODULE_DESCRIPTION("OMAP3XXX L3 Interconnect Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/bus/omap_l3_smx.h b/drivers/bus/omap_l3_smx.h
new file mode 100644
index 0000000000..1d841d10bb
--- /dev/null
+++ b/drivers/bus/omap_l3_smx.h
@@ -0,0 +1,324 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * OMAP3XXX L3 Interconnect Driver header
+ *
+ * Copyright (C) 2011 Texas Corporation
+ * Felipe Balbi <balbi@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * sricharan <r.sricharan@ti.com>
+ */
+#ifndef __ARCH_ARM_MACH_OMAP2_L3_INTERCONNECT_3XXX_H
+#define __ARCH_ARM_MACH_OMAP2_L3_INTERCONNECT_3XXX_H
+
+/* Register definitions. All 64-bit wide */
+#define L3_COMPONENT 0x000
+#define L3_CORE 0x018
+#define L3_AGENT_CONTROL 0x020
+#define L3_AGENT_STATUS 0x028
+#define L3_ERROR_LOG 0x058
+
+#define L3_ERROR_LOG_MULTI (1 << 31)
+#define L3_ERROR_LOG_SECONDARY (1 << 30)
+
+#define L3_ERROR_LOG_ADDR 0x060
+
+/* Register definitions for Sideband Interconnect */
+#define L3_SI_CONTROL 0x020
+#define L3_SI_FLAG_STATUS_0 0x510
+
+static const u64 shift = 1;
+
+#define L3_STATUS_0_MPUIA_BRST (shift << 0)
+#define L3_STATUS_0_MPUIA_RSP (shift << 1)
+#define L3_STATUS_0_MPUIA_INBAND (shift << 2)
+#define L3_STATUS_0_IVAIA_BRST (shift << 6)
+#define L3_STATUS_0_IVAIA_RSP (shift << 7)
+#define L3_STATUS_0_IVAIA_INBAND (shift << 8)
+#define L3_STATUS_0_SGXIA_BRST (shift << 9)
+#define L3_STATUS_0_SGXIA_RSP (shift << 10)
+#define L3_STATUS_0_SGXIA_MERROR (shift << 11)
+#define L3_STATUS_0_CAMIA_BRST (shift << 12)
+#define L3_STATUS_0_CAMIA_RSP (shift << 13)
+#define L3_STATUS_0_CAMIA_INBAND (shift << 14)
+#define L3_STATUS_0_DISPIA_BRST (shift << 15)
+#define L3_STATUS_0_DISPIA_RSP (shift << 16)
+#define L3_STATUS_0_DMARDIA_BRST (shift << 18)
+#define L3_STATUS_0_DMARDIA_RSP (shift << 19)
+#define L3_STATUS_0_DMAWRIA_BRST (shift << 21)
+#define L3_STATUS_0_DMAWRIA_RSP (shift << 22)
+#define L3_STATUS_0_USBOTGIA_BRST (shift << 24)
+#define L3_STATUS_0_USBOTGIA_RSP (shift << 25)
+#define L3_STATUS_0_USBOTGIA_INBAND (shift << 26)
+#define L3_STATUS_0_USBHOSTIA_BRST (shift << 27)
+#define L3_STATUS_0_USBHOSTIA_INBAND (shift << 28)
+#define L3_STATUS_0_SMSTA_REQ (shift << 48)
+#define L3_STATUS_0_GPMCTA_REQ (shift << 49)
+#define L3_STATUS_0_OCMRAMTA_REQ (shift << 50)
+#define L3_STATUS_0_OCMROMTA_REQ (shift << 51)
+#define L3_STATUS_0_IVATA_REQ (shift << 54)
+#define L3_STATUS_0_SGXTA_REQ (shift << 55)
+#define L3_STATUS_0_SGXTA_SERROR (shift << 56)
+#define L3_STATUS_0_GPMCTA_SERROR (shift << 57)
+#define L3_STATUS_0_L4CORETA_REQ (shift << 58)
+#define L3_STATUS_0_L4PERTA_REQ (shift << 59)
+#define L3_STATUS_0_L4EMUTA_REQ (shift << 60)
+#define L3_STATUS_0_MAD2DTA_REQ (shift << 61)
+
+#define L3_STATUS_0_TIMEOUT_MASK (L3_STATUS_0_MPUIA_BRST \
+ | L3_STATUS_0_MPUIA_RSP \
+ | L3_STATUS_0_IVAIA_BRST \
+ | L3_STATUS_0_IVAIA_RSP \
+ | L3_STATUS_0_SGXIA_BRST \
+ | L3_STATUS_0_SGXIA_RSP \
+ | L3_STATUS_0_CAMIA_BRST \
+ | L3_STATUS_0_CAMIA_RSP \
+ | L3_STATUS_0_DISPIA_BRST \
+ | L3_STATUS_0_DISPIA_RSP \
+ | L3_STATUS_0_DMARDIA_BRST \
+ | L3_STATUS_0_DMARDIA_RSP \
+ | L3_STATUS_0_DMAWRIA_BRST \
+ | L3_STATUS_0_DMAWRIA_RSP \
+ | L3_STATUS_0_USBOTGIA_BRST \
+ | L3_STATUS_0_USBOTGIA_RSP \
+ | L3_STATUS_0_USBHOSTIA_BRST \
+ | L3_STATUS_0_SMSTA_REQ \
+ | L3_STATUS_0_GPMCTA_REQ \
+ | L3_STATUS_0_OCMRAMTA_REQ \
+ | L3_STATUS_0_OCMROMTA_REQ \
+ | L3_STATUS_0_IVATA_REQ \
+ | L3_STATUS_0_SGXTA_REQ \
+ | L3_STATUS_0_L4CORETA_REQ \
+ | L3_STATUS_0_L4PERTA_REQ \
+ | L3_STATUS_0_L4EMUTA_REQ \
+ | L3_STATUS_0_MAD2DTA_REQ)
+
+#define L3_SI_FLAG_STATUS_1 0x530
+
+#define L3_STATUS_1_MPU_DATAIA (1 << 0)
+#define L3_STATUS_1_DAPIA0 (1 << 3)
+#define L3_STATUS_1_DAPIA1 (1 << 4)
+#define L3_STATUS_1_IVAIA (1 << 6)
+
+#define L3_PM_ERROR_LOG 0x020
+#define L3_PM_CONTROL 0x028
+#define L3_PM_ERROR_CLEAR_SINGLE 0x030
+#define L3_PM_ERROR_CLEAR_MULTI 0x038
+#define L3_PM_REQ_INFO_PERMISSION(n) (0x048 + (0x020 * n))
+#define L3_PM_READ_PERMISSION(n) (0x050 + (0x020 * n))
+#define L3_PM_WRITE_PERMISSION(n) (0x058 + (0x020 * n))
+#define L3_PM_ADDR_MATCH(n) (0x060 + (0x020 * n))
+
+/* L3 error log bit fields. Common for IA and TA */
+#define L3_ERROR_LOG_CODE 24
+#define L3_ERROR_LOG_INITID 8
+#define L3_ERROR_LOG_CMD 0
+
+/* L3 agent status bit fields. */
+#define L3_AGENT_STATUS_CLEAR_IA 0x10000000
+#define L3_AGENT_STATUS_CLEAR_TA 0x01000000
+
+#define OMAP34xx_IRQ_L3_APP 10
+#define L3_APPLICATION_ERROR 0x0
+#define L3_DEBUG_ERROR 0x1
+
+enum omap3_l3_initiator_id {
+ /* LCD has 1 ID */
+ OMAP_L3_LCD = 29,
+ /* SAD2D has 1 ID */
+ OMAP_L3_SAD2D = 28,
+ /* MPU has 5 IDs */
+ OMAP_L3_IA_MPU_SS_1 = 27,
+ OMAP_L3_IA_MPU_SS_2 = 26,
+ OMAP_L3_IA_MPU_SS_3 = 25,
+ OMAP_L3_IA_MPU_SS_4 = 24,
+ OMAP_L3_IA_MPU_SS_5 = 23,
+ /* IVA2.2 SS has 3 IDs*/
+ OMAP_L3_IA_IVA_SS_1 = 22,
+ OMAP_L3_IA_IVA_SS_2 = 21,
+ OMAP_L3_IA_IVA_SS_3 = 20,
+ /* IVA 2.2 SS DMA has 6 IDS */
+ OMAP_L3_IA_IVA_SS_DMA_1 = 19,
+ OMAP_L3_IA_IVA_SS_DMA_2 = 18,
+ OMAP_L3_IA_IVA_SS_DMA_3 = 17,
+ OMAP_L3_IA_IVA_SS_DMA_4 = 16,
+ OMAP_L3_IA_IVA_SS_DMA_5 = 15,
+ OMAP_L3_IA_IVA_SS_DMA_6 = 14,
+ /* SGX has 1 ID */
+ OMAP_L3_IA_SGX = 13,
+ /* CAM has 3 ID */
+ OMAP_L3_IA_CAM_1 = 12,
+ OMAP_L3_IA_CAM_2 = 11,
+ OMAP_L3_IA_CAM_3 = 10,
+ /* DAP has 1 ID */
+ OMAP_L3_IA_DAP = 9,
+ /* SDMA WR has 2 IDs */
+ OMAP_L3_SDMA_WR_1 = 8,
+ OMAP_L3_SDMA_WR_2 = 7,
+ /* SDMA RD has 4 IDs */
+ OMAP_L3_SDMA_RD_1 = 6,
+ OMAP_L3_SDMA_RD_2 = 5,
+ OMAP_L3_SDMA_RD_3 = 4,
+ OMAP_L3_SDMA_RD_4 = 3,
+ /* HSUSB OTG has 1 ID */
+ OMAP_L3_USBOTG = 2,
+ /* HSUSB HOST has 1 ID */
+ OMAP_L3_USBHOST = 1,
+};
+
+enum omap3_l3_code {
+ OMAP_L3_CODE_NOERROR = 0,
+ OMAP_L3_CODE_UNSUP_CMD = 1,
+ OMAP_L3_CODE_ADDR_HOLE = 2,
+ OMAP_L3_CODE_PROTECT_VIOLATION = 3,
+ OMAP_L3_CODE_IN_BAND_ERR = 4,
+ /* codes 5 and 6 are reserved */
+ OMAP_L3_CODE_REQ_TOUT_NOT_ACCEPT = 7,
+ OMAP_L3_CODE_REQ_TOUT_NO_RESP = 8,
+ /* codes 9 - 15 are also reserved */
+};
+
+struct omap3_l3 {
+ struct device *dev;
+ struct clk *ick;
+
+ /* memory base*/
+ void __iomem *rt;
+
+ int debug_irq;
+ int app_irq;
+
+ /* true when an in-band functional error occurs */
+ unsigned inband:1;
+};
+
+/* offsets for l3 agents in order with the Flag status register */
+static unsigned int omap3_l3_app_bases[] = {
+ /* MPU IA */
+ 0x1400,
+ 0x1400,
+ 0x1400,
+ /* RESERVED */
+ 0,
+ 0,
+ 0,
+ /* IVA 2.2 IA */
+ 0x1800,
+ 0x1800,
+ 0x1800,
+ /* SGX IA */
+ 0x1c00,
+ 0x1c00,
+ /* RESERVED */
+ 0,
+ /* CAMERA IA */
+ 0x5800,
+ 0x5800,
+ 0x5800,
+ /* DISPLAY IA */
+ 0x5400,
+ 0x5400,
+ /* RESERVED */
+ 0,
+ /*SDMA RD IA */
+ 0x4c00,
+ 0x4c00,
+ /* RESERVED */
+ 0,
+ /* SDMA WR IA */
+ 0x5000,
+ 0x5000,
+ /* RESERVED */
+ 0,
+ /* USB OTG IA */
+ 0x4400,
+ 0x4400,
+ 0x4400,
+ /* USB HOST IA */
+ 0x4000,
+ 0x4000,
+ /* RESERVED */
+ 0,
+ 0,
+ 0,
+ 0,
+ /* SAD2D IA */
+ 0x3000,
+ 0x3000,
+ 0x3000,
+ /* RESERVED */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ /* SMA TA */
+ 0x2000,
+ /* GPMC TA */
+ 0x2400,
+ /* OCM RAM TA */
+ 0x2800,
+ /* OCM ROM TA */
+ 0x2C00,
+ /* L4 CORE TA */
+ 0x6800,
+ /* L4 PER TA */
+ 0x6c00,
+ /* IVA 2.2 TA */
+ 0x6000,
+ /* SGX TA */
+ 0x6400,
+ /* L4 EMU TA */
+ 0x7000,
+ /* GPMC TA */
+ 0x2400,
+ /* L4 CORE TA */
+ 0x6800,
+ /* L4 PER TA */
+ 0x6c00,
+ /* L4 EMU TA */
+ 0x7000,
+ /* MAD2D TA */
+ 0x3400,
+ /* RESERVED */
+ 0,
+ 0,
+};
+
+static unsigned int omap3_l3_debug_bases[] = {
+ /* MPU DATA IA */
+ 0x1400,
+ /* RESERVED */
+ 0,
+ 0,
+ /* DAP IA */
+ 0x5c00,
+ 0x5c00,
+ /* RESERVED */
+ 0,
+ /* IVA 2.2 IA */
+ 0x1800,
+ /* REST RESERVED */
+};
+
+static u32 *omap3_l3_bases[] = {
+ omap3_l3_app_bases,
+ omap3_l3_debug_bases,
+};
+
+/*
+ * REVISIT define __raw_readll/__raw_writell here, but move them to
+ * <asm/io.h> at some point
+ */
+#define __raw_writell(v, a) (__chk_io_ptr(a), \
+ *(volatile u64 __force *)(a) = (v))
+#define __raw_readll(a) (__chk_io_ptr(a), \
+ *(volatile u64 __force *)(a))
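+
+/*
+ * Usage sketch (illustrative only; the offset below is an example, not a
+ * statement that this particular register is 64 bits wide):
+ *
+ *	u64 log = __raw_readll(l3->rt + L3_PM_ERROR_LOG);
+ *
+ * These behave like plain 64-bit loads/stores: no barriers and no
+ * byte-swapping, which is why the REVISIT note above suggests moving
+ * proper helpers into <asm/io.h>.
+ */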
+
+#endif
diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c
new file mode 100644
index 0000000000..c1fef1b4bd
--- /dev/null
+++ b/drivers/bus/qcom-ebi2.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Qualcomm External Bus Interface 2 (EBI2) driver
+ * an older version of the Qualcomm Parallel Interface Controller (QPIC)
+ *
+ * Copyright (C) 2016 Linaro Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * See the device tree bindings for this block for more details on the
+ * hardware.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+
+/*
+ * CS0, CS1, CS4 and CS5 are two bits wide; CS2 and CS3 are one bit wide.
+ */
+#define EBI2_CS0_ENABLE_MASK (BIT(0) | BIT(1))
+#define EBI2_CS1_ENABLE_MASK (BIT(2) | BIT(3))
+#define EBI2_CS2_ENABLE_MASK BIT(4)
+#define EBI2_CS3_ENABLE_MASK BIT(5)
+#define EBI2_CS4_ENABLE_MASK (BIT(6) | BIT(7))
+#define EBI2_CS5_ENABLE_MASK (BIT(8) | BIT(9))
+#define EBI2_CSN_MASK GENMASK(9, 0)
+
+#define EBI2_XMEM_CFG 0x0000 /* Power management etc */
+
+/*
+ * SLOW CSn CFG
+ *
+ * Bits 31-28: RECOVERY recovery cycles (0 = 1, 1 = 2 etc) this is the time the
+ * memory continues to drive the data bus after OE is de-asserted.
+ * Inserted when reading one CS and switching to another CS or read
+ * followed by write on the same CS. Valid values 0 thru 15.
+ * Bits 27-24: WR_HOLD write hold cycles, these are extra cycles inserted after
+ * every write minimum 1. The data out is driven from the time WE is
+ * asserted until CS is asserted. With a hold of 1, the CS stays
+ * active for 1 extra cycle etc. Valid values 0 thru 15.
+ * Bits 23-16: WR_DELTA initial latency for write cycles inserted for the first
+ * write to a page or burst memory
+ * Bits 15-8: RD_DELTA initial latency for read cycles inserted for the first
+ * read to a page or burst memory
+ * Bits 7-4: WR_WAIT number of wait cycles for every write access, 0=1 cycle
+ * so 1 thru 16 cycles.
+ * Bits 3-0: RD_WAIT number of wait cycles for every read access, 0=1 cycle
+ * so 1 thru 16 cycles.
+ */
+#define EBI2_XMEM_CS0_SLOW_CFG 0x0008
+#define EBI2_XMEM_CS1_SLOW_CFG 0x000C
+#define EBI2_XMEM_CS2_SLOW_CFG 0x0010
+#define EBI2_XMEM_CS3_SLOW_CFG 0x0014
+#define EBI2_XMEM_CS4_SLOW_CFG 0x0018
+#define EBI2_XMEM_CS5_SLOW_CFG 0x001C
+
+#define EBI2_XMEM_RECOVERY_SHIFT 28
+#define EBI2_XMEM_WR_HOLD_SHIFT 24
+#define EBI2_XMEM_WR_DELTA_SHIFT 16
+#define EBI2_XMEM_RD_DELTA_SHIFT 8
+#define EBI2_XMEM_WR_WAIT_SHIFT 4
+#define EBI2_XMEM_RD_WAIT_SHIFT 0
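+
+/*
+ * Illustration only (hypothetical timing values, not taken from any board):
+ * a device needing 2 recovery cycles, 1 write hold cycle and 4 wait states
+ * for both reads and writes (field value 3, since a field value of 0 means
+ * 1 cycle) would be programmed with a SLOW CFG word of
+ *
+ *	(2 << EBI2_XMEM_RECOVERY_SHIFT) | (1 << EBI2_XMEM_WR_HOLD_SHIFT) |
+ *	(3 << EBI2_XMEM_WR_WAIT_SHIFT) | (3 << EBI2_XMEM_RD_WAIT_SHIFT)
+ *	== 0x21000033
+ */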
+
+/*
+ * FAST CSn CFG
+ * Bits 31-28: ?
+ * Bits 27-24: RD_HOLD: the length in cycles of the first segment of a read
+ * transfer. For a single read transfer this will be the time
+ * from CS assertion to OE assertion.
+ * Bits 23-18: ?
+ * Bits 17-16: ADV_OE_RECOVERY, the number of cycles elapsed before an OE
+ * assertion, with respect to the cycle where ADV is asserted.
+ * 2 means 2 cycles between ADV and OE. Values 0, 1, 2 or 3.
+ * Bit 5: ADDR_HOLD_ENA, the address is held for an extra cycle to meet
+ * hold time requirements with ADV assertion.
+ *
+ * The manual mentions "write precharge cycles" and "precharge cycles".
+ * We have not been able to figure out which bit fields these correspond to
+ * in the hardware, or what valid values exist. The current hypothesis is that
+ * this is something just used on the FAST chip selects. There is also a "byte
+ * device enable" flag somewhere for 8-bit memories.
+ */
+#define EBI2_XMEM_CS0_FAST_CFG 0x0028
+#define EBI2_XMEM_CS1_FAST_CFG 0x002C
+#define EBI2_XMEM_CS2_FAST_CFG 0x0030
+#define EBI2_XMEM_CS3_FAST_CFG 0x0034
+#define EBI2_XMEM_CS4_FAST_CFG 0x0038
+#define EBI2_XMEM_CS5_FAST_CFG 0x003C
+
+#define EBI2_XMEM_RD_HOLD_SHIFT 24
+#define EBI2_XMEM_ADV_OE_RECOVERY_SHIFT 16
+#define EBI2_XMEM_ADDR_HOLD_ENA_SHIFT 5
+
+/**
+ * struct cs_data - struct with info on a chipselect setting
+ * @enable_mask: mask to enable the chipselect in the EBI2 config
+ * @slow_cfg: offset to XMEMC slow CS config
+ * @fast_cfg: offset to XMEMC fast CS config
+ */
+struct cs_data {
+ u32 enable_mask;
+ u16 slow_cfg;
+ u16 fast_cfg;
+};
+
+static const struct cs_data cs_info[] = {
+ {
+ /* CS0 */
+ .enable_mask = EBI2_CS0_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS0_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS0_FAST_CFG,
+ },
+ {
+ /* CS1 */
+ .enable_mask = EBI2_CS1_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS1_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS1_FAST_CFG,
+ },
+ {
+ /* CS2 */
+ .enable_mask = EBI2_CS2_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS2_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS2_FAST_CFG,
+ },
+ {
+ /* CS3 */
+ .enable_mask = EBI2_CS3_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS3_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS3_FAST_CFG,
+ },
+ {
+ /* CS4 */
+ .enable_mask = EBI2_CS4_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS4_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS4_FAST_CFG,
+ },
+ {
+ /* CS5 */
+ .enable_mask = EBI2_CS5_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS5_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS5_FAST_CFG,
+ },
+};
+
+/**
+ * struct ebi2_xmem_prop - describes an XMEM config property
+ * @prop: the device tree binding name
+ * @max: maximum value for the property
+ * @slowreg: true if this property is in the SLOW CS config register
+ * else it is assumed to be in the FAST config register
+ * @shift: the bit field start in the SLOW or FAST register for this
+ * property
+ */
+struct ebi2_xmem_prop {
+ const char *prop;
+ u32 max;
+ bool slowreg;
+ u16 shift;
+};
+
+static const struct ebi2_xmem_prop xmem_props[] = {
+ {
+ .prop = "qcom,xmem-recovery-cycles",
+ .max = 15,
+ .slowreg = true,
+ .shift = EBI2_XMEM_RECOVERY_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-write-hold-cycles",
+ .max = 15,
+ .slowreg = true,
+ .shift = EBI2_XMEM_WR_HOLD_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-write-delta-cycles",
+ .max = 255,
+ .slowreg = true,
+ .shift = EBI2_XMEM_WR_DELTA_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-read-delta-cycles",
+ .max = 255,
+ .slowreg = true,
+ .shift = EBI2_XMEM_RD_DELTA_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-write-wait-cycles",
+ .max = 15,
+ .slowreg = true,
+ .shift = EBI2_XMEM_WR_WAIT_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-read-wait-cycles",
+ .max = 15,
+ .slowreg = true,
+ .shift = EBI2_XMEM_RD_WAIT_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-address-hold-enable",
+ .max = 1, /* boolean prop */
+ .slowreg = false,
+ .shift = EBI2_XMEM_ADDR_HOLD_ENA_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-adv-to-oe-recovery-cycles",
+ .max = 3,
+ .slowreg = false,
+ .shift = EBI2_XMEM_ADV_OE_RECOVERY_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-read-hold-cycles",
+ .max = 15,
+ .slowreg = false,
+ .shift = EBI2_XMEM_RD_HOLD_SHIFT,
+ },
+};
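+
+/*
+ * A hypothetical child node using a few of these properties might look like
+ * the following (illustration only, not a complete or authoritative binding
+ * example; see the device tree bindings for this block):
+ *
+ *	flash@0,0 {
+ *		compatible = "cfi-flash";
+ *		reg = <0 0x0 0x800000>;
+ *		qcom,xmem-recovery-cycles = <2>;
+ *		qcom,xmem-write-hold-cycles = <1>;
+ *		qcom,xmem-read-wait-cycles = <3>;
+ *	};
+ *
+ * qcom_ebi2_setup_chipselect() reads each listed property from the child
+ * node and ORs its value into the SLOW or FAST CS config word at the
+ * documented shift.
+ */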
+
+static void qcom_ebi2_setup_chipselect(struct device_node *np,
+ struct device *dev,
+ void __iomem *ebi2_base,
+ void __iomem *ebi2_xmem,
+ u32 csindex)
+{
+ const struct cs_data *csd;
+ u32 slowcfg, fastcfg;
+ u32 val;
+ int ret;
+ int i;
+
+ csd = &cs_info[csindex];
+ val = readl(ebi2_base);
+ val |= csd->enable_mask;
+ writel(val, ebi2_base);
+ dev_dbg(dev, "enabled CS%u\n", csindex);
+
+ /* Next set up the XMEMC */
+ slowcfg = 0;
+ fastcfg = 0;
+
+ for (i = 0; i < ARRAY_SIZE(xmem_props); i++) {
+ const struct ebi2_xmem_prop *xp = &xmem_props[i];
+
+ /* All are regular u32 values */
+ ret = of_property_read_u32(np, xp->prop, &val);
+ if (ret) {
+ dev_dbg(dev, "could not read %s for CS%d\n",
+ xp->prop, csindex);
+ continue;
+ }
+
+ /* First check boolean props */
+ if (xp->max == 1 && val) {
+ if (xp->slowreg)
+ slowcfg |= BIT(xp->shift);
+ else
+ fastcfg |= BIT(xp->shift);
+ dev_dbg(dev, "set %s flag\n", xp->prop);
+ continue;
+ }
+
+ /* We're dealing with an u32 */
+ if (val > xp->max) {
+ dev_err(dev,
+ "too high value for %s: %u, capped at %u\n",
+ xp->prop, val, xp->max);
+ val = xp->max;
+ }
+ if (xp->slowreg)
+ slowcfg |= (val << xp->shift);
+ else
+ fastcfg |= (val << xp->shift);
+ dev_dbg(dev, "set %s to %u\n", xp->prop, val);
+ }
+
+ dev_info(dev, "CS%u: SLOW CFG 0x%08x, FAST CFG 0x%08x\n",
+ csindex, slowcfg, fastcfg);
+
+ if (slowcfg)
+ writel(slowcfg, ebi2_xmem + csd->slow_cfg);
+ if (fastcfg)
+ writel(fastcfg, ebi2_xmem + csd->fast_cfg);
+}
+
+static int qcom_ebi2_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *child;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *ebi2_base;
+ void __iomem *ebi2_xmem;
+ struct clk *ebi2xclk;
+ struct clk *ebi2clk;
+ bool have_children = false;
+ u32 val;
+ int ret;
+
+ ebi2xclk = devm_clk_get(dev, "ebi2x");
+ if (IS_ERR(ebi2xclk))
+ return PTR_ERR(ebi2xclk);
+
+ ret = clk_prepare_enable(ebi2xclk);
+ if (ret) {
+ dev_err(dev, "could not enable EBI2X clk (%d)\n", ret);
+ return ret;
+ }
+
+ ebi2clk = devm_clk_get(dev, "ebi2");
+ if (IS_ERR(ebi2clk)) {
+ ret = PTR_ERR(ebi2clk);
+ goto err_disable_2x_clk;
+ }
+
+ ret = clk_prepare_enable(ebi2clk);
+ if (ret) {
+ dev_err(dev, "could not enable EBI2 clk\n");
+ goto err_disable_2x_clk;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ebi2_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ebi2_base)) {
+ ret = PTR_ERR(ebi2_base);
+ goto err_disable_clk;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ ebi2_xmem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ebi2_xmem)) {
+ ret = PTR_ERR(ebi2_xmem);
+ goto err_disable_clk;
+ }
+
+ /* Allegedly this turns the power save mode off */
+ writel(0UL, ebi2_xmem + EBI2_XMEM_CFG);
+
+ /* Disable all chipselects */
+ val = readl(ebi2_base);
+ val &= ~EBI2_CSN_MASK;
+ writel(val, ebi2_base);
+
+ /* Walk over the child nodes and see what chipselects we use */
+ for_each_available_child_of_node(np, child) {
+ u32 csindex;
+
+ /* Figure out the chipselect */
+ ret = of_property_read_u32(child, "reg", &csindex);
+ if (ret) {
+ of_node_put(child);
+ return ret;
+ }
+
+ if (csindex > 5) {
+ dev_err(dev,
+ "invalid chipselect %u, we only support 0-5\n",
+ csindex);
+ continue;
+ }
+
+ qcom_ebi2_setup_chipselect(child,
+ dev,
+ ebi2_base,
+ ebi2_xmem,
+ csindex);
+
+ /* We have at least one child */
+ have_children = true;
+ }
+
+ if (have_children)
+ return of_platform_default_populate(np, NULL, dev);
+ return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(ebi2clk);
+err_disable_2x_clk:
+ clk_disable_unprepare(ebi2xclk);
+
+ return ret;
+}
+
+static const struct of_device_id qcom_ebi2_of_match[] = {
+ { .compatible = "qcom,msm8660-ebi2", },
+ { .compatible = "qcom,apq8060-ebi2", },
+ { }
+};
+
+static struct platform_driver qcom_ebi2_driver = {
+ .probe = qcom_ebi2_probe,
+ .driver = {
+ .name = "qcom-ebi2",
+ .of_match_table = qcom_ebi2_of_match,
+ },
+};
+module_platform_driver(qcom_ebi2_driver);
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm EBI2 driver");
diff --git a/drivers/bus/qcom-ssc-block-bus.c b/drivers/bus/qcom-ssc-block-bus.c
new file mode 100644
index 0000000000..3fef18a43c
--- /dev/null
+++ b/drivers/bus/qcom-ssc-block-bus.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2021, Michael Srba
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+/* AXI Halt Register Offsets */
+#define AXI_HALTREQ_REG 0x0
+#define AXI_HALTACK_REG 0x4
+#define AXI_IDLE_REG 0x8
+
+#define SSCAON_CONFIG0_CLAMP_EN_OVRD BIT(4)
+#define SSCAON_CONFIG0_CLAMP_EN_OVRD_VAL BIT(5)
+
+static const char *const qcom_ssc_block_pd_names[] = {
+ "ssc_cx",
+ "ssc_mx"
+};
+
+struct qcom_ssc_block_bus_data {
+ const char *const *pd_names;
+ struct device *pds[ARRAY_SIZE(qcom_ssc_block_pd_names)];
+ char __iomem *reg_mpm_sscaon_config0;
+ char __iomem *reg_mpm_sscaon_config1;
+ struct regmap *halt_map;
+ struct clk *xo_clk;
+ struct clk *aggre2_clk;
+ struct clk *gcc_im_sleep_clk;
+ struct clk *aggre2_north_clk;
+ struct clk *ssc_xo_clk;
+ struct clk *ssc_ahbs_clk;
+ struct reset_control *ssc_bcr;
+ struct reset_control *ssc_reset;
+ u32 ssc_axi_halt;
+ int num_pds;
+};
+
+static void reg32_set_bits(char __iomem *reg, u32 value)
+{
+ u32 tmp = ioread32(reg);
+
+ iowrite32(tmp | value, reg);
+}
+
+static void reg32_clear_bits(char __iomem *reg, u32 value)
+{
+ u32 tmp = ioread32(reg);
+
+ iowrite32(tmp & (~value), reg);
+}
+
+static int qcom_ssc_block_bus_init(struct device *dev)
+{
+ int ret;
+
+ struct qcom_ssc_block_bus_data *data = dev_get_drvdata(dev);
+
+ ret = clk_prepare_enable(data->xo_clk);
+ if (ret) {
+ dev_err(dev, "error enabling xo_clk: %d\n", ret);
+ goto err_xo_clk;
+ }
+
+ ret = clk_prepare_enable(data->aggre2_clk);
+ if (ret) {
+ dev_err(dev, "error enabling aggre2_clk: %d\n", ret);
+ goto err_aggre2_clk;
+ }
+
+ ret = clk_prepare_enable(data->gcc_im_sleep_clk);
+ if (ret) {
+ dev_err(dev, "error enabling gcc_im_sleep_clk: %d\n", ret);
+ goto err_gcc_im_sleep_clk;
+ }
+
+ /*
+ * We need to intervene here because the HW logic driving these signals cannot handle
+ * initialization after power collapse by itself.
+ */
+ reg32_clear_bits(data->reg_mpm_sscaon_config0,
+ SSCAON_CONFIG0_CLAMP_EN_OVRD | SSCAON_CONFIG0_CLAMP_EN_OVRD_VAL);
+ /* override few_ack/rest_ack */
+ reg32_clear_bits(data->reg_mpm_sscaon_config1, BIT(31));
+
+ ret = clk_prepare_enable(data->aggre2_north_clk);
+ if (ret) {
+ dev_err(dev, "error enabling aggre2_north_clk: %d\n", ret);
+ goto err_aggre2_north_clk;
+ }
+
+ ret = reset_control_deassert(data->ssc_reset);
+ if (ret) {
+ dev_err(dev, "error deasserting ssc_reset: %d\n", ret);
+ goto err_ssc_reset;
+ }
+
+ ret = reset_control_deassert(data->ssc_bcr);
+ if (ret) {
+ dev_err(dev, "error deasserting ssc_bcr: %d\n", ret);
+ goto err_ssc_bcr;
+ }
+
+ regmap_write(data->halt_map, data->ssc_axi_halt + AXI_HALTREQ_REG, 0);
+
+ ret = clk_prepare_enable(data->ssc_xo_clk);
+ if (ret) {
+ dev_err(dev, "error enabling ssc_xo_clk: %d\n", ret);
+ goto err_ssc_xo_clk;
+ }
+
+ ret = clk_prepare_enable(data->ssc_ahbs_clk);
+ if (ret) {
+ dev_err(dev, "error enabling ssc_ahbs_clk: %d\n", ret);
+ goto err_ssc_ahbs_clk;
+ }
+
+ return 0;
+
+err_ssc_ahbs_clk:
+ clk_disable(data->ssc_xo_clk);
+
+err_ssc_xo_clk:
+ regmap_write(data->halt_map, data->ssc_axi_halt + AXI_HALTREQ_REG, 1);
+
+ reset_control_assert(data->ssc_bcr);
+
+err_ssc_bcr:
+ reset_control_assert(data->ssc_reset);
+
+err_ssc_reset:
+ clk_disable(data->aggre2_north_clk);
+
+err_aggre2_north_clk:
+ reg32_set_bits(data->reg_mpm_sscaon_config0,
+ SSCAON_CONFIG0_CLAMP_EN_OVRD | SSCAON_CONFIG0_CLAMP_EN_OVRD_VAL);
+ reg32_set_bits(data->reg_mpm_sscaon_config1, BIT(31));
+
+ clk_disable(data->gcc_im_sleep_clk);
+
+err_gcc_im_sleep_clk:
+ clk_disable(data->aggre2_clk);
+
+err_aggre2_clk:
+ clk_disable(data->xo_clk);
+
+err_xo_clk:
+ return ret;
+}
+
+static void qcom_ssc_block_bus_deinit(struct device *dev)
+{
+ int ret;
+
+ struct qcom_ssc_block_bus_data *data = dev_get_drvdata(dev);
+
+ clk_disable(data->ssc_xo_clk);
+ clk_disable(data->ssc_ahbs_clk);
+
+ ret = reset_control_assert(data->ssc_bcr);
+ if (ret)
+ dev_err(dev, "error asserting ssc_bcr: %d\n", ret);
+
+ regmap_write(data->halt_map, data->ssc_axi_halt + AXI_HALTREQ_REG, 1);
+
+ reg32_set_bits(data->reg_mpm_sscaon_config1, BIT(31));
+ reg32_set_bits(data->reg_mpm_sscaon_config0,
+ SSCAON_CONFIG0_CLAMP_EN_OVRD | SSCAON_CONFIG0_CLAMP_EN_OVRD_VAL);
+
+ ret = reset_control_assert(data->ssc_reset);
+ if (ret)
+ dev_err(dev, "error asserting ssc_reset: %d\n", ret);
+
+ clk_disable(data->gcc_im_sleep_clk);
+
+ clk_disable(data->aggre2_north_clk);
+
+ clk_disable(data->aggre2_clk);
+ clk_disable(data->xo_clk);
+}
+
+static int qcom_ssc_block_bus_pds_attach(struct device *dev, struct device **pds,
+ const char *const *pd_names, size_t num_pds)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < num_pds; i++) {
+ pds[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
+ if (IS_ERR_OR_NULL(pds[i])) {
+ ret = PTR_ERR(pds[i]) ? : -ENODATA;
+ goto unroll_attach;
+ }
+ }
+
+ return num_pds;
+
+unroll_attach:
+ for (i--; i >= 0; i--)
+ dev_pm_domain_detach(pds[i], false);
+
+ return ret;
+};
+
+static void qcom_ssc_block_bus_pds_detach(struct device *dev, struct device **pds, size_t num_pds)
+{
+ int i;
+
+ for (i = 0; i < num_pds; i++)
+ dev_pm_domain_detach(pds[i], false);
+}
+
+static int qcom_ssc_block_bus_pds_enable(struct device **pds, size_t num_pds)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < num_pds; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
+ ret = pm_runtime_get_sync(pds[i]);
+ if (ret < 0)
+ goto unroll_pd_votes;
+ }
+
+ return 0;
+
+unroll_pd_votes:
+ for (i--; i >= 0; i--) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+
+ return ret;
+};
+
+static void qcom_ssc_block_bus_pds_disable(struct device **pds, size_t num_pds)
+{
+ int i;
+
+ for (i = 0; i < num_pds; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+}
+
+static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
+{
+ struct qcom_ssc_block_bus_data *data;
+ struct device_node *np = pdev->dev.of_node;
+ struct of_phandle_args halt_args;
+ struct resource *res;
+ int ret;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, data);
+
+ data->pd_names = qcom_ssc_block_pd_names;
+ data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names);
+
+ /* power domains */
+ ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n");
+
+ ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n");
+
+ /* low level overrides for when the HW logic doesn't "just work" */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpm_sscaon_config0");
+ data->reg_mpm_sscaon_config0 = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->reg_mpm_sscaon_config0))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->reg_mpm_sscaon_config0),
+ "Failed to ioremap mpm_sscaon_config0\n");
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpm_sscaon_config1");
+ data->reg_mpm_sscaon_config1 = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->reg_mpm_sscaon_config1))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->reg_mpm_sscaon_config1),
+ "Failed to ioremap mpm_sscaon_config1\n");
+
+ /* resets */
+ data->ssc_bcr = devm_reset_control_get_exclusive(&pdev->dev, "ssc_bcr");
+ if (IS_ERR(data->ssc_bcr))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_bcr),
+ "Failed to acquire reset: ssc_bcr\n");
+
+ data->ssc_reset = devm_reset_control_get_exclusive(&pdev->dev, "ssc_reset");
+ if (IS_ERR(data->ssc_reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_reset),
+ "Failed to acquire reset: ssc_reset\n");
+
+ /* clocks */
+ data->xo_clk = devm_clk_get(&pdev->dev, "xo");
+ if (IS_ERR(data->xo_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->xo_clk),
+ "Failed to get clock: xo\n");
+
+ data->aggre2_clk = devm_clk_get(&pdev->dev, "aggre2");
+ if (IS_ERR(data->aggre2_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->aggre2_clk),
+ "Failed to get clock: aggre2\n");
+
+ data->gcc_im_sleep_clk = devm_clk_get(&pdev->dev, "gcc_im_sleep");
+ if (IS_ERR(data->gcc_im_sleep_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->gcc_im_sleep_clk),
+ "Failed to get clock: gcc_im_sleep\n");
+
+ data->aggre2_north_clk = devm_clk_get(&pdev->dev, "aggre2_north");
+ if (IS_ERR(data->aggre2_north_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->aggre2_north_clk),
+ "Failed to get clock: aggre2_north\n");
+
+ data->ssc_xo_clk = devm_clk_get(&pdev->dev, "ssc_xo");
+ if (IS_ERR(data->ssc_xo_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_xo_clk),
+ "Failed to get clock: ssc_xo\n");
+
+ data->ssc_ahbs_clk = devm_clk_get(&pdev->dev, "ssc_ahbs");
+ if (IS_ERR(data->ssc_ahbs_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->ssc_ahbs_clk),
+ "Failed to get clock: ssc_ahbs\n");
+
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, "qcom,halt-regs", 1, 0,
+ &halt_args);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to parse qcom,halt-regs\n");
+
+ data->halt_map = syscon_node_to_regmap(halt_args.np);
+ of_node_put(halt_args.np);
+ if (IS_ERR(data->halt_map))
+ return PTR_ERR(data->halt_map);
+
+ data->ssc_axi_halt = halt_args.args[0];
+
+ qcom_ssc_block_bus_init(&pdev->dev);
+
+ of_platform_populate(np, NULL, NULL, &pdev->dev);
+
+ return 0;
+}
+
+static int qcom_ssc_block_bus_remove(struct platform_device *pdev)
+{
+ struct qcom_ssc_block_bus_data *data = platform_get_drvdata(pdev);
+
+ qcom_ssc_block_bus_deinit(&pdev->dev);
+
+ iounmap(data->reg_mpm_sscaon_config0);
+ iounmap(data->reg_mpm_sscaon_config1);
+
+ qcom_ssc_block_bus_pds_disable(data->pds, data->num_pds);
+ qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds);
+ pm_runtime_disable(&pdev->dev);
+ pm_clk_destroy(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_ssc_block_bus_of_match[] = {
+ { .compatible = "qcom,ssc-block-bus", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, qcom_ssc_block_bus_of_match);
+
+static struct platform_driver qcom_ssc_block_bus_driver = {
+ .probe = qcom_ssc_block_bus_probe,
+ .remove = qcom_ssc_block_bus_remove,
+ .driver = {
+ .name = "qcom-ssc-block-bus",
+ .of_match_table = qcom_ssc_block_bus_of_match,
+ },
+};
+
+module_platform_driver(qcom_ssc_block_bus_driver);
+
+MODULE_DESCRIPTION("A driver for handling the init sequence needed for accessing the SSC block on (some) qcom SoCs over AHB");
+MODULE_AUTHOR("Michael Srba <Michael.Srba@seznam.cz>");
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
new file mode 100644
index 0000000000..aafcc481de
--- /dev/null
+++ b/drivers/bus/simple-pm-bus.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Simple Power-Managed Bus Driver
+ *
+ * Copyright (C) 2014-2015 Glider bvba
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+struct simple_pm_bus {
+ struct clk_bulk_data *clks;
+ int num_clks;
+};
+
+static int simple_pm_bus_probe(struct platform_device *pdev)
+{
+ const struct device *dev = &pdev->dev;
+ const struct of_dev_auxdata *lookup = dev_get_platdata(dev);
+ struct device_node *np = dev->of_node;
+ const struct of_device_id *match;
+ struct simple_pm_bus *bus;
+
+ /*
+ * Allow user to use driver_override to bind this driver to a
+ * transparent bus device which has a different compatible string
+ * that's not listed in simple_pm_bus_of_match. We don't want to do any
+ * of the simple-pm-bus tasks for these devices, so return early.
+ */
+ if (pdev->driver_override)
+ return 0;
+
+ match = of_match_device(dev->driver->of_match_table, dev);
+ /*
+ * These are transparent bus devices (not simple-pm-bus matches) that
+ * have their child nodes populated automatically. So, don't need to
+ * do anything more. We only match with the device if this driver is
+ * the most specific match because we don't want to incorrectly bind to
+ * a device that has a more specific driver.
+ */
+ if (match && match->data) {
+ if (of_property_match_string(np, "compatible", match->compatible) == 0)
+ return 0;
+ else
+ return -ENODEV;
+ }
+
+ bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->num_clks = devm_clk_bulk_get_all(&pdev->dev, &bus->clks);
+ if (bus->num_clks < 0)
+ return dev_err_probe(&pdev->dev, bus->num_clks, "failed to get clocks\n");
+
+ dev_set_drvdata(&pdev->dev, bus);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ pm_runtime_enable(&pdev->dev);
+
+ if (np)
+ of_platform_populate(np, NULL, lookup, &pdev->dev);
+
+ return 0;
+}
+
+static int simple_pm_bus_remove(struct platform_device *pdev)
+{
+ const void *data = of_device_get_match_data(&pdev->dev);
+
+ if (pdev->driver_override || data)
+ return 0;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static int simple_pm_bus_runtime_suspend(struct device *dev)
+{
+ struct simple_pm_bus *bus = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(bus->num_clks, bus->clks);
+
+ return 0;
+}
+
+static int simple_pm_bus_runtime_resume(struct device *dev)
+{
+ struct simple_pm_bus *bus = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_bulk_prepare_enable(bus->num_clks, bus->clks);
+ if (ret) {
+ dev_err(dev, "failed to enable clocks: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops simple_pm_bus_pm_ops = {
+ RUNTIME_PM_OPS(simple_pm_bus_runtime_suspend, simple_pm_bus_runtime_resume, NULL)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+};
+
+#define ONLY_BUS ((void *) 1) /* Match if the device is only a bus. */
+
+static const struct of_device_id simple_pm_bus_of_match[] = {
+ { .compatible = "simple-pm-bus", },
+ { .compatible = "simple-bus", .data = ONLY_BUS },
+ { .compatible = "simple-mfd", .data = ONLY_BUS },
+ { .compatible = "isa", .data = ONLY_BUS },
+ { .compatible = "arm,amba-bus", .data = ONLY_BUS },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match);
+
+static struct platform_driver simple_pm_bus_driver = {
+ .probe = simple_pm_bus_probe,
+ .remove = simple_pm_bus_remove,
+ .driver = {
+ .name = "simple-pm-bus",
+ .of_match_table = simple_pm_bus_of_match,
+ .pm = pm_ptr(&simple_pm_bus_pm_ops),
+ },
+};
+
+module_platform_driver(simple_pm_bus_driver);
+
+MODULE_DESCRIPTION("Simple Power-Managed Bus Driver");
+MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
diff --git a/drivers/bus/sun50i-de2.c b/drivers/bus/sun50i-de2.c
new file mode 100644
index 0000000000..414f29cded
--- /dev/null
+++ b/drivers/bus/sun50i-de2.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner A64 Display Engine 2.0 Bus Driver
+ *
+ * Copyright (C) 2018 Icenowy Zheng <icenowy@aosc.io>
+ */
+
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/soc/sunxi/sunxi_sram.h>
+
+static int sun50i_de2_bus_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ ret = sunxi_sram_claim(&pdev->dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Couldn't map SRAM to device\n");
+
+ of_platform_populate(np, NULL, NULL, &pdev->dev);
+
+ return 0;
+}
+
+static int sun50i_de2_bus_remove(struct platform_device *pdev)
+{
+ sunxi_sram_release(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id sun50i_de2_bus_of_match[] = {
+ { .compatible = "allwinner,sun50i-a64-de2", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver sun50i_de2_bus_driver = {
+ .probe = sun50i_de2_bus_probe,
+ .remove = sun50i_de2_bus_remove,
+ .driver = {
+ .name = "sun50i-de2-bus",
+ .of_match_table = sun50i_de2_bus_of_match,
+ },
+};
+
+builtin_platform_driver(sun50i_de2_bus_driver);
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
new file mode 100644
index 0000000000..db0ed4e5d3
--- /dev/null
+++ b/drivers/bus/sunxi-rsb.c
@@ -0,0 +1,882 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * RSB (Reduced Serial Bus) driver.
+ *
+ * Author: Chen-Yu Tsai <wens@csie.org>
+ *
+ * The RSB controller looks like an SMBus controller which only supports
+ * byte and word data transfers. But, it differs from standard SMBus
+ * protocol on several aspects:
+ * - it uses addresses set at runtime to address slaves. Runtime addresses
+ * are sent to slaves using their 12bit hardware addresses. Up to 15
+ * runtime addresses are available.
+ * - it adds a parity bit every 8bits of data and address for read and
+ * write accesses; this replaces the ack bit
+ * - only one read access is required to read a byte (instead of a write
+ * followed by a read access in standard SMBus protocol)
+ * - there's no Ack bit after each read access
+ *
+ * This means this bus cannot be used to interface with standard SMBus
+ * devices. Devices known to support this interface include the AXP223,
+ * AXP809, and AXP806 PMICs, and the AC100 audio codec, all from X-Powers.
+ *
+ * A description of the operation and wire protocol can be found in the
+ * RSB section of Allwinner's A80 user manual, which can be found at
+ *
+ * https://github.com/allwinner-zh/documents/tree/master/A80
+ *
+ * This document is officially released by Allwinner.
+ *
+ * This driver is based on i2c-sun6i-p2wi.c, the P2WI bus driver.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/sunxi-rsb.h>
+#include <linux/types.h>
+
+/* RSB registers */
+#define RSB_CTRL 0x0 /* Global control */
+#define RSB_CCR 0x4 /* Clock control */
+#define RSB_INTE 0x8 /* Interrupt controls */
+#define RSB_INTS 0xc /* Interrupt status */
+#define RSB_ADDR 0x10 /* Address to send with read/write command */
+#define RSB_DATA 0x1c /* Data to read/write */
+#define RSB_LCR 0x24 /* Line control */
+#define RSB_DMCR 0x28 /* Device mode (init) control */
+#define RSB_CMD 0x2c /* RSB Command */
+#define RSB_DAR 0x30 /* Device address / runtime address */
+
+/* CTRL fields */
+#define RSB_CTRL_START_TRANS BIT(7)
+#define RSB_CTRL_ABORT_TRANS BIT(6)
+#define RSB_CTRL_GLOBAL_INT_ENB BIT(1)
+#define RSB_CTRL_SOFT_RST BIT(0)
+
+/* CLK CTRL fields */
+#define RSB_CCR_SDA_OUT_DELAY(v) (((v) & 0x7) << 8)
+#define RSB_CCR_MAX_CLK_DIV 0xff
+#define RSB_CCR_CLK_DIV(v) ((v) & RSB_CCR_MAX_CLK_DIV)
+
+/* STATUS fields */
+#define RSB_INTS_TRANS_ERR_ACK BIT(16)
+#define RSB_INTS_TRANS_ERR_DATA_BIT(v) (((v) >> 8) & 0xf)
+#define RSB_INTS_TRANS_ERR_DATA GENMASK(11, 8)
+#define RSB_INTS_LOAD_BSY BIT(2)
+#define RSB_INTS_TRANS_ERR BIT(1)
+#define RSB_INTS_TRANS_OVER BIT(0)
+
+/* LINE CTRL fields*/
+#define RSB_LCR_SCL_STATE BIT(5)
+#define RSB_LCR_SDA_STATE BIT(4)
+#define RSB_LCR_SCL_CTL BIT(3)
+#define RSB_LCR_SCL_CTL_EN BIT(2)
+#define RSB_LCR_SDA_CTL BIT(1)
+#define RSB_LCR_SDA_CTL_EN BIT(0)
+
+/* DEVICE MODE CTRL field values */
+#define RSB_DMCR_DEVICE_START BIT(31)
+#define RSB_DMCR_MODE_DATA (0x7c << 16)
+#define RSB_DMCR_MODE_REG (0x3e << 8)
+#define RSB_DMCR_DEV_ADDR 0x00
+
+/* CMD values */
+#define RSB_CMD_RD8 0x8b
+#define RSB_CMD_RD16 0x9c
+#define RSB_CMD_RD32 0xa6
+#define RSB_CMD_WR8 0x4e
+#define RSB_CMD_WR16 0x59
+#define RSB_CMD_WR32 0x63
+#define RSB_CMD_STRA 0xe8
+
+/* DAR fields */
+#define RSB_DAR_RTA(v) (((v) & 0xff) << 16)
+#define RSB_DAR_DA(v) ((v) & 0xffff)
+
+#define RSB_MAX_FREQ 20000000
+
+#define RSB_CTRL_NAME "sunxi-rsb"
+
+struct sunxi_rsb_addr_map {
+ u16 hwaddr;
+ u8 rtaddr;
+};
+
+struct sunxi_rsb {
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+ struct reset_control *rstc;
+ struct completion complete;
+ struct mutex lock;
+ unsigned int status;
+ u32 clk_freq;
+};
+
+/* bus / slave device related functions */
+static struct bus_type sunxi_rsb_bus;
+
+static int sunxi_rsb_device_match(struct device *dev, struct device_driver *drv)
+{
+ return of_driver_match_device(dev, drv);
+}
+
+static int sunxi_rsb_device_probe(struct device *dev)
+{
+ const struct sunxi_rsb_driver *drv = to_sunxi_rsb_driver(dev->driver);
+ struct sunxi_rsb_device *rdev = to_sunxi_rsb_device(dev);
+ int ret;
+
+ if (!drv->probe)
+ return -ENODEV;
+
+ if (!rdev->irq) {
+ int irq = -ENOENT;
+
+ if (dev->of_node)
+ irq = of_irq_get(dev->of_node, 0);
+
+ if (irq == -EPROBE_DEFER)
+ return irq;
+ if (irq < 0)
+ irq = 0;
+
+ rdev->irq = irq;
+ }
+
+ ret = of_clk_set_defaults(dev->of_node, false);
+ if (ret < 0)
+ return ret;
+
+ return drv->probe(rdev);
+}
+
+static void sunxi_rsb_device_remove(struct device *dev)
+{
+ const struct sunxi_rsb_driver *drv = to_sunxi_rsb_driver(dev->driver);
+
+ drv->remove(to_sunxi_rsb_device(dev));
+}
+
+static int sunxi_rsb_device_modalias(const struct device *dev, struct kobj_uevent_env *env)
+{
+ return of_device_uevent_modalias(dev, env);
+}
+
+static struct bus_type sunxi_rsb_bus = {
+ .name = RSB_CTRL_NAME,
+ .match = sunxi_rsb_device_match,
+ .probe = sunxi_rsb_device_probe,
+ .remove = sunxi_rsb_device_remove,
+ .uevent = sunxi_rsb_device_modalias,
+};
+
+static void sunxi_rsb_dev_release(struct device *dev)
+{
+ struct sunxi_rsb_device *rdev = to_sunxi_rsb_device(dev);
+
+ kfree(rdev);
+}
+
+/**
+ * sunxi_rsb_device_create() - allocate and add an RSB device
+ * @rsb: RSB controller
+ * @node: RSB slave device node
+ * @hwaddr: RSB slave hardware address
+ * @rtaddr: RSB slave runtime address
+ */
+static struct sunxi_rsb_device *sunxi_rsb_device_create(struct sunxi_rsb *rsb,
+ struct device_node *node, u16 hwaddr, u8 rtaddr)
+{
+ int err;
+ struct sunxi_rsb_device *rdev;
+
+ rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
+ if (!rdev)
+ return ERR_PTR(-ENOMEM);
+
+ rdev->rsb = rsb;
+ rdev->hwaddr = hwaddr;
+ rdev->rtaddr = rtaddr;
+ rdev->dev.bus = &sunxi_rsb_bus;
+ rdev->dev.parent = rsb->dev;
+ rdev->dev.of_node = node;
+ rdev->dev.release = sunxi_rsb_dev_release;
+
+ dev_set_name(&rdev->dev, "%s-%x", RSB_CTRL_NAME, hwaddr);
+
+ err = device_register(&rdev->dev);
+ if (err < 0) {
+ dev_err(&rdev->dev, "Can't add %s, status %d\n",
+ dev_name(&rdev->dev), err);
+ goto err_device_add;
+ }
+
+ dev_dbg(&rdev->dev, "device %s registered\n", dev_name(&rdev->dev));
+
+ return rdev;
+
+err_device_add:
+ put_device(&rdev->dev);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * sunxi_rsb_device_unregister() - unregister an RSB device
+ * @rdev: rsb_device to be removed
+ */
+static void sunxi_rsb_device_unregister(struct sunxi_rsb_device *rdev)
+{
+ device_unregister(&rdev->dev);
+}
+
+static int sunxi_rsb_remove_devices(struct device *dev, void *data)
+{
+ struct sunxi_rsb_device *rdev = to_sunxi_rsb_device(dev);
+
+ if (dev->bus == &sunxi_rsb_bus)
+ sunxi_rsb_device_unregister(rdev);
+
+ return 0;
+}
+
+/**
+ * sunxi_rsb_driver_register() - Register device driver with RSB core
+ * @rdrv: device driver to be associated with slave-device.
+ *
+ * This API will register the client driver with the RSB framework.
+ * It is typically called from the driver's module-init function.
+ */
+int sunxi_rsb_driver_register(struct sunxi_rsb_driver *rdrv)
+{
+ rdrv->driver.bus = &sunxi_rsb_bus;
+ return driver_register(&rdrv->driver);
+}
+EXPORT_SYMBOL_GPL(sunxi_rsb_driver_register);
+
+/* common code that starts a transfer */
+static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb)
+{
+ u32 int_mask, status;
+ bool timeout;
+
+ if (readl(rsb->regs + RSB_CTRL) & RSB_CTRL_START_TRANS) {
+ dev_dbg(rsb->dev, "RSB transfer still in progress\n");
+ return -EBUSY;
+ }
+
+ reinit_completion(&rsb->complete);
+
+ int_mask = RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER;
+ writel(int_mask, rsb->regs + RSB_INTE);
+ writel(RSB_CTRL_START_TRANS | RSB_CTRL_GLOBAL_INT_ENB,
+ rsb->regs + RSB_CTRL);
+
+ if (irqs_disabled()) {
+ timeout = readl_poll_timeout_atomic(rsb->regs + RSB_INTS,
+ status, (status & int_mask),
+ 10, 100000);
+ writel(status, rsb->regs + RSB_INTS);
+ } else {
+ timeout = !wait_for_completion_io_timeout(&rsb->complete,
+ msecs_to_jiffies(100));
+ status = rsb->status;
+ }
+
+ if (timeout) {
+ dev_dbg(rsb->dev, "RSB timeout\n");
+
+ /* abort the transfer */
+ writel(RSB_CTRL_ABORT_TRANS, rsb->regs + RSB_CTRL);
+
+ /* clear any interrupt flags */
+ writel(readl(rsb->regs + RSB_INTS), rsb->regs + RSB_INTS);
+
+ return -ETIMEDOUT;
+ }
+
+ if (status & RSB_INTS_LOAD_BSY) {
+ dev_dbg(rsb->dev, "RSB busy\n");
+ return -EBUSY;
+ }
+
+ if (status & RSB_INTS_TRANS_ERR) {
+ if (status & RSB_INTS_TRANS_ERR_ACK) {
+ dev_dbg(rsb->dev, "RSB slave nack\n");
+ return -EINVAL;
+ }
+
+ if (status & RSB_INTS_TRANS_ERR_DATA) {
+ dev_dbg(rsb->dev, "RSB transfer data error\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
+ u32 *buf, size_t len)
+{
+ u32 cmd;
+ int ret;
+
+ if (!buf)
+ return -EINVAL;
+
+ switch (len) {
+ case 1:
+ cmd = RSB_CMD_RD8;
+ break;
+ case 2:
+ cmd = RSB_CMD_RD16;
+ break;
+ case 4:
+ cmd = RSB_CMD_RD32;
+ break;
+ default:
+ dev_err(rsb->dev, "Invalid access width: %zd\n", len);
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_resume_and_get(rsb->dev);
+ if (ret)
+ return ret;
+
+ mutex_lock(&rsb->lock);
+
+ writel(addr, rsb->regs + RSB_ADDR);
+ writel(RSB_DAR_RTA(rtaddr), rsb->regs + RSB_DAR);
+ writel(cmd, rsb->regs + RSB_CMD);
+
+ ret = _sunxi_rsb_run_xfer(rsb);
+ if (ret)
+ goto unlock;
+
+ *buf = readl(rsb->regs + RSB_DATA) & GENMASK(len * 8 - 1, 0);
+
+unlock:
+ mutex_unlock(&rsb->lock);
+
+ pm_runtime_mark_last_busy(rsb->dev);
+ pm_runtime_put_autosuspend(rsb->dev);
+
+ return ret;
+}
+
+static int sunxi_rsb_write(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
+ const u32 *buf, size_t len)
+{
+ u32 cmd;
+ int ret;
+
+ if (!buf)
+ return -EINVAL;
+
+ switch (len) {
+ case 1:
+ cmd = RSB_CMD_WR8;
+ break;
+ case 2:
+ cmd = RSB_CMD_WR16;
+ break;
+ case 4:
+ cmd = RSB_CMD_WR32;
+ break;
+ default:
+ dev_err(rsb->dev, "Invalid access width: %zd\n", len);
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_resume_and_get(rsb->dev);
+ if (ret)
+ return ret;
+
+ mutex_lock(&rsb->lock);
+
+ writel(addr, rsb->regs + RSB_ADDR);
+ writel(RSB_DAR_RTA(rtaddr), rsb->regs + RSB_DAR);
+ writel(*buf, rsb->regs + RSB_DATA);
+ writel(cmd, rsb->regs + RSB_CMD);
+ ret = _sunxi_rsb_run_xfer(rsb);
+
+ mutex_unlock(&rsb->lock);
+
+ pm_runtime_mark_last_busy(rsb->dev);
+ pm_runtime_put_autosuspend(rsb->dev);
+
+ return ret;
+}
+
+/* RSB regmap functions */
+struct sunxi_rsb_ctx {
+ struct sunxi_rsb_device *rdev;
+ int size;
+};
+
+static int regmap_sunxi_rsb_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct sunxi_rsb_ctx *ctx = context;
+ struct sunxi_rsb_device *rdev = ctx->rdev;
+
+ if (reg > 0xff)
+ return -EINVAL;
+
+ return sunxi_rsb_read(rdev->rsb, rdev->rtaddr, reg, val, ctx->size);
+}
+
+static int regmap_sunxi_rsb_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct sunxi_rsb_ctx *ctx = context;
+ struct sunxi_rsb_device *rdev = ctx->rdev;
+
+ return sunxi_rsb_write(rdev->rsb, rdev->rtaddr, reg, &val, ctx->size);
+}
+
+static void regmap_sunxi_rsb_free_ctx(void *context)
+{
+ struct sunxi_rsb_ctx *ctx = context;
+
+ kfree(ctx);
+}
+
+static struct regmap_bus regmap_sunxi_rsb = {
+ .reg_write = regmap_sunxi_rsb_reg_write,
+ .reg_read = regmap_sunxi_rsb_reg_read,
+ .free_context = regmap_sunxi_rsb_free_ctx,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+static struct sunxi_rsb_ctx *regmap_sunxi_rsb_init_ctx(struct sunxi_rsb_device *rdev,
+ const struct regmap_config *config)
+{
+ struct sunxi_rsb_ctx *ctx;
+
+ switch (config->val_bits) {
+ case 8:
+ case 16:
+ case 32:
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->rdev = rdev;
+ ctx->size = config->val_bits / 8;
+
+ return ctx;
+}
+
+struct regmap *__devm_regmap_init_sunxi_rsb(struct sunxi_rsb_device *rdev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ struct sunxi_rsb_ctx *ctx = regmap_sunxi_rsb_init_ctx(rdev, config);
+
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return __devm_regmap_init(&rdev->dev, &regmap_sunxi_rsb, ctx, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_sunxi_rsb);
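+
+/*
+ * Typical use from a slave device driver's probe() (sketch only, error
+ * handling trimmed; "rdev" is the client's struct sunxi_rsb_device and the
+ * regmap_config values are just an example 8-bit register map):
+ *
+ *	static const struct regmap_config cfg = {
+ *		.reg_bits = 8,
+ *		.val_bits = 8,
+ *	};
+ *
+ *	struct regmap *map = devm_regmap_init_sunxi_rsb(rdev, &cfg);
+ *	if (IS_ERR(map))
+ *		return PTR_ERR(map);
+ */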
+
+/* RSB controller driver functions */
+static irqreturn_t sunxi_rsb_irq(int irq, void *dev_id)
+{
+ struct sunxi_rsb *rsb = dev_id;
+ u32 status;
+
+ status = readl(rsb->regs + RSB_INTS);
+ rsb->status = status;
+
+ /* Clear interrupts */
+ status &= (RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR |
+ RSB_INTS_TRANS_OVER);
+ writel(status, rsb->regs + RSB_INTS);
+
+ complete(&rsb->complete);
+
+ return IRQ_HANDLED;
+}
+
+static int sunxi_rsb_init_device_mode(struct sunxi_rsb *rsb)
+{
+ int ret = 0;
+ u32 reg;
+
+ /* send init sequence */
+ writel(RSB_DMCR_DEVICE_START | RSB_DMCR_MODE_DATA |
+ RSB_DMCR_MODE_REG | RSB_DMCR_DEV_ADDR, rsb->regs + RSB_DMCR);
+
+ readl_poll_timeout(rsb->regs + RSB_DMCR, reg,
+ !(reg & RSB_DMCR_DEVICE_START), 100, 250000);
+ if (reg & RSB_DMCR_DEVICE_START)
+ ret = -ETIMEDOUT;
+
+ /* clear interrupt status bits */
+ writel(readl(rsb->regs + RSB_INTS), rsb->regs + RSB_INTS);
+
+ return ret;
+}
+
+/*
+ * There are 15 valid runtime addresses, though Allwinner typically
+ * skips the first, for unknown reasons, and in practice uses only three.
+ *
+ * 0x17, 0x2d, 0x3a, 0x4e, 0x59, 0x63, 0x74, 0x8b,
+ * 0x9c, 0xa6, 0xb1, 0xc5, 0xd2, 0xe8, 0xff
+ *
+ * No designs with 2 RSB slave devices sharing identical hardware
+ * addresses on the same bus have been seen in the wild. All designs
+ * use 0x2d for the primary PMIC, 0x3a for the secondary PMIC if
+ * there is one, and 0x4e for peripheral ICs.
+ *
+ * The hardware does not seem to support re-setting runtime addresses.
+ * Attempts to do so result in the slave devices returning a NACK.
+ * Hence we just hardcode the mapping here, like Allwinner does.
+ */
+
+static const struct sunxi_rsb_addr_map sunxi_rsb_addr_maps[] = {
+ { 0x3a3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */
+ { 0x745, 0x3a }, /* Secondary PMIC: AXP806, ... */
+ { 0xe89, 0x4e }, /* Peripheral IC: AC100, ... */
+};
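+
+/*
+ * Example, derived from the table above and of_rsb_register_devices()
+ * below: a primary PMIC wired at hardware address 0x3a3 (reg = <0x3a3> in
+ * its device tree node) is always assigned runtime address 0x2d, and every
+ * subsequent read/write command carries 0x2d in the RSB_DAR RTA field.
+ */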
+
+static u8 sunxi_rsb_get_rtaddr(u16 hwaddr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sunxi_rsb_addr_maps); i++)
+ if (hwaddr == sunxi_rsb_addr_maps[i].hwaddr)
+ return sunxi_rsb_addr_maps[i].rtaddr;
+
+ return 0; /* 0 is an invalid runtime address */
+}
+
+static int of_rsb_register_devices(struct sunxi_rsb *rsb)
+{
+ struct device *dev = rsb->dev;
+ struct device_node *child, *np = dev->of_node;
+ u32 hwaddr;
+ u8 rtaddr;
+ int ret;
+
+ if (!np)
+ return -EINVAL;
+
+ /* Runtime addresses for all slaves should be set first */
+ for_each_available_child_of_node(np, child) {
+ dev_dbg(dev, "setting child %pOF runtime address\n",
+ child);
+
+ ret = of_property_read_u32(child, "reg", &hwaddr);
+ if (ret) {
+ dev_err(dev, "%pOF: invalid 'reg' property: %d\n",
+ child, ret);
+ continue;
+ }
+
+ rtaddr = sunxi_rsb_get_rtaddr(hwaddr);
+ if (!rtaddr) {
+ dev_err(dev, "%pOF: unknown hardware device address\n",
+ child);
+ continue;
+ }
+
+ /*
+ * Since no devices have been registered yet, we are the
+ * only ones using the bus, so we can skip locking it.
+ */
+
+ /* setup command parameters */
+ writel(RSB_CMD_STRA, rsb->regs + RSB_CMD);
+ writel(RSB_DAR_RTA(rtaddr) | RSB_DAR_DA(hwaddr),
+ rsb->regs + RSB_DAR);
+
+ /* send command */
+ ret = _sunxi_rsb_run_xfer(rsb);
+ if (ret)
+ dev_warn(dev, "%pOF: set runtime address failed: %d\n",
+ child, ret);
+ }
+
+ /* Then we start adding devices and probing them */
+ for_each_available_child_of_node(np, child) {
+ struct sunxi_rsb_device *rdev;
+
+ dev_dbg(dev, "adding child %pOF\n", child);
+
+ ret = of_property_read_u32(child, "reg", &hwaddr);
+ if (ret)
+ continue;
+
+ rtaddr = sunxi_rsb_get_rtaddr(hwaddr);
+ if (!rtaddr)
+ continue;
+
+ rdev = sunxi_rsb_device_create(rsb, child, hwaddr, rtaddr);
+ if (IS_ERR(rdev))
+ dev_err(dev, "failed to add child device %pOF: %ld\n",
+ child, PTR_ERR(rdev));
+ }
+
+ return 0;
+}
+
+static int sunxi_rsb_hw_init(struct sunxi_rsb *rsb)
+{
+ struct device *dev = rsb->dev;
+ unsigned long p_clk_freq;
+ u32 clk_delay, reg;
+ int clk_div, ret;
+
+ ret = clk_prepare_enable(rsb->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(rsb->rstc);
+ if (ret) {
+ dev_err(dev, "failed to deassert reset line: %d\n", ret);
+ goto err_clk_disable;
+ }
+
+ /* reset the controller */
+ writel(RSB_CTRL_SOFT_RST, rsb->regs + RSB_CTRL);
+ readl_poll_timeout(rsb->regs + RSB_CTRL, reg,
+ !(reg & RSB_CTRL_SOFT_RST), 1000, 100000);
+
+ /*
+ * Clock frequency and delay calculation code is from
+ * Allwinner U-boot sources.
+ *
+ * From A83 user manual:
+ * bus clock frequency = parent clock frequency / (2 * (divider + 1))
+ */
+ p_clk_freq = clk_get_rate(rsb->clk);
+ clk_div = p_clk_freq / rsb->clk_freq / 2;
+ if (!clk_div)
+ clk_div = 1;
+ else if (clk_div > RSB_CCR_MAX_CLK_DIV + 1)
+ clk_div = RSB_CCR_MAX_CLK_DIV + 1;
+
+ clk_delay = clk_div >> 1;
+ if (!clk_delay)
+ clk_delay = 1;
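+
+ /*
+ * Worked example (assuming a 24 MHz parent clock and the default
+ * 3 MHz bus clock): clk_div = 24000000 / 3000000 / 2 = 4, so the
+ * divider field is programmed as 3, clk_delay becomes 2, and the
+ * resulting bus clock is 24 MHz / (2 * 4) = 3 MHz.
+ */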
+
+ dev_info(dev, "RSB running at %lu Hz\n", p_clk_freq / clk_div / 2);
+ writel(RSB_CCR_SDA_OUT_DELAY(clk_delay) | RSB_CCR_CLK_DIV(clk_div - 1),
+ rsb->regs + RSB_CCR);
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(rsb->clk);
+
+ return ret;
+}
+
+static void sunxi_rsb_hw_exit(struct sunxi_rsb *rsb)
+{
+ reset_control_assert(rsb->rstc);
+
+ /* Keep the clock and PM reference counts consistent. */
+ if (!pm_runtime_status_suspended(rsb->dev))
+ clk_disable_unprepare(rsb->clk);
+}
+
+static int __maybe_unused sunxi_rsb_runtime_suspend(struct device *dev)
+{
+ struct sunxi_rsb *rsb = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(rsb->clk);
+
+ return 0;
+}
+
+static int __maybe_unused sunxi_rsb_runtime_resume(struct device *dev)
+{
+ struct sunxi_rsb *rsb = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(rsb->clk);
+}
+
+static int __maybe_unused sunxi_rsb_suspend(struct device *dev)
+{
+ struct sunxi_rsb *rsb = dev_get_drvdata(dev);
+
+ sunxi_rsb_hw_exit(rsb);
+
+ return 0;
+}
+
+static int __maybe_unused sunxi_rsb_resume(struct device *dev)
+{
+ struct sunxi_rsb *rsb = dev_get_drvdata(dev);
+
+ return sunxi_rsb_hw_init(rsb);
+}
+
+static int sunxi_rsb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct sunxi_rsb *rsb;
+ u32 clk_freq = 3000000;
+ int irq, ret;
+
+ of_property_read_u32(np, "clock-frequency", &clk_freq);
+ if (clk_freq > RSB_MAX_FREQ) {
+ dev_err(dev,
+ "clock-frequency (%u Hz) is too high (max = 20MHz)\n",
+ clk_freq);
+ return -EINVAL;
+ }
+
+ rsb = devm_kzalloc(dev, sizeof(*rsb), GFP_KERNEL);
+ if (!rsb)
+ return -ENOMEM;
+
+ rsb->dev = dev;
+ rsb->clk_freq = clk_freq;
+ platform_set_drvdata(pdev, rsb);
+ rsb->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rsb->regs))
+ return PTR_ERR(rsb->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ rsb->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(rsb->clk)) {
+ ret = PTR_ERR(rsb->clk);
+ dev_err(dev, "failed to retrieve clk: %d\n", ret);
+ return ret;
+ }
+
+ rsb->rstc = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(rsb->rstc)) {
+ ret = PTR_ERR(rsb->rstc);
+ dev_err(dev, "failed to retrieve reset controller: %d\n", ret);
+ return ret;
+ }
+
+ init_completion(&rsb->complete);
+ mutex_init(&rsb->lock);
+
+ ret = devm_request_irq(dev, irq, sunxi_rsb_irq, 0, RSB_CTRL_NAME, rsb);
+ if (ret) {
+ dev_err(dev, "can't register interrupt handler irq %d: %d\n",
+ irq, ret);
+ return ret;
+ }
+
+ ret = sunxi_rsb_hw_init(rsb);
+ if (ret)
+ return ret;
+
+ /* initialize all devices on the bus into RSB mode */
+ ret = sunxi_rsb_init_device_mode(rsb);
+ if (ret)
+ dev_warn(dev, "Initialize device mode failed: %d\n", ret);
+
+ pm_suspend_ignore_children(dev, true);
+ pm_runtime_set_active(dev);
+ pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+
+ of_rsb_register_devices(rsb);
+
+ return 0;
+}
+
+static int sunxi_rsb_remove(struct platform_device *pdev)
+{
+ struct sunxi_rsb *rsb = platform_get_drvdata(pdev);
+
+ device_for_each_child(rsb->dev, NULL, sunxi_rsb_remove_devices);
+ pm_runtime_disable(&pdev->dev);
+ sunxi_rsb_hw_exit(rsb);
+
+ return 0;
+}
+
+static const struct dev_pm_ops sunxi_rsb_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(sunxi_rsb_runtime_suspend,
+ sunxi_rsb_runtime_resume, NULL)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sunxi_rsb_suspend, sunxi_rsb_resume)
+};
+
+static const struct of_device_id sunxi_rsb_of_match_table[] = {
+ { .compatible = "allwinner,sun8i-a23-rsb" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table);
+
+static struct platform_driver sunxi_rsb_driver = {
+ .probe = sunxi_rsb_probe,
+ .remove = sunxi_rsb_remove,
+ .driver = {
+ .name = RSB_CTRL_NAME,
+ .of_match_table = sunxi_rsb_of_match_table,
+ .pm = &sunxi_rsb_dev_pm_ops,
+ },
+};
+
+static int __init sunxi_rsb_init(void)
+{
+ int ret;
+
+ ret = bus_register(&sunxi_rsb_bus);
+ if (ret) {
+ pr_err("failed to register sunxi sunxi_rsb bus: %d\n", ret);
+ return ret;
+ }
+
+ ret = platform_driver_register(&sunxi_rsb_driver);
+ if (ret) {
+ bus_unregister(&sunxi_rsb_bus);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(sunxi_rsb_init);
+
+static void __exit sunxi_rsb_exit(void)
+{
+ platform_driver_unregister(&sunxi_rsb_driver);
+ bus_unregister(&sunxi_rsb_bus);
+}
+module_exit(sunxi_rsb_exit);
+
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_DESCRIPTION("Allwinner sunXi Reduced Serial Bus controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/tegra-aconnect.c b/drivers/bus/tegra-aconnect.c
new file mode 100644
index 0000000000..ac58142301
--- /dev/null
+++ b/drivers/bus/tegra-aconnect.c
@@ -0,0 +1,120 @@
+/*
+ * Tegra ACONNECT Bus Driver
+ *
+ * Copyright (C) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+struct tegra_aconnect {
+ struct clk *ape_clk;
+ struct clk *apb2ape_clk;
+};
+
+static int tegra_aconnect_probe(struct platform_device *pdev)
+{
+ struct tegra_aconnect *aconnect;
+
+ if (!pdev->dev.of_node)
+ return -EINVAL;
+
+ aconnect = devm_kzalloc(&pdev->dev, sizeof(struct tegra_aconnect),
+ GFP_KERNEL);
+ if (!aconnect)
+ return -ENOMEM;
+
+ aconnect->ape_clk = devm_clk_get(&pdev->dev, "ape");
+ if (IS_ERR(aconnect->ape_clk)) {
+ dev_err(&pdev->dev, "Can't retrieve ape clock\n");
+ return PTR_ERR(aconnect->ape_clk);
+ }
+
+ aconnect->apb2ape_clk = devm_clk_get(&pdev->dev, "apb2ape");
+ if (IS_ERR(aconnect->apb2ape_clk)) {
+ dev_err(&pdev->dev, "Can't retrieve apb2ape clock\n");
+ return PTR_ERR(aconnect->apb2ape_clk);
+ }
+
+ dev_set_drvdata(&pdev->dev, aconnect);
+ pm_runtime_enable(&pdev->dev);
+
+ of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+ dev_info(&pdev->dev, "Tegra ACONNECT bus registered\n");
+
+ return 0;
+}
+
+static int tegra_aconnect_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int tegra_aconnect_runtime_resume(struct device *dev)
+{
+ struct tegra_aconnect *aconnect = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(aconnect->ape_clk);
+ if (ret) {
+ dev_err(dev, "ape clk_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(aconnect->apb2ape_clk);
+ if (ret) {
+ clk_disable_unprepare(aconnect->ape_clk);
+ dev_err(dev, "apb2ape clk_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tegra_aconnect_runtime_suspend(struct device *dev)
+{
+ struct tegra_aconnect *aconnect = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(aconnect->ape_clk);
+ clk_disable_unprepare(aconnect->apb2ape_clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_aconnect_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_aconnect_runtime_suspend,
+ tegra_aconnect_runtime_resume, NULL)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static const struct of_device_id tegra_aconnect_of_match[] = {
+ { .compatible = "nvidia,tegra210-aconnect", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_aconnect_of_match);
+
+static struct platform_driver tegra_aconnect_driver = {
+ .probe = tegra_aconnect_probe,
+ .remove = tegra_aconnect_remove,
+ .driver = {
+ .name = "tegra-aconnect",
+ .of_match_table = tegra_aconnect_of_match,
+ .pm = &tegra_aconnect_pm_ops,
+ },
+};
+module_platform_driver(tegra_aconnect_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra ACONNECT Bus Driver");
+MODULE_AUTHOR("Jon Hunter <jonathanh@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/tegra-gmi.c b/drivers/bus/tegra-gmi.c
new file mode 100644
index 0000000000..59919e99f7
--- /dev/null
+++ b/drivers/bus/tegra-gmi.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for NVIDIA Generic Memory Interface
+ *
+ * Copyright (C) 2016 Host Mobility AB. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <soc/tegra/common.h>
+
+#define TEGRA_GMI_CONFIG 0x00
+#define TEGRA_GMI_CONFIG_GO BIT(31)
+#define TEGRA_GMI_BUS_WIDTH_32BIT BIT(30)
+#define TEGRA_GMI_MUX_MODE BIT(28)
+#define TEGRA_GMI_RDY_BEFORE_DATA BIT(24)
+#define TEGRA_GMI_RDY_ACTIVE_HIGH BIT(23)
+#define TEGRA_GMI_ADV_ACTIVE_HIGH BIT(22)
+#define TEGRA_GMI_OE_ACTIVE_HIGH BIT(21)
+#define TEGRA_GMI_CS_ACTIVE_HIGH BIT(20)
+#define TEGRA_GMI_CS_SELECT(x) (((x) & 0x7) << 4)
+
+#define TEGRA_GMI_TIMING0 0x10
+#define TEGRA_GMI_MUXED_WIDTH(x) (((x) & 0xf) << 12)
+#define TEGRA_GMI_HOLD_WIDTH(x) (((x) & 0xf) << 8)
+#define TEGRA_GMI_ADV_WIDTH(x) (((x) & 0xf) << 4)
+#define TEGRA_GMI_CE_WIDTH(x) ((x) & 0xf)
+
+#define TEGRA_GMI_TIMING1 0x14
+#define TEGRA_GMI_WE_WIDTH(x) (((x) & 0xff) << 16)
+#define TEGRA_GMI_OE_WIDTH(x) (((x) & 0xff) << 8)
+#define TEGRA_GMI_WAIT_WIDTH(x) ((x) & 0xff)
+
+#define TEGRA_GMI_MAX_CHIP_SELECT 8
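+
+/*
+ * Illustration only (hypothetical configuration, not taken from any board):
+ * a 32-bit, non-multiplexed device wired to chip select 4 ends up with
+ *
+ *	snor_config = TEGRA_GMI_BUS_WIDTH_32BIT | TEGRA_GMI_CS_SELECT(4)
+ *		    = BIT(30) | (4 << 4) = 0x40000040
+ *
+ * before tegra_gmi_enable() ORs in TEGRA_GMI_CONFIG_GO.
+ */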
+
+struct tegra_gmi {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+ struct reset_control *rst;
+
+ u32 snor_config;
+ u32 snor_timing0;
+ u32 snor_timing1;
+};
+
+static int tegra_gmi_enable(struct tegra_gmi *gmi)
+{
+ int err;
+
+ pm_runtime_enable(gmi->dev);
+ err = pm_runtime_resume_and_get(gmi->dev);
+ if (err) {
+ pm_runtime_disable(gmi->dev);
+ return err;
+ }
+
+ reset_control_assert(gmi->rst);
+ usleep_range(2000, 4000);
+ reset_control_deassert(gmi->rst);
+
+ writel(gmi->snor_timing0, gmi->base + TEGRA_GMI_TIMING0);
+ writel(gmi->snor_timing1, gmi->base + TEGRA_GMI_TIMING1);
+
+ gmi->snor_config |= TEGRA_GMI_CONFIG_GO;
+ writel(gmi->snor_config, gmi->base + TEGRA_GMI_CONFIG);
+
+ return 0;
+}
+
+static void tegra_gmi_disable(struct tegra_gmi *gmi)
+{
+ u32 config;
+
+ /* stop GMI operation */
+ config = readl(gmi->base + TEGRA_GMI_CONFIG);
+ config &= ~TEGRA_GMI_CONFIG_GO;
+ writel(config, gmi->base + TEGRA_GMI_CONFIG);
+
+ reset_control_assert(gmi->rst);
+
+ pm_runtime_put_sync_suspend(gmi->dev);
+ pm_runtime_force_suspend(gmi->dev);
+}
+
+static int tegra_gmi_parse_dt(struct tegra_gmi *gmi)
+{
+ struct device_node *child;
+ u32 property, ranges[4];
+ int err;
+
+ child = of_get_next_available_child(gmi->dev->of_node, NULL);
+ if (!child) {
+ dev_err(gmi->dev, "no child nodes found\n");
+ return -ENODEV;
+ }
+
+ /*
+ * We currently only support one child device due to lack of
+ * chip-select address decoding, which means that we only have one
+ * chip-select line from the GMI controller.
+ */
+ if (of_get_child_count(gmi->dev->of_node) > 1)
+ dev_warn(gmi->dev, "only one child device is supported.");
+
+ if (of_property_read_bool(child, "nvidia,snor-data-width-32bit"))
+ gmi->snor_config |= TEGRA_GMI_BUS_WIDTH_32BIT;
+
+ if (of_property_read_bool(child, "nvidia,snor-mux-mode"))
+ gmi->snor_config |= TEGRA_GMI_MUX_MODE;
+
+ if (of_property_read_bool(child, "nvidia,snor-rdy-active-before-data"))
+ gmi->snor_config |= TEGRA_GMI_RDY_BEFORE_DATA;
+
+ if (of_property_read_bool(child, "nvidia,snor-rdy-active-high"))
+ gmi->snor_config |= TEGRA_GMI_RDY_ACTIVE_HIGH;
+
+ if (of_property_read_bool(child, "nvidia,snor-adv-active-high"))
+ gmi->snor_config |= TEGRA_GMI_ADV_ACTIVE_HIGH;
+
+ if (of_property_read_bool(child, "nvidia,snor-oe-active-high"))
+ gmi->snor_config |= TEGRA_GMI_OE_ACTIVE_HIGH;
+
+ if (of_property_read_bool(child, "nvidia,snor-cs-active-high"))
+ gmi->snor_config |= TEGRA_GMI_CS_ACTIVE_HIGH;
+
+ /* Decode the CS# */
+ err = of_property_read_u32_array(child, "ranges", ranges, 4);
+ if (err < 0) {
+ /* Invalid binding */
+ if (err == -EOVERFLOW) {
+ dev_err(gmi->dev,
+ "failed to decode CS: invalid ranges length\n");
+ goto error_cs;
+ }
+
+ /*
+ * If we reach here it means that the child node has an empty
+ * ranges property or none at all. Attempt to decode the
+ * CS# from the reg property instead.
+ */
+ err = of_property_read_u32(child, "reg", &property);
+ if (err < 0) {
+ dev_err(gmi->dev,
+ "failed to decode CS: no reg property found\n");
+ goto error_cs;
+ }
+ } else {
+ property = ranges[1];
+ }
+
+ /* Valid chip selects are CS0-CS7 */
+ if (property >= TEGRA_GMI_MAX_CHIP_SELECT) {
+ dev_err(gmi->dev, "invalid chip select: %d", property);
+ err = -EINVAL;
+ goto error_cs;
+ }
+
+ gmi->snor_config |= TEGRA_GMI_CS_SELECT(property);
+
+ /* The default values that are provided below are reset values */
+ if (!of_property_read_u32(child, "nvidia,snor-muxed-width", &property))
+ gmi->snor_timing0 |= TEGRA_GMI_MUXED_WIDTH(property);
+ else
+ gmi->snor_timing0 |= TEGRA_GMI_MUXED_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-hold-width", &property))
+ gmi->snor_timing0 |= TEGRA_GMI_HOLD_WIDTH(property);
+ else
+ gmi->snor_timing0 |= TEGRA_GMI_HOLD_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-adv-width", &property))
+ gmi->snor_timing0 |= TEGRA_GMI_ADV_WIDTH(property);
+ else
+ gmi->snor_timing0 |= TEGRA_GMI_ADV_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-ce-width", &property))
+ gmi->snor_timing0 |= TEGRA_GMI_CE_WIDTH(property);
+ else
+ gmi->snor_timing0 |= TEGRA_GMI_CE_WIDTH(4);
+
+ if (!of_property_read_u32(child, "nvidia,snor-we-width", &property))
+ gmi->snor_timing1 |= TEGRA_GMI_WE_WIDTH(property);
+ else
+ gmi->snor_timing1 |= TEGRA_GMI_WE_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-oe-width", &property))
+ gmi->snor_timing1 |= TEGRA_GMI_OE_WIDTH(property);
+ else
+ gmi->snor_timing1 |= TEGRA_GMI_OE_WIDTH(1);
+
+ if (!of_property_read_u32(child, "nvidia,snor-wait-width", &property))
+ gmi->snor_timing1 |= TEGRA_GMI_WAIT_WIDTH(property);
+ else
+ gmi->snor_timing1 |= TEGRA_GMI_WAIT_WIDTH(3);
+
+error_cs:
+ of_node_put(child);
+ return err;
+}
+
+static int tegra_gmi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra_gmi *gmi;
+ int err;
+
+ gmi = devm_kzalloc(dev, sizeof(*gmi), GFP_KERNEL);
+ if (!gmi)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, gmi);
+ gmi->dev = dev;
+
+ gmi->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(gmi->base))
+ return PTR_ERR(gmi->base);
+
+ gmi->clk = devm_clk_get(dev, "gmi");
+ if (IS_ERR(gmi->clk)) {
+ dev_err(dev, "can not get clock\n");
+ return PTR_ERR(gmi->clk);
+ }
+
+ gmi->rst = devm_reset_control_get(dev, "gmi");
+ if (IS_ERR(gmi->rst)) {
+ dev_err(dev, "can not get reset\n");
+ return PTR_ERR(gmi->rst);
+ }
+
+ err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (err)
+ return err;
+
+ err = tegra_gmi_parse_dt(gmi);
+ if (err)
+ return err;
+
+ err = tegra_gmi_enable(gmi);
+ if (err < 0)
+ return err;
+
+ err = of_platform_default_populate(dev->of_node, NULL, dev);
+ if (err < 0) {
+ dev_err(dev, "failed to create devices\n");
+ tegra_gmi_disable(gmi);
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_gmi_remove(struct platform_device *pdev)
+{
+ struct tegra_gmi *gmi = platform_get_drvdata(pdev);
+
+ of_platform_depopulate(gmi->dev);
+ tegra_gmi_disable(gmi);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_gmi_runtime_resume(struct device *dev)
+{
+ struct tegra_gmi *gmi = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(gmi->clk);
+ if (err < 0) {
+ dev_err(gmi->dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_gmi_runtime_suspend(struct device *dev)
+{
+ struct tegra_gmi *gmi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(gmi->clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_gmi_pm = {
+ SET_RUNTIME_PM_OPS(tegra_gmi_runtime_suspend, tegra_gmi_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id tegra_gmi_id_table[] = {
+ { .compatible = "nvidia,tegra20-gmi", },
+ { .compatible = "nvidia,tegra30-gmi", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_gmi_id_table);
+
+static struct platform_driver tegra_gmi_driver = {
+ .probe = tegra_gmi_probe,
+ .remove = tegra_gmi_remove,
+ .driver = {
+ .name = "tegra-gmi",
+ .of_match_table = tegra_gmi_id_table,
+ .pm = &tegra_gmi_pm,
+ },
+};
+module_platform_driver(tegra_gmi_driver);
+
+MODULE_AUTHOR("Mirza Krak <mirza.krak@gmail.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra GMI Bus Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/ti-pwmss.c b/drivers/bus/ti-pwmss.c
new file mode 100644
index 0000000000..480a4de76c
--- /dev/null
+++ b/drivers/bus/ti-pwmss.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * TI PWM Subsystem driver
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_platform.h>
+
+static const struct of_device_id pwmss_of_match[] = {
+ { .compatible = "ti,am33xx-pwmss" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pwmss_of_match);
+
+static int pwmss_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct device_node *node = pdev->dev.of_node;
+
+ pm_runtime_enable(&pdev->dev);
+
+ /* Populate all the child nodes here... */
+ ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ if (ret)
+ dev_err(&pdev->dev, "no child node found\n");
+
+ return ret;
+}
+
+static int pwmss_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver pwmss_driver = {
+ .driver = {
+ .name = "pwmss",
+ .of_match_table = pwmss_of_match,
+ },
+ .probe = pwmss_probe,
+ .remove = pwmss_remove,
+};
+
+module_platform_driver(pwmss_driver);
+
+MODULE_DESCRIPTION("PWM Subsystem driver");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_LICENSE("GPL");
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
new file mode 100644
index 0000000000..9ed9239b12
--- /dev/null
+++ b/drivers/bus/ti-sysc.c
@@ -0,0 +1,3477 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ti-sysc.c - Texas Instruments sysc interconnect target driver
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/cpu_pm.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/timekeeping.h>
+#include <linux/iopoll.h>
+
+#include <linux/platform_data/ti-sysc.h>
+
+#include <dt-bindings/bus/ti-sysc.h>
+
+#define DIS_ISP BIT(2)
+#define DIS_IVA BIT(1)
+#define DIS_SGX BIT(0)
+
+#define SOC_FLAG(match, flag) { .machine = match, .data = (void *)(flag), }
+
+#define MAX_MODULE_SOFTRESET_WAIT 10000
+
+enum sysc_soc {
+ SOC_UNKNOWN,
+ SOC_2420,
+ SOC_2430,
+ SOC_3430,
+ SOC_AM35,
+ SOC_3630,
+ SOC_4430,
+ SOC_4460,
+ SOC_4470,
+ SOC_5430,
+ SOC_AM3,
+ SOC_AM4,
+ SOC_DRA7,
+};
+
+struct sysc_address {
+ unsigned long base;
+ struct list_head node;
+};
+
+struct sysc_module {
+ struct sysc *ddata;
+ struct list_head node;
+};
+
+struct sysc_soc_info {
+ unsigned long general_purpose:1;
+ enum sysc_soc soc;
+ struct mutex list_lock; /* disabled and restored modules list lock */
+ struct list_head disabled_modules;
+ struct list_head restored_modules;
+ struct notifier_block nb;
+};
+
+enum sysc_clocks {
+ SYSC_FCK,
+ SYSC_ICK,
+ SYSC_OPTFCK0,
+ SYSC_OPTFCK1,
+ SYSC_OPTFCK2,
+ SYSC_OPTFCK3,
+ SYSC_OPTFCK4,
+ SYSC_OPTFCK5,
+ SYSC_OPTFCK6,
+ SYSC_OPTFCK7,
+ SYSC_MAX_CLOCKS,
+};
+
+static struct sysc_soc_info *sysc_soc;
+static const char * const reg_names[] = { "rev", "sysc", "syss", };
+static const char * const clock_names[SYSC_MAX_CLOCKS] = {
+ "fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4",
+ "opt5", "opt6", "opt7",
+};
+
+#define SYSC_IDLEMODE_MASK 3
+#define SYSC_CLOCKACTIVITY_MASK 3
+
+/**
+ * struct sysc - TI sysc interconnect target module registers and capabilities
+ * @dev: struct device pointer
+ * @module_pa: physical address of the interconnect target module
+ * @module_size: size of the interconnect target module
+ * @module_va: virtual address of the interconnect target module
+ * @offsets: register offsets from module base
+ * @mdata: ti-sysc to hwmod translation data for a module
+ * @clocks: clocks used by the interconnect target module
+ * @clock_roles: clock role names for the found clocks
+ * @nr_clocks: number of clocks used by the interconnect target module
+ * @rsts: resets used by the interconnect target module
+ * @legacy_mode: configured for legacy mode if set
+ * @cap: interconnect target module capabilities
+ * @cfg: interconnect target module configuration
+ * @cookie: data used by legacy platform callbacks
+ * @name: name if available
+ * @revision: interconnect target module revision
+ * @sysconfig: saved sysconfig register value
+ * @reserved: target module is reserved and already in use
+ * @enabled: sysc runtime enabled status
+ * @needs_resume: runtime resume needed on resume from suspend
+ * @child_needs_resume: runtime resume needed for child on resume from suspend
+ * @disable_on_idle: status flag used for disabling modules with resets
+ * @idle_work: work structure used to perform delayed idle on a module
+ * @pre_reset_quirk: module specific pre-reset quirk
+ * @post_reset_quirk: module specific post-reset quirk
+ * @reset_done_quirk: module specific reset done quirk
+ * @module_enable_quirk: module specific enable quirk
+ * @module_disable_quirk: module specific disable quirk
+ * @module_unlock_quirk: module specific sysconfig unlock quirk
+ * @module_lock_quirk: module specific sysconfig lock quirk
+ */
+struct sysc {
+ struct device *dev;
+ u64 module_pa;
+ u32 module_size;
+ void __iomem *module_va;
+ int offsets[SYSC_MAX_REGS];
+ struct ti_sysc_module_data *mdata;
+ struct clk **clocks;
+ const char **clock_roles;
+ int nr_clocks;
+ struct reset_control *rsts;
+ const char *legacy_mode;
+ const struct sysc_capabilities *cap;
+ struct sysc_config cfg;
+ struct ti_sysc_cookie cookie;
+ const char *name;
+ u32 revision;
+ u32 sysconfig;
+ unsigned int reserved:1;
+ unsigned int enabled:1;
+ unsigned int needs_resume:1;
+ unsigned int child_needs_resume:1;
+ struct delayed_work idle_work;
+ void (*pre_reset_quirk)(struct sysc *sysc);
+ void (*post_reset_quirk)(struct sysc *sysc);
+ void (*reset_done_quirk)(struct sysc *sysc);
+ void (*module_enable_quirk)(struct sysc *sysc);
+ void (*module_disable_quirk)(struct sysc *sysc);
+ void (*module_unlock_quirk)(struct sysc *sysc);
+ void (*module_lock_quirk)(struct sysc *sysc);
+};
+
+static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
+ bool is_child);
+static int sysc_reset(struct sysc *ddata);
+
+static void sysc_write(struct sysc *ddata, int offset, u32 value)
+{
+ if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) {
+ writew_relaxed(value & 0xffff, ddata->module_va + offset);
+
+ /* Only the i2c revision register has LO and HI parts with a stride of 4 */
+ if (ddata->offsets[SYSC_REVISION] >= 0 &&
+ offset == ddata->offsets[SYSC_REVISION]) {
+ u16 hi = value >> 16;
+
+ writew_relaxed(hi, ddata->module_va + offset + 4);
+ }
+
+ return;
+ }
+
+ writel_relaxed(value, ddata->module_va + offset);
+}
+
+static u32 sysc_read(struct sysc *ddata, int offset)
+{
+ if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) {
+ u32 val;
+
+ val = readw_relaxed(ddata->module_va + offset);
+
+ /* Only the i2c revision register has LO and HI parts with a stride of 4 */
+ if (ddata->offsets[SYSC_REVISION] >= 0 &&
+ offset == ddata->offsets[SYSC_REVISION]) {
+ u16 tmp = readw_relaxed(ddata->module_va + offset + 4);
+
+ val |= tmp << 16;
+ }
+
+ return val;
+ }
+
+ return readl_relaxed(ddata->module_va + offset);
+}
+
+static bool sysc_opt_clks_needed(struct sysc *ddata)
+{
+ return !!(ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_NEEDED);
+}
+
+static u32 sysc_read_revision(struct sysc *ddata)
+{
+ int offset = ddata->offsets[SYSC_REVISION];
+
+ if (offset < 0)
+ return 0;
+
+ return sysc_read(ddata, offset);
+}
+
+static u32 sysc_read_sysconfig(struct sysc *ddata)
+{
+ int offset = ddata->offsets[SYSC_SYSCONFIG];
+
+ if (offset < 0)
+ return 0;
+
+ return sysc_read(ddata, offset);
+}
+
+static u32 sysc_read_sysstatus(struct sysc *ddata)
+{
+ int offset = ddata->offsets[SYSC_SYSSTATUS];
+
+ if (offset < 0)
+ return 0;
+
+ return sysc_read(ddata, offset);
+}
+
+static int sysc_poll_reset_sysstatus(struct sysc *ddata)
+{
+ int error, retries;
+ u32 syss_done, rstval;
+
+ if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
+ syss_done = 0;
+ else
+ syss_done = ddata->cfg.syss_mask;
+
+ if (likely(!timekeeping_suspended)) {
+ error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata,
+ rstval, (rstval & ddata->cfg.syss_mask) ==
+ syss_done, 100, MAX_MODULE_SOFTRESET_WAIT);
+ } else {
+ retries = MAX_MODULE_SOFTRESET_WAIT;
+ while (retries--) {
+ rstval = sysc_read_sysstatus(ddata);
+ if ((rstval & ddata->cfg.syss_mask) == syss_done)
+ return 0;
+ udelay(2); /* Account for udelay flakiness */
+ }
+ error = -ETIMEDOUT;
+ }
+
+ return error;
+}
+
+static int sysc_poll_reset_sysconfig(struct sysc *ddata)
+{
+ int error, retries;
+ u32 sysc_mask, rstval;
+
+ sysc_mask = BIT(ddata->cap->regbits->srst_shift);
+
+ if (likely(!timekeeping_suspended)) {
+ error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata,
+ rstval, !(rstval & sysc_mask),
+ 100, MAX_MODULE_SOFTRESET_WAIT);
+ } else {
+ retries = MAX_MODULE_SOFTRESET_WAIT;
+ while (retries--) {
+ rstval = sysc_read_sysconfig(ddata);
+ if (!(rstval & sysc_mask))
+ return 0;
+ udelay(2); /* Account for udelay flakiness */
+ }
+ error = -ETIMEDOUT;
+ }
+
+ return error;
+}
+
+/* Poll on reset status */
+static int sysc_wait_softreset(struct sysc *ddata)
+{
+ int syss_offset, error = 0;
+
+ if (ddata->cap->regbits->srst_shift < 0)
+ return 0;
+
+ syss_offset = ddata->offsets[SYSC_SYSSTATUS];
+
+ if (syss_offset >= 0)
+ error = sysc_poll_reset_sysstatus(ddata);
+ else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS)
+ error = sysc_poll_reset_sysconfig(ddata);
+
+ return error;
+}
+
+static int sysc_add_named_clock_from_child(struct sysc *ddata,
+ const char *name,
+ const char *optfck_name)
+{
+ struct device_node *np = ddata->dev->of_node;
+ struct device_node *child;
+ struct clk_lookup *cl;
+ struct clk *clock;
+ const char *n;
+
+ if (name)
+ n = name;
+ else
+ n = optfck_name;
+
+ /* Does the clock alias already exist? */
+ clock = of_clk_get_by_name(np, n);
+ if (!IS_ERR(clock)) {
+ clk_put(clock);
+
+ return 0;
+ }
+
+ child = of_get_next_available_child(np, NULL);
+ if (!child)
+ return -ENODEV;
+
+ clock = devm_get_clk_from_child(ddata->dev, child, name);
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ /*
+ * Use clkdev_add() instead of clkdev_alloc() to avoid the MAX_DEV_ID
+ * limit for clk_get(). If cl ever needs to be freed, it should be done
+ * with clkdev_drop().
+ */
+ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return -ENOMEM;
+
+ cl->con_id = n;
+ cl->dev_id = dev_name(ddata->dev);
+ cl->clk = clock;
+ clkdev_add(cl);
+
+ clk_put(clock);
+
+ return 0;
+}
+
+static int sysc_init_ext_opt_clock(struct sysc *ddata, const char *name)
+{
+ const char *optfck_name;
+ int error, index;
+
+ if (ddata->nr_clocks < SYSC_OPTFCK0)
+ index = SYSC_OPTFCK0;
+ else
+ index = ddata->nr_clocks;
+
+ if (name)
+ optfck_name = name;
+ else
+ optfck_name = clock_names[index];
+
+ error = sysc_add_named_clock_from_child(ddata, name, optfck_name);
+ if (error)
+ return error;
+
+ ddata->clock_roles[index] = optfck_name;
+ ddata->nr_clocks++;
+
+ return 0;
+}
+
+static int sysc_get_one_clock(struct sysc *ddata, const char *name)
+{
+ int error, i, index = -ENODEV;
+
+ if (!strncmp(clock_names[SYSC_FCK], name, 3))
+ index = SYSC_FCK;
+ else if (!strncmp(clock_names[SYSC_ICK], name, 3))
+ index = SYSC_ICK;
+
+ if (index < 0) {
+ for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
+ if (!ddata->clocks[i]) {
+ index = i;
+ break;
+ }
+ }
+ }
+
+ if (index < 0) {
+ dev_err(ddata->dev, "clock %s not added\n", name);
+ return index;
+ }
+
+ ddata->clocks[index] = devm_clk_get(ddata->dev, name);
+ if (IS_ERR(ddata->clocks[index])) {
+ dev_err(ddata->dev, "clock get error for %s: %li\n",
+ name, PTR_ERR(ddata->clocks[index]));
+
+ return PTR_ERR(ddata->clocks[index]);
+ }
+
+ error = clk_prepare(ddata->clocks[index]);
+ if (error) {
+ dev_err(ddata->dev, "clock prepare error for %s: %i\n",
+ name, error);
+
+ return error;
+ }
+
+ return 0;
+}
+
+static int sysc_get_clocks(struct sysc *ddata)
+{
+ struct device_node *np = ddata->dev->of_node;
+ struct property *prop;
+ const char *name;
+ int nr_fck = 0, nr_ick = 0, i, error = 0;
+
+ ddata->clock_roles = devm_kcalloc(ddata->dev,
+ SYSC_MAX_CLOCKS,
+ sizeof(*ddata->clock_roles),
+ GFP_KERNEL);
+ if (!ddata->clock_roles)
+ return -ENOMEM;
+
+ of_property_for_each_string(np, "clock-names", prop, name) {
+ if (!strncmp(clock_names[SYSC_FCK], name, 3))
+ nr_fck++;
+ if (!strncmp(clock_names[SYSC_ICK], name, 3))
+ nr_ick++;
+ ddata->clock_roles[ddata->nr_clocks] = name;
+ ddata->nr_clocks++;
+ }
+
+ if (ddata->nr_clocks < 1)
+ return 0;
+
+ if ((ddata->cfg.quirks & SYSC_QUIRK_EXT_OPT_CLOCK)) {
+ error = sysc_init_ext_opt_clock(ddata, NULL);
+ if (error)
+ return error;
+ }
+
+ if (ddata->nr_clocks > SYSC_MAX_CLOCKS) {
+ dev_err(ddata->dev, "too many clocks for %pOF\n", np);
+
+ return -EINVAL;
+ }
+
+ if (nr_fck > 1 || nr_ick > 1) {
+ dev_err(ddata->dev, "max one fck and ick for %pOF\n", np);
+
+ return -EINVAL;
+ }
+
+ /* Always add a slot for main clocks fck and ick even if unused */
+ if (!nr_fck)
+ ddata->nr_clocks++;
+ if (!nr_ick)
+ ddata->nr_clocks++;
+
+ ddata->clocks = devm_kcalloc(ddata->dev,
+ ddata->nr_clocks, sizeof(*ddata->clocks),
+ GFP_KERNEL);
+ if (!ddata->clocks)
+ return -ENOMEM;
+
+ for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
+ const char *name = ddata->clock_roles[i];
+
+ if (!name)
+ continue;
+
+ error = sysc_get_one_clock(ddata, name);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+static int sysc_enable_main_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i, error;
+
+ if (!ddata->clocks)
+ return 0;
+
+ for (i = 0; i < SYSC_OPTFCK0; i++) {
+ clock = ddata->clocks[i];
+
+ /* Main clocks may not have ick */
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ error = clk_enable(clock);
+ if (error)
+ goto err_disable;
+ }
+
+ return 0;
+
+err_disable:
+ for (i--; i >= 0; i--) {
+ clock = ddata->clocks[i];
+
+ /* Main clocks may not have ick */
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ clk_disable(clock);
+ }
+
+ return error;
+}
+
+static void sysc_disable_main_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i;
+
+ if (!ddata->clocks)
+ return;
+
+ for (i = 0; i < SYSC_OPTFCK0; i++) {
+ clock = ddata->clocks[i];
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ clk_disable(clock);
+ }
+}
+
+static int sysc_enable_opt_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i, error;
+
+ if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
+ return 0;
+
+ for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
+ clock = ddata->clocks[i];
+
+ /* Assume no holes for opt clocks */
+ if (IS_ERR_OR_NULL(clock))
+ return 0;
+
+ error = clk_enable(clock);
+ if (error)
+ goto err_disable;
+ }
+
+ return 0;
+
+err_disable:
+ for (i--; i >= 0; i--) {
+ clock = ddata->clocks[i];
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ clk_disable(clock);
+ }
+
+ return error;
+}
+
+static void sysc_disable_opt_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i;
+
+ if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
+ return;
+
+ for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
+ clock = ddata->clocks[i];
+
+ /* Assume no holes for opt clocks */
+ if (IS_ERR_OR_NULL(clock))
+ return;
+
+ clk_disable(clock);
+ }
+}
+
+static void sysc_clkdm_deny_idle(struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata;
+
+ if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
+ return;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (pdata && pdata->clkdm_deny_idle)
+ pdata->clkdm_deny_idle(ddata->dev, &ddata->cookie);
+}
+
+static void sysc_clkdm_allow_idle(struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata;
+
+ if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
+ return;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (pdata && pdata->clkdm_allow_idle)
+ pdata->clkdm_allow_idle(ddata->dev, &ddata->cookie);
+}
+
+/**
+ * sysc_init_resets - init rstctrl reset line if configured
+ * @ddata: device driver data
+ *
+ * See sysc_rstctrl_reset_deassert().
+ */
+static int sysc_init_resets(struct sysc *ddata)
+{
+ ddata->rsts =
+ devm_reset_control_get_optional_shared(ddata->dev, "rstctrl");
+
+ return PTR_ERR_OR_ZERO(ddata->rsts);
+}
+
+/**
+ * sysc_parse_and_check_child_range - parses module IO region from ranges
+ * @ddata: device driver data
+ *
+ * In general we only need rev, syss, and sysc registers and not the whole
+ * module range. But we do want the offsets for these registers from the
+ * module base. This allows us to check them against the legacy hwmod
+ * platform data. Let's also check the ranges are configured properly.
+ */
+static int sysc_parse_and_check_child_range(struct sysc *ddata)
+{
+ struct device_node *np = ddata->dev->of_node;
+ struct of_range_parser parser;
+ struct of_range range;
+ int error;
+
+ error = of_range_parser_init(&parser, np);
+ if (error)
+ return error;
+
+ for_each_of_range(&parser, &range) {
+ ddata->module_pa = range.cpu_addr;
+ ddata->module_size = range.size;
+ break;
+ }
+
+ return 0;
+}
+
+/* Interconnect instances to probe before l4_per instances */
+static struct resource early_bus_ranges[] = {
+ /* am3/4 l4_wkup */
+ { .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
+ /* omap4/5 and dra7 l4_cfg */
+ { .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
+ /* omap4 l4_wkup */
+ { .start = 0x4a300000, .end = 0x4a300000 + 0x30000, },
+ /* omap5 and dra7 l4_wkup without dra7 dcan segment */
+ { .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, },
+};
+
+static atomic_t sysc_defer = ATOMIC_INIT(10);
+
+/**
+ * sysc_defer_non_critical - defer non-critical interconnect probing
+ * @ddata: device driver data
+ *
+ * We want to probe l4_cfg and l4_wkup interconnect instances before any
+ * l4_per instances as l4_per instances depend on resources on l4_cfg and
+ * l4_wkup interconnects.
+ */
+static int sysc_defer_non_critical(struct sysc *ddata)
+{
+ struct resource *res;
+ int i;
+
+ if (!atomic_read(&sysc_defer))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
+ res = &early_bus_ranges[i];
+ if (ddata->module_pa >= res->start &&
+ ddata->module_pa <= res->end) {
+ atomic_set(&sysc_defer, 0);
+
+ return 0;
+ }
+ }
+
+ atomic_dec_if_positive(&sysc_defer);
+
+ return -EPROBE_DEFER;
+}
+
+static struct device_node *stdout_path;
+
+static void sysc_init_stdout_path(struct sysc *ddata)
+{
+ struct device_node *np = NULL;
+ const char *uart;
+
+ if (IS_ERR(stdout_path))
+ return;
+
+ if (stdout_path)
+ return;
+
+ np = of_find_node_by_path("/chosen");
+ if (!np)
+ goto err;
+
+ uart = of_get_property(np, "stdout-path", NULL);
+ if (!uart)
+ goto err;
+
+ np = of_find_node_by_path(uart);
+ if (!np)
+ goto err;
+
+ stdout_path = np;
+
+ return;
+
+err:
+ stdout_path = ERR_PTR(-ENODEV);
+}
+
+static void sysc_check_quirk_stdout(struct sysc *ddata,
+ struct device_node *np)
+{
+ sysc_init_stdout_path(ddata);
+ if (np != stdout_path)
+ return;
+
+ ddata->cfg.quirks |= SYSC_QUIRK_NO_IDLE_ON_INIT |
+ SYSC_QUIRK_NO_RESET_ON_INIT;
+}
+
+/**
+ * sysc_check_one_child - check child configuration
+ * @ddata: device driver data
+ * @np: child device node
+ *
+ * Let's avoid messy situations where we have a new interconnect target
+ * node but the children still have "ti,hwmods" properties. These belong
+ * to the interconnect target node and are managed by this driver.
+ */
+static void sysc_check_one_child(struct sysc *ddata,
+ struct device_node *np)
+{
+ const char *name;
+
+ name = of_get_property(np, "ti,hwmods", NULL);
+ if (name && !of_device_is_compatible(np, "ti,sysc"))
+ dev_warn(ddata->dev, "really a child ti,hwmods property?");
+
+ sysc_check_quirk_stdout(ddata, np);
+ sysc_parse_dts_quirks(ddata, np, true);
+}
+
+static void sysc_check_children(struct sysc *ddata)
+{
+ struct device_node *child;
+
+ for_each_child_of_node(ddata->dev->of_node, child)
+ sysc_check_one_child(ddata, child);
+}
+
+/*
+ * So far only I2C uses 16-bit register access with clockactivity, with the
+ * revision split across two registers with a stride of 4. We can detect
+ * this based on the rev register size and configure things far enough to
+ * be able to read the revision register properly.
+ */
+static void sysc_check_quirk_16bit(struct sysc *ddata, struct resource *res)
+{
+ if (resource_size(res) == 8)
+ ddata->cfg.quirks |= SYSC_QUIRK_16BIT | SYSC_QUIRK_USE_CLOCKACT;
+}
+
+/**
+ * sysc_parse_one - parses the interconnect target module registers
+ * @ddata: device driver data
+ * @reg: register to parse
+ */
+static int sysc_parse_one(struct sysc *ddata, enum sysc_registers reg)
+{
+ struct resource *res;
+ const char *name;
+
+ switch (reg) {
+ case SYSC_REVISION:
+ case SYSC_SYSCONFIG:
+ case SYSC_SYSSTATUS:
+ name = reg_names[reg];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ res = platform_get_resource_byname(to_platform_device(ddata->dev),
+ IORESOURCE_MEM, name);
+ if (!res) {
+ ddata->offsets[reg] = -ENODEV;
+
+ return 0;
+ }
+
+ ddata->offsets[reg] = res->start - ddata->module_pa;
+ if (reg == SYSC_REVISION)
+ sysc_check_quirk_16bit(ddata, res);
+
+ return 0;
+}
+
+static int sysc_parse_registers(struct sysc *ddata)
+{
+ int i, error;
+
+ for (i = 0; i < SYSC_MAX_REGS; i++) {
+ error = sysc_parse_one(ddata, i);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+/**
+ * sysc_check_registers - check for misconfigured register overlaps
+ * @ddata: device driver data
+ */
+static int sysc_check_registers(struct sysc *ddata)
+{
+ int i, j, nr_regs = 0, nr_matches = 0;
+
+ for (i = 0; i < SYSC_MAX_REGS; i++) {
+ if (ddata->offsets[i] < 0)
+ continue;
+
+ if (ddata->offsets[i] > (ddata->module_size - 4)) {
+ dev_err(ddata->dev, "register outside module range");
+
+ return -EINVAL;
+ }
+
+ for (j = 0; j < SYSC_MAX_REGS; j++) {
+ if (ddata->offsets[j] < 0)
+ continue;
+
+ if (ddata->offsets[i] == ddata->offsets[j])
+ nr_matches++;
+ }
+ nr_regs++;
+ }
+
+ if (nr_matches > nr_regs) {
+ dev_err(ddata->dev, "overlapping registers: (%i/%i)",
+ nr_regs, nr_matches);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * sysc_ioremap - ioremap register space for the interconnect target module
+ * @ddata: device driver data
+ *
+ * Note that the interconnect target module registers can be anywhere
+ * within the interconnect target module range. For example, SGX has
+ * them at offset 0x1fc00 in the 32MB module address space. And cpsw
+ * has them at offset 0x1200 in the CPSW_WR child. Usually the
+ * interconnect target module registers are at the beginning of
+ * the module range though.
+ */
+static int sysc_ioremap(struct sysc *ddata)
+{
+ int size;
+
+ if (ddata->offsets[SYSC_REVISION] < 0 &&
+ ddata->offsets[SYSC_SYSCONFIG] < 0 &&
+ ddata->offsets[SYSC_SYSSTATUS] < 0) {
+ size = ddata->module_size;
+ } else {
+ size = max3(ddata->offsets[SYSC_REVISION],
+ ddata->offsets[SYSC_SYSCONFIG],
+ ddata->offsets[SYSC_SYSSTATUS]);
+
+ if (size < SZ_1K)
+ size = SZ_1K;
+
+ if ((size + sizeof(u32)) > ddata->module_size)
+ size = ddata->module_size;
+ }
+
+ ddata->module_va = devm_ioremap(ddata->dev,
+ ddata->module_pa,
+ size + sizeof(u32));
+ if (!ddata->module_va)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * sysc_map_and_check_registers - ioremap and check device registers
+ * @ddata: device driver data
+ */
+static int sysc_map_and_check_registers(struct sysc *ddata)
+{
+ struct device_node *np = ddata->dev->of_node;
+ int error;
+
+ error = sysc_parse_and_check_child_range(ddata);
+ if (error)
+ return error;
+
+ error = sysc_defer_non_critical(ddata);
+ if (error)
+ return error;
+
+ sysc_check_children(ddata);
+
+ if (!of_property_present(np, "reg"))
+ return 0;
+
+ error = sysc_parse_registers(ddata);
+ if (error)
+ return error;
+
+ error = sysc_ioremap(ddata);
+ if (error)
+ return error;
+
+ error = sysc_check_registers(ddata);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+/**
+ * sysc_show_rev - read and show interconnect target module revision
+ * @bufp: buffer to print the information to
+ * @ddata: device driver data
+ */
+static int sysc_show_rev(char *bufp, struct sysc *ddata)
+{
+ int len;
+
+ if (ddata->offsets[SYSC_REVISION] < 0)
+ return sprintf(bufp, ":NA");
+
+ len = sprintf(bufp, ":%08x", ddata->revision);
+
+ return len;
+}
+
+static int sysc_show_reg(struct sysc *ddata,
+ char *bufp, enum sysc_registers reg)
+{
+ if (ddata->offsets[reg] < 0)
+ return sprintf(bufp, ":NA");
+
+ return sprintf(bufp, ":%x", ddata->offsets[reg]);
+}
+
+static int sysc_show_name(char *bufp, struct sysc *ddata)
+{
+ if (!ddata->name)
+ return 0;
+
+ return sprintf(bufp, ":%s", ddata->name);
+}
+
+/**
+ * sysc_show_registers - show information about interconnect target module
+ * @ddata: device driver data
+ */
+static void sysc_show_registers(struct sysc *ddata)
+{
+ char buf[128];
+ char *bufp = buf;
+ int i;
+
+ for (i = 0; i < SYSC_MAX_REGS; i++)
+ bufp += sysc_show_reg(ddata, bufp, i);
+
+ bufp += sysc_show_rev(bufp, ddata);
+ bufp += sysc_show_name(bufp, ddata);
+
+ dev_dbg(ddata->dev, "%llx:%x%s\n",
+ ddata->module_pa, ddata->module_size,
+ buf);
+}
+
+/**
+ * sysc_write_sysconfig - handle sysconfig quirks for register write
+ * @ddata: device driver data
+ * @value: register value
+ */
+static void sysc_write_sysconfig(struct sysc *ddata, u32 value)
+{
+ if (ddata->module_unlock_quirk)
+ ddata->module_unlock_quirk(ddata);
+
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], value);
+
+ if (ddata->module_lock_quirk)
+ ddata->module_lock_quirk(ddata);
+}
+
+#define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1)
+#define SYSC_CLOCACT_ICK 2
+
+/* Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle() */
+static int sysc_enable_module(struct device *dev)
+{
+ struct sysc *ddata;
+ const struct sysc_regbits *regbits;
+ u32 reg, idlemodes, best_mode;
+ int error;
+
+ ddata = dev_get_drvdata(dev);
+
+ /*
+ * Some modules like DSS reset automatically on idle. Enable optional
+ * reset clocks and wait for OCP softreset to complete.
+ */
+ if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) {
+ error = sysc_enable_opt_clocks(ddata);
+ if (error) {
+ dev_err(ddata->dev,
+ "Optional clocks failed for enable: %i\n",
+ error);
+ return error;
+ }
+ }
+ /*
+ * Some modules like i2c and hdq1w have unusable reset status unless
+ * the module reset quirk is enabled. Skip status check on enable.
+ */
+ if (!(ddata->cfg.quirks & SYSC_MODULE_QUIRK_ENA_RESETDONE)) {
+ error = sysc_wait_softreset(ddata);
+ if (error)
+ dev_warn(ddata->dev, "OCP softreset timed out\n");
+ }
+ if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET)
+ sysc_disable_opt_clocks(ddata);
+
+ /*
+ * Some subsystem private interconnects, like DSS top level module,
+ * need only the automatic OCP softreset handling with no sysconfig
+ * register bits to configure.
+ */
+ if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
+ return 0;
+
+ regbits = ddata->cap->regbits;
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ /*
+ * Set CLOCKACTIVITY, we only use it for ick. And we only configure it
+ * based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware
+ * capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag.
+ */
+ if (regbits->clkact_shift >= 0 &&
+ (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT))
+ reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift;
+
+ /* Set SIDLE mode */
+ idlemodes = ddata->cfg.sidlemodes;
+ if (!idlemodes || regbits->sidle_shift < 0)
+ goto set_midle;
+
+ if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE |
+ SYSC_QUIRK_SWSUP_SIDLE_ACT)) {
+ best_mode = SYSC_IDLE_NO;
+
+ /* Clear WAKEUP */
+ if (regbits->enwkup_shift >= 0 &&
+ ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
+ reg &= ~BIT(regbits->enwkup_shift);
+ } else {
+ best_mode = fls(ddata->cfg.sidlemodes) - 1;
+ if (best_mode > SYSC_IDLE_MASK) {
+ dev_err(dev, "%s: invalid sidlemode\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Set WAKEUP */
+ if (regbits->enwkup_shift >= 0 &&
+ ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
+ reg |= BIT(regbits->enwkup_shift);
+ }
+
+ reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
+ reg |= best_mode << regbits->sidle_shift;
+ sysc_write_sysconfig(ddata, reg);
+
+set_midle:
+ /* Set MIDLE mode */
+ idlemodes = ddata->cfg.midlemodes;
+ if (!idlemodes || regbits->midle_shift < 0)
+ goto set_autoidle;
+
+ best_mode = fls(ddata->cfg.midlemodes) - 1;
+ if (best_mode > SYSC_IDLE_MASK) {
+ dev_err(dev, "%s: invalid midlemode\n", __func__);
+ error = -EINVAL;
+ goto save_context;
+ }
+
+ if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
+ best_mode = SYSC_IDLE_NO;
+
+ reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
+ reg |= best_mode << regbits->midle_shift;
+ sysc_write_sysconfig(ddata, reg);
+
+set_autoidle:
+ /* Autoidle bit must be enabled separately if available */
+ if (regbits->autoidle_shift >= 0 &&
+ ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) {
+ reg |= 1 << regbits->autoidle_shift;
+ sysc_write_sysconfig(ddata, reg);
+ }
+
+ error = 0;
+
+save_context:
+ /* Save context and flush posted write */
+ ddata->sysconfig = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ if (ddata->module_enable_quirk)
+ ddata->module_enable_quirk(ddata);
+
+ return error;
+}
+
+static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
+{
+ if (idlemodes & BIT(SYSC_IDLE_SMART_WKUP))
+ *best_mode = SYSC_IDLE_SMART_WKUP;
+ else if (idlemodes & BIT(SYSC_IDLE_SMART))
+ *best_mode = SYSC_IDLE_SMART;
+ else if (idlemodes & BIT(SYSC_IDLE_FORCE))
+ *best_mode = SYSC_IDLE_FORCE;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle() */
+static int sysc_disable_module(struct device *dev)
+{
+ struct sysc *ddata;
+ const struct sysc_regbits *regbits;
+ u32 reg, idlemodes, best_mode;
+ int ret;
+
+ ddata = dev_get_drvdata(dev);
+ if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
+ return 0;
+
+ if (ddata->module_disable_quirk)
+ ddata->module_disable_quirk(ddata);
+
+ regbits = ddata->cap->regbits;
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ /* Set MIDLE mode */
+ idlemodes = ddata->cfg.midlemodes;
+ if (!idlemodes || regbits->midle_shift < 0)
+ goto set_sidle;
+
+ ret = sysc_best_idle_mode(idlemodes, &best_mode);
+ if (ret) {
+ dev_err(dev, "%s: invalid midlemode\n", __func__);
+ return ret;
+ }
+
+ if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_MSTANDBY) ||
+ ddata->cfg.quirks & (SYSC_QUIRK_FORCE_MSTANDBY))
+ best_mode = SYSC_IDLE_FORCE;
+
+ reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
+ reg |= best_mode << regbits->midle_shift;
+ sysc_write_sysconfig(ddata, reg);
+
+set_sidle:
+ /* Set SIDLE mode */
+ idlemodes = ddata->cfg.sidlemodes;
+ if (!idlemodes || regbits->sidle_shift < 0) {
+ ret = 0;
+ goto save_context;
+ }
+
+ if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE) {
+ best_mode = SYSC_IDLE_FORCE;
+ } else {
+ ret = sysc_best_idle_mode(idlemodes, &best_mode);
+ if (ret) {
+ dev_err(dev, "%s: invalid sidlemode\n", __func__);
+ ret = -EINVAL;
+ goto save_context;
+ }
+ }
+
+ if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE_ACT) {
+ /* Set WAKEUP */
+ if (regbits->enwkup_shift >= 0 &&
+ ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
+ reg |= BIT(regbits->enwkup_shift);
+ }
+
+ reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
+ reg |= best_mode << regbits->sidle_shift;
+ if (regbits->autoidle_shift >= 0 &&
+ ddata->cfg.sysc_val & BIT(regbits->autoidle_shift))
+ reg |= 1 << regbits->autoidle_shift;
+ sysc_write_sysconfig(ddata, reg);
+
+ ret = 0;
+
+save_context:
+ /* Save context and flush posted write */
+ ddata->sysconfig = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ return ret;
+}
+
+static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev,
+ struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata;
+ int error;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (!pdata)
+ return 0;
+
+ if (!pdata->idle_module)
+ return -ENODEV;
+
+ error = pdata->idle_module(dev, &ddata->cookie);
+ if (error)
+ dev_err(dev, "%s: could not idle: %i\n",
+ __func__, error);
+
+ reset_control_assert(ddata->rsts);
+
+ return 0;
+}
+
+static int __maybe_unused sysc_runtime_resume_legacy(struct device *dev,
+ struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata;
+ int error;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (!pdata)
+ return 0;
+
+ if (!pdata->enable_module)
+ return -ENODEV;
+
+ error = pdata->enable_module(dev, &ddata->cookie);
+ if (error)
+ dev_err(dev, "%s: could not enable: %i\n",
+ __func__, error);
+
+ reset_control_deassert(ddata->rsts);
+
+ return 0;
+}
+
+static int __maybe_unused sysc_runtime_suspend(struct device *dev)
+{
+ struct sysc *ddata;
+ int error = 0;
+
+ ddata = dev_get_drvdata(dev);
+
+ if (!ddata->enabled)
+ return 0;
+
+ sysc_clkdm_deny_idle(ddata);
+
+ if (ddata->legacy_mode) {
+ error = sysc_runtime_suspend_legacy(dev, ddata);
+ if (error)
+ goto err_allow_idle;
+ } else {
+ error = sysc_disable_module(dev);
+ if (error)
+ goto err_allow_idle;
+ }
+
+ sysc_disable_main_clocks(ddata);
+
+ if (sysc_opt_clks_needed(ddata))
+ sysc_disable_opt_clocks(ddata);
+
+ ddata->enabled = false;
+
+err_allow_idle:
+ sysc_clkdm_allow_idle(ddata);
+
+ reset_control_assert(ddata->rsts);
+
+ return error;
+}
+
+static int __maybe_unused sysc_runtime_resume(struct device *dev)
+{
+ struct sysc *ddata;
+ int error = 0;
+
+ ddata = dev_get_drvdata(dev);
+
+ if (ddata->enabled)
+ return 0;
+
+ sysc_clkdm_deny_idle(ddata);
+
+ if (sysc_opt_clks_needed(ddata)) {
+ error = sysc_enable_opt_clocks(ddata);
+ if (error)
+ goto err_allow_idle;
+ }
+
+ error = sysc_enable_main_clocks(ddata);
+ if (error)
+ goto err_opt_clocks;
+
+ reset_control_deassert(ddata->rsts);
+
+ if (ddata->legacy_mode) {
+ error = sysc_runtime_resume_legacy(dev, ddata);
+ if (error)
+ goto err_main_clocks;
+ } else {
+ error = sysc_enable_module(dev);
+ if (error)
+ goto err_main_clocks;
+ }
+
+ ddata->enabled = true;
+
+ sysc_clkdm_allow_idle(ddata);
+
+ return 0;
+
+err_main_clocks:
+ sysc_disable_main_clocks(ddata);
+err_opt_clocks:
+ if (sysc_opt_clks_needed(ddata))
+ sysc_disable_opt_clocks(ddata);
+err_allow_idle:
+ sysc_clkdm_allow_idle(ddata);
+
+ return error;
+}
+
+/*
+ * Checks if device context was lost. Assumes the sysconfig register value
+ * after lost context is different from the configured value. Only works for
+ * enabled devices.
+ *
+ * Eventually we may also want to add support for using the context lost
+ * registers that some SoCs have.
+ */
+static int sysc_check_context(struct sysc *ddata)
+{
+ u32 reg;
+
+ if (!ddata->enabled)
+ return -ENODATA;
+
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+ if (reg == ddata->sysconfig)
+ return 0;
+
+ return -EACCES;
+}
+
+static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
+{
+ struct device *dev = ddata->dev;
+ int error;
+
+ if (ddata->enabled) {
+ /* Nothing to do if enabled and context not lost */
+ error = sysc_check_context(ddata);
+ if (!error)
+ return 0;
+
+ /* Disable target module if it is enabled */
+ error = sysc_runtime_suspend(dev);
+ if (error)
+ dev_warn(dev, "reinit suspend failed: %i\n", error);
+ }
+
+ /* Enable target module */
+ error = sysc_runtime_resume(dev);
+ if (error)
+ dev_warn(dev, "reinit resume failed: %i\n", error);
+
+ /* Some modules like am335x gpmc need reset and restore of sysconfig */
+ if (ddata->cfg.quirks & SYSC_QUIRK_RESET_ON_CTX_LOST) {
+ error = sysc_reset(ddata);
+ if (error)
+ dev_warn(dev, "reinit reset failed: %i\n", error);
+
+ sysc_write_sysconfig(ddata, ddata->sysconfig);
+ }
+
+ if (leave_enabled)
+ return error;
+
+ /* Disable target module if leave_enabled was not set */
+ error = sysc_runtime_suspend(dev);
+ if (error)
+ dev_warn(dev, "reinit suspend failed: %i\n", error);
+
+ return error;
+}
+
+static int __maybe_unused sysc_noirq_suspend(struct device *dev)
+{
+ struct sysc *ddata;
+
+ ddata = dev_get_drvdata(dev);
+
+ if (ddata->cfg.quirks &
+ (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
+ return 0;
+
+ if (!ddata->enabled)
+ return 0;
+
+ ddata->needs_resume = 1;
+
+ return sysc_runtime_suspend(dev);
+}
+
+static int __maybe_unused sysc_noirq_resume(struct device *dev)
+{
+ struct sysc *ddata;
+ int error = 0;
+
+ ddata = dev_get_drvdata(dev);
+
+ if (ddata->cfg.quirks &
+ (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
+ return 0;
+
+ if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) {
+ error = sysc_reinit_module(ddata, ddata->needs_resume);
+ if (error)
+ dev_warn(dev, "noirq_resume failed: %i\n", error);
+ } else if (ddata->needs_resume) {
+ error = sysc_runtime_resume(dev);
+ if (error)
+ dev_warn(dev, "noirq_resume failed: %i\n", error);
+ }
+
+ ddata->needs_resume = 0;
+
+ return error;
+}
+
+static const struct dev_pm_ops sysc_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_noirq_suspend, sysc_noirq_resume)
+ SET_RUNTIME_PM_OPS(sysc_runtime_suspend,
+ sysc_runtime_resume,
+ NULL)
+};
+
+/* Module revision register based quirks */
+struct sysc_revision_quirk {
+ const char *name;
+ u32 base;
+ int rev_offset;
+ int sysc_offset;
+ int syss_offset;
+ u32 revision;
+ u32 revision_mask;
+ u32 quirks;
+};
+
+#define SYSC_QUIRK(optname, optbase, optrev, optsysc, optsyss, \
+ optrev_val, optrevmask, optquirkmask) \
+ { \
+ .name = (optname), \
+ .base = (optbase), \
+ .rev_offset = (optrev), \
+ .sysc_offset = (optsysc), \
+ .syss_offset = (optsyss), \
+ .revision = (optrev_val), \
+ .revision_mask = (optrevmask), \
+ .quirks = (optquirkmask), \
+ }
+
+static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+ /* These drivers need to be fixed to not use pm_runtime_irq_safe() */
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ /* Uarts on omap4 and later */
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+
+ /* Quirks that need to be set based on the module address */
+ SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
+ SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
+ SYSC_QUIRK_SWSUP_SIDLE),
+
+ /* Quirks that need to be set based on detected module */
+ SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff,
+ SYSC_MODULE_QUIRK_AESS),
+ /* Errata i893 handling for dra7 dcan1 and 2 */
+ SYSC_QUIRK("dcan", 0x4ae3c000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
+ SYSC_QUIRK_CLKDM_NOAUTO),
+ SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
+ SYSC_QUIRK_CLKDM_NOAUTO),
+ SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff,
+ SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
+ SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000040, 0xffffffff,
+ SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
+ SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000061, 0xffffffff,
+ SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
+ SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
+ SYSC_QUIRK_CLKDM_NOAUTO),
+ SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
+ SYSC_QUIRK_CLKDM_NOAUTO),
+ SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffff00ff,
+ SYSC_QUIRK_OPT_CLKS_IN_RESET),
+ SYSC_QUIRK("gpmc", 0, 0, 0x10, 0x14, 0x00000060, 0xffffffff,
+ SYSC_QUIRK_REINIT_ON_CTX_LOST | SYSC_QUIRK_RESET_ON_CTX_LOST |
+ SYSC_QUIRK_GPMC_DEBUG),
+ SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
+ SYSC_QUIRK_OPT_CLKS_NEEDED),
+ SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
+ SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
+ SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
+ SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
+ SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000036, 0x000000ff,
+ SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
+ SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x0000003c, 0x000000ff,
+ SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
+ SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000040, 0x000000ff,
+ SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
+ SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0,
+ SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
+ SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0),
+ SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000, 0xffffffff,
+ SYSC_MODULE_QUIRK_SGX),
+ SYSC_QUIRK("lcdc", 0, 0, 0x54, -ENODEV, 0x4f201000, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE),
+ SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0,
+ SYSC_MODULE_QUIRK_RTC_UNLOCK),
+ SYSC_QUIRK("tptc", 0, 0, 0x10, -ENODEV, 0x40006c00, 0xffffefff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000033,
+ 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
+ SYSC_MODULE_QUIRK_OTG),
+ SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000040,
+ 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
+ SYSC_MODULE_QUIRK_OTG),
+ SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
+ 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
+ SYSC_MODULE_QUIRK_OTG),
+ SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
+ SYSC_QUIRK_REINIT_ON_CTX_LOST),
+ SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
+ SYSC_MODULE_QUIRK_WDT),
+ /* PRUSS on am3, am4 and am5 */
+ SYSC_QUIRK("pruss", 0, 0x26000, 0x26004, -ENODEV, 0x47000000, 0xff000000,
+ SYSC_MODULE_QUIRK_PRUSS),
+ /* Watchdog on am3 and am4 */
+ SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
+ SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE),
+
+#ifdef DEBUG
+ SYSC_QUIRK("adc", 0, 0, 0x10, -ENODEV, 0x47300001, 0xffffffff, 0),
+ SYSC_QUIRK("atl", 0, 0, -ENODEV, -ENODEV, 0x0a070100, 0xffffffff, 0),
+ SYSC_QUIRK("cm", 0, 0, -ENODEV, -ENODEV, 0x40000301, 0xffffffff, 0),
+ SYSC_QUIRK("control", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
+ SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
+ 0xffff00f0, 0),
+ SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 0),
+ SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0x4edb1902, 0xffffffff, 0),
+ SYSC_QUIRK("dispc", 0x4832a400, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
+ SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000051, 0xffffffff, 0),
+ SYSC_QUIRK("dmic", 0, 0, 0x10, -ENODEV, 0x50010000, 0xffffffff, 0),
+ SYSC_QUIRK("dsi", 0x58004000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
+ SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
+ SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("dsi", 0x58009000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("dwc3", 0, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 0),
+ SYSC_QUIRK("d2d", 0x4a0b6000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
+ SYSC_QUIRK("d2d", 0x4a0cd000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
+ SYSC_QUIRK("elm", 0x48080000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, 0),
+ SYSC_QUIRK("emif", 0, 0, -ENODEV, -ENODEV, 0x40441403, 0xffff0fff, 0),
+ SYSC_QUIRK("emif", 0, 0, -ENODEV, -ENODEV, 0x50440500, 0xffffffff, 0),
+ SYSC_QUIRK("epwmss", 0, 0, 0x4, -ENODEV, 0x47400001, 0xffffffff, 0),
+ SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -ENODEV, 0, 0, 0),
+ SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -ENODEV, 0x40000000, 0xffffffff, 0),
+ SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50031d00, 0xffffffff, 0),
+ SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0),
+ SYSC_QUIRK("iss", 0, 0, 0x10, -ENODEV, 0x40000101, 0xffffffff, 0),
+ SYSC_QUIRK("keypad", 0x4a31c000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, 0),
+ SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44307b02, 0xffffffff, 0),
+ SYSC_QUIRK("mcbsp", 0, -ENODEV, 0x8c, -ENODEV, 0, 0, 0),
+ SYSC_QUIRK("mcspi", 0, 0, 0x10, -ENODEV, 0x40300a0b, 0xffff00ff, 0),
+ SYSC_QUIRK("mcspi", 0, 0, 0x110, 0x114, 0x40300a0b, 0xffffffff, 0),
+ SYSC_QUIRK("mailbox", 0, 0, 0x10, -ENODEV, 0x00000400, 0xffffffff, 0),
+ SYSC_QUIRK("m3", 0, 0, -ENODEV, -ENODEV, 0x5f580105, 0x0fff0f00, 0),
+ SYSC_QUIRK("ocp2scp", 0, 0, 0x10, 0x14, 0x50060005, 0xfffffff0, 0),
+ SYSC_QUIRK("ocp2scp", 0, 0, -ENODEV, -ENODEV, 0x50060007, 0xffffffff, 0),
+ SYSC_QUIRK("padconf", 0, 0, 0x10, -ENODEV, 0x4fff0800, 0xffffffff, 0),
+ SYSC_QUIRK("padconf", 0, 0, -ENODEV, -ENODEV, 0x40001100, 0xffffffff, 0),
+ SYSC_QUIRK("pcie", 0x51000000, -ENODEV, -ENODEV, -ENODEV, 0, 0, 0),
+ SYSC_QUIRK("pcie", 0x51800000, -ENODEV, -ENODEV, -ENODEV, 0, 0, 0),
+ SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000100, 0xffffffff, 0),
+ SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x00004102, 0xffffffff, 0),
+ SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0),
+ SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
+ SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
+ SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
+ SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0),
+ SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0),
+ SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x40000900, 0xffffffff, 0),
+ SYSC_QUIRK("scrm", 0, 0, -ENODEV, -ENODEV, 0x00000010, 0xffffffff, 0),
+ SYSC_QUIRK("sdio", 0, 0, 0x10, -ENODEV, 0x40202301, 0xffff0ff0, 0),
+ SYSC_QUIRK("sdio", 0, 0x2fc, 0x110, 0x114, 0x31010000, 0xffffffff, 0),
+ SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0),
+ SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff, 0),
+ SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0),
+ SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0),
+ SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff, 0),
+ SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff, 0),
+ SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0),
+ SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0),
+ SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000013, 0xffffffff, 0),
+ SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 0),
+ /* Some timers on omap4 and later */
+ SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff, 0),
+ SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff, 0),
+ SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000011, 0xffffffff, 0),
+ SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0),
+ SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0),
+ SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0),
+ SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0),
+ SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0),
+ SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0),
+#endif
+};
+
+/*
+ * Early quirks based only on the module base and register offsets, needed
+ * before the module revision can be read
+ */
+static void sysc_init_early_quirks(struct sysc *ddata)
+{
+ const struct sysc_revision_quirk *q;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
+ q = &sysc_revision_quirks[i];
+
+ if (!q->base)
+ continue;
+
+ if (q->base != ddata->module_pa)
+ continue;
+
+ if (q->rev_offset != ddata->offsets[SYSC_REVISION])
+ continue;
+
+ if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
+ continue;
+
+ if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
+ continue;
+
+ ddata->name = q->name;
+ ddata->cfg.quirks |= q->quirks;
+ }
+}
+
+/* Quirks that also consider the revision register value */
+static void sysc_init_revision_quirks(struct sysc *ddata)
+{
+ const struct sysc_revision_quirk *q;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
+ q = &sysc_revision_quirks[i];
+
+ if (q->base && q->base != ddata->module_pa)
+ continue;
+
+ if (q->rev_offset != ddata->offsets[SYSC_REVISION])
+ continue;
+
+ if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
+ continue;
+
+ if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
+ continue;
+
+ if (q->revision == ddata->revision ||
+ (q->revision & q->revision_mask) ==
+ (ddata->revision & q->revision_mask)) {
+ ddata->name = q->name;
+ ddata->cfg.quirks |= q->quirks;
+ }
+ }
+}
+
+/*
+ * DSS needs dispc outputs disabled to reset modules. Returns mask of
+ * enabled DSS interrupts. Eventually we may be able to do this on
+ * dispc init rather than top-level DSS init.
+ */
+static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
+ bool disable)
+{
+ bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false;
+ const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1);
+ int manager_count;
+ bool framedonetv_irq = true;
+ u32 val, irq_mask = 0;
+
+ switch (sysc_soc->soc) {
+ case SOC_2420 ... SOC_3630:
+ manager_count = 2;
+ framedonetv_irq = false;
+ break;
+ case SOC_4430 ... SOC_4470:
+ manager_count = 3;
+ break;
+ case SOC_5430:
+ case SOC_DRA7:
+ manager_count = 4;
+ break;
+ case SOC_AM4:
+ manager_count = 1;
+ framedonetv_irq = false;
+ break;
+ case SOC_UNKNOWN:
+ default:
+ return 0;
+ }
+
+ /* Remap the whole module range to be able to reset dispc outputs */
+ devm_iounmap(ddata->dev, ddata->module_va);
+ ddata->module_va = devm_ioremap(ddata->dev,
+ ddata->module_pa,
+ ddata->module_size);
+ if (!ddata->module_va)
+ return -EIO;
+
+ /* DISP_CONTROL, shut down lcd and digit on disable if enabled */
+ val = sysc_read(ddata, dispc_offset + 0x40);
+ lcd_en = val & lcd_en_mask;
+ digit_en = val & digit_en_mask;
+ if (lcd_en)
+ irq_mask |= BIT(0); /* FRAMEDONE */
+ if (digit_en) {
+ if (framedonetv_irq)
+ irq_mask |= BIT(24); /* FRAMEDONETV */
+ else
+ irq_mask |= BIT(2) | BIT(3); /* EVSYNC bits */
+ }
+ if (disable && (lcd_en || digit_en))
+ sysc_write(ddata, dispc_offset + 0x40,
+ val & ~(lcd_en_mask | digit_en_mask));
+
+ if (manager_count <= 2)
+ return irq_mask;
+
+ /* DISPC_CONTROL2 */
+ val = sysc_read(ddata, dispc_offset + 0x238);
+ lcd2_en = val & lcd_en_mask;
+ if (lcd2_en)
+ irq_mask |= BIT(22); /* FRAMEDONE2 */
+ if (disable && lcd2_en)
+ sysc_write(ddata, dispc_offset + 0x238,
+ val & ~lcd_en_mask);
+
+ if (manager_count <= 3)
+ return irq_mask;
+
+ /* DISPC_CONTROL3 */
+ val = sysc_read(ddata, dispc_offset + 0x848);
+ lcd3_en = val & lcd_en_mask;
+ if (lcd3_en)
+ irq_mask |= BIT(30); /* FRAMEDONE3 */
+ if (disable && lcd3_en)
+ sysc_write(ddata, dispc_offset + 0x848,
+ val & ~lcd_en_mask);
+
+ return irq_mask;
+}
+
+/* DSS needs child outputs disabled and SDI registers cleared for reset */
+static void sysc_pre_reset_quirk_dss(struct sysc *ddata)
+{
+ const int dispc_offset = 0x1000;
+ int error;
+ u32 irq_mask, val;
+
+ /* Get enabled outputs */
+ irq_mask = sysc_quirk_dispc(ddata, dispc_offset, false);
+ if (!irq_mask)
+ return;
+
+ /* Clear IRQSTATUS */
+ sysc_write(ddata, dispc_offset + 0x18, irq_mask);
+
+ /* Disable outputs */
+ val = sysc_quirk_dispc(ddata, dispc_offset, true);
+
+ /* Poll IRQSTATUS */
+ error = readl_poll_timeout(ddata->module_va + dispc_offset + 0x18,
+ val, val != irq_mask, 100, 50);
+ if (error)
+ dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n",
+ __func__, val, irq_mask);
+
+ if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35) {
+ /* Clear DSS_SDI_CONTROL */
+ sysc_write(ddata, 0x44, 0);
+
+ /* Clear DSS_PLL_CONTROL */
+ sysc_write(ddata, 0x48, 0);
+ }
+
+ /* Clear DSS_CONTROL to switch DSS clock sources to PRCM if not done yet */
+ sysc_write(ddata, 0x40, 0);
+}
+
+/* 1-wire needs module's internal clocks enabled for reset */
+static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata)
+{
+ int offset = 0x0c; /* HDQ_CTRL_STATUS */
+ u16 val;
+
+ val = sysc_read(ddata, offset);
+ val |= BIT(5);
+ sysc_write(ddata, offset, val);
+}
+
+/* AESS (Audio Engine SubSystem) needs autogating set after enable */
+static void sysc_module_enable_quirk_aess(struct sysc *ddata)
+{
+ int offset = 0x7c; /* AESS_AUTO_GATING_ENABLE */
+
+ sysc_write(ddata, offset, 1);
+}
+
+/* I2C needs to be disabled for reset */
+static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable)
+{
+ int offset;
+ u16 val;
+
+ /* I2C_CON, omap2/3 is different from omap4 and later */
+ if ((ddata->revision & 0xffffff00) == 0x001f0000)
+ offset = 0x24;
+ else
+ offset = 0xa4;
+
+ /* I2C_EN */
+ val = sysc_read(ddata, offset);
+ if (enable)
+ val |= BIT(15);
+ else
+ val &= ~BIT(15);
+ sysc_write(ddata, offset, val);
+}
+
+static void sysc_pre_reset_quirk_i2c(struct sysc *ddata)
+{
+ sysc_clk_quirk_i2c(ddata, false);
+}
+
+static void sysc_post_reset_quirk_i2c(struct sysc *ddata)
+{
+ sysc_clk_quirk_i2c(ddata, true);
+}
+
+/* RTC on am3 and 4 needs to be unlocked and locked for sysconfig */
+static void sysc_quirk_rtc(struct sysc *ddata, bool lock)
+{
+ u32 val, kick0_val = 0, kick1_val = 0;
+ unsigned long flags;
+ int error;
+
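+ /* the kick register pair unlocks the RTC; writing zeros locks it again */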
+ if (!lock) {
+ kick0_val = 0x83e70b13;
+ kick1_val = 0x95a4f1e0;
+ }
+
+ local_irq_save(flags);
+ /* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */
+ error = readl_poll_timeout_atomic(ddata->module_va + 0x44, val,
+ !(val & BIT(0)), 100, 50);
+ if (error)
+ dev_warn(ddata->dev, "rtc busy timeout\n");
+ /* Now we have ~15 microseconds to read/write various registers */
+ sysc_write(ddata, 0x6c, kick0_val);
+ sysc_write(ddata, 0x70, kick1_val);
+ local_irq_restore(flags);
+}
+
+static void sysc_module_unlock_quirk_rtc(struct sysc *ddata)
+{
+ sysc_quirk_rtc(ddata, false);
+}
+
+static void sysc_module_lock_quirk_rtc(struct sysc *ddata)
+{
+ sysc_quirk_rtc(ddata, true);
+}
+
+/* OTG omap2430 glue layer up to omap4 needs OTG_FORCESTDBY configured */
+static void sysc_module_enable_quirk_otg(struct sysc *ddata)
+{
+ int offset = 0x414; /* OTG_FORCESTDBY */
+
+ sysc_write(ddata, offset, 0);
+}
+
+static void sysc_module_disable_quirk_otg(struct sysc *ddata)
+{
+ int offset = 0x414; /* OTG_FORCESTDBY */
+ u32 val = BIT(0); /* ENABLEFORCE */
+
+ sysc_write(ddata, offset, val);
+}
+
+/* 36xx SGX needs a quirk to bypass OCP IPG interrupt logic */
+static void sysc_module_enable_quirk_sgx(struct sysc *ddata)
+{
+ int offset = 0xff08; /* OCP_DEBUG_CONFIG */
+ u32 val = BIT(31); /* THALIA_INT_BYPASS */
+
+ sysc_write(ddata, offset, val);
+}
+
+/* Watchdog timer needs a disable sequence after reset */
+static void sysc_reset_done_quirk_wdt(struct sysc *ddata)
+{
+ int wps, spr, error;
+ u32 val;
+
+ wps = 0x34;
+ spr = 0x48;
+
+ sysc_write(ddata, spr, 0xaaaa);
+ error = readl_poll_timeout(ddata->module_va + wps, val,
+ !(val & 0x10), 100,
+ MAX_MODULE_SOFTRESET_WAIT);
+ if (error)
+ dev_warn(ddata->dev, "wdt disable step1 failed\n");
+
+ sysc_write(ddata, spr, 0x5555);
+ error = readl_poll_timeout(ddata->module_va + wps, val,
+ !(val & 0x10), 100,
+ MAX_MODULE_SOFTRESET_WAIT);
+ if (error)
+ dev_warn(ddata->dev, "wdt disable step2 failed\n");
+}
+
+/* PRUSS needs to set MSTANDBY_INIT in order to idle properly */
+static void sysc_module_disable_quirk_pruss(struct sysc *ddata)
+{
+ u32 reg;
+
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+ reg |= SYSC_PRUSS_STANDBY_INIT;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+}
+
+static void sysc_init_module_quirks(struct sysc *ddata)
+{
+ if (ddata->legacy_mode || !ddata->name)
+ return;
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) {
+ ddata->pre_reset_quirk = sysc_pre_reset_quirk_hdq1w;
+
+ return;
+ }
+
+#ifdef CONFIG_OMAP_GPMC_DEBUG
+ if (ddata->cfg.quirks & SYSC_QUIRK_GPMC_DEBUG) {
+ ddata->cfg.quirks |= SYSC_QUIRK_NO_RESET_ON_INIT;
+
+ return;
+ }
+#endif
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) {
+ ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c;
+ ddata->post_reset_quirk = sysc_post_reset_quirk_i2c;
+
+ return;
+ }
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS)
+ ddata->module_enable_quirk = sysc_module_enable_quirk_aess;
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_DSS_RESET)
+ ddata->pre_reset_quirk = sysc_pre_reset_quirk_dss;
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_RTC_UNLOCK) {
+ ddata->module_unlock_quirk = sysc_module_unlock_quirk_rtc;
+ ddata->module_lock_quirk = sysc_module_lock_quirk_rtc;
+
+ return;
+ }
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_OTG) {
+ ddata->module_enable_quirk = sysc_module_enable_quirk_otg;
+ ddata->module_disable_quirk = sysc_module_disable_quirk_otg;
+ }
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
+ ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT) {
+ ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
+ ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
+ }
+
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS)
+ ddata->module_disable_quirk = sysc_module_disable_quirk_pruss;
+}
+
+static int sysc_clockdomain_init(struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
+ struct clk *fck = NULL, *ick = NULL;
+ int error;
+
+ if (!pdata || !pdata->init_clockdomain)
+ return 0;
+
+ switch (ddata->nr_clocks) {
+ case 2:
+ ick = ddata->clocks[SYSC_ICK];
+ fallthrough;
+ case 1:
+ fck = ddata->clocks[SYSC_FCK];
+ break;
+ case 0:
+ return 0;
+ }
+
+ error = pdata->init_clockdomain(ddata->dev, fck, ick, &ddata->cookie);
+ if (!error || error == -ENODEV)
+ return 0;
+
+ return error;
+}
+
+/*
+ * Note that pdata->init_module() typically does a reset first. After
+ * pdata->init_module() is done, PM runtime can be used for the interconnect
+ * target module.
+ */
+static int sysc_legacy_init(struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
+ int error;
+
+ if (!pdata || !pdata->init_module)
+ return 0;
+
+ error = pdata->init_module(ddata->dev, ddata->mdata, &ddata->cookie);
+ if (error == -EEXIST)
+ error = 0;
+
+ return error;
+}
+
+/*
+ * Note that the caller must ensure the interconnect target module is enabled
+ * before calling reset. Otherwise reset will not complete.
+ */
+static int sysc_reset(struct sysc *ddata)
+{
+ int sysc_offset, sysc_val, error;
+ u32 sysc_mask;
+
+ sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
+
+ if (ddata->legacy_mode ||
+ ddata->cap->regbits->srst_shift < 0 ||
+ ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
+ return 0;
+
+ sysc_mask = BIT(ddata->cap->regbits->srst_shift);
+
+ if (ddata->pre_reset_quirk)
+ ddata->pre_reset_quirk(ddata);
+
+ if (sysc_offset >= 0) {
+ sysc_val = sysc_read_sysconfig(ddata);
+ sysc_val |= sysc_mask;
+ sysc_write(ddata, sysc_offset, sysc_val);
+
+ /*
+ * Some devices need a delay before reading registers
+ * after reset. Presumably a srst_udelay is not needed
+ * for devices that use a rstctrl register reset.
+ */
+ if (ddata->cfg.srst_udelay)
+ fsleep(ddata->cfg.srst_udelay);
+
+ /*
+ * Flush posted write. For devices needing srst_udelay
+ * this should trigger an interconnect error if the
+ * srst_udelay value is needed but not configured.
+ */
+ sysc_val = sysc_read_sysconfig(ddata);
+ }
+
+ if (ddata->post_reset_quirk)
+ ddata->post_reset_quirk(ddata);
+
+ error = sysc_wait_softreset(ddata);
+ if (error)
+ dev_warn(ddata->dev, "OCP softreset timed out\n");
+
+ if (ddata->reset_done_quirk)
+ ddata->reset_done_quirk(ddata);
+
+ return error;
+}
+
+/*
+ * At this point the module is configured enough to read the revision but
+ * module may not be completely configured yet to use PM runtime. Enable
+ * all clocks directly during init to configure the quirks needed for PM
+ * runtime based on the revision register.
+ */
+static int sysc_init_module(struct sysc *ddata)
+{
+ bool rstctrl_deasserted = false;
+ int error = 0;
+
+ error = sysc_clockdomain_init(ddata);
+ if (error)
+ return error;
+
+ sysc_clkdm_deny_idle(ddata);
+
+ /*
+ * Always enable clocks. The bootloader may or may not have enabled
+ * the related clocks.
+ */
+ error = sysc_enable_opt_clocks(ddata);
+ if (error)
+ return error;
+
+ error = sysc_enable_main_clocks(ddata);
+ if (error)
+ goto err_opt_clocks;
+
+ if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) {
+ error = reset_control_deassert(ddata->rsts);
+ if (error)
+ goto err_main_clocks;
+ rstctrl_deasserted = true;
+ }
+
+ ddata->revision = sysc_read_revision(ddata);
+ sysc_init_revision_quirks(ddata);
+ sysc_init_module_quirks(ddata);
+
+ if (ddata->legacy_mode) {
+ error = sysc_legacy_init(ddata);
+ if (error)
+ goto err_main_clocks;
+ }
+
+ if (!ddata->legacy_mode) {
+ error = sysc_enable_module(ddata->dev);
+ if (error)
+ goto err_main_clocks;
+ }
+
+ error = sysc_reset(ddata);
+ if (error)
+ dev_err(ddata->dev, "Reset failed with %d\n", error);
+
+ if (error && !ddata->legacy_mode)
+ sysc_disable_module(ddata->dev);
+
+err_main_clocks:
+ if (error)
+ sysc_disable_main_clocks(ddata);
+err_opt_clocks:
+ /* No re-enable of clockdomain autoidle to prevent module autoidle */
+ if (error) {
+ sysc_disable_opt_clocks(ddata);
+ sysc_clkdm_allow_idle(ddata);
+ }
+
+ if (error && rstctrl_deasserted &&
+ !(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
+ reset_control_assert(ddata->rsts);
+
+ return error;
+}
+
+static int sysc_init_sysc_mask(struct sysc *ddata)
+{
+ struct device_node *np = ddata->dev->of_node;
+ int error;
+ u32 val;
+
+ error = of_property_read_u32(np, "ti,sysc-mask", &val);
+ if (error)
+ return 0;
+
+ ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
+
+ return 0;
+}
+
+static int sysc_init_idlemode(struct sysc *ddata, u8 *idlemodes,
+ const char *name)
+{
+ struct device_node *np = ddata->dev->of_node;
+ struct property *prop;
+ const __be32 *p;
+ u32 val;
+
+ of_property_for_each_u32(np, name, prop, p, val) {
+ if (val >= SYSC_NR_IDLEMODES) {
+ dev_err(ddata->dev, "invalid idlemode: %i\n", val);
+ return -EINVAL;
+ }
+ *idlemodes |= (1 << val);
+ }
+
+ return 0;
+}
+
+static int sysc_init_idlemodes(struct sysc *ddata)
+{
+ int error;
+
+ error = sysc_init_idlemode(ddata, &ddata->cfg.midlemodes,
+ "ti,sysc-midle");
+ if (error)
+ return error;
+
+ error = sysc_init_idlemode(ddata, &ddata->cfg.sidlemodes,
+ "ti,sysc-sidle");
+ if (error)
+ return error;
+
+ return 0;
+}
+
+/*
+ * Only some devices on omap4 and later have SYSCONFIG reset done
+ * bit. We can detect this if there is no SYSSTATUS at all, or the
+ * SYSSTATUS bit 0 is not used. Note that some SYSSTATUS registers
+ * have multiple bits for the child devices like OHCI and EHCI.
+ * Depends on SYSC being parsed first.
+ */
+static int sysc_init_syss_mask(struct sysc *ddata)
+{
+ struct device_node *np = ddata->dev->of_node;
+ int error;
+ u32 val;
+
+ error = of_property_read_u32(np, "ti,syss-mask", &val);
+ if (error) {
+ if ((ddata->cap->type == TI_SYSC_OMAP4 ||
+ ddata->cap->type == TI_SYSC_OMAP4_TIMER) &&
+ (ddata->cfg.sysc_val & SYSC_OMAP4_SOFTRESET))
+ ddata->cfg.quirks |= SYSC_QUIRK_RESET_STATUS;
+
+ return 0;
+ }
+
+ if (!(val & 1) && (ddata->cfg.sysc_val & SYSC_OMAP4_SOFTRESET))
+ ddata->cfg.quirks |= SYSC_QUIRK_RESET_STATUS;
+
+ ddata->cfg.syss_mask = val;
+
+ return 0;
+}
+
+/*
+ * Many child device drivers need to have fck and opt clocks available
+ * to get the clock rate for device internal configuration etc.
+ */
+static int sysc_child_add_named_clock(struct sysc *ddata,
+ struct device *child,
+ const char *name)
+{
+ struct clk *clk;
+ struct clk_lookup *l;
+ int error = 0;
+
+ if (!name)
+ return 0;
+
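+ /* nothing to add if the child can already look up the clock itself */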
+ clk = clk_get(child, name);
+ if (!IS_ERR(clk)) {
+ error = -EEXIST;
+ goto put_clk;
+ }
+
+ clk = clk_get(ddata->dev, name);
+ if (IS_ERR(clk))
+ return -ENODEV;
+
+ l = clkdev_create(clk, name, dev_name(child));
+ if (!l)
+ error = -ENOMEM;
+put_clk:
+ clk_put(clk);
+
+ return error;
+}
+
+static int sysc_child_add_clocks(struct sysc *ddata,
+ struct device *child)
+{
+ int i, error;
+
+ for (i = 0; i < ddata->nr_clocks; i++) {
+ error = sysc_child_add_named_clock(ddata,
+ child,
+ ddata->clock_roles[i]);
+ if (error && error != -EEXIST) {
+ dev_err(ddata->dev, "could not add child clock %s: %i\n",
+ ddata->clock_roles[i], error);
+
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static struct device_type sysc_device_type = {
+};
+
+static struct sysc *sysc_child_to_parent(struct device *dev)
+{
+ struct device *parent = dev->parent;
+
+ if (!parent || parent->type != &sysc_device_type)
+ return NULL;
+
+ return dev_get_drvdata(parent);
+}
+
+static int __maybe_unused sysc_child_runtime_suspend(struct device *dev)
+{
+ struct sysc *ddata;
+ int error;
+
+ ddata = sysc_child_to_parent(dev);
+
+ error = pm_generic_runtime_suspend(dev);
+ if (error)
+ return error;
+
+ if (!ddata->enabled)
+ return 0;
+
+ return sysc_runtime_suspend(ddata->dev);
+}
+
+static int __maybe_unused sysc_child_runtime_resume(struct device *dev)
+{
+ struct sysc *ddata;
+ int error;
+
+ ddata = sysc_child_to_parent(dev);
+
+ if (!ddata->enabled) {
+ error = sysc_runtime_resume(ddata->dev);
+ if (error < 0)
+ dev_err(ddata->dev,
+ "%s error: %i\n", __func__, error);
+ }
+
+ return pm_generic_runtime_resume(dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sysc_child_suspend_noirq(struct device *dev)
+{
+ struct sysc *ddata;
+ int error;
+
+ ddata = sysc_child_to_parent(dev);
+
+ dev_dbg(ddata->dev, "%s %s\n", __func__,
+ ddata->name ? ddata->name : "");
+
+ error = pm_generic_suspend_noirq(dev);
+ if (error) {
+ dev_err(dev, "%s error at %i: %i\n",
+ __func__, __LINE__, error);
+
+ return error;
+ }
+
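+ /* idle a still-active child now and flag it to be resumed on wakeup */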
+ if (!pm_runtime_status_suspended(dev)) {
+ error = pm_generic_runtime_suspend(dev);
+ if (error) {
+ dev_dbg(dev, "%s busy at %i: %i\n",
+ __func__, __LINE__, error);
+
+ return 0;
+ }
+
+ error = sysc_runtime_suspend(ddata->dev);
+ if (error) {
+ dev_err(dev, "%s error at %i: %i\n",
+ __func__, __LINE__, error);
+
+ return error;
+ }
+
+ ddata->child_needs_resume = true;
+ }
+
+ return 0;
+}
+
+static int sysc_child_resume_noirq(struct device *dev)
+{
+ struct sysc *ddata;
+ int error;
+
+ ddata = sysc_child_to_parent(dev);
+
+ dev_dbg(ddata->dev, "%s %s\n", __func__,
+ ddata->name ? ddata->name : "");
+
+ if (ddata->child_needs_resume) {
+ ddata->child_needs_resume = false;
+
+ error = sysc_runtime_resume(ddata->dev);
+ if (error)
+ dev_err(ddata->dev,
+ "%s runtime resume error: %i\n",
+ __func__, error);
+
+ error = pm_generic_runtime_resume(dev);
+ if (error)
+ dev_err(ddata->dev,
+ "%s generic runtime resume: %i\n",
+ __func__, error);
+ }
+
+ return pm_generic_resume_noirq(dev);
+}
+#endif
+
+static struct dev_pm_domain sysc_child_pm_domain = {
+ .ops = {
+ SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend,
+ sysc_child_runtime_resume,
+ NULL)
+ USE_PLATFORM_PM_SLEEP_OPS
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_child_suspend_noirq,
+ sysc_child_resume_noirq)
+ }
+};
+
+/* Caller needs to take list_lock if ever used outside of cpu_pm */
+static void sysc_reinit_modules(struct sysc_soc_info *soc)
+{
+ struct sysc_module *module;
+ struct sysc *ddata;
+
+ list_for_each_entry(module, &sysc_soc->restored_modules, node) {
+ ddata = module->ddata;
+ sysc_reinit_module(ddata, ddata->enabled);
+ }
+}
+
+/**
+ * sysc_context_notifier - optionally reset and restore module after idle
+ * @nb: notifier block
+ * @cmd: unused
+ * @v: unused
+ *
+ * Some interconnect target modules need to be restored, or reset and restored
+ * on the CPU_PM CPU_CLUSTER_PM_EXIT notifier. This is needed at least for am335x
+ * OTG and GPMC target modules even if the modules are unused.
+ */
+static int sysc_context_notifier(struct notifier_block *nb, unsigned long cmd,
+ void *v)
+{
+ struct sysc_soc_info *soc;
+
+ soc = container_of(nb, struct sysc_soc_info, nb);
+
+ switch (cmd) {
+ case CPU_CLUSTER_PM_ENTER:
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */
+ break;
+ case CPU_CLUSTER_PM_EXIT:
+ sysc_reinit_modules(soc);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+/**
+ * sysc_add_restored - optionally add reset and restore quirk handling
+ * @ddata: device data
+ */
+static void sysc_add_restored(struct sysc *ddata)
+{
+ struct sysc_module *restored_module;
+
+ restored_module = kzalloc(sizeof(*restored_module), GFP_KERNEL);
+ if (!restored_module)
+ return;
+
+ restored_module->ddata = ddata;
+
+ mutex_lock(&sysc_soc->list_lock);
+
+ list_add(&restored_module->node, &sysc_soc->restored_modules);
+
+ if (sysc_soc->nb.notifier_call)
+ goto out_unlock;
+
+ sysc_soc->nb.notifier_call = sysc_context_notifier;
+ cpu_pm_register_notifier(&sysc_soc->nb);
+
+out_unlock:
+ mutex_unlock(&sysc_soc->list_lock);
+}
+
+/**
+ * sysc_legacy_idle_quirk - handle children in omap_device compatible way
+ * @ddata: device driver data
+ * @child: child device driver
+ *
+ * Allow idle for child devices as done with _od_runtime_suspend().
+ * Otherwise many child devices will not idle because of the permanent
+ * parent usecount set in pm_runtime_irq_safe().
+ *
+ * Note that the long term solution is to just modify the child device
+ * drivers to not set pm_runtime_irq_safe() and then this can be just
+ * dropped.
+ */
+static void sysc_legacy_idle_quirk(struct sysc *ddata, struct device *child)
+{
+ if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
+ dev_pm_domain_set(child, &sysc_child_pm_domain);
+}
+
+static int sysc_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *device)
+{
+ struct device *dev = device;
+ struct sysc *ddata;
+ int error;
+
+ ddata = sysc_child_to_parent(dev);
+ if (!ddata)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ error = sysc_child_add_clocks(ddata, dev);
+ if (error)
+ return error;
+ sysc_legacy_idle_quirk(ddata, dev);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block sysc_nb = {
+ .notifier_call = sysc_notifier_call,
+};
+
+/* Device tree configured quirks */
+struct sysc_dts_quirk {
+ const char *name;
+ u32 mask;
+};
+
+static const struct sysc_dts_quirk sysc_dts_quirks[] = {
+ { .name = "ti,no-idle-on-init",
+ .mask = SYSC_QUIRK_NO_IDLE_ON_INIT, },
+ { .name = "ti,no-reset-on-init",
+ .mask = SYSC_QUIRK_NO_RESET_ON_INIT, },
+ { .name = "ti,no-idle",
+ .mask = SYSC_QUIRK_NO_IDLE, },
+};
+
+static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
+ bool is_child)
+{
+ const struct property *prop;
+ int i, len;
+
+ for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) {
+ const char *name = sysc_dts_quirks[i].name;
+
+ prop = of_get_property(np, name, &len);
+ if (!prop)
+ continue;
+
+ ddata->cfg.quirks |= sysc_dts_quirks[i].mask;
+ if (is_child) {
+ dev_warn(ddata->dev,
+ "dts flag should be at module level for %s\n",
+ name);
+ }
+ }
+}
+
+static int sysc_init_dts_quirks(struct sysc *ddata)
+{
+ struct device_node *np = ddata->dev->of_node;
+ int error;
+ u32 val;
+
+ ddata->legacy_mode = of_get_property(np, "ti,hwmods", NULL);
+
+ sysc_parse_dts_quirks(ddata, np, false);
+ error = of_property_read_u32(np, "ti,sysc-delay-us", &val);
+ if (!error) {
+ if (val > 255) {
+ dev_warn(ddata->dev, "bad ti,sysc-delay-us: %i\n",
+ val);
+ }
+
+ ddata->cfg.srst_udelay = (u8)val;
+ }
+
+ return 0;
+}
+
+static void sysc_unprepare(struct sysc *ddata)
+{
+ int i;
+
+ if (!ddata->clocks)
+ return;
+
+ for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
+ if (!IS_ERR_OR_NULL(ddata->clocks[i]))
+ clk_unprepare(ddata->clocks[i]);
+ }
+}
+
+/*
+ * Common sysc register bits found on omap2, also known as type1
+ */
+static const struct sysc_regbits sysc_regbits_omap2 = {
+ .dmadisable_shift = -ENODEV,
+ .midle_shift = 12,
+ .sidle_shift = 3,
+ .clkact_shift = 8,
+ .emufree_shift = 5,
+ .enwkup_shift = 2,
+ .srst_shift = 1,
+ .autoidle_shift = 0,
+};
+
+static const struct sysc_capabilities sysc_omap2 = {
+ .type = TI_SYSC_OMAP2,
+ .sysc_mask = SYSC_OMAP2_CLOCKACTIVITY | SYSC_OMAP2_EMUFREE |
+ SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE,
+ .regbits = &sysc_regbits_omap2,
+};
+
+/* All omap2 and 3 timers, and timers 1, 2 & 10 on omap 4 and 5 */
+static const struct sysc_capabilities sysc_omap2_timer = {
+ .type = TI_SYSC_OMAP2_TIMER,
+ .sysc_mask = SYSC_OMAP2_CLOCKACTIVITY | SYSC_OMAP2_EMUFREE |
+ SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE,
+ .regbits = &sysc_regbits_omap2,
+ .mod_quirks = SYSC_QUIRK_USE_CLOCKACT,
+};
+
+/*
+ * SHAM2 (SHA1/MD5) sysc found on omap3, a variant of sysc_regbits_omap2
+ * with different sidle position
+ */
+static const struct sysc_regbits sysc_regbits_omap3_sham = {
+ .dmadisable_shift = -ENODEV,
+ .midle_shift = -ENODEV,
+ .sidle_shift = 4,
+ .clkact_shift = -ENODEV,
+ .enwkup_shift = -ENODEV,
+ .srst_shift = 1,
+ .autoidle_shift = 0,
+ .emufree_shift = -ENODEV,
+};
+
+static const struct sysc_capabilities sysc_omap3_sham = {
+ .type = TI_SYSC_OMAP3_SHAM,
+ .sysc_mask = SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE,
+ .regbits = &sysc_regbits_omap3_sham,
+};
+
+/*
+ * AES register bits found on omap3 and later, a variant of
+ * sysc_regbits_omap2 with different sidle position
+ */
+static const struct sysc_regbits sysc_regbits_omap3_aes = {
+ .dmadisable_shift = -ENODEV,
+ .midle_shift = -ENODEV,
+ .sidle_shift = 6,
+ .clkact_shift = -ENODEV,
+ .enwkup_shift = -ENODEV,
+ .srst_shift = 1,
+ .autoidle_shift = 0,
+ .emufree_shift = -ENODEV,
+};
+
+static const struct sysc_capabilities sysc_omap3_aes = {
+ .type = TI_SYSC_OMAP3_AES,
+ .sysc_mask = SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE,
+ .regbits = &sysc_regbits_omap3_aes,
+};
+
+/*
+ * Common sysc register bits found on omap4, also known as type2
+ */
+static const struct sysc_regbits sysc_regbits_omap4 = {
+ .dmadisable_shift = 16,
+ .midle_shift = 4,
+ .sidle_shift = 2,
+ .clkact_shift = -ENODEV,
+ .enwkup_shift = -ENODEV,
+ .emufree_shift = 1,
+ .srst_shift = 0,
+ .autoidle_shift = -ENODEV,
+};
+
+static const struct sysc_capabilities sysc_omap4 = {
+ .type = TI_SYSC_OMAP4,
+ .sysc_mask = SYSC_OMAP4_DMADISABLE | SYSC_OMAP4_FREEEMU |
+ SYSC_OMAP4_SOFTRESET,
+ .regbits = &sysc_regbits_omap4,
+};
+
+static const struct sysc_capabilities sysc_omap4_timer = {
+ .type = TI_SYSC_OMAP4_TIMER,
+ .sysc_mask = SYSC_OMAP4_DMADISABLE | SYSC_OMAP4_FREEEMU |
+ SYSC_OMAP4_SOFTRESET,
+ .regbits = &sysc_regbits_omap4,
+};
+
+/*
+ * Common sysc register bits found on omap4, also known as type3
+ */
+static const struct sysc_regbits sysc_regbits_omap4_simple = {
+ .dmadisable_shift = -ENODEV,
+ .midle_shift = 2,
+ .sidle_shift = 0,
+ .clkact_shift = -ENODEV,
+ .enwkup_shift = -ENODEV,
+ .srst_shift = -ENODEV,
+ .emufree_shift = -ENODEV,
+ .autoidle_shift = -ENODEV,
+};
+
+static const struct sysc_capabilities sysc_omap4_simple = {
+ .type = TI_SYSC_OMAP4_SIMPLE,
+ .regbits = &sysc_regbits_omap4_simple,
+};
+
+/*
+ * SmartReflex sysc found on omap34xx
+ */
+static const struct sysc_regbits sysc_regbits_omap34xx_sr = {
+ .dmadisable_shift = -ENODEV,
+ .midle_shift = -ENODEV,
+ .sidle_shift = -ENODEV,
+ .clkact_shift = 20,
+ .enwkup_shift = -ENODEV,
+ .srst_shift = -ENODEV,
+ .emufree_shift = -ENODEV,
+ .autoidle_shift = -ENODEV,
+};
+
+static const struct sysc_capabilities sysc_34xx_sr = {
+ .type = TI_SYSC_OMAP34XX_SR,
+ .sysc_mask = SYSC_OMAP2_CLOCKACTIVITY,
+ .regbits = &sysc_regbits_omap34xx_sr,
+ .mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED |
+ SYSC_QUIRK_LEGACY_IDLE,
+};
+
+/*
+ * SmartReflex sysc found on omap36xx and later
+ */
+static const struct sysc_regbits sysc_regbits_omap36xx_sr = {
+ .dmadisable_shift = -ENODEV,
+ .midle_shift = -ENODEV,
+ .sidle_shift = 24,
+ .clkact_shift = -ENODEV,
+ .enwkup_shift = 26,
+ .srst_shift = -ENODEV,
+ .emufree_shift = -ENODEV,
+ .autoidle_shift = -ENODEV,
+};
+
+static const struct sysc_capabilities sysc_36xx_sr = {
+ .type = TI_SYSC_OMAP36XX_SR,
+ .sysc_mask = SYSC_OMAP3_SR_ENAWAKEUP,
+ .regbits = &sysc_regbits_omap36xx_sr,
+ .mod_quirks = SYSC_QUIRK_UNCACHED | SYSC_QUIRK_LEGACY_IDLE,
+};
+
+static const struct sysc_capabilities sysc_omap4_sr = {
+ .type = TI_SYSC_OMAP4_SR,
+ .regbits = &sysc_regbits_omap36xx_sr,
+ .mod_quirks = SYSC_QUIRK_LEGACY_IDLE,
+};
+
+/*
+ * McASP register bits found on omap4 and later
+ */
+static const struct sysc_regbits sysc_regbits_omap4_mcasp = {
+ .dmadisable_shift = -ENODEV,
+ .midle_shift = -ENODEV,
+ .sidle_shift = 0,
+ .clkact_shift = -ENODEV,
+ .enwkup_shift = -ENODEV,
+ .srst_shift = -ENODEV,
+ .emufree_shift = -ENODEV,
+ .autoidle_shift = -ENODEV,
+};
+
+static const struct sysc_capabilities sysc_omap4_mcasp = {
+ .type = TI_SYSC_OMAP4_MCASP,
+ .regbits = &sysc_regbits_omap4_mcasp,
+ .mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
+};
+
+/*
+ * McASP found on dra7 and later
+ */
+static const struct sysc_capabilities sysc_dra7_mcasp = {
+ .type = TI_SYSC_OMAP4_SIMPLE,
+ .regbits = &sysc_regbits_omap4_simple,
+ .mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
+};
+
+/*
+ * FS USB host found on omap4 and later
+ */
+static const struct sysc_regbits sysc_regbits_omap4_usb_host_fs = {
+ .dmadisable_shift = -ENODEV,
+ .midle_shift = -ENODEV,
+ .sidle_shift = 24,
+ .clkact_shift = -ENODEV,
+ .enwkup_shift = 26,
+ .srst_shift = -ENODEV,
+ .emufree_shift = -ENODEV,
+ .autoidle_shift = -ENODEV,
+};
+
+static const struct sysc_capabilities sysc_omap4_usb_host_fs = {
+ .type = TI_SYSC_OMAP4_USB_HOST_FS,
+ .sysc_mask = SYSC_OMAP2_ENAWAKEUP,
+ .regbits = &sysc_regbits_omap4_usb_host_fs,
+};
+
+static const struct sysc_regbits sysc_regbits_dra7_mcan = {
+ .dmadisable_shift = -ENODEV,
+ .midle_shift = -ENODEV,
+ .sidle_shift = -ENODEV,
+ .clkact_shift = -ENODEV,
+ .enwkup_shift = 4,
+ .srst_shift = 0,
+ .emufree_shift = -ENODEV,
+ .autoidle_shift = -ENODEV,
+};
+
+static const struct sysc_capabilities sysc_dra7_mcan = {
+ .type = TI_SYSC_DRA7_MCAN,
+ .sysc_mask = SYSC_DRA7_MCAN_ENAWAKEUP | SYSC_OMAP4_SOFTRESET,
+ .regbits = &sysc_regbits_dra7_mcan,
+ .mod_quirks = SYSS_QUIRK_RESETDONE_INVERTED,
+};
+
+/*
+ * PRUSS found on some AM33xx, AM437x and AM57xx SoCs
+ */
+static const struct sysc_capabilities sysc_pruss = {
+ .type = TI_SYSC_PRUSS,
+ .sysc_mask = SYSC_PRUSS_STANDBY_INIT | SYSC_PRUSS_SUB_MWAIT,
+ .regbits = &sysc_regbits_omap4_simple,
+ .mod_quirks = SYSC_MODULE_QUIRK_PRUSS,
+};
+
+static int sysc_init_pdata(struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
+ struct ti_sysc_module_data *mdata;
+
+ if (!pdata)
+ return 0;
+
+ mdata = devm_kzalloc(ddata->dev, sizeof(*mdata), GFP_KERNEL);
+ if (!mdata)
+ return -ENOMEM;
+
+ if (ddata->legacy_mode) {
+ mdata->name = ddata->legacy_mode;
+ mdata->module_pa = ddata->module_pa;
+ mdata->module_size = ddata->module_size;
+ mdata->offsets = ddata->offsets;
+ mdata->nr_offsets = SYSC_MAX_REGS;
+ mdata->cap = ddata->cap;
+ mdata->cfg = &ddata->cfg;
+ }
+
+ ddata->mdata = mdata;
+
+ return 0;
+}
+
+static int sysc_init_match(struct sysc *ddata)
+{
+ const struct sysc_capabilities *cap;
+
+ cap = of_device_get_match_data(ddata->dev);
+ if (!cap)
+ return -EINVAL;
+
+ ddata->cap = cap;
+ if (ddata->cap)
+ ddata->cfg.quirks |= ddata->cap->mod_quirks;
+
+ return 0;
+}
+
+static void ti_sysc_idle(struct work_struct *work)
+{
+ struct sysc *ddata;
+
+ ddata = container_of(work, struct sysc, idle_work.work);
+
+ /*
+ * One time decrement of clock usage counts if left on from init.
+ * Note that we disable opt clocks unconditionally in this case
+ * as they are enabled unconditionally during init without
+ * considering sysc_opt_clks_needed() at that point.
+ */
+ if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
+ SYSC_QUIRK_NO_IDLE_ON_INIT)) {
+ sysc_disable_main_clocks(ddata);
+ sysc_disable_opt_clocks(ddata);
+ sysc_clkdm_allow_idle(ddata);
+ }
+
+ /* Keep permanent PM runtime usage count for SYSC_QUIRK_NO_IDLE */
+ if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)
+ return;
+
+ /*
+ * Decrement PM runtime usage count for SYSC_QUIRK_NO_IDLE_ON_INIT
+ * and SYSC_QUIRK_NO_RESET_ON_INIT
+ */
+ if (pm_runtime_active(ddata->dev))
+ pm_runtime_put_sync(ddata->dev);
+}
+
+/*
+ * SoC model and features detection. Only needed for SoCs that need
+ * special handling for quirks, no need to list others.
+ */
+static const struct soc_device_attribute sysc_soc_match[] = {
+ SOC_FLAG("OMAP242*", SOC_2420),
+ SOC_FLAG("OMAP243*", SOC_2430),
+ SOC_FLAG("AM35*", SOC_AM35),
+ SOC_FLAG("OMAP3[45]*", SOC_3430),
+ SOC_FLAG("OMAP3[67]*", SOC_3630),
+ SOC_FLAG("OMAP443*", SOC_4430),
+ SOC_FLAG("OMAP446*", SOC_4460),
+ SOC_FLAG("OMAP447*", SOC_4470),
+ SOC_FLAG("OMAP54*", SOC_5430),
+ SOC_FLAG("AM433", SOC_AM3),
+ SOC_FLAG("AM43*", SOC_AM4),
+ SOC_FLAG("DRA7*", SOC_DRA7),
+
+ { /* sentinel */ }
+};
+
+/*
+ * List of SoCs variants with disabled features. By default we assume all
+ * devices in the device tree are available so no need to list those SoCs.
+ */
+static const struct soc_device_attribute sysc_soc_feat_match[] = {
+ /* OMAP3430/3530 and AM3517 variants with some accelerators disabled */
+ SOC_FLAG("AM3505", DIS_SGX),
+ SOC_FLAG("OMAP3525", DIS_SGX),
+ SOC_FLAG("OMAP3515", DIS_IVA | DIS_SGX),
+ SOC_FLAG("OMAP3503", DIS_ISP | DIS_IVA | DIS_SGX),
+
+ /* OMAP3630/DM3730 variants with some accelerators disabled */
+ SOC_FLAG("AM3703", DIS_IVA | DIS_SGX),
+ SOC_FLAG("DM3725", DIS_SGX),
+ SOC_FLAG("OMAP3611", DIS_ISP | DIS_IVA | DIS_SGX),
+ SOC_FLAG("OMAP3615/AM3715", DIS_IVA),
+ SOC_FLAG("OMAP3621", DIS_ISP),
+
+ { /* sentinel */ }
+};
+
+static int sysc_add_disabled(unsigned long base)
+{
+ struct sysc_address *disabled_module;
+
+ disabled_module = kzalloc(sizeof(*disabled_module), GFP_KERNEL);
+ if (!disabled_module)
+ return -ENOMEM;
+
+ disabled_module->base = base;
+
+ mutex_lock(&sysc_soc->list_lock);
+ list_add(&disabled_module->node, &sysc_soc->disabled_modules);
+ mutex_unlock(&sysc_soc->list_lock);
+
+ return 0;
+}
+
+/*
+ * One time init to detect the booted SoC, disable unavailable features
+ * and initialize list for optional cpu_pm notifier.
+ *
+ * Note that we initialize static data shared across all ti-sysc instances
+ * so ddata is only used for SoC type. This can be called from module_init
+ * once we no longer need to rely on platform data.
+ */
+static int sysc_init_static_data(struct sysc *ddata)
+{
+ const struct soc_device_attribute *match;
+ struct ti_sysc_platform_data *pdata;
+ unsigned long features = 0;
+ struct device_node *np;
+
+ if (sysc_soc)
+ return 0;
+
+ sysc_soc = kzalloc(sizeof(*sysc_soc), GFP_KERNEL);
+ if (!sysc_soc)
+ return -ENOMEM;
+
+ mutex_init(&sysc_soc->list_lock);
+ INIT_LIST_HEAD(&sysc_soc->disabled_modules);
+ INIT_LIST_HEAD(&sysc_soc->restored_modules);
+ sysc_soc->general_purpose = true;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (pdata && pdata->soc_type_gp)
+ sysc_soc->general_purpose = pdata->soc_type_gp();
+
+ match = soc_device_match(sysc_soc_match);
+ if (match && match->data)
+ sysc_soc->soc = (enum sysc_soc)(uintptr_t)match->data;
+
+ /*
+ * Check and warn about possible old incomplete dtb. We now want to see
+ * simple-pm-bus instead of simple-bus in the dtb for genpd using SoCs.
+ */
+ switch (sysc_soc->soc) {
+ case SOC_AM3:
+ case SOC_AM4:
+ case SOC_4430 ... SOC_4470:
+ case SOC_5430:
+ case SOC_DRA7:
+ np = of_find_node_by_path("/ocp");
+ WARN_ONCE(np && of_device_is_compatible(np, "simple-bus"),
+ "ti-sysc: Incomplete old dtb, please update\n");
+ break;
+ default:
+ break;
+ }
+
+ /* Ignore devices that are not available on HS and EMU SoCs */
+ if (!sysc_soc->general_purpose) {
+ switch (sysc_soc->soc) {
+ case SOC_3430 ... SOC_3630:
+ sysc_add_disabled(0x48304000); /* timer12 */
+ break;
+ case SOC_AM3:
+ sysc_add_disabled(0x48310000); /* rng */
+ break;
+ default:
+ break;
+ }
+ }
+
+ match = soc_device_match(sysc_soc_feat_match);
+ if (!match)
+ return 0;
+
+ if (match->data)
+ features = (unsigned long)match->data;
+
+ /*
+ * Add disabled devices to the list based on the module base.
+ * Note that this must be done before we attempt to access the
+ * device and have module revision checks working.
+ */
+ if (features & DIS_ISP)
+ sysc_add_disabled(0x480bd400);
+ if (features & DIS_IVA)
+ sysc_add_disabled(0x5d000000);
+ if (features & DIS_SGX)
+ sysc_add_disabled(0x50000000);
+
+ return 0;
+}
+
+static void sysc_cleanup_static_data(void)
+{
+ struct sysc_module *restored_module;
+ struct sysc_address *disabled_module;
+ struct list_head *pos, *tmp;
+
+ if (!sysc_soc)
+ return;
+
+ if (sysc_soc->nb.notifier_call)
+ cpu_pm_unregister_notifier(&sysc_soc->nb);
+
+ mutex_lock(&sysc_soc->list_lock);
+ list_for_each_safe(pos, tmp, &sysc_soc->restored_modules) {
+ restored_module = list_entry(pos, struct sysc_module, node);
+ list_del(pos);
+ kfree(restored_module);
+ }
+ list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) {
+ disabled_module = list_entry(pos, struct sysc_address, node);
+ list_del(pos);
+ kfree(disabled_module);
+ }
+ mutex_unlock(&sysc_soc->list_lock);
+}
+
+static int sysc_check_disabled_devices(struct sysc *ddata)
+{
+ struct sysc_address *disabled_module;
+ int error = 0;
+
+ mutex_lock(&sysc_soc->list_lock);
+ list_for_each_entry(disabled_module, &sysc_soc->disabled_modules, node) {
+ if (ddata->module_pa == disabled_module->base) {
+ dev_dbg(ddata->dev, "module disabled for this SoC\n");
+ error = -ENODEV;
+ break;
+ }
+ }
+ mutex_unlock(&sysc_soc->list_lock);
+
+ return error;
+}
+
+/*
+ * Ignore timers tagged with no-reset and no-idle. These are likely in use,
+ * for example by drivers/clocksource/timer-ti-dm-systimer.c. If more checks
+ * are needed, we could also look at the timer register configuration.
+ */
+static int sysc_check_active_timer(struct sysc *ddata)
+{
+ int error;
+
+ if (ddata->cap->type != TI_SYSC_OMAP2_TIMER &&
+ ddata->cap->type != TI_SYSC_OMAP4_TIMER)
+ return 0;
+
+ /*
+ * Quirk for omap3 beagleboard revision A to B4 to use gpt12.
+ * Revision C and later are fixed with commit 23885389dbbb ("ARM:
+ * dts: Fix timer regression for beagleboard revision c"). This all
+ * can be dropped if we stop supporting old beagleboard revisions
+ * A to B4 at some point.
+ */
+ if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35)
+ error = -ENXIO;
+ else
+ error = -EBUSY;
+
+ if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
+ (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
+ return error;
+
+ return 0;
+}
+
+static const struct of_device_id sysc_match_table[] = {
+ { .compatible = "simple-bus", },
+ { /* sentinel */ },
+};
+
+static int sysc_probe(struct platform_device *pdev)
+{
+ struct ti_sysc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct sysc *ddata;
+ int error;
+
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ ddata->offsets[SYSC_REVISION] = -ENODEV;
+ ddata->offsets[SYSC_SYSCONFIG] = -ENODEV;
+ ddata->offsets[SYSC_SYSSTATUS] = -ENODEV;
+ ddata->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ddata);
+
+ error = sysc_init_static_data(ddata);
+ if (error)
+ return error;
+
+ error = sysc_init_match(ddata);
+ if (error)
+ return error;
+
+ error = sysc_init_dts_quirks(ddata);
+ if (error)
+ return error;
+
+ error = sysc_map_and_check_registers(ddata);
+ if (error)
+ return error;
+
+ error = sysc_init_sysc_mask(ddata);
+ if (error)
+ return error;
+
+ error = sysc_init_idlemodes(ddata);
+ if (error)
+ return error;
+
+ error = sysc_init_syss_mask(ddata);
+ if (error)
+ return error;
+
+ error = sysc_init_pdata(ddata);
+ if (error)
+ return error;
+
+ sysc_init_early_quirks(ddata);
+
+ error = sysc_check_disabled_devices(ddata);
+ if (error)
+ return error;
+
+ error = sysc_check_active_timer(ddata);
+ if (error == -ENXIO)
+ ddata->reserved = true;
+ else if (error)
+ return error;
+
+ error = sysc_get_clocks(ddata);
+ if (error)
+ return error;
+
+ error = sysc_init_resets(ddata);
+ if (error)
+ goto unprepare;
+
+ error = sysc_init_module(ddata);
+ if (error)
+ goto unprepare;
+
+ pm_runtime_enable(ddata->dev);
+ error = pm_runtime_resume_and_get(ddata->dev);
+ if (error < 0) {
+ pm_runtime_disable(ddata->dev);
+ goto unprepare;
+ }
+
+ /* Balance use counts as PM runtime should have enabled these all */
+ if (!(ddata->cfg.quirks &
+ (SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))) {
+ sysc_disable_main_clocks(ddata);
+ sysc_disable_opt_clocks(ddata);
+ sysc_clkdm_allow_idle(ddata);
+ }
+
+ if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
+ reset_control_assert(ddata->rsts);
+
+ sysc_show_registers(ddata);
+
+ ddata->dev->type = &sysc_device_type;
+
+ if (!ddata->reserved) {
+ error = of_platform_populate(ddata->dev->of_node,
+ sysc_match_table,
+ pdata ? pdata->auxdata : NULL,
+ ddata->dev);
+ if (error)
+ goto err;
+ }
+
+ INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle);
+
+ /* At least earlycon won't survive without deferred idle */
+ if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
+ SYSC_QUIRK_NO_IDLE_ON_INIT |
+ SYSC_QUIRK_NO_RESET_ON_INIT)) {
+ schedule_delayed_work(&ddata->idle_work, 3000);
+ } else {
+ pm_runtime_put(&pdev->dev);
+ }
+
+ if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_CTX_LOST)
+ sysc_add_restored(ddata);
+
+ return 0;
+
+err:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+unprepare:
+ sysc_unprepare(ddata);
+
+ return error;
+}
+
+static int sysc_remove(struct platform_device *pdev)
+{
+ struct sysc *ddata = platform_get_drvdata(pdev);
+ int error;
+
+ /* Device can still be enabled, see deferred idle quirk in probe */
+ if (cancel_delayed_work_sync(&ddata->idle_work))
+ ti_sysc_idle(&ddata->idle_work.work);
+
+ error = pm_runtime_resume_and_get(ddata->dev);
+ if (error < 0) {
+ pm_runtime_disable(ddata->dev);
+ goto unprepare;
+ }
+
+ of_platform_depopulate(&pdev->dev);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ if (!reset_control_status(ddata->rsts))
+ reset_control_assert(ddata->rsts);
+
+unprepare:
+ sysc_unprepare(ddata);
+
+ return 0;
+}
+
+static const struct of_device_id sysc_match[] = {
+ { .compatible = "ti,sysc-omap2", .data = &sysc_omap2, },
+ { .compatible = "ti,sysc-omap2-timer", .data = &sysc_omap2_timer, },
+ { .compatible = "ti,sysc-omap4", .data = &sysc_omap4, },
+ { .compatible = "ti,sysc-omap4-timer", .data = &sysc_omap4_timer, },
+ { .compatible = "ti,sysc-omap4-simple", .data = &sysc_omap4_simple, },
+ { .compatible = "ti,sysc-omap3430-sr", .data = &sysc_34xx_sr, },
+ { .compatible = "ti,sysc-omap3630-sr", .data = &sysc_36xx_sr, },
+ { .compatible = "ti,sysc-omap4-sr", .data = &sysc_omap4_sr, },
+ { .compatible = "ti,sysc-omap3-sham", .data = &sysc_omap3_sham, },
+ { .compatible = "ti,sysc-omap-aes", .data = &sysc_omap3_aes, },
+ { .compatible = "ti,sysc-mcasp", .data = &sysc_omap4_mcasp, },
+ { .compatible = "ti,sysc-dra7-mcasp", .data = &sysc_dra7_mcasp, },
+ { .compatible = "ti,sysc-usb-host-fs",
+ .data = &sysc_omap4_usb_host_fs, },
+ { .compatible = "ti,sysc-dra7-mcan", .data = &sysc_dra7_mcan, },
+ { .compatible = "ti,sysc-pruss", .data = &sysc_pruss, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sysc_match);
+
+static struct platform_driver sysc_driver = {
+ .probe = sysc_probe,
+ .remove = sysc_remove,
+ .driver = {
+ .name = "ti-sysc",
+ .of_match_table = sysc_match,
+ .pm = &sysc_pm_ops,
+ },
+};
+
+static int __init sysc_init(void)
+{
+ bus_register_notifier(&platform_bus_type, &sysc_nb);
+
+ return platform_driver_register(&sysc_driver);
+}
+module_init(sysc_init);
+
+static void __exit sysc_exit(void)
+{
+ bus_unregister_notifier(&platform_bus_type, &sysc_nb);
+ platform_driver_unregister(&sysc_driver);
+ sysc_cleanup_static_data();
+}
+module_exit(sysc_exit);
+
+MODULE_DESCRIPTION("TI sysc interconnect target driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/ts-nbus.c b/drivers/bus/ts-nbus.c
new file mode 100644
index 0000000000..38c886dc2e
--- /dev/null
+++ b/drivers/bus/ts-nbus.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NBUS driver for TS-4600 based boards
+ *
+ * Copyright (c) 2016 - Savoir-faire Linux
+ * Author: Sebastien Bourdelin <sebastien.bourdelin@savoirfairelinux.com>
+ *
+ * This driver implements a GPIO bit-banged bus, called the NBUS by Technologic
+ * Systems. It is used to communicate with the peripherals in the FPGA on the
+ * TS-4600 SoM.
+ */
+
+#include <linux/bitops.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/ts-nbus.h>
+
+#define TS_NBUS_DIRECTION_IN 0
+#define TS_NBUS_DIRECTION_OUT 1
+#define TS_NBUS_WRITE_ADR 0
+#define TS_NBUS_WRITE_VAL 1
+
+struct ts_nbus {
+ struct pwm_device *pwm;
+ struct gpio_descs *data;
+ struct gpio_desc *csn;
+ struct gpio_desc *txrx;
+ struct gpio_desc *strobe;
+ struct gpio_desc *ale;
+ struct gpio_desc *rdy;
+ struct mutex lock;
+};
+
+/*
+ * request all gpios required by the bus.
+ */
+static int ts_nbus_init_pdata(struct platform_device *pdev,
+ struct ts_nbus *ts_nbus)
+{
+ ts_nbus->data = devm_gpiod_get_array(&pdev->dev, "ts,data",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ts_nbus->data)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,data-gpio from dts\n");
+ return PTR_ERR(ts_nbus->data);
+ }
+
+ ts_nbus->csn = devm_gpiod_get(&pdev->dev, "ts,csn", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts_nbus->csn)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,csn-gpio from dts\n");
+ return PTR_ERR(ts_nbus->csn);
+ }
+
+ ts_nbus->txrx = devm_gpiod_get(&pdev->dev, "ts,txrx", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts_nbus->txrx)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,txrx-gpio from dts\n");
+ return PTR_ERR(ts_nbus->txrx);
+ }
+
+ ts_nbus->strobe = devm_gpiod_get(&pdev->dev, "ts,strobe", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts_nbus->strobe)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,strobe-gpio from dts\n");
+ return PTR_ERR(ts_nbus->strobe);
+ }
+
+ ts_nbus->ale = devm_gpiod_get(&pdev->dev, "ts,ale", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts_nbus->ale)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,ale-gpio from dts\n");
+ return PTR_ERR(ts_nbus->ale);
+ }
+
+ ts_nbus->rdy = devm_gpiod_get(&pdev->dev, "ts,rdy", GPIOD_IN);
+ if (IS_ERR(ts_nbus->rdy)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,rdy-gpio from dts\n");
+ return PTR_ERR(ts_nbus->rdy);
+ }
+
+ return 0;
+}
+
+/*
+ * the data gpios are used for reading and writing values, so their
+ * direction must be adjusted accordingly.
+ */
+static void ts_nbus_set_direction(struct ts_nbus *ts_nbus, int direction)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ if (direction == TS_NBUS_DIRECTION_IN)
+ gpiod_direction_input(ts_nbus->data->desc[i]);
+ else
+ /* when used as output, the default state of the
+ * data lines is high */
+ gpiod_direction_output(ts_nbus->data->desc[i], 1);
+ }
+}
+
+/*
+ * reset the bus to its initial state.
+ * The data, csn, strobe and ale lines must be zeroed to let the FPGA know a
+ * new transaction can be processed.
+ */
+static void ts_nbus_reset_bus(struct ts_nbus *ts_nbus)
+{
+ DECLARE_BITMAP(values, 8);
+
+ values[0] = 0;
+
+ gpiod_set_array_value_cansleep(8, ts_nbus->data->desc,
+ ts_nbus->data->info, values);
+ gpiod_set_value_cansleep(ts_nbus->csn, 0);
+ gpiod_set_value_cansleep(ts_nbus->strobe, 0);
+ gpiod_set_value_cansleep(ts_nbus->ale, 0);
+}
+
+/*
+ * let the FPGA know it can process the transaction.
+ */
+static void ts_nbus_start_transaction(struct ts_nbus *ts_nbus)
+{
+ gpiod_set_value_cansleep(ts_nbus->strobe, 1);
+}
+
+/*
+ * read a byte value from the data gpios.
+ * return 0 on success or negative errno on failure.
+ */
+static int ts_nbus_read_byte(struct ts_nbus *ts_nbus, u8 *val)
+{
+ struct gpio_descs *gpios = ts_nbus->data;
+ int ret, i;
+
+ *val = 0;
+ for (i = 0; i < 8; i++) {
+ ret = gpiod_get_value_cansleep(gpios->desc[i]);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ *val |= BIT(i);
+ }
+
+ return 0;
+}
+
+/*
+ * set the data gpios according to the byte value.
+ */
+static void ts_nbus_write_byte(struct ts_nbus *ts_nbus, u8 byte)
+{
+ struct gpio_descs *gpios = ts_nbus->data;
+ DECLARE_BITMAP(values, 8);
+
+ values[0] = byte;
+
+ gpiod_set_array_value_cansleep(8, gpios->desc, gpios->info, values);
+}
+
+/*
+ * reading the bus consists of resetting the bus, then notifying the FPGA to
+ * put the data on the data gpios, and reading back the value.
+ * return 0 on success or negative errno on failure.
+ */
+static int ts_nbus_read_bus(struct ts_nbus *ts_nbus, u8 *val)
+{
+ ts_nbus_reset_bus(ts_nbus);
+ ts_nbus_start_transaction(ts_nbus);
+
+ return ts_nbus_read_byte(ts_nbus, val);
+}
+
+/*
+ * writing to the bus consists of resetting the bus, then defining the type of
+ * command (address/value), writing the data and notifying the FPGA to retrieve
+ * the value from the data gpios.
+ */
+static void ts_nbus_write_bus(struct ts_nbus *ts_nbus, int cmd, u8 val)
+{
+ ts_nbus_reset_bus(ts_nbus);
+
+ if (cmd == TS_NBUS_WRITE_ADR)
+ gpiod_set_value_cansleep(ts_nbus->ale, 1);
+
+ ts_nbus_write_byte(ts_nbus, val);
+ ts_nbus_start_transaction(ts_nbus);
+}
+
+/*
+ * read the value in the FPGA register at the given address.
+ * return 0 on success or negative errno on failure.
+ */
+int ts_nbus_read(struct ts_nbus *ts_nbus, u8 adr, u16 *val)
+{
+ int ret, i;
+ u8 byte;
+
+ /* bus access must be atomic */
+ mutex_lock(&ts_nbus->lock);
+
+ /* set the bus in read mode */
+ gpiod_set_value_cansleep(ts_nbus->txrx, 0);
+
+ /* write address */
+ ts_nbus_write_bus(ts_nbus, TS_NBUS_WRITE_ADR, adr);
+
+ /* set the data gpios direction as input before reading */
+ ts_nbus_set_direction(ts_nbus, TS_NBUS_DIRECTION_IN);
+
+ /* reading value MSB first */
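+ /* re-read the whole 16-bit value until the FPGA deasserts the rdy line */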
+ do {
+ *val = 0;
+ byte = 0;
+ for (i = 1; i >= 0; i--) {
+ /* read a byte from the bus, leave on error */
+ ret = ts_nbus_read_bus(ts_nbus, &byte);
+ if (ret < 0)
+ goto err;
+
+ /* append the byte read to the final value */
+ *val |= byte << (i * 8);
+ }
+ gpiod_set_value_cansleep(ts_nbus->csn, 1);
+ ret = gpiod_get_value_cansleep(ts_nbus->rdy);
+ } while (ret);
+
+err:
+ /* restore the data gpios direction as output after reading */
+ ts_nbus_set_direction(ts_nbus, TS_NBUS_DIRECTION_OUT);
+
+ mutex_unlock(&ts_nbus->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ts_nbus_read);
+
+/*
+ * write the desired value in the FPGA register at the given address.
+ */
+int ts_nbus_write(struct ts_nbus *ts_nbus, u8 adr, u16 val)
+{
+ int i;
+
+ /* bus access must be atomic */
+ mutex_lock(&ts_nbus->lock);
+
+ /* set the bus in write mode */
+ gpiod_set_value_cansleep(ts_nbus->txrx, 1);
+
+ /* write address */
+ ts_nbus_write_bus(ts_nbus, TS_NBUS_WRITE_ADR, adr);
+
+ /* writing value MSB first */
+ for (i = 1; i >= 0; i--)
+ ts_nbus_write_bus(ts_nbus, TS_NBUS_WRITE_VAL, (u8)(val >> (i * 8)));
+
+ /* wait for completion */
+ gpiod_set_value_cansleep(ts_nbus->csn, 1);
+ while (gpiod_get_value_cansleep(ts_nbus->rdy) != 0) {
+ gpiod_set_value_cansleep(ts_nbus->csn, 0);
+ gpiod_set_value_cansleep(ts_nbus->csn, 1);
+ }
+
+ mutex_unlock(&ts_nbus->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ts_nbus_write);
+
+static int ts_nbus_probe(struct platform_device *pdev)
+{
+ struct pwm_device *pwm;
+ struct pwm_args pargs;
+ struct device *dev = &pdev->dev;
+ struct ts_nbus *ts_nbus;
+ int ret;
+
+ ts_nbus = devm_kzalloc(dev, sizeof(*ts_nbus), GFP_KERNEL);
+ if (!ts_nbus)
+ return -ENOMEM;
+
+ mutex_init(&ts_nbus->lock);
+
+ ret = ts_nbus_init_pdata(pdev, ts_nbus);
+ if (ret < 0)
+ return ret;
+
+ pwm = devm_pwm_get(dev, NULL);
+ if (IS_ERR(pwm)) {
+ ret = PTR_ERR(pwm);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "unable to request PWM\n");
+ return ret;
+ }
+
+ pwm_get_args(pwm, &pargs);
+ if (!pargs.period) {
+ dev_err(&pdev->dev, "invalid PWM period\n");
+ return -EINVAL;
+ }
+
+ /*
+ * FIXME: pwm_apply_args() should be removed when switching to
+ * the atomic PWM API.
+ */
+ pwm_apply_args(pwm);
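+ /* request a constant high output by setting the duty cycle equal to the period */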
+ ret = pwm_config(pwm, pargs.period, pargs.period);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * we can now start the FPGA and populate the peripherals.
+ */
+ pwm_enable(pwm);
+ ts_nbus->pwm = pwm;
+
+ /*
+ * let the child nodes retrieve this instance of the ts-nbus.
+ */
+ dev_set_drvdata(dev, ts_nbus);
+
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret < 0)
+ return ret;
+
+ dev_info(dev, "initialized\n");
+
+ return 0;
+}
+
+static int ts_nbus_remove(struct platform_device *pdev)
+{
+ struct ts_nbus *ts_nbus = dev_get_drvdata(&pdev->dev);
+
+ /* shutdown the FPGA */
+ mutex_lock(&ts_nbus->lock);
+ pwm_disable(ts_nbus->pwm);
+ mutex_unlock(&ts_nbus->lock);
+
+ return 0;
+}
+
+static const struct of_device_id ts_nbus_of_match[] = {
+ { .compatible = "technologic,ts-nbus", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ts_nbus_of_match);
+
+static struct platform_driver ts_nbus_driver = {
+ .probe = ts_nbus_probe,
+ .remove = ts_nbus_remove,
+ .driver = {
+ .name = "ts_nbus",
+ .of_match_table = ts_nbus_of_match,
+ },
+};
+
+module_platform_driver(ts_nbus_driver);
+
+MODULE_ALIAS("platform:ts_nbus");
+MODULE_AUTHOR("Sebastien Bourdelin <sebastien.bourdelin@savoirfairelinux.com>");
+MODULE_DESCRIPTION("Technologic Systems NBUS");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
new file mode 100644
index 0000000000..cb5c89ce7b
--- /dev/null
+++ b/drivers/bus/uniphier-system-bus.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ */
+
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+/* System Bus Controller registers */
+#define UNIPHIER_SBC_BASE 0x100 /* base address of bank0 space */
+#define UNIPHIER_SBC_BASE_BE BIT(0) /* bank_enable */
+#define UNIPHIER_SBC_CTRL0 0x200 /* timing parameter 0 of bank0 */
+#define UNIPHIER_SBC_CTRL1 0x204 /* timing parameter 1 of bank0 */
+#define UNIPHIER_SBC_CTRL2 0x208 /* timing parameter 2 of bank0 */
+#define UNIPHIER_SBC_CTRL3 0x20c /* timing parameter 3 of bank0 */
+#define UNIPHIER_SBC_CTRL4 0x300 /* timing parameter 4 of bank0 */
+
+#define UNIPHIER_SBC_STRIDE 0x10 /* register stride to next bank */
+#define UNIPHIER_SBC_NR_BANKS 8 /* number of banks (chip select) */
+#define UNIPHIER_SBC_BASE_DUMMY 0xffffffff /* data to squash bank 0, 1 */
+
+struct uniphier_system_bus_bank {
+ u32 base;
+ u32 end;
+};
+
+struct uniphier_system_bus_priv {
+ struct device *dev;
+ void __iomem *membase;
+ struct uniphier_system_bus_bank bank[UNIPHIER_SBC_NR_BANKS];
+};
+
+static int uniphier_system_bus_add_bank(struct uniphier_system_bus_priv *priv,
+ int bank, u32 addr, u64 paddr, u32 size)
+{
+ u64 end, mask;
+
+ dev_dbg(priv->dev,
+ "range found: bank = %d, addr = %08x, paddr = %08llx, size = %08x\n",
+ bank, addr, paddr, size);
+
+ if (bank >= ARRAY_SIZE(priv->bank)) {
+ dev_err(priv->dev, "unsupported bank number %d\n", bank);
+ return -EINVAL;
+ }
+
+ if (priv->bank[bank].base || priv->bank[bank].end) {
+ dev_err(priv->dev,
+ "range for bank %d has already been specified\n", bank);
+ return -EINVAL;
+ }
+
+ if (paddr > U32_MAX) {
+ dev_err(priv->dev, "base address %llx is too high\n", paddr);
+ return -EINVAL;
+ }
+
+ end = paddr + size;
+
+ if (addr > paddr) {
+ dev_err(priv->dev,
+ "base %08x cannot be mapped to %08llx of parent\n",
+ addr, paddr);
+ return -EINVAL;
+ }
+ paddr -= addr;
+
+ paddr = round_down(paddr, 0x00020000);
+ end = round_up(end, 0x00020000);
+
+ if (end > U32_MAX) {
+ dev_err(priv->dev, "end address %08llx is too high\n", end);
+ return -EINVAL;
+ }
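+ /* widen the range to a power-of-two size so it fits the base/mask register format */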
+ mask = paddr ^ (end - 1);
+ mask = roundup_pow_of_two(mask);
+
+ paddr = round_down(paddr, mask);
+ end = round_up(end, mask);
+
+ priv->bank[bank].base = paddr;
+ priv->bank[bank].end = end;
+
+ dev_dbg(priv->dev, "range added: bank = %d, addr = %08x, end = %08x\n",
+ bank, priv->bank[bank].base, priv->bank[bank].end);
+
+ return 0;
+}
+
+static int uniphier_system_bus_check_overlap(
+ const struct uniphier_system_bus_priv *priv)
+{
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
+ for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) {
+ if (priv->bank[i].end > priv->bank[j].base &&
+ priv->bank[i].base < priv->bank[j].end) {
+ dev_err(priv->dev,
+ "region overlap between bank%d and bank%d\n",
+ i, j);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void uniphier_system_bus_check_boot_swap(
+ struct uniphier_system_bus_priv *priv)
+{
+ void __iomem *base_reg = priv->membase + UNIPHIER_SBC_BASE;
+ int is_swapped;
+
+ is_swapped = !(readl(base_reg) & UNIPHIER_SBC_BASE_BE);
+
+ dev_dbg(priv->dev, "Boot Swap: %s\n", is_swapped ? "on" : "off");
+
+ /*
+ * If BOOT_SWAP was asserted on power-on-reset, the CS0 and CS1 are
+ * swapped. In this case, bank0 and bank1 should be swapped as well.
+ */
+ if (is_swapped)
+ swap(priv->bank[0], priv->bank[1]);
+}
+
+static void uniphier_system_bus_set_reg(
+ const struct uniphier_system_bus_priv *priv)
+{
+ void __iomem *base_reg = priv->membase + UNIPHIER_SBC_BASE;
+ u32 base, end, mask, val;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
+ base = priv->bank[i].base;
+ end = priv->bank[i].end;
+
+ if (base == end) {
+ /*
+ * If SBC_BASE0 or SBC_BASE1 is set to zero, the access
+ * to anywhere in the system bus space is routed to
+ * bank 0 (if boot swap is off) or bank 1 (if boot swap
+ * is on). It means that CPUs cannot get access to
+ * bank 2 or later. In other words, bank 0/1 cannot
+ * be disabled even if its bank_enable bit is cleared.
+ * This seems odd, but it is how this hardware works.
+ * As a workaround, dummy data (0xffffffff) should be
+ * set when bank 0/1 is unused. As for bank 2 and
+ * later, they can simply be disabled by clearing the
+ * bank_enable bit.
+ */
+ if (i < 2)
+ val = UNIPHIER_SBC_BASE_DUMMY;
+ else
+ val = 0;
+ } else {
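+ /* base in bits [31:17], inverted mask in bits [15:1], bit 0 is bank_enable */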
+ mask = base ^ (end - 1);
+
+ val = base & 0xfffe0000;
+ val |= (~mask >> 16) & 0xfffe;
+ val |= UNIPHIER_SBC_BASE_BE;
+ }
+ dev_dbg(priv->dev, "SBC_BASE[%d] = 0x%08x\n", i, val);
+
+ writel(val, base_reg + UNIPHIER_SBC_STRIDE * i);
+ }
+}
+
+static int uniphier_system_bus_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_system_bus_priv *priv;
+ struct of_range_parser parser;
+ struct of_range range;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->membase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->membase))
+ return PTR_ERR(priv->membase);
+
+ priv->dev = dev;
+
+ ret = of_range_parser_init(&parser, dev->of_node);
+ if (ret)
+ return ret;
+
+ for_each_of_range(&parser, &range) {
+ if (range.cpu_addr == OF_BAD_ADDR)
+ return -EINVAL;
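+ /* the bank index is carried in the upper cell of the bus address */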
+ ret = uniphier_system_bus_add_bank(priv,
+ upper_32_bits(range.bus_addr),
+ lower_32_bits(range.bus_addr),
+ range.cpu_addr, range.size);
+ if (ret)
+ return ret;
+ }
+
+ ret = uniphier_system_bus_check_overlap(priv);
+ if (ret)
+ return ret;
+
+ uniphier_system_bus_check_boot_swap(priv);
+
+ uniphier_system_bus_set_reg(priv);
+
+ platform_set_drvdata(pdev, priv);
+
+ /* Now, the bus is configured. Populate platform_devices below it */
+ return of_platform_default_populate(dev->of_node, NULL, dev);
+}
+
+static int __maybe_unused uniphier_system_bus_resume(struct device *dev)
+{
+ uniphier_system_bus_set_reg(dev_get_drvdata(dev));
+
+ return 0;
+}
+
+static const struct dev_pm_ops uniphier_system_bus_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(NULL, uniphier_system_bus_resume)
+};
+
+static const struct of_device_id uniphier_system_bus_match[] = {
+ { .compatible = "socionext,uniphier-system-bus" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_system_bus_match);
+
+static struct platform_driver uniphier_system_bus_driver = {
+ .probe = uniphier_system_bus_probe,
+ .driver = {
+ .name = "uniphier-system-bus",
+ .of_match_table = uniphier_system_bus_match,
+ .pm = &uniphier_system_bus_pm_ops,
+ },
+};
+module_platform_driver(uniphier_system_bus_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier System Bus driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
new file mode 100644
index 0000000000..c4e1becbb2
--- /dev/null
+++ b/drivers/bus/vexpress-config.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2014 ARM Limited
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/vexpress.h>
+
+#define SYS_MISC 0x0
+#define SYS_MISC_MASTERSITE (1 << 14)
+
+#define SYS_PROCID0 0x24
+#define SYS_PROCID1 0x28
+#define SYS_HBI_MASK 0xfff
+#define SYS_PROCIDx_HBI_SHIFT 0
+
+#define SYS_CFGDATA 0x40
+
+#define SYS_CFGCTRL 0x44
+#define SYS_CFGCTRL_START (1 << 31)
+#define SYS_CFGCTRL_WRITE (1 << 30)
+#define SYS_CFGCTRL_DCC(n) (((n) & 0xf) << 26)
+#define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20)
+#define SYS_CFGCTRL_SITE(n) (((n) & 0x3) << 16)
+#define SYS_CFGCTRL_POSITION(n) (((n) & 0xf) << 12)
+#define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0)
+
+#define SYS_CFGSTAT 0x48
+#define SYS_CFGSTAT_ERR (1 << 1)
+#define SYS_CFGSTAT_COMPLETE (1 << 0)
+
+#define VEXPRESS_SITE_MB 0
+#define VEXPRESS_SITE_DB1 1
+#define VEXPRESS_SITE_DB2 2
+#define VEXPRESS_SITE_MASTER 0xf
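+
+/*
+ * Rough layout of the SYS_CFGCTRL command word built from the fields above
+ * (bits 19:18 are unused by these macros):
+ *
+ *   31     30     29:26  25:20  17:16  15:12     11:0
+ *   START  WRITE  DCC    FUNC   SITE   POSITION  DEVICE
+ */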
+
+struct vexpress_syscfg {
+ struct device *dev;
+ void __iomem *base;
+ struct list_head funcs;
+};
+
+struct vexpress_syscfg_func {
+ struct list_head list;
+ struct vexpress_syscfg *syscfg;
+ struct regmap *regmap;
+ int num_templates;
+ u32 template[]; /* Keep it last! */
+};
+
+struct vexpress_config_bridge_ops {
+ struct regmap * (*regmap_init)(struct device *dev, void *context);
+ void (*regmap_exit)(struct regmap *regmap, void *context);
+};
+
+struct vexpress_config_bridge {
+ struct vexpress_config_bridge_ops *ops;
+ void *context;
+};
+
+
+static DEFINE_MUTEX(vexpress_config_mutex);
+static u32 vexpress_config_site_master = VEXPRESS_SITE_MASTER;
+
+
+static void vexpress_config_set_master(u32 site)
+{
+ vexpress_config_site_master = site;
+}
+
+static void vexpress_config_lock(void *arg)
+{
+ mutex_lock(&vexpress_config_mutex);
+}
+
+static void vexpress_config_unlock(void *arg)
+{
+ mutex_unlock(&vexpress_config_mutex);
+}
+
+
+static void vexpress_config_find_prop(struct device_node *node,
+ const char *name, u32 *val)
+{
+ /* Default value */
+ *val = 0;
+
+ of_node_get(node);
+ while (node) {
+ if (of_property_read_u32(node, name, val) == 0) {
+ of_node_put(node);
+ return;
+ }
+ node = of_get_next_parent(node);
+ }
+}
+
+static int vexpress_config_get_topo(struct device_node *node, u32 *site,
+ u32 *position, u32 *dcc)
+{
+ vexpress_config_find_prop(node, "arm,vexpress,site", site);
+ if (*site == VEXPRESS_SITE_MASTER)
+ *site = vexpress_config_site_master;
+ if (WARN_ON(vexpress_config_site_master == VEXPRESS_SITE_MASTER))
+ return -EINVAL;
+ vexpress_config_find_prop(node, "arm,vexpress,position", position);
+ vexpress_config_find_prop(node, "arm,vexpress,dcc", dcc);
+
+ return 0;
+}
+
+
+static void vexpress_config_devres_release(struct device *dev, void *res)
+{
+ struct vexpress_config_bridge *bridge = dev_get_drvdata(dev->parent);
+ struct regmap *regmap = res;
+
+ bridge->ops->regmap_exit(regmap, bridge->context);
+}
+
+struct regmap *devm_regmap_init_vexpress_config(struct device *dev)
+{
+ struct vexpress_config_bridge *bridge;
+ struct regmap *regmap;
+ struct regmap **res;
+
+ bridge = dev_get_drvdata(dev->parent);
+ if (WARN_ON(!bridge))
+ return ERR_PTR(-EINVAL);
+
+ res = devres_alloc(vexpress_config_devres_release, sizeof(*res),
+ GFP_KERNEL);
+ if (!res)
+ return ERR_PTR(-ENOMEM);
+
+ regmap = (bridge->ops->regmap_init)(dev, bridge->context);
+ if (IS_ERR(regmap)) {
+ devres_free(res);
+ return regmap;
+ }
+
+ *res = regmap;
+ devres_add(dev, res);
+
+ return regmap;
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_vexpress_config);
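+
+/*
+ * A minimal usage sketch, as seen from a child device probing on the config
+ * bus (the register index and bit are hypothetical; only the regmap calls
+ * themselves are real API):
+ *
+ *	struct regmap *map = devm_regmap_init_vexpress_config(&pdev->dev);
+ *	unsigned int val;
+ *
+ *	if (IS_ERR(map))
+ *		return PTR_ERR(map);
+ *	regmap_read(map, 0, &val);	// index 0 = first entry of
+ *					// "arm,vexpress-sysreg,func"
+ *	regmap_write(map, 0, val | BIT(0));
+ */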
+
+static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
+ int index, bool write, u32 *data)
+{
+ struct vexpress_syscfg *syscfg = func->syscfg;
+ u32 command, status;
+ int tries;
+ long timeout;
+
+ if (WARN_ON(index >= func->num_templates))
+ return -EINVAL;
+
+ command = readl(syscfg->base + SYS_CFGCTRL);
+ if (WARN_ON(command & SYS_CFGCTRL_START))
+ return -EBUSY;
+
+ command = func->template[index];
+ command |= SYS_CFGCTRL_START;
+ command |= write ? SYS_CFGCTRL_WRITE : 0;
+
+ /* Use a canary for reads */
+ if (!write)
+ *data = 0xdeadbeef;
+
+ dev_dbg(syscfg->dev, "func %p, command %x, data %x\n",
+ func, command, *data);
+ writel(*data, syscfg->base + SYS_CFGDATA);
+ writel(0, syscfg->base + SYS_CFGSTAT);
+ writel(command, syscfg->base + SYS_CFGCTRL);
+ mb();
+
+ /* The operation can take ages... Go to sleep, 100us initially */
+ tries = 100;
+ timeout = 100;
+ do {
+ if (!irqs_disabled()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(timeout));
+ if (signal_pending(current))
+ return -EINTR;
+ } else {
+ udelay(timeout);
+ }
+
+ status = readl(syscfg->base + SYS_CFGSTAT);
+ if (status & SYS_CFGSTAT_ERR)
+ return -EFAULT;
+
+ if (timeout > 20)
+ timeout -= 20;
+ } while (--tries && !(status & SYS_CFGSTAT_COMPLETE));
+ if (WARN_ON_ONCE(!tries))
+ return -ETIMEDOUT;
+
+ if (!write) {
+ *data = readl(syscfg->base + SYS_CFGDATA);
+ dev_dbg(syscfg->dev, "func %p, read data %x\n", func, *data);
+ }
+
+ return 0;
+}
+
+static int vexpress_syscfg_read(void *context, unsigned int index,
+ unsigned int *val)
+{
+ struct vexpress_syscfg_func *func = context;
+
+ return vexpress_syscfg_exec(func, index, false, val);
+}
+
+static int vexpress_syscfg_write(void *context, unsigned int index,
+ unsigned int val)
+{
+ struct vexpress_syscfg_func *func = context;
+
+ return vexpress_syscfg_exec(func, index, true, &val);
+}
+
+static struct regmap_config vexpress_syscfg_regmap_config = {
+ .lock = vexpress_config_lock,
+ .unlock = vexpress_config_unlock,
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_read = vexpress_syscfg_read,
+ .reg_write = vexpress_syscfg_write,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+
+static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
+ void *context)
+{
+ int err;
+ struct vexpress_syscfg *syscfg = context;
+ struct vexpress_syscfg_func *func;
+ struct property *prop;
+ const __be32 *val = NULL;
+ __be32 energy_quirk[4];
+ int num;
+ u32 site, position, dcc;
+ int i;
+
+ err = vexpress_config_get_topo(dev->of_node, &site,
+ &position, &dcc);
+ if (err)
+ return ERR_PTR(err);
+
+ prop = of_find_property(dev->of_node,
+ "arm,vexpress-sysreg,func", NULL);
+ if (!prop)
+ return ERR_PTR(-EINVAL);
+
+ num = prop->length / sizeof(u32) / 2;
+ val = prop->value;
+
+ /*
+ * "arm,vexpress-energy" function used to be described
+ * by its first device only, now it requires both
+ */
+ if (num == 1 && of_device_is_compatible(dev->of_node,
+ "arm,vexpress-energy")) {
+ num = 2;
+ energy_quirk[0] = *val;
+ energy_quirk[2] = *val++;
+ energy_quirk[1] = *val;
+ energy_quirk[3] = cpu_to_be32(be32_to_cpup(val) + 1);
+ val = energy_quirk;
+ }
+
+ func = kzalloc(struct_size(func, template, num), GFP_KERNEL);
+ if (!func)
+ return ERR_PTR(-ENOMEM);
+
+ func->syscfg = syscfg;
+ func->num_templates = num;
+
+ for (i = 0; i < num; i++) {
+ u32 function, device;
+
+ function = be32_to_cpup(val++);
+ device = be32_to_cpup(val++);
+
+ dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n",
+ func, site, position, dcc,
+ function, device);
+
+ func->template[i] = SYS_CFGCTRL_DCC(dcc);
+ func->template[i] |= SYS_CFGCTRL_SITE(site);
+ func->template[i] |= SYS_CFGCTRL_POSITION(position);
+ func->template[i] |= SYS_CFGCTRL_FUNC(function);
+ func->template[i] |= SYS_CFGCTRL_DEVICE(device);
+ }
+
+ vexpress_syscfg_regmap_config.max_register = num - 1;
+
+ func->regmap = regmap_init(dev, NULL, func,
+ &vexpress_syscfg_regmap_config);
+
+ if (IS_ERR(func->regmap)) {
+ void *err = func->regmap;
+
+ kfree(func);
+ return err;
+ }
+
+ list_add(&func->list, &syscfg->funcs);
+
+ return func->regmap;
+}
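+
+/*
+ * Template example (hypothetical numbers): a node with
+ * arm,vexpress-sysreg,func = <1 0> on site 1, position 0, dcc 0 yields
+ *
+ *   template[0] = SYS_CFGCTRL_SITE(1) | SYS_CFGCTRL_FUNC(1) |
+ *                 SYS_CFGCTRL_DEVICE(0) = 0x00110000
+ *
+ * and vexpress_syscfg_exec() later ORs in SYS_CFGCTRL_START and, for
+ * writes, SYS_CFGCTRL_WRITE before writing SYS_CFGCTRL.
+ */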
+
+static void vexpress_syscfg_regmap_exit(struct regmap *regmap, void *context)
+{
+ struct vexpress_syscfg *syscfg = context;
+ struct vexpress_syscfg_func *func, *tmp;
+
+ regmap_exit(regmap);
+
+ list_for_each_entry_safe(func, tmp, &syscfg->funcs, list) {
+ if (func->regmap == regmap) {
+			list_del(&func->list);
+ kfree(func);
+ break;
+ }
+ }
+}
+
+static struct vexpress_config_bridge_ops vexpress_syscfg_bridge_ops = {
+ .regmap_init = vexpress_syscfg_regmap_init,
+ .regmap_exit = vexpress_syscfg_regmap_exit,
+};
+
+
+static int vexpress_syscfg_probe(struct platform_device *pdev)
+{
+ struct vexpress_syscfg *syscfg;
+ struct vexpress_config_bridge *bridge;
+ struct device_node *node;
+ int master;
+ u32 dt_hbi;
+
+ syscfg = devm_kzalloc(&pdev->dev, sizeof(*syscfg), GFP_KERNEL);
+ if (!syscfg)
+ return -ENOMEM;
+ syscfg->dev = &pdev->dev;
+ INIT_LIST_HEAD(&syscfg->funcs);
+
+ syscfg->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(syscfg->base))
+ return PTR_ERR(syscfg->base);
+
+ bridge = devm_kmalloc(&pdev->dev, sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->ops = &vexpress_syscfg_bridge_ops;
+ bridge->context = syscfg;
+
+ dev_set_drvdata(&pdev->dev, bridge);
+
+ master = readl(syscfg->base + SYS_MISC) & SYS_MISC_MASTERSITE ?
+ VEXPRESS_SITE_DB2 : VEXPRESS_SITE_DB1;
+ vexpress_config_set_master(master);
+
+ /* Confirm board type against DT property, if available */
+ if (of_property_read_u32(of_root, "arm,hbi", &dt_hbi) == 0) {
+ u32 id = readl(syscfg->base + (master == VEXPRESS_SITE_DB1 ?
+ SYS_PROCID0 : SYS_PROCID1));
+ u32 hbi = (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK;
+
+ if (WARN_ON(dt_hbi != hbi))
+ dev_warn(&pdev->dev, "DT HBI (%x) is not matching hardware (%x)!\n",
+ dt_hbi, hbi);
+ }
+
+ for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") {
+ struct device_node *bridge_np;
+
+ bridge_np = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
+ if (bridge_np != pdev->dev.parent->of_node)
+ continue;
+
+ of_platform_populate(node, NULL, NULL, &pdev->dev);
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id vexpress_syscfg_id_table[] = {
+ { "vexpress-syscfg", },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, vexpress_syscfg_id_table);
+
+static struct platform_driver vexpress_syscfg_driver = {
+ .driver.name = "vexpress-syscfg",
+ .id_table = vexpress_syscfg_id_table,
+ .probe = vexpress_syscfg_probe,
+};
+module_platform_driver(vexpress_syscfg_driver);
+MODULE_LICENSE("GPL v2");