Diffstat
-rw-r--r--  arch/arm/mach-mvebu/Kconfig | 134
-rw-r--r--  arch/arm/mach-mvebu/Makefile | 21
-rw-r--r--  arch/arm/mach-mvebu/armada-370-xp.h | 23
-rw-r--r--  arch/arm/mach-mvebu/board-v7.c | 209
-rw-r--r--  arch/arm/mach-mvebu/coherency.c | 305
-rw-r--r--  arch/arm/mach-mvebu/coherency.h | 24
-rw-r--r--  arch/arm/mach-mvebu/coherency_ll.S | 166
-rw-r--r--  arch/arm/mach-mvebu/common.h | 30
-rw-r--r--  arch/arm/mach-mvebu/cpu-reset.c | 104
-rw-r--r--  arch/arm/mach-mvebu/dove.c | 39
-rw-r--r--  arch/arm/mach-mvebu/headsmp-a9.S | 23
-rw-r--r--  arch/arm/mach-mvebu/headsmp.S | 40
-rw-r--r--  arch/arm/mach-mvebu/kirkwood-pm.c | 76
-rw-r--r--  arch/arm/mach-mvebu/kirkwood-pm.h | 26
-rw-r--r--  arch/arm/mach-mvebu/kirkwood.c | 194
-rw-r--r--  arch/arm/mach-mvebu/kirkwood.h | 22
-rw-r--r--  arch/arm/mach-mvebu/mvebu-soc-id.c | 178
-rw-r--r--  arch/arm/mach-mvebu/mvebu-soc-id.h | 54
-rw-r--r--  arch/arm/mach-mvebu/platsmp-a9.c | 114
-rw-r--r--  arch/arm/mach-mvebu/platsmp.c | 258
-rw-r--r--  arch/arm/mach-mvebu/pm-board.c | 152
-rw-r--r--  arch/arm/mach-mvebu/pm.c | 270
-rw-r--r--  arch/arm/mach-mvebu/pmsu.c | 609
-rw-r--r--  arch/arm/mach-mvebu/pmsu.h | 24
-rw-r--r--  arch/arm/mach-mvebu/pmsu_ll.S | 75
-rw-r--r--  arch/arm/mach-mvebu/system-controller.c | 180
26 files changed, 3350 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
new file mode 100644
index 000000000..2c20599cc
--- /dev/null
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -0,0 +1,134 @@
+menuconfig ARCH_MVEBU
+ bool "Marvell Engineering Business Unit (MVEBU) SoCs"
+ depends on ARCH_MULTI_V7 || ARCH_MULTI_V5
+ select ARCH_SUPPORTS_BIG_ENDIAN
+ select CLKSRC_MMIO
+ select PINCTRL
+ select PLAT_ORION
+ select SOC_BUS
+ select MVEBU_MBUS
+ select ZONE_DMA if ARM_LPAE
+ select GPIOLIB
+ select PCI_QUIRKS if PCI
+
+if ARCH_MVEBU
+
+config MACH_MVEBU_ANY
+ bool
+
+config MACH_MVEBU_V7
+ bool
+ select ARMADA_370_XP_TIMER
+ select CACHE_L2X0
+ select ARM_CPU_SUSPEND
+ select MACH_MVEBU_ANY
+ select MVEBU_CLK_COREDIV
+
+config MACH_ARMADA_370
+ bool "Marvell Armada 370 boards"
+ depends on ARCH_MULTI_V7
+ select ARMADA_370_CLK
+ select ARMADA_370_XP_IRQ
+ select CPU_PJ4B
+ select MACH_MVEBU_V7
+ select PINCTRL_ARMADA_370
+ help
+ Say 'Y' here if you want your kernel to support boards based
+ on the Marvell Armada 370 SoC with device tree.
+
+config MACH_ARMADA_375
+ bool "Marvell Armada 375 boards"
+ depends on ARCH_MULTI_V7
+ select ARMADA_370_XP_IRQ
+ select ARM_ERRATA_720789
+ select PL310_ERRATA_753970
+ select ARM_GIC
+ select ARMADA_375_CLK
+ select HAVE_ARM_SCU
+ select HAVE_ARM_TWD if SMP
+ select HAVE_SMP
+ select MACH_MVEBU_V7
+ select PINCTRL_ARMADA_375
+ help
+ Say 'Y' here if you want your kernel to support boards based
+ on the Marvell Armada 375 SoC with device tree.
+
+config MACH_ARMADA_38X
+ bool "Marvell Armada 380/385 boards"
+ depends on ARCH_MULTI_V7
+ select ARM_ERRATA_720789
+ select PL310_ERRATA_753970
+ select ARM_GIC
+ select ARM_GLOBAL_TIMER
+ select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
+ select ARMADA_370_XP_IRQ
+ select ARMADA_38X_CLK
+ select HAVE_ARM_SCU
+ select HAVE_ARM_TWD if SMP
+ select HAVE_SMP
+ select MACH_MVEBU_V7
+ select PINCTRL_ARMADA_38X
+ help
+ Say 'Y' here if you want your kernel to support boards based
+ on the Marvell Armada 380/385 SoC with device tree.
+
+config MACH_ARMADA_39X
+ bool "Marvell Armada 39x boards"
+ depends on ARCH_MULTI_V7
+ select ARM_GIC
+ select ARMADA_370_XP_IRQ
+ select ARMADA_39X_CLK
+ select CACHE_L2X0
+ select HAVE_ARM_SCU
+ select HAVE_ARM_TWD if SMP
+ select HAVE_SMP
+ select MACH_MVEBU_V7
+ select PINCTRL_ARMADA_39X
+ help
+ Say 'Y' here if you want your kernel to support boards based
+ on the Marvell Armada 39x SoC with device tree.
+
+config MACH_ARMADA_XP
+ bool "Marvell Armada XP boards"
+ depends on ARCH_MULTI_V7
+ select ARMADA_370_XP_IRQ
+ select ARMADA_XP_CLK
+ select CPU_PJ4B
+ select MACH_MVEBU_V7
+ select PINCTRL_ARMADA_XP
+ help
+ Say 'Y' here if you want your kernel to support boards based
+ on the Marvell Armada XP SoC with device tree.
+
+config MACH_DOVE
+ bool "Marvell Dove boards"
+ depends on ARCH_MULTI_V7
+ select CACHE_L2X0
+ select CPU_PJ4
+ select DOVE_CLK
+ select MACH_MVEBU_ANY
+ select ORION_IRQCHIP
+ select ORION_TIMER
+ select PM_GENERIC_DOMAINS if PM
+ select PINCTRL_DOVE
+ help
+ Say 'Y' here if you want your kernel to support the
+ Marvell Dove using flattened device tree.
+
+config MACH_KIRKWOOD
+ bool "Marvell Kirkwood boards"
+ depends on ARCH_MULTI_V5
+ select CPU_FEROCEON
+ select GPIOLIB
+ select KIRKWOOD_CLK
+ select MACH_MVEBU_ANY
+ select ORION_IRQCHIP
+ select ORION_TIMER
+ select PCI
+ select PCI_QUIRKS
+ select PINCTRL_KIRKWOOD
+ help
+ Say 'Y' here if you want your kernel to support boards based
+ on the Marvell Kirkwood SoC, using the device tree.
+
+endif
diff --git a/arch/arm/mach-mvebu/Makefile b/arch/arm/mach-mvebu/Makefile
new file mode 100644
index 000000000..cb106899d
--- /dev/null
+++ b/arch/arm/mach-mvebu/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-orion/include
+
+AFLAGS_coherency_ll.o := -Wa,-march=armv7-a
+CFLAGS_pmsu.o := -march=armv7-a
+
+obj-$(CONFIG_MACH_MVEBU_ANY) += system-controller.o mvebu-soc-id.o
+
+ifeq ($(CONFIG_MACH_MVEBU_V7),y)
+obj-y += cpu-reset.o board-v7.o coherency.o coherency_ll.o pmsu.o pmsu_ll.o
+
+obj-$(CONFIG_PM) += pm.o pm-board.o
+obj-$(CONFIG_SMP) += platsmp.o headsmp.o platsmp-a9.o headsmp-a9.o
+endif
+
+obj-$(CONFIG_MACH_DOVE) += dove.o
+
+ifeq ($(CONFIG_MACH_KIRKWOOD),y)
+obj-y += kirkwood.o
+obj-$(CONFIG_PM) += kirkwood-pm.o
+endif
diff --git a/arch/arm/mach-mvebu/armada-370-xp.h b/arch/arm/mach-mvebu/armada-370-xp.h
new file mode 100644
index 000000000..09413b678
--- /dev/null
+++ b/arch/arm/mach-mvebu/armada-370-xp.h
@@ -0,0 +1,23 @@
+/*
+ * Generic definitions for Marvell Armada 370/XP SoCs
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_ARMADA_370_XP_H
+#define __MACH_ARMADA_370_XP_H
+
+#ifdef CONFIG_SMP
+void armada_xp_secondary_startup(void);
+extern const struct smp_operations armada_xp_smp_ops;
+#endif
+
+#endif /* __MACH_ARMADA_370_XP_H */
diff --git a/arch/arm/mach-mvebu/board-v7.c b/arch/arm/mach-mvebu/board-v7.c
new file mode 100644
index 000000000..ccca95173
--- /dev/null
+++ b/arch/arm/mach-mvebu/board-v7.c
@@ -0,0 +1,209 @@
+/*
+ * Device Tree support for Armada 370 and XP platforms.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+#include <linux/io.h>
+#include <linux/clocksource.h>
+#include <linux/dma-mapping.h>
+#include <linux/memblock.h>
+#include <linux/mbus.h>
+#include <linux/slab.h>
+#include <linux/irqchip.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/time.h>
+#include <asm/smp_scu.h>
+#include "armada-370-xp.h"
+#include "common.h"
+#include "coherency.h"
+#include "mvebu-soc-id.h"
+
+static void __iomem *scu_base;
+
+/*
+ * Enables the SCU when available. Obviously, this is only useful on
+ * Cortex-A based SOCs, not on PJ4B based ones.
+ */
+static void __init mvebu_scu_enable(void)
+{
+ struct device_node *np =
+ of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
+ if (np) {
+ scu_base = of_iomap(np, 0);
+ scu_enable(scu_base);
+ of_node_put(np);
+ }
+}
+
+void __iomem *mvebu_get_scu_base(void)
+{
+ return scu_base;
+}
+
+/*
+ * When returning from suspend, the platform goes through the
+ * bootloader, which executes its DDR3 training code. This code has
+ * the unfortunate idea of using the first 10 KB of each DRAM bank to
+ * exercise the RAM and calculate the optimal timings. Therefore, this
+ * area of RAM is overwritten, and shouldn't be used by the kernel if
+ * suspend/resume is supported.
+ */
+
+#ifdef CONFIG_SUSPEND
+#define MVEBU_DDR_TRAINING_AREA_SZ (10 * SZ_1K)
+static int __init mvebu_scan_mem(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+ const __be32 *reg, *endp;
+ int l;
+
+ if (type == NULL || strcmp(type, "memory"))
+ return 0;
+
+ reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
+ if (reg == NULL)
+ reg = of_get_flat_dt_prop(node, "reg", &l);
+ if (reg == NULL)
+ return 0;
+
+ endp = reg + (l / sizeof(__be32));
+ while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
+ u64 base, size;
+
+ base = dt_mem_next_cell(dt_root_addr_cells, &reg);
+ size = dt_mem_next_cell(dt_root_size_cells, &reg);
+
+ memblock_reserve(base, MVEBU_DDR_TRAINING_AREA_SZ);
+ }
+
+ return 0;
+}
+
+static void __init mvebu_memblock_reserve(void)
+{
+ of_scan_flat_dt(mvebu_scan_mem, NULL);
+}
+#else
+static void __init mvebu_memblock_reserve(void) {}
+#endif
+
+static void __init mvebu_init_irq(void)
+{
+ irqchip_init();
+ mvebu_scu_enable();
+ coherency_init();
+ BUG_ON(mvebu_mbus_dt_init(coherency_available()));
+}
+
+static void __init i2c_quirk(void)
+{
+ struct device_node *np;
+ u32 dev, rev;
+
+ /*
+ * Only revisions more recent than A0 support the offload
+ * mechanism. We can exit only if we are sure that we can
+ * get the SoC revision and it is more recent than A0.
+ */
+ if (mvebu_get_soc_id(&dev, &rev) == 0 && rev > MV78XX0_A0_REV)
+ return;
+
+ for_each_compatible_node(np, NULL, "marvell,mv78230-i2c") {
+ struct property *new_compat;
+
+ new_compat = kzalloc(sizeof(*new_compat), GFP_KERNEL);
+
+ new_compat->name = kstrdup("compatible", GFP_KERNEL);
+ new_compat->length = sizeof("marvell,mv78230-a0-i2c");
+ new_compat->value = kstrdup("marvell,mv78230-a0-i2c",
+ GFP_KERNEL);
+
+ of_update_property(np, new_compat);
+ }
+ return;
+}
+
+static void __init mvebu_dt_init(void)
+{
+ if (of_machine_is_compatible("marvell,armadaxp"))
+ i2c_quirk();
+}
+
+static const char * const armada_370_xp_dt_compat[] __initconst = {
+ "marvell,armada-370-xp",
+ NULL,
+};
+
+DT_MACHINE_START(ARMADA_370_XP_DT, "Marvell Armada 370/XP (Device Tree)")
+ .l2c_aux_val = 0,
+ .l2c_aux_mask = ~0,
+/*
+ * The following field (.smp) is still needed to ensure backward
+ * compatibility with old Device Trees that were not specifying the
+ * cpus enable-method property.
+ */
+ .smp = smp_ops(armada_xp_smp_ops),
+ .init_machine = mvebu_dt_init,
+ .init_irq = mvebu_init_irq,
+ .restart = mvebu_restart,
+ .reserve = mvebu_memblock_reserve,
+ .dt_compat = armada_370_xp_dt_compat,
+MACHINE_END
+
+static const char * const armada_375_dt_compat[] __initconst = {
+ "marvell,armada375",
+ NULL,
+};
+
+DT_MACHINE_START(ARMADA_375_DT, "Marvell Armada 375 (Device Tree)")
+ .l2c_aux_val = 0,
+ .l2c_aux_mask = ~0,
+ .init_irq = mvebu_init_irq,
+ .init_machine = mvebu_dt_init,
+ .restart = mvebu_restart,
+ .dt_compat = armada_375_dt_compat,
+MACHINE_END
+
+static const char * const armada_38x_dt_compat[] __initconst = {
+ "marvell,armada380",
+ "marvell,armada385",
+ NULL,
+};
+
+DT_MACHINE_START(ARMADA_38X_DT, "Marvell Armada 380/385 (Device Tree)")
+ .l2c_aux_val = 0,
+ .l2c_aux_mask = ~0,
+ .init_irq = mvebu_init_irq,
+ .restart = mvebu_restart,
+ .dt_compat = armada_38x_dt_compat,
+MACHINE_END
+
+static const char * const armada_39x_dt_compat[] __initconst = {
+ "marvell,armada390",
+ "marvell,armada398",
+ NULL,
+};
+
+DT_MACHINE_START(ARMADA_39X_DT, "Marvell Armada 39x (Device Tree)")
+ .l2c_aux_val = 0,
+ .l2c_aux_mask = ~0,
+ .init_irq = mvebu_init_irq,
+ .restart = mvebu_restart,
+ .dt_compat = armada_39x_dt_compat,
+MACHINE_END
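The i2c_quirk() above works around a limitation of pre-A0 Armada XP silicon by rewriting the compatible string of every mv78230 I2C node at runtime; note that it does not check its allocations. Below is only a sketch of the same of_update_property() pattern with error handling added — the helper name and return-code policy are assumptions for illustration, not part of this patch:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical helper (not in this patch): replace a node's
 * "compatible" property at runtime, checking every allocation. */
static int set_node_compatible(struct device_node *np, const char *compat)
{
	struct property *prop;

	prop = kzalloc(sizeof(*prop), GFP_KERNEL);
	if (!prop)
		return -ENOMEM;

	prop->name = kstrdup("compatible", GFP_KERNEL);
	prop->value = kstrdup(compat, GFP_KERNEL);
	prop->length = strlen(compat) + 1;
	if (!prop->name || !prop->value) {
		kfree(prop->name);
		kfree(prop->value);
		kfree(prop);
		return -ENOMEM;
	}

	return of_update_property(np, prop);
}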
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
new file mode 100644
index 000000000..8f8748a0c
--- /dev/null
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -0,0 +1,305 @@
+/*
+ * Coherency fabric (Aurora) support for Armada 370, 375, 38x and XP
+ * platforms.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Yehuda Yitschak <yehuday@marvell.com>
+ * Gregory Clement <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * The Armada 370, 375, 38x and XP SOCs have a coherency fabric which is
+ * responsible for ensuring hardware coherency between all CPUs and between
+ * CPUs and I/O masters. This file initializes the coherency fabric and
+ * supplies basic routines for configuring and controlling hardware coherency.
+ */
+
+#define pr_fmt(fmt) "mvebu-coherency: " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/smp.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mbus.h>
+#include <linux/pci.h>
+#include <asm/smp_plat.h>
+#include <asm/cacheflush.h>
+#include <asm/mach/map.h>
+#include <asm/dma-mapping.h>
+#include "coherency.h"
+#include "mvebu-soc-id.h"
+
+unsigned long coherency_phys_base;
+void __iomem *coherency_base;
+static void __iomem *coherency_cpu_base;
+static void __iomem *cpu_config_base;
+
+/* Coherency fabric registers */
+#define IO_SYNC_BARRIER_CTL_OFFSET 0x0
+
+enum {
+ COHERENCY_FABRIC_TYPE_NONE,
+ COHERENCY_FABRIC_TYPE_ARMADA_370_XP,
+ COHERENCY_FABRIC_TYPE_ARMADA_375,
+ COHERENCY_FABRIC_TYPE_ARMADA_380,
+};
+
+static const struct of_device_id of_coherency_table[] = {
+ {.compatible = "marvell,coherency-fabric",
+ .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_370_XP },
+ {.compatible = "marvell,armada-375-coherency-fabric",
+ .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_375 },
+ {.compatible = "marvell,armada-380-coherency-fabric",
+ .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_380 },
+ { /* end of list */ },
+};
+
+/* Functions defined in coherency_ll.S */
+int ll_enable_coherency(void);
+void ll_add_cpu_to_smp_group(void);
+
+#define CPU_CONFIG_SHARED_L2 BIT(16)
+
+/*
+ * Disable the "Shared L2 Present" bit in CPU Configuration register
+ * on Armada XP.
+ *
+ * The "Shared L2 Present" bit affects the "level of coherence" value
+ * in the clidr CP15 register. Cache operation functions such as
+ * "flush all" and "invalidate all" operate on all the cache levels
+ * that are included in the defined level of coherence. When HW I/O
+ * coherency is used, this bit causes unnecessary flushes of the L2
+ * cache.
+ */
+static void armada_xp_clear_shared_l2(void)
+{
+ u32 reg;
+
+ if (!cpu_config_base)
+ return;
+
+ reg = readl(cpu_config_base);
+ reg &= ~CPU_CONFIG_SHARED_L2;
+ writel(reg, cpu_config_base);
+}
+
+static int mvebu_hwcc_notifier(struct notifier_block *nb,
+ unsigned long event, void *__dev)
+{
+ struct device *dev = __dev;
+
+ if (event != BUS_NOTIFY_ADD_DEVICE)
+ return NOTIFY_DONE;
+ set_dma_ops(dev, &arm_coherent_dma_ops);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block mvebu_hwcc_nb = {
+ .notifier_call = mvebu_hwcc_notifier,
+};
+
+static struct notifier_block mvebu_hwcc_pci_nb __maybe_unused = {
+ .notifier_call = mvebu_hwcc_notifier,
+};
+
+static int armada_xp_clear_l2_starting(unsigned int cpu)
+{
+ armada_xp_clear_shared_l2();
+ return 0;
+}
+
+static void __init armada_370_coherency_init(struct device_node *np)
+{
+ struct resource res;
+ struct device_node *cpu_config_np;
+
+ of_address_to_resource(np, 0, &res);
+ coherency_phys_base = res.start;
+ /*
+ * Ensure secondary CPUs will see the updated value,
+ * which they read before they join the coherency
+ * fabric, and therefore before they are coherent with
+ * the boot CPU cache.
+ */
+ sync_cache_w(&coherency_phys_base);
+ coherency_base = of_iomap(np, 0);
+ coherency_cpu_base = of_iomap(np, 1);
+
+ cpu_config_np = of_find_compatible_node(NULL, NULL,
+ "marvell,armada-xp-cpu-config");
+ if (!cpu_config_np)
+ goto exit;
+
+ cpu_config_base = of_iomap(cpu_config_np, 0);
+ if (!cpu_config_base) {
+ of_node_put(cpu_config_np);
+ goto exit;
+ }
+
+ of_node_put(cpu_config_np);
+
+ cpuhp_setup_state_nocalls(CPUHP_AP_ARM_MVEBU_COHERENCY,
+ "arm/mvebu/coherency:starting",
+ armada_xp_clear_l2_starting, NULL);
+exit:
+ set_cpu_coherent();
+}
+
+/*
+ * This ioremap hook is used on Armada 375/38x to ensure that all MMIO
+ * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is
+ * needed for the HW I/O coherency mechanism to work properly without
+ * deadlock.
+ */
+static void __iomem *
+armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
+ unsigned int mtype, void *caller)
+{
+ mtype = MT_UNCACHED;
+ return __arm_ioremap_caller(phys_addr, size, mtype, caller);
+}
+
+static void __init armada_375_380_coherency_init(struct device_node *np)
+{
+ struct device_node *cache_dn;
+
+ coherency_cpu_base = of_iomap(np, 0);
+ arch_ioremap_caller = armada_wa_ioremap_caller;
+ pci_ioremap_set_mem_type(MT_UNCACHED);
+
+ /*
+ * We should switch the PL310 to I/O coherency mode only if
+ * I/O coherency is actually enabled.
+ */
+ if (!coherency_available())
+ return;
+
+ /*
+ * Add the PL310 property "arm,io-coherent". This makes sure the
+ * outer sync operation is not used, which allows working
+ * around the system erratum that causes deadlocks when
+ * doing PCIe in an SMP situation on Armada 375 and Armada
+ * 38x.
+ */
+ for_each_compatible_node(cache_dn, NULL, "arm,pl310-cache") {
+ struct property *p;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ p->name = kstrdup("arm,io-coherent", GFP_KERNEL);
+ of_add_property(cache_dn, p);
+ }
+}
+
+static int coherency_type(void)
+{
+ struct device_node *np;
+ const struct of_device_id *match;
+ int type;
+
+ /*
+ * The coherency fabric is needed:
+ * - For coherency between processors on Armada XP, so only
+ * when SMP is enabled.
+ * - For coherency between the processor and I/O devices, but
+ * this coherency requires many pre-requisites (write
+ * allocate cache policy, shareable pages, SMP bit set) that
+ * are only meaningful in SMP situations.
+ *
+ * Note that this means that on Armada 370, there is currently
+ * no way to use hardware I/O coherency, because even when
+ * CONFIG_SMP is enabled, is_smp() returns false due to the
+ * Armada 370 being a single-core processor. To lift this
+ * limitation, we would have to find a way to make the cache
+ * policy set to write-allocate (on all Armada SoCs), and to
+ * set the shareable attribute in page tables (on all Armada
+ * SoCs except the Armada 370). Unfortunately, such decisions
+ * are taken very early in the kernel boot process, at a point
+ * where we don't know yet on which SoC we are running.
+
+ */
+ if (!is_smp())
+ return COHERENCY_FABRIC_TYPE_NONE;
+
+ np = of_find_matching_node_and_match(NULL, of_coherency_table, &match);
+ if (!np)
+ return COHERENCY_FABRIC_TYPE_NONE;
+
+ type = (int) match->data;
+
+ of_node_put(np);
+
+ return type;
+}
+
+int set_cpu_coherent(void)
+{
+ int type = coherency_type();
+
+ if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP) {
+ if (!coherency_base) {
+ pr_warn("Can't make current CPU cache coherent.\n");
+ pr_warn("Coherency fabric is not initialized\n");
+ return 1;
+ }
+
+ armada_xp_clear_shared_l2();
+ ll_add_cpu_to_smp_group();
+ return ll_enable_coherency();
+ }
+
+ return 0;
+}
+
+int coherency_available(void)
+{
+ return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
+}
+
+int __init coherency_init(void)
+{
+ int type = coherency_type();
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, of_coherency_table);
+
+ if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP)
+ armada_370_coherency_init(np);
+ else if (type == COHERENCY_FABRIC_TYPE_ARMADA_375 ||
+ type == COHERENCY_FABRIC_TYPE_ARMADA_380)
+ armada_375_380_coherency_init(np);
+
+ of_node_put(np);
+
+ return 0;
+}
+
+static int __init coherency_late_init(void)
+{
+ if (coherency_available())
+ bus_register_notifier(&platform_bus_type,
+ &mvebu_hwcc_nb);
+ return 0;
+}
+
+postcore_initcall(coherency_late_init);
+
+#if IS_ENABLED(CONFIG_PCI)
+static int __init coherency_pci_init(void)
+{
+ if (coherency_available())
+ bus_register_notifier(&pci_bus_type,
+ &mvebu_hwcc_pci_nb);
+ return 0;
+}
+
+arch_initcall(coherency_pci_init);
+#endif
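The platform-bus and PCI-bus notifiers registered above install arm_coherent_dma_ops on every device added while hardware I/O coherency is available, so drivers get coherent DMA buffers without manual cache maintenance. A minimal illustration of the driver-side effect follows; the wrapper is invented for the example, only dma_alloc_coherent() itself is a real API:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative only: on a device covered by the notifier above, the
 * buffer returned here is visible to I/O masters without any explicit
 * flush or invalidate in the driver. */
static void *alloc_shared_buffer(struct device *dev, size_t len,
				 dma_addr_t *dma)
{
	return dma_alloc_coherent(dev, len, dma, GFP_KERNEL);
}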
diff --git a/arch/arm/mach-mvebu/coherency.h b/arch/arm/mach-mvebu/coherency.h
new file mode 100644
index 000000000..6067f1426
--- /dev/null
+++ b/arch/arm/mach-mvebu/coherency.h
@@ -0,0 +1,24 @@
+/*
+ * arch/arm/mach-mvebu/coherency.h
+ *
+ *
+ * Coherency fabric (Aurora) support for Armada 370 and XP platforms.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_370_XP_COHERENCY_H
+#define __MACH_370_XP_COHERENCY_H
+
+extern void __iomem *coherency_base; /* for coherency_ll.S */
+extern unsigned long coherency_phys_base;
+int set_cpu_coherent(void);
+
+int coherency_init(void);
+int coherency_available(void);
+
+#endif /* __MACH_370_XP_COHERENCY_H */
diff --git a/arch/arm/mach-mvebu/coherency_ll.S b/arch/arm/mach-mvebu/coherency_ll.S
new file mode 100644
index 000000000..8b2fbc8b6
--- /dev/null
+++ b/arch/arm/mach-mvebu/coherency_ll.S
@@ -0,0 +1,166 @@
+/*
+ * Coherency fabric: low level functions
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * This file implements the assembly function to add a CPU to the
+ * coherency fabric. This function is called by each of the secondary
+ * CPUs during their early boot in an SMP kernel, which is why this
+ * function has to be callable from assembly. It can also be called by a
+ * primary CPU from C code during its boot.
+ */
+
+#include <linux/linkage.h>
+#define ARMADA_XP_CFB_CTL_REG_OFFSET 0x0
+#define ARMADA_XP_CFB_CFG_REG_OFFSET 0x4
+
+#include <asm/assembler.h>
+#include <asm/cp15.h>
+
+ .text
+/*
+ * Returns the coherency base address in r1 (r0 is untouched), or 0 if
+ * the coherency fabric is not enabled.
+ */
+ENTRY(ll_get_coherency_base)
+ mrc p15, 0, r1, c1, c0, 0
+ tst r1, #CR_M @ Check MMU bit enabled
+ bne 1f
+
+ /*
+ * MMU is disabled, use the physical address of the coherency
+ * base address. However, if the coherency fabric isn't mapped
+ * (i.e. its virtual address is zero), it means coherency is
+ * not enabled, so we return 0.
+ */
+ ldr r1, =coherency_base
+ cmp r1, #0
+ beq 2f
+ adr r1, 3f
+ ldr r3, [r1]
+ ldr r1, [r1, r3]
+ b 2f
+1:
+ /*
+ * MMU is enabled, use the virtual address of the coherency
+ * base address.
+ */
+ ldr r1, =coherency_base
+ ldr r1, [r1]
+2:
+ ret lr
+ENDPROC(ll_get_coherency_base)
+
+/*
+ * Returns the coherency CPU mask in r3 (r0 is untouched). This
+ * coherency CPU mask can be used with the coherency fabric
+ * configuration and control registers. Note that the mask is already
+ * endian-swapped as appropriate so that the calling functions do not
+ * have to care about endianness issues while accessing the coherency
+ * fabric registers
+ */
+ENTRY(ll_get_coherency_cpumask)
+ mrc 15, 0, r3, cr0, cr0, 5
+ and r3, r3, #15
+ mov r2, #(1 << 24)
+ lsl r3, r2, r3
+ARM_BE8(rev r3, r3)
+ ret lr
+ENDPROC(ll_get_coherency_cpumask)
+
+/*
+ * ll_add_cpu_to_smp_group(), ll_enable_coherency() and
+ * ll_disable_coherency() use the strex/ldrex instructions while the
+ * MMU can be disabled. The Armada XP SoC has an exclusive monitor
+ * that tracks transactions to Device and/or SO memory and thanks to
+ * that, exclusive transactions are functional even when the MMU is
+ * disabled.
+ */
+
+ENTRY(ll_add_cpu_to_smp_group)
+ /*
+ * As r0 is not modified by ll_get_coherency_base() and
+ * ll_get_coherency_cpumask(), we use it to temporarily save lr
+ * and avoid it being modified by the branch and link
+ * calls. This function is used very early in the secondary
+ * CPU boot, and no stack is available at this point.
+ */
+ mov r0, lr
+ bl ll_get_coherency_base
+ /* Bail out if the coherency is not enabled */
+ cmp r1, #0
+ reteq r0
+ bl ll_get_coherency_cpumask
+ mov lr, r0
+ add r0, r1, #ARMADA_XP_CFB_CFG_REG_OFFSET
+1:
+ ldrex r2, [r0]
+ orr r2, r2, r3
+ strex r1, r2, [r0]
+ cmp r1, #0
+ bne 1b
+ ret lr
+ENDPROC(ll_add_cpu_to_smp_group)
+
+ENTRY(ll_enable_coherency)
+ /*
+ * As r0 is not modified by ll_get_coherency_base() and
+ * ll_get_coherency_cpumask(), we use it to temporarily save lr
+ * and avoid it being modified by the branch and link
+ * calls. This function is used very early in the secondary
+ * CPU boot, and no stack is available at this point.
+ */
+ mov r0, lr
+ bl ll_get_coherency_base
+ /* Bail out if the coherency is not enabled */
+ cmp r1, #0
+ reteq r0
+ bl ll_get_coherency_cpumask
+ mov lr, r0
+ add r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
+1:
+ ldrex r2, [r0]
+ orr r2, r2, r3
+ strex r1, r2, [r0]
+ cmp r1, #0
+ bne 1b
+ dsb
+ mov r0, #0
+ ret lr
+ENDPROC(ll_enable_coherency)
+
+ENTRY(ll_disable_coherency)
+ /*
+ * As r0 is not modified by ll_get_coherency_base() and
+ * ll_get_coherency_cpumask(), we use it to temporarily save lr
+ * and avoid it being modified by the branch and link
+ * calls. This function is used very early in the secondary
+ * CPU boot, and no stack is available at this point.
+ */
+ mov r0, lr
+ bl ll_get_coherency_base
+ /* Bail out if the coherency is not enabled */
+ cmp r1, #0
+ reteq r0
+ bl ll_get_coherency_cpumask
+ mov lr, r0
+ add r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
+1:
+ ldrex r2, [r0]
+ bic r2, r2, r3
+ strex r1, r2, [r0]
+ cmp r1, #0
+ bne 1b
+ dsb
+ ret lr
+ENDPROC(ll_disable_coherency)
+
+ .align 2
+3:
+ .long coherency_phys_base - .
diff --git a/arch/arm/mach-mvebu/common.h b/arch/arm/mach-mvebu/common.h
new file mode 100644
index 000000000..6b775492c
--- /dev/null
+++ b/arch/arm/mach-mvebu/common.h
@@ -0,0 +1,30 @@
+/*
+ * Core functions for Marvell System On Chip
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __ARCH_MVEBU_COMMON_H
+#define __ARCH_MVEBU_COMMON_H
+
+#include <linux/reboot.h>
+
+void mvebu_restart(enum reboot_mode mode, const char *cmd);
+int mvebu_cpu_reset_deassert(int cpu);
+void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr);
+void mvebu_system_controller_set_cpu_boot_addr(void *boot_addr);
+int mvebu_system_controller_get_soc_id(u32 *dev, u32 *rev);
+
+void __iomem *mvebu_get_scu_base(void);
+
+int mvebu_pm_suspend_init(void (*board_pm_enter)(void __iomem *sdram_reg,
+ u32 srcmd));
+#endif
diff --git a/arch/arm/mach-mvebu/cpu-reset.c b/arch/arm/mach-mvebu/cpu-reset.c
new file mode 100644
index 000000000..f33a31c6a
--- /dev/null
+++ b/arch/arm/mach-mvebu/cpu-reset.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2014 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "mvebu-cpureset: " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/resource.h>
+
+#include "common.h"
+
+static void __iomem *cpu_reset_base;
+static size_t cpu_reset_size;
+
+#define CPU_RESET_OFFSET(cpu) (cpu * 0x8)
+#define CPU_RESET_ASSERT BIT(0)
+
+int mvebu_cpu_reset_deassert(int cpu)
+{
+ u32 reg;
+
+ if (!cpu_reset_base)
+ return -ENODEV;
+
+ if (CPU_RESET_OFFSET(cpu) >= cpu_reset_size)
+ return -EINVAL;
+
+ reg = readl(cpu_reset_base + CPU_RESET_OFFSET(cpu));
+ reg &= ~CPU_RESET_ASSERT;
+ writel(reg, cpu_reset_base + CPU_RESET_OFFSET(cpu));
+
+ return 0;
+}
+
+static int mvebu_cpu_reset_map(struct device_node *np, int res_idx)
+{
+ struct resource res;
+
+ if (of_address_to_resource(np, res_idx, &res)) {
+ pr_err("unable to get resource\n");
+ return -ENOENT;
+ }
+
+ if (!request_mem_region(res.start, resource_size(&res),
+ np->full_name)) {
+ pr_err("unable to request region\n");
+ return -EBUSY;
+ }
+
+ cpu_reset_base = ioremap(res.start, resource_size(&res));
+ if (!cpu_reset_base) {
+ pr_err("unable to map registers\n");
+ release_mem_region(res.start, resource_size(&res));
+ return -ENOMEM;
+ }
+
+ cpu_reset_size = resource_size(&res);
+
+ return 0;
+}
+
+static int __init mvebu_cpu_reset_init(void)
+{
+ struct device_node *np;
+ int res_idx;
+ int ret;
+
+ np = of_find_compatible_node(NULL, NULL,
+ "marvell,armada-370-cpu-reset");
+ if (np) {
+ res_idx = 0;
+ } else {
+ /*
+ * This code is kept for backward compatibility with
+ * old Device Trees.
+ */
+ np = of_find_compatible_node(NULL, NULL,
+ "marvell,armada-370-xp-pmsu");
+ if (np) {
+ pr_warn(FW_WARN "deprecated pmsu binding\n");
+ res_idx = 1;
+ }
+ }
+
+ /* No reset node found */
+ if (!np)
+ return -ENODEV;
+
+ ret = mvebu_cpu_reset_map(np, res_idx);
+ of_node_put(np);
+
+ return ret;
+}
+
+early_initcall(mvebu_cpu_reset_init);
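mvebu_cpu_reset_deassert() above releases a core from reset by clearing bit 0 of its per-CPU register (one 32-bit register every 0x8 bytes in the cpu-reset block); platsmp.c relies on it to bring up secondary CPUs. The assert direction is not needed by this patch; a hypothetical counterpart, shown only to illustrate the register layout, would set the same bit:

/* Hypothetical, not part of this patch: put a CPU back into reset by
 * setting the assert bit of its per-CPU reset register. */
static int __maybe_unused mvebu_cpu_reset_assert(int cpu)
{
	u32 reg;

	if (!cpu_reset_base)
		return -ENODEV;

	if (CPU_RESET_OFFSET(cpu) >= cpu_reset_size)
		return -EINVAL;

	reg = readl(cpu_reset_base + CPU_RESET_OFFSET(cpu));
	writel(reg | CPU_RESET_ASSERT, cpu_reset_base + CPU_RESET_OFFSET(cpu));

	return 0;
}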
diff --git a/arch/arm/mach-mvebu/dove.c b/arch/arm/mach-mvebu/dove.c
new file mode 100644
index 000000000..d076c5771
--- /dev/null
+++ b/arch/arm/mach-mvebu/dove.c
@@ -0,0 +1,39 @@
+/*
+ * arch/arm/mach-mvebu/dove.c
+ *
+ * Marvell Dove 88AP510 System On Chip FDT Board
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/mbus.h>
+#include <linux/of.h>
+#include <linux/soc/dove/pmu.h>
+#include <asm/hardware/cache-tauros2.h>
+#include <asm/mach/arch.h>
+#include "common.h"
+
+static void __init dove_init(void)
+{
+ pr_info("Dove 88AP510 SoC\n");
+
+#ifdef CONFIG_CACHE_TAUROS2
+ tauros2_init(0);
+#endif
+ BUG_ON(mvebu_mbus_dt_init(false));
+ dove_init_pmu();
+}
+
+static const char * const dove_dt_compat[] __initconst = {
+ "marvell,dove",
+ NULL
+};
+
+DT_MACHINE_START(DOVE_DT, "Marvell Dove")
+ .init_machine = dove_init,
+ .restart = mvebu_restart,
+ .dt_compat = dove_dt_compat,
+MACHINE_END
diff --git a/arch/arm/mach-mvebu/headsmp-a9.S b/arch/arm/mach-mvebu/headsmp-a9.S
new file mode 100644
index 000000000..b093a196e
--- /dev/null
+++ b/arch/arm/mach-mvebu/headsmp-a9.S
@@ -0,0 +1,23 @@
+/*
+ * SMP support: Entry point for secondary CPUs of Marvell EBU
+ * Cortex-A9 based SOCs (Armada 375 and Armada 38x).
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+
+ENTRY(mvebu_cortex_a9_secondary_startup)
+ARM_BE8(setend be)
+ bl armada_38x_scu_power_up
+ b secondary_startup
+ENDPROC(mvebu_cortex_a9_secondary_startup)
diff --git a/arch/arm/mach-mvebu/headsmp.S b/arch/arm/mach-mvebu/headsmp.S
new file mode 100644
index 000000000..2c4032e36
--- /dev/null
+++ b/arch/arm/mach-mvebu/headsmp.S
@@ -0,0 +1,40 @@
+/*
+ * SMP support: Entry point for secondary CPUs
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Yehuda Yitschak <yehuday@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * This file implements the assembly entry point for secondary CPUs in
+ * an SMP kernel. The only thing we need to do is to add the CPU to
+ * the coherency fabric by writing to 2 registers. Currently the base
+ * register addresses are hard coded due to the early initialisation
+ * problems.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#include <asm/assembler.h>
+
+/*
+ * Armada XP specific entry point for secondary CPUs.
+ * We add the CPU to the coherency fabric and then jump to secondary
+ * startup
+ */
+ENTRY(armada_xp_secondary_startup)
+ ARM_BE8(setend be ) @ go BE8 if entered LE
+
+ bl ll_add_cpu_to_smp_group
+
+ bl ll_enable_coherency
+
+ b secondary_startup
+
+ENDPROC(armada_xp_secondary_startup)
diff --git a/arch/arm/mach-mvebu/kirkwood-pm.c b/arch/arm/mach-mvebu/kirkwood-pm.c
new file mode 100644
index 000000000..1e1f879f1
--- /dev/null
+++ b/arch/arm/mach-mvebu/kirkwood-pm.c
@@ -0,0 +1,76 @@
+/*
+ * Power Management driver for Marvell Kirkwood SoCs
+ *
+ * Copyright (C) 2013 Ezequiel Garcia <ezequiel@free-electrons.com>
+ * Copyright (C) 2010 Simon Guinot <sguinot@lacie.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License,
+ * version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/suspend.h>
+#include <linux/io.h>
+#include "kirkwood.h"
+#include "kirkwood-pm.h"
+
+static void __iomem *ddr_operation_base;
+static void __iomem *memory_pm_ctrl;
+
+static void kirkwood_low_power(void)
+{
+ u32 mem_pm_ctrl;
+
+ mem_pm_ctrl = readl(memory_pm_ctrl);
+
+ /* Set peripherals to low-power mode */
+ writel_relaxed(~0, memory_pm_ctrl);
+
+ /* Set DDR in self-refresh */
+ writel_relaxed(0x7, ddr_operation_base);
+
+ /*
+ * Set CPU in wait-for-interrupt state.
+ * This disables the CPU core clocks,
+ * the array clocks, and also the L2 controller.
+ */
+ cpu_do_idle();
+
+ writel_relaxed(mem_pm_ctrl, memory_pm_ctrl);
+}
+
+static int kirkwood_suspend_enter(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ kirkwood_low_power();
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int kirkwood_pm_valid_standby(suspend_state_t state)
+{
+ return state == PM_SUSPEND_STANDBY;
+}
+
+static const struct platform_suspend_ops kirkwood_suspend_ops = {
+ .enter = kirkwood_suspend_enter,
+ .valid = kirkwood_pm_valid_standby,
+};
+
+void __init kirkwood_pm_init(void)
+{
+ ddr_operation_base = ioremap(DDR_OPERATION_BASE, 4);
+ memory_pm_ctrl = ioremap(MEMORY_PM_CTRL_PHYS, 4);
+
+ suspend_set_ops(&kirkwood_suspend_ops);
+}
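kirkwood_pm_init() above maps two 4-byte bridge registers and registers standby-only suspend operations without checking whether either ioremap() succeeded. A sketch of the same initialisation with the checks added — purely illustrative, not what the patch does:

void __init kirkwood_pm_init(void)
{
	ddr_operation_base = ioremap(DDR_OPERATION_BASE, 4);
	memory_pm_ctrl = ioremap(MEMORY_PM_CTRL_PHYS, 4);

	if (!ddr_operation_base || !memory_pm_ctrl) {
		pr_warn("kirkwood-pm: cannot map PM registers, standby disabled\n");
		if (ddr_operation_base)
			iounmap(ddr_operation_base);
		if (memory_pm_ctrl)
			iounmap(memory_pm_ctrl);
		return;
	}

	suspend_set_ops(&kirkwood_suspend_ops);
}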
diff --git a/arch/arm/mach-mvebu/kirkwood-pm.h b/arch/arm/mach-mvebu/kirkwood-pm.h
new file mode 100644
index 000000000..21e7530f3
--- /dev/null
+++ b/arch/arm/mach-mvebu/kirkwood-pm.h
@@ -0,0 +1,26 @@
+/*
+ * Power Management driver for Marvell Kirkwood SoCs
+ *
+ * Copyright (C) 2013 Ezequiel Garcia <ezequiel@free-electrons.com>
+ * Copyright (C) 2010 Simon Guinot <sguinot@lacie.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License,
+ * version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_KIRKWOOD_PM_H
+#define __ARCH_KIRKWOOD_PM_H
+
+#ifdef CONFIG_PM
+void kirkwood_pm_init(void);
+#else
+static inline void kirkwood_pm_init(void) {};
+#endif
+
+#endif
diff --git a/arch/arm/mach-mvebu/kirkwood.c b/arch/arm/mach-mvebu/kirkwood.c
new file mode 100644
index 000000000..0aa88105d
--- /dev/null
+++ b/arch/arm/mach-mvebu/kirkwood.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2012 (C), Jason Cooper <jason@lakedaemon.net>
+ *
+ * arch/arm/mach-mvebu/kirkwood.c
+ *
+ * Flattened Device Tree board initialization
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mbus.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <asm/hardware/cache-feroceon-l2.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include "kirkwood.h"
+#include "kirkwood-pm.h"
+#include "common.h"
+
+static struct resource kirkwood_cpufreq_resources[] = {
+ [0] = {
+ .start = CPU_CONTROL_PHYS,
+ .end = CPU_CONTROL_PHYS + 3,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device kirkwood_cpufreq_device = {
+ .name = "kirkwood-cpufreq",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(kirkwood_cpufreq_resources),
+ .resource = kirkwood_cpufreq_resources,
+};
+
+static void __init kirkwood_cpufreq_init(void)
+{
+ platform_device_register(&kirkwood_cpufreq_device);
+}
+
+static struct resource kirkwood_cpuidle_resource[] = {
+ {
+ .flags = IORESOURCE_MEM,
+ .start = DDR_OPERATION_BASE,
+ .end = DDR_OPERATION_BASE + 3,
+ },
+};
+
+static struct platform_device kirkwood_cpuidle = {
+ .name = "kirkwood_cpuidle",
+ .id = -1,
+ .resource = kirkwood_cpuidle_resource,
+ .num_resources = 1,
+};
+
+static void __init kirkwood_cpuidle_init(void)
+{
+ platform_device_register(&kirkwood_cpuidle);
+}
+
+#define MV643XX_ETH_MAC_ADDR_LOW 0x0414
+#define MV643XX_ETH_MAC_ADDR_HIGH 0x0418
+
+static void __init kirkwood_dt_eth_fixup(void)
+{
+ struct device_node *np;
+
+ /*
+ * The ethernet interfaces forget the MAC address assigned by u-boot
+ * if the clocks are turned off. Usually, u-boot on kirkwood boards
+ * has no DT support to properly set local-mac-address property.
+ * As a workaround, we get the MAC address from mv643xx_eth registers
+ * and update the port device node if no valid MAC address is set.
+ */
+ for_each_compatible_node(np, NULL, "marvell,kirkwood-eth-port") {
+ struct device_node *pnp = of_get_parent(np);
+ struct clk *clk;
+ struct property *pmac;
+ void __iomem *io;
+ u8 *macaddr;
+ u32 reg;
+
+ if (!pnp)
+ continue;
+
+ /* skip disabled nodes or nodes with valid MAC address */
+ if (!of_device_is_available(pnp) || of_get_mac_address(np))
+ goto eth_fixup_skip;
+
+ clk = of_clk_get(pnp, 0);
+ if (IS_ERR(clk))
+ goto eth_fixup_skip;
+
+ io = of_iomap(pnp, 0);
+ if (!io)
+ goto eth_fixup_no_map;
+
+ /* ensure the port clock is not gated, to avoid hanging the CPU */
+ clk_prepare_enable(clk);
+
+ /* store MAC address register contents in local-mac-address */
+ pr_err(FW_INFO "%pOF: local-mac-address is not set\n", np);
+
+ pmac = kzalloc(sizeof(*pmac) + 6, GFP_KERNEL);
+ if (!pmac)
+ goto eth_fixup_no_mem;
+
+ pmac->value = pmac + 1;
+ pmac->length = 6;
+ pmac->name = kstrdup("local-mac-address", GFP_KERNEL);
+ if (!pmac->name) {
+ kfree(pmac);
+ goto eth_fixup_no_mem;
+ }
+
+ macaddr = pmac->value;
+ reg = readl(io + MV643XX_ETH_MAC_ADDR_HIGH);
+ macaddr[0] = (reg >> 24) & 0xff;
+ macaddr[1] = (reg >> 16) & 0xff;
+ macaddr[2] = (reg >> 8) & 0xff;
+ macaddr[3] = reg & 0xff;
+
+ reg = readl(io + MV643XX_ETH_MAC_ADDR_LOW);
+ macaddr[4] = (reg >> 8) & 0xff;
+ macaddr[5] = reg & 0xff;
+
+ of_update_property(np, pmac);
+
+eth_fixup_no_mem:
+ iounmap(io);
+ clk_disable_unprepare(clk);
+eth_fixup_no_map:
+ clk_put(clk);
+eth_fixup_skip:
+ of_node_put(pnp);
+ }
+}
+
+/*
+ * Disable propagation of mbus errors to the CPU local bus, as this
+ * causes mbus errors (which can occur for example for PCI aborts) to
+ * throw CPU aborts, which we're not set up to deal with.
+ */
+static void kirkwood_disable_mbus_error_propagation(void)
+{
+ void __iomem *cpu_config;
+
+ cpu_config = ioremap(CPU_CONFIG_PHYS, 4);
+ writel(readl(cpu_config) & ~CPU_CONFIG_ERROR_PROP, cpu_config);
+}
+
+static struct of_dev_auxdata auxdata[] __initdata = {
+ OF_DEV_AUXDATA("marvell,kirkwood-audio", 0xf10a0000,
+ "mvebu-audio", NULL),
+ { /* sentinel */ }
+};
+
+static void __init kirkwood_dt_init(void)
+{
+ kirkwood_disable_mbus_error_propagation();
+
+ BUG_ON(mvebu_mbus_dt_init(false));
+
+#ifdef CONFIG_CACHE_FEROCEON_L2
+ feroceon_of_init();
+#endif
+ kirkwood_cpufreq_init();
+ kirkwood_cpuidle_init();
+
+ kirkwood_pm_init();
+ kirkwood_dt_eth_fixup();
+
+ of_platform_default_populate(NULL, auxdata, NULL);
+}
+
+static const char * const kirkwood_dt_board_compat[] __initconst = {
+ "marvell,kirkwood",
+ NULL
+};
+
+DT_MACHINE_START(KIRKWOOD_DT, "Marvell Kirkwood (Flattened Device Tree)")
+ /* Maintainer: Jason Cooper <jason@lakedaemon.net> */
+ .init_machine = kirkwood_dt_init,
+ .restart = mvebu_restart,
+ .dt_compat = kirkwood_dt_board_compat,
+MACHINE_END
diff --git a/arch/arm/mach-mvebu/kirkwood.h b/arch/arm/mach-mvebu/kirkwood.h
new file mode 100644
index 000000000..89f3d1f51
--- /dev/null
+++ b/arch/arm/mach-mvebu/kirkwood.h
@@ -0,0 +1,22 @@
+/*
+ * arch/arm/mach-mvebu/kirkwood.h
+ *
+ * Generic definitions for Marvell Kirkwood SoC flavors:
+ * 88F6180, 88F6192 and 88F6281.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define KIRKWOOD_REGS_PHYS_BASE 0xf1000000
+#define DDR_PHYS_BASE (KIRKWOOD_REGS_PHYS_BASE + 0x00000)
+#define BRIDGE_PHYS_BASE (KIRKWOOD_REGS_PHYS_BASE + 0x20000)
+
+#define DDR_OPERATION_BASE (DDR_PHYS_BASE + 0x1418)
+
+#define CPU_CONFIG_PHYS (BRIDGE_PHYS_BASE + 0x0100)
+#define CPU_CONFIG_ERROR_PROP 0x00000004
+
+#define CPU_CONTROL_PHYS (BRIDGE_PHYS_BASE + 0x0104)
+#define MEMORY_PM_CTRL_PHYS (BRIDGE_PHYS_BASE + 0x0118)
diff --git a/arch/arm/mach-mvebu/mvebu-soc-id.c b/arch/arm/mach-mvebu/mvebu-soc-id.c
new file mode 100644
index 000000000..a99434bce
--- /dev/null
+++ b/arch/arm/mach-mvebu/mvebu-soc-id.c
@@ -0,0 +1,178 @@
+/*
+ * ID and revision information for mvebu SoCs
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * All the mvebu SoCs have information related to their variant and
+ * revision that can be read from the PCI control register. This is
+ * done before the PCI initialization to avoid any conflict. Once the
+ * ID and revision are retrieved, the mapping is freed.
+ */
+
+#define pr_fmt(fmt) "mvebu-soc-id: " fmt
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include "common.h"
+#include "mvebu-soc-id.h"
+
+#define PCIE_DEV_ID_OFF 0x0
+#define PCIE_DEV_REV_OFF 0x8
+
+#define SOC_ID_MASK 0xFFFF0000
+#define SOC_REV_MASK 0xFF
+
+static u32 soc_dev_id;
+static u32 soc_rev;
+static bool is_id_valid;
+
+static const struct of_device_id mvebu_pcie_of_match_table[] = {
+ { .compatible = "marvell,armada-xp-pcie", },
+ { .compatible = "marvell,armada-370-pcie", },
+ { .compatible = "marvell,kirkwood-pcie" },
+ {},
+};
+
+int mvebu_get_soc_id(u32 *dev, u32 *rev)
+{
+ if (is_id_valid) {
+ *dev = soc_dev_id;
+ *rev = soc_rev;
+ return 0;
+ } else
+ return -ENODEV;
+}
+
+static int __init get_soc_id_by_pci(void)
+{
+ struct device_node *np;
+ int ret = 0;
+ void __iomem *pci_base;
+ struct clk *clk;
+ struct device_node *child;
+
+ np = of_find_matching_node(NULL, mvebu_pcie_of_match_table);
+ if (!np)
+ return ret;
+
+ /*
+ * ID and revision are available from any port, so we
+ * just pick the first one
+ */
+ child = of_get_next_child(np, NULL);
+ if (child == NULL) {
+ pr_err("cannot get pci node\n");
+ ret = -ENOMEM;
+ goto clk_err;
+ }
+
+ clk = of_clk_get_by_name(child, NULL);
+ if (IS_ERR(clk)) {
+ pr_err("cannot get clock\n");
+ ret = -ENOMEM;
+ goto clk_err;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("cannot enable clock\n");
+ goto clk_err;
+ }
+
+ pci_base = of_iomap(child, 0);
+ if (pci_base == NULL) {
+ pr_err("cannot map registers\n");
+ ret = -ENOMEM;
+ goto res_ioremap;
+ }
+
+ /* SoC ID */
+ soc_dev_id = readl(pci_base + PCIE_DEV_ID_OFF) >> 16;
+
+ /* SoC revision */
+ soc_rev = readl(pci_base + PCIE_DEV_REV_OFF) & SOC_REV_MASK;
+
+ is_id_valid = true;
+
+ pr_info("MVEBU SoC ID=0x%X, Rev=0x%X\n", soc_dev_id, soc_rev);
+
+ iounmap(pci_base);
+
+res_ioremap:
+ /*
+ * If the PCIe unit is actually enabled and we have PCI
+ * support in the kernel, we intentionally do not release the
+ * reference to the clock. We want to keep it running since
+ * the bootloader does some PCIe link configuration that the
+ * kernel is for now unable to do, and gating the clock would
+ * make us lose this precious configuration.
+ */
+ if (!of_device_is_available(child) || !IS_ENABLED(CONFIG_PCI_MVEBU)) {
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+ }
+
+clk_err:
+ of_node_put(child);
+ of_node_put(np);
+
+ return ret;
+}
+
+static int __init mvebu_soc_id_init(void)
+{
+
+ /*
+ * First try to get the ID and the revision by the system
+ * register and use PCI registers only if it is not possible
+ */
+ if (!mvebu_system_controller_get_soc_id(&soc_dev_id, &soc_rev)) {
+ is_id_valid = true;
+ pr_info("MVEBU SoC ID=0x%X, Rev=0x%X\n", soc_dev_id, soc_rev);
+ return 0;
+ }
+
+ return get_soc_id_by_pci();
+}
+early_initcall(mvebu_soc_id_init);
+
+static int __init mvebu_soc_device(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct soc_device *soc_dev;
+
+ /* Also protects against running on non-mvebu systems */
+ if (!is_id_valid)
+ return 0;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->family = kasprintf(GFP_KERNEL, "Marvell");
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%X", soc_rev);
+ soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "%X", soc_dev_id);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr->family);
+ kfree(soc_dev_attr->revision);
+ kfree(soc_dev_attr->soc_id);
+ kfree(soc_dev_attr);
+ }
+
+ return 0;
+}
+postcore_initcall(mvebu_soc_device);
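mvebu_get_soc_id() is the consumer-facing API added here: once either the system controller or the PCIe registers have provided the ID, it returns 0 with the cached device ID and revision (i2c_quirk() in board-v7.c is the in-tree user). A short usage sketch — the erratum condition below is invented for illustration:

#include <linux/types.h>
#include "mvebu-soc-id.h"

/* Illustrative only: gate a hypothetical workaround on early MV78460
 * silicon, falling back to the safe side when the ID cannot be read. */
static bool needs_early_axp_workaround(void)
{
	u32 dev, rev;

	if (mvebu_get_soc_id(&dev, &rev) != 0)
		return true;	/* ID unknown: be conservative */

	return dev == MV78460_DEV_ID && rev <= MV78XX0_A0_REV;
}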
diff --git a/arch/arm/mach-mvebu/mvebu-soc-id.h b/arch/arm/mach-mvebu/mvebu-soc-id.h
new file mode 100644
index 000000000..e124a0b82
--- /dev/null
+++ b/arch/arm/mach-mvebu/mvebu-soc-id.h
@@ -0,0 +1,54 @@
+/*
+ * Marvell EBU SoC ID and revision definitions.
+ *
+ * Copyright (C) 2014 Marvell Semiconductor
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __LINUX_MVEBU_SOC_ID_H
+#define __LINUX_MVEBU_SOC_ID_H
+
+/* Armada XP ID */
+#define MV78230_DEV_ID 0x7823
+#define MV78260_DEV_ID 0x7826
+#define MV78460_DEV_ID 0x7846
+
+/* Armada XP Revision */
+#define MV78XX0_A0_REV 0x1
+#define MV78XX0_B0_REV 0x2
+
+/* Armada 370 ID */
+#define ARMADA_370_DEV_ID 0x6710
+
+/* Armada 370 Revision */
+#define ARMADA_370_A1_REV 0x1
+
+/* Armada 375 ID */
+#define ARMADA_375_DEV_ID 0x6720
+
+/* Armada 375 Revision */
+#define ARMADA_375_Z1_REV 0x0
+#define ARMADA_375_A0_REV 0x3
+
+/* Armada 38x ID */
+#define ARMADA_380_DEV_ID 0x6810
+#define ARMADA_385_DEV_ID 0x6820
+#define ARMADA_388_DEV_ID 0x6828
+
+/* Armada 38x Revision */
+#define ARMADA_38x_Z1_REV 0x0
+#define ARMADA_38x_A0_REV 0x4
+
+#ifdef CONFIG_ARCH_MVEBU
+int mvebu_get_soc_id(u32 *dev, u32 *rev);
+#else
+static inline int mvebu_get_soc_id(u32 *dev, u32 *rev)
+{
+ return -1;
+}
+#endif
+
+#endif /* __LINUX_MVEBU_SOC_ID_H */
diff --git a/arch/arm/mach-mvebu/platsmp-a9.c b/arch/arm/mach-mvebu/platsmp-a9.c
new file mode 100644
index 000000000..d715dec1c
--- /dev/null
+++ b/arch/arm/mach-mvebu/platsmp-a9.c
@@ -0,0 +1,114 @@
+/*
+ * Symmetric Multi Processing (SMP) support for Marvell EBU Cortex-A9
+ * based SOCs (Armada 375/38x).
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+#include <linux/mbus.h>
+#include <asm/smp_scu.h>
+#include <asm/smp_plat.h>
+#include "common.h"
+#include "pmsu.h"
+
+extern void mvebu_cortex_a9_secondary_startup(void);
+
+static int mvebu_cortex_a9_boot_secondary(unsigned int cpu,
+ struct task_struct *idle)
+{
+ int ret, hw_cpu;
+
+ pr_info("Booting CPU %d\n", cpu);
+
+ /*
+ * Write the address of secondary startup into the system-wide
+ * flags register. The boot monitor waits until it receives a
+ * soft interrupt, and then the secondary CPU branches to this
+ * address.
+ */
+ hw_cpu = cpu_logical_map(cpu);
+ if (of_machine_is_compatible("marvell,armada375"))
+ mvebu_system_controller_set_cpu_boot_addr(mvebu_cortex_a9_secondary_startup);
+ else
+ mvebu_pmsu_set_cpu_boot_addr(hw_cpu, mvebu_cortex_a9_secondary_startup);
+ smp_wmb();
+
+ /*
+ * Doing this before deasserting the CPUs is needed to wake up CPUs
+ * in the offline state after using CPU hotplug.
+ */
+ arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+
+ ret = mvebu_cpu_reset_deassert(hw_cpu);
+ if (ret) {
+ pr_err("Could not start the secondary CPU: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+/*
+ * When a CPU is brought back online, either through CPU hotplug, or
+ * because of the boot of a kexec'ed kernel, the PMSU configuration
+ * for this CPU might be in the deep idle state, preventing this CPU
+ * from receiving interrupts. Here, we therefore take out the current
+ * CPU from this state, which was entered by armada_38x_cpu_die()
+ * below.
+ */
+static void armada_38x_secondary_init(unsigned int cpu)
+{
+ mvebu_v7_pmsu_idle_exit();
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void armada_38x_cpu_die(unsigned int cpu)
+{
+ /*
+ * CPU hotplug is implemented by putting offline CPUs into the
+ * deep idle sleep state.
+ */
+ armada_38x_do_cpu_suspend(true);
+}
+
+/*
+ * We need a dummy function, so that platform_can_cpu_hotplug() knows
+ * we support CPU hotplug. However, the function does not need to do
+ * anything, because CPUs going offline can enter the deep idle state
+ * by themselves, without any help from a still alive CPU.
+ */
+static int armada_38x_cpu_kill(unsigned int cpu)
+{
+ return 1;
+}
+#endif
+
+static const struct smp_operations mvebu_cortex_a9_smp_ops __initconst = {
+ .smp_boot_secondary = mvebu_cortex_a9_boot_secondary,
+};
+
+static const struct smp_operations armada_38x_smp_ops __initconst = {
+ .smp_boot_secondary = mvebu_cortex_a9_boot_secondary,
+ .smp_secondary_init = armada_38x_secondary_init,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_die = armada_38x_cpu_die,
+ .cpu_kill = armada_38x_cpu_kill,
+#endif
+};
+
+CPU_METHOD_OF_DECLARE(mvebu_armada_375_smp, "marvell,armada-375-smp",
+ &mvebu_cortex_a9_smp_ops);
+CPU_METHOD_OF_DECLARE(mvebu_armada_380_smp, "marvell,armada-380-smp",
+ &armada_38x_smp_ops);
+CPU_METHOD_OF_DECLARE(mvebu_armada_390_smp, "marvell,armada-390-smp",
+ &armada_38x_smp_ops);
diff --git a/arch/arm/mach-mvebu/platsmp.c b/arch/arm/mach-mvebu/platsmp.c
new file mode 100644
index 000000000..c130497dc
--- /dev/null
+++ b/arch/arm/mach-mvebu/platsmp.c
@@ -0,0 +1,258 @@
+/*
+ * Symmetric Multi Processing (SMP) support for Armada XP
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Yehuda Yitschak <yehuday@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * The Armada XP SoC has 4 ARMv7 PJ4B CPUs running in full HW coherency
+ * This file implements the routines for preparing the SMP infrastructure
+ * and waking up the secondary CPUs
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/mbus.h>
+#include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
+#include "common.h"
+#include "armada-370-xp.h"
+#include "pmsu.h"
+#include "coherency.h"
+
+#define ARMADA_XP_MAX_CPUS 4
+
+#define AXP_BOOTROM_BASE 0xfff00000
+#define AXP_BOOTROM_SIZE 0x100000
+
+static struct clk *boot_cpu_clk;
+
+static struct clk *get_cpu_clk(int cpu)
+{
+ struct clk *cpu_clk;
+ struct device_node *np = of_get_cpu_node(cpu, NULL);
+
+ if (WARN(!np, "missing cpu node\n"))
+ return NULL;
+ cpu_clk = of_clk_get(np, 0);
+ if (WARN_ON(IS_ERR(cpu_clk)))
+ return NULL;
+ return cpu_clk;
+}
+
+static int armada_xp_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ int ret, hw_cpu;
+
+ pr_info("Booting CPU %d\n", cpu);
+
+ hw_cpu = cpu_logical_map(cpu);
+ mvebu_pmsu_set_cpu_boot_addr(hw_cpu, armada_xp_secondary_startup);
+
+ /*
+ * This is needed to wake up CPUs in the offline state after
+ * using CPU hotplug.
+ */
+ arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+
+ /*
+ * This is needed to take secondary CPUs out of reset on the
+ * initial boot.
+ */
+ ret = mvebu_cpu_reset_deassert(hw_cpu);
+ if (ret) {
+ pr_warn("unable to boot CPU: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * When a CPU is brought back online, either through CPU hotplug, or
+ * because of the boot of a kexec'ed kernel, the PMSU configuration
+ * for this CPU might be in the deep idle state, preventing this CPU
+ * from receiving interrupts. Here, we therefore take out the current
+ * CPU from this state, which was entered by armada_xp_cpu_die()
+ * below.
+ */
+static void armada_xp_secondary_init(unsigned int cpu)
+{
+ mvebu_v7_pmsu_idle_exit();
+}
+
+static void __init armada_xp_smp_init_cpus(void)
+{
+ unsigned int ncores = num_possible_cpus();
+
+ if (ncores == 0 || ncores > ARMADA_XP_MAX_CPUS)
+ panic("Invalid number of CPUs in DT\n");
+}
+
+static int armada_xp_sync_secondary_clk(unsigned int cpu)
+{
+ struct clk *cpu_clk = get_cpu_clk(cpu);
+
+ if (!cpu_clk || !boot_cpu_clk)
+ return 0;
+
+ clk_prepare_enable(cpu_clk);
+ clk_set_rate(cpu_clk, clk_get_rate(boot_cpu_clk));
+
+ return 0;
+}
+
+static void __init armada_xp_smp_prepare_cpus(unsigned int max_cpus)
+{
+ struct device_node *node;
+ struct resource res;
+ int err;
+
+ flush_cache_all();
+ set_cpu_coherent();
+
+ boot_cpu_clk = get_cpu_clk(smp_processor_id());
+ if (boot_cpu_clk) {
+ clk_prepare_enable(boot_cpu_clk);
+ cpuhp_setup_state_nocalls(CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,
+ "arm/mvebu/sync_clocks:online",
+ armada_xp_sync_secondary_clk, NULL);
+ }
+
+ /*
+ * In order to boot the secondary CPUs we need to ensure
+ * the bootROM is mapped at the correct address.
+ */
+ node = of_find_compatible_node(NULL, NULL, "marvell,bootrom");
+ if (!node)
+ panic("Cannot find 'marvell,bootrom' compatible node");
+
+ err = of_address_to_resource(node, 0, &res);
+ of_node_put(node);
+ if (err < 0)
+ panic("Cannot get 'bootrom' node address");
+
+ if (res.start != AXP_BOOTROM_BASE ||
+ resource_size(&res) != AXP_BOOTROM_SIZE)
+ panic("The address for the BootROM is incorrect");
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void armada_xp_cpu_die(unsigned int cpu)
+{
+ /*
+ * CPU hotplug is implemented by putting offline CPUs into the
+ * deep idle sleep state.
+ */
+ armada_370_xp_pmsu_idle_enter(true);
+}
+
+/*
+ * We need a dummy function, so that platform_can_cpu_hotplug() knows
+ * we support CPU hotplug. However, the function does not need to do
+ * anything, because CPUs going offline can enter the deep idle state
+ * by themselves, without any help from a still alive CPU.
+ */
+static int armada_xp_cpu_kill(unsigned int cpu)
+{
+ return 1;
+}
+#endif
+
+const struct smp_operations armada_xp_smp_ops __initconst = {
+ .smp_init_cpus = armada_xp_smp_init_cpus,
+ .smp_prepare_cpus = armada_xp_smp_prepare_cpus,
+ .smp_boot_secondary = armada_xp_boot_secondary,
+ .smp_secondary_init = armada_xp_secondary_init,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_die = armada_xp_cpu_die,
+ .cpu_kill = armada_xp_cpu_kill,
+#endif
+};
+
+CPU_METHOD_OF_DECLARE(armada_xp_smp, "marvell,armada-xp-smp",
+ &armada_xp_smp_ops);
+
+#define MV98DX3236_CPU_RESUME_CTRL_REG 0x08
+#define MV98DX3236_CPU_RESUME_ADDR_REG 0x04
+
+static const struct of_device_id of_mv98dx3236_resume_table[] = {
+ {
+ .compatible = "marvell,98dx3336-resume-ctrl",
+ },
+ { /* end of list */ },
+};
+
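+/*
+ * Program the 98DX3236 resume control registers with the physical
+ * address at which the secondary CPU will start executing once it is
+ * taken out of reset.
+ */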
+static int mv98dx3236_resume_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
+{
+ struct device_node *np;
+ void __iomem *base;
+
+ WARN_ON(hw_cpu != 1);
+
+ np = of_find_matching_node(NULL, of_mv98dx3236_resume_table);
+ if (!np)
+ return -ENODEV;
+
+ base = of_io_request_and_map(np, 0, of_node_full_name(np));
+ of_node_put(np);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ writel(0, base + MV98DX3236_CPU_RESUME_CTRL_REG);
+ writel(__pa_symbol(boot_addr), base + MV98DX3236_CPU_RESUME_ADDR_REG);
+
+ iounmap(base);
+
+ return 0;
+}
+
+static int mv98dx3236_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ int ret, hw_cpu;
+
+ hw_cpu = cpu_logical_map(cpu);
+ mv98dx3236_resume_set_cpu_boot_addr(hw_cpu,
+ armada_xp_secondary_startup);
+
+ /*
+ * This is needed to wake up CPUs in the offline state after
+ * using CPU hotplug.
+ */
+ arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+
+ /*
+ * This is needed to take secondary CPUs out of reset on the
+ * initial boot.
+ */
+ ret = mvebu_cpu_reset_deassert(hw_cpu);
+ if (ret) {
+ pr_warn("unable to boot CPU: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct smp_operations mv98dx3236_smp_ops __initconst = {
+ .smp_init_cpus = armada_xp_smp_init_cpus,
+ .smp_prepare_cpus = armada_xp_smp_prepare_cpus,
+ .smp_boot_secondary = mv98dx3236_boot_secondary,
+ .smp_secondary_init = armada_xp_secondary_init,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_die = armada_xp_cpu_die,
+ .cpu_kill = armada_xp_cpu_kill,
+#endif
+};
+
+CPU_METHOD_OF_DECLARE(mv98dx3236_smp, "marvell,98dx3236-smp",
+ &mv98dx3236_smp_ops);
diff --git a/arch/arm/mach-mvebu/pm-board.c b/arch/arm/mach-mvebu/pm-board.c
new file mode 100644
index 000000000..db17121d7
--- /dev/null
+++ b/arch/arm/mach-mvebu/pm-board.c
@@ -0,0 +1,152 @@
+/*
+ * Board-level suspend/resume support.
+ *
+ * Copyright (C) 2014-2015 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/slab.h>
+#include "common.h"
+
+#define ARMADA_PIC_NR_GPIOS 3
+
+static void __iomem *gpio_ctrl;
+static int pic_gpios[ARMADA_PIC_NR_GPIOS];
+static int pic_raw_gpios[ARMADA_PIC_NR_GPIOS];
+
+static void mvebu_armada_pm_enter(void __iomem *sdram_reg, u32 srcmd)
+{
+ u32 reg, ackcmd;
+ int i;
+
+ /* Put 001 as value on the GPIOs */
+ reg = readl(gpio_ctrl);
+ for (i = 0; i < ARMADA_PIC_NR_GPIOS; i++)
+ reg &= ~BIT(pic_raw_gpios[i]);
+ reg |= BIT(pic_raw_gpios[0]);
+ writel(reg, gpio_ctrl);
+
+ /* Prepare writing 111 to the GPIOs */
+ ackcmd = readl(gpio_ctrl);
+ for (i = 0; i < ARMADA_PIC_NR_GPIOS; i++)
+ ackcmd |= BIT(pic_raw_gpios[i]);
+
+ srcmd = cpu_to_le32(srcmd);
+ ackcmd = cpu_to_le32(ackcmd);
+
+ /*
+ * Wait a while, the PIC needs quite a bit of time between the
+ * two GPIO commands.
+ */
+ mdelay(3000);
+
+ asm volatile (
+ /* Align to a cache line */
+ ".balign 32\n\t"
+
+ /* Enter self refresh */
+ "str %[srcmd], [%[sdram_reg]]\n\t"
+
+ /*
+ * Wait 100 cycles for DDR to enter self refresh, by
+ * doing 50 times two instructions.
+ */
+ "mov r1, #50\n\t"
+ "1: subs r1, r1, #1\n\t"
+ "bne 1b\n\t"
+
+ /* Issue the command ACK */
+ "str %[ackcmd], [%[gpio_ctrl]]\n\t"
+
+ /* Trap the processor */
+ "b .\n\t"
+ : : [srcmd] "r" (srcmd), [sdram_reg] "r" (sdram_reg),
+ [ackcmd] "r" (ackcmd), [gpio_ctrl] "r" (gpio_ctrl) : "r1");
+}
+
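+/*
+ * On the Armada XP GP board, parse the "pm_pic" node, claim the PIC
+ * control GPIOs and register mvebu_armada_pm_enter() as the
+ * board-specific suspend entry callback.
+ */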
+static int __init mvebu_armada_pm_init(void)
+{
+ struct device_node *np;
+ struct device_node *gpio_ctrl_np;
+ int ret = 0, i;
+
+ if (!of_machine_is_compatible("marvell,axp-gp"))
+ return -ENODEV;
+
+ np = of_find_node_by_name(NULL, "pm_pic");
+ if (!np)
+ return -ENODEV;
+
+ for (i = 0; i < ARMADA_PIC_NR_GPIOS; i++) {
+ char *name;
+ struct of_phandle_args args;
+
+ pic_gpios[i] = of_get_named_gpio(np, "ctrl-gpios", i);
+ if (pic_gpios[i] < 0) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ name = kasprintf(GFP_KERNEL, "pic-pin%d", i);
+ if (!name) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = gpio_request(pic_gpios[i], name);
+ if (ret < 0) {
+ kfree(name);
+ goto out;
+ }
+
+ ret = gpio_direction_output(pic_gpios[i], 0);
+ if (ret < 0) {
+ gpio_free(pic_gpios[i]);
+ kfree(name);
+ goto out;
+ }
+
+ ret = of_parse_phandle_with_fixed_args(np, "ctrl-gpios", 2,
+ i, &args);
+ if (ret < 0) {
+ gpio_free(pic_gpios[i]);
+ kfree(name);
+ goto out;
+ }
+
+ gpio_ctrl_np = args.np;
+ pic_raw_gpios[i] = args.args[0];
+ }
+
+ gpio_ctrl = of_iomap(gpio_ctrl_np, 0);
+ if (!gpio_ctrl) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mvebu_pm_suspend_init(mvebu_armada_pm_enter);
+
+out:
+ of_node_put(np);
+ return ret;
+}
+
+/*
+ * Registering the mvebu_board_pm_enter callback must be done before
+ * the platform_suspend_ops are registered. At the same time, the GPIO
+ * devices must already be registered. That's why we use
+ * device_initcall_sync(), which runs after all device_initcalls (used
+ * by the GPIO drivers) but before the late_initcalls (used to
+ * register the platform_suspend_ops).
+ */
+device_initcall_sync(mvebu_armada_pm_init);
diff --git a/arch/arm/mach-mvebu/pm.c b/arch/arm/mach-mvebu/pm.c
new file mode 100644
index 000000000..c487be61d
--- /dev/null
+++ b/arch/arm/mach-mvebu/pm.c
@@ -0,0 +1,270 @@
+/*
+ * Suspend/resume support. Currently supporting Armada XP only.
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/cpu_pm.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mbus.h>
+#include <linux/of_address.h>
+#include <linux/suspend.h>
+#include <asm/cacheflush.h>
+#include <asm/outercache.h>
+#include <asm/suspend.h>
+
+#include "coherency.h"
+#include "common.h"
+#include "pmsu.h"
+
+#define SDRAM_CONFIG_OFFS 0x0
+#define SDRAM_CONFIG_SR_MODE_BIT BIT(24)
+#define SDRAM_OPERATION_OFFS 0x18
+#define SDRAM_OPERATION_SELF_REFRESH 0x7
+#define SDRAM_DLB_EVICTION_OFFS 0x30c
+#define SDRAM_DLB_EVICTION_THRESHOLD_MASK 0xff
+
+static void (*mvebu_board_pm_enter)(void __iomem *sdram_reg, u32 srcmd);
+static void __iomem *sdram_ctrl;
+
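+/*
+ * Final power-down step, run through cpu_suspend(): flush the caches
+ * and the DLB, prepare the SDRAM self-refresh command and hand over
+ * to the board-specific suspend entry code.
+ */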
+static int mvebu_pm_powerdown(unsigned long data)
+{
+ u32 reg, srcmd;
+
+ flush_cache_all();
+ outer_flush_all();
+
+ /*
+ * Issue a Data Synchronization Barrier instruction to ensure
+ * that all state saving has been completed.
+ */
+ dsb();
+
+ /* Flush the DLB and wait ~7 usec */
+ reg = readl(sdram_ctrl + SDRAM_DLB_EVICTION_OFFS);
+ reg &= ~SDRAM_DLB_EVICTION_THRESHOLD_MASK;
+ writel(reg, sdram_ctrl + SDRAM_DLB_EVICTION_OFFS);
+
+ udelay(7);
+
+ /* Set DRAM in battery backup mode */
+ reg = readl(sdram_ctrl + SDRAM_CONFIG_OFFS);
+ reg &= ~SDRAM_CONFIG_SR_MODE_BIT;
+ writel(reg, sdram_ctrl + SDRAM_CONFIG_OFFS);
+
+ /* Prepare to go to self-refresh */
+
+ srcmd = readl(sdram_ctrl + SDRAM_OPERATION_OFFS);
+ srcmd &= ~0x1F;
+ srcmd |= SDRAM_OPERATION_SELF_REFRESH;
+
+ mvebu_board_pm_enter(sdram_ctrl + SDRAM_OPERATION_OFFS, srcmd);
+
+ return 0;
+}
+
+#define BOOT_INFO_ADDR 0x3000
+#define BOOT_MAGIC_WORD 0xdeadb002
+#define BOOT_MAGIC_LIST_END 0xffffffff
+
+/*
+ * Those registers are accessed before switching the internal register
+ * base, which is why we hardcode the 0xd0000000 base address, the one
+ * used by the SoC out of reset.
+ */
+#define MBUS_WINDOW_12_CTRL 0xd00200b0
+#define MBUS_INTERNAL_REG_ADDRESS 0xd0020080
+
+#define SDRAM_WIN_BASE_REG(x) (0x20180 + (0x8*x))
+#define SDRAM_WIN_CTRL_REG(x) (0x20184 + (0x8*x))
+
+static phys_addr_t mvebu_internal_reg_base(void)
+{
+ struct device_node *np;
+ __be32 in_addr[2];
+
+ np = of_find_node_by_name(NULL, "internal-regs");
+ BUG_ON(!np);
+
+ /*
+ * Ask the DT what is the internal register address on this
+ * platform. In the mvebu-mbus DT binding, 0xf0010000
+ * corresponds to the internal register window.
+ */
+ in_addr[0] = cpu_to_be32(0xf0010000);
+ in_addr[1] = 0x0;
+
+ return of_translate_address(np, in_addr);
+}
+
+static void mvebu_pm_store_armadaxp_bootinfo(u32 *store_addr)
+{
+ phys_addr_t resume_pc;
+
+ resume_pc = __pa_symbol(armada_370_xp_cpu_resume);
+
+ /*
+ * The bootloader expects the first two words to be a magic
+ * value (BOOT_MAGIC_WORD), followed by the address of the
+ * resume code to jump to. Then, it expects a sequence of
+ * (address, value) pairs, which can be used to restore the
+ * value of certain registers. This sequence must end with the
+ * BOOT_MAGIC_LIST_END magic value.
+ */
+
+ writel(BOOT_MAGIC_WORD, store_addr++);
+ writel(resume_pc, store_addr++);
+
+ /*
+ * Some platforms remap their internal register base address
+ * to 0xf1000000. However, out of reset, window 12 starts at
+ * 0xf0000000 and ends at 0xf7ffffff, which would overlap with
+ * the internal registers. Therefore, disable window 12.
+ */
+ writel(MBUS_WINDOW_12_CTRL, store_addr++);
+ writel(0x0, store_addr++);
+
+ /*
+ * Set the internal register base address to the value
+ * expected by Linux, as read from the Device Tree.
+ */
+ writel(MBUS_INTERNAL_REG_ADDRESS, store_addr++);
+ writel(mvebu_internal_reg_base(), store_addr++);
+
+ /*
+ * Ask the mvebu-mbus driver to store the SDRAM window
+ * configuration, which has to be restored by the bootloader
+ * before re-entering the kernel on resume.
+ */
+ store_addr += mvebu_mbus_save_cpu_target(store_addr);
+
+ writel(BOOT_MAGIC_LIST_END, store_addr);
+}
+
+static int mvebu_pm_store_bootinfo(void)
+{
+ u32 *store_addr;
+
+ store_addr = phys_to_virt(BOOT_INFO_ADDR);
+
+ if (of_machine_is_compatible("marvell,armadaxp"))
+ mvebu_pm_store_armadaxp_bootinfo(store_addr);
+ else
+ return -ENODEV;
+
+ return 0;
+}
+
+static int mvebu_enter_suspend(void)
+{
+ int ret;
+
+ ret = mvebu_pm_store_bootinfo();
+ if (ret)
+ return ret;
+
+ cpu_pm_enter();
+
+ cpu_suspend(0, mvebu_pm_powerdown);
+
+ outer_resume();
+
+ mvebu_v7_pmsu_idle_exit();
+
+ set_cpu_coherent();
+
+ cpu_pm_exit();
+ return 0;
+}
+
+static int mvebu_pm_enter(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ cpu_do_idle();
+ break;
+ case PM_SUSPEND_MEM:
+ pr_warn("Entering suspend to RAM. Only special wake-up sources will resume the system\n");
+ return mvebu_enter_suspend();
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int mvebu_pm_valid(suspend_state_t state)
+{
+ if (state == PM_SUSPEND_STANDBY)
+ return 1;
+
+ if (state == PM_SUSPEND_MEM && mvebu_board_pm_enter != NULL)
+ return 1;
+
+ return 0;
+}
+
+static const struct platform_suspend_ops mvebu_pm_ops = {
+ .enter = mvebu_pm_enter,
+ .valid = mvebu_pm_valid,
+};
+
+static int __init mvebu_pm_init(void)
+{
+ if (!of_machine_is_compatible("marvell,armadaxp") &&
+ !of_machine_is_compatible("marvell,armada370") &&
+ !of_machine_is_compatible("marvell,armada380") &&
+ !of_machine_is_compatible("marvell,armada390"))
+ return -ENODEV;
+
+ suspend_set_ops(&mvebu_pm_ops);
+
+ return 0;
+}
+
+late_initcall(mvebu_pm_init);
+
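+/*
+ * Map the Armada XP SDRAM controller registers and register the
+ * board-specific suspend entry callback.
+ */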
+int __init mvebu_pm_suspend_init(void (*board_pm_enter)(void __iomem *sdram_reg,
+ u32 srcmd))
+{
+ struct device_node *np;
+ struct resource res;
+
+ np = of_find_compatible_node(NULL, NULL,
+ "marvell,armada-xp-sdram-controller");
+ if (!np)
+ return -ENODEV;
+
+ if (of_address_to_resource(np, 0, &res)) {
+ of_node_put(np);
+ return -ENODEV;
+ }
+
+ if (!request_mem_region(res.start, resource_size(&res),
+ np->full_name)) {
+ of_node_put(np);
+ return -EBUSY;
+ }
+
+ sdram_ctrl = ioremap(res.start, resource_size(&res));
+ if (!sdram_ctrl) {
+ release_mem_region(res.start, resource_size(&res));
+ of_node_put(np);
+ return -ENOMEM;
+ }
+
+ of_node_put(np);
+
+ mvebu_board_pm_enter = board_pm_enter;
+
+ return 0;
+}
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
new file mode 100644
index 000000000..73d5d72df
--- /dev/null
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -0,0 +1,609 @@
+/*
+ * Power Management Service Unit (PMSU) support for Armada 370/XP platforms.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Yehuda Yitschak <yehuday@marvell.com>
+ * Gregory Clement <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * The Armada 370 and Armada XP SoCs have a power management service
+ * unit which is responsible for powering down and waking up CPUs and
+ * other SoC units.
+ */
+
+#define pr_fmt(fmt) "mvebu-pmsu: " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu_pm.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mbus.h>
+#include <linux/mvebu-pmsu.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <asm/cacheflush.h>
+#include <asm/cp15.h>
+#include <asm/smp_scu.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+#include <asm/tlbflush.h>
+#include "common.h"
+#include "pmsu.h"
+
+#define PMSU_BASE_OFFSET 0x100
+#define PMSU_REG_SIZE 0x1000
+
+/* PMSU MP registers */
+#define PMSU_CONTROL_AND_CONFIG(cpu) ((cpu * 0x100) + 0x104)
+#define PMSU_CONTROL_AND_CONFIG_DFS_REQ BIT(18)
+#define PMSU_CONTROL_AND_CONFIG_PWDDN_REQ BIT(16)
+#define PMSU_CONTROL_AND_CONFIG_L2_PWDDN BIT(20)
+
+#define PMSU_CPU_POWER_DOWN_CONTROL(cpu) ((cpu * 0x100) + 0x108)
+
+#define PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP BIT(0)
+
+#define PMSU_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x10c)
+#define PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT BIT(16)
+#define PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT BIT(17)
+#define PMSU_STATUS_AND_MASK_IRQ_WAKEUP BIT(20)
+#define PMSU_STATUS_AND_MASK_FIQ_WAKEUP BIT(21)
+#define PMSU_STATUS_AND_MASK_DBG_WAKEUP BIT(22)
+#define PMSU_STATUS_AND_MASK_IRQ_MASK BIT(24)
+#define PMSU_STATUS_AND_MASK_FIQ_MASK BIT(25)
+
+#define PMSU_EVENT_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x120)
+#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE BIT(1)
+#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK BIT(17)
+
+#define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu) ((cpu * 0x100) + 0x124)
+
+/* PMSU fabric registers */
+#define L2C_NFABRIC_PM_CTL 0x4
+#define L2C_NFABRIC_PM_CTL_PWR_DOWN BIT(20)
+
+/* PMSU delay registers */
+#define PMSU_POWERDOWN_DELAY 0xF04
+#define PMSU_POWERDOWN_DELAY_PMU BIT(1)
+#define PMSU_POWERDOWN_DELAY_MASK 0xFFFE
+#define PMSU_DFLT_ARMADA38X_DELAY 0x64
+
+/* CA9 MPcore SoC Control registers */
+
+#define MPCORE_RESET_CTL 0x64
+#define MPCORE_RESET_CTL_L2 BIT(0)
+#define MPCORE_RESET_CTL_DEBUG BIT(16)
+
+#define SRAM_PHYS_BASE 0xFFFF0000
+#define BOOTROM_BASE 0xFFF00000
+#define BOOTROM_SIZE 0x100000
+
+#define ARMADA_370_CRYPT0_ENG_TARGET 0x9
+#define ARMADA_370_CRYPT0_ENG_ATTR 0x1
+
+extern void ll_disable_coherency(void);
+extern void ll_enable_coherency(void);
+
+extern void armada_370_xp_cpu_resume(void);
+extern void armada_38x_cpu_resume(void);
+
+static phys_addr_t pmsu_mp_phys_base;
+static void __iomem *pmsu_mp_base;
+
+static void *mvebu_cpu_resume;
+
+static const struct of_device_id of_pmsu_table[] = {
+ { .compatible = "marvell,armada-370-pmsu", },
+ { .compatible = "marvell,armada-370-xp-pmsu", },
+ { .compatible = "marvell,armada-380-pmsu", },
+ { /* end of list */ },
+};
+
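+/* Redirect the given HW CPU's boot address to boot_addr through the PMSU */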
+void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
+{
+ writel(__pa_symbol(boot_addr), pmsu_mp_base +
+ PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
+}
+
+extern unsigned char mvebu_boot_wa_start[];
+extern unsigned char mvebu_boot_wa_end[];
+
+/*
+ * This function sets up the boot address workaround needed for SMP
+ * boot on Armada 375 Z1 and cpuidle on Armada 370. It unmaps the
+ * BootROM Mbus window, and instead remaps a crypto SRAM into which a
+ * custom piece of code is copied to replace the problematic BootROM.
+ */
+int mvebu_setup_boot_addr_wa(unsigned int crypto_eng_target,
+ unsigned int crypto_eng_attribute,
+ phys_addr_t resume_addr_reg)
+{
+ void __iomem *sram_virt_base;
+ u32 code_len = mvebu_boot_wa_end - mvebu_boot_wa_start;
+
+ mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE);
+ mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute,
+ SRAM_PHYS_BASE, SZ_64K);
+
+ sram_virt_base = ioremap(SRAM_PHYS_BASE, SZ_64K);
+ if (!sram_virt_base) {
+ pr_err("Unable to map SRAM to setup the boot address WA\n");
+ return -ENOMEM;
+ }
+
+ memcpy(sram_virt_base, &mvebu_boot_wa_start, code_len);
+
+ /*
+ * The last word of the code copied in SRAM must contain the
+ * physical base address of the PMSU register. We
+ * intentionally store this address in the native endianness
+ * of the system.
+ */
+ __raw_writel((unsigned long)resume_addr_reg,
+ sram_virt_base + code_len - 4);
+
+ iounmap(sram_virt_base);
+
+ return 0;
+}
+
+static int __init mvebu_v7_pmsu_init(void)
+{
+ struct device_node *np;
+ struct resource res;
+ int ret = 0;
+
+ np = of_find_matching_node(NULL, of_pmsu_table);
+ if (!np)
+ return 0;
+
+ pr_info("Initializing Power Management Service Unit\n");
+
+ if (of_address_to_resource(np, 0, &res)) {
+ pr_err("unable to get resource\n");
+ ret = -ENOENT;
+ goto out;
+ }
+
+ if (of_device_is_compatible(np, "marvell,armada-370-xp-pmsu")) {
+ pr_warn(FW_WARN "deprecated pmsu binding\n");
+ res.start = res.start - PMSU_BASE_OFFSET;
+ res.end = res.start + PMSU_REG_SIZE - 1;
+ }
+
+ if (!request_mem_region(res.start, resource_size(&res),
+ np->full_name)) {
+ pr_err("unable to request region\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ pmsu_mp_phys_base = res.start;
+
+ pmsu_mp_base = ioremap(res.start, resource_size(&res));
+ if (!pmsu_mp_base) {
+ pr_err("unable to map registers\n");
+ release_mem_region(res.start, resource_size(&res));
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ out:
+ of_node_put(np);
+ return ret;
+}
+
+static void mvebu_v7_pmsu_enable_l2_powerdown_onidle(void)
+{
+ u32 reg;
+
+ if (pmsu_mp_base == NULL)
+ return;
+
+ /* Enable L2 & Fabric powerdown in Deep-Idle mode - Fabric */
+ reg = readl(pmsu_mp_base + L2C_NFABRIC_PM_CTL);
+ reg |= L2C_NFABRIC_PM_CTL_PWR_DOWN;
+ writel(reg, pmsu_mp_base + L2C_NFABRIC_PM_CTL);
+}
+
+enum pmsu_idle_prepare_flags {
+ PMSU_PREPARE_NORMAL = 0,
+ PMSU_PREPARE_DEEP_IDLE = BIT(0),
+ PMSU_PREPARE_SNOOP_DISABLE = BIT(1),
+};
+
+/* No locking is needed because we only access per-CPU registers */
+static int mvebu_v7_pmsu_idle_prepare(unsigned long flags)
+{
+ unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
+ u32 reg;
+
+ if (pmsu_mp_base == NULL)
+ return -EINVAL;
+
+ /*
+ * Adjust the PMSU configuration to wait for WFI signal, enable
+ * IRQ and FIQ as wakeup events, set wait for snoop queue empty
+ * indication and mask IRQ and FIQ from CPU
+ */
+ reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
+ reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT |
+ PMSU_STATUS_AND_MASK_IRQ_WAKEUP |
+ PMSU_STATUS_AND_MASK_FIQ_WAKEUP |
+ PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT |
+ PMSU_STATUS_AND_MASK_IRQ_MASK |
+ PMSU_STATUS_AND_MASK_FIQ_MASK;
+ writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
+
+ reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
+ /* ask HW to power down the L2 Cache if needed */
+ if (flags & PMSU_PREPARE_DEEP_IDLE)
+ reg |= PMSU_CONTROL_AND_CONFIG_L2_PWDDN;
+
+ /* request power down */
+ reg |= PMSU_CONTROL_AND_CONFIG_PWDDN_REQ;
+ writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
+
+ if (flags & PMSU_PREPARE_SNOOP_DISABLE) {
+ /* Disable the HW snoop-disable mechanism - SW is taking care of it */
+ reg = readl(pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
+ reg |= PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP;
+ writel(reg, pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
+ }
+
+ return 0;
+}
+
+int armada_370_xp_pmsu_idle_enter(unsigned long deepidle)
+{
+ unsigned long flags = PMSU_PREPARE_SNOOP_DISABLE;
+ int ret;
+
+ if (deepidle)
+ flags |= PMSU_PREPARE_DEEP_IDLE;
+
+ ret = mvebu_v7_pmsu_idle_prepare(flags);
+ if (ret)
+ return ret;
+
+ v7_exit_coherency_flush(all);
+
+ ll_disable_coherency();
+
+ dsb();
+
+ wfi();
+
+ /*
+ * If we are here, wfi failed. As the processors were taken out
+ * of coherency for some time, the TLBs might be stale, so
+ * flush them.
+ */
+ local_flush_tlb_all();
+
+ ll_enable_coherency();
+
+ /* Test the CR_C bit and set it if it was cleared */
+ asm volatile(
+ "mrc p15, 0, r0, c1, c0, 0 \n\t"
+ "tst r0, %0 \n\t"
+ "orreq r0, r0, #(1 << 2) \n\t"
+ "mcreq p15, 0, r0, c1, c0, 0 \n\t"
+ "isb "
+ : : "Ir" (CR_C) : "r0");
+
+ pr_debug("Failed to suspend the system\n");
+
+ return 0;
+}
+
+static int armada_370_xp_cpu_suspend(unsigned long deepidle)
+{
+ return cpu_suspend(deepidle, armada_370_xp_pmsu_idle_enter);
+}
+
+int armada_38x_do_cpu_suspend(unsigned long deepidle)
+{
+ unsigned long flags = 0;
+
+ if (deepidle)
+ flags |= PMSU_PREPARE_DEEP_IDLE;
+
+ mvebu_v7_pmsu_idle_prepare(flags);
+ /*
+ * Already flushed cache, but do it again as the outer cache
+ * functions dirty the cache with spinlocks
+ */
+ v7_exit_coherency_flush(louis);
+
+ scu_power_mode(mvebu_get_scu_base(), SCU_PM_POWEROFF);
+
+ cpu_do_idle();
+
+ return 1;
+}
+
+static int armada_38x_cpu_suspend(unsigned long deepidle)
+{
+ return cpu_suspend(false, armada_38x_do_cpu_suspend);
+}
+
+/* No locking is needed because we only access per-CPU registers */
+void mvebu_v7_pmsu_idle_exit(void)
+{
+ unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
+ u32 reg;
+
+ if (pmsu_mp_base == NULL)
+ return;
+ /* cancel the request asking HW to power down the L2 cache */
+ reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
+ reg &= ~PMSU_CONTROL_AND_CONFIG_L2_PWDDN;
+ writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
+
+ /* cancel the wakeup event enables and the IRQ/FIQ masking */
+ reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
+ reg &= ~(PMSU_STATUS_AND_MASK_IRQ_WAKEUP | PMSU_STATUS_AND_MASK_FIQ_WAKEUP);
+ reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
+ reg &= ~PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT;
+ reg &= ~(PMSU_STATUS_AND_MASK_IRQ_MASK | PMSU_STATUS_AND_MASK_FIQ_MASK);
+ writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
+}
+
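+/*
+ * CPU PM notifier: on idle entry, point the PMSU boot address at the
+ * resume code; on idle exit, undo the PMSU idle configuration.
+ */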
+static int mvebu_v7_cpu_pm_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ if (action == CPU_PM_ENTER) {
+ unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
+ mvebu_pmsu_set_cpu_boot_addr(hw_cpu, mvebu_cpu_resume);
+ } else if (action == CPU_PM_EXIT) {
+ mvebu_v7_pmsu_idle_exit();
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block mvebu_v7_cpu_pm_notifier = {
+ .notifier_call = mvebu_v7_cpu_pm_notify,
+};
+
+static struct platform_device mvebu_v7_cpuidle_device;
+
+static int broken_idle(struct device_node *np)
+{
+ if (of_property_read_bool(np, "broken-idle")) {
+ pr_warn("CPU idle is currently broken: disabling\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static __init int armada_370_cpuidle_init(void)
+{
+ struct device_node *np;
+ phys_addr_t redirect_reg;
+
+ np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
+ if (!np)
+ return -ENODEV;
+
+ if (broken_idle(np))
+ goto end;
+
+ /*
+ * On Armada 370, there is "a slow exit process from the deep
+ * idle state due to heavy L1/L2 cache cleanup operations
+ * performed by the BootROM software". To avoid this, we
+ * replace the restart code of the bootrom by a simple jump
+ * to the boot address. Then the code located at this boot
+ * address will take care of the initialization.
+ */
+ redirect_reg = pmsu_mp_phys_base + PMSU_BOOT_ADDR_REDIRECT_OFFSET(0);
+ mvebu_setup_boot_addr_wa(ARMADA_370_CRYPT0_ENG_TARGET,
+ ARMADA_370_CRYPT0_ENG_ATTR,
+ redirect_reg);
+
+ mvebu_cpu_resume = armada_370_xp_cpu_resume;
+ mvebu_v7_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend;
+ mvebu_v7_cpuidle_device.name = "cpuidle-armada-370";
+
+end:
+ of_node_put(np);
+ return 0;
+}
+
+static __init int armada_38x_cpuidle_init(void)
+{
+ struct device_node *np;
+ void __iomem *mpsoc_base;
+ u32 reg;
+
+ pr_warn("CPU idle is currently broken on Armada 38x: disabling\n");
+ return 0;
+
+ np = of_find_compatible_node(NULL, NULL,
+ "marvell,armada-380-coherency-fabric");
+ if (!np)
+ return -ENODEV;
+
+ if (broken_idle(np))
+ goto end;
+
+ of_node_put(np);
+
+ np = of_find_compatible_node(NULL, NULL,
+ "marvell,armada-380-mpcore-soc-ctrl");
+ if (!np)
+ return -ENODEV;
+ mpsoc_base = of_iomap(np, 0);
+ BUG_ON(!mpsoc_base);
+
+ /* Set up reset mask when powering down the cpus */
+ reg = readl(mpsoc_base + MPCORE_RESET_CTL);
+ reg |= MPCORE_RESET_CTL_L2;
+ reg |= MPCORE_RESET_CTL_DEBUG;
+ writel(reg, mpsoc_base + MPCORE_RESET_CTL);
+ iounmap(mpsoc_base);
+
+ /* Set up delay */
+ reg = readl(pmsu_mp_base + PMSU_POWERDOWN_DELAY);
+ reg &= ~PMSU_POWERDOWN_DELAY_MASK;
+ reg |= PMSU_DFLT_ARMADA38X_DELAY;
+ reg |= PMSU_POWERDOWN_DELAY_PMU;
+ writel(reg, pmsu_mp_base + PMSU_POWERDOWN_DELAY);
+
+ mvebu_cpu_resume = armada_38x_cpu_resume;
+ mvebu_v7_cpuidle_device.dev.platform_data = armada_38x_cpu_suspend;
+ mvebu_v7_cpuidle_device.name = "cpuidle-armada-38x";
+
+end:
+ of_node_put(np);
+ return 0;
+}
+
+static __init int armada_xp_cpuidle_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
+ if (!np)
+ return -ENODEV;
+
+ if (broken_idle(np))
+ goto end;
+
+ mvebu_cpu_resume = armada_370_xp_cpu_resume;
+ mvebu_v7_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend;
+ mvebu_v7_cpuidle_device.name = "cpuidle-armada-xp";
+
+end:
+ of_node_put(np);
+ return 0;
+}
+
+static int __init mvebu_v7_cpu_pm_init(void)
+{
+ struct device_node *np;
+ int ret;
+
+ np = of_find_matching_node(NULL, of_pmsu_table);
+ if (!np)
+ return 0;
+ of_node_put(np);
+
+ /*
+ * CPU idle support for Armada 38x is currently broken. Since
+ * CPU hotplug relies on some of the CPU idle functions, it is
+ * broken too, so let's disable it.
+ */
+ if (of_machine_is_compatible("marvell,armada380")) {
+ cpu_hotplug_disable();
+ pr_warn("CPU hotplug support is currently broken on Armada 38x: disabling\n");
+ }
+
+ if (of_machine_is_compatible("marvell,armadaxp"))
+ ret = armada_xp_cpuidle_init();
+ else if (of_machine_is_compatible("marvell,armada370"))
+ ret = armada_370_cpuidle_init();
+ else if (of_machine_is_compatible("marvell,armada380"))
+ ret = armada_38x_cpuidle_init();
+ else
+ return 0;
+
+ if (ret)
+ return ret;
+
+ mvebu_v7_pmsu_enable_l2_powerdown_onidle();
+ if (mvebu_v7_cpuidle_device.name)
+ platform_device_register(&mvebu_v7_cpuidle_device);
+ cpu_pm_register_notifier(&mvebu_v7_cpu_pm_notifier);
+
+ return 0;
+}
+
+arch_initcall(mvebu_v7_cpu_pm_init);
+early_initcall(mvebu_v7_pmsu_init);
+
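+/*
+ * Runs on the target CPU (via smp_call_function_single()): configure
+ * the PMSU for a DFS request and enter WFI so that the hardware
+ * performs the frequency transition.
+ */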
+static void mvebu_pmsu_dfs_request_local(void *data)
+{
+ u32 reg;
+ u32 cpu = smp_processor_id();
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /* Prepare to enter idle */
+ reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
+ reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT |
+ PMSU_STATUS_AND_MASK_IRQ_MASK |
+ PMSU_STATUS_AND_MASK_FIQ_MASK;
+ writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
+
+ /* Request the DFS transition */
+ reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));
+ reg |= PMSU_CONTROL_AND_CONFIG_DFS_REQ;
+ writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));
+
+ /* Entering idle will trigger the DFS transition */
+ wfi();
+
+ /*
+ * We're back from idle, the DFS transition has completed,
+ * clear the idle wait indication.
+ */
+ reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
+ reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
+ writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
+
+ local_irq_restore(flags);
+}
+
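+/*
+ * Trigger a DFS (frequency) transition on the given CPU and poll the
+ * PMSU event register for up to one second until it completes.
+ */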
+int mvebu_pmsu_dfs_request(int cpu)
+{
+ unsigned long timeout;
+ int hwcpu = cpu_logical_map(cpu);
+ u32 reg;
+
+ /* Clear any previous DFS DONE event */
+ reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
+ reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE;
+ writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
+
+ /* Mask the DFS done interrupt, since we are going to poll */
+ reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
+ reg |= PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK;
+ writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
+
+ /* Trigger the DFS on the appropriate CPU */
+ smp_call_function_single(cpu, mvebu_pmsu_dfs_request_local,
+ NULL, false);
+
+ /* Poll until the DFS done event is generated */
+ timeout = jiffies + HZ;
+ while (time_before(jiffies, timeout)) {
+ reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
+ if (reg & PMSU_EVENT_STATUS_AND_MASK_DFS_DONE)
+ break;
+ udelay(10);
+ }
+
+ if (time_after(jiffies, timeout))
+ return -ETIME;
+
+ /* Restore the DFS mask to its original state */
+ reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
+ reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK;
+ writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
+
+ return 0;
+}
diff --git a/arch/arm/mach-mvebu/pmsu.h b/arch/arm/mach-mvebu/pmsu.h
new file mode 100644
index 000000000..ea79269c2
--- /dev/null
+++ b/arch/arm/mach-mvebu/pmsu.h
@@ -0,0 +1,24 @@
+/*
+ * Power Management Service Unit (PMSU) support for Armada 370/XP platforms.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_MVEBU_PMSU_H
+#define __MACH_MVEBU_PMSU_H
+
+int armada_xp_boot_cpu(unsigned int cpu_id, void *phys_addr);
+int mvebu_setup_boot_addr_wa(unsigned int crypto_eng_target,
+ unsigned int crypto_eng_attribute,
+ phys_addr_t resume_addr_reg);
+
+void mvebu_v7_pmsu_idle_exit(void);
+void armada_370_xp_cpu_resume(void);
+
+int armada_370_xp_pmsu_idle_enter(unsigned long deepidle);
+int armada_38x_do_cpu_suspend(unsigned long deepidle);
+#endif /* __MACH_MVEBU_PMSU_H */
diff --git a/arch/arm/mach-mvebu/pmsu_ll.S b/arch/arm/mach-mvebu/pmsu_ll.S
new file mode 100644
index 000000000..88651221d
--- /dev/null
+++ b/arch/arm/mach-mvebu/pmsu_ll.S
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2014 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ * Gregory Clement <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+
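+/* Switch the SCU power status of the current CPU back to Normal mode */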
+ENTRY(armada_38x_scu_power_up)
+ mrc p15, 4, r1, c15, c0 @ get SCU base address
+ orr r1, r1, #0x8 @ SCU CPU Power Status Register
+ mrc p15, 0, r0, c0, c0, 5 @ get the CPU ID
+ and r0, r0, #15
+ add r1, r1, r0
+ mov r0, #0x0
+ strb r0, [r1] @ switch SCU power state to Normal mode
+ ret lr
+ENDPROC(armada_38x_scu_power_up)
+
+/*
+ * This is the entry point through which CPUs exiting the cpuidle
+ * deep idle state go.
+ */
+ENTRY(armada_370_xp_cpu_resume)
+ARM_BE8(setend be ) @ go BE8 if entered LE
+ /*
+ * Disable the MMU that might have been enabled in BootROM if
+ * this code is used in the resume path of a suspend/resume
+ * cycle.
+ */
+ mrc p15, 0, r1, c1, c0, 0
+ bic r1, #1
+ mcr p15, 0, r1, c1, c0, 0
+ bl ll_add_cpu_to_smp_group
+ bl ll_enable_coherency
+ b cpu_resume
+ENDPROC(armada_370_xp_cpu_resume)
+
+ENTRY(armada_38x_cpu_resume)
+ /* do we need this for Armada 38x? */
+ARM_BE8(setend be ) @ go BE8 if entered LE
+ bl v7_invalidate_l1
+ bl armada_38x_scu_power_up
+ b cpu_resume
+ENDPROC(armada_38x_cpu_resume)
+
+.global mvebu_boot_wa_start
+.global mvebu_boot_wa_end
+
+/* The following code will be executed from SRAM */
+ENTRY(mvebu_boot_wa_start)
+mvebu_boot_wa_start:
+ARM_BE8(setend be)
+ adr r0, 1f
+ ldr r0, [r0] @ load the address of the
+ @ resume register
+ ldr r0, [r0] @ load the value in the
+ @ resume register
+ARM_BE8(rev r0, r0) @ the value is stored LE
+ mov pc, r0 @ jump to this value
+/*
+ * The last word of this piece of code is filled with the physical
+ * address of the boot address register just after the code has been
+ * copied to SRAM.
+ */
+1:
+ .long .
+mvebu_boot_wa_end:
+ENDPROC(mvebu_boot_wa_end)
diff --git a/arch/arm/mach-mvebu/system-controller.c b/arch/arm/mach-mvebu/system-controller.c
new file mode 100644
index 000000000..04d9ebe6a
--- /dev/null
+++ b/arch/arm/mach-mvebu/system-controller.c
@@ -0,0 +1,180 @@
+/*
+ * System controller support for Armada 370, 375 and XP platforms.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * The Armada 370, 375 and Armada XP SoCs have a range of
+ * miscellaneous registers, that do not belong to a particular device,
+ * but rather provide system-level features. This basic
+ * system-controller driver provides a device tree binding for those
+ * registers, and implements utility functions offering various
+ * features related to those registers.
+ *
+ * For now, the feature set is limited to restarting the platform by a
+ * soft-reset, but it might be extended in the future.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/reboot.h>
+#include "common.h"
+#include "mvebu-soc-id.h"
+#include "pmsu.h"
+
+#define ARMADA_375_CRYPT0_ENG_TARGET 41
+#define ARMADA_375_CRYPT0_ENG_ATTR 1
+
+static void __iomem *system_controller_base;
+static phys_addr_t system_controller_phys_base;
+
+struct mvebu_system_controller {
+ u32 rstoutn_mask_offset;
+ u32 system_soft_reset_offset;
+
+ u32 rstoutn_mask_reset_out_en;
+ u32 system_soft_reset;
+
+ u32 resume_boot_addr;
+
+ u32 dev_id;
+ u32 rev_id;
+};
+static struct mvebu_system_controller *mvebu_sc;
+
+static const struct mvebu_system_controller armada_370_xp_system_controller = {
+ .rstoutn_mask_offset = 0x60,
+ .system_soft_reset_offset = 0x64,
+ .rstoutn_mask_reset_out_en = 0x1,
+ .system_soft_reset = 0x1,
+ .dev_id = 0x38,
+ .rev_id = 0x3c,
+};
+
+static const struct mvebu_system_controller armada_375_system_controller = {
+ .rstoutn_mask_offset = 0x54,
+ .system_soft_reset_offset = 0x58,
+ .rstoutn_mask_reset_out_en = 0x1,
+ .system_soft_reset = 0x1,
+ .resume_boot_addr = 0xd4,
+ .dev_id = 0x38,
+ .rev_id = 0x3c,
+};
+
+static const struct mvebu_system_controller orion_system_controller = {
+ .rstoutn_mask_offset = 0x108,
+ .system_soft_reset_offset = 0x10c,
+ .rstoutn_mask_reset_out_en = 0x4,
+ .system_soft_reset = 0x1,
+};
+
+static const struct of_device_id of_system_controller_table[] = {
+ {
+ .compatible = "marvell,orion-system-controller",
+ .data = (void *) &orion_system_controller,
+ }, {
+ .compatible = "marvell,armada-370-xp-system-controller",
+ .data = (void *) &armada_370_xp_system_controller,
+ }, {
+ .compatible = "marvell,armada-375-system-controller",
+ .data = (void *) &armada_375_system_controller,
+ },
+ { /* end of list */ },
+};
+
+void mvebu_restart(enum reboot_mode mode, const char *cmd)
+{
+ if (!system_controller_base) {
+ pr_err("Cannot restart, system-controller not available: check the device tree\n");
+ } else {
+ /*
+ * Enable soft reset to assert RSTOUTn.
+ */
+ writel(mvebu_sc->rstoutn_mask_reset_out_en,
+ system_controller_base +
+ mvebu_sc->rstoutn_mask_offset);
+ /*
+ * Assert soft reset.
+ */
+ writel(mvebu_sc->system_soft_reset,
+ system_controller_base +
+ mvebu_sc->system_soft_reset_offset);
+ }
+
+ while (1)
+ ;
+}
+
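+/*
+ * Read the SoC device and revision IDs from the system controller
+ * registers (only available on Armada 38x).
+ */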
+int mvebu_system_controller_get_soc_id(u32 *dev, u32 *rev)
+{
+ if (of_machine_is_compatible("marvell,armada380") &&
+ system_controller_base) {
+ *dev = readl(system_controller_base + mvebu_sc->dev_id) >> 16;
+ *rev = (readl(system_controller_base + mvebu_sc->rev_id) >> 8)
+ & 0xF;
+ return 0;
+ } else
+ return -ENODEV;
+}
+
+#if defined(CONFIG_SMP) && defined(CONFIG_MACH_MVEBU_V7)
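+/*
+ * Apply the Armada 375 Z1 boot address workaround: remap a crypto SRAM
+ * over the BootROM window, with the resume address register taken from
+ * the system controller.
+ */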
+static void mvebu_armada375_smp_wa_init(void)
+{
+ u32 dev, rev;
+ phys_addr_t resume_addr_reg;
+
+ if (mvebu_get_soc_id(&dev, &rev) != 0)
+ return;
+
+ if (rev != ARMADA_375_Z1_REV)
+ return;
+
+ resume_addr_reg = system_controller_phys_base +
+ mvebu_sc->resume_boot_addr;
+ mvebu_setup_boot_addr_wa(ARMADA_375_CRYPT0_ENG_TARGET,
+ ARMADA_375_CRYPT0_ENG_ATTR,
+ resume_addr_reg);
+}
+
+void mvebu_system_controller_set_cpu_boot_addr(void *boot_addr)
+{
+ BUG_ON(system_controller_base == NULL);
+ BUG_ON(mvebu_sc->resume_boot_addr == 0);
+
+ if (of_machine_is_compatible("marvell,armada375"))
+ mvebu_armada375_smp_wa_init();
+
+ writel(__pa_symbol(boot_addr), system_controller_base +
+ mvebu_sc->resume_boot_addr);
+}
+#endif
+
+static int __init mvebu_system_controller_init(void)
+{
+ const struct of_device_id *match;
+ struct device_node *np;
+
+ np = of_find_matching_node_and_match(NULL, of_system_controller_table,
+ &match);
+ if (np) {
+ struct resource res;
+ system_controller_base = of_iomap(np, 0);
+ of_address_to_resource(np, 0, &res);
+ system_controller_phys_base = res.start;
+ mvebu_sc = (struct mvebu_system_controller *)match->data;
+ of_node_put(np);
+ }
+
+ return 0;
+}
+
+early_initcall(mvebu_system_controller_init);