author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 17:43:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 17:43:51 +0000
commit     be58c81aff4cd4c0ccf43dbd7998da4a6a08c03b (patch)
tree       779c248fb61c83f65d1f0dc867f2053d76b4e03a /plat/marvell/armada/common
parent     Initial commit. (diff)
Adding upstream version 2.10.0+dfsg. (upstream/2.10.0+dfsg, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'plat/marvell/armada/common')
-rw-r--r--  plat/marvell/armada/common/aarch64/marvell_bl2_mem_params_desc.c  168
-rw-r--r--  plat/marvell/armada/common/aarch64/marvell_common.c               137
-rw-r--r--  plat/marvell/armada/common/aarch64/marvell_helpers.S              259
-rw-r--r--  plat/marvell/armada/common/marvell_bl1_setup.c                    105
-rw-r--r--  plat/marvell/armada/common/marvell_bl2_setup.c                    158
-rw-r--r--  plat/marvell/armada/common/marvell_bl31_setup.c                   237
-rw-r--r--  plat/marvell/armada/common/marvell_cci.c                           52
-rw-r--r--  plat/marvell/armada/common/marvell_common.mk                       99
-rw-r--r--  plat/marvell/armada/common/marvell_console.c                       77
-rw-r--r--  plat/marvell/armada/common/marvell_ddr_info.c                     112
-rw-r--r--  plat/marvell/armada/common/marvell_gicv2.c                        148
-rw-r--r--  plat/marvell/armada/common/marvell_gicv3.c                        210
-rw-r--r--  plat/marvell/armada/common/marvell_image_load.c                    36
-rw-r--r--  plat/marvell/armada/common/marvell_io_storage.c                   227
-rw-r--r--  plat/marvell/armada/common/marvell_pm.c                            60
-rw-r--r--  plat/marvell/armada/common/marvell_topology.c                      84
-rw-r--r--  plat/marvell/armada/common/mrvl_sip_svc.c                         188
-rw-r--r--  plat/marvell/armada/common/mss/mss_common.mk                       20
-rw-r--r--  plat/marvell/armada/common/mss/mss_ipc_drv.c                      113
-rw-r--r--  plat/marvell/armada/common/mss/mss_ipc_drv.h                      120
-rw-r--r--  plat/marvell/armada/common/mss/mss_mem.h                           60
-rw-r--r--  plat/marvell/armada/common/mss/mss_scp_bl2_format.h                45
-rw-r--r--  plat/marvell/armada/common/mss/mss_scp_bootloader.c               368
-rw-r--r--  plat/marvell/armada/common/mss/mss_scp_bootloader.h                20
-rw-r--r--  plat/marvell/armada/common/plat_delay_timer.c                      34
25 files changed, 3137 insertions, 0 deletions
diff --git a/plat/marvell/armada/common/aarch64/marvell_bl2_mem_params_desc.c b/plat/marvell/armada/common/aarch64/marvell_bl2_mem_params_desc.c
new file mode 100644
index 0000000..6c55858
--- /dev/null
+++ b/plat/marvell/armada/common/aarch64/marvell_bl2_mem_params_desc.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+
+#include <common/bl_common.h>
+#include <common/desc_image_load.h>
+#include <marvell_def.h>
+#include <plat/common/platform.h>
+
+/*******************************************************************************
+ * The following descriptor provides the BL image/entry-point information
+ * that is used by BL2 to load the images; a subset of this information is
+ * also passed on to the next BL image. The image loading sequence is
+ * managed by populating the images in the required loading order. The
+ * image execution sequence is managed by populating
+ * `next_handoff_image_id` with the id of the next executable image.
+ ******************************************************************************/
+static bl_mem_params_node_t bl2_mem_params_descs[] = {
+#ifdef SCP_BL2_BASE
+ /* Fill SCP_BL2 related information if it exists */
+ {
+ .image_id = SCP_BL2_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY,
+ VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY,
+ VERSION_2, image_info_t, 0),
+ .image_info.image_base = SCP_BL2_BASE,
+ .image_info.image_max_size = SCP_BL2_SIZE,
+
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ },
+#endif /* SCP_BL2_BASE */
+
+#ifdef EL3_PAYLOAD_BASE
+ /* Fill EL3 payload related information (BL31 is EL3 payload)*/
+ {
+ .image_id = BL31_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t,
+ SECURE | EXECUTABLE | EP_FIRST_EXE),
+ .ep_info.pc = EL3_PAYLOAD_BASE,
+ .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS),
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t,
+ IMAGE_ATTRIB_PLAT_SETUP | IMAGE_ATTRIB_SKIP_LOADING),
+
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ },
+
+#else /* EL3_PAYLOAD_BASE */
+
+ /* Fill BL31 related information */
+ {
+ .image_id = BL31_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t,
+ SECURE | EXECUTABLE | EP_FIRST_EXE),
+ .ep_info.pc = BL31_BASE,
+ .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS),
+#if DEBUG
+ .ep_info.args.arg3 = MARVELL_BL31_PLAT_PARAM_VAL,
+#endif
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, IMAGE_ATTRIB_PLAT_SETUP),
+ .image_info.image_base = BL31_BASE,
+ .image_info.image_max_size = BL31_LIMIT - BL31_BASE,
+
+# ifdef BL32_BASE
+ .next_handoff_image_id = BL32_IMAGE_ID,
+# else
+ .next_handoff_image_id = BL33_IMAGE_ID,
+# endif
+ },
+
+# ifdef BL32_BASE
+ /* Fill BL32 related information */
+ {
+ .image_id = BL32_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t, SECURE | EXECUTABLE),
+ .ep_info.pc = BL32_BASE,
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, 0),
+ .image_info.image_base = BL32_BASE,
+ .image_info.image_max_size = BL32_LIMIT - BL32_BASE,
+
+ .next_handoff_image_id = BL33_IMAGE_ID,
+ },
+
+ /*
+ * Fill BL32 external 1 related information.
+ * A typical use for extra1 image is with OP-TEE
+ * where it is the pager image.
+ */
+ {
+ .image_id = BL32_EXTRA1_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+ .image_info.image_base = BL32_BASE,
+ .image_info.image_max_size = BL32_LIMIT - BL32_BASE,
+
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ },
+
+ /*
+ * Fill BL32 external 2 related information.
+ * A typical use for extra2 image is with OP-TEE,
+ * where it is the paged image.
+ */
+ {
+ .image_id = BL32_EXTRA2_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+#ifdef SPD_opteed
+ .image_info.image_base = MARVELL_OPTEE_PAGEABLE_LOAD_BASE,
+ .image_info.image_max_size = MARVELL_OPTEE_PAGEABLE_LOAD_SIZE,
+#endif
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ },
+# endif /* BL32_BASE */
+
+ /* Fill BL33 related information */
+ {
+ .image_id = BL33_IMAGE_ID,
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t, NON_SECURE | EXECUTABLE),
+# ifdef PRELOADED_BL33_BASE
+ .ep_info.pc = PRELOADED_BL33_BASE,
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+# else
+ .ep_info.pc = MARVELL_DRAM_BASE,
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, 0),
+ .image_info.image_base = MARVELL_DRAM_BASE,
+ .image_info.image_max_size = MARVELL_DRAM_SIZE,
+# endif /* PRELOADED_BL33_BASE */
+
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ }
+#endif /* EL3_PAYLOAD_BASE */
+};
+
+REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs)
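
The execution order encoded by this descriptor is driven entirely by the
`next_handoff_image_id` links (BL31 to BL32, if present, then BL33). A minimal
stand-alone sketch of how such a chain resolves, using simplified stand-in
types rather than the real TF-A structures:

    /* Illustrative only: simplified stand-ins for the real TF-A types. */
    #include <stdio.h>

    enum { BL31_ID, BL32_ID, BL33_ID, INVALID_ID };

    struct demo_desc {
        int image_id;
        int next_handoff_image_id;
    };

    /* Mirrors the handoff links declared in bl2_mem_params_descs above. */
    static const struct demo_desc descs[] = {
        { BL31_ID, BL32_ID },
        { BL32_ID, BL33_ID },
        { BL33_ID, INVALID_ID },
    };

    int main(void)
    {
        int id = BL31_ID;   /* execution starts at the EL3 image */

        while (id != INVALID_ID) {
            printf("execute image %d\n", id);
            /* find this image's descriptor and follow its handoff link */
            for (unsigned int i = 0; i < 3; i++) {
                if (descs[i].image_id == id) {
                    id = descs[i].next_handoff_image_id;
                    break;
                }
            }
        }
        return 0;
    }
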
diff --git a/plat/marvell/armada/common/aarch64/marvell_common.c b/plat/marvell/armada/common/aarch64/marvell_common.c
new file mode 100644
index 0000000..21a62d4
--- /dev/null
+++ b/plat/marvell/armada/common/aarch64/marvell_common.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <assert.h>
+
+#include <platform_def.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <lib/mmio.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+#include <plat_marvell.h>
+
+/* Weak definitions may be overridden in specific ARM standard platform */
+#pragma weak plat_get_ns_image_entrypoint
+#pragma weak plat_marvell_get_mmap
+
+/*
+ * Set up the page tables for the generic and platform-specific memory regions.
+ * The extents of the generic memory regions are specified by the function
+ * arguments and consist of:
+ * - Trusted SRAM seen by the BL image;
+ * - Code section;
+ * - Read-only data section;
+ * - Coherent memory region, if applicable.
+ */
+void marvell_setup_page_tables(uintptr_t total_base,
+ size_t total_size,
+ uintptr_t code_start,
+ uintptr_t code_limit,
+ uintptr_t rodata_start,
+ uintptr_t rodata_limit
+#if USE_COHERENT_MEM
+ ,
+ uintptr_t coh_start,
+ uintptr_t coh_limit
+#endif
+ )
+{
+ /*
+ * Map the Trusted SRAM with appropriate memory attributes.
+ * Subsequent mappings will adjust the attributes for specific regions.
+ */
+ VERBOSE("Trusted SRAM seen by this BL image: %p - %p\n",
+ (void *) total_base, (void *) (total_base + total_size));
+ mmap_add_region(total_base, total_base,
+ total_size,
+ MT_MEMORY | MT_RW | MT_SECURE);
+
+ /* Re-map the code section */
+ VERBOSE("Code region: %p - %p\n",
+ (void *) code_start, (void *) code_limit);
+ mmap_add_region(code_start, code_start,
+ code_limit - code_start,
+ MT_CODE | MT_SECURE);
+
+ /* Re-map the read-only data section */
+ VERBOSE("Read-only data region: %p - %p\n",
+ (void *) rodata_start, (void *) rodata_limit);
+ mmap_add_region(rodata_start, rodata_start,
+ rodata_limit - rodata_start,
+ MT_RO_DATA | MT_SECURE);
+
+#if USE_COHERENT_MEM
+ /* Re-map the coherent memory region */
+ VERBOSE("Coherent region: %p - %p\n",
+ (void *) coh_start, (void *) coh_limit);
+ mmap_add_region(coh_start, coh_start,
+ coh_limit - coh_start,
+ MT_DEVICE | MT_RW | MT_SECURE);
+#endif
+
+ /* Now (re-)map the platform-specific memory regions */
+ mmap_add(plat_marvell_get_mmap());
+
+ /* Create the page tables to reflect the above mappings */
+ init_xlat_tables();
+}
+
+unsigned long plat_get_ns_image_entrypoint(void)
+{
+ return PLAT_MARVELL_NS_IMAGE_OFFSET;
+}
+
+/*****************************************************************************
+ * Gets SPSR for BL32 entry
+ *****************************************************************************
+ */
+uint32_t marvell_get_spsr_for_bl32_entry(void)
+{
+ /*
+ * The Secure Payload Dispatcher service is responsible for
+ * setting the SPSR prior to entry into the BL32 image.
+ */
+ return 0;
+}
+
+/*****************************************************************************
+ * Gets SPSR for BL33 entry
+ *****************************************************************************
+ */
+uint32_t marvell_get_spsr_for_bl33_entry(void)
+{
+ unsigned long el_status;
+ unsigned int mode;
+ uint32_t spsr;
+
+ /* Figure out what mode we enter the non-secure world in */
+ el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
+ el_status &= ID_AA64PFR0_ELX_MASK;
+
+ mode = (el_status) ? MODE_EL2 : MODE_EL1;
+
+ /*
+ * TODO: Consider the possibility of specifying the SPSR in
+ * the FIP ToC and allowing the platform to have a say as
+ * well.
+ */
+ spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+ return spsr;
+}
+
+/*****************************************************************************
+ * Returns ARM platform specific memory map regions.
+ *****************************************************************************
+ */
+const mmap_region_t *plat_marvell_get_mmap(void)
+{
+ return plat_marvell_mmap;
+}
+
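
marvell_get_spsr_for_bl33_entry() above picks EL2 when the EL2 field of
ID_AA64PFR0_EL1 is non-zero and EL1 otherwise. A small stand-alone sketch of
that check; the shift and mask are redefined locally for illustration (in TF-A
they come from arch.h):

    /* Illustrative only: mirrors the EL2-presence check above. The shift and
     * mask are local stand-ins; TF-A defines them in arch.h. */
    #include <stdint.h>
    #include <stdio.h>

    #define ID_AA64PFR0_EL2_SHIFT   8U      /* EL2 field occupies bits [11:8] */
    #define ID_AA64PFR0_ELX_MASK    0xFULL

    static unsigned int bl33_entry_el(uint64_t id_aa64pfr0)
    {
        uint64_t el2 = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) &
                       ID_AA64PFR0_ELX_MASK;

        /* Non-zero means EL2 is implemented, so BL33 is entered in EL2;
         * otherwise it is entered in EL1. */
        return (el2 != 0U) ? 2U : 1U;
    }

    int main(void)
    {
        printf("EL2 present -> enter BL33 at EL%u\n", bl33_entry_el(0x1122));
        printf("EL2 missing -> enter BL33 at EL%u\n", bl33_entry_el(0x1022));
        return 0;
    }
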
diff --git a/plat/marvell/armada/common/aarch64/marvell_helpers.S b/plat/marvell/armada/common/aarch64/marvell_helpers.S
new file mode 100644
index 0000000..3038ec0
--- /dev/null
+++ b/plat/marvell/armada/common/aarch64/marvell_helpers.S
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2020, ARM Limited. All rights reserved.
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <asm_macros.S>
+#include <cortex_a72.h>
+#ifndef PLAT_a3700
+#include <drivers/marvell/ccu.h>
+#include <drivers/marvell/cache_llc.h>
+#endif
+#include <marvell_def.h>
+#include <platform_def.h>
+
+ .weak plat_marvell_calc_core_pos
+ .weak plat_my_core_pos
+ .globl plat_crash_console_init
+ .globl plat_crash_console_putc
+ .globl plat_crash_console_flush
+ .globl platform_mem_init
+ .globl disable_mmu_dcache
+ .globl invalidate_tlb_all
+ .globl platform_unmap_sram
+ .globl disable_sram
+ .globl disable_icache
+ .globl invalidate_icache_all
+ .globl marvell_exit_bootrom
+ .globl ca72_l2_enable_unique_clean
+
+ /* -----------------------------------------------------
+ * unsigned int plat_my_core_pos(void)
+ * This function uses the plat_marvell_calc_core_pos()
+ * definition to get the index of the calling CPU.
+ * -----------------------------------------------------
+ */
+func plat_my_core_pos
+ mrs x0, mpidr_el1
+ b plat_marvell_calc_core_pos
+endfunc plat_my_core_pos
+
+ /* -----------------------------------------------------
+ * unsigned int plat_marvell_calc_core_pos(uint64_t mpidr)
+ * Helper function to calculate the core position.
+ * With this function: CorePos = (ClusterId * 2) +
+ * CoreId
+ * -----------------------------------------------------
+ */
+func plat_marvell_calc_core_pos
+ and x1, x0, #MPIDR_CPU_MASK
+ and x0, x0, #MPIDR_CLUSTER_MASK
+ add x0, x1, x0, LSR #7
+ ret
+endfunc plat_marvell_calc_core_pos
+
+ /* ---------------------------------------------
+ * int plat_crash_console_init(void)
+ * Function to initialize the crash console
+ * without a C Runtime to print crash report.
+ * Clobber list : x0, x1, x2
+ * ---------------------------------------------
+ */
+func plat_crash_console_init
+#ifdef PLAT_a3700
+ mov x1, x30
+ bl get_ref_clk
+ mov x30, x1
+ mov_imm x1, 1000000
+ mul x1, x0, x1
+#else
+ mov_imm x1, PLAT_MARVELL_UART_CLK_IN_HZ
+#endif
+ mov_imm x0, PLAT_MARVELL_UART_BASE
+ mov_imm x2, MARVELL_CONSOLE_BAUDRATE
+#ifdef PLAT_a3700
+ b console_a3700_core_init
+#else
+ b console_16550_core_init
+#endif
+endfunc plat_crash_console_init
+
+ /* ---------------------------------------------
+ * int plat_crash_console_putc(int c)
+ * Function to print a character on the crash
+ * console without a C Runtime.
+ * Clobber list : x1, x2
+ * ---------------------------------------------
+ */
+func plat_crash_console_putc
+ mov_imm x1, PLAT_MARVELL_UART_BASE
+#ifdef PLAT_a3700
+
+ b console_a3700_core_putc
+#else
+ b console_16550_core_putc
+#endif
+endfunc plat_crash_console_putc
+
+ /* ---------------------------------------------
+ * void plat_crash_console_flush()
+ * Function to force a write of all buffered
+ * data that hasn't been output.
+ * Out : void.
+ * Clobber list : r0
+ * ---------------------------------------------
+ */
+func plat_crash_console_flush
+ mov_imm x0, PLAT_MARVELL_UART_BASE
+#ifdef PLAT_a3700
+ b console_a3700_core_flush
+#else
+ b console_16550_core_flush
+#endif
+endfunc plat_crash_console_flush
+
+ /* ---------------------------------------------------------------------
+	 * We don't need to carry out any memory initialization on Marvell
+ * platforms. The Secure RAM is accessible straight away.
+ * ---------------------------------------------------------------------
+ */
+func platform_mem_init
+ ret
+endfunc platform_mem_init
+
+ /* -----------------------------------------------------
+ * Disable icache, dcache, and MMU
+ * -----------------------------------------------------
+ */
+func disable_mmu_dcache
+ mrs x0, sctlr_el3
+ bic x0, x0, 0x1 /* M bit - MMU */
+ bic x0, x0, 0x4 /* C bit - Dcache L1 & L2 */
+ msr sctlr_el3, x0
+ isb
+ b mmu_off
+mmu_off:
+ ret
+endfunc disable_mmu_dcache
+
+ /* -----------------------------------------------------
+ * Disable all TLB entries
+ * -----------------------------------------------------
+ */
+func invalidate_tlb_all
+ tlbi alle3
+ dsb sy
+ isb
+ ret
+endfunc invalidate_tlb_all
+
+ /* -----------------------------------------------------
+ * Disable the i cache
+ * -----------------------------------------------------
+ */
+func disable_icache
+ mrs x0, sctlr_el3
+ bic x0, x0, 0x1000 /* I bit - Icache L1 & L2 */
+ msr sctlr_el3, x0
+ isb
+ ret
+endfunc disable_icache
+
+ /* -----------------------------------------------------
+ * Disable all of the i caches
+ * -----------------------------------------------------
+ */
+func invalidate_icache_all
+ ic ialluis
+ isb sy
+ ret
+endfunc invalidate_icache_all
+
+ /* -----------------------------------------------------
+ * Clear the SRAM enabling bit to unmap SRAM
+ * -----------------------------------------------------
+ */
+func platform_unmap_sram
+ ldr x0, =CCU_SRAM_WIN_CR
+ str wzr, [x0]
+ ret
+endfunc platform_unmap_sram
+
+ /* -----------------------------------------------------
+ * Disable the SRAM
+ * -----------------------------------------------------
+ */
+func disable_sram
+	/* Disable the line lockings. They must be disabled explicitly
+ * or the OS will have problems using the cache */
+ ldr x1, =MASTER_LLC_TC0_LOCK
+ str wzr, [x1]
+
+ /* Invalidate all ways */
+ ldr w1, =LLC_WAY_MASK
+ ldr x0, =MASTER_LLC_INV_WAY
+ str w1, [x0]
+
+ /* Finally disable LLC */
+ ldr x0, =MASTER_LLC_CTRL
+ str wzr, [x0]
+
+ ret
+endfunc disable_sram
+
+ /* -----------------------------------------------------
+ * Operation when exit bootROM:
+ * Disable the MMU
+ * Disable and invalidate the dcache
+ * Unmap and disable the SRAM
+ * Disable and invalidate the icache
+ * -----------------------------------------------------
+ */
+func marvell_exit_bootrom
+ /* Save the system restore address */
+ mov x28, x0
+
+ /* Close the caches and MMU */
+ bl disable_mmu_dcache
+
+ /*
+ * There is nothing important in the caches now,
+ * so invalidate them instead of cleaning.
+ */
+ adr x0, __RW_START__
+ adr x1, __RW_END__
+ sub x1, x1, x0
+ bl inv_dcache_range
+ bl invalidate_tlb_all
+
+ /*
+ * Clean the memory mapping of SRAM
+ * the DDR mapping will remain to enable boot image to execute
+ */
+ bl platform_unmap_sram
+
+ /* Disable the SRAM */
+ bl disable_sram
+
+ /* Disable and invalidate icache */
+ bl disable_icache
+ bl invalidate_icache_all
+
+ mov x0, x28
+ br x0
+endfunc marvell_exit_bootrom
+
+ /*
+ * Enable L2 UniqueClean evictions with data
+ */
+func ca72_l2_enable_unique_clean
+
+ mrs x0, CORTEX_A72_L2ACTLR_EL1
+ orr x0, x0, #CORTEX_A72_L2ACTLR_ENABLE_UNIQUE_CLEAN
+ msr CORTEX_A72_L2ACTLR_EL1, x0
+
+ ret
+endfunc ca72_l2_enable_unique_clean
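
plat_marvell_calc_core_pos() computes CorePos = ClusterId * 2 + CoreId by
masking Aff0/Aff1 and shifting the cluster field right by 7, one less than its
bit position, which multiplies it by two. A C rendering of the same
arithmetic, with the mask values assumed from the generic AArch64 MPIDR
layout:

    /* Illustrative C equivalent of plat_marvell_calc_core_pos above. */
    #include <stdint.h>
    #include <stdio.h>

    #define MPIDR_CPU_MASK      0xFFULL     /* Aff0: core id within cluster */
    #define MPIDR_CLUSTER_MASK  0xFF00ULL   /* Aff1: cluster id */

    static unsigned int calc_core_pos(uint64_t mpidr)
    {
        /* (cluster << 8) >> 7 == cluster * 2, so pos = cluster * 2 + core */
        return (unsigned int)((mpidr & MPIDR_CPU_MASK) +
                              ((mpidr & MPIDR_CLUSTER_MASK) >> 7));
    }

    int main(void)
    {
        /* MPIDR 0x101 (cluster 1, core 1) -> position 3 */
        printf("core position = %u\n", calc_core_pos(0x101));
        return 0;
    }
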
diff --git a/plat/marvell/armada/common/marvell_bl1_setup.c b/plat/marvell/armada/common/marvell_bl1_setup.c
new file mode 100644
index 0000000..7b7cef3
--- /dev/null
+++ b/plat/marvell/armada/common/marvell_bl1_setup.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <platform_def.h>
+
+#include <bl1/bl1.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <drivers/arm/sp805.h>
+#include <drivers/console.h>
+#include <plat/common/platform.h>
+
+#include <plat_marvell.h>
+
+/* Weak definitions may be overridden in specific Marvell standard platform */
+#pragma weak bl1_early_platform_setup
+#pragma weak bl1_plat_arch_setup
+#pragma weak bl1_platform_setup
+#pragma weak bl1_plat_sec_mem_layout
+
+/* Data structure which holds the extents of the RAM for BL1*/
+static meminfo_t bl1_ram_layout;
+
+meminfo_t *bl1_plat_sec_mem_layout(void)
+{
+ return &bl1_ram_layout;
+}
+
+/*
+ * BL1 specific platform actions shared between Marvell standard platforms.
+ */
+void marvell_bl1_early_platform_setup(void)
+{
+ /* Initialize the console to provide early debug support */
+ marvell_console_boot_init();
+
+ /* Allow BL1 to see the whole Trusted RAM */
+ bl1_ram_layout.total_base = MARVELL_BL_RAM_BASE;
+ bl1_ram_layout.total_size = MARVELL_BL_RAM_SIZE;
+}
+
+void bl1_early_platform_setup(void)
+{
+ marvell_bl1_early_platform_setup();
+}
+
+/*
+ * Perform the very early platform specific architecture setup shared between
+ * MARVELL standard platforms. This only does basic initialization. Later
+ * architectural setup (bl1_arch_setup()) does not do anything platform
+ * specific.
+ */
+void marvell_bl1_plat_arch_setup(void)
+{
+ marvell_setup_page_tables(bl1_ram_layout.total_base,
+ bl1_ram_layout.total_size,
+ BL1_RO_BASE,
+ BL1_RO_LIMIT,
+ BL1_RO_DATA_BASE,
+ BL1_RO_DATA_END
+#if USE_COHERENT_MEM
+ , BL_COHERENT_RAM_BASE,
+ BL_COHERENT_RAM_END
+#endif
+ );
+ enable_mmu_el3(0);
+}
+
+void bl1_plat_arch_setup(void)
+{
+ marvell_bl1_plat_arch_setup();
+}
+
+/*
+ * Perform the platform specific architecture setup shared between
+ * MARVELL standard platforms.
+ */
+void marvell_bl1_platform_setup(void)
+{
+ /* Initialise the IO layer and register platform IO devices */
+ plat_marvell_io_setup();
+}
+
+void bl1_platform_setup(void)
+{
+ marvell_bl1_platform_setup();
+}
+
+void bl1_plat_prepare_exit(entry_point_info_t *ep_info)
+{
+#ifdef EL3_PAYLOAD_BASE
+ /*
+ * Program the EL3 payload's entry point address into the CPUs mailbox
+ * in order to release secondary CPUs from their holding pen and make
+ * them jump there.
+ */
+ marvell_program_trusted_mailbox(ep_info->pc);
+ dsbsy();
+ sev();
+#endif
+}
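
The marvell_program_trusted_mailbox()/dsbsy()/sev() sequence in
bl1_plat_prepare_exit() only makes sense together with a holding-pen loop
executed by the secondary CPUs. A hedged sketch of what such a loop typically
looks like; the mailbox address and the overall shape are assumptions for
illustration, not taken from this patch (AArch64, GCC-style inline asm):

    /* Hypothetical secondary-side holding pen, for illustration only.
     * The mailbox address is a placeholder, not the real Marvell layout. */
    #include <stdint.h>

    #define TRUSTED_MAILBOX_ADDR    0x04000000UL    /* placeholder address */

    void secondary_holding_pen(void)
    {
        volatile uintptr_t *mailbox =
                (volatile uintptr_t *)TRUSTED_MAILBOX_ADDR;

        /* Sleep until the primary writes an entry point and issues SEV;
         * this is what the mailbox write + dsbsy() + sev() above pair with. */
        while (*mailbox == 0U)
            __asm__ volatile("wfe");

        ((void (*)(void))*mailbox)();   /* jump to the released entry point */
    }
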
diff --git a/plat/marvell/armada/common/marvell_bl2_setup.c b/plat/marvell/armada/common/marvell_bl2_setup.c
new file mode 100644
index 0000000..3dfa82e
--- /dev/null
+++ b/plat/marvell/armada/common/marvell_bl2_setup.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <platform_def.h>
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/desc_image_load.h>
+#include <drivers/console.h>
+#include <lib/utils.h>
+
+#ifdef SPD_opteed
+#include <optee_utils.h>
+#endif
+#include <marvell_def.h>
+#include <plat_marvell.h>
+
+/* Data structure which holds the extents of the trusted SRAM for BL2 */
+static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
+
+/* Weak definitions may be overridden in specific MARVELL standard platform */
+#pragma weak bl2_early_platform_setup2
+#pragma weak bl2_platform_setup
+#pragma weak bl2_plat_arch_setup
+#pragma weak bl2_plat_sec_mem_layout
+
+meminfo_t *bl2_plat_sec_mem_layout(void)
+{
+ return &bl2_tzram_layout;
+}
+
+/*****************************************************************************
+ * BL1 has passed the extents of the trusted SRAM that should be visible to BL2
+ * in x0. This memory layout is sitting at the base of the free trusted SRAM.
+ * Copy it to a safe location before it is reclaimed by later BL2 functionality.
+ *****************************************************************************
+ */
+void marvell_bl2_early_platform_setup(meminfo_t *mem_layout)
+{
+ /* Initialize the console to provide early debug support */
+ marvell_console_boot_init();
+
+ /* Setup the BL2 memory layout */
+ bl2_tzram_layout = *mem_layout;
+
+ /* Initialise the IO layer and register platform IO devices */
+ plat_marvell_io_setup();
+}
+
+
+void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1,
+ u_register_t arg2, u_register_t arg3)
+{
+ struct meminfo *mem_layout = (struct meminfo *)arg1;
+
+ marvell_bl2_early_platform_setup(mem_layout);
+}
+
+void bl2_platform_setup(void)
+{
+ /* Nothing to do */
+}
+
+/*****************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the MMU in a quick and dirty way.
+ *****************************************************************************
+ */
+void marvell_bl2_plat_arch_setup(void)
+{
+ marvell_setup_page_tables(bl2_tzram_layout.total_base,
+ bl2_tzram_layout.total_size,
+ BL_CODE_BASE,
+ BL_CODE_END,
+ BL_RO_DATA_BASE,
+ BL_RO_DATA_END
+#if USE_COHERENT_MEM
+ , BL_COHERENT_RAM_BASE,
+ BL_COHERENT_RAM_END
+#endif
+ );
+ enable_mmu_el1(0);
+}
+
+void bl2_plat_arch_setup(void)
+{
+ marvell_bl2_plat_arch_setup();
+}
+
+int marvell_bl2_handle_post_image_load(unsigned int image_id)
+{
+ int err = 0;
+ bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
+
+#ifdef SPD_opteed
+ bl_mem_params_node_t *pager_mem_params = NULL;
+ bl_mem_params_node_t *paged_mem_params = NULL;
+#endif /* SPD_opteed */
+ assert(bl_mem_params);
+
+ switch (image_id) {
+ case BL32_IMAGE_ID:
+#ifdef SPD_opteed
+ pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
+ assert(pager_mem_params);
+
+ paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
+ assert(paged_mem_params);
+
+ err = parse_optee_header(&bl_mem_params->ep_info,
+ &pager_mem_params->image_info,
+ &paged_mem_params->image_info);
+ if (err != 0)
+ WARN("OPTEE header parse error.\n");
+#endif /* SPD_opteed */
+ bl_mem_params->ep_info.spsr = marvell_get_spsr_for_bl32_entry();
+ break;
+
+ case BL33_IMAGE_ID:
+ /* BL33 expects to receive the primary CPU MPID (through r0) */
+ bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
+ bl_mem_params->ep_info.spsr = marvell_get_spsr_for_bl33_entry();
+ break;
+#ifdef SCP_BL2_BASE
+ case SCP_BL2_IMAGE_ID:
+ /* The subsequent handling of SCP_BL2 is platform specific */
+ err = bl2_plat_handle_scp_bl2(&bl_mem_params->image_info);
+ if (err) {
+ WARN("Failure in platform-specific handling of SCP_BL2 image.\n");
+ }
+ break;
+#endif
+ default:
+ /* Do nothing in default case */
+ break;
+ }
+
+ return err;
+
+}
+
+/*******************************************************************************
+ * This function can be used by the platforms to update/use image
+ * information for given `image_id`.
+ ******************************************************************************/
+int bl2_plat_handle_post_image_load(unsigned int image_id)
+{
+ return marvell_bl2_handle_post_image_load(image_id);
+}
+
diff --git a/plat/marvell/armada/common/marvell_bl31_setup.c b/plat/marvell/armada/common/marvell_bl31_setup.c
new file mode 100644
index 0000000..26ba906
--- /dev/null
+++ b/plat/marvell/armada/common/marvell_bl31_setup.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <assert.h>
+
+#include <arch.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#ifdef USE_CCI
+#include <drivers/arm/cci.h>
+#endif
+#include <drivers/console.h>
+#include <plat/common/platform.h>
+
+#include <marvell_def.h>
+#include <marvell_plat_priv.h>
+#include <plat_marvell.h>
+
+/*
+ * Placeholder variables for copying the arguments that have been passed to
+ * BL31 from BL2.
+ */
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+/* Weak definitions may be overridden in specific ARM standard platform */
+#pragma weak bl31_early_platform_setup2
+#pragma weak bl31_platform_setup
+#pragma weak bl31_plat_arch_setup
+#pragma weak bl31_plat_get_next_image_ep_info
+#pragma weak plat_get_syscnt_freq2
+
+/*****************************************************************************
+ * Return a pointer to the 'entry_point_info' structure of the next image for
+ * the security state specified. BL33 corresponds to the non-secure image type
+ * while BL32 corresponds to the secure image type. A NULL pointer is returned
+ * if the image does not exist.
+ *****************************************************************************
+ */
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+ entry_point_info_t *next_image_info;
+
+ assert(sec_state_is_valid(type));
+ next_image_info = (type == NON_SECURE)
+ ? &bl33_image_ep_info : &bl32_image_ep_info;
+
+ return next_image_info;
+}
+
+/*****************************************************************************
+ * Perform any BL31 early platform setup common to ARM standard platforms.
+ * Here is an opportunity to copy parameters passed by the calling EL (S-EL1
+ * in BL2 & EL3 in BL1) before they are lost (potentially). This needs to be
+ * done before the MMU is initialized so that the memory layout can be used
+ * while creating page tables. BL2 has flushed this information to memory, so
+ * we are guaranteed to pick up good data.
+ *****************************************************************************
+ */
+void marvell_bl31_early_platform_setup(void *from_bl2,
+ uintptr_t soc_fw_config,
+ uintptr_t hw_config,
+ void *plat_params_from_bl2)
+{
+ /* Initialize the console to provide early debug support */
+ marvell_console_boot_init();
+
+#if RESET_TO_BL31
+ /* There are no parameters from BL2 if BL31 is a reset vector */
+ assert(from_bl2 == NULL);
+ assert(plat_params_from_bl2 == NULL);
+
+#ifdef BL32_BASE
+ /* Populate entry point information for BL32 */
+ SET_PARAM_HEAD(&bl32_image_ep_info,
+ PARAM_EP,
+ VERSION_1,
+ 0);
+ SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
+ bl32_image_ep_info.pc = BL32_BASE;
+ bl32_image_ep_info.spsr = marvell_get_spsr_for_bl32_entry();
+#endif /* BL32_BASE */
+
+ /* Populate entry point information for BL33 */
+ SET_PARAM_HEAD(&bl33_image_ep_info,
+ PARAM_EP,
+ VERSION_1,
+ 0);
+ /*
+ * Tell BL31 where the non-trusted software image
+ * is located and the entry state information
+ */
+ bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
+ bl33_image_ep_info.spsr = marvell_get_spsr_for_bl33_entry();
+ SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+
+#else
+ /*
+ * In debug builds, we pass a special value in 'plat_params_from_bl2'
+ * to verify platform parameters from BL2 to BL31.
+ * In release builds, it's not used.
+ */
+ assert(((unsigned long long)plat_params_from_bl2) ==
+ MARVELL_BL31_PLAT_PARAM_VAL);
+
+ /*
+	 * Check that the params passed from BL2 are not NULL.
+ */
+ bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
+ assert(params_from_bl2 != NULL);
+ assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
+ assert(params_from_bl2->h.version >= VERSION_2);
+
+ bl_params_node_t *bl_params = params_from_bl2->head;
+
+ /*
+	 * Copy the entry point information of BL33 and BL32 (if present).
+ * They are stored in Secure RAM, in BL2's address space.
+ */
+ while (bl_params != NULL) {
+ if (bl_params->image_id == BL32_IMAGE_ID)
+ bl32_image_ep_info = *bl_params->ep_info;
+
+ if (bl_params->image_id == BL33_IMAGE_ID)
+ bl33_image_ep_info = *bl_params->ep_info;
+
+ bl_params = bl_params->next_params_info;
+ }
+#endif
+}
+
+void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
+ u_register_t arg2, u_register_t arg3)
+
+{
+ marvell_bl31_early_platform_setup((void *)arg0, arg1, arg2,
+ (void *)arg3);
+
+#ifdef USE_CCI
+ /*
+ * Initialize CCI for this cluster during cold boot.
+ * No need for locks as no other CPU is active.
+ */
+ plat_marvell_interconnect_init();
+
+ /*
+ * Enable CCI coherency for the primary CPU's cluster.
+ * Platform specific PSCI code will enable coherency for other
+ * clusters.
+ */
+ plat_marvell_interconnect_enter_coherency();
+#endif
+}
+
+/*****************************************************************************
+ * Perform any BL31 platform setup common to ARM standard platforms
+ *****************************************************************************
+ */
+void marvell_bl31_platform_setup(void)
+{
+ /* Initialize the GIC driver, cpu and distributor interfaces */
+ plat_marvell_gic_driver_init();
+ plat_marvell_gic_init();
+
+ /* For Armada-8k-plus family, the SoC includes more than
+ * a single AP die, but the default die that boots is AP #0.
+ * For other families there is only one die (#0).
+ * Initialize psci arch from die 0
+ */
+ marvell_psci_arch_init(0);
+}
+
+/*****************************************************************************
+ * Perform any BL31 platform runtime setup prior to BL31 exit common to ARM
+ * standard platforms
+ *****************************************************************************
+ */
+void marvell_bl31_plat_runtime_setup(void)
+{
+ console_switch_state(CONSOLE_FLAG_RUNTIME);
+
+ /* Initialize the runtime console */
+ marvell_console_runtime_init();
+}
+
+void bl31_platform_setup(void)
+{
+ marvell_bl31_platform_setup();
+}
+
+void bl31_plat_runtime_setup(void)
+{
+ marvell_bl31_plat_runtime_setup();
+}
+
+/*****************************************************************************
+ * Perform the very early platform specific architectural setup shared between
+ * ARM standard platforms. This only does basic initialization. Later
+ * architectural setup (bl31_arch_setup()) does not do anything platform
+ * specific.
+ *****************************************************************************
+ */
+void marvell_bl31_plat_arch_setup(void)
+{
+ marvell_setup_page_tables(BL31_BASE,
+ BL31_END - BL31_BASE,
+ BL_CODE_BASE,
+ BL_CODE_END,
+ BL_RO_DATA_BASE,
+ BL_RO_DATA_END
+#if USE_COHERENT_MEM
+ , BL_COHERENT_RAM_BASE,
+ BL_COHERENT_RAM_END
+#endif
+ );
+
+#if BL31_CACHE_DISABLE
+ enable_mmu_el3(DISABLE_DCACHE);
+ INFO("Cache is disabled in BL3\n");
+#else
+ enable_mmu_el3(0);
+#endif
+}
+
+void bl31_plat_arch_setup(void)
+{
+ marvell_bl31_plat_arch_setup();
+}
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+ return PLAT_REF_CLK_IN_HZ;
+}
diff --git a/plat/marvell/armada/common/marvell_cci.c b/plat/marvell/armada/common/marvell_cci.c
new file mode 100644
index 0000000..80351ae
--- /dev/null
+++ b/plat/marvell/armada/common/marvell_cci.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <drivers/arm/cci.h>
+
+#include <plat_marvell.h>
+
+static const int cci_map[] = {
+ PLAT_MARVELL_CCI_CLUSTER0_SL_IFACE_IX,
+ PLAT_MARVELL_CCI_CLUSTER1_SL_IFACE_IX
+};
+
+/****************************************************************************
+ * The following functions are defined as weak to allow a platform to override
+ * the way ARM CCI driver is initialised and used.
+ ****************************************************************************
+ */
+#pragma weak plat_marvell_interconnect_init
+#pragma weak plat_marvell_interconnect_enter_coherency
+#pragma weak plat_marvell_interconnect_exit_coherency
+
+
+/****************************************************************************
+ * Helper function to initialize ARM CCI driver.
+ ****************************************************************************
+ */
+void plat_marvell_interconnect_init(void)
+{
+ cci_init(PLAT_MARVELL_CCI_BASE, cci_map, ARRAY_SIZE(cci_map));
+}
+
+/****************************************************************************
+ * Helper function to place current master into coherency
+ ****************************************************************************
+ */
+void plat_marvell_interconnect_enter_coherency(void)
+{
+ cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+}
+
+/****************************************************************************
+ * Helper function to remove current master from coherency
+ ****************************************************************************
+ */
+void plat_marvell_interconnect_exit_coherency(void)
+{
+ cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+}
diff --git a/plat/marvell/armada/common/marvell_common.mk b/plat/marvell/armada/common/marvell_common.mk
new file mode 100644
index 0000000..f0e6edf
--- /dev/null
+++ b/plat/marvell/armada/common/marvell_common.mk
@@ -0,0 +1,99 @@
+# Copyright (C) 2018 Marvell International Ltd.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+# https://spdx.org/licenses
+
+MARVELL_PLAT_BASE := plat/marvell/armada
+MARVELL_PLAT_INCLUDE_BASE := include/plat/marvell/armada
+
+SEPARATE_CODE_AND_RODATA := 1
+
+# flag to switch from PLL to ARO
+ARO_ENABLE := 0
+$(eval $(call add_define,ARO_ENABLE))
+
+# Convert LLC to secure SRAM
+LLC_SRAM := 0
+$(eval $(call add_define,LLC_SRAM))
+
+# Enable/Disable LLC
+LLC_ENABLE := 1
+$(eval $(call add_define,LLC_ENABLE))
+
+include lib/xlat_tables_v2/xlat_tables.mk
+
+PLAT_INCLUDES += -I$(MARVELL_PLAT_INCLUDE_BASE)/common \
+ -I$(MARVELL_PLAT_INCLUDE_BASE)/common/aarch64
+
+
+PLAT_BL_COMMON_SOURCES += ${XLAT_TABLES_LIB_SRCS} \
+ $(MARVELL_PLAT_BASE)/common/aarch64/marvell_common.c \
+ $(MARVELL_PLAT_BASE)/common/aarch64/marvell_helpers.S \
+ $(MARVELL_COMMON_BASE)/marvell_console.c
+
+BL1_SOURCES += drivers/delay_timer/delay_timer.c \
+ drivers/io/io_fip.c \
+ drivers/io/io_memmap.c \
+ drivers/io/io_storage.c \
+ $(MARVELL_PLAT_BASE)/common/marvell_bl1_setup.c \
+ $(MARVELL_PLAT_BASE)/common/marvell_io_storage.c \
+ $(MARVELL_PLAT_BASE)/common/plat_delay_timer.c
+
+ifdef EL3_PAYLOAD_BASE
+# Need the marvell_program_trusted_mailbox() function to release secondary
+# CPUs from their holding pen
+endif
+
+BL2_SOURCES += drivers/io/io_fip.c \
+ drivers/io/io_memmap.c \
+ drivers/io/io_storage.c \
+ common/desc_image_load.c \
+ $(MARVELL_PLAT_BASE)/common/marvell_bl2_setup.c \
+ $(MARVELL_PLAT_BASE)/common/marvell_io_storage.c \
+ $(MARVELL_PLAT_BASE)/common/aarch64/marvell_bl2_mem_params_desc.c \
+ $(MARVELL_PLAT_BASE)/common/marvell_image_load.c
+
+ifeq (${SPD},opteed)
+PLAT_INCLUDES += -Iinclude/lib
+BL2_SOURCES += lib/optee/optee_utils.c
+endif
+
+BL31_SOURCES += $(MARVELL_PLAT_BASE)/common/marvell_bl31_setup.c \
+ $(MARVELL_PLAT_BASE)/common/marvell_pm.c \
+ $(MARVELL_PLAT_BASE)/common/marvell_topology.c \
+ plat/common/plat_psci_common.c \
+ $(MARVELL_PLAT_BASE)/common/plat_delay_timer.c \
+ drivers/delay_timer/delay_timer.c
+
+# PSCI functionality
+$(eval $(call add_define,CONFIG_ARM64))
+
+# Add the build options to pack Trusted OS Extra1 and Trusted OS Extra2 images
+# in the FIP if the platform requires.
+ifneq ($(BL32_EXTRA1),)
+$(eval $(call TOOL_ADD_IMG,bl32_extra1,--tos-fw-extra1))
+endif
+ifneq ($(BL32_EXTRA2),)
+$(eval $(call TOOL_ADD_IMG,bl32_extra2,--tos-fw-extra2))
+endif
+
+# MSS (SCP) build
+ifeq (${MSS_SUPPORT}, 1)
+include $(MARVELL_PLAT_BASE)/common/mss/mss_common.mk
+endif
+
+$(BUILD_PLAT)/$(BOOT_IMAGE): $(BUILD_PLAT)/bl1.bin $(BUILD_PLAT)/$(FIP_NAME)
+ $(if $(shell find $(BUILD_PLAT)/bl1.bin -type f -size +128k),$(error "Image '$(BUILD_PLAT)/bl1.bin' is bigger than 128kB"))
+ @cp $(BUILD_PLAT)/bl1.bin $(BUILD_PLAT)/$(BOOT_IMAGE) || { rm -f $(BUILD_PLAT)/$(BOOT_IMAGE); false; }
+ @truncate -s %128K $(BUILD_PLAT)/$(BOOT_IMAGE) || { rm -f $(BUILD_PLAT)/$(BOOT_IMAGE); false; }
+ @cat $(BUILD_PLAT)/$(FIP_NAME) >> $(BUILD_PLAT)/$(BOOT_IMAGE) || { rm -f $(BUILD_PLAT)/$(BOOT_IMAGE); false; }
+ @truncate -s %4 $(BUILD_PLAT)/$(BOOT_IMAGE) || { rm -f $(BUILD_PLAT)/$(BOOT_IMAGE); false; }
+ @$(ECHO_BLANK_LINE)
+ @echo "Built $@ successfully"
+ @$(ECHO_BLANK_LINE)
+
+.PHONY: mrvl_bootimage
+mrvl_bootimage: $(BUILD_PLAT)/$(BOOT_IMAGE)
+
+.PHONY: mrvl_flash
+mrvl_flash: $(BUILD_PLAT)/$(FLASH_IMAGE)
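
The $(BOOT_IMAGE) recipe above builds a flat image: bl1.bin (checked to be at
most 128 KB) padded up to a 128 KB boundary, the FIP appended right after it,
and the result padded to a 4-byte multiple. A small stand-alone sketch of that
layout arithmetic, with made-up example sizes:

    /* Illustrative only: layout arithmetic behind the truncate/cat recipe. */
    #include <stdio.h>

    static unsigned long round_up(unsigned long v, unsigned long align)
    {
        return (v + align - 1UL) / align * align;
    }

    int main(void)
    {
        unsigned long bl1_size = 92UL * 1024UL;         /* made-up BL1 size */
        unsigned long fip_size = 713UL * 1024UL + 3UL;  /* made-up FIP size */

        /* truncate -s %128K pads BL1, so the FIP lands on a 128 KB boundary */
        unsigned long fip_offset = round_up(bl1_size, 128UL * 1024UL);

        /* truncate -s %4 pads the concatenated image to a 4-byte multiple */
        unsigned long image_size = round_up(fip_offset + fip_size, 4UL);

        printf("FIP offset: 0x%lx, boot image size: %lu bytes\n",
               fip_offset, image_size);
        return 0;
    }
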
diff --git a/plat/marvell/armada/common/marvell_console.c b/plat/marvell/armada/common/marvell_console.c
new file mode 100644
index 0000000..ef54bff
--- /dev/null
+++ b/plat/marvell/armada/common/marvell_console.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <assert.h>
+
+#include <platform_def.h>
+
+#include <common/debug.h>
+#include <drivers/console.h>
+
+#include <plat_marvell.h>
+
+#ifdef PLAT_a3700
+#include <drivers/marvell/uart/a3700_console.h>
+#define PLAT_MARVELL_UART_CLK_IN_HZ (get_ref_clk() * 1000000)
+#define console_marvell_register console_a3700_register
+#else
+#include <drivers/ti/uart/uart_16550.h>
+#define console_marvell_register console_16550_register
+#endif
+
+static console_t marvell_boot_console;
+static console_t marvell_runtime_console;
+
+/*******************************************************************************
+ * Functions that set up the console
+ ******************************************************************************/
+
+/* Initialize the console to provide early debug support */
+void marvell_console_boot_init(void)
+{
+ int rc =
+ console_marvell_register(PLAT_MARVELL_UART_BASE,
+ PLAT_MARVELL_UART_CLK_IN_HZ,
+ MARVELL_CONSOLE_BAUDRATE,
+ &marvell_boot_console);
+ if (rc == 0) {
+ /*
+ * The crash console doesn't use the multi console API, it uses
+ * the core console functions directly. It is safe to call panic
+ * and let it print debug information.
+ */
+ panic();
+ }
+
+ console_set_scope(&marvell_boot_console, CONSOLE_FLAG_BOOT);
+}
+
+void marvell_console_boot_end(void)
+{
+ console_flush();
+
+ (void)console_unregister(&marvell_boot_console);
+}
+
+/* Initialize the runtime console */
+void marvell_console_runtime_init(void)
+{
+ int rc =
+ console_marvell_register(PLAT_MARVELL_UART_BASE,
+ PLAT_MARVELL_UART_CLK_IN_HZ,
+ MARVELL_CONSOLE_BAUDRATE,
+ &marvell_runtime_console);
+ if (rc == 0)
+ panic();
+
+ console_set_scope(&marvell_runtime_console, CONSOLE_FLAG_RUNTIME);
+}
+
+void marvell_console_runtime_end(void)
+{
+ console_flush();
+
+ (void)console_unregister(&marvell_runtime_console);
+}
diff --git a/plat/marvell/armada/common/marvell_ddr_info.c b/plat/marvell/armada/common/marvell_ddr_info.c
new file mode 100644
index 0000000..1ae0254
--- /dev/null
+++ b/plat/marvell/armada/common/marvell_ddr_info.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <platform_def.h>
+
+#include <common/debug.h>
+#include <lib/mmio.h>
+
+#include <ddr_info.h>
+
+#define DRAM_CH0_MMAP_LOW_REG(iface, cs, base) \
+ (base + DRAM_CH0_MMAP_LOW_OFFSET + (iface) * 0x10000 + (cs) * 0x8)
+#define DRAM_CH0_MMAP_HIGH_REG(iface, cs, base) \
+ (DRAM_CH0_MMAP_LOW_REG(iface, cs, base) + 4)
+#define DRAM_CS_VALID_ENABLED_MASK 0x1
+#define DRAM_AREA_LENGTH_OFFS 16
+#define DRAM_AREA_LENGTH_MASK (0x1f << DRAM_AREA_LENGTH_OFFS)
+#define DRAM_START_ADDRESS_L_OFFS 23
+#define DRAM_START_ADDRESS_L_MASK \
+ (0x1ff << DRAM_START_ADDRESS_L_OFFS)
+#define DRAM_START_ADDR_HTOL_OFFS 32
+
+#define DRAM_MAX_CS_NUM 2
+
+#define DRAM_CS_ENABLED(iface, cs, base) \
+ (mmio_read_32(DRAM_CH0_MMAP_LOW_REG(iface, cs, base)) & \
+ DRAM_CS_VALID_ENABLED_MASK)
+#define GET_DRAM_REGION_SIZE_CODE(iface, cs, base) \
+ (mmio_read_32(DRAM_CH0_MMAP_LOW_REG(iface, cs, base)) & \
+ DRAM_AREA_LENGTH_MASK) >> DRAM_AREA_LENGTH_OFFS
+
+/* Mapping between DDR area length and real DDR size is specific and looks like
+ * below:
+ * 0 => 384 MB
+ * 1 => 768 MB
+ * 2 => 1536 MB
+ * 3 => 3 GB
+ * 4 => 6 GB
+ *
+ * 7 => 8 MB
+ * 8 => 16 MB
+ * 9 => 32 MB
+ * 10 => 64 MB
+ * 11 => 128 MB
+ * 12 => 256 MB
+ * 13 => 512 MB
+ * 14 => 1 GB
+ * 15 => 2 GB
+ * 16 => 4 GB
+ * 17 => 8 GB
+ * 18 => 16 GB
+ * 19 => 32 GB
+ * 20 => 64 GB
+ * 21 => 128 GB
+ * 22 => 256 GB
+ * 23 => 512 GB
+ * 24 => 1 TB
+ * 25 => 2 TB
+ * 26 => 4 TB
+ *
+ * to calculate real size we need to use two different formulas:
+ * -- GET_DRAM_REGION_SIZE_ODD for values 0-4 (DRAM_REGION_SIZE_ODD)
+ * -- GET_DRAM_REGION_SIZE_EVEN for values 7-26 (DRAM_REGION_SIZE_EVEN)
+ * using mentioned formulas we cover whole mapping between "Area length" value
+ * and real size (see above mapping).
+ */
+#define DRAM_REGION_SIZE_EVEN(C) (((C) >= 7) && ((C) <= 26))
+#define GET_DRAM_REGION_SIZE_EVEN(C) ((uint64_t)1 << ((C) + 16))
+#define DRAM_REGION_SIZE_ODD(C) ((C) <= 4)
+#define GET_DRAM_REGION_SIZE_ODD(C) ((uint64_t)0x18000000 << (C))
+
+
+uint64_t mvebu_get_dram_size(uint64_t ap_base_addr)
+{
+ uint64_t mem_size = 0;
+ uint8_t region_code;
+ uint8_t cs, iface;
+
+ for (iface = 0; iface < DRAM_MAX_IFACE; iface++) {
+ for (cs = 0; cs < DRAM_MAX_CS_NUM; cs++) {
+
+ /* Exit loop on first disabled DRAM CS */
+ if (!DRAM_CS_ENABLED(iface, cs, ap_base_addr))
+ break;
+
+ /* Decode area length for current CS
+ * from register value
+ */
+ region_code =
+ GET_DRAM_REGION_SIZE_CODE(iface, cs,
+ ap_base_addr);
+
+ if (DRAM_REGION_SIZE_EVEN(region_code)) {
+ mem_size +=
+ GET_DRAM_REGION_SIZE_EVEN(region_code);
+ } else if (DRAM_REGION_SIZE_ODD(region_code)) {
+ mem_size +=
+ GET_DRAM_REGION_SIZE_ODD(region_code);
+ } else {
+ WARN("%s: Invalid mem region (0x%x) CS#%d\n",
+ __func__, region_code, cs);
+ return 0;
+ }
+ }
+ }
+
+ return mem_size;
+}
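
The area-length comment above is backed by two formulas, one for codes 0-4 and
one for codes 7-26. A short stand-alone check of both against the table, with
the macros copied from the file:

    /* Stand-alone check of the two region-size formulas above. */
    #include <stdint.h>
    #include <stdio.h>

    #define DRAM_REGION_SIZE_EVEN(C)      (((C) >= 7) && ((C) <= 26))
    #define GET_DRAM_REGION_SIZE_EVEN(C)  ((uint64_t)1 << ((C) + 16))
    #define DRAM_REGION_SIZE_ODD(C)       ((C) <= 4)
    #define GET_DRAM_REGION_SIZE_ODD(C)   ((uint64_t)0x18000000 << (C))

    int main(void)
    {
        /* Code 12 is in the "even" range: 1 << (12 + 16) = 256 MB,
         * matching "12 => 256 MB" in the table above. */
        printf("code 12 -> %llu MB\n",
               (unsigned long long)(GET_DRAM_REGION_SIZE_EVEN(12) >> 20));

        /* Code 2 is in the "odd" range: 0x18000000 << 2 = 1536 MB,
         * matching "2 => 1536 MB" in the table above. */
        printf("code  2 -> %llu MB\n",
               (unsigned long long)(GET_DRAM_REGION_SIZE_ODD(2) >> 20));

        return 0;
    }
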
diff --git a/plat/marvell/armada/common/marvell_gicv2.c b/plat/marvell/armada/common/marvell_gicv2.c
new file mode 100644
index 0000000..2505c9f
--- /dev/null
+++ b/plat/marvell/armada/common/marvell_gicv2.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <platform_def.h>
+
+#include <bl31/interrupt_mgmt.h>
+#include <common/debug.h>
+#include <drivers/arm/gicv2.h>
+#include <lib/bakery_lock.h>
+#include <lib/mmio.h>
+#include <plat/common/platform.h>
+
+#include <plat_marvell.h>
+
+/*
+ * The following functions are defined as weak to allow a platform to override
+ * the way the GICv2 driver is initialised and used.
+ */
+#pragma weak plat_marvell_gic_driver_init
+#pragma weak plat_marvell_gic_init
+
+#define A7K8K_PIC_CAUSE_REG 0xf03f0100
+#define A7K8K_PIC0_MASK_REG 0xf03f0108
+
+#define A7K8K_PIC_PMUOF_IRQ_MASK (1 << 17)
+
+#define A7K8K_PIC_MAX_IRQS 32
+#define A7K8K_PIC_MAX_IRQ_MASK ((1UL << A7K8K_PIC_MAX_IRQS) - 1)
+
+#define A7K8K_ODMIN_SET_REG 0xf0300040
+#define A7K8K_ODMI_PMU_IRQ(idx) ((2 + idx) << 12)
+
+#define A7K8K_ODMI_PMU_GIC_IRQ(idx) (130 + idx)
+
+static DEFINE_BAKERY_LOCK(a7k8k_irq_lock);
+
+/*
+ * On a GICv2 system, the Group 1 secure interrupts are treated as Group 0
+ * interrupts.
+ */
+static const interrupt_prop_t marvell_interrupt_props[] = {
+ PLAT_MARVELL_G1S_IRQ_PROPS(GICV2_INTR_GROUP0),
+ PLAT_MARVELL_G0_IRQ_PROPS(GICV2_INTR_GROUP0)
+};
+
+static unsigned int target_mask_array[PLATFORM_CORE_COUNT];
+
+/*
+ * Ideally `marvell_gic_data` structure definition should be a `const` but it is
+ * kept as modifiable for overwriting with different GICD and GICC base when
+ * running on FVP with VE memory map.
+ */
+static gicv2_driver_data_t marvell_gic_data = {
+ .gicd_base = PLAT_MARVELL_GICD_BASE,
+ .gicc_base = PLAT_MARVELL_GICC_BASE,
+ .interrupt_props = marvell_interrupt_props,
+ .interrupt_props_num = ARRAY_SIZE(marvell_interrupt_props),
+ .target_masks = target_mask_array,
+ .target_masks_num = ARRAY_SIZE(target_mask_array),
+};
+
+/*
+ * ARM common helper to initialize the GICv2 only driver.
+ */
+void plat_marvell_gic_driver_init(void)
+{
+ gicv2_driver_init(&marvell_gic_data);
+}
+
+static uint64_t a7k8k_pmu_interrupt_handler(uint32_t id,
+ uint32_t flags,
+ void *handle,
+ void *cookie)
+{
+ unsigned int idx = plat_my_core_pos();
+ uint32_t irq;
+
+ bakery_lock_get(&a7k8k_irq_lock);
+
+ /* Acknowledge IRQ */
+ irq = plat_ic_acknowledge_interrupt();
+
+ plat_ic_end_of_interrupt(irq);
+
+ if (irq != MARVELL_IRQ_PIC0) {
+ bakery_lock_release(&a7k8k_irq_lock);
+ return 0;
+ }
+
+ /* Acknowledge PMU overflow IRQ in PIC0 */
+ mmio_setbits_32(A7K8K_PIC_CAUSE_REG, A7K8K_PIC_PMUOF_IRQ_MASK);
+
+ /* Trigger ODMI Frame IRQ */
+ mmio_write_32(A7K8K_ODMIN_SET_REG, A7K8K_ODMI_PMU_IRQ(idx));
+
+ bakery_lock_release(&a7k8k_irq_lock);
+
+ return 0;
+}
+
+void mvebu_pmu_interrupt_enable(void)
+{
+ unsigned int idx;
+ uint32_t flags;
+ int32_t rc;
+
+ /* Reset PIC */
+ mmio_write_32(A7K8K_PIC_CAUSE_REG, A7K8K_PIC_MAX_IRQ_MASK);
+ /* Unmask PMU overflow IRQ in PIC0 */
+ mmio_clrbits_32(A7K8K_PIC0_MASK_REG, A7K8K_PIC_PMUOF_IRQ_MASK);
+
+ /* Configure ODMI Frame IRQs as edge triggered */
+ for (idx = 0; idx < PLATFORM_CORE_COUNT; idx++)
+ gicv2_interrupt_set_cfg(A7K8K_ODMI_PMU_GIC_IRQ(idx),
+ GIC_INTR_CFG_EDGE);
+
+ /*
+	 * Register the IRQ handler as INTR_TYPE_S_EL1, as it is the only valid type
+ * for GICv2 in ARM-TF.
+ */
+ flags = 0U;
+ set_interrupt_rm_flag((flags), (NON_SECURE));
+ rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
+ a7k8k_pmu_interrupt_handler,
+ flags);
+ if (rc != 0)
+ panic();
+}
+
+void mvebu_pmu_interrupt_disable(void)
+{
+ /* Reset PIC */
+ mmio_write_32(A7K8K_PIC_CAUSE_REG, A7K8K_PIC_MAX_IRQ_MASK);
+ /* Mask PMU overflow IRQ in PIC0 */
+ mmio_setbits_32(A7K8K_PIC0_MASK_REG, A7K8K_PIC_PMUOF_IRQ_MASK);
+}
+
+void plat_marvell_gic_init(void)
+{
+ gicv2_distif_init();
+ gicv2_pcpu_distif_init();
+ gicv2_set_pe_target_mask(plat_my_core_pos());
+ gicv2_cpuif_enable();
+}
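
a7k8k_pmu_interrupt_handler() re-injects the PMU overflow as a per-CPU
message: the value written to A7K8K_ODMIN_SET_REG encodes the core index, and
the message comes back as GIC SPI 130 + idx. A small stand-alone illustration
of that numbering (four cores used as an example):

    /* Illustrative only: the per-CPU ODMI message/IRQ numbering used above. */
    #include <stdio.h>

    #define A7K8K_ODMI_PMU_IRQ(idx)      ((2 + (idx)) << 12) /* ODMIN_SET value */
    #define A7K8K_ODMI_PMU_GIC_IRQ(idx)  (130 + (idx))       /* resulting SPI   */

    int main(void)
    {
        for (int idx = 0; idx < 4; idx++)
            printf("core %d: ODMIN_SET value 0x%x -> GIC IRQ %d\n",
                   idx, A7K8K_ODMI_PMU_IRQ(idx), A7K8K_ODMI_PMU_GIC_IRQ(idx));
        return 0;
    }
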
diff --git a/plat/marvell/armada/common/marvell_gicv3.c b/plat/marvell/armada/common/marvell_gicv3.c
new file mode 100644
index 0000000..5419506
--- /dev/null
+++ b/plat/marvell/armada/common/marvell_gicv3.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <platform_def.h>
+
+#include <common/debug.h>
+#include <common/interrupt_props.h>
+#include <drivers/arm/gicv3.h>
+#include <plat/common/platform.h>
+
+#include <marvell_def.h>
+#include <plat_marvell.h>
+
+/******************************************************************************
+ * The following functions are defined as weak to allow a platform to override
+ * the way the GICv3 driver is initialised and used.
+ ******************************************************************************
+ */
+#pragma weak plat_marvell_gic_driver_init
+#pragma weak plat_marvell_gic_init
+#pragma weak plat_marvell_gic_cpuif_enable
+#pragma weak plat_marvell_gic_cpuif_disable
+#pragma weak plat_marvell_gic_pcpu_init
+
+/* The GICv3 driver only needs to be initialized in EL3 */
+static uintptr_t rdistif_base_addrs[PLATFORM_CORE_COUNT];
+
+static const interrupt_prop_t marvell_interrupt_props[] = {
+ PLAT_MARVELL_G1S_IRQ_PROPS(INTR_GROUP1S),
+ PLAT_MARVELL_G0_IRQ_PROPS(INTR_GROUP0)
+};
+
+/*
+ * We save and restore the GICv3 context on system suspend. Allocate the
+ * data in the designated EL3 Secure carve-out memory
+ */
+static gicv3_redist_ctx_t rdist_ctx __section(".arm_el3_tzc_dram");
+static gicv3_dist_ctx_t dist_ctx __section(".arm_el3_tzc_dram");
+
+/*
+ * MPIDR hashing function for translating MPIDRs read from GICR_TYPER register
+ * to core position.
+ *
+ * Calculating core position is dependent on MPIDR_EL1.MT bit. However, affinity
+ * values read from GICR_TYPER don't have an MT field. To reuse the same
+ * translation used for CPUs, we insert MT bit read from the PE's MPIDR into
+ * that read from GICR_TYPER.
+ *
+ * Assumptions:
+ *
+ * - All CPUs implemented in the system have MPIDR_EL1.MT bit set;
+ * - No CPUs implemented in the system use affinity level 3.
+ */
+static unsigned int marvell_gicv3_mpidr_hash(u_register_t mpidr)
+{
+ mpidr |= (read_mpidr_el1() & MPIDR_MT_MASK);
+ return plat_marvell_calc_core_pos(mpidr);
+}
+
+const gicv3_driver_data_t marvell_gic_data = {
+ .gicd_base = PLAT_MARVELL_GICD_BASE,
+ .gicr_base = PLAT_MARVELL_GICR_BASE,
+ .interrupt_props = marvell_interrupt_props,
+ .interrupt_props_num = ARRAY_SIZE(marvell_interrupt_props),
+ .rdistif_num = PLATFORM_CORE_COUNT,
+ .rdistif_base_addrs = rdistif_base_addrs,
+ .mpidr_to_core_pos = marvell_gicv3_mpidr_hash
+};
+
+void plat_marvell_gic_driver_init(void)
+{
+ /*
+ * The GICv3 driver is initialized in EL3 and does not need
+ * to be initialized again in SEL1. This is because the S-EL1
+ * can use GIC system registers to manage interrupts and does
+ * not need GIC interface base addresses to be configured.
+ */
+#if IMAGE_BL31
+ gicv3_driver_init(&marvell_gic_data);
+#endif
+}
+
+/******************************************************************************
+ * Marvell common helper to initialize the GIC. Only invoked by BL31
+ ******************************************************************************
+ */
+void plat_marvell_gic_init(void)
+{
+ /* Initialize GIC-600 Multi Chip feature,
+ * only if the maximum number of north bridges
+ * is more than 1 - otherwise no need for multi
+ * chip feature initialization
+ */
+#if (PLAT_MARVELL_NORTHB_COUNT > 1)
+ if (gic600_multi_chip_init())
+ ERROR("GIC-600 Multi Chip initialization failed\n");
+#endif
+ gicv3_distif_init();
+ gicv3_rdistif_init(plat_my_core_pos());
+ gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+/******************************************************************************
+ * Marvell common helper to enable the GIC CPU interface
+ ******************************************************************************
+ */
+void plat_marvell_gic_cpuif_enable(void)
+{
+ gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+/******************************************************************************
+ * Marvell common helper to disable the GIC CPU interface
+ ******************************************************************************
+ */
+void plat_marvell_gic_cpuif_disable(void)
+{
+ gicv3_cpuif_disable(plat_my_core_pos());
+}
+
+/******************************************************************************
+ * Marvell common helper to init. the per-cpu redistributor interface in GICv3
+ ******************************************************************************
+ */
+void plat_marvell_gic_pcpu_init(void)
+{
+ gicv3_rdistif_init(plat_my_core_pos());
+}
+
+/******************************************************************************
+ * Marvell common helper to save SPI irq states in GICv3
+ ******************************************************************************
+ */
+void plat_marvell_gic_irq_save(void)
+{
+
+ /*
+ * If an ITS is available, save its context before
+ * the Redistributor using:
+ * gicv3_its_save_disable(gits_base, &its_ctx[i])
+ * Additionally, an implementation-defined sequence may
+ * be required to save the whole ITS state.
+ */
+
+ /*
+ * Save the GIC Redistributors and ITS contexts before the
+ * Distributor context. As we only handle SYSTEM SUSPEND API,
+ * we only need to save the context of the CPU that is issuing
+ * the SYSTEM SUSPEND call, i.e. the current CPU.
+ */
+ gicv3_rdistif_save(plat_my_core_pos(), &rdist_ctx);
+
+ /* Save the GIC Distributor context */
+ gicv3_distif_save(&dist_ctx);
+
+ /*
+ * From here, all the components of the GIC can be safely powered down
+ * as long as there is an alternate way to handle wakeup interrupt
+ * sources.
+ */
+}
+
+/******************************************************************************
+ * Marvell common helper to restore SPI irq states in GICv3
+ ******************************************************************************
+ */
+void plat_marvell_gic_irq_restore(void)
+{
+ /* Restore the GIC Distributor context */
+ gicv3_distif_init_restore(&dist_ctx);
+
+ /*
+ * Restore the GIC Redistributor and ITS contexts after the
+ * Distributor context. As we only handle SYSTEM SUSPEND API,
+ * we only need to restore the context of the CPU that issued
+ * the SYSTEM SUSPEND call.
+ */
+ gicv3_rdistif_init_restore(plat_my_core_pos(), &rdist_ctx);
+
+ /*
+ * If an ITS is available, restore its context after
+ * the Redistributor using:
+ * gicv3_its_restore(gits_base, &its_ctx[i])
+ * An implementation-defined sequence may be required to
+ * restore the whole ITS state. The ITS must also be
+ * re-enabled after this sequence has been executed.
+ */
+}
+
+/******************************************************************************
+ * Marvell common helper to save per-cpu PPI irq states in GICv3
+ ******************************************************************************
+ */
+void plat_marvell_gic_irq_pcpu_save(void)
+{
+ gicv3_rdistif_save(plat_my_core_pos(), &rdist_ctx);
+}
+
+/******************************************************************************
+ * Marvell common helper to restore per-cpu PPI irq states in GICv3
+ ******************************************************************************
+ */
+void plat_marvell_gic_irq_pcpu_restore(void)
+{
+ gicv3_rdistif_init_restore(plat_my_core_pos(), &rdist_ctx);
+}
diff --git a/plat/marvell/armada/common/marvell_image_load.c b/plat/marvell/armada/common/marvell_image_load.c
new file mode 100644
index 0000000..be16b08
--- /dev/null
+++ b/plat/marvell/armada/common/marvell_image_load.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+
+#include <common/bl_common.h>
+#include <common/desc_image_load.h>
+#include <plat/common/platform.h>
+
+/*******************************************************************************
+ * This function flushes the data structures so that they are visible
+ * in memory for the next BL image.
+ ******************************************************************************/
+void plat_flush_next_bl_params(void)
+{
+ flush_bl_params_desc();
+}
+
+/*******************************************************************************
+ * This function returns the list of loadable images.
+ ******************************************************************************/
+bl_load_info_t *plat_get_bl_image_load_info(void)
+{
+ return get_bl_load_info_from_mem_params_desc();
+}
+
+/*******************************************************************************
+ * This function returns the list of executable images.
+ ******************************************************************************/
+bl_params_t *plat_get_next_bl_params(void)
+{
+ return get_next_bl_params_from_mem_params_desc();
+}
diff --git a/plat/marvell/armada/common/marvell_io_storage.c b/plat/marvell/armada/common/marvell_io_storage.c
new file mode 100644
index 0000000..2627ba4
--- /dev/null
+++ b/plat/marvell/armada/common/marvell_io_storage.c
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <platform_def.h>
+
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <drivers/io/io_driver.h>
+#include <drivers/io/io_fip.h>
+#include <drivers/io/io_memmap.h>
+#include <drivers/io/io_storage.h>
+#include <tools_share/firmware_image_package.h>
+
+/* IO devices */
+static const io_dev_connector_t *fip_dev_con;
+static uintptr_t fip_dev_handle;
+static const io_dev_connector_t *memmap_dev_con;
+static uintptr_t memmap_dev_handle;
+
+static const io_block_spec_t fip_block_spec = {
+ .offset = PLAT_MARVELL_FIP_BASE,
+ .length = PLAT_MARVELL_FIP_MAX_SIZE
+};
+
+static const io_uuid_spec_t bl2_uuid_spec = {
+ .uuid = UUID_TRUSTED_BOOT_FIRMWARE_BL2,
+};
+
+static const io_uuid_spec_t scp_bl2_uuid_spec = {
+ .uuid = UUID_SCP_FIRMWARE_SCP_BL2,
+};
+
+static const io_uuid_spec_t bl31_uuid_spec = {
+ .uuid = UUID_EL3_RUNTIME_FIRMWARE_BL31,
+};
+static const io_uuid_spec_t bl32_uuid_spec = {
+ .uuid = UUID_SECURE_PAYLOAD_BL32,
+};
+
+static const io_uuid_spec_t bl32_extra1_uuid_spec = {
+ .uuid = UUID_SECURE_PAYLOAD_BL32_EXTRA1,
+};
+
+static const io_uuid_spec_t bl32_extra2_uuid_spec = {
+ .uuid = UUID_SECURE_PAYLOAD_BL32_EXTRA2,
+};
+
+static const io_uuid_spec_t bl33_uuid_spec = {
+ .uuid = UUID_NON_TRUSTED_FIRMWARE_BL33,
+};
+
+static int open_fip(const uintptr_t spec);
+static int open_memmap(const uintptr_t spec);
+
+struct plat_io_policy {
+ uintptr_t *dev_handle;
+ uintptr_t image_spec;
+ int (*check)(const uintptr_t spec);
+};
+
+/* By default, Marvell platforms load images from the FIP */
+static const struct plat_io_policy policies[] = {
+ [FIP_IMAGE_ID] = {
+ &memmap_dev_handle,
+ (uintptr_t)&fip_block_spec,
+ open_memmap
+ },
+ [BL2_IMAGE_ID] = {
+ &fip_dev_handle,
+ (uintptr_t)&bl2_uuid_spec,
+ open_fip
+ },
+ [SCP_BL2_IMAGE_ID] = {
+ &fip_dev_handle,
+ (uintptr_t)&scp_bl2_uuid_spec,
+ open_fip
+ },
+ [BL31_IMAGE_ID] = {
+ &fip_dev_handle,
+ (uintptr_t)&bl31_uuid_spec,
+ open_fip
+ },
+ [BL32_IMAGE_ID] = {
+ &fip_dev_handle,
+ (uintptr_t)&bl32_uuid_spec,
+ open_fip
+ },
+ [BL32_EXTRA1_IMAGE_ID] = {
+ &fip_dev_handle,
+ (uintptr_t)&bl32_extra1_uuid_spec,
+ open_fip
+ },
+ [BL32_EXTRA2_IMAGE_ID] = {
+ &fip_dev_handle,
+ (uintptr_t)&bl32_extra2_uuid_spec,
+ open_fip
+ },
+ [BL33_IMAGE_ID] = {
+ &fip_dev_handle,
+ (uintptr_t)&bl33_uuid_spec,
+ open_fip
+ },
+};
+
+
+/* Weak definitions may be overridden by a specific Marvell platform */
+#pragma weak plat_marvell_io_setup
+#pragma weak plat_marvell_get_alt_image_source
+
+
+static int open_fip(const uintptr_t spec)
+{
+ int result;
+ uintptr_t local_image_handle;
+
+ /* See if a Firmware Image Package is available */
+ result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID);
+ if (result == 0) {
+ result = io_open(fip_dev_handle, spec, &local_image_handle);
+ if (result == 0) {
+ VERBOSE("Using FIP\n");
+ io_close(local_image_handle);
+ }
+ }
+ return result;
+}
+
+
+static int open_memmap(const uintptr_t spec)
+{
+ int result;
+ uintptr_t local_image_handle;
+
+ result = io_dev_init(memmap_dev_handle, (uintptr_t)NULL);
+ if (result == 0) {
+ result = io_open(memmap_dev_handle, spec, &local_image_handle);
+ if (result == 0) {
+ VERBOSE("Using Memmap\n");
+ io_close(local_image_handle);
+ }
+ }
+ return result;
+}
+
+
+void marvell_io_setup(void)
+{
+ int io_result;
+
+ io_result = register_io_dev_fip(&fip_dev_con);
+ assert(io_result == 0);
+
+ io_result = register_io_dev_memmap(&memmap_dev_con);
+ assert(io_result == 0);
+
+ /* Open connections to devices and cache the handles */
+ io_result = io_dev_open(fip_dev_con, (uintptr_t)NULL,
+ &fip_dev_handle);
+ assert(io_result == 0);
+
+ io_result = io_dev_open(memmap_dev_con, (uintptr_t)NULL,
+ &memmap_dev_handle);
+ assert(io_result == 0);
+
+ /* Ignore improbable errors in release builds */
+ (void)io_result;
+}
+
+void plat_marvell_io_setup(void)
+{
+ marvell_io_setup();
+}
+
+int plat_marvell_get_alt_image_source(
+ unsigned int image_id __attribute__((unused)),
+ uintptr_t *dev_handle __attribute__((unused)),
+ uintptr_t *image_spec __attribute__((unused)))
+{
+ /* By default do not try an alternative */
+ return -ENOENT;
+}
+
+/*
+ * Return an IO device handle and specification that can be used to access
+ * an image. This is used to enforce the platform's image load policy.
+ */
+int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
+ uintptr_t *image_spec)
+{
+ int result;
+ const struct plat_io_policy *policy;
+
+ assert(image_id < ARRAY_SIZE(policies));
+
+ policy = &policies[image_id];
+ result = policy->check(policy->image_spec);
+ if (result == 0) {
+ *image_spec = policy->image_spec;
+ *dev_handle = *(policy->dev_handle);
+ } else {
+ VERBOSE("Trying alternative IO\n");
+ result = plat_marvell_get_alt_image_source(image_id, dev_handle,
+ image_spec);
+ }
+
+ return result;
+}
+
+/*
+ * Check whether a Firmware Image Package is available by verifying
+ * that its ToC is valid.
+ */
+int marvell_io_is_toc_valid(void)
+{
+ int result;
+
+ result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID);
+
+ return result == 0;
+}
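
For context, a simplified sketch of how a BL stage consumes this policy through the generic io_storage API (error handling trimmed; example_read_image() is illustrative, not part of these sources, and assumes the same io_storage includes as the file above):

static int example_read_image(unsigned int image_id, uintptr_t buf, size_t size)
{
	uintptr_t dev_handle, image_spec, image_handle;
	size_t bytes_read;
	int rc;

	/* Look up the device/spec selected by the platform load policy */
	rc = plat_get_image_source(image_id, &dev_handle, &image_spec);
	if (rc != 0)
		return rc;

	rc = io_open(dev_handle, image_spec, &image_handle);
	if (rc != 0)
		return rc;

	rc = io_read(image_handle, buf, size, &bytes_read);
	io_close(image_handle);

	return rc;
}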
diff --git a/plat/marvell/armada/common/marvell_pm.c b/plat/marvell/armada/common/marvell_pm.c
new file mode 100644
index 0000000..3c675b2
--- /dev/null
+++ b/plat/marvell/armada/common/marvell_pm.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <lib/psci/psci.h>
+
+#include <marvell_pm.h>
+
+/* Standard ARM platforms are expected to export plat_arm_psci_pm_ops */
+extern const plat_psci_ops_t plat_arm_psci_pm_ops;
+
+/*****************************************************************************
+ * Private function to program the mailbox for a cpu before it is released
+ * from reset. This function assumes that the mail box base is within
+ * the MARVELL_SHARED_RAM region
+ *****************************************************************************
+ */
+void marvell_program_mailbox(uintptr_t address)
+{
+ uintptr_t *mailbox = (void *)PLAT_MARVELL_MAILBOX_BASE;
+
+ /*
+ * Ensure that the PLAT_MARVELL_MAILBOX_BASE is within
+ * MARVELL_SHARED_RAM region.
+ */
+ assert((PLAT_MARVELL_MAILBOX_BASE >= MARVELL_SHARED_RAM_BASE) &&
+ ((PLAT_MARVELL_MAILBOX_BASE + sizeof(*mailbox)) <=
+ (MARVELL_SHARED_RAM_BASE + MARVELL_SHARED_RAM_SIZE)));
+
+ mailbox[MBOX_IDX_MAGIC] = MVEBU_MAILBOX_MAGIC_NUM;
+ mailbox[MBOX_IDX_SEC_ADDR] = address;
+
+ /* Flush data cache if the mail box shared RAM is cached */
+#if PLAT_MARVELL_SHARED_RAM_CACHED
+ flush_dcache_range((uintptr_t)PLAT_MARVELL_MAILBOX_BASE +
+ 8 * MBOX_IDX_MAGIC,
+ 2 * sizeof(uint64_t));
+#endif
+}
+
+/*****************************************************************************
+ * Marvell standard platform implementation of the platform porting API
+ * `plat_setup_psci_ops`.
+ *****************************************************************************
+ */
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+ const plat_psci_ops_t **psci_ops)
+{
+ *psci_ops = &plat_arm_psci_pm_ops;
+
+ /* Setup mailbox with entry point. */
+ marvell_program_mailbox(sec_entrypoint);
+ return 0;
+}
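
The consumer side of this mailbox is the warm-boot path, which normally performs the check in early platform assembly. A C sketch of the layout written by marvell_program_mailbox() above (example_get_warm_entry() is illustrative only):

static uintptr_t example_get_warm_entry(void)
{
	uintptr_t *mailbox = (void *)PLAT_MARVELL_MAILBOX_BASE;

	/* No magic means cold boot: no secondary entry point recorded */
	if (mailbox[MBOX_IDX_MAGIC] != MVEBU_MAILBOX_MAGIC_NUM)
		return 0UL;

	return mailbox[MBOX_IDX_SEC_ADDR];
}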
diff --git a/plat/marvell/armada/common/marvell_topology.c b/plat/marvell/armada/common/marvell_topology.c
new file mode 100644
index 0000000..a40ff6f
--- /dev/null
+++ b/plat/marvell/armada/common/marvell_topology.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <plat_marvell.h>
+
+/* The power domain tree descriptor */
+unsigned char marvell_power_domain_tree_desc[PLAT_MARVELL_CLUSTER_COUNT + 1];
+
+/*****************************************************************************
+ * This function dynamically constructs the topology according to
+ * PLAT_MARVELL_CLUSTER_COUNT and returns it.
+ *****************************************************************************
+ */
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+ int i;
+
+ /*
+ * The power domain tree does not have a single system level power
+ * domain i.e. a single root node. The first entry in the power domain
+ * descriptor specifies the number of power domains at the highest power
+ * level.
+	 * For Marvell platforms, this is the number of cluster power domains.
+ */
+ marvell_power_domain_tree_desc[0] = PLAT_MARVELL_CLUSTER_COUNT;
+
+ for (i = 0; i < PLAT_MARVELL_CLUSTER_COUNT; i++)
+ marvell_power_domain_tree_desc[i + 1] =
+ PLAT_MARVELL_CLUSTER_CORE_COUNT;
+
+ return marvell_power_domain_tree_desc;
+}
+
+/*****************************************************************************
+ * This function validates an MPIDR by checking whether it falls within the
+ * acceptable bounds. An error code (-1) is returned if an incorrect mpidr
+ * is passed.
+ *****************************************************************************
+ */
+int marvell_check_mpidr(u_register_t mpidr)
+{
+ unsigned int nb_id, cluster_id, cpu_id;
+
+ mpidr &= MPIDR_AFFINITY_MASK;
+
+ if (mpidr & ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK |
+ MPIDR_AFFLVL_MASK << MPIDR_AFF2_SHIFT))
+ return -1;
+
+ /* Get north bridge ID */
+ nb_id = MPIDR_AFFLVL3_VAL(mpidr);
+ cluster_id = MPIDR_AFFLVL1_VAL(mpidr);
+ cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
+
+ if (nb_id >= PLAT_MARVELL_CLUSTER_COUNT)
+ return -1;
+
+ if (cluster_id >= PLAT_MARVELL_CLUSTER_COUNT)
+ return -1;
+
+ if (cpu_id >= PLAT_MARVELL_CLUSTER_CORE_COUNT)
+ return -1;
+
+ return 0;
+}
+
+/*****************************************************************************
+ * This function implements a part of the critical interface between the PSCI
+ * generic layer and the platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error code (-1) is returned
+ * in case the MPIDR is invalid.
+ *****************************************************************************
+ */
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+ if (marvell_check_mpidr(mpidr) == -1)
+ return -1;
+
+ return plat_marvell_calc_core_pos(mpidr);
+}
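
As an example, with hypothetical values PLAT_MARVELL_CLUSTER_COUNT == 2 and PLAT_MARVELL_CLUSTER_CORE_COUNT == 4 (not a statement about any specific SoC), the descriptor built by plat_get_power_domain_tree_desc() is equivalent to:

static const unsigned char example_tree_desc[] = {
	2,	/* number of cluster power domains */
	4,	/* CPUs in cluster 0 */
	4	/* CPUs in cluster 1 */
};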
diff --git a/plat/marvell/armada/common/mrvl_sip_svc.c b/plat/marvell/armada/common/mrvl_sip_svc.c
new file mode 100644
index 0000000..c4c5c0e
--- /dev/null
+++ b/plat/marvell/armada/common/mrvl_sip_svc.c
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <drivers/marvell/cache_llc.h>
+#include <drivers/marvell/mochi/ap_setup.h>
+#include <drivers/rambus/trng_ip_76.h>
+#include <lib/smccc.h>
+
+#include <marvell_plat_priv.h>
+#include <plat_marvell.h>
+
+#include "comphy/phy-comphy-cp110.h"
+#include "secure_dfx_access/dfx.h"
+#include "ddr_phy_access.h"
+#include <stdbool.h>
+
+/* #define DEBUG_COMPHY */
+#ifdef DEBUG_COMPHY
+#define debug(format...) NOTICE(format)
+#else
+#define debug(format, arg...)
+#endif
+
+/* Comphy related FIDs */
+#define MV_SIP_COMPHY_POWER_ON 0x82000001
+#define MV_SIP_COMPHY_POWER_OFF 0x82000002
+#define MV_SIP_COMPHY_PLL_LOCK 0x82000003
+#define MV_SIP_COMPHY_XFI_TRAIN 0x82000004
+#define MV_SIP_COMPHY_DIG_RESET 0x82000005
+
+/* Miscellaneous FIDs */
+#define MV_SIP_DRAM_SIZE 0x82000010
+#define MV_SIP_LLC_ENABLE 0x82000011
+#define MV_SIP_PMU_IRQ_ENABLE 0x82000012
+#define MV_SIP_PMU_IRQ_DISABLE 0x82000013
+#define MV_SIP_DFX 0x82000014
+#define MV_SIP_DDR_PHY_WRITE 0x82000015
+#define MV_SIP_DDR_PHY_READ 0x82000016
+
+/* TRNG */
+#define MV_SIP_RNG_64 0xC200FF11
+
+#define MAX_LANE_NR 6
+#define MVEBU_COMPHY_OFFSET 0x441000
+#define MVEBU_CP_BASE_MASK (~0xffffff)
+
+/* Common PHY register */
+#define COMPHY_TRX_TRAIN_CTRL_REG_0_OFFS 0x120a2c
+
+/* This macro is used to identify COMPHY related calls from SMC function ID */
+#define is_comphy_fid(fid) \
+ ((fid) >= MV_SIP_COMPHY_POWER_ON && (fid) <= MV_SIP_COMPHY_DIG_RESET)
+
+_Bool is_cp_range_valid(u_register_t *addr)
+{
+ int cp_nr;
+
+ *addr &= MVEBU_CP_BASE_MASK;
+ for (cp_nr = 0; cp_nr < CP_NUM; cp_nr++) {
+ if (*addr == MVEBU_CP_REGS_BASE(cp_nr))
+ return true;
+ }
+
+ return false;
+}
+
+uintptr_t mrvl_sip_smc_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags)
+{
+ u_register_t ret, read, x5 = x1;
+ int i;
+
+ debug("%s: got SMC (0x%x) x1 0x%lx, x2 0x%lx, x3 0x%lx\n",
+ __func__, smc_fid, x1, x2, x3);
+
+ if (is_comphy_fid(smc_fid)) {
+ /* validate address passed via x1 */
+ if (!is_cp_range_valid(&x1)) {
+ ERROR("%s: Wrong smc (0x%x) address: %lx\n",
+ __func__, smc_fid, x1);
+ SMC_RET1(handle, SMC_UNK);
+ }
+
+ x5 = x1 + COMPHY_TRX_TRAIN_CTRL_REG_0_OFFS;
+ x1 += MVEBU_COMPHY_OFFSET;
+
+ if (x2 >= MAX_LANE_NR) {
+ ERROR("%s: Wrong smc (0x%x) lane nr: %lx\n",
+ __func__, smc_fid, x2);
+ SMC_RET1(handle, SMC_UNK);
+ }
+ }
+
+ switch (smc_fid) {
+
+	/* Comphy related FIDs */
+ case MV_SIP_COMPHY_POWER_ON:
+ /* x1: comphy_base, x2: comphy_index, x3: comphy_mode */
+ ret = mvebu_cp110_comphy_power_on(x1, x2, x3, x5);
+ SMC_RET1(handle, ret);
+ case MV_SIP_COMPHY_POWER_OFF:
+ /* x1: comphy_base, x2: comphy_index */
+ ret = mvebu_cp110_comphy_power_off(x1, x2, x3);
+ SMC_RET1(handle, ret);
+ case MV_SIP_COMPHY_PLL_LOCK:
+ /* x1: comphy_base, x2: comphy_index */
+ ret = mvebu_cp110_comphy_is_pll_locked(x1, x2);
+ SMC_RET1(handle, ret);
+ case MV_SIP_COMPHY_XFI_TRAIN:
+ /* x1: comphy_base, x2: comphy_index */
+ ret = mvebu_cp110_comphy_xfi_rx_training(x1, x2);
+ SMC_RET1(handle, ret);
+ case MV_SIP_COMPHY_DIG_RESET:
+ /* x1: comphy_base, x2: comphy_index, x3: mode, x4: command */
+ ret = mvebu_cp110_comphy_digital_reset(x1, x2, x3, x4);
+ SMC_RET1(handle, ret);
+
+	/* Miscellaneous FIDs */
+ case MV_SIP_DRAM_SIZE:
+ ret = mvebu_get_dram_size(MVEBU_REGS_BASE);
+ SMC_RET1(handle, ret);
+ case MV_SIP_LLC_ENABLE:
+ for (i = 0; i < ap_get_count(); i++)
+ llc_runtime_enable(i);
+
+ SMC_RET1(handle, 0);
+#ifdef MVEBU_PMU_IRQ_WA
+ case MV_SIP_PMU_IRQ_ENABLE:
+ mvebu_pmu_interrupt_enable();
+ SMC_RET1(handle, 0);
+ case MV_SIP_PMU_IRQ_DISABLE:
+ mvebu_pmu_interrupt_disable();
+ SMC_RET1(handle, 0);
+#endif
+ case MV_SIP_DFX:
+ if (x1 >= MV_SIP_DFX_THERMAL_INIT &&
+ x1 <= MV_SIP_DFX_THERMAL_SEL_CHANNEL) {
+ ret = mvebu_dfx_thermal_handle(x1, &read, x2, x3);
+ SMC_RET2(handle, ret, read);
+ }
+ if (x1 >= MV_SIP_DFX_SREAD && x1 <= MV_SIP_DFX_SWRITE) {
+ ret = mvebu_dfx_misc_handle(x1, &read, x2, x3);
+ SMC_RET2(handle, ret, read);
+ }
+
+ SMC_RET1(handle, SMC_UNK);
+ case MV_SIP_DDR_PHY_WRITE:
+ ret = mvebu_ddr_phy_write(x1, x2);
+ SMC_RET1(handle, ret);
+ case MV_SIP_DDR_PHY_READ:
+ read = 0;
+ ret = mvebu_ddr_phy_read(x1, (uint16_t *)&read);
+ SMC_RET2(handle, ret, read);
+ case MV_SIP_RNG_64:
+ if ((x1 % 2 + 1) > sizeof(read)/4) {
+ ERROR("%s: Maximum %ld random bytes per SMC call\n",
+ __func__, sizeof(read));
+ SMC_RET1(handle, SMC_UNK);
+ }
+ ret = eip76_rng_get_random((uint8_t *)&read, 4 * (x1 % 2 + 1));
+ SMC_RET2(handle, ret, read);
+ default:
+ ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
+ SMC_RET1(handle, SMC_UNK);
+ }
+}
+
+/* Define a runtime service descriptor for fast SMC calls */
+DECLARE_RT_SVC(
+ marvell_sip_svc,
+ OEN_SIP_START,
+ OEN_SIP_END,
+ SMC_TYPE_FAST,
+ NULL,
+ mrvl_sip_smc_handler
+);
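
To illustrate the calling convention only, a hedged sketch of a non-secure caller querying the DRAM size through this service using the Linux kernel's arm_smccc_smc() helper (example_query_dram_size() is hypothetical and not part of these sources):

#include <linux/arm-smccc.h>

static u64 example_query_dram_size(void)
{
	struct arm_smccc_res res;

	/* MV_SIP_DRAM_SIZE as defined above; no arguments are used */
	arm_smccc_smc(0x82000010, 0, 0, 0, 0, 0, 0, 0, &res);

	/* The handler returns the size via SMC_RET1(), i.e. in x0/a0 */
	return res.a0;
}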
diff --git a/plat/marvell/armada/common/mss/mss_common.mk b/plat/marvell/armada/common/mss/mss_common.mk
new file mode 100644
index 0000000..4ab4359
--- /dev/null
+++ b/plat/marvell/armada/common/mss/mss_common.mk
@@ -0,0 +1,20 @@
+#
+# Copyright (C) 2018 Marvell International Ltd.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+# https://spdx.org/licenses
+#
+
+
+PLAT_MARVELL := plat/marvell/armada
+MSS_SOURCE := $(PLAT_MARVELL)/common/mss
+
+BL2_SOURCES += $(MSS_SOURCE)/mss_scp_bootloader.c \
+ $(PLAT_MARVELL)/common/plat_delay_timer.c \
+ drivers/delay_timer/delay_timer.c \
+ $(MARVELL_DRV) \
+ $(BOARD_DIR)/board/marvell_plat_config.c
+
+BL31_SOURCES += $(MSS_SOURCE)/mss_ipc_drv.c
+
+PLAT_INCLUDES += -I$(MSS_SOURCE)
diff --git a/plat/marvell/armada/common/mss/mss_ipc_drv.c b/plat/marvell/armada/common/mss/mss_ipc_drv.c
new file mode 100644
index 0000000..70ccfa5
--- /dev/null
+++ b/plat/marvell/armada/common/mss/mss_ipc_drv.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <string.h>
+
+#include <common/debug.h>
+#include <lib/mmio.h>
+
+#include <plat_marvell.h>
+#include <mss_ipc_drv.h>
+
+#define IPC_MSG_BASE_MASK MVEBU_REGS_BASE_MASK
+
+#define IPC_CH_NUM_OF_MSG (16)
+#define IPC_CH_MSG_IDX (-1)
+
+unsigned long mv_pm_ipc_msg_base;
+unsigned int mv_pm_ipc_queue_size;
+
+unsigned int msg_sync;
+int msg_index = IPC_CH_MSG_IDX;
+
+/******************************************************************************
+ * mv_pm_ipc_init
+ *
+ * DESCRIPTION: Initialize PM IPC infrastructure
+ ******************************************************************************
+ */
+int mv_pm_ipc_init(unsigned long ipc_control_addr)
+{
+ struct mss_pm_ipc_ctrl *ipc_control =
+ (struct mss_pm_ipc_ctrl *)ipc_control_addr;
+
+ /* Initialize PM IPC control block */
+ mv_pm_ipc_msg_base = ipc_control->msg_base_address |
+ IPC_MSG_BASE_MASK;
+ mv_pm_ipc_queue_size = ipc_control->queue_size;
+
+ return 0;
+}
+
+/******************************************************************************
+ * mv_pm_ipc_queue_addr_get
+ *
+ * DESCRIPTION: Advance to the next IPC message entry and return its address
+ ******************************************************************************
+ */
+unsigned int mv_pm_ipc_queue_addr_get(void)
+{
+ unsigned int addr;
+
+ inv_dcache_range((uint64_t)&msg_index, sizeof(msg_index));
+ msg_index = msg_index + 1;
+ if (msg_index >= IPC_CH_NUM_OF_MSG)
+ msg_index = 0;
+
+ addr = (unsigned int)(mv_pm_ipc_msg_base +
+ (msg_index * mv_pm_ipc_queue_size));
+
+ flush_dcache_range((uint64_t)&msg_index, sizeof(msg_index));
+
+ return addr;
+}
+
+/******************************************************************************
+ * mv_pm_ipc_msg_rx
+ *
+ * DESCRIPTION: Retrieve message from IPC channel
+ ******************************************************************************
+ */
+int mv_pm_ipc_msg_rx(unsigned int channel_id, struct mss_pm_ipc_msg *msg)
+{
+ unsigned int addr = mv_pm_ipc_queue_addr_get();
+
+ msg->msg_reply = mmio_read_32(addr + IPC_MSG_REPLY_LOC);
+
+ return 0;
+}
+
+/******************************************************************************
+ * mv_pm_ipc_msg_tx
+ *
+ * DESCRIPTION: Send message via IPC channel
+ ******************************************************************************
+ */
+int mv_pm_ipc_msg_tx(unsigned int channel_id, unsigned int msg_id,
+ unsigned int cluster_power_state)
+{
+ unsigned int addr = mv_pm_ipc_queue_addr_get();
+
+	/* Verify that the queue entry used for the host message is free */
+ if (mmio_read_32(addr + IPC_MSG_STATE_LOC) == IPC_MSG_FREE) {
+ inv_dcache_range((uint64_t)&msg_sync, sizeof(msg_sync));
+ msg_sync = msg_sync + 1;
+ flush_dcache_range((uint64_t)&msg_sync, sizeof(msg_sync));
+
+ mmio_write_32(addr + IPC_MSG_SYNC_ID_LOC, msg_sync);
+ mmio_write_32(addr + IPC_MSG_ID_LOC, msg_id);
+ mmio_write_32(addr + IPC_MSG_CPU_ID_LOC, channel_id);
+ mmio_write_32(addr + IPC_MSG_POWER_STATE_LOC,
+ cluster_power_state);
+ mmio_write_32(addr + IPC_MSG_STATE_LOC, IPC_MSG_OCCUPY);
+
+ } else {
+ ERROR("%s: FAILED\n", __func__);
+ }
+
+ return 0;
+}
diff --git a/plat/marvell/armada/common/mss/mss_ipc_drv.h b/plat/marvell/armada/common/mss/mss_ipc_drv.h
new file mode 100644
index 0000000..bcb4b2d
--- /dev/null
+++ b/plat/marvell/armada/common/mss/mss_ipc_drv.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef MSS_IPC_DRV_H
+#define MSS_IPC_DRV_H
+
+#include <lib/psci/psci.h>
+
+#define MV_PM_FW_IPC_VERSION_MAGIC (0xCA530000) /* Do NOT change */
+/* Increment for each version */
+#define MV_PM_FW_IPC_VERSION_SEQ (0x00000001)
+#define MV_PM_FW_IPC_VERSION (MV_PM_FW_IPC_VERSION_MAGIC | \
+ MV_PM_FW_IPC_VERSION_SEQ)
+
+#define IPC_MSG_STATE_LOC (0x0)
+#define IPC_MSG_SYNC_ID_LOC (0x4)
+#define IPC_MSG_ID_LOC (0x8)
+#define IPC_MSG_RET_CH_ID_LOC (0xC)
+#define IPC_MSG_CPU_ID_LOC (0x10)
+#define IPC_MSG_CLUSTER_ID_LOC (0x14)
+#define IPC_MSG_SYSTEM_ID_LOC (0x18)
+#define IPC_MSG_POWER_STATE_LOC (0x1C)
+#define IPC_MSG_REPLY_LOC (0x20)
+#define IPC_MSG_RESERVED_LOC (0x24)
+
+/* IPC initialization state */
+enum mss_pm_ipc_init_state {
+ IPC_UN_INITIALIZED = 1,
+ IPC_INITIALIZED = 2
+};
+
+/* IPC queue direction */
+enum mss_pm_ipc_init_msg_dir {
+ IPC_MSG_TX = 0,
+ IPC_MSG_RX = 1
+};
+
+/* IPC message state */
+enum mss_pm_ipc_msg_state {
+ IPC_MSG_FREE = 1,
+ IPC_MSG_OCCUPY = 2
+
+};
+
+/* IPC control block */
+struct mss_pm_ipc_ctrl {
+ unsigned int ctrl_base_address;
+ unsigned int msg_base_address;
+ unsigned int num_of_channels;
+ unsigned int channel_size;
+ unsigned int queue_size;
+};
+
+/* IPC message types */
+enum mss_pm_msg_id {
+ PM_IPC_MSG_CPU_SUSPEND = 1,
+ PM_IPC_MSG_CPU_OFF = 2,
+ PM_IPC_MSG_CPU_ON = 3,
+ PM_IPC_MSG_SYSTEM_RESET = 4,
+ PM_IPC_MSG_SYSTEM_SUSPEND = 5,
+ PM_IPC_MAX_MSG
+};
+
+struct mss_pm_ipc_msg {
+	unsigned int msg_sync_id;	/*
+					 * Sync number used to match a
+					 * message reply with the message
+					 * that was sent
+					 */
+ unsigned int msg_id; /* Message Id */
+ unsigned int ret_channel_id; /* IPC channel reply */
+ unsigned int cpu_id; /* CPU Id */
+ unsigned int cluster_id; /* Cluster Id */
+ unsigned int system_id; /* System Id */
+ unsigned int power_state;
+ unsigned int msg_reply; /* Message reply */
+};
+
+/* IPC queue */
+struct mss_pm_ipc_queue {
+ unsigned int state;
+ struct mss_pm_ipc_msg msg;
+};
+
+/* IPC channel */
+struct mss_pm_ipc_ch {
+ struct mss_pm_ipc_queue *tx_queue;
+ struct mss_pm_ipc_queue *rx_queue;
+};
+
+/*****************************************************************************
+ * mv_pm_ipc_init
+ *
+ * DESCRIPTION: Initialize PM IPC infrastructure
+ *****************************************************************************
+ */
+int mv_pm_ipc_init(unsigned long ipc_control_addr);
+
+/*****************************************************************************
+ * mv_pm_ipc_msg_rx
+ *
+ * DESCRIPTION: Retrieve message from IPC channel
+ *****************************************************************************
+ */
+int mv_pm_ipc_msg_rx(unsigned int channel_id, struct mss_pm_ipc_msg *msg);
+
+/*****************************************************************************
+ * mv_pm_ipc_msg_tx
+ *
+ * DESCRIPTION: Send message via IPC channel
+ *****************************************************************************
+ */
+int mv_pm_ipc_msg_tx(unsigned int channel_id, unsigned int msg_id,
+ unsigned int cluster_power_state);
+
+#endif /* MSS_IPC_DRV_H */
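
A minimal round-trip sketch using the interface above (channel id and power state values are placeholders; example_pm_suspend_request() is not part of these sources, and the real flow waits for the MSS to acknowledge before reading back the reply):

static int example_pm_suspend_request(unsigned int cpu_id,
				      unsigned int cluster_power_state)
{
	struct mss_pm_ipc_msg reply;

	/* Queue a CPU_SUSPEND request towards the MSS */
	mv_pm_ipc_msg_tx(cpu_id, PM_IPC_MSG_CPU_SUSPEND, cluster_power_state);

	/* Fetch the reply written back by the MSS firmware */
	mv_pm_ipc_msg_rx(cpu_id, &reply);

	return (int)reply.msg_reply;
}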
diff --git a/plat/marvell/armada/common/mss/mss_mem.h b/plat/marvell/armada/common/mss/mss_mem.h
new file mode 100644
index 0000000..5d68ac7
--- /dev/null
+++ b/plat/marvell/armada/common/mss/mss_mem.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef MSS_MEM_H
+#define MSS_MEM_H
+
+/* MSS SRAM Memory base */
+#define MSS_SRAM_PM_CONTROL_BASE (MVEBU_REGS_BASE + 0x520000)
+
+enum mss_pm_ctrl_handshake {
+ MSS_UN_INITIALIZED = 0,
+ MSS_COMPATIBILITY_ERROR = 1,
+ MSS_ACKNOWLEDGMENT = 2,
+ HOST_ACKNOWLEDGMENT = 3
+};
+
+enum mss_pm_ctrl_rtos_env {
+ MSS_MULTI_PROCESS_ENV = 0,
+ MSS_SINGLE_PROCESS_ENV = 1,
+ MSS_MAX_PROCESS_ENV
+};
+
+struct mss_pm_ctrl_block {
+ /* This field is used to synchronize the Host
+ * and MSS initialization sequence
+ * Valid Values
+ * 0 - Un-Initialized
+ * 1 - Compatibility Error
+ * 2 - MSS Acknowledgment
+ * 3 - Host Acknowledgment
+ */
+ unsigned int handshake;
+
+ /*
+	 * This field includes the Host IPC version. Once received by the
+	 * MSS, it is compared against the MSS IPC version, and the MSS
+	 * acknowledgment is set to "compatibility error" if they do not match.
+ */
+ unsigned int ipc_version;
+ unsigned int ipc_base_address;
+ unsigned int ipc_state;
+
+ /* Following fields defines firmware core architecture */
+ unsigned int num_of_cores;
+ unsigned int num_of_clusters;
+ unsigned int num_of_cores_per_cluster;
+
+ /* Following fields define pm trace debug base address */
+ unsigned int pm_trace_ctrl_base_address;
+ unsigned int pm_trace_info_base_address;
+ unsigned int pm_trace_info_core_size;
+
+ unsigned int ctrl_blk_size;
+};
+
+#endif /* MSS_MEM_H */
diff --git a/plat/marvell/armada/common/mss/mss_scp_bl2_format.h b/plat/marvell/armada/common/mss/mss_scp_bl2_format.h
new file mode 100644
index 0000000..90913b0
--- /dev/null
+++ b/plat/marvell/armada/common/mss/mss_scp_bl2_format.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef MSS_SCP_BL2_FORMAT_H
+#define MSS_SCP_BL2_FORMAT_H
+
+#define MAX_NR_OF_FILES 8
+#define FILE_MAGIC 0xddd01ff
+#define HEADER_VERSION 0x1
+
+#define MSS_IDRAM_SIZE 0x10000 /* 64KB */
+#define MSS_SRAM_SIZE 0x8000 /* 32KB */
+
+/* Types definitions */
+typedef struct file_header {
+	/* Magic specific to the concatenated file format (used for validation) */
+ uint32_t magic;
+ uint32_t nr_of_imgs; /* Number of images concatenated */
+} file_header_t;
+
+/* Types definitions */
+enum cm3_t {
+ MSS_AP,
+ MSS_CP0,
+ MSS_CP1,
+ MSS_CP2,
+ MSS_CP3,
+ MG_CP0,
+ MG_CP1,
+ MG_CP2,
+};
+
+typedef struct img_header {
+ uint32_t type; /* CM3 type, can be one of cm3_t */
+ uint32_t length; /* Image length */
+ uint32_t version; /* For sanity checks and future
+ * extended functionality
+ */
+} img_header_t;
+
+#endif /* MSS_SCP_BL2_FORMAT_H */
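
The structures above imply the following on-storage layout for a concatenated SCP_BL2 image (a sketch assuming two concatenated images; the offset arithmetic matches the loader later in this patch):

/*
 * offset 0:
 *	file_header_t        magic, nr_of_imgs = 2
 * offset sizeof(file_header_t):
 *	img_header_t[0]      type/length/version of image 0
 *	img_header_t[1]      type/length/version of image 1
 * offset sizeof(file_header_t) + 2 * sizeof(img_header_t):
 *	image 0 payload      img_header_t[0].length bytes
 *	image 1 payload      img_header_t[1].length bytes, immediately after
 */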
diff --git a/plat/marvell/armada/common/mss/mss_scp_bootloader.c b/plat/marvell/armada/common/mss/mss_scp_bootloader.c
new file mode 100644
index 0000000..fbede1b
--- /dev/null
+++ b/plat/marvell/armada/common/mss/mss_scp_bootloader.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <assert.h>
+
+#include <platform_def.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <mg_conf_cm3/mg_conf_cm3.h>
+#include <lib/mmio.h>
+
+#include <plat_pm_trace.h>
+#include <mss_scp_bootloader.h>
+#include <mss_ipc_drv.h>
+#include <mss_mem.h>
+#include <mss_defs.h>
+#include <mss_scp_bl2_format.h>
+
+#define MSS_DMA_TIMEOUT 1000
+#define MSS_EXTERNAL_SPACE 0x50000000
+#define MSS_EXTERNAL_ADDR_MASK 0xfffffff
+#define MSS_INTERNAL_SPACE 0x40000000
+#define MSS_INTERNAL_ADDR_MASK 0x00ffffff
+
+#define DMA_SIZE 128
+
+#define MSS_HANDSHAKE_TIMEOUT 50
+
+static int mss_check_image_ready(volatile struct mss_pm_ctrl_block *mss_pm_crtl)
+{
+ int timeout = MSS_HANDSHAKE_TIMEOUT;
+
+ /* Wait for SCP to signal it's ready */
+ while ((mss_pm_crtl->handshake != MSS_ACKNOWLEDGMENT) &&
+ (timeout-- > 0))
+ mdelay(1);
+
+ if (mss_pm_crtl->handshake != MSS_ACKNOWLEDGMENT)
+ return -1;
+
+ mss_pm_crtl->handshake = HOST_ACKNOWLEDGMENT;
+
+ return 0;
+}
+
+static int mss_iram_dma_load(uint32_t src_addr, uint32_t size,
+ uintptr_t mss_regs)
+{
+ uint32_t i, loop_num, timeout;
+
+ /* load image to MSS RAM using DMA */
+ loop_num = (size / DMA_SIZE) + !!(size % DMA_SIZE);
+ for (i = 0; i < loop_num; i++) {
+ /* write source address */
+ mmio_write_32(MSS_DMA_SRCBR(mss_regs),
+ src_addr + (i * DMA_SIZE));
+ /* write destination address */
+ mmio_write_32(MSS_DMA_DSTBR(mss_regs), (i * DMA_SIZE));
+ /* make sure DMA data is ready before triggering it */
+ dsb();
+ /* set the DMA control register */
+ mmio_write_32(MSS_DMA_CTRLR(mss_regs),
+ ((MSS_DMA_CTRLR_REQ_SET <<
+ MSS_DMA_CTRLR_REQ_OFFSET) |
+ (DMA_SIZE << MSS_DMA_CTRLR_SIZE_OFFSET)));
+ /* Poll DMA_ACK at MSS_DMACTLR until it is ready */
+ timeout = MSS_DMA_TIMEOUT;
+ while (timeout > 0U) {
+ if (((mmio_read_32(MSS_DMA_CTRLR(mss_regs)) >>
+ MSS_DMA_CTRLR_ACK_OFFSET) &
+ MSS_DMA_CTRLR_ACK_MASK)
+ == MSS_DMA_CTRLR_ACK_READY) {
+ break;
+ }
+ udelay(50);
+ timeout--;
+ }
+ if (timeout == 0) {
+ ERROR("\nMSS DMA failed (timeout)\n");
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int mss_image_load(uint32_t src_addr, uint32_t size,
+ uintptr_t mss_regs, uintptr_t sram)
+{
+ uint32_t chunks = 1; /* !sram case */
+ uint32_t chunk_num;
+ int ret;
+
+	/* Check that the image size does not exceed the MSS CM3 ID-RAM size */
+ if (size > MSS_IDRAM_SIZE) {
+ ERROR("image is too big to fit into MSS CM3 memory\n");
+ return 1;
+ }
+
+	/* The CPx MSS DMA cannot access DRAM directly in secure boot mode.
+	 * Copy the MSS FW image to the MSS SRAM with the CPU first, then run
+	 * the MSS DMA to copy it from SRAM to IRAM.
+ */
+ if (sram != 0) {
+ chunks = size / MSS_SRAM_SIZE + !!(size % MSS_SRAM_SIZE);
+ }
+
+ NOTICE("%s Loading MSS FW from addr. 0x%x Size 0x%x to MSS at 0x%lx\n",
+ sram == 0 ? "" : "SECURELY", src_addr, size, mss_regs);
+ for (chunk_num = 0; chunk_num < chunks; chunk_num++) {
+ size_t chunk_size = size;
+ uint32_t img_src = MSS_EXTERNAL_SPACE | /* no SRAM */
+ (src_addr & MSS_EXTERNAL_ADDR_MASK);
+
+ if (sram != 0) {
+ uintptr_t chunk_source =
+ src_addr + MSS_SRAM_SIZE * chunk_num;
+
+ if (chunk_num != (size / MSS_SRAM_SIZE)) {
+ chunk_size = MSS_SRAM_SIZE;
+ } else {
+ chunk_size = size % MSS_SRAM_SIZE;
+ }
+
+ if (chunk_size == 0) {
+ break;
+ }
+
+ VERBOSE("Chunk %d -> SRAM 0x%lx from 0x%lx SZ 0x%lx\n",
+ chunk_num, sram, chunk_source, chunk_size);
+ memcpy((void *)sram, (void *)chunk_source, chunk_size);
+ dsb();
+ img_src = MSS_INTERNAL_SPACE |
+ (sram & MSS_INTERNAL_ADDR_MASK);
+ }
+
+ ret = mss_iram_dma_load(img_src, chunk_size, mss_regs);
+ if (ret != 0) {
+ ERROR("MSS FW chunk %d load failed\n", chunk_num);
+ return ret;
+ }
+ }
+
+ bl2_plat_configure_mss_windows(mss_regs);
+
+ if (sram != 0) {
+ /* Wipe the MSS SRAM after using it as copy buffer */
+ memset((void *)sram, 0, MSS_SRAM_SIZE);
+ NOTICE("CP MSS startup is postponed\n");
+ /* FW loaded, but CPU startup postponed until final CP setup */
+ mmio_write_32(sram, MSS_FW_READY_MAGIC);
+ dsb();
+ } else {
+ /* Release M3 from reset */
+ mmio_write_32(MSS_M3_RSTCR(mss_regs),
+ (MSS_M3_RSTCR_RST_OFF <<
+ MSS_M3_RSTCR_RST_OFFSET));
+ }
+
+ NOTICE("Done\n");
+
+ return 0;
+}
+
+/* Load image to the MSS AP and do PM-related initialization.
+ * Note that this routine differs from the other CM3 loading routines because
+ * the AP firmware is dedicated to PM, and therefore some additional PM
+ * initialization is required.
+ */
+static int mss_ap_load_image(uintptr_t single_img,
+ uint32_t image_size, uint32_t ap_idx)
+{
+ volatile struct mss_pm_ctrl_block *mss_pm_crtl;
+ int ret;
+
+ /* TODO: add PM Control Info from platform */
+ mss_pm_crtl = (struct mss_pm_ctrl_block *)MSS_SRAM_PM_CONTROL_BASE;
+ mss_pm_crtl->ipc_version = MV_PM_FW_IPC_VERSION;
+ mss_pm_crtl->num_of_clusters = PLAT_MARVELL_CLUSTER_COUNT;
+ mss_pm_crtl->num_of_cores_per_cluster =
+ PLAT_MARVELL_CLUSTER_CORE_COUNT;
+ mss_pm_crtl->num_of_cores = PLAT_MARVELL_CLUSTER_COUNT *
+ PLAT_MARVELL_CLUSTER_CORE_COUNT;
+ mss_pm_crtl->pm_trace_ctrl_base_address = AP_MSS_ATF_CORE_CTRL_BASE;
+ mss_pm_crtl->pm_trace_info_base_address = AP_MSS_ATF_CORE_INFO_BASE;
+ mss_pm_crtl->pm_trace_info_core_size = AP_MSS_ATF_CORE_INFO_SIZE;
+ VERBOSE("MSS Control Block = 0x%x\n", MSS_SRAM_PM_CONTROL_BASE);
+ VERBOSE("mss_pm_crtl->ipc_version = 0x%x\n",
+ mss_pm_crtl->ipc_version);
+ VERBOSE("mss_pm_crtl->num_of_cores = 0x%x\n",
+ mss_pm_crtl->num_of_cores);
+ VERBOSE("mss_pm_crtl->num_of_clusters = 0x%x\n",
+ mss_pm_crtl->num_of_clusters);
+ VERBOSE("mss_pm_crtl->num_of_cores_per_cluster = 0x%x\n",
+ mss_pm_crtl->num_of_cores_per_cluster);
+ VERBOSE("mss_pm_crtl->pm_trace_ctrl_base_address = 0x%x\n",
+ mss_pm_crtl->pm_trace_ctrl_base_address);
+ VERBOSE("mss_pm_crtl->pm_trace_info_base_address = 0x%x\n",
+ mss_pm_crtl->pm_trace_info_base_address);
+ VERBOSE("mss_pm_crtl->pm_trace_info_core_size = 0x%x\n",
+ mss_pm_crtl->pm_trace_info_core_size);
+
+ /* TODO: add checksum to image */
+ VERBOSE("Send info about the SCP_BL2 image to be transferred to SCP\n");
+
+ ret = mss_image_load(single_img, image_size,
+ bl2_plat_get_ap_mss_regs(ap_idx), 0);
+ if (ret != 0) {
+ ERROR("SCP Image load failed\n");
+ return -1;
+ }
+
+ /* check that the image was loaded successfully */
+ ret = mss_check_image_ready(mss_pm_crtl);
+ if (ret != 0)
+ NOTICE("SCP Image doesn't contain PM firmware\n");
+
+ return 0;
+}
+
+/* Load CM3 image (single_img) to CM3 pointed by cm3_type */
+static int load_img_to_cm3(enum cm3_t cm3_type,
+ uintptr_t single_img, uint32_t image_size)
+{
+ int ret, ap_idx, cp_index;
+ uint32_t ap_count = bl2_plat_get_ap_count();
+
+ switch (cm3_type) {
+ case MSS_AP:
+ for (ap_idx = 0; ap_idx < ap_count; ap_idx++) {
+ NOTICE("Load image to AP%d MSS\n", ap_idx);
+ ret = mss_ap_load_image(single_img, image_size, ap_idx);
+ if (ret != 0)
+ return ret;
+ }
+ break;
+ case MSS_CP0:
+ case MSS_CP1:
+ case MSS_CP2:
+ case MSS_CP3:
+		/* In enum cm3_t: MSS_AP = 0, MSS_CP0 = 1,
+		 * ...
+		 * MSS_CP3 = 4,
+		 * so the actual CP index is MSS_CPx - 1
+		 */
+ cp_index = cm3_type - 1;
+ for (ap_idx = 0; ap_idx < ap_count; ap_idx++) {
+ /* Check if we should load this image
+ * according to number of CPs
+ */
+ if (bl2_plat_get_cp_count(ap_idx) <= cp_index) {
+ NOTICE("Skipping MSS CP%d related image\n",
+ cp_index);
+ break;
+ }
+
+ NOTICE("Load image to CP%d MSS AP%d\n",
+ cp_index, ap_idx);
+ ret = mss_image_load(single_img, image_size,
+ bl2_plat_get_cp_mss_regs(
+ ap_idx, cp_index),
+ bl2_plat_get_cp_mss_sram(
+ ap_idx, cp_index));
+ if (ret != 0) {
+ ERROR("SCP Image load failed\n");
+ return -1;
+ }
+ }
+ break;
+ case MG_CP0:
+ case MG_CP1:
+ case MG_CP2:
+ cp_index = cm3_type - MG_CP0;
+ if (bl2_plat_get_cp_count(0) <= cp_index) {
+ NOTICE("Skipping MG CP%d related image\n",
+ cp_index);
+ break;
+ }
+ NOTICE("Load image to CP%d MG\n", cp_index);
+ ret = mg_image_load(single_img, image_size, cp_index);
+ if (ret != 0) {
+ ERROR("SCP Image load failed\n");
+ return -1;
+ }
+ break;
+ default:
+ ERROR("SCP_BL2 wrong img format (cm3_type=%d)\n", cm3_type);
+ break;
+ }
+
+ return 0;
+}
+
+/* The Armada 8K has 5 service CPUs and the Armada 7K has 3, so a method is
+ * required for loading firmware to all of the service CPUs. To achieve that,
+ * the scp_bl2 image is in fact a file containing up to 5 concatenated firmware
+ * images, and this routine splits the concatenated image into single images
+ * dedicated to the appropriate service CPUs and then loads them.
+ */
+static int split_and_load_bl2_image(void *image)
+{
+ file_header_t *file_hdr;
+ img_header_t *img_hdr;
+ uintptr_t single_img;
+ int i;
+
+ file_hdr = (file_header_t *)image;
+
+ if (file_hdr->magic != FILE_MAGIC) {
+ ERROR("SCP_BL2 wrong img format\n");
+ return -1;
+ }
+
+ if (file_hdr->nr_of_imgs > MAX_NR_OF_FILES) {
+ ERROR("SCP_BL2 concatenated image contains too many images\n");
+ return -1;
+ }
+
+ img_hdr = (img_header_t *)((uintptr_t)image + sizeof(file_header_t));
+ single_img = (uintptr_t)image + sizeof(file_header_t) +
+ sizeof(img_header_t) * file_hdr->nr_of_imgs;
+
+ NOTICE("SCP_BL2 contains %d concatenated images\n",
+ file_hdr->nr_of_imgs);
+ for (i = 0; i < file_hdr->nr_of_imgs; i++) {
+
+ /* Before loading make sanity check on header */
+ if (img_hdr->version != HEADER_VERSION) {
+ ERROR("Wrong header, img corrupted exiting\n");
+ return -1;
+ }
+
+ load_img_to_cm3(img_hdr->type, single_img, img_hdr->length);
+
+ /* Prepare offsets for next run */
+ single_img += img_hdr->length;
+ img_hdr++;
+ }
+
+ return 0;
+}
+
+int scp_bootloader_transfer(void *image, unsigned int image_size)
+{
+#ifdef SCP_BL2_BASE
+ assert((uintptr_t) image == SCP_BL2_BASE);
+#endif
+
+ VERBOSE("Concatenated img size %d\n", image_size);
+
+ if (image_size == 0) {
+ ERROR("SCP_BL2 image size can't be 0 (current size = 0x%x)\n",
+ image_size);
+ return -1;
+ }
+
+ if (split_and_load_bl2_image(image))
+ return -1;
+
+ return 0;
+}
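
For context, scp_bootloader_transfer() is normally reached from BL2's SCP_BL2 handling hook. A sketch of the expected hand-off shape, assuming TF-A's standard bl2_plat_handle_scp_bl2() hook and image_info_t layout (the platform's real hook lives in its BL2 setup code, not in this file):

int bl2_plat_handle_scp_bl2(image_info_t *scp_bl2_image_info)
{
	/* Hand the loaded, concatenated SCP_BL2 image to the MSS loader */
	return scp_bootloader_transfer(
		(void *)scp_bl2_image_info->image_base,
		scp_bl2_image_info->image_size);
}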
diff --git a/plat/marvell/armada/common/mss/mss_scp_bootloader.h b/plat/marvell/armada/common/mss/mss_scp_bootloader.h
new file mode 100644
index 0000000..d65354a
--- /dev/null
+++ b/plat/marvell/armada/common/mss/mss_scp_bootloader.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#ifndef MSS_SCP_BOOTLOADER_H
+#define MSS_SCP_BOOTLOADER_H
+
+int scp_bootloader_transfer(void *image, unsigned int image_size);
+uintptr_t bl2_plat_get_cp_mss_regs(int ap_idx, int cp_idx);
+uintptr_t bl2_plat_get_cp_mss_sram(int ap_idx, int cp_idx);
+uintptr_t bl2_plat_get_ap_mss_regs(int ap_idx);
+uint32_t bl2_plat_get_cp_count(int ap_idx);
+uint32_t bl2_plat_get_ap_count(void);
+void bl2_plat_configure_mss_windows(uintptr_t mss_regs);
+int bl2_plat_mss_check_image_ready(void);
+
+#endif /* MSS_SCP_BOOTLOADER_H */
diff --git a/plat/marvell/armada/common/plat_delay_timer.c b/plat/marvell/armada/common/plat_delay_timer.c
new file mode 100644
index 0000000..2539752
--- /dev/null
+++ b/plat/marvell/armada/common/plat_delay_timer.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <arch_helpers.h>
+#include <drivers/delay_timer.h>
+
+#include <mvebu_def.h>
+
+#define SYS_COUNTER_FREQ_IN_MHZ (COUNTER_FREQUENCY/1000000)
+
+static uint32_t plat_get_timer_value(void)
+{
+ /*
+ * Generic delay timer implementation expects the timer to be a down
+ * counter. We apply bitwise NOT operator to the tick values returned
+ * by read_cntpct_el0() to simulate the down counter.
+ */
+ return (uint32_t)(~read_cntpct_el0());
+}
+
+static const timer_ops_t plat_timer_ops = {
+ .get_timer_value = plat_get_timer_value,
+ .clk_mult = 1,
+ .clk_div = SYS_COUNTER_FREQ_IN_MHZ
+};
+
+void plat_delay_timer_init(void)
+{
+ timer_init(&plat_timer_ops);
+}
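
For reference, a simplified sketch of what the generic delay_timer driver does with these ops (the real udelay() lives in drivers/delay_timer; example_udelay() below is illustrative only): it treats get_timer_value() as a down counter and scales microseconds to ticks with clk_div/clk_mult.

static void example_udelay(const timer_ops_t *ops, uint32_t usec)
{
	uint32_t start = ops->get_timer_value();
	uint32_t total_ticks = (usec * ops->clk_div) / ops->clk_mult;

	/* On a down counter, (start - now) is the number of elapsed ticks */
	while ((uint32_t)(start - ops->get_timer_value()) < total_ticks)
		;
}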