Diffstat (limited to 'plat/marvell/armada/common/aarch64')
-rw-r--r--  plat/marvell/armada/common/aarch64/marvell_bl2_mem_params_desc.c | 168
-rw-r--r--  plat/marvell/armada/common/aarch64/marvell_common.c              | 137
-rw-r--r--  plat/marvell/armada/common/aarch64/marvell_helpers.S             | 259
3 files changed, 564 insertions(+), 0 deletions(-)
diff --git a/plat/marvell/armada/common/aarch64/marvell_bl2_mem_params_desc.c b/plat/marvell/armada/common/aarch64/marvell_bl2_mem_params_desc.c
new file mode 100644
index 0000000..6c55858
--- /dev/null
+++ b/plat/marvell/armada/common/aarch64/marvell_bl2_mem_params_desc.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+
+#include <common/bl_common.h>
+#include <common/desc_image_load.h>
+#include <marvell_def.h>
+#include <plat/common/platform.h>
+
+/*******************************************************************************
+ * The following descriptor provides BL image/ep information that is used by
+ * BL2 to load the images; a subset of this information is also passed to the
+ * next BL image. The image loading sequence is managed by populating the
+ * images in the required loading order. The image execution sequence is
+ * managed by populating the `next_handoff_image_id` with the next
+ * executable image id.
+ ******************************************************************************/
+static bl_mem_params_node_t bl2_mem_params_descs[] = {
+#ifdef SCP_BL2_BASE
+ /* Fill SCP_BL2 related information if it exists */
+ {
+ .image_id = SCP_BL2_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY,
+ VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY,
+ VERSION_2, image_info_t, 0),
+ .image_info.image_base = SCP_BL2_BASE,
+ .image_info.image_max_size = SCP_BL2_SIZE,
+
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ },
+#endif /* SCP_BL2_BASE */
+
+#ifdef EL3_PAYLOAD_BASE
+ /* Fill EL3 payload related information (BL31 is the EL3 payload) */
+ {
+ .image_id = BL31_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t,
+ SECURE | EXECUTABLE | EP_FIRST_EXE),
+ .ep_info.pc = EL3_PAYLOAD_BASE,
+ .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS),
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t,
+ IMAGE_ATTRIB_PLAT_SETUP | IMAGE_ATTRIB_SKIP_LOADING),
+
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ },
+
+#else /* EL3_PAYLOAD_BASE */
+
+ /* Fill BL31 related information */
+ {
+ .image_id = BL31_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t,
+ SECURE | EXECUTABLE | EP_FIRST_EXE),
+ .ep_info.pc = BL31_BASE,
+ .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS),
+#if DEBUG
+ .ep_info.args.arg3 = MARVELL_BL31_PLAT_PARAM_VAL,
+#endif
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, IMAGE_ATTRIB_PLAT_SETUP),
+ .image_info.image_base = BL31_BASE,
+ .image_info.image_max_size = BL31_LIMIT - BL31_BASE,
+
+# ifdef BL32_BASE
+ .next_handoff_image_id = BL32_IMAGE_ID,
+# else
+ .next_handoff_image_id = BL33_IMAGE_ID,
+# endif
+ },
+
+# ifdef BL32_BASE
+ /* Fill BL32 related information */
+ {
+ .image_id = BL32_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t, SECURE | EXECUTABLE),
+ .ep_info.pc = BL32_BASE,
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, 0),
+ .image_info.image_base = BL32_BASE,
+ .image_info.image_max_size = BL32_LIMIT - BL32_BASE,
+
+ .next_handoff_image_id = BL33_IMAGE_ID,
+ },
+
+ /*
+ * Fill BL32 external 1 related information.
+ * A typical use for the extra1 image is with OP-TEE,
+ * where it is the pager image.
+ */
+ {
+ .image_id = BL32_EXTRA1_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+ .image_info.image_base = BL32_BASE,
+ .image_info.image_max_size = BL32_LIMIT - BL32_BASE,
+
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ },
+
+ /*
+ * Fill BL32 external 2 related information.
+ * A typical use for extra2 image is with OP-TEE,
+ * where it is the paged image.
+ */
+ {
+ .image_id = BL32_EXTRA2_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+#ifdef SPD_opteed
+ .image_info.image_base = MARVELL_OPTEE_PAGEABLE_LOAD_BASE,
+ .image_info.image_max_size = MARVELL_OPTEE_PAGEABLE_LOAD_SIZE,
+#endif
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ },
+# endif /* BL32_BASE */
+
+ /* Fill BL33 related information */
+ {
+ .image_id = BL33_IMAGE_ID,
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t, NON_SECURE | EXECUTABLE),
+# ifdef PRELOADED_BL33_BASE
+ .ep_info.pc = PRELOADED_BL33_BASE,
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+# else
+ .ep_info.pc = MARVELL_DRAM_BASE,
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, 0),
+ .image_info.image_base = MARVELL_DRAM_BASE,
+ .image_info.image_max_size = MARVELL_DRAM_SIZE,
+# endif /* PRELOADED_BL33_BASE */
+
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ }
+#endif /* EL3_PAYLOAD_BASE */
+};
+
+REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs)
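
Reviewer note: SET_STATIC_PARAM_HEAD, used throughout the table above, statically initializes the common param_header of each ep_info/image_info member. A simplified sketch of its expansion (the authoritative definition lives in TF-A's common headers):

    /* Simplified expansion sketch of SET_STATIC_PARAM_HEAD; see TF-A's
     * include/common/bl_common.h for the real definition. */
    #define SET_STATIC_PARAM_HEAD(_p, _type, _ver, _p_type, _attr)	\
    	._p.h.type = (uint8_t)(_type),				\
    	._p.h.version = (uint8_t)(_ver),			\
    	._p.h.size = (uint16_t)sizeof(_p_type),			\
    	._p.h.attr = (uint32_t)(_attr)
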
diff --git a/plat/marvell/armada/common/aarch64/marvell_common.c b/plat/marvell/armada/common/aarch64/marvell_common.c
new file mode 100644
index 0000000..21a62d4
--- /dev/null
+++ b/plat/marvell/armada/common/aarch64/marvell_common.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <assert.h>
+
+#include <platform_def.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <lib/mmio.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+#include <plat_marvell.h>
+
+/* Weak definitions may be overridden in a specific Marvell platform port */
+#pragma weak plat_get_ns_image_entrypoint
+#pragma weak plat_marvell_get_mmap
+
+/*
+ * Set up the page tables for the generic and platform-specific memory regions.
+ * The extents of the generic memory regions are specified by the function
+ * arguments and consist of:
+ * - Trusted SRAM seen by the BL image;
+ * - Code section;
+ * - Read-only data section;
+ * - Coherent memory region, if applicable.
+ */
+void marvell_setup_page_tables(uintptr_t total_base,
+ size_t total_size,
+ uintptr_t code_start,
+ uintptr_t code_limit,
+ uintptr_t rodata_start,
+ uintptr_t rodata_limit
+#if USE_COHERENT_MEM
+ ,
+ uintptr_t coh_start,
+ uintptr_t coh_limit
+#endif
+ )
+{
+ /*
+ * Map the Trusted SRAM with appropriate memory attributes.
+ * Subsequent mappings will adjust the attributes for specific regions.
+ */
+ VERBOSE("Trusted SRAM seen by this BL image: %p - %p\n",
+ (void *) total_base, (void *) (total_base + total_size));
+ mmap_add_region(total_base, total_base,
+ total_size,
+ MT_MEMORY | MT_RW | MT_SECURE);
+
+ /* Re-map the code section */
+ VERBOSE("Code region: %p - %p\n",
+ (void *) code_start, (void *) code_limit);
+ mmap_add_region(code_start, code_start,
+ code_limit - code_start,
+ MT_CODE | MT_SECURE);
+
+ /* Re-map the read-only data section */
+ VERBOSE("Read-only data region: %p - %p\n",
+ (void *) rodata_start, (void *) rodata_limit);
+ mmap_add_region(rodata_start, rodata_start,
+ rodata_limit - rodata_start,
+ MT_RO_DATA | MT_SECURE);
+
+#if USE_COHERENT_MEM
+ /* Re-map the coherent memory region */
+ VERBOSE("Coherent region: %p - %p\n",
+ (void *) coh_start, (void *) coh_limit);
+ mmap_add_region(coh_start, coh_start,
+ coh_limit - coh_start,
+ MT_DEVICE | MT_RW | MT_SECURE);
+#endif
+
+ /* Now (re-)map the platform-specific memory regions */
+ mmap_add(plat_marvell_get_mmap());
+
+ /* Create the page tables to reflect the above mappings */
+ init_xlat_tables();
+}
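
Reviewer note: a usage sketch, not part of this patch. A BL31 port would typically call marvell_setup_page_tables() from its architecture setup hook with the linker-derived section boundaries, then enable the MMU. The BL_CODE_*/BL_RO_DATA_*/BL_COHERENT_RAM_* symbols below follow TF-A's common linker conventions and are assumptions here:

    /* Hypothetical caller, assuming TF-A's usual linker symbols. */
    void marvell_plat_arch_setup(void)
    {
    	marvell_setup_page_tables(BL31_BASE, BL31_END - BL31_BASE,
    				  BL_CODE_BASE, BL_CODE_END,
    				  BL_RO_DATA_BASE, BL_RO_DATA_END
    #if USE_COHERENT_MEM
    				  , BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_END
    #endif
    				  );
    	enable_mmu_el3(0);
    }
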
+
+unsigned long plat_get_ns_image_entrypoint(void)
+{
+ return PLAT_MARVELL_NS_IMAGE_OFFSET;
+}
+
+/*****************************************************************************
+ * Gets SPSR for BL32 entry
+ *****************************************************************************
+ */
+uint32_t marvell_get_spsr_for_bl32_entry(void)
+{
+ /*
+ * The Secure Payload Dispatcher service is responsible for
+ * setting the SPSR prior to entry into the BL32 image.
+ */
+ return 0;
+}
+
+/*****************************************************************************
+ * Gets SPSR for BL33 entry
+ *****************************************************************************
+ */
+uint32_t marvell_get_spsr_for_bl33_entry(void)
+{
+ unsigned long el_status;
+ unsigned int mode;
+ uint32_t spsr;
+
+ /* Figure out what mode we enter the non-secure world in */
+ el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
+ el_status &= ID_AA64PFR0_ELX_MASK;
+
+ mode = (el_status) ? MODE_EL2 : MODE_EL1;
+
+ /*
+ * TODO: Consider the possibility of specifying the SPSR in
+ * the FIP ToC and allowing the platform to have a say as
+ * well.
+ */
+ spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+ return spsr;
+}
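
Reviewer note: SPSR_64() packs the target EL, stack-pointer selection and DAIF mask into an AArch64 SPSR value. A rough sketch of the encoding, with field shifts/masks as named in TF-A's include/arch/aarch64/arch.h (see that header for the authoritative macro):

    /* Rough sketch of the SPSR_64 encoding. */
    #define SPSR_64_SKETCH(el, sp, daif)				\
    	((MODE_RW_64 << MODE_RW_SHIFT) |			\
    	 (((el) & MODE_EL_MASK) << MODE_EL_SHIFT) |		\
    	 (((sp) & MODE_SP_MASK) << MODE_SP_SHIFT) |		\
    	 (((daif) & SPSR_DAIF_MASK) << SPSR_DAIF_SHIFT))
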
+
+/*****************************************************************************
+ * Returns ARM platform specific memory map regions.
+ *****************************************************************************
+ */
+const mmap_region_t *plat_marvell_get_mmap(void)
+{
+ return plat_marvell_mmap;
+}
+
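Reviewer note: plat_marvell_mmap is supplied by each platform port. A minimal hypothetical table for illustration only; the region macros come from xlat_tables_v2.h, while the bases and the 0x1000 size are placeholders, not values from this patch:

    /* Hypothetical platform mapping table; {0}-terminated. */
    const mmap_region_t plat_marvell_mmap[] = {
    	MAP_REGION_FLAT(MARVELL_SHARED_RAM_BASE, MARVELL_SHARED_RAM_SIZE,
    			MT_MEMORY | MT_RW | MT_SECURE),
    	MAP_REGION_FLAT(PLAT_MARVELL_UART_BASE, 0x1000 /* placeholder */,
    			MT_DEVICE | MT_RW | MT_SECURE),
    	{0}
    };
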
diff --git a/plat/marvell/armada/common/aarch64/marvell_helpers.S b/plat/marvell/armada/common/aarch64/marvell_helpers.S
new file mode 100644
index 0000000..3038ec0
--- /dev/null
+++ b/plat/marvell/armada/common/aarch64/marvell_helpers.S
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2020, ARM Limited. All rights reserved.
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * https://spdx.org/licenses
+ */
+
+#include <asm_macros.S>
+#include <cortex_a72.h>
+#ifndef PLAT_a3700
+#include <drivers/marvell/ccu.h>
+#include <drivers/marvell/cache_llc.h>
+#endif
+#include <marvell_def.h>
+#include <platform_def.h>
+
+ .weak plat_marvell_calc_core_pos
+ .weak plat_my_core_pos
+ .globl plat_crash_console_init
+ .globl plat_crash_console_putc
+ .globl plat_crash_console_flush
+ .globl platform_mem_init
+ .globl disable_mmu_dcache
+ .globl invalidate_tlb_all
+ .globl platform_unmap_sram
+ .globl disable_sram
+ .globl disable_icache
+ .globl invalidate_icache_all
+ .globl marvell_exit_bootrom
+ .globl ca72_l2_enable_unique_clean
+
+ /* -----------------------------------------------------
+ * unsigned int plat_my_core_pos(void)
+ * This function uses the plat_marvell_calc_core_pos()
+ * definition to get the index of the calling CPU.
+ * -----------------------------------------------------
+ */
+func plat_my_core_pos
+ mrs x0, mpidr_el1
+ b plat_marvell_calc_core_pos
+endfunc plat_my_core_pos
+
+ /* -----------------------------------------------------
+ * unsigned int plat_marvell_calc_core_pos(uint64_t mpidr)
+ * Helper function to calculate the core position.
+ * With this function: CorePos = (ClusterId * 2) +
+ * CoreId
+ * -----------------------------------------------------
+ */
+func plat_marvell_calc_core_pos
+ and x1, x0, #MPIDR_CPU_MASK
+ and x0, x0, #MPIDR_CLUSTER_MASK
+ add x0, x1, x0, LSR #7
+ ret
+endfunc plat_marvell_calc_core_pos
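
Reviewer note: the LSR #7 above is the usual trick for a two-cores-per-cluster layout. The cluster ID sits in MPIDR[15:8], so shifting the masked cluster field right by 7 rather than 8 yields ClusterId * 2 in one step. A C rendering of the same computation (illustrative only):

    /* C equivalent of plat_marvell_calc_core_pos (illustrative). */
    unsigned int calc_core_pos(uint64_t mpidr)
    {
    	unsigned int core_id = (unsigned int)(mpidr & MPIDR_CPU_MASK);
    	unsigned int cluster_x2 =
    		(unsigned int)((mpidr & MPIDR_CLUSTER_MASK) >> 7);

    	return cluster_x2 + core_id;	/* (ClusterId * 2) + CoreId */
    }
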
+
+ /* ---------------------------------------------
+ * int plat_crash_console_init(void)
+ * Function to initialize the crash console
+ * without a C Runtime to print crash report.
+ * Clobber list : x0, x1, x2
+ * ---------------------------------------------
+ */
+func plat_crash_console_init
+#ifdef PLAT_a3700
+ mov x1, x30
+ bl get_ref_clk
+ mov x30, x1
+ mov_imm x1, 1000000
+ mul x1, x0, x1
+#else
+ mov_imm x1, PLAT_MARVELL_UART_CLK_IN_HZ
+#endif
+ mov_imm x0, PLAT_MARVELL_UART_BASE
+ mov_imm x2, MARVELL_CONSOLE_BAUDRATE
+#ifdef PLAT_a3700
+ b console_a3700_core_init
+#else
+ b console_16550_core_init
+#endif
+endfunc plat_crash_console_init
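
Reviewer note: the core console routines branched to here follow TF-A's crash-console convention of taking the UART base, input clock and baud rate in x0-x2. Expressed as approximate C prototypes (a sketch of the contract; the routines themselves are assembly, declared in TF-A's 16550 UART header):

    int console_16550_core_init(uintptr_t base_addr,
    			    unsigned int uart_clk,
    			    unsigned int baud_rate);
    int console_16550_core_putc(int c, uintptr_t base_addr);
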
+
+ /* ---------------------------------------------
+ * int plat_crash_console_putc(int c)
+ * Function to print a character on the crash
+ * console without a C Runtime.
+ * Clobber list : x1, x2
+ * ---------------------------------------------
+ */
+func plat_crash_console_putc
+ mov_imm x1, PLAT_MARVELL_UART_BASE
+#ifdef PLAT_a3700
+
+ b console_a3700_core_putc
+#else
+ b console_16550_core_putc
+#endif
+endfunc plat_crash_console_putc
+
+ /* ---------------------------------------------
+ * void plat_crash_console_flush()
+ * Function to force a write of all buffered
+ * data that hasn't been output.
+ * Out : void.
+ * Clobber list : x0
+ * ---------------------------------------------
+ */
+func plat_crash_console_flush
+ mov_imm x0, PLAT_MARVELL_UART_BASE
+#ifdef PLAT_a3700
+ b console_a3700_core_flush
+#else
+ b console_16550_core_flush
+#endif
+endfunc plat_crash_console_flush
+
+ /* ---------------------------------------------------------------------
+ * We don't need to carry out any memory initialization on Marvell
+ * platforms. The Secure RAM is accessible straight away.
+ * ---------------------------------------------------------------------
+ */
+func platform_mem_init
+ ret
+endfunc platform_mem_init
+
+ /* -----------------------------------------------------
+ * Disable the MMU and data cache
+ * -----------------------------------------------------
+ */
+func disable_mmu_dcache
+ mrs x0, sctlr_el3
+ bic x0, x0, 0x1 /* M bit - MMU */
+ bic x0, x0, 0x4 /* C bit - Dcache L1 & L2 */
+ msr sctlr_el3, x0
+ isb
+ b mmu_off
+mmu_off:
+ ret
+endfunc disable_mmu_dcache
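
Reviewer note: the literal masks cleared above, and the 0x1000 used in disable_icache below, correspond to the M, C and I enable bits of SCTLR_EL3. Sketched here for reference; TF-A's arch.h provides SCTLR_M_BIT, SCTLR_C_BIT and SCTLR_I_BIT for the same purpose:

    #define SCTLR_M_BIT_SKETCH	(1u << 0)	/* MMU enable */
    #define SCTLR_C_BIT_SKETCH	(1u << 2)	/* D-cache enable */
    #define SCTLR_I_BIT_SKETCH	(1u << 12)	/* I-cache enable */
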
+
+ /* -----------------------------------------------------
+ * Invalidate all TLB entries
+ * -----------------------------------------------------
+ */
+func invalidate_tlb_all
+ tlbi alle3
+ dsb sy
+ isb
+ ret
+endfunc invalidate_tlb_all
+
+ /* -----------------------------------------------------
+ * Disable the instruction cache
+ * -----------------------------------------------------
+ */
+func disable_icache
+ mrs x0, sctlr_el3
+ bic x0, x0, 0x1000 /* I bit - Icache L1 & L2 */
+ msr sctlr_el3, x0
+ isb
+ ret
+endfunc disable_icache
+
+ /* -----------------------------------------------------
+ * Invalidate all of the instruction caches
+ * -----------------------------------------------------
+ */
+func invalidate_icache_all
+ ic ialluis
+ isb sy
+ ret
+endfunc invalidate_icache_all
+
+ /* -----------------------------------------------------
+ * Clear the SRAM enabling bit to unmap SRAM
+ * -----------------------------------------------------
+ */
+func platform_unmap_sram
+ ldr x0, =CCU_SRAM_WIN_CR
+ str wzr, [x0]
+ ret
+endfunc platform_unmap_sram
+
+ /* -----------------------------------------------------
+ * Disable the SRAM
+ * -----------------------------------------------------
+ */
+func disable_sram
+ /* Disable the line lockings. They must be disabled explicitly
+ * or the OS will have problems using the cache. */
+ ldr x1, =MASTER_LLC_TC0_LOCK
+ str wzr, [x1]
+
+ /* Invalidate all ways */
+ ldr w1, =LLC_WAY_MASK
+ ldr x0, =MASTER_LLC_INV_WAY
+ str w1, [x0]
+
+ /* Finally disable LLC */
+ ldr x0, =MASTER_LLC_CTRL
+ str wzr, [x0]
+
+ ret
+endfunc disable_sram
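
Reviewer note: the same sequence in C, using TF-A's mmio accessors. An illustrative equivalent, not code from this patch; the MASTER_LLC_* and LLC_WAY_MASK macros come from cache_llc.h as included above:

    /* Illustrative C equivalent of disable_sram. */
    #include <lib/mmio.h>

    static void disable_sram_c(void)
    {
    	mmio_write_32(MASTER_LLC_TC0_LOCK, 0);		/* drop line lockings */
    	mmio_write_32(MASTER_LLC_INV_WAY, LLC_WAY_MASK);/* invalidate all ways */
    	mmio_write_32(MASTER_LLC_CTRL, 0);		/* disable the LLC */
    }
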
+
+ /* -----------------------------------------------------
+ * Operations performed when exiting the bootROM:
+ * Disable the MMU
+ * Disable and invalidate the dcache
+ * Unmap and disable the SRAM
+ * Disable and invalidate the icache
+ * -----------------------------------------------------
+ */
+func marvell_exit_bootrom
+ /* Save the system restore address */
+ mov x28, x0
+
+ /* Close the caches and MMU */
+ bl disable_mmu_dcache
+
+ /*
+ * There is nothing important in the caches now,
+ * so invalidate them instead of cleaning.
+ */
+ adr x0, __RW_START__
+ adr x1, __RW_END__
+ sub x1, x1, x0
+ bl inv_dcache_range
+ bl invalidate_tlb_all
+
+ /*
+ * Remove the SRAM memory mapping; the DDR mapping
+ * remains so that the boot image can execute.
+ */
+ bl platform_unmap_sram
+
+ /* Disable the SRAM */
+ bl disable_sram
+
+ /* Disable and invalidate icache */
+ bl disable_icache
+ bl invalidate_icache_all
+
+ mov x0, x28
+ br x0
+endfunc marvell_exit_bootrom
+
+ /*
+ * Enable L2 UniqueClean evictions with data
+ */
+func ca72_l2_enable_unique_clean
+
+ mrs x0, CORTEX_A72_L2ACTLR_EL1
+ orr x0, x0, #CORTEX_A72_L2ACTLR_ENABLE_UNIQUE_CLEAN
+ msr CORTEX_A72_L2ACTLR_EL1, x0
+
+ ret
+endfunc ca72_l2_enable_unique_clean