author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-28 09:13:47 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-28 09:13:47 +0000
commit | 102b0d2daa97dae68d3eed54d8fe37a9cc38a892 (patch)
tree | bcf648efac40ca6139842707f0eba5a4496a6dd2 /plat/nxp/common
parent | Initial commit. (diff)
Adding upstream version 2.8.0+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'plat/nxp/common')
69 files changed, 8145 insertions, 0 deletions
diff --git a/plat/nxp/common/aarch64/bl31_data.S b/plat/nxp/common/aarch64/bl31_data.S new file mode 100644 index 0000000..cc91540 --- /dev/null +++ b/plat/nxp/common/aarch64/bl31_data.S @@ -0,0 +1,558 @@ +/* + * Copyright 2018-2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <asm_macros.S> + +#include "bl31_data.h" +#include "plat_psci.h" +#include "platform_def.h" + +.global _getCoreData +.global _setCoreData +.global _getCoreState +.global _setCoreState +.global _init_global_data +.global _get_global_data +.global _set_global_data +.global _initialize_psci +.global _init_task_flags +.global _set_task1_start +.global _set_task1_done + + +/* Function returns the specified data field value from the specified cpu + * core data area + * in: x0 = core mask lsb + * x1 = data field name/offset + * out: x0 = data value + * uses x0, x1, x2, [x13, x14, x15] + */ +func _getCoreData + + /* generate a 0-based core number from the input mask */ + clz x2, x0 + mov x0, #63 + sub x0, x0, x2 + + /* x0 = core number (0-based) */ + /* x1 = field offset */ + + /* determine if this is bootcore or secondary core */ + cbnz x0, 1f + + /* get base address for bootcore data */ + ldr x2, =BC_PSCI_BASE + add x2, x2, x1 + b 2f + +1: /* get base address for secondary core data */ + + /* x0 = core number (0-based) */ + /* x1 = field offset */ + + /* generate number of regions to offset */ + mov x2, #SEC_REGION_SIZE + mul x2, x2, x0 + + /* x1 = field offset */ + /* x2 = region offset */ + + /* generate the total offset to data element */ + sub x1, x2, x1 + + /* x1 = total offset to data element */ + + /* get the base address */ + ldr x2, =SECONDARY_TOP + + /* apply offset to base addr */ + sub x2, x2, x1 +2: + /* x2 = data element address */ + + dc ivac, x2 + dsb sy + isb + /* read data */ + ldr x0, [x2] + + ret +endfunc _getCoreData + + +/* Function returns the SoC-specific state of the specified cpu + * in: x0 = core mask lsb + * out: x0 = data value + * uses x0, x1, x2, [x13, x14, x15] + */ +func _getCoreState + + mov x1, #CORE_STATE_DATA + + /* generate a 0-based core number from the input mask */ + clz x2, x0 + mov x0, #63 + sub x0, x0, x2 + + /* x0 = core number (0-based) */ + /* x1 = field offset */ + + /* determine if this is bootcore or secondary core */ + cbnz x0, 1f + + /* get base address for bootcore data */ + ldr x2, =BC_PSCI_BASE + add x2, x2, x1 + b 2f + +1: /* get base address for secondary core data */ + + /* x0 = core number (0-based) */ + /* x1 = field offset */ + + /* generate number of regions to offset */ + mov x2, #SEC_REGION_SIZE + mul x2, x2, x0 + + /* x1 = field offset */ + /* x2 = region offset */ + + /* generate the total offset to data element */ + sub x1, x2, x1 + + /* x1 = total offset to data element */ + + /* get the base address */ + ldr x2, =SECONDARY_TOP + + /* apply offset to base addr */ + sub x2, x2, x1 +2: + /* x2 = data element address */ + + dc ivac, x2 + dsb sy + isb + + /* read data */ + ldr x0, [x2] + + ret +endfunc _getCoreState + + +/* Function writes the specified data value into the specified cpu + * core data area + * in: x0 = core mask lsb + * x1 = data field offset + * x2 = data value to write/store + * out: none + * uses x0, x1, x2, x3, [x13, x14, x15] + */ +func _setCoreData + /* x0 = core mask */ + /* x1 = field offset */ + /* x2 = data value */ + + clz x3, x0 + mov x0, #63 + sub x0, x0, x3 + + /* x0 = core number (0-based) */ + /* x1 = field offset */ + /* x2 = data value */ + + /* determine if this is bootcore or secondary core */ + 
cbnz x0, 1f + + /* get base address for bootcore data */ + ldr x3, =BC_PSCI_BASE + add x3, x3, x1 + b 2f + +1: /* get base address for secondary core data */ + + /* x0 = core number (0-based) */ + /* x1 = field offset */ + /* x2 = data value */ + + /* generate number of regions to offset */ + mov x3, #SEC_REGION_SIZE + mul x3, x3, x0 + + /* x1 = field offset */ + /* x2 = data value */ + /* x3 = region offset */ + + /* generate the total offset to data element */ + sub x1, x3, x1 + + /* x1 = total offset to data element */ + /* x2 = data value */ + + ldr x3, =SECONDARY_TOP + + /* apply offset to base addr */ + sub x3, x3, x1 + +2: + /* x2 = data value */ + /* x3 = data element address */ + + str x2, [x3] + + dc cvac, x3 + dsb sy + isb + ret +endfunc _setCoreData + + +/* Function stores the specified core state + * in: x0 = core mask lsb + * x1 = data value to write/store + * out: none + * uses x0, x1, x2, x3, [x13, x14, x15] + */ +func _setCoreState + mov x2, #CORE_STATE_DATA + + clz x3, x0 + mov x0, #63 + sub x0, x0, x3 + + /* x0 = core number (0-based) */ + /* x1 = data value */ + /* x2 = field offset */ + + /* determine if this is bootcore or secondary core */ + cbnz x0, 1f + + /* get base address for bootcore data */ + ldr x3, =BC_PSCI_BASE + add x3, x3, x2 + b 2f + +1: /* get base address for secondary core data */ + + /* x0 = core number (0-based) */ + /* x1 = data value */ + /* x2 = field offset */ + + /* generate number of regions to offset */ + mov x3, #SEC_REGION_SIZE + mul x3, x3, x0 + + /* x1 = data value */ + /* x2 = field offset */ + /* x3 = region offset */ + + /* generate the total offset to data element */ + sub x2, x3, x2 + + /* x1 = data value */ + /* x2 = total offset to data element */ + + ldr x3, =SECONDARY_TOP + + /* apply offset to base addr */ + sub x3, x3, x2 + +2: + /* x1 = data value */ + /* x3 = data element address */ + + str x1, [x3] + + dc civac, x3 + dsb sy + isb + ret +endfunc _setCoreState + + +/* Function sets the task1 start + * in: w0 = value to set flag to + * out: none + * uses x0, x1 + */ +func _set_task1_start + + ldr x1, =SMC_TASK1_BASE + + add x1, x1, #TSK_START_OFFSET + str w0, [x1] + dc cvac, x1 + dsb sy + isb + ret +endfunc _set_task1_start + + +/* Function sets the state of the task 1 done flag + * in: w0 = value to set flag to + * out: none + * uses x0, x1 + */ +func _set_task1_done + + ldr x1, =SMC_TASK1_BASE + + add x1, x1, #TSK_DONE_OFFSET + str w0, [x1] + dc cvac, x1 + dsb sy + isb + ret +endfunc _set_task1_done + + +/* Function initializes the smc global data entries + * Note: the constant LAST_SMC_GLBL_OFFSET must reference the last entry in the + * smc global region + * in: none + * out: none + * uses x0, x1, x2 + */ +func _init_global_data + + ldr x1, =SMC_GLBL_BASE + + /* x1 = SMC_GLBL_BASE */ + + mov x2, #LAST_SMC_GLBL_OFFSET + add x2, x2, x1 +1: + str xzr, [x1] + dc cvac, x1 + cmp x2, x1 + add x1, x1, #8 + b.hi 1b + + dsb sy + isb + ret +endfunc _init_global_data + + +/* Function gets the value of the specified global data element + * in: x0 = offset of data element + * out: x0 = requested data element + * uses x0, x1 + */ +func _get_global_data + + ldr x1, =SMC_GLBL_BASE + add x1, x1, x0 + dc ivac, x1 + isb + + ldr x0, [x1] + ret +endfunc _get_global_data + + +/* Function sets the value of the specified global data element + * in: x0 = offset of data element + * x1 = value to write + * out: none + * uses x0, x1, x2 + */ +func _set_global_data + + ldr x2, =SMC_GLBL_BASE + add x0, x0, x2 + str x1, [x0] + dc cvac, x0 + + dsb sy + isb 
+ ret +endfunc _set_global_data + + +/* Function initializes the core data areas + * only executed by the boot core + * in: none + * out: none + * uses: x0, x1, x2, x3, x4, x5, x6, x7, [x13, x14, x15] + */ +func _initialize_psci + mov x7, x30 + + /* initialize the bootcore psci data */ + ldr x5, =BC_PSCI_BASE + mov x6, #CORE_RELEASED + + str x6, [x5], #8 + dc cvac, x5 + str xzr, [x5], #8 + dc cvac, x5 + str xzr, [x5], #8 + dc cvac, x5 + str xzr, [x5], #8 + dc cvac, x5 + str xzr, [x5], #8 + dc cvac, x5 + str xzr, [x5], #8 + dc cvac, x5 + str xzr, [x5], #8 + dc cvac, x5 + str xzr, [x5], #8 + dc cvac, x5 + str xzr, [x5], #8 + dc cvac, x5 + str xzr, [x5], #8 + dc cvac, x5 + str xzr, [x5], #8 + dc cvac, x5 + str xzr, [x5], #8 + dc cvac, x5 + str xzr, [x5], #8 + dc cvac, x5 + str xzr, [x5], #8 + dc cvac, x5 + str xzr, [x5], #8 + dc cvac, x5 + str xzr, [x5] + dc cvac, x5 + dsb sy + isb + + /* see if we have any secondary cores */ + mov x4, #PLATFORM_CORE_COUNT + sub x4, x4, #1 + cbz x4, 3f + + /* initialize the secondary core's psci data */ + ldr x5, =SECONDARY_TOP + /* core mask lsb for core 1 */ + mov x3, #2 + sub x5, x5, #SEC_REGION_SIZE + + /* x3 = core1 mask lsb */ + /* x4 = number of secondary cores */ + /* x5 = core1 psci data base address */ +2: + /* set core state in x6 */ + mov x0, x3 + mov x6, #CORE_IN_RESET + bl _soc_ck_disabled + cbz x0, 1f + mov x6, #CORE_DISABLED +1: + add x2, x5, #CORE_STATE_DATA + str x6, [x2] + dc cvac, x2 + add x2, x5, #SPSR_EL3_DATA + str xzr, [x2] + dc cvac, x2 + add x2, x5, #CNTXT_ID_DATA + str xzr, [x2] + dc cvac, x2 + add x2, x5, #START_ADDR_DATA + str xzr, [x2] + dc cvac, x2 + add x2, x5, #LINK_REG_DATA + str xzr, [x2] + dc cvac, x2 + add x2, x5, #GICC_CTLR_DATA + str xzr, [x2] + dc cvac, x2 + add x2, x5, #ABORT_FLAG_DATA + str xzr, [x2] + dc cvac, x2 + add x2, x5, #SCTLR_DATA + str xzr, [x2] + dc cvac, x2 + add x2, x5, #CPUECTLR_DATA + str xzr, [x2] + dc cvac, x2 + add x2, x5, #AUX_01_DATA + str xzr, [x2] + dc cvac, x2 + add x2, x5, #AUX_02_DATA + str xzr, [x2] + dc cvac, x2 + add x2, x5, #AUX_03_DATA + str xzr, [x2] + dc cvac, x2 + add x2, x5, #AUX_04_DATA + str xzr, [x2] + dc cvac, x2 + add x2, x5, #AUX_05_DATA + str xzr, [x2] + dc cvac, x2 + add x2, x5, #SCR_EL3_DATA + str xzr, [x2] + dc cvac, x2 + add x2, x5, #HCR_EL2_DATA + str xzr, [x2] + dc cvac, x2 + dsb sy + isb + + sub x4, x4, #1 + cbz x4, 3f + + /* generate next core mask */ + lsl x3, x3, #1 + + /* decrement base address to next data area */ + sub x5, x5, #SEC_REGION_SIZE + b 2b +3: + mov x30, x7 + ret +endfunc _initialize_psci + + +/* Function initializes the soc init task flags + * in: none + * out: none + * uses x0, x1, [x13, x14, x15] + */ +func _init_task_flags + + /* get the base address of the first task structure */ + ldr x0, =SMC_TASK1_BASE + + /* x0 = task1 base address */ + + str wzr, [x0, #TSK_START_OFFSET] + str wzr, [x0, #TSK_DONE_OFFSET] + str wzr, [x0, #TSK_CORE_OFFSET] + dc cvac, x0 + + /* move to task2 structure */ + add x0, x0, #SMC_TASK_OFFSET + + str wzr, [x0, #TSK_START_OFFSET] + str wzr, [x0, #TSK_DONE_OFFSET] + str wzr, [x0, #TSK_CORE_OFFSET] + dc cvac, x0 + + /* move to task3 structure */ + add x0, x0, #SMC_TASK_OFFSET + + str wzr, [x0, #TSK_START_OFFSET] + str wzr, [x0, #TSK_DONE_OFFSET] + str wzr, [x0, #TSK_CORE_OFFSET] + dc cvac, x0 + + /* move to task4 structure */ + add x0, x0, #SMC_TASK_OFFSET + + str wzr, [x0, #TSK_START_OFFSET] + str wzr, [x0, #TSK_DONE_OFFSET] + str wzr, [x0, #TSK_CORE_OFFSET] + dc cvac, x0 + + dsb sy + isb + ret +endfunc _init_task_flags diff 
--git a/plat/nxp/common/aarch64/ls_helpers.S b/plat/nxp/common/aarch64/ls_helpers.S new file mode 100644 index 0000000..311dce1 --- /dev/null +++ b/plat/nxp/common/aarch64/ls_helpers.S @@ -0,0 +1,220 @@ +/* + * Copyright 2018-2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <asm_macros.S> +#include <cortex_a53.h> +#include <drivers/console.h> +#include <lib/cpus/aarch64/cortex_a72.h> + +#include <platform_def.h> + + + .globl plat_crash_console_init + .globl plat_crash_console_putc + .globl plat_crash_console_flush + .globl plat_core_pos + .globl plat_my_core_pos + .globl plat_core_mask + .globl plat_my_core_mask + .globl plat_core_pos_by_mpidr + .globl _disable_ldstr_pfetch_A53 + .globl _disable_ldstr_pfetch_A72 + .global _set_smmu_pagesz_64 + + /* int plat_crash_console_init(void) + * Function to initialize the crash console + * without a C Runtime to print crash report. + * Clobber list : x0 - x4 + */ + + /* int plat_crash_console_init(void) + * Use normal console by default. Switch it to crash + * mode so serial consoles become active again. + * NOTE: This default implementation will only work for + * crashes that occur after a normal console (marked + * valid for the crash state) has been registered with + * the console framework. To debug crashes that occur + * earlier, the platform has to override these functions + * with an implementation that initializes a console + * driver with hardcoded parameters. See + * docs/porting-guide.rst for more information. + */ +func plat_crash_console_init + mov x3, x30 + mov x0, #CONSOLE_FLAG_CRASH + bl console_switch_state + mov x0, #1 + ret x3 +endfunc plat_crash_console_init + + /* void plat_crash_console_putc(int character) + * Output through the normal console by default. + */ +func plat_crash_console_putc + b console_putc +endfunc plat_crash_console_putc + + /* void plat_crash_console_flush(void) + * Flush normal console by default. + */ +func plat_crash_console_flush + b console_flush +endfunc plat_crash_console_flush + +/* This function implements a part of the critical interface between the psci + * generic layer and the platform that allows the former to query the platform + * to convert an MPIDR to a unique linear index. An error code (-1) is returned + * in case the MPIDR is invalid. + */ +func plat_core_pos_by_mpidr + + b plat_core_pos + +endfunc plat_core_pos_by_mpidr + +#if (SYMMETRICAL_CLUSTERS) +/* unsigned int plat_my_core_mask(void) + * generate a mask bit for this core + */ +func plat_my_core_mask + mrs x0, MPIDR_EL1 + b plat_core_mask +endfunc plat_my_core_mask + +/* unsigned int plat_core_mask(u_register_t mpidr) + * generate a lsb-based mask bit for the core specified by mpidr in x0. + * + * SoC core = ((cluster * cpu_per_cluster) + core) + * mask = (1 << SoC core) + */ +func plat_core_mask + mov w1, wzr + mov w2, wzr + + /* extract cluster */ + bfxil w1, w0, #8, #8 + /* extract cpu # */ + bfxil w2, w0, #0, #8 + + mov w0, wzr + + /* error checking */ + cmp w1, #NUMBER_OF_CLUSTERS + b.ge 1f + cmp w2, #CORES_PER_CLUSTER + b.ge 1f + + mov w0, #CORES_PER_CLUSTER + mul w1, w1, w0 + add w1, w1, w2 + mov w2, #0x1 + lsl w0, w2, w1 +1: + ret +endfunc plat_core_mask + +/* + * unsigned int plat_my_core_pos(void) + * generate a linear core number for this core + */ +func plat_my_core_pos + mrs x0, MPIDR_EL1 + b plat_core_pos +endfunc plat_my_core_pos + +/* + * unsigned int plat_core_pos(u_register_t mpidr) + * Generate a linear core number for the core specified by mpidr. 
+ * + * SoC core = ((cluster * cpu_per_cluster) + core) + * Returns -1 if mpidr invalid + */ +func plat_core_pos + mov w1, wzr + mov w2, wzr + bfxil w1, w0, #8, #8 /* extract cluster */ + bfxil w2, w0, #0, #8 /* extract cpu # */ + + mov w0, #-1 + + /* error checking */ + cmp w1, #NUMBER_OF_CLUSTERS + b.ge 1f + cmp w2, #CORES_PER_CLUSTER + b.ge 1f + + mov w0, #CORES_PER_CLUSTER + mul w1, w1, w0 + add w0, w1, w2 +1: + ret +endfunc plat_core_pos + +#endif + +/* this function disables the load-store prefetch of the calling core + * Note: this function is for A53 cores ONLY + * in: none + * out: none + * uses x0 + */ +func _disable_ldstr_pfetch_A53 + mrs x0, CORTEX_A53_CPUACTLR_EL1 + tst x0, #CORTEX_A53_CPUACTLR_EL1_L1PCTL + b.ne 1f + b 2f + +.align 6 +1: + dsb sy + isb + bic x0, x0, #CORTEX_A53_CPUACTLR_EL1_L1PCTL + msr CORTEX_A53_CPUACTLR_EL1, x0 + isb + +2: + ret +endfunc _disable_ldstr_pfetch_A53 + + +/* this function disables the load-store prefetch of the calling core + * Note: this function is for A72 cores ONLY + * in: none + * out: none + * uses x0 + */ +func _disable_ldstr_pfetch_A72 + + mrs x0, CORTEX_A72_CPUACTLR_EL1 + tst x0, #CORTEX_A72_CPUACTLR_EL1_DISABLE_L1_DCACHE_HW_PFTCH + b.eq 1f + b 2f + +.align 6 +1: + dsb sy + isb + orr x0, x0, #CORTEX_A72_CPUACTLR_EL1_DISABLE_L1_DCACHE_HW_PFTCH + msr CORTEX_A72_CPUACTLR_EL1, x0 + isb + +2: + ret +endfunc _disable_ldstr_pfetch_A72 + +/* + * Function sets the SACR pagesize to 64k + */ +func _set_smmu_pagesz_64 + + ldr x1, =NXP_SMMU_ADDR + ldr w0, [x1, #0x10] + orr w0, w0, #1 << 16 /* setting to 64K page */ + str w0, [x1, #0x10] + + ret +endfunc _set_smmu_pagesz_64 diff --git a/plat/nxp/common/fip_handler/common/plat_def_fip_uuid.h b/plat/nxp/common/fip_handler/common/plat_def_fip_uuid.h new file mode 100644 index 0000000..65aef14 --- /dev/null +++ b/plat/nxp/common/fip_handler/common/plat_def_fip_uuid.h @@ -0,0 +1,51 @@ +/* + * Copyright 2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#ifndef PLAT_DEF_FIP_UUID_H +#define PLAT_DEF_FIP_UUID_H + +/* PHy images configs */ +#define UUID_DDR_IMEM_UDIMM_1D \ + {{0x5b, 0xdb, 0xe3, 0x83}, {0xd1, 0x9f}, {0xc7, 0x06}, 0xd4, 0x91, {0x76, 0x4f, 0x9d, 0x23, 0x2d, 0x2d} } + +#define UUID_DDR_IMEM_UDIMM_2D \ + {{0xfa, 0x0e, 0xeb, 0x21}, {0xe0, 0x7f}, {0x8e, 0x65}, 0x95, 0xd8, {0x2b, 0x94, 0xf6, 0xb8, 0x28, 0x0a} } + +#define UUID_DDR_DMEM_UDIMM_1D \ + {{0xba, 0xbb, 0xfd, 0x7e}, {0x5b, 0xf0}, {0xeb, 0xb8}, 0xeb, 0x71, {0xb1, 0x85, 0x07, 0xdd, 0xe1, 0x32} } + +#define UUID_DDR_DMEM_UDIMM_2D \ + {{0xb6, 0x99, 0x61, 0xda}, {0xf9, 0x92}, {0x4b, 0x9e}, 0x0c, 0x49, {0x74, 0xa5, 0xe0, 0x5c, 0xbe, 0xc3} } + +#define UUID_DDR_IMEM_RDIMM_1D \ + {{0x42, 0x33, 0x66, 0x52}, {0xd8, 0x94}, {0x4d, 0xc1}, 0x91, 0xcc, {0x26, 0x8f, 0x7a, 0x67, 0xf1, 0xa2} } + +#define UUID_DDR_IMEM_RDIMM_2D \ + {{0x2e, 0x95, 0x73, 0xba}, {0xb5, 0xca}, {0x7c, 0xc7}, 0xef, 0xc9, {0x5e, 0xb0, 0x42, 0xec, 0x08, 0x7a} } + +#define UUID_DDR_DMEM_RDIMM_1D \ + {{0x1c, 0x51, 0x17, 0xed}, {0x30, 0x0d}, {0xae, 0xba}, 0x87, 0x03, {0x1f, 0x37, 0x85, 0xec, 0xe1, 0x44} } + +#define UUID_DDR_DMEM_RDIMM_2D \ + {{0xe9, 0x0a, 0x90, 0x78}, {0x11, 0xd6}, {0x8b, 0xba}, 0x24, 0x35, {0xec, 0x10, 0x75, 0x4f, 0x56, 0xa5} } + +#define UUID_DDR_FW_KEY_CERT \ + {{0xac, 0x4b, 0xb8, 0x9c}, {0x8f, 0xb9}, {0x11, 0xea}, 0xbc, 0x55, {0x02, 0x42, 0xac, 0x12, 0x00, 0x03} } + +#define UUID_DDR_UDIMM_FW_CONTENT_CERT \ + {{0x2c, 0x7f, 0x52, 0x54}, {0x70, 0x92}, {0x48, 0x40}, 0x8c, 0x34, {0x87, 0x4b, 0xbf, 0xbd, 0x9d, 0x89} } + +#define 
UUID_DDR_RDIMM_FW_CONTENT_CERT \ + {{0x94, 0xc3, 0x63, 0x30}, {0x7c, 0xf7}, {0x4f, 0x1d}, 0xaa, 0xcd, {0xb5, 0x80, 0xb2, 0xc2, 0x40, 0xa5} } + +#define UUID_FUSE_PROV \ + {{0xec, 0x45, 0x90, 0x42}, {0x30, 0x0d}, {0xae, 0xba}, 0x87, 0x03, {0x1f, 0x37, 0x85, 0xec, 0xe1, 0x44} } + +#define UUID_FUSE_UP \ + {{0x89, 0x46, 0xef, 0x78}, {0x11, 0xd6}, {0x8b, 0xba}, 0x24, 0x35, {0xec, 0x10, 0x75, 0x4f, 0x56, 0xa5} } + +#endif /* PLAT_DEF_FIP_UUID_H */ diff --git a/plat/nxp/common/fip_handler/common/plat_tbbr_img_def.h b/plat/nxp/common/fip_handler/common/plat_tbbr_img_def.h new file mode 100644 index 0000000..9856f70 --- /dev/null +++ b/plat/nxp/common/fip_handler/common/plat_tbbr_img_def.h @@ -0,0 +1,53 @@ +/* + * Copyright 2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#ifndef NXP_IMG_DEF_H +#define NXP_IMG_DEF_H + +#include <export/common/tbbr/tbbr_img_def_exp.h> + +#ifdef CONFIG_DDR_FIP_IMAGE +/* DDR FIP IMAGE ID */ +#define DDR_FIP_IMAGE_ID MAX_IMG_IDS_WITH_SPMDS + +#define DDR_IMEM_UDIMM_1D_IMAGE_ID MAX_IMG_IDS_WITH_SPMDS + 1 +#define DDR_IMEM_UDIMM_2D_IMAGE_ID MAX_IMG_IDS_WITH_SPMDS + 2 + +#define DDR_DMEM_UDIMM_1D_IMAGE_ID MAX_IMG_IDS_WITH_SPMDS + 3 +#define DDR_DMEM_UDIMM_2D_IMAGE_ID MAX_IMG_IDS_WITH_SPMDS + 4 + +#define DDR_IMEM_RDIMM_1D_IMAGE_ID MAX_IMG_IDS_WITH_SPMDS + 5 +#define DDR_IMEM_RDIMM_2D_IMAGE_ID MAX_IMG_IDS_WITH_SPMDS + 6 + +#define DDR_DMEM_RDIMM_1D_IMAGE_ID MAX_IMG_IDS_WITH_SPMDS + 7 +#define DDR_DMEM_RDIMM_2D_IMAGE_ID MAX_IMG_IDS_WITH_SPMDS + 8 + +#define DDR_FW_KEY_CERT_ID MAX_IMG_IDS_WITH_SPMDS + 9 +#define DDR_UDIMM_FW_CONTENT_CERT_ID MAX_IMG_IDS_WITH_SPMDS + 10 +#define DDR_RDIMM_FW_CONTENT_CERT_ID MAX_IMG_IDS_WITH_SPMDS + 11 +/* Max Images */ +#define MAX_IMG_WITH_DDR_IDS MAX_IMG_IDS_WITH_SPMDS + 12 +#else +#define MAX_IMG_WITH_DDR_IDS MAX_IMG_IDS_WITH_SPMDS +#endif + +#ifdef POLICY_FUSE_PROVISION +/* FUSE FIP IMAGE ID */ +#define FUSE_FIP_IMAGE_ID MAX_IMG_WITH_DDR_IDS + +#define FUSE_PROV_IMAGE_ID MAX_IMG_WITH_DDR_IDS + 1 + +#define FUSE_UP_IMAGE_ID MAX_IMG_WITH_DDR_IDS + 2 + +#define MAX_IMG_WITH_FIMG_IDS MAX_IMG_WITH_DDR_IDS + 3 +#else +#define MAX_IMG_WITH_FIMG_IDS MAX_IMG_WITH_DDR_IDS +#endif + +#define MAX_NUMBER_IDS MAX_IMG_WITH_FIMG_IDS + +#endif /* NXP_IMG_DEF_H */ diff --git a/plat/nxp/common/fip_handler/common/platform_oid.h b/plat/nxp/common/fip_handler/common/platform_oid.h new file mode 100644 index 0000000..bbd6041 --- /dev/null +++ b/plat/nxp/common/fip_handler/common/platform_oid.h @@ -0,0 +1,16 @@ +/* + * Copyright 2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#define DDR_FW_CONTENT_CERT_PK_OID "1.3.6.1.4.1.4128.2200.1" +#define DDR_IMEM_UDIMM_1D_HASH_OID "1.3.6.1.4.1.4128.2200.2" +#define DDR_IMEM_UDIMM_2D_HASH_OID "1.3.6.1.4.1.4128.2200.3" +#define DDR_DMEM_UDIMM_1D_HASH_OID "1.3.6.1.4.1.4128.2200.4" +#define DDR_DMEM_UDIMM_2D_HASH_OID "1.3.6.1.4.1.4128.2200.5" +#define DDR_IMEM_RDIMM_1D_HASH_OID "1.3.6.1.4.1.4128.2200.6" +#define DDR_IMEM_RDIMM_2D_HASH_OID "1.3.6.1.4.1.4128.2200.7" +#define DDR_DMEM_RDIMM_1D_HASH_OID "1.3.6.1.4.1.4128.2200.8" +#define DDR_DMEM_RDIMM_2D_HASH_OID "1.3.6.1.4.1.4128.2200.9" diff --git a/plat/nxp/common/fip_handler/ddr_fip/ddr_fip_io.mk b/plat/nxp/common/fip_handler/ddr_fip/ddr_fip_io.mk new file mode 100644 index 0000000..7d673ba --- /dev/null +++ b/plat/nxp/common/fip_handler/ddr_fip/ddr_fip_io.mk @@ -0,0 +1,38 @@ +# +# Copyright 2020 NXP +# +# SPDX-License-Identifier: BSD-3-Clause +# +#----------------------------------------------------------------------------- +ifeq 
(${DDR_FIP_IO_STORAGE_ADDED},) + +$(eval $(call add_define, PLAT_DEF_FIP_UUID)) +$(eval $(call add_define, PLAT_TBBR_IMG_DEF)) +$(eval $(call SET_NXP_MAKE_FLAG,IMG_LOADR_NEEDED,BL2)) + +DDR_FIP_IO_STORAGE_ADDED := 1 +$(eval $(call add_define,CONFIG_DDR_FIP_IMAGE)) + +FIP_HANDLER_PATH := ${PLAT_COMMON_PATH}/fip_handler +FIP_HANDLER_COMMON_PATH := ${FIP_HANDLER_PATH}/common +DDR_FIP_IO_STORAGE_PATH := ${FIP_HANDLER_PATH}/ddr_fip + +PLAT_INCLUDES += -I${FIP_HANDLER_COMMON_PATH}\ + -I$(DDR_FIP_IO_STORAGE_PATH) + +DDR_FIP_IO_SOURCES += $(DDR_FIP_IO_STORAGE_PATH)/ddr_io_storage.c + +$(shell cp tools/nxp/plat_fiptool/plat_fiptool.mk ${PLAT_DIR}) + +ifeq (${BL_COMM_DDR_FIP_IO_NEEDED},yes) +BL_COMMON_SOURCES += ${DDR_FIP_IO_SOURCES} +else +ifeq (${BL2_DDR_FIP_IO_NEEDED},yes) +BL2_SOURCES += ${DDR_FIP_IO_SOURCES} +endif +ifeq (${BL31_DDR_FIP_IO_NEEDED},yes) +BL31_SOURCES += ${DDR_FIP_IO_SOURCES} +endif +endif +endif +#------------------------------------------------ diff --git a/plat/nxp/common/fip_handler/ddr_fip/ddr_io_storage.c b/plat/nxp/common/fip_handler/ddr_fip/ddr_io_storage.c new file mode 100644 index 0000000..fc3c4a4 --- /dev/null +++ b/plat/nxp/common/fip_handler/ddr_fip/ddr_io_storage.c @@ -0,0 +1,232 @@ +/* + * Copyright 2018-2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <assert.h> +#include <string.h> + +#include <io_block.h> +#include <io_driver.h> +#include <io_fip.h> +#include <io_memmap.h> +#include <io_storage.h> +#include <lib/utils.h> +#include <tools_share/firmware_image_package.h> +#include "ddr_io_storage.h" +#include "plat_common.h" +#include "platform_def.h" + + +/* TBD - Move these defined to the platform_def.h file. + * Keeping them for reference here + */ +extern uintptr_t backend_dev_handle; + +static uint32_t ddr_fip; + +static uintptr_t ddr_fip_dev_handle; + +static io_block_spec_t ddr_fip_block_spec = { + .offset = PLAT_DDR_FIP_OFFSET, + .length = PLAT_DDR_FIP_MAX_SIZE +}; + +static const io_uuid_spec_t ddr_imem_udimm_1d_uuid_spec = { + .uuid = UUID_DDR_IMEM_UDIMM_1D, +}; + +static const io_uuid_spec_t ddr_imem_udimm_2d_uuid_spec = { + .uuid = UUID_DDR_IMEM_UDIMM_2D, +}; + +static const io_uuid_spec_t ddr_dmem_udimm_1d_uuid_spec = { + .uuid = UUID_DDR_DMEM_UDIMM_1D, +}; + +static const io_uuid_spec_t ddr_dmem_udimm_2d_uuid_spec = { + .uuid = UUID_DDR_DMEM_UDIMM_2D, +}; + +static const io_uuid_spec_t ddr_imem_rdimm_1d_uuid_spec = { + .uuid = UUID_DDR_IMEM_RDIMM_1D, +}; + +static const io_uuid_spec_t ddr_imem_rdimm_2d_uuid_spec = { + .uuid = UUID_DDR_IMEM_RDIMM_2D, +}; + +static const io_uuid_spec_t ddr_dmem_rdimm_1d_uuid_spec = { + .uuid = UUID_DDR_DMEM_RDIMM_1D, +}; + +static const io_uuid_spec_t ddr_dmem_rdimm_2d_uuid_spec = { + .uuid = UUID_DDR_DMEM_RDIMM_2D, +}; + +#if TRUSTED_BOARD_BOOT +static const io_uuid_spec_t ddr_fw_key_cert_uuid_spec = { + .uuid = UUID_DDR_FW_KEY_CERT, +}; +static const io_uuid_spec_t ddr_udimm_fw_cert_uuid_spec = { + .uuid = UUID_DDR_UDIMM_FW_CONTENT_CERT, +}; +static const io_uuid_spec_t ddr_rdimm_fw_cert_uuid_spec = { + .uuid = UUID_DDR_RDIMM_FW_CONTENT_CERT, +}; +#endif + +static int open_ddr_fip(const uintptr_t spec); + +struct plat_io_policy { + uintptr_t *dev_handle; + uintptr_t image_spec; + int (*check)(const uintptr_t spec); +}; + +/* By default, ARM platforms load images from the FIP */ +static const struct plat_io_policy ddr_policies[] = { + [DDR_FIP_IMAGE_ID - DDR_FIP_IMAGE_ID] = { + &backend_dev_handle, + (uintptr_t)&ddr_fip_block_spec, + NULL + }, + [DDR_IMEM_UDIMM_1D_IMAGE_ID - 
DDR_FIP_IMAGE_ID] = { + &ddr_fip_dev_handle, + (uintptr_t)&ddr_imem_udimm_1d_uuid_spec, + open_ddr_fip + }, + [DDR_IMEM_UDIMM_2D_IMAGE_ID - DDR_FIP_IMAGE_ID] = { + &ddr_fip_dev_handle, + (uintptr_t)&ddr_imem_udimm_2d_uuid_spec, + open_ddr_fip + }, + [DDR_DMEM_UDIMM_1D_IMAGE_ID - DDR_FIP_IMAGE_ID] = { + &ddr_fip_dev_handle, + (uintptr_t)&ddr_dmem_udimm_1d_uuid_spec, + open_ddr_fip + }, + [DDR_DMEM_UDIMM_2D_IMAGE_ID - DDR_FIP_IMAGE_ID] = { + &ddr_fip_dev_handle, + (uintptr_t)&ddr_dmem_udimm_2d_uuid_spec, + open_ddr_fip + }, + [DDR_IMEM_RDIMM_1D_IMAGE_ID - DDR_FIP_IMAGE_ID] = { + &ddr_fip_dev_handle, + (uintptr_t)&ddr_imem_rdimm_1d_uuid_spec, + open_ddr_fip + }, + [DDR_IMEM_RDIMM_2D_IMAGE_ID - DDR_FIP_IMAGE_ID] = { + &ddr_fip_dev_handle, + (uintptr_t)&ddr_imem_rdimm_2d_uuid_spec, + open_ddr_fip + }, + [DDR_DMEM_RDIMM_1D_IMAGE_ID - DDR_FIP_IMAGE_ID] = { + &ddr_fip_dev_handle, + (uintptr_t)&ddr_dmem_rdimm_1d_uuid_spec, + open_ddr_fip + }, + [DDR_DMEM_RDIMM_2D_IMAGE_ID - DDR_FIP_IMAGE_ID] = { + &ddr_fip_dev_handle, + (uintptr_t)&ddr_dmem_rdimm_2d_uuid_spec, + open_ddr_fip + }, +#if TRUSTED_BOARD_BOOT + [DDR_FW_KEY_CERT_ID - DDR_FIP_IMAGE_ID] = { + &ddr_fip_dev_handle, + (uintptr_t)&ddr_fw_key_cert_uuid_spec, + open_ddr_fip + }, + [DDR_UDIMM_FW_CONTENT_CERT_ID - DDR_FIP_IMAGE_ID] = { + &ddr_fip_dev_handle, + (uintptr_t)&ddr_udimm_fw_cert_uuid_spec, + open_ddr_fip + }, + [DDR_RDIMM_FW_CONTENT_CERT_ID - DDR_FIP_IMAGE_ID] = { + &ddr_fip_dev_handle, + (uintptr_t)&ddr_rdimm_fw_cert_uuid_spec, + open_ddr_fip + }, +#endif +}; + +static int open_ddr_fip(const uintptr_t spec) +{ + int result; + uintptr_t local_image_handle; + + /* See if a Firmware Image Package is available */ + result = io_dev_init(ddr_fip_dev_handle, (uintptr_t)DDR_FIP_IMAGE_ID); + if (result == 0) { + result = io_open(ddr_fip_dev_handle, spec, &local_image_handle); + if (result == 0) { + VERBOSE("Using FIP\n"); + io_close(local_image_handle); + } + } + return result; +} + +/* The image can be one of the DDR PHY images, which can be sleected via DDR + * policies + */ +int plat_get_ddr_fip_image_source(unsigned int image_id, uintptr_t *dev_handle, + uintptr_t *image_spec, + int (*check)(const uintptr_t spec)) +{ + int result = -1; + const struct plat_io_policy *policy; + + if (image_id >= (DDR_FIP_IMAGE_ID + ARRAY_SIZE(ddr_policies))) { + return result; + } + + policy = &ddr_policies[image_id - DDR_FIP_IMAGE_ID]; + if (image_id == DDR_FIP_IMAGE_ID) { + result = check(policy->image_spec); + } else { + result = policy->check(policy->image_spec); + } + if (result == 0) { + *image_spec = policy->image_spec; + *dev_handle = *(policy->dev_handle); + } + return result; +} + +int ddr_fip_setup(const io_dev_connector_t *fip_dev_con, unsigned int boot_dev) +{ + int io_result; + size_t ddr_fip_offset = PLAT_DDR_FIP_OFFSET; + + /* Open connections to ddr fip and cache the handles */ + io_result = io_dev_open(fip_dev_con, (uintptr_t)&ddr_fip, + &ddr_fip_dev_handle); + assert(io_result == 0); + + switch (boot_dev) { +#if QSPI_BOOT + case BOOT_DEVICE_QSPI: + ddr_fip_offset += NXP_QSPI_FLASH_ADDR; + break; +#endif +#if NOR_BOOT + case BOOT_DEVICE_IFC_NOR: + ddr_fip_offset += NXP_NOR_FLASH_ADDR; + break; +#endif +#if FLEXSPI_NOR_BOOT + case BOOT_DEVICE_FLEXSPI_NOR: + ddr_fip_offset += NXP_FLEXSPI_FLASH_ADDR; + break; +#endif + default: + break; + } + + ddr_fip_block_spec.offset = ddr_fip_offset; + + return io_result; +} diff --git a/plat/nxp/common/fip_handler/ddr_fip/ddr_io_storage.h b/plat/nxp/common/fip_handler/ddr_fip/ddr_io_storage.h new file mode 
100644 index 0000000..6df3902 --- /dev/null +++ b/plat/nxp/common/fip_handler/ddr_fip/ddr_io_storage.h @@ -0,0 +1,26 @@ +/* + * Copyright 2018-2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#ifndef DDR_IO_STORAGE_H +#define DDR_IO_STORAGE_H + +#include <drivers/io/io_driver.h> + +#ifndef PLAT_DDR_FIP_OFFSET +#define PLAT_DDR_FIP_OFFSET 0x800000 +#endif + +#ifndef PLAT_DDR_FIP_MAX_SIZE +#define PLAT_DDR_FIP_MAX_SIZE 0x32000 +#endif + +int ddr_fip_setup(const io_dev_connector_t *fip_dev_con, unsigned int boot_dev); +int plat_get_ddr_fip_image_source(unsigned int image_id, uintptr_t *dev_handle, + uintptr_t *image_spec, + int (*check)(const uintptr_t spec)); + +#endif /* DDR_IO_STORAGE_H */ diff --git a/plat/nxp/common/fip_handler/fuse_fip/fuse.mk b/plat/nxp/common/fip_handler/fuse_fip/fuse.mk new file mode 100644 index 0000000..d8f5ae6 --- /dev/null +++ b/plat/nxp/common/fip_handler/fuse_fip/fuse.mk @@ -0,0 +1,100 @@ +# +# Copyright 2018-2020 NXP +# +# SPDX-License-Identifier: BSD-3-Clause +# +# + +NEED_FUSE := yes + +$(eval $(call add_define, PLAT_DEF_FIP_UUID)) +$(eval $(call add_define, POLICY_FUSE_PROVISION)) +$(eval $(call add_define, PLAT_TBBR_IMG_DEF)) + +$(eval $(call SET_NXP_MAKE_FLAG,IMG_LOADR_NEEDED,BL2)) +$(eval $(call SET_NXP_MAKE_FLAG,SFP_NEEDED,BL2)) +$(eval $(call SET_NXP_MAKE_FLAG,GPIO_NEEDED,BL2)) + +FIP_HANDLER_PATH := ${PLAT_COMMON_PATH}/fip_handler +FIP_HANDLER_COMMON_PATH := ${FIP_HANDLER_PATH}/common + +FUSE_SOURCES := ${FIP_HANDLER_PATH}/fuse_fip/fuse_io_storage.c + +PLAT_INCLUDES += -I${FIP_HANDLER_COMMON_PATH}\ + -I${FIP_HANDLER_PATH}/fuse_fip + +FUSE_FIP_NAME := fuse_fip.bin + +fip_fuse: ${BUILD_PLAT}/${FUSE_FIP_NAME} + +ifeq (${FUSE_PROV_FILE},) + +$(shell cp tools/nxp/plat_fiptool/plat_fiptool.mk ${PLAT_DIR}) + +else +ifeq (${TRUSTED_BOARD_BOOT},1) +FUSE_PROV_FILE_SB = $(notdir ${FUSE_PROV_FILE})_prov.sb +FUSE_FIP_ARGS += --fuse-prov ${BUILD_PLAT}/${FUSE_PROV_FILE_SB} +FUSE_FIP_DEPS += ${BUILD_PLAT}/${FUSE_PROV_FILE_SB} +else +FUSE_FIP_ARGS += --fuse-prov ${FUSE_PROV_FILE} +FUSE_FIP_DEPS += ${FUSE_PROV_FILE} +endif +endif + +ifeq (${FUSE_UP_FILE},) +else +ifeq (${TRUSTED_BOARD_BOOT},1) +FUSE_UP_FILE_SB = $(notdir ${FUSE_UP_FILE})_up.sb +FUSE_FIP_ARGS += --fuse-up ${BUILD_PLAT}/${FUSE_UP_FILE_SB} +FUSE_FIP_DEPS += ${BUILD_PLAT}/${FUSE_UP_FILE_SB} +else +FUSE_FIP_ARGS += --fuse-up ${FUSE_UP_FILE} +FUSE_FIP_DEPS += ${FUSE_UP_FILE} +endif +endif + +ifeq (${TRUSTED_BOARD_BOOT},1) + +ifeq (${MBEDTLS_DIR},) +else + $(error Error: Trusted Board Boot with X509 certificates not supported with FUSE_PROG build option) +endif + +# Path to CST directory is required to generate the CSF header +# and prepend it to image before fip image gets generated +ifeq (${CST_DIR},) + $(error Error: CST_DIR not set) +endif + +ifeq (${FUSE_INPUT_FILE},) +FUSE_INPUT_FILE := $(PLAT_DRIVERS_PATH)/auth/csf_hdr_parser/${CSF_FILE} +endif + +ifeq (${FUSE_PROV_FILE},) +else +${BUILD_PLAT}/${FUSE_PROV_FILE_SB}: ${FUSE_PROV_FILE} + @echo " Generating CSF Header for $@ $<" + $(CST_DIR)/create_hdr_esbc --in $< --out $@ --app_off ${CSF_HDR_SZ} \ + --app $< ${FUSE_INPUT_FILE} +endif + +ifeq (${FUSE_UP_FILE},) +else +${BUILD_PLAT}/${FUSE_UP_FILE_SB}: ${FUSE_UP_FILE} + @echo " Generating CSF Header for $@ $<" + $(CST_DIR)/create_hdr_esbc --in $< --out $@ --app_off ${CSF_HDR_SZ} \ + --app $< ${FUSE_INPUT_FILE} +endif + +endif + +${BUILD_PLAT}/${FUSE_FIP_NAME}: fiptool ${FUSE_FIP_DEPS} +ifeq (${FUSE_FIP_DEPS},) + $(error "Error: FUSE_PROV_FILE or/and FUSE_UP_FILE needs to point to the 
right file") +endif + ${FIPTOOL} create ${FUSE_FIP_ARGS} $@ + ${FIPTOOL} info $@ + @${ECHO_BLANK_LINE} + @echo "Built $@ successfully" + @${ECHO_BLANK_LINE} diff --git a/plat/nxp/common/fip_handler/fuse_fip/fuse_io.h b/plat/nxp/common/fip_handler/fuse_fip/fuse_io.h new file mode 100644 index 0000000..e8775d0 --- /dev/null +++ b/plat/nxp/common/fip_handler/fuse_fip/fuse_io.h @@ -0,0 +1,27 @@ +/* + * Copyright 2018-2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ +#ifndef FUSE_IO_H +#define FUSE_IO_H + +#include <drivers/io/io_driver.h> + +/* Can be overridden from platform_def.h file. + */ +#ifndef PLAT_FUSE_FIP_OFFSET +#define PLAT_FUSE_FIP_OFFSET 0x880000 +#endif +#ifndef PLAT_FUSE_FIP_MAX_SIZE +#define PLAT_FUSE_FIP_MAX_SIZE 0x80000 +#endif + +int fip_fuse_provisioning(uintptr_t image_buf, uint32_t size); +int fuse_fip_setup(const io_dev_connector_t *fip_dev_con, unsigned int boot_dev); +int plat_get_fuse_image_source(unsigned int image_id, + uintptr_t *dev_handle, + uintptr_t *image_spec, + int (*check)(const uintptr_t spec)); +#endif /* FUSE_IO_H */ diff --git a/plat/nxp/common/fip_handler/fuse_fip/fuse_io_storage.c b/plat/nxp/common/fip_handler/fuse_fip/fuse_io_storage.c new file mode 100644 index 0000000..017ffcf --- /dev/null +++ b/plat/nxp/common/fip_handler/fuse_fip/fuse_io_storage.c @@ -0,0 +1,223 @@ +/* + * Copyright 2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <assert.h> +#include <string.h> + +#include <common/debug.h> +#include <dcfg.h> +#include <drivers/delay_timer.h> +#include <fuse_prov.h> +#include <io_block.h> +#include <io_driver.h> +#include <io_fip.h> +#include <io_memmap.h> +#include <io_storage.h> +#include <lib/utils.h> +#include <nxp_gpio.h> +#include <sfp.h> +#include <sfp_error_codes.h> +#include <tools_share/firmware_image_package.h> + +#include "fuse_io.h" +#include <load_img.h> +#include <plat/common/platform.h> +#include "plat_common.h" +#include "platform_def.h" + +extern uintptr_t backend_dev_handle; + +static uint32_t fuse_fip; + +static uintptr_t fuse_fip_dev_handle; + +static io_block_spec_t fuse_fip_block_spec = { + .offset = PLAT_FUSE_FIP_OFFSET, + .length = PLAT_FUSE_FIP_MAX_SIZE +}; + +static const io_uuid_spec_t fuse_prov_uuid_spec = { + .uuid = UUID_FUSE_PROV, +}; + +static const io_uuid_spec_t fuse_up_uuid_spec = { + .uuid = UUID_FUSE_UP, +}; + +static int open_fuse_fip(const uintptr_t spec); + +struct plat_io_policy { + uintptr_t *dev_handle; + uintptr_t image_spec; + int (*check)(const uintptr_t spec); +}; + +/* By default, ARM platforms load images from the FIP */ +static const struct plat_io_policy fuse_policies[] = { + [FUSE_FIP_IMAGE_ID - FUSE_FIP_IMAGE_ID] = { + &backend_dev_handle, + (uintptr_t)&fuse_fip_block_spec, + NULL + }, + [FUSE_PROV_IMAGE_ID - FUSE_FIP_IMAGE_ID] = { + &fuse_fip_dev_handle, + (uintptr_t)&fuse_prov_uuid_spec, + open_fuse_fip + }, + [FUSE_UP_IMAGE_ID - FUSE_FIP_IMAGE_ID] = { + &fuse_fip_dev_handle, + (uintptr_t)&fuse_up_uuid_spec, + open_fuse_fip + } +}; + +static int open_fuse_fip(const uintptr_t spec) +{ + int result; + uintptr_t local_image_handle; + + /* See if a Firmware Image Package is available */ + result = io_dev_init(fuse_fip_dev_handle, (uintptr_t)FUSE_FIP_IMAGE_ID); + if (result == 0) { + result = io_open(fuse_fip_dev_handle, + spec, + &local_image_handle); + if (result == 0) { + VERBOSE("Using FIP\n"); + io_close(local_image_handle); + } + } + return result; +} + +/* The image can be one of the DDR PHY images, which can be sleected via DDR + * policies + */ 
+int plat_get_fuse_image_source(unsigned int image_id, + uintptr_t *dev_handle, + uintptr_t *image_spec, + int (*check)(const uintptr_t spec)) +{ + int result; + const struct plat_io_policy *policy; + + assert(image_id < (FUSE_FIP_IMAGE_ID + ARRAY_SIZE(fuse_policies))); + + policy = &fuse_policies[image_id - FUSE_FIP_IMAGE_ID]; + + if (image_id == FUSE_FIP_IMAGE_ID) { + result = check(policy->image_spec); + } else { + result = policy->check(policy->image_spec); + } + + if (result == 0) { + *image_spec = policy->image_spec; + *dev_handle = *(policy->dev_handle); + } + return result; +} + +int fuse_fip_setup(const io_dev_connector_t *fip_dev_con, unsigned int boot_dev) +{ + int io_result; + size_t fuse_fip_offset = PLAT_FUSE_FIP_OFFSET; + + /* Open connections to fuse fip and cache the handles */ + io_result = io_dev_open(fip_dev_con, (uintptr_t)&fuse_fip, + &fuse_fip_dev_handle); + + assert(io_result == 0); + + switch (boot_dev) { +#if QSPI_BOOT + case BOOT_DEVICE_QSPI: + fuse_fip_offset += NXP_QSPI_FLASH_ADDR; + break; +#endif +#if NOR_BOOT + case BOOT_DEVICE_IFC_NOR: + fuse_fip_offset += NXP_NOR_FLASH_ADDR; + break; +#endif +#if FLEXSPI_NOR_BOOT + case BOOT_DEVICE_FLEXSPI_NOR: + fuse_fip_offset += NXP_FLEXSPI_FLASH_ADDR; + break; +#endif + default: + break; + } + + fuse_fip_block_spec.offset = fuse_fip_offset; + + return io_result; +} + +int fip_fuse_provisioning(uintptr_t image_buf, uint32_t size) +{ + uint32_t bit_num; + uint32_t *gpio_base_addr = NULL; + struct fuse_hdr_t *fuse_hdr = NULL; + uint8_t barker[] = {0x68U, 0x39U, 0x27U, 0x81U}; + int ret = -1; + + if (sfp_check_oem_wp() == 0) { + ret = load_img(FUSE_PROV_IMAGE_ID, &image_buf, &size); + if (ret != 0) { + ERROR("Failed to load FUSE PRIV image\n"); + assert(ret == 0); + } + fuse_hdr = (struct fuse_hdr_t *)image_buf; + + /* Check barker code */ + if (memcmp(fuse_hdr->barker, barker, sizeof(barker)) != 0) { + ERROR("FUSE Barker code mismatch.\n"); + error_handler(ERROR_FUSE_BARKER); + return 1; + } + + /* Check if GPIO pin to be set for POVDD */ + if (((fuse_hdr->flags >> FLAG_POVDD_SHIFT) & 0x1) != 0) { + gpio_base_addr = + select_gpio_n_bitnum(fuse_hdr->povdd_gpio, + &bit_num); + /* + * Add delay so that Efuse gets the power + * when GPIO is enabled. + */ + ret = set_gpio_bit(gpio_base_addr, bit_num); + mdelay(EFUSE_POWERUP_DELAY_mSec); + } else { + ret = (board_enable_povdd() == true) ? 0 : PLAT_ERROR_ENABLE_POVDD; + } + if (ret != 0) { + ERROR("Error enabling board POVDD: %d\n", ret); + ERROR("Only SFP mirror register will be set.\n"); + } + + provision_fuses(image_buf, ret == 0); + + /* Check if GPIO pin to be reset for POVDD */ + if (((fuse_hdr->flags >> FLAG_POVDD_SHIFT) & 0x1) != 0) { + if (gpio_base_addr == NULL) { + gpio_base_addr = + select_gpio_n_bitnum( + fuse_hdr->povdd_gpio, + &bit_num); + } + ret = clr_gpio_bit(gpio_base_addr, bit_num); + } else { + ret = board_disable_povdd() ? 
0 : PLAT_ERROR_DISABLE_POVDD; + } + + if (ret != 0) { + ERROR("Error disabling board POVDD: %d\n", ret); + } + } + return 0; +} diff --git a/plat/nxp/common/img_loadr/img_loadr.mk b/plat/nxp/common/img_loadr/img_loadr.mk new file mode 100644 index 0000000..f64b1fa --- /dev/null +++ b/plat/nxp/common/img_loadr/img_loadr.mk @@ -0,0 +1,21 @@ +# +# Copyright 2020 NXP +# +# SPDX-License-Identifier: BSD-3-Clause +# + +IMG_LOADR_DRIVERS_PATH := ${PLAT_COMMON_PATH}/img_loadr + +IMG_LOADR_SOURCES := $(IMG_LOADR_DRIVERS_PATH)/load_img.c +PLAT_INCLUDES += -I$(IMG_LOADR_DRIVERS_PATH) + +ifeq (${BL_COMM_IMG_LOADR_NEEDED},yes) +BL_COMMON_SOURCES += ${IMG_LOADR_SOURCES} +else +ifeq (${BL2_IMG_LOADR_NEEDED},yes) +BL2_SOURCES += ${IMG_LOADR_SOURCES} +endif +ifeq (${BL31_IMG_LOADR_NEEDED},yes) +BL31_SOURCES += ${IMG_LOADR_SOURCES} +endif +endif diff --git a/plat/nxp/common/img_loadr/load_img.c b/plat/nxp/common/img_loadr/load_img.c new file mode 100644 index 0000000..51011e4 --- /dev/null +++ b/plat/nxp/common/img_loadr/load_img.c @@ -0,0 +1,83 @@ +/* + * Copyright 2018-2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <assert.h> + +#include <common/bl_common.h> +#include <common/desc_image_load.h> +#include <lib/xlat_tables/xlat_tables_v2.h> + +#include "load_img.h" + +/****************************************************************************** + * This function can be used to load DDR PHY/FUSE Images + * + * @param [in] image_id Image ID to be loaded + * + * @param [in,out] image_base Location at which the image should be loaded + * In case image is prepended by a CSF header, + * image_base is pointer to actual image after + * the header + * + * @param [in,out] image_size User should pass the maximum size of the image + * possible.(Buffer size starting from image_base) + * Actual size of the image loaded is returned + * back. 
+ *****************************************************************************/ +int load_img(unsigned int image_id, uintptr_t *image_base, + uint32_t *image_size) +{ + int err = 0; + + image_desc_t img_info = { + .image_id = image_id, + SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY, + VERSION_2, image_info_t, 0), +#ifdef CSF_HEADER_PREPENDED + .image_info.image_base = *image_base - CSF_HDR_SZ, + .image_info.image_max_size = *image_size + CSF_HDR_SZ, +#else + .image_info.image_base = *image_base, + .image_info.image_max_size = *image_size, +#endif + }; + + /* Create MMU entry for the CSF header */ +#if PLAT_XLAT_TABLES_DYNAMIC +#ifdef CSF_HEADER_PREPENDED + err = mmap_add_dynamic_region(img_info.image_info.image_base, + img_info.image_info.image_base, + CSF_HDR_SZ, + MT_MEMORY | MT_RW | MT_SECURE); + if (err != 0) { + ERROR("Failed to add dynamic memory region.\n"); + return err; + } +#endif +#endif + + VERBOSE("BL2: Loading IMG %d\n", image_id); + err = load_auth_image(image_id, &img_info.image_info); + if (err != 0) { + VERBOSE("Failed to load IMG %d\n", image_id); + return err; + } + +#ifdef CSF_HEADER_PREPENDED + *image_base = img_info.image_info.image_base + CSF_HDR_SZ; + *image_size = img_info.image_info.image_size - CSF_HDR_SZ; +#if PLAT_XLAT_TABLES_DYNAMIC + mmap_remove_dynamic_region(img_info.image_info.image_base, + CSF_HDR_SZ); +#endif +#else + *image_base = img_info.image_info.image_base; + *image_size = img_info.image_info.image_size; +#endif + + return err; +} diff --git a/plat/nxp/common/img_loadr/load_img.h b/plat/nxp/common/img_loadr/load_img.h new file mode 100644 index 0000000..6f9de32 --- /dev/null +++ b/plat/nxp/common/img_loadr/load_img.h @@ -0,0 +1,14 @@ +/* + * Copyright 2018-2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#ifndef LOAD_IMAGE_H +#define LOAD_IMAGE_H + +int load_img(unsigned int image_id, uintptr_t *image_base, + uint32_t *image_size); + +#endif /* LOAD_IMAGE_H */ diff --git a/plat/nxp/common/include/default/ch_2/soc_default_base_addr.h b/plat/nxp/common/include/default/ch_2/soc_default_base_addr.h new file mode 100644 index 0000000..6296aef --- /dev/null +++ b/plat/nxp/common/include/default/ch_2/soc_default_base_addr.h @@ -0,0 +1,70 @@ +/* + * Copyright 2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#ifndef SOC_DEFAULT_BASE_ADDR_H +#define SOC_DEFAULT_BASE_ADDR_H + +/* CCSR mmu_def.h */ +#define NXP_CCSR_ADDR 0x01000000 +#define NXP_CCSR_SIZE 0x0F000000 + +#define NXP_DCSR_ADDR 0x20000000 +#define NXP_DCSR_SIZE 0x4000000 + +/* Flex-SPI controller address */ +#define NXP_FLEXSPI_ADDR 0x020C0000 +/* QSPI Flash Start address */ +#define NXP_QSPI_FLASH_ADDR 0x40000000 +/* NOR Flash Start address */ +#define NXP_IFC_REGION_ADDR 0x60000000 +#define NXP_NOR_FLASH_ADDR NXP_IFC_REGION_ADDR + +/* MMU 500 soc.c*/ +#define NXP_SMMU_ADDR 0x09000000 + +#define NXP_SNVS_ADDR 0x01E90000 + +#define NXP_DCFG_ADDR 0x01EE0000 +#define NXP_SFP_ADDR 0x01E80000 +#define NXP_RCPM_ADDR 0x01EE2000 +#define NXP_CSU_ADDR 0x01510000 +#define NXP_IFC_ADDR 0x01530000 +#define NXP_SCFG_ADDR 0x01570000 +#define NXP_DCSR_ADDR 0x20000000 +#define NXP_DCSR_DCFG_ADDR (NXP_DCSR_ADDR + 0x00140000) +#define NXP_I2C_ADDR 0x02180000 +#define NXP_ESDHC_ADDR 0x01560000 +#define NXP_UART_ADDR 0x021C0500 +#define NXP_UART1_ADDR 0x021C0600 + +#define NXP_GPIO1_ADDR 0x02300000 +#define NXP_GPIO2_ADDR 0x02310000 +#define NXP_GPIO3_ADDR 0x02320000 +#define NXP_GPIO4_ADDR 0x02330000 + +#define NXP_WDOG1_NS_ADDR 0x02390000 +#define NXP_WDOG2_NS_ADDR 
0x023A0000 +#define NXP_WDOG1_TZ_ADDR 0x023B0000 +#define NXP_WDOG2_TZ_ADDR 0x023C0000 + +#define NXP_TIMER_STATUS_ADDR 0x023F0000 + +#define NXP_GICD_4K_ADDR 0x01401000 +#define NXP_GICC_4K_ADDR 0x01402000 +#define NXP_GICD_64K_ADDR 0x01410000 +#define NXP_GICC_64K_ADDR 0x01420000 + +#define NXP_CAAM_ADDR 0x01700000 + +#define NXP_TZC_ADDR 0x01500000 +#define NXP_DDR_ADDR 0x01080000 + +#define NXP_TIMER_ADDR 0x02B00000 +#define NXP_CCI_ADDR 0x01180000 +#define NXP_RESET_ADDR 0x01E60000 +#define NXP_SEC_REGFILE_ADDR 0x01E88000 +#endif /* SOC_DEFAULT_BASE_ADDR_H */ diff --git a/plat/nxp/common/include/default/ch_2/soc_default_helper_macros.h b/plat/nxp/common/include/default/ch_2/soc_default_helper_macros.h new file mode 100644 index 0000000..928ac05 --- /dev/null +++ b/plat/nxp/common/include/default/ch_2/soc_default_helper_macros.h @@ -0,0 +1,83 @@ +/* + * Copyright 2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#ifndef SOC_DEFAULT_HELPER_MACROS_H +#define SOC_DEFAULT_HELPER_MACROS_H + +#ifdef NXP_OCRAM_TZPC_ADDR + +/* 0x1: means 4 KB + * 0x2: means 8 KB + */ +#define TZPC_BLOCK_SIZE 0x1000 +#endif + +/* DDR controller offsets and defines */ +#ifdef NXP_DDR_ADDR + +#define DDR_CFG_2_OFFSET 0x114 +#define CFG_2_FORCE_REFRESH 0x80000000 + +#endif /* NXP_DDR_ADDR */ + + /* Reset block register offsets */ +#ifdef NXP_RESET_ADDR + +/* Register Offset */ +#define RST_RSTCR_OFFSET 0x0 +#define RST_RSTRQMR1_OFFSET 0x10 +#define RST_RSTRQSR1_OFFSET 0x18 +#define BRR_OFFSET 0x60 + +/* helper macros */ +#define RSTRQSR1_SWRR 0x800 +#define RSTRQMR_RPTOE_MASK (1 << 19) + +#endif /* NXP_RESET_ADDR */ + +/* Secure-Register-File register offsets and bit masks */ +#ifdef NXP_RST_ADDR +/* Register Offset */ +#define CORE_HOLD_OFFSET 0x140 +#define RSTCNTL_OFFSET 0x180 + +/* Helper macros */ +#define SW_RST_REQ_INIT 0x1 +#endif + +#ifdef NXP_RCPM_ADDR +/* RCPM Register Offsets */ +#define RCPM_PCPH20SETR_OFFSET 0x0D4 +#define RCPM_PCPH20CLRR_OFFSET 0x0D8 +#define RCPM_POWMGTCSR_OFFSET 0x130 +#define RCPM_IPPDEXPCR0_OFFSET 0x140 +#define RCPM_POWMGTCSR_LPM20_REQ 0x00100000 + +#define RCPM2_IPSTPCR0_OFFSET 0x8 +#define RCPM2_IPSTPCR1_OFFSET 0xC +#define RCPM2_IPSTPCR2_OFFSET 0x10 +#define RCPM2_IPSTPCR3_OFFSET 0x14 +#define RCPM2_IPSTPCR4_OFFSET 0x28 + +#define RCPM2_IPSTPACKR0_OFFSET 0x18 +#define RCPM2_IPSTPACKR1_OFFSET 0x1C +#define RCPM2_IPSTPACKR2_OFFSET 0x20 +#define RCPM2_IPSTPACKR3_OFFSET 0x24 +#define RCPM2_IPSTPACKR4_OFFSET 0x2C +#define RCPM2_POWMGTDCR_OFFSET 0x0 + +/* bitfield masks */ +#define POWMGTDCR_OVRD_EN 0x80000000 + +#endif /* NXP_RCPM_ADDR */ + +#define DCFG_SBEESR2_ADDR 0x20140534 +#define DCFG_MBEESR2_ADDR 0x20140544 +/* SBEESR and MBEESR bit mask */ +#define OCRAM_EESR_MASK 0x00000060 + +#endif /* SOC_DEFAULT_HELPER_MACROS_H */ diff --git a/plat/nxp/common/include/default/ch_3/soc_default_base_addr.h b/plat/nxp/common/include/default/ch_3/soc_default_base_addr.h new file mode 100644 index 0000000..8d64f04 --- /dev/null +++ b/plat/nxp/common/include/default/ch_3/soc_default_base_addr.h @@ -0,0 +1,100 @@ +/* + * Copyright 2021-2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#ifndef SOC_DEFAULT_BASE_ADDR_H +#define SOC_DEFAULT_BASE_ADDR_H + +/* CCSR mmu_def.h */ +#define NXP_CCSR_ADDR 0x1000000 +#define NXP_CCSR_SIZE 0xF000000 + +#define NXP_DCSR_ADDR 0x700000000 +#define NXP_DCSR_SIZE 0x40000000 + +/* Quad SPI Region #1 base address */ +#define NXP_QSPI_FLASH_ADDR 0x20000000 + +/* IFC Region #1 base address */ +#define NXP_NOR_FLASH_ADDR 
0x30000000 + +/* MMU 500 */ +#define NXP_SMMU_ADDR 0x05000000 + +#define NXP_SNVS_ADDR 0x01E90000 + +#define NXP_DCFG_ADDR 0x01E00000 +#define NXP_PMU_CCSR_ADDR 0x01E30000 +#define NXP_PMU_DCSR_ADDR 0x700123000 +#define NXP_PMU_ADDR NXP_PMU_CCSR_ADDR +#define NXP_SFP_ADDR 0x01E80000 +#define NXP_SCFG_ADDR 0x01FC0000 +#define NXP_I2C_ADDR 0x02000000 +#define NXP_ESDHC_ADDR 0x02140000 +#define NXP_ESDHC2_ADDR 0x02150000 +#ifndef NXP_UART_ADDR +#define NXP_UART_ADDR 0x021C0500 +#endif +#ifndef NXP_UART1_ADDR +#define NXP_UART1_ADDR 0x021C0600 +#endif + +#define NXP_GPIO1_ADDR 0x02300000 +#define NXP_GPIO2_ADDR 0x02310000 +#define NXP_GPIO3_ADDR 0x02320000 +#define NXP_GPIO4_ADDR 0x02330000 + +#define NXP_WDOG1_NS_ADDR 0x02390000 +#define NXP_WDOG2_NS_ADDR 0x023A0000 +#define NXP_WDOG1_TZ_ADDR 0x023B0000 +#define NXP_WDOG2_TZ_ADDR 0x023C0000 + +#define NXP_TIMER_STATUS_ADDR 0x023F0000 + +#define NXP_GICD_ADDR 0x06000000 +#define NXP_GICR_ADDR 0x06100000 +#define NXP_GICR_SGI_ADDR 0x06110000 + +#define NXP_CAAM_ADDR 0x08000000 + +#define NXP_TZC_ADDR 0x01100000 +#define NXP_TZC2_ADDR 0x01110000 +#define NXP_TZC3_ADDR 0x01120000 + +#define NXP_RESET_ADDR 0x01E60000 +#define NXP_SEC_REGFILE_ADDR 0x01E88000 + +#define NXP_RST_ADDR 0x01E88000 + +/* DDR memory Map */ +#define NXP_DDR_ADDR 0x01080000 +#define NXP_DDR2_ADDR 0x01090000 +#define NXP_DDR3_ADDR 0x08210000 + +/* QuadSPI base address */ +#define NXP_QSPI_ADDR 0x020C0000 +/* IFC base address */ +#define NXP_IFC_ADDR 0x02240000 + +/* CCI400 base address */ +#define NXP_CCI_ADDR 0x04090000 + +/* Global Generic Reference Timer base address */ +#define NXP_TIMER_ADDR 0x023E0000 + +/* OCRAM TZPC base address */ +#define NXP_OCRAM_TZPC_ADDR 0x02200000 + +#define NXP_EPU_ADDR 0x700060000 + +#define NXP_CCN_ADDR 0x04000000 +#define NXP_CCN_HNI_ADDR 0x04080000 +#define NXP_CCN_HN_F_0_ADDR 0x04200000 +#define NXP_CCN_HN_F_1_ADDR 0x04210000 + +#define TPMWAKEMR0_ADDR 0x700123c50 + +#endif /* SOC_DEFAULT_BASE_ADDR_H */ diff --git a/plat/nxp/common/include/default/ch_3/soc_default_helper_macros.h b/plat/nxp/common/include/default/ch_3/soc_default_helper_macros.h new file mode 100644 index 0000000..8e68367 --- /dev/null +++ b/plat/nxp/common/include/default/ch_3/soc_default_helper_macros.h @@ -0,0 +1,98 @@ +/* + * Copyright 2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef SOC_DEFAULT_HELPER_MACROS_H +#define SOC_DEFAULT_HELPER_MACROS_H + +#ifdef NXP_OCRAM_TZPC_ADDR +#define TZPC_BLOCK_SIZE 0x1000 +#endif + +/* Reset block register offsets */ +#ifdef NXP_RESET_ADDR + +/* Register Offset */ +#define RST_RSTCR_OFFSET 0x0 +#define RST_RSTRQMR1_OFFSET 0x10 +#define RST_RSTRQSR1_OFFSET 0x18 +#define BRR_OFFSET 0x60 + +/* helper macros */ +#define RSTRQMR_RPTOE_MASK (1 << 19) +#endif /* NXP_RESET_ADDR */ + +#define PCIeRC_RN_I_NODE_ID_OFFSET 0x8 +#define PoS_CONTROL_REG_OFFSET 0x0 +#define POS_EARLY_WR_COMP_EN 0x20 +#define HNI_POS_EN 0x01 +#define POS_TERMINATE_BARRIERS 0x10 +#define SERIALIZE_DEV_nGnRnE_WRITES 0x200 +#define ENABLE_ERR_SIGNAL_TO_MN 0x4 +#define ENABLE_RESERVE_BIT53 0x400 +#define ENABLE_WUO 0x10 + +#define PORT_S0_CTRL_REG_RNI 0x010 +#define PORT_S1_CTRL_REG_RNI 0x110 +#define PORT_S2_CTRL_REG_RNI 0x210 +#define ENABLE_FORCE_RD_QUO 0x20 +#define QOS_SETTING 0x00FF000C + +/* epu register offsets and values */ +#define EPU_EPGCR_OFFSET 0x0 +#define EPU_EPIMCR10_OFFSET 0x128 +#define EPU_EPCTR10_OFFSET 0xa28 +#define EPU_EPCCR10_OFFSET 0x828 +#ifndef EPU_EPCCR10_VAL +#define EPU_EPCCR10_VAL 0xb2800000 +#endif +#define 
EPU_EPIMCR10_VAL 0xba000000 +#define EPU_EPCTR10_VAL 0x0 +#define EPU_EPGCR_VAL (1 << 31) + +#ifdef NXP_CCN_ADDR +#define NXP_CCN_HN_F_1_ADDR 0x04210000 + +#define CCN_HN_F_SAM_NODEID_MASK 0x7f +#define CCN_HN_F_SNP_DMN_CTL_OFFSET 0x200 +#define CCN_HN_F_SNP_DMN_CTL_SET_OFFSET 0x210 +#define CCN_HN_F_SNP_DMN_CTL_CLR_OFFSET 0x220 +#define CCN_HN_F_SNP_DMN_CTL_MASK 0x80a00 +#define CCN_HNF_NODE_COUNT 8 +#define CCN_HNF_OFFSET 0x10000 + +#define SA_AUX_CTRL_REG_OFFSET 0x500 +#define NUM_HNI_NODE 2 +#define CCN_HNI_MEMORY_MAP_SIZE 0x10000 + +#define PCIeRC_RN_I_NODE_ID_OFFSET 0x8 +#define PoS_CONTROL_REG_OFFSET 0x0 +#define POS_EARLY_WR_COMP_EN 0x20 +#define HNI_POS_EN 0x01 +#define POS_TERMINATE_BARRIERS 0x10 +#define SERIALIZE_DEV_nGnRnE_WRITES 0x200 +#define ENABLE_ERR_SIGNAL_TO_MN 0x4 +#define ENABLE_RESERVE_BIT53 0x400 +#define ENABLE_WUO 0x10 +#endif + +/* reset register bit */ +#define RSTRQMR_RPTOE_MASK (1 << 19) + +/* secmon register offsets and bitfields */ +#define SECMON_HPCOMR_OFFSET 0x4 +#define SECMON_HPCOMR_NPSWAEN 0x80000000 + +/* Secure-Register-File register offsets and bit masks */ +#ifdef NXP_RST_ADDR +/* Register Offset */ +#define CORE_HOLD_OFFSET 0x140 +#endif + +#define DCFG_SBEESR2_ADDR 0x00100534 +#define DCFG_MBEESR2_ADDR 0x00100544 +/* SBEESR and MBEESR bit mask */ +#define OCRAM_EESR_MASK 0x00000008 + +#endif /* SOC_DEFAULT_HELPER_MACROS_H */ diff --git a/plat/nxp/common/include/default/ch_3_2/soc_default_base_addr.h b/plat/nxp/common/include/default/ch_3_2/soc_default_base_addr.h new file mode 100644 index 0000000..0a4228b --- /dev/null +++ b/plat/nxp/common/include/default/ch_3_2/soc_default_base_addr.h @@ -0,0 +1,88 @@ +/* + * Copyright 2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#ifndef SOC_DEFAULT_BASE_ADDR_H +#define SOC_DEFAULT_BASE_ADDR_H + +/* CCSR mmu_def.h */ +#define NXP_CCSR_ADDR 0x1000000 +#define NXP_CCSR_SIZE 0xF000000 + +#define NXP_DCSR_ADDR 0x700000000 +#define NXP_DCSR_SIZE 0x40000000 + +/* Flex-SPI controller address */ +#define NXP_FLEXSPI_ADDR 0x020C0000 +/* Flex-SPI Flash Start address */ +#define NXP_FLEXSPI_FLASH_ADDR 0x20000000 + +/* MMU 500 soc.c*/ +#define NXP_SMMU_ADDR 0x05000000 + +/* CCI400 base address */ +#define NXP_CCI_ADDR 0x04090000 + +#define NXP_SNVS_ADDR 0x01E90000 + +#define NXP_DCFG_ADDR 0x01E00000 +#define NXP_PMU_CCSR_ADDR 0x01E30000 +#define NXP_PMU_DCSR_ADDR 0x700123000 +#define NXP_PMU_ADDR NXP_PMU_CCSR_ADDR +#define NXP_SFP_ADDR 0x01E80000 +#define NXP_SCFG_ADDR 0x01FC0000 +#define NXP_I2C_ADDR 0x02000000 +#define NXP_ESDHC_ADDR 0x02140000 +#define NXP_ESDHC2_ADDR 0x02150000 +#define NXP_UART_ADDR 0x021C0000 +#define NXP_UART1_ADDR 0x021D0000 + +#define NXP_GPIO1_ADDR 0x02300000 +#define NXP_GPIO2_ADDR 0x02310000 +#define NXP_GPIO3_ADDR 0x02320000 +#define NXP_GPIO4_ADDR 0x02330000 + +#define NXP_WDOG1_NS_ADDR 0x02390000 +#define NXP_WDOG2_NS_ADDR 0x023A0000 +#define NXP_WDOG1_TZ_ADDR 0x023B0000 +#define NXP_WDOG2_TZ_ADDR 0x023C0000 + +#define NXP_TIMER_STATUS_ADDR 0x023F0000 + +#define NXP_GICD_ADDR 0x06000000 +#define NXP_GICR_ADDR 0x06200000 +#define NXP_GICR_SGI_ADDR 0x06210000 + +#define NXP_CAAM_ADDR 0x08000000 + +#define NXP_TZC_ADDR 0x01100000 +#define NXP_TZC2_ADDR 0x01110000 +#define NXP_TZC3_ADDR 0x01120000 + +#define NXP_TIMER_ADDR 0x023E0000 + +#define NXP_RESET_ADDR 0x01E60000 +#define NXP_SEC_REGFILE_ADDR 0x01E88000 +#define NXP_RST_ADDR 0x01E88000 + +#define TPMWAKEMR0_ADDR 0x700123c50 +#define TZPC_BLOCK_SIZE 0x1000 + +#define NXP_TZC_ADDR 0x01100000 +#define NXP_TZC2_ADDR 
0x01110000 +#define NXP_TZC3_ADDR 0x01120000 +#define NXP_TZC4_ADDR 0x01130000 +#define NXP_DDR_ADDR 0x01080000 +#define NXP_DDR2_ADDR 0x01090000 + +#define NXP_OCRAM_TZPC_ADDR 0x02200000 + +#define NXP_CCN_ADDR 0x04000000 +#define NXP_CCN_HNI_ADDR 0x04080000 +#define NXP_CCN_HN_F_0_ADDR 0x04200000 + +#define NXP_EPU_ADDR 0x700060000 +#endif /* SOC_DEFAULT_BASE_ADDR_H */ diff --git a/plat/nxp/common/include/default/ch_3_2/soc_default_helper_macros.h b/plat/nxp/common/include/default/ch_3_2/soc_default_helper_macros.h new file mode 100644 index 0000000..1edd28d --- /dev/null +++ b/plat/nxp/common/include/default/ch_3_2/soc_default_helper_macros.h @@ -0,0 +1,87 @@ +/* + * Copyright 2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#ifndef SOC_DEFAULT_HELPER_MACROS_H +#define SOC_DEFAULT_HELPER_MACROS_H + +#ifdef NXP_OCRAM_TZPC_ADDR + +/* 0x1: means 4 KB + * 0x2: means 8 KB + */ +#define TZPC_BLOCK_SIZE 0x1000 +#endif + +/* DDR controller offsets and defines */ +#ifdef NXP_DDR_ADDR + +#define DDR_CFG_2_OFFSET 0x114 +#define CFG_2_FORCE_REFRESH 0x80000000 + +#endif /* NXP_DDR_ADDR */ + + /* Reset block register offsets */ +#ifdef NXP_RESET_ADDR + +/* Register Offset */ +#define RST_RSTCR_OFFSET 0x0 +#define RST_RSTRQMR1_OFFSET 0x10 +#define RST_RSTRQSR1_OFFSET 0x18 +#define BRR_OFFSET 0x60 + +/* helper macros */ +#define RSTRQSR1_SWRR 0x800 +#define RSTRQMR_RPTOE_MASK (1 << 19) + +#endif /* NXP_RESET_ADDR */ + +/* secmon register offsets and bitfields */ +#define SECMON_HPCOMR_OFFSET 0x4 +#define SECMON_HPCOMR_NPSWAEN 0x80000000 + +/* Secure-Register-File register offsets and bit masks */ +#ifdef NXP_RST_ADDR +/* Register Offset */ +#define CORE_HOLD_OFFSET 0x140 +#define RSTCNTL_OFFSET 0x180 + +/* Helper macros */ +#define SW_RST_REQ_INIT 0x1 +#endif + +#ifdef NXP_CCN_ADDR +#define NXP_CCN_HN_F_1_ADDR 0x04210000 + +#define CCN_HN_F_SAM_NODEID_MASK 0x7f +#define CCN_HN_F_SNP_DMN_CTL_OFFSET 0x200 +#define CCN_HN_F_SNP_DMN_CTL_SET_OFFSET 0x210 +#define CCN_HN_F_SNP_DMN_CTL_CLR_OFFSET 0x220 +#define CCN_HN_F_SNP_DMN_CTL_MASK 0x80a00 +#define CCN_HNF_NODE_COUNT 8 +#define CCN_HNF_OFFSET 0x10000 + +#define SA_AUX_CTRL_REG_OFFSET 0x500 +#define NUM_HNI_NODE 2 +#define CCN_HNI_MEMORY_MAP_SIZE 0x10000 + +#define PCIeRC_RN_I_NODE_ID_OFFSET 0x8 +#define PoS_CONTROL_REG_OFFSET 0x0 +#define POS_EARLY_WR_COMP_EN 0x20 +#define HNI_POS_EN 0x01 +#define POS_TERMINATE_BARRIERS 0x10 +#define SERIALIZE_DEV_nGnRnE_WRITES 0x200 +#define ENABLE_ERR_SIGNAL_TO_MN 0x4 +#define ENABLE_RESERVE_BIT53 0x400 +#define ENABLE_WUO 0x10 +#endif /* NXP_CCN_ADDR */ + +#define DCFG_SBEESR2_ADDR 0x00100534 +#define DCFG_MBEESR2_ADDR 0x00100544 +/* SBEESR and MBEESR bit mask */ +#define OCRAM_EESR_MASK 0x00000008 + +#endif /* SOC_DEFAULT_HELPER_MACROS_H */ diff --git a/plat/nxp/common/include/default/plat_default_def.h b/plat/nxp/common/include/default/plat_default_def.h new file mode 100644 index 0000000..43320bb --- /dev/null +++ b/plat/nxp/common/include/default/plat_default_def.h @@ -0,0 +1,172 @@ +/* + * Copyright 2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#ifndef PLAT_DEFAULT_DEF_H +#define PLAT_DEFAULT_DEF_H + +/* + * Platform binary types for linking + */ +#ifdef __aarch64__ +#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64" +#define PLATFORM_LINKER_ARCH aarch64 +#else +#define PLATFORM_LINKER_FORMAT "elf32-littlearm" +#define PLATFORM_LINKER_ARCH arm +#endif /* __aarch64__ */ + +#define LS_BL31_PLAT_PARAM_VAL 0x0f1e2d3c4b5a6978ULL + +/* NXP Platforms have DRAM divided into banks. 
+ * DRAM0 Bank: Maximum size of this bank is fixed to 2GB + * DRAM1 Bank: Greater than 2GB belongs to bank1 and size of bank1 varies from + * one platform to other platform. + * DRAMn Bank: + * + * Except a few, all the platforms have 2GB size as DRAM0 BANK. + * Hence common for all the platforms. + * For platforms where DRAM0 Size is < 2GB, it is defined in platform_def.h + */ +#ifndef PLAT_DEF_DRAM0_SIZE +#define PLAT_DEF_DRAM0_SIZE 0x80000000 /* 2G */ +#endif + +/* This is common for all platforms where: */ +#ifndef NXP_NS_DRAM_ADDR +#define NXP_NS_DRAM_ADDR NXP_DRAM0_ADDR +#endif + +/* 1 MB is reserved for dma of sd */ +#ifndef NXP_SD_BLOCK_BUF_SIZE +#define NXP_SD_BLOCK_BUF_SIZE (1 * 1024 * 1024) +#endif + +/* 64MB is reserved for Secure memory */ +#ifndef NXP_SECURE_DRAM_SIZE +#define NXP_SECURE_DRAM_SIZE (64 * 1024 * 1024) +#endif + +/* 2M Secure EL1 Payload Shared Memory */ +#ifndef NXP_SP_SHRD_DRAM_SIZE +#define NXP_SP_SHRD_DRAM_SIZE (2 * 1024 * 1024) +#endif + +#ifndef NXP_NS_DRAM_SIZE +/* Non secure memory */ +#define NXP_NS_DRAM_SIZE (PLAT_DEF_DRAM0_SIZE - \ + (NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) +#endif + +#ifndef NXP_SD_BLOCK_BUF_ADDR +#define NXP_SD_BLOCK_BUF_ADDR (NXP_NS_DRAM_ADDR) +#endif + +#ifndef NXP_SECURE_DRAM_ADDR +#ifdef TEST_BL31 +#define NXP_SECURE_DRAM_ADDR 0 +#else +#define NXP_SECURE_DRAM_ADDR (NXP_NS_DRAM_ADDR + PLAT_DEF_DRAM0_SIZE - \ + (NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) +#endif +#endif + +#ifndef NXP_SP_SHRD_DRAM_ADDR +#define NXP_SP_SHRD_DRAM_ADDR (NXP_NS_DRAM_ADDR + PLAT_DEF_DRAM0_SIZE - \ + NXP_SP_SHRD_DRAM_SIZE) +#endif + +#ifndef BL31_BASE +/* 2 MB reserved in secure memory for DDR */ +#define BL31_BASE NXP_SECURE_DRAM_ADDR +#endif + +#ifndef BL31_SIZE +#define BL31_SIZE (0x200000) +#endif + +#ifndef BL31_LIMIT +#define BL31_LIMIT (BL31_BASE + BL31_SIZE) +#endif + +/* Put BL32 in secure memory */ +#ifndef BL32_BASE +#define BL32_BASE (NXP_SECURE_DRAM_ADDR + BL31_SIZE) +#endif + +#ifndef BL32_LIMIT +#define BL32_LIMIT (NXP_SECURE_DRAM_ADDR + \ + NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE) +#endif + +/* BL33 memory region */ +/* Hardcoded based on current address in u-boot */ +#ifndef BL33_BASE +#define BL33_BASE 0x82000000 +#endif + +#ifndef BL33_LIMIT +#define BL33_LIMIT (NXP_NS_DRAM_ADDR + NXP_NS_DRAM_SIZE) +#endif + +/* + * FIP image defines - Offset at which FIP Image would be present + * Image would include Bl31 , Bl33 and Bl32 (optional) + */ +#ifdef POLICY_FUSE_PROVISION +#ifndef FUSE_BUF +#define FUSE_BUF ULL(0x81000000) +#endif + +#ifndef FUSE_SZ +#define FUSE_SZ 0x80000 +#endif +#endif + +#ifndef MAX_FIP_DEVICES +#define MAX_FIP_DEVICES 2 +#endif + +#ifndef PLAT_FIP_OFFSET +#define PLAT_FIP_OFFSET 0x100000 +#endif + +#ifndef PLAT_FIP_MAX_SIZE +#define PLAT_FIP_MAX_SIZE 0x400000 +#endif + +/* Check if this size can be determined from array size */ +#if defined(IMAGE_BL2) +#ifndef MAX_MMAP_REGIONS +#define MAX_MMAP_REGIONS 8 +#endif +#ifndef MAX_XLAT_TABLES +#define MAX_XLAT_TABLES 6 +#endif +#elif defined(IMAGE_BL31) +#ifndef MAX_MMAP_REGIONS +#define MAX_MMAP_REGIONS 9 +#endif +#ifndef MAX_XLAT_TABLES +#define MAX_XLAT_TABLES 9 +#endif +#elif defined(IMAGE_BL32) +#ifndef MAX_MMAP_REGIONS +#define MAX_MMAP_REGIONS 8 +#endif +#ifndef MAX_XLAT_TABLES +#define MAX_XLAT_TABLES 9 +#endif +#endif + +/* + * ID of the secure physical generic timer interrupt used by the BL32. 
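+ * (INTID 29 is the PPI conventionally assigned to the secure physical
+ * timer, hence the default value below.)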
+ */ +#ifndef BL32_IRQ_SEC_PHY_TIMER +#define BL32_IRQ_SEC_PHY_TIMER 29 +#endif + +#endif /* PLAT_DEFAULT_DEF_H */ diff --git a/plat/nxp/common/nv_storage/nv_storage.mk b/plat/nxp/common/nv_storage/nv_storage.mk new file mode 100644 index 0000000..dddba5f --- /dev/null +++ b/plat/nxp/common/nv_storage/nv_storage.mk @@ -0,0 +1,29 @@ +# +# Copyright 2020 NXP +# +# SPDX-License-Identifier: BSD-3-Clause +# + +# NXP Non-Volatile data flag storage used and then cleared by SW on boot-up + +$(eval $(call add_define,NXP_NV_SW_MAINT_LAST_EXEC_DATA)) + +ifeq ($(NXP_COINED_BB),yes) +$(eval $(call add_define,NXP_COINED_BB)) +# BL2 : To read the reset cause from LP SECMON GPR register +# BL31: To write the reset cause to LP SECMON GPR register +$(eval $(call SET_NXP_MAKE_FLAG,SNVS_NEEDED,BL_COMM)) + +# BL2: DDR training data is stored on Flexspi NOR. +ifneq (${BOOT_MODE},flexspi_nor) +$(eval $(call SET_NXP_MAKE_FLAG,XSPI_NEEDED,BL2)) +endif + +else +$(eval $(call add_define_val,DEFAULT_NV_STORAGE_BASE_ADDR,'${BL2_BIN_XSPI_NOR_END_ADDRESS} - 2 * ${NXP_XSPI_NOR_UNIT_SIZE}')) +$(eval $(call SET_NXP_MAKE_FLAG,XSPI_NEEDED,BL_COMM)) +endif + +NV_STORAGE_INCLUDES += -I${PLAT_COMMON_PATH}/nv_storage + +NV_STORAGE_SOURCES += ${PLAT_COMMON_PATH}/nv_storage/plat_nv_storage.c diff --git a/plat/nxp/common/nv_storage/plat_nv_storage.c b/plat/nxp/common/nv_storage/plat_nv_storage.c new file mode 100644 index 0000000..af3b966 --- /dev/null +++ b/plat/nxp/common/nv_storage/plat_nv_storage.c @@ -0,0 +1,121 @@ +/* + * Copyright 2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <assert.h> +#include <errno.h> +#include <stddef.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include <platform_def.h> +#include <common/debug.h> +#ifndef NXP_COINED_BB +#include <flash_info.h> +#include <fspi.h> +#include <fspi_api.h> +#endif +#include <lib/mmio.h> +#ifdef NXP_COINED_BB +#include <snvs.h> +#else +#include <xspi_error_codes.h> +#endif + +#include <plat_nv_storage.h> + +/*This structure will be a static structure and + * will be populated as first step of BL2 booting-up. + * fspi_strorage.c . To be located in the fspi driver folder. + */ + +static nv_app_data_t nv_app_data; + +int read_nv_app_data(void) +{ + int ret = 0; + +#ifdef NXP_COINED_BB + uint8_t *nv_app_data_array = (uint8_t *) &nv_app_data; + uint8_t offset = 0U; + + ret = snvs_read_app_data(); + do { + nv_app_data_array[offset] = snvs_read_app_data_bit(offset); + offset++; + + } while (offset < APP_DATA_MAX_OFFSET); + snvs_clear_app_data(); +#else + uintptr_t nv_base_addr = NV_STORAGE_BASE_ADDR; + + ret = fspi_init(NXP_FLEXSPI_ADDR, NXP_FLEXSPI_FLASH_ADDR); + + if (ret != XSPI_SUCCESS) { + ERROR("Failed to initialized driver flexspi-nor.\n"); + ERROR("exiting warm-reset request.\n"); + return -ENODEV; + } + + xspi_read(nv_base_addr, + (uint32_t *)&nv_app_data, sizeof(nv_app_data_t)); + xspi_sector_erase((uint32_t) nv_base_addr, + F_SECTOR_ERASE_SZ); +#endif + return ret; +} + +int wr_nv_app_data(int data_offset, + uint8_t *data, + int data_size) +{ + int ret = 0; +#ifdef NXP_COINED_BB +#if !TRUSTED_BOARD_BOOT + snvs_disable_zeroize_lp_gpr(); +#endif + /* In case LP SecMon General purpose register, + * only 1 bit flags can be saved. 
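+ * e.g. a call such as wr_nv_app_data(WARM_RESET_FLAG_OFFSET, &flag, 1)
+ * with flag == DEFAULT_SET_VALUE is accepted; anything wider than one
+ * byte, or carrying any other value, is rejected below and must use the
+ * flash-backed path instead.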
+ */ + if ((data_size > 1) || (*data != DEFAULT_SET_VALUE)) { + ERROR("Only binary value is allowed to be written.\n"); + ERROR("Use flash instead of SNVS GPR as NV location.\n"); + return -ENODEV; + } + snvs_write_app_data_bit(data_offset); +#else + uint8_t read_val[sizeof(nv_app_data_t)]; + uint8_t ready_to_write_val[sizeof(nv_app_data_t)]; + uintptr_t nv_base_addr = NV_STORAGE_BASE_ADDR; + + assert((nv_base_addr + data_offset + data_size) > (nv_base_addr + F_SECTOR_ERASE_SZ)); + + ret = fspi_init(NXP_FLEXSPI_ADDR, NXP_FLEXSPI_FLASH_ADDR); + + if (ret != XSPI_SUCCESS) { + ERROR("Failed to initialized driver flexspi-nor.\n"); + ERROR("exiting warm-reset request.\n"); + return -ENODEV; + } + + ret = xspi_read(nv_base_addr + data_offset, (uint32_t *)read_val, data_size); + + memset(ready_to_write_val, READY_TO_WRITE_VALUE, ARRAY_SIZE(ready_to_write_val)); + + if (memcmp(read_val, ready_to_write_val, data_size) == 0) { + xspi_write(nv_base_addr + data_offset, data, data_size); + } +#endif + + return ret; +} + +const nv_app_data_t *get_nv_data(void) +{ + return (const nv_app_data_t *) &nv_app_data; +} diff --git a/plat/nxp/common/nv_storage/plat_nv_storage.h b/plat/nxp/common/nv_storage/plat_nv_storage.h new file mode 100644 index 0000000..1f5264a --- /dev/null +++ b/plat/nxp/common/nv_storage/plat_nv_storage.h @@ -0,0 +1,40 @@ +/* + * Copyright 2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#ifndef PLAT_NV_STRG_H +#define PLAT_NV_STRG_H + +#define DEFAULT_SET_VALUE 0xA1 +#define READY_TO_WRITE_VALUE 0xFF + +#ifndef NV_STORAGE_BASE_ADDR +#define NV_STORAGE_BASE_ADDR DEFAULT_NV_STORAGE_BASE_ADDR +#endif + +typedef struct { +uint8_t warm_rst_flag; +uint8_t wdt_rst_flag; +uint8_t dummy[2]; +} nv_app_data_t; + + +/*below enum and above structure should be in-sync. */ +enum app_data_offset { + WARM_RESET_FLAG_OFFSET, + WDT_RESET_FLAG_OFFSET, + APP_DATA_MAX_OFFSET, +}; + +int read_nv_app_data(void); + +int wr_nv_app_data(int data_offset, + uint8_t *data, + int data_size); + +const nv_app_data_t *get_nv_data(void); + +#endif /* PLAT_NV_STRG_H */ diff --git a/plat/nxp/common/ocram/aarch64/ocram.S b/plat/nxp/common/ocram/aarch64/ocram.S new file mode 100644 index 0000000..ec53341 --- /dev/null +++ b/plat/nxp/common/ocram/aarch64/ocram.S @@ -0,0 +1,71 @@ +/* + * Copyright 2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <asm_macros.S> + +#include <soc_default_base_addr.h> +#include <soc_default_helper_macros.h> + +.global ocram_init + +/* + * void ocram_init(uintptr_t start_addr, size_t size) + * + * This function will do OCRAM ECC. + * OCRAM is initialized with 64-bit writes and then a write + * performed to address 0x0010_0534 with the value 0x0000_0008. + * + * x0: start_addr + * x1: size in bytes + * Called from C + */ + +func ocram_init + /* save the aarch32/64 non-volatile registers */ + stp x4, x5, [sp, #-16]! + stp x6, x7, [sp, #-16]! + stp x8, x9, [sp, #-16]! + stp x10, x11, [sp, #-16]! + stp x12, x13, [sp, #-16]! + stp x18, x30, [sp, #-16]! 
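+
+ /* Each 64-byte line in [start_addr, start_addr + size) is read and
+ * written back by the loop below; the write-back (re)generates the
+ * ECC check bits for the whole region.
+ */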
+ + /* convert bytes to 64-byte chunks */ + lsr x1, x1, #6 +1: + /* for each location, read and write-back */ + dc ivac, x0 + dsb sy + ldp x4, x5, [x0] + ldp x6, x7, [x0, #16] + ldp x8, x9, [x0, #32] + ldp x10, x11, [x0, #48] + stp x4, x5, [x0] + stp x6, x7, [x0, #16] + stp x8, x9, [x0, #32] + stp x10, x11, [x0, #48] + dc cvac, x0 + + sub x1, x1, #1 + cbz x1, 2f + add x0, x0, #64 + b 1b +2: + /* Clear OCRAM ECC status bit in SBEESR2 and MBEESR2 */ + ldr w1, =OCRAM_EESR_MASK + ldr x0, =DCFG_SBEESR2_ADDR + str w1, [x0] + ldr x0, =DCFG_MBEESR2_ADDR + str w1, [x0] + + /* restore the aarch32/64 non-volatile registers */ + ldp x18, x30, [sp], #16 + ldp x12, x13, [sp], #16 + ldp x10, x11, [sp], #16 + ldp x8, x9, [sp], #16 + ldp x6, x7, [sp], #16 + ldp x4, x5, [sp], #16 + ret +endfunc ocram_init diff --git a/plat/nxp/common/ocram/ocram.h b/plat/nxp/common/ocram/ocram.h new file mode 100644 index 0000000..479de61 --- /dev/null +++ b/plat/nxp/common/ocram/ocram.h @@ -0,0 +1,13 @@ +/* + * Copyright 2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#ifndef OCRAM_H +#define OCRAM_H + +void ocram_init(uintptr_t start_addr, size_t size); + +#endif /* OCRAM_H */ diff --git a/plat/nxp/common/ocram/ocram.mk b/plat/nxp/common/ocram/ocram.mk new file mode 100644 index 0000000..c77bd4a --- /dev/null +++ b/plat/nxp/common/ocram/ocram.mk @@ -0,0 +1,14 @@ +# +# Copyright 2021 NXP +# +# SPDX-License-Identifier: BSD-3-Clause +# +# + +PLAT_OCRAM_PATH := $(PLAT_COMMON_PATH)/ocram + +OCRAM_SOURCES := ${PLAT_OCRAM_PATH}/$(ARCH)/ocram.S + +BL2_SOURCES += ${OCRAM_SOURCES} + +PLAT_INCLUDES += -I${PLAT_COMMON_PATH}/ocram diff --git a/plat/nxp/common/plat_make_helper/plat_build_macros.mk b/plat/nxp/common/plat_make_helper/plat_build_macros.mk new file mode 100644 index 0000000..bba5e36 --- /dev/null +++ b/plat/nxp/common/plat_make_helper/plat_build_macros.mk @@ -0,0 +1,11 @@ +# +# Copyright (c) 2020, NXP. 
+# +# SPDX-License-Identifier: BSD-3-Clause +# +# + +define SET_NXP_MAKE_FLAG +$1 := yes +$2_$1 := yes +endef diff --git a/plat/nxp/common/plat_make_helper/plat_common_def.mk b/plat/nxp/common/plat_make_helper/plat_common_def.mk new file mode 100644 index 0000000..86dacf8 --- /dev/null +++ b/plat/nxp/common/plat_make_helper/plat_common_def.mk @@ -0,0 +1,103 @@ +# Copyright 2020-2021 NXP +# +# SPDX-License-Identifier: BSD-3-Clause +# + +# Include build macros, for example: SET_NXP_MAKE_FLAG +include plat/nxp/common/plat_make_helper/plat_build_macros.mk + +# Adding platform specific defines + +$(eval $(call add_define_val,BOARD,'"${BOARD}"')) + +ifeq (${POVDD_ENABLE},yes) +$(eval $(call add_define,CONFIG_POVDD_ENABLE)) +endif + +ifneq (${FLASH_TYPE},) +$(eval $(call add_define,CONFIG_${FLASH_TYPE})) +endif + +ifneq (${XSPI_FLASH_SZ},) +$(eval $(call add_define_val,NXP_FLEXSPI_FLASH_SIZE,${XSPI_FLASH_SZ})) +endif + +ifneq (${QSPI_FLASH_SZ},) +$(eval $(call add_define_val,NXP_QSPI_FLASH_SIZE,${QSPI_FLASH_SZ})) +endif + +ifneq (${NOR_FLASH_SZ},) +$(eval $(call add_define_val,NXP_NOR_FLASH_SIZE,${NOR_FLASH_SZ})) +endif + + +ifneq (${FSPI_ERASE_4K},) +$(eval $(call add_define_val,CONFIG_FSPI_ERASE_4K,${FSPI_ERASE_4K})) +endif + +ifneq (${NUM_OF_DDRC},) +$(eval $(call add_define_val,NUM_OF_DDRC,${NUM_OF_DDRC})) +endif + +ifeq (${CONFIG_DDR_NODIMM},1) +$(eval $(call add_define,CONFIG_DDR_NODIMM)) +DDRC_NUM_DIMM := 1 +endif + +ifneq (${DDRC_NUM_DIMM},) +$(eval $(call add_define_val,DDRC_NUM_DIMM,${DDRC_NUM_DIMM})) +endif + +ifneq (${DDRC_NUM_CS},) +$(eval $(call add_define_val,DDRC_NUM_CS,${DDRC_NUM_CS})) +endif + +ifeq (${DDR_ADDR_DEC},yes) +$(eval $(call add_define,CONFIG_DDR_ADDR_DEC)) +endif + +ifeq (${DDR_ECC_EN},yes) +$(eval $(call add_define,CONFIG_DDR_ECC_EN)) +endif + +ifeq (${CONFIG_STATIC_DDR},1) +$(eval $(call add_define,CONFIG_STATIC_DDR)) +endif + +# Platform can control the base address for non-volatile storage. +#$(eval $(call add_define_val,NV_STORAGE_BASE_ADDR,'${BL2_BIN_XSPI_NOR_END_ADDRESS} - 2 * ${NXP_XSPI_NOR_UNIT_SIZE}')) + +ifeq (${WARM_BOOT},yes) +$(eval $(call add_define_val,PHY_TRAINING_REGS_ON_FLASH,'${BL2_BIN_XSPI_NOR_END_ADDRESS} - ${NXP_XSPI_NOR_UNIT_SIZE}')) +endif + +# Selecting Boot Source for the TFA images. 
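+# For example (illustrative only), BOOT_MODE=sd expands the macro below to
+# roughly:
+#   $(eval $(call SET_NXP_MAKE_FLAG,SD_MMC_NEEDED,BL2))
+#   $(eval $(call add_define,SD_BOOT))
+# i.e. the SD/MMC driver is pulled into BL2 and SD_BOOT is defined, while a
+# boot mode outside SUPPORTED_BOOT_MODE hits the error branch at the bottom.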
+define add_boot_mode_define + ifeq ($(1),qspi) + $$(eval $$(call SET_NXP_MAKE_FLAG,QSPI_NEEDED,BL2)) + $$(eval $$(call add_define,QSPI_BOOT)) + else ifeq ($(1),sd) + $$(eval $$(call SET_NXP_MAKE_FLAG,SD_MMC_NEEDED,BL2)) + $$(eval $$(call add_define,SD_BOOT)) + else ifeq ($(1),emmc) + $$(eval $$(call SET_NXP_MAKE_FLAG,SD_MMC_NEEDED,BL2)) + $$(eval $$(call add_define,EMMC_BOOT)) + else ifeq ($(1),nor) + $$(eval $$(call SET_NXP_MAKE_FLAG,IFC_NOR_NEEDED,BL2)) + $$(eval $$(call add_define,NOR_BOOT)) + else ifeq ($(1),nand) + $$(eval $$(call SET_NXP_MAKE_FLAG,IFC_NAND_NEEDED,BL2)) + $$(eval $$(call add_define,NAND_BOOT)) + else ifeq ($(1),flexspi_nor) + $$(eval $$(call SET_NXP_MAKE_FLAG,XSPI_NEEDED,BL2)) + $$(eval $$(call add_define,FLEXSPI_NOR_BOOT)) + else + $$(error $(PLAT) Cannot Support Boot Mode: $(BOOT_MODE)) + endif +endef + +ifneq (,$(findstring $(BOOT_MODE),$(SUPPORTED_BOOT_MODE))) + $(eval $(call add_boot_mode_define,$(strip $(BOOT_MODE)))) +else + $(error $(PLAT) Un-supported Boot Mode = $(BOOT_MODE)) +endif diff --git a/plat/nxp/common/plat_make_helper/soc_common_def.mk b/plat/nxp/common/plat_make_helper/soc_common_def.mk new file mode 100644 index 0000000..52f2867 --- /dev/null +++ b/plat/nxp/common/plat_make_helper/soc_common_def.mk @@ -0,0 +1,117 @@ +# Copyright 2020-2022 NXP +# +# SPDX-License-Identifier: BSD-3-Clause +# + +# Adding SoC specific defines + +ifneq (${CACHE_LINE},) +$(eval $(call add_define_val,PLATFORM_CACHE_LINE_SHIFT,${CACHE_LINE})) +$(eval CACHE_WRITEBACK_GRANULE=$(shell echo $$((1 << $(CACHE_LINE))))) +$(eval $(call add_define_val,CACHE_WRITEBACK_GRANULE,$(CACHE_WRITEBACK_GRANULE))) +endif + +ifneq (${INTERCONNECT},) +$(eval $(call add_define,NXP_HAS_${INTERCONNECT})) +ifeq (${INTERCONNECT}, CCI400) +ICNNCT_ID := 0x420 +$(eval $(call add_define,ICNNCT_ID)) +endif +endif + +ifneq (${CHASSIS},) +$(eval $(call add_define,CONFIG_CHASSIS_${CHASSIS})) +endif + +ifneq (${PLAT_DDR_PHY},) +$(eval $(call add_define,NXP_DDR_${PLAT_DDR_PHY})) +endif + +ifneq (${PHYS_SYS},) +$(eval $(call add_define,CONFIG_PHYS_64BIT)) +endif + +ifneq (${CSF_HDR_SZ},) +$(eval $(call add_define_val,CSF_HDR_SZ,${CSF_HDR_SZ})) +endif + +ifneq (${OCRAM_START_ADDR},) +$(eval $(call add_define_val,NXP_OCRAM_ADDR,${OCRAM_START_ADDR})) +endif + +ifneq (${OCRAM_SIZE},) +$(eval $(call add_define_val,NXP_OCRAM_SIZE,${OCRAM_SIZE})) +endif + +ifneq (${NXP_ROM_RSVD},) +$(eval $(call add_define_val,NXP_ROM_RSVD,${NXP_ROM_RSVD})) +endif + +ifneq (${BL2_BASE},) +$(eval $(call add_define_val,BL2_BASE,${BL2_BASE})) +endif + +ifeq (${SEC_MEM_NON_COHERENT},yes) +$(eval $(call add_define,SEC_MEM_NON_COHERENT)) +endif + +ifneq (${NXP_ESDHC_ENDIANNESS},) +$(eval $(call add_define,NXP_ESDHC_${NXP_ESDHC_ENDIANNESS})) +endif + +ifneq (${NXP_SFP_VER},) +$(eval $(call add_define,NXP_SFP_VER_${NXP_SFP_VER})) +endif + +ifneq (${NXP_SFP_ENDIANNESS},) +$(eval $(call add_define,NXP_SFP_${NXP_SFP_ENDIANNESS})) +endif + +ifneq (${NXP_GPIO_ENDIANNESS},) +$(eval $(call add_define,NXP_GPIO_${NXP_GPIO_ENDIANNESS})) +endif + +ifneq (${NXP_SNVS_ENDIANNESS},) +$(eval $(call add_define,NXP_SNVS_${NXP_SNVS_ENDIANNESS})) +endif + +ifneq (${NXP_GUR_ENDIANNESS},) +$(eval $(call add_define,NXP_GUR_${NXP_GUR_ENDIANNESS})) +endif + +ifneq (${NXP_FSPI_ENDIANNESS},) +$(eval $(call add_define,NXP_FSPI_${NXP_FSPI_ENDIANNESS})) +endif + +ifneq (${NXP_SEC_ENDIANNESS},) +$(eval $(call add_define,NXP_SEC_${NXP_SEC_ENDIANNESS})) +endif + +ifneq (${NXP_DDR_ENDIANNESS},) +$(eval $(call add_define,NXP_DDR_${NXP_DDR_ENDIANNESS})) +endif + +ifneq 
(${NXP_QSPI_ENDIANNESS},) +$(eval $(call add_define,NXP_QSPI_${NXP_QSPI_ENDIANNESS})) +endif + +ifneq (${NXP_SCFG_ENDIANNESS},) +$(eval $(call add_define,NXP_SCFG_${NXP_SCFG_ENDIANNESS})) +endif + +ifneq (${NXP_IFC_ENDIANNESS},) +$(eval $(call add_define,NXP_IFC_${NXP_IFC_ENDIANNESS})) +endif + +ifneq (${NXP_DDR_INTLV_256B},) +$(eval $(call add_define,NXP_DDR_INTLV_256B)) +endif + +ifneq (${PLAT_XLAT_TABLES_DYNAMIC},) +$(eval $(call add_define,PLAT_XLAT_TABLES_DYNAMIC)) +endif + +ifeq (${OCRAM_ECC_EN},yes) +$(eval $(call add_define,CONFIG_OCRAM_ECC_EN)) +include ${PLAT_COMMON_PATH}/ocram/ocram.mk +endif diff --git a/plat/nxp/common/psci/aarch64/psci_utils.S b/plat/nxp/common/psci/aarch64/psci_utils.S new file mode 100644 index 0000000..ec69aea --- /dev/null +++ b/plat/nxp/common/psci/aarch64/psci_utils.S @@ -0,0 +1,1155 @@ + +/* + * Copyright 2018-2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <asm_macros.S> +#include <assert_macros.S> + +#include <lib/psci/psci.h> + +#include <bl31_data.h> +#include <plat_psci.h> + + +#define RESET_RETRY_CNT 800 +#define PSCI_ABORT_CNT 100 + +#if (SOC_CORE_RELEASE) + +.global _psci_cpu_on + +/* + * int _psci_cpu_on(u_register_t core_mask) + * x0 = target cpu core mask + * + * Called from C, so save the non-volatile regs + * save these as pairs of registers to maintain the + * required 16-byte alignment on the stack + * + */ + +func _psci_cpu_on + stp x4, x5, [sp, #-16]! + stp x6, x7, [sp, #-16]! + stp x8, x9, [sp, #-16]! + stp x10, x11, [sp, #-16]! + stp x12, x13, [sp, #-16]! + stp x14, x15, [sp, #-16]! + stp x16, x17, [sp, #-16]! + stp x18, x30, [sp, #-16]! + + mov x6, x0 + + /* x0 = core mask (lsb) + * x6 = core mask (lsb) + */ + + /* check if core disabled */ + bl _soc_ck_disabled /* 0-2 */ + cbnz w0, psci_disabled + + /* check core data area to see if core cannot be turned on + * read the core state + */ + mov x0, x6 + bl _getCoreState /* 0-5 */ + mov x9, x0 + + /* x6 = core mask (lsb) + * x9 = core state (from data area) + */ + + cmp x9, #CORE_DISABLED + mov x0, #PSCI_E_DISABLED + b.eq cpu_on_done + + cmp x9, #CORE_PENDING + mov x0, #PSCI_E_ON_PENDING + b.eq cpu_on_done + + cmp x9, #CORE_RELEASED + mov x0, #PSCI_E_ALREADY_ON + b.eq cpu_on_done + +8: + /* x6 = core mask (lsb) + * x9 = core state (from data area) + */ + + cmp x9, #CORE_WFE + b.eq core_in_wfe + cmp x9, #CORE_IN_RESET + b.eq core_in_reset + cmp x9, #CORE_OFF + b.eq core_is_off + cmp x9, #CORE_OFF_PENDING + + /* if state == CORE_OFF_PENDING, set abort */ + mov x0, x6 + mov x1, #ABORT_FLAG_DATA + mov x2, #CORE_ABORT_OP + bl _setCoreData /* 0-3, [13-15] */ + + ldr x3, =PSCI_ABORT_CNT +7: + /* watch for abort to take effect */ + mov x0, x6 + bl _getCoreState /* 0-5 */ + cmp x0, #CORE_OFF + b.eq core_is_off + cmp x0, #CORE_PENDING + mov x0, #PSCI_E_SUCCESS + b.eq cpu_on_done + + /* loop til finished */ + sub x3, x3, #1 + cbnz x3, 7b + + /* if we didn't see either CORE_OFF or CORE_PENDING, then this + * core is in CORE_OFF_PENDING - exit with success, as the core will + * respond to the abort request + */ + mov x0, #PSCI_E_SUCCESS + b cpu_on_done + +/* this is where we start up a core out of reset */ +core_in_reset: + /* see if the soc-specific module supports this op */ + ldr x7, =SOC_CORE_RELEASE + cbnz x7, 3f + + mov x0, #PSCI_E_NOT_SUPPORTED + b cpu_on_done + + /* x6 = core mask (lsb) */ +3: + /* set core state in data area */ + mov x0, x6 + mov x1, #CORE_PENDING + bl _setCoreState /* 0-3, [13-15] */ + + /* release the core from reset */ + mov x0, x6 + bl 
_soc_core_release /* 0-3 */ + mov x0, #PSCI_E_SUCCESS + b cpu_on_done + + /* Start up the core that has been powered-down via CPU_OFF + */ +core_is_off: + /* see if the soc-specific module supports this op + */ + ldr x7, =SOC_CORE_RESTART + cbnz x7, 2f + + mov x0, #PSCI_E_NOT_SUPPORTED + b cpu_on_done + + /* x6 = core mask (lsb) */ +2: + /* set core state in data area */ + mov x0, x6 + mov x1, #CORE_WAKEUP + bl _setCoreState /* 0-3, [13-15] */ + + /* put the core back into service */ + mov x0, x6 +#if (SOC_CORE_RESTART) + bl _soc_core_restart /* 0-5 */ +#endif + mov x0, #PSCI_E_SUCCESS + b cpu_on_done + +/* this is where we release a core that is being held in wfe */ +core_in_wfe: + /* x6 = core mask (lsb) */ + + /* set core state in data area */ + mov x0, x6 + mov x1, #CORE_PENDING + bl _setCoreState /* 0-3, [13-15] */ + dsb sy + isb + + /* put the core back into service */ + sev + sev + isb + mov x0, #PSCI_E_SUCCESS + +cpu_on_done: + /* restore the aarch32/64 non-volatile registers */ + ldp x18, x30, [sp], #16 + ldp x16, x17, [sp], #16 + ldp x14, x15, [sp], #16 + ldp x12, x13, [sp], #16 + ldp x10, x11, [sp], #16 + ldp x8, x9, [sp], #16 + ldp x6, x7, [sp], #16 + ldp x4, x5, [sp], #16 + b psci_completed +endfunc _psci_cpu_on + +#endif + + +#if (SOC_CORE_OFF) + +.global _psci_cpu_prep_off +.global _psci_cpu_off_wfi + +/* + * void _psci_cpu_prep_off(u_register_t core_mask) + * this function performs the SoC-specific programming prior + * to shutting the core down + * x0 = core_mask + * + * called from C, so save the non-volatile regs + * save these as pairs of registers to maintain the + * required 16-byte alignment on the stack + */ + +func _psci_cpu_prep_off + + stp x4, x5, [sp, #-16]! + stp x6, x7, [sp, #-16]! + stp x8, x9, [sp, #-16]! + stp x10, x11, [sp, #-16]! + stp x12, x13, [sp, #-16]! + stp x14, x15, [sp, #-16]! + stp x16, x17, [sp, #-16]! + stp x18, x30, [sp, #-16]! + + mov x10, x0 /* x10 = core_mask */ + + /* the core does not return from cpu_off, so no need + * to save/restore non-volatile registers + */ + + /* mask interrupts by setting DAIF[7:4] to 'b1111 */ + msr DAIFSet, #0xF + + /* read cpuectlr and save current value */ + mrs x4, CPUECTLR_EL1 + mov x1, #CPUECTLR_DATA + mov x2, x4 + mov x0, x10 + bl _setCoreData + + /* remove the core from coherency */ + bic x4, x4, #CPUECTLR_SMPEN_MASK + msr CPUECTLR_EL1, x4 + + /* save scr_el3 */ + mov x0, x10 + mrs x4, SCR_EL3 + mov x2, x4 + mov x1, #SCR_EL3_DATA + bl _setCoreData + + /* x4 = scr_el3 */ + + /* secure SGI (FIQ) taken to EL3, set SCR_EL3[FIQ] */ + orr x4, x4, #SCR_FIQ_MASK + msr scr_el3, x4 + + /* x10 = core_mask */ + + /* prep the core for shutdown */ + mov x0, x10 + bl _soc_core_prep_off + + /* restore the aarch32/64 non-volatile registers */ + ldp x18, x30, [sp], #16 + ldp x16, x17, [sp], #16 + ldp x14, x15, [sp], #16 + ldp x12, x13, [sp], #16 + ldp x10, x11, [sp], #16 + ldp x8, x9, [sp], #16 + ldp x6, x7, [sp], #16 + ldp x4, x5, [sp], #16 + b psci_completed +endfunc _psci_cpu_prep_off + +/* + * void _psci_cpu_off_wfi(u_register_t core_mask, u_register_t resume_addr) + * - this function shuts down the core + * - this function does not return!! 
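+ * in: x0 = core mask lsb
+ * x1 = resume address, branched to if the core later comes back
+ * out of the off state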
+ */ + +func _psci_cpu_off_wfi + /* save the wakeup address */ + mov x29, x1 + + /* x0 = core_mask */ + + /* shutdown the core */ + bl _soc_core_entr_off + + /* branch to resume execution */ + br x29 +endfunc _psci_cpu_off_wfi + +#endif + + +#if (SOC_CORE_RESTART) + +.global _psci_wakeup + +/* + * void _psci_wakeup(u_register_t core_mask) + * this function performs the SoC-specific programming + * after a core wakes up from OFF + * x0 = core mask + * + * called from C, so save the non-volatile regs + * save these as pairs of registers to maintain the + * required 16-byte alignment on the stack + */ + +func _psci_wakeup + + stp x4, x5, [sp, #-16]! + stp x6, x7, [sp, #-16]! + stp x8, x9, [sp, #-16]! + stp x10, x11, [sp, #-16]! + stp x12, x13, [sp, #-16]! + stp x14, x15, [sp, #-16]! + stp x16, x17, [sp, #-16]! + stp x18, x30, [sp, #-16]! + + mov x4, x0 /* x4 = core mask */ + + /* restore scr_el3 */ + mov x0, x4 + mov x1, #SCR_EL3_DATA + bl _getCoreData + /* x0 = saved scr_el3 */ + msr SCR_EL3, x0 + + /* x4 = core mask */ + + /* restore CPUECTLR */ + mov x0, x4 + mov x1, #CPUECTLR_DATA + bl _getCoreData + orr x0, x0, #CPUECTLR_SMPEN_MASK + msr CPUECTLR_EL1, x0 + + /* x4 = core mask */ + + /* start the core back up */ + mov x0, x4 + bl _soc_core_exit_off + + /* restore the aarch32/64 non-volatile registers + */ + ldp x18, x30, [sp], #16 + ldp x16, x17, [sp], #16 + ldp x14, x15, [sp], #16 + ldp x12, x13, [sp], #16 + ldp x10, x11, [sp], #16 + ldp x8, x9, [sp], #16 + ldp x6, x7, [sp], #16 + ldp x4, x5, [sp], #16 + b psci_completed +endfunc _psci_wakeup + +#endif + + +#if (SOC_SYSTEM_RESET) + +.global _psci_system_reset + +func _psci_system_reset + + /* system reset is mandatory + * system reset is soc-specific + * Note: under no circumstances do we return from this call + */ + bl _soc_sys_reset +endfunc _psci_system_reset + +#endif + + +#if (SOC_SYSTEM_OFF) + +.global _psci_system_off + +func _psci_system_off + + /* system off is mandatory + * system off is soc-specific + * Note: under no circumstances do we return from this call */ + b _soc_sys_off +endfunc _psci_system_off + +#endif + + +#if (SOC_CORE_STANDBY) + +.global _psci_core_entr_stdby +.global _psci_core_prep_stdby +.global _psci_core_exit_stdby + +/* + * void _psci_core_entr_stdby(u_register_t core_mask) - this + * is the fast-path for simple core standby + */ + +func _psci_core_entr_stdby + stp x4, x5, [sp, #-16]! + stp x6, x30, [sp, #-16]! + + mov x5, x0 /* x5 = core mask */ + + /* save scr_el3 */ + mov x0, x5 + mrs x4, SCR_EL3 + mov x2, x4 + mov x1, #SCR_EL3_DATA + bl _setCoreData + + /* x4 = SCR_EL3 + * x5 = core mask + */ + + /* allow interrupts @ EL3 */ + orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK) + msr SCR_EL3, x4 + + /* x5 = core mask */ + + /* put the core into standby */ + mov x0, x5 + bl _soc_core_entr_stdby + + /* restore scr_el3 */ + mov x0, x5 + mov x1, #SCR_EL3_DATA + bl _getCoreData + /* x0 = saved scr_el3 */ + msr SCR_EL3, x0 + + ldp x6, x30, [sp], #16 + ldp x4, x5, [sp], #16 + isb + ret +endfunc _psci_core_entr_stdby + +/* + * void _psci_core_prep_stdby(u_register_t core_mask) - this + * sets up the core to enter standby state thru the normal path + */ + +func _psci_core_prep_stdby + stp x4, x5, [sp, #-16]! + stp x6, x30, [sp, #-16]! 
+ + mov x5, x0 + + /* x5 = core mask */ + + /* save scr_el3 */ + mov x0, x5 + mrs x4, SCR_EL3 + mov x2, x4 + mov x1, #SCR_EL3_DATA + bl _setCoreData + + /* allow interrupts @ EL3 */ + orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK) + msr SCR_EL3, x4 + + /* x5 = core mask */ + + /* call for any SoC-specific programming */ + mov x0, x5 + bl _soc_core_prep_stdby + + ldp x6, x30, [sp], #16 + ldp x4, x5, [sp], #16 + isb + ret +endfunc _psci_core_prep_stdby + +/* + * void _psci_core_exit_stdby(u_register_t core_mask) - this + * exits the core from standby state thru the normal path + */ + +func _psci_core_exit_stdby + stp x4, x5, [sp, #-16]! + stp x6, x30, [sp, #-16]! + + mov x5, x0 + + /* x5 = core mask */ + + /* restore scr_el3 */ + mov x0, x5 + mov x1, #SCR_EL3_DATA + bl _getCoreData + /* x0 = saved scr_el3 */ + msr SCR_EL3, x0 + + /* x5 = core mask */ + + /* perform any SoC-specific programming after standby state */ + mov x0, x5 + bl _soc_core_exit_stdby + + ldp x6, x30, [sp], #16 + ldp x4, x5, [sp], #16 + isb + ret +endfunc _psci_core_exit_stdby + +#endif + + +#if (SOC_CORE_PWR_DWN) + +.global _psci_core_prep_pwrdn +.global _psci_cpu_pwrdn_wfi +.global _psci_core_exit_pwrdn + +/* + * void _psci_core_prep_pwrdn_(u_register_t core_mask) + * this function prepares the core for power-down + * x0 = core mask + * + * called from C, so save the non-volatile regs + * save these as pairs of registers to maintain the + * required 16-byte alignment on the stack + */ + +func _psci_core_prep_pwrdn + stp x4, x5, [sp, #-16]! + stp x6, x7, [sp, #-16]! + stp x8, x9, [sp, #-16]! + stp x10, x11, [sp, #-16]! + stp x12, x13, [sp, #-16]! + stp x14, x15, [sp, #-16]! + stp x16, x17, [sp, #-16]! + stp x18, x30, [sp, #-16]! + + mov x6, x0 + + /* x6 = core mask */ + + /* mask interrupts by setting DAIF[7:4] to 'b1111 */ + msr DAIFSet, #0xF + + /* save scr_el3 */ + mov x0, x6 + mrs x4, SCR_EL3 + mov x2, x4 + mov x1, #SCR_EL3_DATA + bl _setCoreData + + /* allow interrupts @ EL3 */ + orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK) + msr SCR_EL3, x4 + + /* save cpuectlr */ + mov x0, x6 + mov x1, #CPUECTLR_DATA + mrs x2, CPUECTLR_EL1 + bl _setCoreData + + /* x6 = core mask */ + + /* SoC-specific programming for power-down */ + mov x0, x6 + bl _soc_core_prep_pwrdn + + /* restore the aarch32/64 non-volatile registers + */ + ldp x18, x30, [sp], #16 + ldp x16, x17, [sp], #16 + ldp x14, x15, [sp], #16 + ldp x12, x13, [sp], #16 + ldp x10, x11, [sp], #16 + ldp x8, x9, [sp], #16 + ldp x6, x7, [sp], #16 + ldp x4, x5, [sp], #16 + b psci_completed +endfunc _psci_core_prep_pwrdn + +/* + * void _psci_cpu_pwrdn_wfi(u_register_t core_mask, u_register_t resume_addr) + * this function powers down the core + */ + +func _psci_cpu_pwrdn_wfi + /* save the wakeup address */ + mov x29, x1 + + /* x0 = core mask */ + + /* shutdown the core */ + bl _soc_core_entr_pwrdn + + /* branch to resume execution */ + br x29 +endfunc _psci_cpu_pwrdn_wfi + +/* + * void _psci_core_exit_pwrdn_(u_register_t core_mask) + * this function cleans up after a core power-down + * x0 = core mask + * + * called from C, so save the non-volatile regs + * save these as pairs of registers to maintain the + * required 16-byte alignment on the stack + */ + +func _psci_core_exit_pwrdn + stp x4, x5, [sp, #-16]! + stp x6, x7, [sp, #-16]! + stp x8, x9, [sp, #-16]! + stp x10, x11, [sp, #-16]! + stp x12, x13, [sp, #-16]! + stp x14, x15, [sp, #-16]! + stp x16, x17, [sp, #-16]! + stp x18, x30, [sp, #-16]! 
+ + mov x5, x0 /* x5 = core mask */ + + /* restore scr_el3 */ + mov x0, x5 + mov x1, #SCR_EL3_DATA + bl _getCoreData + /* x0 = saved scr_el3 */ + msr SCR_EL3, x0 + + /* x5 = core mask */ + + /* restore cpuectlr */ + mov x0, x5 + mov x1, #CPUECTLR_DATA + bl _getCoreData + /* make sure smp is set */ + orr x0, x0, #CPUECTLR_SMPEN_MASK + msr CPUECTLR_EL1, x0 + + /* x5 = core mask */ + + /* SoC-specific cleanup */ + mov x0, x5 + bl _soc_core_exit_pwrdn + + /* restore the aarch32/64 non-volatile registers + */ + ldp x18, x30, [sp], #16 + ldp x16, x17, [sp], #16 + ldp x14, x15, [sp], #16 + ldp x12, x13, [sp], #16 + ldp x10, x11, [sp], #16 + ldp x8, x9, [sp], #16 + ldp x6, x7, [sp], #16 + ldp x4, x5, [sp], #16 + b psci_completed +endfunc _psci_core_exit_pwrdn + +#endif + +#if (SOC_CLUSTER_STANDBY) + +.global _psci_clstr_prep_stdby +.global _psci_clstr_exit_stdby + +/* + * void _psci_clstr_prep_stdby(u_register_t core_mask) - this + * sets up the clstr to enter standby state thru the normal path + */ + +func _psci_clstr_prep_stdby + stp x4, x5, [sp, #-16]! + stp x6, x30, [sp, #-16]! + + mov x5, x0 + + /* x5 = core mask */ + + /* save scr_el3 */ + mov x0, x5 + mrs x4, SCR_EL3 + mov x2, x4 + mov x1, #SCR_EL3_DATA + bl _setCoreData + + /* allow interrupts @ EL3 */ + orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK) + msr SCR_EL3, x4 + + /* x5 = core mask */ + + /* call for any SoC-specific programming */ + mov x0, x5 + bl _soc_clstr_prep_stdby + + ldp x6, x30, [sp], #16 + ldp x4, x5, [sp], #16 + isb + ret +endfunc _psci_clstr_prep_stdby + +/* + * void _psci_clstr_exit_stdby(u_register_t core_mask) - this + * exits the clstr from standby state thru the normal path + */ + +func _psci_clstr_exit_stdby + stp x4, x5, [sp, #-16]! + stp x6, x30, [sp, #-16]! + + mov x5, x0 /* x5 = core mask */ + + /* restore scr_el3 */ + mov x0, x5 + mov x1, #SCR_EL3_DATA + bl _getCoreData + /* x0 = saved scr_el3 */ + msr SCR_EL3, x0 + + /* x5 = core mask */ + + /* perform any SoC-specific programming after standby state */ + mov x0, x5 + bl _soc_clstr_exit_stdby + + ldp x6, x30, [sp], #16 + ldp x4, x5, [sp], #16 + isb + ret +endfunc _psci_clstr_exit_stdby + +#endif + +#if (SOC_CLUSTER_PWR_DWN) + +.global _psci_clstr_prep_pwrdn +.global _psci_clstr_exit_pwrdn + +/* + * void _psci_clstr_prep_pwrdn_(u_register_t core_mask) + * this function prepares the cluster+core for power-down + * x0 = core mask + * + * called from C, so save the non-volatile regs + * save these as pairs of registers to maintain the + * required 16-byte alignment on the stack + */ + +func _psci_clstr_prep_pwrdn + stp x4, x5, [sp, #-16]! + stp x6, x7, [sp, #-16]! + stp x8, x9, [sp, #-16]! + stp x10, x11, [sp, #-16]! + stp x12, x13, [sp, #-16]! + stp x14, x15, [sp, #-16]! + stp x16, x17, [sp, #-16]! + stp x18, x30, [sp, #-16]! 
+ + mov x6, x0 /* x6 = core mask */ + + /* mask interrupts by setting DAIF[7:4] to 'b1111 */ + msr DAIFSet, #0xF + + /* save scr_el3 */ + mov x0, x6 + mrs x4, SCR_EL3 + mov x2, x4 + mov x1, #SCR_EL3_DATA + bl _setCoreData + + /* allow interrupts @ EL3 */ + orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK) + msr SCR_EL3, x4 + + /* save cpuectlr */ + mov x0, x6 + mov x1, #CPUECTLR_DATA + mrs x2, CPUECTLR_EL1 + mov x4, x2 + bl _setCoreData + + /* remove core from coherency */ + bic x4, x4, #CPUECTLR_SMPEN_MASK + msr CPUECTLR_EL1, x4 + + /* x6 = core mask */ + + /* SoC-specific programming for power-down */ + mov x0, x6 + bl _soc_clstr_prep_pwrdn + + /* restore the aarch32/64 non-volatile registers + */ + ldp x18, x30, [sp], #16 + ldp x16, x17, [sp], #16 + ldp x14, x15, [sp], #16 + ldp x12, x13, [sp], #16 + ldp x10, x11, [sp], #16 + ldp x8, x9, [sp], #16 + ldp x6, x7, [sp], #16 + ldp x4, x5, [sp], #16 + b psci_completed +endfunc _psci_clstr_prep_pwrdn + +/* + * void _psci_clstr_exit_pwrdn_(u_register_t core_mask) + * this function cleans up after a cluster power-down + * x0 = core mask + * + * called from C, so save the non-volatile regs + * save these as pairs of registers to maintain the + * required 16-byte alignment on the stack + */ + +func _psci_clstr_exit_pwrdn + stp x4, x5, [sp, #-16]! + stp x6, x7, [sp, #-16]! + stp x8, x9, [sp, #-16]! + stp x10, x11, [sp, #-16]! + stp x12, x13, [sp, #-16]! + stp x14, x15, [sp, #-16]! + stp x16, x17, [sp, #-16]! + stp x18, x30, [sp, #-16]! + + mov x4, x0 /* x4 = core mask */ + + /* restore scr_el3 */ + mov x0, x4 + mov x1, #SCR_EL3_DATA + bl _getCoreData + /* x0 = saved scr_el3 */ + msr SCR_EL3, x0 + + /* x4 = core mask */ + + /* restore cpuectlr */ + mov x0, x4 + mov x1, #CPUECTLR_DATA + bl _getCoreData + /* make sure smp is set */ + orr x0, x0, #CPUECTLR_SMPEN_MASK + msr CPUECTLR_EL1, x0 + + /* x4 = core mask */ + + /* SoC-specific cleanup */ + mov x0, x4 + bl _soc_clstr_exit_pwrdn + + /* restore the aarch32/64 non-volatile registers + */ + ldp x18, x30, [sp], #16 + ldp x16, x17, [sp], #16 + ldp x14, x15, [sp], #16 + ldp x12, x13, [sp], #16 + ldp x10, x11, [sp], #16 + ldp x8, x9, [sp], #16 + ldp x6, x7, [sp], #16 + ldp x4, x5, [sp], #16 + b psci_completed +endfunc _psci_clstr_exit_pwrdn + +#endif + +#if (SOC_SYSTEM_STANDBY) + +.global _psci_sys_prep_stdby +.global _psci_sys_exit_stdby + +/* + * void _psci_sys_prep_stdby(u_register_t core_mask) - this + * sets up the system to enter standby state thru the normal path + */ + +func _psci_sys_prep_stdby + stp x4, x5, [sp, #-16]! + stp x6, x30, [sp, #-16]! + + mov x5, x0 /* x5 = core mask */ + + /* save scr_el3 */ + mov x0, x5 + mrs x4, SCR_EL3 + mov x2, x4 + mov x1, #SCR_EL3_DATA + bl _setCoreData + + /* allow interrupts @ EL3 */ + orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK) + msr SCR_EL3, x4 + + /* x5 = core mask */ + + /* call for any SoC-specific programming */ + mov x0, x5 + bl _soc_sys_prep_stdby + + ldp x6, x30, [sp], #16 + ldp x4, x5, [sp], #16 + isb + ret +endfunc _psci_sys_prep_stdby + +/* + * void _psci_sys_exit_stdby(u_register_t core_mask) - this + * exits the system from standby state thru the normal path + */ + +func _psci_sys_exit_stdby + stp x4, x5, [sp, #-16]! + stp x6, x30, [sp, #-16]! 
+ + mov x5, x0 + + /* x5 = core mask */ + + /* restore scr_el3 */ + mov x0, x5 + mov x1, #SCR_EL3_DATA + bl _getCoreData + /* x0 = saved scr_el3 */ + msr SCR_EL3, x0 + + /* x5 = core mask */ + + /* perform any SoC-specific programming after standby state */ + mov x0, x5 + bl _soc_sys_exit_stdby + + ldp x6, x30, [sp], #16 + ldp x4, x5, [sp], #16 + isb + ret +endfunc _psci_sys_exit_stdby + +#endif + +#if (SOC_SYSTEM_PWR_DWN) + +.global _psci_sys_prep_pwrdn +.global _psci_sys_pwrdn_wfi +.global _psci_sys_exit_pwrdn + +/* + * void _psci_sys_prep_pwrdn_(u_register_t core_mask) + * this function prepares the system+core for power-down + * x0 = core mask + * + * called from C, so save the non-volatile regs + * save these as pairs of registers to maintain the + * required 16-byte alignment on the stack + */ + +func _psci_sys_prep_pwrdn + stp x4, x5, [sp, #-16]! + stp x6, x7, [sp, #-16]! + stp x8, x9, [sp, #-16]! + stp x10, x11, [sp, #-16]! + stp x12, x13, [sp, #-16]! + stp x14, x15, [sp, #-16]! + stp x16, x17, [sp, #-16]! + stp x18, x30, [sp, #-16]! + + mov x6, x0 /* x6 = core mask */ + + /* mask interrupts by setting DAIF[7:4] to 'b1111 */ + msr DAIFSet, #0xF + + /* save scr_el3 */ + mov x0, x6 + mrs x4, SCR_EL3 + mov x2, x4 + mov x1, #SCR_EL3_DATA + bl _setCoreData + + /* allow interrupts @ EL3 */ + orr x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK) + msr SCR_EL3, x4 + + /* save cpuectlr */ + mov x0, x6 + mov x1, #CPUECTLR_DATA + mrs x2, CPUECTLR_EL1 + mov x4, x2 + bl _setCoreData + + /* remove core from coherency */ + bic x4, x4, #CPUECTLR_SMPEN_MASK + msr CPUECTLR_EL1, x4 + + /* x6 = core mask */ + + /* SoC-specific programming for power-down */ + mov x0, x6 + bl _soc_sys_prep_pwrdn + + /* restore the aarch32/64 non-volatile registers + */ + ldp x18, x30, [sp], #16 + ldp x16, x17, [sp], #16 + ldp x14, x15, [sp], #16 + ldp x12, x13, [sp], #16 + ldp x10, x11, [sp], #16 + ldp x8, x9, [sp], #16 + ldp x6, x7, [sp], #16 + ldp x4, x5, [sp], #16 + b psci_completed +endfunc _psci_sys_prep_pwrdn + + +/* + * void _psci_sys_pwrdn_wfi(u_register_t core_mask, u_register_t resume_addr) + * this function powers down the system + */ + +func _psci_sys_pwrdn_wfi + /* save the wakeup address */ + mov x29, x1 + + /* x0 = core mask */ + + /* shutdown the system */ + bl _soc_sys_pwrdn_wfi + + /* branch to resume execution */ + br x29 +endfunc _psci_sys_pwrdn_wfi + +/* + * void _psci_sys_exit_pwrdn_(u_register_t core_mask) + * this function cleans up after a system power-down + * x0 = core mask + * + * Called from C, so save the non-volatile regs + * save these as pairs of registers to maintain the + * required 16-byte alignment on the stack + */ + +func _psci_sys_exit_pwrdn + + stp x4, x5, [sp, #-16]! + stp x6, x7, [sp, #-16]! + stp x8, x9, [sp, #-16]! + stp x10, x11, [sp, #-16]! + stp x12, x13, [sp, #-16]! + stp x14, x15, [sp, #-16]! + stp x16, x17, [sp, #-16]! + stp x18, x30, [sp, #-16]! 
+ + mov x4, x0 /* x4 = core mask */ + + /* restore scr_el3 */ + mov x0, x4 + mov x1, #SCR_EL3_DATA + bl _getCoreData + + /* x0 = saved scr_el3 */ + msr SCR_EL3, x0 + + /* x4 = core mask */ + + /* restore cpuectlr */ + mov x0, x4 + mov x1, #CPUECTLR_DATA + bl _getCoreData + + /* make sure smp is set */ + orr x0, x0, #CPUECTLR_SMPEN_MASK + msr CPUECTLR_EL1, x0 + + /* x4 = core mask */ + + /* SoC-specific cleanup */ + mov x0, x4 + bl _soc_sys_exit_pwrdn + + /* restore the aarch32/64 non-volatile registers + */ + ldp x18, x30, [sp], #16 + ldp x16, x17, [sp], #16 + ldp x14, x15, [sp], #16 + ldp x12, x13, [sp], #16 + ldp x10, x11, [sp], #16 + ldp x8, x9, [sp], #16 + ldp x6, x7, [sp], #16 + ldp x4, x5, [sp], #16 + b psci_completed +endfunc _psci_sys_exit_pwrdn + +#endif + + +/* psci std returns */ +func psci_disabled + ldr w0, =PSCI_E_DISABLED + b psci_completed +endfunc psci_disabled + + +func psci_not_present + ldr w0, =PSCI_E_NOT_PRESENT + b psci_completed +endfunc psci_not_present + + +func psci_on_pending + ldr w0, =PSCI_E_ON_PENDING + b psci_completed +endfunc psci_on_pending + + +func psci_already_on + ldr w0, =PSCI_E_ALREADY_ON + b psci_completed +endfunc psci_already_on + + +func psci_failure + ldr w0, =PSCI_E_INTERN_FAIL + b psci_completed +endfunc psci_failure + + +func psci_unimplemented + ldr w0, =PSCI_E_NOT_SUPPORTED + b psci_completed +endfunc psci_unimplemented + + +func psci_denied + ldr w0, =PSCI_E_DENIED + b psci_completed +endfunc psci_denied + + +func psci_invalid + ldr w0, =PSCI_E_INVALID_PARAMS + b psci_completed +endfunc psci_invalid + + +func psci_success + mov x0, #PSCI_E_SUCCESS +endfunc psci_success + + +func psci_completed + /* x0 = status code */ + ret +endfunc psci_completed diff --git a/plat/nxp/common/psci/include/plat_psci.h b/plat/nxp/common/psci/include/plat_psci.h new file mode 100644 index 0000000..7fc48fb --- /dev/null +++ b/plat/nxp/common/psci/include/plat_psci.h @@ -0,0 +1,145 @@ +/* + * Copyright 2018-2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#ifndef PLAT_PSCI_H +#define PLAT_PSCI_H +#include <cortex_a53.h> +#include <cortex_a72.h> + + /* core abort current op */ +#define CORE_ABORT_OP 0x1 + + /* psci power levels - these are actually affinity levels + * in the psci_power_state_t array + */ +#define PLAT_CORE_LVL PSCI_CPU_PWR_LVL +#define PLAT_CLSTR_LVL U(1) +#define PLAT_SYS_LVL U(2) +#define PLAT_MAX_LVL PLAT_SYS_LVL + + /* core state */ + /* OFF states 0x0 - 0xF */ +#define CORE_IN_RESET 0x0 +#define CORE_DISABLED 0x1 +#define CORE_OFF 0x2 +#define CORE_STANDBY 0x3 +#define CORE_PWR_DOWN 0x4 +#define CORE_WFE 0x6 +#define CORE_WFI 0x7 +#define CORE_LAST 0x8 +#define CORE_OFF_PENDING 0x9 +#define CORE_WORKING_INIT 0xA +#define SYS_OFF_PENDING 0xB +#define SYS_OFF 0xC + + /* ON states 0x10 - 0x1F */ +#define CORE_PENDING 0x10 +#define CORE_RELEASED 0x11 +#define CORE_WAKEUP 0x12 + /* highest off state */ +#define CORE_OFF_MAX 0xF + /* lowest on state */ +#define CORE_ON_MIN CORE_PENDING + +#define DAIF_SET_MASK 0x3C0 +#define SCTLR_I_C_M_MASK 0x00001005 +#define SCTLR_C_MASK 0x00000004 +#define SCTLR_I_MASK 0x00001000 +#define CPUACTLR_L1PCTL_MASK 0x0000E000 +#define DCSR_RCPM2_BASE 0x20170000 +#define CPUECTLR_SMPEN_MASK 0x40 +#define CPUECTLR_SMPEN_EN 0x40 +#define CPUECTLR_RET_MASK 0x7 +#define CPUECTLR_RET_SET 0x2 +#define CPUECTLR_TIMER_MASK 0x7 +#define CPUECTLR_TIMER_8TICKS 0x2 +#define CPUECTLR_TIMER_2TICKS 0x1 +#define SCR_IRQ_MASK 0x2 +#define SCR_FIQ_MASK 0x4 + +/* pwr mgmt features supported in the soc-specific code: 
+ * value == 0x0, the soc code does not support this feature + * value != 0x0, the soc code supports this feature + */ +#ifndef SOC_CORE_RELEASE +#define SOC_CORE_RELEASE 0x1 +#endif + +#ifndef SOC_CORE_RESTART +#define SOC_CORE_RESTART 0x1 +#endif + +#ifndef SOC_CORE_OFF +#define SOC_CORE_OFF 0x1 +#endif + +#ifndef SOC_CORE_STANDBY +#define SOC_CORE_STANDBY 0x1 +#endif + +#ifndef SOC_CORE_PWR_DWN +#define SOC_CORE_PWR_DWN 0x1 +#endif + +#ifndef SOC_CLUSTER_STANDBY +#define SOC_CLUSTER_STANDBY 0x1 +#endif + +#ifndef SOC_CLUSTER_PWR_DWN +#define SOC_CLUSTER_PWR_DWN 0x1 +#endif + +#ifndef SOC_SYSTEM_STANDBY +#define SOC_SYSTEM_STANDBY 0x1 +#endif + +#ifndef SOC_SYSTEM_PWR_DWN +#define SOC_SYSTEM_PWR_DWN 0x1 +#endif + +#ifndef SOC_SYSTEM_OFF +#define SOC_SYSTEM_OFF 0x1 +#endif + +#ifndef SOC_SYSTEM_RESET +#define SOC_SYSTEM_RESET 0x1 +#endif + +#ifndef SOC_SYSTEM_RESET2 +#define SOC_SYSTEM_RESET2 0x1 +#endif + +#ifndef __ASSEMBLER__ + +void __dead2 _psci_system_reset(void); +void __dead2 _psci_system_off(void); +int _psci_cpu_on(u_register_t core_mask); +void _psci_cpu_prep_off(u_register_t core_mask); +void __dead2 _psci_cpu_off_wfi(u_register_t core_mask, + u_register_t wakeup_address); +void __dead2 _psci_cpu_pwrdn_wfi(u_register_t core_mask, + u_register_t wakeup_address); +void __dead2 _psci_sys_pwrdn_wfi(u_register_t core_mask, + u_register_t wakeup_address); +void _psci_wakeup(u_register_t core_mask); +void _psci_core_entr_stdby(u_register_t core_mask); +void _psci_core_prep_stdby(u_register_t core_mask); +void _psci_core_exit_stdby(u_register_t core_mask); +void _psci_core_prep_pwrdn(u_register_t core_mask); +void _psci_core_exit_pwrdn(u_register_t core_mask); +void _psci_clstr_prep_stdby(u_register_t core_mask); +void _psci_clstr_exit_stdby(u_register_t core_mask); +void _psci_clstr_prep_pwrdn(u_register_t core_mask); +void _psci_clstr_exit_pwrdn(u_register_t core_mask); +void _psci_sys_prep_stdby(u_register_t core_mask); +void _psci_sys_exit_stdby(u_register_t core_mask); +void _psci_sys_prep_pwrdn(u_register_t core_mask); +void _psci_sys_exit_pwrdn(u_register_t core_mask); + +#endif + +#endif /* __PLAT_PSCI_H__ */ diff --git a/plat/nxp/common/psci/plat_psci.c b/plat/nxp/common/psci/plat_psci.c new file mode 100644 index 0000000..9281e97 --- /dev/null +++ b/plat/nxp/common/psci/plat_psci.c @@ -0,0 +1,475 @@ +/* + * Copyright 2018-2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <common/debug.h> + +#include <plat_gic.h> +#include <plat_common.h> +#include <plat_psci.h> +#ifdef NXP_WARM_BOOT +#include <plat_warm_rst.h> +#endif + +#include <platform_def.h> + +#if (SOC_CORE_OFF || SOC_CORE_PWR_DWN) +static void __dead2 _no_return_wfi(void) +{ +_bl31_dead_wfi: + wfi(); + goto _bl31_dead_wfi; +} +#endif + +#if (SOC_CORE_RELEASE || SOC_CORE_PWR_DWN) + /* the entry for core warm boot */ +static uintptr_t warmboot_entry = (uintptr_t) NULL; +#endif + +#if (SOC_CORE_RELEASE) +static int _pwr_domain_on(u_register_t mpidr) +{ + int core_pos = plat_core_pos(mpidr); + int rc = PSCI_E_INVALID_PARAMS; + u_register_t core_mask; + + if (core_pos >= 0 && core_pos < PLATFORM_CORE_COUNT) { + + _soc_set_start_addr(warmboot_entry); + + dsb(); + isb(); + + core_mask = (1 << core_pos); + rc = _psci_cpu_on(core_mask); + } + + return (rc); +} +#endif + +#if (SOC_CORE_OFF) +static void _pwr_domain_off(const psci_power_state_t *target_state) +{ + u_register_t core_mask = plat_my_core_mask(); + u_register_t core_state = _getCoreState(core_mask); + + /* set core state in internal data */ 
+ core_state = CORE_OFF_PENDING; + _setCoreState(core_mask, core_state); + + _psci_cpu_prep_off(core_mask); +} +#endif + +#if (SOC_CORE_OFF || SOC_CORE_PWR_DWN) +static void __dead2 _pwr_down_wfi(const psci_power_state_t *target_state) +{ + u_register_t core_mask = plat_my_core_mask(); + u_register_t core_state = _getCoreState(core_mask); + + switch (core_state) { +#if (SOC_CORE_OFF) + case CORE_OFF_PENDING: + /* set core state in internal data */ + core_state = CORE_OFF; + _setCoreState(core_mask, core_state); + + /* turn the core off */ + _psci_cpu_off_wfi(core_mask, warmboot_entry); + break; +#endif +#if (SOC_CORE_PWR_DWN) + case CORE_PWR_DOWN: + /* power-down the core */ + _psci_cpu_pwrdn_wfi(core_mask, warmboot_entry); + break; +#endif +#if (SOC_SYSTEM_PWR_DWN) + case SYS_OFF_PENDING: + /* set core state in internal data */ + core_state = SYS_OFF; + _setCoreState(core_mask, core_state); + + /* power-down the system */ + _psci_sys_pwrdn_wfi(core_mask, warmboot_entry); + break; +#endif + default: + _no_return_wfi(); + break; + } +} +#endif + +#if (SOC_CORE_RELEASE || SOC_CORE_RESTART) +static void _pwr_domain_wakeup(const psci_power_state_t *target_state) +{ + u_register_t core_mask = plat_my_core_mask(); + u_register_t core_state = _getCoreState(core_mask); + + switch (core_state) { + case CORE_PENDING: /* this core is coming out of reset */ + + /* soc per cpu setup */ + soc_init_percpu(); + + /* gic per cpu setup */ + plat_gic_pcpu_init(); + + /* set core state in internal data */ + core_state = CORE_RELEASED; + _setCoreState(core_mask, core_state); + break; + +#if (SOC_CORE_RESTART) + case CORE_WAKEUP: + + /* this core is waking up from OFF */ + _psci_wakeup(core_mask); + + /* set core state in internal data */ + core_state = CORE_RELEASED; + _setCoreState(core_mask, core_state); + + break; +#endif + } +} +#endif + +#if (SOC_CORE_STANDBY) +static void _pwr_cpu_standby(plat_local_state_t cpu_state) +{ + u_register_t core_mask = plat_my_core_mask(); + u_register_t core_state; + + if (cpu_state == PLAT_MAX_RET_STATE) { + + /* set core state to standby */ + core_state = CORE_STANDBY; + _setCoreState(core_mask, core_state); + + _psci_core_entr_stdby(core_mask); + + /* when we are here, the core is waking up + * set core state to released + */ + core_state = CORE_RELEASED; + _setCoreState(core_mask, core_state); + } +} +#endif + +#if (SOC_CORE_PWR_DWN) +static void _pwr_suspend(const psci_power_state_t *state) +{ + + u_register_t core_mask = plat_my_core_mask(); + u_register_t core_state; + + if (state->pwr_domain_state[PLAT_MAX_LVL] == PLAT_MAX_OFF_STATE) { +#if (SOC_SYSTEM_PWR_DWN) + _psci_sys_prep_pwrdn(core_mask); + + /* set core state */ + core_state = SYS_OFF_PENDING; + _setCoreState(core_mask, core_state); +#endif + } else if (state->pwr_domain_state[PLAT_MAX_LVL] + == PLAT_MAX_RET_STATE) { +#if (SOC_SYSTEM_STANDBY) + _psci_sys_prep_stdby(core_mask); + + /* set core state */ + core_state = CORE_STANDBY; + _setCoreState(core_mask, core_state); +#endif + } + + else if (state->pwr_domain_state[PLAT_CLSTR_LVL] == + PLAT_MAX_OFF_STATE) { +#if (SOC_CLUSTER_PWR_DWN) + _psci_clstr_prep_pwrdn(core_mask); + + /* set core state */ + core_state = CORE_PWR_DOWN; + _setCoreState(core_mask, core_state); +#endif + } + + else if (state->pwr_domain_state[PLAT_CLSTR_LVL] == + PLAT_MAX_RET_STATE) { +#if (SOC_CLUSTER_STANDBY) + _psci_clstr_prep_stdby(core_mask); + + /* set core state */ + core_state = CORE_STANDBY; + _setCoreState(core_mask, core_state); +#endif + } + + else if 
(state->pwr_domain_state[PLAT_CORE_LVL] == PLAT_MAX_OFF_STATE) { +#if (SOC_CORE_PWR_DWN) + /* prep the core for power-down */ + _psci_core_prep_pwrdn(core_mask); + + /* set core state */ + core_state = CORE_PWR_DOWN; + _setCoreState(core_mask, core_state); +#endif + } + + else if (state->pwr_domain_state[PLAT_CORE_LVL] == PLAT_MAX_RET_STATE) { +#if (SOC_CORE_STANDBY) + _psci_core_prep_stdby(core_mask); + + /* set core state */ + core_state = CORE_STANDBY; + _setCoreState(core_mask, core_state); +#endif + } + +} +#endif + +#if (SOC_CORE_PWR_DWN) +static void _pwr_suspend_finish(const psci_power_state_t *state) +{ + + u_register_t core_mask = plat_my_core_mask(); + u_register_t core_state; + + + if (state->pwr_domain_state[PLAT_MAX_LVL] == PLAT_MAX_OFF_STATE) { +#if (SOC_SYSTEM_PWR_DWN) + _psci_sys_exit_pwrdn(core_mask); + + /* when we are here, the core is back up + * set core state to released + */ + core_state = CORE_RELEASED; + _setCoreState(core_mask, core_state); +#endif + } else if (state->pwr_domain_state[PLAT_MAX_LVL] + == PLAT_MAX_RET_STATE) { +#if (SOC_SYSTEM_STANDBY) + _psci_sys_exit_stdby(core_mask); + + /* when we are here, the core is waking up + * set core state to released + */ + core_state = CORE_RELEASED; + _setCoreState(core_mask, core_state); +#endif + } + + else if (state->pwr_domain_state[PLAT_CLSTR_LVL] == + PLAT_MAX_OFF_STATE) { +#if (SOC_CLUSTER_PWR_DWN) + _psci_clstr_exit_pwrdn(core_mask); + + /* when we are here, the core is waking up + * set core state to released + */ + core_state = CORE_RELEASED; + _setCoreState(core_mask, core_state); +#endif + } + + else if (state->pwr_domain_state[PLAT_CLSTR_LVL] == + PLAT_MAX_RET_STATE) { +#if (SOC_CLUSTER_STANDBY) + _psci_clstr_exit_stdby(core_mask); + + /* when we are here, the core is waking up + * set core state to released + */ + core_state = CORE_RELEASED; + _setCoreState(core_mask, core_state); +#endif + } + + else if (state->pwr_domain_state[PLAT_CORE_LVL] == PLAT_MAX_OFF_STATE) { +#if (SOC_CORE_PWR_DWN) + _psci_core_exit_pwrdn(core_mask); + + /* when we are here, the core is back up + * set core state to released + */ + core_state = CORE_RELEASED; + _setCoreState(core_mask, core_state); +#endif + } + + else if (state->pwr_domain_state[PLAT_CORE_LVL] == PLAT_MAX_RET_STATE) { +#if (SOC_CORE_STANDBY) + _psci_core_exit_stdby(core_mask); + + /* when we are here, the core is waking up + * set core state to released + */ + core_state = CORE_RELEASED; + _setCoreState(core_mask, core_state); +#endif + } + +} +#endif + +#if (SOC_CORE_STANDBY || SOC_CORE_PWR_DWN) + +#define PWR_STATE_TYPE_MASK 0x00010000 +#define PWR_STATE_TYPE_STNDBY 0x0 +#define PWR_STATE_TYPE_PWRDWN 0x00010000 +#define PWR_STATE_LVL_MASK 0x03000000 +#define PWR_STATE_LVL_CORE 0x0 +#define PWR_STATE_LVL_CLSTR 0x01000000 +#define PWR_STATE_LVL_SYS 0x02000000 +#define PWR_STATE_LVL_MAX 0x03000000 + + /* turns a requested power state into a target power state + * based on SoC capabilities + */ +static int _pwr_state_validate(uint32_t pwr_state, + psci_power_state_t *state) +{ + int stat = PSCI_E_INVALID_PARAMS; + int pwrdn = (pwr_state & PWR_STATE_TYPE_MASK); + int lvl = (pwr_state & PWR_STATE_LVL_MASK); + + switch (lvl) { + case PWR_STATE_LVL_MAX: + if (pwrdn && SOC_SYSTEM_PWR_DWN) + state->pwr_domain_state[PLAT_MAX_LVL] = + PLAT_MAX_OFF_STATE; + else if (SOC_SYSTEM_STANDBY) + state->pwr_domain_state[PLAT_MAX_LVL] = + PLAT_MAX_RET_STATE; + /* intentional fall-thru condition */ + case PWR_STATE_LVL_SYS: + if (pwrdn && SOC_SYSTEM_PWR_DWN) + 
state->pwr_domain_state[PLAT_SYS_LVL] = + PLAT_MAX_OFF_STATE; + else if (SOC_SYSTEM_STANDBY) + state->pwr_domain_state[PLAT_SYS_LVL] = + PLAT_MAX_RET_STATE; + /* intentional fall-thru condition */ + case PWR_STATE_LVL_CLSTR: + if (pwrdn && SOC_CLUSTER_PWR_DWN) + state->pwr_domain_state[PLAT_CLSTR_LVL] = + PLAT_MAX_OFF_STATE; + else if (SOC_CLUSTER_STANDBY) + state->pwr_domain_state[PLAT_CLSTR_LVL] = + PLAT_MAX_RET_STATE; + /* intentional fall-thru condition */ + case PWR_STATE_LVL_CORE: + stat = PSCI_E_SUCCESS; + + if (pwrdn && SOC_CORE_PWR_DWN) + state->pwr_domain_state[PLAT_CORE_LVL] = + PLAT_MAX_OFF_STATE; + else if (SOC_CORE_STANDBY) + state->pwr_domain_state[PLAT_CORE_LVL] = + PLAT_MAX_RET_STATE; + break; + } + return (stat); +} + +#endif + +#if (SOC_SYSTEM_PWR_DWN) +static void _pwr_state_sys_suspend(psci_power_state_t *req_state) +{ + + /* if we need to have per-SoC settings, then we need to + * extend this by calling into psci_utils.S and from there + * on down to the SoC.S files + */ + + req_state->pwr_domain_state[PLAT_MAX_LVL] = PLAT_MAX_OFF_STATE; + req_state->pwr_domain_state[PLAT_SYS_LVL] = PLAT_MAX_OFF_STATE; + req_state->pwr_domain_state[PLAT_CLSTR_LVL] = PLAT_MAX_OFF_STATE; + req_state->pwr_domain_state[PLAT_CORE_LVL] = PLAT_MAX_OFF_STATE; + +} +#endif + +#if defined(NXP_WARM_BOOT) && (SOC_SYSTEM_RESET2) +static int psci_system_reset2(int is_vendor, + int reset_type, + u_register_t cookie) +{ + int ret = 0; + + INFO("Executing the sequence of warm reset.\n"); + ret = prep_n_execute_warm_reset(); + + return ret; +} +#endif + +static plat_psci_ops_t _psci_pm_ops = { +#if (SOC_SYSTEM_OFF) + .system_off = _psci_system_off, +#endif +#if (SOC_SYSTEM_RESET) + .system_reset = _psci_system_reset, +#endif +#if defined(NXP_WARM_BOOT) && (SOC_SYSTEM_RESET2) + .system_reset2 = psci_system_reset2, +#endif +#if (SOC_CORE_RELEASE || SOC_CORE_RESTART) + /* core released or restarted */ + .pwr_domain_on_finish = _pwr_domain_wakeup, +#endif +#if (SOC_CORE_OFF) + /* core shutting down */ + .pwr_domain_off = _pwr_domain_off, +#endif +#if (SOC_CORE_OFF || SOC_CORE_PWR_DWN) + .pwr_domain_pwr_down_wfi = _pwr_down_wfi, +#endif +#if (SOC_CORE_STANDBY || SOC_CORE_PWR_DWN) + /* cpu_suspend */ + .validate_power_state = _pwr_state_validate, +#if (SOC_CORE_STANDBY) + .cpu_standby = _pwr_cpu_standby, +#endif +#if (SOC_CORE_PWR_DWN) + .pwr_domain_suspend = _pwr_suspend, + .pwr_domain_suspend_finish = _pwr_suspend_finish, +#endif +#endif +#if (SOC_SYSTEM_PWR_DWN) + .get_sys_suspend_power_state = _pwr_state_sys_suspend, +#endif +#if (SOC_CORE_RELEASE) + /* core executing psci_cpu_on */ + .pwr_domain_on = _pwr_domain_on +#endif +}; + +#if (SOC_CORE_RELEASE || SOC_CORE_PWR_DWN) +int plat_setup_psci_ops(uintptr_t sec_entrypoint, + const plat_psci_ops_t **psci_ops) +{ + warmboot_entry = sec_entrypoint; + *psci_ops = &_psci_pm_ops; + return 0; +} + +#else + +int plat_setup_psci_ops(uintptr_t sec_entrypoint, + const plat_psci_ops_t **psci_ops) +{ + *psci_ops = &_psci_pm_ops; + return 0; +} +#endif diff --git a/plat/nxp/common/psci/psci.mk b/plat/nxp/common/psci/psci.mk new file mode 100644 index 0000000..a2791c2 --- /dev/null +++ b/plat/nxp/common/psci/psci.mk @@ -0,0 +1,35 @@ +# +# Copyright 2018-2020 NXP +# +# SPDX-License-Identifier: BSD-3-Clause +# +# +#------------------------------------------------------------------------------ +# +# Select the PSCI files +# +# ----------------------------------------------------------------------------- + +ifeq (${ADD_PSCI},) + +ADD_PSCI := 1 +PLAT_PSCI_PATH := 
$(PLAT_COMMON_PATH)/psci + +PSCI_SOURCES := ${PLAT_PSCI_PATH}/plat_psci.c \ + ${PLAT_PSCI_PATH}/$(ARCH)/psci_utils.S \ + plat/common/plat_psci_common.c + +PLAT_INCLUDES += -I${PLAT_PSCI_PATH}/include + +ifeq (${BL_COMM_PSCI_NEEDED},yes) +BL_COMMON_SOURCES += ${PSCI_SOURCES} +else +ifeq (${BL2_PSCI_NEEDED},yes) +BL2_SOURCES += ${PSCI_SOURCES} +endif +ifeq (${BL31_PSCI_NEEDED},yes) +BL31_SOURCES += ${PSCI_SOURCES} +endif +endif +endif +# ----------------------------------------------------------------------------- diff --git a/plat/nxp/common/setup/aarch64/ls_bl2_mem_params_desc.c b/plat/nxp/common/setup/aarch64/ls_bl2_mem_params_desc.c new file mode 100644 index 0000000..7463d47 --- /dev/null +++ b/plat/nxp/common/setup/aarch64/ls_bl2_mem_params_desc.c @@ -0,0 +1,103 @@ +/* + * Copyright 2018-2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <common/bl_common.h> +#include <common/desc_image_load.h> +#ifdef CSF_HEADER_PREPENDED +#include <csf_hdr.h> +#endif +#include <plat/common/platform.h> +#include <platform_def.h> + +/******************************************************************************* + * Following descriptor provides BL image/ep information that gets used + * by BL2 to load the images and also subset of this information is + * passed to next BL image. The image loading sequence is managed by + * populating the images in required loading order. The image execution + * sequence is managed by populating the `next_handoff_image_id` with + * the next executable image id. + ******************************************************************************/ +static bl_mem_params_node_t bl2_mem_params_descs[] = { + /* Fill BL31 related information */ + { + .image_id = BL31_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, + SECURE | EXECUTABLE | EP_FIRST_EXE), + .ep_info.pc = BL31_BASE, + .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS), +#if DEBUG + .ep_info.args.arg1 = LS_BL31_PLAT_PARAM_VAL, +#endif + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, IMAGE_ATTRIB_PLAT_SETUP), +#ifdef CSF_HEADER_PREPENDED + .image_info.image_base = BL31_BASE - CSF_HDR_SZ, + .image_info.image_max_size = (BL31_LIMIT - BL31_BASE) + + CSF_HDR_SZ, +#else + .image_info.image_base = BL31_BASE, + .image_info.image_max_size = (BL31_LIMIT - BL31_BASE), +#endif + +# ifdef NXP_LOAD_BL32 + .next_handoff_image_id = BL32_IMAGE_ID, +# else + .next_handoff_image_id = BL33_IMAGE_ID, +# endif + }, +# ifdef NXP_LOAD_BL32 + /* Fill BL32 related information */ + { + .image_id = BL32_IMAGE_ID, + + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, SECURE | EXECUTABLE), + .ep_info.pc = BL32_BASE, + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, 0), +#ifdef CSF_HEADER_PREPENDED + .image_info.image_base = BL32_BASE - CSF_HDR_SZ, + .image_info.image_max_size = (BL32_LIMIT - BL32_BASE) + + CSF_HDR_SZ, +#else + .image_info.image_base = BL32_BASE, + .image_info.image_max_size = (BL32_LIMIT - BL32_BASE), +#endif + .next_handoff_image_id = BL33_IMAGE_ID, + }, +# endif /* BL32_BASE */ + + /* Fill BL33 related information */ + { + .image_id = BL33_IMAGE_ID, + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, NON_SECURE | EXECUTABLE), + .ep_info.pc = BL33_BASE, + + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, 0), +#ifdef CSF_HEADER_PREPENDED + .image_info.image_base = BL33_BASE - CSF_HDR_SZ, + .image_info.image_max_size = 
(BL33_LIMIT - BL33_BASE) + + CSF_HDR_SZ, +#else + .image_info.image_base = BL33_BASE, + .image_info.image_max_size = BL33_LIMIT - BL33_BASE, +#endif + .ep_info.spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS), + + .next_handoff_image_id = INVALID_IMAGE_ID, + } +}; + +REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs) diff --git a/plat/nxp/common/setup/common.mk b/plat/nxp/common/setup/common.mk new file mode 100644 index 0000000..1fcf1d0 --- /dev/null +++ b/plat/nxp/common/setup/common.mk @@ -0,0 +1,105 @@ +# +# Copyright 2018-2021 NXP +# +# SPDX-License-Identifier: BSD-3-Clause +# +# + +############################################################################### +# Flow begins in BL2 at EL3 mode +BL2_AT_EL3 := 1 + +# Though one core is powered up by default, there are +# platform specific ways to release more than one core +COLD_BOOT_SINGLE_CPU := 0 + +PROGRAMMABLE_RESET_ADDRESS := 1 + +USE_COHERENT_MEM := 0 + +# Use generic OID definition (tbbr_oid.h) +USE_TBBR_DEFS := 1 + +PLAT_XLAT_TABLES_DYNAMIC := 0 + +ENABLE_SVE_FOR_NS := 0 + +ENABLE_STACK_PROTECTOR := 0 + +ERROR_DEPRECATED := 0 + +LS_DISABLE_TRUSTED_WDOG := 1 + +# On ARM platforms, separate the code and read-only data sections to allow +# mapping the former as executable and the latter as execute-never. +SEPARATE_CODE_AND_RODATA := 1 + +# Enable new version of image loading on ARM platforms +LOAD_IMAGE_V2 := 1 + +RCW := "" + +ifneq (${SPD},none) +$(eval $(call add_define, NXP_LOAD_BL32)) +endif + +############################################################################### + +PLAT_TOOL_PATH := tools/nxp +CREATE_PBL_TOOL_PATH := ${PLAT_TOOL_PATH}/create_pbl +PLAT_SETUP_PATH := ${PLAT_PATH}/common/setup + +PLAT_INCLUDES += -I${PLAT_SETUP_PATH}/include \ + -Iinclude/plat/arm/common \ + -Iinclude/drivers/arm \ + -Iinclude/lib \ + -Iinclude/drivers/io \ + -Ilib/psci + +# Required without TBBR. +# To include the defines for DDR PHY Images. 
+PLAT_INCLUDES += -Iinclude/common/tbbr + +include ${PLAT_SETUP_PATH}/core.mk +PLAT_BL_COMMON_SOURCES += ${CPU_LIBS} \ + plat/nxp/common/setup/ls_err.c \ + plat/nxp/common/setup/ls_common.c + +ifneq (${ENABLE_STACK_PROTECTOR},0) +PLAT_BL_COMMON_SOURCES += ${PLAT_SETUP_PATH}/ls_stack_protector.c +endif + +include lib/xlat_tables_v2/xlat_tables.mk + +PLAT_BL_COMMON_SOURCES += ${XLAT_TABLES_LIB_SRCS} + +BL2_SOURCES += drivers/io/io_fip.c \ + drivers/io/io_memmap.c \ + drivers/io/io_storage.c \ + common/desc_image_load.c \ + plat/nxp/common/setup/ls_image_load.c \ + plat/nxp/common/setup/ls_io_storage.c \ + plat/nxp/common/setup/ls_bl2_el3_setup.c \ + plat/nxp/common/setup/${ARCH}/ls_bl2_mem_params_desc.c + +BL31_SOURCES += plat/nxp/common/setup/ls_bl31_setup.c \ + +ifeq (${LS_EL3_INTERRUPT_HANDLER}, yes) +$(eval $(call add_define, LS_EL3_INTERRUPT_HANDLER)) +BL31_SOURCES += plat/nxp/common/setup/ls_interrupt_mgmt.c +endif + +ifeq (${TEST_BL31}, 1) +BL31_SOURCES += ${TEST_SOURCES} +endif + +# Verify build config +# ------------------- + +ifneq (${LOAD_IMAGE_V2}, 1) + $(error Error: Layerscape needs LOAD_IMAGE_V2=1) +else +$(eval $(call add_define,LOAD_IMAGE_V2)) +endif + +include $(CREATE_PBL_TOOL_PATH)/create_pbl.mk diff --git a/plat/nxp/common/setup/core.mk b/plat/nxp/common/setup/core.mk new file mode 100644 index 0000000..82ce30e --- /dev/null +++ b/plat/nxp/common/setup/core.mk @@ -0,0 +1,22 @@ +# Copyright 2018-2021 NXP +# +# SPDX-License-Identifier: BSD-3-Clause +# +# +#------------------------------------------------------------------------------ +# +# Select the CORE files +# +# ----------------------------------------------------------------------------- + +CPU_LIBS := lib/cpus/${ARCH}/aem_generic.S + +ifeq (,$(filter $(CORE_TYPE),a53 a72)) +$(error "CORE_TYPE not specified or incorrect") +else +UPPER_CORE_TYPE=$(shell echo $(CORE_TYPE) | tr a-z A-Z) +$(eval $(call add_define_val,CPUECTLR_EL1,CORTEX_$(UPPER_CORE_TYPE)_ECTLR_EL1)) +CPU_LIBS += lib/cpus/${ARCH}/cortex_$(CORE_TYPE).S +endif + +# ----------------------------------------------------------------------------- diff --git a/plat/nxp/common/setup/include/bl31_data.h b/plat/nxp/common/setup/include/bl31_data.h new file mode 100644 index 0000000..dd20d43 --- /dev/null +++ b/plat/nxp/common/setup/include/bl31_data.h @@ -0,0 +1,61 @@ +/* + * Copyright 2018-2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#ifndef BL31_DATA_H +#define BL31_DATA_H + +#define SECURE_DATA_BASE NXP_OCRAM_ADDR +#define SECURE_DATA_SIZE NXP_OCRAM_SIZE +#define SECURE_DATA_TOP (SECURE_DATA_BASE + SECURE_DATA_SIZE) +#define SMC_REGION_SIZE 0x80 +#define SMC_GLBL_BASE (SECURE_DATA_TOP - SMC_REGION_SIZE) +#define BC_PSCI_DATA_SIZE 0xC0 +#define BC_PSCI_BASE (SMC_GLBL_BASE - BC_PSCI_DATA_SIZE) +#define SECONDARY_TOP BC_PSCI_BASE + +#define SEC_PSCI_DATA_SIZE 0xC0 +#define SEC_REGION_SIZE SEC_PSCI_DATA_SIZE + +/* SMC global data */ +#define BOOTLOC_OFFSET 0x0 +#define BOOT_SVCS_OSET 0x8 + +/* offset to prefetch disable mask */ +#define PREFETCH_DIS_OFFSET 0x10 +/* must reference last smc global entry */ +#define LAST_SMC_GLBL_OFFSET 0x18 + +#define SMC_TASK_OFFSET 0xC +#define TSK_START_OFFSET 0x0 +#define TSK_DONE_OFFSET 0x4 +#define TSK_CORE_OFFSET 0x8 +#define SMC_TASK1_BASE (SMC_GLBL_BASE + 32) +#define SMC_TASK2_BASE (SMC_TASK1_BASE + SMC_TASK_OFFSET) +#define SMC_TASK3_BASE (SMC_TASK2_BASE + SMC_TASK_OFFSET) +#define SMC_TASK4_BASE (SMC_TASK3_BASE + SMC_TASK_OFFSET) + +/* psci data area offsets */ +#define CORE_STATE_DATA 0x0 +#define 
SPSR_EL3_DATA 0x8 +#define CNTXT_ID_DATA 0x10 +#define START_ADDR_DATA 0x18 +#define LINK_REG_DATA 0x20 +#define GICC_CTLR_DATA 0x28 +#define ABORT_FLAG_DATA 0x30 +#define SCTLR_DATA 0x38 +#define CPUECTLR_DATA 0x40 +#define AUX_01_DATA 0x48 /* usage defined per SoC */ +#define AUX_02_DATA 0x50 /* usage defined per SoC */ +#define AUX_03_DATA 0x58 /* usage defined per SoC */ +#define AUX_04_DATA 0x60 /* usage defined per SoC */ +#define AUX_05_DATA 0x68 /* usage defined per SoC */ +#define AUX_06_DATA 0x70 /* usage defined per SoC */ +#define AUX_07_DATA 0x78 /* usage defined per SoC */ +#define SCR_EL3_DATA 0x80 +#define HCR_EL2_DATA 0x88 + +#endif /* BL31_DATA_H */ diff --git a/plat/nxp/common/setup/include/ls_interrupt_mgmt.h b/plat/nxp/common/setup/include/ls_interrupt_mgmt.h new file mode 100644 index 0000000..7dbddfb --- /dev/null +++ b/plat/nxp/common/setup/include/ls_interrupt_mgmt.h @@ -0,0 +1,23 @@ +/* + * Copyright 2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#ifndef LS_EL3_INTRPT_MGMT_H +#define LS_EL3_INTRPT_MGMT_H + +#include <bl31/interrupt_mgmt.h> + +#define MAX_INTR_EL3 128 + +/* + * Register handler to specific GIC entrance + * for INTR_TYPE_EL3 type of interrupt + */ +int request_intr_type_el3(uint32_t id, interrupt_type_handler_t handler); + +void ls_el3_interrupt_config(void); + +#endif /* LS_EL3_INTRPT_MGMT_H */ diff --git a/plat/nxp/common/setup/include/mmu_def.h b/plat/nxp/common/setup/include/mmu_def.h new file mode 100644 index 0000000..2a7771b --- /dev/null +++ b/plat/nxp/common/setup/include/mmu_def.h @@ -0,0 +1,34 @@ +/* + * Copyright 2018-2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#ifndef MMU_MAP_DEF_H +#define MMU_MAP_DEF_H + +#include <lib/xlat_tables/xlat_tables_defs.h> + +#include <platform_def.h> + + +#define LS_MAP_CCSR MAP_REGION_FLAT(NXP_CCSR_ADDR, \ + NXP_CCSR_SIZE, \ + MT_DEVICE | MT_RW | MT_SECURE) + +#ifdef NXP_DCSR_ADDR +#define LS_MAP_DCSR MAP_REGION_FLAT(NXP_DCSR_ADDR, \ + NXP_DCSR_SIZE, \ + MT_DEVICE | MT_RW | MT_SECURE) +#endif + +#define LS_MAP_CONSOLE MAP_REGION_FLAT(NXP_DUART1_ADDR, \ + NXP_DUART_SIZE, \ + MT_DEVICE | MT_RW | MT_NS) + +#define LS_MAP_OCRAM MAP_REGION_FLAT(NXP_OCRAM_ADDR, \ + NXP_OCRAM_SIZE, \ + MT_DEVICE | MT_RW | MT_SECURE) + +#endif /* MMU_MAP_DEF_H */ diff --git a/plat/nxp/common/setup/include/plat_common.h b/plat/nxp/common/setup/include/plat_common.h new file mode 100644 index 0000000..e13f45c --- /dev/null +++ b/plat/nxp/common/setup/include/plat_common.h @@ -0,0 +1,152 @@ +/* + * Copyright 2018-2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#ifndef PLAT_COMMON_H +#define PLAT_COMMON_H + +#include <stdbool.h> + +#include <dcfg.h> +#include <lib/el3_runtime/cpu_data.h> + +#include <platform_def.h> + +#ifdef IMAGE_BL31 + +#define BL31_END (uintptr_t)(&__BL31_END__) + +/******************************************************************************* + * This structure represents the superset of information that can be passed to + * BL31 e.g. while passing control to it from BL2. The BL32 parameters will be + * populated only if BL2 detects its presence. A pointer to a structure of this + * type should be passed in X0 to BL31's cold boot entrypoint. + * + * Use of this structure and the X0 parameter is not mandatory: the BL31 + * platform code can use other mechanisms to provide the necessary information + * about BL32 and BL33 to the common and SPD code. + * + * BL31 image information is mandatory if this structure is used. 
If either of + * the optional BL32 and BL33 image information is not provided, this is + * indicated by the respective image_info pointers being zero. + ******************************************************************************/ +typedef struct bl31_params { + param_header_t h; + image_info_t *bl31_image_info; + entry_point_info_t *bl32_ep_info; + image_info_t *bl32_image_info; + entry_point_info_t *bl33_ep_info; + image_info_t *bl33_image_info; +} bl31_params_t; + +/* BL3 utility functions */ +void ls_bl31_early_platform_setup(void *from_bl2, + void *plat_params_from_bl2); +/* LS Helper functions */ +unsigned int plat_my_core_mask(void); +unsigned int plat_core_mask(u_register_t mpidr); +unsigned int plat_core_pos(u_register_t mpidr); +//unsigned int plat_my_core_pos(void); + +/* BL31 Data API(s) */ +void _init_global_data(void); +void _initialize_psci(void); +uint32_t _getCoreState(u_register_t core_mask); +void _setCoreState(u_register_t core_mask, u_register_t core_state); + +/* SoC defined structure and API(s) */ +void soc_runtime_setup(void); +void soc_init(void); +void soc_platform_setup(void); +void soc_early_platform_setup2(void); +#endif /* IMAGE_BL31 */ + +#ifdef IMAGE_BL2 +void soc_early_init(void); +void soc_mem_access(void); +void soc_preload_setup(void); +void soc_bl2_prepare_exit(void); + +/* IO storage utility functions */ +int plat_io_setup(void); +int open_backend(const uintptr_t spec); + +void ls_bl2_plat_arch_setup(void); +void ls_bl2_el3_plat_arch_setup(void); + +enum boot_device { + BOOT_DEVICE_IFC_NOR, + BOOT_DEVICE_IFC_NAND, + BOOT_DEVICE_QSPI, + BOOT_DEVICE_EMMC, + BOOT_DEVICE_SDHC2_EMMC, + BOOT_DEVICE_FLEXSPI_NOR, + BOOT_DEVICE_FLEXSPI_NAND, + BOOT_DEVICE_NONE +}; + +enum boot_device get_boot_dev(void); + +/* DDR Related functions */ +#if DDR_INIT +#ifdef NXP_WARM_BOOT +long long init_ddr(uint32_t wrm_bt_flg); +#else +long long init_ddr(void); +#endif +#endif + +/* Board specific weak functions */ +bool board_enable_povdd(void); +bool board_disable_povdd(void); + +void mmap_add_ddr_region_dynamically(void); +#endif /* IMAGE_BL2 */ + +typedef struct { + uint64_t addr; + uint64_t size; +} region_info_t; + +typedef struct { + uint64_t num_dram_regions; + int64_t total_dram_size; + region_info_t region[NUM_DRAM_REGIONS]; +} dram_regions_info_t; + +dram_regions_info_t *get_dram_regions_info(void); + +void ls_setup_page_tables(uintptr_t total_base, + size_t total_size, + uintptr_t code_start, + uintptr_t code_limit, + uintptr_t rodata_start, + uintptr_t rodata_limit +#if USE_COHERENT_MEM + , uintptr_t coh_start, + uintptr_t coh_limit +#endif +); + +#define SOC_NAME_MAX_LEN (20) + +/* Structure to define SoC personality */ +struct soc_type { + char name[SOC_NAME_MAX_LEN]; + uint32_t version; + uint8_t num_clusters; + uint8_t cores_per_cluster; +}; +void get_cluster_info(const struct soc_type *soc_list, uint8_t ps_count, + uint8_t *num_clusters, uint8_t *cores_per_cluster); + +#define SOC_ENTRY(n, v, ncl, nc) { \ + .name = #n, \ + .version = SVR_##v, \ + .num_clusters = (ncl), \ + .cores_per_cluster = (nc)} + +#endif /* PLAT_COMMON_H */ diff --git a/plat/nxp/common/setup/include/plat_macros.S b/plat/nxp/common/setup/include/plat_macros.S new file mode 100644 index 0000000..69a3b08 --- /dev/null +++ b/plat/nxp/common/setup/include/plat_macros.S @@ -0,0 +1,22 @@ +/* + * Copyright 2018-2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#ifndef PLAT_MACROS_S +#define PLAT_MACROS_S + + /* --------------------------------------------- + * The below required 
platform porting macro + * prints out relevant GIC and CCI registers + * whenever an unhandled exception is taken in + * BL31. + * Clobbers: x0 - x10, x16, x17, sp + * --------------------------------------------- + */ + .macro plat_crash_print_regs + .endm + +#endif /* PLAT_MACROS_S */ diff --git a/plat/nxp/common/setup/ls_bl2_el3_setup.c b/plat/nxp/common/setup/ls_bl2_el3_setup.c new file mode 100644 index 0000000..a4cbaef --- /dev/null +++ b/plat/nxp/common/setup/ls_bl2_el3_setup.c @@ -0,0 +1,303 @@ +/* + * Copyright 2018-2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <assert.h> + +#include <common/desc_image_load.h> +#include <dcfg.h> +#ifdef POLICY_FUSE_PROVISION +#include <fuse_io.h> +#endif +#include <mmu_def.h> +#include <plat_common.h> +#ifdef NXP_NV_SW_MAINT_LAST_EXEC_DATA +#include <plat_nv_storage.h> +#endif + +#pragma weak bl2_el3_early_platform_setup +#pragma weak bl2_el3_plat_arch_setup +#pragma weak bl2_el3_plat_prepare_exit + +static dram_regions_info_t dram_regions_info = {0}; + +/******************************************************************************* + * Return the pointer to the 'dram_regions_info structure of the DRAM. + * This structure is populated after init_ddr(). + ******************************************************************************/ +dram_regions_info_t *get_dram_regions_info(void) +{ + return &dram_regions_info; +} + +#ifdef DDR_INIT +static void populate_dram_regions_info(void) +{ + long long dram_remain_size = dram_regions_info.total_dram_size; + uint8_t reg_id = 0U; + + dram_regions_info.region[reg_id].addr = NXP_DRAM0_ADDR; + dram_regions_info.region[reg_id].size = + dram_remain_size > NXP_DRAM0_MAX_SIZE ? + NXP_DRAM0_MAX_SIZE : dram_remain_size; + + if (dram_regions_info.region[reg_id].size != NXP_DRAM0_SIZE) { + ERROR("Incorrect DRAM0 size is defined in platform_def.h\n"); + } + + dram_remain_size -= dram_regions_info.region[reg_id].size; + dram_regions_info.region[reg_id].size -= (NXP_SECURE_DRAM_SIZE + + NXP_SP_SHRD_DRAM_SIZE); + + assert(dram_regions_info.region[reg_id].size > 0); + + /* Reducing total dram size by 66MB */ + dram_regions_info.total_dram_size -= (NXP_SECURE_DRAM_SIZE + + NXP_SP_SHRD_DRAM_SIZE); + +#if defined(NXP_DRAM1_ADDR) && defined(NXP_DRAM1_MAX_SIZE) + if (dram_remain_size > 0) { + reg_id++; + dram_regions_info.region[reg_id].addr = NXP_DRAM1_ADDR; + dram_regions_info.region[reg_id].size = + dram_remain_size > NXP_DRAM1_MAX_SIZE ? + NXP_DRAM1_MAX_SIZE : dram_remain_size; + dram_remain_size -= dram_regions_info.region[reg_id].size; + } +#endif +#if defined(NXP_DRAM2_ADDR) && defined(NXP_DRAM2_MAX_SIZE) + if (dram_remain_size > 0) { + reg_id++; + dram_regions_info.region[reg_id].addr = NXP_DRAM1_ADDR; + dram_regions_info.region[reg_id].size = + dram_remain_size > NXP_DRAM1_MAX_SIZE ? + NXP_DRAM1_MAX_SIZE : dram_remain_size; + dram_remain_size -= dram_regions_info.region[reg_id].size; + } +#endif + reg_id++; + dram_regions_info.num_dram_regions = reg_id; +} +#endif + +#ifdef IMAGE_BL32 +/******************************************************************************* + * Gets SPSR for BL32 entry + ******************************************************************************/ +static uint32_t ls_get_spsr_for_bl32_entry(void) +{ + /* + * The Secure Payload Dispatcher service is responsible for + * setting the SPSR prior to entry into the BL32 image. 
+ */ + return 0U; +} +#endif + +/******************************************************************************* + * Gets SPSR for BL33 entry + ******************************************************************************/ +#ifndef AARCH32 +static uint32_t ls_get_spsr_for_bl33_entry(void) +{ + unsigned int mode; + uint32_t spsr; + + /* Figure out what mode we enter the non-secure world in */ + mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1; + + /* + * TODO: Consider the possibility of specifying the SPSR in + * the FIP ToC and allowing the platform to have a say as + * well. + */ + spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); + return spsr; +} +#else +/******************************************************************************* + * Gets SPSR for BL33 entry + ******************************************************************************/ +static uint32_t ls_get_spsr_for_bl33_entry(void) +{ + unsigned int hyp_status, mode, spsr; + + hyp_status = GET_VIRT_EXT(read_id_pfr1()); + + mode = (hyp_status) ? MODE32_hyp : MODE32_svc; + + /* + * TODO: Consider the possibility of specifying the SPSR in + * the FIP ToC and allowing the platform to have a say as + * well. + */ + spsr = SPSR_MODE32(mode, plat_get_ns_image_entrypoint() & 0x1, + SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS); + return spsr; +} +#endif /* AARCH32 */ + +void bl2_el3_early_platform_setup(u_register_t arg0 __unused, + u_register_t arg1 __unused, + u_register_t arg2 __unused, + u_register_t arg3 __unused) +{ + /* + * SoC specific early init + * Any errata handling or SoC specific early initialization can + * be done here + * Set Counter Base Frequency in CNTFID0 and in cntfrq_el0. + * Initialize the interconnect. + * Enable coherency for primary CPU cluster + */ + soc_early_init(); + + /* Initialise the IO layer and register platform IO devices */ + plat_io_setup(); + + if (dram_regions_info.total_dram_size > 0) { + populate_dram_regions_info(); + } + +#ifdef NXP_NV_SW_MAINT_LAST_EXEC_DATA + read_nv_app_data(); +#if DEBUG + const nv_app_data_t *nv_app_data = get_nv_data(); + + INFO("Value of warm_reset flag = 0x%x\n", nv_app_data->warm_rst_flag); + INFO("Value of WDT flag = 0x%x\n", nv_app_data->wdt_rst_flag); +#endif +#endif +} + +/******************************************************************************* + * Perform the very early platform specific architectural setup here. At the + * moment this is only initializes the mmu in a quick and dirty way. + ******************************************************************************/ +void ls_bl2_el3_plat_arch_setup(void) +{ + unsigned int flags = 0U; + /* Initialise the IO layer and register platform IO devices */ + ls_setup_page_tables( +#if SEPARATE_BL2_NOLOAD_REGION + BL2_START, + BL2_LIMIT - BL2_START, +#else + BL2_BASE, + (unsigned long)(&__BL2_END__) - BL2_BASE, +#endif + BL_CODE_BASE, + BL_CODE_END, + BL_RO_DATA_BASE, + BL_RO_DATA_END +#if USE_COHERENT_MEM + , BL_COHERENT_RAM_BASE, + BL_COHERENT_RAM_END +#endif + ); + + if ((dram_regions_info.region[0].addr == 0) + && (dram_regions_info.total_dram_size == 0)) { + flags = XLAT_TABLE_NC; + } + +#ifdef AARCH32 + enable_mmu_secure(0); +#else + enable_mmu_el3(flags); +#endif +} + +void bl2_el3_plat_arch_setup(void) +{ + ls_bl2_el3_plat_arch_setup(); +} + +void bl2_platform_setup(void) +{ + /* + * Perform platform setup before loading the image. + */ +} + +/* Handling image information by platform. 
*/ +int ls_bl2_handle_post_image_load(unsigned int image_id) +{ + int err = 0; + bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id); + + assert(bl_mem_params); + + switch (image_id) { + case BL31_IMAGE_ID: + bl_mem_params->ep_info.args.arg3 = + (u_register_t) &dram_regions_info; + + /* Pass the value of PORSR1 register in Argument 4 */ + bl_mem_params->ep_info.args.arg4 = + (u_register_t)read_reg_porsr1(); + flush_dcache_range((uintptr_t)&dram_regions_info, + sizeof(dram_regions_info)); + break; +#if defined(AARCH64) && defined(IMAGE_BL32) + case BL32_IMAGE_ID: + bl_mem_params->ep_info.spsr = ls_get_spsr_for_bl32_entry(); + break; +#endif + case BL33_IMAGE_ID: + /* BL33 expects to receive the primary CPU MPID (through r0) */ + bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr(); + bl_mem_params->ep_info.spsr = ls_get_spsr_for_bl33_entry(); + break; + } + + return err; +} + +/******************************************************************************* + * This function can be used by the platforms to update/use image + * information for given `image_id`. + ******************************************************************************/ +int bl2_plat_handle_post_image_load(unsigned int image_id) +{ + return ls_bl2_handle_post_image_load(image_id); +} + +void bl2_el3_plat_prepare_exit(void) +{ + return soc_bl2_prepare_exit(); +} + +/* Called to do the dynamic initialization required + * before loading the next image. + */ +void bl2_plat_preload_setup(void) +{ + + soc_preload_setup(); + +#ifdef DDR_INIT + if (dram_regions_info.total_dram_size <= 0) { + ERROR("Asserting as the DDR is not initialized yet."); + assert(false); + } +#endif + + if ((dram_regions_info.region[0].addr == 0) + && (dram_regions_info.total_dram_size > 0)) { + populate_dram_regions_info(); +#ifdef PLAT_XLAT_TABLES_DYNAMIC + mmap_add_ddr_region_dynamically(); +#endif + } + + /* setup the memory region access permissions */ + soc_mem_access(); + +#ifdef POLICY_FUSE_PROVISION + fip_fuse_provisioning((uintptr_t)FUSE_BUF, FUSE_SZ); +#endif +} diff --git a/plat/nxp/common/setup/ls_bl31_setup.c b/plat/nxp/common/setup/ls_bl31_setup.c new file mode 100644 index 0000000..bd0ab4f --- /dev/null +++ b/plat/nxp/common/setup/ls_bl31_setup.c @@ -0,0 +1,212 @@ +/* + * Copyright 2018-2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <assert.h> +#include <inttypes.h> +#include <stdint.h> + +#ifdef LS_EL3_INTERRUPT_HANDLER +#include <ls_interrupt_mgmt.h> +#endif +#include <mmu_def.h> +#include <plat_common.h> + +/* + * Placeholder variables for copying the arguments that have been passed to + * BL31 from BL2. + */ +#ifdef TEST_BL31 +#define SPSR_FOR_EL2H 0x3C9 +#define SPSR_FOR_EL1H 0x3C5 +#else +static entry_point_info_t bl31_image_ep_info; +#endif + +static entry_point_info_t bl32_image_ep_info; +static entry_point_info_t bl33_image_ep_info; + +static dram_regions_info_t dram_regions_info = {0}; +static uint64_t rcw_porsr1; + +/* Return the pointer to the 'dram_regions_info structure of the DRAM. + * This structure is populated after init_ddr(). + */ +dram_regions_info_t *get_dram_regions_info(void) +{ + return &dram_regions_info; +} + +/* Return the RCW.PORSR1 value which was passed in from BL2 + */ +uint64_t bl31_get_porsr1(void) +{ + return rcw_porsr1; +} + +/* + * Return pointer to the 'entry_point_info' structure of the next image for the + * security state specified: + * - BL33 corresponds to the non-secure image type; while + * - BL32 corresponds to the secure image type. 
+ * - A NULL pointer is returned, if the image does not exist. + */ +entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type) +{ + entry_point_info_t *next_image_info; + + assert(sec_state_is_valid(type)); + next_image_info = (type == NON_SECURE) + ? &bl33_image_ep_info : &bl32_image_ep_info; + +#ifdef TEST_BL31 + next_image_info->pc = _get_test_entry(); + next_image_info->spsr = SPSR_FOR_EL2H; + next_image_info->h.attr = NON_SECURE; +#endif + + if (next_image_info->pc != 0U) { + return next_image_info; + } else { + return NULL; + } +} + +/* + * Perform any BL31 early platform setup common to NXP platforms. + * - Here is an opportunity to copy parameters passed by the calling EL (S-EL1 + * in BL2 & S-EL3 in BL1) before they are lost (potentially). + * - This needs to be done before the MMU is initialized so that the + * memory layout can be used while creating page tables. + * - BL2 has flushed this information to memory, in order to fetch latest data. + */ + +void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1, + u_register_t arg2, u_register_t arg3) +{ +#ifndef TEST_BL31 + int i = 0; + void *from_bl2 = (void *)arg0; +#endif + soc_early_platform_setup2(); + +#ifdef TEST_BL31 + dram_regions_info.num_dram_regions = 2; + dram_regions_info.total_dram_size = 0x100000000; + dram_regions_info.region[0].addr = 0x80000000; + dram_regions_info.region[0].size = 0x80000000; + dram_regions_info.region[1].addr = 0x880000000; + dram_regions_info.region[1].size = 0x80000000; + + bl33_image_ep_info.pc = _get_test_entry(); +#else + /* + * Check params passed from BL2 should not be NULL, + */ + bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2; + + assert(params_from_bl2 != NULL); + assert(params_from_bl2->h.type == PARAM_BL_PARAMS); + assert(params_from_bl2->h.version >= VERSION_2); + + bl_params_node_t *bl_params = params_from_bl2->head; + + /* + * Copy BL33 and BL32 (if present), entry point information. + * They are stored in Secure RAM, in BL2's address space. 
+ */ + while (bl_params != NULL) { + if (bl_params->image_id == BL31_IMAGE_ID) { + bl31_image_ep_info = *bl_params->ep_info; + dram_regions_info_t *loc_dram_regions_info = + (dram_regions_info_t *) bl31_image_ep_info.args.arg3; + + dram_regions_info.num_dram_regions = + loc_dram_regions_info->num_dram_regions; + dram_regions_info.total_dram_size = + loc_dram_regions_info->total_dram_size; + VERBOSE("Number of DRAM Regions = %" PRIx64 "\n", + dram_regions_info.num_dram_regions); + + for (i = 0; i < dram_regions_info.num_dram_regions; + i++) { + dram_regions_info.region[i].addr = + loc_dram_regions_info->region[i].addr; + dram_regions_info.region[i].size = + loc_dram_regions_info->region[i].size; + VERBOSE("DRAM%d Size = %" PRIx64 "\n", i, + dram_regions_info.region[i].size); + } + rcw_porsr1 = bl31_image_ep_info.args.arg4; + } + + if (bl_params->image_id == BL32_IMAGE_ID) { + bl32_image_ep_info = *bl_params->ep_info; + } + + if (bl_params->image_id == BL33_IMAGE_ID) { + bl33_image_ep_info = *bl_params->ep_info; + } + + bl_params = bl_params->next_params_info; + } +#endif /* TEST_BL31 */ + + if (bl33_image_ep_info.pc == 0) { + panic(); + } + + /* + * perform basic initialization on the soc + */ + soc_init(); +} + +/******************************************************************************* + * Perform any BL31 platform setup common to ARM standard platforms + ******************************************************************************/ +void bl31_platform_setup(void) +{ + NOTICE("Welcome to %s BL31 Phase\n", BOARD); + soc_platform_setup(); + + /* Console logs gone missing as part going to + * EL1 for initilizing Bl32 if present. + * console flush is necessary to avoid it. + */ + (void)console_flush(); +} + +void bl31_plat_runtime_setup(void) +{ +#ifdef LS_EL3_INTERRUPT_HANDLER + ls_el3_interrupt_config(); +#endif + soc_runtime_setup(); +} + +/******************************************************************************* + * Perform the very early platform specific architectural setup shared between + * ARM standard platforms. This only does basic initialization. Later + * architectural setup (bl31_arch_setup()) does not do anything platform + * specific. + ******************************************************************************/ +void bl31_plat_arch_setup(void) +{ + + ls_setup_page_tables(BL31_BASE, + BL31_END - BL31_BASE, + BL_CODE_BASE, + BL_CODE_END, + BL_RO_DATA_BASE, + BL_RO_DATA_END +#if USE_COHERENT_MEM + , BL_COHERENT_RAM_BASE, + BL_COHERENT_RAM_END +#endif + ); + enable_mmu_el3(0); +} diff --git a/plat/nxp/common/setup/ls_common.c b/plat/nxp/common/setup/ls_common.c new file mode 100644 index 0000000..28d6b72 --- /dev/null +++ b/plat/nxp/common/setup/ls_common.c @@ -0,0 +1,277 @@ +/* + * Copyright 2018-2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <assert.h> + +#include <arch.h> +#include <arch_helpers.h> +#include <common/debug.h> +#include <lib/mmio.h> +#include <lib/xlat_tables/xlat_tables_v2.h> +#include <mmu_def.h> +#include <plat/common/platform.h> + +#include "plat_common.h" +#include "platform_def.h" + +const mmap_region_t *plat_ls_get_mmap(void); + +/* + * Table of memory regions for various BL stages to map using the MMU. + * This doesn't include Trusted SRAM as arm_setup_page_tables() already + * takes care of mapping it. + * + * The flash needs to be mapped as writable in order to erase the FIP's Table of + * Contents in case of unrecoverable error (see plat_error_handler()). 
+ */ +#ifdef IMAGE_BL2 +const mmap_region_t plat_ls_mmap[] = { + LS_MAP_CCSR, + {0} +}; +#endif + +#ifdef IMAGE_BL31 +const mmap_region_t plat_ls_mmap[] = { + LS_MAP_CCSR, +#ifdef NXP_DCSR_ADDR + LS_MAP_DCSR, +#endif + LS_MAP_OCRAM, + {0} +}; +#endif +#ifdef IMAGE_BL32 +const mmap_region_t plat_ls_mmap[] = { + LS_MAP_CCSR, + LS_MAP_BL32_SEC_MEM, + {0} +}; +#endif + +/* Weak definitions may be overridden in specific NXP SoC */ +#pragma weak plat_get_ns_image_entrypoint +#pragma weak plat_ls_get_mmap + +#if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE) +static void mmap_add_ddr_regions_statically(void) +{ + int i = 0; + dram_regions_info_t *info_dram_regions = get_dram_regions_info(); + /* MMU map for Non-Secure DRAM Regions */ + VERBOSE("DRAM Region %d: %p - %p\n", i, + (void *) info_dram_regions->region[i].addr, + (void *) (info_dram_regions->region[i].addr + + info_dram_regions->region[i].size + - 1)); + mmap_add_region(info_dram_regions->region[i].addr, + info_dram_regions->region[i].addr, + info_dram_regions->region[i].size, + MT_MEMORY | MT_RW | MT_NS); + + /* MMU map for Secure DDR Region on DRAM-0 */ + if (info_dram_regions->region[i].size > + (NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) { + VERBOSE("Secure DRAM Region %d: %p - %p\n", i, + (void *) (info_dram_regions->region[i].addr + + info_dram_regions->region[i].size), + (void *) (info_dram_regions->region[i].addr + + info_dram_regions->region[i].size + + NXP_SECURE_DRAM_SIZE + + NXP_SP_SHRD_DRAM_SIZE + - 1)); + mmap_add_region((info_dram_regions->region[i].addr + + info_dram_regions->region[i].size), + (info_dram_regions->region[i].addr + + info_dram_regions->region[i].size), + (NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE), + MT_MEMORY | MT_RW | MT_SECURE); + } + +#ifdef IMAGE_BL31 + for (i = 1; i < info_dram_regions->num_dram_regions; i++) { + if (info_dram_regions->region[i].size == 0) + break; + VERBOSE("DRAM Region %d: %p - %p\n", i, + (void *) info_dram_regions->region[i].addr, + (void *) (info_dram_regions->region[i].addr + + info_dram_regions->region[i].size + - 1)); + mmap_add_region(info_dram_regions->region[i].addr, + info_dram_regions->region[i].addr, + info_dram_regions->region[i].size, + MT_MEMORY | MT_RW | MT_NS); + } +#endif +} +#endif + +#if defined(PLAT_XLAT_TABLES_DYNAMIC) +void mmap_add_ddr_region_dynamically(void) +{ + int ret, i = 0; + + dram_regions_info_t *info_dram_regions = get_dram_regions_info(); + /* MMU map for Non-Secure DRAM Regions */ + VERBOSE("DRAM Region %d: %p - %p\n", i, + (void *) info_dram_regions->region[i].addr, + (void *) (info_dram_regions->region[i].addr + + info_dram_regions->region[i].size + - 1)); + ret = mmap_add_dynamic_region(info_dram_regions->region[i].addr, + info_dram_regions->region[i].addr, + info_dram_regions->region[i].size, + MT_MEMORY | MT_RW | MT_NS); + if (ret != 0) { + ERROR("Failed to add dynamic memory region\n"); + panic(); + } + + /* MMU map for Secure DDR Region on DRAM-0 */ + if (info_dram_regions->region[i].size > + (NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) { + VERBOSE("Secure DRAM Region %d: %p - %p\n", i, + (void *) (info_dram_regions->region[i].addr + + info_dram_regions->region[i].size), + (void *) (info_dram_regions->region[i].addr + + info_dram_regions->region[i].size + + NXP_SECURE_DRAM_SIZE + + NXP_SP_SHRD_DRAM_SIZE + - 1)); + ret = mmap_add_dynamic_region((info_dram_regions->region[i].addr + + info_dram_regions->region[i].size), + (info_dram_regions->region[i].addr + + info_dram_regions->region[i].size), + (NXP_SECURE_DRAM_SIZE + 
NXP_SP_SHRD_DRAM_SIZE), + MT_MEMORY | MT_RW | MT_SECURE); + if (ret != 0) { + ERROR("Failed to add dynamic memory region\n"); + panic(); + } + } + +#ifdef IMAGE_BL31 + for (i = 1; i < info_dram_regions->num_dram_regions; i++) { + if (info_dram_regions->region[i].size == 0) { + break; + } + VERBOSE("DRAM Region %d: %p - %p\n", i, + (void *) info_dram_regions->region[i].addr, + (void *) (info_dram_regions->region[i].addr + + info_dram_regions->region[i].size + - 1)); + ret = mmap_add_dynamic_region(info_dram_regions->region[i].addr, + info_dram_regions->region[i].addr, + info_dram_regions->region[i].size, + MT_MEMORY | MT_RW | MT_NS); + if (ret != 0) { + ERROR("Failed to add dynamic memory region\n"); + panic(); + } + } +#endif +} +#endif + +/* + * Set up the page tables for the generic and platform-specific memory regions. + * The extents of the generic memory regions are specified by the function + * arguments and consist of: + * - Trusted SRAM seen by the BL image; + * - Code section; + * - Read-only data section; + * - Coherent memory region, if applicable. + */ +void ls_setup_page_tables(uintptr_t total_base, + size_t total_size, + uintptr_t code_start, + uintptr_t code_limit, + uintptr_t rodata_start, + uintptr_t rodata_limit +#if USE_COHERENT_MEM + , + uintptr_t coh_start, + uintptr_t coh_limit +#endif + ) +{ + /* + * Map the Trusted SRAM with appropriate memory attributes. + * Subsequent mappings will adjust the attributes for specific regions. + */ + VERBOSE("Memory seen by this BL image: %p - %p\n", + (void *) total_base, (void *) (total_base + total_size)); + mmap_add_region(total_base, total_base, + total_size, + MT_MEMORY | MT_RW | MT_SECURE); + + /* Re-map the code section */ + VERBOSE("Code region: %p - %p\n", + (void *) code_start, (void *) code_limit); + mmap_add_region(code_start, code_start, + code_limit - code_start, + MT_CODE | MT_SECURE); + + /* Re-map the read-only data section */ + VERBOSE("Read-only data region: %p - %p\n", + (void *) rodata_start, (void *) rodata_limit); + mmap_add_region(rodata_start, rodata_start, + rodata_limit - rodata_start, + MT_RO_DATA | MT_SECURE); + +#if USE_COHERENT_MEM + /* Re-map the coherent memory region */ + VERBOSE("Coherent region: %p - %p\n", + (void *) coh_start, (void *) coh_limit); + mmap_add_region(coh_start, coh_start, + coh_limit - coh_start, + MT_DEVICE | MT_RW | MT_SECURE); +#endif + + /* Now (re-)map the platform-specific memory regions */ + mmap_add(plat_ls_get_mmap()); + + +#if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE) + mmap_add_ddr_regions_statically(); +#endif + + /* Create the page tables to reflect the above mappings */ + init_xlat_tables(); +} + +/******************************************************************************* + * Returns NXP platform specific memory map regions. + ******************************************************************************/ +const mmap_region_t *plat_ls_get_mmap(void) +{ + return plat_ls_mmap; +} + +/* + * This function get the number of clusters and cores count per cluster + * in the SoC. 
+ */ +void get_cluster_info(const struct soc_type *soc_list, uint8_t ps_count, + uint8_t *num_clusters, uint8_t *cores_per_cluster) +{ + const soc_info_t *soc_info = get_soc_info(); + *num_clusters = NUMBER_OF_CLUSTERS; + *cores_per_cluster = CORES_PER_CLUSTER; + unsigned int i; + + for (i = 0U; i < ps_count; i++) { + if (soc_list[i].version == soc_info->svr_reg.bf_ver.version) { + *num_clusters = soc_list[i].num_clusters; + *cores_per_cluster = soc_list[i].cores_per_cluster; + break; + } + } + + VERBOSE("NUM of cluster = 0x%x, Cores per cluster = 0x%x\n", + *num_clusters, *cores_per_cluster); +} diff --git a/plat/nxp/common/setup/ls_err.c b/plat/nxp/common/setup/ls_err.c new file mode 100644 index 0000000..845cd15 --- /dev/null +++ b/plat/nxp/common/setup/ls_err.c @@ -0,0 +1,55 @@ +/* + * Copyright 2018-2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <errno.h> +#include <stdbool.h> +#include <stdint.h> + +#include <arch_helpers.h> +#include <common/debug.h> + +#if TRUSTED_BOARD_BOOT +#include <dcfg.h> +#include <snvs.h> +#endif + +#include "plat_common.h" + +/* + * Error handler + */ +void plat_error_handler(int err) +{ +#if TRUSTED_BOARD_BOOT + uint32_t mode; + bool sb = check_boot_mode_secure(&mode); +#endif + + switch (err) { + case -ENOENT: + case -EAUTH: + printf("Authentication failure\n"); +#if TRUSTED_BOARD_BOOT + /* For SB production mode i.e ITS = 1 */ + if (sb == true) { + if (mode == 1U) { + transition_snvs_soft_fail(); + } else { + transition_snvs_non_secure(); + } + } +#endif + break; + default: + /* Unexpected error */ + break; + } + + /* Loop until the watchdog resets the system */ + for (;;) + wfi(); +} diff --git a/plat/nxp/common/setup/ls_image_load.c b/plat/nxp/common/setup/ls_image_load.c new file mode 100644 index 0000000..259ab31 --- /dev/null +++ b/plat/nxp/common/setup/ls_image_load.c @@ -0,0 +1,33 @@ +/* + * Copyright 2018-2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <common/desc_image_load.h> + +/******************************************************************************* + * This function flushes the data structures so that they are visible + * in memory for the next BL image. + ******************************************************************************/ +void plat_flush_next_bl_params(void) +{ + flush_bl_params_desc(); +} + +/******************************************************************************* + * This function returns the list of loadable images. + ******************************************************************************/ +bl_load_info_t *plat_get_bl_image_load_info(void) +{ + return get_bl_load_info_from_mem_params_desc(); +} + +/******************************************************************************* + * This function returns the list of executable images. 
+ ******************************************************************************/ +bl_params_t *plat_get_next_bl_params(void) +{ + return get_next_bl_params_from_mem_params_desc(); +} diff --git a/plat/nxp/common/setup/ls_interrupt_mgmt.c b/plat/nxp/common/setup/ls_interrupt_mgmt.c new file mode 100644 index 0000000..a81cb2b --- /dev/null +++ b/plat/nxp/common/setup/ls_interrupt_mgmt.c @@ -0,0 +1,66 @@ +/* + * Copyright 2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <bl31/interrupt_mgmt.h> +#include <common/debug.h> +#include <ls_interrupt_mgmt.h> +#include <plat/common/platform.h> + +static interrupt_type_handler_t type_el3_interrupt_table[MAX_INTR_EL3]; + +int request_intr_type_el3(uint32_t id, interrupt_type_handler_t handler) +{ + /* Validate 'handler' and 'id' parameters */ + if (!handler || id >= MAX_INTR_EL3) { + return -EINVAL; + } + + /* Check if a handler has already been registered */ + if (type_el3_interrupt_table[id] != NULL) { + return -EALREADY; + } + + type_el3_interrupt_table[id] = handler; + + return 0; +} + +static uint64_t ls_el3_interrupt_handler(uint32_t id, uint32_t flags, + void *handle, void *cookie) +{ + uint32_t intr_id; + interrupt_type_handler_t handler; + + intr_id = plat_ic_get_pending_interrupt_id(); + + INFO("Interrupt recvd is %d\n", intr_id); + + handler = type_el3_interrupt_table[intr_id]; + if (handler != NULL) { + handler(intr_id, flags, handle, cookie); + } + + /* + * Mark this interrupt as complete to avoid a interrupt storm. + */ + plat_ic_end_of_interrupt(intr_id); + + return 0U; +} + +void ls_el3_interrupt_config(void) +{ + uint64_t flags = 0U; + uint64_t rc; + + set_interrupt_rm_flag(flags, NON_SECURE); + rc = register_interrupt_type_handler(INTR_TYPE_EL3, + ls_el3_interrupt_handler, flags); + if (rc != 0U) { + panic(); + } +} diff --git a/plat/nxp/common/setup/ls_io_storage.c b/plat/nxp/common/setup/ls_io_storage.c new file mode 100644 index 0000000..7f01e72 --- /dev/null +++ b/plat/nxp/common/setup/ls_io_storage.c @@ -0,0 +1,556 @@ +/* + * Copyright 2018-2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <assert.h> +#include <endian.h> +#include <string.h> + +#include <common/debug.h> +#include <common/tbbr/tbbr_img_def.h> +#include <drivers/io/io_block.h> +#include <drivers/io/io_driver.h> +#include <drivers/io/io_fip.h> +#include <drivers/io/io_memmap.h> +#include <drivers/io/io_storage.h> +#ifdef FLEXSPI_NOR_BOOT +#include <flexspi_nor.h> +#endif +#if defined(NAND_BOOT) +#include <ifc_nand.h> +#endif +#if defined(NOR_BOOT) +#include <ifc_nor.h> +#endif +#if defined(QSPI_BOOT) +#include <qspi.h> +#endif +#if defined(SD_BOOT) || defined(EMMC_BOOT) +#include <sd_mmc.h> +#endif +#include <tools_share/firmware_image_package.h> + +#ifdef CONFIG_DDR_FIP_IMAGE +#include <ddr_io_storage.h> +#endif +#ifdef POLICY_FUSE_PROVISION +#include <fuse_io.h> +#endif +#include "plat_common.h" +#include "platform_def.h" + +uint32_t fip_device; +/* IO devices */ +uintptr_t backend_dev_handle; + +static const io_dev_connector_t *fip_dev_con; +static uintptr_t fip_dev_handle; +static const io_dev_connector_t *backend_dev_con; + +static io_block_spec_t fip_block_spec = { + .offset = PLAT_FIP_OFFSET, + .length = PLAT_FIP_MAX_SIZE +}; + +static const io_uuid_spec_t bl2_uuid_spec = { + .uuid = UUID_TRUSTED_BOOT_FIRMWARE_BL2, +}; + +static const io_uuid_spec_t fuse_bl2_uuid_spec = { + .uuid = UUID_SCP_FIRMWARE_SCP_BL2, +}; + +static const io_uuid_spec_t bl31_uuid_spec = { + .uuid = UUID_EL3_RUNTIME_FIRMWARE_BL31, +}; 
+ +static const io_uuid_spec_t bl32_uuid_spec = { + .uuid = UUID_SECURE_PAYLOAD_BL32, +}; + +static const io_uuid_spec_t bl33_uuid_spec = { + .uuid = UUID_NON_TRUSTED_FIRMWARE_BL33, +}; + +static const io_uuid_spec_t tb_fw_config_uuid_spec = { + .uuid = UUID_TB_FW_CONFIG, +}; + +static const io_uuid_spec_t hw_config_uuid_spec = { + .uuid = UUID_HW_CONFIG, +}; + +#if TRUSTED_BOARD_BOOT +static const io_uuid_spec_t tb_fw_cert_uuid_spec = { + .uuid = UUID_TRUSTED_BOOT_FW_CERT, +}; + +static const io_uuid_spec_t trusted_key_cert_uuid_spec = { + .uuid = UUID_TRUSTED_KEY_CERT, +}; + +static const io_uuid_spec_t fuse_key_cert_uuid_spec = { + .uuid = UUID_SCP_FW_KEY_CERT, +}; + +static const io_uuid_spec_t soc_fw_key_cert_uuid_spec = { + .uuid = UUID_SOC_FW_KEY_CERT, +}; + +static const io_uuid_spec_t tos_fw_key_cert_uuid_spec = { + .uuid = UUID_TRUSTED_OS_FW_KEY_CERT, +}; + +static const io_uuid_spec_t nt_fw_key_cert_uuid_spec = { + .uuid = UUID_NON_TRUSTED_FW_KEY_CERT, +}; + +static const io_uuid_spec_t fuse_cert_uuid_spec = { + .uuid = UUID_SCP_FW_CONTENT_CERT, +}; + +static const io_uuid_spec_t soc_fw_cert_uuid_spec = { + .uuid = UUID_SOC_FW_CONTENT_CERT, +}; + +static const io_uuid_spec_t tos_fw_cert_uuid_spec = { + .uuid = UUID_TRUSTED_OS_FW_CONTENT_CERT, +}; + +static const io_uuid_spec_t nt_fw_cert_uuid_spec = { + .uuid = UUID_NON_TRUSTED_FW_CONTENT_CERT, +}; +#endif /* TRUSTED_BOARD_BOOT */ + +static int open_fip(const uintptr_t spec); + +struct plat_io_policy { + uintptr_t *dev_handle; + uintptr_t image_spec; + int (*check)(const uintptr_t spec); +}; + +/* By default, ARM platforms load images from the FIP */ +static const struct plat_io_policy policies[] = { + [FIP_IMAGE_ID] = { + &backend_dev_handle, + (uintptr_t)&fip_block_spec, + open_backend + }, + [BL2_IMAGE_ID] = { + &fip_dev_handle, + (uintptr_t)&bl2_uuid_spec, + open_fip + }, + [SCP_BL2_IMAGE_ID] = { + &fip_dev_handle, + (uintptr_t)&fuse_bl2_uuid_spec, + open_fip + }, + [BL31_IMAGE_ID] = { + &fip_dev_handle, + (uintptr_t)&bl31_uuid_spec, + open_fip + }, + [BL32_IMAGE_ID] = { + &fip_dev_handle, + (uintptr_t)&bl32_uuid_spec, + open_fip + }, + [BL33_IMAGE_ID] = { + &fip_dev_handle, + (uintptr_t)&bl33_uuid_spec, + open_fip + }, + [TB_FW_CONFIG_ID] = { + &fip_dev_handle, + (uintptr_t)&tb_fw_config_uuid_spec, + open_fip + }, + [HW_CONFIG_ID] = { + &fip_dev_handle, + (uintptr_t)&hw_config_uuid_spec, + open_fip + }, +#if TRUSTED_BOARD_BOOT + [TRUSTED_BOOT_FW_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&tb_fw_cert_uuid_spec, + open_fip + }, + [TRUSTED_KEY_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&trusted_key_cert_uuid_spec, + open_fip + }, + [SCP_FW_KEY_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&fuse_key_cert_uuid_spec, + open_fip + }, + [SOC_FW_KEY_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&soc_fw_key_cert_uuid_spec, + open_fip + }, + [TRUSTED_OS_FW_KEY_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&tos_fw_key_cert_uuid_spec, + open_fip + }, + [NON_TRUSTED_FW_KEY_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&nt_fw_key_cert_uuid_spec, + open_fip + }, + [SCP_FW_CONTENT_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&fuse_cert_uuid_spec, + open_fip + }, + [SOC_FW_CONTENT_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&soc_fw_cert_uuid_spec, + open_fip + }, + [TRUSTED_OS_FW_CONTENT_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&tos_fw_cert_uuid_spec, + open_fip + }, + [NON_TRUSTED_FW_CONTENT_CERT_ID] = { + &fip_dev_handle, + (uintptr_t)&nt_fw_cert_uuid_spec, + open_fip + }, +#endif /* TRUSTED_BOARD_BOOT */ +}; + + +/* Weak definitions 
may be overridden in specific ARM standard platform */ +#pragma weak plat_io_setup + +/* + * Return an IO device handle and specification which can be used to access + */ +static int open_fip(const uintptr_t spec) +{ + int result; + uintptr_t local_image_handle; + + /* See if a Firmware Image Package is available */ + result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID); + if (result == 0) { + result = io_open(fip_dev_handle, spec, &local_image_handle); + if (result == 0) { + VERBOSE("Using FIP\n"); + io_close(local_image_handle); + } + } + return result; +} + + +int open_backend(const uintptr_t spec) +{ + int result; + uintptr_t local_image_handle; + + result = io_dev_init(backend_dev_handle, (uintptr_t)NULL); + if (result == 0) { + result = io_open(backend_dev_handle, spec, &local_image_handle); + if (result == 0) { + io_close(local_image_handle); + } + } + return result; +} + +#if defined(SD_BOOT) || defined(EMMC_BOOT) || defined(NAND_BOOT) +static int plat_io_block_setup(size_t fip_offset, uintptr_t block_dev_spec) +{ + int io_result; + + fip_block_spec.offset = fip_offset; + + io_result = register_io_dev_block(&backend_dev_con); + assert(io_result == 0); + + /* Open connections to devices and cache the handles */ + io_result = io_dev_open(backend_dev_con, block_dev_spec, + &backend_dev_handle); + assert(io_result == 0); + + return io_result; +} +#endif + +#if defined(FLEXSPI_NOR_BOOT) || defined(QSPI_BOOT) || defined(NOR_BOOT) +static int plat_io_memmap_setup(size_t fip_offset) +{ + int io_result; + + fip_block_spec.offset = fip_offset; + + io_result = register_io_dev_memmap(&backend_dev_con); + assert(io_result == 0); + + /* Open connections to devices and cache the handles */ + io_result = io_dev_open(backend_dev_con, (uintptr_t)NULL, + &backend_dev_handle); + assert(io_result == 0); + + return io_result; +} +#endif + +static int ls_io_fip_setup(unsigned int boot_dev) +{ + int io_result; + + io_result = register_io_dev_fip(&fip_dev_con); + assert(io_result == 0); + + /* Open connections to devices and cache the handles */ + io_result = io_dev_open(fip_dev_con, (uintptr_t)&fip_device, + &fip_dev_handle); + assert(io_result == 0); + +#ifdef CONFIG_DDR_FIP_IMAGE + /* Open connection to DDR FIP image if available */ + io_result = ddr_fip_setup(fip_dev_con, boot_dev); + + assert(io_result == 0); +#endif + +#ifdef POLICY_FUSE_PROVISION + /* Open connection to FUSE FIP image if available */ + io_result = fuse_fip_setup(fip_dev_con, boot_dev); + + assert(io_result == 0); +#endif + + return io_result; +} + +int ls_qspi_io_setup(void) +{ +#ifdef QSPI_BOOT + qspi_io_setup(NXP_QSPI_FLASH_ADDR, + NXP_QSPI_FLASH_SIZE, + PLAT_FIP_OFFSET); + return plat_io_memmap_setup(NXP_QSPI_FLASH_ADDR + PLAT_FIP_OFFSET); +#else + ERROR("QSPI driver not present. Check your BUILD\n"); + + /* Should never reach here */ + assert(false); + return -1; +#endif +} + +int emmc_sdhc2_io_setup(void) +{ +#if defined(EMMC_BOOT) && defined(NXP_ESDHC2_ADDR) + uintptr_t block_dev_spec; + int ret; + + ret = sd_emmc_init(&block_dev_spec, + NXP_ESDHC2_ADDR, + NXP_SD_BLOCK_BUF_ADDR, + NXP_SD_BLOCK_BUF_SIZE, + false); + if (ret != 0) { + return ret; + } + + return plat_io_block_setup(PLAT_FIP_OFFSET, block_dev_spec); +#else + ERROR("EMMC driver not present. Check your BUILD\n"); + + /* Should never reach here */ + assert(false); + return -1; +#endif +} + +int emmc_io_setup(void) +{ +/* On the platforms which only has one ESDHC controller, + * eMMC-boot will use the first ESDHC controller. 
+ */ +#if defined(SD_BOOT) || defined(EMMC_BOOT) + uintptr_t block_dev_spec; + int ret; + + ret = sd_emmc_init(&block_dev_spec, + NXP_ESDHC_ADDR, + NXP_SD_BLOCK_BUF_ADDR, + NXP_SD_BLOCK_BUF_SIZE, + true); + if (ret != 0) { + return ret; + } + + return plat_io_block_setup(PLAT_FIP_OFFSET, block_dev_spec); +#else + ERROR("SD driver not present. Check your BUILD\n"); + + /* Should never reach here */ + assert(false); + return -1; +#endif +} + +int ifc_nor_io_setup(void) +{ +#if defined(NOR_BOOT) + int ret; + + ret = ifc_nor_init(NXP_NOR_FLASH_ADDR, + NXP_NOR_FLASH_SIZE); + + if (ret != 0) { + return ret; + } + + return plat_io_memmap_setup(NXP_NOR_FLASH_ADDR + PLAT_FIP_OFFSET); +#else + ERROR("NOR driver not present. Check your BUILD\n"); + + /* Should never reach here */ + assert(false); + return -1; +#endif +} + +int ifc_nand_io_setup(void) +{ +#if defined(NAND_BOOT) + uintptr_t block_dev_spec; + int ret; + + ret = ifc_nand_init(&block_dev_spec, + NXP_IFC_REGION_ADDR, + NXP_IFC_ADDR, + NXP_IFC_SRAM_BUFFER_SIZE, + NXP_SD_BLOCK_BUF_ADDR, + NXP_SD_BLOCK_BUF_SIZE); + if (ret != 0) { + return ret; + } + + return plat_io_block_setup(PLAT_FIP_OFFSET, block_dev_spec); +#else + + ERROR("NAND driver not present. Check your BUILD\n"); + + /* Should never reach here */ + assert(false); + return -1; +#endif +} + +int ls_flexspi_nor_io_setup(void) +{ +#ifdef FLEXSPI_NOR_BOOT + int ret = 0; + + ret = flexspi_nor_io_setup(NXP_FLEXSPI_FLASH_ADDR, + NXP_FLEXSPI_FLASH_SIZE, + NXP_FLEXSPI_ADDR); + + if (ret != 0) { + ERROR("FlexSPI NOR driver initialization error.\n"); + /* Should never reach here */ + assert(0); + panic(); + return -1; + } + + return plat_io_memmap_setup(NXP_FLEXSPI_FLASH_ADDR + PLAT_FIP_OFFSET); +#else + ERROR("FlexSPI NOR driver not present. Check your BUILD\n"); + + /* Should never reach here */ + assert(false); + return -1; +#endif +} + +static int (* const ls_io_setup_table[])(void) = { + [BOOT_DEVICE_IFC_NOR] = ifc_nor_io_setup, + [BOOT_DEVICE_IFC_NAND] = ifc_nand_io_setup, + [BOOT_DEVICE_QSPI] = ls_qspi_io_setup, + [BOOT_DEVICE_EMMC] = emmc_io_setup, + [BOOT_DEVICE_SDHC2_EMMC] = emmc_sdhc2_io_setup, + [BOOT_DEVICE_FLEXSPI_NOR] = ls_flexspi_nor_io_setup, + [BOOT_DEVICE_FLEXSPI_NAND] = ls_flexspi_nor_io_setup, +}; + + +int plat_io_setup(void) +{ + int (*io_setup)(void); + unsigned int boot_dev = BOOT_DEVICE_NONE; + int ret; + + boot_dev = get_boot_dev(); + if (boot_dev == BOOT_DEVICE_NONE) { + ERROR("Boot Device detection failed, Check RCW_SRC\n"); + return -EINVAL; + } + + io_setup = ls_io_setup_table[boot_dev]; + ret = io_setup(); + if (ret != 0) { + return ret; + } + + ret = ls_io_fip_setup(boot_dev); + if (ret != 0) { + return ret; + } + + return 0; +} + + +/* Return an IO device handle and specification which can be used to access + * an image. 
Use this to enforce platform load policy + */ +int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle, + uintptr_t *image_spec) +{ + int result = -1; + const struct plat_io_policy *policy; + + if (image_id < ARRAY_SIZE(policies)) { + + policy = &policies[image_id]; + result = policy->check(policy->image_spec); + if (result == 0) { + *image_spec = policy->image_spec; + *dev_handle = *(policy->dev_handle); + } + } +#ifdef CONFIG_DDR_FIP_IMAGE + else { + VERBOSE("Trying alternative IO\n"); + result = plat_get_ddr_fip_image_source(image_id, dev_handle, + image_spec, open_backend); + } +#endif +#ifdef POLICY_FUSE_PROVISION + if (result != 0) { + VERBOSE("Trying FUSE IO\n"); + result = plat_get_fuse_image_source(image_id, dev_handle, + image_spec, open_backend); + } +#endif + + return result; +} diff --git a/plat/nxp/common/setup/ls_stack_protector.c b/plat/nxp/common/setup/ls_stack_protector.c new file mode 100644 index 0000000..ab78f88 --- /dev/null +++ b/plat/nxp/common/setup/ls_stack_protector.c @@ -0,0 +1,22 @@ +/* + * Copyright 2018-2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <stdint.h> + +#include <arch_helpers.h> + +#include <plat/common/platform.h> + +#define RANDOM_CANARY_VALUE ((u_register_t) 3288484550995823360ULL) + +u_register_t plat_get_stack_protector_canary(void) +{ + /* + * TBD: Generate Random Number from NXP CAAM Block. + */ + return RANDOM_CANARY_VALUE ^ read_cntpct_el0(); +} diff --git a/plat/nxp/common/sip_svc/aarch64/sipsvc.S b/plat/nxp/common/sip_svc/aarch64/sipsvc.S new file mode 100644 index 0000000..6a47cbf --- /dev/null +++ b/plat/nxp/common/sip_svc/aarch64/sipsvc.S @@ -0,0 +1,152 @@ +/* + * Copyright 2018-2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <asm_macros.S> +#include <bl31_data.h> + +.global el2_2_aarch32 +.global prefetch_disable + +#define SPSR_EL3_M4 0x10 +#define SPSR_EL_MASK 0xC +#define SPSR_EL2 0x8 +#define SCR_EL3_4_EL2_AARCH32 0x131 +#define SPSR32_EL2_LE 0x1DA + +#define MIDR_PARTNUM_START 4 +#define MIDR_PARTNUM_WIDTH 12 +#define MIDR_PARTNUM_A53 0xD03 +#define MIDR_PARTNUM_A57 0xD07 +#define MIDR_PARTNUM_A72 0xD08 + +/* + * uint64_t el2_2_aarch32(u_register_t smc_id, + * u_register_t start_addr, + * u_register_t parm1, + * u_register_t parm2) + * this function allows changing the execution width of EL2 from Aarch64 + * to Aarch32 + * Note: MUST be called from EL2 @ Aarch64 + * in: x0 = smc function id + * x1 = start address for EL2 @ Aarch32 + * x2 = first parameter to pass to EL2 @ Aarch32 + * x3 = second parameter to pass to EL2 @ Aarch32 + * out: x0 = 0, on success + * x0 = -1, on failure + * uses x0, x1, x2, x3 + */ +func el2_2_aarch32 + + /* check that caller is EL2 @ Aarch64 - err return if not */ + mrs x0, spsr_el3 + /* see if we were called from Aarch32 */ + tst x0, #SPSR_EL3_M4 + b.ne 2f + + /* see if we were called from EL2 */ + and x0, x0, SPSR_EL_MASK + cmp x0, SPSR_EL2 + b.ne 2f + + /* set ELR_EL3 */ + msr elr_el3, x1 + + /* set scr_el3 */ + mov x0, #SCR_EL3_4_EL2_AARCH32 + msr scr_el3, x0 + + /* set sctlr_el2 */ + ldr x1, =SCTLR_EL2_RES1 + msr sctlr_el2, x1 + + /* set spsr_el3 */ + ldr x0, =SPSR32_EL2_LE + msr spsr_el3, x0 + + /* x2 = parm 1 + * x3 = parm2 + */ + + /* set the parameters to be passed-thru to EL2 @ Aarch32 */ + mov x1, x2 + mov x2, x3 + + /* x1 = parm 1 + * x2 = parm2 + */ + + mov x0, xzr + /* invalidate the icache */ + ic iallu + dsb sy + isb + b 1f +2: + /* error return */ + mvn x0, xzr + ret +1: + eret +endfunc el2_2_aarch32 + +/* + * int 
prefetch_disable(u_register_t smc_id, u_register_t mask) + * this function marks cores which need to have the prefetch disabled - + * secondary cores have prefetch disabled when they are released from reset - + * the bootcore has prefetch disabled when this call is made + * in: x0 = function id + * x1 = core mask, where bit[0]=core0, bit[1]=core1, etc + * if a bit in the mask is set, then prefetch is disabled for that + * core + * out: x0 = SMC_SUCCESS + */ +func prefetch_disable + stp x4, x30, [sp, #-16]! + + mov x3, x1 + + /* x1 = core prefetch disable mask */ + /* x3 = core prefetch disable mask */ + + /* store the mask */ + mov x0, #PREFETCH_DIS_OFFSET + bl _set_global_data + + /* x3 = core prefetch disable mask */ + + /* see if we need to disable prefetch on THIS core */ + bl plat_my_core_mask + + /* x0 = core mask lsb */ + /* x3 = core prefetch disable mask */ + + tst x3, x0 + b.eq 1f + + /* read midr_el1 */ + mrs x1, midr_el1 + + /* x1 = midr_el1 */ + + mov x0, xzr + bfxil x0, x1, #MIDR_PARTNUM_START, #MIDR_PARTNUM_WIDTH + + /* x0 = part number (a53, a57, a72, etc) */ + + /* branch on cpu-specific */ + cmp x0, #MIDR_PARTNUM_A57 + b.eq 1f + cmp x0, #MIDR_PARTNUM_A72 + b.ne 1f + + bl _disable_ldstr_pfetch_A72 + b 1f +1: + ldp x4, x30, [sp], #16 + mov x0, xzr + ret +endfunc prefetch_disable diff --git a/plat/nxp/common/sip_svc/include/sipsvc.h b/plat/nxp/common/sip_svc/include/sipsvc.h new file mode 100644 index 0000000..d9e61e9 --- /dev/null +++ b/plat/nxp/common/sip_svc/include/sipsvc.h @@ -0,0 +1,80 @@ +/* + * Copyright 2018-2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#ifndef SIPSVC_H +#define SIPSVC_H + +#include <stdint.h> + +#define SMC_FUNC_MASK 0x0000ffff +#define SMC32_PARAM_MASK 0xffffffff + +/* SMC function IDs for SiP Service queries */ +#define SIP_SVC_CALL_COUNT 0xff00 +#define SIP_SVC_UID 0xff01 +#define SIP_SVC_VERSION 0xff03 +#define SIP_SVC_PRNG 0xff10 +#define SIP_SVC_RNG 0xff11 +#define SIP_SVC_MEM_BANK 0xff12 +#define SIP_SVC_PREFETCH_DIS 0xff13 +#define SIP_SVC_HUK 0xff14 +#define SIP_SVC_ALLOW_L1L2_ERR 0xff15 +#define SIP_SVC_ALLOW_L2_CLR 0xff16 +#define SIP_SVC_2_AARCH32 0xff17 +#define SIP_SVC_PORSR1 0xff18 + +/* Layerscape SiP Service Calls version numbers */ +#define LS_SIP_SVC_VERSION_MAJOR 0x0 +#define LS_SIP_SVC_VERSION_MINOR 0x1 + +/* Number of Layerscape SiP Calls implemented */ +#define LS_COMMON_SIP_NUM_CALLS 10 + +/* Parameter Type Constants */ +#define SIP_PARAM_TYPE_NONE 0x0 +#define SIP_PARAM_TYPE_VALUE_INPUT 0x1 +#define SIP_PARAM_TYPE_VALUE_OUTPUT 0x2 +#define SIP_PARAM_TYPE_VALUE_INOUT 0x3 +#define SIP_PARAM_TYPE_MEMREF_INPUT 0x5 +#define SIP_PARAM_TYPE_MEMREF_OUTPUT 0x6 +#define SIP_PARAM_TYPE_MEMREF_INOUT 0x7 + +#define SIP_PARAM_TYPE_MASK 0xF + +/* + * The macro SIP_PARAM_TYPES can be used to construct a value that you can + * compare against an incoming paramTypes to check the type of all the + * parameters in one comparison. + */ +#define SIP_PARAM_TYPES(t0, t1, t2, t3) \ + ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12)) + +/* + * The macro SIP_PARAM_TYPE_GET can be used to extract the type of a given + * parameter from paramTypes if you need more fine-grained type checking. 
+ */ +#define SIP_PARAM_TYPE_GET(t, i) ((((uint32_t)(t)) >> ((i) * 4)) & 0xF) + +/* + * The macro SIP_PARAM_TYPE_SET can be used to load the type of a given + * parameter from paramTypes without specifying all types (SIP_PARAM_TYPES) + */ +#define SIP_PARAM_TYPE_SET(t, i) (((uint32_t)(t) & 0xF) << ((i) * 4)) + +#define SIP_SVC_RNG_PARAMS (SIP_PARAM_TYPE_VALUE_INPUT, \ + SIP_PARAM_TYPE_MEMREF_OUTPUT, \ + SIP_PARAM_TYPE_NONE, \ + SIP_PARAM_TYPE_NONE) + +/* Layerscape SiP Calls error code */ +enum { + LS_SIP_SUCCESS = 0, + LS_SIP_INVALID_PARAM = -1, + LS_SIP_NOT_SUPPORTED = -2, +}; + +#endif /* SIPSVC_H */ diff --git a/plat/nxp/common/sip_svc/sip_svc.c b/plat/nxp/common/sip_svc/sip_svc.c new file mode 100644 index 0000000..1c8668e --- /dev/null +++ b/plat/nxp/common/sip_svc/sip_svc.c @@ -0,0 +1,194 @@ +/* + * Copyright 2018-2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <assert.h> +#include <string.h> + +#include <caam.h> +#include <common/runtime_svc.h> +#include <dcfg.h> +#include <lib/mmio.h> +#include <tools_share/uuid.h> + +#include <plat_common.h> +#include <sipsvc.h> + +/* Layerscape SiP Service UUID */ +DEFINE_SVC_UUID2(nxp_sip_svc_uid, + 0x871de4ef, 0xedfc, 0x4209, 0xa4, 0x23, + 0x8d, 0x23, 0x75, 0x9d, 0x3b, 0x9f); + +#pragma weak nxp_plat_sip_handler +static uintptr_t nxp_plat_sip_handler(unsigned int smc_fid, + u_register_t x1, + u_register_t x2, + u_register_t x3, + u_register_t x4, + void *cookie, + void *handle, + u_register_t flags) +{ + ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid); + SMC_RET1(handle, SMC_UNK); +} + +uint64_t el2_2_aarch32(u_register_t smc_id, u_register_t start_addr, + u_register_t parm1, u_register_t parm2); + +uint64_t prefetch_disable(u_register_t smc_id, u_register_t mask); +uint64_t bl31_get_porsr1(void); + +static void clean_top_32b_of_param(uint32_t smc_fid, + u_register_t *px1, + u_register_t *px2, + u_register_t *px3, + u_register_t *px4) +{ + /* if parameters from SMC32. Clean top 32 bits */ + if (GET_SMC_CC(smc_fid) == SMC_32) { + *px1 = *px1 & SMC32_PARAM_MASK; + *px2 = *px2 & SMC32_PARAM_MASK; + *px3 = *px3 & SMC32_PARAM_MASK; + *px4 = *px4 & SMC32_PARAM_MASK; + } +} + +/* This function handles Layerscape defined SiP Calls */ +static uintptr_t nxp_sip_handler(unsigned int smc_fid, + u_register_t x1, + u_register_t x2, + u_register_t x3, + u_register_t x4, + void *cookie, + void *handle, + u_register_t flags) +{ + uint32_t ns; + uint64_t ret; + dram_regions_info_t *info_dram_regions; + + /* if parameter is sent from SMC32. 
Clean top 32 bits */ + clean_top_32b_of_param(smc_fid, &x1, &x2, &x3, &x4); + + /* Determine which security state this SMC originated from */ + ns = is_caller_non_secure(flags); + if (ns == 0) { + /* SiP SMC service secure world's call */ + ; + } else { + /* SiP SMC service normal world's call */ + ; + } + + switch (smc_fid & SMC_FUNC_MASK) { + case SIP_SVC_RNG: + if (is_sec_enabled() == false) { + NOTICE("SEC is disabled.\n"); + SMC_RET1(handle, SMC_UNK); + } + + /* Return zero on failure */ + ret = get_random((int)x1); + if (ret != 0) { + SMC_RET2(handle, SMC_OK, ret); + } else { + SMC_RET1(handle, SMC_UNK); + } + /* break is not required as SMC_RETx return */ + case SIP_SVC_HUK: + if (is_sec_enabled() == false) { + NOTICE("SEC is disabled.\n"); + SMC_RET1(handle, SMC_UNK); + } + ret = get_hw_unq_key_blob_hw((uint8_t *) x1, (uint32_t) x2); + + if (ret == SMC_OK) { + SMC_RET1(handle, SMC_OK); + } else { + SMC_RET1(handle, SMC_UNK); + } + /* break is not required as SMC_RETx return */ + case SIP_SVC_MEM_BANK: + VERBOSE("Handling SMC SIP_SVC_MEM_BANK.\n"); + info_dram_regions = get_dram_regions_info(); + + if (x1 == -1) { + SMC_RET2(handle, SMC_OK, + info_dram_regions->total_dram_size); + } else if (x1 >= info_dram_regions->num_dram_regions) { + SMC_RET1(handle, SMC_UNK); + } else { + SMC_RET3(handle, SMC_OK, + info_dram_regions->region[x1].addr, + info_dram_regions->region[x1].size); + } + /* break is not required as SMC_RETx return */ + case SIP_SVC_PREFETCH_DIS: + VERBOSE("In SIP_SVC_PREFETCH_DIS call\n"); + ret = prefetch_disable(smc_fid, x1); + if (ret == SMC_OK) { + SMC_RET1(handle, SMC_OK); + } else { + SMC_RET1(handle, SMC_UNK); + } + /* break is not required as SMC_RETx return */ + case SIP_SVC_2_AARCH32: + ret = el2_2_aarch32(smc_fid, x1, x2, x3); + + /* In success case, control should not reach here. */ + NOTICE("SMC: SIP_SVC_2_AARCH32 Failed.\n"); + SMC_RET1(handle, SMC_UNK); + /* break is not required as SMC_RETx return */ + case SIP_SVC_PORSR1: + ret = bl31_get_porsr1(); + SMC_RET2(handle, SMC_OK, ret); + /* break is not required as SMC_RETx return */ + default: + return nxp_plat_sip_handler(smc_fid, x1, x2, x3, x4, + cookie, handle, flags); + } +} + +/* This function is responsible for handling all SiP calls */ +static uintptr_t sip_smc_handler(unsigned int smc_fid, + u_register_t x1, + u_register_t x2, + u_register_t x3, + u_register_t x4, + void *cookie, + void *handle, + u_register_t flags) +{ + switch (smc_fid & SMC_FUNC_MASK) { + case SIP_SVC_CALL_COUNT: + /* Return the number of Layerscape SiP Service Calls. 
*/ + SMC_RET1(handle, LS_COMMON_SIP_NUM_CALLS); + break; + case SIP_SVC_UID: + /* Return UID to the caller */ + SMC_UUID_RET(handle, nxp_sip_svc_uid); + break; + case SIP_SVC_VERSION: + /* Return the version of current implementation */ + SMC_RET2(handle, LS_SIP_SVC_VERSION_MAJOR, + LS_SIP_SVC_VERSION_MINOR); + break; + default: + return nxp_sip_handler(smc_fid, x1, x2, x3, x4, + cookie, handle, flags); + } +} + +/* Define a runtime service descriptor for fast SMC calls */ +DECLARE_RT_SVC( + nxp_sip_svc, + OEN_SIP_START, + OEN_SIP_END, + SMC_TYPE_FAST, + NULL, + sip_smc_handler +); diff --git a/plat/nxp/common/sip_svc/sipsvc.mk b/plat/nxp/common/sip_svc/sipsvc.mk new file mode 100644 index 0000000..c3a57de --- /dev/null +++ b/plat/nxp/common/sip_svc/sipsvc.mk @@ -0,0 +1,35 @@ +# +# Copyright 2018-2020 NXP +# +# SPDX-License-Identifier: BSD-3-Clause +# +# +#------------------------------------------------------------------------------ +# +# Select the SIP SVC files +# +# ----------------------------------------------------------------------------- + +ifeq (${ADD_SIPSVC},) + +ADD_SIPSVC := 1 + +PLAT_SIPSVC_PATH := $(PLAT_COMMON_PATH)/sip_svc + +SIPSVC_SOURCES := ${PLAT_SIPSVC_PATH}/sip_svc.c \ + ${PLAT_SIPSVC_PATH}/$(ARCH)/sipsvc.S + +PLAT_INCLUDES += -I${PLAT_SIPSVC_PATH}/include + +ifeq (${BL_COMM_SIPSVC_NEEDED},yes) +BL_COMMON_SOURCES += ${SIPSVC_SOURCES} +else +ifeq (${BL2_SIPSVC_NEEDED},yes) +BL2_SOURCES += ${SIPSVC_SOURCES} +endif +ifeq (${BL31_SIPSVC_NEEDED},yes) +BL31_SOURCES += ${SIPSVC_SOURCES} +endif +endif +endif +# ----------------------------------------------------------------------------- diff --git a/plat/nxp/common/soc_errata/errata.c b/plat/nxp/common/soc_errata/errata.c new file mode 100644 index 0000000..55ef604 --- /dev/null +++ b/plat/nxp/common/soc_errata/errata.c @@ -0,0 +1,59 @@ +/* + * Copyright 2021-2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <common/debug.h> + +#include "errata_list.h" + +void soc_errata(void) +{ +#ifdef ERRATA_SOC_A050426 + INFO("SoC workaround for Errata A050426 was applied\n"); + erratum_a050426(); +#endif +#ifdef ERRATA_SOC_A008850 + INFO("SoC workaround for Errata A008850 Early-Phase was applied\n"); + erratum_a008850_early(); +#endif +#if ERRATA_SOC_A009660 + INFO("SoC workaround for Errata A009660 was applied\n"); + erratum_a009660(); +#endif +#if ERRATA_SOC_A010539 + INFO("SoC workaround for Errata A010539 was applied\n"); + erratum_a010539(); +#endif + + /* + * The following DDR Erratas workaround are implemented in DDR driver, + * but print information here. 
+ */ +#if ERRATA_DDR_A011396 + INFO("SoC workaround for DDR Errata A011396 was applied\n"); +#endif +#if ERRATA_DDR_A050450 + INFO("SoC workaround for DDR Errata A050450 was applied\n"); +#endif +#if ERRATA_DDR_A050958 + INFO("SoC workaround for DDR Errata A050958 was applied\n"); +#endif +#if ERRATA_DDR_A008511 + INFO("SoC workaround for DDR Errata A008511 was applied\n"); +#endif +#if ERRATA_DDR_A009803 + INFO("SoC workaround for DDR Errata A009803 was applied\n"); +#endif +#if ERRATA_DDR_A009942 + INFO("SoC workaround for DDR Errata A009942 was applied\n"); +#endif +#if ERRATA_DDR_A010165 + INFO("SoC workaround for DDR Errata A010165 was applied\n"); +#endif +#if ERRATA_DDR_A009663 + INFO("SoC workaround for DDR Errata A009663 was applied\n"); +#endif +} diff --git a/plat/nxp/common/soc_errata/errata.h b/plat/nxp/common/soc_errata/errata.h new file mode 100644 index 0000000..ab67995 --- /dev/null +++ b/plat/nxp/common/soc_errata/errata.h @@ -0,0 +1,15 @@ +/* + * Copyright 2020-2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#ifndef ERRATA_H +#define ERRATA_H + +#include "errata_list.h" + +void soc_errata(void); + +#endif /* ERRATA_H */ diff --git a/plat/nxp/common/soc_errata/errata.mk b/plat/nxp/common/soc_errata/errata.mk new file mode 100644 index 0000000..3deef3d --- /dev/null +++ b/plat/nxp/common/soc_errata/errata.mk @@ -0,0 +1,26 @@ +# +# Copyright 2021-2022 NXP +# +# SPDX-License-Identifier: BSD-3-Clause +# +# Platform Errata Build flags. +# These should be enabled by the platform if the erratum workaround needs to be +# applied. + +ERRATA := \ + ERRATA_SOC_A050426 \ + ERRATA_SOC_A008850 \ + ERRATA_SOC_A009660 \ + ERRATA_SOC_A010539 + +define enable_errata + $(1) ?= 0 + ifeq ($$($(1)),1) + $$(eval $$(call add_define,$(1))) + BL2_SOURCES += $(PLAT_COMMON_PATH)/soc_errata/errata_a$(shell echo $(1)|awk -F '_A' '{print $$NF}').c + endif +endef + +$(foreach e,$(ERRATA),$(eval $(call enable_errata,$(strip $(e))))) + +BL2_SOURCES += $(PLAT_COMMON_PATH)/soc_errata/errata.c diff --git a/plat/nxp/common/soc_errata/errata_a008850.c b/plat/nxp/common/soc_errata/errata_a008850.c new file mode 100644 index 0000000..e8c0f64 --- /dev/null +++ b/plat/nxp/common/soc_errata/errata_a008850.c @@ -0,0 +1,42 @@ +/* + * Copyright 2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ +#include <cci.h> +#include <common/debug.h> +#include <ls_interconnect.h> +#include <mmio.h> + +#include <platform_def.h> + +void erratum_a008850_early(void) +{ + /* part 1 of 2 */ + uintptr_t cci_base = NXP_CCI_ADDR; + uint32_t val = mmio_read_32(cci_base + CTRL_OVERRIDE_REG); + + /* enabling forced barrier termination on CCI400 */ + mmio_write_32(cci_base + CTRL_OVERRIDE_REG, + (val | CCI_TERMINATE_BARRIER_TX)); + +} + +void erratum_a008850_post(void) +{ + /* part 2 of 2 */ + uintptr_t cci_base = NXP_CCI_ADDR; + uint32_t val = mmio_read_32(cci_base + CTRL_OVERRIDE_REG); + + /* Clear the BARRIER_TX bit */ + val = val & ~(CCI_TERMINATE_BARRIER_TX); + + /* + * Disable barrier termination on CCI400, allowing + * barriers to propagate across CCI + */ + mmio_write_32(cci_base + CTRL_OVERRIDE_REG, val); + + INFO("SoC workaround for Errata A008850 Post-Phase was applied\n"); +} diff --git a/plat/nxp/common/soc_errata/errata_a009660.c b/plat/nxp/common/soc_errata/errata_a009660.c new file mode 100644 index 0000000..d31a4d7 --- /dev/null +++ b/plat/nxp/common/soc_errata/errata_a009660.c @@ -0,0 +1,14 @@ +/* + * Copyright 2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include 
<mmio.h> +#include <soc_default_base_addr.h> + +void erratum_a009660(void) +{ + mmio_write_32(NXP_SCFG_ADDR + 0x20c, 0x63b20042); +} diff --git a/plat/nxp/common/soc_errata/errata_a010539.c b/plat/nxp/common/soc_errata/errata_a010539.c new file mode 100644 index 0000000..3dcbdc8 --- /dev/null +++ b/plat/nxp/common/soc_errata/errata_a010539.c @@ -0,0 +1,26 @@ +/* + * Copyright 2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <mmio.h> + +#include <plat_common.h> + +void erratum_a010539(void) +{ + if (get_boot_dev() == BOOT_DEVICE_QSPI) { + unsigned int *porsr1 = (void *)(NXP_DCFG_ADDR + + DCFG_PORSR1_OFFSET); + uint32_t val; + + val = (gur_in32(porsr1) & ~PORSR1_RCW_MASK); + mmio_write_32((uint32_t)(NXP_DCSR_DCFG_ADDR + + DCFG_DCSR_PORCR1_OFFSET), htobe32(val)); + /* Erratum need to set '1' to all bits for reserved SCFG register 0x1a8 */ + mmio_write_32((uint32_t)(NXP_SCFG_ADDR + 0x1a8), + htobe32(0xffffffff)); + } +} diff --git a/plat/nxp/common/soc_errata/errata_a050426.c b/plat/nxp/common/soc_errata/errata_a050426.c new file mode 100644 index 0000000..ba4f71f --- /dev/null +++ b/plat/nxp/common/soc_errata/errata_a050426.c @@ -0,0 +1,201 @@ +/* + * Copyright 2021-2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <common/debug.h> +#include <mmio.h> + +void erratum_a050426(void) +{ + uint32_t i, val3, val4; + + /* + * Part of this Errata is implemented in RCW and SCRATCHRW5 + * register is updated to hold Errata number. + * Validate whether RCW has already included required changes + */ + if (mmio_read_32(0x01e00210) != 0x00050426) { + ERROR("%s: Invalid RCW : ERR050426 not implemented\n", __func__); + } + + /* Enable BIST to access Internal memory locations */ + val3 = mmio_read_32(0x700117E60); + mmio_write_32(0x700117E60, (val3 | 0x80000001)); + val4 = mmio_read_32(0x700117E90); + mmio_write_32(0x700117E90, (val4 & 0xFFDFFFFF)); + + /* wriop Internal Memory.*/ + for (i = 0U; i < 4U; i++) { + mmio_write_32(0x706312000 + (i * 4), 0x55555555); + mmio_write_32(0x706312400 + (i * 4), 0x55555555); + mmio_write_32(0x706312800 + (i * 4), 0x55555555); + mmio_write_32(0x706314000 + (i * 4), 0x55555555); + mmio_write_32(0x706314400 + (i * 4), 0x55555555); + mmio_write_32(0x706314800 + (i * 4), 0x55555555); + mmio_write_32(0x706314c00 + (i * 4), 0x55555555); + } + for (i = 0U; i < 3U; i++) { + mmio_write_32(0x706316000 + (i * 4), 0x55555555); + mmio_write_32(0x706320000 + (i * 4), 0x55555555); + mmio_write_32(0x706320400 + (i * 4), 0x55555555); + } + for (i = 0U; i < 2U; i++) { + mmio_write_32(0x70640a000 + (i * 4), 0x55555555); + } + for (i = 0U; i < 3U; i++) { + mmio_write_32(0x706518000 + (i * 4), 0x55555555); + mmio_write_32(0x706519000 + (i * 4), 0x55555555); + } + for (i = 0U; i < 4U; i++) { + mmio_write_32(0x706522000 + (i * 4), 0x55555555); + mmio_write_32(0x706522800 + (i * 4), 0x55555555); + mmio_write_32(0x706523000 + (i * 4), 0x55555555); + mmio_write_32(0x706523800 + (i * 4), 0x55555555); + mmio_write_32(0x706524000 + (i * 4), 0x55555555); + mmio_write_32(0x706524800 + (i * 4), 0x55555555); + mmio_write_32(0x706608000 + (i * 4), 0x55555555); + mmio_write_32(0x706608800 + (i * 4), 0x55555555); + mmio_write_32(0x706609000 + (i * 4), 0x55555555); + mmio_write_32(0x706609800 + (i * 4), 0x55555555); + mmio_write_32(0x70660a000 + (i * 4), 0x55555555); + mmio_write_32(0x70660a800 + (i * 4), 0x55555555); + mmio_write_32(0x70660b000 + (i * 4), 0x55555555); + mmio_write_32(0x70660b800 + (i * 4), 0x55555555); + } + for (i = 0U; i < 3U; i++) { + 
mmio_write_32(0x70660c000 + (i * 4), 0x55555555); + mmio_write_32(0x70660c800 + (i * 4), 0x55555555); + } + for (i = 0U; i < 2U; i++) { + mmio_write_32(0x706718000 + (i * 4), 0x55555555); + mmio_write_32(0x706718800 + (i * 4), 0x55555555); + } + mmio_write_32(0x706b0a000, 0x55555555); + + for (i = 0U; i < 4U; i++) { + mmio_write_32(0x706b0e000 + (i * 4), 0x55555555); + mmio_write_32(0x706b0e800 + (i * 4), 0x55555555); + } + for (i = 0U; i < 2U; i++) { + mmio_write_32(0x706b10000 + (i * 4), 0x55555555); + mmio_write_32(0x706b10400 + (i * 4), 0x55555555); + } + for (i = 0U; i < 4U; i++) { + mmio_write_32(0x706b14000 + (i * 4), 0x55555555); + mmio_write_32(0x706b14800 + (i * 4), 0x55555555); + mmio_write_32(0x706b15000 + (i * 4), 0x55555555); + mmio_write_32(0x706b15800 + (i * 4), 0x55555555); + } + mmio_write_32(0x706e12000, 0x55555555); + + for (i = 0U; i < 4U; i++) { + mmio_write_32(0x706e14000 + (i * 4), 0x55555555); + mmio_write_32(0x706e14800 + (i * 4), 0x55555555); + } + for (i = 0U; i < 2U; i++) { + mmio_write_32(0x706e16000 + (i * 4), 0x55555555); + mmio_write_32(0x706e16400 + (i * 4), 0x55555555); + } + for (i = 0U; i < 3U; i++) { + mmio_write_32(0x706e1a000 + (i * 4), 0x55555555); + mmio_write_32(0x706e1a800 + (i * 4), 0x55555555); + mmio_write_32(0x706e1b000 + (i * 4), 0x55555555); + mmio_write_32(0x706e1b800 + (i * 4), 0x55555555); + mmio_write_32(0x706e1c000 + (i * 4), 0x55555555); + mmio_write_32(0x706e1c800 + (i * 4), 0x55555555); + mmio_write_32(0x706e1e000 + (i * 4), 0x55555555); + mmio_write_32(0x706e1e800 + (i * 4), 0x55555555); + mmio_write_32(0x706e1f000 + (i * 4), 0x55555555); + mmio_write_32(0x706e1f800 + (i * 4), 0x55555555); + mmio_write_32(0x706e20000 + (i * 4), 0x55555555); + mmio_write_32(0x706e20800 + (i * 4), 0x55555555); + } + for (i = 0U; i < 4U; i++) { + mmio_write_32(0x707108000 + (i * 4), 0x55555555); + mmio_write_32(0x707109000 + (i * 4), 0x55555555); + mmio_write_32(0x70710a000 + (i * 4), 0x55555555); + } + for (i = 0U; i < 2U; i++) { + mmio_write_32(0x70711c000 + (i * 4), 0x55555555); + mmio_write_32(0x70711c800 + (i * 4), 0x55555555); + mmio_write_32(0x70711d000 + (i * 4), 0x55555555); + mmio_write_32(0x70711d800 + (i * 4), 0x55555555); + mmio_write_32(0x70711e000 + (i * 4), 0x55555555); + } + for (i = 0U; i < 4U; i++) { + mmio_write_32(0x707120000 + (i * 4), 0x55555555); + mmio_write_32(0x707121000 + (i * 4), 0x55555555); + } + for (i = 0U; i < 3U; i++) { + mmio_write_32(0x707122000 + (i * 4), 0x55555555); + mmio_write_32(0x70725a000 + (i * 4), 0x55555555); + mmio_write_32(0x70725b000 + (i * 4), 0x55555555); + mmio_write_32(0x70725c000 + (i * 4), 0x55555555); + mmio_write_32(0x70725e000 + (i * 4), 0x55555555); + mmio_write_32(0x70725e400 + (i * 4), 0x55555555); + mmio_write_32(0x70725e800 + (i * 4), 0x55555555); + mmio_write_32(0x70725ec00 + (i * 4), 0x55555555); + mmio_write_32(0x70725f000 + (i * 4), 0x55555555); + mmio_write_32(0x70725f400 + (i * 4), 0x55555555); + mmio_write_32(0x707340000 + (i * 4), 0x55555555); + mmio_write_32(0x707346000 + (i * 4), 0x55555555); + mmio_write_32(0x707484000 + (i * 4), 0x55555555); + mmio_write_32(0x70748a000 + (i * 4), 0x55555555); + mmio_write_32(0x70748b000 + (i * 4), 0x55555555); + mmio_write_32(0x70748c000 + (i * 4), 0x55555555); + mmio_write_32(0x70748d000 + (i * 4), 0x55555555); + } + + /* EDMA Internal Memory.*/ + for (i = 0U; i < 5U; i++) { + mmio_write_32(0x70a208000 + (i * 4), 0x55555555); + mmio_write_32(0x70a208800 + (i * 4), 0x55555555); + mmio_write_32(0x70a209000 + (i * 4), 0x55555555); + 
mmio_write_32(0x70a209800 + (i * 4), 0x55555555); + } + + /* QDMA Internal Memory.*/ + for (i = 0U; i < 5U; i++) { + mmio_write_32(0x70b008000 + (i * 4), 0x55555555); + mmio_write_32(0x70b00c000 + (i * 4), 0x55555555); + mmio_write_32(0x70b010000 + (i * 4), 0x55555555); + mmio_write_32(0x70b014000 + (i * 4), 0x55555555); + mmio_write_32(0x70b018000 + (i * 4), 0x55555555); + mmio_write_32(0x70b018400 + (i * 4), 0x55555555); + mmio_write_32(0x70b01a000 + (i * 4), 0x55555555); + mmio_write_32(0x70b01a400 + (i * 4), 0x55555555); + mmio_write_32(0x70b01c000 + (i * 4), 0x55555555); + mmio_write_32(0x70b01d000 + (i * 4), 0x55555555); + mmio_write_32(0x70b01e000 + (i * 4), 0x55555555); + mmio_write_32(0x70b01e800 + (i * 4), 0x55555555); + mmio_write_32(0x70b01f000 + (i * 4), 0x55555555); + mmio_write_32(0x70b01f800 + (i * 4), 0x55555555); + mmio_write_32(0x70b020000 + (i * 4), 0x55555555); + mmio_write_32(0x70b020400 + (i * 4), 0x55555555); + mmio_write_32(0x70b020800 + (i * 4), 0x55555555); + mmio_write_32(0x70b020c00 + (i * 4), 0x55555555); + mmio_write_32(0x70b022000 + (i * 4), 0x55555555); + mmio_write_32(0x70b022400 + (i * 4), 0x55555555); + mmio_write_32(0x70b024000 + (i * 4), 0x55555555); + mmio_write_32(0x70b024800 + (i * 4), 0x55555555); + mmio_write_32(0x70b025000 + (i * 4), 0x55555555); + mmio_write_32(0x70b025800 + (i * 4), 0x55555555); + } + for (i = 0U; i < 4U; i++) { + mmio_write_32(0x70b026000 + (i * 4), 0x55555555); + mmio_write_32(0x70b026200 + (i * 4), 0x55555555); + } + for (i = 0U; i < 5U; i++) { + mmio_write_32(0x70b028000 + (i * 4), 0x55555555); + mmio_write_32(0x70b028800 + (i * 4), 0x55555555); + mmio_write_32(0x70b029000 + (i * 4), 0x55555555); + mmio_write_32(0x70b029800 + (i * 4), 0x55555555); + } + + /* Disable BIST */ + mmio_write_32(0x700117E60, val3); + mmio_write_32(0x700117E90, val4); +} diff --git a/plat/nxp/common/soc_errata/errata_list.h b/plat/nxp/common/soc_errata/errata_list.h new file mode 100644 index 0000000..f6741e2 --- /dev/null +++ b/plat/nxp/common/soc_errata/errata_list.h @@ -0,0 +1,28 @@ +/* + * Copyright 2021-2022 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#ifndef ERRATA_LIST_H +#define ERRATA_LIST_H + +#ifdef ERRATA_SOC_A050426 +void erratum_a050426(void); +#endif + +#ifdef ERRATA_SOC_A008850 +void erratum_a008850_early(void); +void erratum_a008850_post(void); +#endif + +#ifdef ERRATA_SOC_A009660 +void erratum_a009660(void); +#endif + +#ifdef ERRATA_SOC_A010539 +void erratum_a010539(void); +#endif + +#endif /* ERRATA_LIST_H */ diff --git a/plat/nxp/common/tbbr/csf_tbbr.c b/plat/nxp/common/tbbr/csf_tbbr.c new file mode 100644 index 0000000..8f38f3e --- /dev/null +++ b/plat/nxp/common/tbbr/csf_tbbr.c @@ -0,0 +1,81 @@ +/* + * Copyright 2018-2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + * + */ + +#include <errno.h> + +#include <common/debug.h> +#include <csf_hdr.h> +#include <dcfg.h> +#include <drivers/auth/crypto_mod.h> +#include <snvs.h> + +#include <plat/common/platform.h> +#include "plat_common.h" + +extern bool rotpk_not_dpld; +extern uint8_t rotpk_hash_table[MAX_KEY_ENTRIES][SHA256_BYTES]; +extern uint32_t num_rotpk_hash_entries; + +/* + * In case of secure boot, return ptr of rotpk_hash table in key_ptr and + * number of hashes in key_len + */ +int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len, + unsigned int *flags) +{ + uint32_t mode = 0U; + *flags = ROTPK_NOT_DEPLOYED; + + /* ROTPK hash table must be available for secure boot */ + if (rotpk_not_dpld == true) { + if 
(check_boot_mode_secure(&mode) == true) { + /* Production mode, don;t continue further */ + if (mode == 1U) { + return -EAUTH; + } + + /* For development mode, rotpk flag false + * indicates that SRK hash comparison might + * have failed. This is not fatal error. + * Continue in this case but transition SNVS + * to non-secure state + */ + transition_snvs_non_secure(); + return 0; + } else { + return 0; + } + } + + /* + * We return the complete hash table and number of entries in + * table for NXP platform specific implementation. + * Here hash is always assume as SHA-256 + */ + *key_ptr = rotpk_hash_table; + *key_len = num_rotpk_hash_entries; + *flags = ROTPK_IS_HASH; + + return 0; +} + +int plat_get_nv_ctr(void *cookie, unsigned int *nv_ctr) +{ + /* + * No support for non-volatile counter. Update the ROT key to protect + * the system against rollback. + */ + *nv_ctr = 0U; + + return 0; +} + +int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr) +{ + return 0; +} diff --git a/plat/nxp/common/tbbr/nxp_rotpk.S b/plat/nxp/common/tbbr/nxp_rotpk.S new file mode 100644 index 0000000..8e084d1 --- /dev/null +++ b/plat/nxp/common/tbbr/nxp_rotpk.S @@ -0,0 +1,21 @@ +/* + * Copyright 2018-2020 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + * + */ + +#ifndef _CSF_HDR_H_ + + .global nxp_rotpk_hash + .global nxp_rotpk_hash_end + .section .rodata.nxp_rotpk_hash, "a" +nxp_rotpk_hash: + /* DER header */ + .byte 0x30, 0x31, 0x30, 0x0D, 0x06, 0x09, 0x60, 0x86, 0x48 + .byte 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20 + /* SHA256 */ + .incbin ROTPK_HASH +nxp_rotpk_hash_end: +#endif diff --git a/plat/nxp/common/tbbr/tbbr.mk b/plat/nxp/common/tbbr/tbbr.mk new file mode 100644 index 0000000..4aac9d6 --- /dev/null +++ b/plat/nxp/common/tbbr/tbbr.mk @@ -0,0 +1,162 @@ +# +# Copyright 2020-2022 NXP +# +# SPDX-License-Identifier: BSD-3-Clause +# + +# For TRUSTED_BOARD_BOOT platforms need to include this makefile +# Following definations are to be provided by platform.mk file or +# by user - BL33_INPUT_FILE, BL32_INPUT_FILE, BL31_INPUT_FILE + +ifeq ($(CHASSIS), 2) +include $(PLAT_DRIVERS_PATH)/csu/csu.mk +CSF_FILE := input_blx_ch${CHASSIS} +BL2_CSF_FILE := input_bl2_ch${CHASSIS} +else +ifeq ($(CHASSIS), 3) +CSF_FILE := input_blx_ch${CHASSIS} +BL2_CSF_FILE := input_bl2_ch${CHASSIS} +PBI_CSF_FILE := input_pbi_ch${CHASSIS} +$(eval $(call add_define, CSF_HDR_CH3)) +else +ifeq ($(CHASSIS), 3_2) +CSF_FILE := input_blx_ch3 +BL2_CSF_FILE := input_bl2_ch${CHASSIS} +PBI_CSF_FILE := input_pbi_ch${CHASSIS} +$(eval $(call add_define, CSF_HDR_CH3)) +else + $(error -> CHASSIS not set!) 
+endif +endif +endif + +PLAT_AUTH_PATH := $(PLAT_DRIVERS_PATH)/auth + + +ifeq (${BL2_INPUT_FILE},) + BL2_INPUT_FILE := $(PLAT_AUTH_PATH)/csf_hdr_parser/${BL2_CSF_FILE} +endif + +ifeq (${PBI_INPUT_FILE},) + PBI_INPUT_FILE := $(PLAT_AUTH_PATH)/csf_hdr_parser/${PBI_CSF_FILE} +endif + +# If MBEDTLS_DIR is not specified, use CSF Header option +ifeq (${MBEDTLS_DIR},) + # Generic image processing filters to prepend CSF header + ifeq (${BL33_INPUT_FILE},) + BL33_INPUT_FILE := $(PLAT_AUTH_PATH)/csf_hdr_parser/${CSF_FILE} + endif + + ifeq (${BL31_INPUT_FILE},) + BL31_INPUT_FILE := $(PLAT_AUTH_PATH)/csf_hdr_parser/${CSF_FILE} + endif + + ifeq (${BL32_INPUT_FILE},) + BL32_INPUT_FILE := $(PLAT_AUTH_PATH)/csf_hdr_parser/${CSF_FILE} + endif + + ifeq (${FUSE_INPUT_FILE},) + FUSE_INPUT_FILE := $(PLAT_AUTH_PATH)/csf_hdr_parser/${CSF_FILE} + endif + + PLAT_INCLUDES += -I$(PLAT_DRIVERS_PATH)/sfp + PLAT_TBBR_SOURCES += $(PLAT_AUTH_PATH)/csf_hdr_parser/cot.c \ + $(PLAT_COMMON_PATH)/tbbr/csf_tbbr.c + # IMG PARSER here is CSF header parser + include $(PLAT_DRIVERS_PATH)/auth/csf_hdr_parser/csf_hdr.mk + PLAT_TBBR_SOURCES += $(CSF_HDR_SOURCES) + + SCP_BL2_PRE_TOOL_FILTER := CST_SCP_BL2 + BL31_PRE_TOOL_FILTER := CST_BL31 + BL32_PRE_TOOL_FILTER := CST_BL32 + BL33_PRE_TOOL_FILTER := CST_BL33 +else + + ifeq (${DISABLE_FUSE_WRITE}, 1) + $(eval $(call add_define,DISABLE_FUSE_WRITE)) + endif + + # For Mbedtls currently crypto is not supported via CAAM + # enable it when that support is there + CAAM_INTEG := 0 + KEY_ALG := rsa + KEY_SIZE := 2048 + + $(eval $(call add_define,MBEDTLS_X509)) + ifeq (${PLAT_DDR_PHY},PHY_GEN2) + $(eval $(call add_define,PLAT_DEF_OID)) + endif + include drivers/auth/mbedtls/mbedtls_x509.mk + + + PLAT_TBBR_SOURCES += $(PLAT_AUTH_PATH)/tbbr/tbbr_cot.c \ + $(PLAT_COMMON_PATH)/tbbr/nxp_rotpk.S \ + $(PLAT_COMMON_PATH)/tbbr/x509_tbbr.c + + #ROTPK key is embedded in BL2 image + ifeq (${ROT_KEY},) + ROT_KEY = $(BUILD_PLAT)/rot_key.pem + endif + + ifeq (${SAVE_KEYS},1) + + ifeq (${TRUSTED_WORLD_KEY},) + TRUSTED_WORLD_KEY = ${BUILD_PLAT}/trusted.pem + endif + + ifeq (${NON_TRUSTED_WORLD_KEY},) + NON_TRUSTED_WORLD_KEY = ${BUILD_PLAT}/non-trusted.pem + endif + + ifeq (${BL31_KEY},) + BL31_KEY = ${BUILD_PLAT}/soc.pem + endif + + ifeq (${BL32_KEY},) + BL32_KEY = ${BUILD_PLAT}/trusted_os.pem + endif + + ifeq (${BL33_KEY},) + BL33_KEY = ${BUILD_PLAT}/non-trusted_os.pem + endif + + endif + + ROTPK_HASH = $(BUILD_PLAT)/rotpk_sha256.bin + + $(eval $(call add_define_val,ROTPK_HASH,'"$(ROTPK_HASH)"')) + + $(BUILD_PLAT)/bl2/nxp_rotpk.o: $(ROTPK_HASH) + + certificates: $(ROT_KEY) + $(ROT_KEY): | $(BUILD_PLAT) + @echo " OPENSSL $@" + @if [ ! 
-f $(ROT_KEY) ]; then \ + ${OPENSSL_BIN_PATH}/openssl genrsa 2048 > $@ 2>/dev/null; \ + fi + + $(ROTPK_HASH): $(ROT_KEY) + @echo " OPENSSL $@" + $(Q)${OPENSSL_BIN_PATH}/openssl rsa -in $< -pubout -outform DER 2>/dev/null |\ + ${OPENSSL_BIN_PATH}/openssl dgst -sha256 -binary > $@ 2>/dev/null + +endif #MBEDTLS_DIR + +PLAT_INCLUDES += -Iinclude/common/tbbr + +# Generic files for authentication framework +TBBR_SOURCES += drivers/auth/auth_mod.c \ + drivers/auth/crypto_mod.c \ + drivers/auth/img_parser_mod.c \ + plat/common/tbbr/plat_tbbr.c \ + ${PLAT_TBBR_SOURCES} + +# If CAAM_INTEG is not defined (would be scenario with MBED TLS) +# include mbedtls_crypto +ifeq (${CAAM_INTEG},0) + include drivers/auth/mbedtls/mbedtls_crypto.mk +else + include $(PLAT_DRIVERS_PATH)/crypto/caam/src/auth/auth.mk + TBBR_SOURCES += ${AUTH_SOURCES} +endif diff --git a/plat/nxp/common/tbbr/x509_tbbr.c b/plat/nxp/common/tbbr/x509_tbbr.c new file mode 100644 index 0000000..ec87674 --- /dev/null +++ b/plat/nxp/common/tbbr/x509_tbbr.c @@ -0,0 +1,105 @@ +/* + * Copyright 2018-2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <assert.h> +#include <stdint.h> +#include <string.h> + +#include <common/debug.h> +#include <lib/cassert.h> +#include <sfp.h> +#include <tools_share/tbbr_oid.h> + +#include <plat/common/platform.h> +#include "plat_common.h" + +extern char nxp_rotpk_hash[], nxp_rotpk_hash_end[]; + +int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len, + unsigned int *flags) +{ + *key_ptr = nxp_rotpk_hash; + *key_len = nxp_rotpk_hash_end - nxp_rotpk_hash; + *flags = ROTPK_IS_HASH; + + return 0; +} + +int plat_get_nv_ctr(void *cookie, unsigned int *nv_ctr) +{ + const char *oid; + uint32_t uid_num; + uint32_t val = 0U; + + assert(cookie != NULL); + assert(nv_ctr != NULL); + + oid = (const char *)cookie; + if (strcmp(oid, TRUSTED_FW_NVCOUNTER_OID) == 0) { + uid_num = 3U; + } else if (strcmp(oid, NON_TRUSTED_FW_NVCOUNTER_OID) == 0) { + uid_num = 4U; + } else { + return 1; + } + + val = sfp_read_oem_uid(uid_num); + + INFO("SFP Value read is %x from UID %d\n", val, uid_num); + if (val == 0U) { + *nv_ctr = 0U; + } else { + *nv_ctr = (32U - __builtin_clz(val)); + } + + INFO("NV Counter value for UID %d is %d\n", uid_num, *nv_ctr); + return 0; + +} + +int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr) +{ + const char *oid; + uint32_t uid_num, sfp_val; + + assert(cookie != NULL); + + /* Counter values upto 32 are supported */ + if (nv_ctr > 32U) { + return 1; + } + + oid = (const char *)cookie; + if (strcmp(oid, TRUSTED_FW_NVCOUNTER_OID) == 0) { + uid_num = 3U; + } else if (strcmp(oid, NON_TRUSTED_FW_NVCOUNTER_OID) == 0) { + uid_num = 4U; + } else { + return 1; + } + sfp_val = (1U << (nv_ctr - 1)); + + if (sfp_write_oem_uid(uid_num, sfp_val) == 1) { + /* Enable POVDD on board */ + if (board_enable_povdd()) { + sfp_program_fuses(); + } + + /* Disable POVDD on board */ + board_disable_povdd(); + } else { + ERROR("Invalid OEM UID sent.\n"); + return 1; + } + + return 0; +} + +int plat_get_mbedtls_heap(void **heap_addr, size_t *heap_size) +{ + return get_mbedtls_heap_helper(heap_addr, heap_size); +} diff --git a/plat/nxp/common/warm_reset/plat_warm_reset.c b/plat/nxp/common/warm_reset/plat_warm_reset.c new file mode 100644 index 0000000..966a73c --- /dev/null +++ b/plat/nxp/common/warm_reset/plat_warm_reset.c @@ -0,0 +1,121 @@ +/* + * Copyright 2021 NXP + * + * SPDX-License-Identifier: BSD-3-Clause + * + */ + +#include <errno.h> + +#include <common/debug.h> +#include <ddr.h> 
+#ifndef NXP_COINED_BB +#include <flash_info.h> +#include <fspi.h> +#include <fspi_api.h> +#endif +#include <lib/mmio.h> +#include <lib/psci/psci.h> +#ifdef NXP_COINED_BB +#include <snvs.h> +#endif + +#include <plat_nv_storage.h> +#include "plat_warm_rst.h" +#include "platform_def.h" + +#if defined(IMAGE_BL2) + +uint32_t is_warm_boot(void) +{ + uint32_t ret = mmio_read_32(NXP_RESET_ADDR + RST_RSTRQSR1_OFFSET) + & ~(RSTRQSR1_SWRR); + + const nv_app_data_t *nv_app_data = get_nv_data(); + + if (ret == 0U) { + INFO("Not a SW(Warm) triggered reset.\n"); + return 0U; + } + + ret = (nv_app_data->warm_rst_flag == WARM_BOOT_SUCCESS) ? 1 : 0; + + if (ret != 0U) { + INFO("Warm Reset was triggered..\n"); + } else { + INFO("Warm Reset was not triggered..\n"); + } + + return ret; +} + +#endif + +#if defined(IMAGE_BL31) +int prep_n_execute_warm_reset(void) +{ +#ifdef NXP_COINED_BB +#if !TRUSTED_BOARD_BOOT + snvs_disable_zeroize_lp_gpr(); +#endif +#else + int ret; + uint8_t warm_reset = WARM_BOOT_SUCCESS; + + ret = fspi_init(NXP_FLEXSPI_ADDR, NXP_FLEXSPI_FLASH_ADDR); + + if (ret != 0) { + ERROR("Failed to initialized driver flexspi-nor.\n"); + ERROR("exiting warm-reset request.\n"); + return PSCI_E_INTERN_FAIL; + } + + /* Sector starting from NV_STORAGE_BASE_ADDR is already + * erased for writing. + */ + +#if (ERLY_WRM_RST_FLG_FLSH_UPDT) + ret = xspi_write((uint32_t)NV_STORAGE_BASE_ADDR, + &warm_reset, + sizeof(warm_reset)); +#else + /* Preparation for writing the Warm reset flag. */ + ret = xspi_wren((uint32_t)NV_STORAGE_BASE_ADDR); + + /* IP Control Register0 - SF Address to be read */ + fspi_out32((NXP_FLEXSPI_ADDR + FSPI_IPCR0), + (uint32_t) NV_STORAGE_BASE_ADDR); + + while ((fspi_in32(NXP_FLEXSPI_ADDR + FSPI_INTR) & + FSPI_INTR_IPTXWE_MASK) == 0) { + ; + } + /* Write TX FIFO Data Register */ + fspi_out32(NXP_FLEXSPI_ADDR + FSPI_TFDR, (uint32_t) warm_reset); + + fspi_out32(NXP_FLEXSPI_ADDR + FSPI_INTR, FSPI_INTR_IPTXWE); + + /* IP Control Register1 - SEQID_WRITE operation, Size = 1 Byte */ + fspi_out32(NXP_FLEXSPI_ADDR + FSPI_IPCR1, + (uint32_t)(FSPI_WRITE_SEQ_ID << FSPI_IPCR1_ISEQID_SHIFT) | + (uint16_t) sizeof(warm_reset)); + + /* Trigger XSPI-IP-Write cmd only if: + * - Putting DDR in-self refresh mode is successfully. + * to complete the writing of the warm-reset flag + * to flash. + * + * This code is as part of assembly. 
+	 */
+#endif
+#endif
+	INFO("Doing DDR Self refresh.\n");
+	_soc_sys_warm_reset();
+
+	/* Expected behaviour is to do the power cycle */
+	while (1 != 0)
+		;
+
+	return -1;
+}
+#endif
diff --git a/plat/nxp/common/warm_reset/plat_warm_rst.h b/plat/nxp/common/warm_reset/plat_warm_rst.h
new file mode 100644
index 0000000..e0c39c5
--- /dev/null
+++ b/plat/nxp/common/warm_reset/plat_warm_rst.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PLAT_WARM_RST_H
+#define PLAT_WARM_RST_H
+
+#ifndef NXP_COINED_BB
+#define ERLY_WRM_RST_FLG_FLSH_UPDT 0
+#endif
+
+#ifndef __ASSEMBLER__
+
+#if defined(IMAGE_BL2)
+uint32_t is_warm_boot(void);
+#endif
+
+#if defined(IMAGE_BL31)
+int prep_n_execute_warm_reset(void);
+int _soc_sys_warm_reset(void);
+#endif
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* PLAT_WARM_RST_H */
diff --git a/plat/nxp/common/warm_reset/warm_reset.mk b/plat/nxp/common/warm_reset/warm_reset.mk
new file mode 100644
index 0000000..236004f
--- /dev/null
+++ b/plat/nxp/common/warm_reset/warm_reset.mk
@@ -0,0 +1,20 @@
+#
+# Copyright 2020 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-----------------------------------------------------------------------------
+ifeq (${WARM_RST_ADDED},)
+
+WARM_RST_ADDED := 1
+NXP_NV_SW_MAINT_LAST_EXEC_DATA := yes
+
+$(eval $(call add_define,NXP_WARM_BOOT))
+
+
+WARM_RST_INCLUDES += -I${PLAT_COMMON_PATH}/warm_reset
+WARM_RST_BL31_SOURCES += ${PLAT_SOC_PATH}/$(ARCH)/${SOC}_warm_rst.S
+
+WARM_RST_BL_COMM_SOURCES += ${PLAT_COMMON_PATH}/warm_reset/plat_warm_reset.c
+
+endif
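For illustration, the SIP_PARAM_TYPE macros defined in sipsvc.h above pack one 4-bit type per parameter into a 16-bit descriptor. The stand-alone, host-side sketch below is not part of the commit; the macros are copied from the header and the parameter layout is taken from SIP_SVC_RNG_PARAMS, so it simply demonstrates the documented pack/extract round trip.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the macros in sipsvc.h for a stand-alone demonstration. */
#define SIP_PARAM_TYPE_VALUE_INPUT   0x1
#define SIP_PARAM_TYPE_MEMREF_OUTPUT 0x6
#define SIP_PARAM_TYPE_NONE          0x0

#define SIP_PARAM_TYPES(t0, t1, t2, t3) \
	((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))
#define SIP_PARAM_TYPE_GET(t, i) ((((uint32_t)(t)) >> ((i) * 4)) & 0xF)

int main(void)
{
	/* Same parameter layout the header lists for SIP_SVC_RNG_PARAMS. */
	uint32_t types = SIP_PARAM_TYPES(SIP_PARAM_TYPE_VALUE_INPUT,
					 SIP_PARAM_TYPE_MEMREF_OUTPUT,
					 SIP_PARAM_TYPE_NONE,
					 SIP_PARAM_TYPE_NONE);

	/* Each nibble can be pulled back out individually. */
	assert(SIP_PARAM_TYPE_GET(types, 0) == SIP_PARAM_TYPE_VALUE_INPUT);
	assert(SIP_PARAM_TYPE_GET(types, 1) == SIP_PARAM_TYPE_MEMREF_OUTPUT);

	printf("paramTypes descriptor: 0x%04x\n", (unsigned int)types);
	return 0;
}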
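The SIP_SVC_MEM_BANK case in sip_svc.c above returns the total DRAM size when x1 == -1 and an (address, size) pair when x1 is a valid region index, answering SMC_UNK otherwise. The sketch below walks that convention from the caller's side; the smc_call() mock, the bank addresses and sizes, and the numeric SMC_OK/SMC_UNK values are illustrative assumptions standing in for a real SMC conduit (for example, the one the OS kernel provides), not part of the commit.

#include <stdint.h>
#include <stdio.h>

#define SIP_SVC_MEM_BANK 0xff12
#define SMC_OK           0ULL
#define SMC_UNK          (~0ULL) /* assumption: "unknown SMC" error, i.e. -1 */

struct smc_result { uint64_t x0, x1, x2, x3; };

/*
 * Stand-in for the real SMC conduit. It mimics the SIP_SVC_MEM_BANK
 * convention implemented in sip_svc.c: x1 == -1 -> total DRAM size,
 * valid index -> (addr, size), anything else -> SMC_UNK.
 */
static struct smc_result smc_call(uint32_t func, uint64_t x1)
{
	static const struct { uint64_t addr, size; } bank[] = {
		{ 0x0080000000ULL, 0x80000000ULL },
		{ 0x2080000000ULL, 0x80000000ULL },
	};
	struct smc_result r = { SMC_UNK, 0, 0, 0 };

	if (func != SIP_SVC_MEM_BANK)
		return r;
	if (x1 == (uint64_t)-1)
		r = (struct smc_result){ SMC_OK, 0x100000000ULL, 0, 0 };
	else if (x1 < 2)
		r = (struct smc_result){ SMC_OK, bank[x1].addr, bank[x1].size, 0 };
	return r;
}

int main(void)
{
	/* First ask for the total DRAM size. */
	struct smc_result res = smc_call(SIP_SVC_MEM_BANK, (uint64_t)-1);

	if (res.x0 == SMC_OK)
		printf("total DRAM: 0x%llx bytes\n", (unsigned long long)res.x1);

	/* Then walk region indices until the service reports a failure. */
	for (uint64_t i = 0; ; i++) {
		res = smc_call(SIP_SVC_MEM_BANK, i);
		if (res.x0 != SMC_OK)
			break;
		printf("bank %llu: addr 0x%llx size 0x%llx\n",
		       (unsigned long long)i,
		       (unsigned long long)res.x1,
		       (unsigned long long)res.x2);
	}
	return 0;
}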
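plat_set_nv_ctr() in x509_tbbr.c above stores the trusted non-volatile counter in an SFP OEM UID fuse word as a one-hot value (bit nv_ctr - 1), and plat_get_nv_ctr() recovers it as 32 - __builtin_clz(val), so the counter can only move forward as more fuse bits are blown. A plain-C sketch of that encoding follows; it performs no SFP access and relies on the same GCC/Clang __builtin_clz the source uses.

#include <assert.h>
#include <stdint.h>

/* Encode a counter value 1..32 the way plat_set_nv_ctr() programs the fuse. */
static uint32_t nv_ctr_to_fuse(unsigned int nv_ctr)
{
	return 1U << (nv_ctr - 1U);
}

/* Decode a fuse word the way plat_get_nv_ctr() interprets it. */
static unsigned int fuse_to_nv_ctr(uint32_t val)
{
	return (val == 0U) ? 0U : (32U - (unsigned int)__builtin_clz(val));
}

int main(void)
{
	/* Round trip holds for every representable counter value. */
	for (unsigned int n = 1U; n <= 32U; n++)
		assert(fuse_to_nv_ctr(nv_ctr_to_fuse(n)) == n);

	/* Fuse writes accumulate: older (lower) bits never lower the counter. */
	assert(fuse_to_nv_ctr(nv_ctr_to_fuse(5U) | nv_ctr_to_fuse(3U)) == 5U);
	return 0;
}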