Diffstat (limited to '')
 -rw-r--r--  common/aarch32/debug.S               239
 -rw-r--r--  common/aarch64/debug.S               202
 -rw-r--r--  common/aarch64/early_exceptions.S    129
 -rw-r--r--  common/backtrace/backtrace.c         266
 -rw-r--r--  common/backtrace/backtrace.mk         31
 -rw-r--r--  common/bl_common.c                   280
 -rw-r--r--  common/desc_image_load.c             351
 -rw-r--r--  common/fdt_fixup.c                   626
 -rw-r--r--  common/fdt_wrappers.c                641
 -rw-r--r--  common/fdt_wrappers.mk                 7
 -rw-r--r--  common/feat_detect.c                 239
 -rw-r--r--  common/image_decompress.c             80
 -rw-r--r--  common/runtime_svc.c                 154
 -rw-r--r--  common/tf_crc32.c                     45
 -rw-r--r--  common/tf_log.c                       79
 -rw-r--r--  common/uuid.c                        157
16 files changed, 3526 insertions, 0 deletions
diff --git a/common/aarch32/debug.S b/common/aarch32/debug.S
new file mode 100644
index 0000000..2937f56
--- /dev/null
+++ b/common/aarch32/debug.S
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/debug.h>
+
+ .globl asm_print_str
+ .globl asm_print_hex
+ .globl asm_print_hex_bits
+ .globl asm_assert
+ .globl el3_panic
+ .globl report_exception
+ .globl report_prefetch_abort
+ .globl report_data_abort
+
+/* Since the max decimal input number is 65536 */
+#define MAX_DEC_DIVISOR 10000
+/* The offset to add to get ascii for numerals '0 - 9' */
+#define ASCII_OFFSET_NUM '0'
+
+#if ENABLE_ASSERTIONS
+.section .rodata.assert_str, "aS"
+assert_msg1:
+ .asciz "ASSERT: File "
+assert_msg2:
+#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
+ /******************************************************************
+	 * Virtualization brings the UDIV/SDIV instructions. If they are
+	 * missing, write the file line number in hexadecimal format.
+ ******************************************************************/
+ .asciz " Line 0x"
+#else
+ .asciz " Line "
+
+ /*
+ * This macro is intended to be used to print the
+ * line number in decimal. Used by asm_assert macro.
+ * The max number expected is 65536.
+ * In: r4 = the decimal to print.
+ * Clobber: lr, r0, r1, r2, r5, r6
+ */
+ .macro asm_print_line_dec
+ mov r6, #10 /* Divide by 10 after every loop iteration */
+ ldr r5, =MAX_DEC_DIVISOR
+dec_print_loop:
+ udiv r0, r4, r5 /* Get the quotient */
+ mls r4, r0, r5, r4 /* Find the remainder */
+ add r0, r0, #ASCII_OFFSET_NUM /* Convert to ascii */
+ bl plat_crash_console_putc
+ udiv r5, r5, r6 /* Reduce divisor */
+ cmp r5, #0
+ bne dec_print_loop
+ .endm
+#endif
+
+/* ---------------------------------------------------------------------------
+ * Assertion support in assembly.
+ * The below function helps to support assertions in assembly where we do not
+ * have a C runtime stack. Arguments to the function are :
+ * r0 - File name
+ * r1 - Line no
+ * Clobber list : lr, r0 - r6
+ * ---------------------------------------------------------------------------
+ */
+func asm_assert
+#if LOG_LEVEL >= LOG_LEVEL_INFO
+ /*
+	 * Only print the output if LOG_LEVEL is higher than or equal to
+ * LOG_LEVEL_INFO, which is the default value for builds with DEBUG=1.
+ */
+ /* Stash the parameters already in r0 and r1 */
+ mov r5, r0
+ mov r6, r1
+
+ /* Ensure the console is initialized */
+ bl plat_crash_console_init
+
+ /* Check if the console is initialized */
+ cmp r0, #0
+ beq _assert_loop
+
+ /* The console is initialized */
+ ldr r4, =assert_msg1
+ bl asm_print_str
+ mov r4, r5
+ bl asm_print_str
+ ldr r4, =assert_msg2
+ bl asm_print_str
+
+ /* Check if line number higher than max permitted */
+ ldr r4, =~0xffff
+ tst r6, r4
+ bne _assert_loop
+ mov r4, r6
+
+#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
+ /******************************************************************
+	 * Virtualization brings the UDIV/SDIV instructions. If they are
+	 * missing, write the file line number in hexadecimal format.
+ ******************************************************************/
+ bl asm_print_hex
+#else
+ asm_print_line_dec
+#endif
+ bl plat_crash_console_flush
+_assert_loop:
+#endif /* LOG_LEVEL >= LOG_LEVEL_INFO */
+ no_ret plat_panic_handler
+endfunc asm_assert
+#endif /* ENABLE_ASSERTIONS */
+
+/*
+ * This function prints a string from address in r4
+ * Clobber: lr, r0 - r4
+ */
+func asm_print_str
+ mov r3, lr
+1:
+ ldrb r0, [r4], #0x1
+ cmp r0, #0
+ beq 2f
+ bl plat_crash_console_putc
+ b 1b
+2:
+ bx r3
+endfunc asm_print_str
+
+/*
+ * This function prints a hexadecimal number in r4.
+ * In: r4 = the hexadecimal to print.
+ * Clobber: lr, r0 - r3, r5
+ */
+func asm_print_hex
+ mov r5, #32 /* No of bits to convert to ascii */
+
+ /* Convert to ascii number of bits in r5 */
+asm_print_hex_bits:
+ mov r3, lr
+1:
+ sub r5, r5, #4
+ lsr r0, r4, r5
+ and r0, r0, #0xf
+ cmp r0, #0xa
+ blo 2f
+	/* Add 0x27 in addition to ASCII_OFFSET_NUM
+	 * to get the ASCII characters 'a' - 'f'.
+ */
+ add r0, r0, #0x27
+2:
+ add r0, r0, #ASCII_OFFSET_NUM
+ bl plat_crash_console_putc
+ cmp r5, #0
+ bne 1b
+ bx r3
+endfunc asm_print_hex
+
+ /***********************************************************
+ * The common implementation of el3_panic for all BL stages
+ ***********************************************************/
+
+.section .rodata.panic_str, "aS"
+ panic_msg: .asciz "PANIC at PC : 0x"
+ panic_end: .asciz "\r\n"
+
+func el3_panic
+ /* Have LR copy point to PC at the time of panic */
+ sub r6, lr, #4
+
+ /* Initialize crash console and verify success */
+ bl plat_crash_console_init
+
+ /* Check if the console is initialized */
+ cmp r0, #0
+ beq _panic_handler
+
+ /* The console is initialized */
+ ldr r4, =panic_msg
+ bl asm_print_str
+
+ /* Print LR in hex */
+ mov r4, r6
+ bl asm_print_hex
+
+ /* Print new line */
+ ldr r4, =panic_end
+ bl asm_print_str
+
+ bl plat_crash_console_flush
+
+_panic_handler:
+ mov lr, r6
+ b plat_panic_handler
+endfunc el3_panic
+
+ /***********************************************************
+ * This function is called from the vector table for
+ * unhandled exceptions. It reads the current mode and
+ * passes it to platform.
+ ***********************************************************/
+func report_exception
+ mrs r0, cpsr
+ and r0, #MODE32_MASK
+ bl plat_report_exception
+ no_ret plat_panic_handler
+endfunc report_exception
+
+ /***********************************************************
+ * This function is called from the vector table for
+ * unhandled exceptions. The lr_abt is given as an
+ * argument to platform handler.
+ ***********************************************************/
+func report_prefetch_abort
+#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
+ b report_exception
+#else
+ mrs r0, lr_abt
+ bl plat_report_prefetch_abort
+ no_ret plat_panic_handler
+#endif
+endfunc report_prefetch_abort
+
+ /***********************************************************
+ * This function is called from the vector table for
+ * unhandled exceptions. The lr_abt is given as an
+ * argument to platform handler.
+ ***********************************************************/
+func report_data_abort
+#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
+ b report_exception
+#else
+ mrs r0, lr_abt
+ bl plat_report_data_abort
+ no_ret plat_panic_handler
+#endif
+endfunc report_data_abort
diff --git a/common/aarch64/debug.S b/common/aarch64/debug.S
new file mode 100644
index 0000000..8768a1f
--- /dev/null
+++ b/common/aarch64/debug.S
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2014-2023 Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/debug.h>
+
+ .globl asm_print_str
+ .globl asm_print_hex
+ .globl asm_print_hex_bits
+ .globl asm_print_newline
+ .globl asm_assert
+ .globl el3_panic
+ .globl elx_panic
+
+/* Since the max decimal input number is 65536 */
+#define MAX_DEC_DIVISOR 10000
+/* The offset to add to get ascii for numerals '0 - 9' */
+#define ASCII_OFFSET_NUM 0x30
+
+#if ENABLE_ASSERTIONS
+.section .rodata.assert_str, "aS"
+assert_msg1:
+ .asciz "ASSERT: File "
+assert_msg2:
+ .asciz " Line "
+
+ /*
+ * This macro is intended to be used to print the
+ * line number in decimal. Used by asm_assert macro.
+ * The max number expected is 65536.
+ * In: x4 = the decimal to print.
+ * Clobber: x30, x0, x1, x2, x5, x6
+ */
+ .macro asm_print_line_dec
+ mov x6, #10 /* Divide by 10 after every loop iteration */
+ mov x5, #MAX_DEC_DIVISOR
+dec_print_loop:
+ udiv x0, x4, x5 /* Get the quotient */
+ msub x4, x0, x5, x4 /* Find the remainder */
+ add x0, x0, #ASCII_OFFSET_NUM /* Convert to ascii */
+ bl plat_crash_console_putc
+ udiv x5, x5, x6 /* Reduce divisor */
+ cbnz x5, dec_print_loop
+ .endm
+
+
+/* ---------------------------------------------------------------------------
+ * Assertion support in assembly.
+ * The below function helps to support assertions in assembly where we do not
+ * have a C runtime stack. Arguments to the function are :
+ * x0 - File name
+ * x1 - Line no
+ * Clobber list : x30, x0, x1, x2, x3, x4, x5, x6.
+ * ---------------------------------------------------------------------------
+ */
+func asm_assert
+#if LOG_LEVEL >= LOG_LEVEL_INFO
+ /*
+	 * Only print the output if LOG_LEVEL is higher than or equal to
+ * LOG_LEVEL_INFO, which is the default value for builds with DEBUG=1.
+ */
+ mov x5, x0
+ mov x6, x1
+
+ /* Ensure the console is initialized */
+ bl plat_crash_console_init
+
+ /* Check if the console is initialized */
+ cbz x0, _assert_loop
+
+ /* The console is initialized */
+ adr x4, assert_msg1
+ bl asm_print_str
+ mov x4, x5
+ bl asm_print_str
+ adr x4, assert_msg2
+ bl asm_print_str
+
+ /* Check if line number higher than max permitted */
+ tst x6, #~0xffff
+ b.ne _assert_loop
+ mov x4, x6
+ asm_print_line_dec
+ bl plat_crash_console_flush
+_assert_loop:
+#endif /* LOG_LEVEL >= LOG_LEVEL_INFO */
+ no_ret plat_panic_handler
+endfunc asm_assert
+#endif /* ENABLE_ASSERTIONS */
+
+/*
+ * This function prints a string from address in x4.
+ * In: x4 = pointer to string.
+ * Clobber: x30, x0, x1, x2, x3
+ */
+func asm_print_str
+ mov x3, x30
+1:
+ ldrb w0, [x4], #0x1
+ cbz x0, 2f
+ bl plat_crash_console_putc
+ b 1b
+2:
+ ret x3
+endfunc asm_print_str
+
+/*
+ * This function prints a hexadecimal number in x4.
+ * In: x4 = the hexadecimal to print.
+ * Clobber: x30, x0 - x3, x5
+ */
+func asm_print_hex
+ mov x5, #64 /* No of bits to convert to ascii */
+
+ /* Convert to ascii number of bits in x5 */
+asm_print_hex_bits:
+ mov x3, x30
+1:
+ sub x5, x5, #4
+ lsrv x0, x4, x5
+ and x0, x0, #0xf
+ cmp x0, #0xA
+ b.lo 2f
+	/* Add 0x27 in addition to ASCII_OFFSET_NUM
+	 * to get the ASCII characters 'a' - 'f'.
+ */
+ add x0, x0, #0x27
+2:
+ add x0, x0, #ASCII_OFFSET_NUM
+ bl plat_crash_console_putc
+ cbnz x5, 1b
+ ret x3
+endfunc asm_print_hex
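For readers less familiar with the assembly above, the nibble-to-ASCII conversion done by asm_print_hex/asm_print_hex_bits corresponds roughly to the following C sketch. It is illustrative only: print_hex is a made-up name, and plat_crash_console_putc is the same crash-console hook the assembly calls.

#include <stdint.h>

#include <plat/common/platform.h>	/* plat_crash_console_putc() */

/* C sketch of the hex-printing loop implemented in asm_print_hex above. */
static void print_hex(uint64_t val, unsigned int bits)
{
	for (int shift = (int)bits - 4; shift >= 0; shift -= 4) {
		unsigned int nibble = (unsigned int)(val >> shift) & 0xfU;

		/* Adding 0x27 bridges the ASCII gap between '9' + 1 and 'a'. */
		int c = (nibble < 0xaU) ? ('0' + (int)nibble)
					: ('0' + 0x27 + (int)nibble);
		(void)plat_crash_console_putc(c);
	}
}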
+
+/*
+ * Helper function to print newline to console
+ * Clobber: x0
+ */
+func asm_print_newline
+ mov x0, '\n'
+ b plat_crash_console_putc
+endfunc asm_print_newline
+
+ /***********************************************************
+ * The common implementation of el3_panic for all BL stages
+ ***********************************************************/
+
+.section .rodata.panic_str, "aS"
+ panic_msg: .asciz "PANIC at PC : 0x"
+
+func elx_panic
+#if CRASH_REPORTING && defined(IMAGE_BL31)
+ b report_elx_panic
+#endif /* CRASH_REPORTING && IMAGE_BL31 */
+
+ b panic_common
+endfunc elx_panic
+
+/* ---------------------------------------------------------------------------
+ * el3_panic assumes that it is invoked from a C runtime environment, i.e.
+ * a valid stack exists. This call will not return.
+ * Clobber list : if CRASH_REPORTING is not enabled then x30, x0 - x6
+ * ---------------------------------------------------------------------------
+ */
+
+func el3_panic
+#if CRASH_REPORTING && defined(IMAGE_BL31)
+ b report_el3_panic
+#endif /* CRASH_REPORTING && IMAGE_BL31 */
+
+panic_common:
+ mov x6, x30
+ bl plat_crash_console_init
+
+ /* Check if the console is initialized */
+ cbz x0, _panic_handler
+
+ /* The console is initialized */
+ adr x4, panic_msg
+ bl asm_print_str
+ mov x4, x6
+
+ /* The panic location is lr -4 */
+ sub x4, x4, #4
+ bl asm_print_hex
+
+ /* Print new line */
+ bl asm_print_newline
+
+ bl plat_crash_console_flush
+
+_panic_handler:
+ /* Pass to plat_panic_handler the address from where el3_panic was
+ * called, not the address of the call from el3_panic. */
+ mov x30, x6
+ b plat_panic_handler
+
+endfunc el3_panic
diff --git a/common/aarch64/early_exceptions.S b/common/aarch64/early_exceptions.S
new file mode 100644
index 0000000..36a8724
--- /dev/null
+++ b/common/aarch64/early_exceptions.S
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <common/bl_common.h>
+
+/* -----------------------------------------------------------------------------
+ * Very simple stackless exception handlers used by BL2 and BL31 stages.
+ * BL31 uses them before stacks are set up. BL2 uses them throughout.
+ * -----------------------------------------------------------------------------
+ */
+ .globl early_exceptions
+
+vector_base early_exceptions
+
+ /* -----------------------------------------------------
+ * Current EL with SP0 : 0x0 - 0x200
+ * -----------------------------------------------------
+ */
+vector_entry SynchronousExceptionSP0
+ mov x0, #SYNC_EXCEPTION_SP_EL0
+ bl plat_report_exception
+ no_ret plat_panic_handler
+end_vector_entry SynchronousExceptionSP0
+
+vector_entry IrqSP0
+ mov x0, #IRQ_SP_EL0
+ bl plat_report_exception
+ no_ret plat_panic_handler
+end_vector_entry IrqSP0
+
+vector_entry FiqSP0
+ mov x0, #FIQ_SP_EL0
+ bl plat_report_exception
+ no_ret plat_panic_handler
+end_vector_entry FiqSP0
+
+vector_entry SErrorSP0
+ mov x0, #SERROR_SP_EL0
+ bl plat_report_exception
+ no_ret plat_panic_handler
+end_vector_entry SErrorSP0
+
+ /* -----------------------------------------------------
+ * Current EL with SPx: 0x200 - 0x400
+ * -----------------------------------------------------
+ */
+vector_entry SynchronousExceptionSPx
+ mov x0, #SYNC_EXCEPTION_SP_ELX
+ bl plat_report_exception
+ no_ret plat_panic_handler
+end_vector_entry SynchronousExceptionSPx
+
+vector_entry IrqSPx
+ mov x0, #IRQ_SP_ELX
+ bl plat_report_exception
+ no_ret plat_panic_handler
+end_vector_entry IrqSPx
+
+vector_entry FiqSPx
+ mov x0, #FIQ_SP_ELX
+ bl plat_report_exception
+ no_ret plat_panic_handler
+end_vector_entry FiqSPx
+
+vector_entry SErrorSPx
+ mov x0, #SERROR_SP_ELX
+ bl plat_report_exception
+ no_ret plat_panic_handler
+end_vector_entry SErrorSPx
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * -----------------------------------------------------
+ */
+vector_entry SynchronousExceptionA64
+ mov x0, #SYNC_EXCEPTION_AARCH64
+ bl plat_report_exception
+ no_ret plat_panic_handler
+end_vector_entry SynchronousExceptionA64
+
+vector_entry IrqA64
+ mov x0, #IRQ_AARCH64
+ bl plat_report_exception
+ no_ret plat_panic_handler
+end_vector_entry IrqA64
+
+vector_entry FiqA64
+ mov x0, #FIQ_AARCH64
+ bl plat_report_exception
+ no_ret plat_panic_handler
+end_vector_entry FiqA64
+
+vector_entry SErrorA64
+ mov x0, #SERROR_AARCH64
+ bl plat_report_exception
+ no_ret plat_panic_handler
+end_vector_entry SErrorA64
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * -----------------------------------------------------
+ */
+vector_entry SynchronousExceptionA32
+ mov x0, #SYNC_EXCEPTION_AARCH32
+ bl plat_report_exception
+ no_ret plat_panic_handler
+end_vector_entry SynchronousExceptionA32
+
+vector_entry IrqA32
+ mov x0, #IRQ_AARCH32
+ bl plat_report_exception
+ no_ret plat_panic_handler
+end_vector_entry IrqA32
+
+vector_entry FiqA32
+ mov x0, #FIQ_AARCH32
+ bl plat_report_exception
+ no_ret plat_panic_handler
+end_vector_entry FiqA32
+
+vector_entry SErrorA32
+ mov x0, #SERROR_AARCH32
+ bl plat_report_exception
+ no_ret plat_panic_handler
+end_vector_entry SErrorA32
diff --git a/common/backtrace/backtrace.c b/common/backtrace/backtrace.c
new file mode 100644
index 0000000..f994ae5
--- /dev/null
+++ b/common/backtrace/backtrace.c
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/console.h>
+
+/* Maximum number of entries in the backtrace to display */
+#define UNWIND_LIMIT 20U
+
+/*
+ * If -fno-omit-frame-pointer is used:
+ *
+ * - AArch64: The AAPCS defines the format of the frame records and mandates the
+ * usage of r29 as frame pointer.
+ *
+ * - AArch32: The format of the frame records is not defined in the AAPCS.
+ * However, at least GCC and Clang use the same format. When they are forced
+ * to only generate A32 code (with -marm), they use r11 as frame pointer and a
+ * similar format as in AArch64. If interworking with T32 is enabled, the
+ * frame pointer is r7 and the format is different. This is not supported by
+ *   this implementation of backtrace, so -marm must be used.
+ */
+
+/* Frame records form a linked list in the stack */
+struct frame_record {
+ /* Previous frame record in the list */
+ struct frame_record *parent;
+ /* Return address of the function at this level */
+ uintptr_t return_addr;
+};
+
+static inline uintptr_t extract_address(uintptr_t address)
+{
+ uintptr_t ret = address;
+
+#if ENABLE_PAUTH
+ /*
+ * When pointer authentication is enabled, the LR value saved on the
+ * stack contains a PAC. It must be stripped to retrieve the return
+ * address.
+ */
+
+ xpaci(ret);
+#endif
+
+ return ret;
+}
+
+/*
+ * Returns true if the address points to a virtual address that can be read at
+ * the current EL, false otherwise.
+ */
+#ifdef __aarch64__
+static bool is_address_readable(uintptr_t address)
+{
+ unsigned int el = get_current_el();
+ uintptr_t addr = extract_address(address);
+
+ if (el == 3U) {
+ ats1e3r(addr);
+ } else if (el == 2U) {
+ ats1e2r(addr);
+ } else {
+ AT(ats1e1r, addr);
+ }
+
+ isb();
+
+ /* If PAR.F == 1 the address translation was aborted. */
+ if ((read_par_el1() & PAR_F_MASK) != 0U)
+ return false;
+
+ return true;
+}
+#else /* !__aarch64__ */
+static bool is_address_readable(uintptr_t addr)
+{
+ unsigned int el = get_current_el();
+
+ if (el == 3U) {
+ write_ats1cpr(addr);
+ } else if (el == 2U) {
+ write_ats1hr(addr);
+ } else {
+ write_ats1cpr(addr);
+ }
+
+ isb();
+
+ /* If PAR.F == 1 the address translation was aborted. */
+ if ((read64_par() & PAR_F_MASK) != 0U)
+ return false;
+
+ return true;
+}
+#endif /* __aarch64__ */
+
+/*
+ * Returns true if all the bytes in a given object are in mapped memory and an
+ * LDR using this pointer would succeed, false otherwise.
+ */
+static bool is_valid_object(uintptr_t addr, size_t size)
+{
+ assert(size > 0U);
+
+ if (addr == 0U)
+ return false;
+
+ /* Detect overflows */
+ if ((addr + size) < addr)
+ return false;
+
+ /* A pointer not aligned properly could trigger an alignment fault. */
+ if ((addr & (sizeof(uintptr_t) - 1U)) != 0U)
+ return false;
+
+ /* Check that all the object is readable */
+ for (size_t i = 0; i < size; i++) {
+ if (!is_address_readable(addr + i))
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Returns true if the specified address is correctly aligned and points to a
+ * valid memory region.
+ */
+static bool is_valid_jump_address(uintptr_t addr)
+{
+ if (addr == 0U)
+ return false;
+
+ /* Check alignment. Both A64 and A32 use 32-bit opcodes */
+ if ((addr & (sizeof(uint32_t) - 1U)) != 0U)
+ return false;
+
+ if (!is_address_readable(addr))
+ return false;
+
+ return true;
+}
+
+/*
+ * Returns true if the pointer points at a valid frame record, false otherwise.
+ */
+static bool is_valid_frame_record(struct frame_record *fr)
+{
+ return is_valid_object((uintptr_t)fr, sizeof(struct frame_record));
+}
+
+/*
+ * Adjust the frame-pointer-register value by 4 bytes on AArch32 to have the
+ * same layout as AArch64.
+ */
+static struct frame_record *adjust_frame_record(struct frame_record *fr)
+{
+#ifdef __aarch64__
+ return fr;
+#else
+ return (struct frame_record *)((uintptr_t)fr - 4U);
+#endif
+}
+
+static void unwind_stack(struct frame_record *fr, uintptr_t current_pc,
+ uintptr_t link_register)
+{
+ uintptr_t call_site;
+ static const char *backtrace_str = "%u: %s: 0x%lx\n";
+ const char *el_str = get_el_str(get_current_el());
+
+ if (!is_valid_frame_record(fr)) {
+ printf("ERROR: Corrupted frame pointer (frame record address = %p)\n",
+ fr);
+ return;
+ }
+
+ call_site = extract_address(fr->return_addr);
+ if (call_site != link_register) {
+ printf("ERROR: Corrupted stack (frame record address = %p)\n",
+ fr);
+ return;
+ }
+
+ /* The level 0 of the backtrace is the current backtrace function */
+ printf(backtrace_str, 0U, el_str, current_pc);
+
+ /*
+ * The last frame record pointer in the linked list at the beginning of
+ * the stack should be NULL unless stack is corrupted.
+	 * the stack should be NULL unless the stack is corrupted.
+ for (unsigned int i = 1U; i < UNWIND_LIMIT; i++) {
+ /* If an invalid frame record is found, exit. */
+ if (!is_valid_frame_record(fr))
+ return;
+ /*
+ * A32 and A64 are fixed length so the address from where the
+ * call was made is the instruction before the return address,
+ * which is always 4 bytes before it.
+ */
+
+ call_site = extract_address(fr->return_addr) - 4U;
+
+ /*
+ * If the address is invalid it means that the frame record is
+ * probably corrupted.
+ */
+ if (!is_valid_jump_address(call_site))
+ return;
+
+ printf(backtrace_str, i, el_str, call_site);
+
+ fr = adjust_frame_record(fr->parent);
+ }
+
+ printf("ERROR: Max backtrace depth reached\n");
+}
+
+/*
+ * Display a backtrace. The cookie string parameter is displayed along the
+ * trace to help filter the log messages.
+ *
+ * Many things can prevent displaying the expected backtrace. For example,
+ * compiler optimizations can use a branch instead of a branch with link when
+ * a tail call is detected. The backtrace level for this caller will not be
+ * displayed, as it does not appear in the call stack anymore. Also, assembly
+ * functions will not be displayed unless they set up AAPCS-compliant frame
+ * records on AArch64, or frame records compliant with the GCC-specific
+ * format on AArch32.
+ *
+ * Usage of the trace: addr2line can be used to map the addresses to function
+ * and source code location when given the ELF file compiled with debug
+ * information. The "-i" flag is highly recommended to improve display of
+ * inlined function. The *.dump files generated when building each image can
+ * also be used.
+ *
+ * WARNING: In case of corrupted stack, this function could display security
+ * sensitive information past the beginning of the stack, so it must not be used
+ * in production builds. This function is only compiled in when ENABLE_BACKTRACE
+ * is set to 1.
+ */
+void backtrace(const char *cookie)
+{
+ uintptr_t return_address = (uintptr_t)__builtin_return_address(0U);
+ struct frame_record *fr = __builtin_frame_address(0U);
+
+ /* Printing the backtrace may crash the system, flush before starting */
+ console_flush();
+
+ fr = adjust_frame_record(fr);
+
+ printf("BACKTRACE: START: %s\n", cookie);
+
+ unwind_stack(fr, (uintptr_t)&backtrace, return_address);
+
+ printf("BACKTRACE: END: %s\n", cookie);
+}
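As a usage sketch (the surrounding function is hypothetical and not part of this patch; backtrace() is assumed to be declared via common/debug.h when ENABLE_BACKTRACE=1):

#include <common/debug.h>

/* Hypothetical error path that dumps a call trace before giving up. */
static void plat_report_fatal_error(void)
{
	/* The cookie string is echoed in the START/END markers for filtering. */
	backtrace("plat_report_fatal_error");
	panic();
}

Each printed address can then be fed to addr2line -i against the matching ELF image, as described in the comment above.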
diff --git a/common/backtrace/backtrace.mk b/common/backtrace/backtrace.mk
new file mode 100644
index 0000000..e669331
--- /dev/null
+++ b/common/backtrace/backtrace.mk
@@ -0,0 +1,31 @@
+#
+# Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Enable backtrace by default in DEBUG AArch64 builds
+ifeq (${ARCH},aarch32)
+ ENABLE_BACKTRACE := 0
+else
+ ENABLE_BACKTRACE := ${DEBUG}
+endif
+
+ifeq (${ENABLE_BACKTRACE},1)
+ # Force the compiler to include the frame pointer
+ TF_CFLAGS += -fno-omit-frame-pointer
+
+ BL_COMMON_SOURCES += common/backtrace/backtrace.c
+endif
+
+ifeq (${ARCH},aarch32)
+ ifeq (${ENABLE_BACKTRACE},1)
+ ifneq (${AARCH32_INSTRUCTION_SET},A32)
+ $(error Error: AARCH32_INSTRUCTION_SET=A32 is needed \
+ for ENABLE_BACKTRACE when compiling for AArch32.)
+ endif
+ endif
+endif
+
+$(eval $(call assert_boolean,ENABLE_BACKTRACE))
+$(eval $(call add_define,ENABLE_BACKTRACE))
diff --git a/common/bl_common.c b/common/bl_common.c
new file mode 100644
index 0000000..8fce02f
--- /dev/null
+++ b/common/bl_common.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <string.h>
+
+#include <arch.h>
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <drivers/auth/auth_mod.h>
+#include <drivers/io/io_storage.h>
+#include <lib/utils.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+#include <plat/common/platform.h>
+
+#if TRUSTED_BOARD_BOOT
+# ifdef DYN_DISABLE_AUTH
+static int disable_auth;
+
+/******************************************************************************
+ * API to dynamically disable authentication. Only meant for development
+ * systems. This is only invoked if DYN_DISABLE_AUTH is defined.
+ *****************************************************************************/
+void dyn_disable_auth(void)
+{
+ INFO("Disabling authentication of images dynamically\n");
+ disable_auth = 1;
+}
+# endif /* DYN_DISABLE_AUTH */
+
+/******************************************************************************
+ * Function to determine whether the authentication is disabled dynamically.
+ *****************************************************************************/
+static int dyn_is_auth_disabled(void)
+{
+# ifdef DYN_DISABLE_AUTH
+ return disable_auth;
+# else
+ return 0;
+# endif
+}
+#endif /* TRUSTED_BOARD_BOOT */
+
+uintptr_t page_align(uintptr_t value, unsigned dir)
+{
+ /* Round up the limit to the next page boundary */
+ if ((value & PAGE_SIZE_MASK) != 0U) {
+ value &= ~PAGE_SIZE_MASK;
+ if (dir == UP)
+ value += PAGE_SIZE;
+ }
+
+ return value;
+}
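A minimal usage sketch (the wrapper and values are illustrative, assuming the default 4 KB page size; UP and DOWN are the direction constants page_align() is normally used with from common/bl_common.h):

/* Illustrative only: derive page-aligned bounds for an arbitrary region. */
static void align_region_example(uintptr_t addr)
{
	uintptr_t base  = page_align(addr, DOWN);	/* round down */
	uintptr_t limit = page_align(addr, UP);		/* round up */

	INFO("Aligned region: 0x%lx - 0x%lx\n", base, limit);
}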
+
+/*******************************************************************************
+ * Internal function to load an image at a specific address given
+ * an image ID and extents of free memory.
+ *
+ * If the load is successful then the image information is updated.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ ******************************************************************************/
+static int load_image(unsigned int image_id, image_info_t *image_data)
+{
+ uintptr_t dev_handle;
+ uintptr_t image_handle;
+ uintptr_t image_spec;
+ uintptr_t image_base;
+ size_t image_size;
+ size_t bytes_read;
+ int io_result;
+
+ assert(image_data != NULL);
+ assert(image_data->h.version >= VERSION_2);
+
+ image_base = image_data->image_base;
+
+ /* Obtain a reference to the image by querying the platform layer */
+ io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
+ if (io_result != 0) {
+ WARN("Failed to obtain reference to image id=%u (%i)\n",
+ image_id, io_result);
+ return io_result;
+ }
+
+ /* Attempt to access the image */
+ io_result = io_open(dev_handle, image_spec, &image_handle);
+ if (io_result != 0) {
+ WARN("Failed to access image id=%u (%i)\n",
+ image_id, io_result);
+ return io_result;
+ }
+
+ INFO("Loading image id=%u at address 0x%lx\n", image_id, image_base);
+
+ /* Find the size of the image */
+ io_result = io_size(image_handle, &image_size);
+ if ((io_result != 0) || (image_size == 0U)) {
+ WARN("Failed to determine the size of the image id=%u (%i)\n",
+ image_id, io_result);
+ goto exit;
+ }
+
+ /* Check that the image size to load is within limit */
+ if (image_size > image_data->image_max_size) {
+ WARN("Image id=%u size out of bounds\n", image_id);
+ io_result = -EFBIG;
+ goto exit;
+ }
+
+ /*
+ * image_data->image_max_size is a uint32_t so image_size will always
+ * fit in image_data->image_size.
+ */
+ image_data->image_size = (uint32_t)image_size;
+
+ /* We have enough space so load the image now */
+ /* TODO: Consider whether to try to recover/retry a partially successful read */
+ io_result = io_read(image_handle, image_base, image_size, &bytes_read);
+ if ((io_result != 0) || (bytes_read < image_size)) {
+ WARN("Failed to load image id=%u (%i)\n", image_id, io_result);
+ goto exit;
+ }
+
+ INFO("Image id=%u loaded: 0x%lx - 0x%lx\n", image_id, image_base,
+ (uintptr_t)(image_base + image_size));
+
+exit:
+ (void)io_close(image_handle);
+ /* Ignore improbable/unrecoverable error in 'close' */
+
+ /* TODO: Consider maintaining open device connection from this bootloader stage */
+ (void)io_dev_close(dev_handle);
+ /* Ignore improbable/unrecoverable error in 'dev_close' */
+
+ return io_result;
+}
+
+#if TRUSTED_BOARD_BOOT
+/*
+ * This function uses recursion to authenticate the parent images up to the root
+ * of trust.
+ */
+static int load_auth_image_recursive(unsigned int image_id,
+ image_info_t *image_data,
+ int is_parent_image)
+{
+ int rc;
+ unsigned int parent_id;
+
+ /* Use recursion to authenticate parent images */
+ rc = auth_mod_get_parent_id(image_id, &parent_id);
+ if (rc == 0) {
+ rc = load_auth_image_recursive(parent_id, image_data, 1);
+ if (rc != 0) {
+ return rc;
+ }
+ }
+
+ /* Load the image */
+ rc = load_image(image_id, image_data);
+ if (rc != 0) {
+ return rc;
+ }
+
+ /* Authenticate it */
+ rc = auth_mod_verify_img(image_id,
+ (void *)image_data->image_base,
+ image_data->image_size);
+ if (rc != 0) {
+ /* Authentication error, zero memory and flush it right away. */
+ zero_normalmem((void *)image_data->image_base,
+ image_data->image_size);
+ flush_dcache_range(image_data->image_base,
+ image_data->image_size);
+ return -EAUTH;
+ }
+
+ return 0;
+}
+#endif /* TRUSTED_BOARD_BOOT */
+
+static int load_auth_image_internal(unsigned int image_id,
+ image_info_t *image_data)
+{
+#if TRUSTED_BOARD_BOOT
+ if (dyn_is_auth_disabled() == 0) {
+ return load_auth_image_recursive(image_id, image_data, 0);
+ }
+#endif
+
+ return load_image(image_id, image_data);
+}
+
+/*******************************************************************************
+ * Generic function to load and authenticate an image. The image is actually
+ * loaded by calling the 'load_image()' function. Therefore, it returns the
+ * same error codes if the loading operation failed, or -EAUTH if the
+ * authentication failed. In addition, this function uses recursion to
+ * authenticate the parent images up to the root of trust (if TBB is enabled).
+ ******************************************************************************/
+int load_auth_image(unsigned int image_id, image_info_t *image_data)
+{
+ int err;
+
+/*
+ * All firmware banks should be part of the same non-volatile storage as per
+ * PSA FWU specification, hence don't check for any alternate boot source
+ * when PSA FWU is enabled.
+ */
+#if PSA_FWU_SUPPORT
+ err = load_auth_image_internal(image_id, image_data);
+#else
+ do {
+ err = load_auth_image_internal(image_id, image_data);
+ } while ((err != 0) && (plat_try_next_boot_source() != 0));
+#endif /* PSA_FWU_SUPPORT */
+
+ if (err == 0) {
+ /*
+		 * If the image was loaded successfully (and authenticated,
+		 * in the case of a Trusted Boot flow), then measure it
+		 * (if the MEASURED_BOOT flag is enabled).
+ */
+ err = plat_mboot_measure_image(image_id, image_data);
+ if (err != 0) {
+ return err;
+ }
+
+ /*
+ * Flush the image to main memory so that it can be executed
+ * later by any CPU, regardless of cache and MMU state.
+ */
+ flush_dcache_range(image_data->image_base,
+ image_data->image_size);
+ }
+
+ return err;
+}
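A hedged caller sketch (the wrapper name and logging are illustrative; in the real BL2 flow the image_info_t descriptor comes from the platform's image descriptor array):

/* Illustrative wrapper: load, and on TBB builds authenticate, one image. */
static int plat_load_one_image(unsigned int image_id, image_info_t *info)
{
	int rc = load_auth_image(image_id, info);

	if (rc == -EAUTH) {
		ERROR("Image id=%u failed authentication\n", image_id);
	} else if (rc != 0) {
		ERROR("Image id=%u failed to load (%i)\n", image_id, rc);
	}

	return rc;
}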
+
+/*******************************************************************************
+ * Print the content of an entry_point_info_t structure.
+ ******************************************************************************/
+void print_entry_point_info(const entry_point_info_t *ep_info)
+{
+ INFO("Entry point address = 0x%lx\n", ep_info->pc);
+ INFO("SPSR = 0x%x\n", ep_info->spsr);
+
+#define PRINT_IMAGE_ARG(n) \
+ VERBOSE("Argument #" #n " = 0x%llx\n", \
+ (unsigned long long) ep_info->args.arg##n)
+
+ PRINT_IMAGE_ARG(0);
+ PRINT_IMAGE_ARG(1);
+ PRINT_IMAGE_ARG(2);
+ PRINT_IMAGE_ARG(3);
+#ifdef __aarch64__
+ PRINT_IMAGE_ARG(4);
+ PRINT_IMAGE_ARG(5);
+ PRINT_IMAGE_ARG(6);
+ PRINT_IMAGE_ARG(7);
+#endif
+#undef PRINT_IMAGE_ARG
+}
+
+/*
+ * This function is for returning the TF-A version
+ */
+const char *get_version(void)
+{
+ extern const char version[];
+ return version;
+}
diff --git a/common/desc_image_load.c b/common/desc_image_load.c
new file mode 100644
index 0000000..30b97e0
--- /dev/null
+++ b/common/desc_image_load.c
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/desc_image_load.h>
+#include <common/tbbr/tbbr_img_def.h>
+
+static bl_load_info_t bl_load_info;
+static bl_params_t next_bl_params;
+
+
+/*******************************************************************************
+ * This function flushes the data structures so that they are visible
+ * in memory for the next BL image.
+ ******************************************************************************/
+void flush_bl_params_desc(void)
+{
+ flush_bl_params_desc_args(bl_mem_params_desc_ptr,
+ bl_mem_params_desc_num,
+ &next_bl_params);
+}
+
+/*******************************************************************************
+ * This function flushes the data structures specified as arguments so that they
+ * are visible in memory for the next BL image.
+ ******************************************************************************/
+void flush_bl_params_desc_args(bl_mem_params_node_t *mem_params_desc_ptr,
+ unsigned int mem_params_desc_num,
+ bl_params_t *next_bl_params_ptr)
+{
+ assert(mem_params_desc_ptr != NULL);
+ assert(mem_params_desc_num != 0U);
+ assert(next_bl_params_ptr != NULL);
+
+ flush_dcache_range((uintptr_t)mem_params_desc_ptr,
+ sizeof(*mem_params_desc_ptr) * mem_params_desc_num);
+
+ flush_dcache_range((uintptr_t)next_bl_params_ptr,
+ sizeof(*next_bl_params_ptr));
+}
+
+/*******************************************************************************
+ * This function returns the index of the given image_id within the
+ * image descriptor array provided by bl_mem_params_desc_ptr if the
+ * image is found; otherwise it returns -1.
+ ******************************************************************************/
+int get_bl_params_node_index(unsigned int image_id)
+{
+ unsigned int index;
+ assert(image_id != INVALID_IMAGE_ID);
+
+ for (index = 0U; index < bl_mem_params_desc_num; index++) {
+ if (bl_mem_params_desc_ptr[index].image_id == image_id)
+ return (int)index;
+ }
+
+ return -1;
+}
+
+/*******************************************************************************
+ * This function returns the pointer to the `bl_mem_params_node_t` object for
+ * the given image_id within the image descriptor array provided by
+ * bl_mem_params_desc_ptr if the image is found; otherwise it returns NULL.
+ ******************************************************************************/
+bl_mem_params_node_t *get_bl_mem_params_node(unsigned int image_id)
+{
+ int index;
+ assert(image_id != INVALID_IMAGE_ID);
+
+ index = get_bl_params_node_index(image_id);
+ if (index >= 0)
+ return &bl_mem_params_desc_ptr[index];
+ else
+ return NULL;
+}
+
+/*******************************************************************************
+ * This function creates the list of loadable images, by populating and
+ * linking each `bl_load_info_node_t` type node, using the internal array
+ * of image descriptors provided by bl_mem_params_desc_ptr. It also populates
+ * and returns `bl_load_info_t` type structure that contains head of the list
+ * of loadable images.
+ ******************************************************************************/
+bl_load_info_t *get_bl_load_info_from_mem_params_desc(void)
+{
+ unsigned int index = 0;
+
+ /* If there is no image to start with, return NULL */
+ if (bl_mem_params_desc_num == 0U)
+ return NULL;
+
+ /* Assign initial data structures */
+ bl_load_info_node_t *bl_node_info =
+ &bl_mem_params_desc_ptr[index].load_node_mem;
+ bl_load_info.head = bl_node_info;
+ SET_PARAM_HEAD(&bl_load_info, PARAM_BL_LOAD_INFO, VERSION_2, 0U);
+
+ /* Go through the image descriptor array and create the list */
+ for (; index < bl_mem_params_desc_num; index++) {
+
+ /* Populate the image information */
+ bl_node_info->image_id = bl_mem_params_desc_ptr[index].image_id;
+ bl_node_info->image_info = &bl_mem_params_desc_ptr[index].image_info;
+
+ /* Link next image if present */
+ if ((index + 1U) < bl_mem_params_desc_num) {
+ /* Get the memory and link the next node */
+ bl_node_info->next_load_info =
+ &bl_mem_params_desc_ptr[index + 1U].load_node_mem;
+ bl_node_info = bl_node_info->next_load_info;
+ }
+ }
+
+ return &bl_load_info;
+}
+
+/*******************************************************************************
+ * This function creates the list of executable images, by populating and
+ * linking each `bl_params_node_t` type node, using the internal array of
+ * image descriptors provided by bl_mem_params_desc_ptr. It also populates
+ * and returns `bl_params_t` type structure that contains head of the list
+ * of executable images.
+ ******************************************************************************/
+bl_params_t *get_next_bl_params_from_mem_params_desc(void)
+{
+ unsigned int count;
+ unsigned int img_id = 0U;
+ unsigned int link_index = 0U;
+ bl_params_node_t *bl_current_exec_node = NULL;
+ bl_params_node_t *bl_last_exec_node = NULL;
+ bl_mem_params_node_t *desc_ptr;
+
+ /* If there is no image to start with, return NULL */
+ if (bl_mem_params_desc_num == 0U)
+ return NULL;
+
+ /* Get the list HEAD */
+ for (count = 0U; count < bl_mem_params_desc_num; count++) {
+
+ desc_ptr = &bl_mem_params_desc_ptr[count];
+
+ if ((EP_GET_EXE(desc_ptr->ep_info.h.attr) == EXECUTABLE) &&
+ (EP_GET_FIRST_EXE(desc_ptr->ep_info.h.attr) == EP_FIRST_EXE)) {
+ next_bl_params.head = &desc_ptr->params_node_mem;
+ link_index = count;
+ break;
+ }
+ }
+
+ /* Make sure we have a HEAD node */
+ assert(next_bl_params.head != NULL);
+
+ /* Populate the HEAD information */
+ SET_PARAM_HEAD(&next_bl_params, PARAM_BL_PARAMS, VERSION_2, 0U);
+
+ /*
+ * Go through the image descriptor array and create the list.
+ * This bounded loop is to make sure that we are not looping forever.
+ */
+ for (count = 0U; count < bl_mem_params_desc_num; count++) {
+
+ desc_ptr = &bl_mem_params_desc_ptr[link_index];
+
+ /* Make sure the image is executable */
+ assert(EP_GET_EXE(desc_ptr->ep_info.h.attr) == EXECUTABLE);
+
+ /* Get the memory for current node */
+ bl_current_exec_node = &desc_ptr->params_node_mem;
+
+ /* Populate the image information */
+ bl_current_exec_node->image_id = desc_ptr->image_id;
+ bl_current_exec_node->image_info = &desc_ptr->image_info;
+ bl_current_exec_node->ep_info = &desc_ptr->ep_info;
+
+ if (bl_last_exec_node != NULL) {
+ /* Assert if loop detected */
+ assert(bl_last_exec_node->next_params_info == NULL);
+
+ /* Link the previous node to the current one */
+ bl_last_exec_node->next_params_info = bl_current_exec_node;
+ }
+
+ /* Update the last node */
+ bl_last_exec_node = bl_current_exec_node;
+
+ /* If no next hand-off image then break out */
+ img_id = desc_ptr->next_handoff_image_id;
+ if (img_id == INVALID_IMAGE_ID)
+ break;
+
+ /* Get the index for the next hand-off image */
+ link_index = get_bl_params_node_index(img_id);
+ assert((link_index > 0U) &&
+ (link_index < bl_mem_params_desc_num));
+ }
+
+ /* Invalid image is expected to terminate the loop */
+ assert(img_id == INVALID_IMAGE_ID);
+
+ return &next_bl_params;
+}
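For context, a sketch of the platform glue that typically hands these two lists to the generic BL2 loader. The hook names follow the usual TF-A platform interface, but treat the exact wiring as an assumption here:

/* Assumed platform hooks: expose the generated lists to the generic code. */
bl_load_info_t *plat_get_bl_image_load_info(void)
{
	return get_bl_load_info_from_mem_params_desc();
}

bl_params_t *plat_get_next_bl_params(void)
{
	return get_next_bl_params_from_mem_params_desc();
}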
+
+/*******************************************************************************
+ * This function populates the entry point information with the corresponding
+ * config file for all executable BL images described in bl_params.
+ ******************************************************************************/
+void populate_next_bl_params_config(bl_params_t *bl2_to_next_bl_params)
+{
+ bl_params_node_t *params_node;
+ unsigned int fw_config_id;
+ uintptr_t fw_config_base;
+ bl_mem_params_node_t *mem_params;
+ uintptr_t hw_config_base = 0;
+
+ assert(bl2_to_next_bl_params != NULL);
+
+ /*
+ * Get the `bl_mem_params_node_t` corresponding to HW_CONFIG
+ * if available.
+ */
+ mem_params = get_bl_mem_params_node(HW_CONFIG_ID);
+
+ if (mem_params != NULL)
+ hw_config_base = mem_params->image_info.image_base;
+
+ for (params_node = bl2_to_next_bl_params->head; params_node != NULL;
+ params_node = params_node->next_params_info) {
+
+ fw_config_base = 0;
+
+ switch (params_node->image_id) {
+ case BL31_IMAGE_ID:
+ fw_config_id = SOC_FW_CONFIG_ID;
+ break;
+ case BL32_IMAGE_ID:
+ /*
+ * At the moment, OPTEE cannot accept a DTB in secure memory,
+ * so fall back and use NT_FW_CONFIG instead.
+ * This MUST be fixed as soon as OPTEE has support to
+ * receive DTBs in secure memory.
+ */
+#ifndef SPD_opteed
+ fw_config_id = TOS_FW_CONFIG_ID;
+ break;
+#endif
+ case BL33_IMAGE_ID:
+ fw_config_id = NT_FW_CONFIG_ID;
+ break;
+ default:
+ fw_config_id = INVALID_IMAGE_ID;
+ break;
+ }
+
+ if (fw_config_id != INVALID_IMAGE_ID) {
+ mem_params = get_bl_mem_params_node(fw_config_id);
+ if (mem_params != NULL) {
+ fw_config_base = mem_params->image_info.image_base;
+ }
+ }
+
+#ifdef SPD_opteed
+ /*
+ * If SPD_opteed is enabled, arg[0,2] are populated by
+ * parse_optee_header(), which is called by
+ * arm_bl2_handle_post_image_load(). The meaning of the
+ * arguments are:
+ * arg0 <-- MODE_RW
+ * arg1 <-- Paged image base
+ * arg2 <-- Paged image size
+ */
+ if (params_node->image_id == BL32_IMAGE_ID) {
+ params_node->ep_info->args.arg3 = fw_config_base;
+ } else {
+#endif
+ /*
+ * Pass hw and tb_fw config addresses to next images.
+ * NOTE - for EL3 runtime images (BL31 for AArch64
+ * and BL32 for AArch32), arg0 is already used by
+ * generic code. Take care of not overwriting the
+ * previous initialisations.
+ */
+ if (params_node == bl2_to_next_bl_params->head) {
+ if (params_node->ep_info->args.arg1 == 0U)
+ params_node->ep_info->args.arg1 =
+ fw_config_base;
+ if (params_node->ep_info->args.arg2 == 0U)
+ params_node->ep_info->args.arg2 =
+ hw_config_base;
+ } else {
+ if (params_node->ep_info->args.arg0 == 0U)
+ params_node->ep_info->args.arg0 =
+ fw_config_base;
+ if (params_node->ep_info->args.arg1 == 0U)
+ params_node->ep_info->args.arg1 =
+ hw_config_base;
+ }
+#ifdef SPD_opteed
+ }
+#endif
+ }
+}
+
+/*******************************************************************************
+ * Helper to extract BL32/BL33 entry point info from arg0 passed to BL31, for
+ * platforms that are only interested in those. Platforms that need to extract
+ * more information can parse the structures themselves.
+ ******************************************************************************/
+
+void bl31_params_parse_helper(u_register_t param,
+ entry_point_info_t *bl32_ep_info_out,
+ entry_point_info_t *bl33_ep_info_out)
+{
+ bl_params_node_t *node;
+ bl_params_t *v2 = (void *)(uintptr_t)param;
+
+#if !ERROR_DEPRECATED
+ if (v2->h.version == PARAM_VERSION_1) {
+ struct { /* Deprecated version 1 parameter structure. */
+ param_header_t h;
+ image_info_t *bl31_image_info;
+ entry_point_info_t *bl32_ep_info;
+ image_info_t *bl32_image_info;
+ entry_point_info_t *bl33_ep_info;
+ image_info_t *bl33_image_info;
+ } *v1 = (void *)(uintptr_t)param;
+ assert(v1->h.type == PARAM_BL31);
+ if (bl32_ep_info_out != NULL)
+ *bl32_ep_info_out = *v1->bl32_ep_info;
+ if (bl33_ep_info_out != NULL)
+ *bl33_ep_info_out = *v1->bl33_ep_info;
+ return;
+ }
+#endif /* !ERROR_DEPRECATED */
+
+ assert(v2->h.version == PARAM_VERSION_2);
+ assert(v2->h.type == PARAM_BL_PARAMS);
+ for (node = v2->head; node != NULL; node = node->next_params_info) {
+ if (node->image_id == BL32_IMAGE_ID)
+ if (bl32_ep_info_out != NULL)
+ *bl32_ep_info_out = *node->ep_info;
+ if (node->image_id == BL33_IMAGE_ID)
+ if (bl33_ep_info_out != NULL)
+ *bl33_ep_info_out = *node->ep_info;
+ }
+}
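On the receiving end, a minimal sketch of how a BL31 platform setup might use the helper. The static variables and function name are illustrative, and the header providing the prototype is assumed to be common/desc_image_load.h:

#include <assert.h>

#include <common/desc_image_load.h>

static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;

/* Illustrative early-setup step: arg0 carries the bl_params_t list from BL2. */
static void parse_bl2_handoff(u_register_t arg0)
{
	bl31_params_parse_helper(arg0, &bl32_image_ep_info,
				 &bl33_image_ep_info);

	/* A usable BL33 entry point is expected at this stage. */
	assert(bl33_image_ep_info.pc != 0U);
}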
diff --git a/common/fdt_fixup.c b/common/fdt_fixup.c
new file mode 100644
index 0000000..1bad74f
--- /dev/null
+++ b/common/fdt_fixup.c
@@ -0,0 +1,626 @@
+/*
+ * Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * Contains generic routines to fix up the device tree blob passed on to
+ * payloads like BL32 and BL33 (and further down the boot chain).
+ * This makes it easy to add a PSCI node when the original DT does not have
+ * one or advertises another method.
+ * It also supports adding reserved-memory nodes to describe memory that
+ * is used by the secure world, so that non-secure software avoids using
+ * it.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <libfdt.h>
+
+#include <arch.h>
+#include <common/debug.h>
+#include <common/fdt_fixup.h>
+#include <common/fdt_wrappers.h>
+#include <drivers/console.h>
+#include <lib/psci/psci.h>
+#include <plat/common/platform.h>
+
+
+static int append_psci_compatible(void *fdt, int offs, const char *str)
+{
+ return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
+}
+
+/*
+ * Those defines are for PSCI v0.1 legacy clients, which we expect to use
+ * the same execution state (AArch32/AArch64) as TF-A.
+ * Kernels running in AArch32 on an AArch64 TF-A should use PSCI v0.2.
+ */
+#ifdef __aarch64__
+#define PSCI_CPU_SUSPEND_FNID PSCI_CPU_SUSPEND_AARCH64
+#define PSCI_CPU_ON_FNID PSCI_CPU_ON_AARCH64
+#else
+#define PSCI_CPU_SUSPEND_FNID PSCI_CPU_SUSPEND_AARCH32
+#define PSCI_CPU_ON_FNID PSCI_CPU_ON_AARCH32
+#endif
+
+/*******************************************************************************
+ * dt_add_psci_node() - Add a PSCI node into an existing device tree
+ * @fdt: pointer to the device tree blob in memory
+ *
+ * Add a device tree node describing PSCI into the root level of an existing
+ * device tree blob in memory.
+ * This will add v0.1, v0.2 and v1.0 compatible strings and the standard
+ * function IDs for v0.1 compatibility.
+ * An existing PSCI node will not be touched; the function will return success
+ * in this case. This function will not touch the /cpus enable methods; use
+ * dt_add_psci_cpu_enable_methods() for that.
+ *
+ * Return: 0 on success, -1 otherwise.
+ ******************************************************************************/
+int dt_add_psci_node(void *fdt)
+{
+ int offs;
+
+ if (fdt_path_offset(fdt, "/psci") >= 0) {
+ WARN("PSCI Device Tree node already exists!\n");
+ return 0;
+ }
+
+ offs = fdt_path_offset(fdt, "/");
+ if (offs < 0)
+ return -1;
+ offs = fdt_add_subnode(fdt, offs, "psci");
+ if (offs < 0)
+ return -1;
+ if (append_psci_compatible(fdt, offs, "arm,psci-1.0"))
+ return -1;
+ if (append_psci_compatible(fdt, offs, "arm,psci-0.2"))
+ return -1;
+ if (append_psci_compatible(fdt, offs, "arm,psci"))
+ return -1;
+ if (fdt_setprop_string(fdt, offs, "method", "smc"))
+ return -1;
+ if (fdt_setprop_u32(fdt, offs, "cpu_suspend", PSCI_CPU_SUSPEND_FNID))
+ return -1;
+ if (fdt_setprop_u32(fdt, offs, "cpu_off", PSCI_CPU_OFF))
+ return -1;
+ if (fdt_setprop_u32(fdt, offs, "cpu_on", PSCI_CPU_ON_FNID))
+ return -1;
+ return 0;
+}
+
+/*
+ * Find the first subnode that has a "device_type" property with the value
+ * "cpu" and whose enable-method is not yet "psci".
+ * Returns 0 if no such subnode is found, so all have already been patched
+ * or none have to be patched in the first place.
+ * Returns 1 if *one* such subnode has been found and successfully changed
+ * to "psci".
+ * Returns negative values on error.
+ *
+ * Call in a loop until it returns 0. Recalculate the node offset after
+ * it has returned 1.
+ */
+static int dt_update_one_cpu_node(void *fdt, int offset)
+{
+ int offs;
+
+ /* Iterate over all subnodes to find those with device_type = "cpu". */
+ for (offs = fdt_first_subnode(fdt, offset); offs >= 0;
+ offs = fdt_next_subnode(fdt, offs)) {
+ const char *prop;
+ int len;
+ int ret;
+
+ prop = fdt_getprop(fdt, offs, "device_type", &len);
+ if (prop == NULL)
+ continue;
+ if ((strcmp(prop, "cpu") != 0) || (len != 4))
+ continue;
+
+ /* Ignore any nodes which already use "psci". */
+ prop = fdt_getprop(fdt, offs, "enable-method", &len);
+ if ((prop != NULL) &&
+ (strcmp(prop, "psci") == 0) && (len == 5))
+ continue;
+
+ ret = fdt_setprop_string(fdt, offs, "enable-method", "psci");
+ if (ret < 0)
+ return ret;
+ /*
+ * Subnode found and patched.
+ * Restart to accommodate potentially changed offsets.
+ */
+ return 1;
+ }
+
+ if (offs == -FDT_ERR_NOTFOUND)
+ return 0;
+
+ return offs;
+}
+
+/*******************************************************************************
+ * dt_add_psci_cpu_enable_methods() - switch CPU nodes in DT to use PSCI
+ * @fdt: pointer to the device tree blob in memory
+ *
+ * Iterate over all CPU device tree nodes (/cpus/cpu@x) in memory to change
+ * the enable-method to PSCI. This will add the enable-method properties, if
+ * required, or will change existing properties to read "psci".
+ *
+ * Return: 0 on success, or a negative error value otherwise.
+ ******************************************************************************/
+
+int dt_add_psci_cpu_enable_methods(void *fdt)
+{
+ int offs, ret;
+
+ do {
+ offs = fdt_path_offset(fdt, "/cpus");
+ if (offs < 0)
+ return offs;
+
+ ret = dt_update_one_cpu_node(fdt, offs);
+ } while (ret > 0);
+
+ return ret;
+}
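A typical fixup sequence, sketched with simplified error handling (the wrapper name and buffer-size handling are illustrative): grow the blob so the edits fit, add the PSCI node, patch the CPU enable-methods, then re-pack.

#include <libfdt.h>

#include <common/fdt_fixup.h>

/* Illustrative DT fixup applied before handing the blob to BL33. */
static int fixup_psci_nodes(void *dtb, int bufsize)
{
	if (fdt_open_into(dtb, dtb, bufsize) != 0) {	/* make room for edits */
		return -1;
	}

	if (dt_add_psci_node(dtb) != 0) {
		return -1;
	}

	if (dt_add_psci_cpu_enable_methods(dtb) != 0) {
		return -1;
	}

	return fdt_pack(dtb);				/* trim back to size */
}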
+
+#define HIGH_BITS(x) ((sizeof(x) > 4) ? ((x) >> 32) : (typeof(x))0)
+
+/*******************************************************************************
+ * fdt_add_reserved_memory() - reserve (secure) memory regions in DT
+ * @dtb: pointer to the device tree blob in memory
+ * @node_name: name of the subnode to be used
+ * @base: physical base address of the reserved region
+ * @size: size of the reserved region
+ *
+ * Add a region of memory to the /reserved-memory node in a device tree in
+ * memory, creating that node if required. Each region goes into a subnode
+ * of that node and has a @node_name, a @base address and a @size.
+ * This will prevent any device tree consumer from using that memory. It
+ * can be used to announce secure memory regions, as it adds the "no-map"
+ * property to prevent mapping and speculative operations on that region.
+ *
+ * See reserved-memory/reserved-memory.txt in the (Linux kernel) DT binding
+ * documentation for details.
+ * According to this binding, the address-cells and size-cells must match
+ * those of the root node.
+ *
+ * Return: 0 on success, a negative error value otherwise.
+ ******************************************************************************/
+int fdt_add_reserved_memory(void *dtb, const char *node_name,
+ uintptr_t base, size_t size)
+{
+ int offs = fdt_path_offset(dtb, "/reserved-memory");
+ uint32_t addresses[4];
+ int ac, sc;
+ unsigned int idx = 0;
+
+ ac = fdt_address_cells(dtb, 0);
+ sc = fdt_size_cells(dtb, 0);
+ if (offs < 0) { /* create if not existing yet */
+ offs = fdt_add_subnode(dtb, 0, "reserved-memory");
+ if (offs < 0) {
+ return offs;
+ }
+ fdt_setprop_u32(dtb, offs, "#address-cells", ac);
+ fdt_setprop_u32(dtb, offs, "#size-cells", sc);
+ fdt_setprop(dtb, offs, "ranges", NULL, 0);
+ }
+
+ if (ac > 1) {
+ addresses[idx] = cpu_to_fdt32(HIGH_BITS(base));
+ idx++;
+ }
+ addresses[idx] = cpu_to_fdt32(base & 0xffffffff);
+ idx++;
+ if (sc > 1) {
+ addresses[idx] = cpu_to_fdt32(HIGH_BITS(size));
+ idx++;
+ }
+ addresses[idx] = cpu_to_fdt32(size & 0xffffffff);
+ idx++;
+ offs = fdt_add_subnode(dtb, offs, node_name);
+ fdt_setprop(dtb, offs, "no-map", NULL, 0);
+ fdt_setprop(dtb, offs, "reg", addresses, idx * sizeof(uint32_t));
+
+ return 0;
+}
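A short usage sketch with made-up addresses, carving 2 MB of secure DRAM out of the normal world's view (the node name and region are purely illustrative, and the includes already present in this file are assumed):

/* Hypothetical carve-out: keep secure DRAM out of the non-secure memory map. */
static void reserve_secure_dram(void *dtb)
{
	if (fdt_add_reserved_memory(dtb, "secure-dram@ff000000",
				    0xff000000UL, 0x200000U) < 0) {
		WARN("Failed to add /reserved-memory carve-out\n");
	}
}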
+
+/*******************************************************************************
+ * fdt_add_cpu() Add a new CPU node to the DT
+ * @dtb: Pointer to the device tree blob in memory
+ * @parent: Offset of the parent node
+ * @mpidr: MPIDR for the current CPU
+ *
+ * Create and add a new cpu node to a DTB.
+ *
+ * Return the offset of the new node or a negative value in case of error
+ ******************************************************************************/
+
+static int fdt_add_cpu(void *dtb, int parent, u_register_t mpidr)
+{
+ int cpu_offs;
+ int err;
+ char snode_name[15];
+ uint64_t reg_prop;
+
+ reg_prop = mpidr & MPID_MASK & ~MPIDR_MT_MASK;
+
+ snprintf(snode_name, sizeof(snode_name), "cpu@%x",
+ (unsigned int)reg_prop);
+
+ cpu_offs = fdt_add_subnode(dtb, parent, snode_name);
+ if (cpu_offs < 0) {
+ ERROR ("FDT: add subnode \"%s\" failed: %i\n",
+ snode_name, cpu_offs);
+ return cpu_offs;
+ }
+
+ err = fdt_setprop_string(dtb, cpu_offs, "compatible", "arm,armv8");
+ if (err < 0) {
+ ERROR ("FDT: write to \"%s\" property of node at offset %i failed\n",
+ "compatible", cpu_offs);
+ return err;
+ }
+
+ err = fdt_setprop_u64(dtb, cpu_offs, "reg", reg_prop);
+ if (err < 0) {
+ ERROR ("FDT: write to \"%s\" property of node at offset %i failed\n",
+ "reg", cpu_offs);
+ return err;
+ }
+
+ err = fdt_setprop_string(dtb, cpu_offs, "device_type", "cpu");
+ if (err < 0) {
+ ERROR ("FDT: write to \"%s\" property of node at offset %i failed\n",
+ "device_type", cpu_offs);
+ return err;
+ }
+
+ err = fdt_setprop_string(dtb, cpu_offs, "enable-method", "psci");
+ if (err < 0) {
+ ERROR ("FDT: write to \"%s\" property of node at offset %i failed\n",
+ "enable-method", cpu_offs);
+ return err;
+ }
+
+ return cpu_offs;
+}
+
+/******************************************************************************
+ * fdt_add_cpus_node() - Add the cpus node to the DTB
+ * @dtb: pointer to the device tree blob in memory
+ * @afflv0: Maximum number of threads per core (affinity level 0).
+ * @afflv1: Maximum number of CPUs per cluster (affinity level 1).
+ * @afflv2: Maximum number of clusters (affinity level 2).
+ *
+ * Iterate over all the possible MPIDs given the maximum affinity levels and
+ * add a cpus node to the DTB with all the valid CPUs on the system.
+ * If there is already a /cpus node, exit gracefully.
+ *
+ * A system with two CPUs would generate a node equivalent or similar to:
+ *
+ * cpus {
+ * #address-cells = <2>;
+ * #size-cells = <0>;
+ *
+ * cpu0: cpu@0 {
+ * compatible = "arm,armv8";
+ * reg = <0x0 0x0>;
+ * device_type = "cpu";
+ * enable-method = "psci";
+ * };
+ *		cpu1: cpu@100 {
+ * compatible = "arm,armv8";
+ * reg = <0x0 0x100>;
+ * device_type = "cpu";
+ * enable-method = "psci";
+ * };
+ * };
+ *
+ * Full documentation about the CPU bindings can be found at:
+ * https://www.kernel.org/doc/Documentation/devicetree/bindings/arm/cpus.txt
+ *
+ * Return the offset of the node or a negative value on error.
+ ******************************************************************************/
+
+int fdt_add_cpus_node(void *dtb, unsigned int afflv0,
+ unsigned int afflv1, unsigned int afflv2)
+{
+ int offs;
+ int err;
+ unsigned int i, j, k;
+ u_register_t mpidr;
+ int cpuid;
+
+ if (fdt_path_offset(dtb, "/cpus") >= 0) {
+ return -EEXIST;
+ }
+
+ offs = fdt_add_subnode(dtb, 0, "cpus");
+ if (offs < 0) {
+ ERROR ("FDT: add subnode \"cpus\" node to parent node failed");
+ return offs;
+ }
+
+ err = fdt_setprop_u32(dtb, offs, "#address-cells", 2);
+ if (err < 0) {
+ ERROR ("FDT: write to \"%s\" property of node at offset %i failed\n",
+ "#address-cells", offs);
+ return err;
+ }
+
+ err = fdt_setprop_u32(dtb, offs, "#size-cells", 0);
+ if (err < 0) {
+ ERROR ("FDT: write to \"%s\" property of node at offset %i failed\n",
+ "#size-cells", offs);
+ return err;
+ }
+
+ /*
+ * Populate the node with the CPUs.
+ * As libfdt prepends subnodes within a node, reverse the index count
+ * so the CPU nodes would be better ordered.
+ */
+ for (i = afflv2; i > 0U; i--) {
+ for (j = afflv1; j > 0U; j--) {
+ for (k = afflv0; k > 0U; k--) {
+ mpidr = ((i - 1) << MPIDR_AFF2_SHIFT) |
+ ((j - 1) << MPIDR_AFF1_SHIFT) |
+ ((k - 1) << MPIDR_AFF0_SHIFT) |
+ (read_mpidr_el1() & MPIDR_MT_MASK);
+
+ cpuid = plat_core_pos_by_mpidr(mpidr);
+ if (cpuid >= 0) {
+ /* Valid MPID found */
+ err = fdt_add_cpu(dtb, offs, mpidr);
+ if (err < 0) {
+ ERROR ("FDT: %s 0x%08x\n",
+ "error adding CPU",
+ (uint32_t)mpidr);
+ return err;
+ }
+ }
+ }
+ }
+ }
+
+ return offs;
+}
+
+/*******************************************************************************
+ * fdt_add_cpu_idle_states() - add PSCI CPU idle states to cpu nodes in the DT
+ * @dtb: pointer to the device tree blob in memory
+ * @states: array of idle state descriptions, ending with empty element
+ *
+ * Add information about CPU idle states to the devicetree. This function
+ * assumes that CPU idle states are not already present in the devicetree, and
+ * that all CPU states are equally applicable to all CPUs.
+ *
+ * See arm/idle-states.yaml and arm/psci.yaml in the (Linux kernel) DT binding
+ * documentation for more details.
+ *
+ * Return: 0 on success, a negative error value otherwise.
+ ******************************************************************************/
+int fdt_add_cpu_idle_states(void *dtb, const struct psci_cpu_idle_state *state)
+{
+ int cpu_node, cpus_node, idle_states_node, ret;
+ uint32_t count, phandle;
+
+	ret = fdt_find_max_phandle(dtb, &phandle);
+	if (ret < 0) {
+		return ret;
+	}
+	phandle++;
+
+ cpus_node = fdt_path_offset(dtb, "/cpus");
+ if (cpus_node < 0) {
+ return cpus_node;
+ }
+
+ /* Create the idle-states node and its child nodes. */
+ idle_states_node = fdt_add_subnode(dtb, cpus_node, "idle-states");
+ if (idle_states_node < 0) {
+ return idle_states_node;
+ }
+
+ ret = fdt_setprop_string(dtb, idle_states_node, "entry-method", "psci");
+ if (ret < 0) {
+ return ret;
+ }
+
+ for (count = 0U; state->name != NULL; count++, phandle++, state++) {
+ int idle_state_node;
+
+ idle_state_node = fdt_add_subnode(dtb, idle_states_node,
+ state->name);
+ if (idle_state_node < 0) {
+ return idle_state_node;
+ }
+
+ fdt_setprop_string(dtb, idle_state_node, "compatible",
+ "arm,idle-state");
+ fdt_setprop_u32(dtb, idle_state_node, "arm,psci-suspend-param",
+ state->power_state);
+ if (state->local_timer_stop) {
+ fdt_setprop_empty(dtb, idle_state_node,
+ "local-timer-stop");
+ }
+ fdt_setprop_u32(dtb, idle_state_node, "entry-latency-us",
+ state->entry_latency_us);
+ fdt_setprop_u32(dtb, idle_state_node, "exit-latency-us",
+ state->exit_latency_us);
+ fdt_setprop_u32(dtb, idle_state_node, "min-residency-us",
+ state->min_residency_us);
+ if (state->wakeup_latency_us) {
+ fdt_setprop_u32(dtb, idle_state_node,
+ "wakeup-latency-us",
+ state->wakeup_latency_us);
+ }
+ fdt_setprop_u32(dtb, idle_state_node, "phandle", phandle);
+ }
+
+ if (count == 0U) {
+ return 0;
+ }
+
+ /* Link each cpu node to the idle state nodes. */
+ fdt_for_each_subnode(cpu_node, dtb, cpus_node) {
+ const char *device_type;
+ fdt32_t *value;
+
+ /* Only process child nodes with device_type = "cpu". */
+ device_type = fdt_getprop(dtb, cpu_node, "device_type", NULL);
+ if (device_type == NULL || strcmp(device_type, "cpu") != 0) {
+ continue;
+ }
+
+ /* Allocate space for the list of phandles. */
+ ret = fdt_setprop_placeholder(dtb, cpu_node, "cpu-idle-states",
+ count * sizeof(phandle),
+ (void **)&value);
+ if (ret < 0) {
+ return ret;
+ }
+
+ /* Fill in the phandles of the idle state nodes. */
+ for (uint32_t i = 0U; i < count; ++i) {
+ value[i] = cpu_to_fdt32(phandle - count + i);
+ }
+ }
+
+ return 0;
+}
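+
+/*
+ * Illustrative sketch, not taken from the original sources: a caller could
+ * describe a single core power-down state with an array terminated by an
+ * empty element, roughly as below. The state name, suspend parameter and
+ * latency figures are placeholders.
+ *
+ *	static const struct psci_cpu_idle_state states[] = {
+ *		{
+ *			.name = "cpu-sleep",
+ *			.power_state = 0x01000022,
+ *			.local_timer_stop = true,
+ *			.entry_latency_us = 250,
+ *			.exit_latency_us = 500,
+ *			.min_residency_us = 2000,
+ *		},
+ *		{ 0 },
+ *	};
+ *
+ *	ret = fdt_add_cpu_idle_states(dtb, states);
+ *
+ * The final empty element terminates the list; each state becomes a child of
+ * the newly created idle-states node.
+ */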
+
+/**
+ * fdt_adjust_gic_redist() - Adjust GICv3 redistributor size
+ * @dtb: Pointer to the DT blob in memory
+ * @nr_cores: Number of CPU cores on this system.
+ * @gicr_base: Base address of the first GICR frame, or ~0 if unchanged
+ * @gicr_frame_size: Size of the GICR frame per core
+ *
+ * On a GICv3 compatible interrupt controller, the redistributor provides a
+ * number of 64K pages for each supported core. With a dynamic topology, this
+ * size cannot be known upfront and thus cannot be hardcoded into the DTB.
+ *
+ * Find the DT node describing the GICv3 interrupt controller, and adjust
+ * the size of the redistributor to match the number of actual cores on
+ * this system.
+ * A GICv4 compatible redistributor uses four 64K pages per core, whereas GICs
+ * without support for direct injection of virtual interrupts use two 64K pages.
+ * The @gicr_frame_size parameter should be 262144 and 131072, respectively.
+ * Also optionally allow adjusting the GICR frame base address, when this is
+ * different due to ITS frames between distributor and redistributor.
+ *
+ * Return: 0 on success, negative error value otherwise.
+ */
+int fdt_adjust_gic_redist(void *dtb, unsigned int nr_cores,
+ uintptr_t gicr_base, unsigned int gicr_frame_size)
+{
+ int offset = fdt_node_offset_by_compatible(dtb, 0, "arm,gic-v3");
+ uint64_t reg_64;
+ uint32_t reg_32;
+ void *val;
+ int parent, ret;
+ int ac, sc;
+
+ if (offset < 0) {
+ return offset;
+ }
+
+ parent = fdt_parent_offset(dtb, offset);
+ if (parent < 0) {
+ return parent;
+ }
+ ac = fdt_address_cells(dtb, parent);
+ sc = fdt_size_cells(dtb, parent);
+ if (ac < 0 || sc < 0) {
+ return -EINVAL;
+ }
+
+ if (gicr_base != INVALID_BASE_ADDR) {
+ if (ac == 1) {
+ reg_32 = cpu_to_fdt32(gicr_base);
+ val = &reg_32;
+ } else {
+ reg_64 = cpu_to_fdt64(gicr_base);
+ val = &reg_64;
+ }
+ /*
+ * The redistributor base address is the second address in
+ * the "reg" entry, so we have to skip one address and one
+ * size cell.
+ */
+ ret = fdt_setprop_inplace_namelen_partial(dtb, offset,
+ "reg", 3,
+ (ac + sc) * 4,
+ val, ac * 4);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ if (sc == 1) {
+ reg_32 = cpu_to_fdt32(nr_cores * gicr_frame_size);
+ val = &reg_32;
+ } else {
+ reg_64 = cpu_to_fdt64(nr_cores * (uint64_t)gicr_frame_size);
+ val = &reg_64;
+ }
+
+ /*
+ * The redistributor is described in the second "reg" entry.
+ * So we have to skip one address and one size cell, then another
+ * address cell to get to the second size cell.
+ */
+ return fdt_setprop_inplace_namelen_partial(dtb, offset, "reg", 3,
+ (ac + sc + ac) * 4,
+ val, sc * 4);
+}
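+
+/*
+ * Illustrative usage sketch, not taken from the original sources: on a GICv3
+ * redistributor without support for direct injection of virtual interrupts,
+ * each core owns two 64K frames, so a platform with a probed core count could
+ * call:
+ *
+ *	ret = fdt_adjust_gic_redist(dtb, nr_cores, INVALID_BASE_ADDR,
+ *				    2U * 0x10000U);
+ *
+ * Passing INVALID_BASE_ADDR leaves the GICR base address untouched, so only
+ * the size cell of the second "reg" entry is rewritten.
+ */
+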
+/**
+ * fdt_set_mac_address() - store MAC address in device tree
+ * @dtb: pointer to the device tree blob in memory
+ * @ethernet_idx:	number of Ethernet interface in /aliases node
+ * @mac_addr: pointer to 6 byte MAC address to store
+ *
+ * Use the generic local-mac-address property in a network device DT node
+ * to define the MAC address this device should be using. Many platform
+ * network devices lack device-specific non-volatile storage to hold this
+ * address, and leave it up to firmware to find and store a unique MAC
+ * address in the DT.
+ * The MAC address could be read from some board or firmware defined storage,
+ * or could be derived from some other unique property like a serial number.
+ *
+ * Return: 0 on success, a negative libfdt error value otherwise.
+ */
+int fdt_set_mac_address(void *dtb, unsigned int ethernet_idx,
+ const uint8_t *mac_addr)
+{
+ char eth_alias[12];
+ const char *path;
+ int node;
+
+ if (ethernet_idx > 9U) {
+ return -FDT_ERR_BADVALUE;
+ }
+ snprintf(eth_alias, sizeof(eth_alias), "ethernet%d", ethernet_idx);
+
+ path = fdt_get_alias(dtb, eth_alias);
+ if (path == NULL) {
+ return -FDT_ERR_NOTFOUND;
+ }
+
+ node = fdt_path_offset(dtb, path);
+ if (node < 0) {
+ ERROR("Path \"%s\" not found in DT: %d\n", path, node);
+ return node;
+ }
+
+ return fdt_setprop(dtb, node, "local-mac-address", mac_addr, 6);
+}
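+
+/*
+ * Illustrative usage sketch, not taken from the original sources: store a
+ * board-specific MAC address for the interface behind the "ethernet0" alias.
+ * The address bytes below are placeholders.
+ *
+ *	const uint8_t mac[6] = {0x02, 0x00, 0x00, 0x11, 0x22, 0x33};
+ *
+ *	if (fdt_set_mac_address(dtb, 0U, mac) < 0) {
+ *		WARN("Could not set local-mac-address\n");
+ *	}
+ */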
diff --git a/common/fdt_wrappers.c b/common/fdt_wrappers.c
new file mode 100644
index 0000000..783b660
--- /dev/null
+++ b/common/fdt_wrappers.c
@@ -0,0 +1,641 @@
+/*
+ * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* Helper functions to offer easier navigation of Device Tree Blob */
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <libfdt.h>
+
+#include <common/debug.h>
+#include <common/fdt_wrappers.h>
+#include <common/uuid.h>
+
+/*
+ * Read cells from a given property of the given node. Any number of 32-bit
+ * cells of the property can be read. Returns 0 on success, or a negative
+ * FDT error value otherwise.
+ */
+int fdt_read_uint32_array(const void *dtb, int node, const char *prop_name,
+ unsigned int cells, uint32_t *value)
+{
+ const fdt32_t *prop;
+ int value_len;
+
+ assert(dtb != NULL);
+ assert(prop_name != NULL);
+ assert(value != NULL);
+ assert(node >= 0);
+
+ /* Access property and obtain its length (in bytes) */
+ prop = fdt_getprop(dtb, node, prop_name, &value_len);
+ if (prop == NULL) {
+ VERBOSE("Couldn't find property %s in dtb\n", prop_name);
+ return -FDT_ERR_NOTFOUND;
+ }
+
+ /* Verify that property length can fill the entire array. */
+ if (NCELLS((unsigned int)value_len) < cells) {
+ WARN("Property length mismatch\n");
+ return -FDT_ERR_BADVALUE;
+ }
+
+ for (unsigned int i = 0U; i < cells; i++) {
+ value[i] = fdt32_to_cpu(prop[i]);
+ }
+
+ return 0;
+}
+
+int fdt_read_uint32(const void *dtb, int node, const char *prop_name,
+ uint32_t *value)
+{
+ return fdt_read_uint32_array(dtb, node, prop_name, 1, value);
+}
+
+uint32_t fdt_read_uint32_default(const void *dtb, int node,
+ const char *prop_name, uint32_t dflt_value)
+{
+ uint32_t ret = dflt_value;
+ int err = fdt_read_uint32(dtb, node, prop_name, &ret);
+
+ if (err < 0) {
+ return dflt_value;
+ }
+
+ return ret;
+}
+
+int fdt_read_uint64(const void *dtb, int node, const char *prop_name,
+ uint64_t *value)
+{
+ uint32_t array[2] = {0, 0};
+ int ret;
+
+ ret = fdt_read_uint32_array(dtb, node, prop_name, 2, array);
+ if (ret < 0) {
+ return ret;
+ }
+
+ *value = ((uint64_t)array[0] << 32) | array[1];
+ return 0;
+}
+
+/*
+ * Read bytes from a given property of the given node. Any number of
+ * bytes of the property can be read.
+ * Returns 0 on success, and -1 on error.
+ */
+int fdtw_read_bytes(const void *dtb, int node, const char *prop,
+ unsigned int length, void *value)
+{
+ const void *ptr;
+ int value_len;
+
+ assert(dtb != NULL);
+ assert(prop != NULL);
+ assert(value != NULL);
+ assert(node >= 0);
+
+ /* Access property and obtain its length (in bytes) */
+ ptr = fdt_getprop_namelen(dtb, node, prop, (int)strlen(prop),
+ &value_len);
+ if (ptr == NULL) {
+ WARN("Couldn't find property %s in dtb\n", prop);
+ return -1;
+ }
+
+ /* Verify that property length is not less than number of bytes */
+ if ((unsigned int)value_len < length) {
+ WARN("Property length mismatch\n");
+ return -1;
+ }
+
+ (void)memcpy(value, ptr, length);
+
+ return 0;
+}
+
+/*
+ * Read string from a given property of the given node. Up to 'size - 1'
+ * characters are read, and a NUL terminator is added. Returns 0 on success,
+ * and -1 upon error.
+ */
+int fdtw_read_string(const void *dtb, int node, const char *prop,
+ char *str, size_t size)
+{
+ const char *ptr;
+ size_t len;
+
+ assert(dtb != NULL);
+ assert(node >= 0);
+ assert(prop != NULL);
+ assert(str != NULL);
+ assert(size > 0U);
+
+ ptr = fdt_getprop_namelen(dtb, node, prop, (int)strlen(prop), NULL);
+ if (ptr == NULL) {
+ WARN("Couldn't find property %s in dtb\n", prop);
+ return -1;
+ }
+
+ len = strlcpy(str, ptr, size);
+ if (len >= size) {
+ WARN("String of property %s in dtb has been truncated\n", prop);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Read UUID from a given property of the given node. Returns 0 on success,
+ * and a negative value upon error.
+ */
+int fdtw_read_uuid(const void *dtb, int node, const char *prop,
+ unsigned int length, uint8_t *uuid)
+{
+ /* Buffer for UUID string (plus NUL terminator) */
+ char uuid_string[UUID_STRING_LENGTH + 1U];
+ int err;
+
+ assert(dtb != NULL);
+ assert(prop != NULL);
+ assert(uuid != NULL);
+ assert(node >= 0);
+
+ if (length < UUID_BYTES_LENGTH) {
+ return -EINVAL;
+ }
+
+ err = fdtw_read_string(dtb, node, prop, uuid_string,
+ UUID_STRING_LENGTH + 1U);
+ if (err != 0) {
+ return err;
+ }
+
+ if (read_uuid(uuid, uuid_string) != 0) {
+ return -FDT_ERR_BADVALUE;
+ }
+
+ return 0;
+}
+
+/*
+ * Write cells in place to a given property of the given node. At most 2 cells
+ * of the property are written. Returns 0 on success, and -1 upon error.
+ */
+int fdtw_write_inplace_cells(void *dtb, int node, const char *prop,
+ unsigned int cells, void *value)
+{
+ int err, len;
+
+ assert(dtb != NULL);
+ assert(prop != NULL);
+ assert(value != NULL);
+ assert(node >= 0);
+
+	/* We expect either a 1-cell or a 2-cell property */
+ assert(cells <= 2U);
+
+ if (cells == 2U)
+ *(fdt64_t *)value = cpu_to_fdt64(*(uint64_t *)value);
+ else
+ *(fdt32_t *)value = cpu_to_fdt32(*(uint32_t *)value);
+
+ len = (int)cells * 4;
+
+ /* Set property value in place */
+ err = fdt_setprop_inplace(dtb, node, prop, value, len);
+ if (err != 0) {
+ WARN("Modify property %s failed with error %d\n", prop, err);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Write bytes in place to a given property of the given node.
+ * Any number of bytes of the property can be written.
+ * Returns 0 on success, and < 0 on error.
+ */
+int fdtw_write_inplace_bytes(void *dtb, int node, const char *prop,
+ unsigned int length, const void *data)
+{
+ const void *ptr;
+ int namelen, value_len, err;
+
+ assert(dtb != NULL);
+ assert(prop != NULL);
+ assert(data != NULL);
+ assert(node >= 0);
+
+ namelen = (int)strlen(prop);
+
+ /* Access property and obtain its length in bytes */
+ ptr = fdt_getprop_namelen(dtb, node, prop, namelen, &value_len);
+ if (ptr == NULL) {
+ WARN("Couldn't find property %s in dtb\n", prop);
+ return -1;
+ }
+
+ /* Verify that property length is not less than number of bytes */
+ if ((unsigned int)value_len < length) {
+ WARN("Property length mismatch\n");
+ return -1;
+ }
+
+ /* Set property value in place */
+ err = fdt_setprop_inplace_namelen_partial(dtb, node, prop,
+ namelen, 0,
+ data, (int)length);
+ if (err != 0) {
+ WARN("Set property %s failed with error %d\n", prop, err);
+ }
+
+ return err;
+}
+
+static uint64_t fdt_read_prop_cells(const fdt32_t *prop, int nr_cells)
+{
+ uint64_t reg = fdt32_to_cpu(prop[0]);
+
+ if (nr_cells > 1) {
+ reg = (reg << 32) | fdt32_to_cpu(prop[1]);
+ }
+
+ return reg;
+}
+
+int fdt_get_reg_props_by_index(const void *dtb, int node, int index,
+ uintptr_t *base, size_t *size)
+{
+ const fdt32_t *prop;
+ int parent, len;
+ int ac, sc;
+ int cell;
+
+ parent = fdt_parent_offset(dtb, node);
+ if (parent < 0) {
+ return -FDT_ERR_BADOFFSET;
+ }
+
+ ac = fdt_address_cells(dtb, parent);
+ sc = fdt_size_cells(dtb, parent);
+
+ cell = index * (ac + sc);
+
+ prop = fdt_getprop(dtb, node, "reg", &len);
+ if (prop == NULL) {
+ WARN("Couldn't find \"reg\" property in dtb\n");
+ return -FDT_ERR_NOTFOUND;
+ }
+
+ if (((cell + ac + sc) * (int)sizeof(uint32_t)) > len) {
+ return -FDT_ERR_BADVALUE;
+ }
+
+ if (base != NULL) {
+ *base = (uintptr_t)fdt_read_prop_cells(&prop[cell], ac);
+ }
+
+ if (size != NULL) {
+ *size = (size_t)fdt_read_prop_cells(&prop[cell + ac], sc);
+ }
+
+ return 0;
+}
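+
+/*
+ * Illustrative usage sketch, not taken from the original sources: read the
+ * base address and size of the first "reg" entry of a peripheral node.
+ *
+ *	uintptr_t uart_base;
+ *	size_t uart_size;
+ *
+ *	if (fdt_get_reg_props_by_index(dtb, node, 0, &uart_base,
+ *				       &uart_size) == 0) {
+ *		// uart_base/uart_size now describe the first reg region
+ *	}
+ */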
+
+/*******************************************************************************
+ * This function fills in the "reg" entry info (base & size) for the entry
+ * whose index is found by looking up 'name' in the "reg-names" property.
+ * Returns 0 on success and a negative FDT error code on failure.
+ ******************************************************************************/
+int fdt_get_reg_props_by_name(const void *dtb, int node, const char *name,
+ uintptr_t *base, size_t *size)
+{
+ int index;
+
+ index = fdt_stringlist_search(dtb, node, "reg-names", name);
+ if (index < 0) {
+ return index;
+ }
+
+ return fdt_get_reg_props_by_index(dtb, node, index, base, size);
+}
+
+/*******************************************************************************
+ * This function returns the offset of the node referenced by the
+ * "stdout-path" property of the /chosen (or /secure-chosen) node in the
+ * device tree.
+ * Returns the node offset on success and a negative FDT error code on failure.
+ ******************************************************************************/
+int fdt_get_stdout_node_offset(const void *dtb)
+{
+ int node;
+ const char *prop, *path;
+ int len;
+
+ /* The /secure-chosen node takes precedence over the standard one. */
+ node = fdt_path_offset(dtb, "/secure-chosen");
+ if (node < 0) {
+ node = fdt_path_offset(dtb, "/chosen");
+ if (node < 0) {
+ return -FDT_ERR_NOTFOUND;
+ }
+ }
+
+ prop = fdt_getprop(dtb, node, "stdout-path", NULL);
+ if (prop == NULL) {
+ return -FDT_ERR_NOTFOUND;
+ }
+
+ /* Determine the actual path length, as a colon terminates the path. */
+ path = strchr(prop, ':');
+ if (path == NULL) {
+ len = strlen(prop);
+ } else {
+ len = path - prop;
+ }
+
+ /* Aliases cannot start with a '/', so it must be the actual path. */
+ if (prop[0] == '/') {
+ return fdt_path_offset_namelen(dtb, prop, len);
+ }
+
+ /* Lookup the alias, as this contains the actual path. */
+ path = fdt_get_alias_namelen(dtb, prop, len);
+ if (path == NULL) {
+ return -FDT_ERR_NOTFOUND;
+ }
+
+ return fdt_path_offset(dtb, path);
+}
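+
+/*
+ * Illustrative device tree fragment, not taken from the original sources,
+ * showing what this helper resolves: the "stdout-path" value may be an alias,
+ * optionally followed by a colon-separated configuration string.
+ *
+ *	aliases {
+ *		serial0 = "/soc/uart@9000000";
+ *	};
+ *	chosen {
+ *		stdout-path = "serial0:115200n8";
+ *	};
+ *
+ * fdt_get_stdout_node_offset() strips ":115200n8", looks up the "serial0"
+ * alias and returns the offset of the uart@9000000 node.
+ */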
+
+
+/*******************************************************************************
+ * Only devices which are direct children of the root node use the CPU address
+ * domain. All other devices use addresses that are local to the device node
+ * and cannot be used directly by the CPU. The device tree provides an address
+ * translation mechanism through the "ranges" property, which maps a local
+ * address space to the parent address space. Since a device may be nested
+ * several levels below the root node, more than one level of address
+ * translation can be needed to map a device-local address to a CPU address.
+ * The fdtw_translate_address() API performs this translation of a local
+ * address to a global address with the help of various helper functions.
+ ******************************************************************************/
+
+static bool fdtw_xlat_hit(const fdt32_t *value, int child_addr_size,
+ int parent_addr_size, int range_size, uint64_t base_address,
+ uint64_t *translated_addr)
+{
+ uint64_t local_address, parent_address, addr_range;
+
+ local_address = fdt_read_prop_cells(value, child_addr_size);
+ parent_address = fdt_read_prop_cells(value + child_addr_size,
+ parent_addr_size);
+ addr_range = fdt_read_prop_cells(value + child_addr_size +
+ parent_addr_size,
+ range_size);
+ VERBOSE("DT: Address %" PRIx64 " mapped to %" PRIx64 " with range %" PRIx64 "\n",
+ local_address, parent_address, addr_range);
+
+ /* Perform range check */
+ if ((base_address < local_address) ||
+ (base_address >= local_address + addr_range)) {
+ return false;
+ }
+
+ /* Found hit for the addr range that needs to be translated */
+ *translated_addr = parent_address + (base_address - local_address);
+	VERBOSE("DT: child address %" PRIx64 " mapped to %" PRIx64 " in parent bus\n",
+ local_address, parent_address);
+ return true;
+}
+
+#define ILLEGAL_ADDR ULL(~0)
+
+static uint64_t fdtw_search_all_xlat_entries(const void *dtb,
+ const struct fdt_property *ranges_prop,
+ int local_bus, uint64_t base_address)
+{
+ uint64_t translated_addr;
+ const fdt32_t *next_entry;
+ int parent_bus_node, nxlat_entries, length;
+ int self_addr_cells, parent_addr_cells, self_size_cells, ncells_xlat;
+
+ /*
+ * The number of cells in one translation entry in ranges is the sum of
+ * the following values:
+ * self#address-cells + parent#address-cells + self#size-cells
+	 * Ex: the iofpga ranges property has one translation entry with 4 cells.
+	 * They represent iofpga#addr-cells + motherboard#addr-cells + iofpga#size-cells
+	 * = 1 + 2 + 1.
+ */
+
+ parent_bus_node = fdt_parent_offset(dtb, local_bus);
+ self_addr_cells = fdt_address_cells(dtb, local_bus);
+ self_size_cells = fdt_size_cells(dtb, local_bus);
+ parent_addr_cells = fdt_address_cells(dtb, parent_bus_node);
+
+ /* Number of cells per translation entry i.e., mapping */
+ ncells_xlat = self_addr_cells + parent_addr_cells + self_size_cells;
+
+ assert(ncells_xlat > 0);
+
+ /*
+	 * Find the number of translations (mappings) specified in the current
+	 * `ranges` property. Note that length represents the number of bytes
+	 * and is stored in big-endian format.
+ */
+ length = fdt32_to_cpu(ranges_prop->len);
+ nxlat_entries = (length/sizeof(uint32_t))/ncells_xlat;
+
+ assert(nxlat_entries > 0);
+
+ next_entry = (const fdt32_t *)ranges_prop->data;
+
+ /* Iterate over the entries in the "ranges" */
+ for (int i = 0; i < nxlat_entries; i++) {
+ if (fdtw_xlat_hit(next_entry, self_addr_cells,
+ parent_addr_cells, self_size_cells, base_address,
+ &translated_addr)){
+ return translated_addr;
+ }
+ next_entry = next_entry + ncells_xlat;
+ }
+
+ INFO("DT: No translation found for address %" PRIx64 " in node %s\n",
+ base_address, fdt_get_name(dtb, local_bus, NULL));
+ return ILLEGAL_ADDR;
+}
+
+
+/*******************************************************************************
+ * Address mapping needs to be done recursively, starting from the current
+ * node and walking up to the root node through all intermediate parent nodes.
+ * A sample device tree is shown here:
+
+smb@0,0 {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
+
+ motherboard {
+ arm,v2m-memory-map = "rs1";
+ compatible = "arm,vexpress,v2m-p1", "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges;
+
+ iofpga@3,00000000 {
+ compatible = "arm,amba-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 3 0 0x200000>;
+ v2m_serial1: uart@a0000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0a0000 0x1000>;
+ interrupts = <0 6 4>;
+ clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
+ clock-names = "uartclk", "apb_pclk";
+			};
+		};
+	};
+};
+
+ * As seen above, there are 3 levels of address translations needed. An empty
+ * `ranges` property denotes identity mapping (as seen in `motherboard` node).
+ * Each ranges property can map a set of child addresses to parent bus. Hence
+ * there can be more than 1 (translation) entry in the ranges property as seen
+ * in the `smb` node which has 6 translation entries.
+ ******************************************************************************/
+
+/* Recursive implementation */
+uint64_t fdtw_translate_address(const void *dtb, int node,
+ uint64_t base_address)
+{
+ int length, local_bus_node;
+ const char *node_name;
+ uint64_t global_address;
+
+ local_bus_node = fdt_parent_offset(dtb, node);
+ node_name = fdt_get_name(dtb, local_bus_node, NULL);
+
+ /*
+ * In the example given above, starting from the leaf node:
+	 * uart@a0000 represents the current node
+ * iofpga@3,00000000 represents the local bus
+ * motherboard represents the parent bus
+ */
+
+ /* Read the ranges property */
+ const struct fdt_property *property = fdt_get_property(dtb,
+ local_bus_node, "ranges", &length);
+
+ if (property == NULL) {
+ if (local_bus_node == 0) {
+ /*
+			 * The root node doesn't have a ranges property, as its
+			 * addresses are already in the CPU address space.
+ */
+ return base_address;
+ }
+ INFO("DT: Couldn't find ranges property in node %s\n",
+ node_name);
+ return ILLEGAL_ADDR;
+ } else if (length == 0) {
+ /* empty ranges indicates identity map to parent bus */
+ return fdtw_translate_address(dtb, local_bus_node, base_address);
+ }
+
+ VERBOSE("DT: Translation lookup in node %s at offset %d\n", node_name,
+ local_bus_node);
+ global_address = fdtw_search_all_xlat_entries(dtb, property,
+ local_bus_node, base_address);
+
+ if (global_address == ILLEGAL_ADDR) {
+ return ILLEGAL_ADDR;
+ }
+
+ /* Translate the local device address recursively */
+ return fdtw_translate_address(dtb, local_bus_node, global_address);
+}
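+
+/*
+ * Worked example, derived from the sample tree in the comment above: for the
+ * uart@a0000 node, a local base address of 0x0a0000 is first matched by the
+ * iofpga entry <0 3 0 0x200000>, giving <3 0x0a0000> in the motherboard
+ * address space; the empty motherboard "ranges" maps it unchanged into the
+ * smb address space; finally the smb entry <3 0 0 0x1c000000 0x04000000>
+ * yields the CPU-visible address 0x1c0a0000:
+ *
+ *	uint64_t pa = fdtw_translate_address(dtb, uart_node, 0x0a0000ULL);
+ *
+ * pa is 0x1c0a0000 on success, or ILLEGAL_ADDR if any lookup fails.
+ */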
+
+/*
+ * For every CPU node (`/cpus/cpu@n`) in an FDT, execute a callback passing a
+ * pointer to the FDT and the offset of the CPU node. If the return value of the
+ * callback is negative, it is treated as an error and the loop is aborted. In
+ * this situation, the value of the callback is returned from the function.
+ *
+ * Returns `0` on success, or a negative integer representing an error code.
+ */
+int fdtw_for_each_cpu(const void *dtb,
+ int (*callback)(const void *dtb, int node, uintptr_t mpidr))
+{
+ int ret = 0;
+ int parent, node = 0;
+
+ parent = fdt_path_offset(dtb, "/cpus");
+ if (parent < 0) {
+ return parent;
+ }
+
+ fdt_for_each_subnode(node, dtb, parent) {
+ const char *name;
+ int len;
+
+ uintptr_t mpidr = 0U;
+
+ name = fdt_get_name(dtb, node, &len);
+ if (strncmp(name, "cpu@", 4) != 0) {
+ continue;
+ }
+
+ ret = fdt_get_reg_props_by_index(dtb, node, 0, &mpidr, NULL);
+ if (ret < 0) {
+ break;
+ }
+
+ ret = callback(dtb, node, mpidr);
+ if (ret < 0) {
+ break;
+ }
+ }
+
+ return ret;
+}
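+
+/*
+ * Illustrative callback sketch, not taken from the original sources: count
+ * the CPU nodes in the tree (cpu_count being a hypothetical counter).
+ *
+ *	static int count_cpu(const void *dtb, int node, uintptr_t mpidr)
+ *	{
+ *		cpu_count++;
+ *		return 0;
+ *	}
+ *
+ *	ret = fdtw_for_each_cpu(dtb, count_cpu);
+ *
+ * A negative return value from the callback stops the iteration and is
+ * propagated back to the caller of fdtw_for_each_cpu().
+ */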
+
+/*
+ * Find a given node in device tree. If not present, add it.
+ * Returns offset of node found/added on success, and < 0 on error.
+ */
+int fdtw_find_or_add_subnode(void *fdt, int parentoffset, const char *name)
+{
+ int offset;
+
+ offset = fdt_subnode_offset(fdt, parentoffset, name);
+
+ if (offset == -FDT_ERR_NOTFOUND) {
+ offset = fdt_add_subnode(fdt, parentoffset, name);
+ }
+
+ if (offset < 0) {
+ ERROR("%s: %s: %s\n", __func__, name, fdt_strerror(offset));
+ }
+
+ return offset;
+}
diff --git a/common/fdt_wrappers.mk b/common/fdt_wrappers.mk
new file mode 100644
index 0000000..62b8c6e
--- /dev/null
+++ b/common/fdt_wrappers.mk
@@ -0,0 +1,7 @@
+#
+# Copyright (c) 2021, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+FDT_WRAPPERS_SOURCES := common/fdt_wrappers.c
diff --git a/common/feat_detect.c b/common/feat_detect.c
new file mode 100644
index 0000000..be22c6e
--- /dev/null
+++ b/common/feat_detect.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_features.h>
+#include <common/debug.h>
+#include <common/feat_detect.h>
+
+static bool tainted;
+
+/*******************************************************************************
+ * This section lists the wrapper routines for each feature; they evaluate
+ * the feature state (FEAT_STATE_ALWAYS or FEAT_STATE_CHECK) and take the
+ * necessary action as follows:
+ *
+ * Each verifies whether FEAT_XXX (e.g. FEAT_SB) is supported by the PE.
+ * Without this check, an exception would occur during the context save/restore
+ * routines if the feature is enabled but not supported by the PE.
+ ******************************************************************************/
+
+#define feat_detect_panic(a, b) ((a) ? (void)0 : feature_panic(b))
+
+/*******************************************************************************
+ * Function : feature_panic
+ * Customised panic function with error logging mechanism to list the feature
+ * not supported by the PE.
+ ******************************************************************************/
+static inline void feature_panic(char *feat_name)
+{
+ ERROR("FEAT_%s not supported by the PE\n", feat_name);
+ panic();
+}
+
+/*******************************************************************************
+ * Function : check_feature
+ * Check for a valid combination of build time flags (ENABLE_FEAT_xxx) and
+ * feature availability on the hardware. <min> is the smallest feature
+ * ID field value that is required for that feature.
+ * Triggers a panic later if a feature is forcefully enabled, but not
+ * available on the PE. Also will panic if the hardware feature ID field
+ * is larger than the maximum known and supported number, specified by <max>.
+ *
+ * We force inlining here to let the compiler optimise away the whole check
+ * if the feature is disabled at build time (FEAT_STATE_DISABLED).
+ ******************************************************************************/
+static inline void __attribute((__always_inline__))
+check_feature(int state, unsigned long field, const char *feat_name,
+ unsigned int min, unsigned int max)
+{
+ if (state == FEAT_STATE_ALWAYS && field < min) {
+ ERROR("FEAT_%s not supported by the PE\n", feat_name);
+ tainted = true;
+ }
+ if (state >= FEAT_STATE_ALWAYS && field > max) {
+ ERROR("FEAT_%s is version %ld, but is only known up to version %d\n",
+ feat_name, field, max);
+ tainted = true;
+ }
+}
+
+/************************************************
+ * Feature : FEAT_PAUTH (Pointer Authentication)
+ ***********************************************/
+static void read_feat_pauth(void)
+{
+#if (ENABLE_PAUTH == FEAT_STATE_ALWAYS) || (CTX_INCLUDE_PAUTH_REGS == FEAT_STATE_ALWAYS)
+ feat_detect_panic(is_armv8_3_pauth_present(), "PAUTH");
+#endif
+}
+
+/************************************************
+ * Feature : FEAT_MTE (Memory Tagging Extension)
+ ***********************************************/
+static void read_feat_mte(void)
+{
+#if (CTX_INCLUDE_MTE_REGS == FEAT_STATE_ALWAYS)
+ unsigned int mte = get_armv8_5_mte_support();
+
+ feat_detect_panic((mte != MTE_UNIMPLEMENTED), "MTE");
+#endif
+}
+
+/****************************************************
+ * Feature : FEAT_BTI (Branch Target Identification)
+ ***************************************************/
+static void read_feat_bti(void)
+{
+#if (ENABLE_BTI == FEAT_STATE_ALWAYS)
+ feat_detect_panic(is_armv8_5_bti_present(), "BTI");
+#endif
+}
+
+/**************************************************
+ * Feature : FEAT_RME (Realm Management Extension)
+ *************************************************/
+static void read_feat_rme(void)
+{
+#if (ENABLE_RME == FEAT_STATE_ALWAYS)
+ feat_detect_panic((get_armv9_2_feat_rme_support() !=
+ ID_AA64PFR0_FEAT_RME_NOT_SUPPORTED), "RME");
+#endif
+}
+
+/******************************************************************
+ * Feature : FEAT_RNG_TRAP (Trapping support for RNDR/RNDRRS)
+ *****************************************************************/
+static void read_feat_rng_trap(void)
+{
+#if (ENABLE_FEAT_RNG_TRAP == FEAT_STATE_ALWAYS)
+ feat_detect_panic(is_feat_rng_trap_present(), "RNG_TRAP");
+#endif
+}
+
+/***********************************************************************************
+ * TF-A supports many Arm architectural features, from Armv8.0 up to v8.7 and
+ * beyond. These features are mostly enabled through build flags. This
+ * mechanism helps validate those build flags in the early boot phase, either
+ * in BL1 or BL31 depending on the platform, and identifies and reports the
+ * features which are enabled but not supported by the PE.
+ *
+ * It reads the ID registers for all the enabled features and ensures that the
+ * features are supported by the PE.
+ * If they are not, it stops booting at an early phase and logs error messages,
+ * notifying the platform about the features that are not supported.
+ *
+ * The procedure is implemented with a tri-state approach for each feature:
+ * ENABLE_FEAT_xxx = 0 : The feature is disabled statically at compile time.
+ * ENABLE_FEAT_xxx = 1 : The feature is enabled and must be present in hardware.
+ *                       The code panics at cold boot if the feature is absent.
+ * ENABLE_FEAT_xxx = 2 : The feature is enabled dynamically at runtime,
+ *                       depending on hardware capability.
+ *
+ * For better readability, the state values are defined with macros, namely
+ * { FEAT_STATE_DISABLED, FEAT_STATE_ALWAYS, FEAT_STATE_CHECK }, taking the
+ * values { 0, 1, 2 }, respectively.
+ **********************************************************************************/
+void detect_arch_features(void)
+{
+ tainted = false;
+
+ /* v8.0 features */
+ check_feature(ENABLE_FEAT_SB, read_feat_sb_id_field(), "SB", 1, 1);
+ check_feature(ENABLE_FEAT_CSV2_2, read_feat_csv2_id_field(),
+ "CSV2_2", 2, 3);
+ /*
+	 * Even though PMUv3 is an OPTIONAL feature, it is always implemented,
+	 * and Arm prescribes so. So assume it will be there and do away with a
+	 * build flag for it. This checks minor PMUv3px revisions, so that new
+	 * ones are caught as they come along.
+ */
+ check_feature(FEAT_STATE_ALWAYS, read_feat_pmuv3_id_field(),
+ "PMUv3", 1, ID_AA64DFR0_PMUVER_PMUV3P7);
+
+ /* v8.1 features */
+ check_feature(ENABLE_FEAT_PAN, read_feat_pan_id_field(), "PAN", 1, 3);
+ check_feature(ENABLE_FEAT_VHE, read_feat_vhe_id_field(), "VHE", 1, 1);
+
+ /* v8.2 features */
+ check_feature(ENABLE_SVE_FOR_NS, read_feat_sve_id_field(),
+ "SVE", 1, 1);
+ check_feature(ENABLE_FEAT_RAS, read_feat_ras_id_field(), "RAS", 1, 2);
+
+ /* v8.3 features */
+ read_feat_pauth();
+
+ /* v8.4 features */
+ check_feature(ENABLE_FEAT_DIT, read_feat_dit_id_field(), "DIT", 1, 1);
+ check_feature(ENABLE_FEAT_AMU, read_feat_amu_id_field(),
+ "AMUv1", 1, 2);
+ check_feature(ENABLE_FEAT_MPAM, read_feat_mpam_version(),
+ "MPAM", 1, 17);
+ check_feature(CTX_INCLUDE_NEVE_REGS, read_feat_nv_id_field(),
+ "NV2", 2, 2);
+ check_feature(ENABLE_FEAT_SEL2, read_feat_sel2_id_field(),
+ "SEL2", 1, 1);
+ check_feature(ENABLE_TRF_FOR_NS, read_feat_trf_id_field(),
+ "TRF", 1, 1);
+
+ /* v8.5 features */
+ read_feat_mte();
+ check_feature(ENABLE_FEAT_RNG, read_feat_rng_id_field(), "RNG", 1, 1);
+ read_feat_bti();
+ read_feat_rng_trap();
+
+ /* v8.6 features */
+ check_feature(ENABLE_FEAT_AMUv1p1, read_feat_amu_id_field(),
+ "AMUv1p1", 2, 2);
+ check_feature(ENABLE_FEAT_FGT, read_feat_fgt_id_field(), "FGT", 1, 1);
+ check_feature(ENABLE_FEAT_ECV, read_feat_ecv_id_field(), "ECV", 1, 2);
+ check_feature(ENABLE_FEAT_TWED, read_feat_twed_id_field(),
+ "TWED", 1, 1);
+
+ /*
+	 * Even though this is a "DISABLE" flag, it confusingly performs
+	 * feature enablement duties like all the other flags here. Check it
+	 * against the HW feature only when we intend to diverge from the
+	 * default behaviour.
+ */
+ check_feature(DISABLE_MTPMU, read_feat_mtpmu_id_field(), "MTPMU", 1, 1);
+
+ /* v8.7 features */
+ check_feature(ENABLE_FEAT_HCX, read_feat_hcx_id_field(), "HCX", 1, 1);
+
+ /* v8.9 features */
+ check_feature(ENABLE_FEAT_TCR2, read_feat_tcr2_id_field(),
+ "TCR2", 1, 1);
+ check_feature(ENABLE_FEAT_S2PIE, read_feat_s2pie_id_field(),
+ "S2PIE", 1, 1);
+ check_feature(ENABLE_FEAT_S1PIE, read_feat_s1pie_id_field(),
+ "S1PIE", 1, 1);
+ check_feature(ENABLE_FEAT_S2POE, read_feat_s2poe_id_field(),
+ "S2POE", 1, 1);
+ check_feature(ENABLE_FEAT_S1POE, read_feat_s1poe_id_field(),
+ "S1POE", 1, 1);
+ check_feature(ENABLE_FEAT_MTE_PERM, read_feat_mte_perm_id_field(),
+ "MTE_PERM", 1, 1);
+
+ /* v9.0 features */
+ check_feature(ENABLE_BRBE_FOR_NS, read_feat_brbe_id_field(),
+ "BRBE", 1, 2);
+ check_feature(ENABLE_TRBE_FOR_NS, read_feat_trbe_id_field(),
+ "TRBE", 1, 1);
+
+ /* v9.2 features */
+ check_feature(ENABLE_SME_FOR_NS, read_feat_sme_id_field(),
+ "SME", 1, 2);
+ check_feature(ENABLE_SME2_FOR_NS, read_feat_sme_id_field(),
+ "SME2", 2, 2);
+
+ /* v9.4 features */
+ check_feature(ENABLE_FEAT_GCS, read_feat_gcs_id_field(), "GCS", 1, 1);
+
+ read_feat_rme();
+
+ if (tainted) {
+ panic();
+ }
+}
diff --git a/common/image_decompress.c b/common/image_decompress.c
new file mode 100644
index 0000000..a4586ae
--- /dev/null
+++ b/common/image_decompress.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdint.h>
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/image_decompress.h>
+
+static uintptr_t decompressor_buf_base;
+static uint32_t decompressor_buf_size;
+static decompressor_t *decompressor;
+static struct image_info saved_image_info;
+
+void image_decompress_init(uintptr_t buf_base, uint32_t buf_size,
+ decompressor_t *_decompressor)
+{
+ decompressor_buf_base = buf_base;
+ decompressor_buf_size = buf_size;
+ decompressor = _decompressor;
+}
+
+void image_decompress_prepare(struct image_info *info)
+{
+ /*
+ * If the image is compressed, it should be loaded into the temporary
+ * buffer instead of its final destination. We save image_info, then
+ * override ->image_base and ->image_max_size so that load_image() will
+ * transfer the compressed data to the temporary buffer.
+ */
+ saved_image_info = *info;
+ info->image_base = decompressor_buf_base;
+ info->image_max_size = decompressor_buf_size;
+}
+
+int image_decompress(struct image_info *info)
+{
+ uintptr_t compressed_image_base, image_base, work_base;
+ uint32_t compressed_image_size, work_size;
+ int ret;
+
+ /*
+ * The size of compressed data has been filled by load_image().
+ * Read it out before restoring image_info.
+ */
+ compressed_image_size = info->image_size;
+ compressed_image_base = info->image_base;
+ *info = saved_image_info;
+
+ assert(compressed_image_size <= decompressor_buf_size);
+
+ image_base = info->image_base;
+
+ /*
+ * Use the rest of the temporary buffer as workspace of the
+ * decompressor since the decompressor may need additional memory.
+ */
+ work_base = compressed_image_base + compressed_image_size;
+ work_size = decompressor_buf_size - compressed_image_size;
+
+ ret = decompressor(&compressed_image_base, compressed_image_size,
+ &image_base, info->image_max_size,
+ work_base, work_size);
+ if (ret) {
+ ERROR("Failed to decompress image (err=%d)\n", ret);
+ return ret;
+ }
+
+ /* image_base is updated to the final pos when decompressor() exits. */
+ info->image_size = image_base - info->image_base;
+
+ flush_dcache_range(info->image_base, info->image_size);
+
+ return 0;
+}
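+
+/*
+ * Illustrative call flow sketch, not taken from the original sources. The
+ * buffer symbols and the gunzip() decompressor name are assumptions; any
+ * function matching decompressor_t works.
+ *
+ *	image_decompress_init(TEMP_BUF_BASE, TEMP_BUF_SIZE, gunzip);
+ *	...
+ *	image_decompress_prepare(&image_info);
+ *	(the compressed image is then loaded into image_info as usual)
+ *	rc = image_decompress(&image_info);
+ *
+ * prepare() redirects the load to the temporary buffer, and the final call
+ * inflates the data to the image's real base address.
+ */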
diff --git a/common/runtime_svc.c b/common/runtime_svc.c
new file mode 100644
index 0000000..a2c0c09
--- /dev/null
+++ b/common/runtime_svc.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+
+/*******************************************************************************
+ * The 'rt_svc_descs' array holds the runtime service descriptors exported by
+ * services by placing them in the 'rt_svc_descs' linker section.
+ * The 'rt_svc_descs_indices' array holds the index of a descriptor in the
+ * 'rt_svc_descs' array. When an SMC arrives, the OEN[29:24] bits and the call
+ * type[31] bit in the function id are combined to get an index into the
+ * 'rt_svc_descs_indices' array. This gives the index of the descriptor in the
+ * 'rt_svc_descs' array which contains the SMC handler.
+ ******************************************************************************/
+uint8_t rt_svc_descs_indices[MAX_RT_SVCS];
+
+#define RT_SVC_DECS_NUM ((RT_SVC_DESCS_END - RT_SVC_DESCS_START)\
+ / sizeof(rt_svc_desc_t))
+
+/*******************************************************************************
+ * Function to invoke the registered `handle` corresponding to the smc_fid in
+ * AArch32 mode.
+ ******************************************************************************/
+uintptr_t handle_runtime_svc(uint32_t smc_fid,
+ void *cookie,
+ void *handle,
+ unsigned int flags)
+{
+ u_register_t x1, x2, x3, x4;
+ unsigned int index;
+ unsigned int idx;
+ const rt_svc_desc_t *rt_svc_descs;
+
+ assert(handle != NULL);
+ idx = get_unique_oen_from_smc_fid(smc_fid);
+ assert(idx < MAX_RT_SVCS);
+
+ index = rt_svc_descs_indices[idx];
+ if (index >= RT_SVC_DECS_NUM)
+ SMC_RET1(handle, SMC_UNK);
+
+ rt_svc_descs = (rt_svc_desc_t *) RT_SVC_DESCS_START;
+
+ get_smc_params_from_ctx(handle, x1, x2, x3, x4);
+
+ return rt_svc_descs[index].handle(smc_fid, x1, x2, x3, x4, cookie,
+ handle, flags);
+}
+
+/*******************************************************************************
+ * Simple routine to sanity check a runtime service descriptor before using it
+ ******************************************************************************/
+static int32_t validate_rt_svc_desc(const rt_svc_desc_t *desc)
+{
+ if (desc == NULL)
+ return -EINVAL;
+
+ if (desc->start_oen > desc->end_oen)
+ return -EINVAL;
+
+ if (desc->end_oen >= OEN_LIMIT)
+ return -EINVAL;
+
+ if ((desc->call_type != SMC_TYPE_FAST) &&
+ (desc->call_type != SMC_TYPE_YIELD))
+ return -EINVAL;
+
+ /* A runtime service having no init or handle function doesn't make sense */
+ if ((desc->init == NULL) && (desc->handle == NULL))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*******************************************************************************
+ * This function calls the initialisation routine in the descriptor exported by
+ * a runtime service. Once a descriptor has been validated, its start & end
+ * owning entity numbers and the call type are combined to form a unique oen.
+ * The unique oen is used as an index into the 'rt_svc_descs_indices' array.
+ * The index of the runtime service descriptor is stored at this index.
+ ******************************************************************************/
+void __init runtime_svc_init(void)
+{
+ int rc = 0;
+ uint8_t index, start_idx, end_idx;
+ rt_svc_desc_t *rt_svc_descs;
+
+	/* Assert that the number of descriptors detected is less than the maximum */
+ assert((RT_SVC_DESCS_END >= RT_SVC_DESCS_START) &&
+ (RT_SVC_DECS_NUM < MAX_RT_SVCS));
+
+ /* If no runtime services are implemented then simply bail out */
+ if (RT_SVC_DECS_NUM == 0U)
+ return;
+
+ /* Initialise internal variables to invalid state */
+ (void)memset(rt_svc_descs_indices, -1, sizeof(rt_svc_descs_indices));
+
+ rt_svc_descs = (rt_svc_desc_t *) RT_SVC_DESCS_START;
+ for (index = 0U; index < RT_SVC_DECS_NUM; index++) {
+ rt_svc_desc_t *service = &rt_svc_descs[index];
+
+ /*
+ * An invalid descriptor is an error condition since it is
+ * difficult to predict the system behaviour in the absence
+ * of this service.
+ */
+ rc = validate_rt_svc_desc(service);
+ if (rc != 0) {
+ ERROR("Invalid runtime service descriptor %p\n",
+ (void *) service);
+ panic();
+ }
+
+ /*
+ * The runtime service may have separate rt_svc_desc_t
+		 * for its fast smc and yielding smc. Since the service itself
+		 * needs to be initialized only once, only one of them will have
+ * an initialisation routine defined. Call the initialisation
+ * routine for this runtime service, if it is defined.
+ */
+ if (service->init != NULL) {
+ rc = service->init();
+ if (rc != 0) {
+ ERROR("Error initializing runtime service %s\n",
+ service->name);
+ continue;
+ }
+ }
+
+ /*
+ * Fill the indices corresponding to the start and end
+ * owning entity numbers with the index of the
+ * descriptor which will handle the SMCs for this owning
+ * entity range.
+ */
+ start_idx = (uint8_t)get_unique_oen(service->start_oen,
+ service->call_type);
+ end_idx = (uint8_t)get_unique_oen(service->end_oen,
+ service->call_type);
+ assert(start_idx <= end_idx);
+ assert(end_idx < MAX_RT_SVCS);
+ for (; start_idx <= end_idx; start_idx++)
+ rt_svc_descs_indices[start_idx] = index;
+ }
+}
diff --git a/common/tf_crc32.c b/common/tf_crc32.c
new file mode 100644
index 0000000..b33d36e
--- /dev/null
+++ b/common/tf_crc32.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdarg.h>
+#include <assert.h>
+
+#include <arm_acle.h>
+#include <common/debug.h>
+#include <common/tf_crc32.h>
+
+/* Compute a CRC using the Arm CRC32 intrinsic instructions.
+ *
+ * This function is useful on platforms whose CPUs implement the Armv8.0 CRC32
+ * instructions, and onwards.
+ * Such platforms should make sure to add the compile switch
+ * '-march=armv8-a+crc' for successful compilation of this file.
+ *
+ * @crc: previous accumulated CRC
+ * @buf: buffer base address
+ * @size: the size of the buffer
+ *
+ * Return calculated CRC value
+ */
+uint32_t tf_crc32(uint32_t crc, const unsigned char *buf, size_t size)
+{
+ assert(buf != NULL);
+
+ uint32_t calc_crc = ~crc;
+ const unsigned char *local_buf = buf;
+ size_t local_size = size;
+
+ /*
+ * calculate CRC over byte data
+ */
+ while (local_size != 0UL) {
+ calc_crc = __crc32b(calc_crc, *local_buf);
+ local_buf++;
+ local_size--;
+ }
+
+ return ~calc_crc;
+}
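+
+/*
+ * Illustrative usage sketch, not taken from the original sources: compute the
+ * CRC of a buffer in one go by starting from an accumulated value of 0, or
+ * feed the data in chunks by passing the previous result back in.
+ *
+ *	uint32_t crc = tf_crc32(0U, buf, len);
+ *	crc = tf_crc32(crc, more_data, more_len);
+ *
+ * The second call continues the accumulation, so the result equals a single
+ * call over the concatenated data.
+ */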
diff --git a/common/tf_log.c b/common/tf_log.c
new file mode 100644
index 0000000..68f1be4
--- /dev/null
+++ b/common/tf_log.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdarg.h>
+#include <assert.h>
+#include <stdio.h>
+
+#include <common/debug.h>
+#include <plat/common/platform.h>
+
+/* Set the default maximum log level to the `LOG_LEVEL` build flag */
+static unsigned int max_log_level = LOG_LEVEL;
+
+/*
+ * The common log function which is invoked by TF-A code.
+ * This function should not be directly invoked and is meant to be
+ * only used by the log macros defined in debug.h. The function
+ * expects the first character in the format string to be one of the
+ * LOG_MARKER_* macros defined in debug.h.
+ */
+void tf_log(const char *fmt, ...)
+{
+ unsigned int log_level;
+ va_list args;
+ const char *prefix_str;
+
+ /* We expect the LOG_MARKER_* macro as the first character */
+ log_level = fmt[0];
+
+ /* Verify that log_level is one of LOG_MARKER_* macro defined in debug.h */
+ assert((log_level > 0U) && (log_level <= LOG_LEVEL_VERBOSE));
+ assert((log_level % 10U) == 0U);
+
+ if (log_level > max_log_level)
+ return;
+
+ prefix_str = plat_log_get_prefix(log_level);
+
+ while (*prefix_str != '\0') {
+ (void)putchar(*prefix_str);
+ prefix_str++;
+ }
+
+ va_start(args, fmt);
+ (void)vprintf(fmt + 1, args);
+ va_end(args);
+}
+
+void tf_log_newline(const char log_fmt[2])
+{
+ unsigned int log_level = log_fmt[0];
+
+ /* Verify that log_level is one of LOG_MARKER_* macro defined in debug.h */
+ assert((log_level > 0U) && (log_level <= LOG_LEVEL_VERBOSE));
+ assert((log_level % 10U) == 0U);
+
+ if (log_level > max_log_level)
+ return;
+
+ putchar('\n');
+}
+
+/*
+ * Helper function that allows the platform to set the log level dynamically.
+ * The maximum log level is determined by the `LOG_LEVEL` build flag at compile
+ * time; this helper can only set a log level lower than, or equal to, that.
+ */
+void tf_log_set_max_level(unsigned int log_level)
+{
+ assert(log_level <= LOG_LEVEL_VERBOSE);
+ assert((log_level % 10U) == 0U);
+
+ /* Cap log_level to the compile time maximum. */
+ if (log_level <= (unsigned int)LOG_LEVEL)
+ max_log_level = log_level;
+}
diff --git a/common/uuid.c b/common/uuid.c
new file mode 100644
index 0000000..969eda1
--- /dev/null
+++ b/common/uuid.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <common/uuid.h>
+
+/* Return the hex nibble value of a char */
+static int8_t hex_val(char hex)
+{
+ int8_t val = 0;
+
+ if ((hex >= '0') && (hex <= '9')) {
+ val = (int8_t)(hex - '0');
+ } else if ((hex >= 'a') && (hex <= 'f')) {
+ val = (int8_t)(hex - 'a' + 0xa);
+ } else if ((hex >= 'A') && (hex <= 'F')) {
+ val = (int8_t)(hex - 'A' + 0xa);
+ } else {
+ val = -1;
+ }
+
+ return val;
+}
+
+/*
+ * Read hex_src_len hex characters from hex_src, convert them to bytes and
+ * store them in the buffer pointed to by dest.
+ */
+static int read_hex(uint8_t *dest, char *hex_src, unsigned int hex_src_len)
+{
+ int8_t nibble;
+ uint8_t byte;
+
+ /*
+ * The string length must be a multiple of 2 to represent an
+ * exact number of bytes.
+ */
+ assert((hex_src_len % 2U) == 0U);
+
+ for (unsigned int i = 0U; i < (hex_src_len / 2U); i++) {
+ nibble = 0;
+ byte = 0U;
+
+ nibble = hex_val(hex_src[2U * i]);
+ if (nibble < 0) {
+ return -1;
+ }
+ byte = (uint8_t)nibble;
+ byte <<= 4U;
+
+ nibble = hex_val(hex_src[(2U * i) + 1U]);
+ if (nibble < 0) {
+ return -1;
+ }
+ byte |= (uint8_t)nibble;
+
+ *dest = byte;
+ dest++;
+ }
+
+ return 0;
+}
+
+/* Parse UUIDs of the form aabbccdd-eeff-4099-8877-665544332211 */
+int read_uuid(uint8_t *dest, char *uuid)
+{
+ int err;
+ uint8_t *dest_start = dest;
+
+ /* Check that we have enough characters */
+ if (strnlen(uuid, UUID_STRING_LENGTH) != UUID_STRING_LENGTH) {
+ WARN("UUID string is too short\n");
+ return -EINVAL;
+ }
+
+ /* aabbccdd */
+ err = read_hex(dest, uuid, 8);
+ uuid += 8;
+ dest += 4;
+
+ /* Check for '-' */
+ err |= ((*uuid == '-') ? 0 : -1);
+ uuid++;
+
+ /* eeff */
+ err |= read_hex(dest, uuid, 4);
+ uuid += 4;
+ dest += 2;
+
+ /* Check for '-' */
+ err |= ((*uuid == '-') ? 0 : -1);
+ uuid++;
+
+ /* 4099 */
+ err |= read_hex(dest, uuid, 4);
+ uuid += 4;
+ dest += 2;
+
+ /* Check for '-' */
+ err |= ((*uuid == '-') ? 0 : -1);
+ uuid++;
+
+ /* 8877 */
+ err |= read_hex(dest, uuid, 4);
+ uuid += 4;
+ dest += 2;
+
+ /* Check for '-' */
+ err |= ((*uuid == '-') ? 0 : -1);
+ uuid++;
+
+ /* 665544332211 */
+ err |= read_hex(dest, uuid, 12);
+ uuid += 12;
+ dest += 6;
+
+ if (err < 0) {
+ WARN("Error parsing UUID\n");
+ /* Clear the buffer on error */
+ memset((void *)dest_start, '\0', UUID_BYTES_LENGTH * sizeof(uint8_t));
+ return -EINVAL;
+ }
+
+ return 0;
+}
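+
+/*
+ * Illustrative usage sketch, not taken from the original sources: parse a
+ * UUID string into its 16-byte binary form.
+ *
+ *	uint8_t bin[UUID_BYTES_LENGTH];
+ *	char str[] = "aabbccdd-eeff-4099-8877-665544332211";
+ *
+ *	if (read_uuid(bin, str) != 0) {
+ *		WARN("Malformed UUID string\n");
+ *	}
+ *
+ * On any parse error the destination buffer is zeroed and -EINVAL is returned.
+ */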
+
+/*
+ * Helper function to check if 2 UUIDs match.
+ */
+bool uuid_match(uint32_t *uuid1, uint32_t *uuid2)
+{
+ return !memcmp(uuid1, uuid2, sizeof(uint32_t) * 4);
+}
+
+/*
+ * Helper function to copy from one UUID struct to another.
+ */
+void copy_uuid(uint32_t *to_uuid, uint32_t *from_uuid)
+{
+ to_uuid[0] = from_uuid[0];
+ to_uuid[1] = from_uuid[1];
+ to_uuid[2] = from_uuid[2];
+ to_uuid[3] = from_uuid[3];
+}
+
+bool is_null_uuid(uint32_t *uuid)
+{
+ return (uuid[0] == 0 && uuid[1] == 0 &&
+ uuid[2] == 0 && uuid[3] == 0);
+}