Diffstat (limited to 'include/lib')
-rw-r--r--  include/lib/bakery_lock.h  105
-rw-r--r--  include/lib/bl_aux_params/bl_aux_params.h  30
-rw-r--r--  include/lib/bootmarker_capture.h  22
-rw-r--r--  include/lib/cassert.h  23
-rw-r--r--  include/lib/coreboot.h  46
-rw-r--r--  include/lib/cpus/aarch32/aem_generic.h  15
-rw-r--r--  include/lib/cpus/aarch32/cortex_a12.h  22
-rw-r--r--  include/lib/cpus/aarch32/cortex_a15.h  30
-rw-r--r--  include/lib/cpus/aarch32/cortex_a17.h  27
-rw-r--r--  include/lib/cpus/aarch32/cortex_a32.h  22
-rw-r--r--  include/lib/cpus/aarch32/cortex_a5.h  22
-rw-r--r--  include/lib/cpus/aarch32/cortex_a53.h  73
-rw-r--r--  include/lib/cpus/aarch32/cortex_a57.h  84
-rw-r--r--  include/lib/cpus/aarch32/cortex_a7.h  22
-rw-r--r--  include/lib/cpus/aarch32/cortex_a72.h  63
-rw-r--r--  include/lib/cpus/aarch32/cortex_a9.h  33
-rw-r--r--  include/lib/cpus/aarch32/cpu_macros.S  259
-rw-r--r--  include/lib/cpus/aarch64/a64fx.h  15
-rw-r--r--  include/lib/cpus/aarch64/aem_generic.h  18
-rw-r--r--  include/lib/cpus/aarch64/cortex_a35.h  29
-rw-r--r--  include/lib/cpus/aarch64/cortex_a510.h  55
-rw-r--r--  include/lib/cpus/aarch64/cortex_a520.h  23
-rw-r--r--  include/lib/cpus/aarch64/cortex_a53.h  85
-rw-r--r--  include/lib/cpus/aarch64/cortex_a55.h  48
-rw-r--r--  include/lib/cpus/aarch64/cortex_a57.h  88
-rw-r--r--  include/lib/cpus/aarch64/cortex_a65.h  31
-rw-r--r--  include/lib/cpus/aarch64/cortex_a65ae.h  31
-rw-r--r--  include/lib/cpus/aarch64/cortex_a710.h  65
-rw-r--r--  include/lib/cpus/aarch64/cortex_a715.h  26
-rw-r--r--  include/lib/cpus/aarch64/cortex_a72.h  84
-rw-r--r--  include/lib/cpus/aarch64/cortex_a720.h  26
-rw-r--r--  include/lib/cpus/aarch64/cortex_a73.h  45
-rw-r--r--  include/lib/cpus/aarch64/cortex_a75.h  55
-rw-r--r--  include/lib/cpus/aarch64/cortex_a76.h  51
-rw-r--r--  include/lib/cpus/aarch64/cortex_a76ae.h  28
-rw-r--r--  include/lib/cpus/aarch64/cortex_a77.h  45
-rw-r--r--  include/lib/cpus/aarch64/cortex_a78.h  58
-rw-r--r--  include/lib/cpus/aarch64/cortex_a78_ae.h  31
-rw-r--r--  include/lib/cpus/aarch64/cortex_a78c.h  50
-rw-r--r--  include/lib/cpus/aarch64/cortex_blackhawk.h  23
-rw-r--r--  include/lib/cpus/aarch64/cortex_chaberton.h  23
-rw-r--r--  include/lib/cpus/aarch64/cortex_gelas.h  31
-rw-r--r--  include/lib/cpus/aarch64/cortex_x1.h  32
-rw-r--r--  include/lib/cpus/aarch64/cortex_x2.h  62
-rw-r--r--  include/lib/cpus/aarch64/cortex_x3.h  50
-rw-r--r--  include/lib/cpus/aarch64/cortex_x4.h  26
-rw-r--r--  include/lib/cpus/aarch64/cpu_macros.S  636
-rw-r--r--  include/lib/cpus/aarch64/cpuamu.h  48
-rw-r--r--  include/lib/cpus/aarch64/denver.h  54
-rw-r--r--  include/lib/cpus/aarch64/dsu_def.h  42
-rw-r--r--  include/lib/cpus/aarch64/generic.h  18
-rw-r--r--  include/lib/cpus/aarch64/neoverse_e1.h  31
-rw-r--r--  include/lib/cpus/aarch64/neoverse_hermes.h  23
-rw-r--r--  include/lib/cpus/aarch64/neoverse_n1.h  70
-rw-r--r--  include/lib/cpus/aarch64/neoverse_n2.h  72
-rw-r--r--  include/lib/cpus/aarch64/neoverse_n_common.h  18
-rw-r--r--  include/lib/cpus/aarch64/neoverse_poseidon.h  27
-rw-r--r--  include/lib/cpus/aarch64/neoverse_v1.h  51
-rw-r--r--  include/lib/cpus/aarch64/neoverse_v2.h  53
-rw-r--r--  include/lib/cpus/aarch64/nevis.h  23
-rw-r--r--  include/lib/cpus/aarch64/qemu_max.h  22
-rw-r--r--  include/lib/cpus/aarch64/rainier.h  66
-rw-r--r--  include/lib/cpus/aarch64/travis.h  29
-rw-r--r--  include/lib/cpus/cpu_ops.h  152
-rw-r--r--  include/lib/cpus/errata.h  85
-rw-r--r--  include/lib/cpus/wa_cve_2017_5715.h  12
-rw-r--r--  include/lib/cpus/wa_cve_2018_3639.h  12
-rw-r--r--  include/lib/cpus/wa_cve_2022_23960.h  12
-rw-r--r--  include/lib/debugfs.h  83
-rw-r--r--  include/lib/el3_runtime/aarch32/context.h  70
-rw-r--r--  include/lib/el3_runtime/aarch64/context.h  559
-rw-r--r--  include/lib/el3_runtime/context_mgmt.h  95
-rw-r--r--  include/lib/el3_runtime/cpu_data.h  237
-rw-r--r--  include/lib/el3_runtime/pubsub.h  106
-rw-r--r--  include/lib/el3_runtime/pubsub_events.h  43
-rw-r--r--  include/lib/extensions/amu.h  72
-rw-r--r--  include/lib/extensions/brbe.h  18
-rw-r--r--  include/lib/extensions/mpam.h  26
-rw-r--r--  include/lib/extensions/pauth.h  18
-rw-r--r--  include/lib/extensions/pmuv3.h  19
-rw-r--r--  include/lib/extensions/ras.h  203
-rw-r--r--  include/lib/extensions/ras_arch.h  265
-rw-r--r--  include/lib/extensions/sme.h  51
-rw-r--r--  include/lib/extensions/spe.h  28
-rw-r--r--  include/lib/extensions/sve.h  28
-rw-r--r--  include/lib/extensions/sys_reg_trace.h  42
-rw-r--r--  include/lib/extensions/trbe.h  22
-rw-r--r--  include/lib/extensions/trf.h  22
-rw-r--r--  include/lib/fconf/fconf.h  70
-rw-r--r--  include/lib/fconf/fconf_amu_getter.h  20
-rw-r--r--  include/lib/fconf/fconf_dyn_cfg_getter.h  38
-rw-r--r--  include/lib/fconf/fconf_mpmm_getter.h  20
-rw-r--r--  include/lib/fconf/fconf_tbbr_getter.h  32
-rw-r--r--  include/lib/gpt_rme/gpt_rme.h  280
-rw-r--r--  include/lib/libc/aarch32/endian_.h  146
-rw-r--r--  include/lib/libc/aarch32/float.h  100
-rw-r--r--  include/lib/libc/aarch32/inttypes_.h  28
-rw-r--r--  include/lib/libc/aarch32/limits_.h  26
-rw-r--r--  include/lib/libc/aarch32/stddef_.h  15
-rw-r--r--  include/lib/libc/aarch32/stdint_.h  28
-rw-r--r--  include/lib/libc/aarch32/stdio_.h  15
-rw-r--r--  include/lib/libc/aarch64/endian_.h  128
-rw-r--r--  include/lib/libc/aarch64/float.h  94
-rw-r--r--  include/lib/libc/aarch64/inttypes_.h  28
-rw-r--r--  include/lib/libc/aarch64/limits_.h  26
-rw-r--r--  include/lib/libc/aarch64/setjmp_.h  30
-rw-r--r--  include/lib/libc/aarch64/stddef_.h  15
-rw-r--r--  include/lib/libc/aarch64/stdint_.h  31
-rw-r--r--  include/lib/libc/aarch64/stdio_.h  15
-rw-r--r--  include/lib/libc/arm_acle.h  24
-rw-r--r--  include/lib/libc/assert.h  34
-rw-r--r--  include/lib/libc/cdefs.h  39
-rw-r--r--  include/lib/libc/endian.h  191
-rw-r--r--  include/lib/libc/errno.h  169
-rw-r--r--  include/lib/libc/inttypes.h  41
-rw-r--r--  include/lib/libc/limits.h  19
-rw-r--r--  include/lib/libc/setjmp.h  20
-rw-r--r--  include/lib/libc/stdarg.h  20
-rw-r--r--  include/lib/libc/stdbool.h  17
-rw-r--r--  include/lib/libc/stddef.h  27
-rw-r--r--  include/lib/libc/stdint.h  122
-rw-r--r--  include/lib/libc/stdio.h  31
-rw-r--r--  include/lib/libc/stdlib.h  32
-rw-r--r--  include/lib/libc/string.h  34
-rw-r--r--  include/lib/libc/sys/cdefs.h  922
-rw-r--r--  include/lib/libc/time.h  18
-rw-r--r--  include/lib/libfdt/fdt.h  66
-rw-r--r--  include/lib/libfdt/libfdt.h  2154
-rw-r--r--  include/lib/libfdt/libfdt_env.h  96
-rw-r--r--  include/lib/mmio.h  76
-rw-r--r--  include/lib/mpmm/mpmm.h  57
-rw-r--r--  include/lib/object_pool.h  79
-rw-r--r--  include/lib/optee_utils.h  56
-rw-r--r--  include/lib/pmf/aarch32/pmf_asm_macros.S  28
-rw-r--r--  include/lib/pmf/aarch64/pmf_asm_macros.S  30
-rw-r--r--  include/lib/pmf/pmf.h  75
-rw-r--r--  include/lib/pmf/pmf_helpers.h  256
-rw-r--r--  include/lib/psa/delegated_attestation.h  109
-rw-r--r--  include/lib/psa/measured_boot.h  126
-rw-r--r--  include/lib/psa/psa/client.h  102
-rw-r--r--  include/lib/psa/psa/error.h  42
-rw-r--r--  include/lib/psa/psa_manifest/sid.h  23
-rw-r--r--  include/lib/psa/rss_crypto_defs.h  58
-rw-r--r--  include/lib/psa/rss_platform_api.h  60
-rw-r--r--  include/lib/psci/psci.h  384
-rw-r--r--  include/lib/psci/psci_lib.h  100
-rw-r--r--  include/lib/runtime_instr.h  25
-rw-r--r--  include/lib/semihosting.h  60
-rw-r--r--  include/lib/smccc.h  230
-rw-r--r--  include/lib/spinlock.h  29
-rw-r--r--  include/lib/transfer_list.h  114
-rw-r--r--  include/lib/utils.h  94
-rw-r--r--  include/lib/utils_def.h  213
-rw-r--r--  include/lib/xlat_mpu/xlat_mpu.h  27
-rw-r--r--  include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h  72
-rw-r--r--  include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h  96
-rw-r--r--  include/lib/xlat_tables/xlat_mmu_helpers.h  94
-rw-r--r--  include/lib/xlat_tables/xlat_tables.h  99
-rw-r--r--  include/lib/xlat_tables/xlat_tables_arch.h  31
-rw-r--r--  include/lib/xlat_tables/xlat_tables_compat.h  16
-rw-r--r--  include/lib/xlat_tables/xlat_tables_defs.h  183
-rw-r--r--  include/lib/xlat_tables/xlat_tables_v2.h  416
-rw-r--r--  include/lib/xlat_tables/xlat_tables_v2_helpers.h  180
-rw-r--r--  include/lib/zlib/tf_gunzip.h  16
164 files changed, 14612 insertions, 0 deletions
diff --git a/include/lib/bakery_lock.h b/include/lib/bakery_lock.h
new file mode 100644
index 0000000..5d165c9
--- /dev/null
+++ b/include/lib/bakery_lock.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2013-2018, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef BAKERY_LOCK_H
+#define BAKERY_LOCK_H
+
+#include <platform_def.h>
+
+#define BAKERY_LOCK_MAX_CPUS PLATFORM_CORE_COUNT
+
+#ifndef __ASSEMBLER__
+#include <cdefs.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <lib/utils_def.h>
+
+/*****************************************************************************
+ * Internal helpers used by the bakery lock implementation.
+ ****************************************************************************/
+
+/* Convert a ticket to priority */
+static inline unsigned int bakery_get_priority(unsigned int t, unsigned int pos)
+{
+ return (t << 8) | pos;
+}
+
+#define CHOOSING_TICKET U(0x1)
+#define CHOSEN_TICKET U(0x0)
+
+static inline bool bakery_is_choosing(unsigned int info)
+{
+ return (info & 1U) == CHOOSING_TICKET;
+}
+
+static inline unsigned int bakery_ticket_number(unsigned int info)
+{
+ return (info >> 1) & 0x7FFFU;
+}
+
+static inline uint16_t make_bakery_data(unsigned int choosing, unsigned int num)
+{
+ unsigned int val = (choosing & 0x1U) | (num << 1);
+
+ return (uint16_t) val;
+}
+
+/*****************************************************************************
+ * External bakery lock interface.
+ ****************************************************************************/
+#if USE_COHERENT_MEM
+/*
+ * Bakery locks are stored in coherent memory
+ *
+ * Each lock's data is contiguous and fully allocated by the compiler
+ */
+
+typedef struct bakery_lock {
+ /*
+ * The lock_data is a bit-field of 2 members:
+ * Bit[0] : choosing. This field is set when the CPU is
+ * choosing its bakery number.
+ * Bits[1 - 15] : number. This is the bakery number allocated.
+ */
+ volatile uint16_t lock_data[BAKERY_LOCK_MAX_CPUS];
+} bakery_lock_t;
+
+#else
+/*
+ * Bakery locks are stored in normal .bss memory
+ *
+ * Each lock's data is spread across multiple cache lines, one per CPU,
+ * but multiple locks can share the same cache line.
+ * The compiler will allocate enough memory for one CPU's bakery locks;
+ * the remaining cache lines are allocated by the linker script.
+ */
+
+typedef struct bakery_info {
+ /*
+ * The lock_data is a bit-field of 2 members:
+ * Bit[0] : choosing. This field is set when the CPU is
+ * choosing its bakery number.
+ * Bits[1 - 15] : number. This is the bakery number allocated.
+ */
+ volatile uint16_t lock_data;
+} bakery_info_t;
+
+typedef bakery_info_t bakery_lock_t;
+
+#endif /* USE_COHERENT_MEM */
+
+static inline void bakery_lock_init(bakery_lock_t *bakery) {}
+void bakery_lock_get(bakery_lock_t *bakery);
+void bakery_lock_release(bakery_lock_t *bakery);
+
+#define DEFINE_BAKERY_LOCK(_name) bakery_lock_t _name __section(".bakery_lock")
+
+#define DECLARE_BAKERY_LOCK(_name) extern bakery_lock_t _name
+
+
+#endif /* __ASSEMBLER__ */
+#endif /* BAKERY_LOCK_H */
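For reference, a minimal usage sketch of this interface: a lock instance is defined with DEFINE_BAKERY_LOCK() and taken around a critical section. The lock name, register address and helper function are illustrative only; mmio_setbits_32() comes from lib/mmio.h, the rest from the header above.

#include <lib/bakery_lock.h>
#include <lib/mmio.h>

/* One lock instance, placed in the .bakery_lock section by the macro. */
DEFINE_BAKERY_LOCK(my_dev_lock);

/* Serialise a read-modify-write of a shared device register across CPUs. */
static void my_dev_set_bits(uintptr_t reg, uint32_t bits)
{
	bakery_lock_get(&my_dev_lock);
	mmio_setbits_32(reg, bits);
	bakery_lock_release(&my_dev_lock);
}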
diff --git a/include/lib/bl_aux_params/bl_aux_params.h b/include/lib/bl_aux_params/bl_aux_params.h
new file mode 100644
index 0000000..c2da96c
--- /dev/null
+++ b/include/lib/bl_aux_params/bl_aux_params.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef LIB_BL_AUX_PARAMS_H
+#define LIB_BL_AUX_PARAMS_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <export/lib/bl_aux_params/bl_aux_params_exp.h>
+
+/*
+ * Handler function that handles an individual aux parameter. Return true if
+ * the parameter was handled, and false if bl_aux_params_parse() should make its
+ * own attempt at handling it (for generic parameters).
+ */
+typedef bool (*bl_aux_param_handler_t)(struct bl_aux_param_header *param);
+
+/*
+ * Interprets head as the start of an aux parameter list, and passes the
+ * parameters individually to handler(). Handles generic parameters directly if
+ * handler() hasn't already done so. If only generic parameters are expected,
+ * handler() can be NULL.
+ */
+void bl_aux_params_parse(u_register_t head,
+ bl_aux_param_handler_t handler);
+
+#endif /* LIB_BL_AUX_PARAMS_H */
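A sketch of a platform-side handler built on this interface, assuming the bl_aux_param_uint64 wrapper from the exported header; the parameter type value, variable and function names are hypothetical.

#include <lib/bl_aux_params/bl_aux_params.h>

/* Hypothetical vendor-specific parameter type and its storage. */
#define BL_AUX_PARAM_MY_VENDOR_BASE U(0x1)
static uint64_t my_vendor_base;

static bool my_plat_param_handler(struct bl_aux_param_header *param)
{
	if (param->type == BL_AUX_PARAM_MY_VENDOR_BASE) {
		my_vendor_base = ((struct bl_aux_param_uint64 *)param)->value;
		return true;
	}
	/* Not ours: let bl_aux_params_parse() try the generic parameters. */
	return false;
}

/* Typically called from early platform setup with the list head taken
 * from a boot argument register. */
void my_plat_parse_aux_params(u_register_t head)
{
	bl_aux_params_parse(head, my_plat_param_handler);
}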
diff --git a/include/lib/bootmarker_capture.h b/include/lib/bootmarker_capture.h
new file mode 100644
index 0000000..31fe048
--- /dev/null
+++ b/include/lib/bootmarker_capture.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef BOOTMARKER_CAPTURE_H
+#define BOOTMARKER_CAPTURE_H
+
+#define BL1_ENTRY U(0)
+#define BL1_EXIT U(1)
+#define BL2_ENTRY U(2)
+#define BL2_EXIT U(3)
+#define BL31_ENTRY U(4)
+#define BL31_EXIT U(5)
+#define BL_TOTAL_IDS U(6)
+
+#ifdef __ASSEMBLER__
+PMF_DECLARE_CAPTURE_TIMESTAMP(bl_svc)
+#endif /*__ASSEMBLER__*/
+
+#endif /*BOOTMARKER_CAPTURE_H*/
diff --git a/include/lib/cassert.h b/include/lib/cassert.h
new file mode 100644
index 0000000..512a2ad
--- /dev/null
+++ b/include/lib/cassert.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2014-2018, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CASSERT_H
+#define CASSERT_H
+
+#include <cdefs.h>
+
+/*******************************************************************************
+ * Macro to flag a compile time assertion. It uses the preprocessor to generate
+ * an invalid C construct if 'cond' evaluates to false.
+ * The following compilation error is triggered if the assertion fails:
+ * "error: size of array 'msg' is negative"
+ * The 'unused' attribute ensures that the unused typedef does not emit a
+ * compiler warning.
+ ******************************************************************************/
+#define CASSERT(cond, msg) \
+ typedef char msg[(cond) ? 1 : -1] __unused
+
+#endif /* CASSERT_H */
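For illustration, a typical use of the macro: assert a structure layout assumption at build time. The structure and message names are arbitrary.

#include <stdint.h>

#include <lib/cassert.h>

typedef struct {
	uint32_t lo;
	uint32_t hi;
} reg_pair_t;

/* Triggers "size of array ... is negative" if the layout ever changes. */
CASSERT(sizeof(reg_pair_t) == 8U, assert_reg_pair_size_mismatch);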
diff --git a/include/lib/coreboot.h b/include/lib/coreboot.h
new file mode 100644
index 0000000..c8e1b2d
--- /dev/null
+++ b/include/lib/coreboot.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2017-2020, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef COREBOOT_H
+#define COREBOOT_H
+
+#include <stdint.h>
+
+typedef struct {
+ uint32_t type; /* always 2 (memory-mapped) on ARM */
+ uint32_t baseaddr;
+ uint32_t baud;
+ uint32_t regwidth; /* in bytes, i.e. usually 4 */
+ uint32_t input_hertz;
+ uint32_t uart_pci_addr; /* unused on current ARM systems */
+} coreboot_serial_t;
+extern coreboot_serial_t coreboot_serial;
+
+#define COREBOOT_MAX_MEMRANGES 32 /* libpayload also uses this limit */
+
+typedef struct __packed {
+ uint64_t start;
+ uint64_t size;
+ uint32_t type;
+} coreboot_memrange_t;
+extern coreboot_memrange_t coreboot_memranges[COREBOOT_MAX_MEMRANGES];
+
+typedef enum {
+ CB_MEM_NONE = 0, /* coreboot will never report this */
+ CB_MEM_RAM = 1,
+ CB_MEM_RESERVED = 2,
+ CB_MEM_ACPI = 3,
+ CB_MEM_NVS = 4,
+ CB_MEM_UNUSABLE = 5,
+ CB_MEM_VENDOR_RSVD = 6,
+ CB_MEM_TABLE = 16,
+} coreboot_memory_t;
+
+coreboot_memory_t coreboot_get_memory_type(uintptr_t start, size_t size);
+void coreboot_table_setup(void *base);
+void coreboot_get_table_location(uint64_t *address, uint32_t *size);
+
+#endif /* COREBOOT_H */
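A small sketch of how a platform port might consume this interface, for example to check that a buffer handed over by the normal world lies entirely in memory that coreboot reported as usable RAM; the function name is illustrative.

#include <stdbool.h>
#include <stddef.h>

#include <lib/coreboot.h>

static bool buffer_is_coreboot_ram(uintptr_t base, size_t size)
{
	return coreboot_get_memory_type(base, size) == CB_MEM_RAM;
}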
diff --git a/include/lib/cpus/aarch32/aem_generic.h b/include/lib/cpus/aarch32/aem_generic.h
new file mode 100644
index 0000000..f631f26
--- /dev/null
+++ b/include/lib/cpus/aarch32/aem_generic.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2016-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef AEM_GENERIC_H
+#define AEM_GENERIC_H
+
+#include <lib/utils_def.h>
+
+/* BASE AEM midr for revision 0 */
+#define BASE_AEM_MIDR U(0x410FD0F0)
+
+#endif /* AEM_GENERIC_H */
diff --git a/include/lib/cpus/aarch32/cortex_a12.h b/include/lib/cpus/aarch32/cortex_a12.h
new file mode 100644
index 0000000..789b4cf
--- /dev/null
+++ b/include/lib/cpus/aarch32/cortex_a12.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A12_H
+#define CORTEX_A12_H
+
+#include <lib/utils_def.h>
+
+/*******************************************************************************
+ * Cortex-A12 midr with version/revision set to 0
+ ******************************************************************************/
+#define CORTEX_A12_MIDR U(0x410FC0D0)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A12_ACTLR_SMP_BIT (U(1) << 6)
+
+#endif /* CORTEX_A12_H */
diff --git a/include/lib/cpus/aarch32/cortex_a15.h b/include/lib/cpus/aarch32/cortex_a15.h
new file mode 100644
index 0000000..aca4d34
--- /dev/null
+++ b/include/lib/cpus/aarch32/cortex_a15.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A15_H
+#define CORTEX_A15_H
+
+#include <lib/utils_def.h>
+
+/*******************************************************************************
+ * Auxiliary Control Register 2 specific definitions.
+ ******************************************************************************/
+#define CORTEX_A15_ACTLR2 p15, 1, c15, c0, 4
+
+#define CORTEX_A15_ACTLR2_INV_DCC_BIT (U(1) << 0)
+
+/*******************************************************************************
+ * Cortex-A15 midr with version/revision set to 0
+ ******************************************************************************/
+#define CORTEX_A15_MIDR U(0x410FC0F0)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A15_ACTLR_INV_BTB_BIT (U(1) << 0)
+#define CORTEX_A15_ACTLR_SMP_BIT (U(1) << 6)
+
+#endif /* CORTEX_A15_H */
diff --git a/include/lib/cpus/aarch32/cortex_a17.h b/include/lib/cpus/aarch32/cortex_a17.h
new file mode 100644
index 0000000..b9e754a
--- /dev/null
+++ b/include/lib/cpus/aarch32/cortex_a17.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A17_H
+#define CORTEX_A17_H
+
+#include <lib/utils_def.h>
+
+/*******************************************************************************
+ * Cortex-A17 midr with version/revision set to 0
+ ******************************************************************************/
+#define CORTEX_A17_MIDR U(0x410FC0E0)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A17_ACTLR_SMP_BIT (U(1) << 6)
+
+/*******************************************************************************
+ * Implementation defined register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A17_IMP_DEF_REG1 p15, 0, c15, c0, 1
+
+#endif /* CORTEX_A17_H */
diff --git a/include/lib/cpus/aarch32/cortex_a32.h b/include/lib/cpus/aarch32/cortex_a32.h
new file mode 100644
index 0000000..841898a
--- /dev/null
+++ b/include/lib/cpus/aarch32/cortex_a32.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2016-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A32_H
+#define CORTEX_A32_H
+
+#include <lib/utils_def.h>
+
+/* Cortex-A32 Main ID register for revision 0 */
+#define CORTEX_A32_MIDR U(0x410FD010)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ * CPUECTLR_EL1 is an implementation-specific register.
+ ******************************************************************************/
+#define CORTEX_A32_CPUECTLR_EL1 p15, 1, c15
+#define CORTEX_A32_CPUECTLR_SMPEN_BIT (ULL(1) << 6)
+
+#endif /* CORTEX_A32_H */
diff --git a/include/lib/cpus/aarch32/cortex_a5.h b/include/lib/cpus/aarch32/cortex_a5.h
new file mode 100644
index 0000000..c0763f9
--- /dev/null
+++ b/include/lib/cpus/aarch32/cortex_a5.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A5_H
+#define CORTEX_A5_H
+
+#include <lib/utils_def.h>
+
+/*******************************************************************************
+ * Cortex-A5 midr with version/revision set to 0
+ ******************************************************************************/
+#define CORTEX_A5_MIDR U(0x410FC050)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A5_ACTLR_SMP_BIT (U(1) << 6)
+
+#endif /* CORTEX_A5_H */
diff --git a/include/lib/cpus/aarch32/cortex_a53.h b/include/lib/cpus/aarch32/cortex_a53.h
new file mode 100644
index 0000000..b9bb310
--- /dev/null
+++ b/include/lib/cpus/aarch32/cortex_a53.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2016-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A53_H
+#define CORTEX_A53_H
+
+#include <lib/utils_def.h>
+
+/* Cortex-A53 midr for revision 0 */
+#define CORTEX_A53_MIDR U(0x410FD030)
+
+/* Retention timer tick definitions */
+#define RETENTION_ENTRY_TICKS_2 U(0x1)
+#define RETENTION_ENTRY_TICKS_8 U(0x2)
+#define RETENTION_ENTRY_TICKS_32 U(0x3)
+#define RETENTION_ENTRY_TICKS_64 U(0x4)
+#define RETENTION_ENTRY_TICKS_128 U(0x5)
+#define RETENTION_ENTRY_TICKS_256 U(0x6)
+#define RETENTION_ENTRY_TICKS_512 U(0x7)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_ECTLR p15, 1, c15
+
+#define CORTEX_A53_ECTLR_SMP_BIT (U(1) << 6)
+
+#define CORTEX_A53_ECTLR_CPU_RET_CTRL_SHIFT U(0)
+#define CORTEX_A53_ECTLR_CPU_RET_CTRL_MASK (ULL(0x7) << CORTEX_A53_ECTLR_CPU_RET_CTRL_SHIFT)
+
+#define CORTEX_A53_ECTLR_FPU_RET_CTRL_SHIFT U(3)
+#define CORTEX_A53_ECTLR_FPU_RET_CTRL_MASK (ULL(0x7) << CORTEX_A53_ECTLR_FPU_RET_CTRL_SHIFT)
+
+/*******************************************************************************
+ * CPU Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_MERRSR p15, 2, c15
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_CPUACTLR p15, 0, c15
+
+#define CORTEX_A53_CPUACTLR_ENDCCASCI_SHIFT U(44)
+#define CORTEX_A53_CPUACTLR_ENDCCASCI (ULL(1) << CORTEX_A53_CPUACTLR_ENDCCASCI_SHIFT)
+#define CORTEX_A53_CPUACTLR_DTAH_SHIFT U(24)
+#define CORTEX_A53_CPUACTLR_DTAH (ULL(1) << CORTEX_A53_CPUACTLR_DTAH_SHIFT)
+
+/*******************************************************************************
+ * L2 Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_L2ACTLR p15, 1, c15, c0, 0
+
+#define CORTEX_A53_L2ACTLR_ENABLE_UNIQUECLEAN (U(1) << 14)
+#define CORTEX_A53_L2ACTLR_DISABLE_CLEAN_PUSH (U(1) << 3)
+
+/*******************************************************************************
+ * L2 Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_L2ECTLR p15, 1, c9, c0, 3
+
+#define CORTEX_A53_L2ECTLR_RET_CTRL_SHIFT U(0)
+#define CORTEX_A53_L2ECTLR_RET_CTRL_MASK (U(0x7) << CORTEX_A53_L2ECTLR_RET_CTRL_SHIFT)
+
+/*******************************************************************************
+ * L2 Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_L2MERRSR p15, 3, c15
+
+#endif /* CORTEX_A53_H */
diff --git a/include/lib/cpus/aarch32/cortex_a57.h b/include/lib/cpus/aarch32/cortex_a57.h
new file mode 100644
index 0000000..bb977ff
--- /dev/null
+++ b/include/lib/cpus/aarch32/cortex_a57.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A57_H
+#define CORTEX_A57_H
+
+#include <lib/utils_def.h>
+
+/* Cortex-A57 midr for revision 0 */
+#define CORTEX_A57_MIDR U(0x410FD070)
+
+/* Retention timer tick definitions */
+#define RETENTION_ENTRY_TICKS_2 U(0x1)
+#define RETENTION_ENTRY_TICKS_8 U(0x2)
+#define RETENTION_ENTRY_TICKS_32 U(0x3)
+#define RETENTION_ENTRY_TICKS_64 U(0x4)
+#define RETENTION_ENTRY_TICKS_128 U(0x5)
+#define RETENTION_ENTRY_TICKS_256 U(0x6)
+#define RETENTION_ENTRY_TICKS_512 U(0x7)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A57_ECTLR p15, 1, c15
+
+#define CORTEX_A57_ECTLR_SMP_BIT (ULL(1) << 6)
+#define CORTEX_A57_ECTLR_DIS_TWD_ACC_PFTCH_BIT (ULL(1) << 38)
+#define CORTEX_A57_ECTLR_L2_IPFTCH_DIST_MASK (ULL(0x3) << 35)
+#define CORTEX_A57_ECTLR_L2_DPFTCH_DIST_MASK (ULL(0x3) << 32)
+
+#define CORTEX_A57_ECTLR_CPU_RET_CTRL_SHIFT U(0)
+#define CORTEX_A57_ECTLR_CPU_RET_CTRL_MASK (ULL(0x7) << CORTEX_A57_ECTLR_CPU_RET_CTRL_SHIFT)
+
+/*******************************************************************************
+ * CPU Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A57_CPUMERRSR p15, 2, c15
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A57_CPUACTLR p15, 0, c15
+
+#define CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_DMB (ULL(1) << 59)
+#define CORTEX_A57_CPUACTLR_DIS_DMB_NULLIFICATION (ULL(1) << 58)
+#define CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_STORE (ULL(1) << 55)
+#define CORTEX_A57_CPUACTLR_GRE_NGRE_AS_NGNRE (ULL(1) << 54)
+#define CORTEX_A57_CPUACTLR_DIS_OVERREAD (ULL(1) << 52)
+#define CORTEX_A57_CPUACTLR_NO_ALLOC_WBWA (ULL(1) << 49)
+#define CORTEX_A57_CPUACTLR_DCC_AS_DCCI (ULL(1) << 44)
+#define CORTEX_A57_CPUACTLR_FORCE_FPSCR_FLUSH (ULL(1) << 38)
+#define CORTEX_A57_CPUACTLR_DIS_INSTR_PREFETCH (ULL(1) << 32)
+#define CORTEX_A57_CPUACTLR_DIS_STREAMING (ULL(3) << 27)
+#define CORTEX_A57_CPUACTLR_DIS_L1_STREAMING (ULL(3) << 25)
+#define CORTEX_A57_CPUACTLR_DIS_INDIRECT_PREDICTOR (ULL(1) << 4)
+
+/*******************************************************************************
+ * L2 Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A57_L2CTLR p15, 1, c9, c0, 2
+
+#define CORTEX_A57_L2CTLR_DATA_RAM_LATENCY_SHIFT U(0)
+#define CORTEX_A57_L2CTLR_TAG_RAM_LATENCY_SHIFT U(6)
+
+#define CORTEX_A57_L2_DATA_RAM_LATENCY_3_CYCLES U(0x2)
+#define CORTEX_A57_L2_TAG_RAM_LATENCY_3_CYCLES U(0x2)
+
+/*******************************************************************************
+ * L2 Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A57_L2ECTLR p15, 1, c9, c0, 3
+
+#define CORTEX_A57_L2ECTLR_RET_CTRL_SHIFT U(0)
+#define CORTEX_A57_L2ECTLR_RET_CTRL_MASK (U(0x7) << CORTEX_A57_L2ECTLR_RET_CTRL_SHIFT)
+
+/*******************************************************************************
+ * L2 Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A57_L2MERRSR p15, 3, c15
+
+#endif /* CORTEX_A57_H */
diff --git a/include/lib/cpus/aarch32/cortex_a7.h b/include/lib/cpus/aarch32/cortex_a7.h
new file mode 100644
index 0000000..16fbfaa
--- /dev/null
+++ b/include/lib/cpus/aarch32/cortex_a7.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A7_H
+#define CORTEX_A7_H
+
+#include <lib/utils_def.h>
+
+/*******************************************************************************
+ * Cortex-A7 midr with version/revision set to 0
+ ******************************************************************************/
+#define CORTEX_A7_MIDR U(0x410FC070)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A7_ACTLR_SMP_BIT (U(1) << 6)
+
+#endif /* CORTEX_A7_H */
diff --git a/include/lib/cpus/aarch32/cortex_a72.h b/include/lib/cpus/aarch32/cortex_a72.h
new file mode 100644
index 0000000..0a3a23a
--- /dev/null
+++ b/include/lib/cpus/aarch32/cortex_a72.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A72_H
+#define CORTEX_A72_H
+
+#include <lib/utils_def.h>
+
+/* Cortex-A72 midr for revision 0 */
+#define CORTEX_A72_MIDR U(0x410FD080)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_ECTLR p15, 1, c15
+
+#define CORTEX_A72_ECTLR_SMP_BIT (ULL(1) << 6)
+#define CORTEX_A72_ECTLR_DIS_TWD_ACC_PFTCH_BIT (ULL(1) << 38)
+#define CORTEX_A72_ECTLR_L2_IPFTCH_DIST_MASK (ULL(0x3) << 35)
+#define CORTEX_A72_ECTLR_L2_DPFTCH_DIST_MASK (ULL(0x3) << 32)
+
+/*******************************************************************************
+ * CPU Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_MERRSR p15, 2, c15
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_CPUACTLR p15, 0, c15
+
+#define CORTEX_A72_CPUACTLR_DISABLE_L1_DCACHE_HW_PFTCH (ULL(1) << 56)
+#define CORTEX_A72_CPUACTLR_DIS_LOAD_PASS_STORE (ULL(1) << 55)
+#define CORTEX_A72_CPUACTLR_NO_ALLOC_WBWA (ULL(1) << 49)
+#define CORTEX_A72_CPUACTLR_DCC_AS_DCCI (ULL(1) << 44)
+#define CORTEX_A72_CPUACTLR_DIS_INSTR_PREFETCH (ULL(1) << 32)
+#define CORTEX_A72_CPUACTLR_DELAY_EXCLUSIVE_SNOOP (ULL(1) << 31)
+
+/*******************************************************************************
+ * L2 Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_L2CTLR p15, 1, c9, c0, 2
+
+#define CORTEX_A72_L2CTLR_EL1_ECC_AND_PARITY_ENABLE (ULL(1) << 21)
+#define CORTEX_A72_L2CTLR_EL1_DATA_INLINE_ECC_ENABLE (ULL(1) << 20)
+
+#define CORTEX_A72_L2CTLR_DATA_RAM_LATENCY_SHIFT U(0)
+#define CORTEX_A72_L2CTLR_TAG_RAM_LATENCY_SHIFT U(6)
+
+#define CORTEX_A72_L2_DATA_RAM_LATENCY_3_CYCLES U(0x2)
+#define CORTEX_A72_L2_DATA_RAM_LATENCY_4_CYCLES U(0x3)
+#define CORTEX_A72_L2_TAG_RAM_LATENCY_2_CYCLES U(0x1)
+#define CORTEX_A72_L2_TAG_RAM_LATENCY_3_CYCLES U(0x2)
+
+/*******************************************************************************
+ * L2 Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_L2MERRSR p15, 3, c15
+
+#endif /* CORTEX_A72_H */
diff --git a/include/lib/cpus/aarch32/cortex_a9.h b/include/lib/cpus/aarch32/cortex_a9.h
new file mode 100644
index 0000000..337bad9
--- /dev/null
+++ b/include/lib/cpus/aarch32/cortex_a9.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A9_H
+#define CORTEX_A9_H
+
+#include <lib/utils_def.h>
+
+/*******************************************************************************
+ * Cortex-A9 midr with version/revision set to 0
+ ******************************************************************************/
+#define CORTEX_A9_MIDR U(0x410FC090)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A9_ACTLR_SMP_BIT (U(1) << 6)
+#define CORTEX_A9_ACTLR_FLZW_BIT (U(1) << 3)
+
+/*******************************************************************************
+ * CPU Power Control Register
+ ******************************************************************************/
+#define PCR p15, 0, c15, c0, 0
+
+#ifndef __ASSEMBLER__
+#include <arch_helpers.h>
+DEFINE_COPROCR_RW_FUNCS(pcr, PCR)
+#endif
+
+#endif /* CORTEX_A9_H */
diff --git a/include/lib/cpus/aarch32/cpu_macros.S b/include/lib/cpus/aarch32/cpu_macros.S
new file mode 100644
index 0000000..096e0b1
--- /dev/null
+++ b/include/lib/cpus/aarch32/cpu_macros.S
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef CPU_MACROS_S
+#define CPU_MACROS_S
+
+#include <lib/cpus/cpu_ops.h>
+#include <lib/cpus/errata.h>
+
+ /*
+ * Write given expressions as words
+ *
+ * _count:
+ * Write at least _count words. If the given number of expressions
+ * is less than _count, repeat the last expression to fill _count
+ * words in total
+ * _rest:
+ * Optional list of expressions. _this is for parameter extraction
+ * only, and has no significance to the caller
+ *
+ * Invoked as:
+ * fill_constants 2, foo, bar, blah, ...
+ */
+ .macro fill_constants _count:req, _this, _rest:vararg
+ .ifgt \_count
+ /* Write the current expression */
+ .ifb \_this
+ .error "Nothing to fill"
+ .endif
+ .word \_this
+
+ /* Invoke recursively for remaining expressions */
+ .ifnb \_rest
+ fill_constants \_count-1, \_rest
+ .else
+ fill_constants \_count-1, \_this
+ .endif
+ .endif
+ .endm
+
+ /*
+ * Declare CPU operations
+ *
+ * _name:
+ * Name of the CPU for which operations are being specified
+ * _midr:
+ * Numeric value expected to read from CPU's MIDR
+ * _resetfunc:
+ * Reset function for the CPU. If there's no CPU reset function,
+ * specify CPU_NO_RESET_FUNC
+ * _power_down_ops:
+ * Comma-separated list of functions to perform power-down
+ * operations on the CPU. At least one, and up to
+ * CPU_MAX_PWR_DWN_OPS number of functions may be specified.
+ * Starting at power level 0, these functions shall handle power
+ * down at subsequent power levels. If there aren't exactly
+ * CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
+ * used to handle power down at subsequent levels
+ */
+ .macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
+ _power_down_ops:vararg
+ .section .cpu_ops, "a"
+ .align 2
+ .type cpu_ops_\_name, %object
+ .word \_midr
+#if defined(IMAGE_AT_EL3)
+ .word \_resetfunc
+#endif
+#ifdef IMAGE_BL32
+ /* Insert list of functions */
+ fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
+#endif
+
+ /*
+ * It is possible (although unlikely) that a cpu may have no errata in
+ * code. In that case the start label will not be defined. The list is
+ * inteded to be used in a loop, so define it as zero-length for
+ * intended to be used in a loop, so define it as zero-length for
+ * of the cpu file (after all errata have been parsed) we can be sure
+ * that we are at the end of the list. Some cpus call the macro twice,
+ * so only do this once.
+ */
+ .pushsection .rodata.errata_entries
+ .ifndef \_name\()_errata_list_start
+ \_name\()_errata_list_start:
+ .endif
+ /* some call this multiple times, so only do this once */
+ .ifndef \_name\()_errata_list_end
+ \_name\()_errata_list_end:
+ .endif
+ .popsection
+
+ /* and now put them in cpu_ops */
+ .word \_name\()_errata_list_start
+ .word \_name\()_errata_list_end
+
+#if REPORT_ERRATA
+ .ifndef \_name\()_cpu_str
+ /*
+ * Place errata reported flag, and the spinlock to arbitrate access to
+ * it in the data section.
+ */
+ .pushsection .data
+ define_asm_spinlock \_name\()_errata_lock
+ \_name\()_errata_reported:
+ .word 0
+ .popsection
+
+ /* Place CPU string in rodata */
+ .pushsection .rodata
+ \_name\()_cpu_str:
+ .asciz "\_name"
+ .popsection
+ .endif
+
+ /*
+ * Mandatory errata status printing function for CPUs of
+ * this class.
+ */
+ .word \_name\()_errata_report
+ .word \_name\()_cpu_str
+
+#ifdef IMAGE_BL32
+ /* Pointers to errata lock and reported flag */
+ .word \_name\()_errata_lock
+ .word \_name\()_errata_reported
+#endif
+#endif
+ .endm
+
+#if REPORT_ERRATA
+ /*
+ * Print the status of a CPU erratum
+ *
+ * _chosen:
+ * Identifier indicating whether or not a CPU errata has been
+ * compiled in.
+ * _cpu:
+ * Name of the CPU
+ * _id:
+ * Errata identifier
+ * _rev_var:
+ * Register containing the combined value of the CPU revision and variant
+ * - typically the return value of cpu_get_rev_var
+ */
+ .macro report_errata _chosen, _cpu, _id, _rev_var=r4
+ /* Stash a string with errata ID */
+ .pushsection .rodata
+ \_cpu\()_errata_\_id\()_str:
+ .asciz "\_id"
+ .popsection
+
+ /* Check whether errata applies */
+ mov r0, \_rev_var
+ bl check_errata_\_id
+
+ .ifeq \_chosen
+ /*
+ * Errata workaround has not been compiled in. If the errata would have
+ * applied had it been compiled in, print its status as missing.
+ */
+ cmp r0, #0
+ movne r0, #ERRATA_MISSING
+ .endif
+ ldr r1, =\_cpu\()_cpu_str
+ ldr r2, =\_cpu\()_errata_\_id\()_str
+ bl errata_print_msg
+ .endm
+#endif
+ /*
+ * Helper macro that reads the part number of the current CPU and jumps
+ * to the given label if it matches the CPU MIDR provided.
+ *
+ * Clobbers: r0-r1
+ */
+ .macro jump_if_cpu_midr _cpu_midr, _label
+ ldcopr r0, MIDR
+ ubfx r0, r0, #MIDR_PN_SHIFT, #12
+ ldr r1, =((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
+ cmp r0, r1
+ beq \_label
+ .endm
+
+/*
+ * NOTE an erratum and CVE id could clash. However, both numbers are very large
+ * and the probability is minuscule. Working around this makes code very
+ * complicated and extremely difficult to read so it is not considered. In the
+ * unlikely event that this does happen, prepending the CVE id with a 0 should
+ * resolve the conflict
+ */
+
+/*
+ * Add an entry for this erratum to the errata framework
+ *
+ * _cpu:
+ * Name of cpu as given to declare_cpu_ops
+ *
+ * _cve:
+ * Whether erratum is a CVE. CVE year if yes, 0 otherwise
+ *
+ * _id:
+ * Erratum or CVE number. Please combine with the previous field with the
+ * ERRATUM or CVE macros
+ *
+ * _chosen:
+ * Compile time flag on whether the erratum is included
+ *
+ * _special:
+ * The special non-standard name of an erratum
+ */
+.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _special
+ .pushsection .rodata.errata_entries
+ .align 2
+ .ifndef \_cpu\()_errata_list_start
+ \_cpu\()_errata_list_start:
+ .endif
+
+ /* unused on AArch32, maintain for portability */
+ .word 0
+ /* TODO(errata ABI): this prevents all checker functions from
+ * being optimised away. Can be done away with unless the ABI
+ * needs them */
+ .ifnb \_special
+ .word check_errata_\_special
+ .elseif \_cve
+ .word check_errata_cve_\_cve\()_\_id
+ .else
+ .word check_errata_\_id
+ .endif
+ /* Will fit CVEs with up to 10 characters in the ID field */
+ .word \_id
+ .hword \_cve
+ .byte \_chosen
+ /* TODO(errata ABI): mitigated field for known but unmitigated
+ * errata */
+ .byte 0x1
+ .popsection
+.endm
+
+/*
+ * Maintain compatibility with the old scheme of "each cpu has its own reporter".
+ * TODO remove entirely once all cpus have been converted. This includes the
+ * cpu_ops entry, as print_errata_status can call this directly for all cpus
+ */
+.macro errata_report_shim _cpu:req
+ #if REPORT_ERRATA
+ func \_cpu\()_errata_report
+ push {r12, lr}
+
+ bl generic_errata_report
+
+ pop {r12, lr}
+ bx lr
+ endfunc \_cpu\()_errata_report
+ #endif
+.endm
+#endif /* CPU_MACROS_S */
diff --git a/include/lib/cpus/aarch64/a64fx.h b/include/lib/cpus/aarch64/a64fx.h
new file mode 100644
index 0000000..b7342b0
--- /dev/null
+++ b/include/lib/cpus/aarch64/a64fx.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2022, Fujitsu Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef A64FX_H
+#define A64FX_H
+
+#include <lib/utils_def.h>
+
+/* A64FX midr for revision 0 */
+#define A64FX_MIDR U(0x461f0010)
+
+#endif /* A64FX_H */
diff --git a/include/lib/cpus/aarch64/aem_generic.h b/include/lib/cpus/aarch64/aem_generic.h
new file mode 100644
index 0000000..acb6adb
--- /dev/null
+++ b/include/lib/cpus/aarch64/aem_generic.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2014-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef AEM_GENERIC_H
+#define AEM_GENERIC_H
+
+#include <lib/utils_def.h>
+
+/* BASE AEM midr for revision 0 */
+#define BASE_AEM_MIDR U(0x410FD0F0)
+
+/* Foundation AEM midr for revision 0 */
+#define FOUNDATION_AEM_MIDR U(0x410FD000)
+
+#endif /* AEM_GENERIC_H */
diff --git a/include/lib/cpus/aarch64/cortex_a35.h b/include/lib/cpus/aarch64/cortex_a35.h
new file mode 100644
index 0000000..cef2960
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a35.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2016-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A35_H
+#define CORTEX_A35_H
+
+#include <lib/utils_def.h>
+
+/* Cortex-A35 Main ID register for revision 0 */
+#define CORTEX_A35_MIDR U(0x410FD040)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ * CPUECTLR_EL1 is an implementation-specific register.
+ ******************************************************************************/
+#define CORTEX_A35_CPUECTLR_EL1 S3_1_C15_C2_1
+#define CORTEX_A35_CPUECTLR_SMPEN_BIT (ULL(1) << 6)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A35_CPUACTLR_EL1 S3_1_C15_C2_0
+
+#define CORTEX_A35_CPUACTLR_EL1_ENDCCASCI (ULL(1) << 44)
+
+#endif /* CORTEX_A35_H */
diff --git a/include/lib/cpus/aarch64/cortex_a510.h b/include/lib/cpus/aarch64/cortex_a510.h
new file mode 100644
index 0000000..337aac3
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a510.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A510_H
+#define CORTEX_A510_H
+
+#define CORTEX_A510_MIDR U(0x410FD460)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A510_CPUECTLR_EL1 S3_0_C15_C1_4
+#define CORTEX_A510_CPUECTLR_EL1_READPREFERUNIQUE_SHIFT U(19)
+#define CORTEX_A510_CPUECTLR_EL1_READPREFERUNIQUE_WIDTH U(1)
+#define CORTEX_A510_CPUECTLR_EL1_READPREFERUNIQUE_DISABLE U(1)
+#define CORTEX_A510_CPUECTLR_EL1_RSCTL_SHIFT U(23)
+#define CORTEX_A510_CPUECTLR_EL1_NTCTL_SHIFT U(46)
+#define CORTEX_A510_CPUECTLR_EL1_ATOM_EXECALLINSTRNEAR U(2)
+#define CORTEX_A510_CPUECTLR_EL1_ATOM_SHIFT U(38)
+#define CORTEX_A510_CPUECTLR_EL1_ATOM_WIDTH U(3)
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A510_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define CORTEX_A510_CPUPWRCTLR_EL1_CORE_PWRDN_BIT U(1)
+
+/*******************************************************************************
+ * Complex auxiliary control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A510_CMPXACTLR_EL1 S3_0_C15_C1_3
+#define CORTEX_A510_CMPXACTLR_EL1_ALIAS_LOADSTORE_DISABLE U(1)
+#define CORTEX_A510_CMPXACTLR_EL1_ALIAS_LOADSTORE_SHIFT U(25)
+#define CORTEX_A510_CMPXACTLR_EL1_ALIAS_LOADSTORE_WIDTH U(1)
+#define CORTEX_A510_CMPXACTLR_EL1_SNPPREFERUNIQUE_DISABLE U(3)
+#define CORTEX_A510_CMPXACTLR_EL1_SNPPREFERUNIQUE_SHIFT U(10)
+#define CORTEX_A510_CMPXACTLR_EL1_SNPPREFERUNIQUE_WIDTH U(2)
+
+/*******************************************************************************
+ * Auxiliary control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A510_CPUACTLR_EL1 S3_0_C15_C1_0
+#define CORTEX_A510_CPUACTLR_EL1_BIT_17 (ULL(1) << 17)
+#define CORTEX_A510_CPUACTLR_EL1_BIT_38 (ULL(1) << 38)
+#define CORTEX_A510_CPUACTLR_EL1_ALIAS_LOADSTORE_DISABLE U(1)
+#define CORTEX_A510_CPUACTLR_EL1_ALIAS_LOADSTORE_SHIFT U(18)
+#define CORTEX_A510_CPUACTLR_EL1_ALIAS_LOADSTORE_WIDTH U(1)
+#define CORTEX_A510_CPUACTLR_EL1_DATA_CORRUPT_DISABLE U(1)
+#define CORTEX_A510_CPUACTLR_EL1_DATA_CORRUPT_SHIFT U(18)
+#define CORTEX_A510_CPUACTLR_EL1_DATA_CORRUPT_WIDTH U(1)
+
+#endif /* CORTEX_A510_H */
diff --git a/include/lib/cpus/aarch64/cortex_a520.h b/include/lib/cpus/aarch64/cortex_a520.h
new file mode 100644
index 0000000..4176981
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a520.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A520_H
+#define CORTEX_A520_H
+
+#define CORTEX_A520_MIDR U(0x410FD800)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A520_CPUECTLR_EL1 S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A520_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define CORTEX_A520_CPUPWRCTLR_EL1_CORE_PWRDN_BIT U(1)
+
+#endif /* CORTEX_A520_H */
diff --git a/include/lib/cpus/aarch64/cortex_a53.h b/include/lib/cpus/aarch64/cortex_a53.h
new file mode 100644
index 0000000..18796ee
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a53.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2014-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A53_H
+#define CORTEX_A53_H
+
+#include <lib/utils_def.h>
+
+/* Cortex-A53 midr for revision 0 */
+#define CORTEX_A53_MIDR U(0x410FD030)
+
+/* Retention timer tick definitions */
+#define RETENTION_ENTRY_TICKS_2 U(0x1)
+#define RETENTION_ENTRY_TICKS_8 U(0x2)
+#define RETENTION_ENTRY_TICKS_32 U(0x3)
+#define RETENTION_ENTRY_TICKS_64 U(0x4)
+#define RETENTION_ENTRY_TICKS_128 U(0x5)
+#define RETENTION_ENTRY_TICKS_256 U(0x6)
+#define RETENTION_ENTRY_TICKS_512 U(0x7)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_ECTLR_EL1 S3_1_C15_C2_1
+
+#define CORTEX_A53_ECTLR_SMP_BIT (ULL(1) << 6)
+
+#define CORTEX_A53_ECTLR_CPU_RET_CTRL_SHIFT U(0)
+#define CORTEX_A53_ECTLR_CPU_RET_CTRL_MASK (ULL(0x7) << CORTEX_A53_ECTLR_CPU_RET_CTRL_SHIFT)
+
+#define CORTEX_A53_ECTLR_FPU_RET_CTRL_SHIFT U(3)
+#define CORTEX_A53_ECTLR_FPU_RET_CTRL_MASK (ULL(0x7) << CORTEX_A53_ECTLR_FPU_RET_CTRL_SHIFT)
+
+/*******************************************************************************
+ * CPU Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_MERRSR_EL1 S3_1_C15_C2_2
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_CPUACTLR_EL1 S3_1_C15_C2_0
+
+#define CORTEX_A53_CPUACTLR_EL1_ENDCCASCI_SHIFT U(44)
+#define CORTEX_A53_CPUACTLR_EL1_ENDCCASCI (ULL(1) << CORTEX_A53_CPUACTLR_EL1_ENDCCASCI_SHIFT)
+#define CORTEX_A53_CPUACTLR_EL1_RADIS_SHIFT U(27)
+#define CORTEX_A53_CPUACTLR_EL1_RADIS (ULL(3) << CORTEX_A53_CPUACTLR_EL1_RADIS_SHIFT)
+#define CORTEX_A53_CPUACTLR_EL1_L1RADIS_SHIFT U(25)
+#define CORTEX_A53_CPUACTLR_EL1_L1RADIS (ULL(3) << CORTEX_A53_CPUACTLR_EL1_L1RADIS_SHIFT)
+#define CORTEX_A53_CPUACTLR_EL1_DTAH_SHIFT U(24)
+#define CORTEX_A53_CPUACTLR_EL1_DTAH (ULL(1) << CORTEX_A53_CPUACTLR_EL1_DTAH_SHIFT)
+#define CORTEX_A53_CPUACTLR_EL1_L1PCTL_SHIFT U(13)
+#define CORTEX_A53_CPUACTLR_EL1_L1PCTL (ULL(7) << CORTEX_A53_CPUACTLR_EL1_L1PCTL_SHIFT)
+
+/*******************************************************************************
+ * L2 Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_L2ACTLR_EL1 S3_1_C15_C0_0
+
+#define CORTEX_A53_L2ACTLR_ENABLE_UNIQUECLEAN (U(1) << 14)
+#define CORTEX_A53_L2ACTLR_DISABLE_CLEAN_PUSH (U(1) << 3)
+/*******************************************************************************
+ * L2 Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_L2ECTLR_EL1 S3_1_C11_C0_3
+
+#define CORTEX_A53_L2ECTLR_RET_CTRL_SHIFT U(0)
+#define CORTEX_A53_L2ECTLR_RET_CTRL_MASK (U(0x7) << CORTEX_A53_L2ECTLR_RET_CTRL_SHIFT)
+
+/*******************************************************************************
+ * L2 Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A53_L2MERRSR_EL1 S3_1_C15_C2_3
+
+/*******************************************************************************
+ * Helper function to access a53_cpuectlr_el1 register on Cortex-A53 CPUs
+ ******************************************************************************/
+#ifndef __ASSEMBLER__
+DEFINE_RENAME_SYSREG_RW_FUNCS(a53_cpuectlr_el1, CORTEX_A53_ECTLR_EL1)
+#endif /* __ASSEMBLER__ */
+
+#endif /* CORTEX_A53_H */
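As a usage sketch of the accessors generated above: platform power-management code can set the SMP bit of CPUECTLR_EL1 with a read-modify-write before enabling coherency. Illustrative only; it assumes execution on a Cortex-A53 core with arch_helpers.h available for isb().

#include <arch_helpers.h>
#include <cortex_a53.h>

static void a53_set_smp_bit(void)
{
	write_a53_cpuectlr_el1(read_a53_cpuectlr_el1() |
			       CORTEX_A53_ECTLR_SMP_BIT);
	isb();
}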
diff --git a/include/lib/cpus/aarch64/cortex_a55.h b/include/lib/cpus/aarch64/cortex_a55.h
new file mode 100644
index 0000000..0a1593a
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a55.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A55_H
+#define CORTEX_A55_H
+
+#include <lib/utils_def.h>
+
+/* Cortex-A55 MIDR for revision 0 */
+#define CORTEX_A55_MIDR U(0x410fd050)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A55_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define CORTEX_A55_CPUECTLR_EL1 S3_0_C15_C1_4
+
+#define CORTEX_A55_CPUECTLR_EL1_L1WSCTL (ULL(3) << 25)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A55_CPUACTLR_EL1 S3_0_C15_C1_0
+
+#define CORTEX_A55_CPUACTLR_EL1_DISABLE_WRITE_STREAMING (ULL(1) << 24)
+#define CORTEX_A55_CPUACTLR_EL1_DISABLE_DUAL_ISSUE (ULL(1) << 31)
+#define CORTEX_A55_CPUACTLR_EL1_DISABLE_L1_PAGEWALKS (ULL(1) << 49)
+
+/*******************************************************************************
+ * CPU Identification register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A55_CLIDR_EL1 S3_1_C0_C0_1
+
+#define CORTEX_A55_CLIDR_EL1_CTYPE3 (ULL(7) << 6)
+
+/* Definitions of register field mask in CORTEX_A55_CPUPWRCTLR_EL1 */
+#define CORTEX_A55_CORE_PWRDN_EN_MASK U(0x1)
+
+/* Instruction patching registers */
+#define CPUPSELR_EL3 S3_6_C15_C8_0
+#define CPUPCR_EL3 S3_6_C15_C8_1
+#define CPUPOR_EL3 S3_6_C15_C8_2
+#define CPUPMR_EL3 S3_6_C15_C8_3
+
+#endif /* CORTEX_A55_H */
diff --git a/include/lib/cpus/aarch64/cortex_a57.h b/include/lib/cpus/aarch64/cortex_a57.h
new file mode 100644
index 0000000..19ac513
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a57.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2014-2019, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A57_H
+#define CORTEX_A57_H
+
+#include <lib/utils_def.h>
+
+/* Cortex-A57 midr for revision 0 */
+#define CORTEX_A57_MIDR U(0x410FD070)
+
+/* Retention timer tick definitions */
+#define RETENTION_ENTRY_TICKS_2 U(0x1)
+#define RETENTION_ENTRY_TICKS_8 U(0x2)
+#define RETENTION_ENTRY_TICKS_32 U(0x3)
+#define RETENTION_ENTRY_TICKS_64 U(0x4)
+#define RETENTION_ENTRY_TICKS_128 U(0x5)
+#define RETENTION_ENTRY_TICKS_256 U(0x6)
+#define RETENTION_ENTRY_TICKS_512 U(0x7)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A57_ECTLR_EL1 S3_1_C15_C2_1
+
+#define CORTEX_A57_ECTLR_SMP_BIT (ULL(1) << 6)
+#define CORTEX_A57_ECTLR_DIS_TWD_ACC_PFTCH_BIT (ULL(1) << 38)
+#define CORTEX_A57_ECTLR_L2_IPFTCH_DIST_MASK (ULL(0x3) << 35)
+#define CORTEX_A57_ECTLR_L2_DPFTCH_DIST_MASK (ULL(0x3) << 32)
+
+#define CORTEX_A57_ECTLR_CPU_RET_CTRL_SHIFT U(0)
+#define CORTEX_A57_ECTLR_CPU_RET_CTRL_MASK (ULL(0x7) << CORTEX_A57_ECTLR_CPU_RET_CTRL_SHIFT)
+
+/*******************************************************************************
+ * CPU Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A57_MERRSR_EL1 S3_1_C15_C2_2
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A57_CPUACTLR_EL1 S3_1_C15_C2_0
+
+#define CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_DMB (ULL(1) << 59)
+#define CORTEX_A57_CPUACTLR_EL1_DIS_DMB_NULLIFICATION (ULL(1) << 58)
+#define CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_STORE (ULL(1) << 55)
+#define CORTEX_A57_CPUACTLR_EL1_GRE_NGRE_AS_NGNRE (ULL(1) << 54)
+#define CORTEX_A57_CPUACTLR_EL1_DIS_OVERREAD (ULL(1) << 52)
+#define CORTEX_A57_CPUACTLR_EL1_NO_ALLOC_WBWA (ULL(1) << 49)
+#define CORTEX_A57_CPUACTLR_EL1_DCC_AS_DCCI (ULL(1) << 44)
+#define CORTEX_A57_CPUACTLR_EL1_FORCE_FPSCR_FLUSH (ULL(1) << 38)
+#define CORTEX_A57_CPUACTLR_EL1_DIS_INSTR_PREFETCH (ULL(1) << 32)
+#define CORTEX_A57_CPUACTLR_EL1_DIS_STREAMING (ULL(3) << 27)
+#define CORTEX_A57_CPUACTLR_EL1_EN_NC_LOAD_FWD (ULL(1) << 24)
+#define CORTEX_A57_CPUACTLR_EL1_DIS_L1_STREAMING (ULL(3) << 25)
+#define CORTEX_A57_CPUACTLR_EL1_DIS_INDIRECT_PREDICTOR (ULL(1) << 4)
+
+/*******************************************************************************
+ * L2 Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A57_L2CTLR_EL1 S3_1_C11_C0_2
+
+#define CORTEX_A57_L2CTLR_DATA_RAM_LATENCY_SHIFT U(0)
+#define CORTEX_A57_L2CTLR_TAG_RAM_LATENCY_SHIFT U(6)
+
+#define CORTEX_A57_L2_DATA_RAM_LATENCY_3_CYCLES U(0x2)
+#define CORTEX_A57_L2_TAG_RAM_LATENCY_3_CYCLES U(0x2)
+
+#define CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT (U(1) << 21)
+
+/*******************************************************************************
+ * L2 Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A57_L2ECTLR_EL1 S3_1_C11_C0_3
+
+#define CORTEX_A57_L2ECTLR_RET_CTRL_SHIFT U(0)
+#define CORTEX_A57_L2ECTLR_RET_CTRL_MASK (U(0x7) << CORTEX_A57_L2ECTLR_RET_CTRL_SHIFT)
+
+/*******************************************************************************
+ * L2 Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A57_L2MERRSR_EL1 S3_1_C15_C2_3
+
+#endif /* CORTEX_A57_H */
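Editor's note: the retention tick values above are intended to be programmed into the CPU_RET_CTRL field of the Extended Control register. A minimal sketch (not part of this patch) of how a CPU .S file might use these definitions at reset, assuming the retention field is still at its reset value of zero and SMP mode is to be enabled:

	mrs	x0, CORTEX_A57_ECTLR_EL1
	orr	x0, x0, #CORTEX_A57_ECTLR_SMP_BIT
	/* request CPU retention entry after 32 ticks */
	orr	x0, x0, #(RETENTION_ENTRY_TICKS_32 << CORTEX_A57_ECTLR_CPU_RET_CTRL_SHIFT)
	msr	CORTEX_A57_ECTLR_EL1, x0
	isb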
diff --git a/include/lib/cpus/aarch64/cortex_a65.h b/include/lib/cpus/aarch64/cortex_a65.h
new file mode 100644
index 0000000..0df34c9
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a65.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A65_H
+#define CORTEX_A65_H
+
+#include <lib/utils_def.h>
+
+#define CORTEX_A65_MIDR U(0x410FD060)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A65_ECTLR_EL1 S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A65_CPUACTLR_EL1 S3_0_C15_C1_0
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+
+#define CORTEX_A65_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define CORTEX_A65_CPUPWRCTLR_EL1_CORE_PWRDN_BIT (U(1) << 0)
+
+#endif /* CORTEX_A65_H */
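Editor's note: the power-down bit here follows the pattern common to the newer cores in this series: the per-core power-down handler sets it so the core is fully powered off on the next WFI. A hedged sketch of that sequence, using only names defined in this header (the surrounding code is illustrative):

	mrs	x0, CORTEX_A65_CPUPWRCTLR_EL1
	orr	x0, x0, #CORTEX_A65_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
	msr	CORTEX_A65_CPUPWRCTLR_EL1, x0
	isb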
diff --git a/include/lib/cpus/aarch64/cortex_a65ae.h b/include/lib/cpus/aarch64/cortex_a65ae.h
new file mode 100644
index 0000000..bd4a881
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a65ae.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A65AE_H
+#define CORTEX_A65AE_H
+
+#include <lib/utils_def.h>
+
+#define CORTEX_A65AE_MIDR U(0x410FD430)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A65AE_ECTLR_EL1 S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A65AE_CPUACTLR_EL1 S3_0_C15_C1_0
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+
+#define CORTEX_A65AE_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define CORTEX_A65AE_CPUPWRCTLR_EL1_CORE_PWRDN_BIT (U(1) << 0)
+
+#endif /* CORTEX_A65AE_H */
diff --git a/include/lib/cpus/aarch64/cortex_a710.h b/include/lib/cpus/aarch64/cortex_a710.h
new file mode 100644
index 0000000..432e17a
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a710.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A710_H
+#define CORTEX_A710_H
+
+#define CORTEX_A710_MIDR U(0x410FD470)
+
+/* Cortex-A710 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A710_BHB_LOOP_COUNT U(32)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A710_CPUECTLR_EL1 S3_0_C15_C1_4
+#define CORTEX_A710_CPUECTLR_EL1_PFSTIDIS_BIT (ULL(1) << 8)
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A710_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define CORTEX_A710_CPUPWRCTLR_EL1_CORE_PWRDN_BIT U(1)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A710_CPUACTLR_EL1 S3_0_C15_C1_0
+#define CORTEX_A710_CPUACTLR_EL1_BIT_46 (ULL(1) << 46)
+#define CORTEX_A710_CPUACTLR_EL1_BIT_22 (ULL(1) << 22)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register 2 specific definitions.
+ ******************************************************************************/
+#define CORTEX_A710_CPUACTLR2_EL1 S3_0_C15_C1_1
+#define CORTEX_A710_CPUACTLR2_EL1_BIT_40 (ULL(1) << 40)
+#define CORTEX_A710_CPUACTLR2_EL1_BIT_36 (ULL(1) << 36)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register 5 specific definitions.
+ ******************************************************************************/
+#define CORTEX_A710_CPUACTLR5_EL1 S3_0_C15_C8_0
+#define CORTEX_A710_CPUACTLR5_EL1_BIT_13 (ULL(1) << 13)
+#define CORTEX_A710_CPUACTLR5_EL1_BIT_17 (ULL(1) << 17)
+#define CORTEX_A710_CPUACTLR5_EL1_BIT_44 (ULL(1) << 44)
+
+/*******************************************************************************
+ * CPU Extended Control register 2 specific definitions.
+ ******************************************************************************/
+#define CORTEX_A710_CPUECTLR2_EL1 S3_0_C15_C1_5
+#define CORTEX_A710_CPUECTLR2_EL1_PF_MODE_CNSRV ULL(9)
+#define CPUECTLR2_EL1_PF_MODE_LSB U(11)
+#define CPUECTLR2_EL1_PF_MODE_WIDTH U(4)
+
+/*******************************************************************************
+ * CPU Selected Instruction Private register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A710_CPUPSELR_EL3 S3_6_C15_C8_0
+#define CORTEX_A710_CPUPCR_EL3 S3_6_C15_C8_1
+#define CORTEX_A710_CPUPOR_EL3 S3_6_C15_C8_2
+#define CORTEX_A710_CPUPMR_EL3 S3_6_C15_C8_3
+
+#endif /* CORTEX_A710_H */
diff --git a/include/lib/cpus/aarch64/cortex_a715.h b/include/lib/cpus/aarch64/cortex_a715.h
new file mode 100644
index 0000000..950d02f
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a715.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A715_H
+#define CORTEX_A715_H
+
+#define CORTEX_A715_MIDR U(0x410FD4D0)
+
+/* Cortex-A715 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A715_BHB_LOOP_COUNT U(38)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A715_CPUECTLR_EL1 S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A715_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define CORTEX_A715_CPUPWRCTLR_EL1_CORE_PWRDN_BIT U(1)
+
+#endif /* CORTEX_A715_H */
diff --git a/include/lib/cpus/aarch64/cortex_a72.h b/include/lib/cpus/aarch64/cortex_a72.h
new file mode 100644
index 0000000..a00f6d6
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a72.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2015-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A72_H
+#define CORTEX_A72_H
+
+#include <lib/utils_def.h>
+
+/* Cortex-A72 midr for revision 0 */
+#define CORTEX_A72_MIDR U(0x410FD080)
+
+/* Cortex-A72 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A72_BHB_LOOP_COUNT U(8)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_ECTLR_EL1 S3_1_C15_C2_1
+
+#define CORTEX_A72_ECTLR_SMP_BIT (ULL(1) << 6)
+#define CORTEX_A72_ECTLR_DIS_TWD_ACC_PFTCH_BIT (ULL(1) << 38)
+#define CORTEX_A72_ECTLR_L2_IPFTCH_DIST_MASK (ULL(0x3) << 35)
+#define CORTEX_A72_ECTLR_L2_DPFTCH_DIST_MASK (ULL(0x3) << 32)
+
+/*******************************************************************************
+ * CPU Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_MERRSR_EL1 S3_1_C15_C2_2
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_CPUACTLR_EL1 S3_1_C15_C2_0
+
+#define CORTEX_A72_CPUACTLR_EL1_DISABLE_L1_DCACHE_HW_PFTCH (ULL(1) << 56)
+#define CORTEX_A72_CPUACTLR_EL1_DIS_LOAD_PASS_STORE (ULL(1) << 55)
+#define CORTEX_A72_CPUACTLR_EL1_NO_ALLOC_WBWA (ULL(1) << 49)
+#define CORTEX_A72_CPUACTLR_EL1_DCC_AS_DCCI (ULL(1) << 44)
+#define CORTEX_A72_CPUACTLR_EL1_DIS_INSTR_PREFETCH (ULL(1) << 32)
+#define CORTEX_A72_CPUACTLR_EL1_DELAY_EXCLUSIVE_SNOOP (ULL(1) << 31)
+
+/*******************************************************************************
+ * L2 Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_L2ACTLR_EL1 S3_1_C15_C0_0
+
+#define CORTEX_A72_L2ACTLR_FORCE_TAG_BANK_CLK_ACTIVE (ULL(1) << 28)
+#define CORTEX_A72_L2ACTLR_FORCE_L2_LOGIC_CLK_ACTIVE (ULL(1) << 27)
+#define CORTEX_A72_L2ACTLR_FORCE_L2_GIC_TIMER_RCG_CLK_ACTIVE (ULL(1) << 26)
+#define CORTEX_A72_L2ACTLR_ENABLE_UNIQUE_CLEAN (ULL(1) << 14)
+#define CORTEX_A72_L2ACTLR_DISABLE_DSB_WITH_NO_DVM_SYNC (ULL(1) << 11)
+#define CORTEX_A72_L2ACTLR_DISABLE_DVM_CMO_BROADCAST (ULL(1) << 8)
+#define CORTEX_A72_L2ACTLR_ENABLE_HAZARD_DETECT_TIMEOUT (ULL(1) << 7)
+#define CORTEX_A72_L2ACTLR_DISABLE_ACE_SH_OR_CHI (ULL(1) << 6)
+
+/*******************************************************************************
+ * L2 Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_L2CTLR_EL1 S3_1_C11_C0_2
+
+#define CORTEX_A72_L2CTLR_EL1_ECC_AND_PARITY_ENABLE (ULL(1) << 21)
+#define CORTEX_A72_L2CTLR_EL1_DATA_INLINE_ECC_ENABLE (ULL(1) << 20)
+
+#define CORTEX_A72_L2CTLR_DATA_RAM_LATENCY_SHIFT U(0)
+#define CORTEX_A72_L2CTLR_DATA_RAM_SETUP_SHIFT U(5)
+#define CORTEX_A72_L2CTLR_TAG_RAM_LATENCY_SHIFT U(6)
+#define CORTEX_A72_L2CTLR_TAG_RAM_SETUP_SHIFT U(9)
+
+#define CORTEX_A72_L2_DATA_RAM_LATENCY_MASK U(0x7)
+#define CORTEX_A72_L2_TAG_RAM_LATENCY_MASK U(0x7)
+#define CORTEX_A72_L2_DATA_RAM_LATENCY_3_CYCLES U(0x2)
+#define CORTEX_A72_L2_DATA_RAM_LATENCY_4_CYCLES U(0x3)
+#define CORTEX_A72_L2_TAG_RAM_LATENCY_2_CYCLES U(0x1)
+#define CORTEX_A72_L2_TAG_RAM_LATENCY_3_CYCLES U(0x2)
+
+/*******************************************************************************
+ * L2 Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A72_L2MERRSR_EL1 S3_1_C15_C2_3
+
+#endif /* CORTEX_A72_H */
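Editor's note: the latency values and shifts are meant to be combined when L2CTLR_EL1 is programmed early in the boot flow, before the L2 RAMs are in use. An illustrative read-modify-write, assuming both latency fields are still at their reset value so ORR is sufficient (everything outside the register and field names is a sketch):

	mrs	x0, CORTEX_A72_L2CTLR_EL1
	orr	x0, x0, #(CORTEX_A72_L2_DATA_RAM_LATENCY_3_CYCLES << CORTEX_A72_L2CTLR_DATA_RAM_LATENCY_SHIFT)
	orr	x0, x0, #(CORTEX_A72_L2_TAG_RAM_LATENCY_3_CYCLES << CORTEX_A72_L2CTLR_TAG_RAM_LATENCY_SHIFT)
	msr	CORTEX_A72_L2CTLR_EL1, x0
	isb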
diff --git a/include/lib/cpus/aarch64/cortex_a720.h b/include/lib/cpus/aarch64/cortex_a720.h
new file mode 100644
index 0000000..47bbbc0
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a720.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A720_H
+#define CORTEX_A720_H
+
+#define CORTEX_A720_MIDR U(0x410FD810)
+
+/* Cortex-A720 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A720_BHB_LOOP_COUNT U(132)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A720_CPUECTLR_EL1 S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A720_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define CORTEX_A720_CPUPWRCTLR_EL1_CORE_PWRDN_BIT U(1)
+
+#endif /* CORTEX_A720_H */
diff --git a/include/lib/cpus/aarch64/cortex_a73.h b/include/lib/cpus/aarch64/cortex_a73.h
new file mode 100644
index 0000000..ede76d1
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a73.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2016-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A73_H
+#define CORTEX_A73_H
+
+#include <lib/utils_def.h>
+
+/* Cortex-A73 midr for revision 0 */
+#define CORTEX_A73_MIDR U(0x410FD090)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A73_CPUECTLR_EL1 S3_1_C15_C2_1 /* Instruction def. */
+
+#define CORTEX_A73_CPUECTLR_SMP_BIT (ULL(1) << 6)
+
+/*******************************************************************************
+ * L2 Memory Error Syndrome register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A73_L2MERRSR_EL1 S3_1_C15_C2_3 /* Instruction def. */
+
+/*******************************************************************************
+ * CPU implementation defined register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A73_IMP_DEF_REG1 S3_0_C15_C0_0
+
+#define CORTEX_A73_IMP_DEF_REG1_DISABLE_LOAD_PASS_STORE (ULL(1) << 3)
+
+#define CORTEX_A73_DIAGNOSTIC_REGISTER S3_0_C15_C0_1
+
+#define CORTEX_A73_IMP_DEF_REG2 S3_0_C15_C0_2
+
+/*******************************************************************************
+ * Helper function to access a73_cpuectlr_el1 register on Cortex-A73 CPUs
+ ******************************************************************************/
+#ifndef __ASSEMBLER__
+DEFINE_RENAME_SYSREG_RW_FUNCS(a73_cpuectlr_el1, CORTEX_A73_CPUECTLR_EL1)
+#endif /* __ASSEMBLER__ */
+
+#endif /* CORTEX_A73_H */
diff --git a/include/lib/cpus/aarch64/cortex_a75.h b/include/lib/cpus/aarch64/cortex_a75.h
new file mode 100644
index 0000000..ca79991
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a75.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A75_H
+#define CORTEX_A75_H
+
+#include <lib/utils_def.h>
+
+/* Cortex-A75 MIDR */
+#define CORTEX_A75_MIDR U(0x410fd0a0)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A75_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define CORTEX_A75_CPUECTLR_EL1 S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A75_CPUACTLR_EL1 S3_0_C15_C1_0
+
+#define CORTEX_A75_CPUACTLR_EL1_DISABLE_LOAD_PASS_STORE (ULL(1) << 35)
+
+/* Definitions of register field mask in CORTEX_A75_CPUPWRCTLR_EL1 */
+#define CORTEX_A75_CORE_PWRDN_EN_MASK U(0x1)
+
+#define CORTEX_A75_ACTLR_AMEN_BIT (ULL(1) << 4)
+
+/*
+ * The Cortex-A75 core implements five counters, 0-4. Events 0, 1 and 2 are
+ * fixed and always enabled (Group 0). Events 3 and 4 (Group 1) are
+ * programmable through the Event count bits of the corresponding
+ * CPUAMEVTYPER<n> register and are disabled by default. Platforms may
+ * enable them with suitable programming.
+ */
+#define CORTEX_A75_AMU_NR_COUNTERS U(5)
+#define CORTEX_A75_AMU_GROUP0_MASK U(0x7)
+#define CORTEX_A75_AMU_GROUP1_MASK (U(0) << 3)
+
+#ifndef __ASSEMBLER__
+#include <stdint.h>
+
+uint64_t cortex_a75_amu_cnt_read(int idx);
+void cortex_a75_amu_cnt_write(int idx, uint64_t val);
+unsigned int cortex_a75_amu_read_cpuamcntenset_el0(void);
+unsigned int cortex_a75_amu_read_cpuamcntenclr_el0(void);
+void cortex_a75_amu_write_cpuamcntenset_el0(unsigned int mask);
+void cortex_a75_amu_write_cpuamcntenclr_el0(unsigned int mask);
+#endif /* __ASSEMBLER__ */
+
+#endif /* CORTEX_A75_H */
diff --git a/include/lib/cpus/aarch64/cortex_a76.h b/include/lib/cpus/aarch64/cortex_a76.h
new file mode 100644
index 0000000..b2ec8aa
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a76.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A76_H
+#define CORTEX_A76_H
+
+#include <lib/utils_def.h>
+
+/* Cortex-A76 MIDR for revision 0 */
+#define CORTEX_A76_MIDR U(0x410fd0b0)
+
+/* Cortex-A76 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A76_BHB_LOOP_COUNT U(24)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A76_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define CORTEX_A76_CPUECTLR_EL1 S3_0_C15_C1_4
+
+#define CORTEX_A76_CPUECTLR_EL1_WS_THR_L2 (ULL(3) << 24)
+#define CORTEX_A76_CPUECTLR_EL1_BIT_51 (ULL(1) << 51)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A76_CPUACTLR_EL1 S3_0_C15_C1_0
+
+#define CORTEX_A76_CPUACTLR_EL1_DISABLE_STATIC_PREDICTION (ULL(1) << 6)
+
+#define CORTEX_A76_CPUACTLR_EL1_BIT_13 (ULL(1) << 13)
+
+#define CORTEX_A76_CPUACTLR2_EL1 S3_0_C15_C1_1
+
+#define CORTEX_A76_CPUACTLR2_EL1_BIT_2 (ULL(1) << 2)
+#define CORTEX_A76_CPUACTLR2_EL1_BIT_59 (ULL(1) << 59)
+
+#define CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE (ULL(1) << 16)
+
+#define CORTEX_A76_CPUACTLR3_EL1 S3_0_C15_C1_2
+
+#define CORTEX_A76_CPUACTLR3_EL1_BIT_10 (ULL(1) << 10)
+
+
+/* Definitions of register field mask in CORTEX_A76_CPUPWRCTLR_EL1 */
+#define CORTEX_A76_CORE_PWRDN_EN_MASK U(0x1)
+
+#endif /* CORTEX_A76_H */
diff --git a/include/lib/cpus/aarch64/cortex_a76ae.h b/include/lib/cpus/aarch64/cortex_a76ae.h
new file mode 100644
index 0000000..0d30f70
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a76ae.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2019-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A76AE_H
+#define CORTEX_A76AE_H
+
+#include <lib/utils_def.h>
+
+/* Cortex-A76AE MIDR for revision 0 */
+#define CORTEX_A76AE_MIDR U(0x410FD0E0)
+
+/* Cortex-A76AE loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A76AE_BHB_LOOP_COUNT U(24)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A76AE_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+
+/* Definitions of register field mask in CORTEX_A76AE_CPUPWRCTLR_EL1 */
+#define CORTEX_A76AE_CORE_PWRDN_EN_MASK U(0x1)
+
+#define CORTEX_A76AE_CPUECTLR_EL1 S3_0_C15_C1_4
+
+#endif /* CORTEX_A76AE_H */
diff --git a/include/lib/cpus/aarch64/cortex_a77.h b/include/lib/cpus/aarch64/cortex_a77.h
new file mode 100644
index 0000000..39717a3
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a77.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A77_H
+#define CORTEX_A77_H
+
+#include <lib/utils_def.h>
+
+/* Cortex-A77 MIDR */
+#define CORTEX_A77_MIDR U(0x410FD0D0)
+
+/* Cortex-A77 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A77_BHB_LOOP_COUNT U(24)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A77_CPUECTLR_EL1 S3_0_C15_C1_4
+#define CORTEX_A77_CPUECTLR_EL1_BIT_8 (ULL(1) << 8)
+#define CORTEX_A77_CPUECTLR_EL1_BIT_53 (ULL(1) << 53)
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A77_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define CORTEX_A77_CPUPWRCTLR_EL1_CORE_PWRDN_BIT (U(1) << 0)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A77_ACTLR2_EL1 S3_0_C15_C1_1
+#define CORTEX_A77_ACTLR2_EL1_BIT_2 (ULL(1) << 2)
+#define CORTEX_A77_ACTLR2_EL1_BIT_0 ULL(1)
+
+#define CORTEX_A77_CPUPSELR_EL3 S3_6_C15_C8_0
+#define CORTEX_A77_CPUPCR_EL3 S3_6_C15_C8_1
+#define CORTEX_A77_CPUPOR_EL3 S3_6_C15_C8_2
+#define CORTEX_A77_CPUPMR_EL3 S3_6_C15_C8_3
+#define CORTEX_A77_CPUPOR2_EL3 S3_6_C15_C8_4
+#define CORTEX_A77_CPUPMR2_EL3 S3_6_C15_C8_5
+
+#endif /* CORTEX_A77_H */
diff --git a/include/lib/cpus/aarch64/cortex_a78.h b/include/lib/cpus/aarch64/cortex_a78.h
new file mode 100644
index 0000000..2984f82
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a78.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A78_H
+#define CORTEX_A78_H
+
+#include <lib/utils_def.h>
+
+#define CORTEX_A78_MIDR U(0x410FD410)
+
+/* Cortex-A78 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A78_BHB_LOOP_COUNT U(32)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A78_CPUECTLR_EL1 S3_0_C15_C1_4
+#define CORTEX_A78_CPUECTLR_EL1_BIT_8 (ULL(1) << 8)
+#define CORTEX_A78_CPUECTLR_EL1_PF_MODE_CNSRV ULL(3)
+#define CPUECTLR_EL1_PF_MODE_LSB U(6)
+#define CPUECTLR_EL1_PF_MODE_WIDTH U(2)
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A78_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define CORTEX_A78_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT U(1)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A78_ACTLR_TAM_BIT (ULL(1) << 30)
+
+#define CORTEX_A78_ACTLR2_EL1 S3_0_C15_C1_1
+#define CORTEX_A78_ACTLR2_EL1_BIT_0 (ULL(1) << 0)
+#define CORTEX_A78_ACTLR2_EL1_BIT_1 (ULL(1) << 1)
+#define CORTEX_A78_ACTLR2_EL1_BIT_2 (ULL(1) << 2)
+#define CORTEX_A78_ACTLR2_EL1_BIT_40 (ULL(1) << 40)
+
+#define CORTEX_A78_ACTLR3_EL1 S3_0_C15_C1_2
+
+#define CORTEX_A78_ACTLR5_EL1 S3_0_C15_C9_0
+
+/*******************************************************************************
+ * CPU Activity Monitor Unit register specific definitions.
+ ******************************************************************************/
+#define CPUAMCNTENCLR0_EL0 S3_3_C15_C2_4
+#define CPUAMCNTENSET0_EL0 S3_3_C15_C2_5
+#define CPUAMCNTENCLR1_EL0 S3_3_C15_C3_0
+#define CPUAMCNTENSET1_EL0 S3_3_C15_C3_1
+
+#define CORTEX_A78_AMU_GROUP0_MASK U(0xF)
+#define CORTEX_A78_AMU_GROUP1_MASK U(0x7)
+
+#endif /* CORTEX_A78_H */
diff --git a/include/lib/cpus/aarch64/cortex_a78_ae.h b/include/lib/cpus/aarch64/cortex_a78_ae.h
new file mode 100644
index 0000000..4ada845
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a78_ae.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2019-2022, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A78_AE_H
+#define CORTEX_A78_AE_H
+
+#include <cortex_a78.h>
+
+#define CORTEX_A78_AE_MIDR U(0x410FD420)
+
+/* Cortex-A78AE loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A78_AE_BHB_LOOP_COUNT U(32)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A78_AE_CPUECTLR_EL1 CORTEX_A78_CPUECTLR_EL1
+#define CORTEX_A78_AE_CPUECTLR_EL1_BIT_8 CORTEX_A78_CPUECTLR_EL1_BIT_8
+
+/*******************************************************************************
+ * CPU Auxiliary Control register 2 specific definitions.
+ ******************************************************************************/
+#define CORTEX_A78_AE_ACTLR2_EL1 CORTEX_A78_ACTLR2_EL1
+#define CORTEX_A78_AE_ACTLR2_EL1_BIT_0 CORTEX_A78_ACTLR2_EL1_BIT_0
+#define CORTEX_A78_AE_ACTLR2_EL1_BIT_40 CORTEX_A78_ACTLR2_EL1_BIT_40
+
+#endif /* CORTEX_A78_AE_H */
diff --git a/include/lib/cpus/aarch64/cortex_a78c.h b/include/lib/cpus/aarch64/cortex_a78c.h
new file mode 100644
index 0000000..301be69
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a78c.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_A78C_H
+#define CORTEX_A78C_H
+
+
+#define CORTEX_A78C_MIDR U(0x410FD4B1)
+
+/* Cortex-A78C loop count for CVE-2022-23960 mitigation */
+#define CORTEX_A78C_BHB_LOOP_COUNT U(32)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register 2 specific definitions.
+ ******************************************************************************/
+#define CORTEX_A78C_CPUACTLR2_EL1 S3_0_C15_C1_1
+#define CORTEX_A78C_CPUACTLR2_EL1_BIT_0 (ULL(1) << 0)
+#define CORTEX_A78C_CPUACTLR2_EL1_BIT_40 (ULL(1) << 40)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A78C_CPUECTLR_EL1 S3_0_C15_C1_4
+#define CORTEX_A78C_CPUECTLR_EL1_BIT_6 (ULL(1) << 6)
+#define CORTEX_A78C_CPUECTLR_EL1_BIT_7 (ULL(1) << 7)
+#define CORTEX_A78C_CPUECTLR_EL1_MM_ASP_EN (ULL(1) << 53)
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_A78C_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define CORTEX_A78C_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT U(1)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register 3 specific definitions.
+ ******************************************************************************/
+#define CORTEX_A78C_ACTLR3_EL1 S3_0_C15_C1_2
+
+/*******************************************************************************
+ * CPU Implementation Specific Selected Instruction registers
+ ******************************************************************************/
+#define CORTEX_A78C_IMP_CPUPSELR_EL3 S3_6_C15_C8_0
+#define CORTEX_A78C_IMP_CPUPCR_EL3 S3_6_C15_C8_1
+#define CORTEX_A78C_IMP_CPUPOR_EL3 S3_6_C15_C8_2
+#define CORTEX_A78C_IMP_CPUPMR_EL3 S3_6_C15_C8_3
+
+#endif /* CORTEX_A78C_H */
diff --git a/include/lib/cpus/aarch64/cortex_blackhawk.h b/include/lib/cpus/aarch64/cortex_blackhawk.h
new file mode 100644
index 0000000..bfb3039
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_blackhawk.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_BLACKHAWK_H
+#define CORTEX_BLACKHAWK_H
+
+#define CORTEX_BLACKHAWK_MIDR U(0x410FD850)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_BLACKHAWK_CPUECTLR_EL1 S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_BLACKHAWK_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define CORTEX_BLACKHAWK_CPUPWRCTLR_EL1_CORE_PWRDN_BIT U(1)
+
+#endif /* CORTEX_BLACKHAWK_H */
diff --git a/include/lib/cpus/aarch64/cortex_chaberton.h b/include/lib/cpus/aarch64/cortex_chaberton.h
new file mode 100644
index 0000000..8f10b68
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_chaberton.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_CHABERTON_H
+#define CORTEX_CHABERTON_H
+
+#define CORTEX_CHABERTON_MIDR U(0x410FD870)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_CHABERTON_CPUECTLR_EL1 S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_CHABERTON_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define CORTEX_CHABERTON_CPUPWRCTLR_EL1_CORE_PWRDN_BIT U(1)
+
+#endif /* CORTEX_CHABERTON_H */
diff --git a/include/lib/cpus/aarch64/cortex_gelas.h b/include/lib/cpus/aarch64/cortex_gelas.h
new file mode 100644
index 0000000..90bb78f
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_gelas.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_GELAS_H
+#define CORTEX_GELAS_H
+
+#include <lib/utils_def.h>
+
+#define CORTEX_GELAS_MIDR U(0x410FD8B0)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_GELAS_IMP_CPUECTLR_EL1 S3_0_C15_C1_5
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_GELAS_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define CORTEX_GELAS_CPUPWRCTLR_EL1_CORE_PWRDN_BIT U(1)
+
+/*******************************************************************************
+ * SME Control registers
+ ******************************************************************************/
+#define CORTEX_GELAS_SVCRSM S0_3_C4_C2_3
+#define CORTEX_GELAS_SVCRZA S0_3_C4_C4_3
+
+#endif /* CORTEX_GELAS_H */
diff --git a/include/lib/cpus/aarch64/cortex_x1.h b/include/lib/cpus/aarch64/cortex_x1.h
new file mode 100644
index 0000000..e3661a8
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_x1.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2022, Google LLC. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_X1_H
+#define CORTEX_X1_H
+
+/* Cortex-X1 MIDR for r1p0 */
+#define CORTEX_X1_MIDR U(0x411fd440)
+
+/* Cortex-X1 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_X1_BHB_LOOP_COUNT U(32)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_X1_CPUECTLR_EL1 S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_X1_ACTLR2_EL1 S3_0_C15_C1_1
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_X1_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define CORTEX_X1_CORE_PWRDN_EN_MASK U(0x1)
+
+#endif /* CORTEX_X1_H */
diff --git a/include/lib/cpus/aarch64/cortex_x2.h b/include/lib/cpus/aarch64/cortex_x2.h
new file mode 100644
index 0000000..863b8c8
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_x2.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_X2_H
+#define CORTEX_X2_H
+
+#define CORTEX_X2_MIDR U(0x410FD480)
+
+/* Cortex-X2 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_X2_BHB_LOOP_COUNT U(32)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_X2_CPUECTLR_EL1 S3_0_C15_C1_4
+#define CORTEX_X2_CPUECTLR_EL1_PFSTIDIS_BIT (ULL(1) << 8)
+
+/*******************************************************************************
+ * CPU Extended Control register 2 specific definitions
+ ******************************************************************************/
+#define CORTEX_X2_CPUECTLR2_EL1 S3_0_C15_C1_5
+
+#define CORTEX_X2_CPUECTLR2_EL1_PF_MODE_SHIFT U(11)
+#define CORTEX_X2_CPUECTLR2_EL1_PF_MODE_WIDTH U(4)
+#define CORTEX_X2_CPUECTLR2_EL1_PF_MODE_CNSRV ULL(0x9)
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_X2_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define CORTEX_X2_CPUPWRCTLR_EL1_CORE_PWRDN_BIT U(1)
+
+/*******************************************************************************
+ * CPU Auxiliary Control Register definitions
+ ******************************************************************************/
+#define CORTEX_X2_CPUACTLR_EL1 S3_0_C15_C1_0
+#define CORTEX_X2_CPUACTLR_EL1_BIT_22 (ULL(1) << 22)
+
+/*******************************************************************************
+ * CPU Auxiliary Control Register 2 definitions
+ ******************************************************************************/
+#define CORTEX_X2_CPUACTLR2_EL1 S3_0_C15_C1_1
+#define CORTEX_X2_CPUACTLR2_EL1_BIT_40 (ULL(1) << 40)
+
+/*******************************************************************************
+ * CPU Auxiliary Control Register 5 definitions
+ ******************************************************************************/
+#define CORTEX_X2_CPUACTLR5_EL1 S3_0_C15_C8_0
+#define CORTEX_X2_CPUACTLR5_EL1_BIT_17 (ULL(1) << 17)
+
+/*******************************************************************************
+ * CPU Implementation Specific Selected Instruction registers
+ ******************************************************************************/
+#define CORTEX_X2_IMP_CPUPSELR_EL3 S3_6_C15_C8_0
+#define CORTEX_X2_IMP_CPUPCR_EL3 S3_6_C15_C8_1
+#define CORTEX_X2_IMP_CPUPOR_EL3 S3_6_C15_C8_2
+#define CORTEX_X2_IMP_CPUPMR_EL3 S3_6_C15_C8_3
+
+#endif /* CORTEX_X2_H */
diff --git a/include/lib/cpus/aarch64/cortex_x3.h b/include/lib/cpus/aarch64/cortex_x3.h
new file mode 100644
index 0000000..04548ea
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_x3.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_X3_H
+#define CORTEX_X3_H
+
+#define CORTEX_X3_MIDR U(0x410FD4E0)
+
+/* Cortex-X3 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_X3_BHB_LOOP_COUNT U(132)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_X3_CPUECTLR_EL1 S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_X3_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define CORTEX_X3_CPUPWRCTLR_EL1_CORE_PWRDN_BIT U(1)
+#define CORTEX_X3_CPUPWRCTLR_EL1_WFI_RET_CTRL_BITS_SHIFT U(4)
+#define CORTEX_X3_CPUPWRCTLR_EL1_WFE_RET_CTRL_BITS_SHIFT U(7)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register 2 specific definitions.
+ ******************************************************************************/
+#define CORTEX_X3_CPUACTLR2_EL1 S3_0_C15_C1_1
+#define CORTEX_X3_CPUACTLR2_EL1_BIT_36 (ULL(1) << 36)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register 5 specific definitions.
+ ******************************************************************************/
+#define CORTEX_X3_CPUACTLR5_EL1 S3_0_C15_C8_0
+#define CORTEX_X3_CPUACTLR5_EL1_BIT_55 (ULL(1) << 55)
+#define CORTEX_X3_CPUACTLR5_EL1_BIT_56 (ULL(1) << 56)
+
+/*******************************************************************************
+ * CPU Extended Control register 2 specific definitions.
+ ******************************************************************************/
+#define CORTEX_X3_CPUECTLR2_EL1 S3_0_C15_C1_5
+
+#define CORTEX_X3_CPUECTLR2_EL1_PF_MODE_LSB U(11)
+#define CORTEX_X3_CPUECTLR2_EL1_PF_MODE_WIDTH U(4)
+#define CORTEX_X3_CPUECTLR2_EL1_PF_MODE_CNSRV ULL(0x9)
+
+#endif /* CORTEX_X3_H */
diff --git a/include/lib/cpus/aarch64/cortex_x4.h b/include/lib/cpus/aarch64/cortex_x4.h
new file mode 100644
index 0000000..17d07c8
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_x4.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CORTEX_X4_H
+#define CORTEX_X4_H
+
+#define CORTEX_X4_MIDR U(0x410FD821)
+
+/* Cortex-X4 loop count for CVE-2022-23960 mitigation */
+#define CORTEX_X4_BHB_LOOP_COUNT U(132)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_X4_CPUECTLR_EL1 S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define CORTEX_X4_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define CORTEX_X4_CPUPWRCTLR_EL1_CORE_PWRDN_BIT U(1)
+
+#endif /* CORTEX_X4_H */
diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S
new file mode 100644
index 0000000..6faef5d
--- /dev/null
+++ b/include/lib/cpus/aarch64/cpu_macros.S
@@ -0,0 +1,636 @@
+/*
+ * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef CPU_MACROS_S
+#define CPU_MACROS_S
+
+#include <assert_macros.S>
+#include <lib/cpus/cpu_ops.h>
+#include <lib/cpus/errata.h>
+
+ /*
+ * Write given expressions as quad words
+ *
+ * _count:
+ * Write at least _count quad words. If the given number of
+ * expressions is less than _count, repeat the last expression to
+ * fill _count quad words in total
+ * _rest:
+ * Optional list of expressions. _this is for parameter extraction
+ * only, and has no significance to the caller
+ *
+ * Invoked as:
+ * fill_constants 2, foo, bar, blah, ...
+ */
+ .macro fill_constants _count:req, _this, _rest:vararg
+ .ifgt \_count
+ /* Write the current expression */
+ .ifb \_this
+ .error "Nothing to fill"
+ .endif
+ .quad \_this
+
+ /* Invoke recursively for remaining expressions */
+ .ifnb \_rest
+ fill_constants \_count-1, \_rest
+ .else
+ fill_constants \_count-1, \_this
+ .endif
+ .endif
+ .endm
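Editor's note: for illustration, with placeholder symbols foo and bar, the invocation fill_constants 4, foo, bar pads the shorter list by repeating the last expression, expanding to:

	.quad	foo
	.quad	bar
	.quad	bar
	.quad	bar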
+
+ /*
+ * Declare CPU operations
+ *
+ * _name:
+ * Name of the CPU for which operations are being specified
+ * _midr:
+ * Numeric value expected to read from CPU's MIDR
+ * _resetfunc:
+ * Reset function for the CPU. If there's no CPU reset function,
+ * specify CPU_NO_RESET_FUNC
+ * _extra1:
+ * This is a placeholder for future per CPU operations. Currently,
+ * some CPUs use this entry to set a test function to determine if
+ * the workaround for CVE-2017-5715 needs to be applied or not.
+ * _extra2:
+ * This is a placeholder for future per CPU operations. Currently
+ * some CPUs use this entry to set a function to disable the
+ * workaround for CVE-2018-3639.
+ * _extra3:
+ * This is a placeholder for future per CPU operations. Currently,
+ * some CPUs use this entry to set a test function to determine if
+ * the workaround for CVE-2022-23960 needs to be applied or not.
+ * _e_handler:
+ * This is a placeholder for future per CPU exception handlers.
+ * _power_down_ops:
+ * Comma-separated list of functions to perform power-down
+ *		operations on the CPU. At least one, and up to
+ * CPU_MAX_PWR_DWN_OPS number of functions may be specified.
+ * Starting at power level 0, these functions shall handle power
+ * down at subsequent power levels. If there aren't exactly
+ * CPU_MAX_PWR_DWN_OPS functions, the last specified one will be
+ * used to handle power down at subsequent levels
+ */
+ .macro declare_cpu_ops_base _name:req, _midr:req, _resetfunc:req, \
+ _extra1:req, _extra2:req, _extra3:req, _e_handler:req, _power_down_ops:vararg
+ .section .cpu_ops, "a"
+ .align 3
+ .type cpu_ops_\_name, %object
+ .quad \_midr
+#if defined(IMAGE_AT_EL3)
+ .quad \_resetfunc
+#endif
+ .quad \_extra1
+ .quad \_extra2
+ .quad \_extra3
+ .quad \_e_handler
+#ifdef IMAGE_BL31
+ /* Insert list of functions */
+ fill_constants CPU_MAX_PWR_DWN_OPS, \_power_down_ops
+#endif
+ /*
+ * It is possible (although unlikely) that a cpu may have no errata in
+ * code. In that case the start label will not be defined. The list is
+ * intended to be used in a loop, so define it as zero-length for
+ * predictable behaviour. Since this macro is always called at the end
+ * of the cpu file (after all errata have been parsed) we can be sure
+ * that we are at the end of the list. Some cpus call declare_cpu_ops
+ * twice, so only do this once.
+ */
+ .pushsection .rodata.errata_entries
+ .ifndef \_name\()_errata_list_start
+ \_name\()_errata_list_start:
+ .endif
+ .ifndef \_name\()_errata_list_end
+ \_name\()_errata_list_end:
+ .endif
+ .popsection
+
+ /* and now put them in cpu_ops */
+ .quad \_name\()_errata_list_start
+ .quad \_name\()_errata_list_end
+
+#if REPORT_ERRATA
+ .ifndef \_name\()_cpu_str
+ /*
+ * Place errata reported flag, and the spinlock to arbitrate access to
+ * it in the data section.
+ */
+ .pushsection .data
+ define_asm_spinlock \_name\()_errata_lock
+ \_name\()_errata_reported:
+ .word 0
+ .popsection
+
+ /* Place CPU string in rodata */
+ .pushsection .rodata
+ \_name\()_cpu_str:
+ .asciz "\_name"
+ .popsection
+ .endif
+
+
+ /*
+ * Mandatory errata status printing function for CPUs of
+ * this class.
+ */
+ .quad \_name\()_errata_report
+ .quad \_name\()_cpu_str
+
+#ifdef IMAGE_BL31
+ /* Pointers to errata lock and reported flag */
+ .quad \_name\()_errata_lock
+ .quad \_name\()_errata_reported
+#endif /* IMAGE_BL31 */
+#endif /* REPORT_ERRATA */
+
+#if defined(IMAGE_BL31) && CRASH_REPORTING
+ .quad \_name\()_cpu_reg_dump
+#endif
+ .endm
+
+ .macro declare_cpu_ops _name:req, _midr:req, _resetfunc:req, \
+ _power_down_ops:vararg
+ declare_cpu_ops_base \_name, \_midr, \_resetfunc, 0, 0, 0, 0, \
+ \_power_down_ops
+ .endm
+
+ .macro declare_cpu_ops_eh _name:req, _midr:req, _resetfunc:req, \
+ _e_handler:req, _power_down_ops:vararg
+ declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
+ 0, 0, 0, \_e_handler, \_power_down_ops
+ .endm
+
+ .macro declare_cpu_ops_wa _name:req, _midr:req, \
+ _resetfunc:req, _extra1:req, _extra2:req, \
+ _extra3:req, _power_down_ops:vararg
+ declare_cpu_ops_base \_name, \_midr, \_resetfunc, \
+ \_extra1, \_extra2, \_extra3, 0, \_power_down_ops
+ .endm
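Editor's note: a typical invocation, modelled on the Cortex-A53 CPU file (the reset and power-down functions are symbols that file provides; treat this as a sketch rather than a definitive reference):

	declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, cortex_a53_reset_func, \
		cortex_a53_core_pwr_dwn, \
		cortex_a53_cluster_pwr_dwn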
+
+/* TODO can be deleted once all CPUs have been converted */
+#if REPORT_ERRATA
+ /*
+ * Print status of a CPU errata
+ *
+ * _chosen:
+ * Identifier indicating whether or not a CPU errata has been
+ * compiled in.
+ * _cpu:
+ * Name of the CPU
+ * _id:
+ * Errata identifier
+ * _rev_var:
+ * Register containing the combined value CPU revision and variant
+ * - typically the return value of cpu_get_rev_var
+ */
+ .macro report_errata _chosen, _cpu, _id, _rev_var=x8
+ /* Stash a string with errata ID */
+ .pushsection .rodata
+ \_cpu\()_errata_\_id\()_str:
+ .asciz "\_id"
+ .popsection
+
+ /* Check whether errata applies */
+ mov x0, \_rev_var
+ /* Shall clobber: x0-x7 */
+ bl check_errata_\_id
+
+ .ifeq \_chosen
+ /*
+ * Errata workaround has not been compiled in. If the errata would have
+ * applied had it been compiled in, print its status as missing.
+ */
+ cbz x0, 900f
+ mov x0, #ERRATA_MISSING
+ .endif
+900:
+ adr x1, \_cpu\()_cpu_str
+ adr x2, \_cpu\()_errata_\_id\()_str
+ bl errata_print_msg
+ .endm
+#endif
+
+ /*
+ * This macro is used on some CPUs to detect if they are vulnerable
+ * to CVE-2017-5715.
+ */
+ .macro cpu_check_csv2 _reg _label
+ mrs \_reg, id_aa64pfr0_el1
+ ubfx \_reg, \_reg, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
+ /*
+ * If the field equals 1, branch targets trained in one context cannot
+ * affect speculative execution in a different context.
+ *
+ * If the field equals 2, it means that the system is also aware of
+ * SCXTNUM_ELx register contexts. We aren't using them in the TF, so we
+ * expect users of the registers to do the right thing.
+ *
+ * Only apply mitigations if the value of this field is 0.
+ */
+#if ENABLE_ASSERTIONS
+ cmp \_reg, #3 /* Only values 0 to 2 are expected */
+ ASM_ASSERT(lo)
+#endif
+
+ cmp \_reg, #0
+ bne \_label
+ .endm
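Editor's note: a reset handler would typically use this as in the sketch below, installing the CVE-2017-5715 vector-table mitigation only when the CSV2 field reports that the core needs it. The vector table symbol is an assumption for illustration:

	cpu_check_csv2	x0, 1f
	adr	x0, wa_cve_2017_5715_mmu_vbar	/* mitigation vectors, name assumed */
	msr	vbar_el3, x0
1: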
+
+ /*
+ * Helper macro that reads the part number of the current
+ * CPU and jumps to the given label if it matches the CPU
+ * MIDR provided.
+ *
+ * Clobbers x0.
+ */
+ .macro jump_if_cpu_midr _cpu_midr, _label
+ mrs x0, midr_el1
+ ubfx x0, x0, MIDR_PN_SHIFT, #12
+ cmp w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
+ b.eq \_label
+ .endm
+
+
+/*
+ * Workaround wrappers for errata that apply at reset or runtime. Reset errata
+ * will be applied automatically
+ *
+ * _cpu:
+ * Name of cpu as given to declare_cpu_ops
+ *
+ * _cve:
+ * Whether erratum is a CVE. CVE year if yes, 0 otherwise
+ *
+ * _id:
+ * Erratum or CVE number. Please combine with previous field with ERRATUM
+ * or CVE macros
+ *
+ * _chosen:
+ * Compile time flag on whether the erratum is included
+ *
+ * _apply_at_reset:
+ * Whether the erratum should be automatically applied at reset
+ */
+.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
+ .pushsection .rodata.errata_entries
+ .align 3
+ .ifndef \_cpu\()_errata_list_start
+ \_cpu\()_errata_list_start:
+ .endif
+
+ /* check if unused and compile out if no references */
+ .if \_apply_at_reset && \_chosen
+ .quad erratum_\_cpu\()_\_id\()_wa
+ .else
+ .quad 0
+ .endif
+ /* TODO(errata ABI): this prevents all checker functions from
+ * being optimised away. Can be done away with unless the ABI
+ * needs them */
+ .quad check_erratum_\_cpu\()_\_id
+	/* Will fit CVEs with up to 10 digits in the ID field */
+ .word \_id
+ .hword \_cve
+ .byte \_chosen
+ /* TODO(errata ABI): mitigated field for known but unmitigated
+ * errata */
+ .byte 0x1
+ .popsection
+.endm
+
+.macro _workaround_start _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
+ add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_apply_at_reset
+
+ func erratum_\_cpu\()_\_id\()_wa
+ mov x8, x30
+
+ /* save rev_var for workarounds that might need it but don't
+ * restore to x0 because few will care */
+ mov x7, x0
+ bl check_erratum_\_cpu\()_\_id
+ cbz x0, erratum_\_cpu\()_\_id\()_skip
+.endm
+
+.macro _workaround_end _cpu:req, _id:req
+ erratum_\_cpu\()_\_id\()_skip:
+ ret x8
+ endfunc erratum_\_cpu\()_\_id\()_wa
+.endm
+
+/*******************************************************************************
+ * Errata workaround wrappers
+ ******************************************************************************/
+/*
+ * Workaround wrappers for errata that apply at reset or runtime. Reset errata
+ * will be applied automatically
+ *
+ * _cpu:
+ * Name of cpu as given to declare_cpu_ops
+ *
+ * _cve:
+ * Whether erratum is a CVE. CVE year if yes, 0 otherwise
+ *
+ * _id:
+ * Erratum or CVE number. Please combine with previous field with ERRATUM
+ * or CVE macros
+ *
+ * _chosen:
+ * Compile time flag on whether the erratum is included
+ *
+ * in body:
+ * clobber x0 to x7 (please only use those)
+ * argument x7 - cpu_rev_var
+ *
+ * _wa clobbers: x0-x8 (PCS compliant)
+ */
+.macro workaround_reset_start _cpu:req, _cve:req, _id:req, _chosen:req
+ _workaround_start \_cpu, \_cve, \_id, \_chosen, 1
+.endm
+
+/*
+ * See `workaround_reset_start` for usage info. Additional arguments:
+ *
+ * _midr:
+ * Check if CPU's MIDR matches the CPU it's meant for. Must be specified
+ * for errata applied in generic code
+ */
+.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
+ /*
+	 * Let errata specify if they need MIDR checking. Sadly, storing the
+	 * MIDR in an .equ so it can be retrieved automatically blows up, as the
+	 * symbol ends up containing brackets
+ */
+ .ifnb \_midr
+ jump_if_cpu_midr \_midr, 1f
+ b erratum_\_cpu\()_\_id\()_skip
+
+ 1:
+ .endif
+ _workaround_start \_cpu, \_cve, \_id, \_chosen, 0
+.endm
+
+/*
+ * Usage and arguments identical to `workaround_reset_start`. The _cve argument
+ * is kept here so the same #define can be used as that macro
+ */
+.macro workaround_reset_end _cpu:req, _cve:req, _id:req
+ _workaround_end \_cpu, \_id
+.endm
+
+/*
+ * See `workaround_reset_start` for usage info. The _cve argument is kept here
+ * so the same #define can be used as that macro. Additional arguments:
+ *
+ * _no_isb:
+ * Optionally do not include the trailing isb. Please disable with the
+ * NO_ISB macro
+ */
+.macro workaround_runtime_end _cpu:req, _cve:req, _id:req, _no_isb
+ /*
+	 * Runtime errata do not have a reset function to issue the isb for them,
+	 * and a missing isb could be very problematic. It is also easy to forget,
+	 * as runtime errata tend to be scattered in generic code.
+ */
+ .ifb \_no_isb
+ isb
+ .endif
+ _workaround_end \_cpu, \_id
+.endm
+
+/*******************************************************************************
+ * Errata workaround helpers
+ ******************************************************************************/
+/*
+ * Set a bit in a system register. Can set multiple bits but is limited by the
+ * way the ORR instruction encodes them.
+ *
+ * _reg:
+ * Register to write to
+ *
+ * _bit:
+ * Bit to set. Please use a descriptive #define
+ *
+ * _assert:
+ * Optionally whether to read back and assert that the bit has been
+ * written. Please disable with NO_ASSERT macro
+ *
+ * clobbers: x1
+ */
+.macro sysreg_bit_set _reg:req, _bit:req, _assert=1
+ mrs x1, \_reg
+ orr x1, x1, #\_bit
+ msr \_reg, x1
+.endm
+
+/*
+ * Clear a bit in a system register. Can clear multiple bits but is limited by
+ * the way the BIC instruction encodes them.
+ *
+ * see sysreg_bit_set for usage
+ */
+.macro sysreg_bit_clear _reg:req, _bit:req
+ mrs x1, \_reg
+ bic x1, x1, #\_bit
+ msr \_reg, x1
+.endm
+
+.macro override_vector_table _table:req
+ adr x1, \_table
+ msr vbar_el3, x1
+.endm
+
+/*
+ * BFI : Inserts bitfield into a system register.
+ *
+ * BFI{cond} Rd, Rn, #lsb, #width
+ */
+.macro sysreg_bitfield_insert _reg:req, _src:req, _lsb:req, _width:req
+ /* Source value for BFI */
+ mov x1, #\_src
+ mrs x0, \_reg
+ bfi x0, x1, #\_lsb, #\_width
+ msr \_reg, x0
+.endm
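Editor's note: a sketch of how an erratum body might combine these helpers, using the Cortex-A710 register and field names defined earlier in this diff (the pairing of the two writes is illustrative, not a specific erratum):

	sysreg_bit_set CORTEX_A710_CPUACTLR_EL1, CORTEX_A710_CPUACTLR_EL1_BIT_22
	sysreg_bitfield_insert CORTEX_A710_CPUECTLR2_EL1, CORTEX_A710_CPUECTLR2_EL1_PF_MODE_CNSRV, \
		CPUECTLR2_EL1_PF_MODE_LSB, CPUECTLR2_EL1_PF_MODE_WIDTH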
+
+/*
+ * Apply erratum
+ *
+ * _cpu:
+ * Name of cpu as given to declare_cpu_ops
+ *
+ * _cve:
+ * Whether erratum is a CVE. CVE year if yes, 0 otherwise
+ *
+ * _id:
+ * Erratum or CVE number. Please combine with previous field with ERRATUM
+ * or CVE macros
+ *
+ * _chosen:
+ * Compile time flag on whether the erratum is included
+ *
+ * _get_rev:
+ * Optional parameter that determines whether to insert a call to the CPU revision fetching
+ * procedure. Stores the result of this in the temporary register x10.
+ *
+ * clobbers: x0-x10 (PCS compliant)
+ */
+.macro apply_erratum _cpu:req, _cve:req, _id:req, _chosen:req, _get_rev=GET_CPU_REV
+ .if (\_chosen & \_get_rev)
+ mov x9, x30
+ bl cpu_get_rev_var
+ mov x10, x0
+ .elseif (\_chosen)
+ mov x9, x30
+ mov x0, x10
+ .endif
+
+ .if \_chosen
+ bl erratum_\_cpu\()_\_id\()_wa
+ mov x30, x9
+ .endif
+.endm
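Editor's note: generic or CPU code then invokes the workaround through this wrapper; ERRATUM() supplies the _cve/_id pair as described above, and the CPU name, erratum number and build flag below are placeholders only:

	apply_erratum cortex_a710, ERRATUM(1234567), ERRATA_A710_1234567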
+
+/*
+ * Helpers to select which revisions errata apply to. These tail-call
+ * cpu_rev_var_*, which performs the final ret, so no link register is needed.
+ *
+ * _cpu:
+ * Name of cpu as given to declare_cpu_ops
+ *
+ * _cve:
+ * Whether erratum is a CVE. CVE year if yes, 0 otherwise
+ *
+ * _id:
+ * Erratum or CVE number. Please combine with previous field with ERRATUM
+ * or CVE macros
+ *
+ * _rev_num:
+ * Revision to apply to
+ *
+ * in body:
+ * clobber: x0 to x4
+ * argument: x0 - cpu_rev_var
+ */
+.macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req
+ func check_erratum_\_cpu\()_\_id
+ mov x1, #\_rev_num
+ b cpu_rev_var_ls
+ endfunc check_erratum_\_cpu\()_\_id
+.endm
+
+.macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req
+ func check_erratum_\_cpu\()_\_id
+ mov x1, #\_rev_num
+ b cpu_rev_var_hs
+ endfunc check_erratum_\_cpu\()_\_id
+.endm
+
+.macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req
+ func check_erratum_\_cpu\()_\_id
+ mov x1, #\_rev_num_lo
+ mov x2, #\_rev_num_hi
+ b cpu_rev_var_range
+ endfunc check_erratum_\_cpu\()_\_id
+.endm
+
+.macro check_erratum_chosen _cpu:req, _cve:req, _id:req, _chosen:req
+ func check_erratum_\_cpu\()_\_id
+ .if \_chosen
+ mov x0, #ERRATA_APPLIES
+ .else
+ mov x0, #ERRATA_MISSING
+ .endif
+ ret
+ endfunc check_erratum_\_cpu\()_\_id
+.endm
+
+/* provide a shorthand for the name format for annoying errata */
+.macro check_erratum_custom_start _cpu:req, _cve:req, _id:req
+ func check_erratum_\_cpu\()_\_id
+.endm
+
+.macro check_erratum_custom_end _cpu:req, _cve:req, _id:req
+ endfunc check_erratum_\_cpu\()_\_id
+.endm
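Editor's note: putting the wrappers together, a complete reset-time erratum in a CPU file reduces to the sketch below. The CPU name, erratum number, build flag, register bit and revision limit are all placeholders; only the macro plumbing comes from this file. The value 0x20 encodes r2p0 in the usual (variant << 4) | revision packing returned by cpu_get_rev_var:

	workaround_reset_start cortex_a710, ERRATUM(1234567), ERRATA_A710_1234567
		sysreg_bit_set CORTEX_A710_CPUACTLR_EL1, CORTEX_A710_CPUACTLR_EL1_BIT_46
	workaround_reset_end cortex_a710, ERRATUM(1234567)

	/* erratum applies to revisions r2p0 and earlier */
	check_erratum_ls cortex_a710, ERRATUM(1234567), 0x20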
+
+
+/*******************************************************************************
+ * CPU reset function wrapper
+ ******************************************************************************/
+
+/*
+ * Wrapper to automatically apply all reset-time errata. Will end with an isb.
+ *
+ * _cpu:
+ * Name of cpu as given to declare_cpu_ops
+ *
+ * in body:
+ * clobber x8 to x14
+ * argument x14 - cpu_rev_var
+ */
+.macro cpu_reset_func_start _cpu:req
+ func \_cpu\()_reset_func
+ mov x15, x30
+ bl cpu_get_rev_var
+ mov x14, x0
+
+ /* short circuit the location to avoid searching the list */
+ adrp x12, \_cpu\()_errata_list_start
+ add x12, x12, :lo12:\_cpu\()_errata_list_start
+ adrp x13, \_cpu\()_errata_list_end
+ add x13, x13, :lo12:\_cpu\()_errata_list_end
+
+ errata_begin:
+ /* if head catches up with end of list, exit */
+ cmp x12, x13
+ b.eq errata_end
+
+ ldr x10, [x12, #ERRATUM_WA_FUNC]
+ /* TODO(errata ABI): check mitigated and checker function fields
+ * for 0 */
+ ldrb w11, [x12, #ERRATUM_CHOSEN]
+
+ /* skip if not chosen */
+ cbz x11, 1f
+ /* skip if runtime erratum */
+ cbz x10, 1f
+
+ /* put cpu revision in x0 and call workaround */
+ mov x0, x14
+ blr x10
+ 1:
+ add x12, x12, #ERRATUM_ENTRY_SIZE
+ b errata_begin
+ errata_end:
+.endm
+
+.macro cpu_reset_func_end _cpu:req
+ isb
+ ret x15
+ endfunc \_cpu\()_reset_func
+.endm
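Editor's note: with this wrapper, a CPU file's reset function only carries CPU-specific setup; every erratum declared with workaround_reset_start is applied automatically by the loop in cpu_reset_func_start before the body runs. A sketch (the CPU name and the single register write are chosen only to show the helpers, not as recommended settings):

	cpu_reset_func_start cortex_a710
		/* unconditional, CPU-specific reset-time setup goes here, e.g. */
		sysreg_bit_set CORTEX_A710_CPUECTLR_EL1, CORTEX_A710_CPUECTLR_EL1_PFSTIDIS_BIT
	cpu_reset_func_end cortex_a710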
+
+/*
+ * Maintain compatibility with the old scheme where each cpu had its own
+ * errata report function. TODO: remove entirely once all cpus have been
+ * converted. This includes the cpu_ops entry, as print_errata_status can
+ * call it directly for all cpus
+ */
+.macro errata_report_shim _cpu:req
+ #if REPORT_ERRATA
+ func \_cpu\()_errata_report
+ /* normal stack frame for pretty debugging */
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+
+ bl generic_errata_report
+
+ ldp x29, x30, [sp], #16
+ ret
+ endfunc \_cpu\()_errata_report
+ #endif
+.endm
+#endif /* CPU_MACROS_S */
diff --git a/include/lib/cpus/aarch64/cpuamu.h b/include/lib/cpus/aarch64/cpuamu.h
new file mode 100644
index 0000000..cb004bf
--- /dev/null
+++ b/include/lib/cpus/aarch64/cpuamu.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CPUAMU_H
+#define CPUAMU_H
+
+/*******************************************************************************
+ * CPU Activity Monitor Unit register specific definitions.
+ ******************************************************************************/
+#define CPUAMCNTENCLR_EL0 S3_3_C15_C9_7
+#define CPUAMCNTENSET_EL0 S3_3_C15_C9_6
+#define CPUAMCFGR_EL0 S3_3_C15_C10_6
+#define CPUAMUSERENR_EL0 S3_3_C15_C10_7
+
+/* Activity Monitor Event Counter Registers */
+#define CPUAMEVCNTR0_EL0 S3_3_C15_C9_0
+#define CPUAMEVCNTR1_EL0 S3_3_C15_C9_1
+#define CPUAMEVCNTR2_EL0 S3_3_C15_C9_2
+#define CPUAMEVCNTR3_EL0 S3_3_C15_C9_3
+#define CPUAMEVCNTR4_EL0 S3_3_C15_C9_4
+
+/* Activity Monitor Event Type Registers */
+#define CPUAMEVTYPER0_EL0 S3_3_C15_C10_0
+#define CPUAMEVTYPER1_EL0 S3_3_C15_C10_1
+#define CPUAMEVTYPER2_EL0 S3_3_C15_C10_2
+#define CPUAMEVTYPER3_EL0 S3_3_C15_C10_3
+#define CPUAMEVTYPER4_EL0 S3_3_C15_C10_4
+
+#ifndef __ASSEMBLER__
+#include <stdint.h>
+
+uint64_t cpuamu_cnt_read(unsigned int idx);
+void cpuamu_cnt_write(unsigned int idx, uint64_t val);
+unsigned int cpuamu_read_cpuamcntenset_el0(void);
+unsigned int cpuamu_read_cpuamcntenclr_el0(void);
+void cpuamu_write_cpuamcntenset_el0(unsigned int mask);
+void cpuamu_write_cpuamcntenclr_el0(unsigned int mask);
+
+int midr_match(unsigned int cpu_midr);
+void cpuamu_context_save(unsigned int nr_counters);
+void cpuamu_context_restore(unsigned int nr_counters);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* CPUAMU_H */
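A hedged C sketch of how a platform power-management hook might use the save/restore helpers above (the hook names are hypothetical; NEOVERSE_N1_AMU_NR_COUNTERS is taken from the Neoverse N1 header added later in this patch, and the include form assumes the cpu headers are on the include path):

    #include <cpuamu.h>
    #include <neoverse_n1.h>

    /* illustrative: preserve the implementation-defined AMU counters across
     * a core power down/up cycle on a Neoverse N1 based platform */
    static void plat_cpuamu_suspend(void)
    {
    	cpuamu_context_save(NEOVERSE_N1_AMU_NR_COUNTERS);
    }

    static void plat_cpuamu_resume(void)
    {
    	cpuamu_context_restore(NEOVERSE_N1_AMU_NR_COUNTERS);
    }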
diff --git a/include/lib/cpus/aarch64/denver.h b/include/lib/cpus/aarch64/denver.h
new file mode 100644
index 0000000..84ab6bb
--- /dev/null
+++ b/include/lib/cpus/aarch64/denver.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2015-2018, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef DENVER_H
+#define DENVER_H
+
+/* MIDR values for Denver */
+#define DENVER_MIDR_PN0 U(0x4E0F0000)
+#define DENVER_MIDR_PN1 U(0x4E0F0010)
+#define DENVER_MIDR_PN2 U(0x4E0F0020)
+#define DENVER_MIDR_PN3 U(0x4E0F0030)
+#define DENVER_MIDR_PN4 U(0x4E0F0040)
+#define DENVER_MIDR_PN5 U(0x4E0F0050)
+#define DENVER_MIDR_PN6 U(0x4E0F0060)
+#define DENVER_MIDR_PN7 U(0x4E0F0070)
+#define DENVER_MIDR_PN8 U(0x4E0F0080)
+#define DENVER_MIDR_PN9 U(0x4E0F0090)
+
+/* Implementer code in the MIDR register */
+#define DENVER_IMPL U(0x4E)
+
+/* CPU state ids - implementation defined */
+#define DENVER_CPU_STATE_POWER_DOWN U(0x3)
+
+/* Speculative store buffering */
+#define DENVER_CPU_DIS_SSB_EL3 (U(1) << 11)
+#define DENVER_PN4_CPU_DIS_SSB_EL3 (U(1) << 18)
+
+/* Speculative memory disambiguation */
+#define DENVER_CPU_DIS_MD_EL3 (U(1) << 9)
+#define DENVER_PN4_CPU_DIS_MD_EL3 (U(1) << 17)
+
+/* Core power management states */
+#define DENVER_CPU_PMSTATE_C1 U(0x1)
+#define DENVER_CPU_PMSTATE_C6 U(0x6)
+#define DENVER_CPU_PMSTATE_C7 U(0x7)
+#define DENVER_CPU_PMSTATE_MASK U(0xF)
+
+/* ACTRL_ELx bits to enable dual execution */
+#define DENVER_CPU_ENABLE_DUAL_EXEC_EL2 (ULL(1) << 9)
+#define DENVER_CPU_ENABLE_DUAL_EXEC_EL3 (ULL(1) << 9)
+#define DENVER_CPU_ENABLE_DUAL_EXEC_EL1 (U(1) << 4)
+
+#ifndef __ASSEMBLER__
+
+/* Disable Dynamic Code Optimisation */
+void denver_disable_dco(void);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* DENVER_H */
diff --git a/include/lib/cpus/aarch64/dsu_def.h b/include/lib/cpus/aarch64/dsu_def.h
new file mode 100644
index 0000000..577de61
--- /dev/null
+++ b/include/lib/cpus/aarch64/dsu_def.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef DSU_DEF_H
+#define DSU_DEF_H
+
+#include <lib/utils_def.h>
+
+/********************************************************************
+ * DSU Cluster Configuration registers definitions
+ ********************************************************************/
+#define CLUSTERCFR_EL1 S3_0_C15_C3_0
+
+#define CLUSTERCFR_ACP_SHIFT U(11)
+
+/********************************************************************
+ * DSU Cluster Main Revision ID registers definitions
+ ********************************************************************/
+#define CLUSTERIDR_EL1 S3_0_C15_C3_1
+
+#define CLUSTERIDR_REV_SHIFT U(0)
+#define CLUSTERIDR_REV_BITS U(4)
+#define CLUSTERIDR_VAR_SHIFT U(4)
+#define CLUSTERIDR_VAR_BITS U(4)
+
+/********************************************************************
+ * DSU Cluster Auxiliary Control registers definitions
+ ********************************************************************/
+#define CLUSTERACTLR_EL1 S3_0_C15_C3_3
+
+#define CLUSTERACTLR_EL1_DISABLE_CLOCK_GATING (ULL(1) << 15)
+#define CLUSTERACTLR_EL1_DISABLE_SCLK_GATING (ULL(3) << 15)
+
+/********************************************************************
+ * Masks applied for DSU errata workarounds
+ ********************************************************************/
+#define DSU_ERRATA_936184_MASK (U(0x3) << 15)
+
+#endif /* DSU_DEF_H */
diff --git a/include/lib/cpus/aarch64/generic.h b/include/lib/cpus/aarch64/generic.h
new file mode 100644
index 0000000..dd71554
--- /dev/null
+++ b/include/lib/cpus/aarch64/generic.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef AARCH64_GENERIC_H
+#define AARCH64_GENERIC_H
+
+#include <lib/utils_def.h>
+
+/*
+ * An MIDR implementer value of 0x0 is reserved for software use, so use an
+ * MIDR of 0 for the default CPU library.
+ */
+#define AARCH64_GENERIC_MIDR U(0)
+
+#endif /* AARCH64_GENERIC_H */
diff --git a/include/lib/cpus/aarch64/neoverse_e1.h b/include/lib/cpus/aarch64/neoverse_e1.h
new file mode 100644
index 0000000..6e784f6
--- /dev/null
+++ b/include/lib/cpus/aarch64/neoverse_e1.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef NEOVERSE_E1_H
+#define NEOVERSE_E1_H
+
+#include <lib/utils_def.h>
+
+#define NEOVERSE_E1_MIDR U(0x410FD4A0)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define NEOVERSE_E1_ECTLR_EL1 S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define NEOVERSE_E1_CPUACTLR_EL1 S3_0_C15_C1_0
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions.
+ ******************************************************************************/
+
+#define NEOVERSE_E1_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define NEOVERSE_E1_CPUPWRCTLR_EL1_CORE_PWRDN_BIT (U(1) << 0)
+
+#endif /* NEOVERSE_E1_H */
diff --git a/include/lib/cpus/aarch64/neoverse_hermes.h b/include/lib/cpus/aarch64/neoverse_hermes.h
new file mode 100644
index 0000000..22492c3
--- /dev/null
+++ b/include/lib/cpus/aarch64/neoverse_hermes.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef NEOVERSE_HERMES_H
+#define NEOVERSE_HERMES_H
+
+#define NEOVERSE_HERMES_MIDR U(0x410FD8E0)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define NEOVERSE_HERMES_CPUECTLR_EL1 S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define NEOVERSE_HERMES_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define NEOVERSE_HERMES_CPUPWRCTLR_EL1_CORE_PWRDN_BIT U(1)
+
+#endif /* NEOVERSE_HERMES_H */
diff --git a/include/lib/cpus/aarch64/neoverse_n1.h b/include/lib/cpus/aarch64/neoverse_n1.h
new file mode 100644
index 0000000..0ba5ad1
--- /dev/null
+++ b/include/lib/cpus/aarch64/neoverse_n1.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef NEOVERSE_N1_H
+#define NEOVERSE_N1_H
+
+#include <lib/utils_def.h>
+
+/* Neoverse N1 MIDR for revision 0 */
+#define NEOVERSE_N1_MIDR U(0x410fd0c0)
+
+/* Neoverse N1 loop count for CVE-2022-23960 mitigation */
+#define NEOVERSE_N1_BHB_LOOP_COUNT U(24)
+
+/* Exception Syndrome register EC code for IC Trap */
+#define NEOVERSE_N1_EC_IC_TRAP U(0x1f)
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions.
+ ******************************************************************************/
+#define NEOVERSE_N1_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+
+/* Definitions of register field mask in NEOVERSE_N1_CPUPWRCTLR_EL1 */
+#define NEOVERSE_N1_CORE_PWRDN_EN_MASK U(0x1)
+
+#define NEOVERSE_N1_ACTLR_AMEN_BIT (U(1) << 4)
+
+#define NEOVERSE_N1_AMU_NR_COUNTERS U(5)
+#define NEOVERSE_N1_AMU_GROUP0_MASK U(0x1f)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define NEOVERSE_N1_CPUECTLR_EL1 S3_0_C15_C1_4
+
+#define NEOVERSE_N1_WS_THR_L2_MASK (ULL(3) << 24)
+#define NEOVERSE_N1_CPUECTLR_EL1_MM_TLBPF_DIS_BIT (ULL(1) << 51)
+#define NEOVERSE_N1_CPUECTLR_EL1_EXTLLC_BIT (ULL(1) << 0)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define NEOVERSE_N1_CPUACTLR_EL1 S3_0_C15_C1_0
+
+#define NEOVERSE_N1_CPUACTLR_EL1_BIT_6 (ULL(1) << 6)
+#define NEOVERSE_N1_CPUACTLR_EL1_BIT_13 (ULL(1) << 13)
+
+#define NEOVERSE_N1_CPUACTLR2_EL1 S3_0_C15_C1_1
+
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_0 (ULL(1) << 0)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_2 (ULL(1) << 2)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_11 (ULL(1) << 11)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_15 (ULL(1) << 15)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_16 (ULL(1) << 16)
+#define NEOVERSE_N1_CPUACTLR2_EL1_BIT_59 (ULL(1) << 59)
+
+#define NEOVERSE_N1_CPUACTLR3_EL1 S3_0_C15_C1_2
+
+#define NEOVERSE_N1_CPUACTLR3_EL1_BIT_10 (ULL(1) << 10)
+
+/* Instruction patching registers */
+#define CPUPSELR_EL3 S3_6_C15_C8_0
+#define CPUPCR_EL3 S3_6_C15_C8_1
+#define CPUPOR_EL3 S3_6_C15_C8_2
+#define CPUPMR_EL3 S3_6_C15_C8_3
+
+#endif /* NEOVERSE_N1_H */
diff --git a/include/lib/cpus/aarch64/neoverse_n2.h b/include/lib/cpus/aarch64/neoverse_n2.h
new file mode 100644
index 0000000..b379fab
--- /dev/null
+++ b/include/lib/cpus/aarch64/neoverse_n2.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef NEOVERSE_N2_H
+#define NEOVERSE_N2_H
+
+/* Neoverse N2 ID register for revision r0p0 */
+#define NEOVERSE_N2_MIDR U(0x410FD490)
+
+/* Neoverse N2 loop count for CVE-2022-23960 mitigation */
+#define NEOVERSE_N2_BHB_LOOP_COUNT U(32)
+
+/*******************************************************************************
+ * CPU Power control register
+ ******************************************************************************/
+#define NEOVERSE_N2_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define NEOVERSE_N2_CORE_PWRDN_EN_BIT (ULL(1) << 0)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define NEOVERSE_N2_CPUECTLR_EL1 S3_0_C15_C1_4
+#define NEOVERSE_N2_CPUECTLR_EL1_EXTLLC_BIT (ULL(1) << 0)
+#define NEOVERSE_N2_CPUECTLR_EL1_PFSTIDIS_BIT (ULL(1) << 8)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define NEOVERSE_N2_CPUACTLR_EL1 S3_0_C15_C1_0
+#define NEOVERSE_N2_CPUACTLR_EL1_BIT_46 (ULL(1) << 46)
+#define NEOVERSE_N2_CPUACTLR_EL1_BIT_22 (ULL(1) << 22)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register 2 specific definitions.
+ ******************************************************************************/
+#define NEOVERSE_N2_CPUACTLR2_EL1 S3_0_C15_C1_1
+#define NEOVERSE_N2_CPUACTLR2_EL1_BIT_0 (ULL(1) << 0)
+#define NEOVERSE_N2_CPUACTLR2_EL1_BIT_2 (ULL(1) << 2)
+#define NEOVERSE_N2_CPUACTLR2_EL1_BIT_36 (ULL(1) << 36)
+#define NEOVERSE_N2_CPUACTLR2_EL1_BIT_40 (ULL(1) << 40)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register 3 specific definitions.
+ ******************************************************************************/
+#define NEOVERSE_N2_CPUACTLR3_EL1 S3_0_C15_C1_2
+#define NEOVERSE_N2_CPUACTLR3_EL1_BIT_47 (ULL(1) << 47)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register 5 specific definitions.
+ ******************************************************************************/
+#define NEOVERSE_N2_CPUACTLR5_EL1 S3_0_C15_C8_0
+#define NEOVERSE_N2_CPUACTLR5_EL1_BIT_56 (ULL(1) << 56)
+#define NEOVERSE_N2_CPUACTLR5_EL1_BIT_55 (ULL(1) << 55)
+#define NEOVERSE_N2_CPUACTLR5_EL1_BIT_44 (ULL(1) << 44)
+#define NEOVERSE_N2_CPUACTLR5_EL1_BIT_13 (ULL(1) << 13)
+#define NEOVERSE_N2_CPUACTLR5_EL1_BIT_17 (ULL(1) << 17)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define NEOVERSE_N2_CPUECTLR2_EL1 S3_0_C15_C1_5
+#define NEOVERSE_N2_CPUECTLR2_EL1_PF_MODE_CNSRV ULL(9)
+#define CPUECTLR2_EL1_PF_MODE_LSB U(11)
+#define CPUECTLR2_EL1_PF_MODE_WIDTH U(4)
+#define CPUECTLR2_EL1_TXREQ_STATIC_FULL ULL(0)
+#define CPUECTLR2_EL1_TXREQ_LSB U(0)
+#define CPUECTLR2_EL1_TXREQ_WIDTH U(3)
+
+#endif /* NEOVERSE_N2_H */
diff --git a/include/lib/cpus/aarch64/neoverse_n_common.h b/include/lib/cpus/aarch64/neoverse_n_common.h
new file mode 100644
index 0000000..7cb91cd
--- /dev/null
+++ b/include/lib/cpus/aarch64/neoverse_n_common.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef NEOVERSE_N_COMMON_H
+#define NEOVERSE_N_COMMON_H
+
+/******************************************************************************
+ * Neoverse Nx CPU Configuration register definitions
+ *****************************************************************************/
+#define CPUCFR_EL1 S3_0_C15_C0_0
+
+/* SCU bit of CPU Configuration Register, EL1 */
+#define SCU_SHIFT U(2)
+
+#endif /* NEOVERSE_N_COMMON_H */
diff --git a/include/lib/cpus/aarch64/neoverse_poseidon.h b/include/lib/cpus/aarch64/neoverse_poseidon.h
new file mode 100644
index 0000000..202ef5c
--- /dev/null
+++ b/include/lib/cpus/aarch64/neoverse_poseidon.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef NEOVERSE_POSEIDON_H
+#define NEOVERSE_POSEIDON_H
+
+
+#define NEOVERSE_POSEIDON_MIDR U(0x410FD830)
+
+/* Neoverse Poseidon loop count for CVE-2022-23960 mitigation */
+#define NEOVERSE_POSEIDON_BHB_LOOP_COUNT U(132)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define NEOVERSE_POSEIDON_CPUECTLR_EL1 S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define NEOVERSE_POSEIDON_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define NEOVERSE_POSEIDON_CPUPWRCTLR_EL1_CORE_PWRDN_BIT U(1)
+
+#endif /* NEOVERSE_POSEIDON_H */
diff --git a/include/lib/cpus/aarch64/neoverse_v1.h b/include/lib/cpus/aarch64/neoverse_v1.h
new file mode 100644
index 0000000..d618994
--- /dev/null
+++ b/include/lib/cpus/aarch64/neoverse_v1.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef NEOVERSE_V1_H
+#define NEOVERSE_V1_H
+
+#define NEOVERSE_V1_MIDR U(0x410FD400)
+
+/* Neoverse V1 loop count for CVE-2022-23960 mitigation */
+#define NEOVERSE_V1_BHB_LOOP_COUNT U(32)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define NEOVERSE_V1_CPUECTLR_EL1 S3_0_C15_C1_4
+#define NEOVERSE_V1_CPUPSELR_EL3 S3_6_C15_C8_0
+#define NEOVERSE_V1_CPUPOR_EL3 S3_6_C15_C8_2
+#define NEOVERSE_V1_CPUPMR_EL3 S3_6_C15_C8_3
+#define NEOVERSE_V1_CPUPCR_EL3 S3_6_C15_C8_1
+#define NEOVERSE_V1_CPUECTLR_EL1_BIT_8 (ULL(1) << 8)
+#define NEOVERSE_V1_CPUECTLR_EL1_BIT_53 (ULL(1) << 53)
+#define NEOVERSE_V1_CPUECTLR_EL1_PF_MODE_CNSRV ULL(3)
+#define CPUECTLR_EL1_PF_MODE_LSB U(6)
+#define CPUECTLR_EL1_PF_MODE_WIDTH U(2)
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define NEOVERSE_V1_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define NEOVERSE_V1_CPUPWRCTLR_EL1_CORE_PWRDN_BIT U(1)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define NEOVERSE_V1_ACTLR2_EL1 S3_0_C15_C1_1
+#define NEOVERSE_V1_ACTLR2_EL1_BIT_0 ULL(1)
+#define NEOVERSE_V1_ACTLR2_EL1_BIT_2 (ULL(1) << 2)
+#define NEOVERSE_V1_ACTLR2_EL1_BIT_28 (ULL(1) << 28)
+#define NEOVERSE_V1_ACTLR2_EL1_BIT_40 (ULL(1) << 40)
+
+#define NEOVERSE_V1_ACTLR3_EL1 S3_0_C15_C1_2
+#define NEOVERSE_V1_ACTLR3_EL1_BIT_47 (ULL(1) << 47)
+
+#define NEOVERSE_V1_ACTLR5_EL1 S3_0_C15_C9_0
+#define NEOVERSE_V1_ACTLR5_EL1_BIT_55 (ULL(1) << 55)
+#define NEOVERSE_V1_ACTLR5_EL1_BIT_56 (ULL(1) << 56)
+
+#endif /* NEOVERSE_V1_H */
diff --git a/include/lib/cpus/aarch64/neoverse_v2.h b/include/lib/cpus/aarch64/neoverse_v2.h
new file mode 100644
index 0000000..68c1558
--- /dev/null
+++ b/include/lib/cpus/aarch64/neoverse_v2.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef NEOVERSE_V2_H
+#define NEOVERSE_V2_H
+
+#define NEOVERSE_V2_MIDR U(0x410FD4F0)
+
+/* Neoverse V2 loop count for CVE-2022-23960 mitigation */
+#define NEOVERSE_V2_BHB_LOOP_COUNT U(132)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define NEOVERSE_V2_CPUECTLR_EL1 S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define NEOVERSE_V2_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define NEOVERSE_V2_CPUPWRCTLR_EL1_CORE_PWRDN_BIT U(1)
+
+/*******************************************************************************
+ * CPU Extended Control register 2 specific definitions.
+ ******************************************************************************/
+#define NEOVERSE_V2_CPUECTLR2_EL1 S3_0_C15_C1_5
+#define NEOVERSE_V2_CPUECTLR2_EL1_PF_MODE_CNSRV ULL(9)
+#define NEOVERSE_V2_CPUECTLR2_EL1_PF_MODE_LSB U(11)
+#define NEOVERSE_V2_CPUECTLR2_EL1_PF_MODE_WIDTH U(4)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register 2 specific definitions.
+ ******************************************************************************/
+#define NEOVERSE_V2_CPUACTLR2_EL1 S3_0_C15_C1_1
+#define NEOVERSE_V2_CPUACTLR2_EL1_BIT_0 (ULL(1) << 0)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register 3 specific definitions.
+ ******************************************************************************/
+#define NEOVERSE_V2_CPUACTLR3_EL1 S3_0_C15_C1_2
+#define NEOVERSE_V2_CPUACTLR3_EL1_BIT_47 (ULL(1) << 47)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register 5 specific definitions.
+ ******************************************************************************/
+#define NEOVERSE_V2_CPUACTLR5_EL1 S3_0_C15_C8_0
+#define NEOVERSE_V2_CPUACTLR5_EL1_BIT_56 (ULL(1) << 56)
+#define NEOVERSE_V2_CPUACTLR5_EL1_BIT_55 (ULL(1) << 55)
+
+#endif /* NEOVERSE_V2_H */
diff --git a/include/lib/cpus/aarch64/nevis.h b/include/lib/cpus/aarch64/nevis.h
new file mode 100644
index 0000000..7006a29
--- /dev/null
+++ b/include/lib/cpus/aarch64/nevis.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef NEVIS_H
+#define NEVIS_H
+
+#define NEVIS_MIDR U(0x410FD8A0)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define NEVIS_CPUECTLR_EL1 S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define NEVIS_IMP_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define NEVIS_IMP_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT U(1)
+
+#endif /* NEVIS_H */
diff --git a/include/lib/cpus/aarch64/qemu_max.h b/include/lib/cpus/aarch64/qemu_max.h
new file mode 100644
index 0000000..58923d2
--- /dev/null
+++ b/include/lib/cpus/aarch64/qemu_max.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2014-2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef QEMU_MAX_H
+#define QEMU_MAX_H
+
+#include <lib/utils_def.h>
+
+/*
+ * QEMU MAX midr for revision 0
+ * 00 - Reserved for software use
+ * 0 - Variant
+ * F - Architectural features identified in ID_* registers
+ * 051 - 'Q', in a 12-bit field.
+ * 0 - Revision
+ */
+#define QEMU_MAX_MIDR U(0x000F0510)
+
+#endif /* QEMU_MAX_H */
diff --git a/include/lib/cpus/aarch64/rainier.h b/include/lib/cpus/aarch64/rainier.h
new file mode 100644
index 0000000..978661f
--- /dev/null
+++ b/include/lib/cpus/aarch64/rainier.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RAINIER_H
+#define RAINIER_H
+
+#include <lib/utils_def.h>
+
+/* RAINIER MIDR for revision 0 */
+#define RAINIER_MIDR U(0x3f0f4120)
+
+/* Exception Syndrome register EC code for IC Trap */
+#define RAINIER_EC_IC_TRAP U(0x1f)
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions.
+ ******************************************************************************/
+#define RAINIER_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+
+/* Definitions of register field mask in RAINIER_CPUPWRCTLR_EL1 */
+#define RAINIER_CORE_PWRDN_EN_MASK U(0x1)
+
+#define RAINIER_ACTLR_AMEN_BIT (U(1) << 4)
+
+#define RAINIER_AMU_NR_COUNTERS U(5)
+#define RAINIER_AMU_GROUP0_MASK U(0x1f)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define RAINIER_CPUECTLR_EL1 S3_0_C15_C1_4
+
+#define RAINIER_WS_THR_L2_MASK (ULL(3) << 24)
+#define RAINIER_CPUECTLR_EL1_MM_TLBPF_DIS_BIT (ULL(1) << 51)
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define RAINIER_CPUACTLR_EL1 S3_0_C15_C1_0
+
+#define RAINIER_CPUACTLR_EL1_BIT_6 (ULL(1) << 6)
+#define RAINIER_CPUACTLR_EL1_BIT_13 (ULL(1) << 13)
+
+#define RAINIER_CPUACTLR2_EL1 S3_0_C15_C1_1
+
+#define RAINIER_CPUACTLR2_EL1_BIT_0 (ULL(1) << 0)
+#define RAINIER_CPUACTLR2_EL1_BIT_2 (ULL(1) << 2)
+#define RAINIER_CPUACTLR2_EL1_BIT_11 (ULL(1) << 11)
+#define RAINIER_CPUACTLR2_EL1_BIT_15 (ULL(1) << 15)
+#define RAINIER_CPUACTLR2_EL1_BIT_16 (ULL(1) << 16)
+#define RAINIER_CPUACTLR2_EL1_BIT_59 (ULL(1) << 59)
+
+#define RAINIER_CPUACTLR3_EL1 S3_0_C15_C1_2
+
+#define RAINIER_CPUACTLR3_EL1_BIT_10 (ULL(1) << 10)
+
+/* Instruction patching registers */
+#define CPUPSELR_EL3 S3_6_C15_C8_0
+#define CPUPCR_EL3 S3_6_C15_C8_1
+#define CPUPOR_EL3 S3_6_C15_C8_2
+#define CPUPMR_EL3 S3_6_C15_C8_3
+
+#endif /* RAINIER_H */
diff --git a/include/lib/cpus/aarch64/travis.h b/include/lib/cpus/aarch64/travis.h
new file mode 100644
index 0000000..a8a2556
--- /dev/null
+++ b/include/lib/cpus/aarch64/travis.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TRAVIS_H
+#define TRAVIS_H
+
+#define TRAVIS_MIDR U(0x410FD8C0)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions
+ ******************************************************************************/
+#define TRAVIS_IMP_CPUECTLR_EL1 S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define TRAVIS_IMP_CPUPWRCTLR_EL1 S3_0_C15_C2_7
+#define TRAVIS_IMP_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT U(1)
+
+/*******************************************************************************
+ * SME Control registers
+ ******************************************************************************/
+#define TRAVIS_SVCRSM S0_3_C4_C2_3
+#define TRAVIS_SVCRZA S0_3_C4_C4_3
+
+#endif /* TRAVIS_H */
diff --git a/include/lib/cpus/cpu_ops.h b/include/lib/cpus/cpu_ops.h
new file mode 100644
index 0000000..8b36ff1
--- /dev/null
+++ b/include/lib/cpus/cpu_ops.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CPU_OPS_H
+#define CPU_OPS_H
+
+#include <arch.h>
+
+#define CPU_IMPL_PN_MASK (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) | \
+ (MIDR_PN_MASK << MIDR_PN_SHIFT)
+
+/* Hardcoded to stay compatible with assembly; equal to sizeof(uintptr_t). */
+#if __aarch64__
+#define CPU_WORD_SIZE 8
+#else
+#define CPU_WORD_SIZE 4
+#endif /* __aarch64__ */
+
+/* The number of CPU operations allowed */
+#define CPU_MAX_PWR_DWN_OPS 2
+/* Special constant to specify that CPU has no reset function */
+#define CPU_NO_RESET_FUNC 0
+
+#if __aarch64__
+#define CPU_NO_EXTRA1_FUNC 0
+#define CPU_NO_EXTRA2_FUNC 0
+#define CPU_NO_EXTRA3_FUNC 0
+#endif /* __aarch64__ */
+
+
+/*
+ * Define the sizes of the fields in the cpu_ops structure. The word size is
+ * set per architecture (AArch32/AArch64), so these definitions stay common
+ * and each build includes only the fields it needs.
+ */
+#define CPU_MIDR_SIZE CPU_WORD_SIZE
+#ifdef IMAGE_AT_EL3
+#define CPU_RESET_FUNC_SIZE CPU_WORD_SIZE
+#else
+#define CPU_RESET_FUNC_SIZE 0
+#endif /* IMAGE_AT_EL3 */
+#define CPU_EXTRA1_FUNC_SIZE CPU_WORD_SIZE
+#define CPU_EXTRA2_FUNC_SIZE CPU_WORD_SIZE
+#define CPU_EXTRA3_FUNC_SIZE CPU_WORD_SIZE
+#define CPU_E_HANDLER_FUNC_SIZE CPU_WORD_SIZE
+/* The core and cluster power down operations are needed only in BL31 and BL32 */
+#if defined(IMAGE_BL31) || defined(IMAGE_BL32)
+#define CPU_PWR_DWN_OPS_SIZE CPU_WORD_SIZE * CPU_MAX_PWR_DWN_OPS
+#else
+#define CPU_PWR_DWN_OPS_SIZE 0
+#endif /* defined(IMAGE_BL31) || defined(IMAGE_BL32) */
+
+#define CPU_ERRATA_LIST_START_SIZE CPU_WORD_SIZE
+#define CPU_ERRATA_LIST_END_SIZE CPU_WORD_SIZE
+/* Fields required to print errata status */
+#if REPORT_ERRATA
+#define CPU_ERRATA_FUNC_SIZE CPU_WORD_SIZE
+#define CPU_CPU_STR_SIZE CPU_WORD_SIZE
+/* BL1 doesn't require mutual exclusion or the printed flag. */
+#if defined(IMAGE_BL31) || defined(IMAGE_BL32)
+#define CPU_ERRATA_LOCK_SIZE CPU_WORD_SIZE
+#define CPU_ERRATA_PRINTED_SIZE CPU_WORD_SIZE
+#else
+#define CPU_ERRATA_LOCK_SIZE 0
+#define CPU_ERRATA_PRINTED_SIZE 0
+#endif /* defined(IMAGE_BL31) || defined(IMAGE_BL32) */
+#else
+#define CPU_ERRATA_FUNC_SIZE 0
+#define CPU_CPU_STR_SIZE 0
+#define CPU_ERRATA_LOCK_SIZE 0
+#define CPU_ERRATA_PRINTED_SIZE 0
+#endif /* REPORT_ERRATA */
+
+#if defined(IMAGE_BL31) && CRASH_REPORTING
+#define CPU_REG_DUMP_SIZE CPU_WORD_SIZE
+#else
+#define CPU_REG_DUMP_SIZE 0
+#endif /* defined(IMAGE_BL31) && CRASH_REPORTING */
+
+
+/*
+ * Define the offsets of the fields in the cpu_ops structure. Every offset is
+ * defined based on the offset and size of the previous field.
+ */
+#define CPU_MIDR 0
+#define CPU_RESET_FUNC CPU_MIDR + CPU_MIDR_SIZE
+#if __aarch64__
+#define CPU_EXTRA1_FUNC CPU_RESET_FUNC + CPU_RESET_FUNC_SIZE
+#define CPU_EXTRA2_FUNC CPU_EXTRA1_FUNC + CPU_EXTRA1_FUNC_SIZE
+#define CPU_EXTRA3_FUNC CPU_EXTRA2_FUNC + CPU_EXTRA2_FUNC_SIZE
+#define CPU_E_HANDLER_FUNC CPU_EXTRA3_FUNC + CPU_EXTRA3_FUNC_SIZE
+#define CPU_PWR_DWN_OPS CPU_E_HANDLER_FUNC + CPU_E_HANDLER_FUNC_SIZE
+#else
+#define CPU_PWR_DWN_OPS CPU_RESET_FUNC + CPU_RESET_FUNC_SIZE
+#endif /* __aarch64__ */
+#define CPU_ERRATA_LIST_START CPU_PWR_DWN_OPS + CPU_PWR_DWN_OPS_SIZE
+#define CPU_ERRATA_LIST_END CPU_ERRATA_LIST_START + CPU_ERRATA_LIST_START_SIZE
+#define CPU_ERRATA_FUNC CPU_ERRATA_LIST_END + CPU_ERRATA_LIST_END_SIZE
+#define CPU_CPU_STR CPU_ERRATA_FUNC + CPU_ERRATA_FUNC_SIZE
+#define CPU_ERRATA_LOCK CPU_CPU_STR + CPU_CPU_STR_SIZE
+#define CPU_ERRATA_PRINTED CPU_ERRATA_LOCK + CPU_ERRATA_LOCK_SIZE
+#if __aarch64__
+#define CPU_REG_DUMP CPU_ERRATA_PRINTED + CPU_ERRATA_PRINTED_SIZE
+#define CPU_OPS_SIZE CPU_REG_DUMP + CPU_REG_DUMP_SIZE
+#else
+#define CPU_OPS_SIZE CPU_ERRATA_PRINTED + CPU_ERRATA_PRINTED_SIZE
+#endif /* __aarch64__ */
+
+#ifndef __ASSEMBLER__
+#include <lib/cassert.h>
+#include <lib/spinlock.h>
+
+struct cpu_ops {
+ unsigned long midr;
+#ifdef IMAGE_AT_EL3
+ void (*reset_func)(void);
+#endif /* IMAGE_AT_EL3 */
+#if __aarch64__
+ void (*extra1_func)(void);
+ void (*extra2_func)(void);
+ void (*extra3_func)(void);
+ void (*e_handler_func)(long es);
+#endif /* __aarch64__ */
+#if (defined(IMAGE_BL31) || defined(IMAGE_BL32)) && CPU_MAX_PWR_DWN_OPS
+ void (*pwr_dwn_ops[CPU_MAX_PWR_DWN_OPS])(void);
+#endif /* (defined(IMAGE_BL31) || defined(IMAGE_BL32)) && CPU_MAX_PWR_DWN_OPS */
+ void *errata_list_start;
+ void *errata_list_end;
+#if REPORT_ERRATA
+ void (*errata_func)(void);
+ char *cpu_str;
+#if defined(IMAGE_BL31) || defined(IMAGE_BL32)
+ spinlock_t *errata_lock;
+ unsigned int *errata_reported;
+#endif /* defined(IMAGE_BL31) || defined(IMAGE_BL32) */
+#endif /* REPORT_ERRATA */
+#if defined(IMAGE_BL31) && CRASH_REPORTING
+ void (*reg_dump)(void);
+#endif /* defined(IMAGE_BL31) && CRASH_REPORTING */
+} __packed;
+
+CASSERT(sizeof(struct cpu_ops) == CPU_OPS_SIZE,
+ assert_cpu_ops_asm_c_different_sizes);
+
+long cpu_get_rev_var(void);
+void *get_cpu_ops_ptr(void);
+
+#endif /* __ASSEMBLER__ */
+#endif /* CPU_OPS_H */
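A hedged C sketch of consuming the structure above (illustrative only; the extra parentheses around CPU_IMPL_PN_MASK are needed because the macro body has no outer parentheses, being written with assembler use in mind):

    #include <stdbool.h>

    #include <lib/cpus/cpu_ops.h>

    /* illustrative: check whether this core's cpu_ops entry matches a given
     * MIDR, ignoring the variant and revision fields as the lookup does */
    static bool cpu_ops_matches(unsigned long midr)
    {
    	struct cpu_ops *ops = (struct cpu_ops *)get_cpu_ops_ptr();
    	unsigned long mask = (CPU_IMPL_PN_MASK);

    	return (ops->midr & mask) == (midr & mask);
    }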
diff --git a/include/lib/cpus/errata.h b/include/lib/cpus/errata.h
new file mode 100644
index 0000000..2080898
--- /dev/null
+++ b/include/lib/cpus/errata.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ERRATA_REPORT_H
+#define ERRATA_REPORT_H
+
+#include <lib/cpus/cpu_ops.h>
+
+
+#define ERRATUM_WA_FUNC_SIZE CPU_WORD_SIZE
+#define ERRATUM_CHECK_FUNC_SIZE CPU_WORD_SIZE
+#define ERRATUM_ID_SIZE 4
+#define ERRATUM_CVE_SIZE 2
+#define ERRATUM_CHOSEN_SIZE 1
+#define ERRATUM_MITIGATED_SIZE 1
+
+#define ERRATUM_WA_FUNC 0
+#define ERRATUM_CHECK_FUNC ERRATUM_WA_FUNC + ERRATUM_WA_FUNC_SIZE
+#define ERRATUM_ID ERRATUM_CHECK_FUNC + ERRATUM_CHECK_FUNC_SIZE
+#define ERRATUM_CVE ERRATUM_ID + ERRATUM_ID_SIZE
+#define ERRATUM_CHOSEN ERRATUM_CVE + ERRATUM_CVE_SIZE
+#define ERRATUM_MITIGATED ERRATUM_CHOSEN + ERRATUM_CHOSEN_SIZE
+#define ERRATUM_ENTRY_SIZE ERRATUM_MITIGATED + ERRATUM_MITIGATED_SIZE
+
+#ifndef __ASSEMBLER__
+#include <lib/cassert.h>
+
+void print_errata_status(void);
+void errata_print_msg(unsigned int status, const char *cpu, const char *id);
+
+/*
+ * NOTE that this structure differs in size between AArch32 and AArch64: the
+ * uintptr_t members change width and the alignment remains correct on both.
+ */
+struct erratum_entry {
+ uintptr_t (*wa_func)(uint64_t cpu_rev);
+ uintptr_t (*check_func)(uint64_t cpu_rev);
+	/* Will fit CVEs with up to 10 characters in the ID field */
+ uint32_t id;
+ /* Denote CVEs with their year or errata with 0 */
+ uint16_t cve;
+ uint8_t chosen;
+ /* TODO(errata ABI): placeholder for the mitigated field */
+ uint8_t _mitigated;
+} __packed;
+
+CASSERT(sizeof(struct erratum_entry) == ERRATUM_ENTRY_SIZE,
+ assert_erratum_entry_asm_c_different_sizes);
+#else
+
+/*
+ * errata framework macro helpers
+ *
+ * NOTE an erratum and CVE id could clash. However, both numbers are very large
+ * and the probability is minuscule. Working around this makes code very
+ * complicated and extremely difficult to read so it is not considered. In the
+ * unlikely event that this does happen, prepending the CVE id with a 0 should
+ * resolve the conflict
+ */
+#define ERRATUM(id) 0, id
+#define CVE(year, id) year, id
+#define NO_ISB 1
+#define NO_ASSERT 0
+#define NO_APPLY_AT_RESET 0
+#define APPLY_AT_RESET 1
+#define GET_CPU_REV 1
+#define NO_GET_CPU_REV 0
+
+/* useful for errata that end up always being worked around */
+#define ERRATUM_ALWAYS_CHOSEN 1
+
+#endif /* __ASSEMBLER__ */
+
+/* Errata status */
+#define ERRATA_NOT_APPLIES 0
+#define ERRATA_APPLIES 1
+#define ERRATA_MISSING 2
+
+/* Macro to get CPU revision code for checking errata version compatibility. */
+#define CPU_REV(r, p) ((r << 4) | p)
+
+#endif /* ERRATA_REPORT_H */
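The struct above is the C view of the list entries that the reset wrapper in cpu_macros.S walks; a hedged sketch of an equivalent walk in C (illustrative only, not an interface added by this patch):

    #include <lib/cpus/cpu_ops.h>
    #include <lib/cpus/errata.h>

    /* illustrative: count how many errata entries for this core were built in */
    static unsigned int count_chosen_errata(void)
    {
    	struct cpu_ops *ops = (struct cpu_ops *)get_cpu_ops_ptr();
    	struct erratum_entry *entry = ops->errata_list_start;
    	struct erratum_entry *end = ops->errata_list_end;
    	unsigned int count = 0U;

    	for (; entry != end; entry++) {
    		if (entry->chosen != 0U) {
    			count++;
    		}
    	}

    	return count;
    }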
diff --git a/include/lib/cpus/wa_cve_2017_5715.h b/include/lib/cpus/wa_cve_2017_5715.h
new file mode 100644
index 0000000..2ad56e1
--- /dev/null
+++ b/include/lib/cpus/wa_cve_2017_5715.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2018, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef WA_CVE_2017_5715_H
+#define WA_CVE_2017_5715_H
+
+int check_wa_cve_2017_5715(void);
+
+#endif /* WA_CVE_2017_5715_H */
diff --git a/include/lib/cpus/wa_cve_2018_3639.h b/include/lib/cpus/wa_cve_2018_3639.h
new file mode 100644
index 0000000..5a7c9bf
--- /dev/null
+++ b/include/lib/cpus/wa_cve_2018_3639.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2018, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef WA_CVE_2018_3639_H
+#define WA_CVE_2018_3639_H
+
+void *wa_cve_2018_3639_get_disable_ptr(void);
+
+#endif /* WA_CVE_2018_3639_H */
diff --git a/include/lib/cpus/wa_cve_2022_23960.h b/include/lib/cpus/wa_cve_2022_23960.h
new file mode 100644
index 0000000..50c0f76
--- /dev/null
+++ b/include/lib/cpus/wa_cve_2022_23960.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef WA_CVE_2022_23960_H
+#define WA_CVE_2022_23960_H
+
+int check_smccc_arch_wa3_applies(void);
+
+#endif /* WA_CVE_2022_23960_H */
diff --git a/include/lib/debugfs.h b/include/lib/debugfs.h
new file mode 100644
index 0000000..8ed237a
--- /dev/null
+++ b/include/lib/debugfs.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef DEBUGFS_H
+#define DEBUGFS_H
+
+#define NAMELEN 13 /* Maximum length of a file name */
+#define PATHLEN 41 /* Maximum length of a path */
+#define STATLEN 41 /* Size of static part of dir format */
+#define ROOTLEN (2 + 4) /* Size needed to encode root string */
+#define FILNAMLEN (2 + NAMELEN) /* Size needed to encode filename */
+#define DIRLEN (STATLEN + FILNAMLEN + 3*ROOTLEN) /* Size of dir entry */
+
+#define KSEEK_SET 0
+#define KSEEK_CUR 1
+#define KSEEK_END 2
+
+#define NELEM(tab) (sizeof(tab) / sizeof((tab)[0]))
+
+typedef unsigned short qid_t; /* FIXME: short type not recommended? */
+
+/*******************************************************************************
+ * This structure contains the necessary information to represent a 9p
+ * directory.
+ ******************************************************************************/
+typedef struct {
+ char name[NAMELEN];
+ long length;
+ unsigned char mode;
+ unsigned char index;
+ unsigned char dev;
+ qid_t qid;
+} dir_t;
+
+/* Permission definitions used as flags */
+#define O_READ (1 << 0)
+#define O_WRITE (1 << 1)
+#define O_RDWR (1 << 2)
+#define O_BIND (1 << 3)
+#define O_DIR (1 << 4)
+#define O_STAT (1 << 5)
+
+/* 9p interface */
+int mount(const char *srv, const char *mnt, const char *spec);
+int create(const char *name, int flags);
+int open(const char *name, int flags);
+int close(int fd);
+int read(int fd, void *buf, int n);
+int write(int fd, void *buf, int n);
+int seek(int fd, long off, int whence);
+int bind(const char *path, const char *where);
+int stat(const char *path, dir_t *dir);
+
+/* DebugFS initialization */
+void debugfs_init(void);
+int debugfs_smc_setup(void);
+
+/* Debugfs version returned through SMC interface */
+#define DEBUGFS_VERSION (0x000000001U)
+
+/* Function ID for accessing the debugfs interface */
+#define DEBUGFS_FID_VALUE (0x30U)
+
+#define is_debugfs_fid(_fid) \
+ (((_fid) & FUNCID_NUM_MASK) == DEBUGFS_FID_VALUE)
+
+/* Error code for debugfs SMC interface failures */
+#define DEBUGFS_E_INVALID_PARAMS (-2)
+#define DEBUGFS_E_DENIED (-3)
+
+uintptr_t debugfs_smc_handler(unsigned int smc_fid,
+ u_register_t cmd,
+ u_register_t arg2,
+ u_register_t arg3,
+ u_register_t arg4,
+ void *cookie,
+ void *handle,
+ uintptr_t flags);
+
+#endif /* DEBUGFS_H */
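A hedged sketch of a client of the 9p-style calls above (the server name, mount point and file path are made-up placeholders; only the prototypes and the O_READ flag come from this header):

    #include <lib/debugfs.h>

    /* illustrative: mount a 9p server and read the first bytes of one file */
    static int debugfs_read_example(void)
    {
    	char buf[64];
    	int fd, n;

    	if (mount("driver", "/mnt", "") < 0) {
    		return -1;
    	}

    	fd = open("/mnt/somefile", O_READ);
    	if (fd < 0) {
    		return -1;
    	}

    	n = read(fd, buf, sizeof(buf));
    	(void)close(fd);

    	return n;
    }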
diff --git a/include/lib/el3_runtime/aarch32/context.h b/include/lib/el3_runtime/aarch32/context.h
new file mode 100644
index 0000000..3b698e3
--- /dev/null
+++ b/include/lib/el3_runtime/aarch32/context.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2016-2020, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CONTEXT_H
+#define CONTEXT_H
+
+#include <lib/utils_def.h>
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'regs'
+ * structure at their correct offsets.
+ ******************************************************************************/
+#define CTX_REGS_OFFSET U(0x0)
+#define CTX_GPREG_R0 U(0x0)
+#define CTX_GPREG_R1 U(0x4)
+#define CTX_GPREG_R2 U(0x8)
+#define CTX_GPREG_R3 U(0xC)
+#define CTX_LR U(0x10)
+#define CTX_SCR U(0x14)
+#define CTX_SPSR U(0x18)
+#define CTX_NS_SCTLR U(0x1C)
+#define CTX_REGS_END U(0x20)
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+#include <lib/cassert.h>
+
+/*
+ * Common constants to help define the 'cpu_context' structure and its
+ * members below.
+ */
+#define WORD_SHIFT U(2)
+#define DEFINE_REG_STRUCT(name, num_regs) \
+ typedef struct name { \
+ uint32_t ctx_regs[num_regs]; \
+ } __aligned(8) name##_t
+
+/* Constants to determine the size of individual context structures */
+#define CTX_REG_ALL (CTX_REGS_END >> WORD_SHIFT)
+
+DEFINE_REG_STRUCT(regs, CTX_REG_ALL);
+
+#undef CTX_REG_ALL
+
+#define read_ctx_reg(ctx, offset) ((ctx)->ctx_regs[offset >> WORD_SHIFT])
+#define write_ctx_reg(ctx, offset, val) (((ctx)->ctx_regs[offset >> WORD_SHIFT]) \
+ = val)
+typedef struct cpu_context {
+ regs_t regs_ctx;
+} cpu_context_t;
+
+/* Macros to access members of the 'cpu_context_t' structure */
+#define get_regs_ctx(h) (&((cpu_context_t *) h)->regs_ctx)
+
+/*
+ * Compile time assertions related to the 'cpu_context' structure to
+ * ensure that the assembler and the compiler view of the offsets of
+ * the structure members is the same.
+ */
+CASSERT(CTX_REGS_OFFSET == __builtin_offsetof(cpu_context_t, regs_ctx),
+ assert_core_context_regs_offset_mismatch);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* CONTEXT_H */
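A hedged sketch of the accessors above in use (the register and value are arbitrary; the plain <context.h> include assumes the per-architecture include path used by the build):

    #include <context.h>

    /* illustrative: stage a return value in the saved r0 of a lower-EL context */
    static void ctx_set_ret_value(cpu_context_t *ctx, uint32_t ret)
    {
    	regs_t *regs = get_regs_ctx(ctx);

    	write_ctx_reg(regs, CTX_GPREG_R0, ret);
    }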
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
new file mode 100644
index 0000000..47d91de
--- /dev/null
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -0,0 +1,559 @@
+/*
+ * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CONTEXT_H
+#define CONTEXT_H
+
+#include <lib/el3_runtime/cpu_data.h>
+#include <lib/utils_def.h>
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'gp_regs'
+ * structure at their correct offsets.
+ ******************************************************************************/
+#define CTX_GPREGS_OFFSET U(0x0)
+#define CTX_GPREG_X0 U(0x0)
+#define CTX_GPREG_X1 U(0x8)
+#define CTX_GPREG_X2 U(0x10)
+#define CTX_GPREG_X3 U(0x18)
+#define CTX_GPREG_X4 U(0x20)
+#define CTX_GPREG_X5 U(0x28)
+#define CTX_GPREG_X6 U(0x30)
+#define CTX_GPREG_X7 U(0x38)
+#define CTX_GPREG_X8 U(0x40)
+#define CTX_GPREG_X9 U(0x48)
+#define CTX_GPREG_X10 U(0x50)
+#define CTX_GPREG_X11 U(0x58)
+#define CTX_GPREG_X12 U(0x60)
+#define CTX_GPREG_X13 U(0x68)
+#define CTX_GPREG_X14 U(0x70)
+#define CTX_GPREG_X15 U(0x78)
+#define CTX_GPREG_X16 U(0x80)
+#define CTX_GPREG_X17 U(0x88)
+#define CTX_GPREG_X18 U(0x90)
+#define CTX_GPREG_X19 U(0x98)
+#define CTX_GPREG_X20 U(0xa0)
+#define CTX_GPREG_X21 U(0xa8)
+#define CTX_GPREG_X22 U(0xb0)
+#define CTX_GPREG_X23 U(0xb8)
+#define CTX_GPREG_X24 U(0xc0)
+#define CTX_GPREG_X25 U(0xc8)
+#define CTX_GPREG_X26 U(0xd0)
+#define CTX_GPREG_X27 U(0xd8)
+#define CTX_GPREG_X28 U(0xe0)
+#define CTX_GPREG_X29 U(0xe8)
+#define CTX_GPREG_LR U(0xf0)
+#define CTX_GPREG_SP_EL0 U(0xf8)
+#define CTX_GPREGS_END U(0x100)
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'el3_state'
+ * structure at their correct offsets. Note that some of the registers are only
+ * 32-bits wide but are stored as 64-bit values for convenience
+ ******************************************************************************/
+#define CTX_EL3STATE_OFFSET (CTX_GPREGS_OFFSET + CTX_GPREGS_END)
+#define CTX_SCR_EL3 U(0x0)
+#define CTX_ESR_EL3 U(0x8)
+#define CTX_RUNTIME_SP U(0x10)
+#define CTX_SPSR_EL3 U(0x18)
+#define CTX_ELR_EL3 U(0x20)
+#define CTX_PMCR_EL0 U(0x28)
+#define CTX_IS_IN_EL3 U(0x30)
+#define CTX_MPAM3_EL3 U(0x38)
+/* Constants required in supporting nested exception in EL3 */
+#define CTX_SAVED_ELR_EL3 U(0x40)
+/*
+ * General purpose flag used to save various EL3 states:
+ * FFH mode : identifies whether a nested exception is being handled
+ * KFH mode : used as a counter value
+ */
+#define CTX_NESTED_EA_FLAG U(0x48)
+#if FFH_SUPPORT
+ #define CTX_SAVED_ESR_EL3 U(0x50)
+ #define CTX_SAVED_SPSR_EL3 U(0x58)
+ #define CTX_SAVED_GPREG_LR U(0x60)
+ #define CTX_EL3STATE_END U(0x70) /* Align to the next 16 byte boundary */
+#else
+ #define CTX_EL3STATE_END U(0x50) /* Align to the next 16 byte boundary */
+#endif
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the
+ * 'el1_sys_regs' structure at their correct offsets. Note that some of the
+ * registers are only 32-bits wide but are stored as 64-bit values for
+ * convenience
+ ******************************************************************************/
+#define CTX_EL1_SYSREGS_OFFSET (CTX_EL3STATE_OFFSET + CTX_EL3STATE_END)
+#define CTX_SPSR_EL1 U(0x0)
+#define CTX_ELR_EL1 U(0x8)
+#define CTX_SCTLR_EL1 U(0x10)
+#define CTX_TCR_EL1 U(0x18)
+#define CTX_CPACR_EL1 U(0x20)
+#define CTX_CSSELR_EL1 U(0x28)
+#define CTX_SP_EL1 U(0x30)
+#define CTX_ESR_EL1 U(0x38)
+#define CTX_TTBR0_EL1 U(0x40)
+#define CTX_TTBR1_EL1 U(0x48)
+#define CTX_MAIR_EL1 U(0x50)
+#define CTX_AMAIR_EL1 U(0x58)
+#define CTX_ACTLR_EL1 U(0x60)
+#define CTX_TPIDR_EL1 U(0x68)
+#define CTX_TPIDR_EL0 U(0x70)
+#define CTX_TPIDRRO_EL0 U(0x78)
+#define CTX_PAR_EL1 U(0x80)
+#define CTX_FAR_EL1 U(0x88)
+#define CTX_AFSR0_EL1 U(0x90)
+#define CTX_AFSR1_EL1 U(0x98)
+#define CTX_CONTEXTIDR_EL1 U(0xa0)
+#define CTX_VBAR_EL1 U(0xa8)
+
+/*
+ * If the platform is AArch64-only, there is no need to save and restore these
+ * AArch32 registers.
+ */
+#if CTX_INCLUDE_AARCH32_REGS
+#define CTX_SPSR_ABT U(0xb0) /* Align to the next 16 byte boundary */
+#define CTX_SPSR_UND U(0xb8)
+#define CTX_SPSR_IRQ U(0xc0)
+#define CTX_SPSR_FIQ U(0xc8)
+#define CTX_DACR32_EL2 U(0xd0)
+#define CTX_IFSR32_EL2 U(0xd8)
+#define CTX_AARCH32_END U(0xe0) /* Align to the next 16 byte boundary */
+#else
+#define CTX_AARCH32_END U(0xb0) /* Align to the next 16 byte boundary */
+#endif /* CTX_INCLUDE_AARCH32_REGS */
+
+/*
+ * If the timer registers aren't saved and restored, we don't have to reserve
+ * space for them in the context
+ */
+#if NS_TIMER_SWITCH
+#define CTX_CNTP_CTL_EL0 (CTX_AARCH32_END + U(0x0))
+#define CTX_CNTP_CVAL_EL0 (CTX_AARCH32_END + U(0x8))
+#define CTX_CNTV_CTL_EL0 (CTX_AARCH32_END + U(0x10))
+#define CTX_CNTV_CVAL_EL0 (CTX_AARCH32_END + U(0x18))
+#define CTX_CNTKCTL_EL1 (CTX_AARCH32_END + U(0x20))
+#define CTX_TIMER_SYSREGS_END (CTX_AARCH32_END + U(0x30)) /* Align to the next 16 byte boundary */
+#else
+#define CTX_TIMER_SYSREGS_END CTX_AARCH32_END
+#endif /* NS_TIMER_SWITCH */
+
+#if CTX_INCLUDE_MTE_REGS
+#define CTX_TFSRE0_EL1 (CTX_TIMER_SYSREGS_END + U(0x0))
+#define CTX_TFSR_EL1 (CTX_TIMER_SYSREGS_END + U(0x8))
+#define CTX_RGSR_EL1 (CTX_TIMER_SYSREGS_END + U(0x10))
+#define CTX_GCR_EL1 (CTX_TIMER_SYSREGS_END + U(0x18))
+
+/* Align to the next 16 byte boundary */
+#define CTX_MTE_REGS_END (CTX_TIMER_SYSREGS_END + U(0x20))
+#else
+#define CTX_MTE_REGS_END CTX_TIMER_SYSREGS_END
+#endif /* CTX_INCLUDE_MTE_REGS */
+
+/*
+ * End of system registers.
+ */
+#define CTX_EL1_SYSREGS_END CTX_MTE_REGS_END
+
+/*
+ * EL2 register set
+ */
+
+#if CTX_INCLUDE_EL2_REGS
+/* For later discussion
+ * ICH_AP0R<n>_EL2
+ * ICH_AP1R<n>_EL2
+ * AMEVCNTVOFF0<n>_EL2
+ * AMEVCNTVOFF1<n>_EL2
+ * ICH_LR<n>_EL2
+ */
+#define CTX_EL2_SYSREGS_OFFSET (CTX_EL1_SYSREGS_OFFSET + CTX_EL1_SYSREGS_END)
+
+#define CTX_ACTLR_EL2 U(0x0)
+#define CTX_AFSR0_EL2 U(0x8)
+#define CTX_AFSR1_EL2 U(0x10)
+#define CTX_AMAIR_EL2 U(0x18)
+#define CTX_CNTHCTL_EL2 U(0x20)
+#define CTX_CNTVOFF_EL2 U(0x28)
+#define CTX_CPTR_EL2 U(0x30)
+#define CTX_DBGVCR32_EL2 U(0x38)
+#define CTX_ELR_EL2 U(0x40)
+#define CTX_ESR_EL2 U(0x48)
+#define CTX_FAR_EL2 U(0x50)
+#define CTX_HACR_EL2 U(0x58)
+#define CTX_HCR_EL2 U(0x60)
+#define CTX_HPFAR_EL2 U(0x68)
+#define CTX_HSTR_EL2 U(0x70)
+#define CTX_ICC_SRE_EL2 U(0x78)
+#define CTX_ICH_HCR_EL2 U(0x80)
+#define CTX_ICH_VMCR_EL2 U(0x88)
+#define CTX_MAIR_EL2 U(0x90)
+#define CTX_MDCR_EL2 U(0x98)
+#define CTX_PMSCR_EL2 U(0xa0)
+#define CTX_SCTLR_EL2 U(0xa8)
+#define CTX_SPSR_EL2 U(0xb0)
+#define CTX_SP_EL2 U(0xb8)
+#define CTX_TCR_EL2 U(0xc0)
+#define CTX_TPIDR_EL2 U(0xc8)
+#define CTX_TTBR0_EL2 U(0xd0)
+#define CTX_VBAR_EL2 U(0xd8)
+#define CTX_VMPIDR_EL2 U(0xe0)
+#define CTX_VPIDR_EL2 U(0xe8)
+#define CTX_VTCR_EL2 U(0xf0)
+#define CTX_VTTBR_EL2 U(0xf8)
+
+// Only if MTE registers in use
+#define CTX_TFSR_EL2 U(0x100)
+
+#define CTX_MPAM2_EL2 U(0x108)
+#define CTX_MPAMHCR_EL2 U(0x110)
+#define CTX_MPAMVPM0_EL2 U(0x118)
+#define CTX_MPAMVPM1_EL2 U(0x120)
+#define CTX_MPAMVPM2_EL2 U(0x128)
+#define CTX_MPAMVPM3_EL2 U(0x130)
+#define CTX_MPAMVPM4_EL2 U(0x138)
+#define CTX_MPAMVPM5_EL2 U(0x140)
+#define CTX_MPAMVPM6_EL2 U(0x148)
+#define CTX_MPAMVPM7_EL2 U(0x150)
+#define CTX_MPAMVPMV_EL2 U(0x158)
+
+// Starting with Armv8.6
+#define CTX_HDFGRTR_EL2 U(0x160)
+#define CTX_HAFGRTR_EL2 U(0x168)
+#define CTX_HDFGWTR_EL2 U(0x170)
+#define CTX_HFGITR_EL2 U(0x178)
+#define CTX_HFGRTR_EL2 U(0x180)
+#define CTX_HFGWTR_EL2 U(0x188)
+#define CTX_CNTPOFF_EL2 U(0x190)
+
+// Starting with Armv8.4
+#define CTX_CONTEXTIDR_EL2 U(0x198)
+#define CTX_TTBR1_EL2 U(0x1a0)
+#define CTX_VDISR_EL2 U(0x1a8)
+#define CTX_VSESR_EL2 U(0x1b0)
+#define CTX_VNCR_EL2 U(0x1b8)
+#define CTX_TRFCR_EL2 U(0x1c0)
+
+// Starting with Armv8.5
+#define CTX_SCXTNUM_EL2 U(0x1c8)
+
+// Register for FEAT_HCX
+#define CTX_HCRX_EL2 U(0x1d0)
+
+// Starting with Armv8.9
+#define CTX_TCR2_EL2 U(0x1d8)
+#define CTX_POR_EL2 U(0x1e0)
+#define CTX_PIRE0_EL2 U(0x1e8)
+#define CTX_PIR_EL2 U(0x1f0)
+#define CTX_S2PIR_EL2 U(0x1f8)
+#define CTX_GCSCR_EL2 U(0x200)
+#define CTX_GCSPR_EL2 U(0x208)
+
+/* Align to the next 16 byte boundary */
+#define CTX_EL2_SYSREGS_END U(0x210)
+
+#endif /* CTX_INCLUDE_EL2_REGS */
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'fp_regs'
+ * structure at their correct offsets.
+ ******************************************************************************/
+#if CTX_INCLUDE_EL2_REGS
+# define CTX_FPREGS_OFFSET (CTX_EL2_SYSREGS_OFFSET + CTX_EL2_SYSREGS_END)
+#else
+# define CTX_FPREGS_OFFSET (CTX_EL1_SYSREGS_OFFSET + CTX_EL1_SYSREGS_END)
+#endif
+#if CTX_INCLUDE_FPREGS
+#define CTX_FP_Q0 U(0x0)
+#define CTX_FP_Q1 U(0x10)
+#define CTX_FP_Q2 U(0x20)
+#define CTX_FP_Q3 U(0x30)
+#define CTX_FP_Q4 U(0x40)
+#define CTX_FP_Q5 U(0x50)
+#define CTX_FP_Q6 U(0x60)
+#define CTX_FP_Q7 U(0x70)
+#define CTX_FP_Q8 U(0x80)
+#define CTX_FP_Q9 U(0x90)
+#define CTX_FP_Q10 U(0xa0)
+#define CTX_FP_Q11 U(0xb0)
+#define CTX_FP_Q12 U(0xc0)
+#define CTX_FP_Q13 U(0xd0)
+#define CTX_FP_Q14 U(0xe0)
+#define CTX_FP_Q15 U(0xf0)
+#define CTX_FP_Q16 U(0x100)
+#define CTX_FP_Q17 U(0x110)
+#define CTX_FP_Q18 U(0x120)
+#define CTX_FP_Q19 U(0x130)
+#define CTX_FP_Q20 U(0x140)
+#define CTX_FP_Q21 U(0x150)
+#define CTX_FP_Q22 U(0x160)
+#define CTX_FP_Q23 U(0x170)
+#define CTX_FP_Q24 U(0x180)
+#define CTX_FP_Q25 U(0x190)
+#define CTX_FP_Q26 U(0x1a0)
+#define CTX_FP_Q27 U(0x1b0)
+#define CTX_FP_Q28 U(0x1c0)
+#define CTX_FP_Q29 U(0x1d0)
+#define CTX_FP_Q30 U(0x1e0)
+#define CTX_FP_Q31 U(0x1f0)
+#define CTX_FP_FPSR U(0x200)
+#define CTX_FP_FPCR U(0x208)
+#if CTX_INCLUDE_AARCH32_REGS
+#define CTX_FP_FPEXC32_EL2 U(0x210)
+#define CTX_FPREGS_END U(0x220) /* Align to the next 16 byte boundary */
+#else
+#define CTX_FPREGS_END U(0x210) /* Align to the next 16 byte boundary */
+#endif
+#else
+#define CTX_FPREGS_END U(0)
+#endif
+
+/*******************************************************************************
+ * Registers related to CVE-2018-3639
+ ******************************************************************************/
+#define CTX_CVE_2018_3639_OFFSET (CTX_FPREGS_OFFSET + CTX_FPREGS_END)
+#define CTX_CVE_2018_3639_DISABLE U(0)
+#define CTX_CVE_2018_3639_END U(0x10) /* Align to the next 16 byte boundary */
+
+/*******************************************************************************
+ * Registers related to ARMv8.3-PAuth.
+ ******************************************************************************/
+#define CTX_PAUTH_REGS_OFFSET (CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_END)
+#if CTX_INCLUDE_PAUTH_REGS
+#define CTX_PACIAKEY_LO U(0x0)
+#define CTX_PACIAKEY_HI U(0x8)
+#define CTX_PACIBKEY_LO U(0x10)
+#define CTX_PACIBKEY_HI U(0x18)
+#define CTX_PACDAKEY_LO U(0x20)
+#define CTX_PACDAKEY_HI U(0x28)
+#define CTX_PACDBKEY_LO U(0x30)
+#define CTX_PACDBKEY_HI U(0x38)
+#define CTX_PACGAKEY_LO U(0x40)
+#define CTX_PACGAKEY_HI U(0x48)
+#define CTX_PAUTH_REGS_END U(0x50) /* Align to the next 16 byte boundary */
+#else
+#define CTX_PAUTH_REGS_END U(0)
+#endif /* CTX_INCLUDE_PAUTH_REGS */
+
+/*******************************************************************************
+ * Registers initialised in a per-world context.
+ ******************************************************************************/
+#define CTX_CPTR_EL3 U(0x0)
+#define CTX_ZCR_EL3 U(0x8)
+#define CTX_GLOBAL_EL3STATE_END U(0x10)
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+#include <lib/cassert.h>
+
+/*
+ * Common constants to help define the 'cpu_context' structure and its
+ * members below.
+ */
+#define DWORD_SHIFT U(3)
+#define DEFINE_REG_STRUCT(name, num_regs) \
+ typedef struct name { \
+ uint64_t ctx_regs[num_regs]; \
+ } __aligned(16) name##_t
+
+/* Constants to determine the size of individual context structures */
+#define CTX_GPREG_ALL (CTX_GPREGS_END >> DWORD_SHIFT)
+#define CTX_EL1_SYSREGS_ALL (CTX_EL1_SYSREGS_END >> DWORD_SHIFT)
+#if CTX_INCLUDE_EL2_REGS
+# define CTX_EL2_SYSREGS_ALL (CTX_EL2_SYSREGS_END >> DWORD_SHIFT)
+#endif
+#if CTX_INCLUDE_FPREGS
+# define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT)
+#endif
+#define CTX_EL3STATE_ALL (CTX_EL3STATE_END >> DWORD_SHIFT)
+#define CTX_CVE_2018_3639_ALL (CTX_CVE_2018_3639_END >> DWORD_SHIFT)
+#if CTX_INCLUDE_PAUTH_REGS
+# define CTX_PAUTH_REGS_ALL (CTX_PAUTH_REGS_END >> DWORD_SHIFT)
+#endif
+
+/*
+ * AArch64 general purpose register context structure. Usually only x0-x18
+ * and lr are saved, as the compiler is expected to preserve the callee-saved
+ * registers if the C runtime uses them and the assembler does not touch the
+ * rest. However, on a world switch during exception handling the callee-saved
+ * registers need to be saved as well.
+ */
+DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
+
+/*
+ * AArch64 EL1 system register context structure for preserving the
+ * architectural state during world switches.
+ */
+DEFINE_REG_STRUCT(el1_sysregs, CTX_EL1_SYSREGS_ALL);
+
+
+/*
+ * AArch64 EL2 system register context structure for preserving the
+ * architectural state during world switches.
+ */
+#if CTX_INCLUDE_EL2_REGS
+DEFINE_REG_STRUCT(el2_sysregs, CTX_EL2_SYSREGS_ALL);
+#endif
+
+/*
+ * AArch64 floating point register context structure for preserving
+ * the floating point state during switches from one security state to
+ * another.
+ */
+#if CTX_INCLUDE_FPREGS
+DEFINE_REG_STRUCT(fp_regs, CTX_FPREG_ALL);
+#endif
+
+/*
+ * Miscellaneous registers used by EL3 firmware to maintain its state
+ * across exception entries and exits
+ */
+DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL);
+
+/* Function pointer used by CVE-2018-3639 dynamic mitigation */
+DEFINE_REG_STRUCT(cve_2018_3639, CTX_CVE_2018_3639_ALL);
+
+/* Registers associated to ARMv8.3-PAuth */
+#if CTX_INCLUDE_PAUTH_REGS
+DEFINE_REG_STRUCT(pauth, CTX_PAUTH_REGS_ALL);
+#endif
+
+/*
+ * Macros to access members of any of the above structures using their
+ * offsets
+ */
+#define read_ctx_reg(ctx, offset) ((ctx)->ctx_regs[(offset) >> DWORD_SHIFT])
+#define write_ctx_reg(ctx, offset, val) (((ctx)->ctx_regs[(offset) >> DWORD_SHIFT]) \
+ = (uint64_t) (val))
+
+/*
+ * Top-level context structure which is used by EL3 firmware to preserve
+ * the state of a core at the next lower EL in a given security state and
+ * save enough EL3 meta data to be able to return to that EL and security
+ * state. The context management library will be used to ensure that
+ * SP_EL3 always points to an instance of this structure at exception
+ * entry and exit.
+ */
+typedef struct cpu_context {
+ gp_regs_t gpregs_ctx;
+ el3_state_t el3state_ctx;
+ el1_sysregs_t el1_sysregs_ctx;
+#if CTX_INCLUDE_EL2_REGS
+ el2_sysregs_t el2_sysregs_ctx;
+#endif
+#if CTX_INCLUDE_FPREGS
+ fp_regs_t fpregs_ctx;
+#endif
+ cve_2018_3639_t cve_2018_3639_ctx;
+#if CTX_INCLUDE_PAUTH_REGS
+ pauth_t pauth_ctx;
+#endif
+} cpu_context_t;
+
+/*
+ * Per-World Context.
+ * It stores registers whose values can be shared across CPUs.
+ */
+typedef struct per_world_context {
+ uint64_t ctx_cptr_el3;
+ uint64_t ctx_zcr_el3;
+} per_world_context_t;
+
+extern per_world_context_t per_world_context[CPU_DATA_CONTEXT_NUM];
+
+/* Macros to access members of the 'cpu_context_t' structure */
+#define get_el3state_ctx(h) (&((cpu_context_t *) h)->el3state_ctx)
+#if CTX_INCLUDE_FPREGS
+# define get_fpregs_ctx(h) (&((cpu_context_t *) h)->fpregs_ctx)
+#endif
+#define get_el1_sysregs_ctx(h) (&((cpu_context_t *) h)->el1_sysregs_ctx)
+#if CTX_INCLUDE_EL2_REGS
+# define get_el2_sysregs_ctx(h) (&((cpu_context_t *) h)->el2_sysregs_ctx)
+#endif
+#define get_gpregs_ctx(h) (&((cpu_context_t *) h)->gpregs_ctx)
+#define get_cve_2018_3639_ctx(h) (&((cpu_context_t *) h)->cve_2018_3639_ctx)
+#if CTX_INCLUDE_PAUTH_REGS
+# define get_pauth_ctx(h) (&((cpu_context_t *) h)->pauth_ctx)
+#endif
+
+/*
+ * Compile time assertions related to the 'cpu_context' structure to
+ * ensure that the assembler and the compiler view of the offsets of
+ * the structure members is the same.
+ */
+CASSERT(CTX_GPREGS_OFFSET == __builtin_offsetof(cpu_context_t, gpregs_ctx),
+ assert_core_context_gp_offset_mismatch);
+CASSERT(CTX_EL1_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, el1_sysregs_ctx),
+ assert_core_context_el1_sys_offset_mismatch);
+#if CTX_INCLUDE_EL2_REGS
+CASSERT(CTX_EL2_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, el2_sysregs_ctx),
+ assert_core_context_el2_sys_offset_mismatch);
+#endif
+#if CTX_INCLUDE_FPREGS
+CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx),
+ assert_core_context_fp_offset_mismatch);
+#endif
+CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx),
+ assert_core_context_el3state_offset_mismatch);
+CASSERT(CTX_CVE_2018_3639_OFFSET == __builtin_offsetof(cpu_context_t, cve_2018_3639_ctx),
+ assert_core_context_cve_2018_3639_offset_mismatch);
+#if CTX_INCLUDE_PAUTH_REGS
+CASSERT(CTX_PAUTH_REGS_OFFSET == __builtin_offsetof(cpu_context_t, pauth_ctx),
+ assert_core_context_pauth_offset_mismatch);
+#endif
+
+/*
+ * Helper macro to set the general purpose registers that correspond to
+ * parameters in an aapcs_64 call i.e. x0-x7
+ */
+#define set_aapcs_args0(ctx, x0) do { \
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, x0); \
+ } while (0)
+#define set_aapcs_args1(ctx, x0, x1) do { \
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1, x1); \
+ set_aapcs_args0(ctx, x0); \
+ } while (0)
+#define set_aapcs_args2(ctx, x0, x1, x2) do { \
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, x2); \
+ set_aapcs_args1(ctx, x0, x1); \
+ } while (0)
+#define set_aapcs_args3(ctx, x0, x1, x2, x3) do { \
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, x3); \
+ set_aapcs_args2(ctx, x0, x1, x2); \
+ } while (0)
+#define set_aapcs_args4(ctx, x0, x1, x2, x3, x4) do { \
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X4, x4); \
+ set_aapcs_args3(ctx, x0, x1, x2, x3); \
+ } while (0)
+#define set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5) do { \
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X5, x5); \
+ set_aapcs_args4(ctx, x0, x1, x2, x3, x4); \
+ } while (0)
+#define set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6) do { \
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X6, x6); \
+ set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5); \
+ } while (0)
+#define set_aapcs_args7(ctx, x0, x1, x2, x3, x4, x5, x6, x7) do { \
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X7, x7); \
+ set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6); \
+ } while (0)
+
+/*******************************************************************************
+ * Function prototypes
+ ******************************************************************************/
+void el1_sysregs_context_save(el1_sysregs_t *regs);
+void el1_sysregs_context_restore(el1_sysregs_t *regs);
+
+#if CTX_INCLUDE_FPREGS
+void fpregs_context_save(fp_regs_t *regs);
+void fpregs_context_restore(fp_regs_t *regs);
+#endif
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* CONTEXT_H */
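
For illustration only, a minimal sketch of how an EL3 service holding a 'cpu_context' handle can program the general purpose registers seen by the lower EL on the next exception return, using only the accessors defined in this header. The function name, handle and argument values are hypothetical.

void pass_args_to_lower_el(void *handle)
{
	cpu_context_t *ctx = (cpu_context_t *)handle;

	/* x0 and x1 of the lower EL after the next ERET */
	set_aapcs_args1(ctx, 0x1UL, 0x2UL);

	/* Equivalent single-register write through the generic accessor */
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, 0x3UL);
}
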
diff --git a/include/lib/el3_runtime/context_mgmt.h b/include/lib/el3_runtime/context_mgmt.h
new file mode 100644
index 0000000..b2bdaf5
--- /dev/null
+++ b/include/lib/el3_runtime/context_mgmt.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CONTEXT_MGMT_H
+#define CONTEXT_MGMT_H
+
+#include <assert.h>
+#include <context.h>
+#include <stdint.h>
+
+#include <arch.h>
+
+/*******************************************************************************
+ * Forward declarations
+ ******************************************************************************/
+struct entry_point_info;
+
+/*******************************************************************************
+ * Function & variable prototypes
+ ******************************************************************************/
+void cm_init(void);
+void *cm_get_context_by_index(unsigned int cpu_idx,
+ unsigned int security_state);
+void cm_set_context_by_index(unsigned int cpu_idx,
+ void *context,
+ unsigned int security_state);
+void *cm_get_context(uint32_t security_state);
+void cm_set_context(void *context, uint32_t security_state);
+void cm_init_my_context(const struct entry_point_info *ep);
+void cm_init_context_by_index(unsigned int cpu_idx,
+ const struct entry_point_info *ep);
+void cm_setup_context(cpu_context_t *ctx, const struct entry_point_info *ep);
+void cm_prepare_el3_exit(uint32_t security_state);
+void cm_prepare_el3_exit_ns(void);
+
+#ifdef __aarch64__
+#if IMAGE_BL31
+void cm_manage_extensions_el3(void);
+void manage_extensions_nonsecure_per_world(void);
+#endif
+#if CTX_INCLUDE_EL2_REGS
+void cm_el2_sysregs_context_save(uint32_t security_state);
+void cm_el2_sysregs_context_restore(uint32_t security_state);
+#endif
+
+void cm_el1_sysregs_context_save(uint32_t security_state);
+void cm_el1_sysregs_context_restore(uint32_t security_state);
+void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint);
+void cm_set_elr_spsr_el3(uint32_t security_state,
+ uintptr_t entrypoint, uint32_t spsr);
+void cm_write_scr_el3_bit(uint32_t security_state,
+ uint32_t bit_pos,
+ uint32_t value);
+void cm_set_next_eret_context(uint32_t security_state);
+u_register_t cm_get_scr_el3(uint32_t security_state);
+
+/* Inline definitions */
+
+/*******************************************************************************
+ * This function is used to program the context that is used for exception
+ * return. It initializes SP_EL3 to point to a 'cpu_context' set up for the
+ * required security state.
+ ******************************************************************************/
+static inline void cm_set_next_context(void *context)
+{
+#if ENABLE_ASSERTIONS
+ uint64_t sp_mode;
+
+ /*
+ * Check that this function is called with SP_EL0 as the stack
+ * pointer
+ */
+ __asm__ volatile("mrs %0, SPSel\n"
+ : "=r" (sp_mode));
+
+ assert(sp_mode == MODE_SP_EL0);
+#endif /* ENABLE_ASSERTIONS */
+
+ __asm__ volatile("msr spsel, #1\n"
+ "mov sp, %0\n"
+ "msr spsel, #0\n"
+ : : "r" (context));
+}
+
+#else
+void *cm_get_next_context(void);
+void cm_set_next_context(void *context);
+static inline void cm_manage_extensions_el3(void) {}
+static inline void manage_extensions_nonsecure_per_world(void) {}
+#endif /* __aarch64__ */
+
+#endif /* CONTEXT_MGMT_H */
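
A sketch, for illustration, of the sequence an SPD-style service would typically follow with the prototypes above when switching from the normal world to the secure world; it assumes both contexts have already been initialised (e.g. via cm_init_my_context()).

	/* Save the outgoing normal-world EL1 state */
	cm_el1_sysregs_context_save(NON_SECURE);

	/* Restore the secure-world EL1 state and select its context for ERET */
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);
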
diff --git a/include/lib/el3_runtime/cpu_data.h b/include/lib/el3_runtime/cpu_data.h
new file mode 100644
index 0000000..2c7b619
--- /dev/null
+++ b/include/lib/el3_runtime/cpu_data.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2014-2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CPU_DATA_H
+#define CPU_DATA_H
+
+#include <platform_def.h> /* CACHE_WRITEBACK_GRANULE required */
+
+#include <bl31/ehf.h>
+
+/* Size of psci_cpu_data structure */
+#define PSCI_CPU_DATA_SIZE 12
+
+#ifdef __aarch64__
+
+/* 8-bytes aligned size of psci_cpu_data structure */
+#define PSCI_CPU_DATA_SIZE_ALIGNED ((PSCI_CPU_DATA_SIZE + 7) & ~7)
+
+#if ENABLE_RME
+/* Size of cpu_context array */
+#define CPU_DATA_CONTEXT_NUM 3
+/* Offset of cpu_ops_ptr, size 8 bytes */
+#define CPU_DATA_CPU_OPS_PTR 0x18
+#else /* ENABLE_RME */
+#define CPU_DATA_CONTEXT_NUM 2
+#define CPU_DATA_CPU_OPS_PTR 0x10
+#endif /* ENABLE_RME */
+
+#if ENABLE_PAUTH
+/* 8-bytes aligned offset of apiakey[2], size 16 bytes */
+#define CPU_DATA_APIAKEY_OFFSET (0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \
+ + CPU_DATA_CPU_OPS_PTR)
+#define CPU_DATA_CRASH_BUF_OFFSET (0x10 + CPU_DATA_APIAKEY_OFFSET)
+#else /* ENABLE_PAUTH */
+#define CPU_DATA_CRASH_BUF_OFFSET (0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \
+ + CPU_DATA_CPU_OPS_PTR)
+#endif /* ENABLE_PAUTH */
+
+/* need enough space in crash buffer to save 8 registers */
+#define CPU_DATA_CRASH_BUF_SIZE 64
+
+#else /* !__aarch64__ */
+
+#if CRASH_REPORTING
+#error "Crash reporting is not supported in AArch32"
+#endif
+#define CPU_DATA_CPU_OPS_PTR 0x0
+#define CPU_DATA_CRASH_BUF_OFFSET (0x4 + PSCI_CPU_DATA_SIZE)
+
+#endif /* __aarch64__ */
+
+#if CRASH_REPORTING
+#define CPU_DATA_CRASH_BUF_END (CPU_DATA_CRASH_BUF_OFFSET + \
+ CPU_DATA_CRASH_BUF_SIZE)
+#else
+#define CPU_DATA_CRASH_BUF_END CPU_DATA_CRASH_BUF_OFFSET
+#endif
+
+/* cpu_data size is the data size rounded up to the platform cache line size */
+#define CPU_DATA_SIZE (((CPU_DATA_CRASH_BUF_END + \
+ CACHE_WRITEBACK_GRANULE - 1) / \
+ CACHE_WRITEBACK_GRANULE) * \
+ CACHE_WRITEBACK_GRANULE)
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+/* Temporary space to store PMF timestamps from assembly code */
+#define CPU_DATA_PMF_TS_COUNT 1
+#define CPU_DATA_PMF_TS0_OFFSET CPU_DATA_CRASH_BUF_END
+#define CPU_DATA_PMF_TS0_IDX 0
+#endif
+
+#ifndef __ASSEMBLER__
+
+#include <assert.h>
+#include <stdint.h>
+
+#include <arch_helpers.h>
+#include <lib/cassert.h>
+#include <lib/psci/psci.h>
+
+#include <platform_def.h>
+
+/* Offsets for the cpu_data structure */
+#define CPU_DATA_PSCI_LOCK_OFFSET __builtin_offsetof\
+ (cpu_data_t, psci_svc_cpu_data.pcpu_bakery_info)
+
+#if PLAT_PCPU_DATA_SIZE
+#define CPU_DATA_PLAT_PCPU_OFFSET __builtin_offsetof\
+ (cpu_data_t, platform_cpu_data)
+#endif
+
+typedef enum context_pas {
+ CPU_CONTEXT_SECURE = 0,
+ CPU_CONTEXT_NS,
+#if ENABLE_RME
+ CPU_CONTEXT_REALM,
+#endif
+ CPU_CONTEXT_NUM
+} context_pas_t;
+
+/*******************************************************************************
+ * Function & variable prototypes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * Cache of frequently used per-cpu data:
+ *   - Pointers to non-secure, realm, and secure security state contexts
+ *   - Address of the crash stack
+ * It is aligned to the cache line boundary to allow efficient concurrent
+ * manipulation of these pointers on different CPUs.
+ *
+ * The data structure and the _cpu_data accessors should not be used directly
+ * by components that have per-cpu members. The member access macros should be
+ * used for this.
+ ******************************************************************************/
+typedef struct cpu_data {
+#ifdef __aarch64__
+ void *cpu_context[CPU_DATA_CONTEXT_NUM];
+#endif /* __aarch64__ */
+ uintptr_t cpu_ops_ptr;
+ struct psci_cpu_data psci_svc_cpu_data;
+#if ENABLE_PAUTH
+ uint64_t apiakey[2];
+#endif
+#if CRASH_REPORTING
+ u_register_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3];
+#endif
+#if ENABLE_RUNTIME_INSTRUMENTATION
+ uint64_t cpu_data_pmf_ts[CPU_DATA_PMF_TS_COUNT];
+#endif
+#if PLAT_PCPU_DATA_SIZE
+ uint8_t platform_cpu_data[PLAT_PCPU_DATA_SIZE];
+#endif
+#if defined(IMAGE_BL31) && EL3_EXCEPTION_HANDLING
+ pe_exc_data_t ehf_data;
+#endif
+} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;
+
+extern cpu_data_t percpu_data[PLATFORM_CORE_COUNT];
+
+#ifdef __aarch64__
+CASSERT(CPU_DATA_CONTEXT_NUM == CPU_CONTEXT_NUM,
+ assert_cpu_data_context_num_mismatch);
+#endif
+
+#if ENABLE_PAUTH
+CASSERT(CPU_DATA_APIAKEY_OFFSET == __builtin_offsetof
+ (cpu_data_t, apiakey),
+ assert_cpu_data_pauth_stack_offset_mismatch);
+#endif
+
+#if CRASH_REPORTING
+/* verify assembler offsets match data structures */
+CASSERT(CPU_DATA_CRASH_BUF_OFFSET == __builtin_offsetof
+ (cpu_data_t, crash_buf),
+ assert_cpu_data_crash_stack_offset_mismatch);
+#endif
+
+CASSERT(CPU_DATA_SIZE == sizeof(cpu_data_t),
+ assert_cpu_data_size_mismatch);
+
+CASSERT(CPU_DATA_CPU_OPS_PTR == __builtin_offsetof
+ (cpu_data_t, cpu_ops_ptr),
+ assert_cpu_data_cpu_ops_ptr_offset_mismatch);
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+CASSERT(CPU_DATA_PMF_TS0_OFFSET == __builtin_offsetof
+ (cpu_data_t, cpu_data_pmf_ts[0]),
+ assert_cpu_data_pmf_ts0_offset_mismatch);
+#endif
+
+struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);
+
+#ifdef __aarch64__
+/* Return the cpu_data structure for the current CPU. */
+static inline struct cpu_data *_cpu_data(void)
+{
+ return (cpu_data_t *)read_tpidr_el3();
+}
+#else
+struct cpu_data *_cpu_data(void);
+#endif
+
+/*
+ * Returns the index of the cpu_context array for the given security state.
+ * All accesses to cpu_context should be through this helper to make sure
+ * an access is not out-of-bounds. The function assumes security_state is
+ * valid.
+ */
+static inline context_pas_t get_cpu_context_index(uint32_t security_state)
+{
+ if (security_state == SECURE) {
+ return CPU_CONTEXT_SECURE;
+ } else {
+#if ENABLE_RME
+ if (security_state == NON_SECURE) {
+ return CPU_CONTEXT_NS;
+ } else {
+ assert(security_state == REALM);
+ return CPU_CONTEXT_REALM;
+ }
+#else
+ assert(security_state == NON_SECURE);
+ return CPU_CONTEXT_NS;
+#endif
+ }
+}
+
+/**************************************************************************
+ * APIs for initialising and accessing per-cpu data
+ *************************************************************************/
+
+void init_cpu_data_ptr(void);
+void init_cpu_ops(void);
+
+#define get_cpu_data(_m) _cpu_data()->_m
+#define set_cpu_data(_m, _v) _cpu_data()->_m = (_v)
+#define get_cpu_data_by_index(_ix, _m) _cpu_data_by_index(_ix)->_m
+#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = (_v)
+/* ((cpu_data_t *)0)->_m is a dummy used to get the size of the struct member _m */
+#define flush_cpu_data(_m) flush_dcache_range((uintptr_t) \
+ &(_cpu_data()->_m), \
+ sizeof(((cpu_data_t *)0)->_m))
+#define inv_cpu_data(_m) inv_dcache_range((uintptr_t) \
+ &(_cpu_data()->_m), \
+ sizeof(((cpu_data_t *)0)->_m))
+#define flush_cpu_data_by_index(_ix, _m) \
+ flush_dcache_range((uintptr_t) \
+ &(_cpu_data_by_index(_ix)->_m), \
+ sizeof(((cpu_data_t *)0)->_m))
+
+
+#endif /* __ASSEMBLER__ */
+#endif /* CPU_DATA_H */
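
To illustrate the intended access pattern: per-cpu members are read and written through the macros above rather than by dereferencing cpu_data_t directly, and flushed when another observer may read them with caches disabled. The CPU index used below is hypothetical.

	/* Current CPU's copy */
	uintptr_t ops = get_cpu_data(cpu_ops_ptr);

	/* Another CPU's copy, written and then flushed to main memory */
	set_cpu_data_by_index(1U, cpu_ops_ptr, ops);
	flush_cpu_data_by_index(1U, cpu_ops_ptr);
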
diff --git a/include/lib/el3_runtime/pubsub.h b/include/lib/el3_runtime/pubsub.h
new file mode 100644
index 0000000..cbd8ecc
--- /dev/null
+++ b/include/lib/el3_runtime/pubsub.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PUBSUB_H
+#define PUBSUB_H
+
+#ifdef __LINKER__
+
+/* For the linker ... */
+#define __pubsub_start_sym(event) __pubsub_##event##_start
+#define __pubsub_end_sym(event) __pubsub_##event##_end
+#define __pubsub_section(event) .__pubsub_##event
+
+/*
+ * REGISTER_PUBSUB_EVENT has a different definition between linker and compiler
+ * contexts. In linker context, this collects pubsub sections for each event,
+ * placing guard symbols around each.
+ */
+#if defined(USE_ARM_LINK)
+#define REGISTER_PUBSUB_EVENT(event) \
+ __pubsub_start_sym(event) +0 FIXED \
+ { \
+ *(__pubsub_section(event)) \
+ } \
+ __pubsub_end_sym(event) +0 FIXED EMPTY 0 \
+ { \
+ /* placeholder */ \
+ }
+#else
+#define REGISTER_PUBSUB_EVENT(event) \
+ __pubsub_start_sym(event) = .; \
+ KEEP(*(__pubsub_section(event))); \
+ __pubsub_end_sym(event) = .
+#endif
+
+#else /* __LINKER__ */
+
+/* For the compiler ... */
+
+#include <assert.h>
+#include <cdefs.h>
+#include <stddef.h>
+
+#include <arch_helpers.h>
+
+#if defined(USE_ARM_LINK)
+#define __pubsub_start_sym(event) Load$$__pubsub_##event##_start$$Base
+#define __pubsub_end_sym(event) Load$$__pubsub_##event##_end$$Base
+#else
+#define __pubsub_start_sym(event) __pubsub_##event##_start
+#define __pubsub_end_sym(event) __pubsub_##event##_end
+#endif
+
+#define __pubsub_section(event) __section(".__pubsub_" #event)
+
+/*
+ * In compiler context, REGISTER_PUBSUB_EVENT declares the per-event symbols
+ * exported by the linker that are required for the other pubsub macros to work.
+ */
+#define REGISTER_PUBSUB_EVENT(event) \
+ extern pubsub_cb_t __pubsub_start_sym(event)[]; \
+ extern pubsub_cb_t __pubsub_end_sym(event)[]
+
+/*
+ * Have the function func called back when the specified event happens. This
+ * macro places the function address into the event's pubsub section, which is
+ * picked up and invoked via the PUBLISH_EVENT*() macros.
+ *
+ * The extern declaration is there to satisfy MISRA C-2012 rule 8.4.
+ */
+#define SUBSCRIBE_TO_EVENT(event, func) \
+ extern pubsub_cb_t __cb_func_##func##event __pubsub_section(event); \
+ pubsub_cb_t __cb_func_##func##event __pubsub_section(event) = (func)
+
+/*
+ * Iterate over subscribed handlers for a defined event. 'event' is the name of
+ * the event, and 'subscriber' a local variable of type 'pubsub_cb_t *'.
+ */
+#define for_each_subscriber(event, subscriber) \
+ for (subscriber = __pubsub_start_sym(event); \
+ subscriber < __pubsub_end_sym(event); \
+ subscriber++)
+
+/*
+ * Publish a defined event supplying an argument. All subscribed handlers are
+ * invoked, but the return values of the handlers are ignored for now.
+ */
+#define PUBLISH_EVENT_ARG(event, arg) \
+ do { \
+ pubsub_cb_t *subscriber; \
+ for_each_subscriber(event, subscriber) { \
+ (*subscriber)(arg); \
+ } \
+ } while (0)
+
+/* Publish a defined event with NULL argument */
+#define PUBLISH_EVENT(event) PUBLISH_EVENT_ARG(event, NULL)
+
+/* Subscriber callback type */
+typedef void* (*pubsub_cb_t)(const void *arg);
+
+#endif /* __LINKER__ */
+#endif /* PUBSUB_H */
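
A minimal subscriber/publisher pair, for illustration, using the macros above; the event name comes from pubsub_events.h and the handler name is hypothetical.

static void *my_cpu_on_handler(const void *arg)
{
	/* arg is NULL when published via PUBLISH_EVENT() */
	return NULL;
}
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, my_cpu_on_handler);

/* At the publication site, every registered handler is invoked in turn */
PUBLISH_EVENT(psci_cpu_on_finish);
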
diff --git a/include/lib/el3_runtime/pubsub_events.h b/include/lib/el3_runtime/pubsub_events.h
new file mode 100644
index 0000000..d0c0502
--- /dev/null
+++ b/include/lib/el3_runtime/pubsub_events.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2017, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <lib/el3_runtime/pubsub.h>
+
+/*
+ * This file defines a list of pubsub events, declared using
+ * REGISTER_PUBSUB_EVENT() macro.
+ */
+
+/*
+ * Event published after a CPU has been powered up and finished its
+ * initialization.
+ */
+REGISTER_PUBSUB_EVENT(psci_cpu_on_finish);
+
+/*
+ * These events are published before/after a CPU has been powered down/up
+ * via the PSCI CPU SUSPEND API.
+ */
+REGISTER_PUBSUB_EVENT(psci_suspend_pwrdown_start);
+REGISTER_PUBSUB_EVENT(psci_suspend_pwrdown_finish);
+
+#ifdef __aarch64__
+/*
+ * These events are published by the AArch64 context management framework
+ * after the secure context is restored/saved via
+ * cm_el1_sysregs_context_{restore,save}() API.
+ */
+REGISTER_PUBSUB_EVENT(cm_entering_secure_world);
+REGISTER_PUBSUB_EVENT(cm_exited_secure_world);
+
+/*
+ * These events are published by the AArch64 context management framework
+ * after the normal context is restored/saved via
+ * cm_el1_sysregs_context_{restore,save}() API.
+ */
+REGISTER_PUBSUB_EVENT(cm_entering_normal_world);
+REGISTER_PUBSUB_EVENT(cm_exited_normal_world);
+#endif /* __aarch64__ */
diff --git a/include/lib/extensions/amu.h b/include/lib/extensions/amu.h
new file mode 100644
index 0000000..a396b99
--- /dev/null
+++ b/include/lib/extensions/amu.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef AMU_H
+#define AMU_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <context.h>
+
+#include <platform_def.h>
+
+#if ENABLE_FEAT_AMU
+#if __aarch64__
+void amu_enable(cpu_context_t *ctx);
+void amu_init_el3(void);
+void amu_init_el2_unused(void);
+void amu_enable_per_world(per_world_context_t *per_world_ctx);
+#else
+void amu_enable(bool el2_unused);
+#endif /* __aarch64__ */
+
+#else
+#if __aarch64__
+static inline void amu_enable(cpu_context_t *ctx)
+{
+}
+static inline void amu_init_el3(void)
+{
+}
+static inline void amu_init_el2_unused(void)
+{
+}
+static inline void amu_enable_per_world(per_world_context_t *per_world_ctx)
+{
+}
+#else
+static inline void amu_enable(bool el2_unused)
+{
+}
+#endif /* __aarch64__ */
+#endif /* ENABLE_FEAT_AMU */
+
+#if ENABLE_AMU_AUXILIARY_COUNTERS
+/*
+ * AMU data for a single core.
+ */
+struct amu_core {
+ uint16_t enable; /* Mask of auxiliary counters to enable */
+};
+
+/*
+ * Topological platform data specific to the AMU.
+ */
+struct amu_topology {
+ struct amu_core cores[PLATFORM_CORE_COUNT]; /* Per-core data */
+};
+
+#if !ENABLE_AMU_FCONF
+/*
+ * Retrieve the platform's AMU topology. A `NULL` return value is treated as a
+ * non-fatal error, in which case no auxiliary counters will be enabled.
+ */
+const struct amu_topology *plat_amu_topology(void);
+#endif /* ENABLE_AMU_FCONF */
+#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */
+
+#endif /* AMU_H */
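
A sketch of the platform hook declared above, assuming ENABLE_AMU_AUXILIARY_COUNTERS=1 and ENABLE_AMU_FCONF=0; the enable mask is a hypothetical example (the GNU range-designator initialiser is accepted by the toolchains the project builds with).

const struct amu_topology *plat_amu_topology(void)
{
	static const struct amu_topology topology = {
		.cores = {
			[0 ... PLATFORM_CORE_COUNT - 1] = {
				.enable = 0x3U,	/* auxiliary counters 0 and 1 */
			},
		},
	};

	return &topology;
}
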
diff --git a/include/lib/extensions/brbe.h b/include/lib/extensions/brbe.h
new file mode 100644
index 0000000..194efba
--- /dev/null
+++ b/include/lib/extensions/brbe.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef BRBE_H
+#define BRBE_H
+
+#if ENABLE_BRBE_FOR_NS
+void brbe_init_el3(void);
+#else
+static inline void brbe_init_el3(void)
+{
+}
+#endif /* ENABLE_BRBE_FOR_NS */
+
+#endif /* BRBE_H */
diff --git a/include/lib/extensions/mpam.h b/include/lib/extensions/mpam.h
new file mode 100644
index 0000000..170f919
--- /dev/null
+++ b/include/lib/extensions/mpam.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MPAM_H
+#define MPAM_H
+
+#include <stdbool.h>
+
+#include <context.h>
+
+#if ENABLE_FEAT_MPAM
+void mpam_enable(cpu_context_t *context);
+void mpam_init_el2_unused(void);
+#else
+static inline void mpam_enable(cpu_context_t *context)
+{
+}
+static inline void mpam_init_el2_unused(void)
+{
+}
+#endif /* ENABLE_FEAT_MPAM */
+
+#endif /* MPAM_H */
diff --git a/include/lib/extensions/pauth.h b/include/lib/extensions/pauth.h
new file mode 100644
index 0000000..dbc2226
--- /dev/null
+++ b/include/lib/extensions/pauth.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PAUTH_H
+#define PAUTH_H
+
+/*******************************************************************************
+ * ARMv8.3-PAuth support functions
+ ******************************************************************************/
+
+/* Disable ARMv8.3 pointer authentication in EL1/EL3 */
+void pauth_disable_el1(void);
+void pauth_disable_el3(void);
+
+#endif /* PAUTH_H */
diff --git a/include/lib/extensions/pmuv3.h b/include/lib/extensions/pmuv3.h
new file mode 100644
index 0000000..62fee7b
--- /dev/null
+++ b/include/lib/extensions/pmuv3.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PMUV3_H
+#define PMUV3_H
+
+#include <context.h>
+
+void pmuv3_init_el3(void);
+
+#ifdef __aarch64__
+void pmuv3_enable(cpu_context_t *ctx);
+void pmuv3_init_el2_unused(void);
+#endif /* __aarch64__ */
+
+#endif /* PMUV3_H */
diff --git a/include/lib/extensions/ras.h b/include/lib/extensions/ras.h
new file mode 100644
index 0000000..6997da0
--- /dev/null
+++ b/include/lib/extensions/ras.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2018, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RAS_H
+#define RAS_H
+
+#define ERR_HANDLER_VERSION 1U
+
+/* Error record access mechanism */
+#define ERR_ACCESS_SYSREG 0
+#define ERR_ACCESS_MEMMAP 1
+
+/*
+ * Register all error records on the platform.
+ *
+ * This macro must be used in the same file as the array of error record info
+ * is declared. Only then would ARRAY_SIZE() yield a meaningful value.
+ */
+#define REGISTER_ERR_RECORD_INFO(_records) \
+ const struct err_record_mapping err_record_mappings = { \
+ .err_records = (_records), \
+ .num_err_records = ARRAY_SIZE(_records), \
+ }
+
+/* Error record info iterator */
+#define for_each_err_record_info(_i, _info) \
+ for ((_i) = 0, (_info) = err_record_mappings.err_records; \
+ (_i) < err_record_mappings.num_err_records; \
+ (_i)++, (_info)++)
+
+#define ERR_RECORD_COMMON_(_probe, _handler, _aux) \
+ .probe = _probe, \
+ .handler = _handler, \
+ .aux_data = _aux,
+
+#define ERR_RECORD_SYSREG_V1(_idx_start, _num_idx, _probe, _handler, _aux) \
+ { \
+ .version = 1, \
+ .sysreg.idx_start = _idx_start, \
+ .sysreg.num_idx = _num_idx, \
+ .access = ERR_ACCESS_SYSREG, \
+ ERR_RECORD_COMMON_(_probe, _handler, _aux) \
+ }
+
+#define ERR_RECORD_MEMMAP_V1(_base_addr, _size_num_k, _probe, _handler, _aux) \
+ { \
+ .version = 1, \
+ .memmap.base_addr = _base_addr, \
+ .memmap.size_num_k = _size_num_k, \
+ .access = ERR_ACCESS_MEMMAP, \
+ ERR_RECORD_COMMON_(_probe, _handler, _aux) \
+ }
+
+/*
+ * Macro to be used to name and declare an array of RAS interrupts along with
+ * their handlers.
+ *
+ * This macro must be used in the same file as the array of interrupts is
+ * declared. Only then would ARRAY_SIZE() yield a meaningful value. Also, the
+ * array is expected to be sorted in the increasing order of interrupt number.
+ */
+#define REGISTER_RAS_INTERRUPTS(_array) \
+ const struct ras_interrupt_mapping ras_interrupt_mappings = { \
+ .intrs = (_array), \
+ .num_intrs = ARRAY_SIZE(_array), \
+ }
+
+#ifndef __ASSEMBLER__
+
+#include <assert.h>
+
+#include <lib/extensions/ras_arch.h>
+
+struct err_record_info;
+
+struct ras_interrupt {
+ /* Interrupt number, and the associated error record info */
+ unsigned int intr_number;
+ struct err_record_info *err_record;
+ void *cookie;
+};
+
+/* Function to probe an error record group for errors */
+typedef int (*err_record_probe_t)(const struct err_record_info *info,
+ int *probe_data);
+
+/* Data passed to error record group handler */
+struct err_handler_data {
+ /* Info passed on from top-level exception handler */
+ uint64_t flags;
+ void *cookie;
+ void *handle;
+
+ /* Data structure version */
+ unsigned int version;
+
+ /* Reason for EA: one of the ERROR_* constants */
+ unsigned int ea_reason;
+
+ /*
+ * For EAs received at vector, the value read from ESR; for an EA
+ * synchronized by ESB, the value of DISR.
+ */
+ uint32_t syndrome;
+
+ /* For errors signalled via interrupt, the raw interrupt ID; otherwise, 0. */
+ unsigned int interrupt;
+};
+
+/* Function to handle error from an error record group */
+typedef int (*err_record_handler_t)(const struct err_record_info *info,
+ int probe_data, const struct err_handler_data *const data);
+
+/* Error record information */
+struct err_record_info {
+ /* Function to probe error record group for errors */
+ err_record_probe_t probe;
+
+ /* Function to handle error record group errors */
+ err_record_handler_t handler;
+
+ /* Opaque group-specific data */
+ void *aux_data;
+
+ /* Additional information for Standard Error Records */
+ union {
+ struct {
+ /*
+ * For a group accessed via memory-mapped register,
+ * base address of the page hosting error records, and
+ * the size of the record group.
+ */
+ uintptr_t base_addr;
+
+ /* Size of group in number of KBs */
+ unsigned int size_num_k;
+ } memmap;
+
+ struct {
+ /*
+ * For error records accessed via system registers, index of
+ * the first error record and the number of records in the group.
+ */
+ unsigned int idx_start;
+ unsigned int num_idx;
+ } sysreg;
+ };
+
+ /* Data structure version */
+ unsigned int version;
+
+ /* Error record access mechanism */
+ unsigned int access:1;
+};
+
+struct err_record_mapping {
+ struct err_record_info *err_records;
+ size_t num_err_records;
+};
+
+struct ras_interrupt_mapping {
+ struct ras_interrupt *intrs;
+ size_t num_intrs;
+};
+
+extern const struct err_record_mapping err_record_mappings;
+extern const struct ras_interrupt_mapping ras_interrupt_mappings;
+
+
+/*
+ * Helper functions to probe memory-mapped and system registers implemented in
+ * Standard Error Record format
+ */
+static inline int ras_err_ser_probe_memmap(const struct err_record_info *info,
+ int *probe_data)
+{
+ assert(info->version == ERR_HANDLER_VERSION);
+
+ return ser_probe_memmap(info->memmap.base_addr, info->memmap.size_num_k,
+ probe_data);
+}
+
+static inline int ras_err_ser_probe_sysreg(const struct err_record_info *info,
+ int *probe_data)
+{
+ assert(info->version == ERR_HANDLER_VERSION);
+
+ return ser_probe_sysreg(info->sysreg.idx_start, info->sysreg.num_idx,
+ probe_data);
+}
+
+const char *ras_serr_to_str(unsigned int serr);
+int ras_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
+ void *handle, uint64_t flags);
+void ras_init(void);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* RAS_H */
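
For illustration, how a platform would describe one system-register-accessed error record group with the structures and macros above; the probe/handler bodies and the record indices are hypothetical.

static int plat_probe(const struct err_record_info *info, int *probe_data)
{
	return ras_err_ser_probe_sysreg(info, probe_data);
}

static int plat_handler(const struct err_record_info *info, int probe_data,
			const struct err_handler_data *const data)
{
	/* Inspect data->syndrome / data->ea_reason and handle the error */
	return 0;
}

static struct err_record_info plat_err_records[] = {
	ERR_RECORD_SYSREG_V1(0, 2, plat_probe, plat_handler, NULL),
};

REGISTER_ERR_RECORD_INFO(plat_err_records);
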
diff --git a/include/lib/extensions/ras_arch.h b/include/lib/extensions/ras_arch.h
new file mode 100644
index 0000000..e0aee50
--- /dev/null
+++ b/include/lib/extensions/ras_arch.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2018, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RAS_ARCH_H
+#define RAS_ARCH_H
+
+/*
+ * Size of nodes implementing Standard Error Records - currently only 4k is
+ * supported.
+ */
+#define STD_ERR_NODE_SIZE_NUM_K 4U
+
+/*
+ * Individual register offsets within an error record in Standard Error Record
+ * format when error records are accessed through memory-mapped registers.
+ */
+#define ERR_FR(n) (0x0ULL + (64ULL * (n)))
+#define ERR_CTLR(n) (0x8ULL + (64ULL * (n)))
+#define ERR_STATUS(n) (0x10ULL + (64ULL * (n)))
+#define ERR_ADDR(n) (0x18ULL + (64ULL * (n)))
+#define ERR_MISC0(n) (0x20ULL + (64ULL * (n)))
+#define ERR_MISC1(n) (0x28ULL + (64ULL * (n)))
+
+/* Group Status Register (ERR_STATUS) offset */
+#define ERR_GSR(base, size_num_k, n) \
+ ((base) + (0x380ULL * (size_num_k)) + (8ULL * (n)))
+
+/* Management register offsets */
+#define ERR_DEVID(base, size_num_k) \
+ ((base) + ((0x400ULL * (size_num_k)) - 0x100ULL) + 0xc8ULL)
+
+#define ERR_DEVID_MASK 0xffffUL
+
+/* Standard Error Record status register fields */
+#define ERR_STATUS_AV_SHIFT 31
+#define ERR_STATUS_AV_MASK U(0x1)
+
+#define ERR_STATUS_V_SHIFT 30
+#define ERR_STATUS_V_MASK U(0x1)
+
+#define ERR_STATUS_UE_SHIFT 29
+#define ERR_STATUS_UE_MASK U(0x1)
+
+#define ERR_STATUS_ER_SHIFT 28
+#define ERR_STATUS_ER_MASK U(0x1)
+
+#define ERR_STATUS_OF_SHIFT 27
+#define ERR_STATUS_OF_MASK U(0x1)
+
+#define ERR_STATUS_MV_SHIFT 26
+#define ERR_STATUS_MV_MASK U(0x1)
+
+#define ERR_STATUS_CE_SHIFT 24
+#define ERR_STATUS_CE_MASK U(0x3)
+
+#define ERR_STATUS_DE_SHIFT 23
+#define ERR_STATUS_DE_MASK U(0x1)
+
+#define ERR_STATUS_PN_SHIFT 22
+#define ERR_STATUS_PN_MASK U(0x1)
+
+#define ERR_STATUS_UET_SHIFT 20
+#define ERR_STATUS_UET_MASK U(0x3)
+
+#define ERR_STATUS_IERR_SHIFT 8
+#define ERR_STATUS_IERR_MASK U(0xff)
+
+#define ERR_STATUS_SERR_SHIFT 0
+#define ERR_STATUS_SERR_MASK U(0xff)
+
+#define ERR_STATUS_GET_FIELD(_status, _field) \
+ (((_status) >> ERR_STATUS_ ##_field ##_SHIFT) & ERR_STATUS_ ##_field ##_MASK)
+
+#define ERR_STATUS_CLR_FIELD(_status, _field) \
+ (_status) &= ~(ERR_STATUS_ ##_field ##_MASK << ERR_STATUS_ ##_field ##_SHIFT)
+
+#define ERR_STATUS_SET_FIELD(_status, _field, _value) \
+ (_status) |= (((_value) & ERR_STATUS_ ##_field ##_MASK) << ERR_STATUS_ ##_field ##_SHIFT)
+
+#define ERR_STATUS_WRITE_FIELD(_status, _field, _value) do { \
+ ERR_STATUS_CLR_FIELD(_status, _field); \
+ ERR_STATUS_SET_FIELD(_status, _field, _value); \
+ } while (0)
+
+
+/* Standard Error Record control register fields */
+#define ERR_CTLR_WDUI_SHIFT 11
+#define ERR_CTLR_WDUI_MASK 0x1
+
+#define ERR_CTLR_RDUI_SHIFT 10
+#define ERR_CTLR_RDUI_MASK 0x1
+#define ERR_CTLR_DUI_SHIFT ERR_CTLR_RDUI_SHIFT
+#define ERR_CTLR_DUI_MASK ERR_CTLR_RDUI_MASK
+
+#define ERR_CTLR_WCFI_SHIFT 9
+#define ERR_CTLR_WCFI_MASK 0x1
+
+#define ERR_CTLR_RCFI_SHIFT 8
+#define ERR_CTLR_RCFI_MASK 0x1
+#define ERR_CTLR_CFI_SHIFT ERR_CTLR_RCFI_SHIFT
+#define ERR_CTLR_CFI_MASK ERR_CTLR_RCFI_MASK
+
+#define ERR_CTLR_WUE_SHIFT 7
+#define ERR_CTLR_WUE_MASK 0x1
+
+#define ERR_CTLR_WFI_SHIFT 6
+#define ERR_CTLR_WFI_MASK 0x1
+
+#define ERR_CTLR_WUI_SHIFT 5
+#define ERR_CTLR_WUI_MASK 0x1
+
+#define ERR_CTLR_RUE_SHIFT 4
+#define ERR_CTLR_RUE_MASK 0x1
+#define ERR_CTLR_UE_SHIFT ERR_CTLR_RUE_SHIFT
+#define ERR_CTLR_UE_MASK ERR_CTLR_RUE_MASK
+
+#define ERR_CTLR_RFI_SHIFT 3
+#define ERR_CTLR_RFI_MASK 0x1
+#define ERR_CTLR_FI_SHIFT ERR_CTLR_RFI_SHIFT
+#define ERR_CTLR_FI_MASK ERR_CTLR_RFI_MASK
+
+#define ERR_CTLR_RUI_SHIFT 2
+#define ERR_CTLR_RUI_MASK 0x1
+#define ERR_CTLR_UI_SHIFT ERR_CTLR_RUI_SHIFT
+#define ERR_CTLR_UI_MASK ERR_CTLR_RUI_MASK
+
+#define ERR_CTLR_ED_SHIFT 0
+#define ERR_CTLR_ED_MASK 0x1
+
+#define ERR_CTLR_CLR_FIELD(_ctlr, _field) \
+ (_ctlr) &= ~(ERR_CTLR_ ##_field ##_MASK << ERR_CTLR_ ##_field ##_SHIFT)
+
+#define ERR_CTLR_SET_FIELD(_ctlr, _field, _value) \
+ (_ctlr) |= (((_value) & ERR_CTLR_ ##_field ##_MASK) << ERR_CTLR_ ##_field ##_SHIFT)
+
+#define ERR_CTLR_ENABLE_FIELD(_ctlr, _field) \
+ ERR_CTLR_SET_FIELD(_ctlr, _field, ERR_CTLR_ ##_field ##_MASK)
+
+/* Uncorrected error types for Asynchronous exceptions */
+#define ERROR_STATUS_UET_UC 0x0 /* Uncontainable */
+#define ERROR_STATUS_UET_UEU 0x1 /* Unrecoverable */
+#define ERROR_STATUS_UET_UEO 0x2 /* Restartable */
+#define ERROR_STATUS_UET_UER 0x3 /* Recoverable */
+
+/* Error types for Synchronous exceptions */
+#define ERROR_STATUS_SET_UER 0x0 /* Recoverable */
+#define ERROR_STATUS_SET_UEO 0x1 /* Restartable */
+#define ERROR_STATUS_SET_UC 0x2 /* Uncontainable */
+#define ERROR_STATUS_SET_CE 0x3 /* Corrected */
+
+/* Number of architecturally-defined primary error codes */
+#define ERROR_STATUS_NUM_SERR U(22)
+
+/* Implementation Defined Syndrome bit in ESR */
+#define SERROR_IDS_BIT U(24)
+
+/*
+ * Asynchronous Error Type in exception syndrome. The field has same values in
+ * both DISR_EL1 and ESR_EL3 for SError.
+ */
+#define EABORT_AET_SHIFT U(10)
+#define EABORT_AET_WIDTH U(3)
+#define EABORT_AET_MASK U(0x7)
+
+/* DFSC field in Asynchronous exception syndrome */
+#define EABORT_DFSC_SHIFT U(0)
+#define EABORT_DFSC_WIDTH U(6)
+#define EABORT_DFSC_MASK U(0x3f)
+
+/* Synchronous Error Type in exception syndrome. */
+#define EABORT_SET_SHIFT U(11)
+#define EABORT_SET_WIDTH U(2)
+#define EABORT_SET_MASK U(0x3)
+
+/* DFSC code for SErrors */
+#define DFSC_SERROR 0x11
+
+/* I/DFSC code for synchronous external abort */
+#define SYNC_EA_FSC 0x10
+
+#ifndef __ASSEMBLER__
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <context.h>
+#include <lib/mmio.h>
+#include <stdint.h>
+
+/*
+ * Standard Error Record accessors for memory-mapped registers.
+ */
+
+static inline uint64_t ser_get_feature(uintptr_t base, unsigned int idx)
+{
+ return mmio_read_64(base + ERR_FR(idx));
+}
+
+static inline uint64_t ser_get_control(uintptr_t base, unsigned int idx)
+{
+ return mmio_read_64(base + ERR_CTLR(idx));
+}
+
+static inline uint64_t ser_get_status(uintptr_t base, unsigned int idx)
+{
+ return mmio_read_64(base + ERR_STATUS(idx));
+}
+
+/*
+ * The error handling agent writes to the status register to clear an
+ * identified/handled error. Most fields in the status register are
+ * conditional write-one-to-clear.
+ *
+ * Typically, to clear the status, it suffices to write back the same value
+ * previously read. However, if new, higher-priority errors were recorded on
+ * the node since the status was last read, writing back the read value won't
+ * clear the status. Therefore, an error handling agent must read back and
+ * verify that the status has indeed been cleared.
+ */
+static inline void ser_set_status(uintptr_t base, unsigned int idx,
+ uint64_t status)
+{
+ mmio_write_64(base + ERR_STATUS(idx), status);
+}
+
+static inline uint64_t ser_get_addr(uintptr_t base, unsigned int idx)
+{
+ return mmio_read_64(base + ERR_ADDR(idx));
+}
+
+static inline uint64_t ser_get_misc0(uintptr_t base, unsigned int idx)
+{
+ return mmio_read_64(base + ERR_MISC0(idx));
+}
+
+static inline uint64_t ser_get_misc1(uintptr_t base, unsigned int idx)
+{
+ return mmio_read_64(base + ERR_MISC1(idx));
+}
+
+
+/*
+ * Standard Error Record helpers for System registers.
+ */
+static inline void ser_sys_select_record(unsigned int idx)
+{
+ unsigned int max_idx __unused =
+ (unsigned int) read_erridr_el1() & ERRIDR_MASK;
+
+ assert(idx < max_idx);
+
+ write_errselr_el1(idx);
+ isb();
+}
+
+/* Library functions to probe Standard Error Record */
+int ser_probe_memmap(uintptr_t base, unsigned int size_num_k, int *probe_data);
+int ser_probe_sysreg(unsigned int idx_start, unsigned int num_idx, int *probe_data);
+#endif /* __ASSEMBLER__ */
+
+#endif /* RAS_ARCH_H */
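
As an illustration of the field accessors above, a handler for a memory-mapped record group could check for and clear a valid, uncorrected error as follows; 'base' and 'idx' are hypothetical.

	uint64_t status = ser_get_status(base, idx);

	if ((ERR_STATUS_GET_FIELD(status, V) != 0U) &&
	    (ERR_STATUS_GET_FIELD(status, UE) != 0U)) {
		/* ... handle the error, then clear the W1C status fields ... */
		ser_set_status(base, idx, status);
	}
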
diff --git a/include/lib/extensions/sme.h b/include/lib/extensions/sme.h
new file mode 100644
index 0000000..bd7948e
--- /dev/null
+++ b/include/lib/extensions/sme.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SME_H
+#define SME_H
+
+#include <stdbool.h>
+#include <context.h>
+
+/*
+ * Maximum value of the LEN field in SMCR_ELx. This is different from the
+ * maximum supported value, which is platform dependent. In the first version
+ * of SME the LEN field is limited to 4 bits, but it will be expanded in future
+ * iterations. To support different versions, the code that discovers the
+ * supported vector lengths writes the maximum value into SMCR_ELx and then
+ * reads it back to see how many bits are implemented.
+ */
+#define SME_SMCR_LEN_MAX U(0x1FF)
+
+#if ENABLE_SME_FOR_NS
+void sme_init_el3(void);
+void sme_init_el2_unused(void);
+void sme_enable(cpu_context_t *context);
+void sme_disable(cpu_context_t *context);
+void sme_enable_per_world(per_world_context_t *per_world_ctx);
+void sme_disable_per_world(per_world_context_t *per_world_ctx);
+#else
+static inline void sme_init_el3(void)
+{
+}
+static inline void sme_init_el2_unused(void)
+{
+}
+static inline void sme_enable(cpu_context_t *context)
+{
+}
+static inline void sme_disable(cpu_context_t *context)
+{
+}
+static inline void sme_enable_per_world(per_world_context_t *per_world_ctx)
+{
+}
+static inline void sme_disable_per_world(per_world_context_t *per_world_ctx)
+{
+}
+#endif /* ENABLE_SME_FOR_NS */
+
+#endif /* SME_H */
diff --git a/include/lib/extensions/spe.h b/include/lib/extensions/spe.h
new file mode 100644
index 0000000..7b39037
--- /dev/null
+++ b/include/lib/extensions/spe.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPE_H
+#define SPE_H
+
+#include <stdbool.h>
+
+#if ENABLE_SPE_FOR_NS
+void spe_init_el3(void);
+void spe_init_el2_unused(void);
+void spe_disable(void);
+#else
+static inline void spe_init_el3(void)
+{
+}
+static inline void spe_init_el2_unused(void)
+{
+}
+static inline void spe_disable(void)
+{
+}
+#endif /* ENABLE_SPE_FOR_NS */
+
+#endif /* SPE_H */
diff --git a/include/lib/extensions/sve.h b/include/lib/extensions/sve.h
new file mode 100644
index 0000000..947c905
--- /dev/null
+++ b/include/lib/extensions/sve.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SVE_H
+#define SVE_H
+
+#include <context.h>
+
+#if (ENABLE_SME_FOR_NS || ENABLE_SVE_FOR_NS)
+void sve_init_el2_unused(void);
+void sve_enable_per_world(per_world_context_t *per_world_ctx);
+void sve_disable_per_world(per_world_context_t *per_world_ctx);
+#else
+static inline void sve_init_el2_unused(void)
+{
+}
+static inline void sve_enable_per_world(per_world_context_t *per_world_ctx)
+{
+}
+static inline void sve_disable_per_world(per_world_context_t *per_world_ctx)
+{
+}
+#endif /* (ENABLE_SME_FOR_NS || ENABLE_SVE_FOR_NS) */
+
+#endif /* SVE_H */
diff --git a/include/lib/extensions/sys_reg_trace.h b/include/lib/extensions/sys_reg_trace.h
new file mode 100644
index 0000000..7004267
--- /dev/null
+++ b/include/lib/extensions/sys_reg_trace.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SYS_REG_TRACE_H
+#define SYS_REG_TRACE_H
+
+#include <context.h>
+
+#if ENABLE_SYS_REG_TRACE_FOR_NS
+
+#if __aarch64__
+void sys_reg_trace_enable_per_world(per_world_context_t *per_world_ctx);
+void sys_reg_trace_disable_per_world(per_world_context_t *per_world_ctx);
+void sys_reg_trace_init_el2_unused(void);
+#else
+void sys_reg_trace_init_el3(void);
+#endif /* __aarch64__ */
+
+#else /* !ENABLE_SYS_REG_TRACE_FOR_NS */
+
+#if __aarch64__
+static inline void sys_reg_trace_enable_per_world(per_world_context_t *per_world_ctx)
+{
+}
+static inline void sys_reg_trace_disable_per_world(per_world_context_t *per_world_ctx)
+{
+}
+static inline void sys_reg_trace_init_el2_unused(void)
+{
+}
+#else
+static inline void sys_reg_trace_init_el3(void)
+{
+}
+#endif /* __aarch64__ */
+
+#endif /* ENABLE_SYS_REG_TRACE_FOR_NS */
+
+#endif /* SYS_REG_TRACE_H */
diff --git a/include/lib/extensions/trbe.h b/include/lib/extensions/trbe.h
new file mode 100644
index 0000000..0bed433
--- /dev/null
+++ b/include/lib/extensions/trbe.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TRBE_H
+#define TRBE_H
+
+#if ENABLE_TRBE_FOR_NS
+void trbe_init_el3(void);
+void trbe_init_el2_unused(void);
+#else
+static inline void trbe_init_el3(void)
+{
+}
+static inline void trbe_init_el2_unused(void)
+{
+}
+#endif /* ENABLE_TRBE_FOR_NS */
+
+#endif /* TRBE_H */
diff --git a/include/lib/extensions/trf.h b/include/lib/extensions/trf.h
new file mode 100644
index 0000000..1ac7cda
--- /dev/null
+++ b/include/lib/extensions/trf.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TRF_H
+#define TRF_H
+
+#if ENABLE_TRF_FOR_NS
+void trf_init_el3(void);
+void trf_init_el2_unused(void);
+#else
+static inline void trf_init_el3(void)
+{
+}
+static inline void trf_init_el2_unused(void)
+{
+}
+#endif /* ENABLE_TRF_FOR_NS */
+
+#endif /* TRF_H */
diff --git a/include/lib/fconf/fconf.h b/include/lib/fconf/fconf.h
new file mode 100644
index 0000000..5b54c04
--- /dev/null
+++ b/include/lib/fconf/fconf.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2019-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FCONF_H
+#define FCONF_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+/* Public API */
+#define FCONF_GET_PROPERTY(a, b, c) a##__##b##_getter(c)
+
+/*
+ * This macro takes three arguments:
+ * config: Configuration identifier
+ * name: property namespace
+ * callback: populate() function
+ */
+#define FCONF_REGISTER_POPULATOR(config, name, callback) \
+ __attribute__((used, section(".fconf_populator"))) \
+ static const struct fconf_populator (name##__populator) = { \
+ .config_type = (#config), \
+ .info = (#name), \
+ .populate = (callback) \
+ };
+
+/*
+ * Populator callback
+ *
+ * This structure is used by the fconf_populate function and should only be
+ * defined through the FCONF_REGISTER_POPULATOR macro.
+ */
+struct fconf_populator {
+ /* Description of the data loaded by the callback */
+ const char *config_type;
+ const char *info;
+
+ /* Callback used by fconf_populate function with a provided config dtb.
+ * Return 0 on success, err_code < 0 otherwise.
+ */
+ int (*populate)(uintptr_t config);
+};
+
+/* This function is used to load the tb_fw_config and fw_config DTBs */
+int fconf_load_config(unsigned int image_id);
+
+/* Top level populate function
+ *
+ * This function takes a configuration dtb and calls all the registered
+ * populator callbacks with it.
+ *
+ * Panics on error.
+ */
+void fconf_populate(const char *config_type, uintptr_t config);
+
+/* FCONF specific getter */
+#define fconf__dtb_getter(prop) fconf_dtb_info.prop
+
+/* Structure used to locally keep a reference to the config dtb. */
+struct fconf_dtb_info_t {
+ uintptr_t base_addr;
+ size_t size;
+};
+
+extern struct fconf_dtb_info_t fconf_dtb_info;
+
+#endif /* FCONF_H */
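
A short sketch of the registration and lookup flow: a module registers a populator for its namespace (the 'widget' namespace and callback below are hypothetical), fconf_populate() later invokes it with the config dtb, and properties are read back through FCONF_GET_PROPERTY(). The last line uses the built-in fconf/dtb getter defined above and expands to fconf_dtb_info.base_addr.

static int fconf_populate_widget(uintptr_t config)
{
	/* Parse the dtb at 'config' and cache the module's properties */
	return 0;	/* 0 on success, negative error code otherwise */
}

FCONF_REGISTER_POPULATOR(TB_FW, widget, fconf_populate_widget);

/* Elsewhere, after fconf_populate("TB_FW", dtb_addr) has run: */
uintptr_t dtb_base = FCONF_GET_PROPERTY(fconf, dtb, base_addr);
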
diff --git a/include/lib/fconf/fconf_amu_getter.h b/include/lib/fconf/fconf_amu_getter.h
new file mode 100644
index 0000000..2faee73
--- /dev/null
+++ b/include/lib/fconf/fconf_amu_getter.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FCONF_AMU_GETTER_H
+#define FCONF_AMU_GETTER_H
+
+#include <lib/extensions/amu.h>
+
+#define amu__config_getter(id) fconf_amu_config.id
+
+struct fconf_amu_config {
+ const struct amu_topology *topology;
+};
+
+extern struct fconf_amu_config fconf_amu_config;
+
+#endif /* FCONF_AMU_GETTER_H */
diff --git a/include/lib/fconf/fconf_dyn_cfg_getter.h b/include/lib/fconf/fconf_dyn_cfg_getter.h
new file mode 100644
index 0000000..3554673
--- /dev/null
+++ b/include/lib/fconf/fconf_dyn_cfg_getter.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FCONF_DYN_CFG_GETTER_H
+#define FCONF_DYN_CFG_GETTER_H
+
+#include <lib/fconf/fconf.h>
+
+#define FCONF_INVALID_IDX 0xFFFFFFFFU
+
+/* Dynamic configuration related getter */
+#define dyn_cfg__dtb_getter(id) dyn_cfg_dtb_info_getter(id)
+
+struct dyn_cfg_dtb_info_t {
+ uintptr_t config_addr;
+ uint32_t config_max_size;
+ unsigned int config_id;
+ /*
+ * A platform uses this address to copy the configuration
+ * to another location during the boot-flow.
+ * - e.g. HW_CONFIG
+ */
+ uintptr_t secondary_config_addr;
+};
+
+unsigned int dyn_cfg_dtb_info_get_index(unsigned int config_id);
+struct dyn_cfg_dtb_info_t *dyn_cfg_dtb_info_getter(unsigned int config_id);
+int fconf_populate_dtb_registry(uintptr_t config);
+
+/* Set config information in global DTB array */
+void set_config_info(uintptr_t config_addr, uintptr_t secondary_config_addr,
+ uint32_t config_max_size,
+ unsigned int config_id);
+
+#endif /* FCONF_DYN_CFG_GETTER_H */
diff --git a/include/lib/fconf/fconf_mpmm_getter.h b/include/lib/fconf/fconf_mpmm_getter.h
new file mode 100644
index 0000000..50d991a
--- /dev/null
+++ b/include/lib/fconf/fconf_mpmm_getter.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FCONF_MPMM_GETTER_H
+#define FCONF_MPMM_GETTER_H
+
+#include <lib/mpmm/mpmm.h>
+
+#define mpmm__config_getter(id) fconf_mpmm_config.id
+
+struct fconf_mpmm_config {
+ const struct mpmm_topology *topology;
+};
+
+extern struct fconf_mpmm_config fconf_mpmm_config;
+
+#endif /* FCONF_MPMM_GETTER_H */
diff --git a/include/lib/fconf/fconf_tbbr_getter.h b/include/lib/fconf/fconf_tbbr_getter.h
new file mode 100644
index 0000000..541a396
--- /dev/null
+++ b/include/lib/fconf/fconf_tbbr_getter.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FCONF_TBBR_GETTER_H
+#define FCONF_TBBR_GETTER_H
+
+#include <assert.h>
+
+#include <lib/fconf/fconf.h>
+
+/* TBBR related getter */
+#define tbbr__cot_getter(id) __extension__ ({ \
+ assert((id) < cot_desc_size); \
+ cot_desc_ptr[id]; \
+})
+
+#define tbbr__dyn_config_getter(id) tbbr_dyn_config.id
+
+struct tbbr_dyn_config_t {
+ uint32_t disable_auth;
+ void *mbedtls_heap_addr;
+ size_t mbedtls_heap_size;
+};
+
+extern struct tbbr_dyn_config_t tbbr_dyn_config;
+
+int fconf_populate_tbbr_dyn_config(uintptr_t config);
+
+#endif /* FCONF_TBBR_GETTER_H */
diff --git a/include/lib/gpt_rme/gpt_rme.h b/include/lib/gpt_rme/gpt_rme.h
new file mode 100644
index 0000000..94a88b0
--- /dev/null
+++ b/include/lib/gpt_rme/gpt_rme.h
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef GPT_RME_H
+#define GPT_RME_H
+
+#include <stdint.h>
+
+#include <arch.h>
+
+/******************************************************************************/
+/* GPT helper macros and definitions */
+/******************************************************************************/
+
+/*
+ * Structure for specifying a mapping range and its properties. This should not
+ * be manually initialized; using the GPT_MAP_REGION_x macros is recommended in
+ * order to avoid potential incompatibilities in the future.
+ */
+typedef struct pas_region {
+ uintptr_t base_pa; /* Base address for PAS. */
+ size_t size; /* Size of the PAS. */
+ unsigned int attrs; /* PAS GPI and entry type. */
+} pas_region_t;
+
+/* GPT GPI definitions */
+#define GPT_GPI_NO_ACCESS U(0x0)
+#define GPT_GPI_SECURE U(0x8)
+#define GPT_GPI_NS U(0x9)
+#define GPT_GPI_ROOT U(0xA)
+#define GPT_GPI_REALM U(0xB)
+#define GPT_GPI_ANY U(0xF)
+#define GPT_GPI_VAL_MASK UL(0xF)
+
+#define GPT_NSE_SECURE U(0b00)
+#define GPT_NSE_ROOT U(0b01)
+#define GPT_NSE_NS U(0b10)
+#define GPT_NSE_REALM U(0b11)
+
+#define GPT_NSE_SHIFT U(62)
+
+/* PAS attribute GPI definitions. */
+#define GPT_PAS_ATTR_GPI_SHIFT U(0)
+#define GPT_PAS_ATTR_GPI_MASK U(0xF)
+#define GPT_PAS_ATTR_GPI(_attrs) (((_attrs) \
+ >> GPT_PAS_ATTR_GPI_SHIFT) \
+ & GPT_PAS_ATTR_GPI_MASK)
+
+/* PAS attribute mapping type definitions */
+#define GPT_PAS_ATTR_MAP_TYPE_BLOCK U(0x0)
+#define GPT_PAS_ATTR_MAP_TYPE_GRANULE U(0x1)
+#define GPT_PAS_ATTR_MAP_TYPE_SHIFT U(4)
+#define GPT_PAS_ATTR_MAP_TYPE_MASK U(0x1)
+#define GPT_PAS_ATTR_MAP_TYPE(_attrs) (((_attrs) \
+ >> GPT_PAS_ATTR_MAP_TYPE_SHIFT) \
+ & GPT_PAS_ATTR_MAP_TYPE_MASK)
+
+/*
+ * Macro to initialize the attributes field in the pas_region_t structure.
+ * [31:5] Reserved
+ * [4] Mapping type (GPT_PAS_ATTR_MAP_TYPE_x definitions)
+ * [3:0] PAS GPI type (GPT_GPI_x definitions)
+ */
+#define GPT_PAS_ATTR(_type, _gpi) \
+ ((((_type) & GPT_PAS_ATTR_MAP_TYPE_MASK) \
+ << GPT_PAS_ATTR_MAP_TYPE_SHIFT) | \
+ (((_gpi) & GPT_PAS_ATTR_GPI_MASK) \
+ << GPT_PAS_ATTR_GPI_SHIFT))
+
+/*
+ * Macro to create a GPT entry for this PAS range as a block descriptor. If this
+ * region does not fit the requirements for a block descriptor then GPT
+ * initialization will fail.
+ */
+#define GPT_MAP_REGION_BLOCK(_pa, _sz, _gpi) \
+ { \
+ .base_pa = (_pa), \
+ .size = (_sz), \
+ .attrs = GPT_PAS_ATTR(GPT_PAS_ATTR_MAP_TYPE_BLOCK, (_gpi)), \
+ }
+
+/*
+ * Macro to create a GPT entry for this PAS range as a table descriptor. If this
+ * region does not fit the requirements for a table descriptor then GPT
+ * initialization will fail.
+ */
+#define GPT_MAP_REGION_GRANULE(_pa, _sz, _gpi) \
+ { \
+ .base_pa = (_pa), \
+ .size = (_sz), \
+ .attrs = GPT_PAS_ATTR(GPT_PAS_ATTR_MAP_TYPE_GRANULE, (_gpi)), \
+ }
+
+/******************************************************************************/
+/* GPT register field definitions */
+/******************************************************************************/
+
+/*
+ * Least significant address bits protected by each entry in level 0 GPT. This
+ * field is read-only.
+ */
+#define GPCCR_L0GPTSZ_SHIFT U(20)
+#define GPCCR_L0GPTSZ_MASK U(0xF)
+
+typedef enum {
+ GPCCR_L0GPTSZ_30BITS = U(0x0),
+ GPCCR_L0GPTSZ_34BITS = U(0x4),
+ GPCCR_L0GPTSZ_36BITS = U(0x6),
+ GPCCR_L0GPTSZ_39BITS = U(0x9)
+} gpccr_l0gptsz_e;
+
+/* Granule protection check priority bit definitions */
+#define GPCCR_GPCP_SHIFT U(17)
+#define GPCCR_GPCP_BIT (ULL(1) << GPCCR_GPCP_SHIFT)
+
+/* Granule protection check bit definitions */
+#define GPCCR_GPC_SHIFT U(16)
+#define GPCCR_GPC_BIT (ULL(1) << GPCCR_GPC_SHIFT)
+
+/* Physical granule size bit definitions */
+#define GPCCR_PGS_SHIFT U(14)
+#define GPCCR_PGS_MASK U(0x3)
+#define SET_GPCCR_PGS(x) (((x) & GPCCR_PGS_MASK) << GPCCR_PGS_SHIFT)
+
+typedef enum {
+ GPCCR_PGS_4K = U(0x0),
+ GPCCR_PGS_64K = U(0x1),
+ GPCCR_PGS_16K = U(0x2)
+} gpccr_pgs_e;
+
+/* GPT fetch shareability attribute bit definitions */
+#define GPCCR_SH_SHIFT U(12)
+#define GPCCR_SH_MASK U(0x3)
+#define SET_GPCCR_SH(x) (((x) & GPCCR_SH_MASK) << GPCCR_SH_SHIFT)
+
+typedef enum {
+ GPCCR_SH_NS = U(0x0),
+ GPCCR_SH_OS = U(0x2),
+ GPCCR_SH_IS = U(0x3)
+} gpccr_sh_e;
+
+/* GPT fetch outer cacheability attribute bit definitions */
+#define GPCCR_ORGN_SHIFT U(10)
+#define GPCCR_ORGN_MASK U(0x3)
+#define SET_GPCCR_ORGN(x) (((x) & GPCCR_ORGN_MASK) << GPCCR_ORGN_SHIFT)
+
+typedef enum {
+ GPCCR_ORGN_NC = U(0x0),
+ GPCCR_ORGN_WB_RA_WA = U(0x1),
+ GPCCR_ORGN_WT_RA_NWA = U(0x2),
+ GPCCR_ORGN_WB_RA_NWA = U(0x3)
+} gpccr_orgn_e;
+
+/* GPT fetch inner cacheability attribute bit definitions */
+#define GPCCR_IRGN_SHIFT U(8)
+#define GPCCR_IRGN_MASK U(0x3)
+#define SET_GPCCR_IRGN(x) (((x) & GPCCR_IRGN_MASK) << GPCCR_IRGN_SHIFT)
+
+typedef enum {
+ GPCCR_IRGN_NC = U(0x0),
+ GPCCR_IRGN_WB_RA_WA = U(0x1),
+ GPCCR_IRGN_WT_RA_NWA = U(0x2),
+ GPCCR_IRGN_WB_RA_NWA = U(0x3)
+} gpccr_irgn_e;
+
+/* Protected physical address size bit definitions */
+#define GPCCR_PPS_SHIFT U(0)
+#define GPCCR_PPS_MASK U(0x7)
+#define SET_GPCCR_PPS(x) (((x) & GPCCR_PPS_MASK) << GPCCR_PPS_SHIFT)
+
+typedef enum {
+ GPCCR_PPS_4GB = U(0x0),
+ GPCCR_PPS_64GB = U(0x1),
+ GPCCR_PPS_1TB = U(0x2),
+ GPCCR_PPS_4TB = U(0x3),
+ GPCCR_PPS_16TB = U(0x4),
+ GPCCR_PPS_256TB = U(0x5),
+ GPCCR_PPS_4PB = U(0x6)
+} gpccr_pps_e;
+
+/* Base Address for the GPT bit definitions */
+#define GPTBR_BADDR_SHIFT U(0)
+#define GPTBR_BADDR_VAL_SHIFT U(12)
+#define GPTBR_BADDR_MASK ULL(0xffffffffff)
+
+/******************************************************************************/
+/* GPT public APIs */
+/******************************************************************************/
+
+/*
+ * Public API that initializes the entire protected space to GPT_GPI_ANY using
+ * the L0 tables (block descriptors). Ideally, this function is invoked prior
+ * to DDR discovery and initialization. The MMU must be initialized before
+ * calling this function.
+ *
+ * Parameters
+ * pps PPS value to use for table generation
+ * l0_mem_base Base address of L0 tables in memory.
+ * l0_mem_size Total size of memory available for L0 tables.
+ *
+ * Return
+ * Negative Linux error code in the event of a failure, 0 for success.
+ */
+int gpt_init_l0_tables(gpccr_pps_e pps,
+ uintptr_t l0_mem_base,
+ size_t l0_mem_size);
+
+/*
+ * Public API that carves out PAS regions from the L0 tables and builds any L1
+ * tables that are needed. This function is ideally run after DDR discovery and
+ * initialization. The L0 tables must have already been initialized to GPI_ANY
+ * when this function is called.
+ *
+ * Parameters
+ * pgs PGS value to use for table generation.
+ * l1_mem_base Base address of memory used for L1 tables.
+ * l1_mem_size Total size of memory available for L1 tables.
+ * *pas_regions Pointer to PAS regions structure array.
+ * pas_count Total number of PAS regions.
+ *
+ * Return
+ * Negative Linux error code in the event of a failure, 0 for success.
+ */
+int gpt_init_pas_l1_tables(gpccr_pgs_e pgs,
+ uintptr_t l1_mem_base,
+ size_t l1_mem_size,
+ pas_region_t *pas_regions,
+ unsigned int pas_count);
+
+/*
+ * Public API to initialize the runtime gpt_config structure based on the values
+ * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization
+ * typically happens in a bootloader stage prior to setting up the EL3 runtime
+ * environment for the granule transition service so this function detects the
+ * initialization from a previous stage. Granule protection checks must be
+ * enabled already or this function will return an error.
+ *
+ * Return
+ * Negative Linux error code in the event of a failure, 0 for success.
+ */
+int gpt_runtime_init(void);
+
+/*
+ * Public API to enable granule protection checks once the tables have all been
+ * initialized. This function is called at first initialization and then again
+ * later during warm boots of CPU cores.
+ *
+ * Return
+ * Negative Linux error code in the event of a failure, 0 for success.
+ */
+int gpt_enable(void);
+
+/*
+ * Public API to disable granule protection checks.
+ */
+void gpt_disable(void);
+
+/*
+ * This function is the core of the granule transition service. When a granule
+ * transition request occurs, it is routed to this function, where the request
+ * is validated and then fulfilled if possible.
+ *
+ * TODO: implement support for transitioning multiple granules at once.
+ *
+ * Parameters
+ * base: Base address of the region to transition, must be aligned to granule
+ * size.
+ * size: Size of region to transition, must be aligned to granule size.
+ * src_sec_state: Security state of the originating SMC invoking the API.
+ *
+ * Return
+ * Negative Linux error code in the event of a failure, 0 for success.
+ */
+int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state);
+int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state);
+
+#endif /* GPT_RME_H */
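
A minimal, hypothetical boot-flow sketch of the API above (editorial illustration, not part of the patch). The include path, the GPCCR_PGS_4K enumerator (defined earlier in this header), the plat_* names and all addresses/sizes are illustrative assumptions, not real platform values.

    #include <lib/gpt_rme/gpt_rme.h>        /* assumed include path */

    /* A platform would provide its own PAS layout; hypothetical externs here. */
    extern pas_region_t plat_pas_regions[];
    extern unsigned int plat_pas_count;

    int plat_setup_gpt_example(void)
    {
            int ret;

            /* Cold boot, MMU already on: cover the whole PPS with GPI_ANY
             * block descriptors in the L0 tables. */
            ret = gpt_init_l0_tables(GPCCR_PPS_4GB, 0x80000000UL, 0x1000UL);
            if (ret != 0) {
                    return ret;
            }

            /* After DDR is known: carve the PAS regions out into L1 tables. */
            ret = gpt_init_pas_l1_tables(GPCCR_PGS_4K, 0x80001000UL, 0x10000UL,
                                         plat_pas_regions, plat_pas_count);
            if (ret != 0) {
                    return ret;
            }

            /* Turn on granule protection checks (called again on warm boot).
             * A later runtime stage would call gpt_runtime_init() instead. */
            return gpt_enable();
    }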
diff --git a/include/lib/libc/aarch32/endian_.h b/include/lib/libc/aarch32/endian_.h
new file mode 100644
index 0000000..edca496
--- /dev/null
+++ b/include/lib/libc/aarch32/endian_.h
@@ -0,0 +1,146 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001 David E. O'Brien
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)endian.h 8.1 (Berkeley) 6/10/93
+ * $NetBSD: endian.h,v 1.7 1999/08/21 05:53:51 simonb Exp $
+ * $FreeBSD$
+ */
+/*
+ * Portions copyright (c) 2018, Arm Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef ENDIAN__H
+#define ENDIAN__H
+
+#include <stdint.h>
+
+/*
+ * Definitions for byte order, according to byte significance from low
+ * address to high.
+ */
+#define _LITTLE_ENDIAN 1234 /* LSB first: i386, vax */
+#define _BIG_ENDIAN 4321 /* MSB first: 68000, ibm, net */
+#define _PDP_ENDIAN 3412 /* LSB first in word, MSW first in long */
+
+#ifdef __ARMEB__
+#define _BYTE_ORDER _BIG_ENDIAN
+#else
+#define _BYTE_ORDER _LITTLE_ENDIAN
+#endif /* __ARMEB__ */
+
+#if __BSD_VISIBLE
+#define LITTLE_ENDIAN _LITTLE_ENDIAN
+#define BIG_ENDIAN _BIG_ENDIAN
+#define PDP_ENDIAN _PDP_ENDIAN
+#define BYTE_ORDER _BYTE_ORDER
+#endif
+
+#ifdef __ARMEB__
+#define _QUAD_HIGHWORD 0
+#define _QUAD_LOWWORD 1
+#define __ntohl(x) ((uint32_t)(x))
+#define __ntohs(x) ((uint16_t)(x))
+#define __htonl(x) ((uint32_t)(x))
+#define __htons(x) ((uint16_t)(x))
+#else
+#define _QUAD_HIGHWORD 1
+#define _QUAD_LOWWORD 0
+#define __ntohl(x) (__bswap32(x))
+#define __ntohs(x) (__bswap16(x))
+#define __htonl(x) (__bswap32(x))
+#define __htons(x) (__bswap16(x))
+#endif /* __ARMEB__ */
+
+static __inline uint64_t
+__bswap64(uint64_t _x)
+{
+
+ return ((_x >> 56) | ((_x >> 40) & 0xff00) | ((_x >> 24) & 0xff0000) |
+ ((_x >> 8) & 0xff000000) | ((_x << 8) & ((uint64_t)0xff << 32)) |
+ ((_x << 24) & ((uint64_t)0xff << 40)) |
+ ((_x << 40) & ((uint64_t)0xff << 48)) | ((_x << 56)));
+}
+
+static __inline uint32_t
+__bswap32_var(uint32_t v)
+{
+ uint32_t t1;
+
+ __asm __volatile("eor %1, %0, %0, ror #16\n"
+ "bic %1, %1, #0x00ff0000\n"
+ "mov %0, %0, ror #8\n"
+ "eor %0, %0, %1, lsr #8\n"
+ : "+r" (v), "=r" (t1));
+
+ return (v);
+}
+
+static __inline uint16_t
+__bswap16_var(uint16_t v)
+{
+ uint32_t ret = v & 0xffff;
+
+ __asm __volatile(
+ "mov %0, %0, ror #8\n"
+ "orr %0, %0, %0, lsr #16\n"
+ "bic %0, %0, %0, lsl #16"
+ : "+r" (ret));
+
+ return ((uint16_t)ret);
+}
+
+#ifdef __OPTIMIZE__
+
+#define __bswap32_constant(x) \
+ ((((x) & 0xff000000U) >> 24) | \
+ (((x) & 0x00ff0000U) >> 8) | \
+ (((x) & 0x0000ff00U) << 8) | \
+ (((x) & 0x000000ffU) << 24))
+
+#define __bswap16_constant(x) \
+ ((((x) & 0xff00) >> 8) | \
+ (((x) & 0x00ff) << 8))
+
+#define __bswap16(x) \
+ ((uint16_t)(__builtin_constant_p(x) ? \
+ __bswap16_constant(x) : \
+ __bswap16_var(x)))
+
+#define __bswap32(x) \
+ ((uint32_t)(__builtin_constant_p(x) ? \
+ __bswap32_constant(x) : \
+ __bswap32_var(x)))
+
+#else
+#define __bswap16(x) __bswap16_var(x)
+#define __bswap32(x) __bswap32_var(x)
+
+#endif /* __OPTIMIZE__ */
+#endif /* ENDIAN__H */
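
A short editorial illustration (not part of the patch) of how the __bswap16/__bswap32 wrappers above behave: with __OPTIMIZE__ defined, constant arguments are folded through the pure-macro *_constant variants, while runtime values go through the inline-assembly *_var helpers. endian_.h itself is pulled in via the generic <endian.h> shown further down; the function names are hypothetical.

    #include <endian.h>
    #include <stdint.h>

    uint32_t swap_const(void)
    {
            /* Constant argument: __builtin_constant_p() is true, so the
             * __bswap32_constant() macro folds this to 0x44332211 at compile
             * time (when __OPTIMIZE__ is defined). */
            return __bswap32(0x11223344U);
    }

    uint32_t swap_var(uint32_t x)
    {
            /* Runtime argument: dispatches to the inline-asm __bswap32_var(). */
            return __bswap32(x);
    }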
diff --git a/include/lib/libc/aarch32/float.h b/include/lib/libc/aarch32/float.h
new file mode 100644
index 0000000..857d76e
--- /dev/null
+++ b/include/lib/libc/aarch32/float.h
@@ -0,0 +1,100 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1989 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)float.h 7.1 (Berkeley) 5/8/90
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_FLOAT_H_
+#define _MACHINE_FLOAT_H_ 1
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+extern int __flt_rounds(void);
+__END_DECLS
+
+#define FLT_RADIX 2 /* b */
+#ifndef _ARM_HARD_FLOAT
+#define FLT_ROUNDS __flt_rounds()
+#else
+#define FLT_ROUNDS (-1)
+#endif
+#if __ISO_C_VISIBLE >= 1999
+#define FLT_EVAL_METHOD 0
+#define DECIMAL_DIG 17 /* max precision in decimal digits */
+#endif
+
+#define FLT_MANT_DIG 24 /* p */
+#define FLT_EPSILON 1.19209290E-07F /* b**(1-p) */
+#define FLT_DIG 6 /* floor((p-1)*log10(b))+(b == 10) */
+#define FLT_MIN_EXP (-125) /* emin */
+#define FLT_MIN 1.17549435E-38F /* b**(emin-1) */
+#define FLT_MIN_10_EXP (-37) /* ceil(log10(b**(emin-1))) */
+#define FLT_MAX_EXP 128 /* emax */
+#define FLT_MAX 3.40282347E+38F /* (1-b**(-p))*b**emax */
+#define FLT_MAX_10_EXP 38 /* floor(log10((1-b**(-p))*b**emax)) */
+#if __ISO_C_VISIBLE >= 2011
+#define FLT_TRUE_MIN 1.40129846E-45F /* b**(emin-p) */
+#define FLT_DECIMAL_DIG 9 /* ceil(1+p*log10(b)) */
+#define FLT_HAS_SUBNORM 1
+#endif /* __ISO_C_VISIBLE >= 2011 */
+
+#define DBL_MANT_DIG 53
+#define DBL_EPSILON 2.2204460492503131E-16
+#define DBL_DIG 15
+#define DBL_MIN_EXP (-1021)
+#define DBL_MIN 2.2250738585072014E-308
+#define DBL_MIN_10_EXP (-307)
+#define DBL_MAX_EXP 1024
+#define DBL_MAX 1.7976931348623157E+308
+#define DBL_MAX_10_EXP 308
+#if __ISO_C_VISIBLE >= 2011
+#define DBL_TRUE_MIN 4.9406564584124654E-324
+#define DBL_DECIMAL_DIG 17
+#define DBL_HAS_SUBNORM 1
+#endif /* __ISO_C_VISIBLE >= 2011 */
+
+#define LDBL_MANT_DIG DBL_MANT_DIG
+#define LDBL_EPSILON ((long double)DBL_EPSILON)
+#define LDBL_DIG DBL_DIG
+#define LDBL_MIN_EXP DBL_MIN_EXP
+#define LDBL_MIN ((long double)DBL_MIN)
+#define LDBL_MIN_10_EXP DBL_MIN_10_EXP
+#define LDBL_MAX_EXP DBL_MAX_EXP
+#define LDBL_MAX ((long double)DBL_MAX)
+#define LDBL_MAX_10_EXP DBL_MAX_10_EXP
+#if __ISO_C_VISIBLE >= 2011
+#define LDBL_TRUE_MIN ((long double)DBL_TRUE_MIN)
+#define LDBL_DECIMAL_DIG DBL_DECIMAL_DIG
+#define LDBL_HAS_SUBNORM DBL_HAS_SUBNORM
+#endif /* __ISO_C_VISIBLE >= 2011 */
+
+#endif /* _MACHINE_FLOAT_H_ */
diff --git a/include/lib/libc/aarch32/inttypes_.h b/include/lib/libc/aarch32/inttypes_.h
new file mode 100644
index 0000000..0888bf0
--- /dev/null
+++ b/include/lib/libc/aarch32/inttypes_.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2020 Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/*
+ * Portions copyright (c) 2020, Arm Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef INTTYPES__H
+#define INTTYPES__H
+
+#define PRId64 "lld" /* int64_t */
+#define PRIi64 "lli" /* int64_t */
+#define PRIo64 "llo" /* int64_t */
+#define PRIu64 "llu" /* uint64_t */
+#define PRIx64 "llx" /* uint64_t */
+#define PRIX64 "llX" /* uint64_t */
+
+#define PRIdPTR "d" /* intptr_t */
+#define PRIiPTR "i" /* intptr_t */
+#define PRIoPTR "o" /* intptr_t */
+#define PRIuPTR "u" /* uintptr_t */
+#define PRIxPTR "x" /* uintptr_t */
+#define PRIXPTR "X" /* uintptr_t */
+
+#endif /* INTTYPES__H */
diff --git a/include/lib/libc/aarch32/limits_.h b/include/lib/libc/aarch32/limits_.h
new file mode 100644
index 0000000..5b0516a
--- /dev/null
+++ b/include/lib/libc/aarch32/limits_.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#define SCHAR_MAX 0x7F
+#define SCHAR_MIN (-SCHAR_MAX - 1)
+#define CHAR_MAX 0x7F
+#define CHAR_MIN (-CHAR_MAX - 1)
+#define UCHAR_MAX 0xFFU
+#define SHRT_MAX 0x7FFF
+#define SHRT_MIN (-SHRT_MAX - 1)
+#define USHRT_MAX 0xFFFFU
+#define INT_MAX 0x7FFFFFFF
+#define INT_MIN (-INT_MAX - 1)
+#define UINT_MAX 0xFFFFFFFFU
+#define LONG_MAX 0x7FFFFFFFL
+#define LONG_MIN (-LONG_MAX - 1L)
+#define ULONG_MAX 0xFFFFFFFFUL
+#define LLONG_MAX 0x7FFFFFFFFFFFFFFFLL
+#define LLONG_MIN (-LLONG_MAX - 1LL)
+#define ULLONG_MAX 0xFFFFFFFFFFFFFFFFULL
+
+#define __LONG_BIT 32
+#define __WORD_BIT 32
diff --git a/include/lib/libc/aarch32/stddef_.h b/include/lib/libc/aarch32/stddef_.h
new file mode 100644
index 0000000..14ed094
--- /dev/null
+++ b/include/lib/libc/aarch32/stddef_.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef STDDEF__H
+#define STDDEF__H
+
+#ifndef SIZET_
+typedef unsigned int size_t;
+#define SIZET_
+#endif
+
+#endif /* STDDEF__H */
diff --git a/include/lib/libc/aarch32/stdint_.h b/include/lib/libc/aarch32/stdint_.h
new file mode 100644
index 0000000..6e2deed
--- /dev/null
+++ b/include/lib/libc/aarch32/stdint_.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2020 Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/*
+ * Portions copyright (c) 2020, Arm Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef STDINT__H
+#define STDINT__H
+
+#define INT64_MAX LLONG_MAX
+#define INT64_MIN LLONG_MIN
+#define UINT64_MAX ULLONG_MAX
+
+#define INT64_C(x) x ## LL
+#define UINT64_C(x) x ## ULL
+
+typedef long long int64_t;
+typedef unsigned long long uint64_t;
+typedef long long int64_least_t;
+typedef unsigned long long uint64_least_t;
+typedef long long int64_fast_t;
+typedef unsigned long long uint64_fast_t;
+
+#endif
diff --git a/include/lib/libc/aarch32/stdio_.h b/include/lib/libc/aarch32/stdio_.h
new file mode 100644
index 0000000..7042664
--- /dev/null
+++ b/include/lib/libc/aarch32/stdio_.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef STDIO__H
+#define STDIO__H
+
+#ifndef SSIZET_
+typedef int ssize_t;
+#define SSIZET_
+#endif
+
+#endif /* STDIO__H */
diff --git a/include/lib/libc/aarch64/endian_.h b/include/lib/libc/aarch64/endian_.h
new file mode 100644
index 0000000..58273d7
--- /dev/null
+++ b/include/lib/libc/aarch64/endian_.h
@@ -0,0 +1,128 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2001 David E. O'Brien
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)endian.h 8.1 (Berkeley) 6/10/93
+ * $NetBSD: endian.h,v 1.7 1999/08/21 05:53:51 simonb Exp $
+ * $FreeBSD$
+ */
+/*
+ * Portions copyright (c) 2018, Arm Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef ENDIAN__H
+#define ENDIAN__H
+
+#include <stdint.h>
+
+/*
+ * Definitions for byte order, according to byte significance from low
+ * address to high.
+ */
+#define _LITTLE_ENDIAN 1234 /* LSB first: i386, vax */
+#define _BIG_ENDIAN 4321 /* MSB first: 68000, ibm, net */
+#define _PDP_ENDIAN 3412 /* LSB first in word, MSW first in long */
+
+#define _BYTE_ORDER _LITTLE_ENDIAN
+
+#if __BSD_VISIBLE
+#define LITTLE_ENDIAN _LITTLE_ENDIAN
+#define BIG_ENDIAN _BIG_ENDIAN
+#define PDP_ENDIAN _PDP_ENDIAN
+#define BYTE_ORDER _BYTE_ORDER
+#endif
+
+#define _QUAD_HIGHWORD 1
+#define _QUAD_LOWWORD 0
+#define __ntohl(x) (__bswap32(x))
+#define __ntohs(x) (__bswap16(x))
+#define __htonl(x) (__bswap32(x))
+#define __htons(x) (__bswap16(x))
+
+static __inline uint64_t
+__bswap64(uint64_t x)
+{
+ uint64_t ret;
+
+ __asm __volatile("rev %0, %1\n"
+ : "=&r" (ret), "+r" (x));
+
+ return (ret);
+}
+
+static __inline uint32_t
+__bswap32_var(uint32_t v)
+{
+ uint32_t ret;
+
+ __asm __volatile("rev32 %x0, %x1\n"
+ : "=&r" (ret), "+r" (v));
+
+ return (ret);
+}
+
+static __inline uint16_t
+__bswap16_var(uint16_t v)
+{
+ uint32_t ret;
+
+ __asm __volatile("rev16 %w0, %w1\n"
+ : "=&r" (ret), "+r" (v));
+
+ return ((uint16_t)ret);
+}
+
+#ifdef __OPTIMIZE__
+
+#define __bswap32_constant(x) \
+ ((((x) & 0xff000000U) >> 24) | \
+ (((x) & 0x00ff0000U) >> 8) | \
+ (((x) & 0x0000ff00U) << 8) | \
+ (((x) & 0x000000ffU) << 24))
+
+#define __bswap16_constant(x) \
+ ((((x) & 0xff00) >> 8) | \
+ (((x) & 0x00ff) << 8))
+
+#define __bswap16(x) \
+ ((uint16_t)(__builtin_constant_p(x) ? \
+ __bswap16_constant((uint16_t)(x)) : \
+ __bswap16_var(x)))
+
+#define __bswap32(x) \
+ ((uint32_t)(__builtin_constant_p(x) ? \
+ __bswap32_constant((uint32_t)(x)) : \
+ __bswap32_var(x)))
+
+#else
+#define __bswap16(x) __bswap16_var(x)
+#define __bswap32(x) __bswap32_var(x)
+
+#endif /* __OPTIMIZE__ */
+#endif /* ENDIAN__H */
diff --git a/include/lib/libc/aarch64/float.h b/include/lib/libc/aarch64/float.h
new file mode 100644
index 0000000..0829f6f
--- /dev/null
+++ b/include/lib/libc/aarch64/float.h
@@ -0,0 +1,94 @@
+/*-
+ * Copyright (c) 1989 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)float.h 7.1 (Berkeley) 5/8/90
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_FLOAT_H_
+#define _MACHINE_FLOAT_H_
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+extern int __flt_rounds(void);
+__END_DECLS
+
+#define FLT_RADIX 2 /* b */
+#define FLT_ROUNDS __flt_rounds()
+#if __ISO_C_VISIBLE >= 1999
+#define FLT_EVAL_METHOD 0
+#define DECIMAL_DIG 17 /* max precision in decimal digits */
+#endif
+
+#define FLT_MANT_DIG 24 /* p */
+#define FLT_EPSILON 1.19209290E-07F /* b**(1-p) */
+#define FLT_DIG 6 /* floor((p-1)*log10(b))+(b == 10) */
+#define FLT_MIN_EXP (-125) /* emin */
+#define FLT_MIN 1.17549435E-38F /* b**(emin-1) */
+#define FLT_MIN_10_EXP (-37) /* ceil(log10(b**(emin-1))) */
+#define FLT_MAX_EXP 128 /* emax */
+#define FLT_MAX 3.40282347E+38F /* (1-b**(-p))*b**emax */
+#define FLT_MAX_10_EXP 38 /* floor(log10((1-b**(-p))*b**emax)) */
+#if __ISO_C_VISIBLE >= 2011
+#define FLT_TRUE_MIN 1.40129846E-45F /* b**(emin-p) */
+#define FLT_DECIMAL_DIG 9 /* ceil(1+p*log10(b)) */
+#define FLT_HAS_SUBNORM 1
+#endif /* __ISO_C_VISIBLE >= 2011 */
+
+#define DBL_MANT_DIG 53
+#define DBL_EPSILON 2.2204460492503131E-16
+#define DBL_DIG 15
+#define DBL_MIN_EXP (-1021)
+#define DBL_MIN 2.2250738585072014E-308
+#define DBL_MIN_10_EXP (-307)
+#define DBL_MAX_EXP 1024
+#define DBL_MAX 1.7976931348623157E+308
+#define DBL_MAX_10_EXP 308
+#if __ISO_C_VISIBLE >= 2011
+#define DBL_TRUE_MIN 4.9406564584124654E-324
+#define DBL_DECIMAL_DIG 17
+#define DBL_HAS_SUBNORM 1
+#endif /* __ISO_C_VISIBLE >= 2011 */
+
+#define LDBL_MANT_DIG 113
+#define LDBL_EPSILON 1.925929944387235853055977942584927319E-34L
+#define LDBL_DIG 33
+#define LDBL_MIN_EXP (-16381)
+#define LDBL_MIN 3.362103143112093506262677817321752603E-4932L
+#define LDBL_MIN_10_EXP (-4931)
+#define LDBL_MAX_EXP (+16384)
+#define LDBL_MAX 1.189731495357231765085759326628007016E+4932L
+#define LDBL_MAX_10_EXP (+4932)
+#if __ISO_C_VISIBLE >= 2011
+#define LDBL_TRUE_MIN 6.475175119438025110924438958227646552E-4966L
+#define LDBL_DECIMAL_DIG 36
+#define LDBL_HAS_SUBNORM 1
+#endif /* __ISO_C_VISIBLE >= 2011 */
+
+#endif /* _MACHINE_FLOAT_H_ */
diff --git a/include/lib/libc/aarch64/inttypes_.h b/include/lib/libc/aarch64/inttypes_.h
new file mode 100644
index 0000000..6109084
--- /dev/null
+++ b/include/lib/libc/aarch64/inttypes_.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2020 Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/*
+ * Portions copyright (c) 2020, Arm Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef INTTYPES__H
+#define INTTYPES__H
+
+#define PRId64 "ld" /* int64_t */
+#define PRIi64 "li" /* int64_t */
+#define PRIo64 "lo" /* int64_t */
+#define PRIu64 "lu" /* uint64_t */
+#define PRIx64 "lx" /* uint64_t */
+#define PRIX64 "lX" /* uint64_t */
+
+#define PRIdPTR "ld" /* intptr_t */
+#define PRIiPTR "li" /* intptr_t */
+#define PRIoPTR "lo" /* intptr_t */
+#define PRIuPTR "lu" /* uintptr_t */
+#define PRIxPTR "lx" /* uintptr_t */
+#define PRIXPTR "lX" /* uintptr_t */
+
+#endif /* INTTYPES__H */
diff --git a/include/lib/libc/aarch64/limits_.h b/include/lib/libc/aarch64/limits_.h
new file mode 100644
index 0000000..834439e
--- /dev/null
+++ b/include/lib/libc/aarch64/limits_.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#define SCHAR_MAX 0x7F
+#define SCHAR_MIN (-SCHAR_MAX - 1)
+#define CHAR_MAX 0x7F
+#define CHAR_MIN (-CHAR_MAX - 1)
+#define UCHAR_MAX 0xFFU
+#define SHRT_MAX 0x7FFF
+#define SHRT_MIN (-SHRT_MAX - 1)
+#define USHRT_MAX 0xFFFFU
+#define INT_MAX 0x7FFFFFFF
+#define INT_MIN (-INT_MAX - 1)
+#define UINT_MAX 0xFFFFFFFFU
+#define LONG_MAX 0x7FFFFFFFFFFFFFFFL
+#define LONG_MIN (-LONG_MAX - 1L)
+#define ULONG_MAX 0xFFFFFFFFFFFFFFFFUL
+#define LLONG_MAX 0x7FFFFFFFFFFFFFFFLL
+#define LLONG_MIN (-LLONG_MAX - 1LL)
+#define ULLONG_MAX 0xFFFFFFFFFFFFFFFFULL
+
+#define __LONG_BIT 64
+#define __WORD_BIT 32
diff --git a/include/lib/libc/aarch64/setjmp_.h b/include/lib/libc/aarch64/setjmp_.h
new file mode 100644
index 0000000..a7d0b5c
--- /dev/null
+++ b/include/lib/libc/aarch64/setjmp_.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SETJMP__H
+#define SETJMP__H
+
+#define JMP_CTX_X19 0x0
+#define JMP_CTX_X21 0x10
+#define JMP_CTX_X23 0x20
+#define JMP_CTX_X25 0x30
+#define JMP_CTX_X27 0x40
+#define JMP_CTX_X29 0x50
+#define JMP_CTX_SP 0x60
+#define JMP_CTX_END 0x70 /* Aligned to 16 bytes */
+
+#define JMP_SIZE (JMP_CTX_END >> 3)
+
+#ifndef __ASSEMBLER__
+
+#include <cdefs.h>
+
+/* Jump buffer hosting x18 - x30 and sp_el0 registers */
+/* Jump buffer hosting the x19 - x30 and sp_el0 registers */
+typedef uint64_t jmp_buf[JMP_SIZE] __aligned(16);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* SETJMP__H */
diff --git a/include/lib/libc/aarch64/stddef_.h b/include/lib/libc/aarch64/stddef_.h
new file mode 100644
index 0000000..963048e
--- /dev/null
+++ b/include/lib/libc/aarch64/stddef_.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef STDDEF__H
+#define STDDEF__H
+
+#ifndef SIZET_
+typedef unsigned long size_t;
+#define SIZET_
+#endif
+
+#endif /* STDDEF__H */
diff --git a/include/lib/libc/aarch64/stdint_.h b/include/lib/libc/aarch64/stdint_.h
new file mode 100644
index 0000000..34a75ec
--- /dev/null
+++ b/include/lib/libc/aarch64/stdint_.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020 Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/*
+ * Portions copyright (c) 2020, Arm Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef STDINT__H
+#define STDINT__H
+
+#define INT64_MAX LONG_MAX
+#define INT64_MIN LONG_MIN
+#define UINT64_MAX ULONG_MAX
+
+#define INT64_C(x) x ## L
+#define UINT64_C(x) x ## UL
+
+typedef long int64_t;
+typedef unsigned long uint64_t;
+typedef long int64_least_t;
+typedef unsigned long uint64_least_t;
+typedef long int64_fast_t;
+typedef unsigned long uint64_fast_t;
+
+typedef __int128 int128_t;
+typedef unsigned __int128 uint128_t;
+
+#endif
diff --git a/include/lib/libc/aarch64/stdio_.h b/include/lib/libc/aarch64/stdio_.h
new file mode 100644
index 0000000..331bcaa
--- /dev/null
+++ b/include/lib/libc/aarch64/stdio_.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef STDIO__H
+#define STDIO__H
+
+#ifndef SSIZET_
+typedef long ssize_t;
+#define SSIZET_
+#endif
+
+#endif /* STDIO__H */
diff --git a/include/lib/libc/arm_acle.h b/include/lib/libc/arm_acle.h
new file mode 100644
index 0000000..d1bc0f9
--- /dev/null
+++ b/include/lib/libc/arm_acle.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2021 Arm Limited
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * The definitions below are a subset of what we would normally get by using
+ * the compiler's version of arm_acle.h. We can't use that directly because
+ * we specify -nostdinc in the Makefiles.
+ *
+ * We just define the functions we need so far.
+ */
+
+#ifndef ARM_ACLE_H
+#define ARM_ACLE_H
+
+#if !defined(__aarch64__) || defined(__clang__)
+# define __crc32b __builtin_arm_crc32b
+# define __crc32w __builtin_arm_crc32w
+#else
+# define __crc32b __builtin_aarch64_crc32b
+# define __crc32w __builtin_aarch64_crc32w
+#endif
+
+#endif /* ARM_ACLE_H */
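
An editorial usage sketch (not part of the patch) of the CRC32 builtins mapped above; crc32_bytes() is a hypothetical helper and assumes the CRC32 extension is enabled in the compiler (e.g. -march=armv8-a+crc).

    #include <arm_acle.h>
    #include <stddef.h>
    #include <stdint.h>

    static uint32_t crc32_bytes(uint32_t crc, const uint8_t *buf, size_t len)
    {
            /* Feed the buffer one byte at a time through the CRC32 instruction. */
            while (len-- > 0U) {
                    crc = __crc32b(crc, *buf++);
            }
            return crc;
    }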
diff --git a/include/lib/libc/assert.h b/include/lib/libc/assert.h
new file mode 100644
index 0000000..acfd147
--- /dev/null
+++ b/include/lib/libc/assert.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ASSERT_H
+#define ASSERT_H
+
+#include <cdefs.h>
+
+#include <common/debug.h>
+
+#ifndef PLAT_LOG_LEVEL_ASSERT
+#define PLAT_LOG_LEVEL_ASSERT LOG_LEVEL
+#endif
+
+#if ENABLE_ASSERTIONS
+# if PLAT_LOG_LEVEL_ASSERT >= LOG_LEVEL_INFO
+# define assert(e) ((e) ? (void)0 : __assert(__FILE__, __LINE__))
+# else
+# define assert(e) ((e) ? (void)0 : __assert())
+# endif
+#else
+#define assert(e) ((void)0)
+#endif /* ENABLE_ASSERTIONS */
+
+#if PLAT_LOG_LEVEL_ASSERT >= LOG_LEVEL_INFO
+void __dead2 __assert(const char *file, unsigned int line);
+#else
+void __dead2 __assert(void);
+#endif
+
+#endif /* ASSERT_H */
diff --git a/include/lib/libc/cdefs.h b/include/lib/libc/cdefs.h
new file mode 100644
index 0000000..b11d072
--- /dev/null
+++ b/include/lib/libc/cdefs.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CDEFS_H
+#define CDEFS_H
+
+#define __dead2 __attribute__((__noreturn__))
+#define __deprecated __attribute__((__deprecated__))
+#define __packed __attribute__((__packed__))
+#define __used __attribute__((__used__))
+#define __unused __attribute__((__unused__))
+#define __maybe_unused __attribute__((__unused__))
+#define __aligned(x) __attribute__((__aligned__(x)))
+#define __section(x) __attribute__((__section__(x)))
+#define __fallthrough __attribute__((__fallthrough__))
+#if RECLAIM_INIT_CODE
+/*
+ * Add each function to a section that is unique so the functions can still
+ * be garbage collected
+ */
+#define __init __section(".text.init." __FILE__ "." __XSTRING(__LINE__))
+#else
+#define __init
+#endif
+
+#define __printflike(fmtarg, firstvararg) \
+ __attribute__((__format__ (__printf__, fmtarg, firstvararg)))
+
+#define __weak_reference(sym, alias) \
+ __asm__(".weak alias"); \
+ __asm__(".equ alias, sym")
+
+#define __STRING(x) #x
+#define __XSTRING(x) __STRING(x)
+
+#endif /* CDEFS_H */
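
A brief editorial illustration (not part of the patch) of how the attribute wrappers above are typically used; plat_log() and dma_desc are hypothetical names.

    #include <cdefs.h>
    #include <stdint.h>

    /* Ask the compiler to type-check callers' format strings and arguments. */
    void plat_log(const char *fmt, ...) __printflike(1, 2);

    /* Place a buffer on a 64-byte boundary and keep it even if unreferenced. */
    static uint8_t __aligned(64) __used dma_desc[128];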
diff --git a/include/lib/libc/endian.h b/include/lib/libc/endian.h
new file mode 100644
index 0000000..9c9fd58
--- /dev/null
+++ b/include/lib/libc/endian.h
@@ -0,0 +1,191 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2002 Thomas Moestl <tmm@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * Portions copyright (c) 2018, Arm Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef ENDIAN_H
+#define ENDIAN_H
+
+#include <cdefs.h>
+#include <stdint.h>
+#include <endian_.h>
+
+/*
+ * General byte order swapping functions.
+ */
+#define bswap16(x) __bswap16(x)
+#define bswap32(x) __bswap32(x)
+#define bswap64(x) __bswap64(x)
+
+/*
+ * Host to big endian, host to little endian, big endian to host, and little
+ * endian to host byte order functions as detailed in byteorder(9).
+ */
+#if _BYTE_ORDER == _LITTLE_ENDIAN
+#define htobe16(x) bswap16((x))
+#define htobe32(x) bswap32((x))
+#define htobe64(x) bswap64((x))
+#define htole16(x) ((uint16_t)(x))
+#define htole32(x) ((uint32_t)(x))
+#define htole64(x) ((uint64_t)(x))
+
+#define be16toh(x) bswap16((x))
+#define be32toh(x) bswap32((x))
+#define be64toh(x) bswap64((x))
+#define le16toh(x) ((uint16_t)(x))
+#define le32toh(x) ((uint32_t)(x))
+#define le64toh(x) ((uint64_t)(x))
+#else /* _BYTE_ORDER != _LITTLE_ENDIAN */
+#define htobe16(x) ((uint16_t)(x))
+#define htobe32(x) ((uint32_t)(x))
+#define htobe64(x) ((uint64_t)(x))
+#define htole16(x) bswap16((x))
+#define htole32(x) bswap32((x))
+#define htole64(x) bswap64((x))
+
+#define be16toh(x) ((uint16_t)(x))
+#define be32toh(x) ((uint32_t)(x))
+#define be64toh(x) ((uint64_t)(x))
+#define le16toh(x) bswap16((x))
+#define le32toh(x) bswap32((x))
+#define le64toh(x) bswap64((x))
+#endif /* _BYTE_ORDER == _LITTLE_ENDIAN */
+
+/* Alignment-agnostic encode/decode bytestream to/from little/big endian. */
+
+static __inline uint16_t
+be16dec(const void *pp)
+{
+ uint8_t const *p = (uint8_t const *)pp;
+
+ return ((p[0] << 8) | p[1]);
+}
+
+static __inline uint32_t
+be32dec(const void *pp)
+{
+ uint8_t const *p = (uint8_t const *)pp;
+
+ return (((unsigned)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]);
+}
+
+static __inline uint64_t
+be64dec(const void *pp)
+{
+ uint8_t const *p = (uint8_t const *)pp;
+
+ return (((uint64_t)be32dec(p) << 32) | be32dec(p + 4));
+}
+
+static __inline uint16_t
+le16dec(const void *pp)
+{
+ uint8_t const *p = (uint8_t const *)pp;
+
+ return ((p[1] << 8) | p[0]);
+}
+
+static __inline uint32_t
+le32dec(const void *pp)
+{
+ uint8_t const *p = (uint8_t const *)pp;
+
+ return (((unsigned)p[3] << 24) | (p[2] << 16) | (p[1] << 8) | p[0]);
+}
+
+static __inline uint64_t
+le64dec(const void *pp)
+{
+ uint8_t const *p = (uint8_t const *)pp;
+
+ return (((uint64_t)le32dec(p + 4) << 32) | le32dec(p));
+}
+
+static __inline void
+be16enc(void *pp, uint16_t u)
+{
+ uint8_t *p = (uint8_t *)pp;
+
+ p[0] = (u >> 8) & 0xff;
+ p[1] = u & 0xff;
+}
+
+static __inline void
+be32enc(void *pp, uint32_t u)
+{
+ uint8_t *p = (uint8_t *)pp;
+
+ p[0] = (u >> 24) & 0xff;
+ p[1] = (u >> 16) & 0xff;
+ p[2] = (u >> 8) & 0xff;
+ p[3] = u & 0xff;
+}
+
+static __inline void
+be64enc(void *pp, uint64_t u)
+{
+ uint8_t *p = (uint8_t *)pp;
+
+ be32enc(p, (uint32_t)(u >> 32));
+ be32enc(p + 4, (uint32_t)(u & 0xffffffffU));
+}
+
+static __inline void
+le16enc(void *pp, uint16_t u)
+{
+ uint8_t *p = (uint8_t *)pp;
+
+ p[0] = u & 0xff;
+ p[1] = (u >> 8) & 0xff;
+}
+
+static __inline void
+le32enc(void *pp, uint32_t u)
+{
+ uint8_t *p = (uint8_t *)pp;
+
+ p[0] = u & 0xff;
+ p[1] = (u >> 8) & 0xff;
+ p[2] = (u >> 16) & 0xff;
+ p[3] = (u >> 24) & 0xff;
+}
+
+static __inline void
+le64enc(void *pp, uint64_t u)
+{
+ uint8_t *p = (uint8_t *)pp;
+
+ le32enc(p, (uint32_t)(u & 0xffffffffU));
+ le32enc(p + 4, (uint32_t)(u >> 32));
+}
+
+#endif /* ENDIAN_H */
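
An editorial sketch (not part of the patch) of the alignment-agnostic decode/encode helpers above; the function names are hypothetical.

    #include <endian.h>
    #include <stdint.h>

    uint32_t read_be32_field(const void *blob)
    {
            /* Decode a big-endian 32-bit field regardless of host byte order
             * or pointer alignment. */
            return be32dec(blob);
    }

    void write_le64_field(void *out, uint64_t value)
    {
            /* Encode a little-endian 64-bit field byte by byte. */
            le64enc(out, value);
    }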
diff --git a/include/lib/libc/errno.h b/include/lib/libc/errno.h
new file mode 100644
index 0000000..b536fe9
--- /dev/null
+++ b/include/lib/libc/errno.h
@@ -0,0 +1,169 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1982, 1986, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)errno.h 8.5 (Berkeley) 1/21/94
+ * $FreeBSD$
+ */
+/*
+ * Portions copyright (c) 2018, Arm Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef ERRNO_H
+#define ERRNO_H
+
+#define EPERM 1 /* Operation not permitted */
+#define ENOENT 2 /* No such file or directory */
+#define ESRCH 3 /* No such process */
+#define EINTR 4 /* Interrupted system call */
+#define EIO 5 /* Input/output error */
+#define ENXIO 6 /* Device not configured */
+#define E2BIG 7 /* Argument list too long */
+#define ENOEXEC 8 /* Exec format error */
+#define EBADF 9 /* Bad file descriptor */
+#define ECHILD 10 /* No child processes */
+#define EDEADLK 11 /* Resource deadlock avoided */
+ /* 11 was EAGAIN */
+#define ENOMEM 12 /* Cannot allocate memory */
+#define EACCES 13 /* Permission denied */
+#define EFAULT 14 /* Bad address */
+#define ENOTBLK 15 /* Block device required */
+#define EBUSY 16 /* Device busy */
+#define EEXIST 17 /* File exists */
+#define EXDEV 18 /* Cross-device link */
+#define ENODEV 19 /* Operation not supported by device */
+#define ENOTDIR 20 /* Not a directory */
+#define EISDIR 21 /* Is a directory */
+#define EINVAL 22 /* Invalid argument */
+#define ENFILE 23 /* Too many open files in system */
+#define EMFILE 24 /* Too many open files */
+#define ENOTTY 25 /* Inappropriate ioctl for device */
+#define ETXTBSY 26 /* Text file busy */
+#define EFBIG 27 /* File too large */
+#define ENOSPC 28 /* No space left on device */
+#define ESPIPE 29 /* Illegal seek */
+#define EROFS 30 /* Read-only filesystem */
+#define EMLINK 31 /* Too many links */
+#define EPIPE 32 /* Broken pipe */
+
+/* math software */
+#define EDOM 33 /* Numerical argument out of domain */
+#define ERANGE 34 /* Result too large */
+
+/* non-blocking and interrupt i/o */
+#define EAGAIN 35 /* Resource temporarily unavailable */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define EINPROGRESS 36 /* Operation now in progress */
+#define EALREADY 37 /* Operation already in progress */
+
+/* ipc/network software -- argument errors */
+#define ENOTSOCK 38 /* Socket operation on non-socket */
+#define EDESTADDRREQ 39 /* Destination address required */
+#define EMSGSIZE 40 /* Message too long */
+#define EPROTOTYPE 41 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 42 /* Protocol not available */
+#define EPROTONOSUPPORT 43 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 44 /* Socket type not supported */
+#define EOPNOTSUPP 45 /* Operation not supported */
+#define ENOTSUP EOPNOTSUPP /* Operation not supported */
+#define EPFNOSUPPORT 46 /* Protocol family not supported */
+#define EAFNOSUPPORT 47 /* Address family not supported by protocol family */
+#define EADDRINUSE 48 /* Address already in use */
+#define EADDRNOTAVAIL 49 /* Can't assign requested address */
+
+/* ipc/network software -- operational errors */
+#define ENETDOWN 50 /* Network is down */
+#define ENETUNREACH 51 /* Network is unreachable */
+#define ENETRESET 52 /* Network dropped connection on reset */
+#define ECONNABORTED 53 /* Software caused connection abort */
+#define ECONNRESET 54 /* Connection reset by peer */
+#define ENOBUFS 55 /* No buffer space available */
+#define EISCONN 56 /* Socket is already connected */
+#define ENOTCONN 57 /* Socket is not connected */
+#define ESHUTDOWN 58 /* Can't send after socket shutdown */
+#define ETOOMANYREFS 59 /* Too many references: can't splice */
+#define ETIMEDOUT 60 /* Operation timed out */
+#define ECONNREFUSED 61 /* Connection refused */
+
+#define ELOOP 62 /* Too many levels of symbolic links */
+#define ENAMETOOLONG 63 /* File name too long */
+
+/* should be rearranged */
+#define EHOSTDOWN 64 /* Host is down */
+#define EHOSTUNREACH 65 /* No route to host */
+#define ENOTEMPTY 66 /* Directory not empty */
+
+/* quotas & mush */
+#define EPROCLIM 67 /* Too many processes */
+#define EUSERS 68 /* Too many users */
+#define EDQUOT 69 /* Disc quota exceeded */
+
+/* Network File System */
+#define ESTALE 70 /* Stale NFS file handle */
+#define EREMOTE 71 /* Too many levels of remote in path */
+#define EBADRPC 72 /* RPC struct is bad */
+#define ERPCMISMATCH 73 /* RPC version wrong */
+#define EPROGUNAVAIL 74 /* RPC prog. not avail */
+#define EPROGMISMATCH 75 /* Program version wrong */
+#define EPROCUNAVAIL 76 /* Bad procedure for program */
+
+#define ENOLCK 77 /* No locks available */
+#define ENOSYS 78 /* Function not implemented */
+
+#define EFTYPE 79 /* Inappropriate file type or format */
+#define EAUTH 80 /* Authentication error */
+#define ENEEDAUTH 81 /* Need authenticator */
+#define EIDRM 82 /* Identifier removed */
+#define ENOMSG 83 /* No message of desired type */
+#define EOVERFLOW 84 /* Value too large to be stored in data type */
+#define ECANCELED 85 /* Operation canceled */
+#define EILSEQ 86 /* Illegal byte sequence */
+#define ENOATTR 87 /* Attribute not found */
+
+#define EDOOFUS 88 /* Programming error */
+
+#define EBADMSG 89 /* Bad message */
+#define EMULTIHOP 90 /* Multihop attempted */
+#define ENOLINK 91 /* Link has been severed */
+#define EPROTO 92 /* Protocol error */
+
+#define ENOTCAPABLE 93 /* Capabilities insufficient */
+#define ECAPMODE 94 /* Not permitted in capability mode */
+#define ENOTRECOVERABLE 95 /* State not recoverable */
+#define EOWNERDEAD 96 /* Previous owner died */
+
+#define ELAST 96 /* Must be equal largest errno */
+
+#endif /* ERRNO_H */
diff --git a/include/lib/libc/inttypes.h b/include/lib/libc/inttypes.h
new file mode 100644
index 0000000..344b71c
--- /dev/null
+++ b/include/lib/libc/inttypes.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2020 Broadcom
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/*
+ * Portions copyright (c) 2020, Arm Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef INTTYPES_H
+#define INTTYPES_H
+
+#include <inttypes_.h>
+#include <stdint.h>
+
+#define PRId8 "d" /* int8_t */
+#define PRId16 "d" /* int16_t */
+#define PRId32 "d" /* int32_t */
+
+#define PRIi8 "i" /* int8_t */
+#define PRIi16 "i" /* int16_t */
+#define PRIi32 "i" /* int32_t */
+
+#define PRIo8 "o" /* int8_t */
+#define PRIo16 "o" /* int16_t */
+#define PRIo32 "o" /* int32_t */
+
+#define PRIu8 "u" /* uint8_t */
+#define PRIu16 "u" /* uint16_t */
+#define PRIu32 "u" /* uint32_t */
+
+#define PRIx8 "x" /* uint8_t */
+#define PRIx16 "x" /* uint16_t */
+#define PRIx32 "x" /* uint32_t */
+
+#define PRIX8 "X" /* uint8_t */
+#define PRIX16 "X" /* uint16_t */
+#define PRIX32 "X" /* uint32_t */
+
+#endif
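
An editorial example (not part of the patch) of printing a fixed-width value portably with the PRI* macros above; show_pa() is a hypothetical name.

    #include <inttypes.h>
    #include <stdio.h>

    void show_pa(uint64_t pa)
    {
            /* PRIx64 expands to "llx" on AArch32 and "lx" on AArch64 (see the
             * per-arch inttypes_.h files above). */
            printf("PA 0x%" PRIx64 "\n", pa);
    }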
diff --git a/include/lib/libc/limits.h b/include/lib/libc/limits.h
new file mode 100644
index 0000000..c5c8764
--- /dev/null
+++ b/include/lib/libc/limits.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2012-2017 Roberto E. Vargas Caballero
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/*
+ * Portions copyright (c) 2018, Arm Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef LIMITS_H
+#define LIMITS_H
+
+#include <limits_.h>
+
+#define CHAR_BIT 8
+#define MB_LEN_MAX 1
+
+#endif /* LIMITS_H */
diff --git a/include/lib/libc/setjmp.h b/include/lib/libc/setjmp.h
new file mode 100644
index 0000000..871c868
--- /dev/null
+++ b/include/lib/libc/setjmp.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SETJMP_H
+#define SETJMP_H
+
+#include <setjmp_.h>
+
+#ifndef __ASSEMBLER__
+
+#include <cdefs.h>
+
+int setjmp(jmp_buf env);
+__dead2 void longjmp(jmp_buf env, int val);
+
+#endif /* __ASSEMBLER__ */
+#endif /* SETJMP_H */
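
The usual save/restore pattern with this jmp_buf, as an editorial sketch (not part of the patch); run_with_recovery() and bail_out() are hypothetical names.

    #include <setjmp.h>

    static jmp_buf recovery_point;

    int run_with_recovery(void (*risky)(void))
    {
            /* setjmp() returns 0 on the initial call and the longjmp() value
             * when control comes back here. */
            if (setjmp(recovery_point) != 0) {
                    return -1;      /* resumed here after a longjmp() */
            }
            risky();
            return 0;
    }

    void bail_out(void)
    {
            longjmp(recovery_point, 1);
    }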
diff --git a/include/lib/libc/stdarg.h b/include/lib/libc/stdarg.h
new file mode 100644
index 0000000..2d1f785
--- /dev/null
+++ b/include/lib/libc/stdarg.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2012-2017 Roberto E. Vargas Caballero
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/*
+ * Portions copyright (c) 2018, Arm Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef STDARG_H
+#define STDARG_H
+
+#define va_list __builtin_va_list
+#define va_start(ap, last) __builtin_va_start(ap, last)
+#define va_end(ap) __builtin_va_end(ap)
+#define va_copy(to, from) __builtin_va_copy(to, from)
+#define va_arg(to, type) __builtin_va_arg(to, type)
+
+#endif /* STDARG_H */
diff --git a/include/lib/libc/stdbool.h b/include/lib/libc/stdbool.h
new file mode 100644
index 0000000..c2c9b22
--- /dev/null
+++ b/include/lib/libc/stdbool.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2018-2020, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef STDBOOL_H
+#define STDBOOL_H
+
+#define bool _Bool
+
+#define true (0 < 1)
+#define false (0 > 1)
+
+#define __bool_true_false_are_defined 1
+
+#endif /* STDBOOL_H */
diff --git a/include/lib/libc/stddef.h b/include/lib/libc/stddef.h
new file mode 100644
index 0000000..aaad673
--- /dev/null
+++ b/include/lib/libc/stddef.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2012-2017 Roberto E. Vargas Caballero
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/*
+ * Portions copyright (c) 2018-2019, Arm Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef STDDEF_H
+#define STDDEF_H
+
+#include <stddef_.h>
+
+#ifndef _PTRDIFF_T
+typedef long ptrdiff_t;
+#define _PTRDIFF_T
+#endif
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+#define offsetof(st, m) __builtin_offsetof(st, m)
+
+#endif /* STDDEF_H */
diff --git a/include/lib/libc/stdint.h b/include/lib/libc/stdint.h
new file mode 100644
index 0000000..88502e7
--- /dev/null
+++ b/include/lib/libc/stdint.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2012-2017 Roberto E. Vargas Caballero
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/*
+ * Portions copyright (c) 2018-2019, Arm Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef STDINT_H
+#define STDINT_H
+
+#include <limits.h>
+#include <stdint_.h>
+
+#define INT8_MAX CHAR_MAX
+#define INT8_MIN CHAR_MIN
+#define UINT8_MAX UCHAR_MAX
+
+#define INT16_MAX SHRT_MAX
+#define INT16_MIN SHRT_MIN
+#define UINT16_MAX USHRT_MAX
+
+#define INT32_MAX INT_MAX
+#define INT32_MIN INT_MIN
+#define UINT32_MAX UINT_MAX
+
+#define INT_LEAST8_MIN INT8_MIN
+#define INT_LEAST8_MAX INT8_MAX
+#define UINT_LEAST8_MAX UINT8_MAX
+
+#define INT_LEAST16_MIN INT16_MIN
+#define INT_LEAST16_MAX INT16_MAX
+#define UINT_LEAST16_MAX UINT16_MAX
+
+#define INT_LEAST32_MIN INT32_MIN
+#define INT_LEAST32_MAX INT32_MAX
+#define UINT_LEAST32_MAX UINT32_MAX
+
+#define INT_LEAST64_MIN INT64_MIN
+#define INT_LEAST64_MAX INT64_MAX
+#define UINT_LEAST64_MAX UINT64_MAX
+
+#define INT_FAST8_MIN INT32_MIN
+#define INT_FAST8_MAX INT32_MAX
+#define UINT_FAST8_MAX UINT32_MAX
+
+#define INT_FAST16_MIN INT32_MIN
+#define INT_FAST16_MAX INT32_MAX
+#define UINT_FAST16_MAX UINT32_MAX
+
+#define INT_FAST32_MIN INT32_MIN
+#define INT_FAST32_MAX INT32_MAX
+#define UINT_FAST32_MAX UINT32_MAX
+
+#define INT_FAST64_MIN INT64_MIN
+#define INT_FAST64_MAX INT64_MAX
+#define UINT_FAST64_MAX UINT64_MAX
+
+#define INTPTR_MIN LONG_MIN
+#define INTPTR_MAX LONG_MAX
+#define UINTPTR_MAX ULONG_MAX
+
+#define INTMAX_MIN LLONG_MIN
+#define INTMAX_MAX LLONG_MAX
+#define UINTMAX_MAX ULLONG_MAX
+
+#define PTRDIFF_MIN LONG_MIN
+#define PTRDIFF_MAX LONG_MAX
+
+#define SIZE_MAX ULONG_MAX
+
+#define INT8_C(x) x
+#define INT16_C(x) x
+#define INT32_C(x) x
+
+#define UINT8_C(x) x
+#define UINT16_C(x) x
+#define UINT32_C(x) x ## U
+
+#define INTMAX_C(x) x ## LL
+#define UINTMAX_C(x) x ## ULL
+
+typedef signed char int8_t;
+typedef short int16_t;
+typedef int int32_t;
+
+typedef unsigned char uint8_t;
+typedef unsigned short uint16_t;
+typedef unsigned int uint32_t;
+
+typedef signed char int8_least_t;
+typedef short int16_least_t;
+typedef int int32_least_t;
+
+typedef unsigned char uint8_least_t;
+typedef unsigned short uint16_least_t;
+typedef unsigned int uint32_least_t;
+
+typedef int int8_fast_t;
+typedef int int16_fast_t;
+typedef int int32_fast_t;
+
+typedef unsigned int uint8_fast_t;
+typedef unsigned int uint16_fast_t;
+typedef unsigned int uint32_fast_t;
+
+typedef long intptr_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * Conceptually, these are supposed to be the largest integers representable
+ * in C, but GCC and Clang define them as long long for compatibility.
+ */
+typedef long long intmax_t;
+typedef unsigned long long uintmax_t;
+
+typedef long register_t;
+typedef unsigned long u_register_t;
+
+#endif /* STDINT_H */
diff --git a/include/lib/libc/stdio.h b/include/lib/libc/stdio.h
new file mode 100644
index 0000000..5ceaf68
--- /dev/null
+++ b/include/lib/libc/stdio.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2012-2017 Roberto E. Vargas Caballero
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/*
+ * Portions copyright (c) 2018-2019, Arm Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef STDIO_H
+#define STDIO_H
+
+#include <cdefs.h>
+#include <stddef.h>
+#include <stdio_.h>
+
+#define EOF -1
+
+int printf(const char *fmt, ...) __printflike(1, 2);
+int snprintf(char *s, size_t n, const char *fmt, ...) __printflike(3, 4);
+
+#ifdef STDARG_H
+int vprintf(const char *fmt, va_list args);
+int vsnprintf(char *s, size_t n, const char *fmt, va_list args);
+#endif
+
+int putchar(int c);
+int puts(const char *s);
+
+#endif /* STDIO_H */
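
A small editorial illustration (not part of the patch) of the STDARG_H guard above: vprintf()/vsnprintf() are only declared when <stdarg.h> has been included first. log_msg() is a hypothetical name.

    #include <stdarg.h>     /* must come first so stdio.h exposes vprintf() */
    #include <stdio.h>

    void __printflike(1, 2) log_msg(const char *fmt, ...)
    {
            va_list ap;

            va_start(ap, fmt);
            (void)vprintf(fmt, ap);
            va_end(ap);
    }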
diff --git a/include/lib/libc/stdlib.h b/include/lib/libc/stdlib.h
new file mode 100644
index 0000000..4e5a824
--- /dev/null
+++ b/include/lib/libc/stdlib.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2012-2021 Roberto E. Vargas Caballero
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/*
+ * Portions copyright (c) 2018-2019, Arm Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef STDLIB_H
+#define STDLIB_H
+
+#include <stddef.h>
+
+#define EXIT_FAILURE 1
+#define EXIT_SUCCESS 0
+
+#define _ATEXIT_MAX 1
+
+#define isspace(x) (((x) == ' ') || ((x) == '\r') || ((x) == '\n') || \
+ ((x) == '\t') || ((x) == '\b'))
+
+extern void abort(void);
+extern int atexit(void (*func)(void));
+extern void exit(int status);
+
+long strtol(const char *nptr, char **endptr, int base);
+unsigned long strtoul(const char *nptr, char **endptr, int base);
+long long strtoll(const char *nptr, char **endptr, int base);
+unsigned long long strtoull(const char *nptr, char **endptr, int base);
+#endif /* STDLIB_H */
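
An editorial sketch (not part of the patch) of the strtoul() declaration above, using the endptr argument to detect a failed or partial conversion; parse_hex() is a hypothetical name.

    #include <stdlib.h>

    unsigned long parse_hex(const char *s, int *ok)
    {
            char *end;
            unsigned long v = strtoul(s, &end, 16);

            /* Success only if something was consumed and the string ended. */
            *ok = (end != s) && (*end == '\0');
            return v;
    }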
diff --git a/include/lib/libc/string.h b/include/lib/libc/string.h
new file mode 100644
index 0000000..8129404
--- /dev/null
+++ b/include/lib/libc/string.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2012-2017 Roberto E. Vargas Caballero
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/*
+ * Portions copyright (c) 2018-2020, Arm Limited and Contributors.
+ * Portions copyright (c) 2023, Intel Corporation. All rights reserved.
+ * All rights reserved.
+ */
+
+#ifndef STRING_H
+#define STRING_H
+
+#include <stddef.h>
+
+void *memcpy(void *dst, const void *src, size_t len);
+int memcpy_s(void *dst, size_t dsize, void *src, size_t ssize);
+void *memmove(void *dst, const void *src, size_t len);
+int memcmp(const void *s1, const void *s2, size_t len);
+int strcmp(const char *s1, const char *s2);
+int strncmp(const char *s1, const char *s2, size_t n);
+void *memchr(const void *src, int c, size_t len);
+void *memrchr(const void *src, int c, size_t len);
+char *strchr(const char *s, int c);
+void *memset(void *dst, int val, size_t count);
+size_t strlen(const char *s);
+size_t strnlen(const char *s, size_t maxlen);
+char *strrchr(const char *p, int ch);
+size_t strlcpy(char * dst, const char * src, size_t dsize);
+size_t strlcat(char * dst, const char * src, size_t dsize);
+char *strtok_r(char *s, const char *delim, char **last);
+
+#endif /* STRING_H */
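
An editorial sketch (not part of the patch) of a bounded copy with the strlcpy() declaration above; copy_name() is a hypothetical name.

    #include <string.h>

    int copy_name(char *dst, size_t dst_size, const char *src)
    {
            /* strlcpy() always NUL-terminates and returns the length it tried
             * to copy; a return value >= dst_size signals truncation. */
            return (strlcpy(dst, src, dst_size) >= dst_size) ? -1 : 0;
    }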
diff --git a/include/lib/libc/sys/cdefs.h b/include/lib/libc/sys/cdefs.h
new file mode 100644
index 0000000..1ace5fb
--- /dev/null
+++ b/include/lib/libc/sys/cdefs.h
@@ -0,0 +1,922 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Berkeley Software Design, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)cdefs.h 8.8 (Berkeley) 1/9/95
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_CDEFS_H_
+#define _SYS_CDEFS_H_
+
+#if defined(_KERNEL) && defined(_STANDALONE)
+#error "_KERNEL and _STANDALONE are mutually exclusive"
+#endif
+
+/*
+ * Testing against Clang-specific extensions.
+ */
+#ifndef __has_attribute
+#define __has_attribute(x) 0
+#endif
+#ifndef __has_extension
+#define __has_extension __has_feature
+#endif
+#ifndef __has_feature
+#define __has_feature(x) 0
+#endif
+#ifndef __has_include
+#define __has_include(x) 0
+#endif
+#ifndef __has_builtin
+#define __has_builtin(x) 0
+#endif
+
+#if defined(__cplusplus)
+#define __BEGIN_DECLS extern "C" {
+#define __END_DECLS }
+#else
+#define __BEGIN_DECLS
+#define __END_DECLS
+#endif
+
+/*
+ * This code has been put in place to help reduce the addition of
+ * compiler specific defines in FreeBSD code. It helps to aid in
+ * having a compiler-agnostic source tree.
+ */
+
+#if defined(__GNUC__)
+
+#if __GNUC__ >= 3
+#define __GNUCLIKE_ASM 3
+#define __GNUCLIKE_MATH_BUILTIN_CONSTANTS
+#else
+#define __GNUCLIKE_ASM 2
+#endif
+#define __GNUCLIKE___TYPEOF 1
+#define __GNUCLIKE___SECTION 1
+
+#define __GNUCLIKE_CTOR_SECTION_HANDLING 1
+
+#define __GNUCLIKE_BUILTIN_CONSTANT_P 1
+
+#if (__GNUC_MINOR__ > 95 || __GNUC__ >= 3)
+#define __GNUCLIKE_BUILTIN_VARARGS 1
+#define __GNUCLIKE_BUILTIN_STDARG 1
+#define __GNUCLIKE_BUILTIN_VAALIST 1
+#endif
+
+#define __GNUC_VA_LIST_COMPATIBILITY 1
+
+/*
+ * Compiler memory barriers, specific to gcc and clang.
+ */
+#define __compiler_membar() __asm __volatile(" " : : : "memory")
+
+#define __GNUCLIKE_BUILTIN_NEXT_ARG 1
+#define __GNUCLIKE_MATH_BUILTIN_RELOPS
+
+#define __GNUCLIKE_BUILTIN_MEMCPY 1
+
+/* XXX: if __GNUC__ >= 2: not tested everywhere originally, where replaced */
+#define __CC_SUPPORTS_INLINE 1
+#define __CC_SUPPORTS___INLINE 1
+#define __CC_SUPPORTS___INLINE__ 1
+
+#define __CC_SUPPORTS___FUNC__ 1
+#define __CC_SUPPORTS_WARNING 1
+
+#define __CC_SUPPORTS_VARADIC_XXX 1 /* see varargs.h */
+
+#define __CC_SUPPORTS_DYNAMIC_ARRAY_INIT 1
+
+#endif /* __GNUC__ */
+
+/*
+ * Macro to test if we're using a specific version of gcc or later.
+ */
+#if defined(__GNUC__)
+#define __GNUC_PREREQ__(ma, mi) \
+ (__GNUC__ > (ma) || __GNUC__ == (ma) && __GNUC_MINOR__ >= (mi))
+#else
+#define __GNUC_PREREQ__(ma, mi) 0
+#endif
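
    /*
     * Editorial example, not part of this patch: __GNUC_PREREQ__() is
     * typically used to guard attributes that only newer GCC releases
     * accept. The __example_copy() name is hypothetical.
     */
    #if __GNUC_PREREQ__(9, 1)
    #define __example_copy(x)   __attribute__((__copy__(x)))
    #else
    #define __example_copy(x)
    #endif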
+
+/*
+ * The __CONCAT macro is used to concatenate parts of symbol names, e.g.
+ * with "#define OLD(foo) __CONCAT(old,foo)", OLD(foo) produces oldfoo.
+ * The __CONCAT macro is a bit tricky to use if it must work in non-ANSI
+ * mode -- there must be no spaces between its arguments, and for nested
+ * __CONCAT's, all the __CONCAT's must be at the left. __CONCAT can also
+ * concatenate double-quoted strings produced by the __STRING macro, but
+ * this only works with ANSI C.
+ *
+ * __XSTRING is like __STRING, but it expands any macros in its argument
+ * first. It is only available with ANSI C.
+ */
+#if defined(__STDC__) || defined(__cplusplus)
+#define __P(protos) protos /* full-blown ANSI C */
+#define __CONCAT1(x,y) x ## y
+#define __CONCAT(x,y) __CONCAT1(x,y)
+#define __STRING(x) #x /* stringify without expanding x */
+#define __XSTRING(x) __STRING(x) /* expand x, then stringify */
+
+#define __const const /* define reserved names to standard */
+#define __signed signed
+#define __volatile volatile
+#if defined(__cplusplus)
+#define __inline inline /* convert to C++ keyword */
+#else
+#if !(defined(__CC_SUPPORTS___INLINE))
+#define __inline /* delete GCC keyword */
+#endif /* ! __CC_SUPPORTS___INLINE */
+#endif /* !__cplusplus */
+
+#else /* !(__STDC__ || __cplusplus) */
+#define __P(protos) () /* traditional C preprocessor */
+#define __CONCAT(x,y) x/**/y
+#define __STRING(x) "x"
+
+#if !defined(__CC_SUPPORTS___INLINE)
+#define __const /* delete pseudo-ANSI C keywords */
+#define __inline
+#define __signed
+#define __volatile
+/*
+ * In non-ANSI C environments, new programs will want ANSI-only C keywords
+ * deleted from the program and old programs will want them left alone.
+ * When using a compiler other than gcc, programs using the ANSI C keywords
+ * const, inline etc. as normal identifiers should define -DNO_ANSI_KEYWORDS.
+ * When using "gcc -traditional", we assume that this is the intent; if
+ * __GNUC__ is defined but __STDC__ is not, we leave the new keywords alone.
+ */
+#ifndef NO_ANSI_KEYWORDS
+#define const /* delete ANSI C keywords */
+#define inline
+#define signed
+#define volatile
+#endif /* !NO_ANSI_KEYWORDS */
+#endif /* !__CC_SUPPORTS___INLINE */
+#endif /* !(__STDC__ || __cplusplus) */
+
+/*
+ * Compiler-dependent macros to help declare dead (non-returning) and
+ * pure (no side effects) functions, and unused variables. They are
+ * null except for versions of gcc that are known to support the features
+ * properly (old versions of gcc-2 supported the dead and pure features
+ * in a different (wrong) way). If we do not provide an implementation
+ * for a given compiler, let the compile fail if it is told to use
+ * a feature that we cannot live without.
+ */
+#define __weak_symbol __attribute__((__weak__))
+#if !__GNUC_PREREQ__(2, 5)
+#define __dead2
+#define __pure2
+#define __unused
+#endif
+#if __GNUC__ == 2 && __GNUC_MINOR__ >= 5 && __GNUC_MINOR__ < 7
+#define __dead2 __attribute__((__noreturn__))
+#define __pure2 __attribute__((__const__))
+#define __unused
+/* XXX Find out what to do for __packed, __aligned and __section */
+#endif
+#if __GNUC_PREREQ__(2, 7)
+#define __dead2 __attribute__((__noreturn__))
+#define __pure2 __attribute__((__const__))
+#define __unused __attribute__((__unused__))
+#define __used __attribute__((__used__))
+#define __packed __attribute__((__packed__))
+#define __aligned(x) __attribute__((__aligned__(x)))
+#define __section(x) __attribute__((__section__(x)))
+#endif
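+
+/*
+ * Illustrative sketch (hypothetical declarations, not part of this header):
+ * the attribute macros above are attached to declarations, e.g.
+ *
+ *	void	fatal(const char *msg) __dead2;		(never returns)
+ *	static int debug_level __unused;		(may be unreferenced)
+ *	struct wire_hdr {
+ *		uint8_t		type;
+ *		uint32_t	length;
+ *	} __packed __aligned(4);			(no padding, 4-byte aligned)
+ */
+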
+#if __GNUC_PREREQ__(4, 3) || __has_attribute(__alloc_size__)
+#define __alloc_size(x) __attribute__((__alloc_size__(x)))
+#define __alloc_size2(n, x) __attribute__((__alloc_size__(n, x)))
+#else
+#define __alloc_size(x)
+#define __alloc_size2(n, x)
+#endif
+#if __GNUC_PREREQ__(4, 9) || __has_attribute(__alloc_align__)
+#define __alloc_align(x) __attribute__((__alloc_align__(x)))
+#else
+#define __alloc_align(x)
+#endif
+
+#if !__GNUC_PREREQ__(2, 95)
+#define __alignof(x) __offsetof(struct { char __a; x __b; }, __b)
+#endif
+
+/*
+ * Keywords added in C11.
+ */
+
+#if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 201112L
+
+#if !__has_extension(c_alignas)
+#if (defined(__cplusplus) && __cplusplus >= 201103L) || \
+ __has_extension(cxx_alignas)
+#define _Alignas(x) alignas(x)
+#else
+/* XXX: Only emulates _Alignas(constant-expression); not _Alignas(type-name). */
+#define _Alignas(x) __aligned(x)
+#endif
+#endif
+
+#if defined(__cplusplus) && __cplusplus >= 201103L
+#define _Alignof(x) alignof(x)
+#else
+#define _Alignof(x) __alignof(x)
+#endif
+
+#if !defined(__cplusplus) && !__has_extension(c_atomic) && \
+ !__has_extension(cxx_atomic) && !__GNUC_PREREQ__(4, 7)
+/*
+ * No native support for _Atomic(). Place object in structure to prevent
+ * most forms of direct non-atomic access.
+ */
+#define _Atomic(T) struct { T volatile __val; }
+#endif
+
+#if defined(__cplusplus) && __cplusplus >= 201103L
+#define _Noreturn [[noreturn]]
+#else
+#define _Noreturn __dead2
+#endif
+
+#if !__has_extension(c_static_assert)
+#if (defined(__cplusplus) && __cplusplus >= 201103L) || \
+ __has_extension(cxx_static_assert)
+#define _Static_assert(x, y) static_assert(x, y)
+#elif __GNUC_PREREQ__(4,6) && !defined(__cplusplus)
+/* Nothing, gcc 4.6 and higher has _Static_assert built-in */
+#elif defined(__COUNTER__)
+#define _Static_assert(x, y) __Static_assert(x, __COUNTER__)
+#define __Static_assert(x, y) ___Static_assert(x, y)
+#define ___Static_assert(x, y) typedef char __assert_ ## y[(x) ? 1 : -1] \
+ __unused
+#else
+#define _Static_assert(x, y) struct __hack
+#endif
+#endif
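+
+/*
+ * Illustrative sketch (assumes a pre-C11 compiler that provides __COUNTER__):
+ * with the fallback above,
+ *
+ *	_Static_assert(sizeof(long) >= 4, "long is too small");
+ *
+ * expands to roughly
+ *
+ *	typedef char __assert_0[(sizeof(long) >= 4) ? 1 : -1] __unused;
+ *
+ * so a false condition yields a negative array size and a compile-time error;
+ * note that the message argument is dropped by this emulation.
+ */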
+
+#if !__has_extension(c_thread_local)
+/*
+ * XXX: Some compilers (Clang 3.3, GCC 4.7) falsely announce C++11 mode
+ * without actually supporting the thread_local keyword. Don't check for
+ * the presence of C++11 when defining _Thread_local.
+ */
+#if /* (defined(__cplusplus) && __cplusplus >= 201103L) || */ \
+ __has_extension(cxx_thread_local)
+#define _Thread_local thread_local
+#else
+#define _Thread_local __thread
+#endif
+#endif
+
+#endif /* __STDC_VERSION__ || __STDC_VERSION__ < 201112L */
+
+/*
+ * Emulation of C11 _Generic(). Unlike the previously defined C11
+ * keywords, it is not possible to implement this using exactly the same
+ * syntax. Therefore implement something similar under the name
+ * __generic(). Unlike _Generic(), this macro can only distinguish
+ * between a single type, so it requires nested invocations to
+ * distinguish multiple cases.
+ */
+
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \
+ __has_extension(c_generic_selections)
+#define __generic(expr, t, yes, no) \
+ _Generic(expr, t: yes, default: no)
+#elif __GNUC_PREREQ__(3, 1) && !defined(__cplusplus)
+#define __generic(expr, t, yes, no) \
+ __builtin_choose_expr( \
+ __builtin_types_compatible_p(__typeof(expr), t), yes, no)
+#endif
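+
+/*
+ * Illustrative sketch (hypothetical macro, not part of this header): since
+ * __generic() can test only one type per invocation, distinguishing several
+ * types requires nesting, e.g.
+ *
+ *	#define	my_abs(x)						\
+ *		__generic(x, long long, llabs(x),			\
+ *		    __generic(x, long, labs(x), abs(x)))
+ */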
+
+/*
+ * C99 Static array indices in function parameter declarations. Syntax such as:
+ * void bar(int myArray[static 10]);
+ * is allowed in C99 but not in C++. Define __min_size appropriately so
+ * headers using it can be compiled in either language. Use like this:
+ * void bar(int myArray[__min_size(10)]);
+ */
+#if !defined(__cplusplus) && \
+ (defined(__clang__) || __GNUC_PREREQ__(4, 6)) && \
+ (!defined(__STDC_VERSION__) || (__STDC_VERSION__ >= 199901))
+#define __min_size(x) static (x)
+#else
+#define __min_size(x) (x)
+#endif
+
+#if __GNUC_PREREQ__(2, 96)
+#define __malloc_like __attribute__((__malloc__))
+#define __pure __attribute__((__pure__))
+#else
+#define __malloc_like
+#define __pure
+#endif
+
+#if __GNUC_PREREQ__(3, 1)
+#define __always_inline __attribute__((__always_inline__))
+#else
+#define __always_inline
+#endif
+
+#if __GNUC_PREREQ__(3, 1)
+#define __noinline __attribute__ ((__noinline__))
+#else
+#define __noinline
+#endif
+
+#if __GNUC_PREREQ__(3, 4)
+#define __fastcall __attribute__((__fastcall__))
+#define __result_use_check __attribute__((__warn_unused_result__))
+#else
+#define __fastcall
+#define __result_use_check
+#endif
+
+#if __GNUC_PREREQ__(4, 1)
+#define __returns_twice __attribute__((__returns_twice__))
+#else
+#define __returns_twice
+#endif
+
+#if __GNUC_PREREQ__(4, 6) || __has_builtin(__builtin_unreachable)
+#define __unreachable() __builtin_unreachable()
+#else
+#define __unreachable() ((void)0)
+#endif
+
+/* XXX: should use `#if __STDC_VERSION__ < 199901'. */
+#if !__GNUC_PREREQ__(2, 7)
+#define __func__ NULL
+#endif
+
+#if (defined(__GNUC__) && __GNUC__ >= 2) && !defined(__STRICT_ANSI__) || __STDC_VERSION__ >= 199901
+#define __LONG_LONG_SUPPORTED
+#endif
+
+/* C++11 exposes a load of C99 stuff */
+#if defined(__cplusplus) && __cplusplus >= 201103L
+#define __LONG_LONG_SUPPORTED
+#ifndef __STDC_LIMIT_MACROS
+#define __STDC_LIMIT_MACROS
+#endif
+#ifndef __STDC_CONSTANT_MACROS
+#define __STDC_CONSTANT_MACROS
+#endif
+#endif
+
+/*
+ * GCC 2.95 provides `__restrict' as an extension to C90 to support the
+ * C99-specific `restrict' type qualifier. We happen to use `__restrict' as
+ * a way to define the `restrict' type qualifier without disturbing older
+ * software that is unaware of C99 keywords.
+ */
+#if !(__GNUC__ == 2 && __GNUC_MINOR__ == 95)
+#if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 199901
+#define __restrict
+#else
+#define __restrict restrict
+#endif
+#endif
+
+/*
+ * GNU C version 2.96 adds explicit branch prediction so that
+ * the CPU back-end can hint the processor and also so that
+ * code blocks can be reordered such that the predicted path
+ * sees a more linear flow, thus improving cache behavior, etc.
+ *
+ * The following two macros provide us with a way to utilize this
+ * compiler feature. Use __predict_true() if you expect the expression
+ * to evaluate to true, and __predict_false() if you expect the
+ * expression to evaluate to false.
+ *
+ * A few notes about usage:
+ *
+ * * Generally, use __predict_false() for error condition checks (unless
+ * you have some _strong_ reason to do otherwise, in which case
+ * document it), and/or __predict_true() for `no-error' condition
+ * checks, assuming you want to optimize for the no-error case.
+ *
+ * * Other than that, if you don't know the likelihood of a test
+ * succeeding from empirical or other `hard' evidence, don't
+ * make predictions.
+ *
+ * * These are meant to be used in places that are run `a lot'.
+ * It is wasteful to make predictions in code that is run
+ * seldom (e.g. at subsystem initialization time) as the
+ * basic block reordering that this affects can often generate
+ * larger code.
+ */
+#if __GNUC_PREREQ__(2, 96)
+#define __predict_true(exp) __builtin_expect((exp), 1)
+#define __predict_false(exp) __builtin_expect((exp), 0)
+#else
+#define __predict_true(exp) (exp)
+#define __predict_false(exp) (exp)
+#endif
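+
+/*
+ * Illustrative sketch (hypothetical code): annotate the branch you expect to
+ * be taken in practice, not the one you hope for:
+ *
+ *	if (__predict_false(error != 0))
+ *		goto fail;			(rare error path)
+ *	if (__predict_true(cached))
+ *		return (hit);			(common fast path)
+ */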
+
+#if __GNUC_PREREQ__(4, 0)
+#define __null_sentinel __attribute__((__sentinel__))
+#define __exported __attribute__((__visibility__("default")))
+#define __hidden __attribute__((__visibility__("hidden")))
+#else
+#define __null_sentinel
+#define __exported
+#define __hidden
+#endif
+
+/*
+ * We define this here since <stddef.h>, <sys/queue.h>, and <sys/types.h>
+ * require it.
+ */
+#if __GNUC_PREREQ__(4, 1)
+#define __offsetof(type, field) __builtin_offsetof(type, field)
+#else
+#ifndef __cplusplus
+#define __offsetof(type, field) \
+ ((__size_t)(__uintptr_t)((const volatile void *)&((type *)0)->field))
+#else
+#define __offsetof(type, field) \
+ (__offsetof__ (reinterpret_cast <__size_t> \
+ (&reinterpret_cast <const volatile char &> \
+ (static_cast<type *> (0)->field))))
+#endif
+#endif
+#define __rangeof(type, start, end) \
+ (__offsetof(type, end) - __offsetof(type, start))
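+
+/*
+ * Illustrative sketch (hypothetical structure): __offsetof() gives a member's
+ * byte offset and __rangeof() the distance between two members, i.e. the
+ * bytes covered from the first member up to (not including) the second:
+ *
+ *	struct conn {
+ *		int	c_fd;
+ *		int	c_state;
+ *		int	c_flags;
+ *		int	c_error;
+ *	};
+ *	size_t off = __offsetof(struct conn, c_flags);
+ *	memset(&c->c_state, 0, __rangeof(struct conn, c_state, c_error));
+ */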
+
+/*
+ * Given the pointer x to the member m of the struct s, return
+ * a pointer to the containing structure. When using GCC, we first
+ * assign pointer x to a local variable, to check that its type is
+ * compatible with member m.
+ */
+#if __GNUC_PREREQ__(3, 1)
+#define __containerof(x, s, m) ({ \
+ const volatile __typeof(((s *)0)->m) *__x = (x); \
+ __DEQUALIFY(s *, (const volatile char *)__x - __offsetof(s, m));\
+})
+#else
+#define __containerof(x, s, m) \
+ __DEQUALIFY(s *, (const volatile char *)(x) - __offsetof(s, m))
+#endif
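+
+/*
+ * Illustrative sketch (hypothetical types): recover a pointer to the
+ * enclosing structure from a pointer to one of its members:
+ *
+ *	struct job {
+ *		int			j_id;
+ *		struct list_entry	j_link;
+ *	};
+ *	struct list_entry *e = get_next_entry();
+ *	struct job *j = __containerof(e, struct job, j_link);
+ */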
+
+/*
+ * Compiler-dependent macros to declare that functions take printf-like
+ * or scanf-like arguments. They are null except for versions of gcc
+ * that are known to support the features properly (old versions of gcc-2
+ * didn't permit keeping the keywords out of the application namespace).
+ */
+#if !__GNUC_PREREQ__(2, 7)
+#define __printflike(fmtarg, firstvararg)
+#define __scanflike(fmtarg, firstvararg)
+#define __format_arg(fmtarg)
+#define __strfmonlike(fmtarg, firstvararg)
+#define __strftimelike(fmtarg, firstvararg)
+#else
+#define __printflike(fmtarg, firstvararg) \
+ __attribute__((__format__ (__printf__, fmtarg, firstvararg)))
+#define __scanflike(fmtarg, firstvararg) \
+ __attribute__((__format__ (__scanf__, fmtarg, firstvararg)))
+#define __format_arg(fmtarg) __attribute__((__format_arg__ (fmtarg)))
+#define __strfmonlike(fmtarg, firstvararg) \
+ __attribute__((__format__ (__strfmon__, fmtarg, firstvararg)))
+#define __strftimelike(fmtarg, firstvararg) \
+ __attribute__((__format__ (__strftime__, fmtarg, firstvararg)))
+#endif
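+
+/*
+ * Illustrative sketch (hypothetical function): mark a printf-style wrapper so
+ * the compiler can check the format string against its variadic arguments;
+ * the first number is the 1-based index of the format argument, the second
+ * is the index of the first argument checked against it:
+ *
+ *	void	log_msg(int level, const char *fmt, ...) __printflike(2, 3);
+ */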
+
+/* Compiler-dependent macros that rely on FreeBSD-specific extensions. */
+#if defined(__FreeBSD_cc_version) && __FreeBSD_cc_version >= 300001 && \
+ defined(__GNUC__)
+#define __printf0like(fmtarg, firstvararg) \
+ __attribute__((__format__ (__printf0__, fmtarg, firstvararg)))
+#else
+#define __printf0like(fmtarg, firstvararg)
+#endif
+
+#if defined(__GNUC__)
+#define __strong_reference(sym,aliassym) \
+ extern __typeof (sym) aliassym __attribute__ ((__alias__ (#sym)))
+#ifdef __STDC__
+#define __weak_reference(sym,alias) \
+ __asm__(".weak " #alias); \
+ __asm__(".equ " #alias ", " #sym)
+#define __warn_references(sym,msg) \
+ __asm__(".section .gnu.warning." #sym); \
+ __asm__(".asciz \"" msg "\""); \
+ __asm__(".previous")
+#define __sym_compat(sym,impl,verid) \
+ __asm__(".symver " #impl ", " #sym "@" #verid)
+#define __sym_default(sym,impl,verid) \
+ __asm__(".symver " #impl ", " #sym "@@@" #verid)
+#else
+#define __weak_reference(sym,alias) \
+ __asm__(".weak alias"); \
+ __asm__(".equ alias, sym")
+#define __warn_references(sym,msg) \
+ __asm__(".section .gnu.warning.sym"); \
+ __asm__(".asciz \"msg\""); \
+ __asm__(".previous")
+#define __sym_compat(sym,impl,verid) \
+ __asm__(".symver impl, sym@verid")
+#define __sym_default(impl,sym,verid) \
+ __asm__(".symver impl, sym@@@verid")
+#endif /* __STDC__ */
+#endif /* __GNUC__ */
+
+#define __GLOBL(sym) __asm__(".globl " __XSTRING(sym))
+#define __WEAK(sym) __asm__(".weak " __XSTRING(sym))
+
+#if defined(__GNUC__)
+#define __IDSTRING(name,string) __asm__(".ident\t\"" string "\"")
+#else
+/*
+ * The following definition might not work well if used in header files,
+ * but it should be better than nothing. If you want a "do nothing"
+ * version, then it should generate some harmless declaration, such as:
+ * #define __IDSTRING(name,string) struct __hack
+ */
+#define __IDSTRING(name,string) static const char name[] __unused = string
+#endif
+
+/*
+ * Embed the rcs id of a source file in the resulting library. Note that in
+ * more recent ELF binutils, we use .ident allowing the ID to be stripped.
+ * Usage:
+ * __FBSDID("$FreeBSD$");
+ */
+#ifndef __FBSDID
+#if !defined(STRIP_FBSDID)
+#define __FBSDID(s) __IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
+#else
+#define __FBSDID(s) struct __hack
+#endif
+#endif
+
+#ifndef __RCSID
+#ifndef NO__RCSID
+#define __RCSID(s) __IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
+#else
+#define __RCSID(s) struct __hack
+#endif
+#endif
+
+#ifndef __RCSID_SOURCE
+#ifndef NO__RCSID_SOURCE
+#define __RCSID_SOURCE(s) __IDSTRING(__CONCAT(__rcsid_source_,__LINE__),s)
+#else
+#define __RCSID_SOURCE(s) struct __hack
+#endif
+#endif
+
+#ifndef __SCCSID
+#ifndef NO__SCCSID
+#define __SCCSID(s) __IDSTRING(__CONCAT(__sccsid_,__LINE__),s)
+#else
+#define __SCCSID(s) struct __hack
+#endif
+#endif
+
+#ifndef __COPYRIGHT
+#ifndef NO__COPYRIGHT
+#define __COPYRIGHT(s) __IDSTRING(__CONCAT(__copyright_,__LINE__),s)
+#else
+#define __COPYRIGHT(s) struct __hack
+#endif
+#endif
+
+#ifndef __DECONST
+#define __DECONST(type, var) ((type)(__uintptr_t)(const void *)(var))
+#endif
+
+#ifndef __DEVOLATILE
+#define __DEVOLATILE(type, var) ((type)(__uintptr_t)(volatile void *)(var))
+#endif
+
+#ifndef __DEQUALIFY
+#define __DEQUALIFY(type, var) ((type)(__uintptr_t)(const volatile void *)(var))
+#endif
+
+/*-
+ * The following definitions are an extension of the behavior originally
+ * implemented in <sys/_posix.h>, but with a different level of granularity.
+ * POSIX.1 requires that the macros we test be defined before any standard
+ * header file is included.
+ *
+ * Here's a quick run-down of the versions (and some informal names)
+ * defined(_POSIX_SOURCE) 1003.1-1988
+ * encoded as 198808 below
+ * _POSIX_C_SOURCE == 1 1003.1-1990
+ * encoded as 199009 below
+ * _POSIX_C_SOURCE == 2 1003.2-1992 C Language Binding Option
+ * encoded as 199209 below
+ * _POSIX_C_SOURCE == 199309 1003.1b-1993
+ * (1003.1 Issue 4, Single Unix Spec v1, Unix 93)
+ * _POSIX_C_SOURCE == 199506 1003.1c-1995, 1003.1i-1995,
+ * and the omnibus ISO/IEC 9945-1: 1996
+ * (1003.1 Issue 5, Single Unix Spec v2, Unix 95)
+ * _POSIX_C_SOURCE == 200112 1003.1-2001 (1003.1 Issue 6, Unix 03)
+ * _POSIX_C_SOURCE == 200809 1003.1-2008 (1003.1 Issue 7)
+ * IEEE Std 1003.1-2017 (Rev of 1003.1-2008) is
+ * 1003.1-2008 with two TCs applied with
+ * _POSIX_C_SOURCE=200809 and _XOPEN_SOURCE=700
+ *
+ * In addition, the X/Open Portability Guide, which is now the Single UNIX
+ * Specification, defines a feature-test macro which indicates the version of
+ * that specification, and which subsumes _POSIX_C_SOURCE.
+ *
+ * Our macros begin with two underscores to avoid namespace screwage.
+ */
+
+/* Deal with IEEE Std. 1003.1-1990, in which _POSIX_C_SOURCE == 1. */
+#if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE == 1
+#undef _POSIX_C_SOURCE /* Probably illegal, but beyond caring now. */
+#define _POSIX_C_SOURCE 199009
+#endif
+
+/* Deal with IEEE Std. 1003.2-1992, in which _POSIX_C_SOURCE == 2. */
+#if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE == 2
+#undef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 199209
+#endif
+
+/* Deal with various X/Open Portability Guides and Single UNIX Spec. */
+#ifdef _XOPEN_SOURCE
+#if _XOPEN_SOURCE - 0 >= 700
+#define __XSI_VISIBLE 700
+#undef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 200809
+#elif _XOPEN_SOURCE - 0 >= 600
+#define __XSI_VISIBLE 600
+#undef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 200112
+#elif _XOPEN_SOURCE - 0 >= 500
+#define __XSI_VISIBLE 500
+#undef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 199506
+#endif
+#endif
+
+/*
+ * Deal with all versions of POSIX. The ordering relative to the tests above is
+ * important.
+ */
+#if defined(_POSIX_SOURCE) && !defined(_POSIX_C_SOURCE)
+#define _POSIX_C_SOURCE 198808
+#endif
+#ifdef _POSIX_C_SOURCE
+#if _POSIX_C_SOURCE >= 200809
+#define __POSIX_VISIBLE 200809
+#define __ISO_C_VISIBLE 1999
+#elif _POSIX_C_SOURCE >= 200112
+#define __POSIX_VISIBLE 200112
+#define __ISO_C_VISIBLE 1999
+#elif _POSIX_C_SOURCE >= 199506
+#define __POSIX_VISIBLE 199506
+#define __ISO_C_VISIBLE 1990
+#elif _POSIX_C_SOURCE >= 199309
+#define __POSIX_VISIBLE 199309
+#define __ISO_C_VISIBLE 1990
+#elif _POSIX_C_SOURCE >= 199209
+#define __POSIX_VISIBLE 199209
+#define __ISO_C_VISIBLE 1990
+#elif _POSIX_C_SOURCE >= 199009
+#define __POSIX_VISIBLE 199009
+#define __ISO_C_VISIBLE 1990
+#else
+#define __POSIX_VISIBLE 198808
+#define __ISO_C_VISIBLE 0
+#endif /* _POSIX_C_SOURCE */
+/*
+ * Both glibc and OpenBSD enable C11 features when _ISOC11_SOURCE is defined, or
+ * when compiling with -std=c11. A strict reading of the standard would suggest
+ * doing it only for the former. However, a strict reading also requires C99
+ * mode only, so building with C11 is already undefined. Follow glibc's and
+ * OpenBSD's lead for this non-standard configuration for maximum compatibility.
+ */
+#if _ISOC11_SOURCE || (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L)
+#undef __ISO_C_VISIBLE
+#define __ISO_C_VISIBLE 2011
+#endif
+#else
+/*-
+ * Deal with _ANSI_SOURCE:
+ * If it is defined, and no other compilation environment is explicitly
+ * requested, then define our internal feature-test macros to zero. This
+ * makes no difference to the preprocessor (undefined symbols in preprocessing
+ * expressions are defined to have value zero), but makes it more convenient for
+ * a test program to print out the values.
+ *
+ * If a program mistakenly defines _ANSI_SOURCE and some other macro such as
+ * _POSIX_C_SOURCE, we will assume that it wants the broader compilation
+ * environment (and in fact we will never get here).
+ */
+#if defined(_ANSI_SOURCE) /* Hide almost everything. */
+#define __POSIX_VISIBLE 0
+#define __XSI_VISIBLE 0
+#define __BSD_VISIBLE 0
+#define __ISO_C_VISIBLE 1990
+#define __EXT1_VISIBLE 0
+#elif defined(_C99_SOURCE) /* Localism to specify strict C99 env. */
+#define __POSIX_VISIBLE 0
+#define __XSI_VISIBLE 0
+#define __BSD_VISIBLE 0
+#define __ISO_C_VISIBLE 1999
+#define __EXT1_VISIBLE 0
+#elif defined(_C11_SOURCE) /* Localism to specify strict C11 env. */
+#define __POSIX_VISIBLE 0
+#define __XSI_VISIBLE 0
+#define __BSD_VISIBLE 0
+#define __ISO_C_VISIBLE 2011
+#define __EXT1_VISIBLE 0
+#else /* Default environment: show everything. */
+#define __POSIX_VISIBLE 200809
+#define __XSI_VISIBLE 700
+#define __BSD_VISIBLE 1
+#define __ISO_C_VISIBLE 2011
+#define __EXT1_VISIBLE 1
+#endif
+#endif
+
+/* User override __EXT1_VISIBLE */
+#if defined(__STDC_WANT_LIB_EXT1__)
+#undef __EXT1_VISIBLE
+#if __STDC_WANT_LIB_EXT1__
+#define __EXT1_VISIBLE 1
+#else
+#define __EXT1_VISIBLE 0
+#endif
+#endif /* __STDC_WANT_LIB_EXT1__ */
+
+/*
+ * Old versions of GCC use non-standard ARM arch symbols; acle-compat.h
+ * translates them to __ARM_ARCH and the modern feature symbols defined by ARM.
+ */
+#if defined(__arm__) && !defined(__ARM_ARCH)
+#include <machine/acle-compat.h>
+#endif
+
+/*
+ * Nullability qualifiers: currently only supported by Clang.
+ */
+#if !(defined(__clang__) && __has_feature(nullability))
+#define _Nonnull
+#define _Nullable
+#define _Null_unspecified
+#define __NULLABILITY_PRAGMA_PUSH
+#define __NULLABILITY_PRAGMA_POP
+#else
+#define __NULLABILITY_PRAGMA_PUSH _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Wnullability-completeness\"")
+#define __NULLABILITY_PRAGMA_POP _Pragma("clang diagnostic pop")
+#endif
+
+/*
+ * Type Safety Checking
+ *
+ * Clang provides additional attributes to enable checking type safety
+ * properties that cannot be enforced by the C type system.
+ */
+
+#if __has_attribute(__argument_with_type_tag__) && \
+ __has_attribute(__type_tag_for_datatype__)
+#define __arg_type_tag(arg_kind, arg_idx, type_tag_idx) \
+ __attribute__((__argument_with_type_tag__(arg_kind, arg_idx, type_tag_idx)))
+#define __datatype_type_tag(kind, type) \
+ __attribute__((__type_tag_for_datatype__(kind, type)))
+#else
+#define __arg_type_tag(arg_kind, arg_idx, type_tag_idx)
+#define __datatype_type_tag(kind, type)
+#endif
+
+/*
+ * Lock annotations.
+ *
+ * Clang provides support for doing basic thread-safety tests at
+ * compile-time, by marking which locks will/should be held when
+ * entering/leaving a functions.
+ *
+ * Furthermore, it is also possible to annotate variables and structure
+ * members to enforce that they are only accessed when certain locks are
+ * held.
+ */
+
+#if __has_extension(c_thread_safety_attributes)
+#define __lock_annotate(x) __attribute__((x))
+#else
+#define __lock_annotate(x)
+#endif
+
+/* Structure implements a lock. */
+#define __lockable __lock_annotate(lockable)
+
+/* Function acquires an exclusive or shared lock. */
+#define __locks_exclusive(...) \
+ __lock_annotate(exclusive_lock_function(__VA_ARGS__))
+#define __locks_shared(...) \
+ __lock_annotate(shared_lock_function(__VA_ARGS__))
+
+/* Function attempts to acquire an exclusive or shared lock. */
+#define __trylocks_exclusive(...) \
+ __lock_annotate(exclusive_trylock_function(__VA_ARGS__))
+#define __trylocks_shared(...) \
+ __lock_annotate(shared_trylock_function(__VA_ARGS__))
+
+/* Function releases a lock. */
+#define __unlocks(...) __lock_annotate(unlock_function(__VA_ARGS__))
+
+/* Function asserts that an exclusive or shared lock is held. */
+#define __asserts_exclusive(...) \
+ __lock_annotate(assert_exclusive_lock(__VA_ARGS__))
+#define __asserts_shared(...) \
+ __lock_annotate(assert_shared_lock(__VA_ARGS__))
+
+/* Function requires that an exclusive or shared lock is or is not held. */
+#define __requires_exclusive(...) \
+ __lock_annotate(exclusive_locks_required(__VA_ARGS__))
+#define __requires_shared(...) \
+ __lock_annotate(shared_locks_required(__VA_ARGS__))
+#define __requires_unlocked(...) \
+ __lock_annotate(locks_excluded(__VA_ARGS__))
+
+/* Function should not be analyzed. */
+#define __no_lock_analysis __lock_annotate(no_thread_safety_analysis)
+
+/*
+ * Function or variable should not be sanitized, e.g., by AddressSanitizer.
+ * GCC has the nosanitize attribute, but as a function attribute only, and
+ * warns on use as a variable attribute.
+ */
+#if __has_attribute(no_sanitize) && defined(__clang__)
+#ifdef _KERNEL
+#define __nosanitizeaddress __attribute__((no_sanitize("kernel-address")))
+#define __nosanitizememory __attribute__((no_sanitize("kernel-memory")))
+#else
+#define __nosanitizeaddress __attribute__((no_sanitize("address")))
+#define __nosanitizememory __attribute__((no_sanitize("memory")))
+#endif
+#define __nosanitizethread __attribute__((no_sanitize("thread")))
+#else
+#define __nosanitizeaddress
+#define __nosanitizememory
+#define __nosanitizethread
+#endif
+
+/* Guard variables and structure members by lock. */
+#define __guarded_by(x) __lock_annotate(guarded_by(x))
+#define __pt_guarded_by(x) __lock_annotate(pt_guarded_by(x))
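+
+/*
+ * Illustrative sketch (hypothetical lock API): tie a lock, its acquire and
+ * release functions, and the data it protects together so that Clang's
+ * thread-safety analysis can flag unlocked accesses:
+ *
+ *	struct mtx { ... } __lockable;
+ *
+ *	void	mtx_lock(struct mtx *m) __locks_exclusive(*m);
+ *	void	mtx_unlock(struct mtx *m) __unlocks(*m);
+ *
+ *	static struct mtx counter_mtx;
+ *	static int counter __guarded_by(counter_mtx);
+ */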
+
+/* Alignment builtins for better type checking and improved code generation. */
+/* Provide fallback versions for other compilers (GCC/Clang < 10): */
+#if !__has_builtin(__builtin_is_aligned)
+#define __builtin_is_aligned(x, align) \
+ (((__uintptr_t)x & ((align) - 1)) == 0)
+#endif
+#if !__has_builtin(__builtin_align_up)
+#define __builtin_align_up(x, align) \
+ ((__typeof__(x))(((__uintptr_t)(x)+((align)-1))&(~((align)-1))))
+#endif
+#if !__has_builtin(__builtin_align_down)
+#define __builtin_align_down(x, align) \
+ ((__typeof__(x))((x)&(~((align)-1))))
+#endif
+
+#define __align_up(x, y) __builtin_align_up(x, y)
+#define __align_down(x, y) __builtin_align_down(x, y)
+#define __is_aligned(x, y) __builtin_is_aligned(x, y)
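+
+/*
+ * Illustrative sketch: round values to a power-of-two boundary and test
+ * alignment (the fallback definitions above assume align is a power of two):
+ *
+ *	len  = __align_up(len, 8);		(13 becomes 16)
+ *	base = __align_down(base, 4096);	(page-align downwards)
+ *	if (!__is_aligned(ptr, sizeof(uint64_t)))
+ *		return (EINVAL);
+ */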
+
+#endif /* !_SYS_CDEFS_H_ */
diff --git a/include/lib/libc/time.h b/include/lib/libc/time.h
new file mode 100644
index 0000000..e1eb2a5
--- /dev/null
+++ b/include/lib/libc/time.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2012-2017 Roberto E. Vargas Caballero
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+/*
+ * Portions copyright (c) 2018-2019, Arm Limited and Contributors.
+ * All rights reserved.
+ */
+
+#ifndef TIME_H
+#define TIME_H
+
+#include <stddef.h>
+
+typedef long int time_t;
+
+#endif /* TIME_H */
diff --git a/include/lib/libfdt/fdt.h b/include/lib/libfdt/fdt.h
new file mode 100644
index 0000000..c9acc0c
--- /dev/null
+++ b/include/lib/libfdt/fdt.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */
+#ifndef FDT_H
+#define FDT_H
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ * Copyright 2012 Kim Phillips, Freescale Semiconductor.
+ */
+
+#ifndef __ASSEMBLER__
+
+struct fdt_header {
+ fdt32_t magic; /* magic word FDT_MAGIC */
+ fdt32_t totalsize; /* total size of DT block */
+ fdt32_t off_dt_struct; /* offset to structure */
+ fdt32_t off_dt_strings; /* offset to strings */
+ fdt32_t off_mem_rsvmap; /* offset to memory reserve map */
+ fdt32_t version; /* format version */
+ fdt32_t last_comp_version; /* last compatible version */
+
+ /* version 2 fields below */
+ fdt32_t boot_cpuid_phys; /* Which physical CPU id we're
+ booting on */
+ /* version 3 fields below */
+ fdt32_t size_dt_strings; /* size of the strings block */
+
+ /* version 17 fields below */
+ fdt32_t size_dt_struct; /* size of the structure block */
+};
+
+struct fdt_reserve_entry {
+ fdt64_t address;
+ fdt64_t size;
+};
+
+struct fdt_node_header {
+ fdt32_t tag;
+ char name[];
+};
+
+struct fdt_property {
+ fdt32_t tag;
+ fdt32_t len;
+ fdt32_t nameoff;
+ char data[];
+};
+
+#endif /* !__ASSEMBLER__*/
+
+#define FDT_MAGIC 0xd00dfeed /* 4: version, 4: total size */
+#define FDT_TAGSIZE sizeof(fdt32_t)
+
+#define FDT_BEGIN_NODE 0x1 /* Start node: full name */
+#define FDT_END_NODE 0x2 /* End node */
+#define FDT_PROP 0x3 /* Property: name off,
+ size, content */
+#define FDT_NOP 0x4 /* nop */
+#define FDT_END 0x9
+
+#define FDT_V1_SIZE (7*sizeof(fdt32_t))
+#define FDT_V2_SIZE (FDT_V1_SIZE + sizeof(fdt32_t))
+#define FDT_V3_SIZE (FDT_V2_SIZE + sizeof(fdt32_t))
+#define FDT_V16_SIZE FDT_V3_SIZE
+#define FDT_V17_SIZE (FDT_V16_SIZE + sizeof(fdt32_t))
+
+#endif /* FDT_H */
diff --git a/include/lib/libfdt/libfdt.h b/include/lib/libfdt/libfdt.h
new file mode 100644
index 0000000..d0a2ed2
--- /dev/null
+++ b/include/lib/libfdt/libfdt.h
@@ -0,0 +1,2154 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */
+#ifndef LIBFDT_H
+#define LIBFDT_H
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ */
+
+#include <libfdt_env.h>
+#include <fdt.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define FDT_FIRST_SUPPORTED_VERSION 0x02
+#define FDT_LAST_COMPATIBLE_VERSION 0x10
+#define FDT_LAST_SUPPORTED_VERSION 0x11
+
+/* Error codes: informative error codes */
+#define FDT_ERR_NOTFOUND 1
+ /* FDT_ERR_NOTFOUND: The requested node or property does not exist */
+#define FDT_ERR_EXISTS 2
+ /* FDT_ERR_EXISTS: Attempted to create a node or property which
+ * already exists */
+#define FDT_ERR_NOSPACE 3
+ /* FDT_ERR_NOSPACE: Operation needed to expand the device
+ * tree, but its buffer did not have sufficient space to
+ * contain the expanded tree. Use fdt_open_into() to move the
+ * device tree to a buffer with more space. */
+
+/* Error codes: codes for bad parameters */
+#define FDT_ERR_BADOFFSET 4
+ /* FDT_ERR_BADOFFSET: Function was passed a structure block
+ * offset which is out-of-bounds, or which points to an
+ * unsuitable part of the structure for the operation. */
+#define FDT_ERR_BADPATH 5
+ /* FDT_ERR_BADPATH: Function was passed a badly formatted path
+ * (e.g. missing a leading / for a function which requires an
+ * absolute path) */
+#define FDT_ERR_BADPHANDLE 6
+ /* FDT_ERR_BADPHANDLE: Function was passed an invalid phandle.
+ * This can be caused either by an invalid phandle property
+ * length, or the phandle value was either 0 or -1, which are
+ * not permitted. */
+#define FDT_ERR_BADSTATE 7
+ /* FDT_ERR_BADSTATE: Function was passed an incomplete device
+ * tree created by the sequential-write functions, which is
+ * not sufficiently complete for the requested operation. */
+
+/* Error codes: codes for bad device tree blobs */
+#define FDT_ERR_TRUNCATED 8
+ /* FDT_ERR_TRUNCATED: FDT or a sub-block is improperly
+ * terminated (overflows, goes outside allowed bounds, or
+ * isn't properly terminated). */
+#define FDT_ERR_BADMAGIC 9
+ /* FDT_ERR_BADMAGIC: Given "device tree" appears not to be a
+ * device tree at all - it is missing the flattened device
+ * tree magic number. */
+#define FDT_ERR_BADVERSION 10
+ /* FDT_ERR_BADVERSION: Given device tree has a version which
+ * can't be handled by the requested operation. For
+ * read-write functions, this may mean that fdt_open_into() is
+ * required to convert the tree to the expected version. */
+#define FDT_ERR_BADSTRUCTURE 11
+ /* FDT_ERR_BADSTRUCTURE: Given device tree has a corrupt
+ * structure block or other serious error (e.g. misnested
+ * nodes, or subnodes preceding properties). */
+#define FDT_ERR_BADLAYOUT 12
+ /* FDT_ERR_BADLAYOUT: For read-write functions, the given
+ * device tree has its sub-blocks in an order that the
+ * function can't handle (memory reserve map, then structure,
+ * then strings). Use fdt_open_into() to reorganize the tree
+ * into a form suitable for the read-write operations. */
+
+/* "Can't happen" error indicating a bug in libfdt */
+#define FDT_ERR_INTERNAL 13
+ /* FDT_ERR_INTERNAL: libfdt has failed an internal assertion.
+ * Should never be returned, if it is, it indicates a bug in
+ * libfdt itself. */
+
+/* Errors in device tree content */
+#define FDT_ERR_BADNCELLS 14
+ /* FDT_ERR_BADNCELLS: Device tree has a #address-cells, #size-cells
+ * or similar property with a bad format or value */
+
+#define FDT_ERR_BADVALUE 15
+ /* FDT_ERR_BADVALUE: Device tree has a property with an unexpected
+ * value. For example: a property expected to contain a string list
+ * is not NUL-terminated within the length of its value. */
+
+#define FDT_ERR_BADOVERLAY 16
+ /* FDT_ERR_BADOVERLAY: The device tree overlay, while
+ * correctly structured, cannot be applied due to some
+ * unexpected or missing value, property or node. */
+
+#define FDT_ERR_NOPHANDLES 17
+ /* FDT_ERR_NOPHANDLES: The device tree doesn't have any
+ * phandle available anymore without causing an overflow */
+
+#define FDT_ERR_BADFLAGS 18
+ /* FDT_ERR_BADFLAGS: The function was passed a flags field that
+ * contains invalid flags or an invalid combination of flags. */
+
+#define FDT_ERR_ALIGNMENT 19
+ /* FDT_ERR_ALIGNMENT: The device tree base address is not 8-byte
+ * aligned. */
+
+#define FDT_ERR_MAX 19
+
+/* constants */
+#define FDT_MAX_PHANDLE 0xfffffffe
+ /* Valid values for phandles range from 1 to 2^32-2. */
+
+/**********************************************************************/
+/* Low-level functions (you probably don't need these) */
+/**********************************************************************/
+
+#ifndef SWIG /* This function is not useful in Python */
+const void *fdt_offset_ptr(const void *fdt, int offset, unsigned int checklen);
+#endif
+static inline void *fdt_offset_ptr_w(void *fdt, int offset, int checklen)
+{
+ return (void *)(uintptr_t)fdt_offset_ptr(fdt, offset, checklen);
+}
+
+uint32_t fdt_next_tag(const void *fdt, int offset, int *nextoffset);
+
+/*
+ * External helpers to access words from a device tree blob. They're built
+ * to work even with unaligned pointers on platforms (such as ARMv5) that don't
+ * like unaligned loads and stores.
+ */
+static inline uint16_t fdt16_ld(const fdt16_t *p)
+{
+ const uint8_t *bp = (const uint8_t *)p;
+
+ return ((uint16_t)bp[0] << 8) | bp[1];
+}
+
+static inline uint32_t fdt32_ld(const fdt32_t *p)
+{
+ const uint8_t *bp = (const uint8_t *)p;
+
+ return ((uint32_t)bp[0] << 24)
+ | ((uint32_t)bp[1] << 16)
+ | ((uint32_t)bp[2] << 8)
+ | bp[3];
+}
+
+static inline void fdt32_st(void *property, uint32_t value)
+{
+ uint8_t *bp = (uint8_t *)property;
+
+ bp[0] = value >> 24;
+ bp[1] = (value >> 16) & 0xff;
+ bp[2] = (value >> 8) & 0xff;
+ bp[3] = value & 0xff;
+}
+
+static inline uint64_t fdt64_ld(const fdt64_t *p)
+{
+ const uint8_t *bp = (const uint8_t *)p;
+
+ return ((uint64_t)bp[0] << 56)
+ | ((uint64_t)bp[1] << 48)
+ | ((uint64_t)bp[2] << 40)
+ | ((uint64_t)bp[3] << 32)
+ | ((uint64_t)bp[4] << 24)
+ | ((uint64_t)bp[5] << 16)
+ | ((uint64_t)bp[6] << 8)
+ | bp[7];
+}
+
+static inline void fdt64_st(void *property, uint64_t value)
+{
+ uint8_t *bp = (uint8_t *)property;
+
+ bp[0] = value >> 56;
+ bp[1] = (value >> 48) & 0xff;
+ bp[2] = (value >> 40) & 0xff;
+ bp[3] = (value >> 32) & 0xff;
+ bp[4] = (value >> 24) & 0xff;
+ bp[5] = (value >> 16) & 0xff;
+ bp[6] = (value >> 8) & 0xff;
+ bp[7] = value & 0xff;
+}
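+
+/*
+ * Illustrative sketch (not part of the libfdt API proper): use these loaders
+ * whenever reading raw header or property words, since the blob stores them
+ * big-endian and may live at an unaligned address:
+ *
+ *	const struct fdt_header *hdr = blob;
+ *
+ *	if (fdt32_ld(&hdr->magic) != FDT_MAGIC)
+ *		return -FDT_ERR_BADMAGIC;
+ *	total = fdt32_ld(&hdr->totalsize);
+ */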
+
+/**********************************************************************/
+/* Traversal functions */
+/**********************************************************************/
+
+int fdt_next_node(const void *fdt, int offset, int *depth);
+
+/**
+ * fdt_first_subnode() - get offset of first direct subnode
+ * @fdt: FDT blob
+ * @offset: Offset of node to check
+ *
+ * Return: offset of first subnode, or -FDT_ERR_NOTFOUND if there is none
+ */
+int fdt_first_subnode(const void *fdt, int offset);
+
+/**
+ * fdt_next_subnode() - get offset of next direct subnode
+ * @fdt: FDT blob
+ * @offset: Offset of previous subnode
+ *
+ * After first calling fdt_first_subnode(), call this function repeatedly to
+ * get direct subnodes of a parent node.
+ *
+ * Return: offset of next subnode, or -FDT_ERR_NOTFOUND if there are no more
+ * subnodes
+ */
+int fdt_next_subnode(const void *fdt, int offset);
+
+/**
+ * fdt_for_each_subnode - iterate over all subnodes of a parent
+ *
+ * @node: child node (int, lvalue)
+ * @fdt: FDT blob (const void *)
+ * @parent: parent node (int)
+ *
+ * This is actually a wrapper around a for loop and would be used like so:
+ *
+ * fdt_for_each_subnode(node, fdt, parent) {
+ * Use node
+ * ...
+ * }
+ *
+ * if ((node < 0) && (node != -FDT_ERR_NOTFOUND)) {
+ * Error handling
+ * }
+ *
+ * Note that this is implemented as a macro and @node is used as
+ * the iterator in the loop. The parent variable can be constant or even a
+ * literal.
+ */
+#define fdt_for_each_subnode(node, fdt, parent) \
+ for (node = fdt_first_subnode(fdt, parent); \
+ node >= 0; \
+ node = fdt_next_subnode(fdt, node))
+
+/**********************************************************************/
+/* General functions */
+/**********************************************************************/
+#define fdt_get_header(fdt, field) \
+ (fdt32_ld(&((const struct fdt_header *)(fdt))->field))
+#define fdt_magic(fdt) (fdt_get_header(fdt, magic))
+#define fdt_totalsize(fdt) (fdt_get_header(fdt, totalsize))
+#define fdt_off_dt_struct(fdt) (fdt_get_header(fdt, off_dt_struct))
+#define fdt_off_dt_strings(fdt) (fdt_get_header(fdt, off_dt_strings))
+#define fdt_off_mem_rsvmap(fdt) (fdt_get_header(fdt, off_mem_rsvmap))
+#define fdt_version(fdt) (fdt_get_header(fdt, version))
+#define fdt_last_comp_version(fdt) (fdt_get_header(fdt, last_comp_version))
+#define fdt_boot_cpuid_phys(fdt) (fdt_get_header(fdt, boot_cpuid_phys))
+#define fdt_size_dt_strings(fdt) (fdt_get_header(fdt, size_dt_strings))
+#define fdt_size_dt_struct(fdt) (fdt_get_header(fdt, size_dt_struct))
+
+#define fdt_set_hdr_(name) \
+ static inline void fdt_set_##name(void *fdt, uint32_t val) \
+ { \
+ struct fdt_header *fdth = (struct fdt_header *)fdt; \
+ fdth->name = cpu_to_fdt32(val); \
+ }
+fdt_set_hdr_(magic);
+fdt_set_hdr_(totalsize);
+fdt_set_hdr_(off_dt_struct);
+fdt_set_hdr_(off_dt_strings);
+fdt_set_hdr_(off_mem_rsvmap);
+fdt_set_hdr_(version);
+fdt_set_hdr_(last_comp_version);
+fdt_set_hdr_(boot_cpuid_phys);
+fdt_set_hdr_(size_dt_strings);
+fdt_set_hdr_(size_dt_struct);
+#undef fdt_set_hdr_
+
+/**
+ * fdt_header_size - return the size of the tree's header
+ * @fdt: pointer to a flattened device tree
+ *
+ * Return: size of DTB header in bytes
+ */
+size_t fdt_header_size(const void *fdt);
+
+/**
+ * fdt_header_size_ - internal function to get header size from a version number
+ * @version: devicetree version number
+ *
+ * Return: size of DTB header in bytes
+ */
+size_t fdt_header_size_(uint32_t version);
+
+/**
+ * fdt_check_header - sanity check a device tree header
+ * @fdt: pointer to data which might be a flattened device tree
+ *
+ * fdt_check_header() checks that the given buffer contains what
+ * appears to be a flattened device tree, and that the header contains
+ * valid information (to the extent that can be determined from the
+ * header alone).
+ *
+ * returns:
+ * 0, if the buffer appears to contain a valid device tree
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_TRUNCATED, standard meanings, as above
+ */
+int fdt_check_header(const void *fdt);
+
+/**
+ * fdt_move - move a device tree around in memory
+ * @fdt: pointer to the device tree to move
+ * @buf: pointer to memory where the device is to be moved
+ * @bufsize: size of the memory space at buf
+ *
+ * fdt_move() relocates, if possible, the device tree blob located at
+ * fdt to the buffer at buf of size bufsize. The buffer may overlap
+ * with the existing device tree blob at fdt. Therefore,
+ * fdt_move(fdt, fdt, fdt_totalsize(fdt))
+ * should always succeed.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, bufsize is insufficient to contain the device tree
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE, standard meanings
+ */
+int fdt_move(const void *fdt, void *buf, int bufsize);
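+
+/*
+ * Illustrative sketch (hypothetical buffer names): validate a blob, then
+ * relocate it into a private buffer before working on it:
+ *
+ *	err = fdt_check_header(fdt);
+ *	if (err != 0)
+ *		return err;
+ *	err = fdt_move(fdt, scratch_buf, scratch_size);
+ *	if (err != 0)
+ *		return err;
+ */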
+
+/**********************************************************************/
+/* Read-only functions */
+/**********************************************************************/
+
+int fdt_check_full(const void *fdt, size_t bufsize);
+
+/**
+ * fdt_get_string - retrieve a string from the strings block of a device tree
+ * @fdt: pointer to the device tree blob
+ * @stroffset: offset of the string within the strings block (native endian)
+ * @lenp: optional pointer to return the string's length
+ *
+ * fdt_get_string() retrieves a pointer to a single string from the
+ * strings block of the device tree blob at fdt, and optionally also
+ * returns the string's length in *lenp.
+ *
+ * returns:
+ * a pointer to the string, on success
+ * NULL, if stroffset is out of bounds, or doesn't point to a valid string
+ */
+const char *fdt_get_string(const void *fdt, int stroffset, int *lenp);
+
+/**
+ * fdt_string - retrieve a string from the strings block of a device tree
+ * @fdt: pointer to the device tree blob
+ * @stroffset: offset of the string within the strings block (native endian)
+ *
+ * fdt_string() retrieves a pointer to a single string from the
+ * strings block of the device tree blob at fdt.
+ *
+ * returns:
+ * a pointer to the string, on success
+ * NULL, if stroffset is out of bounds, or doesn't point to a valid string
+ */
+const char *fdt_string(const void *fdt, int stroffset);
+
+/**
+ * fdt_find_max_phandle - find and return the highest phandle in a tree
+ * @fdt: pointer to the device tree blob
+ * @phandle: return location for the highest phandle value found in the tree
+ *
+ * fdt_find_max_phandle() finds the highest phandle value in the given device
+ * tree. The value returned in @phandle is only valid if the function returns
+ * success.
+ *
+ * returns:
+ * 0 on success or a negative error code on failure
+ */
+int fdt_find_max_phandle(const void *fdt, uint32_t *phandle);
+
+/**
+ * fdt_get_max_phandle - retrieves the highest phandle in a tree
+ * @fdt: pointer to the device tree blob
+ *
+ * fdt_get_max_phandle retrieves the highest phandle in the given
+ * device tree. This will ignore badly formatted phandles, or phandles
+ * with a value of 0 or -1.
+ *
+ * This function is deprecated in favour of fdt_find_max_phandle().
+ *
+ * returns:
+ * the highest phandle on success
+ * 0, if no phandle was found in the device tree
+ * -1, if an error occurred
+ */
+static inline uint32_t fdt_get_max_phandle(const void *fdt)
+{
+ uint32_t phandle;
+ int err;
+
+ err = fdt_find_max_phandle(fdt, &phandle);
+ if (err < 0)
+ return (uint32_t)-1;
+
+ return phandle;
+}
+
+/**
+ * fdt_generate_phandle - return a new, unused phandle for a device tree blob
+ * @fdt: pointer to the device tree blob
+ * @phandle: return location for the new phandle
+ *
+ * Walks the device tree blob and looks for the highest phandle value. On
+ * success, the new, unused phandle value (one higher than the previously
+ * highest phandle value in the device tree blob) will be returned in the
+ * @phandle parameter.
+ *
+ * Return: 0 on success or a negative error-code on failure
+ */
+int fdt_generate_phandle(const void *fdt, uint32_t *phandle);
+
+/**
+ * fdt_num_mem_rsv - retrieve the number of memory reserve map entries
+ * @fdt: pointer to the device tree blob
+ *
+ * Returns the number of entries in the device tree blob's memory
+ * reservation map. This does not include the terminating 0,0 entry
+ * or any other (0,0) entries reserved for expansion.
+ *
+ * returns:
+ * the number of entries
+ */
+int fdt_num_mem_rsv(const void *fdt);
+
+/**
+ * fdt_get_mem_rsv - retrieve one memory reserve map entry
+ * @fdt: pointer to the device tree blob
+ * @n: index of reserve map entry
+ * @address: pointer to 64-bit variable to hold the start address
+ * @size: pointer to 64-bit variable to hold the size of the entry
+ *
+ * On success, @address and @size will contain the address and size of
+ * the n-th reserve map entry from the device tree blob, in
+ * native-endian format.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE, standard meanings
+ */
+int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size);
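+
+/*
+ * Illustrative sketch (reserve_region() is a hypothetical consumer): walk
+ * every entry in the memory reservation map:
+ *
+ *	int i, n = fdt_num_mem_rsv(fdt);
+ *
+ *	for (i = 0; i < n; i++) {
+ *		uint64_t addr, size;
+ *
+ *		if (fdt_get_mem_rsv(fdt, i, &addr, &size) == 0)
+ *			reserve_region(addr, size);
+ *	}
+ */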
+
+/**
+ * fdt_subnode_offset_namelen - find a subnode based on substring
+ * @fdt: pointer to the device tree blob
+ * @parentoffset: structure block offset of a node
+ * @name: name of the subnode to locate
+ * @namelen: number of characters of name to consider
+ *
+ * Identical to fdt_subnode_offset(), but only examine the first
+ * namelen characters of name for matching the subnode name. This is
+ * useful for finding subnodes based on a portion of a larger string,
+ * such as a full path.
+ *
+ * Return: offset of the subnode or -FDT_ERR_NOTFOUND if name not found.
+ */
+#ifndef SWIG /* Not available in Python */
+int fdt_subnode_offset_namelen(const void *fdt, int parentoffset,
+ const char *name, int namelen);
+#endif
+/**
+ * fdt_subnode_offset - find a subnode of a given node
+ * @fdt: pointer to the device tree blob
+ * @parentoffset: structure block offset of a node
+ * @name: name of the subnode to locate
+ *
+ * fdt_subnode_offset() finds a subnode of the node at structure block
+ * offset parentoffset with the given name. name may include a unit
+ * address, in which case fdt_subnode_offset() will find the subnode
+ * with that unit address, or the unit address may be omitted, in
+ * which case fdt_subnode_offset() will find an arbitrary subnode
+ * whose name excluding unit address matches the given name.
+ *
+ * returns:
+ * structure block offset of the requested subnode (>=0), on success
+ * -FDT_ERR_NOTFOUND, if the requested subnode does not exist
+ * -FDT_ERR_BADOFFSET, if parentoffset did not point to an FDT_BEGIN_NODE
+ * tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings.
+ */
+int fdt_subnode_offset(const void *fdt, int parentoffset, const char *name);
+
+/**
+ * fdt_path_offset_namelen - find a tree node by its full path
+ * @fdt: pointer to the device tree blob
+ * @path: full path of the node to locate
+ * @namelen: number of characters of path to consider
+ *
+ * Identical to fdt_path_offset(), but only consider the first namelen
+ * characters of path as the path name.
+ *
+ * Return: offset of the node or negative libfdt error value otherwise
+ */
+#ifndef SWIG /* Not available in Python */
+int fdt_path_offset_namelen(const void *fdt, const char *path, int namelen);
+#endif
+
+/**
+ * fdt_path_offset - find a tree node by its full path
+ * @fdt: pointer to the device tree blob
+ * @path: full path of the node to locate
+ *
+ * fdt_path_offset() finds a node of a given path in the device tree.
+ * Each path component may omit the unit address portion, but the
+ * results of this are undefined if any such path component is
+ * ambiguous (that is if there are multiple nodes at the relevant
+ * level matching the given component, differentiated only by unit
+ * address).
+ *
+ * returns:
+ * structure block offset of the node with the requested path (>=0), on
+ * success
+ * -FDT_ERR_BADPATH, given path does not begin with '/' or is invalid
+ * -FDT_ERR_NOTFOUND, if the requested node does not exist
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings.
+ */
+int fdt_path_offset(const void *fdt, const char *path);
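+
+/*
+ * Illustrative sketch (hypothetical node names): resolve a node by absolute
+ * path, then find one of its children by name:
+ *
+ *	int soc = fdt_path_offset(fdt, "/soc");
+ *	int uart;
+ *
+ *	if (soc < 0)
+ *		return soc;
+ *	uart = fdt_subnode_offset(fdt, soc, "serial@1000");
+ */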
+
+/**
+ * fdt_get_name - retrieve the name of a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: structure block offset of the starting node
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_get_name() retrieves the name (including unit address) of the
+ * device tree node at structure block offset nodeoffset. If lenp is
+ * non-NULL, the length of this name is also returned, in the integer
+ * pointed to by lenp.
+ *
+ * returns:
+ * pointer to the node's name, on success
+ * If lenp is non-NULL, *lenp contains the length of that name
+ * (>=0)
+ * NULL, on error
+ * if lenp is non-NULL *lenp contains an error code (<0):
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE
+ * tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE, standard meanings
+ */
+const char *fdt_get_name(const void *fdt, int nodeoffset, int *lenp);
+
+/**
+ * fdt_first_property_offset - find the offset of a node's first property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: structure block offset of a node
+ *
+ * fdt_first_property_offset() finds the first property of the node at
+ * the given structure block offset.
+ *
+ * returns:
+ * structure block offset of the property (>=0), on success
+ * -FDT_ERR_NOTFOUND, if the requested node has no properties
+ * -FDT_ERR_BADOFFSET, if nodeoffset did not point to an FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings.
+ */
+int fdt_first_property_offset(const void *fdt, int nodeoffset);
+
+/**
+ * fdt_next_property_offset - step through a node's properties
+ * @fdt: pointer to the device tree blob
+ * @offset: structure block offset of a property
+ *
+ * fdt_next_property_offset() finds the property immediately after the
+ * one at the given structure block offset. This will be a property
+ * of the same node as the given property.
+ *
+ * returns:
+ * structure block offset of the next property (>=0), on success
+ * -FDT_ERR_NOTFOUND, if the given property is the last in its node
+ * -FDT_ERR_BADOFFSET, if nodeoffset did not point to an FDT_PROP tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings.
+ */
+int fdt_next_property_offset(const void *fdt, int offset);
+
+/**
+ * fdt_for_each_property_offset - iterate over all properties of a node
+ *
+ * @property: property offset (int, lvalue)
+ * @fdt: FDT blob (const void *)
+ * @node: node offset (int)
+ *
+ * This is actually a wrapper around a for loop and would be used like so:
+ *
+ * fdt_for_each_property_offset(property, fdt, node) {
+ * Use property
+ * ...
+ * }
+ *
+ * if ((property < 0) && (property != -FDT_ERR_NOTFOUND)) {
+ * Error handling
+ * }
+ *
+ * Note that this is implemented as a macro and property is used as
+ * the iterator in the loop. The node variable can be constant or even a
+ * literal.
+ */
+#define fdt_for_each_property_offset(property, fdt, node) \
+ for (property = fdt_first_property_offset(fdt, node); \
+ property >= 0; \
+ property = fdt_next_property_offset(fdt, property))
+
+/**
+ * fdt_get_property_by_offset - retrieve the property at a given offset
+ * @fdt: pointer to the device tree blob
+ * @offset: offset of the property to retrieve
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_get_property_by_offset() retrieves a pointer to the
+ * fdt_property structure within the device tree blob at the given
+ * offset. If lenp is non-NULL, the length of the property value is
+ * also returned, in the integer pointed to by lenp.
+ *
+ * Note that this code only works on device tree versions >= 16. fdt_getprop()
+ * works on all versions.
+ *
+ * returns:
+ * pointer to the structure representing the property
+ * if lenp is non-NULL, *lenp contains the length of the property
+ * value (>=0)
+ * NULL, on error
+ * if lenp is non-NULL, *lenp contains an error code (<0):
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_PROP tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+const struct fdt_property *fdt_get_property_by_offset(const void *fdt,
+ int offset,
+ int *lenp);
+static inline struct fdt_property *fdt_get_property_by_offset_w(void *fdt,
+ int offset,
+ int *lenp)
+{
+ return (struct fdt_property *)(uintptr_t)
+ fdt_get_property_by_offset(fdt, offset, lenp);
+}
+
+/**
+ * fdt_get_property_namelen - find a property based on substring
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to find
+ * @name: name of the property to find
+ * @namelen: number of characters of name to consider
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * Identical to fdt_get_property(), but only examine the first namelen
+ * characters of name for matching the property name.
+ *
+ * Return: pointer to the structure representing the property, or NULL
+ * if not found
+ */
+#ifndef SWIG /* Not available in Python */
+const struct fdt_property *fdt_get_property_namelen(const void *fdt,
+ int nodeoffset,
+ const char *name,
+ int namelen, int *lenp);
+#endif
+
+/**
+ * fdt_get_property - find a given property in a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to find
+ * @name: name of the property to find
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_get_property() retrieves a pointer to the fdt_property
+ * structure within the device tree blob corresponding to the property
+ * named 'name' of the node at offset nodeoffset. If lenp is
+ * non-NULL, the length of the property value is also returned, in the
+ * integer pointed to by lenp.
+ *
+ * returns:
+ * pointer to the structure representing the property
+ * if lenp is non-NULL, *lenp contains the length of the property
+ * value (>=0)
+ * NULL, on error
+ * if lenp is non-NULL, *lenp contains an error code (<0):
+ * -FDT_ERR_NOTFOUND, node does not have named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE
+ * tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+const struct fdt_property *fdt_get_property(const void *fdt, int nodeoffset,
+ const char *name, int *lenp);
+static inline struct fdt_property *fdt_get_property_w(void *fdt, int nodeoffset,
+ const char *name,
+ int *lenp)
+{
+ return (struct fdt_property *)(uintptr_t)
+ fdt_get_property(fdt, nodeoffset, name, lenp);
+}
+
+/**
+ * fdt_getprop_by_offset - retrieve the value of a property at a given offset
+ * @fdt: pointer to the device tree blob
+ * @offset: offset of the property to read
+ * @namep: pointer to a string variable (will be overwritten) or NULL
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_getprop_by_offset() retrieves a pointer to the value of the
+ * property at structure block offset 'offset' (this will be a pointer
+ * to within the device blob itself, not a copy of the value). If
+ * lenp is non-NULL, the length of the property value is also
+ * returned, in the integer pointed to by lenp. If namep is non-NULL,
+ * the property's name will also be returned in the char * pointed to
+ * by namep (this will be a pointer to within the device tree's string
+ * block, not a new copy of the name).
+ *
+ * returns:
+ * pointer to the property's value
+ * if lenp is non-NULL, *lenp contains the length of the property
+ * value (>=0)
+ *		if namep is non-NULL *namep contains a pointer to the property
+ * name.
+ * NULL, on error
+ * if lenp is non-NULL, *lenp contains an error code (<0):
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_PROP tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+#ifndef SWIG /* This function is not useful in Python */
+const void *fdt_getprop_by_offset(const void *fdt, int offset,
+ const char **namep, int *lenp);
+#endif
+
+/**
+ * fdt_getprop_namelen - get property value based on substring
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to find
+ * @name: name of the property to find
+ * @namelen: number of characters of name to consider
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * Identical to fdt_getprop(), but only examine the first namelen
+ * characters of name for matching the property name.
+ *
+ * Return: pointer to the property's value or NULL on error
+ */
+#ifndef SWIG /* Not available in Python */
+const void *fdt_getprop_namelen(const void *fdt, int nodeoffset,
+ const char *name, int namelen, int *lenp);
+static inline void *fdt_getprop_namelen_w(void *fdt, int nodeoffset,
+ const char *name, int namelen,
+ int *lenp)
+{
+ return (void *)(uintptr_t)fdt_getprop_namelen(fdt, nodeoffset, name,
+ namelen, lenp);
+}
+#endif
+
+/**
+ * fdt_getprop - retrieve the value of a given property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to find
+ * @name: name of the property to find
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_getprop() retrieves a pointer to the value of the property
+ * named @name of the node at offset @nodeoffset (this will be a
+ * pointer to within the device blob itself, not a copy of the value).
+ * If @lenp is non-NULL, the length of the property value is also
+ * returned, in the integer pointed to by @lenp.
+ *
+ * returns:
+ * pointer to the property's value
+ * if lenp is non-NULL, *lenp contains the length of the property
+ * value (>=0)
+ * NULL, on error
+ * if lenp is non-NULL, *lenp contains an error code (<0):
+ * -FDT_ERR_NOTFOUND, node does not have named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE
+ * tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+const void *fdt_getprop(const void *fdt, int nodeoffset,
+ const char *name, int *lenp);
+static inline void *fdt_getprop_w(void *fdt, int nodeoffset,
+ const char *name, int *lenp)
+{
+ return (void *)(uintptr_t)fdt_getprop(fdt, nodeoffset, name, lenp);
+}
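As an illustrative sketch (not part of the header): reading properties from a valid, size-checked blob at `fdt`. The "/chosen" path and "bootargs" follow common practice, while the 32-bit "my-cell" property is a made-up name used only for the example.

        #include <stdio.h>
        #include <libfdt.h>

        static void show_chosen(const void *fdt)
        {
                int node, len;
                const char *args;
                const fdt32_t *cell;

                node = fdt_path_offset(fdt, "/chosen");
                if (node < 0)
                        return;

                /* String property: the value is NUL-terminated inside the blob. */
                args = fdt_getprop(fdt, node, "bootargs", &len);
                if (args != NULL)
                        printf("bootargs (%d bytes): %s\n", len, args);

                /* 32-bit property (hypothetical name): stored big-endian. */
                cell = fdt_getprop(fdt, node, "my-cell", &len);
                if (cell != NULL && len == (int)sizeof(*cell))
                        printf("my-cell = %u\n", fdt32_to_cpu(*cell));
        }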
+
+/**
+ * fdt_get_phandle - retrieve the phandle of a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: structure block offset of the node
+ *
+ * fdt_get_phandle() retrieves the phandle of the device tree node at
+ * structure block offset nodeoffset.
+ *
+ * returns:
+ * the phandle of the node at nodeoffset, on success (!= 0, != -1)
+ * 0, if the node has no phandle, or another error occurs
+ */
+uint32_t fdt_get_phandle(const void *fdt, int nodeoffset);
+
+/**
+ * fdt_get_alias_namelen - get alias based on substring
+ * @fdt: pointer to the device tree blob
+ * @name: name of the alias to look up
+ * @namelen: number of characters of name to consider
+ *
+ * Identical to fdt_get_alias(), but only examine the first @namelen
+ * characters of @name for matching the alias name.
+ *
+ * Return: a pointer to the expansion of the alias named @name, if it exists,
+ * NULL otherwise
+ */
+#ifndef SWIG /* Not available in Python */
+const char *fdt_get_alias_namelen(const void *fdt,
+ const char *name, int namelen);
+#endif
+
+/**
+ * fdt_get_alias - retrieve the path referenced by a given alias
+ * @fdt: pointer to the device tree blob
+ * @name: name of the alias to look up
+ *
+ * fdt_get_alias() retrieves the value of a given alias. That is, the
+ * value of the property named @name in the node /aliases.
+ *
+ * returns:
+ * a pointer to the expansion of the alias named 'name', if it exists
+ * NULL, if the given alias or the /aliases node does not exist
+ */
+const char *fdt_get_alias(const void *fdt, const char *name);
+
+/**
+ * fdt_get_path - determine the full path of a node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose path to find
+ * @buf: character buffer to contain the returned path (will be overwritten)
+ * @buflen: size of the character buffer at buf
+ *
+ * fdt_get_path() computes the full path of the node at offset
+ * nodeoffset, and records that path in the buffer at buf.
+ *
+ * NOTE: This function is expensive, as it must scan the device tree
+ * structure from the start to nodeoffset.
+ *
+ * returns:
+ * 0, on success
+ * buf contains the absolute path of the node at
+ * nodeoffset, as a NUL-terminated string.
+ * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ * -FDT_ERR_NOSPACE, the path of the given node is longer than (buflen-1)
+ * characters and will not fit in the given buffer.
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen);
+
+/**
+ * fdt_supernode_atdepth_offset - find a specific ancestor of a node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose ancestor to find
+ * @supernodedepth: depth of the ancestor to find
+ * @nodedepth: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_supernode_atdepth_offset() finds an ancestor of the given node
+ * at a specific depth from the root (where the root itself has depth
+ * 0, its immediate subnodes depth 1 and so forth). So
+ * fdt_supernode_atdepth_offset(fdt, nodeoffset, 0, NULL);
+ * will always return 0, the offset of the root node. If the node at
+ * nodeoffset has depth D, then:
+ * fdt_supernode_atdepth_offset(fdt, nodeoffset, D, NULL);
+ * will return nodeoffset itself.
+ *
+ * NOTE: This function is expensive, as it must scan the device tree
+ * structure from the start to nodeoffset.
+ *
+ * returns:
+ * structure block offset of the node at nodeoffset's ancestor
+ * of depth supernodedepth (>=0), on success
+ * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ * -FDT_ERR_NOTFOUND, supernodedepth was greater than the depth of
+ * nodeoffset
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_supernode_atdepth_offset(const void *fdt, int nodeoffset,
+ int supernodedepth, int *nodedepth);
+
+/**
+ * fdt_node_depth - find the depth of a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose depth to find
+ *
+ * fdt_node_depth() finds the depth of a given node. The root node
+ * has depth 0, its immediate subnodes depth 1 and so forth.
+ *
+ * NOTE: This function is expensive, as it must scan the device tree
+ * structure from the start to nodeoffset.
+ *
+ * returns:
+ * depth of the node at nodeoffset (>=0), on success
+ * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_node_depth(const void *fdt, int nodeoffset);
+
+/**
+ * fdt_parent_offset - find the parent of a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose parent to find
+ *
+ * fdt_parent_offset() locates the parent node of a given node (that
+ * is, it finds the offset of the node which contains the node at
+ * nodeoffset as a subnode).
+ *
+ * NOTE: This function is expensive, as it must scan the device tree
+ * structure from the start to nodeoffset, *twice*.
+ *
+ * returns:
+ * structure block offset of the parent of the node at nodeoffset
+ * (>=0), on success
+ * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_parent_offset(const void *fdt, int nodeoffset);
+
+/**
+ * fdt_node_offset_by_prop_value - find nodes with a given property value
+ * @fdt: pointer to the device tree blob
+ * @startoffset: only find nodes after this offset
+ * @propname: property name to check
+ * @propval: property value to search for
+ * @proplen: length of the value in propval
+ *
+ * fdt_node_offset_by_prop_value() returns the offset of the first
+ * node after startoffset, which has a property named propname whose
+ * value is of length proplen and has value equal to propval; or if
+ * startoffset is -1, the very first such node in the tree.
+ *
+ * To iterate through all nodes matching the criterion, the following
+ * idiom can be used:
+ * offset = fdt_node_offset_by_prop_value(fdt, -1, propname,
+ * propval, proplen);
+ * while (offset != -FDT_ERR_NOTFOUND) {
+ * // other code here
+ * offset = fdt_node_offset_by_prop_value(fdt, offset, propname,
+ * propval, proplen);
+ * }
+ *
+ * Note the -1 in the first call to the function; if 0 is used here
+ * instead, the function will never locate the root node, even if it
+ * matches the criterion.
+ *
+ * returns:
+ * structure block offset of the located node (>= 0, >startoffset),
+ * on success
+ * -FDT_ERR_NOTFOUND, no node matching the criterion exists in the
+ * tree after startoffset
+ * -FDT_ERR_BADOFFSET, startoffset does not refer to a BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_node_offset_by_prop_value(const void *fdt, int startoffset,
+ const char *propname,
+ const void *propval, int proplen);
+
+/**
+ * fdt_node_offset_by_phandle - find the node with a given phandle
+ * @fdt: pointer to the device tree blob
+ * @phandle: phandle value
+ *
+ * fdt_node_offset_by_phandle() returns the offset of the node
+ * which has the given phandle value. If there is more than one node
+ * in the tree with the given phandle (an invalid tree), results are
+ * undefined.
+ *
+ * returns:
+ * structure block offset of the located node (>= 0), on success
+ * -FDT_ERR_NOTFOUND, no node with that phandle exists
+ * -FDT_ERR_BADPHANDLE, given phandle value was invalid (0 or -1)
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle);
+
+/**
+ * fdt_node_check_compatible - check a node's compatible property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of a tree node
+ * @compatible: string to match against
+ *
+ * fdt_node_check_compatible() returns 0 if the given node contains a
+ * @compatible property with the given string as one of its elements,
+ * it returns non-zero otherwise, or on error.
+ *
+ * returns:
+ * 0, if the node has a 'compatible' property listing the given string
+ * 1, if the node has a 'compatible' property, but it does not list
+ * the given string
+ * -FDT_ERR_NOTFOUND, if the given node has no 'compatible' property
+ * -FDT_ERR_BADOFFSET, if nodeoffset does not refer to a BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_node_check_compatible(const void *fdt, int nodeoffset,
+ const char *compatible);
+
+/**
+ * fdt_node_offset_by_compatible - find nodes with a given 'compatible' value
+ * @fdt: pointer to the device tree blob
+ * @startoffset: only find nodes after this offset
+ * @compatible: 'compatible' string to match against
+ *
+ * fdt_node_offset_by_compatible() returns the offset of the first
+ * node after startoffset, which has a 'compatible' property which
+ * lists the given compatible string; or if startoffset is -1, the
+ * very first such node in the tree.
+ *
+ * To iterate through all nodes matching the criterion, the following
+ * idiom can be used:
+ * offset = fdt_node_offset_by_compatible(fdt, -1, compatible);
+ * while (offset != -FDT_ERR_NOTFOUND) {
+ * // other code here
+ * offset = fdt_node_offset_by_compatible(fdt, offset, compatible);
+ * }
+ *
+ * Note the -1 in the first call to the function; if 0 is used here
+ * instead, the function will never locate the root node, even if it
+ * matches the criterion.
+ *
+ * returns:
+ * structure block offset of the located node (>= 0, >startoffset),
+ * on success
+ * -FDT_ERR_NOTFOUND, no node matching the criterion exists in the
+ * tree after startoffset
+ * -FDT_ERR_BADOFFSET, startoffset does not refer to a BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_node_offset_by_compatible(const void *fdt, int startoffset,
+ const char *compatible);
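The iteration idiom above, written out as a compilable sketch; the "vendor,my-device" compatible string is a placeholder, and fdt_get_name() is declared elsewhere in this header.

        #include <stdio.h>
        #include <libfdt.h>

        static int count_compatible(const void *fdt)
        {
                int offset, count = 0;

                for (offset = fdt_node_offset_by_compatible(fdt, -1, "vendor,my-device");
                     offset >= 0;
                     offset = fdt_node_offset_by_compatible(fdt, offset, "vendor,my-device")) {
                        printf("match at %d: %s\n", offset,
                               fdt_get_name(fdt, offset, NULL));
                        count++;
                }

                /* A clean end of iteration leaves -FDT_ERR_NOTFOUND in offset. */
                return (offset == -FDT_ERR_NOTFOUND) ? count : offset;
        }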
+
+/**
+ * fdt_stringlist_contains - check a string list property for a string
+ * @strlist: Property containing a list of strings to check
+ * @listlen: Length of property
+ * @str: String to search for
+ *
+ * This is a utility function provided for convenience. The list contains
+ * one or more strings, each terminated by \0, as is found in a device tree
+ * "compatible" property.
+ *
+ * Return: 1 if the string is found in the list, 0 if it is not found or the
+ * list is invalid
+ */
+int fdt_stringlist_contains(const char *strlist, int listlen, const char *str);
+
+/**
+ * fdt_stringlist_count - count the number of strings in a string list
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of a tree node
+ * @property: name of the property containing the string list
+ *
+ * Return:
+ * the number of strings in the given property
+ * -FDT_ERR_BADVALUE if the property value is not NUL-terminated
+ * -FDT_ERR_NOTFOUND if the property does not exist
+ */
+int fdt_stringlist_count(const void *fdt, int nodeoffset, const char *property);
+
+/**
+ * fdt_stringlist_search - find a string in a string list and return its index
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of a tree node
+ * @property: name of the property containing the string list
+ * @string: string to look up in the string list
+ *
+ * Note that it is possible for this function to succeed on property values
+ * that are not NUL-terminated. That's because the function will stop after
+ * finding the first occurrence of @string. This can for example happen with
+ * small-valued cell properties, such as #address-cells, when searching for
+ * the empty string.
+ *
+ * return:
+ * the index of the string in the list of strings
+ * -FDT_ERR_BADVALUE if the property value is not NUL-terminated
+ * -FDT_ERR_NOTFOUND if the property does not exist or does not contain
+ * the given string
+ */
+int fdt_stringlist_search(const void *fdt, int nodeoffset, const char *property,
+ const char *string);
+
+/**
+ * fdt_stringlist_get() - obtain the string at a given index in a string list
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of a tree node
+ * @property: name of the property containing the string list
+ * @index: index of the string to return
+ * @lenp: return location for the string length or an error code on failure
+ *
+ * Note that this will successfully extract strings from properties with
+ * non-NUL-terminated values. For example on small-valued cell properties
+ * this function will return the empty string.
+ *
+ * If non-NULL, the length of the string (on success) or a negative error-code
+ * (on failure) will be stored in the integer pointed to by lenp.
+ *
+ * Return:
+ * A pointer to the string at the given index in the string list or NULL on
+ * failure. On success the length of the string will be stored in the memory
+ * location pointed to by the lenp parameter, if non-NULL. On failure one of
+ * the following negative error codes will be returned in the lenp parameter
+ * (if non-NULL):
+ * -FDT_ERR_BADVALUE if the property value is not NUL-terminated
+ * -FDT_ERR_NOTFOUND if the property does not exist
+ */
+const char *fdt_stringlist_get(const void *fdt, int nodeoffset,
+ const char *property, int index,
+ int *lenp);
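A small usage sketch for the string-list helpers, assuming the node really carries a "clock-names" string list (a common binding, but an assumption here):

        #include <stdio.h>
        #include <libfdt.h>

        static void list_clock_names(const void *fdt, int node)
        {
                int i, len;
                int n = fdt_stringlist_count(fdt, node, "clock-names");

                for (i = 0; i < n; i++) {
                        const char *s = fdt_stringlist_get(fdt, node, "clock-names",
                                                           i, &len);
                        if (s != NULL)
                                printf("clock %d: %s\n", i, s);
                }
        }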
+
+/**********************************************************************/
+/* Read-only functions (addressing related) */
+/**********************************************************************/
+
+/**
+ * FDT_MAX_NCELLS - maximum value for #address-cells and #size-cells
+ *
+ * This is the maximum value for #address-cells, #size-cells and
+ * similar properties that will be processed by libfdt. IEEE 1275
+ * requires that OF implementations handle values up to 4.
+ * Implementations may support larger values, but in practice higher
+ * values aren't used.
+ */
+#define FDT_MAX_NCELLS 4
+
+/**
+ * fdt_address_cells - retrieve address size for a bus represented in the tree
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node to find the address size for
+ *
+ * When the node has a valid #address-cells property, returns its value.
+ *
+ * returns:
+ * 0 <= n < FDT_MAX_NCELLS, on success
+ * 2, if the node has no #address-cells property
+ * -FDT_ERR_BADNCELLS, if the node has a badly formatted or invalid
+ * #address-cells property
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_address_cells(const void *fdt, int nodeoffset);
+
+/**
+ * fdt_size_cells - retrieve address range size for a bus represented in the
+ * tree
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node to find the address range size for
+ *
+ * When the node has a valid #size-cells property, returns its value.
+ *
+ * returns:
+ * 0 <= n < FDT_MAX_NCELLS, on success
+ * 1, if the node has no #size-cells property
+ * -FDT_ERR_BADNCELLS, if the node has a badly formatted or invalid
+ * #size-cells property
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_size_cells(const void *fdt, int nodeoffset);
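A hedged sketch combining fdt_address_cells() and fdt_size_cells() to decode the first "reg" entry of a node; it assumes the parent bus uses no more than two cells per value, which is the common case:

        #include <stdint.h>
        #include <libfdt.h>

        /* Fold 'cells' big-endian cells (1 or 2 of them) into a 64-bit value. */
        static uint64_t read_cells(const fdt32_t *p, int cells)
        {
                uint64_t v = 0;

                while (cells-- > 0)
                        v = (v << 32) | fdt32_to_cpu(*p++);
                return v;
        }

        static int first_reg(const void *fdt, int node, uint64_t *addr, uint64_t *size)
        {
                int parent, ac, sc, len;
                const fdt32_t *reg;

                parent = fdt_parent_offset(fdt, node);
                if (parent < 0)
                        return parent;

                ac = fdt_address_cells(fdt, parent);
                sc = fdt_size_cells(fdt, parent);
                if (ac < 0 || sc < 0 || ac > 2 || sc > 2)
                        return -FDT_ERR_BADNCELLS;

                reg = fdt_getprop(fdt, node, "reg", &len);
                if (reg == NULL || len < (ac + sc) * (int)sizeof(fdt32_t))
                        return (len < 0) ? len : -FDT_ERR_NOTFOUND;

                *addr = read_cells(reg, ac);
                *size = read_cells(reg + ac, sc);
                return 0;
        }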
+
+
+/**********************************************************************/
+/* Write-in-place functions */
+/**********************************************************************/
+
+/**
+ * fdt_setprop_inplace_namelen_partial - change a property's value,
+ * but not its size
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @namelen: number of characters of name to consider
+ * @idx: index of the property to change in the array
+ * @val: pointer to data to replace the property value with
+ * @len: length of the property value
+ *
+ * Identical to fdt_setprop_inplace(), but modifies the given property
+ * starting from the given index, and using only the first namelen characters
+ * of the name. It is useful when you want to manipulate only one value of
+ * an array and you have a string that doesn't end with \0.
+ *
+ * Return: 0 on success, negative libfdt error value otherwise
+ */
+#ifndef SWIG /* Not available in Python */
+int fdt_setprop_inplace_namelen_partial(void *fdt, int nodeoffset,
+ const char *name, int namelen,
+ uint32_t idx, const void *val,
+ int len);
+#endif
+
+/**
+ * fdt_setprop_inplace - change a property's value, but not its size
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: pointer to data to replace the property value with
+ * @len: length of the property value
+ *
+ * fdt_setprop_inplace() replaces the value of a given property with
+ * the data in val, of length len. This function cannot change the
+ * size of a property, and so will only work if len is equal to the
+ * current length of the property.
+ *
+ * This function will alter only the bytes in the blob which contain
+ * the given property value, and will not alter or move any other part
+ * of the tree.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, if len is not equal to the property's current length
+ * -FDT_ERR_NOTFOUND, node does not have the named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+#ifndef SWIG /* Not available in Python */
+int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name,
+ const void *val, int len);
+#endif
+
+/**
+ * fdt_setprop_inplace_u32 - change the value of a 32-bit integer property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: 32-bit integer value to replace the property with
+ *
+ * fdt_setprop_inplace_u32() replaces the value of a given property
+ * with the 32-bit integer value in val, converting val to big-endian
+ * if necessary. This function cannot change the size of a property,
+ * and so will only work if the property already exists and has length
+ * 4.
+ *
+ * This function will alter only the bytes in the blob which contain
+ * the given property value, and will not alter or move any other part
+ * of the tree.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, if the property's length is not equal to 4
+ * -FDT_ERR_NOTFOUND, node does not have the named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_setprop_inplace_u32(void *fdt, int nodeoffset,
+ const char *name, uint32_t val)
+{
+ fdt32_t tmp = cpu_to_fdt32(val);
+ return fdt_setprop_inplace(fdt, nodeoffset, name, &tmp, sizeof(tmp));
+}
+
+/**
+ * fdt_setprop_inplace_u64 - change the value of a 64-bit integer property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: 64-bit integer value to replace the property with
+ *
+ * fdt_setprop_inplace_u64() replaces the value of a given property
+ * with the 64-bit integer value in val, converting val to big-endian
+ * if necessary. This function cannot change the size of a property,
+ * and so will only work if the property already exists and has length
+ * 8.
+ *
+ * This function will alter only the bytes in the blob which contain
+ * the given property value, and will not alter or move any other part
+ * of the tree.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, if the property's length is not equal to 8
+ * -FDT_ERR_NOTFOUND, node does not have the named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_setprop_inplace_u64(void *fdt, int nodeoffset,
+ const char *name, uint64_t val)
+{
+ fdt64_t tmp = cpu_to_fdt64(val);
+ return fdt_setprop_inplace(fdt, nodeoffset, name, &tmp, sizeof(tmp));
+}
+
+/**
+ * fdt_setprop_inplace_cell - change the value of a single-cell property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node containing the property
+ * @name: name of the property to change the value of
+ * @val: new value of the 32-bit cell
+ *
+ * This is an alternative name for fdt_setprop_inplace_u32()
+ * Return: 0 on success, negative libfdt error number otherwise.
+ */
+static inline int fdt_setprop_inplace_cell(void *fdt, int nodeoffset,
+ const char *name, uint32_t val)
+{
+ return fdt_setprop_inplace_u32(fdt, nodeoffset, name, val);
+}
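A short sketch of the in-place family; it assumes the node already carries a 6-byte "local-mac-address" and a hypothetical 4-byte "timeout-ms" property, since in-place writes cannot change a property's size:

        #include <stdint.h>
        #include <libfdt.h>

        static int patch_node(void *fdt, int node)
        {
                static const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
                int err;

                /* Only succeeds if the property already exists with length 6. */
                err = fdt_setprop_inplace(fdt, node, "local-mac-address",
                                          mac, sizeof(mac));
                if (err != 0)
                        return err;

                /* Helper form for an existing 4-byte cell (made-up name). */
                return fdt_setprop_inplace_u32(fdt, node, "timeout-ms", 1000);
        }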
+
+/**
+ * fdt_nop_property - replace a property with nop tags
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to nop
+ * @name: name of the property to nop
+ *
+ * fdt_nop_property() will replace a given property's representation
+ * in the blob with FDT_NOP tags, effectively removing it from the
+ * tree.
+ *
+ * This function will alter only the bytes in the blob which contain
+ * the property, and will not alter or move any other part of the
+ * tree.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOTFOUND, node does not have the named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_nop_property(void *fdt, int nodeoffset, const char *name);
+
+/**
+ * fdt_nop_node - replace a node (subtree) with nop tags
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node to nop
+ *
+ * fdt_nop_node() will replace a given node's representation in the
+ * blob, including all its subnodes, if any, with FDT_NOP tags,
+ * effectively removing it from the tree.
+ *
+ * This function will alter only the bytes in the blob which contain
+ * the node and its properties and subnodes, and will not alter or
+ * move any other part of the tree.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_nop_node(void *fdt, int nodeoffset);
+
+/**********************************************************************/
+/* Sequential write functions */
+/**********************************************************************/
+
+/* fdt_create_with_flags flags */
+#define FDT_CREATE_FLAG_NO_NAME_DEDUP 0x1
+ /* FDT_CREATE_FLAG_NO_NAME_DEDUP: Do not try to de-duplicate property
+ * names in the fdt. This can result in faster creation times, but
+ * a larger fdt. */
+
+#define FDT_CREATE_FLAGS_ALL (FDT_CREATE_FLAG_NO_NAME_DEDUP)
+
+/**
+ * fdt_create_with_flags - begin creation of a new fdt
+ * @buf: pointer to memory allocated where fdt will be created
+ * @bufsize: size of the memory space at buf
+ * @flags: a valid combination of FDT_CREATE_FLAG_ flags, or 0.
+ *
+ * fdt_create_with_flags() begins the process of creating a new fdt with
+ * the sequential write interface.
+ *
+ * The fdt creation process must end with fdt_finish() to produce a valid fdt.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, bufsize is insufficient for a minimal fdt
+ * -FDT_ERR_BADFLAGS, flags is not valid
+ */
+int fdt_create_with_flags(void *buf, int bufsize, uint32_t flags);
+
+/**
+ * fdt_create - begin creation of a new fdt
+ * @buf: pointer to memory allocated where fdt will be created
+ * @bufsize: size of the memory space at buf
+ *
+ * fdt_create() is equivalent to fdt_create_with_flags() with flags=0.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, bufsize is insufficient for a minimal fdt
+ */
+int fdt_create(void *buf, int bufsize);
+
+int fdt_resize(void *fdt, void *buf, int bufsize);
+int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size);
+int fdt_finish_reservemap(void *fdt);
+int fdt_begin_node(void *fdt, const char *name);
+int fdt_property(void *fdt, const char *name, const void *val, int len);
+static inline int fdt_property_u32(void *fdt, const char *name, uint32_t val)
+{
+ fdt32_t tmp = cpu_to_fdt32(val);
+ return fdt_property(fdt, name, &tmp, sizeof(tmp));
+}
+static inline int fdt_property_u64(void *fdt, const char *name, uint64_t val)
+{
+ fdt64_t tmp = cpu_to_fdt64(val);
+ return fdt_property(fdt, name, &tmp, sizeof(tmp));
+}
+
+#ifndef SWIG /* Not available in Python */
+static inline int fdt_property_cell(void *fdt, const char *name, uint32_t val)
+{
+ return fdt_property_u32(fdt, name, val);
+}
+#endif
+
+/**
+ * fdt_property_placeholder - add a new property and return a ptr to its value
+ *
+ * @fdt: pointer to the device tree blob
+ * @name: name of property to add
+ * @len: length of property value in bytes
+ * @valp: returns a pointer to where the value should be placed
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_NOSPACE, standard meanings
+ */
+int fdt_property_placeholder(void *fdt, const char *name, int len, void **valp);
+
+#define fdt_property_string(fdt, name, str) \
+ fdt_property(fdt, name, str, strlen(str)+1)
+int fdt_end_node(void *fdt);
+int fdt_finish(void *fdt);
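A sketch of the sequential-write flow from fdt_create() to fdt_finish(); the node and property names are placeholders, and the CHECK() helper exists only to keep the sketch short:

        #include <libfdt.h>

        #define CHECK(expr) do { int _e = (expr); if (_e != 0) return _e; } while (0)

        static int build_minimal_fdt(void *buf, int bufsize)
        {
                CHECK(fdt_create(buf, bufsize));
                CHECK(fdt_finish_reservemap(buf));
                CHECK(fdt_begin_node(buf, ""));                 /* root node */
                CHECK(fdt_property_string(buf, "compatible", "vendor,board"));
                CHECK(fdt_begin_node(buf, "chosen"));
                CHECK(fdt_property_string(buf, "bootargs", "console=ttyS0"));
                CHECK(fdt_end_node(buf));                       /* /chosen */
                CHECK(fdt_end_node(buf));                       /* / */
                return fdt_finish(buf);
        }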
+
+/**********************************************************************/
+/* Read-write functions */
+/**********************************************************************/
+
+int fdt_create_empty_tree(void *buf, int bufsize);
+int fdt_open_into(const void *fdt, void *buf, int bufsize);
+int fdt_pack(void *fdt);
+
+/**
+ * fdt_add_mem_rsv - add one memory reserve map entry
+ * @fdt: pointer to the device tree blob
+ * @address: 64-bit start address of the reserve map entry
+ * @size: 64-bit size of the reserved region
+ *
+ * Adds a reserve map entry to the given blob reserving a region at
+ * the given address, with the given size.
+ *
+ * This function will insert data into the reserve map and will
+ * therefore change the indexes of some entries in the table.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new reservation entry
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_add_mem_rsv(void *fdt, uint64_t address, uint64_t size);
+
+/**
+ * fdt_del_mem_rsv - remove a memory reserve map entry
+ * @fdt: pointer to the device tree blob
+ * @n: entry to remove
+ *
+ * fdt_del_mem_rsv() removes the n-th memory reserve map entry from
+ * the blob.
+ *
+ * This function will delete data from the reservation table and will
+ * therefore change the indexes of some entries in the table.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOTFOUND, there is no entry of the given index (i.e. there
+ * are fewer than n+1 reserve map entries)
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_del_mem_rsv(void *fdt, int n);
+
+/**
+ * fdt_set_name - change the name of a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: structure block offset of a node
+ * @name: name to give the node
+ *
+ * fdt_set_name() replaces the name (including unit address, if any)
+ * of the given node with the given string. NOTE: this function can't
+ * efficiently check if the new name is unique amongst the given
+ * node's siblings; results are undefined if this function is invoked
+ * with a name equal to that of one of the given node's siblings.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob
+ * to contain the new name
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE, standard meanings
+ */
+int fdt_set_name(void *fdt, int nodeoffset, const char *name);
+
+/**
+ * fdt_setprop - create or change a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: pointer to data to set the property value to
+ * @len: length of the property value
+ *
+ * fdt_setprop() sets the value of the named property in the given
+ * node to the given value and length, creating the property if it
+ * does not already exist.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_setprop(void *fdt, int nodeoffset, const char *name,
+ const void *val, int len);
+
+/**
+ * fdt_setprop_placeholder - allocate space for a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @len: length of the property value
+ * @prop_data: return pointer to property data
+ *
+ * fdt_setprop_placeholder() allocates the named property in the given node.
+ * If the property exists it is resized. In either case a pointer to the
+ * property data is returned.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_setprop_placeholder(void *fdt, int nodeoffset, const char *name,
+ int len, void **prop_data);
+
+/**
+ * fdt_setprop_u32 - set a property to a 32-bit integer
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: 32-bit integer value for the property (native endian)
+ *
+ * fdt_setprop_u32() sets the value of the named property in the given
+ * node to the given 32-bit integer value (converting to big-endian if
+ * necessary), or creates a new property with that value if it does
+ * not already exist.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_setprop_u32(void *fdt, int nodeoffset, const char *name,
+ uint32_t val)
+{
+ fdt32_t tmp = cpu_to_fdt32(val);
+ return fdt_setprop(fdt, nodeoffset, name, &tmp, sizeof(tmp));
+}
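A sketch of the usual read-write flow: copy and expand with fdt_open_into(), edit, then trim with fdt_pack(). The "/soc/uart@1000" path and the property values are placeholders.

        #include <libfdt.h>

        static int edit_copy(const void *src, void *dst, int dstsize)
        {
                int err, node;

                err = fdt_open_into(src, dst, dstsize);
                if (err != 0)
                        return err;

                node = fdt_path_offset(dst, "/soc/uart@1000");
                if (node < 0)
                        return node;

                err = fdt_setprop_u32(dst, node, "clock-frequency", 24000000);
                if (err != 0)
                        return err;

                err = fdt_setprop_string(dst, node, "status", "okay");
                if (err != 0)
                        return err;

                return fdt_pack(dst);
        }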
+
+/**
+ * fdt_setprop_u64 - set a property to a 64-bit integer
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: 64-bit integer value for the property (native endian)
+ *
+ * fdt_setprop_u64() sets the value of the named property in the given
+ * node to the given 64-bit integer value (converting to big-endian if
+ * necessary), or creates a new property with that value if it does
+ * not already exist.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_setprop_u64(void *fdt, int nodeoffset, const char *name,
+ uint64_t val)
+{
+ fdt64_t tmp = cpu_to_fdt64(val);
+ return fdt_setprop(fdt, nodeoffset, name, &tmp, sizeof(tmp));
+}
+
+/**
+ * fdt_setprop_cell - set a property to a single cell value
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: 32-bit integer value for the property (native endian)
+ *
+ * This is an alternative name for fdt_setprop_u32()
+ *
+ * Return: 0 on success, negative libfdt error value otherwise.
+ */
+static inline int fdt_setprop_cell(void *fdt, int nodeoffset, const char *name,
+ uint32_t val)
+{
+ return fdt_setprop_u32(fdt, nodeoffset, name, val);
+}
+
+/**
+ * fdt_setprop_string - set a property to a string value
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @str: string value for the property
+ *
+ * fdt_setprop_string() sets the value of the named property in the
+ * given node to the given string value (using the length of the
+ * string to determine the new length of the property), or creates a
+ * new property with that value if it does not already exist.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+#define fdt_setprop_string(fdt, nodeoffset, name, str) \
+ fdt_setprop((fdt), (nodeoffset), (name), (str), strlen(str)+1)
+
+
+/**
+ * fdt_setprop_empty - set a property to an empty value
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ *
+ * fdt_setprop_empty() sets the value of the named property in the
+ * given node to an empty (zero length) value, or creates a new empty
+ * property if it does not already exist.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+#define fdt_setprop_empty(fdt, nodeoffset, name) \
+ fdt_setprop((fdt), (nodeoffset), (name), NULL, 0)
+
+/**
+ * fdt_appendprop - append to or create a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to append to
+ * @val: pointer to data to append to the property value
+ * @len: length of the data to append to the property value
+ *
+ * fdt_appendprop() appends the value to the named property in the
+ * given node, creating the property if it does not already exist.
+ *
+ * This function may insert data into the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_appendprop(void *fdt, int nodeoffset, const char *name,
+ const void *val, int len);
+
+/**
+ * fdt_appendprop_u32 - append a 32-bit integer value to a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: 32-bit integer value to append to the property (native endian)
+ *
+ * fdt_appendprop_u32() appends the given 32-bit integer value
+ * (converting to big-endian if necessary) to the value of the named
+ * property in the given node, or creates a new property with that
+ * value if it does not already exist.
+ *
+ * This function may insert data into the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_appendprop_u32(void *fdt, int nodeoffset,
+ const char *name, uint32_t val)
+{
+ fdt32_t tmp = cpu_to_fdt32(val);
+ return fdt_appendprop(fdt, nodeoffset, name, &tmp, sizeof(tmp));
+}
+
+/**
+ * fdt_appendprop_u64 - append a 64-bit integer value to a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: 64-bit integer value to append to the property (native endian)
+ *
+ * fdt_appendprop_u64() appends the given 64-bit integer value
+ * (converting to big-endian if necessary) to the value of the named
+ * property in the given node, or creates a new property with that
+ * value if it does not already exist.
+ *
+ * This function may insert data into the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_appendprop_u64(void *fdt, int nodeoffset,
+ const char *name, uint64_t val)
+{
+ fdt64_t tmp = cpu_to_fdt64(val);
+ return fdt_appendprop(fdt, nodeoffset, name, &tmp, sizeof(tmp));
+}
+
+/**
+ * fdt_appendprop_cell - append a single cell value to a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: 32-bit integer value to append to the property (native endian)
+ *
+ * This is an alternative name for fdt_appendprop_u32()
+ *
+ * Return: 0 on success, negative libfdt error value otherwise.
+ */
+static inline int fdt_appendprop_cell(void *fdt, int nodeoffset,
+ const char *name, uint32_t val)
+{
+ return fdt_appendprop_u32(fdt, nodeoffset, name, val);
+}
+
+/**
+ * fdt_appendprop_string - append a string to a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @str: string value to append to the property
+ *
+ * fdt_appendprop_string() appends the given string to the value of
+ * the named property in the given node, or creates a new property
+ * with that value if it does not already exist.
+ *
+ * This function may insert data into the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+#define fdt_appendprop_string(fdt, nodeoffset, name, str) \
+ fdt_appendprop((fdt), (nodeoffset), (name), (str), strlen(str)+1)
+
+/**
+ * fdt_appendprop_addrrange - append an address range property
+ * @fdt: pointer to the device tree blob
+ * @parent: offset of the parent node
+ * @nodeoffset: offset of the node to add a property at
+ * @name: name of property
+ * @addr: start address of a given range
+ * @size: size of a given range
+ *
+ * fdt_appendprop_addrrange() appends an address range value (start
+ * address and size) to the value of the named property in the given
+ * node, or creates a new property with that value if it does not
+ * already exist.
+ * If "name" is not specified, a default "reg" is used.
+ * Cell sizes are determined by parent's #address-cells and #size-cells.
+ *
+ * This function may insert data into the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADNCELLS, if the node has a badly formatted or invalid
+ * #address-cells property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADVALUE, addr or size doesn't fit to respective cells size
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain a new property
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_appendprop_addrrange(void *fdt, int parent, int nodeoffset,
+ const char *name, uint64_t addr, uint64_t size);
+
+/**
+ * fdt_delprop - delete a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to delete
+ * @name: name of the property to delete
+ *
+ * fdt_delprop() will delete the given property.
+ *
+ * This function will delete data from the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOTFOUND, node does not have the named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_delprop(void *fdt, int nodeoffset, const char *name);
+
+/**
+ * fdt_add_subnode_namelen - creates a new node based on substring
+ * @fdt: pointer to the device tree blob
+ * @parentoffset: structure block offset of a node
+ * @name: name of the subnode to create
+ * @namelen: number of characters of name to consider
+ *
+ * Identical to fdt_add_subnode(), but use only the first @namelen
+ * characters of @name as the name of the new node. This is useful for
+ * creating subnodes based on a portion of a larger string, such as a
+ * full path.
+ *
+ * Return: structure block offset of the created subnode (>=0),
+ * negative libfdt error value otherwise
+ */
+#ifndef SWIG /* Not available in Python */
+int fdt_add_subnode_namelen(void *fdt, int parentoffset,
+ const char *name, int namelen);
+#endif
+
+/**
+ * fdt_add_subnode - creates a new node
+ * @fdt: pointer to the device tree blob
+ * @parentoffset: structure block offset of a node
+ * @name: name of the subnode to create
+ *
+ * fdt_add_subnode() creates a new node as a subnode of the node at
+ * structure block offset parentoffset, with the given name (which
+ * should include the unit address, if any).
+ *
+ * This function will insert data into the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ * structure block offset of the created subnode (>=0), on
+ * success
+ * -FDT_ERR_NOTFOUND, if the requested subnode does not exist
+ * -FDT_ERR_BADOFFSET, if parentoffset did not point to an FDT_BEGIN_NODE
+ * tag
+ * -FDT_ERR_EXISTS, if the node at parentoffset already has a subnode of
+ * the given name
+ * -FDT_ERR_NOSPACE, if there is insufficient free space in the
+ * blob to contain the new node
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings.
+ */
+int fdt_add_subnode(void *fdt, int parentoffset, const char *name);
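A sketch of adding a subnode idempotently; the "/reserved-memory" parent, the node name and the "no-map" property follow a common binding but are assumptions here, and fdt_subnode_offset() is declared earlier in this header.

        #include <libfdt.h>

        static int add_secure_carveout(void *fdt)
        {
                int parent, node;

                parent = fdt_path_offset(fdt, "/reserved-memory");
                if (parent < 0)
                        return parent;

                node = fdt_add_subnode(fdt, parent, "secure@80000000");
                if (node == -FDT_ERR_EXISTS)
                        node = fdt_subnode_offset(fdt, parent, "secure@80000000");
                if (node < 0)
                        return node;

                return fdt_setprop_empty(fdt, node, "no-map");
        }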
+
+/**
+ * fdt_del_node - delete a node (subtree)
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node to nop
+ *
+ * fdt_del_node() will remove the given node, including all its
+ * subnodes if any, from the blob.
+ *
+ * This function will delete data from the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_del_node(void *fdt, int nodeoffset);
+
+/**
+ * fdt_overlay_apply - Applies a DT overlay on a base DT
+ * @fdt: pointer to the base device tree blob
+ * @fdto: pointer to the device tree overlay blob
+ *
+ * fdt_overlay_apply() will apply the given device tree overlay on the
+ * given base device tree.
+ *
+ * Expect the base device tree to be modified, even if the function
+ * returns an error.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there's not enough space in the base device tree
+ * -FDT_ERR_NOTFOUND, the overlay points to some nonexistent nodes or
+ * properties in the base DT
+ * -FDT_ERR_BADPHANDLE,
+ * -FDT_ERR_BADOVERLAY,
+ * -FDT_ERR_NOPHANDLES,
+ * -FDT_ERR_INTERNAL,
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADOFFSET,
+ * -FDT_ERR_BADPATH,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_overlay_apply(void *fdt, void *fdto);
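Because the base tree may be left modified on a failed merge, a common pattern is to apply the overlay to a scratch copy first; a hedged sketch (sizing the buffers is the caller's responsibility):

        #include <libfdt.h>

        static int apply_overlay_safely(void *base, int basesize, void *fdto,
                                        void *scratch, int scratchsize)
        {
                int err;

                err = fdt_open_into(base, scratch, scratchsize);
                if (err != 0)
                        return err;

                /* Note: the overlay blob itself is also modified by this call. */
                err = fdt_overlay_apply(scratch, fdto);
                if (err != 0)
                        return err;             /* base is still intact */

                /* Commit the merged tree back into the original buffer. */
                return fdt_open_into(scratch, base, basesize);
        }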
+
+/**
+ * fdt_overlay_target_offset - retrieves the offset of a fragment's target
+ * @fdt: Base device tree blob
+ * @fdto: Device tree overlay blob
+ * @fragment_offset: node offset of the fragment in the overlay
+ * @pathp: pointer which receives the path of the target (or NULL)
+ *
+ * fdt_overlay_target_offset() retrieves the target offset in the base
+ * device tree of a fragment, no matter how the actual targeting is
+ * done (through a phandle or a path).
+ *
+ * returns:
+ * the targeted node offset in the base device tree
+ * Negative error code on error
+ */
+int fdt_overlay_target_offset(const void *fdt, const void *fdto,
+ int fragment_offset, char const **pathp);
+
+/**********************************************************************/
+/* Debugging / informational functions */
+/**********************************************************************/
+
+const char *fdt_strerror(int errval);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* LIBFDT_H */
diff --git a/include/lib/libfdt/libfdt_env.h b/include/lib/libfdt/libfdt_env.h
new file mode 100644
index 0000000..73b6d40
--- /dev/null
+++ b/include/lib/libfdt/libfdt_env.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */
+#ifndef LIBFDT_ENV_H
+#define LIBFDT_ENV_H
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ * Copyright 2012 Kim Phillips, Freescale Semiconductor.
+ */
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+#ifdef __CHECKER__
+#define FDT_FORCE __attribute__((force))
+#define FDT_BITWISE __attribute__((bitwise))
+#else
+#define FDT_FORCE
+#define FDT_BITWISE
+#endif
+
+typedef uint16_t FDT_BITWISE fdt16_t;
+typedef uint32_t FDT_BITWISE fdt32_t;
+typedef uint64_t FDT_BITWISE fdt64_t;
+
+#define EXTRACT_BYTE(x, n) ((unsigned long long)((uint8_t *)&x)[n])
+#define CPU_TO_FDT16(x) ((EXTRACT_BYTE(x, 0) << 8) | EXTRACT_BYTE(x, 1))
+#define CPU_TO_FDT32(x) ((EXTRACT_BYTE(x, 0) << 24) | (EXTRACT_BYTE(x, 1) << 16) | \
+ (EXTRACT_BYTE(x, 2) << 8) | EXTRACT_BYTE(x, 3))
+#define CPU_TO_FDT64(x) ((EXTRACT_BYTE(x, 0) << 56) | (EXTRACT_BYTE(x, 1) << 48) | \
+ (EXTRACT_BYTE(x, 2) << 40) | (EXTRACT_BYTE(x, 3) << 32) | \
+ (EXTRACT_BYTE(x, 4) << 24) | (EXTRACT_BYTE(x, 5) << 16) | \
+ (EXTRACT_BYTE(x, 6) << 8) | EXTRACT_BYTE(x, 7))
+
+static inline uint16_t fdt16_to_cpu(fdt16_t x)
+{
+ return (FDT_FORCE uint16_t)CPU_TO_FDT16(x);
+}
+static inline fdt16_t cpu_to_fdt16(uint16_t x)
+{
+ return (FDT_FORCE fdt16_t)CPU_TO_FDT16(x);
+}
+
+static inline uint32_t fdt32_to_cpu(fdt32_t x)
+{
+ return (FDT_FORCE uint32_t)CPU_TO_FDT32(x);
+}
+static inline fdt32_t cpu_to_fdt32(uint32_t x)
+{
+ return (FDT_FORCE fdt32_t)CPU_TO_FDT32(x);
+}
+
+static inline uint64_t fdt64_to_cpu(fdt64_t x)
+{
+ return (FDT_FORCE uint64_t)CPU_TO_FDT64(x);
+}
+static inline fdt64_t cpu_to_fdt64(uint64_t x)
+{
+ return (FDT_FORCE fdt64_t)CPU_TO_FDT64(x);
+}
+#undef CPU_TO_FDT64
+#undef CPU_TO_FDT32
+#undef CPU_TO_FDT16
+#undef EXTRACT_BYTE
+
+#ifdef __APPLE__
+#include <AvailabilityMacros.h>
+
+/* strnlen() is not available on Mac OS < 10.7 */
+# if !defined(MAC_OS_X_VERSION_10_7) || (MAC_OS_X_VERSION_MAX_ALLOWED < \
+ MAC_OS_X_VERSION_10_7)
+
+#define strnlen fdt_strnlen
+
+/*
+ * fdt_strnlen: returns the length of a string or max_count, whichever is
+ * smaller.
+ * Input 1 string: the string whose size is to be determined
+ * Input 2 max_count: the maximum value returned by this function
+ * Output: length of the string or max_count (the smaller of the two)
+ */
+static inline size_t fdt_strnlen(const char *string, size_t max_count)
+{
+ const char *p = memchr(string, 0, max_count);
+ return p ? p - string : max_count;
+}
+
+#endif /* !defined(MAC_OS_X_VERSION_10_7) || (MAC_OS_X_VERSION_MAX_ALLOWED <
+ MAC_OS_X_VERSION_10_7) */
+
+#endif /* __APPLE__ */
+
+#endif /* LIBFDT_ENV_H */
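A standalone sketch of what these conversion helpers guarantee: the fdtXX_t types always hold big-endian bytes, whatever the host endianness, and a round trip recovers the native value. The constant is arbitrary.

        #include <assert.h>
        #include <string.h>
        #include <libfdt.h>

        int main(void)
        {
                uint8_t raw[4];
                fdt32_t be = cpu_to_fdt32(0x11223344u);

                memcpy(raw, &be, sizeof(raw));
                assert(raw[0] == 0x11 && raw[3] == 0x44);   /* big-endian storage */
                assert(fdt32_to_cpu(be) == 0x11223344u);    /* round trip */
                return 0;
        }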
diff --git a/include/lib/mmio.h b/include/lib/mmio.h
new file mode 100644
index 0000000..591d7b6
--- /dev/null
+++ b/include/lib/mmio.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2013-2014, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MMIO_H
+#define MMIO_H
+
+#include <stdint.h>
+
+static inline void mmio_write_8(uintptr_t addr, uint8_t value)
+{
+ *(volatile uint8_t*)addr = value;
+}
+
+static inline uint8_t mmio_read_8(uintptr_t addr)
+{
+ return *(volatile uint8_t*)addr;
+}
+
+static inline void mmio_write_16(uintptr_t addr, uint16_t value)
+{
+ *(volatile uint16_t*)addr = value;
+}
+
+static inline uint16_t mmio_read_16(uintptr_t addr)
+{
+ return *(volatile uint16_t*)addr;
+}
+
+static inline void mmio_clrsetbits_16(uintptr_t addr,
+ uint16_t clear,
+ uint16_t set)
+{
+ mmio_write_16(addr, (mmio_read_16(addr) & ~clear) | set);
+}
+
+static inline void mmio_write_32(uintptr_t addr, uint32_t value)
+{
+ *(volatile uint32_t*)addr = value;
+}
+
+static inline uint32_t mmio_read_32(uintptr_t addr)
+{
+ return *(volatile uint32_t*)addr;
+}
+
+static inline void mmio_write_64(uintptr_t addr, uint64_t value)
+{
+ *(volatile uint64_t*)addr = value;
+}
+
+static inline uint64_t mmio_read_64(uintptr_t addr)
+{
+ return *(volatile uint64_t*)addr;
+}
+
+static inline void mmio_clrbits_32(uintptr_t addr, uint32_t clear)
+{
+ mmio_write_32(addr, mmio_read_32(addr) & ~clear);
+}
+
+static inline void mmio_setbits_32(uintptr_t addr, uint32_t set)
+{
+ mmio_write_32(addr, mmio_read_32(addr) | set);
+}
+
+static inline void mmio_clrsetbits_32(uintptr_t addr,
+ uint32_t clear,
+ uint32_t set)
+{
+ mmio_write_32(addr, (mmio_read_32(addr) & ~clear) | set);
+}
+
+#endif /* MMIO_H */
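A usage sketch for the read-modify-write helpers; the device, register layout and addresses below are entirely made up.

        #include <stdint.h>
        #include <lib/mmio.h>

        #define UART_CTRL       0x09000030UL    /* hypothetical register address */
        #define CTRL_BAUD_MASK  0x0000fff0U
        #define CTRL_BAUD_SHIFT 4
        #define CTRL_ENABLE     0x00000001U

        static void uart_setup(uint32_t baud_div)
        {
                /* Replace only the baud divisor field, preserving other bits. */
                mmio_clrsetbits_32(UART_CTRL, CTRL_BAUD_MASK,
                                   (baud_div << CTRL_BAUD_SHIFT) & CTRL_BAUD_MASK);

                /* Then set the enable bit with a plain read-modify-write. */
                mmio_setbits_32(UART_CTRL, CTRL_ENABLE);
        }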
diff --git a/include/lib/mpmm/mpmm.h b/include/lib/mpmm/mpmm.h
new file mode 100644
index 0000000..955c530
--- /dev/null
+++ b/include/lib/mpmm/mpmm.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MPMM_H
+#define MPMM_H
+
+#include <stdbool.h>
+
+#include <platform_def.h>
+
+/*
+ * Enable the Maximum Power Mitigation Mechanism.
+ *
+ * This function will enable MPMM for the current core. The AMU counters
+ * representing the MPMM gears must have been configured and enabled prior to
+ * calling this function.
+ */
+void mpmm_enable(void);
+
+/*
+ * MPMM core data.
+ *
+ * This structure represents per-core data retrieved from the hardware
+ * configuration device tree.
+ */
+struct mpmm_core {
+ /*
+ * Whether MPMM is supported.
+ *
+ * Cores with support for MPMM offer one or more auxiliary AMU counters
+ * representing MPMM gears.
+ */
+ bool supported;
+};
+
+/*
+ * MPMM topology.
+ *
+ * This topology structure describes the system-wide representation of the
+ * information retrieved from the hardware configuration device tree.
+ */
+struct mpmm_topology {
+ struct mpmm_core cores[PLATFORM_CORE_COUNT]; /* Per-core data */
+};
+
+#if !ENABLE_MPMM_FCONF
+/*
+ * Retrieve the platform's MPMM topology. A `NULL` return value is treated as a
+ * non-fatal error, in which case MPMM will not be enabled for any core.
+ */
+const struct mpmm_topology *plat_mpmm_topology(void);
+#endif /* !ENABLE_MPMM_FCONF */
+
+#endif /* MPMM_H */
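When ENABLE_MPMM_FCONF is 0, the platform supplies plat_mpmm_topology() itself; a minimal sketch for a hypothetical platform on which every core supports MPMM (it uses the GCC range-designator extension, as TF-A platform code commonly does):

        #include <lib/mpmm/mpmm.h>

        /* Declare MPMM support on every core of this made-up platform. */
        static const struct mpmm_topology plat_topology = {
                .cores = {
                        [0 ... PLATFORM_CORE_COUNT - 1] = { .supported = true },
                },
        };

        const struct mpmm_topology *plat_mpmm_topology(void)
        {
                return &plat_topology;
        }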
diff --git a/include/lib/object_pool.h b/include/lib/object_pool.h
new file mode 100644
index 0000000..49584eb
--- /dev/null
+++ b/include/lib/object_pool.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2018-2020, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef OBJECT_POOL_H
+#define OBJECT_POOL_H
+
+#include <stdlib.h>
+
+#include <common/debug.h>
+#include <lib/utils_def.h>
+
+/*
+ * Pool of statically allocated objects.
+ *
+ * Objects can be reserved but not freed. This is by design and it is not a
+ * limitation. We do not want to introduce complexity induced by memory freeing,
+ * such as use-after-free bugs, memory fragmentation and so on.
+ *
+ * The object size and capacity of the pool are fixed at build time. So is the
+ * address of the objects back store.
+ */
+struct object_pool {
+ /* Size of one object in the pool, in bytes. */
+ const size_t obj_size;
+
+ /* Number of objects in the pool. */
+ const size_t capacity;
+
+ /* Objects back store. */
+ void *const objects;
+
+ /* How many objects are currently allocated. */
+ size_t used;
+};
+
+/* Create a static pool of objects. */
+#define OBJECT_POOL(_pool_name, _obj_backstore, _obj_size, _obj_count) \
+ struct object_pool _pool_name = { \
+ .objects = (_obj_backstore), \
+ .obj_size = (_obj_size), \
+ .capacity = (_obj_count), \
+ .used = 0U, \
+ }
+
+/* Create a static pool of objects out of an array of pre-allocated objects. */
+#define OBJECT_POOL_ARRAY(_pool_name, _obj_array) \
+ OBJECT_POOL(_pool_name, (_obj_array), \
+ sizeof((_obj_array)[0]), ARRAY_SIZE(_obj_array))
+
+/*
+ * Allocate 'count' objects from a pool.
+ * Return the address of the first object. Panic on error.
+ */
+static inline void *pool_alloc_n(struct object_pool *pool, size_t count)
+{
+ if ((pool->used + count) > pool->capacity) {
+ ERROR("Cannot allocate %zu objects out of pool (%zu objects left).\n",
+ count, pool->capacity - pool->used);
+ panic();
+ }
+
+ void *obj = (char *)(pool->objects) + (pool->obj_size * pool->used);
+ pool->used += count;
+ return obj;
+}
+
+/*
+ * Allocate 1 object from a pool.
+ * Return the address of the object. Panic on error.
+ */
+static inline void *pool_alloc(struct object_pool *pool)
+{
+ return pool_alloc_n(pool, 1U);
+}
+
+#endif /* OBJECT_POOL_H */
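
A brief usage sketch; the 'ctx' structure and the pool size are made up for illustration:

#include <lib/object_pool.h>

struct ctx {
	int id;
};

/* Back the pool with ten statically allocated objects. */
static struct ctx ctx_store[10];
static OBJECT_POOL_ARRAY(ctx_pool, ctx_store);

struct ctx *ctx_create(int id)
{
	/* Panics if the pool is exhausted; objects are never given back. */
	struct ctx *c = pool_alloc(&ctx_pool);

	c->id = id;
	return c;
}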
diff --git a/include/lib/optee_utils.h b/include/lib/optee_utils.h
new file mode 100644
index 0000000..e1e9d80
--- /dev/null
+++ b/include/lib/optee_utils.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef OPTEE_UTILS_H
+#define OPTEE_UTILS_H
+
+#include <stdbool.h>
+
+#include <common/bl_common.h>
+
+bool optee_header_is_valid(uintptr_t header_base);
+
+int parse_optee_header(entry_point_info_t *header_ep,
+ image_info_t *pager_image_info,
+ image_info_t *paged_image_info);
+
+/*
+ * load_addr_hi and load_addr_lo: image load address.
+ * image_id: 0 - pager, 1 - paged
+ * size: image size in bytes.
+ */
+typedef struct optee_image {
+ uint32_t load_addr_hi;
+ uint32_t load_addr_lo;
+ uint32_t image_id;
+ uint32_t size;
+} optee_image_t;
+
+#define OPTEE_PAGER_IMAGE_ID 0
+#define OPTEE_PAGED_IMAGE_ID 1
+
+#define OPTEE_MAX_NUM_IMAGES 2u
+
+#define TEE_MAGIC_NUM_OPTEE 0x4554504f
+/*
+ * magic: header magic number.
+ * version: OPTEE header version:
+ * 1 - not supported
+ * 2 - supported
+ * arch: OPTEE os architecture type: 0 - AARCH32, 1 - AARCH64.
+ * flags: currently unused.
+ * nb_images: number of images.
+ */
+typedef struct optee_header {
+ uint32_t magic;
+ uint8_t version;
+ uint8_t arch;
+ uint16_t flags;
+ uint32_t nb_images;
+ optee_image_t optee_image_list[];
+} optee_header_t;
+
+#endif /* OPTEE_UTILS_H */
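
A sketch of how a loader might combine the two helpers, assuming (as this sketch does) that the raw OP-TEE header has been loaded at the address held in the entry point's 'pc' field:

#include <lib/optee_utils.h>

static int example_handle_optee(entry_point_info_t *ep,
				image_info_t *pager_info,
				image_info_t *paged_info)
{
	/* Only parse the header if its magic/version fields check out. */
	if (!optee_header_is_valid(ep->pc)) {
		return -1;
	}

	return parse_optee_header(ep, pager_info, paged_info);
}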
diff --git a/include/lib/pmf/aarch32/pmf_asm_macros.S b/include/lib/pmf/aarch32/pmf_asm_macros.S
new file mode 100644
index 0000000..1dbb408
--- /dev/null
+++ b/include/lib/pmf/aarch32/pmf_asm_macros.S
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PMF_ASM_MACROS_S
+#define PMF_ASM_MACROS_S
+
+#define PMF_TS_SIZE 8
+
+ /*
+ * This macro calculates the address of the per-cpu timestamp
+ * for the given service name and local timestamp id.
+ * Clobbers: r0 - r4
+ */
+ .macro pmf_calc_timestamp_addr _name, _tid
+ mov r4, lr
+ bl plat_my_core_pos
+ mov lr, r4
+ ldr r1, =__PERCPU_TIMESTAMP_SIZE__
+ mov r2, #(\_tid * PMF_TS_SIZE)
+ mla r0, r0, r1, r2
+ ldr r1, =pmf_ts_mem_\_name
+ add r0, r0, r1
+ .endm
+
+#endif /* PMF_ASM_MACROS_S */
diff --git a/include/lib/pmf/aarch64/pmf_asm_macros.S b/include/lib/pmf/aarch64/pmf_asm_macros.S
new file mode 100644
index 0000000..792ede9
--- /dev/null
+++ b/include/lib/pmf/aarch64/pmf_asm_macros.S
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016-2018, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PMF_ASM_MACROS_S
+#define PMF_ASM_MACROS_S
+
+#define PMF_TS_SIZE 8
+
+ /*
+ * This macro calculates the address of the per-cpu timestamp
+ * for the given service name and local timestamp id.
+ * Clobbers: x0 - x9
+ */
+ .macro pmf_calc_timestamp_addr _name, _tid
+ mov x9, x30
+ bl plat_my_core_pos
+ mov x30, x9
+ adr x2, __PMF_PERCPU_TIMESTAMP_END__
+ adr x1, __PMF_TIMESTAMP_START__
+ sub x1, x2, x1
+ mov x2, #(\_tid * PMF_TS_SIZE)
+ madd x0, x0, x1, x2
+ adr x1, pmf_ts_mem_\_name
+ add x0, x0, x1
+ .endm
+
+#endif /* PMF_ASM_MACROS_S */
diff --git a/include/lib/pmf/pmf.h b/include/lib/pmf/pmf.h
new file mode 100644
index 0000000..9d901e2
--- /dev/null
+++ b/include/lib/pmf/pmf.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2016-2020, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PMF_H
+#define PMF_H
+
+#include <lib/cassert.h>
+#include <lib/pmf/pmf_helpers.h>
+#include <lib/utils_def.h>
+
+/*
+ * Constants used for/by PMF services.
+ */
+#define PMF_ARM_TIF_IMPL_ID UL(0x41)
+#define PMF_TID_SHIFT 0
+#define PMF_TID_MASK (UL(0xFF) << PMF_TID_SHIFT)
+#define PMF_SVC_ID_SHIFT 10
+#define PMF_SVC_ID_MASK (UL(0x3F) << PMF_SVC_ID_SHIFT)
+#define PMF_IMPL_ID_SHIFT 24
+#define PMF_IMPL_ID_MASK (UL(0xFF) << PMF_IMPL_ID_SHIFT)
+
+/*
+ * Flags passed to PMF_REGISTER_SERVICE
+ */
+#define PMF_STORE_ENABLE (1 << 0)
+#define PMF_DUMP_ENABLE (1 << 1)
+
+/*
+ * Flags passed to PMF_GET_TIMESTAMP_XXX
+ * and PMF_CAPTURE_TIMESTAMP
+ */
+#define PMF_CACHE_MAINT (U(1) << 0)
+#define PMF_NO_CACHE_MAINT U(0)
+
+/*
+ * Defines for PMF SMC function ids.
+ */
+#define PMF_SMC_GET_TIMESTAMP_32 U(0x82000010)
+#define PMF_SMC_GET_TIMESTAMP_64 U(0xC2000010)
+#define PMF_NUM_SMC_CALLS 2
+
+/*
+ * The macros below are used to identify
+ * PMF calls from the SMC function ID.
+ */
+#define PMF_FID_MASK U(0xffe0)
+#define PMF_FID_VALUE U(0)
+#define is_pmf_fid(_fid) (((_fid) & PMF_FID_MASK) == PMF_FID_VALUE)
+
+/* The following are the supported PMF service IDs. */
+#define PMF_PSCI_STAT_SVC_ID 0
+#define PMF_RT_INSTR_SVC_ID 1
+
+/*******************************************************************************
+ * Function & variable prototypes
+ ******************************************************************************/
+/* PMF common functions */
+int pmf_get_timestamp_smc(unsigned int tid,
+ u_register_t mpidr,
+ unsigned int flags,
+ unsigned long long *ts_value);
+int pmf_setup(void);
+uintptr_t pmf_smc_handler(unsigned int smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags);
+
+#endif /* PMF_H */
diff --git a/include/lib/pmf/pmf_helpers.h b/include/lib/pmf/pmf_helpers.h
new file mode 100644
index 0000000..f5f040b
--- /dev/null
+++ b/include/lib/pmf/pmf_helpers.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2016-2020, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PMF_HELPERS_H
+#define PMF_HELPERS_H
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <plat/common/platform.h>
+
+/*
+ * Prototypes for PMF service functions.
+ */
+typedef int (*pmf_svc_init_t)(void);
+typedef unsigned long long (*pmf_svc_get_ts_t)(unsigned int tid,
+ u_register_t mpidr,
+ unsigned int flags);
+
+/*
+ * This is the definition of the PMF service descriptor.
+ */
+typedef struct pmf_svc_desc {
+ /* Structure version information */
+ param_header_t h;
+
+ /* Name of the PMF service */
+ const char *name;
+
+	/* PMF service config: Implementer id, Service id and total id */
+ unsigned int svc_config;
+
+ /* PMF service initialization handler */
+ pmf_svc_init_t init;
+
+ /* PMF service time-stamp retrieval handler */
+ pmf_svc_get_ts_t get_ts;
+} pmf_svc_desc_t;
+
+#if ENABLE_PMF
+/*
+ * Convenience macros for capturing time-stamp.
+ */
+#define PMF_DECLARE_CAPTURE_TIMESTAMP(_name) \
+ void pmf_capture_timestamp_with_cache_maint_ ## _name( \
+ unsigned int tid, \
+ unsigned long long ts); \
+ void pmf_capture_timestamp_ ## _name( \
+ unsigned int tid, \
+ unsigned long long ts);
+
+#define PMF_CAPTURE_TIMESTAMP(_name, _tid, _flags) \
+ do { \
+ unsigned long long ts = read_cntpct_el0(); \
+ if (((_flags) & PMF_CACHE_MAINT) != 0U) \
+ pmf_capture_timestamp_with_cache_maint_ ## _name((_tid), ts);\
+ else \
+ pmf_capture_timestamp_ ## _name((_tid), ts); \
+ } while (0)
+
+#define PMF_CAPTURE_AND_GET_TIMESTAMP(_name, _tid, _flags, _tsval) \
+ do { \
+ (_tsval) = read_cntpct_el0(); \
+ CASSERT(sizeof(_tsval) == sizeof(unsigned long long), invalid_tsval_size);\
+ if (((_flags) & PMF_CACHE_MAINT) != 0U) \
+ pmf_capture_timestamp_with_cache_maint_ ## _name((_tid), (_tsval));\
+ else \
+ pmf_capture_timestamp_ ## _name((_tid), (_tsval));\
+ } while (0)
+
+#define PMF_WRITE_TIMESTAMP(_name, _tid, _flags, _wrval) \
+ do { \
+ CASSERT(sizeof(_wrval) == sizeof(unsigned long long), invalid_wrval_size);\
+ if (((_flags) & PMF_CACHE_MAINT) != 0U) \
+ pmf_capture_timestamp_with_cache_maint_ ## _name((_tid), (_wrval));\
+ else \
+ pmf_capture_timestamp_ ## _name((_tid), (_wrval));\
+ } while (0)
+
+/*
+ * Convenience macros for retrieving time-stamp.
+ */
+#define PMF_DECLARE_GET_TIMESTAMP(_name) \
+ unsigned long long pmf_get_timestamp_by_index_ ## _name(\
+ unsigned int tid, \
+ unsigned int cpuid, \
+ unsigned int flags); \
+ unsigned long long pmf_get_timestamp_by_mpidr_ ## _name(\
+ unsigned int tid, \
+ u_register_t mpidr, \
+ unsigned int flags);
+
+#define PMF_GET_TIMESTAMP_BY_MPIDR(_name, _tid, _mpidr, _flags, _tsval)\
+ _tsval = pmf_get_timestamp_by_mpidr_ ## _name(_tid, _mpidr, _flags)
+
+#define PMF_GET_TIMESTAMP_BY_INDEX(_name, _tid, _cpuid, _flags, _tsval)\
+ _tsval = pmf_get_timestamp_by_index_ ## _name(_tid, _cpuid, _flags)
+
+/* Convenience macros to register a PMF service. */
+/*
+ * This macro is used to register a PMF Service. It allocates PMF memory
+ * and defines default service-specific PMF functions.
+ */
+#define PMF_REGISTER_SERVICE(_name, _svcid, _totalid, _flags) \
+ PMF_ALLOCATE_TIMESTAMP_MEMORY(_name, _totalid) \
+ PMF_DEFINE_CAPTURE_TIMESTAMP(_name, _flags) \
+ PMF_DEFINE_GET_TIMESTAMP(_name)
+
+/*
+ * This macro is used to register a PMF service, including an
+ * SMC interface to that service.
+ */
+#define PMF_REGISTER_SERVICE_SMC(_name, _svcid, _totalid, _flags)\
+ PMF_REGISTER_SERVICE(_name, _svcid, _totalid, _flags) \
+ PMF_DEFINE_SERVICE_DESC(_name, PMF_ARM_TIF_IMPL_ID, \
+ _svcid, _totalid, NULL, \
+ pmf_get_timestamp_by_mpidr_ ## _name)
+
+/*
+ * This macro is used to register a PMF service that has an SMC interface
+ * but provides its own service-specific PMF functions.
+ */
+#define PMF_REGISTER_SERVICE_SMC_OWN(_name, _implid, _svcid, _totalid, \
+ _init, _getts) \
+ PMF_DEFINE_SERVICE_DESC(_name, _implid, _svcid, _totalid, \
+ _init, _getts)
+
+#else
+
+#define PMF_REGISTER_SERVICE(_name, _svcid, _totalid, _flags)
+#define PMF_REGISTER_SERVICE_SMC(_name, _svcid, _totalid, _flags)
+#define PMF_REGISTER_SERVICE_SMC_OWN(_name, _implid, _svcid, _totalid, \
+ _init, _getts)
+#define PMF_DECLARE_CAPTURE_TIMESTAMP(_name)
+#define PMF_DECLARE_GET_TIMESTAMP(_name)
+#define PMF_CAPTURE_TIMESTAMP(_name, _tid, _flags)
+#define PMF_GET_TIMESTAMP_BY_MPIDR(_name, _tid, _mpidr, _flags, _tsval)
+#define PMF_GET_TIMESTAMP_BY_INDEX(_name, _tid, _cpuid, _flags, _tsval)
+
+#endif /* ENABLE_PMF */
+
+/*
+ * Convenience macro to allocate memory for a PMF service.
+ *
+ * The extern declaration is there to satisfy MISRA C-2012 rule 8.4.
+ */
+#define PMF_ALLOCATE_TIMESTAMP_MEMORY(_name, _total_id) \
+ extern unsigned long long pmf_ts_mem_ ## _name[_total_id]; \
+ unsigned long long pmf_ts_mem_ ## _name[_total_id] \
+ __aligned(CACHE_WRITEBACK_GRANULE) \
+ __section(".pmf_timestamp_array") \
+ __used;
+
+/*
+ * Convenience macro to validate tid index for the given TS array.
+ */
+#define PMF_VALIDATE_TID(_name, _tid) \
+ assert((_tid & PMF_TID_MASK) < (ARRAY_SIZE(pmf_ts_mem_ ## _name)))
+
+/*
+ * Convenience macros for capturing time-stamp.
+ *
+ * The extern declaration is there to satisfy MISRA C-2012 rule 8.4.
+ */
+#define PMF_DEFINE_CAPTURE_TIMESTAMP(_name, _flags) \
+ void pmf_capture_timestamp_ ## _name( \
+ unsigned int tid, \
+ unsigned long long ts) \
+ { \
+ CASSERT(_flags != 0, select_proper_config); \
+ PMF_VALIDATE_TID(_name, (uint64_t)tid); \
+ uintptr_t base_addr = (uintptr_t) pmf_ts_mem_ ## _name; \
+ if (((_flags) & PMF_STORE_ENABLE) != 0) \
+ __pmf_store_timestamp(base_addr, \
+ (uint64_t)tid, ts); \
+ if (((_flags) & PMF_DUMP_ENABLE) != 0) \
+ __pmf_dump_timestamp((uint64_t)tid, ts); \
+ } \
+ void pmf_capture_timestamp_with_cache_maint_ ## _name( \
+ unsigned int tid, \
+ unsigned long long ts) \
+ { \
+ CASSERT(_flags != 0, select_proper_config); \
+ PMF_VALIDATE_TID(_name, (uint64_t)tid); \
+ uintptr_t base_addr = (uintptr_t) pmf_ts_mem_ ## _name; \
+ if (((_flags) & PMF_STORE_ENABLE) != 0) \
+ __pmf_store_timestamp_with_cache_maint( \
+ base_addr, (uint64_t)tid, ts); \
+ if (((_flags) & PMF_DUMP_ENABLE) != 0) \
+ __pmf_dump_timestamp((uint64_t)tid, ts); \
+ }
+
+/*
+ * Convenience macros for retrieving time-stamp.
+ *
+ * The extern declaration is there to satisfy MISRA C-2012 rule 8.4.
+ */
+#define PMF_DEFINE_GET_TIMESTAMP(_name) \
+ unsigned long long pmf_get_timestamp_by_index_ ## _name( \
+ unsigned int tid, unsigned int cpuid, unsigned int flags)\
+ { \
+ PMF_VALIDATE_TID(_name, tid); \
+ uintptr_t base_addr = (uintptr_t) pmf_ts_mem_ ## _name; \
+ return __pmf_get_timestamp(base_addr, tid, cpuid, flags);\
+ } \
+ unsigned long long pmf_get_timestamp_by_mpidr_ ## _name( \
+ unsigned int tid, u_register_t mpidr, unsigned int flags)\
+ { \
+ PMF_VALIDATE_TID(_name, tid); \
+ uintptr_t base_addr = (uintptr_t) pmf_ts_mem_ ## _name; \
+ return __pmf_get_timestamp(base_addr, tid, \
+ plat_core_pos_by_mpidr(mpidr), flags); \
+ }
+
+/*
+ * Convenience macro to register a PMF service.
+ * This is needed for services that require SMC handling.
+ */
+#define PMF_DEFINE_SERVICE_DESC(_name, _implid, _svcid, _totalid, \
+ _init, _getts_by_mpidr) \
+ static const pmf_svc_desc_t __pmf_desc_ ## _name \
+ __section(".pmf_svc_descs") __used = { \
+ .h.type = PARAM_EP, \
+ .h.version = VERSION_1, \
+ .h.size = sizeof(pmf_svc_desc_t), \
+ .h.attr = 0, \
+ .name = #_name, \
+ .svc_config = ((((_implid) << PMF_IMPL_ID_SHIFT) & \
+ PMF_IMPL_ID_MASK) | \
+ (((_svcid) << PMF_SVC_ID_SHIFT) & \
+ PMF_SVC_ID_MASK) | \
+ (((_totalid) << PMF_TID_SHIFT) & \
+ PMF_TID_MASK)), \
+ .init = _init, \
+ .get_ts = _getts_by_mpidr \
+ };
+
+/* PMF internal functions */
+void __pmf_dump_timestamp(unsigned int tid, unsigned long long ts);
+void __pmf_store_timestamp(uintptr_t base_addr,
+ unsigned int tid,
+ unsigned long long ts);
+void __pmf_store_timestamp_with_cache_maint(uintptr_t base_addr,
+ unsigned int tid,
+ unsigned long long ts);
+unsigned long long __pmf_get_timestamp(uintptr_t base_addr,
+ unsigned int tid,
+ unsigned int cpuid,
+ unsigned int flags);
+#endif /* PMF_HELPERS_H */
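
A sketch of registering a hypothetical PMF service and capturing timestamps with it; the service name, service ID and timestamp indices are invented for the example:

#include <lib/pmf/pmf.h>

#define EXAMPLE_SVC_ID		0x10
#define EXAMPLE_TS_COUNT	U(2)
#define EXAMPLE_TS_START	U(0)
#define EXAMPLE_TS_END		U(1)

PMF_DECLARE_CAPTURE_TIMESTAMP(example_svc)

/* Allocates the timestamp array and defines the capture/get functions. */
PMF_REGISTER_SERVICE(example_svc, EXAMPLE_SVC_ID, EXAMPLE_TS_COUNT,
		     PMF_STORE_ENABLE)

void example_measured_operation(void)
{
	PMF_CAPTURE_TIMESTAMP(example_svc, EXAMPLE_TS_START, PMF_CACHE_MAINT);
	/* ... the work being measured ... */
	PMF_CAPTURE_TIMESTAMP(example_svc, EXAMPLE_TS_END, PMF_CACHE_MAINT);
}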
diff --git a/include/lib/psa/delegated_attestation.h b/include/lib/psa/delegated_attestation.h
new file mode 100644
index 0000000..7aaceb3
--- /dev/null
+++ b/include/lib/psa/delegated_attestation.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+/* This file describes the Delegated Attestation API */
+
+#ifndef DELEGATED_ATTESTATION_H
+#define DELEGATED_ATTESTATION_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "psa/error.h"
+
+/* RSS Delegated Attestation message types that distinguish its services. */
+#define RSS_DELEGATED_ATTEST_GET_DELEGATED_KEY 1001U
+#define RSS_DELEGATED_ATTEST_GET_PLATFORM_TOKEN 1002U
+
+/**
+ * The aim of these APIs is to obtain a derived signing key (private part only)
+ * for the delegated attestation model and the corresponding platform
+ * attestation token. In the delegated attestation model the final token
+ * consists of more than one subtoken, each signed by a different entity. There
+ * is a cryptographic binding between the tokens: the derived delegated
+ * attestation key is bound to the platform token (details below).
+ *
+ * Expected usage model:
+ * - First, the rss_delegated_attest_get_delegated_key() API needs to be called
+ *   to obtain the private part of the delegated attestation key. The public
+ *   part of the key is computed by the cryptographic library when the key is
+ *   registered.
+ * - Secondly, rss_delegated_attest_get_token() must be called to obtain the
+ *   platform attestation token. The hash of the public key (computed with the
+ *   hash_algo indicated in the rss_delegated_attest_get_delegated_key() call)
+ *   must be the input of this call. This ensures that nothing but the
+ *   previously derived delegated key is bindable to the platform token.
+ */
+
+/**
+ * Get a delegated attestation key (DAK).
+ *
+ * The aim of the delegated attestation key is to enable other SW components
+ * within the system to sign an attestation token which is different from the
+ * initial/platform token. The initial attestation token MUST contain the hash
+ * of the public delegated key to make a cryptographic binding (hash lock)
+ * between the key and the token.
+ * The initial attestation token has two roles in this scenario:
+ * - Attest the device boot status and security lifecycle.
+ * - Attest the delegated attestation key.
+ * The delegated attestation key is derived from a preprovisioned seed. The
+ * input for the key derivation is the platform boot status. The system can be
+ * attested with the two tokens together.
+ *
+ * ecc_curve The type of the elliptic curve to which the requested
+ * attestation key belongs. Please check the note section for
+ * limitations.
+ * key_bits The size of the requested attestation key, in bits.
+ * key_buf Pointer to the buffer where the delegated attestation key will
+ * be stored.
+ * key_buf_size Size of allocated buffer for the key, in bytes.
+ * key_size Size of the key that has been returned, in bytes.
+ * hash_algo The hash algorithm that will be used later by the owner of the
+ * requested delegated key for binding it to the platform
+ * attestation token.
+ *
+ * Returns error code as specified in psa_status_t.
+ *
+ * Notes:
+ * - Currently, only the PSA_ECC_FAMILY_SECP_R1 curve type is supported.
+ * - The delegated attestation key must be derived before requesting the
+ * platform attestation token as they are cryptographically linked together.
+ */
+psa_status_t
+rss_delegated_attest_get_delegated_key(uint8_t ecc_curve,
+ uint32_t key_bits,
+ uint8_t *key_buf,
+ size_t key_buf_size,
+ size_t *key_size,
+ uint32_t hash_algo);
+
+/**
+ * Get platform attestation token
+ *
+ * dak_pub_hash Pointer to buffer where the hash of the public DAK is
+ * stored.
+ * dak_pub_hash_size Size of the hash value, in bytes.
+ * token_buf Pointer to the buffer where the platform attestation token
+ * will be stored.
+ * token_buf_size Size of allocated buffer for token, in bytes.
+ * token_size Size of the token that has been returned, in bytes.
+ *
+ * Returns error code as specified in psa_status_t.
+ *
+ * A delegated attestation key must be derived before requesting the
+ * platform attestation token as they are cryptographically linked together.
+ * Otherwise, the token request will fail and the PSA_ERROR_INVALID_ARGUMENT
+ * code will be returned.
+ */
+psa_status_t
+rss_delegated_attest_get_token(const uint8_t *dak_pub_hash,
+ size_t dak_pub_hash_size,
+ uint8_t *token_buf,
+ size_t token_buf_size,
+ size_t *token_size);
+
+#endif /* DELEGATED_ATTESTATION_H */
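
A minimal sketch of the two-step flow described above. The curve family, key size, hash algorithm identifier and buffer sizes are assumptions borrowed from the PSA Crypto API; computing the hash of the public DAK is out of scope here and 'dak_pub_hash' is assumed to already hold it:

#include <delegated_attestation.h>
#include <psa/crypto.h>	/* assumed to provide PSA_ECC_FAMILY_SECP_R1 etc. */

psa_status_t example_get_attestation_material(const uint8_t *dak_pub_hash,
					      size_t dak_pub_hash_size,
					      uint8_t *token_buf,
					      size_t token_buf_size,
					      size_t *token_size)
{
	uint8_t dak_priv[48];	/* P-384 private key size */
	size_t dak_priv_size;
	psa_status_t status;

	/* Step 1: derive the private part of the delegated attestation key. */
	status = rss_delegated_attest_get_delegated_key(PSA_ECC_FAMILY_SECP_R1,
							384U, dak_priv,
							sizeof(dak_priv),
							&dak_priv_size,
							PSA_ALG_SHA_256);
	if (status != PSA_SUCCESS) {
		return status;
	}

	/*
	 * Step 2: request the platform token, passing the hash of the public
	 * part of the key derived in step 1 (assumed to be in dak_pub_hash).
	 */
	return rss_delegated_attest_get_token(dak_pub_hash, dak_pub_hash_size,
					      token_buf, token_buf_size,
					      token_size);
}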
diff --git a/include/lib/psa/measured_boot.h b/include/lib/psa/measured_boot.h
new file mode 100644
index 0000000..af624a6
--- /dev/null
+++ b/include/lib/psa/measured_boot.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PSA_MEASURED_BOOT_H
+#define PSA_MEASURED_BOOT_H
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "psa/error.h"
+
+/* Minimum measurement value size that can be requested to store */
+#define MEASUREMENT_VALUE_MIN_SIZE 32U
+/* Maximum measurement value size that can be requested to store */
+#define MEASUREMENT_VALUE_MAX_SIZE 64U
+/* Minimum signer id size that can be requested to store */
+#define SIGNER_ID_MIN_SIZE MEASUREMENT_VALUE_MIN_SIZE
+/* Maximum signer id size that can be requested to store */
+#define SIGNER_ID_MAX_SIZE MEASUREMENT_VALUE_MAX_SIZE
+/* The theoretical maximum image version is: "255.255.65535\0" */
+#define VERSION_MAX_SIZE 14U
+/* Example sw_type: "BL_2, BL_33, etc." */
+#define SW_TYPE_MAX_SIZE 20U
+#define NUM_OF_MEASUREMENT_SLOTS 32U
+
+
+/**
+ * Extends and stores a measurement to the requested slot.
+ *
+ * index Slot number in which measurement is to be stored
+ * signer_id Pointer to signer_id buffer.
+ * signer_id_size Size of the signer_id in bytes.
+ * version Pointer to version buffer.
+ * version_size Size of the version string in bytes.
+ * measurement_algo Algorithm identifier used for measurement.
+ * sw_type Pointer to sw_type buffer.
+ * sw_type_size Size of the sw_type string in bytes.
+ * measurement_value Pointer to measurement_value buffer.
+ * measurement_value_size Size of the measurement_value in bytes.
+ * lock_measurement Boolean flag requesting whether the measurement
+ * is to be locked.
+ *
+ * PSA_SUCCESS:
+ * - Success.
+ * PSA_ERROR_INVALID_ARGUMENT:
+ * - The size of any argument is invalid OR
+ * - Input Measurement value is NULL OR
+ * - Input Signer ID is NULL OR
+ * - Requested slot index is invalid.
+ * PSA_ERROR_BAD_STATE:
+ * - Request to lock, when slot is already locked.
+ * PSA_ERROR_NOT_PERMITTED:
+ * - When the requested slot is not accessible to the caller.
+ */
+
+/* Not a standard PSA API, just an extension; therefore, use the 'rss_' prefix
+ * rather than the usual 'psa_'.
+ */
+psa_status_t
+rss_measured_boot_extend_measurement(uint8_t index,
+ const uint8_t *signer_id,
+ size_t signer_id_size,
+ const uint8_t *version,
+ size_t version_size,
+ uint32_t measurement_algo,
+ const uint8_t *sw_type,
+ size_t sw_type_size,
+ const uint8_t *measurement_value,
+ size_t measurement_value_size,
+ bool lock_measurement);
+
+/**
+ * Retrieves a measurement from the requested slot.
+ *
+ * index Slot number from which measurement is to be
+ * retrieved.
+ * signer_id Pointer to signer_id buffer.
+ * signer_id_size Size of the signer_id buffer in bytes.
+ * signer_id_len On success, number of bytes that make up
+ * signer_id.
+ * version Pointer to version buffer.
+ * version_size Size of the version buffer in bytes.
+ * version_len             On success, number of bytes that make up the
+ * version.
+ * measurement_algo Pointer to measurement_algo.
+ * sw_type Pointer to sw_type buffer.
+ * sw_type_size Size of the sw_type buffer in bytes.
+ * sw_type_len             On success, number of bytes that make up the
+ * sw_type.
+ * measurement_value Pointer to measurement_value buffer.
+ * measurement_value_size Size of the measurement_value buffer in bytes.
+ * measurement_value_len On success, number of bytes that make up the
+ * measurement_value.
+ * is_locked Pointer to lock status of requested measurement
+ * slot.
+ *
+ * PSA_SUCCESS
+ * - Success.
+ * PSA_ERROR_INVALID_ARGUMENT
+ * - The size of at least one of the output buffers is incorrect or the
+ * requested slot index is invalid.
+ * PSA_ERROR_DOES_NOT_EXIST
+ * - The requested slot is empty; it does not contain a measurement.
+ */
+psa_status_t rss_measured_boot_read_measurement(uint8_t index,
+ uint8_t *signer_id,
+ size_t signer_id_size,
+ size_t *signer_id_len,
+ uint8_t *version,
+ size_t version_size,
+ size_t *version_len,
+ uint32_t *measurement_algo,
+ uint8_t *sw_type,
+ size_t sw_type_size,
+ size_t *sw_type_len,
+ uint8_t *measurement_value,
+ size_t measurement_value_size,
+ size_t *measurement_value_len,
+ bool *is_locked);
+
+#endif /* PSA_MEASURED_BOOT_H */
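
A sketch of extending a measurement for a hypothetical BL33 image; the slot index, algorithm identifier and metadata strings are placeholders (the algorithm value mirrors the PSA encoding of SHA-256):

#include <measured_boot.h>

#define EXAMPLE_BL33_SLOT	6U
#define EXAMPLE_ALG_SHA256	0x02000009U	/* assumed PSA SHA-256 id */

static psa_status_t example_record_bl33(const uint8_t *hash, size_t hash_size,
					const uint8_t *signer_id,
					size_t signer_id_size)
{
	const uint8_t version[] = "0.1.0";
	const uint8_t sw_type[] = "BL_33";

	return rss_measured_boot_extend_measurement(EXAMPLE_BL33_SLOT,
						    signer_id, signer_id_size,
						    version, sizeof(version),
						    EXAMPLE_ALG_SHA256,
						    sw_type, sizeof(sw_type),
						    hash, hash_size,
						    false /* do not lock */);
}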
diff --git a/include/lib/psa/psa/client.h b/include/lib/psa/psa/client.h
new file mode 100644
index 0000000..56fe028
--- /dev/null
+++ b/include/lib/psa/psa/client.h
@@ -0,0 +1,102 @@
+
+/*
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PSA_CLIENT_H
+#define PSA_CLIENT_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <psa/error.h>
+
+#ifndef IOVEC_LEN
+#define IOVEC_LEN(arr) ((uint32_t)(sizeof(arr)/sizeof(arr[0])))
+#endif
+/*********************** PSA Client Macros and Types *************************/
+/**
+ * The version of the PSA Framework API that is being used to build the calling
+ * firmware. Only a subset of the FF-M v1.1 features has been implemented.
+ * FF-M v1.1 is compatible with v1.0.
+ */
+#define PSA_FRAMEWORK_VERSION (0x0101u)
+/**
+ * Return value from psa_version() if the requested RoT Service is not present
+ * in the system.
+ */
+#define PSA_VERSION_NONE (0u)
+/**
+ * The zero-value null handle can be assigned to variables used in clients and
+ * RoT Services, indicating that there is no current connection or message.
+ */
+#define PSA_NULL_HANDLE ((psa_handle_t)0)
+/**
+ * Tests whether a handle value returned by psa_connect() is valid.
+ */
+#define PSA_HANDLE_IS_VALID(handle) ((psa_handle_t)(handle) > 0)
+/**
+ * Converts the handle value returned from a failed call to psa_connect() into
+ * an error code.
+ */
+#define PSA_HANDLE_TO_ERROR(handle) ((psa_status_t)(handle))
+/**
+ * Maximum number of input and output vectors for a request to psa_call().
+ */
+#define PSA_MAX_IOVEC (4u)
+/**
+ * An IPC message type that indicates a generic client request.
+ */
+#define PSA_IPC_CALL (0)
+typedef int32_t psa_handle_t;
+/**
+ * A read-only input memory region provided to an RoT Service.
+ */
+typedef struct psa_invec {
+ const void *base; /*!< the start address of the memory buffer */
+ size_t len; /*!< the size in bytes */
+} psa_invec;
+/**
+ * A writable output memory region provided to an RoT Service.
+ */
+typedef struct psa_outvec {
+ void *base; /*!< the start address of the memory buffer */
+ size_t len; /*!< the size in bytes */
+} psa_outvec;
+
+/**
+ * Call an RoT Service on an established connection.
+ *
+ * handle A handle to an established connection.
+ * type         The request type. Must be zero (PSA_IPC_CALL) or positive.
+ * in_vec Array of input psa_invec structures.
+ * in_len Number of input psa_invec structures.
+ * out_vec Array of output psa_outvec structures.
+ * out_len Number of output psa_outvec structures.
+ *
+ * Return value >=0 RoT Service-specific status value.
+ * Return value <0 RoT Service-specific error code.
+ *
+ * PSA_ERROR_PROGRAMMER_ERROR:
+ * - The connection has been terminated by the RoT Service.
+ *
+ * The call is a PROGRAMMER ERROR if one or more of the following are true:
+ * - An invalid handle was passed.
+ * - The connection is already handling a request.
+ * - type < 0.
+ * - An invalid memory reference was provided.
+ * - in_len + out_len > PSA_MAX_IOVEC.
+ * - The message is unrecognized by the RoT Service, or it is incorrectly
+ *   formatted.
+ */
+psa_status_t psa_call(psa_handle_t handle,
+ int32_t type,
+ const psa_invec *in_vec,
+ size_t in_len,
+ psa_outvec *out_vec,
+ size_t out_len);
+
+#endif /* PSA_CLIENT_H */
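
A usage sketch of psa_call() with the IOVEC_LEN helper; the handle value and message shape are placeholders:

#include <psa/client.h>

#define EXAMPLE_SERVICE_HANDLE	((psa_handle_t)0x40000100)

static psa_status_t example_request(const void *req, size_t req_len,
				    void *rsp, size_t rsp_len)
{
	psa_invec in_vec[] = {
		{ .base = req, .len = req_len },
	};
	psa_outvec out_vec[] = {
		{ .base = rsp, .len = rsp_len },
	};

	/* A generic request on an already established (stateless) handle. */
	return psa_call(EXAMPLE_SERVICE_HANDLE, PSA_IPC_CALL,
			in_vec, IOVEC_LEN(in_vec),
			out_vec, IOVEC_LEN(out_vec));
}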
diff --git a/include/lib/psa/psa/error.h b/include/lib/psa/psa/error.h
new file mode 100644
index 0000000..8a6eb7b
--- /dev/null
+++ b/include/lib/psa/psa/error.h
@@ -0,0 +1,42 @@
+
+/*
+ * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PSA_ERROR_H
+#define PSA_ERROR_H
+
+#include <stdint.h>
+
+typedef int32_t psa_status_t;
+
+#define PSA_SUCCESS ((psa_status_t)0)
+#define PSA_SUCCESS_REBOOT ((psa_status_t)1)
+#define PSA_SUCCESS_RESTART ((psa_status_t)2)
+#define PSA_ERROR_PROGRAMMER_ERROR ((psa_status_t)-129)
+#define PSA_ERROR_CONNECTION_REFUSED ((psa_status_t)-130)
+#define PSA_ERROR_CONNECTION_BUSY ((psa_status_t)-131)
+#define PSA_ERROR_GENERIC_ERROR ((psa_status_t)-132)
+#define PSA_ERROR_NOT_PERMITTED ((psa_status_t)-133)
+#define PSA_ERROR_NOT_SUPPORTED ((psa_status_t)-134)
+#define PSA_ERROR_INVALID_ARGUMENT ((psa_status_t)-135)
+#define PSA_ERROR_INVALID_HANDLE ((psa_status_t)-136)
+#define PSA_ERROR_BAD_STATE ((psa_status_t)-137)
+#define PSA_ERROR_BUFFER_TOO_SMALL ((psa_status_t)-138)
+#define PSA_ERROR_ALREADY_EXISTS ((psa_status_t)-139)
+#define PSA_ERROR_DOES_NOT_EXIST ((psa_status_t)-140)
+#define PSA_ERROR_INSUFFICIENT_MEMORY ((psa_status_t)-141)
+#define PSA_ERROR_INSUFFICIENT_STORAGE ((psa_status_t)-142)
+#define PSA_ERROR_INSUFFICIENT_DATA ((psa_status_t)-143)
+#define PSA_ERROR_SERVICE_FAILURE ((psa_status_t)-144)
+#define PSA_ERROR_COMMUNICATION_FAILURE ((psa_status_t)-145)
+#define PSA_ERROR_STORAGE_FAILURE ((psa_status_t)-146)
+#define PSA_ERROR_HARDWARE_FAILURE ((psa_status_t)-147)
+#define PSA_ERROR_INVALID_SIGNATURE ((psa_status_t)-149)
+#define PSA_ERROR_DEPENDENCY_NEEDED ((psa_status_t)-156)
+#define PSA_ERROR_CURRENTLY_INSTALLING ((psa_status_t)-157)
+
+#endif /* PSA_ERROR_H */
diff --git a/include/lib/psa/psa_manifest/sid.h b/include/lib/psa/psa_manifest/sid.h
new file mode 100644
index 0000000..7183112
--- /dev/null
+++ b/include/lib/psa/psa_manifest/sid.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PSA_MANIFEST_SID_H
+#define PSA_MANIFEST_SID_H
+
+/******** RSS_SP_CRYPTO ********/
+#define RSS_CRYPTO_HANDLE (0x40000100U)
+
+/******** RSS_SP_PLATFORM ********/
+#define RSS_PLATFORM_SERVICE_HANDLE (0x40000105U)
+
+/******** PSA_SP_MEASURED_BOOT ********/
+#define RSS_MEASURED_BOOT_HANDLE (0x40000110U)
+
+/******** PSA_SP_DELEGATED_ATTESTATION ********/
+#define RSS_DELEGATED_SERVICE_HANDLE (0x40000111U)
+
+#endif /* PSA_MANIFEST_SID_H */
diff --git a/include/lib/psa/rss_crypto_defs.h b/include/lib/psa/rss_crypto_defs.h
new file mode 100644
index 0000000..b8c7426
--- /dev/null
+++ b/include/lib/psa/rss_crypto_defs.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef RSS_CRYPTO_DEFS_H
+#define RSS_CRYPTO_DEFS_H
+
+/* Declares types that encode errors, algorithms, key types, policies, etc. */
+#include "psa/crypto_types.h"
+
+/*
+ * Value identifying the export public key function API, used to dispatch the
+ * request to the corresponding API implementation in the Crypto service
+ * backend.
+ */
+#define RSS_CRYPTO_EXPORT_PUBLIC_KEY_SID (uint16_t)(0x701)
+
+/*
+ * The persistent key identifiers for RSS builtin keys.
+ */
+enum rss_key_id_builtin_t {
+ RSS_BUILTIN_KEY_ID_HOST_S_ROTPK = 0x7FFF816Cu,
+ RSS_BUILTIN_KEY_ID_HOST_NS_ROTPK,
+ RSS_BUILTIN_KEY_ID_HOST_CCA_ROTPK,
+};
+
+/*
+ * This type is used to overcome a limitation within the RSS firmware on the maximum
+ * number of IOVECs it can use, especially in psa_aead_encrypt and psa_aead_decrypt.
+ */
+#define RSS_CRYPTO_MAX_NONCE_LENGTH (16u)
+struct rss_crypto_aead_pack_input {
+ uint8_t nonce[RSS_CRYPTO_MAX_NONCE_LENGTH];
+ uint32_t nonce_length;
+};
+
+/*
+ * Structure used to pack non-pointer types in a call
+ */
+struct rss_crypto_pack_iovec {
+ psa_key_id_t key_id; /* Key id */
+ psa_algorithm_t alg; /* Algorithm */
+ uint32_t op_handle; /* Frontend context handle associated
+ to a multipart operation */
+ uint32_t capacity; /* Key derivation capacity */
+ uint32_t ad_length; /* Additional Data length for multipart AEAD */
+ uint32_t plaintext_length; /* Plaintext length for multipart AEAD */
+ struct rss_crypto_aead_pack_input aead_in; /* Packs AEAD-related inputs */
+ uint16_t function_id; /* Used to identify the function in the API dispatcher
+ to the service backend. See rss_crypto_func_sid for
+ detail */
+ uint16_t step; /* Key derivation step */
+};
+
+#endif /* RSS_CRYPTO_DEFS_H */
diff --git a/include/lib/psa/rss_platform_api.h b/include/lib/psa/rss_platform_api.h
new file mode 100644
index 0000000..8f74a51
--- /dev/null
+++ b/include/lib/psa/rss_platform_api.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef RSS_PLATFORM_API_H
+#define RSS_PLATFORM_API_H
+
+#include <stdint.h>
+
+#include "psa/error.h"
+#include <rss_crypto_defs.h>
+
+#define RSS_PLATFORM_API_ID_NV_READ (1010)
+#define RSS_PLATFORM_API_ID_NV_INCREMENT (1011)
+
+/*
+ * Increments the given non-volatile (NV) counter by one
+ *
+ * counter_id NV counter ID.
+ *
+ * PSA_SUCCESS if the counter is incremented successfully. Otherwise,
+ * it returns a PSA_ERROR.
+ */
+psa_status_t
+rss_platform_nv_counter_increment(uint32_t counter_id);
+
+/*
+ * Reads the given non-volatile (NV) counter
+ *
+ * counter_id NV counter ID.
+ * size Size of the buffer to store NV counter value
+ * in bytes.
+ * val Pointer to store the current NV counter value.
+ *
+ * PSA_SUCCESS if the value is read correctly. Otherwise,
+ * it returns a PSA_ERROR.
+ */
+psa_status_t
+rss_platform_nv_counter_read(uint32_t counter_id,
+ uint32_t size, uint8_t *val);
+
+/*
+ * Reads the public key or the public part of a key pair in binary format.
+ *
+ * key Identifier of the key to export.
+ * data Buffer where the key data is to be written.
+ * data_size Size of the data buffer in bytes.
+ * data_length On success, the number of bytes that make up the key data.
+ *
+ * PSA_SUCCESS if the key is read correctly. Otherwise,
+ * it returns a PSA_ERROR.
+ */
+psa_status_t
+rss_platform_key_read(enum rss_key_id_builtin_t key, uint8_t *data,
+ size_t data_size, size_t *data_length);
+
+#endif /* RSS_PLATFORM_API_H */
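
A sketch combining the two NV counter calls; the counter ID is a placeholder:

#include <rss_platform_api.h>

#define EXAMPLE_NV_COUNTER_ID	0U	/* hypothetical counter */

static psa_status_t example_bump_and_read(uint32_t *value)
{
	psa_status_t status;

	status = rss_platform_nv_counter_increment(EXAMPLE_NV_COUNTER_ID);
	if (status != PSA_SUCCESS) {
		return status;
	}

	return rss_platform_nv_counter_read(EXAMPLE_NV_COUNTER_ID,
					    sizeof(*value), (uint8_t *)value);
}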
diff --git a/include/lib/psci/psci.h b/include/lib/psci/psci.h
new file mode 100644
index 0000000..c40f955
--- /dev/null
+++ b/include/lib/psci/psci.h
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2013-2019, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2023, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PSCI_H
+#define PSCI_H
+
+#include <platform_def.h> /* for PLAT_NUM_PWR_DOMAINS */
+
+#include <common/bl_common.h>
+#include <lib/bakery_lock.h>
+#include <lib/psci/psci_lib.h> /* To maintain compatibility for SPDs */
+#include <lib/utils_def.h>
+
+/*******************************************************************************
+ * Number of power domains whose state this PSCI implementation can track
+ ******************************************************************************/
+#ifdef PLAT_NUM_PWR_DOMAINS
+#define PSCI_NUM_PWR_DOMAINS PLAT_NUM_PWR_DOMAINS
+#else
+#define PSCI_NUM_PWR_DOMAINS (U(2) * PLATFORM_CORE_COUNT)
+#endif
+
+#define PSCI_NUM_NON_CPU_PWR_DOMAINS (PSCI_NUM_PWR_DOMAINS - \
+ PLATFORM_CORE_COUNT)
+
+/* This is the power level corresponding to a CPU */
+#define PSCI_CPU_PWR_LVL U(0)
+
+/*
+ * The maximum power level supported by PSCI. Since PSCI CPU_SUSPEND
+ * uses the old power_state parameter format which has 2 bits to specify the
+ * power level, this constant is defined to be 3.
+ */
+#define PSCI_MAX_PWR_LVL U(3)
+
+/*******************************************************************************
+ * Defines for runtime services function ids
+ ******************************************************************************/
+#define PSCI_VERSION U(0x84000000)
+#define PSCI_CPU_SUSPEND_AARCH32 U(0x84000001)
+#define PSCI_CPU_SUSPEND_AARCH64 U(0xc4000001)
+#define PSCI_CPU_OFF U(0x84000002)
+#define PSCI_CPU_ON_AARCH32 U(0x84000003)
+#define PSCI_CPU_ON_AARCH64 U(0xc4000003)
+#define PSCI_AFFINITY_INFO_AARCH32 U(0x84000004)
+#define PSCI_AFFINITY_INFO_AARCH64 U(0xc4000004)
+#define PSCI_MIG_AARCH32 U(0x84000005)
+#define PSCI_MIG_AARCH64 U(0xc4000005)
+#define PSCI_MIG_INFO_TYPE U(0x84000006)
+#define PSCI_MIG_INFO_UP_CPU_AARCH32 U(0x84000007)
+#define PSCI_MIG_INFO_UP_CPU_AARCH64 U(0xc4000007)
+#define PSCI_SYSTEM_OFF U(0x84000008)
+#define PSCI_SYSTEM_RESET U(0x84000009)
+#define PSCI_FEATURES U(0x8400000A)
+#define PSCI_NODE_HW_STATE_AARCH32 U(0x8400000d)
+#define PSCI_NODE_HW_STATE_AARCH64 U(0xc400000d)
+#define PSCI_SYSTEM_SUSPEND_AARCH32 U(0x8400000E)
+#define PSCI_SYSTEM_SUSPEND_AARCH64 U(0xc400000E)
+#define PSCI_SET_SUSPEND_MODE U(0x8400000F)
+#define PSCI_STAT_RESIDENCY_AARCH32 U(0x84000010)
+#define PSCI_STAT_RESIDENCY_AARCH64 U(0xc4000010)
+#define PSCI_STAT_COUNT_AARCH32 U(0x84000011)
+#define PSCI_STAT_COUNT_AARCH64 U(0xc4000011)
+#define PSCI_SYSTEM_RESET2_AARCH32 U(0x84000012)
+#define PSCI_SYSTEM_RESET2_AARCH64 U(0xc4000012)
+#define PSCI_MEM_PROTECT U(0x84000013)
+#define PSCI_MEM_CHK_RANGE_AARCH32 U(0x84000014)
+#define PSCI_MEM_CHK_RANGE_AARCH64 U(0xc4000014)
+
+/*
+ * Number of PSCI calls (above) implemented
+ */
+#if ENABLE_PSCI_STAT
+#if PSCI_OS_INIT_MODE
+#define PSCI_NUM_CALLS U(30)
+#else
+#define PSCI_NUM_CALLS U(29)
+#endif
+#else
+#if PSCI_OS_INIT_MODE
+#define PSCI_NUM_CALLS U(26)
+#else
+#define PSCI_NUM_CALLS U(25)
+#endif
+#endif
+
+/* The macros below are used to identify PSCI calls from the SMC function ID */
+#define PSCI_FID_MASK U(0xffe0)
+#define PSCI_FID_VALUE U(0)
+#define is_psci_fid(_fid) \
+ (((_fid) & PSCI_FID_MASK) == PSCI_FID_VALUE)
+
+/*******************************************************************************
+ * PSCI Migrate and friends
+ ******************************************************************************/
+#define PSCI_TOS_UP_MIG_CAP 0
+#define PSCI_TOS_NOT_UP_MIG_CAP 1
+#define PSCI_TOS_NOT_PRESENT_MP 2
+
+/*******************************************************************************
+ * PSCI CPU_SUSPEND 'power_state' parameter specific defines
+ ******************************************************************************/
+#define PSTATE_ID_SHIFT U(0)
+
+#if PSCI_EXTENDED_STATE_ID
+#define PSTATE_VALID_MASK U(0xB0000000)
+#define PSTATE_TYPE_SHIFT U(30)
+#define PSTATE_ID_MASK U(0xfffffff)
+#else
+#define PSTATE_VALID_MASK U(0xFCFE0000)
+#define PSTATE_TYPE_SHIFT U(16)
+#define PSTATE_PWR_LVL_SHIFT U(24)
+#define PSTATE_ID_MASK U(0xffff)
+#define PSTATE_PWR_LVL_MASK U(0x3)
+
+#define psci_get_pstate_pwrlvl(pstate) (((pstate) >> PSTATE_PWR_LVL_SHIFT) & \
+ PSTATE_PWR_LVL_MASK)
+#define psci_make_powerstate(state_id, type, pwrlvl) \
+ (((state_id) & PSTATE_ID_MASK) << PSTATE_ID_SHIFT) |\
+ (((type) & PSTATE_TYPE_MASK) << PSTATE_TYPE_SHIFT) |\
+ (((pwrlvl) & PSTATE_PWR_LVL_MASK) << PSTATE_PWR_LVL_SHIFT)
+#endif /* PSCI_EXTENDED_STATE_ID */
+
+#define PSTATE_TYPE_STANDBY U(0x0)
+#define PSTATE_TYPE_POWERDOWN U(0x1)
+#define PSTATE_TYPE_MASK U(0x1)
+
+/*******************************************************************************
+ * PSCI CPU_FEATURES feature flag specific defines
+ ******************************************************************************/
+/* Features flags for CPU SUSPEND power state parameter format. Bits [1:1] */
+#define FF_PSTATE_SHIFT U(1)
+#define FF_PSTATE_ORIG U(0)
+#define FF_PSTATE_EXTENDED U(1)
+#if PSCI_EXTENDED_STATE_ID
+#define FF_PSTATE FF_PSTATE_EXTENDED
+#else
+#define FF_PSTATE FF_PSTATE_ORIG
+#endif
+
+/* Features flags for CPU SUSPEND OS Initiated mode support. Bits [0:0] */
+#define FF_MODE_SUPPORT_SHIFT U(0)
+#if PSCI_OS_INIT_MODE
+#define FF_SUPPORTS_OS_INIT_MODE U(1)
+#else
+#define FF_SUPPORTS_OS_INIT_MODE U(0)
+#endif
+
+/*******************************************************************************
+ * PSCI version
+ ******************************************************************************/
+#define PSCI_MAJOR_VER (U(1) << 16)
+#define PSCI_MINOR_VER U(0x1)
+
+/*******************************************************************************
+ * PSCI error codes
+ ******************************************************************************/
+#define PSCI_E_SUCCESS 0
+#define PSCI_E_NOT_SUPPORTED -1
+#define PSCI_E_INVALID_PARAMS -2
+#define PSCI_E_DENIED -3
+#define PSCI_E_ALREADY_ON -4
+#define PSCI_E_ON_PENDING -5
+#define PSCI_E_INTERN_FAIL -6
+#define PSCI_E_NOT_PRESENT -7
+#define PSCI_E_DISABLED -8
+#define PSCI_E_INVALID_ADDRESS -9
+
+#define PSCI_INVALID_MPIDR ~((u_register_t)0)
+
+/*
+ * SYSTEM_RESET2 macros
+ */
+#define PSCI_RESET2_TYPE_VENDOR_SHIFT U(31)
+#define PSCI_RESET2_TYPE_VENDOR (U(1) << PSCI_RESET2_TYPE_VENDOR_SHIFT)
+#define PSCI_RESET2_TYPE_ARCH (U(0) << PSCI_RESET2_TYPE_VENDOR_SHIFT)
+#define PSCI_RESET2_SYSTEM_WARM_RESET (PSCI_RESET2_TYPE_ARCH | U(0))
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+/* Function to help build the psci capabilities bitfield */
+
+static inline unsigned int define_psci_cap(unsigned int x)
+{
+ return U(1) << (x & U(0x1f));
+}
+
+
+/* Power state helper functions */
+
+static inline unsigned int psci_get_pstate_id(unsigned int power_state)
+{
+ return ((power_state) >> PSTATE_ID_SHIFT) & PSTATE_ID_MASK;
+}
+
+static inline unsigned int psci_get_pstate_type(unsigned int power_state)
+{
+ return ((power_state) >> PSTATE_TYPE_SHIFT) & PSTATE_TYPE_MASK;
+}
+
+static inline unsigned int psci_check_power_state(unsigned int power_state)
+{
+ return ((power_state) & PSTATE_VALID_MASK);
+}
+
+/*
+ * These are the states reported by the PSCI_AFFINITY_INFO API for the specified
+ * CPU. The definitions of these states can be found in Section 5.7.1 of the
+ * PSCI specification (ARM DEN 0022C).
+ */
+typedef enum {
+ AFF_STATE_ON = U(0),
+ AFF_STATE_OFF = U(1),
+ AFF_STATE_ON_PENDING = U(2)
+} aff_info_state_t;
+
+/*
+ * These are the power states reported by PSCI_NODE_HW_STATE API for the
+ * specified CPU. The definitions of these states can be found in Section 5.15.3
+ * of the PSCI specification (ARM DEN 0022C).
+ */
+#define HW_ON 0
+#define HW_OFF 1
+#define HW_STANDBY 2
+
+/*
+ * Macro to represent invalid affinity level within PSCI.
+ */
+#define PSCI_INVALID_PWR_LVL (PLAT_MAX_PWR_LVL + U(1))
+
+/*
+ * Type for representing the local power state at a particular level.
+ */
+typedef uint8_t plat_local_state_t;
+
+/* The local state macro used to represent RUN state. */
+#define PSCI_LOCAL_STATE_RUN U(0)
+
+/*
+ * Function to test whether the plat_local_state is RUN state
+ */
+static inline int is_local_state_run(unsigned int plat_local_state)
+{
+ return (plat_local_state == PSCI_LOCAL_STATE_RUN) ? 1 : 0;
+}
+
+/*
+ * Function to test whether the plat_local_state is RETENTION state
+ */
+static inline int is_local_state_retn(unsigned int plat_local_state)
+{
+ return ((plat_local_state > PSCI_LOCAL_STATE_RUN) &&
+ (plat_local_state <= PLAT_MAX_RET_STATE)) ? 1 : 0;
+}
+
+/*
+ * Function to test whether the plat_local_state is OFF state
+ */
+static inline int is_local_state_off(unsigned int plat_local_state)
+{
+ return ((plat_local_state > PLAT_MAX_RET_STATE) &&
+ (plat_local_state <= PLAT_MAX_OFF_STATE)) ? 1 : 0;
+}
+
+/*****************************************************************************
+ * This data structure defines the representation of the power state parameter
+ * for its exchange between the generic PSCI code and the platform port. For
+ * example, it is used by the platform port to specify the requested power
+ * states during a power management operation. It is used by the generic code to
+ * inform the platform about the target power states that each level should
+ * enter.
+ ****************************************************************************/
+typedef struct psci_power_state {
+ /*
+ * The pwr_domain_state[] stores the local power state at each level
+ * for the CPU.
+ */
+ plat_local_state_t pwr_domain_state[PLAT_MAX_PWR_LVL + U(1)];
+#if PSCI_OS_INIT_MODE
+ /*
+ * The highest power level at which the current CPU is the last running
+ * CPU.
+ */
+ unsigned int last_at_pwrlvl;
+#endif
+} psci_power_state_t;
+
+/*******************************************************************************
+ * Structure used to store per-cpu information relevant to the PSCI service.
+ * It is populated in the per-cpu data array. In return we get a guarantee that
+ * this information will not reside on a cache line shared with another cpu.
+ ******************************************************************************/
+typedef struct psci_cpu_data {
+ /* State as seen by PSCI Affinity Info API */
+ aff_info_state_t aff_info_state;
+
+ /*
+ * Highest power level which takes part in a power management
+ * operation.
+ */
+ unsigned int target_pwrlvl;
+
+ /* The local power state of this CPU */
+ plat_local_state_t local_state;
+} psci_cpu_data_t;
+
+/*******************************************************************************
+ * Structure populated by platform specific code to export routines which
+ * perform common low level power management functions
+ ******************************************************************************/
+typedef struct plat_psci_ops {
+ void (*cpu_standby)(plat_local_state_t cpu_state);
+ int (*pwr_domain_on)(u_register_t mpidr);
+ void (*pwr_domain_off)(const psci_power_state_t *target_state);
+ int (*pwr_domain_off_early)(const psci_power_state_t *target_state);
+#if PSCI_OS_INIT_MODE
+ int (*pwr_domain_validate_suspend)(
+ const psci_power_state_t *target_state);
+#endif
+ void (*pwr_domain_suspend_pwrdown_early)(
+ const psci_power_state_t *target_state);
+ void (*pwr_domain_suspend)(const psci_power_state_t *target_state);
+ void (*pwr_domain_on_finish)(const psci_power_state_t *target_state);
+ void (*pwr_domain_on_finish_late)(
+ const psci_power_state_t *target_state);
+ void (*pwr_domain_suspend_finish)(
+ const psci_power_state_t *target_state);
+ void __dead2 (*pwr_domain_pwr_down_wfi)(
+ const psci_power_state_t *target_state);
+ void __dead2 (*system_off)(void);
+ void __dead2 (*system_reset)(void);
+ int (*validate_power_state)(unsigned int power_state,
+ psci_power_state_t *req_state);
+ int (*validate_ns_entrypoint)(uintptr_t ns_entrypoint);
+ void (*get_sys_suspend_power_state)(
+ psci_power_state_t *req_state);
+ int (*get_pwr_lvl_state_idx)(plat_local_state_t pwr_domain_state,
+ int pwrlvl);
+ int (*translate_power_state_by_mpidr)(u_register_t mpidr,
+ unsigned int power_state,
+ psci_power_state_t *output_state);
+ int (*get_node_hw_state)(u_register_t mpidr, unsigned int power_level);
+ int (*mem_protect_chk)(uintptr_t base, u_register_t length);
+ int (*read_mem_protect)(int *val);
+ int (*write_mem_protect)(int val);
+ int (*system_reset2)(int is_vendor,
+ int reset_type, u_register_t cookie);
+} plat_psci_ops_t;
+
+/*******************************************************************************
+ * Function & Data prototypes
+ ******************************************************************************/
+unsigned int psci_version(void);
+int psci_cpu_on(u_register_t target_cpu,
+ uintptr_t entrypoint,
+ u_register_t context_id);
+int psci_cpu_suspend(unsigned int power_state,
+ uintptr_t entrypoint,
+ u_register_t context_id);
+int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id);
+int psci_cpu_off(void);
+int psci_affinity_info(u_register_t target_affinity,
+ unsigned int lowest_affinity_level);
+int psci_migrate(u_register_t target_cpu);
+int psci_migrate_info_type(void);
+u_register_t psci_migrate_info_up_cpu(void);
+int psci_node_hw_state(u_register_t target_cpu,
+ unsigned int power_level);
+int psci_features(unsigned int psci_fid);
+#if PSCI_OS_INIT_MODE
+int psci_set_suspend_mode(unsigned int mode);
+#endif
+void __dead2 psci_power_down_wfi(void);
+void psci_arch_setup(void);
+
+#endif /*__ASSEMBLER__*/
+
+#endif /* PSCI_H */
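
A minimal sketch of a platform 'validate_power_state' handler built on the helpers above; the accepted state-ID encoding and the retention state value are hypothetical:

#include <lib/psci/psci.h>

static int example_validate_power_state(unsigned int power_state,
					psci_power_state_t *req_state)
{
	/* Reject any power_state with bits set outside the valid mask. */
	if (psci_check_power_state(power_state) != 0U) {
		return PSCI_E_INVALID_PARAMS;
	}

	if (psci_get_pstate_type(power_state) == PSTATE_TYPE_STANDBY) {
		req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] = U(1);
	} else {
		req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] =
			PLAT_MAX_OFF_STATE;
	}

	return PSCI_E_SUCCESS;
}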
diff --git a/include/lib/psci/psci_lib.h b/include/lib/psci/psci_lib.h
new file mode 100644
index 0000000..4b244ec
--- /dev/null
+++ b/include/lib/psci/psci_lib.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PSCI_LIB_H
+#define PSCI_LIB_H
+
+#include <common/ep_info.h>
+
+#ifndef __ASSEMBLER__
+
+#include <cdefs.h>
+#include <stdint.h>
+
+/*******************************************************************************
+ * Optional structure populated by the Secure Payload Dispatcher to be given a
+ * chance to perform any bookkeeping before PSCI executes a power management
+ * operation. It also allows PSCI to determine certain properties of the SP,
+ * e.g. its migrate capability.
+ ******************************************************************************/
+typedef struct spd_pm_ops {
+ void (*svc_on)(u_register_t target_cpu);
+ int32_t (*svc_off)(u_register_t __unused unused);
+ void (*svc_suspend)(u_register_t max_off_pwrlvl);
+ void (*svc_on_finish)(u_register_t __unused unused);
+ void (*svc_suspend_finish)(u_register_t max_off_pwrlvl);
+ int32_t (*svc_migrate)(u_register_t from_cpu, u_register_t to_cpu);
+ int32_t (*svc_migrate_info)(u_register_t *resident_cpu);
+ void (*svc_system_off)(void);
+ void (*svc_system_reset)(void);
+} spd_pm_ops_t;
+
+/*
+ * Function prototype for the warmboot entrypoint function which will be
+ * programmed in the mailbox by the platform.
+ */
+typedef void (*mailbox_entrypoint_t)(void);
+
+/******************************************************************************
+ * Structure to pass PSCI Library arguments.
+ *****************************************************************************/
+typedef struct psci_lib_args {
+ /* The version information of PSCI Library Interface */
+ param_header_t h;
+ /* The warm boot entrypoint function */
+ mailbox_entrypoint_t mailbox_ep;
+} psci_lib_args_t;
+
+/* Helper macro to set the psci_lib_args_t structure at runtime */
+#define SET_PSCI_LIB_ARGS_V1(_p, _entry) do { \
+ SET_PARAM_HEAD(_p, PARAM_PSCI_LIB_ARGS, VERSION_1, 0); \
+ (_p)->mailbox_ep = (_entry); \
+ } while (0)
+
+/* Helper macro to define the psci_lib_args_t statically */
+#define DEFINE_STATIC_PSCI_LIB_ARGS_V1(_name, _entry) \
+ static const psci_lib_args_t (_name) = { \
+ .h.type = (uint8_t)PARAM_PSCI_LIB_ARGS, \
+ .h.version = (uint8_t)VERSION_1, \
+ .h.size = (uint16_t)sizeof(_name), \
+ .h.attr = 0U, \
+ .mailbox_ep = (_entry) \
+ }
+
+/* Helper macro to verify the pointer to psci_lib_args_t structure */
+#define VERIFY_PSCI_LIB_ARGS_V1(_p) (((_p) != NULL) \
+ && ((_p)->h.type == PARAM_PSCI_LIB_ARGS) \
+ && ((_p)->h.version == VERSION_1) \
+ && ((_p)->h.size == sizeof(*(_p))) \
+ && ((_p)->h.attr == 0) \
+ && ((_p)->mailbox_ep != NULL))
+
+/******************************************************************************
+ * PSCI Library Interfaces
+ *****************************************************************************/
+u_register_t psci_smc_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags);
+int psci_setup(const psci_lib_args_t *lib_args);
+int psci_secondaries_brought_up(void);
+void psci_warmboot_entrypoint(void);
+void psci_register_spd_pm_hook(const spd_pm_ops_t *pm);
+void psci_prepare_next_non_secure_ctx(
+ entry_point_info_t *next_image_info);
+int psci_stop_other_cores(unsigned int wait_ms,
+ void (*stop_func)(u_register_t mpidr));
+bool psci_is_last_on_cpu_safe(void);
+bool psci_are_all_cpus_on_safe(void);
+void psci_pwrdown_cpu(unsigned int power_level);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* PSCI_LIB_H */
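
A sketch of how a user of the PSCI library might initialise it; 'plat_warmboot_entry' is a hypothetical warm-boot entry point programmed into the platform mailbox:

#include <common/debug.h>
#include <lib/psci/psci_lib.h>

extern void plat_warmboot_entry(void);	/* hypothetical mailbox entry point */

DEFINE_STATIC_PSCI_LIB_ARGS_V1(example_psci_args, plat_warmboot_entry);

static void example_psci_init(void)
{
	/* psci_setup() returns 0 on success. */
	if (psci_setup(&example_psci_args) != 0) {
		panic();
	}
}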
diff --git a/include/lib/runtime_instr.h b/include/lib/runtime_instr.h
new file mode 100644
index 0000000..65fafa7
--- /dev/null
+++ b/include/lib/runtime_instr.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2016-2018, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef RUNTIME_INSTR_H
+#define RUNTIME_INSTR_H
+
+#include <lib/utils_def.h>
+
+#define RT_INSTR_ENTER_PSCI U(0)
+#define RT_INSTR_EXIT_PSCI U(1)
+#define RT_INSTR_ENTER_HW_LOW_PWR U(2)
+#define RT_INSTR_EXIT_HW_LOW_PWR U(3)
+#define RT_INSTR_ENTER_CFLUSH U(4)
+#define RT_INSTR_EXIT_CFLUSH U(5)
+#define RT_INSTR_TOTAL_IDS U(6)
+
+#ifndef __ASSEMBLER__
+PMF_DECLARE_CAPTURE_TIMESTAMP(rt_instr_svc)
+PMF_DECLARE_GET_TIMESTAMP(rt_instr_svc)
+#endif /* __ASSEMBLER__ */
+
+#endif /* RUNTIME_INSTR_H */
diff --git a/include/lib/semihosting.h b/include/lib/semihosting.h
new file mode 100644
index 0000000..5c72e8b
--- /dev/null
+++ b/include/lib/semihosting.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2013-2014, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SEMIHOSTING_H
+#define SEMIHOSTING_H
+
+#include <stdint.h>
+#include <stdio.h> /* For ssize_t */
+
+
+#define SEMIHOSTING_SYS_OPEN 0x01
+#define SEMIHOSTING_SYS_CLOSE 0x02
+#define SEMIHOSTING_SYS_WRITE0 0x04
+#define SEMIHOSTING_SYS_WRITEC 0x03
+#define SEMIHOSTING_SYS_WRITE 0x05
+#define SEMIHOSTING_SYS_READ 0x06
+#define SEMIHOSTING_SYS_READC 0x07
+#define SEMIHOSTING_SYS_SEEK 0x0A
+#define SEMIHOSTING_SYS_FLEN 0x0C
+#define SEMIHOSTING_SYS_REMOVE 0x0E
+#define SEMIHOSTING_SYS_SYSTEM 0x12
+#define SEMIHOSTING_SYS_ERRNO 0x13
+#define SEMIHOSTING_SYS_EXIT 0x18
+
+#define FOPEN_MODE_R 0x0
+#define FOPEN_MODE_RB 0x1
+#define FOPEN_MODE_RPLUS 0x2
+#define FOPEN_MODE_RPLUSB 0x3
+#define FOPEN_MODE_W 0x4
+#define FOPEN_MODE_WB 0x5
+#define FOPEN_MODE_WPLUS 0x6
+#define FOPEN_MODE_WPLUSB 0x7
+#define FOPEN_MODE_A 0x8
+#define FOPEN_MODE_AB 0x9
+#define FOPEN_MODE_APLUS 0xa
+#define FOPEN_MODE_APLUSB 0xb
+
+long semihosting_connection_supported(void);
+long semihosting_file_open(const char *file_name, size_t mode);
+long semihosting_file_seek(long file_handle, ssize_t offset);
+long semihosting_file_read(long file_handle, size_t *length, uintptr_t buffer);
+long semihosting_file_write(long file_handle,
+ size_t *length,
+ const uintptr_t buffer);
+long semihosting_file_close(long file_handle);
+long semihosting_file_length(long file_handle);
+long semihosting_system(char *command_line);
+long semihosting_get_flen(const char *file_name);
+long semihosting_download_file(const char *file_name,
+ size_t buf_size,
+ uintptr_t buf);
+void semihosting_write_char(char character);
+void semihosting_write_string(char *string);
+char semihosting_read_char(void);
+void semihosting_exit(uint32_t reason, uint32_t subcode);
+
+#endif /* SEMIHOSTING_H */
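
A sketch of pulling a file from the debug host into memory; the file name and buffer are placeholders, and the availability of a semihosting connection is checked first:

#include <lib/semihosting.h>

static long example_load_from_host(uintptr_t buf, size_t buf_size)
{
	if (semihosting_connection_supported() == 0) {
		return -1;
	}

	/* Downloads "fip.bin" (placeholder name) into the supplied buffer. */
	return semihosting_download_file("fip.bin", buf_size, buf);
}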
diff --git a/include/lib/smccc.h b/include/lib/smccc.h
new file mode 100644
index 0000000..8fd6093
--- /dev/null
+++ b/include/lib/smccc.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SMCCC_H
+#define SMCCC_H
+
+#include <lib/utils_def.h>
+
+#define SMCCC_VERSION_MAJOR_SHIFT U(16)
+#define SMCCC_VERSION_MAJOR_MASK U(0x7FFF)
+#define SMCCC_VERSION_MINOR_SHIFT U(0)
+#define SMCCC_VERSION_MINOR_MASK U(0xFFFF)
+#define MAKE_SMCCC_VERSION(_major, _minor) \
+ ((((uint32_t)(_major) & SMCCC_VERSION_MAJOR_MASK) << \
+ SMCCC_VERSION_MAJOR_SHIFT) \
+ | (((uint32_t)(_minor) & SMCCC_VERSION_MINOR_MASK) << \
+ SMCCC_VERSION_MINOR_SHIFT))
+
+#define SMCCC_MAJOR_VERSION U(1)
+#define SMCCC_MINOR_VERSION U(4)
+
+/*******************************************************************************
+ * Bit definitions inside the function id as per the SMC calling convention
+ ******************************************************************************/
+#define FUNCID_TYPE_SHIFT U(31)
+#define FUNCID_TYPE_MASK U(0x1)
+#define FUNCID_TYPE_WIDTH U(1)
+
+#define FUNCID_CC_SHIFT U(30)
+#define FUNCID_CC_MASK U(0x1)
+#define FUNCID_CC_WIDTH U(1)
+
+#define FUNCID_OEN_SHIFT U(24)
+#define FUNCID_OEN_MASK U(0x3f)
+#define FUNCID_OEN_WIDTH U(6)
+
+#define FUNCID_FC_RESERVED_SHIFT U(17)
+#define FUNCID_FC_RESERVED_MASK U(0x7f)
+#define FUNCID_FC_RESERVED_WIDTH U(7)
+
+#define FUNCID_SVE_HINT_SHIFT U(16)
+#define FUNCID_SVE_HINT_MASK U(1)
+#define FUNCID_SVE_HINT_WIDTH U(1)
+
+#define FUNCID_NUM_SHIFT U(0)
+#define FUNCID_NUM_MASK U(0xffff)
+#define FUNCID_NUM_WIDTH U(16)
+
+#define FUNCID_MASK U(0xffffffff)
+
+#define GET_SMC_NUM(id) (((id) >> FUNCID_NUM_SHIFT) & \
+ FUNCID_NUM_MASK)
+#define GET_SMC_TYPE(id) (((id) >> FUNCID_TYPE_SHIFT) & \
+ FUNCID_TYPE_MASK)
+#define GET_SMC_CC(id) (((id) >> FUNCID_CC_SHIFT) & \
+ FUNCID_CC_MASK)
+#define GET_SMC_OEN(id) (((id) >> FUNCID_OEN_SHIFT) & \
+ FUNCID_OEN_MASK)
+
+/*******************************************************************************
+ * SMCCC_ARCH_SOC_ID SoC version & revision bit definition
+ ******************************************************************************/
+#define SOC_ID_JEP_106_BANK_IDX_MASK GENMASK_32(30, 24)
+#define SOC_ID_JEP_106_BANK_IDX_SHIFT U(24)
+#define SOC_ID_JEP_106_ID_CODE_MASK GENMASK_32(23, 16)
+#define SOC_ID_JEP_106_ID_CODE_SHIFT U(16)
+#define SOC_ID_IMPL_DEF_MASK GENMASK_32(15, 0)
+#define SOC_ID_IMPL_DEF_SHIFT U(0)
+#define SOC_ID_SET_JEP_106(bkid, mfid) ((((bkid) << SOC_ID_JEP_106_BANK_IDX_SHIFT) & \
+ SOC_ID_JEP_106_BANK_IDX_MASK) | \
+ (((mfid) << SOC_ID_JEP_106_ID_CODE_SHIFT) & \
+ SOC_ID_JEP_106_ID_CODE_MASK))
+
+#define SOC_ID_REV_MASK GENMASK_32(30, 0)
+#define SOC_ID_REV_SHIFT U(0)
+
+/*******************************************************************************
+ * Owning entity number definitions inside the function id as per the SMC
+ * calling convention
+ ******************************************************************************/
+#define OEN_ARM_START U(0)
+#define OEN_ARM_END U(0)
+#define OEN_CPU_START U(1)
+#define OEN_CPU_END U(1)
+#define OEN_SIP_START U(2)
+#define OEN_SIP_END U(2)
+#define OEN_OEM_START U(3)
+#define OEN_OEM_END U(3)
+#define OEN_STD_START U(4) /* Standard Service Calls */
+#define OEN_STD_END U(4)
+#define OEN_STD_HYP_START U(5) /* Standard Hypervisor Service calls */
+#define OEN_STD_HYP_END U(5)
+#define OEN_VEN_HYP_START U(6) /* Vendor Hypervisor Service calls */
+#define OEN_VEN_HYP_END U(6)
+#define OEN_TAP_START U(48) /* Trusted Applications */
+#define OEN_TAP_END U(49)
+#define OEN_TOS_START U(50) /* Trusted OS */
+#define OEN_TOS_END U(63)
+#define OEN_LIMIT U(64)
+
+/* Flags and error codes */
+#define SMC_64 U(1)
+#define SMC_32 U(0)
+
+#define SMC_TYPE_FAST UL(1)
+#define SMC_TYPE_YIELD UL(0)
+
+#define SMC_OK ULL(0)
+#define SMC_UNK -1
+#define SMC_PREEMPTED -2 /* Not defined by the SMCCC */
+
+/* Return codes for Arm Architecture Service SMC calls */
+#define SMC_ARCH_CALL_SUCCESS 0
+#define SMC_ARCH_CALL_NOT_SUPPORTED -1
+#define SMC_ARCH_CALL_NOT_REQUIRED -2
+#define SMC_ARCH_CALL_INVAL_PARAM -3
+
+/*
+ * Various flags passed to SMC handlers
+ *
+ * Bit 5 and bit 0 of the flag are used to
+ * determine the source security state as
+ * follows:
+ * ---------------------------------
+ * Bit 5 | Bit 0 | Security state
+ * ---------------------------------
+ * 0 0 SMC_FROM_SECURE
+ * 0 1 SMC_FROM_NON_SECURE
+ * 1 1 SMC_FROM_REALM
+ *
+ * Bit 16 of flags records the caller's SMC
+ * SVE hint bit according to SMCCCv1.3.
+ * It can be consumed by dispatchers using
+ * the is_sve_hint_set macro.
+ *
+ */
+
+#define SMC_FROM_SECURE (U(0) << 0)
+#define SMC_FROM_NON_SECURE (U(1) << 0)
+#define SMC_FROM_REALM U(0x21)
+#define SMC_FROM_MASK U(0x21)
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+#include <lib/cassert.h>
+
+#if ENABLE_RME
+#define is_caller_non_secure(_f) (((_f) & SMC_FROM_MASK) \
+ == SMC_FROM_NON_SECURE)
+#define is_caller_secure(_f) (((_f) & SMC_FROM_MASK) \
+ == SMC_FROM_SECURE)
+#define is_caller_realm(_f) (((_f) & SMC_FROM_MASK) \
+ == SMC_FROM_REALM)
+#define caller_sec_state(_f) ((_f) & SMC_FROM_MASK)
+#else /* ENABLE_RME */
+#define is_caller_non_secure(_f) (((_f) & SMC_FROM_NON_SECURE) != U(0))
+#define is_caller_secure(_f) (!is_caller_non_secure(_f))
+#endif /* ENABLE_RME */
+
+#define is_sve_hint_set(_f) (((_f) & (FUNCID_SVE_HINT_MASK \
+ << FUNCID_SVE_HINT_SHIFT)) != U(0))
+
+/* The macro below is used to identify a Standard Service SMC call */
+#define is_std_svc_call(_fid) (GET_SMC_OEN(_fid) == OEN_STD_START)
+
+/* The macro below is used to identify an Arm Architectural Service SMC call */
+#define is_arm_arch_svc_call(_fid) (GET_SMC_OEN(_fid) == OEN_ARM_START)
+
+/* The macro below is used to identify a valid Fast SMC call */
+#define is_valid_fast_smc(_fid) ((!(((_fid) >> 16) & U(0xff))) && \
+ (GET_SMC_TYPE(_fid) \
+ == (uint32_t)SMC_TYPE_FAST))
+
+/*
+ * Macro to define UUID for services. Apart from defining and initializing a
+ * uuid_t structure, this macro verifies that the first word of the defined UUID
+ * does not equal SMC_UNK. This is to ensure that the caller won't mistake the
+ * returned UUID in x0 for an invalid SMC error return
+ */
+#define DEFINE_SVC_UUID2(_name, _tl, _tm, _th, _cl, _ch, \
+ _n0, _n1, _n2, _n3, _n4, _n5) \
+ CASSERT((uint32_t)(_tl) != (uint32_t)SMC_UNK, \
+ invalid_svc_uuid_##_name); \
+ static const uuid_t _name = { \
+ {((_tl) >> 24) & 0xFF, \
+ ((_tl) >> 16) & 0xFF, \
+ ((_tl) >> 8) & 0xFF, \
+ ((_tl) & 0xFF)}, \
+ {((_tm) >> 8) & 0xFF, \
+ ((_tm) & 0xFF)}, \
+ {((_th) >> 8) & 0xFF, \
+ ((_th) & 0xFF)}, \
+ (_cl), (_ch), \
+ { (_n0), (_n1), (_n2), (_n3), (_n4), (_n5) } \
+ }
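+
+/*
+ * Illustrative (hypothetical) usage, with made-up UUID values:
+ *
+ *   DEFINE_SVC_UUID2(my_svc_uuid,
+ *           0x12345678, 0x9abc, 0x4def, 0x01, 0x23,
+ *           0x45, 0x67, 0x89, 0xab, 0xcd, 0xef);
+ *
+ * This expands to a 'static const uuid_t my_svc_uuid' plus a compile-time
+ * check that the first word (0x12345678) does not equal SMC_UNK.
+ */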
+
+/*
+ * Return a UUID in the SMC return registers.
+ *
+ * According to section 5.3 of the SMCCC, UUIDs are returned as a single
+ * 128-bit value using the SMC32 calling convention. This value is mapped to
+ * argument registers x0-x3 on AArch64 (resp. r0-r3 on AArch32). x0 for example
+ * shall hold bytes 0 to 3, with byte 0 in the low-order bits.
+ */
+static inline uint32_t smc_uuid_word(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3)
+{
+ return ((uint32_t) b0) | (((uint32_t) b1) << 8) |
+ (((uint32_t) b2) << 16) | (((uint32_t) b3) << 24);
+}
+
+#define SMC_UUID_RET(_h, _uuid) \
+ SMC_RET4(handle, \
+ smc_uuid_word((_uuid).time_low[0], (_uuid).time_low[1], \
+ (_uuid).time_low[2], (_uuid).time_low[3]), \
+ smc_uuid_word((_uuid).time_mid[0], (_uuid).time_mid[1], \
+ (_uuid).time_hi_and_version[0], \
+ (_uuid).time_hi_and_version[1]), \
+ smc_uuid_word((_uuid).clock_seq_hi_and_reserved, \
+ (_uuid).clock_seq_low, (_uuid).node[0], \
+ (_uuid).node[1]), \
+ smc_uuid_word((_uuid).node[2], (_uuid).node[3], \
+ (_uuid).node[4], (_uuid).node[5]))
+
+#endif /*__ASSEMBLER__*/
+#endif /* SMCCC_H */
diff --git a/include/lib/spinlock.h b/include/lib/spinlock.h
new file mode 100644
index 0000000..9fd3fc6
--- /dev/null
+++ b/include/lib/spinlock.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2013-2018, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPINLOCK_H
+#define SPINLOCK_H
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+typedef struct spinlock {
+ volatile uint32_t lock;
+} spinlock_t;
+
+void spin_lock(spinlock_t *lock);
+void spin_unlock(spinlock_t *lock);
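+
+/*
+ * Typical usage sketch (the lock variable below is caller-defined):
+ *
+ *   static spinlock_t my_lock;
+ *
+ *   spin_lock(&my_lock);
+ *   ... critical section ...
+ *   spin_unlock(&my_lock);
+ */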
+
+#else
+
+/* Spin lock definitions for use in assembly */
+#define SPINLOCK_ASM_ALIGN 2
+#define SPINLOCK_ASM_SIZE 4
+
+#endif
+
+#endif /* SPINLOCK_H */
diff --git a/include/lib/transfer_list.h b/include/lib/transfer_list.h
new file mode 100644
index 0000000..54c8643
--- /dev/null
+++ b/include/lib/transfer_list.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2023, Linaro Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TRANSFER_LIST_H
+#define __TRANSFER_LIST_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <lib/utils_def.h>
+
+#define TRANSFER_LIST_SIGNATURE U(0x006ed0ff)
+#define TRANSFER_LIST_VERSION U(0x0001)
+
+// Init value of maximum alignment required by any TE data in the TL
+// specified as a power of two
+#define TRANSFER_LIST_INIT_MAX_ALIGN U(3)
+
+// alignment required by TE header start address, in bytes
+#define TRANSFER_LIST_GRANULE U(8)
+
+// version of the register convention used.
+// Set to 1 for both AArch64 and AArch32 according to fw handoff spec v0.9
+#define REGISTER_CONVENTION_VERSION_MASK (1 << 24)
+
+#ifndef __ASSEMBLER__
+
+enum transfer_list_tag_id {
+ TL_TAG_EMPTY = 0,
+ TL_TAG_FDT = 1,
+ TL_TAG_HOB_BLOCK = 2,
+ TL_TAG_HOB_LIST = 3,
+ TL_TAG_ACPI_TABLE_AGGREGATE = 4,
+};
+
+enum transfer_list_ops {
+ TL_OPS_NON, // invalid for any operation
+ TL_OPS_ALL, // valid for all operations
+ TL_OPS_RO, // valid for read only
+ TL_OPS_CUS, // either abort or switch to special code to interpret
+};
+
+struct transfer_list_header {
+ uint32_t signature;
+ uint8_t checksum;
+ uint8_t version;
+ uint8_t hdr_size;
+ uint8_t alignment; // max alignment of TE data
+ uint32_t size; // TL header + all TEs
+ uint32_t max_size;
+ /*
+ * Commented out element used to visualize dynamic part of the
+ * data structure.
+ *
+ * Note that struct transfer_list_entry also is dynamic in size
+ * so the elements can't be indexed directly but instead must be
+ * traversed in order
+ *
+ * struct transfer_list_entry entries[];
+ */
+};
+
+struct transfer_list_entry {
+ uint16_t tag_id;
+ uint8_t reserved0; // place holder
+ uint8_t hdr_size;
+ uint32_t data_size;
+ /*
+ * Commented out element used to visualize dynamic part of the
+ * data structure.
+ *
+	 * Note that padding is added at the end of @data to make it reach
+	 * an 8-byte boundary.
+ *
+ * uint8_t data[ROUNDUP(data_size, 8)];
+ */
+};
+
+void transfer_list_dump(struct transfer_list_header *tl);
+struct transfer_list_header *transfer_list_init(void *addr, size_t max_size);
+
+struct transfer_list_header *transfer_list_relocate(struct transfer_list_header *tl,
+ void *addr, size_t max_size);
+enum transfer_list_ops transfer_list_check_header(const struct transfer_list_header *tl);
+
+void transfer_list_update_checksum(struct transfer_list_header *tl);
+bool transfer_list_verify_checksum(const struct transfer_list_header *tl);
+
+bool transfer_list_set_data_size(struct transfer_list_header *tl,
+ struct transfer_list_entry *entry,
+ uint32_t new_data_size);
+
+void *transfer_list_entry_data(struct transfer_list_entry *entry);
+bool transfer_list_rem(struct transfer_list_header *tl, struct transfer_list_entry *entry);
+
+struct transfer_list_entry *transfer_list_add(struct transfer_list_header *tl,
+ uint16_t tag_id, uint32_t data_size,
+ const void *data);
+
+struct transfer_list_entry *transfer_list_add_with_align(struct transfer_list_header *tl,
+ uint16_t tag_id, uint32_t data_size,
+ const void *data, uint8_t alignment);
+
+struct transfer_list_entry *transfer_list_next(struct transfer_list_header *tl,
+ struct transfer_list_entry *last);
+
+struct transfer_list_entry *transfer_list_find(struct transfer_list_header *tl,
+ uint16_t tag_id);
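+
+/*
+ * Illustrative traversal sketch, assuming transfer_list_next() returns the
+ * first entry when 'last' is NULL (variable names are arbitrary):
+ *
+ *   struct transfer_list_entry *te = NULL;
+ *
+ *   while ((te = transfer_list_next(tl, te)) != NULL) {
+ *           if (te->tag_id == TL_TAG_FDT) {
+ *                   void *fdt = transfer_list_entry_data(te);
+ *                   ...
+ *           }
+ *   }
+ *
+ * transfer_list_find(tl, TL_TAG_FDT) performs an equivalent lookup in one call.
+ */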
+
+#endif /*__ASSEMBLER__*/
+#endif /*__TRANSFER_LIST_H*/
diff --git a/include/lib/utils.h b/include/lib/utils.h
new file mode 100644
index 0000000..ce76de2
--- /dev/null
+++ b/include/lib/utils.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2016-2019, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef UTILS_H
+#define UTILS_H
+
+/*
+ * C code should be put in this part of the header to avoid breaking ASM files
+ * or linker scripts including it.
+ */
+#if !(defined(__LINKER__) || defined(__ASSEMBLER__))
+
+#include <stddef.h>
+#include <stdint.h>
+
+typedef struct mem_region {
+ uintptr_t base;
+ size_t nbytes;
+} mem_region_t;
+
+/*
+ * zero_normalmem all the regions defined in tbl.
+ */
+void clear_mem_regions(mem_region_t *tbl, size_t nregions);
+
+/*
+ * zero_normalmem all the regions defined in 'regions'. It dynamically
+ * maps chunks of 'chunk' bytes at the virtual address 'va' and clears
+ * them. For this reason each memory region must be a multiple of the
+ * chunk size and must be aligned to it as well. 'chunk' and 'va' can be
+ * selected in a way that minimizes the number of entries used in the
+ * translation tables.
+ */
+void clear_map_dyn_mem_regions(struct mem_region *regions,
+ size_t nregions,
+ uintptr_t va,
+ size_t chunk);
+
+/*
+ * Checks that the memory region [addr, addr + nbytes - 1] is entirely covered
+ * by one of the regions defined in tbl. The caller must ensure that
+ * (addr + nbytes - 1) doesn't overflow.
+ */
+int mem_region_in_array_chk(mem_region_t *tbl, size_t nregions,
+ uintptr_t addr, size_t nbytes);
+
+/*
+ * Fill a region of normal memory of size "length" in bytes with zero bytes.
+ *
+ * WARNING: This function can only operate on normal memory. This means that
+ * the MMU must be enabled when using this function. Otherwise, use
+ * zeromem.
+ */
+void zero_normalmem(void *mem, u_register_t length);
+
+/*
+ * Fill a region of memory of size "length" in bytes with null bytes.
+ *
+ * Unlike zero_normalmem, this function has no restriction on the type of
+ * memory targeted and can be used for any device memory as well as normal
+ * memory. This function must be used instead of zero_normalmem when MMU is
+ * disabled.
+ *
+ * NOTE: When data cache and MMU are enabled, prefer zero_normalmem for faster
+ * zeroing.
+ */
+void zeromem(void *mem, u_register_t length);
+
+/*
+ * Utility function to return the address of a symbol. By default, the
+ * compiler generates an adr/adrp instruction pair to obtain a reference
+ * to the symbol; this utility is used to override that compiler-generated
+ * code and use an `ldr` instruction instead.
+ *
+ * This helps when a Position Independent Executable needs to reference a
+ * symbol which is constant and does not depend on the execute address of
+ * the binary.
+ */
+#define DEFINE_LOAD_SYM_ADDR(_name) \
+static inline u_register_t load_addr_## _name(void) \
+{ \
+ u_register_t v; \
+ __asm__ volatile ("ldr %0, =" #_name : "=r" (v) : "X" (#_name));\
+ return v; \
+}
+
+/* Helper to invoke the function defined by DEFINE_LOAD_SYM_ADDR() */
+#define LOAD_ADDR_OF(_name) (typeof(_name) *) load_addr_## _name()
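+
+/*
+ * Illustrative usage with a hypothetical linker symbol 'my_sym':
+ *
+ *   DEFINE_LOAD_SYM_ADDR(my_sym)
+ *   ...
+ *   u_register_t addr = (u_register_t)LOAD_ADDR_OF(my_sym);
+ */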
+
+#endif /* !(defined(__LINKER__) || defined(__ASSEMBLER__)) */
+
+#endif /* UTILS_H */
diff --git a/include/lib/utils_def.h b/include/lib/utils_def.h
new file mode 100644
index 0000000..a170a09
--- /dev/null
+++ b/include/lib/utils_def.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef UTILS_DEF_H
+#define UTILS_DEF_H
+
+#include <export/lib/utils_def_exp.h>
+
+/* Compute the number of elements in the given array */
+#define ARRAY_SIZE(a) \
+ (sizeof(a) / sizeof((a)[0]))
+
+#define IS_POWER_OF_TWO(x) \
+ (((x) & ((x) - 1)) == 0)
+
+#define SIZE_FROM_LOG2_WORDS(n) (U(4) << (n))
+
+#define BIT_32(nr) (U(1) << (nr))
+#define BIT_64(nr) (ULL(1) << (nr))
+
+#ifdef __aarch64__
+#define BIT BIT_64
+#else
+#define BIT BIT_32
+#endif
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_64(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#if defined(__LINKER__) || defined(__ASSEMBLER__)
+#define GENMASK_32(h, l) \
+ (((0xFFFFFFFF) << (l)) & (0xFFFFFFFF >> (32 - 1 - (h))))
+
+#define GENMASK_64(h, l) \
+ ((~0 << (l)) & (~0 >> (64 - 1 - (h))))
+#else
+#define GENMASK_32(h, l) \
+ (((~UINT32_C(0)) << (l)) & (~UINT32_C(0) >> (32 - 1 - (h))))
+
+#define GENMASK_64(h, l) \
+ (((~UINT64_C(0)) << (l)) & (~UINT64_C(0) >> (64 - 1 - (h))))
+#endif
+
+#ifdef __aarch64__
+#define GENMASK GENMASK_64
+#else
+#define GENMASK GENMASK_32
+#endif
+
+/*
+ * This variant of div_round_up can be used in macro definition but should not
+ * be used in C code as the `div` parameter is evaluated twice.
+ */
+#define DIV_ROUND_UP_2EVAL(n, d) (((n) + (d) - 1) / (d))
+
+#define div_round_up(val, div) __extension__ ({ \
+ __typeof__(div) _div = (div); \
+ ((val) + _div - (__typeof__(div)) 1) / _div; \
+})
+
+#define MIN(x, y) __extension__ ({ \
+ __typeof__(x) _x = (x); \
+ __typeof__(y) _y = (y); \
+ (void)(&_x == &_y); \
+ (_x < _y) ? _x : _y; \
+})
+
+#define MAX(x, y) __extension__ ({ \
+ __typeof__(x) _x = (x); \
+ __typeof__(y) _y = (y); \
+ (void)(&_x == &_y); \
+ (_x > _y) ? _x : _y; \
+})
+
+#define CLAMP(x, min, max) __extension__ ({ \
+ __typeof__(x) _x = (x); \
+ __typeof__(min) _min = (min); \
+ __typeof__(max) _max = (max); \
+ (void)(&_x == &_min); \
+ (void)(&_x == &_max); \
+ ((_x > _max) ? _max : ((_x < _min) ? _min : _x)); \
+})
+
+/*
+ * The round_up() macro rounds up a value to the given boundary in a
+ * type-agnostic yet type-safe manner. The boundary must be a power of two.
+ * In other words, it computes the smallest multiple of boundary which is
+ * greater than or equal to value.
+ *
+ * round_down() is similar but rounds the value down instead.
+ */
+#define round_boundary(value, boundary) \
+ ((__typeof__(value))((boundary) - 1))
+
+#define round_up(value, boundary) \
+ ((((value) - 1) | round_boundary(value, boundary)) + 1)
+
+#define round_down(value, boundary) \
+ ((value) & ~round_boundary(value, boundary))
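+
+/*
+ * For example, with a power-of-two boundary of 0x1000 (4 KB):
+ *   round_up(0x2345, 0x1000)   == 0x3000
+ *   round_up(0x2000, 0x1000)   == 0x2000
+ *   round_down(0x2345, 0x1000) == 0x2000
+ */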
+
+/*
+ * Add operation together with a check of whether the operation overflowed.
+ * The result is stored in '*res';
+ * return 0 on success and 1 on overflow.
+ */
+#define add_overflow(a, b, res) __builtin_add_overflow((a), (b), (res))
+
+/*
+ * Round up a value to align with a given size and
+ * check whether overflow happens.
+ * The rounded-up value is stored in '*res';
+ * return 0 on success and 1 on overflow.
+ */
+#define round_up_overflow(v, size, res) (__extension__({ \
+ typeof(res) __res = res; \
+ typeof(*(__res)) __roundup_tmp = 0; \
+ typeof(v) __roundup_mask = (typeof(v))(size) - 1; \
+ \
+ add_overflow((v), __roundup_mask, &__roundup_tmp) ? 1 : \
+ (void)(*(__res) = __roundup_tmp & ~__roundup_mask), 0; \
+}))
+
+/*
+ * Add 'a' to 'b', then round up the result to align with a given size and
+ * check whether overflow happens.
+ * The rounded-up value is stored in '*res';
+ * return 0 on success and 1 on overflow.
+ */
+#define add_with_round_up_overflow(a, b, size, res) (__extension__({ \
+ typeof(a) __a = (a); \
+ typeof(__a) __add_res = 0; \
+ \
+ add_overflow((__a), (b), &__add_res) ? 1 : \
+ round_up_overflow(__add_res, (size), (res)) ? 1 : 0; \
+}))
+
+/*
+ * Helper macro to check whether a value lies on a given boundary.
+ */
+#define is_aligned(value, boundary) \
+ (round_up((uintptr_t) value, boundary) == \
+ round_down((uintptr_t) value, boundary))
+
+/*
+ * Evaluates to 1 if (ptr + inc) overflows, 0 otherwise.
+ * Both arguments must be unsigned pointer values (i.e. uintptr_t).
+ */
+#define check_uptr_overflow(_ptr, _inc) \
+ ((_ptr) > (UINTPTR_MAX - (_inc)))
+
+/*
+ * Evaluates to 1 if (u32 + inc) overflows, 0 otherwise.
+ * Both arguments must be 32-bit unsigned integers (i.e. effectively uint32_t).
+ */
+#define check_u32_overflow(_u32, _inc) \
+ ((_u32) > (UINT32_MAX - (_inc)))
+
+/* Register size of the current architecture. */
+#ifdef __aarch64__
+#define REGSZ U(8)
+#else
+#define REGSZ U(4)
+#endif
+
+/*
+ * Test for the current architecture version to be at least the version
+ * expected.
+ */
+#define ARM_ARCH_AT_LEAST(_maj, _min) \
+ ((ARM_ARCH_MAJOR > (_maj)) || \
+ ((ARM_ARCH_MAJOR == (_maj)) && (ARM_ARCH_MINOR >= (_min))))
+
+/*
+ * Import an assembly or linker symbol as a C expression with the specified
+ * type
+ */
+#define IMPORT_SYM(type, sym, name) \
+ extern char sym[];\
+ static const __attribute__((unused)) type name = (type) sym;
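+
+/*
+ * Illustrative usage with a hypothetical linker symbol:
+ *
+ *   IMPORT_SYM(uintptr_t, __MY_RO_START__, MY_RO_BASE);
+ *
+ * after which MY_RO_BASE can be used as an ordinary C constant.
+ */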
+
+/*
+ * When the symbol is used to hold a pointer, its alignment can be asserted
+ * with this macro. For example, if there is a linker symbol that is going to
+ * be used as a 64-bit pointer, the value of the linker symbol must also be
+ * aligned to 64 bit. This macro makes sure this is the case.
+ */
+#define ASSERT_SYM_PTR_ALIGN(sym) assert(((size_t)(sym) % __alignof__(*(sym))) == 0)
+
+#define COMPILER_BARRIER() __asm__ volatile ("" ::: "memory")
+
+/* Compiler builtin of GCC >= 9 and planned in llvm */
+#ifdef __HAVE_SPECULATION_SAFE_VALUE
+# define SPECULATION_SAFE_VALUE(var) __builtin_speculation_safe_value(var)
+#else
+# define SPECULATION_SAFE_VALUE(var) var
+#endif
+
+/*
+ * Ticks elapsed in one second with a signal of 1 MHz
+ */
+#define MHZ_TICKS_PER_SEC U(1000000)
+
+/*
+ * Ticks elapsed in one second with a signal of 1 KHz
+ */
+#define KHZ_TICKS_PER_SEC U(1000)
+
+#endif /* UTILS_DEF_H */
diff --git a/include/lib/xlat_mpu/xlat_mpu.h b/include/lib/xlat_mpu/xlat_mpu.h
new file mode 100644
index 0000000..3a470ad
--- /dev/null
+++ b/include/lib/xlat_mpu/xlat_mpu.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_MPU_H
+#define XLAT_MPU_H
+
+#ifndef __ASSEMBLER__
+
+#include <lib/cassert.h>
+
+#define XLAT_TABLES_LIB_V2 1
+
+void enable_mpu_el2(unsigned int flags);
+void enable_mpu_direct_el2(unsigned int flags);
+
+/*
+ * Function to wipe clean and disable all MPU regions. This function expects
+ * that the MPU has already been turned off, and caching concerns addressed,
+ * but it nevertheless also explicitly turns off the MPU.
+ */
+void clear_all_mpu_regions(void);
+
+#endif /* __ASSEMBLER__ */
+#endif /* XLAT_MPU_H */
diff --git a/include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h b/include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h
new file mode 100644
index 0000000..42a48f4
--- /dev/null
+++ b/include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2017-2018, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_AARCH32_H
+#define XLAT_TABLES_AARCH32_H
+
+#include <arch.h>
+#include <lib/utils_def.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+
+#if !defined(PAGE_SIZE)
+#error "PAGE_SIZE is not defined."
+#endif
+
+/*
+ * In AArch32 state, the MMU only supports 4KB page granularity, which means
+ * that the first translation table level is either 1 or 2. Both of them are
+ * allowed to have block and table descriptors. See section G4.5.6 of the
+ * ARMv8-A Architecture Reference Manual (DDI 0487A.k) for more information.
+ *
+ * The define below specifies the first table level that allows block
+ * descriptors.
+ */
+#if PAGE_SIZE != PAGE_SIZE_4KB
+#error "Invalid granule size. AArch32 supports 4KB pages only."
+#endif
+
+#define MIN_LVL_BLOCK_DESC U(1)
+
+#define XLAT_TABLE_LEVEL_MIN U(1)
+
+/*
+ * Define the architectural limits of the virtual address space in AArch32
+ * state.
+ *
+ * TTBCR.TxSZ is calculated as 32 minus the width of said address space. The
+ * value of TTBCR.TxSZ must be in the range 0 to 7 [1], which means that the
+ * virtual address space width must be in the range 32 to 25 bits.
+ *
+ * [1] See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information, Section G4.6.5
+ */
+#define MIN_VIRT_ADDR_SPACE_SIZE (ULL(1) << (U(32) - TTBCR_TxSZ_MAX))
+#define MAX_VIRT_ADDR_SPACE_SIZE (ULL(1) << (U(32) - TTBCR_TxSZ_MIN))
+
+/*
+ * Here we calculate the initial lookup level from the value of the given
+ * virtual address space size. For a 4 KB page size,
+ * - level 1 supports virtual address spaces of widths 32 to 31 bits;
+ * - level 2 from 30 to 25.
+ *
+ * Wider or narrower address spaces are not supported. As a result, level 3
+ * cannot be used as the initial lookup level with 4 KB granularity.
+ * See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information, Section G4.6.5
+ *
+ * For example, for a 31-bit address space (i.e. virt_addr_space_size ==
+ * 1 << 31), TTBCR.TxSZ will be programmed to (32 - 31) = 1. According to Table
+ * G4-5 in the ARM ARM, the initial lookup level for an address space like that
+ * is 1.
+ *
+ * Note that this macro assumes that the given virtual address space size is
+ * valid.
+ */
+#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_sz) \
+ (((_virt_addr_space_sz) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) ? \
+ U(1) : U(2))
+
+#endif /* XLAT_TABLES_AARCH32_H */
diff --git a/include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h b/include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h
new file mode 100644
index 0000000..6c0d73b
--- /dev/null
+++ b/include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2017-2018, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_AARCH64_H
+#define XLAT_TABLES_AARCH64_H
+
+#include <arch.h>
+#include <lib/utils_def.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+
+#if !defined(PAGE_SIZE)
+#error "PAGE_SIZE is not defined."
+#endif
+
+/*
+ * Encode a Physical Address Space size for its use in TCR_ELx.
+ */
+unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr);
+
+/*
+ * In AArch64 state, the MMU may support 4 KB, 16 KB and 64 KB page
+ * granularity. For 4KB granularity, a level 0 table descriptor doesn't support
+ * block translation. For 16KB, the same thing happens to levels 0 and 1. For
+ * 64KB, same for level 1. See section D4.3.1 of the ARMv8-A Architecture
+ * Reference Manual (DDI 0487A.k) for more information.
+ *
+ * The define below specifies the first table level that allows block
+ * descriptors.
+ */
+#if PAGE_SIZE == PAGE_SIZE_4KB
+# define MIN_LVL_BLOCK_DESC U(1)
+#elif (PAGE_SIZE == PAGE_SIZE_16KB) || (PAGE_SIZE == PAGE_SIZE_64KB)
+# define MIN_LVL_BLOCK_DESC U(2)
+#endif
+
+#define XLAT_TABLE_LEVEL_MIN U(0)
+
+/*
+ * Define the architectural limits of the virtual address space in AArch64
+ * state.
+ *
+ * TCR.TxSZ is calculated as 64 minus the width of said address space.
+ * The value of TCR.TxSZ must be in the range 16 to 39 [1] or 48 [2],
+ * depending on Small Translation Table Support, which means that
+ * the virtual address space width must be in the range 48 to 25 or 16 bits.
+ *
+ * [1] See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information:
+ * Page 1730: 'Input address size', 'For all translation stages'.
+ * [2] See section 12.2.55 in the ARMv8-A Architecture Reference Manual
+ * (DDI 0487D.a)
+ */
+/* Maximum value of TCR_ELx.T(0,1)SZ is 39 */
+#define MIN_VIRT_ADDR_SPACE_SIZE (ULL(1) << (U(64) - TCR_TxSZ_MAX))
+
+/* Maximum value of TCR_ELx.T(0,1)SZ is 48 */
+#define MIN_VIRT_ADDR_SPACE_SIZE_TTST \
+ (ULL(1) << (U(64) - TCR_TxSZ_MAX_TTST))
+#define MAX_VIRT_ADDR_SPACE_SIZE (ULL(1) << (U(64) - TCR_TxSZ_MIN))
+
+/*
+ * Here we calculate the initial lookup level from the value of the given
+ * virtual address space size. For a 4 KB page size,
+ * - level 0 supports virtual address spaces of widths 48 to 40 bits;
+ * - level 1 from 39 to 31;
+ * - level 2 from 30 to 22.
+ * - level 3 from 21 to 16.
+ *
+ * Small Translation Table (Armv8.4-TTST) support allows the starting level
+ * of the translation table to be 3 for 4KB granularity. See section 12.2.55 in
+ * the ARMv8-A Architecture Reference Manual (DDI 0487D.a). In Armv8.3 and below
+ * wider or narrower address spaces are not supported. As a result, level 3
+ * cannot be used as initial lookup level with 4 KB granularity. See section
+ * D4.2.5 in the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
+ * information.
+ *
+ * For example, for a 35-bit address space (i.e. virt_addr_space_size ==
+ * 1 << 35), TCR.TxSZ will be programmed to (64 - 35) = 29. According to Table
+ * D4-11 in the ARM ARM, the initial lookup level for an address space like that
+ * is 1.
+ *
+ * Note that this macro assumes that the given virtual address space size is
+ * valid.
+ */
+#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_sz) \
+ (((_virt_addr_space_sz) > (ULL(1) << L0_XLAT_ADDRESS_SHIFT)) \
+ ? 0U \
+ : (((_virt_addr_space_sz) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) \
+ ? 1U \
+ : (((_virt_addr_space_sz) > (ULL(1) << L2_XLAT_ADDRESS_SHIFT)) \
+ ? 2U : 3U)))
+
+#endif /* XLAT_TABLES_AARCH64_H */
diff --git a/include/lib/xlat_tables/xlat_mmu_helpers.h b/include/lib/xlat_tables/xlat_mmu_helpers.h
new file mode 100644
index 0000000..fabc494
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_mmu_helpers.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2014-2018, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_MMU_HELPERS_H
+#define XLAT_MMU_HELPERS_H
+
+/*
+ * The following flags are passed to enable_mmu_xxx() to override the default
+ * values used to program system registers while enabling the MMU.
+ */
+
+/*
+ * When this flag is used, all data access to Normal memory from this EL and all
+ * Normal memory accesses to the translation tables of this EL are non-cacheable
+ * for all levels of data and unified cache until the caches are enabled by
+ * setting the bit SCTLR_ELx.C.
+ */
+#define DISABLE_DCACHE (U(1) << 0)
+
+/*
+ * Mark the translation tables as non-cacheable for the MMU table walker, which
+ * is a different observer from the PE/CPU. If the flag is not specified, the
+ * tables are cacheable for the MMU table walker.
+ *
+ * Note that, as far as the PE/CPU observer is concerned, the attributes used
+ * are the ones specified in the translation tables themselves. The MAIR
+ * register specifies the cacheability through the field AttrIndx of the lower
+ * attributes of the translation tables. The shareability is specified in the SH
+ * field of the lower attributes.
+ *
+ * The MMU table walker uses the attributes specified in the fields ORGNn, IRGNn
+ * and SHn of the TCR register to access the translation tables.
+ *
+ * The attributes specified in the TCR register and the tables can be different
+ * as there are no checks to prevent that. Special care must be taken to ensure
+ * that there aren't mismatches. The behaviour in that case is described in the
+ * sections 'Mismatched memory attributes' in the ARMv8 ARM.
+ */
+#define XLAT_TABLE_NC (U(1) << 1)
+
+/*
+ * Offsets into an mmu_cfg_params array generated by setup_mmu_cfg(). All
+ * parameters are 64 bits wide.
+ */
+#define MMU_CFG_MAIR 0
+#define MMU_CFG_TCR 1
+#define MMU_CFG_TTBR0 2
+#define MMU_CFG_PARAM_MAX 3
+
+#ifndef __ASSEMBLER__
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+
+/*
+ * Return the values that the MMU configuration registers must contain for the
+ * specified translation context. `params` must be a pointer to an array of size
+ * MMU_CFG_PARAM_MAX.
+ */
+void setup_mmu_cfg(uint64_t *params, unsigned int flags,
+ const uint64_t *base_table, unsigned long long max_pa,
+ uintptr_t max_va, int xlat_regime);
+
+#ifdef __aarch64__
+/* AArch64 specific translation table APIs */
+void enable_mmu_el1(unsigned int flags);
+void enable_mmu_el2(unsigned int flags);
+void enable_mmu_el3(unsigned int flags);
+void enable_mmu(unsigned int flags);
+
+void enable_mmu_direct_el1(unsigned int flags);
+void enable_mmu_direct_el2(unsigned int flags);
+void enable_mmu_direct_el3(unsigned int flags);
+#else
+/* AArch32 specific translation table API */
+void enable_mmu_svc_mon(unsigned int flags);
+void enable_mmu_hyp(unsigned int flags);
+
+void enable_mmu_direct_svc_mon(unsigned int flags);
+void enable_mmu_direct_hyp(unsigned int flags);
+#endif /* __aarch64__ */
+
+bool xlat_arch_is_granule_size_supported(size_t size);
+size_t xlat_arch_get_max_supported_granule_size(void);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* XLAT_MMU_HELPERS_H */
diff --git a/include/lib/xlat_tables/xlat_tables.h b/include/lib/xlat_tables/xlat_tables.h
new file mode 100644
index 0000000..24f833c
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2014-2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_H
+#define XLAT_TABLES_H
+
+#include <lib/xlat_tables/xlat_tables_defs.h>
+
+#ifndef __ASSEMBLER__
+#include <stddef.h>
+#include <stdint.h>
+
+#include <lib/xlat_tables/xlat_mmu_helpers.h>
+
+/* Helper macro to define entries for mmap_region_t. It creates
+ * identity mappings for each region.
+ */
+#define MAP_REGION_FLAT(adr, sz, attr) MAP_REGION(adr, adr, sz, attr)
+
+/* Helper macro to define entries for mmap_region_t. It allows the physical
+ * address 'pa' to be re-mapped to a different virtual address 'va' for each region.
+ */
+#define MAP_REGION(pa, va, sz, attr) {(pa), (va), (sz), (attr)}
+
+/*
+ * Shifts and masks to access fields of an mmap attribute
+ */
+#define MT_TYPE_MASK U(0x7)
+#define MT_TYPE(_attr) ((_attr) & MT_TYPE_MASK)
+/* Access permissions (RO/RW) */
+#define MT_PERM_SHIFT U(3)
+/* Security state (SECURE/NS) */
+#define MT_SEC_SHIFT U(4)
+/* Access permissions for instruction execution (EXECUTE/EXECUTE_NEVER) */
+#define MT_EXECUTE_SHIFT U(5)
+
+/*
+ * Memory mapping attributes
+ */
+
+/*
+ * Memory types supported.
+ * These are organised so that, going down the list, the memory types are
+ * getting weaker; conversely going up the list the memory types are getting
+ * stronger.
+ */
+#define MT_DEVICE U(0)
+#define MT_NON_CACHEABLE U(1)
+#define MT_MEMORY U(2)
+/* Values up to 7 are reserved to add new memory types in the future */
+
+#define MT_RO (U(0) << MT_PERM_SHIFT)
+#define MT_RW (U(1) << MT_PERM_SHIFT)
+
+#define MT_SECURE (U(0) << MT_SEC_SHIFT)
+#define MT_NS (U(1) << MT_SEC_SHIFT)
+
+/*
+ * Access permissions for instruction execution are only relevant for normal
+ * read-only memory, i.e. MT_MEMORY | MT_RO. They are ignored (and potentially
+ * overridden) otherwise:
+ * - Device memory is always marked as execute-never.
+ * - Read-write normal memory is always marked as execute-never.
+ */
+#define MT_EXECUTE (U(0) << MT_EXECUTE_SHIFT)
+#define MT_EXECUTE_NEVER (U(1) << MT_EXECUTE_SHIFT)
+
+/* Compound attributes for most common usages */
+#define MT_CODE (MT_MEMORY | MT_RO | MT_EXECUTE)
+#define MT_RO_DATA (MT_MEMORY | MT_RO | MT_EXECUTE_NEVER)
+
+/* Memory type for EL3 regions */
+#if ENABLE_RME
+#error FEAT_RME requires version 2 of the Translation Tables Library
+#else
+#define EL3_PAS MT_SECURE
+#endif
+
+/*
+ * Structure for specifying a single region of memory.
+ */
+typedef struct mmap_region {
+ unsigned long long base_pa;
+ uintptr_t base_va;
+ size_t size;
+ unsigned int attr;
+} mmap_region_t;
+
+/* Generic translation table APIs */
+void init_xlat_tables(void);
+void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
+ size_t size, unsigned int attr);
+void mmap_add(const mmap_region_t *mm);
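+
+/*
+ * Illustrative setup sketch (addresses, sizes and attributes below are
+ * hypothetical; the array is terminated by a zero-sized entry):
+ *
+ *   static const mmap_region_t plat_mmap[] = {
+ *           MAP_REGION_FLAT(0x09000000, 0x1000, MT_DEVICE | MT_RW | MT_SECURE),
+ *           MAP_REGION_FLAT(0x80000000, 0x40000, MT_MEMORY | MT_RW | MT_SECURE),
+ *           {0}
+ *   };
+ *
+ *   mmap_add(plat_mmap);
+ *   init_xlat_tables();
+ */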
+
+#endif /*__ASSEMBLER__*/
+#endif /* XLAT_TABLES_H */
diff --git a/include/lib/xlat_tables/xlat_tables_arch.h b/include/lib/xlat_tables/xlat_tables_arch.h
new file mode 100644
index 0000000..46e058c
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables_arch.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2017-2018, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_ARCH_H
+#define XLAT_TABLES_ARCH_H
+
+#ifdef __aarch64__
+#include "aarch64/xlat_tables_aarch64.h"
+#else
+#include "aarch32/xlat_tables_aarch32.h"
+#endif
+
+/*
+ * Evaluates to 1 if the given physical address space size is a power of 2,
+ * or 0 if it's not.
+ */
+#define CHECK_PHY_ADDR_SPACE_SIZE(size) \
+ (IS_POWER_OF_TWO(size))
+
+/*
+ * Compute the number of entries required at the initial lookup level to address
+ * the whole virtual address space.
+ */
+#define GET_NUM_BASE_LEVEL_ENTRIES(addr_space_size) \
+ ((addr_space_size) >> \
+ XLAT_ADDR_SHIFT(GET_XLAT_TABLE_LEVEL_BASE(addr_space_size)))
+
+#endif /* XLAT_TABLES_ARCH_H */
diff --git a/include/lib/xlat_tables/xlat_tables_compat.h b/include/lib/xlat_tables/xlat_tables_compat.h
new file mode 100644
index 0000000..3877c91
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables_compat.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2018-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_COMPAT_H
+#define XLAT_TABLES_COMPAT_H
+
+#if XLAT_TABLES_LIB_V2
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#else
+#include <lib/xlat_tables/xlat_tables.h>
+#endif
+
+#endif /* XLAT_TABLES_COMPAT_H */
diff --git a/include/lib/xlat_tables/xlat_tables_defs.h b/include/lib/xlat_tables/xlat_tables_defs.h
new file mode 100644
index 0000000..2d0949b
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables_defs.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_DEFS_H
+#define XLAT_TABLES_DEFS_H
+
+#include <arch.h>
+#include <lib/utils_def.h>
+#include <lib/xlat_tables/xlat_mmu_helpers.h>
+
+/* Miscellaneous MMU related constants */
+#define NUM_2MB_IN_GB (U(1) << 9)
+#define NUM_4K_IN_2MB (U(1) << 9)
+#define NUM_GB_IN_4GB (U(1) << 2)
+
+#define TWO_MB_SHIFT U(21)
+#define ONE_GB_SHIFT U(30)
+#define FOUR_KB_SHIFT U(12)
+
+#define ONE_GB_INDEX(x) ((x) >> ONE_GB_SHIFT)
+#define TWO_MB_INDEX(x) ((x) >> TWO_MB_SHIFT)
+#define FOUR_KB_INDEX(x) ((x) >> FOUR_KB_SHIFT)
+
+#define PAGE_SIZE_4KB U(4096)
+#define PAGE_SIZE_16KB U(16384)
+#define PAGE_SIZE_64KB U(65536)
+
+#define INVALID_DESC U(0x0)
+/*
+ * A block descriptor points to a region of memory bigger than the granule size
+ * (e.g. a 2MB region when the granule size is 4KB).
+ */
+#define BLOCK_DESC U(0x1) /* Table levels 0-2 */
+/* A table descriptor points to the next level of translation table. */
+#define TABLE_DESC U(0x3) /* Table levels 0-2 */
+/*
+ * A page descriptor points to a page, i.e. a memory region whose size is the
+ * translation granule size (e.g. 4KB).
+ */
+#define PAGE_DESC U(0x3) /* Table level 3 */
+
+#define DESC_MASK U(0x3)
+
+#define FIRST_LEVEL_DESC_N ONE_GB_SHIFT
+#define SECOND_LEVEL_DESC_N TWO_MB_SHIFT
+#define THIRD_LEVEL_DESC_N FOUR_KB_SHIFT
+
+/* XN: Translation regimes that support one VA range (EL2 and EL3). */
+#define XN (ULL(1) << 2)
+/* UXN, PXN: Translation regimes that support two VA ranges (EL1&0). */
+#define UXN (ULL(1) << 2)
+#define PXN (ULL(1) << 1)
+#define CONT_HINT (ULL(1) << 0)
+#define UPPER_ATTRS(x) (((x) & ULL(0x7)) << 52)
+
+#define NON_GLOBAL (U(1) << 9)
+#define ACCESS_FLAG (U(1) << 8)
+#define NSH (U(0x0) << 6)
+#define OSH (U(0x2) << 6)
+#define ISH (U(0x3) << 6)
+
+#ifdef __aarch64__
+/* Guarded Page bit */
+#define GP (ULL(1) << 50)
+#endif
+
+#define TABLE_ADDR_MASK ULL(0x0000FFFFFFFFF000)
+
+/*
+ * The ARMv8-A architecture allows translation granule sizes of 4KB, 16KB or
+ * 64KB. However, only the 4KB granule is supported at the moment.
+ */
+#define PAGE_SIZE_SHIFT FOUR_KB_SHIFT
+#define PAGE_SIZE (UL(1) << PAGE_SIZE_SHIFT)
+#define PAGE_SIZE_MASK (PAGE_SIZE - UL(1))
+#define IS_PAGE_ALIGNED(addr) (((addr) & PAGE_SIZE_MASK) == U(0))
+
+#if (ARM_ARCH_MAJOR == 7) && !ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING
+#define XLAT_ENTRY_SIZE_SHIFT U(2) /* Each MMU table entry is 4 bytes */
+#else
+#define XLAT_ENTRY_SIZE_SHIFT U(3) /* Each MMU table entry is 8 bytes */
+#endif
+#define XLAT_ENTRY_SIZE (U(1) << XLAT_ENTRY_SIZE_SHIFT)
+
+#define XLAT_TABLE_SIZE_SHIFT PAGE_SIZE_SHIFT /* Size of one complete table */
+#define XLAT_TABLE_SIZE (U(1) << XLAT_TABLE_SIZE_SHIFT)
+
+#define XLAT_TABLE_LEVEL_MAX U(3)
+
+/* Values for number of entries in each MMU translation table */
+#define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
+#define XLAT_TABLE_ENTRIES (U(1) << XLAT_TABLE_ENTRIES_SHIFT)
+#define XLAT_TABLE_ENTRIES_MASK (XLAT_TABLE_ENTRIES - U(1))
+
+/* Values to convert a memory address to an index into a translation table */
+#define L3_XLAT_ADDRESS_SHIFT PAGE_SIZE_SHIFT
+#define L2_XLAT_ADDRESS_SHIFT (L3_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+#define L1_XLAT_ADDRESS_SHIFT (L2_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+#define L0_XLAT_ADDRESS_SHIFT (L1_XLAT_ADDRESS_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+#define XLAT_ADDR_SHIFT(level) (PAGE_SIZE_SHIFT + \
+ ((XLAT_TABLE_LEVEL_MAX - (level)) * XLAT_TABLE_ENTRIES_SHIFT))
+
+#define XLAT_BLOCK_SIZE(level) (UL(1) << XLAT_ADDR_SHIFT(level))
+/* Mask to get the bits used to index inside a block of a certain level */
+#define XLAT_BLOCK_MASK(level) (XLAT_BLOCK_SIZE(level) - UL(1))
+/* Mask to get the address bits common to a block of a certain table level */
+#define XLAT_ADDR_MASK(level) (~XLAT_BLOCK_MASK(level))
+/*
+ * Extract from the given virtual address the index into the given lookup level.
+ * This macro assumes the system is using the 4KB translation granule.
+ */
+#define XLAT_TABLE_IDX(virtual_addr, level) \
+ (((virtual_addr) >> XLAT_ADDR_SHIFT(level)) & ULL(0x1FF))
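+
+/*
+ * For example, XLAT_TABLE_IDX(0x40201000, 3) extracts bits [20:12] of the
+ * address: (0x40201000 >> 12) & 0x1FF == 1.
+ */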
+
+/*
+ * The ARMv8 translation table descriptor format defines AP[2:1] as the Access
+ * Permissions bits, and does not define an AP[0] bit.
+ *
+ * AP[1] is valid only for a stage 1 translation that supports two VA ranges
+ * (i.e. in the ARMv8A.0 architecture, that is the S-EL1&0 regime). It is RES1
+ * when stage 1 translations can only support one VA range.
+ */
+#define AP2_SHIFT U(0x7)
+#define AP2_RO ULL(0x1)
+#define AP2_RW ULL(0x0)
+
+#define AP1_SHIFT U(0x6)
+#define AP1_ACCESS_UNPRIVILEGED ULL(0x1)
+#define AP1_NO_ACCESS_UNPRIVILEGED ULL(0x0)
+#define AP1_RES1 ULL(0x1)
+
+/*
+ * The following definitions must all be passed to the LOWER_ATTRS() macro to
+ * get the right bitmask.
+ */
+#define AP_RO (AP2_RO << 5)
+#define AP_RW (AP2_RW << 5)
+#define AP_ACCESS_UNPRIVILEGED (AP1_ACCESS_UNPRIVILEGED << 4)
+#define AP_NO_ACCESS_UNPRIVILEGED (AP1_NO_ACCESS_UNPRIVILEGED << 4)
+#define AP_ONE_VA_RANGE_RES1 (AP1_RES1 << 4)
+#define NS (U(0x1) << 3)
+#define EL3_S1_NSE (U(0x1) << 9)
+#define ATTR_NON_CACHEABLE_INDEX ULL(0x2)
+#define ATTR_DEVICE_INDEX ULL(0x1)
+#define ATTR_IWBWA_OWBWA_NTR_INDEX ULL(0x0)
+#define LOWER_ATTRS(x) (((x) & U(0xfff)) << 2)
+
+/* Normal Memory, Outer Write-Through non-transient, Inner Non-cacheable */
+#define ATTR_NON_CACHEABLE MAKE_MAIR_NORMAL_MEMORY(MAIR_NORM_NC, MAIR_NORM_NC)
+/* Device-nGnRE */
+#define ATTR_DEVICE MAIR_DEV_nGnRE
+/* Normal Memory, Outer Write-Back non-transient, Inner Write-Back non-transient */
+#define ATTR_IWBWA_OWBWA_NTR MAKE_MAIR_NORMAL_MEMORY(MAIR_NORM_WB_NTR_RWA, MAIR_NORM_WB_NTR_RWA)
+#define MAIR_ATTR_SET(attr, index) ((attr) << ((index) << 3))
+#define ATTR_INDEX_MASK U(0x3)
+#define ATTR_INDEX_GET(attr) (((attr) >> 2) & ATTR_INDEX_MASK)
+
+/*
+ * Shift values for the attributes fields in a block or page descriptor.
+ * See section D4.3.3 in the ARMv8-A ARM (issue B.a).
+ */
+
+/* Memory attributes index field, AttrIndx[2:0]. */
+#define ATTR_INDEX_SHIFT 2
+/* Non-secure bit, NS. */
+#define NS_SHIFT 5
+/* Shareability field, SH[1:0] */
+#define SHAREABILITY_SHIFT 8
+/* The Access Flag, AF. */
+#define ACCESS_FLAG_SHIFT 10
+/* The not global bit, nG. */
+#define NOT_GLOBAL_SHIFT 11
+/* Contiguous hint bit. */
+#define CONT_HINT_SHIFT 52
+/* Execute-never bits, XN. */
+#define PXN_SHIFT 53
+#define XN_SHIFT 54
+#define UXN_SHIFT XN_SHIFT
+
+#endif /* XLAT_TABLES_DEFS_H */
diff --git a/include/lib/xlat_tables/xlat_tables_v2.h b/include/lib/xlat_tables/xlat_tables_v2.h
new file mode 100644
index 0000000..64fe5ef
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables_v2.h
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XLAT_TABLES_V2_H
+#define XLAT_TABLES_V2_H
+
+#include <lib/xlat_tables/xlat_tables_defs.h>
+#include <lib/xlat_tables/xlat_tables_v2_helpers.h>
+
+#ifndef __ASSEMBLER__
+#include <stddef.h>
+#include <stdint.h>
+
+#include <lib/xlat_tables/xlat_mmu_helpers.h>
+
+/*
+ * Default granularity size for an mmap_region_t.
+ * Useful when no specific granularity is required.
+ *
+ * By default, choose the biggest possible block size allowed by the
+ * architectural state and granule size in order to minimize the number of page
+ * tables required for the mapping.
+ */
+#define REGION_DEFAULT_GRANULARITY XLAT_BLOCK_SIZE(MIN_LVL_BLOCK_DESC)
+
+/* Helper macro to define an mmap_region_t. */
+#define MAP_REGION(_pa, _va, _sz, _attr) \
+ MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, REGION_DEFAULT_GRANULARITY)
+
+/* Helper macro to define an mmap_region_t with an identity mapping. */
+#define MAP_REGION_FLAT(_adr, _sz, _attr) \
+ MAP_REGION(_adr, _adr, _sz, _attr)
+
+/*
+ * Helper macro to define entries for mmap_region_t. It allows 'pa' to be
+ * defined and sets 'va' to 0 for each region. To be used with mmap_add_alloc_va().
+ */
+#define MAP_REGION_ALLOC_VA(pa, sz, attr) MAP_REGION(pa, 0, sz, attr)
+
+/*
+ * Helper macro to define an mmap_region_t to map with the desired granularity
+ * of translation tables.
+ *
+ * The granularity value passed to this macro must be a valid block or page
+ * size. When using a 4KB translation granule, this might be 4KB, 2MB or 1GB.
+ * Passing REGION_DEFAULT_GRANULARITY is also allowed and means that the library
+ * is free to choose the granularity for this region. In this case, it is
+ * equivalent to the MAP_REGION() macro.
+ */
+#define MAP_REGION2(_pa, _va, _sz, _attr, _gr) \
+ MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr)
+
+/*
+ * Shifts and masks to access fields of an mmap attribute
+ */
+#define MT_TYPE_MASK U(0x7)
+#define MT_TYPE(_attr) ((_attr) & MT_TYPE_MASK)
+/* Access permissions (RO/RW) */
+#define MT_PERM_SHIFT U(3)
+
+/* Physical address space (SECURE/NS/Root/Realm) */
+#define MT_PAS_SHIFT U(4)
+#define MT_PAS_MASK (U(3) << MT_PAS_SHIFT)
+#define MT_PAS(_attr) ((_attr) & MT_PAS_MASK)
+
+/* Access permissions for instruction execution (EXECUTE/EXECUTE_NEVER) */
+#define MT_EXECUTE_SHIFT U(6)
+/* In the EL1&0 translation regime, User (EL0) or Privileged (EL1). */
+#define MT_USER_SHIFT U(7)
+
+/* Shareability attribute for the memory region */
+#define MT_SHAREABILITY_SHIFT U(8)
+#define MT_SHAREABILITY_MASK (U(3) << MT_SHAREABILITY_SHIFT)
+#define MT_SHAREABILITY(_attr) ((_attr) & MT_SHAREABILITY_MASK)
+
+/* All other bits are reserved */
+
+/*
+ * Memory mapping attributes
+ */
+
+/*
+ * Memory types supported.
+ * These are organised so that, going down the list, the memory types are
+ * getting weaker; conversely going up the list the memory types are getting
+ * stronger.
+ */
+#define MT_DEVICE U(0)
+#define MT_NON_CACHEABLE U(1)
+#define MT_MEMORY U(2)
+/* Values up to 7 are reserved to add new memory types in the future */
+
+#define MT_RO (U(0) << MT_PERM_SHIFT)
+#define MT_RW (U(1) << MT_PERM_SHIFT)
+
+#define MT_SECURE (U(0) << MT_PAS_SHIFT)
+#define MT_NS (U(1) << MT_PAS_SHIFT)
+#define MT_ROOT (U(2) << MT_PAS_SHIFT)
+#define MT_REALM (U(3) << MT_PAS_SHIFT)
+
+/*
+ * Access permissions for instruction execution are only relevant for normal
+ * read-only memory, i.e. MT_MEMORY | MT_RO. They are ignored (and potentially
+ * overridden) otherwise:
+ * - Device memory is always marked as execute-never.
+ * - Read-write normal memory is always marked as execute-never.
+ */
+#define MT_EXECUTE (U(0) << MT_EXECUTE_SHIFT)
+#define MT_EXECUTE_NEVER (U(1) << MT_EXECUTE_SHIFT)
+
+/*
+ * When mapping a region at EL0 or EL1, this attribute will be used to determine
+ * if a User mapping (EL0) will be created or a Privileged mapping (EL1).
+ */
+#define MT_USER (U(1) << MT_USER_SHIFT)
+#define MT_PRIVILEGED (U(0) << MT_USER_SHIFT)
+
+/*
+ * Shareability defines the visibility of any cache changes to
+ * all masters belonging to a shareable domain.
+ *
+ * MT_SHAREABILITY_ISH: For inner shareable domain
+ * MT_SHAREABILITY_OSH: For outer shareable domain
+ * MT_SHAREABILITY_NSH: For non shareable domain
+ */
+#define MT_SHAREABILITY_ISH (U(1) << MT_SHAREABILITY_SHIFT)
+#define MT_SHAREABILITY_OSH (U(2) << MT_SHAREABILITY_SHIFT)
+#define MT_SHAREABILITY_NSH (U(3) << MT_SHAREABILITY_SHIFT)
+
+/* Compound attributes for most common usages */
+#define MT_CODE (MT_MEMORY | MT_RO | MT_EXECUTE)
+#define MT_RO_DATA (MT_MEMORY | MT_RO | MT_EXECUTE_NEVER)
+#define MT_RW_DATA (MT_MEMORY | MT_RW | MT_EXECUTE_NEVER)
+
+/*
+ * Structure for specifying a single region of memory.
+ */
+typedef struct mmap_region {
+ unsigned long long base_pa;
+ uintptr_t base_va;
+ size_t size;
+ unsigned int attr;
+ /* Desired granularity. See the MAP_REGION2() macro for more details. */
+ size_t granularity;
+} mmap_region_t;
+
+/*
+ * Translation regimes supported by this library. EL_REGIME_INVALID tells the
+ * library to detect it at runtime.
+ */
+#define EL1_EL0_REGIME 1
+#define EL2_REGIME 2
+#define EL3_REGIME 3
+#define EL_REGIME_INVALID -1
+
+/* Memory type for EL3 regions. With RME, EL3 is in ROOT PAS */
+#if ENABLE_RME
+#define EL3_PAS MT_ROOT
+#else
+#define EL3_PAS MT_SECURE
+#endif /* ENABLE_RME */
+
+/*
+ * Declare the translation context type.
+ * Its definition is private.
+ */
+typedef struct xlat_ctx xlat_ctx_t;
+
+/*
+ * Statically allocate a translation context and associated structures. Also
+ * initialize them.
+ *
+ * _ctx_name:
+ * Prefix for the translation context variable.
+ * E.g. If _ctx_name is 'foo', the variable will be called 'foo_xlat_ctx'.
+ * Useful to distinguish multiple contexts from one another.
+ *
+ * _mmap_count:
+ * Number of mmap_region_t to allocate.
+ * Would typically be MAX_MMAP_REGIONS for the translation context describing
+ * the BL image currently executing.
+ *
+ * _xlat_tables_count:
+ * Number of sub-translation tables to allocate.
+ * Would typically be MAX_XLAT_TABLES for the translation context describing
+ * the BL image currently executing.
+ * Note that this is only for sub-tables; at the initial lookup level, there
+ * is always a single table.
+ *
+ * _virt_addr_space_size, _phy_addr_space_size:
+ * Size (in bytes) of the virtual (resp. physical) address space.
+ * Would typically be PLAT_VIRT_ADDR_SPACE_SIZE
+ * (resp. PLAT_PHY_ADDR_SPACE_SIZE) for the translation context describing the
+ * BL image currently executing.
+ */
+#define REGISTER_XLAT_CONTEXT(_ctx_name, _mmap_count, _xlat_tables_count, \
+ _virt_addr_space_size, _phy_addr_space_size) \
+ REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count), \
+ (_xlat_tables_count), \
+ (_virt_addr_space_size), \
+ (_phy_addr_space_size), \
+ EL_REGIME_INVALID, \
+ ".xlat_table", ".base_xlat_table")
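+
+/*
+ * Illustrative usage sketch ('bl_image' is a hypothetical context name; the
+ * other arguments are the usual platform-provided constants mentioned above):
+ *
+ *   REGISTER_XLAT_CONTEXT(bl_image, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
+ *                         PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
+ *
+ * This statically allocates and initializes a context named 'bl_image_xlat_ctx'.
+ */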
+
+/*
+ * Same as REGISTER_XLAT_CONTEXT plus the additional parameters:
+ *
+ * _xlat_regime:
+ * Specify the translation regime managed by this xlat_ctx_t instance. The
+ * values are the one from the EL*_REGIME definitions.
+ *
+ * _section_name:
+ * Specify the name of the section where the translation tables have to be
+ * placed by the linker.
+ *
+ * _base_table_section_name:
+ * Specify the name of the section where the base translation tables have to
+ * be placed by the linker.
+ */
+#define REGISTER_XLAT_CONTEXT2(_ctx_name, _mmap_count, _xlat_tables_count, \
+ _virt_addr_space_size, _phy_addr_space_size, \
+ _xlat_regime, _section_name, _base_table_section_name) \
+ REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, (_mmap_count), \
+ (_xlat_tables_count), \
+ (_virt_addr_space_size), \
+ (_phy_addr_space_size), \
+ (_xlat_regime), \
+ (_section_name), (_base_table_section_name) \
+)
+
+/******************************************************************************
+ * Generic translation table APIs.
+ * Each API comes in 2 variants:
+ * - one that acts on the current translation context for this BL image
+ * - another that acts on the given translation context instead. This variant
+ * is named after the 1st version, with an additional '_ctx' suffix.
+ *****************************************************************************/
+
+/*
+ * Initialize translation tables from the current list of mmap regions. Calling
+ * this function marks the transition point after which static regions can no
+ * longer be added.
+ */
+void init_xlat_tables(void);
+void init_xlat_tables_ctx(xlat_ctx_t *ctx);
+
+/*
+ * Fill all fields of a dynamic translation tables context. It must be done
+ * either statically with REGISTER_XLAT_CONTEXT() or at runtime with this
+ * function.
+ */
+void xlat_setup_dynamic_ctx(xlat_ctx_t *ctx, unsigned long long pa_max,
+ uintptr_t va_max, struct mmap_region *mmap,
+ unsigned int mmap_num, uint64_t **tables,
+ unsigned int tables_num, uint64_t *base_table,
+ int xlat_regime, int *mapped_regions);
+
+/*
+ * Add a static region with defined base PA and base VA. This function can only
+ * be used before initializing the translation tables. The region cannot be
+ * removed afterwards.
+ */
+void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
+ size_t size, unsigned int attr);
+void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm);
+
+/*
+ * Add an array of static regions with defined base PA and base VA. This
+ * function can only be used before initializing the translation tables. The
+ * regions cannot be removed afterwards.
+ */
+void mmap_add(const mmap_region_t *mm);
+void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm);
+
+/*
+ * Add a region with defined base PA. Returns base VA calculated using the
+ * highest existing region in the mmap array even if it fails to allocate the
+ * region.
+ */
+void mmap_add_region_alloc_va(unsigned long long base_pa, uintptr_t *base_va,
+ size_t size, unsigned int attr);
+void mmap_add_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
+
+/*
+ * Add an array of static regions with defined base PA, and fill the base VA
+ * field on the array of structs. This function can only be used before
+ * initializing the translation tables. The regions cannot be removed afterwards.
+ */
+void mmap_add_alloc_va(mmap_region_t *mm);
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+/*
+ * Add a dynamic region with defined base PA and base VA. This type of region
+ * can be added and removed even after the translation tables are initialized.
+ *
+ * Returns:
+ * 0: Success.
+ * EINVAL: Invalid values were used as arguments.
+ * ERANGE: Memory limits were surpassed.
+ * ENOMEM: Not enough space in the mmap array or not enough free xlat tables.
+ * EPERM: It overlaps another region in an invalid way.
+ */
+int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
+ size_t size, unsigned int attr);
+int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
+
+/*
+ * Add a dynamic region with defined base PA. Returns base VA calculated using
+ * the highest existing region in the mmap array even if it fails to allocate
+ * the region.
+ *
+ * mmap_add_dynamic_region_alloc_va() returns the allocated VA in 'base_va'.
+ * mmap_add_dynamic_region_alloc_va_ctx() returns it in 'mm->base_va'.
+ *
+ * It returns the same error values as mmap_add_dynamic_region().
+ */
+int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa,
+ uintptr_t *base_va,
+ size_t size, unsigned int attr);
+int mmap_add_dynamic_region_alloc_va_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
+
+/*
+ * Remove a region with the specified base VA and size. Only dynamic regions can
+ * be removed, and they can be removed even if the translation tables are
+ * initialized.
+ *
+ * Returns:
+ * 0: Success.
+ * EINVAL: The specified region wasn't found.
+ * EPERM: Trying to remove a static region.
+ */
+int mmap_remove_dynamic_region(uintptr_t base_va, size_t size);
+int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx,
+ uintptr_t base_va,
+ size_t size);
+
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+/*
+ * Change the memory attributes of the memory region starting from a given
+ * virtual address in a set of translation tables.
+ *
+ * This function can only be used after the translation tables have been
+ * initialized.
+ *
+ * The base address of the memory region must be aligned on a page boundary.
+ * The size of this memory region must be a multiple of a page size.
+ * The memory region must be already mapped by the given translation tables
+ * and it must be mapped at the granularity of a page.
+ *
+ * Return 0 on success, a negative value on error.
+ *
+ * In case of error, the memory attributes remain unchanged and this function
+ * has no effect.
+ *
+ * ctx
+ * Translation context to work on.
+ * base_va:
+ * Virtual address of the 1st page to change the attributes of.
+ * size:
+ * Size in bytes of the memory region.
+ * attr:
+ * New attributes of the page tables. The attributes that can be changed are
+ * data access (MT_RO/MT_RW), instruction access (MT_EXECUTE_NEVER/MT_EXECUTE)
+ * and user/privileged access (MT_USER/MT_PRIVILEGED) in the case of contexts
+ * that are used in the EL1&0 translation regime. Also, note that this
+ * function doesn't allow remapping a region as both RW and executable, or
+ * remapping device memory as executable.
+ *
+ * NOTE: The caller of this function must be able to write to the translation
+ * tables, i.e. the memory where they are stored must be mapped with read-write
+ * access permissions. This function assumes it is the case. If this is not
+ * the case then this function might trigger a data abort exception.
+ *
+ * NOTE2: The caller is responsible for making sure that the targeted
+ * translation tables are not modified by any other code while this function is
+ * executing.
+ */
+int xlat_change_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+ size_t size, uint32_t attr);
+int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr);
+
+#if PLAT_RO_XLAT_TABLES
+/*
+ * Change the memory attributes of the memory region encompassing the higher
+ * level translation tables to secure read-only data.
+ *
+ * Return 0 on success, a negative error code on error.
+ */
+int xlat_make_tables_readonly(void);
+#endif
+
+/*
+ * Query the memory attributes of a memory page in a set of translation tables.
+ *
+ * Return 0 on success, a negative error code on error.
+ * On success, the attributes are stored into *attr.
+ *
+ * ctx
+ * Translation context to work on.
+ * base_va
+ * Virtual address of the page to get the attributes of.
+ * There are no alignment restrictions on this address. The attributes of the
+ * memory page it lies within are returned.
+ * attr
+ * Output parameter where to store the attributes of the targeted memory page.
+ */
+int xlat_get_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+ uint32_t *attr);
+int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr);
+
+#endif /*__ASSEMBLER__*/
+#endif /* XLAT_TABLES_V2_H */
diff --git a/include/lib/xlat_tables/xlat_tables_v2_helpers.h b/include/lib/xlat_tables/xlat_tables_v2_helpers.h
new file mode 100644
index 0000000..992c94e
--- /dev/null
+++ b/include/lib/xlat_tables/xlat_tables_v2_helpers.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2017-2020, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This header file contains internal definitions that are not supposed to be
+ * used outside of this library code.
+ */
+
+#ifndef XLAT_TABLES_V2_HELPERS_H
+#define XLAT_TABLES_V2_HELPERS_H
+
+#ifndef XLAT_TABLES_V2_H
+#error "Do not include this header file directly. Include xlat_tables_v2.h instead."
+#endif
+
+#ifndef __ASSEMBLER__
+
+#include <stdbool.h>
+#include <stddef.h>
+
+#include <platform_def.h>
+
+#include <lib/cassert.h>
+#include <lib/utils_def.h>
+#include <lib/xlat_tables/xlat_tables_arch.h>
+#include <lib/xlat_tables/xlat_tables_defs.h>
+
+/* Forward declaration */
+struct mmap_region;
+
+/*
+ * Helper macro to define an mmap_region_t. This macro allows all the fields
+ * of the structure to be specified, but its parameter list is not guaranteed
+ * to remain stable as members are added to mmap_region_t.
+ */
+#define MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr) \
+ { \
+ .base_pa = (_pa), \
+ .base_va = (_va), \
+ .size = (_sz), \
+ .attr = (_attr), \
+ .granularity = (_gr), \
+ }
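+
+/*
+ * Illustrative expansion sketch: a hypothetical 2 MiB device region, mapped
+ * flat (VA == PA) at 0x40000000 with page granularity, could be described
+ * with this macro as follows. Platform code would normally use the
+ * MAP_REGION*() wrappers from xlat_tables_v2.h instead of this macro.
+ *
+ *   MAP_REGION_FULL_SPEC(0x40000000, 0x40000000, 0x200000,
+ *                        MT_DEVICE | MT_RW | MT_SECURE, PAGE_SIZE)
+ */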
+
+/* Struct that holds all information about the translation tables. */
+struct xlat_ctx {
+ /*
+ * Maximum allowed virtual and physical addresses.
+ */
+ unsigned long long pa_max_address;
+ uintptr_t va_max_address;
+
+ /*
+ * Array of all memory regions, stored in order of ascending end address
+ * and ascending size to simplify the code that allows overlapping
+ * regions. The list is terminated by the first entry with size == 0.
+ * The maximum size of the list is stored in `mmap_num`. `mmap` points
+ * to an array of `mmap_num + 1` elements, so that there is space for
+ * the final null entry.
+ */
+ struct mmap_region *mmap;
+ int mmap_num;
+
+ /*
+ * Array of finer-grained translation tables.
+ * For example, if the initial lookup level is 1 then this array would
+ * contain both level-2 and level-3 entries.
+ */
+ uint64_t (*tables)[XLAT_TABLE_ENTRIES];
+ int tables_num;
+#if PLAT_RO_XLAT_TABLES
+ bool readonly_tables;
+#endif
+ /*
+ * Keep track of how many regions are mapped in each table. The base
+ * table can't be unmapped, so there is no need to keep track of it.
+ */
+#if PLAT_XLAT_TABLES_DYNAMIC
+ int *tables_mapped_regions;
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+ int next_table;
+
+ /*
+ * Base translation table. It doesn't need to have the same number of
+ * entries as the tables used for the other levels.
+ */
+ uint64_t *base_table;
+ unsigned int base_table_entries;
+
+ /*
+ * Maximum physical and virtual addresses currently in use by the
+ * translation tables. These might get updated as memory regions are
+ * mapped and unmapped, but they will never go beyond pa_max_address
+ * and va_max_address.
+ */
+ unsigned long long max_pa;
+ uintptr_t max_va;
+
+ /* Level of the base translation table. */
+ unsigned int base_level;
+
+ /* Set to true when the translation tables are initialized. */
+ bool initialized;
+
+ /*
+ * Translation regime managed by this xlat_ctx_t. It should be one of
+ * the EL*_REGIME defines.
+ */
+ int xlat_regime;
+};
+
+#if PLAT_XLAT_TABLES_DYNAMIC
+#define XLAT_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count) \
+ static int _ctx_name##_mapped_regions[_xlat_tables_count];
+
+#define XLAT_REGISTER_DYNMAP_STRUCT(_ctx_name) \
+ .tables_mapped_regions = _ctx_name##_mapped_regions,
+#else
+#define XLAT_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count) \
+ /* do nothing */
+
+#define XLAT_REGISTER_DYNMAP_STRUCT(_ctx_name) \
+ /* do nothing */
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
+#if PLAT_RO_XLAT_TABLES
+#define XLAT_CTX_INIT_TABLE_ATTR() \
+ .readonly_tables = false,
+#else
+#define XLAT_CTX_INIT_TABLE_ATTR()
+ /* do nothing */
+#endif
+
+#define REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, _mmap_count, \
+ _xlat_tables_count, _virt_addr_space_size, \
+ _phy_addr_space_size, _xlat_regime, \
+ _table_section, _base_table_section) \
+ CASSERT(CHECK_PHY_ADDR_SPACE_SIZE(_phy_addr_space_size), \
+ assert_invalid_physical_addr_space_size_for_##_ctx_name);\
+ \
+ static mmap_region_t _ctx_name##_mmap[_mmap_count + 1]; \
+ \
+ static uint64_t _ctx_name##_xlat_tables[_xlat_tables_count] \
+ [XLAT_TABLE_ENTRIES] \
+ __aligned(XLAT_TABLE_SIZE) __section(_table_section); \
+ \
+ static uint64_t _ctx_name##_base_xlat_table \
+ [GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size)] \
+ __aligned(GET_NUM_BASE_LEVEL_ENTRIES(_virt_addr_space_size)\
+ * sizeof(uint64_t)) \
+ __section(_base_table_section); \
+ \
+ XLAT_ALLOC_DYNMAP_STRUCT(_ctx_name, _xlat_tables_count) \
+ \
+ static xlat_ctx_t _ctx_name##_xlat_ctx = { \
+ .pa_max_address = (_phy_addr_space_size) - 1ULL, \
+ .va_max_address = (_virt_addr_space_size) - 1UL, \
+ .mmap = _ctx_name##_mmap, \
+ .mmap_num = (_mmap_count), \
+ .tables = _ctx_name##_xlat_tables, \
+ .tables_num = ARRAY_SIZE(_ctx_name##_xlat_tables), \
+ XLAT_CTX_INIT_TABLE_ATTR() \
+ XLAT_REGISTER_DYNMAP_STRUCT(_ctx_name) \
+ .next_table = 0, \
+ .base_table = _ctx_name##_base_xlat_table, \
+ .base_table_entries = \
+ ARRAY_SIZE(_ctx_name##_base_xlat_table), \
+ .max_pa = 0U, \
+ .max_va = 0U, \
+ .base_level = GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_size),\
+ .initialized = false, \
+ .xlat_regime = (_xlat_regime) \
+ }
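+
+/*
+ * Illustrative invocation sketch; the context name, element counts, address
+ * space sizes and section names below are placeholders, and real code would
+ * normally go through the REGISTER_XLAT_CONTEXT*() wrappers defined in
+ * xlat_tables_v2.h rather than use this macro directly:
+ *
+ *   REGISTER_XLAT_CONTEXT_FULL_SPEC(example, 16, 8,
+ *                                   (1ULL << 32), (1ULL << 32),
+ *                                   EL1_EL0_REGIME,
+ *                                   ".xlat_table", ".bss");
+ *
+ * This would define `example_mmap`, `example_xlat_tables`,
+ * `example_base_xlat_table` and an xlat_ctx_t named `example_xlat_ctx`.
+ */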
+
+#endif /*__ASSEMBLER__*/
+
+#endif /* XLAT_TABLES_V2_HELPERS_H */
diff --git a/include/lib/zlib/tf_gunzip.h b/include/lib/zlib/tf_gunzip.h
new file mode 100644
index 0000000..9435860
--- /dev/null
+++ b/include/lib/zlib/tf_gunzip.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2018, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TF_GUNZIP_H
+#define TF_GUNZIP_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+int gunzip(uintptr_t *in_buf, size_t in_len, uintptr_t *out_buf,
+ size_t out_len, uintptr_t work_buf, size_t work_len);
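+
+/*
+ * Illustrative usage sketch; the addresses and sizes below are hypothetical
+ * placeholders. The input and output addresses are passed by reference,
+ * which allows the implementation to update them as data is consumed and
+ * produced.
+ *
+ *   uintptr_t in = COMPRESSED_IMAGE_BASE;
+ *   uintptr_t out = UNCOMPRESSED_IMAGE_BASE;
+ *
+ *   if (gunzip(&in, compressed_len, &out, out_max_len,
+ *              WORK_BUF_BASE, WORK_BUF_SIZE) != 0) {
+ *           ERROR("Decompression failed\n");
+ *   }
+ */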
+
+#endif /* TF_GUNZIP_H */