path: root/include/lib/el3_runtime

Diffstat
-rw-r--r--  include/lib/el3_runtime/aarch32/context.h    70
-rw-r--r--  include/lib/el3_runtime/aarch64/context.h   567
-rw-r--r--  include/lib/el3_runtime/context_mgmt.h       89
-rw-r--r--  include/lib/el3_runtime/cpu_data.h          237
-rw-r--r--  include/lib/el3_runtime/pubsub.h            106
-rw-r--r--  include/lib/el3_runtime/pubsub_events.h      43
6 files changed, 1112 insertions, 0 deletions
diff --git a/include/lib/el3_runtime/aarch32/context.h b/include/lib/el3_runtime/aarch32/context.h
new file mode 100644
index 0000000..5604c8e
--- /dev/null
+++ b/include/lib/el3_runtime/aarch32/context.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CONTEXT_H
+#define CONTEXT_H
+
+#include <lib/utils_def.h>
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'regs'
+ * structure at their correct offsets.
+ ******************************************************************************/
+#define CTX_REGS_OFFSET U(0x0)
+#define CTX_GPREG_R0 U(0x0)
+#define CTX_GPREG_R1 U(0x4)
+#define CTX_GPREG_R2 U(0x8)
+#define CTX_GPREG_R3 U(0xC)
+#define CTX_LR U(0x10)
+#define CTX_SCR U(0x14)
+#define CTX_SPSR U(0x18)
+#define CTX_NS_SCTLR U(0x1C)
+#define CTX_REGS_END U(0x20)
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+#include <lib/cassert.h>
+
+/*
+ * Common constants to help define the 'cpu_context' structure and its
+ * members below.
+ */
+#define WORD_SHIFT U(2)
+#define DEFINE_REG_STRUCT(name, num_regs) \
+ typedef struct name { \
+ uint32_t ctx_regs[num_regs]; \
+ } __aligned(8) name##_t
+
+/* Constants to determine the size of individual context structures */
+#define CTX_REG_ALL (CTX_REGS_END >> WORD_SHIFT)
+
+DEFINE_REG_STRUCT(regs, CTX_REG_ALL);
+
+#undef CTX_REG_ALL
+
+#define read_ctx_reg(ctx, offset) ((ctx)->ctx_regs[(offset) >> WORD_SHIFT])
+#define write_ctx_reg(ctx, offset, val) (((ctx)->ctx_regs[(offset) >> WORD_SHIFT]) \
+ = (val))
+typedef struct cpu_context {
+ regs_t regs_ctx;
+} cpu_context_t;
+
+/* Macros to access members of the 'cpu_context_t' structure */
+#define get_regs_ctx(h) (&((cpu_context_t *) h)->regs_ctx)
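+
+/*
+ * Example (illustrative): accessing the saved r0 of a context, assuming 'h'
+ * is a valid cpu_context_t handle:
+ *
+ *   uint32_t r0 = read_ctx_reg(get_regs_ctx(h), CTX_GPREG_R0);
+ */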
+
+/*
+ * Compile-time assertions related to the 'cpu_context' structure to
+ * ensure that the assembler's and the compiler's views of the structure
+ * member offsets are the same.
+ */
+CASSERT(CTX_REGS_OFFSET == __builtin_offsetof(cpu_context_t, regs_ctx), \
+ assert_core_context_regs_offset_mismatch);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* CONTEXT_H */
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
new file mode 100644
index 0000000..6c13166
--- /dev/null
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -0,0 +1,567 @@
+/*
+ * Copyright (c) 2013-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CONTEXT_H
+#define CONTEXT_H
+
+#include <lib/utils_def.h>
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'gp_regs'
+ * structure at their correct offsets.
+ ******************************************************************************/
+#define CTX_GPREGS_OFFSET U(0x0)
+#define CTX_GPREG_X0 U(0x0)
+#define CTX_GPREG_X1 U(0x8)
+#define CTX_GPREG_X2 U(0x10)
+#define CTX_GPREG_X3 U(0x18)
+#define CTX_GPREG_X4 U(0x20)
+#define CTX_GPREG_X5 U(0x28)
+#define CTX_GPREG_X6 U(0x30)
+#define CTX_GPREG_X7 U(0x38)
+#define CTX_GPREG_X8 U(0x40)
+#define CTX_GPREG_X9 U(0x48)
+#define CTX_GPREG_X10 U(0x50)
+#define CTX_GPREG_X11 U(0x58)
+#define CTX_GPREG_X12 U(0x60)
+#define CTX_GPREG_X13 U(0x68)
+#define CTX_GPREG_X14 U(0x70)
+#define CTX_GPREG_X15 U(0x78)
+#define CTX_GPREG_X16 U(0x80)
+#define CTX_GPREG_X17 U(0x88)
+#define CTX_GPREG_X18 U(0x90)
+#define CTX_GPREG_X19 U(0x98)
+#define CTX_GPREG_X20 U(0xa0)
+#define CTX_GPREG_X21 U(0xa8)
+#define CTX_GPREG_X22 U(0xb0)
+#define CTX_GPREG_X23 U(0xb8)
+#define CTX_GPREG_X24 U(0xc0)
+#define CTX_GPREG_X25 U(0xc8)
+#define CTX_GPREG_X26 U(0xd0)
+#define CTX_GPREG_X27 U(0xd8)
+#define CTX_GPREG_X28 U(0xe0)
+#define CTX_GPREG_X29 U(0xe8)
+#define CTX_GPREG_LR U(0xf0)
+#define CTX_GPREG_SP_EL0 U(0xf8)
+#define CTX_GPREGS_END U(0x100)
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'el3_state'
+ * structure at their correct offsets. Note that some of the registers are
+ * only 32 bits wide but are stored as 64-bit values for convenience.
+ ******************************************************************************/
+#define CTX_EL3STATE_OFFSET (CTX_GPREGS_OFFSET + CTX_GPREGS_END)
+#define CTX_SCR_EL3 U(0x0)
+#define CTX_ESR_EL3 U(0x8)
+#define CTX_RUNTIME_SP U(0x10)
+#define CTX_SPSR_EL3 U(0x18)
+#define CTX_ELR_EL3 U(0x20)
+#define CTX_PMCR_EL0 U(0x28)
+#define CTX_IS_IN_EL3 U(0x30)
+#define CTX_CPTR_EL3 U(0x38)
+#define CTX_ZCR_EL3 U(0x40)
+#define CTX_EL3STATE_END U(0x50) /* Align to the next 16 byte boundary */
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the
+ * 'el1_sys_regs' structure at their correct offsets. Note that some of the
+ * registers are only 32 bits wide but are stored as 64-bit values for
+ * convenience.
+ ******************************************************************************/
+#define CTX_EL1_SYSREGS_OFFSET (CTX_EL3STATE_OFFSET + CTX_EL3STATE_END)
+#define CTX_SPSR_EL1 U(0x0)
+#define CTX_ELR_EL1 U(0x8)
+#define CTX_SCTLR_EL1 U(0x10)
+#define CTX_TCR_EL1 U(0x18)
+#define CTX_CPACR_EL1 U(0x20)
+#define CTX_CSSELR_EL1 U(0x28)
+#define CTX_SP_EL1 U(0x30)
+#define CTX_ESR_EL1 U(0x38)
+#define CTX_TTBR0_EL1 U(0x40)
+#define CTX_TTBR1_EL1 U(0x48)
+#define CTX_MAIR_EL1 U(0x50)
+#define CTX_AMAIR_EL1 U(0x58)
+#define CTX_ACTLR_EL1 U(0x60)
+#define CTX_TPIDR_EL1 U(0x68)
+#define CTX_TPIDR_EL0 U(0x70)
+#define CTX_TPIDRRO_EL0 U(0x78)
+#define CTX_PAR_EL1 U(0x80)
+#define CTX_FAR_EL1 U(0x88)
+#define CTX_AFSR0_EL1 U(0x90)
+#define CTX_AFSR1_EL1 U(0x98)
+#define CTX_CONTEXTIDR_EL1 U(0xa0)
+#define CTX_VBAR_EL1 U(0xa8)
+
+/*
+ * If the platform is AArch64-only, there is no need to save and restore these
+ * AArch32 registers.
+ */
+#if CTX_INCLUDE_AARCH32_REGS
+#define CTX_SPSR_ABT U(0xb0) /* Align to the next 16 byte boundary */
+#define CTX_SPSR_UND U(0xb8)
+#define CTX_SPSR_IRQ U(0xc0)
+#define CTX_SPSR_FIQ U(0xc8)
+#define CTX_DACR32_EL2 U(0xd0)
+#define CTX_IFSR32_EL2 U(0xd8)
+#define CTX_AARCH32_END U(0xe0) /* Align to the next 16 byte boundary */
+#else
+#define CTX_AARCH32_END U(0xb0) /* Align to the next 16 byte boundary */
+#endif /* CTX_INCLUDE_AARCH32_REGS */
+
+/*
+ * If the timer registers aren't saved and restored, we don't have to reserve
+ * space for them in the context.
+ */
+#if NS_TIMER_SWITCH
+#define CTX_CNTP_CTL_EL0 (CTX_AARCH32_END + U(0x0))
+#define CTX_CNTP_CVAL_EL0 (CTX_AARCH32_END + U(0x8))
+#define CTX_CNTV_CTL_EL0 (CTX_AARCH32_END + U(0x10))
+#define CTX_CNTV_CVAL_EL0 (CTX_AARCH32_END + U(0x18))
+#define CTX_CNTKCTL_EL1 (CTX_AARCH32_END + U(0x20))
+#define CTX_TIMER_SYSREGS_END (CTX_AARCH32_END + U(0x30)) /* Align to the next 16 byte boundary */
+#else
+#define CTX_TIMER_SYSREGS_END CTX_AARCH32_END
+#endif /* NS_TIMER_SWITCH */
+
+#if CTX_INCLUDE_MTE_REGS
+#define CTX_TFSRE0_EL1 (CTX_TIMER_SYSREGS_END + U(0x0))
+#define CTX_TFSR_EL1 (CTX_TIMER_SYSREGS_END + U(0x8))
+#define CTX_RGSR_EL1 (CTX_TIMER_SYSREGS_END + U(0x10))
+#define CTX_GCR_EL1 (CTX_TIMER_SYSREGS_END + U(0x18))
+
+/* Align to the next 16 byte boundary */
+#define CTX_MTE_REGS_END (CTX_TIMER_SYSREGS_END + U(0x20))
+#else
+#define CTX_MTE_REGS_END CTX_TIMER_SYSREGS_END
+#endif /* CTX_INCLUDE_MTE_REGS */
+
+/*
+ * End of system registers.
+ */
+#define CTX_EL1_SYSREGS_END CTX_MTE_REGS_END
+
+/*
+ * EL2 register set
+ */
+
+#if CTX_INCLUDE_EL2_REGS
+/* For later discussion
+ * ICH_AP0R<n>_EL2
+ * ICH_AP1R<n>_EL2
+ * AMEVCNTVOFF0<n>_EL2
+ * AMEVCNTVOFF1<n>_EL2
+ * ICH_LR<n>_EL2
+ */
+#define CTX_EL2_SYSREGS_OFFSET (CTX_EL1_SYSREGS_OFFSET + CTX_EL1_SYSREGS_END)
+
+#define CTX_ACTLR_EL2 U(0x0)
+#define CTX_AFSR0_EL2 U(0x8)
+#define CTX_AFSR1_EL2 U(0x10)
+#define CTX_AMAIR_EL2 U(0x18)
+#define CTX_CNTHCTL_EL2 U(0x20)
+#define CTX_CNTVOFF_EL2 U(0x28)
+#define CTX_CPTR_EL2 U(0x30)
+#define CTX_DBGVCR32_EL2 U(0x38)
+#define CTX_ELR_EL2 U(0x40)
+#define CTX_ESR_EL2 U(0x48)
+#define CTX_FAR_EL2 U(0x50)
+#define CTX_HACR_EL2 U(0x58)
+#define CTX_HCR_EL2 U(0x60)
+#define CTX_HPFAR_EL2 U(0x68)
+#define CTX_HSTR_EL2 U(0x70)
+#define CTX_ICC_SRE_EL2 U(0x78)
+#define CTX_ICH_HCR_EL2 U(0x80)
+#define CTX_ICH_VMCR_EL2 U(0x88)
+#define CTX_MAIR_EL2 U(0x90)
+#define CTX_MDCR_EL2 U(0x98)
+#define CTX_PMSCR_EL2 U(0xa0)
+#define CTX_SCTLR_EL2 U(0xa8)
+#define CTX_SPSR_EL2 U(0xb0)
+#define CTX_SP_EL2 U(0xb8)
+#define CTX_TCR_EL2 U(0xc0)
+#define CTX_TPIDR_EL2 U(0xc8)
+#define CTX_TTBR0_EL2 U(0xd0)
+#define CTX_VBAR_EL2 U(0xd8)
+#define CTX_VMPIDR_EL2 U(0xe0)
+#define CTX_VPIDR_EL2 U(0xe8)
+#define CTX_VTCR_EL2 U(0xf0)
+#define CTX_VTTBR_EL2 U(0xf8)
+
+/* Only if MTE registers are in use */
+#define CTX_TFSR_EL2 U(0x100)
+
+/* Only if ENABLE_MPAM_FOR_LOWER_ELS == 1 */
+#define CTX_MPAM2_EL2 U(0x108)
+#define CTX_MPAMHCR_EL2 U(0x110)
+#define CTX_MPAMVPM0_EL2 U(0x118)
+#define CTX_MPAMVPM1_EL2 U(0x120)
+#define CTX_MPAMVPM2_EL2 U(0x128)
+#define CTX_MPAMVPM3_EL2 U(0x130)
+#define CTX_MPAMVPM4_EL2 U(0x138)
+#define CTX_MPAMVPM5_EL2 U(0x140)
+#define CTX_MPAMVPM6_EL2 U(0x148)
+#define CTX_MPAMVPM7_EL2 U(0x150)
+#define CTX_MPAMVPMV_EL2 U(0x158)
+
+/* Starting with Armv8.6 */
+#define CTX_HDFGRTR_EL2 U(0x160)
+#define CTX_HAFGRTR_EL2 U(0x168)
+#define CTX_HDFGWTR_EL2 U(0x170)
+#define CTX_HFGITR_EL2 U(0x178)
+#define CTX_HFGRTR_EL2 U(0x180)
+#define CTX_HFGWTR_EL2 U(0x188)
+#define CTX_CNTPOFF_EL2 U(0x190)
+
+/* Starting with Armv8.4 */
+#define CTX_CONTEXTIDR_EL2 U(0x198)
+#define CTX_TTBR1_EL2 U(0x1a0)
+#define CTX_VDISR_EL2 U(0x1a8)
+#define CTX_VSESR_EL2 U(0x1b0)
+#define CTX_VNCR_EL2 U(0x1b8)
+#define CTX_TRFCR_EL2 U(0x1c0)
+
+/* Starting with Armv8.5 */
+#define CTX_SCXTNUM_EL2 U(0x1c8)
+
+/* Register for FEAT_HCX */
+#define CTX_HCRX_EL2 U(0x1d0)
+
+/* Align to the next 16 byte boundary */
+#define CTX_EL2_SYSREGS_END U(0x1e0)
+
+#endif /* CTX_INCLUDE_EL2_REGS */
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'fp_regs'
+ * structure at their correct offsets.
+ ******************************************************************************/
+#if CTX_INCLUDE_EL2_REGS
+# define CTX_FPREGS_OFFSET (CTX_EL2_SYSREGS_OFFSET + CTX_EL2_SYSREGS_END)
+#else
+# define CTX_FPREGS_OFFSET (CTX_EL1_SYSREGS_OFFSET + CTX_EL1_SYSREGS_END)
+#endif
+#if CTX_INCLUDE_FPREGS
+#define CTX_FP_Q0 U(0x0)
+#define CTX_FP_Q1 U(0x10)
+#define CTX_FP_Q2 U(0x20)
+#define CTX_FP_Q3 U(0x30)
+#define CTX_FP_Q4 U(0x40)
+#define CTX_FP_Q5 U(0x50)
+#define CTX_FP_Q6 U(0x60)
+#define CTX_FP_Q7 U(0x70)
+#define CTX_FP_Q8 U(0x80)
+#define CTX_FP_Q9 U(0x90)
+#define CTX_FP_Q10 U(0xa0)
+#define CTX_FP_Q11 U(0xb0)
+#define CTX_FP_Q12 U(0xc0)
+#define CTX_FP_Q13 U(0xd0)
+#define CTX_FP_Q14 U(0xe0)
+#define CTX_FP_Q15 U(0xf0)
+#define CTX_FP_Q16 U(0x100)
+#define CTX_FP_Q17 U(0x110)
+#define CTX_FP_Q18 U(0x120)
+#define CTX_FP_Q19 U(0x130)
+#define CTX_FP_Q20 U(0x140)
+#define CTX_FP_Q21 U(0x150)
+#define CTX_FP_Q22 U(0x160)
+#define CTX_FP_Q23 U(0x170)
+#define CTX_FP_Q24 U(0x180)
+#define CTX_FP_Q25 U(0x190)
+#define CTX_FP_Q26 U(0x1a0)
+#define CTX_FP_Q27 U(0x1b0)
+#define CTX_FP_Q28 U(0x1c0)
+#define CTX_FP_Q29 U(0x1d0)
+#define CTX_FP_Q30 U(0x1e0)
+#define CTX_FP_Q31 U(0x1f0)
+#define CTX_FP_FPSR U(0x200)
+#define CTX_FP_FPCR U(0x208)
+#if CTX_INCLUDE_AARCH32_REGS
+#define CTX_FP_FPEXC32_EL2 U(0x210)
+#define CTX_FPREGS_END U(0x220) /* Align to the next 16 byte boundary */
+#else
+#define CTX_FPREGS_END U(0x210) /* Align to the next 16 byte boundary */
+#endif
+#else
+#define CTX_FPREGS_END U(0)
+#endif
+
+/*******************************************************************************
+ * Registers related to CVE-2018-3639
+ ******************************************************************************/
+#define CTX_CVE_2018_3639_OFFSET (CTX_FPREGS_OFFSET + CTX_FPREGS_END)
+#define CTX_CVE_2018_3639_DISABLE U(0)
+#define CTX_CVE_2018_3639_END U(0x10) /* Align to the next 16 byte boundary */
+
+/*******************************************************************************
+ * Registers related to ARMv8.3-PAuth.
+ ******************************************************************************/
+#define CTX_PAUTH_REGS_OFFSET (CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_END)
+#if CTX_INCLUDE_PAUTH_REGS
+#define CTX_PACIAKEY_LO U(0x0)
+#define CTX_PACIAKEY_HI U(0x8)
+#define CTX_PACIBKEY_LO U(0x10)
+#define CTX_PACIBKEY_HI U(0x18)
+#define CTX_PACDAKEY_LO U(0x20)
+#define CTX_PACDAKEY_HI U(0x28)
+#define CTX_PACDBKEY_LO U(0x30)
+#define CTX_PACDBKEY_HI U(0x38)
+#define CTX_PACGAKEY_LO U(0x40)
+#define CTX_PACGAKEY_HI U(0x48)
+#define CTX_PAUTH_REGS_END U(0x50) /* Align to the next 16 byte boundary */
+#else
+#define CTX_PAUTH_REGS_END U(0)
+#endif /* CTX_INCLUDE_PAUTH_REGS */
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+#include <lib/cassert.h>
+
+/*
+ * Common constants to help define the 'cpu_context' structure and its
+ * members below.
+ */
+#define DWORD_SHIFT U(3)
+#define DEFINE_REG_STRUCT(name, num_regs) \
+ typedef struct name { \
+ uint64_t ctx_regs[num_regs]; \
+ } __aligned(16) name##_t
+
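+/*
+ * For example, DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL) below expands to a
+ * 16-byte aligned structure:
+ *
+ *   typedef struct gp_regs {
+ *           uint64_t ctx_regs[CTX_GPREG_ALL];
+ *   } __aligned(16) gp_regs_t;
+ */
+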
+/* Constants to determine the size of individual context structures */
+#define CTX_GPREG_ALL (CTX_GPREGS_END >> DWORD_SHIFT)
+#define CTX_EL1_SYSREGS_ALL (CTX_EL1_SYSREGS_END >> DWORD_SHIFT)
+#if CTX_INCLUDE_EL2_REGS
+# define CTX_EL2_SYSREGS_ALL (CTX_EL2_SYSREGS_END >> DWORD_SHIFT)
+#endif
+#if CTX_INCLUDE_FPREGS
+# define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT)
+#endif
+#define CTX_EL3STATE_ALL (CTX_EL3STATE_END >> DWORD_SHIFT)
+#define CTX_CVE_2018_3639_ALL (CTX_CVE_2018_3639_END >> DWORD_SHIFT)
+#if CTX_INCLUDE_PAUTH_REGS
+# define CTX_PAUTH_REGS_ALL (CTX_PAUTH_REGS_END >> DWORD_SHIFT)
+#endif
+
+/*
+ * AArch64 general purpose register context structure. Usually only x0-x18
+ * and lr are saved, as the compiler is expected to preserve the callee-saved
+ * registers if the C runtime uses them, and the assembler code does not
+ * touch them. But in case of a world switch during exception handling, the
+ * callee-saved registers need to be saved as well.
+ */
+DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
+
+/*
+ * AArch64 EL1 system register context structure for preserving the
+ * architectural state during world switches.
+ */
+DEFINE_REG_STRUCT(el1_sysregs, CTX_EL1_SYSREGS_ALL);
+
+/*
+ * AArch64 EL2 system register context structure for preserving the
+ * architectural state during world switches.
+ */
+#if CTX_INCLUDE_EL2_REGS
+DEFINE_REG_STRUCT(el2_sysregs, CTX_EL2_SYSREGS_ALL);
+#endif
+
+/*
+ * AArch64 floating point register context structure for preserving
+ * the floating point state during switches from one security state to
+ * another.
+ */
+#if CTX_INCLUDE_FPREGS
+DEFINE_REG_STRUCT(fp_regs, CTX_FPREG_ALL);
+#endif
+
+/*
+ * Miscellaneous registers used by EL3 firmware to maintain its state
+ * across exception entries and exits
+ */
+DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL);
+
+/* Function pointer used by CVE-2018-3639 dynamic mitigation */
+DEFINE_REG_STRUCT(cve_2018_3639, CTX_CVE_2018_3639_ALL);
+
+/* Registers associated with ARMv8.3-PAuth */
+#if CTX_INCLUDE_PAUTH_REGS
+DEFINE_REG_STRUCT(pauth, CTX_PAUTH_REGS_ALL);
+#endif
+
+/*
+ * Macros to access members of any of the above structures using their
+ * offsets
+ */
+#define read_ctx_reg(ctx, offset) ((ctx)->ctx_regs[(offset) >> DWORD_SHIFT])
+#define write_ctx_reg(ctx, offset, val) (((ctx)->ctx_regs[(offset) >> DWORD_SHIFT]) \
+ = (uint64_t) (val))
+
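+/*
+ * Example (illustrative): given a 'gp_regs_t *gpregs', the saved x0 of that
+ * context can be read and updated as:
+ *
+ *   uint64_t x0 = read_ctx_reg(gpregs, CTX_GPREG_X0);
+ *   write_ctx_reg(gpregs, CTX_GPREG_X0, x0 + 1U);
+ */
+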
+/*
+ * Top-level context structure which is used by EL3 firmware to preserve
+ * the state of a core at the next lower EL in a given security state and
+ * save enough EL3 metadata to be able to return to that EL and security
+ * state. The context management library will be used to ensure that
+ * SP_EL3 always points to an instance of this structure at exception
+ * entry and exit.
+ */
+typedef struct cpu_context {
+ gp_regs_t gpregs_ctx;
+ el3_state_t el3state_ctx;
+ el1_sysregs_t el1_sysregs_ctx;
+#if CTX_INCLUDE_EL2_REGS
+ el2_sysregs_t el2_sysregs_ctx;
+#endif
+#if CTX_INCLUDE_FPREGS
+ fp_regs_t fpregs_ctx;
+#endif
+ cve_2018_3639_t cve_2018_3639_ctx;
+#if CTX_INCLUDE_PAUTH_REGS
+ pauth_t pauth_ctx;
+#endif
+} cpu_context_t;
+
+/* Macros to access members of the 'cpu_context_t' structure */
+#define get_el3state_ctx(h) (&((cpu_context_t *) h)->el3state_ctx)
+#if CTX_INCLUDE_FPREGS
+# define get_fpregs_ctx(h) (&((cpu_context_t *) h)->fpregs_ctx)
+#endif
+#define get_el1_sysregs_ctx(h) (&((cpu_context_t *) h)->el1_sysregs_ctx)
+#if CTX_INCLUDE_EL2_REGS
+# define get_el2_sysregs_ctx(h) (&((cpu_context_t *) h)->el2_sysregs_ctx)
+#endif
+#define get_gpregs_ctx(h) (&((cpu_context_t *) h)->gpregs_ctx)
+#define get_cve_2018_3639_ctx(h) (&((cpu_context_t *) h)->cve_2018_3639_ctx)
+#if CTX_INCLUDE_PAUTH_REGS
+# define get_pauth_ctx(h) (&((cpu_context_t *) h)->pauth_ctx)
+#endif
+
+/*
+ * Compile-time assertions related to the 'cpu_context' structure to
+ * ensure that the assembler's and the compiler's views of the structure
+ * member offsets are the same.
+ */
+CASSERT(CTX_GPREGS_OFFSET == __builtin_offsetof(cpu_context_t, gpregs_ctx), \
+ assert_core_context_gp_offset_mismatch);
+CASSERT(CTX_EL1_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, el1_sysregs_ctx), \
+ assert_core_context_el1_sys_offset_mismatch);
+#if CTX_INCLUDE_EL2_REGS
+CASSERT(CTX_EL2_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, el2_sysregs_ctx), \
+ assert_core_context_el2_sys_offset_mismatch);
+#endif
+#if CTX_INCLUDE_FPREGS
+CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx), \
+ assert_core_context_fp_offset_mismatch);
+#endif
+CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx), \
+ assert_core_context_el3state_offset_mismatch);
+CASSERT(CTX_CVE_2018_3639_OFFSET == __builtin_offsetof(cpu_context_t, cve_2018_3639_ctx), \
+ assert_core_context_cve_2018_3639_offset_mismatch);
+#if CTX_INCLUDE_PAUTH_REGS
+CASSERT(CTX_PAUTH_REGS_OFFSET == __builtin_offsetof(cpu_context_t, pauth_ctx), \
+ assert_core_context_pauth_offset_mismatch);
+#endif
+
+/*
+ * Helper macro to set the general purpose registers that correspond to
+ * parameters in an aapcs_64 call, i.e. x0-x7.
+ */
+#define set_aapcs_args0(ctx, x0) do { \
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, x0); \
+ } while (0)
+#define set_aapcs_args1(ctx, x0, x1) do { \
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1, x1); \
+ set_aapcs_args0(ctx, x0); \
+ } while (0)
+#define set_aapcs_args2(ctx, x0, x1, x2) do { \
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, x2); \
+ set_aapcs_args1(ctx, x0, x1); \
+ } while (0)
+#define set_aapcs_args3(ctx, x0, x1, x2, x3) do { \
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, x3); \
+ set_aapcs_args2(ctx, x0, x1, x2); \
+ } while (0)
+#define set_aapcs_args4(ctx, x0, x1, x2, x3, x4) do { \
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X4, x4); \
+ set_aapcs_args3(ctx, x0, x1, x2, x3); \
+ } while (0)
+#define set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5) do { \
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X5, x5); \
+ set_aapcs_args4(ctx, x0, x1, x2, x3, x4); \
+ } while (0)
+#define set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6) do { \
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X6, x6); \
+ set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5); \
+ } while (0)
+#define set_aapcs_args7(ctx, x0, x1, x2, x3, x4, x5, x6, x7) do { \
+ write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X7, x7); \
+ set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6); \
+ } while (0)
+
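+/*
+ * Example (illustrative, 'smc_fid' and args hypothetical): priming the
+ * argument registers of a target context, assuming 'ctx' points to a valid
+ * cpu_context_t:
+ *
+ *   set_aapcs_args2(ctx, smc_fid, arg1, arg2);
+ */
+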
+/*******************************************************************************
+ * Function prototypes
+ ******************************************************************************/
+void el1_sysregs_context_save(el1_sysregs_t *regs);
+void el1_sysregs_context_restore(el1_sysregs_t *regs);
+
+#if CTX_INCLUDE_EL2_REGS
+void el2_sysregs_context_save_common(el2_sysregs_t *regs);
+void el2_sysregs_context_restore_common(el2_sysregs_t *regs);
+#if ENABLE_SPE_FOR_LOWER_ELS
+void el2_sysregs_context_save_spe(el2_sysregs_t *regs);
+void el2_sysregs_context_restore_spe(el2_sysregs_t *regs);
+#endif /* ENABLE_SPE_FOR_LOWER_ELS */
+#if CTX_INCLUDE_MTE_REGS
+void el2_sysregs_context_save_mte(el2_sysregs_t *regs);
+void el2_sysregs_context_restore_mte(el2_sysregs_t *regs);
+#endif /* CTX_INCLUDE_MTE_REGS */
+#if ENABLE_MPAM_FOR_LOWER_ELS
+void el2_sysregs_context_save_mpam(el2_sysregs_t *regs);
+void el2_sysregs_context_restore_mpam(el2_sysregs_t *regs);
+#endif /* ENABLE_MPAM_FOR_LOWER_ELS */
+#if ENABLE_FEAT_FGT
+void el2_sysregs_context_save_fgt(el2_sysregs_t *regs);
+void el2_sysregs_context_restore_fgt(el2_sysregs_t *regs);
+#endif /* ENABLE_FEAT_FGT */
+#if ENABLE_FEAT_ECV
+void el2_sysregs_context_save_ecv(el2_sysregs_t *regs);
+void el2_sysregs_context_restore_ecv(el2_sysregs_t *regs);
+#endif /* ENABLE_FEAT_ECV */
+#if ENABLE_FEAT_VHE
+void el2_sysregs_context_save_vhe(el2_sysregs_t *regs);
+void el2_sysregs_context_restore_vhe(el2_sysregs_t *regs);
+#endif /* ENABLE_FEAT_VHE */
+#if RAS_EXTENSION
+void el2_sysregs_context_save_ras(el2_sysregs_t *regs);
+void el2_sysregs_context_restore_ras(el2_sysregs_t *regs);
+#endif /* RAS_EXTENSION */
+#if CTX_INCLUDE_NEVE_REGS
+void el2_sysregs_context_save_nv2(el2_sysregs_t *regs);
+void el2_sysregs_context_restore_nv2(el2_sysregs_t *regs);
+#endif /* CTX_INCLUDE_NEVE_REGS */
+#if ENABLE_TRF_FOR_NS
+void el2_sysregs_context_save_trf(el2_sysregs_t *regs);
+void el2_sysregs_context_restore_trf(el2_sysregs_t *regs);
+#endif /* ENABLE_TRF_FOR_NS */
+#if ENABLE_FEAT_CSV2_2
+void el2_sysregs_context_save_csv2(el2_sysregs_t *regs);
+void el2_sysregs_context_restore_csv2(el2_sysregs_t *regs);
+#endif /* ENABLE_FEAT_CSV2_2 */
+#if ENABLE_FEAT_HCX
+void el2_sysregs_context_save_hcx(el2_sysregs_t *regs);
+void el2_sysregs_context_restore_hcx(el2_sysregs_t *regs);
+#endif /* ENABLE_FEAT_HCX */
+#endif /* CTX_INCLUDE_EL2_REGS */
+
+#if CTX_INCLUDE_FPREGS
+void fpregs_context_save(fp_regs_t *regs);
+void fpregs_context_restore(fp_regs_t *regs);
+#endif
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* CONTEXT_H */
diff --git a/include/lib/el3_runtime/context_mgmt.h b/include/lib/el3_runtime/context_mgmt.h
new file mode 100644
index 0000000..1a76d8e
--- /dev/null
+++ b/include/lib/el3_runtime/context_mgmt.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CONTEXT_MGMT_H
+#define CONTEXT_MGMT_H
+
+#include <assert.h>
+#include <context.h>
+#include <stdint.h>
+
+#include <arch.h>
+
+/*******************************************************************************
+ * Forward declarations
+ ******************************************************************************/
+struct entry_point_info;
+
+/*******************************************************************************
+ * Function & variable prototypes
+ ******************************************************************************/
+void cm_init(void);
+void *cm_get_context_by_index(unsigned int cpu_idx,
+ unsigned int security_state);
+void cm_set_context_by_index(unsigned int cpu_idx,
+ void *context,
+ unsigned int security_state);
+void *cm_get_context(uint32_t security_state);
+void cm_set_context(void *context, uint32_t security_state);
+void cm_init_my_context(const struct entry_point_info *ep);
+void cm_init_context_by_index(unsigned int cpu_idx,
+ const struct entry_point_info *ep);
+void cm_setup_context(cpu_context_t *ctx, const struct entry_point_info *ep);
+void cm_prepare_el3_exit(uint32_t security_state);
+void cm_prepare_el3_exit_ns(void);
+
+#ifdef __aarch64__
+#if CTX_INCLUDE_EL2_REGS
+void cm_el2_sysregs_context_save(uint32_t security_state);
+void cm_el2_sysregs_context_restore(uint32_t security_state);
+#endif
+
+void cm_el1_sysregs_context_save(uint32_t security_state);
+void cm_el1_sysregs_context_restore(uint32_t security_state);
+void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint);
+void cm_set_elr_spsr_el3(uint32_t security_state,
+ uintptr_t entrypoint, uint32_t spsr);
+void cm_write_scr_el3_bit(uint32_t security_state,
+ uint32_t bit_pos,
+ uint32_t value);
+void cm_set_next_eret_context(uint32_t security_state);
+u_register_t cm_get_scr_el3(uint32_t security_state);
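+
+/*
+ * Typical usage sketch (illustrative): before returning to a lower EL,
+ * restore its EL1 state and select its context for the coming ERET:
+ *
+ *   cm_el1_sysregs_context_restore(NON_SECURE);
+ *   cm_set_next_eret_context(NON_SECURE);
+ */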
+
+/* Inline definitions */
+
+/*******************************************************************************
+ * This function programs the context used for exception return. It
+ * initialises SP_EL3 to point to a 'cpu_context' set up for the required
+ * security state.
+ ******************************************************************************/
+static inline void cm_set_next_context(void *context)
+{
+#if ENABLE_ASSERTIONS
+ uint64_t sp_mode;
+
+ /*
+ * Check that this function is called with SP_EL0 as the stack
+ * pointer
+ */
+ __asm__ volatile("mrs %0, SPSel\n"
+ : "=r" (sp_mode));
+
+ assert(sp_mode == MODE_SP_EL0);
+#endif /* ENABLE_ASSERTIONS */
+
+ __asm__ volatile("msr spsel, #1\n"
+ "mov sp, %0\n"
+ "msr spsel, #0\n"
+ : : "r" (context));
+}
+
+#else
+void *cm_get_next_context(void);
+void cm_set_next_context(void *context);
+#endif /* __aarch64__ */
+
+#endif /* CONTEXT_MGMT_H */
diff --git a/include/lib/el3_runtime/cpu_data.h b/include/lib/el3_runtime/cpu_data.h
new file mode 100644
index 0000000..2c7b619
--- /dev/null
+++ b/include/lib/el3_runtime/cpu_data.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2014-2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CPU_DATA_H
+#define CPU_DATA_H
+
+#include <platform_def.h> /* CACHE_WRITEBACK_GRANULE required */
+
+#include <bl31/ehf.h>
+
+/* Size of psci_cpu_data structure */
+#define PSCI_CPU_DATA_SIZE 12
+
+#ifdef __aarch64__
+
+/* 8-byte aligned size of the psci_cpu_data structure */
+#define PSCI_CPU_DATA_SIZE_ALIGNED ((PSCI_CPU_DATA_SIZE + 7) & ~7)
+
+#if ENABLE_RME
+/* Size of cpu_context array */
+#define CPU_DATA_CONTEXT_NUM 3
+/* Offset of cpu_ops_ptr, size 8 bytes */
+#define CPU_DATA_CPU_OPS_PTR 0x18
+#else /* ENABLE_RME */
+#define CPU_DATA_CONTEXT_NUM 2
+#define CPU_DATA_CPU_OPS_PTR 0x10
+#endif /* ENABLE_RME */
+
+#if ENABLE_PAUTH
+/* 8-byte aligned offset of apiakey[2], size 16 bytes */
+#define CPU_DATA_APIAKEY_OFFSET (0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \
+ + CPU_DATA_CPU_OPS_PTR)
+#define CPU_DATA_CRASH_BUF_OFFSET (0x10 + CPU_DATA_APIAKEY_OFFSET)
+#else /* ENABLE_PAUTH */
+#define CPU_DATA_CRASH_BUF_OFFSET (0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \
+ + CPU_DATA_CPU_OPS_PTR)
+#endif /* ENABLE_PAUTH */
+
+/* need enough space in crash buffer to save 8 registers */
+#define CPU_DATA_CRASH_BUF_SIZE 64
+
+#else /* !__aarch64__ */
+
+#if CRASH_REPORTING
+#error "Crash reporting is not supported in AArch32"
+#endif
+#define CPU_DATA_CPU_OPS_PTR 0x0
+#define CPU_DATA_CRASH_BUF_OFFSET (0x4 + PSCI_CPU_DATA_SIZE)
+
+#endif /* __aarch64__ */
+
+#if CRASH_REPORTING
+#define CPU_DATA_CRASH_BUF_END (CPU_DATA_CRASH_BUF_OFFSET + \
+ CPU_DATA_CRASH_BUF_SIZE)
+#else
+#define CPU_DATA_CRASH_BUF_END CPU_DATA_CRASH_BUF_OFFSET
+#endif
+
+/* cpu_data size is the data size rounded up to the platform cache line size */
+#define CPU_DATA_SIZE (((CPU_DATA_CRASH_BUF_END + \
+ CACHE_WRITEBACK_GRANULE - 1) / \
+ CACHE_WRITEBACK_GRANULE) * \
+ CACHE_WRITEBACK_GRANULE)
+
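+/*
+ * For example, with an assumed 64-byte CACHE_WRITEBACK_GRANULE, a 0x90-byte
+ * payload rounds up to a 0xc0-byte cpu_data_t.
+ */
+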
+#if ENABLE_RUNTIME_INSTRUMENTATION
+/* Temporary space to store PMF timestamps from assembly code */
+#define CPU_DATA_PMF_TS_COUNT 1
+#define CPU_DATA_PMF_TS0_OFFSET CPU_DATA_CRASH_BUF_END
+#define CPU_DATA_PMF_TS0_IDX 0
+#endif
+
+#ifndef __ASSEMBLER__
+
+#include <assert.h>
+#include <stdint.h>
+
+#include <arch_helpers.h>
+#include <lib/cassert.h>
+#include <lib/psci/psci.h>
+
+#include <platform_def.h>
+
+/* Offsets for the cpu_data structure */
+#define CPU_DATA_PSCI_LOCK_OFFSET __builtin_offsetof\
+ (cpu_data_t, psci_svc_cpu_data.pcpu_bakery_info)
+
+#if PLAT_PCPU_DATA_SIZE
+#define CPU_DATA_PLAT_PCPU_OFFSET __builtin_offsetof\
+ (cpu_data_t, platform_cpu_data)
+#endif
+
+typedef enum context_pas {
+ CPU_CONTEXT_SECURE = 0,
+ CPU_CONTEXT_NS,
+#if ENABLE_RME
+ CPU_CONTEXT_REALM,
+#endif
+ CPU_CONTEXT_NUM
+} context_pas_t;
+
+/*******************************************************************************
+ * Function & variable prototypes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * Cache of frequently used per-cpu data:
+ *   - pointers to the non-secure, realm and secure security state contexts
+ *   - address of the crash stack
+ * It is aligned to the cache line boundary to allow efficient concurrent
+ * manipulation of these pointers on different cpus.
+ *
+ * The data structure and the _cpu_data accessors should not be used directly
+ * by components that have per-cpu members; the member access macros below
+ * should be used instead.
+ ******************************************************************************/
+typedef struct cpu_data {
+#ifdef __aarch64__
+ void *cpu_context[CPU_DATA_CONTEXT_NUM];
+#endif /* __aarch64__ */
+ uintptr_t cpu_ops_ptr;
+ struct psci_cpu_data psci_svc_cpu_data;
+#if ENABLE_PAUTH
+ uint64_t apiakey[2];
+#endif
+#if CRASH_REPORTING
+ u_register_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3];
+#endif
+#if ENABLE_RUNTIME_INSTRUMENTATION
+ uint64_t cpu_data_pmf_ts[CPU_DATA_PMF_TS_COUNT];
+#endif
+#if PLAT_PCPU_DATA_SIZE
+ uint8_t platform_cpu_data[PLAT_PCPU_DATA_SIZE];
+#endif
+#if defined(IMAGE_BL31) && EL3_EXCEPTION_HANDLING
+ pe_exc_data_t ehf_data;
+#endif
+} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;
+
+extern cpu_data_t percpu_data[PLATFORM_CORE_COUNT];
+
+#ifdef __aarch64__
+CASSERT(CPU_DATA_CONTEXT_NUM == CPU_CONTEXT_NUM,
+ assert_cpu_data_context_num_mismatch);
+#endif
+
+#if ENABLE_PAUTH
+CASSERT(CPU_DATA_APIAKEY_OFFSET == __builtin_offsetof
+ (cpu_data_t, apiakey),
+ assert_cpu_data_pauth_stack_offset_mismatch);
+#endif
+
+#if CRASH_REPORTING
+/* verify assembler offsets match data structures */
+CASSERT(CPU_DATA_CRASH_BUF_OFFSET == __builtin_offsetof
+ (cpu_data_t, crash_buf),
+ assert_cpu_data_crash_stack_offset_mismatch);
+#endif
+
+CASSERT(CPU_DATA_SIZE == sizeof(cpu_data_t),
+ assert_cpu_data_size_mismatch);
+
+CASSERT(CPU_DATA_CPU_OPS_PTR == __builtin_offsetof
+ (cpu_data_t, cpu_ops_ptr),
+ assert_cpu_data_cpu_ops_ptr_offset_mismatch);
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+CASSERT(CPU_DATA_PMF_TS0_OFFSET == __builtin_offsetof
+ (cpu_data_t, cpu_data_pmf_ts[0]),
+ assert_cpu_data_pmf_ts0_offset_mismatch);
+#endif
+
+struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);
+
+#ifdef __aarch64__
+/* Return the cpu_data structure for the current CPU. */
+static inline struct cpu_data *_cpu_data(void)
+{
+ return (cpu_data_t *)read_tpidr_el3();
+}
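+
+/*
+ * Note (assumed init flow): init_cpu_data_ptr() is expected to program
+ * TPIDR_EL3 with the address of this core's percpu_data entry, so
+ * _cpu_data() reduces to a single system register read.
+ */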
+#else
+struct cpu_data *_cpu_data(void);
+#endif
+
+/*
+ * Returns the index of the cpu_context array for the given security state.
+ * All accesses to cpu_context should be through this helper to make sure
+ * an access is not out-of-bounds. The function assumes security_state is
+ * valid.
+ */
+static inline context_pas_t get_cpu_context_index(uint32_t security_state)
+{
+ if (security_state == SECURE) {
+ return CPU_CONTEXT_SECURE;
+ } else {
+#if ENABLE_RME
+ if (security_state == NON_SECURE) {
+ return CPU_CONTEXT_NS;
+ } else {
+ assert(security_state == REALM);
+ return CPU_CONTEXT_REALM;
+ }
+#else
+ assert(security_state == NON_SECURE);
+ return CPU_CONTEXT_NS;
+#endif
+ }
+}
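+
+/*
+ * Example (illustrative): fetching the saved context pointer of the current
+ * CPU for a given security state:
+ *
+ *   void *ctx = _cpu_data()->cpu_context[get_cpu_context_index(NON_SECURE)];
+ */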
+
+/**************************************************************************
+ * APIs for initialising and accessing per-cpu data
+ *************************************************************************/
+
+void init_cpu_data_ptr(void);
+void init_cpu_ops(void);
+
+#define get_cpu_data(_m) _cpu_data()->_m
+#define set_cpu_data(_m, _v) _cpu_data()->_m = (_v)
+#define get_cpu_data_by_index(_ix, _m) _cpu_data_by_index(_ix)->_m
+#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = (_v)
+/* ((cpu_data_t *)0)->_m is a dummy to get the sizeof the struct member _m */
+#define flush_cpu_data(_m) flush_dcache_range((uintptr_t) \
+ &(_cpu_data()->_m), \
+ sizeof(((cpu_data_t *)0)->_m))
+#define inv_cpu_data(_m) inv_dcache_range((uintptr_t) \
+ &(_cpu_data()->_m), \
+ sizeof(((cpu_data_t *)0)->_m))
+#define flush_cpu_data_by_index(_ix, _m) \
+ flush_dcache_range((uintptr_t) \
+ &(_cpu_data_by_index(_ix)->_m), \
+ sizeof(((cpu_data_t *)0)->_m))
+
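+/*
+ * Example (illustrative, 'state' hypothetical): updating a per-cpu PSCI
+ * member and flushing it back to main memory:
+ *
+ *   set_cpu_data(psci_svc_cpu_data.local_state, state);
+ *   flush_cpu_data(psci_svc_cpu_data);
+ */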
+
+#endif /* __ASSEMBLER__ */
+#endif /* CPU_DATA_H */
diff --git a/include/lib/el3_runtime/pubsub.h b/include/lib/el3_runtime/pubsub.h
new file mode 100644
index 0000000..64fe5cc
--- /dev/null
+++ b/include/lib/el3_runtime/pubsub.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PUBSUB_H
+#define PUBSUB_H
+
+#ifdef __LINKER__
+
+/* For the linker ... */
+#define __pubsub_start_sym(event) __pubsub_##event##_start
+#define __pubsub_end_sym(event) __pubsub_##event##_end
+#define __pubsub_section(event) __pubsub_##event
+
+/*
+ * REGISTER_PUBSUB_EVENT has different definitions in the linker and compiler
+ * contexts. In the linker context, it collects the pubsub sections for each
+ * event, placing guard symbols around each.
+ */
+#if defined(USE_ARM_LINK)
+#define REGISTER_PUBSUB_EVENT(event) \
+ __pubsub_start_sym(event) +0 FIXED \
+ { \
+ *(__pubsub_section(event)) \
+ } \
+ __pubsub_end_sym(event) +0 FIXED EMPTY 0 \
+ { \
+ /* placeholder */ \
+ }
+#else
+#define REGISTER_PUBSUB_EVENT(event) \
+ __pubsub_start_sym(event) = .; \
+ KEEP(*(__pubsub_section(event))); \
+ __pubsub_end_sym(event) = .
+#endif
+
+#else /* __LINKER__ */
+
+/* For the compiler ... */
+
+#include <assert.h>
+#include <cdefs.h>
+#include <stddef.h>
+
+#include <arch_helpers.h>
+
+#if defined(USE_ARM_LINK)
+#define __pubsub_start_sym(event) Load$$__pubsub_##event##_start$$Base
+#define __pubsub_end_sym(event) Load$$__pubsub_##event##_end$$Base
+#else
+#define __pubsub_start_sym(event) __pubsub_##event##_start
+#define __pubsub_end_sym(event) __pubsub_##event##_end
+#endif
+
+#define __pubsub_section(event) __section("__pubsub_" #event)
+
+/*
+ * In the compiler context, REGISTER_PUBSUB_EVENT declares the per-event
+ * symbols exported by the linker, which are required for the other pubsub
+ * macros to work.
+ */
+#define REGISTER_PUBSUB_EVENT(event) \
+ extern pubsub_cb_t __pubsub_start_sym(event)[]; \
+ extern pubsub_cb_t __pubsub_end_sym(event)[]
+
+/*
+ * Have the function 'func' called back when the specified event happens. This
+ * macro places the function address into the pubsub section, which is picked up
+ * and invoked by the invoke_pubsubs() function via the PUBLISH_EVENT* macros.
+ *
+ * The extern declaration is there to satisfy MISRA C-2012 rule 8.4.
+ */
+#define SUBSCRIBE_TO_EVENT(event, func) \
+ extern pubsub_cb_t __cb_func_##func##event __pubsub_section(event); \
+ pubsub_cb_t __cb_func_##func##event __pubsub_section(event) = (func)
+
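+/*
+ * Example (illustrative, 'hook' is a hypothetical handler matching the
+ * pubsub_cb_t signature defined below):
+ *
+ *   static void *hook(const void *arg)
+ *   {
+ *           return NULL;
+ *   }
+ *   SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, hook);
+ */
+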
+/*
+ * Iterate over subscribed handlers for a defined event. 'event' is the name of
+ * the event, and 'subscriber' a local variable of type 'pubsub_cb_t *'.
+ */
+#define for_each_subscriber(event, subscriber) \
+ for (subscriber = __pubsub_start_sym(event); \
+ subscriber < __pubsub_end_sym(event); \
+ subscriber++)
+
+/*
+ * Publish a defined event supplying an argument. All subscribed handlers are
+ * invoked, but the return values of the handlers are ignored for now.
+ */
+#define PUBLISH_EVENT_ARG(event, arg) \
+ do { \
+ pubsub_cb_t *subscriber; \
+ for_each_subscriber(event, subscriber) { \
+ (*subscriber)(arg); \
+ } \
+ } while (0)
+
+/* Publish a defined event with NULL argument */
+#define PUBLISH_EVENT(event) PUBLISH_EVENT_ARG(event, NULL)
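+
+/*
+ * Example (illustrative, 'max_off_lvl' hypothetical): publishing one of the
+ * events registered in pubsub_events.h:
+ *
+ *   PUBLISH_EVENT(psci_cpu_on_finish);
+ *   PUBLISH_EVENT_ARG(psci_suspend_pwrdown_start, &max_off_lvl);
+ */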
+
+/* Subscriber callback type */
+typedef void* (*pubsub_cb_t)(const void *arg);
+
+#endif /* __LINKER__ */
+#endif /* PUBSUB_H */
diff --git a/include/lib/el3_runtime/pubsub_events.h b/include/lib/el3_runtime/pubsub_events.h
new file mode 100644
index 0000000..5012082
--- /dev/null
+++ b/include/lib/el3_runtime/pubsub_events.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <lib/el3_runtime/pubsub.h>
+
+/*
+ * This file defines a list of pubsub events, declared using the
+ * REGISTER_PUBSUB_EVENT() macro.
+ */
+
+/*
+ * Event published after a CPU has been powered up and finished its
+ * initialization.
+ */
+REGISTER_PUBSUB_EVENT(psci_cpu_on_finish);
+
+/*
+ * These events are published before/after a CPU has been powered down/up
+ * via the PSCI CPU SUSPEND API.
+ */
+REGISTER_PUBSUB_EVENT(psci_suspend_pwrdown_start);
+REGISTER_PUBSUB_EVENT(psci_suspend_pwrdown_finish);
+
+#ifdef __aarch64__
+/*
+ * These events are published by the AArch64 context management framework
+ * after the secure context is restored/saved via the
+ * cm_el1_sysregs_context_{restore,save}() APIs.
+ */
+REGISTER_PUBSUB_EVENT(cm_entering_secure_world);
+REGISTER_PUBSUB_EVENT(cm_exited_secure_world);
+
+/*
+ * These events are published by the AArch64 context management framework
+ * after the normal context is restored/saved via the
+ * cm_el1_sysregs_context_{restore,save}() APIs.
+ */
+REGISTER_PUBSUB_EVENT(cm_entering_normal_world);
+REGISTER_PUBSUB_EVENT(cm_exited_normal_world);
+#endif /* __aarch64__ */