path: root/services/std_svc/sdei
author    Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-28 09:13:47 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-28 09:13:47 +0000
commit    102b0d2daa97dae68d3eed54d8fe37a9cc38a892 (patch)
tree      bcf648efac40ca6139842707f0eba5a4496a6dd2 /services/std_svc/sdei
parent    Initial commit. (diff)
download  arm-trusted-firmware-upstream/2.8.0+dfsg.tar.xz
          arm-trusted-firmware-upstream/2.8.0+dfsg.zip

Adding upstream version 2.8.0+dfsg. (tag: upstream/2.8.0+dfsg, branch: upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'services/std_svc/sdei')
-rw-r--r--  services/std_svc/sdei/sdei_dispatch.S     26
-rw-r--r--  services/std_svc/sdei/sdei_event.c        122
-rw-r--r--  services/std_svc/sdei/sdei_intr_mgmt.c    774
-rw-r--r--  services/std_svc/sdei/sdei_main.c         1114
-rw-r--r--  services/std_svc/sdei/sdei_private.h      248
-rw-r--r--  services/std_svc/sdei/sdei_state.c        150
6 files changed, 2434 insertions, 0 deletions
diff --git a/services/std_svc/sdei/sdei_dispatch.S b/services/std_svc/sdei/sdei_dispatch.S
new file mode 100644
index 0000000..8449e4b
--- /dev/null
+++ b/services/std_svc/sdei/sdei_dispatch.S
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl begin_sdei_synchronous_dispatch
+
+/*
+ * void begin_sdei_synchronous_dispatch(jmp_buf *buffer);
+ *
+ * Begin SDEI dispatch synchronously by setting up a jump point, and exiting
+ * EL3. This jump point is jumped to by the dispatcher after the event is
+ * completed by the client.
+ */
+func begin_sdei_synchronous_dispatch
+ stp x30, xzr, [sp, #-16]!
+ bl setjmp
+ cbz x0, 1f
+ ldp x30, xzr, [sp], #16
+ ret
+1:
+ b el3_exit
+endfunc begin_sdei_synchronous_dispatch
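
The routine above pairs with setjmp()/longjmp(): the first return of setjmp() exits EL3 so the client handler can run, and the completion path later longjmp()s back to resume the dispatcher. A minimal C sketch of that control flow (illustrative only, not part of this patch; the EL3 exit and context plumbing are elided):

#include <setjmp.h>

static jmp_buf dispatch_buf;

void begin_dispatch_sketch(void)
{
	if (setjmp(dispatch_buf) == 0) {
		/*
		 * First return: leave EL3 and let the client handler run.
		 * In the real code this is the 'b el3_exit' above and does
		 * not return here.
		 */
		return;
	}

	/*
	 * Second return: a longjmp() from the completion path lands here,
	 * resuming the dispatcher just after the dispatch call.
	 */
}

void complete_dispatch_sketch(void)
{
	/* Mirrors end_sdei_synchronous_dispatch() in sdei_intr_mgmt.c */
	longjmp(dispatch_buf, 1);
}
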
diff --git a/services/std_svc/sdei/sdei_event.c b/services/std_svc/sdei/sdei_event.c
new file mode 100644
index 0000000..e0c7971
--- /dev/null
+++ b/services/std_svc/sdei/sdei_event.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <lib/utils.h>
+
+#include "sdei_private.h"
+
+#define MAP_OFF(_map, _mapping) ((_map) - (_mapping)->map)
+
+/*
+ * Get the SDEI entry for the given mapping: returns a pointer to the
+ * corresponding SDEI entry.
+ *
+ * Both shared and private maps are stored in a single-dimensional array.
+ * Private event entries are kept per PE, forming a logical 2D array.
+ */
+sdei_entry_t *get_event_entry(sdei_ev_map_t *map)
+{
+ const sdei_mapping_t *mapping;
+ sdei_entry_t *cpu_priv_base;
+ unsigned int base_idx;
+ long int idx;
+
+ if (is_event_private(map)) {
+ /*
+ * For a private map, find the index of the mapping in the
+ * array.
+ */
+ mapping = SDEI_PRIVATE_MAPPING();
+ idx = MAP_OFF(map, mapping);
+
+ /* Base of private mappings for this CPU */
+ base_idx = plat_my_core_pos() * ((unsigned int) mapping->num_maps);
+ cpu_priv_base = &sdei_private_event_table[base_idx];
+
+ /*
+ * Return the address of the entry at the same index in the
+ * per-CPU event entry.
+ */
+ return &cpu_priv_base[idx];
+ } else {
+ mapping = SDEI_SHARED_MAPPING();
+ idx = MAP_OFF(map, mapping);
+
+ return &sdei_shared_event_table[idx];
+ }
+}
+
+/*
+ * Find event mapping for a given interrupt number: On success, returns pointer
+ * to the event mapping. On error, returns NULL.
+ */
+sdei_ev_map_t *find_event_map_by_intr(unsigned int intr_num, bool shared)
+{
+ const sdei_mapping_t *mapping;
+ sdei_ev_map_t *map;
+ unsigned int i;
+
+ /*
+	 * Look for a match in private or shared mappings, as requested. This
+	 * is a linear search. For large maps, a binary search could be
+	 * considered if the mappings were kept sorted by interrupt number.
+ */
+ mapping = shared ? SDEI_SHARED_MAPPING() : SDEI_PRIVATE_MAPPING();
+ iterate_mapping(mapping, i, map) {
+ if (map->intr == intr_num)
+ return map;
+ }
+
+ return NULL;
+}
+
+/*
+ * Find event mapping for a given event number: On success returns pointer to
+ * the event mapping. On error, returns NULL.
+ */
+sdei_ev_map_t *find_event_map(int ev_num)
+{
+ const sdei_mapping_t *mapping;
+ sdei_ev_map_t *map;
+ unsigned int i, j;
+
+ /*
+ * Iterate through mappings to find a match. This is a linear search.
+	 * However, since the mappings are required to be sorted by event
+	 * number, a binary search could be considered for large maps.
+ */
+ for_each_mapping_type(i, mapping) {
+ iterate_mapping(mapping, j, map) {
+ if (map->ev_num == ev_num)
+ return map;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Return the total number of currently registered SDEI events.
+ */
+int sdei_get_registered_event_count(void)
+{
+ const sdei_mapping_t *mapping;
+ sdei_ev_map_t *map;
+ unsigned int i;
+ unsigned int j;
+ int count = 0;
+
+ /* Add up reg counts for each mapping. */
+ for_each_mapping_type(i, mapping) {
+ iterate_mapping(mapping, j, map) {
+ count += map->reg_count;
+ }
+ }
+
+ return count;
+}
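
For reference, the private-event lookup in get_event_entry() above is plain row-major indexing: each CPU owns mapping->num_maps consecutive entries of sdei_private_event_table. A standalone sketch with hypothetical sizes (NUM_CPUS and NUM_PRIV_MAPS stand in for PLATFORM_CORE_COUNT and mapping->num_maps; they are not names from this patch):

#include <assert.h>

#define NUM_CPUS	4U
#define NUM_PRIV_MAPS	3U

static int priv_table[NUM_CPUS * NUM_PRIV_MAPS];

/* Entry for private mapping 'map_idx' on CPU 'core_pos' */
static int *priv_entry(unsigned int core_pos, unsigned int map_idx)
{
	assert((core_pos < NUM_CPUS) && (map_idx < NUM_PRIV_MAPS));
	return &priv_table[(core_pos * NUM_PRIV_MAPS) + map_idx];
}
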
diff --git a/services/std_svc/sdei/sdei_intr_mgmt.c b/services/std_svc/sdei/sdei_intr_mgmt.c
new file mode 100644
index 0000000..87a1fb7
--- /dev/null
+++ b/services/std_svc/sdei/sdei_intr_mgmt.c
@@ -0,0 +1,774 @@
+/*
+ * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <arch_features.h>
+#include <bl31/ehf.h>
+#include <bl31/interrupt_mgmt.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/cassert.h>
+#include <services/sdei.h>
+
+#include "sdei_private.h"
+
+/* x0-x17 GPREGS context */
+#define SDEI_SAVED_GPREGS 18U
+
+/* Maximum preemption nesting levels: Critical priority and Normal priority */
+#define MAX_EVENT_NESTING 2U
+
+/* Per-CPU SDEI state access macro */
+#define sdei_get_this_pe_state() (&cpu_state[plat_my_core_pos()])
+
+/* Structure to store information about an outstanding dispatch */
+typedef struct sdei_dispatch_context {
+ sdei_ev_map_t *map;
+ uint64_t x[SDEI_SAVED_GPREGS];
+ jmp_buf *dispatch_jmp;
+
+ /* Exception state registers */
+ uint64_t elr_el3;
+ uint64_t spsr_el3;
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ /* CVE-2018-3639 mitigation state */
+ uint64_t disable_cve_2018_3639;
+#endif
+} sdei_dispatch_context_t;
+
+/* Per-CPU SDEI state data */
+typedef struct sdei_cpu_state {
+ sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
+ unsigned short stack_top; /* Empty ascending */
+ bool pe_masked;
+ bool pending_enables;
+} sdei_cpu_state_t;
+
+/* SDEI states for all cores in the system */
+static sdei_cpu_state_t cpu_state[PLATFORM_CORE_COUNT];
+
+int64_t sdei_pe_mask(void)
+{
+ int64_t ret = 0;
+ sdei_cpu_state_t *state = sdei_get_this_pe_state();
+
+ /*
+	 * The return value indicates whether this call had any effect on the
+	 * mask status of this PE.
+ */
+ if (!state->pe_masked) {
+ state->pe_masked = true;
+ ret = 1;
+ }
+
+ return ret;
+}
+
+void sdei_pe_unmask(void)
+{
+ unsigned int i;
+ sdei_ev_map_t *map;
+ sdei_entry_t *se;
+ sdei_cpu_state_t *state = sdei_get_this_pe_state();
+ uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
+
+ /*
+ * If there are pending enables, iterate through the private mappings
+ * and enable those bound maps that are in enabled state. Also, iterate
+ * through shared mappings and enable interrupts of events that are
+ * targeted to this PE.
+ */
+ if (state->pending_enables) {
+ for_each_private_map(i, map) {
+ se = get_event_entry(map);
+ if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
+ plat_ic_enable_interrupt(map->intr);
+ }
+
+ for_each_shared_map(i, map) {
+ se = get_event_entry(map);
+
+ sdei_map_lock(map);
+ if (is_map_bound(map) && GET_EV_STATE(se, ENABLED) &&
+ (se->reg_flags == SDEI_REGF_RM_PE) &&
+ (se->affinity == my_mpidr)) {
+ plat_ic_enable_interrupt(map->intr);
+ }
+ sdei_map_unlock(map);
+ }
+ }
+
+ state->pending_enables = false;
+ state->pe_masked = false;
+}
+
+/* Push a dispatch context to the dispatch stack */
+static sdei_dispatch_context_t *push_dispatch(void)
+{
+ sdei_cpu_state_t *state = sdei_get_this_pe_state();
+ sdei_dispatch_context_t *disp_ctx;
+
+ /* Cannot have more than max events */
+ assert(state->stack_top < MAX_EVENT_NESTING);
+
+ disp_ctx = &state->dispatch_stack[state->stack_top];
+ state->stack_top++;
+
+ return disp_ctx;
+}
+
+/* Pop a dispatch context from the dispatch stack */
+static sdei_dispatch_context_t *pop_dispatch(void)
+{
+ sdei_cpu_state_t *state = sdei_get_this_pe_state();
+
+ if (state->stack_top == 0U)
+ return NULL;
+
+ assert(state->stack_top <= MAX_EVENT_NESTING);
+
+ state->stack_top--;
+
+ return &state->dispatch_stack[state->stack_top];
+}
+
+/* Retrieve the context at the top of dispatch stack */
+static sdei_dispatch_context_t *get_outstanding_dispatch(void)
+{
+ sdei_cpu_state_t *state = sdei_get_this_pe_state();
+
+ if (state->stack_top == 0U)
+ return NULL;
+
+ assert(state->stack_top <= MAX_EVENT_NESTING);
+
+ return &state->dispatch_stack[state->stack_top - 1U];
+}
+
+static sdei_dispatch_context_t *save_event_ctx(sdei_ev_map_t *map,
+ void *tgt_ctx)
+{
+ sdei_dispatch_context_t *disp_ctx;
+ const gp_regs_t *tgt_gpregs;
+ const el3_state_t *tgt_el3;
+
+ assert(tgt_ctx != NULL);
+ tgt_gpregs = get_gpregs_ctx(tgt_ctx);
+ tgt_el3 = get_el3state_ctx(tgt_ctx);
+
+ disp_ctx = push_dispatch();
+ assert(disp_ctx != NULL);
+ disp_ctx->map = map;
+
+ /* Save general purpose and exception registers */
+ memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
+ disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
+ disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);
+
+ return disp_ctx;
+}
+
+static void restore_event_ctx(const sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
+{
+ gp_regs_t *tgt_gpregs;
+ el3_state_t *tgt_el3;
+
+ assert(tgt_ctx != NULL);
+ tgt_gpregs = get_gpregs_ctx(tgt_ctx);
+ tgt_el3 = get_el3state_ctx(tgt_ctx);
+
+ CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
+ foo);
+
+ /* Restore general purpose and exception registers */
+ memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
+ write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
+ write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ cve_2018_3639_t *tgt_cve_2018_3639;
+ tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);
+
+ /* Restore CVE-2018-3639 mitigation state */
+ write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE,
+ disp_ctx->disable_cve_2018_3639);
+#endif
+}
+
+static void save_secure_context(void)
+{
+ cm_el1_sysregs_context_save(SECURE);
+}
+
+/* Restore Secure context and arrange to resume it at the next ERET */
+static void restore_and_resume_secure_context(void)
+{
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+}
+
+/*
+ * Restore Non-secure context and arrange to resume it at the next ERET. Return
+ * pointer to the Non-secure context.
+ */
+static cpu_context_t *restore_and_resume_ns_context(void)
+{
+ cpu_context_t *ns_ctx;
+
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+
+ ns_ctx = cm_get_context(NON_SECURE);
+ assert(ns_ctx != NULL);
+
+ return ns_ctx;
+}
+
+/*
+ * Prepare for ERET:
+ * - Set the ELR to the registered handler address
+ * - Set the SPSR register as described in the SDEI documentation and
+ * the AArch64.TakeException() pseudocode function in
+ * ARM DDI 0487F.c page J1-7635
+ */
+
+static void sdei_set_elr_spsr(sdei_entry_t *se, sdei_dispatch_context_t *disp_ctx)
+{
+ unsigned int client_el = sdei_client_el();
+ u_register_t sdei_spsr = SPSR_64(client_el, MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS);
+
+ u_register_t interrupted_pstate = disp_ctx->spsr_el3;
+
+ /* Check the SPAN bit in the client el SCTLR */
+ u_register_t client_el_sctlr;
+
+ if (client_el == MODE_EL2) {
+ client_el_sctlr = read_sctlr_el2();
+ } else {
+ client_el_sctlr = read_sctlr_el1();
+ }
+
+ /*
+	 * Check whether to force the PAN bit, or to use the value from the
+	 * interrupted EL's PSTATE, according to the check described in
+	 * TakeException. Since the client can only be Non-secure EL2 or
+	 * EL1, some of the conditions in ElIsInHost() are known to always
+	 * be true.
+	 * When client_el is EL2, we know that SCTLR_EL2 has a SPAN bit,
+	 * as we have already checked for the conditions HCR_EL2.E2H = 1
+	 * and HCR_EL2.TGE = 1.
+ */
+ u_register_t hcr_el2 = read_hcr();
+ bool el_is_in_host = is_armv8_1_vhe_present() &&
+ (hcr_el2 & HCR_TGE_BIT) &&
+ (hcr_el2 & HCR_E2H_BIT);
+
+ if (is_armv8_1_pan_present() &&
+ ((client_el == MODE_EL1) ||
+ (client_el == MODE_EL2 && el_is_in_host)) &&
+ ((client_el_sctlr & SCTLR_SPAN_BIT) == 0U)) {
+ sdei_spsr |= SPSR_PAN_BIT;
+ } else {
+ sdei_spsr |= (interrupted_pstate & SPSR_PAN_BIT);
+ }
+
+ /* If SSBS is implemented, take the value from the client el SCTLR */
+ u_register_t ssbs_enabled = (read_id_aa64pfr1_el1()
+ >> ID_AA64PFR1_EL1_SSBS_SHIFT)
+ & ID_AA64PFR1_EL1_SSBS_MASK;
+ if (ssbs_enabled != SSBS_UNAVAILABLE) {
+ u_register_t ssbs_bit = ((client_el_sctlr & SCTLR_DSSBS_BIT)
+ >> SCTLR_DSSBS_SHIFT)
+ << SPSR_SSBS_SHIFT_AARCH64;
+ sdei_spsr |= ssbs_bit;
+ }
+
+ /* If MTE is implemented in the client el set the TCO bit */
+ if (get_armv8_5_mte_support() >= MTE_IMPLEMENTED_ELX) {
+ sdei_spsr |= SPSR_TCO_BIT_AARCH64;
+ }
+
+ /* Take the DIT field from the pstate of the interrupted el */
+ sdei_spsr |= (interrupted_pstate & SPSR_DIT_BIT);
+
+ cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep, sdei_spsr);
+}
+
+/*
+ * Populate the Non-secure context so that the next ERET will dispatch to the
+ * SDEI client.
+ */
+static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
+ cpu_context_t *ctx, jmp_buf *dispatch_jmp)
+{
+ sdei_dispatch_context_t *disp_ctx;
+
+ /* Push the event and context */
+ disp_ctx = save_event_ctx(map, ctx);
+
+ /*
+ * Setup handler arguments:
+ *
+ * - x0: Event number
+ * - x1: Handler argument supplied at the time of event registration
+ * - x2: Interrupted PC
+ * - x3: Interrupted SPSR
+ */
+ SMC_SET_GP(ctx, CTX_GPREG_X0, (uint64_t) map->ev_num);
+ SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
+ SMC_SET_GP(ctx, CTX_GPREG_X2, disp_ctx->elr_el3);
+ SMC_SET_GP(ctx, CTX_GPREG_X3, disp_ctx->spsr_el3);
+
+ /* Setup the elr and spsr register to prepare for ERET */
+ sdei_set_elr_spsr(se, disp_ctx);
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+ cve_2018_3639_t *tgt_cve_2018_3639;
+ tgt_cve_2018_3639 = get_cve_2018_3639_ctx(ctx);
+
+ /* Save CVE-2018-3639 mitigation state */
+ disp_ctx->disable_cve_2018_3639 = read_ctx_reg(tgt_cve_2018_3639,
+ CTX_CVE_2018_3639_DISABLE);
+
+ /* Force SDEI handler to execute with mitigation enabled by default */
+ write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE, 0);
+#endif
+
+ disp_ctx->dispatch_jmp = dispatch_jmp;
+}
+
+/* Handle a triggered SDEI interrupt while events were masked on this PE */
+static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
+ sdei_cpu_state_t *state, unsigned int intr_raw)
+{
+ uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
+ bool disable = false;
+
+ /* Nothing to do for event 0 */
+ if (map->ev_num == SDEI_EVENT_0)
+ return;
+
+ /*
+ * For a private event, or for a shared event specifically routed to
+	 * this CPU, we disable the interrupt, leave the interrupt pending,
+	 * and EOI it.
+ */
+ if (is_event_private(map) || (se->reg_flags == SDEI_REGF_RM_PE))
+ disable = true;
+
+ if (se->reg_flags == SDEI_REGF_RM_PE)
+ assert(se->affinity == my_mpidr);
+
+ if (disable) {
+ plat_ic_disable_interrupt(map->intr);
+ plat_ic_set_interrupt_pending(map->intr);
+ plat_ic_end_of_interrupt(intr_raw);
+ state->pending_enables = true;
+
+ return;
+ }
+
+ /*
+ * We just received a shared event with routing set to ANY PE. The
+ * interrupt can't be delegated on this PE as SDEI events are masked.
+ * However, because its routing mode is ANY, it is possible that the
+ * event can be delegated on any other PE that hasn't masked events.
+ * Therefore, we set the interrupt back pending so as to give other
+ * suitable PEs a chance of handling it.
+ */
+ assert(plat_ic_is_spi(map->intr) != 0);
+ plat_ic_set_interrupt_pending(map->intr);
+
+ /*
+ * Leaving the same interrupt pending also means that the same interrupt
+ * can target this PE again as soon as this PE leaves EL3. Whether and
+ * how often that happens depends on the implementation of GIC.
+ *
+ * We therefore call a platform handler to resolve this situation.
+ */
+ plat_sdei_handle_masked_trigger(my_mpidr, map->intr);
+
+ /* This PE is masked. We EOI the interrupt, as it can't be delegated */
+ plat_ic_end_of_interrupt(intr_raw);
+}
+
+/* SDEI main interrupt handler */
+int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
+ void *cookie)
+{
+ sdei_entry_t *se;
+ cpu_context_t *ctx;
+ sdei_ev_map_t *map;
+ const sdei_dispatch_context_t *disp_ctx;
+ unsigned int sec_state;
+ sdei_cpu_state_t *state;
+ uint32_t intr;
+ jmp_buf dispatch_jmp;
+ const uint64_t mpidr = read_mpidr_el1();
+
+ /*
+ * To handle an event, the following conditions must be true:
+ *
+ * 1. Event must be signalled
+ * 2. Event must be enabled
+ * 3. This PE must be a target PE for the event
+ * 4. PE must be unmasked for SDEI
+ * 5. If this is a normal event, no event must be running
+ * 6. If this is a critical event, no critical event must be running
+ *
+ * (1) and (2) are true when this function is running
+ * (3) is enforced in GIC by selecting the appropriate routing option
+ * (4) is satisfied by client calling PE_UNMASK
+	 * (5) and (6) are enforced using interrupt priority, the RPR, in GIC:
+ * - Normal SDEI events belong to Normal SDE priority class
+ * - Critical SDEI events belong to Critical CSDE priority class
+ *
+ * The interrupt has already been acknowledged, and therefore is active,
+ * so no other PE can handle this event while we are at it.
+ *
+	 * Find out whether this is an SDEI interrupt. There must be an event
+	 * mapped to this interrupt.
+ */
+ intr = plat_ic_get_interrupt_id(intr_raw);
+ map = find_event_map_by_intr(intr, (plat_ic_is_spi(intr) != 0));
+ if (map == NULL) {
+ ERROR("No SDEI map for interrupt %u\n", intr);
+ panic();
+ }
+
+ /*
+	 * The received interrupt number must either correspond to event 0, or
+	 * must be a bound interrupt.
+ */
+ assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));
+
+ se = get_event_entry(map);
+ state = sdei_get_this_pe_state();
+
+ if (state->pe_masked) {
+ /*
+ * Interrupts received while this PE was masked can't be
+ * dispatched.
+ */
+ SDEI_LOG("interrupt %u on %" PRIx64 " while PE masked\n",
+ map->intr, mpidr);
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
+ handle_masked_trigger(map, se, state, intr_raw);
+
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ return 0;
+ }
+
+ /* Insert load barrier for signalled SDEI event */
+ if (map->ev_num == SDEI_EVENT_0)
+ dmbld();
+
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
+	/* Assert that a shared event routed to this PE was configured accordingly */
+ if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
+ assert(se->affinity == (mpidr & MPIDR_AFFINITY_MASK));
+ }
+
+ if (!can_sdei_state_trans(se, DO_DISPATCH)) {
+ SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
+ map->ev_num, se->state);
+
+ /*
+ * If the event is registered, leave the interrupt pending so
+ * that it's delivered when the event is enabled.
+ */
+ if (GET_EV_STATE(se, REGISTERED))
+ plat_ic_set_interrupt_pending(map->intr);
+
+ /*
+ * The interrupt was disabled or unregistered after the handler
+		 * started to execute, which means the interrupt is now already
+		 * disabled, and we just need to EOI it.
+ */
+ plat_ic_end_of_interrupt(intr_raw);
+
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ return 0;
+ }
+
+ disp_ctx = get_outstanding_dispatch();
+ if (is_event_critical(map)) {
+ /*
+ * If this event is Critical, and if there's an outstanding
+ * dispatch, assert the latter is a Normal dispatch. Critical
+ * events can preempt an outstanding Normal event dispatch.
+ */
+ if (disp_ctx != NULL)
+ assert(is_event_normal(disp_ctx->map));
+ } else {
+ /*
+ * If this event is Normal, assert that there are no outstanding
+ * dispatches. Normal events can't preempt any outstanding event
+ * dispatches.
+ */
+ assert(disp_ctx == NULL);
+ }
+
+ sec_state = get_interrupt_src_ss(flags);
+
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ SDEI_LOG("ACK %" PRIx64 ", ev:0x%x ss:%d spsr:%lx ELR:%lx\n",
+ mpidr, map->ev_num, sec_state, read_spsr_el3(), read_elr_el3());
+
+ ctx = handle;
+
+ /*
+ * Check if we interrupted secure state. Perform a context switch so
+ * that we can delegate to NS.
+ */
+ if (sec_state == SECURE) {
+ save_secure_context();
+ ctx = restore_and_resume_ns_context();
+ }
+
+ /* Synchronously dispatch event */
+ setup_ns_dispatch(map, se, ctx, &dispatch_jmp);
+ begin_sdei_synchronous_dispatch(&dispatch_jmp);
+
+ /*
+	 * We reach here when the client completes the event.
+ *
+ * If the cause of dispatch originally interrupted the Secure world,
+ * resume Secure.
+ *
+ * No need to save the Non-secure context ahead of a world switch: the
+ * Non-secure context was fully saved before dispatch, and has been
+ * returned to its pre-dispatch state.
+ */
+ if (sec_state == SECURE)
+ restore_and_resume_secure_context();
+
+ /*
+	 * The event was dispatched after receiving the SDEI interrupt.
+	 * With event handling now complete, EOI the corresponding
+	 * interrupt.
+ */
+ if ((map->ev_num != SDEI_EVENT_0) && !is_map_bound(map)) {
+ ERROR("Invalid SDEI mapping: ev=0x%x\n", map->ev_num);
+ panic();
+ }
+ plat_ic_end_of_interrupt(intr_raw);
+
+ return 0;
+}
+
+/*
+ * Explicitly dispatch the given SDEI event.
+ *
+ * When calling this API, the caller must be prepared for the SDEI dispatcher to
+ * restore the Non-secure context and make it the active one. This call returns
+ * only after the client has completed the dispatch. Then, the Non-secure
+ * context will be active, and the following ERET will return to Non-secure.
+ *
+ * Should the caller require re-entry to Secure, it must restore the Secure
+ * context and program registers for ERET.
+ */
+int sdei_dispatch_event(int ev_num)
+{
+ sdei_entry_t *se;
+ sdei_ev_map_t *map;
+ cpu_context_t *ns_ctx;
+ sdei_dispatch_context_t *disp_ctx;
+ sdei_cpu_state_t *state;
+ jmp_buf dispatch_jmp;
+
+ /* Can't dispatch if events are masked on this PE */
+ state = sdei_get_this_pe_state();
+ if (state->pe_masked)
+ return -1;
+
+ /* Event 0 can't be dispatched */
+ if (ev_num == SDEI_EVENT_0)
+ return -1;
+
+ /* Locate mapping corresponding to this event */
+ map = find_event_map(ev_num);
+ if (map == NULL)
+ return -1;
+
+ /* Only explicit events can be dispatched */
+ if (!is_map_explicit(map))
+ return -1;
+
+ /* Examine state of dispatch stack */
+ disp_ctx = get_outstanding_dispatch();
+ if (disp_ctx != NULL) {
+ /*
+ * There's an outstanding dispatch. If the outstanding dispatch
+ * is critical, no more dispatches are possible.
+ */
+ if (is_event_critical(disp_ctx->map))
+ return -1;
+
+ /*
+ * If the outstanding dispatch is Normal, only critical events
+ * can be dispatched.
+ */
+ if (is_event_normal(map))
+ return -1;
+ }
+
+ se = get_event_entry(map);
+ if (!can_sdei_state_trans(se, DO_DISPATCH))
+ return -1;
+
+ /*
+ * Prepare for NS dispatch by restoring the Non-secure context and
+ * marking that as active.
+ */
+ ns_ctx = restore_and_resume_ns_context();
+
+ /* Activate the priority corresponding to the event being dispatched */
+ ehf_activate_priority(sdei_event_priority(map));
+
+ /* Dispatch event synchronously */
+ setup_ns_dispatch(map, se, ns_ctx, &dispatch_jmp);
+ begin_sdei_synchronous_dispatch(&dispatch_jmp);
+
+ /*
+	 * We reach here when the client completes the event.
+ *
+ * Deactivate the priority level that was activated at the time of
+ * explicit dispatch.
+ */
+ ehf_deactivate_priority(sdei_event_priority(map));
+
+ return 0;
+}
+
+static void end_sdei_synchronous_dispatch(jmp_buf *buffer)
+{
+ longjmp(*buffer, 1);
+}
+
+int sdei_event_complete(bool resume, uint64_t pc)
+{
+ sdei_dispatch_context_t *disp_ctx;
+ sdei_entry_t *se;
+ sdei_ev_map_t *map;
+ cpu_context_t *ctx;
+ sdei_action_t act;
+ unsigned int client_el = sdei_client_el();
+
+ /* Return error if called without an active event */
+ disp_ctx = get_outstanding_dispatch();
+ if (disp_ctx == NULL)
+ return SDEI_EDENY;
+
+ /* Validate resumption point */
+ if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
+ return SDEI_EDENY;
+
+ map = disp_ctx->map;
+ assert(map != NULL);
+ se = get_event_entry(map);
+
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
+ act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
+ if (!can_sdei_state_trans(se, act)) {
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+ return SDEI_EDENY;
+ }
+
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ /* Having done sanity checks, pop dispatch */
+ (void) pop_dispatch();
+
+ SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
+ map->ev_num, read_spsr_el3(), read_elr_el3());
+
+ /*
+ * Restore Non-secure to how it was originally interrupted. Once done,
+ * it's up-to-date with the saved copy.
+ */
+ ctx = cm_get_context(NON_SECURE);
+ restore_event_ctx(disp_ctx, ctx);
+
+ if (resume) {
+ /*
+ * Complete-and-resume call. Prepare the Non-secure context
+ * (currently active) for complete and resume.
+ */
+ cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
+ MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));
+
+ /*
+ * Make it look as if a synchronous exception were taken at the
+ * supplied Non-secure resumption point. Populate SPSR and
+ * ELR_ELx so that an ERET from there works as expected.
+ *
+ * The assumption is that the client, if necessary, would have
+ * saved any live content in these registers before making this
+ * call.
+ */
+ if (client_el == MODE_EL2) {
+ write_elr_el2(disp_ctx->elr_el3);
+ write_spsr_el2(disp_ctx->spsr_el3);
+ } else {
+ /* EL1 */
+ write_elr_el1(disp_ctx->elr_el3);
+ write_spsr_el1(disp_ctx->spsr_el3);
+ }
+ }
+
+ /* End the outstanding dispatch */
+ end_sdei_synchronous_dispatch(disp_ctx->dispatch_jmp);
+
+ return 0;
+}
+
+int64_t sdei_event_context(void *handle, unsigned int param)
+{
+ sdei_dispatch_context_t *disp_ctx;
+
+ if (param >= SDEI_SAVED_GPREGS)
+ return SDEI_EINVAL;
+
+ /* Get outstanding dispatch on this CPU */
+ disp_ctx = get_outstanding_dispatch();
+ if (disp_ctx == NULL)
+ return SDEI_EDENY;
+
+ assert(disp_ctx->map != NULL);
+
+ if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
+ return SDEI_EDENY;
+
+ /*
+	 * No locking is required for the Running status, as this is the only
+	 * CPU which can complete the event.
+ */
+
+ return (int64_t) disp_ctx->x[param];
+}
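
The nesting rules enforced in sdei_intr_handler() and sdei_dispatch_event() reduce to a two-entry, empty-ascending stack per PE: a Critical dispatch may sit on top of a Normal one, never the reverse, and two dispatches of the same class never nest. A simplified sketch of that invariant (names are illustrative, not from the patch):

#include <assert.h>
#include <stdbool.h>

#define MAX_NESTING	2U

struct frame {
	bool critical;
};

static struct frame stack[MAX_NESTING];
static unsigned int top;	/* Empty ascending: indexes the next free slot */

static void push_frame(bool critical)
{
	/*
	 * A new dispatch may only preempt a Normal one, and only if it is
	 * itself Critical.
	 */
	assert(top < MAX_NESTING);
	assert((top == 0U) || (critical && !stack[top - 1U].critical));
	stack[top] = (struct frame){ .critical = critical };
	top++;
}
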
diff --git a/services/std_svc/sdei/sdei_main.c b/services/std_svc/sdei/sdei_main.c
new file mode 100644
index 0000000..44178ed
--- /dev/null
+++ b/services/std_svc/sdei/sdei_main.c
@@ -0,0 +1,1114 @@
+/*
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <bl31/bl31.h>
+#include <bl31/ehf.h>
+#include <bl31/interrupt_mgmt.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <context.h>
+#include <lib/cassert.h>
+#include <lib/el3_runtime/pubsub.h>
+#include <lib/utils.h>
+#include <plat/common/platform.h>
+#include <services/sdei.h>
+
+#include "sdei_private.h"
+
+#define MAJOR_VERSION 1ULL
+#define MINOR_VERSION 0ULL
+#define VENDOR_VERSION 0ULL
+
+#define MAKE_SDEI_VERSION(_major, _minor, _vendor) \
+ ((((_major)) << 48ULL) | (((_minor)) << 32ULL) | (_vendor))
+
+#define LOWEST_INTR_PRIORITY 0xff
+
+#define is_valid_affinity(_mpidr) (plat_core_pos_by_mpidr(_mpidr) >= 0)
+
+CASSERT(PLAT_SDEI_CRITICAL_PRI < PLAT_SDEI_NORMAL_PRI,
+ sdei_critical_must_have_higher_priority);
+
+static unsigned int num_dyn_priv_slots, num_dyn_shrd_slots;
+
+/* Initialise SDEI map entries */
+static void init_map(sdei_ev_map_t *map)
+{
+ map->reg_count = 0;
+}
+
+/* Convert mapping to SDEI class */
+static sdei_class_t map_to_class(sdei_ev_map_t *map)
+{
+ return is_event_critical(map) ? SDEI_CRITICAL : SDEI_NORMAL;
+}
+
+/* Clear SDEI event entries except state */
+static void clear_event_entries(sdei_entry_t *se)
+{
+ se->ep = 0;
+ se->arg = 0;
+ se->affinity = 0;
+ se->reg_flags = 0;
+}
+
+/* Perform CPU-specific state initialisation */
+static void *sdei_cpu_on_init(const void *arg)
+{
+ unsigned int i;
+ sdei_ev_map_t *map;
+ sdei_entry_t *se;
+
+ /* Initialize private mappings on this CPU */
+ for_each_private_map(i, map) {
+ se = get_event_entry(map);
+ clear_event_entries(se);
+ se->state = 0;
+ }
+
+ SDEI_LOG("Private events initialized on %lx\n", read_mpidr_el1());
+
+ /* All PEs start with SDEI events masked */
+ (void) sdei_pe_mask();
+
+ return NULL;
+}
+
+/* CPU initialisation after wakeup from suspend */
+static void *sdei_cpu_wakeup_init(const void *arg)
+{
+ SDEI_LOG("Events masked on %lx\n", read_mpidr_el1());
+
+ /* All PEs wake up with SDEI events masked */
+ sdei_pe_mask();
+
+ return 0;
+}
+
+/* Initialise an SDEI class */
+static void sdei_class_init(sdei_class_t class)
+{
+ unsigned int i;
+ bool zero_found __unused = false;
+ int ev_num_so_far __unused;
+ sdei_ev_map_t *map;
+
+ /* Sanity check and configuration of shared events */
+ ev_num_so_far = -1;
+ for_each_shared_map(i, map) {
+#if ENABLE_ASSERTIONS
+ /* Ensure mappings are sorted */
+ assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));
+
+ ev_num_so_far = map->ev_num;
+
+ /* Event 0 must not be shared */
+ assert(map->ev_num != SDEI_EVENT_0);
+
+ /* Check for valid event */
+ assert(map->ev_num >= 0);
+
+ /* Make sure it's a shared event */
+ assert(is_event_shared(map));
+
+ /* No shared mapping should have signalable property */
+ assert(!is_event_signalable(map));
+
+ /* Shared mappings can't be explicit */
+ assert(!is_map_explicit(map));
+#endif
+
+		/* Skip events not belonging to the class being initialised */
+ if (map_to_class(map) != class)
+ continue;
+
+ /* Platform events are always bound, so set the bound flag */
+ if (is_map_dynamic(map)) {
+ assert(map->intr == SDEI_DYN_IRQ);
+ assert(is_event_normal(map));
+ num_dyn_shrd_slots++;
+ } else {
+ /* Shared mappings must be bound to shared interrupt */
+ assert(plat_ic_is_spi(map->intr) != 0);
+ set_map_bound(map);
+ }
+
+ init_map(map);
+ }
+
+ /* Sanity check and configuration of private events for this CPU */
+ ev_num_so_far = -1;
+ for_each_private_map(i, map) {
+#if ENABLE_ASSERTIONS
+ /* Ensure mappings are sorted */
+ assert((ev_num_so_far < 0) || (map->ev_num > ev_num_so_far));
+
+ ev_num_so_far = map->ev_num;
+
+ if (map->ev_num == SDEI_EVENT_0) {
+ zero_found = true;
+
+ /* Event 0 must be a Secure SGI */
+ assert(is_secure_sgi(map->intr));
+
+ /*
+			 * Event 0 can only have the signalable flag (apart
+			 * from being private).
+ */
+ assert(map->map_flags == (SDEI_MAPF_SIGNALABLE |
+ SDEI_MAPF_PRIVATE));
+ } else {
+ /* No other mapping should have signalable property */
+ assert(!is_event_signalable(map));
+ }
+
+ /* Check for valid event */
+ assert(map->ev_num >= 0);
+
+ /* Make sure it's a private event */
+ assert(is_event_private(map));
+
+ /*
+ * Other than priority, explicit events can only have explicit
+ * and private flags set.
+ */
+ if (is_map_explicit(map)) {
+ assert((map->map_flags | SDEI_MAPF_CRITICAL) ==
+ (SDEI_MAPF_EXPLICIT | SDEI_MAPF_PRIVATE
+ | SDEI_MAPF_CRITICAL));
+ }
+#endif
+
+		/* Skip events not belonging to the class being initialised */
+ if (map_to_class(map) != class)
+ continue;
+
+ /* Platform events are always bound, so set the bound flag */
+ if (map->ev_num != SDEI_EVENT_0) {
+ if (is_map_dynamic(map)) {
+ assert(map->intr == SDEI_DYN_IRQ);
+ assert(is_event_normal(map));
+ num_dyn_priv_slots++;
+ } else if (is_map_explicit(map)) {
+ /*
+ * Explicit mappings don't have a backing
+ * SDEI interrupt, but verify that anyway.
+ */
+ assert(map->intr == SDEI_DYN_IRQ);
+ } else {
+ /*
+ * Private mappings must be bound to private
+ * interrupt.
+ */
+ assert(plat_ic_is_ppi((unsigned) map->intr) != 0);
+ set_map_bound(map);
+ }
+ }
+
+ init_map(map);
+ }
+
+ /* Ensure event 0 is in the mapping */
+ assert(zero_found);
+
+ (void) sdei_cpu_on_init(NULL);
+}
+
+/* SDEI dispatcher initialisation */
+void sdei_init(void)
+{
+ plat_sdei_setup();
+ sdei_class_init(SDEI_CRITICAL);
+ sdei_class_init(SDEI_NORMAL);
+
+ /* Register priority level handlers */
+ ehf_register_priority_handler(PLAT_SDEI_CRITICAL_PRI,
+ sdei_intr_handler);
+ ehf_register_priority_handler(PLAT_SDEI_NORMAL_PRI,
+ sdei_intr_handler);
+}
+
+/* Populate SDEI event entry */
+static void set_sdei_entry(sdei_entry_t *se, uint64_t ep, uint64_t arg,
+ unsigned int flags, uint64_t affinity)
+{
+ assert(se != NULL);
+
+ se->ep = ep;
+ se->arg = arg;
+ se->affinity = (affinity & MPIDR_AFFINITY_MASK);
+ se->reg_flags = flags;
+}
+
+static uint64_t sdei_version(void)
+{
+ return MAKE_SDEI_VERSION(MAJOR_VERSION, MINOR_VERSION, VENDOR_VERSION);
+}
+
+/* Validate flags and MPIDR values for REGISTER and ROUTING_SET calls */
+static int validate_flags(uint64_t flags, uint64_t mpidr)
+{
+ /* Validate flags */
+ switch (flags) {
+ case SDEI_REGF_RM_PE:
+ if (!is_valid_affinity(mpidr))
+ return SDEI_EINVAL;
+ break;
+ case SDEI_REGF_RM_ANY:
+ break;
+ default:
+ /* Unknown flags */
+ return SDEI_EINVAL;
+ }
+
+ return 0;
+}
+
+/* Set routing of an SDEI event */
+static int sdei_event_routing_set(int ev_num, uint64_t flags, uint64_t mpidr)
+{
+ int ret;
+ unsigned int routing;
+ sdei_ev_map_t *map;
+ sdei_entry_t *se;
+
+ ret = validate_flags(flags, mpidr);
+ if (ret != 0)
+ return ret;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (map == NULL)
+ return SDEI_EINVAL;
+
+ /* The event must not be private */
+ if (is_event_private(map))
+ return SDEI_EINVAL;
+
+ se = get_event_entry(map);
+
+ sdei_map_lock(map);
+
+ if (!is_map_bound(map) || is_event_private(map)) {
+ ret = SDEI_EINVAL;
+ goto finish;
+ }
+
+ if (!can_sdei_state_trans(se, DO_ROUTING)) {
+ ret = SDEI_EDENY;
+ goto finish;
+ }
+
+ /* Choose appropriate routing */
+ routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
+ INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);
+
+ /* Update event registration flag */
+ se->reg_flags = (unsigned int) flags;
+ if (flags == SDEI_REGF_RM_PE) {
+ se->affinity = (mpidr & MPIDR_AFFINITY_MASK);
+ }
+
+ /*
+ * ROUTING_SET is permissible only when event composite state is
+ * 'registered, disabled, and not running'. This means that the
+ * interrupt is currently disabled, and not active.
+ */
+ plat_ic_set_spi_routing(map->intr, routing, (u_register_t) mpidr);
+
+finish:
+ sdei_map_unlock(map);
+
+ return ret;
+}
+
+/* Register handler and argument for an SDEI event */
+static int64_t sdei_event_register(int ev_num,
+ uint64_t ep,
+ uint64_t arg,
+ uint64_t flags,
+ uint64_t mpidr)
+{
+ int ret;
+ unsigned int routing;
+ sdei_entry_t *se;
+ sdei_ev_map_t *map;
+ sdei_state_t backup_state;
+
+ if ((ep == 0U) || (plat_sdei_validate_entry_point(
+ ep, sdei_client_el()) != 0)) {
+ return SDEI_EINVAL;
+ }
+
+ ret = validate_flags(flags, mpidr);
+ if (ret != 0)
+ return ret;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (map == NULL)
+ return SDEI_EINVAL;
+
+ /* Private events always target the PE */
+ if (is_event_private(map)) {
+ /*
+ * SDEI internally handles private events in the same manner
+		 * as shared events with routing mode RM_PE. Since the routing
+ * mode flag and affinity fields are not used when registering
+ * a private event, set them here.
+ */
+ flags = SDEI_REGF_RM_PE;
+ /*
+ * Kernel may pass 0 as mpidr, as we set flags to
+ * SDEI_REGF_RM_PE, so set mpidr also.
+ */
+ mpidr = read_mpidr_el1();
+ }
+
+ se = get_event_entry(map);
+
+ /*
+	 * Even though the register operation is per-event (and, for private
+	 * events, additionally required on each PE), it has to be
+ * serialised with respect to bind/release, which are global operations.
+ * So we hold the lock throughout, unconditionally.
+ */
+ sdei_map_lock(map);
+
+ backup_state = se->state;
+ if (!can_sdei_state_trans(se, DO_REGISTER))
+ goto fallback;
+
+ /*
+ * When registering for dynamic events, make sure it's been bound
+ * already. This has to be the case as, without binding, the client
+ * can't know about the event number to register for.
+ */
+ if (is_map_dynamic(map) && !is_map_bound(map))
+ goto fallback;
+
+ if (is_event_private(map)) {
+ /* Multiple calls to register are possible for private events */
+ assert(map->reg_count >= 0);
+ } else {
+ /* Only single call to register is possible for shared events */
+ assert(map->reg_count == 0);
+ }
+
+ if (is_map_bound(map)) {
+ /* Meanwhile, did any PE ACK the interrupt? */
+ if (plat_ic_get_interrupt_active(map->intr) != 0U)
+ goto fallback;
+
+		/* The interrupt must currently be owned by Non-secure */
+ if (plat_ic_get_interrupt_type(map->intr) != INTR_TYPE_NS)
+ goto fallback;
+
+ /*
+ * Disable forwarding of new interrupt triggers to CPU
+ * interface.
+ */
+ plat_ic_disable_interrupt(map->intr);
+
+ /*
+ * Any events that are triggered after register and before
+ * enable should remain pending. Clear any previous interrupt
+ * triggers which are pending (except for SGIs). This has no
+		 * effect on level-triggered interrupts.
+ */
+ if (ev_num != SDEI_EVENT_0)
+ plat_ic_clear_interrupt_pending(map->intr);
+
+ /* Map interrupt to EL3 and program the correct priority */
+ plat_ic_set_interrupt_type(map->intr, INTR_TYPE_EL3);
+
+ /* Program the appropriate interrupt priority */
+ plat_ic_set_interrupt_priority(map->intr, sdei_event_priority(map));
+
+ /*
+ * Set the routing mode for shared event as requested. We
+ * already ensure that shared events get bound to SPIs.
+ */
+ if (is_event_shared(map)) {
+ routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
+ INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);
+ plat_ic_set_spi_routing(map->intr, routing,
+ (u_register_t) mpidr);
+ }
+ }
+
+ /* Populate event entries */
+ set_sdei_entry(se, ep, arg, (unsigned int) flags, mpidr);
+
+ /* Increment register count */
+ map->reg_count++;
+
+ sdei_map_unlock(map);
+
+ return 0;
+
+fallback:
+ /* Reinstate previous state */
+ se->state = backup_state;
+
+ sdei_map_unlock(map);
+
+ return SDEI_EDENY;
+}
+
+/* Enable SDEI event */
+static int64_t sdei_event_enable(int ev_num)
+{
+ sdei_ev_map_t *map;
+ sdei_entry_t *se;
+ int ret;
+ bool before, after;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (map == NULL)
+ return SDEI_EINVAL;
+
+ se = get_event_entry(map);
+ ret = SDEI_EDENY;
+
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
+ before = GET_EV_STATE(se, ENABLED);
+ if (!can_sdei_state_trans(se, DO_ENABLE))
+ goto finish;
+ after = GET_EV_STATE(se, ENABLED);
+
+ /*
+ * Enable interrupt for bound events only if there's a change in enabled
+ * state.
+ */
+ if (is_map_bound(map) && (!before && after))
+ plat_ic_enable_interrupt(map->intr);
+
+ ret = 0;
+
+finish:
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ return ret;
+}
+
+/* Disable SDEI event */
+static int sdei_event_disable(int ev_num)
+{
+ sdei_ev_map_t *map;
+ sdei_entry_t *se;
+ int ret;
+ bool before, after;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (map == NULL)
+ return SDEI_EINVAL;
+
+ se = get_event_entry(map);
+ ret = SDEI_EDENY;
+
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
+ before = GET_EV_STATE(se, ENABLED);
+ if (!can_sdei_state_trans(se, DO_DISABLE))
+ goto finish;
+ after = GET_EV_STATE(se, ENABLED);
+
+ /*
+ * Disable interrupt for bound events only if there's a change in
+ * enabled state.
+ */
+ if (is_map_bound(map) && (before && !after))
+ plat_ic_disable_interrupt(map->intr);
+
+ ret = 0;
+
+finish:
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ return ret;
+}
+
+/* Query SDEI event information */
+static int64_t sdei_event_get_info(int ev_num, int info)
+{
+ sdei_entry_t *se;
+ sdei_ev_map_t *map;
+
+ uint64_t flags;
+ bool registered;
+ uint64_t affinity;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (map == NULL)
+ return SDEI_EINVAL;
+
+ se = get_event_entry(map);
+
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
+ /* Sample state under lock */
+ registered = GET_EV_STATE(se, REGISTERED);
+ flags = se->reg_flags;
+ affinity = se->affinity;
+
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ switch (info) {
+ case SDEI_INFO_EV_TYPE:
+ return is_event_shared(map);
+
+ case SDEI_INFO_EV_NOT_SIGNALED:
+ return !is_event_signalable(map);
+
+ case SDEI_INFO_EV_PRIORITY:
+ return is_event_critical(map);
+
+ case SDEI_INFO_EV_ROUTING_MODE:
+ if (!is_event_shared(map))
+ return SDEI_EINVAL;
+ if (!registered)
+ return SDEI_EDENY;
+ return (flags == SDEI_REGF_RM_PE);
+
+ case SDEI_INFO_EV_ROUTING_AFF:
+ if (!is_event_shared(map))
+ return SDEI_EINVAL;
+ if (!registered)
+ return SDEI_EDENY;
+ if (flags != SDEI_REGF_RM_PE)
+ return SDEI_EINVAL;
+ return affinity;
+
+ default:
+ return SDEI_EINVAL;
+ }
+}
+
+/* Unregister an SDEI event */
+static int sdei_event_unregister(int ev_num)
+{
+ int ret = 0;
+ sdei_entry_t *se;
+ sdei_ev_map_t *map;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (map == NULL)
+ return SDEI_EINVAL;
+
+ se = get_event_entry(map);
+
+ /*
+	 * Even though the unregister operation is per-event (and, for
+	 * private events, additionally required on each PE), it has to
+ * be serialised with respect to bind/release, which are global
+ * operations. So we hold the lock throughout, unconditionally.
+ */
+ sdei_map_lock(map);
+
+ if (!can_sdei_state_trans(se, DO_UNREGISTER)) {
+ /*
+ * Even if the call is invalid, and the handler is running (for
+ * example, having unregistered from a running handler earlier),
+ * return pending error code; otherwise, return deny.
+ */
+ ret = GET_EV_STATE(se, RUNNING) ? SDEI_EPEND : SDEI_EDENY;
+
+ goto finish;
+ }
+
+ map->reg_count--;
+ if (is_event_private(map)) {
+ /* Multiple calls to register are possible for private events */
+ assert(map->reg_count >= 0);
+ } else {
+ /* Only single call to register is possible for shared events */
+ assert(map->reg_count == 0);
+ }
+
+ if (is_map_bound(map)) {
+ plat_ic_disable_interrupt(map->intr);
+
+ /*
+ * Clear pending interrupt. Skip for SGIs as they may not be
+ * cleared on interrupt controllers.
+ */
+ if (ev_num != SDEI_EVENT_0)
+ plat_ic_clear_interrupt_pending(map->intr);
+
+ assert(plat_ic_get_interrupt_type(map->intr) == INTR_TYPE_EL3);
+ plat_ic_set_interrupt_type(map->intr, INTR_TYPE_NS);
+ plat_ic_set_interrupt_priority(map->intr, LOWEST_INTR_PRIORITY);
+ }
+
+ clear_event_entries(se);
+
+ /*
+ * If the handler is running at the time of unregister, return the
+ * pending error code.
+ */
+ if (GET_EV_STATE(se, RUNNING))
+ ret = SDEI_EPEND;
+
+finish:
+ sdei_map_unlock(map);
+
+ return ret;
+}
+
+/* Query status of an SDEI event */
+static int sdei_event_status(int ev_num)
+{
+ sdei_ev_map_t *map;
+ sdei_entry_t *se;
+ sdei_state_t state;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (map == NULL)
+ return SDEI_EINVAL;
+
+ se = get_event_entry(map);
+
+ if (is_event_shared(map))
+ sdei_map_lock(map);
+
+ /* State value directly maps to the expected return format */
+ state = se->state;
+
+ if (is_event_shared(map))
+ sdei_map_unlock(map);
+
+ return (int) state;
+}
+
+/* Bind an SDEI event to an interrupt */
+static int sdei_interrupt_bind(unsigned int intr_num)
+{
+ sdei_ev_map_t *map;
+ bool retry = true, shared_mapping;
+
+ /* SGIs are not allowed to be bound */
+ if (plat_ic_is_sgi(intr_num) != 0)
+ return SDEI_EINVAL;
+
+ shared_mapping = (plat_ic_is_spi(intr_num) != 0);
+ do {
+ /*
+ * Bail out if there is already an event for this interrupt,
+ * either platform-defined or dynamic.
+ */
+ map = find_event_map_by_intr(intr_num, shared_mapping);
+ if (map != NULL) {
+ if (is_map_dynamic(map)) {
+ if (is_map_bound(map)) {
+ /*
+ * Dynamic event, already bound. Return
+ * event number.
+ */
+ return map->ev_num;
+ }
+ } else {
+ /* Binding non-dynamic event */
+ return SDEI_EINVAL;
+ }
+ }
+
+ /*
+ * The interrupt is not bound yet. Try to find a free slot to
+ * bind it. Free dynamic mappings have their interrupt set as
+ * SDEI_DYN_IRQ.
+ */
+ map = find_event_map_by_intr(SDEI_DYN_IRQ, shared_mapping);
+ if (map == NULL)
+ return SDEI_ENOMEM;
+
+ /* The returned mapping must be dynamic */
+ assert(is_map_dynamic(map));
+
+ /*
+ * We cannot assert for bound maps here, as we might be racing
+ * with another bind.
+ */
+
+ /* The requested interrupt must already belong to NS */
+ if (plat_ic_get_interrupt_type(intr_num) != INTR_TYPE_NS)
+ return SDEI_EDENY;
+
+ /*
+ * Interrupt programming and ownership transfer are deferred
+ * until register.
+ */
+
+ sdei_map_lock(map);
+ if (!is_map_bound(map)) {
+ map->intr = intr_num;
+ set_map_bound(map);
+ retry = false;
+ }
+ sdei_map_unlock(map);
+ } while (retry);
+
+ return map->ev_num;
+}
+
+/* Release an SDEI event previously bound to an interrupt */
+static int sdei_interrupt_release(int ev_num)
+{
+ int ret = 0;
+ sdei_ev_map_t *map;
+ sdei_entry_t *se;
+
+ /* Check if valid event number */
+ map = find_event_map(ev_num);
+ if (map == NULL)
+ return SDEI_EINVAL;
+
+ if (!is_map_dynamic(map))
+ return SDEI_EINVAL;
+
+ se = get_event_entry(map);
+
+ sdei_map_lock(map);
+
+ /* Event must have been unregistered before release */
+ if (map->reg_count != 0) {
+ ret = SDEI_EDENY;
+ goto finish;
+ }
+
+ /*
+ * Interrupt release never causes the state to change. We only check
+ * whether it's permissible or not.
+ */
+ if (!can_sdei_state_trans(se, DO_RELEASE)) {
+ ret = SDEI_EDENY;
+ goto finish;
+ }
+
+ if (is_map_bound(map)) {
+ /*
+ * Deny release if the interrupt is active, which means it's
+ * probably being acknowledged and handled elsewhere.
+ */
+ if (plat_ic_get_interrupt_active(map->intr) != 0U) {
+ ret = SDEI_EDENY;
+ goto finish;
+ }
+
+ /*
+ * Interrupt programming and ownership transfer are already done
+ * during unregister.
+ */
+
+ map->intr = SDEI_DYN_IRQ;
+ clr_map_bound(map);
+ } else {
+ SDEI_LOG("Error release bound:%d cnt:%d\n", is_map_bound(map),
+ map->reg_count);
+ ret = SDEI_EINVAL;
+ }
+
+finish:
+ sdei_map_unlock(map);
+
+ return ret;
+}
+
+/* Perform reset of private SDEI events */
+static int sdei_private_reset(void)
+{
+ sdei_ev_map_t *map;
+ int ret = 0, final_ret = 0;
+ unsigned int i;
+
+ /* Unregister all private events */
+ for_each_private_map(i, map) {
+ /*
+ * The unregister can fail if the event is not registered, which
+ * is allowed, and a deny will be returned. But if the event is
+		 * running, or its unregister is pending, the call fails.
+ */
+ ret = sdei_event_unregister(map->ev_num);
+ if ((ret == SDEI_EPEND) && (final_ret == 0))
+ final_ret = SDEI_EDENY;
+ }
+
+ return final_ret;
+}
+
+/* Perform reset of shared SDEI events */
+static int sdei_shared_reset(void)
+{
+ const sdei_mapping_t *mapping;
+ sdei_ev_map_t *map;
+ int ret = 0, final_ret = 0;
+ unsigned int i, j;
+
+ /* Unregister all shared events */
+ for_each_shared_map(i, map) {
+ /*
+ * The unregister can fail if the event is not registered, which
+ * is allowed, and a deny will be returned. But if the event is
+		 * running, or its unregister is pending, the call fails.
+ */
+ ret = sdei_event_unregister(map->ev_num);
+ if ((ret == SDEI_EPEND) && (final_ret == 0))
+ final_ret = SDEI_EDENY;
+ }
+
+ if (final_ret != 0)
+ return final_ret;
+
+ /*
+ * Loop through both private and shared mappings, and release all
+ * bindings.
+ */
+ for_each_mapping_type(i, mapping) {
+ iterate_mapping(mapping, j, map) {
+ /*
+ * Release bindings for mappings that are dynamic and
+ * bound.
+ */
+ if (is_map_dynamic(map) && is_map_bound(map)) {
+ /*
+ * Any failure to release would mean there is at
+				 * least one PE registered for the event.
+ */
+ ret = sdei_interrupt_release(map->ev_num);
+ if ((ret != 0) && (final_ret == 0))
+ final_ret = ret;
+ }
+ }
+ }
+
+ return final_ret;
+}
+
+/* Send a signal to another SDEI client PE */
+static int sdei_signal(int ev_num, uint64_t target_pe)
+{
+ sdei_ev_map_t *map;
+
+ /* Only event 0 can be signalled */
+ if (ev_num != SDEI_EVENT_0)
+ return SDEI_EINVAL;
+
+ /* Find mapping for event 0 */
+ map = find_event_map(SDEI_EVENT_0);
+ if (map == NULL)
+ return SDEI_EINVAL;
+
+ /* The event must be signalable */
+ if (!is_event_signalable(map))
+ return SDEI_EINVAL;
+
+ /* Validate target */
+ if (plat_core_pos_by_mpidr(target_pe) < 0)
+ return SDEI_EINVAL;
+
+ /* Raise SGI. Platform will validate target_pe */
+ plat_ic_raise_el3_sgi((int) map->intr, (u_register_t) target_pe);
+
+ return 0;
+}
+
+/* Query SDEI dispatcher features */
+static uint64_t sdei_features(unsigned int feature)
+{
+ if (feature == SDEI_FEATURE_BIND_SLOTS) {
+ return FEATURE_BIND_SLOTS(num_dyn_priv_slots,
+ num_dyn_shrd_slots);
+ }
+
+ return (uint64_t) SDEI_EINVAL;
+}
+
+/* SDEI top level handler for servicing SMCs */
+uint64_t sdei_smc_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+
+ uint64_t x5;
+ unsigned int ss = (unsigned int) get_interrupt_src_ss(flags);
+ int64_t ret;
+ bool resume = false;
+ cpu_context_t *ctx = handle;
+ int ev_num = (int) x1;
+
+ if (ss != NON_SECURE)
+ SMC_RET1(ctx, SMC_UNK);
+
+ /* Verify the caller EL */
+ if (GET_EL(read_spsr_el3()) != sdei_client_el())
+ SMC_RET1(ctx, SMC_UNK);
+
+ switch (smc_fid) {
+ case SDEI_VERSION:
+ SDEI_LOG("> VER\n");
+ ret = (int64_t) sdei_version();
+ SDEI_LOG("< VER:%" PRIx64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_EVENT_REGISTER:
+ x5 = SMC_GET_GP(ctx, CTX_GPREG_X5);
+		SDEI_LOG("> REG(n:%d e:%" PRIx64 " a:%" PRIx64 " f:%x m:%" PRIx64 ")\n", ev_num,
+ x2, x3, (int) x4, x5);
+ ret = sdei_event_register(ev_num, x2, x3, x4, x5);
+ SDEI_LOG("< REG:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_EVENT_ENABLE:
+ SDEI_LOG("> ENABLE(n:%d)\n", (int) x1);
+ ret = sdei_event_enable(ev_num);
+ SDEI_LOG("< ENABLE:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_EVENT_DISABLE:
+ SDEI_LOG("> DISABLE(n:0x%x)\n", ev_num);
+ ret = sdei_event_disable(ev_num);
+ SDEI_LOG("< DISABLE:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_EVENT_CONTEXT:
+ SDEI_LOG("> CTX(p:%d):%lx\n", (int) x1, read_mpidr_el1());
+ ret = sdei_event_context(ctx, (unsigned int) x1);
+ SDEI_LOG("< CTX:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_EVENT_COMPLETE_AND_RESUME:
+ resume = true;
+ /* Fallthrough */
+
+ case SDEI_EVENT_COMPLETE:
+ SDEI_LOG("> COMPLETE(r:%u sta/ep:%" PRIx64 "):%lx\n",
+ (unsigned int) resume, x1, read_mpidr_el1());
+ ret = sdei_event_complete(resume, x1);
+ SDEI_LOG("< COMPLETE:%" PRIx64 "\n", ret);
+
+ /*
+		 * Set the error code only if the call failed. If the call
+		 * succeeded, we discarded the dispatched context and restored
+		 * the interrupted context to a pristine condition, which must
+		 * not be modified further. We don't return to the caller in this
+ * case anyway.
+ */
+ if (ret != 0)
+ SMC_RET1(ctx, ret);
+
+ SMC_RET0(ctx);
+
+ case SDEI_EVENT_STATUS:
+ SDEI_LOG("> STAT(n:0x%x)\n", ev_num);
+ ret = sdei_event_status(ev_num);
+ SDEI_LOG("< STAT:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_EVENT_GET_INFO:
+ SDEI_LOG("> INFO(n:0x%x, %d)\n", ev_num, (int) x2);
+ ret = sdei_event_get_info(ev_num, (int) x2);
+ SDEI_LOG("< INFO:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_EVENT_UNREGISTER:
+ SDEI_LOG("> UNREG(n:0x%x)\n", ev_num);
+ ret = sdei_event_unregister(ev_num);
+ SDEI_LOG("< UNREG:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_PE_UNMASK:
+ SDEI_LOG("> UNMASK:%lx\n", read_mpidr_el1());
+ sdei_pe_unmask();
+ SDEI_LOG("< UNMASK:%d\n", 0);
+ SMC_RET1(ctx, 0);
+
+ case SDEI_PE_MASK:
+ SDEI_LOG("> MASK:%lx\n", read_mpidr_el1());
+ ret = sdei_pe_mask();
+ SDEI_LOG("< MASK:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_INTERRUPT_BIND:
+ SDEI_LOG("> BIND(%d)\n", (int) x1);
+ ret = sdei_interrupt_bind((unsigned int) x1);
+ SDEI_LOG("< BIND:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_INTERRUPT_RELEASE:
+ SDEI_LOG("> REL(0x%x)\n", ev_num);
+ ret = sdei_interrupt_release(ev_num);
+ SDEI_LOG("< REL:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_SHARED_RESET:
+ SDEI_LOG("> S_RESET():%lx\n", read_mpidr_el1());
+ ret = sdei_shared_reset();
+ SDEI_LOG("< S_RESET:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_PRIVATE_RESET:
+ SDEI_LOG("> P_RESET():%lx\n", read_mpidr_el1());
+ ret = sdei_private_reset();
+ SDEI_LOG("< P_RESET:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_EVENT_ROUTING_SET:
+ SDEI_LOG("> ROUTE_SET(n:%d f:%" PRIx64 " aff:%" PRIx64 ")\n", ev_num, x2, x3);
+ ret = sdei_event_routing_set(ev_num, x2, x3);
+ SDEI_LOG("< ROUTE_SET:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_FEATURES:
+ SDEI_LOG("> FTRS(f:%" PRIx64 ")\n", x1);
+ ret = (int64_t) sdei_features((unsigned int) x1);
+ SDEI_LOG("< FTRS:%" PRIx64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ case SDEI_EVENT_SIGNAL:
+ SDEI_LOG("> SIGNAL(e:%d t:%" PRIx64 ")\n", ev_num, x2);
+ ret = sdei_signal(ev_num, x2);
+ SDEI_LOG("< SIGNAL:%" PRId64 "\n", ret);
+ SMC_RET1(ctx, ret);
+
+ default:
+ /* Do nothing in default case */
+ break;
+ }
+
+ WARN("Unimplemented SDEI Call: 0x%x\n", smc_fid);
+ SMC_RET1(ctx, SMC_UNK);
+}
+
+/* Subscribe to PSCI CPU on to initialize per-CPU SDEI configuration */
+SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, sdei_cpu_on_init);
+
+/* Subscribe to PSCI CPU suspend finisher for per-CPU configuration */
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, sdei_cpu_wakeup_init);
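
Read together, the handlers above imply the usual client-side call order for a dynamically bound event: bind the interrupt, register a handler against the returned event number, enable the event, then unmask the PE. A sketch of that sequence, assuming a hypothetical smc64() helper that issues the SMC and returns x0; the _FID names are placeholders declared only for this sketch, with real function IDs and flag values coming from the SDEI specification, not from this file:

#include <stdint.h>

/* Hypothetical: issue an SMC64 call and return the result from x0 */
extern int64_t smc64(uint32_t fid, uint64_t x1, uint64_t x2, uint64_t x3,
		     uint64_t x4, uint64_t x5);

/* Placeholder declarations; actual values come from the SDEI spec */
extern const uint32_t SDEI_INTERRUPT_BIND_FID, SDEI_EVENT_REGISTER_FID,
		      SDEI_EVENT_ENABLE_FID, SDEI_PE_UNMASK_FID;

int64_t setup_bound_event(uint32_t intr, uint64_t ep, uint64_t arg)
{
	/* 1. Bind the Non-secure interrupt; returns the dynamic event number */
	int64_t ev = smc64(SDEI_INTERRUPT_BIND_FID, intr, 0, 0, 0, 0);
	if (ev < 0)
		return ev;	/* e.g. SDEI_ENOMEM if no free slot */

	/* 2. Register the handler; flags = 0 is assumed here to be RM_ANY */
	int64_t ret = smc64(SDEI_EVENT_REGISTER_FID, (uint64_t) ev, ep, arg, 0, 0);
	if (ret < 0)
		return ret;

	/* 3. Enable the event */
	ret = smc64(SDEI_EVENT_ENABLE_FID, (uint64_t) ev, 0, 0, 0, 0);
	if (ret < 0)
		return ret;

	/* 4. Unmask this PE so the event can actually be taken */
	return smc64(SDEI_PE_UNMASK_FID, 0, 0, 0, 0, 0);
}
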
diff --git a/services/std_svc/sdei/sdei_private.h b/services/std_svc/sdei/sdei_private.h
new file mode 100644
index 0000000..44a7301
--- /dev/null
+++ b/services/std_svc/sdei/sdei_private.h
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SDEI_PRIVATE_H
+#define SDEI_PRIVATE_H
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <arch_helpers.h>
+#include <bl31/interrupt_mgmt.h>
+#include <common/debug.h>
+#include <context.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/spinlock.h>
+#include <lib/utils_def.h>
+#include <plat/common/platform.h>
+#include <services/sdei.h>
+#include <setjmp.h>
+
+#ifndef __aarch64__
+# error SDEI is implemented only for AArch64 systems
+#endif
+
+#ifndef PLAT_SDEI_CRITICAL_PRI
+# error Platform must define SDEI critical priority value
+#endif
+
+#ifndef PLAT_SDEI_NORMAL_PRI
+# error Platform must define SDEI normal priority value
+#endif
+
+/* Output SDEI logs as verbose */
+#define SDEI_LOG(...) VERBOSE("SDEI: " __VA_ARGS__)
+
+/* SDEI handler unregistered state. This is the default state. */
+#define SDEI_STATE_UNREGISTERED 0U
+
+/* SDE event status values in bit position */
+#define SDEI_STATF_REGISTERED 0U
+#define SDEI_STATF_ENABLED 1U
+#define SDEI_STATF_RUNNING 2U
+
+/* SDEI SMC error codes */
+#define SDEI_EINVAL (-2)
+#define SDEI_EDENY (-3)
+#define SDEI_EPEND (-5)
+#define SDEI_ENOMEM (-10)
+
+/*
+ * 'info' parameter to SDEI_EVENT_GET_INFO SMC.
+ *
+ * Note that the SDEI v1.0 specification mistakenly enumerates the
+ * SDEI_INFO_EV_SIGNALED as SDEI_INFO_SIGNALED. This will be corrected in a
+ * future version.
+ */
+#define SDEI_INFO_EV_TYPE 0
+#define SDEI_INFO_EV_NOT_SIGNALED 1
+#define SDEI_INFO_EV_PRIORITY 2
+#define SDEI_INFO_EV_ROUTING_MODE 3
+#define SDEI_INFO_EV_ROUTING_AFF 4
+
+#define SDEI_PRIVATE_MAPPING() (&sdei_global_mappings[SDEI_MAP_IDX_PRIV_])
+#define SDEI_SHARED_MAPPING() (&sdei_global_mappings[SDEI_MAP_IDX_SHRD_])
+
+#define for_each_mapping_type(_i, _mapping) \
+ for ((_i) = 0, (_mapping) = &sdei_global_mappings[(_i)]; \
+ (_i) < SDEI_MAP_IDX_MAX_; \
+ (_i)++, (_mapping) = &sdei_global_mappings[(_i)])
+
+#define iterate_mapping(_mapping, _i, _map) \
+ for ((_map) = (_mapping)->map, (_i) = 0; \
+ (_i) < (_mapping)->num_maps; \
+ (_i)++, (_map)++)
+
+#define for_each_private_map(_i, _map) \
+ iterate_mapping(SDEI_PRIVATE_MAPPING(), _i, _map)
+
+#define for_each_shared_map(_i, _map) \
+ iterate_mapping(SDEI_SHARED_MAPPING(), _i, _map)
+
+/* SDEI_FEATURES */
+#define SDEI_FEATURE_BIND_SLOTS 0U
+#define BIND_SLOTS_MASK 0xffffU
+#define FEATURES_SHARED_SLOTS_SHIFT 16U
+#define FEATURES_PRIVATE_SLOTS_SHIFT 0U
+#define FEATURE_BIND_SLOTS(_priv, _shrd) \
+ (((((uint64_t) (_priv)) & BIND_SLOTS_MASK) << FEATURES_PRIVATE_SLOTS_SHIFT) | \
+ ((((uint64_t) (_shrd)) & BIND_SLOTS_MASK) << FEATURES_SHARED_SLOTS_SHIFT))
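+
+/*
+ * For example, a platform with 3 private and 5 shared bind slots reports
+ * FEATURE_BIND_SLOTS(3, 5) == (5ULL << 16) | 3ULL == 0x50003 in response to
+ * an SDEI_FEATURES query for feature 0 (bind slots).
+ */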
+
+/* Get or set an event state bit by name, e.g. GET_EV_STATE(se, ENABLED) */
+#define GET_EV_STATE(_e, _s) get_ev_state_bit(_e, SDEI_STATF_##_s)
+#define SET_EV_STATE(_e, _s) set_ev_state_bit(_e, SDEI_STATF_##_s)
+
+static inline bool is_event_private(sdei_ev_map_t *map)
+{
+ return ((map->map_flags & BIT_32(SDEI_MAPF_PRIVATE_SHIFT_)) != 0U);
+}
+
+static inline bool is_event_shared(sdei_ev_map_t *map)
+{
+ return !is_event_private(map);
+}
+
+static inline bool is_event_critical(sdei_ev_map_t *map)
+{
+ return ((map->map_flags & BIT_32(SDEI_MAPF_CRITICAL_SHIFT_)) != 0U);
+}
+
+static inline bool is_event_normal(sdei_ev_map_t *map)
+{
+ return !is_event_critical(map);
+}
+
+static inline bool is_event_signalable(sdei_ev_map_t *map)
+{
+ return ((map->map_flags & BIT_32(SDEI_MAPF_SIGNALABLE_SHIFT_)) != 0U);
+}
+
+static inline bool is_map_dynamic(sdei_ev_map_t *map)
+{
+ return ((map->map_flags & BIT_32(SDEI_MAPF_DYNAMIC_SHIFT_)) != 0U);
+}
+
+/*
+ * Checks whether an event is associated with an interrupt. For static events
+ * this is always true; for dynamic events, it reflects whether
+ * SDEI_INTERRUPT_BIND has been called on them. This can therefore be used on
+ * both static and dynamic events to check for an associated interrupt.
+ */
+static inline bool is_map_bound(sdei_ev_map_t *map)
+{
+ return ((map->map_flags & BIT_32(SDEI_MAPF_BOUND_SHIFT_)) != 0U);
+}
+
+static inline void set_map_bound(sdei_ev_map_t *map)
+{
+ map->map_flags |= BIT_32(SDEI_MAPF_BOUND_SHIFT_);
+}
+
+static inline bool is_map_explicit(sdei_ev_map_t *map)
+{
+ return ((map->map_flags & BIT_32(SDEI_MAPF_EXPLICIT_SHIFT_)) != 0U);
+}
+
+static inline void clr_map_bound(sdei_ev_map_t *map)
+{
+ map->map_flags &= ~BIT_32(SDEI_MAPF_BOUND_SHIFT_);
+}
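+
+/*
+ * Note: for dynamic maps, set_map_bound() and clr_map_bound() are expected to
+ * bracket the lifetime of an interrupt binding, i.e. to be called while
+ * handling SDEI_INTERRUPT_BIND and SDEI_INTERRUPT_RELEASE respectively.
+ */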
+
+static inline bool is_secure_sgi(unsigned int intr)
+{
+ return ((plat_ic_is_sgi(intr) != 0) &&
+ (plat_ic_get_interrupt_type(intr) == INTR_TYPE_EL3));
+}
+
+/*
+ * Determine the Exception Level of the client. If the client has EL2 enabled
+ * (indicated by a set SCR_EL3.HCE bit), it executes at EL2; otherwise it
+ * executes at EL1.
+ */
+static inline unsigned int sdei_client_el(void)
+{
+ cpu_context_t *ns_ctx = cm_get_context(NON_SECURE);
+ el3_state_t *el3_ctx = get_el3state_ctx(ns_ctx);
+
+ return ((read_ctx_reg(el3_ctx, CTX_SCR_EL3) & SCR_HCE_BIT) != 0U) ?
+ MODE_EL2 : MODE_EL1;
+}
+
+static inline unsigned int sdei_event_priority(sdei_ev_map_t *map)
+{
+ return (unsigned int) (is_event_critical(map) ? PLAT_SDEI_CRITICAL_PRI :
+ PLAT_SDEI_NORMAL_PRI);
+}
+
+static inline bool get_ev_state_bit(sdei_entry_t *se, unsigned int bit_no)
+{
+ return ((se->state & BIT_32(bit_no)) != 0U);
+}
+
+static inline void set_ev_state_bit(sdei_entry_t *se, unsigned int bit_no)
+{
+ se->state |= BIT_32(bit_no);
+}
+
+static inline void clr_ev_state_bit(sdei_entry_t *se, unsigned int bit_no)
+{
+ se->state &= ~BIT_32(bit_no);
+}
+
+/* SDEI actions for state transition */
+typedef enum {
+ /*
+ * Actions resulting from client requests. These directly map to SMC
+ * calls. Note that the state table columns are listed in this order
+ * too.
+ */
+ DO_REGISTER = 0,
+ DO_RELEASE = 1,
+ DO_ENABLE = 2,
+ DO_DISABLE = 3,
+ DO_UNREGISTER = 4,
+ DO_ROUTING = 5,
+ DO_CONTEXT = 6,
+ DO_COMPLETE = 7,
+ DO_COMPLETE_RESUME = 8,
+
+ /* Action for event dispatch */
+ DO_DISPATCH = 9,
+
+ DO_MAX,
+} sdei_action_t;
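+
+/*
+ * Note: the client-request actions above correspond, in this order, to the
+ * SDEI_EVENT_REGISTER, SDEI_INTERRUPT_RELEASE, SDEI_EVENT_ENABLE,
+ * SDEI_EVENT_DISABLE, SDEI_EVENT_UNREGISTER, SDEI_EVENT_ROUTING_SET,
+ * SDEI_EVENT_CONTEXT, SDEI_EVENT_COMPLETE and SDEI_EVENT_COMPLETE_AND_RESUME
+ * SMCs.
+ */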
+
+typedef enum {
+ SDEI_NORMAL,
+ SDEI_CRITICAL
+} sdei_class_t;
+
+static inline void sdei_map_lock(sdei_ev_map_t *map)
+{
+ spin_lock(&map->lock);
+}
+
+static inline void sdei_map_unlock(sdei_ev_map_t *map)
+{
+ spin_unlock(&map->lock);
+}
+
+extern const sdei_mapping_t sdei_global_mappings[];
+extern sdei_entry_t sdei_private_event_table[];
+extern sdei_entry_t sdei_shared_event_table[];
+
+void init_sdei_state(void);
+
+sdei_ev_map_t *find_event_map_by_intr(unsigned int intr_num, bool shared);
+sdei_ev_map_t *find_event_map(int ev_num);
+sdei_entry_t *get_event_entry(sdei_ev_map_t *map);
+
+int64_t sdei_event_context(void *handle, unsigned int param);
+int sdei_event_complete(bool resume, uint64_t pc);
+
+void sdei_pe_unmask(void);
+int64_t sdei_pe_mask(void);
+
+int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
+ void *cookie);
+bool can_sdei_state_trans(sdei_entry_t *se, sdei_action_t act);
+void begin_sdei_synchronous_dispatch(jmp_buf *buffer);
+
+#endif /* SDEI_PRIVATE_H */
diff --git a/services/std_svc/sdei/sdei_state.c b/services/std_svc/sdei/sdei_state.c
new file mode 100644
index 0000000..1b448e6
--- /dev/null
+++ b/services/std_svc/sdei/sdei_state.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+
+#include <lib/cassert.h>
+
+#include "sdei_private.h"
+
+/* Aliases for SDEI handler states: 'R'unning, 'E'nabled, and re'G'istered */
+#define r_ 0U
+#define R_ (1u << SDEI_STATF_RUNNING)
+
+#define e_ 0U
+#define E_ (1u << SDEI_STATF_ENABLED)
+
+#define g_ 0U
+#define G_ (1u << SDEI_STATF_REGISTERED)
+
+/* All possible composite handler states */
+#define reg_ (r_ | e_ | g_)
+#define reG_ (r_ | e_ | G_)
+#define rEg_ (r_ | E_ | g_)
+#define rEG_ (r_ | E_ | G_)
+#define Reg_ (R_ | e_ | g_)
+#define ReG_ (R_ | e_ | G_)
+#define REg_ (R_ | E_ | g_)
+#define REG_ (R_ | E_ | G_)
+
+#define MAX_STATES (REG_ + 1u)
+
+/* Invalid state */
+#define SDEI_STATE_INVALID ((sdei_state_t) (-1))
+
+/* No change in state */
+#define SDEI_STATE_NOP ((sdei_state_t) (-2))
+
+#define X___ SDEI_STATE_INVALID
+#define NOP_ SDEI_STATE_NOP
+
+/* Ensure special states don't overlap with valid ones */
+CASSERT(X___ > REG_, sdei_state_overlap_invalid);
+CASSERT(NOP_ > REG_, sdei_state_overlap_nop);
+
+/*
+ * SDEI handler state machine: refer to sections 6.1 and 6.1.2 of the SDEI v1.0
+ * specification (ARM DEN0054A).
+ *
+ * Not every call contributes to a handler state transition. This table is
+ * also used to validate whether a call is permissible at a given handler
+ * state:
+ *
+ * - X___ denotes a forbidden transition;
+ * - NOP_ denotes a permitted transition, but there's no change in state;
+ * - Otherwise, XXX_ gives the new state.
+ *
+ * DISP[atch] is a transition added by this implementation; it is not
+ * mentioned in the spec.
+ *
+ * Calls that the specification says can be made at any time do not appear in
+ * this table.
+ */
+
+static const sdei_state_t sdei_state_table[MAX_STATES][DO_MAX] = {
+/*
+ * Action: REG  REL[3]  ENA  DISA  UREG[1]  ROUT[3]  CTX[3][4]  COMP  COMPR  DISP[2]
+ * (bracketed numbers refer to the notes following the table)
+ */
+ /* Handler unregistered, disabled, and not running. This is the default state. */
+/* 0 */ [reg_] = { reG_, NOP_, X___, X___, X___, X___, X___, X___, X___, X___, },
+
+ /* Handler unregistered and running */
+/* 4 */ [Reg_] = { X___, X___, X___, X___, X___, X___, NOP_, reg_, reg_, X___, },
+
+ /* Handler registered */
+/* 1 */ [reG_] = { X___, X___, rEG_, NOP_, reg_, NOP_, X___, X___, X___, X___, },
+
+ /* Handler registered and running */
+/* 5 */ [ReG_] = { X___, X___, REG_, NOP_, Reg_, X___, NOP_, reG_, reG_, X___, },
+
+ /* Handler registered and enabled */
+/* 3 */ [rEG_] = { X___, X___, NOP_, reG_, reg_, X___, X___, X___, X___, REG_, },
+
+ /* Handler registered, enabled, and running */
+/* 7 */ [REG_] = { X___, X___, NOP_, ReG_, Reg_, X___, NOP_, rEG_, rEG_, X___, },
+
+ /*
+ * Invalid states: no valid transition would leave the handler in these
+ * states, and no transition from these states is possible either.
+ */
+
+ /*
+ * A handler can't be enabled without being registered, i.e. the states
+ * rEg_ and REg_ are impossible.
+ */
+/* 2 */ [rEg_] = { X___, X___, X___, X___, X___, X___, X___, X___, X___, X___, },
+/* 6 */ [REg_] = { X___, X___, X___, X___, X___, X___, X___, X___, X___, X___, },
+};
+
+/*
+ * [1] Unregister always also disables the event, so the new state has the
+ *     enabled and registered bits clear (Xeg).
+ * [2] An event is considered for dispatch only when it's both registered and
+ *     enabled.
+ * [3] Never causes a change in state.
+ * [4] Only allowed when running.
+ */
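+
+/*
+ * As a worked example of the table above, a typical event lifecycle starting
+ * from the default state reads:
+ *
+ *   reg_ --REG--> reG_ --ENA--> rEG_ --DISP--> REG_ --COMP--> rEG_
+ *   rEG_ --UREG--> reg_
+ */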
+
+/*
+ * Given an action, transition the state of an event by looking up the state
+ * table above:
+ *
+ * - Return false for invalid transition;
+ * - Return true for valid transition that causes no change in state;
+ * - Otherwise, update state and return true.
+ *
+ * This function assumes that the caller holds the necessary locks. If the
+ * transition has constraints beyond those the state table describes, the
+ * caller is expected to restore the previous state. See sdei_event_register()
+ * for an example.
+ */
+bool can_sdei_state_trans(sdei_entry_t *se, sdei_action_t act)
+{
+ sdei_state_t next;
+
+ assert(act < DO_MAX);
+ if (se->state >= MAX_STATES) {
+ WARN(" event state invalid: %x\n", se->state);
+ return false;
+ }
+
+ next = sdei_state_table[se->state][act];
+ switch (next) {
+ case SDEI_STATE_INVALID:
+ return false;
+
+ case SDEI_STATE_NOP:
+ return true;
+
+ default:
+ /* Valid transition. Update state. */
+ SDEI_LOG(" event state 0x%x => 0x%x\n", se->state, next);
+ se->state = next;
+
+ return true;
+ }
+}
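+
+/*
+ * Illustrative sketch, not part of these sources: the expected calling
+ * pattern takes the per-event lock, attempts the transition, and translates
+ * a refusal into an SDEI error code. The helper is hypothetical and guarded
+ * out; real callers in sdei_main.c also program the interrupt controller.
+ */
+#if 0
+static int64_t example_enable(sdei_ev_map_t *map)
+{
+ sdei_entry_t *se = get_event_entry(map);
+ int64_t ret = 0;
+
+ sdei_map_lock(map);
+ if (!can_sdei_state_trans(se, DO_ENABLE))
+  ret = SDEI_EDENY;
+ sdei_map_unlock(map);
+
+ return ret;
+}
+#endif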