path: root/drivers/arm/css
author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-28 09:13:47 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-28 09:13:47 +0000
commit     102b0d2daa97dae68d3eed54d8fe37a9cc38a892 (patch)
tree       bcf648efac40ca6139842707f0eba5a4496a6dd2 /drivers/arm/css
parent     Initial commit. (diff)
Adding upstream version 2.8.0+dfsg. (upstream/2.8.0+dfsg, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/arm/css')
-rw-r--r--  drivers/arm/css/mhu/css_mhu.c                100
-rw-r--r--  drivers/arm/css/mhu/css_mhu_doorbell.c        40
-rw-r--r--  drivers/arm/css/scmi/scmi_ap_core_proto.c     81
-rw-r--r--  drivers/arm/css/scmi/scmi_common.c           210
-rw-r--r--  drivers/arm/css/scmi/scmi_private.h          160
-rw-r--r--  drivers/arm/css/scmi/scmi_pwr_dmn_proto.c     88
-rw-r--r--  drivers/arm/css/scmi/scmi_sys_pwr_proto.c     78
-rw-r--r--  drivers/arm/css/scmi/vendor/scmi_sq.c         62
-rw-r--r--  drivers/arm/css/scmi/vendor/scmi_sq.h         25
-rw-r--r--  drivers/arm/css/scp/css_bom_bootloader.c     195
-rw-r--r--  drivers/arm/css/scp/css_pm_scmi.c            499
-rw-r--r--  drivers/arm/css/scp/css_pm_scpi.c            165
-rw-r--r--  drivers/arm/css/scp/css_sds.c                 95
-rw-r--r--  drivers/arm/css/scpi/css_scpi.c              272
-rw-r--r--  drivers/arm/css/sds/aarch32/sds_helpers.S     64
-rw-r--r--  drivers/arm/css/sds/aarch64/sds_helpers.S     62
-rw-r--r--  drivers/arm/css/sds/sds.c                    259
-rw-r--r--  drivers/arm/css/sds/sds_private.h            100
18 files changed, 2555 insertions, 0 deletions
diff --git a/drivers/arm/css/mhu/css_mhu.c b/drivers/arm/css/mhu/css_mhu.c
new file mode 100644
index 0000000..b7faf7e
--- /dev/null
+++ b/drivers/arm/css/mhu/css_mhu.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <platform_def.h>
+
+#include <arch_helpers.h>
+#include <drivers/arm/css/css_mhu.h>
+#include <lib/bakery_lock.h>
+#include <lib/mmio.h>
+#include <plat/arm/common/plat_arm.h>
+
+/* SCP MHU secure channel registers */
+#define SCP_INTR_S_STAT 0x200
+#define SCP_INTR_S_SET 0x208
+#define SCP_INTR_S_CLEAR 0x210
+
+/* CPU MHU secure channel registers */
+#define CPU_INTR_S_STAT 0x300
+#define CPU_INTR_S_SET 0x308
+#define CPU_INTR_S_CLEAR 0x310
+
+ARM_INSTANTIATE_LOCK;
+
+/* Weak definition may be overridden by a specific CSS-based platform */
+#pragma weak plat_arm_pwrc_setup
+
+
+/*
+ * Slot 31 is reserved because the MHU hardware uses this register bit to
+ * indicate a non-secure access attempt. The total number of available slots is
+ * therefore 31 [30:0].
+ */
+#define MHU_MAX_SLOT_ID 30
+
+void mhu_secure_message_start(unsigned int slot_id)
+{
+ assert(slot_id <= MHU_MAX_SLOT_ID);
+
+ arm_lock_get();
+
+ /* Make sure any previous command has finished */
+ while (mmio_read_32(PLAT_CSS_MHU_BASE + CPU_INTR_S_STAT) &
+ (1 << slot_id))
+ ;
+}
+
+void mhu_secure_message_send(unsigned int slot_id)
+{
+ assert(slot_id <= MHU_MAX_SLOT_ID);
+ assert(!(mmio_read_32(PLAT_CSS_MHU_BASE + CPU_INTR_S_STAT) &
+ (1 << slot_id)));
+
+ /* Send command to SCP */
+ mmio_write_32(PLAT_CSS_MHU_BASE + CPU_INTR_S_SET, 1 << slot_id);
+}
+
+uint32_t mhu_secure_message_wait(void)
+{
+ /* Wait for response from SCP */
+ uint32_t response;
+ while (!(response = mmio_read_32(PLAT_CSS_MHU_BASE + SCP_INTR_S_STAT)))
+ ;
+
+ return response;
+}
+
+void mhu_secure_message_end(unsigned int slot_id)
+{
+ assert(slot_id <= MHU_MAX_SLOT_ID);
+
+ /*
+ * Clear any response we got by writing one in the relevant slot bit to
+ * the CLEAR register
+ */
+ mmio_write_32(PLAT_CSS_MHU_BASE + SCP_INTR_S_CLEAR, 1 << slot_id);
+
+ arm_lock_release();
+}
+
+void __init mhu_secure_init(void)
+{
+ arm_lock_init();
+
+ /*
+ * The STAT register resets to zero. Ensure it is in the expected state,
+ * as a stale or garbage value would make us think it's a message we've
+ * already sent.
+ */
+ assert(mmio_read_32(PLAT_CSS_MHU_BASE + CPU_INTR_S_STAT) == 0);
+}
+
+void __init plat_arm_pwrc_setup(void)
+{
+ mhu_secure_init();
+}
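
As an illustrative sketch (not part of the diff), the four entry points above are meant to be used as a bracketed sequence around a shared-memory payload write; the SCPI and BOM drivers added later in this patch follow exactly this pattern. The payload address used below is an assumption borrowed from those drivers (PLAT_CSS_SCP_COM_SHARED_MEM_BASE); only the css_mhu.c calls themselves are taken from the code above.

	/* Illustration: send one word to the SCP on slot 0 and wait for a reply. */
	static uint32_t send_one_word_to_scp(uint32_t word)
	{
		mhu_secure_message_start(0);			/* take the lock, wait for the slot */
		mmio_write_32(PLAT_CSS_SCP_COM_SHARED_MEM_BASE, word);
		dmbst();					/* payload visible before the doorbell */
		mhu_secure_message_send(0);			/* ring the CPU -> SCP doorbell */
		uint32_t status = mhu_secure_message_wait();	/* poll the SCP -> AP STAT register */
		mhu_secure_message_end(0);			/* clear STAT and release the lock */
		return status;
	}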
diff --git a/drivers/arm/css/mhu/css_mhu_doorbell.c b/drivers/arm/css/mhu/css_mhu_doorbell.c
new file mode 100644
index 0000000..c51f3b1
--- /dev/null
+++ b/drivers/arm/css/mhu/css_mhu_doorbell.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+
+#include <arch_helpers.h>
+#include <drivers/arm/css/css_mhu_doorbell.h>
+#include <drivers/arm/css/scmi.h>
+
+void mhu_ring_doorbell(struct scmi_channel_plat_info *plat_info)
+{
+ MHU_RING_DOORBELL(plat_info->db_reg_addr,
+ plat_info->db_modify_mask,
+ plat_info->db_preserve_mask);
+ return;
+}
+
+void mhuv2_ring_doorbell(struct scmi_channel_plat_info *plat_info)
+{
+ uintptr_t mhuv2_base = plat_info->db_reg_addr & MHU_V2_FRAME_BASE_MASK;
+
+ /* wake receiver */
+ MHU_V2_ACCESS_REQUEST(mhuv2_base);
+
+ /* wait for the receiver to acknowledge it is ready */
+ while (MHU_V2_IS_ACCESS_READY(mhuv2_base) == 0)
+ ;
+
+ MHU_RING_DOORBELL(plat_info->db_reg_addr,
+ plat_info->db_modify_mask,
+ plat_info->db_preserve_mask);
+
+ /* clear the access request for the receiver */
+ MHU_V2_CLEAR_REQUEST(mhuv2_base);
+
+ return;
+}
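
The doorbell "ring" in both functions is a masked read-modify-write of the doorbell register: bits covered by db_preserve_mask keep their current value and the bits in db_modify_mask are set. A functional sketch of what MHU_RING_DOORBELL amounts to (the authoritative definition lives in css_mhu_doorbell.h, so treat this as an approximation):

	static inline void ring_doorbell_sketch(uintptr_t reg, uint32_t modify_mask,
						uint32_t preserve_mask)
	{
		/* Keep the preserved bits, OR in the modify bits, write back. */
		mmio_write_32(reg, (mmio_read_32(reg) & preserve_mask) | modify_mask);
	}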
diff --git a/drivers/arm/css/scmi/scmi_ap_core_proto.c b/drivers/arm/css/scmi/scmi_ap_core_proto.c
new file mode 100644
index 0000000..5941b87
--- /dev/null
+++ b/drivers/arm/css/scmi/scmi_ap_core_proto.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/arm/css/scmi.h>
+
+#include "scmi_private.h"
+
+/*
+ * API to set the SCMI AP core reset address and attributes
+ */
+int scmi_ap_core_set_reset_addr(void *p, uint64_t reset_addr, uint32_t attr)
+{
+ mailbox_mem_t *mbx_mem;
+ unsigned int token = 0;
+ int ret;
+ scmi_channel_t *ch = (scmi_channel_t *)p;
+
+ validate_scmi_channel(ch);
+
+ scmi_get_channel(ch);
+
+ mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
+ mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_AP_CORE_PROTO_ID,
+ SCMI_AP_CORE_RESET_ADDR_SET_MSG, token);
+ mbx_mem->len = SCMI_AP_CORE_RESET_ADDR_SET_MSG_LEN;
+ mbx_mem->flags = SCMI_FLAG_RESP_POLL;
+ SCMI_PAYLOAD_ARG3(mbx_mem->payload, reset_addr & 0xffffffff,
+ reset_addr >> 32, attr);
+
+ scmi_send_sync_command(ch);
+
+ /* Get the return values */
+ SCMI_PAYLOAD_RET_VAL1(mbx_mem->payload, ret);
+ assert(mbx_mem->len == SCMI_AP_CORE_RESET_ADDR_SET_RESP_LEN);
+ assert(token == SCMI_MSG_GET_TOKEN(mbx_mem->msg_header));
+
+ scmi_put_channel(ch);
+
+ return ret;
+}
+
+/*
+ * API to get the SCMI AP core reset address and attributes
+ */
+int scmi_ap_core_get_reset_addr(void *p, uint64_t *reset_addr, uint32_t *attr)
+{
+ mailbox_mem_t *mbx_mem;
+ unsigned int token = 0;
+ int ret;
+ scmi_channel_t *ch = (scmi_channel_t *)p;
+ uint32_t lo_addr, hi_addr;
+
+ validate_scmi_channel(ch);
+
+ scmi_get_channel(ch);
+
+ mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
+ mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_AP_CORE_PROTO_ID,
+ SCMI_AP_CORE_RESET_ADDR_GET_MSG, token);
+ mbx_mem->len = SCMI_AP_CORE_RESET_ADDR_GET_MSG_LEN;
+ mbx_mem->flags = SCMI_FLAG_RESP_POLL;
+
+ scmi_send_sync_command(ch);
+
+ /* Get the return values */
+ SCMI_PAYLOAD_RET_VAL4(mbx_mem->payload, ret, lo_addr, hi_addr, *attr);
+ *reset_addr = lo_addr | (uint64_t)hi_addr << 32;
+ assert(mbx_mem->len == SCMI_AP_CORE_RESET_ADDR_GET_RESP_LEN);
+ assert(token == SCMI_MSG_GET_TOKEN(mbx_mem->msg_header));
+
+ scmi_put_channel(ch);
+
+ return ret;
+}
diff --git a/drivers/arm/css/scmi/scmi_common.c b/drivers/arm/css/scmi/scmi_common.c
new file mode 100644
index 0000000..ec749fb
--- /dev/null
+++ b/drivers/arm/css/scmi/scmi_common.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/arm/css/scmi.h>
+
+#include "scmi_private.h"
+
+#if HW_ASSISTED_COHERENCY
+#define scmi_lock_init(lock)
+#define scmi_lock_get(lock) spin_lock(lock)
+#define scmi_lock_release(lock) spin_unlock(lock)
+#else
+#define scmi_lock_init(lock) bakery_lock_init(lock)
+#define scmi_lock_get(lock) bakery_lock_get(lock)
+#define scmi_lock_release(lock) bakery_lock_release(lock)
+#endif
+
+
+/*
+ * Private helper function to get exclusive access to SCMI channel.
+ */
+void scmi_get_channel(scmi_channel_t *ch)
+{
+ assert(ch->lock);
+ scmi_lock_get(ch->lock);
+
+ /* Make sure any previous command has finished */
+ assert(SCMI_IS_CHANNEL_FREE(
+ ((mailbox_mem_t *)(ch->info->scmi_mbx_mem))->status));
+}
+
+/*
+ * Private helper function to transfer ownership of channel from AP to SCP.
+ */
+void scmi_send_sync_command(scmi_channel_t *ch)
+{
+ mailbox_mem_t *mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
+
+ SCMI_MARK_CHANNEL_BUSY(mbx_mem->status);
+
+ /*
+ * Ensure that any write to the SCMI payload area is seen by SCP before
+ * we write to the doorbell register. If these 2 writes were reordered
+ * by the CPU then SCP would read stale payload data
+ */
+ dmbst();
+
+ ch->info->ring_doorbell(ch->info);
+ /*
+ * Ensure that the write to the doorbell register is ordered prior to
+ * checking whether the channel is free.
+ */
+ dmbsy();
+
+ /* Wait for channel to be free */
+ while (!SCMI_IS_CHANNEL_FREE(mbx_mem->status))
+ ;
+
+ /*
+ * Ensure that any read to the SCMI payload area is done after reading
+ * mailbox status. If these 2 reads were reordered then the CPU would
+ * read invalid payload data
+ */
+ dmbld();
+}
+
+/*
+ * Private helper function to release exclusive access to SCMI channel.
+ */
+void scmi_put_channel(scmi_channel_t *ch)
+{
+ /* Make sure any previous command has finished */
+ assert(SCMI_IS_CHANNEL_FREE(
+ ((mailbox_mem_t *)(ch->info->scmi_mbx_mem))->status));
+
+ assert(ch->lock);
+ scmi_lock_release(ch->lock);
+}
+
+/*
+ * API to query the SCMI protocol version.
+ */
+int scmi_proto_version(void *p, uint32_t proto_id, uint32_t *version)
+{
+ mailbox_mem_t *mbx_mem;
+ unsigned int token = 0;
+ int ret;
+ scmi_channel_t *ch = (scmi_channel_t *)p;
+
+ validate_scmi_channel(ch);
+
+ scmi_get_channel(ch);
+
+ mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
+ mbx_mem->msg_header = SCMI_MSG_CREATE(proto_id, SCMI_PROTO_VERSION_MSG,
+ token);
+ mbx_mem->len = SCMI_PROTO_VERSION_MSG_LEN;
+ mbx_mem->flags = SCMI_FLAG_RESP_POLL;
+
+ scmi_send_sync_command(ch);
+
+ /* Get the return values */
+ SCMI_PAYLOAD_RET_VAL2(mbx_mem->payload, ret, *version);
+ assert(mbx_mem->len == SCMI_PROTO_VERSION_RESP_LEN);
+ assert(token == SCMI_MSG_GET_TOKEN(mbx_mem->msg_header));
+
+ scmi_put_channel(ch);
+
+ return ret;
+}
+
+/*
+ * API to query the protocol message attributes for a SCMI protocol.
+ */
+int scmi_proto_msg_attr(void *p, uint32_t proto_id,
+ uint32_t command_id, uint32_t *attr)
+{
+ mailbox_mem_t *mbx_mem;
+ unsigned int token = 0;
+ int ret;
+ scmi_channel_t *ch = (scmi_channel_t *)p;
+
+ validate_scmi_channel(ch);
+
+ scmi_get_channel(ch);
+
+ mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
+ mbx_mem->msg_header = SCMI_MSG_CREATE(proto_id,
+ SCMI_PROTO_MSG_ATTR_MSG, token);
+ mbx_mem->len = SCMI_PROTO_MSG_ATTR_MSG_LEN;
+ mbx_mem->flags = SCMI_FLAG_RESP_POLL;
+ SCMI_PAYLOAD_ARG1(mbx_mem->payload, command_id);
+
+ scmi_send_sync_command(ch);
+
+ /* Get the return values */
+ SCMI_PAYLOAD_RET_VAL2(mbx_mem->payload, ret, *attr);
+ assert(mbx_mem->len == SCMI_PROTO_MSG_ATTR_RESP_LEN);
+ assert(token == SCMI_MSG_GET_TOKEN(mbx_mem->msg_header));
+
+ scmi_put_channel(ch);
+
+ return ret;
+}
+
+/*
+ * SCMI Driver initialization API. Returns initialized channel on success
+ * or NULL on error. The return type is an opaque void pointer.
+ */
+void *scmi_init(scmi_channel_t *ch)
+{
+ uint32_t version;
+ int ret;
+
+ assert(ch && ch->info);
+ assert(ch->info->db_reg_addr);
+ assert(ch->info->db_modify_mask);
+ assert(ch->info->db_preserve_mask);
+ assert(ch->info->ring_doorbell != NULL);
+
+ assert(ch->lock);
+
+ scmi_lock_init(ch->lock);
+
+ ch->is_initialized = 1;
+
+ ret = scmi_proto_version(ch, SCMI_PWR_DMN_PROTO_ID, &version);
+ if (ret != SCMI_E_SUCCESS) {
+ WARN("SCMI power domain protocol version message failed\n");
+ goto error;
+ }
+
+ if (!is_scmi_version_compatible(SCMI_PWR_DMN_PROTO_VER, version)) {
+ WARN("SCMI power domain protocol version 0x%x incompatible with driver version 0x%x\n",
+ version, SCMI_PWR_DMN_PROTO_VER);
+ goto error;
+ }
+
+ VERBOSE("SCMI power domain protocol version 0x%x detected\n", version);
+
+ ret = scmi_proto_version(ch, SCMI_SYS_PWR_PROTO_ID, &version);
+ if (ret != SCMI_E_SUCCESS) {
+ WARN("SCMI system power protocol version message failed\n");
+ goto error;
+ }
+
+ if (!is_scmi_version_compatible(SCMI_SYS_PWR_PROTO_VER, version)) {
+ WARN("SCMI system power management protocol version 0x%x incompatible with driver version 0x%x\n",
+ version, SCMI_SYS_PWR_PROTO_VER);
+ goto error;
+ }
+
+ VERBOSE("SCMI system power management protocol version 0x%x detected\n",
+ version);
+
+ INFO("SCMI driver initialized\n");
+
+ return (void *)ch;
+
+error:
+ ch->is_initialized = 0;
+ return NULL;
+}
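
Once scmi_init() has returned a non-NULL handle, callers pass that opaque pointer to the protocol APIs. A short usage sketch (illustrative only; the major/minor split follows the SCMI specification, which places the major revision in the upper 16 bits of the version word):

	uint32_t version;
	int ret = scmi_proto_version(handle, SCMI_PWR_DMN_PROTO_ID, &version);
	if (ret == SCMI_E_SUCCESS)
		INFO("Power domain protocol v%u.%u\n",
		     version >> 16, version & 0xffff);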
diff --git a/drivers/arm/css/scmi/scmi_private.h b/drivers/arm/css/scmi/scmi_private.h
new file mode 100644
index 0000000..a684ca5
--- /dev/null
+++ b/drivers/arm/css/scmi/scmi_private.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SCMI_PRIVATE_H
+#define SCMI_PRIVATE_H
+
+#include <lib/mmio.h>
+
+/*
+ * SCMI power domain management protocol message and response lengths. It is
+ * calculated as sum of length in bytes of the message header (4) and payload
+ * area (the number of bytes of parameters or return values in the payload).
+ */
+#define SCMI_PROTO_VERSION_MSG_LEN 4
+#define SCMI_PROTO_VERSION_RESP_LEN 12
+
+#define SCMI_PROTO_MSG_ATTR_MSG_LEN 8
+#define SCMI_PROTO_MSG_ATTR_RESP_LEN 12
+
+#define SCMI_AP_CORE_RESET_ADDR_SET_MSG_LEN 16
+#define SCMI_AP_CORE_RESET_ADDR_SET_RESP_LEN 8
+
+#define SCMI_AP_CORE_RESET_ADDR_GET_MSG_LEN 4
+#define SCMI_AP_CORE_RESET_ADDR_GET_RESP_LEN 20
+
+#define SCMI_PWR_STATE_SET_MSG_LEN 16
+#define SCMI_PWR_STATE_SET_RESP_LEN 8
+
+#define SCMI_PWR_STATE_GET_MSG_LEN 8
+#define SCMI_PWR_STATE_GET_RESP_LEN 12
+
+#define SCMI_SYS_PWR_STATE_SET_MSG_LEN 12
+#define SCMI_SYS_PWR_STATE_SET_RESP_LEN 8
+
+#define SCMI_SYS_PWR_STATE_GET_MSG_LEN 4
+#define SCMI_SYS_PWR_STATE_GET_RESP_LEN 12
+
+/* SCMI message header format bit field */
+#define SCMI_MSG_ID_SHIFT 0
+#define SCMI_MSG_ID_WIDTH 8
+#define SCMI_MSG_ID_MASK ((1 << SCMI_MSG_ID_WIDTH) - 1)
+
+#define SCMI_MSG_TYPE_SHIFT 8
+#define SCMI_MSG_TYPE_WIDTH 2
+#define SCMI_MSG_TYPE_MASK ((1 << SCMI_MSG_TYPE_WIDTH) - 1)
+
+#define SCMI_MSG_PROTO_ID_SHIFT 10
+#define SCMI_MSG_PROTO_ID_WIDTH 8
+#define SCMI_MSG_PROTO_ID_MASK ((1 << SCMI_MSG_PROTO_ID_WIDTH) - 1)
+
+#define SCMI_MSG_TOKEN_SHIFT 18
+#define SCMI_MSG_TOKEN_WIDTH 10
+#define SCMI_MSG_TOKEN_MASK ((1 << SCMI_MSG_TOKEN_WIDTH) - 1)
+
+
+/* SCMI mailbox flags */
+#define SCMI_FLAG_RESP_POLL 0
+#define SCMI_FLAG_RESP_INT 1
+
+/* SCMI power domain protocol `POWER_STATE_SET` message flags */
+#define SCMI_PWR_STATE_SET_FLAG_SYNC 0
+#define SCMI_PWR_STATE_SET_FLAG_ASYNC 1
+
+/*
+ * Helper macro to create an SCMI message header given protocol, message id
+ * and token.
+ */
+#define SCMI_MSG_CREATE(_protocol, _msg_id, _token) \
+ ((((_protocol) & SCMI_MSG_PROTO_ID_MASK) << SCMI_MSG_PROTO_ID_SHIFT) | \
+ (((_msg_id) & SCMI_MSG_ID_MASK) << SCMI_MSG_ID_SHIFT) | \
+ (((_token) & SCMI_MSG_TOKEN_MASK) << SCMI_MSG_TOKEN_SHIFT))
+
+/* Helper macro to get the token from a SCMI message header */
+#define SCMI_MSG_GET_TOKEN(_msg) \
+ (((_msg) >> SCMI_MSG_TOKEN_SHIFT) & SCMI_MSG_TOKEN_MASK)
+
+/* SCMI Channel Status bit fields */
+#define SCMI_CH_STATUS_RES0_MASK 0xFFFFFFFE
+#define SCMI_CH_STATUS_FREE_SHIFT 0
+#define SCMI_CH_STATUS_FREE_WIDTH 1
+#define SCMI_CH_STATUS_FREE_MASK ((1 << SCMI_CH_STATUS_FREE_WIDTH) - 1)
+
+/* Helper macros to check and write the channel status */
+#define SCMI_IS_CHANNEL_FREE(status) \
+ (!!(((status) >> SCMI_CH_STATUS_FREE_SHIFT) & SCMI_CH_STATUS_FREE_MASK))
+
+#define SCMI_MARK_CHANNEL_BUSY(status) do { \
+ assert(SCMI_IS_CHANNEL_FREE(status)); \
+ (status) &= ~(SCMI_CH_STATUS_FREE_MASK << \
+ SCMI_CH_STATUS_FREE_SHIFT); \
+ } while (0)
+
+/* Helper macros to copy arguments to the mailbox payload */
+#define SCMI_PAYLOAD_ARG1(payld_arr, arg1) \
+ mmio_write_32((uintptr_t)&payld_arr[0], arg1)
+
+#define SCMI_PAYLOAD_ARG2(payld_arr, arg1, arg2) do { \
+ SCMI_PAYLOAD_ARG1(payld_arr, arg1); \
+ mmio_write_32((uintptr_t)&payld_arr[1], arg2); \
+ } while (0)
+
+#define SCMI_PAYLOAD_ARG3(payld_arr, arg1, arg2, arg3) do { \
+ SCMI_PAYLOAD_ARG2(payld_arr, arg1, arg2); \
+ mmio_write_32((uintptr_t)&payld_arr[2], arg3); \
+ } while (0)
+
+/* Helper macros to read return values from the mailbox payload */
+#define SCMI_PAYLOAD_RET_VAL1(payld_arr, val1) \
+ (val1) = mmio_read_32((uintptr_t)&payld_arr[0])
+
+#define SCMI_PAYLOAD_RET_VAL2(payld_arr, val1, val2) do { \
+ SCMI_PAYLOAD_RET_VAL1(payld_arr, val1); \
+ (val2) = mmio_read_32((uintptr_t)&payld_arr[1]); \
+ } while (0)
+
+#define SCMI_PAYLOAD_RET_VAL3(payld_arr, val1, val2, val3) do { \
+ SCMI_PAYLOAD_RET_VAL2(payld_arr, val1, val2); \
+ (val3) = mmio_read_32((uintptr_t)&payld_arr[2]); \
+ } while (0)
+
+#define SCMI_PAYLOAD_RET_VAL4(payld_arr, val1, val2, val3, val4) do { \
+ SCMI_PAYLOAD_RET_VAL3(payld_arr, val1, val2, val3); \
+ (val4) = mmio_read_32((uintptr_t)&payld_arr[3]); \
+ } while (0)
+
+/*
+ * Private data structure for representing the mailbox memory layout. Refer
+ * the SCMI specification for more details.
+ */
+typedef struct mailbox_mem {
+ uint32_t res_a; /* Reserved */
+ volatile uint32_t status;
+ uint64_t res_b; /* Reserved */
+ uint32_t flags;
+ volatile uint32_t len;
+ volatile uint32_t msg_header;
+ uint32_t payload[];
+} mailbox_mem_t;
+
+
+/* Private APIs for use within SCMI driver */
+void scmi_get_channel(scmi_channel_t *ch);
+void scmi_send_sync_command(scmi_channel_t *ch);
+void scmi_put_channel(scmi_channel_t *ch);
+
+static inline void validate_scmi_channel(scmi_channel_t *ch)
+{
+ assert(ch && ch->is_initialized);
+ assert(ch->info && ch->info->scmi_mbx_mem);
+}
+
+/*
+ * SCMI vendor specific protocol
+ */
+#define SCMI_SYS_VENDOR_EXT_PROTO_ID 0x80
+
+#endif /* SCMI_PRIVATE_H */
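
As a worked example of the header layout (illustrative only; the numeric IDs are the SCMI specification values, 0x11 for the power domain protocol and 0x4 for POWER_STATE_SET):

	uint32_t header = SCMI_MSG_CREATE(0x11, 0x4, 0);
	/* = (0x11 << 10) | (0x4 << 0) | (0x0 << 18) = 0x4404 */
	assert(SCMI_MSG_GET_TOKEN(header) == 0);	/* token recovered from bits [27:18] */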
diff --git a/drivers/arm/css/scmi/scmi_pwr_dmn_proto.c b/drivers/arm/css/scmi/scmi_pwr_dmn_proto.c
new file mode 100644
index 0000000..a342aa8
--- /dev/null
+++ b/drivers/arm/css/scmi/scmi_pwr_dmn_proto.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/arm/css/scmi.h>
+
+#include "scmi_private.h"
+
+/*
+ * API to set the SCMI power domain power state.
+ */
+int scmi_pwr_state_set(void *p, uint32_t domain_id,
+ uint32_t scmi_pwr_state)
+{
+ mailbox_mem_t *mbx_mem;
+ unsigned int token = 0;
+ int ret;
+
+ /*
+ * Only asynchronous mode of `set power state` command is allowed on
+ * application processors.
+ */
+ uint32_t pwr_state_set_msg_flag = SCMI_PWR_STATE_SET_FLAG_ASYNC;
+ scmi_channel_t *ch = (scmi_channel_t *)p;
+
+ validate_scmi_channel(ch);
+
+ scmi_get_channel(ch);
+
+ mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
+ mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_PWR_DMN_PROTO_ID,
+ SCMI_PWR_STATE_SET_MSG, token);
+ mbx_mem->len = SCMI_PWR_STATE_SET_MSG_LEN;
+ mbx_mem->flags = SCMI_FLAG_RESP_POLL;
+ SCMI_PAYLOAD_ARG3(mbx_mem->payload, pwr_state_set_msg_flag,
+ domain_id, scmi_pwr_state);
+
+ scmi_send_sync_command(ch);
+
+ /* Get the return values */
+ SCMI_PAYLOAD_RET_VAL1(mbx_mem->payload, ret);
+ assert(mbx_mem->len == SCMI_PWR_STATE_SET_RESP_LEN);
+ assert(token == SCMI_MSG_GET_TOKEN(mbx_mem->msg_header));
+
+ scmi_put_channel(ch);
+
+ return ret;
+}
+
+/*
+ * API to get the SCMI power domain power state.
+ */
+int scmi_pwr_state_get(void *p, uint32_t domain_id,
+ uint32_t *scmi_pwr_state)
+{
+ mailbox_mem_t *mbx_mem;
+ unsigned int token = 0;
+ int ret;
+ scmi_channel_t *ch = (scmi_channel_t *)p;
+
+ validate_scmi_channel(ch);
+
+ scmi_get_channel(ch);
+
+ mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
+ mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_PWR_DMN_PROTO_ID,
+ SCMI_PWR_STATE_GET_MSG, token);
+ mbx_mem->len = SCMI_PWR_STATE_GET_MSG_LEN;
+ mbx_mem->flags = SCMI_FLAG_RESP_POLL;
+ SCMI_PAYLOAD_ARG1(mbx_mem->payload, domain_id);
+
+ scmi_send_sync_command(ch);
+
+ /* Get the return values */
+ SCMI_PAYLOAD_RET_VAL2(mbx_mem->payload, ret, *scmi_pwr_state);
+ assert(mbx_mem->len == SCMI_PWR_STATE_GET_RESP_LEN);
+ assert(token == SCMI_MSG_GET_TOKEN(mbx_mem->msg_header));
+
+ scmi_put_channel(ch);
+
+ return ret;
+}
diff --git a/drivers/arm/css/scmi/scmi_sys_pwr_proto.c b/drivers/arm/css/scmi/scmi_sys_pwr_proto.c
new file mode 100644
index 0000000..c8e62d1
--- /dev/null
+++ b/drivers/arm/css/scmi/scmi_sys_pwr_proto.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/arm/css/scmi.h>
+
+#include "scmi_private.h"
+
+/*
+ * API to set the SCMI system power state
+ */
+int scmi_sys_pwr_state_set(void *p, uint32_t flags, uint32_t system_state)
+{
+ mailbox_mem_t *mbx_mem;
+ unsigned int token = 0;
+ int ret;
+ scmi_channel_t *ch = (scmi_channel_t *)p;
+
+ validate_scmi_channel(ch);
+
+ scmi_get_channel(ch);
+
+ mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
+ mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_SYS_PWR_PROTO_ID,
+ SCMI_SYS_PWR_STATE_SET_MSG, token);
+ mbx_mem->len = SCMI_SYS_PWR_STATE_SET_MSG_LEN;
+ mbx_mem->flags = SCMI_FLAG_RESP_POLL;
+ SCMI_PAYLOAD_ARG2(mbx_mem->payload, flags, system_state);
+
+ scmi_send_sync_command(ch);
+
+ /* Get the return values */
+ SCMI_PAYLOAD_RET_VAL1(mbx_mem->payload, ret);
+ assert(mbx_mem->len == SCMI_SYS_PWR_STATE_SET_RESP_LEN);
+ assert(token == SCMI_MSG_GET_TOKEN(mbx_mem->msg_header));
+
+ scmi_put_channel(ch);
+
+ return ret;
+}
+
+/*
+ * API to get the SCMI system power state
+ */
+int scmi_sys_pwr_state_get(void *p, uint32_t *system_state)
+{
+ mailbox_mem_t *mbx_mem;
+ unsigned int token = 0;
+ int ret;
+ scmi_channel_t *ch = (scmi_channel_t *)p;
+
+ validate_scmi_channel(ch);
+
+ scmi_get_channel(ch);
+
+ mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
+ mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_SYS_PWR_PROTO_ID,
+ SCMI_SYS_PWR_STATE_GET_MSG, token);
+ mbx_mem->len = SCMI_SYS_PWR_STATE_GET_MSG_LEN;
+ mbx_mem->flags = SCMI_FLAG_RESP_POLL;
+
+ scmi_send_sync_command(ch);
+
+ /* Get the return values */
+ SCMI_PAYLOAD_RET_VAL2(mbx_mem->payload, ret, *system_state);
+ assert(mbx_mem->len == SCMI_SYS_PWR_STATE_GET_RESP_LEN);
+ assert(token == SCMI_MSG_GET_TOKEN(mbx_mem->msg_header));
+
+ scmi_put_channel(ch);
+
+ return ret;
+}
diff --git a/drivers/arm/css/scmi/vendor/scmi_sq.c b/drivers/arm/css/scmi/vendor/scmi_sq.c
new file mode 100644
index 0000000..f185424
--- /dev/null
+++ b/drivers/arm/css/scmi/vendor/scmi_sq.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/arm/css/scmi.h>
+
+#include "scmi_private.h"
+#include "scmi_sq.h"
+
+#include <sq_common.h>
+
+/* SCMI message ID to get the available DRAM region */
+#define SCMI_VENDOR_EXT_MEMINFO_GET_MSG 0x3
+
+#define SCMI_VENDOR_EXT_MEMINFO_GET_MSG_LEN 4
+
+/*
+ * API to get the available DRAM region
+ */
+int scmi_get_draminfo(void *p, struct draminfo *info)
+{
+ mailbox_mem_t *mbx_mem;
+ int token = 0, ret;
+ scmi_channel_t *ch = (scmi_channel_t *)p;
+ struct dram_info_resp response;
+
+ validate_scmi_channel(ch);
+
+ scmi_get_channel(ch);
+
+ mbx_mem = (mailbox_mem_t *)(ch->info->scmi_mbx_mem);
+ mbx_mem->msg_header = SCMI_MSG_CREATE(SCMI_SYS_VENDOR_EXT_PROTO_ID,
+ SCMI_VENDOR_EXT_MEMINFO_GET_MSG, token);
+ mbx_mem->len = SCMI_VENDOR_EXT_MEMINFO_GET_MSG_LEN;
+ mbx_mem->flags = SCMI_FLAG_RESP_POLL;
+
+ scmi_send_sync_command(ch);
+
+ /*
+ * Ensure that any read from the SCMI payload area is done after reading
+ * the mailbox status. If these two reads were reordered then the CPU would
+ * read invalid payload data
+ */
+ dmbld();
+
+ /* Get the return values */
+ SCMI_PAYLOAD_RET_VAL1(mbx_mem->payload, ret);
+
+ memcpy(&response, (void *)mbx_mem->payload, sizeof(response));
+
+ scmi_put_channel(ch);
+
+ *info = response.info;
+
+ return ret;
+}
diff --git a/drivers/arm/css/scmi/vendor/scmi_sq.h b/drivers/arm/css/scmi/vendor/scmi_sq.h
new file mode 100644
index 0000000..aee1a3a
--- /dev/null
+++ b/drivers/arm/css/scmi/vendor/scmi_sq.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SCMI_SQ_H
+#define SCMI_SQ_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <sq_common.h>
+
+/* Structure to represent available DRAM region */
+struct dram_info_resp {
+ int status;
+ int reserved;
+ struct draminfo info;
+};
+
+/* API to get the available DRAM region */
+int scmi_get_draminfo(void *p, struct draminfo *info);
+
+#endif /* SCMI_SQ_H */
diff --git a/drivers/arm/css/scp/css_bom_bootloader.c b/drivers/arm/css/scp/css_bom_bootloader.c
new file mode 100644
index 0000000..74121b4
--- /dev/null
+++ b/drivers/arm/css/scp/css_bom_bootloader.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdint.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/arm/css/css_mhu.h>
+#include <drivers/arm/css/css_scp.h>
+#include <drivers/arm/css/css_scpi.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+
+/* ID of the MHU slot used for the BOM protocol */
+#define BOM_MHU_SLOT_ID 0
+
+/* Boot commands sent from AP -> SCP */
+#define BOOT_CMD_INFO 0x00
+#define BOOT_CMD_DATA 0x01
+
+/* BOM command header */
+typedef struct {
+ uint32_t id : 8;
+ uint32_t reserved : 24;
+} bom_cmd_t;
+
+typedef struct {
+ uint32_t image_size;
+ uint32_t checksum;
+} cmd_info_payload_t;
+
+/*
+ * Unlike the SCPI protocol, the boot protocol uses the same memory region
+ * for both AP -> SCP and SCP -> AP transfers; define the address of this...
+ */
+#define BOM_SHARED_MEM PLAT_CSS_SCP_COM_SHARED_MEM_BASE
+#define BOM_CMD_HEADER ((bom_cmd_t *) BOM_SHARED_MEM)
+#define BOM_CMD_PAYLOAD ((void *) (BOM_SHARED_MEM + sizeof(bom_cmd_t)))
+
+typedef struct {
+ /* Offset from the base address of the Trusted RAM */
+ uint32_t offset;
+ uint32_t block_size;
+} cmd_data_payload_t;
+
+/*
+ * All CSS platforms load SCP_BL2/SCP_BL2U just below BL2 (this is where BL31
+ * usually resides except when ARM_BL31_IN_DRAM is
+ * set). Ensure that SCP_BL2/SCP_BL2U do not overflow into shared RAM and
+ * the fw_config.
+ */
+CASSERT(SCP_BL2_LIMIT <= BL2_BASE, assert_scp_bl2_overwrite_bl2);
+CASSERT(SCP_BL2U_LIMIT <= BL2_BASE, assert_scp_bl2u_overwrite_bl2);
+
+CASSERT(SCP_BL2_BASE >= ARM_FW_CONFIG_LIMIT, assert_scp_bl2_overflow);
+CASSERT(SCP_BL2U_BASE >= ARM_FW_CONFIG_LIMIT, assert_scp_bl2u_overflow);
+
+static void scp_boot_message_start(void)
+{
+ mhu_secure_message_start(BOM_MHU_SLOT_ID);
+}
+
+static void scp_boot_message_send(size_t payload_size)
+{
+ /* Ensure that any write to the BOM payload area is seen by SCP before
+ * we write to the MHU register. If these 2 writes were reordered by
+ * the CPU then SCP would read stale payload data */
+ dmbst();
+
+ /* Send command to SCP */
+ mhu_secure_message_send(BOM_MHU_SLOT_ID);
+}
+
+static uint32_t scp_boot_message_wait(size_t size)
+{
+ uint32_t mhu_status;
+
+ mhu_status = mhu_secure_message_wait();
+
+ /* Expect an SCP Boot Protocol message, reject any other protocol */
+ if (mhu_status != (1 << BOM_MHU_SLOT_ID)) {
+ ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n",
+ mhu_status);
+ panic();
+ }
+
+ /* Ensure that any read to the BOM payload area is done after reading
+ * the MHU register. If these 2 reads were reordered then the CPU would
+ * read invalid payload data */
+ dmbld();
+
+ return *(uint32_t *) BOM_SHARED_MEM;
+}
+
+static void scp_boot_message_end(void)
+{
+ mhu_secure_message_end(BOM_MHU_SLOT_ID);
+}
+
+int css_scp_boot_image_xfer(void *image, unsigned int image_size)
+{
+ uint32_t response;
+ uint32_t checksum;
+ cmd_info_payload_t *cmd_info_payload;
+ cmd_data_payload_t *cmd_data_payload;
+
+ assert((uintptr_t) image == SCP_BL2_BASE);
+
+ if ((image_size == 0) || (image_size % 4 != 0)) {
+ ERROR("Invalid size for the SCP_BL2 image. Must be a multiple of "
+ "4 bytes and not zero (current size = 0x%x)\n",
+ image_size);
+ return -1;
+ }
+
+ /* Extract the checksum from the image */
+ checksum = *(uint32_t *) image;
+ image = (char *) image + sizeof(checksum);
+ image_size -= sizeof(checksum);
+
+ mhu_secure_init();
+
+ VERBOSE("Send info about the SCP_BL2 image to be transferred to SCP\n");
+
+ /*
+ * Send information about the SCP firmware image about to be transferred
+ * to SCP
+ */
+ scp_boot_message_start();
+
+ BOM_CMD_HEADER->id = BOOT_CMD_INFO;
+ cmd_info_payload = BOM_CMD_PAYLOAD;
+ cmd_info_payload->image_size = image_size;
+ cmd_info_payload->checksum = checksum;
+
+ scp_boot_message_send(sizeof(*cmd_info_payload));
+#if CSS_DETECT_PRE_1_7_0_SCP
+ {
+ const uint32_t deprecated_scp_nack_cmd = 0x404;
+ uint32_t mhu_status;
+
+ VERBOSE("Detecting SCP version incompatibility\n");
+
+ mhu_status = mhu_secure_message_wait();
+ if (mhu_status == deprecated_scp_nack_cmd) {
+ ERROR("Detected an incompatible version of the SCP firmware.\n");
+ ERROR("Only versions from v1.7.0 onwards are supported.\n");
+ ERROR("Please update the SCP firmware.\n");
+ return -1;
+ }
+
+ VERBOSE("SCP version looks OK\n");
+ }
+#endif /* CSS_DETECT_PRE_1_7_0_SCP */
+ response = scp_boot_message_wait(sizeof(response));
+ scp_boot_message_end();
+
+ if (response != 0) {
+ ERROR("SCP BOOT_CMD_INFO returned error %u\n", response);
+ return -1;
+ }
+
+ VERBOSE("Transferring SCP_BL2 image to SCP\n");
+
+ /* Transfer SCP_BL2 image to SCP */
+ scp_boot_message_start();
+
+ BOM_CMD_HEADER->id = BOOT_CMD_DATA;
+ cmd_data_payload = BOM_CMD_PAYLOAD;
+ cmd_data_payload->offset = (uintptr_t) image - ARM_TRUSTED_SRAM_BASE;
+ cmd_data_payload->block_size = image_size;
+
+ scp_boot_message_send(sizeof(*cmd_data_payload));
+ response = scp_boot_message_wait(sizeof(response));
+ scp_boot_message_end();
+
+ if (response != 0) {
+ ERROR("SCP BOOT_CMD_DATA returned error %u\n", response);
+ return -1;
+ }
+
+ return 0;
+}
+
+int css_scp_boot_ready(void)
+{
+ VERBOSE("Waiting for SCP to signal it is ready to go on\n");
+
+ /* Wait for SCP to signal it's ready */
+ return scpi_wait_ready();
+}
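
For BOOT_CMD_INFO, the shared area at PLAT_CSS_SCP_COM_SHARED_MEM_BASE therefore ends up laid out as below (the offsets follow from sizeof(bom_cmd_t) == 4); the SCP's single-word status reply is then read back from offset 0x0 by scp_boot_message_wait():

	offset 0x0: bom_cmd_t          { id = 0x00 (BOOT_CMD_INFO), reserved }
	offset 0x4: cmd_info_payload_t { image_size, checksum }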
diff --git a/drivers/arm/css/scp/css_pm_scmi.c b/drivers/arm/css/scp/css_pm_scmi.c
new file mode 100644
index 0000000..9fe8b37
--- /dev/null
+++ b/drivers/arm/css/scp/css_pm_scmi.c
@@ -0,0 +1,499 @@
+/*
+ * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/arm/css/css_scp.h>
+#include <drivers/arm/css/scmi.h>
+#include <lib/mmio.h>
+#include <plat/arm/common/plat_arm.h>
+#include <plat/arm/css/common/css_pm.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+
+/*
+ * This file implements the SCP helper functions using SCMI protocol.
+ */
+
+/*
+ * SCMI power state parameter bit field encoding for ARM CSS platforms.
+ *
+ * 31 20 19 16 15 12 11 8 7 4 3 0
+ * +-------------------------------------------------------------+
+ * | SBZ | Max level | Level 3 | Level 2 | Level 1 | Level 0 |
+ * | | | state | state | state | state |
+ * +-------------------------------------------------------------+
+ *
+ * `Max level` encodes the highest level that has a valid power state
+ * encoded in the power state.
+ */
+#define SCMI_PWR_STATE_MAX_PWR_LVL_SHIFT 16
+#define SCMI_PWR_STATE_MAX_PWR_LVL_WIDTH 4
+#define SCMI_PWR_STATE_MAX_PWR_LVL_MASK \
+ ((1 << SCMI_PWR_STATE_MAX_PWR_LVL_WIDTH) - 1)
+#define SCMI_SET_PWR_STATE_MAX_PWR_LVL(_power_state, _max_level) \
+ (_power_state) |= ((_max_level) & SCMI_PWR_STATE_MAX_PWR_LVL_MASK)\
+ << SCMI_PWR_STATE_MAX_PWR_LVL_SHIFT
+#define SCMI_GET_PWR_STATE_MAX_PWR_LVL(_power_state) \
+ (((_power_state) >> SCMI_PWR_STATE_MAX_PWR_LVL_SHIFT) \
+ & SCMI_PWR_STATE_MAX_PWR_LVL_MASK)
+
+#define SCMI_PWR_STATE_LVL_WIDTH 4
+#define SCMI_PWR_STATE_LVL_MASK \
+ ((1 << SCMI_PWR_STATE_LVL_WIDTH) - 1)
+#define SCMI_SET_PWR_STATE_LVL(_power_state, _level, _level_state) \
+ (_power_state) |= ((_level_state) & SCMI_PWR_STATE_LVL_MASK) \
+ << (SCMI_PWR_STATE_LVL_WIDTH * (_level))
+#define SCMI_GET_PWR_STATE_LVL(_power_state, _level) \
+ (((_power_state) >> (SCMI_PWR_STATE_LVL_WIDTH * (_level))) & \
+ SCMI_PWR_STATE_LVL_MASK)
+
+/*
+ * The SCMI power state enumeration for a power domain level
+ */
+typedef enum {
+ scmi_power_state_off = 0,
+ scmi_power_state_on = 1,
+ scmi_power_state_sleep = 2,
+} scmi_power_state_t;
+
+/*
+ * The global handles for invoking the SCMI driver APIs after the driver
+ * has been initialized.
+ */
+static void *scmi_handles[PLAT_ARM_SCMI_CHANNEL_COUNT];
+
+/* The global SCMI channels array */
+static scmi_channel_t scmi_channels[PLAT_ARM_SCMI_CHANNEL_COUNT];
+
+/*
+ * Channel ID for the default SCMI channel.
+ * The default channel is used to issue SYSTEM level SCMI requests and is
+ * initialized to the channel which has the boot cpu as its resource.
+ */
+static uint32_t default_scmi_channel_id;
+
+/*
+ * TODO: Allow use of channel specific lock instead of using a single lock for
+ * all the channels.
+ */
+ARM_SCMI_INSTANTIATE_LOCK;
+
+/*
+ * Function to obtain the SCMI Domain ID and SCMI Channel number from the linear
+ * core position. The SCMI Channel number is encoded in the upper 16 bits and
+ * the Domain ID is encoded in the lower 16 bits in each entry of the mapping
+ * array exported by the platform.
+ */
+static void css_scp_core_pos_to_scmi_channel(unsigned int core_pos,
+ unsigned int *scmi_domain_id, unsigned int *scmi_channel_id)
+{
+ unsigned int composite_id;
+
+ composite_id = plat_css_core_pos_to_scmi_dmn_id_map[core_pos];
+
+ *scmi_channel_id = GET_SCMI_CHANNEL_ID(composite_id);
+ *scmi_domain_id = GET_SCMI_DOMAIN_ID(composite_id);
+}
+
+/*
+ * Helper function to suspend a CPU power domain and its parent power domains
+ * if applicable.
+ */
+void css_scp_suspend(const struct psci_power_state *target_state)
+{
+ int ret;
+
+ /* At least power domain level 0 should be specified to be suspended */
+ assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
+ ARM_LOCAL_STATE_OFF);
+
+ /* Check if power down at system power domain level is requested */
+ if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF) {
+ /* Issue SCMI command for SYSTEM_SUSPEND on the default SCMI channel */
+ ret = scmi_sys_pwr_state_set(
+ scmi_handles[default_scmi_channel_id],
+ SCMI_SYS_PWR_FORCEFUL_REQ, SCMI_SYS_PWR_SUSPEND);
+ if (ret != SCMI_E_SUCCESS) {
+ ERROR("SCMI system power domain suspend return 0x%x unexpected\n",
+ ret);
+ panic();
+ }
+ return;
+ }
+#if !HW_ASSISTED_COHERENCY
+ unsigned int lvl, channel_id, domain_id;
+ uint32_t scmi_pwr_state = 0;
+ /*
+ * If we reach here, the power state at the system power domain level
+ * must be RUN, i.e. no system-level power down was requested.
+ */
+ assert(css_system_pwr_state(target_state) == ARM_LOCAL_STATE_RUN);
+
+ /* For level 0, specify `scmi_power_state_sleep` as the power state */
+ SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, ARM_PWR_LVL0,
+ scmi_power_state_sleep);
+
+ for (lvl = ARM_PWR_LVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
+ if (target_state->pwr_domain_state[lvl] == ARM_LOCAL_STATE_RUN)
+ break;
+
+ assert(target_state->pwr_domain_state[lvl] ==
+ ARM_LOCAL_STATE_OFF);
+ /*
+ * Specify `scmi_power_state_off` as power state for higher
+ * levels.
+ */
+ SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, lvl,
+ scmi_power_state_off);
+ }
+
+ SCMI_SET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state, lvl - 1);
+
+ css_scp_core_pos_to_scmi_channel(plat_my_core_pos(),
+ &domain_id, &channel_id);
+ ret = scmi_pwr_state_set(scmi_handles[channel_id],
+ domain_id, scmi_pwr_state);
+
+ if (ret != SCMI_E_SUCCESS) {
+ ERROR("SCMI set power state command return 0x%x unexpected\n",
+ ret);
+ panic();
+ }
+#endif
+}
+
+/*
+ * Helper function to turn off a CPU power domain and its parent power domains
+ * if applicable.
+ */
+void css_scp_off(const struct psci_power_state *target_state)
+{
+ unsigned int lvl = 0, channel_id, domain_id;
+ int ret;
+ uint32_t scmi_pwr_state = 0;
+
+ /* At least the CPU level should be specified to be OFF */
+ assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
+ ARM_LOCAL_STATE_OFF);
+
+ /* PSCI CPU OFF cannot be used to turn OFF system power domain */
+ assert(css_system_pwr_state(target_state) == ARM_LOCAL_STATE_RUN);
+
+ for (; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
+ if (target_state->pwr_domain_state[lvl] == ARM_LOCAL_STATE_RUN)
+ break;
+
+ assert(target_state->pwr_domain_state[lvl] ==
+ ARM_LOCAL_STATE_OFF);
+ SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, lvl,
+ scmi_power_state_off);
+ }
+
+ SCMI_SET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state, lvl - 1);
+
+ css_scp_core_pos_to_scmi_channel(plat_my_core_pos(),
+ &domain_id, &channel_id);
+ ret = scmi_pwr_state_set(scmi_handles[channel_id],
+ domain_id, scmi_pwr_state);
+ if (ret != SCMI_E_QUEUED && ret != SCMI_E_SUCCESS) {
+ ERROR("SCMI set power state command return 0x%x unexpected\n",
+ ret);
+ panic();
+ }
+}
+
+/*
+ * Helper function to turn ON a CPU power domain and its parent power domains
+ * if applicable.
+ */
+void css_scp_on(u_register_t mpidr)
+{
+ unsigned int lvl = 0, channel_id, core_pos, domain_id;
+ int ret;
+ uint32_t scmi_pwr_state = 0;
+
+ for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
+ SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, lvl,
+ scmi_power_state_on);
+
+ SCMI_SET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state, lvl - 1);
+
+ core_pos = (unsigned int)plat_core_pos_by_mpidr(mpidr);
+ assert(core_pos < PLATFORM_CORE_COUNT);
+
+ css_scp_core_pos_to_scmi_channel(core_pos, &domain_id,
+ &channel_id);
+ ret = scmi_pwr_state_set(scmi_handles[channel_id],
+ domain_id, scmi_pwr_state);
+ if (ret != SCMI_E_QUEUED && ret != SCMI_E_SUCCESS) {
+ ERROR("SCMI set power state command return 0x%x unexpected\n",
+ ret);
+ panic();
+ }
+}
+
+/*
+ * Helper function to get the power state of a power domain node as reported
+ * by the SCP.
+ */
+int css_scp_get_power_state(u_register_t mpidr, unsigned int power_level)
+{
+ int ret;
+ uint32_t scmi_pwr_state = 0, lvl_state;
+ unsigned int channel_id, cpu_idx, domain_id;
+
+ /* We don't support get power state at the system power domain level */
+ if ((power_level > PLAT_MAX_PWR_LVL) ||
+ (power_level == CSS_SYSTEM_PWR_DMN_LVL)) {
+ WARN("Invalid power level %u specified for SCMI get power state\n",
+ power_level);
+ return PSCI_E_INVALID_PARAMS;
+ }
+
+ cpu_idx = (unsigned int)plat_core_pos_by_mpidr(mpidr);
+ assert(cpu_idx < PLATFORM_CORE_COUNT);
+
+ css_scp_core_pos_to_scmi_channel(cpu_idx, &domain_id, &channel_id);
+ ret = scmi_pwr_state_get(scmi_handles[channel_id],
+ domain_id, &scmi_pwr_state);
+
+ if (ret != SCMI_E_SUCCESS) {
+ WARN("SCMI get power state command return 0x%x unexpected\n",
+ ret);
+ return PSCI_E_INVALID_PARAMS;
+ }
+
+ /*
+ * Find the maximum power level described in the get power state
+ * command. If it is less than the requested power level, then assume
+ * the requested power level is ON.
+ */
+ if (SCMI_GET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state) < power_level)
+ return HW_ON;
+
+ lvl_state = SCMI_GET_PWR_STATE_LVL(scmi_pwr_state, power_level);
+ if (lvl_state == scmi_power_state_on)
+ return HW_ON;
+
+ assert((lvl_state == scmi_power_state_off) ||
+ (lvl_state == scmi_power_state_sleep));
+ return HW_OFF;
+}
+
+/*
+ * Callback function to raise a SGI designated to trigger the CPU power down
+ * sequence on all the online secondary cores.
+ */
+static void css_raise_pwr_down_interrupt(u_register_t mpidr)
+{
+#if CSS_SYSTEM_GRACEFUL_RESET
+ plat_ic_raise_el3_sgi(CSS_CPU_PWR_DOWN_REQ_INTR, mpidr);
+#endif
+}
+
+void __dead2 css_scp_system_off(int state)
+{
+ int ret;
+
+ /*
+ * Before issuing the system power down command, set the trusted mailbox
+ * to 0. This will ensure that in the case of a warm/cold reset, the
+ * primary CPU executes from the cold boot sequence.
+ */
+ mmio_write_64(PLAT_ARM_TRUSTED_MAILBOX_BASE, 0U);
+
+ /*
+ * Send powerdown request to online secondary core(s)
+ */
+ ret = psci_stop_other_cores(0, css_raise_pwr_down_interrupt);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("Failed to powerdown secondary core(s)\n");
+ }
+
+ /*
+ * Disable GIC CPU interface to prevent pending interrupt from waking
+ * up the AP from WFI.
+ */
+ plat_arm_gic_cpuif_disable();
+ plat_arm_gic_redistif_off();
+
+ /*
+ * Issue the SCMI system power state set command. The request is sent
+ * with the forceful flag, so it is not subject to a graceful negotiation.
+ */
+ ret = scmi_sys_pwr_state_set(scmi_handles[default_scmi_channel_id],
+ SCMI_SYS_PWR_FORCEFUL_REQ,
+ state);
+
+ if (ret != SCMI_E_SUCCESS) {
+ ERROR("SCMI system power state set 0x%x returns unexpected 0x%x\n",
+ state, ret);
+ panic();
+ }
+
+ /* Powerdown of primary core */
+ psci_pwrdown_cpu(PLAT_MAX_PWR_LVL);
+ wfi();
+ ERROR("CSS set power state: operation not handled.\n");
+ panic();
+}
+
+/*
+ * Helper function to shutdown the system via SCMI.
+ */
+void __dead2 css_scp_sys_shutdown(void)
+{
+ css_scp_system_off(SCMI_SYS_PWR_SHUTDOWN);
+}
+
+/*
+ * Helper function to reset the system via SCMI.
+ */
+void __dead2 css_scp_sys_reboot(void)
+{
+ css_scp_system_off(SCMI_SYS_PWR_COLD_RESET);
+}
+
+static int scmi_ap_core_init(scmi_channel_t *ch)
+{
+#if PROGRAMMABLE_RESET_ADDRESS
+ uint32_t version;
+ int ret;
+
+ ret = scmi_proto_version(ch, SCMI_AP_CORE_PROTO_ID, &version);
+ if (ret != SCMI_E_SUCCESS) {
+ WARN("SCMI AP core protocol version message failed\n");
+ return -1;
+ }
+
+ if (!is_scmi_version_compatible(SCMI_AP_CORE_PROTO_VER, version)) {
+ WARN("SCMI AP core protocol version 0x%x incompatible with driver version 0x%x\n",
+ version, SCMI_AP_CORE_PROTO_VER);
+ return -1;
+ }
+ INFO("SCMI AP core protocol version 0x%x detected\n", version);
+#endif
+ return 0;
+}
+
+void __init plat_arm_pwrc_setup(void)
+{
+ unsigned int composite_id, idx;
+
+ for (idx = 0; idx < PLAT_ARM_SCMI_CHANNEL_COUNT; idx++) {
+ INFO("Initializing SCMI driver on channel %d\n", idx);
+
+ scmi_channels[idx].info = plat_css_get_scmi_info(idx);
+ scmi_channels[idx].lock = ARM_SCMI_LOCK_GET_INSTANCE;
+ scmi_handles[idx] = scmi_init(&scmi_channels[idx]);
+
+ if (scmi_handles[idx] == NULL) {
+ ERROR("SCMI Initialization failed on channel %d\n", idx);
+ panic();
+ }
+
+ if (scmi_ap_core_init(&scmi_channels[idx]) < 0) {
+ ERROR("SCMI AP core protocol initialization failed\n");
+ panic();
+ }
+ }
+
+ composite_id = plat_css_core_pos_to_scmi_dmn_id_map[plat_my_core_pos()];
+ default_scmi_channel_id = GET_SCMI_CHANNEL_ID(composite_id);
+}
+
+/******************************************************************************
+ * This function overrides the default definition for ARM platforms. Initialize
+ * the SCMI driver, query capability via SCMI and modify the PSCI capability
+ * based on that.
+ *****************************************************************************/
+const plat_psci_ops_t *css_scmi_override_pm_ops(plat_psci_ops_t *ops)
+{
+ uint32_t msg_attr;
+ int ret;
+ void *scmi_handle = scmi_handles[default_scmi_channel_id];
+
+ assert(scmi_handle);
+
+ /* Check that power domain POWER_STATE_SET message is supported */
+ ret = scmi_proto_msg_attr(scmi_handle, SCMI_PWR_DMN_PROTO_ID,
+ SCMI_PWR_STATE_SET_MSG, &msg_attr);
+ if (ret != SCMI_E_SUCCESS) {
+ ERROR("Set power state command is not supported by SCMI\n");
+ panic();
+ }
+
+ /*
+ * Don't support PSCI NODE_HW_STATE call if SCMI doesn't support
+ * POWER_STATE_GET message.
+ */
+ ret = scmi_proto_msg_attr(scmi_handle, SCMI_PWR_DMN_PROTO_ID,
+ SCMI_PWR_STATE_GET_MSG, &msg_attr);
+ if (ret != SCMI_E_SUCCESS)
+ ops->get_node_hw_state = NULL;
+
+ /* Check if the SCMI SYSTEM_POWER_STATE_SET message is supported */
+ ret = scmi_proto_msg_attr(scmi_handle, SCMI_SYS_PWR_PROTO_ID,
+ SCMI_SYS_PWR_STATE_SET_MSG, &msg_attr);
+ if (ret != SCMI_E_SUCCESS) {
+ /* System power management operations are not supported */
+ ops->system_off = NULL;
+ ops->system_reset = NULL;
+ ops->get_sys_suspend_power_state = NULL;
+ } else {
+ if (!(msg_attr & SCMI_SYS_PWR_SUSPEND_SUPPORTED)) {
+ /*
+ * System power management protocol is available, but
+ * it does not support SYSTEM SUSPEND.
+ */
+ ops->get_sys_suspend_power_state = NULL;
+ }
+ if (!(msg_attr & SCMI_SYS_PWR_WARM_RESET_SUPPORTED)) {
+ /*
+ * WARM reset is not available.
+ */
+ ops->system_reset2 = NULL;
+ }
+ }
+
+ return ops;
+}
+
+int css_system_reset2(int is_vendor, int reset_type, u_register_t cookie)
+{
+ if (is_vendor || (reset_type != PSCI_RESET2_SYSTEM_WARM_RESET))
+ return PSCI_E_INVALID_PARAMS;
+
+ css_scp_system_off(SCMI_SYS_PWR_WARM_RESET);
+ /*
+ * css_scp_system_off cannot return (it is a __dead function),
+ * but css_system_reset2 has to return some value, even in
+ * this case.
+ */
+ return 0;
+}
+
+#if PROGRAMMABLE_RESET_ADDRESS
+void plat_arm_program_trusted_mailbox(uintptr_t address)
+{
+ int ret, i;
+
+ for (i = 0; i < PLAT_ARM_SCMI_CHANNEL_COUNT; i++) {
+ assert(scmi_handles[i]);
+
+ ret = scmi_ap_core_set_reset_addr(scmi_handles[i], address,
+ SCMI_AP_CORE_LOCK_ATTR);
+ if (ret != SCMI_E_SUCCESS) {
+ ERROR("CSS: Failed to program reset address: %d\n", ret);
+ panic();
+ }
+ }
+}
+#endif
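
To make the composite power-state encoding used by css_scp_suspend() and css_scp_off() concrete, here is a worked example (illustrative only) for a suspend request in which the CPU (level 0) sleeps and its cluster (level 1) is powered off:

	uint32_t state = 0;

	SCMI_SET_PWR_STATE_LVL(state, ARM_PWR_LVL0, scmi_power_state_sleep);	/* bits [3:0]   = 2 */
	SCMI_SET_PWR_STATE_LVL(state, ARM_PWR_LVL1, scmi_power_state_off);	/* bits [7:4]   = 0 */
	SCMI_SET_PWR_STATE_MAX_PWR_LVL(state, ARM_PWR_LVL1);			/* bits [19:16] = 1 */
	/* state == 0x00010002: valid states up to level 1, level 1 off, level 0 in sleep */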
diff --git a/drivers/arm/css/scp/css_pm_scpi.c b/drivers/arm/css/scp/css_pm_scpi.c
new file mode 100644
index 0000000..b4019ce
--- /dev/null
+++ b/drivers/arm/css/scp/css_pm_scpi.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/arm/css/css_scp.h>
+#include <drivers/arm/css/css_scpi.h>
+#include <plat/arm/common/plat_arm.h>
+#include <plat/arm/css/common/css_pm.h>
+
+/*
+ * This file implements the SCP power management functions using SCPI protocol.
+ */
+
+/*
+ * Helper function to inform power down state to SCP.
+ */
+void css_scp_suspend(const struct psci_power_state *target_state)
+{
+ uint32_t cluster_state = scpi_power_on;
+ uint32_t system_state = scpi_power_on;
+
+ /* Check if power down at system power domain level is requested */
+ if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF)
+ system_state = scpi_power_retention;
+
+ /* Cluster is to be turned off, so disable coherency */
+ if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF)
+ cluster_state = scpi_power_off;
+
+ /*
+ * Ask the SCP to power down the appropriate components depending upon
+ * their state.
+ */
+ scpi_set_css_power_state(read_mpidr_el1(),
+ scpi_power_off,
+ cluster_state,
+ system_state);
+}
+
+/*
+ * Helper function to turn off a CPU power domain and its parent power domains
+ * if applicable. Since SCPI doesn't differentiate between OFF and suspend, we
+ * call the suspend helper here.
+ */
+void css_scp_off(const struct psci_power_state *target_state)
+{
+ css_scp_suspend(target_state);
+}
+
+/*
+ * Helper function to turn ON a CPU power domain and its parent power domains
+ * if applicable.
+ */
+void css_scp_on(u_register_t mpidr)
+{
+ /*
+ * SCP takes care of powering up parent power domains so we
+ * only need to care about level 0
+ */
+ scpi_set_css_power_state(mpidr, scpi_power_on, scpi_power_on,
+ scpi_power_on);
+}
+
+/*
+ * Helper function to get the power state of a power domain node as reported
+ * by the SCP.
+ */
+int css_scp_get_power_state(u_register_t mpidr, unsigned int power_level)
+{
+ int rc, element;
+ unsigned int cpu_state, cluster_state;
+
+ /*
+ * The format of 'power_level' is implementation-defined, but 0 must
+ * mean a CPU. We also allow 1 to denote the cluster
+ */
+ if (power_level != ARM_PWR_LVL0 && power_level != ARM_PWR_LVL1)
+ return PSCI_E_INVALID_PARAMS;
+
+ /* Query SCP */
+ rc = scpi_get_css_power_state(mpidr, &cpu_state, &cluster_state);
+ if (rc != 0)
+ return PSCI_E_INVALID_PARAMS;
+
+ /* Map power states of CPU and cluster to expected PSCI return codes */
+ if (power_level == ARM_PWR_LVL0) {
+ /*
+ * The CPU state returned by SCP is an 8-bit bit mask
+ * corresponding to each CPU in the cluster
+ */
+#if ARM_PLAT_MT
+ /*
+ * The current SCPI driver only caters for single-threaded
+ * platforms. Hence we ignore the thread ID (which is always 0)
+ * for such platforms.
+ */
+ element = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+#else
+ element = mpidr & MPIDR_AFFLVL_MASK;
+#endif /* ARM_PLAT_MT */
+ return CSS_CPU_PWR_STATE(cpu_state, element) ==
+ CSS_CPU_PWR_STATE_ON ? HW_ON : HW_OFF;
+ } else {
+ assert(cluster_state == CSS_CLUSTER_PWR_STATE_ON ||
+ cluster_state == CSS_CLUSTER_PWR_STATE_OFF);
+ return cluster_state == CSS_CLUSTER_PWR_STATE_ON ? HW_ON :
+ HW_OFF;
+ }
+}
+
+/*
+ * Helper function to shutdown the system via SCPI.
+ */
+void __dead2 css_scp_sys_shutdown(void)
+{
+ uint32_t response;
+
+ /*
+ * Disable GIC CPU interface to prevent pending interrupt
+ * from waking up the AP from WFI.
+ */
+ plat_arm_gic_cpuif_disable();
+
+ /* Send the power down request to the SCP */
+ response = scpi_sys_power_state(scpi_system_shutdown);
+
+ if (response != SCP_OK) {
+ ERROR("CSS System Off: SCP error %u.\n", response);
+ panic();
+ }
+ wfi();
+ ERROR("CSS System Off: operation not handled.\n");
+ panic();
+}
+
+/*
+ * Helper function to reset the system via SCPI.
+ */
+void __dead2 css_scp_sys_reboot(void)
+{
+ uint32_t response;
+
+ /*
+ * Disable GIC CPU interface to prevent pending interrupt
+ * from waking up the AP from WFI.
+ */
+ plat_arm_gic_cpuif_disable();
+
+ /* Send the system reset request to the SCP */
+ response = scpi_sys_power_state(scpi_system_reboot);
+
+ if (response != SCP_OK) {
+ ERROR("CSS System Reset: SCP error %u.\n", response);
+ panic();
+ }
+ wfi();
+ ERROR("CSS System Reset: operation not handled.\n");
+ panic();
+}
diff --git a/drivers/arm/css/scp/css_sds.c b/drivers/arm/css/scp/css_sds.c
new file mode 100644
index 0000000..e42ee10
--- /dev/null
+++ b/drivers/arm/css/scp/css_sds.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdint.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/arm/css/css_scp.h>
+#include <drivers/arm/css/sds.h>
+#include <drivers/delay_timer.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+
+int css_scp_boot_image_xfer(void *image, unsigned int image_size)
+{
+ int ret;
+ unsigned int image_offset, image_flags;
+
+ ret = sds_init();
+ if (ret != SDS_OK) {
+ ERROR("SCP SDS initialization failed\n");
+ panic();
+ }
+
+ VERBOSE("Writing SCP image metadata\n");
+ image_offset = (uintptr_t) image - ARM_TRUSTED_SRAM_BASE;
+ ret = sds_struct_write(SDS_SCP_IMG_STRUCT_ID, SDS_SCP_IMG_ADDR_OFFSET,
+ &image_offset, SDS_SCP_IMG_ADDR_SIZE,
+ SDS_ACCESS_MODE_NON_CACHED);
+ if (ret != SDS_OK)
+ goto sds_fail;
+
+ ret = sds_struct_write(SDS_SCP_IMG_STRUCT_ID, SDS_SCP_IMG_SIZE_OFFSET,
+ &image_size, SDS_SCP_IMG_SIZE_SIZE,
+ SDS_ACCESS_MODE_NON_CACHED);
+ if (ret != SDS_OK)
+ goto sds_fail;
+
+ VERBOSE("Marking SCP image metadata as valid\n");
+ image_flags = SDS_SCP_IMG_VALID_FLAG_BIT;
+ ret = sds_struct_write(SDS_SCP_IMG_STRUCT_ID, SDS_SCP_IMG_FLAG_OFFSET,
+ &image_flags, SDS_SCP_IMG_FLAG_SIZE,
+ SDS_ACCESS_MODE_NON_CACHED);
+ if (ret != SDS_OK)
+ goto sds_fail;
+
+ return 0;
+sds_fail:
+ ERROR("SCP SDS write to SCP IMG struct failed\n");
+ panic();
+}
+
+/*
+ * API to wait until the SCP signals that it is ready, after booting the
+ * transferred image.
+ */
+int css_scp_boot_ready(void)
+{
+ uint32_t scp_feature_availability_flags;
+ int ret, retry = CSS_SCP_READY_10US_RETRIES;
+
+
+ VERBOSE("Waiting for SCP RAM to complete its initialization process\n");
+
+ /* Wait for the SCP RAM Firmware to complete its initialization process */
+ while (retry > 0) {
+ ret = sds_struct_read(SDS_FEATURE_AVAIL_STRUCT_ID, 0,
+ &scp_feature_availability_flags,
+ SDS_FEATURE_AVAIL_SIZE,
+ SDS_ACCESS_MODE_NON_CACHED);
+ if (ret == SDS_ERR_STRUCT_NOT_FINALIZED)
+ continue;
+
+ if (ret != SDS_OK) {
+ ERROR(" sds_struct_read failed\n");
+ panic();
+ }
+
+ if (scp_feature_availability_flags &
+ SDS_FEATURE_AVAIL_SCP_RAM_READY_BIT)
+ return 0;
+
+ udelay(10);
+ retry--;
+ }
+
+ ERROR("Timeout of %d ms expired waiting for SCP RAM Ready flag\n",
+ CSS_SCP_READY_10US_RETRIES/100);
+
+ plat_panic_handler();
+}
diff --git a/drivers/arm/css/scpi/css_scpi.c b/drivers/arm/css/scpi/css_scpi.c
new file mode 100644
index 0000000..416356b
--- /dev/null
+++ b/drivers/arm/css/scpi/css_scpi.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/arm/css/css_mhu.h>
+#include <drivers/arm/css/css_scpi.h>
+#include <lib/utils.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+
+#define SCPI_SHARED_MEM_SCP_TO_AP PLAT_CSS_SCP_COM_SHARED_MEM_BASE
+#define SCPI_SHARED_MEM_AP_TO_SCP (PLAT_CSS_SCP_COM_SHARED_MEM_BASE \
+ + 0x100)
+
+/* Header and payload addresses for commands from AP to SCP */
+#define SCPI_CMD_HEADER_AP_TO_SCP \
+ ((scpi_cmd_t *) SCPI_SHARED_MEM_AP_TO_SCP)
+#define SCPI_CMD_PAYLOAD_AP_TO_SCP \
+ ((void *) (SCPI_SHARED_MEM_AP_TO_SCP + sizeof(scpi_cmd_t)))
+
+/* Header and payload addresses for responses from SCP to AP */
+#define SCPI_RES_HEADER_SCP_TO_AP \
+ ((scpi_cmd_t *) SCPI_SHARED_MEM_SCP_TO_AP)
+#define SCPI_RES_PAYLOAD_SCP_TO_AP \
+ ((void *) (SCPI_SHARED_MEM_SCP_TO_AP + sizeof(scpi_cmd_t)))
+
+/* ID of the MHU slot used for the SCPI protocol */
+#define SCPI_MHU_SLOT_ID 0
+
+static void scpi_secure_message_start(void)
+{
+ mhu_secure_message_start(SCPI_MHU_SLOT_ID);
+}
+
+static void scpi_secure_message_send(size_t payload_size)
+{
+ /*
+ * Ensure that any write to the SCPI payload area is seen by SCP before
+ * we write to the MHU register. If these 2 writes were reordered by
+ * the CPU then SCP would read stale payload data
+ */
+ dmbst();
+
+ mhu_secure_message_send(SCPI_MHU_SLOT_ID);
+}
+
+static int scpi_secure_message_receive(scpi_cmd_t *cmd)
+{
+ uint32_t mhu_status;
+
+ assert(cmd != NULL);
+
+ mhu_status = mhu_secure_message_wait();
+
+ /* Expect an SCPI message, reject any other protocol */
+ if (mhu_status != (1 << SCPI_MHU_SLOT_ID)) {
+ ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n",
+ mhu_status);
+ return -1;
+ }
+
+ /*
+ * Ensure that any read to the SCPI payload area is done after reading
+ * the MHU register. If these 2 reads were reordered then the CPU would
+ * read invalid payload data
+ */
+ dmbld();
+
+ memcpy(cmd, (void *) SCPI_SHARED_MEM_SCP_TO_AP, sizeof(*cmd));
+
+ return 0;
+}
+
+static void scpi_secure_message_end(void)
+{
+ mhu_secure_message_end(SCPI_MHU_SLOT_ID);
+}
+
+int scpi_wait_ready(void)
+{
+ scpi_cmd_t scpi_cmd;
+ int rc;
+
+ VERBOSE("Waiting for SCP_READY command...\n");
+
+ /* Get a message from the SCP */
+ scpi_secure_message_start();
+ rc = scpi_secure_message_receive(&scpi_cmd);
+ scpi_secure_message_end();
+
+ /* If no message was received, don't send a response */
+ if (rc != 0)
+ return rc;
+
+ /* We are expecting 'SCP Ready', produce correct error if it's not */
+ scpi_status_t status = SCP_OK;
+ if (scpi_cmd.id != SCPI_CMD_SCP_READY) {
+ ERROR("Unexpected SCP command: expected command #%u, got command #%u\n",
+ SCPI_CMD_SCP_READY, scpi_cmd.id);
+ status = SCP_E_SUPPORT;
+ } else if (scpi_cmd.size != 0) {
+ ERROR("SCP_READY command has incorrect size: expected 0, got %u\n",
+ scpi_cmd.size);
+ status = SCP_E_SIZE;
+ }
+
+ VERBOSE("Sending response for SCP_READY command\n");
+
+ /*
+ * Send our response back to SCP.
+ * We are using the same SCPI header, just update the status field.
+ */
+ scpi_cmd.status = status;
+ scpi_secure_message_start();
+ memcpy((void *) SCPI_SHARED_MEM_AP_TO_SCP, &scpi_cmd, sizeof(scpi_cmd));
+ scpi_secure_message_send(0);
+ scpi_secure_message_end();
+
+ return status == SCP_OK ? 0 : -1;
+}
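[Usage sketch: scpi_wait_ready() is meant to be called once by AP firmware after the SCP has been released, before any other SCPI command is issued. A minimal, hypothetical caller, assuming the usual common/debug.h logging macros and platform panic helper:]

        /* Hypothetical boot-time caller: block until SCP signals SCP_READY. */
        static void plat_wait_for_scp(void)
        {
                if (scpi_wait_ready() != 0) {
                        ERROR("SCP did not report ready\n");
                        panic();
                }
        }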
+
+void scpi_set_css_power_state(unsigned int mpidr,
+ scpi_power_state_t cpu_state, scpi_power_state_t cluster_state,
+ scpi_power_state_t css_state)
+{
+ scpi_cmd_t *cmd;
+ uint32_t state = 0;
+ uint32_t *payload_addr;
+
+#if ARM_PLAT_MT
+ /*
+ * The current SCPI driver only caters for single-threaded platforms.
+ * Hence we ignore the thread ID (which is always 0) for such platforms.
+ */
+ state |= (mpidr >> MPIDR_AFF1_SHIFT) & 0x0f; /* CPU ID */
+ state |= ((mpidr >> MPIDR_AFF2_SHIFT) & 0x0f) << 4; /* Cluster ID */
+#else
+ state |= mpidr & 0x0f; /* CPU ID */
+ state |= (mpidr & 0xf00) >> 4; /* Cluster ID */
+#endif /* ARM_PLAT_MT */
+
+ state |= cpu_state << 8;
+ state |= cluster_state << 12;
+ state |= css_state << 16;
+
+ scpi_secure_message_start();
+
+ /* Populate the command header */
+ cmd = SCPI_CMD_HEADER_AP_TO_SCP;
+ cmd->id = SCPI_CMD_SET_CSS_POWER_STATE;
+ cmd->set = SCPI_SET_NORMAL;
+ cmd->sender = 0;
+ cmd->size = sizeof(state);
+ /* Populate the command payload */
+ payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
+ *payload_addr = state;
+ scpi_secure_message_send(sizeof(state));
+ /*
+ * SCP does not reply to this command in order to avoid MHU interrupts
+ * from the sender, which could interfere with its power state request.
+ */
+
+ scpi_secure_message_end();
+}
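[Usage sketch: a CSS platform's PSCI power-down path typically encodes the target states with the scpi_power_* enumerators from css_scpi.h and calls this function. The helper name below is hypothetical:]

        /* Hypothetical CPU_OFF hook: CPU and cluster off, CSS/system stays on. */
        static void plat_scp_cpu_off(u_register_t mpidr)
        {
                scpi_set_css_power_state((unsigned int)mpidr,
                                         scpi_power_off,        /* CPU state */
                                         scpi_power_off,        /* cluster state */
                                         scpi_power_on);        /* CSS state */
        }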
+
+/*
+ * Query and obtain CSS power state from SCP.
+ *
+ * In response to the query, SCP returns power states of all CPUs in all
+ * clusters of the system. The returned response is then filtered based on the
+ * supplied MPIDR. The power states of the requested cluster and the CPU within
+ * it are then returned via the supplied non-NULL pointer arguments.
+ *
+ * Returns 0 on success, or -1 on errors.
+ */
+int scpi_get_css_power_state(unsigned int mpidr, unsigned int *cpu_state_p,
+ unsigned int *cluster_state_p)
+{
+ scpi_cmd_t *cmd;
+ scpi_cmd_t response;
+ int power_state, cpu, cluster, rc = -1;
+
+ /*
+ * Extract CPU and cluster membership of the given MPIDR. SCPI caters
+ * for only up to 0xf clusters, and 8 CPUs per cluster
+ */
+#if ARM_PLAT_MT
+ /*
+ * The current SCPI driver only caters for single-threaded platforms.
+ * Hence we ignore the thread ID (which is always 0) for such platforms.
+ */
+ cpu = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+ cluster = (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK;
+#else
+ cpu = mpidr & MPIDR_AFFLVL_MASK;
+ cluster = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+#endif /* ARM_PLAT_MT */
+ if (cpu >= 8 || cluster >= 0xf)
+ return -1;
+
+ scpi_secure_message_start();
+
+ /* Populate request headers */
+ zeromem(SCPI_CMD_HEADER_AP_TO_SCP, sizeof(*cmd));
+ cmd = SCPI_CMD_HEADER_AP_TO_SCP;
+ cmd->id = SCPI_CMD_GET_CSS_POWER_STATE;
+
+ /*
+ * Send message and wait for SCP's response
+ */
+ scpi_secure_message_send(0);
+ if (scpi_secure_message_receive(&response) != 0)
+ goto exit;
+
+ if (response.status != SCP_OK)
+ goto exit;
+
+ /* Validate SCP response */
+ if (!CHECK_RESPONSE(response, cluster))
+ goto exit;
+
+ /* Extract power states for required cluster */
+ power_state = *(((uint16_t *) SCPI_RES_PAYLOAD_SCP_TO_AP) + cluster);
+ if (CLUSTER_ID(power_state) != cluster)
+ goto exit;
+
+ /* Update power state via pointers */
+ if (cluster_state_p)
+ *cluster_state_p = CLUSTER_POWER_STATE(power_state);
+ if (cpu_state_p)
+ *cpu_state_p = CPU_POWER_STATE(power_state);
+ rc = 0;
+
+exit:
+ scpi_secure_message_end();
+ return rc;
+}
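[Usage sketch for the query above: checking whether the calling CPU's cluster has already been powered down. Assumes the scpi_power_off enumerator from css_scpi.h and the read_mpidr() helper from arch_helpers.h:]

        /* Hypothetical check: has the SCP powered this CPU's cluster off? */
        unsigned int cpu_state, cluster_state;

        if ((scpi_get_css_power_state((unsigned int)read_mpidr(), &cpu_state,
                                      &cluster_state) == 0) &&
            (cluster_state == scpi_power_off))
                INFO("Cluster is powered off\n");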
+
+uint32_t scpi_sys_power_state(scpi_system_state_t system_state)
+{
+ scpi_cmd_t *cmd;
+ uint8_t *payload_addr;
+ scpi_cmd_t response;
+
+ scpi_secure_message_start();
+
+ /* Populate the command header */
+ cmd = SCPI_CMD_HEADER_AP_TO_SCP;
+ cmd->id = SCPI_CMD_SYS_POWER_STATE;
+ cmd->set = 0;
+ cmd->sender = 0;
+ cmd->size = sizeof(*payload_addr);
+ /* Populate the command payload */
+ payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
+ *payload_addr = system_state & 0xff;
+ scpi_secure_message_send(sizeof(*payload_addr));
+
+ /* If no response is received, fill in an error status */
+ if (scpi_secure_message_receive(&response) != 0)
+ response.status = SCP_E_TIMEOUT;
+
+ scpi_secure_message_end();
+
+ return response.status;
+}
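[Usage sketch for the system-level path: a PSCI SYSTEM_OFF handler on CSS platforms would send scpi_system_shutdown through this call and check the returned SCPI status. The enumerator and status code names are taken from css_scpi.h; the surrounding handler is hypothetical:]

        /* Hypothetical SYSTEM_OFF path: ask the SCP to power the system down. */
        uint32_t resp = scpi_sys_power_state(scpi_system_shutdown);

        if (resp != SCP_OK)
                ERROR("SCP SYSTEM_OFF request failed, response: %u\n", resp);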
diff --git a/drivers/arm/css/sds/aarch32/sds_helpers.S b/drivers/arm/css/sds/aarch32/sds_helpers.S
new file mode 100644
index 0000000..13ff0e1
--- /dev/null
+++ b/drivers/arm/css/sds/aarch32/sds_helpers.S
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <drivers/arm/css/sds.h>
+#include <platform_def.h>
+
+#include "../sds_private.h"
+
+ .globl sds_get_primary_cpu_id
+
+ /*
+ * int sds_get_primary_cpu_id(void);
+	 * Return the primary CPU ID from the SDS Structure
+	 * Returns the CPU ID on success or -1 on failure
+ */
+func sds_get_primary_cpu_id
+ ldr r0, =PLAT_ARM_SDS_MEM_BASE
+ ldr r2, =SDS_REGION_SIGNATURE
+ ldr r1, [r0]
+ ubfx r3, r1, #0, #16
+
+	/* Check if the SDS region signature was found */
+ cmp r2, r3
+ bne 2f
+
+ /* Get the structure count from region descriptor in r1 */
+ ubfx r1, r1, #SDS_REGION_STRUCT_COUNT_SHIFT, #SDS_REGION_STRUCT_COUNT_WIDTH
+ cmp r1, #0
+ beq 2f
+ add r0, r0, #SDS_REGION_DESC_SIZE
+
+ /* Initialize the loop iterator count in r3 */
+ mov r3, #0
+loop_begin:
+ ldrh r2, [r0]
+ cmp r2, #SDS_AP_CPU_INFO_STRUCT_ID
+ bne continue_loop
+
+ /* We have found the required structure */
+ ldr r0, [r0,#(SDS_HEADER_SIZE + SDS_AP_CPU_INFO_PRIMARY_CPUID_OFFSET)]
+ bx lr
+continue_loop:
+ /* Increment the loop counter and exit loop if counter == structure count */
+ add r3, r3, #0x1
+ cmp r1, r3
+ beq 2f
+
+ /* Read the 2nd word in header */
+ ldr r2, [r0,#4]
+ /* Get the structure size from header */
+ ubfx r2, r2, #SDS_HEADER_STRUCT_SIZE_SHIFT, #SDS_HEADER_STRUCT_SIZE_WIDTH
+ /* Add the structure size and SDS HEADER SIZE to point to next header */
+ add r2, r2, #SDS_HEADER_SIZE
+ add r0, r0, r2
+ b loop_begin
+2:
+ mov r0, #0xffffffff
+ bx lr
+endfunc sds_get_primary_cpu_id
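[The structure walk above can be easier to follow in C. A rough, hypothetical equivalent using the same macros from sds.h and sds_private.h; it makes no claim of matching the generated code:]

        /* Illustrative C rendering of the assembly walk above. */
        int sds_get_primary_cpu_id_sketch(void)
        {
                uintptr_t ptr = PLAT_ARM_SDS_MEM_BASE;
                uint32_t desc = *(uint32_t *)ptr;
                unsigned int count, i;

                /* Bail out if the region signature or structure count is missing */
                if ((desc & SDS_REGION_SIGNATURE_MASK) != SDS_REGION_SIGNATURE)
                        return -1;
                count = (desc >> SDS_REGION_STRUCT_COUNT_SHIFT) &
                        SDS_REGION_STRUCT_COUNT_MASK;
                if (count == 0)
                        return -1;

                ptr += SDS_REGION_DESC_SIZE;
                for (i = 0; i < count; i++) {
                        if (*(uint16_t *)ptr == SDS_AP_CPU_INFO_STRUCT_ID)
                                return *(uint32_t *)(ptr + SDS_HEADER_SIZE +
                                        SDS_AP_CPU_INFO_PRIMARY_CPUID_OFFSET);
                        /* Skip this header and its payload to reach the next header */
                        ptr += GET_SDS_HEADER_STRUCT_SIZE(ptr) + SDS_HEADER_SIZE;
                }
                return -1;
        }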
diff --git a/drivers/arm/css/sds/aarch64/sds_helpers.S b/drivers/arm/css/sds/aarch64/sds_helpers.S
new file mode 100644
index 0000000..3256c2b
--- /dev/null
+++ b/drivers/arm/css/sds/aarch64/sds_helpers.S
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <drivers/arm/css/sds.h>
+#include <platform_def.h>
+
+#include "../sds_private.h"
+
+ .globl sds_get_primary_cpu_id
+
+ /*
+ * int sds_get_primary_cpu_id(void);
+	 * Return the primary CPU ID from the SDS Structure
+	 * Returns the CPU ID on success or -1 on failure
+ */
+func sds_get_primary_cpu_id
+ mov_imm x0, PLAT_ARM_SDS_MEM_BASE
+ mov w2, #SDS_REGION_SIGNATURE
+ ldr w1, [x0]
+
+	/* Check if the SDS region signature was found */
+ cmp w2, w1, uxth
+ b.ne 2f
+
+	/* Get the structure count from region descriptor in w1 */
+ ubfx w1, w1, #SDS_REGION_STRUCT_COUNT_SHIFT, #SDS_REGION_STRUCT_COUNT_WIDTH
+ cbz w1, 2f
+ add x0, x0, #SDS_REGION_DESC_SIZE
+
+ /* Initialize the loop iterator count in w3 */
+ mov w3, #0
+loop_begin:
+ ldrh w2, [x0]
+ cmp w2, #SDS_AP_CPU_INFO_STRUCT_ID
+ b.ne continue_loop
+
+ /* We have found the required structure */
+ ldr w0, [x0,#(SDS_HEADER_SIZE + SDS_AP_CPU_INFO_PRIMARY_CPUID_OFFSET)]
+ ret
+continue_loop:
+ /* Increment the loop counter and exit loop if counter == structure count */
+ add w3, w3, #0x1
+ cmp w1, w3
+ b.eq 2f
+
+ /* Read the 2nd word in header */
+ ldr w2, [x0,#4]
+ /* Get the structure size from header */
+ ubfx x2, x2, #SDS_HEADER_STRUCT_SIZE_SHIFT, #SDS_HEADER_STRUCT_SIZE_WIDTH
+ /* Add the structure size and SDS HEADER SIZE to point to next header */
+ add x2, x2, #SDS_HEADER_SIZE
+ add x0, x0, x2
+ b loop_begin
+2:
+ mov w0, #0xffffffff
+ ret
+endfunc sds_get_primary_cpu_id
diff --git a/drivers/arm/css/sds/sds.c b/drivers/arm/css/sds/sds.c
new file mode 100644
index 0000000..1fb196c
--- /dev/null
+++ b/drivers/arm/css/sds/sds.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/arm/css/sds.h>
+#include <platform_def.h>
+
+#include "sds_private.h"
+
+/*
+ * Variables used to track and maintain the state of the memory region reserved
+ * for usage by the SDS framework.
+ */
+
+/* Pointer to the base of the SDS memory region */
+static uintptr_t sds_mem_base;
+
+/* Size of the SDS memory region in bytes */
+static size_t sds_mem_size;
+
+/*
+ * Perform some non-exhaustive tests to determine whether any of the fields
+ * within a Structure Header contain obviously invalid data.
+ * Returns SDS_OK on success, SDS_ERR_FAIL on error.
+ */
+static int sds_struct_is_valid(uintptr_t header)
+{
+ size_t struct_size = GET_SDS_HEADER_STRUCT_SIZE(header);
+
+ /* Zero is not a valid identifier */
+ if (GET_SDS_HEADER_ID(header) == 0)
+ return SDS_ERR_FAIL;
+
+ /* Check SDS Schema version */
+ if (GET_SDS_HEADER_VERSION(header) == SDS_REGION_SCH_VERSION)
+ return SDS_ERR_FAIL;
+
+	/* The SDS Structure sizes have to be multiples of 8 */
+ if ((struct_size == 0) || ((struct_size % 8) != 0))
+ return SDS_ERR_FAIL;
+
+ if (struct_size > sds_mem_size)
+ return SDS_ERR_FAIL;
+
+ return SDS_OK;
+}
+
+/*
+ * Validate the SDS structure headers.
+ * Returns SDS_OK on success, SDS_ERR_FAIL on error.
+ */
+static int validate_sds_struct_headers(void)
+{
+ unsigned int i, structure_count;
+ uintptr_t header;
+
+ structure_count = GET_SDS_REGION_STRUCTURE_COUNT(sds_mem_base);
+
+ if (structure_count == 0)
+ return SDS_ERR_FAIL;
+
+ header = sds_mem_base + SDS_REGION_DESC_SIZE;
+
+ /* Iterate over structure headers and validate each one */
+ for (i = 0; i < structure_count; i++) {
+ if (sds_struct_is_valid(header) != SDS_OK) {
+ WARN("SDS: Invalid structure header detected\n");
+ return SDS_ERR_FAIL;
+ }
+ header += GET_SDS_HEADER_STRUCT_SIZE(header) + SDS_HEADER_SIZE;
+ }
+ return SDS_OK;
+}
+
+/*
+ * Get the structure header pointer corresponding to the structure ID.
+ * Returns SDS_OK on success, SDS_ERR_STRUCT_NOT_FOUND on error.
+ */
+static int get_struct_header(uint32_t structure_id, struct_header_t **header)
+{
+ unsigned int i, structure_count;
+ uintptr_t current_header;
+
+ assert(header);
+
+ structure_count = GET_SDS_REGION_STRUCTURE_COUNT(sds_mem_base);
+ if (structure_count == 0)
+ return SDS_ERR_STRUCT_NOT_FOUND;
+
+ current_header = ((uintptr_t)sds_mem_base) + SDS_REGION_DESC_SIZE;
+
+ /* Iterate over structure headers to find one with a matching ID */
+ for (i = 0; i < structure_count; i++) {
+ if (GET_SDS_HEADER_ID(current_header) == structure_id) {
+ *header = (struct_header_t *)current_header;
+ return SDS_OK;
+ }
+ current_header += GET_SDS_HEADER_STRUCT_SIZE(current_header) +
+ SDS_HEADER_SIZE;
+ }
+
+ *header = NULL;
+ return SDS_ERR_STRUCT_NOT_FOUND;
+}
+
+/*
+ * Check if a structure header corresponding to the structure ID exists.
+ * Returns SDS_OK if the structure header exists, or SDS_ERR_STRUCT_NOT_FOUND
+ * otherwise.
+ */
+int sds_struct_exists(unsigned int structure_id)
+{
+ struct_header_t *header = NULL;
+ int ret;
+
+ ret = get_struct_header(structure_id, &header);
+ if (ret == SDS_OK) {
+ assert(header);
+ }
+
+ return ret;
+}
+
+/*
+ * Read from a field in the structure corresponding to `structure_id`.
+ * `fld_off` is the offset of the field within the structure and `mode`
+ * indicates whether cache maintenance needs to be performed prior to the read.
+ * `data` points to the buffer that receives the read data of size `size`.
+ * Returns SDS_OK on success or corresponding error codes on failure.
+ */
+int sds_struct_read(uint32_t structure_id, unsigned int fld_off,
+ void *data, size_t size, sds_access_mode_t mode)
+{
+ int status;
+ uintptr_t field_base;
+ struct_header_t *header = NULL;
+
+ if (!data)
+ return SDS_ERR_INVALID_PARAMS;
+
+ /* Check if a structure with this ID exists */
+ status = get_struct_header(structure_id, &header);
+ if (status != SDS_OK)
+ return status;
+
+ assert(header);
+
+ if (mode == SDS_ACCESS_MODE_CACHED)
+ inv_dcache_range((uintptr_t)header, SDS_HEADER_SIZE + size);
+
+ if (!IS_SDS_HEADER_VALID(header)) {
+ WARN("SDS: Reading from un-finalized structure 0x%x\n",
+ structure_id);
+ return SDS_ERR_STRUCT_NOT_FINALIZED;
+ }
+
+ if ((fld_off + size) > GET_SDS_HEADER_STRUCT_SIZE(header))
+ return SDS_ERR_FAIL;
+
+ field_base = (uintptr_t)header + SDS_HEADER_SIZE + fld_off;
+ if (check_uptr_overflow(field_base, size - 1))
+ return SDS_ERR_FAIL;
+
+	/* Copy the required field out of the structure */
+ memcpy(data, (void *)field_base, size);
+
+ return SDS_OK;
+}
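[Usage sketch for the read path: the structure ID and access mode names come from sds.h, and the field (a 32-bit flag word at offset 0) mirrors the feature-availability structure already polled earlier in this patch. The surrounding caller is hypothetical:]

        /* Hypothetical caller: read the 32-bit SCP feature availability flags. */
        uint32_t feat;

        if (sds_struct_read(SDS_FEATURE_AVAIL_STRUCT_ID, 0U, &feat,
                            sizeof(feat), SDS_ACCESS_MODE_NON_CACHED) != SDS_OK)
                WARN("SDS: feature availability flags not readable\n");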
+
+/*
+ * Write to a field in the structure corresponding to `structure_id`.
+ * `fld_off` is the offset of the field within the structure and `mode`
+ * indicates whether cache maintenance needs to be performed for the write.
+ * `data` points to the data to be written, of size `size`.
+ * Returns SDS_OK on success or corresponding error codes on failure.
+ */
+int sds_struct_write(uint32_t structure_id, unsigned int fld_off,
+ void *data, size_t size, sds_access_mode_t mode)
+{
+ int status;
+ uintptr_t field_base;
+ struct_header_t *header = NULL;
+
+ if (!data)
+ return SDS_ERR_INVALID_PARAMS;
+
+ /* Check if a structure with this ID exists */
+ status = get_struct_header(structure_id, &header);
+ if (status != SDS_OK)
+ return status;
+
+ assert(header);
+
+ if (mode == SDS_ACCESS_MODE_CACHED)
+ inv_dcache_range((uintptr_t)header, SDS_HEADER_SIZE + size);
+
+ if (!IS_SDS_HEADER_VALID(header)) {
+ WARN("SDS: Writing to un-finalized structure 0x%x\n",
+ structure_id);
+ return SDS_ERR_STRUCT_NOT_FINALIZED;
+ }
+
+ if ((fld_off + size) > GET_SDS_HEADER_STRUCT_SIZE(header))
+ return SDS_ERR_FAIL;
+
+ field_base = (uintptr_t)header + SDS_HEADER_SIZE + fld_off;
+ if (check_uptr_overflow(field_base, size - 1))
+ return SDS_ERR_FAIL;
+
+	/* Copy the data into the required field of the structure */
+ memcpy((void *)field_base, data, size);
+
+ if (mode == SDS_ACCESS_MODE_CACHED)
+ flush_dcache_range((uintptr_t)field_base, size);
+
+ return SDS_OK;
+}
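[A corresponding write-path sketch, here with cache maintenance requested. The structure ID and field offset are hypothetical placeholders, not values defined by this patch:]

        /* Hypothetical caller: publish a 32-bit value into a platform-defined
         * SDS structure (my_struct_id and the zero offset are placeholders). */
        uint32_t my_struct_id = 42U;    /* hypothetical structure ID */
        uint32_t value = 1U;

        if (sds_struct_write(my_struct_id, 0U, &value, sizeof(value),
                             SDS_ACCESS_MODE_CACHED) != SDS_OK)
                WARN("SDS: write failed\n");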
+
+/*
+ * Initialize the SDS driver. This also verifies the SDS schema version and the
+ * sanity of the SDS structure headers.
+ * Returns SDS_OK on success, SDS_ERR_FAIL on error.
+ */
+int sds_init(void)
+{
+ sds_mem_base = (uintptr_t)PLAT_ARM_SDS_MEM_BASE;
+
+ if (!IS_SDS_REGION_VALID(sds_mem_base)) {
+ WARN("SDS: No valid SDS Memory Region found\n");
+ return SDS_ERR_FAIL;
+ }
+
+ if (GET_SDS_REGION_SCHEMA_VERSION(sds_mem_base)
+ != SDS_REGION_SCH_VERSION) {
+ WARN("SDS: Unsupported SDS schema version\n");
+ return SDS_ERR_FAIL;
+ }
+
+ sds_mem_size = GET_SDS_REGION_SIZE(sds_mem_base);
+ if (sds_mem_size > PLAT_ARM_SDS_MEM_SIZE_MAX) {
+ WARN("SDS: SDS Memory Region exceeds size limit\n");
+ return SDS_ERR_FAIL;
+ }
+
+ INFO("SDS: Detected SDS Memory Region (%zu bytes)\n", sds_mem_size);
+
+ if (validate_sds_struct_headers() != SDS_OK)
+ return SDS_ERR_FAIL;
+
+ return SDS_OK;
+}
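[Taken together, a platform typically initializes the driver once early in boot and then probes for the structures it needs. A minimal, hypothetical sequence, assuming the structure ID from sds.h and the usual logging/panic helpers:]

        /* Hypothetical early-boot hook: bring up SDS and probe for CPU info. */
        void plat_sds_setup_sketch(void)
        {
                if (sds_init() != SDS_OK)
                        panic();

                if (sds_struct_exists(SDS_AP_CPU_INFO_STRUCT_ID) == SDS_OK)
                        INFO("SDS: AP CPU info structure is published\n");
        }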
diff --git a/drivers/arm/css/sds/sds_private.h b/drivers/arm/css/sds/sds_private.h
new file mode 100644
index 0000000..d801a04
--- /dev/null
+++ b/drivers/arm/css/sds/sds_private.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SDS_PRIVATE_H
+#define SDS_PRIVATE_H
+
+/* SDS Header defines */
+#define SDS_HEADER_ID_SHIFT 0
+#define SDS_HEADER_ID_WIDTH 16
+#define SDS_HEADER_ID_MASK ((1 << SDS_HEADER_ID_WIDTH) - 1)
+
+#define SDS_HEADER_MINOR_VERSION_WIDTH 8
+#define SDS_HEADER_MINOR_VERSION_SHIFT 16
+#define SDS_HEADER_MAJOR_VERSION_WIDTH 8
+
+#define MAKE_SDS_HEADER_VERSION(major, minor) \
+ (((((major) & 0xff) << SDS_HEADER_MINOR_VERSION_WIDTH) | ((minor) & 0xff)))
+#define SDS_HEADER_VERSION_MASK \
+ ((1 << (SDS_HEADER_MINOR_VERSION_WIDTH + SDS_HEADER_MAJOR_VERSION_WIDTH)) - 1)
+
+#define SDS_HEADER_VERSION MAKE_SDS_HEADER_VERSION(1, 0)
+#define SDS_HEADER_STRUCT_SIZE_WIDTH 23
+#define SDS_HEADER_STRUCT_SIZE_SHIFT 1
+#define SDS_HEADER_STRUCT_SIZE_MASK ((1 << SDS_HEADER_STRUCT_SIZE_WIDTH) - 1)
+#define SDS_HEADER_VALID_MASK 0x1
+#define SDS_HEADER_VALID_SHIFT 0
+#define SDS_HEADER_SIZE 0x8
+
+/* Arbitrary, 16-bit value that indicates a valid SDS Memory Region */
+#define SDS_REGION_SIGNATURE 0xAA7A
+#define SDS_REGION_SIGNATURE_WIDTH 16
+#define SDS_REGION_SIGNATURE_SHIFT 0
+#define SDS_REGION_SIGNATURE_MASK ((1 << SDS_REGION_SIGNATURE_WIDTH) - 1)
+
+#define SDS_REGION_STRUCT_COUNT_SHIFT 16
+#define SDS_REGION_STRUCT_COUNT_WIDTH 8
+#define SDS_REGION_STRUCT_COUNT_MASK ((1 << SDS_REGION_STRUCT_COUNT_WIDTH) - 1)
+
+#define SDS_REGION_SCH_MINOR_SHIFT 24
+#define SDS_REGION_SCH_MINOR_WIDTH 4
+#define SDS_REGION_SCH_MINOR_MASK ((1 << SDS_REGION_SCH_MINOR_WIDTH) - 1)
+
+#define SDS_REGION_SCH_MAJOR_SHIFT 28
+#define SDS_REGION_SCH_MAJOR_WIDTH 4
+#define SDS_REGION_SCH_MAJOR_MASK ((1 << SDS_REGION_SCH_MAJOR_WIDTH) - 1)
+
+#define SDS_REGION_SCH_VERSION_MASK \
+ ((1 << (SDS_REGION_SCH_MINOR_WIDTH + SDS_REGION_SCH_MAJOR_WIDTH)) - 1)
+
+#define MAKE_SDS_REGION_SCH_VERSION(maj, min) \
+ ((((maj) & SDS_REGION_SCH_MAJOR_MASK) << SDS_REGION_SCH_MINOR_WIDTH) | \
+ ((min) & SDS_REGION_SCH_MINOR_MASK))
+
+#define SDS_REGION_SCH_VERSION MAKE_SDS_REGION_SCH_VERSION(1, 0)
+#define SDS_REGION_REGIONSIZE_OFFSET 0x4
+#define SDS_REGION_DESC_SIZE 0x8
+
+#ifndef __ASSEMBLER__
+#include <stddef.h>
+#include <stdint.h>
+
+/* Header containing Shared Data Structure metadata */
+typedef struct structure_header {
+ uint32_t reg[2];
+} struct_header_t;
+
+#define GET_SDS_HEADER_ID(_header) \
+ ((((struct_header_t *)(_header))->reg[0]) & SDS_HEADER_ID_MASK)
+#define GET_SDS_HEADER_VERSION(_header) \
+ (((((struct_header_t *)(_header))->reg[0]) >> SDS_HEADER_MINOR_VERSION_SHIFT)\
+ & SDS_HEADER_VERSION_MASK)
+#define GET_SDS_HEADER_STRUCT_SIZE(_header) \
+ (((((struct_header_t *)(_header))->reg[1]) >> SDS_HEADER_STRUCT_SIZE_SHIFT)\
+ & SDS_HEADER_STRUCT_SIZE_MASK)
+#define IS_SDS_HEADER_VALID(_header) \
+ ((((struct_header_t *)(_header))->reg[1]) & SDS_HEADER_VALID_MASK)
+#define GET_SDS_STRUCT_FIELD(_header, _field_offset) \
+ ((((uint8_t *)(_header)) + sizeof(struct_header_t)) + (_field_offset))
+
+/* Region Descriptor describing the SDS Memory Region */
+typedef struct region_descriptor {
+ uint32_t reg[2];
+} region_desc_t;
+
+#define IS_SDS_REGION_VALID(region) \
+ (((((region_desc_t *)(region))->reg[0]) & SDS_REGION_SIGNATURE_MASK) == SDS_REGION_SIGNATURE)
+#define GET_SDS_REGION_STRUCTURE_COUNT(region) \
+ (((((region_desc_t *)(region))->reg[0]) >> SDS_REGION_STRUCT_COUNT_SHIFT)\
+ & SDS_REGION_STRUCT_COUNT_MASK)
+#define GET_SDS_REGION_SCHEMA_VERSION(region) \
+ (((((region_desc_t *)(region))->reg[0]) >> SDS_REGION_SCH_MINOR_SHIFT)\
+ & SDS_REGION_SCH_VERSION_MASK)
+#define GET_SDS_REGION_SIZE(region) ((((region_desc_t *)(region))->reg[1]))
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* SDS_PRIVATE_H */
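[As a worked example of the header accessors above, with illustrative values only, a structure header whose reg[0] is 0x01000007 and reg[1] is 0x00000031 decodes as follows:]

        /* Worked example (illustrative values only):
         *   reg[0] = 0x01000007
         *     GET_SDS_HEADER_ID          -> 0x0007   (structure ID 7)
         *     GET_SDS_HEADER_VERSION     -> 0x0100   (major 1, minor 0)
         *   reg[1] = 0x00000031
         *     IS_SDS_HEADER_VALID        -> 1        (structure finalized)
         *     GET_SDS_HEADER_STRUCT_SIZE -> 0x18     (24-byte payload follows)
         */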